1 /*
2  * Copyright (c) 2006 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  */
34 
35 #include <sys/types.h>
36 #include <sys/systm.h>
37 #include <sys/kernel.h>
38 #include <sys/stat.h>
39 #include <sys/mman.h>
40 #include <sys/cons.h>
41 #include <sys/random.h>
42 #include <sys/vkernel.h>
43 #include <sys/tls.h>
44 #include <sys/reboot.h>
45 #include <sys/proc.h>
46 #include <sys/msgbuf.h>
47 #include <sys/vmspace.h>
48 #include <sys/socket.h>
49 #include <sys/sockio.h>
50 #include <sys/sysctl.h>
51 #include <sys/un.h>
52 #include <vm/vm_page.h>
53 #include <vm/vm_map.h>
54 #include <sys/mplock2.h>
55 
56 #include <machine/cpu.h>
57 #include <machine/globaldata.h>
58 #include <machine/tls.h>
59 #include <machine/md_var.h>
60 #include <machine/vmparam.h>
61 #include <cpu/specialreg.h>
62 
63 #include <net/if.h>
64 #include <net/if_arp.h>
65 #include <net/ethernet.h>
66 #include <net/bridge/if_bridgevar.h>
67 #include <netinet/in.h>
68 #include <arpa/inet.h>
69 
70 #include <stdio.h>
71 #include <stdlib.h>
72 #include <stdarg.h>
73 #include <stdbool.h>
74 #include <unistd.h>
75 #include <fcntl.h>
76 #include <string.h>
77 #include <err.h>
78 #include <errno.h>
79 #include <assert.h>
80 #include <sysexits.h>
81 
82 vm_paddr_t phys_avail[16];
83 vm_paddr_t Maxmem;
84 vm_paddr_t Maxmem_bytes;
85 long physmem;
86 int MemImageFd = -1;
87 struct vkdisk_info DiskInfo[VKDISK_MAX];
88 int DiskNum;
89 struct vknetif_info NetifInfo[VKNETIF_MAX];
90 int NetifNum;
91 char *pid_file;
92 vm_offset_t KvaStart;
93 vm_offset_t KvaEnd;
94 vm_offset_t KvaSize;
95 vm_offset_t virtual_start;
96 vm_offset_t virtual_end;
97 vm_offset_t virtual2_start;
98 vm_offset_t virtual2_end;
99 vm_offset_t kernel_vm_end;
100 vm_offset_t crashdumpmap;
101 vm_offset_t clean_sva;
102 vm_offset_t clean_eva;
103 struct msgbuf *msgbufp;
104 caddr_t ptvmmap;
105 vpte_t	*KernelPTD;
106 vpte_t	*KernelPTA;	/* Warning: Offset for direct VA translation */
107 void *dmap_min_address;
108 u_int cpu_feature;	/* XXX */
109 int tsc_present;
110 int64_t tsc_frequency;
111 int optcpus;		/* number of cpus - see mp_start() */
112 int lwp_cpu_lock;	/* if/how to lock virtual CPUs to real CPUs */
113 int real_ncpus;		/* number of real CPUs */
114 int next_cpu;		/* next real CPU to lock a virtual CPU to */
115 
116 struct privatespace *CPU_prvspace;
117 
118 static struct trapframe proc0_tf;
119 static void *proc0paddr;
120 
121 static void init_sys_memory(char *imageFile);
122 static void init_kern_memory(void);
123 static void init_globaldata(void);
124 static void init_vkernel(void);
125 static void init_disk(char *diskExp[], int diskFileNum, enum vkdisk_type type);
126 static void init_netif(char *netifExp[], int netifFileNum);
127 static void writepid(void);
128 static void cleanpid(void);
129 static int unix_connect(const char *path);
130 static void usage_err(const char *ctl, ...);
131 static void usage_help(_Bool);
132 
133 static int save_ac;
134 static char **save_av;
135 
136 /*
137  * Kernel startup for virtual kernels - standard main()
138  */
139 int
140 main(int ac, char **av)
141 {
142 	char *memImageFile = NULL;
143 	char *netifFile[VKNETIF_MAX];
144 	char *diskFile[VKDISK_MAX];
145 	char *cdFile[VKDISK_MAX];
146 	char *suffix;
147 	char *endp;
148 	char *tmp;
149 	int netifFileNum = 0;
150 	int diskFileNum = 0;
151 	int cdFileNum = 0;
152 	int bootOnDisk = -1;	/* set below to vcd (0) or vkd (1) */
153 	int c;
154 	int i;
155 	int j;
156 	int n;
157 	int isq;
158 	int pos;
159 	int eflag;
160 	int real_vkernel_enable;
161 	int supports_sse;
162 	size_t vsize;
163 	size_t kenv_size;
164 	size_t kenv_size2;
165 
166 	save_ac = ac;
167 	save_av = av;
168 	eflag = 0;
169 	pos = 0;
170 	kenv_size = 0;
171 
172 	/*
173 	 * Process options
174 	 */
175 	kernel_mem_readonly = 1;
176 #ifdef SMP
177 	optcpus = 2;
178 #endif
179 	lwp_cpu_lock = LCL_NONE;
180 
181 	real_vkernel_enable = 0;
182 	vsize = sizeof(real_vkernel_enable);
183 	sysctlbyname("vm.vkernel_enable", &real_vkernel_enable, &vsize, NULL,0);
184 
185 	if (real_vkernel_enable == 0) {
186 		errx(1, "vm.vkernel_enable is 0, must be set "
187 			"to 1 to execute a vkernel!");
188 	}
189 
190 	real_ncpus = 1;
191 	vsize = sizeof(real_ncpus);
192 	sysctlbyname("hw.ncpu", &real_ncpus, &vsize, NULL, 0);
193 
194 	if (ac < 2)
195 		usage_help(false);
196 
197 	while ((c = getopt(ac, av, "c:hsvl:m:n:r:e:i:p:I:U")) != -1) {
198 		switch(c) {
199 		case 'e':
200 			/*
201 			 * name=value:name=value:name=value...
202 			 * name="value"...
203 			 *
			 * Allow values to be quoted but note that shells
			 * may remove the quotes, so embedding colons this
			 * way may require escaping the quotes with a
			 * backslash.
207 			 */
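			/*
			 * Illustration (hypothetical values): a single
			 *
			 *	-e foo=1:bar=2
			 *
			 * is turned below into the double-NUL-terminated
			 * block "foo=1\0bar=2\0\0" in kern_envp; repeated
			 * -e options append to the same block.
			 */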
208 			n = strlen(optarg);
209 			isq = 0;
210 
211 			if (eflag == 0) {
212 				kenv_size = n + 2;
213 				kern_envp = malloc(kenv_size);
214 				if (kern_envp == NULL)
					errx(1, "Couldn't allocate %zu bytes for kern_envp", kenv_size);
216 			} else {
217 				kenv_size2 = kenv_size + n + 1;
218 				pos = kenv_size - 1;
219 				if ((tmp = realloc(kern_envp, kenv_size2)) == NULL)
					errx(1, "Couldn't reallocate %zu bytes for kern_envp", kenv_size2);
221 				kern_envp = tmp;
222 				kenv_size = kenv_size2;
223 			}
224 
225 			for (i = 0, j = pos; i < n; ++i) {
226 				if (optarg[i] == '"')
227 					isq ^= 1;
228 				else if (optarg[i] == '\'')
229 					isq ^= 2;
230 				else if (isq == 0 && optarg[i] == ':')
231 					kern_envp[j++] = 0;
232 				else
233 					kern_envp[j++] = optarg[i];
234 			}
235 			kern_envp[j++] = 0;
236 			kern_envp[j++] = 0;
237 			eflag++;
238 			break;
239 		case 's':
240 			boothowto |= RB_SINGLE;
241 			break;
242 		case 'v':
243 			bootverbose = 1;
244 			break;
245 		case 'i':
246 			memImageFile = optarg;
247 			break;
248 		case 'I':
249 			if (netifFileNum < VKNETIF_MAX)
250 				netifFile[netifFileNum++] = strdup(optarg);
251 			break;
252 		case 'r':
253 			if (bootOnDisk < 0)
254 				bootOnDisk = 1;
255 			if (diskFileNum + cdFileNum < VKDISK_MAX)
256 				diskFile[diskFileNum++] = strdup(optarg);
257 			break;
258 		case 'c':
259 			if (bootOnDisk < 0)
260 				bootOnDisk = 0;
261 			if (diskFileNum + cdFileNum < VKDISK_MAX)
262 				cdFile[cdFileNum++] = strdup(optarg);
263 			break;
264 		case 'm':
265 			Maxmem_bytes = strtoull(optarg, &suffix, 0);
266 			if (suffix) {
267 				switch(*suffix) {
268 				case 'g':
269 				case 'G':
270 					Maxmem_bytes <<= 30;
271 					break;
272 				case 'm':
273 				case 'M':
274 					Maxmem_bytes <<= 20;
275 					break;
276 				case 'k':
277 				case 'K':
278 					Maxmem_bytes <<= 10;
					break;
				case 0:
					/* no suffix: size is in bytes */
					break;
				default:
281 					Maxmem_bytes = 0;
282 					usage_err("Bad maxmem option");
283 					/* NOT REACHED */
284 					break;
285 				}
286 			}
287 			break;
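		/*
		 * Examples for -l (illustrative): "-l any" leaves virtual
		 * cpus unlocked, "-l 1" locks every virtual cpu to real
		 * cpu 1, and "-l map" or "-l map,N" locks virtual cpus to
		 * successive real cpus starting at 0 (or N); the actual
		 * binding is done later in setrealcpu().
		 */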
288 		case 'l':
289 			next_cpu = -1;
290 			if (strncmp("map", optarg, 3) == 0) {
291 				lwp_cpu_lock = LCL_PER_CPU;
292 				if (optarg[3] == ',') {
293 					next_cpu = strtol(optarg+4, &endp, 0);
294 					if (*endp != '\0')
295 						usage_err("Bad target CPU number at '%s'", endp);
296 				} else {
297 					next_cpu = 0;
298 				}
299 				if (next_cpu < 0 || next_cpu > real_ncpus - 1)
300 					usage_err("Bad target CPU, valid range is 0-%d", real_ncpus - 1);
301 			} else if (strncmp("any", optarg, 3) == 0) {
302 				lwp_cpu_lock = LCL_NONE;
303 			} else {
304 				lwp_cpu_lock = LCL_SINGLE_CPU;
305 				next_cpu = strtol(optarg, &endp, 0);
306 				if (*endp != '\0')
307 					usage_err("Bad target CPU number at '%s'", endp);
308 				if (next_cpu < 0 || next_cpu > real_ncpus - 1)
309 					usage_err("Bad target CPU, valid range is 0-%d", real_ncpus - 1);
310 			}
311 			break;
312 		case 'n':
313 			/*
314 			 * This value is set up by mp_start(), don't just
315 			 * set ncpus here.
316 			 */
317 #ifdef SMP
318 			optcpus = strtol(optarg, NULL, 0);
319 			if (optcpus < 1 || optcpus > MAXCPU)
320 				usage_err("Bad ncpus, valid range is 1-%d", MAXCPU);
321 #else
322 			if (strtol(optarg, NULL, 0) != 1) {
323 				usage_err("You built a UP vkernel, only 1 cpu!");
324 			}
325 #endif
326 
327 			break;
328 		case 'p':
329 			pid_file = optarg;
330 			break;
331 		case 'U':
332 			kernel_mem_readonly = 0;
333 			break;
334 		case 'h':
335 			usage_help(true);
336 			break;
337 		default:
338 			usage_help(false);
339 		}
340 	}
341 
342 	writepid();
343 	cpu_disable_intr();
344 	init_sys_memory(memImageFile);
345 	init_kern_memory();
346 	init_globaldata();
347 	init_vkernel();
348 	setrealcpu();
349 	init_kqueue();
350 
351 	vmm_guest = 1;
352 
353 	/*
354 	 * Check TSC
355 	 */
356 	vsize = sizeof(tsc_present);
357 	sysctlbyname("hw.tsc_present", &tsc_present, &vsize, NULL, 0);
358 	vsize = sizeof(tsc_frequency);
359 	sysctlbyname("hw.tsc_frequency", &tsc_frequency, &vsize, NULL, 0);
360 	if (tsc_present)
361 		cpu_feature |= CPUID_TSC;
362 
363 	/*
364 	 * Check SSE
365 	 */
366 	vsize = sizeof(supports_sse);
367 	supports_sse = 0;
368 	sysctlbyname("hw.instruction_sse", &supports_sse, &vsize, NULL, 0);
369 	init_fpu(supports_sse);
370 	if (supports_sse)
371 		cpu_feature |= CPUID_SSE | CPUID_FXSR;
372 
373 	/*
374 	 * We boot from the first installed disk.
375 	 */
376 	if (bootOnDisk == 1) {
377 		init_disk(diskFile, diskFileNum, VKD_DISK);
378 		init_disk(cdFile, cdFileNum, VKD_CD);
379 	} else {
380 		init_disk(cdFile, cdFileNum, VKD_CD);
381 		init_disk(diskFile, diskFileNum, VKD_DISK);
382 	}
383 	init_netif(netifFile, netifFileNum);
384 	init_exceptions();
385 	mi_startup();
386 	/* NOT REACHED */
387 	exit(EX_SOFTWARE);
388 }
389 
390 /*
391  * Initialize system memory.  This is the virtual kernel's 'RAM'.
392  */
393 static
394 void
395 init_sys_memory(char *imageFile)
396 {
397 	struct stat st;
398 	int i;
399 	int fd;
400 
401 	/*
402 	 * Figure out the system memory image size.  If an image file was
403 	 * specified and -m was not specified, use the image file's size.
404 	 */
405 	if (imageFile && stat(imageFile, &st) == 0 && Maxmem_bytes == 0)
406 		Maxmem_bytes = (vm_paddr_t)st.st_size;
407 	if ((imageFile == NULL || stat(imageFile, &st) < 0) &&
408 	    Maxmem_bytes == 0) {
409 		errx(1, "Cannot create new memory file %s unless "
410 		       "system memory size is specified with -m",
411 		       imageFile);
412 		/* NOT REACHED */
413 	}
414 
415 	/*
416 	 * Maxmem must be known at this time
417 	 */
418 	if (Maxmem_bytes < 64 * 1024 * 1024 || (Maxmem_bytes & SEG_MASK)) {
419 		errx(1, "Bad maxmem specification: 64MB minimum, "
420 		       "multiples of %dMB only",
421 		       SEG_SIZE / 1024 / 1024);
422 		/* NOT REACHED */
423 	}
424 
425 	/*
426 	 * Generate an image file name if necessary, then open/create the
427 	 * file exclusively locked.  Do not allow multiple virtual kernels
428 	 * to use the same image file.
429 	 *
	 * Don't iterate through a million files if we do not have write
	 * access to the directory, stop if our open() failed on a
	 * non-existent file.  Otherwise opens can fail for any number
	 * of reasons (existing lock, permissions, and so on).
433 	 */
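	/*
	 * For example, the first vkernel started without -i typically gets
	 * /var/vkernel/memimg.000000; a second vkernel running at the same
	 * time finds that file locked (the open() fails but stat()
	 * succeeds) and moves on to memimg.000001, and so on.
	 */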
434 	if (imageFile == NULL) {
435 		for (i = 0; i < 1000000; ++i) {
436 			asprintf(&imageFile, "/var/vkernel/memimg.%06d", i);
437 			fd = open(imageFile,
438 				  O_RDWR|O_CREAT|O_EXLOCK|O_NONBLOCK, 0644);
439 			if (fd < 0 && stat(imageFile, &st) == 0) {
440 				free(imageFile);
441 				continue;
442 			}
443 			break;
444 		}
445 	} else {
446 		fd = open(imageFile, O_RDWR|O_CREAT|O_EXLOCK|O_NONBLOCK, 0644);
447 	}
448 	fprintf(stderr, "Using memory file: %s\n", imageFile);
449 	if (fd < 0 || fstat(fd, &st) < 0) {
450 		err(1, "Unable to open/create %s", imageFile);
451 		/* NOT REACHED */
452 	}
453 
454 	/*
455 	 * Truncate or extend the file as necessary.  Clean out the contents
456 	 * of the file, we want it to be full of holes so we don't waste
457 	 * time reading in data from an old file that we no longer care
458 	 * about.
459 	 */
460 	ftruncate(fd, 0);
461 	ftruncate(fd, Maxmem_bytes);
462 
463 	MemImageFd = fd;
464 	Maxmem = Maxmem_bytes >> PAGE_SHIFT;
465 	physmem = Maxmem;
466 }
467 
468 /*
469  * Initialize kernel memory.  This reserves kernel virtual memory by using
 * MAP_VPAGETABLE.
471  */
472 
473 static
474 void
475 init_kern_memory(void)
476 {
477 	void *base;
478 	void *try;
479 	char dummy;
480 	char *topofstack = &dummy;
481 	int i;
482 	void *firstfree;
483 
484 	/*
485 	 * Memory map our kernel virtual memory space.  Note that the
486 	 * kernel image itself is not made part of this memory for the
487 	 * moment.
488 	 *
489 	 * The memory map must be segment-aligned so we can properly
490 	 * offset KernelPTD.
491 	 *
492 	 * If the system kernel has a different MAXDSIZ, it might not
	 * be possible to map kernel memory in its preferred location.
494 	 * Try a number of different locations.
495 	 */
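	/*
	 * Sketch of the probe loop below: candidate base addresses are
	 * 512GB, 1TB, 1.5TB, ... and the first one that mmap() returns
	 * exactly as requested (while still leaving the mapping below
	 * our user stack) is used.
	 */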
496 	try = (void *)(512UL << 30);
497 	base = NULL;
498 	while ((char *)try + KERNEL_KVA_SIZE < topofstack) {
499 		base = mmap(try, KERNEL_KVA_SIZE, PROT_READ|PROT_WRITE,
500 			    MAP_FILE|MAP_SHARED|MAP_VPAGETABLE,
501 			    MemImageFd, (off_t)try);
502 		if (base == try)
503 			break;
504 		if (base != MAP_FAILED)
505 			munmap(base, KERNEL_KVA_SIZE);
506 		try = (char *)try + (512UL << 30);
507 	}
508 	if (base != try) {
509 		err(1, "Unable to mmap() kernel virtual memory!");
510 		/* NOT REACHED */
511 	}
512 	madvise(base, KERNEL_KVA_SIZE, MADV_NOSYNC);
513 	KvaStart = (vm_offset_t)base;
514 	KvaSize = KERNEL_KVA_SIZE;
515 	KvaEnd = KvaStart + KvaSize;
516 
517 	/* cannot use kprintf yet */
518 	printf("KVM mapped at %p-%p\n", (void *)KvaStart, (void *)KvaEnd);
519 
520 	/* MAP_FILE? */
521 	dmap_min_address = mmap(0, DMAP_SIZE, PROT_READ|PROT_WRITE,
522 				MAP_NOCORE|MAP_NOSYNC|MAP_SHARED,
523 				MemImageFd, 0);
524 	if (dmap_min_address == MAP_FAILED) {
525 		err(1, "Unable to mmap() kernel DMAP region!");
526 		/* NOT REACHED */
527 	}
528 
529 	firstfree = NULL;
530 	pmap_bootstrap((vm_paddr_t *)&firstfree, (int64_t)base);
531 
532 	mcontrol(base, KERNEL_KVA_SIZE, MADV_SETMAP,
533 		 0 | VPTE_R | VPTE_W | VPTE_V);
534 
535 	/*
536 	 * phys_avail[] represents unallocated physical memory.  MI code
537 	 * will use phys_avail[] to create the vm_page array.
538 	 */
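	/*
	 * Illustration (hypothetical numbers): with "-m 512m", phys_avail
	 * covers roughly everything between the end of the pmap bootstrap
	 * area (firstfree) and the 512MB mark; pieces are carved off its
	 * low end below for the UAREA, message buffer, etc.
	 */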
539 	phys_avail[0] = (vm_paddr_t)firstfree;
540 	phys_avail[0] = (phys_avail[0] + PAGE_MASK) & ~(vm_paddr_t)PAGE_MASK;
541 	phys_avail[1] = Maxmem_bytes;
542 
543 #if JGV
544 	/*
545 	 * (virtual_start, virtual_end) represent unallocated kernel virtual
546 	 * memory.  MI code will create kernel_map using these parameters.
547 	 */
548 	virtual_start = KvaStart + (long)firstfree;
549 	virtual_start = (virtual_start + PAGE_MASK) & ~(vm_offset_t)PAGE_MASK;
550 	virtual_end = KvaStart + KERNEL_KVA_SIZE;
551 #endif
552 
553 	/*
554 	 * pmap_growkernel() will set the correct value.
555 	 */
556 	kernel_vm_end = 0;
557 
558 	/*
559 	 * Allocate space for process 0's UAREA.
560 	 */
561 	proc0paddr = (void *)virtual_start;
562 	for (i = 0; i < UPAGES; ++i) {
563 		pmap_kenter_quick(virtual_start, phys_avail[0]);
564 		virtual_start += PAGE_SIZE;
565 		phys_avail[0] += PAGE_SIZE;
566 	}
567 
568 	/*
569 	 * crashdumpmap
570 	 */
571 	crashdumpmap = virtual_start;
572 	virtual_start += MAXDUMPPGS * PAGE_SIZE;
573 
574 	/*
575 	 * msgbufp maps the system message buffer
576 	 */
577 	assert((MSGBUF_SIZE & PAGE_MASK) == 0);
578 	msgbufp = (void *)virtual_start;
579 	for (i = 0; i < (MSGBUF_SIZE >> PAGE_SHIFT); ++i) {
580 		pmap_kenter_quick(virtual_start, phys_avail[0]);
581 		virtual_start += PAGE_SIZE;
582 		phys_avail[0] += PAGE_SIZE;
583 	}
584 	msgbufinit(msgbufp, MSGBUF_SIZE);
585 
586 	/*
587 	 * used by kern_memio for /dev/mem access
588 	 */
589 	ptvmmap = (caddr_t)virtual_start;
590 	virtual_start += PAGE_SIZE;
591 
592 	/*
593 	 * Bootstrap the kernel_pmap
594 	 */
595 #if JGV
596 	pmap_bootstrap();
597 #endif
598 }
599 
600 /*
601  * Map the per-cpu globaldata for cpu #0.  Allocate the space using
602  * virtual_start and phys_avail[0]
603  */
604 static
605 void
606 init_globaldata(void)
607 {
608 	int i;
609 	vm_paddr_t pa;
610 	vm_offset_t va;
611 
612 	/*
613 	 * Reserve enough KVA to cover possible cpus.  This is a considerable
614 	 * amount of KVA since the privatespace structure includes two
615 	 * whole page table mappings.
616 	 */
617 	virtual_start = (virtual_start + SEG_MASK) & ~(vm_offset_t)SEG_MASK;
618 	CPU_prvspace = (void *)virtual_start;
619 	virtual_start += sizeof(struct privatespace) * SMP_MAXCPU;
620 
621 	/*
622 	 * Allocate enough physical memory to cover the mdglobaldata
623 	 * portion of the space and the idle stack and map the pages
624 	 * into KVA.  For cpu #0 only.
625 	 */
626 	for (i = 0; i < sizeof(struct mdglobaldata); i += PAGE_SIZE) {
627 		pa = phys_avail[0];
628 		va = (vm_offset_t)&CPU_prvspace[0].mdglobaldata + i;
629 		pmap_kenter_quick(va, pa);
630 		phys_avail[0] += PAGE_SIZE;
631 	}
632 	for (i = 0; i < sizeof(CPU_prvspace[0].idlestack); i += PAGE_SIZE) {
633 		pa = phys_avail[0];
634 		va = (vm_offset_t)&CPU_prvspace[0].idlestack + i;
635 		pmap_kenter_quick(va, pa);
636 		phys_avail[0] += PAGE_SIZE;
637 	}
638 
639 	/*
640 	 * Setup the %gs for cpu #0.  The mycpu macro works after this
641 	 * point.  Note that %fs is used by pthreads.
642 	 */
643 	tls_set_gs(&CPU_prvspace[0], sizeof(struct privatespace));
644 }
645 
646 /*
647  * Initialize very low level systems including thread0, proc0, etc.
648  */
649 static
650 void
651 init_vkernel(void)
652 {
653 	struct mdglobaldata *gd;
654 
655 	gd = &CPU_prvspace[0].mdglobaldata;
656 	bzero(gd, sizeof(*gd));
657 
658 	gd->mi.gd_curthread = &thread0;
659 	thread0.td_gd = &gd->mi;
660 	ncpus = 1;
661 	ncpus2 = 1;	/* rounded down power of 2 */
662 	ncpus_fit = 1;	/* rounded up power of 2 */
663 	/* ncpus2_mask and ncpus_fit_mask are 0 */
664 	init_param1();
665 	gd->mi.gd_prvspace = &CPU_prvspace[0];
666 	mi_gdinit(&gd->mi, 0);
667 	cpu_gdinit(gd, 0);
668 	mi_proc0init(&gd->mi, proc0paddr);
669 	lwp0.lwp_md.md_regs = &proc0_tf;
670 
671 	/*init_locks();*/
672 #ifdef SMP
673 	/*
674 	 * Get the initial mplock with a count of 1 for the BSP.
675 	 * This uses a LOGICAL cpu ID, ie BSP == 0.
676 	 */
677 	cpu_get_initial_mplock();
678 #endif
679 	cninit();
680 	rand_initialize();
681 #if 0	/* #ifdef DDB */
682 	kdb_init();
683 	if (boothowto & RB_KDB)
684 		Debugger("Boot flags requested debugger");
685 #endif
686 	identcpu();
687 #if 0
688 	initializecpu();	/* Initialize CPU registers */
689 #endif
690 	init_param2((phys_avail[1] - phys_avail[0]) / PAGE_SIZE);
691 
692 #if 0
693 	/*
694 	 * Map the message buffer
695 	 */
696 	for (off = 0; off < round_page(MSGBUF_SIZE); off += PAGE_SIZE)
697 		pmap_kenter((vm_offset_t)msgbufp + off, avail_end + off);
698 	msgbufinit(msgbufp, MSGBUF_SIZE);
699 #endif
700 #if 0
701 	thread0.td_pcb_cr3 ... MMU
702 	lwp0.lwp_md.md_regs = &proc0_tf;
703 #endif
704 }
705 
706 /*
707  * Filesystem image paths for the virtual kernel are optional.
708  * If specified they each should point to a disk image,
709  * the first of which will become the root disk.
710  *
711  * The virtual kernel caches data from our 'disk' just like a normal kernel,
712  * so we do not really want the real kernel to cache the data too.  Use
713  * O_DIRECT to remove the duplication.
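/*
 * Example (illustrative only): a vkernel started with
 * "-r root.img -r data.img" attaches root.img as vkd0 (the root disk,
 * mounted from vkd0s0a or vkd0s1a) and data.img as vkd1; images given
 * with -c show up as vcd units instead.
 */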
714  */
715 static
716 void
717 init_disk(char *diskExp[], int diskFileNum, enum vkdisk_type type)
718 {
719 	int i;
720 
	if (diskFileNum == 0)
		return;

	for (i = 0; i < diskFileNum; i++) {
		char *fname;
		fname = diskExp[i];

		if (fname == NULL) {
			warnx("Invalid argument to '-r'");
			continue;
		}
732 
733 		if (DiskNum < VKDISK_MAX) {
734 			struct stat st;
735 			struct vkdisk_info* info = NULL;
736 			int fd;
737 			size_t l = 0;
738 
739 			if (type == VKD_DISK)
740 			    fd = open(fname, O_RDWR|O_DIRECT, 0644);
741 			else
742 			    fd = open(fname, O_RDONLY|O_DIRECT, 0644);
743 			if (fd < 0 || fstat(fd, &st) < 0) {
744 				err(1, "Unable to open/create %s", fname);
745 				/* NOT REACHED */
746 			}
747 			if (S_ISREG(st.st_mode)) {
748 				if (flock(fd, LOCK_EX|LOCK_NB) < 0) {
				errx(1, "Disk image %s is already "
					"in use", fname);
751 					/* NOT REACHED */
752 				}
753 			}
754 
755 			info = &DiskInfo[DiskNum];
756 			l = strlen(fname);
757 
758 			info->unit = i;
759 			info->fd = fd;
760 			info->type = type;
761 			memcpy(info->fname, fname, l);
762 
763 			if (DiskNum == 0) {
764 				if (type == VKD_CD) {
765 				    rootdevnames[0] = "cd9660:vcd0a";
766 				} else if (type == VKD_DISK) {
767 				    rootdevnames[0] = "ufs:vkd0s0a";
768 				    rootdevnames[1] = "ufs:vkd0s1a";
769 				}
770 			}
771 
772 			DiskNum++;
773 		} else {
			warnx("vkd%d (%s) > VKDISK_MAX", DiskNum, fname);
			continue;
776 		}
777 	}
778 }
779 
780 static
781 int
782 netif_set_tapflags(int tap_unit, int f, int s)
783 {
784 	struct ifreq ifr;
785 	int flags;
786 
787 	bzero(&ifr, sizeof(ifr));
788 
789 	snprintf(ifr.ifr_name, sizeof(ifr.ifr_name), "tap%d", tap_unit);
790 	if (ioctl(s, SIOCGIFFLAGS, &ifr) < 0) {
791 		warn("tap%d: ioctl(SIOCGIFFLAGS) failed", tap_unit);
792 		return -1;
793 	}
794 
795 	/*
796 	 * Adjust if_flags
797 	 *
798 	 * If the flags are already set/cleared, then we return
799 	 * immediately to avoid extra syscalls
800 	 */
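	/*
	 * The sign of 'f' selects the operation, e.g. (illustrative)
	 * netif_set_tapflags(unit, IFF_UP, s) sets IFF_UP while
	 * netif_set_tapflags(unit, -IFF_UP, s) would clear it.
	 */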
801 	flags = (ifr.ifr_flags & 0xffff) | (ifr.ifr_flagshigh << 16);
802 	if (f < 0) {
803 		/* Turn off flags */
804 		f = -f;
805 		if ((flags & f) == 0)
806 			return 0;
807 		flags &= ~f;
808 	} else {
809 		/* Turn on flags */
810 		if (flags & f)
811 			return 0;
812 		flags |= f;
813 	}
814 
815 	/*
	 * Fix up ifreq.ifr_name, since it may have been trashed by
	 * the previous ioctl(SIOCGIFFLAGS).
818 	 */
819 	snprintf(ifr.ifr_name, sizeof(ifr.ifr_name), "tap%d", tap_unit);
820 
821 	ifr.ifr_flags = flags & 0xffff;
822 	ifr.ifr_flagshigh = flags >> 16;
823 	if (ioctl(s, SIOCSIFFLAGS, &ifr) < 0) {
824 		warn("tap%d: ioctl(SIOCSIFFLAGS) failed", tap_unit);
825 		return -1;
826 	}
827 	return 0;
828 }
829 
830 static
831 int
832 netif_set_tapaddr(int tap_unit, in_addr_t addr, in_addr_t mask, int s)
833 {
834 	struct ifaliasreq ifra;
835 	struct sockaddr_in *in;
836 
837 	bzero(&ifra, sizeof(ifra));
838 	snprintf(ifra.ifra_name, sizeof(ifra.ifra_name), "tap%d", tap_unit);
839 
840 	/* Setup address */
841 	in = (struct sockaddr_in *)&ifra.ifra_addr;
842 	in->sin_family = AF_INET;
843 	in->sin_len = sizeof(*in);
844 	in->sin_addr.s_addr = addr;
845 
846 	if (mask != 0) {
847 		/* Setup netmask */
848 		in = (struct sockaddr_in *)&ifra.ifra_mask;
849 		in->sin_len = sizeof(*in);
850 		in->sin_addr.s_addr = mask;
851 	}
852 
853 	if (ioctl(s, SIOCAIFADDR, &ifra) < 0) {
854 		warn("tap%d: ioctl(SIOCAIFADDR) failed", tap_unit);
855 		return -1;
856 	}
857 	return 0;
858 }
859 
860 static
861 int
862 netif_add_tap2brg(int tap_unit, const char *ifbridge, int s)
863 {
864 	struct ifbreq ifbr;
865 	struct ifdrv ifd;
866 
867 	bzero(&ifbr, sizeof(ifbr));
868 	snprintf(ifbr.ifbr_ifsname, sizeof(ifbr.ifbr_ifsname),
869 		 "tap%d", tap_unit);
870 
871 	bzero(&ifd, sizeof(ifd));
872 	strlcpy(ifd.ifd_name, ifbridge, sizeof(ifd.ifd_name));
873 	ifd.ifd_cmd = BRDGADD;
874 	ifd.ifd_len = sizeof(ifbr);
875 	ifd.ifd_data = &ifbr;
876 
877 	if (ioctl(s, SIOCSDRVSPEC, &ifd) < 0) {
878 		/*
879 		 * 'errno == EEXIST' means that the tap(4) is already
880 		 * a member of the bridge(4)
881 		 */
882 		if (errno != EEXIST) {
883 			warn("ioctl(%s, SIOCSDRVSPEC) failed", ifbridge);
884 			return -1;
885 		}
886 	}
887 	return 0;
888 }
889 
890 #define TAPDEV_OFLAGS	(O_RDWR | O_NONBLOCK)
891 
892 /*
893  * Locate the first unused tap(4) device file if auto mode is requested,
894  * or open the user supplied device file, and bring up the corresponding
895  * tap(4) interface.
896  *
 * NOTE: Only tap(4) device files and UNIX domain sockets (such as one
 *	 provided by vknet(1)) are supported currently.
898  */
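/*
 * In "auto" mode, opening the tap(4) cloning device yields the first
 * unused unit, e.g. (illustrative) if tap0 and tap1 are busy the open
 * returns a descriptor for tap2; the unit number is recovered below
 * via fdevname().
 */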
899 static
900 int
901 netif_open_tap(const char *netif, int *tap_unit, int s)
902 {
903 	char tap_dev[MAXPATHLEN];
904 	int tap_fd, failed;
905 	struct stat st;
906 	char *dname;
907 
908 	*tap_unit = -1;
909 
910 	if (strcmp(netif, "auto") == 0) {
911 		/*
912 		 * Find first unused tap(4) device file
913 		 */
914 		tap_fd = open("/dev/tap", TAPDEV_OFLAGS);
915 		if (tap_fd < 0) {
916 			warnc(errno, "Unable to find a free tap(4)");
917 			return -1;
918 		}
919 	} else {
920 		/*
921 		 * User supplied tap(4) device file or unix socket.
922 		 */
923 		if (netif[0] == '/')	/* Absolute path */
924 			strlcpy(tap_dev, netif, sizeof(tap_dev));
925 		else
926 			snprintf(tap_dev, sizeof(tap_dev), "/dev/%s", netif);
927 
928 		tap_fd = open(tap_dev, TAPDEV_OFLAGS);
929 
930 		/*
931 		 * If we cannot open normally try to connect to it.
932 		 */
933 		if (tap_fd < 0)
934 			tap_fd = unix_connect(tap_dev);
935 
936 		if (tap_fd < 0) {
937 			warn("Unable to open %s", tap_dev);
938 			return -1;
939 		}
940 	}
941 
942 	/*
943 	 * Check whether the device file is a tap(4)
944 	 */
945 	if (fstat(tap_fd, &st) < 0) {
946 		failed = 1;
947 	} else if (S_ISCHR(st.st_mode)) {
948 		dname = fdevname(tap_fd);
949 		if (dname)
950 			dname = strstr(dname, "tap");
951 		if (dname) {
952 			/*
953 			 * Bring up the corresponding tap(4) interface
954 			 */
955 			*tap_unit = strtol(dname + 3, NULL, 10);
956 			printf("TAP UNIT %d\n", *tap_unit);
957 			if (netif_set_tapflags(*tap_unit, IFF_UP, s) == 0)
958 				failed = 0;
959 			else
960 				failed = 1;
961 		} else {
962 			failed = 1;
963 		}
964 	} else if (S_ISSOCK(st.st_mode)) {
965 		/*
966 		 * Special socket connection (typically to vknet).  We
967 		 * do not have to do anything.
968 		 */
969 		failed = 0;
970 	} else {
971 		failed = 1;
972 	}
973 
974 	if (failed) {
975 		warnx("%s is not a tap(4) device or socket", tap_dev);
976 		close(tap_fd);
977 		tap_fd = -1;
978 		*tap_unit = -1;
979 	}
980 	return tap_fd;
981 }
982 
983 static int
984 unix_connect(const char *path)
985 {
986 	struct sockaddr_un sunx;
987 	int len;
988 	int net_fd;
989 	int sndbuf = 262144;
990 	struct stat st;
991 
992 	snprintf(sunx.sun_path, sizeof(sunx.sun_path), "%s", path);
993 	len = offsetof(struct sockaddr_un, sun_path[strlen(sunx.sun_path)]);
994 	++len;	/* include nul */
995 	sunx.sun_family = AF_UNIX;
996 	sunx.sun_len = len;
997 
998 	net_fd = socket(AF_UNIX, SOCK_SEQPACKET, 0);
999 	if (net_fd < 0)
1000 		return(-1);
1001 	if (connect(net_fd, (void *)&sunx, len) < 0) {
1002 		close(net_fd);
1003 		return(-1);
1004 	}
1005 	setsockopt(net_fd, SOL_SOCKET, SO_SNDBUF, &sndbuf, sizeof(sndbuf));
1006 	if (fstat(net_fd, &st) == 0)
1007 		printf("Network socket buffer: %d bytes\n", st.st_blksize);
1008 	fcntl(net_fd, F_SETFL, O_NONBLOCK);
1009 	return(net_fd);
1010 }
1011 
#undef TAPDEV_OFLAGS
1015 
1016 /*
 * The following syntax is supported:
1018  * 1) x.x.x.x             tap(4)'s address is x.x.x.x
1019  *
1020  * 2) x.x.x.x/z           tap(4)'s address is x.x.x.x
1021  *                        tap(4)'s netmask len is z
1022  *
1023  * 3) x.x.x.x:y.y.y.y     tap(4)'s address is x.x.x.x
1024  *                        pseudo netif's address is y.y.y.y
1025  *
1026  * 4) x.x.x.x:y.y.y.y/z   tap(4)'s address is x.x.x.x
1027  *                        pseudo netif's address is y.y.y.y
1028  *                        tap(4) and pseudo netif's netmask len are z
1029  *
1030  * 5) bridgeX             tap(4) will be added to bridgeX
1031  *
1032  * 6) bridgeX:y.y.y.y     tap(4) will be added to bridgeX
1033  *                        pseudo netif's address is y.y.y.y
1034  *
1035  * 7) bridgeX:y.y.y.y/z   tap(4) will be added to bridgeX
1036  *                        pseudo netif's address is y.y.y.y
1037  *                        pseudo netif's netmask len is z
1038  */
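/*
 * Examples (illustrative only):
 *
 *	-I auto:bridge0
 *		attach the first free tap(4) to bridge0
 *	-I tap2:10.26.0.1:10.26.0.2/24
 *		tap2 gets 10.26.0.1/24, the pseudo netif gets 10.26.0.2/24
 *
 * The leading interface name itself was consumed by init_netif(); only
 * the portion after the first ':' is parsed by this function.
 */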
1039 static
1040 int
1041 netif_init_tap(int tap_unit, in_addr_t *addr, in_addr_t *mask, int s)
1042 {
1043 	in_addr_t tap_addr, netmask, netif_addr;
1044 	int next_netif_addr;
1045 	char *tok, *masklen_str, *ifbridge;
1046 
1047 	*addr = 0;
1048 	*mask = 0;
1049 
1050 	tok = strtok(NULL, ":/");
1051 	if (tok == NULL) {
1052 		/*
1053 		 * Nothing special, simply use tap(4) as backend
1054 		 */
1055 		return 0;
1056 	}
1057 
1058 	if (inet_pton(AF_INET, tok, &tap_addr) > 0) {
1059 		/*
1060 		 * tap(4)'s address is supplied
1061 		 */
1062 		ifbridge = NULL;
1063 
1064 		/*
		 * If there is a next token, it may be either the pseudo
		 * netif's address or the netmask length for the tap(4).
1067 		 */
1068 		next_netif_addr = 0;
1069 	} else {
1070 		/*
		 * Not a tap(4) address, so assume it is a bridge(4)
		 * interface name.
1073 		 */
1074 		tap_addr = 0;
1075 		ifbridge = tok;
1076 
1077 		/*
		 * If there is a next token, it must be the pseudo
		 * netif's address.
1080 		 */
1081 		next_netif_addr = 1;
1082 	}
1083 
1084 	netmask = netif_addr = 0;
1085 
1086 	tok = strtok(NULL, ":/");
1087 	if (tok == NULL)
1088 		goto back;
1089 
1090 	if (inet_pton(AF_INET, tok, &netif_addr) <= 0) {
1091 		if (next_netif_addr) {
1092 			warnx("Invalid pseudo netif address: %s", tok);
1093 			return -1;
1094 		}
1095 		netif_addr = 0;
1096 
1097 		/*
		 * The current token is not an address, so it must be the
		 * netmask length.
1099 		 */
1100 		masklen_str = tok;
1101 	} else {
1102 		/*
		 * The current token is the pseudo netif's address; if there
		 * is a next token it must be the netmask length.
1105 		 */
1106 		masklen_str = strtok(NULL, "/");
1107 	}
1108 
1109 	/* Calculate netmask */
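	/* e.g. a len of 24 yields 0xffffff00 in network byte order */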
1110 	if (masklen_str != NULL) {
1111 		u_long masklen;
1112 
1113 		masklen = strtoul(masklen_str, NULL, 10);
1114 		if (masklen < 32 && masklen > 0) {
1115 			netmask = htonl(~((1LL << (32 - masklen)) - 1)
1116 					& 0xffffffff);
1117 		} else {
1118 			warnx("Invalid netmask len: %lu", masklen);
1119 			return -1;
1120 		}
1121 	}
1122 
1123 	/* Make sure there is no more token left */
1124 	if (strtok(NULL, ":/") != NULL) {
1125 		warnx("Invalid argument to '-I'");
1126 		return -1;
1127 	}
1128 
1129 back:
1130 	if (tap_unit < 0) {
1131 		/* Do nothing */
1132 	} else if (ifbridge == NULL) {
1133 		/* Set tap(4) address/netmask */
1134 		if (netif_set_tapaddr(tap_unit, tap_addr, netmask, s) < 0)
1135 			return -1;
1136 	} else {
1137 		/* Tie tap(4) to bridge(4) */
1138 		if (netif_add_tap2brg(tap_unit, ifbridge, s) < 0)
1139 			return -1;
1140 	}
1141 
1142 	*addr = netif_addr;
1143 	*mask = netmask;
1144 	return 0;
1145 }
1146 
1147 /*
1148  * NetifInfo[] will be filled for pseudo netif initialization.
1149  * NetifNum will be bumped to reflect the number of valid entries
1150  * in NetifInfo[].
1151  */
1152 static
1153 void
1154 init_netif(char *netifExp[], int netifExpNum)
1155 {
1156 	int i, s;
1157 
1158 	if (netifExpNum == 0)
1159 		return;
1160 
1161 	s = socket(AF_INET, SOCK_DGRAM, 0);	/* for ioctl(SIOC) */
1162 	if (s < 0)
1163 		return;
1164 
1165 	for (i = 0; i < netifExpNum; ++i) {
1166 		struct vknetif_info *info;
1167 		in_addr_t netif_addr, netif_mask;
1168 		int tap_fd, tap_unit;
1169 		char *netif;
1170 
1171 		netif = strtok(netifExp[i], ":");
1172 		if (netif == NULL) {
1173 			warnx("Invalid argument to '-I'");
1174 			continue;
1175 		}
1176 
1177 		/*
1178 		 * Open tap(4) device file and bring up the
1179 		 * corresponding interface
1180 		 */
1181 		tap_fd = netif_open_tap(netif, &tap_unit, s);
1182 		if (tap_fd < 0)
1183 			continue;
1184 
1185 		/*
1186 		 * Initialize tap(4) and get address/netmask
1187 		 * for pseudo netif
1188 		 *
		 * NB: The rest of netifExp[i] is passed to
		 *     netif_init_tap() implicitly via strtok() state.
1191 		 */
1192 		if (netif_init_tap(tap_unit, &netif_addr, &netif_mask, s) < 0) {
1193 			/*
1194 			 * NB: Closing tap(4) device file will bring
1195 			 *     down the corresponding interface
1196 			 */
1197 			close(tap_fd);
1198 			continue;
1199 		}
1200 
1201 		info = &NetifInfo[NetifNum];
1202 		info->tap_fd = tap_fd;
1203 		info->tap_unit = tap_unit;
1204 		info->netif_addr = netif_addr;
1205 		info->netif_mask = netif_mask;
1206 
1207 		NetifNum++;
1208 		if (NetifNum >= VKNETIF_MAX)	/* XXX will this happen? */
1209 			break;
1210 	}
1211 	close(s);
1212 }
1213 
1214 /*
1215  * Create the pid file and leave it open and locked while the vkernel is
1216  * running.  This allows a script to use /usr/bin/lockf to probe whether
 * a vkernel is still running (so as not to accidentally kill an unrelated
1218  * process from a stale pid file).
1219  */
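/*
 * A possible probe from a script, assuming the pid file was given as
 * "-p /var/run/myvk.pid" (hypothetical path):
 *
 *	lockf -ks -t 0 /var/run/myvk.pid echo vkernel is not running
 *
 * The echo only runs if the lock can be acquired, i.e. if no vkernel
 * is currently holding the pid file open.
 */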
1220 static
1221 void
1222 writepid(void)
1223 {
1224 	char buf[32];
1225 	int fd;
1226 
1227 	if (pid_file != NULL) {
1228 		snprintf(buf, sizeof(buf), "%ld\n", (long)getpid());
1229 		fd = open(pid_file, O_RDWR|O_CREAT|O_EXLOCK|O_NONBLOCK, 0666);
1230 		if (fd < 0) {
1231 			if (errno == EWOULDBLOCK) {
1232 				perror("Failed to lock pidfile, "
1233 				       "vkernel already running");
1234 			} else {
1235 				perror("Failed to create pidfile");
1236 			}
1237 			exit(EX_SOFTWARE);
1238 		}
1239 		ftruncate(fd, 0);
1240 		write(fd, buf, strlen(buf));
1241 		/* leave the file open to maintain the lock */
1242 	}
1243 }
1244 
1245 static
1246 void
cleanpid(void)
1248 {
1249 	if (pid_file != NULL) {
1250 		if (unlink(pid_file) < 0)
1251 			perror("Warning: couldn't remove pidfile");
1252 	}
1253 }
1254 
1255 static
1256 void
1257 usage_err(const char *ctl, ...)
1258 {
1259 	va_list va;
1260 
1261 	va_start(va, ctl);
1262 	vfprintf(stderr, ctl, va);
1263 	va_end(va);
1264 	fprintf(stderr, "\n");
1265 	exit(EX_USAGE);
1266 }
1267 
1268 static
1269 void
1270 usage_help(_Bool help)
1271 {
1272 	fprintf(stderr, "Usage: %s [-hsUv] [-c file] [-e name=value:name=value:...]\n"
1273 	    "\t[-i file] [-I interface[:address1[:address2][/netmask]]] [-l cpulock]\n"
1274 	    "\t[-m size] [-n numcpus] [-p file] [-r file]\n", save_av[0]);
1275 
1276 	if (help)
1277 		fprintf(stderr, "\nArguments:\n"
1278 		    "\t-c\tSpecify a readonly CD-ROM image file to be used by the kernel.\n"
1279 		    "\t-e\tSpecify an environment to be used by the kernel.\n"
1280 		    "\t-h\tThis list of options.\n"
1281 		    "\t-i\tSpecify a memory image file to be used by the virtual kernel.\n"
1282 		    "\t-I\tCreate a virtual network device.\n"
1283 		    "\t-l\tSpecify which, if any, real CPUs to lock virtual CPUs to.\n"
1284 		    "\t-m\tSpecify the amount of memory to be used by the kernel in bytes.\n"
1285 		    "\t-n\tSpecify the number of CPUs you wish to emulate.\n"
1286 		    "\t-p\tSpecify a file in which to store the process ID.\n"
1287 		    "\t-r\tSpecify a R/W disk image file to be used by the kernel.\n"
1288 		    "\t-s\tBoot into single-user mode.\n"
1289 		    "\t-U\tEnable writing to kernel memory and module loading.\n"
1290 		    "\t-v\tTurn on verbose booting.\n");
1291 
1292 	exit(EX_USAGE);
1293 }
1294 
1295 void
1296 cpu_reset(void)
1297 {
1298 	kprintf("cpu reset, rebooting vkernel\n");
1299 	closefrom(3);
1300 	cleanpid();
1301 	execv(save_av[0], save_av);
1302 }
1303 
1304 void
1305 cpu_halt(void)
1306 {
1307 	kprintf("cpu halt, exiting vkernel\n");
1308 	cleanpid();
1309 	exit(EX_OK);
1310 }
1311 
1312 void
1313 setrealcpu(void)
1314 {
1315 	switch(lwp_cpu_lock) {
1316 	case LCL_PER_CPU:
1317 		if (bootverbose)
1318 			kprintf("Locking CPU%d to real cpu %d\n",
1319 				mycpuid, next_cpu);
1320 		usched_set(getpid(), USCHED_SET_CPU, &next_cpu, sizeof(next_cpu));
1321 		next_cpu++;
1322 		if (next_cpu >= real_ncpus)
1323 			next_cpu = 0;
1324 		break;
1325 	case LCL_SINGLE_CPU:
1326 		if (bootverbose)
1327 			kprintf("Locking CPU%d to real cpu %d\n",
1328 				mycpuid, next_cpu);
1329 		usched_set(getpid(), USCHED_SET_CPU, &next_cpu, sizeof(next_cpu));
1330 		break;
1331 	default:
1332 		/* do not map virtual cpus to real cpus */
1333 		break;
1334 	}
1335 }
1336 
1337 /*
1338  * Allocate and free memory for module loading.  The loaded module
1339  * has to be placed somewhere near the current kernel binary load
1340  * point or the relocations will not work.
1341  *
1342  * I'm not sure why this isn't working.
1343  */
1344 int
1345 vkernel_module_memory_alloc(vm_offset_t *basep, size_t bytes)
1346 {
	kprintf("module loading for vkernel64 is not currently supported\n");
1348 	*basep = 0;
1349 	return ENOMEM;
1350 #if 0
1351 #if 1
1352 	size_t xtra;
1353 	xtra = (PAGE_SIZE - (vm_offset_t)sbrk(0)) & PAGE_MASK;
1354 	*basep = (vm_offset_t)sbrk(xtra + bytes) + xtra;
1355 	bzero((void *)*basep, bytes);
1356 #else
1357 	*basep = (vm_offset_t)mmap((void *)0x000000000, bytes,
1358 				   PROT_READ|PROT_WRITE|PROT_EXEC,
1359 				   MAP_ANON|MAP_SHARED, -1, 0);
1360 	if ((void *)*basep == MAP_FAILED)
1361 		return ENOMEM;
1362 #endif
1363 	kprintf("basep %p %p %zd\n",
1364 		(void *)vkernel_module_memory_alloc, (void *)*basep, bytes);
1365 	return 0;
1366 #endif
1367 }
1368 
1369 void
1370 vkernel_module_memory_free(vm_offset_t base, size_t bytes)
1371 {
1372 #if 0
1373 #if 0
1374 	munmap((void *)base, bytes);
1375 #endif
1376 #endif
1377 }
1378