/*
 * Copyright (c) 2006 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <sys/cons.h>
#include <sys/random.h>
#include <sys/vkernel.h>
#include <sys/tls.h>
#include <sys/reboot.h>
#include <sys/proc.h>
#include <sys/msgbuf.h>
#include <sys/vmspace.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/un.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <sys/mplock2.h>

#include <machine/cpu.h>
#include <machine/globaldata.h>
#include <machine/tls.h>
#include <machine/md_var.h>
#include <machine/vmparam.h>
#include <cpu/specialreg.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/bridge/if_bridgevar.h>
#include <netinet/in.h>
#include <arpa/inet.h>

#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <stdbool.h>
#include <unistd.h>
#include <fcntl.h>
#include <string.h>
#include <err.h>
#include <errno.h>
#include <assert.h>
#include <sysexits.h>

vm_paddr_t phys_avail[16];
vm_paddr_t Maxmem;
vm_paddr_t Maxmem_bytes;
long physmem;
int MemImageFd = -1;
struct vkdisk_info DiskInfo[VKDISK_MAX];
int DiskNum;
struct vknetif_info NetifInfo[VKNETIF_MAX];
int NetifNum;
char *pid_file;
vm_offset_t KvaStart;
vm_offset_t KvaEnd;
vm_offset_t KvaSize;
vm_offset_t virtual_start;
vm_offset_t virtual_end;
vm_offset_t virtual2_start;
vm_offset_t virtual2_end;
vm_offset_t kernel_vm_end;
vm_offset_t crashdumpmap;
vm_offset_t clean_sva;
vm_offset_t clean_eva;
struct msgbuf *msgbufp;
caddr_t ptvmmap;
vpte_t	*KernelPTD;
vpte_t	*KernelPTA;	/* Warning: Offset for direct VA translation */
void *dmap_min_address;
u_int cpu_feature;	/* XXX */
int tsc_present;
int64_t tsc_frequency;
int optcpus;		/* number of cpus - see mp_start() */
int lwp_cpu_lock;	/* if/how to lock virtual CPUs to real CPUs */
int real_ncpus;		/* number of real CPUs */
int next_cpu;		/* next real CPU to lock a virtual CPU to */

struct privatespace *CPU_prvspace;

static struct trapframe proc0_tf;
static void *proc0paddr;

static void init_sys_memory(char *imageFile);
static void init_kern_memory(void);
static void init_globaldata(void);
static void init_vkernel(void);
static void init_disk(char *diskExp[], int diskFileNum, enum vkdisk_type type);
static void init_netif(char *netifExp[], int netifFileNum);
static void writepid(void);
static void cleanpid(void);
static int unix_connect(const char *path);
static void usage_err(const char *ctl, ...);
static void usage_help(_Bool);

static int save_ac;
static char **save_av;

/*
 * Kernel startup for virtual kernels - standard main()
 */
int
main(int ac, char **av)
{
	char *memImageFile = NULL;
	char *netifFile[VKNETIF_MAX];
	char *diskFile[VKDISK_MAX];
	char *cdFile[VKDISK_MAX];
	char *suffix;
	char *endp;
	char *tmp;
	int netifFileNum = 0;
	int diskFileNum = 0;
	int cdFileNum = 0;
	int bootOnDisk = -1;	/* set below to vcd (0) or vkd (1) */
	int c;
	int i;
	int j;
	int n;
	int isq;
	int pos;
	int eflag;
	int real_vkernel_enable;
	int supports_sse;
	size_t vsize;
	size_t kenv_size;
	size_t kenv_size2;

	save_ac = ac;
	save_av = av;
	eflag = 0;
	pos = 0;
	kenv_size = 0;

	/*
	 * Process options
	 */
	kernel_mem_readonly = 1;
#ifdef SMP
	optcpus = 2;
#endif
	lwp_cpu_lock = LCL_NONE;

	real_vkernel_enable = 0;
	vsize = sizeof(real_vkernel_enable);
	sysctlbyname("vm.vkernel_enable", &real_vkernel_enable, &vsize, NULL,0);

	if (real_vkernel_enable == 0) {
		errx(1, "vm.vkernel_enable is 0, must be set "
			"to 1 to execute a vkernel!");
	}

	real_ncpus = 1;
	vsize = sizeof(real_ncpus);
	sysctlbyname("hw.ncpu", &real_ncpus, &vsize, NULL, 0);

	if (ac < 2)
		usage_help(false);

	while ((c = getopt(ac, av, "c:hsvl:m:n:r:e:i:p:I:U")) != -1) {
		switch(c) {
		case 'e':
			/*
			 * name=value:name=value:name=value...
			 * name="value"...
			 *
			 * Allow values to be quoted but note that shells
			 * may remove the quotes, so using this feature
			 * to embed colons may require a backslash.
			 */
			n = strlen(optarg);
			isq = 0;

			if (eflag == 0) {
				kenv_size = n + 2;
				kern_envp = malloc(kenv_size);
				if (kern_envp == NULL)
					errx(1, "Couldn't allocate %zd bytes for kern_envp", kenv_size);
			} else {
				kenv_size2 = kenv_size + n + 1;
				pos = kenv_size - 1;
				if ((tmp = realloc(kern_envp, kenv_size2)) == NULL)
					errx(1, "Couldn't reallocate %zd bytes for kern_envp", kenv_size2);
				kern_envp = tmp;
				kenv_size = kenv_size2;
			}

			for (i = 0, j = pos; i < n; ++i) {
				if (optarg[i] == '"')
					isq ^= 1;
				else if (optarg[i] == '\'')
					isq ^= 2;
				else if (isq == 0 && optarg[i] == ':')
					kern_envp[j++] = 0;
				else
					kern_envp[j++] = optarg[i];
			}
			kern_envp[j++] = 0;
			kern_envp[j++] = 0;
			eflag++;
			break;
		case 's':
			boothowto |= RB_SINGLE;
			break;
		case 'v':
			bootverbose = 1;
			break;
		case 'i':
			memImageFile = optarg;
			break;
		case 'I':
			if (netifFileNum < VKNETIF_MAX)
				netifFile[netifFileNum++] = strdup(optarg);
			break;
		case 'r':
			if (bootOnDisk < 0)
				bootOnDisk = 1;
			if (diskFileNum + cdFileNum < VKDISK_MAX)
				diskFile[diskFileNum++] = strdup(optarg);
			break;
		case 'c':
			if (bootOnDisk < 0)
				bootOnDisk = 0;
			if (diskFileNum + cdFileNum < VKDISK_MAX)
				cdFile[cdFileNum++] = strdup(optarg);
			break;
		case 'm':
			Maxmem_bytes = strtoull(optarg, &suffix, 0);
			if (suffix) {
				switch(*suffix) {
				case 'g':
				case 'G':
					Maxmem_bytes <<= 30;
					break;
				case 'm':
				case 'M':
					Maxmem_bytes <<= 20;
					break;
				case 'k':
				case 'K':
					Maxmem_bytes <<= 10;
					break;
				default:
					Maxmem_bytes = 0;
					usage_err("Bad maxmem option");
					/* NOT REACHED */
					break;
				}
			}
			break;
		case 'l':
			next_cpu = -1;
			if (strncmp("map", optarg, 3) == 0) {
				lwp_cpu_lock = LCL_PER_CPU;
				if (optarg[3] == ',') {
					next_cpu = strtol(optarg+4, &endp, 0);
					if (*endp != '\0')
						usage_err("Bad target CPU number at '%s'", endp);
				} else {
					next_cpu = 0;
				}
				if (next_cpu < 0 || next_cpu > real_ncpus - 1)
					usage_err("Bad target CPU, valid range is 0-%d", real_ncpus - 1);
			} else if (strncmp("any", optarg, 3) == 0) {
				lwp_cpu_lock = LCL_NONE;
			} else {
				lwp_cpu_lock = LCL_SINGLE_CPU;
				next_cpu = strtol(optarg, &endp, 0);
				if (*endp != '\0')
					usage_err("Bad target CPU number at '%s'", endp);
				if (next_cpu < 0 || next_cpu > real_ncpus - 1)
					usage_err("Bad target CPU, valid range is 0-%d", real_ncpus - 1);
			}
			break;
		case 'n':
			/*
			 * This value is set up by mp_start(), don't just
			 * set ncpus here.
			 */
#ifdef SMP
			optcpus = strtol(optarg, NULL, 0);
			if (optcpus < 1 || optcpus > MAXCPU)
				usage_err("Bad ncpus, valid range is 1-%d", MAXCPU);
#else
			if (strtol(optarg, NULL, 0) != 1) {
				usage_err("You built a UP vkernel, only 1 cpu!");
			}
#endif

			break;
		case 'p':
			pid_file = optarg;
			break;
		case 'U':
			kernel_mem_readonly = 0;
			break;
		case 'h':
			usage_help(true);
			break;
		default:
			usage_help(false);
		}
	}

	writepid();
	cpu_disable_intr();
	init_sys_memory(memImageFile);
	init_kern_memory();
	init_globaldata();
	init_vkernel();
	setrealcpu();
	init_kqueue();

	/*
	 * Check TSC
	 */
	vsize = sizeof(tsc_present);
	sysctlbyname("hw.tsc_present", &tsc_present, &vsize, NULL, 0);
	vsize = sizeof(tsc_frequency);
	sysctlbyname("hw.tsc_frequency", &tsc_frequency, &vsize, NULL, 0);
	if (tsc_present)
		cpu_feature |= CPUID_TSC;

	/*
	 * Check SSE
	 */
	vsize = sizeof(supports_sse);
	supports_sse = 0;
	sysctlbyname("hw.instruction_sse", &supports_sse, &vsize, NULL, 0);
	init_fpu(supports_sse);
	if (supports_sse)
		cpu_feature |= CPUID_SSE | CPUID_FXSR;

	/*
	 * We boot from the first installed disk.
	 */
	if (bootOnDisk == 1) {
		init_disk(diskFile, diskFileNum, VKD_DISK);
		init_disk(cdFile, cdFileNum, VKD_CD);
	} else {
		init_disk(cdFile, cdFileNum, VKD_CD);
		init_disk(diskFile, diskFileNum, VKD_DISK);
	}
	init_netif(netifFile, netifFileNum);
	init_exceptions();
	mi_startup();
	/* NOT REACHED */
	exit(EX_SOFTWARE);
}

/*
 * Initialize system memory.  This is the virtual kernel's 'RAM'.
 */
static
void
init_sys_memory(char *imageFile)
{
	struct stat st;
	int i;
	int fd;

	/*
	 * Figure out the system memory image size.  If an image file was
	 * specified and -m was not specified, use the image file's size.
	 */
	if (imageFile && stat(imageFile, &st) == 0 && Maxmem_bytes == 0)
		Maxmem_bytes = (vm_paddr_t)st.st_size;
	if ((imageFile == NULL || stat(imageFile, &st) < 0) &&
	    Maxmem_bytes == 0) {
		errx(1, "Cannot create new memory file %s unless "
		       "system memory size is specified with -m",
		       imageFile);
		/* NOT REACHED */
	}

	/*
	 * Maxmem must be known at this time
	 */
	if (Maxmem_bytes < 64 * 1024 * 1024 || (Maxmem_bytes & SEG_MASK)) {
		errx(1, "Bad maxmem specification: 64MB minimum, "
		       "multiples of %dMB only",
		       SEG_SIZE / 1024 / 1024);
		/* NOT REACHED */
	}

	/*
	 * Generate an image file name if necessary, then open/create the
	 * file exclusively locked.  Do not allow multiple virtual kernels
	 * to use the same image file.
	 *
	 * Don't iterate through a million files if we do not have write
	 * access to the directory, stop if our open() failed on a
	 * non-existent file.  Otherwise opens can fail for any number
	 * of reasons.
	 */
	if (imageFile == NULL) {
		for (i = 0; i < 1000000; ++i) {
			asprintf(&imageFile, "/var/vkernel/memimg.%06d", i);
			fd = open(imageFile,
				  O_RDWR|O_CREAT|O_EXLOCK|O_NONBLOCK, 0644);
			if (fd < 0 && stat(imageFile, &st) == 0) {
				free(imageFile);
				continue;
			}
			break;
		}
	} else {
		fd = open(imageFile, O_RDWR|O_CREAT|O_EXLOCK|O_NONBLOCK, 0644);
	}
	fprintf(stderr, "Using memory file: %s\n", imageFile);
	if (fd < 0 || fstat(fd, &st) < 0) {
		err(1, "Unable to open/create %s", imageFile);
		/* NOT REACHED */
	}

	/*
	 * Truncate or extend the file as necessary.  Clean out the contents
	 * of the file, we want it to be full of holes so we don't waste
	 * time reading in data from an old file that we no longer care
	 * about.
	 */
	ftruncate(fd, 0);
	ftruncate(fd, Maxmem_bytes);

	MemImageFd = fd;
	Maxmem = Maxmem_bytes >> PAGE_SHIFT;
	physmem = Maxmem;
}

/*
 * Initialize kernel memory.  This reserves kernel virtual memory by using
 * MAP_VPAGETABLE
 */

static
void
init_kern_memory(void)
{
	void *base;
	void *try;
	char dummy;
	char *topofstack = &dummy;
	int i;
	void *firstfree;

	/*
	 * Memory map our kernel virtual memory space.  Note that the
	 * kernel image itself is not made part of this memory for the
	 * moment.
	 *
	 * The memory map must be segment-aligned so we can properly
	 * offset KernelPTD.
	 *
	 * If the system kernel has a different MAXDSIZ, it might not
	 * be possible to map kernel memory in its preferred location.
	 * Try a number of different locations.
	 */
	try = (void *)(512UL << 30);
	base = NULL;
	while ((char *)try + KERNEL_KVA_SIZE < topofstack) {
		base = mmap(try, KERNEL_KVA_SIZE, PROT_READ|PROT_WRITE,
			    MAP_FILE|MAP_SHARED|MAP_VPAGETABLE,
			    MemImageFd, (off_t)try);
		if (base == try)
			break;
		if (base != MAP_FAILED)
			munmap(base, KERNEL_KVA_SIZE);
		try = (char *)try + (512UL << 30);
	}
	if (base != try) {
		err(1, "Unable to mmap() kernel virtual memory!");
		/* NOT REACHED */
	}
	madvise(base, KERNEL_KVA_SIZE, MADV_NOSYNC);
	KvaStart = (vm_offset_t)base;
	KvaSize = KERNEL_KVA_SIZE;
	KvaEnd = KvaStart + KvaSize;

	/* cannot use kprintf yet */
	printf("KVM mapped at %p-%p\n", (void *)KvaStart, (void *)KvaEnd);

	/* MAP_FILE? */
	dmap_min_address = mmap(0, DMAP_SIZE, PROT_READ|PROT_WRITE,
				MAP_NOCORE|MAP_NOSYNC|MAP_SHARED,
				MemImageFd, 0);
	if (dmap_min_address == MAP_FAILED) {
		err(1, "Unable to mmap() kernel DMAP region!");
		/* NOT REACHED */
	}

	firstfree = NULL;
	pmap_bootstrap((vm_paddr_t *)&firstfree, (int64_t)base);

	mcontrol(base, KERNEL_KVA_SIZE, MADV_SETMAP,
		 0 | VPTE_R | VPTE_W | VPTE_V);

	/*
	 * phys_avail[] represents unallocated physical memory.  MI code
	 * will use phys_avail[] to create the vm_page array.
	 */
	phys_avail[0] = (vm_paddr_t)firstfree;
	phys_avail[0] = (phys_avail[0] + PAGE_MASK) & ~(vm_paddr_t)PAGE_MASK;
	phys_avail[1] = Maxmem_bytes;

#if JGV
	/*
	 * (virtual_start, virtual_end) represent unallocated kernel virtual
	 * memory.  MI code will create kernel_map using these parameters.
	 */
	virtual_start = KvaStart + (long)firstfree;
	virtual_start = (virtual_start + PAGE_MASK) & ~(vm_offset_t)PAGE_MASK;
	virtual_end = KvaStart + KERNEL_KVA_SIZE;
#endif

	/*
	 * pmap_growkernel() will set the correct value.
	 */
	kernel_vm_end = 0;

	/*
	 * Allocate space for process 0's UAREA.
	 */
	proc0paddr = (void *)virtual_start;
	for (i = 0; i < UPAGES; ++i) {
		pmap_kenter_quick(virtual_start, phys_avail[0]);
		virtual_start += PAGE_SIZE;
		phys_avail[0] += PAGE_SIZE;
	}

	/*
	 * crashdumpmap
	 */
	crashdumpmap = virtual_start;
	virtual_start += MAXDUMPPGS * PAGE_SIZE;

	/*
	 * msgbufp maps the system message buffer
	 */
	assert((MSGBUF_SIZE & PAGE_MASK) == 0);
	msgbufp = (void *)virtual_start;
	for (i = 0; i < (MSGBUF_SIZE >> PAGE_SHIFT); ++i) {
		pmap_kenter_quick(virtual_start, phys_avail[0]);
		virtual_start += PAGE_SIZE;
		phys_avail[0] += PAGE_SIZE;
	}
	msgbufinit(msgbufp, MSGBUF_SIZE);

	/*
	 * used by kern_memio for /dev/mem access
	 */
	ptvmmap = (caddr_t)virtual_start;
	virtual_start += PAGE_SIZE;

	/*
	 * Bootstrap the kernel_pmap
	 */
#if JGV
	pmap_bootstrap();
#endif
}

/*
 * Map the per-cpu globaldata for cpu #0.  Allocate the space using
 * virtual_start and phys_avail[0]
 */
static
void
init_globaldata(void)
{
	int i;
	vm_paddr_t pa;
	vm_offset_t va;

	/*
	 * Reserve enough KVA to cover possible cpus.  This is a considerable
	 * amount of KVA since the privatespace structure includes two
	 * whole page table mappings.
	 */
	virtual_start = (virtual_start + SEG_MASK) & ~(vm_offset_t)SEG_MASK;
	CPU_prvspace = (void *)virtual_start;
	virtual_start += sizeof(struct privatespace) * SMP_MAXCPU;

	/*
	 * Allocate enough physical memory to cover the mdglobaldata
	 * portion of the space and the idle stack and map the pages
	 * into KVA.  For cpu #0 only.
	 */
	for (i = 0; i < sizeof(struct mdglobaldata); i += PAGE_SIZE) {
		pa = phys_avail[0];
		va = (vm_offset_t)&CPU_prvspace[0].mdglobaldata + i;
		pmap_kenter_quick(va, pa);
		phys_avail[0] += PAGE_SIZE;
	}
	for (i = 0; i < sizeof(CPU_prvspace[0].idlestack); i += PAGE_SIZE) {
		pa = phys_avail[0];
		va = (vm_offset_t)&CPU_prvspace[0].idlestack + i;
		pmap_kenter_quick(va, pa);
		phys_avail[0] += PAGE_SIZE;
	}

	/*
	 * Setup the %gs for cpu #0.  The mycpu macro works after this
	 * point.  Note that %fs is used by pthreads.
	 */
	tls_set_gs(&CPU_prvspace[0], sizeof(struct privatespace));
}

/*
 * Initialize very low level systems including thread0, proc0, etc.
 */
static
void
init_vkernel(void)
{
	struct mdglobaldata *gd;

	gd = &CPU_prvspace[0].mdglobaldata;
	bzero(gd, sizeof(*gd));

	gd->mi.gd_curthread = &thread0;
	thread0.td_gd = &gd->mi;
	ncpus = 1;
	ncpus2 = 1;	/* rounded down power of 2 */
	ncpus_fit = 1;	/* rounded up power of 2 */
	/* ncpus2_mask and ncpus_fit_mask are 0 */
	init_param1();
	gd->mi.gd_prvspace = &CPU_prvspace[0];
	mi_gdinit(&gd->mi, 0);
	cpu_gdinit(gd, 0);
	mi_proc0init(&gd->mi, proc0paddr);
	lwp0.lwp_md.md_regs = &proc0_tf;

	/*init_locks();*/
#ifdef SMP
	/*
	 * Get the initial mplock with a count of 1 for the BSP.
	 * This uses a LOGICAL cpu ID, ie BSP == 0.
	 */
	cpu_get_initial_mplock();
#endif
	cninit();
	rand_initialize();
#if 0	/* #ifdef DDB */
	kdb_init();
	if (boothowto & RB_KDB)
		Debugger("Boot flags requested debugger");
#endif
	identcpu();
#if 0
	initializecpu();	/* Initialize CPU registers */
#endif
	init_param2((phys_avail[1] - phys_avail[0]) / PAGE_SIZE);

#if 0
	/*
	 * Map the message buffer
	 */
	for (off = 0; off < round_page(MSGBUF_SIZE); off += PAGE_SIZE)
		pmap_kenter((vm_offset_t)msgbufp + off, avail_end + off);
	msgbufinit(msgbufp, MSGBUF_SIZE);
#endif
#if 0
	thread0.td_pcb_cr3 ... MMU
	lwp0.lwp_md.md_regs = &proc0_tf;
#endif
}

/*
 * Filesystem image paths for the virtual kernel are optional.
 * If specified they each should point to a disk image,
 * the first of which will become the root disk.
 *
 * The virtual kernel caches data from our 'disk' just like a normal kernel,
 * so we do not really want the real kernel to cache the data too.  Use
 * O_DIRECT to remove the duplication.
 */
static
void
init_disk(char *diskExp[], int diskFileNum, enum vkdisk_type type)
{
	int i;

	if (diskFileNum == 0)
		return;

	for (i = 0; i < diskFileNum; i++) {
		char *fname;
		fname = diskExp[i];

		if (fname == NULL) {
			warnx("Invalid argument to '-r'");
			continue;
		}

		if (DiskNum < VKDISK_MAX) {
			struct stat st;
			struct vkdisk_info* info = NULL;
			int fd;
			size_t l = 0;

			if (type == VKD_DISK)
			    fd = open(fname, O_RDWR|O_DIRECT, 0644);
			else
			    fd = open(fname, O_RDONLY|O_DIRECT, 0644);
			if (fd < 0 || fstat(fd, &st) < 0) {
				err(1, "Unable to open/create %s", fname);
				/* NOT REACHED */
			}
			if (S_ISREG(st.st_mode)) {
				if (flock(fd, LOCK_EX|LOCK_NB) < 0) {
					errx(1, "Disk image %s is already "
						"in use\n", fname);
					/* NOT REACHED */
				}
			}

			info = &DiskInfo[DiskNum];
			l = strlen(fname);

			info->unit = i;
			info->fd = fd;
			info->type = type;
			memcpy(info->fname, fname, l);

			if (DiskNum == 0) {
				if (type == VKD_CD) {
				    rootdevnames[0] = "cd9660:vcd0a";
				} else if (type == VKD_DISK) {
				    rootdevnames[0] = "ufs:vkd0s0a";
				    rootdevnames[1] = "ufs:vkd0s1a";
				}
			}

			DiskNum++;
		} else {
			warnx("vkd%d (%s) > VKDISK_MAX", DiskNum, fname);
			continue;
		}
	}
}

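/*
 * Set (f > 0) or clear (f < 0) the given interface flags on the tap(4)
 * interface 'tap_unit', using the already-open ioctl socket 's'.
 * Returns 0 on success, -1 on failure.
 */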
static
int
netif_set_tapflags(int tap_unit, int f, int s)
{
	struct ifreq ifr;
	int flags;

	bzero(&ifr, sizeof(ifr));

	snprintf(ifr.ifr_name, sizeof(ifr.ifr_name), "tap%d", tap_unit);
	if (ioctl(s, SIOCGIFFLAGS, &ifr) < 0) {
		warn("tap%d: ioctl(SIOCGIFFLAGS) failed", tap_unit);
		return -1;
	}

	/*
	 * Adjust if_flags
	 *
	 * If the flags are already set/cleared, then we return
	 * immediately to avoid extra syscalls
	 */
	flags = (ifr.ifr_flags & 0xffff) | (ifr.ifr_flagshigh << 16);
	if (f < 0) {
		/* Turn off flags */
		f = -f;
		if ((flags & f) == 0)
			return 0;
		flags &= ~f;
	} else {
		/* Turn on flags */
		if (flags & f)
			return 0;
		flags |= f;
	}

	/*
	 * Fix up ifreq.ifr_name, since it may be trashed
	 * in previous ioctl(SIOCGIFFLAGS)
	 */
	snprintf(ifr.ifr_name, sizeof(ifr.ifr_name), "tap%d", tap_unit);

	ifr.ifr_flags = flags & 0xffff;
	ifr.ifr_flagshigh = flags >> 16;
	if (ioctl(s, SIOCSIFFLAGS, &ifr) < 0) {
		warn("tap%d: ioctl(SIOCSIFFLAGS) failed", tap_unit);
		return -1;
	}
	return 0;
}

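/*
 * Assign IPv4 address 'addr' (and netmask 'mask' if non-zero) to the
 * tap(4) interface 'tap_unit' via SIOCAIFADDR.  Returns 0 on success,
 * -1 on failure.
 */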
static
int
netif_set_tapaddr(int tap_unit, in_addr_t addr, in_addr_t mask, int s)
{
	struct ifaliasreq ifra;
	struct sockaddr_in *in;

	bzero(&ifra, sizeof(ifra));
	snprintf(ifra.ifra_name, sizeof(ifra.ifra_name), "tap%d", tap_unit);

	/* Setup address */
	in = (struct sockaddr_in *)&ifra.ifra_addr;
	in->sin_family = AF_INET;
	in->sin_len = sizeof(*in);
	in->sin_addr.s_addr = addr;

	if (mask != 0) {
		/* Setup netmask */
		in = (struct sockaddr_in *)&ifra.ifra_mask;
		in->sin_len = sizeof(*in);
		in->sin_addr.s_addr = mask;
	}

	if (ioctl(s, SIOCAIFADDR, &ifra) < 0) {
		warn("tap%d: ioctl(SIOCAIFADDR) failed", tap_unit);
		return -1;
	}
	return 0;
}

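/*
 * Add the tap(4) interface 'tap_unit' as a member of the bridge(4)
 * interface named 'ifbridge' via BRDGADD.  Already being a member is
 * not treated as an error.  Returns 0 on success, -1 on failure.
 */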
static
int
netif_add_tap2brg(int tap_unit, const char *ifbridge, int s)
{
	struct ifbreq ifbr;
	struct ifdrv ifd;

	bzero(&ifbr, sizeof(ifbr));
	snprintf(ifbr.ifbr_ifsname, sizeof(ifbr.ifbr_ifsname),
		 "tap%d", tap_unit);

	bzero(&ifd, sizeof(ifd));
	strlcpy(ifd.ifd_name, ifbridge, sizeof(ifd.ifd_name));
	ifd.ifd_cmd = BRDGADD;
	ifd.ifd_len = sizeof(ifbr);
	ifd.ifd_data = &ifbr;

	if (ioctl(s, SIOCSDRVSPEC, &ifd) < 0) {
		/*
		 * 'errno == EEXIST' means that the tap(4) is already
		 * a member of the bridge(4)
		 */
		if (errno != EEXIST) {
			warn("ioctl(%s, SIOCSDRVSPEC) failed", ifbridge);
			return -1;
		}
	}
	return 0;
}

#define TAPDEV_OFLAGS	(O_RDWR | O_NONBLOCK)

/*
 * Locate the first unused tap(4) device file if auto mode is requested,
 * or open the user supplied device file, and bring up the corresponding
 * tap(4) interface.
 *
 * NOTE: Only tap(4) device file is supported currently
 */
static
int
netif_open_tap(const char *netif, int *tap_unit, int s)
{
	char tap_dev[MAXPATHLEN];
	int tap_fd, failed;
	struct stat st;
	char *dname;

	*tap_unit = -1;

	if (strcmp(netif, "auto") == 0) {
		/*
		 * Find first unused tap(4) device file
		 */
		tap_fd = open("/dev/tap", TAPDEV_OFLAGS);
		if (tap_fd < 0) {
			warnc(errno, "Unable to find a free tap(4)");
			return -1;
		}
	} else {
		/*
		 * User supplied tap(4) device file or unix socket.
		 */
		if (netif[0] == '/')	/* Absolute path */
			strlcpy(tap_dev, netif, sizeof(tap_dev));
		else
			snprintf(tap_dev, sizeof(tap_dev), "/dev/%s", netif);

		tap_fd = open(tap_dev, TAPDEV_OFLAGS);

		/*
		 * If we cannot open normally try to connect to it.
		 */
		if (tap_fd < 0)
			tap_fd = unix_connect(tap_dev);

		if (tap_fd < 0) {
			warn("Unable to open %s", tap_dev);
			return -1;
		}
	}

	/*
	 * Check whether the device file is a tap(4)
	 */
	if (fstat(tap_fd, &st) < 0) {
		failed = 1;
	} else if (S_ISCHR(st.st_mode)) {
		dname = fdevname(tap_fd);
		if (dname)
			dname = strstr(dname, "tap");
		if (dname) {
			/*
			 * Bring up the corresponding tap(4) interface
			 */
			*tap_unit = strtol(dname + 3, NULL, 10);
			printf("TAP UNIT %d\n", *tap_unit);
			if (netif_set_tapflags(*tap_unit, IFF_UP, s) == 0)
				failed = 0;
			else
				failed = 1;
		} else {
			failed = 1;
		}
	} else if (S_ISSOCK(st.st_mode)) {
		/*
		 * Special socket connection (typically to vknet).  We
		 * do not have to do anything.
		 */
		failed = 0;
	} else {
		failed = 1;
	}

	if (failed) {
		warnx("%s is not a tap(4) device or socket", tap_dev);
		close(tap_fd);
		tap_fd = -1;
		*tap_unit = -1;
	}
	return tap_fd;
}

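/*
 * Connect to the unix domain (SOCK_SEQPACKET) socket at 'path', typically
 * a vknet socket.  The descriptor is made non-blocking and its send
 * buffer is enlarged.  Returns the descriptor or -1 on failure.
 */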
static int
unix_connect(const char *path)
{
	struct sockaddr_un sunx;
	int len;
	int net_fd;
	int sndbuf = 262144;
	struct stat st;

	snprintf(sunx.sun_path, sizeof(sunx.sun_path), "%s", path);
	len = offsetof(struct sockaddr_un, sun_path[strlen(sunx.sun_path)]);
	++len;	/* include nul */
	sunx.sun_family = AF_UNIX;
	sunx.sun_len = len;

	net_fd = socket(AF_UNIX, SOCK_SEQPACKET, 0);
	if (net_fd < 0)
		return(-1);
	if (connect(net_fd, (void *)&sunx, len) < 0) {
		close(net_fd);
		return(-1);
	}
	setsockopt(net_fd, SOL_SOCKET, SO_SNDBUF, &sndbuf, sizeof(sndbuf));
	if (fstat(net_fd, &st) == 0)
		printf("Network socket buffer: %d bytes\n", st.st_blksize);
	fcntl(net_fd, F_SETFL, O_NONBLOCK);
	return(net_fd);
}

#undef TAPDEV_MAJOR
#undef TAPDEV_MINOR
#undef TAPDEV_OFLAGS

/*
 * The following syntax is supported:
 * 1) x.x.x.x             tap(4)'s address is x.x.x.x
 *
 * 2) x.x.x.x/z           tap(4)'s address is x.x.x.x
 *                        tap(4)'s netmask len is z
 *
 * 3) x.x.x.x:y.y.y.y     tap(4)'s address is x.x.x.x
 *                        pseudo netif's address is y.y.y.y
 *
 * 4) x.x.x.x:y.y.y.y/z   tap(4)'s address is x.x.x.x
 *                        pseudo netif's address is y.y.y.y
 *                        tap(4) and pseudo netif's netmask len are z
 *
 * 5) bridgeX             tap(4) will be added to bridgeX
 *
 * 6) bridgeX:y.y.y.y     tap(4) will be added to bridgeX
 *                        pseudo netif's address is y.y.y.y
 *
 * 7) bridgeX:y.y.y.y/z   tap(4) will be added to bridgeX
 *                        pseudo netif's address is y.y.y.y
 *                        pseudo netif's netmask len is z
 */
static
int
netif_init_tap(int tap_unit, in_addr_t *addr, in_addr_t *mask, int s)
{
	in_addr_t tap_addr, netmask, netif_addr;
	int next_netif_addr;
	char *tok, *masklen_str, *ifbridge;

	*addr = 0;
	*mask = 0;

	tok = strtok(NULL, ":/");
	if (tok == NULL) {
		/*
		 * Nothing special, simply use tap(4) as backend
		 */
		return 0;
	}

	if (inet_pton(AF_INET, tok, &tap_addr) > 0) {
		/*
		 * tap(4)'s address is supplied
		 */
		ifbridge = NULL;

		/*
		 * If there is next token, then it may be pseudo
		 * netif's address or netmask len for tap(4)
		 */
		next_netif_addr = 0;
	} else {
		/*
		 * Not tap(4)'s address, assume it as a bridge(4)
		 * iface name
		 */
		tap_addr = 0;
		ifbridge = tok;

		/*
		 * If there is next token, then it must be pseudo
		 * netif's address
		 */
		next_netif_addr = 1;
	}

	netmask = netif_addr = 0;

	tok = strtok(NULL, ":/");
	if (tok == NULL)
		goto back;

	if (inet_pton(AF_INET, tok, &netif_addr) <= 0) {
		if (next_netif_addr) {
			warnx("Invalid pseudo netif address: %s", tok);
			return -1;
		}
		netif_addr = 0;

		/*
		 * Current token is not address, then it must be netmask len
		 */
		masklen_str = tok;
	} else {
		/*
		 * Current token is pseudo netif address, if there is next token
		 * it must be netmask len
		 */
		masklen_str = strtok(NULL, "/");
	}

	/* Calculate netmask */
	if (masklen_str != NULL) {
		u_long masklen;

		masklen = strtoul(masklen_str, NULL, 10);
		if (masklen < 32 && masklen > 0) {
			netmask = htonl(~((1LL << (32 - masklen)) - 1)
					& 0xffffffff);
		} else {
			warnx("Invalid netmask len: %lu", masklen);
			return -1;
		}
	}

	/* Make sure there is no more token left */
	if (strtok(NULL, ":/") != NULL) {
		warnx("Invalid argument to '-I'");
		return -1;
	}

back:
	if (tap_unit < 0) {
		/* Do nothing */
	} else if (ifbridge == NULL) {
		/* Set tap(4) address/netmask */
		if (netif_set_tapaddr(tap_unit, tap_addr, netmask, s) < 0)
			return -1;
	} else {
		/* Tie tap(4) to bridge(4) */
		if (netif_add_tap2brg(tap_unit, ifbridge, s) < 0)
			return -1;
	}

	*addr = netif_addr;
	*mask = netmask;
	return 0;
}


/*
 * NetifInfo[] will be filled for pseudo netif initialization.
 * NetifNum will be bumped to reflect the number of valid entries
 * in NetifInfo[].
 */
static
void
init_netif(char *netifExp[], int netifExpNum)
{
	int i, s;

	if (netifExpNum == 0)
		return;

	s = socket(AF_INET, SOCK_DGRAM, 0);	/* for ioctl(SIOC) */
	if (s < 0)
		return;

	for (i = 0; i < netifExpNum; ++i) {
		struct vknetif_info *info;
		in_addr_t netif_addr, netif_mask;
		int tap_fd, tap_unit;
		char *netif;

		netif = strtok(netifExp[i], ":");
		if (netif == NULL) {
			warnx("Invalid argument to '-I'");
			continue;
		}

		/*
		 * Open tap(4) device file and bring up the
		 * corresponding interface
		 */
		tap_fd = netif_open_tap(netif, &tap_unit, s);
		if (tap_fd < 0)
			continue;

		/*
		 * Initialize tap(4) and get address/netmask
		 * for pseudo netif
		 *
		 * NB: The rest of netifExp[i] is passed
		 *     to netif_init_tap() implicitly.
		 */
		if (netif_init_tap(tap_unit, &netif_addr, &netif_mask, s) < 0) {
			/*
			 * NB: Closing tap(4) device file will bring
			 *     down the corresponding interface
			 */
			close(tap_fd);
			continue;
		}

		info = &NetifInfo[NetifNum];
		info->tap_fd = tap_fd;
		info->tap_unit = tap_unit;
		info->netif_addr = netif_addr;
		info->netif_mask = netif_mask;

		NetifNum++;
		if (NetifNum >= VKNETIF_MAX)	/* XXX will this happen? */
			break;
	}
	close(s);
}

/*
 * Create the pid file and leave it open and locked while the vkernel is
 * running.  This allows a script to use /usr/bin/lockf to probe whether
 * a vkernel is still running (so as not to accidentally kill an unrelated
 * process from a stale pid file).
 */
static
void
writepid(void)
{
	char buf[32];
	int fd;

	if (pid_file != NULL) {
		snprintf(buf, sizeof(buf), "%ld\n", (long)getpid());
		fd = open(pid_file, O_RDWR|O_CREAT|O_EXLOCK|O_NONBLOCK, 0666);
		if (fd < 0) {
			if (errno == EWOULDBLOCK) {
				perror("Failed to lock pidfile, "
				       "vkernel already running");
			} else {
				perror("Failed to create pidfile");
			}
			exit(EX_SOFTWARE);
		}
		ftruncate(fd, 0);
		write(fd, buf, strlen(buf));
		/* leave the file open to maintain the lock */
	}
}

static
void
cleanpid(void)
{
	if (pid_file != NULL) {
		if (unlink(pid_file) < 0)
			perror("Warning: couldn't remove pidfile");
	}
}

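/*
 * Print a printf-style error message on stderr and exit with EX_USAGE.
 */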
static
void
usage_err(const char *ctl, ...)
{
	va_list va;

	va_start(va, ctl);
	vfprintf(stderr, ctl, va);
	va_end(va);
	fprintf(stderr, "\n");
	exit(EX_USAGE);
}

static
void
usage_help(_Bool help)
{
	fprintf(stderr, "Usage: %s [-hsUv] [-c file] [-e name=value:name=value:...]\n"
	    "\t[-i file] [-I interface[:address1[:address2][/netmask]]] [-l cpulock]\n"
	    "\t[-m size] [-n numcpus] [-p file] [-r file]\n", save_av[0]);

	if (help)
		fprintf(stderr, "\nArguments:\n"
		    "\t-c\tSpecify a readonly CD-ROM image file to be used by the kernel.\n"
		    "\t-e\tSpecify an environment to be used by the kernel.\n"
		    "\t-h\tThis list of options.\n"
		    "\t-i\tSpecify a memory image file to be used by the virtual kernel.\n"
		    "\t-I\tCreate a virtual network device.\n"
		    "\t-l\tSpecify which, if any, real CPUs to lock virtual CPUs to.\n"
		    "\t-m\tSpecify the amount of memory to be used by the kernel in bytes.\n"
		    "\t-n\tSpecify the number of CPUs you wish to emulate.\n"
		    "\t-p\tSpecify a file in which to store the process ID.\n"
		    "\t-r\tSpecify a R/W disk image file to be used by the kernel.\n"
		    "\t-s\tBoot into single-user mode.\n"
		    "\t-U\tEnable writing to kernel memory and module loading.\n"
		    "\t-v\tTurn on verbose booting.\n");

	exit(EX_USAGE);
}

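/*
 * A vkernel 'cpu reset' simply re-execs the vkernel binary with its
 * original arguments.
 */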
void
cpu_reset(void)
{
	kprintf("cpu reset, rebooting vkernel\n");
	closefrom(3);
	cleanpid();
	execv(save_av[0], save_av);
}

void
cpu_halt(void)
{
	kprintf("cpu halt, exiting vkernel\n");
	cleanpid();
	exit(EX_OK);
}

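/*
 * Lock this virtual cpu's thread to a real cpu as dictated by the -l
 * option (lwp_cpu_lock), using usched_set(USCHED_SET_CPU).
 */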
void
setrealcpu(void)
{
	switch(lwp_cpu_lock) {
	case LCL_PER_CPU:
		if (bootverbose)
			kprintf("Locking CPU%d to real cpu %d\n",
				mycpuid, next_cpu);
		usched_set(getpid(), USCHED_SET_CPU, &next_cpu, sizeof(next_cpu));
		next_cpu++;
		if (next_cpu >= real_ncpus)
			next_cpu = 0;
		break;
	case LCL_SINGLE_CPU:
		if (bootverbose)
			kprintf("Locking CPU%d to real cpu %d\n",
				mycpuid, next_cpu);
		usched_set(getpid(), USCHED_SET_CPU, &next_cpu, sizeof(next_cpu));
		break;
	default:
		/* do not map virtual cpus to real cpus */
		break;
	}
}

/*
 * Allocate and free memory for module loading.  The loaded module
 * has to be placed somewhere near the current kernel binary load
 * point or the relocations will not work.
 *
 * I'm not sure why this isn't working.
 */
int
vkernel_module_memory_alloc(vm_offset_t *basep, size_t bytes)
{
	kprintf("module loading for vkernel64 is not currently supported\n");
	*basep = 0;
	return ENOMEM;
#if 0
#if 1
	size_t xtra;
	xtra = (PAGE_SIZE - (vm_offset_t)sbrk(0)) & PAGE_MASK;
	*basep = (vm_offset_t)sbrk(xtra + bytes) + xtra;
	bzero((void *)*basep, bytes);
#else
	*basep = (vm_offset_t)mmap((void *)0x000000000, bytes,
				   PROT_READ|PROT_WRITE|PROT_EXEC,
				   MAP_ANON|MAP_SHARED, -1, 0);
	if ((void *)*basep == MAP_FAILED)
		return ENOMEM;
#endif
	kprintf("basep %p %p %zd\n",
		(void *)vkernel_module_memory_alloc, (void *)*basep, bytes);
	return 0;
#endif
}

void
vkernel_module_memory_free(vm_offset_t base, size_t bytes)
{
#if 0
#if 0
	munmap((void *)base, bytes);
#endif
#endif
}