1 /*
2  * Copyright (c) 2006 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  */
34 
35 #include <sys/types.h>
36 #include <sys/systm.h>
37 #include <sys/kernel.h>
38 #include <sys/stat.h>
39 #include <sys/mman.h>
40 #include <sys/cons.h>
41 #include <sys/random.h>
42 #include <sys/vkernel.h>
43 #include <sys/tls.h>
44 #include <sys/reboot.h>
45 #include <sys/proc.h>
46 #include <sys/msgbuf.h>
47 #include <sys/vmspace.h>
48 #include <sys/socket.h>
49 #include <sys/sockio.h>
50 #include <sys/sysctl.h>
51 #include <sys/un.h>
52 #include <vm/vm_page.h>
53 #include <vm/vm_map.h>
54 #include <sys/mplock2.h>
55 #include <sys/wait.h>
56 
57 #include <machine/cpu.h>
58 #include <machine/globaldata.h>
59 #include <machine/tls.h>
60 #include <machine/md_var.h>
61 #include <machine/vmparam.h>
62 #include <cpu/specialreg.h>
63 
64 #include <net/if.h>
65 #include <net/if_arp.h>
66 #include <net/ethernet.h>
67 #include <net/bridge/if_bridgevar.h>
68 #include <netinet/in.h>
69 #include <arpa/inet.h>
70 #include <net/if_var.h>
71 
72 #include <stdio.h>
73 #include <stdlib.h>
74 #include <stdarg.h>
75 #include <stdbool.h>
76 #include <unistd.h>
77 #include <fcntl.h>
78 #include <string.h>
79 #include <err.h>
80 #include <errno.h>
81 #include <assert.h>
82 #include <sysexits.h>
83 #include <pthread.h>
84 
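/*
 * Exit status the vkernel child uses to request a reboot: cpu_reset()
 * exits with this code and the monitoring parent (the fork loop at the
 * top of main()) responds by forking a fresh child instead of exiting.
 */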
85 #define EX_VKERNEL_REBOOT	32
86 
87 vm_phystable_t phys_avail[16];
88 vm_paddr_t Maxmem;
89 vm_paddr_t Maxmem_bytes;
90 long physmem;
91 int MemImageFd = -1;
92 struct vkdisk_info DiskInfo[VKDISK_MAX];
93 int DiskNum;
94 struct vknetif_info NetifInfo[VKNETIF_MAX];
95 int NetifNum;
96 char *pid_file;
97 vm_offset_t KvaStart;
98 vm_offset_t KvaEnd;
99 vm_offset_t KvaSize;
100 vm_offset_t virtual_start;
101 vm_offset_t virtual_end;
102 vm_offset_t virtual2_start;
103 vm_offset_t virtual2_end;
104 vm_offset_t kernel_vm_end;
105 vm_offset_t crashdumpmap;
106 vm_offset_t clean_sva;
107 vm_offset_t clean_eva;
108 struct msgbuf *msgbufp;
109 caddr_t ptvmmap;
110 vpte_t	*KernelPTD;
111 vpte_t	*KernelPTA;	/* Warning: Offset for direct VA translation */
112 void *dmap_min_address;
113 void *vkernel_stack;
114 u_int cpu_feature;	/* XXX */
115 int tsc_present;
116 int tsc_invariant;
117 int tsc_mpsync;
118 int optcpus;		/* number of cpus - see mp_start() */
119 int cpu_bits;
120 int lwp_cpu_lock;	/* if/how to lock virtual CPUs to real CPUs */
121 int real_ncpus;		/* number of real CPUs */
122 int next_cpu;		/* next real CPU to lock a virtual CPU to */
int vkernel_b_arg;	/* number of logical CPU bits - only SMP */
int vkernel_B_arg;	/* number of core bits - only SMP */
125 int use_precise_timer = 0;	/* use a precise timer (more expensive) */
126 struct privatespace *CPU_prvspace;
127 
128 tsc_uclock_t tsc_frequency;
129 tsc_uclock_t tsc_oneus_approx;
130 
131 extern uint64_t KPML4phys;	/* phys addr of kernel level 4 */
132 
133 static struct trapframe proc0_tf;
134 static void *proc0paddr;
135 
136 static void init_sys_memory(char *imageFile);
137 static void init_kern_memory(void);
138 static void init_globaldata(void);
139 static void init_vkernel(void);
140 static void init_disk(char **diskExp, int *diskFlags, int diskFileNum, enum vkdisk_type type);
141 static void init_netif(char *netifExp[], int netifFileNum);
142 static void writepid(void);
143 static void cleanpid(void);
144 static int unix_connect(const char *path);
145 static void usage_err(const char *ctl, ...) __printflike(1, 2);
146 static void usage_help(_Bool);
147 static void init_locks(void);
148 static void handle_term(int);
149 
150 pid_t childpid;
151 
152 static int save_ac;
153 static int prezeromem;
154 static char **save_av;
155 
156 /*
157  * Kernel startup for virtual kernels - standard main()
158  */
159 int
160 main(int ac, char **av)
161 {
162 	char *memImageFile = NULL;
163 	char *netifFile[VKNETIF_MAX];
164 	char *diskFile[VKDISK_MAX];
165 	char *cdFile[VKDISK_MAX];
166 	char *suffix;
167 	char *endp;
168 	char *tmp;
169 	char *tok;
170 	int diskFlags[VKDISK_MAX];
171 	int netifFileNum = 0;
172 	int diskFileNum = 0;
173 	int cdFileNum = 0;
174 	int bootOnDisk = -1;	/* set below to vcd (0) or vkd (1) */
175 	int c;
176 	int i;
177 	int j;
178 	int n;
179 	int isq;
180 	int pos;
181 	int eflag;
182 	int real_vkernel_enable;
183 	int supports_sse;
184 	uint32_t mxcsr_mask;
185 	size_t vsize;
186 	size_t msize;
187 	size_t kenv_size;
188 	size_t kenv_size2;
189 	int status;
190 	struct sigaction sa;
191 
192 	/*
193 	 * Currently a bad hack but rtld-elf needs LD_SHAREDLIB_BASE to
194 	 * be set to force it to mmap() shared libraries into low memory,
195 	 * so our module loader can link against the related symbols.
196 	 */
197 	if (getenv("LD_SHAREDLIB_BASE") == NULL) {
198 		setenv("LD_SHAREDLIB_BASE", "0x10000000", 1);
199 		execv(av[0], av);
200 		fprintf(stderr, "Must run %s with full path\n", av[0]);
201 		exit(1);
202 	}
203 
204 	while ((childpid = fork()) != 0) {
205 		/* Ignore signals */
206 		bzero(&sa, sizeof(sa));
207 		sigemptyset(&sa.sa_mask);
208 		sa.sa_handler = SIG_IGN;
209 		sigaction(SIGINT, &sa, NULL);
210 		sigaction(SIGQUIT, &sa, NULL);
211 		sigaction(SIGHUP, &sa, NULL);
212 
213 		/*
214 		 * Forward SIGTERM to the child so that
215 		 * the shutdown process initiates correctly.
216 		 */
217 		sa.sa_handler = handle_term;
218 		sigaction(SIGTERM, &sa, NULL);
219 
220 		/*
221 		 * Wait for child to terminate, exit if
222 		 * someone stole our child.
223 		 */
224 		while (waitpid(childpid, &status, 0) != childpid) {
225 			if (errno == ECHILD)
226 				exit(1);
227 		}
228 		if (WEXITSTATUS(status) != EX_VKERNEL_REBOOT)
229 			return 0;
230 	}
231 
232 	/*
233 	 * Starting for real
234 	 */
235 	save_ac = ac;
236 	save_av = av;
237 	eflag = 0;
238 	pos = 0;
239 	kenv_size = 0;
240 
241 	/*
242 	 * Process options
243 	 */
244 	kernel_mem_readonly = 1;
245 	optcpus = 2;
246 	cpu_bits = 1;
247 	vkernel_b_arg = 0;
248 	vkernel_B_arg = 0;
249 	lwp_cpu_lock = LCL_NONE;
250 
251 	real_vkernel_enable = 0;
252 	vsize = sizeof(real_vkernel_enable);
253 	sysctlbyname("vm.vkernel_enable", &real_vkernel_enable, &vsize, NULL,0);
254 
255 	if (real_vkernel_enable == 0) {
256 		errx(1, "vm.vkernel_enable is 0, must be set "
257 			"to 1 to execute a vkernel!");
258 	}
259 
260 	real_ncpus = 1;
261 	vsize = sizeof(real_ncpus);
262 	sysctlbyname("hw.ncpu", &real_ncpus, &vsize, NULL, 0);
263 
264 	if (ac < 2)
265 		usage_help(false);
266 
267 	while ((c = getopt(ac, av, "c:hsvztTl:m:n:r:R:e:i:p:I:U")) != -1) {
268 		switch(c) {
269 		case 'e':
270 			/*
271 			 * name=value:name=value:name=value...
272 			 * name="value"...
273 			 *
274 			 * Allow values to be quoted but note that shells
275 			 * may remove the quotes, so using this feature
276 			 * to embed colons may require a backslash.
277 			 */
278 			n = strlen(optarg);
279 			isq = 0;
280 
281 			if (eflag == 0) {
282 				kenv_size = n + 2;
283 				kern_envp = malloc(kenv_size);
284 				if (kern_envp == NULL)
285 					errx(1, "Couldn't allocate %zd bytes for kern_envp", kenv_size);
286 			} else {
287 				kenv_size2 = kenv_size + n + 1;
288 				pos = kenv_size - 1;
289 				if ((tmp = realloc(kern_envp, kenv_size2)) == NULL)
290 					errx(1, "Couldn't reallocate %zd bytes for kern_envp", kenv_size2);
291 				kern_envp = tmp;
292 				kenv_size = kenv_size2;
293 			}
294 
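			/*
			 * Copy this -e argument into kern_envp, converting
			 * unquoted ':' separators into NULs.  The result is
			 * a sequence of NUL-terminated "name=value" strings
			 * with an extra NUL terminating the whole list.
			 */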
295 			for (i = 0, j = pos; i < n; ++i) {
296 				if (optarg[i] == '"')
297 					isq ^= 1;
298 				else if (optarg[i] == '\'')
299 					isq ^= 2;
300 				else if (isq == 0 && optarg[i] == ':')
301 					kern_envp[j++] = 0;
302 				else
303 					kern_envp[j++] = optarg[i];
304 			}
305 			kern_envp[j++] = 0;
306 			kern_envp[j++] = 0;
307 			eflag++;
308 			break;
309 		case 's':
310 			boothowto |= RB_SINGLE;
311 			break;
312 		case 't':
313 			use_precise_timer = 1;
314 			break;
315 		case 'v':
316 			bootverbose = 1;
317 			break;
318 		case 'i':
319 			memImageFile = optarg;
320 			break;
321 		case 'I':
322 			if (netifFileNum < VKNETIF_MAX)
323 				netifFile[netifFileNum++] = strdup(optarg);
324 			break;
325 		case 'r':
326 		case 'R':
327 			if (bootOnDisk < 0)
328 				bootOnDisk = 1;
329 			if (diskFileNum + cdFileNum < VKDISK_MAX) {
330 				diskFile[diskFileNum] = strdup(optarg);
331 				diskFlags[diskFileNum] = (c == 'R');
332 				++diskFileNum;
333 			}
334 			break;
335 		case 'c':
336 			if (bootOnDisk < 0)
337 				bootOnDisk = 0;
338 			if (diskFileNum + cdFileNum < VKDISK_MAX)
339 				cdFile[cdFileNum++] = strdup(optarg);
340 			break;
341 		case 'm':
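			/*
			 * System memory size, e.g. "-m 128m" or "-m 2g".
			 * An optional k/m/g suffix scales the value; the
			 * result must be at least 64MB and a multiple of
			 * SEG_SIZE (checked in init_sys_memory()).
			 */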
342 			Maxmem_bytes = strtoull(optarg, &suffix, 0);
343 			if (suffix) {
344 				switch(*suffix) {
345 				case 'g':
346 				case 'G':
347 					Maxmem_bytes <<= 30;
348 					break;
349 				case 'm':
350 				case 'M':
351 					Maxmem_bytes <<= 20;
352 					break;
353 				case 'k':
354 				case 'K':
355 					Maxmem_bytes <<= 10;
					break;
				case 0:
					/* no suffix: size is in bytes */
					break;
357 				default:
358 					Maxmem_bytes = 0;
359 					usage_err("Bad maxmem option");
360 					/* NOT REACHED */
361 					break;
362 				}
363 			}
364 			break;
365 		case 'l':
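			/*
			 * CPU locking policy (see also setrealcpu()):
			 *   map[,N]  lock each virtual cpu to successive
			 *            real cpus, starting at real cpu N
			 *   any      do not lock virtual cpus to real cpus
			 *   N        lock all virtual cpus to real cpu N
			 */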
366 			next_cpu = -1;
367 			if (strncmp("map", optarg, 3) == 0) {
368 				lwp_cpu_lock = LCL_PER_CPU;
369 				if (optarg[3] == ',') {
370 					next_cpu = strtol(optarg+4, &endp, 0);
371 					if (*endp != '\0')
372 						usage_err("Bad target CPU number at '%s'", endp);
373 				} else {
374 					next_cpu = 0;
375 				}
376 				if (next_cpu < 0 || next_cpu > real_ncpus - 1)
377 					usage_err("Bad target CPU, valid range is 0-%d", real_ncpus - 1);
378 			} else if (strncmp("any", optarg, 3) == 0) {
379 				lwp_cpu_lock = LCL_NONE;
380 			} else {
381 				lwp_cpu_lock = LCL_SINGLE_CPU;
382 				next_cpu = strtol(optarg, &endp, 0);
383 				if (*endp != '\0')
384 					usage_err("Bad target CPU number at '%s'", endp);
385 				if (next_cpu < 0 || next_cpu > real_ncpus - 1)
386 					usage_err("Bad target CPU, valid range is 0-%d", real_ncpus - 1);
387 			}
388 			break;
389 		case 'n':
390 			/*
391 			 * This value is set up by mp_start(), don't just
392 			 * set ncpus here.
393 			 */
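			/*
			 * Syntax: -n numcpus[:lbits[:cbits]].  For example,
			 * "-n 4:1:1" requests 4 cpus with 1 logical-ID bit
			 * (2 threads/core) and 1 core-ID bit (2 cores per
			 * package).
			 */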
394 			tok = strtok(optarg, ":");
395 			optcpus = strtol(tok, NULL, 0);
396 			if (optcpus < 1 || optcpus > MAXCPU)
397 				usage_err("Bad ncpus, valid range is 1-%d", MAXCPU);
398 			cpu_bits = 1;
399 			while ((1 << cpu_bits) < optcpus)
400 				++cpu_bits;
401 
402 			/*
403 			 * By default assume simple hyper-threading
404 			 */
405 			vkernel_b_arg = 1;
406 			vkernel_B_arg = cpu_bits - vkernel_b_arg;
407 
408 			/*
409 			 * [:lbits[:cbits]] override # of cpu bits
410 			 * for logical and core extraction, supplying
411 			 * defaults for any omission.
412 			 */
413 			tok = strtok(NULL, ":");
414 			if (tok != NULL) {
415 				vkernel_b_arg = strtol(tok, NULL, 0);
416 				vkernel_B_arg = cpu_bits - vkernel_b_arg;
417 
418 				/* :cbits argument */
419 				tok = strtok(NULL, ":");
420 				if (tok != NULL) {
421 					vkernel_B_arg = strtol(tok, NULL, 0);
422 				}
423 			}
424 			break;
425 		case 'p':
426 			pid_file = optarg;
427 			break;
428 		case 'U':
429 			kernel_mem_readonly = 0;
430 			break;
431 		case 'h':
432 			usage_help(true);
433 			break;
434 		case 'z':
435 			prezeromem = 1;
436 			break;
437 		default:
438 			usage_help(false);
439 		}
440 	}
441 
442 	writepid();
443 	cpu_disable_intr();
444 	init_sys_memory(memImageFile);
445 	init_kern_memory();
446 	init_globaldata();
447 	init_vkernel();
448 	setrealcpu();
449 	init_kqueue();
450 
451 	vmm_guest = VMM_GUEST_VKERNEL;
452 
453 	/*
454 	 * Check TSC
455 	 */
456 	vsize = sizeof(tsc_present);
457 	sysctlbyname("hw.tsc_present", &tsc_present, &vsize, NULL, 0);
458 	vsize = sizeof(tsc_invariant);
459 	sysctlbyname("hw.tsc_invariant", &tsc_invariant, &vsize, NULL, 0);
460 	vsize = sizeof(tsc_mpsync);
461 	sysctlbyname("hw.tsc_mpsync", &tsc_mpsync, &vsize, NULL, 0);
462 	vsize = sizeof(tsc_frequency);
463 	sysctlbyname("hw.tsc_frequency", &tsc_frequency, &vsize, NULL, 0);
464 	if (tsc_present)
465 		cpu_feature |= CPUID_TSC;
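	/*
	 * tsc_oneus_approx is the number of TSC ticks per microsecond,
	 * rounded up; the |1 keeps it non-zero even if the hw.tsc_frequency
	 * sysctl was unavailable.
	 */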
466 	tsc_oneus_approx = ((tsc_frequency|1) + 999999) / 1000000;
467 
468 	/*
469 	 * Check SSE
470 	 */
471 	vsize = sizeof(supports_sse);
472 	supports_sse = 0;
473 	sysctlbyname("hw.instruction_sse", &supports_sse, &vsize, NULL, 0);
	msize = sizeof(mxcsr_mask);
	sysctlbyname("hw.mxcsr_mask", &mxcsr_mask, &msize, NULL, 0);
475 	init_fpu(supports_sse);
476 	if (supports_sse)
477 		cpu_feature |= CPUID_SSE | CPUID_FXSR;
478 
479 	/*
	 * We boot from the first disk initialized below, so order the
	 * vkd/vcd initialization according to whether -r/-R or -c was
	 * specified first on the command line (bootOnDisk).
481 	 */
482 	if (bootOnDisk == 1) {
483 		init_disk(diskFile, diskFlags, diskFileNum, VKD_DISK);
484 		init_disk(cdFile, NULL, cdFileNum, VKD_CD);
485 	} else {
486 		init_disk(cdFile, NULL, cdFileNum, VKD_CD);
487 		init_disk(diskFile, diskFlags, diskFileNum, VKD_DISK);
488 	}
489 
490 	init_netif(netifFile, netifFileNum);
491 	init_exceptions();
492 	mi_startup();
493 	/* NOT REACHED */
494 	exit(EX_SOFTWARE);
495 }
496 
497 /* SIGTERM handler */
498 static
499 void
500 handle_term(int sig)
501 {
502 	kill(childpid, sig);
503 }
504 
505 /*
506  * Initialize system memory.  This is the virtual kernel's 'RAM'.
507  */
508 static
509 void
510 init_sys_memory(char *imageFile)
511 {
512 	struct stat st;
513 	int i;
514 	int fd;
515 
516 	/*
517 	 * Figure out the system memory image size.  If an image file was
518 	 * specified and -m was not specified, use the image file's size.
519 	 */
520 	if (imageFile && stat(imageFile, &st) == 0 && Maxmem_bytes == 0)
521 		Maxmem_bytes = (vm_paddr_t)st.st_size;
522 	if ((imageFile == NULL || stat(imageFile, &st) < 0) &&
523 	    Maxmem_bytes == 0) {
524 		errx(1, "Cannot create new memory file %s unless "
525 		       "system memory size is specified with -m",
526 		       imageFile);
527 		/* NOT REACHED */
528 	}
529 
530 	/*
531 	 * Maxmem must be known at this time
532 	 */
533 	if (Maxmem_bytes < 64 * 1024 * 1024 || (Maxmem_bytes & SEG_MASK)) {
534 		errx(1, "Bad maxmem specification: 64MB minimum, "
535 		       "multiples of %dMB only",
536 		       SEG_SIZE / 1024 / 1024);
537 		/* NOT REACHED */
538 	}
539 
540 	/*
541 	 * Generate an image file name if necessary, then open/create the
542 	 * file exclusively locked.  Do not allow multiple virtual kernels
543 	 * to use the same image file.
544 	 *
545 	 * Don't iterate through a million files if we do not have write
546 	 * access to the directory, stop if our open() failed on a
	 * non-existent file.  Otherwise opens can fail for any number of
	 * reasons (e.g. the image is locked by another vkernel).
548 	 */
549 	if (imageFile == NULL) {
550 		for (i = 0; i < 1000000; ++i) {
551 			asprintf(&imageFile, "/var/vkernel/memimg.%06d", i);
552 			fd = open(imageFile,
553 				  O_RDWR|O_CREAT|O_EXLOCK|O_NONBLOCK, 0644);
554 			if (fd < 0 && stat(imageFile, &st) == 0) {
555 				free(imageFile);
556 				continue;
557 			}
558 			break;
559 		}
560 	} else {
561 		fd = open(imageFile, O_RDWR|O_CREAT|O_EXLOCK|O_NONBLOCK, 0644);
562 	}
563 	fprintf(stderr, "Using memory file: %s\n", imageFile);
564 	if (fd < 0 || fstat(fd, &st) < 0) {
565 		err(1, "Unable to open/create %s", imageFile);
566 		/* NOT REACHED */
567 	}
568 
569 	/*
570 	 * Truncate or extend the file as necessary.  Clean out the contents
571 	 * of the file, we want it to be full of holes so we don't waste
572 	 * time reading in data from an old file that we no longer care
573 	 * about.
574 	 */
575 	ftruncate(fd, 0);
576 	ftruncate(fd, Maxmem_bytes);
577 
578 	MemImageFd = fd;
579 	Maxmem = Maxmem_bytes >> PAGE_SHIFT;
580 	physmem = Maxmem;
581 }
582 
583 /*
584  * Initialize kernel memory.  This reserves kernel virtual memory by using
585  * MAP_VPAGETABLE
586  *
587  * XXX NOTE!  MAP_VPAGETABLE is being ripped out and will break VKERNELs
588  *	      for a while, until we get hardware virtualization working.
589  */
590 
591 static
592 void
593 init_kern_memory(void)
594 {
595 	void *base;
596 	int i;
597 	void *firstfree;
598 
599 	/*
600 	 * Memory map our kernel virtual memory space.  Note that the
601 	 * kernel image itself is not made part of this memory for the
602 	 * moment.
603 	 *
604 	 * The memory map must be segment-aligned so we can properly
605 	 * offset KernelPTD.
606 	 *
607 	 * If the system kernel has a different MAXDSIZ, it might not
	 * be possible to map kernel memory in its preferred location.
609 	 * Try a number of different locations.
610 	 */
611 
612 	base = mmap((void*)KERNEL_KVA_START, KERNEL_KVA_SIZE,
613 		    PROT_READ|PROT_WRITE|PROT_EXEC,
614 		    MAP_FILE|MAP_SHARED|MAP_VPAGETABLE|MAP_FIXED|MAP_TRYFIXED,
615 		    MemImageFd, (off_t)KERNEL_KVA_START);
616 
617 	if (base == MAP_FAILED) {
618 		err(1, "Unable to mmap() kernel virtual memory!");
619 		/* NOT REACHED */
620 	}
621 	madvise(base, KERNEL_KVA_SIZE, MADV_NOSYNC);
622 	KvaStart = (vm_offset_t)base;
623 	KvaSize = KERNEL_KVA_SIZE;
624 	KvaEnd = KvaStart + KvaSize;
625 
626 	/* cannot use kprintf yet */
627 	printf("KVM mapped at %p-%p\n", (void *)KvaStart, (void *)KvaEnd);
628 
629 	/* MAP_FILE? */
630 	dmap_min_address = mmap(0, DMAP_SIZE, PROT_READ|PROT_WRITE,
631 				MAP_NOCORE|MAP_NOSYNC|MAP_SHARED,
632 				MemImageFd, 0);
633 	if (dmap_min_address == MAP_FAILED) {
634 		err(1, "Unable to mmap() kernel DMAP region!");
635 		/* NOT REACHED */
636 	}
637 
638 	/*
	 * Prefault and zero the memory (-z).  The vkernel is going to fault
	 * it all in anyway, and faults on the backing store itself are very
	 * expensive once we go SMP (they contend a lot), so optionally do it
	 * up front.
642 	 */
643 	if (prezeromem)
644 		bzero(dmap_min_address, Maxmem_bytes);
645 
646 	/*
647 	 * Bootstrap the kernel_pmap
648 	 */
649 	firstfree = NULL;
650 	pmap_bootstrap((vm_paddr_t *)&firstfree, (int64_t)base);
651 
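	/*
	 * MADV_SETMAP associates the virtual page table with the
	 * MAP_VPAGETABLE mapping above: the value encodes the offset of
	 * the page directory page in the backing store (0 here) together
	 * with the VPTE_RW|VPTE_V flags.
	 */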
652 	mcontrol(base, KERNEL_KVA_SIZE, MADV_SETMAP,
653 		 0 | VPTE_RW | VPTE_V);
654 
655 	/*
656 	 * phys_avail[] represents unallocated physical memory.  MI code
657 	 * will use phys_avail[] to create the vm_page array.
658 	 */
659 	phys_avail[0].phys_beg = (vm_paddr_t)firstfree;
660 	phys_avail[0].phys_beg = (phys_avail[0].phys_beg + PAGE_MASK) &
661 				 ~(vm_paddr_t)PAGE_MASK;
662 	phys_avail[0].phys_end = Maxmem_bytes;
663 
664 #if 0 /* JGV */
665 	/*
666 	 * (virtual_start, virtual_end) represent unallocated kernel virtual
667 	 * memory.  MI code will create kernel_map using these parameters.
668 	 */
669 	virtual_start = KvaStart + (long)firstfree;
670 	virtual_start = (virtual_start + PAGE_MASK) & ~(vm_offset_t)PAGE_MASK;
671 	virtual_end = KvaStart + KERNEL_KVA_SIZE;
672 #endif
673 
674 	/*
675 	 * pmap_growkernel() will set the correct value.
676 	 */
677 	kernel_vm_end = 0;
678 
679 	/*
680 	 * Allocate space for process 0's UAREA.
681 	 */
682 	proc0paddr = (void *)virtual_start;
683 	for (i = 0; i < UPAGES; ++i) {
684 		pmap_kenter_quick(virtual_start, phys_avail[0].phys_beg);
685 		virtual_start += PAGE_SIZE;
686 		phys_avail[0].phys_beg += PAGE_SIZE;
687 	}
688 
689 	/*
690 	 * crashdumpmap
691 	 */
692 	crashdumpmap = virtual_start;
693 	virtual_start += MAXDUMPPGS * PAGE_SIZE;
694 
695 	/*
696 	 * msgbufp maps the system message buffer
697 	 */
698 	assert((MSGBUF_SIZE & PAGE_MASK) == 0);
699 	msgbufp = (void *)virtual_start;
700 	for (i = 0; i < (MSGBUF_SIZE >> PAGE_SHIFT); ++i) {
701 		pmap_kenter_quick(virtual_start, phys_avail[0].phys_beg);
702 		virtual_start += PAGE_SIZE;
703 		phys_avail[0].phys_beg += PAGE_SIZE;
704 	}
705 	msgbufinit(msgbufp, MSGBUF_SIZE);
706 
707 	/*
708 	 * used by kern_memio for /dev/mem access
709 	 */
710 	ptvmmap = (caddr_t)virtual_start;
711 	virtual_start += PAGE_SIZE;
712 }
713 
714 /*
715  * Map the per-cpu globaldata for cpu #0.  Allocate the space using
716  * virtual_start and phys_avail[0]
717  */
718 static
719 void
720 init_globaldata(void)
721 {
722 	int i;
723 	vm_paddr_t pa;
724 	vm_offset_t va;
725 
726 	/*
727 	 * Reserve enough KVA to cover possible cpus.  This is a considerable
728 	 * amount of KVA since the privatespace structure includes two
729 	 * whole page table mappings.
730 	 */
731 	virtual_start = (virtual_start + SEG_MASK) & ~(vm_offset_t)SEG_MASK;
732 	CPU_prvspace = (void *)virtual_start;
733 	virtual_start += sizeof(struct privatespace) * SMP_MAXCPU;
734 
735 	/*
736 	 * Allocate enough physical memory to cover the mdglobaldata
737 	 * portion of the space and the idle stack and map the pages
738 	 * into KVA.  For cpu #0 only.
739 	 */
740 	for (i = 0; i < sizeof(struct mdglobaldata); i += PAGE_SIZE) {
741 		pa = phys_avail[0].phys_beg;
742 		va = (vm_offset_t)&CPU_prvspace[0].mdglobaldata + i;
743 		pmap_kenter_quick(va, pa);
744 		phys_avail[0].phys_beg += PAGE_SIZE;
745 	}
746 	for (i = 0; i < sizeof(CPU_prvspace[0].idlestack); i += PAGE_SIZE) {
747 		pa = phys_avail[0].phys_beg;
748 		va = (vm_offset_t)&CPU_prvspace[0].idlestack + i;
749 		pmap_kenter_quick(va, pa);
750 		phys_avail[0].phys_beg += PAGE_SIZE;
751 	}
752 
753 	/*
754 	 * Setup the %gs for cpu #0.  The mycpu macro works after this
755 	 * point.  Note that %fs is used by pthreads.
756 	 */
757 	tls_set_gs(&CPU_prvspace[0], sizeof(struct privatespace));
758 }
759 
760 
761 /*
762  * Initialize pool tokens and other necessary locks
763  */
static void
init_locks(void)
{
	/*
	 * Get the initial mplock with a count of 1 for the BSP.
	 * This uses a LOGICAL cpu ID, i.e. BSP == 0.
	 */
	cpu_get_initial_mplock();

	/* our token pool needs to work early */
	lwkt_token_pool_init();
}
778 
779 
780 /*
781  * Initialize very low level systems including thread0, proc0, etc.
782  */
783 static
784 void
785 init_vkernel(void)
786 {
787 	struct mdglobaldata *gd;
788 
789 	gd = &CPU_prvspace[0].mdglobaldata;
790 	bzero(gd, sizeof(*gd));
791 
792 	gd->mi.gd_curthread = &thread0;
793 	thread0.td_gd = &gd->mi;
794 	ncpus = 1;
795 	ncpus_fit = 1;	/* rounded up power of 2 */
796 	/* ncpus_fit_mask are 0 */
797 	init_param1();
798 	gd->mi.gd_prvspace = &CPU_prvspace[0];
799 	mi_gdinit(&gd->mi, 0);
800 	cpu_gdinit(gd, 0);
801 	mi_proc0init(&gd->mi, proc0paddr);
802 	lwp0.lwp_md.md_regs = &proc0_tf;
803 
804 	init_locks();
805 	cninit();
806 	rand_initialize();
807 #if 0	/* #ifdef DDB */
808 	kdb_init();
809 	if (boothowto & RB_KDB)
810 		Debugger("Boot flags requested debugger");
811 #endif
812 	identcpu();
813 #if 0
814 	initializecpu();	/* Initialize CPU registers */
815 #endif
816 	init_param2((phys_avail[0].phys_end -
817 		     phys_avail[0].phys_beg) / PAGE_SIZE);
818 
819 #if 0
820 	/*
821 	 * Map the message buffer
822 	 */
823 	for (off = 0; off < round_page(MSGBUF_SIZE); off += PAGE_SIZE)
824 		pmap_kenter((vm_offset_t)msgbufp + off, avail_end + off);
825 	msgbufinit(msgbufp, MSGBUF_SIZE);
826 #endif
827 #if 0
828 	thread0.td_pcb_cr3 ... MMU
829 	lwp0.lwp_md.md_regs = &proc0_tf;
830 #endif
831 }
832 
833 /*
834  * Filesystem image paths for the virtual kernel are optional.
 * If specified, each should point to a disk image,
 * the first of which will become the root disk.
837  *
838  * The virtual kernel caches data from our 'disk' just like a normal kernel,
839  * so we do not really want the real kernel to cache the data too.  Use
840  * O_DIRECT to remove the duplication.
841  */
842 static
843 void
844 init_disk(char **diskExp, int *diskFlags, int diskFileNum, enum vkdisk_type type)
845 {
846 	char *serno;
847 	int i;
848 
	if (diskFileNum == 0)
		return;
851 
	for (i = 0; i < diskFileNum; i++) {
		char *fname;

		fname = diskExp[i];
		if (fname == NULL) {
			warnx("Invalid argument to '-r'");
			continue;
		}
860 		/*
861 		 * Check for a serial number for the virtual disk
862 		 * passed from the command line.
863 		 */
864 		serno = fname;
865 		strsep(&serno, ":");
866 
867 		if (DiskNum < VKDISK_MAX) {
868 			struct stat st;
869 			struct vkdisk_info *info = NULL;
870 			int fd;
871 			size_t l = 0;
872 
873 			if (type == VKD_DISK)
874 			    fd = open(fname, O_RDWR|O_DIRECT, 0644);
875 			else
876 			    fd = open(fname, O_RDONLY|O_DIRECT, 0644);
877 			if (fd < 0 || fstat(fd, &st) < 0) {
878 				err(1, "Unable to open/create %s", fname);
879 				/* NOT REACHED */
880 			}
			if (S_ISREG(st.st_mode) &&
			    (diskFlags == NULL || (diskFlags[i] & 1) == 0)) {
				if (flock(fd, LOCK_EX|LOCK_NB) < 0) {
					errx(1, "Disk image %s is already "
						"in use", fname);
885 					/* NOT REACHED */
886 				}
887 			}
888 
889 			info = &DiskInfo[DiskNum];
890 			l = strlen(fname);
891 
892 			info->unit = i;
893 			info->fd = fd;
894 			info->type = type;
			info->flags = diskFlags ? diskFlags[i] : 0;
896 			memcpy(info->fname, fname, l);
897 			info->serno = NULL;
898 			if (serno) {
899 				if ((info->serno = malloc(SERNOLEN)) != NULL)
900 					strlcpy(info->serno, serno, SERNOLEN);
901 				else
902 					warnx("Couldn't allocate memory for the operation");
903 			}
904 
905 			if (DiskNum == 0) {
906 				if (type == VKD_CD) {
907 					rootdevnames[0] = "cd9660:vcd0";
908 				} else if (type == VKD_DISK) {
909 					rootdevnames[0] = "ufs:vkd0s0a";
910 					rootdevnames[1] = "ufs:vkd0s1a";
911 				}
912 			}
913 
914 			DiskNum++;
915 		} else {
			warnx("vkd%d (%s) > VKDISK_MAX", DiskNum, fname);
			continue;
918 		}
919 	}
920 }
921 
922 static
923 int
924 netif_set_tapflags(int tap_unit, int f, int s)
925 {
926 	struct ifreq ifr;
927 	int flags;
928 
929 	bzero(&ifr, sizeof(ifr));
930 
931 	snprintf(ifr.ifr_name, sizeof(ifr.ifr_name), "tap%d", tap_unit);
932 	if (ioctl(s, SIOCGIFFLAGS, &ifr) < 0) {
933 		warn("tap%d: ioctl(SIOCGIFFLAGS) failed", tap_unit);
934 		return -1;
935 	}
936 
937 	/*
938 	 * Adjust if_flags
939 	 *
940 	 * If the flags are already set/cleared, then we return
941 	 * immediately to avoid extra syscalls
942 	 */
943 	flags = (ifr.ifr_flags & 0xffff) | (ifr.ifr_flagshigh << 16);
944 	if (f < 0) {
945 		/* Turn off flags */
946 		f = -f;
947 		if ((flags & f) == 0)
948 			return 0;
949 		flags &= ~f;
950 	} else {
951 		/* Turn on flags */
952 		if (flags & f)
953 			return 0;
954 		flags |= f;
955 	}
956 
957 	/*
	 * Fix up ifreq.ifr_name, since it may have been trashed
	 * by the previous ioctl(SIOCGIFFLAGS)
960 	 */
961 	snprintf(ifr.ifr_name, sizeof(ifr.ifr_name), "tap%d", tap_unit);
962 
963 	ifr.ifr_flags = flags & 0xffff;
964 	ifr.ifr_flagshigh = flags >> 16;
965 	if (ioctl(s, SIOCSIFFLAGS, &ifr) < 0) {
966 		warn("tap%d: ioctl(SIOCSIFFLAGS) failed", tap_unit);
967 		return -1;
968 	}
969 	return 0;
970 }
971 
972 static
973 int
974 netif_set_tapaddr(int tap_unit, in_addr_t addr, in_addr_t mask, int s)
975 {
976 	struct ifaliasreq ifra;
977 	struct sockaddr_in *in;
978 
979 	bzero(&ifra, sizeof(ifra));
980 	snprintf(ifra.ifra_name, sizeof(ifra.ifra_name), "tap%d", tap_unit);
981 
982 	/* Setup address */
983 	in = (struct sockaddr_in *)&ifra.ifra_addr;
984 	in->sin_family = AF_INET;
985 	in->sin_len = sizeof(*in);
986 	in->sin_addr.s_addr = addr;
987 
988 	if (mask != 0) {
989 		/* Setup netmask */
990 		in = (struct sockaddr_in *)&ifra.ifra_mask;
991 		in->sin_len = sizeof(*in);
992 		in->sin_addr.s_addr = mask;
993 	}
994 
995 	if (ioctl(s, SIOCAIFADDR, &ifra) < 0) {
996 		warn("tap%d: ioctl(SIOCAIFADDR) failed", tap_unit);
997 		return -1;
998 	}
999 	return 0;
1000 }
1001 
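/*
 * Add the tap(4) interface for tap_unit as a member of the named bridge(4)
 * interface, using the generic SIOCSDRVSPEC/BRDGADD ioctl on socket s.
 * Already being a member (EEXIST) is not treated as an error.
 */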
1002 static
1003 int
1004 netif_add_tap2brg(int tap_unit, const char *ifbridge, int s)
1005 {
1006 	struct ifbreq ifbr;
1007 	struct ifdrv ifd;
1008 
1009 	bzero(&ifbr, sizeof(ifbr));
1010 	snprintf(ifbr.ifbr_ifsname, sizeof(ifbr.ifbr_ifsname),
1011 		 "tap%d", tap_unit);
1012 
1013 	bzero(&ifd, sizeof(ifd));
1014 	strlcpy(ifd.ifd_name, ifbridge, sizeof(ifd.ifd_name));
1015 	ifd.ifd_cmd = BRDGADD;
1016 	ifd.ifd_len = sizeof(ifbr);
1017 	ifd.ifd_data = &ifbr;
1018 
1019 	if (ioctl(s, SIOCSDRVSPEC, &ifd) < 0) {
1020 		/*
1021 		 * 'errno == EEXIST' means that the tap(4) is already
1022 		 * a member of the bridge(4)
1023 		 */
1024 		if (errno != EEXIST) {
1025 			warn("ioctl(%s, SIOCSDRVSPEC) failed", ifbridge);
1026 			return -1;
1027 		}
1028 	}
1029 	return 0;
1030 }
1031 
1032 #define TAPDEV_OFLAGS	(O_RDWR | O_NONBLOCK)
1033 
1034 /*
1035  * Locate the first unused tap(4) device file if auto mode is requested,
1036  * or open the user supplied device file, and bring up the corresponding
1037  * tap(4) interface.
1038  *
 * NOTE: Only tap(4) device files and unix-domain sockets are supported
 *	 currently.
1040  */
1041 static
1042 int
1043 netif_open_tap(const char *netif, int *tap_unit, int s)
1044 {
1045 	char tap_dev[MAXPATHLEN];
1046 	int tap_fd, failed;
1047 	struct stat st;
1048 	char *dname;
1049 
1050 	*tap_unit = -1;
1051 
1052 	if (strcmp(netif, "auto") == 0) {
1053 		/*
1054 		 * Find first unused tap(4) device file
1055 		 */
1056 		tap_fd = open("/dev/tap", TAPDEV_OFLAGS);
1057 		if (tap_fd < 0) {
1058 			warnc(errno, "Unable to find a free tap(4)");
1059 			return -1;
1060 		}
1061 	} else {
1062 		/*
1063 		 * User supplied tap(4) device file or unix socket.
1064 		 */
1065 		if (netif[0] == '/')	/* Absolute path */
1066 			strlcpy(tap_dev, netif, sizeof(tap_dev));
1067 		else
1068 			snprintf(tap_dev, sizeof(tap_dev), "/dev/%s", netif);
1069 
1070 		tap_fd = open(tap_dev, TAPDEV_OFLAGS);
1071 
1072 		/*
1073 		 * If we cannot open normally try to connect to it.
1074 		 */
1075 		if (tap_fd < 0)
1076 			tap_fd = unix_connect(tap_dev);
1077 
1078 		if (tap_fd < 0) {
1079 			warn("Unable to open %s", tap_dev);
1080 			return -1;
1081 		}
1082 	}
1083 
1084 	/*
1085 	 * Check whether the device file is a tap(4)
1086 	 */
1087 	if (fstat(tap_fd, &st) < 0) {
1088 		failed = 1;
1089 	} else if (S_ISCHR(st.st_mode)) {
1090 		dname = fdevname(tap_fd);
1091 		if (dname)
1092 			dname = strstr(dname, "tap");
1093 		if (dname) {
1094 			/*
1095 			 * Bring up the corresponding tap(4) interface
1096 			 */
1097 			*tap_unit = strtol(dname + 3, NULL, 10);
1098 			printf("TAP UNIT %d\n", *tap_unit);
1099 			if (netif_set_tapflags(*tap_unit, IFF_UP, s) == 0)
1100 				failed = 0;
1101 			else
1102 				failed = 1;
1103 		} else {
1104 			failed = 1;
1105 		}
1106 	} else if (S_ISSOCK(st.st_mode)) {
1107 		/*
1108 		 * Special socket connection (typically to vknet).  We
1109 		 * do not have to do anything.
1110 		 */
1111 		failed = 0;
1112 	} else {
1113 		failed = 1;
1114 	}
1115 
1116 	if (failed) {
1117 		warnx("%s is not a tap(4) device or socket", tap_dev);
1118 		close(tap_fd);
1119 		tap_fd = -1;
1120 		*tap_unit = -1;
1121 	}
1122 	return tap_fd;
1123 }
1124 
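/*
 * Connect to a unix-domain SOCK_SEQPACKET socket at the given path
 * (typically a socket exported by vknet) and return a non-blocking
 * descriptor with an enlarged send buffer, or -1 on failure.
 */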
1125 static int
1126 unix_connect(const char *path)
1127 {
1128 	struct sockaddr_un sunx;
1129 	int len;
1130 	int net_fd;
1131 	int sndbuf = 262144;
1132 	struct stat st;
1133 
1134 	snprintf(sunx.sun_path, sizeof(sunx.sun_path), "%s", path);
1135 	len = offsetof(struct sockaddr_un, sun_path[strlen(sunx.sun_path)]);
1136 	++len;	/* include nul */
1137 	sunx.sun_family = AF_UNIX;
1138 	sunx.sun_len = len;
1139 
1140 	net_fd = socket(AF_UNIX, SOCK_SEQPACKET, 0);
1141 	if (net_fd < 0)
1142 		return(-1);
1143 	if (connect(net_fd, (void *)&sunx, len) < 0) {
1144 		close(net_fd);
1145 		return(-1);
1146 	}
1147 	setsockopt(net_fd, SOL_SOCKET, SO_SNDBUF, &sndbuf, sizeof(sndbuf));
1148 	if (fstat(net_fd, &st) == 0)
1149 		printf("Network socket buffer: %ld bytes\n", st.st_blksize);
1150 	fcntl(net_fd, F_SETFL, O_NONBLOCK);
1151 	return(net_fd);
1152 }
1153 
1154 #undef TAPDEV_MAJOR
1155 #undef TAPDEV_MINOR
1156 #undef TAPDEV_OFLAGS
1157 
1158 /*
1159  * Following syntax is supported,
1160  * 1) x.x.x.x             tap(4)'s address is x.x.x.x
1161  *
1162  * 2) x.x.x.x/z           tap(4)'s address is x.x.x.x
1163  *                        tap(4)'s netmask len is z
1164  *
1165  * 3) x.x.x.x:y.y.y.y     tap(4)'s address is x.x.x.x
1166  *                        pseudo netif's address is y.y.y.y
1167  *
1168  * 4) x.x.x.x:y.y.y.y/z   tap(4)'s address is x.x.x.x
1169  *                        pseudo netif's address is y.y.y.y
1170  *                        tap(4) and pseudo netif's netmask len are z
1171  *
1172  * 5) bridgeX             tap(4) will be added to bridgeX
1173  *
1174  * 6) bridgeX:y.y.y.y     tap(4) will be added to bridgeX
1175  *                        pseudo netif's address is y.y.y.y
1176  *
1177  * 7) bridgeX:y.y.y.y/z   tap(4) will be added to bridgeX
1178  *                        pseudo netif's address is y.y.y.y
1179  *                        pseudo netif's netmask len is z
1180  */
1181 static
1182 int
1183 netif_init_tap(int tap_unit, in_addr_t *addr, in_addr_t *mask, int s)
1184 {
1185 	in_addr_t tap_addr, netmask, netif_addr;
1186 	int next_netif_addr;
1187 	char *tok, *masklen_str, *ifbridge;
1188 
1189 	*addr = 0;
1190 	*mask = 0;
1191 
1192 	tok = strtok(NULL, ":/");
1193 	if (tok == NULL) {
1194 		/*
1195 		 * Nothing special, simply use tap(4) as backend
1196 		 */
1197 		return 0;
1198 	}
1199 
1200 	if (inet_pton(AF_INET, tok, &tap_addr) > 0) {
1201 		/*
1202 		 * tap(4)'s address is supplied
1203 		 */
1204 		ifbridge = NULL;
1205 
1206 		/*
		 * If there is a next token, it may be either the pseudo
		 * netif's address or the netmask length for tap(4)
1209 		 */
1210 		next_netif_addr = 0;
1211 	} else {
1212 		/*
		 * Not a tap(4) address, so assume it is a bridge(4)
		 * interface name
1215 		 */
1216 		tap_addr = 0;
1217 		ifbridge = tok;
1218 
1219 		/*
		 * If there is a next token, it must be the pseudo
		 * netif's address
1222 		 */
1223 		next_netif_addr = 1;
1224 	}
1225 
1226 	netmask = netif_addr = 0;
1227 
1228 	tok = strtok(NULL, ":/");
1229 	if (tok == NULL)
1230 		goto back;
1231 
1232 	if (inet_pton(AF_INET, tok, &netif_addr) <= 0) {
1233 		if (next_netif_addr) {
1234 			warnx("Invalid pseudo netif address: %s", tok);
1235 			return -1;
1236 		}
1237 		netif_addr = 0;
1238 
1239 		/*
		 * The current token is not an address, so it must be the
		 * netmask length
1241 		 */
1242 		masklen_str = tok;
1243 	} else {
1244 		/*
		 * The current token is the pseudo netif's address; if there
		 * is a next token it must be the netmask length
1247 		 */
1248 		masklen_str = strtok(NULL, "/");
1249 	}
1250 
1251 	/* Calculate netmask */
1252 	if (masklen_str != NULL) {
1253 		u_long masklen;
1254 
1255 		masklen = strtoul(masklen_str, NULL, 10);
1256 		if (masklen < 32 && masklen > 0) {
1257 			netmask =
1258 			    htonl(rounddown2(0xffffffff, 1LL << (32 - masklen)));
1259 		} else {
1260 			warnx("Invalid netmask len: %lu", masklen);
1261 			return -1;
1262 		}
1263 	}
1264 
1265 	/* Make sure there is no more token left */
1266 	if (strtok(NULL, ":/") != NULL) {
1267 		warnx("Invalid argument to '-I'");
1268 		return -1;
1269 	}
1270 
1271 back:
1272 	if (tap_unit < 0) {
1273 		/* Do nothing */
1274 	} else if (ifbridge == NULL) {
1275 		/* Set tap(4) address/netmask */
1276 		if (netif_set_tapaddr(tap_unit, tap_addr, netmask, s) < 0)
1277 			return -1;
1278 	} else {
1279 		/* Tie tap(4) to bridge(4) */
1280 		if (netif_add_tap2brg(tap_unit, ifbridge, s) < 0)
1281 			return -1;
1282 	}
1283 
1284 	*addr = netif_addr;
1285 	*mask = netmask;
1286 	return 0;
1287 }
1288 
1289 /*
1290  * NetifInfo[] will be filled for pseudo netif initialization.
1291  * NetifNum will be bumped to reflect the number of valid entries
1292  * in NetifInfo[].
1293  */
1294 static
1295 void
1296 init_netif(char *netifExp[], int netifExpNum)
1297 {
1298 	int i, s;
1299 	char *tmp;
1300 
1301 	if (netifExpNum == 0)
1302 		return;
1303 
1304 	s = socket(AF_INET, SOCK_DGRAM, 0);	/* for ioctl(SIOC) */
1305 	if (s < 0)
1306 		return;
1307 
1308 	for (i = 0; i < netifExpNum; ++i) {
1309 		struct vknetif_info *info;
1310 		in_addr_t netif_addr, netif_mask;
1311 		int tap_fd, tap_unit;
1312 		char *netif;
1313 
1314 		/* Extract MAC address if there is one */
1315 		tmp = netifExp[i];
1316 		strsep(&tmp, "=");
1317 
1318 		netif = strtok(netifExp[i], ":");
1319 		if (netif == NULL) {
1320 			warnx("Invalid argument to '-I'");
1321 			continue;
1322 		}
1323 
1324 		/*
1325 		 * Open tap(4) device file and bring up the
1326 		 * corresponding interface
1327 		 */
1328 		tap_fd = netif_open_tap(netif, &tap_unit, s);
1329 		if (tap_fd < 0)
1330 			continue;
1331 
1332 		/*
1333 		 * Initialize tap(4) and get address/netmask
1334 		 * for pseudo netif
1335 		 *
		 * NB: The remainder of netifExp[i] is passed to
		 *     netif_init_tap() implicitly via strtok().
1338 		 */
1339 		if (netif_init_tap(tap_unit, &netif_addr, &netif_mask, s) < 0) {
1340 			/*
1341 			 * NB: Closing tap(4) device file will bring
1342 			 *     down the corresponding interface
1343 			 */
1344 			close(tap_fd);
1345 			continue;
1346 		}
1347 
1348 		info = &NetifInfo[NetifNum];
1349 		bzero(info, sizeof(*info));
1350 		info->tap_fd = tap_fd;
1351 		info->tap_unit = tap_unit;
1352 		info->netif_addr = netif_addr;
1353 		info->netif_mask = netif_mask;
1354 		/*
		 * If tmp isn't NULL a MAC address may have been
		 * specified, so attempt to convert it.
1357 		 * Setting enaddr to NULL will tell vke_attach() we
1358 		 * need a pseudo-random MAC address.
1359 		 */
1360 		if (tmp != NULL) {
1361 			if ((info->enaddr = malloc(ETHER_ADDR_LEN)) == NULL)
1362 				warnx("Couldn't allocate memory for the operation");
1363 			else {
1364 				if ((kether_aton(tmp, info->enaddr)) == NULL) {
1365 					free(info->enaddr);
1366 					info->enaddr = NULL;
1367 				}
1368 			}
1369 		}
1370 
1371 		NetifNum++;
1372 		if (NetifNum >= VKNETIF_MAX)	/* XXX will this happen? */
1373 			break;
1374 	}
1375 	close(s);
1376 }
1377 
1378 /*
1379  * Create the pid file and leave it open and locked while the vkernel is
1380  * running.  This allows a script to use /usr/bin/lockf to probe whether
 * a vkernel is still running (so as not to accidentally kill an unrelated
1382  * process from a stale pid file).
1383  */
1384 static
1385 void
1386 writepid(void)
1387 {
1388 	char buf[32];
1389 	int fd;
1390 
1391 	if (pid_file != NULL) {
1392 		snprintf(buf, sizeof(buf), "%ld\n", (long)getpid());
1393 		fd = open(pid_file, O_RDWR|O_CREAT|O_EXLOCK|O_NONBLOCK, 0666);
1394 		if (fd < 0) {
1395 			if (errno == EWOULDBLOCK) {
1396 				perror("Failed to lock pidfile, "
1397 				       "vkernel already running");
1398 			} else {
1399 				perror("Failed to create pidfile");
1400 			}
1401 			exit(EX_SOFTWARE);
1402 		}
1403 		ftruncate(fd, 0);
1404 		write(fd, buf, strlen(buf));
1405 		/* leave the file open to maintain the lock */
1406 	}
1407 }
1408 
1409 static
1410 void
cleanpid(void)
1412 {
1413 	if (pid_file != NULL) {
1414 		if (unlink(pid_file) < 0)
1415 			perror("Warning: couldn't remove pidfile");
1416 	}
1417 }
1418 
1419 static
1420 void
1421 usage_err(const char *ctl, ...)
1422 {
1423 	va_list va;
1424 
1425 	va_start(va, ctl);
1426 	vfprintf(stderr, ctl, va);
1427 	va_end(va);
1428 	fprintf(stderr, "\n");
1429 	exit(EX_USAGE);
1430 }
1431 
1432 static
1433 void
1434 usage_help(_Bool help)
1435 {
	fprintf(stderr, "Usage: %s [-hstUvz] [-c file] [-e name=value:name=value:...]\n"
1437 	    "\t[-i file] [-I interface[:address1[:address2][/netmask]]] [-l cpulock]\n"
1438 	    "\t[-m size] [-n numcpus[:lbits[:cbits]]]\n"
	    "\t[-p file] [-r file] [-R file]\n", save_av[0]);
1440 
1441 	if (help)
1442 		fprintf(stderr, "\nArguments:\n"
1443 		    "\t-c\tSpecify a readonly CD-ROM image file to be used by the kernel.\n"
1444 		    "\t-e\tSpecify an environment to be used by the kernel.\n"
1445 		    "\t-h\tThis list of options.\n"
1446 		    "\t-i\tSpecify a memory image file to be used by the virtual kernel.\n"
1447 		    "\t-I\tCreate a virtual network device.\n"
1448 		    "\t-l\tSpecify which, if any, real CPUs to lock virtual CPUs to.\n"
1449 		    "\t-m\tSpecify the amount of memory to be used by the kernel in bytes.\n"
1450 		    "\t-n\tSpecify the number of CPUs and the topology you wish to emulate:\n"
1451 		    "\t\t\tnumcpus - number of cpus\n"
1452 		    "\t\t\tlbits - specify the number of bits within APICID(=CPUID)\n"
1453 		    "\t\t\t        needed for representing the logical ID.\n"
1454 		    "\t\t\t        Controls the number of threads/core:\n"
1455 		    "\t\t\t        (0 bits - 1 thread, 1 bit - 2 threads).\n"
1456 		    "\t\t\tcbits - specify the number of bits within APICID(=CPUID)\n"
1457 		    "\t\t\t        needed for representing the core ID.\n"
1458 		    "\t\t\t        Controls the number of cores/package:\n"
1459 		    "\t\t\t        (0 bits - 1 core, 1 bit - 2 cores).\n"
1460 		    "\t-p\tSpecify a file in which to store the process ID.\n"
1461 		    "\t-r\tSpecify a R/W disk image file, iterates vkd0..n\n"
1462 		    "\t-R\tSpecify a COW disk image file, iterates vkd0..n\n"
1463 		    "\t-s\tBoot into single-user mode.\n"
1464 		    "\t-t\tUse a precise host timer when calculating clock values.\n"
1465 		    "\t-U\tEnable writing to kernel memory and module loading.\n"
		    "\t-v\tTurn on verbose booting.\n"
		    "\t-z\tPre-zero (prefault) the vkernel's memory at startup.\n");
1467 
1468 	exit(EX_USAGE);
1469 }
1470 
1471 void
1472 cpu_smp_stopped(void)
1473 {
1474 }
1475 
1476 void
1477 cpu_reset(void)
1478 {
1479 	kprintf("cpu reset, rebooting vkernel\n");
1480 	closefrom(3);
1481 	cleanpid();
1482 	exit(EX_VKERNEL_REBOOT);
1483 }
1484 
1485 void
1486 cpu_halt(void)
1487 {
1488 	kprintf("cpu halt, exiting vkernel\n");
1489 	cleanpid();
1490 	exit(EX_OK);
1491 }
1492 
1493 void
1494 setrealcpu(void)
1495 {
1496 	switch(lwp_cpu_lock) {
1497 	case LCL_PER_CPU:
1498 		if (bootverbose)
1499 			kprintf("Locking CPU%d to real cpu %d\n",
1500 				mycpuid, next_cpu);
1501 		usched_set(getpid(), USCHED_SET_CPU, &next_cpu, sizeof(next_cpu));
1502 		next_cpu++;
1503 		if (next_cpu >= real_ncpus)
1504 			next_cpu = 0;
1505 		break;
1506 	case LCL_SINGLE_CPU:
1507 		if (bootverbose)
1508 			kprintf("Locking CPU%d to real cpu %d\n",
1509 				mycpuid, next_cpu);
1510 		usched_set(getpid(), USCHED_SET_CPU, &next_cpu, sizeof(next_cpu));
1511 		break;
1512 	default:
1513 		/* do not map virtual cpus to real cpus */
1514 		break;
1515 	}
1516 }
1517 
1518 /*
1519  * Allocate and free memory for module loading.  The loaded module
1520  * has to be placed somewhere near the current kernel binary load
1521  * point or the relocations will not work.
1522  *
1523  * I'm not sure why this isn't working.
1524  */
1525 int
1526 vkernel_module_memory_alloc(vm_offset_t *basep, size_t bytes)
1527 {
1528 #if 1
1529 	size_t xtra;
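	/*
	 * xtra pads the current break up to the next page boundary.
	 * sbrk() returns the old break, so *basep ends up page-aligned at
	 * the start of the new space, which is then zeroed.  Using sbrk()
	 * keeps the module near the vkernel binary, as the relocations
	 * require.
	 */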
1530 	xtra = (PAGE_SIZE - (vm_offset_t)sbrk(0)) & PAGE_MASK;
1531 	*basep = (vm_offset_t)sbrk(xtra + bytes) + xtra;
1532 	bzero((void *)*basep, bytes);
1533 #else
1534 	*basep = (vm_offset_t)mmap((void *)0x000000000, bytes,
1535 				   PROT_READ|PROT_WRITE|PROT_EXEC,
1536 				   MAP_ANON|MAP_SHARED, -1, 0);
1537 	if ((void *)*basep == MAP_FAILED)
1538 		return ENOMEM;
1539 #endif
1540 	return 0;
1541 }
1542 
1543 void
1544 vkernel_module_memory_free(vm_offset_t base, size_t bytes)
1545 {
1546 #if 0
1547 #if 0
1548 	munmap((void *)base, bytes);
1549 #endif
1550 #endif
1551 }
1552 
1553 /*
 * VKERNEL64 implementation functions using pthreads.
1555  */
1556 void
1557 vkernel_yield(void)
1558 {
1559 	pthread_yield();
1560 }
1561