1 /*
2  * Copyright (c) 2006 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  */
34 
35 #include <sys/types.h>
36 #include <sys/systm.h>
37 #include <sys/kernel.h>
38 #include <sys/stat.h>
39 #include <sys/mman.h>
40 #include <sys/cons.h>
41 #include <sys/random.h>
42 #include <sys/vkernel.h>
43 #include <sys/tls.h>
44 #include <sys/reboot.h>
45 #include <sys/proc.h>
46 #include <sys/msgbuf.h>
47 #include <sys/vmspace.h>
48 #include <sys/socket.h>
49 #include <sys/sockio.h>
50 #include <sys/sysctl.h>
51 #include <sys/un.h>
52 #include <vm/vm_page.h>
53 #include <vm/vm_map.h>
54 #include <sys/mplock2.h>
55 #include <sys/wait.h>
56 #include <sys/vmm.h>
57 
58 #include <machine/cpu.h>
59 #include <machine/globaldata.h>
60 #include <machine/tls.h>
61 #include <machine/md_var.h>
62 #include <machine/vmparam.h>
63 #include <cpu/specialreg.h>
64 
65 #include <net/if.h>
66 #include <net/if_arp.h>
67 #include <net/ethernet.h>
68 #include <net/bridge/if_bridgevar.h>
69 #include <netinet/in.h>
70 #include <arpa/inet.h>
71 #include <net/if_var.h>
72 
73 #include <stdio.h>
74 #include <stdlib.h>
75 #include <stdarg.h>
76 #include <stdbool.h>
77 #include <unistd.h>
78 #include <fcntl.h>
79 #include <string.h>
80 #include <err.h>
81 #include <errno.h>
82 #include <assert.h>
83 #include <sysexits.h>
84 
85 #define EX_VKERNEL_REBOOT	32
86 
87 vm_paddr_t phys_avail[16];
88 vm_paddr_t Maxmem;
89 vm_paddr_t Maxmem_bytes;
90 long physmem;
91 int MemImageFd = -1;
92 struct vkdisk_info DiskInfo[VKDISK_MAX];
93 int DiskNum;
94 struct vknetif_info NetifInfo[VKNETIF_MAX];
95 int NetifNum;
96 char *pid_file;
97 vm_offset_t KvaStart;
98 vm_offset_t KvaEnd;
99 vm_offset_t KvaSize;
100 vm_offset_t virtual_start;
101 vm_offset_t virtual_end;
102 vm_offset_t virtual2_start;
103 vm_offset_t virtual2_end;
104 vm_offset_t kernel_vm_end;
105 vm_offset_t crashdumpmap;
106 vm_offset_t clean_sva;
107 vm_offset_t clean_eva;
108 struct msgbuf *msgbufp;
109 caddr_t ptvmmap;
110 vpte_t	*KernelPTD;
111 vpte_t	*KernelPTA;	/* Warning: Offset for direct VA translation */
112 void *dmap_min_address;
113 void *vkernel_stack;
114 u_int cpu_feature;	/* XXX */
115 int tsc_present;
116 int tsc_invariant;
117 int tsc_mpsync;
118 int64_t tsc_frequency;
119 int optcpus;		/* number of cpus - see mp_start() */
120 int lwp_cpu_lock;	/* if/how to lock virtual CPUs to real CPUs */
121 int real_ncpus;		/* number of real CPUs */
122 int next_cpu;		/* next real CPU to lock a virtual CPU to */
int vkernel_b_arg;	/* number of logical CPU bits - only SMP */
int vkernel_B_arg;	/* number of core bits - only SMP */
125 int vmm_enabled;	/* VMM HW assisted enable */
126 struct privatespace *CPU_prvspace;
127 
128 extern uint64_t KPML4phys;	/* phys addr of kernel level 4 */
129 
130 static struct trapframe proc0_tf;
131 static void *proc0paddr;
132 
133 static void init_sys_memory(char *imageFile);
134 static void init_kern_memory(void);
135 static void init_kern_memory_vmm(void);
136 static void init_globaldata(void);
137 static void init_vkernel(void);
138 static void init_disk(char **diskExp, int *diskFlags, int diskFileNum, enum vkdisk_type type);
139 static void init_netif(char *netifExp[], int netifFileNum);
140 static void writepid(void);
141 static void cleanpid(void);
142 static int unix_connect(const char *path);
143 static void usage_err(const char *ctl, ...);
144 static void usage_help(_Bool);
145 static void init_locks(void);
146 static void handle_term(int);
147 
148 pid_t childpid;
149 
150 static int save_ac;
151 static char **save_av;
152 
153 /*
154  * Kernel startup for virtual kernels - standard main()
155  */
156 int
157 main(int ac, char **av)
158 {
159 	char *memImageFile = NULL;
160 	char *netifFile[VKNETIF_MAX];
161 	char *diskFile[VKDISK_MAX];
162 	char *cdFile[VKDISK_MAX];
163 	char *suffix;
164 	char *endp;
165 	char *tmp;
166 	char *tok;
167 	int diskFlags[VKDISK_MAX];
168 	int netifFileNum = 0;
169 	int diskFileNum = 0;
170 	int cdFileNum = 0;
171 	int bootOnDisk = -1;	/* set below to vcd (0) or vkd (1) */
172 	int c;
173 	int i;
174 	int j;
175 	int n;
176 	int isq;
177 	int pos;
178 	int eflag;
179 	int dflag = 0;		/* disable vmm */
180 	int real_vkernel_enable;
181 	int supports_sse;
182 	uint32_t mxcsr_mask;
183 	size_t vsize;
184 	size_t msize;
185 	size_t kenv_size;
186 	size_t kenv_size2;
187 	int status;
188 	struct sigaction sa;
189 
190 	/*
191 	 * Currently a bad hack but rtld-elf needs LD_SHAREDLIB_BASE to
192 	 * be set to force it to mmap() shared libraries into low memory,
193 	 * so our module loader can link against the related symbols.
194 	 */
195 	if (getenv("LD_SHAREDLIB_BASE") == NULL) {
196 		setenv("LD_SHAREDLIB_BASE", "0x10000000", 1);
197 		execv(av[0], av);
198 		fprintf(stderr, "Must run %s with full path\n", av[0]);
199 		exit(1);
200 	}
201 
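	/*
	 * The parent process acts as a simple supervisor: it forks the
	 * actual vkernel and, whenever the child exits with
	 * EX_VKERNEL_REBOOT (see cpu_reset()), loops around to fork a
	 * fresh instance, emulating a reboot.  Any other exit status
	 * terminates the supervisor as well.
	 */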
202 	while ((childpid = fork()) != 0) {
203 		/* Ignore signals */
204 		bzero(&sa, sizeof(sa));
205 		sigemptyset(&sa.sa_mask);
206 		sa.sa_handler = SIG_IGN;
207 		sigaction(SIGINT, &sa, NULL);
208 		sigaction(SIGQUIT, &sa, NULL);
209 		sigaction(SIGHUP, &sa, NULL);
210 
211 		/*
212 		 * Forward SIGTERM to the child so that
213 		 * the shutdown process initiates correctly.
214 		 */
215 		sa.sa_handler = handle_term;
216 		sigaction(SIGTERM, &sa, NULL);
217 
		/*
		 * Wait for the child to terminate; exit if
		 * someone stole our child.
		 */
222 		while (waitpid(childpid, &status, 0) != childpid) {
223 			if (errno == ECHILD)
224 				exit(1);
225 		}
226 		if (WEXITSTATUS(status) != EX_VKERNEL_REBOOT)
227 			return 0;
228 	}
229 
230 	/*
231 	 * Starting for real
232 	 */
233 	save_ac = ac;
234 	save_av = av;
235 	eflag = 0;
236 	pos = 0;
237 	kenv_size = 0;
238 	/*
239 	 * Process options
240 	 */
241 	kernel_mem_readonly = 1;
242 	optcpus = 2;
243 	vkernel_b_arg = 0;
244 	vkernel_B_arg = 0;
245 	lwp_cpu_lock = LCL_NONE;
246 
247 	real_vkernel_enable = 0;
248 	vsize = sizeof(real_vkernel_enable);
249 	sysctlbyname("vm.vkernel_enable", &real_vkernel_enable, &vsize, NULL,0);
250 
251 	if (real_vkernel_enable == 0) {
252 		errx(1, "vm.vkernel_enable is 0, must be set "
253 			"to 1 to execute a vkernel!");
254 	}
255 
256 	real_ncpus = 1;
257 	vsize = sizeof(real_ncpus);
258 	sysctlbyname("hw.ncpu", &real_ncpus, &vsize, NULL, 0);
259 
260 	if (ac < 2)
261 		usage_help(false);
262 
263 	while ((c = getopt(ac, av, "c:hsvl:m:n:r:R:e:i:p:I:Ud")) != -1) {
264 		switch(c) {
265 		case 'd':
266 			dflag = 1;
267 			break;
268 		case 'e':
269 			/*
270 			 * name=value:name=value:name=value...
271 			 * name="value"...
272 			 *
273 			 * Allow values to be quoted but note that shells
274 			 * may remove the quotes, so using this feature
275 			 * to embed colons may require a backslash.
276 			 */
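			/*
			 * Example with hypothetical tunables:
			 *   -e vfs.root.mountfrom=ufs:vkd0s1a:kern.somevar=1
			 */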
277 			n = strlen(optarg);
278 			isq = 0;
279 
280 			if (eflag == 0) {
281 				kenv_size = n + 2;
282 				kern_envp = malloc(kenv_size);
283 				if (kern_envp == NULL)
284 					errx(1, "Couldn't allocate %zd bytes for kern_envp", kenv_size);
285 			} else {
286 				kenv_size2 = kenv_size + n + 1;
287 				pos = kenv_size - 1;
288 				if ((tmp = realloc(kern_envp, kenv_size2)) == NULL)
289 					errx(1, "Couldn't reallocate %zd bytes for kern_envp", kenv_size2);
290 				kern_envp = tmp;
291 				kenv_size = kenv_size2;
292 			}
293 
294 			for (i = 0, j = pos; i < n; ++i) {
295 				if (optarg[i] == '"')
296 					isq ^= 1;
297 				else if (optarg[i] == '\'')
298 					isq ^= 2;
299 				else if (isq == 0 && optarg[i] == ':')
300 					kern_envp[j++] = 0;
301 				else
302 					kern_envp[j++] = optarg[i];
303 			}
304 			kern_envp[j++] = 0;
305 			kern_envp[j++] = 0;
306 			eflag++;
307 			break;
308 		case 's':
309 			boothowto |= RB_SINGLE;
310 			break;
311 		case 'v':
312 			bootverbose = 1;
313 			break;
314 		case 'i':
315 			memImageFile = optarg;
316 			break;
317 		case 'I':
318 			if (netifFileNum < VKNETIF_MAX)
319 				netifFile[netifFileNum++] = strdup(optarg);
320 			break;
321 		case 'r':
322 		case 'R':
323 			if (bootOnDisk < 0)
324 				bootOnDisk = 1;
325 			if (diskFileNum + cdFileNum < VKDISK_MAX) {
326 				diskFile[diskFileNum] = strdup(optarg);
327 				diskFlags[diskFileNum] = (c == 'R');
328 				++diskFileNum;
329 			}
330 			break;
331 		case 'c':
332 			if (bootOnDisk < 0)
333 				bootOnDisk = 0;
334 			if (diskFileNum + cdFileNum < VKDISK_MAX)
335 				cdFile[cdFileNum++] = strdup(optarg);
336 			break;
337 		case 'm':
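			/*
			 * Memory size; a k/K, m/M or g/G suffix is expected,
			 * e.g. "-m 2g" for 2GB of vkernel RAM.
			 */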
338 			Maxmem_bytes = strtoull(optarg, &suffix, 0);
339 			if (suffix) {
340 				switch(*suffix) {
341 				case 'g':
342 				case 'G':
343 					Maxmem_bytes <<= 30;
344 					break;
345 				case 'm':
346 				case 'M':
347 					Maxmem_bytes <<= 20;
348 					break;
349 				case 'k':
350 				case 'K':
351 					Maxmem_bytes <<= 10;
352 					break;
353 				default:
354 					Maxmem_bytes = 0;
355 					usage_err("Bad maxmem option");
356 					/* NOT REACHED */
357 					break;
358 				}
359 			}
360 			break;
361 		case 'l':
362 			next_cpu = -1;
363 			if (strncmp("map", optarg, 3) == 0) {
364 				lwp_cpu_lock = LCL_PER_CPU;
365 				if (optarg[3] == ',') {
366 					next_cpu = strtol(optarg+4, &endp, 0);
367 					if (*endp != '\0')
368 						usage_err("Bad target CPU number at '%s'", endp);
369 				} else {
370 					next_cpu = 0;
371 				}
372 				if (next_cpu < 0 || next_cpu > real_ncpus - 1)
373 					usage_err("Bad target CPU, valid range is 0-%d", real_ncpus - 1);
374 			} else if (strncmp("any", optarg, 3) == 0) {
375 				lwp_cpu_lock = LCL_NONE;
376 			} else {
377 				lwp_cpu_lock = LCL_SINGLE_CPU;
378 				next_cpu = strtol(optarg, &endp, 0);
379 				if (*endp != '\0')
380 					usage_err("Bad target CPU number at '%s'", endp);
381 				if (next_cpu < 0 || next_cpu > real_ncpus - 1)
382 					usage_err("Bad target CPU, valid range is 0-%d", real_ncpus - 1);
383 			}
384 			break;
385 		case 'n':
			/*
			 * This value is set up by mp_start(); don't just
			 * set ncpus here.
			 */
390 			tok = strtok(optarg, ":");
391 			optcpus = strtol(tok, NULL, 0);
392 			if (optcpus < 1 || optcpus > MAXCPU)
393 				usage_err("Bad ncpus, valid range is 1-%d", MAXCPU);
394 
395 			/* :lbits argument */
396 			tok = strtok(NULL, ":");
397 			if (tok != NULL) {
398 				vkernel_b_arg = strtol(tok, NULL, 0);
399 
400 				/* :cbits argument */
401 				tok = strtok(NULL, ":");
402 				if (tok != NULL) {
403 					vkernel_B_arg = strtol(tok, NULL, 0);
404 				}
405 
406 			}
407 			break;
408 		case 'p':
409 			pid_file = optarg;
410 			break;
411 		case 'U':
412 			kernel_mem_readonly = 0;
413 			break;
414 		case 'h':
415 			usage_help(true);
416 			break;
417 		default:
418 			usage_help(false);
419 		}
420 	}
421 
422 	/*
423 	 * Check VMM presence
424 	 */
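	/*
	 * hw.vmm.enable reports whether the real host has hardware
	 * virtualization (VMM) support enabled; -d forces the non-VMM
	 * (MAP_VPAGETABLE) code paths even when it is available.
	 */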
425 	vsize = sizeof(vmm_enabled);
426 	sysctlbyname("hw.vmm.enable", &vmm_enabled, &vsize, NULL, 0);
427 	vmm_enabled = (vmm_enabled && !dflag);
428 
429 	writepid();
430 	cpu_disable_intr();
431 	if (vmm_enabled) {
		/* use a plain MAP_ANON mapping directly */
433 		init_kern_memory_vmm();
434 	} else {
435 		init_sys_memory(memImageFile);
436 		init_kern_memory();
437 	}
438 	init_globaldata();
439 	init_vkernel();
440 	setrealcpu();
441 	init_kqueue();
442 
443 	vmm_guest = VMM_GUEST_VKERNEL;
444 
445 	/*
446 	 * Check TSC
447 	 */
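	/*
	 * The TSC attributes are inherited from the real host via sysctl;
	 * the vkernel does not calibrate the TSC itself.
	 */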
448 	vsize = sizeof(tsc_present);
449 	sysctlbyname("hw.tsc_present", &tsc_present, &vsize, NULL, 0);
450 	vsize = sizeof(tsc_invariant);
451 	sysctlbyname("hw.tsc_invariant", &tsc_invariant, &vsize, NULL, 0);
452 	vsize = sizeof(tsc_mpsync);
453 	sysctlbyname("hw.tsc_mpsync", &tsc_mpsync, &vsize, NULL, 0);
454 	vsize = sizeof(tsc_frequency);
455 	sysctlbyname("hw.tsc_frequency", &tsc_frequency, &vsize, NULL, 0);
456 	if (tsc_present)
457 		cpu_feature |= CPUID_TSC;
458 
459 	/*
460 	 * Check SSE
461 	 */
	vsize = sizeof(supports_sse);
	supports_sse = 0;
	sysctlbyname("hw.instruction_sse", &supports_sse, &vsize, NULL, 0);
	msize = sizeof(mxcsr_mask);
	sysctlbyname("hw.mxcsr_mask", &mxcsr_mask, &msize, NULL, 0);
466 	init_fpu(supports_sse);
467 	if (supports_sse)
468 		cpu_feature |= CPUID_SSE | CPUID_FXSR;
469 
470 	/*
471 	 * We boot from the first installed disk.
472 	 */
473 	if (bootOnDisk == 1) {
474 		init_disk(diskFile, diskFlags, diskFileNum, VKD_DISK);
475 		init_disk(cdFile, NULL, cdFileNum, VKD_CD);
476 	} else {
477 		init_disk(cdFile, NULL, cdFileNum, VKD_CD);
478 		init_disk(diskFile, diskFlags, diskFileNum, VKD_DISK);
479 	}
480 
481 	init_netif(netifFile, netifFileNum);
482 	init_exceptions();
483 	mi_startup();
484 	/* NOT REACHED */
485 	exit(EX_SOFTWARE);
486 }
487 
488 /* SIGTERM handler */
489 static
490 void
491 handle_term(int sig)
492 {
493 	kill(childpid, sig);
494 }
495 
496 /*
497  * Initialize system memory.  This is the virtual kernel's 'RAM'.
498  */
499 static
500 void
501 init_sys_memory(char *imageFile)
502 {
503 	struct stat st;
504 	int i;
505 	int fd;
506 
507 	/*
508 	 * Figure out the system memory image size.  If an image file was
509 	 * specified and -m was not specified, use the image file's size.
510 	 */
511 	if (imageFile && stat(imageFile, &st) == 0 && Maxmem_bytes == 0)
512 		Maxmem_bytes = (vm_paddr_t)st.st_size;
513 	if ((imageFile == NULL || stat(imageFile, &st) < 0) &&
514 	    Maxmem_bytes == 0) {
515 		errx(1, "Cannot create new memory file %s unless "
516 		       "system memory size is specified with -m",
517 		       imageFile);
518 		/* NOT REACHED */
519 	}
520 
521 	/*
522 	 * Maxmem must be known at this time
523 	 */
524 	if (Maxmem_bytes < 64 * 1024 * 1024 || (Maxmem_bytes & SEG_MASK)) {
525 		errx(1, "Bad maxmem specification: 64MB minimum, "
526 		       "multiples of %dMB only",
527 		       SEG_SIZE / 1024 / 1024);
528 		/* NOT REACHED */
529 	}
530 
531 	/*
532 	 * Generate an image file name if necessary, then open/create the
533 	 * file exclusively locked.  Do not allow multiple virtual kernels
534 	 * to use the same image file.
535 	 *
	 * Don't iterate through a million files if we do not have write
	 * access to the directory; stop if our open() failed on a
	 * non-existent file.  Otherwise opens can fail for any number
	 * of reasons.
	 */
540 	if (imageFile == NULL) {
541 		for (i = 0; i < 1000000; ++i) {
542 			asprintf(&imageFile, "/var/vkernel/memimg.%06d", i);
543 			fd = open(imageFile,
544 				  O_RDWR|O_CREAT|O_EXLOCK|O_NONBLOCK, 0644);
545 			if (fd < 0 && stat(imageFile, &st) == 0) {
546 				free(imageFile);
547 				continue;
548 			}
549 			break;
550 		}
551 	} else {
552 		fd = open(imageFile, O_RDWR|O_CREAT|O_EXLOCK|O_NONBLOCK, 0644);
553 	}
554 	fprintf(stderr, "Using memory file: %s\n", imageFile);
555 	if (fd < 0 || fstat(fd, &st) < 0) {
556 		err(1, "Unable to open/create %s", imageFile);
557 		/* NOT REACHED */
558 	}
559 
560 	/*
561 	 * Truncate or extend the file as necessary.  Clean out the contents
	 * of the file; we want it to be full of holes so we don't waste
563 	 * time reading in data from an old file that we no longer care
564 	 * about.
565 	 */
566 	ftruncate(fd, 0);
567 	ftruncate(fd, Maxmem_bytes);
568 
569 	MemImageFd = fd;
570 	Maxmem = Maxmem_bytes >> PAGE_SHIFT;
571 	physmem = Maxmem;
572 }
573 
574 /*
575  * Initialize kernel memory.  This reserves kernel virtual memory by using
576  * MAP_VPAGETABLE
577  */
578 
579 static
580 void
581 init_kern_memory(void)
582 {
583 	void *base;
584 	int i;
585 	void *firstfree;
586 
587 	/*
588 	 * Memory map our kernel virtual memory space.  Note that the
589 	 * kernel image itself is not made part of this memory for the
590 	 * moment.
591 	 *
592 	 * The memory map must be segment-aligned so we can properly
593 	 * offset KernelPTD.
594 	 *
595 	 * If the system kernel has a different MAXDSIZ, it might not
	 * be possible to map kernel memory in its preferred location.
597 	 * Try a number of different locations.
598 	 */
599 
600 	base = mmap((void*)KERNEL_KVA_START, KERNEL_KVA_SIZE, PROT_READ|PROT_WRITE,
601 		    MAP_FILE|MAP_SHARED|MAP_VPAGETABLE|MAP_FIXED|MAP_TRYFIXED,
602 		    MemImageFd, (off_t)KERNEL_KVA_START);
603 
604 	if (base == MAP_FAILED) {
605 		err(1, "Unable to mmap() kernel virtual memory!");
606 		/* NOT REACHED */
607 	}
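	/*
	 * MADV_NOSYNC: avoid gratuitously syncing dirty KVM pages back
	 * to the backing memory image file.
	 */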
608 	madvise(base, KERNEL_KVA_SIZE, MADV_NOSYNC);
609 	KvaStart = (vm_offset_t)base;
610 	KvaSize = KERNEL_KVA_SIZE;
611 	KvaEnd = KvaStart + KvaSize;
612 
613 	/* cannot use kprintf yet */
614 	printf("KVM mapped at %p-%p\n", (void *)KvaStart, (void *)KvaEnd);
615 
616 	/* MAP_FILE? */
617 	dmap_min_address = mmap(0, DMAP_SIZE, PROT_READ|PROT_WRITE,
618 				MAP_NOCORE|MAP_NOSYNC|MAP_SHARED,
619 				MemImageFd, 0);
620 	if (dmap_min_address == MAP_FAILED) {
621 		err(1, "Unable to mmap() kernel DMAP region!");
622 		/* NOT REACHED */
623 	}
624 
625 	/*
626 	 * Bootstrap the kernel_pmap
627 	 */
628 	firstfree = NULL;
629 	pmap_bootstrap((vm_paddr_t *)&firstfree, (int64_t)base);
630 
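	/*
	 * MADV_SETMAP installs the root of the vkernel's virtual page
	 * table (a VPTE pointing at physical page 0 of the memory image)
	 * for the MAP_VPAGETABLE mapping created above.
	 */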
631 	mcontrol(base, KERNEL_KVA_SIZE, MADV_SETMAP,
632 		 0 | VPTE_RW | VPTE_V);
633 
634 	/*
635 	 * phys_avail[] represents unallocated physical memory.  MI code
636 	 * will use phys_avail[] to create the vm_page array.
637 	 */
638 	phys_avail[0] = (vm_paddr_t)firstfree;
639 	phys_avail[0] = (phys_avail[0] + PAGE_MASK) & ~(vm_paddr_t)PAGE_MASK;
640 	phys_avail[1] = Maxmem_bytes;
641 
642 #if JGV
643 	/*
644 	 * (virtual_start, virtual_end) represent unallocated kernel virtual
645 	 * memory.  MI code will create kernel_map using these parameters.
646 	 */
647 	virtual_start = KvaStart + (long)firstfree;
648 	virtual_start = (virtual_start + PAGE_MASK) & ~(vm_offset_t)PAGE_MASK;
649 	virtual_end = KvaStart + KERNEL_KVA_SIZE;
650 #endif
651 
652 	/*
653 	 * pmap_growkernel() will set the correct value.
654 	 */
655 	kernel_vm_end = 0;
656 
657 	/*
658 	 * Allocate space for process 0's UAREA.
659 	 */
660 	proc0paddr = (void *)virtual_start;
661 	for (i = 0; i < UPAGES; ++i) {
662 		pmap_kenter_quick(virtual_start, phys_avail[0]);
663 		virtual_start += PAGE_SIZE;
664 		phys_avail[0] += PAGE_SIZE;
665 	}
666 
667 	/*
668 	 * crashdumpmap
669 	 */
670 	crashdumpmap = virtual_start;
671 	virtual_start += MAXDUMPPGS * PAGE_SIZE;
672 
673 	/*
674 	 * msgbufp maps the system message buffer
675 	 */
676 	assert((MSGBUF_SIZE & PAGE_MASK) == 0);
677 	msgbufp = (void *)virtual_start;
678 	for (i = 0; i < (MSGBUF_SIZE >> PAGE_SHIFT); ++i) {
679 		pmap_kenter_quick(virtual_start, phys_avail[0]);
680 		virtual_start += PAGE_SIZE;
681 		phys_avail[0] += PAGE_SIZE;
682 	}
683 	msgbufinit(msgbufp, MSGBUF_SIZE);
684 
685 	/*
686 	 * used by kern_memio for /dev/mem access
687 	 */
688 	ptvmmap = (caddr_t)virtual_start;
689 	virtual_start += PAGE_SIZE;
690 }
691 
692 static
693 void
694 init_kern_memory_vmm(void)
695 {
696 	int i;
697 	void *firstfree;
698 	struct vmm_guest_options options;
699 	void *dmap_address;
700 
701 	KvaStart = (vm_offset_t)KERNEL_KVA_START;
702 	KvaSize = KERNEL_KVA_SIZE;
703 	KvaEnd = KvaStart + KvaSize;
704 
705 	Maxmem = Maxmem_bytes >> PAGE_SHIFT;
706 	physmem = Maxmem;
707 
708 	if (Maxmem_bytes < 64 * 1024 * 1024 || (Maxmem_bytes & SEG_MASK)) {
709 		errx(1, "Bad maxmem specification: 64MB minimum, "
710 		       "multiples of %dMB only",
711 		       SEG_SIZE / 1024 / 1024);
712 		/* NOT REACHED */
713 	}
714 
	/*
	 * Call vmspace_create() to allocate the internal vkernel
	 * structures.  It won't do anything else (no new vmspace is
	 * created).
	 */
719 	if (vmspace_create(NULL, 0, NULL) < 0)
720 		panic("vmspace_create() failed");
721 
722 
	/*
	 * MAP_ANON the region of the vkernel's physical memory
	 * (known as the GPA - Guest Physical Address).
	 */
727 	dmap_address = mmap(NULL, Maxmem_bytes, PROT_READ|PROT_WRITE|PROT_EXEC,
728 	    MAP_ANON|MAP_SHARED, -1, 0);
729 	if (dmap_address == MAP_FAILED) {
730 		err(1, "Unable to mmap() RAM region!");
731 		/* NOT REACHED */
732 	}
733 	/* bzero(dmap_address, Maxmem_bytes); */
734 
	/* Allocate a new stack in low memory */
736 	vkernel_stack = mmap(NULL, KERNEL_STACK_SIZE,
737 	    PROT_READ|PROT_WRITE|PROT_EXEC,
738 	    MAP_ANON, -1, 0);
739 	if (vkernel_stack == MAP_FAILED) {
		err(1, "Unable to allocate stack");
741 	}
742 
743 	/*
744 	 * Bootstrap the kernel_pmap
745 	 */
746 	firstfree = dmap_address;
747 	dmap_min_address = NULL; /* VIRT == PHYS in the first 512G */
748 	pmap_bootstrap((vm_paddr_t *)&firstfree, (uint64_t)KvaStart);
749 
750 	/*
751 	 * Enter VMM mode
752 	 */
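	/*
	 * guest_cr3 points at the freshly bootstrapped kernel PML4 and
	 * new_stack at the top of the stack allocated above, which the
	 * host uses once the guest starts running.
	 */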
753 	options.guest_cr3 = (register_t) KPML4phys;
754 	options.new_stack = (uint64_t) vkernel_stack + KERNEL_STACK_SIZE;
755 	options.master = 1;
756 	if (vmm_guest_ctl(VMM_GUEST_RUN, &options)) {
757 		err(1, "Unable to enter VMM mode.");
758 	}
759 
760 	/*
761 	 * phys_avail[] represents unallocated physical memory.  MI code
762 	 * will use phys_avail[] to create the vm_page array.
763 	 */
764 	phys_avail[0] = (vm_paddr_t)firstfree;
765 	phys_avail[0] = (phys_avail[0] + PAGE_MASK) & ~(vm_paddr_t)PAGE_MASK;
766 	phys_avail[1] = (vm_paddr_t)dmap_address + Maxmem_bytes;
767 
768 	/*
769 	 * pmap_growkernel() will set the correct value.
770 	 */
771 	kernel_vm_end = 0;
772 
773 	/*
774 	 * Allocate space for process 0's UAREA.
775 	 */
776 	proc0paddr = (void *)virtual_start;
777 	for (i = 0; i < UPAGES; ++i) {
778 		pmap_kenter_quick(virtual_start, phys_avail[0]);
779 		virtual_start += PAGE_SIZE;
780 		phys_avail[0] += PAGE_SIZE;
781 	}
782 
783 	/*
784 	 * crashdumpmap
785 	 */
786 	crashdumpmap = virtual_start;
787 	virtual_start += MAXDUMPPGS * PAGE_SIZE;
788 
789 	/*
790 	 * msgbufp maps the system message buffer
791 	 */
792 	assert((MSGBUF_SIZE & PAGE_MASK) == 0);
793 	msgbufp = (void *)virtual_start;
794 	for (i = 0; i < (MSGBUF_SIZE >> PAGE_SHIFT); ++i) {
795 
796 		pmap_kenter_quick(virtual_start, phys_avail[0]);
797 		virtual_start += PAGE_SIZE;
798 		phys_avail[0] += PAGE_SIZE;
799 	}
800 
801 	msgbufinit(msgbufp, MSGBUF_SIZE);
802 
803 	/*
804 	 * used by kern_memio for /dev/mem access
805 	 */
806 	ptvmmap = (caddr_t)virtual_start;
807 	virtual_start += PAGE_SIZE;
808 
809 	printf("vmm: Hardware pagetable enabled for guest\n");
810 }
811 
812 
813 /*
814  * Map the per-cpu globaldata for cpu #0.  Allocate the space using
815  * virtual_start and phys_avail[0]
816  */
817 static
818 void
819 init_globaldata(void)
820 {
821 	int i;
822 	vm_paddr_t pa;
823 	vm_offset_t va;
824 
825 	/*
826 	 * Reserve enough KVA to cover possible cpus.  This is a considerable
827 	 * amount of KVA since the privatespace structure includes two
828 	 * whole page table mappings.
829 	 */
830 	virtual_start = (virtual_start + SEG_MASK) & ~(vm_offset_t)SEG_MASK;
831 	CPU_prvspace = (void *)virtual_start;
832 	virtual_start += sizeof(struct privatespace) * SMP_MAXCPU;
833 
834 	/*
835 	 * Allocate enough physical memory to cover the mdglobaldata
836 	 * portion of the space and the idle stack and map the pages
837 	 * into KVA.  For cpu #0 only.
838 	 */
839 	for (i = 0; i < sizeof(struct mdglobaldata); i += PAGE_SIZE) {
840 		pa = phys_avail[0];
841 		va = (vm_offset_t)&CPU_prvspace[0].mdglobaldata + i;
842 		pmap_kenter_quick(va, pa);
843 		phys_avail[0] += PAGE_SIZE;
844 	}
845 	for (i = 0; i < sizeof(CPU_prvspace[0].idlestack); i += PAGE_SIZE) {
846 		pa = phys_avail[0];
847 		va = (vm_offset_t)&CPU_prvspace[0].idlestack + i;
848 		pmap_kenter_quick(va, pa);
849 		phys_avail[0] += PAGE_SIZE;
850 	}
851 
852 	/*
853 	 * Setup the %gs for cpu #0.  The mycpu macro works after this
854 	 * point.  Note that %fs is used by pthreads.
855 	 */
856 	tls_set_gs(&CPU_prvspace[0], sizeof(struct privatespace));
857 }
858 
859 
860 /*
861  * Initialize pool tokens and other necessary locks
862  */
863 static void
864 init_locks(void)
865 {
866 
	/*
	 * Get the initial mplock with a count of 1 for the BSP.
	 * This uses a LOGICAL cpu ID, i.e. BSP == 0.
	 */
	cpu_get_initial_mplock();

	/* our token pool needs to work early */
	lwkt_token_pool_init();
875 
876 }
877 
878 
879 /*
880  * Initialize very low level systems including thread0, proc0, etc.
881  */
882 static
883 void
884 init_vkernel(void)
885 {
886 	struct mdglobaldata *gd;
887 
888 	gd = &CPU_prvspace[0].mdglobaldata;
889 	bzero(gd, sizeof(*gd));
890 
891 	gd->mi.gd_curthread = &thread0;
892 	thread0.td_gd = &gd->mi;
893 	ncpus = 1;
894 	ncpus2 = 1;	/* rounded down power of 2 */
895 	ncpus_fit = 1;	/* rounded up power of 2 */
896 	/* ncpus2_mask and ncpus_fit_mask are 0 */
897 	init_param1();
898 	gd->mi.gd_prvspace = &CPU_prvspace[0];
899 	mi_gdinit(&gd->mi, 0);
900 	cpu_gdinit(gd, 0);
901 	mi_proc0init(&gd->mi, proc0paddr);
902 	lwp0.lwp_md.md_regs = &proc0_tf;
903 
904 	init_locks();
905 	cninit();
906 	rand_initialize();
907 #if 0	/* #ifdef DDB */
908 	kdb_init();
909 	if (boothowto & RB_KDB)
910 		Debugger("Boot flags requested debugger");
911 #endif
912 	identcpu();
913 #if 0
914 	initializecpu();	/* Initialize CPU registers */
915 #endif
916 	init_param2((phys_avail[1] - phys_avail[0]) / PAGE_SIZE);
917 
918 #if 0
919 	/*
920 	 * Map the message buffer
921 	 */
922 	for (off = 0; off < round_page(MSGBUF_SIZE); off += PAGE_SIZE)
923 		pmap_kenter((vm_offset_t)msgbufp + off, avail_end + off);
924 	msgbufinit(msgbufp, MSGBUF_SIZE);
925 #endif
926 #if 0
927 	thread0.td_pcb_cr3 ... MMU
928 	lwp0.lwp_md.md_regs = &proc0_tf;
929 #endif
930 }
931 
932 /*
933  * Filesystem image paths for the virtual kernel are optional.
 * If specified, each should point to a disk image,
935  * the first of which will become the root disk.
936  *
937  * The virtual kernel caches data from our 'disk' just like a normal kernel,
938  * so we do not really want the real kernel to cache the data too.  Use
939  * O_DIRECT to remove the duplication.
940  */
941 static
942 void
943 init_disk(char **diskExp, int *diskFlags, int diskFileNum, enum vkdisk_type type)
944 {
945 	char *serno;
946 	int i;
947 
	if (diskFileNum == 0)
		return;
950 
951 	for (i=0; i < diskFileNum; i++){
952 		char *fname;
953 		fname = diskExp[i];
954 
		if (fname == NULL) {
			warnx("Invalid argument to '-r'");
			continue;
		}
959 		/*
960 		 * Check for a serial number for the virtual disk
961 		 * passed from the command line.
962 		 */
963 		serno = fname;
964 		strsep(&serno, ":");
965 
966 		if (DiskNum < VKDISK_MAX) {
967 			struct stat st;
968 			struct vkdisk_info *info = NULL;
969 			int fd;
970 			size_t l = 0;
971 
972 			if (type == VKD_DISK)
973 			    fd = open(fname, O_RDWR|O_DIRECT, 0644);
974 			else
975 			    fd = open(fname, O_RDONLY|O_DIRECT, 0644);
976 			if (fd < 0 || fstat(fd, &st) < 0) {
977 				err(1, "Unable to open/create %s", fname);
978 				/* NOT REACHED */
979 			}
			/* diskFlags is NULL for CD images, do not dereference it */
			if (S_ISREG(st.st_mode) && diskFlags != NULL &&
			    (diskFlags[i] & 1) == 0) {
				if (flock(fd, LOCK_EX|LOCK_NB) < 0) {
					errx(1, "Disk image %s is already "
						"in use", fname);
984 					/* NOT REACHED */
985 				}
986 			}
987 
988 			info = &DiskInfo[DiskNum];
989 			l = strlen(fname);
990 
991 			info->unit = i;
992 			info->fd = fd;
993 			info->type = type;
			info->flags = (diskFlags != NULL) ? diskFlags[i] : 0;
995 			memcpy(info->fname, fname, l);
996 			info->serno = NULL;
997 			if (serno) {
998 				if ((info->serno = malloc(SERNOLEN)) != NULL)
999 					strlcpy(info->serno, serno, SERNOLEN);
1000 				else
1001 					warnx("Couldn't allocate memory for the operation");
1002 			}
1003 
1004 			if (DiskNum == 0) {
1005 				if (type == VKD_CD) {
1006 					rootdevnames[0] = "cd9660:vcd0";
1007 				} else if (type == VKD_DISK) {
1008 					rootdevnames[0] = "ufs:vkd0s0a";
1009 					rootdevnames[1] = "ufs:vkd0s1a";
1010 				}
1011 			}
1012 
1013 			DiskNum++;
1014 		} else {
			warnx("vkd%d (%s) > VKDISK_MAX", DiskNum, fname);
			continue;
1017 		}
1018 	}
1019 }
1020 
1021 static
1022 int
1023 netif_set_tapflags(int tap_unit, int f, int s)
1024 {
1025 	struct ifreq ifr;
1026 	int flags;
1027 
1028 	bzero(&ifr, sizeof(ifr));
1029 
1030 	snprintf(ifr.ifr_name, sizeof(ifr.ifr_name), "tap%d", tap_unit);
1031 	if (ioctl(s, SIOCGIFFLAGS, &ifr) < 0) {
1032 		warn("tap%d: ioctl(SIOCGIFFLAGS) failed", tap_unit);
1033 		return -1;
1034 	}
1035 
1036 	/*
1037 	 * Adjust if_flags
1038 	 *
1039 	 * If the flags are already set/cleared, then we return
1040 	 * immediately to avoid extra syscalls
1041 	 */
1042 	flags = (ifr.ifr_flags & 0xffff) | (ifr.ifr_flagshigh << 16);
1043 	if (f < 0) {
1044 		/* Turn off flags */
1045 		f = -f;
1046 		if ((flags & f) == 0)
1047 			return 0;
1048 		flags &= ~f;
1049 	} else {
1050 		/* Turn on flags */
1051 		if (flags & f)
1052 			return 0;
1053 		flags |= f;
1054 	}
1055 
	/*
	 * Fix up ifreq.ifr_name, since it may have been trashed
	 * by the previous ioctl(SIOCGIFFLAGS)
	 */
1060 	snprintf(ifr.ifr_name, sizeof(ifr.ifr_name), "tap%d", tap_unit);
1061 
1062 	ifr.ifr_flags = flags & 0xffff;
1063 	ifr.ifr_flagshigh = flags >> 16;
1064 	if (ioctl(s, SIOCSIFFLAGS, &ifr) < 0) {
1065 		warn("tap%d: ioctl(SIOCSIFFLAGS) failed", tap_unit);
1066 		return -1;
1067 	}
1068 	return 0;
1069 }
1070 
1071 static
1072 int
1073 netif_set_tapaddr(int tap_unit, in_addr_t addr, in_addr_t mask, int s)
1074 {
1075 	struct ifaliasreq ifra;
1076 	struct sockaddr_in *in;
1077 
1078 	bzero(&ifra, sizeof(ifra));
1079 	snprintf(ifra.ifra_name, sizeof(ifra.ifra_name), "tap%d", tap_unit);
1080 
1081 	/* Setup address */
1082 	in = (struct sockaddr_in *)&ifra.ifra_addr;
1083 	in->sin_family = AF_INET;
1084 	in->sin_len = sizeof(*in);
1085 	in->sin_addr.s_addr = addr;
1086 
1087 	if (mask != 0) {
1088 		/* Setup netmask */
1089 		in = (struct sockaddr_in *)&ifra.ifra_mask;
1090 		in->sin_len = sizeof(*in);
1091 		in->sin_addr.s_addr = mask;
1092 	}
1093 
1094 	if (ioctl(s, SIOCAIFADDR, &ifra) < 0) {
1095 		warn("tap%d: ioctl(SIOCAIFADDR) failed", tap_unit);
1096 		return -1;
1097 	}
1098 	return 0;
1099 }
1100 
1101 static
1102 int
1103 netif_add_tap2brg(int tap_unit, const char *ifbridge, int s)
1104 {
1105 	struct ifbreq ifbr;
1106 	struct ifdrv ifd;
1107 
1108 	bzero(&ifbr, sizeof(ifbr));
1109 	snprintf(ifbr.ifbr_ifsname, sizeof(ifbr.ifbr_ifsname),
1110 		 "tap%d", tap_unit);
1111 
1112 	bzero(&ifd, sizeof(ifd));
1113 	strlcpy(ifd.ifd_name, ifbridge, sizeof(ifd.ifd_name));
1114 	ifd.ifd_cmd = BRDGADD;
1115 	ifd.ifd_len = sizeof(ifbr);
1116 	ifd.ifd_data = &ifbr;
1117 
1118 	if (ioctl(s, SIOCSDRVSPEC, &ifd) < 0) {
1119 		/*
1120 		 * 'errno == EEXIST' means that the tap(4) is already
1121 		 * a member of the bridge(4)
1122 		 */
1123 		if (errno != EEXIST) {
1124 			warn("ioctl(%s, SIOCSDRVSPEC) failed", ifbridge);
1125 			return -1;
1126 		}
1127 	}
1128 	return 0;
1129 }
1130 
1131 #define TAPDEV_OFLAGS	(O_RDWR | O_NONBLOCK)
1132 
1133 /*
1134  * Locate the first unused tap(4) device file if auto mode is requested,
1135  * or open the user supplied device file, and bring up the corresponding
1136  * tap(4) interface.
1137  *
1138  * NOTE: Only tap(4) device file is supported currently
1139  */
1140 static
1141 int
1142 netif_open_tap(const char *netif, int *tap_unit, int s)
1143 {
1144 	char tap_dev[MAXPATHLEN];
1145 	int tap_fd, failed;
1146 	struct stat st;
1147 	char *dname;
1148 
1149 	*tap_unit = -1;
1150 
1151 	if (strcmp(netif, "auto") == 0) {
1152 		/*
1153 		 * Find first unused tap(4) device file
1154 		 */
		/* record the path so the later warnx() does not print garbage */
		strlcpy(tap_dev, "/dev/tap", sizeof(tap_dev));
		tap_fd = open(tap_dev, TAPDEV_OFLAGS);
1156 		if (tap_fd < 0) {
1157 			warnc(errno, "Unable to find a free tap(4)");
1158 			return -1;
1159 		}
1160 	} else {
1161 		/*
1162 		 * User supplied tap(4) device file or unix socket.
1163 		 */
1164 		if (netif[0] == '/')	/* Absolute path */
1165 			strlcpy(tap_dev, netif, sizeof(tap_dev));
1166 		else
1167 			snprintf(tap_dev, sizeof(tap_dev), "/dev/%s", netif);
1168 
1169 		tap_fd = open(tap_dev, TAPDEV_OFLAGS);
1170 
1171 		/*
1172 		 * If we cannot open normally try to connect to it.
1173 		 */
1174 		if (tap_fd < 0)
1175 			tap_fd = unix_connect(tap_dev);
1176 
1177 		if (tap_fd < 0) {
1178 			warn("Unable to open %s", tap_dev);
1179 			return -1;
1180 		}
1181 	}
1182 
1183 	/*
1184 	 * Check whether the device file is a tap(4)
1185 	 */
1186 	if (fstat(tap_fd, &st) < 0) {
1187 		failed = 1;
1188 	} else if (S_ISCHR(st.st_mode)) {
1189 		dname = fdevname(tap_fd);
1190 		if (dname)
1191 			dname = strstr(dname, "tap");
1192 		if (dname) {
1193 			/*
1194 			 * Bring up the corresponding tap(4) interface
1195 			 */
1196 			*tap_unit = strtol(dname + 3, NULL, 10);
1197 			printf("TAP UNIT %d\n", *tap_unit);
1198 			if (netif_set_tapflags(*tap_unit, IFF_UP, s) == 0)
1199 				failed = 0;
1200 			else
1201 				failed = 1;
1202 		} else {
1203 			failed = 1;
1204 		}
1205 	} else if (S_ISSOCK(st.st_mode)) {
1206 		/*
1207 		 * Special socket connection (typically to vknet).  We
1208 		 * do not have to do anything.
1209 		 */
1210 		failed = 0;
1211 	} else {
1212 		failed = 1;
1213 	}
1214 
1215 	if (failed) {
1216 		warnx("%s is not a tap(4) device or socket", tap_dev);
1217 		close(tap_fd);
1218 		tap_fd = -1;
1219 		*tap_unit = -1;
1220 	}
1221 	return tap_fd;
1222 }
1223 
1224 static int
1225 unix_connect(const char *path)
1226 {
1227 	struct sockaddr_un sunx;
1228 	int len;
1229 	int net_fd;
1230 	int sndbuf = 262144;
1231 	struct stat st;
1232 
1233 	snprintf(sunx.sun_path, sizeof(sunx.sun_path), "%s", path);
1234 	len = offsetof(struct sockaddr_un, sun_path[strlen(sunx.sun_path)]);
1235 	++len;	/* include nul */
1236 	sunx.sun_family = AF_UNIX;
1237 	sunx.sun_len = len;
1238 
1239 	net_fd = socket(AF_UNIX, SOCK_SEQPACKET, 0);
1240 	if (net_fd < 0)
1241 		return(-1);
1242 	if (connect(net_fd, (void *)&sunx, len) < 0) {
1243 		close(net_fd);
1244 		return(-1);
1245 	}
1246 	setsockopt(net_fd, SOL_SOCKET, SO_SNDBUF, &sndbuf, sizeof(sndbuf));
1247 	if (fstat(net_fd, &st) == 0)
1248 		printf("Network socket buffer: %d bytes\n", st.st_blksize);
1249 	fcntl(net_fd, F_SETFL, O_NONBLOCK);
1250 	return(net_fd);
1251 }
1252 
1253 #undef TAPDEV_MAJOR
1254 #undef TAPDEV_MINOR
1255 #undef TAPDEV_OFLAGS
1256 
1257 /*
1258  * Following syntax is supported,
1259  * 1) x.x.x.x             tap(4)'s address is x.x.x.x
1260  *
1261  * 2) x.x.x.x/z           tap(4)'s address is x.x.x.x
1262  *                        tap(4)'s netmask len is z
1263  *
1264  * 3) x.x.x.x:y.y.y.y     tap(4)'s address is x.x.x.x
1265  *                        pseudo netif's address is y.y.y.y
1266  *
1267  * 4) x.x.x.x:y.y.y.y/z   tap(4)'s address is x.x.x.x
1268  *                        pseudo netif's address is y.y.y.y
1269  *                        tap(4) and pseudo netif's netmask len are z
1270  *
1271  * 5) bridgeX             tap(4) will be added to bridgeX
1272  *
1273  * 6) bridgeX:y.y.y.y     tap(4) will be added to bridgeX
1274  *                        pseudo netif's address is y.y.y.y
1275  *
1276  * 7) bridgeX:y.y.y.y/z   tap(4) will be added to bridgeX
1277  *                        pseudo netif's address is y.y.y.y
1278  *                        pseudo netif's netmask len is z
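 *
 * For example (hypothetical addresses), "-I auto:10.26.0.1:10.26.0.2/24"
 * uses form 4 above: the tap(4) gets 10.26.0.1 and the pseudo netif gets
 * 10.26.0.2, both with a /24 netmask.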
1279  */
1280 static
1281 int
1282 netif_init_tap(int tap_unit, in_addr_t *addr, in_addr_t *mask, int s)
1283 {
1284 	in_addr_t tap_addr, netmask, netif_addr;
1285 	int next_netif_addr;
1286 	char *tok, *masklen_str, *ifbridge;
1287 
1288 	*addr = 0;
1289 	*mask = 0;
1290 
1291 	tok = strtok(NULL, ":/");
1292 	if (tok == NULL) {
1293 		/*
1294 		 * Nothing special, simply use tap(4) as backend
1295 		 */
1296 		return 0;
1297 	}
1298 
1299 	if (inet_pton(AF_INET, tok, &tap_addr) > 0) {
1300 		/*
1301 		 * tap(4)'s address is supplied
1302 		 */
1303 		ifbridge = NULL;
1304 
		/*
		 * If there is a next token, it may be the pseudo
		 * netif's address or the netmask len for tap(4)
		 */
1309 		next_netif_addr = 0;
1310 	} else {
		/*
		 * Not a tap(4) address; assume it is a bridge(4)
		 * interface name
		 */
1315 		tap_addr = 0;
1316 		ifbridge = tok;
1317 
		/*
		 * If there is a next token, it must be the pseudo
		 * netif's address
		 */
1322 		next_netif_addr = 1;
1323 	}
1324 
1325 	netmask = netif_addr = 0;
1326 
1327 	tok = strtok(NULL, ":/");
1328 	if (tok == NULL)
1329 		goto back;
1330 
1331 	if (inet_pton(AF_INET, tok, &netif_addr) <= 0) {
1332 		if (next_netif_addr) {
1333 			warnx("Invalid pseudo netif address: %s", tok);
1334 			return -1;
1335 		}
1336 		netif_addr = 0;
1337 
		/*
		 * The current token is not an address, so it must be
		 * the netmask len
		 */
1341 		masklen_str = tok;
1342 	} else {
		/*
		 * The current token is the pseudo netif's address; if there
		 * is a next token it must be the netmask len
		 */
1347 		masklen_str = strtok(NULL, "/");
1348 	}
1349 
1350 	/* Calculate netmask */
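	/* e.g. a len of 24 yields 255.255.255.0 in network byte order */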
1351 	if (masklen_str != NULL) {
1352 		u_long masklen;
1353 
1354 		masklen = strtoul(masklen_str, NULL, 10);
1355 		if (masklen < 32 && masklen > 0) {
1356 			netmask = htonl(~((1LL << (32 - masklen)) - 1)
1357 					& 0xffffffff);
1358 		} else {
1359 			warnx("Invalid netmask len: %lu", masklen);
1360 			return -1;
1361 		}
1362 	}
1363 
1364 	/* Make sure there is no more token left */
1365 	if (strtok(NULL, ":/") != NULL) {
1366 		warnx("Invalid argument to '-I'");
1367 		return -1;
1368 	}
1369 
1370 back:
1371 	if (tap_unit < 0) {
1372 		/* Do nothing */
1373 	} else if (ifbridge == NULL) {
1374 		/* Set tap(4) address/netmask */
1375 		if (netif_set_tapaddr(tap_unit, tap_addr, netmask, s) < 0)
1376 			return -1;
1377 	} else {
1378 		/* Tie tap(4) to bridge(4) */
1379 		if (netif_add_tap2brg(tap_unit, ifbridge, s) < 0)
1380 			return -1;
1381 	}
1382 
1383 	*addr = netif_addr;
1384 	*mask = netmask;
1385 	return 0;
1386 }
1387 
1388 /*
1389  * NetifInfo[] will be filled for pseudo netif initialization.
1390  * NetifNum will be bumped to reflect the number of valid entries
1391  * in NetifInfo[].
1392  */
1393 static
1394 void
1395 init_netif(char *netifExp[], int netifExpNum)
1396 {
1397 	int i, s;
1398 	char *tmp;
1399 
1400 	if (netifExpNum == 0)
1401 		return;
1402 
1403 	s = socket(AF_INET, SOCK_DGRAM, 0);	/* for ioctl(SIOC) */
1404 	if (s < 0)
1405 		return;
1406 
1407 	for (i = 0; i < netifExpNum; ++i) {
1408 		struct vknetif_info *info;
1409 		in_addr_t netif_addr, netif_mask;
1410 		int tap_fd, tap_unit;
1411 		char *netif;
1412 
1413 		/* Extract MAC address if there is one */
1414 		tmp = netifExp[i];
1415 		strsep(&tmp, "=");
1416 
1417 		netif = strtok(netifExp[i], ":");
1418 		if (netif == NULL) {
1419 			warnx("Invalid argument to '-I'");
1420 			continue;
1421 		}
1422 
1423 		/*
1424 		 * Open tap(4) device file and bring up the
1425 		 * corresponding interface
1426 		 */
1427 		tap_fd = netif_open_tap(netif, &tap_unit, s);
1428 		if (tap_fd < 0)
1429 			continue;
1430 
1431 		/*
1432 		 * Initialize tap(4) and get address/netmask
1433 		 * for pseudo netif
1434 		 *
		 * NB: The rest of netifExp[i] is passed to
		 *     netif_init_tap() implicitly via strtok() state.
1437 		 */
1438 		if (netif_init_tap(tap_unit, &netif_addr, &netif_mask, s) < 0) {
1439 			/*
1440 			 * NB: Closing tap(4) device file will bring
1441 			 *     down the corresponding interface
1442 			 */
1443 			close(tap_fd);
1444 			continue;
1445 		}
1446 
1447 		info = &NetifInfo[NetifNum];
1448 		bzero(info, sizeof(*info));
1449 		info->tap_fd = tap_fd;
1450 		info->tap_unit = tap_unit;
1451 		info->netif_addr = netif_addr;
1452 		info->netif_mask = netif_mask;
1453 		/*
1454 		 * If tmp isn't NULL it means a MAC could have been
1455 		 * specified so attempt to convert it.
1456 		 * Setting enaddr to NULL will tell vke_attach() we
1457 		 * need a pseudo-random MAC address.
1458 		 */
1459 		if (tmp != NULL) {
1460 			if ((info->enaddr = malloc(ETHER_ADDR_LEN)) == NULL)
1461 				warnx("Couldn't allocate memory for the operation");
1462 			else {
1463 				if ((kether_aton(tmp, info->enaddr)) == NULL) {
1464 					free(info->enaddr);
1465 					info->enaddr = NULL;
1466 				}
1467 			}
1468 		}
1469 
1470 		NetifNum++;
1471 		if (NetifNum >= VKNETIF_MAX)	/* XXX will this happen? */
1472 			break;
1473 	}
1474 	close(s);
1475 }
1476 
1477 /*
1478  * Create the pid file and leave it open and locked while the vkernel is
1479  * running.  This allows a script to use /usr/bin/lockf to probe whether
 * a vkernel is still running (so as not to accidentally kill an unrelated
1481  * process from a stale pid file).
1482  */
1483 static
1484 void
1485 writepid(void)
1486 {
1487 	char buf[32];
1488 	int fd;
1489 
1490 	if (pid_file != NULL) {
1491 		snprintf(buf, sizeof(buf), "%ld\n", (long)getpid());
1492 		fd = open(pid_file, O_RDWR|O_CREAT|O_EXLOCK|O_NONBLOCK, 0666);
1493 		if (fd < 0) {
1494 			if (errno == EWOULDBLOCK) {
1495 				perror("Failed to lock pidfile, "
1496 				       "vkernel already running");
1497 			} else {
1498 				perror("Failed to create pidfile");
1499 			}
1500 			exit(EX_SOFTWARE);
1501 		}
1502 		ftruncate(fd, 0);
1503 		write(fd, buf, strlen(buf));
1504 		/* leave the file open to maintain the lock */
1505 	}
1506 }
1507 
1508 static
1509 void
cleanpid(void)
1511 {
1512 	if (pid_file != NULL) {
1513 		if (unlink(pid_file) < 0)
1514 			perror("Warning: couldn't remove pidfile");
1515 	}
1516 }
1517 
1518 static
1519 void
1520 usage_err(const char *ctl, ...)
1521 {
1522 	va_list va;
1523 
1524 	va_start(va, ctl);
1525 	vfprintf(stderr, ctl, va);
1526 	va_end(va);
1527 	fprintf(stderr, "\n");
1528 	exit(EX_USAGE);
1529 }
1530 
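/*
 * Example invocation (hypothetical paths):
 *
 *   vkernel -m 1g -r /var/vkernel/rootimg.01 -I auto:bridge0 \
 *	     -p /var/run/vkernel.pid
 */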
1531 static
1532 void
1533 usage_help(_Bool help)
1534 {
1535 	fprintf(stderr, "Usage: %s [-hsUvd] [-c file] [-e name=value:name=value:...]\n"
1536 	    "\t[-i file] [-I interface[:address1[:address2][/netmask]]] [-l cpulock]\n"
1537 	    "\t[-m size] [-n numcpus[:lbits[:cbits]]]\n"
	    "\t[-p file] [-r file] [-R file]\n", save_av[0]);
1539 
1540 	if (help)
1541 		fprintf(stderr, "\nArguments:\n"
1542 		    "\t-c\tSpecify a readonly CD-ROM image file to be used by the kernel.\n"
1543 		    "\t-e\tSpecify an environment to be used by the kernel.\n"
1544 		    "\t-h\tThis list of options.\n"
1545 		    "\t-i\tSpecify a memory image file to be used by the virtual kernel.\n"
1546 		    "\t-I\tCreate a virtual network device.\n"
1547 		    "\t-l\tSpecify which, if any, real CPUs to lock virtual CPUs to.\n"
		    "\t-m\tSpecify the amount of memory to be used by the kernel (k/m/g suffix).\n"
1549 		    "\t-n\tSpecify the number of CPUs and the topology you wish to emulate:\n"
1550 		    "\t\t\tnumcpus - number of cpus\n"
1551 		    "\t\t\tlbits - specify the number of bits within APICID(=CPUID)\n"
1552 		    "\t\t\t        needed for representing the logical ID.\n"
1553 		    "\t\t\t        Controls the number of threads/core:\n"
1554 		    "\t\t\t        (0 bits - 1 thread, 1 bit - 2 threads).\n"
1555 		    "\t\t\tcbits - specify the number of bits within APICID(=CPUID)\n"
1556 		    "\t\t\t        needed for representing the core ID.\n"
1557 		    "\t\t\t        Controls the number of cores/package:\n"
1558 		    "\t\t\t        (0 bits - 1 core, 1 bit - 2 cores).\n"
1559 		    "\t-p\tSpecify a file in which to store the process ID.\n"
1560 		    "\t-r\tSpecify a R/W disk image file, iterates vkd0..n\n"
1561 		    "\t-R\tSpecify a COW disk image file, iterates vkd0..n\n"
1562 		    "\t-s\tBoot into single-user mode.\n"
1563 		    "\t-U\tEnable writing to kernel memory and module loading.\n"
1564 		    "\t-v\tTurn on verbose booting.\n");
1565 
1566 	exit(EX_USAGE);
1567 }
1568 
1569 void
1570 cpu_smp_stopped(void)
1571 {
1572 }
1573 
1574 void
1575 cpu_reset(void)
1576 {
1577 	kprintf("cpu reset, rebooting vkernel\n");
1578 	closefrom(3);
1579 	cleanpid();
1580 	exit(EX_VKERNEL_REBOOT);
1581 }
1582 
1583 void
1584 cpu_halt(void)
1585 {
1586 	kprintf("cpu halt, exiting vkernel\n");
1587 	cleanpid();
1588 	exit(EX_OK);
1589 }
1590 
1591 void
1592 setrealcpu(void)
1593 {
1594 	switch(lwp_cpu_lock) {
1595 	case LCL_PER_CPU:
1596 		if (bootverbose)
1597 			kprintf("Locking CPU%d to real cpu %d\n",
1598 				mycpuid, next_cpu);
1599 		usched_set(getpid(), USCHED_SET_CPU, &next_cpu, sizeof(next_cpu));
1600 		next_cpu++;
1601 		if (next_cpu >= real_ncpus)
1602 			next_cpu = 0;
1603 		break;
1604 	case LCL_SINGLE_CPU:
1605 		if (bootverbose)
1606 			kprintf("Locking CPU%d to real cpu %d\n",
1607 				mycpuid, next_cpu);
1608 		usched_set(getpid(), USCHED_SET_CPU, &next_cpu, sizeof(next_cpu));
1609 		break;
1610 	default:
1611 		/* do not map virtual cpus to real cpus */
1612 		break;
1613 	}
1614 }
1615 
1616 /*
1617  * Allocate and free memory for module loading.  The loaded module
1618  * has to be placed somewhere near the current kernel binary load
1619  * point or the relocations will not work.
1620  *
1621  * I'm not sure why this isn't working.
1622  */
1623 int
1624 vkernel_module_memory_alloc(vm_offset_t *basep, size_t bytes)
1625 {
1626 #if 1
1627 	size_t xtra;
1628 	xtra = (PAGE_SIZE - (vm_offset_t)sbrk(0)) & PAGE_MASK;
1629 	*basep = (vm_offset_t)sbrk(xtra + bytes) + xtra;
1630 	bzero((void *)*basep, bytes);
1631 #else
1632 	*basep = (vm_offset_t)mmap((void *)0x000000000, bytes,
1633 				   PROT_READ|PROT_WRITE|PROT_EXEC,
1634 				   MAP_ANON|MAP_SHARED, -1, 0);
1635 	if ((void *)*basep == MAP_FAILED)
1636 		return ENOMEM;
1637 #endif
1638 	return 0;
1639 }
1640 
1641 void
1642 vkernel_module_memory_free(vm_offset_t base, size_t bytes)
1643 {
1644 #if 0
1645 #if 0
1646 	munmap((void *)base, bytes);
1647 #endif
1648 #endif
1649 }
1650