/*
 * Copyright (c) 2006 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <sys/cons.h>
#include <sys/random.h>
#include <sys/vkernel.h>
#include <sys/tls.h>
#include <sys/reboot.h>
#include <sys/proc.h>
#include <sys/msgbuf.h>
#include <sys/vmspace.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/un.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <sys/mplock2.h>
#include <sys/wait.h>
#include <sys/vmm.h>

#include <machine/cpu.h>
#include <machine/globaldata.h>
#include <machine/tls.h>
#include <machine/md_var.h>
#include <machine/vmparam.h>
#include <cpu/specialreg.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/bridge/if_bridgevar.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <net/if_var.h>

#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <stdbool.h>
#include <unistd.h>
#include <fcntl.h>
#include <string.h>
#include <err.h>
#include <errno.h>
#include <assert.h>
#include <sysexits.h>

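/*
 * Exit status with which the vkernel child exits when a reboot is
 * requested (see cpu_reset()); the supervisor loop at the top of
 * main() re-forks the vkernel when it sees this status.
 */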
#define EX_VKERNEL_REBOOT	32

vm_paddr_t phys_avail[16];
vm_paddr_t Maxmem;
vm_paddr_t Maxmem_bytes;
long physmem;
int MemImageFd = -1;
struct vkdisk_info DiskInfo[VKDISK_MAX];
int DiskNum;
struct vknetif_info NetifInfo[VKNETIF_MAX];
int NetifNum;
char *pid_file;
vm_offset_t KvaStart;
vm_offset_t KvaEnd;
vm_offset_t KvaSize;
vm_offset_t virtual_start;
vm_offset_t virtual_end;
vm_offset_t virtual2_start;
vm_offset_t virtual2_end;
vm_offset_t kernel_vm_end;
vm_offset_t crashdumpmap;
vm_offset_t clean_sva;
vm_offset_t clean_eva;
struct msgbuf *msgbufp;
caddr_t ptvmmap;
vpte_t	*KernelPTD;
vpte_t	*KernelPTA;	/* Warning: Offset for direct VA translation */
void *dmap_min_address;
void *vkernel_stack;
u_int cpu_feature;	/* XXX */
int tsc_present;
int tsc_invariant;
int tsc_mpsync;
int64_t tsc_frequency;
int optcpus;		/* number of cpus - see mp_start() */
int lwp_cpu_lock;	/* if/how to lock virtual CPUs to real CPUs */
int real_ncpus;		/* number of real CPUs */
int next_cpu;		/* next real CPU to lock a virtual CPU to */
int vkernel_b_arg;	/* no of logical CPU bits - only SMP */
int vkernel_B_arg;	/* no of core bits - only SMP */
int vmm_enabled;	/* VMM HW assisted enable */
struct privatespace *CPU_prvspace;

extern uint64_t KPML4phys;	/* phys addr of kernel level 4 */

static struct trapframe proc0_tf;
static void *proc0paddr;

static void init_sys_memory(char *imageFile);
static void init_kern_memory(void);
static void init_kern_memory_vmm(void);
static void init_globaldata(void);
static void init_vkernel(void);
static void init_disk(char *diskExp[], int diskFileNum, enum vkdisk_type type);
static void init_netif(char *netifExp[], int netifFileNum);
static void writepid(void);
static void cleanpid(void);
static int unix_connect(const char *path);
static void usage_err(const char *ctl, ...);
static void usage_help(_Bool);
static void init_locks(void);

static int save_ac;
static char **save_av;

/*
 * Kernel startup for virtual kernels - standard main()
 */
int
main(int ac, char **av)
{
	char *memImageFile = NULL;
	char *netifFile[VKNETIF_MAX];
	char *diskFile[VKDISK_MAX];
	char *cdFile[VKDISK_MAX];
	char *suffix;
	char *endp;
	char *tmp;
	char *tok;
	int netifFileNum = 0;
	int diskFileNum = 0;
	int cdFileNum = 0;
	int bootOnDisk = -1;	/* set below to vcd (0) or vkd (1) */
	int c;
	int i;
	int j;
	int n;
	int isq;
	int pos;
	int eflag;
	int dflag = 0;		/* disable vmm */
	int real_vkernel_enable;
	int supports_sse;
	uint32_t mxcsr_mask;
	size_t vsize;
	size_t msize;
	size_t kenv_size;
	size_t kenv_size2;
	pid_t pid;
	int status;
	struct sigaction sa;

	/*
	 * Currently a bad hack but rtld-elf needs LD_SHAREDLIB_BASE to
	 * be set to force it to mmap() shared libraries into low memory,
	 * so our module loader can link against the related symbols.
	 */
	if (getenv("LD_SHAREDLIB_BASE") == NULL) {
		setenv("LD_SHAREDLIB_BASE", "0x10000000", 1);
		execv(av[0], av);
		fprintf(stderr, "Must run %s with full path\n", av[0]);
		exit(1);
	}

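	/*
	 * Run the vkernel proper in a child process.  The parent acts as
	 * a small supervisor: it ignores the usual terminal signals,
	 * waits for the child, and forks a fresh instance whenever the
	 * child exits with EX_VKERNEL_REBOOT (see cpu_reset()).  Any
	 * other exit status terminates the supervisor as well.
	 */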
	while ((pid = fork()) != 0) {
		/* Ignore signals */
		bzero(&sa, sizeof(sa));
		sigemptyset(&sa.sa_mask);
		sa.sa_handler = SIG_IGN;
		sigaction(SIGINT, &sa, NULL);
		sigaction(SIGQUIT, &sa, NULL);
		sigaction(SIGHUP, &sa, NULL);

		/*
		 * Wait for child to terminate, exit if
		 * someone stole our child.
		 */
		while (waitpid(pid, &status, 0) != pid) {
			if (errno == ECHILD)
				exit(1);
		}
		if (WEXITSTATUS(status) != EX_VKERNEL_REBOOT)
			return 0;
	}

	/*
	 * Starting for real
	 */
	save_ac = ac;
	save_av = av;
	eflag = 0;
	pos = 0;
	kenv_size = 0;
	/*
	 * Process options
	 */
	kernel_mem_readonly = 1;
	optcpus = 2;
	vkernel_b_arg = 0;
	vkernel_B_arg = 0;
	lwp_cpu_lock = LCL_NONE;

	real_vkernel_enable = 0;
	vsize = sizeof(real_vkernel_enable);
	sysctlbyname("vm.vkernel_enable", &real_vkernel_enable, &vsize, NULL,0);

	if (real_vkernel_enable == 0) {
		errx(1, "vm.vkernel_enable is 0, must be set "
			"to 1 to execute a vkernel!");
	}

	real_ncpus = 1;
	vsize = sizeof(real_ncpus);
	sysctlbyname("hw.ncpu", &real_ncpus, &vsize, NULL, 0);

	if (ac < 2)
		usage_help(false);

	while ((c = getopt(ac, av, "c:hsvl:m:n:r:e:i:p:I:Ud")) != -1) {
		switch(c) {
		case 'd':
			dflag = 1;
			break;
		case 'e':
			/*
			 * name=value:name=value:name=value...
			 * name="value"...
			 *
			 * Allow values to be quoted but note that shells
			 * may remove the quotes, so using this feature
			 * to embed colons may require a backslash.
			 */
			n = strlen(optarg);
			isq = 0;

			if (eflag == 0) {
				kenv_size = n + 2;
				kern_envp = malloc(kenv_size);
				if (kern_envp == NULL)
					errx(1, "Couldn't allocate %zd bytes for kern_envp", kenv_size);
			} else {
				kenv_size2 = kenv_size + n + 1;
				pos = kenv_size - 1;
				if ((tmp = realloc(kern_envp, kenv_size2)) == NULL)
					errx(1, "Couldn't reallocate %zd bytes for kern_envp", kenv_size2);
				kern_envp = tmp;
				kenv_size = kenv_size2;
			}

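			/*
			 * Copy the option into kern_envp, translating
			 * unquoted ':' separators into NUL bytes so the
			 * result is a NUL-separated list of name=value
			 * strings terminated by an extra NUL below.
			 */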
			for (i = 0, j = pos; i < n; ++i) {
				if (optarg[i] == '"')
					isq ^= 1;
				else if (optarg[i] == '\'')
					isq ^= 2;
				else if (isq == 0 && optarg[i] == ':')
					kern_envp[j++] = 0;
				else
					kern_envp[j++] = optarg[i];
			}
			kern_envp[j++] = 0;
			kern_envp[j++] = 0;
			eflag++;
			break;
		case 's':
			boothowto |= RB_SINGLE;
			break;
		case 'v':
			bootverbose = 1;
			break;
		case 'i':
			memImageFile = optarg;
			break;
		case 'I':
			if (netifFileNum < VKNETIF_MAX)
				netifFile[netifFileNum++] = strdup(optarg);
			break;
		case 'r':
			if (bootOnDisk < 0)
				bootOnDisk = 1;
			if (diskFileNum + cdFileNum < VKDISK_MAX)
				diskFile[diskFileNum++] = strdup(optarg);
			break;
		case 'c':
			if (bootOnDisk < 0)
				bootOnDisk = 0;
			if (diskFileNum + cdFileNum < VKDISK_MAX)
				cdFile[cdFileNum++] = strdup(optarg);
			break;
		case 'm':
			Maxmem_bytes = strtoull(optarg, &suffix, 0);
			if (suffix) {
				switch(*suffix) {
				case 'g':
				case 'G':
					Maxmem_bytes <<= 30;
					break;
				case 'm':
				case 'M':
					Maxmem_bytes <<= 20;
					break;
				case 'k':
				case 'K':
					Maxmem_bytes <<= 10;
					break;
				default:
					Maxmem_bytes = 0;
					usage_err("Bad maxmem option");
					/* NOT REACHED */
					break;
				}
			}
			break;
		case 'l':
			next_cpu = -1;
			if (strncmp("map", optarg, 3) == 0) {
				lwp_cpu_lock = LCL_PER_CPU;
				if (optarg[3] == ',') {
					next_cpu = strtol(optarg+4, &endp, 0);
					if (*endp != '\0')
						usage_err("Bad target CPU number at '%s'", endp);
				} else {
					next_cpu = 0;
				}
				if (next_cpu < 0 || next_cpu > real_ncpus - 1)
					usage_err("Bad target CPU, valid range is 0-%d", real_ncpus - 1);
			} else if (strncmp("any", optarg, 3) == 0) {
				lwp_cpu_lock = LCL_NONE;
			} else {
				lwp_cpu_lock = LCL_SINGLE_CPU;
				next_cpu = strtol(optarg, &endp, 0);
				if (*endp != '\0')
					usage_err("Bad target CPU number at '%s'", endp);
				if (next_cpu < 0 || next_cpu > real_ncpus - 1)
					usage_err("Bad target CPU, valid range is 0-%d", real_ncpus - 1);
			}
			break;
		case 'n':
			/*
			 * This value is set up by mp_start(), don't just
			 * set ncpus here.
			 */
			tok = strtok(optarg, ":");
			optcpus = strtol(tok, NULL, 0);
			if (optcpus < 1 || optcpus > MAXCPU)
				usage_err("Bad ncpus, valid range is 1-%d", MAXCPU);

			/* :lbits argument */
			tok = strtok(NULL, ":");
			if (tok != NULL) {
				vkernel_b_arg = strtol(tok, NULL, 0);

				/* :cbits argument */
				tok = strtok(NULL, ":");
				if (tok != NULL) {
					vkernel_B_arg = strtol(tok, NULL, 0);
				}

			}
			break;
		case 'p':
			pid_file = optarg;
			break;
		case 'U':
			kernel_mem_readonly = 0;
			break;
		case 'h':
			usage_help(true);
			break;
		default:
			usage_help(false);
		}
	}

	/*
	 * Check VMM presence
	 */
	vsize = sizeof(vmm_enabled);
	sysctlbyname("hw.vmm.enable", &vmm_enabled, &vsize, NULL, 0);
	vmm_enabled = (vmm_enabled && !dflag);

	writepid();
	cpu_disable_intr();
	if (vmm_enabled) {
		/* use a MAP_ANON directly */
		init_kern_memory_vmm();
	} else {
		init_sys_memory(memImageFile);
		init_kern_memory();
	}
	init_globaldata();
	init_vkernel();
	setrealcpu();
	init_kqueue();

	vmm_guest = VMM_GUEST_VKERNEL;

	/*
	 * Check TSC
	 */
	vsize = sizeof(tsc_present);
	sysctlbyname("hw.tsc_present", &tsc_present, &vsize, NULL, 0);
	vsize = sizeof(tsc_invariant);
	sysctlbyname("hw.tsc_invariant", &tsc_invariant, &vsize, NULL, 0);
	vsize = sizeof(tsc_mpsync);
	sysctlbyname("hw.tsc_mpsync", &tsc_mpsync, &vsize, NULL, 0);
	vsize = sizeof(tsc_frequency);
	sysctlbyname("hw.tsc_frequency", &tsc_frequency, &vsize, NULL, 0);
	if (tsc_present)
		cpu_feature |= CPUID_TSC;

	/*
	 * Check SSE
	 */
	vsize = sizeof(supports_sse);
	supports_sse = 0;
	sysctlbyname("hw.instruction_sse", &supports_sse, &vsize, NULL, 0);
	msize = sizeof(mxcsr_mask);
	sysctlbyname("hw.mxcsr_mask", &mxcsr_mask, &msize, NULL, 0);
	init_fpu(supports_sse);
	if (supports_sse)
		cpu_feature |= CPUID_SSE | CPUID_FXSR;

	/*
	 * We boot from the first installed disk.
	 */
	if (bootOnDisk == 1) {
		init_disk(diskFile, diskFileNum, VKD_DISK);
		init_disk(cdFile, cdFileNum, VKD_CD);
	} else {
		init_disk(cdFile, cdFileNum, VKD_CD);
		init_disk(diskFile, diskFileNum, VKD_DISK);
	}

	init_netif(netifFile, netifFileNum);
	init_exceptions();
	mi_startup();
	/* NOT REACHED */
	exit(EX_SOFTWARE);
}

/*
 * Initialize system memory.  This is the virtual kernel's 'RAM'.
 */
static
void
init_sys_memory(char *imageFile)
{
	struct stat st;
	int i;
	int fd;

	/*
	 * Figure out the system memory image size.  If an image file was
	 * specified and -m was not specified, use the image file's size.
	 */
	if (imageFile && stat(imageFile, &st) == 0 && Maxmem_bytes == 0)
		Maxmem_bytes = (vm_paddr_t)st.st_size;
	if ((imageFile == NULL || stat(imageFile, &st) < 0) &&
	    Maxmem_bytes == 0) {
		errx(1, "Cannot create new memory file %s unless "
		       "system memory size is specified with -m",
		       imageFile);
		/* NOT REACHED */
	}

	/*
	 * Maxmem must be known at this time
	 */
	if (Maxmem_bytes < 64 * 1024 * 1024 || (Maxmem_bytes & SEG_MASK)) {
		errx(1, "Bad maxmem specification: 64MB minimum, "
		       "multiples of %dMB only",
		       SEG_SIZE / 1024 / 1024);
		/* NOT REACHED */
	}

	/*
	 * Generate an image file name if necessary, then open/create the
	 * file exclusively locked.  Do not allow multiple virtual kernels
	 * to use the same image file.
	 *
	 * Don't iterate through a million files if we do not have write
	 * access to the directory; stop if our open() failed on a
	 * non-existent file.  Otherwise opens can fail for any number
	 * of reasons.
	 */
	if (imageFile == NULL) {
		for (i = 0; i < 1000000; ++i) {
			asprintf(&imageFile, "/var/vkernel/memimg.%06d", i);
			fd = open(imageFile,
				  O_RDWR|O_CREAT|O_EXLOCK|O_NONBLOCK, 0644);
			if (fd < 0 && stat(imageFile, &st) == 0) {
				free(imageFile);
				continue;
			}
			break;
		}
	} else {
		fd = open(imageFile, O_RDWR|O_CREAT|O_EXLOCK|O_NONBLOCK, 0644);
	}
	fprintf(stderr, "Using memory file: %s\n", imageFile);
	if (fd < 0 || fstat(fd, &st) < 0) {
		err(1, "Unable to open/create %s", imageFile);
		/* NOT REACHED */
	}

	/*
	 * Truncate or extend the file as necessary.  Clean out the contents
	 * of the file, we want it to be full of holes so we don't waste
	 * time reading in data from an old file that we no longer care
	 * about.
	 */
	ftruncate(fd, 0);
	ftruncate(fd, Maxmem_bytes);

	MemImageFd = fd;
	Maxmem = Maxmem_bytes >> PAGE_SHIFT;
	physmem = Maxmem;
}

/*
 * Initialize kernel memory.  This reserves kernel virtual memory by using
 * MAP_VPAGETABLE
 */

static
void
init_kern_memory(void)
{
	void *base;
	int i;
	void *firstfree;

	/*
	 * Memory map our kernel virtual memory space.  Note that the
	 * kernel image itself is not made part of this memory for the
	 * moment.
	 *
	 * The memory map must be segment-aligned so we can properly
	 * offset KernelPTD.
	 *
	 * If the system kernel has a different MAXDSIZ, it might not
	 * be possible to map kernel memory in its preferred location.
	 * Try a number of different locations.
	 */

	base = mmap((void*)KERNEL_KVA_START, KERNEL_KVA_SIZE, PROT_READ|PROT_WRITE,
		    MAP_FILE|MAP_SHARED|MAP_VPAGETABLE|MAP_FIXED|MAP_TRYFIXED,
		    MemImageFd, (off_t)KERNEL_KVA_START);

	if (base == MAP_FAILED) {
		err(1, "Unable to mmap() kernel virtual memory!");
		/* NOT REACHED */
	}
	madvise(base, KERNEL_KVA_SIZE, MADV_NOSYNC);
	KvaStart = (vm_offset_t)base;
	KvaSize = KERNEL_KVA_SIZE;
	KvaEnd = KvaStart + KvaSize;

	/* cannot use kprintf yet */
	printf("KVM mapped at %p-%p\n", (void *)KvaStart, (void *)KvaEnd);

	/* MAP_FILE? */
	dmap_min_address = mmap(0, DMAP_SIZE, PROT_READ|PROT_WRITE,
				MAP_NOCORE|MAP_NOSYNC|MAP_SHARED,
				MemImageFd, 0);
	if (dmap_min_address == MAP_FAILED) {
		err(1, "Unable to mmap() kernel DMAP region!");
		/* NOT REACHED */
	}

	/*
	 * Bootstrap the kernel_pmap
	 */
	firstfree = NULL;
	pmap_bootstrap((vm_paddr_t *)&firstfree, (int64_t)base);

	mcontrol(base, KERNEL_KVA_SIZE, MADV_SETMAP,
		 0 | VPTE_RW | VPTE_V);

	/*
	 * phys_avail[] represents unallocated physical memory.  MI code
	 * will use phys_avail[] to create the vm_page array.
	 */
	phys_avail[0] = (vm_paddr_t)firstfree;
	phys_avail[0] = (phys_avail[0] + PAGE_MASK) & ~(vm_paddr_t)PAGE_MASK;
	phys_avail[1] = Maxmem_bytes;

#if JGV
	/*
	 * (virtual_start, virtual_end) represent unallocated kernel virtual
	 * memory.  MI code will create kernel_map using these parameters.
	 */
	virtual_start = KvaStart + (long)firstfree;
	virtual_start = (virtual_start + PAGE_MASK) & ~(vm_offset_t)PAGE_MASK;
	virtual_end = KvaStart + KERNEL_KVA_SIZE;
#endif

	/*
	 * pmap_growkernel() will set the correct value.
	 */
	kernel_vm_end = 0;

	/*
	 * Allocate space for process 0's UAREA.
	 */
	proc0paddr = (void *)virtual_start;
	for (i = 0; i < UPAGES; ++i) {
		pmap_kenter_quick(virtual_start, phys_avail[0]);
		virtual_start += PAGE_SIZE;
		phys_avail[0] += PAGE_SIZE;
	}

	/*
	 * crashdumpmap
	 */
	crashdumpmap = virtual_start;
	virtual_start += MAXDUMPPGS * PAGE_SIZE;

	/*
	 * msgbufp maps the system message buffer
	 */
	assert((MSGBUF_SIZE & PAGE_MASK) == 0);
	msgbufp = (void *)virtual_start;
	for (i = 0; i < (MSGBUF_SIZE >> PAGE_SHIFT); ++i) {
		pmap_kenter_quick(virtual_start, phys_avail[0]);
		virtual_start += PAGE_SIZE;
		phys_avail[0] += PAGE_SIZE;
	}
	msgbufinit(msgbufp, MSGBUF_SIZE);

	/*
	 * used by kern_memio for /dev/mem access
	 */
	ptvmmap = (caddr_t)virtual_start;
	virtual_start += PAGE_SIZE;
}

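/*
 * Initialize kernel memory when hardware virtualization (VMM) is used.
 * The guest 'physical' memory is a plain MAP_ANON region, the pmap is
 * bootstrapped, and we then enter VMM mode using KPML4phys as the
 * guest CR3 and a freshly allocated stack.
 */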
static
void
init_kern_memory_vmm(void)
{
	int i;
	void *firstfree;
	struct vmm_guest_options options;
	void *dmap_address;

	KvaStart = (vm_offset_t)KERNEL_KVA_START;
	KvaSize = KERNEL_KVA_SIZE;
	KvaEnd = KvaStart + KvaSize;

	Maxmem = Maxmem_bytes >> PAGE_SHIFT;
	physmem = Maxmem;

	if (Maxmem_bytes < 64 * 1024 * 1024 || (Maxmem_bytes & SEG_MASK)) {
		errx(1, "Bad maxmem specification: 64MB minimum, "
		       "multiples of %dMB only",
		       SEG_SIZE / 1024 / 1024);
		/* NOT REACHED */
	}

	/*
	 * Call vmspace_create() to allocate the internal vkernel
	 * structures.  It won't do anything else (no new vmspace is
	 * created).
	 */
	if (vmspace_create(NULL, 0, NULL) < 0)
		panic("vmspace_create() failed");

	/*
	 * MAP_ANON the region of the VKERNEL physical memory
	 * (known as GPA - Guest Physical Address).
	 */
	dmap_address = mmap(NULL, Maxmem_bytes, PROT_READ|PROT_WRITE|PROT_EXEC,
	    MAP_ANON|MAP_SHARED, -1, 0);
	if (dmap_address == MAP_FAILED) {
		err(1, "Unable to mmap() RAM region!");
		/* NOT REACHED */
	}

	/* Alloc a new stack in the lowmem */
	vkernel_stack = mmap(NULL, KERNEL_STACK_SIZE,
	    PROT_READ|PROT_WRITE|PROT_EXEC,
	    MAP_ANON, -1, 0);
	if (vkernel_stack == MAP_FAILED) {
		err(1, "Unable to allocate stack");
	}

	/*
	 * Bootstrap the kernel_pmap
	 */
	firstfree = dmap_address;
	dmap_min_address = NULL; /* VIRT == PHYS in the first 512G */
	pmap_bootstrap((vm_paddr_t *)&firstfree, (uint64_t)KvaStart);

	/*
	 * Enter VMM mode
	 */
	options.guest_cr3 = (register_t) KPML4phys;
	options.new_stack = (uint64_t) vkernel_stack + KERNEL_STACK_SIZE;
	options.master = 1;
	if (vmm_guest_ctl(VMM_GUEST_RUN, &options)) {
		err(1, "Unable to enter VMM mode.");
	}

	/*
	 * phys_avail[] represents unallocated physical memory.  MI code
	 * will use phys_avail[] to create the vm_page array.
	 */
	phys_avail[0] = (vm_paddr_t)firstfree;
	phys_avail[0] = (phys_avail[0] + PAGE_MASK) & ~(vm_paddr_t)PAGE_MASK;
	phys_avail[1] = (vm_paddr_t)dmap_address + Maxmem_bytes;

	/*
	 * pmap_growkernel() will set the correct value.
	 */
	kernel_vm_end = 0;

	/*
	 * Allocate space for process 0's UAREA.
	 */
	proc0paddr = (void *)virtual_start;
	for (i = 0; i < UPAGES; ++i) {
		pmap_kenter_quick(virtual_start, phys_avail[0]);
		virtual_start += PAGE_SIZE;
		phys_avail[0] += PAGE_SIZE;
	}

	/*
	 * crashdumpmap
	 */
	crashdumpmap = virtual_start;
	virtual_start += MAXDUMPPGS * PAGE_SIZE;

	/*
	 * msgbufp maps the system message buffer
	 */
	assert((MSGBUF_SIZE & PAGE_MASK) == 0);
	msgbufp = (void *)virtual_start;
	for (i = 0; i < (MSGBUF_SIZE >> PAGE_SHIFT); ++i) {
		pmap_kenter_quick(virtual_start, phys_avail[0]);
		virtual_start += PAGE_SIZE;
		phys_avail[0] += PAGE_SIZE;
	}

	msgbufinit(msgbufp, MSGBUF_SIZE);

	/*
	 * used by kern_memio for /dev/mem access
	 */
	ptvmmap = (caddr_t)virtual_start;
	virtual_start += PAGE_SIZE;

	printf("vmm: Hardware pagetable enabled for guest\n");
}


/*
 * Map the per-cpu globaldata for cpu #0.  Allocate the space using
 * virtual_start and phys_avail[0]
 */
static
void
init_globaldata(void)
{
	int i;
	vm_paddr_t pa;
	vm_offset_t va;

	/*
	 * Reserve enough KVA to cover possible cpus.  This is a considerable
	 * amount of KVA since the privatespace structure includes two
	 * whole page table mappings.
	 */
	virtual_start = (virtual_start + SEG_MASK) & ~(vm_offset_t)SEG_MASK;
	CPU_prvspace = (void *)virtual_start;
	virtual_start += sizeof(struct privatespace) * SMP_MAXCPU;

	/*
	 * Allocate enough physical memory to cover the mdglobaldata
	 * portion of the space and the idle stack and map the pages
	 * into KVA.  For cpu #0 only.
	 */
	for (i = 0; i < sizeof(struct mdglobaldata); i += PAGE_SIZE) {
		pa = phys_avail[0];
		va = (vm_offset_t)&CPU_prvspace[0].mdglobaldata + i;
		pmap_kenter_quick(va, pa);
		phys_avail[0] += PAGE_SIZE;
	}
	for (i = 0; i < sizeof(CPU_prvspace[0].idlestack); i += PAGE_SIZE) {
		pa = phys_avail[0];
		va = (vm_offset_t)&CPU_prvspace[0].idlestack + i;
		pmap_kenter_quick(va, pa);
		phys_avail[0] += PAGE_SIZE;
	}

	/*
	 * Setup the %gs for cpu #0.  The mycpu macro works after this
	 * point.  Note that %fs is used by pthreads.
	 */
	tls_set_gs(&CPU_prvspace[0], sizeof(struct privatespace));
}


/*
 * Initialize pool tokens and other necessary locks
 */
static void
init_locks(void)
{

	/*
	 * Get the initial mplock with a count of 1 for the BSP.
	 * This uses a LOGICAL cpu ID, ie BSP == 0.
	 */
	cpu_get_initial_mplock();

	/* our token pool needs to work early */
	lwkt_token_pool_init();

}


/*
 * Initialize very low level systems including thread0, proc0, etc.
 */
static
void
init_vkernel(void)
{
	struct mdglobaldata *gd;

	gd = &CPU_prvspace[0].mdglobaldata;
	bzero(gd, sizeof(*gd));

	gd->mi.gd_curthread = &thread0;
	thread0.td_gd = &gd->mi;
	ncpus = 1;
	ncpus2 = 1;	/* rounded down power of 2 */
	ncpus_fit = 1;	/* rounded up power of 2 */
	/* ncpus2_mask and ncpus_fit_mask are 0 */
	init_param1();
	gd->mi.gd_prvspace = &CPU_prvspace[0];
	mi_gdinit(&gd->mi, 0);
	cpu_gdinit(gd, 0);
	mi_proc0init(&gd->mi, proc0paddr);
	lwp0.lwp_md.md_regs = &proc0_tf;

	init_locks();
	cninit();
	rand_initialize();
#if 0	/* #ifdef DDB */
	kdb_init();
	if (boothowto & RB_KDB)
		Debugger("Boot flags requested debugger");
#endif
	identcpu();
#if 0
	initializecpu();	/* Initialize CPU registers */
#endif
	init_param2((phys_avail[1] - phys_avail[0]) / PAGE_SIZE);

#if 0
	/*
	 * Map the message buffer
	 */
	for (off = 0; off < round_page(MSGBUF_SIZE); off += PAGE_SIZE)
		pmap_kenter((vm_offset_t)msgbufp + off, avail_end + off);
	msgbufinit(msgbufp, MSGBUF_SIZE);
#endif
#if 0
	thread0.td_pcb_cr3 ... MMU
	lwp0.lwp_md.md_regs = &proc0_tf;
#endif
}

/*
 * Filesystem image paths for the virtual kernel are optional.
 * If specified they each should point to a disk image,
 * the first of which will become the root disk.
 *
 * The virtual kernel caches data from our 'disk' just like a normal kernel,
 * so we do not really want the real kernel to cache the data too.  Use
 * O_DIRECT to remove the duplication.
 */
static
void
init_disk(char *diskExp[], int diskFileNum, enum vkdisk_type type)
{
	char *serno;
	int i;

	if (diskFileNum == 0)
		return;

	for (i = 0; i < diskFileNum; i++) {
		char *fname;
		fname = diskExp[i];

		if (fname == NULL) {
			warnx("Invalid argument to '-r'");
			continue;
		}
		/*
		 * Check for a serial number for the virtual disk
		 * passed from the command line.
		 */
		serno = fname;
		strsep(&serno, ":");

		if (DiskNum < VKDISK_MAX) {
			struct stat st;
			struct vkdisk_info* info = NULL;
			int fd;
			size_t l = 0;

			if (type == VKD_DISK)
				fd = open(fname, O_RDWR|O_DIRECT, 0644);
			else
				fd = open(fname, O_RDONLY|O_DIRECT, 0644);
			if (fd < 0 || fstat(fd, &st) < 0) {
				err(1, "Unable to open/create %s", fname);
				/* NOT REACHED */
			}
			if (S_ISREG(st.st_mode)) {
				if (flock(fd, LOCK_EX|LOCK_NB) < 0) {
					errx(1, "Disk image %s is already "
						"in use", fname);
					/* NOT REACHED */
				}
			}

			info = &DiskInfo[DiskNum];
			l = strlen(fname);

			info->unit = i;
			info->fd = fd;
			info->type = type;
			memcpy(info->fname, fname, l);
			info->serno = NULL;
			if (serno) {
				if ((info->serno = malloc(SERNOLEN)) != NULL)
					strlcpy(info->serno, serno, SERNOLEN);
				else
					warnx("Couldn't allocate memory for the operation");
			}

			if (DiskNum == 0) {
				if (type == VKD_CD) {
					rootdevnames[0] = "cd9660:vcd0";
				} else if (type == VKD_DISK) {
					rootdevnames[0] = "ufs:vkd0s0a";
					rootdevnames[1] = "ufs:vkd0s1a";
				}
			}

			DiskNum++;
		} else {
			warnx("vkd%d (%s) > VKDISK_MAX", DiskNum, fname);
			continue;
		}
	}
}

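/*
 * Set or clear interface flags on the tap(4) unit via SIOCGIFFLAGS /
 * SIOCSIFFLAGS.  A negative 'f' clears the corresponding flags, a
 * positive 'f' sets them.
 */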
static
int
netif_set_tapflags(int tap_unit, int f, int s)
{
	struct ifreq ifr;
	int flags;

	bzero(&ifr, sizeof(ifr));

	snprintf(ifr.ifr_name, sizeof(ifr.ifr_name), "tap%d", tap_unit);
	if (ioctl(s, SIOCGIFFLAGS, &ifr) < 0) {
		warn("tap%d: ioctl(SIOCGIFFLAGS) failed", tap_unit);
		return -1;
	}

	/*
	 * Adjust if_flags
	 *
	 * If the flags are already set/cleared, then we return
	 * immediately to avoid extra syscalls
	 */
	flags = (ifr.ifr_flags & 0xffff) | (ifr.ifr_flagshigh << 16);
	if (f < 0) {
		/* Turn off flags */
		f = -f;
		if ((flags & f) == 0)
			return 0;
		flags &= ~f;
	} else {
		/* Turn on flags */
		if (flags & f)
			return 0;
		flags |= f;
	}

	/*
	 * Fix up ifreq.ifr_name, since it may be trashed
	 * in previous ioctl(SIOCGIFFLAGS)
	 */
	snprintf(ifr.ifr_name, sizeof(ifr.ifr_name), "tap%d", tap_unit);

	ifr.ifr_flags = flags & 0xffff;
	ifr.ifr_flagshigh = flags >> 16;
	if (ioctl(s, SIOCSIFFLAGS, &ifr) < 0) {
		warn("tap%d: ioctl(SIOCSIFFLAGS) failed", tap_unit);
		return -1;
	}
	return 0;
}

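/*
 * Assign an IPv4 address (and optionally a netmask) to the tap(4)
 * interface using SIOCAIFADDR.
 */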
static
int
netif_set_tapaddr(int tap_unit, in_addr_t addr, in_addr_t mask, int s)
{
	struct ifaliasreq ifra;
	struct sockaddr_in *in;

	bzero(&ifra, sizeof(ifra));
	snprintf(ifra.ifra_name, sizeof(ifra.ifra_name), "tap%d", tap_unit);

	/* Setup address */
	in = (struct sockaddr_in *)&ifra.ifra_addr;
	in->sin_family = AF_INET;
	in->sin_len = sizeof(*in);
	in->sin_addr.s_addr = addr;

	if (mask != 0) {
		/* Setup netmask */
		in = (struct sockaddr_in *)&ifra.ifra_mask;
		in->sin_len = sizeof(*in);
		in->sin_addr.s_addr = mask;
	}

	if (ioctl(s, SIOCAIFADDR, &ifra) < 0) {
		warn("tap%d: ioctl(SIOCAIFADDR) failed", tap_unit);
		return -1;
	}
	return 0;
}

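/*
 * Add the tap(4) interface as a member of the named bridge(4)
 * interface using the SIOCSDRVSPEC/BRDGADD generic driver ioctl.
 */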
static
int
netif_add_tap2brg(int tap_unit, const char *ifbridge, int s)
{
	struct ifbreq ifbr;
	struct ifdrv ifd;

	bzero(&ifbr, sizeof(ifbr));
	snprintf(ifbr.ifbr_ifsname, sizeof(ifbr.ifbr_ifsname),
		 "tap%d", tap_unit);

	bzero(&ifd, sizeof(ifd));
	strlcpy(ifd.ifd_name, ifbridge, sizeof(ifd.ifd_name));
	ifd.ifd_cmd = BRDGADD;
	ifd.ifd_len = sizeof(ifbr);
	ifd.ifd_data = &ifbr;

	if (ioctl(s, SIOCSDRVSPEC, &ifd) < 0) {
		/*
		 * 'errno == EEXIST' means that the tap(4) is already
		 * a member of the bridge(4)
		 */
		if (errno != EEXIST) {
			warn("ioctl(%s, SIOCSDRVSPEC) failed", ifbridge);
			return -1;
		}
	}
	return 0;
}

#define TAPDEV_OFLAGS	(O_RDWR | O_NONBLOCK)

/*
 * Locate the first unused tap(4) device file if auto mode is requested,
 * or open the user supplied device file, and bring up the corresponding
 * tap(4) interface.
 *
 * NOTE: Only tap(4) device file is supported currently
 */
static
int
netif_open_tap(const char *netif, int *tap_unit, int s)
{
	char tap_dev[MAXPATHLEN];
	int tap_fd, failed;
	struct stat st;
	char *dname;

	*tap_unit = -1;

	if (strcmp(netif, "auto") == 0) {
		/*
		 * Find first unused tap(4) device file
		 */
		tap_fd = open("/dev/tap", TAPDEV_OFLAGS);
		if (tap_fd < 0) {
			warnc(errno, "Unable to find a free tap(4)");
			return -1;
		}
	} else {
		/*
		 * User supplied tap(4) device file or unix socket.
		 */
		if (netif[0] == '/')	/* Absolute path */
			strlcpy(tap_dev, netif, sizeof(tap_dev));
		else
			snprintf(tap_dev, sizeof(tap_dev), "/dev/%s", netif);

		tap_fd = open(tap_dev, TAPDEV_OFLAGS);

		/*
		 * If we cannot open normally try to connect to it.
		 */
		if (tap_fd < 0)
			tap_fd = unix_connect(tap_dev);

		if (tap_fd < 0) {
			warn("Unable to open %s", tap_dev);
			return -1;
		}
	}

	/*
	 * Check whether the device file is a tap(4)
	 */
	if (fstat(tap_fd, &st) < 0) {
		failed = 1;
	} else if (S_ISCHR(st.st_mode)) {
		dname = fdevname(tap_fd);
		if (dname)
			dname = strstr(dname, "tap");
		if (dname) {
			/*
			 * Bring up the corresponding tap(4) interface
			 */
			*tap_unit = strtol(dname + 3, NULL, 10);
			printf("TAP UNIT %d\n", *tap_unit);
			if (netif_set_tapflags(*tap_unit, IFF_UP, s) == 0)
				failed = 0;
			else
				failed = 1;
		} else {
			failed = 1;
		}
	} else if (S_ISSOCK(st.st_mode)) {
		/*
		 * Special socket connection (typically to vknet).  We
		 * do not have to do anything.
		 */
		failed = 0;
	} else {
		failed = 1;
	}

	if (failed) {
		warnx("%s is not a tap(4) device or socket", tap_dev);
		close(tap_fd);
		tap_fd = -1;
		*tap_unit = -1;
	}
	return tap_fd;
}

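/*
 * Connect a SOCK_SEQPACKET unix domain socket to the given path
 * (typically a vknet socket), enlarge its send buffer and make it
 * non-blocking.  Returns the descriptor or -1 on failure.
 */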
static int
unix_connect(const char *path)
{
	struct sockaddr_un sunx;
	int len;
	int net_fd;
	int sndbuf = 262144;
	struct stat st;

	snprintf(sunx.sun_path, sizeof(sunx.sun_path), "%s", path);
	len = offsetof(struct sockaddr_un, sun_path[strlen(sunx.sun_path)]);
	++len;	/* include nul */
	sunx.sun_family = AF_UNIX;
	sunx.sun_len = len;

	net_fd = socket(AF_UNIX, SOCK_SEQPACKET, 0);
	if (net_fd < 0)
		return(-1);
	if (connect(net_fd, (void *)&sunx, len) < 0) {
		close(net_fd);
		return(-1);
	}
	setsockopt(net_fd, SOL_SOCKET, SO_SNDBUF, &sndbuf, sizeof(sndbuf));
	if (fstat(net_fd, &st) == 0)
		printf("Network socket buffer: %d bytes\n", st.st_blksize);
	fcntl(net_fd, F_SETFL, O_NONBLOCK);
	return(net_fd);
}

#undef TAPDEV_MAJOR
#undef TAPDEV_MINOR
#undef TAPDEV_OFLAGS

/*
 * Following syntax is supported,
 * 1) x.x.x.x             tap(4)'s address is x.x.x.x
 *
 * 2) x.x.x.x/z           tap(4)'s address is x.x.x.x
 *                        tap(4)'s netmask len is z
 *
 * 3) x.x.x.x:y.y.y.y     tap(4)'s address is x.x.x.x
 *                        pseudo netif's address is y.y.y.y
 *
 * 4) x.x.x.x:y.y.y.y/z   tap(4)'s address is x.x.x.x
 *                        pseudo netif's address is y.y.y.y
 *                        tap(4) and pseudo netif's netmask len are z
 *
 * 5) bridgeX             tap(4) will be added to bridgeX
 *
 * 6) bridgeX:y.y.y.y     tap(4) will be added to bridgeX
 *                        pseudo netif's address is y.y.y.y
 *
 * 7) bridgeX:y.y.y.y/z   tap(4) will be added to bridgeX
 *                        pseudo netif's address is y.y.y.y
 *                        pseudo netif's netmask len is z
 */
static
int
netif_init_tap(int tap_unit, in_addr_t *addr, in_addr_t *mask, int s)
{
	in_addr_t tap_addr, netmask, netif_addr;
	int next_netif_addr;
	char *tok, *masklen_str, *ifbridge;

	*addr = 0;
	*mask = 0;

	tok = strtok(NULL, ":/");
	if (tok == NULL) {
		/*
		 * Nothing special, simply use tap(4) as backend
		 */
		return 0;
	}

	if (inet_pton(AF_INET, tok, &tap_addr) > 0) {
		/*
		 * tap(4)'s address is supplied
		 */
		ifbridge = NULL;

		/*
		 * If there is next token, then it may be pseudo
		 * netif's address or netmask len for tap(4)
		 */
		next_netif_addr = 0;
	} else {
		/*
		 * Not tap(4)'s address, assume it as a bridge(4)
		 * iface name
		 */
		tap_addr = 0;
		ifbridge = tok;

		/*
		 * If there is next token, then it must be pseudo
		 * netif's address
		 */
		next_netif_addr = 1;
	}

	netmask = netif_addr = 0;

	tok = strtok(NULL, ":/");
	if (tok == NULL)
		goto back;

	if (inet_pton(AF_INET, tok, &netif_addr) <= 0) {
		if (next_netif_addr) {
			warnx("Invalid pseudo netif address: %s", tok);
			return -1;
		}
		netif_addr = 0;

		/*
		 * Current token is not address, then it must be netmask len
		 */
		masklen_str = tok;
	} else {
		/*
		 * Current token is pseudo netif address, if there is next token
		 * it must be netmask len
		 */
		masklen_str = strtok(NULL, "/");
	}

	/* Calculate netmask */
	if (masklen_str != NULL) {
		u_long masklen;

		masklen = strtoul(masklen_str, NULL, 10);
		if (masklen < 32 && masklen > 0) {
			netmask = htonl(~((1LL << (32 - masklen)) - 1)
					& 0xffffffff);
		} else {
			warnx("Invalid netmask len: %lu", masklen);
			return -1;
		}
	}

	/* Make sure there is no more token left */
	if (strtok(NULL, ":/") != NULL) {
		warnx("Invalid argument to '-I'");
		return -1;
	}

back:
	if (tap_unit < 0) {
		/* Do nothing */
	} else if (ifbridge == NULL) {
		/* Set tap(4) address/netmask */
		if (netif_set_tapaddr(tap_unit, tap_addr, netmask, s) < 0)
			return -1;
	} else {
		/* Tie tap(4) to bridge(4) */
		if (netif_add_tap2brg(tap_unit, ifbridge, s) < 0)
			return -1;
	}

	*addr = netif_addr;
	*mask = netmask;
	return 0;
}

/*
 * NetifInfo[] will be filled for pseudo netif initialization.
 * NetifNum will be bumped to reflect the number of valid entries
 * in NetifInfo[].
 */
static
void
init_netif(char *netifExp[], int netifExpNum)
{
	int i, s;
	char *tmp;

	if (netifExpNum == 0)
		return;

	s = socket(AF_INET, SOCK_DGRAM, 0);	/* for ioctl(SIOC) */
	if (s < 0)
		return;

	for (i = 0; i < netifExpNum; ++i) {
		struct vknetif_info *info;
		in_addr_t netif_addr, netif_mask;
		int tap_fd, tap_unit;
		char *netif;

		/* Extract MAC address if there is one */
		tmp = netifExp[i];
		strsep(&tmp, "=");

		netif = strtok(netifExp[i], ":");
		if (netif == NULL) {
			warnx("Invalid argument to '-I'");
			continue;
		}

		/*
		 * Open tap(4) device file and bring up the
		 * corresponding interface
		 */
		tap_fd = netif_open_tap(netif, &tap_unit, s);
		if (tap_fd < 0)
			continue;

		/*
		 * Initialize tap(4) and get address/netmask
		 * for pseudo netif
		 *
		 * NB: Rest part of netifExp[i] is passed
		 *     to netif_init_tap() implicitly.
		 */
		if (netif_init_tap(tap_unit, &netif_addr, &netif_mask, s) < 0) {
			/*
			 * NB: Closing tap(4) device file will bring
			 *     down the corresponding interface
			 */
			close(tap_fd);
			continue;
		}

		info = &NetifInfo[NetifNum];
		bzero(info, sizeof(*info));
		info->tap_fd = tap_fd;
		info->tap_unit = tap_unit;
		info->netif_addr = netif_addr;
		info->netif_mask = netif_mask;
		/*
		 * If tmp isn't NULL it means a MAC could have been
		 * specified so attempt to convert it.
		 * Setting enaddr to NULL will tell vke_attach() we
		 * need a pseudo-random MAC address.
		 */
		if (tmp != NULL) {
			if ((info->enaddr = malloc(ETHER_ADDR_LEN)) == NULL)
				warnx("Couldn't allocate memory for the operation");
			else {
				if ((kether_aton(tmp, info->enaddr)) == NULL) {
					free(info->enaddr);
					info->enaddr = NULL;
				}
			}
		}

		NetifNum++;
		if (NetifNum >= VKNETIF_MAX)	/* XXX will this happen? */
			break;
	}
	close(s);
}

/*
 * Create the pid file and leave it open and locked while the vkernel is
 * running.  This allows a script to use /usr/bin/lockf to probe whether
 * a vkernel is still running (so as not to accidentally kill an unrelated
 * process from a stale pid file).
 */
static
void
writepid(void)
{
	char buf[32];
	int fd;

	if (pid_file != NULL) {
		snprintf(buf, sizeof(buf), "%ld\n", (long)getpid());
		fd = open(pid_file, O_RDWR|O_CREAT|O_EXLOCK|O_NONBLOCK, 0666);
		if (fd < 0) {
			if (errno == EWOULDBLOCK) {
				perror("Failed to lock pidfile, "
				       "vkernel already running");
			} else {
				perror("Failed to create pidfile");
			}
			exit(EX_SOFTWARE);
		}
		ftruncate(fd, 0);
		write(fd, buf, strlen(buf));
		/* leave the file open to maintain the lock */
	}
}

static
void
cleanpid(void)
{
	if (pid_file != NULL) {
		if (unlink(pid_file) < 0)
			perror("Warning: couldn't remove pidfile");
	}
}

static
void
usage_err(const char *ctl, ...)
{
	va_list va;

	va_start(va, ctl);
	vfprintf(stderr, ctl, va);
	va_end(va);
	fprintf(stderr, "\n");
	exit(EX_USAGE);
}

static
void
usage_help(_Bool help)
{
	fprintf(stderr, "Usage: %s [-hsUvd] [-c file] [-e name=value:name=value:...]\n"
	    "\t[-i file] [-I interface[:address1[:address2][/netmask]]] [-l cpulock]\n"
	    "\t[-m size] [-n numcpus[:lbits[:cbits]]]\n"
	    "\t[-p file] [-r file]\n", save_av[0]);

	if (help)
		fprintf(stderr, "\nArguments:\n"
		    "\t-c\tSpecify a readonly CD-ROM image file to be used by the kernel.\n"
		    "\t-e\tSpecify an environment to be used by the kernel.\n"
		    "\t-h\tThis list of options.\n"
		    "\t-i\tSpecify a memory image file to be used by the virtual kernel.\n"
		    "\t-I\tCreate a virtual network device.\n"
		    "\t-l\tSpecify which, if any, real CPUs to lock virtual CPUs to.\n"
		    "\t-m\tSpecify the amount of memory to be used by the kernel in bytes.\n"
		    "\t-n\tSpecify the number of CPUs and the topology you wish to emulate:\n"
		    "\t  \t- numcpus - number of cpus\n"
		    "\t  \t- :lbits - specify the number of bits within APICID(=CPUID) needed for representing\n"
		    "\t  \t  the logical ID. Controls the number of threads/core (0bits - 1 thread, 1bit - 2 threads).\n"
		    "\t  \t- :cbits - specify the number of bits within APICID(=CPUID) needed for representing\n"
		    "\t  \t  the core ID. Controls the number of core/package (0bits - 1 core, 1bit - 2 cores).\n"
		    "\t-p\tSpecify a file in which to store the process ID.\n"
		    "\t-r\tSpecify a R/W disk image file to be used by the kernel.\n"
		    "\t-s\tBoot into single-user mode.\n"
		    "\t-U\tEnable writing to kernel memory and module loading.\n"
		    "\t-v\tTurn on verbose booting.\n");

	exit(EX_USAGE);
}
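
/*
 * Illustrative invocation (not from the original sources; the image
 * and pid file paths are arbitrary):
 *
 *	vkernel -m 1g -n 2 -r /var/vkernel/rootimg.01 \
 *		-I auto:bridge0 -p /var/run/vkernel.pid
 *
 * This boots a 2-cpu vkernel with 1GB of RAM from the given disk
 * image, attaches its network interface to bridge0 through an
 * automatically allocated tap(4) device, and records the pid.
 */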

void
cpu_reset(void)
{
	kprintf("cpu reset, rebooting vkernel\n");
	closefrom(3);
	cleanpid();
	exit(EX_VKERNEL_REBOOT);
}

void
cpu_halt(void)
{
	kprintf("cpu halt, exiting vkernel\n");
	cleanpid();
	exit(EX_OK);
}

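/*
 * Pin this virtual cpu's thread to a real cpu according to the -l
 * policy selected at startup (uses usched_set()).
 */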
void
setrealcpu(void)
{
	switch(lwp_cpu_lock) {
	case LCL_PER_CPU:
		if (bootverbose)
			kprintf("Locking CPU%d to real cpu %d\n",
				mycpuid, next_cpu);
		usched_set(getpid(), USCHED_SET_CPU, &next_cpu, sizeof(next_cpu));
		next_cpu++;
		if (next_cpu >= real_ncpus)
			next_cpu = 0;
		break;
	case LCL_SINGLE_CPU:
		if (bootverbose)
			kprintf("Locking CPU%d to real cpu %d\n",
				mycpuid, next_cpu);
		usched_set(getpid(), USCHED_SET_CPU, &next_cpu, sizeof(next_cpu));
		break;
	default:
		/* do not map virtual cpus to real cpus */
		break;
	}
}

/*
 * Allocate and free memory for module loading.  The loaded module
 * has to be placed somewhere near the current kernel binary load
 * point or the relocations will not work.
 *
 * I'm not sure why this isn't working.
 */
int
vkernel_module_memory_alloc(vm_offset_t *basep, size_t bytes)
{
#if 1
	size_t xtra;
	xtra = (PAGE_SIZE - (vm_offset_t)sbrk(0)) & PAGE_MASK;
	*basep = (vm_offset_t)sbrk(xtra + bytes) + xtra;
	bzero((void *)*basep, bytes);
#else
	*basep = (vm_offset_t)mmap((void *)0x000000000, bytes,
				   PROT_READ|PROT_WRITE|PROT_EXEC,
				   MAP_ANON|MAP_SHARED, -1, 0);
	if ((void *)*basep == MAP_FAILED)
		return ENOMEM;
#endif
	return 0;
}

void
vkernel_module_memory_free(vm_offset_t base, size_t bytes)
{
#if 0
#if 0
	munmap((void *)base, bytes);
#endif
#endif
}