1 /*
2  * Copyright (c) 2006 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  * $DragonFly: src/sys/platform/vkernel/platform/init.c,v 1.56 2008/05/27 07:48:00 dillon Exp $
35  */
36 
37 #include <sys/types.h>
38 #include <sys/systm.h>
39 #include <sys/kernel.h>
40 #include <sys/stat.h>
41 #include <sys/mman.h>
42 #include <sys/cons.h>
43 #include <sys/random.h>
44 #include <sys/vkernel.h>
45 #include <sys/tls.h>
46 #include <sys/reboot.h>
47 #include <sys/proc.h>
48 #include <sys/msgbuf.h>
49 #include <sys/vmspace.h>
50 #include <sys/socket.h>
51 #include <sys/sockio.h>
52 #include <sys/sysctl.h>
53 #include <sys/un.h>
54 #include <vm/vm_page.h>
55 #include <vm/vm_map.h>
56 #include <sys/mplock2.h>
57 
58 #include <machine/cpu.h>
59 #include <machine/globaldata.h>
60 #include <machine/tls.h>
61 #include <machine/md_var.h>
62 #include <machine/vmparam.h>
63 #include <cpu/specialreg.h>
64 
65 #include <net/if.h>
66 #include <net/if_arp.h>
67 #include <net/ethernet.h>
68 #include <net/bridge/if_bridgevar.h>
69 #include <netinet/in.h>
70 #include <arpa/inet.h>
71 
72 #include <stdio.h>
73 #include <stdlib.h>
74 #include <stdarg.h>
75 #include <stdbool.h>
76 #include <unistd.h>
77 #include <fcntl.h>
78 #include <string.h>
79 #include <err.h>
80 #include <errno.h>
81 #include <assert.h>
82 #include <sysexits.h>
83 
84 vm_paddr_t phys_avail[16];
85 vm_paddr_t Maxmem;
86 vm_paddr_t Maxmem_bytes;
87 long physmem;
88 int MemImageFd = -1;
89 struct vkdisk_info DiskInfo[VKDISK_MAX];
90 int DiskNum;
91 struct vknetif_info NetifInfo[VKNETIF_MAX];
92 int NetifNum;
93 char *pid_file;
94 vm_offset_t KvaStart;
95 vm_offset_t KvaEnd;
96 vm_offset_t KvaSize;
97 vm_offset_t virtual_start;
98 vm_offset_t virtual_end;
99 vm_offset_t virtual2_start;
100 vm_offset_t virtual2_end;
101 vm_offset_t kernel_vm_end;
102 vm_offset_t crashdumpmap;
103 vm_offset_t clean_sva;
104 vm_offset_t clean_eva;
105 struct msgbuf *msgbufp;
106 caddr_t ptvmmap;
107 vpte_t	*KernelPTD;
108 vpte_t	*KernelPTA;	/* Warning: Offset for direct VA translation */
109 void *dmap_min_address;
110 u_int cpu_feature;	/* XXX */
111 int tsc_present;
112 int64_t tsc_frequency;
113 int optcpus;		/* number of cpus - see mp_start() */
114 int lwp_cpu_lock;	/* if/how to lock virtual CPUs to real CPUs */
115 int real_ncpus;		/* number of real CPUs */
116 int next_cpu;		/* next real CPU to lock a virtual CPU to */
117 
118 struct privatespace *CPU_prvspace;
119 
120 static struct trapframe proc0_tf;
121 static void *proc0paddr;
122 
123 static void init_sys_memory(char *imageFile);
124 static void init_kern_memory(void);
125 static void init_globaldata(void);
126 static void init_vkernel(void);
127 static void init_disk(char *diskExp[], int diskFileNum, enum vkdisk_type type);
128 static void init_netif(char *netifExp[], int netifFileNum);
129 static void writepid(void);
130 static void cleanpid(void);
131 static int unix_connect(const char *path);
132 static void usage_err(const char *ctl, ...);
133 static void usage_help(_Bool);
134 
135 static int save_ac;
136 static char **save_av;
137 
138 /*
139  * Kernel startup for virtual kernels - standard main()
140  */
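/*
 * Example invocation (illustrative only; the memory size, disk image
 * and bridge names below are placeholders, not requirements):
 *
 *	vkernel -m 64m -r rootimg.01 -I auto:bridge0
 */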
141 int
142 main(int ac, char **av)
143 {
144 	char *memImageFile = NULL;
145 	char *netifFile[VKNETIF_MAX];
146 	char *diskFile[VKDISK_MAX];
147 	char *cdFile[VKDISK_MAX];
148 	char *suffix;
149 	char *endp;
150 	int netifFileNum = 0;
151 	int diskFileNum = 0;
152 	int cdFileNum = 0;
153 	int bootOnDisk = -1;	/* set below to vcd (0) or vkd (1) */
154 	int c;
155 	int i;
156 	int j;
157 	int n;
158 	int isq;
159 	int real_vkernel_enable;
160 	int supports_sse;
161 	size_t vsize;
162 
163 	save_ac = ac;
164 	save_av = av;
165 
166 	/*
167 	 * Process options
168 	 */
169 	kernel_mem_readonly = 1;
170 #ifdef SMP
171 	optcpus = 2;
172 #endif
173 	lwp_cpu_lock = LCL_NONE;
174 
175 	real_vkernel_enable = 0;
176 	vsize = sizeof(real_vkernel_enable);
177 	sysctlbyname("vm.vkernel_enable", &real_vkernel_enable, &vsize, NULL,0);
178 
179 	if (real_vkernel_enable == 0) {
180 		errx(1, "vm.vkernel_enable is 0, must be set "
181 			"to 1 to execute a vkernel!");
182 	}
183 
184 	real_ncpus = 1;
185 	vsize = sizeof(real_ncpus);
186 	sysctlbyname("hw.ncpu", &real_ncpus, &vsize, NULL, 0);
187 
188 	if (ac < 2)
189 		usage_help(false);
190 
191 	while ((c = getopt(ac, av, "c:hsvl:m:n:r:e:i:p:I:U")) != -1) {
192 		switch(c) {
193 		case 'e':
194 			/*
195 			 * name=value:name=value:name=value...
196 			 * name="value"...
197 			 *
198 			 * Allow values to be quoted but note that shells
199 			 * may remove the quotes, so using this feature
200 			 * to embed colons may require a backslash.
201 			 */
202 			n = strlen(optarg);
203 			isq = 0;
204 			kern_envp = malloc(n + 2);
205 			for (i = j = 0; i < n; ++i) {
206 				if (optarg[i] == '"')
207 					isq ^= 1;
208 				else if (optarg[i] == '\'')
209 					isq ^= 2;
210 				else if (isq == 0 && optarg[i] == ':')
211 					kern_envp[j++] = 0;
212 				else
213 					kern_envp[j++] = optarg[i];
214 			}
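			/*
			 * Terminate the last string and add a second NUL
			 * to terminate the environment as a whole.
			 */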
215 			kern_envp[j++] = 0;
216 			kern_envp[j++] = 0;
217 			break;
218 		case 's':
219 			boothowto |= RB_SINGLE;
220 			break;
221 		case 'v':
222 			bootverbose = 1;
223 			break;
224 		case 'i':
225 			memImageFile = optarg;
226 			break;
227 		case 'I':
228 			if (netifFileNum < VKNETIF_MAX)
229 				netifFile[netifFileNum++] = strdup(optarg);
230 			break;
231 		case 'r':
232 			if (bootOnDisk < 0)
233 				bootOnDisk = 1;
234 			if (diskFileNum + cdFileNum < VKDISK_MAX)
235 				diskFile[diskFileNum++] = strdup(optarg);
236 			break;
237 		case 'c':
238 			if (bootOnDisk < 0)
239 				bootOnDisk = 0;
240 			if (diskFileNum + cdFileNum < VKDISK_MAX)
241 				cdFile[cdFileNum++] = strdup(optarg);
242 			break;
243 		case 'm':
244 			Maxmem_bytes = strtoull(optarg, &suffix, 0);
245 			if (suffix) {
246 				switch(*suffix) {
247 				case 'g':
248 				case 'G':
249 					Maxmem_bytes <<= 30;
250 					break;
251 				case 'm':
252 				case 'M':
253 					Maxmem_bytes <<= 20;
254 					break;
255 				case 'k':
256 				case 'K':
257 					Maxmem_bytes <<= 10;
258 					break;
				case 0:
					/* no suffix, value is in bytes */
					break;
				default:
260 					Maxmem_bytes = 0;
261 					usage_err("Bad maxmem option");
262 					/* NOT REACHED */
263 					break;
264 				}
265 			}
266 			break;
267 		case 'l':
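			/*
			 * -l map[,N]	lock each virtual cpu to a real cpu,
			 *		starting at real cpu N (default 0)
			 * -l any	do not lock virtual cpus to real cpus
			 * -l N		lock all virtual cpus to real cpu N
			 */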
268 			next_cpu = -1;
269 			if (strncmp("map", optarg, 3) == 0) {
270 				lwp_cpu_lock = LCL_PER_CPU;
271 				if (optarg[3] == ',') {
272 					next_cpu = strtol(optarg+4, &endp, 0);
273 					if (*endp != '\0')
274 						usage_err("Bad target CPU number at '%s'", endp);
275 				} else {
276 					next_cpu = 0;
277 				}
278 				if (next_cpu < 0 || next_cpu > real_ncpus - 1)
279 					usage_err("Bad target CPU, valid range is 0-%d", real_ncpus - 1);
280 			} else if (strncmp("any", optarg, 3) == 0) {
281 				lwp_cpu_lock = LCL_NONE;
282 			} else {
283 				lwp_cpu_lock = LCL_SINGLE_CPU;
284 				next_cpu = strtol(optarg, &endp, 0);
285 				if (*endp != '\0')
286 					usage_err("Bad target CPU number at '%s'", endp);
287 				if (next_cpu < 0 || next_cpu > real_ncpus - 1)
288 					usage_err("Bad target CPU, valid range is 0-%d", real_ncpus - 1);
289 			}
290 			break;
291 		case 'n':
292 			/*
293 			 * This value is set up by mp_start(), don't just
294 			 * set ncpus here.
295 			 */
296 #ifdef SMP
297 			optcpus = strtol(optarg, NULL, 0);
298 			if (optcpus < 1 || optcpus > MAXCPU)
299 				usage_err("Bad ncpus, valid range is 1-%d", MAXCPU);
300 #else
301 			if (strtol(optarg, NULL, 0) != 1) {
302 				usage_err("You built a UP vkernel, only 1 cpu!");
303 			}
304 #endif
305 
306 			break;
307 		case 'p':
308 			pid_file = optarg;
309 			break;
310 		case 'U':
311 			kernel_mem_readonly = 0;
312 			break;
313 		case 'h':
314 			usage_help(true);
315 			break;
316 		default:
317 			usage_help(false);
318 		}
319 	}
320 
321 	writepid();
322 	cpu_disable_intr();
323 	init_sys_memory(memImageFile);
324 	init_kern_memory();
325 	init_globaldata();
326 	init_vkernel();
327 	setrealcpu();
328 	init_kqueue();
329 
330 	/*
331 	 * Check TSC
332 	 */
333 	vsize = sizeof(tsc_present);
334 	sysctlbyname("hw.tsc_present", &tsc_present, &vsize, NULL, 0);
335 	vsize = sizeof(tsc_frequency);
336 	sysctlbyname("hw.tsc_frequency", &tsc_frequency, &vsize, NULL, 0);
337 	if (tsc_present)
338 		cpu_feature |= CPUID_TSC;
339 
340 	/*
341 	 * Check SSE
342 	 */
343 	vsize = sizeof(supports_sse);
344 	supports_sse = 0;
345 	sysctlbyname("hw.instruction_sse", &supports_sse, &vsize, NULL, 0);
346 	init_fpu(supports_sse);
347 	if (supports_sse)
348 		cpu_feature |= CPUID_SSE | CPUID_FXSR;
349 
	/*
	 * We boot from the first installed disk.  Whichever of -r or -c
	 * appeared first on the command line determines whether that is
	 * a disk (vkd) or a CD (vcd) image.
	 */
353 	if (bootOnDisk == 1) {
354 		init_disk(diskFile, diskFileNum, VKD_DISK);
355 		init_disk(cdFile, cdFileNum, VKD_CD);
356 	} else {
357 		init_disk(cdFile, cdFileNum, VKD_CD);
358 		init_disk(diskFile, diskFileNum, VKD_DISK);
359 	}
360 	init_netif(netifFile, netifFileNum);
361 	init_exceptions();
362 	mi_startup();
363 	/* NOT REACHED */
364 	exit(EX_SOFTWARE);
365 }
366 
367 /*
368  * Initialize system memory.  This is the virtual kernel's 'RAM'.
369  */
370 static
371 void
372 init_sys_memory(char *imageFile)
373 {
374 	struct stat st;
375 	int i;
376 	int fd;
377 
378 	/*
379 	 * Figure out the system memory image size.  If an image file was
380 	 * specified and -m was not specified, use the image file's size.
381 	 */
382 
383 	if (imageFile && stat(imageFile, &st) == 0 && Maxmem_bytes == 0)
384 		Maxmem_bytes = (vm_paddr_t)st.st_size;
385 	if ((imageFile == NULL || stat(imageFile, &st) < 0) &&
386 	    Maxmem_bytes == 0) {
387 		errx(1, "Cannot create new memory file %s unless "
388 		       "system memory size is specified with -m",
		       imageFile != NULL ? imageFile : "(unspecified)");
390 		/* NOT REACHED */
391 	}
392 
393 	/*
394 	 * Maxmem must be known at this time
395 	 */
396 	if (Maxmem_bytes < 64 * 1024 * 1024 || (Maxmem_bytes & SEG_MASK)) {
397 		errx(1, "Bad maxmem specification: 64MB minimum, "
398 		       "multiples of %dMB only",
399 		       SEG_SIZE / 1024 / 1024);
400 		/* NOT REACHED */
401 	}
402 
403 	/*
404 	 * Generate an image file name if necessary, then open/create the
405 	 * file exclusively locked.  Do not allow multiple virtual kernels
406 	 * to use the same image file.
407 	 */
408 	if (imageFile == NULL) {
409 		for (i = 0; i < 1000000; ++i) {
410 			asprintf(&imageFile, "/var/vkernel/memimg.%06d", i);
411 			fd = open(imageFile,
412 				  O_RDWR|O_CREAT|O_EXLOCK|O_NONBLOCK, 0644);
413 			if (fd < 0 && errno == EWOULDBLOCK) {
414 				free(imageFile);
415 				continue;
416 			}
417 			break;
418 		}
419 	} else {
420 		fd = open(imageFile, O_RDWR|O_CREAT|O_EXLOCK|O_NONBLOCK, 0644);
421 	}
422 	fprintf(stderr, "Using memory file: %s\n", imageFile);
423 	if (fd < 0 || fstat(fd, &st) < 0) {
424 		err(1, "Unable to open/create %s", imageFile);
425 		/* NOT REACHED */
426 	}
427 
428 	/*
429 	 * Truncate or extend the file as necessary.  Clean out the contents
	 * of the file; we want it to be full of holes so we don't waste
431 	 * time reading in data from an old file that we no longer care
432 	 * about.
433 	 */
434 	ftruncate(fd, 0);
435 	ftruncate(fd, Maxmem_bytes);
436 
437 	MemImageFd = fd;
438 	Maxmem = Maxmem_bytes >> PAGE_SHIFT;
439 	physmem = Maxmem;
440 }
441 
442 /*
443  * Initialize kernel memory.  This reserves kernel virtual memory by using
444  * MAP_VPAGETABLE
445  */
446 
447 static
448 void
449 init_kern_memory(void)
450 {
451 	void *base;
452 	void *try;
453 	char dummy;
454 	char *topofstack = &dummy;
455 	int i;
456 	void *firstfree;
457 
458 	/*
459 	 * Memory map our kernel virtual memory space.  Note that the
460 	 * kernel image itself is not made part of this memory for the
461 	 * moment.
462 	 *
463 	 * The memory map must be segment-aligned so we can properly
464 	 * offset KernelPTD.
465 	 *
466 	 * If the system kernel has a different MAXDSIZ, it might not
	 * be possible to map kernel memory in its preferred location.
468 	 * Try a number of different locations.
469 	 */
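	/*
	 * Probe candidate base addresses upward in 512GB steps, starting
	 * at the 512GB mark, for as long as the mapping still fits below
	 * the current stack.
	 */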
470 	try = (void *)(512UL << 30);
471 	base = NULL;
472 	while ((char *)try + KERNEL_KVA_SIZE < topofstack) {
473 		base = mmap(try, KERNEL_KVA_SIZE, PROT_READ|PROT_WRITE,
474 			    MAP_FILE|MAP_SHARED|MAP_VPAGETABLE,
475 			    MemImageFd, (off_t)try);
476 		if (base == try)
477 			break;
478 		if (base != MAP_FAILED)
479 			munmap(base, KERNEL_KVA_SIZE);
480 		try = (char *)try + (512UL << 30);
481 	}
482 	if (base != try) {
483 		err(1, "Unable to mmap() kernel virtual memory!");
484 		/* NOT REACHED */
485 	}
486 	madvise(base, KERNEL_KVA_SIZE, MADV_NOSYNC);
487 	KvaStart = (vm_offset_t)base;
488 	KvaSize = KERNEL_KVA_SIZE;
489 	KvaEnd = KvaStart + KvaSize;
490 
491 	/* cannot use kprintf yet */
492 	printf("KVM mapped at %p-%p\n", (void *)KvaStart, (void *)KvaEnd);
493 
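	/*
	 * Map the memory image file a second time, at file offset 0, to
	 * form the kernel DMAP (direct map) region.
	 */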
494 	/* MAP_FILE? */
495 	dmap_min_address = mmap(0, DMAP_SIZE, PROT_READ|PROT_WRITE,
496 				MAP_NOCORE|MAP_NOSYNC|MAP_SHARED,
497 				MemImageFd, 0);
498 	if (dmap_min_address == MAP_FAILED) {
499 		err(1, "Unable to mmap() kernel DMAP region!");
500 		/* NOT REACHED */
501 	}
502 
503 	firstfree = 0;
504 	pmap_bootstrap((vm_paddr_t *)&firstfree, (int64_t)base);
505 
506 	mcontrol(base, KERNEL_KVA_SIZE, MADV_SETMAP,
507 		 0 | VPTE_R | VPTE_W | VPTE_V);
508 
509 	/*
510 	 * phys_avail[] represents unallocated physical memory.  MI code
511 	 * will use phys_avail[] to create the vm_page array.
512 	 */
513 	phys_avail[0] = (vm_paddr_t)firstfree;
514 	phys_avail[0] = (phys_avail[0] + PAGE_MASK) & ~(vm_paddr_t)PAGE_MASK;
515 	phys_avail[1] = Maxmem_bytes;
516 
517 #if JGV
518 	/*
519 	 * (virtual_start, virtual_end) represent unallocated kernel virtual
520 	 * memory.  MI code will create kernel_map using these parameters.
521 	 */
522 	virtual_start = KvaStart + (long)firstfree;
523 	virtual_start = (virtual_start + PAGE_MASK) & ~(vm_offset_t)PAGE_MASK;
524 	virtual_end = KvaStart + KERNEL_KVA_SIZE;
525 #endif
526 
527 	/*
528 	 * pmap_growkernel() will set the correct value.
529 	 */
530 	kernel_vm_end = 0;
531 
532 	/*
533 	 * Allocate space for process 0's UAREA.
534 	 */
535 	proc0paddr = (void *)virtual_start;
536 	for (i = 0; i < UPAGES; ++i) {
537 		pmap_kenter_quick(virtual_start, phys_avail[0]);
538 		virtual_start += PAGE_SIZE;
539 		phys_avail[0] += PAGE_SIZE;
540 	}
541 
542 	/*
543 	 * crashdumpmap
544 	 */
545 	crashdumpmap = virtual_start;
546 	virtual_start += MAXDUMPPGS * PAGE_SIZE;
547 
548 	/*
549 	 * msgbufp maps the system message buffer
550 	 */
551 	assert((MSGBUF_SIZE & PAGE_MASK) == 0);
552 	msgbufp = (void *)virtual_start;
553 	for (i = 0; i < (MSGBUF_SIZE >> PAGE_SHIFT); ++i) {
554 		pmap_kenter_quick(virtual_start, phys_avail[0]);
555 		virtual_start += PAGE_SIZE;
556 		phys_avail[0] += PAGE_SIZE;
557 	}
558 	msgbufinit(msgbufp, MSGBUF_SIZE);
559 
560 	/*
561 	 * used by kern_memio for /dev/mem access
562 	 */
563 	ptvmmap = (caddr_t)virtual_start;
564 	virtual_start += PAGE_SIZE;
565 
566 	/*
567 	 * Bootstrap the kernel_pmap
568 	 */
569 #if JGV
570 	pmap_bootstrap();
571 #endif
572 }
573 
574 /*
575  * Map the per-cpu globaldata for cpu #0.  Allocate the space using
576  * virtual_start and phys_avail[0]
577  */
578 static
579 void
580 init_globaldata(void)
581 {
582 	int i;
583 	vm_paddr_t pa;
584 	vm_offset_t va;
585 
586 	/*
587 	 * Reserve enough KVA to cover possible cpus.  This is a considerable
588 	 * amount of KVA since the privatespace structure includes two
589 	 * whole page table mappings.
590 	 */
591 	virtual_start = (virtual_start + SEG_MASK) & ~(vm_offset_t)SEG_MASK;
592 	CPU_prvspace = (void *)virtual_start;
593 	virtual_start += sizeof(struct privatespace) * SMP_MAXCPU;
594 
595 	/*
596 	 * Allocate enough physical memory to cover the mdglobaldata
597 	 * portion of the space and the idle stack and map the pages
598 	 * into KVA.  For cpu #0 only.
599 	 */
600 	for (i = 0; i < sizeof(struct mdglobaldata); i += PAGE_SIZE) {
601 		pa = phys_avail[0];
602 		va = (vm_offset_t)&CPU_prvspace[0].mdglobaldata + i;
603 		pmap_kenter_quick(va, pa);
604 		phys_avail[0] += PAGE_SIZE;
605 	}
606 	for (i = 0; i < sizeof(CPU_prvspace[0].idlestack); i += PAGE_SIZE) {
607 		pa = phys_avail[0];
608 		va = (vm_offset_t)&CPU_prvspace[0].idlestack + i;
609 		pmap_kenter_quick(va, pa);
610 		phys_avail[0] += PAGE_SIZE;
611 	}
612 
613 	/*
614 	 * Setup the %gs for cpu #0.  The mycpu macro works after this
615 	 * point.  Note that %fs is used by pthreads.
616 	 */
617 	tls_set_gs(&CPU_prvspace[0], sizeof(struct privatespace));
618 }
619 
620 /*
621  * Initialize very low level systems including thread0, proc0, etc.
622  */
623 static
624 void
625 init_vkernel(void)
626 {
627 	struct mdglobaldata *gd;
628 
629 	gd = &CPU_prvspace[0].mdglobaldata;
630 	bzero(gd, sizeof(*gd));
631 
632 	gd->mi.gd_curthread = &thread0;
633 	thread0.td_gd = &gd->mi;
634 	ncpus = 1;
635 	ncpus2 = 1;	/* rounded down power of 2 */
636 	ncpus_fit = 1;	/* rounded up power of 2 */
637 	/* ncpus2_mask and ncpus_fit_mask are 0 */
638 	init_param1();
639 	gd->mi.gd_prvspace = &CPU_prvspace[0];
640 	mi_gdinit(&gd->mi, 0);
641 	cpu_gdinit(gd, 0);
642 	mi_proc0init(&gd->mi, proc0paddr);
643 	lwp0.lwp_md.md_regs = &proc0_tf;
644 
645 	/*init_locks();*/
646 #ifdef SMP
647 	/*
648 	 * Get the initial mplock with a count of 1 for the BSP.
649 	 * This uses a LOGICAL cpu ID, ie BSP == 0.
650 	 */
651 	cpu_get_initial_mplock();
652 #endif
653 	cninit();
654 	rand_initialize();
655 #if 0	/* #ifdef DDB */
656 	kdb_init();
657 	if (boothowto & RB_KDB)
658 		Debugger("Boot flags requested debugger");
659 #endif
660 	identcpu();
661 #if 0
662 	initializecpu();	/* Initialize CPU registers */
663 #endif
664 	init_param2((phys_avail[1] - phys_avail[0]) / PAGE_SIZE);
665 
666 #if 0
667 	/*
668 	 * Map the message buffer
669 	 */
670 	for (off = 0; off < round_page(MSGBUF_SIZE); off += PAGE_SIZE)
671 		pmap_kenter((vm_offset_t)msgbufp + off, avail_end + off);
672 	msgbufinit(msgbufp, MSGBUF_SIZE);
673 #endif
674 #if 0
675 	thread0.td_pcb_cr3 ... MMU
676 	lwp0.lwp_md.md_regs = &proc0_tf;
677 #endif
678 }
679 
680 /*
681  * Filesystem image paths for the virtual kernel are optional.
 * If specified, each should point to a disk image; the first disk
 * installed becomes the root disk.
684  *
685  * The virtual kernel caches data from our 'disk' just like a normal kernel,
686  * so we do not really want the real kernel to cache the data too.  Use
687  * O_DIRECT to remove the duplication.
688  */
689 static
690 void
691 init_disk(char *diskExp[], int diskFileNum, enum vkdisk_type type)
692 {
693 	int i;
694 
695         if (diskFileNum == 0)
696                 return;
697 
698 	for(i=0; i < diskFileNum; i++){
699 		char *fname;
700 		fname = diskExp[i];
701 
702 		if (fname == NULL) {
703                         warnx("Invalid argument to '-r'");
704                         continue;
705                 }
706 
707 		if (DiskNum < VKDISK_MAX) {
708 			struct stat st;
709 			struct vkdisk_info* info = NULL;
710 			int fd;
711 			size_t l = 0;
712 
713 			if (type == VKD_DISK)
714 			    fd = open(fname, O_RDWR|O_DIRECT, 0644);
715 			else
716 			    fd = open(fname, O_RDONLY|O_DIRECT, 0644);
717 			if (fd < 0 || fstat(fd, &st) < 0) {
718 				err(1, "Unable to open/create %s", fname);
719 				/* NOT REACHED */
720 			}
721 			if (S_ISREG(st.st_mode)) {
722 				if (flock(fd, LOCK_EX|LOCK_NB) < 0) {
723 					errx(1, "Disk image %s is already "
724 						"in use\n", fname);
725 					/* NOT REACHED */
726 				}
727 			}
728 
729 			info = &DiskInfo[DiskNum];
730 			l = strlen(fname);
731 
732 			info->unit = i;
733 			info->fd = fd;
734 			info->type = type;
735 			memcpy(info->fname, fname, l);
736 
737 			if (DiskNum == 0) {
738 				if (type == VKD_CD) {
739 				    rootdevnames[0] = "cd9660:vcd0a";
740 				} else if (type == VKD_DISK) {
741 				    rootdevnames[0] = "ufs:vkd0s0a";
742 				    rootdevnames[1] = "ufs:vkd0s1a";
743 				}
744 			}
745 
746 			DiskNum++;
747 		} else {
748                         warnx("vkd%d (%s) > VKDISK_MAX", DiskNum, fname);
749                         continue;
750 		}
751 	}
752 }
753 
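/*
 * Set or clear interface flags on the specified tap(4) unit.  A negative
 * 'f' clears the flags, a positive 'f' sets them.  's' is the dgram
 * socket used to issue the ioctls.
 */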
754 static
755 int
756 netif_set_tapflags(int tap_unit, int f, int s)
757 {
758 	struct ifreq ifr;
759 	int flags;
760 
761 	bzero(&ifr, sizeof(ifr));
762 
763 	snprintf(ifr.ifr_name, sizeof(ifr.ifr_name), "tap%d", tap_unit);
764 	if (ioctl(s, SIOCGIFFLAGS, &ifr) < 0) {
765 		warn("tap%d: ioctl(SIOCGIFFLAGS) failed", tap_unit);
766 		return -1;
767 	}
768 
769 	/*
770 	 * Adjust if_flags
771 	 *
772 	 * If the flags are already set/cleared, then we return
773 	 * immediately to avoid extra syscalls
774 	 */
775 	flags = (ifr.ifr_flags & 0xffff) | (ifr.ifr_flagshigh << 16);
776 	if (f < 0) {
777 		/* Turn off flags */
778 		f = -f;
779 		if ((flags & f) == 0)
780 			return 0;
781 		flags &= ~f;
782 	} else {
783 		/* Turn on flags */
784 		if (flags & f)
785 			return 0;
786 		flags |= f;
787 	}
788 
789 	/*
	 * Fix up ifreq.ifr_name, since it may have been trashed by the
	 * previous ioctl(SIOCGIFFLAGS)
792 	 */
793 	snprintf(ifr.ifr_name, sizeof(ifr.ifr_name), "tap%d", tap_unit);
794 
795 	ifr.ifr_flags = flags & 0xffff;
796 	ifr.ifr_flagshigh = flags >> 16;
797 	if (ioctl(s, SIOCSIFFLAGS, &ifr) < 0) {
798 		warn("tap%d: ioctl(SIOCSIFFLAGS) failed", tap_unit);
799 		return -1;
800 	}
801 	return 0;
802 }
803 
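/*
 * Assign an IPv4 address and, if 'mask' is non-zero, a netmask to the
 * specified tap(4) unit via SIOCAIFADDR.
 */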
804 static
805 int
806 netif_set_tapaddr(int tap_unit, in_addr_t addr, in_addr_t mask, int s)
807 {
808 	struct ifaliasreq ifra;
809 	struct sockaddr_in *in;
810 
811 	bzero(&ifra, sizeof(ifra));
812 	snprintf(ifra.ifra_name, sizeof(ifra.ifra_name), "tap%d", tap_unit);
813 
814 	/* Setup address */
815 	in = (struct sockaddr_in *)&ifra.ifra_addr;
816 	in->sin_family = AF_INET;
817 	in->sin_len = sizeof(*in);
818 	in->sin_addr.s_addr = addr;
819 
820 	if (mask != 0) {
821 		/* Setup netmask */
822 		in = (struct sockaddr_in *)&ifra.ifra_mask;
823 		in->sin_len = sizeof(*in);
824 		in->sin_addr.s_addr = mask;
825 	}
826 
827 	if (ioctl(s, SIOCAIFADDR, &ifra) < 0) {
828 		warn("tap%d: ioctl(SIOCAIFADDR) failed", tap_unit);
829 		return -1;
830 	}
831 	return 0;
832 }
833 
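/*
 * Add the specified tap(4) unit as a member of the named bridge(4)
 * interface using the BRDGADD command via SIOCSDRVSPEC.
 */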
834 static
835 int
836 netif_add_tap2brg(int tap_unit, const char *ifbridge, int s)
837 {
838 	struct ifbreq ifbr;
839 	struct ifdrv ifd;
840 
841 	bzero(&ifbr, sizeof(ifbr));
842 	snprintf(ifbr.ifbr_ifsname, sizeof(ifbr.ifbr_ifsname),
843 		 "tap%d", tap_unit);
844 
845 	bzero(&ifd, sizeof(ifd));
846 	strlcpy(ifd.ifd_name, ifbridge, sizeof(ifd.ifd_name));
847 	ifd.ifd_cmd = BRDGADD;
848 	ifd.ifd_len = sizeof(ifbr);
849 	ifd.ifd_data = &ifbr;
850 
851 	if (ioctl(s, SIOCSDRVSPEC, &ifd) < 0) {
852 		/*
853 		 * 'errno == EEXIST' means that the tap(4) is already
854 		 * a member of the bridge(4)
855 		 */
856 		if (errno != EEXIST) {
857 			warn("ioctl(%s, SIOCSDRVSPEC) failed", ifbridge);
858 			return -1;
859 		}
860 	}
861 	return 0;
862 }
863 
864 #define TAPDEV_OFLAGS	(O_RDWR | O_NONBLOCK)
865 
866 /*
867  * Locate the first unused tap(4) device file if auto mode is requested,
 * or open the user supplied device file or socket, and bring up the
 * corresponding tap(4) interface.
 *
 * NOTE: Only tap(4) device files and unix domain sockets are supported.
872  */
873 static
874 int
875 netif_open_tap(const char *netif, int *tap_unit, int s)
876 {
877 	char tap_dev[MAXPATHLEN];
878 	int tap_fd, failed;
879 	struct stat st;
880 	char *dname;
881 
882 	*tap_unit = -1;
883 
884 	if (strcmp(netif, "auto") == 0) {
885 		/*
886 		 * Find first unused tap(4) device file
887 		 */
888 		tap_fd = open("/dev/tap", TAPDEV_OFLAGS);
889 		if (tap_fd < 0) {
890 			warnc(errno, "Unable to find a free tap(4)");
891 			return -1;
892 		}
893 	} else {
894 		/*
895 		 * User supplied tap(4) device file or unix socket.
896 		 */
897 		if (netif[0] == '/')	/* Absolute path */
898 			strlcpy(tap_dev, netif, sizeof(tap_dev));
899 		else
900 			snprintf(tap_dev, sizeof(tap_dev), "/dev/%s", netif);
901 
902 		tap_fd = open(tap_dev, TAPDEV_OFLAGS);
903 
904 		/*
		 * If we cannot open it normally, try connecting to it as
		 * a unix domain socket.
906 		 */
907 		if (tap_fd < 0)
908 			tap_fd = unix_connect(tap_dev);
909 
910 		if (tap_fd < 0) {
911 			warn("Unable to open %s", tap_dev);
912 			return -1;
913 		}
914 	}
915 
916 	/*
917 	 * Check whether the device file is a tap(4)
918 	 */
919 	if (fstat(tap_fd, &st) < 0) {
920 		failed = 1;
921 	} else if (S_ISCHR(st.st_mode)) {
922 		dname = fdevname(tap_fd);
923 		if (dname)
924 			dname = strstr(dname, "tap");
925 		if (dname) {
926 			/*
927 			 * Bring up the corresponding tap(4) interface
928 			 */
929 			*tap_unit = strtol(dname + 3, NULL, 10);
930 			printf("TAP UNIT %d\n", *tap_unit);
931 			if (netif_set_tapflags(*tap_unit, IFF_UP, s) == 0)
932 				failed = 0;
933 			else
934 				failed = 1;
935 		} else {
936 			failed = 1;
937 		}
938 	} else if (S_ISSOCK(st.st_mode)) {
939 		/*
940 		 * Special socket connection (typically to vknet).  We
941 		 * do not have to do anything.
942 		 */
943 		failed = 0;
944 	} else {
945 		failed = 1;
946 	}
947 
948 	if (failed) {
949 		warnx("%s is not a tap(4) device or socket", tap_dev);
950 		close(tap_fd);
951 		tap_fd = -1;
952 		*tap_unit = -1;
953 	}
954 	return tap_fd;
955 }
956 
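/*
 * Connect to a unix domain SOCK_SEQPACKET socket (typically exported
 * by vknet), enlarge its send buffer, and make it non-blocking.
 */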
957 static int
958 unix_connect(const char *path)
959 {
960 	struct sockaddr_un sunx;
961 	int len;
962 	int net_fd;
963 	int sndbuf = 262144;
964 	struct stat st;
965 
966 	snprintf(sunx.sun_path, sizeof(sunx.sun_path), "%s", path);
967 	len = offsetof(struct sockaddr_un, sun_path[strlen(sunx.sun_path)]);
968 	++len;	/* include nul */
969 	sunx.sun_family = AF_UNIX;
970 	sunx.sun_len = len;
971 
972 	net_fd = socket(AF_UNIX, SOCK_SEQPACKET, 0);
973 	if (net_fd < 0)
974 		return(-1);
975 	if (connect(net_fd, (void *)&sunx, len) < 0) {
976 		close(net_fd);
977 		return(-1);
978 	}
979 	setsockopt(net_fd, SOL_SOCKET, SO_SNDBUF, &sndbuf, sizeof(sndbuf));
980 	if (fstat(net_fd, &st) == 0)
981 		printf("Network socket buffer: %d bytes\n", st.st_blksize);
982 	fcntl(net_fd, F_SETFL, O_NONBLOCK);
983 	return(net_fd);
984 }
985 
986 #undef TAPDEV_MAJOR
987 #undef TAPDEV_MINOR
988 #undef TAPDEV_OFLAGS
989 
990 /*
 * The following forms are supported:
992  * 1) x.x.x.x             tap(4)'s address is x.x.x.x
993  *
994  * 2) x.x.x.x/z           tap(4)'s address is x.x.x.x
995  *                        tap(4)'s netmask len is z
996  *
997  * 3) x.x.x.x:y.y.y.y     tap(4)'s address is x.x.x.x
998  *                        pseudo netif's address is y.y.y.y
999  *
1000  * 4) x.x.x.x:y.y.y.y/z   tap(4)'s address is x.x.x.x
1001  *                        pseudo netif's address is y.y.y.y
1002  *                        tap(4) and pseudo netif's netmask len are z
1003  *
1004  * 5) bridgeX             tap(4) will be added to bridgeX
1005  *
1006  * 6) bridgeX:y.y.y.y     tap(4) will be added to bridgeX
1007  *                        pseudo netif's address is y.y.y.y
1008  *
1009  * 7) bridgeX:y.y.y.y/z   tap(4) will be added to bridgeX
1010  *                        pseudo netif's address is y.y.y.y
1011  *                        pseudo netif's netmask len is z
1012  */
1013 static
1014 int
1015 netif_init_tap(int tap_unit, in_addr_t *addr, in_addr_t *mask, int s)
1016 {
1017 	in_addr_t tap_addr, netmask, netif_addr;
1018 	int next_netif_addr;
1019 	char *tok, *masklen_str, *ifbridge;
1020 
1021 	*addr = 0;
1022 	*mask = 0;
1023 
1024 	tok = strtok(NULL, ":/");
1025 	if (tok == NULL) {
1026 		/*
1027 		 * Nothing special, simply use tap(4) as backend
1028 		 */
1029 		return 0;
1030 	}
1031 
1032 	if (inet_pton(AF_INET, tok, &tap_addr) > 0) {
1033 		/*
1034 		 * tap(4)'s address is supplied
1035 		 */
1036 		ifbridge = NULL;
1037 
1038 		/*
		 * If there is a next token, it may be the pseudo
		 * netif's address or the netmask length for tap(4)
1041 		 */
1042 		next_netif_addr = 0;
1043 	} else {
1044 		/*
		 * Not a tap(4) address, so assume it is a bridge(4)
		 * interface name
1047 		 */
1048 		tap_addr = 0;
1049 		ifbridge = tok;
1050 
1051 		/*
		 * If there is a next token, it must be the pseudo
		 * netif's address
1054 		 */
1055 		next_netif_addr = 1;
1056 	}
1057 
1058 	netmask = netif_addr = 0;
1059 
1060 	tok = strtok(NULL, ":/");
1061 	if (tok == NULL)
1062 		goto back;
1063 
1064 	if (inet_pton(AF_INET, tok, &netif_addr) <= 0) {
1065 		if (next_netif_addr) {
1066 			warnx("Invalid pseudo netif address: %s", tok);
1067 			return -1;
1068 		}
1069 		netif_addr = 0;
1070 
1071 		/*
		 * The current token is not an address, so it must be the
		 * netmask length
1073 		 */
1074 		masklen_str = tok;
1075 	} else {
1076 		/*
		 * The current token is the pseudo netif's address; if there
		 * is a next token it must be the netmask length
1079 		 */
1080 		masklen_str = strtok(NULL, "/");
1081 	}
1082 
1083 	/* Calculate netmask */
1084 	if (masklen_str != NULL) {
1085 		u_long masklen;
1086 
1087 		masklen = strtoul(masklen_str, NULL, 10);
1088 		if (masklen < 32 && masklen > 0) {
1089 			netmask = htonl(~((1LL << (32 - masklen)) - 1)
1090 					& 0xffffffff);
1091 		} else {
1092 			warnx("Invalid netmask len: %lu", masklen);
1093 			return -1;
1094 		}
1095 	}
1096 
	/* Make sure there are no more tokens left */
1098 	if (strtok(NULL, ":/") != NULL) {
1099 		warnx("Invalid argument to '-I'");
1100 		return -1;
1101 	}
1102 
1103 back:
1104 	if (tap_unit < 0) {
1105 		/* Do nothing */
1106 	} else if (ifbridge == NULL) {
1107 		/* Set tap(4) address/netmask */
1108 		if (netif_set_tapaddr(tap_unit, tap_addr, netmask, s) < 0)
1109 			return -1;
1110 	} else {
1111 		/* Tie tap(4) to bridge(4) */
1112 		if (netif_add_tap2brg(tap_unit, ifbridge, s) < 0)
1113 			return -1;
1114 	}
1115 
1116 	*addr = netif_addr;
1117 	*mask = netmask;
1118 	return 0;
1119 }
1120 
1121 /*
1122  * NetifInfo[] will be filled for pseudo netif initialization.
1123  * NetifNum will be bumped to reflect the number of valid entries
1124  * in NetifInfo[].
1125  */
1126 static
1127 void
1128 init_netif(char *netifExp[], int netifExpNum)
1129 {
1130 	int i, s;
1131 
1132 	if (netifExpNum == 0)
1133 		return;
1134 
1135 	s = socket(AF_INET, SOCK_DGRAM, 0);	/* for ioctl(SIOC) */
1136 	if (s < 0)
1137 		return;
1138 
1139 	for (i = 0; i < netifExpNum; ++i) {
1140 		struct vknetif_info *info;
1141 		in_addr_t netif_addr, netif_mask;
1142 		int tap_fd, tap_unit;
1143 		char *netif;
1144 
1145 		netif = strtok(netifExp[i], ":");
1146 		if (netif == NULL) {
1147 			warnx("Invalid argument to '-I'");
1148 			continue;
1149 		}
1150 
1151 		/*
1152 		 * Open tap(4) device file and bring up the
1153 		 * corresponding interface
1154 		 */
1155 		tap_fd = netif_open_tap(netif, &tap_unit, s);
1156 		if (tap_fd < 0)
1157 			continue;
1158 
1159 		/*
1160 		 * Initialize tap(4) and get address/netmask
1161 		 * for pseudo netif
1162 		 *
		 * NB: The remainder of netifExp[i] is passed to
		 *     netif_init_tap() implicitly via strtok().
1165 		 */
1166 		if (netif_init_tap(tap_unit, &netif_addr, &netif_mask, s) < 0) {
1167 			/*
1168 			 * NB: Closing tap(4) device file will bring
1169 			 *     down the corresponding interface
1170 			 */
1171 			close(tap_fd);
1172 			continue;
1173 		}
1174 
1175 		info = &NetifInfo[NetifNum];
1176 		info->tap_fd = tap_fd;
1177 		info->tap_unit = tap_unit;
1178 		info->netif_addr = netif_addr;
1179 		info->netif_mask = netif_mask;
1180 
1181 		NetifNum++;
1182 		if (NetifNum >= VKNETIF_MAX)	/* XXX will this happen? */
1183 			break;
1184 	}
1185 	close(s);
1186 }
1187 
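/*
 * Write our process id to the pid file specified with -p, if any.
 */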
1188 static
1189 void
1190 writepid( void )
1191 {
1192 	pid_t self;
1193 	FILE *fp;
1194 
1195 	if (pid_file != NULL) {
1196 		self = getpid();
1197 		fp = fopen(pid_file, "w");
1198 
1199 		if (fp != NULL) {
1200 			fprintf(fp, "%ld\n", (long)self);
1201 			fclose(fp);
1202 		}
1203 		else {
1204 			perror("Warning: couldn't open pidfile");
1205 		}
1206 	}
1207 }
1208 
1209 static
1210 void
1211 cleanpid( void )
1212 {
1213 	if (pid_file != NULL) {
1214 		if ( unlink(pid_file) != 0 )
1215 			perror("Warning: couldn't remove pidfile");
1216 	}
1217 }
1218 
1219 static
1220 void
1221 usage_err(const char *ctl, ...)
1222 {
1223 	va_list va;
1224 
1225 	va_start(va, ctl);
1226 	vfprintf(stderr, ctl, va);
1227 	va_end(va);
1228 	fprintf(stderr, "\n");
1229 	exit(EX_USAGE);
1230 }
1231 
1232 static
1233 void
1234 usage_help(_Bool help)
1235 {
1236 	fprintf(stderr, "Usage: %s [-hsUv] [-c file] [-e name=value:name=value:...]\n"
1237 	    "\t[-i file] [-I interface[:address1[:address2][/netmask]]] [-l cpulock]\n"
1238 	    "\t[-m size] [-n numcpus] [-p file] [-r file]\n", save_av[0]);
1239 
1240 	if (help)
1241 		fprintf(stderr, "\nArguments:\n"
1242 		    "\t-c\tSpecify a readonly CD-ROM image file to be used by the kernel.\n"
1243 		    "\t-e\tSpecify an environment to be used by the kernel.\n"
1244 		    "\t-h\tThis list of options.\n"
1245 		    "\t-i\tSpecify a memory image file to be used by the virtual kernel.\n"
1246 		    "\t-I\tCreate a virtual network device.\n"
1247 		    "\t-l\tSpecify which, if any, real CPUs to lock virtual CPUs to.\n"
1248 		    "\t-m\tSpecify the amount of memory to be used by the kernel in bytes.\n"
1249 		    "\t-n\tSpecify the number of CPUs you wish to emulate.\n"
1250 		    "\t-p\tSpecify a file in which to store the process ID.\n"
1251 		    "\t-r\tSpecify a R/W disk image file to be used by the kernel.\n"
1252 		    "\t-s\tBoot into single-user mode.\n"
1253 		    "\t-U\tEnable writing to kernel memory and module loading.\n"
1254 		    "\t-v\tTurn on verbose booting.\n");
1255 
1256 	exit(EX_USAGE);
1257 }
1258 
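/*
 * Reboot the vkernel by closing all descriptors above stderr and
 * re-executing ourselves with the original arguments.
 */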
1259 void
1260 cpu_reset(void)
1261 {
1262 	kprintf("cpu reset, rebooting vkernel\n");
1263 	closefrom(3);
1264 	cleanpid();
1265 	execv(save_av[0], save_av);
1266 }
1267 
1268 void
1269 cpu_halt(void)
1270 {
1271 	kprintf("cpu halt, exiting vkernel\n");
1272 	cleanpid();
1273 	exit(EX_OK);
1274 }
1275 
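/*
 * Lock the current virtual cpu thread to a real cpu according to the
 * -l option (lwp_cpu_lock), using usched_set().
 */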
1276 void
1277 setrealcpu(void)
1278 {
1279 	switch(lwp_cpu_lock) {
1280 	case LCL_PER_CPU:
1281 		if (bootverbose)
1282 			kprintf("Locking CPU%d to real cpu %d\n",
1283 				mycpuid, next_cpu);
1284 		usched_set(getpid(), USCHED_SET_CPU, &next_cpu, sizeof(next_cpu));
1285 		next_cpu++;
1286 		if (next_cpu >= real_ncpus)
1287 			next_cpu = 0;
1288 		break;
1289 	case LCL_SINGLE_CPU:
1290 		if (bootverbose)
1291 			kprintf("Locking CPU%d to real cpu %d\n",
1292 				mycpuid, next_cpu);
1293 		usched_set(getpid(), USCHED_SET_CPU, &next_cpu, sizeof(next_cpu));
1294 		break;
1295 	default:
1296 		/* do not map virtual cpus to real cpus */
1297 		break;
1298 	}
1299 }
1300 
1301 /*
1302  * Allocate and free memory for module loading.  The loaded module
1303  * has to be placed somewhere near the current kernel binary load
1304  * point or the relocations will not work.
1305  *
1306  * I'm not sure why this isn't working.
1307  */
1308 int
1309 vkernel_module_memory_alloc(vm_offset_t *basep, size_t bytes)
1310 {
1311 	kprintf("module loading for vkernel64's not currently supported\n");
1312 	*basep = 0;
1313 	return ENOMEM;
1314 #if 0
1315 #if 1
1316 	size_t xtra;
1317 	xtra = (PAGE_SIZE - (vm_offset_t)sbrk(0)) & PAGE_MASK;
1318 	*basep = (vm_offset_t)sbrk(xtra + bytes) + xtra;
1319 	bzero((void *)*basep, bytes);
1320 #else
1321 	*basep = (vm_offset_t)mmap((void *)0x000000000, bytes,
1322 				   PROT_READ|PROT_WRITE|PROT_EXEC,
1323 				   MAP_ANON|MAP_SHARED, -1, 0);
1324 	if ((void *)*basep == MAP_FAILED)
1325 		return ENOMEM;
1326 #endif
1327 	kprintf("basep %p %p %zd\n",
1328 		(void *)vkernel_module_memory_alloc, (void *)*basep, bytes);
1329 	return 0;
1330 #endif
1331 }
1332 
1333 void
1334 vkernel_module_memory_free(vm_offset_t base, size_t bytes)
1335 {
1336 #if 0
1337 #if 0
1338 	munmap((void *)base, bytes);
1339 #endif
1340 #endif
1341 }
1342