1 /*
2  * Copyright (c) 2006 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  */
34 
35 #include <sys/types.h>
36 #include <sys/systm.h>
37 #include <sys/kernel.h>
38 #include <sys/stat.h>
39 #include <sys/mman.h>
40 #include <sys/cons.h>
41 #include <sys/random.h>
42 #include <sys/vkernel.h>
43 #include <sys/tls.h>
44 #include <sys/reboot.h>
45 #include <sys/proc.h>
46 #include <sys/msgbuf.h>
47 #include <sys/vmspace.h>
48 #include <sys/socket.h>
49 #include <sys/sockio.h>
50 #include <sys/sysctl.h>
51 #include <sys/un.h>
52 #include <vm/vm_page.h>
53 #include <vm/vm_map.h>
54 #include <sys/mplock2.h>
55 
56 #include <machine/cpu.h>
57 #include <machine/globaldata.h>
58 #include <machine/tls.h>
59 #include <machine/md_var.h>
60 #include <machine/vmparam.h>
61 #include <cpu/specialreg.h>
62 
63 #include <net/if.h>
64 #include <net/if_arp.h>
65 #include <net/ethernet.h>
66 #include <net/bridge/if_bridgevar.h>
67 #include <netinet/in.h>
68 #include <arpa/inet.h>
69 
70 #include <stdio.h>
71 #include <stdlib.h>
72 #include <stdarg.h>
73 #include <stdbool.h>
74 #include <unistd.h>
75 #include <fcntl.h>
76 #include <string.h>
77 #include <err.h>
78 #include <errno.h>
79 #include <assert.h>
80 #include <sysexits.h>
81 
82 vm_paddr_t phys_avail[16];
83 vm_paddr_t Maxmem;
84 vm_paddr_t Maxmem_bytes;
85 long physmem;
86 int MemImageFd = -1;
87 struct vkdisk_info DiskInfo[VKDISK_MAX];
88 int DiskNum;
89 struct vknetif_info NetifInfo[VKNETIF_MAX];
90 int NetifNum;
91 char *pid_file;
92 vm_offset_t KvaStart;
93 vm_offset_t KvaEnd;
94 vm_offset_t KvaSize;
95 vm_offset_t virtual_start;
96 vm_offset_t virtual_end;
97 vm_offset_t virtual2_start;
98 vm_offset_t virtual2_end;
99 vm_offset_t kernel_vm_end;
100 vm_offset_t crashdumpmap;
101 vm_offset_t clean_sva;
102 vm_offset_t clean_eva;
103 struct msgbuf *msgbufp;
104 caddr_t ptvmmap;
105 vpte_t	*KernelPTD;
106 vpte_t	*KernelPTA;	/* Warning: Offset for direct VA translation */
107 void *dmap_min_address;
108 u_int cpu_feature;	/* XXX */
109 int tsc_present;
110 int64_t tsc_frequency;
111 int optcpus;		/* number of cpus - see mp_start() */
112 int lwp_cpu_lock;	/* if/how to lock virtual CPUs to real CPUs */
113 int real_ncpus;		/* number of real CPUs */
114 int next_cpu;		/* next real CPU to lock a virtual CPU to */
115 int vkernel_b_arg;	/* -b argument - number of logical CPU bits - only SMP */
116 int vkernel_B_arg;	/* -B argument - number of core bits - only SMP */
117 
118 struct privatespace *CPU_prvspace;
119 
120 static struct trapframe proc0_tf;
121 static void *proc0paddr;
122 
123 static void init_sys_memory(char *imageFile);
124 static void init_kern_memory(void);
125 static void init_globaldata(void);
126 static void init_vkernel(void);
127 static void init_disk(char *diskExp[], int diskFileNum, enum vkdisk_type type);
128 static void init_netif(char *netifExp[], int netifFileNum);
129 static void writepid(void);
130 static void cleanpid(void);
131 static int unix_connect(const char *path);
132 static void usage_err(const char *ctl, ...);
133 static void usage_help(_Bool);
134 static void init_locks(void);
135 
136 static int save_ac;
137 static char **save_av;
138 
139 /*
140  * Kernel startup for virtual kernels - standard main()
141  */
142 int
143 main(int ac, char **av)
144 {
145 	char *memImageFile = NULL;
146 	char *netifFile[VKNETIF_MAX];
147 	char *diskFile[VKDISK_MAX];
148 	char *cdFile[VKDISK_MAX];
149 	char *suffix;
150 	char *endp;
151 	char *tmp;
152 	char *tok;
153 	int netifFileNum = 0;
154 	int diskFileNum = 0;
155 	int cdFileNum = 0;
156 	int bootOnDisk = -1;	/* set below to vcd (0) or vkd (1) */
157 	int c;
158 	int i;
159 	int j;
160 	int n;
161 	int isq;
162 	int pos;
163 	int eflag;
164 	int real_vkernel_enable;
165 	int supports_sse;
166 	size_t vsize;
167 	size_t kenv_size;
168 	size_t kenv_size2;
169 
170 	save_ac = ac;
171 	save_av = av;
172 	eflag = 0;
173 	pos = 0;
174 	kenv_size = 0;
175 
176 	/*
177 	 * Process options
178 	 */
179 	kernel_mem_readonly = 1;
180 	optcpus = 2;
181 	vkernel_b_arg = 0;
182 	vkernel_B_arg = 0;
183 	lwp_cpu_lock = LCL_NONE;
184 
185 	real_vkernel_enable = 0;
186 	vsize = sizeof(real_vkernel_enable);
187 	sysctlbyname("vm.vkernel_enable", &real_vkernel_enable, &vsize, NULL,0);
188 
189 	if (real_vkernel_enable == 0) {
190 		errx(1, "vm.vkernel_enable is 0, must be set "
191 			"to 1 to execute a vkernel!");
192 	}
193 
194 	real_ncpus = 1;
195 	vsize = sizeof(real_ncpus);
196 	sysctlbyname("hw.ncpu", &real_ncpus, &vsize, NULL, 0);
197 
198 	if (ac < 2)
199 		usage_help(false);
200 
201 	while ((c = getopt(ac, av, "c:hsvl:m:n:r:e:i:p:I:Ub:B:")) != -1) {
202 		switch(c) {
203 		case 'e':
204 			/*
205 			 * name=value:name=value:name=value...
206 			 * name="value"...
207 			 *
208 			 * Allow values to be quoted but note that shells
209 			 * may remove the quotes, so using this feature
210 			 * to embed colons may require a backslash.
211 			 */
212 			n = strlen(optarg);
213 			isq = 0;
214 
215 			if (eflag == 0) {
216 				kenv_size = n + 2;
217 				kern_envp = malloc(kenv_size);
218 				if (kern_envp == NULL)
219 					errx(1, "Couldn't allocate %zu bytes for kern_envp", kenv_size);
220 			} else {
221 				kenv_size2 = kenv_size + n + 1;
222 				pos = kenv_size - 1;
223 				if ((tmp = realloc(kern_envp, kenv_size2)) == NULL)
224 					errx(1, "Couldn't reallocate %zu bytes for kern_envp", kenv_size2);
225 				kern_envp = tmp;
226 				kenv_size = kenv_size2;
227 			}
228 
229 			for (i = 0, j = pos; i < n; ++i) {
230 				if (optarg[i] == '"')
231 					isq ^= 1;
232 				else if (optarg[i] == '\'')
233 					isq ^= 2;
234 				else if (isq == 0 && optarg[i] == ':')
235 					kern_envp[j++] = 0;
236 				else
237 					kern_envp[j++] = optarg[i];
238 			}
239 			kern_envp[j++] = 0;
240 			kern_envp[j++] = 0;
241 			eflag++;
242 			break;
243 		case 's':
244 			boothowto |= RB_SINGLE;
245 			break;
246 		case 'v':
247 			bootverbose = 1;
248 			break;
249 		case 'i':
250 			memImageFile = optarg;
251 			break;
252 		case 'I':
253 			if (netifFileNum < VKNETIF_MAX)
254 				netifFile[netifFileNum++] = strdup(optarg);
255 			break;
256 		case 'r':
257 			if (bootOnDisk < 0)
258 				bootOnDisk = 1;
259 			if (diskFileNum + cdFileNum < VKDISK_MAX)
260 				diskFile[diskFileNum++] = strdup(optarg);
261 			break;
262 		case 'c':
263 			if (bootOnDisk < 0)
264 				bootOnDisk = 0;
265 			if (diskFileNum + cdFileNum < VKDISK_MAX)
266 				cdFile[cdFileNum++] = strdup(optarg);
267 			break;
268 		case 'm':
269 			Maxmem_bytes = strtoull(optarg, &suffix, 0);
270 			if (suffix) {
271 				switch(*suffix) {
272 				case 'g':
273 				case 'G':
274 					Maxmem_bytes <<= 30;
275 					break;
276 				case 'm':
277 				case 'M':
278 					Maxmem_bytes <<= 20;
279 					break;
280 				case 'k':
281 				case 'K':
282 					Maxmem_bytes <<= 10;
283 					break;
284 				default:
285 					Maxmem_bytes = 0;
286 					usage_err("Bad maxmem option");
287 					/* NOT REACHED */
288 					break;
289 				}
290 			}
291 			break;
292 		case 'l':
293 			next_cpu = -1;
294 			if (strncmp("map", optarg, 3) == 0) {
295 				lwp_cpu_lock = LCL_PER_CPU;
296 				if (optarg[3] == ',') {
297 					next_cpu = strtol(optarg+4, &endp, 0);
298 					if (*endp != '\0')
299 						usage_err("Bad target CPU number at '%s'", endp);
300 				} else {
301 					next_cpu = 0;
302 				}
303 				if (next_cpu < 0 || next_cpu > real_ncpus - 1)
304 					usage_err("Bad target CPU, valid range is 0-%d", real_ncpus - 1);
305 			} else if (strncmp("any", optarg, 3) == 0) {
306 				lwp_cpu_lock = LCL_NONE;
307 			} else {
308 				lwp_cpu_lock = LCL_SINGLE_CPU;
309 				next_cpu = strtol(optarg, &endp, 0);
310 				if (*endp != '\0')
311 					usage_err("Bad target CPU number at '%s'", endp);
312 				if (next_cpu < 0 || next_cpu > real_ncpus - 1)
313 					usage_err("Bad target CPU, valid range is 0-%d", real_ncpus - 1);
314 			}
315 			break;
316 		case 'n':
317 			/*
318 			 * This value is set up by mp_start(); don't just
319 			 * set ncpus here.
320 			 */
321 			tok = strtok(optarg, ":");
322 			optcpus = strtol(tok, NULL, 0);
323 			if (optcpus < 1 || optcpus > MAXCPU)
324 				usage_err("Bad ncpus, valid range is 1-%d", MAXCPU);
325 
326 			/* :lbits argument */
327 			tok = strtok(NULL, ":");
328 			if (tok != NULL) {
329 				vkernel_b_arg = strtol(tok, NULL, 0);
330 
331 				/* :cbits argument */
332 				tok = strtok(NULL, ":");
333 				if (tok != NULL) {
334 					vkernel_B_arg = strtol(tok, NULL, 0);
335 				}
336 
337 			}
338 			break;
339 		case 'p':
340 			pid_file = optarg;
341 			break;
342 		case 'U':
343 			kernel_mem_readonly = 0;
344 			break;
345 		case 'h':
346 			usage_help(true);
347 			break;
348 		default:
349 			usage_help(false);
350 		}
351 	}
352 
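	/*
	 * Option processing is done.  Write the pid file, then bring up
	 * the virtual kernel: the system memory image, kernel KVA, per-cpu
	 * globaldata, low-level kernel structures, the real-cpu binding,
	 * and the kqueue used for host event delivery.
	 */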
353 	writepid();
354 	cpu_disable_intr();
355 	init_sys_memory(memImageFile);
356 	init_kern_memory();
357 	init_globaldata();
358 	init_vkernel();
359 	setrealcpu();
360 	init_kqueue();
361 
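	/* we always run as a guest on top of a real (host) kernel */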
362 	vmm_guest = 1;
363 
364 	/*
365 	 * Check TSC
366 	 */
367 	vsize = sizeof(tsc_present);
368 	sysctlbyname("hw.tsc_present", &tsc_present, &vsize, NULL, 0);
369 	vsize = sizeof(tsc_frequency);
370 	sysctlbyname("hw.tsc_frequency", &tsc_frequency, &vsize, NULL, 0);
371 	if (tsc_present)
372 		cpu_feature |= CPUID_TSC;
373 
374 	/*
375 	 * Check SSE
376 	 */
377 	vsize = sizeof(supports_sse);
378 	supports_sse = 0;
379 	sysctlbyname("hw.instruction_sse", &supports_sse, &vsize, NULL, 0);
380 	init_fpu(supports_sse);
381 	if (supports_sse)
382 		cpu_feature |= CPUID_SSE | CPUID_FXSR;
383 
384 	/*
385 	 * Boot from whichever device class (-r disk or -c CD) was given first.
386 	 */
387 	if (bootOnDisk == 1) {
388 		init_disk(diskFile, diskFileNum, VKD_DISK);
389 		init_disk(cdFile, cdFileNum, VKD_CD);
390 	} else {
391 		init_disk(cdFile, cdFileNum, VKD_CD);
392 		init_disk(diskFile, diskFileNum, VKD_DISK);
393 	}
394 	init_netif(netifFile, netifFileNum);
395 	init_exceptions();
396 	mi_startup();
397 	/* NOT REACHED */
398 	exit(EX_SOFTWARE);
399 }
400 
401 /*
402  * Initialize system memory.  This is the virtual kernel's 'RAM'.
403  */
404 static
405 void
406 init_sys_memory(char *imageFile)
407 {
408 	struct stat st;
409 	int i;
410 	int fd;
411 
412 	/*
413 	 * Figure out the system memory image size.  If an image file was
414 	 * specified and -m was not specified, use the image file's size.
415 	 */
416 	if (imageFile && stat(imageFile, &st) == 0 && Maxmem_bytes == 0)
417 		Maxmem_bytes = (vm_paddr_t)st.st_size;
418 	if ((imageFile == NULL || stat(imageFile, &st) < 0) &&
419 	    Maxmem_bytes == 0) {
420 		errx(1, "Cannot create new memory file %s unless "
421 		       "system memory size is specified with -m",
422 		       imageFile);
423 		/* NOT REACHED */
424 	}
425 
426 	/*
427 	 * Maxmem must be known at this time
428 	 */
429 	if (Maxmem_bytes < 64 * 1024 * 1024 || (Maxmem_bytes & SEG_MASK)) {
430 		errx(1, "Bad maxmem specification: 64MB minimum, "
431 		       "multiples of %dMB only",
432 		       SEG_SIZE / 1024 / 1024);
433 		/* NOT REACHED */
434 	}
435 
436 	/*
437 	 * Generate an image file name if necessary, then open/create the
438 	 * file exclusively locked.  Do not allow multiple virtual kernels
439 	 * to use the same image file.
440 	 *
441 	 * Don't iterate through a million files if we do not have write
442 	 * access to the directory; stop if our open() failed on a non-existent
443 	 * file, since opens can otherwise fail for any number of reasons.
444 	 */
445 	if (imageFile == NULL) {
446 		for (i = 0; i < 1000000; ++i) {
447 			asprintf(&imageFile, "/var/vkernel/memimg.%06d", i);
448 			fd = open(imageFile,
449 				  O_RDWR|O_CREAT|O_EXLOCK|O_NONBLOCK, 0644);
450 			if (fd < 0 && stat(imageFile, &st) == 0) {
451 				free(imageFile);
452 				continue;
453 			}
454 			break;
455 		}
456 	} else {
457 		fd = open(imageFile, O_RDWR|O_CREAT|O_EXLOCK|O_NONBLOCK, 0644);
458 	}
459 	fprintf(stderr, "Using memory file: %s\n", imageFile);
460 	if (fd < 0 || fstat(fd, &st) < 0) {
461 		err(1, "Unable to open/create %s", imageFile);
462 		/* NOT REACHED */
463 	}
464 
465 	/*
466 	 * Truncate or extend the file as necessary.  Clean out the contents
467 	 * of the file; we want it to be full of holes so we don't waste
468 	 * time reading in data from an old file that we no longer care
469 	 * about.
470 	 */
471 	ftruncate(fd, 0);
472 	ftruncate(fd, Maxmem_bytes);
473 
474 	MemImageFd = fd;
475 	Maxmem = Maxmem_bytes >> PAGE_SHIFT;
476 	physmem = Maxmem;
477 }
478 
479 /*
480  * Initialize kernel memory.  This reserves kernel virtual memory by using
481  * MAP_VPAGETABLE.
482  */
483 
484 static
485 void
486 init_kern_memory(void)
487 {
488 	void *base;
489 	void *try;
490 	char dummy;
491 	char *topofstack = &dummy;
492 	int i;
493 	void *firstfree;
494 
495 	/*
496 	 * Memory map our kernel virtual memory space.  Note that the
497 	 * kernel image itself is not made part of this memory for the
498 	 * moment.
499 	 *
500 	 * The memory map must be segment-aligned so we can properly
501 	 * offset KernelPTD.
502 	 *
503 	 * If the system kernel has a different MAXDSIZ, it might not
504 	 * be possible to map kernel memory in its preferred location.
505 	 * Try a number of different locations.
506 	 */
507 	try = (void *)(512UL << 30);
508 	base = NULL;
509 	while ((char *)try + KERNEL_KVA_SIZE < topofstack) {
510 		base = mmap(try, KERNEL_KVA_SIZE, PROT_READ|PROT_WRITE,
511 			    MAP_FILE|MAP_SHARED|MAP_VPAGETABLE,
512 			    MemImageFd, (off_t)try);
513 		if (base == try)
514 			break;
515 		if (base != MAP_FAILED)
516 			munmap(base, KERNEL_KVA_SIZE);
517 		try = (char *)try + (512UL << 30);
518 	}
519 	if (base != try) {
520 		err(1, "Unable to mmap() kernel virtual memory!");
521 		/* NOT REACHED */
522 	}
523 	madvise(base, KERNEL_KVA_SIZE, MADV_NOSYNC);
524 	KvaStart = (vm_offset_t)base;
525 	KvaSize = KERNEL_KVA_SIZE;
526 	KvaEnd = KvaStart + KvaSize;
527 
528 	/* cannot use kprintf yet */
529 	printf("KVM mapped at %p-%p\n", (void *)KvaStart, (void *)KvaEnd);
530 
531 	/* MAP_FILE? */
532 	dmap_min_address = mmap(0, DMAP_SIZE, PROT_READ|PROT_WRITE,
533 				MAP_NOCORE|MAP_NOSYNC|MAP_SHARED,
534 				MemImageFd, 0);
535 	if (dmap_min_address == MAP_FAILED) {
536 		err(1, "Unable to mmap() kernel DMAP region!");
537 		/* NOT REACHED */
538 	}
539 
540 	firstfree = NULL;
541 	pmap_bootstrap((vm_paddr_t *)&firstfree, (int64_t)base);
542 
543 	mcontrol(base, KERNEL_KVA_SIZE, MADV_SETMAP,
544 		 0 | VPTE_R | VPTE_W | VPTE_V);
545 
546 	/*
547 	 * phys_avail[] represents unallocated physical memory.  MI code
548 	 * will use phys_avail[] to create the vm_page array.
549 	 */
550 	phys_avail[0] = (vm_paddr_t)firstfree;
551 	phys_avail[0] = (phys_avail[0] + PAGE_MASK) & ~(vm_paddr_t)PAGE_MASK;
552 	phys_avail[1] = Maxmem_bytes;
553 
554 #if JGV
555 	/*
556 	 * (virtual_start, virtual_end) represent unallocated kernel virtual
557 	 * memory.  MI code will create kernel_map using these parameters.
558 	 */
559 	virtual_start = KvaStart + (long)firstfree;
560 	virtual_start = (virtual_start + PAGE_MASK) & ~(vm_offset_t)PAGE_MASK;
561 	virtual_end = KvaStart + KERNEL_KVA_SIZE;
562 #endif
563 
564 	/*
565 	 * pmap_growkernel() will set the correct value.
566 	 */
567 	kernel_vm_end = 0;
568 
569 	/*
570 	 * Allocate space for process 0's UAREA.
571 	 */
572 	proc0paddr = (void *)virtual_start;
573 	for (i = 0; i < UPAGES; ++i) {
574 		pmap_kenter_quick(virtual_start, phys_avail[0]);
575 		virtual_start += PAGE_SIZE;
576 		phys_avail[0] += PAGE_SIZE;
577 	}
578 
579 	/*
580 	 * crashdumpmap
581 	 */
582 	crashdumpmap = virtual_start;
583 	virtual_start += MAXDUMPPGS * PAGE_SIZE;
584 
585 	/*
586 	 * msgbufp maps the system message buffer
587 	 */
588 	assert((MSGBUF_SIZE & PAGE_MASK) == 0);
589 	msgbufp = (void *)virtual_start;
590 	for (i = 0; i < (MSGBUF_SIZE >> PAGE_SHIFT); ++i) {
591 		pmap_kenter_quick(virtual_start, phys_avail[0]);
592 		virtual_start += PAGE_SIZE;
593 		phys_avail[0] += PAGE_SIZE;
594 	}
595 	msgbufinit(msgbufp, MSGBUF_SIZE);
596 
597 	/*
598 	 * used by kern_memio for /dev/mem access
599 	 */
600 	ptvmmap = (caddr_t)virtual_start;
601 	virtual_start += PAGE_SIZE;
602 
603 	/*
604 	 * Bootstrap the kernel_pmap
605 	 */
606 #if JGV
607 	pmap_bootstrap();
608 #endif
609 }
610 
611 /*
612  * Map the per-cpu globaldata for cpu #0.  Allocate the space using
613  * virtual_start and phys_avail[0]
614  */
615 static
616 void
617 init_globaldata(void)
618 {
619 	int i;
620 	vm_paddr_t pa;
621 	vm_offset_t va;
622 
623 	/*
624 	 * Reserve enough KVA to cover possible cpus.  This is a considerable
625 	 * amount of KVA since the privatespace structure includes two
626 	 * whole page table mappings.
627 	 */
628 	virtual_start = (virtual_start + SEG_MASK) & ~(vm_offset_t)SEG_MASK;
629 	CPU_prvspace = (void *)virtual_start;
630 	virtual_start += sizeof(struct privatespace) * SMP_MAXCPU;
631 
632 	/*
633 	 * Allocate enough physical memory to cover the mdglobaldata
634 	 * portion of the space and the idle stack and map the pages
635 	 * into KVA.  For cpu #0 only.
636 	 */
637 	for (i = 0; i < sizeof(struct mdglobaldata); i += PAGE_SIZE) {
638 		pa = phys_avail[0];
639 		va = (vm_offset_t)&CPU_prvspace[0].mdglobaldata + i;
640 		pmap_kenter_quick(va, pa);
641 		phys_avail[0] += PAGE_SIZE;
642 	}
643 	for (i = 0; i < sizeof(CPU_prvspace[0].idlestack); i += PAGE_SIZE) {
644 		pa = phys_avail[0];
645 		va = (vm_offset_t)&CPU_prvspace[0].idlestack + i;
646 		pmap_kenter_quick(va, pa);
647 		phys_avail[0] += PAGE_SIZE;
648 	}
649 
650 	/*
651 	 * Setup the %gs for cpu #0.  The mycpu macro works after this
652 	 * point.  Note that %fs is used by pthreads.
653 	 */
654 	tls_set_gs(&CPU_prvspace[0], sizeof(struct privatespace));
655 }
656 
657 
658 /*
659  * Initialize pool tokens and other necessary locks
660  */
661 static void
662 init_locks(void)
663 {
664 
665 	/*
666 	 * Get the initial mplock with a count of 1 for the BSP.
667 	 * This uses a LOGICAL cpu ID, i.e. BSP == 0.
668 	 */
669 	cpu_get_initial_mplock();
670 
671 	/* our token pool needs to work early */
672 	lwkt_token_pool_init();
673 
674 }
675 
676 
677 /*
678  * Initialize very low level systems including thread0, proc0, etc.
679  */
680 static
681 void
682 init_vkernel(void)
683 {
684 	struct mdglobaldata *gd;
685 
686 	gd = &CPU_prvspace[0].mdglobaldata;
687 	bzero(gd, sizeof(*gd));
688 
689 	gd->mi.gd_curthread = &thread0;
690 	thread0.td_gd = &gd->mi;
691 	ncpus = 1;
692 	ncpus2 = 1;	/* rounded down power of 2 */
693 	ncpus_fit = 1;	/* rounded up power of 2 */
694 	/* ncpus2_mask and ncpus_fit_mask are 0 */
695 	init_param1();
696 	gd->mi.gd_prvspace = &CPU_prvspace[0];
697 	mi_gdinit(&gd->mi, 0);
698 	cpu_gdinit(gd, 0);
699 	mi_proc0init(&gd->mi, proc0paddr);
700 	lwp0.lwp_md.md_regs = &proc0_tf;
701 
702 	init_locks();
703 	cninit();
704 	rand_initialize();
705 #if 0	/* #ifdef DDB */
706 	kdb_init();
707 	if (boothowto & RB_KDB)
708 		Debugger("Boot flags requested debugger");
709 #endif
710 	identcpu();
711 #if 0
712 	initializecpu();	/* Initialize CPU registers */
713 #endif
714 	init_param2((phys_avail[1] - phys_avail[0]) / PAGE_SIZE);
715 
716 #if 0
717 	/*
718 	 * Map the message buffer
719 	 */
720 	for (off = 0; off < round_page(MSGBUF_SIZE); off += PAGE_SIZE)
721 		pmap_kenter((vm_offset_t)msgbufp + off, avail_end + off);
722 	msgbufinit(msgbufp, MSGBUF_SIZE);
723 #endif
724 #if 0
725 	thread0.td_pcb_cr3 ... MMU
726 	lwp0.lwp_md.md_regs = &proc0_tf;
727 #endif
728 }
729 
730 /*
731  * Filesystem image paths for the virtual kernel are optional.
732  * If specified, each should point to a disk image,
733  * the first of which will become the root disk.
734  *
735  * The virtual kernel caches data from our 'disk' just like a normal kernel,
736  * so we do not really want the real kernel to cache the data too.  Use
737  * O_DIRECT to remove the duplication.
738  */
739 static
740 void
741 init_disk(char *diskExp[], int diskFileNum, enum vkdisk_type type)
742 {
743 	int i;
744 
745 	if (diskFileNum == 0)
746 		return;
747 
748 	for (i = 0; i < diskFileNum; i++) {
749 		char *fname;
750 		fname = diskExp[i];
751 
752 		if (fname == NULL) {
753 			warnx("Invalid argument to '-r'");
754 			continue;
755 		}
756 
757 		if (DiskNum < VKDISK_MAX) {
758 			struct stat st;
759 			struct vkdisk_info* info = NULL;
760 			int fd;
761 			size_t l = 0;
762 
763 			if (type == VKD_DISK)
764 			    fd = open(fname, O_RDWR|O_DIRECT, 0644);
765 			else
766 			    fd = open(fname, O_RDONLY|O_DIRECT, 0644);
767 			if (fd < 0 || fstat(fd, &st) < 0) {
768 				err(1, "Unable to open/create %s", fname);
769 				/* NOT REACHED */
770 			}
771 			if (S_ISREG(st.st_mode)) {
772 				if (flock(fd, LOCK_EX|LOCK_NB) < 0) {
773 					errx(1, "Disk image %s is already "
774 						"in use\n", fname);
775 					/* NOT REACHED */
776 				}
777 			}
778 
779 			info = &DiskInfo[DiskNum];
780 			l = strlen(fname);
781 
782 			info->unit = i;
783 			info->fd = fd;
784 			info->type = type;
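			/*
			 * DiskInfo[] has static storage duration and is
			 * zero-filled, so copying strlen(fname) bytes below
			 * still leaves info->fname NUL-terminated (assuming
			 * the path fits in the buffer).
			 */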
785 			memcpy(info->fname, fname, l);
786 
787 			if (DiskNum == 0) {
788 				if (type == VKD_CD) {
789 				    rootdevnames[0] = "cd9660:vcd0a";
790 				} else if (type == VKD_DISK) {
791 				    rootdevnames[0] = "ufs:vkd0s0a";
792 				    rootdevnames[1] = "ufs:vkd0s1a";
793 				}
794 			}
795 
796 			DiskNum++;
797 		} else {
798 			warnx("vkd%d (%s) > VKDISK_MAX", DiskNum, fname);
799 			continue;
800 		}
801 	}
802 }
803 
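/*
 * Set (f > 0) or clear (f < 0, using -f as the mask) interface flags
 * on the tap(4) interface via the ioctl socket s.  Returns 0 on
 * success, -1 on error.
 */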
804 static
805 int
806 netif_set_tapflags(int tap_unit, int f, int s)
807 {
808 	struct ifreq ifr;
809 	int flags;
810 
811 	bzero(&ifr, sizeof(ifr));
812 
813 	snprintf(ifr.ifr_name, sizeof(ifr.ifr_name), "tap%d", tap_unit);
814 	if (ioctl(s, SIOCGIFFLAGS, &ifr) < 0) {
815 		warn("tap%d: ioctl(SIOCGIFFLAGS) failed", tap_unit);
816 		return -1;
817 	}
818 
819 	/*
820 	 * Adjust if_flags
821 	 *
822 	 * If the flags are already set/cleared, then we return
823 	 * immediately to avoid extra syscalls
824 	 */
825 	flags = (ifr.ifr_flags & 0xffff) | (ifr.ifr_flagshigh << 16);
826 	if (f < 0) {
827 		/* Turn off flags */
828 		f = -f;
829 		if ((flags & f) == 0)
830 			return 0;
831 		flags &= ~f;
832 	} else {
833 		/* Turn on flags */
834 		if (flags & f)
835 			return 0;
836 		flags |= f;
837 	}
838 
839 	/*
840 	 * Fix up ifreq.ifr_name, since it may have been trashed
841 	 * by the previous ioctl(SIOCGIFFLAGS).
842 	 */
843 	snprintf(ifr.ifr_name, sizeof(ifr.ifr_name), "tap%d", tap_unit);
844 
845 	ifr.ifr_flags = flags & 0xffff;
846 	ifr.ifr_flagshigh = flags >> 16;
847 	if (ioctl(s, SIOCSIFFLAGS, &ifr) < 0) {
848 		warn("tap%d: ioctl(SIOCSIFFLAGS) failed", tap_unit);
849 		return -1;
850 	}
851 	return 0;
852 }
853 
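/*
 * Assign an IPv4 address, and optionally a netmask, to the tap(4)
 * interface via SIOCAIFADDR.  Returns 0 on success, -1 on error.
 */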
854 static
855 int
856 netif_set_tapaddr(int tap_unit, in_addr_t addr, in_addr_t mask, int s)
857 {
858 	struct ifaliasreq ifra;
859 	struct sockaddr_in *in;
860 
861 	bzero(&ifra, sizeof(ifra));
862 	snprintf(ifra.ifra_name, sizeof(ifra.ifra_name), "tap%d", tap_unit);
863 
864 	/* Setup address */
865 	in = (struct sockaddr_in *)&ifra.ifra_addr;
866 	in->sin_family = AF_INET;
867 	in->sin_len = sizeof(*in);
868 	in->sin_addr.s_addr = addr;
869 
870 	if (mask != 0) {
871 		/* Setup netmask */
872 		in = (struct sockaddr_in *)&ifra.ifra_mask;
873 		in->sin_len = sizeof(*in);
874 		in->sin_addr.s_addr = mask;
875 	}
876 
877 	if (ioctl(s, SIOCAIFADDR, &ifra) < 0) {
878 		warn("tap%d: ioctl(SIOCAIFADDR) failed", tap_unit);
879 		return -1;
880 	}
881 	return 0;
882 }
883 
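/*
 * Add the tap(4) interface to the named bridge(4) interface.  An
 * existing membership (EEXIST) is not treated as an error.
 */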
884 static
885 int
886 netif_add_tap2brg(int tap_unit, const char *ifbridge, int s)
887 {
888 	struct ifbreq ifbr;
889 	struct ifdrv ifd;
890 
891 	bzero(&ifbr, sizeof(ifbr));
892 	snprintf(ifbr.ifbr_ifsname, sizeof(ifbr.ifbr_ifsname),
893 		 "tap%d", tap_unit);
894 
895 	bzero(&ifd, sizeof(ifd));
896 	strlcpy(ifd.ifd_name, ifbridge, sizeof(ifd.ifd_name));
897 	ifd.ifd_cmd = BRDGADD;
898 	ifd.ifd_len = sizeof(ifbr);
899 	ifd.ifd_data = &ifbr;
900 
901 	if (ioctl(s, SIOCSDRVSPEC, &ifd) < 0) {
902 		/*
903 		 * 'errno == EEXIST' means that the tap(4) is already
904 		 * a member of the bridge(4)
905 		 */
906 		if (errno != EEXIST) {
907 			warn("ioctl(%s, SIOCSDRVSPEC) failed", ifbridge);
908 			return -1;
909 		}
910 	}
911 	return 0;
912 }
913 
914 #define TAPDEV_OFLAGS	(O_RDWR | O_NONBLOCK)
915 
916 /*
917  * Locate the first unused tap(4) device file if auto mode is requested,
918  * or open the user-supplied device file, and bring up the corresponding
919  * tap(4) interface.
920  *
921  * NOTE: Only tap(4) device files are currently supported.
922  */
923 static
924 int
925 netif_open_tap(const char *netif, int *tap_unit, int s)
926 {
927 	char tap_dev[MAXPATHLEN];
928 	int tap_fd, failed;
929 	struct stat st;
930 	char *dname;
931 
932 	*tap_unit = -1;
933 
934 	if (strcmp(netif, "auto") == 0) {
935 		/*
936 		 * Find first unused tap(4) device file
937 		 */
938 		tap_fd = open("/dev/tap", TAPDEV_OFLAGS);
939 		if (tap_fd < 0) {
940 			warnc(errno, "Unable to find a free tap(4)");
941 			return -1;
942 		}
943 	} else {
944 		/*
945 		 * User supplied tap(4) device file or unix socket.
946 		 */
947 		if (netif[0] == '/')	/* Absolute path */
948 			strlcpy(tap_dev, netif, sizeof(tap_dev));
949 		else
950 			snprintf(tap_dev, sizeof(tap_dev), "/dev/%s", netif);
951 
952 		tap_fd = open(tap_dev, TAPDEV_OFLAGS);
953 
954 		/*
955 		 * If we cannot open normally try to connect to it.
956 		 */
957 		if (tap_fd < 0)
958 			tap_fd = unix_connect(tap_dev);
959 
960 		if (tap_fd < 0) {
961 			warn("Unable to open %s", tap_dev);
962 			return -1;
963 		}
964 	}
965 
966 	/*
967 	 * Check whether the device file is a tap(4)
968 	 */
969 	if (fstat(tap_fd, &st) < 0) {
970 		failed = 1;
971 	} else if (S_ISCHR(st.st_mode)) {
972 		dname = fdevname(tap_fd);
973 		if (dname)
974 			dname = strstr(dname, "tap");
975 		if (dname) {
976 			/*
977 			 * Bring up the corresponding tap(4) interface
978 			 */
979 			*tap_unit = strtol(dname + 3, NULL, 10);
980 			printf("TAP UNIT %d\n", *tap_unit);
981 			if (netif_set_tapflags(*tap_unit, IFF_UP, s) == 0)
982 				failed = 0;
983 			else
984 				failed = 1;
985 		} else {
986 			failed = 1;
987 		}
988 	} else if (S_ISSOCK(st.st_mode)) {
989 		/*
990 		 * Special socket connection (typically to vknet).  We
991 		 * do not have to do anything.
992 		 */
993 		failed = 0;
994 	} else {
995 		failed = 1;
996 	}
997 
998 	if (failed) {
999 		warnx("%s is not a tap(4) device or socket", tap_dev);
1000 		close(tap_fd);
1001 		tap_fd = -1;
1002 		*tap_unit = -1;
1003 	}
1004 	return tap_fd;
1005 }
1006 
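/*
 * Connect to a SOCK_SEQPACKET unix-domain socket at the given path
 * (typically a vknet socket).  Returns a non-blocking descriptor,
 * or -1 on failure.
 */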
1007 static int
1008 unix_connect(const char *path)
1009 {
1010 	struct sockaddr_un sunx;
1011 	int len;
1012 	int net_fd;
1013 	int sndbuf = 262144;
1014 	struct stat st;
1015 
1016 	snprintf(sunx.sun_path, sizeof(sunx.sun_path), "%s", path);
1017 	len = offsetof(struct sockaddr_un, sun_path[strlen(sunx.sun_path)]);
1018 	++len;	/* include nul */
1019 	sunx.sun_family = AF_UNIX;
1020 	sunx.sun_len = len;
1021 
1022 	net_fd = socket(AF_UNIX, SOCK_SEQPACKET, 0);
1023 	if (net_fd < 0)
1024 		return(-1);
1025 	if (connect(net_fd, (void *)&sunx, len) < 0) {
1026 		close(net_fd);
1027 		return(-1);
1028 	}
1029 	setsockopt(net_fd, SOL_SOCKET, SO_SNDBUF, &sndbuf, sizeof(sndbuf));
1030 	if (fstat(net_fd, &st) == 0)
1031 		printf("Network socket buffer: %d bytes\n", st.st_blksize);
1032 	fcntl(net_fd, F_SETFL, O_NONBLOCK);
1033 	return(net_fd);
1034 }
1035 
1036 #undef TAPDEV_MAJOR
1037 #undef TAPDEV_MINOR
1038 #undef TAPDEV_OFLAGS
1039 
1040 /*
1041  * The following syntax is supported:
1042  * 1) x.x.x.x             tap(4)'s address is x.x.x.x
1043  *
1044  * 2) x.x.x.x/z           tap(4)'s address is x.x.x.x
1045  *                        tap(4)'s netmask len is z
1046  *
1047  * 3) x.x.x.x:y.y.y.y     tap(4)'s address is x.x.x.x
1048  *                        pseudo netif's address is y.y.y.y
1049  *
1050  * 4) x.x.x.x:y.y.y.y/z   tap(4)'s address is x.x.x.x
1051  *                        pseudo netif's address is y.y.y.y
1052  *                        tap(4) and pseudo netif's netmask len are z
1053  *
1054  * 5) bridgeX             tap(4) will be added to bridgeX
1055  *
1056  * 6) bridgeX:y.y.y.y     tap(4) will be added to bridgeX
1057  *                        pseudo netif's address is y.y.y.y
1058  *
1059  * 7) bridgeX:y.y.y.y/z   tap(4) will be added to bridgeX
1060  *                        pseudo netif's address is y.y.y.y
1061  *                        pseudo netif's netmask len is z
1062  */
1063 static
1064 int
1065 netif_init_tap(int tap_unit, in_addr_t *addr, in_addr_t *mask, int s)
1066 {
1067 	in_addr_t tap_addr, netmask, netif_addr;
1068 	int next_netif_addr;
1069 	char *tok, *masklen_str, *ifbridge;
1070 
1071 	*addr = 0;
1072 	*mask = 0;
1073 
1074 	tok = strtok(NULL, ":/");
1075 	if (tok == NULL) {
1076 		/*
1077 		 * Nothing special, simply use tap(4) as backend
1078 		 */
1079 		return 0;
1080 	}
1081 
1082 	if (inet_pton(AF_INET, tok, &tap_addr) > 0) {
1083 		/*
1084 		 * tap(4)'s address is supplied
1085 		 */
1086 		ifbridge = NULL;
1087 
1088 		/*
1089 		 * If there is next token, then it may be pseudo
1090 		 * netif's address or netmask len for tap(4)
1091 		 */
1092 		next_netif_addr = 0;
1093 	} else {
1094 		/*
1095 		 * Not tap(4)'s address, assume it as a bridge(4)
1096 		 * iface name
1097 		 */
1098 		tap_addr = 0;
1099 		ifbridge = tok;
1100 
1101 		/*
1102 		 * If there is next token, then it must be pseudo
1103 		 * netif's address
1104 		 */
1105 		next_netif_addr = 1;
1106 	}
1107 
1108 	netmask = netif_addr = 0;
1109 
1110 	tok = strtok(NULL, ":/");
1111 	if (tok == NULL)
1112 		goto back;
1113 
1114 	if (inet_pton(AF_INET, tok, &netif_addr) <= 0) {
1115 		if (next_netif_addr) {
1116 			warnx("Invalid pseudo netif address: %s", tok);
1117 			return -1;
1118 		}
1119 		netif_addr = 0;
1120 
1121 		/*
1122 		 * Current token is not address, then it must be netmask len
1123 		 */
1124 		masklen_str = tok;
1125 	} else {
1126 		/*
1127 		 * Current token is pseudo netif address, if there is next token
1128 		 * it must be netmask len
1129 		 */
1130 		masklen_str = strtok(NULL, "/");
1131 	}
1132 
1133 	/* Calculate netmask */
1134 	if (masklen_str != NULL) {
1135 		u_long masklen;
1136 
1137 		masklen = strtoul(masklen_str, NULL, 10);
1138 		if (masklen < 32 && masklen > 0) {
1139 			netmask = htonl(~((1LL << (32 - masklen)) - 1)
1140 					& 0xffffffff);
1141 		} else {
1142 			warnx("Invalid netmask len: %lu", masklen);
1143 			return -1;
1144 		}
1145 	}
1146 
1147 	/* Make sure there is no more token left */
1148 	if (strtok(NULL, ":/") != NULL) {
1149 		warnx("Invalid argument to '-I'");
1150 		return -1;
1151 	}
1152 
1153 back:
1154 	if (tap_unit < 0) {
1155 		/* Do nothing */
1156 	} else if (ifbridge == NULL) {
1157 		/* Set tap(4) address/netmask */
1158 		if (netif_set_tapaddr(tap_unit, tap_addr, netmask, s) < 0)
1159 			return -1;
1160 	} else {
1161 		/* Tie tap(4) to bridge(4) */
1162 		if (netif_add_tap2brg(tap_unit, ifbridge, s) < 0)
1163 			return -1;
1164 	}
1165 
1166 	*addr = netif_addr;
1167 	*mask = netmask;
1168 	return 0;
1169 }
1170 
1171 /*
1172  * NetifInfo[] will be filled for pseudo netif initialization.
1173  * NetifNum will be bumped to reflect the number of valid entries
1174  * in NetifInfo[].
1175  */
1176 static
1177 void
1178 init_netif(char *netifExp[], int netifExpNum)
1179 {
1180 	int i, s;
1181 
1182 	if (netifExpNum == 0)
1183 		return;
1184 
1185 	s = socket(AF_INET, SOCK_DGRAM, 0);	/* for ioctl(SIOC) */
1186 	if (s < 0)
1187 		return;
1188 
1189 	for (i = 0; i < netifExpNum; ++i) {
1190 		struct vknetif_info *info;
1191 		in_addr_t netif_addr, netif_mask;
1192 		int tap_fd, tap_unit;
1193 		char *netif;
1194 
1195 		netif = strtok(netifExp[i], ":");
1196 		if (netif == NULL) {
1197 			warnx("Invalid argument to '-I'");
1198 			continue;
1199 		}
1200 
1201 		/*
1202 		 * Open tap(4) device file and bring up the
1203 		 * corresponding interface
1204 		 */
1205 		tap_fd = netif_open_tap(netif, &tap_unit, s);
1206 		if (tap_fd < 0)
1207 			continue;
1208 
1209 		/*
1210 		 * Initialize tap(4) and get address/netmask
1211 		 * for pseudo netif
1212 		 *
1213 		 * NB: The rest of netifExp[i] is passed to
1214 		 *     netif_init_tap() implicitly via strtok().
1215 		 */
1216 		if (netif_init_tap(tap_unit, &netif_addr, &netif_mask, s) < 0) {
1217 			/*
1218 			 * NB: Closing tap(4) device file will bring
1219 			 *     down the corresponding interface
1220 			 */
1221 			close(tap_fd);
1222 			continue;
1223 		}
1224 
1225 		info = &NetifInfo[NetifNum];
1226 		info->tap_fd = tap_fd;
1227 		info->tap_unit = tap_unit;
1228 		info->netif_addr = netif_addr;
1229 		info->netif_mask = netif_mask;
1230 
1231 		NetifNum++;
1232 		if (NetifNum >= VKNETIF_MAX)	/* XXX will this happen? */
1233 			break;
1234 	}
1235 	close(s);
1236 }
1237 
1238 /*
1239  * Create the pid file and leave it open and locked while the vkernel is
1240  * running.  This allows a script to use /usr/bin/lockf to probe whether
1241  * a vkernel is still running (so as not to accidentally kill an unrelated
1242  * process from a stale pid file).
1243  */
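/*
 * For example (illustrative only; the path depends on the -p argument),
 * a wrapper script might probe for a running vkernel with something like:
 *
 *	lockf -kst 0 /var/run/vkernel.pid true || echo "vkernel still running"
 */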
1244 static
1245 void
1246 writepid(void)
1247 {
1248 	char buf[32];
1249 	int fd;
1250 
1251 	if (pid_file != NULL) {
1252 		snprintf(buf, sizeof(buf), "%ld\n", (long)getpid());
1253 		fd = open(pid_file, O_RDWR|O_CREAT|O_EXLOCK|O_NONBLOCK, 0666);
1254 		if (fd < 0) {
1255 			if (errno == EWOULDBLOCK) {
1256 				perror("Failed to lock pidfile, "
1257 				       "vkernel already running");
1258 			} else {
1259 				perror("Failed to create pidfile");
1260 			}
1261 			exit(EX_SOFTWARE);
1262 		}
1263 		ftruncate(fd, 0);
1264 		write(fd, buf, strlen(buf));
1265 		/* leave the file open to maintain the lock */
1266 	}
1267 }
1268 
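/*
 * Remove the pid file, if one was created, on halt or reboot.
 */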
1269 static
1270 void
1271 cleanpid(void)
1272 {
1273 	if (pid_file != NULL) {
1274 		if (unlink(pid_file) < 0)
1275 			perror("Warning: couldn't remove pidfile");
1276 	}
1277 }
1278 
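/*
 * Print a formatted error message to stderr and exit with EX_USAGE.
 */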
1279 static
1280 void
1281 usage_err(const char *ctl, ...)
1282 {
1283 	va_list va;
1284 
1285 	va_start(va, ctl);
1286 	vfprintf(stderr, ctl, va);
1287 	va_end(va);
1288 	fprintf(stderr, "\n");
1289 	exit(EX_USAGE);
1290 }
1291 
1292 static
1293 void
1294 usage_help(_Bool help)
1295 {
1296 	fprintf(stderr, "Usage: %s [-hsUv] [-c file] [-e name=value:name=value:...]\n"
1297 	    "\t[-i file] [-I interface[:address1[:address2][/netmask]]] [-l cpulock]\n"
1298 	    "\t[-m size] [-n numcpus[:lbits[:cbits]]]\n"
1299 	    "\t[-p file] [-r file]\n", save_av[0]);
1300 
1301 	if (help)
1302 		fprintf(stderr, "\nArguments:\n"
1303 		    "\t-c\tSpecify a readonly CD-ROM image file to be used by the kernel.\n"
1304 		    "\t-e\tSpecify an environment to be used by the kernel.\n"
1305 		    "\t-h\tThis list of options.\n"
1306 		    "\t-i\tSpecify a memory image file to be used by the virtual kernel.\n"
1307 		    "\t-I\tCreate a virtual network device.\n"
1308 		    "\t-l\tSpecify which, if any, real CPUs to lock virtual CPUs to.\n"
1309 		    "\t-m\tSpecify the amount of memory to be used by the kernel in bytes.\n"
1310 		    "\t-n\tSpecify the number of CPUs and the topology you wish to emulate:\n"
1311 		    "\t  \t- numcpus - number of cpus\n"
1312 		    "\t  \t- :lbits - specify the number of bits within APICID(=CPUID) needed for representing\n"
1313 		    "\t  \t  the logical ID. Controls the number of threads/core (0bits - 1 thread, 1bit - 2 threads).\n"
1314 		    "\t  \t- :cbits - specify the number of bits within APICID(=CPUID) needed for representing\n"
1315 		    "\t  \t  the core ID. Controls the number of core/package (0bits - 1 core, 1bit - 2 cores).\n"
1316 		    "\t-p\tSpecify a file in which to store the process ID.\n"
1317 		    "\t-r\tSpecify a R/W disk image file to be used by the kernel.\n"
1318 		    "\t-s\tBoot into single-user mode.\n"
1319 		    "\t-U\tEnable writing to kernel memory and module loading.\n"
1320 		    "\t-v\tTurn on verbose booting.\n");
1321 
1322 	exit(EX_USAGE);
1323 }
1324 
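/*
 * A vkernel reboot is implemented by cleaning up and re-exec()ing the
 * original vkernel binary with its original arguments.
 */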
1325 void
1326 cpu_reset(void)
1327 {
1328 	kprintf("cpu reset, rebooting vkernel\n");
1329 	closefrom(3);
1330 	cleanpid();
1331 	execv(save_av[0], save_av);
1332 }
1333 
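/*
 * Halting the vkernel simply cleans up and exits the process.
 */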
1334 void
1335 cpu_halt(void)
1336 {
1337 	kprintf("cpu halt, exiting vkernel\n");
1338 	cleanpid();
1339 	exit(EX_OK);
1340 }
1341 
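/*
 * Bind this virtual cpu's thread to a real cpu according to the -l option:
 * round-robin per virtual cpu, a single fixed cpu, or no binding at all.
 */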
1342 void
1343 setrealcpu(void)
1344 {
1345 	switch(lwp_cpu_lock) {
1346 	case LCL_PER_CPU:
1347 		if (bootverbose)
1348 			kprintf("Locking CPU%d to real cpu %d\n",
1349 				mycpuid, next_cpu);
1350 		usched_set(getpid(), USCHED_SET_CPU, &next_cpu, sizeof(next_cpu));
1351 		next_cpu++;
1352 		if (next_cpu >= real_ncpus)
1353 			next_cpu = 0;
1354 		break;
1355 	case LCL_SINGLE_CPU:
1356 		if (bootverbose)
1357 			kprintf("Locking CPU%d to real cpu %d\n",
1358 				mycpuid, next_cpu);
1359 		usched_set(getpid(), USCHED_SET_CPU, &next_cpu, sizeof(next_cpu));
1360 		break;
1361 	default:
1362 		/* do not map virtual cpus to real cpus */
1363 		break;
1364 	}
1365 }
1366 
1367 /*
1368  * Allocate and free memory for module loading.  The loaded module
1369  * has to be placed somewhere near the current kernel binary load
1370  * point or the relocations will not work.
1371  *
1372  * I'm not sure why this isn't working.
1373  */
1374 int
1375 vkernel_module_memory_alloc(vm_offset_t *basep, size_t bytes)
1376 {
1377 	kprintf("module loading for vkernel64 is not currently supported\n");
1378 	*basep = 0;
1379 	return ENOMEM;
1380 #if 0
1381 #if 1
1382 	size_t xtra;
1383 	xtra = (PAGE_SIZE - (vm_offset_t)sbrk(0)) & PAGE_MASK;
1384 	*basep = (vm_offset_t)sbrk(xtra + bytes) + xtra;
1385 	bzero((void *)*basep, bytes);
1386 #else
1387 	*basep = (vm_offset_t)mmap((void *)0x000000000, bytes,
1388 				   PROT_READ|PROT_WRITE|PROT_EXEC,
1389 				   MAP_ANON|MAP_SHARED, -1, 0);
1390 	if ((void *)*basep == MAP_FAILED)
1391 		return ENOMEM;
1392 #endif
1393 	kprintf("basep %p %p %zd\n",
1394 		(void *)vkernel_module_memory_alloc, (void *)*basep, bytes);
1395 	return 0;
1396 #endif
1397 }
1398 
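/*
 * Counterpart to vkernel_module_memory_alloc().  Currently a no-op since
 * module loading is disabled for the virtual kernel.
 */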
1399 void
1400 vkernel_module_memory_free(vm_offset_t base, size_t bytes)
1401 {
1402 #if 0
1403 #if 0
1404 	munmap((void *)base, bytes);
1405 #endif
1406 #endif
1407 }
1408