1 /*-
2  * SPDX-License-Identifier: BSD-4-Clause
3  *
4  * Copyright (c) 2018 The FreeBSD Foundation
5  * Copyright (c) 1992 Terrence R. Lambert.
6  * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
7  * All rights reserved.
8  *
9  * This code is derived from software contributed to Berkeley by
10  * William Jolitz.
11  *
12  * Portions of this software were developed by A. Joseph Koshy under
13  * sponsorship from the FreeBSD Foundation and Google, Inc.
14  *
15  * Redistribution and use in source and binary forms, with or without
16  * modification, are permitted provided that the following conditions
17  * are met:
18  * 1. Redistributions of source code must retain the above copyright
19  *    notice, this list of conditions and the following disclaimer.
20  * 2. Redistributions in binary form must reproduce the above copyright
21  *    notice, this list of conditions and the following disclaimer in the
22  *    documentation and/or other materials provided with the distribution.
23  * 3. All advertising materials mentioning features or use of this software
24  *    must display the following acknowledgement:
25  *	This product includes software developed by the University of
26  *	California, Berkeley and its contributors.
27  * 4. Neither the name of the University nor the names of its contributors
28  *    may be used to endorse or promote products derived from this software
29  *    without specific prior written permission.
30  *
31  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
32  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
33  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
34  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
35  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
39  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
40  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
41  * SUCH DAMAGE.
42  *
43  *	from: @(#)machdep.c	7.4 (Berkeley) 6/3/91
44  */
45 
46 #include <sys/cdefs.h>
47 __FBSDID("$FreeBSD$");
48 
49 #include "opt_apic.h"
50 #include "opt_atpic.h"
51 #include "opt_cpu.h"
52 #include "opt_ddb.h"
53 #include "opt_inet.h"
54 #include "opt_isa.h"
55 #include "opt_kstack_pages.h"
56 #include "opt_maxmem.h"
57 #include "opt_perfmon.h"
58 #include "opt_platform.h"
59 
60 #include <sys/param.h>
61 #include <sys/proc.h>
62 #include <sys/systm.h>
63 #include <sys/bio.h>
64 #include <sys/buf.h>
65 #include <sys/bus.h>
66 #include <sys/callout.h>
67 #include <sys/cons.h>
68 #include <sys/cpu.h>
69 #include <sys/eventhandler.h>
70 #include <sys/exec.h>
71 #include <sys/imgact.h>
72 #include <sys/kdb.h>
73 #include <sys/kernel.h>
74 #include <sys/ktr.h>
75 #include <sys/linker.h>
76 #include <sys/lock.h>
77 #include <sys/malloc.h>
78 #include <sys/memrange.h>
79 #include <sys/msgbuf.h>
80 #include <sys/mutex.h>
81 #include <sys/pcpu.h>
82 #include <sys/ptrace.h>
83 #include <sys/reboot.h>
84 #include <sys/reg.h>
85 #include <sys/rwlock.h>
86 #include <sys/sched.h>
87 #include <sys/signalvar.h>
88 #include <sys/smp.h>
89 #include <sys/syscallsubr.h>
90 #include <sys/sysctl.h>
91 #include <sys/sysent.h>
92 #include <sys/sysproto.h>
93 #include <sys/ucontext.h>
94 #include <sys/vmmeter.h>
95 
96 #include <vm/vm.h>
97 #include <vm/vm_param.h>
98 #include <vm/vm_extern.h>
99 #include <vm/vm_kern.h>
100 #include <vm/vm_page.h>
101 #include <vm/vm_map.h>
102 #include <vm/vm_object.h>
103 #include <vm/vm_pager.h>
104 #include <vm/vm_phys.h>
105 #include <vm/vm_dumpset.h>
106 
107 #ifdef DDB
108 #ifndef KDB
109 #error KDB must be enabled in order for DDB to work!
110 #endif
111 #include <ddb/ddb.h>
112 #include <ddb/db_sym.h>
113 #endif
114 
115 #include <isa/rtc.h>
116 
117 #include <net/netisr.h>
118 
119 #include <dev/smbios/smbios.h>
120 
121 #include <machine/bootinfo.h>
122 #include <machine/clock.h>
123 #include <machine/cpu.h>
124 #include <machine/cputypes.h>
125 #include <machine/intr_machdep.h>
126 #include <x86/mca.h>
127 #include <machine/md_var.h>
128 #include <machine/metadata.h>
129 #include <machine/pc/bios.h>
130 #include <machine/pcb.h>
131 #include <machine/pcb_ext.h>
132 #include <machine/proc.h>
133 #include <machine/sigframe.h>
134 #include <machine/specialreg.h>
135 #include <machine/sysarch.h>
136 #include <machine/trap.h>
137 #include <x86/ucode.h>
138 #include <machine/vm86.h>
139 #include <x86/init.h>
140 #ifdef PERFMON
141 #include <machine/perfmon.h>
142 #endif
143 #ifdef SMP
144 #include <machine/smp.h>
145 #endif
146 #ifdef FDT
147 #include <x86/fdt.h>
148 #endif
149 
150 #ifdef DEV_APIC
151 #include <x86/apicvar.h>
152 #endif
153 
154 #ifdef DEV_ISA
155 #include <x86/isa/icu.h>
156 #endif
157 
158 /* Sanity check for __curthread() */
159 CTASSERT(offsetof(struct pcpu, pc_curthread) == 0);
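/*
 * A minimal sketch (assumed here, not the verbatim machine/pcpu.h
 * code) of why the assertion above matters: __curthread() fetches
 * curthread with a single %fs-relative load from offset 0 of the
 * per-CPU area, roughly:
 *
 *	static __inline struct thread *
 *	__curthread(void)
 *	{
 *		struct thread *td;
 *
 *		__asm("movl %%fs:0,%0" : "=r" (td));
 *		return (td);
 *	}
 */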
160 
161 register_t init386(int first);
162 void dblfault_handler(void);
163 void identify_cpu(void);
164 
165 static void cpu_startup(void *);
166 SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);
167 
168 /* Intel ICH registers */
169 #define ICH_PMBASE	0x400
170 #define ICH_SMI_EN	(ICH_PMBASE + 0x30)
171 
172 int	_udatasel, _ucodesel;
173 u_int	basemem;
174 static int above4g_allow = 1;
175 static int above24g_allow = 0;
176 
177 int cold = 1;
178 
179 long Maxmem = 0;
180 long realmem = 0;
181 int late_console = 1;
182 
183 #ifdef PAE
184 FEATURE(pae, "Physical Address Extensions");
185 #endif
186 
187 struct kva_md_info kmi;
188 
189 static struct trapframe proc0_tf;
190 struct pcpu __pcpu[MAXCPU];
191 
192 static void i386_clock_source_init(void);
193 
194 struct mtx icu_lock;
195 
196 struct mem_range_softc mem_range_softc;
197 
198 extern char start_exceptions[], end_exceptions[];
199 
200 extern struct sysentvec elf32_freebsd_sysvec;
201 
202 /* Default init_ops implementation. */
203 struct init_ops init_ops = {
204 	.early_clock_source_init =	i386_clock_source_init,
205 	.early_delay =			i8254_delay,
206 };
207 
208 static void
209 i386_clock_source_init(void)
210 {
211 	i8254_init();
212 }
213 
214 static void
215 cpu_startup(void *dummy)
217 {
218 	uintmax_t memsize;
219 	char *sysenv;
220 
221 	/*
222 	 * On MacBooks, we need to prevent the legacy USB circuit from
223 	 * generating an SMI#, because this can cause several problems,
224 	 * namely: incorrect CPU frequency detection and failure to
225 	 * start the APs.
226 	 * We do this by disabling a bit in the SMI_EN (SMI Control and
227 	 * Enable register) of the Intel ICH LPC Interface Bridge.
228 	 */
229 	sysenv = kern_getenv("smbios.system.product");
230 	if (sysenv != NULL) {
231 		if (strncmp(sysenv, "MacBook1,1", 10) == 0 ||
232 		    strncmp(sysenv, "MacBook3,1", 10) == 0 ||
233 		    strncmp(sysenv, "MacBook4,1", 10) == 0 ||
234 		    strncmp(sysenv, "MacBookPro1,1", 13) == 0 ||
235 		    strncmp(sysenv, "MacBookPro1,2", 13) == 0 ||
236 		    strncmp(sysenv, "MacBookPro3,1", 13) == 0 ||
237 		    strncmp(sysenv, "MacBookPro4,1", 13) == 0 ||
238 		    strncmp(sysenv, "Macmini1,1", 10) == 0) {
239 			if (bootverbose)
240 				printf("Disabling LEGACY_USB_EN bit on "
241 				    "Intel ICH.\n");
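			/*
			 * SMI_EN bit 3 is LEGACY_USB_EN on ICH parts;
			 * clearing it stops the legacy USB SMI source.
			 */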
242 			outl(ICH_SMI_EN, inl(ICH_SMI_EN) & ~0x8);
243 		}
244 		freeenv(sysenv);
245 	}
246 
247 	/*
248 	 * Good {morning,afternoon,evening,night}.
249 	 */
250 	startrtclock();
251 	printcpuinfo();
252 	panicifcpuunsupported();
253 #ifdef PERFMON
254 	perfmon_init();
255 #endif
256 
257 	/*
258 	 * Display physical memory if SMBIOS reports reasonable amount.
259 	 */
260 	memsize = 0;
261 	sysenv = kern_getenv("smbios.memory.enabled");
262 	if (sysenv != NULL) {
263 		memsize = (uintmax_t)strtoul(sysenv, (char **)NULL, 10) << 10;
264 		freeenv(sysenv);
265 	}
266 	if (memsize < ptoa((uintmax_t)vm_free_count()))
267 		memsize = ptoa((uintmax_t)Maxmem);
268 	printf("real memory  = %ju (%ju MB)\n", memsize, memsize >> 20);
269 	realmem = atop(memsize);
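	/*
	 * atop()/ptoa() convert between byte counts and page counts;
	 * with 4KB pages, atop(x) == x >> 12 and ptoa(x) == x << 12,
	 * e.g. 512MB of memory corresponds to atop() of 131072 pages.
	 */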
270 
271 	/*
272 	 * Display any holes after the first chunk of extended memory.
273 	 */
274 	if (bootverbose) {
275 		int indx;
276 
277 		printf("Physical memory chunk(s):\n");
278 		for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
279 			vm_paddr_t size;
280 
281 			size = phys_avail[indx + 1] - phys_avail[indx];
282 			printf(
283 			    "0x%016jx - 0x%016jx, %ju bytes (%ju pages)\n",
284 			    (uintmax_t)phys_avail[indx],
285 			    (uintmax_t)phys_avail[indx + 1] - 1,
286 			    (uintmax_t)size, (uintmax_t)size / PAGE_SIZE);
287 		}
288 	}
289 
290 	vm_ksubmap_init(&kmi);
291 
292 	printf("avail memory = %ju (%ju MB)\n",
293 	    ptoa((uintmax_t)vm_free_count()),
294 	    ptoa((uintmax_t)vm_free_count()) / 1048576);
295 
296 	/*
297 	 * Set up buffers, so they can be used to read disk labels.
298 	 */
299 	bufinit();
300 	vm_pager_bufferinit();
301 	cpu_setregs();
302 }
303 
304 void
305 cpu_setregs(void)
306 {
307 	unsigned int cr0;
308 
309 	cr0 = rcr0();
310 
311 	/*
312 	 * CR0_MP, CR0_NE and CR0_TS are set for NPX (FPU) support:
313 	 *
314 	 * Prepare to trap all ESC (i.e., NPX) instructions and all WAIT
315 	 * instructions.  We must set the CR0_MP bit and use the CR0_TS
316 	 * bit to control the trap, because setting the CR0_EM bit does
317 	 * not cause WAIT instructions to trap.  It's important to trap
318 	 * WAIT instructions - otherwise the "wait" variants of no-wait
319 	 * control instructions would degenerate to the "no-wait" variants
320 	 * after FP context switches but work correctly otherwise.  It's
321 	 * particularly important to trap WAITs when there is no NPX -
322 	 * otherwise the "wait" variants would always degenerate.
323 	 *
324 	 * Try setting CR0_NE to get correct error reporting on 486DX's.
325 	 * Setting it should fail or do nothing on lesser processors.
326 	 */
327 	cr0 |= CR0_MP | CR0_NE | CR0_TS | CR0_WP | CR0_AM;
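	/*
	 * CR0_WP makes ring 0 honor page-level write protection (needed
	 * for copy-on-write), and CR0_AM allows user-mode alignment
	 * checking when EFLAGS.AC is also set.
	 */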
328 	load_cr0(cr0);
329 	load_gs(_udatasel);
330 }
331 
332 u_long bootdev;		/* not a struct cdev *- encoding is different */
333 SYSCTL_ULONG(_machdep, OID_AUTO, guessed_bootdev,
334 	CTLFLAG_RD, &bootdev, 0, "Boot device as guessed by the kernel (not in struct cdev * format)");
335 
336 /*
337  * Initialize 386 and configure to run kernel
338  */
339 
340 /*
341  * Initialize segments & interrupt table
342  */
343 
344 int _default_ldt;
345 
346 struct mtx dt_lock;			/* lock for GDT and LDT */
347 
348 union descriptor gdt0[NGDT];	/* initial global descriptor table */
349 union descriptor *gdt = gdt0;	/* global descriptor table */
350 
351 union descriptor *ldt;		/* local descriptor table */
352 
353 static struct gate_descriptor idt0[NIDT];
354 struct gate_descriptor *idt = &idt0[0];	/* interrupt descriptor table */
355 
356 static struct i386tss *dblfault_tss;
357 static char *dblfault_stack;
358 
359 static struct i386tss common_tss0;
360 
361 vm_offset_t proc0kstack;
362 
363 /*
364  * software prototypes -- in more palatable form.
365  *
366  * GCODE_SEL through GUDATA_SEL must be in this order for syscall/sysret
367  * GUFS_SEL and GUGS_SEL must be in this order (swtch.s knows it)
368  */
369 struct soft_segment_descriptor gdt_segs[] = {
370 /* GNULL_SEL	0 Null Descriptor */
371 {	.ssd_base = 0x0,
372 	.ssd_limit = 0x0,
373 	.ssd_type = 0,
374 	.ssd_dpl = SEL_KPL,
375 	.ssd_p = 0,
376 	.ssd_xx = 0, .ssd_xx1 = 0,
377 	.ssd_def32 = 0,
378 	.ssd_gran = 0		},
379 /* GPRIV_SEL	1 SMP Per-Processor Private Data Descriptor */
380 {	.ssd_base = 0x0,
381 	.ssd_limit = 0xfffff,
382 	.ssd_type = SDT_MEMRWA,
383 	.ssd_dpl = SEL_KPL,
384 	.ssd_p = 1,
385 	.ssd_xx = 0, .ssd_xx1 = 0,
386 	.ssd_def32 = 1,
387 	.ssd_gran = 1		},
388 /* GUFS_SEL	2 %fs Descriptor for user */
389 {	.ssd_base = 0x0,
390 	.ssd_limit = 0xfffff,
391 	.ssd_type = SDT_MEMRWA,
392 	.ssd_dpl = SEL_UPL,
393 	.ssd_p = 1,
394 	.ssd_xx = 0, .ssd_xx1 = 0,
395 	.ssd_def32 = 1,
396 	.ssd_gran = 1		},
397 /* GUGS_SEL	3 %gs Descriptor for user */
398 {	.ssd_base = 0x0,
399 	.ssd_limit = 0xfffff,
400 	.ssd_type = SDT_MEMRWA,
401 	.ssd_dpl = SEL_UPL,
402 	.ssd_p = 1,
403 	.ssd_xx = 0, .ssd_xx1 = 0,
404 	.ssd_def32 = 1,
405 	.ssd_gran = 1		},
406 /* GCODE_SEL	4 Code Descriptor for kernel */
407 {	.ssd_base = 0x0,
408 	.ssd_limit = 0xfffff,
409 	.ssd_type = SDT_MEMERA,
410 	.ssd_dpl = SEL_KPL,
411 	.ssd_p = 1,
412 	.ssd_xx = 0, .ssd_xx1 = 0,
413 	.ssd_def32 = 1,
414 	.ssd_gran = 1		},
415 /* GDATA_SEL	5 Data Descriptor for kernel */
416 {	.ssd_base = 0x0,
417 	.ssd_limit = 0xfffff,
418 	.ssd_type = SDT_MEMRWA,
419 	.ssd_dpl = SEL_KPL,
420 	.ssd_p = 1,
421 	.ssd_xx = 0, .ssd_xx1 = 0,
422 	.ssd_def32 = 1,
423 	.ssd_gran = 1		},
424 /* GUCODE_SEL	6 Code Descriptor for user */
425 {	.ssd_base = 0x0,
426 	.ssd_limit = 0xfffff,
427 	.ssd_type = SDT_MEMERA,
428 	.ssd_dpl = SEL_UPL,
429 	.ssd_p = 1,
430 	.ssd_xx = 0, .ssd_xx1 = 0,
431 	.ssd_def32 = 1,
432 	.ssd_gran = 1		},
433 /* GUDATA_SEL	7 Data Descriptor for user */
434 {	.ssd_base = 0x0,
435 	.ssd_limit = 0xfffff,
436 	.ssd_type = SDT_MEMRWA,
437 	.ssd_dpl = SEL_UPL,
438 	.ssd_p = 1,
439 	.ssd_xx = 0, .ssd_xx1 = 0,
440 	.ssd_def32 = 1,
441 	.ssd_gran = 1		},
442 /* GBIOSLOWMEM_SEL 8 BIOS access to realmode segment 0x40, must be #8 in GDT */
443 {	.ssd_base = 0x400,
444 	.ssd_limit = 0xfffff,
445 	.ssd_type = SDT_MEMRWA,
446 	.ssd_dpl = SEL_KPL,
447 	.ssd_p = 1,
448 	.ssd_xx = 0, .ssd_xx1 = 0,
449 	.ssd_def32 = 1,
450 	.ssd_gran = 1		},
451 /* GPROC0_SEL	9 Proc 0 Tss Descriptor */
452 {
453 	.ssd_base = 0x0,
454 	.ssd_limit = sizeof(struct i386tss)-1,
455 	.ssd_type = SDT_SYS386TSS,
456 	.ssd_dpl = 0,
457 	.ssd_p = 1,
458 	.ssd_xx = 0, .ssd_xx1 = 0,
459 	.ssd_def32 = 0,
460 	.ssd_gran = 0		},
461 /* GLDT_SEL	10 LDT Descriptor */
462 {	.ssd_base = 0,
463 	.ssd_limit = sizeof(union descriptor) * NLDT - 1,
464 	.ssd_type = SDT_SYSLDT,
465 	.ssd_dpl = SEL_UPL,
466 	.ssd_p = 1,
467 	.ssd_xx = 0, .ssd_xx1 = 0,
468 	.ssd_def32 = 0,
469 	.ssd_gran = 0		},
470 /* GUSERLDT_SEL	11 User LDT Descriptor per process */
471 {	.ssd_base = 0,
472 	.ssd_limit = (512 * sizeof(union descriptor)-1),
473 	.ssd_type = SDT_SYSLDT,
474 	.ssd_dpl = 0,
475 	.ssd_p = 1,
476 	.ssd_xx = 0, .ssd_xx1 = 0,
477 	.ssd_def32 = 0,
478 	.ssd_gran = 0		},
479 /* GPANIC_SEL	12 Panic Tss Descriptor */
480 {	.ssd_base = 0,
481 	.ssd_limit = sizeof(struct i386tss)-1,
482 	.ssd_type = SDT_SYS386TSS,
483 	.ssd_dpl = 0,
484 	.ssd_p = 1,
485 	.ssd_xx = 0, .ssd_xx1 = 0,
486 	.ssd_def32 = 0,
487 	.ssd_gran = 0		},
488 /* GBIOSCODE32_SEL 13 BIOS 32-bit interface (32bit Code) */
489 {	.ssd_base = 0,
490 	.ssd_limit = 0xfffff,
491 	.ssd_type = SDT_MEMERA,
492 	.ssd_dpl = 0,
493 	.ssd_p = 1,
494 	.ssd_xx = 0, .ssd_xx1 = 0,
495 	.ssd_def32 = 0,
496 	.ssd_gran = 1		},
497 /* GBIOSCODE16_SEL 14 BIOS 32-bit interface (16bit Code) */
498 {	.ssd_base = 0,
499 	.ssd_limit = 0xfffff,
500 	.ssd_type = SDT_MEMERA,
501 	.ssd_dpl = 0,
502 	.ssd_p = 1,
503 	.ssd_xx = 0, .ssd_xx1 = 0,
504 	.ssd_def32 = 0,
505 	.ssd_gran = 1		},
506 /* GBIOSDATA_SEL 15 BIOS 32-bit interface (Data) */
507 {	.ssd_base = 0,
508 	.ssd_limit = 0xfffff,
509 	.ssd_type = SDT_MEMRWA,
510 	.ssd_dpl = 0,
511 	.ssd_p = 1,
512 	.ssd_xx = 0, .ssd_xx1 = 0,
513 	.ssd_def32 = 1,
514 	.ssd_gran = 1		},
515 /* GBIOSUTIL_SEL 16 BIOS 16-bit interface (Utility) */
516 {	.ssd_base = 0,
517 	.ssd_limit = 0xfffff,
518 	.ssd_type = SDT_MEMRWA,
519 	.ssd_dpl = 0,
520 	.ssd_p = 1,
521 	.ssd_xx = 0, .ssd_xx1 = 0,
522 	.ssd_def32 = 0,
523 	.ssd_gran = 1		},
524 /* GBIOSARGS_SEL 17 BIOS 16-bit interface (Arguments) */
525 {	.ssd_base = 0,
526 	.ssd_limit = 0xfffff,
527 	.ssd_type = SDT_MEMRWA,
528 	.ssd_dpl = 0,
529 	.ssd_p = 1,
530 	.ssd_xx = 0, .ssd_xx1 = 0,
531 	.ssd_def32 = 0,
532 	.ssd_gran = 1		},
533 /* GNDIS_SEL	18 NDIS Descriptor */
534 {	.ssd_base = 0x0,
535 	.ssd_limit = 0x0,
536 	.ssd_type = 0,
537 	.ssd_dpl = 0,
538 	.ssd_p = 0,
539 	.ssd_xx = 0, .ssd_xx1 = 0,
540 	.ssd_def32 = 0,
541 	.ssd_gran = 0		},
542 };
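/*
 * With .ssd_gran = 1 the 20-bit limit is in 4KB units, so the common
 * .ssd_limit = 0xfffff above describes a segment covering the entire
 * 4GB address space: (0xfffff + 1) * 4096 == 1 << 32.
 */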
543 
544 static struct soft_segment_descriptor ldt_segs[] = {
545 	/* Null Descriptor - overwritten by call gate */
546 {	.ssd_base = 0x0,
547 	.ssd_limit = 0x0,
548 	.ssd_type = 0,
549 	.ssd_dpl = 0,
550 	.ssd_p = 0,
551 	.ssd_xx = 0, .ssd_xx1 = 0,
552 	.ssd_def32 = 0,
553 	.ssd_gran = 0		},
554 	/* Null Descriptor - overwritten by call gate */
555 {	.ssd_base = 0x0,
556 	.ssd_limit = 0x0,
557 	.ssd_type = 0,
558 	.ssd_dpl = 0,
559 	.ssd_p = 0,
560 	.ssd_xx = 0, .ssd_xx1 = 0,
561 	.ssd_def32 = 0,
562 	.ssd_gran = 0		},
563 	/* Null Descriptor - overwritten by call gate */
564 {	.ssd_base = 0x0,
565 	.ssd_limit = 0x0,
566 	.ssd_type = 0,
567 	.ssd_dpl = 0,
568 	.ssd_p = 0,
569 	.ssd_xx = 0, .ssd_xx1 = 0,
570 	.ssd_def32 = 0,
571 	.ssd_gran = 0		},
572 	/* Code Descriptor for user */
573 {	.ssd_base = 0x0,
574 	.ssd_limit = 0xfffff,
575 	.ssd_type = SDT_MEMERA,
576 	.ssd_dpl = SEL_UPL,
577 	.ssd_p = 1,
578 	.ssd_xx = 0, .ssd_xx1 = 0,
579 	.ssd_def32 = 1,
580 	.ssd_gran = 1		},
581 	/* Null Descriptor - overwritten by call gate */
582 {	.ssd_base = 0x0,
583 	.ssd_limit = 0x0,
584 	.ssd_type = 0,
585 	.ssd_dpl = 0,
586 	.ssd_p = 0,
587 	.ssd_xx = 0, .ssd_xx1 = 0,
588 	.ssd_def32 = 0,
589 	.ssd_gran = 0		},
590 	/* Data Descriptor for user */
591 {	.ssd_base = 0x0,
592 	.ssd_limit = 0xfffff,
593 	.ssd_type = SDT_MEMRWA,
594 	.ssd_dpl = SEL_UPL,
595 	.ssd_p = 1,
596 	.ssd_xx = 0, .ssd_xx1 = 0,
597 	.ssd_def32 = 1,
598 	.ssd_gran = 1		},
599 };
600 
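/*
 * setidt_disp is the displacement between the linked addresses of the
 * low-level exception handlers (inside [start_exceptions,
 * end_exceptions)) and their run-time copies in the trampoline region;
 * it stays zero until machdep_init_trampoline() relocates the handlers.
 */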
601 size_t setidt_disp;
602 
603 void
604 setidt(int idx, inthand_t *func, int typ, int dpl, int selec)
605 {
606 	uintptr_t off;
607 
608 	off = func != NULL ? (uintptr_t)func + setidt_disp : 0;
609 	setidt_nodisp(idx, off, typ, dpl, selec);
610 }
611 
612 void
613 setidt_nodisp(int idx, uintptr_t off, int typ, int dpl, int selec)
614 {
615 	struct gate_descriptor *ip;
616 
617 	ip = idt + idx;
618 	ip->gd_looffset = off;
619 	ip->gd_selector = selec;
620 	ip->gd_stkcpy = 0;
621 	ip->gd_xx = 0;
622 	ip->gd_type = typ;
623 	ip->gd_dpl = dpl;
624 	ip->gd_p = 1;
625 	ip->gd_hioffset = ((u_int)off) >> 16 ;
626 }
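/*
 * Example of the offset split done above (hypothetical handler
 * address): for off = 0xc0a01234, gd_looffset holds 0x1234 and
 * gd_hioffset holds 0xc0a0; the CPU reassembles the 32-bit entry point
 * from the two halves when it takes the gate.
 */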
627 
628 extern inthand_t
629 	IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl),
630 	IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(fpusegm),
631 	IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot),
632 	IDTVEC(page), IDTVEC(mchk), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(align),
633 	IDTVEC(xmm),
634 #ifdef KDTRACE_HOOKS
635 	IDTVEC(dtrace_ret),
636 #endif
637 #ifdef XENHVM
638 	IDTVEC(xen_intr_upcall),
639 #endif
640 	IDTVEC(int0x80_syscall);
641 
642 #ifdef DDB
643 /*
644  * Display the index and function name of any IDT entries that don't use
645  * the default 'rsvd' entry point.
646  */
647 DB_SHOW_COMMAND_FLAGS(idt, db_show_idt, DB_CMD_MEMSAFE)
648 {
649 	struct gate_descriptor *ip;
650 	int idx;
651 	uintptr_t func, func_trm;
652 	bool trm;
653 
654 	ip = idt;
655 	for (idx = 0; idx < NIDT && !db_pager_quit; idx++) {
656 		if (ip->gd_type == SDT_SYSTASKGT) {
657 			db_printf("%3d\t<TASK>\n", idx);
658 		} else {
659 			func = (ip->gd_hioffset << 16 | ip->gd_looffset);
660 			if (func >= PMAP_TRM_MIN_ADDRESS) {
661 				func_trm = func;
662 				func -= setidt_disp;
663 				trm = true;
664 			} else
665 				trm = false;
666 			if (func != (uintptr_t)&IDTVEC(rsvd)) {
667 				db_printf("%3d\t", idx);
668 				db_printsym(func, DB_STGY_PROC);
669 				if (trm)
670 					db_printf(" (trampoline %#x)",
671 					    func_trm);
672 				db_printf("\n");
673 			}
674 		}
675 		ip++;
676 	}
677 }
678 
679 /* Show privileged registers. */
680 DB_SHOW_COMMAND_FLAGS(sysregs, db_show_sysregs, DB_CMD_MEMSAFE)
681 {
682 	uint64_t idtr, gdtr;
683 
684 	idtr = ridt();
685 	db_printf("idtr\t0x%08x/%04x\n",
686 	    (u_int)(idtr >> 16), (u_int)idtr & 0xffff);
687 	gdtr = rgdt();
688 	db_printf("gdtr\t0x%08x/%04x\n",
689 	    (u_int)(gdtr >> 16), (u_int)gdtr & 0xffff);
690 	db_printf("ldtr\t0x%04x\n", rldt());
691 	db_printf("tr\t0x%04x\n", rtr());
692 	db_printf("cr0\t0x%08x\n", rcr0());
693 	db_printf("cr2\t0x%08x\n", rcr2());
694 	db_printf("cr3\t0x%08x\n", rcr3());
695 	db_printf("cr4\t0x%08x\n", rcr4());
696 	if (rcr4() & CR4_XSAVE)
697 		db_printf("xcr0\t0x%016llx\n", rxcr(0));
698 	if (amd_feature & (AMDID_NX | AMDID_LM))
699 		db_printf("EFER\t0x%016llx\n", rdmsr(MSR_EFER));
700 	if (cpu_feature2 & (CPUID2_VMX | CPUID2_SMX))
701 		db_printf("FEATURES_CTL\t0x%016llx\n",
702 		    rdmsr(MSR_IA32_FEATURE_CONTROL));
703 	if (((cpu_vendor_id == CPU_VENDOR_INTEL ||
704 	    cpu_vendor_id == CPU_VENDOR_AMD) && CPUID_TO_FAMILY(cpu_id) >= 6) ||
705 	    cpu_vendor_id == CPU_VENDOR_HYGON)
706 		db_printf("DEBUG_CTL\t0x%016llx\n", rdmsr(MSR_DEBUGCTLMSR));
707 	if (cpu_feature & CPUID_PAT)
708 		db_printf("PAT\t0x%016llx\n", rdmsr(MSR_PAT));
709 }
710 
711 DB_SHOW_COMMAND_FLAGS(dbregs, db_show_dbregs, DB_CMD_MEMSAFE)
712 {
713 
714 	db_printf("dr0\t0x%08x\n", rdr0());
715 	db_printf("dr1\t0x%08x\n", rdr1());
716 	db_printf("dr2\t0x%08x\n", rdr2());
717 	db_printf("dr3\t0x%08x\n", rdr3());
718 	db_printf("dr6\t0x%08x\n", rdr6());
719 	db_printf("dr7\t0x%08x\n", rdr7());
720 }
721 
722 DB_SHOW_COMMAND(frame, db_show_frame)
723 {
724 	struct trapframe *frame;
725 
726 	frame = have_addr ? (struct trapframe *)addr : curthread->td_frame;
727 	printf("ss %#x esp %#x efl %#x cs %#x eip %#x\n",
728 	    frame->tf_ss, frame->tf_esp, frame->tf_eflags, frame->tf_cs,
729 	    frame->tf_eip);
730 	printf("err %#x trapno %d\n", frame->tf_err, frame->tf_trapno);
731 	printf("ds %#x es %#x fs %#x\n",
732 	    frame->tf_ds, frame->tf_es, frame->tf_fs);
733 	printf("eax %#x ecx %#x edx %#x ebx %#x\n",
734 	    frame->tf_eax, frame->tf_ecx, frame->tf_edx, frame->tf_ebx);
735 	printf("ebp %#x esi %#x edi %#x\n",
736 	    frame->tf_ebp, frame->tf_esi, frame->tf_edi);
737 
738 }
739 #endif
740 
741 void
742 sdtossd(struct segment_descriptor *sd, struct soft_segment_descriptor *ssd)
745 {
746 	ssd->ssd_base  = (sd->sd_hibase << 24) | sd->sd_lobase;
747 	ssd->ssd_limit = (sd->sd_hilimit << 16) | sd->sd_lolimit;
748 	ssd->ssd_type  = sd->sd_type;
749 	ssd->ssd_dpl   = sd->sd_dpl;
750 	ssd->ssd_p     = sd->sd_p;
751 	ssd->ssd_def32 = sd->sd_def32;
752 	ssd->ssd_gran  = sd->sd_gran;
753 }
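/*
 * The hardware descriptor splits the 32-bit base into a 24-bit low and
 * an 8-bit high part, and the 20-bit limit into 16- and 4-bit parts;
 * sdtossd() is the inverse of ssdtosd(), reassembling the palatable
 * soft form from those fragments.
 */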
754 
755 static int
756 add_physmap_entry(uint64_t base, uint64_t length, vm_paddr_t *physmap,
757     int *physmap_idxp)
758 {
759 	uint64_t lim, ign;
760 	int i, insert_idx, physmap_idx;
761 
762 	physmap_idx = *physmap_idxp;
763 
764 	if (length == 0)
765 		return (1);
766 
767 	lim = 0x100000000;					/*  4G */
768 	if (pae_mode && above4g_allow)
769 		lim = above24g_allow ? -1ULL : 0x600000000;	/* 24G */
770 	if (base >= lim) {
771 		printf("%uK of memory above %uGB ignored, pae %d "
772 		    "above4g_allow %d above24g_allow %d\n",
773 		    (u_int)(length / 1024), (u_int)(lim >> 30), pae_mode,
774 		    above4g_allow, above24g_allow);
775 		return (1);
776 	}
777 	if (base + length >= lim) {
778 		ign = base + length - lim;
779 		length -= ign;
780 		printf("%uK of memory above %uGB ignored, pae %d "
781 		    "above4g_allow %d above24g_allow %d\n",
782 		    (u_int)(ign / 1024), (u_int)(lim >> 30), pae_mode,
783 		    above4g_allow, above24g_allow);
784 	}
785 
786 	/*
787 	 * Find insertion point while checking for overlap.  Start off by
788 	 * assuming the new entry will be added to the end.
789 	 */
790 	insert_idx = physmap_idx + 2;
791 	for (i = 0; i <= physmap_idx; i += 2) {
792 		if (base < physmap[i + 1]) {
793 			if (base + length <= physmap[i]) {
794 				insert_idx = i;
795 				break;
796 			}
797 			if (boothowto & RB_VERBOSE)
798 				printf(
799 		    "Overlapping memory regions, ignoring second region\n");
800 			return (1);
801 		}
802 	}
803 
804 	/* See if we can prepend to the next entry. */
805 	if (insert_idx <= physmap_idx && base + length == physmap[insert_idx]) {
806 		physmap[insert_idx] = base;
807 		return (1);
808 	}
809 
810 	/* See if we can append to the previous entry. */
811 	if (insert_idx > 0 && base == physmap[insert_idx - 1]) {
812 		physmap[insert_idx - 1] += length;
813 		return (1);
814 	}
815 
816 	physmap_idx += 2;
817 	*physmap_idxp = physmap_idx;
818 	if (physmap_idx == PHYS_AVAIL_ENTRIES) {
819 		printf(
820 		"Too many segments in the physical address map, giving up\n");
821 		return (0);
822 	}
823 
824 	/*
825 	 * Move the last 'N' entries down to make room for the new
826 	 * entry if needed.
827 	 */
828 	for (i = physmap_idx; i > insert_idx; i -= 2) {
829 		physmap[i] = physmap[i - 2];
830 		physmap[i + 1] = physmap[i - 1];
831 	}
832 
833 	/* Insert the new entry. */
834 	physmap[insert_idx] = base;
835 	physmap[insert_idx + 1] = base + length;
836 	return (1);
837 }
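/*
 * Illustrative layout (hypothetical values): physmap[] holds
 * [base, bound) byte-address pairs, so after two insertions it might
 * read
 *
 *	physmap[0] = 0x00000000	physmap[1] = 0x0009f000
 *	physmap[2] = 0x00100000	physmap[3] = 0x7fe00000
 *
 * with *physmap_idxp indexing the base of the last pair (2 here).
 */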
838 
839 static int
840 add_smap_entry(struct bios_smap *smap, vm_paddr_t *physmap, int *physmap_idxp)
841 {
842 	if (boothowto & RB_VERBOSE)
843 		printf("SMAP type=%02x base=%016llx len=%016llx\n",
844 		    smap->type, smap->base, smap->length);
845 
846 	if (smap->type != SMAP_TYPE_MEMORY)
847 		return (1);
848 
849 	return (add_physmap_entry(smap->base, smap->length, physmap,
850 	    physmap_idxp));
851 }
852 
853 static void
854 add_smap_entries(struct bios_smap *smapbase, vm_paddr_t *physmap,
855     int *physmap_idxp)
856 {
857 	struct bios_smap *smap, *smapend;
858 	u_int32_t smapsize;
859 	/*
860 	 * Memory map from INT 15:E820.
861 	 *
862 	 * subr_module.c says:
863 	 * "Consumer may safely assume that size value precedes data."
864 	 * i.e., a u_int32_t immediately precedes the SMAP data.
865 	 */
866 	smapsize = *((u_int32_t *)smapbase - 1);
867 	smapend = (struct bios_smap *)((uintptr_t)smapbase + smapsize);
868 
869 	for (smap = smapbase; smap < smapend; smap++)
870 		if (!add_smap_entry(smap, physmap, physmap_idxp))
871 			break;
872 }
873 
874 static void
875 basemem_setup(void)
876 {
877 
878 	if (basemem > 640) {
879 		printf("Preposterous BIOS basemem of %uK, truncating to 640K\n",
880 			basemem);
881 		basemem = 640;
882 	}
883 
884 	pmap_basemem_setup(basemem);
885 }
886 
887 /*
888  * Populate the (physmap) array with base/bound pairs describing the
889  * available physical memory in the system, then test this memory and
890  * build the phys_avail array describing the actually-available memory.
891  *
892  * If we cannot accurately determine the physical memory map, then use
893  * the value from the 0xE801 call, and failing that, the RTC.
894  *
895  * Total memory size may be set by the kernel environment variable
896  * hw.physmem or the compile-time define MAXMEM.
897  *
898  * XXX first should be vm_paddr_t.
899  */
900 static void
901 getmemsize(int first)
902 {
903 	int has_smap, off, physmap_idx, pa_indx, da_indx;
904 	u_long memtest;
905 	vm_paddr_t physmap[PHYS_AVAIL_ENTRIES];
906 	quad_t dcons_addr, dcons_size, physmem_tunable;
907 	int hasbrokenint12, i, res __diagused;
908 	u_int extmem;
909 	struct vm86frame vmf;
910 	struct vm86context vmc;
911 	vm_paddr_t pa;
912 	struct bios_smap *smap, *smapbase;
913 	caddr_t kmdp;
914 
915 	has_smap = 0;
916 	bzero(&vmf, sizeof(vmf));
917 	bzero(physmap, sizeof(physmap));
918 	basemem = 0;
919 
920 	/*
921 	 * Tell the physical memory allocator about pages used to store
922 	 * the kernel and preloaded data.  See kmem_bootstrap_free().
923 	 */
924 	vm_phys_early_add_seg((vm_paddr_t)KERNLOAD, trunc_page(first));
925 
926 	TUNABLE_INT_FETCH("hw.above4g_allow", &above4g_allow);
927 	TUNABLE_INT_FETCH("hw.above24g_allow", &above24g_allow);
928 
929 	/*
930 	 * Check if the loader supplied an SMAP memory map.  If so,
931 	 * use that and do not make any VM86 calls.
932 	 */
933 	physmap_idx = 0;
934 	kmdp = preload_search_by_type("elf kernel");
935 	if (kmdp == NULL)
936 		kmdp = preload_search_by_type("elf32 kernel");
937 	smapbase = (struct bios_smap *)preload_search_info(kmdp,
938 	    MODINFO_METADATA | MODINFOMD_SMAP);
939 	if (smapbase != NULL) {
940 		add_smap_entries(smapbase, physmap, &physmap_idx);
941 		has_smap = 1;
942 		goto have_smap;
943 	}
944 
945 	/*
946 	 * Some newer BIOSes have a broken INT 12H implementation
947 	 * which causes a kernel panic immediately.  In this case, we
948 	 * need to use the SMAP to determine the base memory size.
949 	 */
950 	hasbrokenint12 = 0;
951 	TUNABLE_INT_FETCH("hw.hasbrokenint12", &hasbrokenint12);
952 	if (hasbrokenint12 == 0) {
953 		/* Use INT12 to determine base memory size. */
954 		vm86_intcall(0x12, &vmf);
955 		basemem = vmf.vmf_ax;
956 		basemem_setup();
957 	}
958 
959 	/*
960 	 * Fetch the memory map with INT 15:E820.  Map page 1 R/W into
961 	 * the kernel page table so we can use it as a buffer.  The
962 	 * kernel will unmap this page later.
963 	 */
964 	vmc.npages = 0;
965 	smap = (void *)vm86_addpage(&vmc, 1, PMAP_MAP_LOW + ptoa(1));
966 	res = vm86_getptr(&vmc, (vm_offset_t)smap, &vmf.vmf_es, &vmf.vmf_di);
967 	KASSERT(res != 0, ("vm86_getptr() failed: address not found"));
968 
969 	vmf.vmf_ebx = 0;
970 	do {
971 		vmf.vmf_eax = 0xE820;
972 		vmf.vmf_edx = SMAP_SIG;
973 		vmf.vmf_ecx = sizeof(struct bios_smap);
974 		i = vm86_datacall(0x15, &vmf, &vmc);
975 		if (i || vmf.vmf_eax != SMAP_SIG)
976 			break;
977 		has_smap = 1;
978 		if (!add_smap_entry(smap, physmap, &physmap_idx))
979 			break;
980 	} while (vmf.vmf_ebx != 0);
981 
982 have_smap:
983 	/*
984 	 * If we didn't fetch the "base memory" size from INT12,
985 	 * figure it out from the SMAP (or just guess).
986 	 */
987 	if (basemem == 0) {
988 		for (i = 0; i <= physmap_idx; i += 2) {
989 			if (physmap[i] == 0x00000000) {
990 				basemem = physmap[i + 1] / 1024;
991 				break;
992 			}
993 		}
994 
995 		/* XXX: If we couldn't find basemem from SMAP, just guess. */
996 		if (basemem == 0)
997 			basemem = 640;
998 		basemem_setup();
999 	}
1000 
1001 	if (physmap[1] != 0)
1002 		goto physmap_done;
1003 
1004 	/*
1005 	 * If we failed to find an SMAP, figure out the extended
1006 	 * memory size.  We will then build a simple memory map with
1007 	 * two segments, one for "base memory" and the second for
1008 	 * "extended memory".  Note that "extended memory" starts at a
1009 	 * physical address of 1MB and that both basemem and extmem
1010 	 * are in units of 1KB.
1011 	 *
1012 	 * First, try to fetch the extended memory size via INT 15:E801.
1013 	 */
1014 	vmf.vmf_ax = 0xE801;
1015 	if (vm86_intcall(0x15, &vmf) == 0) {
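		/*
		 * E801 reports %cx as 1KB blocks below 16MB and %dx as
		 * 64KB blocks above 16MB, so the sum is in KB.
		 */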
1016 		extmem = vmf.vmf_cx + vmf.vmf_dx * 64;
1017 	} else {
1018 		/*
1019 		 * If INT15:E801 fails, this is our last ditch effort
1020 		 * to determine the extended memory size.  Currently
1021 		 * we prefer the RTC value over INT15:88.
1022 		 */
1023 #if 0
1024 		vmf.vmf_ah = 0x88;
1025 		vm86_intcall(0x15, &vmf);
1026 		extmem = vmf.vmf_ax;
1027 #else
1028 		extmem = rtcin(RTC_EXTLO) + (rtcin(RTC_EXTHI) << 8);
1029 #endif
1030 	}
1031 
1032 	/*
1033 	 * Special hack for chipsets that still remap the 384k hole when
1034 	 * there's 16MB of memory - this really confuses people that
1035 	 * are trying to use bus mastering ISA controllers with the
1036 	 * "16MB limit"; they only have 16MB, but the remapping puts
1037 	 * them beyond the limit.
1038 	 *
1039 	 * If extended memory is between 15-16MB (16-17MB phys address range),
1040 	 *	chop it to 15MB.
1041 	 */
1042 	if ((extmem > 15 * 1024) && (extmem < 16 * 1024))
1043 		extmem = 15 * 1024;
1044 
1045 	physmap[0] = 0;
1046 	physmap[1] = basemem * 1024;
1047 	physmap_idx = 2;
1048 	physmap[physmap_idx] = 0x100000;
1049 	physmap[physmap_idx + 1] = physmap[physmap_idx] + extmem * 1024;
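	/*
	 * For example (hypothetical sizes), basemem = 640 and
	 * extmem = 64512 (63MB) would yield the two-segment map
	 *	physmap[0..1] = [0, 0xa0000)
	 *	physmap[2..3] = [0x100000, 0x100000 + 64512 * 1024)
	 */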
1050 
1051 physmap_done:
1052 	/*
1053 	 * Now, physmap contains a map of physical memory.
1054 	 */
1055 
1056 #ifdef SMP
1057 	/* make hole for AP bootstrap code */
1058 	alloc_ap_trampoline(physmap, &physmap_idx);
1059 #endif
1060 
1061 	/*
1062 	 * Maxmem isn't the "maximum memory", it's one larger than the
1063 	 * highest page of the physical address space.  It should be
1064 	 * called something like "Maxphyspage".  We may adjust this
1065 	 * based on ``hw.physmem'' and the results of the memory test.
1066 	 *
1067 	 * This is especially confusing when it is much larger than the
1068 	 * memory size and is displayed as "realmem".
1069 	 */
1070 	Maxmem = atop(physmap[physmap_idx + 1]);
1071 
1072 #ifdef MAXMEM
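	/* MAXMEM is specified in kilobytes; convert to 4KB pages. */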
1073 	Maxmem = MAXMEM / 4;
1074 #endif
1075 
1076 	if (TUNABLE_QUAD_FETCH("hw.physmem", &physmem_tunable))
1077 		Maxmem = atop(physmem_tunable);
1078 
1079 	/*
1080 	 * If we have an SMAP, don't allow MAXMEM or hw.physmem to extend
1081 	 * the amount of memory in the system.
1082 	 */
1083 	if (has_smap && Maxmem > atop(physmap[physmap_idx + 1]))
1084 		Maxmem = atop(physmap[physmap_idx + 1]);
1085 
1086 	/*
1087 	 * The boot memory test is disabled by default, as it takes a
1088 	 * significant amount of time on large-memory systems, and is
1089 	 * unfriendly to virtual machines as it unnecessarily touches all
1090 	 * pages.
1091 	 *
1092 	 * A general name is used as the code may be extended to support
1093 	 * additional tests beyond the current "page present" test.
1094 	 */
1095 	memtest = 0;
1096 	TUNABLE_ULONG_FETCH("hw.memtest.tests", &memtest);
1097 
1098 	if (atop(physmap[physmap_idx + 1]) != Maxmem &&
1099 	    (boothowto & RB_VERBOSE))
1100 		printf("Physical memory use set to %ldK\n", Maxmem * 4);
1101 
1102 	/*
1103 	 * If Maxmem has been increased beyond what the system has detected,
1104 	 * extend the last memory segment to the new limit.
1105 	 */
1106 	if (atop(physmap[physmap_idx + 1]) < Maxmem)
1107 		physmap[physmap_idx + 1] = ptoa((vm_paddr_t)Maxmem);
1108 
1109 	/* call pmap initialization to make new kernel address space */
1110 	pmap_bootstrap(first);
1111 
1112 	/*
1113 	 * Size up each available chunk of physical memory.
1114 	 */
1115 	physmap[0] = PAGE_SIZE;		/* mask off page 0 */
1116 	pa_indx = 0;
1117 	da_indx = 1;
1118 	phys_avail[pa_indx++] = physmap[0];
1119 	phys_avail[pa_indx] = physmap[0];
1120 	dump_avail[da_indx] = physmap[0];
1121 
1122 	/*
1123 	 * Get dcons buffer address
1124 	 */
1125 	if (getenv_quad("dcons.addr", &dcons_addr) == 0 ||
1126 	    getenv_quad("dcons.size", &dcons_size) == 0)
1127 		dcons_addr = 0;
1128 
1129 	/*
1130 	 * physmap is in bytes, so when converting to page boundaries,
1131 	 * round up the start address and round down the end address.
1132 	 */
1133 	for (i = 0; i <= physmap_idx; i += 2) {
1134 		vm_paddr_t end;
1135 
1136 		end = ptoa((vm_paddr_t)Maxmem);
1137 		if (physmap[i + 1] < end)
1138 			end = trunc_page(physmap[i + 1]);
1139 		for (pa = round_page(physmap[i]); pa < end; pa += PAGE_SIZE) {
1140 			int tmp, page_bad, full;
1141 			int *ptr;
1142 
1143 			full = FALSE;
1144 			/*
1145 			 * block out kernel memory as not available.
1146 			 */
1147 			if (pa >= KERNLOAD && pa < first)
1148 				goto do_dump_avail;
1149 
1150 			/*
1151 			 * block out dcons buffer
1152 			 */
1153 			if (dcons_addr > 0
1154 			    && pa >= trunc_page(dcons_addr)
1155 			    && pa < dcons_addr + dcons_size)
1156 				goto do_dump_avail;
1157 
1158 			page_bad = FALSE;
1159 			if (memtest == 0)
1160 				goto skip_memtest;
1161 
1162 			/*
1163 			 * map page into kernel: valid, read/write,non-cacheable
1164 			 * map page into kernel: valid, read/write, non-cacheable
1165 			ptr = (int *)pmap_cmap3(pa, PG_V | PG_RW | PG_N);
1166 
1167 			tmp = *(int *)ptr;
1168 			/*
1169 			 * Test for alternating 1's and 0's
1170 			 */
1171 			*(volatile int *)ptr = 0xaaaaaaaa;
1172 			if (*(volatile int *)ptr != 0xaaaaaaaa)
1173 				page_bad = TRUE;
1174 			/*
1175 			 * Test for alternating 0's and 1's
1176 			 */
1177 			*(volatile int *)ptr = 0x55555555;
1178 			if (*(volatile int *)ptr != 0x55555555)
1179 				page_bad = TRUE;
1180 			/*
1181 			 * Test for all 1's
1182 			 */
1183 			*(volatile int *)ptr = 0xffffffff;
1184 			if (*(volatile int *)ptr != 0xffffffff)
1185 				page_bad = TRUE;
1186 			/*
1187 			 * Test for all 0's
1188 			 */
1189 			*(volatile int *)ptr = 0x0;
1190 			if (*(volatile int *)ptr != 0x0)
1191 				page_bad = TRUE;
1192 			/*
1193 			 * Restore original value.
1194 			 */
1195 			*(int *)ptr = tmp;
1196 
1197 skip_memtest:
1198 			/*
1199 			 * Adjust array of valid/good pages.
1200 			 */
1201 			if (page_bad == TRUE)
1202 				continue;
1203 			/*
1204 			 * If this good page is a continuation of the
1205 			 * previous set of good pages, then just increase
1206 			 * the end pointer. Otherwise start a new chunk.
1207 			 * Note that "end" points one page past the last good page,
1208 			 * making the range >= start and < end.
1209 			 * If we're also doing a speculative memory
1210 			 * test and we're at or past the end, bump up Maxmem
1211 			 * so that we keep going. The first bad page
1212 			 * will terminate the loop.
1213 			 */
1214 			if (phys_avail[pa_indx] == pa) {
1215 				phys_avail[pa_indx] += PAGE_SIZE;
1216 			} else {
1217 				pa_indx++;
1218 				if (pa_indx == PHYS_AVAIL_ENTRIES) {
1219 					printf(
1220 		"Too many holes in the physical address space, giving up\n");
1221 					pa_indx--;
1222 					full = TRUE;
1223 					goto do_dump_avail;
1224 				}
1225 				phys_avail[pa_indx++] = pa;	/* start */
1226 				phys_avail[pa_indx] = pa + PAGE_SIZE; /* end */
1227 			}
1228 			physmem++;
1229 do_dump_avail:
1230 			if (dump_avail[da_indx] == pa) {
1231 				dump_avail[da_indx] += PAGE_SIZE;
1232 			} else {
1233 				da_indx++;
1234 				if (da_indx == PHYS_AVAIL_ENTRIES) {
1235 					da_indx--;
1236 					goto do_next;
1237 				}
1238 				dump_avail[da_indx++] = pa;	/* start */
1239 				dump_avail[da_indx] = pa + PAGE_SIZE; /* end */
1240 			}
1241 do_next:
1242 			if (full)
1243 				break;
1244 		}
1245 	}
1246 	pmap_cmap3(0, 0);
1247 
1248 	/*
1249 	 * XXX
1250 	 * The last chunk must contain at least one page plus the message
1251 	 * buffer to avoid complicating other code (message buffer address
1252 	 * calculation, etc.).
1253 	 */
1254 	while (phys_avail[pa_indx - 1] + PAGE_SIZE +
1255 	    round_page(msgbufsize) >= phys_avail[pa_indx]) {
1256 		physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]);
1257 		phys_avail[pa_indx--] = 0;
1258 		phys_avail[pa_indx--] = 0;
1259 	}
1260 
1261 	Maxmem = atop(phys_avail[pa_indx]);
1262 
1263 	/* Trim off space for the message buffer. */
1264 	phys_avail[pa_indx] -= round_page(msgbufsize);
1265 
1266 	/* Map the message buffer. */
1267 	for (off = 0; off < round_page(msgbufsize); off += PAGE_SIZE)
1268 		pmap_kenter((vm_offset_t)msgbufp + off, phys_avail[pa_indx] +
1269 		    off);
1270 }
1271 
1272 static void
1273 i386_kdb_init(void)
1274 {
1275 #ifdef DDB
1276 	db_fetch_ksymtab(bootinfo.bi_symtab, bootinfo.bi_esymtab, 0);
1277 #endif
1278 	kdb_init();
1279 #ifdef KDB
1280 	if (boothowto & RB_KDB)
1281 		kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
1282 #endif
1283 }
1284 
1285 static void
1286 fixup_idt(void)
1287 {
1288 	struct gate_descriptor *ip;
1289 	uintptr_t off;
1290 	int x;
1291 
1292 	for (x = 0; x < NIDT; x++) {
1293 		ip = &idt[x];
1294 		if (ip->gd_type != SDT_SYS386IGT &&
1295 		    ip->gd_type != SDT_SYS386TGT)
1296 			continue;
1297 		off = ip->gd_looffset + (((u_int)ip->gd_hioffset) << 16);
1298 		KASSERT(off >= (uintptr_t)start_exceptions &&
1299 		    off < (uintptr_t)end_exceptions,
1300 		    ("IDT[%d] type %d off %#x", x, ip->gd_type, off));
1301 		off += setidt_disp;
1302 		MPASS(off >= PMAP_TRM_MIN_ADDRESS &&
1303 		    off < PMAP_TRM_MAX_ADDRESS);
1304 		ip->gd_looffset = off;
1305 		ip->gd_hioffset = off >> 16;
1306 	}
1307 }
1308 
1309 static void
1310 i386_setidt1(void)
1311 {
1312 	int x;
1313 
1314 	/* exceptions */
1315 	for (x = 0; x < NIDT; x++)
1316 		setidt(x, &IDTVEC(rsvd), SDT_SYS386IGT, SEL_KPL,
1317 		    GSEL(GCODE_SEL, SEL_KPL));
1318 	setidt(IDT_DE, &IDTVEC(div), SDT_SYS386IGT, SEL_KPL,
1319 	    GSEL(GCODE_SEL, SEL_KPL));
1320 	setidt(IDT_DB, &IDTVEC(dbg), SDT_SYS386IGT, SEL_KPL,
1321 	    GSEL(GCODE_SEL, SEL_KPL));
1322 	setidt(IDT_NMI, &IDTVEC(nmi), SDT_SYS386IGT, SEL_KPL,
1323 	    GSEL(GCODE_SEL, SEL_KPL));
1324 	setidt(IDT_BP, &IDTVEC(bpt), SDT_SYS386IGT, SEL_UPL,
1325 	    GSEL(GCODE_SEL, SEL_KPL));
1326 	setidt(IDT_OF, &IDTVEC(ofl), SDT_SYS386IGT, SEL_UPL,
1327 	    GSEL(GCODE_SEL, SEL_KPL));
1328 	setidt(IDT_BR, &IDTVEC(bnd), SDT_SYS386IGT, SEL_KPL,
1329 	    GSEL(GCODE_SEL, SEL_KPL));
1330 	setidt(IDT_UD, &IDTVEC(ill), SDT_SYS386IGT, SEL_KPL,
1331 	    GSEL(GCODE_SEL, SEL_KPL));
1332 	setidt(IDT_NM, &IDTVEC(dna), SDT_SYS386IGT, SEL_KPL,
1333 	    GSEL(GCODE_SEL, SEL_KPL));
1334 	setidt(IDT_DF, 0, SDT_SYSTASKGT, SEL_KPL, GSEL(GPANIC_SEL,
1335 	    SEL_KPL));
1336 	setidt(IDT_FPUGP, &IDTVEC(fpusegm), SDT_SYS386IGT,
1337 	    SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
1338 	setidt(IDT_TS, &IDTVEC(tss), SDT_SYS386IGT, SEL_KPL,
1339 	    GSEL(GCODE_SEL, SEL_KPL));
1340 	setidt(IDT_NP, &IDTVEC(missing), SDT_SYS386IGT, SEL_KPL,
1341 	    GSEL(GCODE_SEL, SEL_KPL));
1342 	setidt(IDT_SS, &IDTVEC(stk), SDT_SYS386IGT, SEL_KPL,
1343 	    GSEL(GCODE_SEL, SEL_KPL));
1344 	setidt(IDT_GP, &IDTVEC(prot), SDT_SYS386IGT, SEL_KPL,
1345 	    GSEL(GCODE_SEL, SEL_KPL));
1346 	setidt(IDT_PF, &IDTVEC(page), SDT_SYS386IGT, SEL_KPL,
1347 	    GSEL(GCODE_SEL, SEL_KPL));
1348 	setidt(IDT_MF, &IDTVEC(fpu), SDT_SYS386IGT, SEL_KPL,
1349 	    GSEL(GCODE_SEL, SEL_KPL));
1350 	setidt(IDT_AC, &IDTVEC(align), SDT_SYS386IGT, SEL_KPL,
1351 	    GSEL(GCODE_SEL, SEL_KPL));
1352 	setidt(IDT_MC, &IDTVEC(mchk), SDT_SYS386IGT, SEL_KPL,
1353 	    GSEL(GCODE_SEL, SEL_KPL));
1354 	setidt(IDT_XF, &IDTVEC(xmm), SDT_SYS386IGT, SEL_KPL,
1355 	    GSEL(GCODE_SEL, SEL_KPL));
1356 	setidt(IDT_SYSCALL, &IDTVEC(int0x80_syscall),
1357 	    SDT_SYS386IGT, SEL_UPL, GSEL(GCODE_SEL, SEL_KPL));
1358 #ifdef KDTRACE_HOOKS
1359 	setidt(IDT_DTRACE_RET, &IDTVEC(dtrace_ret),
1360 	    SDT_SYS386IGT, SEL_UPL, GSEL(GCODE_SEL, SEL_KPL));
1361 #endif
1362 #ifdef XENHVM
1363 	setidt(IDT_EVTCHN, &IDTVEC(xen_intr_upcall),
1364 	    SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
1365 #endif
1366 }
1367 
1368 static void
1369 i386_setidt2(void)
1370 {
1371 
1372 	setidt(IDT_UD, &IDTVEC(ill), SDT_SYS386IGT, SEL_KPL,
1373 	    GSEL(GCODE_SEL, SEL_KPL));
1374 	setidt(IDT_GP, &IDTVEC(prot), SDT_SYS386IGT, SEL_KPL,
1375 	    GSEL(GCODE_SEL, SEL_KPL));
1376 }
1377 
1378 #if defined(DEV_ISA) && !defined(DEV_ATPIC)
1379 static void
1380 i386_setidt3(void)
1381 {
1382 
1383 	setidt(IDT_IO_INTS + 7, IDTVEC(spuriousint),
1384 	    SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
1385 	setidt(IDT_IO_INTS + 15, IDTVEC(spuriousint),
1386 	    SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
1387 }
1388 #endif
1389 
1390 register_t
1391 init386(int first)
1392 {
1393 	struct region_descriptor r_gdt, r_idt;	/* table descriptors */
1394 	int gsel_tss, metadata_missing, x, pa;
1395 	struct pcpu *pc;
1396 	struct xstate_hdr *xhdr;
1397 	caddr_t kmdp;
1398 	vm_offset_t addend;
1399 	size_t ucode_len;
1400 
1401 	thread0.td_kstack = proc0kstack;
1402 	thread0.td_kstack_pages = TD0_KSTACK_PAGES;
1403 
1404 	/*
1405 	 * This may be done better later if it gets more high level
1406 	 * components in it.  If so, just link td->td_proc here.
1407 	 */
1408 	proc_linkup0(&proc0, &thread0);
1409 
1410 	if (bootinfo.bi_modulep) {
1411 		metadata_missing = 0;
1412 		addend = (vm_paddr_t)bootinfo.bi_modulep < KERNBASE ?
1413 		    PMAP_MAP_LOW : 0;
1414 		preload_metadata = (caddr_t)bootinfo.bi_modulep + addend;
1415 		preload_bootstrap_relocate(addend);
1416 	} else {
1417 		metadata_missing = 1;
1418 	}
1419 
1420 	if (bootinfo.bi_envp != 0) {
1421 		addend = (vm_paddr_t)bootinfo.bi_envp < KERNBASE ?
1422 		    PMAP_MAP_LOW : 0;
1423 		init_static_kenv((char *)bootinfo.bi_envp + addend, 0);
1424 	} else {
1425 		init_static_kenv(NULL, 0);
1426 	}
1427 
1428 	/*
1429 	 * Re-evaluate CPU features if we loaded a microcode update.
1430 	 */
1431 	ucode_len = ucode_load_bsp(first);
1432 	if (ucode_len != 0) {
1433 		identify_cpu();
1434 		first = roundup2(first + ucode_len, PAGE_SIZE);
1435 	}
1436 
1437 	identify_hypervisor();
1438 	identify_hypervisor_smbios();
1439 
1440 	/* Init basic tunables, hz etc */
1441 	init_param1();
1442 
1443 	/* Set bootmethod to BIOS: it's the only method supported on i386. */
1444 	strlcpy(bootmethod, "BIOS", sizeof(bootmethod));
1445 
1446 	/*
1447 	 * Make gdt memory segments.  All segments cover the full 4GB
1448 	 * of address space and permissions are enforced at page level.
1449 	 */
1450 	gdt_segs[GCODE_SEL].ssd_limit = atop(0 - 1);
1451 	gdt_segs[GDATA_SEL].ssd_limit = atop(0 - 1);
1452 	gdt_segs[GUCODE_SEL].ssd_limit = atop(0 - 1);
1453 	gdt_segs[GUDATA_SEL].ssd_limit = atop(0 - 1);
1454 	gdt_segs[GUFS_SEL].ssd_limit = atop(0 - 1);
1455 	gdt_segs[GUGS_SEL].ssd_limit = atop(0 - 1);
1456 
1457 	pc = &__pcpu[0];
1458 	gdt_segs[GPRIV_SEL].ssd_limit = atop(0 - 1);
1459 	gdt_segs[GPRIV_SEL].ssd_base = (int)pc;
1460 	gdt_segs[GPROC0_SEL].ssd_base = (int)&common_tss0;
1461 
1462 	for (x = 0; x < NGDT; x++)
1463 		ssdtosd(&gdt_segs[x], &gdt0[x].sd);
1464 
1465 	r_gdt.rd_limit = NGDT * sizeof(gdt0[0]) - 1;
1466 	r_gdt.rd_base =  (int)gdt0;
1467 	mtx_init(&dt_lock, "descriptor tables", NULL, MTX_SPIN);
1468 	lgdt(&r_gdt);
1469 
1470 	pcpu_init(pc, 0, sizeof(struct pcpu));
1471 	for (pa = first; pa < first + DPCPU_SIZE; pa += PAGE_SIZE)
1472 		pmap_kenter(pa, pa);
1473 	dpcpu_init((void *)first, 0);
1474 	first += DPCPU_SIZE;
1475 	PCPU_SET(prvspace, pc);
1476 	PCPU_SET(curthread, &thread0);
1477 	/* Non-late cninit() and printf() can be moved up to here. */
1478 
1479 	/*
1480 	 * Initialize mutexes.
1481 	 *
1482 	 * icu_lock: in order to allow an interrupt to occur in a critical
1483 	 * 	     section, to set pcpu->ipending (etc...) properly, we
1484 	 *	     must be able to get the icu lock, so it can't be
1485 	 *	     under witness.
1486 	 */
1487 	mutex_init();
1488 	mtx_init(&icu_lock, "icu", NULL, MTX_SPIN | MTX_NOWITNESS | MTX_NOPROFILE);
1489 
1490 	i386_setidt1();
1491 
1492 	r_idt.rd_limit = sizeof(idt0) - 1;
1493 	r_idt.rd_base = (int) idt;
1494 	lidt(&r_idt);
1495 
1496 	finishidentcpu();	/* Final stage of CPU initialization */
1497 
1498 	/*
1499 	 * Initialize the clock before the console so that console
1500 	 * initialization can use DELAY().
1501 	 */
1502 	clock_init();
1503 
1504 	i386_setidt2();
1505 	pmap_set_nx();
1506 	initializecpu();	/* Initialize CPU registers */
1507 	initializecpucache();
1508 
1509 	/* pointer to selector slot for %fs/%gs */
1510 	PCPU_SET(fsgs_gdt, &gdt[GUFS_SEL].sd);
1511 
1512 	/* Initialize the tss (except for the final esp0) early for vm86. */
1513 	common_tss0.tss_esp0 = thread0.td_kstack + thread0.td_kstack_pages *
1514 	    PAGE_SIZE - VM86_STACK_SPACE;
1515 	common_tss0.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
1516 	common_tss0.tss_ioopt = sizeof(struct i386tss) << 16;
1517 	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
1518 	PCPU_SET(tss_gdt, &gdt[GPROC0_SEL].sd);
1519 	PCPU_SET(common_tssd, *PCPU_GET(tss_gdt));
1520 	ltr(gsel_tss);
1521 
1522 	/* Initialize the PIC early for vm86 calls. */
1523 #ifdef DEV_ISA
1524 #ifdef DEV_ATPIC
1525 	elcr_probe();
1526 	atpic_startup();
1527 #else
1528 	/* Reset and mask the atpics and leave them shut down. */
1529 	atpic_reset();
1530 
1531 	/*
1532 	 * Point the ICU spurious interrupt vectors at the APIC spurious
1533 	 * interrupt handler.
1534 	 */
1535 	i386_setidt3();
1536 #endif
1537 #endif
1538 
1539 	/*
1540 	 * The console and kdb should be initialized even earlier than here,
1541 	 * but some console drivers don't work until after getmemsize().
1542 	 * Default to late console initialization to support these drivers.
1543 	 * This loses mainly printf()s in getmemsize() and early debugging.
1544 	 */
1545 	TUNABLE_INT_FETCH("debug.late_console", &late_console);
1546 	if (!late_console) {
1547 		cninit();
1548 		i386_kdb_init();
1549 	}
1550 
1551 	kmdp = preload_search_by_type("elf kernel");
1552 	link_elf_ireloc(kmdp);
1553 
1554 	vm86_initialize();
1555 	getmemsize(first);
1556 	init_param2(physmem);
1557 
1558 	/* Now running on new page tables, configured, and u/iom is accessible. */
1559 
1560 	if (late_console)
1561 		cninit();
1562 
1563 	if (metadata_missing)
1564 		printf("WARNING: loader(8) metadata is missing!\n");
1565 
1566 	if (late_console)
1567 		i386_kdb_init();
1568 
1569 	msgbufinit(msgbufp, msgbufsize);
1570 	npxinit(true);
1571 	/*
1572 	 * Set up thread0 pcb after npxinit calculated pcb + fpu save
1573 	 * area size.  Zero out the extended state header in fpu save
1574 	 * area.
1575 	 */
1576 	thread0.td_pcb = get_pcb_td(&thread0);
1577 	thread0.td_pcb->pcb_save = get_pcb_user_save_td(&thread0);
1578 	bzero(get_pcb_user_save_td(&thread0), cpu_max_ext_state_size);
1579 	if (use_xsave) {
1580 		xhdr = (struct xstate_hdr *)(get_pcb_user_save_td(&thread0) +
1581 		    1);
1582 		xhdr->xstate_bv = xsave_mask;
1583 	}
1584 	PCPU_SET(curpcb, thread0.td_pcb);
1585 	/* Move esp0 in the tss to its final place. */
1586 	/* The VM86_STACK_SPACE (16 byte) gap lets the trapframe grow if we came from vm86. */
1587 	common_tss0.tss_esp0 = (vm_offset_t)thread0.td_pcb - VM86_STACK_SPACE;
1588 	PCPU_SET(kesp0, common_tss0.tss_esp0);
1589 	gdt[GPROC0_SEL].sd.sd_type = SDT_SYS386TSS;	/* clear busy bit */
1590 	ltr(gsel_tss);
1591 
1592 	/* transfer to user mode */
1593 
1594 	_ucodesel = GSEL(GUCODE_SEL, SEL_UPL);
1595 	_udatasel = GSEL(GUDATA_SEL, SEL_UPL);
1596 
1597 	/* setup proc 0's pcb */
1598 	thread0.td_pcb->pcb_flags = 0;
1599 	thread0.td_pcb->pcb_cr3 = pmap_get_kcr3();
1600 	thread0.td_pcb->pcb_ext = 0;
1601 	thread0.td_frame = &proc0_tf;
1602 
1603 #ifdef FDT
1604 	x86_init_fdt();
1605 #endif
1606 
1607 	/* Location of kernel stack for locore */
1608 	return ((register_t)thread0.td_pcb);
1609 }
1610 
1611 static void
1612 machdep_init_trampoline(void)
1613 {
1614 	struct region_descriptor r_gdt, r_idt;
1615 	struct i386tss *tss;
1616 	char *copyout_buf, *trampoline, *tramp_stack_base;
1617 	int x;
1618 
1619 	gdt = pmap_trm_alloc(sizeof(union descriptor) * NGDT * mp_ncpus,
1620 	    M_NOWAIT | M_ZERO);
1621 	bcopy(gdt0, gdt, sizeof(union descriptor) * NGDT);
1622 	r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
1623 	r_gdt.rd_base = (int)gdt;
1624 	lgdt(&r_gdt);
1625 
1626 	tss = pmap_trm_alloc(sizeof(struct i386tss) * mp_ncpus,
1627 	    M_NOWAIT | M_ZERO);
1628 	bcopy(&common_tss0, tss, sizeof(struct i386tss));
1629 	gdt[GPROC0_SEL].sd.sd_lobase = (int)tss;
1630 	gdt[GPROC0_SEL].sd.sd_hibase = (u_int)tss >> 24;
1631 	gdt[GPROC0_SEL].sd.sd_type = SDT_SYS386TSS;
1632 
1633 	PCPU_SET(fsgs_gdt, &gdt[GUFS_SEL].sd);
1634 	PCPU_SET(tss_gdt, &gdt[GPROC0_SEL].sd);
1635 	PCPU_SET(common_tssd, *PCPU_GET(tss_gdt));
1636 	PCPU_SET(common_tssp, tss);
1637 	ltr(GSEL(GPROC0_SEL, SEL_KPL));
1638 
1639 	trampoline = pmap_trm_alloc(end_exceptions - start_exceptions,
1640 	    M_NOWAIT);
1641 	bcopy(start_exceptions, trampoline, end_exceptions - start_exceptions);
1642 	tramp_stack_base = pmap_trm_alloc(TRAMP_STACK_SZ, M_NOWAIT);
1643 	PCPU_SET(trampstk, (uintptr_t)tramp_stack_base + TRAMP_STACK_SZ -
1644 	    VM86_STACK_SPACE);
1645 	tss[0].tss_esp0 = PCPU_GET(trampstk);
1646 
1647 	idt = pmap_trm_alloc(sizeof(idt0), M_NOWAIT | M_ZERO);
1648 	bcopy(idt0, idt, sizeof(idt0));
1649 
1650 	/* Re-initialize new IDT since the handlers were relocated */
1651 	setidt_disp = trampoline - start_exceptions;
1652 	if (bootverbose)
1653 		printf("Trampoline disposition %#zx\n", setidt_disp);
1654 	fixup_idt();
1655 
1656 	r_idt.rd_limit = sizeof(struct gate_descriptor) * NIDT - 1;
1657 	r_idt.rd_base = (int)idt;
1658 	lidt(&r_idt);
1659 
1660 	/* dblfault TSS */
1661 	dblfault_tss = pmap_trm_alloc(sizeof(struct i386tss), M_NOWAIT | M_ZERO);
1662 	dblfault_stack = pmap_trm_alloc(PAGE_SIZE, M_NOWAIT);
1663 	dblfault_tss->tss_esp = dblfault_tss->tss_esp0 =
1664 	    dblfault_tss->tss_esp1 = dblfault_tss->tss_esp2 =
1665 	    (int)dblfault_stack + PAGE_SIZE;
1666 	dblfault_tss->tss_ss = dblfault_tss->tss_ss0 = dblfault_tss->tss_ss1 =
1667 	    dblfault_tss->tss_ss2 = GSEL(GDATA_SEL, SEL_KPL);
1668 	dblfault_tss->tss_cr3 = pmap_get_kcr3();
1669 	dblfault_tss->tss_eip = (int)dblfault_handler;
1670 	dblfault_tss->tss_eflags = PSL_KERNEL;
1671 	dblfault_tss->tss_ds = dblfault_tss->tss_es =
1672 	    dblfault_tss->tss_gs = GSEL(GDATA_SEL, SEL_KPL);
1673 	dblfault_tss->tss_fs = GSEL(GPRIV_SEL, SEL_KPL);
1674 	dblfault_tss->tss_cs = GSEL(GCODE_SEL, SEL_KPL);
1675 	dblfault_tss->tss_ldt = GSEL(GLDT_SEL, SEL_KPL);
1676 	gdt[GPANIC_SEL].sd.sd_lobase = (int)dblfault_tss;
1677 	gdt[GPANIC_SEL].sd.sd_hibase = (u_int)dblfault_tss >> 24;
1678 
1679 	/* make ldt memory segments */
1680 	ldt = pmap_trm_alloc(sizeof(union descriptor) * NLDT,
1681 	    M_NOWAIT | M_ZERO);
1682 	gdt[GLDT_SEL].sd.sd_lobase = (int)ldt;
1683 	gdt[GLDT_SEL].sd.sd_hibase = (u_int)ldt >> 24;
1684 	ldt_segs[LUCODE_SEL].ssd_limit = atop(0 - 1);
1685 	ldt_segs[LUDATA_SEL].ssd_limit = atop(0 - 1);
1686 	for (x = 0; x < nitems(ldt_segs); x++)
1687 		ssdtosd(&ldt_segs[x], &ldt[x].sd);
1688 
1689 	_default_ldt = GSEL(GLDT_SEL, SEL_KPL);
1690 	lldt(_default_ldt);
1691 	PCPU_SET(currentldt, _default_ldt);
1692 
1693 	copyout_buf = pmap_trm_alloc(TRAMP_COPYOUT_SZ, M_NOWAIT);
1694 	PCPU_SET(copyout_buf, copyout_buf);
1695 	copyout_init_tramp();
1696 }
1697 SYSINIT(vm_mem, SI_SUB_VM, SI_ORDER_SECOND, machdep_init_trampoline, NULL);
1698 
1699 #ifdef COMPAT_43
1700 static void
1701 i386_setup_lcall_gate(void)
1702 {
1703 	struct sysentvec *sv;
1704 	struct user_segment_descriptor desc;
1705 	u_int lcall_addr;
1706 
1707 	sv = &elf32_freebsd_sysvec;
1708 	lcall_addr = (uintptr_t)sv->sv_psstrings - sz_lcall_tramp;
1709 
1710 	bzero(&desc, sizeof(desc));
1711 	desc.sd_type = SDT_MEMERA;
1712 	desc.sd_dpl = SEL_UPL;
1713 	desc.sd_p = 1;
1714 	desc.sd_def32 = 1;
1715 	desc.sd_gran = 1;
1716 	desc.sd_lolimit = 0xffff;
1717 	desc.sd_hilimit = 0xf;
1718 	desc.sd_lobase = lcall_addr;
1719 	desc.sd_hibase = lcall_addr >> 24;
1720 	bcopy(&desc, &ldt[LSYS5CALLS_SEL], sizeof(desc));
1721 }
1722 SYSINIT(elf32, SI_SUB_EXEC, SI_ORDER_ANY, i386_setup_lcall_gate, NULL);
1723 #endif
1724 
1725 void
1726 cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
1727 {
1728 
1729 	pcpu->pc_acpi_id = 0xffffffff;
1730 }
1731 
1732 static int
1733 smap_sysctl_handler(SYSCTL_HANDLER_ARGS)
1734 {
1735 	struct bios_smap *smapbase;
1736 	struct bios_smap_xattr smap;
1737 	caddr_t kmdp;
1738 	uint32_t *smapattr;
1739 	int count, error, i;
1740 
1741 	/* Retrieve the system memory map from the loader. */
1742 	kmdp = preload_search_by_type("elf kernel");
1743 	if (kmdp == NULL)
1744 		kmdp = preload_search_by_type("elf32 kernel");
1745 	smapbase = (struct bios_smap *)preload_search_info(kmdp,
1746 	    MODINFO_METADATA | MODINFOMD_SMAP);
1747 	if (smapbase == NULL)
1748 		return (0);
1749 	smapattr = (uint32_t *)preload_search_info(kmdp,
1750 	    MODINFO_METADATA | MODINFOMD_SMAP_XATTR);
1751 	count = *((u_int32_t *)smapbase - 1) / sizeof(*smapbase);
1752 	error = 0;
1753 	for (i = 0; i < count; i++) {
1754 		smap.base = smapbase[i].base;
1755 		smap.length = smapbase[i].length;
1756 		smap.type = smapbase[i].type;
1757 		if (smapattr != NULL)
1758 			smap.xattr = smapattr[i];
1759 		else
1760 			smap.xattr = 0;
1761 		error = SYSCTL_OUT(req, &smap, sizeof(smap));
1762 	}
1763 	return (error);
1764 }
1765 SYSCTL_PROC(_machdep, OID_AUTO, smap,
1766     CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
1767     smap_sysctl_handler, "S,bios_smap_xattr",
1768     "Raw BIOS SMAP data");
1769 
1770 void
1771 spinlock_enter(void)
1772 {
1773 	struct thread *td;
1774 	register_t flags;
1775 
1776 	td = curthread;
1777 	if (td->td_md.md_spinlock_count == 0) {
1778 		flags = intr_disable();
1779 		td->td_md.md_spinlock_count = 1;
1780 		td->td_md.md_saved_flags = flags;
1781 		critical_enter();
1782 	} else
1783 		td->td_md.md_spinlock_count++;
1784 }
1785 
1786 void
1787 spinlock_exit(void)
1788 {
1789 	struct thread *td;
1790 	register_t flags;
1791 
1792 	td = curthread;
1793 	flags = td->td_md.md_saved_flags;
1794 	td->td_md.md_spinlock_count--;
1795 	if (td->td_md.md_spinlock_count == 0) {
1796 		critical_exit();
1797 		intr_restore(flags);
1798 	}
1799 }
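/*
 * Usage sketch: spinlock sections nest, and the interrupt flag is only
 * touched at the outermost level:
 *
 *	spinlock_enter();	count 0 -> 1, saves flags, disables interrupts
 *	spinlock_enter();	count 1 -> 2, no flag change
 *	spinlock_exit();	count 2 -> 1
 *	spinlock_exit();	count 1 -> 0, critical_exit(), restores flags
 */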
1800 
1801 #if defined(I586_CPU) && !defined(NO_F00F_HACK)
1802 static void f00f_hack(void *unused);
1803 SYSINIT(f00f_hack, SI_SUB_INTRINSIC, SI_ORDER_FIRST, f00f_hack, NULL);
1804 
1805 static void
1806 f00f_hack(void *unused)
1807 {
1808 	struct region_descriptor r_idt;
1809 	struct gate_descriptor *new_idt;
1810 	vm_offset_t tmp;
1811 
1812 	if (!has_f00f_bug)
1813 		return;
1814 
1815 	printf("Intel Pentium detected, installing workaround for F00F bug\n");
1816 
1817 	tmp = (vm_offset_t)pmap_trm_alloc(PAGE_SIZE * 3, M_NOWAIT | M_ZERO);
1818 	if (tmp == 0)
1819 		panic("pmap_trm_alloc returned 0");
1820 	tmp = round_page(tmp);
1821 
1822 	/* Put the problematic entry (#6) at the end of the lower page. */
1823 	new_idt = (struct gate_descriptor *)
1824 	    (tmp + PAGE_SIZE - 7 * sizeof(struct gate_descriptor));
1825 	bcopy(idt, new_idt, sizeof(idt0));
1826 	r_idt.rd_base = (u_int)new_idt;
1827 	r_idt.rd_limit = sizeof(idt0) - 1;
1828 	lidt(&r_idt);
1829 	/* Switch to the new IDT; the previous copy is no longer used. */
1830 	idt = new_idt;
1831 	pmap_protect(kernel_pmap, tmp, tmp + PAGE_SIZE, VM_PROT_READ);
1832 }
1833 #endif /* defined(I586_CPU) && !NO_F00F_HACK */
1834 
1835 /*
1836  * Construct a PCB from a trapframe. This is called from kdb_trap() where
1837  * we want to start a backtrace from the function that caused us to enter
1838  * the debugger. We have the context in the trapframe, but base the trace
1839  * on the PCB. The PCB doesn't have to be perfect, as long as it contains
1840  * enough for a backtrace.
1841  */
1842 void
1843 makectx(struct trapframe *tf, struct pcb *pcb)
1844 {
1845 
1846 	pcb->pcb_edi = tf->tf_edi;
1847 	pcb->pcb_esi = tf->tf_esi;
1848 	pcb->pcb_ebp = tf->tf_ebp;
1849 	pcb->pcb_ebx = tf->tf_ebx;
1850 	pcb->pcb_eip = tf->tf_eip;
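	/*
	 * A trap from kernel mode (ISPL(tf_cs) == 0) does not push
	 * %esp/%ss, so the pre-trap stack pointer is the end of the
	 * frame minus those two 4-byte slots.
	 */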
1851 	pcb->pcb_esp = (ISPL(tf->tf_cs)) ? tf->tf_esp : (int)(tf + 1) - 8;
1852 	pcb->pcb_gs = rgs();
1853 }
1854 
1855 #ifdef KDB
1856 
1857 /*
1858  * Provide inb() and outb() as functions.  They are normally only available as
1859  * inline functions, thus cannot be called from the debugger.
1860  */
1861 
1862 /* silence compiler warnings */
1863 u_char inb_(u_short);
1864 void outb_(u_short, u_char);
1865 
1866 u_char
1867 inb_(u_short port)
1868 {
1869 	return inb(port);
1870 }
1871 
1872 void
1873 outb_(u_short port, u_char data)
1874 {
1875 	outb(port, data);
1876 }
1877 
1878 #endif /* KDB */
1879