1 /*- 2 * SPDX-License-Identifier: BSD-4-Clause 3 * 4 * Copyright (c) 2018 The FreeBSD Foundation 5 * Copyright (c) 1992 Terrence R. Lambert. 6 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California. 7 * All rights reserved. 8 * 9 * This code is derived from software contributed to Berkeley by 10 * William Jolitz. 11 * 12 * Portions of this software were developed by A. Joseph Koshy under 13 * sponsorship from the FreeBSD Foundation and Google, Inc. 14 * 15 * Redistribution and use in source and binary forms, with or without 16 * modification, are permitted provided that the following conditions 17 * are met: 18 * 1. Redistributions of source code must retain the above copyright 19 * notice, this list of conditions and the following disclaimer. 20 * 2. Redistributions in binary form must reproduce the above copyright 21 * notice, this list of conditions and the following disclaimer in the 22 * documentation and/or other materials provided with the distribution. 23 * 3. All advertising materials mentioning features or use of this software 24 * must display the following acknowledgement: 25 * This product includes software developed by the University of 26 * California, Berkeley and its contributors. 27 * 4. Neither the name of the University nor the names of its contributors 28 * may be used to endorse or promote products derived from this software 29 * without specific prior written permission. 30 * 31 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 32 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 33 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 34 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 35 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 39 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 40 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 41 * SUCH DAMAGE. 42 * 43 * from: @(#)machdep.c 7.4 (Berkeley) 6/3/91 44 */ 45 46 #include <sys/cdefs.h> 47 __FBSDID("$FreeBSD$"); 48 49 #include "opt_apic.h" 50 #include "opt_atpic.h" 51 #include "opt_cpu.h" 52 #include "opt_ddb.h" 53 #include "opt_inet.h" 54 #include "opt_isa.h" 55 #include "opt_kstack_pages.h" 56 #include "opt_maxmem.h" 57 #include "opt_perfmon.h" 58 #include "opt_platform.h" 59 60 #include <sys/param.h> 61 #include <sys/proc.h> 62 #include <sys/systm.h> 63 #include <sys/bio.h> 64 #include <sys/buf.h> 65 #include <sys/bus.h> 66 #include <sys/callout.h> 67 #include <sys/cons.h> 68 #include <sys/cpu.h> 69 #include <sys/eventhandler.h> 70 #include <sys/exec.h> 71 #include <sys/imgact.h> 72 #include <sys/kdb.h> 73 #include <sys/kernel.h> 74 #include <sys/ktr.h> 75 #include <sys/linker.h> 76 #include <sys/lock.h> 77 #include <sys/malloc.h> 78 #include <sys/memrange.h> 79 #include <sys/msgbuf.h> 80 #include <sys/mutex.h> 81 #include <sys/pcpu.h> 82 #include <sys/ptrace.h> 83 #include <sys/reboot.h> 84 #include <sys/reg.h> 85 #include <sys/rwlock.h> 86 #include <sys/sched.h> 87 #include <sys/signalvar.h> 88 #include <sys/smp.h> 89 #include <sys/syscallsubr.h> 90 #include <sys/sysctl.h> 91 #include <sys/sysent.h> 92 #include <sys/sysproto.h> 93 #include <sys/ucontext.h> 94 #include <sys/vmmeter.h> 95 96 #include <vm/vm.h> 97 #include <vm/vm_param.h> 98 #include <vm/vm_extern.h> 99 #include <vm/vm_kern.h> 100 #include <vm/vm_page.h> 101 #include <vm/vm_map.h> 102 #include <vm/vm_object.h> 103 #include <vm/vm_pager.h> 104 #include <vm/vm_phys.h> 105 #include <vm/vm_dumpset.h> 106 107 #ifdef DDB 108 #ifndef KDB 109 #error KDB must be enabled in order for DDB to work! 110 #endif 111 #include <ddb/ddb.h> 112 #include <ddb/db_sym.h> 113 #endif 114 115 #include <isa/rtc.h> 116 117 #include <net/netisr.h> 118 119 #include <machine/bootinfo.h> 120 #include <machine/clock.h> 121 #include <machine/cpu.h> 122 #include <machine/cputypes.h> 123 #include <machine/intr_machdep.h> 124 #include <x86/mca.h> 125 #include <machine/md_var.h> 126 #include <machine/metadata.h> 127 #include <machine/pc/bios.h> 128 #include <machine/pcb.h> 129 #include <machine/pcb_ext.h> 130 #include <machine/proc.h> 131 #include <machine/sigframe.h> 132 #include <machine/specialreg.h> 133 #include <machine/sysarch.h> 134 #include <machine/trap.h> 135 #include <x86/ucode.h> 136 #include <machine/vm86.h> 137 #include <x86/init.h> 138 #ifdef PERFMON 139 #include <machine/perfmon.h> 140 #endif 141 #ifdef SMP 142 #include <machine/smp.h> 143 #endif 144 #ifdef FDT 145 #include <x86/fdt.h> 146 #endif 147 148 #ifdef DEV_APIC 149 #include <x86/apicvar.h> 150 #endif 151 152 #ifdef DEV_ISA 153 #include <x86/isa/icu.h> 154 #endif 155 156 /* Sanity check for __curthread() */ 157 CTASSERT(offsetof(struct pcpu, pc_curthread) == 0); 158 159 register_t init386(int first); 160 void dblfault_handler(void); 161 void identify_cpu(void); 162 163 static void cpu_startup(void *); 164 SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL); 165 166 /* Intel ICH registers */ 167 #define ICH_PMBASE 0x400 168 #define ICH_SMI_EN ICH_PMBASE + 0x30 169 170 int _udatasel, _ucodesel; 171 u_int basemem; 172 static int above4g_allow = 1; 173 static int above24g_allow = 0; 174 175 int cold = 1; 176 177 long Maxmem = 0; 178 long realmem = 0; 179 int late_console = 1; 180 181 #ifdef PAE 182 FEATURE(pae, "Physical Address Extensions"); 183 #endif 184 185 struct kva_md_info kmi; 186 187 static struct trapframe proc0_tf; 188 struct pcpu __pcpu[MAXCPU]; 189 190 static void i386_clock_source_init(void); 191 192 struct mtx icu_lock; 193 194 struct mem_range_softc mem_range_softc; 195 196 extern char start_exceptions[], end_exceptions[]; 197 198 extern struct sysentvec elf32_freebsd_sysvec; 199 200 /* Default init_ops implementation. */ 201 struct init_ops init_ops = { 202 .early_clock_source_init = i386_clock_source_init, 203 .early_delay = i8254_delay, 204 }; 205 206 static void 207 i386_clock_source_init(void) 208 { 209 i8254_init(); 210 } 211 212 static void 213 cpu_startup(dummy) 214 void *dummy; 215 { 216 uintmax_t memsize; 217 char *sysenv; 218 219 /* 220 * On MacBooks, we need to disallow the legacy USB circuit to 221 * generate an SMI# because this can cause several problems, 222 * namely: incorrect CPU frequency detection and failure to 223 * start the APs. 224 * We do this by disabling a bit in the SMI_EN (SMI Control and 225 * Enable register) of the Intel ICH LPC Interface Bridge. 226 */ 227 sysenv = kern_getenv("smbios.system.product"); 228 if (sysenv != NULL) { 229 if (strncmp(sysenv, "MacBook1,1", 10) == 0 || 230 strncmp(sysenv, "MacBook3,1", 10) == 0 || 231 strncmp(sysenv, "MacBook4,1", 10) == 0 || 232 strncmp(sysenv, "MacBookPro1,1", 13) == 0 || 233 strncmp(sysenv, "MacBookPro1,2", 13) == 0 || 234 strncmp(sysenv, "MacBookPro3,1", 13) == 0 || 235 strncmp(sysenv, "MacBookPro4,1", 13) == 0 || 236 strncmp(sysenv, "Macmini1,1", 10) == 0) { 237 if (bootverbose) 238 printf("Disabling LEGACY_USB_EN bit on " 239 "Intel ICH.\n"); 240 outl(ICH_SMI_EN, inl(ICH_SMI_EN) & ~0x8); 241 } 242 freeenv(sysenv); 243 } 244 245 /* 246 * Good {morning,afternoon,evening,night}. 247 */ 248 startrtclock(); 249 printcpuinfo(); 250 panicifcpuunsupported(); 251 #ifdef PERFMON 252 perfmon_init(); 253 #endif 254 255 /* 256 * Display physical memory if SMBIOS reports reasonable amount. 257 */ 258 memsize = 0; 259 sysenv = kern_getenv("smbios.memory.enabled"); 260 if (sysenv != NULL) { 261 memsize = (uintmax_t)strtoul(sysenv, (char **)NULL, 10) << 10; 262 freeenv(sysenv); 263 } 264 if (memsize < ptoa((uintmax_t)vm_free_count())) 265 memsize = ptoa((uintmax_t)Maxmem); 266 printf("real memory = %ju (%ju MB)\n", memsize, memsize >> 20); 267 realmem = atop(memsize); 268 269 /* 270 * Display any holes after the first chunk of extended memory. 271 */ 272 if (bootverbose) { 273 int indx; 274 275 printf("Physical memory chunk(s):\n"); 276 for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) { 277 vm_paddr_t size; 278 279 size = phys_avail[indx + 1] - phys_avail[indx]; 280 printf( 281 "0x%016jx - 0x%016jx, %ju bytes (%ju pages)\n", 282 (uintmax_t)phys_avail[indx], 283 (uintmax_t)phys_avail[indx + 1] - 1, 284 (uintmax_t)size, (uintmax_t)size / PAGE_SIZE); 285 } 286 } 287 288 vm_ksubmap_init(&kmi); 289 290 printf("avail memory = %ju (%ju MB)\n", 291 ptoa((uintmax_t)vm_free_count()), 292 ptoa((uintmax_t)vm_free_count()) / 1048576); 293 294 /* 295 * Set up buffers, so they can be used to read disk labels. 296 */ 297 bufinit(); 298 vm_pager_bufferinit(); 299 cpu_setregs(); 300 } 301 302 void 303 cpu_setregs(void) 304 { 305 unsigned int cr0; 306 307 cr0 = rcr0(); 308 309 /* 310 * CR0_MP, CR0_NE and CR0_TS are set for NPX (FPU) support: 311 * 312 * Prepare to trap all ESC (i.e., NPX) instructions and all WAIT 313 * instructions. We must set the CR0_MP bit and use the CR0_TS 314 * bit to control the trap, because setting the CR0_EM bit does 315 * not cause WAIT instructions to trap. It's important to trap 316 * WAIT instructions - otherwise the "wait" variants of no-wait 317 * control instructions would degenerate to the "no-wait" variants 318 * after FP context switches but work correctly otherwise. It's 319 * particularly important to trap WAITs when there is no NPX - 320 * otherwise the "wait" variants would always degenerate. 321 * 322 * Try setting CR0_NE to get correct error reporting on 486DX's. 323 * Setting it should fail or do nothing on lesser processors. 324 */ 325 cr0 |= CR0_MP | CR0_NE | CR0_TS | CR0_WP | CR0_AM; 326 load_cr0(cr0); 327 load_gs(_udatasel); 328 } 329 330 u_long bootdev; /* not a struct cdev *- encoding is different */ 331 SYSCTL_ULONG(_machdep, OID_AUTO, guessed_bootdev, 332 CTLFLAG_RD, &bootdev, 0, "Maybe the Boot device (not in struct cdev *format)"); 333 334 /* 335 * Initialize 386 and configure to run kernel 336 */ 337 338 /* 339 * Initialize segments & interrupt table 340 */ 341 342 int _default_ldt; 343 344 struct mtx dt_lock; /* lock for GDT and LDT */ 345 346 union descriptor gdt0[NGDT]; /* initial global descriptor table */ 347 union descriptor *gdt = gdt0; /* global descriptor table */ 348 349 union descriptor *ldt; /* local descriptor table */ 350 351 static struct gate_descriptor idt0[NIDT]; 352 struct gate_descriptor *idt = &idt0[0]; /* interrupt descriptor table */ 353 354 static struct i386tss *dblfault_tss; 355 static char *dblfault_stack; 356 357 static struct i386tss common_tss0; 358 359 vm_offset_t proc0kstack; 360 361 /* 362 * software prototypes -- in more palatable form. 363 * 364 * GCODE_SEL through GUDATA_SEL must be in this order for syscall/sysret 365 * GUFS_SEL and GUGS_SEL must be in this order (swtch.s knows it) 366 */ 367 struct soft_segment_descriptor gdt_segs[] = { 368 /* GNULL_SEL 0 Null Descriptor */ 369 { .ssd_base = 0x0, 370 .ssd_limit = 0x0, 371 .ssd_type = 0, 372 .ssd_dpl = SEL_KPL, 373 .ssd_p = 0, 374 .ssd_xx = 0, .ssd_xx1 = 0, 375 .ssd_def32 = 0, 376 .ssd_gran = 0 }, 377 /* GPRIV_SEL 1 SMP Per-Processor Private Data Descriptor */ 378 { .ssd_base = 0x0, 379 .ssd_limit = 0xfffff, 380 .ssd_type = SDT_MEMRWA, 381 .ssd_dpl = SEL_KPL, 382 .ssd_p = 1, 383 .ssd_xx = 0, .ssd_xx1 = 0, 384 .ssd_def32 = 1, 385 .ssd_gran = 1 }, 386 /* GUFS_SEL 2 %fs Descriptor for user */ 387 { .ssd_base = 0x0, 388 .ssd_limit = 0xfffff, 389 .ssd_type = SDT_MEMRWA, 390 .ssd_dpl = SEL_UPL, 391 .ssd_p = 1, 392 .ssd_xx = 0, .ssd_xx1 = 0, 393 .ssd_def32 = 1, 394 .ssd_gran = 1 }, 395 /* GUGS_SEL 3 %gs Descriptor for user */ 396 { .ssd_base = 0x0, 397 .ssd_limit = 0xfffff, 398 .ssd_type = SDT_MEMRWA, 399 .ssd_dpl = SEL_UPL, 400 .ssd_p = 1, 401 .ssd_xx = 0, .ssd_xx1 = 0, 402 .ssd_def32 = 1, 403 .ssd_gran = 1 }, 404 /* GCODE_SEL 4 Code Descriptor for kernel */ 405 { .ssd_base = 0x0, 406 .ssd_limit = 0xfffff, 407 .ssd_type = SDT_MEMERA, 408 .ssd_dpl = SEL_KPL, 409 .ssd_p = 1, 410 .ssd_xx = 0, .ssd_xx1 = 0, 411 .ssd_def32 = 1, 412 .ssd_gran = 1 }, 413 /* GDATA_SEL 5 Data Descriptor for kernel */ 414 { .ssd_base = 0x0, 415 .ssd_limit = 0xfffff, 416 .ssd_type = SDT_MEMRWA, 417 .ssd_dpl = SEL_KPL, 418 .ssd_p = 1, 419 .ssd_xx = 0, .ssd_xx1 = 0, 420 .ssd_def32 = 1, 421 .ssd_gran = 1 }, 422 /* GUCODE_SEL 6 Code Descriptor for user */ 423 { .ssd_base = 0x0, 424 .ssd_limit = 0xfffff, 425 .ssd_type = SDT_MEMERA, 426 .ssd_dpl = SEL_UPL, 427 .ssd_p = 1, 428 .ssd_xx = 0, .ssd_xx1 = 0, 429 .ssd_def32 = 1, 430 .ssd_gran = 1 }, 431 /* GUDATA_SEL 7 Data Descriptor for user */ 432 { .ssd_base = 0x0, 433 .ssd_limit = 0xfffff, 434 .ssd_type = SDT_MEMRWA, 435 .ssd_dpl = SEL_UPL, 436 .ssd_p = 1, 437 .ssd_xx = 0, .ssd_xx1 = 0, 438 .ssd_def32 = 1, 439 .ssd_gran = 1 }, 440 /* GBIOSLOWMEM_SEL 8 BIOS access to realmode segment 0x40, must be #8 in GDT */ 441 { .ssd_base = 0x400, 442 .ssd_limit = 0xfffff, 443 .ssd_type = SDT_MEMRWA, 444 .ssd_dpl = SEL_KPL, 445 .ssd_p = 1, 446 .ssd_xx = 0, .ssd_xx1 = 0, 447 .ssd_def32 = 1, 448 .ssd_gran = 1 }, 449 /* GPROC0_SEL 9 Proc 0 Tss Descriptor */ 450 { 451 .ssd_base = 0x0, 452 .ssd_limit = sizeof(struct i386tss)-1, 453 .ssd_type = SDT_SYS386TSS, 454 .ssd_dpl = 0, 455 .ssd_p = 1, 456 .ssd_xx = 0, .ssd_xx1 = 0, 457 .ssd_def32 = 0, 458 .ssd_gran = 0 }, 459 /* GLDT_SEL 10 LDT Descriptor */ 460 { .ssd_base = 0, 461 .ssd_limit = sizeof(union descriptor) * NLDT - 1, 462 .ssd_type = SDT_SYSLDT, 463 .ssd_dpl = SEL_UPL, 464 .ssd_p = 1, 465 .ssd_xx = 0, .ssd_xx1 = 0, 466 .ssd_def32 = 0, 467 .ssd_gran = 0 }, 468 /* GUSERLDT_SEL 11 User LDT Descriptor per process */ 469 { .ssd_base = 0, 470 .ssd_limit = (512 * sizeof(union descriptor)-1), 471 .ssd_type = SDT_SYSLDT, 472 .ssd_dpl = 0, 473 .ssd_p = 1, 474 .ssd_xx = 0, .ssd_xx1 = 0, 475 .ssd_def32 = 0, 476 .ssd_gran = 0 }, 477 /* GPANIC_SEL 12 Panic Tss Descriptor */ 478 { .ssd_base = 0, 479 .ssd_limit = sizeof(struct i386tss)-1, 480 .ssd_type = SDT_SYS386TSS, 481 .ssd_dpl = 0, 482 .ssd_p = 1, 483 .ssd_xx = 0, .ssd_xx1 = 0, 484 .ssd_def32 = 0, 485 .ssd_gran = 0 }, 486 /* GBIOSCODE32_SEL 13 BIOS 32-bit interface (32bit Code) */ 487 { .ssd_base = 0, 488 .ssd_limit = 0xfffff, 489 .ssd_type = SDT_MEMERA, 490 .ssd_dpl = 0, 491 .ssd_p = 1, 492 .ssd_xx = 0, .ssd_xx1 = 0, 493 .ssd_def32 = 0, 494 .ssd_gran = 1 }, 495 /* GBIOSCODE16_SEL 14 BIOS 32-bit interface (16bit Code) */ 496 { .ssd_base = 0, 497 .ssd_limit = 0xfffff, 498 .ssd_type = SDT_MEMERA, 499 .ssd_dpl = 0, 500 .ssd_p = 1, 501 .ssd_xx = 0, .ssd_xx1 = 0, 502 .ssd_def32 = 0, 503 .ssd_gran = 1 }, 504 /* GBIOSDATA_SEL 15 BIOS 32-bit interface (Data) */ 505 { .ssd_base = 0, 506 .ssd_limit = 0xfffff, 507 .ssd_type = SDT_MEMRWA, 508 .ssd_dpl = 0, 509 .ssd_p = 1, 510 .ssd_xx = 0, .ssd_xx1 = 0, 511 .ssd_def32 = 1, 512 .ssd_gran = 1 }, 513 /* GBIOSUTIL_SEL 16 BIOS 16-bit interface (Utility) */ 514 { .ssd_base = 0, 515 .ssd_limit = 0xfffff, 516 .ssd_type = SDT_MEMRWA, 517 .ssd_dpl = 0, 518 .ssd_p = 1, 519 .ssd_xx = 0, .ssd_xx1 = 0, 520 .ssd_def32 = 0, 521 .ssd_gran = 1 }, 522 /* GBIOSARGS_SEL 17 BIOS 16-bit interface (Arguments) */ 523 { .ssd_base = 0, 524 .ssd_limit = 0xfffff, 525 .ssd_type = SDT_MEMRWA, 526 .ssd_dpl = 0, 527 .ssd_p = 1, 528 .ssd_xx = 0, .ssd_xx1 = 0, 529 .ssd_def32 = 0, 530 .ssd_gran = 1 }, 531 /* GNDIS_SEL 18 NDIS Descriptor */ 532 { .ssd_base = 0x0, 533 .ssd_limit = 0x0, 534 .ssd_type = 0, 535 .ssd_dpl = 0, 536 .ssd_p = 0, 537 .ssd_xx = 0, .ssd_xx1 = 0, 538 .ssd_def32 = 0, 539 .ssd_gran = 0 }, 540 }; 541 542 static struct soft_segment_descriptor ldt_segs[] = { 543 /* Null Descriptor - overwritten by call gate */ 544 { .ssd_base = 0x0, 545 .ssd_limit = 0x0, 546 .ssd_type = 0, 547 .ssd_dpl = 0, 548 .ssd_p = 0, 549 .ssd_xx = 0, .ssd_xx1 = 0, 550 .ssd_def32 = 0, 551 .ssd_gran = 0 }, 552 /* Null Descriptor - overwritten by call gate */ 553 { .ssd_base = 0x0, 554 .ssd_limit = 0x0, 555 .ssd_type = 0, 556 .ssd_dpl = 0, 557 .ssd_p = 0, 558 .ssd_xx = 0, .ssd_xx1 = 0, 559 .ssd_def32 = 0, 560 .ssd_gran = 0 }, 561 /* Null Descriptor - overwritten by call gate */ 562 { .ssd_base = 0x0, 563 .ssd_limit = 0x0, 564 .ssd_type = 0, 565 .ssd_dpl = 0, 566 .ssd_p = 0, 567 .ssd_xx = 0, .ssd_xx1 = 0, 568 .ssd_def32 = 0, 569 .ssd_gran = 0 }, 570 /* Code Descriptor for user */ 571 { .ssd_base = 0x0, 572 .ssd_limit = 0xfffff, 573 .ssd_type = SDT_MEMERA, 574 .ssd_dpl = SEL_UPL, 575 .ssd_p = 1, 576 .ssd_xx = 0, .ssd_xx1 = 0, 577 .ssd_def32 = 1, 578 .ssd_gran = 1 }, 579 /* Null Descriptor - overwritten by call gate */ 580 { .ssd_base = 0x0, 581 .ssd_limit = 0x0, 582 .ssd_type = 0, 583 .ssd_dpl = 0, 584 .ssd_p = 0, 585 .ssd_xx = 0, .ssd_xx1 = 0, 586 .ssd_def32 = 0, 587 .ssd_gran = 0 }, 588 /* Data Descriptor for user */ 589 { .ssd_base = 0x0, 590 .ssd_limit = 0xfffff, 591 .ssd_type = SDT_MEMRWA, 592 .ssd_dpl = SEL_UPL, 593 .ssd_p = 1, 594 .ssd_xx = 0, .ssd_xx1 = 0, 595 .ssd_def32 = 1, 596 .ssd_gran = 1 }, 597 }; 598 599 size_t setidt_disp; 600 601 void 602 setidt(int idx, inthand_t *func, int typ, int dpl, int selec) 603 { 604 uintptr_t off; 605 606 off = func != NULL ? (uintptr_t)func + setidt_disp : 0; 607 setidt_nodisp(idx, off, typ, dpl, selec); 608 } 609 610 void 611 setidt_nodisp(int idx, uintptr_t off, int typ, int dpl, int selec) 612 { 613 struct gate_descriptor *ip; 614 615 ip = idt + idx; 616 ip->gd_looffset = off; 617 ip->gd_selector = selec; 618 ip->gd_stkcpy = 0; 619 ip->gd_xx = 0; 620 ip->gd_type = typ; 621 ip->gd_dpl = dpl; 622 ip->gd_p = 1; 623 ip->gd_hioffset = ((u_int)off) >> 16 ; 624 } 625 626 extern inthand_t 627 IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl), 628 IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(fpusegm), 629 IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot), 630 IDTVEC(page), IDTVEC(mchk), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(align), 631 IDTVEC(xmm), 632 #ifdef KDTRACE_HOOKS 633 IDTVEC(dtrace_ret), 634 #endif 635 #ifdef XENHVM 636 IDTVEC(xen_intr_upcall), 637 #endif 638 IDTVEC(int0x80_syscall); 639 640 #ifdef DDB 641 /* 642 * Display the index and function name of any IDT entries that don't use 643 * the default 'rsvd' entry point. 644 */ 645 DB_SHOW_COMMAND_FLAGS(idt, db_show_idt, DB_CMD_MEMSAFE) 646 { 647 struct gate_descriptor *ip; 648 int idx; 649 uintptr_t func, func_trm; 650 bool trm; 651 652 ip = idt; 653 for (idx = 0; idx < NIDT && !db_pager_quit; idx++) { 654 if (ip->gd_type == SDT_SYSTASKGT) { 655 db_printf("%3d\t<TASK>\n", idx); 656 } else { 657 func = (ip->gd_hioffset << 16 | ip->gd_looffset); 658 if (func >= PMAP_TRM_MIN_ADDRESS) { 659 func_trm = func; 660 func -= setidt_disp; 661 trm = true; 662 } else 663 trm = false; 664 if (func != (uintptr_t)&IDTVEC(rsvd)) { 665 db_printf("%3d\t", idx); 666 db_printsym(func, DB_STGY_PROC); 667 if (trm) 668 db_printf(" (trampoline %#x)", 669 func_trm); 670 db_printf("\n"); 671 } 672 } 673 ip++; 674 } 675 } 676 677 /* Show privileged registers. */ 678 DB_SHOW_COMMAND_FLAGS(sysregs, db_show_sysregs, DB_CMD_MEMSAFE) 679 { 680 uint64_t idtr, gdtr; 681 682 idtr = ridt(); 683 db_printf("idtr\t0x%08x/%04x\n", 684 (u_int)(idtr >> 16), (u_int)idtr & 0xffff); 685 gdtr = rgdt(); 686 db_printf("gdtr\t0x%08x/%04x\n", 687 (u_int)(gdtr >> 16), (u_int)gdtr & 0xffff); 688 db_printf("ldtr\t0x%04x\n", rldt()); 689 db_printf("tr\t0x%04x\n", rtr()); 690 db_printf("cr0\t0x%08x\n", rcr0()); 691 db_printf("cr2\t0x%08x\n", rcr2()); 692 db_printf("cr3\t0x%08x\n", rcr3()); 693 db_printf("cr4\t0x%08x\n", rcr4()); 694 if (rcr4() & CR4_XSAVE) 695 db_printf("xcr0\t0x%016llx\n", rxcr(0)); 696 if (amd_feature & (AMDID_NX | AMDID_LM)) 697 db_printf("EFER\t0x%016llx\n", rdmsr(MSR_EFER)); 698 if (cpu_feature2 & (CPUID2_VMX | CPUID2_SMX)) 699 db_printf("FEATURES_CTL\t0x%016llx\n", 700 rdmsr(MSR_IA32_FEATURE_CONTROL)); 701 if (((cpu_vendor_id == CPU_VENDOR_INTEL || 702 cpu_vendor_id == CPU_VENDOR_AMD) && CPUID_TO_FAMILY(cpu_id) >= 6) || 703 cpu_vendor_id == CPU_VENDOR_HYGON) 704 db_printf("DEBUG_CTL\t0x%016llx\n", rdmsr(MSR_DEBUGCTLMSR)); 705 if (cpu_feature & CPUID_PAT) 706 db_printf("PAT\t0x%016llx\n", rdmsr(MSR_PAT)); 707 } 708 709 DB_SHOW_COMMAND_FLAGS(dbregs, db_show_dbregs, DB_CMD_MEMSAFE) 710 { 711 712 db_printf("dr0\t0x%08x\n", rdr0()); 713 db_printf("dr1\t0x%08x\n", rdr1()); 714 db_printf("dr2\t0x%08x\n", rdr2()); 715 db_printf("dr3\t0x%08x\n", rdr3()); 716 db_printf("dr6\t0x%08x\n", rdr6()); 717 db_printf("dr7\t0x%08x\n", rdr7()); 718 } 719 720 DB_SHOW_COMMAND(frame, db_show_frame) 721 { 722 struct trapframe *frame; 723 724 frame = have_addr ? (struct trapframe *)addr : curthread->td_frame; 725 printf("ss %#x esp %#x efl %#x cs %#x eip %#x\n", 726 frame->tf_ss, frame->tf_esp, frame->tf_eflags, frame->tf_cs, 727 frame->tf_eip); 728 printf("err %#x trapno %d\n", frame->tf_err, frame->tf_trapno); 729 printf("ds %#x es %#x fs %#x\n", 730 frame->tf_ds, frame->tf_es, frame->tf_fs); 731 printf("eax %#x ecx %#x edx %#x ebx %#x\n", 732 frame->tf_eax, frame->tf_ecx, frame->tf_edx, frame->tf_ebx); 733 printf("ebp %#x esi %#x edi %#x\n", 734 frame->tf_ebp, frame->tf_esi, frame->tf_edi); 735 736 } 737 #endif 738 739 void 740 sdtossd(sd, ssd) 741 struct segment_descriptor *sd; 742 struct soft_segment_descriptor *ssd; 743 { 744 ssd->ssd_base = (sd->sd_hibase << 24) | sd->sd_lobase; 745 ssd->ssd_limit = (sd->sd_hilimit << 16) | sd->sd_lolimit; 746 ssd->ssd_type = sd->sd_type; 747 ssd->ssd_dpl = sd->sd_dpl; 748 ssd->ssd_p = sd->sd_p; 749 ssd->ssd_def32 = sd->sd_def32; 750 ssd->ssd_gran = sd->sd_gran; 751 } 752 753 static int 754 add_physmap_entry(uint64_t base, uint64_t length, vm_paddr_t *physmap, 755 int *physmap_idxp) 756 { 757 uint64_t lim, ign; 758 int i, insert_idx, physmap_idx; 759 760 physmap_idx = *physmap_idxp; 761 762 if (length == 0) 763 return (1); 764 765 lim = 0x100000000; /* 4G */ 766 if (pae_mode && above4g_allow) 767 lim = above24g_allow ? -1ULL : 0x600000000; /* 24G */ 768 if (base >= lim) { 769 printf("%uK of memory above %uGB ignored, pae %d " 770 "above4g_allow %d above24g_allow %d\n", 771 (u_int)(length / 1024), (u_int)(lim >> 30), pae_mode, 772 above4g_allow, above24g_allow); 773 return (1); 774 } 775 if (base + length >= lim) { 776 ign = base + length - lim; 777 length -= ign; 778 printf("%uK of memory above %uGB ignored, pae %d " 779 "above4g_allow %d above24g_allow %d\n", 780 (u_int)(ign / 1024), (u_int)(lim >> 30), pae_mode, 781 above4g_allow, above24g_allow); 782 } 783 784 /* 785 * Find insertion point while checking for overlap. Start off by 786 * assuming the new entry will be added to the end. 787 */ 788 insert_idx = physmap_idx + 2; 789 for (i = 0; i <= physmap_idx; i += 2) { 790 if (base < physmap[i + 1]) { 791 if (base + length <= physmap[i]) { 792 insert_idx = i; 793 break; 794 } 795 if (boothowto & RB_VERBOSE) 796 printf( 797 "Overlapping memory regions, ignoring second region\n"); 798 return (1); 799 } 800 } 801 802 /* See if we can prepend to the next entry. */ 803 if (insert_idx <= physmap_idx && base + length == physmap[insert_idx]) { 804 physmap[insert_idx] = base; 805 return (1); 806 } 807 808 /* See if we can append to the previous entry. */ 809 if (insert_idx > 0 && base == physmap[insert_idx - 1]) { 810 physmap[insert_idx - 1] += length; 811 return (1); 812 } 813 814 physmap_idx += 2; 815 *physmap_idxp = physmap_idx; 816 if (physmap_idx == PHYS_AVAIL_ENTRIES) { 817 printf( 818 "Too many segments in the physical address map, giving up\n"); 819 return (0); 820 } 821 822 /* 823 * Move the last 'N' entries down to make room for the new 824 * entry if needed. 825 */ 826 for (i = physmap_idx; i > insert_idx; i -= 2) { 827 physmap[i] = physmap[i - 2]; 828 physmap[i + 1] = physmap[i - 1]; 829 } 830 831 /* Insert the new entry. */ 832 physmap[insert_idx] = base; 833 physmap[insert_idx + 1] = base + length; 834 return (1); 835 } 836 837 static int 838 add_smap_entry(struct bios_smap *smap, vm_paddr_t *physmap, int *physmap_idxp) 839 { 840 if (boothowto & RB_VERBOSE) 841 printf("SMAP type=%02x base=%016llx len=%016llx\n", 842 smap->type, smap->base, smap->length); 843 844 if (smap->type != SMAP_TYPE_MEMORY) 845 return (1); 846 847 return (add_physmap_entry(smap->base, smap->length, physmap, 848 physmap_idxp)); 849 } 850 851 static void 852 add_smap_entries(struct bios_smap *smapbase, vm_paddr_t *physmap, 853 int *physmap_idxp) 854 { 855 struct bios_smap *smap, *smapend; 856 u_int32_t smapsize; 857 /* 858 * Memory map from INT 15:E820. 859 * 860 * subr_module.c says: 861 * "Consumer may safely assume that size value precedes data." 862 * ie: an int32_t immediately precedes SMAP. 863 */ 864 smapsize = *((u_int32_t *)smapbase - 1); 865 smapend = (struct bios_smap *)((uintptr_t)smapbase + smapsize); 866 867 for (smap = smapbase; smap < smapend; smap++) 868 if (!add_smap_entry(smap, physmap, physmap_idxp)) 869 break; 870 } 871 872 static void 873 basemem_setup(void) 874 { 875 876 if (basemem > 640) { 877 printf("Preposterous BIOS basemem of %uK, truncating to 640K\n", 878 basemem); 879 basemem = 640; 880 } 881 882 pmap_basemem_setup(basemem); 883 } 884 885 /* 886 * Populate the (physmap) array with base/bound pairs describing the 887 * available physical memory in the system, then test this memory and 888 * build the phys_avail array describing the actually-available memory. 889 * 890 * If we cannot accurately determine the physical memory map, then use 891 * value from the 0xE801 call, and failing that, the RTC. 892 * 893 * Total memory size may be set by the kernel environment variable 894 * hw.physmem or the compile-time define MAXMEM. 895 * 896 * XXX first should be vm_paddr_t. 897 */ 898 static void 899 getmemsize(int first) 900 { 901 int has_smap, off, physmap_idx, pa_indx, da_indx; 902 u_long memtest; 903 vm_paddr_t physmap[PHYS_AVAIL_ENTRIES]; 904 quad_t dcons_addr, dcons_size, physmem_tunable; 905 int hasbrokenint12, i, res __diagused; 906 u_int extmem; 907 struct vm86frame vmf; 908 struct vm86context vmc; 909 vm_paddr_t pa; 910 struct bios_smap *smap, *smapbase; 911 caddr_t kmdp; 912 913 has_smap = 0; 914 bzero(&vmf, sizeof(vmf)); 915 bzero(physmap, sizeof(physmap)); 916 basemem = 0; 917 918 /* 919 * Tell the physical memory allocator about pages used to store 920 * the kernel and preloaded data. See kmem_bootstrap_free(). 921 */ 922 vm_phys_early_add_seg((vm_paddr_t)KERNLOAD, trunc_page(first)); 923 924 TUNABLE_INT_FETCH("hw.above4g_allow", &above4g_allow); 925 TUNABLE_INT_FETCH("hw.above24g_allow", &above24g_allow); 926 927 /* 928 * Check if the loader supplied an SMAP memory map. If so, 929 * use that and do not make any VM86 calls. 930 */ 931 physmap_idx = 0; 932 kmdp = preload_search_by_type("elf kernel"); 933 if (kmdp == NULL) 934 kmdp = preload_search_by_type("elf32 kernel"); 935 smapbase = (struct bios_smap *)preload_search_info(kmdp, 936 MODINFO_METADATA | MODINFOMD_SMAP); 937 if (smapbase != NULL) { 938 add_smap_entries(smapbase, physmap, &physmap_idx); 939 has_smap = 1; 940 goto have_smap; 941 } 942 943 /* 944 * Some newer BIOSes have a broken INT 12H implementation 945 * which causes a kernel panic immediately. In this case, we 946 * need use the SMAP to determine the base memory size. 947 */ 948 hasbrokenint12 = 0; 949 TUNABLE_INT_FETCH("hw.hasbrokenint12", &hasbrokenint12); 950 if (hasbrokenint12 == 0) { 951 /* Use INT12 to determine base memory size. */ 952 vm86_intcall(0x12, &vmf); 953 basemem = vmf.vmf_ax; 954 basemem_setup(); 955 } 956 957 /* 958 * Fetch the memory map with INT 15:E820. Map page 1 R/W into 959 * the kernel page table so we can use it as a buffer. The 960 * kernel will unmap this page later. 961 */ 962 vmc.npages = 0; 963 smap = (void *)vm86_addpage(&vmc, 1, PMAP_MAP_LOW + ptoa(1)); 964 res = vm86_getptr(&vmc, (vm_offset_t)smap, &vmf.vmf_es, &vmf.vmf_di); 965 KASSERT(res != 0, ("vm86_getptr() failed: address not found")); 966 967 vmf.vmf_ebx = 0; 968 do { 969 vmf.vmf_eax = 0xE820; 970 vmf.vmf_edx = SMAP_SIG; 971 vmf.vmf_ecx = sizeof(struct bios_smap); 972 i = vm86_datacall(0x15, &vmf, &vmc); 973 if (i || vmf.vmf_eax != SMAP_SIG) 974 break; 975 has_smap = 1; 976 if (!add_smap_entry(smap, physmap, &physmap_idx)) 977 break; 978 } while (vmf.vmf_ebx != 0); 979 980 have_smap: 981 /* 982 * If we didn't fetch the "base memory" size from INT12, 983 * figure it out from the SMAP (or just guess). 984 */ 985 if (basemem == 0) { 986 for (i = 0; i <= physmap_idx; i += 2) { 987 if (physmap[i] == 0x00000000) { 988 basemem = physmap[i + 1] / 1024; 989 break; 990 } 991 } 992 993 /* XXX: If we couldn't find basemem from SMAP, just guess. */ 994 if (basemem == 0) 995 basemem = 640; 996 basemem_setup(); 997 } 998 999 if (physmap[1] != 0) 1000 goto physmap_done; 1001 1002 /* 1003 * If we failed to find an SMAP, figure out the extended 1004 * memory size. We will then build a simple memory map with 1005 * two segments, one for "base memory" and the second for 1006 * "extended memory". Note that "extended memory" starts at a 1007 * physical address of 1MB and that both basemem and extmem 1008 * are in units of 1KB. 1009 * 1010 * First, try to fetch the extended memory size via INT 15:E801. 1011 */ 1012 vmf.vmf_ax = 0xE801; 1013 if (vm86_intcall(0x15, &vmf) == 0) { 1014 extmem = vmf.vmf_cx + vmf.vmf_dx * 64; 1015 } else { 1016 /* 1017 * If INT15:E801 fails, this is our last ditch effort 1018 * to determine the extended memory size. Currently 1019 * we prefer the RTC value over INT15:88. 1020 */ 1021 #if 0 1022 vmf.vmf_ah = 0x88; 1023 vm86_intcall(0x15, &vmf); 1024 extmem = vmf.vmf_ax; 1025 #else 1026 extmem = rtcin(RTC_EXTLO) + (rtcin(RTC_EXTHI) << 8); 1027 #endif 1028 } 1029 1030 /* 1031 * Special hack for chipsets that still remap the 384k hole when 1032 * there's 16MB of memory - this really confuses people that 1033 * are trying to use bus mastering ISA controllers with the 1034 * "16MB limit"; they only have 16MB, but the remapping puts 1035 * them beyond the limit. 1036 * 1037 * If extended memory is between 15-16MB (16-17MB phys address range), 1038 * chop it to 15MB. 1039 */ 1040 if ((extmem > 15 * 1024) && (extmem < 16 * 1024)) 1041 extmem = 15 * 1024; 1042 1043 physmap[0] = 0; 1044 physmap[1] = basemem * 1024; 1045 physmap_idx = 2; 1046 physmap[physmap_idx] = 0x100000; 1047 physmap[physmap_idx + 1] = physmap[physmap_idx] + extmem * 1024; 1048 1049 physmap_done: 1050 /* 1051 * Now, physmap contains a map of physical memory. 1052 */ 1053 1054 #ifdef SMP 1055 /* make hole for AP bootstrap code */ 1056 alloc_ap_trampoline(physmap, &physmap_idx); 1057 #endif 1058 1059 /* 1060 * Maxmem isn't the "maximum memory", it's one larger than the 1061 * highest page of the physical address space. It should be 1062 * called something like "Maxphyspage". We may adjust this 1063 * based on ``hw.physmem'' and the results of the memory test. 1064 * 1065 * This is especially confusing when it is much larger than the 1066 * memory size and is displayed as "realmem". 1067 */ 1068 Maxmem = atop(physmap[physmap_idx + 1]); 1069 1070 #ifdef MAXMEM 1071 Maxmem = MAXMEM / 4; 1072 #endif 1073 1074 if (TUNABLE_QUAD_FETCH("hw.physmem", &physmem_tunable)) 1075 Maxmem = atop(physmem_tunable); 1076 1077 /* 1078 * If we have an SMAP, don't allow MAXMEM or hw.physmem to extend 1079 * the amount of memory in the system. 1080 */ 1081 if (has_smap && Maxmem > atop(physmap[physmap_idx + 1])) 1082 Maxmem = atop(physmap[physmap_idx + 1]); 1083 1084 /* 1085 * The boot memory test is disabled by default, as it takes a 1086 * significant amount of time on large-memory systems, and is 1087 * unfriendly to virtual machines as it unnecessarily touches all 1088 * pages. 1089 * 1090 * A general name is used as the code may be extended to support 1091 * additional tests beyond the current "page present" test. 1092 */ 1093 memtest = 0; 1094 TUNABLE_ULONG_FETCH("hw.memtest.tests", &memtest); 1095 1096 if (atop(physmap[physmap_idx + 1]) != Maxmem && 1097 (boothowto & RB_VERBOSE)) 1098 printf("Physical memory use set to %ldK\n", Maxmem * 4); 1099 1100 /* 1101 * If Maxmem has been increased beyond what the system has detected, 1102 * extend the last memory segment to the new limit. 1103 */ 1104 if (atop(physmap[physmap_idx + 1]) < Maxmem) 1105 physmap[physmap_idx + 1] = ptoa((vm_paddr_t)Maxmem); 1106 1107 /* call pmap initialization to make new kernel address space */ 1108 pmap_bootstrap(first); 1109 1110 /* 1111 * Size up each available chunk of physical memory. 1112 */ 1113 physmap[0] = PAGE_SIZE; /* mask off page 0 */ 1114 pa_indx = 0; 1115 da_indx = 1; 1116 phys_avail[pa_indx++] = physmap[0]; 1117 phys_avail[pa_indx] = physmap[0]; 1118 dump_avail[da_indx] = physmap[0]; 1119 1120 /* 1121 * Get dcons buffer address 1122 */ 1123 if (getenv_quad("dcons.addr", &dcons_addr) == 0 || 1124 getenv_quad("dcons.size", &dcons_size) == 0) 1125 dcons_addr = 0; 1126 1127 /* 1128 * physmap is in bytes, so when converting to page boundaries, 1129 * round up the start address and round down the end address. 1130 */ 1131 for (i = 0; i <= physmap_idx; i += 2) { 1132 vm_paddr_t end; 1133 1134 end = ptoa((vm_paddr_t)Maxmem); 1135 if (physmap[i + 1] < end) 1136 end = trunc_page(physmap[i + 1]); 1137 for (pa = round_page(physmap[i]); pa < end; pa += PAGE_SIZE) { 1138 int tmp, page_bad, full; 1139 int *ptr; 1140 1141 full = FALSE; 1142 /* 1143 * block out kernel memory as not available. 1144 */ 1145 if (pa >= KERNLOAD && pa < first) 1146 goto do_dump_avail; 1147 1148 /* 1149 * block out dcons buffer 1150 */ 1151 if (dcons_addr > 0 1152 && pa >= trunc_page(dcons_addr) 1153 && pa < dcons_addr + dcons_size) 1154 goto do_dump_avail; 1155 1156 page_bad = FALSE; 1157 if (memtest == 0) 1158 goto skip_memtest; 1159 1160 /* 1161 * map page into kernel: valid, read/write,non-cacheable 1162 */ 1163 ptr = (int *)pmap_cmap3(pa, PG_V | PG_RW | PG_N); 1164 1165 tmp = *(int *)ptr; 1166 /* 1167 * Test for alternating 1's and 0's 1168 */ 1169 *(volatile int *)ptr = 0xaaaaaaaa; 1170 if (*(volatile int *)ptr != 0xaaaaaaaa) 1171 page_bad = TRUE; 1172 /* 1173 * Test for alternating 0's and 1's 1174 */ 1175 *(volatile int *)ptr = 0x55555555; 1176 if (*(volatile int *)ptr != 0x55555555) 1177 page_bad = TRUE; 1178 /* 1179 * Test for all 1's 1180 */ 1181 *(volatile int *)ptr = 0xffffffff; 1182 if (*(volatile int *)ptr != 0xffffffff) 1183 page_bad = TRUE; 1184 /* 1185 * Test for all 0's 1186 */ 1187 *(volatile int *)ptr = 0x0; 1188 if (*(volatile int *)ptr != 0x0) 1189 page_bad = TRUE; 1190 /* 1191 * Restore original value. 1192 */ 1193 *(int *)ptr = tmp; 1194 1195 skip_memtest: 1196 /* 1197 * Adjust array of valid/good pages. 1198 */ 1199 if (page_bad == TRUE) 1200 continue; 1201 /* 1202 * If this good page is a continuation of the 1203 * previous set of good pages, then just increase 1204 * the end pointer. Otherwise start a new chunk. 1205 * Note that "end" points one higher than end, 1206 * making the range >= start and < end. 1207 * If we're also doing a speculative memory 1208 * test and we at or past the end, bump up Maxmem 1209 * so that we keep going. The first bad page 1210 * will terminate the loop. 1211 */ 1212 if (phys_avail[pa_indx] == pa) { 1213 phys_avail[pa_indx] += PAGE_SIZE; 1214 } else { 1215 pa_indx++; 1216 if (pa_indx == PHYS_AVAIL_ENTRIES) { 1217 printf( 1218 "Too many holes in the physical address space, giving up\n"); 1219 pa_indx--; 1220 full = TRUE; 1221 goto do_dump_avail; 1222 } 1223 phys_avail[pa_indx++] = pa; /* start */ 1224 phys_avail[pa_indx] = pa + PAGE_SIZE; /* end */ 1225 } 1226 physmem++; 1227 do_dump_avail: 1228 if (dump_avail[da_indx] == pa) { 1229 dump_avail[da_indx] += PAGE_SIZE; 1230 } else { 1231 da_indx++; 1232 if (da_indx == PHYS_AVAIL_ENTRIES) { 1233 da_indx--; 1234 goto do_next; 1235 } 1236 dump_avail[da_indx++] = pa; /* start */ 1237 dump_avail[da_indx] = pa + PAGE_SIZE; /* end */ 1238 } 1239 do_next: 1240 if (full) 1241 break; 1242 } 1243 } 1244 pmap_cmap3(0, 0); 1245 1246 /* 1247 * XXX 1248 * The last chunk must contain at least one page plus the message 1249 * buffer to avoid complicating other code (message buffer address 1250 * calculation, etc.). 1251 */ 1252 while (phys_avail[pa_indx - 1] + PAGE_SIZE + 1253 round_page(msgbufsize) >= phys_avail[pa_indx]) { 1254 physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]); 1255 phys_avail[pa_indx--] = 0; 1256 phys_avail[pa_indx--] = 0; 1257 } 1258 1259 Maxmem = atop(phys_avail[pa_indx]); 1260 1261 /* Trim off space for the message buffer. */ 1262 phys_avail[pa_indx] -= round_page(msgbufsize); 1263 1264 /* Map the message buffer. */ 1265 for (off = 0; off < round_page(msgbufsize); off += PAGE_SIZE) 1266 pmap_kenter((vm_offset_t)msgbufp + off, phys_avail[pa_indx] + 1267 off); 1268 } 1269 1270 static void 1271 i386_kdb_init(void) 1272 { 1273 #ifdef DDB 1274 db_fetch_ksymtab(bootinfo.bi_symtab, bootinfo.bi_esymtab, 0); 1275 #endif 1276 kdb_init(); 1277 #ifdef KDB 1278 if (boothowto & RB_KDB) 1279 kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger"); 1280 #endif 1281 } 1282 1283 static void 1284 fixup_idt(void) 1285 { 1286 struct gate_descriptor *ip; 1287 uintptr_t off; 1288 int x; 1289 1290 for (x = 0; x < NIDT; x++) { 1291 ip = &idt[x]; 1292 if (ip->gd_type != SDT_SYS386IGT && 1293 ip->gd_type != SDT_SYS386TGT) 1294 continue; 1295 off = ip->gd_looffset + (((u_int)ip->gd_hioffset) << 16); 1296 KASSERT(off >= (uintptr_t)start_exceptions && 1297 off < (uintptr_t)end_exceptions, 1298 ("IDT[%d] type %d off %#x", x, ip->gd_type, off)); 1299 off += setidt_disp; 1300 MPASS(off >= PMAP_TRM_MIN_ADDRESS && 1301 off < PMAP_TRM_MAX_ADDRESS); 1302 ip->gd_looffset = off; 1303 ip->gd_hioffset = off >> 16; 1304 } 1305 } 1306 1307 static void 1308 i386_setidt1(void) 1309 { 1310 int x; 1311 1312 /* exceptions */ 1313 for (x = 0; x < NIDT; x++) 1314 setidt(x, &IDTVEC(rsvd), SDT_SYS386IGT, SEL_KPL, 1315 GSEL(GCODE_SEL, SEL_KPL)); 1316 setidt(IDT_DE, &IDTVEC(div), SDT_SYS386IGT, SEL_KPL, 1317 GSEL(GCODE_SEL, SEL_KPL)); 1318 setidt(IDT_DB, &IDTVEC(dbg), SDT_SYS386IGT, SEL_KPL, 1319 GSEL(GCODE_SEL, SEL_KPL)); 1320 setidt(IDT_NMI, &IDTVEC(nmi), SDT_SYS386IGT, SEL_KPL, 1321 GSEL(GCODE_SEL, SEL_KPL)); 1322 setidt(IDT_BP, &IDTVEC(bpt), SDT_SYS386IGT, SEL_UPL, 1323 GSEL(GCODE_SEL, SEL_KPL)); 1324 setidt(IDT_OF, &IDTVEC(ofl), SDT_SYS386IGT, SEL_UPL, 1325 GSEL(GCODE_SEL, SEL_KPL)); 1326 setidt(IDT_BR, &IDTVEC(bnd), SDT_SYS386IGT, SEL_KPL, 1327 GSEL(GCODE_SEL, SEL_KPL)); 1328 setidt(IDT_UD, &IDTVEC(ill), SDT_SYS386IGT, SEL_KPL, 1329 GSEL(GCODE_SEL, SEL_KPL)); 1330 setidt(IDT_NM, &IDTVEC(dna), SDT_SYS386IGT, SEL_KPL, 1331 GSEL(GCODE_SEL, SEL_KPL)); 1332 setidt(IDT_DF, 0, SDT_SYSTASKGT, SEL_KPL, GSEL(GPANIC_SEL, 1333 SEL_KPL)); 1334 setidt(IDT_FPUGP, &IDTVEC(fpusegm), SDT_SYS386IGT, 1335 SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1336 setidt(IDT_TS, &IDTVEC(tss), SDT_SYS386IGT, SEL_KPL, 1337 GSEL(GCODE_SEL, SEL_KPL)); 1338 setidt(IDT_NP, &IDTVEC(missing), SDT_SYS386IGT, SEL_KPL, 1339 GSEL(GCODE_SEL, SEL_KPL)); 1340 setidt(IDT_SS, &IDTVEC(stk), SDT_SYS386IGT, SEL_KPL, 1341 GSEL(GCODE_SEL, SEL_KPL)); 1342 setidt(IDT_GP, &IDTVEC(prot), SDT_SYS386IGT, SEL_KPL, 1343 GSEL(GCODE_SEL, SEL_KPL)); 1344 setidt(IDT_PF, &IDTVEC(page), SDT_SYS386IGT, SEL_KPL, 1345 GSEL(GCODE_SEL, SEL_KPL)); 1346 setidt(IDT_MF, &IDTVEC(fpu), SDT_SYS386IGT, SEL_KPL, 1347 GSEL(GCODE_SEL, SEL_KPL)); 1348 setidt(IDT_AC, &IDTVEC(align), SDT_SYS386IGT, SEL_KPL, 1349 GSEL(GCODE_SEL, SEL_KPL)); 1350 setidt(IDT_MC, &IDTVEC(mchk), SDT_SYS386IGT, SEL_KPL, 1351 GSEL(GCODE_SEL, SEL_KPL)); 1352 setidt(IDT_XF, &IDTVEC(xmm), SDT_SYS386IGT, SEL_KPL, 1353 GSEL(GCODE_SEL, SEL_KPL)); 1354 setidt(IDT_SYSCALL, &IDTVEC(int0x80_syscall), 1355 SDT_SYS386IGT, SEL_UPL, GSEL(GCODE_SEL, SEL_KPL)); 1356 #ifdef KDTRACE_HOOKS 1357 setidt(IDT_DTRACE_RET, &IDTVEC(dtrace_ret), 1358 SDT_SYS386IGT, SEL_UPL, GSEL(GCODE_SEL, SEL_KPL)); 1359 #endif 1360 #ifdef XENHVM 1361 setidt(IDT_EVTCHN, &IDTVEC(xen_intr_upcall), 1362 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1363 #endif 1364 } 1365 1366 static void 1367 i386_setidt2(void) 1368 { 1369 1370 setidt(IDT_UD, &IDTVEC(ill), SDT_SYS386IGT, SEL_KPL, 1371 GSEL(GCODE_SEL, SEL_KPL)); 1372 setidt(IDT_GP, &IDTVEC(prot), SDT_SYS386IGT, SEL_KPL, 1373 GSEL(GCODE_SEL, SEL_KPL)); 1374 } 1375 1376 #if defined(DEV_ISA) && !defined(DEV_ATPIC) 1377 static void 1378 i386_setidt3(void) 1379 { 1380 1381 setidt(IDT_IO_INTS + 7, IDTVEC(spuriousint), 1382 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1383 setidt(IDT_IO_INTS + 15, IDTVEC(spuriousint), 1384 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1385 } 1386 #endif 1387 1388 register_t 1389 init386(int first) 1390 { 1391 struct region_descriptor r_gdt, r_idt; /* table descriptors */ 1392 int gsel_tss, metadata_missing, x, pa; 1393 struct pcpu *pc; 1394 struct xstate_hdr *xhdr; 1395 caddr_t kmdp; 1396 vm_offset_t addend; 1397 size_t ucode_len; 1398 1399 thread0.td_kstack = proc0kstack; 1400 thread0.td_kstack_pages = TD0_KSTACK_PAGES; 1401 1402 /* 1403 * This may be done better later if it gets more high level 1404 * components in it. If so just link td->td_proc here. 1405 */ 1406 proc_linkup0(&proc0, &thread0); 1407 1408 if (bootinfo.bi_modulep) { 1409 metadata_missing = 0; 1410 addend = (vm_paddr_t)bootinfo.bi_modulep < KERNBASE ? 1411 PMAP_MAP_LOW : 0; 1412 preload_metadata = (caddr_t)bootinfo.bi_modulep + addend; 1413 preload_bootstrap_relocate(addend); 1414 } else { 1415 metadata_missing = 1; 1416 } 1417 1418 if (bootinfo.bi_envp != 0) { 1419 addend = (vm_paddr_t)bootinfo.bi_envp < KERNBASE ? 1420 PMAP_MAP_LOW : 0; 1421 init_static_kenv((char *)bootinfo.bi_envp + addend, 0); 1422 } else { 1423 init_static_kenv(NULL, 0); 1424 } 1425 1426 /* 1427 * Re-evaluate CPU features if we loaded a microcode update. 1428 */ 1429 ucode_len = ucode_load_bsp(first); 1430 if (ucode_len != 0) { 1431 identify_cpu(); 1432 first = roundup2(first + ucode_len, PAGE_SIZE); 1433 } 1434 1435 identify_hypervisor(); 1436 1437 /* Init basic tunables, hz etc */ 1438 init_param1(); 1439 1440 /* Set bootmethod to BIOS: it's the only supported on i386. */ 1441 strlcpy(bootmethod, "BIOS", sizeof(bootmethod)); 1442 1443 /* 1444 * Make gdt memory segments. All segments cover the full 4GB 1445 * of address space and permissions are enforced at page level. 1446 */ 1447 gdt_segs[GCODE_SEL].ssd_limit = atop(0 - 1); 1448 gdt_segs[GDATA_SEL].ssd_limit = atop(0 - 1); 1449 gdt_segs[GUCODE_SEL].ssd_limit = atop(0 - 1); 1450 gdt_segs[GUDATA_SEL].ssd_limit = atop(0 - 1); 1451 gdt_segs[GUFS_SEL].ssd_limit = atop(0 - 1); 1452 gdt_segs[GUGS_SEL].ssd_limit = atop(0 - 1); 1453 1454 pc = &__pcpu[0]; 1455 gdt_segs[GPRIV_SEL].ssd_limit = atop(0 - 1); 1456 gdt_segs[GPRIV_SEL].ssd_base = (int)pc; 1457 gdt_segs[GPROC0_SEL].ssd_base = (int)&common_tss0; 1458 1459 for (x = 0; x < NGDT; x++) 1460 ssdtosd(&gdt_segs[x], &gdt0[x].sd); 1461 1462 r_gdt.rd_limit = NGDT * sizeof(gdt0[0]) - 1; 1463 r_gdt.rd_base = (int)gdt0; 1464 mtx_init(&dt_lock, "descriptor tables", NULL, MTX_SPIN); 1465 lgdt(&r_gdt); 1466 1467 pcpu_init(pc, 0, sizeof(struct pcpu)); 1468 for (pa = first; pa < first + DPCPU_SIZE; pa += PAGE_SIZE) 1469 pmap_kenter(pa, pa); 1470 dpcpu_init((void *)first, 0); 1471 first += DPCPU_SIZE; 1472 PCPU_SET(prvspace, pc); 1473 PCPU_SET(curthread, &thread0); 1474 /* Non-late cninit() and printf() can be moved up to here. */ 1475 1476 /* 1477 * Initialize mutexes. 1478 * 1479 * icu_lock: in order to allow an interrupt to occur in a critical 1480 * section, to set pcpu->ipending (etc...) properly, we 1481 * must be able to get the icu lock, so it can't be 1482 * under witness. 1483 */ 1484 mutex_init(); 1485 mtx_init(&icu_lock, "icu", NULL, MTX_SPIN | MTX_NOWITNESS | MTX_NOPROFILE); 1486 1487 i386_setidt1(); 1488 1489 r_idt.rd_limit = sizeof(idt0) - 1; 1490 r_idt.rd_base = (int) idt; 1491 lidt(&r_idt); 1492 1493 finishidentcpu(); /* Final stage of CPU initialization */ 1494 1495 /* 1496 * Initialize the clock before the console so that console 1497 * initialization can use DELAY(). 1498 */ 1499 clock_init(); 1500 1501 i386_setidt2(); 1502 pmap_set_nx(); 1503 initializecpu(); /* Initialize CPU registers */ 1504 initializecpucache(); 1505 1506 /* pointer to selector slot for %fs/%gs */ 1507 PCPU_SET(fsgs_gdt, &gdt[GUFS_SEL].sd); 1508 1509 /* Initialize the tss (except for the final esp0) early for vm86. */ 1510 common_tss0.tss_esp0 = thread0.td_kstack + thread0.td_kstack_pages * 1511 PAGE_SIZE - VM86_STACK_SPACE; 1512 common_tss0.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL); 1513 common_tss0.tss_ioopt = sizeof(struct i386tss) << 16; 1514 gsel_tss = GSEL(GPROC0_SEL, SEL_KPL); 1515 PCPU_SET(tss_gdt, &gdt[GPROC0_SEL].sd); 1516 PCPU_SET(common_tssd, *PCPU_GET(tss_gdt)); 1517 ltr(gsel_tss); 1518 1519 /* Initialize the PIC early for vm86 calls. */ 1520 #ifdef DEV_ISA 1521 #ifdef DEV_ATPIC 1522 elcr_probe(); 1523 atpic_startup(); 1524 #else 1525 /* Reset and mask the atpics and leave them shut down. */ 1526 atpic_reset(); 1527 1528 /* 1529 * Point the ICU spurious interrupt vectors at the APIC spurious 1530 * interrupt handler. 1531 */ 1532 i386_setidt3(); 1533 #endif 1534 #endif 1535 1536 /* 1537 * The console and kdb should be initialized even earlier than here, 1538 * but some console drivers don't work until after getmemsize(). 1539 * Default to late console initialization to support these drivers. 1540 * This loses mainly printf()s in getmemsize() and early debugging. 1541 */ 1542 TUNABLE_INT_FETCH("debug.late_console", &late_console); 1543 if (!late_console) { 1544 cninit(); 1545 i386_kdb_init(); 1546 } 1547 1548 kmdp = preload_search_by_type("elf kernel"); 1549 link_elf_ireloc(kmdp); 1550 1551 vm86_initialize(); 1552 getmemsize(first); 1553 init_param2(physmem); 1554 1555 /* now running on new page tables, configured,and u/iom is accessible */ 1556 1557 if (late_console) 1558 cninit(); 1559 1560 if (metadata_missing) 1561 printf("WARNING: loader(8) metadata is missing!\n"); 1562 1563 if (late_console) 1564 i386_kdb_init(); 1565 1566 msgbufinit(msgbufp, msgbufsize); 1567 npxinit(true); 1568 /* 1569 * Set up thread0 pcb after npxinit calculated pcb + fpu save 1570 * area size. Zero out the extended state header in fpu save 1571 * area. 1572 */ 1573 thread0.td_pcb = get_pcb_td(&thread0); 1574 thread0.td_pcb->pcb_save = get_pcb_user_save_td(&thread0); 1575 bzero(get_pcb_user_save_td(&thread0), cpu_max_ext_state_size); 1576 if (use_xsave) { 1577 xhdr = (struct xstate_hdr *)(get_pcb_user_save_td(&thread0) + 1578 1); 1579 xhdr->xstate_bv = xsave_mask; 1580 } 1581 PCPU_SET(curpcb, thread0.td_pcb); 1582 /* Move esp0 in the tss to its final place. */ 1583 /* Note: -16 is so we can grow the trapframe if we came from vm86 */ 1584 common_tss0.tss_esp0 = (vm_offset_t)thread0.td_pcb - VM86_STACK_SPACE; 1585 PCPU_SET(kesp0, common_tss0.tss_esp0); 1586 gdt[GPROC0_SEL].sd.sd_type = SDT_SYS386TSS; /* clear busy bit */ 1587 ltr(gsel_tss); 1588 1589 /* transfer to user mode */ 1590 1591 _ucodesel = GSEL(GUCODE_SEL, SEL_UPL); 1592 _udatasel = GSEL(GUDATA_SEL, SEL_UPL); 1593 1594 /* setup proc 0's pcb */ 1595 thread0.td_pcb->pcb_flags = 0; 1596 thread0.td_pcb->pcb_cr3 = pmap_get_kcr3(); 1597 thread0.td_pcb->pcb_ext = 0; 1598 thread0.td_frame = &proc0_tf; 1599 1600 #ifdef FDT 1601 x86_init_fdt(); 1602 #endif 1603 1604 /* Location of kernel stack for locore */ 1605 return ((register_t)thread0.td_pcb); 1606 } 1607 1608 static void 1609 machdep_init_trampoline(void) 1610 { 1611 struct region_descriptor r_gdt, r_idt; 1612 struct i386tss *tss; 1613 char *copyout_buf, *trampoline, *tramp_stack_base; 1614 int x; 1615 1616 gdt = pmap_trm_alloc(sizeof(union descriptor) * NGDT * mp_ncpus, 1617 M_NOWAIT | M_ZERO); 1618 bcopy(gdt0, gdt, sizeof(union descriptor) * NGDT); 1619 r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1; 1620 r_gdt.rd_base = (int)gdt; 1621 lgdt(&r_gdt); 1622 1623 tss = pmap_trm_alloc(sizeof(struct i386tss) * mp_ncpus, 1624 M_NOWAIT | M_ZERO); 1625 bcopy(&common_tss0, tss, sizeof(struct i386tss)); 1626 gdt[GPROC0_SEL].sd.sd_lobase = (int)tss; 1627 gdt[GPROC0_SEL].sd.sd_hibase = (u_int)tss >> 24; 1628 gdt[GPROC0_SEL].sd.sd_type = SDT_SYS386TSS; 1629 1630 PCPU_SET(fsgs_gdt, &gdt[GUFS_SEL].sd); 1631 PCPU_SET(tss_gdt, &gdt[GPROC0_SEL].sd); 1632 PCPU_SET(common_tssd, *PCPU_GET(tss_gdt)); 1633 PCPU_SET(common_tssp, tss); 1634 ltr(GSEL(GPROC0_SEL, SEL_KPL)); 1635 1636 trampoline = pmap_trm_alloc(end_exceptions - start_exceptions, 1637 M_NOWAIT); 1638 bcopy(start_exceptions, trampoline, end_exceptions - start_exceptions); 1639 tramp_stack_base = pmap_trm_alloc(TRAMP_STACK_SZ, M_NOWAIT); 1640 PCPU_SET(trampstk, (uintptr_t)tramp_stack_base + TRAMP_STACK_SZ - 1641 VM86_STACK_SPACE); 1642 tss[0].tss_esp0 = PCPU_GET(trampstk); 1643 1644 idt = pmap_trm_alloc(sizeof(idt0), M_NOWAIT | M_ZERO); 1645 bcopy(idt0, idt, sizeof(idt0)); 1646 1647 /* Re-initialize new IDT since the handlers were relocated */ 1648 setidt_disp = trampoline - start_exceptions; 1649 if (bootverbose) 1650 printf("Trampoline disposition %#zx\n", setidt_disp); 1651 fixup_idt(); 1652 1653 r_idt.rd_limit = sizeof(struct gate_descriptor) * NIDT - 1; 1654 r_idt.rd_base = (int)idt; 1655 lidt(&r_idt); 1656 1657 /* dblfault TSS */ 1658 dblfault_tss = pmap_trm_alloc(sizeof(struct i386tss), M_NOWAIT | M_ZERO); 1659 dblfault_stack = pmap_trm_alloc(PAGE_SIZE, M_NOWAIT); 1660 dblfault_tss->tss_esp = dblfault_tss->tss_esp0 = 1661 dblfault_tss->tss_esp1 = dblfault_tss->tss_esp2 = 1662 (int)dblfault_stack + PAGE_SIZE; 1663 dblfault_tss->tss_ss = dblfault_tss->tss_ss0 = dblfault_tss->tss_ss1 = 1664 dblfault_tss->tss_ss2 = GSEL(GDATA_SEL, SEL_KPL); 1665 dblfault_tss->tss_cr3 = pmap_get_kcr3(); 1666 dblfault_tss->tss_eip = (int)dblfault_handler; 1667 dblfault_tss->tss_eflags = PSL_KERNEL; 1668 dblfault_tss->tss_ds = dblfault_tss->tss_es = 1669 dblfault_tss->tss_gs = GSEL(GDATA_SEL, SEL_KPL); 1670 dblfault_tss->tss_fs = GSEL(GPRIV_SEL, SEL_KPL); 1671 dblfault_tss->tss_cs = GSEL(GCODE_SEL, SEL_KPL); 1672 dblfault_tss->tss_ldt = GSEL(GLDT_SEL, SEL_KPL); 1673 gdt[GPANIC_SEL].sd.sd_lobase = (int)dblfault_tss; 1674 gdt[GPANIC_SEL].sd.sd_hibase = (u_int)dblfault_tss >> 24; 1675 1676 /* make ldt memory segments */ 1677 ldt = pmap_trm_alloc(sizeof(union descriptor) * NLDT, 1678 M_NOWAIT | M_ZERO); 1679 gdt[GLDT_SEL].sd.sd_lobase = (int)ldt; 1680 gdt[GLDT_SEL].sd.sd_hibase = (u_int)ldt >> 24; 1681 ldt_segs[LUCODE_SEL].ssd_limit = atop(0 - 1); 1682 ldt_segs[LUDATA_SEL].ssd_limit = atop(0 - 1); 1683 for (x = 0; x < nitems(ldt_segs); x++) 1684 ssdtosd(&ldt_segs[x], &ldt[x].sd); 1685 1686 _default_ldt = GSEL(GLDT_SEL, SEL_KPL); 1687 lldt(_default_ldt); 1688 PCPU_SET(currentldt, _default_ldt); 1689 1690 copyout_buf = pmap_trm_alloc(TRAMP_COPYOUT_SZ, M_NOWAIT); 1691 PCPU_SET(copyout_buf, copyout_buf); 1692 copyout_init_tramp(); 1693 } 1694 SYSINIT(vm_mem, SI_SUB_VM, SI_ORDER_SECOND, machdep_init_trampoline, NULL); 1695 1696 #ifdef COMPAT_43 1697 static void 1698 i386_setup_lcall_gate(void) 1699 { 1700 struct sysentvec *sv; 1701 struct user_segment_descriptor desc; 1702 u_int lcall_addr; 1703 1704 sv = &elf32_freebsd_sysvec; 1705 lcall_addr = (uintptr_t)sv->sv_psstrings - sz_lcall_tramp; 1706 1707 bzero(&desc, sizeof(desc)); 1708 desc.sd_type = SDT_MEMERA; 1709 desc.sd_dpl = SEL_UPL; 1710 desc.sd_p = 1; 1711 desc.sd_def32 = 1; 1712 desc.sd_gran = 1; 1713 desc.sd_lolimit = 0xffff; 1714 desc.sd_hilimit = 0xf; 1715 desc.sd_lobase = lcall_addr; 1716 desc.sd_hibase = lcall_addr >> 24; 1717 bcopy(&desc, &ldt[LSYS5CALLS_SEL], sizeof(desc)); 1718 } 1719 SYSINIT(elf32, SI_SUB_EXEC, SI_ORDER_ANY, i386_setup_lcall_gate, NULL); 1720 #endif 1721 1722 void 1723 cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size) 1724 { 1725 1726 pcpu->pc_acpi_id = 0xffffffff; 1727 } 1728 1729 static int 1730 smap_sysctl_handler(SYSCTL_HANDLER_ARGS) 1731 { 1732 struct bios_smap *smapbase; 1733 struct bios_smap_xattr smap; 1734 caddr_t kmdp; 1735 uint32_t *smapattr; 1736 int count, error, i; 1737 1738 /* Retrieve the system memory map from the loader. */ 1739 kmdp = preload_search_by_type("elf kernel"); 1740 if (kmdp == NULL) 1741 kmdp = preload_search_by_type("elf32 kernel"); 1742 smapbase = (struct bios_smap *)preload_search_info(kmdp, 1743 MODINFO_METADATA | MODINFOMD_SMAP); 1744 if (smapbase == NULL) 1745 return (0); 1746 smapattr = (uint32_t *)preload_search_info(kmdp, 1747 MODINFO_METADATA | MODINFOMD_SMAP_XATTR); 1748 count = *((u_int32_t *)smapbase - 1) / sizeof(*smapbase); 1749 error = 0; 1750 for (i = 0; i < count; i++) { 1751 smap.base = smapbase[i].base; 1752 smap.length = smapbase[i].length; 1753 smap.type = smapbase[i].type; 1754 if (smapattr != NULL) 1755 smap.xattr = smapattr[i]; 1756 else 1757 smap.xattr = 0; 1758 error = SYSCTL_OUT(req, &smap, sizeof(smap)); 1759 } 1760 return (error); 1761 } 1762 SYSCTL_PROC(_machdep, OID_AUTO, smap, 1763 CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0, 1764 smap_sysctl_handler, "S,bios_smap_xattr", 1765 "Raw BIOS SMAP data"); 1766 1767 void 1768 spinlock_enter(void) 1769 { 1770 struct thread *td; 1771 register_t flags; 1772 1773 td = curthread; 1774 if (td->td_md.md_spinlock_count == 0) { 1775 flags = intr_disable(); 1776 td->td_md.md_spinlock_count = 1; 1777 td->td_md.md_saved_flags = flags; 1778 critical_enter(); 1779 } else 1780 td->td_md.md_spinlock_count++; 1781 } 1782 1783 void 1784 spinlock_exit(void) 1785 { 1786 struct thread *td; 1787 register_t flags; 1788 1789 td = curthread; 1790 flags = td->td_md.md_saved_flags; 1791 td->td_md.md_spinlock_count--; 1792 if (td->td_md.md_spinlock_count == 0) { 1793 critical_exit(); 1794 intr_restore(flags); 1795 } 1796 } 1797 1798 #if defined(I586_CPU) && !defined(NO_F00F_HACK) 1799 static void f00f_hack(void *unused); 1800 SYSINIT(f00f_hack, SI_SUB_INTRINSIC, SI_ORDER_FIRST, f00f_hack, NULL); 1801 1802 static void 1803 f00f_hack(void *unused) 1804 { 1805 struct region_descriptor r_idt; 1806 struct gate_descriptor *new_idt; 1807 vm_offset_t tmp; 1808 1809 if (!has_f00f_bug) 1810 return; 1811 1812 printf("Intel Pentium detected, installing workaround for F00F bug\n"); 1813 1814 tmp = (vm_offset_t)pmap_trm_alloc(PAGE_SIZE * 3, M_NOWAIT | M_ZERO); 1815 if (tmp == 0) 1816 panic("kmem_malloc returned 0"); 1817 tmp = round_page(tmp); 1818 1819 /* Put the problematic entry (#6) at the end of the lower page. */ 1820 new_idt = (struct gate_descriptor *) 1821 (tmp + PAGE_SIZE - 7 * sizeof(struct gate_descriptor)); 1822 bcopy(idt, new_idt, sizeof(idt0)); 1823 r_idt.rd_base = (u_int)new_idt; 1824 r_idt.rd_limit = sizeof(idt0) - 1; 1825 lidt(&r_idt); 1826 /* SMP machines do not need the F00F hack. */ 1827 idt = new_idt; 1828 pmap_protect(kernel_pmap, tmp, tmp + PAGE_SIZE, VM_PROT_READ); 1829 } 1830 #endif /* defined(I586_CPU) && !NO_F00F_HACK */ 1831 1832 /* 1833 * Construct a PCB from a trapframe. This is called from kdb_trap() where 1834 * we want to start a backtrace from the function that caused us to enter 1835 * the debugger. We have the context in the trapframe, but base the trace 1836 * on the PCB. The PCB doesn't have to be perfect, as long as it contains 1837 * enough for a backtrace. 1838 */ 1839 void 1840 makectx(struct trapframe *tf, struct pcb *pcb) 1841 { 1842 1843 pcb->pcb_edi = tf->tf_edi; 1844 pcb->pcb_esi = tf->tf_esi; 1845 pcb->pcb_ebp = tf->tf_ebp; 1846 pcb->pcb_ebx = tf->tf_ebx; 1847 pcb->pcb_eip = tf->tf_eip; 1848 pcb->pcb_esp = (ISPL(tf->tf_cs)) ? tf->tf_esp : (int)(tf + 1) - 8; 1849 pcb->pcb_gs = rgs(); 1850 } 1851 1852 #ifdef KDB 1853 1854 /* 1855 * Provide inb() and outb() as functions. They are normally only available as 1856 * inline functions, thus cannot be called from the debugger. 1857 */ 1858 1859 /* silence compiler warnings */ 1860 u_char inb_(u_short); 1861 void outb_(u_short, u_char); 1862 1863 u_char 1864 inb_(u_short port) 1865 { 1866 return inb(port); 1867 } 1868 1869 void 1870 outb_(u_short port, u_char data) 1871 { 1872 outb(port, data); 1873 } 1874 1875 #endif /* KDB */ 1876