/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright (c) 2018 The FreeBSD Foundation
 * Copyright (c) 1992 Terrence R. Lambert.
 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Portions of this software were developed by A. Joseph Koshy under
 * sponsorship from the FreeBSD Foundation and Google, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)machdep.c	7.4 (Berkeley) 6/3/91
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_apic.h"
#include "opt_atpic.h"
#include "opt_cpu.h"
#include "opt_ddb.h"
#include "opt_inet.h"
#include "opt_isa.h"
#include "opt_kstack_pages.h"
#include "opt_maxmem.h"
#include "opt_mp_watchdog.h"
#include "opt_perfmon.h"
#include "opt_platform.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/cons.h>
#include <sys/cpu.h>
#include <sys/eventhandler.h>
#include <sys/exec.h>
#include <sys/imgact.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/ptrace.h>
#include <sys/reboot.h>
#include <sys/reg.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/smp.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/ucontext.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vm_phys.h>
#include <vm/vm_dumpset.h>

#ifdef DDB
#ifndef KDB
#error KDB must be enabled in order for DDB to work!
#endif
#include <ddb/ddb.h>
#include <ddb/db_sym.h>
#endif

#include <isa/rtc.h>

#include <net/netisr.h>

#include <machine/bootinfo.h>
#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <machine/intr_machdep.h>
#include <x86/mca.h>
#include <machine/md_var.h>
#include <machine/metadata.h>
#include <machine/mp_watchdog.h>
#include <machine/pc/bios.h>
#include <machine/pcb.h>
#include <machine/pcb_ext.h>
#include <machine/proc.h>
#include <machine/sigframe.h>
#include <machine/specialreg.h>
#include <machine/sysarch.h>
#include <machine/trap.h>
#include <x86/ucode.h>
#include <machine/vm86.h>
#include <x86/init.h>
#ifdef PERFMON
#include <machine/perfmon.h>
#endif
#ifdef SMP
#include <machine/smp.h>
#endif
#ifdef FDT
#include <x86/fdt.h>
#endif

#ifdef DEV_APIC
#include <x86/apicvar.h>
#endif

#ifdef DEV_ISA
#include <x86/isa/icu.h>
#endif

/* Sanity check for __curthread() */
CTASSERT(offsetof(struct pcpu, pc_curthread) == 0);

register_t init386(int first);
void dblfault_handler(void);
void identify_cpu(void);

static void cpu_startup(void *);
SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);

/* Intel ICH registers */
#define ICH_PMBASE	0x400
#define ICH_SMI_EN	(ICH_PMBASE + 0x30)

int	_udatasel, _ucodesel;
u_int	basemem;
static int above4g_allow = 1;
static int above24g_allow = 0;

int cold = 1;

long Maxmem = 0;
long realmem = 0;

#ifdef PAE
FEATURE(pae, "Physical Address Extensions");
#endif

struct kva_md_info kmi;

static struct trapframe proc0_tf;
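
/*
 * Per-CPU data for each processor.  pc_curthread must remain at offset
 * zero (enforced by the CTASSERT above) so that __curthread() can fetch
 * it cheaply, with a single %fs-relative load on i386.
 */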
struct pcpu __pcpu[MAXCPU];

struct mtx icu_lock;

struct mem_range_softc mem_range_softc;

extern char start_exceptions[], end_exceptions[];

extern struct sysentvec elf32_freebsd_sysvec;

/* Default init_ops implementation. */
struct init_ops init_ops = {
	.early_clock_source_init =	i8254_init,
	.early_delay =			i8254_delay,
};

static void
cpu_startup(void *dummy)
{
	uintmax_t memsize;
	char *sysenv;

	/*
	 * On MacBooks, we need to prevent the legacy USB circuit from
	 * generating an SMI#, because this can cause several problems,
	 * namely: incorrect CPU frequency detection and failure to
	 * start the APs.
	 * We do this by clearing a bit in the SMI_EN (SMI Control and
	 * Enable) register of the Intel ICH LPC Interface Bridge.
	 */
	sysenv = kern_getenv("smbios.system.product");
	if (sysenv != NULL) {
		if (strncmp(sysenv, "MacBook1,1", 10) == 0 ||
		    strncmp(sysenv, "MacBook3,1", 10) == 0 ||
		    strncmp(sysenv, "MacBook4,1", 10) == 0 ||
		    strncmp(sysenv, "MacBookPro1,1", 13) == 0 ||
		    strncmp(sysenv, "MacBookPro1,2", 13) == 0 ||
		    strncmp(sysenv, "MacBookPro3,1", 13) == 0 ||
		    strncmp(sysenv, "MacBookPro4,1", 13) == 0 ||
		    strncmp(sysenv, "Macmini1,1", 10) == 0) {
			if (bootverbose)
				printf("Disabling LEGACY_USB_EN bit on "
				    "Intel ICH.\n");
			outl(ICH_SMI_EN, inl(ICH_SMI_EN) & ~0x8);
		}
		freeenv(sysenv);
	}

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	startrtclock();
	printcpuinfo();
	panicifcpuunsupported();
#ifdef PERFMON
	perfmon_init();
#endif

	/*
	 * Display physical memory if SMBIOS reports a reasonable amount.
	 */
	memsize = 0;
	sysenv = kern_getenv("smbios.memory.enabled");
	if (sysenv != NULL) {
		memsize = (uintmax_t)strtoul(sysenv, (char **)NULL, 10) << 10;
		freeenv(sysenv);
	}
	if (memsize < ptoa((uintmax_t)vm_free_count()))
		memsize = ptoa((uintmax_t)Maxmem);
	printf("real memory  = %ju (%ju MB)\n", memsize, memsize >> 20);
	realmem = atop(memsize);

	/*
	 * Display any holes after the first chunk of extended memory.
	 */
	if (bootverbose) {
		int indx;

		printf("Physical memory chunk(s):\n");
		for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
			vm_paddr_t size;

			size = phys_avail[indx + 1] - phys_avail[indx];
			printf(
			    "0x%016jx - 0x%016jx, %ju bytes (%ju pages)\n",
			    (uintmax_t)phys_avail[indx],
			    (uintmax_t)phys_avail[indx + 1] - 1,
			    (uintmax_t)size, (uintmax_t)size / PAGE_SIZE);
		}
	}

	vm_ksubmap_init(&kmi);

	printf("avail memory = %ju (%ju MB)\n",
	    ptoa((uintmax_t)vm_free_count()),
	    ptoa((uintmax_t)vm_free_count()) / 1048576);

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();
	vm_pager_bufferinit();
	cpu_setregs();
}

void
cpu_setregs(void)
{
	unsigned int cr0;

	cr0 = rcr0();

	/*
	 * CR0_MP, CR0_NE and CR0_TS are set for NPX (FPU) support:
	 *
	 * Prepare to trap all ESC (i.e., NPX) instructions and all WAIT
	 * instructions.  We must set the CR0_MP bit and use the CR0_TS
	 * bit to control the trap, because setting the CR0_EM bit does
	 * not cause WAIT instructions to trap.
	 * It's important to trap WAIT instructions - otherwise the
	 * "wait" variants of no-wait control instructions would
	 * degenerate to the "no-wait" variants after FP context
	 * switches but work correctly otherwise.  It's particularly
	 * important to trap WAITs when there is no NPX - otherwise
	 * the "wait" variants would always degenerate.
	 *
	 * Try setting CR0_NE to get correct error reporting on 486DX's.
	 * Setting it should fail or do nothing on lesser processors.
	 */
	cr0 |= CR0_MP | CR0_NE | CR0_TS | CR0_WP | CR0_AM;
	load_cr0(cr0);
	load_gs(_udatasel);
}

u_long bootdev;		/* not a struct cdev *- encoding is different */
SYSCTL_ULONG(_machdep, OID_AUTO, guessed_bootdev,
    CTLFLAG_RD, &bootdev, 0, "Maybe the Boot device (not in struct cdev *format)");

/*
 * Initialize 386 and configure to run kernel
 */

/*
 * Initialize segments & interrupt table
 */

int _default_ldt;

struct mtx dt_lock;			/* lock for GDT and LDT */

union descriptor gdt0[NGDT];	/* initial global descriptor table */
union descriptor *gdt = gdt0;	/* global descriptor table */

union descriptor *ldt;		/* local descriptor table */

static struct gate_descriptor idt0[NIDT];
struct gate_descriptor *idt = &idt0[0];	/* interrupt descriptor table */

static struct i386tss *dblfault_tss;
static char *dblfault_stack;

static struct i386tss common_tss0;

vm_offset_t proc0kstack;

/*
 * software prototypes -- in more palatable form.
 *
 * GCODE_SEL through GUDATA_SEL must be in this order for syscall/sysret
 * GUFS_SEL and GUGS_SEL must be in this order (swtch.s knows it)
 */
struct soft_segment_descriptor gdt_segs[] = {
/* GNULL_SEL	0 Null Descriptor */
{	.ssd_base = 0x0,
	.ssd_limit = 0x0,
	.ssd_type = 0,
	.ssd_dpl = SEL_KPL,
	.ssd_p = 0,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
/* GPRIV_SEL	1 SMP Per-Processor Private Data Descriptor */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_KPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
/* GUFS_SEL	2 %fs Descriptor for user */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
/* GUGS_SEL	3 %gs Descriptor for user */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
/* GCODE_SEL	4 Code Descriptor for kernel */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMERA,
	.ssd_dpl = SEL_KPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
/* GDATA_SEL	5 Data Descriptor for kernel */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_KPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
/* GUCODE_SEL	6 Code Descriptor for user */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMERA,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
/* GUDATA_SEL	7 Data Descriptor for user */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
/* GBIOSLOWMEM_SEL 8 BIOS access to realmode segment 0x40, must be #8 in GDT */
{	.ssd_base = 0x400,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_KPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
/* GPROC0_SEL	9 Proc 0 Tss Descriptor */
{
	.ssd_base = 0x0,
	.ssd_limit = sizeof(struct i386tss)-1,
	.ssd_type = SDT_SYS386TSS,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
/* GLDT_SEL	10 LDT Descriptor */
{	.ssd_base = 0,
	.ssd_limit = sizeof(union descriptor) * NLDT - 1,
	.ssd_type = SDT_SYSLDT,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
/* GUSERLDT_SEL	11 User LDT Descriptor per process */
{	.ssd_base = 0,
	.ssd_limit = (512 * sizeof(union descriptor)-1),
	.ssd_type = SDT_SYSLDT,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
/* GPANIC_SEL	12 Panic Tss Descriptor */
{	.ssd_base = 0,
	.ssd_limit = sizeof(struct i386tss)-1,
	.ssd_type = SDT_SYS386TSS,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
/* GBIOSCODE32_SEL 13 BIOS 32-bit interface (32bit Code) */
{	.ssd_base = 0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMERA,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 1		},
/* GBIOSCODE16_SEL 14 BIOS 32-bit interface (16bit Code) */
{	.ssd_base = 0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMERA,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 1		},
/* GBIOSDATA_SEL 15 BIOS 32-bit interface (Data) */
{	.ssd_base = 0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
/* GBIOSUTIL_SEL 16 BIOS 16-bit interface (Utility) */
{	.ssd_base = 0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 1		},
/* GBIOSARGS_SEL 17 BIOS 16-bit interface (Arguments) */
{	.ssd_base = 0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = 0,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 1		},
/* GNDIS_SEL	18 NDIS Descriptor */
{	.ssd_base = 0x0,
	.ssd_limit = 0x0,
	.ssd_type = 0,
	.ssd_dpl = 0,
	.ssd_p = 0,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
};

static struct soft_segment_descriptor ldt_segs[] = {
/* Null Descriptor - overwritten by call gate */
{	.ssd_base = 0x0,
	.ssd_limit = 0x0,
	.ssd_type = 0,
	.ssd_dpl = 0,
	.ssd_p = 0,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
/* Null Descriptor - overwritten by call gate */
{	.ssd_base = 0x0,
	.ssd_limit = 0x0,
	.ssd_type = 0,
	.ssd_dpl = 0,
	.ssd_p = 0,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
/* Null Descriptor - overwritten by call gate */
{	.ssd_base = 0x0,
	.ssd_limit = 0x0,
	.ssd_type = 0,
	.ssd_dpl = 0,
	.ssd_p = 0,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
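/*
 * The null slots above are placeholders that are rewritten at runtime;
 * for example, under COMPAT_43 i386_setup_lcall_gate() below installs
 * the lcall $7,$0 syscall trampoline descriptor in slot 0
 * (LSYS5CALLS_SEL).
 */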
/* Code Descriptor for user */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMERA,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
/* Null Descriptor - overwritten by call gate */
{	.ssd_base = 0x0,
	.ssd_limit = 0x0,
	.ssd_type = 0,
	.ssd_dpl = 0,
	.ssd_p = 0,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 0,
	.ssd_gran = 0		},
/* Data Descriptor for user */
{	.ssd_base = 0x0,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_UPL,
	.ssd_p = 1,
	.ssd_xx = 0, .ssd_xx1 = 0,
	.ssd_def32 = 1,
	.ssd_gran = 1		},
};

size_t setidt_disp;

void
setidt(int idx, inthand_t *func, int typ, int dpl, int selec)
{
	uintptr_t off;

	off = func != NULL ? (uintptr_t)func + setidt_disp : 0;
	setidt_nodisp(idx, off, typ, dpl, selec);
}

void
setidt_nodisp(int idx, uintptr_t off, int typ, int dpl, int selec)
{
	struct gate_descriptor *ip;

	ip = idt + idx;
	ip->gd_looffset = off;
	ip->gd_selector = selec;
	ip->gd_stkcpy = 0;
	ip->gd_xx = 0;
	ip->gd_type = typ;
	ip->gd_dpl = dpl;
	ip->gd_p = 1;
	ip->gd_hioffset = ((u_int)off) >> 16;
}

extern inthand_t
	IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl),
	IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(fpusegm),
	IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot),
	IDTVEC(page), IDTVEC(mchk), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(align),
	IDTVEC(xmm),
#ifdef KDTRACE_HOOKS
	IDTVEC(dtrace_ret),
#endif
#ifdef XENHVM
	IDTVEC(xen_intr_upcall),
#endif
	IDTVEC(int0x80_syscall);

#ifdef DDB
/*
 * Display the index and function name of any IDT entries that don't use
 * the default 'rsvd' entry point.
 */
DB_SHOW_COMMAND(idt, db_show_idt)
{
	struct gate_descriptor *ip;
	int idx;
	uintptr_t func, func_trm;
	bool trm;

	ip = idt;
	for (idx = 0; idx < NIDT && !db_pager_quit; idx++) {
		if (ip->gd_type == SDT_SYSTASKGT) {
			db_printf("%3d\t<TASK>\n", idx);
		} else {
			func = (ip->gd_hioffset << 16 | ip->gd_looffset);
			if (func >= PMAP_TRM_MIN_ADDRESS) {
				func_trm = func;
				func -= setidt_disp;
				trm = true;
			} else
				trm = false;
			if (func != (uintptr_t)&IDTVEC(rsvd)) {
				db_printf("%3d\t", idx);
				db_printsym(func, DB_STGY_PROC);
				if (trm)
					db_printf(" (trampoline %#x)",
					    func_trm);
				db_printf("\n");
			}
		}
		ip++;
	}
}

/*
 * Show privileged registers.
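 *
 * The descriptor-table registers are printed as base/limit; e.g. an
 * idtr line of "0xc1234000/07ff" (an illustrative base address) means
 * a table base of 0xc1234000 with limit 0x7ff, i.e. 256 eight-byte
 * gates.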
 */
DB_SHOW_COMMAND(sysregs, db_show_sysregs)
{
	uint64_t idtr, gdtr;

	idtr = ridt();
	db_printf("idtr\t0x%08x/%04x\n",
	    (u_int)(idtr >> 16), (u_int)idtr & 0xffff);
	gdtr = rgdt();
	db_printf("gdtr\t0x%08x/%04x\n",
	    (u_int)(gdtr >> 16), (u_int)gdtr & 0xffff);
	db_printf("ldtr\t0x%04x\n", rldt());
	db_printf("tr\t0x%04x\n", rtr());
	db_printf("cr0\t0x%08x\n", rcr0());
	db_printf("cr2\t0x%08x\n", rcr2());
	db_printf("cr3\t0x%08x\n", rcr3());
	db_printf("cr4\t0x%08x\n", rcr4());
	if (rcr4() & CR4_XSAVE)
		db_printf("xcr0\t0x%016llx\n", rxcr(0));
	if (amd_feature & (AMDID_NX | AMDID_LM))
		db_printf("EFER\t0x%016llx\n", rdmsr(MSR_EFER));
	if (cpu_feature2 & (CPUID2_VMX | CPUID2_SMX))
		db_printf("FEATURES_CTL\t0x%016llx\n",
		    rdmsr(MSR_IA32_FEATURE_CONTROL));
	if (((cpu_vendor_id == CPU_VENDOR_INTEL ||
	    cpu_vendor_id == CPU_VENDOR_AMD) && CPUID_TO_FAMILY(cpu_id) >= 6) ||
	    cpu_vendor_id == CPU_VENDOR_HYGON)
		db_printf("DEBUG_CTL\t0x%016llx\n", rdmsr(MSR_DEBUGCTLMSR));
	if (cpu_feature & CPUID_PAT)
		db_printf("PAT\t0x%016llx\n", rdmsr(MSR_PAT));
}

DB_SHOW_COMMAND(dbregs, db_show_dbregs)
{

	db_printf("dr0\t0x%08x\n", rdr0());
	db_printf("dr1\t0x%08x\n", rdr1());
	db_printf("dr2\t0x%08x\n", rdr2());
	db_printf("dr3\t0x%08x\n", rdr3());
	db_printf("dr6\t0x%08x\n", rdr6());
	db_printf("dr7\t0x%08x\n", rdr7());
}

DB_SHOW_COMMAND(frame, db_show_frame)
{
	struct trapframe *frame;

	frame = have_addr ? (struct trapframe *)addr : curthread->td_frame;
	printf("ss %#x esp %#x efl %#x cs %#x eip %#x\n",
	    frame->tf_ss, frame->tf_esp, frame->tf_eflags, frame->tf_cs,
	    frame->tf_eip);
	printf("err %#x trapno %d\n", frame->tf_err, frame->tf_trapno);
	printf("ds %#x es %#x fs %#x\n",
	    frame->tf_ds, frame->tf_es, frame->tf_fs);
	printf("eax %#x ecx %#x edx %#x ebx %#x\n",
	    frame->tf_eax, frame->tf_ecx, frame->tf_edx, frame->tf_ebx);
	printf("ebp %#x esi %#x edi %#x\n",
	    frame->tf_ebp, frame->tf_esi, frame->tf_edi);
}
#endif

void
sdtossd(struct segment_descriptor *sd, struct soft_segment_descriptor *ssd)
{

	ssd->ssd_base  = (sd->sd_hibase << 24) | sd->sd_lobase;
	ssd->ssd_limit = (sd->sd_hilimit << 16) | sd->sd_lolimit;
	ssd->ssd_type  = sd->sd_type;
	ssd->ssd_dpl   = sd->sd_dpl;
	ssd->ssd_p     = sd->sd_p;
	ssd->ssd_def32 = sd->sd_def32;
	ssd->ssd_gran  = sd->sd_gran;
}

static int
add_physmap_entry(uint64_t base, uint64_t length, vm_paddr_t *physmap,
    int *physmap_idxp)
{
	uint64_t lim, ign;
	int i, insert_idx, physmap_idx;

	physmap_idx = *physmap_idxp;

	if (length == 0)
		return (1);

	lim = 0x100000000;					/*  4G */
	if (pae_mode && above4g_allow)
		lim = above24g_allow ? -1ULL : 0x600000000;	/* 24G */
	if (base >= lim) {
		printf("%uK of memory above %uGB ignored, pae %d "
		    "above4g_allow %d above24g_allow %d\n",
		    (u_int)(length / 1024), (u_int)(lim >> 30), pae_mode,
		    above4g_allow, above24g_allow);
		return (1);
	}
	if (base + length >= lim) {
		ign = base + length - lim;
		length -= ign;
		printf("%uK of memory above %uGB ignored, pae %d "
		    "above4g_allow %d above24g_allow %d\n",
		    (u_int)(ign / 1024), (u_int)(lim >> 30), pae_mode,
		    above4g_allow, above24g_allow);
	}

	/*
	 * Find insertion point while checking for overlap.
	 * Start off by assuming the new entry will be added to the end.
	 */
	insert_idx = physmap_idx + 2;
	for (i = 0; i <= physmap_idx; i += 2) {
		if (base < physmap[i + 1]) {
			if (base + length <= physmap[i]) {
				insert_idx = i;
				break;
			}
			if (boothowto & RB_VERBOSE)
				printf(
		    "Overlapping memory regions, ignoring second region\n");
			return (1);
		}
	}

	/* See if we can prepend to the next entry. */
	if (insert_idx <= physmap_idx && base + length == physmap[insert_idx]) {
		physmap[insert_idx] = base;
		return (1);
	}

	/* See if we can append to the previous entry. */
	if (insert_idx > 0 && base == physmap[insert_idx - 1]) {
		physmap[insert_idx - 1] += length;
		return (1);
	}

	physmap_idx += 2;
	*physmap_idxp = physmap_idx;
	if (physmap_idx == PHYS_AVAIL_ENTRIES) {
		printf(
		"Too many segments in the physical address map, giving up\n");
		return (0);
	}

	/*
	 * Move the last 'N' entries down to make room for the new
	 * entry if needed.
	 */
	for (i = physmap_idx; i > insert_idx; i -= 2) {
		physmap[i] = physmap[i - 2];
		physmap[i + 1] = physmap[i - 1];
	}

	/* Insert the new entry. */
	physmap[insert_idx] = base;
	physmap[insert_idx + 1] = base + length;
	return (1);
}

static int
add_smap_entry(struct bios_smap *smap, vm_paddr_t *physmap, int *physmap_idxp)
{

	if (boothowto & RB_VERBOSE)
		printf("SMAP type=%02x base=%016llx len=%016llx\n",
		    smap->type, smap->base, smap->length);

	if (smap->type != SMAP_TYPE_MEMORY)
		return (1);

	return (add_physmap_entry(smap->base, smap->length, physmap,
	    physmap_idxp));
}

static void
add_smap_entries(struct bios_smap *smapbase, vm_paddr_t *physmap,
    int *physmap_idxp)
{
	struct bios_smap *smap, *smapend;
	u_int32_t smapsize;

	/*
	 * Memory map from INT 15:E820.
	 *
	 * subr_module.c says:
	 * "Consumer may safely assume that size value precedes data."
	 * ie: an int32_t immediately precedes SMAP.
	 */
	smapsize = *((u_int32_t *)smapbase - 1);
	smapend = (struct bios_smap *)((uintptr_t)smapbase + smapsize);

	for (smap = smapbase; smap < smapend; smap++)
		if (!add_smap_entry(smap, physmap, physmap_idxp))
			break;
}

static void
basemem_setup(void)
{

	if (basemem > 640) {
		printf("Preposterous BIOS basemem of %uK, truncating to 640K\n",
		    basemem);
		basemem = 640;
	}

	pmap_basemem_setup(basemem);
}

/*
 * Populate the (physmap) array with base/bound pairs describing the
 * available physical memory in the system, then test this memory and
 * build the phys_avail array describing the actually-available memory.
 *
 * If we cannot accurately determine the physical memory map, then use
 * the value from the 0xE801 call, and failing that, the RTC.
 *
 * Total memory size may be set by the kernel environment variable
 * hw.physmem or the compile-time define MAXMEM.
 *
 * XXX first should be vm_paddr_t.
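 *
 * As a sketch of the two-segment fallback map built below (assuming a
 * classic machine with 640K of base memory and extended memory at 1MB):
 *
 *	physmap[0] = 0;         physmap[1] = basemem * 1024;
 *	physmap[2] = 0x100000;  physmap[3] = 0x100000 + extmem * 1024;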
 */
static void
getmemsize(int first)
{
	int has_smap, off, physmap_idx, pa_indx, da_indx;
	u_long memtest;
	vm_paddr_t physmap[PHYS_AVAIL_ENTRIES];
	quad_t dcons_addr, dcons_size, physmem_tunable;
	int hasbrokenint12, i, res;
	u_int extmem;
	struct vm86frame vmf;
	struct vm86context vmc;
	vm_paddr_t pa;
	struct bios_smap *smap, *smapbase;
	caddr_t kmdp;

	has_smap = 0;
	bzero(&vmf, sizeof(vmf));
	bzero(physmap, sizeof(physmap));
	basemem = 0;

	/*
	 * Tell the physical memory allocator about pages used to store
	 * the kernel and preloaded data.  See kmem_bootstrap_free().
	 */
	vm_phys_early_add_seg((vm_paddr_t)KERNLOAD, trunc_page(first));

	TUNABLE_INT_FETCH("hw.above4g_allow", &above4g_allow);
	TUNABLE_INT_FETCH("hw.above24g_allow", &above24g_allow);

	/*
	 * Check if the loader supplied an SMAP memory map.  If so,
	 * use that and do not make any VM86 calls.
	 */
	physmap_idx = 0;
	kmdp = preload_search_by_type("elf kernel");
	if (kmdp == NULL)
		kmdp = preload_search_by_type("elf32 kernel");
	smapbase = (struct bios_smap *)preload_search_info(kmdp,
	    MODINFO_METADATA | MODINFOMD_SMAP);
	if (smapbase != NULL) {
		add_smap_entries(smapbase, physmap, &physmap_idx);
		has_smap = 1;
		goto have_smap;
	}

	/*
	 * Some newer BIOSes have a broken INT 12H implementation
	 * which causes a kernel panic immediately.  In this case, we
	 * need to use the SMAP to determine the base memory size.
	 */
	hasbrokenint12 = 0;
	TUNABLE_INT_FETCH("hw.hasbrokenint12", &hasbrokenint12);
	if (hasbrokenint12 == 0) {
		/* Use INT12 to determine base memory size. */
		vm86_intcall(0x12, &vmf);
		basemem = vmf.vmf_ax;
		basemem_setup();
	}

	/*
	 * Fetch the memory map with INT 15:E820.  Map page 1 R/W into
	 * the kernel page table so we can use it as a buffer.  The
	 * kernel will unmap this page later.
	 */
	vmc.npages = 0;
	smap = (void *)vm86_addpage(&vmc, 1, PMAP_MAP_LOW + ptoa(1));
	res = vm86_getptr(&vmc, (vm_offset_t)smap, &vmf.vmf_es, &vmf.vmf_di);
	KASSERT(res != 0, ("vm86_getptr() failed: address not found"));

	vmf.vmf_ebx = 0;
	do {
		vmf.vmf_eax = 0xE820;
		vmf.vmf_edx = SMAP_SIG;
		vmf.vmf_ecx = sizeof(struct bios_smap);
		i = vm86_datacall(0x15, &vmf, &vmc);
		if (i || vmf.vmf_eax != SMAP_SIG)
			break;
		has_smap = 1;
		if (!add_smap_entry(smap, physmap, &physmap_idx))
			break;
	} while (vmf.vmf_ebx != 0);

have_smap:
	/*
	 * If we didn't fetch the "base memory" size from INT12,
	 * figure it out from the SMAP (or just guess).
	 */
	if (basemem == 0) {
		for (i = 0; i <= physmap_idx; i += 2) {
			if (physmap[i] == 0x00000000) {
				basemem = physmap[i + 1] / 1024;
				break;
			}
		}

		/* XXX: If we couldn't find basemem from SMAP, just guess. */
		if (basemem == 0)
			basemem = 640;
		basemem_setup();
	}

	if (physmap[1] != 0)
		goto physmap_done;

	/*
	 * If we failed to find an SMAP, figure out the extended
	 * memory size.  We will then build a simple memory map with
	 * two segments, one for "base memory" and the second for
	 * "extended memory".  Note that "extended memory" starts at a
	 * physical address of 1MB and that both basemem and extmem
	 * are in units of 1KB.
	 *
	 * First, try to fetch the extended memory size via INT 15:E801.
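	 * On success, E801 reports the memory between 1MB and 16MB in
	 * %cx (1K units) and the memory above 16MB in %dx (64K units),
	 * hence the "cx + dx * 64" computation below.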
	 */
	vmf.vmf_ax = 0xE801;
	if (vm86_intcall(0x15, &vmf) == 0) {
		extmem = vmf.vmf_cx + vmf.vmf_dx * 64;
	} else {
		/*
		 * If INT15:E801 fails, this is our last ditch effort
		 * to determine the extended memory size.  Currently
		 * we prefer the RTC value over INT15:88.
		 */
#if 0
		vmf.vmf_ah = 0x88;
		vm86_intcall(0x15, &vmf);
		extmem = vmf.vmf_ax;
#else
		extmem = rtcin(RTC_EXTLO) + (rtcin(RTC_EXTHI) << 8);
#endif
	}

	/*
	 * Special hack for chipsets that still remap the 384k hole when
	 * there's 16MB of memory - this really confuses people that
	 * are trying to use bus mastering ISA controllers with the
	 * "16MB limit"; they only have 16MB, but the remapping puts
	 * them beyond the limit.
	 *
	 * If extended memory is between 15-16MB (16-17MB phys address range),
	 * chop it to 15MB.
	 */
	if ((extmem > 15 * 1024) && (extmem < 16 * 1024))
		extmem = 15 * 1024;

	physmap[0] = 0;
	physmap[1] = basemem * 1024;
	physmap_idx = 2;
	physmap[physmap_idx] = 0x100000;
	physmap[physmap_idx + 1] = physmap[physmap_idx] + extmem * 1024;

physmap_done:
	/*
	 * Now, physmap contains a map of physical memory.
	 */

#ifdef SMP
	/* make hole for AP bootstrap code */
	alloc_ap_trampoline(physmap, &physmap_idx);
#endif

	/*
	 * Maxmem isn't the "maximum memory", it's one larger than the
	 * highest page of the physical address space.  It should be
	 * called something like "Maxphyspage".  We may adjust this
	 * based on ``hw.physmem'' and the results of the memory test.
	 *
	 * This is especially confusing when it is much larger than the
	 * memory size and is displayed as "realmem".
	 */
	Maxmem = atop(physmap[physmap_idx + 1]);

#ifdef MAXMEM
	Maxmem = MAXMEM / 4;
#endif

	if (TUNABLE_QUAD_FETCH("hw.physmem", &physmem_tunable))
		Maxmem = atop(physmem_tunable);

	/*
	 * If we have an SMAP, don't allow MAXMEM or hw.physmem to extend
	 * the amount of memory in the system.
	 */
	if (has_smap && Maxmem > atop(physmap[physmap_idx + 1]))
		Maxmem = atop(physmap[physmap_idx + 1]);

	/*
	 * The boot memory test is disabled by default, as it takes a
	 * significant amount of time on large-memory systems, and is
	 * unfriendly to virtual machines as it unnecessarily touches all
	 * pages.
	 *
	 * A general name is used as the code may be extended to support
	 * additional tests beyond the current "page present" test.
	 */
	memtest = 0;
	TUNABLE_ULONG_FETCH("hw.memtest.tests", &memtest);

	if (atop(physmap[physmap_idx + 1]) != Maxmem &&
	    (boothowto & RB_VERBOSE))
		printf("Physical memory use set to %ldK\n", Maxmem * 4);

	/*
	 * If Maxmem has been increased beyond what the system has detected,
	 * extend the last memory segment to the new limit.
	 */
	if (atop(physmap[physmap_idx + 1]) < Maxmem)
		physmap[physmap_idx + 1] = ptoa((vm_paddr_t)Maxmem);

	/* call pmap initialization to make new kernel address space */
	pmap_bootstrap(first);

	/*
	 * Size up each available chunk of physical memory.
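	 * phys_avail[] and dump_avail[] are built as (start, end) pairs
	 * terminated by a zeroed pair; page 0 is deliberately excluded
	 * below, so the first usable address is PAGE_SIZE.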
	 */
	physmap[0] = PAGE_SIZE;		/* mask off page 0 */
	pa_indx = 0;
	da_indx = 1;
	phys_avail[pa_indx++] = physmap[0];
	phys_avail[pa_indx] = physmap[0];
	dump_avail[da_indx] = physmap[0];

	/*
	 * Get dcons buffer address
	 */
	if (getenv_quad("dcons.addr", &dcons_addr) == 0 ||
	    getenv_quad("dcons.size", &dcons_size) == 0)
		dcons_addr = 0;

	/*
	 * physmap is in bytes, so when converting to page boundaries,
	 * round up the start address and round down the end address.
	 */
	for (i = 0; i <= physmap_idx; i += 2) {
		vm_paddr_t end;

		end = ptoa((vm_paddr_t)Maxmem);
		if (physmap[i + 1] < end)
			end = trunc_page(physmap[i + 1]);
		for (pa = round_page(physmap[i]); pa < end; pa += PAGE_SIZE) {
			int tmp, page_bad, full;
			int *ptr;

			full = FALSE;
			/*
			 * block out kernel memory as not available.
			 */
			if (pa >= KERNLOAD && pa < first)
				goto do_dump_avail;

			/*
			 * block out dcons buffer
			 */
			if (dcons_addr > 0
			    && pa >= trunc_page(dcons_addr)
			    && pa < dcons_addr + dcons_size)
				goto do_dump_avail;

			page_bad = FALSE;
			if (memtest == 0)
				goto skip_memtest;

			/*
			 * map page into kernel: valid, read/write,
			 * non-cacheable
			 */
			ptr = (int *)pmap_cmap3(pa, PG_V | PG_RW | PG_N);

			tmp = *(int *)ptr;
			/*
			 * Test for alternating 1's and 0's
			 */
			*(volatile int *)ptr = 0xaaaaaaaa;
			if (*(volatile int *)ptr != 0xaaaaaaaa)
				page_bad = TRUE;
			/*
			 * Test for alternating 0's and 1's
			 */
			*(volatile int *)ptr = 0x55555555;
			if (*(volatile int *)ptr != 0x55555555)
				page_bad = TRUE;
			/*
			 * Test for all 1's
			 */
			*(volatile int *)ptr = 0xffffffff;
			if (*(volatile int *)ptr != 0xffffffff)
				page_bad = TRUE;
			/*
			 * Test for all 0's
			 */
			*(volatile int *)ptr = 0x0;
			if (*(volatile int *)ptr != 0x0)
				page_bad = TRUE;
			/*
			 * Restore original value.
			 */
			*(int *)ptr = tmp;

skip_memtest:
			/*
			 * Adjust array of valid/good pages.
			 */
			if (page_bad == TRUE)
				continue;
			/*
			 * If this good page is a continuation of the
			 * previous set of good pages, then just increase
			 * the end pointer.  Otherwise start a new chunk.
			 * Note that "end" points one page past the last
			 * valid page, making the range >= start and < end.
			 *
			 * If we're also doing a speculative memory test
			 * and we are at or past the end, bump up Maxmem
			 * so that we keep going.  The first bad page
			 * will terminate the loop.
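			 *
			 * For example, if phys_avail[pa_indx] already
			 * ends exactly at 'pa', the current chunk is
			 * simply extended by one page; otherwise a new
			 * (start, end) pair is opened at 'pa'.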
			 */
			if (phys_avail[pa_indx] == pa) {
				phys_avail[pa_indx] += PAGE_SIZE;
			} else {
				pa_indx++;
				if (pa_indx == PHYS_AVAIL_ENTRIES) {
					printf(
		"Too many holes in the physical address space, giving up\n");
					pa_indx--;
					full = TRUE;
					goto do_dump_avail;
				}
				phys_avail[pa_indx++] = pa;	/* start */
				phys_avail[pa_indx] = pa + PAGE_SIZE; /* end */
			}
			physmem++;
do_dump_avail:
			if (dump_avail[da_indx] == pa) {
				dump_avail[da_indx] += PAGE_SIZE;
			} else {
				da_indx++;
				if (da_indx == PHYS_AVAIL_ENTRIES) {
					da_indx--;
					goto do_next;
				}
				dump_avail[da_indx++] = pa;	/* start */
				dump_avail[da_indx] = pa + PAGE_SIZE; /* end */
			}
do_next:
			if (full)
				break;
		}
	}
	pmap_cmap3(0, 0);

	/*
	 * XXX
	 * The last chunk must contain at least one page plus the message
	 * buffer to avoid complicating other code (message buffer address
	 * calculation, etc.).
	 */
	while (phys_avail[pa_indx - 1] + PAGE_SIZE +
	    round_page(msgbufsize) >= phys_avail[pa_indx]) {
		physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]);
		phys_avail[pa_indx--] = 0;
		phys_avail[pa_indx--] = 0;
	}

	Maxmem = atop(phys_avail[pa_indx]);

	/* Trim off space for the message buffer. */
	phys_avail[pa_indx] -= round_page(msgbufsize);

	/* Map the message buffer. */
	for (off = 0; off < round_page(msgbufsize); off += PAGE_SIZE)
		pmap_kenter((vm_offset_t)msgbufp + off, phys_avail[pa_indx] +
		    off);
}

static void
i386_kdb_init(void)
{
#ifdef DDB
	db_fetch_ksymtab(bootinfo.bi_symtab, bootinfo.bi_esymtab, 0);
#endif
	kdb_init();
#ifdef KDB
	if (boothowto & RB_KDB)
		kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
#endif
}

static void
fixup_idt(void)
{
	struct gate_descriptor *ip;
	uintptr_t off;
	int x;

	for (x = 0; x < NIDT; x++) {
		ip = &idt[x];
		if (ip->gd_type != SDT_SYS386IGT &&
		    ip->gd_type != SDT_SYS386TGT)
			continue;
		off = ip->gd_looffset + (((u_int)ip->gd_hioffset) << 16);
		KASSERT(off >= (uintptr_t)start_exceptions &&
		    off < (uintptr_t)end_exceptions,
		    ("IDT[%d] type %d off %#x", x, ip->gd_type, off));
		off += setidt_disp;
		MPASS(off >= PMAP_TRM_MIN_ADDRESS &&
		    off < PMAP_TRM_MAX_ADDRESS);
		ip->gd_looffset = off;
		ip->gd_hioffset = off >> 16;
	}
}

static void
i386_setidt1(void)
{
	int x;

	/* exceptions */
	for (x = 0; x < NIDT; x++)
		setidt(x, &IDTVEC(rsvd), SDT_SYS386IGT, SEL_KPL,
		    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_DE, &IDTVEC(div), SDT_SYS386IGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_DB, &IDTVEC(dbg), SDT_SYS386IGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_NMI, &IDTVEC(nmi), SDT_SYS386IGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_BP, &IDTVEC(bpt), SDT_SYS386IGT, SEL_UPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_OF, &IDTVEC(ofl), SDT_SYS386IGT, SEL_UPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_BR, &IDTVEC(bnd), SDT_SYS386IGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_UD, &IDTVEC(ill), SDT_SYS386IGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_NM, &IDTVEC(dna), SDT_SYS386IGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_DF, 0, SDT_SYSTASKGT, SEL_KPL, GSEL(GPANIC_SEL,
	    SEL_KPL));
	setidt(IDT_FPUGP, &IDTVEC(fpusegm), SDT_SYS386IGT,
	    SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_TS, &IDTVEC(tss), SDT_SYS386IGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_NP, &IDTVEC(missing), SDT_SYS386IGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_SS, &IDTVEC(stk), SDT_SYS386IGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_GP, &IDTVEC(prot), SDT_SYS386IGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_PF, &IDTVEC(page), SDT_SYS386IGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_MF, &IDTVEC(fpu), SDT_SYS386IGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_AC, &IDTVEC(align), SDT_SYS386IGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_MC, &IDTVEC(mchk), SDT_SYS386IGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_XF, &IDTVEC(xmm), SDT_SYS386IGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_SYSCALL, &IDTVEC(int0x80_syscall),
	    SDT_SYS386IGT, SEL_UPL, GSEL(GCODE_SEL, SEL_KPL));
#ifdef KDTRACE_HOOKS
	setidt(IDT_DTRACE_RET, &IDTVEC(dtrace_ret),
	    SDT_SYS386IGT, SEL_UPL, GSEL(GCODE_SEL, SEL_KPL));
#endif
#ifdef XENHVM
	setidt(IDT_EVTCHN, &IDTVEC(xen_intr_upcall),
	    SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
#endif
}

static void
i386_setidt2(void)
{

	setidt(IDT_UD, &IDTVEC(ill), SDT_SYS386IGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_GP, &IDTVEC(prot), SDT_SYS386IGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
}

#if defined(DEV_ISA) && !defined(DEV_ATPIC)
static void
i386_setidt3(void)
{

	setidt(IDT_IO_INTS + 7, IDTVEC(spuriousint),
	    SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_IO_INTS + 15, IDTVEC(spuriousint),
	    SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
}
#endif

register_t
init386(int first)
{
	struct region_descriptor r_gdt, r_idt;	/* table descriptors */
	int gsel_tss, metadata_missing, x, pa;
	struct pcpu *pc;
	struct xstate_hdr *xhdr;
	caddr_t kmdp;
	vm_offset_t addend;
	size_t ucode_len;
	int late_console;

	thread0.td_kstack = proc0kstack;
	thread0.td_kstack_pages = TD0_KSTACK_PAGES;

	/*
	 * This may be done better later if it gets more high level
	 * components in it. If so just link td->td_proc here.
	 */
	proc_linkup0(&proc0, &thread0);

	if (bootinfo.bi_modulep) {
		metadata_missing = 0;
		addend = (vm_paddr_t)bootinfo.bi_modulep < KERNBASE ?
		    PMAP_MAP_LOW : 0;
		preload_metadata = (caddr_t)bootinfo.bi_modulep + addend;
		preload_bootstrap_relocate(addend);
	} else {
		metadata_missing = 1;
	}

	if (bootinfo.bi_envp != 0) {
		addend = (vm_paddr_t)bootinfo.bi_envp < KERNBASE ?
		    PMAP_MAP_LOW : 0;
		init_static_kenv((char *)bootinfo.bi_envp + addend, 0);
	} else {
		init_static_kenv(NULL, 0);
	}

	/*
	 * Re-evaluate CPU features if we loaded a microcode update.
	 */
	ucode_len = ucode_load_bsp(first);
	if (ucode_len != 0) {
		identify_cpu();
		first = roundup2(first + ucode_len, PAGE_SIZE);
	}

	identify_hypervisor();

	/* Init basic tunables, hz etc */
	init_param1();

	/* Set bootmethod to BIOS: it's the only method supported on i386. */
	strlcpy(bootmethod, "BIOS", sizeof(bootmethod));

	/*
	 * Make gdt memory segments.
	 * All segments cover the full 4GB of address space and
	 * permissions are enforced at page level.
	 */
	gdt_segs[GCODE_SEL].ssd_limit = atop(0 - 1);
	gdt_segs[GDATA_SEL].ssd_limit = atop(0 - 1);
	gdt_segs[GUCODE_SEL].ssd_limit = atop(0 - 1);
	gdt_segs[GUDATA_SEL].ssd_limit = atop(0 - 1);
	gdt_segs[GUFS_SEL].ssd_limit = atop(0 - 1);
	gdt_segs[GUGS_SEL].ssd_limit = atop(0 - 1);

	pc = &__pcpu[0];
	gdt_segs[GPRIV_SEL].ssd_limit = atop(0 - 1);
	gdt_segs[GPRIV_SEL].ssd_base = (int)pc;
	gdt_segs[GPROC0_SEL].ssd_base = (int)&common_tss0;

	for (x = 0; x < NGDT; x++)
		ssdtosd(&gdt_segs[x], &gdt0[x].sd);

	r_gdt.rd_limit = NGDT * sizeof(gdt0[0]) - 1;
	r_gdt.rd_base = (int)gdt0;
	mtx_init(&dt_lock, "descriptor tables", NULL, MTX_SPIN);
	lgdt(&r_gdt);

	pcpu_init(pc, 0, sizeof(struct pcpu));
	for (pa = first; pa < first + DPCPU_SIZE; pa += PAGE_SIZE)
		pmap_kenter(pa, pa);
	dpcpu_init((void *)first, 0);
	first += DPCPU_SIZE;
	PCPU_SET(prvspace, pc);
	PCPU_SET(curthread, &thread0);
	/* Non-late cninit() and printf() can be moved up to here. */

	/*
	 * Initialize mutexes.
	 *
	 * icu_lock: in order to allow an interrupt to occur in a critical
	 *	     section, to set pcpu->ipending (etc...) properly, we
	 *	     must be able to get the icu lock, so it can't be
	 *	     under witness.
	 */
	mutex_init();
	mtx_init(&icu_lock, "icu", NULL, MTX_SPIN | MTX_NOWITNESS | MTX_NOPROFILE);

	i386_setidt1();

	r_idt.rd_limit = sizeof(idt0) - 1;
	r_idt.rd_base = (int)idt;
	lidt(&r_idt);

	/*
	 * Initialize the clock before the console so that console
	 * initialization can use DELAY().
	 */
	clock_init();

	finishidentcpu();	/* Final stage of CPU initialization */
	i386_setidt2();
	pmap_set_nx();
	initializecpu();	/* Initialize CPU registers */
	initializecpucache();

	/* pointer to selector slot for %fs/%gs */
	PCPU_SET(fsgs_gdt, &gdt[GUFS_SEL].sd);

	/* Initialize the tss (except for the final esp0) early for vm86. */
	common_tss0.tss_esp0 = thread0.td_kstack + thread0.td_kstack_pages *
	    PAGE_SIZE - VM86_STACK_SPACE;
	common_tss0.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
	common_tss0.tss_ioopt = sizeof(struct i386tss) << 16;
	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	PCPU_SET(tss_gdt, &gdt[GPROC0_SEL].sd);
	PCPU_SET(common_tssd, *PCPU_GET(tss_gdt));
	ltr(gsel_tss);

	/* Initialize the PIC early for vm86 calls. */
#ifdef DEV_ISA
#ifdef DEV_ATPIC
	elcr_probe();
	atpic_startup();
#else
	/* Reset and mask the atpics and leave them shut down. */
	atpic_reset();

	/*
	 * Point the ICU spurious interrupt vectors at the APIC spurious
	 * interrupt handler.
	 */
	i386_setidt3();
#endif
#endif

	/*
	 * The console and kdb should be initialized even earlier than here,
	 * but some console drivers don't work until after getmemsize().
	 * Default to late console initialization to support these drivers.
	 * This loses mainly printf()s in getmemsize() and early debugging.
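	 * Setting the debug.late_console loader tunable to 0 forces the
	 * early path below, at the risk of using a console driver that
	 * is not yet functional.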
	 */
	late_console = 1;
	TUNABLE_INT_FETCH("debug.late_console", &late_console);
	if (!late_console) {
		cninit();
		i386_kdb_init();
	}

	kmdp = preload_search_by_type("elf kernel");
	link_elf_ireloc(kmdp);

	vm86_initialize();
	getmemsize(first);
	init_param2(physmem);

	/* now running on new page tables, configured, and u/iom is accessible */

	if (late_console)
		cninit();

	if (metadata_missing)
		printf("WARNING: loader(8) metadata is missing!\n");

	if (late_console)
		i386_kdb_init();

	msgbufinit(msgbufp, msgbufsize);
	npxinit(true);
	/*
	 * Set up thread0 pcb after npxinit calculated pcb + fpu save
	 * area size.  Zero out the extended state header in fpu save
	 * area.
	 */
	thread0.td_pcb = get_pcb_td(&thread0);
	thread0.td_pcb->pcb_save = get_pcb_user_save_td(&thread0);
	bzero(get_pcb_user_save_td(&thread0), cpu_max_ext_state_size);
	if (use_xsave) {
		xhdr = (struct xstate_hdr *)(get_pcb_user_save_td(&thread0) +
		    1);
		xhdr->xstate_bv = xsave_mask;
	}
	PCPU_SET(curpcb, thread0.td_pcb);
	/* Move esp0 in the tss to its final place. */
	/* Note: -16 is so we can grow the trapframe if we came from vm86 */
	common_tss0.tss_esp0 = (vm_offset_t)thread0.td_pcb - VM86_STACK_SPACE;
	PCPU_SET(kesp0, common_tss0.tss_esp0);
	gdt[GPROC0_SEL].sd.sd_type = SDT_SYS386TSS;	/* clear busy bit */
	ltr(gsel_tss);

	/* transfer to user mode */

	_ucodesel = GSEL(GUCODE_SEL, SEL_UPL);
	_udatasel = GSEL(GUDATA_SEL, SEL_UPL);

	/* setup proc 0's pcb */
	thread0.td_pcb->pcb_flags = 0;
	thread0.td_pcb->pcb_cr3 = pmap_get_kcr3();
	thread0.td_pcb->pcb_ext = 0;
	thread0.td_frame = &proc0_tf;

#ifdef FDT
	x86_init_fdt();
#endif

	/* Location of kernel stack for locore */
	return ((register_t)thread0.td_pcb);
}

static void
machdep_init_trampoline(void)
{
	struct region_descriptor r_gdt, r_idt;
	struct i386tss *tss;
	char *copyout_buf, *trampoline, *tramp_stack_base;
	int x;

	gdt = pmap_trm_alloc(sizeof(union descriptor) * NGDT * mp_ncpus,
	    M_NOWAIT | M_ZERO);
	bcopy(gdt0, gdt, sizeof(union descriptor) * NGDT);
	r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
	r_gdt.rd_base = (int)gdt;
	lgdt(&r_gdt);

	tss = pmap_trm_alloc(sizeof(struct i386tss) * mp_ncpus,
	    M_NOWAIT | M_ZERO);
	bcopy(&common_tss0, tss, sizeof(struct i386tss));
	gdt[GPROC0_SEL].sd.sd_lobase = (int)tss;
	gdt[GPROC0_SEL].sd.sd_hibase = (u_int)tss >> 24;
	gdt[GPROC0_SEL].sd.sd_type = SDT_SYS386TSS;

	PCPU_SET(fsgs_gdt, &gdt[GUFS_SEL].sd);
	PCPU_SET(tss_gdt, &gdt[GPROC0_SEL].sd);
	PCPU_SET(common_tssd, *PCPU_GET(tss_gdt));
	PCPU_SET(common_tssp, tss);
	ltr(GSEL(GPROC0_SEL, SEL_KPL));

	trampoline = pmap_trm_alloc(end_exceptions - start_exceptions,
	    M_NOWAIT);
	bcopy(start_exceptions, trampoline, end_exceptions - start_exceptions);
	tramp_stack_base = pmap_trm_alloc(TRAMP_STACK_SZ, M_NOWAIT);
	PCPU_SET(trampstk, (uintptr_t)tramp_stack_base + TRAMP_STACK_SZ -
	    VM86_STACK_SPACE);
	tss[0].tss_esp0 = PCPU_GET(trampstk);

	idt = pmap_trm_alloc(sizeof(idt0), M_NOWAIT | M_ZERO);
	bcopy(idt0, idt, sizeof(idt0));

	/* Re-initialize new IDT since the handlers were relocated */
	setidt_disp = trampoline - start_exceptions;
	fixup_idt();
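
	/*
	 * setidt_disp is the byte displacement between the exception
	 * handlers linked into the kernel and their copies in the
	 * trampoline region; fixup_idt() above rebased every gate by
	 * that amount, so the CPU can now be pointed at the relocated
	 * table.
	 */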
	r_idt.rd_limit = sizeof(struct gate_descriptor) * NIDT - 1;
	r_idt.rd_base = (int)idt;
	lidt(&r_idt);

	/* dblfault TSS */
	dblfault_tss = pmap_trm_alloc(sizeof(struct i386tss), M_NOWAIT | M_ZERO);
	dblfault_stack = pmap_trm_alloc(PAGE_SIZE, M_NOWAIT);
	dblfault_tss->tss_esp = dblfault_tss->tss_esp0 =
	    dblfault_tss->tss_esp1 = dblfault_tss->tss_esp2 =
	    (int)dblfault_stack + PAGE_SIZE;
	dblfault_tss->tss_ss = dblfault_tss->tss_ss0 = dblfault_tss->tss_ss1 =
	    dblfault_tss->tss_ss2 = GSEL(GDATA_SEL, SEL_KPL);
	dblfault_tss->tss_cr3 = pmap_get_kcr3();
	dblfault_tss->tss_eip = (int)dblfault_handler;
	dblfault_tss->tss_eflags = PSL_KERNEL;
	dblfault_tss->tss_ds = dblfault_tss->tss_es =
	    dblfault_tss->tss_gs = GSEL(GDATA_SEL, SEL_KPL);
	dblfault_tss->tss_fs = GSEL(GPRIV_SEL, SEL_KPL);
	dblfault_tss->tss_cs = GSEL(GCODE_SEL, SEL_KPL);
	dblfault_tss->tss_ldt = GSEL(GLDT_SEL, SEL_KPL);
	gdt[GPANIC_SEL].sd.sd_lobase = (int)dblfault_tss;
	gdt[GPANIC_SEL].sd.sd_hibase = (u_int)dblfault_tss >> 24;

	/* make ldt memory segments */
	ldt = pmap_trm_alloc(sizeof(union descriptor) * NLDT,
	    M_NOWAIT | M_ZERO);
	gdt[GLDT_SEL].sd.sd_lobase = (int)ldt;
	gdt[GLDT_SEL].sd.sd_hibase = (u_int)ldt >> 24;
	ldt_segs[LUCODE_SEL].ssd_limit = atop(0 - 1);
	ldt_segs[LUDATA_SEL].ssd_limit = atop(0 - 1);
	for (x = 0; x < nitems(ldt_segs); x++)
		ssdtosd(&ldt_segs[x], &ldt[x].sd);

	_default_ldt = GSEL(GLDT_SEL, SEL_KPL);
	lldt(_default_ldt);
	PCPU_SET(currentldt, _default_ldt);

	copyout_buf = pmap_trm_alloc(TRAMP_COPYOUT_SZ, M_NOWAIT);
	PCPU_SET(copyout_buf, copyout_buf);
	copyout_init_tramp();
}
SYSINIT(vm_mem, SI_SUB_VM, SI_ORDER_SECOND, machdep_init_trampoline, NULL);

#ifdef COMPAT_43
static void
i386_setup_lcall_gate(void)
{
	struct sysentvec *sv;
	struct user_segment_descriptor desc;
	u_int lcall_addr;

	sv = &elf32_freebsd_sysvec;
	lcall_addr = (uintptr_t)sv->sv_psstrings - sz_lcall_tramp;

	bzero(&desc, sizeof(desc));
	desc.sd_type = SDT_MEMERA;
	desc.sd_dpl = SEL_UPL;
	desc.sd_p = 1;
	desc.sd_def32 = 1;
	desc.sd_gran = 1;
	desc.sd_lolimit = 0xffff;
	desc.sd_hilimit = 0xf;
	desc.sd_lobase = lcall_addr;
	desc.sd_hibase = lcall_addr >> 24;
	bcopy(&desc, &ldt[LSYS5CALLS_SEL], sizeof(desc));
}
SYSINIT(elf32, SI_SUB_EXEC, SI_ORDER_ANY, i386_setup_lcall_gate, NULL);
#endif

void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
{

	pcpu->pc_acpi_id = 0xffffffff;
}

static int
smap_sysctl_handler(SYSCTL_HANDLER_ARGS)
{
	struct bios_smap *smapbase;
	struct bios_smap_xattr smap;
	caddr_t kmdp;
	uint32_t *smapattr;
	int count, error, i;

	/*
	 * Retrieve the system memory map from the loader.
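	 * The loader stores it as MODINFO_METADATA; as noted in
	 * add_smap_entries(), a u_int32_t size word immediately precedes
	 * the SMAP array, which is how "count" is derived below.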
	 */
	kmdp = preload_search_by_type("elf kernel");
	if (kmdp == NULL)
		kmdp = preload_search_by_type("elf32 kernel");
	smapbase = (struct bios_smap *)preload_search_info(kmdp,
	    MODINFO_METADATA | MODINFOMD_SMAP);
	if (smapbase == NULL)
		return (0);
	smapattr = (uint32_t *)preload_search_info(kmdp,
	    MODINFO_METADATA | MODINFOMD_SMAP_XATTR);
	count = *((u_int32_t *)smapbase - 1) / sizeof(*smapbase);
	error = 0;
	for (i = 0; i < count; i++) {
		smap.base = smapbase[i].base;
		smap.length = smapbase[i].length;
		smap.type = smapbase[i].type;
		if (smapattr != NULL)
			smap.xattr = smapattr[i];
		else
			smap.xattr = 0;
		error = SYSCTL_OUT(req, &smap, sizeof(smap));
	}
	return (error);
}
SYSCTL_PROC(_machdep, OID_AUTO, smap,
    CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
    smap_sysctl_handler, "S,bios_smap_xattr",
    "Raw BIOS SMAP data");

void
spinlock_enter(void)
{
	struct thread *td;
	register_t flags;

	td = curthread;
	if (td->td_md.md_spinlock_count == 0) {
		flags = intr_disable();
		td->td_md.md_spinlock_count = 1;
		td->td_md.md_saved_flags = flags;
		critical_enter();
	} else
		td->td_md.md_spinlock_count++;
}

void
spinlock_exit(void)
{
	struct thread *td;
	register_t flags;

	td = curthread;
	flags = td->td_md.md_saved_flags;
	td->td_md.md_spinlock_count--;
	if (td->td_md.md_spinlock_count == 0) {
		critical_exit();
		intr_restore(flags);
	}
}

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
static void f00f_hack(void *unused);
SYSINIT(f00f_hack, SI_SUB_INTRINSIC, SI_ORDER_FIRST, f00f_hack, NULL);

static void
f00f_hack(void *unused)
{
	struct region_descriptor r_idt;
	struct gate_descriptor *new_idt;
	vm_offset_t tmp;

	if (!has_f00f_bug)
		return;

	printf("Intel Pentium detected, installing workaround for F00F bug\n");

	tmp = (vm_offset_t)pmap_trm_alloc(PAGE_SIZE * 3, M_NOWAIT | M_ZERO);
	if (tmp == 0)
		panic("kmem_malloc returned 0");
	tmp = round_page(tmp);

	/* Put the problematic entry (#6) at the end of the lower page. */
	new_idt = (struct gate_descriptor *)
	    (tmp + PAGE_SIZE - 7 * sizeof(struct gate_descriptor));
	bcopy(idt, new_idt, sizeof(idt0));
	r_idt.rd_base = (u_int)new_idt;
	r_idt.rd_limit = sizeof(idt0) - 1;
	lidt(&r_idt);
	/* SMP machines do not need the F00F hack. */
	idt = new_idt;
	pmap_protect(kernel_pmap, tmp, tmp + PAGE_SIZE, VM_PROT_READ);
}
#endif /* defined(I586_CPU) && !NO_F00F_HACK */

/*
 * Construct a PCB from a trapframe. This is called from kdb_trap() where
 * we want to start a backtrace from the function that caused us to enter
 * the debugger. We have the context in the trapframe, but base the trace
 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
 * enough for a backtrace.
 */
void
makectx(struct trapframe *tf, struct pcb *pcb)
{

	pcb->pcb_edi = tf->tf_edi;
	pcb->pcb_esi = tf->tf_esi;
	pcb->pcb_ebp = tf->tf_ebp;
	pcb->pcb_ebx = tf->tf_ebx;
	pcb->pcb_eip = tf->tf_eip;
	pcb->pcb_esp = (ISPL(tf->tf_cs)) ? tf->tf_esp : (int)(tf + 1) - 8;
	pcb->pcb_gs = rgs();
}

#ifdef KDB

/*
 * Provide inb() and outb() as functions.
 * They are normally only available as inline functions and thus cannot
 * be called from the debugger.
 */

/* silence compiler warnings */
u_char inb_(u_short);
void outb_(u_short, u_char);

u_char
inb_(u_short port)
{
	return (inb(port));
}

void
outb_(u_short port, u_char data)
{
	outb(port, data);
}

#endif /* KDB */