1 /* 2 * Copyright (c) 1996, by Steve Passe 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. The name of the developer may NOT be used to endorse or promote products 11 * derived from this software without specific prior written permission. 12 * 13 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 16 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 23 * SUCH DAMAGE. 24 * 25 * $FreeBSD: src/sys/i386/i386/mp_machdep.c,v 1.115.2.15 2003/03/14 21:22:35 jhb Exp $ 26 */ 27 28 #include "opt_cpu.h" 29 30 #include <sys/param.h> 31 #include <sys/systm.h> 32 #include <sys/kernel.h> 33 #include <sys/sysctl.h> 34 #include <sys/malloc.h> 35 #include <sys/memrange.h> 36 #include <sys/cons.h> /* cngetc() */ 37 #include <sys/machintr.h> 38 #include <sys/cpu_topology.h> 39 40 #include <sys/mplock2.h> 41 42 #include <vm/vm.h> 43 #include <vm/vm_param.h> 44 #include <vm/pmap.h> 45 #include <vm/vm_kern.h> 46 #include <vm/vm_extern.h> 47 #include <sys/lock.h> 48 #include <vm/vm_map.h> 49 #include <sys/user.h> 50 #ifdef GPROF 51 #include <sys/gmon.h> 52 #endif 53 54 #include <machine/smp.h> 55 #include <machine_base/apic/apicreg.h> 56 #include <machine/atomic.h> 57 #include <machine/cpufunc.h> 58 #include <machine/cputypes.h> 59 #include <machine_base/apic/lapic.h> 60 #include <machine_base/apic/ioapic.h> 61 #include <machine_base/acpica/acpi_md_cpu.h> 62 #include <machine/psl.h> 63 #include <machine/segments.h> 64 #include <machine/tss.h> 65 #include <machine/specialreg.h> 66 #include <machine/globaldata.h> 67 #include <machine/pmap_inval.h> 68 #include <machine/clock.h> 69 70 #include <machine/md_var.h> /* setidt() */ 71 #include <machine_base/icu/icu.h> /* IPIs */ 72 #include <machine_base/icu/icu_var.h> 73 #include <machine_base/apic/ioapic_abi.h> 74 #include <machine/intr_machdep.h> /* IPIs */ 75 76 #define WARMBOOT_TARGET 0 77 #define WARMBOOT_OFF (KERNBASE + 0x0467) 78 #define WARMBOOT_SEG (KERNBASE + 0x0469) 79 80 #define CMOS_REG (0x70) 81 #define CMOS_DATA (0x71) 82 #define BIOS_RESET (0x0f) 83 #define BIOS_WARM (0x0a) 84 85 /* 86 * this code MUST be enabled here and in mpboot.s. 87 * it follows the very early stages of AP boot by placing values in CMOS ram. 88 * it NORMALLY will never be needed and thus the primitive method for enabling. 
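 *
 * Rough sketch of the intended use (the mpboot.s side is assumed here,
 * not shown in this file): before each start_ap() attempt the BSP seeds
 * the six CMOS scratch bytes 0x34-0x39 with CHECK_INIT(99), the AP
 * trampoline overwrites them with progress codes as it advances, and on
 * a failed start the BSP dumps whatever stage was reached:
 *
 *	CHECK_INIT(99);
 *	if (!start_ap(gd, boot_addr, smibest))
 *		CHECK_PRINT("trace");	(shows how far the AP got)
 *
 * which mirrors the calls made in start_all_aps() below.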
89 * 90 */ 91 #if defined(CHECK_POINTS) 92 #define CHECK_READ(A) (outb(CMOS_REG, (A)), inb(CMOS_DATA)) 93 #define CHECK_WRITE(A,D) (outb(CMOS_REG, (A)), outb(CMOS_DATA, (D))) 94 95 #define CHECK_INIT(D); \ 96 CHECK_WRITE(0x34, (D)); \ 97 CHECK_WRITE(0x35, (D)); \ 98 CHECK_WRITE(0x36, (D)); \ 99 CHECK_WRITE(0x37, (D)); \ 100 CHECK_WRITE(0x38, (D)); \ 101 CHECK_WRITE(0x39, (D)); 102 103 #define CHECK_PRINT(S); \ 104 kprintf("%s: %d, %d, %d, %d, %d, %d\n", \ 105 (S), \ 106 CHECK_READ(0x34), \ 107 CHECK_READ(0x35), \ 108 CHECK_READ(0x36), \ 109 CHECK_READ(0x37), \ 110 CHECK_READ(0x38), \ 111 CHECK_READ(0x39)); 112 113 #else /* CHECK_POINTS */ 114 115 #define CHECK_INIT(D) 116 #define CHECK_PRINT(S) 117 118 #endif /* CHECK_POINTS */ 119 120 /* 121 * Values to send to the POST hardware. 122 */ 123 #define MP_BOOTADDRESS_POST 0x10 124 #define MP_PROBE_POST 0x11 125 #define MPTABLE_PASS1_POST 0x12 126 127 #define MP_START_POST 0x13 128 #define MP_ENABLE_POST 0x14 129 #define MPTABLE_PASS2_POST 0x15 130 131 #define START_ALL_APS_POST 0x16 132 #define INSTALL_AP_TRAMP_POST 0x17 133 #define START_AP_POST 0x18 134 135 #define MP_ANNOUNCE_POST 0x19 136 137 /** XXX FIXME: where does this really belong, isa.h/isa.c perhaps? */ 138 int current_postcode; 139 140 /** XXX FIXME: what system files declare these??? */ 141 extern struct region_descriptor r_gdt; 142 143 extern int nkpt; 144 extern int naps; 145 146 int64_t tsc0_offset; 147 extern int64_t tsc_offsets[]; 148 149 /* AP uses this during bootstrap. Do not staticize. */ 150 char *bootSTK; 151 static int bootAP; 152 153 struct pcb stoppcbs[MAXCPU]; 154 155 extern inthand_t IDTVEC(fast_syscall), IDTVEC(fast_syscall32); 156 157 /* 158 * Local data and functions. 159 */ 160 161 static u_int boot_address; 162 static int mp_finish; 163 static int mp_finish_lapic; 164 165 static int start_all_aps(u_int boot_addr); 166 #if 0 167 static void install_ap_tramp(u_int boot_addr); 168 #endif 169 static int start_ap(struct mdglobaldata *gd, u_int boot_addr, int smibest); 170 static int smitest(void); 171 static void mp_bsp_simple_setup(void); 172 173 /* which cpus have been started */ 174 static cpumask_t smp_startup_mask = CPUMASK_INITIALIZER_ONLYONE; 175 /* which cpus have lapic been inited */ 176 static cpumask_t smp_lapic_mask = CPUMASK_INITIALIZER_ONLYONE; 177 /* which cpus are ready for IPIs etc? */ 178 cpumask_t smp_active_mask = CPUMASK_INITIALIZER_ONLYONE; 179 cpumask_t smp_finalize_mask = CPUMASK_INITIALIZER_ONLYONE; 180 181 SYSCTL_OPAQUE(_machdep, OID_AUTO, smp_active, CTLFLAG_RD, 182 &smp_active_mask, sizeof(smp_active_mask), "LU", ""); 183 static u_int bootMP_size; 184 static u_int report_invlpg_src; 185 SYSCTL_INT(_machdep, OID_AUTO, report_invlpg_src, CTLFLAG_RW, 186 &report_invlpg_src, 0, ""); 187 static u_int report_invltlb_src; 188 SYSCTL_INT(_machdep, OID_AUTO, report_invltlb_src, CTLFLAG_RW, 189 &report_invltlb_src, 0, ""); 190 static int optimized_invltlb; 191 SYSCTL_INT(_machdep, OID_AUTO, optimized_invltlb, CTLFLAG_RW, 192 &optimized_invltlb, 0, ""); 193 static int all_but_self_ipi_enable = 1; 194 SYSCTL_INT(_machdep, OID_AUTO, all_but_self_ipi_enable, CTLFLAG_RW, 195 &all_but_self_ipi_enable, 0, ""); 196 197 /* Local data for detecting CPU TOPOLOGY */ 198 static int core_bits = 0; 199 static int logical_CPU_bits = 0; 200 201 202 /* 203 * Calculate usable address in base memory for AP trampoline code. 
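 *
 * Worked example (the basemem value is only illustrative): with
 * basemem = 639 (KB), trunc_page(639 * 1024) = 0x9f000, leaving 0xc00
 * bytes of base memory above boot_address.  If bootMP_size is larger
 * than that, boot_address drops one more page to 0x9e000.  The three
 * trampoline page table pages then sit immediately below the boot code,
 * so mptramp_pagetables = 0x9e000 - 3 * PAGE_SIZE = 0x9b000, which is
 * the value returned.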
204 */ 205 u_int 206 mp_bootaddress(u_int basemem) 207 { 208 POSTCODE(MP_BOOTADDRESS_POST); 209 210 bootMP_size = mptramp_end - mptramp_start; 211 boot_address = trunc_page(basemem * 1024); /* round down to 4k boundary */ 212 if (((basemem * 1024) - boot_address) < bootMP_size) 213 boot_address -= PAGE_SIZE; /* not enough, lower by 4k */ 214 /* 3 levels of page table pages */ 215 mptramp_pagetables = boot_address - (PAGE_SIZE * 3); 216 217 return mptramp_pagetables; 218 } 219 220 /* 221 * Print various information about the SMP system hardware and setup. 222 */ 223 void 224 mp_announce(void) 225 { 226 int x; 227 228 POSTCODE(MP_ANNOUNCE_POST); 229 230 kprintf("DragonFly/MP: Multiprocessor motherboard\n"); 231 kprintf(" cpu0 (BSP): apic id: %2d\n", CPUID_TO_APICID(0)); 232 for (x = 1; x <= naps; ++x) 233 kprintf(" cpu%d (AP): apic id: %2d\n", x, CPUID_TO_APICID(x)); 234 235 if (!ioapic_enable) 236 kprintf(" Warning: APIC I/O disabled\n"); 237 } 238 239 /* 240 * AP cpu's call this to sync up protected mode. 241 * 242 * WARNING! %gs is not set up on entry. This routine sets up %gs. 243 */ 244 void 245 init_secondary(void) 246 { 247 int gsel_tss; 248 int x, myid = bootAP; 249 u_int64_t msr, cr0; 250 struct mdglobaldata *md; 251 struct privatespace *ps; 252 253 ps = CPU_prvspace[myid]; 254 255 gdt_segs[GPROC0_SEL].ssd_base = 256 (long) &ps->mdglobaldata.gd_common_tss; 257 ps->mdglobaldata.mi.gd_prvspace = ps; 258 259 /* We fill the 32-bit segment descriptors */ 260 for (x = 0; x < NGDT; x++) { 261 if (x != GPROC0_SEL && x != (GPROC0_SEL + 1)) 262 ssdtosd(&gdt_segs[x], &gdt[myid * NGDT + x]); 263 } 264 /* And now a 64-bit one */ 265 ssdtosyssd(&gdt_segs[GPROC0_SEL], 266 (struct system_segment_descriptor *)&gdt[myid * NGDT + GPROC0_SEL]); 267 268 r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1; 269 r_gdt.rd_base = (long) &gdt[myid * NGDT]; 270 lgdt(&r_gdt); /* does magic intra-segment return */ 271 272 /* lgdt() destroys the GSBASE value, so we load GSBASE after lgdt() */ 273 wrmsr(MSR_FSBASE, 0); /* User value */ 274 wrmsr(MSR_GSBASE, (u_int64_t)ps); 275 wrmsr(MSR_KGSBASE, 0); /* XXX User value while we're in the kernel */ 276 277 lidt(&r_idt_arr[mdcpu->mi.gd_cpuid]); 278 279 #if 0 280 lldt(_default_ldt); 281 mdcpu->gd_currentldt = _default_ldt; 282 #endif 283 284 gsel_tss = GSEL(GPROC0_SEL, SEL_KPL); 285 gdt[myid * NGDT + GPROC0_SEL].sd_type = SDT_SYSTSS; 286 287 md = mdcpu; /* loaded through %gs:0 (mdglobaldata.mi.gd_prvspace)*/ 288 289 md->gd_common_tss.tss_rsp0 = 0; /* not used until after switch */ 290 #if 0 /* JG XXX */ 291 md->gd_common_tss.tss_ioopt = (sizeof md->gd_common_tss) << 16; 292 #endif 293 md->gd_tss_gdt = &gdt[myid * NGDT + GPROC0_SEL]; 294 md->gd_common_tssd = *md->gd_tss_gdt; 295 296 /* double fault stack */ 297 md->gd_common_tss.tss_ist1 = 298 (long)&md->mi.gd_prvspace->idlestack[ 299 sizeof(md->mi.gd_prvspace->idlestack)]; 300 301 ltr(gsel_tss); 302 303 /* 304 * Set to a known state: 305 * Set by mpboot.s: CR0_PG, CR0_PE 306 * Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM 307 */ 308 cr0 = rcr0(); 309 cr0 &= ~(CR0_CD | CR0_NW | CR0_EM); 310 load_cr0(cr0); 311 312 /* Set up the fast syscall stuff */ 313 msr = rdmsr(MSR_EFER) | EFER_SCE; 314 wrmsr(MSR_EFER, msr); 315 wrmsr(MSR_LSTAR, (u_int64_t)IDTVEC(fast_syscall)); 316 wrmsr(MSR_CSTAR, (u_int64_t)IDTVEC(fast_syscall32)); 317 msr = ((u_int64_t)GSEL(GCODE_SEL, SEL_KPL) << 32) | 318 ((u_int64_t)GSEL(GUCODE32_SEL, SEL_UPL) << 48); 319 wrmsr(MSR_STAR, msr); 320 wrmsr(MSR_SF_MASK, PSL_NT|PSL_T|PSL_I|PSL_C|PSL_D|PSL_IOPL); 
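
	/*
	 * For reference, a sketch of the standard x86-64 SYSCALL plumbing
	 * the MSR writes above set up (architectural behaviour, not
	 * specific to this file):
	 *
	 *	MSR_LSTAR/MSR_CSTAR	64-bit and compat-mode entry points.
	 *	MSR_STAR[47:32]		kernel selector base: SYSCALL loads
	 *				CS from it and SS from it + 8.
	 *	MSR_STAR[63:48]		user selector base: SYSRET loads CS
	 *				from it (+16 for 64-bit returns) and
	 *				SS from it + 8.
	 *	MSR_SF_MASK		rflags bits cleared on SYSCALL entry,
	 *				so the mask above enters the kernel
	 *				with interrupts and traps disabled.
	 */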
321 322 pmap_set_opt(); /* PSE/4MB pages, etc */ 323 pmap_init_pat(); /* Page Attribute Table */ 324 325 /* set up CPU registers and state */ 326 cpu_setregs(); 327 328 /* set up SSE/NX registers */ 329 initializecpu(myid); 330 331 /* set up FPU state on the AP */ 332 npxinit(); 333 334 /* disable the APIC, just to be SURE */ 335 lapic->svr &= ~APIC_SVR_ENABLE; 336 } 337 338 /******************************************************************* 339 * local functions and data 340 */ 341 342 /* 343 * Start the SMP system 344 */ 345 static void 346 mp_start_aps(void *dummy __unused) 347 { 348 if (lapic_enable) { 349 /* start each Application Processor */ 350 start_all_aps(boot_address); 351 } else { 352 mp_bsp_simple_setup(); 353 } 354 } 355 SYSINIT(startaps, SI_BOOT2_START_APS, SI_ORDER_FIRST, mp_start_aps, NULL); 356 357 /* 358 * start each AP in our list 359 */ 360 static int 361 start_all_aps(u_int boot_addr) 362 { 363 vm_offset_t va = boot_address + KERNBASE; 364 u_int64_t *pt4, *pt3, *pt2; 365 int pssize; 366 int x, i; 367 int shift; 368 int smicount; 369 int smibest; 370 int smilast; 371 u_char mpbiosreason; 372 u_long mpbioswarmvec; 373 struct mdglobaldata *gd; 374 struct privatespace *ps; 375 size_t ipiq_size; 376 377 POSTCODE(START_ALL_APS_POST); 378 379 /* install the AP 1st level boot code */ 380 pmap_kenter(va, boot_address); 381 cpu_invlpg((void *)va); /* JG XXX */ 382 bcopy(mptramp_start, (void *)va, bootMP_size); 383 384 /* Locate the page tables, they'll be below the trampoline */ 385 pt4 = (u_int64_t *)(uintptr_t)(mptramp_pagetables + KERNBASE); 386 pt3 = pt4 + (PAGE_SIZE) / sizeof(u_int64_t); 387 pt2 = pt3 + (PAGE_SIZE) / sizeof(u_int64_t); 388 389 /* Create the initial 1GB replicated page tables */ 390 for (i = 0; i < 512; i++) { 391 /* Each slot of the level 4 pages points to the same level 3 page */ 392 pt4[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables + PAGE_SIZE); 393 pt4[i] |= kernel_pmap.pmap_bits[PG_V_IDX] | 394 kernel_pmap.pmap_bits[PG_RW_IDX] | 395 kernel_pmap.pmap_bits[PG_U_IDX]; 396 397 /* Each slot of the level 3 pages points to the same level 2 page */ 398 pt3[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables + (2 * PAGE_SIZE)); 399 pt3[i] |= kernel_pmap.pmap_bits[PG_V_IDX] | 400 kernel_pmap.pmap_bits[PG_RW_IDX] | 401 kernel_pmap.pmap_bits[PG_U_IDX]; 402 403 /* The level 2 page slots are mapped with 2MB pages for 1GB. */ 404 pt2[i] = i * (2 * 1024 * 1024); 405 pt2[i] |= kernel_pmap.pmap_bits[PG_V_IDX] | 406 kernel_pmap.pmap_bits[PG_RW_IDX] | 407 kernel_pmap.pmap_bits[PG_PS_IDX] | 408 kernel_pmap.pmap_bits[PG_U_IDX]; 409 } 410 411 /* save the current value of the warm-start vector */ 412 mpbioswarmvec = *((u_int32_t *) WARMBOOT_OFF); 413 outb(CMOS_REG, BIOS_RESET); 414 mpbiosreason = inb(CMOS_DATA); 415 416 /* setup a vector to our boot code */ 417 *((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET; 418 *((volatile u_short *) WARMBOOT_SEG) = (boot_address >> 4); 419 outb(CMOS_REG, BIOS_RESET); 420 outb(CMOS_DATA, BIOS_WARM); /* 'warm-start' */ 421 422 /* 423 * If we have a TSC we can figure out the SMI interrupt rate. 424 * The SMI does not necessarily use a constant rate. Spend 425 * up to 250ms trying to figure it out. 
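	 *
	 * The measurement (see smitest() further down) is roughly: spin in
	 * short rdtsc() bursts until one burst suddenly takes more than
	 * twice as long as the quickest one seen (that stall is assumed to
	 * be an SMI) and return the APIC timer count remaining at that
	 * point.  Successive returns are then differenced to get the SMI
	 * period in microseconds, and the smallest difference over up to
	 * 20 samples is kept in smibest as the worst-case interval.
	 * Anything over 250000us is treated as no significant SMI activity
	 * and zeroed.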
426 */ 427 smibest = 0; 428 if (cpu_feature & CPUID_TSC) { 429 set_apic_timer(275000); 430 smilast = read_apic_timer(); 431 for (x = 0; x < 20 && read_apic_timer(); ++x) { 432 smicount = smitest(); 433 if (smibest == 0 || smilast - smicount < smibest) 434 smibest = smilast - smicount; 435 smilast = smicount; 436 } 437 if (smibest > 250000) 438 smibest = 0; 439 } 440 if (smibest) 441 kprintf("SMI Frequency (worst case): %d Hz (%d us)\n", 442 1000000 / smibest, smibest); 443 444 /* start each AP */ 445 for (x = 1; x <= naps; ++x) { 446 /* This is a bit verbose, it will go away soon. */ 447 448 pssize = sizeof(struct privatespace); 449 ps = (void *)kmem_alloc3(&kernel_map, pssize, VM_SUBSYS_GD, 450 KM_CPU(x)); 451 CPU_prvspace[x] = ps; 452 #if 0 453 kprintf("ps %d %p %d\n", x, ps, pssize); 454 #endif 455 bzero(ps, pssize); 456 gd = &ps->mdglobaldata; 457 gd->mi.gd_prvspace = ps; 458 459 /* prime data page for it to use */ 460 mi_gdinit(&gd->mi, x); 461 cpu_gdinit(gd, x); 462 ipiq_size = sizeof(struct lwkt_ipiq) * (naps + 1); 463 gd->mi.gd_ipiq = (void *)kmem_alloc3(&kernel_map, ipiq_size, 464 VM_SUBSYS_IPIQ, KM_CPU(x)); 465 bzero(gd->mi.gd_ipiq, ipiq_size); 466 467 gd->gd_acpi_id = CPUID_TO_ACPIID(gd->mi.gd_cpuid); 468 469 /* initialize arc4random. */ 470 arc4_init_pcpu(x); 471 472 /* setup a vector to our boot code */ 473 *((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET; 474 *((volatile u_short *) WARMBOOT_SEG) = (boot_addr >> 4); 475 outb(CMOS_REG, BIOS_RESET); 476 outb(CMOS_DATA, BIOS_WARM); /* 'warm-start' */ 477 478 /* 479 * Setup the AP boot stack 480 */ 481 bootSTK = &ps->idlestack[UPAGES * PAGE_SIZE - PAGE_SIZE]; 482 bootAP = x; 483 484 /* attempt to start the Application Processor */ 485 CHECK_INIT(99); /* setup checkpoints */ 486 if (!start_ap(gd, boot_addr, smibest)) { 487 kprintf("\nAP #%d (PHY# %d) failed!\n", 488 x, CPUID_TO_APICID(x)); 489 CHECK_PRINT("trace"); /* show checkpoints */ 490 /* better panic as the AP may be running loose */ 491 kprintf("panic y/n? [y] "); 492 cnpoll(TRUE); 493 if (cngetc() != 'n') 494 panic("bye-bye"); 495 cnpoll(FALSE); 496 } 497 CHECK_PRINT("trace"); /* show checkpoints */ 498 } 499 500 /* set ncpus to 1 + highest logical cpu. Not all may have come up */ 501 ncpus = x; 502 503 /* ncpus2 -- ncpus rounded down to the nearest power of 2 */ 504 for (shift = 0; (1 << shift) <= ncpus; ++shift) 505 ; 506 --shift; 507 ncpus2_shift = shift; 508 ncpus2 = 1 << shift; 509 ncpus2_mask = ncpus2 - 1; 510 511 /* ncpus_fit -- ncpus rounded up to the nearest power of 2 */ 512 if ((1 << shift) < ncpus) 513 ++shift; 514 ncpus_fit = 1 << shift; 515 ncpus_fit_mask = ncpus_fit - 1; 516 517 /* build our map of 'other' CPUs */ 518 mycpu->gd_other_cpus = smp_startup_mask; 519 CPUMASK_NANDBIT(mycpu->gd_other_cpus, mycpu->gd_cpuid); 520 521 gd = (struct mdglobaldata *)mycpu; 522 gd->gd_acpi_id = CPUID_TO_ACPIID(mycpu->gd_cpuid); 523 524 ipiq_size = sizeof(struct lwkt_ipiq) * ncpus; 525 mycpu->gd_ipiq = (void *)kmem_alloc3(&kernel_map, ipiq_size, 526 VM_SUBSYS_IPIQ, KM_CPU(0)); 527 bzero(mycpu->gd_ipiq, ipiq_size); 528 529 /* initialize arc4random. */ 530 arc4_init_pcpu(0); 531 532 /* restore the warmstart vector */ 533 *(u_long *) WARMBOOT_OFF = mpbioswarmvec; 534 outb(CMOS_REG, BIOS_RESET); 535 outb(CMOS_DATA, mpbiosreason); 536 537 /* 538 * NOTE! The idlestack for the BSP was setup by locore. Finish 539 * up, clean out the P==V mapping we did earlier. 
540 */ 541 pmap_set_opt(); 542 543 /* 544 * Wait all APs to finish initializing LAPIC 545 */ 546 if (bootverbose) 547 kprintf("SMP: Waiting APs LAPIC initialization\n"); 548 if (cpu_feature & CPUID_TSC) 549 tsc0_offset = rdtsc(); 550 tsc_offsets[0] = 0; 551 mp_finish_lapic = 1; 552 rel_mplock(); 553 554 while (CPUMASK_CMPMASKNEQ(smp_lapic_mask, smp_startup_mask)) { 555 cpu_pause(); 556 cpu_lfence(); 557 if (cpu_feature & CPUID_TSC) 558 tsc0_offset = rdtsc(); 559 } 560 while (try_mplock() == 0) { 561 cpu_pause(); 562 cpu_lfence(); 563 } 564 565 /* number of APs actually started */ 566 return ncpus - 1; 567 } 568 569 570 /* 571 * load the 1st level AP boot code into base memory. 572 */ 573 574 /* targets for relocation */ 575 extern void bigJump(void); 576 extern void bootCodeSeg(void); 577 extern void bootDataSeg(void); 578 extern void MPentry(void); 579 extern u_int MP_GDT; 580 extern u_int mp_gdtbase; 581 582 #if 0 583 584 static void 585 install_ap_tramp(u_int boot_addr) 586 { 587 int x; 588 int size = *(int *) ((u_long) & bootMP_size); 589 u_char *src = (u_char *) ((u_long) bootMP); 590 u_char *dst = (u_char *) boot_addr + KERNBASE; 591 u_int boot_base = (u_int) bootMP; 592 u_int8_t *dst8; 593 u_int16_t *dst16; 594 u_int32_t *dst32; 595 596 POSTCODE(INSTALL_AP_TRAMP_POST); 597 598 for (x = 0; x < size; ++x) 599 *dst++ = *src++; 600 601 /* 602 * modify addresses in code we just moved to basemem. unfortunately we 603 * need fairly detailed info about mpboot.s for this to work. changes 604 * to mpboot.s might require changes here. 605 */ 606 607 /* boot code is located in KERNEL space */ 608 dst = (u_char *) boot_addr + KERNBASE; 609 610 /* modify the lgdt arg */ 611 dst32 = (u_int32_t *) (dst + ((u_int) & mp_gdtbase - boot_base)); 612 *dst32 = boot_addr + ((u_int) & MP_GDT - boot_base); 613 614 /* modify the ljmp target for MPentry() */ 615 dst32 = (u_int32_t *) (dst + ((u_int) bigJump - boot_base) + 1); 616 *dst32 = ((u_int) MPentry - KERNBASE); 617 618 /* modify the target for boot code segment */ 619 dst16 = (u_int16_t *) (dst + ((u_int) bootCodeSeg - boot_base)); 620 dst8 = (u_int8_t *) (dst16 + 1); 621 *dst16 = (u_int) boot_addr & 0xffff; 622 *dst8 = ((u_int) boot_addr >> 16) & 0xff; 623 624 /* modify the target for boot data segment */ 625 dst16 = (u_int16_t *) (dst + ((u_int) bootDataSeg - boot_base)); 626 dst8 = (u_int8_t *) (dst16 + 1); 627 *dst16 = (u_int) boot_addr & 0xffff; 628 *dst8 = ((u_int) boot_addr >> 16) & 0xff; 629 } 630 631 #endif 632 633 /* 634 * This function starts the AP (application processor) identified 635 * by the APIC ID 'physicalCpu'. It does quite a "song and dance" 636 * to accomplish this. This is necessary because of the nuances 637 * of the different hardware we might encounter. It ain't pretty, 638 * but it seems to work. 639 * 640 * NOTE: eventually an AP gets to ap_init(), which is called just 641 * before the AP goes into the LWKT scheduler's idle loop. 642 */ 643 static int 644 start_ap(struct mdglobaldata *gd, u_int boot_addr, int smibest) 645 { 646 int physical_cpu; 647 int vector; 648 u_long icr_lo, icr_hi; 649 650 POSTCODE(START_AP_POST); 651 652 /* get the PHYSICAL APIC ID# */ 653 physical_cpu = CPUID_TO_APICID(gd->mi.gd_cpuid); 654 655 /* calculate the vector */ 656 vector = (boot_addr >> 12) & 0xff; 657 658 /* We don't want anything interfering */ 659 cpu_disable_intr(); 660 661 /* Make sure the target cpu sees everything */ 662 wbinvd(); 663 664 /* 665 * Try to detect when a SMI has occurred, wait up to 200ms. 
666 * 667 * If a SMI occurs during an AP reset but before we issue 668 * the STARTUP command, the AP may brick. To work around 669 * this problem we hold off doing the AP startup until 670 * after we have detected the SMI. Hopefully another SMI 671 * will not occur before we finish the AP startup. 672 * 673 * Retries don't seem to help. SMIs have a window of opportunity 674 * and if USB->legacy keyboard emulation is enabled in the BIOS 675 * the interrupt rate can be quite high. 676 * 677 * NOTE: Don't worry about the L1 cache load, it might bloat 678 * ldelta a little but ndelta will be so huge when the SMI 679 * occurs the detection logic will still work fine. 680 */ 681 if (smibest) { 682 set_apic_timer(200000); 683 smitest(); 684 } 685 686 /* 687 * first we do an INIT/RESET IPI this INIT IPI might be run, reseting 688 * and running the target CPU. OR this INIT IPI might be latched (P5 689 * bug), CPU waiting for STARTUP IPI. OR this INIT IPI might be 690 * ignored. 691 * 692 * see apic/apicreg.h for icr bit definitions. 693 * 694 * TIME CRITICAL CODE, DO NOT DO ANY KPRINTFS IN THE HOT PATH. 695 */ 696 697 /* 698 * Setup the address for the target AP. We can setup 699 * icr_hi once and then just trigger operations with 700 * icr_lo. 701 */ 702 icr_hi = lapic->icr_hi & ~APIC_ID_MASK; 703 icr_hi |= (physical_cpu << 24); 704 icr_lo = lapic->icr_lo & 0xfff00000; 705 lapic->icr_hi = icr_hi; 706 707 /* 708 * Do an INIT IPI: assert RESET 709 * 710 * Use edge triggered mode to assert INIT 711 */ 712 lapic->icr_lo = icr_lo | 0x00004500; 713 while (lapic->icr_lo & APIC_DELSTAT_MASK) 714 /* spin */ ; 715 716 /* 717 * The spec calls for a 10ms delay but we may have to use a 718 * MUCH lower delay to avoid bricking an AP due to a fast SMI 719 * interrupt. We have other loops here too and dividing by 2 720 * doesn't seem to be enough even after subtracting 350us, 721 * so we divide by 4. 722 * 723 * Our minimum delay is 150uS, maximum is 10ms. If no SMI 724 * interrupt was detected we use the full 10ms. 725 */ 726 if (smibest == 0) 727 u_sleep(10000); 728 else if (smibest < 150 * 4 + 350) 729 u_sleep(150); 730 else if ((smibest - 350) / 4 < 10000) 731 u_sleep((smibest - 350) / 4); 732 else 733 u_sleep(10000); 734 735 /* 736 * Do an INIT IPI: deassert RESET 737 * 738 * Use level triggered mode to deassert. It is unclear 739 * why we need to do this. 740 */ 741 lapic->icr_lo = icr_lo | 0x00008500; 742 while (lapic->icr_lo & APIC_DELSTAT_MASK) 743 /* spin */ ; 744 u_sleep(150); /* wait 150us */ 745 746 /* 747 * Next we do a STARTUP IPI: the previous INIT IPI might still be 748 * latched, (P5 bug) this 1st STARTUP would then terminate 749 * immediately, and the previously started INIT IPI would continue. OR 750 * the previous INIT IPI has already run. and this STARTUP IPI will 751 * run. OR the previous INIT IPI was ignored. and this STARTUP IPI 752 * will run. 753 */ 754 lapic->icr_lo = icr_lo | 0x00000600 | vector; 755 while (lapic->icr_lo & APIC_DELSTAT_MASK) 756 /* spin */ ; 757 u_sleep(200); /* wait ~200uS */ 758 759 /* 760 * Finally we do a 2nd STARTUP IPI: this 2nd STARTUP IPI should run IF 761 * the previous STARTUP IPI was cancelled by a latched INIT IPI. OR 762 * this STARTUP IPI will be ignored, as only ONE STARTUP IPI is 763 * recognized after hardware RESET or INIT IPI. 
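	 *
	 * For reference, the ICR encoding used below: 0x00000600 selects
	 * the STARTUP (SIPI) delivery mode and the low byte carries the
	 * vector.  Since vector was computed above as (boot_addr >> 12) &
	 * 0xff, the woken AP begins executing in real mode at physical
	 * address vector << 12, i.e. at the trampoline copied to
	 * boot_address, with CS = vector << 8 and IP = 0.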
764 */ 765 lapic->icr_lo = icr_lo | 0x00000600 | vector; 766 while (lapic->icr_lo & APIC_DELSTAT_MASK) 767 /* spin */ ; 768 769 /* Resume normal operation */ 770 cpu_enable_intr(); 771 772 /* wait for it to start, see ap_init() */ 773 set_apic_timer(5000000);/* == 5 seconds */ 774 while (read_apic_timer()) { 775 if (CPUMASK_TESTBIT(smp_startup_mask, gd->mi.gd_cpuid)) 776 return 1; /* return SUCCESS */ 777 } 778 779 return 0; /* return FAILURE */ 780 } 781 782 static 783 int 784 smitest(void) 785 { 786 int64_t ltsc; 787 int64_t ntsc; 788 int64_t ldelta; 789 int64_t ndelta; 790 int count; 791 792 ldelta = 0; 793 ndelta = 0; 794 while (read_apic_timer()) { 795 ltsc = rdtsc(); 796 for (count = 0; count < 100; ++count) 797 ntsc = rdtsc(); /* force loop to occur */ 798 if (ldelta) { 799 ndelta = ntsc - ltsc; 800 if (ldelta > ndelta) 801 ldelta = ndelta; 802 if (ndelta > ldelta * 2) 803 break; 804 } else { 805 ldelta = ntsc - ltsc; 806 } 807 } 808 return(read_apic_timer()); 809 } 810 811 /* 812 * Synchronously flush the TLB on all other CPU's. The current cpu's 813 * TLB is not flushed. If the caller wishes to flush the current cpu's 814 * TLB the caller must call cpu_invltlb() in addition to smp_invltlb(). 815 * 816 * This routine may be called concurrently from multiple cpus. When this 817 * happens, smp_invltlb() can wind up sticking around in the confirmation 818 * while() loop at the end as additional cpus are added to the global 819 * cpumask, until they are acknowledged by another IPI. 820 * 821 * NOTE: If for some reason we were unable to start all cpus we cannot 822 * safely use broadcast IPIs. 823 */ 824 825 cpumask_t smp_smurf_mask; 826 static cpumask_t smp_invltlb_mask; 827 #define LOOPRECOVER 828 #define LOOPMASK_IN 829 #ifdef LOOPMASK_IN 830 cpumask_t smp_in_mask; 831 #endif 832 cpumask_t smp_invmask; 833 extern cpumask_t smp_idleinvl_mask; 834 extern cpumask_t smp_idleinvl_reqs; 835 836 /* 837 * Atomically OR bits in *mask to smp_smurf_mask. Adjust *mask to remove 838 * bits that do not need to be IPId. These bits are still part of the command, 839 * but the target cpus have already been signalled and do not need to be 840 * sigalled again. 841 */ 842 #include <sys/spinlock.h> 843 #include <sys/spinlock2.h> 844 845 static __noinline 846 void 847 smp_smurf_fetchset(cpumask_t *mask) 848 { 849 cpumask_t omask; 850 int i; 851 __uint64_t obits; 852 __uint64_t nbits; 853 854 i = 0; 855 while (i < CPUMASK_ELEMENTS) { 856 obits = smp_smurf_mask.ary[i]; 857 cpu_ccfence(); 858 nbits = obits | mask->ary[i]; 859 if (atomic_cmpset_long(&smp_smurf_mask.ary[i], obits, nbits)) { 860 omask.ary[i] = obits; 861 ++i; 862 } 863 } 864 CPUMASK_NANDMASK(*mask, omask); 865 } 866 867 /* 868 * This is a mechanism which guarantees that cpu_invltlb() will be executed 869 * on idle cpus without having to signal or wake them up. The invltlb will be 870 * executed when they wake up, prior to any scheduling or interrupt thread. 871 * 872 * (*mask) is modified to remove the cpus we successfully negotiate this 873 * function with. This function may only be used with semi-synchronous 874 * commands (typically invltlb's or semi-synchronous invalidations which 875 * are usually associated only with kernel memory). 
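 *
 * The idle-loop half of this handshake lives outside this file (the two
 * masks are only externs here), but the flow is presumably: an idling
 * cpu advertises itself in smp_idleinvl_mask, this function posts the
 * request in smp_idleinvl_reqs and removes such cpus from *mask so they
 * are not IPId, and the idle cpu runs cpu_invltlb() for any posted
 * request when it wakes, before scheduling anything.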
876 */ 877 void 878 smp_smurf_idleinvlclr(cpumask_t *mask) 879 { 880 if (optimized_invltlb) { 881 ATOMIC_CPUMASK_ORMASK(smp_idleinvl_reqs, *mask); 882 /* cpu_lfence() not needed */ 883 CPUMASK_NANDMASK(*mask, smp_idleinvl_mask); 884 } 885 } 886 887 /* 888 * Issue cpu_invltlb() across all cpus except the current cpu. 889 * 890 * This function will arrange to avoid idle cpus, but still gurantee that 891 * invltlb is run on them when they wake up prior to any scheduling or 892 * nominal interrupt. 893 */ 894 void 895 smp_invltlb(void) 896 { 897 struct mdglobaldata *md = mdcpu; 898 cpumask_t mask; 899 unsigned long rflags; 900 #ifdef LOOPRECOVER 901 uint64_t tsc_base = rdtsc(); 902 int repeats = 0; 903 #endif 904 905 if (report_invltlb_src > 0) { 906 if (--report_invltlb_src <= 0) 907 print_backtrace(8); 908 } 909 910 /* 911 * Disallow normal interrupts, set all active cpus except our own 912 * in the global smp_invltlb_mask. 913 */ 914 ++md->mi.gd_cnt.v_smpinvltlb; 915 crit_enter_gd(&md->mi); 916 917 /* 918 * Bits we want to set in smp_invltlb_mask. We do not want to signal 919 * our own cpu. Also try to remove bits associated with idle cpus 920 * that we can flag for auto-invltlb. 921 */ 922 mask = smp_active_mask; 923 CPUMASK_NANDBIT(mask, md->mi.gd_cpuid); 924 smp_smurf_idleinvlclr(&mask); 925 926 rflags = read_rflags(); 927 cpu_disable_intr(); 928 ATOMIC_CPUMASK_ORMASK(smp_invltlb_mask, mask); 929 930 /* 931 * IPI non-idle cpus represented by mask. The omask calculation 932 * removes cpus from the mask which already have a Xinvltlb IPI 933 * pending (avoid double-queueing the IPI). 934 * 935 * We must disable real interrupts when setting the smurf flags or 936 * we might race a XINVLTLB before we manage to send the ipi's for 937 * the bits we set. 938 * 939 * NOTE: We are not signalling ourselves, mask already does NOT 940 * include our own cpu. 941 */ 942 smp_smurf_fetchset(&mask); 943 944 /* 945 * Issue the IPI. Note that the XINVLTLB IPI runs regardless of 946 * the critical section count on the target cpus. 947 */ 948 CPUMASK_ORMASK(mask, md->mi.gd_cpumask); 949 if (all_but_self_ipi_enable && 950 (all_but_self_ipi_enable >= 2 || 951 CPUMASK_CMPMASKEQ(smp_startup_mask, mask))) { 952 all_but_self_ipi(XINVLTLB_OFFSET); 953 } else { 954 CPUMASK_NANDMASK(mask, md->mi.gd_cpumask); 955 selected_apic_ipi(mask, XINVLTLB_OFFSET, APIC_DELMODE_FIXED); 956 } 957 958 /* 959 * Wait for acknowledgement by all cpus. smp_inval_intr() will 960 * temporarily enable interrupts to avoid deadlocking the lapic, 961 * and will also handle running cpu_invltlb() and remote invlpg 962 * command son our cpu if some other cpu requests it of us. 963 * 964 * WARNING! I originally tried to implement this as a hard loop 965 * checking only smp_invltlb_mask (and issuing a local 966 * cpu_invltlb() if requested), with interrupts enabled 967 * and without calling smp_inval_intr(). This DID NOT WORK. 968 * It resulted in weird races where smurf bits would get 969 * cleared without any action being taken. 
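	 *
	 * The loop below therefore keeps calling smp_inval_intr() itself:
	 * each target clears its own bit in smp_invltlb_mask from its
	 * Xinvltlb handler, and we spin until that mask drains to zero
	 * while also servicing any invalidation work other cpus have
	 * queued at us in the meantime.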
970 */ 971 smp_inval_intr(); 972 CPUMASK_ASSZERO(mask); 973 while (CPUMASK_CMPMASKNEQ(smp_invltlb_mask, mask)) { 974 smp_inval_intr(); 975 cpu_pause(); 976 #ifdef LOOPRECOVER 977 if (tsc_frequency && rdtsc() - tsc_base > tsc_frequency) { 978 /* 979 * cpuid - cpu doing the waiting 980 * invltlb_mask - IPI in progress 981 */ 982 kprintf("smp_invltlb %d: waited too long inv=%08jx " 983 "smurf=%08jx " 984 #ifdef LOOPMASK_IN 985 "in=%08jx " 986 #endif 987 "idle=%08jx/%08jx\n", 988 md->mi.gd_cpuid, 989 smp_invltlb_mask.ary[0], 990 smp_smurf_mask.ary[0], 991 #ifdef LOOPMASK_IN 992 smp_in_mask.ary[0], 993 #endif 994 smp_idleinvl_mask.ary[0], 995 smp_idleinvl_reqs.ary[0]); 996 mdcpu->gd_xinvaltlb = 0; 997 ATOMIC_CPUMASK_NANDMASK(smp_smurf_mask, 998 smp_invltlb_mask); 999 smp_invlpg(&smp_active_mask); 1000 tsc_base = rdtsc(); 1001 if (++repeats > 10) { 1002 kprintf("smp_invltlb: giving up\n"); 1003 CPUMASK_ASSZERO(smp_invltlb_mask); 1004 } 1005 } 1006 #endif 1007 } 1008 write_rflags(rflags); 1009 crit_exit_gd(&md->mi); 1010 } 1011 1012 /* 1013 * Called from a critical section with interrupts hard-disabled. 1014 * This function issues an XINVLTLB IPI and then executes any pending 1015 * command on the current cpu before returning. 1016 */ 1017 void 1018 smp_invlpg(cpumask_t *cmdmask) 1019 { 1020 struct mdglobaldata *md = mdcpu; 1021 cpumask_t mask; 1022 1023 if (report_invlpg_src > 0) { 1024 if (--report_invlpg_src <= 0) 1025 print_backtrace(8); 1026 } 1027 1028 /* 1029 * Disallow normal interrupts, set all active cpus in the pmap, 1030 * plus our own for completion processing (it might or might not 1031 * be part of the set). 1032 */ 1033 mask = smp_active_mask; 1034 CPUMASK_ANDMASK(mask, *cmdmask); 1035 CPUMASK_ORMASK(mask, md->mi.gd_cpumask); 1036 1037 /* 1038 * Avoid double-queuing IPIs, which can deadlock us. We must disable 1039 * real interrupts when setting the smurf flags or we might race a 1040 * XINVLTLB before we manage to send the ipi's for the bits we set. 1041 * 1042 * NOTE: We might be including our own cpu in the smurf mask. 1043 */ 1044 smp_smurf_fetchset(&mask); 1045 1046 /* 1047 * Issue the IPI. Note that the XINVLTLB IPI runs regardless of 1048 * the critical section count on the target cpus. 1049 * 1050 * We do not include our own cpu when issuing the IPI. 1051 */ 1052 if (all_but_self_ipi_enable && 1053 (all_but_self_ipi_enable >= 2 || 1054 CPUMASK_CMPMASKEQ(smp_startup_mask, mask))) { 1055 all_but_self_ipi(XINVLTLB_OFFSET); 1056 } else { 1057 CPUMASK_NANDMASK(mask, md->mi.gd_cpumask); 1058 selected_apic_ipi(mask, XINVLTLB_OFFSET, APIC_DELMODE_FIXED); 1059 } 1060 1061 /* 1062 * This will synchronously wait for our command to complete, 1063 * as well as process commands from other cpus. It also handles 1064 * reentrancy. 1065 * 1066 * (interrupts are disabled and we are in a critical section here) 1067 */ 1068 smp_inval_intr(); 1069 } 1070 1071 void 1072 smp_sniff(void) 1073 { 1074 globaldata_t gd = mycpu; 1075 int dummy; 1076 register_t rflags; 1077 1078 /* 1079 * Ignore all_but_self_ipi_enable here and just use it. 1080 */ 1081 rflags = read_rflags(); 1082 cpu_disable_intr(); 1083 all_but_self_ipi(XSNIFF_OFFSET); 1084 gd->gd_sample_pc = smp_sniff; 1085 gd->gd_sample_sp = &dummy; 1086 write_rflags(rflags); 1087 } 1088 1089 void 1090 cpu_sniff(int dcpu) 1091 { 1092 globaldata_t rgd = globaldata_find(dcpu); 1093 register_t rflags; 1094 int dummy; 1095 1096 /* 1097 * Ignore all_but_self_ipi_enable here and just use it. 
1098 */ 1099 rflags = read_rflags(); 1100 cpu_disable_intr(); 1101 single_apic_ipi(dcpu, XSNIFF_OFFSET, APIC_DELMODE_FIXED); 1102 rgd->gd_sample_pc = cpu_sniff; 1103 rgd->gd_sample_sp = &dummy; 1104 write_rflags(rflags); 1105 } 1106 1107 /* 1108 * Called from Xinvltlb assembly with interrupts hard-disabled and in a 1109 * critical section. gd_intr_nesting_level may or may not be bumped 1110 * depending on entry. 1111 * 1112 * THIS CODE IS INTENDED TO EXPLICITLY IGNORE THE CRITICAL SECTION COUNT. 1113 * THAT IS, THE INTERRUPT IS INTENDED TO FUNCTION EVEN WHEN MAINLINE CODE 1114 * IS IN A CRITICAL SECTION. 1115 */ 1116 void 1117 smp_inval_intr(void) 1118 { 1119 struct mdglobaldata *md = mdcpu; 1120 cpumask_t cpumask; 1121 #ifdef LOOPRECOVER 1122 uint64_t tsc_base = rdtsc(); 1123 #endif 1124 1125 #if 0 1126 /* 1127 * The idle code is in a critical section, but that doesn't stop 1128 * Xinvltlb from executing, so deal with the race which can occur 1129 * in that situation. Otherwise r-m-w operations by pmap_inval_intr() 1130 * may have problems. 1131 */ 1132 if (ATOMIC_CPUMASK_TESTANDCLR(smp_idleinvl_reqs, md->mi.gd_cpuid)) { 1133 ATOMIC_CPUMASK_NANDBIT(smp_invltlb_mask, md->mi.gd_cpuid); 1134 cpu_invltlb(); 1135 cpu_mfence(); 1136 } 1137 #endif 1138 1139 /* 1140 * This is a real mess. I'd like to just leave interrupts disabled 1141 * but it can cause the lapic to deadlock if too many interrupts queue 1142 * to it, due to the idiotic design of the lapic. So instead we have 1143 * to enter a critical section so normal interrupts are made pending 1144 * and track whether this one was reentered. 1145 */ 1146 if (md->gd_xinvaltlb) { /* reentrant on cpu */ 1147 md->gd_xinvaltlb = 2; 1148 return; 1149 } 1150 md->gd_xinvaltlb = 1; 1151 1152 /* 1153 * Check only those cpus with active Xinvl* commands pending. 1154 * 1155 * We are going to enable interrupts so make sure we are in a 1156 * critical section. This is necessary to avoid deadlocking 1157 * the lapic and to ensure that we execute our commands prior to 1158 * any nominal interrupt or preemption. 1159 * 1160 * WARNING! It is very important that we only clear out but in 1161 * smp_smurf_mask once for each interrupt we take. In 1162 * this case, we clear it on initial entry and only loop 1163 * on the reentrancy detect (caused by another interrupt). 1164 */ 1165 cpumask = smp_invmask; 1166 #ifdef LOOPMASK_IN 1167 ATOMIC_CPUMASK_ORBIT(smp_in_mask, md->mi.gd_cpuid); 1168 #endif 1169 loop: 1170 cpu_enable_intr(); 1171 ATOMIC_CPUMASK_NANDBIT(smp_smurf_mask, md->mi.gd_cpuid); 1172 1173 /* 1174 * Specific page request(s), and we can't return until all bits 1175 * are zero. 1176 */ 1177 for (;;) { 1178 int toolong; 1179 1180 /* 1181 * Also execute any pending full invalidation request in 1182 * this loop. 1183 */ 1184 if (CPUMASK_TESTBIT(smp_invltlb_mask, md->mi.gd_cpuid)) { 1185 ATOMIC_CPUMASK_NANDBIT(smp_invltlb_mask, 1186 md->mi.gd_cpuid); 1187 cpu_invltlb(); 1188 cpu_mfence(); 1189 } 1190 1191 #ifdef LOOPRECOVER 1192 if (tsc_frequency && rdtsc() - tsc_base > tsc_frequency) { 1193 /* 1194 * cpuid - cpu doing the waiting 1195 * invmask - IPI in progress 1196 * invltlb_mask - which ones are TLB invalidations? 
1197 */ 1198 kprintf("smp_inval_intr %d inv=%08jx tlbm=%08jx " 1199 "smurf=%08jx " 1200 #ifdef LOOPMASK_IN 1201 "in=%08jx " 1202 #endif 1203 "idle=%08jx/%08jx\n", 1204 md->mi.gd_cpuid, 1205 smp_invmask.ary[0], 1206 smp_invltlb_mask.ary[0], 1207 smp_smurf_mask.ary[0], 1208 #ifdef LOOPMASK_IN 1209 smp_in_mask.ary[0], 1210 #endif 1211 smp_idleinvl_mask.ary[0], 1212 smp_idleinvl_reqs.ary[0]); 1213 tsc_base = rdtsc(); 1214 toolong = 1; 1215 } else { 1216 toolong = 0; 1217 } 1218 #else 1219 toolong = 0; 1220 #endif 1221 1222 /* 1223 * We can only add bits to the cpumask to test during the 1224 * loop because the smp_invmask bit is cleared once the 1225 * originator completes the command (the targets may still 1226 * be cycling their own completions in this loop, afterwords). 1227 * 1228 * lfence required prior to all tests as this Xinvltlb 1229 * interrupt could race the originator (already be in progress 1230 * wnen the originator decides to issue, due to an issue by 1231 * another cpu). 1232 */ 1233 cpu_lfence(); 1234 CPUMASK_ORMASK(cpumask, smp_invmask); 1235 /*cpumask = smp_active_mask;*/ /* XXX */ 1236 cpu_lfence(); 1237 1238 if (pmap_inval_intr(&cpumask, toolong) == 0) { 1239 /* 1240 * Clear our smurf mask to allow new IPIs, but deal 1241 * with potential races. 1242 */ 1243 break; 1244 } 1245 1246 /* 1247 * Test if someone sent us another invalidation IPI, break 1248 * out so we can take it to avoid deadlocking the lapic 1249 * interrupt queue (? stupid intel, amd). 1250 */ 1251 if (md->gd_xinvaltlb == 2) 1252 break; 1253 /* 1254 if (CPUMASK_TESTBIT(smp_smurf_mask, md->mi.gd_cpuid)) 1255 break; 1256 */ 1257 } 1258 1259 /* 1260 * Full invalidation request 1261 */ 1262 if (CPUMASK_TESTBIT(smp_invltlb_mask, md->mi.gd_cpuid)) { 1263 ATOMIC_CPUMASK_NANDBIT(smp_invltlb_mask, 1264 md->mi.gd_cpuid); 1265 cpu_invltlb(); 1266 cpu_mfence(); 1267 } 1268 1269 /* 1270 * Check to see if another Xinvltlb interrupt occurred and loop up 1271 * if it did. 1272 */ 1273 cpu_disable_intr(); 1274 if (md->gd_xinvaltlb == 2) { 1275 md->gd_xinvaltlb = 1; 1276 goto loop; 1277 } 1278 #ifdef LOOPMASK_IN 1279 ATOMIC_CPUMASK_NANDBIT(smp_in_mask, md->mi.gd_cpuid); 1280 #endif 1281 md->gd_xinvaltlb = 0; 1282 } 1283 1284 void 1285 cpu_wbinvd_on_all_cpus_callback(void *arg) 1286 { 1287 wbinvd(); 1288 } 1289 1290 /* 1291 * When called the executing CPU will send an IPI to all other CPUs 1292 * requesting that they halt execution. 1293 * 1294 * Usually (but not necessarily) called with 'other_cpus' as its arg. 1295 * 1296 * - Signals all CPUs in map to stop. 1297 * - Waits for each to stop. 1298 * 1299 * Returns: 1300 * -1: error 1301 * 0: NA 1302 * 1: ok 1303 * 1304 * XXX FIXME: this is not MP-safe, needs a lock to prevent multiple CPUs 1305 * from executing at same time. 1306 */ 1307 int 1308 stop_cpus(cpumask_t map) 1309 { 1310 cpumask_t mask; 1311 1312 CPUMASK_ANDMASK(map, smp_active_mask); 1313 1314 /* send the Xcpustop IPI to all CPUs in map */ 1315 selected_apic_ipi(map, XCPUSTOP_OFFSET, APIC_DELMODE_FIXED); 1316 1317 do { 1318 mask = stopped_cpus; 1319 CPUMASK_ANDMASK(mask, map); 1320 /* spin */ 1321 } while (CPUMASK_CMPMASKNEQ(mask, map)); 1322 1323 return 1; 1324 } 1325 1326 1327 /* 1328 * Called by a CPU to restart stopped CPUs. 1329 * 1330 * Usually (but not necessarily) called with 'stopped_cpus' as its arg. 1331 * 1332 * - Signals all CPUs in map to restart. 1333 * - Waits for each to restart. 
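 *
 * Minimal usage sketch (hypothetical caller, following the "usually
 * called with" notes on stop_cpus() and here):
 *
 *	stop_cpus(mycpu->gd_other_cpus);	(freeze everyone else)
 *	... inspect or modify shared state ...
 *	restart_cpus(stopped_cpus);		(let them continue)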
1334 * 1335 * Returns: 1336 * -1: error 1337 * 0: NA 1338 * 1: ok 1339 */ 1340 int 1341 restart_cpus(cpumask_t map) 1342 { 1343 cpumask_t mask; 1344 1345 /* signal other cpus to restart */ 1346 mask = map; 1347 CPUMASK_ANDMASK(mask, smp_active_mask); 1348 cpu_ccfence(); 1349 started_cpus = mask; 1350 cpu_ccfence(); 1351 1352 /* wait for each to clear its bit */ 1353 while (CPUMASK_CMPMASKNEQ(stopped_cpus, map)) 1354 cpu_pause(); 1355 1356 return 1; 1357 } 1358 1359 /* 1360 * This is called once the mpboot code has gotten us properly relocated 1361 * and the MMU turned on, etc. ap_init() is actually the idle thread, 1362 * and when it returns the scheduler will call the real cpu_idle() main 1363 * loop for the idlethread. Interrupts are disabled on entry and should 1364 * remain disabled at return. 1365 */ 1366 void 1367 ap_init(void) 1368 { 1369 int cpu_id; 1370 1371 /* 1372 * Adjust smp_startup_mask to signal the BSP that we have started 1373 * up successfully. Note that we do not yet hold the BGL. The BSP 1374 * is waiting for our signal. 1375 * 1376 * We can't set our bit in smp_active_mask yet because we are holding 1377 * interrupts physically disabled and remote cpus could deadlock 1378 * trying to send us an IPI. 1379 */ 1380 ATOMIC_CPUMASK_ORBIT(smp_startup_mask, mycpu->gd_cpuid); 1381 cpu_mfence(); 1382 1383 /* 1384 * Interlock for LAPIC initialization. Wait until mp_finish_lapic is 1385 * non-zero, then get the MP lock. 1386 * 1387 * Note: We are in a critical section. 1388 * 1389 * Note: we are the idle thread, we can only spin. 1390 * 1391 * Note: The load fence is memory volatile and prevents the compiler 1392 * from improperly caching mp_finish_lapic, and the cpu from improperly 1393 * caching it. 1394 */ 1395 while (mp_finish_lapic == 0) { 1396 cpu_pause(); 1397 cpu_lfence(); 1398 } 1399 #if 0 1400 while (try_mplock() == 0) { 1401 cpu_pause(); 1402 cpu_lfence(); 1403 } 1404 #endif 1405 1406 if (cpu_feature & CPUID_TSC) { 1407 /* 1408 * The BSP is constantly updating tsc0_offset, figure out 1409 * the relative difference to synchronize ktrdump. 1410 */ 1411 tsc_offsets[mycpu->gd_cpuid] = rdtsc() - tsc0_offset; 1412 } 1413 1414 /* BSP may have changed PTD while we're waiting for the lock */ 1415 cpu_invltlb(); 1416 1417 /* Build our map of 'other' CPUs. */ 1418 mycpu->gd_other_cpus = smp_startup_mask; 1419 ATOMIC_CPUMASK_NANDBIT(mycpu->gd_other_cpus, mycpu->gd_cpuid); 1420 1421 /* A quick check from sanity claus */ 1422 cpu_id = APICID_TO_CPUID((lapic->id & 0xff000000) >> 24); 1423 if (mycpu->gd_cpuid != cpu_id) { 1424 kprintf("SMP: assigned cpuid = %d\n", mycpu->gd_cpuid); 1425 kprintf("SMP: actual cpuid = %d lapicid %d\n", 1426 cpu_id, (lapic->id & 0xff000000) >> 24); 1427 #if 0 /* JGXXX */ 1428 kprintf("PTD[MPPTDI] = %p\n", (void *)PTD[MPPTDI]); 1429 #endif 1430 panic("cpuid mismatch! boom!!"); 1431 } 1432 1433 /* Initialize AP's local APIC for irq's */ 1434 lapic_init(FALSE); 1435 1436 /* LAPIC initialization is done */ 1437 ATOMIC_CPUMASK_ORBIT(smp_lapic_mask, mycpu->gd_cpuid); 1438 cpu_mfence(); 1439 1440 #if 0 1441 /* Let BSP move onto the next initialization stage */ 1442 rel_mplock(); 1443 #endif 1444 1445 /* 1446 * Interlock for finalization. Wait until mp_finish is non-zero, 1447 * then get the MP lock. 1448 * 1449 * Note: We are in a critical section. 1450 * 1451 * Note: we are the idle thread, we can only spin. 
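	 *
	 * Note: the overall BSP/AP rendezvous, in order, is roughly:
	 *	smp_startup_mask	AP has reached ap_init()
	 *	mp_finish_lapic		BSP allows LAPIC init to proceed
	 *	smp_lapic_mask		AP has finished lapic_init()
	 *	mp_finish		BSP (ap_finish()) allows finalization
	 *	smp_active_mask		AP is accepting IPIs and interrupts
	 *	smp_finalize_mask	AP is dropping into its idle thread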
1452 * 1453 * Note: The load fence is memory volatile and prevents the compiler 1454 * from improperly caching mp_finish, and the cpu from improperly 1455 * caching it. 1456 */ 1457 while (mp_finish == 0) { 1458 cpu_pause(); 1459 cpu_lfence(); 1460 } 1461 1462 /* BSP may have changed PTD while we're waiting for the lock */ 1463 cpu_invltlb(); 1464 1465 /* Set memory range attributes for this CPU to match the BSP */ 1466 mem_range_AP_init(); 1467 1468 /* 1469 * Once we go active we must process any IPIQ messages that may 1470 * have been queued, because no actual IPI will occur until we 1471 * set our bit in the smp_active_mask. If we don't the IPI 1472 * message interlock could be left set which would also prevent 1473 * further IPIs. 1474 * 1475 * The idle loop doesn't expect the BGL to be held and while 1476 * lwkt_switch() normally cleans things up this is a special case 1477 * because we returning almost directly into the idle loop. 1478 * 1479 * The idle thread is never placed on the runq, make sure 1480 * nothing we've done put it there. 1481 */ 1482 1483 /* 1484 * Hold a critical section and allow real interrupts to occur. Zero 1485 * any spurious interrupts which have accumulated, then set our 1486 * smp_active_mask indicating that we are fully operational. 1487 */ 1488 crit_enter(); 1489 __asm __volatile("sti; pause; pause"::); 1490 bzero(mdcpu->gd_ipending, sizeof(mdcpu->gd_ipending)); 1491 ATOMIC_CPUMASK_ORBIT(smp_active_mask, mycpu->gd_cpuid); 1492 1493 /* 1494 * Wait until all cpus have set their smp_active_mask and have fully 1495 * operational interrupts before proceeding. 1496 * 1497 * We need a final cpu_invltlb() because we would not have received 1498 * any until we set our bit in smp_active_mask. 1499 */ 1500 while (mp_finish == 1) { 1501 cpu_pause(); 1502 cpu_lfence(); 1503 } 1504 cpu_invltlb(); 1505 1506 /* 1507 * Initialize per-cpu clocks and do other per-cpu initialization. 1508 * At this point code is expected to be able to use the full kernel 1509 * API. 1510 */ 1511 initclocks_pcpu(); /* clock interrupts (via IPIs) */ 1512 1513 /* 1514 * Since we may have cleaned up the interrupt triggers, manually 1515 * process any pending IPIs before exiting our critical section. 1516 * Once the critical section has exited, normal interrupt processing 1517 * may occur. 1518 */ 1519 atomic_swap_int(&mycpu->gd_npoll, 0); 1520 lwkt_process_ipiq(); 1521 crit_exit(); 1522 1523 /* 1524 * Final final, allow the waiting BSP to resume the boot process, 1525 * return 'into' the idle thread bootstrap. 1526 */ 1527 ATOMIC_CPUMASK_ORBIT(smp_finalize_mask, mycpu->gd_cpuid); 1528 KKASSERT((curthread->td_flags & TDF_RUNQ) == 0); 1529 } 1530 1531 /* 1532 * Get SMP fully working before we start initializing devices. 1533 */ 1534 static 1535 void 1536 ap_finish(void) 1537 { 1538 if (bootverbose) 1539 kprintf("Finish MP startup\n"); 1540 rel_mplock(); 1541 1542 /* 1543 * Wait for the active mask to complete, after which all cpus will 1544 * be accepting interrupts. 1545 */ 1546 mp_finish = 1; 1547 while (CPUMASK_CMPMASKNEQ(smp_active_mask, smp_startup_mask)) { 1548 cpu_pause(); 1549 cpu_lfence(); 1550 } 1551 1552 /* 1553 * Wait for the finalization mask to complete, after which all cpus 1554 * have completely finished initializing and are entering or are in 1555 * their idle thread. 1556 * 1557 * BSP should have received all required invltlbs but do another 1558 * one just in case. 
1559 */ 1560 cpu_invltlb(); 1561 mp_finish = 2; 1562 while (CPUMASK_CMPMASKNEQ(smp_finalize_mask, smp_startup_mask)) { 1563 cpu_pause(); 1564 cpu_lfence(); 1565 } 1566 1567 while (try_mplock() == 0) { 1568 cpu_pause(); 1569 cpu_lfence(); 1570 } 1571 1572 if (bootverbose) { 1573 kprintf("Active CPU Mask: %016jx\n", 1574 (uintmax_t)CPUMASK_LOWMASK(smp_active_mask)); 1575 } 1576 } 1577 1578 SYSINIT(finishsmp, SI_BOOT2_FINISH_SMP, SI_ORDER_FIRST, ap_finish, NULL); 1579 1580 /* 1581 * Interrupts must be hard-disabled by caller 1582 */ 1583 void 1584 cpu_send_ipiq(int dcpu) 1585 { 1586 if (CPUMASK_TESTBIT(smp_active_mask, dcpu)) 1587 single_apic_ipi(dcpu, XIPIQ_OFFSET, APIC_DELMODE_FIXED); 1588 } 1589 1590 #if 0 /* single_apic_ipi_passive() not working yet */ 1591 /* 1592 * Returns 0 on failure, 1 on success 1593 */ 1594 int 1595 cpu_send_ipiq_passive(int dcpu) 1596 { 1597 int r = 0; 1598 if (CPUMASK_TESTBIT(smp_active_mask, dcpu)) { 1599 r = single_apic_ipi_passive(dcpu, XIPIQ_OFFSET, 1600 APIC_DELMODE_FIXED); 1601 } 1602 return(r); 1603 } 1604 #endif 1605 1606 static void 1607 mp_bsp_simple_setup(void) 1608 { 1609 struct mdglobaldata *gd; 1610 size_t ipiq_size; 1611 1612 /* build our map of 'other' CPUs */ 1613 mycpu->gd_other_cpus = smp_startup_mask; 1614 CPUMASK_NANDBIT(mycpu->gd_other_cpus, mycpu->gd_cpuid); 1615 1616 gd = (struct mdglobaldata *)mycpu; 1617 gd->gd_acpi_id = CPUID_TO_ACPIID(mycpu->gd_cpuid); 1618 1619 ipiq_size = sizeof(struct lwkt_ipiq) * ncpus; 1620 mycpu->gd_ipiq = (void *)kmem_alloc(&kernel_map, ipiq_size, 1621 VM_SUBSYS_IPIQ); 1622 bzero(mycpu->gd_ipiq, ipiq_size); 1623 1624 /* initialize arc4random. */ 1625 arc4_init_pcpu(0); 1626 1627 pmap_set_opt(); 1628 1629 if (cpu_feature & CPUID_TSC) 1630 tsc0_offset = rdtsc(); 1631 } 1632 1633 1634 /* 1635 * CPU TOPOLOGY DETECTION FUNCTIONS 1636 */ 1637 1638 /* Detect intel topology using CPUID 1639 * Ref: http://www.intel.com/Assets/PDF/appnote/241618.pdf, pg 41 1640 */ 1641 static void 1642 detect_intel_topology(int count_htt_cores) 1643 { 1644 int shift = 0; 1645 int ecx_index = 0; 1646 int core_plus_logical_bits = 0; 1647 int cores_per_package; 1648 int logical_per_package; 1649 int logical_per_core; 1650 unsigned int p[4]; 1651 1652 if (cpu_high >= 0xb) { 1653 goto FUNC_B; 1654 1655 } else if (cpu_high >= 0x4) { 1656 goto FUNC_4; 1657 1658 } else { 1659 core_bits = 0; 1660 for (shift = 0; (1 << shift) < count_htt_cores; ++shift) 1661 ; 1662 logical_CPU_bits = 1 << shift; 1663 return; 1664 } 1665 1666 FUNC_B: 1667 cpuid_count(0xb, FUNC_B_THREAD_LEVEL, p); 1668 1669 /* if 0xb not supported - fallback to 0x4 */ 1670 if (p[1] == 0 || (FUNC_B_TYPE(p[2]) != FUNC_B_THREAD_TYPE)) { 1671 goto FUNC_4; 1672 } 1673 1674 logical_CPU_bits = FUNC_B_BITS_SHIFT_NEXT_LEVEL(p[0]); 1675 1676 ecx_index = FUNC_B_THREAD_LEVEL + 1; 1677 do { 1678 cpuid_count(0xb, ecx_index, p); 1679 1680 /* Check for the Core type in the implemented sub leaves. 
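		 * CPUID leaf 0xb reports one topology level per ECX
		 * sub-leaf: EAX[4:0] is the number of APIC-ID bits to
		 * shift right to reach the next level and ECX[15:8] is
		 * the level type (1 = SMT, 2 = Core, 0 = no further
		 * levels), which is what FUNC_B_BITS_SHIFT_NEXT_LEVEL()
		 * and FUNC_B_TYPE() extract here.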
*/ 1681 if (FUNC_B_TYPE(p[2]) == FUNC_B_CORE_TYPE) { 1682 core_plus_logical_bits = FUNC_B_BITS_SHIFT_NEXT_LEVEL(p[0]); 1683 break; 1684 } 1685 1686 ecx_index++; 1687 1688 } while (FUNC_B_TYPE(p[2]) != FUNC_B_INVALID_TYPE); 1689 1690 core_bits = core_plus_logical_bits - logical_CPU_bits; 1691 1692 return; 1693 1694 FUNC_4: 1695 cpuid_count(0x4, 0, p); 1696 cores_per_package = FUNC_4_MAX_CORE_NO(p[0]) + 1; 1697 1698 logical_per_package = count_htt_cores; 1699 logical_per_core = logical_per_package / cores_per_package; 1700 1701 for (shift = 0; (1 << shift) < logical_per_core; ++shift) 1702 ; 1703 logical_CPU_bits = shift; 1704 1705 for (shift = 0; (1 << shift) < cores_per_package; ++shift) 1706 ; 1707 core_bits = shift; 1708 1709 return; 1710 } 1711 1712 /* Detect AMD topology using CPUID 1713 * Ref: http://support.amd.com/us/Embedded_TechDocs/25481.pdf, last page 1714 */ 1715 static void 1716 detect_amd_topology(int count_htt_cores) 1717 { 1718 int shift = 0; 1719 if ((cpu_feature & CPUID_HTT) && (amd_feature2 & AMDID2_CMP)) { 1720 if (cpu_procinfo2 & AMDID_COREID_SIZE) { 1721 core_bits = (cpu_procinfo2 & AMDID_COREID_SIZE) >> 1722 AMDID_COREID_SIZE_SHIFT; 1723 } else { 1724 core_bits = (cpu_procinfo2 & AMDID_CMP_CORES) + 1; 1725 for (shift = 0; (1 << shift) < core_bits; ++shift) 1726 ; 1727 core_bits = shift; 1728 } 1729 1730 if (amd_feature2 & AMDID2_TOPOEXT) { 1731 u_int p[4]; 1732 int i; 1733 int type; 1734 int level; 1735 int share_count; 1736 for (i = 0; i < 256; ++i) { 1737 cpuid_count(0x8000001d, i, p); 1738 type = p[0] & 0x1f; 1739 level = (p[0] >> 5) & 0x7; 1740 share_count = 1 + ((p[0] >> 14) & 0xfff); 1741 1742 if (type == 0) 1743 break; 1744 if (bootverbose) 1745 kprintf("Topology probe i=%2d type=%d level=%d share_count=%d\n", 1746 i, type, level, share_count); 1747 if (type == 1 && share_count) { /* CPUID_TYPE_SMT */ 1748 for (shift = 0; (1 << shift) < count_htt_cores / share_count; ++shift) 1749 ; 1750 core_bits = shift; 1751 break; 1752 } 1753 } 1754 } 1755 1756 logical_CPU_bits = count_htt_cores >> core_bits; 1757 for (shift = 0; (1 << shift) < logical_CPU_bits; ++shift) 1758 ; 1759 logical_CPU_bits = shift; 1760 } else { 1761 for (shift = 0; (1 << shift) < count_htt_cores; ++shift) 1762 ; 1763 core_bits = shift; 1764 logical_CPU_bits = 0; 1765 } 1766 } 1767 1768 static void 1769 amd_get_compute_unit_id(void *arg) 1770 { 1771 u_int regs[4]; 1772 1773 do_cpuid(0x8000001e, regs); 1774 cpu_node_t * mynode = get_cpu_node_by_cpuid(mycpuid); 1775 1776 /* 1777 * AMD - CPUID Specification September 2010 1778 * page 34 - //ComputeUnitID = ebx[0:7]// 1779 */ 1780 mynode->compute_unit_id = regs[1] & 0xff; 1781 } 1782 1783 int 1784 fix_amd_topology(void) 1785 { 1786 cpumask_t mask; 1787 1788 if (cpu_vendor_id != CPU_VENDOR_AMD) 1789 return -1; 1790 if ((amd_feature2 & AMDID2_TOPOEXT) == 0) 1791 return -1; 1792 1793 CPUMASK_ASSALLONES(mask); 1794 lwkt_cpusync_simple(mask, amd_get_compute_unit_id, NULL); 1795 1796 kprintf("Compute unit iDS:\n"); 1797 int i; 1798 for (i = 0; i < ncpus; i++) { 1799 kprintf("%d-%d; \n", 1800 i, get_cpu_node_by_cpuid(i)->compute_unit_id); 1801 } 1802 return 0; 1803 } 1804 1805 /* 1806 * Calculate 1807 * - logical_CPU_bits 1808 * - core_bits 1809 * With the values above (for AMD or INTEL) we are able to generally 1810 * detect the CPU topology (number of cores for each level): 1811 * Ref: http://wiki.osdev.org/Detecting_CPU_Topology_(80x86) 1812 * Ref: http://www.multicoreinfo.com/research/papers/whitepapers/Intel-detect-topology.pdf 1813 */ 1814 void 1815 
detect_cpu_topology(void) 1816 { 1817 static int topology_detected = 0; 1818 int count = 0; 1819 1820 if (topology_detected) 1821 goto OUT; 1822 if ((cpu_feature & CPUID_HTT) == 0) { 1823 core_bits = 0; 1824 logical_CPU_bits = 0; 1825 goto OUT; 1826 } 1827 count = (cpu_procinfo & CPUID_HTT_CORES) >> CPUID_HTT_CORE_SHIFT; 1828 1829 if (cpu_vendor_id == CPU_VENDOR_INTEL) 1830 detect_intel_topology(count); 1831 else if (cpu_vendor_id == CPU_VENDOR_AMD) 1832 detect_amd_topology(count); 1833 topology_detected = 1; 1834 1835 OUT: 1836 if (bootverbose) { 1837 kprintf("Bits within APICID: logical_CPU_bits: %d; " 1838 "core_bits: %d\n", 1839 logical_CPU_bits, core_bits); 1840 } 1841 } 1842 1843 /* 1844 * Interface functions to calculate chip_ID, 1845 * core_number and logical_number 1846 * Ref: http://wiki.osdev.org/Detecting_CPU_Topology_(80x86) 1847 */ 1848 int 1849 get_chip_ID(int cpuid) 1850 { 1851 return get_apicid_from_cpuid(cpuid) >> 1852 (logical_CPU_bits + core_bits); 1853 } 1854 1855 int 1856 get_chip_ID_from_APICID(int apicid) 1857 { 1858 return apicid >> (logical_CPU_bits + core_bits); 1859 } 1860 1861 int 1862 get_core_number_within_chip(int cpuid) 1863 { 1864 return ((get_apicid_from_cpuid(cpuid) >> logical_CPU_bits) & 1865 ((1 << core_bits) - 1)); 1866 } 1867 1868 int 1869 get_logical_CPU_number_within_core(int cpuid) 1870 { 1871 return (get_apicid_from_cpuid(cpuid) & 1872 ((1 << logical_CPU_bits) - 1)); 1873 } 1874
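
/*
 * Worked example of the decomposition above (the bit widths and APIC ID
 * are only illustrative): on a part with logical_CPU_bits = 1 and
 * core_bits = 3, an APIC ID of 0x0b (binary 0 101 1) decodes as
 *
 *	chip   = 0x0b >> (1 + 3)              = 0
 *	core   = (0x0b >> 1) & ((1 << 3) - 1) = 5
 *	thread = 0x0b & ((1 << 1) - 1)        = 1
 *
 * matching get_chip_ID_from_APICID(), get_core_number_within_chip() and
 * get_logical_CPU_number_within_core() for the cpu owning that APIC ID.
 */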