/*
 * Copyright (c) 1996, by Steve Passe
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the developer may NOT be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/i386/mp_machdep.c,v 1.115.2.15 2003/03/14 21:22:35 jhb Exp $
 */

#include "opt_cpu.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/cons.h>		/* cngetc() */
#include <sys/machintr.h>
#include <sys/cpu_topology.h>

#include <sys/mplock2.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <sys/lock.h>
#include <vm/vm_map.h>
#include <sys/user.h>
#ifdef GPROF
#include <sys/gmon.h>
#endif

#include <machine/smp.h>
#include <machine_base/apic/apicreg.h>
#include <machine/atomic.h>
#include <machine/cpufunc.h>
#include <machine/cputypes.h>
#include <machine_base/apic/lapic.h>
#include <machine_base/apic/ioapic.h>
#include <machine_base/acpica/acpi_md_cpu.h>
#include <machine/psl.h>
#include <machine/segments.h>
#include <machine/tss.h>
#include <machine/specialreg.h>
#include <machine/globaldata.h>
#include <machine/pmap_inval.h>
#include <machine/clock.h>

#include <machine/md_var.h>		/* setidt() */
#include <machine_base/icu/icu.h>	/* IPIs */
#include <machine_base/icu/icu_var.h>
#include <machine_base/apic/ioapic_abi.h>
#include <machine/intr_machdep.h>	/* IPIs */

#define WARMBOOT_TARGET		0
#define WARMBOOT_OFF		(KERNBASE + 0x0467)
#define WARMBOOT_SEG		(KERNBASE + 0x0469)

#define CMOS_REG		(0x70)
#define CMOS_DATA		(0x71)
#define BIOS_RESET		(0x0f)
#define BIOS_WARM		(0x0a)

/*
 * This code MUST be enabled both here and in mpboot.s.
 * It follows the very early stages of AP boot by placing values in CMOS ram.
 * It NORMALLY will never be needed, hence the primitive method used to
 * enable it.
 */
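/*
 * Illustrative example (assuming mpboot.s overwrites these CMOS bytes as
 * the AP progresses): start_all_aps() seeds them with CHECK_INIT(99)
 * before each AP launch, so a CHECK_PRINT("trace") reading
 * "trace: 99, 99, 99, 99, 99, 99" would mean the AP never reached its
 * first checkpoint.
 */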
#if defined(CHECK_POINTS)
#define CHECK_READ(A)	 (outb(CMOS_REG, (A)), inb(CMOS_DATA))
#define CHECK_WRITE(A,D) (outb(CMOS_REG, (A)), outb(CMOS_DATA, (D)))

#define CHECK_INIT(D);				\
	CHECK_WRITE(0x34, (D));			\
	CHECK_WRITE(0x35, (D));			\
	CHECK_WRITE(0x36, (D));			\
	CHECK_WRITE(0x37, (D));			\
	CHECK_WRITE(0x38, (D));			\
	CHECK_WRITE(0x39, (D));

#define CHECK_PRINT(S);				\
	kprintf("%s: %d, %d, %d, %d, %d, %d\n",	\
	    (S),				\
	    CHECK_READ(0x34),			\
	    CHECK_READ(0x35),			\
	    CHECK_READ(0x36),			\
	    CHECK_READ(0x37),			\
	    CHECK_READ(0x38),			\
	    CHECK_READ(0x39));

#else				/* CHECK_POINTS */

#define CHECK_INIT(D)
#define CHECK_PRINT(S)

#endif				/* CHECK_POINTS */

/*
 * Values to send to the POST hardware.
 */
#define MP_BOOTADDRESS_POST	0x10
#define MP_PROBE_POST		0x11
#define MPTABLE_PASS1_POST	0x12

#define MP_START_POST		0x13
#define MP_ENABLE_POST		0x14
#define MPTABLE_PASS2_POST	0x15

#define START_ALL_APS_POST	0x16
#define INSTALL_AP_TRAMP_POST	0x17
#define START_AP_POST		0x18

#define MP_ANNOUNCE_POST	0x19

/** XXX FIXME: where does this really belong, isa.h/isa.c perhaps? */
int	current_postcode;

/** XXX FIXME: what system files declare these??? */
extern struct region_descriptor r_gdt;

extern int naps;

int64_t tsc0_offset;
extern int64_t tsc_offsets[];

/* AP uses this during bootstrap.  Do not staticize.  */
char *bootSTK;
static int bootAP;

struct pcb stoppcbs[MAXCPU];

extern inthand_t IDTVEC(fast_syscall), IDTVEC(fast_syscall32);

/*
 * Local data and functions.
 */

static u_int	boot_address;
static int	mp_finish;
static int	mp_finish_lapic;

static int	start_all_aps(u_int boot_addr);
#if 0
static void	install_ap_tramp(u_int boot_addr);
#endif
static int	start_ap(struct mdglobaldata *gd, u_int boot_addr, int smibest);
static int	smitest(void);
static void	mp_bsp_simple_setup(void);

/* which cpus have been started */
static cpumask_t smp_startup_mask = CPUMASK_INITIALIZER_ONLYONE;
/* which cpus have had their lapic initialized */
static cpumask_t smp_lapic_mask = CPUMASK_INITIALIZER_ONLYONE;
/* which cpus are ready for IPIs etc? */
cpumask_t smp_active_mask = CPUMASK_INITIALIZER_ONLYONE;
cpumask_t smp_finalize_mask = CPUMASK_INITIALIZER_ONLYONE;

SYSCTL_OPAQUE(_machdep, OID_AUTO, smp_active, CTLFLAG_RD,
	      &smp_active_mask, sizeof(smp_active_mask), "LU", "");
static u_int	bootMP_size;
static u_int	report_invlpg_src;
SYSCTL_INT(_machdep, OID_AUTO, report_invlpg_src, CTLFLAG_RW,
	   &report_invlpg_src, 0, "");
static u_int	report_invltlb_src;
SYSCTL_INT(_machdep, OID_AUTO, report_invltlb_src, CTLFLAG_RW,
	   &report_invltlb_src, 0, "");
static int	optimized_invltlb;
SYSCTL_INT(_machdep, OID_AUTO, optimized_invltlb, CTLFLAG_RW,
	   &optimized_invltlb, 0, "");
static int	all_but_self_ipi_enable = 1;
SYSCTL_INT(_machdep, OID_AUTO, all_but_self_ipi_enable, CTLFLAG_RW,
	   &all_but_self_ipi_enable, 0, "");

/* Local data for detecting CPU TOPOLOGY */
static int core_bits = 0;
static int logical_CPU_bits = 0;


/*
 * Calculate usable address in base memory for AP trampoline code.
 */
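/*
 * Illustrative layout, assuming basemem = 639 (KB): boot_address comes
 * out as trunc_page(639 * 1024) = 0x9f000 (one page lower if the
 * trampoline does not fit in the tail of base memory), and the three
 * trampoline page table pages occupy 0x9c000-0x9efff directly below it.
 */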
u_int
mp_bootaddress(u_int basemem)
{
	POSTCODE(MP_BOOTADDRESS_POST);

	bootMP_size = mptramp_end - mptramp_start;
	boot_address = trunc_page(basemem * 1024); /* round down to 4k boundary */
	if (((basemem * 1024) - boot_address) < bootMP_size)
		boot_address -= PAGE_SIZE;	/* not enough, lower by 4k */
	/* 3 levels of page table pages */
	mptramp_pagetables = boot_address - (PAGE_SIZE * 3);

	return mptramp_pagetables;
}

/*
 * Print various information about the SMP system hardware and setup.
 */
void
mp_announce(void)
{
	int x;

	POSTCODE(MP_ANNOUNCE_POST);

	kprintf("DragonFly/MP: Multiprocessor motherboard\n");
	kprintf(" cpu0 (BSP): apic id: %2d\n", CPUID_TO_APICID(0));
	for (x = 1; x <= naps; ++x)
		kprintf(" cpu%d (AP): apic id: %2d\n", x, CPUID_TO_APICID(x));

	if (!ioapic_enable)
		kprintf(" Warning: APIC I/O disabled\n");
}

/*
 * AP cpus call this to sync up protected mode.
 *
 * WARNING! %gs is not set up on entry.  This routine sets up %gs.
 */
void
init_secondary(void)
{
	int gsel_tss;
	int x, myid = bootAP;
	u_int64_t msr, cr0;
	struct mdglobaldata *md;
	struct privatespace *ps;

	ps = CPU_prvspace[myid];

	gdt_segs[GPROC0_SEL].ssd_base =
		(long) &ps->mdglobaldata.gd_common_tss;
	ps->mdglobaldata.mi.gd_prvspace = ps;

	/* We fill the 32-bit segment descriptors */
	for (x = 0; x < NGDT; x++) {
		if (x != GPROC0_SEL && x != (GPROC0_SEL + 1))
			ssdtosd(&gdt_segs[x], &gdt[myid * NGDT + x]);
	}
	/* And now a 64-bit one */
	ssdtosyssd(&gdt_segs[GPROC0_SEL],
	    (struct system_segment_descriptor *)&gdt[myid * NGDT + GPROC0_SEL]);

	r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
	r_gdt.rd_base = (long) &gdt[myid * NGDT];
	lgdt(&r_gdt);			/* does magic intra-segment return */

	/* lgdt() destroys the GSBASE value, so we load GSBASE after lgdt() */
	wrmsr(MSR_FSBASE, 0);		/* User value */
	wrmsr(MSR_GSBASE, (u_int64_t)ps);
	wrmsr(MSR_KGSBASE, 0);		/* XXX User value while we're in the kernel */

	lidt(&r_idt_arr[mdcpu->mi.gd_cpuid]);

#if 0
	lldt(_default_ldt);
	mdcpu->gd_currentldt = _default_ldt;
#endif

	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	gdt[myid * NGDT + GPROC0_SEL].sd_type = SDT_SYSTSS;

	md = mdcpu;	/* loaded through %gs:0 (mdglobaldata.mi.gd_prvspace)*/

	md->gd_common_tss.tss_rsp0 = 0;	/* not used until after switch */
#if 0 /* JG XXX */
	md->gd_common_tss.tss_ioopt = (sizeof md->gd_common_tss) << 16;
#endif
	md->gd_tss_gdt = &gdt[myid * NGDT + GPROC0_SEL];
	md->gd_common_tssd = *md->gd_tss_gdt;

	/* double fault stack */
	md->gd_common_tss.tss_ist1 =
		(long)&md->mi.gd_prvspace->idlestack[
			sizeof(md->mi.gd_prvspace->idlestack)];

	ltr(gsel_tss);

	/*
	 * Set to a known state:
	 *	Set by mpboot.s: CR0_PG, CR0_PE
	 *	Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
	 */
	cr0 = rcr0();
	cr0 &= ~(CR0_CD | CR0_NW | CR0_EM);
	load_cr0(cr0);

	/* Set up the fast syscall stuff */
	msr = rdmsr(MSR_EFER) | EFER_SCE;
	wrmsr(MSR_EFER, msr);
	wrmsr(MSR_LSTAR, (u_int64_t)IDTVEC(fast_syscall));
	wrmsr(MSR_CSTAR, (u_int64_t)IDTVEC(fast_syscall32));
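	/*
	 * MSR_STAR layout (architecturally defined): bits 47:32 hold the
	 * kernel CS/SS selector base loaded by SYSCALL, bits 63:48 the
	 * selector base from which SYSRET derives the user CS/SS.  The
	 * low 32 bits (legacy 32-bit SYSCALL target EIP) are unused in
	 * long mode.
	 */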
	msr = ((u_int64_t)GSEL(GCODE_SEL, SEL_KPL) << 32) |
	      ((u_int64_t)GSEL(GUCODE32_SEL, SEL_UPL) << 48);
	wrmsr(MSR_STAR, msr);
	wrmsr(MSR_SF_MASK, PSL_NT|PSL_T|PSL_I|PSL_C|PSL_D|PSL_IOPL);

	pmap_set_opt();		/* PSE/4MB pages, etc */
	pmap_init_pat();	/* Page Attribute Table */

	/* set up CPU registers and state */
	cpu_setregs();

	/* set up SSE/NX registers */
	initializecpu(myid);

	/* set up FPU state on the AP */
	npxinit();

	/* disable the APIC, just to be SURE */
	lapic->svr &= ~APIC_SVR_ENABLE;
}

/*******************************************************************
 * local functions and data
 */

/*
 * Start the SMP system
 */
static void
mp_start_aps(void *dummy __unused)
{
	if (lapic_enable) {
		/* start each Application Processor */
		start_all_aps(boot_address);
	} else {
		mp_bsp_simple_setup();
	}
}
SYSINIT(startaps, SI_BOOT2_START_APS, SI_ORDER_FIRST, mp_start_aps, NULL);

/*
 * start each AP in our list
 */
static int
start_all_aps(u_int boot_addr)
{
	vm_offset_t va = boot_address + KERNBASE;
	u_int64_t *pt4, *pt3, *pt2;
	int pssize;
	int x, i;
	int shift;
	int smicount;
	int smibest;
	int smilast;
	u_char mpbiosreason;
	u_long mpbioswarmvec;
	struct mdglobaldata *gd;
	struct privatespace *ps;
	size_t ipiq_size;

	POSTCODE(START_ALL_APS_POST);

	/* install the AP 1st level boot code */
	pmap_kenter(va, boot_address);
	cpu_invlpg((void *)va);		/* JG XXX */
	bcopy(mptramp_start, (void *)va, bootMP_size);

	/* Locate the page tables, they'll be below the trampoline */
	pt4 = (u_int64_t *)(uintptr_t)(mptramp_pagetables + KERNBASE);
	pt3 = pt4 + (PAGE_SIZE) / sizeof(u_int64_t);
	pt2 = pt3 + (PAGE_SIZE) / sizeof(u_int64_t);

	/* Create the initial 1GB replicated page tables */
	for (i = 0; i < 512; i++) {
		/* Each slot of the level 4 pages points to the same level 3 page */
		pt4[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables + PAGE_SIZE);
		pt4[i] |= kernel_pmap.pmap_bits[PG_V_IDX] |
		    kernel_pmap.pmap_bits[PG_RW_IDX] |
		    kernel_pmap.pmap_bits[PG_U_IDX];

		/* Each slot of the level 3 pages points to the same level 2 page */
		pt3[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables + (2 * PAGE_SIZE));
		pt3[i] |= kernel_pmap.pmap_bits[PG_V_IDX] |
		    kernel_pmap.pmap_bits[PG_RW_IDX] |
		    kernel_pmap.pmap_bits[PG_U_IDX];

		/* The level 2 page slots are mapped with 2MB pages for 1GB. */
		pt2[i] = i * (2 * 1024 * 1024);
		pt2[i] |= kernel_pmap.pmap_bits[PG_V_IDX] |
		    kernel_pmap.pmap_bits[PG_RW_IDX] |
		    kernel_pmap.pmap_bits[PG_PS_IDX] |
		    kernel_pmap.pmap_bits[PG_U_IDX];
	}

	/* save the current value of the warm-start vector */
	mpbioswarmvec = *((u_int32_t *) WARMBOOT_OFF);
	outb(CMOS_REG, BIOS_RESET);
	mpbiosreason = inb(CMOS_DATA);

	/* setup a vector to our boot code */
	*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
	*((volatile u_short *) WARMBOOT_SEG) = (boot_address >> 4);
	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, BIOS_WARM);	/* 'warm-start' */

	/*
	 * If we have a TSC we can figure out the SMI interrupt rate.
	 * The SMI does not necessarily use a constant rate.  Spend
	 * up to 250ms trying to figure it out.
	 */
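	/*
	 * Illustrative example: an SMI firing every 2000us would yield
	 * smibest = 2000 below and print
	 * "SMI Frequency (worst case): 500 Hz (2000 us)".
	 */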
	smibest = 0;
	if (cpu_feature & CPUID_TSC) {
		set_apic_timer(275000);
		smilast = read_apic_timer();
		for (x = 0; x < 20 && read_apic_timer(); ++x) {
			smicount = smitest();
			if (smibest == 0 || smilast - smicount < smibest)
				smibest = smilast - smicount;
			smilast = smicount;
		}
		if (smibest > 250000)
			smibest = 0;
	}
	if (smibest)
		kprintf("SMI Frequency (worst case): %d Hz (%d us)\n",
			1000000 / smibest, smibest);

	/* start each AP */
	for (x = 1; x <= naps; ++x) {
		/* This is a bit verbose, it will go away soon.  */

		pssize = sizeof(struct privatespace);
		ps = (void *)kmem_alloc3(&kernel_map, pssize, VM_SUBSYS_GD,
					 KM_CPU(x));
		CPU_prvspace[x] = ps;
#if 0
		kprintf("ps %d %p %d\n", x, ps, pssize);
#endif
		bzero(ps, pssize);
		gd = &ps->mdglobaldata;
		gd->mi.gd_prvspace = ps;

		/* prime data page for it to use */
		mi_gdinit(&gd->mi, x);
		cpu_gdinit(gd, x);
		ipiq_size = sizeof(struct lwkt_ipiq) * (naps + 1);
		gd->mi.gd_ipiq = (void *)kmem_alloc3(&kernel_map, ipiq_size,
						     VM_SUBSYS_IPIQ, KM_CPU(x));
		bzero(gd->mi.gd_ipiq, ipiq_size);

		gd->gd_acpi_id = CPUID_TO_ACPIID(gd->mi.gd_cpuid);

		/* initialize arc4random. */
		arc4_init_pcpu(x);

		/* setup a vector to our boot code */
		*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
		*((volatile u_short *) WARMBOOT_SEG) = (boot_addr >> 4);
		outb(CMOS_REG, BIOS_RESET);
		outb(CMOS_DATA, BIOS_WARM);	/* 'warm-start' */

		/*
		 * Setup the AP boot stack
		 */
		bootSTK = &ps->idlestack[UPAGES * PAGE_SIZE - PAGE_SIZE];
		bootAP = x;

		/* attempt to start the Application Processor */
		CHECK_INIT(99);			/* setup checkpoints */
		if (!start_ap(gd, boot_addr, smibest)) {
			kprintf("\nAP #%d (PHY# %d) failed!\n",
				x, CPUID_TO_APICID(x));
			CHECK_PRINT("trace");	/* show checkpoints */
			/* better panic as the AP may be running loose */
			kprintf("panic y/n? [y] ");
			cnpoll(TRUE);
			if (cngetc() != 'n')
				panic("bye-bye");
			cnpoll(FALSE);
		}
		CHECK_PRINT("trace");		/* show checkpoints */
	}

	/* set ncpus to 1 + highest logical cpu.  Not all may have come up */
	ncpus = x;

	for (shift = 0; (1 << shift) <= ncpus; ++shift)
		;
	--shift;

	/* ncpus_fit -- ncpus rounded up to the nearest power of 2 */
	if ((1 << shift) < ncpus)
		++shift;
	ncpus_fit = 1 << shift;
	ncpus_fit_mask = ncpus_fit - 1;

	/* build our map of 'other' CPUs */
	mycpu->gd_other_cpus = smp_startup_mask;
	CPUMASK_NANDBIT(mycpu->gd_other_cpus, mycpu->gd_cpuid);

	gd = (struct mdglobaldata *)mycpu;
	gd->gd_acpi_id = CPUID_TO_ACPIID(mycpu->gd_cpuid);

	ipiq_size = sizeof(struct lwkt_ipiq) * ncpus;
	mycpu->gd_ipiq = (void *)kmem_alloc3(&kernel_map, ipiq_size,
					     VM_SUBSYS_IPIQ, KM_CPU(0));
	bzero(mycpu->gd_ipiq, ipiq_size);

	/* initialize arc4random. */
	arc4_init_pcpu(0);

	/* restore the warmstart vector */
	*(u_long *) WARMBOOT_OFF = mpbioswarmvec;
	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, mpbiosreason);
	/*
	 * NOTE!  The idlestack for the BSP was setup by locore.  Finish
	 * up, clean out the P==V mapping we did earlier.
	 */
	pmap_set_opt();

	/*
	 * Wait for all APs to finish initializing their LAPIC.
	 */
	if (bootverbose)
		kprintf("SMP: Waiting for APs to finish LAPIC initialization\n");
	if (cpu_feature & CPUID_TSC)
		tsc0_offset = rdtsc();
	tsc_offsets[0] = 0;
	mp_finish_lapic = 1;
	rel_mplock();

	while (CPUMASK_CMPMASKNEQ(smp_lapic_mask, smp_startup_mask)) {
		cpu_pause();
		cpu_lfence();
		if (cpu_feature & CPUID_TSC)
			tsc0_offset = rdtsc();
	}
	while (try_mplock() == 0) {
		cpu_pause();
		cpu_lfence();
	}

	/* number of APs actually started */
	return ncpus - 1;
}


/*
 * load the 1st level AP boot code into base memory.
 */

/* targets for relocation */
extern void bigJump(void);
extern void bootCodeSeg(void);
extern void bootDataSeg(void);
extern void MPentry(void);
extern u_int MP_GDT;
extern u_int mp_gdtbase;

#if 0

static void
install_ap_tramp(u_int boot_addr)
{
	int x;
	int size = *(int *) ((u_long) & bootMP_size);
	u_char *src = (u_char *) ((u_long) bootMP);
	u_char *dst = (u_char *) boot_addr + KERNBASE;
	u_int boot_base = (u_int) bootMP;
	u_int8_t *dst8;
	u_int16_t *dst16;
	u_int32_t *dst32;

	POSTCODE(INSTALL_AP_TRAMP_POST);

	for (x = 0; x < size; ++x)
		*dst++ = *src++;

	/*
	 * modify addresses in code we just moved to basemem. unfortunately we
	 * need fairly detailed info about mpboot.s for this to work.  changes
	 * to mpboot.s might require changes here.
	 */

	/* boot code is located in KERNEL space */
	dst = (u_char *) boot_addr + KERNBASE;

	/* modify the lgdt arg */
	dst32 = (u_int32_t *) (dst + ((u_int) & mp_gdtbase - boot_base));
	*dst32 = boot_addr + ((u_int) & MP_GDT - boot_base);

	/* modify the ljmp target for MPentry() */
	dst32 = (u_int32_t *) (dst + ((u_int) bigJump - boot_base) + 1);
	*dst32 = ((u_int) MPentry - KERNBASE);

	/* modify the target for boot code segment */
	dst16 = (u_int16_t *) (dst + ((u_int) bootCodeSeg - boot_base));
	dst8 = (u_int8_t *) (dst16 + 1);
	*dst16 = (u_int) boot_addr & 0xffff;
	*dst8 = ((u_int) boot_addr >> 16) & 0xff;

	/* modify the target for boot data segment */
	dst16 = (u_int16_t *) (dst + ((u_int) bootDataSeg - boot_base));
	dst8 = (u_int8_t *) (dst16 + 1);
	*dst16 = (u_int) boot_addr & 0xffff;
	*dst8 = ((u_int) boot_addr >> 16) & 0xff;
}

#endif

/*
 * This function starts the AP (application processor) identified
 * by the APIC ID 'physicalCpu'.  It does quite a "song and dance"
 * to accomplish this.  This is necessary because of the nuances
 * of the different hardware we might encounter.  It ain't pretty,
 * but it seems to work.
 *
 * NOTE: eventually an AP gets to ap_init(), which is called just
 *	 before the AP goes into the LWKT scheduler's idle loop.
 */
static int
start_ap(struct mdglobaldata *gd, u_int boot_addr, int smibest)
{
	int physical_cpu;
	int vector;
	u_long icr_lo, icr_hi;

	POSTCODE(START_AP_POST);

	/* get the PHYSICAL APIC ID# */
	physical_cpu = CPUID_TO_APICID(gd->mi.gd_cpuid);

	/* calculate the vector */
	vector = (boot_addr >> 12) & 0xff;

	/* We don't want anything interfering */
	cpu_disable_intr();

	/* Make sure the target cpu sees everything */
	wbinvd();

	/*
	 * Try to detect when a SMI has occurred, wait up to 200ms.
	 *
	 * If a SMI occurs during an AP reset but before we issue
	 * the STARTUP command, the AP may brick.  To work around
	 * this problem we hold off doing the AP startup until
	 * after we have detected the SMI.  Hopefully another SMI
	 * will not occur before we finish the AP startup.
	 *
	 * Retries don't seem to help.  SMIs have a window of opportunity
	 * and if USB->legacy keyboard emulation is enabled in the BIOS
	 * the interrupt rate can be quite high.
	 *
	 * NOTE: Don't worry about the L1 cache load, it might bloat
	 *	 ldelta a little but ndelta will be so huge when the SMI
	 *	 occurs the detection logic will still work fine.
	 */
	if (smibest) {
		set_apic_timer(200000);
		smitest();
	}

	/*
	 * First we do an INIT/RESET IPI.  This INIT IPI might be run,
	 * resetting and running the target CPU.  OR this INIT IPI might
	 * be latched (P5 bug), with the CPU waiting for the STARTUP IPI.
	 * OR this INIT IPI might be ignored.
	 *
	 * see apic/apicreg.h for icr bit definitions.
	 *
	 * TIME CRITICAL CODE, DO NOT DO ANY KPRINTFS IN THE HOT PATH.
	 */

	/*
	 * Setup the address for the target AP.  We can setup
	 * icr_hi once and then just trigger operations with
	 * icr_lo.
	 */
	icr_hi = lapic->icr_hi & ~APIC_ID_MASK;
	icr_hi |= (physical_cpu << 24);
	icr_lo = lapic->icr_lo & 0xfff00000;
	lapic->icr_hi = icr_hi;
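	/*
	 * ICR low-word encoding used below (see apic/apicreg.h): bits 7:0
	 * vector, bits 10:8 delivery mode (101 = INIT, 110 = STARTUP),
	 * bit 14 level assert, bit 15 level trigger.  Thus 0x00004500
	 * asserts INIT, 0x00008500 deasserts INIT (level triggered), and
	 * 0x00000600 | vector is a STARTUP IPI.
	 */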
	/*
	 * Do an INIT IPI: assert RESET
	 *
	 * Use edge triggered mode to assert INIT
	 */
	lapic->icr_lo = icr_lo | 0x00004500;
	while (lapic->icr_lo & APIC_DELSTAT_MASK)
		 /* spin */ ;

	/*
	 * The spec calls for a 10ms delay but we may have to use a
	 * MUCH lower delay to avoid bricking an AP due to a fast SMI
	 * interrupt.  We have other loops here too and dividing by 2
	 * doesn't seem to be enough even after subtracting 350us,
	 * so we divide by 4.
	 *
	 * Our minimum delay is 150uS, maximum is 10ms.  If no SMI
	 * interrupt was detected we use the full 10ms.
	 */
	if (smibest == 0)
		u_sleep(10000);
	else if (smibest < 150 * 4 + 350)
		u_sleep(150);
	else if ((smibest - 350) / 4 < 10000)
		u_sleep((smibest - 350) / 4);
	else
		u_sleep(10000);

	/*
	 * Do an INIT IPI: deassert RESET
	 *
	 * Use level triggered mode to deassert.  It is unclear
	 * why we need to do this.
	 */
	lapic->icr_lo = icr_lo | 0x00008500;
	while (lapic->icr_lo & APIC_DELSTAT_MASK)
		 /* spin */ ;
	u_sleep(150);				/* wait 150us */

	/*
	 * Next we do a STARTUP IPI: the previous INIT IPI might still be
	 * latched (P5 bug), in which case this 1st STARTUP would terminate
	 * immediately and the previously started INIT IPI would continue.
	 * OR the previous INIT IPI has already run, and this STARTUP IPI
	 * will run.  OR the previous INIT IPI was ignored, and this
	 * STARTUP IPI will run.
	 */
	lapic->icr_lo = icr_lo | 0x00000600 | vector;
	while (lapic->icr_lo & APIC_DELSTAT_MASK)
		 /* spin */ ;
	u_sleep(200);				/* wait ~200uS */

	/*
	 * Finally we do a 2nd STARTUP IPI: this 2nd STARTUP IPI should
	 * run IF the previous STARTUP IPI was cancelled by a latched
	 * INIT IPI.  OR this STARTUP IPI will be ignored, as only ONE
	 * STARTUP IPI is recognized after hardware RESET or INIT IPI.
	 */
	lapic->icr_lo = icr_lo | 0x00000600 | vector;
	while (lapic->icr_lo & APIC_DELSTAT_MASK)
		 /* spin */ ;
	/* Resume normal operation */
	cpu_enable_intr();

	/* wait for it to start, see ap_init() */
	set_apic_timer(5000000);		/* == 5 seconds */
	while (read_apic_timer()) {
		if (CPUMASK_TESTBIT(smp_startup_mask, gd->mi.gd_cpuid))
			return 1;	/* return SUCCESS */
	}

	return 0;		/* return FAILURE */
}

/*
 * Time a tight rdtsc() loop against the APIC timer.  An SMI shows up
 * as an iteration whose TSC delta is far larger (> 2x) than the
 * smallest delta observed so far, at which point we stop.  The caller
 * derives the SMI period from the APIC timer count we return.
 */
static
int
smitest(void)
{
	int64_t	ltsc;
	int64_t	ntsc;
	int64_t	ldelta;
	int64_t	ndelta;
	int count;

	ldelta = 0;
	ndelta = 0;
	while (read_apic_timer()) {
		ltsc = rdtsc();
		for (count = 0; count < 100; ++count)
			ntsc = rdtsc();	/* force loop to occur */
		if (ldelta) {
			ndelta = ntsc - ltsc;
			if (ldelta > ndelta)
				ldelta = ndelta;
			if (ndelta > ldelta * 2)
				break;
		} else {
			ldelta = ntsc - ltsc;
		}
	}
	return(read_apic_timer());
}

/*
 * Synchronously flush the TLB on all other CPU's.  The current cpu's
 * TLB is not flushed.  If the caller wishes to flush the current cpu's
 * TLB the caller must call cpu_invltlb() in addition to smp_invltlb().
 *
 * This routine may be called concurrently from multiple cpus.  When this
 * happens, smp_invltlb() can wind up sticking around in the confirmation
 * while() loop at the end as additional cpus are added to the global
 * cpumask, until they are acknowledged by another IPI.
 *
 * NOTE: If for some reason we were unable to start all cpus we cannot
 *	 safely use broadcast IPIs.
 */

cpumask_t smp_smurf_mask;
static cpumask_t smp_invltlb_mask;
#define LOOPRECOVER
#define LOOPMASK_IN
#ifdef LOOPMASK_IN
cpumask_t smp_in_mask;
#endif
cpumask_t smp_invmask;
extern cpumask_t smp_idleinvl_mask;
extern cpumask_t smp_idleinvl_reqs;

/*
 * Atomically OR bits in *mask into smp_smurf_mask.  Adjust *mask to remove
 * bits that do not need to be IPId.  These bits are still part of the
 * command, but the target cpus have already been signalled and do not need
 * to be signalled again.
 */
#include <sys/spinlock.h>
#include <sys/spinlock2.h>

static __noinline
void
smp_smurf_fetchset(cpumask_t *mask)
{
	cpumask_t omask;
	int i;
	__uint64_t obits;
	__uint64_t nbits;

	i = 0;
	while (i < CPUMASK_ELEMENTS) {
		obits = smp_smurf_mask.ary[i];
		cpu_ccfence();
		nbits = obits | mask->ary[i];
		if (atomic_cmpset_long(&smp_smurf_mask.ary[i], obits, nbits)) {
			omask.ary[i] = obits;
			++i;
		}
	}
	CPUMASK_NANDMASK(*mask, omask);
}
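/*
 * Illustrative example: if *mask requests cpus {2,3} and cpu 3 already
 * has its smurf bit set, smp_smurf_fetchset() leaves {2,3} set in
 * smp_smurf_mask but trims *mask to {2}, so only cpu 2 is sent a fresh
 * IPI; cpu 3 will pick the command up from the IPI it already has
 * pending.
 */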
/*
 * This is a mechanism which guarantees that cpu_invltlb() will be executed
 * on idle cpus without having to signal or wake them up.  The invltlb will
 * be executed when they wake up, prior to any scheduling or interrupt
 * thread.
 *
 * (*mask) is modified to remove the cpus we successfully negotiate this
 * function with.  This function may only be used with semi-synchronous
 * commands (typically invltlb's or semi-synchronous invalidations which
 * are usually associated only with kernel memory).
 */
void
smp_smurf_idleinvlclr(cpumask_t *mask)
{
	if (optimized_invltlb) {
		ATOMIC_CPUMASK_ORMASK(smp_idleinvl_reqs, *mask);
		/* cpu_lfence() not needed */
		CPUMASK_NANDMASK(*mask, smp_idleinvl_mask);
	}
}

/*
 * Issue cpu_invltlb() across all cpus except the current cpu.
 *
 * This function will arrange to avoid idle cpus, but still guarantee that
 * invltlb is run on them when they wake up prior to any scheduling or
 * nominal interrupt.
 */
void
smp_invltlb(void)
{
	struct mdglobaldata *md = mdcpu;
	cpumask_t mask;
	unsigned long rflags;
#ifdef LOOPRECOVER
	tsc_uclock_t tsc_base = rdtsc();
	int repeats = 0;
#endif

	if (report_invltlb_src > 0) {
		if (--report_invltlb_src <= 0)
			print_backtrace(8);
	}

	/*
	 * Disallow normal interrupts, set all active cpus except our own
	 * in the global smp_invltlb_mask.
	 */
	++md->mi.gd_cnt.v_smpinvltlb;
	crit_enter_gd(&md->mi);

	/*
	 * Bits we want to set in smp_invltlb_mask.  We do not want to
	 * signal our own cpu.  Also try to remove bits associated with
	 * idle cpus that we can flag for auto-invltlb.
	 */
	mask = smp_active_mask;
	CPUMASK_NANDBIT(mask, md->mi.gd_cpuid);
	smp_smurf_idleinvlclr(&mask);

	rflags = read_rflags();
	cpu_disable_intr();
	ATOMIC_CPUMASK_ORMASK(smp_invltlb_mask, mask);

	/*
	 * IPI non-idle cpus represented by mask.  The omask calculation
	 * removes cpus from the mask which already have a Xinvltlb IPI
	 * pending (avoid double-queueing the IPI).
	 *
	 * We must disable real interrupts when setting the smurf flags or
	 * we might race a XINVLTLB before we manage to send the ipi's for
	 * the bits we set.
	 *
	 * NOTE: We are not signalling ourselves, mask already does NOT
	 *	 include our own cpu.
	 */
	smp_smurf_fetchset(&mask);

	/*
	 * Issue the IPI.  Note that the XINVLTLB IPI runs regardless of
	 * the critical section count on the target cpus.
	 */
	CPUMASK_ORMASK(mask, md->mi.gd_cpumask);
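	/*
	 * all_but_self_ipi_enable semantics (from the test below):
	 * 0 never uses the broadcast shortcut, 1 (the default) uses it
	 * only when every started cpu is targeted, and >= 2 forces it
	 * unconditionally.
	 */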
	if (all_but_self_ipi_enable &&
	    (all_but_self_ipi_enable >= 2 ||
	     CPUMASK_CMPMASKEQ(smp_startup_mask, mask))) {
		all_but_self_ipi(XINVLTLB_OFFSET);
	} else {
		CPUMASK_NANDMASK(mask, md->mi.gd_cpumask);
		selected_apic_ipi(mask, XINVLTLB_OFFSET, APIC_DELMODE_FIXED);
	}

	/*
	 * Wait for acknowledgement by all cpus.  smp_inval_intr() will
	 * temporarily enable interrupts to avoid deadlocking the lapic,
	 * and will also handle running cpu_invltlb() and remote invlpg
	 * commands on our cpu if some other cpu requests it of us.
	 *
	 * WARNING! I originally tried to implement this as a hard loop
	 *	    checking only smp_invltlb_mask (and issuing a local
	 *	    cpu_invltlb() if requested), with interrupts enabled
	 *	    and without calling smp_inval_intr().  This DID NOT WORK.
	 *	    It resulted in weird races where smurf bits would get
	 *	    cleared without any action being taken.
	 */
	smp_inval_intr();
	CPUMASK_ASSZERO(mask);
	while (CPUMASK_CMPMASKNEQ(smp_invltlb_mask, mask)) {
		smp_inval_intr();
		cpu_pause();
#ifdef LOOPRECOVER
		if (tsc_frequency && rdtsc() - tsc_base > tsc_frequency) {
			/*
			 * cpuid	- cpu doing the waiting
			 * invltlb_mask - IPI in progress
			 */
			kprintf("smp_invltlb %d: waited too long inv=%08jx "
				"smurf=%08jx "
#ifdef LOOPMASK_IN
				"in=%08jx "
#endif
				"idle=%08jx/%08jx\n",
				md->mi.gd_cpuid,
				smp_invltlb_mask.ary[0],
				smp_smurf_mask.ary[0],
#ifdef LOOPMASK_IN
				smp_in_mask.ary[0],
#endif
				smp_idleinvl_mask.ary[0],
				smp_idleinvl_reqs.ary[0]);
			mdcpu->gd_xinvaltlb = 0;
			ATOMIC_CPUMASK_NANDMASK(smp_smurf_mask,
						smp_invltlb_mask);
			smp_invlpg(&smp_active_mask);
			tsc_base = rdtsc();
			if (++repeats > 10) {
				kprintf("smp_invltlb: giving up\n");
				CPUMASK_ASSZERO(smp_invltlb_mask);
			}
		}
#endif
	}
	write_rflags(rflags);
	crit_exit_gd(&md->mi);
}

/*
 * Called from a critical section with interrupts hard-disabled.
 * This function issues an XINVLTLB IPI and then executes any pending
 * command on the current cpu before returning.
 */
void
smp_invlpg(cpumask_t *cmdmask)
{
	struct mdglobaldata *md = mdcpu;
	cpumask_t mask;

	if (report_invlpg_src > 0) {
		if (--report_invlpg_src <= 0)
			print_backtrace(8);
	}

	/*
	 * Disallow normal interrupts, set all active cpus in the pmap,
	 * plus our own for completion processing (it might or might not
	 * be part of the set).
	 */
	mask = smp_active_mask;
	CPUMASK_ANDMASK(mask, *cmdmask);
	CPUMASK_ORMASK(mask, md->mi.gd_cpumask);

	/*
	 * Avoid double-queuing IPIs, which can deadlock us.  We must
	 * disable real interrupts when setting the smurf flags or we
	 * might race a XINVLTLB before we manage to send the ipi's for
	 * the bits we set.
	 *
	 * NOTE: We might be including our own cpu in the smurf mask.
	 */
	smp_smurf_fetchset(&mask);

	/*
	 * Issue the IPI.  Note that the XINVLTLB IPI runs regardless of
	 * the critical section count on the target cpus.
	 *
	 * We do not include our own cpu when issuing the IPI.
	 */
	if (all_but_self_ipi_enable &&
	    (all_but_self_ipi_enable >= 2 ||
	     CPUMASK_CMPMASKEQ(smp_startup_mask, mask))) {
		all_but_self_ipi(XINVLTLB_OFFSET);
	} else {
		CPUMASK_NANDMASK(mask, md->mi.gd_cpumask);
		selected_apic_ipi(mask, XINVLTLB_OFFSET, APIC_DELMODE_FIXED);
	}

	/*
	 * This will synchronously wait for our command to complete,
	 * as well as process commands from other cpus.  It also handles
	 * reentrancy.
	 *
	 * (interrupts are disabled and we are in a critical section here)
	 */
	smp_inval_intr();
}

void
smp_sniff(void)
{
	globaldata_t gd = mycpu;
	int dummy;
	register_t rflags;

	/*
	 * Ignore all_but_self_ipi_enable here and just use it.
	 */
	rflags = read_rflags();
	cpu_disable_intr();
	all_but_self_ipi(XSNIFF_OFFSET);
	gd->gd_sample_pc = smp_sniff;
	gd->gd_sample_sp = &dummy;
	write_rflags(rflags);
}
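/*
 * As above, but sample a single cpu.  The Xsniff handler on the target
 * is expected to overwrite gd_sample_pc/gd_sample_sp with the sampled
 * program counter and stack pointer for debugging/profiling code to
 * inspect.
 */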
void
cpu_sniff(int dcpu)
{
	globaldata_t rgd = globaldata_find(dcpu);
	register_t rflags;
	int dummy;

	/*
	 * Ignore all_but_self_ipi_enable here and just use it.
	 */
	rflags = read_rflags();
	cpu_disable_intr();
	single_apic_ipi(dcpu, XSNIFF_OFFSET, APIC_DELMODE_FIXED);
	rgd->gd_sample_pc = cpu_sniff;
	rgd->gd_sample_sp = &dummy;
	write_rflags(rflags);
}

/*
 * Called from Xinvltlb assembly with interrupts hard-disabled and in a
 * critical section.  gd_intr_nesting_level may or may not be bumped
 * depending on entry.
 *
 * THIS CODE IS INTENDED TO EXPLICITLY IGNORE THE CRITICAL SECTION COUNT.
 * THAT IS, THE INTERRUPT IS INTENDED TO FUNCTION EVEN WHEN MAINLINE CODE
 * IS IN A CRITICAL SECTION.
 */
void
smp_inval_intr(void)
{
	struct mdglobaldata *md = mdcpu;
	cpumask_t cpumask;
#ifdef LOOPRECOVER
	tsc_uclock_t tsc_base = rdtsc();
#endif

#if 0
	/*
	 * The idle code is in a critical section, but that doesn't stop
	 * Xinvltlb from executing, so deal with the race which can occur
	 * in that situation.  Otherwise r-m-w operations by pmap_inval_intr()
	 * may have problems.
	 */
	if (ATOMIC_CPUMASK_TESTANDCLR(smp_idleinvl_reqs, md->mi.gd_cpuid)) {
		ATOMIC_CPUMASK_NANDBIT(smp_invltlb_mask, md->mi.gd_cpuid);
		cpu_invltlb();
		cpu_mfence();
	}
#endif

	/*
	 * This is a real mess.  I'd like to just leave interrupts disabled
	 * but it can cause the lapic to deadlock if too many interrupts queue
	 * to it, due to the idiotic design of the lapic.  So instead we have
	 * to enter a critical section so normal interrupts are made pending
	 * and track whether this one was reentered.
	 */
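	/*
	 * gd_xinvaltlb states: 0 = not in the handler, 1 = handler
	 * running, 2 = reentered while running.  A reentrant invocation
	 * only marks state 2 and returns; the original invocation loops
	 * to pick up the new work.
	 */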
	if (md->gd_xinvaltlb) {			/* reentrant on cpu */
		md->gd_xinvaltlb = 2;
		return;
	}
	md->gd_xinvaltlb = 1;

	/*
	 * Check only those cpus with active Xinvl* commands pending.
	 *
	 * We are going to enable interrupts so make sure we are in a
	 * critical section.  This is necessary to avoid deadlocking
	 * the lapic and to ensure that we execute our commands prior to
	 * any nominal interrupt or preemption.
	 *
	 * WARNING! It is very important that we only clear our bit in
	 *	    smp_smurf_mask once for each interrupt we take.  In
	 *	    this case, we clear it on initial entry and only loop
	 *	    on the reentrancy detect (caused by another interrupt).
	 */
	cpumask = smp_invmask;
#ifdef LOOPMASK_IN
	ATOMIC_CPUMASK_ORBIT(smp_in_mask, md->mi.gd_cpuid);
#endif
loop:
	cpu_enable_intr();
	ATOMIC_CPUMASK_NANDBIT(smp_smurf_mask, md->mi.gd_cpuid);

	/*
	 * Specific page request(s), and we can't return until all bits
	 * are zero.
	 */
	for (;;) {
		int toolong;

		/*
		 * Also execute any pending full invalidation request in
		 * this loop.
		 */
		if (CPUMASK_TESTBIT(smp_invltlb_mask, md->mi.gd_cpuid)) {
			ATOMIC_CPUMASK_NANDBIT(smp_invltlb_mask,
					       md->mi.gd_cpuid);
			cpu_invltlb();
			cpu_mfence();
		}

#ifdef LOOPRECOVER
		if (tsc_frequency && rdtsc() - tsc_base > tsc_frequency) {
			/*
			 * cpuid	- cpu doing the waiting
			 * invmask	- IPI in progress
			 * invltlb_mask - which ones are TLB invalidations?
			 */
			kprintf("smp_inval_intr %d inv=%08jx tlbm=%08jx "
				"smurf=%08jx "
#ifdef LOOPMASK_IN
				"in=%08jx "
#endif
				"idle=%08jx/%08jx\n",
				md->mi.gd_cpuid,
				smp_invmask.ary[0],
				smp_invltlb_mask.ary[0],
				smp_smurf_mask.ary[0],
#ifdef LOOPMASK_IN
				smp_in_mask.ary[0],
#endif
				smp_idleinvl_mask.ary[0],
				smp_idleinvl_reqs.ary[0]);
			tsc_base = rdtsc();
			toolong = 1;
		} else {
			toolong = 0;
		}
#else
		toolong = 0;
#endif

		/*
		 * We can only add bits to the cpumask to test during the
		 * loop because the smp_invmask bit is cleared once the
		 * originator completes the command (the targets may still
		 * be cycling their own completions in this loop, afterwards).
		 *
		 * lfence required prior to all tests as this Xinvltlb
		 * interrupt could race the originator (already be in progress
		 * when the originator decides to issue, due to an issue by
		 * another cpu).
		 */
		cpu_lfence();
		CPUMASK_ORMASK(cpumask, smp_invmask);
		/*cpumask = smp_active_mask;*/	/* XXX */
		cpu_lfence();

		if (pmap_inval_intr(&cpumask, toolong) == 0) {
			/*
			 * Clear our smurf mask to allow new IPIs, but deal
			 * with potential races.
			 */
			break;
		}

		/*
		 * Test if someone sent us another invalidation IPI, break
		 * out so we can take it to avoid deadlocking the lapic
		 * interrupt queue (? stupid intel, amd).
		 */
		if (md->gd_xinvaltlb == 2)
			break;
		/*
		if (CPUMASK_TESTBIT(smp_smurf_mask, md->mi.gd_cpuid))
			break;
		*/
	}

	/*
	 * Full invalidation request
	 */
	if (CPUMASK_TESTBIT(smp_invltlb_mask, md->mi.gd_cpuid)) {
		ATOMIC_CPUMASK_NANDBIT(smp_invltlb_mask,
				       md->mi.gd_cpuid);
		cpu_invltlb();
		cpu_mfence();
	}

	/*
	 * Check to see if another Xinvltlb interrupt occurred and loop up
	 * if it did.
	 */
	cpu_disable_intr();
	if (md->gd_xinvaltlb == 2) {
		md->gd_xinvaltlb = 1;
		goto loop;
	}
#ifdef LOOPMASK_IN
	ATOMIC_CPUMASK_NANDBIT(smp_in_mask, md->mi.gd_cpuid);
#endif
	md->gd_xinvaltlb = 0;
}

void
cpu_wbinvd_on_all_cpus_callback(void *arg)
{
	wbinvd();
}

/*
 * When called the executing CPU will send an IPI to all other CPUs
 * requesting that they halt execution.
 *
 * Usually (but not necessarily) called with 'other_cpus' as its arg.
 *
 *  - Signals all CPUs in map to stop.
 *  - Waits for each to stop.
 *
 * Returns:
 *  -1: error
 *   0: NA
 *   1: ok
 *
 * XXX FIXME: this is not MP-safe, needs a lock to prevent multiple CPUs
 *	      from executing at same time.
 */
int
stop_cpus(cpumask_t map)
{
	cpumask_t mask;

	CPUMASK_ANDMASK(map, smp_active_mask);

	/* send the Xcpustop IPI to all CPUs in map */
	selected_apic_ipi(map, XCPUSTOP_OFFSET, APIC_DELMODE_FIXED);

	do {
		mask = stopped_cpus;
		CPUMASK_ANDMASK(mask, map);
		/* spin */
	} while (CPUMASK_CMPMASKNEQ(mask, map));

	return 1;
}
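/*
 * Illustrative pairing: a caller typically stops the other cpus with
 * stop_cpus(mycpu->gd_other_cpus) and later resumes them with
 * restart_cpus(stopped_cpus).
 */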
/*
 * Called by a CPU to restart stopped CPUs.
 *
 * Usually (but not necessarily) called with 'stopped_cpus' as its arg.
 *
 *  - Signals all CPUs in map to restart.
 *  - Waits for each to restart.
 *
 * Returns:
 *  -1: error
 *   0: NA
 *   1: ok
 */
int
restart_cpus(cpumask_t map)
{
	cpumask_t mask;

	/* signal other cpus to restart */
	mask = map;
	CPUMASK_ANDMASK(mask, smp_active_mask);
	cpu_ccfence();
	started_cpus = mask;
	cpu_ccfence();

	/* wait for each to clear its bit */
	while (CPUMASK_CMPMASKNEQ(stopped_cpus, map))
		cpu_pause();

	return 1;
}
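/*
 * AP bring-up handshake, in order: an AP sets its bit in
 * smp_startup_mask (it came out of the trampoline), then smp_lapic_mask
 * (its local APIC is initialized), then smp_active_mask (it can accept
 * IPIs), and finally smp_finalize_mask (fully initialized, entering the
 * idle loop).  The BSP waits on each mask in turn; see start_all_aps()
 * and ap_finish().
 */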
/*
 * This is called once the mpboot code has gotten us properly relocated
 * and the MMU turned on, etc.  ap_init() is actually the idle thread,
 * and when it returns the scheduler will call the real cpu_idle() main
 * loop for the idlethread.  Interrupts are disabled on entry and should
 * remain disabled at return.
 */
void
ap_init(void)
{
	int cpu_id;

	/*
	 * Adjust smp_startup_mask to signal the BSP that we have started
	 * up successfully.  Note that we do not yet hold the BGL.  The BSP
	 * is waiting for our signal.
	 *
	 * We can't set our bit in smp_active_mask yet because we are holding
	 * interrupts physically disabled and remote cpus could deadlock
	 * trying to send us an IPI.
	 */
	ATOMIC_CPUMASK_ORBIT(smp_startup_mask, mycpu->gd_cpuid);
	cpu_mfence();

	/*
	 * Interlock for LAPIC initialization.  Wait until mp_finish_lapic is
	 * non-zero, then get the MP lock.
	 *
	 * Note: We are in a critical section.
	 *
	 * Note: we are the idle thread, we can only spin.
	 *
	 * Note: The load fence is memory volatile and prevents the compiler
	 * from improperly caching mp_finish_lapic, and the cpu from improperly
	 * caching it.
	 */
	while (mp_finish_lapic == 0) {
		cpu_pause();
		cpu_lfence();
	}
#if 0
	while (try_mplock() == 0) {
		cpu_pause();
		cpu_lfence();
	}
#endif

	if (cpu_feature & CPUID_TSC) {
		/*
		 * The BSP is constantly updating tsc0_offset, figure out
		 * the relative difference to synchronize ktrdump.
		 */
		tsc_offsets[mycpu->gd_cpuid] = rdtsc() - tsc0_offset;
	}

	/* BSP may have changed PTD while we're waiting for the lock */
	cpu_invltlb();

	/* Build our map of 'other' CPUs. */
	mycpu->gd_other_cpus = smp_startup_mask;
	ATOMIC_CPUMASK_NANDBIT(mycpu->gd_other_cpus, mycpu->gd_cpuid);

	/* A quick check from sanity claus */
	cpu_id = APICID_TO_CPUID((lapic->id & 0xff000000) >> 24);
	if (mycpu->gd_cpuid != cpu_id) {
		kprintf("SMP: assigned cpuid = %d\n", mycpu->gd_cpuid);
		kprintf("SMP: actual cpuid = %d lapicid %d\n",
			cpu_id, (lapic->id & 0xff000000) >> 24);
#if 0 /* JGXXX */
		kprintf("PTD[MPPTDI] = %p\n", (void *)PTD[MPPTDI]);
#endif
		panic("cpuid mismatch! boom!!");
	}

	/* Initialize AP's local APIC for irq's */
	lapic_init(FALSE);

	/* LAPIC initialization is done */
	ATOMIC_CPUMASK_ORBIT(smp_lapic_mask, mycpu->gd_cpuid);
	cpu_mfence();

#if 0
	/* Let BSP move onto the next initialization stage */
	rel_mplock();
#endif

	/*
	 * Interlock for finalization.  Wait until mp_finish is non-zero,
	 * then get the MP lock.
	 *
	 * Note: We are in a critical section.
	 *
	 * Note: we are the idle thread, we can only spin.
	 *
	 * Note: The load fence is memory volatile and prevents the compiler
	 * from improperly caching mp_finish, and the cpu from improperly
	 * caching it.
	 */
	while (mp_finish == 0) {
		cpu_pause();
		cpu_lfence();
	}

	/* BSP may have changed PTD while we're waiting for the lock */
	cpu_invltlb();

	/* Set memory range attributes for this CPU to match the BSP */
	mem_range_AP_init();

	/*
	 * Once we go active we must process any IPIQ messages that may
	 * have been queued, because no actual IPI will occur until we
	 * set our bit in the smp_active_mask.  If we don't the IPI
	 * message interlock could be left set which would also prevent
	 * further IPIs.
	 *
	 * The idle loop doesn't expect the BGL to be held and while
	 * lwkt_switch() normally cleans things up this is a special case
	 * because we are returning almost directly into the idle loop.
	 *
	 * The idle thread is never placed on the runq, make sure
	 * nothing we've done put it there.
	 */

	/*
	 * Hold a critical section and allow real interrupts to occur.  Zero
	 * any spurious interrupts which have accumulated, then set our
	 * smp_active_mask indicating that we are fully operational.
	 */
	crit_enter();
	__asm __volatile("sti; pause; pause"::);
	bzero(mdcpu->gd_ipending, sizeof(mdcpu->gd_ipending));
	ATOMIC_CPUMASK_ORBIT(smp_active_mask, mycpu->gd_cpuid);

	/*
	 * Wait until all cpus have set their smp_active_mask and have fully
	 * operational interrupts before proceeding.
	 *
	 * We need a final cpu_invltlb() because we would not have received
	 * any until we set our bit in smp_active_mask.
	 */
	while (mp_finish == 1) {
		cpu_pause();
		cpu_lfence();
	}
	cpu_invltlb();

	/*
	 * Initialize per-cpu clocks and do other per-cpu initialization.
	 * At this point code is expected to be able to use the full kernel
	 * API.
	 */
	initclocks_pcpu();	/* clock interrupts (via IPIs) */

	/*
	 * Since we may have cleaned up the interrupt triggers, manually
	 * process any pending IPIs before exiting our critical section.
	 * Once the critical section has exited, normal interrupt processing
	 * may occur.
	 */
	atomic_swap_int(&mycpu->gd_npoll, 0);
	lwkt_process_ipiq();
	crit_exit();

	/*
	 * Final final, allow the waiting BSP to resume the boot process,
	 * return 'into' the idle thread bootstrap.
	 */
	ATOMIC_CPUMASK_ORBIT(smp_finalize_mask, mycpu->gd_cpuid);
	KKASSERT((curthread->td_flags & TDF_RUNQ) == 0);
}

/*
 * Get SMP fully working before we start initializing devices.
 */
static
void
ap_finish(void)
{
	if (bootverbose)
		kprintf("Finish MP startup\n");
	rel_mplock();

	/*
	 * Wait for the active mask to complete, after which all cpus will
	 * be accepting interrupts.
	 */
	mp_finish = 1;
	while (CPUMASK_CMPMASKNEQ(smp_active_mask, smp_startup_mask)) {
		cpu_pause();
		cpu_lfence();
	}

	/*
	 * Wait for the finalization mask to complete, after which all cpus
	 * have completely finished initializing and are entering or are in
	 * their idle thread.
	 *
	 * BSP should have received all required invltlbs but do another
	 * one just in case.
	 */
	cpu_invltlb();
	mp_finish = 2;
	while (CPUMASK_CMPMASKNEQ(smp_finalize_mask, smp_startup_mask)) {
		cpu_pause();
		cpu_lfence();
	}

	while (try_mplock() == 0) {
		cpu_pause();
		cpu_lfence();
	}

	if (bootverbose) {
		kprintf("Active CPU Mask: %016jx\n",
			(uintmax_t)CPUMASK_LOWMASK(smp_active_mask));
	}
}

SYSINIT(finishsmp, SI_BOOT2_FINISH_SMP, SI_ORDER_FIRST, ap_finish, NULL);

/*
 * Interrupts must be hard-disabled by caller
 */
void
cpu_send_ipiq(int dcpu)
{
	if (CPUMASK_TESTBIT(smp_active_mask, dcpu))
		single_apic_ipi(dcpu, XIPIQ_OFFSET, APIC_DELMODE_FIXED);
}

#if 0	/* single_apic_ipi_passive() not working yet */
/*
 * Returns 0 on failure, 1 on success
 */
int
cpu_send_ipiq_passive(int dcpu)
{
	int r = 0;

	if (CPUMASK_TESTBIT(smp_active_mask, dcpu)) {
		r = single_apic_ipi_passive(dcpu, XIPIQ_OFFSET,
					    APIC_DELMODE_FIXED);
	}
	return(r);
}
#endif

static void
mp_bsp_simple_setup(void)
{
	struct mdglobaldata *gd;
	size_t ipiq_size;

	/* build our map of 'other' CPUs */
	mycpu->gd_other_cpus = smp_startup_mask;
	CPUMASK_NANDBIT(mycpu->gd_other_cpus, mycpu->gd_cpuid);

	gd = (struct mdglobaldata *)mycpu;
	gd->gd_acpi_id = CPUID_TO_ACPIID(mycpu->gd_cpuid);

	ipiq_size = sizeof(struct lwkt_ipiq) * ncpus;
	mycpu->gd_ipiq = (void *)kmem_alloc(&kernel_map, ipiq_size,
					    VM_SUBSYS_IPIQ);
	bzero(mycpu->gd_ipiq, ipiq_size);

	/* initialize arc4random. */
	arc4_init_pcpu(0);

	pmap_set_opt();

	if (cpu_feature & CPUID_TSC)
		tsc0_offset = rdtsc();
}


/*
 * CPU TOPOLOGY DETECTION FUNCTIONS
 */

/*
 * Detect Intel topology using CPUID
 * Ref: http://www.intel.com/Assets/PDF/appnote/241618.pdf, pg 41
 */
static void
detect_intel_topology(int count_htt_cores)
{
	int shift = 0;
	int ecx_index = 0;
	int core_plus_logical_bits = 0;
	int cores_per_package;
	int logical_per_package;
	int logical_per_core;
	unsigned int p[4];

	if (cpu_high >= 0xb) {
		goto FUNC_B;

	} else if (cpu_high >= 0x4) {
		goto FUNC_4;

	} else {
		core_bits = 0;
		for (shift = 0; (1 << shift) < count_htt_cores; ++shift)
			;
		logical_CPU_bits = shift;	/* a bit width, not a count */
		return;
	}

FUNC_B:
	cpuid_count(0xb, FUNC_B_THREAD_LEVEL, p);

	/* if 0xb not supported - fallback to 0x4 */
	if (p[1] == 0 || (FUNC_B_TYPE(p[2]) != FUNC_B_THREAD_TYPE)) {
		goto FUNC_4;
	}

	logical_CPU_bits = FUNC_B_BITS_SHIFT_NEXT_LEVEL(p[0]);

	ecx_index = FUNC_B_THREAD_LEVEL + 1;
	do {
		cpuid_count(0xb, ecx_index, p);

		/* Check for the Core type in the implemented sub leaves. */
		if (FUNC_B_TYPE(p[2]) == FUNC_B_CORE_TYPE) {
			core_plus_logical_bits =
			    FUNC_B_BITS_SHIFT_NEXT_LEVEL(p[0]);
			break;
		}

		ecx_index++;

	} while (FUNC_B_TYPE(p[2]) != FUNC_B_INVALID_TYPE);

	core_bits = core_plus_logical_bits - logical_CPU_bits;

	return;

FUNC_4:
	cpuid_count(0x4, 0, p);
	cores_per_package = FUNC_4_MAX_CORE_NO(p[0]) + 1;

	logical_per_package = count_htt_cores;
	logical_per_core = logical_per_package / cores_per_package;

	for (shift = 0; (1 << shift) < logical_per_core; ++shift)
		;
	logical_CPU_bits = shift;

	for (shift = 0; (1 << shift) < cores_per_package; ++shift)
		;
	core_bits = shift;

	return;
}

/*
 * Detect AMD topology using CPUID
 * Ref: http://support.amd.com/us/Embedded_TechDocs/25481.pdf, last page
 */
static void
detect_amd_topology(int count_htt_cores)
{
	int shift = 0;

	if ((cpu_feature & CPUID_HTT) && (amd_feature2 & AMDID2_CMP)) {
		if (cpu_procinfo2 & AMDID_COREID_SIZE) {
			core_bits = (cpu_procinfo2 & AMDID_COREID_SIZE) >>
				    AMDID_COREID_SIZE_SHIFT;
		} else {
			core_bits = (cpu_procinfo2 & AMDID_CMP_CORES) + 1;
			for (shift = 0; (1 << shift) < core_bits; ++shift)
				;
			core_bits = shift;
		}

		if (amd_feature2 & AMDID2_TOPOEXT) {
			u_int p[4];
			int i;
			int type;
			int level;
			int share_count;

			for (i = 0; i < 256; ++i)  {
				cpuid_count(0x8000001d, i, p);
				type = p[0] & 0x1f;
				level = (p[0] >> 5) & 0x7;
				share_count = 1 + ((p[0] >> 14) & 0xfff);

				if (type == 0)
					break;
				if (bootverbose) {
					kprintf("Topology probe i=%2d "
						"type=%d level=%d "
						"share_count=%d\n",
						i, type, level, share_count);
				}
				if (type == 1 && share_count) {	/* CPUID_TYPE_SMT */
					for (shift = 0;
					     (1 << shift) < count_htt_cores /
							   share_count;
					     ++shift) {
						;
					}
					core_bits = shift;
					break;
				}
			}
		}

		logical_CPU_bits = count_htt_cores >> core_bits;
		for (shift = 0; (1 << shift) < logical_CPU_bits; ++shift)
			;
		logical_CPU_bits = shift;
	} else {
		for (shift = 0; (1 << shift) < count_htt_cores; ++shift)
			;
		core_bits = shift;
		logical_CPU_bits = 0;
	}
}

static void
amd_get_compute_unit_id(void *arg)
{
	u_int regs[4];
	cpu_node_t *mynode = get_cpu_node_by_cpuid(mycpuid);

	do_cpuid(0x8000001e, regs);

	/*
	 * AMD - CPUID Specification September 2010
	 * page 34 - //ComputeUnitID = ebx[0:7]//
	 */
	mynode->compute_unit_id = regs[1] & 0xff;
}

int
fix_amd_topology(void)
{
	cpumask_t mask;
	int i;

	if (cpu_vendor_id != CPU_VENDOR_AMD)
		return -1;
	if ((amd_feature2 & AMDID2_TOPOEXT) == 0)
		return -1;

	CPUMASK_ASSALLONES(mask);
	lwkt_cpusync_simple(mask, amd_get_compute_unit_id, NULL);

	kprintf("Compute unit IDs:\n");
	for (i = 0; i < ncpus; i++) {
		kprintf("%d-%d; \n",
			i, get_cpu_node_by_cpuid(i)->compute_unit_id);
	}
	return 0;
}
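/*
 * Illustrative example: on a package with 4 cores and 2 threads per
 * core, logical_CPU_bits = 1 and core_bits = 2, so an APIC ID
 * decomposes as [chip | core(2 bits) | thread(1 bit)]; APIC ID 0xb
 * (binary 1011) is chip 1, core 1, thread 1 under the accessors below.
 */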
/*
 * Calculate
 * - logical_CPU_bits
 * - core_bits
 * With the values above (for AMD or INTEL) we are able to generally
 * detect the CPU topology (number of cores for each level):
 * Ref: http://wiki.osdev.org/Detecting_CPU_Topology_(80x86)
 * Ref: http://www.multicoreinfo.com/research/papers/whitepapers/Intel-detect-topology.pdf
 */
void
detect_cpu_topology(void)
{
	static int topology_detected = 0;
	int count = 0;

	if (topology_detected)
		goto OUT;
	if ((cpu_feature & CPUID_HTT) == 0) {
		core_bits = 0;
		logical_CPU_bits = 0;
		goto OUT;
	}
	count = (cpu_procinfo & CPUID_HTT_CORES) >> CPUID_HTT_CORE_SHIFT;

	if (cpu_vendor_id == CPU_VENDOR_INTEL)
		detect_intel_topology(count);
	else if (cpu_vendor_id == CPU_VENDOR_AMD)
		detect_amd_topology(count);
	topology_detected = 1;

OUT:
	if (bootverbose) {
		kprintf("Bits within APICID: logical_CPU_bits: %d; "
			"core_bits: %d\n",
			logical_CPU_bits, core_bits);
	}
}

/*
 * Interface functions to calculate chip_ID,
 * core_number and logical_number
 * Ref: http://wiki.osdev.org/Detecting_CPU_Topology_(80x86)
 */
int
get_chip_ID(int cpuid)
{
	return get_apicid_from_cpuid(cpuid) >>
	    (logical_CPU_bits + core_bits);
}

int
get_chip_ID_from_APICID(int apicid)
{
	return apicid >> (logical_CPU_bits + core_bits);
}

int
get_core_number_within_chip(int cpuid)
{
	return ((get_apicid_from_cpuid(cpuid) >> logical_CPU_bits) &
		((1 << core_bits) - 1));
}

int
get_logical_CPU_number_within_core(int cpuid)
{
	return (get_apicid_from_cpuid(cpuid) &
		((1 << logical_CPU_bits) - 1));
}