/*
 * Copyright (c) 1996, by Steve Passe
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the developer may NOT be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/i386/mp_machdep.c,v 1.115.2.15 2003/03/14 21:22:35 jhb Exp $
 */

#include "opt_cpu.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/cons.h>	/* cngetc() */
#include <sys/machintr.h>
#include <sys/cpu_topology.h>

#include <sys/mplock2.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <sys/lock.h>
#include <vm/vm_map.h>
#include <sys/user.h>
#ifdef GPROF
#include <sys/gmon.h>
#endif

#include <machine/smp.h>
#include <machine_base/apic/apicreg.h>
#include <machine/atomic.h>
#include <machine/cpufunc.h>
#include <machine/cputypes.h>
#include <machine_base/apic/lapic.h>
#include <machine_base/apic/ioapic.h>
#include <machine_base/acpica/acpi_md_cpu.h>
#include <machine/psl.h>
#include <machine/segments.h>
#include <machine/tss.h>
#include <machine/specialreg.h>
#include <machine/globaldata.h>
#include <machine/pmap_inval.h>
#include <machine/clock.h>

#include <machine/md_var.h>		/* setidt() */
#include <machine_base/icu/icu.h>	/* IPIs */
#include <machine_base/icu/icu_var.h>
#include <machine_base/apic/ioapic_abi.h>
#include <machine/intr_machdep.h>	/* IPIs */

#define WARMBOOT_TARGET		0
#define WARMBOOT_OFF		(KERNBASE + 0x0467)
#define WARMBOOT_SEG		(KERNBASE + 0x0469)

#define CMOS_REG		(0x70)
#define CMOS_DATA		(0x71)
#define BIOS_RESET		(0x0f)
#define BIOS_WARM		(0x0a)

/*
 * this code MUST be enabled here and in mpboot.s.
 * it follows the very early stages of AP boot by placing values in CMOS ram.
 * it NORMALLY will never be needed and thus the primitive method for enabling.
 *
 */
#if defined(CHECK_POINTS)
#define CHECK_READ(A)	 (outb(CMOS_REG, (A)), inb(CMOS_DATA))
#define CHECK_WRITE(A,D) (outb(CMOS_REG, (A)), outb(CMOS_DATA, (D)))

#define CHECK_INIT(D);			\
	CHECK_WRITE(0x34, (D));		\
	CHECK_WRITE(0x35, (D));		\
	CHECK_WRITE(0x36, (D));		\
	CHECK_WRITE(0x37, (D));		\
	CHECK_WRITE(0x38, (D));		\
	CHECK_WRITE(0x39, (D));

#define CHECK_PRINT(S);			\
	kprintf("%s: %d, %d, %d, %d, %d, %d\n",	\
	   (S),				\
	   CHECK_READ(0x34),		\
	   CHECK_READ(0x35),		\
	   CHECK_READ(0x36),		\
	   CHECK_READ(0x37),		\
	   CHECK_READ(0x38),		\
	   CHECK_READ(0x39));

#else				/* CHECK_POINTS */

#define CHECK_INIT(D)
#define CHECK_PRINT(S)

#endif				/* CHECK_POINTS */

/*
 * Values to send to the POST hardware.
 */
#define MP_BOOTADDRESS_POST	0x10
#define MP_PROBE_POST		0x11
#define MPTABLE_PASS1_POST	0x12

#define MP_START_POST		0x13
#define MP_ENABLE_POST		0x14
#define MPTABLE_PASS2_POST	0x15

#define START_ALL_APS_POST	0x16
#define INSTALL_AP_TRAMP_POST	0x17
#define START_AP_POST		0x18

#define MP_ANNOUNCE_POST	0x19

/** XXX FIXME: where does this really belong, isa.h/isa.c perhaps? */
int	current_postcode;

/** XXX FIXME: what system files declare these??? */

extern int naps;

int64_t tsc0_offset;
extern int64_t tsc_offsets[];

/* AP uses this during bootstrap.  Do not staticize.  */
char *bootSTK;
static int bootAP;

struct pcb stoppcbs[MAXCPU];

extern inthand_t IDTVEC(fast_syscall), IDTVEC(fast_syscall32);

/*
 * Local data and functions.
 */

static u_int	boot_address;
static int	mp_finish;
static int	mp_finish_lapic;

static int	start_all_aps(u_int boot_addr);
#if 0
static void	install_ap_tramp(u_int boot_addr);
#endif
static int	start_ap(struct mdglobaldata *gd, u_int boot_addr, int smibest);
static int	smitest(void);
static void	mp_bsp_simple_setup(void);

/* which cpus have been started */
static cpumask_t smp_startup_mask = CPUMASK_INITIALIZER_ONLYONE;
/* which cpus have had their lapic initialized */
static cpumask_t smp_lapic_mask = CPUMASK_INITIALIZER_ONLYONE;
/* which cpus are ready for IPIs etc? */
cpumask_t smp_active_mask = CPUMASK_INITIALIZER_ONLYONE;
cpumask_t smp_finalize_mask = CPUMASK_INITIALIZER_ONLYONE;

SYSCTL_OPAQUE(_machdep, OID_AUTO, smp_active, CTLFLAG_RD,
	      &smp_active_mask, sizeof(smp_active_mask), "LU", "");
static u_int	bootMP_size;
static u_int	report_invlpg_src;
SYSCTL_INT(_machdep, OID_AUTO, report_invlpg_src, CTLFLAG_RW,
	   &report_invlpg_src, 0, "");
static u_int	report_invltlb_src;
SYSCTL_INT(_machdep, OID_AUTO, report_invltlb_src, CTLFLAG_RW,
	   &report_invltlb_src, 0, "");
static int	optimized_invltlb;
SYSCTL_INT(_machdep, OID_AUTO, optimized_invltlb, CTLFLAG_RW,
	   &optimized_invltlb, 0, "");
static int	all_but_self_ipi_enable = 1;
SYSCTL_INT(_machdep, OID_AUTO, all_but_self_ipi_enable, CTLFLAG_RW,
	   &all_but_self_ipi_enable, 0, "");

/* Local data for detecting CPU TOPOLOGY */
static int core_bits = 0;
static int logical_CPU_bits = 0;


/*
 * Calculate usable address in base memory for AP trampoline code.
 */
u_int
mp_bootaddress(u_int basemem)
{
	POSTCODE(MP_BOOTADDRESS_POST);

	bootMP_size = mptramp_end - mptramp_start;
	boot_address = trunc_page(basemem * 1024); /* round down to 4k boundary */
	if (((basemem * 1024) - boot_address) < bootMP_size)
		boot_address -= PAGE_SIZE;	/* not enough, lower by 4k */
	/* 3 levels of page table pages */
	mptramp_pagetables = boot_address - (PAGE_SIZE * 3);

	return mptramp_pagetables;
}

/*
 * Print various information about the SMP system hardware and setup.
 */
void
mp_announce(void)
{
	int	x;

	POSTCODE(MP_ANNOUNCE_POST);

	kprintf("DragonFly/MP: Multiprocessor motherboard\n");
	kprintf(" cpu0 (BSP): apic id: %2d\n", CPUID_TO_APICID(0));
	for (x = 1; x <= naps; ++x)
		kprintf(" cpu%d (AP): apic id: %2d\n", x, CPUID_TO_APICID(x));

	if (!ioapic_enable)
		kprintf(" Warning: APIC I/O disabled\n");
}

/*
 * AP cpus call this to sync up protected mode.
 *
 * WARNING! %gs is not set up on entry.  This routine sets up %gs.
 */
void
init_secondary(void)
{
	int	gsel_tss;
	int	x, myid = bootAP;
	u_int64_t msr, cr0;
	struct mdglobaldata *md;
	struct privatespace *ps;

	ps = CPU_prvspace[myid];

	gdt_segs[GPROC0_SEL].ssd_base = (long)&ps->common_tss;
	ps->mdglobaldata.mi.gd_prvspace = ps;

	/* We fill the 32-bit segment descriptors */
	for (x = 0; x < NGDT; x++) {
		if (x != GPROC0_SEL && x != (GPROC0_SEL + 1))
			ssdtosd(&gdt_segs[x], &gdt[myid * NGDT + x]);
	}
	/* And now a 64-bit one */
	ssdtosyssd(&gdt_segs[GPROC0_SEL],
	    (struct system_segment_descriptor *)&gdt[myid * NGDT + GPROC0_SEL]);

	r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
	r_gdt.rd_base = (long) &gdt[myid * NGDT];
	lgdt(&r_gdt);			/* does magic intra-segment return */

	/* lgdt() destroys the GSBASE value, so we load GSBASE after lgdt() */
	wrmsr(MSR_FSBASE, 0);		/* User value */
	wrmsr(MSR_GSBASE, (u_int64_t)ps);
	wrmsr(MSR_KGSBASE, 0);		/* XXX User value while we're in the kernel */

	lidt(&r_idt_arr[mdcpu->mi.gd_cpuid]);

#if 0
	lldt(_default_ldt);
	mdcpu->gd_currentldt = _default_ldt;
#endif

	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	gdt[myid * NGDT + GPROC0_SEL].sd_type = SDT_SYSTSS;

	md = mdcpu;	/* loaded through %gs:0 (mdglobaldata.mi.gd_prvspace) */

	/*
	 * TSS entry point for interrupts, traps, and exceptions
	 * (sans NMI).  This will always go to near the top of the pcpu
	 * trampoline area.  Hardware-pushed data will be copied into
	 * the trap-frame on entry, and (if necessary) returned to the
	 * trampoline on exit.
	 *
	 * We store some pcb data for the trampoline code above the
	 * stack the cpu hw pushes into, and arrange things so the
	 * address of tr_pcb_rsp is the same as the desired top of
	 * stack.
	 */
	ps->common_tss.tss_rsp0 = (register_t)&ps->trampoline.tr_pcb_rsp;
	ps->trampoline.tr_pcb_rsp = ps->common_tss.tss_rsp0;
	ps->trampoline.tr_pcb_gs_kernel = (register_t)md;
	ps->trampoline.tr_pcb_cr3 = KPML4phys;	/* adj to user cr3 live */
	ps->dbltramp.tr_pcb_gs_kernel = (register_t)md;
	ps->dbltramp.tr_pcb_cr3 = KPML4phys;
	ps->dbgtramp.tr_pcb_gs_kernel = (register_t)md;
	ps->dbgtramp.tr_pcb_cr3 = KPML4phys;

#if 0 /* JG XXX */
	ps->common_tss.tss_ioopt = (sizeof ps->common_tss) << 16;
#endif
	md->gd_tss_gdt = &gdt[myid * NGDT + GPROC0_SEL];
	md->gd_common_tssd = *md->gd_tss_gdt;

	/* double fault stack */
	ps->common_tss.tss_ist1 = (register_t)&ps->dbltramp.tr_pcb_rsp;
	ps->common_tss.tss_ist2 = (register_t)&ps->dbgtramp.tr_pcb_rsp;

	ltr(gsel_tss);

	/*
	 * Set to a known state:
	 *	Set by mpboot.s: CR0_PG, CR0_PE
	 *	Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
	 */
	cr0 = rcr0();
	cr0 &= ~(CR0_CD | CR0_NW | CR0_EM);
	load_cr0(cr0);

	/* Set up the fast syscall stuff */
	msr = rdmsr(MSR_EFER) | EFER_SCE;
	wrmsr(MSR_EFER, msr);
	wrmsr(MSR_LSTAR, (u_int64_t)IDTVEC(fast_syscall));
	wrmsr(MSR_CSTAR, (u_int64_t)IDTVEC(fast_syscall32));
	msr = ((u_int64_t)GSEL(GCODE_SEL, SEL_KPL) << 32) |
	      ((u_int64_t)GSEL(GUCODE32_SEL, SEL_UPL) << 48);
	wrmsr(MSR_STAR, msr);
	wrmsr(MSR_SF_MASK, PSL_NT|PSL_T|PSL_I|PSL_C|PSL_D|PSL_IOPL);

	pmap_set_opt();		/* PSE/4MB pages, etc */
	pmap_init_pat();	/* Page Attribute Table */

	/* set up CPU registers and state */
	cpu_setregs();

	/* set up SSE/NX registers */
	initializecpu(myid);

	/* set up FPU state on the AP */
	npxinit();

	/*
	 * If BSP is in the X2APIC mode, put the AP into the X2APIC mode.
	 */
	if (x2apic_enable)
		lapic_x2apic_enter(FALSE);

	/* disable the APIC, just to be SURE */
	LAPIC_WRITE(svr, (LAPIC_READ(svr) & ~APIC_SVR_ENABLE));
}

/*******************************************************************
 * local functions and data
 */

/*
 * Start the SMP system
 */
static void
mp_start_aps(void *dummy __unused)
{
	if (lapic_enable) {
		/* start each Application Processor */
		start_all_aps(boot_address);
	} else {
		mp_bsp_simple_setup();
	}
}
SYSINIT(startaps, SI_BOOT2_START_APS, SI_ORDER_FIRST, mp_start_aps, NULL);

/*
 * start each AP in our list
 */
static int
start_all_aps(u_int boot_addr)
{
	vm_offset_t va = boot_address + KERNBASE;
	u_int64_t *pt4, *pt3, *pt2;
	int	pssize;
	int	x, i;
	int	shift;
	int	smicount;
	int	smibest;
	int	smilast;
	u_char	mpbiosreason;
	u_long	mpbioswarmvec;
	struct mdglobaldata *gd;
	struct privatespace *ps;
	size_t	ipiq_size;

	POSTCODE(START_ALL_APS_POST);

	/* install the AP 1st level boot code */
	pmap_kenter(va, boot_address);
	cpu_invlpg((void *)va);		/* JG XXX */
	bcopy(mptramp_start, (void *)va, bootMP_size);

	/* Locate the page tables, they'll be below the trampoline */
	pt4 = (u_int64_t *)(uintptr_t)(mptramp_pagetables + KERNBASE);
	pt3 = pt4 + (PAGE_SIZE) / sizeof(u_int64_t);
	pt2 = pt3 + (PAGE_SIZE) / sizeof(u_int64_t);

	/* Create the initial 1GB replicated page tables */
	for (i = 0; i < 512; i++) {
		/* Each slot of the level 4 pages points to the same level 3 page */
		pt4[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables + PAGE_SIZE);
		pt4[i] |= kernel_pmap.pmap_bits[PG_V_IDX] |
			  kernel_pmap.pmap_bits[PG_RW_IDX] |
			  kernel_pmap.pmap_bits[PG_U_IDX];

		/* Each slot of the level 3 pages points to the same level 2 page */
		pt3[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables + (2 * PAGE_SIZE));
		pt3[i] |= kernel_pmap.pmap_bits[PG_V_IDX] |
			  kernel_pmap.pmap_bits[PG_RW_IDX] |
			  kernel_pmap.pmap_bits[PG_U_IDX];

		/* The level 2 page slots are mapped with 2MB pages for 1GB. */
		pt2[i] = i * (2 * 1024 * 1024);
		pt2[i] |= kernel_pmap.pmap_bits[PG_V_IDX] |
			  kernel_pmap.pmap_bits[PG_RW_IDX] |
			  kernel_pmap.pmap_bits[PG_PS_IDX] |
			  kernel_pmap.pmap_bits[PG_U_IDX];
	}

	/* save the current value of the warm-start vector */
	mpbioswarmvec = *((u_int32_t *) WARMBOOT_OFF);
	outb(CMOS_REG, BIOS_RESET);
	mpbiosreason = inb(CMOS_DATA);

	/* setup a vector to our boot code */
	*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
	*((volatile u_short *) WARMBOOT_SEG) = (boot_address >> 4);
	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, BIOS_WARM);	/* 'warm-start' */

	/*
	 * If we have a TSC we can figure out the SMI interrupt rate.
	 * The SMI does not necessarily use a constant rate.  Spend
	 * up to 250ms trying to figure it out.
	 */
	smibest = 0;
	if (cpu_feature & CPUID_TSC) {
		set_apic_timer(275000);
		smilast = read_apic_timer();
		for (x = 0; x < 20 && read_apic_timer(); ++x) {
			smicount = smitest();
			if (smibest == 0 || smilast - smicount < smibest)
				smibest = smilast - smicount;
			smilast = smicount;
		}
		if (smibest > 250000)
			smibest = 0;
	}
	if (smibest)
		kprintf("SMI Frequency (worst case): %d Hz (%d us)\n",
			1000000 / smibest, smibest);

	/* start each AP */
	for (x = 1; x <= naps; ++x) {
		/* This is a bit verbose, it will go away soon.  */

		pssize = sizeof(struct privatespace);
		ps = (void *)kmem_alloc3(&kernel_map, pssize, VM_SUBSYS_GD,
					 KM_CPU(x));
		CPU_prvspace[x] = ps;
#if 0
		kprintf("ps %d %p %d\n", x, ps, pssize);
#endif
		bzero(ps, pssize);
		gd = &ps->mdglobaldata;
		gd->mi.gd_prvspace = ps;

		/* prime data page for it to use */
		mi_gdinit(&gd->mi, x);
		cpu_gdinit(gd, x);
		ipiq_size = sizeof(struct lwkt_ipiq) * (naps + 1);
		gd->mi.gd_ipiq = (void *)kmem_alloc3(&kernel_map, ipiq_size,
						     VM_SUBSYS_IPIQ, KM_CPU(x));
		bzero(gd->mi.gd_ipiq, ipiq_size);

		gd->gd_acpi_id = CPUID_TO_ACPIID(gd->mi.gd_cpuid);

		/* initialize arc4random. */
		arc4_init_pcpu(x);

		/* setup a vector to our boot code */
		*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
		*((volatile u_short *) WARMBOOT_SEG) = (boot_addr >> 4);
		outb(CMOS_REG, BIOS_RESET);
		outb(CMOS_DATA, BIOS_WARM);	/* 'warm-start' */

		/*
		 * Setup the AP boot stack
		 */
		bootSTK = &ps->idlestack[UPAGES * PAGE_SIZE - PAGE_SIZE];
		bootAP = x;

		/* attempt to start the Application Processor */
		CHECK_INIT(99);	/* setup checkpoints */
		if (!start_ap(gd, boot_addr, smibest)) {
			kprintf("\nAP #%d (PHY# %d) failed!\n",
				x, CPUID_TO_APICID(x));
			CHECK_PRINT("trace");	/* show checkpoints */
			/* better panic as the AP may be running loose */
			kprintf("panic y/n? [y] ");
			cnpoll(TRUE);
			if (cngetc() != 'n')
				panic("bye-bye");
			cnpoll(FALSE);
		}
		CHECK_PRINT("trace");		/* show checkpoints */
	}

	/* set ncpus to 1 + highest logical cpu.  Not all may have come up */
	ncpus = x;

	for (shift = 0; (1 << shift) <= ncpus; ++shift)
		;
	--shift;

	/* ncpus_fit -- ncpus rounded up to the nearest power of 2 */
	if ((1 << shift) < ncpus)
		++shift;
	ncpus_fit = 1 << shift;
	ncpus_fit_mask = ncpus_fit - 1;

	/* build our map of 'other' CPUs */
	mycpu->gd_other_cpus = smp_startup_mask;
	CPUMASK_NANDBIT(mycpu->gd_other_cpus, mycpu->gd_cpuid);

	gd = (struct mdglobaldata *)mycpu;
	gd->gd_acpi_id = CPUID_TO_ACPIID(mycpu->gd_cpuid);

	ipiq_size = sizeof(struct lwkt_ipiq) * ncpus;
	mycpu->gd_ipiq = (void *)kmem_alloc3(&kernel_map, ipiq_size,
					     VM_SUBSYS_IPIQ, KM_CPU(0));
	bzero(mycpu->gd_ipiq, ipiq_size);

	/* initialize arc4random. */
	arc4_init_pcpu(0);

	/* restore the warmstart vector */
	*(u_long *) WARMBOOT_OFF = mpbioswarmvec;
	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, mpbiosreason);

	/*
	 * NOTE!  The idlestack for the BSP was setup by locore.  Finish
	 * up, clean out the P==V mapping we did earlier.
	 */
	pmap_set_opt();

	/*
	 * Wait for all APs to finish initializing their LAPIC
	 */
	if (bootverbose)
		kprintf("SMP: Waiting for APs to finish LAPIC initialization\n");
	if (cpu_feature & CPUID_TSC)
		tsc0_offset = rdtsc();
	tsc_offsets[0] = 0;
	mp_finish_lapic = 1;
	rel_mplock();

	while (CPUMASK_CMPMASKNEQ(smp_lapic_mask, smp_startup_mask)) {
		cpu_pause();
		cpu_lfence();
		if (cpu_feature & CPUID_TSC)
			tsc0_offset = rdtsc();
	}
	while (try_mplock() == 0) {
		cpu_pause();
		cpu_lfence();
	}

	/* number of APs actually started */
	return ncpus - 1;
}


/*
 * load the 1st level AP boot code into base memory.
 */

/* targets for relocation */
extern void bigJump(void);
extern void bootCodeSeg(void);
extern void bootDataSeg(void);
extern void MPentry(void);
extern u_int MP_GDT;
extern u_int mp_gdtbase;

#if 0

static void
install_ap_tramp(u_int boot_addr)
{
	int	x;
	int	size = *(int *) ((u_long) & bootMP_size);
	u_char *src = (u_char *) ((u_long) bootMP);
	u_char *dst = (u_char *) boot_addr + KERNBASE;
	u_int	boot_base = (u_int) bootMP;
	u_int8_t *dst8;
	u_int16_t *dst16;
	u_int32_t *dst32;

	POSTCODE(INSTALL_AP_TRAMP_POST);

	for (x = 0; x < size; ++x)
		*dst++ = *src++;

	/*
	 * modify addresses in code we just moved to basemem.  unfortunately we
	 * need fairly detailed info about mpboot.s for this to work.  changes
	 * to mpboot.s might require changes here.
	 */

	/* boot code is located in KERNEL space */
	dst = (u_char *) boot_addr + KERNBASE;

	/* modify the lgdt arg */
	dst32 = (u_int32_t *) (dst + ((u_int) & mp_gdtbase - boot_base));
	*dst32 = boot_addr + ((u_int) & MP_GDT - boot_base);

	/* modify the ljmp target for MPentry() */
	dst32 = (u_int32_t *) (dst + ((u_int) bigJump - boot_base) + 1);
	*dst32 = ((u_int) MPentry - KERNBASE);

	/* modify the target for boot code segment */
	dst16 = (u_int16_t *) (dst + ((u_int) bootCodeSeg - boot_base));
	dst8 = (u_int8_t *) (dst16 + 1);
	*dst16 = (u_int) boot_addr & 0xffff;
	*dst8 = ((u_int) boot_addr >> 16) & 0xff;

	/* modify the target for boot data segment */
	dst16 = (u_int16_t *) (dst + ((u_int) bootDataSeg - boot_base));
	dst8 = (u_int8_t *) (dst16 + 1);
	*dst16 = (u_int) boot_addr & 0xffff;
	*dst8 = ((u_int) boot_addr >> 16) & 0xff;
}

#endif

/*
 * This function starts the AP (application processor) identified
 * by the APIC ID 'physicalCpu'.  It does quite a "song and dance"
 * to accomplish this.  This is necessary because of the nuances
 * of the different hardware we might encounter.  It ain't pretty,
 * but it seems to work.
 *
 * NOTE: eventually an AP gets to ap_init(), which is called just
 *	 before the AP goes into the LWKT scheduler's idle loop.
 */
static int
start_ap(struct mdglobaldata *gd, u_int boot_addr, int smibest)
{
	int	physical_cpu;
	int	vector;

	POSTCODE(START_AP_POST);

	/* get the PHYSICAL APIC ID# */
	physical_cpu = CPUID_TO_APICID(gd->mi.gd_cpuid);

	/* calculate the vector */
	vector = (boot_addr >> 12) & 0xff;

	/* We don't want anything interfering */
	cpu_disable_intr();

	/* Make sure the target cpu sees everything */
	wbinvd();

	/*
	 * Try to detect when a SMI has occurred, wait up to 200ms.
	 *
	 * If a SMI occurs during an AP reset but before we issue
	 * the STARTUP command, the AP may brick.  To work around
	 * this problem we hold off doing the AP startup until
	 * after we have detected the SMI.  Hopefully another SMI
	 * will not occur before we finish the AP startup.
	 *
	 * Retries don't seem to help.  SMIs have a window of opportunity
	 * and if USB->legacy keyboard emulation is enabled in the BIOS
	 * the interrupt rate can be quite high.
	 *
	 * NOTE: Don't worry about the L1 cache load, it might bloat
	 *	 ldelta a little but ndelta will be so huge when the SMI
	 *	 occurs the detection logic will still work fine.
	 */
	if (smibest) {
		set_apic_timer(200000);
		smitest();
	}

	/*
	 * First we do an INIT/RESET IPI: this INIT IPI might be run,
	 * resetting and running the target CPU.  OR this INIT IPI might be
	 * latched (P5 bug), the CPU waiting for a STARTUP IPI.  OR this
	 * INIT IPI might be ignored.
	 *
	 * see apic/apicreg.h for icr bit definitions.
	 *
	 * TIME CRITICAL CODE, DO NOT DO ANY KPRINTFS IN THE HOT PATH.
	 */

	/*
	 * Do an INIT IPI: assert RESET
	 *
	 * Use edge triggered mode to assert INIT
	 */
	lapic_seticr_sync(physical_cpu,
	    APIC_DESTMODE_PHY |
	    APIC_DEST_DESTFLD |
	    APIC_TRIGMOD_EDGE |
	    APIC_LEVEL_ASSERT |
	    APIC_DELMODE_INIT);

	/*
	 * The spec calls for a 10ms delay but we may have to use a
	 * MUCH lower delay to avoid bricking an AP due to a fast SMI
	 * interrupt.  We have other loops here too and dividing by 2
	 * doesn't seem to be enough even after subtracting 350us,
	 * so we divide by 4.
	 *
	 * Our minimum delay is 150uS, maximum is 10ms.  If no SMI
	 * interrupt was detected we use the full 10ms.
	 */
	if (smibest == 0)
		u_sleep(10000);
	else if (smibest < 150 * 4 + 350)
		u_sleep(150);
	else if ((smibest - 350) / 4 < 10000)
		u_sleep((smibest - 350) / 4);
	else
		u_sleep(10000);

	/*
	 * Do an INIT IPI: deassert RESET
	 *
	 * Use level triggered mode to deassert.  It is unclear
	 * why we need to do this.
	 */
	lapic_seticr_sync(physical_cpu,
	    APIC_DESTMODE_PHY |
	    APIC_DEST_DESTFLD |
	    APIC_TRIGMOD_LEVEL |
	    APIC_LEVEL_DEASSERT |
	    APIC_DELMODE_INIT);
	u_sleep(150);				/* wait 150us */

	/*
	 * Next we do a STARTUP IPI: the previous INIT IPI might still be
	 * latched, (P5 bug) this 1st STARTUP would then terminate
	 * immediately, and the previously started INIT IPI would continue.  OR
	 * the previous INIT IPI has already run, and this STARTUP IPI will
	 * run.  OR the previous INIT IPI was ignored, and this STARTUP IPI
	 * will run.
	 *
	 * XXX set APIC_LEVEL_ASSERT
	 */
	lapic_seticr_sync(physical_cpu,
	    APIC_DESTMODE_PHY |
	    APIC_DEST_DESTFLD |
	    APIC_DELMODE_STARTUP |
	    vector);
	u_sleep(200);				/* wait ~200uS */

	/*
	 * Finally we do a 2nd STARTUP IPI: this 2nd STARTUP IPI should run IF
	 * the previous STARTUP IPI was cancelled by a latched INIT IPI.  OR
	 * this STARTUP IPI will be ignored, as only ONE STARTUP IPI is
	 * recognized after hardware RESET or INIT IPI.
	 *
	 * XXX set APIC_LEVEL_ASSERT
	 */
	lapic_seticr_sync(physical_cpu,
	    APIC_DESTMODE_PHY |
	    APIC_DEST_DESTFLD |
	    APIC_DELMODE_STARTUP |
	    vector);

	/* Resume normal operation */
	cpu_enable_intr();

	/* wait for it to start, see ap_init() */
	set_apic_timer(5000000);	/* == 5 seconds */
	while (read_apic_timer()) {
		if (CPUMASK_TESTBIT(smp_startup_mask, gd->mi.gd_cpuid))
			return 1;	/* return SUCCESS */
	}

	return 0;		/* return FAILURE */
}

static
int
smitest(void)
{
	int64_t	ltsc;
	int64_t	ntsc;
	int64_t	ldelta;
	int64_t	ndelta;
	int	count;

	ldelta = 0;
	ndelta = 0;
	while (read_apic_timer()) {
		ltsc = rdtsc();
		for (count = 0; count < 100; ++count)
			ntsc = rdtsc();	/* force loop to occur */
		if (ldelta) {
			ndelta = ntsc - ltsc;
			if (ldelta > ndelta)
				ldelta = ndelta;
			if (ndelta > ldelta * 2)
				break;
		} else {
			ldelta = ntsc - ltsc;
		}
	}
	return(read_apic_timer());
}

/*
 * Synchronously flush the TLB on all other CPUs.  The current cpu's
 * TLB is not flushed.  If the caller wishes to flush the current cpu's
 * TLB the caller must call cpu_invltlb() in addition to smp_invltlb().
 *
 * This routine may be called concurrently from multiple cpus.  When this
 * happens, smp_invltlb() can wind up sticking around in the confirmation
 * while() loop at the end as additional cpus are added to the global
 * cpumask, until they are acknowledged by another IPI.
 *
 * NOTE: If for some reason we were unable to start all cpus we cannot
 *	 safely use broadcast IPIs.
 */

cpumask_t smp_smurf_mask;
static cpumask_t smp_invltlb_mask;
#define LOOPRECOVER
#define LOOPMASK_IN
#ifdef LOOPMASK_IN
cpumask_t smp_in_mask;
#endif
cpumask_t smp_invmask;
extern cpumask_t smp_idleinvl_mask;
extern cpumask_t smp_idleinvl_reqs;

/*
 * Atomically OR bits in *mask to smp_smurf_mask.  Adjust *mask to remove
 * bits that do not need to be IPId.  These bits are still part of the command,
 * but the target cpus have already been signalled and do not need to be
 * signalled again.
 */
#include <sys/spinlock.h>
#include <sys/spinlock2.h>

static __noinline
void
smp_smurf_fetchset(cpumask_t *mask)
{
	cpumask_t omask;
	int i;
	__uint64_t obits;
	__uint64_t nbits;

	i = 0;
	while (i < CPUMASK_ELEMENTS) {
		obits = smp_smurf_mask.ary[i];
		cpu_ccfence();
		nbits = obits | mask->ary[i];
		if (atomic_cmpset_long(&smp_smurf_mask.ary[i], obits, nbits)) {
			omask.ary[i] = obits;
			++i;
		}
	}
	CPUMASK_NANDMASK(*mask, omask);
}

/*
 * This is a mechanism which guarantees that cpu_invltlb() will be executed
 * on idle cpus without having to signal or wake them up.  The invltlb will be
 * executed when they wake up, prior to any scheduling or interrupt thread.
 *
 * (*mask) is modified to remove the cpus we successfully negotiate this
 * function with.  This function may only be used with semi-synchronous
 * commands (typically invltlb's or semi-synchronous invalidations which
 * are usually associated only with kernel memory).
 */
void
smp_smurf_idleinvlclr(cpumask_t *mask)
{
	if (optimized_invltlb) {
		ATOMIC_CPUMASK_ORMASK(smp_idleinvl_reqs, *mask);
		/* cpu_lfence() not needed */
		CPUMASK_NANDMASK(*mask, smp_idleinvl_mask);
	}
}

/*
 * Issue cpu_invltlb() across all cpus except the current cpu.
 *
 * This function will arrange to avoid idle cpus, but still guarantee that
 * invltlb is run on them when they wake up prior to any scheduling or
 * nominal interrupt.
 */
void
smp_invltlb(void)
{
	struct mdglobaldata *md = mdcpu;
	cpumask_t mask;
	unsigned long rflags;
#ifdef LOOPRECOVER
	tsc_uclock_t tsc_base = rdtsc();
	int repeats = 0;
#endif

	if (report_invltlb_src > 0) {
		if (--report_invltlb_src <= 0)
			print_backtrace(8);
	}

	/*
	 * Disallow normal interrupts, set all active cpus except our own
	 * in the global smp_invltlb_mask.
	 */
	++md->mi.gd_cnt.v_smpinvltlb;
	crit_enter_gd(&md->mi);

	/*
	 * Bits we want to set in smp_invltlb_mask.  We do not want to signal
	 * our own cpu.  Also try to remove bits associated with idle cpus
	 * that we can flag for auto-invltlb.
	 */
	mask = smp_active_mask;
	CPUMASK_NANDBIT(mask, md->mi.gd_cpuid);
	smp_smurf_idleinvlclr(&mask);

	rflags = read_rflags();
	cpu_disable_intr();
	ATOMIC_CPUMASK_ORMASK(smp_invltlb_mask, mask);

	/*
	 * IPI non-idle cpus represented by mask.  The omask calculation
	 * removes cpus from the mask which already have a Xinvltlb IPI
	 * pending (avoid double-queueing the IPI).
	 *
	 * We must disable real interrupts when setting the smurf flags or
	 * we might race a XINVLTLB before we manage to send the ipi's for
	 * the bits we set.
	 *
	 * NOTE: We are not signalling ourselves, mask already does NOT
	 *	 include our own cpu.
	 */
	smp_smurf_fetchset(&mask);

	/*
	 * Issue the IPI.  Note that the XINVLTLB IPI runs regardless of
	 * the critical section count on the target cpus.
	 */
	CPUMASK_ORMASK(mask, md->mi.gd_cpumask);
	if (all_but_self_ipi_enable &&
	    (all_but_self_ipi_enable >= 2 ||
	     CPUMASK_CMPMASKEQ(smp_startup_mask, mask))) {
		all_but_self_ipi(XINVLTLB_OFFSET);
	} else {
		CPUMASK_NANDMASK(mask, md->mi.gd_cpumask);
		selected_apic_ipi(mask, XINVLTLB_OFFSET, APIC_DELMODE_FIXED);
	}

	/*
	 * Wait for acknowledgement by all cpus.  smp_inval_intr() will
	 * temporarily enable interrupts to avoid deadlocking the lapic,
	 * and will also handle running cpu_invltlb() and remote invlpg
	 * commands on our cpu if some other cpu requests it of us.
	 *
	 * WARNING! I originally tried to implement this as a hard loop
	 *	    checking only smp_invltlb_mask (and issuing a local
	 *	    cpu_invltlb() if requested), with interrupts enabled
	 *	    and without calling smp_inval_intr().  This DID NOT WORK.
	 *	    It resulted in weird races where smurf bits would get
	 *	    cleared without any action being taken.
	 */
	smp_inval_intr();
	CPUMASK_ASSZERO(mask);
	while (CPUMASK_CMPMASKNEQ(smp_invltlb_mask, mask)) {
		smp_inval_intr();
		cpu_pause();
#ifdef LOOPRECOVER
		if (tsc_frequency && rdtsc() - tsc_base > tsc_frequency) {
			/*
			 * cpuid	- cpu doing the waiting
			 * invltlb_mask - IPI in progress
			 */
			kprintf("smp_invltlb %d: waited too long inv=%08jx "
				"smurf=%08jx "
#ifdef LOOPMASK_IN
				"in=%08jx "
#endif
				"idle=%08jx/%08jx\n",
				md->mi.gd_cpuid,
				smp_invltlb_mask.ary[0],
				smp_smurf_mask.ary[0],
#ifdef LOOPMASK_IN
				smp_in_mask.ary[0],
#endif
				smp_idleinvl_mask.ary[0],
				smp_idleinvl_reqs.ary[0]);
			mdcpu->gd_xinvaltlb = 0;
			ATOMIC_CPUMASK_NANDMASK(smp_smurf_mask,
						smp_invltlb_mask);
			smp_invlpg(&smp_active_mask);
			tsc_base = rdtsc();
			if (++repeats > 10) {
				kprintf("smp_invltlb: giving up\n");
				CPUMASK_ASSZERO(smp_invltlb_mask);
			}
		}
#endif
	}
	write_rflags(rflags);
	crit_exit_gd(&md->mi);
}

/*
 * Called from a critical section with interrupts hard-disabled.
 * This function issues an XINVLTLB IPI and then executes any pending
 * command on the current cpu before returning.
 */
void
smp_invlpg(cpumask_t *cmdmask)
{
	struct mdglobaldata *md = mdcpu;
	cpumask_t mask;

	if (report_invlpg_src > 0) {
		if (--report_invlpg_src <= 0)
			print_backtrace(8);
	}

	/*
	 * Disallow normal interrupts, set all active cpus in the pmap,
	 * plus our own for completion processing (it might or might not
	 * be part of the set).
	 */
	mask = smp_active_mask;
	CPUMASK_ANDMASK(mask, *cmdmask);
	CPUMASK_ORMASK(mask, md->mi.gd_cpumask);

	/*
	 * Avoid double-queuing IPIs, which can deadlock us.  We must disable
	 * real interrupts when setting the smurf flags or we might race a
	 * XINVLTLB before we manage to send the ipi's for the bits we set.
	 *
	 * NOTE: We might be including our own cpu in the smurf mask.
	 */
	smp_smurf_fetchset(&mask);

	/*
	 * Issue the IPI.  Note that the XINVLTLB IPI runs regardless of
	 * the critical section count on the target cpus.
	 *
	 * We do not include our own cpu when issuing the IPI.
	 */
	if (all_but_self_ipi_enable &&
	    (all_but_self_ipi_enable >= 2 ||
	     CPUMASK_CMPMASKEQ(smp_startup_mask, mask))) {
		all_but_self_ipi(XINVLTLB_OFFSET);
	} else {
		CPUMASK_NANDMASK(mask, md->mi.gd_cpumask);
		selected_apic_ipi(mask, XINVLTLB_OFFSET, APIC_DELMODE_FIXED);
	}

	/*
	 * This will synchronously wait for our command to complete,
	 * as well as process commands from other cpus.  It also handles
	 * reentrancy.
	 *
	 * (interrupts are disabled and we are in a critical section here)
	 */
	smp_inval_intr();
}

void
smp_sniff(void)
{
	globaldata_t gd = mycpu;
	int dummy;
	register_t rflags;

	/*
	 * Ignore all_but_self_ipi_enable here and just use it.
	 */
	rflags = read_rflags();
	cpu_disable_intr();
	all_but_self_ipi(XSNIFF_OFFSET);
	gd->gd_sample_pc = smp_sniff;
	gd->gd_sample_sp = &dummy;
	write_rflags(rflags);
}

void
cpu_sniff(int dcpu)
{
	globaldata_t rgd = globaldata_find(dcpu);
	register_t rflags;
	int dummy;

	/*
	 * Ignore all_but_self_ipi_enable here and just use it.
	 */
	rflags = read_rflags();
	cpu_disable_intr();
	single_apic_ipi(dcpu, XSNIFF_OFFSET, APIC_DELMODE_FIXED);
	rgd->gd_sample_pc = cpu_sniff;
	rgd->gd_sample_sp = &dummy;
	write_rflags(rflags);
}

/*
 * Called from Xinvltlb assembly with interrupts hard-disabled and in a
 * critical section.  gd_intr_nesting_level may or may not be bumped
 * depending on entry.
 *
 * THIS CODE IS INTENDED TO EXPLICITLY IGNORE THE CRITICAL SECTION COUNT.
 * THAT IS, THE INTERRUPT IS INTENDED TO FUNCTION EVEN WHEN MAINLINE CODE
 * IS IN A CRITICAL SECTION.
 */
void
smp_inval_intr(void)
{
	struct mdglobaldata *md = mdcpu;
	cpumask_t cpumask;
#ifdef LOOPRECOVER
	tsc_uclock_t tsc_base = rdtsc();
#endif

#if 0
	/*
	 * The idle code is in a critical section, but that doesn't stop
	 * Xinvltlb from executing, so deal with the race which can occur
	 * in that situation.  Otherwise r-m-w operations by pmap_inval_intr()
	 * may have problems.
	 */
	if (ATOMIC_CPUMASK_TESTANDCLR(smp_idleinvl_reqs, md->mi.gd_cpuid)) {
		ATOMIC_CPUMASK_NANDBIT(smp_invltlb_mask, md->mi.gd_cpuid);
		cpu_invltlb();
		cpu_mfence();
	}
#endif

	/*
	 * This is a real mess.  I'd like to just leave interrupts disabled
	 * but it can cause the lapic to deadlock if too many interrupts queue
	 * to it, due to the idiotic design of the lapic.  So instead we have
	 * to enter a critical section so normal interrupts are made pending
	 * and track whether this one was reentered.
	 */
	if (md->gd_xinvaltlb) {		/* reentrant on cpu */
		md->gd_xinvaltlb = 2;
		return;
	}
	md->gd_xinvaltlb = 1;

	/*
	 * Check only those cpus with active Xinvl* commands pending.
	 *
	 * We are going to enable interrupts so make sure we are in a
	 * critical section.  This is necessary to avoid deadlocking
	 * the lapic and to ensure that we execute our commands prior to
	 * any nominal interrupt or preemption.
	 *
	 * WARNING! It is very important that we only clear our bit in
	 *	    smp_smurf_mask once for each interrupt we take.  In
	 *	    this case, we clear it on initial entry and only loop
	 *	    on the reentrancy detect (caused by another interrupt).
	 */
	cpumask = smp_invmask;
#ifdef LOOPMASK_IN
	ATOMIC_CPUMASK_ORBIT(smp_in_mask, md->mi.gd_cpuid);
#endif
loop:
	cpu_enable_intr();
	ATOMIC_CPUMASK_NANDBIT(smp_smurf_mask, md->mi.gd_cpuid);

	/*
	 * Specific page request(s), and we can't return until all bits
	 * are zero.
	 */
	for (;;) {
		int toolong;

		/*
		 * Also execute any pending full invalidation request in
		 * this loop.
		 */
		if (CPUMASK_TESTBIT(smp_invltlb_mask, md->mi.gd_cpuid)) {
			ATOMIC_CPUMASK_NANDBIT(smp_invltlb_mask,
					       md->mi.gd_cpuid);
			cpu_invltlb();
			cpu_mfence();
		}

#ifdef LOOPRECOVER
		if (tsc_frequency && rdtsc() - tsc_base > tsc_frequency) {
			/*
			 * cpuid	- cpu doing the waiting
			 * invmask	- IPI in progress
			 * invltlb_mask - which ones are TLB invalidations?
			 */
			kprintf("smp_inval_intr %d inv=%08jx tlbm=%08jx "
				"smurf=%08jx "
#ifdef LOOPMASK_IN
				"in=%08jx "
#endif
				"idle=%08jx/%08jx\n",
				md->mi.gd_cpuid,
				smp_invmask.ary[0],
				smp_invltlb_mask.ary[0],
				smp_smurf_mask.ary[0],
#ifdef LOOPMASK_IN
				smp_in_mask.ary[0],
#endif
				smp_idleinvl_mask.ary[0],
				smp_idleinvl_reqs.ary[0]);
			tsc_base = rdtsc();
			toolong = 1;
		} else {
			toolong = 0;
		}
#else
		toolong = 0;
#endif

		/*
		 * We can only add bits to the cpumask to test during the
		 * loop because the smp_invmask bit is cleared once the
		 * originator completes the command (the targets may still
		 * be cycling their own completions in this loop, afterwards).
		 *
		 * lfence required prior to all tests as this Xinvltlb
		 * interrupt could race the originator (already be in progress
		 * when the originator decides to issue, due to an issue by
		 * another cpu).
		 */
		cpu_lfence();
		CPUMASK_ORMASK(cpumask, smp_invmask);
		/*cpumask = smp_active_mask;*/	/* XXX */
		cpu_lfence();

		if (pmap_inval_intr(&cpumask, toolong) == 0) {
			/*
			 * Clear our smurf mask to allow new IPIs, but deal
			 * with potential races.
			 */
			break;
		}

		/*
		 * Test if someone sent us another invalidation IPI, break
		 * out so we can take it to avoid deadlocking the lapic
		 * interrupt queue (? stupid intel, amd).
		 */
		if (md->gd_xinvaltlb == 2)
			break;
		/*
		if (CPUMASK_TESTBIT(smp_smurf_mask, md->mi.gd_cpuid))
			break;
		*/
	}

	/*
	 * Full invalidation request
	 */
	if (CPUMASK_TESTBIT(smp_invltlb_mask, md->mi.gd_cpuid)) {
		ATOMIC_CPUMASK_NANDBIT(smp_invltlb_mask,
				       md->mi.gd_cpuid);
		cpu_invltlb();
		cpu_mfence();
	}

	/*
	 * Check to see if another Xinvltlb interrupt occurred and loop up
	 * if it did.
	 */
	cpu_disable_intr();
	if (md->gd_xinvaltlb == 2) {
		md->gd_xinvaltlb = 1;
		goto loop;
	}
#ifdef LOOPMASK_IN
	ATOMIC_CPUMASK_NANDBIT(smp_in_mask, md->mi.gd_cpuid);
#endif
	md->gd_xinvaltlb = 0;
}

void
cpu_wbinvd_on_all_cpus_callback(void *arg)
{
	wbinvd();
}

/*
 * When called the executing CPU will send an IPI to all other CPUs
 * requesting that they halt execution.
 *
 * Usually (but not necessarily) called with 'other_cpus' as its arg.
 *
 * - Signals all CPUs in map to stop.
 * - Waits for each to stop.
 *
 * Returns:
 *  -1: error
 *   0: NA
 *   1: ok
 *
 * XXX FIXME: this is not MP-safe, needs a lock to prevent multiple CPUs
 *	      from executing at same time.
 */
int
stop_cpus(cpumask_t map)
{
	cpumask_t mask;

	CPUMASK_ANDMASK(map, smp_active_mask);

	/* send the Xcpustop IPI to all CPUs in map */
	selected_apic_ipi(map, XCPUSTOP_OFFSET, APIC_DELMODE_FIXED);

	do {
		mask = stopped_cpus;
		CPUMASK_ANDMASK(mask, map);
		/* spin */
	} while (CPUMASK_CMPMASKNEQ(mask, map));

	return 1;
}


/*
 * Called by a CPU to restart stopped CPUs.
 *
 * Usually (but not necessarily) called with 'stopped_cpus' as its arg.
 *
 * - Signals all CPUs in map to restart.
 * - Waits for each to restart.
 *
 * Returns:
 *  -1: error
 *   0: NA
 *   1: ok
 */
int
restart_cpus(cpumask_t map)
{
	cpumask_t mask;

	/* signal other cpus to restart */
	mask = map;
	CPUMASK_ANDMASK(mask, smp_active_mask);
	cpu_ccfence();
	started_cpus = mask;
	cpu_ccfence();

	/* wait for each to clear its bit */
	while (CPUMASK_CMPMASKNEQ(stopped_cpus, map))
		cpu_pause();

	return 1;
}

/*
 * This is called once the mpboot code has gotten us properly relocated
 * and the MMU turned on, etc.  ap_init() is actually the idle thread,
 * and when it returns the scheduler will call the real cpu_idle() main
 * loop for the idlethread.  Interrupts are disabled on entry and should
 * remain disabled at return.
 */
void
ap_init(void)
{
	int	cpu_id;

	/*
	 * Adjust smp_startup_mask to signal the BSP that we have started
	 * up successfully.  Note that we do not yet hold the BGL.  The BSP
	 * is waiting for our signal.
	 *
	 * We can't set our bit in smp_active_mask yet because we are holding
	 * interrupts physically disabled and remote cpus could deadlock
	 * trying to send us an IPI.
	 */
	ATOMIC_CPUMASK_ORBIT(smp_startup_mask, mycpu->gd_cpuid);
	cpu_mfence();

	/*
	 * Interlock for LAPIC initialization.  Wait until mp_finish_lapic is
	 * non-zero, then get the MP lock.
	 *
	 * Note: We are in a critical section.
	 *
	 * Note: we are the idle thread, we can only spin.
	 *
	 * Note: The load fence is memory volatile and prevents the compiler
	 * from improperly caching mp_finish_lapic, and the cpu from improperly
	 * caching it.
	 */
	while (mp_finish_lapic == 0) {
		cpu_pause();
		cpu_lfence();
	}
#if 0
	while (try_mplock() == 0) {
		cpu_pause();
		cpu_lfence();
	}
#endif

	if (cpu_feature & CPUID_TSC) {
		/*
		 * The BSP is constantly updating tsc0_offset, figure out
		 * the relative difference to synchronize ktrdump.
		 */
		tsc_offsets[mycpu->gd_cpuid] = rdtsc() - tsc0_offset;
	}

	/* BSP may have changed PTD while we're waiting for the lock */
	cpu_invltlb();

	/* Build our map of 'other' CPUs. */
	mycpu->gd_other_cpus = smp_startup_mask;
	ATOMIC_CPUMASK_NANDBIT(mycpu->gd_other_cpus, mycpu->gd_cpuid);

	/* A quick check from sanity claus */
	cpu_id = APICID_TO_CPUID(LAPIC_READID);
	if (mycpu->gd_cpuid != cpu_id) {
		kprintf("SMP: assigned cpuid = %d\n", mycpu->gd_cpuid);
		kprintf("SMP: actual cpuid = %d lapicid %d\n",
			cpu_id, LAPIC_READID);
#if 0 /* JGXXX */
		kprintf("PTD[MPPTDI] = %p\n", (void *)PTD[MPPTDI]);
#endif
		panic("cpuid mismatch! boom!!");
	}

	/* Initialize AP's local APIC for irq's */
	lapic_init(FALSE);

	/* LAPIC initialization is done */
	ATOMIC_CPUMASK_ORBIT(smp_lapic_mask, mycpu->gd_cpuid);
	cpu_mfence();

#if 0
	/* Let BSP move onto the next initialization stage */
	rel_mplock();
#endif

	/*
	 * Interlock for finalization.  Wait until mp_finish is non-zero,
	 * then get the MP lock.
	 *
	 * Note: We are in a critical section.
	 *
	 * Note: we are the idle thread, we can only spin.
	 *
	 * Note: The load fence is memory volatile and prevents the compiler
	 * from improperly caching mp_finish, and the cpu from improperly
	 * caching it.
	 */
	while (mp_finish == 0) {
		cpu_pause();
		cpu_lfence();
	}

	/* BSP may have changed PTD while we're waiting for the lock */
	cpu_invltlb();

	/* Set memory range attributes for this CPU to match the BSP */
	mem_range_AP_init();

	/*
	 * Once we go active we must process any IPIQ messages that may
	 * have been queued, because no actual IPI will occur until we
	 * set our bit in the smp_active_mask.  If we don't the IPI
	 * message interlock could be left set which would also prevent
	 * further IPIs.
	 *
	 * The idle loop doesn't expect the BGL to be held and while
	 * lwkt_switch() normally cleans things up this is a special case
	 * because we are returning almost directly into the idle loop.
	 *
	 * The idle thread is never placed on the runq, make sure
	 * nothing we've done put it there.
	 */

	/*
	 * Hold a critical section and allow real interrupts to occur.  Zero
	 * any spurious interrupts which have accumulated, then set our
	 * smp_active_mask indicating that we are fully operational.
	 */
	crit_enter();
	__asm __volatile("sti; pause; pause"::);
	bzero(mdcpu->gd_ipending, sizeof(mdcpu->gd_ipending));
	ATOMIC_CPUMASK_ORBIT(smp_active_mask, mycpu->gd_cpuid);

	/*
	 * Wait until all cpus have set their smp_active_mask and have fully
	 * operational interrupts before proceeding.
	 *
	 * We need a final cpu_invltlb() because we would not have received
	 * any until we set our bit in smp_active_mask.
	 */
	while (mp_finish == 1) {
		cpu_pause();
		cpu_lfence();
	}
	cpu_invltlb();

	/*
	 * Initialize per-cpu clocks and do other per-cpu initialization.
	 * At this point code is expected to be able to use the full kernel
	 * API.
	 */
	initclocks_pcpu();	/* clock interrupts (via IPIs) */

	/*
	 * Since we may have cleaned up the interrupt triggers, manually
	 * process any pending IPIs before exiting our critical section.
	 * Once the critical section has exited, normal interrupt processing
	 * may occur.
	 */
	atomic_swap_int(&mycpu->gd_npoll, 0);
	lwkt_process_ipiq();
	crit_exit();

	/*
	 * Final final, allow the waiting BSP to resume the boot process,
	 * return 'into' the idle thread bootstrap.
	 */
	ATOMIC_CPUMASK_ORBIT(smp_finalize_mask, mycpu->gd_cpuid);
	KKASSERT((curthread->td_flags & TDF_RUNQ) == 0);
}

/*
 * Get SMP fully working before we start initializing devices.
 */
static
void
ap_finish(void)
{
	if (bootverbose)
		kprintf("Finish MP startup\n");
	rel_mplock();

	/*
	 * Wait for the active mask to complete, after which all cpus will
	 * be accepting interrupts.
	 */
	mp_finish = 1;
	while (CPUMASK_CMPMASKNEQ(smp_active_mask, smp_startup_mask)) {
		cpu_pause();
		cpu_lfence();
	}

	/*
	 * Wait for the finalization mask to complete, after which all cpus
	 * have completely finished initializing and are entering or are in
	 * their idle thread.
	 *
	 * BSP should have received all required invltlbs but do another
	 * one just in case.
	 */
	cpu_invltlb();
	mp_finish = 2;
	while (CPUMASK_CMPMASKNEQ(smp_finalize_mask, smp_startup_mask)) {
		cpu_pause();
		cpu_lfence();
	}

	while (try_mplock() == 0) {
		cpu_pause();
		cpu_lfence();
	}

	if (bootverbose) {
		kprintf("Active CPU Mask: %016jx\n",
			(uintmax_t)CPUMASK_LOWMASK(smp_active_mask));
	}
}

SYSINIT(finishsmp, SI_BOOT2_FINISH_SMP, SI_ORDER_FIRST, ap_finish, NULL);

/*
 * Interrupts must be hard-disabled by caller
 */
void
cpu_send_ipiq(int dcpu)
{
	if (CPUMASK_TESTBIT(smp_active_mask, dcpu))
		single_apic_ipi(dcpu, XIPIQ_OFFSET, APIC_DELMODE_FIXED);
}

#if 0	/* single_apic_ipi_passive() not working yet */
/*
 * Returns 0 on failure, 1 on success
 */
int
cpu_send_ipiq_passive(int dcpu)
{
	int r = 0;

	if (CPUMASK_TESTBIT(smp_active_mask, dcpu)) {
		r = single_apic_ipi_passive(dcpu, XIPIQ_OFFSET,
					    APIC_DELMODE_FIXED);
	}
	return(r);
}
#endif

static void
mp_bsp_simple_setup(void)
{
	struct mdglobaldata *gd;
	size_t ipiq_size;

	/* build our map of 'other' CPUs */
	mycpu->gd_other_cpus = smp_startup_mask;
	CPUMASK_NANDBIT(mycpu->gd_other_cpus, mycpu->gd_cpuid);

	gd = (struct mdglobaldata *)mycpu;
	gd->gd_acpi_id = CPUID_TO_ACPIID(mycpu->gd_cpuid);

	ipiq_size = sizeof(struct lwkt_ipiq) * ncpus;
	mycpu->gd_ipiq = (void *)kmem_alloc(&kernel_map, ipiq_size,
					    VM_SUBSYS_IPIQ);
	bzero(mycpu->gd_ipiq, ipiq_size);

	/* initialize arc4random. */
	arc4_init_pcpu(0);

	pmap_set_opt();

	if (cpu_feature & CPUID_TSC)
		tsc0_offset = rdtsc();
}


/*
 * CPU TOPOLOGY DETECTION FUNCTIONS
 */

/* Detect intel topology using CPUID
 * Ref: http://www.intel.com/Assets/PDF/appnote/241618.pdf, pg 41
 */
static void
detect_intel_topology(int count_htt_cores)
{
	int shift = 0;
	int ecx_index = 0;
	int core_plus_logical_bits = 0;
	int cores_per_package;
	int logical_per_package;
	int logical_per_core;
	unsigned int p[4];

	if (cpu_high >= 0xb) {
		goto FUNC_B;

	} else if (cpu_high >= 0x4) {
		goto FUNC_4;

	} else {
		core_bits = 0;
		for (shift = 0; (1 << shift) < count_htt_cores; ++shift)
			;
		logical_CPU_bits = 1 << shift;
		return;
	}

FUNC_B:
	cpuid_count(0xb, FUNC_B_THREAD_LEVEL, p);

	/* if 0xb not supported - fallback to 0x4 */
	if (p[1] == 0 || (FUNC_B_TYPE(p[2]) != FUNC_B_THREAD_TYPE)) {
		goto FUNC_4;
	}

	logical_CPU_bits = FUNC_B_BITS_SHIFT_NEXT_LEVEL(p[0]);

	ecx_index = FUNC_B_THREAD_LEVEL + 1;
	do {
		cpuid_count(0xb, ecx_index, p);

		/*
		 * Check for the Core type in the implemented sub leaves.
		 */
		if (FUNC_B_TYPE(p[2]) == FUNC_B_CORE_TYPE) {
			core_plus_logical_bits = FUNC_B_BITS_SHIFT_NEXT_LEVEL(p[0]);
			break;
		}

		ecx_index++;

	} while (FUNC_B_TYPE(p[2]) != FUNC_B_INVALID_TYPE);

	core_bits = core_plus_logical_bits - logical_CPU_bits;

	return;

FUNC_4:
	cpuid_count(0x4, 0, p);
	cores_per_package = FUNC_4_MAX_CORE_NO(p[0]) + 1;

	logical_per_package = count_htt_cores;
	logical_per_core = logical_per_package / cores_per_package;

	for (shift = 0; (1 << shift) < logical_per_core; ++shift)
		;
	logical_CPU_bits = shift;

	for (shift = 0; (1 << shift) < cores_per_package; ++shift)
		;
	core_bits = shift;

	return;
}

/* Detect AMD topology using CPUID
 * Ref: http://support.amd.com/us/Embedded_TechDocs/25481.pdf, last page
 */
static void
detect_amd_topology(int count_htt_cores)
{
	int shift = 0;

	if ((cpu_feature & CPUID_HTT) && (amd_feature2 & AMDID2_CMP)) {
		if (cpu_procinfo2 & AMDID_COREID_SIZE) {
			core_bits = (cpu_procinfo2 & AMDID_COREID_SIZE) >>
				    AMDID_COREID_SIZE_SHIFT;
		} else {
			core_bits = (cpu_procinfo2 & AMDID_CMP_CORES) + 1;
			for (shift = 0; (1 << shift) < core_bits; ++shift)
				;
			core_bits = shift;
		}

		if (amd_feature2 & AMDID2_TOPOEXT) {
			u_int p[4];
			int i;
			int type;
			int level;
			int share_count;

			for (i = 0; i < 256; ++i) {
				cpuid_count(0x8000001d, i, p);
				type = p[0] & 0x1f;
				level = (p[0] >> 5) & 0x7;
				share_count = 1 + ((p[0] >> 14) & 0xfff);

				if (type == 0)
					break;
				if (bootverbose)
					kprintf("Topology probe i=%2d type=%d level=%d share_count=%d\n",
						i, type, level, share_count);
				if (type == 1 && share_count) {	/* CPUID_TYPE_SMT */
					for (shift = 0; (1 << shift) < count_htt_cores / share_count; ++shift)
						;
					core_bits = shift;
					break;
				}
			}
		}

		logical_CPU_bits = count_htt_cores >> core_bits;
		for (shift = 0; (1 << shift) < logical_CPU_bits; ++shift)
			;
		logical_CPU_bits = shift;
	} else {
		for (shift = 0; (1 << shift) < count_htt_cores; ++shift)
			;
		core_bits = shift;
		logical_CPU_bits = 0;
	}
}

static void
amd_get_compute_unit_id(void *arg)
{
	u_int regs[4];
	cpu_node_t *mynode;

	do_cpuid(0x8000001e, regs);
	mynode = get_cpu_node_by_cpuid(mycpuid);

	/*
	 * AMD - CPUID Specification September 2010
	 * page 34 - //ComputeUnitID = ebx[0:7]//
	 */
	mynode->compute_unit_id = regs[1] & 0xff;
}

int
fix_amd_topology(void)
{
	cpumask_t mask;
	int i;

	if (cpu_vendor_id != CPU_VENDOR_AMD)
		return -1;
	if ((amd_feature2 & AMDID2_TOPOEXT) == 0)
		return -1;

	CPUMASK_ASSALLONES(mask);
	lwkt_cpusync_simple(mask, amd_get_compute_unit_id, NULL);

	kprintf("Compute unit IDs:\n");
	for (i = 0; i < ncpus; i++) {
		kprintf("%d-%d; \n",
			i, get_cpu_node_by_cpuid(i)->compute_unit_id);
	}
	return 0;
}

/*
 * Calculate
 * - logical_CPU_bits
 * - core_bits
 * With the values above (for AMD or INTEL) we are able to generally
 * detect the CPU topology (number of cores for each level):
 * Ref: http://wiki.osdev.org/Detecting_CPU_Topology_(80x86)
 * Ref: http://www.multicoreinfo.com/research/papers/whitepapers/Intel-detect-topology.pdf
 */
void
detect_cpu_topology(void)
{
	static int topology_detected = 0;
	int count = 0;

	if (topology_detected)
		goto OUT;
	if ((cpu_feature & CPUID_HTT) == 0) {
		core_bits = 0;
		logical_CPU_bits = 0;
		goto OUT;
	}
	count = (cpu_procinfo & CPUID_HTT_CORES) >> CPUID_HTT_CORE_SHIFT;

	if (cpu_vendor_id == CPU_VENDOR_INTEL)
		detect_intel_topology(count);
	else if (cpu_vendor_id == CPU_VENDOR_AMD)
		detect_amd_topology(count);
	topology_detected = 1;

OUT:
	if (bootverbose) {
		kprintf("Bits within APICID: logical_CPU_bits: %d; "
			"core_bits: %d\n",
			logical_CPU_bits, core_bits);
	}
}

/*
 * Interface functions to calculate chip_ID,
 * core_number and logical_number
 * Ref: http://wiki.osdev.org/Detecting_CPU_Topology_(80x86)
 */
int
get_chip_ID(int cpuid)
{
	return get_apicid_from_cpuid(cpuid) >>
	    (logical_CPU_bits + core_bits);
}

int
get_chip_ID_from_APICID(int apicid)
{
	return apicid >> (logical_CPU_bits + core_bits);
}

int
get_core_number_within_chip(int cpuid)
{
	return ((get_apicid_from_cpuid(cpuid) >> logical_CPU_bits) &
		((1 << core_bits) - 1));
}

int
get_logical_CPU_number_within_core(int cpuid)
{
	return (get_apicid_from_cpuid(cpuid) &
		((1 << logical_CPU_bits) - 1));
}
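
/*
 * Worked example of the decoding done by the interface functions above,
 * using hypothetical values (not taken from any particular machine):
 * with logical_CPU_bits = 1 and core_bits = 2, an APIC ID of 13 (0b1101)
 * decodes as chip_ID 1 (13 >> 3), core number 2 within the chip
 * ((13 >> 1) & 0x3), and logical cpu 1 within the core (13 & 0x1).
 */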