/*
 * Copyright (c) 1996, by Steve Passe
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the developer may NOT be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/i386/mp_machdep.c,v 1.115.2.15 2003/03/14 21:22:35 jhb Exp $
 */

#include "opt_cpu.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/cons.h>			/* cngetc() */
#include <sys/machintr.h>
#include <sys/cpu_topology.h>

#include <sys/mplock2.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <sys/lock.h>
#include <vm/vm_map.h>

#include <machine/smp.h>
#include <machine_base/apic/apicreg.h>
#include <machine/atomic.h>
#include <machine/cpufunc.h>
#include <machine/cputypes.h>
#include <machine_base/apic/lapic.h>
#include <machine_base/apic/ioapic.h>
#include <machine_base/acpica/acpi_md_cpu.h>
#include <machine/psl.h>
#include <machine/segments.h>
#include <machine/tss.h>
#include <machine/specialreg.h>
#include <machine/globaldata.h>
#include <machine/pmap_inval.h>
#include <machine/clock.h>

#include <machine/md_var.h>		/* setidt() */
#include <machine_base/icu/icu.h>	/* IPIs */
#include <machine_base/icu/icu_var.h>
#include <machine_base/apic/ioapic_abi.h>
#include <machine/intr_machdep.h>	/* IPIs */

#define WARMBOOT_TARGET		0
#define WARMBOOT_OFF		(KERNBASE + 0x0467)
#define WARMBOOT_SEG		(KERNBASE + 0x0469)

#define CMOS_REG		(0x70)
#define CMOS_DATA		(0x71)
#define BIOS_RESET		(0x0f)
#define BIOS_WARM		(0x0a)

/*
 * this code MUST be enabled here and in mpboot.s.
 * it follows the very early stages of AP boot by placing values in CMOS ram.
 * it NORMALLY will never be needed and thus the primitive method for enabling.
 *
 */
#if defined(CHECK_POINTS)
#define CHECK_READ(A)	 (outb(CMOS_REG, (A)), inb(CMOS_DATA))
#define CHECK_WRITE(A,D) (outb(CMOS_REG, (A)), outb(CMOS_DATA, (D)))

#define CHECK_INIT(D);			\
	CHECK_WRITE(0x34, (D));		\
	CHECK_WRITE(0x35, (D));		\
	CHECK_WRITE(0x36, (D));		\
	CHECK_WRITE(0x37, (D));		\
	CHECK_WRITE(0x38, (D));		\
	CHECK_WRITE(0x39, (D));

#define CHECK_PRINT(S);				\
	kprintf("%s: %d, %d, %d, %d, %d, %d\n",	\
	    (S),				\
	    CHECK_READ(0x34),			\
	    CHECK_READ(0x35),			\
	    CHECK_READ(0x36),			\
	    CHECK_READ(0x37),			\
	    CHECK_READ(0x38),			\
	    CHECK_READ(0x39));

#else				/* CHECK_POINTS */

#define CHECK_INIT(D)
#define CHECK_PRINT(S)

#endif				/* CHECK_POINTS */

/*
 * Values to send to the POST hardware.
 */
#define MP_BOOTADDRESS_POST	0x10
#define MP_PROBE_POST		0x11
#define MPTABLE_PASS1_POST	0x12

#define MP_START_POST		0x13
#define MP_ENABLE_POST		0x14
#define MPTABLE_PASS2_POST	0x15

#define START_ALL_APS_POST	0x16
#define INSTALL_AP_TRAMP_POST	0x17
#define START_AP_POST		0x18

#define MP_ANNOUNCE_POST	0x19

/** XXX FIXME: where does this really belong, isa.h/isa.c perhaps? */
int	current_postcode;

/** XXX FIXME: what system files declare these??? */

extern int naps;

int64_t tsc0_offset;
extern int64_t tsc_offsets[];

/* AP uses this during bootstrap.  Do not staticize.  */
char *bootSTK;
static int bootAP;

struct pcb stoppcbs[MAXCPU];

extern inthand_t IDTVEC(fast_syscall), IDTVEC(fast_syscall32);

/*
 * Local data and functions.
 */

static u_int	boot_address;
static int	mp_finish;
static int	mp_finish_lapic;

static int	start_all_aps(u_int boot_addr);
#if 0
static void	install_ap_tramp(u_int boot_addr);
#endif
static int	start_ap(struct mdglobaldata *gd, u_int boot_addr, int smibest);
static int	smitest(void);
static void	mp_bsp_simple_setup(void);

/* which cpus have been started */
static cpumask_t smp_startup_mask = CPUMASK_INITIALIZER_ONLYONE;
/* which cpus have lapic been inited */
static cpumask_t smp_lapic_mask = CPUMASK_INITIALIZER_ONLYONE;
/* which cpus are ready for IPIs etc? */
cpumask_t smp_active_mask = CPUMASK_INITIALIZER_ONLYONE;
cpumask_t smp_finalize_mask = CPUMASK_INITIALIZER_ONLYONE;

SYSCTL_OPAQUE(_machdep, OID_AUTO, smp_active, CTLFLAG_RD,
	      &smp_active_mask, sizeof(smp_active_mask), "LU", "");
static u_int	bootMP_size;
static u_int	report_invlpg_src;
SYSCTL_INT(_machdep, OID_AUTO, report_invlpg_src, CTLFLAG_RW,
	&report_invlpg_src, 0, "");
static u_int	report_invltlb_src;
SYSCTL_INT(_machdep, OID_AUTO, report_invltlb_src, CTLFLAG_RW,
	&report_invltlb_src, 0, "");
static int	optimized_invltlb;
SYSCTL_INT(_machdep, OID_AUTO, optimized_invltlb, CTLFLAG_RW,
	&optimized_invltlb, 0, "");
static int	all_but_self_ipi_enable = 1;
SYSCTL_INT(_machdep, OID_AUTO, all_but_self_ipi_enable, CTLFLAG_RW,
	&all_but_self_ipi_enable, 0, "");

/* Local data for detecting CPU TOPOLOGY */
static int core_bits = 0;
static int logical_CPU_bits = 0;


/*
 * Calculate usable address in base memory for AP trampoline code.
 */
u_int
mp_bootaddress(u_int basemem)
{
	POSTCODE(MP_BOOTADDRESS_POST);

	bootMP_size = mptramp_end - mptramp_start;
	boot_address = trunc_page(basemem * 1024); /* round down to 4k boundary */
	if (((basemem * 1024) - boot_address) < bootMP_size)
		boot_address -= PAGE_SIZE;	/* not enough, lower by 4k */
	/* 3 levels of page table pages */
	mptramp_pagetables = boot_address - (PAGE_SIZE * 3);

	return mptramp_pagetables;
}
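
/*
 * Illustrative example (added, not from the original source): with 639KB of
 * base memory, basemem * 1024 = 0x9FC00 and trunc_page() rounds that down to
 * 0x9F000; if the remaining 0xC00 bytes cannot hold the trampoline, another
 * 4K page is dropped.  The three page table pages for the AP's temporary
 * identity map then sit immediately below the trampoline, at
 * boot_address - 3 * PAGE_SIZE.
 */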

/*
 * Print various information about the SMP system hardware and setup.
 */
void
mp_announce(void)
{
	int	x;

	POSTCODE(MP_ANNOUNCE_POST);

	kprintf("DragonFly/MP: Multiprocessor motherboard\n");
	kprintf(" cpu0 (BSP): apic id: %2d\n", CPUID_TO_APICID(0));
	for (x = 1; x <= naps; ++x)
		kprintf(" cpu%d (AP): apic id: %2d\n", x, CPUID_TO_APICID(x));

	if (!ioapic_enable)
		kprintf(" Warning: APIC I/O disabled\n");
}

/*
 * AP cpu's call this to sync up protected mode.
 *
 * WARNING! %gs is not set up on entry.  This routine sets up %gs.
 */
void
init_secondary(void)
{
	int	gsel_tss;
	int	x, myid = bootAP;
	u_int64_t msr, cr0;
	struct mdglobaldata *md;
	struct privatespace *ps;

	ps = CPU_prvspace[myid];

	gdt_segs[GPROC0_SEL].ssd_base = (long)&ps->common_tss;
	ps->mdglobaldata.mi.gd_prvspace = ps;

	/* We fill the 32-bit segment descriptors */
	for (x = 0; x < NGDT; x++) {
		if (x != GPROC0_SEL && x != (GPROC0_SEL + 1))
			ssdtosd(&gdt_segs[x], &gdt[myid * NGDT + x]);
	}
	/* And now a 64-bit one */
	ssdtosyssd(&gdt_segs[GPROC0_SEL],
	    (struct system_segment_descriptor *)&gdt[myid * NGDT + GPROC0_SEL]);

	r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
	r_gdt.rd_base = (long) &gdt[myid * NGDT];
	lgdt(&r_gdt);			/* does magic intra-segment return */

	/* lgdt() destroys the GSBASE value, so we load GSBASE after lgdt() */
	wrmsr(MSR_FSBASE, 0);		/* User value */
	wrmsr(MSR_GSBASE, (u_int64_t)ps);
	wrmsr(MSR_KGSBASE, 0);		/* XXX User value while we're in the kernel */

	lidt(&r_idt_arr[mdcpu->mi.gd_cpuid]);

#if 0
	lldt(_default_ldt);
	mdcpu->gd_currentldt = _default_ldt;
#endif

	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	gdt[myid * NGDT + GPROC0_SEL].sd_type = SDT_SYSTSS;

	md = mdcpu;	/* loaded through %gs:0 (mdglobaldata.mi.gd_prvspace)*/

	/*
	 * TSS entry point for interrupts, traps, and exceptions
	 * (sans NMI).  This will always go to near the top of the pcpu
	 * trampoline area.  Hardware-pushed data will be copied into
	 * the trap-frame on entry, and (if necessary) returned to the
	 * trampoline on exit.
	 *
	 * We store some pcb data for the trampoline code above the
	 * stack the cpu hw pushes into, and arrange things so the
	 * address of tr_pcb_rsp is the same as the desired top of
	 * stack.
	 */
	ps->common_tss.tss_rsp0 = (register_t)&ps->trampoline.tr_pcb_rsp;
	ps->trampoline.tr_pcb_rsp = ps->common_tss.tss_rsp0;
	ps->trampoline.tr_pcb_gs_kernel = (register_t)md;
	ps->trampoline.tr_pcb_cr3 = KPML4phys;	/* adj to user cr3 live */
	ps->dbltramp.tr_pcb_gs_kernel = (register_t)md;
	ps->dbltramp.tr_pcb_cr3 = KPML4phys;
	ps->dbgtramp.tr_pcb_gs_kernel = (register_t)md;
	ps->dbgtramp.tr_pcb_cr3 = KPML4phys;

#if 0 /* JG XXX */
	ps->common_tss.tss_ioopt = (sizeof ps->common_tss) << 16;
#endif
	md->gd_tss_gdt = &gdt[myid * NGDT + GPROC0_SEL];
	md->gd_common_tssd = *md->gd_tss_gdt;

	/* double fault stack */
	ps->common_tss.tss_ist1 = (register_t)&ps->dbltramp.tr_pcb_rsp;
	ps->common_tss.tss_ist2 = (register_t)&ps->dbgtramp.tr_pcb_rsp;

	ltr(gsel_tss);

	/*
	 * Set to a known state:
	 *	Set by mpboot.s:	CR0_PG, CR0_PE
	 *	Set by cpu_setregs:	CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
	 */
	cr0 = rcr0();
	cr0 &= ~(CR0_CD | CR0_NW | CR0_EM);
	load_cr0(cr0);

	/* Set up the fast syscall stuff */
	msr = rdmsr(MSR_EFER) | EFER_SCE;
	wrmsr(MSR_EFER, msr);
	wrmsr(MSR_LSTAR, (u_int64_t)IDTVEC(fast_syscall));
	wrmsr(MSR_CSTAR, (u_int64_t)IDTVEC(fast_syscall32));
	msr = ((u_int64_t)GSEL(GCODE_SEL, SEL_KPL) << 32) |
	      ((u_int64_t)GSEL(GUCODE32_SEL, SEL_UPL) << 48);
	wrmsr(MSR_STAR, msr);
	wrmsr(MSR_SF_MASK, PSL_NT|PSL_T|PSL_I|PSL_C|PSL_D|PSL_IOPL);
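
	/*
	 * For reference (added commentary, not part of the original source):
	 * MSR_STAR holds the selector bases used by syscall/sysret.  Bits
	 * 47:32 are the kernel CS/SS base loaded on syscall; bits 63:48 are
	 * the base from which sysret derives the user 32-bit CS, 64-bit CS
	 * and SS.  MSR_SF_MASK lists the RFLAGS bits (interrupts, traps,
	 * direction flag, IOPL, ...) that the cpu clears on syscall entry.
	 */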

	pmap_set_opt();		/* PSE/4MB pages, etc */
	pmap_init_pat();	/* Page Attribute Table */

	/* set up CPU registers and state */
	cpu_setregs();

	/* set up SSE/NX registers */
	initializecpu(myid);

	/* set up FPU state on the AP */
	npxinit();

	/* If BSP is in the X2APIC mode, put the AP into the X2APIC mode. */
	if (x2apic_enable)
		lapic_x2apic_enter(FALSE);

	/* disable the APIC, just to be SURE */
	LAPIC_WRITE(svr, (LAPIC_READ(svr) & ~APIC_SVR_ENABLE));
}

/*******************************************************************
 * local functions and data
 */

/*
 * Start the SMP system
 */
static void
mp_start_aps(void *dummy __unused)
{
	if (lapic_enable) {
		/* start each Application Processor */
		start_all_aps(boot_address);
	} else {
		mp_bsp_simple_setup();
	}
}
SYSINIT(startaps, SI_BOOT2_START_APS, SI_ORDER_FIRST, mp_start_aps, NULL);

/*
 * start each AP in our list
 */
static int
start_all_aps(u_int boot_addr)
{
	vm_offset_t va = boot_address + KERNBASE;
	u_int64_t *pt4, *pt3, *pt2;
	int	pssize;
	int	x, i;
	int	shift;
	int	smicount;
	int	smibest;
	int	smilast;
	u_char	mpbiosreason;
	u_long	mpbioswarmvec;
	struct mdglobaldata *gd;
	struct privatespace *ps;
	size_t ipiq_size;

	POSTCODE(START_ALL_APS_POST);

	/* install the AP 1st level boot code */
	pmap_kenter(va, boot_address);
	cpu_invlpg((void *)va);		/* JG XXX */
	bcopy(mptramp_start, (void *)va, bootMP_size);

	/* Locate the page tables, they'll be below the trampoline */
	pt4 = (u_int64_t *)(uintptr_t)(mptramp_pagetables + KERNBASE);
	pt3 = pt4 + (PAGE_SIZE) / sizeof(u_int64_t);
	pt2 = pt3 + (PAGE_SIZE) / sizeof(u_int64_t);

	/* Create the initial 1GB replicated page tables */
	for (i = 0; i < 512; i++) {
		/* Each slot of the level 4 pages points to the same level 3 page */
		pt4[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables + PAGE_SIZE);
		pt4[i] |= kernel_pmap.pmap_bits[PG_V_IDX] |
		    kernel_pmap.pmap_bits[PG_RW_IDX] |
		    kernel_pmap.pmap_bits[PG_U_IDX];

		/* Each slot of the level 3 pages points to the same level 2 page */
		pt3[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables + (2 * PAGE_SIZE));
		pt3[i] |= kernel_pmap.pmap_bits[PG_V_IDX] |
		    kernel_pmap.pmap_bits[PG_RW_IDX] |
		    kernel_pmap.pmap_bits[PG_U_IDX];

		/* The level 2 page slots are mapped with 2MB pages for 1GB. */
		pt2[i] = i * (2 * 1024 * 1024);
		pt2[i] |= kernel_pmap.pmap_bits[PG_V_IDX] |
		    kernel_pmap.pmap_bits[PG_RW_IDX] |
		    kernel_pmap.pmap_bits[PG_PS_IDX] |
		    kernel_pmap.pmap_bits[PG_U_IDX];
	}
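
	/*
	 * Added note: because every level 4 slot points at the same level 3
	 * page and every level 3 slot points at the same level 2 page, any
	 * virtual address the trampoline happens to use resolves into the
	 * first 1GB of physical memory (mapped with 2MB pages).  That lets
	 * mpboot run identity-mapped regardless of which slot its transition
	 * addresses fall in, until the AP switches to the real kernel page
	 * tables.
	 */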

	/* save the current value of the warm-start vector */
	mpbioswarmvec = *((u_int32_t *) WARMBOOT_OFF);
	outb(CMOS_REG, BIOS_RESET);
	mpbiosreason = inb(CMOS_DATA);

	/* setup a vector to our boot code */
	*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
	*((volatile u_short *) WARMBOOT_SEG) = (boot_address >> 4);
	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, BIOS_WARM);	/* 'warm-start' */

	/*
	 * If we have a TSC we can figure out the SMI interrupt rate.
	 * The SMI does not necessarily use a constant rate.  Spend
	 * up to 250ms trying to figure it out.
	 */
	smibest = 0;
	if (cpu_feature & CPUID_TSC) {
		set_apic_timer(275000);
		smilast = read_apic_timer();
		for (x = 0; x < 20 && read_apic_timer(); ++x) {
			smicount = smitest();
			if (smibest == 0 || smilast - smicount < smibest)
				smibest = smilast - smicount;
			smilast = smicount;
		}
		if (smibest > 250000)
			smibest = 0;
	}
	if (smibest)
		kprintf("SMI Frequency (worst case): %d Hz (%d us)\n",
			1000000 / smibest, smibest);

	/* start each AP */
	for (x = 1; x <= naps; ++x) {
		/* This is a bit verbose, it will go away soon.  */

		pssize = sizeof(struct privatespace);
		ps = (void *)kmem_alloc3(&kernel_map, pssize, VM_SUBSYS_GD,
					 KM_CPU(x));
		CPU_prvspace[x] = ps;
#if 0
		kprintf("ps %d %p %d\n", x, ps, pssize);
#endif
		bzero(ps, pssize);
		gd = &ps->mdglobaldata;
		gd->mi.gd_prvspace = ps;

		/* prime data page for it to use */
		mi_gdinit(&gd->mi, x);
		cpu_gdinit(gd, x);
		ipiq_size = sizeof(struct lwkt_ipiq) * (naps + 1);
		gd->mi.gd_ipiq = (void *)kmem_alloc3(&kernel_map, ipiq_size,
						     VM_SUBSYS_IPIQ, KM_CPU(x));
		bzero(gd->mi.gd_ipiq, ipiq_size);

		gd->gd_acpi_id = CPUID_TO_ACPIID(gd->mi.gd_cpuid);

		/* initialize arc4random. */
		arc4_init_pcpu(x);

		/* setup a vector to our boot code */
		*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
		*((volatile u_short *) WARMBOOT_SEG) = (boot_addr >> 4);
		outb(CMOS_REG, BIOS_RESET);
		outb(CMOS_DATA, BIOS_WARM);	/* 'warm-start' */

		/*
		 * Setup the AP boot stack
		 */
		bootSTK = &ps->idlestack[UPAGES * PAGE_SIZE - PAGE_SIZE];
		bootAP = x;

		/* attempt to start the Application Processor */
		CHECK_INIT(99);	/* setup checkpoints */
		if (!start_ap(gd, boot_addr, smibest)) {
			kprintf("\nAP #%d (PHY# %d) failed!\n",
				x, CPUID_TO_APICID(x));
			CHECK_PRINT("trace");	/* show checkpoints */
			/* better panic as the AP may be running loose */
			kprintf("panic y/n? [y] ");
			cnpoll(TRUE);
			if (cngetc() != 'n')
				panic("bye-bye");
			cnpoll(FALSE);
		}
		CHECK_PRINT("trace");	/* show checkpoints */
	}

	/* set ncpus to 1 + highest logical cpu.  Not all may have come up */
	ncpus = x;

	for (shift = 0; (1 << shift) <= ncpus; ++shift)
		;
	--shift;

	/* ncpus_fit -- ncpus rounded up to the nearest power of 2 */
	if ((1 << shift) < ncpus)
		++shift;
	ncpus_fit = 1 << shift;
	ncpus_fit_mask = ncpus_fit - 1;
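
	/*
	 * Example of the calculation above (added for clarity): with
	 * ncpus == 6 the first loop leaves shift == 2 after the decrement,
	 * (1 << 2) < 6 so shift becomes 3, giving ncpus_fit == 8 and
	 * ncpus_fit_mask == 7.
	 */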

	/* build our map of 'other' CPUs */
	mycpu->gd_other_cpus = smp_startup_mask;
	CPUMASK_NANDBIT(mycpu->gd_other_cpus, mycpu->gd_cpuid);

	gd = (struct mdglobaldata *)mycpu;
	gd->gd_acpi_id = CPUID_TO_ACPIID(mycpu->gd_cpuid);

	ipiq_size = sizeof(struct lwkt_ipiq) * ncpus;
	mycpu->gd_ipiq = (void *)kmem_alloc3(&kernel_map, ipiq_size,
					     VM_SUBSYS_IPIQ, KM_CPU(0));
	bzero(mycpu->gd_ipiq, ipiq_size);

	/* initialize arc4random. */
	arc4_init_pcpu(0);

	/* restore the warmstart vector */
	*(u_long *) WARMBOOT_OFF = mpbioswarmvec;
	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, mpbiosreason);

	/*
	 * NOTE!  The idlestack for the BSP was setup by locore.  Finish
	 * up, clean out the P==V mapping we did earlier.
	 */
	pmap_set_opt();

	/*
	 * Wait for all APs to finish initializing their LAPIC.
	 */
	if (bootverbose)
		kprintf("SMP: Waiting for APs to finish LAPIC initialization\n");
	if (cpu_feature & CPUID_TSC)
		tsc0_offset = rdtsc();
	tsc_offsets[0] = 0;
	mp_finish_lapic = 1;
	rel_mplock();

	while (CPUMASK_CMPMASKNEQ(smp_lapic_mask, smp_startup_mask)) {
		cpu_pause();
		cpu_lfence();
		if (cpu_feature & CPUID_TSC)
			tsc0_offset = rdtsc();
	}
	while (try_mplock() == 0) {
		cpu_pause();
		cpu_lfence();
	}

	/* number of APs actually started */
	return ncpus - 1;
}


/*
 * load the 1st level AP boot code into base memory.
 */

/* targets for relocation */
extern void bigJump(void);
extern void bootCodeSeg(void);
extern void bootDataSeg(void);
extern void MPentry(void);
extern u_int MP_GDT;
extern u_int mp_gdtbase;

#if 0

static void
install_ap_tramp(u_int boot_addr)
{
	int	x;
	int	size = *(int *) ((u_long) & bootMP_size);
	u_char *src = (u_char *) ((u_long) bootMP);
	u_char *dst = (u_char *) boot_addr + KERNBASE;
	u_int	boot_base = (u_int) bootMP;
	u_int8_t *dst8;
	u_int16_t *dst16;
	u_int32_t *dst32;

	POSTCODE(INSTALL_AP_TRAMP_POST);

	for (x = 0; x < size; ++x)
		*dst++ = *src++;

	/*
	 * modify addresses in code we just moved to basemem.  unfortunately we
	 * need fairly detailed info about mpboot.s for this to work.  changes
	 * to mpboot.s might require changes here.
	 */

	/* boot code is located in KERNEL space */
	dst = (u_char *) boot_addr + KERNBASE;

	/* modify the lgdt arg */
	dst32 = (u_int32_t *) (dst + ((u_int) & mp_gdtbase - boot_base));
	*dst32 = boot_addr + ((u_int) & MP_GDT - boot_base);

	/* modify the ljmp target for MPentry() */
	dst32 = (u_int32_t *) (dst + ((u_int) bigJump - boot_base) + 1);
	*dst32 = ((u_int) MPentry - KERNBASE);

	/* modify the target for boot code segment */
	dst16 = (u_int16_t *) (dst + ((u_int) bootCodeSeg - boot_base));
	dst8 = (u_int8_t *) (dst16 + 1);
	*dst16 = (u_int) boot_addr & 0xffff;
	*dst8 = ((u_int) boot_addr >> 16) & 0xff;

	/* modify the target for boot data segment */
	dst16 = (u_int16_t *) (dst + ((u_int) bootDataSeg - boot_base));
	dst8 = (u_int8_t *) (dst16 + 1);
	*dst16 = (u_int) boot_addr & 0xffff;
	*dst8 = ((u_int) boot_addr >> 16) & 0xff;
}

#endif

/*
 * This function starts the AP (application processor) identified
 * by the APIC ID 'physicalCpu'.  It does quite a "song and dance"
 * to accomplish this.  This is necessary because of the nuances
 * of the different hardware we might encounter.  It ain't pretty,
 * but it seems to work.
 *
 * NOTE: eventually an AP gets to ap_init(), which is called just
 * before the AP goes into the LWKT scheduler's idle loop.
 */
static int
start_ap(struct mdglobaldata *gd, u_int boot_addr, int smibest)
{
	int	physical_cpu;
	int	vector;

	POSTCODE(START_AP_POST);

	/* get the PHYSICAL APIC ID# */
	physical_cpu = CPUID_TO_APICID(gd->mi.gd_cpuid);

	/* calculate the vector */
	vector = (boot_addr >> 12) & 0xff;
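
	/*
	 * Added note: the STARTUP IPI vector is the physical page number of
	 * the trampoline, so boot_addr must be page aligned and below 1MB.
	 * For example (illustrative only), a trampoline at 0x9E000 yields
	 * vector 0x9E and the AP begins executing in real mode at 9E00:0000.
	 */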

	/* We don't want anything interfering */
	cpu_disable_intr();

	/* Make sure the target cpu sees everything */
	wbinvd();

	/*
	 * Try to detect when a SMI has occurred, wait up to 200ms.
	 *
	 * If a SMI occurs during an AP reset but before we issue
	 * the STARTUP command, the AP may brick.  To work around
	 * this problem we hold off doing the AP startup until
	 * after we have detected the SMI.  Hopefully another SMI
	 * will not occur before we finish the AP startup.
	 *
	 * Retries don't seem to help.  SMIs have a window of opportunity
	 * and if USB->legacy keyboard emulation is enabled in the BIOS
	 * the interrupt rate can be quite high.
	 *
	 * NOTE: Don't worry about the L1 cache load, it might bloat
	 *	 ldelta a little but ndelta will be so huge when the SMI
	 *	 occurs the detection logic will still work fine.
	 */
	if (smibest) {
		set_apic_timer(200000);
		smitest();
	}

	/*
	 * First we do an INIT/RESET IPI.  This INIT IPI might be run,
	 * resetting and running the target CPU.  OR this INIT IPI might be
	 * latched (P5 bug), leaving the CPU waiting for the STARTUP IPI.
	 * OR this INIT IPI might be ignored.
	 *
	 * see apic/apicreg.h for icr bit definitions.
	 *
	 * TIME CRITICAL CODE, DO NOT DO ANY KPRINTFS IN THE HOT PATH.
	 */

	/*
	 * Do an INIT IPI: assert RESET
	 *
	 * Use edge triggered mode to assert INIT
	 */
	lapic_seticr_sync(physical_cpu,
	    APIC_DESTMODE_PHY |
	    APIC_DEST_DESTFLD |
	    APIC_TRIGMOD_EDGE |
	    APIC_LEVEL_ASSERT |
	    APIC_DELMODE_INIT);

	/*
	 * The spec calls for a 10ms delay but we may have to use a
	 * MUCH lower delay to avoid bricking an AP due to a fast SMI
	 * interrupt.  We have other loops here too and dividing by 2
	 * doesn't seem to be enough even after subtracting 350us,
	 * so we divide by 4.
	 *
	 * Our minimum delay is 150uS, maximum is 10ms.  If no SMI
	 * interrupt was detected we use the full 10ms.
	 */
	if (smibest == 0)
		u_sleep(10000);
	else if (smibest < 150 * 4 + 350)
		u_sleep(150);
	else if ((smibest - 350) / 4 < 10000)
		u_sleep((smibest - 350) / 4);
	else
		u_sleep(10000);
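
	/*
	 * Added example: if smitest() measured an SMI period of, say, 5000us,
	 * the branch above sleeps (5000 - 350) / 4 ~= 1162us, comfortably
	 * inside the window before the next SMI; only when no SMI was
	 * detected do we use the spec-mandated full 10ms.
	 */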

	/*
	 * Do an INIT IPI: deassert RESET
	 *
	 * Use level triggered mode to deassert.  It is unclear
	 * why we need to do this.
	 */
	lapic_seticr_sync(physical_cpu,
	    APIC_DESTMODE_PHY |
	    APIC_DEST_DESTFLD |
	    APIC_TRIGMOD_LEVEL |
	    APIC_LEVEL_DEASSERT |
	    APIC_DELMODE_INIT);
	u_sleep(150);				/* wait 150us */

	/*
	 * Next we do a STARTUP IPI: the previous INIT IPI might still be
	 * latched (P5 bug), in which case this 1st STARTUP terminates
	 * immediately and the previously started INIT IPI continues.  OR
	 * the previous INIT IPI has already run, and this STARTUP IPI will
	 * run.  OR the previous INIT IPI was ignored, and this STARTUP IPI
	 * will run.
	 *
	 * XXX set APIC_LEVEL_ASSERT
	 */
	lapic_seticr_sync(physical_cpu,
	    APIC_DESTMODE_PHY |
	    APIC_DEST_DESTFLD |
	    APIC_DELMODE_STARTUP |
	    vector);
	u_sleep(200);				/* wait ~200uS */

	/*
	 * Finally we do a 2nd STARTUP IPI: this 2nd STARTUP IPI should run IF
	 * the previous STARTUP IPI was cancelled by a latched INIT IPI.  OR
	 * this STARTUP IPI will be ignored, as only ONE STARTUP IPI is
	 * recognized after hardware RESET or INIT IPI.
	 *
	 * XXX set APIC_LEVEL_ASSERT
	 */
	lapic_seticr_sync(physical_cpu,
	    APIC_DESTMODE_PHY |
	    APIC_DEST_DESTFLD |
	    APIC_DELMODE_STARTUP |
	    vector);

	/* Resume normal operation */
	cpu_enable_intr();

	/* wait for it to start, see ap_init() */
	set_apic_timer(5000000);		/* == 5 seconds */
	while (read_apic_timer()) {
		if (CPUMASK_TESTBIT(smp_startup_mask, gd->mi.gd_cpuid))
			return 1;	/* return SUCCESS */
	}

	return 0;		/* return FAILURE */
}

static
int
smitest(void)
{
	int64_t	ltsc;
	int64_t	ntsc;
	int64_t	ldelta;
	int64_t	ndelta;
	int	count;

	ldelta = 0;
	ndelta = 0;
	while (read_apic_timer()) {
		ltsc = rdtsc();
		for (count = 0; count < 100; ++count)
			ntsc = rdtsc();	/* force loop to occur */
		if (ldelta) {
			ndelta = ntsc - ltsc;
			if (ldelta > ndelta)
				ldelta = ndelta;
			if (ndelta > ldelta * 2)
				break;
		} else {
			ldelta = ntsc - ltsc;
		}
	}
	return(read_apic_timer());
}
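
/*
 * Added note: smitest() spins on rdtsc() tracking the smallest per-iteration
 * delta (ldelta).  A sudden delta more than twice that baseline means
 * something (presumably an SMI) stole the cpu, so it stops and returns the
 * remaining apic timer count.  The caller turns successive return values
 * into an estimate of the SMI period.
 */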

/*
 * Synchronously flush the TLB on all other CPU's.  The current cpu's
 * TLB is not flushed.  If the caller wishes to flush the current cpu's
 * TLB the caller must call cpu_invltlb() in addition to smp_invltlb().
 *
 * This routine may be called concurrently from multiple cpus.  When this
 * happens, smp_invltlb() can wind up sticking around in the confirmation
 * while() loop at the end as additional cpus are added to the global
 * cpumask, until they are acknowledged by another IPI.
 *
 * NOTE: If for some reason we were unable to start all cpus we cannot
 *	 safely use broadcast IPIs.
 */

cpumask_t smp_smurf_mask;
static cpumask_t smp_invltlb_mask;
#define LOOPRECOVER
#define LOOPMASK_IN
#ifdef LOOPMASK_IN
cpumask_t smp_in_mask;
#endif
cpumask_t smp_invmask;
extern cpumask_t smp_idleinvl_mask;
extern cpumask_t smp_idleinvl_reqs;

/*
 * Atomically OR bits in *mask to smp_smurf_mask.  Adjust *mask to remove
 * bits that do not need to be IPId.  These bits are still part of the
 * command, but the target cpus have already been signalled and do not need
 * to be signalled again.
 */
#include <sys/spinlock.h>
#include <sys/spinlock2.h>

static __noinline
void
smp_smurf_fetchset(cpumask_t *mask)
{
	cpumask_t omask;
	int i;
	__uint64_t obits;
	__uint64_t nbits;

	i = 0;
	while (i < CPUMASK_ELEMENTS) {
		obits = smp_smurf_mask.ary[i];
		cpu_ccfence();
		nbits = obits | mask->ary[i];
		if (atomic_cmpset_long(&smp_smurf_mask.ary[i], obits, nbits)) {
			omask.ary[i] = obits;
			++i;
		}
	}
	CPUMASK_NANDMASK(*mask, omask);
}

/*
 * This is a mechanism which guarantees that cpu_invltlb() will be executed
 * on idle cpus without having to signal or wake them up.  The invltlb will be
 * executed when they wake up, prior to any scheduling or interrupt thread.
 *
 * (*mask) is modified to remove the cpus we successfully negotiate this
 * function with.  This function may only be used with semi-synchronous
 * commands (typically invltlb's or semi-synchronous invalidations which
 * are usually associated only with kernel memory).
 */
void
smp_smurf_idleinvlclr(cpumask_t *mask)
{
	if (optimized_invltlb) {
		ATOMIC_CPUMASK_ORMASK(smp_idleinvl_reqs, *mask);
		/* cpu_lfence() not needed */
		CPUMASK_NANDMASK(*mask, smp_idleinvl_mask);
	}
}

/*
 * Issue cpu_invltlb() across all cpus except the current cpu.
 *
 * This function will arrange to avoid idle cpus, but still guarantee that
 * invltlb is run on them when they wake up prior to any scheduling or
 * nominal interrupt.
 */
void
smp_invltlb(void)
{
	struct mdglobaldata *md = mdcpu;
	cpumask_t mask;
	unsigned long rflags;
#ifdef LOOPRECOVER
	tsc_uclock_t tsc_base = rdtsc();
	int repeats = 0;
#endif

	if (report_invltlb_src > 0) {
		if (--report_invltlb_src <= 0)
			print_backtrace(8);
	}

	/*
	 * Disallow normal interrupts, set all active cpus except our own
	 * in the global smp_invltlb_mask.
	 */
	++md->mi.gd_cnt.v_smpinvltlb;
	crit_enter_gd(&md->mi);

	/*
	 * Bits we want to set in smp_invltlb_mask.  We do not want to signal
	 * our own cpu.  Also try to remove bits associated with idle cpus
	 * that we can flag for auto-invltlb.
	 */
	mask = smp_active_mask;
	CPUMASK_NANDBIT(mask, md->mi.gd_cpuid);
	smp_smurf_idleinvlclr(&mask);

	rflags = read_rflags();
	cpu_disable_intr();
	ATOMIC_CPUMASK_ORMASK(smp_invltlb_mask, mask);

	/*
	 * IPI non-idle cpus represented by mask.  The omask calculation
	 * removes cpus from the mask which already have a Xinvltlb IPI
	 * pending (avoid double-queueing the IPI).
	 *
	 * We must disable real interrupts when setting the smurf flags or
	 * we might race a XINVLTLB before we manage to send the ipi's for
	 * the bits we set.
	 *
	 * NOTE: We are not signalling ourselves, mask already does NOT
	 * include our own cpu.
	 */
	smp_smurf_fetchset(&mask);

	/*
	 * Issue the IPI.  Note that the XINVLTLB IPI runs regardless of
	 * the critical section count on the target cpus.
	 */
	CPUMASK_ORMASK(mask, md->mi.gd_cpumask);
	if (all_but_self_ipi_enable &&
	    (all_but_self_ipi_enable >= 2 ||
	     CPUMASK_CMPMASKEQ(smp_startup_mask, mask))) {
		all_but_self_ipi(XINVLTLB_OFFSET);
	} else {
		CPUMASK_NANDMASK(mask, md->mi.gd_cpumask);
		selected_apic_ipi(mask, XINVLTLB_OFFSET, APIC_DELMODE_FIXED);
	}

	/*
	 * Wait for acknowledgement by all cpus.  smp_inval_intr() will
	 * temporarily enable interrupts to avoid deadlocking the lapic,
	 * and will also handle running cpu_invltlb() and remote invlpg
	 * commands on our cpu if some other cpu requests it of us.
	 *
	 * WARNING! I originally tried to implement this as a hard loop
	 *	    checking only smp_invltlb_mask (and issuing a local
	 *	    cpu_invltlb() if requested), with interrupts enabled
	 *	    and without calling smp_inval_intr().  This DID NOT WORK.
	 *	    It resulted in weird races where smurf bits would get
	 *	    cleared without any action being taken.
	 */
	smp_inval_intr();
	CPUMASK_ASSZERO(mask);
	while (CPUMASK_CMPMASKNEQ(smp_invltlb_mask, mask)) {
		smp_inval_intr();
		cpu_pause();
#ifdef LOOPRECOVER
		if (tsc_frequency && rdtsc() - tsc_base > tsc_frequency) {
			/*
			 * cpuid	- cpu doing the waiting
			 * invltlb_mask - IPI in progress
			 */
			kprintf("smp_invltlb %d: waited too long inv=%08jx "
				"smurf=%08jx "
#ifdef LOOPMASK_IN
				"in=%08jx "
#endif
				"idle=%08jx/%08jx\n",
				md->mi.gd_cpuid,
				smp_invltlb_mask.ary[0],
				smp_smurf_mask.ary[0],
#ifdef LOOPMASK_IN
				smp_in_mask.ary[0],
#endif
				smp_idleinvl_mask.ary[0],
				smp_idleinvl_reqs.ary[0]);
			mdcpu->gd_xinvaltlb = 0;
			ATOMIC_CPUMASK_NANDMASK(smp_smurf_mask,
						smp_invltlb_mask);
			smp_invlpg(&smp_active_mask);
			tsc_base = rdtsc();
			if (++repeats > 10) {
				kprintf("smp_invltlb: giving up\n");
				CPUMASK_ASSZERO(smp_invltlb_mask);
			}
		}
#endif
	}
	write_rflags(rflags);
	crit_exit_gd(&md->mi);
}

/*
 * Called from a critical section with interrupts hard-disabled.
 * This function issues an XINVLTLB IPI and then executes any pending
 * command on the current cpu before returning.
 */
void
smp_invlpg(cpumask_t *cmdmask)
{
	struct mdglobaldata *md = mdcpu;
	cpumask_t mask;

	if (report_invlpg_src > 0) {
		if (--report_invlpg_src <= 0)
			print_backtrace(8);
	}

	/*
	 * Disallow normal interrupts, set all active cpus in the pmap,
	 * plus our own for completion processing (it might or might not
	 * be part of the set).
	 */
	mask = smp_active_mask;
	CPUMASK_ANDMASK(mask, *cmdmask);
	CPUMASK_ORMASK(mask, md->mi.gd_cpumask);

	/*
	 * Avoid double-queuing IPIs, which can deadlock us.  We must disable
	 * real interrupts when setting the smurf flags or we might race a
	 * XINVLTLB before we manage to send the ipi's for the bits we set.
	 *
	 * NOTE: We might be including our own cpu in the smurf mask.
	 */
	smp_smurf_fetchset(&mask);

	/*
	 * Issue the IPI.  Note that the XINVLTLB IPI runs regardless of
	 * the critical section count on the target cpus.
	 *
	 * We do not include our own cpu when issuing the IPI.
	 */
	if (all_but_self_ipi_enable &&
	    (all_but_self_ipi_enable >= 2 ||
	     CPUMASK_CMPMASKEQ(smp_startup_mask, mask))) {
		all_but_self_ipi(XINVLTLB_OFFSET);
	} else {
		CPUMASK_NANDMASK(mask, md->mi.gd_cpumask);
		selected_apic_ipi(mask, XINVLTLB_OFFSET, APIC_DELMODE_FIXED);
	}

	/*
	 * This will synchronously wait for our command to complete,
	 * as well as process commands from other cpus.  It also handles
	 * reentrancy.
	 *
	 * (interrupts are disabled and we are in a critical section here)
	 */
	smp_inval_intr();
}

void
smp_sniff(void)
{
	globaldata_t gd = mycpu;
	int dummy;
	register_t rflags;

	/*
	 * Ignore all_but_self_ipi_enable here and just use it.
	 */
	rflags = read_rflags();
	cpu_disable_intr();
	all_but_self_ipi(XSNIFF_OFFSET);
	gd->gd_sample_pc = smp_sniff;
	gd->gd_sample_sp = &dummy;
	write_rflags(rflags);
}

void
cpu_sniff(int dcpu)
{
	globaldata_t rgd = globaldata_find(dcpu);
	register_t rflags;
	int dummy;

	/*
	 * Ignore all_but_self_ipi_enable here and just use it.
	 */
	rflags = read_rflags();
	cpu_disable_intr();
	single_apic_ipi(dcpu, XSNIFF_OFFSET, APIC_DELMODE_FIXED);
	rgd->gd_sample_pc = cpu_sniff;
	rgd->gd_sample_sp = &dummy;
	write_rflags(rflags);
}

/*
 * Called from Xinvltlb assembly with interrupts hard-disabled and in a
 * critical section.  gd_intr_nesting_level may or may not be bumped
 * depending on entry.
 *
 * THIS CODE IS INTENDED TO EXPLICITLY IGNORE THE CRITICAL SECTION COUNT.
 * THAT IS, THE INTERRUPT IS INTENDED TO FUNCTION EVEN WHEN MAINLINE CODE
 * IS IN A CRITICAL SECTION.
 */
void
smp_inval_intr(void)
{
	struct mdglobaldata *md = mdcpu;
	cpumask_t cpumask;
#ifdef LOOPRECOVER
	tsc_uclock_t tsc_base = rdtsc();
#endif

#if 0
	/*
	 * The idle code is in a critical section, but that doesn't stop
	 * Xinvltlb from executing, so deal with the race which can occur
	 * in that situation.  Otherwise r-m-w operations by pmap_inval_intr()
	 * may have problems.
	 */
	if (ATOMIC_CPUMASK_TESTANDCLR(smp_idleinvl_reqs, md->mi.gd_cpuid)) {
		ATOMIC_CPUMASK_NANDBIT(smp_invltlb_mask, md->mi.gd_cpuid);
		cpu_invltlb();
		cpu_mfence();
	}
#endif

	/*
	 * This is a real mess.  I'd like to just leave interrupts disabled
	 * but it can cause the lapic to deadlock if too many interrupts queue
	 * to it, due to the idiotic design of the lapic.  So instead we have
	 * to enter a critical section so normal interrupts are made pending
	 * and track whether this one was reentered.
	 */
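	/*
	 * Added note on the gd_xinvaltlb state used below: 0 means this cpu
	 * is not in the handler, 1 means it is processing, and 2 records
	 * that another Xinvltlb arrived while processing.  Both the command
	 * loop and the epilogue check for 2 and simply loop again rather
	 * than letting the nested interrupt run the whole handler a second
	 * time.
	 */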
	if (md->gd_xinvaltlb) {		/* reentrant on cpu */
		md->gd_xinvaltlb = 2;
		return;
	}
	md->gd_xinvaltlb = 1;

	/*
	 * Check only those cpus with active Xinvl* commands pending.
	 *
	 * We are going to enable interrupts so make sure we are in a
	 * critical section.  This is necessary to avoid deadlocking
	 * the lapic and to ensure that we execute our commands prior to
	 * any nominal interrupt or preemption.
	 *
	 * WARNING! It is very important that we only clear our bit in
	 *	    smp_smurf_mask once for each interrupt we take.  In
	 *	    this case, we clear it on initial entry and only loop
	 *	    on the reentrancy detect (caused by another interrupt).
	 */
	cpumask = smp_invmask;
#ifdef LOOPMASK_IN
	ATOMIC_CPUMASK_ORBIT(smp_in_mask, md->mi.gd_cpuid);
#endif
loop:
	cpu_enable_intr();
	ATOMIC_CPUMASK_NANDBIT(smp_smurf_mask, md->mi.gd_cpuid);

	/*
	 * Specific page request(s), and we can't return until all bits
	 * are zero.
	 */
	for (;;) {
		int toolong;

		/*
		 * Also execute any pending full invalidation request in
		 * this loop.
		 */
		if (CPUMASK_TESTBIT(smp_invltlb_mask, md->mi.gd_cpuid)) {
			ATOMIC_CPUMASK_NANDBIT(smp_invltlb_mask,
					       md->mi.gd_cpuid);
			cpu_invltlb();
			cpu_mfence();
		}

#ifdef LOOPRECOVER
		if (tsc_frequency && rdtsc() - tsc_base > tsc_frequency) {
			/*
			 * cpuid	- cpu doing the waiting
			 * invmask	- IPI in progress
			 * invltlb_mask - which ones are TLB invalidations?
			 */
			kprintf("smp_inval_intr %d inv=%08jx tlbm=%08jx "
				"smurf=%08jx "
#ifdef LOOPMASK_IN
				"in=%08jx "
#endif
				"idle=%08jx/%08jx\n",
				md->mi.gd_cpuid,
				smp_invmask.ary[0],
				smp_invltlb_mask.ary[0],
				smp_smurf_mask.ary[0],
#ifdef LOOPMASK_IN
				smp_in_mask.ary[0],
#endif
				smp_idleinvl_mask.ary[0],
				smp_idleinvl_reqs.ary[0]);
			tsc_base = rdtsc();
			toolong = 1;
		} else {
			toolong = 0;
		}
#else
		toolong = 0;
#endif

		/*
		 * We can only add bits to the cpumask to test during the
		 * loop because the smp_invmask bit is cleared once the
		 * originator completes the command (the targets may still
		 * be cycling their own completions in this loop, afterwards).
		 *
		 * lfence required prior to all tests as this Xinvltlb
		 * interrupt could race the originator (already be in progress
		 * when the originator decides to issue, due to an issue by
		 * another cpu).
		 */
		cpu_lfence();
		CPUMASK_ORMASK(cpumask, smp_invmask);
		/*cpumask = smp_active_mask;*/	/* XXX */
		cpu_lfence();

		if (pmap_inval_intr(&cpumask, toolong) == 0) {
			/*
			 * Clear our smurf mask to allow new IPIs, but deal
			 * with potential races.
			 */
			break;
		}

		/*
		 * Test if someone sent us another invalidation IPI, break
		 * out so we can take it to avoid deadlocking the lapic
		 * interrupt queue (? stupid intel, amd).
		 */
		if (md->gd_xinvaltlb == 2)
			break;
		/*
		if (CPUMASK_TESTBIT(smp_smurf_mask, md->mi.gd_cpuid))
			break;
		*/
	}

	/*
	 * Full invalidation request
	 */
	if (CPUMASK_TESTBIT(smp_invltlb_mask, md->mi.gd_cpuid)) {
		ATOMIC_CPUMASK_NANDBIT(smp_invltlb_mask,
				       md->mi.gd_cpuid);
		cpu_invltlb();
		cpu_mfence();
	}

	/*
	 * Check to see if another Xinvltlb interrupt occurred and loop up
	 * if it did.
	 */
	cpu_disable_intr();
	if (md->gd_xinvaltlb == 2) {
		md->gd_xinvaltlb = 1;
		goto loop;
	}
#ifdef LOOPMASK_IN
	ATOMIC_CPUMASK_NANDBIT(smp_in_mask, md->mi.gd_cpuid);
#endif
	md->gd_xinvaltlb = 0;
}

void
cpu_wbinvd_on_all_cpus_callback(void *arg)
{
	wbinvd();
}

/*
 * When called the executing CPU will send an IPI to all other CPUs
 * requesting that they halt execution.
 *
 * Usually (but not necessarily) called with 'other_cpus' as its arg.
 *
 *  - Signals all CPUs in map to stop.
 *  - Waits for each to stop.
 *
 * Returns:
 *  -1: error
 *   0: NA
 *   1: ok
 *
 * XXX FIXME: this is not MP-safe, needs a lock to prevent multiple CPUs
 *	      from executing at same time.
 */
int
stop_cpus(cpumask_t map)
{
	cpumask_t mask;

	CPUMASK_ANDMASK(map, smp_active_mask);

	/* send the Xcpustop IPI to all CPUs in map */
	selected_apic_ipi(map, XCPUSTOP_OFFSET, APIC_DELMODE_FIXED);

	do {
		mask = stopped_cpus;
		CPUMASK_ANDMASK(mask, map);
		/* spin */
	} while (CPUMASK_CMPMASKNEQ(mask, map));

	return 1;
}


/*
 * Called by a CPU to restart stopped CPUs.
 *
 * Usually (but not necessarily) called with 'stopped_cpus' as its arg.
 *
 *  - Signals all CPUs in map to restart.
 *  - Waits for each to restart.
 *
 * Returns:
 *  -1: error
 *   0: NA
 *   1: ok
 */
int
restart_cpus(cpumask_t map)
{
	cpumask_t mask;

	/* signal other cpus to restart */
	mask = map;
	CPUMASK_ANDMASK(mask, smp_active_mask);
	cpu_ccfence();
	started_cpus = mask;
	cpu_ccfence();

	/* wait for each to clear its bit */
	while (CPUMASK_CMPMASKNEQ(stopped_cpus, map))
		cpu_pause();

	return 1;
}

/*
 * This is called once the mpboot code has gotten us properly relocated
 * and the MMU turned on, etc.  ap_init() is actually the idle thread,
 * and when it returns the scheduler will call the real cpu_idle() main
 * loop for the idlethread.  Interrupts are disabled on entry and should
 * remain disabled at return.
 */
void
ap_init(void)
{
	int	cpu_id;

	/*
	 * Adjust smp_startup_mask to signal the BSP that we have started
	 * up successfully.  Note that we do not yet hold the BGL.  The BSP
	 * is waiting for our signal.
	 *
	 * We can't set our bit in smp_active_mask yet because we are holding
	 * interrupts physically disabled and remote cpus could deadlock
	 * trying to send us an IPI.
	 */
	ATOMIC_CPUMASK_ORBIT(smp_startup_mask, mycpu->gd_cpuid);
	cpu_mfence();

	/*
	 * Interlock for LAPIC initialization.  Wait until mp_finish_lapic is
	 * non-zero, then get the MP lock.
	 *
	 * Note: We are in a critical section.
	 *
	 * Note: we are the idle thread, we can only spin.
	 *
	 * Note: The load fence is memory volatile and prevents the compiler
	 * from improperly caching mp_finish_lapic, and the cpu from improperly
	 * caching it.
	 */
	while (mp_finish_lapic == 0) {
		cpu_pause();
		cpu_lfence();
	}
#if 0
	while (try_mplock() == 0) {
		cpu_pause();
		cpu_lfence();
	}
#endif

	if (cpu_feature & CPUID_TSC) {
		/*
		 * The BSP is constantly updating tsc0_offset, figure out
		 * the relative difference to synchronize ktrdump.
		 */
		tsc_offsets[mycpu->gd_cpuid] = rdtsc() - tsc0_offset;
	}

	/* BSP may have changed PTD while we're waiting for the lock */
	cpu_invltlb();

	/* Build our map of 'other' CPUs. */
	mycpu->gd_other_cpus = smp_startup_mask;
	ATOMIC_CPUMASK_NANDBIT(mycpu->gd_other_cpus, mycpu->gd_cpuid);

	/* A quick check from sanity claus */
	cpu_id = APICID_TO_CPUID(LAPIC_READID);
	if (mycpu->gd_cpuid != cpu_id) {
		kprintf("SMP: assigned cpuid = %d\n", mycpu->gd_cpuid);
		kprintf("SMP: actual cpuid = %d lapicid %d\n",
			cpu_id, LAPIC_READID);
#if 0 /* JGXXX */
		kprintf("PTD[MPPTDI] = %p\n", (void *)PTD[MPPTDI]);
#endif
		panic("cpuid mismatch! boom!!");
	}

	/* Initialize AP's local APIC for irq's */
	lapic_init(FALSE);

	/* LAPIC initialization is done */
	ATOMIC_CPUMASK_ORBIT(smp_lapic_mask, mycpu->gd_cpuid);
	cpu_mfence();

#if 0
	/* Let BSP move onto the next initialization stage */
	rel_mplock();
#endif

	/*
	 * Interlock for finalization.  Wait until mp_finish is non-zero,
	 * then get the MP lock.
	 *
	 * Note: We are in a critical section.
	 *
	 * Note: we are the idle thread, we can only spin.
	 *
	 * Note: The load fence is memory volatile and prevents the compiler
	 * from improperly caching mp_finish, and the cpu from improperly
	 * caching it.
	 */
	while (mp_finish == 0) {
		cpu_pause();
		cpu_lfence();
	}

	/* BSP may have changed PTD while we're waiting for the lock */
	cpu_invltlb();

	/* Set memory range attributes for this CPU to match the BSP */
	mem_range_AP_init();

	/*
	 * Once we go active we must process any IPIQ messages that may
	 * have been queued, because no actual IPI will occur until we
	 * set our bit in the smp_active_mask.  If we don't the IPI
	 * message interlock could be left set which would also prevent
	 * further IPIs.
	 *
	 * The idle loop doesn't expect the BGL to be held and while
	 * lwkt_switch() normally cleans things up this is a special case
	 * because we are returning almost directly into the idle loop.
	 *
	 * The idle thread is never placed on the runq, make sure
	 * nothing we've done put it there.
	 */

	/*
	 * Hold a critical section and allow real interrupts to occur.  Zero
	 * any spurious interrupts which have accumulated, then set our
	 * smp_active_mask indicating that we are fully operational.
	 */
	crit_enter();
	__asm __volatile("sti; pause; pause"::);
	bzero(mdcpu->gd_ipending, sizeof(mdcpu->gd_ipending));
	ATOMIC_CPUMASK_ORBIT(smp_active_mask, mycpu->gd_cpuid);

	/*
	 * Wait until all cpus have set their smp_active_mask and have fully
	 * operational interrupts before proceeding.
	 *
	 * We need a final cpu_invltlb() because we would not have received
	 * any until we set our bit in smp_active_mask.
	 */
	while (mp_finish == 1) {
		cpu_pause();
		cpu_lfence();
	}
	cpu_invltlb();

	/*
	 * Initialize per-cpu clocks and do other per-cpu initialization.
	 * At this point code is expected to be able to use the full kernel
	 * API.
	 */
	initclocks_pcpu();	/* clock interrupts (via IPIs) */

	/*
	 * Since we may have cleaned up the interrupt triggers, manually
	 * process any pending IPIs before exiting our critical section.
	 * Once the critical section has exited, normal interrupt processing
	 * may occur.
	 */
	atomic_swap_int(&mycpu->gd_npoll, 0);
	lwkt_process_ipiq();
	crit_exit();

	/*
	 * Final final, allow the waiting BSP to resume the boot process,
	 * return 'into' the idle thread bootstrap.
	 */
	ATOMIC_CPUMASK_ORBIT(smp_finalize_mask, mycpu->gd_cpuid);
	KKASSERT((curthread->td_flags & TDF_RUNQ) == 0);
}
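
/*
 * Added summary of the BSP/AP startup handshake implemented above and in
 * start_all_aps()/ap_finish(): an AP first sets its bit in smp_startup_mask,
 * spins until mp_finish_lapic is set, initializes its lapic and sets its bit
 * in smp_lapic_mask, spins until mp_finish is set, enables interrupts and
 * sets smp_active_mask, and finally sets smp_finalize_mask when it is about
 * to drop into the idle loop.  The BSP raises each flag and then waits for
 * the corresponding mask to match smp_startup_mask before moving on.
 */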

/*
 * Get SMP fully working before we start initializing devices.
 */
static
void
ap_finish(void)
{
	if (bootverbose)
		kprintf("Finish MP startup\n");
	rel_mplock();

	/*
	 * Wait for the active mask to complete, after which all cpus will
	 * be accepting interrupts.
	 */
	mp_finish = 1;
	while (CPUMASK_CMPMASKNEQ(smp_active_mask, smp_startup_mask)) {
		cpu_pause();
		cpu_lfence();
	}

	/*
	 * Wait for the finalization mask to complete, after which all cpus
	 * have completely finished initializing and are entering or are in
	 * their idle thread.
	 *
	 * BSP should have received all required invltlbs but do another
	 * one just in case.
	 */
	cpu_invltlb();
	mp_finish = 2;
	while (CPUMASK_CMPMASKNEQ(smp_finalize_mask, smp_startup_mask)) {
		cpu_pause();
		cpu_lfence();
	}

	while (try_mplock() == 0) {
		cpu_pause();
		cpu_lfence();
	}

	if (bootverbose) {
		kprintf("Active CPU Mask: %016jx\n",
			(uintmax_t)CPUMASK_LOWMASK(smp_active_mask));
	}
}

SYSINIT(finishsmp, SI_BOOT2_FINISH_SMP, SI_ORDER_FIRST, ap_finish, NULL);

/*
 * Interrupts must be hard-disabled by caller
 */
void
cpu_send_ipiq(int dcpu)
{
	if (CPUMASK_TESTBIT(smp_active_mask, dcpu))
		single_apic_ipi(dcpu, XIPIQ_OFFSET, APIC_DELMODE_FIXED);
}

#if 0	/* single_apic_ipi_passive() not working yet */
/*
 * Returns 0 on failure, 1 on success
 */
int
cpu_send_ipiq_passive(int dcpu)
{
	int r = 0;
	if (CPUMASK_TESTBIT(smp_active_mask, dcpu)) {
		r = single_apic_ipi_passive(dcpu, XIPIQ_OFFSET,
					    APIC_DELMODE_FIXED);
	}
	return(r);
}
#endif

static void
mp_bsp_simple_setup(void)
{
	struct mdglobaldata *gd;
	size_t ipiq_size;

	/* build our map of 'other' CPUs */
	mycpu->gd_other_cpus = smp_startup_mask;
	CPUMASK_NANDBIT(mycpu->gd_other_cpus, mycpu->gd_cpuid);

	gd = (struct mdglobaldata *)mycpu;
	gd->gd_acpi_id = CPUID_TO_ACPIID(mycpu->gd_cpuid);

	ipiq_size = sizeof(struct lwkt_ipiq) * ncpus;
	mycpu->gd_ipiq = (void *)kmem_alloc(&kernel_map, ipiq_size,
					    VM_SUBSYS_IPIQ);
	bzero(mycpu->gd_ipiq, ipiq_size);

	/* initialize arc4random. */
	arc4_init_pcpu(0);

	pmap_set_opt();

	if (cpu_feature & CPUID_TSC)
		tsc0_offset = rdtsc();
}


/*
 * CPU TOPOLOGY DETECTION FUNCTIONS
 */

/* Detect intel topology using CPUID
 * Ref: http://www.intel.com/Assets/PDF/appnote/241618.pdf, pg 41
 */
static void
detect_intel_topology(int count_htt_cores)
{
	int shift = 0;
	int ecx_index = 0;
	int core_plus_logical_bits = 0;
	int cores_per_package;
	int logical_per_package;
	int logical_per_core;
	unsigned int p[4];

	if (cpu_high >= 0xb) {
		goto FUNC_B;

	} else if (cpu_high >= 0x4) {
		goto FUNC_4;

	} else {
		core_bits = 0;
		for (shift = 0; (1 << shift) < count_htt_cores; ++shift)
			;
		logical_CPU_bits = 1 << shift;
		return;
	}

FUNC_B:
	cpuid_count(0xb, FUNC_B_THREAD_LEVEL, p);

	/* if 0xb not supported - fallback to 0x4 */
	if (p[1] == 0 || (FUNC_B_TYPE(p[2]) != FUNC_B_THREAD_TYPE)) {
		goto FUNC_4;
	}

	logical_CPU_bits = FUNC_B_BITS_SHIFT_NEXT_LEVEL(p[0]);

	ecx_index = FUNC_B_THREAD_LEVEL + 1;
	do {
		cpuid_count(0xb, ecx_index, p);

		/* Check for the Core type in the implemented sub leaves. */
		if (FUNC_B_TYPE(p[2]) == FUNC_B_CORE_TYPE) {
			core_plus_logical_bits = FUNC_B_BITS_SHIFT_NEXT_LEVEL(p[0]);
			break;
		}

		ecx_index++;

	} while (FUNC_B_TYPE(p[2]) != FUNC_B_INVALID_TYPE);

	core_bits = core_plus_logical_bits - logical_CPU_bits;

	return;

FUNC_4:
	cpuid_count(0x4, 0, p);
	cores_per_package = FUNC_4_MAX_CORE_NO(p[0]) + 1;

	logical_per_package = count_htt_cores;
	logical_per_core = logical_per_package / cores_per_package;

	for (shift = 0; (1 << shift) < logical_per_core; ++shift)
		;
	logical_CPU_bits = shift;

	for (shift = 0; (1 << shift) < cores_per_package; ++shift)
		;
	core_bits = shift;

	return;
}
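
/*
 * Illustrative example (added, not taken from a specific cpu): on a part
 * with 2 threads per core and 8 cores per package, CPUID leaf 0xb reports a
 * shift of 1 at the SMT level and a shift of 4 at the core level, so
 * logical_CPU_bits = 1 and core_bits = 4 - 1 = 3.  APIC IDs then carry the
 * thread in bit 0, the core in bits 1-3, and the package in the bits above.
 */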

/* Detect AMD topology using CPUID
 * Ref: http://support.amd.com/us/Embedded_TechDocs/25481.pdf, last page
 */
static void
detect_amd_topology(int count_htt_cores)
{
	int shift = 0;
	if ((cpu_feature & CPUID_HTT) && (amd_feature2 & AMDID2_CMP)) {
		if (cpu_procinfo2 & AMDID_COREID_SIZE) {
			core_bits = (cpu_procinfo2 & AMDID_COREID_SIZE) >>
				    AMDID_COREID_SIZE_SHIFT;
		} else {
			core_bits = (cpu_procinfo2 & AMDID_CMP_CORES) + 1;
			for (shift = 0; (1 << shift) < core_bits; ++shift)
				;
			core_bits = shift;
		}
		logical_CPU_bits = count_htt_cores >> core_bits;
		for (shift = 0; (1 << shift) < logical_CPU_bits; ++shift)
			;
		logical_CPU_bits = shift;

		kprintf("core_bits %d logical_CPU_bits %d\n",
			core_bits - logical_CPU_bits, logical_CPU_bits);

		if (amd_feature2 & AMDID2_TOPOEXT) {
			u_int p[4];	/* eax,ebx,ecx,edx */
			int nodes;

			cpuid_count(0x8000001e, 0, p);

			switch(((p[1] >> 8) & 3) + 1) {
			case 1:
				logical_CPU_bits = 0;
				break;
			case 2:
				logical_CPU_bits = 1;
				break;
			case 3:
			case 4:
				logical_CPU_bits = 2;
				break;
			}

			/*
			 * Nodes are kind of a stand-in for packages*sockets,
			 * but can be thought of in terms of Numa domains.
			 */
			nodes = ((p[2] >> 8) & 7) + 1;
			switch(nodes) {
			case 8:
			case 7:
			case 6:
			case 5:
				--core_bits;
				/* fallthrough */
			case 4:
			case 3:
				--core_bits;
				/* fallthrough */
			case 2:
				--core_bits;
				/* fallthrough */
			case 1:
				break;
			}
			core_bits -= logical_CPU_bits;
			kprintf("%d-way htt, %d Nodes, %d cores/node\n",
				(int)(((p[1] >> 8) & 3) + 1),
				nodes,
				1 << core_bits);

		}
#if 0
		if (amd_feature2 & AMDID2_TOPOEXT) {
			u_int p[4];
			int i;
			int type;
			int level;
			int share_count;

			logical_CPU_bits = 0;
			core_bits = 0;

			for (i = 0; i < 256; ++i)  {
				cpuid_count(0x8000001d, i, p);
				type = p[0] & 0x1f;
				level = (p[0] >> 5) & 0x7;
				share_count = 1 + ((p[0] >> 14) & 0xfff);

				if (type == 0)
					break;
				kprintf("Topology probe i=%2d type=%d "
					"level=%d share_count=%d\n",
					i, type, level, share_count);
				shift = 0;
				while ((1 << shift) < share_count)
					++shift;

				switch(type) {
				case 1:
					/*
					 * CPUID_TYPE_SMT
					 *
					 * Logical CPU (SMT)
					 */
					logical_CPU_bits = shift;
					break;
				case 2:
					/*
					 * CPUID_TYPE_CORE
					 *
					 * Physical subdivision of a package
					 */
					core_bits = logical_CPU_bits +
						    shift;
					break;
				case 3:
					/*
					 * CPUID_TYPE_CACHE
					 *
					 * CPU L1/L2/L3 cache
					 */
					break;
				case 4:
					/*
					 * CPUID_TYPE_PKG
					 *
					 * Package aka chip, equivalent to
					 * socket
					 */
					break;
				}
			}
		}
#endif
	} else {
		for (shift = 0; (1 << shift) < count_htt_cores; ++shift)
			;
		core_bits = shift;
		logical_CPU_bits = 0;
	}
}

static void
amd_get_compute_unit_id(void *arg)
{
	u_int regs[4];

	do_cpuid(0x8000001e, regs);
	cpu_node_t * mynode = get_cpu_node_by_cpuid(mycpuid);

	/*
	 * AMD - CPUID Specification September 2010
	 * page 34 - //ComputeUnitID = ebx[0:7]//
	 */
	mynode->compute_unit_id = regs[1] & 0xff;
}

int
fix_amd_topology(void)
{
	cpumask_t mask;

	if (cpu_vendor_id != CPU_VENDOR_AMD)
		return -1;
	if ((amd_feature2 & AMDID2_TOPOEXT) == 0)
		return -1;

	CPUMASK_ASSALLONES(mask);
	lwkt_cpusync_simple(mask, amd_get_compute_unit_id, NULL);

	kprintf("Compute unit IDs:\n");
	int i;
	for (i = 0; i < ncpus; i++) {
		kprintf("%d-%d; \n",
			i, get_cpu_node_by_cpuid(i)->compute_unit_id);
	}
	return 0;
}

/*
 * Calculate
 * - logical_CPU_bits
 * - core_bits
 * With the values above (for AMD or INTEL) we are able to generally
 * detect the CPU topology (number of cores for each level):
 * Ref: http://wiki.osdev.org/Detecting_CPU_Topology_(80x86)
 * Ref: http://www.multicoreinfo.com/research/papers/whitepapers/Intel-detect-topology.pdf
 */
void
detect_cpu_topology(void)
{
	static int topology_detected = 0;
	int count = 0;

	if (topology_detected)
		goto OUT;
	if ((cpu_feature & CPUID_HTT) == 0) {
		core_bits = 0;
		logical_CPU_bits = 0;
		goto OUT;
	}
	count = (cpu_procinfo & CPUID_HTT_CORES) >> CPUID_HTT_CORE_SHIFT;

	if (cpu_vendor_id == CPU_VENDOR_INTEL)
		detect_intel_topology(count);
	else if (cpu_vendor_id == CPU_VENDOR_AMD)
		detect_amd_topology(count);
	topology_detected = 1;

OUT:
	if (bootverbose) {
		kprintf("Bits within APICID: logical_CPU_bits: %d; "
			"core_bits: %d\n",
			logical_CPU_bits, core_bits);
	}
}

/*
 * Interface functions to calculate chip_ID,
 * core_number and logical_number
 * Ref: http://wiki.osdev.org/Detecting_CPU_Topology_(80x86)
 */
int
get_chip_ID(int cpuid)
{
	return get_apicid_from_cpuid(cpuid) >>
	    (logical_CPU_bits + core_bits);
}

int
get_chip_ID_from_APICID(int apicid)
{
	return apicid >> (logical_CPU_bits + core_bits);
}

int
get_core_number_within_chip(int cpuid)
{
	return ((get_apicid_from_cpuid(cpuid) >> logical_CPU_bits) &
		((1 << core_bits) - 1));
}

int
get_logical_CPU_number_within_core(int cpuid)
{
	return (get_apicid_from_cpuid(cpuid) &
		((1 << logical_CPU_bits) - 1));
}
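
/*
 * Added worked example for the interface functions above: with
 * logical_CPU_bits == 1 and core_bits == 3, an APIC ID of 0x35 (0b0110101)
 * decodes as logical cpu 1 within the core (bit 0), core 2 within the chip
 * (bits 1-3 == 0b010), and chip ID 3 (the remaining high bits, 0x35 >> 4).
 */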