/*
 * Copyright (c) 1996, by Steve Passe
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the developer may NOT be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/i386/mp_machdep.c,v 1.115.2.15 2003/03/14 21:22:35 jhb Exp $
 */

#include "opt_cpu.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/cons.h>	/* cngetc() */
#include <sys/machintr.h>
#include <sys/cpu_topology.h>

#include <sys/mplock2.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <sys/lock.h>
#include <vm/vm_map.h>
#include <sys/user.h>

#include <machine/smp.h>
#include <machine_base/apic/apicreg.h>
#include <machine/atomic.h>
#include <machine/cpufunc.h>
#include <machine/cputypes.h>
#include <machine_base/apic/lapic.h>
#include <machine_base/apic/ioapic.h>
#include <machine_base/acpica/acpi_md_cpu.h>
#include <machine/psl.h>
#include <machine/segments.h>
#include <machine/tss.h>
#include <machine/specialreg.h>
#include <machine/globaldata.h>
#include <machine/pmap_inval.h>
#include <machine/clock.h>

#include <machine/md_var.h>		/* setidt() */
#include <machine_base/icu/icu.h>	/* IPIs */
#include <machine_base/icu/icu_var.h>
#include <machine_base/apic/ioapic_abi.h>
#include <machine/intr_machdep.h>	/* IPIs */

#define WARMBOOT_TARGET		0
#define WARMBOOT_OFF		(KERNBASE + 0x0467)
#define WARMBOOT_SEG		(KERNBASE + 0x0469)

#define CMOS_REG		(0x70)
#define CMOS_DATA		(0x71)
#define BIOS_RESET		(0x0f)
#define BIOS_WARM		(0x0a)
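
/*
 * Background (standard PC architecture, for reference): writing BIOS_WARM
 * (0x0a) to CMOS shutdown status register 0x0f makes the BIOS, on the next
 * processor RESET, jump through the warm-boot vector stored at physical
 * 0x467 (offset) and 0x469 (segment) instead of performing a full POST.
 * WARMBOOT_OFF/WARMBOOT_SEG above address that vector through the kernel
 * mapping.
 */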

/*
 * this code MUST be enabled here and in mpboot.s.
 * it follows the very early stages of AP boot by placing values in CMOS ram.
 * it NORMALLY will never be needed and thus the primitive method for enabling.
 *
 */
#if defined(CHECK_POINTS)
#define CHECK_READ(A)	 (outb(CMOS_REG, (A)), inb(CMOS_DATA))
#define CHECK_WRITE(A,D) (outb(CMOS_REG, (A)), outb(CMOS_DATA, (D)))

#define CHECK_INIT(D);				\
	CHECK_WRITE(0x34, (D));			\
	CHECK_WRITE(0x35, (D));			\
	CHECK_WRITE(0x36, (D));			\
	CHECK_WRITE(0x37, (D));			\
	CHECK_WRITE(0x38, (D));			\
	CHECK_WRITE(0x39, (D));

#define CHECK_PRINT(S);				\
	kprintf("%s: %d, %d, %d, %d, %d, %d\n",	\
	   (S),					\
	   CHECK_READ(0x34),			\
	   CHECK_READ(0x35),			\
	   CHECK_READ(0x36),			\
	   CHECK_READ(0x37),			\
	   CHECK_READ(0x38),			\
	   CHECK_READ(0x39));

#else				/* CHECK_POINTS */

#define CHECK_INIT(D)
#define CHECK_PRINT(S)

#endif				/* CHECK_POINTS */

/*
 * Values to send to the POST hardware.
 */
#define MP_BOOTADDRESS_POST	0x10
#define MP_PROBE_POST		0x11
#define MPTABLE_PASS1_POST	0x12

#define MP_START_POST		0x13
#define MP_ENABLE_POST		0x14
#define MPTABLE_PASS2_POST	0x15

#define START_ALL_APS_POST	0x16
#define INSTALL_AP_TRAMP_POST	0x17
#define START_AP_POST		0x18

#define MP_ANNOUNCE_POST	0x19

/** XXX FIXME: where does this really belong, isa.h/isa.c perhaps? */
int	current_postcode;

/** XXX FIXME: what system files declare these??? */

extern int naps;

int64_t tsc0_offset;
extern int64_t tsc_offsets[];

/* AP uses this during bootstrap.  Do not staticize.  */
char *bootSTK;
static int bootAP;

struct pcb stoppcbs[MAXCPU];

extern inthand_t IDTVEC(fast_syscall), IDTVEC(fast_syscall32);

/*
 * Local data and functions.
 */

static u_int	boot_address;
static int	mp_finish;
static int	mp_finish_lapic;

static int	start_all_aps(u_int boot_addr);
#if 0
static void	install_ap_tramp(u_int boot_addr);
#endif
static int	start_ap(struct mdglobaldata *gd, u_int boot_addr, int smibest);
static int	smitest(void);
static void	mp_bsp_simple_setup(void);

/* which cpus have been started */
static cpumask_t smp_startup_mask = CPUMASK_INITIALIZER_ONLYONE;
/* which cpus have had their lapic initialized */
static cpumask_t smp_lapic_mask = CPUMASK_INITIALIZER_ONLYONE;
/* which cpus are ready for IPIs etc? */
cpumask_t smp_active_mask = CPUMASK_INITIALIZER_ONLYONE;
cpumask_t smp_finalize_mask = CPUMASK_INITIALIZER_ONLYONE;

SYSCTL_OPAQUE(_machdep, OID_AUTO, smp_active, CTLFLAG_RD,
	      &smp_active_mask, sizeof(smp_active_mask), "LU", "");
static u_int	bootMP_size;
static u_int	report_invlpg_src;
SYSCTL_INT(_machdep, OID_AUTO, report_invlpg_src, CTLFLAG_RW,
	   &report_invlpg_src, 0, "");
static u_int	report_invltlb_src;
SYSCTL_INT(_machdep, OID_AUTO, report_invltlb_src, CTLFLAG_RW,
	   &report_invltlb_src, 0, "");
static int	optimized_invltlb;
SYSCTL_INT(_machdep, OID_AUTO, optimized_invltlb, CTLFLAG_RW,
	   &optimized_invltlb, 0, "");
static int	all_but_self_ipi_enable = 1;
SYSCTL_INT(_machdep, OID_AUTO, all_but_self_ipi_enable, CTLFLAG_RW,
	   &all_but_self_ipi_enable, 0, "");

/* Local data for detecting CPU TOPOLOGY */
static int core_bits = 0;
static int logical_CPU_bits = 0;


/*
 * Calculate usable address in base memory for AP trampoline code.
 */
u_int
mp_bootaddress(u_int basemem)
{
	POSTCODE(MP_BOOTADDRESS_POST);

	bootMP_size = mptramp_end - mptramp_start;
	boot_address = trunc_page(basemem * 1024); /* round down to 4k boundary */
	if (((basemem * 1024) - boot_address) < bootMP_size)
		boot_address -= PAGE_SIZE;	/* not enough, lower by 4k */
	/* 3 levels of page table pages */
	mptramp_pagetables = boot_address - (PAGE_SIZE * 3);

	return mptramp_pagetables;
}
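
/*
 * Worked example (illustrative numbers, not taken from a real probe):
 * with basemem = 639KB, basemem * 1024 = 0x9fc00 and trunc_page() yields
 * boot_address = 0x9f000.  If the trampoline is larger than the 0xc00
 * bytes of slack, boot_address drops to 0x9e000, and the three page
 * table pages then occupy 0x9b000-0x9dfff just below it.
 */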

/*
 * Print various information about the SMP system hardware and setup.
 */
void
mp_announce(void)
{
	int	x;

	POSTCODE(MP_ANNOUNCE_POST);

	kprintf("DragonFly/MP: Multiprocessor motherboard\n");
	kprintf(" cpu0 (BSP): apic id: %2d\n", CPUID_TO_APICID(0));
	for (x = 1; x <= naps; ++x)
		kprintf(" cpu%d (AP): apic id: %2d\n", x, CPUID_TO_APICID(x));

	if (!ioapic_enable)
		kprintf(" Warning: APIC I/O disabled\n");
}

/*
 * AP cpus call this to sync up protected mode.
 *
 * WARNING! %gs is not set up on entry.  This routine sets up %gs.
 */
void
init_secondary(void)
{
	int	gsel_tss;
	int	x, myid = bootAP;
	u_int64_t msr, cr0;
	struct mdglobaldata *md;
	struct privatespace *ps;

	ps = CPU_prvspace[myid];

	gdt_segs[GPROC0_SEL].ssd_base = (long)&ps->common_tss;
	ps->mdglobaldata.mi.gd_prvspace = ps;

	/* We fill the 32-bit segment descriptors */
	for (x = 0; x < NGDT; x++) {
		if (x != GPROC0_SEL && x != (GPROC0_SEL + 1))
			ssdtosd(&gdt_segs[x], &gdt[myid * NGDT + x]);
	}
	/* And now a 64-bit one */
	ssdtosyssd(&gdt_segs[GPROC0_SEL],
	    (struct system_segment_descriptor *)&gdt[myid * NGDT + GPROC0_SEL]);

	r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
	r_gdt.rd_base = (long) &gdt[myid * NGDT];
	lgdt(&r_gdt);			/* does magic intra-segment return */

	/* lgdt() destroys the GSBASE value, so we load GSBASE after lgdt() */
	wrmsr(MSR_FSBASE, 0);		/* User value */
	wrmsr(MSR_GSBASE, (u_int64_t)ps);
	wrmsr(MSR_KGSBASE, 0);	/* XXX User value while we're in the kernel */

	lidt(&r_idt_arr[mdcpu->mi.gd_cpuid]);

#if 0
	lldt(_default_ldt);
	mdcpu->gd_currentldt = _default_ldt;
#endif

	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	gdt[myid * NGDT + GPROC0_SEL].sd_type = SDT_SYSTSS;

	md = mdcpu;	/* loaded through %gs:0 (mdglobaldata.mi.gd_prvspace)*/

	/*
	 * TSS entry point for interrupts, traps, and exceptions
	 * (sans NMI).  This will always go to near the top of the pcpu
	 * trampoline area.  Hardware-pushed data will be copied into
	 * the trap-frame on entry, and (if necessary) returned to the
	 * trampoline on exit.
	 *
	 * We store some pcb data for the trampoline code above the
	 * stack the cpu hw pushes into, and arrange things so the
	 * address of tr_pcb_rsp is the same as the desired top of
	 * stack.
	 */
	ps->common_tss.tss_rsp0 = (register_t)&ps->trampoline.tr_pcb_rsp;
	ps->trampoline.tr_pcb_rsp = ps->common_tss.tss_rsp0;
	ps->trampoline.tr_pcb_gs_kernel = (register_t)md;
	ps->trampoline.tr_pcb_cr3 = KPML4phys;	/* adj to user cr3 live */
	ps->dbltramp.tr_pcb_gs_kernel = (register_t)md;
	ps->dbltramp.tr_pcb_cr3 = KPML4phys;
	ps->dbgtramp.tr_pcb_gs_kernel = (register_t)md;
	ps->dbgtramp.tr_pcb_cr3 = KPML4phys;

#if 0 /* JG XXX */
	ps->common_tss.tss_ioopt = (sizeof ps->common_tss) << 16;
#endif
	md->gd_tss_gdt = &gdt[myid * NGDT + GPROC0_SEL];
	md->gd_common_tssd = *md->gd_tss_gdt;

	/* double fault stack */
	ps->common_tss.tss_ist1 = (register_t)&ps->dbltramp.tr_pcb_rsp;
	ps->common_tss.tss_ist2 = (register_t)&ps->dbgtramp.tr_pcb_rsp;

	ltr(gsel_tss);

	/*
	 * Set to a known state:
	 *	Set by mpboot.s: CR0_PG, CR0_PE
	 *	Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
	 */
	cr0 = rcr0();
	cr0 &= ~(CR0_CD | CR0_NW | CR0_EM);
	load_cr0(cr0);

	/* Set up the fast syscall stuff */
	msr = rdmsr(MSR_EFER) | EFER_SCE;
	wrmsr(MSR_EFER, msr);
	wrmsr(MSR_LSTAR, (u_int64_t)IDTVEC(fast_syscall));
	wrmsr(MSR_CSTAR, (u_int64_t)IDTVEC(fast_syscall32));
	msr = ((u_int64_t)GSEL(GCODE_SEL, SEL_KPL) << 32) |
	      ((u_int64_t)GSEL(GUCODE32_SEL, SEL_UPL) << 48);
	wrmsr(MSR_STAR, msr);
	wrmsr(MSR_SF_MASK, PSL_NT|PSL_T|PSL_I|PSL_C|PSL_D|PSL_IOPL);
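
	/*
	 * Layout note (architectural, for reference): MSR_STAR bits 47:32
	 * supply the kernel %cs/%ss selectors loaded by SYSCALL, and bits
	 * 63:48 supply the selector base SYSRET uses to derive the user
	 * %cs/%ss, which is why the 32-bit user code selector is packed
	 * into the high word above.
	 */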

	pmap_set_opt();		/* PSE/4MB pages, etc */
	pmap_init_pat();	/* Page Attribute Table */

	/* set up CPU registers and state */
	cpu_setregs();

	/* set up SSE/NX registers */
	initializecpu(myid);

	/* set up FPU state on the AP */
	npxinit();

	/* If BSP is in the X2APIC mode, put the AP into the X2APIC mode. */
	if (x2apic_enable)
		lapic_x2apic_enter(FALSE);

	/* disable the APIC, just to be SURE */
	LAPIC_WRITE(svr, (LAPIC_READ(svr) & ~APIC_SVR_ENABLE));
}

/*******************************************************************
 * local functions and data
 */

/*
 * Start the SMP system
 */
static void
mp_start_aps(void *dummy __unused)
{
	if (lapic_enable) {
		/* start each Application Processor */
		start_all_aps(boot_address);
	} else {
		mp_bsp_simple_setup();
	}
}
SYSINIT(startaps, SI_BOOT2_START_APS, SI_ORDER_FIRST, mp_start_aps, NULL);

/*
 * start each AP in our list
 */
static int
start_all_aps(u_int boot_addr)
{
	vm_offset_t va = boot_address + KERNBASE;
	u_int64_t *pt4, *pt3, *pt2;
	int	pssize;
	int	x, i;
	int	shift;
	int	smicount;
	int	smibest;
	int	smilast;
	u_char	mpbiosreason;
	u_long	mpbioswarmvec;
	struct mdglobaldata *gd;
	struct privatespace *ps;
	size_t	ipiq_size;

	POSTCODE(START_ALL_APS_POST);

	/* install the AP 1st level boot code */
	pmap_kenter(va, boot_address);
	cpu_invlpg((void *)va);		/* JG XXX */
	bcopy(mptramp_start, (void *)va, bootMP_size);

	/* Locate the page tables, they'll be below the trampoline */
	pt4 = (u_int64_t *)(uintptr_t)(mptramp_pagetables + KERNBASE);
	pt3 = pt4 + (PAGE_SIZE) / sizeof(u_int64_t);
	pt2 = pt3 + (PAGE_SIZE) / sizeof(u_int64_t);

	/* Create the initial 1GB replicated page tables */
	for (i = 0; i < 512; i++) {
		/* Each slot of the level 4 pages points to the same level 3 page */
		pt4[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables + PAGE_SIZE);
		pt4[i] |= kernel_pmap.pmap_bits[PG_V_IDX] |
		    kernel_pmap.pmap_bits[PG_RW_IDX] |
		    kernel_pmap.pmap_bits[PG_U_IDX];

		/* Each slot of the level 3 pages points to the same level 2 page */
		pt3[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables + (2 * PAGE_SIZE));
		pt3[i] |= kernel_pmap.pmap_bits[PG_V_IDX] |
		    kernel_pmap.pmap_bits[PG_RW_IDX] |
		    kernel_pmap.pmap_bits[PG_U_IDX];

		/* The level 2 page slots are mapped with 2MB pages for 1GB. */
		pt2[i] = i * (2 * 1024 * 1024);
		pt2[i] |= kernel_pmap.pmap_bits[PG_V_IDX] |
		    kernel_pmap.pmap_bits[PG_RW_IDX] |
		    kernel_pmap.pmap_bits[PG_PS_IDX] |
		    kernel_pmap.pmap_bits[PG_U_IDX];
	}
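
	/*
	 * Illustration: since every pt4 and pt3 slot aliases the same
	 * next-level page, the upper VA bits are effectively ignored and
	 * any address resolves through pt2[(va >> 21) & 511], i.e. the
	 * low 1GB of physical memory is identity-mapped no matter which
	 * pml4/pdp slots the trampoline's early page table walk uses.
	 */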

	/* save the current value of the warm-start vector */
	mpbioswarmvec = *((u_int32_t *) WARMBOOT_OFF);
	outb(CMOS_REG, BIOS_RESET);
	mpbiosreason = inb(CMOS_DATA);

	/* setup a vector to our boot code */
	*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
	*((volatile u_short *) WARMBOOT_SEG) = (boot_address >> 4);
	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, BIOS_WARM);	/* 'warm-start' */

	/*
	 * If we have a TSC we can figure out the SMI interrupt rate.
	 * The SMI does not necessarily use a constant rate.  Spend
	 * up to 250ms trying to figure it out.
	 */
	smibest = 0;
	if (cpu_feature & CPUID_TSC) {
		set_apic_timer(275000);
		smilast = read_apic_timer();
		for (x = 0; x < 20 && read_apic_timer(); ++x) {
			smicount = smitest();
			if (smibest == 0 || smilast - smicount < smibest)
				smibest = smilast - smicount;
			smilast = smicount;
		}
		if (smibest > 250000)
			smibest = 0;
	}
	if (smibest)
		kprintf("SMI Frequency (worst case): %d Hz (%d us)\n",
			1000000 / smibest, smibest);

	/* start each AP */
	for (x = 1; x <= naps; ++x) {
		/* This is a bit verbose, it will go away soon.  */

		pssize = sizeof(struct privatespace);
		ps = (void *)kmem_alloc3(&kernel_map, pssize, VM_SUBSYS_GD,
					 KM_CPU(x));
		CPU_prvspace[x] = ps;
#if 0
		kprintf("ps %d %p %d\n", x, ps, pssize);
#endif
		bzero(ps, pssize);
		gd = &ps->mdglobaldata;
		gd->mi.gd_prvspace = ps;

		/* prime data page for it to use */
		mi_gdinit(&gd->mi, x);
		cpu_gdinit(gd, x);
		ipiq_size = sizeof(struct lwkt_ipiq) * (naps + 1);
		gd->mi.gd_ipiq = (void *)kmem_alloc3(&kernel_map, ipiq_size,
						     VM_SUBSYS_IPIQ, KM_CPU(x));
		bzero(gd->mi.gd_ipiq, ipiq_size);

		gd->gd_acpi_id = CPUID_TO_ACPIID(gd->mi.gd_cpuid);

		/* initialize arc4random. */
		arc4_init_pcpu(x);

		/* setup a vector to our boot code */
		*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
		*((volatile u_short *) WARMBOOT_SEG) = (boot_addr >> 4);
		outb(CMOS_REG, BIOS_RESET);
		outb(CMOS_DATA, BIOS_WARM);	/* 'warm-start' */

		/*
		 * Setup the AP boot stack
		 */
		bootSTK = &ps->idlestack[UPAGES * PAGE_SIZE - PAGE_SIZE];
		bootAP = x;

		/* attempt to start the Application Processor */
		CHECK_INIT(99);	/* setup checkpoints */
		if (!start_ap(gd, boot_addr, smibest)) {
			kprintf("\nAP #%d (PHY# %d) failed!\n",
				x, CPUID_TO_APICID(x));
			CHECK_PRINT("trace");	/* show checkpoints */
			/* better panic as the AP may be running loose */
			kprintf("panic y/n? [y] ");
			cnpoll(TRUE);
			if (cngetc() != 'n')
				panic("bye-bye");
			cnpoll(FALSE);
		}
		CHECK_PRINT("trace");	/* show checkpoints */
	}

	/* set ncpus to 1 + highest logical cpu.  Not all may have come up */
	ncpus = x;

	for (shift = 0; (1 << shift) <= ncpus; ++shift)
		;
	--shift;

	/* ncpus_fit -- ncpus rounded up to the nearest power of 2 */
	if ((1 << shift) < ncpus)
		++shift;
	ncpus_fit = 1 << shift;
	ncpus_fit_mask = ncpus_fit - 1;
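
	/*
	 * Worked example: for ncpus = 6 the loop exits with shift = 3,
	 * the decrement leaves shift = 2, and since (1 << 2) < 6 the
	 * shift is bumped back to 3, giving ncpus_fit = 8 and
	 * ncpus_fit_mask = 7.  An exact power of two such as ncpus = 4
	 * yields ncpus_fit = 4 unchanged.
	 */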

	/* build our map of 'other' CPUs */
	mycpu->gd_other_cpus = smp_startup_mask;
	CPUMASK_NANDBIT(mycpu->gd_other_cpus, mycpu->gd_cpuid);

	gd = (struct mdglobaldata *)mycpu;
	gd->gd_acpi_id = CPUID_TO_ACPIID(mycpu->gd_cpuid);

	ipiq_size = sizeof(struct lwkt_ipiq) * ncpus;
	mycpu->gd_ipiq = (void *)kmem_alloc3(&kernel_map, ipiq_size,
					     VM_SUBSYS_IPIQ, KM_CPU(0));
	bzero(mycpu->gd_ipiq, ipiq_size);

	/* initialize arc4random. */
	arc4_init_pcpu(0);

	/* restore the warmstart vector */
	*(u_long *) WARMBOOT_OFF = mpbioswarmvec;
	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, mpbiosreason);

	/*
	 * NOTE!  The idlestack for the BSP was setup by locore.  Finish
	 * up, clean out the P==V mapping we did earlier.
	 */
	pmap_set_opt();

	/*
	 * Wait for all APs to finish initializing the LAPIC
	 */
	if (bootverbose)
		kprintf("SMP: Waiting for APs to finish LAPIC initialization\n");
	if (cpu_feature & CPUID_TSC)
		tsc0_offset = rdtsc();
	tsc_offsets[0] = 0;
	mp_finish_lapic = 1;
	rel_mplock();

	while (CPUMASK_CMPMASKNEQ(smp_lapic_mask, smp_startup_mask)) {
		cpu_pause();
		cpu_lfence();
		if (cpu_feature & CPUID_TSC)
			tsc0_offset = rdtsc();
	}
	while (try_mplock() == 0) {
		cpu_pause();
		cpu_lfence();
	}

	/* number of APs actually started */
	return ncpus - 1;
}


/*
 * load the 1st level AP boot code into base memory.
 */

/* targets for relocation */
extern void bigJump(void);
extern void bootCodeSeg(void);
extern void bootDataSeg(void);
extern void MPentry(void);
extern u_int MP_GDT;
extern u_int mp_gdtbase;

#if 0

static void
install_ap_tramp(u_int boot_addr)
{
	int	x;
	int	size = *(int *) ((u_long) & bootMP_size);
	u_char *src = (u_char *) ((u_long) bootMP);
	u_char *dst = (u_char *) boot_addr + KERNBASE;
	u_int	boot_base = (u_int) bootMP;
	u_int8_t *dst8;
	u_int16_t *dst16;
	u_int32_t *dst32;

	POSTCODE(INSTALL_AP_TRAMP_POST);

	for (x = 0; x < size; ++x)
		*dst++ = *src++;

	/*
	 * modify addresses in code we just moved to basemem.  unfortunately we
	 * need fairly detailed info about mpboot.s for this to work.  changes
	 * to mpboot.s might require changes here.
	 */

	/* boot code is located in KERNEL space */
	dst = (u_char *) boot_addr + KERNBASE;

	/* modify the lgdt arg */
	dst32 = (u_int32_t *) (dst + ((u_int) & mp_gdtbase - boot_base));
	*dst32 = boot_addr + ((u_int) & MP_GDT - boot_base);

	/* modify the ljmp target for MPentry() */
	dst32 = (u_int32_t *) (dst + ((u_int) bigJump - boot_base) + 1);
	*dst32 = ((u_int) MPentry - KERNBASE);

	/* modify the target for boot code segment */
	dst16 = (u_int16_t *) (dst + ((u_int) bootCodeSeg - boot_base));
	dst8 = (u_int8_t *) (dst16 + 1);
	*dst16 = (u_int) boot_addr & 0xffff;
	*dst8 = ((u_int) boot_addr >> 16) & 0xff;

	/* modify the target for boot data segment */
	dst16 = (u_int16_t *) (dst + ((u_int) bootDataSeg - boot_base));
	dst8 = (u_int8_t *) (dst16 + 1);
	*dst16 = (u_int) boot_addr & 0xffff;
	*dst8 = ((u_int) boot_addr >> 16) & 0xff;
}

#endif

/*
 * This function starts the AP (application processor) identified
 * by the APIC ID 'physicalCpu'.  It does quite a "song and dance"
 * to accomplish this.  This is necessary because of the nuances
 * of the different hardware we might encounter.  It ain't pretty,
 * but it seems to work.
 *
 * NOTE: eventually an AP gets to ap_init(), which is called just
 * before the AP goes into the LWKT scheduler's idle loop.
 */
static int
start_ap(struct mdglobaldata *gd, u_int boot_addr, int smibest)
{
	int	physical_cpu;
	int	vector;

	POSTCODE(START_AP_POST);

	/* get the PHYSICAL APIC ID# */
	physical_cpu = CPUID_TO_APICID(gd->mi.gd_cpuid);

	/* calculate the vector (the AP starts executing at vector << 12) */
	vector = (boot_addr >> 12) & 0xff;

	/* We don't want anything interfering */
	cpu_disable_intr();

	/* Make sure the target cpu sees everything */
	wbinvd();

	/*
	 * Try to detect when a SMI has occurred, wait up to 200ms.
	 *
	 * If a SMI occurs during an AP reset but before we issue
	 * the STARTUP command, the AP may brick.  To work around
	 * this problem we hold off doing the AP startup until
	 * after we have detected the SMI.  Hopefully another SMI
	 * will not occur before we finish the AP startup.
	 *
	 * Retries don't seem to help.  SMIs have a window of opportunity
	 * and if USB->legacy keyboard emulation is enabled in the BIOS
	 * the interrupt rate can be quite high.
	 *
	 * NOTE: Don't worry about the L1 cache load, it might bloat
	 *	 ldelta a little but ndelta will be so huge when the SMI
	 *	 occurs the detection logic will still work fine.
	 */
	if (smibest) {
		set_apic_timer(200000);
		smitest();
	}

	/*
	 * First we do an INIT/RESET IPI.  This INIT IPI might be run,
	 * resetting and running the target CPU.  OR this INIT IPI might
	 * be latched (P5 bug), with the CPU waiting for a STARTUP IPI.
	 * OR this INIT IPI might be ignored.
	 *
	 * see apic/apicreg.h for icr bit definitions.
	 *
	 * TIME CRITICAL CODE, DO NOT DO ANY KPRINTFS IN THE HOT PATH.
	 */

	/*
	 * Do an INIT IPI: assert RESET
	 *
	 * Use edge triggered mode to assert INIT
	 */
	lapic_seticr_sync(physical_cpu,
	    APIC_DESTMODE_PHY |
	    APIC_DEST_DESTFLD |
	    APIC_TRIGMOD_EDGE |
	    APIC_LEVEL_ASSERT |
	    APIC_DELMODE_INIT);

	/*
	 * The spec calls for a 10ms delay but we may have to use a
	 * MUCH lower delay to avoid bricking an AP due to a fast SMI
	 * interrupt.  We have other loops here too and dividing by 2
	 * doesn't seem to be enough even after subtracting 350us,
	 * so we divide by 4.
	 *
	 * Our minimum delay is 150uS, maximum is 10ms.  If no SMI
	 * interrupt was detected we use the full 10ms.
	 */
	if (smibest == 0)
		u_sleep(10000);
	else if (smibest < 150 * 4 + 350)
		u_sleep(150);
	else if ((smibest - 350) / 4 < 10000)
		u_sleep((smibest - 350) / 4);
	else
		u_sleep(10000);

	/*
	 * Do an INIT IPI: deassert RESET
	 *
	 * Use level triggered mode to deassert.  It is unclear
	 * why we need to do this.
	 */
	lapic_seticr_sync(physical_cpu,
	    APIC_DESTMODE_PHY |
	    APIC_DEST_DESTFLD |
	    APIC_TRIGMOD_LEVEL |
	    APIC_LEVEL_DEASSERT |
	    APIC_DELMODE_INIT);
	u_sleep(150);				/* wait 150us */

	/*
	 * Next we do a STARTUP IPI: the previous INIT IPI might still be
	 * latched (P5 bug), in which case this 1st STARTUP would terminate
	 * immediately and the previously started INIT IPI would continue.
	 * OR the previous INIT IPI has already run, and this STARTUP IPI
	 * will run.  OR the previous INIT IPI was ignored, and this
	 * STARTUP IPI will run.
	 *
	 * XXX set APIC_LEVEL_ASSERT
	 */
	lapic_seticr_sync(physical_cpu,
	    APIC_DESTMODE_PHY |
	    APIC_DEST_DESTFLD |
	    APIC_DELMODE_STARTUP |
	    vector);
	u_sleep(200);				/* wait ~200uS */

	/*
	 * Finally we do a 2nd STARTUP IPI: this 2nd STARTUP IPI should
	 * run IF the previous STARTUP IPI was cancelled by a latched
	 * INIT IPI.  OR this STARTUP IPI will be ignored, as only ONE
	 * STARTUP IPI is recognized after hardware RESET or INIT IPI.
	 *
	 * XXX set APIC_LEVEL_ASSERT
	 */
	lapic_seticr_sync(physical_cpu,
	    APIC_DESTMODE_PHY |
	    APIC_DEST_DESTFLD |
	    APIC_DELMODE_STARTUP |
	    vector);

	/* Resume normal operation */
	cpu_enable_intr();

	/* wait for it to start, see ap_init() */
	set_apic_timer(5000000);		/* == 5 seconds */
	while (read_apic_timer()) {
		if (CPUMASK_TESTBIT(smp_startup_mask, gd->mi.gd_cpuid))
			return 1;	/* return SUCCESS */
	}

	return 0;		/* return FAILURE */
}

static
int
smitest(void)
{
	int64_t	ltsc;
	int64_t	ntsc;
	int64_t	ldelta;
	int64_t	ndelta;
	int	count;

	ldelta = 0;
	ndelta = 0;
	while (read_apic_timer()) {
		ltsc = rdtsc();
		for (count = 0; count < 100; ++count)
			ntsc = rdtsc();	/* force loop to occur */
		if (ldelta) {
			ndelta = ntsc - ltsc;
			if (ldelta > ndelta)
				ldelta = ndelta;
			if (ndelta > ldelta * 2)
				break;
		} else {
			ldelta = ntsc - ltsc;
		}
	}
	return(read_apic_timer());
}

/*
 * Synchronously flush the TLB on all other CPU's.  The current cpu's
 * TLB is not flushed.  If the caller wishes to flush the current cpu's
 * TLB the caller must call cpu_invltlb() in addition to smp_invltlb().
 *
 * This routine may be called concurrently from multiple cpus.  When this
 * happens, smp_invltlb() can wind up sticking around in the confirmation
 * while() loop at the end as additional cpus are added to the global
 * cpumask, until they are acknowledged by another IPI.
 *
 * NOTE: If for some reason we were unable to start all cpus we cannot
 *	 safely use broadcast IPIs.
 */

cpumask_t smp_smurf_mask;
static cpumask_t smp_invltlb_mask;
#define LOOPRECOVER
#define LOOPMASK_IN
#ifdef LOOPMASK_IN
cpumask_t smp_in_mask;
#endif
cpumask_t smp_invmask;
extern cpumask_t smp_idleinvl_mask;
extern cpumask_t smp_idleinvl_reqs;

/*
 * Atomically OR bits in *mask to smp_smurf_mask.  Adjust *mask to remove
 * bits that do not need to be IPId.  These bits are still part of the
 * command, but the target cpus have already been signalled and do not
 * need to be signalled again.
 */
#include <sys/spinlock.h>
#include <sys/spinlock2.h>

static __noinline
void
smp_smurf_fetchset(cpumask_t *mask)
{
	cpumask_t omask;
	int i;
	__uint64_t obits;
	__uint64_t nbits;

	i = 0;
	while (i < CPUMASK_ELEMENTS) {
		obits = smp_smurf_mask.ary[i];
		cpu_ccfence();
		nbits = obits | mask->ary[i];
		if (atomic_cmpset_long(&smp_smurf_mask.ary[i], obits, nbits)) {
			omask.ary[i] = obits;
			++i;
		}
	}
	CPUMASK_NANDMASK(*mask, omask);
}

/*
 * This is a mechanism which guarantees that cpu_invltlb() will be executed
 * on idle cpus without having to signal or wake them up.  The invltlb will
 * be executed when they wake up, prior to any scheduling or interrupt
 * thread.
 *
 * (*mask) is modified to remove the cpus we successfully negotiate this
 * function with.  This function may only be used with semi-synchronous
 * commands (typically invltlb's or semi-synchronous invalidations which
 * are usually associated only with kernel memory).
 */
void
smp_smurf_idleinvlclr(cpumask_t *mask)
{
	if (optimized_invltlb) {
		ATOMIC_CPUMASK_ORMASK(smp_idleinvl_reqs, *mask);
		/* cpu_lfence() not needed */
		CPUMASK_NANDMASK(*mask, smp_idleinvl_mask);
	}
}

/*
 * Issue cpu_invltlb() across all cpus except the current cpu.
 *
 * This function will arrange to avoid idle cpus, but still guarantee that
 * invltlb is run on them when they wake up prior to any scheduling or
 * nominal interrupt.
 */
void
smp_invltlb(void)
{
	struct mdglobaldata *md = mdcpu;
	cpumask_t mask;
	unsigned long rflags;
#ifdef LOOPRECOVER
	tsc_uclock_t tsc_base = rdtsc();
	int repeats = 0;
#endif

	if (report_invltlb_src > 0) {
		if (--report_invltlb_src <= 0)
			print_backtrace(8);
	}

	/*
	 * Disallow normal interrupts, set all active cpus except our own
	 * in the global smp_invltlb_mask.
	 */
	++md->mi.gd_cnt.v_smpinvltlb;
	crit_enter_gd(&md->mi);

	/*
	 * Bits we want to set in smp_invltlb_mask.  We do not want to
	 * signal our own cpu.  Also try to remove bits associated with
	 * idle cpus that we can flag for auto-invltlb.
	 */
	mask = smp_active_mask;
	CPUMASK_NANDBIT(mask, md->mi.gd_cpuid);
	smp_smurf_idleinvlclr(&mask);

	rflags = read_rflags();
	cpu_disable_intr();
	ATOMIC_CPUMASK_ORMASK(smp_invltlb_mask, mask);

	/*
	 * IPI non-idle cpus represented by mask.  The omask calculation
	 * removes cpus from the mask which already have a Xinvltlb IPI
	 * pending (avoid double-queueing the IPI).
	 *
	 * We must disable real interrupts when setting the smurf flags or
	 * we might race a XINVLTLB before we manage to send the ipi's for
	 * the bits we set.
	 *
	 * NOTE: We are not signalling ourselves, mask already does NOT
	 * include our own cpu.
	 */
	smp_smurf_fetchset(&mask);

	/*
	 * Issue the IPI.  Note that the XINVLTLB IPI runs regardless of
	 * the critical section count on the target cpus.
	 */
	CPUMASK_ORMASK(mask, md->mi.gd_cpumask);
	if (all_but_self_ipi_enable &&
	    (all_but_self_ipi_enable >= 2 ||
	     CPUMASK_CMPMASKEQ(smp_startup_mask, mask))) {
		all_but_self_ipi(XINVLTLB_OFFSET);
	} else {
		CPUMASK_NANDMASK(mask, md->mi.gd_cpumask);
		selected_apic_ipi(mask, XINVLTLB_OFFSET, APIC_DELMODE_FIXED);
	}

	/*
	 * Wait for acknowledgement by all cpus.  smp_inval_intr() will
	 * temporarily enable interrupts to avoid deadlocking the lapic,
	 * and will also handle running cpu_invltlb() and remote invlpg
	 * commands on our cpu if some other cpu requests it of us.
	 *
	 * WARNING! I originally tried to implement this as a hard loop
	 *	    checking only smp_invltlb_mask (and issuing a local
	 *	    cpu_invltlb() if requested), with interrupts enabled
	 *	    and without calling smp_inval_intr().  This DID NOT WORK.
	 *	    It resulted in weird races where smurf bits would get
	 *	    cleared without any action being taken.
	 */
	smp_inval_intr();
	CPUMASK_ASSZERO(mask);
	while (CPUMASK_CMPMASKNEQ(smp_invltlb_mask, mask)) {
		smp_inval_intr();
		cpu_pause();
#ifdef LOOPRECOVER
		if (tsc_frequency && rdtsc() - tsc_base > tsc_frequency) {
			/*
			 * cpuid	- cpu doing the waiting
			 * invltlb_mask - IPI in progress
			 */
			kprintf("smp_invltlb %d: waited too long inv=%08jx "
				"smurf=%08jx "
#ifdef LOOPMASK_IN
				"in=%08jx "
#endif
				"idle=%08jx/%08jx\n",
				md->mi.gd_cpuid,
				smp_invltlb_mask.ary[0],
				smp_smurf_mask.ary[0],
#ifdef LOOPMASK_IN
				smp_in_mask.ary[0],
#endif
				smp_idleinvl_mask.ary[0],
				smp_idleinvl_reqs.ary[0]);
			mdcpu->gd_xinvaltlb = 0;
			ATOMIC_CPUMASK_NANDMASK(smp_smurf_mask,
						smp_invltlb_mask);
			smp_invlpg(&smp_active_mask);
			tsc_base = rdtsc();
			if (++repeats > 10) {
				kprintf("smp_invltlb: giving up\n");
				CPUMASK_ASSZERO(smp_invltlb_mask);
			}
		}
#endif
	}
	write_rflags(rflags);
	crit_exit_gd(&md->mi);
}

/*
 * Called from a critical section with interrupts hard-disabled.
 * This function issues an XINVLTLB IPI and then executes any pending
 * command on the current cpu before returning.
 */
void
smp_invlpg(cpumask_t *cmdmask)
{
	struct mdglobaldata *md = mdcpu;
	cpumask_t mask;

	if (report_invlpg_src > 0) {
		if (--report_invlpg_src <= 0)
			print_backtrace(8);
	}

	/*
	 * Disallow normal interrupts, set all active cpus in the pmap,
	 * plus our own for completion processing (it might or might not
	 * be part of the set).
	 */
	mask = smp_active_mask;
	CPUMASK_ANDMASK(mask, *cmdmask);
	CPUMASK_ORMASK(mask, md->mi.gd_cpumask);

	/*
	 * Avoid double-queuing IPIs, which can deadlock us.  We must
	 * disable real interrupts when setting the smurf flags or we
	 * might race a XINVLTLB before we manage to send the ipi's for
	 * the bits we set.
	 *
	 * NOTE: We might be including our own cpu in the smurf mask.
	 */
	smp_smurf_fetchset(&mask);

	/*
	 * Issue the IPI.  Note that the XINVLTLB IPI runs regardless of
	 * the critical section count on the target cpus.
	 *
	 * We do not include our own cpu when issuing the IPI.
	 */
	if (all_but_self_ipi_enable &&
	    (all_but_self_ipi_enable >= 2 ||
	     CPUMASK_CMPMASKEQ(smp_startup_mask, mask))) {
		all_but_self_ipi(XINVLTLB_OFFSET);
	} else {
		CPUMASK_NANDMASK(mask, md->mi.gd_cpumask);
		selected_apic_ipi(mask, XINVLTLB_OFFSET, APIC_DELMODE_FIXED);
	}

	/*
	 * This will synchronously wait for our command to complete,
	 * as well as process commands from other cpus.  It also handles
	 * reentrancy.
	 *
	 * (interrupts are disabled and we are in a critical section here)
	 */
	smp_inval_intr();
}

void
smp_sniff(void)
{
	globaldata_t gd = mycpu;
	int dummy;
	register_t rflags;

	/*
	 * Ignore all_but_self_ipi_enable here and just use it.
	 */
	rflags = read_rflags();
	cpu_disable_intr();
	all_but_self_ipi(XSNIFF_OFFSET);
	gd->gd_sample_pc = smp_sniff;
	gd->gd_sample_sp = &dummy;
	write_rflags(rflags);
}

void
cpu_sniff(int dcpu)
{
	globaldata_t rgd = globaldata_find(dcpu);
	register_t rflags;
	int dummy;

	/*
	 * Ignore all_but_self_ipi_enable here and just use it.
	 */
	rflags = read_rflags();
	cpu_disable_intr();
	single_apic_ipi(dcpu, XSNIFF_OFFSET, APIC_DELMODE_FIXED);
	rgd->gd_sample_pc = cpu_sniff;
	rgd->gd_sample_sp = &dummy;
	write_rflags(rflags);
}

/*
 * Called from Xinvltlb assembly with interrupts hard-disabled and in a
 * critical section.  gd_intr_nesting_level may or may not be bumped
 * depending on entry.
 *
 * THIS CODE IS INTENDED TO EXPLICITLY IGNORE THE CRITICAL SECTION COUNT.
 * THAT IS, THE INTERRUPT IS INTENDED TO FUNCTION EVEN WHEN MAINLINE CODE
 * IS IN A CRITICAL SECTION.
 */
void
smp_inval_intr(void)
{
	struct mdglobaldata *md = mdcpu;
	cpumask_t cpumask;
#ifdef LOOPRECOVER
	tsc_uclock_t tsc_base = rdtsc();
#endif

#if 0
	/*
	 * The idle code is in a critical section, but that doesn't stop
	 * Xinvltlb from executing, so deal with the race which can occur
	 * in that situation.  Otherwise r-m-w operations by pmap_inval_intr()
	 * may have problems.
	 */
	if (ATOMIC_CPUMASK_TESTANDCLR(smp_idleinvl_reqs, md->mi.gd_cpuid)) {
		ATOMIC_CPUMASK_NANDBIT(smp_invltlb_mask, md->mi.gd_cpuid);
		cpu_invltlb();
		cpu_mfence();
	}
#endif

	/*
	 * This is a real mess.  I'd like to just leave interrupts disabled
	 * but it can cause the lapic to deadlock if too many interrupts queue
	 * to it, due to the idiotic design of the lapic.  So instead we have
	 * to enter a critical section so normal interrupts are made pending
	 * and track whether this one was reentered.
	 */
	if (md->gd_xinvaltlb) {		/* reentrant on cpu */
		md->gd_xinvaltlb = 2;
		return;
	}
	md->gd_xinvaltlb = 1;

	/*
	 * Check only those cpus with active Xinvl* commands pending.
	 *
	 * We are going to enable interrupts so make sure we are in a
	 * critical section.  This is necessary to avoid deadlocking
	 * the lapic and to ensure that we execute our commands prior to
	 * any nominal interrupt or preemption.
	 *
	 * WARNING! It is very important that we only clear our bit in
	 *	    smp_smurf_mask once for each interrupt we take.  In
	 *	    this case, we clear it on initial entry and only loop
	 *	    on the reentrancy detect (caused by another interrupt).
	 */
	cpumask = smp_invmask;
#ifdef LOOPMASK_IN
	ATOMIC_CPUMASK_ORBIT(smp_in_mask, md->mi.gd_cpuid);
#endif
loop:
	cpu_enable_intr();
	ATOMIC_CPUMASK_NANDBIT(smp_smurf_mask, md->mi.gd_cpuid);

	/*
	 * Specific page request(s), and we can't return until all bits
	 * are zero.
	 */
	for (;;) {
		int toolong;

		/*
		 * Also execute any pending full invalidation request in
		 * this loop.
		 */
		if (CPUMASK_TESTBIT(smp_invltlb_mask, md->mi.gd_cpuid)) {
			ATOMIC_CPUMASK_NANDBIT(smp_invltlb_mask,
					       md->mi.gd_cpuid);
			cpu_invltlb();
			cpu_mfence();
		}

#ifdef LOOPRECOVER
		if (tsc_frequency && rdtsc() - tsc_base > tsc_frequency) {
			/*
			 * cpuid	- cpu doing the waiting
			 * invmask	- IPI in progress
			 * invltlb_mask - which ones are TLB invalidations?
			 */
			kprintf("smp_inval_intr %d inv=%08jx tlbm=%08jx "
				"smurf=%08jx "
#ifdef LOOPMASK_IN
				"in=%08jx "
#endif
				"idle=%08jx/%08jx\n",
				md->mi.gd_cpuid,
				smp_invmask.ary[0],
				smp_invltlb_mask.ary[0],
				smp_smurf_mask.ary[0],
#ifdef LOOPMASK_IN
				smp_in_mask.ary[0],
#endif
				smp_idleinvl_mask.ary[0],
				smp_idleinvl_reqs.ary[0]);
			tsc_base = rdtsc();
			toolong = 1;
		} else {
			toolong = 0;
		}
#else
		toolong = 0;
#endif

		/*
		 * We can only add bits to the cpumask to test during the
		 * loop because the smp_invmask bit is cleared once the
		 * originator completes the command (the targets may still
		 * be cycling their own completions in this loop, afterwards).
		 *
		 * lfence required prior to all tests as this Xinvltlb
		 * interrupt could race the originator (already be in progress
		 * when the originator decides to issue, due to an issue by
		 * another cpu).
		 */
		cpu_lfence();
		CPUMASK_ORMASK(cpumask, smp_invmask);
		/*cpumask = smp_active_mask;*/	/* XXX */
		cpu_lfence();

		if (pmap_inval_intr(&cpumask, toolong) == 0) {
			/*
			 * Clear our smurf mask to allow new IPIs, but deal
			 * with potential races.
			 */
			break;
		}

		/*
		 * Test if someone sent us another invalidation IPI, break
		 * out so we can take it to avoid deadlocking the lapic
		 * interrupt queue (? stupid intel, amd).
		 */
		if (md->gd_xinvaltlb == 2)
			break;
		/*
		if (CPUMASK_TESTBIT(smp_smurf_mask, md->mi.gd_cpuid))
			break;
		*/
	}

	/*
	 * Full invalidation request
	 */
	if (CPUMASK_TESTBIT(smp_invltlb_mask, md->mi.gd_cpuid)) {
		ATOMIC_CPUMASK_NANDBIT(smp_invltlb_mask,
				       md->mi.gd_cpuid);
		cpu_invltlb();
		cpu_mfence();
	}

	/*
	 * Check to see if another Xinvltlb interrupt occurred and loop up
	 * if it did.
	 */
	cpu_disable_intr();
	if (md->gd_xinvaltlb == 2) {
		md->gd_xinvaltlb = 1;
		goto loop;
	}
#ifdef LOOPMASK_IN
	ATOMIC_CPUMASK_NANDBIT(smp_in_mask, md->mi.gd_cpuid);
#endif
	md->gd_xinvaltlb = 0;
}

void
cpu_wbinvd_on_all_cpus_callback(void *arg)
{
	wbinvd();
}

/*
 * When called the executing CPU will send an IPI to all other CPUs
 * requesting that they halt execution.
 *
 * Usually (but not necessarily) called with 'other_cpus' as its arg.
 *
 *  - Signals all CPUs in map to stop.
 *  - Waits for each to stop.
 *
 * Returns:
 *  -1: error
 *   0: NA
 *   1: ok
 *
 * XXX FIXME: this is not MP-safe, needs a lock to prevent multiple CPUs
 *	      from executing at same time.
 */
int
stop_cpus(cpumask_t map)
{
	cpumask_t mask;

	CPUMASK_ANDMASK(map, smp_active_mask);

	/* send the Xcpustop IPI to all CPUs in map */
	selected_apic_ipi(map, XCPUSTOP_OFFSET, APIC_DELMODE_FIXED);

	do {
		mask = stopped_cpus;
		CPUMASK_ANDMASK(mask, map);
		/* spin */
	} while (CPUMASK_CMPMASKNEQ(mask, map));

	return 1;
}


/*
 * Called by a CPU to restart stopped CPUs.
 *
 * Usually (but not necessarily) called with 'stopped_cpus' as its arg.
 *
 *  - Signals all CPUs in map to restart.
 *  - Waits for each to restart.
 *
 * Returns:
 *  -1: error
 *   0: NA
 *   1: ok
 */
int
restart_cpus(cpumask_t map)
{
	cpumask_t mask;

	/* signal other cpus to restart */
	mask = map;
	CPUMASK_ANDMASK(mask, smp_active_mask);
	cpu_ccfence();
	started_cpus = mask;
	cpu_ccfence();

	/* wait for each to clear its bit */
	while (CPUMASK_CMPMASKNEQ(stopped_cpus, map))
		cpu_pause();

	return 1;
}

/*
 * This is called once the mpboot code has gotten us properly relocated
 * and the MMU turned on, etc.  ap_init() is actually the idle thread,
 * and when it returns the scheduler will call the real cpu_idle() main
 * loop for the idlethread.  Interrupts are disabled on entry and should
 * remain disabled at return.
 */
void
ap_init(void)
{
	int	cpu_id;

	/*
	 * Adjust smp_startup_mask to signal the BSP that we have started
	 * up successfully.  Note that we do not yet hold the BGL.  The BSP
	 * is waiting for our signal.
	 *
	 * We can't set our bit in smp_active_mask yet because we are holding
	 * interrupts physically disabled and remote cpus could deadlock
	 * trying to send us an IPI.
	 */
	ATOMIC_CPUMASK_ORBIT(smp_startup_mask, mycpu->gd_cpuid);
	cpu_mfence();

	/*
	 * Interlock for LAPIC initialization.  Wait until mp_finish_lapic is
	 * non-zero, then get the MP lock.
	 *
	 * Note: We are in a critical section.
	 *
	 * Note: we are the idle thread, we can only spin.
	 *
	 * Note: The load fence is memory volatile and prevents the compiler
	 * from improperly caching mp_finish_lapic, and the cpu from
	 * improperly caching it.
	 */
	while (mp_finish_lapic == 0) {
		cpu_pause();
		cpu_lfence();
	}
#if 0
	while (try_mplock() == 0) {
		cpu_pause();
		cpu_lfence();
	}
#endif

	if (cpu_feature & CPUID_TSC) {
		/*
		 * The BSP is constantly updating tsc0_offset, figure out
		 * the relative difference to synchronize ktrdump.
		 */
		tsc_offsets[mycpu->gd_cpuid] = rdtsc() - tsc0_offset;
	}

	/* BSP may have changed PTD while we're waiting for the lock */
	cpu_invltlb();

	/* Build our map of 'other' CPUs. */
	mycpu->gd_other_cpus = smp_startup_mask;
	ATOMIC_CPUMASK_NANDBIT(mycpu->gd_other_cpus, mycpu->gd_cpuid);

	/* A quick check from sanity claus */
	cpu_id = APICID_TO_CPUID(LAPIC_READID);
	if (mycpu->gd_cpuid != cpu_id) {
		kprintf("SMP: assigned cpuid = %d\n", mycpu->gd_cpuid);
		kprintf("SMP: actual cpuid = %d lapicid %d\n",
			cpu_id, LAPIC_READID);
#if 0 /* JGXXX */
		kprintf("PTD[MPPTDI] = %p\n", (void *)PTD[MPPTDI]);
#endif
		panic("cpuid mismatch! boom!!");
	}

	/* Initialize AP's local APIC for irq's */
	lapic_init(FALSE);

	/* LAPIC initialization is done */
	ATOMIC_CPUMASK_ORBIT(smp_lapic_mask, mycpu->gd_cpuid);
	cpu_mfence();

#if 0
	/* Let BSP move onto the next initialization stage */
	rel_mplock();
#endif

	/*
	 * Interlock for finalization.  Wait until mp_finish is non-zero,
	 * then get the MP lock.
	 *
	 * Note: We are in a critical section.
	 *
	 * Note: we are the idle thread, we can only spin.
	 *
	 * Note: The load fence is memory volatile and prevents the compiler
	 * from improperly caching mp_finish, and the cpu from improperly
	 * caching it.
	 */
	while (mp_finish == 0) {
		cpu_pause();
		cpu_lfence();
	}

	/* BSP may have changed PTD while we're waiting for the lock */
	cpu_invltlb();

	/* Set memory range attributes for this CPU to match the BSP */
	mem_range_AP_init();

	/*
	 * Once we go active we must process any IPIQ messages that may
	 * have been queued, because no actual IPI will occur until we
	 * set our bit in the smp_active_mask.  If we don't the IPI
	 * message interlock could be left set which would also prevent
	 * further IPIs.
	 *
	 * The idle loop doesn't expect the BGL to be held and while
	 * lwkt_switch() normally cleans things up this is a special case
	 * because we are returning almost directly into the idle loop.
	 *
	 * The idle thread is never placed on the runq, make sure
	 * nothing we've done put it there.
	 */

	/*
	 * Hold a critical section and allow real interrupts to occur.  Zero
	 * any spurious interrupts which have accumulated, then set our
	 * smp_active_mask indicating that we are fully operational.
	 */
	crit_enter();
	__asm __volatile("sti; pause; pause"::);
	bzero(mdcpu->gd_ipending, sizeof(mdcpu->gd_ipending));
	ATOMIC_CPUMASK_ORBIT(smp_active_mask, mycpu->gd_cpuid);

	/*
	 * Wait until all cpus have set their smp_active_mask and have fully
	 * operational interrupts before proceeding.
	 *
	 * We need a final cpu_invltlb() because we would not have received
	 * any until we set our bit in smp_active_mask.
	 */
	while (mp_finish == 1) {
		cpu_pause();
		cpu_lfence();
	}
	cpu_invltlb();

	/*
	 * Initialize per-cpu clocks and do other per-cpu initialization.
	 * At this point code is expected to be able to use the full kernel
	 * API.
	 */
	initclocks_pcpu();	/* clock interrupts (via IPIs) */

	/*
	 * Since we may have cleaned up the interrupt triggers, manually
	 * process any pending IPIs before exiting our critical section.
	 * Once the critical section has exited, normal interrupt processing
	 * may occur.
	 */
	atomic_swap_int(&mycpu->gd_npoll, 0);
	lwkt_process_ipiq();
	crit_exit();

	/*
	 * Final final, allow the waiting BSP to resume the boot process,
	 * return 'into' the idle thread bootstrap.
	 */
	ATOMIC_CPUMASK_ORBIT(smp_finalize_mask, mycpu->gd_cpuid);
	KKASSERT((curthread->td_flags & TDF_RUNQ) == 0);
}
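
/*
 * Startup handshake summary (restating the code above and below): each
 * AP sets its bit in smp_startup_mask; the BSP sets mp_finish_lapic;
 * each AP initializes its LAPIC and sets smp_lapic_mask; the BSP sets
 * mp_finish = 1; each AP enables interrupts and sets smp_active_mask;
 * the BSP sets mp_finish = 2; and each AP finally sets
 * smp_finalize_mask before dropping into its idle thread.
 */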

/*
 * Get SMP fully working before we start initializing devices.
 */
static
void
ap_finish(void)
{
	if (bootverbose)
		kprintf("Finish MP startup\n");
	rel_mplock();

	/*
	 * Wait for the active mask to complete, after which all cpus will
	 * be accepting interrupts.
	 */
	mp_finish = 1;
	while (CPUMASK_CMPMASKNEQ(smp_active_mask, smp_startup_mask)) {
		cpu_pause();
		cpu_lfence();
	}

	/*
	 * Wait for the finalization mask to complete, after which all cpus
	 * have completely finished initializing and are entering or are in
	 * their idle thread.
	 *
	 * BSP should have received all required invltlbs but do another
	 * one just in case.
	 */
	cpu_invltlb();
	mp_finish = 2;
	while (CPUMASK_CMPMASKNEQ(smp_finalize_mask, smp_startup_mask)) {
		cpu_pause();
		cpu_lfence();
	}

	while (try_mplock() == 0) {
		cpu_pause();
		cpu_lfence();
	}

	if (bootverbose) {
		kprintf("Active CPU Mask: %016jx\n",
			(uintmax_t)CPUMASK_LOWMASK(smp_active_mask));
	}
}

SYSINIT(finishsmp, SI_BOOT2_FINISH_SMP, SI_ORDER_FIRST, ap_finish, NULL);

/*
 * Interrupts must be hard-disabled by caller
 */
void
cpu_send_ipiq(int dcpu)
{
	if (CPUMASK_TESTBIT(smp_active_mask, dcpu))
		single_apic_ipi(dcpu, XIPIQ_OFFSET, APIC_DELMODE_FIXED);
}

#if 0	/* single_apic_ipi_passive() not working yet */
/*
 * Returns 0 on failure, 1 on success
 */
int
cpu_send_ipiq_passive(int dcpu)
{
	int r = 0;

	if (CPUMASK_TESTBIT(smp_active_mask, dcpu)) {
		r = single_apic_ipi_passive(dcpu, XIPIQ_OFFSET,
					    APIC_DELMODE_FIXED);
	}
	return(r);
}
#endif

static void
mp_bsp_simple_setup(void)
{
	struct mdglobaldata *gd;
	size_t ipiq_size;

	/* build our map of 'other' CPUs */
	mycpu->gd_other_cpus = smp_startup_mask;
	CPUMASK_NANDBIT(mycpu->gd_other_cpus, mycpu->gd_cpuid);

	gd = (struct mdglobaldata *)mycpu;
	gd->gd_acpi_id = CPUID_TO_ACPIID(mycpu->gd_cpuid);

	ipiq_size = sizeof(struct lwkt_ipiq) * ncpus;
	mycpu->gd_ipiq = (void *)kmem_alloc(&kernel_map, ipiq_size,
					    VM_SUBSYS_IPIQ);
	bzero(mycpu->gd_ipiq, ipiq_size);

	/* initialize arc4random. */
	arc4_init_pcpu(0);

	pmap_set_opt();

	if (cpu_feature & CPUID_TSC)
		tsc0_offset = rdtsc();
}


/*
 * CPU TOPOLOGY DETECTION FUNCTIONS
 */

/* Detect intel topology using CPUID
 * Ref: http://www.intel.com/Assets/PDF/appnote/241618.pdf, pg 41
 */
static void
detect_intel_topology(int count_htt_cores)
{
	int shift = 0;
	int ecx_index = 0;
	int core_plus_logical_bits = 0;
	int cores_per_package;
	int logical_per_package;
	int logical_per_core;
	unsigned int p[4];

	if (cpu_high >= 0xb) {
		goto FUNC_B;

	} else if (cpu_high >= 0x4) {
		goto FUNC_4;

	} else {
		core_bits = 0;
		for (shift = 0; (1 << shift) < count_htt_cores; ++shift)
			;
		logical_CPU_bits = 1 << shift;
		return;
	}

FUNC_B:
	cpuid_count(0xb, FUNC_B_THREAD_LEVEL, p);

	/* if 0xb not supported - fallback to 0x4 */
	if (p[1] == 0 || (FUNC_B_TYPE(p[2]) != FUNC_B_THREAD_TYPE)) {
		goto FUNC_4;
	}

	logical_CPU_bits = FUNC_B_BITS_SHIFT_NEXT_LEVEL(p[0]);

	ecx_index = FUNC_B_THREAD_LEVEL + 1;
	do {
		cpuid_count(0xb, ecx_index, p);

		/* Check for the Core type in the implemented sub leaves. */
		if (FUNC_B_TYPE(p[2]) == FUNC_B_CORE_TYPE) {
			core_plus_logical_bits =
			    FUNC_B_BITS_SHIFT_NEXT_LEVEL(p[0]);
			break;
		}

		ecx_index++;

	} while (FUNC_B_TYPE(p[2]) != FUNC_B_INVALID_TYPE);

	core_bits = core_plus_logical_bits - logical_CPU_bits;

	return;
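
	/*
	 * Worked example (illustrative): a package with 4 cores x 2 SMT
	 * threads reports a shift of 1 at the SMT level of leaf 0xb
	 * (logical_CPU_bits = 1) and a shift of 3 at the core level
	 * (core_plus_logical_bits = 3), so core_bits = 3 - 1 = 2.
	 */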

FUNC_4:
	cpuid_count(0x4, 0, p);
	cores_per_package = FUNC_4_MAX_CORE_NO(p[0]) + 1;

	logical_per_package = count_htt_cores;
	logical_per_core = logical_per_package / cores_per_package;

	for (shift = 0; (1 << shift) < logical_per_core; ++shift)
		;
	logical_CPU_bits = shift;

	for (shift = 0; (1 << shift) < cores_per_package; ++shift)
		;
	core_bits = shift;

	return;
}

/* Detect AMD topology using CPUID
 * Ref: http://support.amd.com/us/Embedded_TechDocs/25481.pdf, last page
 */
static void
detect_amd_topology(int count_htt_cores)
{
	int shift = 0;

	if ((cpu_feature & CPUID_HTT) && (amd_feature2 & AMDID2_CMP)) {
		if (cpu_procinfo2 & AMDID_COREID_SIZE) {
			core_bits = (cpu_procinfo2 & AMDID_COREID_SIZE) >>
				    AMDID_COREID_SIZE_SHIFT;
		} else {
			core_bits = (cpu_procinfo2 & AMDID_CMP_CORES) + 1;
			for (shift = 0; (1 << shift) < core_bits; ++shift)
				;
			core_bits = shift;
		}
		logical_CPU_bits = count_htt_cores >> core_bits;
		for (shift = 0; (1 << shift) < logical_CPU_bits; ++shift)
			;
		logical_CPU_bits = shift;

		kprintf("core_bits %d logical_CPU_bits %d\n",
			core_bits - logical_CPU_bits, logical_CPU_bits);

		if (amd_feature2 & AMDID2_TOPOEXT) {
			u_int p[4];	/* eax,ebx,ecx,edx */
			int nodes;

			cpuid_count(0x8000001e, 0, p);

			switch(((p[1] >> 8) & 3) + 1) {
			case 1:
				logical_CPU_bits = 0;
				break;
			case 2:
				logical_CPU_bits = 1;
				break;
			case 3:
			case 4:
				logical_CPU_bits = 2;
				break;
			}

			/*
			 * Nodes are kind of a stand-in for packages*sockets,
			 * but can be thought of in terms of Numa domains.
			 */
			nodes = ((p[2] >> 8) & 7) + 1;
			switch(nodes) {
			case 8:
			case 7:
			case 6:
			case 5:
				--core_bits;
				/* fallthrough */
			case 4:
			case 3:
				--core_bits;
				/* fallthrough */
			case 2:
				--core_bits;
				/* fallthrough */
			case 1:
				break;
			}
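
			/*
			 * Example (illustrative): a 4-node system falls
			 * from case 4 through case 2, subtracting 2 from
			 * core_bits; i.e. the switch above removes
			 * log2(nodes), rounded up, converting package-wide
			 * core bits into per-node core bits.
			 */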
			core_bits -= logical_CPU_bits;
			kprintf("%d-way htt, %d Nodes, %d cores/node\n",
				(int)(((p[1] >> 8) & 3) + 1),
				nodes,
				1 << core_bits);

		}
#if 0
		if (amd_feature2 & AMDID2_TOPOEXT) {
			u_int p[4];
			int i;
			int type;
			int level;
			int share_count;

			logical_CPU_bits = 0;
			core_bits = 0;

			for (i = 0; i < 256; ++i)  {
				cpuid_count(0x8000001d, i, p);
				type = p[0] & 0x1f;
				level = (p[0] >> 5) & 0x7;
				share_count = 1 + ((p[0] >> 14) & 0xfff);

				if (type == 0)
					break;
				kprintf("Topology probe i=%2d type=%d "
					"level=%d share_count=%d\n",
					i, type, level, share_count);
				shift = 0;
				while ((1 << shift) < share_count)
					++shift;

				switch(type) {
				case 1:
					/*
					 * CPUID_TYPE_SMT
					 *
					 * Logical CPU (SMT)
					 */
					logical_CPU_bits = shift;
					break;
				case 2:
					/*
					 * CPUID_TYPE_CORE
					 *
					 * Physical subdivision of a package
					 */
					core_bits = logical_CPU_bits +
						    shift;
					break;
				case 3:
					/*
					 * CPUID_TYPE_CACHE
					 *
					 * CPU L1/L2/L3 cache
					 */
					break;
				case 4:
					/*
					 * CPUID_TYPE_PKG
					 *
					 * Package aka chip, equivalent to
					 * socket
					 */
					break;
				}
			}
		}
#endif
	} else {
		for (shift = 0; (1 << shift) < count_htt_cores; ++shift)
			;
		core_bits = shift;
		logical_CPU_bits = 0;
	}
}

static void
amd_get_compute_unit_id(void *arg)
{
	u_int regs[4];

	do_cpuid(0x8000001e, regs);
	cpu_node_t *mynode = get_cpu_node_by_cpuid(mycpuid);

	/*
	 * AMD - CPUID Specification September 2010
	 * page 34 - //ComputeUnitID = ebx[0:7]//
	 */
	mynode->compute_unit_id = regs[1] & 0xff;
}

int
fix_amd_topology(void)
{
	cpumask_t mask;
	int i;

	if (cpu_vendor_id != CPU_VENDOR_AMD)
		return -1;
	if ((amd_feature2 & AMDID2_TOPOEXT) == 0)
		return -1;

	CPUMASK_ASSALLONES(mask);
	lwkt_cpusync_simple(mask, amd_get_compute_unit_id, NULL);

	kprintf("Compute unit IDs:\n");
	for (i = 0; i < ncpus; i++) {
		kprintf("%d-%d; \n",
			i, get_cpu_node_by_cpuid(i)->compute_unit_id);
	}
	return 0;
}

/*
 * Calculate
 * - logical_CPU_bits
 * - core_bits
 * With the values above (for AMD or INTEL) we are able to generally
 * detect the CPU topology (number of cores for each level):
 * Ref: http://wiki.osdev.org/Detecting_CPU_Topology_(80x86)
 * Ref: http://www.multicoreinfo.com/research/papers/whitepapers/Intel-detect-topology.pdf
 */
void
detect_cpu_topology(void)
{
	static int topology_detected = 0;
	int count = 0;

	if (topology_detected)
		goto OUT;
	if ((cpu_feature & CPUID_HTT) == 0) {
		core_bits = 0;
		logical_CPU_bits = 0;
		goto OUT;
	}
	count = (cpu_procinfo & CPUID_HTT_CORES) >> CPUID_HTT_CORE_SHIFT;

	if (cpu_vendor_id == CPU_VENDOR_INTEL)
		detect_intel_topology(count);
	else if (cpu_vendor_id == CPU_VENDOR_AMD)
		detect_amd_topology(count);
	topology_detected = 1;

OUT:
	if (bootverbose) {
		kprintf("Bits within APICID: logical_CPU_bits: %d; "
			"core_bits: %d\n",
			logical_CPU_bits, core_bits);
	}
}

/*
 * Interface functions to calculate chip_ID,
 * core_number and logical_number
 * Ref: http://wiki.osdev.org/Detecting_CPU_Topology_(80x86)
 */
int
get_chip_ID(int cpuid)
{
	return get_apicid_from_cpuid(cpuid) >>
	    (logical_CPU_bits + core_bits);
}

int
get_chip_ID_from_APICID(int apicid)
{
	return apicid >> (logical_CPU_bits + core_bits);
}

int
get_core_number_within_chip(int cpuid)
{
	return ((get_apicid_from_cpuid(cpuid) >> logical_CPU_bits) &
		((1 << core_bits) - 1));
}

int
get_logical_CPU_number_within_core(int cpuid)
{
	return (get_apicid_from_cpuid(cpuid) &
		((1 << logical_CPU_bits) - 1));
}
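
/*
 * Worked example (illustrative): with logical_CPU_bits = 1 and
 * core_bits = 2, APIC ID 13 (0b1101) decomposes into chip
 * 13 >> 3 = 1, core (13 >> 1) & 3 = 2, and logical CPU 13 & 1 = 1.
 */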