/*
 * Copyright (c) 1996, by Steve Passe
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the developer may NOT be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/i386/mp_machdep.c,v 1.115.2.15 2003/03/14 21:22:35 jhb Exp $
 */

#include "opt_cpu.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/cons.h>           /* cngetc() */
#include <sys/machintr.h>
#include <sys/cpu_topology.h>

#include <sys/mplock2.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <sys/lock.h>
#include <vm/vm_map.h>

#include <machine/smp.h>
#include <machine_base/apic/apicreg.h>
#include <machine/atomic.h>
#include <machine/cpufunc.h>
#include <machine/cputypes.h>
#include <machine_base/apic/lapic.h>
#include <machine_base/apic/ioapic.h>
#include <machine_base/acpica/acpi_md_cpu.h>
#include <machine/psl.h>
#include <machine/segments.h>
#include <machine/tss.h>
#include <machine/specialreg.h>
#include <machine/globaldata.h>
#include <machine/pmap_inval.h>
#include <machine/clock.h>

#include <machine/md_var.h>             /* setidt() */
#include <machine_base/icu/icu.h>       /* IPIs */
#include <machine_base/icu/icu_var.h>
#include <machine_base/apic/ioapic_abi.h>
#include <machine/intr_machdep.h>       /* IPIs */

#define WARMBOOT_TARGET         0
#define WARMBOOT_OFF            (KERNBASE + 0x0467)
#define WARMBOOT_SEG            (KERNBASE + 0x0469)

#define CMOS_REG                (0x70)
#define CMOS_DATA               (0x71)
#define BIOS_RESET              (0x0f)
#define BIOS_WARM               (0x0a)

/*
 * this code MUST be enabled here and in mpboot.s.
 * it follows the very early stages of AP boot by placing values in CMOS ram.
 * it NORMALLY will never be needed and thus the primitive method for enabling.
 *
 */
#if defined(CHECK_POINTS)
#define CHECK_READ(A)    (outb(CMOS_REG, (A)), inb(CMOS_DATA))
#define CHECK_WRITE(A,D) (outb(CMOS_REG, (A)), outb(CMOS_DATA, (D)))

#define CHECK_INIT(D);                          \
        CHECK_WRITE(0x34, (D));                 \
        CHECK_WRITE(0x35, (D));                 \
        CHECK_WRITE(0x36, (D));                 \
        CHECK_WRITE(0x37, (D));                 \
        CHECK_WRITE(0x38, (D));                 \
        CHECK_WRITE(0x39, (D));

#define CHECK_PRINT(S);                         \
        kprintf("%s: %d, %d, %d, %d, %d, %d\n", \
           (S),                                 \
           CHECK_READ(0x34),                    \
           CHECK_READ(0x35),                    \
           CHECK_READ(0x36),                    \
           CHECK_READ(0x37),                    \
           CHECK_READ(0x38),                    \
           CHECK_READ(0x39));

#else                           /* CHECK_POINTS */

#define CHECK_INIT(D)
#define CHECK_PRINT(S)

#endif                          /* CHECK_POINTS */

/*
 * Values to send to the POST hardware.
 */
#define MP_BOOTADDRESS_POST     0x10
#define MP_PROBE_POST           0x11
#define MPTABLE_PASS1_POST      0x12

#define MP_START_POST           0x13
#define MP_ENABLE_POST          0x14
#define MPTABLE_PASS2_POST      0x15

#define START_ALL_APS_POST      0x16
#define INSTALL_AP_TRAMP_POST   0x17
#define START_AP_POST           0x18

#define MP_ANNOUNCE_POST        0x19

/** XXX FIXME: where does this really belong, isa.h/isa.c perhaps? */
int     current_postcode;

/** XXX FIXME: what system files declare these??? */

extern int naps;
extern int _udatasel;

int64_t tsc0_offset;
extern int64_t tsc_offsets[];

/* AP uses this during bootstrap.  Do not staticize.  */
char *bootSTK;
static int bootAP;

struct pcb stoppcbs[MAXCPU];

extern inthand_t IDTVEC(fast_syscall), IDTVEC(fast_syscall32);

/*
 * Local data and functions.
 */

static u_int    boot_address;
static int      mp_finish;
static int      mp_finish_lapic;

static int      start_all_aps(u_int boot_addr);
#if 0
static void     install_ap_tramp(u_int boot_addr);
#endif
static int      start_ap(struct mdglobaldata *gd, u_int boot_addr, int smibest);
static int      smitest(void);
static void     mp_bsp_simple_setup(void);

/* which cpus have been started */
static cpumask_t smp_startup_mask = CPUMASK_INITIALIZER_ONLYONE;
/* which cpus have had their lapic initialized */
static cpumask_t smp_lapic_mask = CPUMASK_INITIALIZER_ONLYONE;
/* which cpus are ready for IPIs etc? */
cpumask_t smp_active_mask = CPUMASK_INITIALIZER_ONLYONE;
cpumask_t smp_finalize_mask = CPUMASK_INITIALIZER_ONLYONE;

SYSCTL_OPAQUE(_machdep, OID_AUTO, smp_active, CTLFLAG_RD,
              &smp_active_mask, sizeof(smp_active_mask), "LU", "");
static u_int    bootMP_size;
static u_int    report_invlpg_src;
SYSCTL_INT(_machdep, OID_AUTO, report_invlpg_src, CTLFLAG_RW,
        &report_invlpg_src, 0, "");
static u_int    report_invltlb_src;
SYSCTL_INT(_machdep, OID_AUTO, report_invltlb_src, CTLFLAG_RW,
        &report_invltlb_src, 0, "");
static int      optimized_invltlb;
SYSCTL_INT(_machdep, OID_AUTO, optimized_invltlb, CTLFLAG_RW,
        &optimized_invltlb, 0, "");
static int      all_but_self_ipi_enable = 1;
SYSCTL_INT(_machdep, OID_AUTO, all_but_self_ipi_enable, CTLFLAG_RW,
        &all_but_self_ipi_enable, 0, "");

/* Local data for detecting CPU TOPOLOGY */
static int core_bits = 0;
static int logical_CPU_bits = 0;


/*
 * Calculate usable address in base memory for AP trampoline code.
 */
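/*
 * The trampoline code itself is copied to boot_address (the page at the
 * top of usable base memory), and start_all_aps() below builds three
 * bootstrap page table pages immediately beneath it, so the layout is
 * roughly:
 *
 *      mptramp_pagetables + 0*PAGE_SIZE        level 4 page
 *      mptramp_pagetables + 1*PAGE_SIZE        level 3 page
 *      mptramp_pagetables + 2*PAGE_SIZE        level 2 page (2MB mappings)
 *      mptramp_pagetables + 3*PAGE_SIZE        == boot_address, trampoline
 */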
u_int
mp_bootaddress(u_int basemem)
{
        POSTCODE(MP_BOOTADDRESS_POST);

        bootMP_size = mptramp_end - mptramp_start;
        boot_address = trunc_page(basemem * 1024); /* round down to 4k boundary */
        if (((basemem * 1024) - boot_address) < bootMP_size)
                boot_address -= PAGE_SIZE;      /* not enough, lower by 4k */
        /* 3 levels of page table pages */
        mptramp_pagetables = boot_address - (PAGE_SIZE * 3);

        return mptramp_pagetables;
}

/*
 * Print various information about the SMP system hardware and setup.
 */
void
mp_announce(void)
{
        int     x;

        POSTCODE(MP_ANNOUNCE_POST);

        kprintf("DragonFly/MP: Multiprocessor motherboard\n");
        kprintf(" cpu0 (BSP): apic id: %2d\n", CPUID_TO_APICID(0));
        for (x = 1; x <= naps; ++x)
                kprintf(" cpu%d (AP): apic id: %2d\n", x, CPUID_TO_APICID(x));

        if (!ioapic_enable)
                kprintf(" Warning: APIC I/O disabled\n");
}

/*
 * AP cpus call this to sync up protected mode.
 *
 * WARNING! %gs is not set up on entry.  This routine sets up %gs.
 */
void
init_secondary(void)
{
        int     gsel_tss;
        int     x, myid = bootAP;
        u_int64_t msr, cr0;
        struct mdglobaldata *md;
        struct privatespace *ps;

        ps = CPU_prvspace[myid];

        gdt_segs[GPROC0_SEL].ssd_base = (long)&ps->common_tss;
        ps->mdglobaldata.mi.gd_prvspace = ps;

        /* We fill the 32-bit segment descriptors */
        for (x = 0; x < NGDT; x++) {
                if (x != GPROC0_SEL && x != (GPROC0_SEL + 1))
                        ssdtosd(&gdt_segs[x], &gdt[myid * NGDT + x]);
        }
        /* And now a 64-bit one */
        ssdtosyssd(&gdt_segs[GPROC0_SEL],
            (struct system_segment_descriptor *)&gdt[myid * NGDT + GPROC0_SEL]);

        r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
        r_gdt.rd_base = (long) &gdt[myid * NGDT];
        lgdt(&r_gdt);                   /* does magic intra-segment return */

        /* lgdt() destroys the GSBASE value, so we load GSBASE after lgdt() */
        wrmsr(MSR_FSBASE, 0);           /* User value */
        wrmsr(MSR_GSBASE, (u_int64_t)ps);
        wrmsr(MSR_KGSBASE, 0);          /* XXX User value while we're in the kernel */

        lidt(&r_idt_arr[mdcpu->mi.gd_cpuid]);

        load_ds(_udatasel);
        load_es(_udatasel);
        load_fs(_udatasel);

#if 0
        lldt(_default_ldt);
        mdcpu->gd_currentldt = _default_ldt;
#endif

        gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
        gdt[myid * NGDT + GPROC0_SEL].sd_type = SDT_SYSTSS;

        md = mdcpu;     /* loaded through %gs:0 (mdglobaldata.mi.gd_prvspace)*/

        /*
         * TSS entry point for interrupts, traps, and exceptions
         * (sans NMI).  This will always go to near the top of the pcpu
         * trampoline area.  Hardware-pushed data will be copied into
         * the trap-frame on entry, and (if necessary) returned to the
         * trampoline on exit.
         *
         * We store some pcb data for the trampoline code above the
         * stack the cpu hw pushes into, and arrange things so the
         * address of tr_pcb_rsp is the same as the desired top of
         * stack.
         */
        ps->common_tss.tss_rsp0 = (register_t)&ps->trampoline.tr_pcb_rsp;
        ps->trampoline.tr_pcb_rsp = ps->common_tss.tss_rsp0;
        ps->trampoline.tr_pcb_gs_kernel = (register_t)md;
        ps->trampoline.tr_pcb_cr3 = KPML4phys; /* adj to user cr3 live */
        ps->dbltramp.tr_pcb_gs_kernel = (register_t)md;
        ps->dbltramp.tr_pcb_cr3 = KPML4phys;
        ps->dbgtramp.tr_pcb_gs_kernel = (register_t)md;
        ps->dbgtramp.tr_pcb_cr3 = KPML4phys;

#if 0 /* JG XXX */
        ps->common_tss.tss_ioopt = (sizeof ps->common_tss) << 16;
#endif
        md->gd_tss_gdt = &gdt[myid * NGDT + GPROC0_SEL];
        md->gd_common_tssd = *md->gd_tss_gdt;

        /* double fault stack */
        ps->common_tss.tss_ist1 = (register_t)&ps->dbltramp.tr_pcb_rsp;
        ps->common_tss.tss_ist2 = (register_t)&ps->dbgtramp.tr_pcb_rsp;

        ltr(gsel_tss);

        /*
         * Set to a known state:
         *      Set by mpboot.s:        CR0_PG, CR0_PE
         *      Set by cpu_setregs:     CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
         */
        cr0 = rcr0();
        cr0 &= ~(CR0_CD | CR0_NW | CR0_EM);
        load_cr0(cr0);

        /* Set up the fast syscall stuff */
        msr = rdmsr(MSR_EFER) | EFER_SCE;
        wrmsr(MSR_EFER, msr);
        wrmsr(MSR_LSTAR, (u_int64_t)IDTVEC(fast_syscall));
        wrmsr(MSR_CSTAR, (u_int64_t)IDTVEC(fast_syscall32));
        msr = ((u_int64_t)GSEL(GCODE_SEL, SEL_KPL) << 32) |
              ((u_int64_t)GSEL(GUCODE32_SEL, SEL_UPL) << 48);
        wrmsr(MSR_STAR, msr);
        wrmsr(MSR_SF_MASK, PSL_NT|PSL_T|PSL_I|PSL_C|PSL_D|PSL_IOPL|PSL_AC);

        pmap_set_opt();         /* PSE/4MB pages, etc */
        pmap_init_pat();        /* Page Attribute Table */

        /* set up CPU registers and state */
        cpu_setregs();

        /* set up SSE/NX registers */
        initializecpu(myid);

        /* set up FPU state on the AP */
        npxinit();

        /*
         * If the BSP is in x2APIC mode, put the AP into x2APIC mode as well.
         */
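        /*
         * The AP presumably has to use the same LAPIC access mode as the
         * BSP: x2APIC is MSR-based with 32-bit APIC IDs, while the legacy
         * xAPIC window is MMIO-based, so a mode mismatch would break the
         * LAPIC_READ/LAPIC_WRITE accessors and IPI addressing used below.
         */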
        if (x2apic_enable)
                lapic_x2apic_enter(FALSE);

        /* disable the APIC, just to be SURE */
        LAPIC_WRITE(svr, (LAPIC_READ(svr) & ~APIC_SVR_ENABLE));
}

/*******************************************************************
 * local functions and data
 */

/*
 * Start the SMP system
 */
static void
mp_start_aps(void *dummy __unused)
{
        if (lapic_enable) {
                /* start each Application Processor */
                start_all_aps(boot_address);
        } else {
                mp_bsp_simple_setup();
        }
}
SYSINIT(startaps, SI_BOOT2_START_APS, SI_ORDER_FIRST, mp_start_aps, NULL);

/*
 * start each AP in our list
 */
static int
start_all_aps(u_int boot_addr)
{
        vm_offset_t va = boot_address + KERNBASE;
        u_int64_t *pt4, *pt3, *pt2;
        int     pssize;
        int     x, i;
        int     shift;
        int     smicount;
        int     smibest;
        int     smilast;
        u_char  mpbiosreason;
        u_long  mpbioswarmvec;
        struct mdglobaldata *gd;
        struct privatespace *ps;
        size_t ipiq_size;

        POSTCODE(START_ALL_APS_POST);

        /* install the AP 1st level boot code */
        pmap_kenter(va, boot_address);
        cpu_invlpg((void *)va);         /* JG XXX */
        bcopy(mptramp_start, (void *)va, bootMP_size);

        /* Locate the page tables, they'll be below the trampoline */
        pt4 = (u_int64_t *)(uintptr_t)(mptramp_pagetables + KERNBASE);
        pt3 = pt4 + (PAGE_SIZE) / sizeof(u_int64_t);
        pt2 = pt3 + (PAGE_SIZE) / sizeof(u_int64_t);

        /* Create the initial 1GB replicated page tables */
        for (i = 0; i < 512; i++) {
                /* Each slot of the level 4 pages points to the same level 3 page */
                pt4[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables + PAGE_SIZE);
                pt4[i] |= kernel_pmap.pmap_bits[PG_V_IDX] |
                    kernel_pmap.pmap_bits[PG_RW_IDX] |
                    kernel_pmap.pmap_bits[PG_U_IDX];

                /* Each slot of the level 3 pages points to the same level 2 page */
                pt3[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables + (2 * PAGE_SIZE));
                pt3[i] |= kernel_pmap.pmap_bits[PG_V_IDX] |
                    kernel_pmap.pmap_bits[PG_RW_IDX] |
                    kernel_pmap.pmap_bits[PG_U_IDX];

                /* The level 2 page slots are mapped with 2MB pages for 1GB. */
                pt2[i] = i * (2 * 1024 * 1024);
                pt2[i] |= kernel_pmap.pmap_bits[PG_V_IDX] |
                    kernel_pmap.pmap_bits[PG_RW_IDX] |
                    kernel_pmap.pmap_bits[PG_PS_IDX] |
                    kernel_pmap.pmap_bits[PG_U_IDX];
        }

        /* save the current value of the warm-start vector */
        mpbioswarmvec = *((u_int32_t *) WARMBOOT_OFF);
        outb(CMOS_REG, BIOS_RESET);
        mpbiosreason = inb(CMOS_DATA);

        /* setup a vector to our boot code */
        *((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
        *((volatile u_short *) WARMBOOT_SEG) = (boot_address >> 4);
        outb(CMOS_REG, BIOS_RESET);
        outb(CMOS_DATA, BIOS_WARM);     /* 'warm-start' */

        /*
         * If we have a TSC we can figure out the SMI interrupt rate.
         * The SMI does not necessarily use a constant rate.  Spend
         * up to 250ms trying to figure it out.
         */
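        /*
         * smitest() (below) spins reading the TSC and tracks the smallest
         * gap between successive reads; a sudden, much larger gap is taken
         * to be an SMI.  The loop below runs it repeatedly against a 275ms
         * APIC timer window so that smibest ends up holding the shortest
         * observed interval between SMIs, in microseconds (0 if nothing
         * could be measured).
         */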
        smibest = 0;
        if (cpu_feature & CPUID_TSC) {
                set_apic_timer(275000);
                smilast = read_apic_timer();
                for (x = 0; x < 20 && read_apic_timer(); ++x) {
                        smicount = smitest();
                        if (smibest == 0 || smilast - smicount < smibest)
                                smibest = smilast - smicount;
                        smilast = smicount;
                }
                if (smibest > 250000)
                        smibest = 0;
        }
        if (smibest)
                kprintf("SMI Frequency (worst case): %d Hz (%d us)\n",
                        1000000 / smibest, smibest);

        /* start each AP */
        for (x = 1; x <= naps; ++x) {
                /* This is a bit verbose, it will go away soon.  */

                pssize = sizeof(struct privatespace);
                ps = (void *)kmem_alloc3(&kernel_map, pssize, VM_SUBSYS_GD,
                                         KM_CPU(x));
                CPU_prvspace[x] = ps;
#if 0
                kprintf("ps %d %p %d\n", x, ps, pssize);
#endif
                bzero(ps, pssize);
                gd = &ps->mdglobaldata;
                gd->mi.gd_prvspace = ps;

                /* prime data page for it to use */
                mi_gdinit(&gd->mi, x);
                cpu_gdinit(gd, x);
                ipiq_size = sizeof(struct lwkt_ipiq) * (naps + 1);
                gd->mi.gd_ipiq = (void *)kmem_alloc3(&kernel_map, ipiq_size,
                                                     VM_SUBSYS_IPIQ, KM_CPU(x));
                bzero(gd->mi.gd_ipiq, ipiq_size);

                gd->gd_acpi_id = CPUID_TO_ACPIID(gd->mi.gd_cpuid);

                /* initialize arc4random. */
                arc4_init_pcpu(x);

                /* setup a vector to our boot code */
                *((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
                *((volatile u_short *) WARMBOOT_SEG) = (boot_addr >> 4);
                outb(CMOS_REG, BIOS_RESET);
                outb(CMOS_DATA, BIOS_WARM);     /* 'warm-start' */

                /*
                 * Setup the AP boot stack
                 */
                bootSTK = &ps->idlestack[UPAGES * PAGE_SIZE - PAGE_SIZE];
                bootAP = x;

                /* attempt to start the Application Processor */
                CHECK_INIT(99); /* setup checkpoints */
                if (!start_ap(gd, boot_addr, smibest)) {
                        kprintf("\nAP #%d (PHY# %d) failed!\n",
                                x, CPUID_TO_APICID(x));
                        CHECK_PRINT("trace");   /* show checkpoints */
                        /* better panic as the AP may be running loose */
                        kprintf("panic y/n? [y] ");
                        cnpoll(TRUE);
                        if (cngetc() != 'n')
                                panic("bye-bye");
                        cnpoll(FALSE);
                }
                CHECK_PRINT("trace");           /* show checkpoints */
        }

        /* set ncpus to 1 + highest logical cpu.  Not all may have come up */
        ncpus = x;

        for (shift = 0; (1 << shift) <= ncpus; ++shift)
                ;
        --shift;

        /* ncpus_fit -- ncpus rounded up to the nearest power of 2 */
        if ((1 << shift) < ncpus)
                ++shift;
        ncpus_fit = 1 << shift;
        ncpus_fit_mask = ncpus_fit - 1;

        /* build our map of 'other' CPUs */
        mycpu->gd_other_cpus = smp_startup_mask;
        CPUMASK_NANDBIT(mycpu->gd_other_cpus, mycpu->gd_cpuid);

        malloc_reinit_ncpus();

        gd = (struct mdglobaldata *)mycpu;
        gd->gd_acpi_id = CPUID_TO_ACPIID(mycpu->gd_cpuid);

        ipiq_size = sizeof(struct lwkt_ipiq) * ncpus;
        mycpu->gd_ipiq = (void *)kmem_alloc3(&kernel_map, ipiq_size,
                                             VM_SUBSYS_IPIQ, KM_CPU(0));
        bzero(mycpu->gd_ipiq, ipiq_size);

        /* initialize arc4random. */
        arc4_init_pcpu(0);

        /* restore the warmstart vector */
        *(u_long *) WARMBOOT_OFF = mpbioswarmvec;
        outb(CMOS_REG, BIOS_RESET);
        outb(CMOS_DATA, mpbiosreason);

        /*
         * NOTE! The idlestack for the BSP was set up by locore.  Finish
         * up, clean out the P==V mapping we did earlier.
         */
        pmap_set_opt();

        /*
         * Wait for all APs to finish initializing their LAPIC.
         */
        if (bootverbose)
                kprintf("SMP: Waiting for APs to finish LAPIC initialization\n");
        if (cpu_feature & CPUID_TSC)
                tsc0_offset = rdtsc();
        tsc_offsets[0] = 0;
        mp_finish_lapic = 1;
        rel_mplock();

        while (CPUMASK_CMPMASKNEQ(smp_lapic_mask, smp_startup_mask)) {
                cpu_pause();
                cpu_lfence();
                if (cpu_feature & CPUID_TSC)
                        tsc0_offset = rdtsc();
        }
        while (try_mplock() == 0) {
                cpu_pause();
                cpu_lfence();
        }

        /* number of APs actually started */
        return ncpus - 1;
}


/*
 * load the 1st level AP boot code into base memory.
 */

/* targets for relocation */
extern void bigJump(void);
extern void bootCodeSeg(void);
extern void bootDataSeg(void);
extern void MPentry(void);
extern u_int MP_GDT;
extern u_int mp_gdtbase;

#if 0

static void
install_ap_tramp(u_int boot_addr)
{
        int     x;
        int     size = *(int *) ((u_long) & bootMP_size);
        u_char *src = (u_char *) ((u_long) bootMP);
        u_char *dst = (u_char *) boot_addr + KERNBASE;
        u_int   boot_base = (u_int) bootMP;
        u_int8_t *dst8;
        u_int16_t *dst16;
        u_int32_t *dst32;

        POSTCODE(INSTALL_AP_TRAMP_POST);

        for (x = 0; x < size; ++x)
                *dst++ = *src++;

        /*
         * modify addresses in code we just moved to basemem.  unfortunately we
         * need fairly detailed info about mpboot.s for this to work.  changes
         * to mpboot.s might require changes here.
         */

        /* boot code is located in KERNEL space */
        dst = (u_char *) boot_addr + KERNBASE;

        /* modify the lgdt arg */
        dst32 = (u_int32_t *) (dst + ((u_int) & mp_gdtbase - boot_base));
        *dst32 = boot_addr + ((u_int) & MP_GDT - boot_base);

        /* modify the ljmp target for MPentry() */
        dst32 = (u_int32_t *) (dst + ((u_int) bigJump - boot_base) + 1);
        *dst32 = ((u_int) MPentry - KERNBASE);

        /* modify the target for boot code segment */
        dst16 = (u_int16_t *) (dst + ((u_int) bootCodeSeg - boot_base));
        dst8 = (u_int8_t *) (dst16 + 1);
        *dst16 = (u_int) boot_addr & 0xffff;
        *dst8 = ((u_int) boot_addr >> 16) & 0xff;

        /* modify the target for boot data segment */
        dst16 = (u_int16_t *) (dst + ((u_int) bootDataSeg - boot_base));
        dst8 = (u_int8_t *) (dst16 + 1);
        *dst16 = (u_int) boot_addr & 0xffff;
        *dst8 = ((u_int) boot_addr >> 16) & 0xff;
}

#endif

/*
 * This function starts the AP (application processor) identified
 * by the APIC ID 'physicalCpu'.  It does quite a "song and dance"
 * to accomplish this.  This is necessary because of the nuances
 * of the different hardware we might encounter.  It ain't pretty,
 * but it seems to work.
 *
 * NOTE: eventually an AP gets to ap_init(), which is called just
 *       before the AP goes into the LWKT scheduler's idle loop.
 */
static int
start_ap(struct mdglobaldata *gd, u_int boot_addr, int smibest)
{
        int     physical_cpu;
        int     vector;

        POSTCODE(START_AP_POST);

        /* get the PHYSICAL APIC ID# */
        physical_cpu = CPUID_TO_APICID(gd->mi.gd_cpuid);

        /* calculate the vector */
        vector = (boot_addr >> 12) & 0xff;

        /* We don't want anything interfering */
        cpu_disable_intr();

        /* Make sure the target cpu sees everything */
        wbinvd();

        /*
         * Try to detect when an SMI has occurred, wait up to 200ms.
         *
         * If an SMI occurs during an AP reset but before we issue
         * the STARTUP command, the AP may brick.  To work around
         * this problem we hold off doing the AP startup until
         * after we have detected the SMI.  Hopefully another SMI
         * will not occur before we finish the AP startup.
         *
         * Retries don't seem to help.  SMIs have a window of opportunity
         * and if USB->legacy keyboard emulation is enabled in the BIOS
         * the interrupt rate can be quite high.
         *
         * NOTE: Don't worry about the L1 cache load, it might bloat
         *       ldelta a little but ndelta will be so huge when the SMI
         *       occurs the detection logic will still work fine.
         */
        if (smibest) {
                set_apic_timer(200000);
                smitest();
        }

        /*
         * First we do an INIT/RESET IPI.  This INIT IPI might be run,
         * resetting and running the target CPU.  OR this INIT IPI might
         * be latched (P5 bug), with the CPU waiting for a STARTUP IPI.
         * OR this INIT IPI might be ignored.
         *
         * see apic/apicreg.h for icr bit definitions.
         *
         * TIME CRITICAL CODE, DO NOT DO ANY KPRINTFS IN THE HOT PATH.
         */

        /*
         * Do an INIT IPI: assert RESET
         *
         * Use edge triggered mode to assert INIT
         */
        lapic_seticr_sync(physical_cpu,
            APIC_DESTMODE_PHY |
            APIC_DEST_DESTFLD |
            APIC_TRIGMOD_EDGE |
            APIC_LEVEL_ASSERT |
            APIC_DELMODE_INIT);

        /*
         * The spec calls for a 10ms delay but we may have to use a
         * MUCH lower delay to avoid bricking an AP due to a fast SMI
         * interrupt.  We have other loops here too and dividing by 2
         * doesn't seem to be enough even after subtracting 350us,
         * so we divide by 4.
         *
         * Our minimum delay is 150uS, maximum is 10ms.  If no SMI
         * interrupt was detected we use the full 10ms.
         */
        if (smibest == 0)
                u_sleep(10000);
        else if (smibest < 150 * 4 + 350)
                u_sleep(150);
        else if ((smibest - 350) / 4 < 10000)
                u_sleep((smibest - 350) / 4);
        else
                u_sleep(10000);

        /*
         * Do an INIT IPI: deassert RESET
         *
         * Use level triggered mode to deassert.  It is unclear
         * why we need to do this.
         */
        lapic_seticr_sync(physical_cpu,
            APIC_DESTMODE_PHY |
            APIC_DEST_DESTFLD |
            APIC_TRIGMOD_LEVEL |
            APIC_LEVEL_DEASSERT |
            APIC_DELMODE_INIT);
        u_sleep(150);                           /* wait 150us */

        /*
         * Next we do a STARTUP IPI: the previous INIT IPI might still be
         * latched (P5 bug), in which case this 1st STARTUP would terminate
         * immediately and the previously started INIT IPI would continue.
         * OR the previous INIT IPI has already run, and this STARTUP IPI
         * will run.  OR the previous INIT IPI was ignored, and this STARTUP
         * IPI will run.
         *
         * XXX set APIC_LEVEL_ASSERT
         */
        lapic_seticr_sync(physical_cpu,
            APIC_DESTMODE_PHY |
            APIC_DEST_DESTFLD |
            APIC_DELMODE_STARTUP |
            vector);
        u_sleep(200);                           /* wait ~200uS */

        /*
         * Finally we do a 2nd STARTUP IPI: this 2nd STARTUP IPI should run
         * IF the previous STARTUP IPI was cancelled by a latched INIT IPI.
         * OR this STARTUP IPI will be ignored, as only ONE STARTUP IPI is
         * recognized after hardware RESET or INIT IPI.
         *
         * XXX set APIC_LEVEL_ASSERT
         */
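        /*
         * Taken together, the INIT (assert), INIT (deassert) and the two
         * STARTUP IPIs implement the classic local-APIC INIT-SIPI-SIPI
         * startup sequence; the SIPI vector encodes the page-aligned
         * real-mode start address as (boot_addr >> 12).
         */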
        lapic_seticr_sync(physical_cpu,
            APIC_DESTMODE_PHY |
            APIC_DEST_DESTFLD |
            APIC_DELMODE_STARTUP |
            vector);

        /* Resume normal operation */
        cpu_enable_intr();

        /* wait for it to start, see ap_init() */
        set_apic_timer(5000000);                /* == 5 seconds */
        while (read_apic_timer()) {
                if (CPUMASK_TESTBIT(smp_startup_mask, gd->mi.gd_cpuid))
                        return 1;       /* return SUCCESS */
        }

        return 0;               /* return FAILURE */
}

static
int
smitest(void)
{
        int64_t ltsc;
        int64_t ntsc;
        int64_t ldelta;
        int64_t ndelta;
        int count;

        ldelta = 0;
        ndelta = 0;
        while (read_apic_timer()) {
                ltsc = rdtsc();
                for (count = 0; count < 100; ++count)
                        ntsc = rdtsc(); /* force loop to occur */
                if (ldelta) {
                        ndelta = ntsc - ltsc;
                        if (ldelta > ndelta)
                                ldelta = ndelta;
                        if (ndelta > ldelta * 2)
                                break;
                } else {
                        ldelta = ntsc - ltsc;
                }
        }
        return(read_apic_timer());
}

/*
 * Synchronously flush the TLB on all other CPUs.  The current cpu's
 * TLB is not flushed.  If the caller wishes to flush the current cpu's
 * TLB the caller must call cpu_invltlb() in addition to smp_invltlb().
 *
 * This routine may be called concurrently from multiple cpus.  When this
 * happens, smp_invltlb() can wind up sticking around in the confirmation
 * while() loop at the end as additional cpus are added to the global
 * cpumask, until they are acknowledged by another IPI.
 *
 * NOTE: If for some reason we were unable to start all cpus we cannot
 *       safely use broadcast IPIs.
 */

cpumask_t smp_smurf_mask;
static cpumask_t smp_invltlb_mask;
#define LOOPRECOVER
#define LOOPMASK_IN
#ifdef LOOPMASK_IN
cpumask_t smp_in_mask;
#endif
cpumask_t smp_invmask;
extern cpumask_t smp_idleinvl_mask;
extern cpumask_t smp_idleinvl_reqs;

/*
 * Atomically OR bits in *mask to smp_smurf_mask.  Adjust *mask to remove
 * bits that do not need to be IPId.  These bits are still part of the command,
 * but the target cpus have already been signalled and do not need to be
 * signalled again.
 */
#include <sys/spinlock.h>
#include <sys/spinlock2.h>

static __noinline
void
smp_smurf_fetchset(cpumask_t *mask)
{
        cpumask_t omask;
        int i;
        __uint64_t obits;
        __uint64_t nbits;

        i = 0;
        while (i < CPUMASK_ELEMENTS) {
                obits = smp_smurf_mask.ary[i];
                cpu_ccfence();
                nbits = obits | mask->ary[i];
                if (atomic_cmpset_long(&smp_smurf_mask.ary[i], obits, nbits)) {
                        omask.ary[i] = obits;
                        ++i;
                }
        }
        CPUMASK_NANDMASK(*mask, omask);
}

/*
 * This is a mechanism which guarantees that cpu_invltlb() will be executed
 * on idle cpus without having to signal or wake them up.  The invltlb will be
 * executed when they wake up, prior to any scheduling or interrupt thread.
 *
 * (*mask) is modified to remove the cpus we successfully negotiate this
 * function with.  This function may only be used with semi-synchronous
 * commands (typically invltlb's or semi-synchronous invalidations which
 * are usually associated only with kernel memory).
 */
void
smp_smurf_idleinvlclr(cpumask_t *mask)
{
        if (optimized_invltlb) {
                ATOMIC_CPUMASK_ORMASK(smp_idleinvl_reqs, *mask);
                /* cpu_lfence() not needed */
                CPUMASK_NANDMASK(*mask, smp_idleinvl_mask);
        }
}

/*
 * Issue cpu_invltlb() across all cpus except the current cpu.
 *
 * This function will arrange to avoid idle cpus, but still guarantee that
 * invltlb is run on them when they wake up prior to any scheduling or
 * nominal interrupt.
 */
void
smp_invltlb(void)
{
        struct mdglobaldata *md = mdcpu;
        cpumask_t mask;
        unsigned long rflags;
#ifdef LOOPRECOVER
        tsc_uclock_t tsc_base = rdtsc();
        int repeats = 0;
#endif

        if (report_invltlb_src > 0) {
                if (--report_invltlb_src <= 0)
                        print_backtrace(8);
        }

        /*
         * Disallow normal interrupts, set all active cpus except our own
         * in the global smp_invltlb_mask.
         */
        ++md->mi.gd_cnt.v_smpinvltlb;
        crit_enter_gd(&md->mi);

        /*
         * Bits we want to set in smp_invltlb_mask.  We do not want to signal
         * our own cpu.  Also try to remove bits associated with idle cpus
         * that we can flag for auto-invltlb.
         */
        mask = smp_active_mask;
        CPUMASK_NANDBIT(mask, md->mi.gd_cpuid);
        smp_smurf_idleinvlclr(&mask);

        rflags = read_rflags();
        cpu_disable_intr();
        ATOMIC_CPUMASK_ORMASK(smp_invltlb_mask, mask);

        /*
         * IPI non-idle cpus represented by mask.  The omask calculation
         * removes cpus from the mask which already have a Xinvltlb IPI
         * pending (avoid double-queueing the IPI).
         *
         * We must disable real interrupts when setting the smurf flags or
         * we might race a XINVLTLB before we manage to send the ipi's for
         * the bits we set.
         *
         * NOTE: We are not signalling ourselves, mask already does NOT
         *       include our own cpu.
         */
        smp_smurf_fetchset(&mask);

        /*
         * Issue the IPI.  Note that the XINVLTLB IPI runs regardless of
         * the critical section count on the target cpus.
         */
        CPUMASK_ORMASK(mask, md->mi.gd_cpumask);
        if (all_but_self_ipi_enable &&
            (all_but_self_ipi_enable >= 2 ||
             CPUMASK_CMPMASKEQ(smp_startup_mask, mask))) {
                all_but_self_ipi(XINVLTLB_OFFSET);
        } else {
                CPUMASK_NANDMASK(mask, md->mi.gd_cpumask);
                selected_apic_ipi(mask, XINVLTLB_OFFSET, APIC_DELMODE_FIXED);
        }

        /*
         * Wait for acknowledgement by all cpus.  smp_inval_intr() will
         * temporarily enable interrupts to avoid deadlocking the lapic,
         * and will also handle running cpu_invltlb() and remote invlpg
         * commands on our cpu if some other cpu requests it of us.
         *
         * WARNING! I originally tried to implement this as a hard loop
         *          checking only smp_invltlb_mask (and issuing a local
         *          cpu_invltlb() if requested), with interrupts enabled
         *          and without calling smp_inval_intr().  This DID NOT WORK.
         *          It resulted in weird races where smurf bits would get
         *          cleared without any action being taken.
         */
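        /*
         * Each target cpu clears its bit in smp_invltlb_mask from
         * smp_inval_intr() once it has executed cpu_invltlb(), so the loop
         * below just waits for the mask to drain to zero while servicing
         * any invalidation work directed at this cpu.
         */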
        smp_inval_intr();
        CPUMASK_ASSZERO(mask);
        while (CPUMASK_CMPMASKNEQ(smp_invltlb_mask, mask)) {
                smp_inval_intr();
                cpu_pause();
#ifdef LOOPRECOVER
                if (tsc_frequency && rdtsc() - tsc_base > tsc_frequency) {
                        /*
                         * cpuid - cpu doing the waiting
                         * invltlb_mask - IPI in progress
                         */
                        kprintf("smp_invltlb %d: waited too long inv=%08jx "
                                "smurf=%08jx "
#ifdef LOOPMASK_IN
                                "in=%08jx "
#endif
                                "idle=%08jx/%08jx\n",
                                md->mi.gd_cpuid,
                                smp_invltlb_mask.ary[0],
                                smp_smurf_mask.ary[0],
#ifdef LOOPMASK_IN
                                smp_in_mask.ary[0],
#endif
                                smp_idleinvl_mask.ary[0],
                                smp_idleinvl_reqs.ary[0]);
                        mdcpu->gd_xinvaltlb = 0;
                        ATOMIC_CPUMASK_NANDMASK(smp_smurf_mask,
                                                smp_invltlb_mask);
                        smp_invlpg(&smp_active_mask);
                        tsc_base = rdtsc();
                        if (++repeats > 10) {
                                kprintf("smp_invltlb: giving up\n");
                                CPUMASK_ASSZERO(smp_invltlb_mask);
                        }
                }
#endif
        }
        write_rflags(rflags);
        crit_exit_gd(&md->mi);
}

/*
 * Called from a critical section with interrupts hard-disabled.
 * This function issues an XINVLTLB IPI and then executes any pending
 * command on the current cpu before returning.
 */
void
smp_invlpg(cpumask_t *cmdmask)
{
        struct mdglobaldata *md = mdcpu;
        cpumask_t mask;

        if (report_invlpg_src > 0) {
                if (--report_invlpg_src <= 0)
                        print_backtrace(8);
        }

        /*
         * Disallow normal interrupts, set all active cpus in the pmap,
         * plus our own for completion processing (it might or might not
         * be part of the set).
         */
        mask = smp_active_mask;
        CPUMASK_ANDMASK(mask, *cmdmask);
        CPUMASK_ORMASK(mask, md->mi.gd_cpumask);

        /*
         * Avoid double-queuing IPIs, which can deadlock us.  We must disable
         * real interrupts when setting the smurf flags or we might race a
         * XINVLTLB before we manage to send the ipi's for the bits we set.
         *
         * NOTE: We might be including our own cpu in the smurf mask.
         */
        smp_smurf_fetchset(&mask);

        /*
         * Issue the IPI.  Note that the XINVLTLB IPI runs regardless of
         * the critical section count on the target cpus.
         *
         * We do not include our own cpu when issuing the IPI.
         */
        if (all_but_self_ipi_enable &&
            (all_but_self_ipi_enable >= 2 ||
             CPUMASK_CMPMASKEQ(smp_startup_mask, mask))) {
                all_but_self_ipi(XINVLTLB_OFFSET);
        } else {
                CPUMASK_NANDMASK(mask, md->mi.gd_cpumask);
                selected_apic_ipi(mask, XINVLTLB_OFFSET, APIC_DELMODE_FIXED);
        }

        /*
         * This will synchronously wait for our command to complete,
         * as well as process commands from other cpus.  It also handles
         * reentrancy.
         *
         * (interrupts are disabled and we are in a critical section here)
         */
        smp_inval_intr();
}

void
smp_sniff(void)
{
        globaldata_t gd = mycpu;
        int dummy;
        register_t rflags;

        /*
         * Ignore all_but_self_ipi_enable here and just use it.
         */
        rflags = read_rflags();
        cpu_disable_intr();
        all_but_self_ipi(XSNIFF_OFFSET);
        gd->gd_sample_pc = smp_sniff;
        gd->gd_sample_sp = &dummy;
        write_rflags(rflags);
}

void
cpu_sniff(int dcpu)
{
        globaldata_t rgd = globaldata_find(dcpu);
        register_t rflags;
        int dummy;

        /*
         * Ignore all_but_self_ipi_enable here and just use it.
         */
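        /*
         * gd_sample_pc/gd_sample_sp are seeded with a recognizable pc
         * (cpu_sniff itself) and the caller's stack; the Xsniff IPI handler
         * on the target presumably overwrites them with the interrupted
         * pc/sp so they can be inspected for sampling/diagnostics.
         */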
        rflags = read_rflags();
        cpu_disable_intr();
        single_apic_ipi(dcpu, XSNIFF_OFFSET, APIC_DELMODE_FIXED);
        rgd->gd_sample_pc = cpu_sniff;
        rgd->gd_sample_sp = &dummy;
        write_rflags(rflags);
}

/*
 * Called from Xinvltlb assembly with interrupts hard-disabled and in a
 * critical section.  gd_intr_nesting_level may or may not be bumped
 * depending on entry.
 *
 * THIS CODE IS INTENDED TO EXPLICITLY IGNORE THE CRITICAL SECTION COUNT.
 * THAT IS, THE INTERRUPT IS INTENDED TO FUNCTION EVEN WHEN MAINLINE CODE
 * IS IN A CRITICAL SECTION.
 */
void
smp_inval_intr(void)
{
        struct mdglobaldata *md = mdcpu;
        cpumask_t cpumask;
#ifdef LOOPRECOVER
        tsc_uclock_t tsc_base = rdtsc();
#endif

#if 0
        /*
         * The idle code is in a critical section, but that doesn't stop
         * Xinvltlb from executing, so deal with the race which can occur
         * in that situation.  Otherwise r-m-w operations by pmap_inval_intr()
         * may have problems.
         */
        if (ATOMIC_CPUMASK_TESTANDCLR(smp_idleinvl_reqs, md->mi.gd_cpuid)) {
                ATOMIC_CPUMASK_NANDBIT(smp_invltlb_mask, md->mi.gd_cpuid);
                cpu_invltlb();
                cpu_mfence();
        }
#endif

        /*
         * This is a real mess.  I'd like to just leave interrupts disabled
         * but it can cause the lapic to deadlock if too many interrupts queue
         * to it, due to the idiotic design of the lapic.  So instead we have
         * to enter a critical section so normal interrupts are made pending
         * and track whether this one was reentered.
         */
        if (md->gd_xinvaltlb) {         /* reentrant on cpu */
                md->gd_xinvaltlb = 2;
                return;
        }
        md->gd_xinvaltlb = 1;

        /*
         * Check only those cpus with active Xinvl* commands pending.
         *
         * We are going to enable interrupts so make sure we are in a
         * critical section.  This is necessary to avoid deadlocking
         * the lapic and to ensure that we execute our commands prior to
         * any nominal interrupt or preemption.
         *
         * WARNING! It is very important that we only clear our bit in
         *          smp_smurf_mask once for each interrupt we take.  In
         *          this case, we clear it on initial entry and only loop
         *          on the reentrancy detect (caused by another interrupt).
         */
        cpumask = smp_invmask;
#ifdef LOOPMASK_IN
        ATOMIC_CPUMASK_ORBIT(smp_in_mask, md->mi.gd_cpuid);
#endif
loop:
        cpu_enable_intr();
        ATOMIC_CPUMASK_NANDBIT(smp_smurf_mask, md->mi.gd_cpuid);

        /*
         * Specific page request(s), and we can't return until all bits
         * are zero.
         */
        for (;;) {
                int toolong;

                /*
                 * Also execute any pending full invalidation request in
                 * this loop.
                 */
                if (CPUMASK_TESTBIT(smp_invltlb_mask, md->mi.gd_cpuid)) {
                        ATOMIC_CPUMASK_NANDBIT(smp_invltlb_mask,
                                               md->mi.gd_cpuid);
                        cpu_invltlb();
                        cpu_mfence();
                }

#ifdef LOOPRECOVER
                if (tsc_frequency && rdtsc() - tsc_base > tsc_frequency) {
                        /*
                         * cpuid - cpu doing the waiting
                         * invmask - IPI in progress
                         * invltlb_mask - which ones are TLB invalidations?
                         */
                        kprintf("smp_inval_intr %d inv=%08jx tlbm=%08jx "
                                "smurf=%08jx "
#ifdef LOOPMASK_IN
                                "in=%08jx "
#endif
                                "idle=%08jx/%08jx\n",
                                md->mi.gd_cpuid,
                                smp_invmask.ary[0],
                                smp_invltlb_mask.ary[0],
                                smp_smurf_mask.ary[0],
#ifdef LOOPMASK_IN
                                smp_in_mask.ary[0],
#endif
                                smp_idleinvl_mask.ary[0],
                                smp_idleinvl_reqs.ary[0]);
                        tsc_base = rdtsc();
                        toolong = 1;
                } else {
                        toolong = 0;
                }
#else
                toolong = 0;
#endif

                /*
                 * We can only add bits to the cpumask to test during the
                 * loop because the smp_invmask bit is cleared once the
                 * originator completes the command (the targets may still
                 * be cycling their own completions in this loop, afterwards).
                 *
                 * lfence required prior to all tests as this Xinvltlb
                 * interrupt could race the originator (already be in progress
                 * when the originator decides to issue, due to an issue by
                 * another cpu).
                 */
                cpu_lfence();
                CPUMASK_ORMASK(cpumask, smp_invmask);
                /*cpumask = smp_active_mask;*/  /* XXX */
                cpu_lfence();

                if (pmap_inval_intr(&cpumask, toolong) == 0) {
                        /*
                         * Clear our smurf mask to allow new IPIs, but deal
                         * with potential races.
                         */
                        break;
                }

                /*
                 * Test if someone sent us another invalidation IPI, break
                 * out so we can take it to avoid deadlocking the lapic
                 * interrupt queue (? stupid intel, amd).
                 */
                if (md->gd_xinvaltlb == 2)
                        break;
                /*
                if (CPUMASK_TESTBIT(smp_smurf_mask, md->mi.gd_cpuid))
                        break;
                */
        }

        /*
         * Full invalidation request
         */
        if (CPUMASK_TESTBIT(smp_invltlb_mask, md->mi.gd_cpuid)) {
                ATOMIC_CPUMASK_NANDBIT(smp_invltlb_mask,
                                       md->mi.gd_cpuid);
                cpu_invltlb();
                cpu_mfence();
        }

        /*
         * Check to see if another Xinvltlb interrupt occurred and loop up
         * if it did.
         */
        cpu_disable_intr();
        if (md->gd_xinvaltlb == 2) {
                md->gd_xinvaltlb = 1;
                goto loop;
        }
#ifdef LOOPMASK_IN
        ATOMIC_CPUMASK_NANDBIT(smp_in_mask, md->mi.gd_cpuid);
#endif
        md->gd_xinvaltlb = 0;
}

void
cpu_wbinvd_on_all_cpus_callback(void *arg)
{
        wbinvd();
}

/*
 * When called the executing CPU will send an IPI to all other CPUs
 * requesting that they halt execution.
 *
 * Usually (but not necessarily) called with 'other_cpus' as its arg.
 *
 *  - Signals all CPUs in map to stop.
 *  - Waits for each to stop.
 *
 * Returns:
 *  -1: error
 *   0: NA
 *   1: ok
 *
 * XXX FIXME: this is not MP-safe, needs a lock to prevent multiple CPUs
 *            from executing at same time.
 */
int
stop_cpus(cpumask_t map)
{
        cpumask_t mask;

        CPUMASK_ANDMASK(map, smp_active_mask);

        /* send the Xcpustop IPI to all CPUs in map */
        selected_apic_ipi(map, XCPUSTOP_OFFSET, APIC_DELMODE_FIXED);

        do {
                mask = stopped_cpus;
                CPUMASK_ANDMASK(mask, map);
                /* spin */
        } while (CPUMASK_CMPMASKNEQ(mask, map));

        return 1;
}


/*
 * Called by a CPU to restart stopped CPUs.
 *
 * Usually (but not necessarily) called with 'stopped_cpus' as its arg.
 *
 *  - Signals all CPUs in map to restart.
 *  - Waits for each to restart.
 *
 * Returns:
 *  -1: error
 *   0: NA
 *   1: ok
 */
int
restart_cpus(cpumask_t map)
{
        cpumask_t mask;

        /* signal other cpus to restart */
        mask = map;
        CPUMASK_ANDMASK(mask, smp_active_mask);
        cpu_ccfence();
        started_cpus = mask;
        cpu_ccfence();

        /* wait for each to clear its bit */
        while (CPUMASK_CMPMASKNEQ(stopped_cpus, map))
                cpu_pause();

        return 1;
}

/*
 * This is called once the mpboot code has gotten us properly relocated
 * and the MMU turned on, etc.   ap_init() is actually the idle thread,
 * and when it returns the scheduler will call the real cpu_idle() main
 * loop for the idlethread.  Interrupts are disabled on entry and should
 * remain disabled at return.
 */
void
ap_init(void)
{
        int     cpu_id;

        /*
         * Adjust smp_startup_mask to signal the BSP that we have started
         * up successfully.  Note that we do not yet hold the BGL.  The BSP
         * is waiting for our signal.
         *
         * We can't set our bit in smp_active_mask yet because we are holding
         * interrupts physically disabled and remote cpus could deadlock
         * trying to send us an IPI.
         */
        ATOMIC_CPUMASK_ORBIT(smp_startup_mask, mycpu->gd_cpuid);
        cpu_mfence();

        /*
         * Interlock for LAPIC initialization.  Wait until mp_finish_lapic is
         * non-zero, then get the MP lock.
         *
         * Note: We are in a critical section.
         *
         * Note: we are the idle thread, we can only spin.
         *
         * Note: The load fence is memory volatile and prevents the compiler
         * from improperly caching mp_finish_lapic, and the cpu from improperly
         * caching it.
         */
        while (mp_finish_lapic == 0) {
                cpu_pause();
                cpu_lfence();
        }
#if 0
        while (try_mplock() == 0) {
                cpu_pause();
                cpu_lfence();
        }
#endif

        if (cpu_feature & CPUID_TSC) {
                /*
                 * The BSP is constantly updating tsc0_offset, figure out
                 * the relative difference to synchronize ktrdump.
                 */
                tsc_offsets[mycpu->gd_cpuid] = rdtsc() - tsc0_offset;
        }

        /* BSP may have changed PTD while we're waiting for the lock */
        cpu_invltlb();

        /* Build our map of 'other' CPUs. */
        mycpu->gd_other_cpus = smp_startup_mask;
        ATOMIC_CPUMASK_NANDBIT(mycpu->gd_other_cpus, mycpu->gd_cpuid);

        /* A quick check from sanity claus */
        cpu_id = APICID_TO_CPUID(LAPIC_READID);
        if (mycpu->gd_cpuid != cpu_id) {
                kprintf("SMP: assigned cpuid = %d\n", mycpu->gd_cpuid);
                kprintf("SMP: actual cpuid = %d lapicid %d\n",
                        cpu_id, LAPIC_READID);
#if 0 /* JGXXX */
                kprintf("PTD[MPPTDI] = %p\n", (void *)PTD[MPPTDI]);
#endif
                panic("cpuid mismatch! boom!!");
        }

        /* Initialize AP's local APIC for irq's */
        lapic_init(FALSE);

        /* LAPIC initialization is done */
        ATOMIC_CPUMASK_ORBIT(smp_lapic_mask, mycpu->gd_cpuid);
        cpu_mfence();

#if 0
        /* Let BSP move onto the next initialization stage */
        rel_mplock();
#endif

        /*
         * Interlock for finalization.  Wait until mp_finish is non-zero,
         * then get the MP lock.
         *
         * Note: We are in a critical section.
         *
         * Note: we are the idle thread, we can only spin.
         *
         * Note: The load fence is memory volatile and prevents the compiler
         * from improperly caching mp_finish, and the cpu from improperly
         * caching it.
         */
        while (mp_finish == 0) {
                cpu_pause();
                cpu_lfence();
        }

        /* BSP may have changed PTD while we're waiting for the lock */
        cpu_invltlb();

        /* Set memory range attributes for this CPU to match the BSP */
        mem_range_AP_init();

        /*
         * Once we go active we must process any IPIQ messages that may
         * have been queued, because no actual IPI will occur until we
         * set our bit in the smp_active_mask.  If we don't the IPI
         * message interlock could be left set which would also prevent
         * further IPIs.
         *
         * The idle loop doesn't expect the BGL to be held and while
         * lwkt_switch() normally cleans things up this is a special case
         * because we are returning almost directly into the idle loop.
         *
         * The idle thread is never placed on the runq, make sure
         * nothing we've done put it there.
         */

        /*
         * Hold a critical section and allow real interrupts to occur.  Zero
         * any spurious interrupts which have accumulated, then set our
         * smp_active_mask indicating that we are fully operational.
         */
        crit_enter();
        __asm __volatile("sti; pause; pause"::);
        bzero(mdcpu->gd_ipending, sizeof(mdcpu->gd_ipending));
        ATOMIC_CPUMASK_ORBIT(smp_active_mask, mycpu->gd_cpuid);

        /*
         * Wait until all cpus have set their smp_active_mask and have fully
         * operational interrupts before proceeding.
         *
         * We need a final cpu_invltlb() because we would not have received
         * any until we set our bit in smp_active_mask.
         */
        while (mp_finish == 1) {
                cpu_pause();
                cpu_lfence();
        }
        cpu_invltlb();

        /*
         * Initialize per-cpu clocks and do other per-cpu initialization.
         * At this point code is expected to be able to use the full kernel
         * API.
         */
        initclocks_pcpu();      /* clock interrupts (via IPIs) */

        /*
         * Since we may have cleaned up the interrupt triggers, manually
         * process any pending IPIs before exiting our critical section.
         * Once the critical section has exited, normal interrupt processing
         * may occur.
         */
        atomic_swap_int(&mycpu->gd_npoll, 0);
        lwkt_process_ipiq();
        crit_exit();

        /*
         * Final final, allow the waiting BSP to resume the boot process,
         * return 'into' the idle thread bootstrap.
         */
        ATOMIC_CPUMASK_ORBIT(smp_finalize_mask, mycpu->gd_cpuid);
        KKASSERT((curthread->td_flags & TDF_RUNQ) == 0);
}

/*
 * Get SMP fully working before we start initializing devices.
 */
static
void
ap_finish(void)
{
        if (bootverbose)
                kprintf("Finish MP startup\n");
        rel_mplock();

        /*
         * Wait for the active mask to complete, after which all cpus will
         * be accepting interrupts.
         */
        mp_finish = 1;
        while (CPUMASK_CMPMASKNEQ(smp_active_mask, smp_startup_mask)) {
                cpu_pause();
                cpu_lfence();
        }

        /*
         * Wait for the finalization mask to complete, after which all cpus
         * have completely finished initializing and are entering or are in
         * their idle thread.
         *
         * BSP should have received all required invltlbs but do another
         * one just in case.
         */
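        /*
         * Phase two of the handshake with ap_init(): setting mp_finish to 2
         * releases the APs from their "mp_finish == 1" spin so that they can
         * run initclocks_pcpu() and set their bit in smp_finalize_mask,
         * which is what the loop below waits for.
         */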
        cpu_invltlb();
        mp_finish = 2;
        while (CPUMASK_CMPMASKNEQ(smp_finalize_mask, smp_startup_mask)) {
                cpu_pause();
                cpu_lfence();
        }

        while (try_mplock() == 0) {
                cpu_pause();
                cpu_lfence();
        }

        if (bootverbose) {
                kprintf("Active CPU Mask: %016jx\n",
                        (uintmax_t)CPUMASK_LOWMASK(smp_active_mask));
        }
}

SYSINIT(finishsmp, SI_BOOT2_FINISH_SMP, SI_ORDER_FIRST, ap_finish, NULL);

/*
 * Interrupts must be hard-disabled by caller
 */
void
cpu_send_ipiq(int dcpu)
{
        if (CPUMASK_TESTBIT(smp_active_mask, dcpu))
                single_apic_ipi(dcpu, XIPIQ_OFFSET, APIC_DELMODE_FIXED);
}

#if 0   /* single_apic_ipi_passive() not working yet */
/*
 * Returns 0 on failure, 1 on success
 */
int
cpu_send_ipiq_passive(int dcpu)
{
        int r = 0;
        if (CPUMASK_TESTBIT(smp_active_mask, dcpu)) {
                r = single_apic_ipi_passive(dcpu, XIPIQ_OFFSET,
                                            APIC_DELMODE_FIXED);
        }
        return(r);
}
#endif

static void
mp_bsp_simple_setup(void)
{
        struct mdglobaldata *gd;
        size_t ipiq_size;

        /* build our map of 'other' CPUs */
        mycpu->gd_other_cpus = smp_startup_mask;
        CPUMASK_NANDBIT(mycpu->gd_other_cpus, mycpu->gd_cpuid);

        gd = (struct mdglobaldata *)mycpu;
        gd->gd_acpi_id = CPUID_TO_ACPIID(mycpu->gd_cpuid);

        ipiq_size = sizeof(struct lwkt_ipiq) * ncpus;
        mycpu->gd_ipiq = (void *)kmem_alloc(&kernel_map, ipiq_size,
                                            VM_SUBSYS_IPIQ);
        bzero(mycpu->gd_ipiq, ipiq_size);

        /* initialize arc4random. */
        arc4_init_pcpu(0);

        pmap_set_opt();

        if (cpu_feature & CPUID_TSC)
                tsc0_offset = rdtsc();
}


/*
 * CPU TOPOLOGY DETECTION FUNCTIONS
 */

/* Detect intel topology using CPUID
 * Ref: http://www.intel.com/Assets/PDF/appnote/241618.pdf, pg 41
 */
static void
detect_intel_topology(int count_htt_cores)
{
        int shift = 0;
        int ecx_index = 0;
        int core_plus_logical_bits = 0;
        int cores_per_package;
        int logical_per_package;
        int logical_per_core;
        unsigned int p[4];

        if (cpu_high >= 0xb) {
                goto FUNC_B;

        } else if (cpu_high >= 0x4) {
                goto FUNC_4;

        } else {
                core_bits = 0;
                for (shift = 0; (1 << shift) < count_htt_cores; ++shift)
                        ;
                logical_CPU_bits = 1 << shift;
                return;
        }

FUNC_B:
        cpuid_count(0xb, FUNC_B_THREAD_LEVEL, p);

        /* if 0xb not supported - fallback to 0x4 */
        if (p[1] == 0 || (FUNC_B_TYPE(p[2]) != FUNC_B_THREAD_TYPE)) {
                goto FUNC_4;
        }

        logical_CPU_bits = FUNC_B_BITS_SHIFT_NEXT_LEVEL(p[0]);

        ecx_index = FUNC_B_THREAD_LEVEL + 1;
        do {
                cpuid_count(0xb, ecx_index, p);

                /* Check for the Core type in the implemented sub leaves. */
                if (FUNC_B_TYPE(p[2]) == FUNC_B_CORE_TYPE) {
                        core_plus_logical_bits = FUNC_B_BITS_SHIFT_NEXT_LEVEL(p[0]);
                        break;
                }

                ecx_index++;

        } while (FUNC_B_TYPE(p[2]) != FUNC_B_INVALID_TYPE);

        core_bits = core_plus_logical_bits - logical_CPU_bits;

        return;

FUNC_4:
        cpuid_count(0x4, 0, p);
        cores_per_package = FUNC_4_MAX_CORE_NO(p[0]) + 1;

        logical_per_package = count_htt_cores;
        logical_per_core = logical_per_package / cores_per_package;

        for (shift = 0; (1 << shift) < logical_per_core; ++shift)
                ;
        logical_CPU_bits = shift;

        for (shift = 0; (1 << shift) < cores_per_package; ++shift)
                ;
        core_bits = shift;

        return;
}

/* Detect AMD topology using CPUID
 * Ref: http://support.amd.com/us/Embedded_TechDocs/25481.pdf, last page
 */
static void
detect_amd_topology(int count_htt_cores)
{
        int shift = 0;
        if ((cpu_feature & CPUID_HTT) && (amd_feature2 & AMDID2_CMP)) {
                if (cpu_procinfo2 & AMDID_COREID_SIZE) {
                        core_bits = (cpu_procinfo2 & AMDID_COREID_SIZE) >>
                                    AMDID_COREID_SIZE_SHIFT;
                } else {
                        core_bits = (cpu_procinfo2 & AMDID_CMP_CORES) + 1;
                        for (shift = 0; (1 << shift) < core_bits; ++shift)
                                ;
                        core_bits = shift;
                }
                logical_CPU_bits = count_htt_cores >> core_bits;
                for (shift = 0; (1 << shift) < logical_CPU_bits; ++shift)
                        ;
                logical_CPU_bits = shift;

                kprintf("core_bits %d logical_CPU_bits %d\n",
                        core_bits - logical_CPU_bits, logical_CPU_bits);

                if (amd_feature2 & AMDID2_TOPOEXT) {
                        u_int p[4];     /* eax,ebx,ecx,edx */
                        int nodes;

                        cpuid_count(0x8000001e, 0, p);

                        switch(((p[1] >> 8) & 3) + 1) {
                        case 1:
                                logical_CPU_bits = 0;
                                break;
                        case 2:
                                logical_CPU_bits = 1;
                                break;
                        case 3:
                        case 4:
                                logical_CPU_bits = 2;
                                break;
                        }

                        /*
                         * Nodes are kind of a stand-in for packages*sockets,
                         * but can be thought of in terms of NUMA domains.
                         */
                        nodes = ((p[2] >> 8) & 7) + 1;
                        switch(nodes) {
                        case 8:
                        case 7:
                        case 6:
                        case 5:
                                --core_bits;
                                /* fallthrough */
                        case 4:
                        case 3:
                                --core_bits;
                                /* fallthrough */
                        case 2:
                                --core_bits;
                                /* fallthrough */
                        case 1:
                                break;
                        }
                        core_bits -= logical_CPU_bits;
                        kprintf("%d-way htt, %d Nodes, %d cores/node\n",
                                (int)(((p[1] >> 8) & 3) + 1),
                                nodes,
                                1 << core_bits);

                }
#if 0
                if (amd_feature2 & AMDID2_TOPOEXT) {
                        u_int p[4];
                        int i;
                        int type;
                        int level;
                        int share_count;

                        logical_CPU_bits = 0;
                        core_bits = 0;

                        for (i = 0; i < 256; ++i) {
                                cpuid_count(0x8000001d, i, p);
                                type = p[0] & 0x1f;
                                level = (p[0] >> 5) & 0x7;
                                share_count = 1 + ((p[0] >> 14) & 0xfff);

                                if (type == 0)
                                        break;
                                kprintf("Topology probe i=%2d type=%d "
                                        "level=%d share_count=%d\n",
                                        i, type, level, share_count);
                                shift = 0;
                                while ((1 << shift) < share_count)
                                        ++shift;

                                switch(type) {
                                case 1:
                                        /*
                                         * CPUID_TYPE_SMT
                                         *
                                         * Logical CPU (SMT)
                                         */
                                        logical_CPU_bits = shift;
                                        break;
                                case 2:
                                        /*
                                         * CPUID_TYPE_CORE
                                         *
                                         * Physical subdivision of a package
                                         */
                                        core_bits = logical_CPU_bits +
                                                    shift;
                                        break;
                                case 3:
                                        /*
                                         * CPUID_TYPE_CACHE
                                         *
                                         * CPU L1/L2/L3 cache
                                         */
                                        break;
                                case 4:
                                        /*
                                         * CPUID_TYPE_PKG
                                         *
                                         * Package aka chip, equivalent to
                                         * socket
                                         */
                                        break;
                                }
                        }
                }
#endif
        } else {
                for (shift = 0; (1 << shift) < count_htt_cores; ++shift)
                        ;
                core_bits = shift;
                logical_CPU_bits = 0;
        }
}

static void
amd_get_compute_unit_id(void *arg)
{
        u_int regs[4];

        do_cpuid(0x8000001e, regs);
        cpu_node_t *mynode = get_cpu_node_by_cpuid(mycpuid);

        /*
         * AMD - CPUID Specification September 2010
         * page 34 - //ComputeUnitID = ebx[0:7]//
         */
        mynode->compute_unit_id = regs[1] & 0xff;
}

int
fix_amd_topology(void)
{
        cpumask_t mask;

        if (cpu_vendor_id != CPU_VENDOR_AMD)
                return -1;
        if ((amd_feature2 & AMDID2_TOPOEXT) == 0)
                return -1;

        CPUMASK_ASSALLONES(mask);
        lwkt_cpusync_simple(mask, amd_get_compute_unit_id, NULL);

        kprintf("Compute unit IDs:\n");
        int i;
        for (i = 0; i < ncpus; i++) {
                kprintf("%d-%d; \n",
                        i, get_cpu_node_by_cpuid(i)->compute_unit_id);
        }
        return 0;
}

/*
 * Calculate
 * - logical_CPU_bits
 * - core_bits
 * With the values above (for AMD or INTEL) we are able to generally
 * detect the CPU topology (number of cores for each level):
 * Ref: http://wiki.osdev.org/Detecting_CPU_Topology_(80x86)
 * Ref: http://www.multicoreinfo.com/research/papers/whitepapers/Intel-detect-topology.pdf
 */
void
detect_cpu_topology(void)
{
        static int topology_detected = 0;
        int count = 0;

        if (topology_detected)
                goto OUT;
        if ((cpu_feature & CPUID_HTT) == 0) {
                core_bits = 0;
                logical_CPU_bits = 0;
                goto OUT;
        }
        count = (cpu_procinfo & CPUID_HTT_CORES) >> CPUID_HTT_CORE_SHIFT;

        if (cpu_vendor_id == CPU_VENDOR_INTEL)
                detect_intel_topology(count);
        else if (cpu_vendor_id == CPU_VENDOR_AMD)
                detect_amd_topology(count);
        topology_detected = 1;

OUT:
        if (bootverbose) {
                kprintf("Bits within APICID: logical_CPU_bits: %d; "
                        "core_bits: %d\n",
                        logical_CPU_bits, core_bits);
        }
}

/*
 * Interface functions to calculate chip_ID,
 * core_number and logical_number
 * Ref: http://wiki.osdev.org/Detecting_CPU_Topology_(80x86)
 */
int
get_chip_ID(int cpuid)
{
        return get_apicid_from_cpuid(cpuid) >>
            (logical_CPU_bits + core_bits);
}

int
get_chip_ID_from_APICID(int apicid)
{
        return apicid >> (logical_CPU_bits + core_bits);
}

int
get_core_number_within_chip(int cpuid)
{
        return ((get_apicid_from_cpuid(cpuid) >> logical_CPU_bits) &
                ((1 << core_bits) - 1));
}

int
get_logical_CPU_number_within_core(int cpuid)
{
        return (get_apicid_from_cpuid(cpuid) &
                ((1 << logical_CPU_bits) - 1));
}