/*
 * Copyright (c) 1996, by Steve Passe
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the developer may NOT be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/i386/mp_machdep.c,v 1.115.2.15 2003/03/14 21:22:35 jhb Exp $
 */

#include "opt_cpu.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/cons.h>			/* cngetc() */
#include <sys/machintr.h>
#include <sys/cpu_topology.h>

#include <sys/mplock2.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <sys/lock.h>
#include <vm/vm_map.h>
#include <sys/user.h>
#ifdef GPROF
#include <sys/gmon.h>
#endif

#include <machine/smp.h>
#include <machine_base/apic/apicreg.h>
#include <machine/atomic.h>
#include <machine/cpufunc.h>
#include <machine/cputypes.h>
#include <machine_base/apic/lapic.h>
#include <machine_base/apic/ioapic.h>
#include <machine/psl.h>
#include <machine/segments.h>
#include <machine/tss.h>
#include <machine/specialreg.h>
#include <machine/globaldata.h>
#include <machine/pmap_inval.h>

#include <machine/md_var.h>		/* setidt() */
#include <machine_base/icu/icu.h>	/* IPIs */
#include <machine_base/icu/icu_var.h>
#include <machine_base/apic/ioapic_abi.h>
#include <machine/intr_machdep.h>	/* IPIs */

#define WARMBOOT_TARGET		0
#define WARMBOOT_OFF		(KERNBASE + 0x0467)
#define WARMBOOT_SEG		(KERNBASE + 0x0469)

#define CMOS_REG		(0x70)
#define CMOS_DATA		(0x71)
#define BIOS_RESET		(0x0f)
#define BIOS_WARM		(0x0a)

/*
 * this code MUST be enabled here and in mpboot.s.
 * it follows the very early stages of AP boot by placing values in CMOS ram.
 * it NORMALLY will never be needed and thus the primitive method for enabling.
 */
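
/*
 * NOTE (illustrative): the CHECK_* macros below use the standard CMOS/RTC
 * index/data protocol: write the register index to I/O port 0x70 (CMOS_REG),
 * then read or write the value through port 0x71 (CMOS_DATA).  Registers
 * 0x34-0x39 are general CMOS RAM locations, used here only as boot
 * progress checkpoints.
 */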
87 * 88 */ 89 #if defined(CHECK_POINTS) 90 #define CHECK_READ(A) (outb(CMOS_REG, (A)), inb(CMOS_DATA)) 91 #define CHECK_WRITE(A,D) (outb(CMOS_REG, (A)), outb(CMOS_DATA, (D))) 92 93 #define CHECK_INIT(D); \ 94 CHECK_WRITE(0x34, (D)); \ 95 CHECK_WRITE(0x35, (D)); \ 96 CHECK_WRITE(0x36, (D)); \ 97 CHECK_WRITE(0x37, (D)); \ 98 CHECK_WRITE(0x38, (D)); \ 99 CHECK_WRITE(0x39, (D)); 100 101 #define CHECK_PRINT(S); \ 102 kprintf("%s: %d, %d, %d, %d, %d, %d\n", \ 103 (S), \ 104 CHECK_READ(0x34), \ 105 CHECK_READ(0x35), \ 106 CHECK_READ(0x36), \ 107 CHECK_READ(0x37), \ 108 CHECK_READ(0x38), \ 109 CHECK_READ(0x39)); 110 111 #else /* CHECK_POINTS */ 112 113 #define CHECK_INIT(D) 114 #define CHECK_PRINT(S) 115 116 #endif /* CHECK_POINTS */ 117 118 /* 119 * Values to send to the POST hardware. 120 */ 121 #define MP_BOOTADDRESS_POST 0x10 122 #define MP_PROBE_POST 0x11 123 #define MPTABLE_PASS1_POST 0x12 124 125 #define MP_START_POST 0x13 126 #define MP_ENABLE_POST 0x14 127 #define MPTABLE_PASS2_POST 0x15 128 129 #define START_ALL_APS_POST 0x16 130 #define INSTALL_AP_TRAMP_POST 0x17 131 #define START_AP_POST 0x18 132 133 #define MP_ANNOUNCE_POST 0x19 134 135 /** XXX FIXME: where does this really belong, isa.h/isa.c perhaps? */ 136 int current_postcode; 137 138 /** XXX FIXME: what system files declare these??? */ 139 extern struct region_descriptor r_gdt; 140 141 extern int nkpt; 142 extern int naps; 143 144 int64_t tsc0_offset; 145 extern int64_t tsc_offsets[]; 146 147 /* AP uses this during bootstrap. Do not staticize. */ 148 char *bootSTK; 149 static int bootAP; 150 151 struct pcb stoppcbs[MAXCPU]; 152 153 extern inthand_t IDTVEC(fast_syscall), IDTVEC(fast_syscall32); 154 155 /* 156 * Local data and functions. 157 */ 158 159 static u_int boot_address; 160 static int mp_finish; 161 static int mp_finish_lapic; 162 163 static int start_all_aps(u_int boot_addr); 164 #if 0 165 static void install_ap_tramp(u_int boot_addr); 166 #endif 167 static int start_ap(struct mdglobaldata *gd, u_int boot_addr, int smibest); 168 static int smitest(void); 169 static void mp_bsp_simple_setup(void); 170 171 /* which cpus have been started */ 172 static cpumask_t smp_startup_mask = CPUMASK_INITIALIZER_ONLYONE; 173 /* which cpus have lapic been inited */ 174 static cpumask_t smp_lapic_mask = CPUMASK_INITIALIZER_ONLYONE; 175 /* which cpus are ready for IPIs etc? */ 176 cpumask_t smp_active_mask = CPUMASK_INITIALIZER_ONLYONE; 177 cpumask_t smp_finalize_mask = CPUMASK_INITIALIZER_ONLYONE; 178 179 SYSCTL_INT(_machdep, OID_AUTO, smp_active, CTLFLAG_RD, &smp_active_mask, 0, ""); 180 static u_int bootMP_size; 181 182 /* Local data for detecting CPU TOPOLOGY */ 183 static int core_bits = 0; 184 static int logical_CPU_bits = 0; 185 186 187 /* 188 * Calculate usable address in base memory for AP trampoline code. 189 */ 190 u_int 191 mp_bootaddress(u_int basemem) 192 { 193 POSTCODE(MP_BOOTADDRESS_POST); 194 195 bootMP_size = mptramp_end - mptramp_start; 196 boot_address = trunc_page(basemem * 1024); /* round down to 4k boundary */ 197 if (((basemem * 1024) - boot_address) < bootMP_size) 198 boot_address -= PAGE_SIZE; /* not enough, lower by 4k */ 199 /* 3 levels of page table pages */ 200 mptramp_pagetables = boot_address - (PAGE_SIZE * 3); 201 202 return mptramp_pagetables; 203 } 204 205 /* 206 * Print various information about the SMP system hardware and setup. 
207 */ 208 void 209 mp_announce(void) 210 { 211 int x; 212 213 POSTCODE(MP_ANNOUNCE_POST); 214 215 kprintf("DragonFly/MP: Multiprocessor motherboard\n"); 216 kprintf(" cpu0 (BSP): apic id: %2d\n", CPUID_TO_APICID(0)); 217 for (x = 1; x <= naps; ++x) 218 kprintf(" cpu%d (AP): apic id: %2d\n", x, CPUID_TO_APICID(x)); 219 220 if (!ioapic_enable) 221 kprintf(" Warning: APIC I/O disabled\n"); 222 } 223 224 /* 225 * AP cpu's call this to sync up protected mode. 226 * 227 * WARNING! %gs is not set up on entry. This routine sets up %gs. 228 */ 229 void 230 init_secondary(void) 231 { 232 int gsel_tss; 233 int x, myid = bootAP; 234 u_int64_t msr, cr0; 235 struct mdglobaldata *md; 236 struct privatespace *ps; 237 238 ps = CPU_prvspace[myid]; 239 240 gdt_segs[GPROC0_SEL].ssd_base = 241 (long) &ps->mdglobaldata.gd_common_tss; 242 ps->mdglobaldata.mi.gd_prvspace = ps; 243 244 /* We fill the 32-bit segment descriptors */ 245 for (x = 0; x < NGDT; x++) { 246 if (x != GPROC0_SEL && x != (GPROC0_SEL + 1)) 247 ssdtosd(&gdt_segs[x], &gdt[myid * NGDT + x]); 248 } 249 /* And now a 64-bit one */ 250 ssdtosyssd(&gdt_segs[GPROC0_SEL], 251 (struct system_segment_descriptor *)&gdt[myid * NGDT + GPROC0_SEL]); 252 253 r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1; 254 r_gdt.rd_base = (long) &gdt[myid * NGDT]; 255 lgdt(&r_gdt); /* does magic intra-segment return */ 256 257 /* lgdt() destroys the GSBASE value, so we load GSBASE after lgdt() */ 258 wrmsr(MSR_FSBASE, 0); /* User value */ 259 wrmsr(MSR_GSBASE, (u_int64_t)ps); 260 wrmsr(MSR_KGSBASE, 0); /* XXX User value while we're in the kernel */ 261 262 lidt(&r_idt_arr[mdcpu->mi.gd_cpuid]); 263 264 #if 0 265 lldt(_default_ldt); 266 mdcpu->gd_currentldt = _default_ldt; 267 #endif 268 269 gsel_tss = GSEL(GPROC0_SEL, SEL_KPL); 270 gdt[myid * NGDT + GPROC0_SEL].sd_type = SDT_SYSTSS; 271 272 md = mdcpu; /* loaded through %gs:0 (mdglobaldata.mi.gd_prvspace)*/ 273 274 md->gd_common_tss.tss_rsp0 = 0; /* not used until after switch */ 275 #if 0 /* JG XXX */ 276 md->gd_common_tss.tss_ioopt = (sizeof md->gd_common_tss) << 16; 277 #endif 278 md->gd_tss_gdt = &gdt[myid * NGDT + GPROC0_SEL]; 279 md->gd_common_tssd = *md->gd_tss_gdt; 280 281 /* double fault stack */ 282 md->gd_common_tss.tss_ist1 = 283 (long)&md->mi.gd_prvspace->idlestack[ 284 sizeof(md->mi.gd_prvspace->idlestack)]; 285 286 ltr(gsel_tss); 287 288 /* 289 * Set to a known state: 290 * Set by mpboot.s: CR0_PG, CR0_PE 291 * Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM 292 */ 293 cr0 = rcr0(); 294 cr0 &= ~(CR0_CD | CR0_NW | CR0_EM); 295 load_cr0(cr0); 296 297 /* Set up the fast syscall stuff */ 298 msr = rdmsr(MSR_EFER) | EFER_SCE; 299 wrmsr(MSR_EFER, msr); 300 wrmsr(MSR_LSTAR, (u_int64_t)IDTVEC(fast_syscall)); 301 wrmsr(MSR_CSTAR, (u_int64_t)IDTVEC(fast_syscall32)); 302 msr = ((u_int64_t)GSEL(GCODE_SEL, SEL_KPL) << 32) | 303 ((u_int64_t)GSEL(GUCODE32_SEL, SEL_UPL) << 48); 304 wrmsr(MSR_STAR, msr); 305 wrmsr(MSR_SF_MASK, PSL_NT|PSL_T|PSL_I|PSL_C|PSL_D|PSL_IOPL); 306 307 pmap_set_opt(); /* PSE/4MB pages, etc */ 308 pmap_init_pat(); /* Page Attribute Table */ 309 310 /* set up CPU registers and state */ 311 cpu_setregs(); 312 313 /* set up SSE/NX registers */ 314 initializecpu(myid); 315 316 /* set up FPU state on the AP */ 317 npxinit(); 318 319 /* disable the APIC, just to be SURE */ 320 lapic->svr &= ~APIC_SVR_ENABLE; 321 } 322 323 /******************************************************************* 324 * local functions and data 325 */ 326 327 /* 328 * Start the SMP system 329 */ 330 static void 331 
mp_start_aps(void *dummy __unused)
{
	if (lapic_enable) {
		/* start each Application Processor */
		start_all_aps(boot_address);
	} else {
		mp_bsp_simple_setup();
	}
}
SYSINIT(startaps, SI_BOOT2_START_APS, SI_ORDER_FIRST, mp_start_aps, NULL)

/*
 * start each AP in our list
 */
static int
start_all_aps(u_int boot_addr)
{
	vm_offset_t va = boot_address + KERNBASE;
	u_int64_t *pt4, *pt3, *pt2;
	int	pssize;
	int	x, i;
	int	shift;
	int	smicount;
	int	smibest;
	int	smilast;
	u_char	mpbiosreason;
	u_long	mpbioswarmvec;
	struct mdglobaldata *gd;
	struct privatespace *ps;
	size_t	ipiq_size;

	POSTCODE(START_ALL_APS_POST);

	/* install the AP 1st level boot code */
	pmap_kenter(va, boot_address);
	cpu_invlpg((void *)va);		/* JG XXX */
	bcopy(mptramp_start, (void *)va, bootMP_size);

	/* Locate the page tables, they'll be below the trampoline */
	pt4 = (u_int64_t *)(uintptr_t)(mptramp_pagetables + KERNBASE);
	pt3 = pt4 + (PAGE_SIZE) / sizeof(u_int64_t);
	pt2 = pt3 + (PAGE_SIZE) / sizeof(u_int64_t);

	/* Create the initial 1GB replicated page tables */
	for (i = 0; i < 512; i++) {
		/* Each slot of the level 4 pages points to the same level 3 page */
		pt4[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables + PAGE_SIZE);
		pt4[i] |= kernel_pmap.pmap_bits[PG_V_IDX] |
		    kernel_pmap.pmap_bits[PG_RW_IDX] |
		    kernel_pmap.pmap_bits[PG_U_IDX];

		/* Each slot of the level 3 pages points to the same level 2 page */
		pt3[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables + (2 * PAGE_SIZE));
		pt3[i] |= kernel_pmap.pmap_bits[PG_V_IDX] |
		    kernel_pmap.pmap_bits[PG_RW_IDX] |
		    kernel_pmap.pmap_bits[PG_U_IDX];

		/* The level 2 page slots are mapped with 2MB pages for 1GB. */
		pt2[i] = i * (2 * 1024 * 1024);
		pt2[i] |= kernel_pmap.pmap_bits[PG_V_IDX] |
		    kernel_pmap.pmap_bits[PG_RW_IDX] |
		    kernel_pmap.pmap_bits[PG_PS_IDX] |
		    kernel_pmap.pmap_bits[PG_U_IDX];
	}

	/* save the current value of the warm-start vector */
	mpbioswarmvec = *((u_int32_t *) WARMBOOT_OFF);
	outb(CMOS_REG, BIOS_RESET);
	mpbiosreason = inb(CMOS_DATA);

	/* setup a vector to our boot code */
	*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
	*((volatile u_short *) WARMBOOT_SEG) = (boot_address >> 4);
	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, BIOS_WARM);	/* 'warm-start' */

	/*
	 * If we have a TSC we can figure out the SMI interrupt rate.
	 * The SMI does not necessarily use a constant rate.  Spend
	 * up to 250ms trying to figure it out.
	 */
	smibest = 0;
	if (cpu_feature & CPUID_TSC) {
		set_apic_timer(275000);
		smilast = read_apic_timer();
		for (x = 0; x < 20 && read_apic_timer(); ++x) {
			smicount = smitest();
			if (smibest == 0 || smilast - smicount < smibest)
				smibest = smilast - smicount;
			smilast = smicount;
		}
		if (smibest > 250000)
			smibest = 0;
		if (smibest) {
			smibest = smibest * (int64_t)1000000 /
				  get_apic_timer_frequency();
		}
	}
	if (smibest)
		kprintf("SMI Frequency (worst case): %d Hz (%d us)\n",
			1000000 / smibest, smibest);
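
	/*
	 * Unit note (illustrative): smitest() returns APIC timer counts,
	 * so smibest is first computed in timer ticks; multiplying by
	 * 1000000 and dividing by get_apic_timer_frequency() converts it
	 * to microseconds between SMIs.  For example, smibest = 20000us
	 * is reported above as 1000000 / 20000 = 50 Hz.
	 */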

	/* start each AP */
	for (x = 1; x <= naps; ++x) {
		/* This is a bit verbose, it will go away soon.  */

		pssize = sizeof(struct privatespace);
		ps = (void *)kmem_alloc(&kernel_map, pssize);
		CPU_prvspace[x] = ps;
#if 0
		kprintf("ps %d %p %d\n", x, ps, pssize);
#endif
		bzero(ps, pssize);
		gd = &ps->mdglobaldata;
		gd->mi.gd_prvspace = ps;

		/* prime data page for it to use */
		mi_gdinit(&gd->mi, x);
		cpu_gdinit(gd, x);
		ipiq_size = sizeof(struct lwkt_ipiq) * (naps + 1);
		gd->mi.gd_ipiq = (void *)kmem_alloc(&kernel_map, ipiq_size);
		bzero(gd->mi.gd_ipiq, ipiq_size);

		/* setup a vector to our boot code */
		*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
		*((volatile u_short *) WARMBOOT_SEG) = (boot_addr >> 4);
		outb(CMOS_REG, BIOS_RESET);
		outb(CMOS_DATA, BIOS_WARM);	/* 'warm-start' */

		/*
		 * Setup the AP boot stack
		 */
		bootSTK = &ps->idlestack[UPAGES * PAGE_SIZE - PAGE_SIZE];
		bootAP = x;

		/* attempt to start the Application Processor */
		CHECK_INIT(99);	/* setup checkpoints */
		if (!start_ap(gd, boot_addr, smibest)) {
			kprintf("\nAP #%d (PHY# %d) failed!\n",
				x, CPUID_TO_APICID(x));
			CHECK_PRINT("trace");	/* show checkpoints */
			/* better panic as the AP may be running loose */
			kprintf("panic y/n? [y] ");
			if (cngetc() != 'n')
				panic("bye-bye");
		}
		CHECK_PRINT("trace");	/* show checkpoints */
	}

	/* set ncpus to 1 + highest logical cpu.  Not all may have come up */
	ncpus = x;

	/* ncpus2 -- ncpus rounded down to the nearest power of 2 */
	for (shift = 0; (1 << shift) <= ncpus; ++shift)
		;
	--shift;
	ncpus2_shift = shift;
	ncpus2 = 1 << shift;
	ncpus2_mask = ncpus2 - 1;

	/* ncpus_fit -- ncpus rounded up to the nearest power of 2 */
	if ((1 << shift) < ncpus)
		++shift;
	ncpus_fit = 1 << shift;
	ncpus_fit_mask = ncpus_fit - 1;
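
	/*
	 * Worked example (illustrative): with ncpus = 6 the first loop
	 * above exits at shift = 3 (1 << 3 = 8 > 6) and is decremented
	 * to 2, giving ncpus2 = 4 and ncpus2_mask = 3 (round down);
	 * the follow-up test bumps shift back to 3, giving ncpus_fit = 8
	 * and ncpus_fit_mask = 7 (round up).
	 */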

	/* build our map of 'other' CPUs */
	mycpu->gd_other_cpus = smp_startup_mask;
	CPUMASK_NANDBIT(mycpu->gd_other_cpus, mycpu->gd_cpuid);

	ipiq_size = sizeof(struct lwkt_ipiq) * ncpus;
	mycpu->gd_ipiq = (void *)kmem_alloc(&kernel_map, ipiq_size);
	bzero(mycpu->gd_ipiq, ipiq_size);

	/* restore the warmstart vector */
	*(u_long *) WARMBOOT_OFF = mpbioswarmvec;
	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, mpbiosreason);

	/*
	 * NOTE!  The idlestack for the BSP was setup by locore.  Finish
	 * up, clean out the P==V mapping we did earlier.
	 */
	pmap_set_opt();

	/*
	 * Wait for all APs to finish initializing their LAPIC.
	 */
	if (bootverbose)
		kprintf("SMP: Waiting for APs to finish LAPIC initialization\n");
	if (cpu_feature & CPUID_TSC)
		tsc0_offset = rdtsc();
	tsc_offsets[0] = 0;
	mp_finish_lapic = 1;
	rel_mplock();

	while (CPUMASK_CMPMASKNEQ(smp_lapic_mask, smp_startup_mask)) {
		cpu_pause();
		cpu_lfence();
		if (cpu_feature & CPUID_TSC)
			tsc0_offset = rdtsc();
	}
	while (try_mplock() == 0) {
		cpu_pause();
		cpu_lfence();
	}

	/* number of APs actually started */
	return ncpus - 1;
}


/*
 * load the 1st level AP boot code into base memory.
 */

/* targets for relocation */
extern void bigJump(void);
extern void bootCodeSeg(void);
extern void bootDataSeg(void);
extern void MPentry(void);
extern u_int MP_GDT;
extern u_int mp_gdtbase;

#if 0

static void
install_ap_tramp(u_int boot_addr)
{
	int	x;
	int	size = *(int *) ((u_long) & bootMP_size);
	u_char *src = (u_char *) ((u_long) bootMP);
	u_char *dst = (u_char *) boot_addr + KERNBASE;
	u_int	boot_base = (u_int) bootMP;
	u_int8_t *dst8;
	u_int16_t *dst16;
	u_int32_t *dst32;

	POSTCODE(INSTALL_AP_TRAMP_POST);

	for (x = 0; x < size; ++x)
		*dst++ = *src++;

	/*
	 * modify addresses in code we just moved to basemem. unfortunately we
	 * need fairly detailed info about mpboot.s for this to work.  changes
	 * to mpboot.s might require changes here.
	 */

	/* boot code is located in KERNEL space */
	dst = (u_char *) boot_addr + KERNBASE;

	/* modify the lgdt arg */
	dst32 = (u_int32_t *) (dst + ((u_int) & mp_gdtbase - boot_base));
	*dst32 = boot_addr + ((u_int) & MP_GDT - boot_base);

	/* modify the ljmp target for MPentry() */
	dst32 = (u_int32_t *) (dst + ((u_int) bigJump - boot_base) + 1);
	*dst32 = ((u_int) MPentry - KERNBASE);

	/* modify the target for boot code segment */
	dst16 = (u_int16_t *) (dst + ((u_int) bootCodeSeg - boot_base));
	dst8 = (u_int8_t *) (dst16 + 1);
	*dst16 = (u_int) boot_addr & 0xffff;
	*dst8 = ((u_int) boot_addr >> 16) & 0xff;

	/* modify the target for boot data segment */
	dst16 = (u_int16_t *) (dst + ((u_int) bootDataSeg - boot_base));
	dst8 = (u_int8_t *) (dst16 + 1);
	*dst16 = (u_int) boot_addr & 0xffff;
	*dst8 = ((u_int) boot_addr >> 16) & 0xff;
}

#endif

/*
 * This function starts the AP (application processor) identified
 * by the APIC ID 'physicalCpu'.  It does quite a "song and dance"
 * to accomplish this.  This is necessary because of the nuances
 * of the different hardware we might encounter.  It ain't pretty,
 * but it seems to work.
 *
 * NOTE: eventually an AP gets to ap_init(), which is called just
 *	 before the AP goes into the LWKT scheduler's idle loop.
 */
static int
start_ap(struct mdglobaldata *gd, u_int boot_addr, int smibest)
{
	int	physical_cpu;
	int	vector;
	u_long	icr_lo, icr_hi;

	POSTCODE(START_AP_POST);

	/* get the PHYSICAL APIC ID# */
	physical_cpu = CPUID_TO_APICID(gd->mi.gd_cpuid);

	/* calculate the vector */
	vector = (boot_addr >> 12) & 0xff;
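
	/*
	 * Illustrative: a STARTUP IPI vector is the physical page number
	 * of the real-mode entry point.  With boot_addr at, say, 0x9f000,
	 * the vector is 0x9f and the AP begins executing at 0x9f000 in
	 * real mode, which is why the trampoline must reside below 1MB
	 * on a 4KB boundary.
	 */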

	/* We don't want anything interfering */
	cpu_disable_intr();

	/* Make sure the target cpu sees everything */
	wbinvd();

	/*
	 * Try to detect when a SMI has occurred, wait up to 200ms.
	 *
	 * If a SMI occurs during an AP reset but before we issue
	 * the STARTUP command, the AP may brick.  To work around
	 * this problem we hold off doing the AP startup until
	 * after we have detected the SMI.  Hopefully another SMI
	 * will not occur before we finish the AP startup.
	 *
	 * Retries don't seem to help.  SMIs have a window of opportunity
	 * and if USB->legacy keyboard emulation is enabled in the BIOS
	 * the interrupt rate can be quite high.
	 *
	 * NOTE: Don't worry about the L1 cache load, it might bloat
	 *	 ldelta a little but ndelta will be so huge when the SMI
	 *	 occurs the detection logic will still work fine.
	 */
	if (smibest) {
		set_apic_timer(200000);
		smitest();
	}

	/*
	 * First we do an INIT/RESET IPI.  This INIT IPI might be run,
	 * resetting and running the target CPU.  OR this INIT IPI might
	 * be latched (P5 bug), with the CPU waiting for a STARTUP IPI.
	 * OR this INIT IPI might be ignored.
	 *
	 * See apic/apicreg.h for icr bit definitions.
	 *
	 * TIME CRITICAL CODE, DO NOT DO ANY KPRINTFS IN THE HOT PATH.
	 */

	/*
	 * Setup the address for the target AP.  We can setup
	 * icr_hi once and then just trigger operations with
	 * icr_lo.
	 */
	icr_hi = lapic->icr_hi & ~APIC_ID_MASK;
	icr_hi |= (physical_cpu << 24);
	icr_lo = lapic->icr_lo & 0xfff00000;
	lapic->icr_hi = icr_hi;

	/*
	 * Do an INIT IPI: assert RESET
	 *
	 * Use edge triggered mode to assert INIT
	 */
	lapic->icr_lo = icr_lo | 0x00004500;
	while (lapic->icr_lo & APIC_DELSTAT_MASK)
		/* spin */ ;

	/*
	 * The spec calls for a 10ms delay but we may have to use a
	 * MUCH lower delay to avoid bricking an AP due to a fast SMI
	 * interrupt.  We have other loops here too and dividing by 2
	 * doesn't seem to be enough even after subtracting 350us,
	 * so we divide by 4.
	 *
	 * Our minimum delay is 150uS, maximum is 10ms.  If no SMI
	 * interrupt was detected we use the full 10ms.
	 */
	if (smibest == 0)
		u_sleep(10000);
	else if (smibest < 150 * 4 + 350)
		u_sleep(150);
	else if ((smibest - 350) / 4 < 10000)
		u_sleep((smibest - 350) / 4);
	else
		u_sleep(10000);

	/*
	 * Do an INIT IPI: deassert RESET
	 *
	 * Use level triggered mode to deassert.  It is unclear
	 * why we need to do this.
	 */
	lapic->icr_lo = icr_lo | 0x00008500;
	while (lapic->icr_lo & APIC_DELSTAT_MASK)
		/* spin */ ;
	u_sleep(150);				/* wait 150us */

	/*
	 * Next we do a STARTUP IPI: the previous INIT IPI might still be
	 * latched (P5 bug), in which case this 1st STARTUP would terminate
	 * immediately and the previously started INIT IPI would continue.
	 * OR the previous INIT IPI has already run, and this STARTUP IPI
	 * will run.  OR the previous INIT IPI was ignored, and this
	 * STARTUP IPI will run.
	 */
	lapic->icr_lo = icr_lo | 0x00000600 | vector;
	while (lapic->icr_lo & APIC_DELSTAT_MASK)
		/* spin */ ;
	u_sleep(200);				/* wait ~200uS */
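
	/*
	 * Decode of the ICR writes above (illustrative): 0x00004500 is
	 * delivery mode 101 (INIT) with the level bit asserted and edge
	 * trigger; 0x00008500 is INIT with level trigger and the level
	 * bit deasserted; 0x00000600 | vector is delivery mode 110
	 * (STARTUP) carrying the trampoline page number.  Spinning on
	 * APIC_DELSTAT_MASK (the ICR delivery-status bit) waits for the
	 * local APIC to finish sending the previous IPI.
	 */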

	/*
	 * Finally we do a 2nd STARTUP IPI: this 2nd STARTUP IPI should
	 * run IF the previous STARTUP IPI was cancelled by a latched
	 * INIT IPI.  Otherwise this STARTUP IPI will be ignored, as only
	 * ONE STARTUP IPI is recognized after hardware RESET or INIT IPI.
	 */
	lapic->icr_lo = icr_lo | 0x00000600 | vector;
	while (lapic->icr_lo & APIC_DELSTAT_MASK)
		/* spin */ ;

	/* Resume normal operation */
	cpu_enable_intr();

	/* wait for it to start, see ap_init() */
	set_apic_timer(5000000);	/* == 5 seconds */
	while (read_apic_timer()) {
		if (CPUMASK_TESTBIT(smp_startup_mask, gd->mi.gd_cpuid))
			return 1;	/* return SUCCESS */
	}

	return 0;			/* return FAILURE */
}

static
int
smitest(void)
{
	int64_t	ltsc;
	int64_t	ntsc;
	int64_t	ldelta;
	int64_t	ndelta;
	int	count;

	ldelta = 0;
	ndelta = 0;
	while (read_apic_timer()) {
		ltsc = rdtsc();
		for (count = 0; count < 100; ++count)
			ntsc = rdtsc();	/* force loop to occur */
		if (ldelta) {
			ndelta = ntsc - ltsc;
			if (ldelta > ndelta)
				ldelta = ndelta;
			if (ndelta > ldelta * 2)
				break;
		} else {
			ldelta = ntsc - ltsc;
		}
	}
	return(read_apic_timer());
}
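
/*
 * Note on the heuristic above (illustrative): back-to-back rdtsc()
 * loops normally take a nearly constant number of cycles, so ldelta
 * tracks the smallest delta observed.  When an SMI interrupts the
 * loop the measured delta explodes (ndelta > ldelta * 2), at which
 * point we stop and report the remaining APIC timer count, letting
 * the caller compute how many timer ticks elapsed before the SMI.
 */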

/*
 * Synchronously flush the TLB on all other CPUs.  The current cpu's
 * TLB is not flushed.  If the caller wishes to flush the current cpu's
 * TLB the caller must call cpu_invltlb() in addition to smp_invltlb().
 *
 * NOTE: If for some reason we were unable to start all cpus we cannot
 *	 safely use broadcast IPIs.
 */

static cpumask_t smp_invltlb_req;

#define SMP_INVLTLB_DEBUG

void
smp_invltlb(void)
{
	struct mdglobaldata *md = mdcpu;
#ifdef SMP_INVLTLB_DEBUG
	long count = 0;
	long xcount = 0;
#endif
	cpumask_t tmpmask;
	cpumask_t tmpmask2;

	crit_enter_gd(&md->mi);
	CPUMASK_ASSZERO(md->gd_invltlb_ret);
	++md->mi.gd_cnt.v_smpinvltlb;
	ATOMIC_CPUMASK_ORBIT(smp_invltlb_req, md->mi.gd_cpuid);
#ifdef SMP_INVLTLB_DEBUG
again:
#endif
	if (CPUMASK_CMPMASKEQ(smp_startup_mask, smp_active_mask)) {
		all_but_self_ipi(XINVLTLB_OFFSET);
	} else {
		tmpmask = smp_active_mask;
		CPUMASK_NANDMASK(tmpmask, md->mi.gd_cpumask);
		selected_apic_ipi(tmpmask, XINVLTLB_OFFSET,
				  APIC_DELMODE_FIXED);
	}

#ifdef SMP_INVLTLB_DEBUG
	if (xcount)
		kprintf("smp_invltlb: ipi sent\n");
#endif
	for (;;) {
		tmpmask = smp_active_mask;
		tmpmask2 = tmpmask;
		CPUMASK_ANDMASK(tmpmask, md->gd_invltlb_ret);
		CPUMASK_NANDMASK(tmpmask, md->mi.gd_cpumask);
		CPUMASK_NANDMASK(tmpmask2, md->mi.gd_cpumask);

		if (CPUMASK_CMPMASKEQ(tmpmask, tmpmask2))
			break;
		cpu_mfence();
		cpu_pause();
#ifdef SMP_INVLTLB_DEBUG
		/* DEBUGGING */
		if (++count == 400000000) {
			print_backtrace(-1);
			kprintf("smp_invltlb: endless loop %08lx %08lx, "
				"rflags %016jx retry",
				(long)CPUMASK_LOWMASK(md->gd_invltlb_ret),
				(long)CPUMASK_LOWMASK(smp_invltlb_req),
				(intmax_t)read_rflags());
			__asm __volatile ("sti");
			++xcount;
			if (xcount > 2)
				lwkt_process_ipiq();
			if (xcount > 3) {
				int bcpu;
				globaldata_t xgd;

				tmpmask = smp_active_mask;
				CPUMASK_NANDMASK(tmpmask, md->gd_invltlb_ret);
				CPUMASK_NANDMASK(tmpmask, md->mi.gd_cpumask);
				bcpu = BSFCPUMASK(tmpmask);

				kprintf("bcpu %d\n", bcpu);
				xgd = globaldata_find(bcpu);
				kprintf("thread %p %s\n",
					xgd->gd_curthread,
					xgd->gd_curthread->td_comm);
			}
			if (xcount > 5)
				Debugger("giving up");
			count = 0;
			goto again;
		}
#endif
	}
	ATOMIC_CPUMASK_NANDBIT(smp_invltlb_req, md->mi.gd_cpuid);
	crit_exit_gd(&md->mi);
}
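
/*
 * Summary of the handshake above (illustrative): the requesting cpu
 * sets its bit in smp_invltlb_req, IPIs the other cpus, and spins
 * until its gd_invltlb_ret mask covers every other active cpu.  Each
 * target, in smp_invltlb_intr() below, flushes its own TLB and then
 * sets its own bit in the gd_invltlb_ret mask of every cpu it saw
 * pending in smp_invltlb_req, acknowledging all outstanding
 * requesters at once.
 */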

/*
 * Called from Xinvltlb assembly with interrupts disabled.  We didn't
 * bother to bump the critical section count or nested interrupt count
 * so only do very low level operations here.
 */
void
smp_invltlb_intr(void)
{
	struct mdglobaldata *md = mdcpu;
	struct mdglobaldata *omd;
	cpumask_t mask;
	int cpu;

	cpu_mfence();
	mask = smp_invltlb_req;
	cpu_invltlb();
	while (CPUMASK_TESTNZERO(mask)) {
		cpu = BSFCPUMASK(mask);
		CPUMASK_NANDBIT(mask, cpu);
		omd = (struct mdglobaldata *)globaldata_find(cpu);
		ATOMIC_CPUMASK_ORBIT(omd->gd_invltlb_ret, md->mi.gd_cpuid);
	}
}

void
cpu_wbinvd_on_all_cpus_callback(void *arg)
{
	wbinvd();
}

void
smp_invlpg_range_cpusync(void *arg)
{
	vm_offset_t eva, sva, addr;

	sva = ((struct smp_invlpg_range_cpusync_arg *)arg)->sva;
	eva = ((struct smp_invlpg_range_cpusync_arg *)arg)->eva;

	for (addr = sva; addr < eva; addr += PAGE_SIZE) {
		cpu_invlpg((void *)addr);
	}
}

/*
 * When called the executing CPU will send an IPI to all other CPUs
 * requesting that they halt execution.
 *
 * Usually (but not necessarily) called with 'other_cpus' as its arg.
 *
 *  - Signals all CPUs in map to stop.
 *  - Waits for each to stop.
 *
 * Returns:
 *  -1: error
 *   0: NA
 *   1: ok
 *
 * XXX FIXME: this is not MP-safe, needs a lock to prevent multiple CPUs
 *	      from executing at same time.
 */
int
stop_cpus(cpumask_t map)
{
	cpumask_t mask;

	CPUMASK_ANDMASK(map, smp_active_mask);

	/* send the Xcpustop IPI to all CPUs in map */
	selected_apic_ipi(map, XCPUSTOP_OFFSET, APIC_DELMODE_FIXED);

	do {
		mask = stopped_cpus;
		CPUMASK_ANDMASK(mask, map);
		/* spin */
	} while (CPUMASK_CMPMASKNEQ(mask, map));

	return 1;
}


/*
 * Called by a CPU to restart stopped CPUs.
 *
 * Usually (but not necessarily) called with 'stopped_cpus' as its arg.
 *
 *  - Signals all CPUs in map to restart.
 *  - Waits for each to restart.
 *
 * Returns:
 *  -1: error
 *   0: NA
 *   1: ok
 */
int
restart_cpus(cpumask_t map)
{
	cpumask_t mask;

	/* signal other cpus to restart */
	mask = map;
	CPUMASK_ANDMASK(mask, smp_active_mask);
	cpu_ccfence();
	started_cpus = mask;
	cpu_ccfence();

	/* wait for each to clear its bit */
	while (CPUMASK_CMPMASKNEQ(stopped_cpus, map))
		cpu_pause();

	return 1;
}

/*
 * This is called once the mpboot code has gotten us properly relocated
 * and the MMU turned on, etc.  ap_init() is actually the idle thread,
 * and when it returns the scheduler will call the real cpu_idle() main
 * loop for the idlethread.  Interrupts are disabled on entry and should
 * remain disabled at return.
 */
void
ap_init(void)
{
	int	cpu_id;

	/*
	 * Adjust smp_startup_mask to signal the BSP that we have started
	 * up successfully.  Note that we do not yet hold the BGL.  The BSP
	 * is waiting for our signal.
	 *
	 * We can't set our bit in smp_active_mask yet because we are holding
	 * interrupts physically disabled and remote cpus could deadlock
	 * trying to send us an IPI.
	 */
	ATOMIC_CPUMASK_ORBIT(smp_startup_mask, mycpu->gd_cpuid);
	cpu_mfence();

	/*
	 * Interlock for LAPIC initialization.  Wait until mp_finish_lapic
	 * is non-zero, then get the MP lock.
	 *
	 * Note: We are in a critical section.
	 *
	 * Note: we are the idle thread, we can only spin.
	 *
	 * Note: The load fence is memory volatile and prevents the
	 * compiler from improperly caching mp_finish_lapic, and the cpu
	 * from improperly caching it.
	 */
	while (mp_finish_lapic == 0) {
		cpu_pause();
		cpu_lfence();
	}
#if 0
	while (try_mplock() == 0) {
		cpu_pause();
		cpu_lfence();
	}
#endif

	if (cpu_feature & CPUID_TSC) {
		/*
		 * The BSP is constantly updating tsc0_offset, figure out
		 * the relative difference to synchronize ktrdump.
		 */
		tsc_offsets[mycpu->gd_cpuid] = rdtsc() - tsc0_offset;
	}

	/* BSP may have changed PTD while we're waiting for the lock */
	cpu_invltlb();

	/* Build our map of 'other' CPUs. */
	mycpu->gd_other_cpus = smp_startup_mask;
	ATOMIC_CPUMASK_NANDBIT(mycpu->gd_other_cpus, mycpu->gd_cpuid);

	/* A quick check from sanity claus */
	cpu_id = APICID_TO_CPUID((lapic->id & 0xff000000) >> 24);
	if (mycpu->gd_cpuid != cpu_id) {
		kprintf("SMP: assigned cpuid = %d\n", mycpu->gd_cpuid);
		kprintf("SMP: actual cpuid = %d lapicid %d\n",
			cpu_id, (lapic->id & 0xff000000) >> 24);
#if JGXXX
		kprintf("PTD[MPPTDI] = %p\n", (void *)PTD[MPPTDI]);
#endif
		panic("cpuid mismatch! boom!!");
	}

	/* Initialize AP's local APIC for irq's */
	lapic_init(FALSE);

	/* LAPIC initialization is done */
	ATOMIC_CPUMASK_ORBIT(smp_lapic_mask, mycpu->gd_cpuid);
	cpu_mfence();

#if 0
	/* Let BSP move onto the next initialization stage */
	rel_mplock();
#endif

	/*
	 * Interlock for finalization.  Wait until mp_finish is non-zero,
	 * then get the MP lock.
	 *
	 * Note: We are in a critical section.
	 *
	 * Note: we are the idle thread, we can only spin.
	 *
	 * Note: The load fence is memory volatile and prevents the
	 * compiler from improperly caching mp_finish, and the cpu from
	 * improperly caching it.
	 */
	while (mp_finish == 0) {
		cpu_pause();
		cpu_lfence();
	}

	/* BSP may have changed PTD while we're waiting for the lock */
	cpu_invltlb();

	/* Set memory range attributes for this CPU to match the BSP */
	mem_range_AP_init();

	/*
	 * Once we go active we must process any IPIQ messages that may
	 * have been queued, because no actual IPI will occur until we
	 * set our bit in the smp_active_mask.  If we don't the IPI
	 * message interlock could be left set which would also prevent
	 * further IPIs.
	 *
	 * The idle loop doesn't expect the BGL to be held and while
	 * lwkt_switch() normally cleans things up this is a special case
	 * because we are returning almost directly into the idle loop.
	 *
	 * The idle thread is never placed on the runq, make sure
	 * nothing we've done put it there.
	 */

	/*
	 * Hold a critical section and allow real interrupts to occur.  Zero
	 * any spurious interrupts which have accumulated, then set our
	 * smp_active_mask indicating that we are fully operational.
	 */
	crit_enter();
	__asm __volatile("sti; pause; pause"::);
	bzero(mdcpu->gd_ipending, sizeof(mdcpu->gd_ipending));
	ATOMIC_CPUMASK_ORBIT(smp_active_mask, mycpu->gd_cpuid);

	/*
	 * Wait until all cpus have set their smp_active_mask and have fully
	 * operational interrupts before proceeding.
	 *
	 * We need a final cpu_invltlb() because we would not have received
	 * any until we set our bit in smp_active_mask.
	 */
	while (mp_finish == 1) {
		cpu_pause();
		cpu_lfence();
	}
	cpu_invltlb();

	/*
	 * Initialize per-cpu clocks and do other per-cpu initialization.
	 * At this point code is expected to be able to use the full kernel
	 * API.
	 */
	initclocks_pcpu();	/* clock interrupts (via IPIs) */

	/*
	 * Since we may have cleaned up the interrupt triggers, manually
	 * process any pending IPIs before exiting our critical section.
	 * Once the critical section has exited, normal interrupt processing
	 * may occur.
	 */
	lwkt_process_ipiq();
	crit_exit_noyield(mycpu->gd_curthread);

	/*
	 * Final final, allow the waiting BSP to resume the boot process,
	 * return 'into' the idle thread bootstrap.
	 */
	ATOMIC_CPUMASK_ORBIT(smp_finalize_mask, mycpu->gd_cpuid);
	KKASSERT((curthread->td_flags & TDF_RUNQ) == 0);
}

/*
 * Get SMP fully working before we start initializing devices.
 */
static
void
ap_finish(void)
{
	if (bootverbose)
		kprintf("Finish MP startup\n");
	rel_mplock();

	/*
	 * Wait for the active mask to complete, after which all cpus will
	 * be accepting interrupts.
	 */
	mp_finish = 1;
	while (CPUMASK_CMPMASKNEQ(smp_active_mask, smp_startup_mask)) {
		cpu_pause();
		cpu_lfence();
	}

	/*
	 * Wait for the finalization mask to complete, after which all cpus
	 * have completely finished initializing and are entering or are in
	 * their idle thread.
	 *
	 * BSP should have received all required invltlbs but do another
	 * one just in case.
	 */
	cpu_invltlb();
	mp_finish = 2;
	while (CPUMASK_CMPMASKNEQ(smp_finalize_mask, smp_startup_mask)) {
		cpu_pause();
		cpu_lfence();
	}

	while (try_mplock() == 0) {
		cpu_pause();
		cpu_lfence();
	}

	if (bootverbose) {
		kprintf("Active CPU Mask: %016jx\n",
			(uintmax_t)CPUMASK_LOWMASK(smp_active_mask));
	}
}

SYSINIT(finishsmp, SI_BOOT2_FINISH_SMP, SI_ORDER_FIRST, ap_finish, NULL)

void
cpu_send_ipiq(int dcpu)
{
	if (CPUMASK_TESTBIT(smp_active_mask, dcpu))
		single_apic_ipi(dcpu, XIPIQ_OFFSET, APIC_DELMODE_FIXED);
}

#if 0	/* single_apic_ipi_passive() not working yet */
/*
 * Returns 0 on failure, 1 on success
 */
int
cpu_send_ipiq_passive(int dcpu)
{
	int r = 0;

	if (CPUMASK_TESTBIT(smp_active_mask, dcpu)) {
		r = single_apic_ipi_passive(dcpu, XIPIQ_OFFSET,
					    APIC_DELMODE_FIXED);
	}
	return(r);
}
#endif

static void
mp_bsp_simple_setup(void)
{
	size_t ipiq_size;

	/* build our map of 'other' CPUs */
	mycpu->gd_other_cpus = smp_startup_mask;
	CPUMASK_NANDBIT(mycpu->gd_other_cpus, mycpu->gd_cpuid);

	ipiq_size = sizeof(struct lwkt_ipiq) * ncpus;
	mycpu->gd_ipiq = (void *)kmem_alloc(&kernel_map, ipiq_size);
	bzero(mycpu->gd_ipiq, ipiq_size);

	pmap_set_opt();

	if (cpu_feature & CPUID_TSC)
		tsc0_offset = rdtsc();
}


/*
 * CPU TOPOLOGY DETECTION FUNCTIONS
 */

/*
 * Detect intel topology using CPUID
 * Ref: http://www.intel.com/Assets/PDF/appnote/241618.pdf, pg 41
 */
static void
detect_intel_topology(int count_htt_cores)
{
	int shift = 0;
	int ecx_index = 0;
	int core_plus_logical_bits = 0;
	int cores_per_package;
	int logical_per_package;
	int logical_per_core;
	unsigned int p[4];

	if (cpu_high >= 0xb) {
		goto FUNC_B;
	} else if (cpu_high >= 0x4) {
		goto FUNC_4;
	} else {
		core_bits = 0;
		for (shift = 0; (1 << shift) < count_htt_cores; ++shift)
			;
		logical_CPU_bits = shift; /* a bit count, like the paths below */
		return;
	}

FUNC_B:
	cpuid_count(0xb, FUNC_B_THREAD_LEVEL, p);

	/* if 0xb not supported - fallback to 0x4 */
	if (p[1] == 0 || (FUNC_B_TYPE(p[2]) != FUNC_B_THREAD_TYPE)) {
		goto FUNC_4;
	}

	logical_CPU_bits = FUNC_B_BITS_SHIFT_NEXT_LEVEL(p[0]);

	ecx_index = FUNC_B_THREAD_LEVEL + 1;
	do {
		cpuid_count(0xb, ecx_index, p);

		/* Check for the Core type in the implemented sub leaves. */
		if (FUNC_B_TYPE(p[2]) == FUNC_B_CORE_TYPE) {
			core_plus_logical_bits =
			    FUNC_B_BITS_SHIFT_NEXT_LEVEL(p[0]);
			break;
		}

		ecx_index++;
	} while (FUNC_B_TYPE(p[2]) != FUNC_B_INVALID_TYPE);

	core_bits = core_plus_logical_bits - logical_CPU_bits;

	return;
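
	/*
	 * Note on leaf 0xb above (illustrative; assuming
	 * FUNC_B_BITS_SHIFT_NEXT_LEVEL() extracts EAX[4:0] per the Intel
	 * spec): at the thread level it gives the number of APIC ID bits
	 * to shift out to reach the next (core) level, which is
	 * logical_CPU_bits; at the core level it gives the bits covering
	 * core plus SMT.  Subtracting the two isolates the bits that
	 * number the cores within a package.
	 */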

FUNC_4:
	cpuid_count(0x4, 0, p);
	cores_per_package = FUNC_4_MAX_CORE_NO(p[0]) + 1;

	logical_per_package = count_htt_cores;
	logical_per_core = logical_per_package / cores_per_package;

	for (shift = 0; (1 << shift) < logical_per_core; ++shift)
		;
	logical_CPU_bits = shift;

	for (shift = 0; (1 << shift) < cores_per_package; ++shift)
		;
	core_bits = shift;

	return;
}

/*
 * Detect AMD topology using CPUID
 * Ref: http://support.amd.com/us/Embedded_TechDocs/25481.pdf, last page
 */
static void
detect_amd_topology(int count_htt_cores)
{
	int shift = 0;

	if ((cpu_feature & CPUID_HTT) && (amd_feature2 & AMDID2_CMP)) {
		if (cpu_procinfo2 & AMDID_COREID_SIZE) {
			core_bits = (cpu_procinfo2 & AMDID_COREID_SIZE)
			    >> AMDID_COREID_SIZE_SHIFT;
		} else {
			core_bits = (cpu_procinfo2 & AMDID_CMP_CORES) + 1;
			for (shift = 0; (1 << shift) < core_bits; ++shift)
				;
			core_bits = shift;
		}

		logical_CPU_bits = count_htt_cores >> core_bits;
		for (shift = 0; (1 << shift) < logical_CPU_bits; ++shift)
			;
		logical_CPU_bits = shift;
	} else {
		for (shift = 0; (1 << shift) < count_htt_cores; ++shift)
			;
		core_bits = shift;
		logical_CPU_bits = 0;
	}
}

static void
amd_get_compute_unit_id(void *arg)
{
	u_int regs[4];
	cpu_node_t *mynode;

	do_cpuid(0x8000001e, regs);
	mynode = get_cpu_node_by_cpuid(mycpuid);

	/*
	 * AMD - CPUID Specification September 2010
	 * page 34 - //ComputeUnitID = ebx[0:7]//
	 */
	mynode->compute_unit_id = regs[1] & 0xff;
}

int
fix_amd_topology(void)
{
	cpumask_t mask;
	int i;

	if (cpu_vendor_id != CPU_VENDOR_AMD)
		return -1;
	if ((amd_feature2 & AMDID2_TOPOEXT) == 0)
		return -1;

	CPUMASK_ASSALLONES(mask);
	lwkt_cpusync_simple(mask, amd_get_compute_unit_id, NULL);

	kprintf("Compute unit IDs:\n");
	for (i = 0; i < ncpus; i++) {
		kprintf("%d-%d; \n",
			i, get_cpu_node_by_cpuid(i)->compute_unit_id);
	}

	return 0;
}

/*
 * Calculate
 * - logical_CPU_bits
 * - core_bits
 * With the values above (for AMD or INTEL) we are able to generally
 * detect the CPU topology (number of cores for each level):
 * Ref: http://wiki.osdev.org/Detecting_CPU_Topology_(80x86)
 * Ref: http://www.multicoreinfo.com/research/papers/whitepapers/Intel-detect-topology.pdf
 */
void
detect_cpu_topology(void)
{
	static int topology_detected = 0;
	int count = 0;

	if (topology_detected) {
		goto OUT;
	}

	if ((cpu_feature & CPUID_HTT) == 0) {
		core_bits = 0;
		logical_CPU_bits = 0;
		goto OUT;
	} else {
		count = (cpu_procinfo & CPUID_HTT_CORES)
		    >> CPUID_HTT_CORE_SHIFT;
	}

	if (cpu_vendor_id == CPU_VENDOR_INTEL) {
		detect_intel_topology(count);
	} else if (cpu_vendor_id == CPU_VENDOR_AMD) {
		detect_amd_topology(count);
	}

OUT:
	if (bootverbose) {
		kprintf("Bits within APICID: logical_CPU_bits: %d; "
			"core_bits: %d\n", logical_CPU_bits, core_bits);
	}

	topology_detected = 1;
}
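
/*
 * Worked example (illustrative): with logical_CPU_bits = 1 and
 * core_bits = 2, an APIC ID of 0b0101 decodes as SMT id 1 (low bit),
 * core 2 (next two bits) and chip 0 (remaining high bits), matching
 * the extraction done by the three accessors below.
 */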

/*
 * Interface functions to calculate chip_ID,
 * core_number and logical_number
 * Ref: http://wiki.osdev.org/Detecting_CPU_Topology_(80x86)
 */
int
get_chip_ID(int cpuid)
{
	return get_apicid_from_cpuid(cpuid) >>
	    (logical_CPU_bits + core_bits);
}

int
get_core_number_within_chip(int cpuid)
{
	return (get_apicid_from_cpuid(cpuid) >> logical_CPU_bits) &
	    ((1 << core_bits) - 1);
}

int
get_logical_CPU_number_within_core(int cpuid)
{
	return get_apicid_from_cpuid(cpuid) &
	    ((1 << logical_CPU_bits) - 1);
}