1 /* 2 * Copyright (c) 1996, by Steve Passe 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. The name of the developer may NOT be used to endorse or promote products 11 * derived from this software without specific prior written permission. 12 * 13 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 16 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 23 * SUCH DAMAGE. 24 * 25 * $FreeBSD: src/sys/i386/i386/mp_machdep.c,v 1.115.2.15 2003/03/14 21:22:35 jhb Exp $ 26 */ 27 28 #include "opt_cpu.h" 29 30 #include <sys/param.h> 31 #include <sys/systm.h> 32 #include <sys/kernel.h> 33 #include <sys/sysctl.h> 34 #include <sys/malloc.h> 35 #include <sys/memrange.h> 36 #include <sys/cons.h> /* cngetc() */ 37 #include <sys/machintr.h> 38 #include <sys/cpu_topology.h> 39 40 #include <sys/mplock2.h> 41 42 #include <vm/vm.h> 43 #include <vm/vm_param.h> 44 #include <vm/pmap.h> 45 #include <vm/vm_kern.h> 46 #include <vm/vm_extern.h> 47 #include <sys/lock.h> 48 #include <vm/vm_map.h> 49 #include <sys/user.h> 50 #ifdef GPROF 51 #include <sys/gmon.h> 52 #endif 53 54 #include <machine/smp.h> 55 #include <machine_base/apic/apicreg.h> 56 #include <machine/atomic.h> 57 #include <machine/cpufunc.h> 58 #include <machine/cputypes.h> 59 #include <machine_base/apic/lapic.h> 60 #include <machine_base/apic/ioapic.h> 61 #include <machine_base/acpica/acpi_md_cpu.h> 62 #include <machine/psl.h> 63 #include <machine/segments.h> 64 #include <machine/tss.h> 65 #include <machine/specialreg.h> 66 #include <machine/globaldata.h> 67 #include <machine/pmap_inval.h> 68 69 #include <machine/md_var.h> /* setidt() */ 70 #include <machine_base/icu/icu.h> /* IPIs */ 71 #include <machine_base/icu/icu_var.h> 72 #include <machine_base/apic/ioapic_abi.h> 73 #include <machine/intr_machdep.h> /* IPIs */ 74 75 #define WARMBOOT_TARGET 0 76 #define WARMBOOT_OFF (KERNBASE + 0x0467) 77 #define WARMBOOT_SEG (KERNBASE + 0x0469) 78 79 #define CMOS_REG (0x70) 80 #define CMOS_DATA (0x71) 81 #define BIOS_RESET (0x0f) 82 #define BIOS_WARM (0x0a) 83 84 /* 85 * this code MUST be enabled here and in mpboot.s. 86 * it follows the very early stages of AP boot by placing values in CMOS ram. 87 * it NORMALLY will never be needed and thus the primitive method for enabling. 88 * 89 */ 90 #if defined(CHECK_POINTS) 91 #define CHECK_READ(A) (outb(CMOS_REG, (A)), inb(CMOS_DATA)) 92 #define CHECK_WRITE(A,D) (outb(CMOS_REG, (A)), outb(CMOS_DATA, (D))) 93 94 #define CHECK_INIT(D); \ 95 CHECK_WRITE(0x34, (D)); \ 96 CHECK_WRITE(0x35, (D)); \ 97 CHECK_WRITE(0x36, (D)); \ 98 CHECK_WRITE(0x37, (D)); \ 99 CHECK_WRITE(0x38, (D)); \ 100 CHECK_WRITE(0x39, (D)); 101 102 #define CHECK_PRINT(S); \ 103 kprintf("%s: %d, %d, %d, %d, %d, %d\n", \ 104 (S), \ 105 CHECK_READ(0x34), \ 106 CHECK_READ(0x35), \ 107 CHECK_READ(0x36), \ 108 CHECK_READ(0x37), \ 109 CHECK_READ(0x38), \ 110 CHECK_READ(0x39)); 111 112 #else /* CHECK_POINTS */ 113 114 #define CHECK_INIT(D) 115 #define CHECK_PRINT(S) 116 117 #endif /* CHECK_POINTS */ 118 119 /* 120 * Values to send to the POST hardware. 121 */ 122 #define MP_BOOTADDRESS_POST 0x10 123 #define MP_PROBE_POST 0x11 124 #define MPTABLE_PASS1_POST 0x12 125 126 #define MP_START_POST 0x13 127 #define MP_ENABLE_POST 0x14 128 #define MPTABLE_PASS2_POST 0x15 129 130 #define START_ALL_APS_POST 0x16 131 #define INSTALL_AP_TRAMP_POST 0x17 132 #define START_AP_POST 0x18 133 134 #define MP_ANNOUNCE_POST 0x19 135 136 /** XXX FIXME: where does this really belong, isa.h/isa.c perhaps? */ 137 int current_postcode; 138 139 /** XXX FIXME: what system files declare these??? */ 140 extern struct region_descriptor r_gdt; 141 142 extern int nkpt; 143 extern int naps; 144 145 int64_t tsc0_offset; 146 extern int64_t tsc_offsets[]; 147 148 /* AP uses this during bootstrap. Do not staticize. */ 149 char *bootSTK; 150 static int bootAP; 151 152 struct pcb stoppcbs[MAXCPU]; 153 154 extern inthand_t IDTVEC(fast_syscall), IDTVEC(fast_syscall32); 155 156 /* 157 * Local data and functions. 158 */ 159 160 static u_int boot_address; 161 static int mp_finish; 162 static int mp_finish_lapic; 163 164 static int start_all_aps(u_int boot_addr); 165 #if 0 166 static void install_ap_tramp(u_int boot_addr); 167 #endif 168 static int start_ap(struct mdglobaldata *gd, u_int boot_addr, int smibest); 169 static int smitest(void); 170 static void mp_bsp_simple_setup(void); 171 172 /* which cpus have been started */ 173 static cpumask_t smp_startup_mask = CPUMASK_INITIALIZER_ONLYONE; 174 /* which cpus have lapic been inited */ 175 static cpumask_t smp_lapic_mask = CPUMASK_INITIALIZER_ONLYONE; 176 /* which cpus are ready for IPIs etc? */ 177 cpumask_t smp_active_mask = CPUMASK_INITIALIZER_ONLYONE; 178 cpumask_t smp_finalize_mask = CPUMASK_INITIALIZER_ONLYONE; 179 180 SYSCTL_INT(_machdep, OID_AUTO, smp_active, CTLFLAG_RD, &smp_active_mask, 0, ""); 181 static u_int bootMP_size; 182 183 /* Local data for detecting CPU TOPOLOGY */ 184 static int core_bits = 0; 185 static int logical_CPU_bits = 0; 186 187 188 /* 189 * Calculate usable address in base memory for AP trampoline code. 190 */ 191 u_int 192 mp_bootaddress(u_int basemem) 193 { 194 POSTCODE(MP_BOOTADDRESS_POST); 195 196 bootMP_size = mptramp_end - mptramp_start; 197 boot_address = trunc_page(basemem * 1024); /* round down to 4k boundary */ 198 if (((basemem * 1024) - boot_address) < bootMP_size) 199 boot_address -= PAGE_SIZE; /* not enough, lower by 4k */ 200 /* 3 levels of page table pages */ 201 mptramp_pagetables = boot_address - (PAGE_SIZE * 3); 202 203 return mptramp_pagetables; 204 } 205 206 /* 207 * Print various information about the SMP system hardware and setup. 208 */ 209 void 210 mp_announce(void) 211 { 212 int x; 213 214 POSTCODE(MP_ANNOUNCE_POST); 215 216 kprintf("DragonFly/MP: Multiprocessor motherboard\n"); 217 kprintf(" cpu0 (BSP): apic id: %2d\n", CPUID_TO_APICID(0)); 218 for (x = 1; x <= naps; ++x) 219 kprintf(" cpu%d (AP): apic id: %2d\n", x, CPUID_TO_APICID(x)); 220 221 if (!ioapic_enable) 222 kprintf(" Warning: APIC I/O disabled\n"); 223 } 224 225 /* 226 * AP cpu's call this to sync up protected mode. 227 * 228 * WARNING! %gs is not set up on entry. This routine sets up %gs. 229 */ 230 void 231 init_secondary(void) 232 { 233 int gsel_tss; 234 int x, myid = bootAP; 235 u_int64_t msr, cr0; 236 struct mdglobaldata *md; 237 struct privatespace *ps; 238 239 ps = CPU_prvspace[myid]; 240 241 gdt_segs[GPROC0_SEL].ssd_base = 242 (long) &ps->mdglobaldata.gd_common_tss; 243 ps->mdglobaldata.mi.gd_prvspace = ps; 244 245 /* We fill the 32-bit segment descriptors */ 246 for (x = 0; x < NGDT; x++) { 247 if (x != GPROC0_SEL && x != (GPROC0_SEL + 1)) 248 ssdtosd(&gdt_segs[x], &gdt[myid * NGDT + x]); 249 } 250 /* And now a 64-bit one */ 251 ssdtosyssd(&gdt_segs[GPROC0_SEL], 252 (struct system_segment_descriptor *)&gdt[myid * NGDT + GPROC0_SEL]); 253 254 r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1; 255 r_gdt.rd_base = (long) &gdt[myid * NGDT]; 256 lgdt(&r_gdt); /* does magic intra-segment return */ 257 258 /* lgdt() destroys the GSBASE value, so we load GSBASE after lgdt() */ 259 wrmsr(MSR_FSBASE, 0); /* User value */ 260 wrmsr(MSR_GSBASE, (u_int64_t)ps); 261 wrmsr(MSR_KGSBASE, 0); /* XXX User value while we're in the kernel */ 262 263 lidt(&r_idt_arr[mdcpu->mi.gd_cpuid]); 264 265 #if 0 266 lldt(_default_ldt); 267 mdcpu->gd_currentldt = _default_ldt; 268 #endif 269 270 gsel_tss = GSEL(GPROC0_SEL, SEL_KPL); 271 gdt[myid * NGDT + GPROC0_SEL].sd_type = SDT_SYSTSS; 272 273 md = mdcpu; /* loaded through %gs:0 (mdglobaldata.mi.gd_prvspace)*/ 274 275 md->gd_common_tss.tss_rsp0 = 0; /* not used until after switch */ 276 #if 0 /* JG XXX */ 277 md->gd_common_tss.tss_ioopt = (sizeof md->gd_common_tss) << 16; 278 #endif 279 md->gd_tss_gdt = &gdt[myid * NGDT + GPROC0_SEL]; 280 md->gd_common_tssd = *md->gd_tss_gdt; 281 282 /* double fault stack */ 283 md->gd_common_tss.tss_ist1 = 284 (long)&md->mi.gd_prvspace->idlestack[ 285 sizeof(md->mi.gd_prvspace->idlestack)]; 286 287 ltr(gsel_tss); 288 289 /* 290 * Set to a known state: 291 * Set by mpboot.s: CR0_PG, CR0_PE 292 * Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM 293 */ 294 cr0 = rcr0(); 295 cr0 &= ~(CR0_CD | CR0_NW | CR0_EM); 296 load_cr0(cr0); 297 298 /* Set up the fast syscall stuff */ 299 msr = rdmsr(MSR_EFER) | EFER_SCE; 300 wrmsr(MSR_EFER, msr); 301 wrmsr(MSR_LSTAR, (u_int64_t)IDTVEC(fast_syscall)); 302 wrmsr(MSR_CSTAR, (u_int64_t)IDTVEC(fast_syscall32)); 303 msr = ((u_int64_t)GSEL(GCODE_SEL, SEL_KPL) << 32) | 304 ((u_int64_t)GSEL(GUCODE32_SEL, SEL_UPL) << 48); 305 wrmsr(MSR_STAR, msr); 306 wrmsr(MSR_SF_MASK, PSL_NT|PSL_T|PSL_I|PSL_C|PSL_D|PSL_IOPL); 307 308 pmap_set_opt(); /* PSE/4MB pages, etc */ 309 pmap_init_pat(); /* Page Attribute Table */ 310 311 /* set up CPU registers and state */ 312 cpu_setregs(); 313 314 /* set up SSE/NX registers */ 315 initializecpu(myid); 316 317 /* set up FPU state on the AP */ 318 npxinit(); 319 320 /* disable the APIC, just to be SURE */ 321 lapic->svr &= ~APIC_SVR_ENABLE; 322 } 323 324 /******************************************************************* 325 * local functions and data 326 */ 327 328 /* 329 * Start the SMP system 330 */ 331 static void 332 mp_start_aps(void *dummy __unused) 333 { 334 if (lapic_enable) { 335 /* start each Application Processor */ 336 start_all_aps(boot_address); 337 } else { 338 mp_bsp_simple_setup(); 339 } 340 } 341 SYSINIT(startaps, SI_BOOT2_START_APS, SI_ORDER_FIRST, mp_start_aps, NULL); 342 343 /* 344 * start each AP in our list 345 */ 346 static int 347 start_all_aps(u_int boot_addr) 348 { 349 vm_offset_t va = boot_address + KERNBASE; 350 u_int64_t *pt4, *pt3, *pt2; 351 int pssize; 352 int x, i; 353 int shift; 354 int smicount; 355 int smibest; 356 int smilast; 357 u_char mpbiosreason; 358 u_long mpbioswarmvec; 359 struct mdglobaldata *gd; 360 struct privatespace *ps; 361 size_t ipiq_size; 362 363 POSTCODE(START_ALL_APS_POST); 364 365 /* install the AP 1st level boot code */ 366 pmap_kenter(va, boot_address); 367 cpu_invlpg((void *)va); /* JG XXX */ 368 bcopy(mptramp_start, (void *)va, bootMP_size); 369 370 /* Locate the page tables, they'll be below the trampoline */ 371 pt4 = (u_int64_t *)(uintptr_t)(mptramp_pagetables + KERNBASE); 372 pt3 = pt4 + (PAGE_SIZE) / sizeof(u_int64_t); 373 pt2 = pt3 + (PAGE_SIZE) / sizeof(u_int64_t); 374 375 /* Create the initial 1GB replicated page tables */ 376 for (i = 0; i < 512; i++) { 377 /* Each slot of the level 4 pages points to the same level 3 page */ 378 pt4[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables + PAGE_SIZE); 379 pt4[i] |= kernel_pmap.pmap_bits[PG_V_IDX] | 380 kernel_pmap.pmap_bits[PG_RW_IDX] | 381 kernel_pmap.pmap_bits[PG_U_IDX]; 382 383 /* Each slot of the level 3 pages points to the same level 2 page */ 384 pt3[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables + (2 * PAGE_SIZE)); 385 pt3[i] |= kernel_pmap.pmap_bits[PG_V_IDX] | 386 kernel_pmap.pmap_bits[PG_RW_IDX] | 387 kernel_pmap.pmap_bits[PG_U_IDX]; 388 389 /* The level 2 page slots are mapped with 2MB pages for 1GB. */ 390 pt2[i] = i * (2 * 1024 * 1024); 391 pt2[i] |= kernel_pmap.pmap_bits[PG_V_IDX] | 392 kernel_pmap.pmap_bits[PG_RW_IDX] | 393 kernel_pmap.pmap_bits[PG_PS_IDX] | 394 kernel_pmap.pmap_bits[PG_U_IDX]; 395 } 396 397 /* save the current value of the warm-start vector */ 398 mpbioswarmvec = *((u_int32_t *) WARMBOOT_OFF); 399 outb(CMOS_REG, BIOS_RESET); 400 mpbiosreason = inb(CMOS_DATA); 401 402 /* setup a vector to our boot code */ 403 *((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET; 404 *((volatile u_short *) WARMBOOT_SEG) = (boot_address >> 4); 405 outb(CMOS_REG, BIOS_RESET); 406 outb(CMOS_DATA, BIOS_WARM); /* 'warm-start' */ 407 408 /* 409 * If we have a TSC we can figure out the SMI interrupt rate. 410 * The SMI does not necessarily use a constant rate. Spend 411 * up to 250ms trying to figure it out. 412 */ 413 smibest = 0; 414 if (cpu_feature & CPUID_TSC) { 415 set_apic_timer(275000); 416 smilast = read_apic_timer(); 417 for (x = 0; x < 20 && read_apic_timer(); ++x) { 418 smicount = smitest(); 419 if (smibest == 0 || smilast - smicount < smibest) 420 smibest = smilast - smicount; 421 smilast = smicount; 422 } 423 if (smibest > 250000) 424 smibest = 0; 425 if (smibest) { 426 smibest = smibest * (int64_t)1000000 / 427 get_apic_timer_frequency(); 428 } 429 } 430 if (smibest) 431 kprintf("SMI Frequency (worst case): %d Hz (%d us)\n", 432 1000000 / smibest, smibest); 433 434 /* start each AP */ 435 for (x = 1; x <= naps; ++x) { 436 /* This is a bit verbose, it will go away soon. */ 437 438 pssize = sizeof(struct privatespace); 439 ps = (void *)kmem_alloc(&kernel_map, pssize); 440 CPU_prvspace[x] = ps; 441 #if 0 442 kprintf("ps %d %p %d\n", x, ps, pssize); 443 #endif 444 bzero(ps, pssize); 445 gd = &ps->mdglobaldata; 446 gd->mi.gd_prvspace = ps; 447 448 /* prime data page for it to use */ 449 mi_gdinit(&gd->mi, x); 450 cpu_gdinit(gd, x); 451 ipiq_size = sizeof(struct lwkt_ipiq) * (naps + 1); 452 gd->mi.gd_ipiq = (void *)kmem_alloc(&kernel_map, ipiq_size); 453 bzero(gd->mi.gd_ipiq, ipiq_size); 454 455 gd->gd_acpi_id = CPUID_TO_ACPIID(gd->mi.gd_cpuid); 456 457 /* setup a vector to our boot code */ 458 *((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET; 459 *((volatile u_short *) WARMBOOT_SEG) = (boot_addr >> 4); 460 outb(CMOS_REG, BIOS_RESET); 461 outb(CMOS_DATA, BIOS_WARM); /* 'warm-start' */ 462 463 /* 464 * Setup the AP boot stack 465 */ 466 bootSTK = &ps->idlestack[UPAGES * PAGE_SIZE - PAGE_SIZE]; 467 bootAP = x; 468 469 /* attempt to start the Application Processor */ 470 CHECK_INIT(99); /* setup checkpoints */ 471 if (!start_ap(gd, boot_addr, smibest)) { 472 kprintf("\nAP #%d (PHY# %d) failed!\n", 473 x, CPUID_TO_APICID(x)); 474 CHECK_PRINT("trace"); /* show checkpoints */ 475 /* better panic as the AP may be running loose */ 476 kprintf("panic y/n? [y] "); 477 cnpoll(TRUE); 478 if (cngetc() != 'n') 479 panic("bye-bye"); 480 cnpoll(FALSE); 481 } 482 CHECK_PRINT("trace"); /* show checkpoints */ 483 } 484 485 /* set ncpus to 1 + highest logical cpu. Not all may have come up */ 486 ncpus = x; 487 488 /* ncpus2 -- ncpus rounded down to the nearest power of 2 */ 489 for (shift = 0; (1 << shift) <= ncpus; ++shift) 490 ; 491 --shift; 492 ncpus2_shift = shift; 493 ncpus2 = 1 << shift; 494 ncpus2_mask = ncpus2 - 1; 495 496 /* ncpus_fit -- ncpus rounded up to the nearest power of 2 */ 497 if ((1 << shift) < ncpus) 498 ++shift; 499 ncpus_fit = 1 << shift; 500 ncpus_fit_mask = ncpus_fit - 1; 501 502 /* build our map of 'other' CPUs */ 503 mycpu->gd_other_cpus = smp_startup_mask; 504 CPUMASK_NANDBIT(mycpu->gd_other_cpus, mycpu->gd_cpuid); 505 506 gd = (struct mdglobaldata *)mycpu; 507 gd->gd_acpi_id = CPUID_TO_ACPIID(mycpu->gd_cpuid); 508 509 ipiq_size = sizeof(struct lwkt_ipiq) * ncpus; 510 mycpu->gd_ipiq = (void *)kmem_alloc(&kernel_map, ipiq_size); 511 bzero(mycpu->gd_ipiq, ipiq_size); 512 513 /* restore the warmstart vector */ 514 *(u_long *) WARMBOOT_OFF = mpbioswarmvec; 515 outb(CMOS_REG, BIOS_RESET); 516 outb(CMOS_DATA, mpbiosreason); 517 518 /* 519 * NOTE! The idlestack for the BSP was setup by locore. Finish 520 * up, clean out the P==V mapping we did earlier. 521 */ 522 pmap_set_opt(); 523 524 /* 525 * Wait all APs to finish initializing LAPIC 526 */ 527 if (bootverbose) 528 kprintf("SMP: Waiting APs LAPIC initialization\n"); 529 if (cpu_feature & CPUID_TSC) 530 tsc0_offset = rdtsc(); 531 tsc_offsets[0] = 0; 532 mp_finish_lapic = 1; 533 rel_mplock(); 534 535 while (CPUMASK_CMPMASKNEQ(smp_lapic_mask, smp_startup_mask)) { 536 cpu_pause(); 537 cpu_lfence(); 538 if (cpu_feature & CPUID_TSC) 539 tsc0_offset = rdtsc(); 540 } 541 while (try_mplock() == 0) { 542 cpu_pause(); 543 cpu_lfence(); 544 } 545 546 /* number of APs actually started */ 547 return ncpus - 1; 548 } 549 550 551 /* 552 * load the 1st level AP boot code into base memory. 553 */ 554 555 /* targets for relocation */ 556 extern void bigJump(void); 557 extern void bootCodeSeg(void); 558 extern void bootDataSeg(void); 559 extern void MPentry(void); 560 extern u_int MP_GDT; 561 extern u_int mp_gdtbase; 562 563 #if 0 564 565 static void 566 install_ap_tramp(u_int boot_addr) 567 { 568 int x; 569 int size = *(int *) ((u_long) & bootMP_size); 570 u_char *src = (u_char *) ((u_long) bootMP); 571 u_char *dst = (u_char *) boot_addr + KERNBASE; 572 u_int boot_base = (u_int) bootMP; 573 u_int8_t *dst8; 574 u_int16_t *dst16; 575 u_int32_t *dst32; 576 577 POSTCODE(INSTALL_AP_TRAMP_POST); 578 579 for (x = 0; x < size; ++x) 580 *dst++ = *src++; 581 582 /* 583 * modify addresses in code we just moved to basemem. unfortunately we 584 * need fairly detailed info about mpboot.s for this to work. changes 585 * to mpboot.s might require changes here. 586 */ 587 588 /* boot code is located in KERNEL space */ 589 dst = (u_char *) boot_addr + KERNBASE; 590 591 /* modify the lgdt arg */ 592 dst32 = (u_int32_t *) (dst + ((u_int) & mp_gdtbase - boot_base)); 593 *dst32 = boot_addr + ((u_int) & MP_GDT - boot_base); 594 595 /* modify the ljmp target for MPentry() */ 596 dst32 = (u_int32_t *) (dst + ((u_int) bigJump - boot_base) + 1); 597 *dst32 = ((u_int) MPentry - KERNBASE); 598 599 /* modify the target for boot code segment */ 600 dst16 = (u_int16_t *) (dst + ((u_int) bootCodeSeg - boot_base)); 601 dst8 = (u_int8_t *) (dst16 + 1); 602 *dst16 = (u_int) boot_addr & 0xffff; 603 *dst8 = ((u_int) boot_addr >> 16) & 0xff; 604 605 /* modify the target for boot data segment */ 606 dst16 = (u_int16_t *) (dst + ((u_int) bootDataSeg - boot_base)); 607 dst8 = (u_int8_t *) (dst16 + 1); 608 *dst16 = (u_int) boot_addr & 0xffff; 609 *dst8 = ((u_int) boot_addr >> 16) & 0xff; 610 } 611 612 #endif 613 614 /* 615 * This function starts the AP (application processor) identified 616 * by the APIC ID 'physicalCpu'. It does quite a "song and dance" 617 * to accomplish this. This is necessary because of the nuances 618 * of the different hardware we might encounter. It ain't pretty, 619 * but it seems to work. 620 * 621 * NOTE: eventually an AP gets to ap_init(), which is called just 622 * before the AP goes into the LWKT scheduler's idle loop. 623 */ 624 static int 625 start_ap(struct mdglobaldata *gd, u_int boot_addr, int smibest) 626 { 627 int physical_cpu; 628 int vector; 629 u_long icr_lo, icr_hi; 630 631 POSTCODE(START_AP_POST); 632 633 /* get the PHYSICAL APIC ID# */ 634 physical_cpu = CPUID_TO_APICID(gd->mi.gd_cpuid); 635 636 /* calculate the vector */ 637 vector = (boot_addr >> 12) & 0xff; 638 639 /* We don't want anything interfering */ 640 cpu_disable_intr(); 641 642 /* Make sure the target cpu sees everything */ 643 wbinvd(); 644 645 /* 646 * Try to detect when a SMI has occurred, wait up to 200ms. 647 * 648 * If a SMI occurs during an AP reset but before we issue 649 * the STARTUP command, the AP may brick. To work around 650 * this problem we hold off doing the AP startup until 651 * after we have detected the SMI. Hopefully another SMI 652 * will not occur before we finish the AP startup. 653 * 654 * Retries don't seem to help. SMIs have a window of opportunity 655 * and if USB->legacy keyboard emulation is enabled in the BIOS 656 * the interrupt rate can be quite high. 657 * 658 * NOTE: Don't worry about the L1 cache load, it might bloat 659 * ldelta a little but ndelta will be so huge when the SMI 660 * occurs the detection logic will still work fine. 661 */ 662 if (smibest) { 663 set_apic_timer(200000); 664 smitest(); 665 } 666 667 /* 668 * first we do an INIT/RESET IPI this INIT IPI might be run, reseting 669 * and running the target CPU. OR this INIT IPI might be latched (P5 670 * bug), CPU waiting for STARTUP IPI. OR this INIT IPI might be 671 * ignored. 672 * 673 * see apic/apicreg.h for icr bit definitions. 674 * 675 * TIME CRITICAL CODE, DO NOT DO ANY KPRINTFS IN THE HOT PATH. 676 */ 677 678 /* 679 * Setup the address for the target AP. We can setup 680 * icr_hi once and then just trigger operations with 681 * icr_lo. 682 */ 683 icr_hi = lapic->icr_hi & ~APIC_ID_MASK; 684 icr_hi |= (physical_cpu << 24); 685 icr_lo = lapic->icr_lo & 0xfff00000; 686 lapic->icr_hi = icr_hi; 687 688 /* 689 * Do an INIT IPI: assert RESET 690 * 691 * Use edge triggered mode to assert INIT 692 */ 693 lapic->icr_lo = icr_lo | 0x00004500; 694 while (lapic->icr_lo & APIC_DELSTAT_MASK) 695 /* spin */ ; 696 697 /* 698 * The spec calls for a 10ms delay but we may have to use a 699 * MUCH lower delay to avoid bricking an AP due to a fast SMI 700 * interrupt. We have other loops here too and dividing by 2 701 * doesn't seem to be enough even after subtracting 350us, 702 * so we divide by 4. 703 * 704 * Our minimum delay is 150uS, maximum is 10ms. If no SMI 705 * interrupt was detected we use the full 10ms. 706 */ 707 if (smibest == 0) 708 u_sleep(10000); 709 else if (smibest < 150 * 4 + 350) 710 u_sleep(150); 711 else if ((smibest - 350) / 4 < 10000) 712 u_sleep((smibest - 350) / 4); 713 else 714 u_sleep(10000); 715 716 /* 717 * Do an INIT IPI: deassert RESET 718 * 719 * Use level triggered mode to deassert. It is unclear 720 * why we need to do this. 721 */ 722 lapic->icr_lo = icr_lo | 0x00008500; 723 while (lapic->icr_lo & APIC_DELSTAT_MASK) 724 /* spin */ ; 725 u_sleep(150); /* wait 150us */ 726 727 /* 728 * Next we do a STARTUP IPI: the previous INIT IPI might still be 729 * latched, (P5 bug) this 1st STARTUP would then terminate 730 * immediately, and the previously started INIT IPI would continue. OR 731 * the previous INIT IPI has already run. and this STARTUP IPI will 732 * run. OR the previous INIT IPI was ignored. and this STARTUP IPI 733 * will run. 734 */ 735 lapic->icr_lo = icr_lo | 0x00000600 | vector; 736 while (lapic->icr_lo & APIC_DELSTAT_MASK) 737 /* spin */ ; 738 u_sleep(200); /* wait ~200uS */ 739 740 /* 741 * Finally we do a 2nd STARTUP IPI: this 2nd STARTUP IPI should run IF 742 * the previous STARTUP IPI was cancelled by a latched INIT IPI. OR 743 * this STARTUP IPI will be ignored, as only ONE STARTUP IPI is 744 * recognized after hardware RESET or INIT IPI. 745 */ 746 lapic->icr_lo = icr_lo | 0x00000600 | vector; 747 while (lapic->icr_lo & APIC_DELSTAT_MASK) 748 /* spin */ ; 749 750 /* Resume normal operation */ 751 cpu_enable_intr(); 752 753 /* wait for it to start, see ap_init() */ 754 set_apic_timer(5000000);/* == 5 seconds */ 755 while (read_apic_timer()) { 756 if (CPUMASK_TESTBIT(smp_startup_mask, gd->mi.gd_cpuid)) 757 return 1; /* return SUCCESS */ 758 } 759 760 return 0; /* return FAILURE */ 761 } 762 763 static 764 int 765 smitest(void) 766 { 767 int64_t ltsc; 768 int64_t ntsc; 769 int64_t ldelta; 770 int64_t ndelta; 771 int count; 772 773 ldelta = 0; 774 ndelta = 0; 775 while (read_apic_timer()) { 776 ltsc = rdtsc(); 777 for (count = 0; count < 100; ++count) 778 ntsc = rdtsc(); /* force loop to occur */ 779 if (ldelta) { 780 ndelta = ntsc - ltsc; 781 if (ldelta > ndelta) 782 ldelta = ndelta; 783 if (ndelta > ldelta * 2) 784 break; 785 } else { 786 ldelta = ntsc - ltsc; 787 } 788 } 789 return(read_apic_timer()); 790 } 791 792 /* 793 * Synchronously flush the TLB on all other CPU's. The current cpu's 794 * TLB is not flushed. If the caller wishes to flush the current cpu's 795 * TLB the caller must call cpu_invltlb() in addition to smp_invltlb(). 796 * 797 * NOTE: If for some reason we were unable to start all cpus we cannot 798 * safely use broadcast IPIs. 799 */ 800 801 static cpumask_t smp_invltlb_req; 802 803 #define SMP_INVLTLB_DEBUG 804 805 void 806 smp_invltlb(void) 807 { 808 struct mdglobaldata *md = mdcpu; 809 #ifdef SMP_INVLTLB_DEBUG 810 long count = 0; 811 long xcount = 0; 812 #endif 813 cpumask_t tmpmask; 814 cpumask_t tmpmask2; 815 816 crit_enter_gd(&md->mi); 817 CPUMASK_ASSZERO(md->gd_invltlb_ret); 818 ++md->mi.gd_cnt.v_smpinvltlb; 819 ATOMIC_CPUMASK_ORBIT(smp_invltlb_req, md->mi.gd_cpuid); 820 #ifdef SMP_INVLTLB_DEBUG 821 again: 822 #endif 823 if (CPUMASK_CMPMASKEQ(smp_startup_mask, smp_active_mask)) { 824 all_but_self_ipi(XINVLTLB_OFFSET); 825 } else { 826 tmpmask = smp_active_mask; 827 CPUMASK_NANDMASK(tmpmask, md->mi.gd_cpumask); 828 selected_apic_ipi(tmpmask, XINVLTLB_OFFSET, APIC_DELMODE_FIXED); 829 } 830 831 #ifdef SMP_INVLTLB_DEBUG 832 if (xcount) 833 kprintf("smp_invltlb: ipi sent\n"); 834 #endif 835 for (;;) { 836 tmpmask = smp_active_mask; 837 tmpmask2 = tmpmask; 838 CPUMASK_ANDMASK(tmpmask, md->gd_invltlb_ret); 839 CPUMASK_NANDMASK(tmpmask, md->mi.gd_cpumask); 840 CPUMASK_NANDMASK(tmpmask2, md->mi.gd_cpumask); 841 842 if (CPUMASK_CMPMASKEQ(tmpmask, tmpmask2)) 843 break; 844 cpu_mfence(); 845 cpu_pause(); 846 #ifdef SMP_INVLTLB_DEBUG 847 /* DEBUGGING */ 848 if (++count == 400000000) { 849 print_backtrace(-1); 850 kprintf("smp_invltlb: endless loop %08lx %08lx, " 851 "rflags %016jx retry", 852 (long)CPUMASK_LOWMASK(md->gd_invltlb_ret), 853 (long)CPUMASK_LOWMASK(smp_invltlb_req), 854 (intmax_t)read_rflags()); 855 __asm __volatile ("sti"); 856 ++xcount; 857 if (xcount > 2) 858 lwkt_process_ipiq(); 859 if (xcount > 3) { 860 int bcpu; 861 globaldata_t xgd; 862 863 tmpmask = smp_active_mask; 864 CPUMASK_NANDMASK(tmpmask, md->gd_invltlb_ret); 865 CPUMASK_NANDMASK(tmpmask, md->mi.gd_cpumask); 866 bcpu = BSFCPUMASK(tmpmask); 867 868 kprintf("bcpu %d\n", bcpu); 869 xgd = globaldata_find(bcpu); 870 kprintf("thread %p %s\n", xgd->gd_curthread, xgd->gd_curthread->td_comm); 871 } 872 if (xcount > 5) 873 Debugger("giving up"); 874 count = 0; 875 goto again; 876 } 877 #endif 878 } 879 ATOMIC_CPUMASK_NANDBIT(smp_invltlb_req, md->mi.gd_cpuid); 880 crit_exit_gd(&md->mi); 881 } 882 883 /* 884 * Called from Xinvltlb assembly with interrupts disabled. We didn't 885 * bother to bump the critical section count or nested interrupt count 886 * so only do very low level operations here. 887 */ 888 void 889 smp_invltlb_intr(void) 890 { 891 struct mdglobaldata *md = mdcpu; 892 struct mdglobaldata *omd; 893 cpumask_t mask; 894 int cpu; 895 896 cpu_mfence(); 897 mask = smp_invltlb_req; 898 cpu_invltlb(); 899 while (CPUMASK_TESTNZERO(mask)) { 900 cpu = BSFCPUMASK(mask); 901 CPUMASK_NANDBIT(mask, cpu); 902 omd = (struct mdglobaldata *)globaldata_find(cpu); 903 ATOMIC_CPUMASK_ORBIT(omd->gd_invltlb_ret, md->mi.gd_cpuid); 904 } 905 } 906 907 void 908 cpu_wbinvd_on_all_cpus_callback(void *arg) 909 { 910 wbinvd(); 911 } 912 913 void 914 smp_invlpg_range_cpusync(void *arg) 915 { 916 vm_offset_t eva, sva, addr; 917 sva = ((struct smp_invlpg_range_cpusync_arg *)arg)->sva; 918 eva = ((struct smp_invlpg_range_cpusync_arg *)arg)->eva; 919 920 for (addr = sva; addr < eva; addr += PAGE_SIZE) { 921 cpu_invlpg((void *)addr); 922 } 923 } 924 925 /* 926 * When called the executing CPU will send an IPI to all other CPUs 927 * requesting that they halt execution. 928 * 929 * Usually (but not necessarily) called with 'other_cpus' as its arg. 930 * 931 * - Signals all CPUs in map to stop. 932 * - Waits for each to stop. 933 * 934 * Returns: 935 * -1: error 936 * 0: NA 937 * 1: ok 938 * 939 * XXX FIXME: this is not MP-safe, needs a lock to prevent multiple CPUs 940 * from executing at same time. 941 */ 942 int 943 stop_cpus(cpumask_t map) 944 { 945 cpumask_t mask; 946 947 CPUMASK_ANDMASK(map, smp_active_mask); 948 949 /* send the Xcpustop IPI to all CPUs in map */ 950 selected_apic_ipi(map, XCPUSTOP_OFFSET, APIC_DELMODE_FIXED); 951 952 do { 953 mask = stopped_cpus; 954 CPUMASK_ANDMASK(mask, map); 955 /* spin */ 956 } while (CPUMASK_CMPMASKNEQ(mask, map)); 957 958 return 1; 959 } 960 961 962 /* 963 * Called by a CPU to restart stopped CPUs. 964 * 965 * Usually (but not necessarily) called with 'stopped_cpus' as its arg. 966 * 967 * - Signals all CPUs in map to restart. 968 * - Waits for each to restart. 969 * 970 * Returns: 971 * -1: error 972 * 0: NA 973 * 1: ok 974 */ 975 int 976 restart_cpus(cpumask_t map) 977 { 978 cpumask_t mask; 979 980 /* signal other cpus to restart */ 981 mask = map; 982 CPUMASK_ANDMASK(mask, smp_active_mask); 983 cpu_ccfence(); 984 started_cpus = mask; 985 cpu_ccfence(); 986 987 /* wait for each to clear its bit */ 988 while (CPUMASK_CMPMASKNEQ(stopped_cpus, map)) 989 cpu_pause(); 990 991 return 1; 992 } 993 994 /* 995 * This is called once the mpboot code has gotten us properly relocated 996 * and the MMU turned on, etc. ap_init() is actually the idle thread, 997 * and when it returns the scheduler will call the real cpu_idle() main 998 * loop for the idlethread. Interrupts are disabled on entry and should 999 * remain disabled at return. 1000 */ 1001 void 1002 ap_init(void) 1003 { 1004 int cpu_id; 1005 1006 /* 1007 * Adjust smp_startup_mask to signal the BSP that we have started 1008 * up successfully. Note that we do not yet hold the BGL. The BSP 1009 * is waiting for our signal. 1010 * 1011 * We can't set our bit in smp_active_mask yet because we are holding 1012 * interrupts physically disabled and remote cpus could deadlock 1013 * trying to send us an IPI. 1014 */ 1015 ATOMIC_CPUMASK_ORBIT(smp_startup_mask, mycpu->gd_cpuid); 1016 cpu_mfence(); 1017 1018 /* 1019 * Interlock for LAPIC initialization. Wait until mp_finish_lapic is 1020 * non-zero, then get the MP lock. 1021 * 1022 * Note: We are in a critical section. 1023 * 1024 * Note: we are the idle thread, we can only spin. 1025 * 1026 * Note: The load fence is memory volatile and prevents the compiler 1027 * from improperly caching mp_finish_lapic, and the cpu from improperly 1028 * caching it. 1029 */ 1030 while (mp_finish_lapic == 0) { 1031 cpu_pause(); 1032 cpu_lfence(); 1033 } 1034 #if 0 1035 while (try_mplock() == 0) { 1036 cpu_pause(); 1037 cpu_lfence(); 1038 } 1039 #endif 1040 1041 if (cpu_feature & CPUID_TSC) { 1042 /* 1043 * The BSP is constantly updating tsc0_offset, figure out 1044 * the relative difference to synchronize ktrdump. 1045 */ 1046 tsc_offsets[mycpu->gd_cpuid] = rdtsc() - tsc0_offset; 1047 } 1048 1049 /* BSP may have changed PTD while we're waiting for the lock */ 1050 cpu_invltlb(); 1051 1052 /* Build our map of 'other' CPUs. */ 1053 mycpu->gd_other_cpus = smp_startup_mask; 1054 ATOMIC_CPUMASK_NANDBIT(mycpu->gd_other_cpus, mycpu->gd_cpuid); 1055 1056 /* A quick check from sanity claus */ 1057 cpu_id = APICID_TO_CPUID((lapic->id & 0xff000000) >> 24); 1058 if (mycpu->gd_cpuid != cpu_id) { 1059 kprintf("SMP: assigned cpuid = %d\n", mycpu->gd_cpuid); 1060 kprintf("SMP: actual cpuid = %d lapicid %d\n", 1061 cpu_id, (lapic->id & 0xff000000) >> 24); 1062 #if 0 /* JGXXX */ 1063 kprintf("PTD[MPPTDI] = %p\n", (void *)PTD[MPPTDI]); 1064 #endif 1065 panic("cpuid mismatch! boom!!"); 1066 } 1067 1068 /* Initialize AP's local APIC for irq's */ 1069 lapic_init(FALSE); 1070 1071 /* LAPIC initialization is done */ 1072 ATOMIC_CPUMASK_ORBIT(smp_lapic_mask, mycpu->gd_cpuid); 1073 cpu_mfence(); 1074 1075 #if 0 1076 /* Let BSP move onto the next initialization stage */ 1077 rel_mplock(); 1078 #endif 1079 1080 /* 1081 * Interlock for finalization. Wait until mp_finish is non-zero, 1082 * then get the MP lock. 1083 * 1084 * Note: We are in a critical section. 1085 * 1086 * Note: we are the idle thread, we can only spin. 1087 * 1088 * Note: The load fence is memory volatile and prevents the compiler 1089 * from improperly caching mp_finish, and the cpu from improperly 1090 * caching it. 1091 */ 1092 while (mp_finish == 0) { 1093 cpu_pause(); 1094 cpu_lfence(); 1095 } 1096 1097 /* BSP may have changed PTD while we're waiting for the lock */ 1098 cpu_invltlb(); 1099 1100 /* Set memory range attributes for this CPU to match the BSP */ 1101 mem_range_AP_init(); 1102 1103 /* 1104 * Once we go active we must process any IPIQ messages that may 1105 * have been queued, because no actual IPI will occur until we 1106 * set our bit in the smp_active_mask. If we don't the IPI 1107 * message interlock could be left set which would also prevent 1108 * further IPIs. 1109 * 1110 * The idle loop doesn't expect the BGL to be held and while 1111 * lwkt_switch() normally cleans things up this is a special case 1112 * because we returning almost directly into the idle loop. 1113 * 1114 * The idle thread is never placed on the runq, make sure 1115 * nothing we've done put it there. 1116 */ 1117 1118 /* 1119 * Hold a critical section and allow real interrupts to occur. Zero 1120 * any spurious interrupts which have accumulated, then set our 1121 * smp_active_mask indicating that we are fully operational. 1122 */ 1123 crit_enter(); 1124 __asm __volatile("sti; pause; pause"::); 1125 bzero(mdcpu->gd_ipending, sizeof(mdcpu->gd_ipending)); 1126 ATOMIC_CPUMASK_ORBIT(smp_active_mask, mycpu->gd_cpuid); 1127 1128 /* 1129 * Wait until all cpus have set their smp_active_mask and have fully 1130 * operational interrupts before proceeding. 1131 * 1132 * We need a final cpu_invltlb() because we would not have received 1133 * any until we set our bit in smp_active_mask. 1134 */ 1135 while (mp_finish == 1) { 1136 cpu_pause(); 1137 cpu_lfence(); 1138 } 1139 cpu_invltlb(); 1140 1141 /* 1142 * Initialize per-cpu clocks and do other per-cpu initialization. 1143 * At this point code is expected to be able to use the full kernel 1144 * API. 1145 */ 1146 initclocks_pcpu(); /* clock interrupts (via IPIs) */ 1147 1148 /* 1149 * Since we may have cleaned up the interrupt triggers, manually 1150 * process any pending IPIs before exiting our critical section. 1151 * Once the critical section has exited, normal interrupt processing 1152 * may occur. 1153 */ 1154 lwkt_process_ipiq(); 1155 crit_exit_noyield(mycpu->gd_curthread); 1156 1157 /* 1158 * Final final, allow the waiting BSP to resume the boot process, 1159 * return 'into' the idle thread bootstrap. 1160 */ 1161 ATOMIC_CPUMASK_ORBIT(smp_finalize_mask, mycpu->gd_cpuid); 1162 KKASSERT((curthread->td_flags & TDF_RUNQ) == 0); 1163 } 1164 1165 /* 1166 * Get SMP fully working before we start initializing devices. 1167 */ 1168 static 1169 void 1170 ap_finish(void) 1171 { 1172 if (bootverbose) 1173 kprintf("Finish MP startup\n"); 1174 rel_mplock(); 1175 1176 /* 1177 * Wait for the active mask to complete, after which all cpus will 1178 * be accepting interrupts. 1179 */ 1180 mp_finish = 1; 1181 while (CPUMASK_CMPMASKNEQ(smp_active_mask, smp_startup_mask)) { 1182 cpu_pause(); 1183 cpu_lfence(); 1184 } 1185 1186 /* 1187 * Wait for the finalization mask to complete, after which all cpus 1188 * have completely finished initializing and are entering or are in 1189 * their idle thread. 1190 * 1191 * BSP should have received all required invltlbs but do another 1192 * one just in case. 1193 */ 1194 cpu_invltlb(); 1195 mp_finish = 2; 1196 while (CPUMASK_CMPMASKNEQ(smp_finalize_mask, smp_startup_mask)) { 1197 cpu_pause(); 1198 cpu_lfence(); 1199 } 1200 1201 while (try_mplock() == 0) { 1202 cpu_pause(); 1203 cpu_lfence(); 1204 } 1205 1206 if (bootverbose) { 1207 kprintf("Active CPU Mask: %016jx\n", 1208 (uintmax_t)CPUMASK_LOWMASK(smp_active_mask)); 1209 } 1210 } 1211 1212 SYSINIT(finishsmp, SI_BOOT2_FINISH_SMP, SI_ORDER_FIRST, ap_finish, NULL); 1213 1214 void 1215 cpu_send_ipiq(int dcpu) 1216 { 1217 if (CPUMASK_TESTBIT(smp_active_mask, dcpu)) 1218 single_apic_ipi(dcpu, XIPIQ_OFFSET, APIC_DELMODE_FIXED); 1219 } 1220 1221 #if 0 /* single_apic_ipi_passive() not working yet */ 1222 /* 1223 * Returns 0 on failure, 1 on success 1224 */ 1225 int 1226 cpu_send_ipiq_passive(int dcpu) 1227 { 1228 int r = 0; 1229 if (CPUMASK_TESTBIT(smp_active_mask, dcpu)) { 1230 r = single_apic_ipi_passive(dcpu, XIPIQ_OFFSET, 1231 APIC_DELMODE_FIXED); 1232 } 1233 return(r); 1234 } 1235 #endif 1236 1237 static void 1238 mp_bsp_simple_setup(void) 1239 { 1240 struct mdglobaldata *gd; 1241 size_t ipiq_size; 1242 1243 /* build our map of 'other' CPUs */ 1244 mycpu->gd_other_cpus = smp_startup_mask; 1245 CPUMASK_NANDBIT(mycpu->gd_other_cpus, mycpu->gd_cpuid); 1246 1247 gd = (struct mdglobaldata *)mycpu; 1248 gd->gd_acpi_id = CPUID_TO_ACPIID(mycpu->gd_cpuid); 1249 1250 ipiq_size = sizeof(struct lwkt_ipiq) * ncpus; 1251 mycpu->gd_ipiq = (void *)kmem_alloc(&kernel_map, ipiq_size); 1252 bzero(mycpu->gd_ipiq, ipiq_size); 1253 1254 pmap_set_opt(); 1255 1256 if (cpu_feature & CPUID_TSC) 1257 tsc0_offset = rdtsc(); 1258 } 1259 1260 1261 /* 1262 * CPU TOPOLOGY DETECTION FUNCTIONS 1263 */ 1264 1265 /* Detect intel topology using CPUID 1266 * Ref: http://www.intel.com/Assets/PDF/appnote/241618.pdf, pg 41 1267 */ 1268 static void 1269 detect_intel_topology(int count_htt_cores) 1270 { 1271 int shift = 0; 1272 int ecx_index = 0; 1273 int core_plus_logical_bits = 0; 1274 int cores_per_package; 1275 int logical_per_package; 1276 int logical_per_core; 1277 unsigned int p[4]; 1278 1279 if (cpu_high >= 0xb) { 1280 goto FUNC_B; 1281 1282 } else if (cpu_high >= 0x4) { 1283 goto FUNC_4; 1284 1285 } else { 1286 core_bits = 0; 1287 for (shift = 0; (1 << shift) < count_htt_cores; ++shift) 1288 ; 1289 logical_CPU_bits = 1 << shift; 1290 return; 1291 } 1292 1293 FUNC_B: 1294 cpuid_count(0xb, FUNC_B_THREAD_LEVEL, p); 1295 1296 /* if 0xb not supported - fallback to 0x4 */ 1297 if (p[1] == 0 || (FUNC_B_TYPE(p[2]) != FUNC_B_THREAD_TYPE)) { 1298 goto FUNC_4; 1299 } 1300 1301 logical_CPU_bits = FUNC_B_BITS_SHIFT_NEXT_LEVEL(p[0]); 1302 1303 ecx_index = FUNC_B_THREAD_LEVEL + 1; 1304 do { 1305 cpuid_count(0xb, ecx_index, p); 1306 1307 /* Check for the Core type in the implemented sub leaves. */ 1308 if (FUNC_B_TYPE(p[2]) == FUNC_B_CORE_TYPE) { 1309 core_plus_logical_bits = FUNC_B_BITS_SHIFT_NEXT_LEVEL(p[0]); 1310 break; 1311 } 1312 1313 ecx_index++; 1314 1315 } while (FUNC_B_TYPE(p[2]) != FUNC_B_INVALID_TYPE); 1316 1317 core_bits = core_plus_logical_bits - logical_CPU_bits; 1318 1319 return; 1320 1321 FUNC_4: 1322 cpuid_count(0x4, 0, p); 1323 cores_per_package = FUNC_4_MAX_CORE_NO(p[0]) + 1; 1324 1325 logical_per_package = count_htt_cores; 1326 logical_per_core = logical_per_package / cores_per_package; 1327 1328 for (shift = 0; (1 << shift) < logical_per_core; ++shift) 1329 ; 1330 logical_CPU_bits = shift; 1331 1332 for (shift = 0; (1 << shift) < cores_per_package; ++shift) 1333 ; 1334 core_bits = shift; 1335 1336 return; 1337 } 1338 1339 /* Detect AMD topology using CPUID 1340 * Ref: http://support.amd.com/us/Embedded_TechDocs/25481.pdf, last page 1341 */ 1342 static void 1343 detect_amd_topology(int count_htt_cores) 1344 { 1345 int shift = 0; 1346 if ((cpu_feature & CPUID_HTT) 1347 && (amd_feature2 & AMDID2_CMP)) { 1348 1349 if (cpu_procinfo2 & AMDID_COREID_SIZE) { 1350 core_bits = (cpu_procinfo2 & AMDID_COREID_SIZE) 1351 >> AMDID_COREID_SIZE_SHIFT; 1352 } else { 1353 core_bits = (cpu_procinfo2 & AMDID_CMP_CORES) + 1; 1354 for (shift = 0; (1 << shift) < core_bits; ++shift) 1355 ; 1356 core_bits = shift; 1357 } 1358 1359 logical_CPU_bits = count_htt_cores >> core_bits; 1360 for (shift = 0; (1 << shift) < logical_CPU_bits; ++shift) 1361 ; 1362 logical_CPU_bits = shift; 1363 } else { 1364 for (shift = 0; (1 << shift) < count_htt_cores; ++shift) 1365 ; 1366 core_bits = shift; 1367 logical_CPU_bits = 0; 1368 } 1369 } 1370 1371 static void 1372 amd_get_compute_unit_id(void *arg) 1373 { 1374 u_int regs[4]; 1375 1376 do_cpuid(0x8000001e, regs); 1377 cpu_node_t * mynode = get_cpu_node_by_cpuid(mycpuid); 1378 /* 1379 * AMD - CPUID Specification September 2010 1380 * page 34 - //ComputeUnitID = ebx[0:7]// 1381 */ 1382 mynode->compute_unit_id = regs[1] & 0xff; 1383 } 1384 1385 int 1386 fix_amd_topology(void) 1387 { 1388 cpumask_t mask; 1389 1390 if (cpu_vendor_id != CPU_VENDOR_AMD) 1391 return -1; 1392 if ((amd_feature2 & AMDID2_TOPOEXT) == 0) 1393 return -1; 1394 1395 CPUMASK_ASSALLONES(mask); 1396 lwkt_cpusync_simple(mask, amd_get_compute_unit_id, NULL); 1397 1398 kprintf("Compute unit iDS:\n"); 1399 int i; 1400 for (i = 0; i < ncpus; i++) { 1401 kprintf("%d-%d; \n", 1402 i, get_cpu_node_by_cpuid(i)->compute_unit_id); 1403 } 1404 1405 return 0; 1406 } 1407 1408 /* Calculate 1409 * - logical_CPU_bits 1410 * - core_bits 1411 * With the values above (for AMD or INTEL) we are able to generally 1412 * detect the CPU topology (number of cores for each level): 1413 * Ref: http://wiki.osdev.org/Detecting_CPU_Topology_(80x86) 1414 * Ref: http://www.multicoreinfo.com/research/papers/whitepapers/Intel-detect-topology.pdf 1415 */ 1416 void 1417 detect_cpu_topology(void) 1418 { 1419 static int topology_detected = 0; 1420 int count = 0; 1421 1422 if (topology_detected) { 1423 goto OUT; 1424 } 1425 1426 if ((cpu_feature & CPUID_HTT) == 0) { 1427 core_bits = 0; 1428 logical_CPU_bits = 0; 1429 goto OUT; 1430 } else { 1431 count = (cpu_procinfo & CPUID_HTT_CORES) 1432 >> CPUID_HTT_CORE_SHIFT; 1433 } 1434 1435 if (cpu_vendor_id == CPU_VENDOR_INTEL) { 1436 detect_intel_topology(count); 1437 } else if (cpu_vendor_id == CPU_VENDOR_AMD) { 1438 detect_amd_topology(count); 1439 } 1440 1441 OUT: 1442 if (bootverbose) 1443 kprintf("Bits within APICID: logical_CPU_bits: %d; core_bits: %d\n", 1444 logical_CPU_bits, core_bits); 1445 1446 topology_detected = 1; 1447 } 1448 1449 /* Interface functions to calculate chip_ID, 1450 * core_number and logical_number 1451 * Ref: http://wiki.osdev.org/Detecting_CPU_Topology_(80x86) 1452 */ 1453 int 1454 get_chip_ID(int cpuid) 1455 { 1456 return get_apicid_from_cpuid(cpuid) >> 1457 (logical_CPU_bits + core_bits); 1458 } 1459 1460 int 1461 get_core_number_within_chip(int cpuid) 1462 { 1463 return (get_apicid_from_cpuid(cpuid) >> logical_CPU_bits) & 1464 ( (1 << core_bits) -1); 1465 } 1466 1467 int 1468 get_logical_CPU_number_within_core(int cpuid) 1469 { 1470 return get_apicid_from_cpuid(cpuid) & 1471 ( (1 << logical_CPU_bits) -1); 1472 } 1473