/*-
 * Copyright (c) 1996, by Steve Passe
 * Copyright (c) 2003, by Peter Wemm
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the developer may NOT be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_acpi.h"
#ifdef __i386__
#include "opt_apic.h"
#endif
#include "opt_cpu.h"
#include "opt_ddb.h"
#include "opt_gdb.h"
#include "opt_kstack_pages.h"
#include "opt_pmap.h"
#include "opt_sched.h"
#include "opt_smp.h"
#include "opt_stack.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/asan.h>
#include <sys/bus.h>
#include <sys/cons.h>	/* cngetc() */
#include <sys/cpuset.h>
#include <sys/csan.h>
#include <sys/interrupt.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>

#include <x86/apicreg.h>
#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <x86/mca.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/psl.h>
#include <machine/smp.h>
#include <machine/specialreg.h>
#include <machine/stack.h>
#include <x86/ucode.h>

#ifdef DEV_ACPI
#include <contrib/dev/acpica/include/acpi.h>
#include <dev/acpica/acpivar.h>
#endif

static MALLOC_DEFINE(M_CPUS, "cpus", "CPU items");

/* lock region used by kernel profiling */
int	mcount_lock;

int	mp_naps;		/* # of application processors */
int	boot_cpu_id = -1;	/* designated BSP */

/* AP uses this during bootstrap.  Do not staticize. */
char *bootSTK;
int bootAP;

/* Free these after use */
void *bootstacks[MAXCPU];
void *dpcpu;

struct pcb stoppcbs[MAXCPU];
struct susppcb **susppcbs;

#ifdef COUNT_IPIS
/* Interrupt counts. */
static u_long *ipi_preempt_counts[MAXCPU];
static u_long *ipi_ast_counts[MAXCPU];
u_long *ipi_invltlb_counts[MAXCPU];
u_long *ipi_invlrng_counts[MAXCPU];
u_long *ipi_invlpg_counts[MAXCPU];
u_long *ipi_invlcache_counts[MAXCPU];
u_long *ipi_rendezvous_counts[MAXCPU];
static u_long *ipi_hardclock_counts[MAXCPU];
#endif

/* Default cpu_ops implementation. */
struct cpu_ops cpu_ops;

/*
 * Local data and functions.
 */

static volatile cpuset_t ipi_stop_nmi_pending;

volatile cpuset_t resuming_cpus;
volatile cpuset_t toresume_cpus;

/* used to hold the APs until we are ready to release them */
struct mtx ap_boot_mtx;

/* Set to 1 once we're ready to let the APs out of the pen. */
volatile int aps_ready = 0;

/*
 * Store data from cpu_add() until later in the boot when we actually set up
 * the APs.
 */
struct cpu_info *cpu_info;
int *apic_cpuids;
int cpu_apic_ids[MAXCPU];
_Static_assert(MAXCPU <= MAX_APIC_ID,
    "MAXCPU cannot be larger than MAX_APIC_ID");
_Static_assert(xAPIC_MAX_APIC_ID <= MAX_APIC_ID,
    "xAPIC_MAX_APIC_ID cannot be larger than MAX_APIC_ID");

static void	release_aps(void *dummy);
static void	cpustop_handler_post(u_int cpu);

static int	hyperthreading_allowed = 1;
SYSCTL_INT(_machdep, OID_AUTO, hyperthreading_allowed, CTLFLAG_RDTUN,
	&hyperthreading_allowed, 0, "Use Intel HTT logical CPUs");

static int	hyperthreading_intr_allowed = 0;
SYSCTL_INT(_machdep, OID_AUTO, hyperthreading_intr_allowed, CTLFLAG_RDTUN,
	&hyperthreading_intr_allowed, 0,
	"Allow interrupts on HTT logical CPUs");

static struct topo_node topo_root;

static int pkg_id_shift;
static int node_id_shift;
static int core_id_shift;
static int disabled_cpus;

struct cache_info {
	int	id_shift;
	int	present;
} static caches[MAX_CACHE_LEVELS];

static bool stop_mwait = false;
SYSCTL_BOOL(_machdep, OID_AUTO, stop_mwait, CTLFLAG_RWTUN, &stop_mwait, 0,
    "Use MONITOR/MWAIT when stopping CPU, if available");

void
mem_range_AP_init(void)
{

	if (mem_range_softc.mr_op && mem_range_softc.mr_op->initAP)
		mem_range_softc.mr_op->initAP(&mem_range_softc);
}

/*
 * Round up to the next power of two, if necessary, and then
 * take log2.
 * Returns -1 if argument is zero.
 */
static __inline int
mask_width(u_int x)
{

	return (fls(x << (1 - powerof2(x))) - 1);
}
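/*
 * For illustration (hypothetical arguments): mask_width(1) = 0;
 * mask_width(4) = fls(4) - 1 = 2; mask_width(6) first rounds 6 up to 8 via
 * the extra shift (6 is not a power of two), giving fls(12) - 1 = 3; and
 * mask_width(0) = fls(0) - 1 = -1, the documented result for zero.
 */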
/*
 * Add a cache level to the cache topology description.
 */
static int
add_deterministic_cache(int type, int level, int share_count)
{

	if (type == 0)
		return (0);
	if (type > 3) {
		printf("unexpected cache type %d\n", type);
		return (1);
	}
	if (type == 2) /* ignore instruction cache */
		return (1);
	if (level == 0 || level > MAX_CACHE_LEVELS) {
		printf("unexpected cache level %d\n", level);
		return (1);
	}

	if (caches[level - 1].present) {
		printf("WARNING: multiple entries for L%u data cache\n", level);
		printf("%u => %u\n", caches[level - 1].id_shift,
		    mask_width(share_count));
	}
	caches[level - 1].id_shift = mask_width(share_count);
	caches[level - 1].present = 1;

	if (caches[level - 1].id_shift > pkg_id_shift) {
		printf("WARNING: L%u data cache covers more "
		    "APIC IDs than a package (%u > %u)\n", level,
		    caches[level - 1].id_shift, pkg_id_shift);
		caches[level - 1].id_shift = pkg_id_shift;
	}
	if (caches[level - 1].id_shift < core_id_shift) {
		printf("WARNING: L%u data cache covers fewer "
		    "APIC IDs than a core (%u < %u)\n", level,
		    caches[level - 1].id_shift, core_id_shift);
		caches[level - 1].id_shift = core_id_shift;
	}

	return (1);
}

/*
 * Determine topology of processing units and caches for AMD CPUs.
 * See:
 * - AMD CPUID Specification (Publication # 25481)
 * - BKDG for AMD NPT Family 0Fh Processors (Publication # 32559)
 * - BKDG For AMD Family 10h Processors (Publication # 31116)
 * - BKDG For AMD Family 15h Models 00h-0Fh Processors (Publication # 42301)
 * - BKDG For AMD Family 16h Models 00h-0Fh Processors (Publication # 48751)
 * - PPR For AMD Family 17h Models 00h-0Fh Processors (Publication # 54945)
 */
static void
topo_probe_amd(void)
{
	u_int p[4];
	uint64_t v;
	int level;
	int nodes_per_socket;
	int share_count;
	int type;
	int i;

	/* No multi-core capability. */
	if ((amd_feature2 & AMDID2_CMP) == 0)
		return;

	/* For families 10h and newer. */
	pkg_id_shift = (cpu_procinfo2 & AMDID_COREID_SIZE) >>
	    AMDID_COREID_SIZE_SHIFT;

	/* For 0Fh family. */
	if (pkg_id_shift == 0)
		pkg_id_shift =
		    mask_width((cpu_procinfo2 & AMDID_CMP_CORES) + 1);

	/*
	 * Families prior to 16h define the following value as
	 * cores per compute unit and we don't really care about the AMD
	 * compute units at the moment.  Perhaps we should treat them as
	 * cores and cores within the compute units as hardware threads,
	 * but that's up for debate.
	 * Later families define the value as threads per compute unit,
	 * so we are following AMD's nomenclature here.
	 */
	if ((amd_feature2 & AMDID2_TOPOLOGY) != 0 &&
	    CPUID_TO_FAMILY(cpu_id) >= 0x16) {
		cpuid_count(0x8000001e, 0, p);
		share_count = ((p[1] >> 8) & 0xff) + 1;
		core_id_shift = mask_width(share_count);

		/*
		 * For Zen (17h), gather Nodes per Processor.  Each node is a
		 * Zeppelin die; TR and EPYC CPUs will have multiple dies per
		 * package.  Communication latency between dies is higher than
		 * within them.
		 */
		nodes_per_socket = ((p[2] >> 8) & 0x7) + 1;
		node_id_shift = pkg_id_shift - mask_width(nodes_per_socket);
	}

	if ((amd_feature2 & AMDID2_TOPOLOGY) != 0) {
		for (i = 0; ; i++) {
			cpuid_count(0x8000001d, i, p);
			type = p[0] & 0x1f;
			level = (p[0] >> 5) & 0x7;
			share_count = 1 + ((p[0] >> 14) & 0xfff);

			if (!add_deterministic_cache(type, level, share_count))
				break;
		}
	} else {
		if (cpu_exthigh >= 0x80000005) {
			cpuid_count(0x80000005, 0, p);
			if (((p[2] >> 24) & 0xff) != 0) {
				caches[0].id_shift = 0;
				caches[0].present = 1;
			}
		}
		if (cpu_exthigh >= 0x80000006) {
			cpuid_count(0x80000006, 0, p);
			if (((p[2] >> 16) & 0xffff) != 0) {
				caches[1].id_shift = 0;
				caches[1].present = 1;
			}
			if (((p[3] >> 18) & 0x3fff) != 0) {
				nodes_per_socket = 1;
				if ((amd_feature2 & AMDID2_NODE_ID) != 0) {
					/*
					 * Handle multi-node processors that
					 * have multiple chips, each with its
					 * own L3 cache, on the same die.
					 */
					v = rdmsr(0xc001100c);
					nodes_per_socket = 1 + ((v >> 3) & 0x7);
				}
				caches[2].id_shift =
				    pkg_id_shift - mask_width(nodes_per_socket);
				caches[2].present = 1;
			}
		}
	}
}

/*
 * Determine topology of processing units for Intel CPUs
 * using CPUID Leaf 1 and Leaf 4, if supported.
 * See:
 * - Intel 64 Architecture Processor Topology Enumeration
 * - Intel 64 and IA-32 Architectures Software Developer’s Manual,
 *   Volume 3A: System Programming Guide, PROGRAMMING CONSIDERATIONS
 *   FOR HARDWARE MULTI-THREADING CAPABLE PROCESSORS
 */
static void
topo_probe_intel_0x4(void)
{
	u_int p[4];
	int max_cores;
	int max_logical;

	/* Both zero and one here mean one logical processor per package. */
	max_logical = (cpu_feature & CPUID_HTT) != 0 ?
	    (cpu_procinfo & CPUID_HTT_CORES) >> 16 : 1;
	if (max_logical <= 1)
		return;

	if (cpu_high >= 0x4) {
		cpuid_count(0x04, 0, p);
		max_cores = ((p[0] >> 26) & 0x3f) + 1;
	} else
		max_cores = 1;

	core_id_shift = mask_width(max_logical/max_cores);
	KASSERT(core_id_shift >= 0,
	    ("intel topo: max_cores > max_logical\n"));
	pkg_id_shift = core_id_shift + mask_width(max_cores);
}

/*
 * Determine topology of processing units for Intel CPUs
 * using CPUID Leaf 1Fh or 0Bh, if supported.
 * See:
 * - Intel 64 Architecture Processor Topology Enumeration
 * - Intel 64 and IA-32 Architectures Software Developer’s Manual,
 *   Volume 3A: System Programming Guide, PROGRAMMING CONSIDERATIONS
 *   FOR HARDWARE MULTI-THREADING CAPABLE PROCESSORS
 */
static void
topo_probe_intel_0xb(void)
{
	u_int leaf;
	u_int p[4] = { 0 };
	int bits;
	int type;
	int i;

	/* Prefer leaf 1Fh (V2 Extended Topology Enumeration). */
	if (cpu_high >= 0x1f) {
		leaf = 0x1f;
		cpuid_count(leaf, 0, p);
	}
	/* Fall back to leaf 0Bh (Extended Topology Enumeration). */
	if (p[1] == 0) {
		leaf = 0x0b;
		cpuid_count(leaf, 0, p);
	}
	/* Fall back to leaf 04h (Deterministic Cache Parameters). */
	if (p[1] == 0) {
		topo_probe_intel_0x4();
		return;
	}

	/* We only support three levels for now. */
	for (i = 0; ; i++) {
		cpuid_count(leaf, i, p);

		bits = p[0] & 0x1f;
		type = (p[2] >> 8) & 0xff;

		if (type == 0)
			break;

		if (type == CPUID_TYPE_SMT)
			core_id_shift = bits;
		else if (type == CPUID_TYPE_CORE)
			pkg_id_shift = bits;
		else if (bootverbose)
			printf("Topology level type %d shift: %d\n", type, bits);
	}

	if (pkg_id_shift < core_id_shift) {
		printf("WARNING: core covers more APIC IDs than a package\n");
		core_id_shift = pkg_id_shift;
	}
}

/*
 * Determine topology of caches for Intel CPUs.
 * See:
 * - Intel 64 Architecture Processor Topology Enumeration
 * - Intel 64 and IA-32 Architectures Software Developer’s Manual,
 *   Volume 2A: Instruction Set Reference, A-M,
 *   CPUID instruction
 */
static void
topo_probe_intel_caches(void)
{
	u_int p[4];
	int level;
	int share_count;
	int type;
	int i;

	if (cpu_high < 0x4) {
		/*
		 * Available cache levels and sizes can be determined
		 * via CPUID leaf 2, but that requires a huge table of hardcoded
		 * values, so for now just assume L1 and L2 caches potentially
		 * shared only by HTT processing units, if HTT is present.
		 */
		caches[0].id_shift = pkg_id_shift;
		caches[0].present = 1;
		caches[1].id_shift = pkg_id_shift;
		caches[1].present = 1;
		return;
	}

	for (i = 0; ; i++) {
		cpuid_count(0x4, i, p);
		type = p[0] & 0x1f;
		level = (p[0] >> 5) & 0x7;
		share_count = 1 + ((p[0] >> 14) & 0xfff);

		if (!add_deterministic_cache(type, level, share_count))
			break;
	}
}

/*
 * Determine topology of processing units and caches for Intel CPUs.
 * See:
 * - Intel 64 Architecture Processor Topology Enumeration
 */
static void
topo_probe_intel(void)
{

	/*
	 * Note that the 0x1 <= cpu_high < 4 case should be
	 * compatible with topo_probe_intel_0x4() logic when
	 * CPUID.1:EBX[23:16] > 0 (cpu_cores will be 1)
	 * or it should trigger the fallback otherwise.
	 */
	if (cpu_high >= 0xb)
		topo_probe_intel_0xb();
	else if (cpu_high >= 0x1)
		topo_probe_intel_0x4();

	topo_probe_intel_caches();
}

/*
 * Topology information is queried only on the BSP, on which this
 * code runs and for which it can query CPUID information.
 * Then topology is extrapolated on all packages using the
 * assumption that the APIC ID to hardware component ID mapping is
 * homogeneous.
 * That doesn't necessarily imply that the topology is uniform.
 */
void
topo_probe(void)
{
	static int cpu_topo_probed = 0;
	struct x86_topo_layer {
		int type;
		int subtype;
		int id_shift;
	} topo_layers[MAX_CACHE_LEVELS + 5];
	struct topo_node *parent;
	struct topo_node *node;
	int layer;
	int nlayers;
	int node_id;
	int i;
#if defined(DEV_ACPI) && MAXMEMDOM > 1
	int d, domain;
#endif

	if (cpu_topo_probed)
		return;

	CPU_ZERO(&logical_cpus_mask);

	if (mp_ncpus <= 1)
		; /* nothing */
	else if (cpu_vendor_id == CPU_VENDOR_AMD ||
	    cpu_vendor_id == CPU_VENDOR_HYGON)
		topo_probe_amd();
	else if (cpu_vendor_id == CPU_VENDOR_INTEL)
		topo_probe_intel();

	KASSERT(pkg_id_shift >= core_id_shift,
	    ("bug in APIC topology discovery"));

	nlayers = 0;
	bzero(topo_layers, sizeof(topo_layers));

	topo_layers[nlayers].type = TOPO_TYPE_PKG;
	topo_layers[nlayers].id_shift = pkg_id_shift;
	if (bootverbose)
		printf("Package ID shift: %u\n", topo_layers[nlayers].id_shift);
	nlayers++;

	if (pkg_id_shift > node_id_shift && node_id_shift != 0) {
		topo_layers[nlayers].type = TOPO_TYPE_GROUP;
		topo_layers[nlayers].id_shift = node_id_shift;
		if (bootverbose)
			printf("Node ID shift: %u\n",
			    topo_layers[nlayers].id_shift);
		nlayers++;
	}

	/*
	 * Consider all caches to be within a package/chip
	 * and "in front" of all sub-components like
	 * cores and hardware threads.
	 */
	for (i = MAX_CACHE_LEVELS - 1; i >= 0; --i) {
		if (caches[i].present) {
			if (node_id_shift != 0)
				KASSERT(caches[i].id_shift <= node_id_shift,
				    ("bug in APIC topology discovery"));
			KASSERT(caches[i].id_shift <= pkg_id_shift,
			    ("bug in APIC topology discovery"));
			KASSERT(caches[i].id_shift >= core_id_shift,
			    ("bug in APIC topology discovery"));

			topo_layers[nlayers].type = TOPO_TYPE_CACHE;
			topo_layers[nlayers].subtype = i + 1;
			topo_layers[nlayers].id_shift = caches[i].id_shift;
			if (bootverbose)
				printf("L%u cache ID shift: %u\n",
				    topo_layers[nlayers].subtype,
				    topo_layers[nlayers].id_shift);
			nlayers++;
		}
	}

	if (pkg_id_shift > core_id_shift) {
		topo_layers[nlayers].type = TOPO_TYPE_CORE;
		topo_layers[nlayers].id_shift = core_id_shift;
		if (bootverbose)
			printf("Core ID shift: %u\n",
			    topo_layers[nlayers].id_shift);
		nlayers++;
	}

	topo_layers[nlayers].type = TOPO_TYPE_PU;
	topo_layers[nlayers].id_shift = 0;
	nlayers++;
#if defined(DEV_ACPI) && MAXMEMDOM > 1
	if (vm_ndomains > 1) {
		for (layer = 0; layer < nlayers; ++layer) {
			for (i = 0; i <= max_apic_id; ++i) {
				if ((i & ((1 << topo_layers[layer].id_shift) - 1)) == 0)
					domain = -1;
				if (!cpu_info[i].cpu_present)
					continue;
				d = acpi_pxm_get_cpu_locality(i);
				if (domain >= 0 && domain != d)
					break;
				domain = d;
			}
			if (i > max_apic_id)
				break;
		}
		KASSERT(layer < nlayers, ("NUMA domain smaller than PU"));
		memmove(&topo_layers[layer+1], &topo_layers[layer],
		    sizeof(*topo_layers) * (nlayers - layer));
		topo_layers[layer].type = TOPO_TYPE_NODE;
		topo_layers[layer].subtype = CG_SHARE_NONE;
		nlayers++;
	}
#endif

	topo_init_root(&topo_root);
	for (i = 0; i <= max_apic_id; ++i) {
		if (!cpu_info[i].cpu_present)
			continue;

		parent = &topo_root;
		for (layer = 0; layer < nlayers; ++layer) {
#if defined(DEV_ACPI) && MAXMEMDOM > 1
			if (topo_layers[layer].type == TOPO_TYPE_NODE) {
				node_id = acpi_pxm_get_cpu_locality(i);
			} else
#endif
			node_id = i >> topo_layers[layer].id_shift;
			parent = topo_add_node_by_hwid(parent, node_id,
			    topo_layers[layer].type,
			    topo_layers[layer].subtype);
		}
	}

	parent = &topo_root;
	for (layer = 0; layer < nlayers; ++layer) {
#if defined(DEV_ACPI) && MAXMEMDOM > 1
		if (topo_layers[layer].type == TOPO_TYPE_NODE)
			node_id = acpi_pxm_get_cpu_locality(boot_cpu_id);
		else
#endif
		node_id = boot_cpu_id >> topo_layers[layer].id_shift;
		node = topo_find_node_by_hwid(parent, node_id,
		    topo_layers[layer].type,
		    topo_layers[layer].subtype);
		topo_promote_child(node);
		parent = node;
	}

	cpu_topo_probed = 1;
}

/*
 * Assign logical CPU IDs to local APICs.
 */
void
assign_cpu_ids(void)
{
	struct topo_node *node;
	u_int smt_mask;
	int nhyper;

	smt_mask = (1u << core_id_shift) - 1;

	/*
	 * Assign CPU IDs to local APIC IDs and disable any CPUs
	 * beyond MAXCPU.  CPU 0 is always assigned to the BSP.
	 */
	mp_ncpus = 0;
	nhyper = 0;
	TOPO_FOREACH(node, &topo_root) {
		if (node->type != TOPO_TYPE_PU)
			continue;

		if ((node->hwid & smt_mask) != (boot_cpu_id & smt_mask))
			cpu_info[node->hwid].cpu_hyperthread = 1;

		if (resource_disabled("lapic", node->hwid)) {
			if (node->hwid != boot_cpu_id)
				cpu_info[node->hwid].cpu_disabled = 1;
			else
				printf("Cannot disable BSP, APIC ID = %d\n",
				    node->hwid);
		}

		if (!hyperthreading_allowed &&
		    cpu_info[node->hwid].cpu_hyperthread)
			cpu_info[node->hwid].cpu_disabled = 1;

		if (mp_ncpus >= MAXCPU)
			cpu_info[node->hwid].cpu_disabled = 1;

		if (cpu_info[node->hwid].cpu_disabled) {
			disabled_cpus++;
			continue;
		}

		if (cpu_info[node->hwid].cpu_hyperthread)
			nhyper++;

		cpu_apic_ids[mp_ncpus] = node->hwid;
		apic_cpuids[node->hwid] = mp_ncpus;
		topo_set_pu_id(node, mp_ncpus);
		mp_ncpus++;
	}

	KASSERT(mp_maxid >= mp_ncpus - 1,
	    ("%s: counters out of sync: max %d, count %d", __func__, mp_maxid,
	    mp_ncpus));

	mp_ncores = mp_ncpus - nhyper;
	smp_threads_per_core = mp_ncpus / mp_ncores;
}
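/*
 * For illustration (hypothetical values): with core_id_shift = 1,
 * smt_mask is 0x1, so an AP whose APIC ID differs from the BSP's in that
 * low bit is flagged as a hyperthread above and is disabled when the
 * machdep.hyperthreading_allowed tunable is 0.
 */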
/*
 * Print various information about the SMP system hardware and setup.
 */
void
cpu_mp_announce(void)
{
	struct topo_node *node;
	const char *hyperthread;
	struct topo_analysis topology;

	printf("FreeBSD/SMP: ");
	if (topo_analyze(&topo_root, 1, &topology)) {
		printf("%d package(s)", topology.entities[TOPO_LEVEL_PKG]);
		if (topology.entities[TOPO_LEVEL_GROUP] > 1)
			printf(" x %d groups",
			    topology.entities[TOPO_LEVEL_GROUP]);
		if (topology.entities[TOPO_LEVEL_CACHEGROUP] > 1)
			printf(" x %d cache groups",
			    topology.entities[TOPO_LEVEL_CACHEGROUP]);
		if (topology.entities[TOPO_LEVEL_CORE] > 0)
			printf(" x %d core(s)",
			    topology.entities[TOPO_LEVEL_CORE]);
		if (topology.entities[TOPO_LEVEL_THREAD] > 1)
			printf(" x %d hardware threads",
			    topology.entities[TOPO_LEVEL_THREAD]);
	} else {
		printf("Non-uniform topology");
	}
	printf("\n");

	if (disabled_cpus) {
		printf("FreeBSD/SMP Online: ");
		if (topo_analyze(&topo_root, 0, &topology)) {
			printf("%d package(s)",
			    topology.entities[TOPO_LEVEL_PKG]);
			if (topology.entities[TOPO_LEVEL_GROUP] > 1)
				printf(" x %d groups",
				    topology.entities[TOPO_LEVEL_GROUP]);
			if (topology.entities[TOPO_LEVEL_CACHEGROUP] > 1)
				printf(" x %d cache groups",
				    topology.entities[TOPO_LEVEL_CACHEGROUP]);
			if (topology.entities[TOPO_LEVEL_CORE] > 0)
				printf(" x %d core(s)",
				    topology.entities[TOPO_LEVEL_CORE]);
			if (topology.entities[TOPO_LEVEL_THREAD] > 1)
				printf(" x %d hardware threads",
				    topology.entities[TOPO_LEVEL_THREAD]);
		} else {
			printf("Non-uniform topology");
		}
		printf("\n");
	}

	if (!bootverbose)
		return;

	TOPO_FOREACH(node, &topo_root) {
		switch (node->type) {
		case TOPO_TYPE_PKG:
			printf("Package HW ID = %u\n", node->hwid);
			break;
		case TOPO_TYPE_CORE:
			printf("\tCore HW ID = %u\n", node->hwid);
			break;
		case TOPO_TYPE_PU:
			if (cpu_info[node->hwid].cpu_hyperthread)
				hyperthread = "/HT";
			else
				hyperthread = "";

			if (node->subtype == 0)
				printf("\t\tCPU (AP%s): APIC ID: %u"
				    "(disabled)\n", hyperthread, node->hwid);
			else if (node->id == 0)
				printf("\t\tCPU0 (BSP): APIC ID: %u\n",
				    node->hwid);
			else
				printf("\t\tCPU%u (AP%s): APIC ID: %u\n",
				    node->id, hyperthread, node->hwid);
			break;
		default:
			/* ignored */
			break;
		}
	}
}

/*
 * Add a scheduling group, a group of logical processors sharing
 * a particular cache (and thus having an affinity), to the scheduling
 * topology.
 * This function recursively works on lower level caches.
 */
static void
x86topo_add_sched_group(struct topo_node *root, struct cpu_group *cg_root)
{
	struct topo_node *node;
	int nchildren;
	int ncores;
	int i;

	KASSERT(root->type == TOPO_TYPE_SYSTEM || root->type == TOPO_TYPE_CACHE ||
	    root->type == TOPO_TYPE_NODE || root->type == TOPO_TYPE_GROUP,
	    ("x86topo_add_sched_group: bad type: %u", root->type));
	CPU_COPY(&root->cpuset, &cg_root->cg_mask);
	cg_root->cg_count = root->cpu_count;
	if (root->type == TOPO_TYPE_CACHE)
		cg_root->cg_level = root->subtype;
	else
		cg_root->cg_level = CG_SHARE_NONE;
	if (root->type == TOPO_TYPE_NODE)
		cg_root->cg_flags = CG_FLAG_NODE;
	else
		cg_root->cg_flags = 0;

	/*
	 * Check how many core nodes we have under the given root node.
	 * If we have multiple logical processors, but not multiple
	 * cores, then those processors must be hardware threads.
	 */
	ncores = 0;
	node = root;
	while (node != NULL) {
		if (node->type != TOPO_TYPE_CORE) {
			node = topo_next_node(root, node);
			continue;
		}

		ncores++;
		node = topo_next_nonchild_node(root, node);
	}

	if (cg_root->cg_level != CG_SHARE_NONE &&
	    root->cpu_count > 1 && ncores < 2)
		cg_root->cg_flags |= CG_FLAG_SMT;

	/*
	 * Find out how many cache nodes we have under the given root node.
	 * We ignore cache nodes that cover all the same processors as the
	 * root node.  Also, we do not descend below found cache nodes.
	 * That is, we count top-level "non-redundant" caches under the root
	 * node.
	 */
	nchildren = 0;
	node = root;
	while (node != NULL) {
		/*
		 * When some APICs are disabled by tunables, nodes can end up
		 * with an empty cpuset.  Nodes with an empty cpuset will be
		 * translated into cpu groups with empty cpusets.  smp_topo_fill
		 * will then set cg_first and cg_last to -1.  This isn't
		 * correctly handled in all functions.  E.g. when
		 * cpu_search_lowest and cpu_search_highest loop through all
		 * cpus, they call CPU_ISSET on cpu -1 which ends up in a
		 * general protection fault.
		 *
		 * We could fix the scheduler to handle empty cpu groups
		 * correctly.  Nevertheless, empty cpu groups are causing
		 * overhead for no value.  So, it makes more sense to just
		 * not create them.
		 */
		if (CPU_EMPTY(&node->cpuset)) {
			node = topo_next_node(root, node);
			continue;
		}
		if (CPU_CMP(&node->cpuset, &root->cpuset) == 0) {
			if (node->type == TOPO_TYPE_CACHE &&
			    cg_root->cg_level < node->subtype)
				cg_root->cg_level = node->subtype;
			if (node->type == TOPO_TYPE_NODE)
				cg_root->cg_flags |= CG_FLAG_NODE;
			node = topo_next_node(root, node);
			continue;
		}
		if (node->type != TOPO_TYPE_GROUP &&
		    node->type != TOPO_TYPE_NODE &&
		    node->type != TOPO_TYPE_CACHE) {
			node = topo_next_node(root, node);
			continue;
		}
		nchildren++;
		node = topo_next_nonchild_node(root, node);
	}

	/*
	 * We are not interested in nodes including only one CPU each.
	 */
	if (nchildren == root->cpu_count)
		return;

	/*
	 * We are not interested in nodes without children.
	 */
	cg_root->cg_children = nchildren;
	if (nchildren == 0)
		return;

	cg_root->cg_child = smp_topo_alloc(nchildren);

	/*
	 * Now find again the same cache nodes as above and recursively
	 * build scheduling topologies for them.
	 */
	node = root;
	i = 0;
	while (node != NULL) {
		if ((node->type != TOPO_TYPE_GROUP &&
		    node->type != TOPO_TYPE_NODE &&
		    node->type != TOPO_TYPE_CACHE) ||
		    CPU_CMP(&node->cpuset, &root->cpuset) == 0 ||
		    CPU_EMPTY(&node->cpuset)) {
			node = topo_next_node(root, node);
			continue;
		}
		cg_root->cg_child[i].cg_parent = cg_root;
		x86topo_add_sched_group(node, &cg_root->cg_child[i]);
		i++;
		node = topo_next_nonchild_node(root, node);
	}
}

/*
 * Build the MI scheduling topology from the discovered hardware topology.
 */
struct cpu_group *
cpu_topo(void)
{
	struct cpu_group *cg_root;

	if (mp_ncpus <= 1)
		return (smp_topo_none());

	cg_root = smp_topo_alloc(1);
	x86topo_add_sched_group(&topo_root, cg_root);
	return (cg_root);
}
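/*
 * For illustration (hypothetical machine): one package with two two-thread
 * cores, each pair of threads sharing an L2, produces a root group with
 * two L2 child groups, each flagged CG_FLAG_SMT; cache nodes that span
 * exactly the same CPUs as their parent are treated as redundant and only
 * raise cg_level.
 */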
static void
cpu_alloc(void *dummy __unused)
{
	/*
	 * Dynamically allocate the arrays that depend on the
	 * maximum APIC ID.
	 */
	cpu_info = malloc(sizeof(*cpu_info) * (max_apic_id + 1), M_CPUS,
	    M_WAITOK | M_ZERO);
	apic_cpuids = malloc(sizeof(*apic_cpuids) * (max_apic_id + 1), M_CPUS,
	    M_WAITOK | M_ZERO);
}
SYSINIT(cpu_alloc, SI_SUB_CPU, SI_ORDER_FIRST, cpu_alloc, NULL);

/*
 * Add a logical CPU to the topology.
 */
void
cpu_add(u_int apic_id, char boot_cpu)
{

	if (apic_id > max_apic_id)
		panic("SMP: APIC ID %d too high", apic_id);

	KASSERT(cpu_info[apic_id].cpu_present == 0, ("CPU %u added twice",
	    apic_id));
	cpu_info[apic_id].cpu_present = 1;
	if (boot_cpu) {
		KASSERT(boot_cpu_id == -1,
		    ("CPU %u claims to be BSP, but CPU %u already is", apic_id,
		    boot_cpu_id));
		boot_cpu_id = apic_id;
		cpu_info[apic_id].cpu_bsp = 1;
	}
	if (bootverbose)
		printf("SMP: Added CPU %u (%s)\n", apic_id, boot_cpu ? "BSP" :
		    "AP");
}

void
cpu_mp_setmaxid(void)
{

	/*
	 * mp_ncpus and mp_maxid should be already set by calls to cpu_add().
	 * If there were no calls to cpu_add() assume this is a UP system.
	 */
	if (mp_ncpus == 0)
		mp_ncpus = 1;
}

int
cpu_mp_probe(void)
{

	/*
	 * Always record BSP in CPU map so that the mbuf init code works
	 * correctly.
	 */
	CPU_SETOF(0, &all_cpus);
	return (mp_ncpus > 1);
}

/*
 * AP CPUs call this to initialize themselves.
 */
void
init_secondary_tail(void)
{
	u_int cpuid;

	pmap_activate_boot(vmspace_pmap(proc0.p_vmspace));

	/*
	 * On real hardware, switch to x2apic mode if possible.  Do it
	 * after aps_ready was signalled, to avoid manipulating the
	 * mode while BSP might still want to send some IPI to us
	 * (second startup IPI is ignored on modern hardware etc).
	 */
	lapic_xapic_mode();

	/* Initialize the PAT MSR. */
	pmap_init_pat();

	/* set up CPU registers and state */
	cpu_setregs();

	/* set up SSE/NX */
	initializecpu();

	/* set up FPU state on the AP */
#ifdef __amd64__
	fpuinit();
#else
	npxinit(false);
#endif

	if (cpu_ops.cpu_init)
		cpu_ops.cpu_init();

	/* A quick check from sanity claus */
	cpuid = PCPU_GET(cpuid);
	if (PCPU_GET(apic_id) != lapic_id()) {
		printf("SMP: cpuid = %d\n", cpuid);
		printf("SMP: actual apic_id = %d\n", lapic_id());
		printf("SMP: correct apic_id = %d\n", PCPU_GET(apic_id));
		panic("cpuid mismatch! boom!!");
	}

	/* Initialize curthread. */
	KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
	PCPU_SET(curthread, PCPU_GET(idlethread));
	schedinit_ap();

	mtx_lock_spin(&ap_boot_mtx);

	mca_init();

	/* Init local APIC for IRQs */
	lapic_setup(1);

	/* Set memory range attributes for this CPU to match the BSP */
	mem_range_AP_init();

	smp_cpus++;

	CTR1(KTR_SMP, "SMP: AP CPU #%d Launched", cpuid);
	if (bootverbose)
		printf("SMP: AP CPU #%d Launched!\n", cpuid);
	else
		printf("%s%d%s", smp_cpus == 2 ? "Launching APs: " : "",
		    cpuid, smp_cpus == mp_ncpus ? "\n" : " ");

	/* Determine if we are a logical CPU. */
	if (cpu_info[PCPU_GET(apic_id)].cpu_hyperthread)
		CPU_SET(cpuid, &logical_cpus_mask);

	if (bootverbose)
		lapic_dump("AP");

	if (smp_cpus == mp_ncpus) {
		/* enable IPIs, TLB shootdown, freezes, etc. */
		atomic_store_rel_int(&smp_started, 1);
	}

#ifdef __amd64__
	if (pmap_pcid_enabled)
		load_cr4(rcr4() | CR4_PCIDE);
	load_ds(_udatasel);
	load_es(_udatasel);
	load_fs(_ufssel);
#endif

	mtx_unlock_spin(&ap_boot_mtx);

	/* Wait until all the APs are up. */
	while (atomic_load_acq_int(&smp_started) == 0)
		ia32_pause();

#ifndef EARLY_AP_STARTUP
	/* Start per-CPU event timers. */
	cpu_initclocks_ap();
#endif

	kcsan_cpu_init(cpuid);

	sched_ap_entry();

	panic("scheduler returned us to %s", __func__);
	/* NOTREACHED */
}

static void
smp_after_idle_runnable(void *arg __unused)
{
	int cpu;

	if (mp_ncpus == 1)
		return;

	KASSERT(smp_started != 0, ("%s: SMP not started yet", __func__));

	/*
	 * Wait for all APs to handle an interrupt.  After that, we know that
	 * the APs have entered the scheduler at least once, so the boot stacks
	 * are safe to free.
	 */
	smp_rendezvous(smp_no_rendezvous_barrier, NULL,
	    smp_no_rendezvous_barrier, NULL);

	for (cpu = 1; cpu < mp_ncpus; cpu++) {
		kmem_free(bootstacks[cpu], kstack_pages * PAGE_SIZE);
	}
}
SYSINIT(smp_after_idle_runnable, SI_SUB_SMP, SI_ORDER_ANY,
    smp_after_idle_runnable, NULL);

/*
 * We tell the I/O APIC code about all the CPUs we want to receive
 * interrupts.  If we don't want certain CPUs to receive IRQs we
 * can simply not tell the I/O APIC code about them in this function.
 * We also do not tell it about the BSP since it tells itself about
 * the BSP internally to work with UP kernels and on UP machines.
 */
void
set_interrupt_apic_ids(void)
{
	u_int i, apic_id;

	for (i = 0; i < MAXCPU; i++) {
		apic_id = cpu_apic_ids[i];
		if (apic_id == -1)
			continue;
		if (cpu_info[apic_id].cpu_bsp)
			continue;
		if (cpu_info[apic_id].cpu_disabled)
			continue;

		/* Don't let hyperthreads service interrupts. */
		if (cpu_info[apic_id].cpu_hyperthread &&
		    !hyperthreading_intr_allowed)
			continue;

		intr_add_cpu(i);
	}
}

#ifdef COUNT_XINVLTLB_HITS
u_int xhits_gbl[MAXCPU];
u_int xhits_pg[MAXCPU];
u_int xhits_rng[MAXCPU];
static SYSCTL_NODE(_debug, OID_AUTO, xhits, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "");
SYSCTL_OPAQUE(_debug_xhits, OID_AUTO, global, CTLFLAG_RW, &xhits_gbl,
    sizeof(xhits_gbl), "IU", "");
SYSCTL_OPAQUE(_debug_xhits, OID_AUTO, page, CTLFLAG_RW, &xhits_pg,
    sizeof(xhits_pg), "IU", "");
SYSCTL_OPAQUE(_debug_xhits, OID_AUTO, range, CTLFLAG_RW, &xhits_rng,
    sizeof(xhits_rng), "IU", "");

u_int ipi_global;
u_int ipi_page;
u_int ipi_range;
u_int ipi_range_size;
SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_global, CTLFLAG_RW, &ipi_global, 0, "");
SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_page, CTLFLAG_RW, &ipi_page, 0, "");
SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_range, CTLFLAG_RW, &ipi_range, 0, "");
SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_range_size, CTLFLAG_RW, &ipi_range_size,
    0, "");
#endif /* COUNT_XINVLTLB_HITS */
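/*
 * For illustration (hypothetical value, not from this file): the STARTUP
 * IPI vector used by ipi_startup() below encodes the page number of the
 * 4 KB-aligned real-mode trampoline, so a vector of 0x09 would start the
 * AP executing at physical address 0x9000.
 */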
/*
 * Init and startup IPI.
 */
void
ipi_startup(int apic_id, int vector)
{

	/*
	 * This attempts to follow the algorithm described in the
	 * Intel Multiprocessor Specification v1.4 in section B.4.
	 * For each IPI, we allow the local APIC ~20us to deliver the
	 * IPI.  If that times out, we panic.
	 */

	/*
	 * first we do an INIT IPI: this INIT IPI might be run, resetting
	 * and running the target CPU.  OR this INIT IPI might be latched (P5
	 * bug), CPU waiting for STARTUP IPI.  OR this INIT IPI might be
	 * ignored.
	 */
	lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_LEVEL |
	    APIC_LEVEL_ASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_INIT, apic_id);
	lapic_ipi_wait(100);

	/* Explicitly deassert the INIT IPI. */
	lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_LEVEL |
	    APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_INIT,
	    apic_id);

	DELAY(10000);		/* wait ~10 ms */

	/*
	 * next we do a STARTUP IPI: the previous INIT IPI might still be
	 * latched (P5 bug), in which case this first STARTUP would terminate
	 * immediately and the previously started INIT IPI would continue.  OR
	 * the previous INIT IPI has already run, and this STARTUP IPI will
	 * run.  OR the previous INIT IPI was ignored, and this STARTUP IPI
	 * will run.
	 */
	lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
	    APIC_LEVEL_ASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_STARTUP |
	    vector, apic_id);
	if (!lapic_ipi_wait(100))
		panic("Failed to deliver first STARTUP IPI to APIC %d",
		    apic_id);
	DELAY(200);		/* wait ~200 us */

	/*
	 * finally we do a 2nd STARTUP IPI: this 2nd STARTUP IPI should run IF
	 * the previous STARTUP IPI was cancelled by a latched INIT IPI.  OR
	 * this STARTUP IPI will be ignored, as only ONE STARTUP IPI is
	 * recognized after hardware RESET or INIT IPI.
	 */
	lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
	    APIC_LEVEL_ASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_STARTUP |
	    vector, apic_id);
	if (!lapic_ipi_wait(100))
		panic("Failed to deliver second STARTUP IPI to APIC %d",
		    apic_id);

	DELAY(200);		/* wait ~200 us */
}

static bool
ipi_bitmap_set(int cpu, u_int ipi)
{
	u_int bitmap, old, new;
	u_int *cpu_bitmap;

	bitmap = 1 << ipi;
	cpu_bitmap = &cpuid_to_pcpu[cpu]->pc_ipi_bitmap;
	old = *cpu_bitmap;
	for (;;) {
		if ((old & bitmap) != 0)
			break;
		new = old | bitmap;
		if (atomic_fcmpset_int(cpu_bitmap, &old, new))
			break;
	}
	return (old != 0);
}
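/*
 * Descriptive note: ipi_bitmap_set() returns true when the target CPU
 * already had a bitmapped IPI pending, in which case a vector is already
 * in flight and the caller may skip sending another one; e.g.
 * (hypothetically) back-to-back IPI_PREEMPT and IPI_AST requests to one
 * CPU coalesce into a single IPI_BITMAP_VECTOR interrupt.
 */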
/*
 * Send an IPI to the specified CPU, handling the bitmap logic.
 */
static void
ipi_send_cpu(int cpu, u_int ipi)
{

	KASSERT((u_int)cpu < MAXCPU && cpu_apic_ids[cpu] != -1,
	    ("IPI to non-existent CPU %d", cpu));

	if (IPI_IS_BITMAPED(ipi)) {
		if (ipi_bitmap_set(cpu, ipi))
			return;
		ipi = IPI_BITMAP_VECTOR;
	}
	lapic_ipi_vectored(ipi, cpu_apic_ids[cpu]);
}

void
ipi_bitmap_handler(struct trapframe frame)
{
	struct trapframe *oldframe;
	struct thread *td;
	int cpu = PCPU_GET(cpuid);
	u_int ipi_bitmap;

	kasan_mark(&frame, sizeof(frame), sizeof(frame), 0);

	td = curthread;
	ipi_bitmap = atomic_readandclear_int(&cpuid_to_pcpu[cpu]->
	    pc_ipi_bitmap);

	/*
	 * sched_preempt() must be called to clear the pending preempt
	 * IPI to enable delivery of further preempts.  However, the
	 * critical section will cause extra scheduler lock thrashing
	 * when used unconditionally.  Only critical_enter() if
	 * hardclock must also run, which requires the section entry.
	 */
	if (ipi_bitmap & (1 << IPI_HARDCLOCK))
		critical_enter();

	td->td_intr_nesting_level++;
	oldframe = td->td_intr_frame;
	td->td_intr_frame = &frame;
#if defined(STACK) || defined(DDB)
	if (ipi_bitmap & (1 << IPI_TRACE))
		stack_capture_intr();
#endif
	if (ipi_bitmap & (1 << IPI_PREEMPT)) {
#ifdef COUNT_IPIS
		(*ipi_preempt_counts[cpu])++;
#endif
		sched_preempt(td);
	}
	if (ipi_bitmap & (1 << IPI_AST)) {
#ifdef COUNT_IPIS
		(*ipi_ast_counts[cpu])++;
#endif
		/* Nothing to do for AST */
	}
	if (ipi_bitmap & (1 << IPI_HARDCLOCK)) {
#ifdef COUNT_IPIS
		(*ipi_hardclock_counts[cpu])++;
#endif
		hardclockintr();
	}
	td->td_intr_frame = oldframe;
	td->td_intr_nesting_level--;
	if (ipi_bitmap & (1 << IPI_HARDCLOCK))
		critical_exit();
}

/*
 * Send an IPI to a set of CPUs.
 */
void
ipi_selected(cpuset_t cpus, u_int ipi)
{
	int cpu;

	/*
	 * IPI_STOP_HARD maps to an NMI and the trap handler needs a bit
	 * of help in order to understand what the source is.
	 * Set the mask of receiving CPUs for this purpose.
	 */
	if (ipi == IPI_STOP_HARD)
		CPU_OR_ATOMIC(&ipi_stop_nmi_pending, &cpus);

	CPU_FOREACH_ISSET(cpu, &cpus) {
		CTR3(KTR_SMP, "%s: cpu: %d ipi: %x", __func__, cpu, ipi);
		ipi_send_cpu(cpu, ipi);
	}
}

/*
 * Send an IPI to a specific CPU.
 */
void
ipi_cpu(int cpu, u_int ipi)
{

	/*
	 * IPI_STOP_HARD maps to an NMI and the trap handler needs a bit
	 * of help in order to understand what the source is.
	 * Set the mask of receiving CPUs for this purpose.
	 */
	if (ipi == IPI_STOP_HARD)
		CPU_SET_ATOMIC(cpu, &ipi_stop_nmi_pending);

	CTR3(KTR_SMP, "%s: cpu: %d ipi: %x", __func__, cpu, ipi);
	ipi_send_cpu(cpu, ipi);
}
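/*
 * Example usage (illustrative, hypothetical CPU number): asking CPU 2 to
 * reschedule looks like
 *	ipi_cpu(2, IPI_PREEMPT);
 * and takes the coalescing bitmap path in ipi_send_cpu(), since
 * IPI_PREEMPT is IPI_IS_BITMAPED().
 */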
/*
 * Send an IPI to all CPUs EXCEPT myself.
 */
void
ipi_all_but_self(u_int ipi)
{
	cpuset_t other_cpus;
	int cpu, c;

	/*
	 * IPI_STOP_HARD maps to an NMI and the trap handler needs a bit
	 * of help in order to understand what the source is.
	 * Set the mask of receiving CPUs for this purpose.
	 */
	if (ipi == IPI_STOP_HARD) {
		other_cpus = all_cpus;
		CPU_CLR(PCPU_GET(cpuid), &other_cpus);
		CPU_OR_ATOMIC(&ipi_stop_nmi_pending, &other_cpus);
	}

	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	if (IPI_IS_BITMAPED(ipi)) {
		cpu = PCPU_GET(cpuid);
		CPU_FOREACH(c) {
			if (c != cpu)
				ipi_bitmap_set(c, ipi);
		}
		ipi = IPI_BITMAP_VECTOR;
	}
	lapic_ipi_vectored(ipi, APIC_IPI_DEST_OTHERS);
}

void
ipi_self_from_nmi(u_int vector)
{

	lapic_ipi_vectored(vector, APIC_IPI_DEST_SELF);

	/* Wait for IPI to finish. */
	if (!lapic_ipi_wait(50000)) {
		if (KERNEL_PANICKED())
			return;
		else
			panic("APIC: IPI is stuck");
	}
}

int
ipi_nmi_handler(void)
{
	u_int cpuid;

	/*
	 * As long as there is not a simple way to know about an NMI's
	 * source, if the bitmask for the current CPU is present in
	 * the global pending bitword an IPI_STOP_HARD has been issued
	 * and should be handled.
	 */
	cpuid = PCPU_GET(cpuid);
	if (!CPU_ISSET(cpuid, &ipi_stop_nmi_pending))
		return (1);

	CPU_CLR_ATOMIC(cpuid, &ipi_stop_nmi_pending);
	cpustop_handler();
	return (0);
}

int nmi_kdb_lock;

void
nmi_call_kdb_smp(u_int type, struct trapframe *frame)
{
	int cpu;
	bool call_post;

	cpu = PCPU_GET(cpuid);
	if (atomic_cmpset_acq_int(&nmi_kdb_lock, 0, 1)) {
		nmi_call_kdb(cpu, type, frame);
		call_post = false;
	} else {
		savectx(&stoppcbs[cpu]);
		CPU_SET_ATOMIC(cpu, &stopped_cpus);
		while (!atomic_cmpset_acq_int(&nmi_kdb_lock, 0, 1))
			ia32_pause();
		call_post = true;
	}
	atomic_store_rel_int(&nmi_kdb_lock, 0);
	if (call_post)
		cpustop_handler_post(cpu);
}

/*
 * Handle an IPI_STOP by saving our current context and spinning (or mwaiting,
 * if available) until we are resumed.
 */
void
cpustop_handler(void)
{
	struct monitorbuf *mb;
	u_int cpu;
	bool use_mwait;

	cpu = PCPU_GET(cpuid);

	savectx(&stoppcbs[cpu]);

	use_mwait = (stop_mwait && (cpu_feature2 & CPUID2_MON) != 0 &&
	    !mwait_cpustop_broken);
	if (use_mwait) {
		mb = PCPU_PTR(monitorbuf);
		atomic_store_int(&mb->stop_state,
		    MONITOR_STOPSTATE_STOPPED);
	}

	/* Indicate that we are stopped */
	CPU_SET_ATOMIC(cpu, &stopped_cpus);

	/* Wait for restart */
	while (!CPU_ISSET(cpu, &started_cpus)) {
		if (use_mwait) {
			cpu_monitor(mb, 0, 0);
			if (atomic_load_int(&mb->stop_state) ==
			    MONITOR_STOPSTATE_STOPPED)
				cpu_mwait(0, MWAIT_C1);
			continue;
		}

		ia32_pause();

		/*
		 * Halt non-BSP CPUs on panic -- we're never going to need them
		 * again, and might as well save power / release resources
		 * (e.g., overprovisioned VM infrastructure).
		 */
		while (__predict_false(!IS_BSP() && KERNEL_PANICKED()))
			halt();
	}

	cpustop_handler_post(cpu);
}
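/*
 * Descriptive note: in the MONITOR/MWAIT path above, the stopped CPU arms
 * the monitor on its per-CPU monitorbuf, so any store to mb->stop_state
 * (e.g. by the CPU driving the restart) wakes the mwait, and the loop then
 * rechecks started_cpus.
 */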
static void
cpustop_handler_post(u_int cpu)
{

	CPU_CLR_ATOMIC(cpu, &started_cpus);
	CPU_CLR_ATOMIC(cpu, &stopped_cpus);

	/*
	 * We don't broadcast TLB invalidations to other CPUs when they are
	 * stopped.  Hence, we clear the TLB before resuming.
	 */
	invltlb_glob();

#if defined(__amd64__) && (defined(DDB) || defined(GDB))
	amd64_db_resume_dbreg();
#endif

	if (cpu == 0 && cpustop_restartfunc != NULL) {
		cpustop_restartfunc();
		cpustop_restartfunc = NULL;
	}
}

/*
 * Handle an IPI_SUSPEND by saving our current context and spinning until we
 * are resumed.
 */
void
cpususpend_handler(void)
{
	u_int cpu;

	mtx_assert(&smp_ipi_mtx, MA_NOTOWNED);

	cpu = PCPU_GET(cpuid);
	if (savectx(&susppcbs[cpu]->sp_pcb)) {
#ifdef __amd64__
		fpususpend(susppcbs[cpu]->sp_fpususpend);
#else
		npxsuspend(susppcbs[cpu]->sp_fpususpend);
#endif
		/*
		 * suspended_cpus is cleared shortly after each AP is restarted
		 * by a Startup IPI, so that the BSP can proceed to restarting
		 * the next AP.
		 *
		 * resuming_cpus gets cleared when the AP completes
		 * initialization after having been released by the BSP.
		 * resuming_cpus is probably not the best name for the
		 * variable, because it is actually a set of processors that
		 * haven't resumed yet and haven't necessarily started resuming.
		 *
		 * Note that suspended_cpus is meaningful only for ACPI suspend
		 * as it's not really used for Xen suspend since the APs are
		 * automatically restored to the running state and the correct
		 * context.  For the same reason resumectx is never called in
		 * that case.
		 */
		CPU_SET_ATOMIC(cpu, &suspended_cpus);
		CPU_SET_ATOMIC(cpu, &resuming_cpus);

		/*
		 * Invalidate the cache after setting the global status bits.
		 * The last AP to set its bit may end up being an Owner of the
		 * corresponding cache line in MOESI protocol.  The AP may be
		 * stopped before the cache line is written to the main memory.
		 */
		wbinvd();
	} else {
#ifdef __amd64__
		fpuresume(susppcbs[cpu]->sp_fpususpend);
#else
		npxresume(susppcbs[cpu]->sp_fpususpend);
#endif
		pmap_init_pat();
		initializecpu();
		PCPU_SET(switchtime, 0);
		PCPU_SET(switchticks, ticks);

		/* Indicate that we have restarted and restored the context. */
		CPU_CLR_ATOMIC(cpu, &suspended_cpus);
	}

	/* Wait for resume directive */
	while (!CPU_ISSET(cpu, &toresume_cpus))
		ia32_pause();

	/* Re-apply microcode updates. */
	ucode_reload();

#ifdef __i386__
	/* Finish removing the identity mapping of low memory for this AP. */
	invltlb_glob();
#endif

	if (cpu_ops.cpu_resume)
		cpu_ops.cpu_resume();
#ifdef __amd64__
	if (vmm_resume_p)
		vmm_resume_p();
#endif

	/* Resume MCA and local APIC */
	lapic_xapic_mode();
	mca_resume();
	lapic_setup(0);

	/* Indicate that we are resumed */
	CPU_CLR_ATOMIC(cpu, &resuming_cpus);
	CPU_CLR_ATOMIC(cpu, &suspended_cpus);
	CPU_CLR_ATOMIC(cpu, &toresume_cpus);
}

/*
 * Handle an IPI_SWI by waking the delayed SWI thread.
 */
void
ipi_swi_handler(struct trapframe frame)
{

	intr_event_handle(clk_intr_event, &frame);
}
/*
 * This is called once the rest of the system is up and running and we're
 * ready to let the APs out of the pen.
 */
static void
release_aps(void *dummy __unused)
{

	if (mp_ncpus == 1)
		return;
	atomic_store_rel_int(&aps_ready, 1);
	while (smp_started == 0)
		ia32_pause();
}
SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);

#ifdef COUNT_IPIS
/*
 * Set up interrupt counters for IPI handlers.
 */
static void
mp_ipi_intrcnt(void *dummy)
{
	char buf[64];
	int i;

	CPU_FOREACH(i) {
		snprintf(buf, sizeof(buf), "cpu%d:invltlb", i);
		intrcnt_add(buf, &ipi_invltlb_counts[i]);
		snprintf(buf, sizeof(buf), "cpu%d:invlrng", i);
		intrcnt_add(buf, &ipi_invlrng_counts[i]);
		snprintf(buf, sizeof(buf), "cpu%d:invlpg", i);
		intrcnt_add(buf, &ipi_invlpg_counts[i]);
		snprintf(buf, sizeof(buf), "cpu%d:invlcache", i);
		intrcnt_add(buf, &ipi_invlcache_counts[i]);
		snprintf(buf, sizeof(buf), "cpu%d:preempt", i);
		intrcnt_add(buf, &ipi_preempt_counts[i]);
		snprintf(buf, sizeof(buf), "cpu%d:ast", i);
		intrcnt_add(buf, &ipi_ast_counts[i]);
		snprintf(buf, sizeof(buf), "cpu%d:rendezvous", i);
		intrcnt_add(buf, &ipi_rendezvous_counts[i]);
		snprintf(buf, sizeof(buf), "cpu%d:hardclock", i);
		intrcnt_add(buf, &ipi_hardclock_counts[i]);
	}
}
SYSINIT(mp_ipi_intrcnt, SI_SUB_INTR, SI_ORDER_MIDDLE, mp_ipi_intrcnt, NULL);
#endif