xref: /freebsd/sys/x86/x86/mp_x86.c (revision e7e24315)
1 /*-
2  * Copyright (c) 1996, by Steve Passe
3  * Copyright (c) 2003, by Peter Wemm
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. The name of the developer may NOT be used to endorse or promote products
12  *    derived from this software without specific prior written permission.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26 
27 #include <sys/cdefs.h>
28 #include "opt_acpi.h"
29 #ifdef __i386__
30 #include "opt_apic.h"
31 #endif
32 #include "opt_cpu.h"
33 #include "opt_ddb.h"
34 #include "opt_gdb.h"
35 #include "opt_kstack_pages.h"
36 #include "opt_pmap.h"
37 #include "opt_sched.h"
38 #include "opt_smp.h"
39 #include "opt_stack.h"
40 
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/asan.h>
44 #include <sys/bus.h>
45 #include <sys/cons.h>	/* cngetc() */
46 #include <sys/cpuset.h>
47 #include <sys/csan.h>
48 #include <sys/interrupt.h>
49 #include <sys/kdb.h>
50 #include <sys/kernel.h>
51 #include <sys/ktr.h>
52 #include <sys/lock.h>
53 #include <sys/malloc.h>
54 #include <sys/memrange.h>
55 #include <sys/mutex.h>
56 #include <sys/pcpu.h>
57 #include <sys/proc.h>
58 #include <sys/sched.h>
59 #include <sys/smp.h>
60 #include <sys/sysctl.h>
61 
62 #include <vm/vm.h>
63 #include <vm/vm_param.h>
64 #include <vm/pmap.h>
65 #include <vm/vm_kern.h>
66 #include <vm/vm_extern.h>
67 #include <vm/vm_map.h>
68 
69 #include <x86/apicreg.h>
70 #include <machine/clock.h>
71 #include <machine/cpu.h>
72 #include <machine/cputypes.h>
73 #include <x86/mca.h>
74 #include <machine/md_var.h>
75 #include <machine/pcb.h>
76 #include <machine/psl.h>
77 #include <machine/smp.h>
78 #include <machine/specialreg.h>
79 #include <machine/stack.h>
80 #include <x86/ucode.h>
81 
82 #ifdef DEV_ACPI
83 #include <contrib/dev/acpica/include/acpi.h>
84 #include <dev/acpica/acpivar.h>
85 #endif
86 
87 static MALLOC_DEFINE(M_CPUS, "cpus", "CPU items");
88 
89 int	mp_naps;		/* # of application processors */
90 int	boot_cpu_id = -1;	/* designated BSP */
91 
92 /* AP uses this during bootstrap.  Do not staticize.  */
93 char *bootSTK;
94 int bootAP;
95 
96 /* Free these after use */
97 void *bootstacks[MAXCPU];
98 void *dpcpu;
99 
100 struct susppcb **susppcbs;
101 
102 #ifdef COUNT_IPIS
103 /* Interrupt counts. */
104 static u_long *ipi_preempt_counts[MAXCPU];
105 static u_long *ipi_ast_counts[MAXCPU];
106 u_long *ipi_invltlb_counts[MAXCPU];
107 u_long *ipi_invlrng_counts[MAXCPU];
108 u_long *ipi_invlpg_counts[MAXCPU];
109 u_long *ipi_invlcache_counts[MAXCPU];
110 u_long *ipi_rendezvous_counts[MAXCPU];
111 static u_long *ipi_hardclock_counts[MAXCPU];
112 #endif
113 
114 /* Default cpu_ops implementation. */
115 struct cpu_ops cpu_ops;
116 
117 /*
118  * Local data and functions.
119  */
120 
121 static volatile cpuset_t ipi_stop_nmi_pending;
122 
123 volatile cpuset_t resuming_cpus;
124 volatile cpuset_t toresume_cpus;
125 
126 /* used to hold the APs until we are ready to release them */
127 struct mtx ap_boot_mtx;
128 
129 /* Set to 1 once we're ready to let the APs out of the pen. */
130 volatile int aps_ready = 0;
131 
132 /*
133  * Store data from cpu_add() until later in the boot when we actually set up
134  * the APs.
135  */
136 struct cpu_info *cpu_info;
137 int *apic_cpuids;
138 int cpu_apic_ids[MAXCPU];
139 _Static_assert(MAXCPU <= MAX_APIC_ID,
140     "MAXCPU cannot be larger that MAX_APIC_ID");
141 _Static_assert(xAPIC_MAX_APIC_ID <= MAX_APIC_ID,
142     "xAPIC_MAX_APIC_ID cannot be larger that MAX_APIC_ID");
143 
144 static void	release_aps(void *dummy);
145 static void	cpustop_handler_post(u_int cpu);
146 
147 static int	hyperthreading_allowed = 1;
148 SYSCTL_INT(_machdep, OID_AUTO, hyperthreading_allowed, CTLFLAG_RDTUN,
149 	&hyperthreading_allowed, 0, "Use Intel HTT logical CPUs");
150 
151 static int	hyperthreading_intr_allowed = 0;
152 SYSCTL_INT(_machdep, OID_AUTO, hyperthreading_intr_allowed, CTLFLAG_RDTUN,
153 	&hyperthreading_intr_allowed, 0,
154 	"Allow interrupts on HTT logical CPUs");
155 
156 static int	intr_apic_id_limit = -1;
157 SYSCTL_INT(_machdep, OID_AUTO, intr_apic_id_limit, CTLFLAG_RDTUN,
158 	&intr_apic_id_limit, 0,
159 	"Maximum permitted APIC ID for interrupt delivery (-1 is unlimited)");
160 
161 static struct topo_node topo_root;
162 
163 static int pkg_id_shift;
164 static int node_id_shift;
165 static int core_id_shift;
166 static int disabled_cpus;
167 
168 struct cache_info {
169 	int	id_shift;
170 	int	present;
171 } static caches[MAX_CACHE_LEVELS];
172 
173 static bool stop_mwait = false;
174 SYSCTL_BOOL(_machdep, OID_AUTO, stop_mwait, CTLFLAG_RWTUN, &stop_mwait, 0,
175     "Use MONITOR/MWAIT when stopping CPU, if available");
176 
177 void
178 mem_range_AP_init(void)
179 {
180 
181 	if (mem_range_softc.mr_op && mem_range_softc.mr_op->initAP)
182 		mem_range_softc.mr_op->initAP(&mem_range_softc);
183 }
184 
185 /*
186  * Round up to the next power of two, if necessary, and then
187  * take log2.
188  * Returns -1 if argument is zero.
189  */
190 static __inline int
191 mask_width(u_int x)
192 {
193 
194 	return (fls(x << (1 - powerof2(x))) - 1);
195 }
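/*
 * Worked examples for mask_width() above: mask_width(1) == 0,
 * mask_width(4) == 2 and mask_width(6) == 3 (6 rounds up to 8 == 2^3),
 * while mask_width(0) == -1 as noted in the comment.
 */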
196 
197 /*
198  * Add a cache level to the cache topology description.
199  */
200 static int
201 add_deterministic_cache(int type, int level, int share_count)
202 {
203 
204 	if (type == 0)
205 		return (0);
206 	if (type > 3) {
207 		printf("unexpected cache type %d\n", type);
208 		return (1);
209 	}
210 	if (type == 2) /* ignore instruction cache */
211 		return (1);
212 	if (level == 0 || level > MAX_CACHE_LEVELS) {
213 		printf("unexpected cache level %d\n", level);
214 		return (1);
215 	}
216 
217 	if (caches[level - 1].present) {
218 		printf("WARNING: multiple entries for L%u data cache\n", level);
219 		printf("%u => %u\n", caches[level - 1].id_shift,
220 		    mask_width(share_count));
221 	}
222 	caches[level - 1].id_shift = mask_width(share_count);
223 	caches[level - 1].present = 1;
224 
225 	if (caches[level - 1].id_shift > pkg_id_shift) {
226 		printf("WARNING: L%u data cache covers more "
227 		    "APIC IDs than a package (%u > %u)\n", level,
228 		    caches[level - 1].id_shift, pkg_id_shift);
229 		caches[level - 1].id_shift = pkg_id_shift;
230 	}
231 	if (caches[level - 1].id_shift < core_id_shift) {
232 		printf("WARNING: L%u data cache covers fewer "
233 		    "APIC IDs than a core (%u < %u)\n", level,
234 		    caches[level - 1].id_shift, core_id_shift);
235 		caches[level - 1].id_shift = core_id_shift;
236 	}
237 
238 	return (1);
239 }
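/*
 * The return value of add_deterministic_cache() above is a loop-control
 * flag for the CPUID cache-enumeration loops below: 0 means "no more cache
 * descriptors, stop iterating", while 1 means "keep iterating", even when
 * the entry itself was skipped (e.g. an instruction cache).
 */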
240 
241 /*
242  * Determine topology of processing units and caches for AMD CPUs.
243  * See:
244  *  - AMD CPUID Specification (Publication # 25481)
245  *  - BKDG for AMD NPT Family 0Fh Processors (Publication # 32559)
246  *  - BKDG For AMD Family 10h Processors (Publication # 31116)
247  *  - BKDG For AMD Family 15h Models 00h-0Fh Processors (Publication # 42301)
248  *  - BKDG For AMD Family 16h Models 00h-0Fh Processors (Publication # 48751)
249  *  - PPR For AMD Family 17h Models 00h-0Fh Processors (Publication # 54945)
250  */
251 static void
252 topo_probe_amd(void)
253 {
254 	u_int p[4];
255 	uint64_t v;
256 	int level;
257 	int nodes_per_socket;
258 	int share_count;
259 	int type;
260 	int i;
261 
262 	/* No multi-core capability. */
263 	if ((amd_feature2 & AMDID2_CMP) == 0)
264 		return;
265 
266 	/*
267 	 * XXX Lack of an AMD IOMMU driver prevents use of APIC IDs above
268 	 * xAPIC_MAX_APIC_ID.  This is a workaround so we boot and function on
269 	 * AMD systems with high thread counts, albeit with reduced interrupt
270 	 * performance.
271 	 *
272 	 * We should really set the limit to xAPIC_MAX_APIC_ID by default, and
273 	 * have the IOMMU driver increase it.  That way if a driver is present
274 	 * but disabled, or is otherwise not able to route the interrupts, the
275 	 * system can fall back to a functional state.  That will require a more
276 	 * substantial change though, including having the IOMMU initialize
277 	 * earlier.
278 	 */
279 	if (intr_apic_id_limit == -1)
280 		intr_apic_id_limit = xAPIC_MAX_APIC_ID;
281 
282 	/* For families 10h and newer. */
283 	pkg_id_shift = (cpu_procinfo2 & AMDID_COREID_SIZE) >>
284 	    AMDID_COREID_SIZE_SHIFT;
285 
286 	/* For 0Fh family. */
287 	if (pkg_id_shift == 0)
288 		pkg_id_shift =
289 		    mask_width((cpu_procinfo2 & AMDID_CMP_CORES) + 1);
290 
291 	/*
292 	 * Families prior to 16h define the following value as
293 	 * cores per compute unit and we don't really care about the AMD
294 	 * compute units at the moment.  Perhaps we should treat them as
295 	 * cores and cores within the compute units as hardware threads,
296 	 * but that's up for debate.
297 	 * Later families define the value as threads per compute unit,
298 	 * so we are following AMD's nomenclature here.
299 	 */
300 	if ((amd_feature2 & AMDID2_TOPOLOGY) != 0 &&
301 	    CPUID_TO_FAMILY(cpu_id) >= 0x16) {
302 		cpuid_count(0x8000001e, 0, p);
303 		share_count = ((p[1] >> 8) & 0xff) + 1;
304 		core_id_shift = mask_width(share_count);
305 
306 		/*
307 		 * For Zen (17h), gather Nodes per Processor.  Each node is a
308 		 * Zeppelin die; TR and EPYC CPUs will have multiple dies per
309 		 * package.  Communication latency between dies is higher than
310 		 * within them.
311 		 */
312 		nodes_per_socket = ((p[2] >> 8) & 0x7) + 1;
313 		node_id_shift = pkg_id_shift - mask_width(nodes_per_socket);
314 	}
315 
316 	if ((amd_feature2 & AMDID2_TOPOLOGY) != 0) {
317 		for (i = 0; ; i++) {
318 			cpuid_count(0x8000001d, i, p);
319 			type = p[0] & 0x1f;
320 			level = (p[0] >> 5) & 0x7;
321 			share_count = 1 + ((p[0] >> 14) & 0xfff);
322 
323 			if (!add_deterministic_cache(type, level, share_count))
324 				break;
325 		}
326 	} else {
327 		if (cpu_exthigh >= 0x80000005) {
328 			cpuid_count(0x80000005, 0, p);
329 			if (((p[2] >> 24) & 0xff) != 0) {
330 				caches[0].id_shift = 0;
331 				caches[0].present = 1;
332 			}
333 		}
334 		if (cpu_exthigh >= 0x80000006) {
335 			cpuid_count(0x80000006, 0, p);
336 			if (((p[2] >> 16) & 0xffff) != 0) {
337 				caches[1].id_shift = 0;
338 				caches[1].present = 1;
339 			}
340 			if (((p[3] >> 18) & 0x3fff) != 0) {
341 				nodes_per_socket = 1;
342 				if ((amd_feature2 & AMDID2_NODE_ID) != 0) {
343 					/*
344 					 * Handle multi-node processors that
345 					 * have multiple chips, each with its
346 					 * own L3 cache, on the same die.
347 					 */
348 					v = rdmsr(0xc001100c);
349 					nodes_per_socket = 1 + ((v >> 3) & 0x7);
350 				}
351 				caches[2].id_shift =
352 				    pkg_id_shift - mask_width(nodes_per_socket);
353 				caches[2].present = 1;
354 			}
355 		}
356 	}
357 }
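/*
 * Worked example for topo_probe_amd() above, using hypothetical values: on
 * a part reporting pkg_id_shift == 6, two threads per core (core_id_shift
 * == 1) and four nodes per socket, node_id_shift becomes 6 - mask_width(4)
 * == 4, so APIC ID bits 4-5 select the die within the package.
 */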
358 
359 /*
360  * Determine topology of processing units for Intel CPUs
361  * using CPUID Leaf 1 and Leaf 4, if supported.
362  * See:
363  *  - Intel 64 Architecture Processor Topology Enumeration
364  *  - Intel 64 and IA-32 Architectures Software Developer’s Manual,
365  *    Volume 3A: System Programming Guide, PROGRAMMING CONSIDERATIONS
366  *    FOR HARDWARE MULTI-THREADING CAPABLE PROCESSORS
367  */
368 static void
369 topo_probe_intel_0x4(void)
370 {
371 	u_int p[4];
372 	int max_cores;
373 	int max_logical;
374 
375 	/* Both zero and one here mean one logical processor per package. */
376 	max_logical = (cpu_feature & CPUID_HTT) != 0 ?
377 	    (cpu_procinfo & CPUID_HTT_CORES) >> 16 : 1;
378 	if (max_logical <= 1)
379 		return;
380 
381 	if (cpu_high >= 0x4) {
382 		cpuid_count(0x04, 0, p);
383 		max_cores = ((p[0] >> 26) & 0x3f) + 1;
384 	} else
385 		max_cores = 1;
386 
387 	core_id_shift = mask_width(max_logical/max_cores);
388 	KASSERT(core_id_shift >= 0,
389 	    ("intel topo: max_cores > max_logical\n"));
390 	pkg_id_shift = core_id_shift + mask_width(max_cores);
391 }
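/*
 * Example for topo_probe_intel_0x4() above: with CPUID.1 EBX[23:16]
 * reporting 16 logical CPUs and leaf 4 reporting 8 cores per package,
 * core_id_shift becomes mask_width(16 / 8) == 1 and pkg_id_shift becomes
 * 1 + mask_width(8) == 4, i.e. APIC ID bit 0 selects the SMT thread and
 * bits 1-3 select the core.
 */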
392 
393 /*
394  * Determine topology of processing units for Intel CPUs
395  * using CPUID Leaf 1Fh or 0Bh, if supported.
396  * See:
397  *  - Intel 64 Architecture Processor Topology Enumeration
398  *  - Intel 64 and IA-32 Architectures Software Developer’s Manual,
399  *    Volume 3A: System Programming Guide, PROGRAMMING CONSIDERATIONS
400  *    FOR HARDWARE MULTI-THREADING CAPABLE PROCESSORS
401  */
402 static void
403 topo_probe_intel_0xb(void)
404 {
405 	u_int leaf;
406 	u_int p[4] = { 0 };
407 	int bits;
408 	int type;
409 	int i;
410 
411 	/* Prefer leaf 1Fh (V2 Extended Topology Enumeration). */
412 	if (cpu_high >= 0x1f) {
413 		leaf = 0x1f;
414 		cpuid_count(leaf, 0, p);
415 	}
416 	/* Fall back to leaf 0Bh (Extended Topology Enumeration). */
417 	if (p[1] == 0) {
418 		leaf = 0x0b;
419 		cpuid_count(leaf, 0, p);
420 	}
421 	/* Fall back to leaf 04h (Deterministic Cache Parameters). */
422 	if (p[1] == 0) {
423 		topo_probe_intel_0x4();
424 		return;
425 	}
426 
427 	/* We only support three levels for now. */
428 	for (i = 0; ; i++) {
429 		cpuid_count(leaf, i, p);
430 
431 		bits = p[0] & 0x1f;
432 		type = (p[2] >> 8) & 0xff;
433 
434 		if (type == 0)
435 			break;
436 
437 		if (type == CPUID_TYPE_SMT)
438 			core_id_shift = bits;
439 		else if (type == CPUID_TYPE_CORE)
440 			pkg_id_shift = bits;
441 		else if (bootverbose)
442 			printf("Topology level type %d shift: %d\n", type, bits);
443 	}
444 
445 	if (pkg_id_shift < core_id_shift) {
446 		printf("WARNING: core covers more APIC IDs than a package\n");
447 		core_id_shift = pkg_id_shift;
448 	}
449 }
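/*
 * Example for topo_probe_intel_0xb() above: a CPU whose SMT level reports
 * a shift of 1 and whose core level reports a shift of 7 ends up with
 * core_id_shift == 1 and pkg_id_shift == 7, i.e. APIC ID bit 0 selects the
 * thread and bits 1-6 select the core within the package.
 */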
450 
451 /*
452  * Determine topology of caches for Intel CPUs.
453  * See:
454  *  - Intel 64 Architecture Processor Topology Enumeration
455  *  - Intel 64 and IA-32 Architectures Software Developer’s Manual
456  *    Volume 2A: Instruction Set Reference, A-M,
457  *    CPUID instruction
458  */
459 static void
460 topo_probe_intel_caches(void)
461 {
462 	u_int p[4];
463 	int level;
464 	int share_count;
465 	int type;
466 	int i;
467 
468 	if (cpu_high < 0x4) {
469 		/*
470 		 * Available cache levels and sizes can be determined
471 		 * via CPUID leaf 2, but that requires a huge table of hardcoded
472 		 * values, so for now just assume L1 and L2 caches potentially
473 		 * shared only by HTT processing units, if HTT is present.
474 		 */
475 		caches[0].id_shift = pkg_id_shift;
476 		caches[0].present = 1;
477 		caches[1].id_shift = pkg_id_shift;
478 		caches[1].present = 1;
479 		return;
480 	}
481 
482 	for (i = 0; ; i++) {
483 		cpuid_count(0x4, i, p);
484 		type = p[0] & 0x1f;
485 		level = (p[0] >> 5) & 0x7;
486 		share_count = 1 + ((p[0] >> 14) & 0xfff);
487 
488 		if (!add_deterministic_cache(type, level, share_count))
489 			break;
490 	}
491 }
492 
493 /*
494  * Determine topology of processing units and caches for Intel CPUs.
495  * See:
496  *  - Intel 64 Architecture Processor Topology Enumeration
497  */
498 static void
499 topo_probe_intel(void)
500 {
501 
502 	/*
503 	 * Note that 0x1 <= cpu_high < 4 case should be
504 	 * compatible with topo_probe_intel_0x4() logic when
505 	 * CPUID.1:EBX[23:16] > 0 (cpu_cores will be 1)
506 	 * or it should trigger the fallback otherwise.
507 	 */
508 	if (cpu_high >= 0xb)
509 		topo_probe_intel_0xb();
510 	else if (cpu_high >= 0x1)
511 		topo_probe_intel_0x4();
512 
513 	topo_probe_intel_caches();
514 }
515 
516 /*
517  * Topology information is queried only on the BSP, on which this
518  * code runs and for which it can query CPUID information.
519  * The topology is then extrapolated to all packages under the
520  * assumption that the APIC ID to hardware component ID mapping is
521  * homogeneous.
522  * That doesn't necessarily imply that the topology is uniform.
523  */
524 void
525 topo_probe(void)
526 {
527 	static int cpu_topo_probed = 0;
528 	struct x86_topo_layer {
529 		int type;
530 		int subtype;
531 		int id_shift;
532 	} topo_layers[MAX_CACHE_LEVELS + 5];
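	/*
	 * MAX_CACHE_LEVELS + 5 leaves room for the package, group, core and
	 * PU layers plus an optional NUMA node layer in addition to the
	 * cache layers.
	 */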
533 	struct topo_node *parent;
534 	struct topo_node *node;
535 	int layer;
536 	int nlayers;
537 	int node_id;
538 	int i;
539 #if defined(DEV_ACPI) && MAXMEMDOM > 1
540 	int d, domain;
541 #endif
542 
543 	if (cpu_topo_probed)
544 		return;
545 
546 	CPU_ZERO(&logical_cpus_mask);
547 
548 	if (mp_ncpus <= 1)
549 		; /* nothing */
550 	else if (cpu_vendor_id == CPU_VENDOR_AMD ||
551 	    cpu_vendor_id == CPU_VENDOR_HYGON)
552 		topo_probe_amd();
553 	else if (cpu_vendor_id == CPU_VENDOR_INTEL)
554 		topo_probe_intel();
555 
556 	KASSERT(pkg_id_shift >= core_id_shift,
557 	    ("bug in APIC topology discovery"));
558 
559 	nlayers = 0;
560 	bzero(topo_layers, sizeof(topo_layers));
561 
562 	topo_layers[nlayers].type = TOPO_TYPE_PKG;
563 	topo_layers[nlayers].id_shift = pkg_id_shift;
564 	if (bootverbose)
565 		printf("Package ID shift: %u\n", topo_layers[nlayers].id_shift);
566 	nlayers++;
567 
568 	if (pkg_id_shift > node_id_shift && node_id_shift != 0) {
569 		topo_layers[nlayers].type = TOPO_TYPE_GROUP;
570 		topo_layers[nlayers].id_shift = node_id_shift;
571 		if (bootverbose)
572 			printf("Node ID shift: %u\n",
573 			    topo_layers[nlayers].id_shift);
574 		nlayers++;
575 	}
576 
577 	/*
578 	 * Consider all caches to be within a package/chip
579 	 * and "in front" of all sub-components like
580 	 * cores and hardware threads.
581 	 */
582 	for (i = MAX_CACHE_LEVELS - 1; i >= 0; --i) {
583 		if (caches[i].present) {
584 			if (node_id_shift != 0)
585 				KASSERT(caches[i].id_shift <= node_id_shift,
586 					("bug in APIC topology discovery"));
587 			KASSERT(caches[i].id_shift <= pkg_id_shift,
588 				("bug in APIC topology discovery"));
589 			KASSERT(caches[i].id_shift >= core_id_shift,
590 				("bug in APIC topology discovery"));
591 
592 			topo_layers[nlayers].type = TOPO_TYPE_CACHE;
593 			topo_layers[nlayers].subtype = i + 1;
594 			topo_layers[nlayers].id_shift = caches[i].id_shift;
595 			if (bootverbose)
596 				printf("L%u cache ID shift: %u\n",
597 				    topo_layers[nlayers].subtype,
598 				    topo_layers[nlayers].id_shift);
599 			nlayers++;
600 		}
601 	}
602 
603 	if (pkg_id_shift > core_id_shift) {
604 		topo_layers[nlayers].type = TOPO_TYPE_CORE;
605 		topo_layers[nlayers].id_shift = core_id_shift;
606 		if (bootverbose)
607 			printf("Core ID shift: %u\n",
608 			    topo_layers[nlayers].id_shift);
609 		nlayers++;
610 	}
611 
612 	topo_layers[nlayers].type = TOPO_TYPE_PU;
613 	topo_layers[nlayers].id_shift = 0;
614 	nlayers++;
615 
616 #if defined(DEV_ACPI) && MAXMEMDOM > 1
617 	if (vm_ndomains > 1) {
618 		for (layer = 0; layer < nlayers; ++layer) {
619 			for (i = 0; i <= max_apic_id; ++i) {
620 				if ((i & ((1 << topo_layers[layer].id_shift) - 1)) == 0)
621 					domain = -1;
622 				if (!cpu_info[i].cpu_present)
623 					continue;
624 				d = acpi_pxm_get_cpu_locality(i);
625 				if (domain >= 0 && domain != d)
626 					break;
627 				domain = d;
628 			}
629 			if (i > max_apic_id)
630 				break;
631 		}
632 		KASSERT(layer < nlayers, ("NUMA domain smaller than PU"));
633 		memmove(&topo_layers[layer+1], &topo_layers[layer],
634 		    sizeof(*topo_layers) * (nlayers - layer));
635 		topo_layers[layer].type = TOPO_TYPE_NODE;
636 		topo_layers[layer].subtype = CG_SHARE_NONE;
637 		nlayers++;
638 	}
639 #endif
640 
641 	topo_init_root(&topo_root);
642 	for (i = 0; i <= max_apic_id; ++i) {
643 		if (!cpu_info[i].cpu_present)
644 			continue;
645 
646 		parent = &topo_root;
647 		for (layer = 0; layer < nlayers; ++layer) {
648 #if defined(DEV_ACPI) && MAXMEMDOM > 1
649 			if (topo_layers[layer].type == TOPO_TYPE_NODE) {
650 				node_id = acpi_pxm_get_cpu_locality(i);
651 			} else
652 #endif
653 				node_id = i >> topo_layers[layer].id_shift;
654 			parent = topo_add_node_by_hwid(parent, node_id,
655 			    topo_layers[layer].type,
656 			    topo_layers[layer].subtype);
657 		}
658 	}
659 
660 	parent = &topo_root;
661 	for (layer = 0; layer < nlayers; ++layer) {
662 #if defined(DEV_ACPI) && MAXMEMDOM > 1
663 		if (topo_layers[layer].type == TOPO_TYPE_NODE)
664 			node_id = acpi_pxm_get_cpu_locality(boot_cpu_id);
665 		else
666 #endif
667 			node_id = boot_cpu_id >> topo_layers[layer].id_shift;
668 		node = topo_find_node_by_hwid(parent, node_id,
669 		    topo_layers[layer].type,
670 		    topo_layers[layer].subtype);
671 		topo_promote_child(node);
672 		parent = node;
673 	}
674 
675 	cpu_topo_probed = 1;
676 }
677 
678 /*
679  * Assign logical CPU IDs to local APICs.
680  */
681 void
682 assign_cpu_ids(void)
683 {
684 	struct topo_node *node;
685 	u_int smt_mask;
686 	int nhyper;
687 
688 	smt_mask = (1u << core_id_shift) - 1;
689 
690 	/*
691 	 * Assign CPU IDs to local APIC IDs and disable any CPUs
692 	 * beyond MAXCPU.  CPU 0 is always assigned to the BSP.
693 	 */
694 	mp_ncpus = 0;
695 	nhyper = 0;
696 	TOPO_FOREACH(node, &topo_root) {
697 		if (node->type != TOPO_TYPE_PU)
698 			continue;
699 
700 		if ((node->hwid & smt_mask) != (boot_cpu_id & smt_mask))
701 			cpu_info[node->hwid].cpu_hyperthread = 1;
702 
703 		if (resource_disabled("lapic", node->hwid)) {
704 			if (node->hwid != boot_cpu_id)
705 				cpu_info[node->hwid].cpu_disabled = 1;
706 			else
707 				printf("Cannot disable BSP, APIC ID = %d\n",
708 				    node->hwid);
709 		}
710 
711 		if (!hyperthreading_allowed &&
712 		    cpu_info[node->hwid].cpu_hyperthread)
713 			cpu_info[node->hwid].cpu_disabled = 1;
714 
715 		if (mp_ncpus >= MAXCPU)
716 			cpu_info[node->hwid].cpu_disabled = 1;
717 
718 		if (cpu_info[node->hwid].cpu_disabled) {
719 			disabled_cpus++;
720 			continue;
721 		}
722 
723 		if (cpu_info[node->hwid].cpu_hyperthread)
724 			nhyper++;
725 
726 		cpu_apic_ids[mp_ncpus] = node->hwid;
727 		apic_cpuids[node->hwid] = mp_ncpus;
728 		topo_set_pu_id(node, mp_ncpus);
729 		mp_ncpus++;
730 	}
731 
732 	KASSERT(mp_maxid >= mp_ncpus - 1,
733 	    ("%s: counters out of sync: max %d, count %d", __func__, mp_maxid,
734 	    mp_ncpus));
735 
736 	mp_ncores = mp_ncpus - nhyper;
737 	smp_threads_per_core = mp_ncpus / mp_ncores;
738 }
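/*
 * Example for assign_cpu_ids() above: with core_id_shift == 1, smt_mask is
 * 0x1, so every PU whose low APIC ID bit differs from the BSP's is counted
 * as a hyperthread.  On an 8-thread, 4-core system with nothing disabled
 * this yields nhyper == 4, mp_ncores == 4 and smp_threads_per_core == 2.
 */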
739 
740 /*
741  * Print various information about the SMP system hardware and setup.
742  */
743 void
744 cpu_mp_announce(void)
745 {
746 	struct topo_node *node;
747 	const char *hyperthread;
748 	struct topo_analysis topology;
749 
750 	printf("FreeBSD/SMP: ");
751 	if (topo_analyze(&topo_root, 1, &topology)) {
752 		printf("%d package(s)", topology.entities[TOPO_LEVEL_PKG]);
753 		if (topology.entities[TOPO_LEVEL_GROUP] > 1)
754 			printf(" x %d groups",
755 			    topology.entities[TOPO_LEVEL_GROUP]);
756 		if (topology.entities[TOPO_LEVEL_CACHEGROUP] > 1)
757 			printf(" x %d cache groups",
758 			    topology.entities[TOPO_LEVEL_CACHEGROUP]);
759 		if (topology.entities[TOPO_LEVEL_CORE] > 0)
760 			printf(" x %d core(s)",
761 			    topology.entities[TOPO_LEVEL_CORE]);
762 		if (topology.entities[TOPO_LEVEL_THREAD] > 1)
763 			printf(" x %d hardware threads",
764 			    topology.entities[TOPO_LEVEL_THREAD]);
765 	} else {
766 		printf("Non-uniform topology");
767 	}
768 	printf("\n");
769 
770 	if (disabled_cpus) {
771 		printf("FreeBSD/SMP Online: ");
772 		if (topo_analyze(&topo_root, 0, &topology)) {
773 			printf("%d package(s)",
774 			    topology.entities[TOPO_LEVEL_PKG]);
775 			if (topology.entities[TOPO_LEVEL_GROUP] > 1)
776 				printf(" x %d groups",
777 				    topology.entities[TOPO_LEVEL_GROUP]);
778 			if (topology.entities[TOPO_LEVEL_CACHEGROUP] > 1)
779 				printf(" x %d cache groups",
780 				    topology.entities[TOPO_LEVEL_CACHEGROUP]);
781 			if (topology.entities[TOPO_LEVEL_CORE] > 0)
782 				printf(" x %d core(s)",
783 				    topology.entities[TOPO_LEVEL_CORE]);
784 			if (topology.entities[TOPO_LEVEL_THREAD] > 1)
785 				printf(" x %d hardware threads",
786 				    topology.entities[TOPO_LEVEL_THREAD]);
787 		} else {
788 			printf("Non-uniform topology");
789 		}
790 		printf("\n");
791 	}
792 
793 	if (!bootverbose)
794 		return;
795 
796 	TOPO_FOREACH(node, &topo_root) {
797 		switch (node->type) {
798 		case TOPO_TYPE_PKG:
799 			printf("Package HW ID = %u\n", node->hwid);
800 			break;
801 		case TOPO_TYPE_CORE:
802 			printf("\tCore HW ID = %u\n", node->hwid);
803 			break;
804 		case TOPO_TYPE_PU:
805 			if (cpu_info[node->hwid].cpu_hyperthread)
806 				hyperthread = "/HT";
807 			else
808 				hyperthread = "";
809 
810 			if (node->subtype == 0)
811 				printf("\t\tCPU (AP%s): APIC ID: %u"
812 				    " (disabled)\n", hyperthread, node->hwid);
813 			else if (node->id == 0)
814 				printf("\t\tCPU0 (BSP): APIC ID: %u\n",
815 				    node->hwid);
816 			else
817 				printf("\t\tCPU%u (AP%s): APIC ID: %u\n",
818 				    node->id, hyperthread, node->hwid);
819 			break;
820 		default:
821 			/* ignored */
822 			break;
823 		}
824 	}
825 }
826 
827 /*
828  * Add a scheduling group, a group of logical processors sharing
829  * a particular cache (and, thus having an affinity), to the scheduling
830  * topology.
831  * This function recursively works on lower level caches.
832  */
833 static void
834 x86topo_add_sched_group(struct topo_node *root, struct cpu_group *cg_root)
835 {
836 	struct topo_node *node;
837 	int nchildren;
838 	int ncores;
839 	int i;
840 
841 	KASSERT(root->type == TOPO_TYPE_SYSTEM || root->type == TOPO_TYPE_CACHE ||
842 	    root->type == TOPO_TYPE_NODE || root->type == TOPO_TYPE_GROUP,
843 	    ("x86topo_add_sched_group: bad type: %u", root->type));
844 	CPU_COPY(&root->cpuset, &cg_root->cg_mask);
845 	cg_root->cg_count = root->cpu_count;
846 	if (root->type == TOPO_TYPE_CACHE)
847 		cg_root->cg_level = root->subtype;
848 	else
849 		cg_root->cg_level = CG_SHARE_NONE;
850 	if (root->type == TOPO_TYPE_NODE)
851 		cg_root->cg_flags = CG_FLAG_NODE;
852 	else
853 		cg_root->cg_flags = 0;
854 
855 	/*
856 	 * Check how many core nodes we have under the given root node.
857 	 * If we have multiple logical processors, but not multiple
858 	 * cores, then those processors must be hardware threads.
859 	 */
860 	ncores = 0;
861 	node = root;
862 	while (node != NULL) {
863 		if (node->type != TOPO_TYPE_CORE) {
864 			node = topo_next_node(root, node);
865 			continue;
866 		}
867 
868 		ncores++;
869 		node = topo_next_nonchild_node(root, node);
870 	}
871 
872 	if (cg_root->cg_level != CG_SHARE_NONE &&
873 	    root->cpu_count > 1 && ncores < 2)
874 		cg_root->cg_flags |= CG_FLAG_SMT;
875 
876 	/*
877 	 * Find out how many cache nodes we have under the given root node.
878 	 * We ignore cache nodes that cover all the same processors as the
879 	 * root node.  Also, we do not descend below found cache nodes.
880 	 * That is, we count top-level "non-redundant" caches under the root
881 	 * node.
882 	 */
883 	nchildren = 0;
884 	node = root;
885 	while (node != NULL) {
886 		/*
887 		 * When some APICs are disabled by tunables, nodes can end up
888 		 * with an empty cpuset. Nodes with an empty cpuset will be
889 		 * translated into cpu groups with empty cpusets. smp_topo_fill
890 		 * will then set cg_first and cg_last to -1. This isn't
891 		 * correctly handled in all functions. E.g. when
892 		 * cpu_search_lowest and cpu_search_highest loop through all
893 		 * cpus, they call CPU_ISSET on cpu -1 which ends up in a
894 		 * general protection fault.
895 		 *
896 		 * We could fix the scheduler to handle empty cpu groups
897 		 * correctly.  Nevertheless, empty cpu groups cause overhead
898 		 * for no benefit, so it makes more sense simply not to
899 		 * create them.
900 		 */
901 		if (CPU_EMPTY(&node->cpuset)) {
902 			node = topo_next_node(root, node);
903 			continue;
904 		}
905 		if (CPU_CMP(&node->cpuset, &root->cpuset) == 0) {
906 			if (node->type == TOPO_TYPE_CACHE &&
907 			    cg_root->cg_level < node->subtype)
908 				cg_root->cg_level = node->subtype;
909 			if (node->type == TOPO_TYPE_NODE)
910 				cg_root->cg_flags |= CG_FLAG_NODE;
911 			node = topo_next_node(root, node);
912 			continue;
913 		}
914 		if (node->type != TOPO_TYPE_GROUP &&
915 		    node->type != TOPO_TYPE_NODE &&
916 		    node->type != TOPO_TYPE_CACHE) {
917 			node = topo_next_node(root, node);
918 			continue;
919 		}
920 		nchildren++;
921 		node = topo_next_nonchild_node(root, node);
922 	}
923 
924 	/*
925 	 * We are not interested in nodes including only one CPU each.
926 	 */
927 	if (nchildren == root->cpu_count)
928 		return;
929 
930 	/*
931 	 * We are not interested in nodes without children.
932 	 */
933 	cg_root->cg_children = nchildren;
934 	if (nchildren == 0)
935 		return;
936 
937 	cg_root->cg_child = smp_topo_alloc(nchildren);
938 
939 	/*
940 	 * Now find again the same cache nodes as above and recursively
941 	 * build scheduling topologies for them.
942 	 */
943 	node = root;
944 	i = 0;
945 	while (node != NULL) {
946 		if ((node->type != TOPO_TYPE_GROUP &&
947 		    node->type != TOPO_TYPE_NODE &&
948 		    node->type != TOPO_TYPE_CACHE) ||
949 		    CPU_CMP(&node->cpuset, &root->cpuset) == 0 ||
950 		    CPU_EMPTY(&node->cpuset)) {
951 			node = topo_next_node(root, node);
952 			continue;
953 		}
954 		cg_root->cg_child[i].cg_parent = cg_root;
955 		x86topo_add_sched_group(node, &cg_root->cg_child[i]);
956 		i++;
957 		node = topo_next_nonchild_node(root, node);
958 	}
959 }
960 
961 /*
962  * Build the MI scheduling topology from the discovered hardware topology.
963  */
964 struct cpu_group *
965 cpu_topo(void)
966 {
967 	struct cpu_group *cg_root;
968 
969 	if (mp_ncpus <= 1)
970 		return (smp_topo_none());
971 
972 	cg_root = smp_topo_alloc(1);
973 	x86topo_add_sched_group(&topo_root, cg_root);
974 	return (cg_root);
975 }
976 
977 static void
978 cpu_alloc(void *dummy __unused)
979 {
980 	/*
981 	 * Dynamically allocate the arrays that depend on the
982 	 * maximum APIC ID.
983 	 */
984 	cpu_info = malloc(sizeof(*cpu_info) * (max_apic_id + 1), M_CPUS,
985 	    M_WAITOK | M_ZERO);
986 	apic_cpuids = malloc(sizeof(*apic_cpuids) * (max_apic_id + 1), M_CPUS,
987 	    M_WAITOK | M_ZERO);
988 }
989 SYSINIT(cpu_alloc, SI_SUB_CPU, SI_ORDER_FIRST, cpu_alloc, NULL);
990 
991 /*
992  * Add a logical CPU to the topology.
993  */
994 void
995 cpu_add(u_int apic_id, char boot_cpu)
996 {
997 
998 	if (apic_id > max_apic_id)
999 		panic("SMP: APIC ID %d too high", apic_id);
1000 
1001 	KASSERT(cpu_info[apic_id].cpu_present == 0, ("CPU %u added twice",
1002 	    apic_id));
1003 	cpu_info[apic_id].cpu_present = 1;
1004 	if (boot_cpu) {
1005 		KASSERT(boot_cpu_id == -1,
1006 		    ("CPU %u claims to be BSP, but CPU %u already is", apic_id,
1007 		    boot_cpu_id));
1008 		boot_cpu_id = apic_id;
1009 		cpu_info[apic_id].cpu_bsp = 1;
1010 	}
1011 	if (bootverbose)
1012 		printf("SMP: Added CPU %u (%s)\n", apic_id, boot_cpu ? "BSP" :
1013 		    "AP");
1014 }
1015 
1016 void
1017 cpu_mp_setmaxid(void)
1018 {
1019 
1020 	/*
1021 	 * mp_ncpus and mp_maxid should already be set by calls to cpu_add().
1022 	 * If there were no calls to cpu_add() assume this is a UP system.
1023 	 */
1024 	if (mp_ncpus == 0)
1025 		mp_ncpus = 1;
1026 }
1027 
1028 int
1029 cpu_mp_probe(void)
1030 {
1031 
1032 	/*
1033 	 * Always record BSP in CPU map so that the mbuf init code works
1034 	 * correctly.
1035 	 */
1036 	CPU_SETOF(0, &all_cpus);
1037 	return (mp_ncpus > 1);
1038 }
1039 
1040 /*
1041  * AP CPUs call this to initialize themselves.
1042  */
1043 void
1044 init_secondary_tail(void)
1045 {
1046 	u_int cpuid;
1047 
1048 	pmap_activate_boot(vmspace_pmap(proc0.p_vmspace));
1049 
1050 	/*
1051 	 * On real hardware, switch to x2apic mode if possible.  Do it
1052 	 * after aps_ready has been signalled, to avoid manipulating the
1053 	 * mode while the BSP might still want to send some IPI to us
1054 	 * (second startup IPI is ignored on modern hardware etc).
1055 	 */
1056 	lapic_xapic_mode();
1057 
1058 	/* Initialize the PAT MSR. */
1059 	pmap_init_pat();
1060 
1061 	/* set up CPU registers and state */
1062 	cpu_setregs();
1063 
1064 	/* set up SSE/NX */
1065 	initializecpu();
1066 
1067 	/* set up FPU state on the AP */
1068 #ifdef __amd64__
1069 	fpuinit();
1070 #else
1071 	npxinit(false);
1072 #endif
1073 
1074 	if (cpu_ops.cpu_init)
1075 		cpu_ops.cpu_init();
1076 
1077 	/* A quick check from sanity claus */
1078 	cpuid = PCPU_GET(cpuid);
1079 	if (PCPU_GET(apic_id) != lapic_id()) {
1080 		printf("SMP: cpuid = %d\n", cpuid);
1081 		printf("SMP: actual apic_id = %d\n", lapic_id());
1082 		printf("SMP: correct apic_id = %d\n", PCPU_GET(apic_id));
1083 		panic("cpuid mismatch! boom!!");
1084 	}
1085 
1086 	/* Initialize curthread. */
1087 	KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
1088 	PCPU_SET(curthread, PCPU_GET(idlethread));
1089 	schedinit_ap();
1090 
1091 	mtx_lock_spin(&ap_boot_mtx);
1092 
1093 	mca_init();
1094 
1095 	/* Init local apic for irq's */
1096 	lapic_setup(1);
1097 
1098 	/* Set memory range attributes for this CPU to match the BSP */
1099 	mem_range_AP_init();
1100 
1101 	smp_cpus++;
1102 
1103 	CTR1(KTR_SMP, "SMP: AP CPU #%d Launched", cpuid);
1104 	if (bootverbose)
1105 		printf("SMP: AP CPU #%d Launched!\n", cpuid);
1106 	else
1107 		printf("%s%d%s", smp_cpus == 2 ? "Launching APs: " : "",
1108 		    cpuid, smp_cpus == mp_ncpus ? "\n" : " ");
1109 
1110 	/* Determine if we are a logical CPU. */
1111 	if (cpu_info[PCPU_GET(apic_id)].cpu_hyperthread)
1112 		CPU_SET(cpuid, &logical_cpus_mask);
1113 
1114 	if (bootverbose)
1115 		lapic_dump("AP");
1116 
1117 	if (smp_cpus == mp_ncpus) {
1118 		/* enable IPIs, TLB shootdown, freezes, etc. */
1119 		atomic_store_rel_int(&smp_started, 1);
1120 	}
1121 
1122 #ifdef __amd64__
1123 	if (pmap_pcid_enabled)
1124 		load_cr4(rcr4() | CR4_PCIDE);
1125 	load_ds(_udatasel);
1126 	load_es(_udatasel);
1127 	load_fs(_ufssel);
1128 #endif
1129 
1130 	mtx_unlock_spin(&ap_boot_mtx);
1131 
1132 	/* Wait until all the APs are up. */
1133 	while (atomic_load_acq_int(&smp_started) == 0)
1134 		ia32_pause();
1135 
1136 	kcsan_cpu_init(cpuid);
1137 
1138 	sched_ap_entry();
1139 
1140 	panic("scheduler returned us to %s", __func__);
1141 	/* NOTREACHED */
1142 }
1143 
1144 static void
1145 smp_after_idle_runnable(void *arg __unused)
1146 {
1147 	int cpu;
1148 
1149 	if (mp_ncpus == 1)
1150 		return;
1151 
1152 	KASSERT(smp_started != 0, ("%s: SMP not started yet", __func__));
1153 
1154 	/*
1155 	 * Wait for all APs to handle an interrupt.  After that, we know that
1156 	 * the APs have entered the scheduler at least once, so the boot stacks
1157 	 * are safe to free.
1158 	 */
1159 	smp_rendezvous(smp_no_rendezvous_barrier, NULL,
1160 	    smp_no_rendezvous_barrier, NULL);
1161 
1162 	for (cpu = 1; cpu < mp_ncpus; cpu++) {
1163 		kmem_free(bootstacks[cpu], kstack_pages * PAGE_SIZE);
1164 	}
1165 }
1166 SYSINIT(smp_after_idle_runnable, SI_SUB_SMP, SI_ORDER_ANY,
1167     smp_after_idle_runnable, NULL);
1168 
1169 /*
1170  * We tell the I/O APIC code about all the CPUs that we want to receive
1171  * interrupts.  If we don't want certain CPUs to receive IRQs we
1172  * can simply not tell the I/O APIC code about them in this function.
1173  * We also do not tell it about the BSP since it tells itself about
1174  * the BSP internally to work with UP kernels and on UP machines.
1175  */
1176 void
1177 set_interrupt_apic_ids(void)
1178 {
1179 	u_int i, apic_id;
1180 
1181 	for (i = 0; i < MAXCPU; i++) {
1182 		apic_id = cpu_apic_ids[i];
1183 		if (apic_id == -1)
1184 			continue;
1185 		if (cpu_info[apic_id].cpu_bsp)
1186 			continue;
1187 		if (cpu_info[apic_id].cpu_disabled)
1188 			continue;
1189 		if (intr_apic_id_limit >= 0 && apic_id > intr_apic_id_limit)
1190 			continue;
1191 
1192 		/* Don't let hyperthreads service interrupts. */
1193 		if (cpu_info[apic_id].cpu_hyperthread &&
1194 		    !hyperthreading_intr_allowed)
1195 			continue;
1196 
1197 		intr_add_cpu(i);
1198 	}
1199 }
1200 
1201 #ifdef COUNT_XINVLTLB_HITS
1202 u_int xhits_gbl[MAXCPU];
1203 u_int xhits_pg[MAXCPU];
1204 u_int xhits_rng[MAXCPU];
1205 static SYSCTL_NODE(_debug, OID_AUTO, xhits, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1206     "");
1207 SYSCTL_OPAQUE(_debug_xhits, OID_AUTO, global, CTLFLAG_RW, &xhits_gbl,
1208     sizeof(xhits_gbl), "IU", "");
1209 SYSCTL_OPAQUE(_debug_xhits, OID_AUTO, page, CTLFLAG_RW, &xhits_pg,
1210     sizeof(xhits_pg), "IU", "");
1211 SYSCTL_OPAQUE(_debug_xhits, OID_AUTO, range, CTLFLAG_RW, &xhits_rng,
1212     sizeof(xhits_rng), "IU", "");
1213 
1214 u_int ipi_global;
1215 u_int ipi_page;
1216 u_int ipi_range;
1217 u_int ipi_range_size;
1218 SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_global, CTLFLAG_RW, &ipi_global, 0, "");
1219 SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_page, CTLFLAG_RW, &ipi_page, 0, "");
1220 SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_range, CTLFLAG_RW, &ipi_range, 0, "");
1221 SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_range_size, CTLFLAG_RW, &ipi_range_size,
1222     0, "");
1223 #endif /* COUNT_XINVLTLB_HITS */
1224 
1225 /*
1226  * Init and startup IPI.
1227  */
1228 void
1229 ipi_startup(int apic_id, int vector)
1230 {
1231 
1232 	/*
1233 	 * This attempts to follow the algorithm described in the
1234 	 * Intel Multiprocessor Specification v1.4 in section B.4.
1235 	 * For each IPI, we allow the local APIC ~20us to deliver the
1236 	 * IPI.  If that times out, we panic.
1237 	 */
1238 
1239 	/*
1240 	 * first we do an INIT IPI: this INIT IPI might be run, resetting
1241 	 * and running the target CPU. OR this INIT IPI might be latched (P5
1242 	 * bug), CPU waiting for STARTUP IPI. OR this INIT IPI might be
1243 	 * bug), with the CPU waiting for a STARTUP IPI. OR this INIT IPI might be
1244 	 */
1245 	lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_LEVEL |
1246 	    APIC_LEVEL_ASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_INIT, apic_id);
1247 	lapic_ipi_wait(100);
1248 
1249 	/* Explicitly deassert the INIT IPI. */
1250 	lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_LEVEL |
1251 	    APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_INIT,
1252 	    apic_id);
1253 
1254 	DELAY(10000);		/* wait ~10mS */
1255 
1256 	/*
1257 	 * next we do a STARTUP IPI: the previous INIT IPI might still be
1258 	 * latched, (P5 bug) this 1st STARTUP would then terminate
1259 	 * immediately, and the previously started INIT IPI would continue. OR
1260 	 * the previous INIT IPI has already run, and this STARTUP IPI will
1261 	 * run.  OR the previous INIT IPI was ignored, and this STARTUP IPI
1262 	 * will run.
1263 	 */
1264 	lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
1265 	    APIC_LEVEL_ASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_STARTUP |
1266 	    vector, apic_id);
1267 	if (!lapic_ipi_wait(100))
1268 		panic("Failed to deliver first STARTUP IPI to APIC %d",
1269 		    apic_id);
1270 	DELAY(200);		/* wait ~200uS */
1271 
1272 	/*
1273 	 * finally we do a 2nd STARTUP IPI: this 2nd STARTUP IPI should run IF
1274 	 * the previous STARTUP IPI was cancelled by a latched INIT IPI. OR
1275 	 * this STARTUP IPI will be ignored, as only ONE STARTUP IPI is
1276 	 * recognized after hardware RESET or INIT IPI.
1277 	 */
1278 	lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
1279 	    APIC_LEVEL_ASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_STARTUP |
1280 	    vector, apic_id);
1281 	if (!lapic_ipi_wait(100))
1282 		panic("Failed to deliver second STARTUP IPI to APIC %d",
1283 		    apic_id);
1284 
1285 	DELAY(200);		/* wait ~200uS */
1286 }
1287 
1288 static bool
1289 ipi_bitmap_set(int cpu, u_int ipi)
1290 {
1291 	u_int bitmap, old, new;
1292 	u_int *cpu_bitmap;
1293 
1294 	bitmap = 1 << ipi;
1295 	cpu_bitmap = &cpuid_to_pcpu[cpu]->pc_ipi_bitmap;
1296 	old = *cpu_bitmap;
1297 	for (;;) {
1298 		if ((old & bitmap) != 0)
1299 			break;
1300 		new = old | bitmap;
1301 		if (atomic_fcmpset_int(cpu_bitmap, &old, new))
1302 			break;
1303 	}
1304 	return (old != 0);
1305 }
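/*
 * ipi_bitmap_set() above returns true when some bitmapped IPI was already
 * pending for the target CPU, in which case the IPI_BITMAP_VECTOR interrupt
 * has already been sent and the caller can skip sending another one.
 */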
1306 
1307 /*
1308  * Send an IPI to specified CPU handling the bitmap logic.
1309  */
1310 static void
1311 ipi_send_cpu(int cpu, u_int ipi)
1312 {
1313 
1314 	KASSERT((u_int)cpu < MAXCPU && cpu_apic_ids[cpu] != -1,
1315 	    ("IPI to non-existent CPU %d", cpu));
1316 
1317 	if (IPI_IS_BITMAPED(ipi)) {
1318 		if (ipi_bitmap_set(cpu, ipi))
1319 			return;
1320 		ipi = IPI_BITMAP_VECTOR;
1321 	}
1322 	lapic_ipi_vectored(ipi, cpu_apic_ids[cpu]);
1323 }
1324 
1325 void
1326 ipi_bitmap_handler(struct trapframe frame)
1327 {
1328 	struct trapframe *oldframe;
1329 	struct thread *td;
1330 	int cpu = PCPU_GET(cpuid);
1331 	u_int ipi_bitmap;
1332 
1333 	kasan_mark(&frame, sizeof(frame), sizeof(frame), 0);
1334 
1335 	td = curthread;
1336 	ipi_bitmap = atomic_readandclear_int(&cpuid_to_pcpu[cpu]->
1337 	    pc_ipi_bitmap);
1338 
1339 	/*
1340 	 * sched_preempt() must be called to clear the pending preempt
1341 	 * IPI to enable delivery of further preempts.  However, the
1342 	 * critical section will cause extra scheduler lock thrashing
1343 	 * when used unconditionally.  Only critical_enter() if
1344 	 * hardclock must also run, which requires the section entry.
1345 	 */
1346 	if (ipi_bitmap & (1 << IPI_HARDCLOCK))
1347 		critical_enter();
1348 
1349 	td->td_intr_nesting_level++;
1350 	oldframe = td->td_intr_frame;
1351 	td->td_intr_frame = &frame;
1352 #if defined(STACK) || defined(DDB)
1353 	if (ipi_bitmap & (1 << IPI_TRACE))
1354 		stack_capture_intr();
1355 #endif
1356 	if (ipi_bitmap & (1 << IPI_PREEMPT)) {
1357 #ifdef COUNT_IPIS
1358 		(*ipi_preempt_counts[cpu])++;
1359 #endif
1360 		sched_preempt(td);
1361 	}
1362 	if (ipi_bitmap & (1 << IPI_AST)) {
1363 #ifdef COUNT_IPIS
1364 		(*ipi_ast_counts[cpu])++;
1365 #endif
1366 		/* Nothing to do for AST */
1367 	}
1368 	if (ipi_bitmap & (1 << IPI_HARDCLOCK)) {
1369 #ifdef COUNT_IPIS
1370 		(*ipi_hardclock_counts[cpu])++;
1371 #endif
1372 		hardclockintr();
1373 	}
1374 	td->td_intr_frame = oldframe;
1375 	td->td_intr_nesting_level--;
1376 	if (ipi_bitmap & (1 << IPI_HARDCLOCK))
1377 		critical_exit();
1378 }
1379 
1380 /*
1381  * send an IPI to a set of cpus.
1382  */
1383 void
1384 ipi_selected(cpuset_t cpus, u_int ipi)
1385 {
1386 	int cpu;
1387 
1388 	/*
1389 	 * IPI_STOP_HARD maps to an NMI and the trap handler needs a bit
1390 	 * of help in order to understand what the source is.
1391 	 * Set the mask of receiving CPUs for this purpose.
1392 	 */
1393 	if (ipi == IPI_STOP_HARD)
1394 		CPU_OR_ATOMIC(&ipi_stop_nmi_pending, &cpus);
1395 
1396 	CPU_FOREACH_ISSET(cpu, &cpus) {
1397 		CTR3(KTR_SMP, "%s: cpu: %d ipi: %x", __func__, cpu, ipi);
1398 		ipi_send_cpu(cpu, ipi);
1399 	}
1400 }
1401 
1402 /*
1403  * send an IPI to a specific CPU.
1404  */
1405 void
1406 ipi_cpu(int cpu, u_int ipi)
1407 {
1408 
1409 	/*
1410 	 * IPI_STOP_HARD maps to an NMI and the trap handler needs a bit
1411 	 * of help in order to understand what the source is.
1412 	 * Set the mask of receiving CPUs for this purpose.
1413 	 */
1414 	if (ipi == IPI_STOP_HARD)
1415 		CPU_SET_ATOMIC(cpu, &ipi_stop_nmi_pending);
1416 
1417 	CTR3(KTR_SMP, "%s: cpu: %d ipi: %x", __func__, cpu, ipi);
1418 	ipi_send_cpu(cpu, ipi);
1419 }
1420 
1421 /*
1422  * send an IPI to all CPUs EXCEPT myself
1423  */
1424 void
1425 ipi_all_but_self(u_int ipi)
1426 {
1427 	cpuset_t other_cpus;
1428 	int cpu, c;
1429 
1430 	/*
1431 	 * IPI_STOP_HARD maps to an NMI and the trap handler needs a bit
1432 	 * of help in order to understand what the source is.
1433 	 * Set the mask of receiving CPUs for this purpose.
1434 	 */
1435 	if (ipi == IPI_STOP_HARD) {
1436 		other_cpus = all_cpus;
1437 		CPU_CLR(PCPU_GET(cpuid), &other_cpus);
1438 		CPU_OR_ATOMIC(&ipi_stop_nmi_pending, &other_cpus);
1439 	}
1440 
1441 	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
1442 	if (IPI_IS_BITMAPED(ipi)) {
1443 		cpu = PCPU_GET(cpuid);
1444 		CPU_FOREACH(c) {
1445 			if (c != cpu)
1446 				ipi_bitmap_set(c, ipi);
1447 		}
1448 		ipi = IPI_BITMAP_VECTOR;
1449 	}
1450 	lapic_ipi_vectored(ipi, APIC_IPI_DEST_OTHERS);
1451 }
1452 
1453 void
1454 ipi_self_from_nmi(u_int vector)
1455 {
1456 
1457 	lapic_ipi_vectored(vector, APIC_IPI_DEST_SELF);
1458 
1459 	/* Wait for IPI to finish. */
1460 	if (!lapic_ipi_wait(50000)) {
1461 		if (KERNEL_PANICKED())
1462 			return;
1463 		else
1464 			panic("APIC: IPI is stuck");
1465 	}
1466 }
1467 
1468 int
1469 ipi_nmi_handler(void)
1470 {
1471 	u_int cpuid;
1472 
1473 	/*
1474 	 * As long as there is no simple way to know about an NMI's
1475 	 * source, if the bitmask for the current CPU is present in
1476 	 * the global pending bitword, an IPI_STOP_HARD has been issued
1477 	 * and should be handled.
1478 	 */
1479 	cpuid = PCPU_GET(cpuid);
1480 	if (!CPU_ISSET(cpuid, &ipi_stop_nmi_pending))
1481 		return (1);
1482 
1483 	CPU_CLR_ATOMIC(cpuid, &ipi_stop_nmi_pending);
1484 	cpustop_handler();
1485 	return (0);
1486 }
1487 
1488 int nmi_kdb_lock;
1489 
1490 void
1491 nmi_call_kdb_smp(u_int type, struct trapframe *frame)
1492 {
1493 	int cpu;
1494 	bool call_post;
1495 
1496 	cpu = PCPU_GET(cpuid);
1497 	if (atomic_cmpset_acq_int(&nmi_kdb_lock, 0, 1)) {
1498 		nmi_call_kdb(cpu, type, frame);
1499 		call_post = false;
1500 	} else {
1501 		savectx(&stoppcbs[cpu]);
1502 		CPU_SET_ATOMIC(cpu, &stopped_cpus);
1503 		while (!atomic_cmpset_acq_int(&nmi_kdb_lock, 0, 1))
1504 			ia32_pause();
1505 		call_post = true;
1506 	}
1507 	atomic_store_rel_int(&nmi_kdb_lock, 0);
1508 	if (call_post)
1509 		cpustop_handler_post(cpu);
1510 }
1511 
1512 /*
1513  * Handle an IPI_STOP by saving our current context and spinning (or mwaiting,
1514  * if available) until we are resumed.
1515  */
1516 void
1517 cpustop_handler(void)
1518 {
1519 	struct monitorbuf *mb;
1520 	u_int cpu;
1521 	bool use_mwait;
1522 
1523 	cpu = PCPU_GET(cpuid);
1524 
1525 	savectx(&stoppcbs[cpu]);
1526 
1527 	use_mwait = (stop_mwait && (cpu_feature2 & CPUID2_MON) != 0 &&
1528 	    !mwait_cpustop_broken);
1529 	if (use_mwait) {
1530 		mb = PCPU_PTR(monitorbuf);
1531 		atomic_store_int(&mb->stop_state,
1532 		    MONITOR_STOPSTATE_STOPPED);
1533 	}
1534 
1535 	/* Indicate that we are stopped */
1536 	CPU_SET_ATOMIC(cpu, &stopped_cpus);
1537 
1538 	/* Wait for restart */
1539 	while (!CPU_ISSET(cpu, &started_cpus)) {
1540 		if (use_mwait) {
1541 			cpu_monitor(mb, 0, 0);
1542 			if (atomic_load_int(&mb->stop_state) ==
1543 			    MONITOR_STOPSTATE_STOPPED)
1544 				cpu_mwait(0, MWAIT_C1);
1545 			continue;
1546 		}
1547 
1548 		ia32_pause();
1549 
1550 		/*
1551 		 * Halt non-BSP CPUs on panic -- we're never going to need them
1552 		 * again, and might as well save power / release resources
1553 		 * (e.g., overprovisioned VM infrastructure).
1554 		 */
1555 		while (__predict_false(!IS_BSP() && KERNEL_PANICKED()))
1556 			halt();
1557 	}
1558 
1559 	cpustop_handler_post(cpu);
1560 }
1561 
1562 static void
1563 cpustop_handler_post(u_int cpu)
1564 {
1565 
1566 	CPU_CLR_ATOMIC(cpu, &started_cpus);
1567 	CPU_CLR_ATOMIC(cpu, &stopped_cpus);
1568 
1569 	/*
1570 	 * We don't broadcast TLB invalidations to other CPUs when they are
1571 	 * stopped. Hence, we clear the TLB before resuming.
1572 	 */
1573 	invltlb_glob();
1574 
1575 #if defined(__amd64__) && (defined(DDB) || defined(GDB))
1576 	amd64_db_resume_dbreg();
1577 #endif
1578 
1579 	if (cpu == 0 && cpustop_restartfunc != NULL) {
1580 		cpustop_restartfunc();
1581 		cpustop_restartfunc = NULL;
1582 	}
1583 }
1584 
1585 /*
1586  * Handle an IPI_SUSPEND by saving our current context and spinning until we
1587  * are resumed.
1588  */
1589 void
1590 cpususpend_handler(void)
1591 {
1592 	u_int cpu;
1593 
1594 	mtx_assert(&smp_ipi_mtx, MA_NOTOWNED);
1595 
1596 	cpu = PCPU_GET(cpuid);
1597 
1598 #ifdef XENHVM
1599 	/*
1600 	 * Some Xen guest types (PVH) expose a very minimal set of ACPI tables,
1601 	 * and for example have no support for SCI.  That leads to the suspend
1602 	 * stacks not being allocated, and hence when attempting to perform a
1603 	 * Xen triggered suspension FreeBSD will hit a #PF.  Avoid saving the
1604 	 * CPU and FPU contexts if the stacks are not allocated, as the
1605 	 * hypervisor will already take care of this.  Note that we could even
1606 	 * do this for Xen triggered suspensions on guests that have full ACPI
1607 	 * support, but doing so would introduce extra complexity.
1608 	 */
1609 	if (susppcbs == NULL) {
1610 		KASSERT(vm_guest == VM_GUEST_XEN, ("Missing suspend stack"));
1611 		CPU_SET_ATOMIC(cpu, &suspended_cpus);
1612 		CPU_SET_ATOMIC(cpu, &resuming_cpus);
1613 	} else
1614 #endif
1615 	if (savectx(&susppcbs[cpu]->sp_pcb)) {
1616 #ifdef __amd64__
1617 		fpususpend(susppcbs[cpu]->sp_fpususpend);
1618 #else
1619 		npxsuspend(susppcbs[cpu]->sp_fpususpend);
1620 #endif
1621 		/*
1622 		 * suspended_cpus is cleared shortly after each AP is restarted
1623 		 * by a Startup IPI, so that the BSP can proceed to restarting
1624 		 * the next AP.
1625 		 *
1626 		 * resuming_cpus gets cleared when the AP completes
1627 		 * initialization after having been released by the BSP.
1628 		 * resuming_cpus is probably not the best name for the
1629 		 * variable, because it is actually a set of processors that
1630 		 * haven't resumed yet and haven't necessarily started resuming.
1631 		 *
1632 		 * Note that suspended_cpus is meaningful only for ACPI suspend
1633 		 * as it's not really used for Xen suspend since the APs are
1634 		 * automatically restored to the running state and the correct
1635 		 * context.  For the same reason resumectx is never called in
1636 		 * that case.
1637 		 */
1638 		CPU_SET_ATOMIC(cpu, &suspended_cpus);
1639 		CPU_SET_ATOMIC(cpu, &resuming_cpus);
1640 
1641 		/*
1642 		 * Invalidate the cache after setting the global status bits.
1643 		 * The last AP to set its bit may end up being an Owner of the
1644 		 * corresponding cache line in MOESI protocol.  The AP may be
1645 		 * stopped before the cache line is written to the main memory.
1646 		 */
1647 		wbinvd();
1648 	} else {
1649 #ifdef __amd64__
1650 		fpuresume(susppcbs[cpu]->sp_fpususpend);
1651 #else
1652 		npxresume(susppcbs[cpu]->sp_fpususpend);
1653 #endif
1654 		pmap_init_pat();
1655 		initializecpu();
1656 		PCPU_SET(switchtime, 0);
1657 		PCPU_SET(switchticks, ticks);
1658 
1659 		/* Indicate that we have restarted and restored the context. */
1660 		CPU_CLR_ATOMIC(cpu, &suspended_cpus);
1661 	}
1662 
1663 	/* Wait for resume directive */
1664 	while (!CPU_ISSET(cpu, &toresume_cpus))
1665 		ia32_pause();
1666 
1667 	/* Re-apply microcode updates. */
1668 	ucode_reload();
1669 
1670 #ifdef __i386__
1671 	/* Finish removing the identity mapping of low memory for this AP. */
1672 	invltlb_glob();
1673 #endif
1674 
1675 	if (cpu_ops.cpu_resume)
1676 		cpu_ops.cpu_resume();
1677 #ifdef __amd64__
1678 	if (vmm_resume_p)
1679 		vmm_resume_p();
1680 #endif
1681 
1682 	/* Resume MCA and local APIC */
1683 	lapic_xapic_mode();
1684 	mca_resume();
1685 	lapic_setup(0);
1686 
1687 	/* Indicate that we are resumed */
1688 	CPU_CLR_ATOMIC(cpu, &resuming_cpus);
1689 	CPU_CLR_ATOMIC(cpu, &suspended_cpus);
1690 	CPU_CLR_ATOMIC(cpu, &toresume_cpus);
1691 }
1692 
1693 /*
1694  * Handle an IPI_SWI by waking delayed SWI thread.
1695  */
1696 void
1697 ipi_swi_handler(struct trapframe frame)
1698 {
1699 
1700 	intr_event_handle(clk_intr_event, &frame);
1701 }
1702 
1703 /*
1704  * This is called once the rest of the system is up and running and we're
1705  * ready to let the APs out of the pen.
1706  */
1707 static void
1708 release_aps(void *dummy __unused)
1709 {
1710 
1711 	if (mp_ncpus == 1)
1712 		return;
1713 	atomic_store_rel_int(&aps_ready, 1);
1714 	while (smp_started == 0)
1715 		ia32_pause();
1716 }
1717 SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);
1718 
1719 #ifdef COUNT_IPIS
1720 /*
1721  * Setup interrupt counters for IPI handlers.
1722  */
1723 static void
1724 mp_ipi_intrcnt(void *dummy)
1725 {
1726 	char buf[64];
1727 	int i;
1728 
1729 	CPU_FOREACH(i) {
1730 		snprintf(buf, sizeof(buf), "cpu%d:invltlb", i);
1731 		intrcnt_add(buf, &ipi_invltlb_counts[i]);
1732 		snprintf(buf, sizeof(buf), "cpu%d:invlrng", i);
1733 		intrcnt_add(buf, &ipi_invlrng_counts[i]);
1734 		snprintf(buf, sizeof(buf), "cpu%d:invlpg", i);
1735 		intrcnt_add(buf, &ipi_invlpg_counts[i]);
1736 		snprintf(buf, sizeof(buf), "cpu%d:invlcache", i);
1737 		intrcnt_add(buf, &ipi_invlcache_counts[i]);
1738 		snprintf(buf, sizeof(buf), "cpu%d:preempt", i);
1739 		intrcnt_add(buf, &ipi_preempt_counts[i]);
1740 		snprintf(buf, sizeof(buf), "cpu%d:ast", i);
1741 		intrcnt_add(buf, &ipi_ast_counts[i]);
1742 		snprintf(buf, sizeof(buf), "cpu%d:rendezvous", i);
1743 		intrcnt_add(buf, &ipi_rendezvous_counts[i]);
1744 		snprintf(buf, sizeof(buf), "cpu%d:hardclock", i);
1745 		intrcnt_add(buf, &ipi_hardclock_counts[i]);
1746 	}
1747 }
1748 SYSINIT(mp_ipi_intrcnt, SI_SUB_INTR, SI_ORDER_MIDDLE, mp_ipi_intrcnt, NULL);
1749 #endif
1750