/* xref: /freebsd/sys/amd64/amd64/mp_machdep.c (revision e17f5b1d) */
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1996, by Steve Passe
 * Copyright (c) 2003, by Peter Wemm
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the developer may NOT be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_acpi.h"
#include "opt_cpu.h"
#include "opt_ddb.h"
#include "opt_kstack_pages.h"
#include "opt_sched.h"
#include "opt_smp.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cpuset.h>
#include <sys/domainset.h>
#ifdef GPROF
#include <sys/gmon.h>
#endif
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>

#include <x86/apicreg.h>
#include <machine/clock.h>
#include <machine/cputypes.h>
#include <machine/cpufunc.h>
#include <x86/mca.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/psl.h>
#include <machine/smp.h>
#include <machine/specialreg.h>
#include <machine/tss.h>
#include <x86/ucode.h>
#include <machine/cpu.h>
#include <x86/init.h>

#ifdef DEV_ACPI
#include <contrib/dev/acpica/include/acpi.h>
#include <dev/acpica/acpivar.h>
#endif

#define WARMBOOT_TARGET		0
#define WARMBOOT_OFF		(KERNBASE + 0x0467)
#define WARMBOOT_SEG		(KERNBASE + 0x0469)

#define CMOS_REG		(0x70)
#define CMOS_DATA		(0x71)
#define BIOS_RESET		(0x0f)
#define BIOS_WARM		(0x0a)

#define GiB(v)			(v ## ULL << 30)

#define	AP_BOOTPT_SZ		(PAGE_SIZE * 3)

/* Temporary variables for init_secondary()  */
char *doublefault_stack;
char *mce_stack;
char *nmi_stack;
char *dbg_stack;

/*
 * Local data and functions.
 */

static int	start_ap(int apic_id);

static bool
is_kernel_paddr(vm_paddr_t pa)
{

	return (pa >= trunc_2mpage(btext - KERNBASE) &&
	   pa < round_page(_end - KERNBASE));
}

static bool
is_mpboot_good(vm_paddr_t start, vm_paddr_t end)
{

	return (start + AP_BOOTPT_SZ <= GiB(4) && atop(end) < Maxmem);
}

/*
 * Calculate usable address in base memory for AP trampoline code.
 */
void
mp_bootaddress(vm_paddr_t *physmap, unsigned int *physmap_idx)
{
	vm_paddr_t start, end;
	unsigned int i;
	bool allocated;

	alloc_ap_trampoline(physmap, physmap_idx);

	/*
	 * Find a memory region big enough below the 4GB boundary to
	 * store the initial page tables.  Region must be mapped by
	 * the direct map.
	 *
	 * Note that it needs to be aligned to a page boundary.
	 */
	allocated = false;
	for (i = *physmap_idx; i <= *physmap_idx; i -= 2) {
		/*
		 * First, try to chomp at the start of the physmap region.
		 * Kernel binary might claim it already.
		 */
		start = round_page(physmap[i]);
		end = start + AP_BOOTPT_SZ;
		if (start < end && end <= physmap[i + 1] &&
		    is_mpboot_good(start, end) &&
		    !is_kernel_paddr(start) && !is_kernel_paddr(end - 1)) {
			allocated = true;
			physmap[i] = end;
			break;
		}

		/*
		 * Second, try to chomp at the end.  Again, check
		 * against kernel.
		 */
		end = trunc_page(physmap[i + 1]);
		start = end - AP_BOOTPT_SZ;
		if (start < end && start >= physmap[i] &&
		    is_mpboot_good(start, end) &&
		    !is_kernel_paddr(start) && !is_kernel_paddr(end - 1)) {
			allocated = true;
			physmap[i + 1] = start;
			break;
		}
	}
	if (allocated) {
		mptramp_pagetables = start;
		if (physmap[i] == physmap[i + 1] && *physmap_idx != 0) {
			memmove(&physmap[i], &physmap[i + 2],
			    sizeof(*physmap) * (*physmap_idx - i + 2));
			*physmap_idx -= 2;
		}
	} else {
		mptramp_pagetables = trunc_page(boot_address) - AP_BOOTPT_SZ;
		if (bootverbose)
			printf(
"Cannot find enough space for the initial AP page tables, placing them at %#x",
			    mptramp_pagetables);
	}
}
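
/*
 * Worked example for the carving above (hypothetical values, not part
 * of the original source): with PAGE_SIZE == 4096, AP_BOOTPT_SZ is
 * 0x3000.  If a physmap pair describes [0x1000, 0x9f000), the "chomp
 * at the start" case picks start = round_page(0x1000) = 0x1000 and
 * end = 0x4000; assuming the kernel-image and Maxmem checks also pass,
 * physmap[i] is advanced to 0x4000 and mptramp_pagetables becomes
 * 0x1000.
 */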

/*
 * Initialize the IPI handlers and start up the APs.
 */
void
cpu_mp_start(void)
{
	int i;

	/* Initialize the logical ID to APIC ID table. */
	for (i = 0; i < MAXCPU; i++) {
		cpu_apic_ids[i] = -1;
	}

	/* Install an inter-CPU IPI for cache and TLB invalidations. */
	setidt(IPI_INVLOP, pti ? IDTVEC(invlop_pti) : IDTVEC(invlop),
	    SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for all-CPU rendezvous */
	setidt(IPI_RENDEZVOUS, pti ? IDTVEC(rendezvous_pti) :
	    IDTVEC(rendezvous), SDT_SYSIGT, SEL_KPL, 0);

	/* Install generic inter-CPU IPI handler */
	setidt(IPI_BITMAP_VECTOR, pti ? IDTVEC(ipi_intr_bitmap_handler_pti) :
	    IDTVEC(ipi_intr_bitmap_handler), SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for CPU stop/restart */
	setidt(IPI_STOP, pti ? IDTVEC(cpustop_pti) : IDTVEC(cpustop),
	    SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for CPU suspend/resume */
	setidt(IPI_SUSPEND, pti ? IDTVEC(cpususpend_pti) : IDTVEC(cpususpend),
	    SDT_SYSIGT, SEL_KPL, 0);

	/* Set boot_cpu_id if needed. */
	if (boot_cpu_id == -1) {
		boot_cpu_id = PCPU_GET(apic_id);
		cpu_info[boot_cpu_id].cpu_bsp = 1;
	} else
		KASSERT(boot_cpu_id == PCPU_GET(apic_id),
		    ("BSP's APIC ID doesn't match boot_cpu_id"));

	/* Probe logical/physical core configuration. */
	topo_probe();

	assign_cpu_ids();

	/* Start each Application Processor */
	init_ops.start_all_aps();

	set_interrupt_apic_ids();

#if defined(DEV_ACPI) && MAXMEMDOM > 1
	acpi_pxm_set_cpu_locality();
#endif
}

/*
 * AP CPUs call this to initialize themselves.
 */
void
init_secondary(void)
{
	struct pcpu *pc;
	struct nmi_pcpu *np;
	struct user_segment_descriptor *gdt;
	struct region_descriptor ap_gdt;
	u_int64_t cr0;
	int cpu, gsel_tss, x;

	/* Set by the startup code for us to use */
	cpu = bootAP;

	/* Update microcode before doing anything else. */
	ucode_load_ap(cpu);

	/* Get per-cpu data and save  */
	pc = &__pcpu[cpu];

	/* prime data page for it to use */
	pcpu_init(pc, cpu, sizeof(struct pcpu));
	dpcpu_init(dpcpu, cpu);
	pc->pc_apic_id = cpu_apic_ids[cpu];
	pc->pc_prvspace = pc;
	pc->pc_curthread = 0;
	pc->pc_tssp = &pc->pc_common_tss;
	pc->pc_rsp0 = 0;
	pc->pc_pti_rsp0 = (((vm_offset_t)&pc->pc_pti_stack +
	    PC_PTI_STACK_SZ * sizeof(uint64_t)) & ~0xful);
	gdt = pc->pc_gdt;
	pc->pc_tss = (struct system_segment_descriptor *)&gdt[GPROC0_SEL];
	pc->pc_fs32p = &gdt[GUFS32_SEL];
	pc->pc_gs32p = &gdt[GUGS32_SEL];
	pc->pc_ldt = (struct system_segment_descriptor *)&gdt[GUSERLDT_SEL];
	pc->pc_ucr3_load_mask = PMAP_UCR3_NOMASK;
	/* See comment in pmap_bootstrap(). */
	pc->pc_pcid_next = PMAP_PCID_KERN + 2;
	pc->pc_pcid_gen = 1;

	pc->pc_smp_tlb_gen = 1;

	/* Init tss */
	pc->pc_common_tss = __pcpu[0].pc_common_tss;
	pc->pc_common_tss.tss_iobase = sizeof(struct amd64tss) +
	    IOPERM_BITMAP_SIZE;
	pc->pc_common_tss.tss_rsp0 = 0;

	/* The doublefault stack runs on IST1. */
	np = ((struct nmi_pcpu *)&doublefault_stack[PAGE_SIZE]) - 1;
	np->np_pcpu = (register_t)pc;
	pc->pc_common_tss.tss_ist1 = (long)np;

	/* The NMI stack runs on IST2. */
	np = ((struct nmi_pcpu *) &nmi_stack[PAGE_SIZE]) - 1;
	np->np_pcpu = (register_t)pc;
	pc->pc_common_tss.tss_ist2 = (long)np;

	/* The MC# stack runs on IST3. */
	np = ((struct nmi_pcpu *) &mce_stack[PAGE_SIZE]) - 1;
	np->np_pcpu = (register_t)pc;
	pc->pc_common_tss.tss_ist3 = (long)np;

	/* The DB# stack runs on IST4. */
	np = ((struct nmi_pcpu *) &dbg_stack[PAGE_SIZE]) - 1;
	np->np_pcpu = (register_t)pc;
	pc->pc_common_tss.tss_ist4 = (long)np;

	/* Prepare private GDT */
	gdt_segs[GPROC0_SEL].ssd_base = (long)&pc->pc_common_tss;
	for (x = 0; x < NGDT; x++) {
		if (x != GPROC0_SEL && x != GPROC0_SEL + 1 &&
		    x != GUSERLDT_SEL && x != GUSERLDT_SEL + 1)
			ssdtosd(&gdt_segs[x], &gdt[x]);
	}
	ssdtosyssd(&gdt_segs[GPROC0_SEL],
	    (struct system_segment_descriptor *)&gdt[GPROC0_SEL]);
	ap_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
	ap_gdt.rd_base = (u_long)gdt;
	lgdt(&ap_gdt);			/* does magic intra-segment return */

	wrmsr(MSR_FSBASE, 0);		/* User value */
	wrmsr(MSR_GSBASE, (u_int64_t)pc);
	wrmsr(MSR_KGSBASE, (u_int64_t)pc);	/* XXX User value while we're in the kernel */
	fix_cpuid();

	lidt(&r_idt);

	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	ltr(gsel_tss);

	/*
	 * Set to a known state:
	 * Set by mpboot.s: CR0_PG, CR0_PE
	 * Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
	 */
	cr0 = rcr0();
	cr0 &= ~(CR0_CD | CR0_NW | CR0_EM);
	load_cr0(cr0);

	amd64_conf_fast_syscall();

	/* signal our startup to the BSP. */
	mp_naps++;

	/* Spin until the BSP releases the APs. */
	while (atomic_load_acq_int(&aps_ready) == 0)
		ia32_pause();

	init_secondary_tail();
}
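
/*
 * Layout sketch of one IST stack page set up above (descriptive only,
 * not part of the original source): the struct nmi_pcpu occupies the
 * last bytes of the page and tss_istN points at it, so the exception
 * handler's stack grows downward from there, leaving np_pcpu intact
 * and letting the handler recover this CPU's pcpu pointer even while
 * a user gsbase is still loaded.
 *
 *   page base                                          page end
 *   |<-------------- usable stack -------------->|<- nmi_pcpu ->|
 *                                                ^ tss_istN
 */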

/*******************************************************************
 * local functions and data
 */

#ifdef NUMA
static void
mp_realloc_pcpu(int cpuid, int domain)
{
	vm_page_t m;
	vm_offset_t oa, na;

	oa = (vm_offset_t)&__pcpu[cpuid];
	if (_vm_phys_domain(pmap_kextract(oa)) == domain)
		return;
	m = vm_page_alloc_domain(NULL, 0, domain,
	    VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ);
	if (m == NULL)
		return;
	na = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
	pagecopy((void *)oa, (void *)na);
	pmap_qenter((vm_offset_t)&__pcpu[cpuid], &m, 1);
	/* XXX old pcpu page leaked. */
}
#endif

/*
 * start each AP in our list
 */
int
native_start_all_aps(void)
{
	u_int64_t *pt4, *pt3, *pt2;
	u_int32_t mpbioswarmvec;
	int apic_id, cpu, domain, i;
	u_char mpbiosreason;

	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);

	/* copy the AP 1st level boot code */
	bcopy(mptramp_start, (void *)PHYS_TO_DMAP(boot_address), bootMP_size);

	/* Locate the page tables, they'll be below the trampoline */
	pt4 = (uint64_t *)PHYS_TO_DMAP(mptramp_pagetables);
	pt3 = pt4 + (PAGE_SIZE) / sizeof(u_int64_t);
	pt2 = pt3 + (PAGE_SIZE) / sizeof(u_int64_t);

	/* Create the initial 1GB replicated page tables */
	for (i = 0; i < 512; i++) {
		/* Each slot of the level 4 pages points to the same level 3 page */
		pt4[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables + PAGE_SIZE);
		pt4[i] |= PG_V | PG_RW | PG_U;

		/* Each slot of the level 3 pages points to the same level 2 page */
		pt3[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables + (2 * PAGE_SIZE));
		pt3[i] |= PG_V | PG_RW | PG_U;

		/* The level 2 page slots are mapped with 2MB pages for 1GB. */
		pt2[i] = i * (2 * 1024 * 1024);
		pt2[i] |= PG_V | PG_RW | PG_PS | PG_U;
	}

	/* save the current value of the warm-start vector */
	mpbioswarmvec = *((u_int32_t *) WARMBOOT_OFF);
	outb(CMOS_REG, BIOS_RESET);
	mpbiosreason = inb(CMOS_DATA);

	/* setup a vector to our boot code */
	*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
	*((volatile u_short *) WARMBOOT_SEG) = (boot_address >> 4);
	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, BIOS_WARM);	/* 'warm-start' */

	/* Relocate pcpu areas to the correct domain. */
#ifdef NUMA
	if (vm_ndomains > 1)
		for (cpu = 1; cpu < mp_ncpus; cpu++) {
			apic_id = cpu_apic_ids[cpu];
			domain = acpi_pxm_get_cpu_locality(apic_id);
			mp_realloc_pcpu(cpu, domain);
		}
#endif

	/* start each AP */
	domain = 0;
	for (cpu = 1; cpu < mp_ncpus; cpu++) {
		apic_id = cpu_apic_ids[cpu];
#ifdef NUMA
		if (vm_ndomains > 1)
			domain = acpi_pxm_get_cpu_locality(apic_id);
#endif
		/* allocate and set up an idle stack data page */
		bootstacks[cpu] = (void *)kmem_malloc(kstack_pages * PAGE_SIZE,
		    M_WAITOK | M_ZERO);
		doublefault_stack = (char *)kmem_malloc(PAGE_SIZE, M_WAITOK |
		    M_ZERO);
		mce_stack = (char *)kmem_malloc(PAGE_SIZE, M_WAITOK | M_ZERO);
		nmi_stack = (char *)kmem_malloc_domainset(
		    DOMAINSET_PREF(domain), PAGE_SIZE, M_WAITOK | M_ZERO);
		dbg_stack = (char *)kmem_malloc_domainset(
		    DOMAINSET_PREF(domain), PAGE_SIZE, M_WAITOK | M_ZERO);
		dpcpu = (void *)kmem_malloc_domainset(DOMAINSET_PREF(domain),
		    DPCPU_SIZE, M_WAITOK | M_ZERO);

		bootSTK = (char *)bootstacks[cpu] +
		    kstack_pages * PAGE_SIZE - 8;
		bootAP = cpu;

		/* attempt to start the Application Processor */
		if (!start_ap(apic_id)) {
			/* restore the warmstart vector */
			*(u_int32_t *) WARMBOOT_OFF = mpbioswarmvec;
			panic("AP #%d (PHY# %d) failed!", cpu, apic_id);
		}

		CPU_SET(cpu, &all_cpus);	/* record AP in CPU map */
	}

	/* restore the warmstart vector */
	*(u_int32_t *) WARMBOOT_OFF = mpbioswarmvec;

	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, mpbiosreason);

	/* number of APs actually started */
	return (mp_naps);
}
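
/*
 * Layout sketch of the AP_BOOTPT_SZ region built above (descriptive
 * only; offsets are relative to mptramp_pagetables):
 *
 *   +0 * PAGE_SIZE  PML4: all 512 slots -> the single PDP page
 *   +1 * PAGE_SIZE  PDP:  all 512 slots -> the single PD page
 *   +2 * PAGE_SIZE  PD:   slot i maps physical i * 2MB with PG_PS
 *
 * so any virtual address used while the AP runs on the trampoline page
 * tables resolves into the low 1GB of physical memory, replicated
 * across the whole address space.
 */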

/*
 * This function starts the AP (application processor) identified
 * by the APIC ID 'physicalCpu'.  It does quite a "song and dance"
 * to accomplish this.  This is necessary because of the nuances
 * of the different hardware we might encounter.  It isn't pretty,
 * but it seems to work.
 */
static int
start_ap(int apic_id)
{
	int vector, ms;
	int cpus;

	/* calculate the vector */
	vector = (boot_address >> 12) & 0xff;

	/* used as a watchpoint to signal AP startup */
	cpus = mp_naps;

	ipi_startup(apic_id, vector);

	/* Wait up to 5 seconds for it to start. */
	for (ms = 0; ms < 5000; ms++) {
		if (mp_naps > cpus)
			return 1;	/* return SUCCESS */
		DELAY(1000);
	}
	return 0;		/* return FAILURE */
}
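
/*
 * Example of the vector arithmetic above (hypothetical boot_address,
 * not from the original source): a trampoline copied to physical
 * 0x7000 yields vector (0x7000 >> 12) & 0xff == 0x07, and the startup
 * IPI makes the AP begin execution in real mode at physical address
 * vector << 12 == 0x7000.
 */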

/*
 * Flush the TLB on other CPUs.
 */

/*
 * Invalidation request.  PCPU pc_smp_tlb_op uses u_int instead of the
 * enum to avoid both namespace and ABI issues (with enums).
 */
enum invl_op_codes {
      INVL_OP_TLB		= 1,
      INVL_OP_TLB_INVPCID	= 2,
      INVL_OP_TLB_INVPCID_PTI	= 3,
      INVL_OP_TLB_PCID		= 4,
      INVL_OP_PGRNG		= 5,
      INVL_OP_PGRNG_INVPCID	= 6,
      INVL_OP_PGRNG_PCID	= 7,
      INVL_OP_PG		= 8,
      INVL_OP_PG_INVPCID	= 9,
      INVL_OP_PG_PCID		= 10,
      INVL_OP_CACHE		= 11,
};

/*
 * These variables are initialized at startup to reflect how each of
 * the different kinds of invalidations should be performed on the
 * current machine and environment.
 */
static enum invl_op_codes invl_op_tlb;
static enum invl_op_codes invl_op_pgrng;
static enum invl_op_codes invl_op_pg;

/*
 * Scoreboard of IPI completion notifications from target to IPI initiator.
 *
 * Each CPU can initiate shootdown IPI independently from other CPUs.
 * Initiator enters critical section, then fills its local PCPU
 * shootdown info (pc_smp_tlb_ vars), then clears scoreboard generation
 * at location (cpu, my_cpuid) for each target cpu.  After that IPI is
 * sent to all targets which scan for zeroed scoreboard generation
 * words.  Upon finding such word the shootdown data is read from
 * corresponding cpu's pcpu, and generation is set.  Meantime initiator
 * loops waiting for all zeroed generations in scoreboard to update.
 */
static uint32_t *invl_scoreboard;

static void
invl_scoreboard_init(void *arg __unused)
{
	u_int i;

	invl_scoreboard = malloc(sizeof(uint32_t) * (mp_maxid + 1) *
	    (mp_maxid + 1), M_DEVBUF, M_WAITOK);
	for (i = 0; i < (mp_maxid + 1) * (mp_maxid + 1); i++)
		invl_scoreboard[i] = 1;

	if (pmap_pcid_enabled) {
		if (invpcid_works) {
			if (pti)
				invl_op_tlb = INVL_OP_TLB_INVPCID_PTI;
			else
				invl_op_tlb = INVL_OP_TLB_INVPCID;
			invl_op_pgrng = INVL_OP_PGRNG_INVPCID;
			invl_op_pg = INVL_OP_PG_INVPCID;
		} else {
			invl_op_tlb = INVL_OP_TLB_PCID;
			invl_op_pgrng = INVL_OP_PGRNG_PCID;
			invl_op_pg = INVL_OP_PG_PCID;
		}
	} else {
		invl_op_tlb = INVL_OP_TLB;
		invl_op_pgrng = INVL_OP_PGRNG;
		invl_op_pg = INVL_OP_PG;
	}
}
SYSINIT(invl_ops, SI_SUB_SMP, SI_ORDER_FIRST, invl_scoreboard_init, NULL);
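
/*
 * Summary of the selection above (derived from the code, no new
 * behavior): PCID disabled -> plain INVL_OP_* handlers; PCID with
 * INVPCID -> the *_INVPCID handlers (plus the _PTI TLB variant when
 * page table isolation is enabled); PCID without INVPCID -> the
 * *_PCID handlers, which reload %cr3 instead.
 */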

static uint32_t *
invl_scoreboard_getcpu(u_int cpu)
{
	return (invl_scoreboard + cpu * (mp_maxid + 1));
}

static uint32_t *
invl_scoreboard_slot(u_int cpu)
{
	return (invl_scoreboard_getcpu(cpu) + PCPU_GET(cpuid));
}
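
/*
 * Indexing example (hypothetical CPU count, not from the original
 * source): with mp_maxid == 3 the scoreboard is a 4 x 4 array of
 * generation words, one row per target CPU and one column per
 * initiator.  An initiator with cpuid 2 that targets CPU 1 clears
 * invl_scoreboard[1 * 4 + 2], and CPU 1 later stores the initiator's
 * generation back into that same word to acknowledge the request.
 */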

/*
 * Used by pmap to request cache or TLB invalidation on local and
 * remote processors.  Mask provides the set of remote CPUs which are
 * to be signalled with the invalidation IPI.  As an optimization, the
 * curcpu_cb callback is invoked on the calling CPU while waiting for
 * remote CPUs to complete the operation.
 *
 * The callback function is called unconditionally on the caller's
 * underlying processor, even when this processor is not set in the
 * mask.  So, the callback function must be prepared to handle such
 * spurious invocations.
 *
 * Interrupts must be enabled when calling the function with smp
 * started, to avoid deadlock with other IPIs that are protected with
 * smp_ipi_mtx spinlock at the initiator side.
 */
static void
smp_targeted_tlb_shootdown(cpuset_t mask, pmap_t pmap, vm_offset_t addr1,
    vm_offset_t addr2, smp_invl_cb_t curcpu_cb, enum invl_op_codes op)
{
	cpuset_t other_cpus, mask1;
	uint32_t generation, *p_cpudone;
	int cpu;

	/*
	 * It is not necessary to signal other CPUs while booting or
	 * when in the debugger.
	 */
	if (kdb_active || KERNEL_PANICKED() || !smp_started) {
		curcpu_cb(pmap, addr1, addr2);
		return;
	}

	sched_pin();

	/*
	 * Check for other cpus.  Return if none.
	 */
	if (CPU_ISFULLSET(&mask)) {
		if (mp_ncpus <= 1)
			goto nospinexit;
	} else {
		CPU_CLR(PCPU_GET(cpuid), &mask);
		if (CPU_EMPTY(&mask))
			goto nospinexit;
	}

	/*
	 * Initiator must have interrupts enabled, which prevents
	 * non-invalidation IPIs that take smp_ipi_mtx spinlock,
	 * from deadlocking with us.  On the other hand, preemption
	 * must be disabled to pin initiator to the instance of the
	 * pcpu pc_smp_tlb data and scoreboard line.
	 */
	KASSERT((read_rflags() & PSL_I) != 0,
	    ("smp_targeted_tlb_shootdown: interrupts disabled"));
	critical_enter();

	PCPU_SET(smp_tlb_addr1, addr1);
	PCPU_SET(smp_tlb_addr2, addr2);
	PCPU_SET(smp_tlb_pmap, pmap);
	generation = PCPU_GET(smp_tlb_gen);
	if (++generation == 0)
		generation = 1;
	PCPU_SET(smp_tlb_gen, generation);
	PCPU_SET(smp_tlb_op, op);
	/* Fence between filling smp_tlb fields and clearing scoreboard. */
	atomic_thread_fence_rel();

	mask1 = mask;
	while ((cpu = CPU_FFS(&mask1)) != 0) {
		cpu--;
		CPU_CLR(cpu, &mask1);
		KASSERT(*invl_scoreboard_slot(cpu) != 0,
		    ("IPI scoreboard is zero, initiator %d target %d",
		    PCPU_GET(cpuid), cpu));
		*invl_scoreboard_slot(cpu) = 0;
	}

	/*
	 * IPI acts as a fence between writing to the scoreboard above
	 * (zeroing slot) and reading from it below (wait for
	 * acknowledgment).
	 */
	if (CPU_ISFULLSET(&mask)) {
		ipi_all_but_self(IPI_INVLOP);
		other_cpus = all_cpus;
		CPU_CLR(PCPU_GET(cpuid), &other_cpus);
	} else {
		other_cpus = mask;
		while ((cpu = CPU_FFS(&mask)) != 0) {
			cpu--;
			CPU_CLR(cpu, &mask);
			CTR3(KTR_SMP, "%s: cpu: %d invl ipi op: %x", __func__,
			    cpu, op);
			ipi_send_cpu(cpu, IPI_INVLOP);
		}
	}
	curcpu_cb(pmap, addr1, addr2);
	while ((cpu = CPU_FFS(&other_cpus)) != 0) {
		cpu--;
		CPU_CLR(cpu, &other_cpus);
		p_cpudone = invl_scoreboard_slot(cpu);
		while (atomic_load_int(p_cpudone) != generation)
			ia32_pause();
	}
	critical_exit();
	sched_unpin();
	return;

nospinexit:
	curcpu_cb(pmap, addr1, addr2);
	sched_unpin();
}

void
smp_masked_invltlb(cpuset_t mask, pmap_t pmap, smp_invl_cb_t curcpu_cb)
{
	smp_targeted_tlb_shootdown(mask, pmap, 0, 0, curcpu_cb, invl_op_tlb);
#ifdef COUNT_XINVLTLB_HITS
	ipi_global++;
#endif
}

void
smp_masked_invlpg(cpuset_t mask, vm_offset_t addr, pmap_t pmap,
    smp_invl_cb_t curcpu_cb)
{
	smp_targeted_tlb_shootdown(mask, pmap, addr, 0, curcpu_cb, invl_op_pg);
#ifdef COUNT_XINVLTLB_HITS
	ipi_page++;
#endif
}

void
smp_masked_invlpg_range(cpuset_t mask, vm_offset_t addr1, vm_offset_t addr2,
    pmap_t pmap, smp_invl_cb_t curcpu_cb)
{
	smp_targeted_tlb_shootdown(mask, pmap, addr1, addr2, curcpu_cb,
	    invl_op_pgrng);
#ifdef COUNT_XINVLTLB_HITS
	ipi_range++;
	ipi_range_size += (addr2 - addr1) / PAGE_SIZE;
#endif
}

void
smp_cache_flush(smp_invl_cb_t curcpu_cb)
{
	smp_targeted_tlb_shootdown(all_cpus, NULL, 0, 0, curcpu_cb,
	    INVL_OP_CACHE);
}
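
/*
 * Hedged usage sketch (hypothetical callback; the real callers live in
 * pmap.c): a caller passes the CPU set, the pmap, the address range and
 * a callback that performs the local invalidation while the remote
 * CPUs process the IPI.
 */
#if 0	/* illustrative only, not compiled */
static void
example_invlpg_curcpu_cb(pmap_t pmap __unused, vm_offset_t va,
    vm_offset_t unused __unused)
{
	invlpg(va);	/* local invalidation on the initiating CPU */
}

	/* ... from a pmap_invalidate_page()-style path: */
	smp_masked_invlpg(all_cpus, va, kernel_pmap, example_invlpg_curcpu_cb);
#endif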

/*
 * Handlers for TLB related IPIs
 */
static void
invltlb_handler(pmap_t smp_tlb_pmap)
{
#ifdef COUNT_XINVLTLB_HITS
	xhits_gbl[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invltlb_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	if (smp_tlb_pmap == kernel_pmap)
		invltlb_glob();
	else
		invltlb();
}

static void
invltlb_invpcid_handler(pmap_t smp_tlb_pmap)
{
	struct invpcid_descr d;

#ifdef COUNT_XINVLTLB_HITS
	xhits_gbl[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invltlb_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	d.pcid = smp_tlb_pmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid;
	d.pad = 0;
	d.addr = 0;
	invpcid(&d, smp_tlb_pmap == kernel_pmap ? INVPCID_CTXGLOB :
	    INVPCID_CTX);
}

static void
invltlb_invpcid_pti_handler(pmap_t smp_tlb_pmap)
{
	struct invpcid_descr d;

#ifdef COUNT_XINVLTLB_HITS
	xhits_gbl[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invltlb_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	d.pcid = smp_tlb_pmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid;
	d.pad = 0;
	d.addr = 0;
	if (smp_tlb_pmap == kernel_pmap) {
		/*
		 * This invalidation actually needs to clear kernel
		 * mappings from the TLB in the current pmap, but
		 * since we were asked for the flush in the kernel
		 * pmap, achieve it by performing a global flush.
		 */
		invpcid(&d, INVPCID_CTXGLOB);
	} else {
		invpcid(&d, INVPCID_CTX);
		if (smp_tlb_pmap == PCPU_GET(curpmap))
			PCPU_SET(ucr3_load_mask, ~CR3_PCID_SAVE);
	}
}

static void
invltlb_pcid_handler(pmap_t smp_tlb_pmap)
{
	uint32_t pcid;

#ifdef COUNT_XINVLTLB_HITS
	xhits_gbl[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invltlb_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	if (smp_tlb_pmap == kernel_pmap) {
		invltlb_glob();
	} else {
		/*
		 * The current pmap might not be equal to
		 * smp_tlb_pmap.  The clearing of the pm_gen in
		 * pmap_invalidate_all() takes care of TLB
		 * invalidation when switching to the pmap on this
		 * CPU.
		 */
		if (smp_tlb_pmap == PCPU_GET(curpmap)) {
			pcid = smp_tlb_pmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid;
			load_cr3(smp_tlb_pmap->pm_cr3 | pcid);
			if (smp_tlb_pmap->pm_ucr3 != PMAP_NO_CR3)
				PCPU_SET(ucr3_load_mask, ~CR3_PCID_SAVE);
		}
	}
}

static void
invlpg_handler(vm_offset_t smp_tlb_addr1)
{
#ifdef COUNT_XINVLTLB_HITS
	xhits_pg[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlpg_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	invlpg(smp_tlb_addr1);
}

static void
invlpg_invpcid_handler(pmap_t smp_tlb_pmap, vm_offset_t smp_tlb_addr1)
{
	struct invpcid_descr d;

#ifdef COUNT_XINVLTLB_HITS
	xhits_pg[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlpg_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	invlpg(smp_tlb_addr1);
	if (smp_tlb_pmap == PCPU_GET(curpmap) &&
	    smp_tlb_pmap->pm_ucr3 != PMAP_NO_CR3 &&
	    PCPU_GET(ucr3_load_mask) == PMAP_UCR3_NOMASK) {
		d.pcid = smp_tlb_pmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid |
		    PMAP_PCID_USER_PT;
		d.pad = 0;
		d.addr = smp_tlb_addr1;
		invpcid(&d, INVPCID_ADDR);
	}
}

static void
invlpg_pcid_handler(pmap_t smp_tlb_pmap, vm_offset_t smp_tlb_addr1)
{
	uint64_t kcr3, ucr3;
	uint32_t pcid;

#ifdef COUNT_XINVLTLB_HITS
	xhits_pg[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlpg_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	invlpg(smp_tlb_addr1);
	if (smp_tlb_pmap == PCPU_GET(curpmap) &&
	    (ucr3 = smp_tlb_pmap->pm_ucr3) != PMAP_NO_CR3 &&
	    PCPU_GET(ucr3_load_mask) == PMAP_UCR3_NOMASK) {
		pcid = smp_tlb_pmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid;
		kcr3 = smp_tlb_pmap->pm_cr3 | pcid | CR3_PCID_SAVE;
		ucr3 |= pcid | PMAP_PCID_USER_PT | CR3_PCID_SAVE;
		pmap_pti_pcid_invlpg(ucr3, kcr3, smp_tlb_addr1);
	}
}

static void
invlrng_handler(vm_offset_t smp_tlb_addr1, vm_offset_t smp_tlb_addr2)
{
	vm_offset_t addr, addr2;

#ifdef COUNT_XINVLTLB_HITS
	xhits_rng[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlrng_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	addr = smp_tlb_addr1;
	addr2 = smp_tlb_addr2;
	do {
		invlpg(addr);
		addr += PAGE_SIZE;
	} while (addr < addr2);
}

static void
invlrng_invpcid_handler(pmap_t smp_tlb_pmap, vm_offset_t smp_tlb_addr1,
    vm_offset_t smp_tlb_addr2)
{
	struct invpcid_descr d;
	vm_offset_t addr, addr2;

#ifdef COUNT_XINVLTLB_HITS
	xhits_rng[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlrng_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	addr = smp_tlb_addr1;
	addr2 = smp_tlb_addr2;
	do {
		invlpg(addr);
		addr += PAGE_SIZE;
	} while (addr < addr2);
	if (smp_tlb_pmap == PCPU_GET(curpmap) &&
	    smp_tlb_pmap->pm_ucr3 != PMAP_NO_CR3 &&
	    PCPU_GET(ucr3_load_mask) == PMAP_UCR3_NOMASK) {
		d.pcid = smp_tlb_pmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid |
		    PMAP_PCID_USER_PT;
		d.pad = 0;
		d.addr = smp_tlb_addr1;
		do {
			invpcid(&d, INVPCID_ADDR);
			d.addr += PAGE_SIZE;
		} while (d.addr < addr2);
	}
}

static void
invlrng_pcid_handler(pmap_t smp_tlb_pmap, vm_offset_t smp_tlb_addr1,
    vm_offset_t smp_tlb_addr2)
{
	vm_offset_t addr, addr2;
	uint64_t kcr3, ucr3;
	uint32_t pcid;

#ifdef COUNT_XINVLTLB_HITS
	xhits_rng[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlrng_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	addr = smp_tlb_addr1;
	addr2 = smp_tlb_addr2;
	do {
		invlpg(addr);
		addr += PAGE_SIZE;
	} while (addr < addr2);
	if (smp_tlb_pmap == PCPU_GET(curpmap) &&
	    (ucr3 = smp_tlb_pmap->pm_ucr3) != PMAP_NO_CR3 &&
	    PCPU_GET(ucr3_load_mask) == PMAP_UCR3_NOMASK) {
		pcid = smp_tlb_pmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid;
		kcr3 = smp_tlb_pmap->pm_cr3 | pcid | CR3_PCID_SAVE;
		ucr3 |= pcid | PMAP_PCID_USER_PT | CR3_PCID_SAVE;
		pmap_pti_pcid_invlrng(ucr3, kcr3, smp_tlb_addr1, addr2);
	}
}

static void
invlcache_handler(void)
{
#ifdef COUNT_IPIS
	(*ipi_invlcache_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */
	wbinvd();
}

static void
invlop_handler_one_req(enum invl_op_codes smp_tlb_op, pmap_t smp_tlb_pmap,
    vm_offset_t smp_tlb_addr1, vm_offset_t smp_tlb_addr2)
{
	switch (smp_tlb_op) {
	case INVL_OP_TLB:
		invltlb_handler(smp_tlb_pmap);
		break;
	case INVL_OP_TLB_INVPCID:
		invltlb_invpcid_handler(smp_tlb_pmap);
		break;
	case INVL_OP_TLB_INVPCID_PTI:
		invltlb_invpcid_pti_handler(smp_tlb_pmap);
		break;
	case INVL_OP_TLB_PCID:
		invltlb_pcid_handler(smp_tlb_pmap);
		break;
	case INVL_OP_PGRNG:
		invlrng_handler(smp_tlb_addr1, smp_tlb_addr2);
		break;
	case INVL_OP_PGRNG_INVPCID:
		invlrng_invpcid_handler(smp_tlb_pmap, smp_tlb_addr1,
		    smp_tlb_addr2);
		break;
	case INVL_OP_PGRNG_PCID:
		invlrng_pcid_handler(smp_tlb_pmap, smp_tlb_addr1,
		    smp_tlb_addr2);
		break;
	case INVL_OP_PG:
		invlpg_handler(smp_tlb_addr1);
		break;
	case INVL_OP_PG_INVPCID:
		invlpg_invpcid_handler(smp_tlb_pmap, smp_tlb_addr1);
		break;
	case INVL_OP_PG_PCID:
		invlpg_pcid_handler(smp_tlb_pmap, smp_tlb_addr1);
		break;
	case INVL_OP_CACHE:
		invlcache_handler();
		break;
	default:
		__assert_unreachable();
		break;
	}
}

void
invlop_handler(void)
{
	struct pcpu *initiator_pc;
	pmap_t smp_tlb_pmap;
	vm_offset_t smp_tlb_addr1, smp_tlb_addr2;
	u_int initiator_cpu_id;
	enum invl_op_codes smp_tlb_op;
	uint32_t *scoreboard, smp_tlb_gen;

	scoreboard = invl_scoreboard_getcpu(PCPU_GET(cpuid));
	for (;;) {
		for (initiator_cpu_id = 0; initiator_cpu_id <= mp_maxid;
		    initiator_cpu_id++) {
			if (scoreboard[initiator_cpu_id] == 0)
				break;
		}
		if (initiator_cpu_id > mp_maxid)
			break;
		initiator_pc = cpuid_to_pcpu[initiator_cpu_id];

		/*
		 * This acquire fence and its corresponding release
		 * fence in smp_targeted_tlb_shootdown() is between
		 * reading zero scoreboard slot and accessing PCPU of
		 * initiator for pc_smp_tlb values.
		 */
		atomic_thread_fence_acq();
		smp_tlb_pmap = initiator_pc->pc_smp_tlb_pmap;
		smp_tlb_addr1 = initiator_pc->pc_smp_tlb_addr1;
		smp_tlb_addr2 = initiator_pc->pc_smp_tlb_addr2;
		smp_tlb_op = initiator_pc->pc_smp_tlb_op;
		smp_tlb_gen = initiator_pc->pc_smp_tlb_gen;

		/*
		 * Ensure that we do not make our scoreboard
		 * notification visible to the initiator until the
		 * pc_smp_tlb values are read.  The corresponding
		 * fence is implicitly provided by the barrier in the
		 * IPI send operation before the APIC ICR register
		 * write.
		 *
		 * As an optimization, the request is acknowledged
		 * before the actual invalidation is performed.  It is
		 * safe because the target CPU cannot return to
		 * userspace before the handler finishes.  Only an NMI
		 * can preempt the handler, but an NMI would see the
		 * kernel handler frame and not touch the
		 * not-yet-invalidated user page tables.
		 */
		atomic_thread_fence_acq();
		atomic_store_int(&scoreboard[initiator_cpu_id], smp_tlb_gen);

		invlop_handler_one_req(smp_tlb_op, smp_tlb_pmap, smp_tlb_addr1,
		    smp_tlb_addr2);
	}
}
1118