xref: /freebsd/sys/amd64/amd64/mp_machdep.c (revision a37e484d)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 1996, by Steve Passe
5  * Copyright (c) 2003, by Peter Wemm
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. The name of the developer may NOT be used to endorse or promote products
14  *    derived from this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 #include <sys/cdefs.h>
30 #include "opt_acpi.h"
31 #include "opt_cpu.h"
32 #include "opt_ddb.h"
33 #include "opt_kstack_pages.h"
34 #include "opt_sched.h"
35 #include "opt_smp.h"
36 
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/bus.h>
40 #include <sys/cpuset.h>
41 #include <sys/domainset.h>
42 #include <sys/kdb.h>
43 #include <sys/kernel.h>
44 #include <sys/ktr.h>
45 #include <sys/lock.h>
46 #include <sys/malloc.h>
47 #include <sys/memrange.h>
48 #include <sys/mutex.h>
49 #include <sys/pcpu.h>
50 #include <sys/proc.h>
51 #include <sys/sched.h>
52 #include <sys/smp.h>
53 #include <sys/sysctl.h>
54 
55 #include <vm/vm.h>
56 #include <vm/vm_param.h>
57 #include <vm/pmap.h>
58 #include <vm/vm_kern.h>
59 #include <vm/vm_extern.h>
60 #include <vm/vm_page.h>
61 #include <vm/vm_phys.h>
62 
63 #include <x86/apicreg.h>
64 #include <machine/clock.h>
65 #include <machine/cputypes.h>
66 #include <machine/cpufunc.h>
67 #include <x86/mca.h>
68 #include <machine/md_var.h>
69 #include <machine/pcb.h>
70 #include <machine/psl.h>
71 #include <machine/smp.h>
72 #include <machine/specialreg.h>
73 #include <machine/tss.h>
74 #include <x86/ucode.h>
75 #include <machine/cpu.h>
76 #include <x86/init.h>
77 
78 #ifdef DEV_ACPI
79 #include <contrib/dev/acpica/include/acpi.h>
80 #include <dev/acpica/acpivar.h>
81 #endif
82 
83 #define WARMBOOT_TARGET		0
84 #define WARMBOOT_OFF		(KERNBASE + 0x0467)
85 #define WARMBOOT_SEG		(KERNBASE + 0x0469)
86 
87 #define CMOS_REG		(0x70)
88 #define CMOS_DATA		(0x71)
89 #define BIOS_RESET		(0x0f)
90 #define BIOS_WARM		(0x0a)
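/*
 * Legacy BIOS warm-boot handshake, used below only when not booted via
 * EFI: writing BIOS_WARM to the CMOS shutdown-status register and a
 * real-mode far pointer at physical 0x467/0x469 makes the BIOS resume
 * through that vector after a reset instead of running a full POST.
 * The WARMBOOT_* addresses are those physical locations as seen through
 * the kernel map at KERNBASE.
 */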
91 
92 #define GiB(v)			(v ## ULL << 30)
93 
94 #define	AP_BOOTPT_SZ		(PAGE_SIZE * 4)
95 
96 /* Temporary variables for init_secondary() */
97 static char *doublefault_stack;
98 static char *mce_stack;
99 static char *nmi_stack;
100 static char *dbg_stack;
101 void *bootpcpu;
102 
103 extern u_int mptramp_la57;
104 extern u_int mptramp_nx;
105 
106 /*
107  * Local data and functions.
108  */
109 
110 static int start_ap(int apic_id, vm_paddr_t boot_address);
111 
112 /*
113  * Initialize the IPI handlers and start up the APs.
114  */
115 void
116 cpu_mp_start(void)
117 {
118 	int i;
119 
120 	/* Initialize the logical ID to APIC ID table. */
121 	for (i = 0; i < MAXCPU; i++) {
122 		cpu_apic_ids[i] = -1;
123 	}
124 
125 	/* Install an inter-CPU IPI for cache and TLB invalidations. */
126 	setidt(IPI_INVLOP, pti ? IDTVEC(invlop_pti) : IDTVEC(invlop),
127 	    SDT_SYSIGT, SEL_KPL, 0);
128 
129 	/* Install an inter-CPU IPI for all-CPU rendezvous */
130 	setidt(IPI_RENDEZVOUS, pti ? IDTVEC(rendezvous_pti) :
131 	    IDTVEC(rendezvous), SDT_SYSIGT, SEL_KPL, 0);
132 
133 	/* Install generic inter-CPU IPI handler */
134 	setidt(IPI_BITMAP_VECTOR, pti ? IDTVEC(ipi_intr_bitmap_handler_pti) :
135 	    IDTVEC(ipi_intr_bitmap_handler), SDT_SYSIGT, SEL_KPL, 0);
136 
137 	/* Install an inter-CPU IPI for CPU stop/restart */
138 	setidt(IPI_STOP, pti ? IDTVEC(cpustop_pti) : IDTVEC(cpustop),
139 	    SDT_SYSIGT, SEL_KPL, 0);
140 
141 	/* Install an inter-CPU IPI for CPU suspend/resume */
142 	setidt(IPI_SUSPEND, pti ? IDTVEC(cpususpend_pti) : IDTVEC(cpususpend),
143 	    SDT_SYSIGT, SEL_KPL, 0);
144 
145 	/* Install an IPI for calling delayed SWI */
146 	setidt(IPI_SWI, pti ? IDTVEC(ipi_swi_pti) : IDTVEC(ipi_swi),
147 	    SDT_SYSIGT, SEL_KPL, 0);
148 
149 	/* Set boot_cpu_id if needed. */
150 	if (boot_cpu_id == -1) {
151 		boot_cpu_id = PCPU_GET(apic_id);
152 		cpu_info[boot_cpu_id].cpu_bsp = 1;
153 	} else
154 		KASSERT(boot_cpu_id == PCPU_GET(apic_id),
155 		    ("BSP's APIC ID doesn't match boot_cpu_id"));
156 
157 	/* Probe logical/physical core configuration. */
158 	topo_probe();
159 
160 	assign_cpu_ids();
161 
162 	mptramp_la57 = la57;
163 	mptramp_nx = pg_nx != 0;
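	/*
	 * The AP trampoline loads these page tables while still running
	 * 32-bit code, so the top-level table must live below 4G; the
	 * assertion below enforces this.
	 */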
164 	MPASS(kernel_pmap->pm_cr3 < (1UL << 32));
165 	mptramp_pagetables = kernel_pmap->pm_cr3;
166 
167 	/* Start each Application Processor */
168 	start_all_aps();
169 
170 	set_interrupt_apic_ids();
171 
172 #if defined(DEV_ACPI) && MAXMEMDOM > 1
173 	acpi_pxm_set_cpu_locality();
174 #endif
175 }
176 
177 /*
178  * AP CPUs call this to initialize themselves.
179  */
180 void
181 init_secondary(void)
182 {
183 	struct pcpu *pc;
184 	struct nmi_pcpu *np;
185 	struct user_segment_descriptor *gdt;
186 	struct region_descriptor ap_gdt;
187 	u_int64_t cr0;
188 	int cpu, gsel_tss, x;
189 
190 	/* Set by the startup code for us to use */
191 	cpu = bootAP;
192 
193 	/* Update microcode before doing anything else. */
194 	ucode_load_ap(cpu);
195 
196 	/* Initialize the PCPU area. */
197 	pc = bootpcpu;
198 	pcpu_init(pc, cpu, sizeof(struct pcpu));
199 	dpcpu_init(dpcpu, cpu);
200 	pc->pc_apic_id = cpu_apic_ids[cpu];
201 	pc->pc_prvspace = pc;
202 	pc->pc_curthread = 0;
203 	pc->pc_tssp = &pc->pc_common_tss;
204 	pc->pc_rsp0 = 0;
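	/* Top of the PTI trampoline stack, aligned down to 16 bytes. */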
205 	pc->pc_pti_rsp0 = (((vm_offset_t)&pc->pc_pti_stack +
206 	    PC_PTI_STACK_SZ * sizeof(uint64_t)) & ~0xful);
207 	gdt = pc->pc_gdt;
208 	pc->pc_tss = (struct system_segment_descriptor *)&gdt[GPROC0_SEL];
209 	pc->pc_fs32p = &gdt[GUFS32_SEL];
210 	pc->pc_gs32p = &gdt[GUGS32_SEL];
211 	pc->pc_ldt = (struct system_segment_descriptor *)&gdt[GUSERLDT_SEL];
212 	pc->pc_ucr3_load_mask = PMAP_UCR3_NOMASK;
213 	/* See comment in pmap_bootstrap(). */
214 	pc->pc_pcid_next = PMAP_PCID_KERN + 2;
215 	pc->pc_pcid_gen = 1;
216 	pc->pc_kpmap_store.pm_pcid = PMAP_PCID_KERN;
217 	pc->pc_kpmap_store.pm_gen = 1;
218 
219 	pc->pc_smp_tlb_gen = 1;
220 
221 	/* Init tss */
222 	pc->pc_common_tss = __pcpu[0].pc_common_tss;
223 	pc->pc_common_tss.tss_iobase = sizeof(struct amd64tss) +
224 	    IOPERM_BITMAP_SIZE;
225 	pc->pc_common_tss.tss_rsp0 = 0;
226 
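	/*
	 * A struct nmi_pcpu sits at the top of each IST stack; its
	 * np_pcpu field lets the double fault, NMI, MC# and DB# entry
	 * code find this CPU's pcpu even when the interrupted context's
	 * GS base cannot be trusted.
	 */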
227 	/* The doublefault stack runs on IST1. */
228 	np = ((struct nmi_pcpu *)&doublefault_stack[DBLFAULT_STACK_SIZE]) - 1;
229 	np->np_pcpu = (register_t)pc;
230 	pc->pc_common_tss.tss_ist1 = (long)np;
231 
232 	/* The NMI stack runs on IST2. */
233 	np = ((struct nmi_pcpu *)&nmi_stack[NMI_STACK_SIZE]) - 1;
234 	np->np_pcpu = (register_t)pc;
235 	pc->pc_common_tss.tss_ist2 = (long)np;
236 
237 	/* The MC# stack runs on IST3. */
238 	np = ((struct nmi_pcpu *)&mce_stack[MCE_STACK_SIZE]) - 1;
239 	np->np_pcpu = (register_t)pc;
240 	pc->pc_common_tss.tss_ist3 = (long)np;
241 
242 	/* The DB# stack runs on IST4. */
243 	np = ((struct nmi_pcpu *)&dbg_stack[DBG_STACK_SIZE]) - 1;
244 	np->np_pcpu = (register_t)pc;
245 	pc->pc_common_tss.tss_ist4 = (long)np;
246 
247 	/* Prepare private GDT */
248 	gdt_segs[GPROC0_SEL].ssd_base = (long)&pc->pc_common_tss;
249 	for (x = 0; x < NGDT; x++) {
250 		if (x != GPROC0_SEL && x != GPROC0_SEL + 1 &&
251 		    x != GUSERLDT_SEL && x != GUSERLDT_SEL + 1)
252 			ssdtosd(&gdt_segs[x], &gdt[x]);
253 	}
254 	ssdtosyssd(&gdt_segs[GPROC0_SEL],
255 	    (struct system_segment_descriptor *)&gdt[GPROC0_SEL]);
256 	ap_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
257 	ap_gdt.rd_base = (u_long)gdt;
258 	lgdt(&ap_gdt);			/* does magic intra-segment return */
259 
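	/*
	 * Point GS base at the new pcpu so %gs-relative PCPU accesses
	 * work on this AP; the user FS base and the kernel-saved user
	 * GS base start out zeroed.
	 */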
260 	wrmsr(MSR_FSBASE, 0);		/* User value */
261 	wrmsr(MSR_GSBASE, (uint64_t)pc);
262 	wrmsr(MSR_KGSBASE, 0);		/* User value */
263 	fix_cpuid();
264 
265 	lidt(&r_idt);
266 
267 	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
268 	ltr(gsel_tss);
269 
270 	/*
271 	 * Set to a known state:
272 	 * Set by mpboot.s: CR0_PG, CR0_PE
273 	 * Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
274 	 */
275 	cr0 = rcr0();
276 	cr0 &= ~(CR0_CD | CR0_NW | CR0_EM);
277 	load_cr0(cr0);
278 
279 	amd64_conf_fast_syscall();
280 
281 	/* signal our startup to the BSP. */
282 	mp_naps++;
283 
284 	/* Spin until the BSP releases the APs. */
285 	while (atomic_load_acq_int(&aps_ready) == 0)
286 		ia32_pause();
287 
288 	init_secondary_tail();
289 }
290 
291 static void
292 amd64_mp_alloc_pcpu(void)
293 {
294 	vm_page_t m;
295 	int cpu;
296 
297 	/* Allocate pcpu areas to the correct domain. */
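	/*
	 * The BSP's (cpu 0) pcpu page was already set up during early
	 * boot, so only the APs need backing pages here.
	 */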
298 	for (cpu = 1; cpu < mp_ncpus; cpu++) {
299 #ifdef NUMA
300 		m = NULL;
301 		if (vm_ndomains > 1) {
302 			m = vm_page_alloc_noobj_domain(
303 			    acpi_pxm_get_cpu_locality(cpu_apic_ids[cpu]),
304 			    VM_ALLOC_ZERO);
305 		}
306 		if (m == NULL)
307 #endif
308 			m = vm_page_alloc_noobj(VM_ALLOC_ZERO);
309 		if (m == NULL)
310 			panic("cannot alloc pcpu page for cpu %d", cpu);
311 		pmap_qenter((vm_offset_t)&__pcpu[cpu], &m, 1);
312 	}
313 }
314 
315 /*
316  * start each AP in our list
317  */
318 int
319 start_all_aps(void)
320 {
321 	vm_page_t m_boottramp, m_pml4, m_pdp, m_pd[4];
322 	pml5_entry_t old_pml45;
323 	pml4_entry_t *v_pml4;
324 	pdp_entry_t *v_pdp;
325 	pd_entry_t *v_pd;
326 	vm_paddr_t boot_address;
327 	u_int32_t mpbioswarmvec;
328 	int apic_id, cpu, domain, i;
329 	u_char mpbiosreason;
330 
331 	amd64_mp_alloc_pcpu();
332 	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);
333 
334 	MPASS(bootMP_size <= PAGE_SIZE);
335 	m_boottramp = vm_page_alloc_noobj_contig(0, 1, 0,
336 	    (1ULL << 20), /* Trampoline should be below 1M for real mode */
337 	    PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
338 	boot_address = VM_PAGE_TO_PHYS(m_boottramp);
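	/*
	 * The SIPI vector is the physical page number of this trampoline
	 * page, so in addition to being below 1M it must be page-aligned;
	 * see the vector calculation in start_ap().
	 */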
339 
340 	/* Create a transient 1:1 mapping of low 4G */
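	/*
	 * The identity map is one PDP page plus four PD pages, each PD
	 * covering 1G with 2M superpages (PG_PS).  With la57 a throwaway
	 * PML4 page is hung off pm_pmltop[0]; otherwise slot 0 of the
	 * kernel's own PML4 is borrowed and restored once the APs are up.
	 */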
341 	if (la57) {
342 		m_pml4 = pmap_page_alloc_below_4g(true);
343 		v_pml4 = (pml4_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pml4));
344 	} else {
345 		v_pml4 = &kernel_pmap->pm_pmltop[0];
346 	}
347 	m_pdp = pmap_page_alloc_below_4g(true);
348 	v_pdp = (pdp_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pdp));
349 	m_pd[0] = pmap_page_alloc_below_4g(false);
350 	v_pd = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pd[0]));
351 	for (i = 0; i < NPDEPG; i++)
352 		v_pd[i] = (i << PDRSHIFT) | X86_PG_V | X86_PG_RW | X86_PG_A |
353 		    X86_PG_M | PG_PS;
354 	m_pd[1] = pmap_page_alloc_below_4g(false);
355 	v_pd = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pd[1]));
356 	for (i = 0; i < NPDEPG; i++)
357 		v_pd[i] = (NBPDP + (i << PDRSHIFT)) | X86_PG_V | X86_PG_RW |
358 		    X86_PG_A | X86_PG_M | PG_PS;
359 	m_pd[2] = pmap_page_alloc_below_4g(false);
360 	v_pd = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pd[2]));
361 	for (i = 0; i < NPDEPG; i++)
362 		v_pd[i] = (2UL * NBPDP + (i << PDRSHIFT)) | X86_PG_V |
363 		    X86_PG_RW | X86_PG_A | X86_PG_M | PG_PS;
364 	m_pd[3] = pmap_page_alloc_below_4g(false);
365 	v_pd = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pd[3]));
366 	for (i = 0; i < NPDEPG; i++)
367 		v_pd[i] = (3UL * NBPDP + (i << PDRSHIFT)) | X86_PG_V |
368 		    X86_PG_RW | X86_PG_A | X86_PG_M | PG_PS;
369 	v_pdp[0] = VM_PAGE_TO_PHYS(m_pd[0]) | X86_PG_V |
370 	    X86_PG_RW | X86_PG_A | X86_PG_M;
371 	v_pdp[1] = VM_PAGE_TO_PHYS(m_pd[1]) | X86_PG_V |
372 	    X86_PG_RW | X86_PG_A | X86_PG_M;
373 	v_pdp[2] = VM_PAGE_TO_PHYS(m_pd[2]) | X86_PG_V |
374 	    X86_PG_RW | X86_PG_A | X86_PG_M;
375 	v_pdp[3] = VM_PAGE_TO_PHYS(m_pd[3]) | X86_PG_V |
376 	    X86_PG_RW | X86_PG_A | X86_PG_M;
377 	old_pml45 = kernel_pmap->pm_pmltop[0];
378 	if (la57) {
379 		kernel_pmap->pm_pmltop[0] = VM_PAGE_TO_PHYS(m_pml4) |
380 		    X86_PG_V | X86_PG_RW | X86_PG_A | X86_PG_M;
381 	}
382 	v_pml4[0] = VM_PAGE_TO_PHYS(m_pdp) | X86_PG_V |
383 	    X86_PG_RW | X86_PG_A | X86_PG_M;
384 	pmap_invalidate_all(kernel_pmap);
385 
386 	/* copy the AP 1st level boot code */
387 	bcopy(mptramp_start, (void *)PHYS_TO_DMAP(boot_address), bootMP_size);
388 	if (bootverbose)
389 		printf("AP boot address %#lx\n", boot_address);
390 
391 	/* save the current value of the warm-start vector */
392 	if (!efi_boot)
393 		mpbioswarmvec = *((u_int32_t *) WARMBOOT_OFF);
394 	outb(CMOS_REG, BIOS_RESET);
395 	mpbiosreason = inb(CMOS_DATA);
396 
397 	/* setup a vector to our boot code */
398 	if (!efi_boot) {
399 		*((volatile u_short *)WARMBOOT_OFF) = WARMBOOT_TARGET;
400 		*((volatile u_short *)WARMBOOT_SEG) = (boot_address >> 4);
401 	}
402 	outb(CMOS_REG, BIOS_RESET);
403 	outb(CMOS_DATA, BIOS_WARM);	/* 'warm-start' */
404 
405 	/* start each AP */
406 	domain = 0;
407 	for (cpu = 1; cpu < mp_ncpus; cpu++) {
408 		apic_id = cpu_apic_ids[cpu];
409 #ifdef NUMA
410 		if (vm_ndomains > 1)
411 			domain = acpi_pxm_get_cpu_locality(apic_id);
412 #endif
413 		/* allocate and set up an idle stack data page */
414 		bootstacks[cpu] = kmem_malloc(kstack_pages * PAGE_SIZE,
415 		    M_WAITOK | M_ZERO);
416 		doublefault_stack = kmem_malloc(DBLFAULT_STACK_SIZE,
417 		    M_WAITOK | M_ZERO);
418 		mce_stack = kmem_malloc(MCE_STACK_SIZE,
419 		    M_WAITOK | M_ZERO);
420 		nmi_stack = kmem_malloc_domainset(
421 		    DOMAINSET_PREF(domain), NMI_STACK_SIZE, M_WAITOK | M_ZERO);
422 		dbg_stack = kmem_malloc_domainset(
423 		    DOMAINSET_PREF(domain), DBG_STACK_SIZE, M_WAITOK | M_ZERO);
424 		dpcpu = kmem_malloc_domainset(DOMAINSET_PREF(domain),
425 		    DPCPU_SIZE, M_WAITOK | M_ZERO);
426 
427 		bootpcpu = &__pcpu[cpu];
428 		bootSTK = (char *)bootstacks[cpu] +
429 		    kstack_pages * PAGE_SIZE - 8;
430 		bootAP = cpu;
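		/*
		 * bootSTK, bootAP and bootpcpu are handshake variables
		 * consumed by the trampoline and init_secondary() on the
		 * AP being started, which is why APs are brought up one
		 * at a time.
		 */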
431 
432 		/* attempt to start the Application Processor */
433 		if (!start_ap(apic_id, boot_address)) {
434 			/* restore the warmstart vector */
435 			if (!efi_boot)
436 				*(u_int32_t *)WARMBOOT_OFF = mpbioswarmvec;
437 			panic("AP #%d (PHY# %d) failed!", cpu, apic_id);
438 		}
439 
440 		CPU_SET(cpu, &all_cpus);	/* record AP in CPU map */
441 	}
442 
443 	/* restore the warmstart vector */
444 	if (!efi_boot)
445 		*(u_int32_t *)WARMBOOT_OFF = mpbioswarmvec;
446 
447 	outb(CMOS_REG, BIOS_RESET);
448 	outb(CMOS_DATA, mpbiosreason);
449 
450 	/* Destroy transient 1:1 mapping */
451 	kernel_pmap->pm_pmltop[0] = old_pml45;
452 	invlpg(0);
453 	if (la57)
454 		vm_page_free(m_pml4);
455 	vm_page_free(m_pd[3]);
456 	vm_page_free(m_pd[2]);
457 	vm_page_free(m_pd[1]);
458 	vm_page_free(m_pd[0]);
459 	vm_page_free(m_pdp);
460 	vm_page_free(m_boottramp);
461 
462 	/* number of APs actually started */
463 	return (mp_naps);
464 }
465 
466 /*
467  * This function starts the AP (application processor) identified
468  * by the APIC ID 'apic_id'.  It does quite a "song and dance"
469  * to accomplish this.  This is necessary because of the nuances
470  * of the different hardware we might encounter.  It isn't pretty,
471  * but it seems to work.
472  */
473 static int
474 start_ap(int apic_id, vm_paddr_t boot_address)
475 {
476 	int vector, ms;
477 	int cpus;
478 
479 	/* calculate the vector */
480 	vector = (boot_address >> 12) & 0xff;
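	/*
	 * The startup IPI vector is the trampoline's physical page
	 * number; the AP begins in real mode at CS:IP = (vector << 8):0,
	 * i.e. at physical address vector * 4096 (e.g. a trampoline at
	 * 0x9a000, used here purely as an illustration, gives vector
	 * 0x9a).
	 */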
481 
482 	/* used as a watchpoint to signal AP startup */
483 	cpus = mp_naps;
484 
485 	ipi_startup(apic_id, vector);
486 
487 	/* Wait up to 5 seconds for it to start. */
488 	for (ms = 0; ms < 5000; ms++) {
489 		if (mp_naps > cpus)
490 			return 1;	/* return SUCCESS */
491 		DELAY(1000);
492 	}
493 	return 0;		/* return FAILURE */
494 }
495 
496 /*
497  * Flush the TLB on other CPUs
498  */
499 
500 /*
501  * Invalidation request.  PCPU pc_smp_tlb_op uses u_int instead of the
502  * enum to avoid both namespace and ABI issues (with enums).
503  */
504 enum invl_op_codes {
505       INVL_OP_TLB		= 1,
506       INVL_OP_TLB_INVPCID	= 2,
507       INVL_OP_TLB_INVPCID_PTI	= 3,
508       INVL_OP_TLB_PCID		= 4,
509       INVL_OP_PGRNG		= 5,
510       INVL_OP_PGRNG_INVPCID	= 6,
511       INVL_OP_PGRNG_PCID	= 7,
512       INVL_OP_PG		= 8,
513       INVL_OP_PG_INVPCID	= 9,
514       INVL_OP_PG_PCID		= 10,
515       INVL_OP_CACHE		= 11,
516 };
517 
518 /*
519  * These variables are initialized at startup to reflect how each of
520  * the different kinds of invalidations should be performed on the
521  * current machine and environment.
522  */
523 static enum invl_op_codes invl_op_tlb;
524 static enum invl_op_codes invl_op_pgrng;
525 static enum invl_op_codes invl_op_pg;
526 
527 /*
528  * Scoreboard of IPI completion notifications from target to IPI initiator.
529  *
530  * Each CPU can initiate shootdown IPI independently from other CPUs.
531  * Initiator enters critical section, then fills its local PCPU
532  * shootdown info (pc_smp_tlb_ vars), then clears scoreboard generation
533  * at location (cpu, my_cpuid) for each target cpu.  After that IPI is
534  * sent to all targets which scan for zeroed scoreboard generation
535  * words.  Upon finding such word the shootdown data is read from
536  * corresponding cpu's pcpu, and generation is set.  Meantime initiator
537  * loops waiting for all zeroed generations in scoreboard to update.
538  */
539 static uint32_t *invl_scoreboard;
540 
541 static void
542 invl_scoreboard_init(void *arg __unused)
543 {
544 	u_int i;
545 
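	/*
	 * The scoreboard is an (mp_maxid + 1) x (mp_maxid + 1) matrix of
	 * generation words indexed by (target cpu, initiator cpu); a
	 * non-zero word means no request from that initiator is pending.
	 */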
546 	invl_scoreboard = malloc(sizeof(uint32_t) * (mp_maxid + 1) *
547 	    (mp_maxid + 1), M_DEVBUF, M_WAITOK);
548 	for (i = 0; i < (mp_maxid + 1) * (mp_maxid + 1); i++)
549 		invl_scoreboard[i] = 1;
550 
551 	if (pmap_pcid_enabled) {
552 		if (invpcid_works) {
553 			if (pti)
554 				invl_op_tlb = INVL_OP_TLB_INVPCID_PTI;
555 			else
556 				invl_op_tlb = INVL_OP_TLB_INVPCID;
557 			invl_op_pgrng = INVL_OP_PGRNG_INVPCID;
558 			invl_op_pg = INVL_OP_PG_INVPCID;
559 		} else {
560 			invl_op_tlb = INVL_OP_TLB_PCID;
561 			invl_op_pgrng = INVL_OP_PGRNG_PCID;
562 			invl_op_pg = INVL_OP_PG_PCID;
563 		}
564 	} else {
565 		invl_op_tlb = INVL_OP_TLB;
566 		invl_op_pgrng = INVL_OP_PGRNG;
567 		invl_op_pg = INVL_OP_PG;
568 	}
569 }
570 SYSINIT(invl_ops, SI_SUB_SMP - 1, SI_ORDER_ANY, invl_scoreboard_init, NULL);
571 
572 static uint32_t *
573 invl_scoreboard_getcpu(u_int cpu)
574 {
575 	return (invl_scoreboard + cpu * (mp_maxid + 1));
576 }
577 
578 static uint32_t *
579 invl_scoreboard_slot(u_int cpu)
580 {
581 	return (invl_scoreboard_getcpu(cpu) + PCPU_GET(cpuid));
582 }
583 
584 /*
585  * Used by the pmap to request cache or TLB invalidation on local and
586  * remote processors.  Mask provides the set of remote CPUs that are
587  * to be signalled with the invalidation IPI.  As an optimization, the
588  * curcpu_cb callback is invoked on the calling CPU in a critical
589  * section while waiting for the remote CPUs to complete the operation.
590  *
591  * The callback function is called unconditionally on the caller's
592  * underlying processor, even when this processor is not set in the
593  * mask.  So, the callback function must be prepared to handle such
594  * spurious invocations.
595  *
596  * Interrupts must be enabled when calling the function with smp
597  * started, to avoid deadlock with other IPIs that are protected with
598  * smp_ipi_mtx spinlock at the initiator side.
599  *
600  * Function must be called with the thread pinned, and it unpins on
601  * completion.
602  */
603 static void
604 smp_targeted_tlb_shootdown(pmap_t pmap, vm_offset_t addr1, vm_offset_t addr2,
605     smp_invl_cb_t curcpu_cb, enum invl_op_codes op)
606 {
607 	cpuset_t mask;
608 	uint32_t generation, *p_cpudone;
609 	int cpu;
610 	bool is_all;
611 
612 	/*
613 	 * It is not necessary to signal other CPUs while booting or
614 	 * when in the debugger.
615 	 */
616 	if (__predict_false(kdb_active || KERNEL_PANICKED() || !smp_started))
617 		goto local_cb;
618 
619 	KASSERT(curthread->td_pinned > 0, ("curthread not pinned"));
620 
621 	/*
622 	 * Make a stable copy of the set of CPUs on which the pmap is active.
623 	 * See if we have to interrupt other CPUs.
624 	 */
625 	CPU_COPY(pmap_invalidate_cpu_mask(pmap), &mask);
626 	is_all = CPU_CMP(&mask, &all_cpus) == 0;
627 	CPU_CLR(curcpu, &mask);
628 	if (CPU_EMPTY(&mask))
629 		goto local_cb;
630 
631 	/*
632 	 * Initiator must have interrupts enabled, which prevents
633 	 * non-invalidation IPIs that take smp_ipi_mtx spinlock,
634 	 * from deadlocking with us.  On the other hand, preemption
635 	 * must be disabled to pin initiator to the instance of the
636 	 * pcpu pc_smp_tlb data and scoreboard line.
637 	 */
638 	KASSERT((read_rflags() & PSL_I) != 0,
639 	    ("smp_targeted_tlb_shootdown: interrupts disabled"));
640 	critical_enter();
641 
642 	PCPU_SET(smp_tlb_addr1, addr1);
643 	PCPU_SET(smp_tlb_addr2, addr2);
644 	PCPU_SET(smp_tlb_pmap, pmap);
645 	generation = PCPU_GET(smp_tlb_gen);
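	/* Generation zero means "request pending", so skip it on wrap. */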
646 	if (++generation == 0)
647 		generation = 1;
648 	PCPU_SET(smp_tlb_gen, generation);
649 	PCPU_SET(smp_tlb_op, op);
650 	/* Fence between filling smp_tlb fields and clearing scoreboard. */
651 	atomic_thread_fence_rel();
652 
653 	CPU_FOREACH_ISSET(cpu, &mask) {
654 		KASSERT(*invl_scoreboard_slot(cpu) != 0,
655 		    ("IPI scoreboard is zero, initiator %d target %d",
656 		    curcpu, cpu));
657 		*invl_scoreboard_slot(cpu) = 0;
658 	}
659 
660 	/*
661 	 * IPI acts as a fence between writing to the scoreboard above
662 	 * (zeroing slot) and reading from it below (wait for
663 	 * acknowledgment).
664 	 */
665 	if (is_all) {
666 		ipi_all_but_self(IPI_INVLOP);
667 	} else {
668 		ipi_selected(mask, IPI_INVLOP);
669 	}
670 	curcpu_cb(pmap, addr1, addr2);
671 	CPU_FOREACH_ISSET(cpu, &mask) {
672 		p_cpudone = invl_scoreboard_slot(cpu);
673 		while (atomic_load_int(p_cpudone) != generation)
674 			ia32_pause();
675 	}
676 
677 	/*
678 	 * Unpin before leaving critical section.  If the thread owes
679 	 * preemption, this allows scheduler to select thread on any
680 	 * CPU from its cpuset.
681 	 */
682 	sched_unpin();
683 	critical_exit();
684 
685 	return;
686 
687 local_cb:
688 	critical_enter();
689 	curcpu_cb(pmap, addr1, addr2);
690 	sched_unpin();
691 	critical_exit();
692 }
693 
694 void
695 smp_masked_invltlb(pmap_t pmap, smp_invl_cb_t curcpu_cb)
696 {
697 	smp_targeted_tlb_shootdown(pmap, 0, 0, curcpu_cb, invl_op_tlb);
698 #ifdef COUNT_XINVLTLB_HITS
699 	ipi_global++;
700 #endif
701 }
702 
703 void
704 smp_masked_invlpg(vm_offset_t addr, pmap_t pmap, smp_invl_cb_t curcpu_cb)
705 {
706 	smp_targeted_tlb_shootdown(pmap, addr, 0, curcpu_cb, invl_op_pg);
707 #ifdef COUNT_XINVLTLB_HITS
708 	ipi_page++;
709 #endif
710 }
711 
712 void
713 smp_masked_invlpg_range(vm_offset_t addr1, vm_offset_t addr2, pmap_t pmap,
714     smp_invl_cb_t curcpu_cb)
715 {
716 	smp_targeted_tlb_shootdown(pmap, addr1, addr2, curcpu_cb,
717 	    invl_op_pgrng);
718 #ifdef COUNT_XINVLTLB_HITS
719 	ipi_range++;
720 	ipi_range_size += (addr2 - addr1) / PAGE_SIZE;
721 #endif
722 }
723 
724 void
725 smp_cache_flush(smp_invl_cb_t curcpu_cb)
726 {
727 	smp_targeted_tlb_shootdown(kernel_pmap, 0, 0, curcpu_cb, INVL_OP_CACHE);
728 }
729 
730 /*
731  * Handlers for TLB related IPIs
732  */
733 static void
734 invltlb_handler(pmap_t smp_tlb_pmap)
735 {
736 #ifdef COUNT_XINVLTLB_HITS
737 	xhits_gbl[PCPU_GET(cpuid)]++;
738 #endif /* COUNT_XINVLTLB_HITS */
739 #ifdef COUNT_IPIS
740 	(*ipi_invltlb_counts[PCPU_GET(cpuid)])++;
741 #endif /* COUNT_IPIS */
742 
743 	if (smp_tlb_pmap == kernel_pmap)
744 		invltlb_glob();
745 	else
746 		invltlb();
747 }
748 
749 static void
750 invltlb_invpcid_handler(pmap_t smp_tlb_pmap)
751 {
752 	struct invpcid_descr d;
753 
754 #ifdef COUNT_XINVLTLB_HITS
755 	xhits_gbl[PCPU_GET(cpuid)]++;
756 #endif /* COUNT_XINVLTLB_HITS */
757 #ifdef COUNT_IPIS
758 	(*ipi_invltlb_counts[PCPU_GET(cpuid)])++;
759 #endif /* COUNT_IPIS */
760 
761 	d.pcid = pmap_get_pcid(smp_tlb_pmap);
762 	d.pad = 0;
763 	d.addr = 0;
764 	invpcid(&d, smp_tlb_pmap == kernel_pmap ? INVPCID_CTXGLOB :
765 	    INVPCID_CTX);
766 }
767 
768 static void
769 invltlb_invpcid_pti_handler(pmap_t smp_tlb_pmap)
770 {
771 	struct invpcid_descr d;
772 
773 #ifdef COUNT_XINVLTLB_HITS
774 	xhits_gbl[PCPU_GET(cpuid)]++;
775 #endif /* COUNT_XINVLTLB_HITS */
776 #ifdef COUNT_IPIS
777 	(*ipi_invltlb_counts[PCPU_GET(cpuid)])++;
778 #endif /* COUNT_IPIS */
779 
780 	d.pcid = pmap_get_pcid(smp_tlb_pmap);
781 	d.pad = 0;
782 	d.addr = 0;
783 	if (smp_tlb_pmap == kernel_pmap) {
784 		/*
785 		 * This invalidation actually needs to clear kernel
786 		 * mappings from the TLB in the current pmap, but
787 		 * since we were asked for the flush in the kernel
788 		 * pmap, achieve it by performing global flush.
789 		 */
790 		invpcid(&d, INVPCID_CTXGLOB);
791 	} else {
792 		invpcid(&d, INVPCID_CTX);
793 		if (smp_tlb_pmap == PCPU_GET(curpmap) &&
794 		    smp_tlb_pmap->pm_ucr3 != PMAP_NO_CR3)
795 			PCPU_SET(ucr3_load_mask, ~CR3_PCID_SAVE);
796 	}
797 }
798 
799 static void
800 invltlb_pcid_handler(pmap_t smp_tlb_pmap)
801 {
802 #ifdef COUNT_XINVLTLB_HITS
803 	xhits_gbl[PCPU_GET(cpuid)]++;
804 #endif /* COUNT_XINVLTLB_HITS */
805 #ifdef COUNT_IPIS
806 	(*ipi_invltlb_counts[PCPU_GET(cpuid)])++;
807 #endif /* COUNT_IPIS */
808 
809 	if (smp_tlb_pmap == kernel_pmap) {
810 		invltlb_glob();
811 	} else {
812 		/*
813 		 * The current pmap might not be equal to
814 		 * smp_tlb_pmap.  The clearing of the pm_gen in
815 		 * pmap_invalidate_all() takes care of TLB
816 		 * invalidation when switching to the pmap on this
817 		 * CPU.
818 		 */
819 		if (smp_tlb_pmap == PCPU_GET(curpmap)) {
820 			load_cr3(smp_tlb_pmap->pm_cr3 |
821 			    pmap_get_pcid(smp_tlb_pmap));
822 			if (smp_tlb_pmap->pm_ucr3 != PMAP_NO_CR3)
823 				PCPU_SET(ucr3_load_mask, ~CR3_PCID_SAVE);
824 		}
825 	}
826 }
827 
828 static void
829 invlpg_handler(vm_offset_t smp_tlb_addr1)
830 {
831 #ifdef COUNT_XINVLTLB_HITS
832 	xhits_pg[PCPU_GET(cpuid)]++;
833 #endif /* COUNT_XINVLTLB_HITS */
834 #ifdef COUNT_IPIS
835 	(*ipi_invlpg_counts[PCPU_GET(cpuid)])++;
836 #endif /* COUNT_IPIS */
837 
838 	invlpg(smp_tlb_addr1);
839 }
840 
841 static void
842 invlpg_invpcid_handler(pmap_t smp_tlb_pmap, vm_offset_t smp_tlb_addr1)
843 {
844 	struct invpcid_descr d;
845 
846 #ifdef COUNT_XINVLTLB_HITS
847 	xhits_pg[PCPU_GET(cpuid)]++;
848 #endif /* COUNT_XINVLTLB_HITS */
849 #ifdef COUNT_IPIS
850 	(*ipi_invlpg_counts[PCPU_GET(cpuid)])++;
851 #endif /* COUNT_IPIS */
852 
853 	pmap_invlpg(smp_tlb_pmap, smp_tlb_addr1);
854 	if (smp_tlb_pmap == PCPU_GET(curpmap) &&
855 	    smp_tlb_pmap->pm_ucr3 != PMAP_NO_CR3 &&
856 	    PCPU_GET(ucr3_load_mask) == PMAP_UCR3_NOMASK) {
857 		d.pcid = pmap_get_pcid(smp_tlb_pmap) | PMAP_PCID_USER_PT;
858 		d.pad = 0;
859 		d.addr = smp_tlb_addr1;
860 		invpcid(&d, INVPCID_ADDR);
861 	}
862 }
863 
864 static void
865 invlpg_pcid_handler(pmap_t smp_tlb_pmap, vm_offset_t smp_tlb_addr1)
866 {
867 	uint64_t kcr3, ucr3;
868 	uint32_t pcid;
869 
870 #ifdef COUNT_XINVLTLB_HITS
871 	xhits_pg[PCPU_GET(cpuid)]++;
872 #endif /* COUNT_XINVLTLB_HITS */
873 #ifdef COUNT_IPIS
874 	(*ipi_invlpg_counts[PCPU_GET(cpuid)])++;
875 #endif /* COUNT_IPIS */
876 
877 	invlpg(smp_tlb_addr1);
878 	if (smp_tlb_pmap == PCPU_GET(curpmap) &&
879 	    (ucr3 = smp_tlb_pmap->pm_ucr3) != PMAP_NO_CR3 &&
880 	    PCPU_GET(ucr3_load_mask) == PMAP_UCR3_NOMASK) {
881 		pcid = pmap_get_pcid(smp_tlb_pmap);
882 		kcr3 = smp_tlb_pmap->pm_cr3 | pcid | CR3_PCID_SAVE;
883 		ucr3 |= pcid | PMAP_PCID_USER_PT | CR3_PCID_SAVE;
884 		pmap_pti_pcid_invlpg(ucr3, kcr3, smp_tlb_addr1);
885 	}
886 }
887 
888 static void
889 invlrng_handler(vm_offset_t smp_tlb_addr1, vm_offset_t smp_tlb_addr2)
890 {
891 	vm_offset_t addr;
892 
893 #ifdef COUNT_XINVLTLB_HITS
894 	xhits_rng[PCPU_GET(cpuid)]++;
895 #endif /* COUNT_XINVLTLB_HITS */
896 #ifdef COUNT_IPIS
897 	(*ipi_invlrng_counts[PCPU_GET(cpuid)])++;
898 #endif /* COUNT_IPIS */
899 
900 	addr = smp_tlb_addr1;
901 	do {
902 		invlpg(addr);
903 		addr += PAGE_SIZE;
904 	} while (addr < smp_tlb_addr2);
905 }
906 
907 static void
908 invlrng_invpcid_handler(pmap_t smp_tlb_pmap, vm_offset_t smp_tlb_addr1,
909     vm_offset_t smp_tlb_addr2)
910 {
911 	struct invpcid_descr d;
912 	vm_offset_t addr;
913 
914 #ifdef COUNT_XINVLTLB_HITS
915 	xhits_rng[PCPU_GET(cpuid)]++;
916 #endif /* COUNT_XINVLTLB_HITS */
917 #ifdef COUNT_IPIS
918 	(*ipi_invlrng_counts[PCPU_GET(cpuid)])++;
919 #endif /* COUNT_IPIS */
920 
921 	addr = smp_tlb_addr1;
922 	if (smp_tlb_pmap == kernel_pmap && PCPU_GET(pcid_invlpg_workaround)) {
923 		struct invpcid_descr d = { 0 };
924 
925 		invpcid(&d, INVPCID_CTXGLOB);
926 	} else {
927 		do {
928 			invlpg(addr);
929 			addr += PAGE_SIZE;
930 		} while (addr < smp_tlb_addr2);
931 	}
932 	if (smp_tlb_pmap == PCPU_GET(curpmap) &&
933 	    smp_tlb_pmap->pm_ucr3 != PMAP_NO_CR3 &&
934 	    PCPU_GET(ucr3_load_mask) == PMAP_UCR3_NOMASK) {
935 		d.pcid = pmap_get_pcid(smp_tlb_pmap) | PMAP_PCID_USER_PT;
936 		d.pad = 0;
937 		d.addr = smp_tlb_addr1;
938 		do {
939 			invpcid(&d, INVPCID_ADDR);
940 			d.addr += PAGE_SIZE;
941 		} while (d.addr < smp_tlb_addr2);
942 	}
943 }
944 
945 static void
946 invlrng_pcid_handler(pmap_t smp_tlb_pmap, vm_offset_t smp_tlb_addr1,
947     vm_offset_t smp_tlb_addr2)
948 {
949 	vm_offset_t addr;
950 	uint64_t kcr3, ucr3;
951 	uint32_t pcid;
952 
953 #ifdef COUNT_XINVLTLB_HITS
954 	xhits_rng[PCPU_GET(cpuid)]++;
955 #endif /* COUNT_XINVLTLB_HITS */
956 #ifdef COUNT_IPIS
957 	(*ipi_invlrng_counts[PCPU_GET(cpuid)])++;
958 #endif /* COUNT_IPIS */
959 
960 	addr = smp_tlb_addr1;
961 	do {
962 		invlpg(addr);
963 		addr += PAGE_SIZE;
964 	} while (addr < smp_tlb_addr2);
965 	if (smp_tlb_pmap == PCPU_GET(curpmap) &&
966 	    (ucr3 = smp_tlb_pmap->pm_ucr3) != PMAP_NO_CR3 &&
967 	    PCPU_GET(ucr3_load_mask) == PMAP_UCR3_NOMASK) {
968 		pcid = pmap_get_pcid(smp_tlb_pmap);
969 		kcr3 = smp_tlb_pmap->pm_cr3 | pcid | CR3_PCID_SAVE;
970 		ucr3 |= pcid | PMAP_PCID_USER_PT | CR3_PCID_SAVE;
971 		pmap_pti_pcid_invlrng(ucr3, kcr3, smp_tlb_addr1, smp_tlb_addr2);
972 	}
973 }
974 
975 static void
976 invlcache_handler(void)
977 {
978 #ifdef COUNT_IPIS
979 	(*ipi_invlcache_counts[PCPU_GET(cpuid)])++;
980 #endif /* COUNT_IPIS */
981 	wbinvd();
982 }
983 
984 static void
985 invlop_handler_one_req(enum invl_op_codes smp_tlb_op, pmap_t smp_tlb_pmap,
986     vm_offset_t smp_tlb_addr1, vm_offset_t smp_tlb_addr2)
987 {
988 	switch (smp_tlb_op) {
989 	case INVL_OP_TLB:
990 		invltlb_handler(smp_tlb_pmap);
991 		break;
992 	case INVL_OP_TLB_INVPCID:
993 		invltlb_invpcid_handler(smp_tlb_pmap);
994 		break;
995 	case INVL_OP_TLB_INVPCID_PTI:
996 		invltlb_invpcid_pti_handler(smp_tlb_pmap);
997 		break;
998 	case INVL_OP_TLB_PCID:
999 		invltlb_pcid_handler(smp_tlb_pmap);
1000 		break;
1001 	case INVL_OP_PGRNG:
1002 		invlrng_handler(smp_tlb_addr1, smp_tlb_addr2);
1003 		break;
1004 	case INVL_OP_PGRNG_INVPCID:
1005 		invlrng_invpcid_handler(smp_tlb_pmap, smp_tlb_addr1,
1006 		    smp_tlb_addr2);
1007 		break;
1008 	case INVL_OP_PGRNG_PCID:
1009 		invlrng_pcid_handler(smp_tlb_pmap, smp_tlb_addr1,
1010 		    smp_tlb_addr2);
1011 		break;
1012 	case INVL_OP_PG:
1013 		invlpg_handler(smp_tlb_addr1);
1014 		break;
1015 	case INVL_OP_PG_INVPCID:
1016 		invlpg_invpcid_handler(smp_tlb_pmap, smp_tlb_addr1);
1017 		break;
1018 	case INVL_OP_PG_PCID:
1019 		invlpg_pcid_handler(smp_tlb_pmap, smp_tlb_addr1);
1020 		break;
1021 	case INVL_OP_CACHE:
1022 		invlcache_handler();
1023 		break;
1024 	default:
1025 		__assert_unreachable();
1026 		break;
1027 	}
1028 }
1029 
1030 void
1031 invlop_handler(void)
1032 {
1033 	struct pcpu *initiator_pc;
1034 	pmap_t smp_tlb_pmap;
1035 	vm_offset_t smp_tlb_addr1, smp_tlb_addr2;
1036 	u_int initiator_cpu_id;
1037 	enum invl_op_codes smp_tlb_op;
1038 	uint32_t *scoreboard, smp_tlb_gen;
1039 
1040 	scoreboard = invl_scoreboard_getcpu(PCPU_GET(cpuid));
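	/*
	 * Scan this CPU's scoreboard row for zeroed slots; a single IPI
	 * may service requests from several initiators, so keep looping
	 * until no pending slot remains.
	 */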
1041 	for (;;) {
1042 		for (initiator_cpu_id = 0; initiator_cpu_id <= mp_maxid;
1043 		    initiator_cpu_id++) {
1044 			if (atomic_load_int(&scoreboard[initiator_cpu_id]) == 0)
1045 				break;
1046 		}
1047 		if (initiator_cpu_id > mp_maxid)
1048 			break;
1049 		initiator_pc = cpuid_to_pcpu[initiator_cpu_id];
1050 
1051 		/*
1052 		 * This acquire fence and its corresponding release
1053 		 * fence in smp_targeted_tlb_shootdown() is between
1054 		 * reading zero scoreboard slot and accessing PCPU of
1055 		 * initiator for pc_smp_tlb values.
1056 		 */
1057 		atomic_thread_fence_acq();
1058 		smp_tlb_pmap = initiator_pc->pc_smp_tlb_pmap;
1059 		smp_tlb_addr1 = initiator_pc->pc_smp_tlb_addr1;
1060 		smp_tlb_addr2 = initiator_pc->pc_smp_tlb_addr2;
1061 		smp_tlb_op = initiator_pc->pc_smp_tlb_op;
1062 		smp_tlb_gen = initiator_pc->pc_smp_tlb_gen;
1063 
1064 		/*
1065 		 * Ensure that we do not make our scoreboard
1066 		 * notification visible to the initiator until the
1067 		 * pc_smp_tlb values are read.  The corresponding
1068 		 * fence is implicitly provided by the barrier in the
1069 		 * IPI send operation before the APIC ICR register
1070 		 * write.
1071 		 *
1072 		 * As an optimization, the request is acknowledged
1073 		 * before the actual invalidation is performed.  It is
1074 		 * safe because the target CPU cannot return to userspace
1075 		 * before the handler finishes.  Only an NMI can preempt the
1076 		 * handler, but the NMI would see the kernel handler frame
1077 		 * and not touch the not-yet-invalidated user page tables.
1078 		 */
1079 		atomic_thread_fence_acq();
1080 		atomic_store_int(&scoreboard[initiator_cpu_id], smp_tlb_gen);
1081 
1082 		invlop_handler_one_req(smp_tlb_op, smp_tlb_pmap, smp_tlb_addr1,
1083 		    smp_tlb_addr2);
1084 	}
1085 }
1086