1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 1996, by Steve Passe
5  * Copyright (c) 2003, by Peter Wemm
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. The name of the developer may NOT be used to endorse or promote products
14  *    derived from this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
31 
32 #include "opt_acpi.h"
33 #include "opt_cpu.h"
34 #include "opt_ddb.h"
35 #include "opt_kstack_pages.h"
36 #include "opt_sched.h"
37 #include "opt_smp.h"
38 
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/bus.h>
42 #include <sys/cpuset.h>
43 #include <sys/domainset.h>
44 #ifdef GPROF
45 #include <sys/gmon.h>
46 #endif
47 #include <sys/kdb.h>
48 #include <sys/kernel.h>
49 #include <sys/ktr.h>
50 #include <sys/lock.h>
51 #include <sys/malloc.h>
52 #include <sys/memrange.h>
53 #include <sys/mutex.h>
54 #include <sys/pcpu.h>
55 #include <sys/proc.h>
56 #include <sys/sched.h>
57 #include <sys/smp.h>
58 #include <sys/sysctl.h>
59 
60 #include <vm/vm.h>
61 #include <vm/vm_param.h>
62 #include <vm/pmap.h>
63 #include <vm/vm_kern.h>
64 #include <vm/vm_extern.h>
65 #include <vm/vm_page.h>
66 #include <vm/vm_phys.h>
67 
68 #include <x86/apicreg.h>
69 #include <machine/clock.h>
70 #include <machine/cputypes.h>
71 #include <machine/cpufunc.h>
72 #include <x86/mca.h>
73 #include <machine/md_var.h>
74 #include <machine/pcb.h>
75 #include <machine/psl.h>
76 #include <machine/smp.h>
77 #include <machine/specialreg.h>
78 #include <machine/tss.h>
79 #include <x86/ucode.h>
80 #include <machine/cpu.h>
81 #include <x86/init.h>
82 
83 #ifdef DEV_ACPI
84 #include <contrib/dev/acpica/include/acpi.h>
85 #include <dev/acpica/acpivar.h>
86 #endif
87 
88 #define WARMBOOT_TARGET		0
89 #define WARMBOOT_OFF		(KERNBASE + 0x0467)
90 #define WARMBOOT_SEG		(KERNBASE + 0x0469)
91 
92 #define CMOS_REG		(0x70)
93 #define CMOS_DATA		(0x71)
94 #define BIOS_RESET		(0x0f)
95 #define BIOS_WARM		(0x0a)
96 
97 #define GiB(v)			(v ## ULL << 30)
98 
99 #define	AP_BOOTPT_SZ		(PAGE_SIZE * 4)
100 
101 /* Temporary variables for init_secondary()  */
102 static char *doublefault_stack;
103 static char *mce_stack;
104 static char *nmi_stack;
105 static char *dbg_stack;
106 void *bootpcpu;
107 
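/*
 * Knobs consumed by the AP boot trampoline (mptramp.S): whether to enable
 * 5-level paging (LA57) and the NX bit before switching to long mode.
 */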
108 extern u_int mptramp_la57;
109 extern u_int mptramp_nx;
110 
111 /*
112  * Local data and functions.
113  */
114 
115 static int start_ap(int apic_id, vm_paddr_t boot_address);
116 
117 /*
118  * Initialize the IPI handlers and start up the APs.
119  */
120 void
121 cpu_mp_start(void)
122 {
123 	int i;
124 
125 	/* Initialize the logical ID to APIC ID table. */
126 	for (i = 0; i < MAXCPU; i++) {
127 		cpu_apic_ids[i] = -1;
128 	}
129 
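	/*
	 * When page-table isolation (PTI) is active, install the _pti entry
	 * points below; they switch to the kernel page tables before the
	 * handler body runs.
	 */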
130 	/* Install an inter-CPU IPI for cache and TLB invalidations. */
131 	setidt(IPI_INVLOP, pti ? IDTVEC(invlop_pti) : IDTVEC(invlop),
132 	    SDT_SYSIGT, SEL_KPL, 0);
133 
134 	/* Install an inter-CPU IPI for all-CPU rendezvous */
135 	setidt(IPI_RENDEZVOUS, pti ? IDTVEC(rendezvous_pti) :
136 	    IDTVEC(rendezvous), SDT_SYSIGT, SEL_KPL, 0);
137 
138 	/* Install generic inter-CPU IPI handler */
139 	setidt(IPI_BITMAP_VECTOR, pti ? IDTVEC(ipi_intr_bitmap_handler_pti) :
140 	    IDTVEC(ipi_intr_bitmap_handler), SDT_SYSIGT, SEL_KPL, 0);
141 
142 	/* Install an inter-CPU IPI for CPU stop/restart */
143 	setidt(IPI_STOP, pti ? IDTVEC(cpustop_pti) : IDTVEC(cpustop),
144 	    SDT_SYSIGT, SEL_KPL, 0);
145 
146 	/* Install an inter-CPU IPI for CPU suspend/resume */
147 	setidt(IPI_SUSPEND, pti ? IDTVEC(cpususpend_pti) : IDTVEC(cpususpend),
148 	    SDT_SYSIGT, SEL_KPL, 0);
149 
150 	/* Install an IPI for calling delayed SWI */
151 	setidt(IPI_SWI, pti ? IDTVEC(ipi_swi_pti) : IDTVEC(ipi_swi),
152 	    SDT_SYSIGT, SEL_KPL, 0);
153 
154 	/* Set boot_cpu_id if needed. */
155 	if (boot_cpu_id == -1) {
156 		boot_cpu_id = PCPU_GET(apic_id);
157 		cpu_info[boot_cpu_id].cpu_bsp = 1;
158 	} else
159 		KASSERT(boot_cpu_id == PCPU_GET(apic_id),
160 		    ("BSP's APIC ID doesn't match boot_cpu_id"));
161 
162 	/* Probe logical/physical core configuration. */
163 	topo_probe();
164 
165 	assign_cpu_ids();
166 
167 	mptramp_la57 = la57;
168 	mptramp_nx = pg_nx != 0;
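	/*
	 * The trampoline loads %cr3 while still executing 32-bit code, so
	 * the page-table root handed to it must lie below 4 GB.
	 */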
169 	MPASS(kernel_pmap->pm_cr3 < (1UL << 32));
170 	mptramp_pagetables = kernel_pmap->pm_cr3;
171 
172 	/* Start each Application Processor */
173 	start_all_aps();
174 
175 	set_interrupt_apic_ids();
176 
177 #if defined(DEV_ACPI) && MAXMEMDOM > 1
178 	acpi_pxm_set_cpu_locality();
179 #endif
180 }
181 
182 /*
183  * AP CPUs call this to initialize themselves.
184  */
185 void
186 init_secondary(void)
187 {
188 	struct pcpu *pc;
189 	struct nmi_pcpu *np;
190 	struct user_segment_descriptor *gdt;
191 	struct region_descriptor ap_gdt;
192 	u_int64_t cr0;
193 	int cpu, gsel_tss, x;
194 
195 	/* Set by the startup code for us to use */
196 	cpu = bootAP;
197 
198 	/* Update microcode before doing anything else. */
199 	ucode_load_ap(cpu);
200 
201 	/* Initialize the PCPU area. */
202 	pc = bootpcpu;
203 	pcpu_init(pc, cpu, sizeof(struct pcpu));
204 	dpcpu_init(dpcpu, cpu);
205 	pc->pc_apic_id = cpu_apic_ids[cpu];
206 	pc->pc_prvspace = pc;
207 	pc->pc_curthread = 0;
208 	pc->pc_tssp = &pc->pc_common_tss;
209 	pc->pc_rsp0 = 0;
210 	pc->pc_pti_rsp0 = (((vm_offset_t)&pc->pc_pti_stack +
211 	    PC_PTI_STACK_SZ * sizeof(uint64_t)) & ~0xful);
212 	gdt = pc->pc_gdt;
213 	pc->pc_tss = (struct system_segment_descriptor *)&gdt[GPROC0_SEL];
214 	pc->pc_fs32p = &gdt[GUFS32_SEL];
215 	pc->pc_gs32p = &gdt[GUGS32_SEL];
216 	pc->pc_ldt = (struct system_segment_descriptor *)&gdt[GUSERLDT_SEL];
217 	pc->pc_ucr3_load_mask = PMAP_UCR3_NOMASK;
218 	/* See comment in pmap_bootstrap(). */
219 	pc->pc_pcid_next = PMAP_PCID_KERN + 2;
220 	pc->pc_pcid_gen = 1;
221 
222 	pc->pc_smp_tlb_gen = 1;
223 
224 	/* Init tss */
225 	pc->pc_common_tss = __pcpu[0].pc_common_tss;
226 	pc->pc_common_tss.tss_iobase = sizeof(struct amd64tss) +
227 	    IOPERM_BITMAP_SIZE;
228 	pc->pc_common_tss.tss_rsp0 = 0;
229 
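	/*
	 * Each dedicated exception stack keeps a struct nmi_pcpu at its top;
	 * handlers running on these stacks use it to locate this CPU's pcpu
	 * area without relying on %gs being sane.
	 */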
230 	/* The doublefault stack runs on IST1. */
231 	np = ((struct nmi_pcpu *)&doublefault_stack[DBLFAULT_STACK_SIZE]) - 1;
232 	np->np_pcpu = (register_t)pc;
233 	pc->pc_common_tss.tss_ist1 = (long)np;
234 
235 	/* The NMI stack runs on IST2. */
236 	np = ((struct nmi_pcpu *)&nmi_stack[NMI_STACK_SIZE]) - 1;
237 	np->np_pcpu = (register_t)pc;
238 	pc->pc_common_tss.tss_ist2 = (long)np;
239 
240 	/* The MC# stack runs on IST3. */
241 	np = ((struct nmi_pcpu *)&mce_stack[MCE_STACK_SIZE]) - 1;
242 	np->np_pcpu = (register_t)pc;
243 	pc->pc_common_tss.tss_ist3 = (long)np;
244 
245 	/* The DB# stack runs on IST4. */
246 	np = ((struct nmi_pcpu *)&dbg_stack[DBG_STACK_SIZE]) - 1;
247 	np->np_pcpu = (register_t)pc;
248 	pc->pc_common_tss.tss_ist4 = (long)np;
249 
250 	/* Prepare private GDT */
251 	gdt_segs[GPROC0_SEL].ssd_base = (long)&pc->pc_common_tss;
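	/*
	 * System descriptors (the TSS at GPROC0_SEL and the LDT at
	 * GUSERLDT_SEL) are 16 bytes and occupy two GDT slots each; they are
	 * installed separately, so skip their slots in the copy below.
	 */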
252 	for (x = 0; x < NGDT; x++) {
253 		if (x != GPROC0_SEL && x != GPROC0_SEL + 1 &&
254 		    x != GUSERLDT_SEL && x != GUSERLDT_SEL + 1)
255 			ssdtosd(&gdt_segs[x], &gdt[x]);
256 	}
257 	ssdtosyssd(&gdt_segs[GPROC0_SEL],
258 	    (struct system_segment_descriptor *)&gdt[GPROC0_SEL]);
259 	ap_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
260 	ap_gdt.rd_base = (u_long)gdt;
261 	lgdt(&ap_gdt);			/* does magic intra-segment return */
262 
263 	wrmsr(MSR_FSBASE, 0);		/* User value */
264 	wrmsr(MSR_GSBASE, (uint64_t)pc);
265 	wrmsr(MSR_KGSBASE, 0);		/* User value */
266 	fix_cpuid();
267 
268 	lidt(&r_idt);
269 
270 	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
271 	ltr(gsel_tss);
272 
273 	/*
274 	 * Set to a known state:
275 	 * Set by mpboot.s: CR0_PG, CR0_PE
276 	 * Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
277 	 */
278 	cr0 = rcr0();
279 	cr0 &= ~(CR0_CD | CR0_NW | CR0_EM);
280 	load_cr0(cr0);
281 
282 	amd64_conf_fast_syscall();
283 
284 	/* signal our startup to the BSP. */
285 	mp_naps++;
286 
287 	/* Spin until the BSP releases the APs. */
288 	while (atomic_load_acq_int(&aps_ready) == 0)
289 		ia32_pause();
290 
291 	init_secondary_tail();
292 }
293 
294 /*******************************************************************
295  * local functions and data
296  */
297 
298 #ifdef NUMA
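/*
 * Re-home the statically allocated pcpu page for 'cpuid' into memory local
 * to 'domain': allocate a page there, copy the contents, and remap the pcpu
 * virtual address onto the new page.
 */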
299 static void
300 mp_realloc_pcpu(int cpuid, int domain)
301 {
302 	vm_page_t m;
303 	vm_offset_t oa, na;
304 
305 	oa = (vm_offset_t)&__pcpu[cpuid];
306 	if (vm_phys_domain(pmap_kextract(oa)) == domain)
307 		return;
308 	m = vm_page_alloc_domain(NULL, 0, domain,
309 	    VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ);
310 	if (m == NULL)
311 		return;
312 	na = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
313 	pagecopy((void *)oa, (void *)na);
314 	pmap_qenter((vm_offset_t)&__pcpu[cpuid], &m, 1);
315 	/* XXX old pcpu page leaked. */
316 }
317 #endif
318 
319 /*
320  * start each AP in our list
321  */
322 int
323 start_all_aps(void)
324 {
325 	vm_page_t m_boottramp, m_pml4, m_pdp, m_pd[4];
326 	pml5_entry_t old_pml45;
327 	pml4_entry_t *v_pml4;
328 	pdp_entry_t *v_pdp;
329 	pd_entry_t *v_pd;
330 	vm_paddr_t boot_address;
331 	u_int32_t mpbioswarmvec;
332 	int apic_id, cpu, domain, i;
333 	u_char mpbiosreason;
334 
335 	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);
336 
337 	MPASS(bootMP_size <= PAGE_SIZE);
338 	m_boottramp = vm_page_alloc_contig(NULL, 0, VM_ALLOC_NORMAL |
339 	    VM_ALLOC_NOBUSY | VM_ALLOC_NOOBJ, 1, 0,
340 	    (1ULL << 20), /* Trampoline should be below 1M for real mode */
341 	    PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
342 	boot_address = VM_PAGE_TO_PHYS(m_boottramp);
343 
344 	/* Create a transient 1:1 mapping of low 4G */
345 	if (la57) {
346 		m_pml4 = pmap_page_alloc_below_4g(true);
347 		v_pml4 = (pml4_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pml4));
348 	} else {
349 		v_pml4 = &kernel_pmap->pm_pmltop[0];
350 	}
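	/*
	 * Four page-directory pages of 2 MB PG_PS entries, linked under a
	 * single PDP page, identity-map the low 4 GB for the trampoline.
	 */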
351 	m_pdp = pmap_page_alloc_below_4g(true);
352 	v_pdp = (pdp_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pdp));
353 	m_pd[0] = pmap_page_alloc_below_4g(false);
354 	v_pd = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pd[0]));
355 	for (i = 0; i < NPDEPG; i++)
356 		v_pd[i] = (i << PDRSHIFT) | X86_PG_V | X86_PG_RW | X86_PG_A |
357 		    X86_PG_M | PG_PS;
358 	m_pd[1] = pmap_page_alloc_below_4g(false);
359 	v_pd = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pd[1]));
360 	for (i = 0; i < NPDEPG; i++)
361 		v_pd[i] = (NBPDP + (i << PDRSHIFT)) | X86_PG_V | X86_PG_RW |
362 		    X86_PG_A | X86_PG_M | PG_PS;
363 	m_pd[2] = pmap_page_alloc_below_4g(false);
364 	v_pd = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pd[2]));
365 	for (i = 0; i < NPDEPG; i++)
366 		v_pd[i] = (2UL * NBPDP + (i << PDRSHIFT)) | X86_PG_V |
367 		    X86_PG_RW | X86_PG_A | X86_PG_M | PG_PS;
368 	m_pd[3] = pmap_page_alloc_below_4g(false);
369 	v_pd = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pd[3]));
370 	for (i = 0; i < NPDEPG; i++)
371 		v_pd[i] = (3UL * NBPDP + (i << PDRSHIFT)) | X86_PG_V |
372 		    X86_PG_RW | X86_PG_A | X86_PG_M | PG_PS;
373 	v_pdp[0] = VM_PAGE_TO_PHYS(m_pd[0]) | X86_PG_V |
374 	    X86_PG_RW | X86_PG_A | X86_PG_M;
375 	v_pdp[1] = VM_PAGE_TO_PHYS(m_pd[1]) | X86_PG_V |
376 	    X86_PG_RW | X86_PG_A | X86_PG_M;
377 	v_pdp[2] = VM_PAGE_TO_PHYS(m_pd[2]) | X86_PG_V |
378 	    X86_PG_RW | X86_PG_A | X86_PG_M;
379 	v_pdp[3] = VM_PAGE_TO_PHYS(m_pd[3]) | X86_PG_V |
380 	    X86_PG_RW | X86_PG_A | X86_PG_M;
381 	old_pml45 = kernel_pmap->pm_pmltop[0];
382 	if (la57) {
383 		kernel_pmap->pm_pmltop[0] = VM_PAGE_TO_PHYS(m_pml4) |
384 		    X86_PG_V | X86_PG_RW | X86_PG_A | X86_PG_M;
385 	}
386 	v_pml4[0] = VM_PAGE_TO_PHYS(m_pdp) | X86_PG_V |
387 	    X86_PG_RW | X86_PG_A | X86_PG_M;
388 	pmap_invalidate_all(kernel_pmap);
389 
390 	/* copy the AP 1st level boot code */
391 	bcopy(mptramp_start, (void *)PHYS_TO_DMAP(boot_address), bootMP_size);
392 	if (bootverbose)
393 		printf("AP boot address %#lx\n", boot_address);
394 
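	/*
	 * On legacy BIOS boots, the warm-reset vector at 0:0x467 is pointed
	 * at the trampoline and the CMOS shutdown status byte is set to
	 * "warm start", so an AP taking the BIOS warm-boot path out of INIT
	 * lands in the startup code.  Both values are saved here and
	 * restored once the APs are up.
	 */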
395 	/* save the current value of the warm-start vector */
396 	if (!efi_boot)
397 		mpbioswarmvec = *((u_int32_t *) WARMBOOT_OFF);
398 	outb(CMOS_REG, BIOS_RESET);
399 	mpbiosreason = inb(CMOS_DATA);
400 
401 	/* setup a vector to our boot code */
402 	if (!efi_boot) {
403 		*((volatile u_short *)WARMBOOT_OFF) = WARMBOOT_TARGET;
404 		*((volatile u_short *)WARMBOOT_SEG) = (boot_address >> 4);
405 	}
406 	outb(CMOS_REG, BIOS_RESET);
407 	outb(CMOS_DATA, BIOS_WARM);	/* 'warm-start' */
408 
409 	/* Relocate pcpu areas to the correct domain. */
410 #ifdef NUMA
411 	if (vm_ndomains > 1)
412 		for (cpu = 1; cpu < mp_ncpus; cpu++) {
413 			apic_id = cpu_apic_ids[cpu];
414 			domain = acpi_pxm_get_cpu_locality(apic_id);
415 			mp_realloc_pcpu(cpu, domain);
416 		}
417 #endif
418 
419 	/* start each AP */
420 	domain = 0;
421 	for (cpu = 1; cpu < mp_ncpus; cpu++) {
422 		apic_id = cpu_apic_ids[cpu];
423 #ifdef NUMA
424 		if (vm_ndomains > 1)
425 			domain = acpi_pxm_get_cpu_locality(apic_id);
426 #endif
427 		/* allocate and set up an idle stack data page */
428 		bootstacks[cpu] = (void *)kmem_malloc(kstack_pages * PAGE_SIZE,
429 		    M_WAITOK | M_ZERO);
430 		doublefault_stack = (char *)kmem_malloc(DBLFAULT_STACK_SIZE,
431 		    M_WAITOK | M_ZERO);
432 		mce_stack = (char *)kmem_malloc(MCE_STACK_SIZE,
433 		    M_WAITOK | M_ZERO);
434 		nmi_stack = (char *)kmem_malloc_domainset(
435 		    DOMAINSET_PREF(domain), NMI_STACK_SIZE, M_WAITOK | M_ZERO);
436 		dbg_stack = (char *)kmem_malloc_domainset(
437 		    DOMAINSET_PREF(domain), DBG_STACK_SIZE, M_WAITOK | M_ZERO);
438 		dpcpu = (void *)kmem_malloc_domainset(DOMAINSET_PREF(domain),
439 		    DPCPU_SIZE, M_WAITOK | M_ZERO);
440 
441 		bootpcpu = &__pcpu[cpu];
442 		bootSTK = (char *)bootstacks[cpu] +
443 		    kstack_pages * PAGE_SIZE - 8;
444 		bootAP = cpu;
445 
446 		/* attempt to start the Application Processor */
447 		if (!start_ap(apic_id, boot_address)) {
448 			/* restore the warmstart vector */
449 			if (!efi_boot)
450 				*(u_int32_t *)WARMBOOT_OFF = mpbioswarmvec;
451 			panic("AP #%d (PHY# %d) failed!", cpu, apic_id);
452 		}
453 
454 		CPU_SET(cpu, &all_cpus);	/* record AP in CPU map */
455 	}
456 
457 	/* restore the warmstart vector */
458 	if (!efi_boot)
459 		*(u_int32_t *)WARMBOOT_OFF = mpbioswarmvec;
460 
461 	outb(CMOS_REG, BIOS_RESET);
462 	outb(CMOS_DATA, mpbiosreason);
463 
464 	/* Destroy transient 1:1 mapping */
465 	kernel_pmap->pm_pmltop[0] = old_pml45;
466 	invlpg(0);
467 	if (la57)
468 		vm_page_free(m_pml4);
469 	vm_page_free(m_pd[3]);
470 	vm_page_free(m_pd[2]);
471 	vm_page_free(m_pd[1]);
472 	vm_page_free(m_pd[0]);
473 	vm_page_free(m_pdp);
474 	vm_page_free(m_boottramp);
475 
476 	/* number of APs actually started */
477 	return (mp_naps);
478 }
479 
480 /*
481  * This function starts the AP (application processor) identified
482  * by the APIC ID 'physicalCpu'.  It does quite a "song and dance"
483  * to accomplish this.  This is necessary because of the nuances
484  * of the different hardware we might encounter.  It isn't pretty,
485  * but it seems to work.
486  */
487 static int
488 start_ap(int apic_id, vm_paddr_t boot_address)
489 {
490 	int vector, ms;
491 	int cpus;
492 
493 	/* The STARTUP IPI vector is the 4 KB page number of the trampoline. */
494 	vector = (boot_address >> 12) & 0xff;
495 
496 	/* used as a watchpoint to signal AP startup */
497 	cpus = mp_naps;
498 
499 	ipi_startup(apic_id, vector);
500 
501 	/* Wait up to 5 seconds for it to start. */
502 	for (ms = 0; ms < 5000; ms++) {
503 		if (mp_naps > cpus)
504 			return 1;	/* return SUCCESS */
505 		DELAY(1000);
506 	}
507 	return 0;		/* return FAILURE */
508 }
509 
510 /*
511  * Flush the TLB on other CPUs.
512  */
513 
514 /*
515  * Invalidation request.  PCPU pc_smp_tlb_op uses u_int instead of the
516  * enum type to avoid both namespace and ABI issues.
517  */
518 enum invl_op_codes {
519 	INVL_OP_TLB		= 1,
520 	INVL_OP_TLB_INVPCID	= 2,
521 	INVL_OP_TLB_INVPCID_PTI	= 3,
522 	INVL_OP_TLB_PCID	= 4,
523 	INVL_OP_PGRNG		= 5,
524 	INVL_OP_PGRNG_INVPCID	= 6,
525 	INVL_OP_PGRNG_PCID	= 7,
526 	INVL_OP_PG		= 8,
527 	INVL_OP_PG_INVPCID	= 9,
528 	INVL_OP_PG_PCID		= 10,
529 	INVL_OP_CACHE		= 11,
530 };
531 
532 /*
533  * These variables are initialized at startup to reflect how each of
534  * the different kinds of invalidations should be performed on the
535  * current machine and environment.
536  */
537 static enum invl_op_codes invl_op_tlb;
538 static enum invl_op_codes invl_op_pgrng;
539 static enum invl_op_codes invl_op_pg;
540 
541 /*
542  * Scoreboard of IPI completion notifications from target to IPI initiator.
543  *
544  * Each CPU can initiate a shootdown IPI independently of the others.
545  * The initiator enters a critical section, fills its local PCPU
546  * shootdown info (the pc_smp_tlb_ vars), and clears the scoreboard
547  * generation at location (cpu, my_cpuid) for each target cpu.  An IPI
548  * is then sent to all targets, which scan for zeroed scoreboard
549  * generation words.  Upon finding such a word, a target reads the
550  * shootdown data from the initiator's pcpu and sets the generation.
551  * Meanwhile, the initiator loops until every zeroed generation updates.
552  */
553 static uint32_t *invl_scoreboard;
554 
555 static void
556 invl_scoreboard_init(void *arg __unused)
557 {
558 	u_int i;
559 
560 	invl_scoreboard = malloc(sizeof(uint32_t) * (mp_maxid + 1) *
561 	    (mp_maxid + 1), M_DEVBUF, M_WAITOK);
562 	for (i = 0; i < (mp_maxid + 1) * (mp_maxid + 1); i++)
563 		invl_scoreboard[i] = 1;
564 
565 	if (pmap_pcid_enabled) {
566 		if (invpcid_works) {
567 			if (pti)
568 				invl_op_tlb = INVL_OP_TLB_INVPCID_PTI;
569 			else
570 				invl_op_tlb = INVL_OP_TLB_INVPCID;
571 			invl_op_pgrng = INVL_OP_PGRNG_INVPCID;
572 			invl_op_pg = INVL_OP_PG_INVPCID;
573 		} else {
574 			invl_op_tlb = INVL_OP_TLB_PCID;
575 			invl_op_pgrng = INVL_OP_PGRNG_PCID;
576 			invl_op_pg = INVL_OP_PG_PCID;
577 		}
578 	} else {
579 		invl_op_tlb = INVL_OP_TLB;
580 		invl_op_pgrng = INVL_OP_PGRNG;
581 		invl_op_pg = INVL_OP_PG;
582 	}
583 }
584 SYSINIT(invl_ops, SI_SUB_SMP, SI_ORDER_FIRST, invl_scoreboard_init, NULL);
585 
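/*
 * The scoreboard is an (mp_maxid + 1) x (mp_maxid + 1) array of generation
 * words: rows are indexed by target CPU and columns by initiator CPU.
 * invl_scoreboard_getcpu() returns a target's row, and
 * invl_scoreboard_slot() the calling initiator's cell within it.
 */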
586 static uint32_t *
587 invl_scoreboard_getcpu(u_int cpu)
588 {
589 	return (invl_scoreboard + cpu * (mp_maxid + 1));
590 }
591 
592 static uint32_t *
593 invl_scoreboard_slot(u_int cpu)
594 {
595 	return (invl_scoreboard_getcpu(cpu) + PCPU_GET(cpuid));
596 }
597 
598 /*
599  * Used by the pmap to request cache or TLB invalidation on local and
600  * remote processors.  Mask provides the set of remote CPUs that are
601  * to be signalled with the invalidation IPI.  As an optimization, the
602  * curcpu_cb callback is invoked on the calling CPU in a critical
603  * section while waiting for the remote CPUs to complete the operation.
604  *
605  * The callback function is called unconditionally on the caller's
606  * underlying processor, even when this processor is not set in the
607  * mask.  So, the callback function must be prepared to handle such
608  * spurious invocations.
609  *
610  * Interrupts must be enabled when calling the function once SMP has
611  * started, to avoid deadlock with other IPIs that are protected by the
612  * smp_ipi_mtx spinlock on the initiator side.
613  *
614  * Function must be called with the thread pinned, and it unpins on
615  * completion.
616  */
617 static void
618 smp_targeted_tlb_shootdown(cpuset_t mask, pmap_t pmap, vm_offset_t addr1,
619     vm_offset_t addr2, smp_invl_cb_t curcpu_cb, enum invl_op_codes op)
620 {
621 	cpuset_t other_cpus, mask1;
622 	uint32_t generation, *p_cpudone;
623 	int cpu;
624 	bool is_all;
625 
626 	/*
627 	 * It is not necessary to signal other CPUs while booting or
628 	 * when in the debugger.
629 	 */
630 	if (kdb_active || KERNEL_PANICKED() || !smp_started)
631 		goto local_cb;
632 
633 	KASSERT(curthread->td_pinned > 0, ("curthread not pinned"));
634 
635 	/*
636 	 * Check for other cpus.  Return if none.
637 	 */
638 	is_all = !CPU_CMP(&mask, &all_cpus);
639 	CPU_CLR(PCPU_GET(cpuid), &mask);
640 	if (CPU_EMPTY(&mask))
641 		goto local_cb;
642 
643 	/*
644 	 * The initiator must have interrupts enabled, which prevents
645 	 * non-invalidation IPIs that take the smp_ipi_mtx spinlock
646 	 * from deadlocking with us.  On the other hand, preemption
647 	 * must be disabled to pin the initiator to its instance of the
648 	 * pcpu pc_smp_tlb data and its scoreboard line.
649 	 */
650 	KASSERT((read_rflags() & PSL_I) != 0,
651 	    ("smp_targeted_tlb_shootdown: interrupts disabled"));
652 	critical_enter();
653 
654 	PCPU_SET(smp_tlb_addr1, addr1);
655 	PCPU_SET(smp_tlb_addr2, addr2);
656 	PCPU_SET(smp_tlb_pmap, pmap);
657 	generation = PCPU_GET(smp_tlb_gen);
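	/*
	 * Generation zero marks a pending request in the scoreboard, so
	 * skip it on wraparound.
	 */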
658 	if (++generation == 0)
659 		generation = 1;
660 	PCPU_SET(smp_tlb_gen, generation);
661 	PCPU_SET(smp_tlb_op, op);
662 	/* Fence between filling smp_tlb fields and clearing scoreboard. */
663 	atomic_thread_fence_rel();
664 
665 	mask1 = mask;
666 	while ((cpu = CPU_FFS(&mask1)) != 0) {
667 		cpu--;
668 		CPU_CLR(cpu, &mask1);
669 		KASSERT(*invl_scoreboard_slot(cpu) != 0,
670 		    ("IPI scoreboard is zero, initiator %d target %d",
671 		    PCPU_GET(cpuid), cpu));
672 		*invl_scoreboard_slot(cpu) = 0;
673 	}
674 
675 	/*
676 	 * IPI acts as a fence between writing to the scoreboard above
677 	 * (zeroing slot) and reading from it below (wait for
678 	 * acknowledgment).
679 	 */
680 	if (is_all) {
681 		ipi_all_but_self(IPI_INVLOP);
682 		other_cpus = all_cpus;
683 		CPU_CLR(PCPU_GET(cpuid), &other_cpus);
684 	} else {
685 		other_cpus = mask;
686 		ipi_selected(mask, IPI_INVLOP);
687 	}
688 	curcpu_cb(pmap, addr1, addr2);
689 	while ((cpu = CPU_FFS(&other_cpus)) != 0) {
690 		cpu--;
691 		CPU_CLR(cpu, &other_cpus);
692 		p_cpudone = invl_scoreboard_slot(cpu);
693 		while (atomic_load_int(p_cpudone) != generation)
694 			ia32_pause();
695 	}
696 
697 	/*
698 	 * Unpin before leaving critical section.  If the thread owes
699 	 * preemption, this allows scheduler to select thread on any
700 	 * CPU from its cpuset.
701 	 */
702 	sched_unpin();
703 	critical_exit();
704 
705 	return;
706 
707 local_cb:
708 	critical_enter();
709 	curcpu_cb(pmap, addr1, addr2);
710 	sched_unpin();
711 	critical_exit();
712 }
713 
714 void
715 smp_masked_invltlb(cpuset_t mask, pmap_t pmap, smp_invl_cb_t curcpu_cb)
716 {
717 	smp_targeted_tlb_shootdown(mask, pmap, 0, 0, curcpu_cb, invl_op_tlb);
718 #ifdef COUNT_XINVLTLB_HITS
719 	ipi_global++;
720 #endif
721 }
722 
723 void
724 smp_masked_invlpg(cpuset_t mask, vm_offset_t addr, pmap_t pmap,
725     smp_invl_cb_t curcpu_cb)
726 {
727 	smp_targeted_tlb_shootdown(mask, pmap, addr, 0, curcpu_cb, invl_op_pg);
728 #ifdef COUNT_XINVLTLB_HITS
729 	ipi_page++;
730 #endif
731 }
732 
733 void
734 smp_masked_invlpg_range(cpuset_t mask, vm_offset_t addr1, vm_offset_t addr2,
735     pmap_t pmap, smp_invl_cb_t curcpu_cb)
736 {
737 	smp_targeted_tlb_shootdown(mask, pmap, addr1, addr2, curcpu_cb,
738 	    invl_op_pgrng);
739 #ifdef COUNT_XINVLTLB_HITS
740 	ipi_range++;
741 	ipi_range_size += (addr2 - addr1) / PAGE_SIZE;
742 #endif
743 }
744 
745 void
746 smp_cache_flush(smp_invl_cb_t curcpu_cb)
747 {
748 	smp_targeted_tlb_shootdown(all_cpus, NULL, 0, 0, curcpu_cb,
749 	    INVL_OP_CACHE);
750 }
751 
752 /*
753  * Handlers for TLB related IPIs
754  */
755 static void
756 invltlb_handler(pmap_t smp_tlb_pmap)
757 {
758 #ifdef COUNT_XINVLTLB_HITS
759 	xhits_gbl[PCPU_GET(cpuid)]++;
760 #endif /* COUNT_XINVLTLB_HITS */
761 #ifdef COUNT_IPIS
762 	(*ipi_invltlb_counts[PCPU_GET(cpuid)])++;
763 #endif /* COUNT_IPIS */
764 
765 	if (smp_tlb_pmap == kernel_pmap)
766 		invltlb_glob();
767 	else
768 		invltlb();
769 }
770 
771 static void
772 invltlb_invpcid_handler(pmap_t smp_tlb_pmap)
773 {
774 	struct invpcid_descr d;
775 
776 #ifdef COUNT_XINVLTLB_HITS
777 	xhits_gbl[PCPU_GET(cpuid)]++;
778 #endif /* COUNT_XINVLTLB_HITS */
779 #ifdef COUNT_IPIS
780 	(*ipi_invltlb_counts[PCPU_GET(cpuid)])++;
781 #endif /* COUNT_IPIS */
782 
783 	d.pcid = smp_tlb_pmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid;
784 	d.pad = 0;
785 	d.addr = 0;
786 	invpcid(&d, smp_tlb_pmap == kernel_pmap ? INVPCID_CTXGLOB :
787 	    INVPCID_CTX);
788 }
789 
790 static void
791 invltlb_invpcid_pti_handler(pmap_t smp_tlb_pmap)
792 {
793 	struct invpcid_descr d;
794 
795 #ifdef COUNT_XINVLTLB_HITS
796 	xhits_gbl[PCPU_GET(cpuid)]++;
797 #endif /* COUNT_XINVLTLB_HITS */
798 #ifdef COUNT_IPIS
799 	(*ipi_invltlb_counts[PCPU_GET(cpuid)])++;
800 #endif /* COUNT_IPIS */
801 
802 	d.pcid = smp_tlb_pmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid;
803 	d.pad = 0;
804 	d.addr = 0;
805 	if (smp_tlb_pmap == kernel_pmap) {
806 		/*
807 		 * This invalidation actually needs to clear kernel
808 		 * mappings from the TLB in the current pmap, but
809 		 * since we were asked for the flush in the kernel
810 		 * pmap, achieve it by performing a global flush.
811 		 */
812 		invpcid(&d, INVPCID_CTXGLOB);
813 	} else {
814 		invpcid(&d, INVPCID_CTX);
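		/*
		 * If this CPU runs on the targeted pmap with PTI, make the
		 * next return to user mode reload the user %cr3 without
		 * CR3_PCID_SAVE, flushing stale user-PCID TLB entries too.
		 */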
815 		if (smp_tlb_pmap == PCPU_GET(curpmap) &&
816 		    smp_tlb_pmap->pm_ucr3 != PMAP_NO_CR3)
817 			PCPU_SET(ucr3_load_mask, ~CR3_PCID_SAVE);
818 	}
819 }
820 
821 static void
822 invltlb_pcid_handler(pmap_t smp_tlb_pmap)
823 {
824 	uint32_t pcid;
825 
826 #ifdef COUNT_XINVLTLB_HITS
827 	xhits_gbl[PCPU_GET(cpuid)]++;
828 #endif /* COUNT_XINVLTLB_HITS */
829 #ifdef COUNT_IPIS
830 	(*ipi_invltlb_counts[PCPU_GET(cpuid)])++;
831 #endif /* COUNT_IPIS */
832 
833 	if (smp_tlb_pmap == kernel_pmap) {
834 		invltlb_glob();
835 	} else {
836 		/*
837 		 * The current pmap might not be equal to
838 		 * smp_tlb_pmap.  The clearing of the pm_gen in
839 		 * pmap_invalidate_all() takes care of TLB
840 		 * invalidation when switching to the pmap on this
841 		 * CPU.
842 		 */
843 		if (smp_tlb_pmap == PCPU_GET(curpmap)) {
844 			pcid = smp_tlb_pmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid;
845 			load_cr3(smp_tlb_pmap->pm_cr3 | pcid);
846 			if (smp_tlb_pmap->pm_ucr3 != PMAP_NO_CR3)
847 				PCPU_SET(ucr3_load_mask, ~CR3_PCID_SAVE);
848 		}
849 	}
850 }
851 
852 static void
853 invlpg_handler(vm_offset_t smp_tlb_addr1)
854 {
855 #ifdef COUNT_XINVLTLB_HITS
856 	xhits_pg[PCPU_GET(cpuid)]++;
857 #endif /* COUNT_XINVLTLB_HITS */
858 #ifdef COUNT_IPIS
859 	(*ipi_invlpg_counts[PCPU_GET(cpuid)])++;
860 #endif /* COUNT_IPIS */
861 
862 	invlpg(smp_tlb_addr1);
863 }
864 
865 static void
866 invlpg_invpcid_handler(pmap_t smp_tlb_pmap, vm_offset_t smp_tlb_addr1)
867 {
868 	struct invpcid_descr d;
869 
870 #ifdef COUNT_XINVLTLB_HITS
871 	xhits_pg[PCPU_GET(cpuid)]++;
872 #endif /* COUNT_XINVLTLB_HITS */
873 #ifdef COUNT_IPIS
874 	(*ipi_invlpg_counts[PCPU_GET(cpuid)])++;
875 #endif /* COUNT_IPIS */
876 
877 	invlpg(smp_tlb_addr1);
878 	if (smp_tlb_pmap == PCPU_GET(curpmap) &&
879 	    smp_tlb_pmap->pm_ucr3 != PMAP_NO_CR3 &&
880 	    PCPU_GET(ucr3_load_mask) == PMAP_UCR3_NOMASK) {
881 		d.pcid = smp_tlb_pmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid |
882 		    PMAP_PCID_USER_PT;
883 		d.pad = 0;
884 		d.addr = smp_tlb_addr1;
885 		invpcid(&d, INVPCID_ADDR);
886 	}
887 }
888 
889 static void
890 invlpg_pcid_handler(pmap_t smp_tlb_pmap, vm_offset_t smp_tlb_addr1)
891 {
892 	uint64_t kcr3, ucr3;
893 	uint32_t pcid;
894 
895 #ifdef COUNT_XINVLTLB_HITS
896 	xhits_pg[PCPU_GET(cpuid)]++;
897 #endif /* COUNT_XINVLTLB_HITS */
898 #ifdef COUNT_IPIS
899 	(*ipi_invlpg_counts[PCPU_GET(cpuid)])++;
900 #endif /* COUNT_IPIS */
901 
902 	invlpg(smp_tlb_addr1);
903 	if (smp_tlb_pmap == PCPU_GET(curpmap) &&
904 	    (ucr3 = smp_tlb_pmap->pm_ucr3) != PMAP_NO_CR3 &&
905 	    PCPU_GET(ucr3_load_mask) == PMAP_UCR3_NOMASK) {
906 		pcid = smp_tlb_pmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid;
907 		kcr3 = smp_tlb_pmap->pm_cr3 | pcid | CR3_PCID_SAVE;
908 		ucr3 |= pcid | PMAP_PCID_USER_PT | CR3_PCID_SAVE;
909 		pmap_pti_pcid_invlpg(ucr3, kcr3, smp_tlb_addr1);
910 	}
911 }
912 
913 static void
914 invlrng_handler(vm_offset_t smp_tlb_addr1, vm_offset_t smp_tlb_addr2)
915 {
916 	vm_offset_t addr, addr2;
917 
918 #ifdef COUNT_XINVLTLB_HITS
919 	xhits_rng[PCPU_GET(cpuid)]++;
920 #endif /* COUNT_XINVLTLB_HITS */
921 #ifdef COUNT_IPIS
922 	(*ipi_invlrng_counts[PCPU_GET(cpuid)])++;
923 #endif /* COUNT_IPIS */
924 
925 	addr = smp_tlb_addr1;
926 	addr2 = smp_tlb_addr2;
927 	do {
928 		invlpg(addr);
929 		addr += PAGE_SIZE;
930 	} while (addr < addr2);
931 }
932 
933 static void
934 invlrng_invpcid_handler(pmap_t smp_tlb_pmap, vm_offset_t smp_tlb_addr1,
935     vm_offset_t smp_tlb_addr2)
936 {
937 	struct invpcid_descr d;
938 	vm_offset_t addr, addr2;
939 
940 #ifdef COUNT_XINVLTLB_HITS
941 	xhits_rng[PCPU_GET(cpuid)]++;
942 #endif /* COUNT_XINVLTLB_HITS */
943 #ifdef COUNT_IPIS
944 	(*ipi_invlrng_counts[PCPU_GET(cpuid)])++;
945 #endif /* COUNT_IPIS */
946 
947 	addr = smp_tlb_addr1;
948 	addr2 = smp_tlb_addr2;
949 	do {
950 		invlpg(addr);
951 		addr += PAGE_SIZE;
952 	} while (addr < addr2);
953 	if (smp_tlb_pmap == PCPU_GET(curpmap) &&
954 	    smp_tlb_pmap->pm_ucr3 != PMAP_NO_CR3 &&
955 	    PCPU_GET(ucr3_load_mask) == PMAP_UCR3_NOMASK) {
956 		d.pcid = smp_tlb_pmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid |
957 		    PMAP_PCID_USER_PT;
958 		d.pad = 0;
959 		d.addr = smp_tlb_addr1;
960 		do {
961 			invpcid(&d, INVPCID_ADDR);
962 			d.addr += PAGE_SIZE;
963 		} while (d.addr < addr2);
964 	}
965 }
966 
967 static void
968 invlrng_pcid_handler(pmap_t smp_tlb_pmap, vm_offset_t smp_tlb_addr1,
969     vm_offset_t smp_tlb_addr2)
970 {
971 	vm_offset_t addr, addr2;
972 	uint64_t kcr3, ucr3;
973 	uint32_t pcid;
974 
975 #ifdef COUNT_XINVLTLB_HITS
976 	xhits_rng[PCPU_GET(cpuid)]++;
977 #endif /* COUNT_XINVLTLB_HITS */
978 #ifdef COUNT_IPIS
979 	(*ipi_invlrng_counts[PCPU_GET(cpuid)])++;
980 #endif /* COUNT_IPIS */
981 
982 	addr = smp_tlb_addr1;
983 	addr2 = smp_tlb_addr2;
984 	do {
985 		invlpg(addr);
986 		addr += PAGE_SIZE;
987 	} while (addr < addr2);
988 	if (smp_tlb_pmap == PCPU_GET(curpmap) &&
989 	    (ucr3 = smp_tlb_pmap->pm_ucr3) != PMAP_NO_CR3 &&
990 	    PCPU_GET(ucr3_load_mask) == PMAP_UCR3_NOMASK) {
991 		pcid = smp_tlb_pmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid;
992 		kcr3 = smp_tlb_pmap->pm_cr3 | pcid | CR3_PCID_SAVE;
993 		ucr3 |= pcid | PMAP_PCID_USER_PT | CR3_PCID_SAVE;
994 		pmap_pti_pcid_invlrng(ucr3, kcr3, smp_tlb_addr1, addr2);
995 	}
996 }
997 
998 static void
999 invlcache_handler(void)
1000 {
1001 #ifdef COUNT_IPIS
1002 	(*ipi_invlcache_counts[PCPU_GET(cpuid)])++;
1003 #endif /* COUNT_IPIS */
1004 	wbinvd();
1005 }
1006 
1007 static void
1008 invlop_handler_one_req(enum invl_op_codes smp_tlb_op, pmap_t smp_tlb_pmap,
1009     vm_offset_t smp_tlb_addr1, vm_offset_t smp_tlb_addr2)
1010 {
1011 	switch (smp_tlb_op) {
1012 	case INVL_OP_TLB:
1013 		invltlb_handler(smp_tlb_pmap);
1014 		break;
1015 	case INVL_OP_TLB_INVPCID:
1016 		invltlb_invpcid_handler(smp_tlb_pmap);
1017 		break;
1018 	case INVL_OP_TLB_INVPCID_PTI:
1019 		invltlb_invpcid_pti_handler(smp_tlb_pmap);
1020 		break;
1021 	case INVL_OP_TLB_PCID:
1022 		invltlb_pcid_handler(smp_tlb_pmap);
1023 		break;
1024 	case INVL_OP_PGRNG:
1025 		invlrng_handler(smp_tlb_addr1, smp_tlb_addr2);
1026 		break;
1027 	case INVL_OP_PGRNG_INVPCID:
1028 		invlrng_invpcid_handler(smp_tlb_pmap, smp_tlb_addr1,
1029 		    smp_tlb_addr2);
1030 		break;
1031 	case INVL_OP_PGRNG_PCID:
1032 		invlrng_pcid_handler(smp_tlb_pmap, smp_tlb_addr1,
1033 		    smp_tlb_addr2);
1034 		break;
1035 	case INVL_OP_PG:
1036 		invlpg_handler(smp_tlb_addr1);
1037 		break;
1038 	case INVL_OP_PG_INVPCID:
1039 		invlpg_invpcid_handler(smp_tlb_pmap, smp_tlb_addr1);
1040 		break;
1041 	case INVL_OP_PG_PCID:
1042 		invlpg_pcid_handler(smp_tlb_pmap, smp_tlb_addr1);
1043 		break;
1044 	case INVL_OP_CACHE:
1045 		invlcache_handler();
1046 		break;
1047 	default:
1048 		__assert_unreachable();
1049 		break;
1050 	}
1051 }
1052 
1053 void
1054 invlop_handler(void)
1055 {
1056 	struct pcpu *initiator_pc;
1057 	pmap_t smp_tlb_pmap;
1058 	vm_offset_t smp_tlb_addr1, smp_tlb_addr2;
1059 	u_int initiator_cpu_id;
1060 	enum invl_op_codes smp_tlb_op;
1061 	uint32_t *scoreboard, smp_tlb_gen;
1062 
1063 	scoreboard = invl_scoreboard_getcpu(PCPU_GET(cpuid));
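	/*
	 * Scan our scoreboard row for zeroed (pending) slots.  Several
	 * initiators may post requests concurrently, and more may arrive
	 * while earlier ones are serviced, so keep rescanning until a full
	 * pass finds no pending slot.
	 */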
1064 	for (;;) {
1065 		for (initiator_cpu_id = 0; initiator_cpu_id <= mp_maxid;
1066 		    initiator_cpu_id++) {
1067 			if (atomic_load_int(&scoreboard[initiator_cpu_id]) == 0)
1068 				break;
1069 		}
1070 		if (initiator_cpu_id > mp_maxid)
1071 			break;
1072 		initiator_pc = cpuid_to_pcpu[initiator_cpu_id];
1073 
1074 		/*
1075 		 * This acquire fence and its corresponding release
1076 		 * fence in smp_targeted_tlb_shootdown() is between
1077 		 * reading zero scoreboard slot and accessing PCPU of
1078 		 * initiator for pc_smp_tlb values.
1079 		 */
1080 		atomic_thread_fence_acq();
1081 		smp_tlb_pmap = initiator_pc->pc_smp_tlb_pmap;
1082 		smp_tlb_addr1 = initiator_pc->pc_smp_tlb_addr1;
1083 		smp_tlb_addr2 = initiator_pc->pc_smp_tlb_addr2;
1084 		smp_tlb_op = initiator_pc->pc_smp_tlb_op;
1085 		smp_tlb_gen = initiator_pc->pc_smp_tlb_gen;
1086 
1087 		/*
1088 		 * Ensure that we do not make our scoreboard
1089 		 * notification visible to the initiator until the
1090 		 * pc_smp_tlb values are read.  The corresponding
1091 		 * fence is implicitly provided by the barrier in the
1092 		 * IPI send operation before the APIC ICR register
1093 		 * write.
1094 		 *
1095 		 * As an optimization, the request is acknowledged
1096 		 * before the actual invalidation is performed.  It is
1097 		 * safe because the target CPU cannot return to userspace
1098 		 * before the handler finishes.  Only an NMI can preempt the
1099 		 * handler, but the NMI would see the kernel handler frame
1100 		 * and not touch the not-yet-invalidated user page tables.
1101 		 */
1102 		atomic_thread_fence_acq();
1103 		atomic_store_int(&scoreboard[initiator_cpu_id], smp_tlb_gen);
1104 
1105 		invlop_handler_one_req(smp_tlb_op, smp_tlb_pmap, smp_tlb_addr1,
1106 		    smp_tlb_addr2);
1107 	}
1108 }
1109