/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1996, by Steve Passe
 * Copyright (c) 2003, by Peter Wemm
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the developer may NOT be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_cpu.h"
#include "opt_ddb.h"
#include "opt_kstack_pages.h"
#include "opt_sched.h"
#include "opt_smp.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cpuset.h>
#ifdef GPROF
#include <sys/gmon.h>
#endif
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

#include <x86/apicreg.h>
#include <machine/clock.h>
#include <machine/cputypes.h>
#include <machine/cpufunc.h>
#include <x86/mca.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/psl.h>
#include <machine/smp.h>
#include <machine/specialreg.h>
#include <machine/tss.h>
#include <x86/ucode.h>
#include <machine/cpu.h>
#include <x86/init.h>

#define WARMBOOT_TARGET		0
#define WARMBOOT_OFF		(KERNBASE + 0x0467)
#define WARMBOOT_SEG		(KERNBASE + 0x0469)

#define CMOS_REG		(0x70)
#define CMOS_DATA		(0x71)
#define BIOS_RESET		(0x0f)
#define BIOS_WARM		(0x0a)

#define GiB(v)			(v ## ULL << 30)

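/*
 * The AP trampoline needs three pages of initial page tables: one page
 * each for the PML4, PDP and PD built in native_start_all_aps() below.
 */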
#define	AP_BOOTPT_SZ		(PAGE_SIZE * 3)

extern	struct pcpu __pcpu[];

/* Temporary variables for init_secondary() */
char *doublefault_stack;
char *mce_stack;
char *nmi_stack;
char *dbg_stack;
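/*
 * These pointers are not per-CPU: native_start_all_aps() re-points them
 * at freshly allocated pages for each AP in turn, and the AP consumes
 * them in init_secondary() before the next AP is started.
 */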

/*
 * Local data and functions.
 */

static int	start_ap(int apic_id);

static bool
is_kernel_paddr(vm_paddr_t pa)
{

	return (pa >= trunc_2mpage(btext - KERNBASE) &&
	    pa < round_page(_end - KERNBASE));
}

static bool
is_mpboot_good(vm_paddr_t start, vm_paddr_t end)
{

	return (start + AP_BOOTPT_SZ <= GiB(4) && atop(end) < Maxmem);
}

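/*
 * physmap[] holds the physical memory map as (start, end) address pairs;
 * *physmap_idx is the index of the last pair's start entry.  For example
 * (values illustrative only):
 *
 *	physmap[0] = 0x0000000000001000	 physmap[1] = 0x000000000009f000
 *	physmap[2] = 0x0000000000100000	 physmap[3] = 0x00000000bfffe000
 *	physmap_idx = 2
 */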
/*
 * Calculate usable address in base memory for AP trampoline code.
 */
void
mp_bootaddress(vm_paddr_t *physmap, unsigned int *physmap_idx)
{
	vm_paddr_t start, end;
	unsigned int i;
	bool allocated;

	alloc_ap_trampoline(physmap, physmap_idx);

	/*
	 * Find a memory region big enough below the 4GB boundary to
	 * store the initial page tables.  Region must be mapped by
	 * the direct map.
	 *
	 * Note that it needs to be aligned to a page boundary.
	 */
	allocated = false;
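	/*
	 * Walk the physmap pairs from the top down.  Since i is unsigned,
	 * the decrement past entry 0 wraps around to a value larger than
	 * *physmap_idx, which is what terminates the loop.
	 */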
	for (i = *physmap_idx; i <= *physmap_idx; i -= 2) {
		/*
		 * First, try to chomp at the start of the physmap region.
		 * Kernel binary might claim it already.
		 */
		start = round_page(physmap[i]);
		end = start + AP_BOOTPT_SZ;
		if (start < end && end <= physmap[i + 1] &&
		    is_mpboot_good(start, end) &&
		    !is_kernel_paddr(start) && !is_kernel_paddr(end - 1)) {
			allocated = true;
			physmap[i] = end;
			break;
		}

		/*
		 * Second, try to chomp at the end.  Again, check
		 * against kernel.
		 */
		end = trunc_page(physmap[i + 1]);
		start = end - AP_BOOTPT_SZ;
		if (start < end && start >= physmap[i] &&
		    is_mpboot_good(start, end) &&
		    !is_kernel_paddr(start) && !is_kernel_paddr(end - 1)) {
			allocated = true;
			physmap[i + 1] = start;
			break;
		}
	}
	if (allocated) {
		mptramp_pagetables = start;
		if (physmap[i] == physmap[i + 1] && *physmap_idx != 0) {
			memmove(&physmap[i], &physmap[i + 2],
			    sizeof(*physmap) * (*physmap_idx - i + 2));
			*physmap_idx -= 2;
		}
	} else {
		mptramp_pagetables = trunc_page(boot_address) - AP_BOOTPT_SZ;
		if (bootverbose)
			printf(
180 "Cannot find enough space for the initial AP page tables, placing them at %#x",
			    mptramp_pagetables);
	}
}

/*
 * Initialize the IPI handlers and start up the APs.
 */
void
cpu_mp_start(void)
{
	int i;

	/* Initialize the logical ID to APIC ID table. */
	for (i = 0; i < MAXCPU; i++) {
		cpu_apic_ids[i] = -1;
		cpu_ipi_pending[i] = 0;
	}

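	/*
	 * Each IPI below has a regular and a "_pti" entry point.  The
	 * _pti variants are installed when page-table isolation (pti,
	 * the Meltdown mitigation) is enabled, so that the handlers are
	 * entered through a trampoline reachable from the user page
	 * tables.
	 */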
	/* Install an inter-CPU IPI for TLB invalidation */
	if (pmap_pcid_enabled) {
		if (invpcid_works) {
			setidt(IPI_INVLTLB, pti ?
			    IDTVEC(invltlb_invpcid_pti_pti) :
			    IDTVEC(invltlb_invpcid_nopti), SDT_SYSIGT,
			    SEL_KPL, 0);
			setidt(IPI_INVLPG, pti ? IDTVEC(invlpg_invpcid_pti) :
			    IDTVEC(invlpg_invpcid), SDT_SYSIGT, SEL_KPL, 0);
			setidt(IPI_INVLRNG, pti ? IDTVEC(invlrng_invpcid_pti) :
			    IDTVEC(invlrng_invpcid), SDT_SYSIGT, SEL_KPL, 0);
		} else {
			setidt(IPI_INVLTLB, pti ? IDTVEC(invltlb_pcid_pti) :
			    IDTVEC(invltlb_pcid), SDT_SYSIGT, SEL_KPL, 0);
			setidt(IPI_INVLPG, pti ? IDTVEC(invlpg_pcid_pti) :
			    IDTVEC(invlpg_pcid), SDT_SYSIGT, SEL_KPL, 0);
			setidt(IPI_INVLRNG, pti ? IDTVEC(invlrng_pcid_pti) :
			    IDTVEC(invlrng_pcid), SDT_SYSIGT, SEL_KPL, 0);
		}
	} else {
		setidt(IPI_INVLTLB, pti ? IDTVEC(invltlb_pti) : IDTVEC(invltlb),
		    SDT_SYSIGT, SEL_KPL, 0);
		setidt(IPI_INVLPG, pti ? IDTVEC(invlpg_pti) : IDTVEC(invlpg),
		    SDT_SYSIGT, SEL_KPL, 0);
		setidt(IPI_INVLRNG, pti ? IDTVEC(invlrng_pti) : IDTVEC(invlrng),
		    SDT_SYSIGT, SEL_KPL, 0);
	}

	/* Install an inter-CPU IPI for cache invalidation. */
	setidt(IPI_INVLCACHE, pti ? IDTVEC(invlcache_pti) : IDTVEC(invlcache),
	    SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for all-CPU rendezvous */
	setidt(IPI_RENDEZVOUS, pti ? IDTVEC(rendezvous_pti) :
	    IDTVEC(rendezvous), SDT_SYSIGT, SEL_KPL, 0);

	/* Install generic inter-CPU IPI handler */
	setidt(IPI_BITMAP_VECTOR, pti ? IDTVEC(ipi_intr_bitmap_handler_pti) :
	    IDTVEC(ipi_intr_bitmap_handler), SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for CPU stop/restart */
	setidt(IPI_STOP, pti ? IDTVEC(cpustop_pti) : IDTVEC(cpustop),
	    SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for CPU suspend/resume */
	setidt(IPI_SUSPEND, pti ? IDTVEC(cpususpend_pti) : IDTVEC(cpususpend),
	    SDT_SYSIGT, SEL_KPL, 0);

	/* Set boot_cpu_id if needed. */
	if (boot_cpu_id == -1) {
		boot_cpu_id = PCPU_GET(apic_id);
		cpu_info[boot_cpu_id].cpu_bsp = 1;
	} else
		KASSERT(boot_cpu_id == PCPU_GET(apic_id),
		    ("BSP's APIC ID doesn't match boot_cpu_id"));

	/* Probe logical/physical core configuration. */
	topo_probe();

	assign_cpu_ids();

	/* Start each Application Processor */
	init_ops.start_all_aps();

	set_interrupt_apic_ids();
}

/*
 * AP CPUs call this to initialize themselves.
 */
void
init_secondary(void)
{
	struct pcpu *pc;
	struct nmi_pcpu *np;
	u_int64_t cr0;
	int cpu, gsel_tss, x;
	struct region_descriptor ap_gdt;

	/* Set by the startup code for us to use */
	cpu = bootAP;

	/* Update microcode before doing anything else. */
	ucode_load_ap(cpu);

	/* Init tss */
	common_tss[cpu] = common_tss[0];
	common_tss[cpu].tss_iobase = sizeof(struct amd64tss) +
	    IOPERM_BITMAP_SIZE;
	common_tss[cpu].tss_ist1 = (long)&doublefault_stack[PAGE_SIZE];

	/* The NMI stack runs on IST2. */
	np = ((struct nmi_pcpu *) &nmi_stack[PAGE_SIZE]) - 1;
	common_tss[cpu].tss_ist2 = (long) np;

	/* The MC# stack runs on IST3. */
	np = ((struct nmi_pcpu *) &mce_stack[PAGE_SIZE]) - 1;
	common_tss[cpu].tss_ist3 = (long) np;

	/* The DB# stack runs on IST4. */
	np = ((struct nmi_pcpu *) &dbg_stack[PAGE_SIZE]) - 1;
	common_tss[cpu].tss_ist4 = (long) np;

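	/*
	 * Each CPU gets its own NGDT-entry slice of gdt[].  GPROC0 (the
	 * TSS descriptor) and GUSERLDT are skipped in the copy loop below
	 * because they are 16-byte system descriptors occupying two
	 * slots each: the TSS is installed separately just after the
	 * loop, and the user LDT, if any, is set up later.
	 */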
	/* Prepare private GDT */
	gdt_segs[GPROC0_SEL].ssd_base = (long) &common_tss[cpu];
	for (x = 0; x < NGDT; x++) {
		if (x != GPROC0_SEL && x != (GPROC0_SEL + 1) &&
		    x != GUSERLDT_SEL && x != (GUSERLDT_SEL + 1))
			ssdtosd(&gdt_segs[x], &gdt[NGDT * cpu + x]);
	}
	ssdtosyssd(&gdt_segs[GPROC0_SEL],
	    (struct system_segment_descriptor *)&gdt[NGDT * cpu + GPROC0_SEL]);
	ap_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
	ap_gdt.rd_base = (long) &gdt[NGDT * cpu];
	lgdt(&ap_gdt);			/* does magic intra-segment return */

	/* Get per-cpu data */
	pc = &__pcpu[cpu];

	/* prime data page for it to use */
	pcpu_init(pc, cpu, sizeof(struct pcpu));
	dpcpu_init(dpcpu, cpu);
	pc->pc_apic_id = cpu_apic_ids[cpu];
	pc->pc_prvspace = pc;
	pc->pc_curthread = 0;
	pc->pc_tssp = &common_tss[cpu];
	pc->pc_commontssp = &common_tss[cpu];
	pc->pc_rsp0 = 0;
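	/*
	 * The PTI trampoline stack pointer is the top of pc_pti_stack,
	 * aligned down to a 16-byte boundary by the ~0xf mask.
	 */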
	pc->pc_pti_rsp0 = (((vm_offset_t)&pc->pc_pti_stack +
	    PC_PTI_STACK_SZ * sizeof(uint64_t)) & ~0xful);
	pc->pc_tss = (struct system_segment_descriptor *)&gdt[NGDT * cpu +
	    GPROC0_SEL];
	pc->pc_fs32p = &gdt[NGDT * cpu + GUFS32_SEL];
	pc->pc_gs32p = &gdt[NGDT * cpu + GUGS32_SEL];
	pc->pc_ldt = (struct system_segment_descriptor *)&gdt[NGDT * cpu +
	    GUSERLDT_SEL];
	/* See comment in pmap_bootstrap(). */
	pc->pc_pcid_next = PMAP_PCID_KERN + 2;
	pc->pc_pcid_gen = 1;
	common_tss[cpu].tss_rsp0 = 0;

	/* Save the per-cpu pointer for use by the NMI handler. */
	np = ((struct nmi_pcpu *) &nmi_stack[PAGE_SIZE]) - 1;
	np->np_pcpu = (register_t) pc;

	/* Save the per-cpu pointer for use by the MC# handler. */
	np = ((struct nmi_pcpu *) &mce_stack[PAGE_SIZE]) - 1;
	np->np_pcpu = (register_t) pc;

	/* Save the per-cpu pointer for use by the DB# handler. */
	np = ((struct nmi_pcpu *) &dbg_stack[PAGE_SIZE]) - 1;
	np->np_pcpu = (register_t) pc;

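	/*
	 * GSBASE points %gs at the pcpu area while in the kernel;
	 * KGSBASE holds the value that swapgs will install on the next
	 * return to user mode.  Both are pointed at the pcpu area here;
	 * KGSBASE is replaced with the user's value once user code runs
	 * (hence the XXX below).
	 */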
	wrmsr(MSR_FSBASE, 0);		/* User value */
	wrmsr(MSR_GSBASE, (u_int64_t)pc);
	wrmsr(MSR_KGSBASE, (u_int64_t)pc);	/* XXX User value while we're in the kernel */
	fix_cpuid();

	lidt(&r_idt);

	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	ltr(gsel_tss);

	/*
	 * Set to a known state:
	 * Set by mpboot.s: CR0_PG, CR0_PE
	 * Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
	 */
	cr0 = rcr0();
	cr0 &= ~(CR0_CD | CR0_NW | CR0_EM);
	load_cr0(cr0);

	amd64_conf_fast_syscall();

	/* signal our startup to the BSP. */
	mp_naps++;

	/* Spin until the BSP releases the APs. */
	while (atomic_load_acq_int(&aps_ready) == 0)
		ia32_pause();

	init_secondary_tail();
}

/*******************************************************************
 * local functions and data
 */

/*
 * start each AP in our list
 */
int
native_start_all_aps(void)
{
	u_int64_t *pt4, *pt3, *pt2;
	u_int32_t mpbioswarmvec;
	int apic_id, cpu, i;
	u_char mpbiosreason;

	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);

	/* copy the AP 1st level boot code */
	bcopy(mptramp_start, (void *)PHYS_TO_DMAP(boot_address), bootMP_size);

	/* Locate the page tables; they'll be below the trampoline. */
	pt4 = (uint64_t *)PHYS_TO_DMAP(mptramp_pagetables);
	pt3 = pt4 + (PAGE_SIZE) / sizeof(u_int64_t);
	pt2 = pt3 + (PAGE_SIZE) / sizeof(u_int64_t);

	/* Create the initial 1GB replicated page tables */
	for (i = 0; i < 512; i++) {
		/* Each slot of the level 4 pages points to the same level 3 page */
		pt4[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables + PAGE_SIZE);
		pt4[i] |= PG_V | PG_RW | PG_U;

		/* Each slot of the level 3 pages points to the same level 2 page */
		pt3[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables + (2 * PAGE_SIZE));
		pt3[i] |= PG_V | PG_RW | PG_U;

		/* The level 2 page slots are mapped with 2MB pages for 1GB. */
		pt2[i] = i * (2 * 1024 * 1024);
		pt2[i] |= PG_V | PG_RW | PG_PS | PG_U;
	}
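
	/*
	 * With every PML4 and PDP slot pointing at the same next-level
	 * page, any virtual address resolves into the low 1GB of
	 * physical memory.  The trampoline can therefore enable paging
	 * while still running at its physical address, and can also
	 * reach the kernel's high (KERNBASE) addresses before the real
	 * page tables are loaded.
	 */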

	/* save the current value of the warm-start vector */
	mpbioswarmvec = *((u_int32_t *) WARMBOOT_OFF);
	outb(CMOS_REG, BIOS_RESET);
	mpbiosreason = inb(CMOS_DATA);

	/* setup a vector to our boot code */
	*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
	*((volatile u_short *) WARMBOOT_SEG) = (boot_address >> 4);
	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, BIOS_WARM);	/* 'warm-start' */
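
	/*
	 * Writing BIOS_WARM to the CMOS reset-status byte and pointing
	 * the warm-boot vector at 40:67 to the trampoline follows the MP
	 * specification's warm-reset convention; it covers processors
	 * that come out of INIT via the BIOS warm-boot path instead of
	 * responding to the STARTUP IPI directly.
	 */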

	/* start each AP */
	for (cpu = 1; cpu < mp_ncpus; cpu++) {
		apic_id = cpu_apic_ids[cpu];

		/* allocate and set up an idle stack data page */
		bootstacks[cpu] = (void *)kmem_malloc(kstack_pages * PAGE_SIZE,
		    M_WAITOK | M_ZERO);
		doublefault_stack = (char *)kmem_malloc(PAGE_SIZE, M_WAITOK |
		    M_ZERO);
		mce_stack = (char *)kmem_malloc(PAGE_SIZE, M_WAITOK | M_ZERO);
		nmi_stack = (char *)kmem_malloc(PAGE_SIZE, M_WAITOK | M_ZERO);
		dbg_stack = (char *)kmem_malloc(PAGE_SIZE, M_WAITOK | M_ZERO);
		dpcpu = (void *)kmem_malloc(DPCPU_SIZE, M_WAITOK | M_ZERO);

		bootSTK = (char *)bootstacks[cpu] + kstack_pages * PAGE_SIZE - 8;
		bootAP = cpu;

		/* attempt to start the Application Processor */
		if (!start_ap(apic_id)) {
			/* restore the warmstart vector */
			*(u_int32_t *) WARMBOOT_OFF = mpbioswarmvec;
			panic("AP #%d (PHY# %d) failed!", cpu, apic_id);
		}

		CPU_SET(cpu, &all_cpus);	/* record AP in CPU map */
	}

	/* restore the warmstart vector */
	*(u_int32_t *) WARMBOOT_OFF = mpbioswarmvec;

	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, mpbiosreason);

	/* number of APs actually started */
	return (mp_naps);
}

/*
 * This function starts the AP (application processor) identified
 * by the APIC ID 'apic_id'.  It does quite a "song and dance"
 * to accomplish this.  This is necessary because of the nuances
 * of the different hardware we might encounter.  It isn't pretty,
 * but it seems to work.
 */
static int
start_ap(int apic_id)
{
	int vector, ms;
	int cpus;

	/*
	 * Calculate the STARTUP IPI vector: the AP begins execution in
	 * real mode at physical address vector << 12, which must equal
	 * boot_address.
	 */
	vector = (boot_address >> 12) & 0xff;

	/* used as a watchpoint to signal AP startup */
	cpus = mp_naps;

	ipi_startup(apic_id, vector);

	/* Wait up to 5 seconds for it to start. */
	for (ms = 0; ms < 5000; ms++) {
		if (mp_naps > cpus)
			return (1);	/* return SUCCESS */
		DELAY(1000);
	}
	return (0);		/* return FAILURE */
}

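/*
 * TLB shootdown IPI handlers.  The initiating CPU publishes its request
 * in the smp_tlb_* globals and bumps smp_tlb_generation; each handler
 * below performs the requested invalidation and then posts the observed
 * generation to its per-CPU smp_tlb_done, which the initiator polls to
 * detect completion (see smp_targeted_tlb_shootdown()).
 */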
void
invltlb_invpcid_handler(void)
{
	struct invpcid_descr d;
	uint32_t generation;

#ifdef COUNT_XINVLTLB_HITS
	xhits_gbl[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invltlb_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	generation = smp_tlb_generation;
	d.pcid = smp_tlb_pmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid;
	d.pad = 0;
	d.addr = 0;
	invpcid(&d, smp_tlb_pmap == kernel_pmap ? INVPCID_CTXGLOB :
	    INVPCID_CTX);
	PCPU_SET(smp_tlb_done, generation);
}

void
invltlb_invpcid_pti_handler(void)
{
	struct invpcid_descr d;
	uint32_t generation;

#ifdef COUNT_XINVLTLB_HITS
	xhits_gbl[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invltlb_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	generation = smp_tlb_generation;
	d.pcid = smp_tlb_pmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid;
	d.pad = 0;
	d.addr = 0;
	if (smp_tlb_pmap == kernel_pmap) {
		/*
		 * This invalidation actually needs to clear kernel
		 * mappings from the TLB in the current pmap, but
		 * since we were asked for the flush in the kernel
		 * pmap, achieve it by performing a global flush.
		 */
		invpcid(&d, INVPCID_CTXGLOB);
	} else {
		invpcid(&d, INVPCID_CTX);
		d.pcid |= PMAP_PCID_USER_PT;
		invpcid(&d, INVPCID_CTX);
	}
	PCPU_SET(smp_tlb_done, generation);
}

void
invltlb_pcid_handler(void)
{
	uint64_t kcr3, ucr3;
	uint32_t generation, pcid;

#ifdef COUNT_XINVLTLB_HITS
	xhits_gbl[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invltlb_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	generation = smp_tlb_generation;	/* Overlap with serialization */
	if (smp_tlb_pmap == kernel_pmap) {
		invltlb_glob();
	} else {
		/*
		 * The current pmap might not be equal to
		 * smp_tlb_pmap.  The clearing of the pm_gen in
		 * pmap_invalidate_all() takes care of TLB
		 * invalidation when switching to the pmap on this
		 * CPU.
		 */
		if (PCPU_GET(curpmap) == smp_tlb_pmap) {
			pcid = smp_tlb_pmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid;
			kcr3 = smp_tlb_pmap->pm_cr3 | pcid;
			ucr3 = smp_tlb_pmap->pm_ucr3;
			if (ucr3 != PMAP_NO_CR3) {
				ucr3 |= PMAP_PCID_USER_PT | pcid;
				pmap_pti_pcid_invalidate(ucr3, kcr3);
			} else
				load_cr3(kcr3);
		}
	}
	PCPU_SET(smp_tlb_done, generation);
}

void
invlpg_invpcid_handler(void)
{
	struct invpcid_descr d;
	uint32_t generation;

#ifdef COUNT_XINVLTLB_HITS
	xhits_pg[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlpg_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	generation = smp_tlb_generation;	/* Overlap with serialization */
	invlpg(smp_tlb_addr1);
	if (smp_tlb_pmap->pm_ucr3 != PMAP_NO_CR3) {
		d.pcid = smp_tlb_pmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid |
		    PMAP_PCID_USER_PT;
		d.pad = 0;
		d.addr = smp_tlb_addr1;
		invpcid(&d, INVPCID_ADDR);
	}
	PCPU_SET(smp_tlb_done, generation);
}

void
invlpg_pcid_handler(void)
{
	uint64_t kcr3, ucr3;
	uint32_t generation;
	uint32_t pcid;

#ifdef COUNT_XINVLTLB_HITS
	xhits_pg[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlpg_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	generation = smp_tlb_generation;	/* Overlap with serialization */
	invlpg(smp_tlb_addr1);
	if (smp_tlb_pmap == PCPU_GET(curpmap) &&
	    (ucr3 = smp_tlb_pmap->pm_ucr3) != PMAP_NO_CR3) {
		pcid = smp_tlb_pmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid;
		kcr3 = smp_tlb_pmap->pm_cr3 | pcid | CR3_PCID_SAVE;
		ucr3 |= pcid | PMAP_PCID_USER_PT | CR3_PCID_SAVE;
		pmap_pti_pcid_invlpg(ucr3, kcr3, smp_tlb_addr1);
	}
	PCPU_SET(smp_tlb_done, generation);
}

void
invlrng_invpcid_handler(void)
{
	struct invpcid_descr d;
	vm_offset_t addr, addr2;
	uint32_t generation;

#ifdef COUNT_XINVLTLB_HITS
	xhits_rng[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlrng_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	addr = smp_tlb_addr1;
	addr2 = smp_tlb_addr2;
	generation = smp_tlb_generation;	/* Overlap with serialization */
	do {
		invlpg(addr);
		addr += PAGE_SIZE;
	} while (addr < addr2);
	if (smp_tlb_pmap->pm_ucr3 != PMAP_NO_CR3) {
		d.pcid = smp_tlb_pmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid |
		    PMAP_PCID_USER_PT;
		d.pad = 0;
		d.addr = smp_tlb_addr1;
		do {
			invpcid(&d, INVPCID_ADDR);
			d.addr += PAGE_SIZE;
		} while (d.addr < addr2);
	}
	PCPU_SET(smp_tlb_done, generation);
}

void
invlrng_pcid_handler(void)
{
	vm_offset_t addr, addr2;
	uint64_t kcr3, ucr3;
	uint32_t generation;
	uint32_t pcid;

#ifdef COUNT_XINVLTLB_HITS
	xhits_rng[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlrng_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	addr = smp_tlb_addr1;
	addr2 = smp_tlb_addr2;
	generation = smp_tlb_generation;	/* Overlap with serialization */
	do {
		invlpg(addr);
		addr += PAGE_SIZE;
	} while (addr < addr2);
	if (smp_tlb_pmap == PCPU_GET(curpmap) &&
	    (ucr3 = smp_tlb_pmap->pm_ucr3) != PMAP_NO_CR3) {
		pcid = smp_tlb_pmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid;
		kcr3 = smp_tlb_pmap->pm_cr3 | pcid | CR3_PCID_SAVE;
		ucr3 |= pcid | PMAP_PCID_USER_PT | CR3_PCID_SAVE;
		pmap_pti_pcid_invlrng(ucr3, kcr3, smp_tlb_addr1, addr2);
	}
	PCPU_SET(smp_tlb_done, generation);
}