xref: /freebsd/sys/i386/i386/mp_machdep.c (revision 315ee00f)
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 1996, by Steve Passe
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the developer may NOT be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "opt_acpi.h"
#include "opt_apic.h"
#include "opt_cpu.h"
#include "opt_kstack_pages.h"
#include "opt_pmap.h"
#include "opt_sched.h"
#include "opt_smp.h"

#if !defined(lint)
#if !defined(SMP)
#error How did you get here?
#endif

#ifndef DEV_APIC
#error The apic device is required for SMP, add "device apic" to your config file.
#endif
#endif /* not lint */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cons.h>	/* cngetc() */
#include <sys/cpuset.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

#include <x86/apicreg.h>
#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <x86/mca.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/psl.h>
#include <machine/smp.h>
#include <machine/specialreg.h>
#include <x86/ucode.h>

#ifdef DEV_ACPI
#include <contrib/dev/acpica/include/acpi.h>
#include <dev/acpica/acpivar.h>
#endif

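/*
 * The BIOS warm-boot vector is a real-mode far pointer kept at physical
 * 0x467 (offset) and 0x469 (segment); WARMBOOT_OFF/WARMBOOT_SEG reach it
 * through the kernel's low-memory mapping.  Writing BIOS_WARM into the CMOS
 * shutdown status byte (index BIOS_RESET) makes the BIOS bypass POST on the
 * next AP reset and jump through that vector, which start_all_aps() points
 * at the trampoline.
 */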
#define WARMBOOT_TARGET		0
#define WARMBOOT_OFF		(PMAP_MAP_LOW + 0x0467)
#define WARMBOOT_SEG		(PMAP_MAP_LOW + 0x0469)

#define CMOS_REG		(0x70)
#define CMOS_DATA		(0x71)
#define BIOS_RESET		(0x0f)
#define BIOS_WARM		(0x0a)

/*
 * This code MUST be enabled here and in mpboot.s.
 * It follows the very early stages of AP boot by placing values in CMOS RAM.
 * It is NORMALLY never needed, hence the primitive method of enabling it.
 *
#define CHECK_POINTS
 */

#if defined(CHECK_POINTS)
#define CHECK_READ(A)	 (outb(CMOS_REG, (A)), inb(CMOS_DATA))
#define CHECK_WRITE(A,D) (outb(CMOS_REG, (A)), outb(CMOS_DATA, (D)))

#define CHECK_INIT(D);				\
	CHECK_WRITE(0x34, (D));			\
	CHECK_WRITE(0x35, (D));			\
	CHECK_WRITE(0x36, (D));			\
	CHECK_WRITE(0x37, (D));			\
	CHECK_WRITE(0x38, (D));			\
	CHECK_WRITE(0x39, (D));

#define CHECK_PRINT(S);				\
	printf("%s: %d, %d, %d, %d, %d, %d\n",	\
	   (S),					\
	   CHECK_READ(0x34),			\
	   CHECK_READ(0x35),			\
	   CHECK_READ(0x36),			\
	   CHECK_READ(0x37),			\
	   CHECK_READ(0x38),			\
	   CHECK_READ(0x39));

#else				/* CHECK_POINTS */

#define CHECK_INIT(D)
#define CHECK_PRINT(S)
#define CHECK_WRITE(A, D)

#endif				/* CHECK_POINTS */

/*
 * Local data and functions.
 */

static void	install_ap_tramp(void);
static int	start_all_aps(void);
static int	start_ap(int apic_id);

static char *ap_copyout_buf;
static char *ap_tramp_stack_base;

unsigned int boot_address;

#define MiB(v)	(v ## ULL << 20)

/* Allocate memory for the AP trampoline. */
void
alloc_ap_trampoline(vm_paddr_t *physmap, unsigned int *physmap_idx)
{
	unsigned int i;
	bool allocated;

	allocated = false;
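	/*
	 * Walk the physmap (start, end) pairs from the top down.  'i' is
	 * unsigned, so stepping it below zero wraps around and makes it
	 * larger than *physmap_idx, which terminates the loop.
	 */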
	for (i = *physmap_idx; i <= *physmap_idx; i -= 2) {
		/*
		 * Find a memory region big enough and below the 1MB boundary
		 * for the trampoline code.
		 * NB: needs to be page aligned.
		 */
		if (physmap[i] >= MiB(1) ||
		    (trunc_page(physmap[i + 1]) - round_page(physmap[i])) <
		    round_page(bootMP_size))
			continue;

		allocated = true;
		/*
		 * Try to steal from the end of the region to mimic previous
		 * behaviour; otherwise fall back to stealing from the start.
		 */
		if (physmap[i + 1] < MiB(1)) {
			boot_address = trunc_page(physmap[i + 1]);
			if ((physmap[i + 1] - boot_address) < bootMP_size)
				boot_address -= round_page(bootMP_size);
			physmap[i + 1] = boot_address;
		} else {
			boot_address = round_page(physmap[i]);
			physmap[i] = boot_address + round_page(bootMP_size);
		}
		if (physmap[i] == physmap[i + 1] && *physmap_idx != 0) {
			memmove(&physmap[i], &physmap[i + 2],
			    sizeof(*physmap) * (*physmap_idx - i + 2));
			*physmap_idx -= 2;
		}
		break;
	}

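	/*
	 * No usable region below 1MB was found; fall back to stealing from
	 * the top of conventional memory (basemem is in kilobytes).
	 */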
	if (!allocated) {
		boot_address = basemem * 1024 - bootMP_size;
		if (bootverbose)
			printf(
"Cannot find enough space for the boot trampoline, placing it at %#x\n",
			    boot_address);
	}
}

/*
 * Initialize the IPI handlers and start up the APs.
 */
void
cpu_mp_start(void)
{
	int i;

	/* Initialize the logical ID to APIC ID table. */
	for (i = 0; i < MAXCPU; i++) {
		cpu_apic_ids[i] = -1;
	}

	/* Install an inter-CPU IPI for TLB invalidation */
	setidt(IPI_INVLTLB, IDTVEC(invltlb),
	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(IPI_INVLPG, IDTVEC(invlpg),
	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(IPI_INVLRNG, IDTVEC(invlrng),
	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));

	/* Install an inter-CPU IPI for cache invalidation. */
	setidt(IPI_INVLCACHE, IDTVEC(invlcache),
	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));

	/* Install an inter-CPU IPI for all-CPU rendezvous */
	setidt(IPI_RENDEZVOUS, IDTVEC(rendezvous),
	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));

	/* Install generic inter-CPU IPI handler */
	setidt(IPI_BITMAP_VECTOR, IDTVEC(ipi_intr_bitmap_handler),
	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));

	/* Install an inter-CPU IPI for CPU stop/restart */
	setidt(IPI_STOP, IDTVEC(cpustop),
	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));

	/* Install an inter-CPU IPI for CPU suspend/resume */
	setidt(IPI_SUSPEND, IDTVEC(cpususpend),
	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));

	/* Install an IPI for calling delayed SWI */
	setidt(IPI_SWI, IDTVEC(ipi_swi),
	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));

	/* Set boot_cpu_id if needed. */
	if (boot_cpu_id == -1) {
		boot_cpu_id = PCPU_GET(apic_id);
		cpu_info[boot_cpu_id].cpu_bsp = 1;
	} else
		KASSERT(boot_cpu_id == PCPU_GET(apic_id),
		    ("BSP's APIC ID doesn't match boot_cpu_id"));

	/* Probe logical/physical core configuration. */
	topo_probe();

	assign_cpu_ids();

	/* Start each Application Processor */
	start_all_aps();

	set_interrupt_apic_ids();

#if defined(DEV_ACPI) && MAXMEMDOM > 1
	acpi_pxm_set_cpu_locality();
#endif
}

/*
 * AP CPUs call this to initialize themselves.
 */
void
init_secondary(void)
{
	struct pcpu *pc;
	struct i386tss *common_tssp;
	struct region_descriptor r_gdt, r_idt;
	int gsel_tss, myid, x;
	u_int cr0;

	/* bootAP is set in start_ap() to our ID. */
	myid = bootAP;

	/* Update microcode before doing anything else. */
	ucode_load_ap(myid);

	/* Get per-cpu data */
	pc = &__pcpu[myid];

	/* prime data page for it to use */
	pcpu_init(pc, myid, sizeof(struct pcpu));
	dpcpu_init(dpcpu, myid);
	pc->pc_apic_id = cpu_apic_ids[myid];
	pc->pc_prvspace = pc;
	pc->pc_curthread = 0;
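	/*
	 * The BSP's pc_common_tssp points at the first of a contiguous
	 * array of per-CPU TSSs, so pick ours by CPU id.
	 */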
	pc->pc_common_tssp = common_tssp = &(__pcpu[0].pc_common_tssp)[myid];

	fix_cpuid();

	gdt_segs[GPRIV_SEL].ssd_base = (int)pc;
	gdt_segs[GPROC0_SEL].ssd_base = (int)common_tssp;
	gdt_segs[GLDT_SEL].ssd_base = (int)ldt;

	for (x = 0; x < NGDT; x++) {
		ssdtosd(&gdt_segs[x], &gdt[myid * NGDT + x].sd);
	}

	r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
	r_gdt.rd_base = (int) &gdt[myid * NGDT];
	lgdt(&r_gdt);			/* does magic intra-segment return */

	r_idt.rd_limit = sizeof(struct gate_descriptor) * NIDT - 1;
	r_idt.rd_base = (int)idt;
	lidt(&r_idt);

	lldt(_default_ldt);
	PCPU_SET(currentldt, _default_ldt);

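	/*
	 * Place the trampoline stack pointer VM86_STACK_SPACE below the top
	 * of the trampoline stack, leaving that space for vm86 use.
	 */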
	PCPU_SET(trampstk, (uintptr_t)ap_tramp_stack_base + TRAMP_STACK_SZ -
	    VM86_STACK_SPACE);

	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	gdt[myid * NGDT + GPROC0_SEL].sd.sd_type = SDT_SYS386TSS;
	common_tssp->tss_esp0 = PCPU_GET(trampstk);
	common_tssp->tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
	common_tssp->tss_ioopt = sizeof(struct i386tss) << 16;
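	/*
	 * An I/O map base (high word of tss_ioopt) equal to the TSS size
	 * means there is no I/O permission bitmap.
	 */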
	PCPU_SET(tss_gdt, &gdt[myid * NGDT + GPROC0_SEL].sd);
	PCPU_SET(common_tssd, *PCPU_GET(tss_gdt));
	ltr(gsel_tss);

	PCPU_SET(fsgs_gdt, &gdt[myid * NGDT + GUFS_SEL].sd);
	PCPU_SET(copyout_buf, ap_copyout_buf);

	/*
	 * Set to a known state:
	 * Set by mpboot.s: CR0_PG, CR0_PE
	 * Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
	 */
	cr0 = rcr0();
	cr0 &= ~(CR0_CD | CR0_NW | CR0_EM);
	load_cr0(cr0);
	CHECK_WRITE(0x38, 5);

	/* signal our startup to the BSP. */
	mp_naps++;
	CHECK_WRITE(0x39, 6);

	/* Spin until the BSP releases the APs. */
	while (atomic_load_acq_int(&aps_ready) == 0)
		ia32_pause();

	/* BSP may have changed PTD while we were waiting */
	invltlb();

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
	lidt(&r_idt);
#endif

	init_secondary_tail();
}

/*
 * start each AP in our list
 */
#define TMPMAP_START 1
static int
start_all_aps(void)
{
	u_char mpbiosreason;
	u_int32_t mpbioswarmvec;
	int apic_id, cpu;

	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);

	pmap_remap_lower(true);

	/* install the AP 1st level boot code */
	install_ap_tramp();

	/* save the current value of the warm-start vector */
	mpbioswarmvec = *((u_int32_t *) WARMBOOT_OFF);
	outb(CMOS_REG, BIOS_RESET);
	mpbiosreason = inb(CMOS_DATA);

	/* take advantage of the P==V mapping for PTD[0] for AP boot */

	/* start each AP */
	for (cpu = 1; cpu < mp_ncpus; cpu++) {
		apic_id = cpu_apic_ids[cpu];

		/* allocate and set up a boot stack data page */
		bootstacks[cpu] = kmem_malloc(kstack_pages * PAGE_SIZE,
		    M_WAITOK | M_ZERO);
		dpcpu = kmem_malloc(DPCPU_SIZE, M_WAITOK | M_ZERO);
		/* setup a vector to our boot code */
		*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
		*((volatile u_short *) WARMBOOT_SEG) = (boot_address >> 4);
		outb(CMOS_REG, BIOS_RESET);
		outb(CMOS_DATA, BIOS_WARM);	/* 'warm-start' */

		bootSTK = (char *)bootstacks[cpu] + kstack_pages *
		    PAGE_SIZE - 4;
		bootAP = cpu;

		ap_tramp_stack_base = pmap_trm_alloc(TRAMP_STACK_SZ, M_NOWAIT);
		ap_copyout_buf = pmap_trm_alloc(TRAMP_COPYOUT_SZ, M_NOWAIT);

		/* attempt to start the Application Processor */
		CHECK_INIT(99);	/* setup checkpoints */
		if (!start_ap(apic_id)) {
			printf("AP #%d (PHY# %d) failed!\n", cpu, apic_id);
			CHECK_PRINT("trace");	/* show checkpoints */
			/* better panic as the AP may be running loose */
			printf("panic y/n? [y] ");
			if (cngetc() != 'n')
				panic("bye-bye");
		}
		CHECK_PRINT("trace");		/* show checkpoints */

		CPU_SET(cpu, &all_cpus);	/* record AP in CPU map */
	}

	pmap_remap_lower(false);

	/* restore the warmstart vector */
	*(u_int32_t *) WARMBOOT_OFF = mpbioswarmvec;

	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, mpbiosreason);

	/* number of APs actually started */
	return mp_naps;
}

/*
 * load the 1st level AP boot code into base memory.
 */

/* targets for relocation */
extern void bigJump(void);
extern void bootCodeSeg(void);
extern void bootDataSeg(void);
extern void MPentry(void);
extern u_int MP_GDT;
extern u_int mp_gdtbase;

static void
install_ap_tramp(void)
{
	int     x;
	int     size = *(int *) ((u_long) & bootMP_size);
	vm_offset_t va = boot_address;
	u_char *src = (u_char *) ((u_long) bootMP);
	u_char *dst = (u_char *) va;
	u_int   boot_base = (u_int) bootMP;
	u_int8_t *dst8;
	u_int16_t *dst16;
	u_int32_t *dst32;

	KASSERT(size <= PAGE_SIZE,
	    ("'size' does not fit into PAGE_SIZE, as expected."));
	pmap_kenter(va, boot_address);
	pmap_invalidate_page (kernel_pmap, va);
	for (x = 0; x < size; ++x)
		*dst++ = *src++;

	/*
	 * modify addresses in code we just moved to basemem. unfortunately we
	 * need fairly detailed info about mpboot.s for this to work.  changes
	 * to mpboot.s might require changes here.
	 */

	/* boot code is located in KERNEL space */
	dst = (u_char *) va;

	/* modify the lgdt arg */
	dst32 = (u_int32_t *) (dst + ((u_int) & mp_gdtbase - boot_base));
	*dst32 = boot_address + ((u_int) & MP_GDT - boot_base);

	/* modify the ljmp target for MPentry() */
	dst32 = (u_int32_t *) (dst + ((u_int) bigJump - boot_base) + 1);
	*dst32 = (u_int)MPentry;

	/* modify the target for boot code segment */
	dst16 = (u_int16_t *) (dst + ((u_int) bootCodeSeg - boot_base));
	dst8 = (u_int8_t *) (dst16 + 1);
	*dst16 = (u_int) boot_address & 0xffff;
	*dst8 = ((u_int) boot_address >> 16) & 0xff;

	/* modify the target for boot data segment */
	dst16 = (u_int16_t *) (dst + ((u_int) bootDataSeg - boot_base));
	dst8 = (u_int8_t *) (dst16 + 1);
	*dst16 = (u_int) boot_address & 0xffff;
	*dst8 = ((u_int) boot_address >> 16) & 0xff;
}

/*
 * This function starts the AP (application processor) identified
 * by the APIC ID 'apic_id'.  It does quite a "song and dance"
 * to accomplish this.  This is necessary because of the nuances
 * of the different hardware we might encounter.  It isn't pretty,
 * but it seems to work.
 */
static int
start_ap(int apic_id)
{
	int vector, ms;
	int cpus;

	/* calculate the vector */
	vector = (boot_address >> 12) & 0xff;
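	/*
	 * The startup IPI vector is the physical page number of the
	 * trampoline; the AP starts executing in real mode at vector << 12.
	 */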

	/* used as a watchpoint to signal AP startup */
	cpus = mp_naps;

	ipi_startup(apic_id, vector);

	/* Wait up to 5 seconds for it to start. */
	for (ms = 0; ms < 5000; ms++) {
		if (mp_naps > cpus)
			return 1;	/* return SUCCESS */
		DELAY(1000);
	}
	return 0;		/* return FAILURE */
}

/*
 * Flush the TLB on other CPUs.
 */

/* Variables needed for SMP tlb shootdown. */
vm_offset_t smp_tlb_addr1, smp_tlb_addr2;
pmap_t smp_tlb_pmap;
volatile uint32_t smp_tlb_generation;
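/*
 * These are written by the initiating CPU while holding smp_ipi_mtx and
 * read, without locking, by the IPI handlers below.
 */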

/*
 * Used by pmap to request cache or TLB invalidation on local and
 * remote processors.  Mask provides the set of remote CPUs which are
 * to be signalled with the invalidation IPI.  Vector specifies which
 * invalidation IPI is used.  As an optimization, the curcpu_cb
 * callback is invoked on the calling CPU while waiting for remote
 * CPUs to complete the operation.
 *
 * The callback function is called unconditionally on the caller's
 * underlying processor, even when this processor is not set in the
 * mask.  So, the callback function must be prepared to handle such
 * spurious invocations.
 */
static void
smp_targeted_tlb_shootdown(cpuset_t mask, u_int vector, pmap_t pmap,
    vm_offset_t addr1, vm_offset_t addr2, smp_invl_cb_t curcpu_cb)
{
	cpuset_t other_cpus;
	volatile uint32_t *p_cpudone;
	uint32_t generation;
	int cpu;

	/*
	 * It is not necessary to signal other CPUs while booting or
	 * when in the debugger.
	 */
	if (kdb_active || KERNEL_PANICKED() || !smp_started) {
		curcpu_cb(pmap, addr1, addr2);
		return;
	}

	sched_pin();

	/*
	 * Check for other cpus.  Return if none.
	 */
	if (CPU_ISFULLSET(&mask)) {
		if (mp_ncpus <= 1)
			goto nospinexit;
	} else {
		CPU_CLR(PCPU_GET(cpuid), &mask);
		if (CPU_EMPTY(&mask))
			goto nospinexit;
	}

	KASSERT((read_eflags() & PSL_I) != 0,
	    ("smp_targeted_tlb_shootdown: interrupts disabled"));
	mtx_lock_spin(&smp_ipi_mtx);
	smp_tlb_addr1 = addr1;
	smp_tlb_addr2 = addr2;
	smp_tlb_pmap = pmap;
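	/*
	 * Bump the generation while holding smp_ipi_mtx.  Each target CPU
	 * stores it into its pc_smp_tlb_done once it has processed this
	 * shootdown, which is what the wait loop below checks for.
	 */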
	generation = ++smp_tlb_generation;
	if (CPU_ISFULLSET(&mask)) {
		ipi_all_but_self(vector);
		other_cpus = all_cpus;
		CPU_CLR(PCPU_GET(cpuid), &other_cpus);
	} else {
		other_cpus = mask;
		ipi_selected(mask, vector);
	}
	curcpu_cb(pmap, addr1, addr2);
	CPU_FOREACH_ISSET(cpu, &other_cpus) {
		p_cpudone = &cpuid_to_pcpu[cpu]->pc_smp_tlb_done;
		while (*p_cpudone != generation)
			ia32_pause();
	}
	mtx_unlock_spin(&smp_ipi_mtx);
	sched_unpin();
	return;

nospinexit:
	curcpu_cb(pmap, addr1, addr2);
	sched_unpin();
}

void
smp_masked_invltlb(cpuset_t mask, pmap_t pmap, smp_invl_cb_t curcpu_cb)
{
	smp_targeted_tlb_shootdown(mask, IPI_INVLTLB, pmap, 0, 0, curcpu_cb);
#ifdef COUNT_XINVLTLB_HITS
	ipi_global++;
#endif
}

void
smp_masked_invlpg(cpuset_t mask, vm_offset_t addr, pmap_t pmap,
    smp_invl_cb_t curcpu_cb)
{
	smp_targeted_tlb_shootdown(mask, IPI_INVLPG, pmap, addr, 0, curcpu_cb);
#ifdef COUNT_XINVLTLB_HITS
	ipi_page++;
#endif
}

void
smp_masked_invlpg_range(cpuset_t mask, vm_offset_t addr1, vm_offset_t addr2,
    pmap_t pmap, smp_invl_cb_t curcpu_cb)
{
	smp_targeted_tlb_shootdown(mask, IPI_INVLRNG, pmap, addr1, addr2,
	    curcpu_cb);
#ifdef COUNT_XINVLTLB_HITS
	ipi_range++;
	ipi_range_size += (addr2 - addr1) / PAGE_SIZE;
#endif
}

void
smp_cache_flush(smp_invl_cb_t curcpu_cb)
{
	smp_targeted_tlb_shootdown(all_cpus, IPI_INVLCACHE, NULL, 0, 0,
	    curcpu_cb);
}
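
/*
 * A minimal usage sketch (hypothetical, not part of this file): a pmap-level
 * caller supplies a callback that performs the invalidation locally, e.g.:
 *
 *	static void
 *	example_invlpg_cb(pmap_t pmap, vm_offset_t va, vm_offset_t va2)
 *	{
 *		invlpg(va);
 *	}
 *
 *	smp_masked_invlpg(all_cpus, va, kernel_pmap, example_invlpg_cb);
 *
 * The callback also runs on the calling CPU even when it is not in the mask,
 * while the IPI handlers below do the equivalent work on the remote CPUs.
 */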

/*
 * Handlers for TLB related IPIs
 */
void
invltlb_handler(void)
{
	uint32_t generation;

	trap_check_kstack();
#ifdef COUNT_XINVLTLB_HITS
	xhits_gbl[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invltlb_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	/*
	 * Reading the generation here allows greater parallelism
	 * since invalidating the TLB is a serializing operation.
	 */
	generation = smp_tlb_generation;
	if (smp_tlb_pmap == kernel_pmap)
		invltlb_glob();
	PCPU_SET(smp_tlb_done, generation);
}

void
invlpg_handler(void)
{
	uint32_t generation;

	trap_check_kstack();
#ifdef COUNT_XINVLTLB_HITS
	xhits_pg[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlpg_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	generation = smp_tlb_generation;	/* Overlap with serialization */
	if (smp_tlb_pmap == kernel_pmap)
		invlpg(smp_tlb_addr1);
	PCPU_SET(smp_tlb_done, generation);
}

void
invlrng_handler(void)
{
	vm_offset_t addr, addr2;
	uint32_t generation;

	trap_check_kstack();
#ifdef COUNT_XINVLTLB_HITS
	xhits_rng[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlrng_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	addr = smp_tlb_addr1;
	addr2 = smp_tlb_addr2;
	generation = smp_tlb_generation;	/* Overlap with serialization */
	if (smp_tlb_pmap == kernel_pmap) {
		do {
			invlpg(addr);
			addr += PAGE_SIZE;
		} while (addr < addr2);
	}

	PCPU_SET(smp_tlb_done, generation);
}

void
invlcache_handler(void)
{
	uint32_t generation;

	trap_check_kstack();
#ifdef COUNT_IPIS
	(*ipi_invlcache_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	/*
	 * Reading the generation here allows greater parallelism
	 * since wbinvd is a serializing instruction.  Without the
	 * temporary, we'd wait for wbinvd to complete, then the read
	 * would execute, then the dependent write, which must then
	 * complete before return from interrupt.
	 */
	generation = smp_tlb_generation;
	wbinvd();
	PCPU_SET(smp_tlb_done, generation);
}