xref: /freebsd/sys/arm64/arm64/mp_machdep.c (revision f56f82e0)
/*-
 * Copyright (c) 2015-2016 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Andrew Turner under
 * sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include "opt_acpi.h"
#include "opt_kstack_pages.h"
#include "opt_platform.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>

#include <machine/debug_monitor.h>
#include <machine/machdep.h>
#include <machine/intr.h>
#include <machine/smp.h>
#ifdef VFP
#include <machine/vfp.h>
#endif

#ifdef DEV_ACPI
#include <contrib/dev/acpica/include/acpi.h>
#include <dev/acpica/acpivar.h>
#endif

#ifdef FDT
#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_cpu.h>
#endif

#include <dev/psci/psci.h>

#include "pic_if.h"

typedef void intr_ipi_send_t(void *, cpuset_t, u_int);
typedef void intr_ipi_handler_t(void *);

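/*
 * Per-IPI bookkeeping, kept in struct intr_ipi: the registered handler
 * and its argument, the method used to send the IPI through the
 * interrupt controller, a human-readable name, and the per-CPU
 * statistics counters.
 */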
#define INTR_IPI_NAMELEN	(MAXCOMLEN + 1)
struct intr_ipi {
	intr_ipi_handler_t *	ii_handler;
	void *			ii_handler_arg;
	intr_ipi_send_t *	ii_send;
	void *			ii_send_arg;
	char			ii_name[INTR_IPI_NAMELEN];
	u_long *		ii_count;
};

static struct intr_ipi ipi_sources[INTR_IPI_COUNT];

static struct intr_ipi *intr_ipi_lookup(u_int);
static void intr_pic_ipi_setup(u_int, const char *, intr_ipi_handler_t *,
    void *);

extern struct pcpu __pcpu[];

static device_identify_t arm64_cpu_identify;
static device_probe_t arm64_cpu_probe;
static device_attach_t arm64_cpu_attach;

static void ipi_ast(void *);
static void ipi_hardclock(void *);
static void ipi_preempt(void *);
static void ipi_rendezvous(void *);
static void ipi_stop(void *);

struct mtx ap_boot_mtx;
struct pcb stoppcbs[MAXCPU];

static device_t cpu_list[MAXCPU];

/*
 * Not all systems boot from the first CPU in the device tree. To work
 * around this we need to find which CPU we booted from, so that when we
 * later enable the secondary CPUs we can skip it.
 */
static int cpu0 = -1;

void mpentry(unsigned long cpuid);
void init_secondary(uint64_t);

uint8_t secondary_stacks[MAXCPU - 1][PAGE_SIZE * KSTACK_PAGES] __aligned(16);

/* Set to 1 once we're ready to let the APs out of the pen. */
volatile int aps_ready = 0;

/* Temporary variables for init_secondary() */
void *dpcpu[MAXCPU - 1];

static device_method_t arm64_cpu_methods[] = {
	/* Device interface */
	DEVMETHOD(device_identify,	arm64_cpu_identify),
	DEVMETHOD(device_probe,		arm64_cpu_probe),
	DEVMETHOD(device_attach,	arm64_cpu_attach),

	DEVMETHOD_END
};

static devclass_t arm64_cpu_devclass;
static driver_t arm64_cpu_driver = {
	"arm64_cpu",
	arm64_cpu_methods,
	0
};

DRIVER_MODULE(arm64_cpu, cpu, arm64_cpu_driver, arm64_cpu_devclass, 0, 0);

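/*
 * Create an arm64_cpu child on each cpu bus device, unless one already
 * exists.
 */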
static void
arm64_cpu_identify(driver_t *driver, device_t parent)
{

	if (device_find_child(parent, "arm64_cpu", -1) != NULL)
		return;
	if (BUS_ADD_CHILD(parent, 0, "arm64_cpu", -1) == NULL)
		device_printf(parent, "add child failed\n");
}

static int
arm64_cpu_probe(device_t dev)
{
	u_int cpuid;

	cpuid = device_get_unit(dev);
	if (cpuid >= MAXCPU || cpuid > mp_maxid)
		return (EINVAL);

	device_quiet(dev);
	return (0);
}

static int
arm64_cpu_attach(device_t dev)
{
	const uint32_t *reg;
	size_t reg_size;
	u_int cpuid;
	int i;

	cpuid = device_get_unit(dev);

	if (cpuid >= MAXCPU || cpuid > mp_maxid)
		return (EINVAL);
	KASSERT(cpu_list[cpuid] == NULL, ("Already have cpu %u", cpuid));

	reg = cpu_get_cpuid(dev, &reg_size);
	if (reg == NULL)
		return (EINVAL);

	if (bootverbose) {
		device_printf(dev, "register <");
		for (i = 0; i < reg_size; i++)
			printf("%s%x", (i == 0) ? "" : " ", reg[i]);
		printf(">\n");
	}

	/* Save the device so we can start the CPU later */
	cpu_list[cpuid] = dev;

	return (0);
}

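/*
 * Register handlers for the IPIs used by the kernel, release the APs
 * spinning in init_secondary(), and wait up to two seconds for all of
 * them to report in through smp_started.
 */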
static void
release_aps(void *dummy __unused)
{
	int i;

	/* Only release CPUs if they exist */
	if (mp_ncpus == 1)
		return;

	intr_pic_ipi_setup(IPI_AST, "ast", ipi_ast, NULL);
	intr_pic_ipi_setup(IPI_PREEMPT, "preempt", ipi_preempt, NULL);
	intr_pic_ipi_setup(IPI_RENDEZVOUS, "rendezvous", ipi_rendezvous, NULL);
	intr_pic_ipi_setup(IPI_STOP, "stop", ipi_stop, NULL);
	intr_pic_ipi_setup(IPI_STOP_HARD, "stop hard", ipi_stop, NULL);
	intr_pic_ipi_setup(IPI_HARDCLOCK, "hardclock", ipi_hardclock, NULL);

	atomic_store_rel_int(&aps_ready, 1);
	/* Wake up the other CPUs */
	__asm __volatile("sev");

	printf("Release APs\n");

	for (i = 0; i < 2000; i++) {
		if (smp_started)
			return;
		DELAY(1000);
	}

	printf("APs not started\n");
}
SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);

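/*
 * AP entry point, reached via mpentry.  Each AP publishes its pcpu
 * pointer, spins until the BSP releases it, completes its per-CPU
 * initialization, and enters the scheduler.
 */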
void
init_secondary(uint64_t cpu)
{
	struct pcpu *pcpup;

	pcpup = &__pcpu[cpu];
	/*
	 * Set the pcpu pointer with a backup in tpidr_el1 to be
	 * loaded when entering the kernel from userland.
	 */
	__asm __volatile(
	    "mov x18, %0 \n"
	    "msr tpidr_el1, %0" :: "r"(pcpup));

	/* Spin until the BSP releases the APs */
	while (!aps_ready)
		__asm __volatile("wfe");

	/* Initialize curthread */
	KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
	pcpup->pc_curthread = pcpup->pc_idlethread;
	pcpup->pc_curpcb = pcpup->pc_idlethread->td_pcb;

	/*
	 * Identify the current CPU. This is necessary to set up
	 * affinity registers and to provide support for
	 * runtime chip identification.
	 */
	identify_cpu();

	intr_pic_init_secondary();

	/* Start per-CPU event timers. */
	cpu_initclocks_ap();

#ifdef VFP
	vfp_init();
#endif

	dbg_monitor_init();
	pan_enable();

	/* Enable interrupts */
	intr_enable();

	mtx_lock_spin(&ap_boot_mtx);

	atomic_add_rel_32(&smp_cpus, 1);

	if (smp_cpus == mp_ncpus) {
		/* Enable IPIs, TLB shootdown, freezes, etc. */
		atomic_store_rel_int(&smp_started, 1);
	}

	mtx_unlock_spin(&ap_boot_mtx);

	/* Enter the scheduler */
	sched_throw(NULL);

	panic("scheduler returned us to init_secondary");
	/* NOTREACHED */
}


/*
 *  Send an IPI through the interrupt controller.
 */
static void
pic_ipi_send(void *arg, cpuset_t cpus, u_int ipi)
{

	KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__));
	PIC_IPI_SEND(intr_irq_root_dev, arg, cpus, ipi);
}

/*
 *  Set up an IPI handler on the interrupt controller.
 *
 *  Not SMP coherent.
 */
static void
intr_pic_ipi_setup(u_int ipi, const char *name, intr_ipi_handler_t *hand,
    void *arg)
{
	struct intr_irqsrc *isrc;
	struct intr_ipi *ii;
	int error;

	KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__));
	KASSERT(hand != NULL, ("%s: ipi %u no handler", __func__, ipi));

	error = PIC_IPI_SETUP(intr_irq_root_dev, ipi, &isrc);
	if (error != 0)
		return;

	isrc->isrc_handlers++;

	ii = intr_ipi_lookup(ipi);
	KASSERT(ii->ii_count == NULL, ("%s: ipi %u reused", __func__, ipi));

	ii->ii_handler = hand;
	ii->ii_handler_arg = arg;
	ii->ii_send = pic_ipi_send;
	ii->ii_send_arg = isrc;
	strlcpy(ii->ii_name, name, INTR_IPI_NAMELEN);
	ii->ii_count = intr_ipi_setup_counters(name);
}

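/*
 * Send the given IPI to the set of CPUs in 'cpus'.  Panics if the IPI
 * has not been set up by intr_pic_ipi_setup().
 */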
static void
intr_ipi_send(cpuset_t cpus, u_int ipi)
{
	struct intr_ipi *ii;

	ii = intr_ipi_lookup(ipi);
	if (ii->ii_count == NULL)
		panic("%s: not setup IPI %u", __func__, ipi);

	ii->ii_send(ii->ii_send_arg, cpus, ipi);
}

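/*
 * The per-IPI handlers below are invoked from intr_ipi_dispatch() when
 * the corresponding IPI arrives.
 */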
static void
ipi_ast(void *dummy __unused)
{

	CTR0(KTR_SMP, "IPI_AST");
}

static void
ipi_hardclock(void *dummy __unused)
{

	CTR1(KTR_SMP, "%s: IPI_HARDCLOCK", __func__);
	hardclockintr();
}

static void
ipi_preempt(void *dummy __unused)
{
	CTR1(KTR_SMP, "%s: IPI_PREEMPT", __func__);
	sched_preempt(curthread);
}

static void
ipi_rendezvous(void *dummy __unused)
{

	CTR0(KTR_SMP, "IPI_RENDEZVOUS");
	smp_rendezvous_action();
}

static void
ipi_stop(void *dummy __unused)
{
	u_int cpu;

	CTR0(KTR_SMP, "IPI_STOP");

	cpu = PCPU_GET(cpuid);
	savectx(&stoppcbs[cpu]);

	/* Indicate we are stopped */
	CPU_SET_ATOMIC(cpu, &stopped_cpus);

	/* Wait for restart */
	while (!CPU_ISSET(cpu, &started_cpus))
		cpu_spinwait();

	CPU_CLR_ATOMIC(cpu, &started_cpus);
	CPU_CLR_ATOMIC(cpu, &stopped_cpus);
	CTR0(KTR_SMP, "IPI_STOP (restart)");
}

struct cpu_group *
cpu_topo(void)
{

	return (smp_topo_none());
}

/* Determine if we are running on an MP machine */
int
cpu_mp_probe(void)
{

	/* ARM64TODO: Read the u bit of mpidr_el1 to determine this */
	return (1);
}

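/*
 * Bring one secondary CPU online: set up its pcpu and dynamic per-CPU
 * data, then ask PSCI to start it at mpentry.  Returns false only when
 * 'id' is out of range; a PSCI failure is reported but still returns
 * true so that enumeration of the remaining CPUs continues.
 */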
static bool
start_cpu(u_int id, uint64_t target_cpu)
{
	struct pcpu *pcpup;
	vm_paddr_t pa;
	u_int cpuid;
	int err;

	/* Check we are able to start this cpu */
	if (id > mp_maxid)
		return (false);

	KASSERT(id < MAXCPU, ("Too many CPUs"));

	/* We are already running on cpu 0 */
	if (id == cpu0)
		return (true);

	/*
	 * Rotate the CPU IDs to put the boot CPU as CPU 0. We keep the other
	 * CPUs ordered as they are likely grouped into clusters so it can be
	 * useful to keep that property, e.g. for the GICv3 driver to send
	 * an IPI to all CPUs in the cluster.
	 * For example, with cpu0 == 2 and mp_maxid == 3, the hardware IDs
	 * 2, 3, 0, 1 map to cpuids 0, 1, 2, 3 respectively.
	 */
	cpuid = id;
	if (cpuid < cpu0)
		cpuid += mp_maxid + 1;
	cpuid -= cpu0;

	pcpup = &__pcpu[cpuid];
	pcpu_init(pcpup, cpuid, sizeof(struct pcpu));

	dpcpu[cpuid - 1] = (void *)kmem_malloc(kernel_arena, DPCPU_SIZE,
	    M_WAITOK | M_ZERO);
	dpcpu_init(dpcpu[cpuid - 1], cpuid);

	printf("Starting CPU %u (%lx)\n", cpuid, target_cpu);
	pa = pmap_extract(kernel_pmap, (vm_offset_t)mpentry);

	err = psci_cpu_on(target_cpu, pa, cpuid);
	if (err != PSCI_RETVAL_SUCCESS) {
		/*
		 * Panic here if INVARIANTS are enabled and PSCI failed to
		 * start the requested CPU.  psci_cpu_on() returns PSCI_MISSING
		 * to indicate we are unable to use it to start the given CPU;
		 * that case is tolerated.
		 */
		KASSERT(err == PSCI_MISSING,
		    ("Failed to start CPU %u (%lx)\n", id, target_cpu));

		pcpu_destroy(pcpup);
		kmem_free(kernel_arena, (vm_offset_t)dpcpu[cpuid - 1],
		    DPCPU_SIZE);
		dpcpu[cpuid - 1] = NULL;
		/* Notify the user that the CPU failed to start */
		printf("Failed to start CPU %u (%lx)\n", id, target_cpu);
	} else
		CPU_SET(cpuid, &all_cpus);

	return (true);
}

#ifdef DEV_ACPI
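/*
 * MADT walk callback: start the CPU described by each Generic
 * Interrupt Controller (GICC) entry, using the MPIDR recorded in the
 * table, and advance the running CPU index.
 */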
static void
madt_handler(ACPI_SUBTABLE_HEADER *entry, void *arg)
{
	ACPI_MADT_GENERIC_INTERRUPT *intr;
	u_int *cpuid;

	switch(entry->Type) {
	case ACPI_MADT_TYPE_GENERIC_INTERRUPT:
		intr = (ACPI_MADT_GENERIC_INTERRUPT *)entry;
		cpuid = arg;

		start_cpu((*cpuid), intr->ArmMpidr);
		(*cpuid)++;
		break;
	default:
		break;
	}
}

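/*
 * Start every AP described by the ACPI MADT.
 */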
static void
cpu_init_acpi(void)
{
	ACPI_TABLE_MADT *madt;
	vm_paddr_t physaddr;
	u_int cpuid;

	physaddr = acpi_find_table(ACPI_SIG_MADT);
	if (physaddr == 0)
		return;

	madt = acpi_map_table(physaddr, ACPI_SIG_MADT);
	if (madt == NULL) {
		printf("Unable to map the MADT, not starting APs\n");
		return;
	}

	cpuid = 0;
	acpi_walk_subtables(madt + 1, (char *)madt + madt->Header.Length,
	    madt_handler, &cpuid);

	acpi_unmap_table(madt);
}
#endif

#ifdef FDT
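/*
 * ofw_cpu_early_foreach callback: assemble the one- or two-cell "reg"
 * property into the target MPIDR and start the CPU.
 */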
static boolean_t
cpu_init_fdt(u_int id, phandle_t node, u_int addr_size, pcell_t *reg)
{
	uint64_t target_cpu;

	target_cpu = reg[0];
	if (addr_size == 2) {
		target_cpu <<= 32;
		target_cpu |= reg[1];
	}

	return (start_cpu(id, target_cpu) ? TRUE : FALSE);
}
#endif

/* Initialize and fire up non-boot processors */
void
cpu_mp_start(void)
{

	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);

	CPU_SET(0, &all_cpus);

	switch(arm64_bus_method) {
#ifdef DEV_ACPI
	case ARM64_BUS_ACPI:
		KASSERT(cpu0 >= 0, ("Current CPU was not found"));
		cpu_init_acpi();
		break;
#endif
#ifdef FDT
	case ARM64_BUS_FDT:
		KASSERT(cpu0 >= 0, ("Current CPU was not found"));
		ofw_cpu_early_foreach(cpu_init_fdt, true);
		break;
#endif
	default:
		break;
	}
}

/* Introduce the rest of the cores to the world */
void
cpu_mp_announce(void)
{
}

#ifdef DEV_ACPI
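/*
 * MADT walk callback: count Generic Interrupt Controller (GICC)
 * entries and record which of them matches the boot CPU, by comparing
 * the affinity fields of mpidr_el1 against the MPIDR in the table.
 */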
static void
cpu_count_acpi_handler(ACPI_SUBTABLE_HEADER *entry, void *arg)
{
	ACPI_MADT_GENERIC_INTERRUPT *intr;
	u_int *cores = arg;
	uint64_t mpidr_reg;

	switch(entry->Type) {
	case ACPI_MADT_TYPE_GENERIC_INTERRUPT:
		intr = (ACPI_MADT_GENERIC_INTERRUPT *)entry;
		if (cpu0 < 0) {
			mpidr_reg = READ_SPECIALREG(mpidr_el1);
			if ((mpidr_reg & 0xff00fffffful) == intr->ArmMpidr)
				cpu0 = *cores;
		}
		(*cores)++;
		break;
	default:
		break;
	}
}

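/*
 * Count the CPUs listed in the MADT, locating the boot CPU as a side
 * effect.  Returns 0 if the MADT cannot be found or mapped.
 */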
static u_int
cpu_count_acpi(void)
{
	ACPI_TABLE_MADT *madt;
	vm_paddr_t physaddr;
	u_int cores;

	physaddr = acpi_find_table(ACPI_SIG_MADT);
	if (physaddr == 0)
		return (0);

	madt = acpi_map_table(physaddr, ACPI_SIG_MADT);
	if (madt == NULL) {
		printf("Unable to map the MADT, not starting APs\n");
		return (0);
	}

	cores = 0;
	acpi_walk_subtables(madt + 1, (char *)madt + madt->Header.Length,
	    cpu_count_acpi_handler, &cores);

	acpi_unmap_table(madt);

	return (cores);
}
#endif

#ifdef FDT
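/*
 * ofw_cpu_early_foreach callback: identify the device tree node for
 * the boot CPU by comparing the affinity fields of mpidr_el1 against
 * the node's "reg" property.
 */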
static boolean_t
cpu_find_cpu0_fdt(u_int id, phandle_t node, u_int addr_size, pcell_t *reg)
{
	uint64_t mpidr_fdt, mpidr_reg;

	if (cpu0 < 0) {
		mpidr_fdt = reg[0];
		if (addr_size == 2) {
			mpidr_fdt <<= 32;
			mpidr_fdt |= reg[1];
		}

		mpidr_reg = READ_SPECIALREG(mpidr_el1);

		if ((mpidr_reg & 0xff00fffffful) == mpidr_fdt)
			cpu0 = id;
	}

	return (TRUE);
}
#endif

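/*
 * Set mp_ncpus and mp_maxid from the ACPI tables or the device tree,
 * clamping to MAXCPU.  Falls back to a single CPU when neither source
 * provides CPU data.
 */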
void
cpu_mp_setmaxid(void)
{
#if defined(DEV_ACPI) || defined(FDT)
	int cores;
#endif

	switch(arm64_bus_method) {
#ifdef DEV_ACPI
	case ARM64_BUS_ACPI:
		cores = cpu_count_acpi();
		if (cores > 0) {
			cores = MIN(cores, MAXCPU);
			if (bootverbose)
				printf("Found %d CPUs in the ACPI tables\n",
				    cores);
			mp_ncpus = cores;
			mp_maxid = cores - 1;
			return;
		}
		break;
#endif
#ifdef FDT
	case ARM64_BUS_FDT:
		cores = ofw_cpu_early_foreach(cpu_find_cpu0_fdt, false);
		if (cores > 0) {
			cores = MIN(cores, MAXCPU);
			if (bootverbose)
				printf("Found %d CPUs in the device tree\n",
				    cores);
			mp_ncpus = cores;
			mp_maxid = cores - 1;
			return;
		}
		break;
#endif
	default:
		break;
	}

	if (bootverbose)
		printf("No CPU data, limiting to 1 core\n");
	mp_ncpus = 1;
	mp_maxid = 0;
}

/*
 *  Lookup IPI source.
 */
static struct intr_ipi *
intr_ipi_lookup(u_int ipi)
{

	if (ipi >= INTR_IPI_COUNT)
		panic("%s: no such IPI %u", __func__, ipi);

	return (&ipi_sources[ipi]);
}

/*
 *  Interrupt controller dispatch function for IPIs.  It should be
 *  called straight from the interrupt controller when the associated
 *  interrupt source is learned, or from anybody who has the interrupt
 *  source mapped.
 */
void
intr_ipi_dispatch(u_int ipi, struct trapframe *tf)
{
	void *arg;
	struct intr_ipi *ii;

	ii = intr_ipi_lookup(ipi);
	if (ii->ii_count == NULL)
		panic("%s: not setup IPI %u", __func__, ipi);

	intr_ipi_increment_count(ii->ii_count, PCPU_GET(cpuid));

	/*
	 * Supply the IPI handler with the trapframe as its argument
	 * if no argument was registered at setup time.
	 */
	arg = ii->ii_handler_arg != NULL ? ii->ii_handler_arg : tf;
	ii->ii_handler(arg);
}

#ifdef notyet
/*
 *  Map IPI into interrupt controller.
 *
 *  Not SMP coherent.
 */
static int
ipi_map(struct intr_irqsrc *isrc, u_int ipi)
{
	boolean_t is_percpu;
	int error;

	if (ipi >= INTR_IPI_COUNT)
		panic("%s: no such IPI %u", __func__, ipi);

	KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__));

	isrc->isrc_type = INTR_ISRCT_NAMESPACE;
	isrc->isrc_nspc_type = INTR_IRQ_NSPC_IPI;
	isrc->isrc_nspc_num = ipi_next_num;

	error = PIC_REGISTER(intr_irq_root_dev, isrc, &is_percpu);
	if (error == 0) {
		isrc->isrc_dev = intr_irq_root_dev;
		ipi_next_num++;
	}
	return (error);
}

/*
 *  Set up an IPI handler on an interrupt source.
 *
 *  Note that a platform could provide more ways to send and receive
 *  IPIs, fast interrupts for example.  In that case, one can call this
 *  function with the AISHF_NOALLOC flag set and then call
 *  intr_ipi_dispatch() when appropriate.
 *
 *  Not SMP coherent.
 */
int
intr_ipi_set_handler(u_int ipi, const char *name, intr_ipi_filter_t *filter,
    void *arg, u_int flags)
{
	struct intr_irqsrc *isrc;
	int error;

	if (filter == NULL)
		return (EINVAL);

	isrc = intr_ipi_lookup(ipi);
	if (isrc->isrc_ipifilter != NULL)
		return (EEXIST);

	if ((flags & AISHF_NOALLOC) == 0) {
		error = ipi_map(isrc, ipi);
		if (error != 0)
			return (error);
	}

	isrc->isrc_ipifilter = filter;
	isrc->isrc_arg = arg;
	isrc->isrc_handlers = 1;
	isrc->isrc_count = intr_ipi_setup_counters(name);
	isrc->isrc_index = 0; /* it should not be used in IPI case */

	if (isrc->isrc_dev != NULL) {
		PIC_ENABLE_INTR(isrc->isrc_dev, isrc);
		PIC_ENABLE_SOURCE(isrc->isrc_dev, isrc);
	}
	return (0);
}
#endif

/* Sending IPIs */
void
ipi_all_but_self(u_int ipi)
{
	cpuset_t cpus;

	cpus = all_cpus;
	CPU_CLR(PCPU_GET(cpuid), &cpus);
	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	intr_ipi_send(cpus, ipi);
}

void
ipi_cpu(int cpu, u_int ipi)
{
	cpuset_t cpus;

	CPU_ZERO(&cpus);
	CPU_SET(cpu, &cpus);

	CTR3(KTR_SMP, "%s: cpu: %d, ipi: %x", __func__, cpu, ipi);
	intr_ipi_send(cpus, ipi);
}

void
ipi_selected(cpuset_t cpus, u_int ipi)
{

	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	intr_ipi_send(cpus, ipi);
}
839