/*-
 * Copyright (c) 2015-2016 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Andrew Turner under
 * sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include "opt_acpi.h"
#include "opt_kstack_pages.h"
#include "opt_platform.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>

#include <machine/machdep.h>
#include <machine/intr.h>
#include <machine/smp.h>
#ifdef VFP
#include <machine/vfp.h>
#endif

#ifdef DEV_ACPI
#include <contrib/dev/acpica/include/acpi.h>
#include <dev/acpica/acpivar.h>
#endif

#ifdef FDT
#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#include <dev/ofw/ofw_cpu.h>
#endif

#include <dev/psci/psci.h>

#include "pic_if.h"

#define	MP_QUIRK_CPULIST	0x01	/* The list of cpus may be wrong, */
					/* don't panic if one fails to start */
static uint32_t mp_quirks;

#ifdef FDT
static struct {
	const char *compat;
	uint32_t quirks;
} fdt_quirks[] = {
	{ "arm,foundation-aarch64",	MP_QUIRK_CPULIST },
	{ "arm,fvp-base",		MP_QUIRK_CPULIST },
	/* This is incorrect in some DTS files */
	{ "arm,vfp-base",		MP_QUIRK_CPULIST },
	{ NULL, 0 },
};
#endif

typedef void intr_ipi_send_t(void *, cpuset_t, u_int);
typedef void intr_ipi_handler_t(void *);

#define INTR_IPI_NAMELEN	(MAXCOMLEN + 1)
struct intr_ipi {
	intr_ipi_handler_t *	ii_handler;
	void *			ii_handler_arg;
	intr_ipi_send_t *	ii_send;
	void *			ii_send_arg;
	char			ii_name[INTR_IPI_NAMELEN];
	u_long *		ii_count;
};

static struct intr_ipi ipi_sources[INTR_IPI_COUNT];

static struct intr_ipi *intr_ipi_lookup(u_int);
static void intr_pic_ipi_setup(u_int, const char *, intr_ipi_handler_t *,
    void *);

extern struct pcpu __pcpu[];

static void ipi_ast(void *);
static void ipi_hardclock(void *);
static void ipi_preempt(void *);
static void ipi_rendezvous(void *);
static void ipi_stop(void *);

struct mtx ap_boot_mtx;
struct pcb stoppcbs[MAXCPU];

/*
 * Not all systems boot from the first CPU in the device tree. To work
 * around this we need to find which CPU we have booted from so that,
 * when we later enable the secondary CPUs, we can skip it.
 */
static int cpu0 = -1;

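/*
 * mpentry() is the entry point the APs start at in the early boot code:
 * start_cpu() passes its physical address to PSCI CPU_ON, with the new
 * cpuid as the context argument that is eventually handed to
 * init_secondary().
 */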
void mpentry(unsigned long cpuid);
void init_secondary(uint64_t);

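/*
 * Boot stacks for the APs: one kernel stack per secondary CPU (the BSP
 * already has its own), used by each AP until it enters the scheduler
 * in init_secondary().
 */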
uint8_t secondary_stacks[MAXCPU - 1][PAGE_SIZE * KSTACK_PAGES] __aligned(16);

/* Set to 1 once we're ready to let the APs out of the pen. */
volatile int aps_ready = 0;

/* Temporary variables for init_secondary() */
void *dpcpu[MAXCPU - 1];

static void
release_aps(void *dummy __unused)
{
	int i, started;

	/* Only release CPUs if they exist */
	if (mp_ncpus == 1)
		return;

	intr_pic_ipi_setup(IPI_AST, "ast", ipi_ast, NULL);
	intr_pic_ipi_setup(IPI_PREEMPT, "preempt", ipi_preempt, NULL);
	intr_pic_ipi_setup(IPI_RENDEZVOUS, "rendezvous", ipi_rendezvous, NULL);
	intr_pic_ipi_setup(IPI_STOP, "stop", ipi_stop, NULL);
	intr_pic_ipi_setup(IPI_STOP_HARD, "stop hard", ipi_stop, NULL);
	intr_pic_ipi_setup(IPI_HARDCLOCK, "hardclock", ipi_hardclock, NULL);

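	/*
	 * "dsb ishst" below makes the store to aps_ready visible to the
	 * other CPUs before "sev" raises the event that releases their
	 * "wfe" spin in init_secondary().
	 */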
	atomic_store_rel_int(&aps_ready, 1);
	/* Wake up the other CPUs */
	__asm __volatile(
	    "dsb ishst	\n"
	    "sev	\n"
	    ::: "memory");

	printf("Release APs...");

	started = 0;
	for (i = 0; i < 2000; i++) {
		if (smp_started) {
			printf("done\n");
			return;
		}
		/*
		 * Don't time out while we are making progress. Some large
		 * systems can take a while to start all CPUs.
		 */
		if (smp_cpus > started) {
			i = 0;
			started = smp_cpus;
		}
		DELAY(1000);
	}

	printf("APs not started\n");
}
SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);

void
init_secondary(uint64_t cpu)
{
	struct pcpu *pcpup;

	pcpup = &__pcpu[cpu];
	/*
	 * Set the pcpu pointer with a backup in tpidr_el1 to be
	 * loaded when entering the kernel from userland.
	 */
	__asm __volatile(
	    "mov x18, %0 \n"
	    "msr tpidr_el1, %0" :: "r"(pcpup));

	/* Spin until the BSP releases the APs */
	while (!aps_ready)
		__asm __volatile("wfe");

	/* Initialize curthread */
	KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
	pcpup->pc_curthread = pcpup->pc_idlethread;
	pcpup->pc_curpcb = pcpup->pc_idlethread->td_pcb;

	/*
	 * Identify the current CPU. This is necessary to set up
	 * affinity registers and to provide support for
	 * runtime chip identification.
	 */
	identify_cpu();
	install_cpu_errata();

	intr_pic_init_secondary();

	/* Start per-CPU event timers. */
	cpu_initclocks_ap();

#ifdef VFP
	vfp_init();
#endif

	dbg_init();
	pan_enable();

	/* Enable interrupts */
	intr_enable();

	mtx_lock_spin(&ap_boot_mtx);

	atomic_add_rel_32(&smp_cpus, 1);

	if (smp_cpus == mp_ncpus) {
		/* Enable IPIs, TLB shootdowns, freezes, etc. */
		atomic_store_rel_int(&smp_started, 1);
	}

	mtx_unlock_spin(&ap_boot_mtx);

	/* Enter the scheduler */
	sched_throw(NULL);

	panic("scheduler returned us to init_secondary");
	/* NOTREACHED */
}

/*
 *  Send an IPI through the interrupt controller.
 */
static void
pic_ipi_send(void *arg, cpuset_t cpus, u_int ipi)
{

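	/* 'arg' is the struct intr_irqsrc registered in intr_pic_ipi_setup(). */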
	KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__));
	PIC_IPI_SEND(intr_irq_root_dev, arg, cpus, ipi);
}

/*
 *  Set up an IPI handler on the interrupt controller.
 *
 *  Not SMP coherent.
 */
static void
intr_pic_ipi_setup(u_int ipi, const char *name, intr_ipi_handler_t *hand,
    void *arg)
{
	struct intr_irqsrc *isrc;
	struct intr_ipi *ii;
	int error;

	KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__));
	KASSERT(hand != NULL, ("%s: ipi %u no handler", __func__, ipi));

	error = PIC_IPI_SETUP(intr_irq_root_dev, ipi, &isrc);
	if (error != 0)
		return;

	isrc->isrc_handlers++;

	ii = intr_ipi_lookup(ipi);
	KASSERT(ii->ii_count == NULL, ("%s: ipi %u reused", __func__, ipi));

	ii->ii_handler = hand;
	ii->ii_handler_arg = arg;
	ii->ii_send = pic_ipi_send;
	ii->ii_send_arg = isrc;
	strlcpy(ii->ii_name, name, INTR_IPI_NAMELEN);
	ii->ii_count = intr_ipi_setup_counters(name);
}

static void
intr_ipi_send(cpuset_t cpus, u_int ipi)
{
	struct intr_ipi *ii;

	ii = intr_ipi_lookup(ipi);
	if (ii->ii_count == NULL)
		panic("%s: not setup IPI %u", __func__, ipi);

	ii->ii_send(ii->ii_send_arg, cpus, ipi);
}

static void
ipi_ast(void *dummy __unused)
{

	CTR0(KTR_SMP, "IPI_AST");
}

static void
ipi_hardclock(void *dummy __unused)
{

	CTR1(KTR_SMP, "%s: IPI_HARDCLOCK", __func__);
	hardclockintr();
}

static void
ipi_preempt(void *dummy __unused)
{
	CTR1(KTR_SMP, "%s: IPI_PREEMPT", __func__);
	sched_preempt(curthread);
}

static void
ipi_rendezvous(void *dummy __unused)
{

	CTR0(KTR_SMP, "IPI_RENDEZVOUS");
	smp_rendezvous_action();
}

static void
ipi_stop(void *dummy __unused)
{
	u_int cpu;

	CTR0(KTR_SMP, "IPI_STOP");

	cpu = PCPU_GET(cpuid);
	savectx(&stoppcbs[cpu]);

	/* Indicate we are stopped */
	CPU_SET_ATOMIC(cpu, &stopped_cpus);

	/* Wait for restart */
	while (!CPU_ISSET(cpu, &started_cpus))
		cpu_spinwait();

	CPU_CLR_ATOMIC(cpu, &started_cpus);
	CPU_CLR_ATOMIC(cpu, &stopped_cpus);
	CTR0(KTR_SMP, "IPI_STOP (restart)");
}

struct cpu_group *
cpu_topo(void)
{

	return (smp_topo_none());
}

/* Determine if we are running on an MP machine */
int
cpu_mp_probe(void)
{

	/* ARM64TODO: Read the u bit of mpidr_el1 to determine this */
	return (1);
}

static bool
start_cpu(u_int id, uint64_t target_cpu)
{
	struct pcpu *pcpup;
	vm_paddr_t pa;
	u_int cpuid;
	int err;

	/* Check we are able to start this cpu */
	if (id > mp_maxid)
		return (false);

	KASSERT(id < MAXCPU, ("Too many CPUs"));

	/* We are already running on cpu 0 */
	if (id == cpu0)
		return (true);

	/*
	 * Rotate the CPU IDs to put the boot CPU as CPU 0. We keep the other
	 * CPUs ordered as they are likely grouped into clusters, so it can be
	 * useful to keep that property, e.g. for the GICv3 driver to send
	 * an IPI to all CPUs in the cluster.
	 */
	cpuid = id;
	if (cpuid < cpu0)
		cpuid += mp_maxid + 1;
	cpuid -= cpu0;
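	/*
	 * For example, with cpu0 == 2 and mp_maxid == 3 the hardware IDs
	 * 2, 3, 0, 1 map to cpuids 0, 1, 2, 3 respectively.
	 */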

	pcpup = &__pcpu[cpuid];
	pcpu_init(pcpup, cpuid, sizeof(struct pcpu));

	dpcpu[cpuid - 1] = (void *)kmem_malloc(DPCPU_SIZE, M_WAITOK | M_ZERO);
	dpcpu_init(dpcpu[cpuid - 1], cpuid);

	printf("Starting CPU %u (%lx)\n", cpuid, target_cpu);
	pa = pmap_extract(kernel_pmap, (vm_offset_t)mpentry);

	err = psci_cpu_on(target_cpu, pa, cpuid);
	if (err != PSCI_RETVAL_SUCCESS) {
		/*
		 * Panic here if INVARIANTS are enabled and PSCI failed to
		 * start the requested CPU: psci_cpu_on() returns PSCI_MISSING
		 * to indicate it cannot be used to start the given CPU, and
		 * MP_QUIRK_CPULIST marks CPU lists that may be wrong, so
		 * neither case is treated as a panic.
		 */
		KASSERT(err == PSCI_MISSING ||
		    (mp_quirks & MP_QUIRK_CPULIST) == MP_QUIRK_CPULIST,
		    ("Failed to start CPU %u (%lx)\n", id, target_cpu));

		pcpu_destroy(pcpup);
		kmem_free((vm_offset_t)dpcpu[cpuid - 1], DPCPU_SIZE);
		dpcpu[cpuid - 1] = NULL;
		mp_ncpus--;

		/* Notify the user that the CPU failed to start */
		printf("Failed to start CPU %u (%lx)\n", id, target_cpu);
	} else
		CPU_SET(cpuid, &all_cpus);

	return (true);
}

#ifdef DEV_ACPI
static void
madt_handler(ACPI_SUBTABLE_HEADER *entry, void *arg)
{
	ACPI_MADT_GENERIC_INTERRUPT *intr;
	u_int *cpuid;
	u_int id;

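	/*
	 * Each Generic Interrupt (GICC) subtable describes one CPU; its
	 * ArmMpidr field is the MPIDR affinity value PSCI needs as the
	 * target to start that CPU.
	 */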
	switch(entry->Type) {
	case ACPI_MADT_TYPE_GENERIC_INTERRUPT:
		intr = (ACPI_MADT_GENERIC_INTERRUPT *)entry;
		cpuid = arg;
		id = *cpuid;
		start_cpu(id, intr->ArmMpidr);
		__pcpu[id].pc_acpi_id = intr->Uid;
		(*cpuid)++;
		break;
	default:
		break;
	}
}

static void
cpu_init_acpi(void)
{
	ACPI_TABLE_MADT *madt;
	vm_paddr_t physaddr;
	u_int cpuid;

	physaddr = acpi_find_table(ACPI_SIG_MADT);
	if (physaddr == 0)
		return;

	madt = acpi_map_table(physaddr, ACPI_SIG_MADT);
	if (madt == NULL) {
		printf("Unable to map the MADT, not starting APs\n");
		return;
	}

	cpuid = 0;
	acpi_walk_subtables(madt + 1, (char *)madt + madt->Header.Length,
	    madt_handler, &cpuid);

	acpi_unmap_table(madt);

#if MAXMEMDOM > 1
	/* set proximity info */
	acpi_pxm_set_cpu_locality();
	acpi_pxm_free();
#endif
}
#endif

#ifdef FDT
static boolean_t
cpu_init_fdt(u_int id, phandle_t node, u_int addr_size, pcell_t *reg)
{
	uint64_t target_cpu;
	int domain;
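	/*
	 * The cpu node's "reg" property holds the MPIDR affinity value.
	 * With #address-cells == 2 it is split into two 32-bit cells,
	 * most-significant cell first.
	 */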
	target_cpu = reg[0];
	if (addr_size == 2) {
		target_cpu <<= 32;
		target_cpu |= reg[1];
	}

	if (!start_cpu(id, target_cpu))
		return (FALSE);

	/* Try to read the NUMA node of this CPU */
	if (vm_ndomains == 1 ||
	    OF_getencprop(node, "numa-node-id", &domain, sizeof(domain)) <= 0)
		domain = 0;
	__pcpu[id].pc_domain = domain;
	if (domain < MAXMEMDOM)
		CPU_SET(id, &cpuset_domain[domain]);

	return (TRUE);
}
#endif

/* Initialize and fire up non-boot processors */
void
cpu_mp_start(void)
{
#ifdef FDT
	phandle_t node;
	int i;
#endif

	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);

	CPU_SET(0, &all_cpus);

	switch(arm64_bus_method) {
#ifdef DEV_ACPI
	case ARM64_BUS_ACPI:
		mp_quirks = MP_QUIRK_CPULIST;
		KASSERT(cpu0 >= 0, ("Current CPU was not found"));
		cpu_init_acpi();
		break;
#endif
#ifdef FDT
	case ARM64_BUS_FDT:
		node = OF_peer(0);
		for (i = 0; fdt_quirks[i].compat != NULL; i++) {
			if (ofw_bus_node_is_compatible(node,
			    fdt_quirks[i].compat) != 0) {
				mp_quirks = fdt_quirks[i].quirks;
			}
		}
		KASSERT(cpu0 >= 0, ("Current CPU was not found"));
		ofw_cpu_early_foreach(cpu_init_fdt, true);
		break;
#endif
	default:
		break;
	}
}

/* Introduce rest of cores to the world */
void
cpu_mp_announce(void)
{
}

#ifdef DEV_ACPI
static void
cpu_count_acpi_handler(ACPI_SUBTABLE_HEADER *entry, void *arg)
{
	ACPI_MADT_GENERIC_INTERRUPT *intr;
	u_int *cores = arg;
	uint64_t mpidr_reg;

	switch(entry->Type) {
	case ACPI_MADT_TYPE_GENERIC_INTERRUPT:
		intr = (ACPI_MADT_GENERIC_INTERRUPT *)entry;
		if (cpu0 < 0) {
			mpidr_reg = READ_SPECIALREG(mpidr_el1);
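			/*
			 * Compare only the affinity fields of MPIDR_EL1:
			 * Aff3 is bits 39:32 and Aff2..Aff0 are bits 23:0,
			 * which is what the 0xff00ffffff mask keeps; the
			 * flag bits in 31:24 are ignored.
			 */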
			if ((mpidr_reg & 0xff00fffffful) == intr->ArmMpidr)
				cpu0 = *cores;
		}
		(*cores)++;
		break;
	default:
		break;
	}
}

static u_int
cpu_count_acpi(void)
{
	ACPI_TABLE_MADT *madt;
	vm_paddr_t physaddr;
	u_int cores;

	physaddr = acpi_find_table(ACPI_SIG_MADT);
	if (physaddr == 0)
		return (0);

	madt = acpi_map_table(physaddr, ACPI_SIG_MADT);
	if (madt == NULL) {
		printf("Unable to map the MADT, not starting APs\n");
		return (0);
	}

	cores = 0;
	acpi_walk_subtables(madt + 1, (char *)madt + madt->Header.Length,
	    cpu_count_acpi_handler, &cores);

	acpi_unmap_table(madt);

	return (cores);
}
#endif

#ifdef FDT
static boolean_t
cpu_find_cpu0_fdt(u_int id, phandle_t node, u_int addr_size, pcell_t *reg)
{
	uint64_t mpidr_fdt, mpidr_reg;

	if (cpu0 < 0) {
		mpidr_fdt = reg[0];
		if (addr_size == 2) {
			mpidr_fdt <<= 32;
			mpidr_fdt |= reg[1];
		}

		mpidr_reg = READ_SPECIALREG(mpidr_el1);

		if ((mpidr_reg & 0xff00fffffful) == mpidr_fdt)
			cpu0 = id;
	}

	return (TRUE);
}
#endif

void
cpu_mp_setmaxid(void)
{
#if defined(DEV_ACPI) || defined(FDT)
	int cores;
#endif

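	/*
	 * The table walkers below count the CPUs and, as a side effect,
	 * record which entry matches our own MPIDR, so cpu0 is known by
	 * the time cpu_mp_start() runs.
	 */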
	switch(arm64_bus_method) {
#ifdef DEV_ACPI
	case ARM64_BUS_ACPI:
		cores = cpu_count_acpi();
		if (cores > 0) {
			cores = MIN(cores, MAXCPU);
			if (bootverbose)
				printf("Found %d CPUs in the ACPI tables\n",
				    cores);
			mp_ncpus = cores;
			mp_maxid = cores - 1;
			return;
		}
		break;
#endif
#ifdef FDT
	case ARM64_BUS_FDT:
		cores = ofw_cpu_early_foreach(cpu_find_cpu0_fdt, false);
		if (cores > 0) {
			cores = MIN(cores, MAXCPU);
			if (bootverbose)
				printf("Found %d CPUs in the device tree\n",
				    cores);
			mp_ncpus = cores;
			mp_maxid = cores - 1;
			return;
		}
		break;
#endif
	default:
		break;
	}

	if (bootverbose)
		printf("No CPU data, limiting to 1 core\n");
	mp_ncpus = 1;
	mp_maxid = 0;
}

/*
 *  Lookup IPI source.
 */
static struct intr_ipi *
intr_ipi_lookup(u_int ipi)
{

	if (ipi >= INTR_IPI_COUNT)
		panic("%s: no such IPI %u", __func__, ipi);

	return (&ipi_sources[ipi]);
}

/*
 *  Interrupt controller dispatch function for IPIs. It should
 *  be called straight from the interrupt controller when the associated
 *  interrupt source is learned, or from anybody who has an interrupt
 *  source mapped.
 */
void
intr_ipi_dispatch(u_int ipi, struct trapframe *tf)
{
	void *arg;
	struct intr_ipi *ii;

	ii = intr_ipi_lookup(ipi);
	if (ii->ii_count == NULL)
		panic("%s: not setup IPI %u", __func__, ipi);

	intr_ipi_increment_count(ii->ii_count, PCPU_GET(cpuid));

	/*
	 * Pass the trap frame as the handler argument
	 * if no argument was registered.
	 */
	arg = ii->ii_handler_arg != NULL ? ii->ii_handler_arg : tf;
	ii->ii_handler(arg);
}

#ifdef notyet
/*
 *  Map IPI into interrupt controller.
 *
 *  Not SMP coherent.
 */
static int
ipi_map(struct intr_irqsrc *isrc, u_int ipi)
{
	boolean_t is_percpu;
	int error;

	if (ipi >= INTR_IPI_COUNT)
		panic("%s: no such IPI %u", __func__, ipi);

	KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__));

	isrc->isrc_type = INTR_ISRCT_NAMESPACE;
	isrc->isrc_nspc_type = INTR_IRQ_NSPC_IPI;
	isrc->isrc_nspc_num = ipi_next_num;

	error = PIC_REGISTER(intr_irq_root_dev, isrc, &is_percpu);
	if (error == 0) {
		isrc->isrc_dev = intr_irq_root_dev;
		ipi_next_num++;
	}
	return (error);
}

/*
 *  Set up an IPI handler on an interrupt source.
 *
 *  Note that there could be more ways to send and receive IPIs
 *  on a platform, fast interrupts for example. In that case,
 *  one can call this function with the AISHF_NOALLOC flag set and then
 *  call intr_ipi_dispatch() when appropriate.
 *
 *  Not SMP coherent.
 */
int
intr_ipi_set_handler(u_int ipi, const char *name, intr_ipi_filter_t *filter,
    void *arg, u_int flags)
{
	struct intr_irqsrc *isrc;
	int error;

	if (filter == NULL)
		return (EINVAL);

	isrc = intr_ipi_lookup(ipi);
	if (isrc->isrc_ipifilter != NULL)
		return (EEXIST);

	if ((flags & AISHF_NOALLOC) == 0) {
		error = ipi_map(isrc, ipi);
		if (error != 0)
			return (error);
	}

	isrc->isrc_ipifilter = filter;
	isrc->isrc_arg = arg;
	isrc->isrc_handlers = 1;
	isrc->isrc_count = intr_ipi_setup_counters(name);
	isrc->isrc_index = 0; /* it should not be used in IPI case */

	if (isrc->isrc_dev != NULL) {
		PIC_ENABLE_INTR(isrc->isrc_dev, isrc);
		PIC_ENABLE_SOURCE(isrc->isrc_dev, isrc);
	}
	return (0);
}
#endif

/* Sending IPI */
void
ipi_all_but_self(u_int ipi)
{
	cpuset_t cpus;

	cpus = all_cpus;
	CPU_CLR(PCPU_GET(cpuid), &cpus);
	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	intr_ipi_send(cpus, ipi);
}

void
ipi_cpu(int cpu, u_int ipi)
{
	cpuset_t cpus;

	CPU_ZERO(&cpus);
	CPU_SET(cpu, &cpus);

	CTR3(KTR_SMP, "%s: cpu: %d, ipi: %x", __func__, cpu, ipi);
	intr_ipi_send(cpus, ipi);
}

void
ipi_selected(cpuset_t cpus, u_int ipi)
{

	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	intr_ipi_send(cpus, ipi);
}
827