xref: /freebsd/sys/arm64/arm64/mp_machdep.c (revision e17f5b1d)
/*-
 * Copyright (c) 2015-2016 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Andrew Turner under
 * sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include "opt_acpi.h"
#include "opt_ddb.h"
#include "opt_kstack_pages.h"
#include "opt_platform.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/csan.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>

#include <machine/machdep.h>
#include <machine/debug_monitor.h>
#include <machine/intr.h>
#include <machine/smp.h>
#ifdef VFP
#include <machine/vfp.h>
#endif

#ifdef DEV_ACPI
#include <contrib/dev/acpica/include/acpi.h>
#include <dev/acpica/acpivar.h>
#endif

#ifdef FDT
#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#include <dev/ofw/ofw_cpu.h>
#endif

#include <dev/psci/psci.h>

#include "pic_if.h"

#define	MP_QUIRK_CPULIST	0x01	/* The list of cpus may be wrong, */
					/* don't panic if one fails to start */
static uint32_t mp_quirks;

#ifdef FDT
static struct {
	const char *compat;
	uint32_t quirks;
} fdt_quirks[] = {
	{ "arm,foundation-aarch64",	MP_QUIRK_CPULIST },
	{ "arm,fvp-base",		MP_QUIRK_CPULIST },
	/* Some DTS files use this misspelling of "arm,fvp-base" */
	{ "arm,vfp-base",		MP_QUIRK_CPULIST },
	{ NULL, 0 },
};
#endif

typedef void intr_ipi_send_t(void *, cpuset_t, u_int);
typedef void intr_ipi_handler_t(void *);

#define INTR_IPI_NAMELEN	(MAXCOMLEN + 1)
struct intr_ipi {
	intr_ipi_handler_t *	ii_handler;	/* run when the IPI arrives */
	void *			ii_handler_arg;	/* NULL means pass trapframe */
	intr_ipi_send_t *	ii_send;	/* PIC-specific send method */
	void *			ii_send_arg;	/* interrupt source for ii_send */
	char			ii_name[INTR_IPI_NAMELEN];
	u_long *		ii_count;	/* per-CPU delivery counters */
};

static struct intr_ipi ipi_sources[INTR_IPI_COUNT];

static struct intr_ipi *intr_ipi_lookup(u_int);
static void intr_pic_ipi_setup(u_int, const char *, intr_ipi_handler_t *,
    void *);

static void ipi_ast(void *);
static void ipi_hardclock(void *);
static void ipi_preempt(void *);
static void ipi_rendezvous(void *);
static void ipi_stop(void *);

/* Register state saved by ipi_stop() for each stopped CPU. */
struct pcb stoppcbs[MAXCPU];

/*
 * Not all systems boot from the first CPU in the device tree. To work
 * around this we need to find which CPU we have booted from so that,
 * when we later enable the secondary CPUs, we can skip it.
 */
static int cpu0 = -1;

void mpentry(unsigned long cpuid);
void init_secondary(uint64_t);

/* Synchronize AP startup. */
static struct mtx ap_boot_mtx;

/* Stacks for AP initialization, discarded once idle threads are started. */
void *bootstack;
static void *bootstacks[MAXCPU];

/* Count of started APs, used to synchronize access to bootstack. */
static volatile int aps_started;

/* Set to 1 once we're ready to let the APs out of the pen. */
static volatile int aps_ready;

/* Temporary variables for init_secondary() */
void *dpcpu[MAXCPU - 1];

static void
release_aps(void *dummy __unused)
{
	int i, started;

	/* Only release CPUs if they exist */
	if (mp_ncpus == 1)
		return;

	intr_pic_ipi_setup(IPI_AST, "ast", ipi_ast, NULL);
	intr_pic_ipi_setup(IPI_PREEMPT, "preempt", ipi_preempt, NULL);
	intr_pic_ipi_setup(IPI_RENDEZVOUS, "rendezvous", ipi_rendezvous, NULL);
	intr_pic_ipi_setup(IPI_STOP, "stop", ipi_stop, NULL);
	intr_pic_ipi_setup(IPI_STOP_HARD, "stop hard", ipi_stop, NULL);
	intr_pic_ipi_setup(IPI_HARDCLOCK, "hardclock", ipi_hardclock, NULL);

	atomic_store_rel_int(&aps_ready, 1);
	/*
	 * Ensure the store to aps_ready is visible to the other CPUs,
	 * then wake any of them waiting in WFE.
	 */
	__asm __volatile(
	    "dsb ishst	\n"
	    "sev	\n"
	    ::: "memory");

	printf("Release APs...");

	started = 0;
	for (i = 0; i < 2000; i++) {
		if (smp_started) {
			printf("done\n");
			return;
		}
		/*
		 * Don't time out while we are making progress. Some large
		 * systems can take a while to start all CPUs.
		 */
		if (smp_cpus > started) {
			i = 0;
			started = smp_cpus;
		}
		DELAY(1000);
	}

	printf("APs not started\n");
}
SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);
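
/*
 * Together with init_secondary() below, this implements the AP startup
 * handshake: start_cpu() leaves each AP running on its boot stack, where
 * it increments aps_started and waits in WFE until the boot CPU stores
 * 1 to aps_ready and issues an SEV; release_aps() then waits for
 * smp_started, which the last AP to check in sets.
 */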

void
init_secondary(uint64_t cpu)
{
	struct pcpu *pcpup;
	pmap_t pmap0;

	pcpup = &__pcpu[cpu];
	/*
	 * Set the pcpu pointer with a backup in tpidr_el1 to be
	 * loaded when entering the kernel from userland.
	 */
	__asm __volatile(
	    "mov x18, %0 \n"
	    "msr tpidr_el1, %0" :: "r"(pcpup));

	/*
	 * Identify the current CPU. This is necessary to set up
	 * affinity registers and to provide support for runtime
	 * chip identification.
	 *
	 * We need this before signalling the CPU is ready to
	 * let the boot CPU use the results.
	 */
	identify_cpu(cpu);

	/* Ensure the stores in identify_cpu have completed */
	atomic_thread_fence_acq_rel();

	/* Signal the BSP and spin until it has released all APs. */
	atomic_add_int(&aps_started, 1);
	while (!atomic_load_int(&aps_ready))
		__asm __volatile("wfe");

	pcpup->pc_midr = get_midr();

	/* Initialize curthread */
	KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
	pcpup->pc_curthread = pcpup->pc_idlethread;

	/* Initialize curpmap to match TTBR0's current setting. */
	pmap0 = vmspace_pmap(&vmspace0);
	KASSERT(pmap_to_ttbr0(pmap0) == READ_SPECIALREG(ttbr0_el1),
	    ("pmap0 doesn't match cpu %ld's ttbr0", cpu));
	pcpup->pc_curpmap = pmap0;

	install_cpu_errata();

	intr_pic_init_secondary();

	/* Start per-CPU event timers. */
	cpu_initclocks_ap();

#ifdef VFP
	vfp_init();
#endif

	dbg_init();
	pan_enable();

	mtx_lock_spin(&ap_boot_mtx);
	atomic_add_rel_32(&smp_cpus, 1);
	if (smp_cpus == mp_ncpus) {
		/* Enable IPIs, TLB shootdowns, freezes, etc. */
		atomic_store_rel_int(&smp_started, 1);
	}
	mtx_unlock_spin(&ap_boot_mtx);

	kcsan_cpu_init(cpu);

	/*
	 * Assert that smp_after_idle_runnable condition is reasonable.
	 */
	MPASS(PCPU_GET(curpcb) == NULL);

	/* Enter the scheduler */
	sched_throw(NULL);

	panic("scheduler returned us to init_secondary");
	/* NOTREACHED */
}

static void
smp_after_idle_runnable(void *arg __unused)
{
	struct pcpu *pc;
	int cpu;

	for (cpu = 1; cpu < mp_ncpus; cpu++) {
		if (bootstacks[cpu] != NULL) {
			pc = pcpu_find(cpu);
			while (atomic_load_ptr(&pc->pc_curpcb) == NULL)
				cpu_spinwait();
			kmem_free((vm_offset_t)bootstacks[cpu], PAGE_SIZE);
		}
	}
}
SYSINIT(smp_after_idle_runnable, SI_SUB_SMP, SI_ORDER_ANY,
    smp_after_idle_runnable, NULL);
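
/*
 * Waiting for pc_curpcb to become non-NULL above is what makes freeing
 * the boot stacks safe: an AP's pc_curpcb is only set once it has entered
 * the scheduler and is running on its idle thread's stack (note the MPASS
 * in init_secondary()), so its boot stack can no longer be in use.
 */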

/*
 *  Send an IPI through the interrupt controller.
 */
static void
pic_ipi_send(void *arg, cpuset_t cpus, u_int ipi)
{

	KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__));
	PIC_IPI_SEND(intr_irq_root_dev, arg, cpus, ipi);
}

/*
 *  Set up an IPI handler on the interrupt controller.
 *
 *  Not SMP coherent.
 */
static void
intr_pic_ipi_setup(u_int ipi, const char *name, intr_ipi_handler_t *hand,
    void *arg)
{
	struct intr_irqsrc *isrc;
	struct intr_ipi *ii;
	int error;

	KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__));
	KASSERT(hand != NULL, ("%s: ipi %u no handler", __func__, ipi));

	error = PIC_IPI_SETUP(intr_irq_root_dev, ipi, &isrc);
	if (error != 0)
		return;

	isrc->isrc_handlers++;

	ii = intr_ipi_lookup(ipi);
	KASSERT(ii->ii_count == NULL, ("%s: ipi %u reused", __func__, ipi));

	ii->ii_handler = hand;
	ii->ii_handler_arg = arg;
	ii->ii_send = pic_ipi_send;
	ii->ii_send_arg = isrc;
	strlcpy(ii->ii_name, name, INTR_IPI_NAMELEN);
	ii->ii_count = intr_ipi_setup_counters(name);
}

static void
intr_ipi_send(cpuset_t cpus, u_int ipi)
{
	struct intr_ipi *ii;

	ii = intr_ipi_lookup(ipi);
	if (ii->ii_count == NULL)
		panic("%s: not setup IPI %u", __func__, ipi);

	ii->ii_send(ii->ii_send_arg, cpus, ipi);
}

static void
ipi_ast(void *dummy __unused)
{

	CTR0(KTR_SMP, "IPI_AST");
}

static void
ipi_hardclock(void *dummy __unused)
{

	CTR1(KTR_SMP, "%s: IPI_HARDCLOCK", __func__);
	hardclockintr();
}

static void
ipi_preempt(void *dummy __unused)
{
	CTR1(KTR_SMP, "%s: IPI_PREEMPT", __func__);
	sched_preempt(curthread);
}

static void
ipi_rendezvous(void *dummy __unused)
{

	CTR0(KTR_SMP, "IPI_RENDEZVOUS");
	smp_rendezvous_action();
}

static void
ipi_stop(void *dummy __unused)
{
	u_int cpu;

	CTR0(KTR_SMP, "IPI_STOP");

	cpu = PCPU_GET(cpuid);
	savectx(&stoppcbs[cpu]);

	/* Indicate we are stopped */
	CPU_SET_ATOMIC(cpu, &stopped_cpus);

	/* Wait for restart */
	while (!CPU_ISSET(cpu, &started_cpus))
		cpu_spinwait();

#ifdef DDB
	dbg_register_sync(NULL);
#endif

	CPU_CLR_ATOMIC(cpu, &started_cpus);
	CPU_CLR_ATOMIC(cpu, &stopped_cpus);
	CTR0(KTR_SMP, "IPI_STOP (restart)");
}

struct cpu_group *
cpu_topo(void)
{

	return (smp_topo_none());
}
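
/*
 * smp_topo_none() reports all CPUs as a single flat group; CPU clusters
 * are not (yet) described to the scheduler here.
 */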

/* Determine if we are running on an MP machine */
int
cpu_mp_probe(void)
{

	/* ARM64TODO: Read the U bit of mpidr_el1 to determine this */
	return (1);
}

static bool
start_cpu(u_int id, uint64_t target_cpu)
{
	struct pcpu *pcpup;
	vm_paddr_t pa;
	u_int cpuid;
	int err, naps;

	/* Check we are able to start this cpu */
	if (id > mp_maxid)
		return (false);

	KASSERT(id < MAXCPU, ("Too many CPUs"));

	/* We are already running on the boot CPU */
	if (id == cpu0)
		return (true);

	/*
	 * Rotate the CPU IDs to put the boot CPU as CPU 0. We keep the other
	 * CPUs ordered as they are likely grouped into clusters so it can be
	 * useful to keep that property, e.g. for the GICv3 driver to send
	 * an IPI to all CPUs in the cluster.
	 */
	cpuid = id;
	if (cpuid < cpu0)
		cpuid += mp_maxid + 1;
	cpuid -= cpu0;
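
	/*
	 * Worked example: with mp_maxid == 3 and cpu0 == 2, hardware ids
	 * 0, 1 and 3 map to cpuids 2, 3 and 1 respectively, while the boot
	 * CPU (id 2, handled above) is cpuid 0.
	 */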

	pcpup = &__pcpu[cpuid];
	pcpu_init(pcpup, cpuid, sizeof(struct pcpu));

	dpcpu[cpuid - 1] = (void *)kmem_malloc(DPCPU_SIZE, M_WAITOK | M_ZERO);
	dpcpu_init(dpcpu[cpuid - 1], cpuid);

	bootstacks[cpuid] = (void *)kmem_malloc(PAGE_SIZE, M_WAITOK | M_ZERO);

	naps = atomic_load_int(&aps_started);
	bootstack = (char *)bootstacks[cpuid] + PAGE_SIZE;

	printf("Starting CPU %u (%lx)\n", cpuid, target_cpu);
	pa = pmap_extract(kernel_pmap, (vm_offset_t)mpentry);
	err = psci_cpu_on(target_cpu, pa, cpuid);
	if (err != PSCI_RETVAL_SUCCESS) {
		/*
		 * Panic here if INVARIANTS are enabled and PSCI failed to
		 * start the requested CPU.  psci_cpu_on() returns PSCI_MISSING
		 * to indicate we are unable to use it to start the given CPU.
		 */
		KASSERT(err == PSCI_MISSING ||
		    (mp_quirks & MP_QUIRK_CPULIST) == MP_QUIRK_CPULIST,
		    ("Failed to start CPU %u (%lx), error %d\n",
		    id, target_cpu, err));

		pcpu_destroy(pcpup);
		kmem_free((vm_offset_t)dpcpu[cpuid - 1], DPCPU_SIZE);
		dpcpu[cpuid - 1] = NULL;
		kmem_free((vm_offset_t)bootstacks[cpuid], PAGE_SIZE);
		bootstacks[cpuid] = NULL;
		mp_ncpus--;

		/* Notify the user that the CPU failed to start */
		printf("Failed to start CPU %u (%lx), error %d\n",
		    id, target_cpu, err);
	} else {
		/* Wait for the AP to switch to its boot stack. */
		while (atomic_load_int(&aps_started) < naps + 1)
			cpu_spinwait();
		CPU_SET(cpuid, &all_cpus);
	}

	return (true);
}

#ifdef DEV_ACPI
static void
madt_handler(ACPI_SUBTABLE_HEADER *entry, void *arg)
{
	ACPI_MADT_GENERIC_INTERRUPT *intr;
	u_int *cpuid;
	u_int id;

	switch (entry->Type) {
	case ACPI_MADT_TYPE_GENERIC_INTERRUPT:
		intr = (ACPI_MADT_GENERIC_INTERRUPT *)entry;
		cpuid = arg;
		id = *cpuid;
		start_cpu(id, intr->ArmMpidr);
		__pcpu[id].pc_acpi_id = intr->Uid;
		(*cpuid)++;
		break;
	default:
		break;
	}
}

static void
cpu_init_acpi(void)
{
	ACPI_TABLE_MADT *madt;
	vm_paddr_t physaddr;
	u_int cpuid;

	physaddr = acpi_find_table(ACPI_SIG_MADT);
	if (physaddr == 0)
		return;

	madt = acpi_map_table(physaddr, ACPI_SIG_MADT);
	if (madt == NULL) {
		printf("Unable to map the MADT, not starting APs\n");
		return;
	}

	cpuid = 0;
	acpi_walk_subtables(madt + 1, (char *)madt + madt->Header.Length,
	    madt_handler, &cpuid);

	acpi_unmap_table(madt);

#if MAXMEMDOM > 1
	acpi_pxm_set_cpu_locality();
#endif
}
#endif

#ifdef FDT
static boolean_t
cpu_init_fdt(u_int id, phandle_t node, u_int addr_size, pcell_t *reg)
{
	uint64_t target_cpu;
	int domain;

	target_cpu = reg[0];
	if (addr_size == 2) {
		target_cpu <<= 32;
		target_cpu |= reg[1];
	}

	if (!start_cpu(id, target_cpu))
		return (FALSE);

	/* Try to read the NUMA node of this CPU */
	if (vm_ndomains == 1 ||
	    OF_getencprop(node, "numa-node-id", &domain, sizeof(domain)) <= 0)
		domain = 0;
	__pcpu[id].pc_domain = domain;
	if (domain < MAXMEMDOM)
		CPU_SET(id, &cpuset_domain[domain]);

	return (TRUE);
}
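
/*
 * For illustration only, a cpu node this parser handles (the values are
 * made up); with two address cells the MPIDR spans both reg cells:
 *
 *	cpu@100 {
 *		device_type = "cpu";
 *		compatible = "arm,cortex-a53";
 *		reg = <0x0 0x100>;
 *		numa-node-id = <1>;
 *	};
 */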
#endif

/* Initialize and fire up non-boot processors */
void
cpu_mp_start(void)
{
#ifdef FDT
	phandle_t node;
	int i;
#endif

	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);

	CPU_SET(0, &all_cpus);

	switch (arm64_bus_method) {
#ifdef DEV_ACPI
	case ARM64_BUS_ACPI:
		mp_quirks = MP_QUIRK_CPULIST;
		KASSERT(cpu0 >= 0, ("Current CPU was not found"));
		cpu_init_acpi();
		break;
#endif
#ifdef FDT
	case ARM64_BUS_FDT:
		node = OF_peer(0);
		for (i = 0; fdt_quirks[i].compat != NULL; i++) {
			if (ofw_bus_node_is_compatible(node,
			    fdt_quirks[i].compat) != 0) {
				mp_quirks = fdt_quirks[i].quirks;
			}
		}
		KASSERT(cpu0 >= 0, ("Current CPU was not found"));
		ofw_cpu_early_foreach(cpu_init_fdt, true);
		break;
#endif
	default:
		break;
	}
}

/* Introduce the rest of the cores to the world */
void
cpu_mp_announce(void)
{
}

#ifdef DEV_ACPI
static void
cpu_count_acpi_handler(ACPI_SUBTABLE_HEADER *entry, void *arg)
{
	ACPI_MADT_GENERIC_INTERRUPT *intr;
	u_int *cores = arg;
	uint64_t mpidr_reg;

	switch (entry->Type) {
	case ACPI_MADT_TYPE_GENERIC_INTERRUPT:
		intr = (ACPI_MADT_GENERIC_INTERRUPT *)entry;
		if (cpu0 < 0) {
			mpidr_reg = READ_SPECIALREG(mpidr_el1);
			if ((mpidr_reg & 0xff00fffffful) == intr->ArmMpidr)
				cpu0 = *cores;
		}
		(*cores)++;
		break;
	default:
		break;
	}
}

static u_int
cpu_count_acpi(void)
{
	ACPI_TABLE_MADT *madt;
	vm_paddr_t physaddr;
	u_int cores;

	physaddr = acpi_find_table(ACPI_SIG_MADT);
	if (physaddr == 0)
		return (0);

	madt = acpi_map_table(physaddr, ACPI_SIG_MADT);
	if (madt == NULL) {
		printf("Unable to map the MADT, not starting APs\n");
		return (0);
	}

	cores = 0;
	acpi_walk_subtables(madt + 1, (char *)madt + madt->Header.Length,
	    cpu_count_acpi_handler, &cores);

	acpi_unmap_table(madt);

	return (cores);
}
#endif

#ifdef FDT
static boolean_t
cpu_find_cpu0_fdt(u_int id, phandle_t node, u_int addr_size, pcell_t *reg)
{
	uint64_t mpidr_fdt, mpidr_reg;

	if (cpu0 < 0) {
		mpidr_fdt = reg[0];
		if (addr_size == 2) {
			mpidr_fdt <<= 32;
			mpidr_fdt |= reg[1];
		}

		mpidr_reg = READ_SPECIALREG(mpidr_el1);

		if ((mpidr_reg & 0xff00fffffful) == mpidr_fdt)
			cpu0 = id;
	}

	return (TRUE);
}
#endif

void
cpu_mp_setmaxid(void)
{
	int cores;

	mp_ncpus = 1;
	mp_maxid = 0;

	switch (arm64_bus_method) {
#ifdef DEV_ACPI
	case ARM64_BUS_ACPI:
		cores = cpu_count_acpi();
		if (cores > 0) {
			cores = MIN(cores, MAXCPU);
			if (bootverbose)
				printf("Found %d CPUs in the ACPI tables\n",
				    cores);
			mp_ncpus = cores;
			mp_maxid = cores - 1;
		}
		break;
#endif
#ifdef FDT
	case ARM64_BUS_FDT:
		cores = ofw_cpu_early_foreach(cpu_find_cpu0_fdt, false);
		if (cores > 0) {
			cores = MIN(cores, MAXCPU);
			if (bootverbose)
				printf("Found %d CPUs in the device tree\n",
				    cores);
			mp_ncpus = cores;
			mp_maxid = cores - 1;
		}
		break;
#endif
	default:
		if (bootverbose)
			printf("No CPU data, limiting to 1 core\n");
		break;
	}

	if (TUNABLE_INT_FETCH("hw.ncpu", &cores)) {
		if (cores > 0 && cores < mp_ncpus) {
			mp_ncpus = cores;
			mp_maxid = cores - 1;
		}
	}
}
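
/*
 * Note that the hw.ncpu tunable can only lower the count discovered above,
 * e.g. setting hw.ncpu=2 in loader.conf(5) limits an 8-core system to two
 * CPUs; larger values are ignored.
 */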

/*
 *  Look up an IPI source.
 */
static struct intr_ipi *
intr_ipi_lookup(u_int ipi)
{

	if (ipi >= INTR_IPI_COUNT)
		panic("%s: no such IPI %u", __func__, ipi);

	return (&ipi_sources[ipi]);
}

/*
 *  Interrupt controller dispatch function for IPIs. It should
 *  be called straight from the interrupt controller, when the
 *  associated interrupt source is learned, or from anything
 *  that has the interrupt source mapped.
 */
void
intr_ipi_dispatch(u_int ipi, struct trapframe *tf)
{
	void *arg;
	struct intr_ipi *ii;

	ii = intr_ipi_lookup(ipi);
	if (ii->ii_count == NULL)
		panic("%s: not setup IPI %u", __func__, ipi);

	intr_ipi_increment_count(ii->ii_count, PCPU_GET(cpuid));

	/*
	 * Supply the IPI handler with the trapframe as its argument
	 * if no argument was registered.
	 */
	arg = ii->ii_handler_arg != NULL ? ii->ii_handler_arg : tf;
	ii->ii_handler(arg);
}
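
/*
 * Illustrative call path (a sketch; the details live in the PIC drivers):
 * when the root interrupt controller, e.g. the GIC, decodes an incoming
 * software-generated interrupt that was registered via PIC_IPI_SETUP(), it
 * calls intr_ipi_dispatch(ipi, tf) on the receiving CPU.
 */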

#ifdef notyet
/*
 *  Map IPI into interrupt controller.
 *
 *  Not SMP coherent.
 */
static int
ipi_map(struct intr_irqsrc *isrc, u_int ipi)
{
	boolean_t is_percpu;
	int error;

	if (ipi >= INTR_IPI_COUNT)
		panic("%s: no such IPI %u", __func__, ipi);

	KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__));

	isrc->isrc_type = INTR_ISRCT_NAMESPACE;
	isrc->isrc_nspc_type = INTR_IRQ_NSPC_IPI;
	isrc->isrc_nspc_num = ipi_next_num;

	error = PIC_REGISTER(intr_irq_root_dev, isrc, &is_percpu);
	if (error == 0) {
		isrc->isrc_dev = intr_irq_root_dev;
		ipi_next_num++;
	}
	return (error);
}

/*
 *  Set up an IPI handler on an interrupt source.
 *
 *  Note that there could be more ways to send and receive IPIs on a
 *  platform, fast interrupts for example. In that case, one can call
 *  this function with the AISHF_NOALLOC flag set and then call
 *  intr_ipi_dispatch() when appropriate.
 *
 *  Not SMP coherent.
 */
int
intr_ipi_set_handler(u_int ipi, const char *name, intr_ipi_filter_t *filter,
    void *arg, u_int flags)
{
	struct intr_irqsrc *isrc;
	int error;

	if (filter == NULL)
		return (EINVAL);

	isrc = intr_ipi_lookup(ipi);
	if (isrc->isrc_ipifilter != NULL)
		return (EEXIST);

	if ((flags & AISHF_NOALLOC) == 0) {
		error = ipi_map(isrc, ipi);
		if (error != 0)
			return (error);
	}

	isrc->isrc_ipifilter = filter;
	isrc->isrc_arg = arg;
	isrc->isrc_handlers = 1;
	isrc->isrc_count = intr_ipi_setup_counters(name);
	isrc->isrc_index = 0; /* it should not be used in IPI case */

	if (isrc->isrc_dev != NULL) {
		PIC_ENABLE_INTR(isrc->isrc_dev, isrc);
		PIC_ENABLE_SOURCE(isrc->isrc_dev, isrc);
	}
	return (0);
}
#endif

/* Sending IPI */
void
ipi_all_but_self(u_int ipi)
{
	cpuset_t cpus;

	cpus = all_cpus;
	CPU_CLR(PCPU_GET(cpuid), &cpus);
	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	intr_ipi_send(cpus, ipi);
}

void
ipi_cpu(int cpu, u_int ipi)
{
	cpuset_t cpus;

	CPU_ZERO(&cpus);
	CPU_SET(cpu, &cpus);

	CTR3(KTR_SMP, "%s: cpu: %d, ipi: %x", __func__, cpu, ipi);
	intr_ipi_send(cpus, ipi);
}

void
ipi_selected(cpuset_t cpus, u_int ipi)
{

	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	intr_ipi_send(cpus, ipi);
}