xref: /freebsd/sys/arm64/arm64/mp_machdep.c (revision 681ce946)
/*-
 * Copyright (c) 2015-2016 The FreeBSD Foundation
 *
 * This software was developed by Andrew Turner under
 * sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include "opt_acpi.h"
#include "opt_ddb.h"
#include "opt_kstack_pages.h"
#include "opt_platform.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/csan.h>
#include <sys/domainset.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>

#include <machine/machdep.h>
#include <machine/debug_monitor.h>
#include <machine/intr.h>
#include <machine/smp.h>
#ifdef VFP
#include <machine/vfp.h>
#endif

#ifdef DEV_ACPI
#include <contrib/dev/acpica/include/acpi.h>
#include <dev/acpica/acpivar.h>
#endif

#ifdef FDT
#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#include <dev/ofw/ofw_cpu.h>
#endif

#include <dev/psci/psci.h>

#include "pic_if.h"

#define	MP_QUIRK_CPULIST	0x01	/* The list of cpus may be wrong, */
					/* don't panic if one fails to start */
static uint32_t mp_quirks;

#ifdef FDT
static struct {
	const char *compat;
	uint32_t quirks;
} fdt_quirks[] = {
	{ "arm,foundation-aarch64",	MP_QUIRK_CPULIST },
	{ "arm,fvp-base",		MP_QUIRK_CPULIST },
	/* This is incorrect in some DTS files */
	{ "arm,vfp-base",		MP_QUIRK_CPULIST },
	{ NULL, 0 },
};
#endif

typedef void intr_ipi_send_t(void *, cpuset_t, u_int);
typedef void intr_ipi_handler_t(void *);

#define INTR_IPI_NAMELEN	(MAXCOMLEN + 1)
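/*
 * Per-IPI state: the registered handler and its argument, the send
 * method supplied by the interrupt controller driver, a name, and the
 * per-CPU counters allocated by intr_ipi_setup_counters().
 */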
struct intr_ipi {
	intr_ipi_handler_t *	ii_handler;
	void *			ii_handler_arg;
	intr_ipi_send_t *	ii_send;
	void *			ii_send_arg;
	char			ii_name[INTR_IPI_NAMELEN];
	u_long *		ii_count;
};

static struct intr_ipi ipi_sources[INTR_IPI_COUNT];

static struct intr_ipi *intr_ipi_lookup(u_int);
static void intr_pic_ipi_setup(u_int, const char *, intr_ipi_handler_t *,
    void *);

static void ipi_ast(void *);
static void ipi_hardclock(void *);
static void ipi_preempt(void *);
static void ipi_rendezvous(void *);
static void ipi_stop(void *);

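/* Register state of each stopped CPU, saved by ipi_stop(). */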
struct pcb stoppcbs[MAXCPU];

#ifdef FDT
static u_int fdt_cpuid;
#endif

void mpentry(unsigned long cpuid);
void init_secondary(uint64_t);

/* Synchronize AP startup. */
static struct mtx ap_boot_mtx;

/* Stacks for AP initialization, discarded once idle threads are started. */
void *bootstack;
static void *bootstacks[MAXCPU];

/* Count of started APs, used to synchronize access to bootstack. */
static volatile int aps_started;

/* Set to 1 once we're ready to let the APs out of the pen. */
static volatile int aps_ready;

/* Temporary variables for init_secondary() */
void *dpcpu[MAXCPU - 1];

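/*
 * Check whether the given target MPIDR, masked to its affinity fields,
 * identifies the boot CPU (always pcpu 0).
 */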
static bool
is_boot_cpu(uint64_t target_cpu)
{

	return (cpuid_to_pcpu[0]->pc_mpidr == (target_cpu & CPU_AFF_MASK));
}

static void
release_aps(void *dummy __unused)
{
	int i, started;

	/* Only release CPUs if they exist */
	if (mp_ncpus == 1)
		return;

	intr_pic_ipi_setup(IPI_AST, "ast", ipi_ast, NULL);
	intr_pic_ipi_setup(IPI_PREEMPT, "preempt", ipi_preempt, NULL);
	intr_pic_ipi_setup(IPI_RENDEZVOUS, "rendezvous", ipi_rendezvous, NULL);
	intr_pic_ipi_setup(IPI_STOP, "stop", ipi_stop, NULL);
	intr_pic_ipi_setup(IPI_STOP_HARD, "stop hard", ipi_stop, NULL);
	intr_pic_ipi_setup(IPI_HARDCLOCK, "hardclock", ipi_hardclock, NULL);

	atomic_store_rel_int(&aps_ready, 1);
	/* Wake up the other CPUs */
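	/*
	 * The APs spin in wfe in init_secondary() until aps_ready is set;
	 * the dsb orders the store above before the sev that wakes them.
	 */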
	__asm __volatile(
	    "dsb ishst	\n"
	    "sev	\n"
	    ::: "memory");

	printf("Release APs...");

	started = 0;
	for (i = 0; i < 2000; i++) {
		if (smp_started) {
			printf("done\n");
			return;
		}
		/*
		 * Don't time out while we are making progress. Some large
		 * systems can take a while to start all CPUs.
		 */
		if (smp_cpus > started) {
			i = 0;
			started = smp_cpus;
		}
		DELAY(1000);
	}

	printf("APs not started\n");
}
SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);

void
init_secondary(uint64_t cpu)
{
	struct pcpu *pcpup;
	pmap_t pmap0;
	u_int mpidr;

	/*
	 * Verify that the value passed in the 'cpu' argument (aka
	 * context_id) is valid. Some older U-Boot based PSCI
	 * implementations are buggy and may pass a random value in it.
	 */
	mpidr = READ_SPECIALREG(mpidr_el1) & CPU_AFF_MASK;
	if (cpu >= MAXCPU || cpuid_to_pcpu[cpu] == NULL ||
	    cpuid_to_pcpu[cpu]->pc_mpidr != mpidr) {
		for (cpu = 0; cpu < MAXCPU; cpu++)
			if (cpuid_to_pcpu[cpu] != NULL &&
			    cpuid_to_pcpu[cpu]->pc_mpidr == mpidr)
				break;
		if (cpu >= MAXCPU)
			panic("MPIDR for this CPU is not in pcpu table");
	}

	pcpup = cpuid_to_pcpu[cpu];
	/*
	 * Set the pcpu pointer with a backup in tpidr_el1 to be
	 * loaded when entering the kernel from userland.
	 */
	__asm __volatile(
	    "mov x18, %0 \n"
	    "msr tpidr_el1, %0" :: "r"(pcpup));

	/*
	 * Identify current CPU. This is necessary to set up
	 * affinity registers and to provide support for
	 * runtime chip identification.
	 *
	 * We need this before signalling the CPU is ready to
	 * let the boot CPU use the results.
	 */
	pcpup->pc_midr = get_midr();
	identify_cpu(cpu);

	/* Ensure the stores in identify_cpu have completed */
	atomic_thread_fence_acq_rel();

	/* Signal the BSP and spin until it has released all APs. */
	atomic_add_int(&aps_started, 1);
	while (!atomic_load_int(&aps_ready))
		__asm __volatile("wfe");

	/* Initialize curthread */
	KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
	pcpup->pc_curthread = pcpup->pc_idlethread;
	schedinit_ap();

	/* Initialize curpmap to match TTBR0's current setting. */
	pmap0 = vmspace_pmap(&vmspace0);
	KASSERT(pmap_to_ttbr0(pmap0) == READ_SPECIALREG(ttbr0_el1),
	    ("pmap0 doesn't match cpu %ld's ttbr0", cpu));
	pcpup->pc_curpmap = pmap0;

	install_cpu_errata();

	intr_pic_init_secondary();

	/* Start per-CPU event timers. */
	cpu_initclocks_ap();

#ifdef VFP
	vfp_init();
#endif

	dbg_init();
	pan_enable();

	mtx_lock_spin(&ap_boot_mtx);
	atomic_add_rel_32(&smp_cpus, 1);
	if (smp_cpus == mp_ncpus) {
		/* Enable IPIs, TLB shootdowns, freezes, etc. */
		atomic_store_rel_int(&smp_started, 1);
	}
	mtx_unlock_spin(&ap_boot_mtx);

	kcsan_cpu_init(cpu);

	/*
	 * Assert that smp_after_idle_runnable condition is reasonable.
	 */
	MPASS(PCPU_GET(curpcb) == NULL);

	/* Enter the scheduler */
	sched_ap_entry();

	panic("scheduler returned us to init_secondary");
	/* NOTREACHED */
}

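/*
 * Free each AP's boot stack once its idle thread is runnable, i.e. once
 * it has set pc_curpcb and can no longer be running on the boot stack.
 */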
static void
smp_after_idle_runnable(void *arg __unused)
{
	struct pcpu *pc;
	int cpu;

	for (cpu = 1; cpu < mp_ncpus; cpu++) {
		if (bootstacks[cpu] != NULL) {
			pc = pcpu_find(cpu);
			while (atomic_load_ptr(&pc->pc_curpcb) == NULL)
				cpu_spinwait();
			kmem_free((vm_offset_t)bootstacks[cpu], PAGE_SIZE);
		}
	}
}
SYSINIT(smp_after_idle_runnable, SI_SUB_SMP, SI_ORDER_ANY,
    smp_after_idle_runnable, NULL);

/*
 *  Send an IPI through the interrupt controller.
 */
static void
pic_ipi_send(void *arg, cpuset_t cpus, u_int ipi)
{

	KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__));

	/*
	 * Ensure that this CPU's stores will be visible to IPI
	 * recipients before starting to send the interrupts.
	 */
	dsb(ishst);

	PIC_IPI_SEND(intr_irq_root_dev, arg, cpus, ipi);
}

/*
 *  Set up an IPI handler on the interrupt controller.
 *
 *  Not SMP coherent.
 */
static void
intr_pic_ipi_setup(u_int ipi, const char *name, intr_ipi_handler_t *hand,
    void *arg)
{
	struct intr_irqsrc *isrc;
	struct intr_ipi *ii;
	int error;

	KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__));
	KASSERT(hand != NULL, ("%s: ipi %u no handler", __func__, ipi));

	error = PIC_IPI_SETUP(intr_irq_root_dev, ipi, &isrc);
	if (error != 0)
		return;

	isrc->isrc_handlers++;

	ii = intr_ipi_lookup(ipi);
	KASSERT(ii->ii_count == NULL, ("%s: ipi %u reused", __func__, ipi));

	ii->ii_handler = hand;
	ii->ii_handler_arg = arg;
	ii->ii_send = pic_ipi_send;
	ii->ii_send_arg = isrc;
	strlcpy(ii->ii_name, name, INTR_IPI_NAMELEN);
	ii->ii_count = intr_ipi_setup_counters(name);

	PIC_ENABLE_INTR(intr_irq_root_dev, isrc);
}
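
/*
 * The send and receive paths are symmetric: ipi_cpu(), ipi_selected() and
 * ipi_all_but_self() call intr_ipi_send(), which invokes the ii_send
 * method registered above (pic_ipi_send()); on each target CPU the
 * interrupt controller driver decodes the IPI and calls
 * intr_ipi_dispatch(), which runs the registered ii_handler.
 */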

static void
intr_ipi_send(cpuset_t cpus, u_int ipi)
{
	struct intr_ipi *ii;

	ii = intr_ipi_lookup(ipi);
	if (ii->ii_count == NULL)
		panic("%s: not setup IPI %u", __func__, ipi);

	ii->ii_send(ii->ii_send_arg, cpus, ipi);
}

static void
ipi_ast(void *dummy __unused)
{

	CTR0(KTR_SMP, "IPI_AST");
}

static void
ipi_hardclock(void *dummy __unused)
{

	CTR1(KTR_SMP, "%s: IPI_HARDCLOCK", __func__);
	hardclockintr();
}

static void
ipi_preempt(void *dummy __unused)
{
	CTR1(KTR_SMP, "%s: IPI_PREEMPT", __func__);
	sched_preempt(curthread);
}

static void
ipi_rendezvous(void *dummy __unused)
{

	CTR0(KTR_SMP, "IPI_RENDEZVOUS");
	smp_rendezvous_action();
}

static void
ipi_stop(void *dummy __unused)
{
	u_int cpu;

	CTR0(KTR_SMP, "IPI_STOP");

	cpu = PCPU_GET(cpuid);
	savectx(&stoppcbs[cpu]);

	/* Indicate we are stopped */
	CPU_SET_ATOMIC(cpu, &stopped_cpus);

	/* Wait for restart */
	while (!CPU_ISSET(cpu, &started_cpus))
		cpu_spinwait();

#ifdef DDB
	dbg_register_sync(NULL);
#endif

	CPU_CLR_ATOMIC(cpu, &started_cpus);
	CPU_CLR_ATOMIC(cpu, &stopped_cpus);
	CTR0(KTR_SMP, "IPI_STOP (restart)");
}

struct cpu_group *
cpu_topo(void)
{
	struct cpu_group *dom, *root;
	int i;

	root = smp_topo_alloc(1);
	dom = smp_topo_alloc(vm_ndomains);

	root->cg_parent = NULL;
	root->cg_child = dom;
	CPU_COPY(&all_cpus, &root->cg_mask);
	root->cg_count = mp_ncpus;
	root->cg_children = vm_ndomains;
	root->cg_level = CG_SHARE_NONE;
	root->cg_flags = 0;

	/*
	 * Redundant layers will be collapsed by the caller so we don't need a
	 * special case for a single domain.
	 */
	for (i = 0; i < vm_ndomains; i++, dom++) {
		dom->cg_parent = root;
		dom->cg_child = NULL;
		CPU_COPY(&cpuset_domain[i], &dom->cg_mask);
		dom->cg_count = CPU_COUNT(&dom->cg_mask);
		dom->cg_children = 0;
		dom->cg_level = CG_SHARE_L3;
		dom->cg_flags = 0;
	}

	return (root);
}
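
/*
 * For example, on a hypothetical system with two memory domains and eight
 * CPUs split evenly between them, the above builds a root group spanning
 * all eight CPUs with two CG_SHARE_L3 children of four CPUs each; with a
 * single domain the redundant layer is collapsed by the caller.
 */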

/* Determine if we are running on an MP machine */
int
cpu_mp_probe(void)
{

	/* ARM64TODO: Read the u bit of mpidr_el1 to determine this */
	return (1);
}

/*
 * Starts a given CPU. If the CPU is already running, i.e. it is the boot CPU,
 * do nothing. Returns true if the CPU is present and running.
 */
static bool
start_cpu(u_int cpuid, uint64_t target_cpu, int domain)
{
	struct pcpu *pcpup;
	vm_paddr_t pa;
	int err, naps;

	/* Check we are able to start this cpu */
	if (cpuid > mp_maxid)
		return (false);

	/* Skip boot CPU */
	if (is_boot_cpu(target_cpu))
		return (true);

	KASSERT(cpuid < MAXCPU, ("Too many CPUs"));

	pcpup = (void *)kmem_malloc_domainset(DOMAINSET_PREF(domain),
	    sizeof(*pcpup), M_WAITOK | M_ZERO);
	pcpu_init(pcpup, cpuid, sizeof(struct pcpu));
	pcpup->pc_mpidr = target_cpu & CPU_AFF_MASK;

	dpcpu[cpuid - 1] = (void *)kmem_malloc_domainset(
	    DOMAINSET_PREF(domain), DPCPU_SIZE, M_WAITOK | M_ZERO);
	dpcpu_init(dpcpu[cpuid - 1], cpuid);

	bootstacks[cpuid] = (void *)kmem_malloc_domainset(
	    DOMAINSET_PREF(domain), PAGE_SIZE, M_WAITOK | M_ZERO);

	naps = atomic_load_int(&aps_started);
	bootstack = (char *)bootstacks[cpuid] + PAGE_SIZE;

	printf("Starting CPU %u (%lx)\n", cpuid, target_cpu);
	pa = pmap_extract(kernel_pmap, (vm_offset_t)mpentry);
	err = psci_cpu_on(target_cpu, pa, cpuid);
	if (err != PSCI_RETVAL_SUCCESS) {
		/*
		 * Panic here if INVARIANTS are enabled and PSCI failed to
		 * start the requested CPU.  psci_cpu_on() returns PSCI_MISSING
		 * to indicate we are unable to use it to start the given CPU.
		 */
		KASSERT(err == PSCI_MISSING ||
		    (mp_quirks & MP_QUIRK_CPULIST) == MP_QUIRK_CPULIST,
		    ("Failed to start CPU %u (%lx), error %d\n",
		    cpuid, target_cpu, err));

		pcpu_destroy(pcpup);
		kmem_free((vm_offset_t)dpcpu[cpuid - 1], DPCPU_SIZE);
		dpcpu[cpuid - 1] = NULL;
		kmem_free((vm_offset_t)bootstacks[cpuid], PAGE_SIZE);
		bootstacks[cpuid] = NULL;
		mp_ncpus--;
		return (false);
	}

	/* Wait for the AP to switch to its boot stack. */
	while (atomic_load_int(&aps_started) < naps + 1)
		cpu_spinwait();
	CPU_SET(cpuid, &all_cpus);

	return (true);
}

#ifdef DEV_ACPI
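/*
 * Start a CPU for each MADT GIC CPU interface (GICC) entry, handing out
 * CPU IDs from the counter passed in arg; the boot CPU keeps ID 0
 * regardless of its position in the table.
 */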
static void
madt_handler(ACPI_SUBTABLE_HEADER *entry, void *arg)
{
	ACPI_MADT_GENERIC_INTERRUPT *intr;
	u_int *cpuid;
	u_int id;
	int domain;

	switch (entry->Type) {
	case ACPI_MADT_TYPE_GENERIC_INTERRUPT:
		intr = (ACPI_MADT_GENERIC_INTERRUPT *)entry;
		cpuid = arg;

		if (is_boot_cpu(intr->ArmMpidr))
			id = 0;
		else
			id = *cpuid;

		domain = 0;
#ifdef NUMA
		if (vm_ndomains > 1)
			domain = acpi_pxm_get_cpu_locality(intr->Uid);
#endif
		if (start_cpu(id, intr->ArmMpidr, domain)) {
			MPASS(cpuid_to_pcpu[id] != NULL);
			cpuid_to_pcpu[id]->pc_acpi_id = intr->Uid;
			/*
			 * Don't increment for the boot CPU, its CPU ID is
			 * reserved.
			 */
			if (!is_boot_cpu(intr->ArmMpidr))
				(*cpuid)++;
		}

		break;
	default:
		break;
	}
}

static void
cpu_init_acpi(void)
{
	ACPI_TABLE_MADT *madt;
	vm_paddr_t physaddr;
	u_int cpuid;

	physaddr = acpi_find_table(ACPI_SIG_MADT);
	if (physaddr == 0)
		return;

	madt = acpi_map_table(physaddr, ACPI_SIG_MADT);
	if (madt == NULL) {
		printf("Unable to map the MADT, not starting APs\n");
		return;
	}
	/* Boot CPU is always 0 */
	cpuid = 1;
	acpi_walk_subtables(madt + 1, (char *)madt + madt->Header.Length,
	    madt_handler, &cpuid);

	acpi_unmap_table(madt);

#if MAXMEMDOM > 1
	acpi_pxm_set_cpu_locality();
#endif
}
#endif

#ifdef FDT
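/*
 * Start a CPU from its FDT "cpu" node; reg holds the target MPIDR in one
 * or two address cells. The boot CPU keeps ID 0 regardless of node order.
 */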
static boolean_t
start_cpu_fdt(u_int id, phandle_t node, u_int addr_size, pcell_t *reg)
{
	uint64_t target_cpu;
	int domain;
	int cpuid;

	target_cpu = reg[0];
	if (addr_size == 2) {
		target_cpu <<= 32;
		target_cpu |= reg[1];
	}

	if (is_boot_cpu(target_cpu))
		cpuid = 0;
	else
		cpuid = fdt_cpuid;

	if (!start_cpu(cpuid, target_cpu, 0))
		return (FALSE);

	/*
	 * Don't increment for the boot CPU, its CPU ID is reserved.
	 */
	if (!is_boot_cpu(target_cpu))
		fdt_cpuid++;

	/* Try to read the numa node of this cpu */
	if (vm_ndomains == 1 ||
	    OF_getencprop(node, "numa-node-id", &domain, sizeof(domain)) <= 0)
		domain = 0;
	cpuid_to_pcpu[cpuid]->pc_domain = domain;
	if (domain < MAXMEMDOM)
		CPU_SET(cpuid, &cpuset_domain[domain]);
	return (TRUE);
}

static void
cpu_init_fdt(void)
{
	phandle_t node;
	int i;

	node = OF_peer(0);
	for (i = 0; fdt_quirks[i].compat != NULL; i++) {
		if (ofw_bus_node_is_compatible(node,
		    fdt_quirks[i].compat) != 0) {
			mp_quirks = fdt_quirks[i].quirks;
		}
	}
	fdt_cpuid = 1;
	ofw_cpu_early_foreach(start_cpu_fdt, true);
}
#endif

/* Initialize and fire up non-boot processors */
void
cpu_mp_start(void)
{
	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);

	/* CPU 0 is always boot CPU. */
	CPU_SET(0, &all_cpus);
	cpuid_to_pcpu[0]->pc_mpidr = READ_SPECIALREG(mpidr_el1) & CPU_AFF_MASK;

	switch (arm64_bus_method) {
#ifdef DEV_ACPI
	case ARM64_BUS_ACPI:
		mp_quirks = MP_QUIRK_CPULIST;
		cpu_init_acpi();
		break;
#endif
#ifdef FDT
	case ARM64_BUS_FDT:
		cpu_init_fdt();
		break;
#endif
	default:
		break;
	}
}

/* Introduce the rest of the cores to the world */
void
cpu_mp_announce(void)
{
}

#ifdef DEV_ACPI
static void
cpu_count_acpi_handler(ACPI_SUBTABLE_HEADER *entry, void *arg)
{
	u_int *cores = arg;

	switch (entry->Type) {
	case ACPI_MADT_TYPE_GENERIC_INTERRUPT:
		(*cores)++;
		break;
	default:
		break;
	}
}

static u_int
cpu_count_acpi(void)
{
	ACPI_TABLE_MADT *madt;
	vm_paddr_t physaddr;
	u_int cores;

	physaddr = acpi_find_table(ACPI_SIG_MADT);
	if (physaddr == 0)
		return (0);

	madt = acpi_map_table(physaddr, ACPI_SIG_MADT);
	if (madt == NULL) {
		printf("Unable to map the MADT, not starting APs\n");
		return (0);
	}

	cores = 0;
	acpi_walk_subtables(madt + 1, (char *)madt + madt->Header.Length,
	    cpu_count_acpi_handler, &cores);

	acpi_unmap_table(madt);

	return (cores);
}
#endif

void
cpu_mp_setmaxid(void)
{
	int cores;

	mp_ncpus = 1;
	mp_maxid = 0;

	switch (arm64_bus_method) {
#ifdef DEV_ACPI
	case ARM64_BUS_ACPI:
		cores = cpu_count_acpi();
		if (cores > 0) {
			cores = MIN(cores, MAXCPU);
			if (bootverbose)
				printf("Found %d CPUs in the ACPI tables\n",
				    cores);
			mp_ncpus = cores;
			mp_maxid = cores - 1;
		}
		break;
#endif
#ifdef FDT
	case ARM64_BUS_FDT:
		cores = ofw_cpu_early_foreach(NULL, false);
		if (cores > 0) {
			cores = MIN(cores, MAXCPU);
			if (bootverbose)
				printf("Found %d CPUs in the device tree\n",
				    cores);
			mp_ncpus = cores;
			mp_maxid = cores - 1;
		}
		break;
#endif
	default:
		if (bootverbose)
			printf("No CPU data, limiting to 1 core\n");
		break;
	}

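	/* Allow the hw.ncpu tunable to lower the CPU count. */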
	if (TUNABLE_INT_FETCH("hw.ncpu", &cores)) {
		if (cores > 0 && cores < mp_ncpus) {
			mp_ncpus = cores;
			mp_maxid = cores - 1;
		}
	}
}

/*
 *  Lookup IPI source.
 */
static struct intr_ipi *
intr_ipi_lookup(u_int ipi)
{

	if (ipi >= INTR_IPI_COUNT)
		panic("%s: no such IPI %u", __func__, ipi);

	return (&ipi_sources[ipi]);
}

/*
 *  Interrupt controller dispatch function for IPIs. It should
 *  be called straight from the interrupt controller, when the
 *  associated interrupt source is learned, or from anybody who
 *  has the interrupt source mapped.
 */
void
intr_ipi_dispatch(u_int ipi, struct trapframe *tf)
{
	void *arg;
	struct intr_ipi *ii;

	ii = intr_ipi_lookup(ipi);
	if (ii->ii_count == NULL)
		panic("%s: not setup IPI %u", __func__, ipi);

	intr_ipi_increment_count(ii->ii_count, PCPU_GET(cpuid));

	/*
	 * Supply the IPI handler with the trapframe as its argument
	 * if no handler argument was registered.
	 */
	arg = ii->ii_handler_arg != NULL ? ii->ii_handler_arg : tf;
	ii->ii_handler(arg);
}

#ifdef notyet
/*
 *  Map IPI into interrupt controller.
 *
 *  Not SMP coherent.
 */
static int
ipi_map(struct intr_irqsrc *isrc, u_int ipi)
{
	boolean_t is_percpu;
	int error;

	if (ipi >= INTR_IPI_COUNT)
		panic("%s: no such IPI %u", __func__, ipi);

	KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__));

	isrc->isrc_type = INTR_ISRCT_NAMESPACE;
	isrc->isrc_nspc_type = INTR_IRQ_NSPC_IPI;
	isrc->isrc_nspc_num = ipi_next_num;

	error = PIC_REGISTER(intr_irq_root_dev, isrc, &is_percpu);
	if (error == 0) {
		isrc->isrc_dev = intr_irq_root_dev;
		ipi_next_num++;
	}
	return (error);
}

/*
 *  Set up an IPI handler for an interrupt source.
 *
 *  Note that a platform could provide more ways to send and receive
 *  IPIs, fast interrupts for example. In that case, one can call this
 *  function with the AISHF_NOALLOC flag set and then call
 *  intr_ipi_dispatch() when appropriate.
 *
 *  Not SMP coherent.
 */
int
intr_ipi_set_handler(u_int ipi, const char *name, intr_ipi_filter_t *filter,
    void *arg, u_int flags)
{
	struct intr_irqsrc *isrc;
	int error;

	if (filter == NULL)
		return (EINVAL);

	isrc = intr_ipi_lookup(ipi);
	if (isrc->isrc_ipifilter != NULL)
		return (EEXIST);

	if ((flags & AISHF_NOALLOC) == 0) {
		error = ipi_map(isrc, ipi);
		if (error != 0)
			return (error);
	}

	isrc->isrc_ipifilter = filter;
	isrc->isrc_arg = arg;
	isrc->isrc_handlers = 1;
	isrc->isrc_count = intr_ipi_setup_counters(name);
	isrc->isrc_index = 0; /* it should not be used in IPI case */

	if (isrc->isrc_dev != NULL) {
		PIC_ENABLE_INTR(isrc->isrc_dev, isrc);
		PIC_ENABLE_SOURCE(isrc->isrc_dev, isrc);
	}
	return (0);
}
#endif

/* Sending IPI */
void
ipi_all_but_self(u_int ipi)
{
	cpuset_t cpus;

	cpus = all_cpus;
	CPU_CLR(PCPU_GET(cpuid), &cpus);
	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	intr_ipi_send(cpus, ipi);
}

void
ipi_cpu(int cpu, u_int ipi)
{
	cpuset_t cpus;

	CPU_ZERO(&cpus);
	CPU_SET(cpu, &cpus);

	CTR3(KTR_SMP, "%s: cpu: %d, ipi: %x", __func__, cpu, ipi);
	intr_ipi_send(cpus, ipi);
}

void
ipi_selected(cpuset_t cpus, u_int ipi)
{

	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	intr_ipi_send(cpus, ipi);
}
}
943