xref: /freebsd/sys/arm64/arm64/mp_machdep.c (revision 1f1e2261)
1 /*-
2  * Copyright (c) 2015-2016 The FreeBSD Foundation
3  *
4  * This software was developed by Andrew Turner under
5  * sponsorship from the FreeBSD Foundation.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  *
28  */
29 
30 #include "opt_acpi.h"
31 #include "opt_ddb.h"
32 #include "opt_kstack_pages.h"
33 #include "opt_platform.h"
34 
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37 
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/bus.h>
41 #include <sys/cpu.h>
42 #include <sys/csan.h>
43 #include <sys/domainset.h>
44 #include <sys/kernel.h>
45 #include <sys/ktr.h>
46 #include <sys/malloc.h>
47 #include <sys/module.h>
48 #include <sys/mutex.h>
49 #include <sys/pcpu.h>
50 #include <sys/proc.h>
51 #include <sys/sched.h>
52 #include <sys/smp.h>
53 
54 #include <vm/vm.h>
55 #include <vm/pmap.h>
56 #include <vm/vm_extern.h>
57 #include <vm/vm_kern.h>
58 #include <vm/vm_map.h>
59 
60 #include <machine/machdep.h>
61 #include <machine/cpu.h>
62 #include <machine/debug_monitor.h>
63 #include <machine/intr.h>
64 #include <machine/smp.h>
65 #ifdef VFP
66 #include <machine/vfp.h>
67 #endif
68 
69 #ifdef DEV_ACPI
70 #include <contrib/dev/acpica/include/acpi.h>
71 #include <dev/acpica/acpivar.h>
72 #endif
73 
74 #ifdef FDT
75 #include <dev/ofw/openfirm.h>
76 #include <dev/ofw/ofw_bus.h>
77 #include <dev/ofw/ofw_bus_subr.h>
78 #include <dev/ofw/ofw_cpu.h>
79 #endif
80 
81 #include <dev/psci/psci.h>
82 
83 #include "pic_if.h"
84 
85 #define	MP_QUIRK_CPULIST	0x01	/* The list of cpus may be wrong, */
86 					/* don't panic if one fails to start */
87 static uint32_t mp_quirks;
88 
89 #ifdef FDT
90 static struct {
91 	const char *compat;
92 	uint32_t quirks;
93 } fdt_quirks[] = {
94 	{ "arm,foundation-aarch64",	MP_QUIRK_CPULIST },
95 	{ "arm,fvp-base",		MP_QUIRK_CPULIST },
96 	/* This is incorrect in some DTS files */
97 	{ "arm,vfp-base",		MP_QUIRK_CPULIST },
98 	{ NULL, 0 },
99 };
100 #endif
101 
102 typedef void intr_ipi_send_t(void *, cpuset_t, u_int);
103 typedef void intr_ipi_handler_t(void *);
104 
105 #define INTR_IPI_NAMELEN	(MAXCOMLEN + 1)
106 struct intr_ipi {
107 	intr_ipi_handler_t *	ii_handler;
108 	void *			ii_handler_arg;
109 	intr_ipi_send_t *	ii_send;
110 	void *			ii_send_arg;
111 	char			ii_name[INTR_IPI_NAMELEN];
112 	u_long *		ii_count;
113 };
114 
115 static struct intr_ipi ipi_sources[INTR_IPI_COUNT];
116 
117 static struct intr_ipi *intr_ipi_lookup(u_int);
118 static void intr_pic_ipi_setup(u_int, const char *, intr_ipi_handler_t *,
119     void *);
120 
121 static void ipi_ast(void *);
122 static void ipi_hardclock(void *);
123 static void ipi_preempt(void *);
124 static void ipi_rendezvous(void *);
125 static void ipi_stop(void *);
126 
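/* Per-CPU register state saved by ipi_stop() while a CPU is stopped. */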
127 struct pcb stoppcbs[MAXCPU];
128 
129 #ifdef FDT
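/* Next CPU ID to assign when enumerating CPUs from the device tree. */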
130 static u_int fdt_cpuid;
131 #endif
132 
133 void mpentry(unsigned long cpuid);
134 void init_secondary(uint64_t);
135 
136 /* Synchronize AP startup. */
137 static struct mtx ap_boot_mtx;
138 
139 /* Stacks for AP initialization, discarded once idle threads are started. */
140 void *bootstack;
141 static void *bootstacks[MAXCPU];
142 
143 /* Count of started APs, used to synchronize access to bootstack. */
144 static volatile int aps_started;
145 
146 /* Set to 1 once we're ready to let the APs out of the pen. */
147 static volatile int aps_ready;
148 
149 /* Temporary variables for init_secondary() */
150 void *dpcpu[MAXCPU - 1];
151 
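/* Does the MPIDR affinity of 'target_cpu' match the boot CPU's? */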
152 static bool
153 is_boot_cpu(uint64_t target_cpu)
154 {
155 
156 	return (cpuid_to_pcpu[0]->pc_mpidr == (target_cpu & CPU_AFF_MASK));
157 }
158 
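/* Register the IPI handlers and release the APs spinning in init_secondary(). */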
159 static void
160 release_aps(void *dummy __unused)
161 {
162 	int i, started;
163 
164 	/* Only release CPUs if they exist */
165 	if (mp_ncpus == 1)
166 		return;
167 
168 	intr_pic_ipi_setup(IPI_AST, "ast", ipi_ast, NULL);
169 	intr_pic_ipi_setup(IPI_PREEMPT, "preempt", ipi_preempt, NULL);
170 	intr_pic_ipi_setup(IPI_RENDEZVOUS, "rendezvous", ipi_rendezvous, NULL);
171 	intr_pic_ipi_setup(IPI_STOP, "stop", ipi_stop, NULL);
172 	intr_pic_ipi_setup(IPI_STOP_HARD, "stop hard", ipi_stop, NULL);
173 	intr_pic_ipi_setup(IPI_HARDCLOCK, "hardclock", ipi_hardclock, NULL);
174 
175 	atomic_store_rel_int(&aps_ready, 1);
176 	/* Wake up the other CPUs */
177 	__asm __volatile(
178 	    "dsb ishst	\n"
179 	    "sev	\n"
180 	    ::: "memory");
181 
182 	printf("Release APs...");
183 
184 	started = 0;
185 	for (i = 0; i < 2000; i++) {
186 		if (atomic_load_acq_int(&smp_started) != 0) {
187 			printf("done\n");
188 			return;
189 		}
190 		/*
191 		 * Don't time out while we are making progress. Some large
192 		 * systems can take a while to start all CPUs.
193 		 */
194 		if (smp_cpus > started) {
195 			i = 0;
196 			started = smp_cpus;
197 		}
198 		DELAY(1000);
199 	}
200 
201 	printf("APs not started\n");
202 }
203 SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);
204 
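/* AP startup continues here, called from mpentry() with this CPU's index. */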
205 void
206 init_secondary(uint64_t cpu)
207 {
208 	struct pcpu *pcpup;
209 	pmap_t pmap0;
210 	u_int mpidr;
211 
212 	ptrauth_mp_start(cpu);
213 
214 	/*
215 	 * Verify that the value passed in the 'cpu' argument (aka context_id)
216 	 * is valid. Some older U-Boot based PSCI implementations are buggy;
217 	 * they may pass a random value in it.
218 	 */
219 	mpidr = READ_SPECIALREG(mpidr_el1) & CPU_AFF_MASK;
220 	if (cpu >= MAXCPU || cpuid_to_pcpu[cpu] == NULL ||
221 	    cpuid_to_pcpu[cpu]->pc_mpidr != mpidr) {
222 		for (cpu = 0; cpu < mp_maxid; cpu++)
223 			if (cpuid_to_pcpu[cpu] != NULL &&
224 			    cpuid_to_pcpu[cpu]->pc_mpidr == mpidr)
225 				break;
226 		if (cpu >= MAXCPU)
227 			panic("MPIDR for this CPU is not in pcpu table");
228 	}
229 
230 	pcpup = cpuid_to_pcpu[cpu];
231 	/*
232 	 * Set the pcpu pointer with a backup in tpidr_el1 to be
233 	 * loaded when entering the kernel from userland.
234 	 */
235 	__asm __volatile(
236 	    "mov x18, %0 \n"
237 	    "msr tpidr_el1, %0" :: "r"(pcpup));
238 
239 	/*
240 	 * Identify the current CPU. This is necessary to set up
241 	 * affinity registers and to provide support for
242 	 * runtime chip identification.
243 	 *
244 	 * This must happen before we signal that the CPU is ready,
245 	 * so the boot CPU can use the results.
246 	 */
247 	pcpup->pc_midr = get_midr();
248 	identify_cpu(cpu);
249 
250 	/* Ensure the stores in identify_cpu have completed */
251 	atomic_thread_fence_acq_rel();
252 
253 	/* Signal the BSP and spin until it has released all APs. */
254 	atomic_add_int(&aps_started, 1);
255 	while (!atomic_load_int(&aps_ready))
256 		__asm __volatile("wfe");
257 
258 	/* Initialize curthread */
259 	KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
260 	pcpup->pc_curthread = pcpup->pc_idlethread;
261 	schedinit_ap();
262 
263 	/* Initialize curpmap to match TTBR0's current setting. */
264 	pmap0 = vmspace_pmap(&vmspace0);
265 	KASSERT(pmap_to_ttbr0(pmap0) == READ_SPECIALREG(ttbr0_el1),
266 	    ("pmap0 doesn't match cpu %ld's ttbr0", cpu));
267 	pcpup->pc_curpmap = pmap0;
268 
269 	install_cpu_errata();
270 
271 	intr_pic_init_secondary();
272 
273 	/* Start per-CPU event timers. */
274 	cpu_initclocks_ap();
275 
276 #ifdef VFP
277 	vfp_init();
278 #endif
279 
280 	dbg_init();
281 	pan_enable();
282 
283 	mtx_lock_spin(&ap_boot_mtx);
284 	atomic_add_rel_32(&smp_cpus, 1);
285 	if (smp_cpus == mp_ncpus) {
286 		/* Enable IPIs, TLB shootdown, freezes, etc. */
287 		atomic_store_rel_int(&smp_started, 1);
288 	}
289 	mtx_unlock_spin(&ap_boot_mtx);
290 
291 	kcsan_cpu_init(cpu);
292 
293 	/* Enter the scheduler */
294 	sched_ap_entry();
295 
296 	panic("scheduler returned us to init_secondary");
297 	/* NOTREACHED */
298 }
299 
300 static void
301 smp_after_idle_runnable(void *arg __unused)
302 {
303 	int cpu;
304 
305 	if (mp_ncpus == 1)
306 		return;
307 
308 	KASSERT(smp_started != 0, ("%s: SMP not started yet", __func__));
309 
310 	/*
311 	 * Wait for all APs to handle an interrupt.  After that, we know that
312 	 * the APs have entered the scheduler at least once, so the boot stacks
313 	 * are safe to free.
314 	 */
315 	smp_rendezvous(smp_no_rendezvous_barrier, NULL,
316 	    smp_no_rendezvous_barrier, NULL);
317 
318 	for (cpu = 1; cpu < mp_ncpus; cpu++) {
319 		if (bootstacks[cpu] != NULL)
320 			kmem_free((vm_offset_t)bootstacks[cpu], PAGE_SIZE);
321 	}
322 }
323 SYSINIT(smp_after_idle_runnable, SI_SUB_SMP, SI_ORDER_ANY,
324     smp_after_idle_runnable, NULL);
325 
326 /*
327  *  Send an IPI through the interrupt controller.
328  */
329 static void
330 pic_ipi_send(void *arg, cpuset_t cpus, u_int ipi)
331 {
332 
333 	KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__));
334 
335 	/*
336 	 * Ensure that this CPU's stores will be visible to IPI
337 	 * recipients before starting to send the interrupts.
338 	 */
339 	dsb(ishst);
340 
341 	PIC_IPI_SEND(intr_irq_root_dev, arg, cpus, ipi);
342 }
343 
344 /*
345  *  Set up an IPI handler on the interrupt controller.
346  *
347  *  Not SMP coherent.
348  */
349 static void
350 intr_pic_ipi_setup(u_int ipi, const char *name, intr_ipi_handler_t *hand,
351     void *arg)
352 {
353 	struct intr_irqsrc *isrc;
354 	struct intr_ipi *ii;
355 	int error;
356 
357 	KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__));
358 	KASSERT(hand != NULL, ("%s: ipi %u no handler", __func__, ipi));
359 
360 	error = PIC_IPI_SETUP(intr_irq_root_dev, ipi, &isrc);
361 	if (error != 0)
362 		return;
363 
364 	isrc->isrc_handlers++;
365 
366 	ii = intr_ipi_lookup(ipi);
367 	KASSERT(ii->ii_count == NULL, ("%s: ipi %u reused", __func__, ipi));
368 
369 	ii->ii_handler = hand;
370 	ii->ii_handler_arg = arg;
371 	ii->ii_send = pic_ipi_send;
372 	ii->ii_send_arg = isrc;
373 	strlcpy(ii->ii_name, name, INTR_IPI_NAMELEN);
374 	ii->ii_count = intr_ipi_setup_counters(name);
375 
376 	PIC_ENABLE_INTR(intr_irq_root_dev, isrc);
377 }
378 
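/*
 *  Send an IPI to a set of CPUs using the send method registered for it
 *  in intr_pic_ipi_setup().
 */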
379 static void
380 intr_ipi_send(cpuset_t cpus, u_int ipi)
381 {
382 	struct intr_ipi *ii;
383 
384 	ii = intr_ipi_lookup(ipi);
385 	if (ii->ii_count == NULL)
386 		panic("%s: IPI %u not set up", __func__, ipi);
387 
388 	ii->ii_send(ii->ii_send_arg, cpus, ipi);
389 }
390 
391 static void
392 ipi_ast(void *dummy __unused)
393 {
394 
395 	CTR0(KTR_SMP, "IPI_AST");
396 }
397 
398 static void
399 ipi_hardclock(void *dummy __unused)
400 {
401 
402 	CTR1(KTR_SMP, "%s: IPI_HARDCLOCK", __func__);
403 	hardclockintr();
404 }
405 
406 static void
407 ipi_preempt(void *dummy __unused)
408 {
409 	CTR1(KTR_SMP, "%s: IPI_PREEMPT", __func__);
410 	sched_preempt(curthread);
411 }
412 
413 static void
414 ipi_rendezvous(void *dummy __unused)
415 {
416 
417 	CTR0(KTR_SMP, "IPI_RENDEZVOUS");
418 	smp_rendezvous_action();
419 }
420 
421 static void
422 ipi_stop(void *dummy __unused)
423 {
424 	u_int cpu;
425 
426 	CTR0(KTR_SMP, "IPI_STOP");
427 
428 	cpu = PCPU_GET(cpuid);
429 	savectx(&stoppcbs[cpu]);
430 
431 	/* Indicate we are stopped */
432 	CPU_SET_ATOMIC(cpu, &stopped_cpus);
433 
434 	/* Wait for restart */
435 	while (!CPU_ISSET(cpu, &started_cpus))
436 		cpu_spinwait();
437 
438 #ifdef DDB
439 	dbg_register_sync(NULL);
440 #endif
441 
442 	CPU_CLR_ATOMIC(cpu, &started_cpus);
443 	CPU_CLR_ATOMIC(cpu, &stopped_cpus);
444 	CTR0(KTR_SMP, "IPI_STOP (restart)");
445 }
446 
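/*
 * Build a two-level topology: a root group spanning all CPUs with one
 * child group per VM memory domain.
 */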
447 struct cpu_group *
448 cpu_topo(void)
449 {
450 	struct cpu_group *dom, *root;
451 	int i;
452 
453 	root = smp_topo_alloc(1);
454 	dom = smp_topo_alloc(vm_ndomains);
455 
456 	root->cg_parent = NULL;
457 	root->cg_child = dom;
458 	CPU_COPY(&all_cpus, &root->cg_mask);
459 	root->cg_count = mp_ncpus;
460 	root->cg_children = vm_ndomains;
461 	root->cg_level = CG_SHARE_NONE;
462 	root->cg_flags = 0;
463 
464 	/*
465 	 * Redundant layers will be collapsed by the caller so we don't need a
466 	 * special case for a single domain.
467 	 */
468 	for (i = 0; i < vm_ndomains; i++, dom++) {
469 		dom->cg_parent = root;
470 		dom->cg_child = NULL;
471 		CPU_COPY(&cpuset_domain[i], &dom->cg_mask);
472 		dom->cg_count = CPU_COUNT(&dom->cg_mask);
473 		dom->cg_children = 0;
474 		dom->cg_level = CG_SHARE_L3;
475 		dom->cg_flags = 0;
476 	}
477 
478 	return (root);
479 }
480 
481 /* Determine if we are running on an MP machine */
482 int
483 cpu_mp_probe(void)
484 {
485 
486 	/* ARM64TODO: Read the u bit of mpidr_el1 to determine this */
487 	return (1);
488 }
489 
490 /*
491  * Starts a given CPU. If the CPU is already running, i.e. it is the boot CPU,
492  * do nothing. Returns true if the CPU is present and running.
493  */
494 static bool
495 start_cpu(u_int cpuid, uint64_t target_cpu, int domain)
496 {
497 	struct pcpu *pcpup;
498 	vm_paddr_t pa;
499 	int err, naps;
500 
501 	/* Check we are able to start this cpu */
502 	if (cpuid > mp_maxid)
503 		return (false);
504 
505 	/* Skip boot CPU */
506 	if (is_boot_cpu(target_cpu))
507 		return (true);
508 
509 	KASSERT(cpuid < MAXCPU, ("Too many CPUs"));
510 
511 	pcpup = (void *)kmem_malloc_domainset(DOMAINSET_PREF(domain),
512 	    sizeof(*pcpup), M_WAITOK | M_ZERO);
513 	pcpu_init(pcpup, cpuid, sizeof(struct pcpu));
514 	pcpup->pc_mpidr = target_cpu & CPU_AFF_MASK;
515 
516 	dpcpu[cpuid - 1] = (void *)kmem_malloc_domainset(
517 	    DOMAINSET_PREF(domain), DPCPU_SIZE, M_WAITOK | M_ZERO);
518 	dpcpu_init(dpcpu[cpuid - 1], cpuid);
519 
520 	bootstacks[cpuid] = (void *)kmem_malloc_domainset(
521 	    DOMAINSET_PREF(domain), PAGE_SIZE, M_WAITOK | M_ZERO);
522 
523 	naps = atomic_load_int(&aps_started);
524 	bootstack = (char *)bootstacks[cpuid] + PAGE_SIZE;
525 
526 	printf("Starting CPU %u (%lx)\n", cpuid, target_cpu);
527 	pa = pmap_extract(kernel_pmap, (vm_offset_t)mpentry);
528 	err = psci_cpu_on(target_cpu, pa, cpuid);
529 	if (err != PSCI_RETVAL_SUCCESS) {
530 		/*
531 		 * Panic here if INVARIANTS are enabled and PSCI failed to
532 		 * start the requested CPU.  psci_cpu_on() returns PSCI_MISSING
533 		 * to indicate we are unable to use it to start the given CPU.
534 		 */
535 		KASSERT(err == PSCI_MISSING ||
536 		    (mp_quirks & MP_QUIRK_CPULIST) == MP_QUIRK_CPULIST,
537 		    ("Failed to start CPU %u (%lx), error %d\n",
538 		    cpuid, target_cpu, err));
539 
540 		pcpu_destroy(pcpup);
541 		kmem_free((vm_offset_t)dpcpu[cpuid - 1], DPCPU_SIZE);
542 		dpcpu[cpuid - 1] = NULL;
543 		kmem_free((vm_offset_t)bootstacks[cpuid], PAGE_SIZE);
544 		bootstacks[cpuid] = NULL;
545 		mp_ncpus--;
546 		return (false);
547 	}
548 
549 	/* Wait for the AP to switch to its boot stack. */
550 	while (atomic_load_int(&aps_started) < naps + 1)
551 		cpu_spinwait();
552 	CPU_SET(cpuid, &all_cpus);
553 
554 	return (true);
555 }
556 
557 #ifdef DEV_ACPI
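/*
 * MADT walker callback: start the CPU described by each GIC CPU
 * interface (GICC) subtable.
 */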
558 static void
559 madt_handler(ACPI_SUBTABLE_HEADER *entry, void *arg)
560 {
561 	ACPI_MADT_GENERIC_INTERRUPT *intr;
562 	u_int *cpuid;
563 	u_int id;
564 	int domain;
565 
566 	switch(entry->Type) {
567 	case ACPI_MADT_TYPE_GENERIC_INTERRUPT:
568 		intr = (ACPI_MADT_GENERIC_INTERRUPT *)entry;
569 		cpuid = arg;
570 
571 		if (is_boot_cpu(intr->ArmMpidr))
572 			id = 0;
573 		else
574 			id = *cpuid;
575 
576 		domain = 0;
577 #ifdef NUMA
578 		if (vm_ndomains > 1)
579 			domain = acpi_pxm_get_cpu_locality(intr->Uid);
580 #endif
581 		if (start_cpu(id, intr->ArmMpidr, domain)) {
582 			MPASS(cpuid_to_pcpu[id] != NULL);
583 			cpuid_to_pcpu[id]->pc_acpi_id = intr->Uid;
584 			/*
585 			 * Don't increment for the boot CPU, its CPU ID is
586 			 * reserved.
587 			 */
588 			if (!is_boot_cpu(intr->ArmMpidr))
589 				(*cpuid)++;
590 		}
591 
592 		break;
593 	default:
594 		break;
595 	}
596 }
597 
598 static void
599 cpu_init_acpi(void)
600 {
601 	ACPI_TABLE_MADT *madt;
602 	vm_paddr_t physaddr;
603 	u_int cpuid;
604 
605 	physaddr = acpi_find_table(ACPI_SIG_MADT);
606 	if (physaddr == 0)
607 		return;
608 
609 	madt = acpi_map_table(physaddr, ACPI_SIG_MADT);
610 	if (madt == NULL) {
611 		printf("Unable to map the MADT, not starting APs\n");
612 		return;
613 	}
614 	/* Boot CPU is always 0 */
615 	cpuid = 1;
616 	acpi_walk_subtables(madt + 1, (char *)madt + madt->Header.Length,
617 	    madt_handler, &cpuid);
618 
619 	acpi_unmap_table(madt);
620 
621 #if MAXMEMDOM > 1
622 	acpi_pxm_set_cpu_locality();
623 #endif
624 }
625 #endif
626 
627 #ifdef FDT
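/*
 * Callback for ofw_cpu_early_foreach(): start the CPU described by the
 * given device tree node.
 */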
628 static boolean_t
629 start_cpu_fdt(u_int id, phandle_t node, u_int addr_size, pcell_t *reg)
630 {
631 	uint64_t target_cpu;
632 	int domain;
633 	int cpuid;
634 
635 	target_cpu = reg[0];
636 	if (addr_size == 2) {
637 		target_cpu <<= 32;
638 		target_cpu |= reg[1];
639 	}
640 
641 	if (is_boot_cpu(target_cpu))
642 		cpuid = 0;
643 	else
644 		cpuid = fdt_cpuid;
645 
646 	if (!start_cpu(cpuid, target_cpu, 0))
647 		return (FALSE);
648 
649 	/*
650 	 * Don't increment for the boot CPU, its CPU ID is reserved.
651 	 */
652 	if (!is_boot_cpu(target_cpu))
653 		fdt_cpuid++;
654 
655 	/* Try to read the NUMA node of this CPU */
656 	if (vm_ndomains == 1 ||
657 	    OF_getencprop(node, "numa-node-id", &domain, sizeof(domain)) <= 0)
658 		domain = 0;
659 	cpuid_to_pcpu[cpuid]->pc_domain = domain;
660 	if (domain < MAXMEMDOM)
661 		CPU_SET(cpuid, &cpuset_domain[domain]);
662 	return (TRUE);
663 }
664 static void
665 cpu_init_fdt(void)
666 {
667 	phandle_t node;
668 	int i;
669 
670 	node = OF_peer(0);
671 	for (i = 0; fdt_quirks[i].compat != NULL; i++) {
672 		if (ofw_bus_node_is_compatible(node,
673 		    fdt_quirks[i].compat) != 0) {
674 			mp_quirks = fdt_quirks[i].quirks;
675 		}
676 	}
677 	fdt_cpuid = 1;
678 	ofw_cpu_early_foreach(start_cpu_fdt, true);
679 }
680 #endif
681 
682 /* Initialize and fire up non-boot processors */
683 void
684 cpu_mp_start(void)
685 {
686 	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);
687 
688 	/* CPU 0 is always boot CPU. */
689 	CPU_SET(0, &all_cpus);
690 	cpuid_to_pcpu[0]->pc_mpidr = READ_SPECIALREG(mpidr_el1) & CPU_AFF_MASK;
691 
692 	switch(arm64_bus_method) {
693 #ifdef DEV_ACPI
694 	case ARM64_BUS_ACPI:
695 		mp_quirks = MP_QUIRK_CPULIST;
696 		cpu_init_acpi();
697 		break;
698 #endif
699 #ifdef FDT
700 	case ARM64_BUS_FDT:
701 		cpu_init_fdt();
702 		break;
703 #endif
704 	default:
705 		break;
706 	}
707 }
708 
709 /* Introduce the rest of the cores to the world */
710 void
711 cpu_mp_announce(void)
712 {
713 }
714 
715 #ifdef DEV_ACPI
716 static void
717 cpu_count_acpi_handler(ACPI_SUBTABLE_HEADER *entry, void *arg)
718 {
719 	u_int *cores = arg;
720 
721 	switch(entry->Type) {
722 	case ACPI_MADT_TYPE_GENERIC_INTERRUPT:
723 		(*cores)++;
724 		break;
725 	default:
726 		break;
727 	}
728 }
729 
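/* Count the CPUs listed in the ACPI MADT. */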
730 static u_int
731 cpu_count_acpi(void)
732 {
733 	ACPI_TABLE_MADT *madt;
734 	vm_paddr_t physaddr;
735 	u_int cores;
736 
737 	physaddr = acpi_find_table(ACPI_SIG_MADT);
738 	if (physaddr == 0)
739 		return (0);
740 
741 	madt = acpi_map_table(physaddr, ACPI_SIG_MADT);
742 	if (madt == NULL) {
743 		printf("Unable to map the MADT, not starting APs\n");
744 		return (0);
745 	}
746 
747 	cores = 0;
748 	acpi_walk_subtables(madt + 1, (char *)madt + madt->Header.Length,
749 	    cpu_count_acpi_handler, &cores);
750 
751 	acpi_unmap_table(madt);
752 
753 	return (cores);
754 }
755 #endif
756 
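/*
 * Set mp_ncpus and mp_maxid from the platform's CPU enumeration (ACPI
 * MADT or device tree), clamped to MAXCPU.
 */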
757 void
758 cpu_mp_setmaxid(void)
759 {
760 	int cores;
761 
762 	mp_ncpus = 1;
763 	mp_maxid = 0;
764 
765 	switch(arm64_bus_method) {
766 #ifdef DEV_ACPI
767 	case ARM64_BUS_ACPI:
768 		cores = cpu_count_acpi();
769 		if (cores > 0) {
770 			cores = MIN(cores, MAXCPU);
771 			if (bootverbose)
772 				printf("Found %d CPUs in the ACPI tables\n",
773 				    cores);
774 			mp_ncpus = cores;
775 			mp_maxid = cores - 1;
776 		}
777 		break;
778 #endif
779 #ifdef FDT
780 	case ARM64_BUS_FDT:
781 		cores = ofw_cpu_early_foreach(NULL, false);
782 		if (cores > 0) {
783 			cores = MIN(cores, MAXCPU);
784 			if (bootverbose)
785 				printf("Found %d CPUs in the device tree\n",
786 				    cores);
787 			mp_ncpus = cores;
788 			mp_maxid = cores - 1;
789 		}
790 		break;
791 #endif
792 	default:
793 		if (bootverbose)
794 			printf("No CPU data, limiting to 1 core\n");
795 		break;
796 	}
797 
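	/* Allow the number of CPUs to be limited with the hw.ncpu tunable. */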
798 	if (TUNABLE_INT_FETCH("hw.ncpu", &cores)) {
799 		if (cores > 0 && cores < mp_ncpus) {
800 			mp_ncpus = cores;
801 			mp_maxid = cores - 1;
802 		}
803 	}
804 }
805 
806 /*
807  *  Lookup IPI source.
808  */
809 static struct intr_ipi *
810 intr_ipi_lookup(u_int ipi)
811 {
812 
813 	if (ipi >= INTR_IPI_COUNT)
814 		panic("%s: no such IPI %u", __func__, ipi);
815 
816 	return (&ipi_sources[ipi]);
817 }
818 
819 /*
820  *  Interrupt controller dispatch function for IPIs. It should be
821  *  called directly from the interrupt controller, once the associated
822  *  interrupt source is known, or from anybody who has the interrupt
823  *  source mapped.
824  */
825 void
826 intr_ipi_dispatch(u_int ipi, struct trapframe *tf)
827 {
828 	void *arg;
829 	struct intr_ipi *ii;
830 
831 	ii = intr_ipi_lookup(ipi);
832 	if (ii->ii_count == NULL)
833 		panic("%s: IPI %u not set up", __func__, ipi);
834 
835 	intr_ipi_increment_count(ii->ii_count, PCPU_GET(cpuid));
836 
837 	/*
838 	 * Supply the IPI handler with the trapframe as its
839 	 * argument if none is registered.
840 	 */
841 	arg = ii->ii_handler_arg != NULL ? ii->ii_handler_arg : tf;
842 	ii->ii_handler(arg);
843 }
844 
845 #ifdef notyet
846 /*
847  *  Map IPI into interrupt controller.
848  *
849  *  Not SMP coherent.
850  */
851 static int
852 ipi_map(struct intr_irqsrc *isrc, u_int ipi)
853 {
854 	boolean_t is_percpu;
855 	int error;
856 
857 	if (ipi >= INTR_IPI_COUNT)
858 		panic("%s: no such IPI %u", __func__, ipi);
859 
860 	KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__));
861 
862 	isrc->isrc_type = INTR_ISRCT_NAMESPACE;
863 	isrc->isrc_nspc_type = INTR_IRQ_NSPC_IPI;
864 	isrc->isrc_nspc_num = ipi_next_num;
865 
866 	error = PIC_REGISTER(intr_irq_root_dev, isrc, &is_percpu);
867 	if (error == 0) {
868 		isrc->isrc_dev = intr_irq_root_dev;
869 		ipi_next_num++;
870 	}
871 	return (error);
872 }
873 
874 /*
875  *  Set up an IPI handler on an interrupt source.
876  *
877  *  Note that there may be other ways to send and receive IPIs on a
878  *  platform, fast interrupts for example. In that case, one can call
879  *  this function with the AISHF_NOALLOC flag set and then call
880  *  intr_ipi_dispatch() when appropriate.
881  *
882  *  Not SMP coherent.
883  */
884 int
885 intr_ipi_set_handler(u_int ipi, const char *name, intr_ipi_filter_t *filter,
886     void *arg, u_int flags)
887 {
888 	struct intr_irqsrc *isrc;
889 	int error;
890 
891 	if (filter == NULL)
892 		return (EINVAL);
893 
894 	isrc = intr_ipi_lookup(ipi);
895 	if (isrc->isrc_ipifilter != NULL)
896 		return (EEXIST);
897 
898 	if ((flags & AISHF_NOALLOC) == 0) {
899 		error = ipi_map(isrc, ipi);
900 		if (error != 0)
901 			return (error);
902 	}
903 
904 	isrc->isrc_ipifilter = filter;
905 	isrc->isrc_arg = arg;
906 	isrc->isrc_handlers = 1;
907 	isrc->isrc_count = intr_ipi_setup_counters(name);
908 	isrc->isrc_index = 0; /* it should not be used in IPI case */
909 
910 	if (isrc->isrc_dev != NULL) {
911 		PIC_ENABLE_INTR(isrc->isrc_dev, isrc);
912 		PIC_ENABLE_SOURCE(isrc->isrc_dev, isrc);
913 	}
914 	return (0);
915 }
916 #endif
917 
918 /* Sending IPI */
919 void
920 ipi_all_but_self(u_int ipi)
921 {
922 	cpuset_t cpus;
923 
924 	cpus = all_cpus;
925 	CPU_CLR(PCPU_GET(cpuid), &cpus);
926 	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
927 	intr_ipi_send(cpus, ipi);
928 }
929 
930 void
931 ipi_cpu(int cpu, u_int ipi)
932 {
933 	cpuset_t cpus;
934 
935 	CPU_ZERO(&cpus);
936 	CPU_SET(cpu, &cpus);
937 
938 	CTR3(KTR_SMP, "%s: cpu: %d, ipi: %x", __func__, cpu, ipi);
939 	intr_ipi_send(cpus, ipi);
940 }
941 
942 void
943 ipi_selected(cpuset_t cpus, u_int ipi)
944 {
945 
946 	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
947 	intr_ipi_send(cpus, ipi);
948 }
949