/* xref: /freebsd/sys/arm64/arm64/mp_machdep.c (revision abd87254) */
/*-
 * Copyright (c) 2015-2016 The FreeBSD Foundation
 *
 * This software was developed by Andrew Turner under
 * sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include "opt_acpi.h"
#include "opt_ddb.h"
#include "opt_kstack_pages.h"
#include "opt_platform.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/csan.h>
#include <sys/domainset.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>

#include <machine/machdep.h>
#include <machine/cpu.h>
#include <machine/debug_monitor.h>
#include <machine/intr.h>
#include <machine/smp.h>
#ifdef VFP
#include <machine/vfp.h>
#endif

#ifdef DEV_ACPI
#include <contrib/dev/acpica/include/acpi.h>
#include <dev/acpica/acpivar.h>
#endif

#ifdef FDT
#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#include <dev/ofw/ofw_cpu.h>
#endif

#include <dev/psci/psci.h>

#define	MP_BOOTSTACK_SIZE	(kstack_pages * PAGE_SIZE)

#define	MP_QUIRK_CPULIST	0x01	/* The list of cpus may be wrong, */
					/* don't panic if one fails to start */
static uint32_t mp_quirks;

#ifdef FDT
static struct {
	const char *compat;
	uint32_t quirks;
} fdt_quirks[] = {
	{ "arm,foundation-aarch64",	MP_QUIRK_CPULIST },
	{ "arm,fvp-base",		MP_QUIRK_CPULIST },
	/* This is incorrect in some DTS files */
	{ "arm,vfp-base",		MP_QUIRK_CPULIST },
	{ NULL, 0 },
};
#endif

static void ipi_ast(void *);
static void ipi_hardclock(void *);
static void ipi_preempt(void *);
static void ipi_rendezvous(void *);
static void ipi_stop(void *);

#ifdef FDT
static u_int fdt_cpuid;
#endif

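/*
 * AP entry points: mpentry() is reached from the early assembly boot code
 * (with the CPU ID as its argument) and hands off to init_secondary().
 */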
void mpentry(unsigned long cpuid);
void init_secondary(uint64_t);

/* Synchronize AP startup. */
static struct mtx ap_boot_mtx;

/* Used to initialize the PCPU ahead of calling init_secondary(). */
void *bootpcpu;

/* Stacks for AP initialization, discarded once idle threads are started. */
void *bootstack;
static void *bootstacks[MAXCPU];

/* Count of started APs, used to synchronize access to bootstack. */
static volatile int aps_started;

/* Set to 1 once we're ready to let the APs out of the pen. */
static volatile int aps_ready;

/* Temporary variables for init_secondary() */
static void *dpcpu[MAXCPU - 1];

static bool
is_boot_cpu(uint64_t target_cpu)
{

	return (PCPU_GET_MPIDR(cpuid_to_pcpu[0]) == (target_cpu & CPU_AFF_MASK));
}

static void
release_aps(void *dummy __unused)
{
	int i, started;

	/* Only release CPUs if they exist */
	if (mp_ncpus == 1)
		return;

	intr_ipi_setup(IPI_AST, "ast", ipi_ast, NULL);
	intr_ipi_setup(IPI_PREEMPT, "preempt", ipi_preempt, NULL);
	intr_ipi_setup(IPI_RENDEZVOUS, "rendezvous", ipi_rendezvous, NULL);
	intr_ipi_setup(IPI_STOP, "stop", ipi_stop, NULL);
	intr_ipi_setup(IPI_STOP_HARD, "stop hard", ipi_stop, NULL);
	intr_ipi_setup(IPI_HARDCLOCK, "hardclock", ipi_hardclock, NULL);

	atomic_store_rel_int(&aps_ready, 1);
	/*
	 * Wake up the other CPUs: the "dsb ishst" orders the store to
	 * aps_ready before the SEV that releases APs from their WFE loop
	 * in init_secondary().
	 */
	__asm __volatile(
	    "dsb ishst	\n"
	    "sev	\n"
	    ::: "memory");

	printf("Release APs...");

	started = 0;
	for (i = 0; i < 2000; i++) {
		if (atomic_load_acq_int(&smp_started) != 0) {
			printf("done\n");
			return;
		}
		/*
		 * Don't time out while we are making progress. Some large
		 * systems can take a while to start all CPUs.
		 */
		if (smp_cpus > started) {
			i = 0;
			started = smp_cpus;
		}
		DELAY(1000);
	}

	printf("APs not started\n");
}
SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);

void
init_secondary(uint64_t cpu)
{
	struct pcpu *pcpup;
	pmap_t pmap0;
	uint64_t mpidr;

	ptrauth_mp_start(cpu);

	/*
	 * Verify that the value passed in the 'cpu' argument (aka
	 * context_id) is valid. Some older U-Boot based PSCI
	 * implementations are buggy and may pass a random value in it.
	 */
	mpidr = READ_SPECIALREG(mpidr_el1) & CPU_AFF_MASK;
	if (cpu >= MAXCPU || cpuid_to_pcpu[cpu] == NULL ||
	    PCPU_GET_MPIDR(cpuid_to_pcpu[cpu]) != mpidr) {
		for (cpu = 0; cpu < mp_maxid; cpu++)
			if (cpuid_to_pcpu[cpu] != NULL &&
			    PCPU_GET_MPIDR(cpuid_to_pcpu[cpu]) == mpidr)
				break;
		if (cpu >= MAXCPU)
			panic("MPIDR for this CPU is not in pcpu table");
	}

	/*
	 * Identify the current CPU. This is necessary to set up
	 * affinity registers and to provide support for
	 * runtime chip identification.
	 *
	 * We need this before signalling that the CPU is ready so
	 * the boot CPU can use the results.
	 */
	pcpup = cpuid_to_pcpu[cpu];
	pcpup->pc_midr = get_midr();
	identify_cpu(cpu);

	/* Ensure the stores in identify_cpu have completed */
	atomic_thread_fence_acq_rel();

	/* Signal the BSP and spin until it has released all APs. */
	atomic_add_int(&aps_started, 1);
	while (!atomic_load_int(&aps_ready))
		__asm __volatile("wfe");

	/* Initialize curthread */
	KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
	pcpup->pc_curthread = pcpup->pc_idlethread;
	schedinit_ap();

	/* Initialize curpmap to match TTBR0's current setting. */
	pmap0 = vmspace_pmap(&vmspace0);
	KASSERT(pmap_to_ttbr0(pmap0) == READ_SPECIALREG(ttbr0_el1),
	    ("pmap0 doesn't match cpu %ld's ttbr0", cpu));
	pcpup->pc_curpmap = pmap0;

	install_cpu_errata();

	intr_pic_init_secondary();

	/* Start per-CPU event timers. */
	cpu_initclocks_ap();

#ifdef VFP
	vfp_init_secondary();
#endif

	dbg_init();
	pan_enable();

	mtx_lock_spin(&ap_boot_mtx);
	atomic_add_rel_32(&smp_cpus, 1);
	if (smp_cpus == mp_ncpus) {
		/* Enable IPIs, TLB shootdown, freezes, etc. */
		atomic_store_rel_int(&smp_started, 1);
	}
	mtx_unlock_spin(&ap_boot_mtx);

	kcsan_cpu_init(cpu);

	/* Enter the scheduler */
	sched_ap_entry();

	panic("scheduler returned us to init_secondary");
	/* NOTREACHED */
}

static void
smp_after_idle_runnable(void *arg __unused)
{
	int cpu;

	if (mp_ncpus == 1)
		return;

	KASSERT(smp_started != 0, ("%s: SMP not started yet", __func__));

	/*
	 * Wait for all APs to handle an interrupt.  After that, we know that
	 * the APs have entered the scheduler at least once, so the boot stacks
	 * are safe to free.
	 */
	smp_rendezvous(smp_no_rendezvous_barrier, NULL,
	    smp_no_rendezvous_barrier, NULL);

	for (cpu = 1; cpu < mp_ncpus; cpu++) {
		if (bootstacks[cpu] != NULL)
			kmem_free(bootstacks[cpu], MP_BOOTSTACK_SIZE);
	}
}
SYSINIT(smp_after_idle_runnable, SI_SUB_SMP, SI_ORDER_ANY,
    smp_after_idle_runnable, NULL);

static void
ipi_ast(void *dummy __unused)
{

	CTR0(KTR_SMP, "IPI_AST");
}

static void
ipi_hardclock(void *dummy __unused)
{

	CTR1(KTR_SMP, "%s: IPI_HARDCLOCK", __func__);
	hardclockintr();
}

static void
ipi_preempt(void *dummy __unused)
{
	CTR1(KTR_SMP, "%s: IPI_PREEMPT", __func__);
	sched_preempt(curthread);
}

static void
ipi_rendezvous(void *dummy __unused)
{

	CTR0(KTR_SMP, "IPI_RENDEZVOUS");
	smp_rendezvous_action();
}

static void
ipi_stop(void *dummy __unused)
{
	u_int cpu;

	CTR0(KTR_SMP, "IPI_STOP");

	cpu = PCPU_GET(cpuid);
	savectx(&stoppcbs[cpu]);

	/* Indicate we are stopped */
	CPU_SET_ATOMIC(cpu, &stopped_cpus);

	/* Wait for restart */
	while (!CPU_ISSET(cpu, &started_cpus))
		cpu_spinwait();

#ifdef DDB
	dbg_register_sync(NULL);
#endif

	CPU_CLR_ATOMIC(cpu, &started_cpus);
	CPU_CLR_ATOMIC(cpu, &stopped_cpus);
	CTR0(KTR_SMP, "IPI_STOP (restart)");
}

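/*
 * Build the scheduler topology: a root group spanning all CPUs, with one
 * child group per VM/NUMA domain whose members are modelled as sharing an
 * L3 cache (CG_SHARE_L3).
 */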
struct cpu_group *
cpu_topo(void)
{
	struct cpu_group *dom, *root;
	int i;

	root = smp_topo_alloc(1);
	dom = smp_topo_alloc(vm_ndomains);

	root->cg_parent = NULL;
	root->cg_child = dom;
	CPU_COPY(&all_cpus, &root->cg_mask);
	root->cg_count = mp_ncpus;
	root->cg_children = vm_ndomains;
	root->cg_level = CG_SHARE_NONE;
	root->cg_flags = 0;

	/*
	 * Redundant layers will be collapsed by the caller so we don't need a
	 * special case for a single domain.
	 */
	for (i = 0; i < vm_ndomains; i++, dom++) {
		dom->cg_parent = root;
		dom->cg_child = NULL;
		CPU_COPY(&cpuset_domain[i], &dom->cg_mask);
		dom->cg_count = CPU_COUNT(&dom->cg_mask);
		dom->cg_children = 0;
		dom->cg_level = CG_SHARE_L3;
		dom->cg_flags = 0;
	}

	return (root);
}

/* Determine if we are running on an MP machine */
int
cpu_mp_probe(void)
{

	/* ARM64TODO: Read the u bit of mpidr_el1 to determine this */
	return (1);
}

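/*
 * Ask PSCI to power on 'target_cpu', entering the kernel at 'entry' (the
 * physical address of mpentry) with 'cpuid' passed through as the context ID.
 */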
static int
enable_cpu_psci(uint64_t target_cpu, vm_paddr_t entry, u_int cpuid)
{
	int err;

	err = psci_cpu_on(target_cpu, entry, cpuid);
	if (err != PSCI_RETVAL_SUCCESS) {
		/*
		 * Panic here if INVARIANTS are enabled and PSCI failed to
		 * start the requested CPU.  psci_cpu_on() returns PSCI_MISSING
		 * to indicate we are unable to use it to start the given CPU.
		 */
		KASSERT(err == PSCI_MISSING ||
		    (mp_quirks & MP_QUIRK_CPULIST) == MP_QUIRK_CPULIST,
		    ("Failed to start CPU %u (%lx), error %d\n",
		    cpuid, target_cpu, err));
		return (EINVAL);
	}

	return (0);
}

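/*
 * Release a CPU parked in a spin-table: publish the physical entry point at
 * the agreed-upon release address, then issue a barrier and an SEV so the
 * waiting CPU re-reads the address and jumps into the kernel.
 */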
static int
enable_cpu_spin(uint64_t cpu, vm_paddr_t entry, vm_paddr_t release_paddr)
{
	vm_paddr_t *release_addr;

	release_addr = pmap_mapdev(release_paddr, sizeof(*release_addr));
	if (release_addr == NULL)
		return (ENOMEM);

	*release_addr = entry;
	pmap_unmapdev(release_addr, sizeof(*release_addr));

	__asm __volatile(
	    "dsb sy	\n"
	    "sev	\n"
	    ::: "memory");

	return (0);
}

/*
 * Start the given CPU. If the CPU is already running, i.e. it is the boot
 * CPU, do nothing. Return true if the CPU is present and running.
 */
static bool
start_cpu(u_int cpuid, uint64_t target_cpu, int domain, vm_paddr_t release_addr)
{
	struct pcpu *pcpup;
	vm_size_t size;
	vm_paddr_t pa;
	int err, naps;

	/* Check that we are able to start this CPU */
	if (cpuid > mp_maxid)
		return (false);

	/* Skip boot CPU */
	if (is_boot_cpu(target_cpu))
		return (true);

	KASSERT(cpuid < MAXCPU, ("Too many CPUs"));

	size = round_page(sizeof(*pcpup) + DPCPU_SIZE);
	pcpup = kmem_malloc_domainset(DOMAINSET_PREF(domain), size,
	    M_WAITOK | M_ZERO);
	pmap_disable_promotion((vm_offset_t)pcpup, size);
	pcpu_init(pcpup, cpuid, sizeof(struct pcpu));
	pcpup->pc_mpidr = target_cpu & CPU_AFF_MASK;
	bootpcpu = pcpup;

	dpcpu[cpuid - 1] = (void *)(pcpup + 1);
	dpcpu_init(dpcpu[cpuid - 1], cpuid);

	bootstacks[cpuid] = kmem_malloc_domainset(DOMAINSET_PREF(domain),
	    MP_BOOTSTACK_SIZE, M_WAITOK | M_ZERO);

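	/*
	 * Snapshot the count of started APs so we can tell below when this
	 * AP has taken ownership of the shared bootstack pointer.
	 */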
	naps = atomic_load_int(&aps_started);
	bootstack = (char *)bootstacks[cpuid] + MP_BOOTSTACK_SIZE;

	printf("Starting CPU %u (%lx)\n", cpuid, target_cpu);
	pa = pmap_extract(kernel_pmap, (vm_offset_t)mpentry);

	/*
	 * Only a limited set of the hardware we support lacks EL3 and must
	 * rely on spin-tables to remain useful, so we will usually fall into
	 * the PSCI branch here.
	 */
	MPASS(release_addr == 0 || !psci_present);
	if (release_addr != 0)
		err = enable_cpu_spin(target_cpu, pa, release_addr);
	else
		err = enable_cpu_psci(target_cpu, pa, cpuid);

	if (err != 0) {
		pcpu_destroy(pcpup);
		dpcpu[cpuid - 1] = NULL;
		kmem_free(bootstacks[cpuid], MP_BOOTSTACK_SIZE);
		kmem_free(pcpup, size);
		bootstacks[cpuid] = NULL;
		mp_ncpus--;
		return (false);
	}

	/* Wait for the AP to switch to its boot stack. */
	while (atomic_load_int(&aps_started) < naps + 1)
		cpu_spinwait();
	CPU_SET(cpuid, &all_cpus);

	return (true);
}

#ifdef DEV_ACPI
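/*
 * MADT walker callback: start one CPU for each Generic Interrupt Controller
 * (GICC) entry, using its MPIDR and ACPI processor UID.
 */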
static void
madt_handler(ACPI_SUBTABLE_HEADER *entry, void *arg)
{
	ACPI_MADT_GENERIC_INTERRUPT *intr;
	u_int *cpuid;
	u_int id;
	int domain;

	switch (entry->Type) {
	case ACPI_MADT_TYPE_GENERIC_INTERRUPT:
		intr = (ACPI_MADT_GENERIC_INTERRUPT *)entry;
		cpuid = arg;

		if (is_boot_cpu(intr->ArmMpidr))
			id = 0;
		else
			id = *cpuid;

		domain = 0;
#ifdef NUMA
		if (vm_ndomains > 1)
			domain = acpi_pxm_get_cpu_locality(intr->Uid);
#endif
		if (start_cpu(id, intr->ArmMpidr, domain, 0)) {
			MPASS(cpuid_to_pcpu[id] != NULL);
			cpuid_to_pcpu[id]->pc_acpi_id = intr->Uid;
			/*
			 * Don't increment for the boot CPU; its CPU ID is
			 * reserved.
			 */
			if (!is_boot_cpu(intr->ArmMpidr))
				(*cpuid)++;
		}

		break;
	default:
		break;
	}
}

static void
cpu_init_acpi(void)
{
	ACPI_TABLE_MADT *madt;
	vm_paddr_t physaddr;
	u_int cpuid;

	physaddr = acpi_find_table(ACPI_SIG_MADT);
	if (physaddr == 0)
		return;

	madt = acpi_map_table(physaddr, ACPI_SIG_MADT);
	if (madt == NULL) {
		printf("Unable to map the MADT, not starting APs\n");
		return;
	}
	/* Boot CPU is always 0 */
	cpuid = 1;
	acpi_walk_subtables(madt + 1, (char *)madt + madt->Header.Length,
	    madt_handler, &cpuid);

	acpi_unmap_table(madt);

#if MAXMEMDOM > 1
	acpi_pxm_set_cpu_locality();
#endif
}
#endif

#ifdef FDT
/*
 * Read the 64-bit spin-table release address from the two-cell
 * "cpu-release-addr" property. Failure is indicated by leaving
 * *release_addr unpopulated.
 */
static void
populate_release_addr(phandle_t node, vm_paddr_t *release_addr)
{
	pcell_t buf[2];

	if (OF_getencprop(node, "cpu-release-addr", buf, sizeof(buf)) !=
	    sizeof(buf))
		return;

	*release_addr = (((uintptr_t)buf[0] << 32) | buf[1]);
}

static bool
start_cpu_fdt(u_int id, phandle_t node, u_int addr_size, pcell_t *reg)
{
	uint64_t target_cpu;
	vm_paddr_t release_addr;
	char *enable_method;
	int domain;
	int cpuid;

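	/* Construct the target MPIDR from one or two "reg" address cells. */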
	target_cpu = reg[0];
	if (addr_size == 2) {
		target_cpu <<= 32;
		target_cpu |= reg[1];
	}

	if (is_boot_cpu(target_cpu))
		cpuid = 0;
	else
		cpuid = fdt_cpuid;

	/*
	 * If PSCI is present, we'll always use that -- the cpu_on method is
	 * mandated in both v0.1 and v0.2.  We'll check the enable-method if
	 * we don't have PSCI and use spin table if it's provided.
	 */
	release_addr = 0;
	if (!psci_present && cpuid != 0) {
		if (OF_getprop_alloc(node, "enable-method",
		    (void **)&enable_method) <= 0)
			return (false);

		if (strcmp(enable_method, "spin-table") != 0) {
			OF_prop_free(enable_method);
			return (false);
		}

		OF_prop_free(enable_method);
		populate_release_addr(node, &release_addr);
		if (release_addr == 0) {
			printf("Failed to fetch release address for CPU %u\n",
			    cpuid);
			return (false);
		}
	}

	if (!start_cpu(cpuid, target_cpu, 0, release_addr))
		return (false);

	/*
	 * Don't increment for the boot CPU; its CPU ID is reserved.
	 */
	if (!is_boot_cpu(target_cpu))
		fdt_cpuid++;

	/* Try to read the NUMA node of this CPU */
	if (vm_ndomains == 1 ||
	    OF_getencprop(node, "numa-node-id", &domain, sizeof(domain)) <= 0)
		domain = 0;
	cpuid_to_pcpu[cpuid]->pc_domain = domain;
	if (domain < MAXMEMDOM)
		CPU_SET(cpuid, &cpuset_domain[domain]);
	return (true);
}

static void
cpu_init_fdt(void)
{
	phandle_t node;
	int i;

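	/* Apply quirks keyed off the root node's compatible strings. */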
	node = OF_peer(0);
	for (i = 0; fdt_quirks[i].compat != NULL; i++) {
		if (ofw_bus_node_is_compatible(node,
		    fdt_quirks[i].compat) != 0) {
			mp_quirks = fdt_quirks[i].quirks;
		}
	}
	fdt_cpuid = 1;
	ofw_cpu_early_foreach(start_cpu_fdt, true);
}
#endif

/* Initialize and fire up non-boot processors */
void
cpu_mp_start(void)
{
	uint64_t mpidr;

	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);

	/* CPU 0 is always boot CPU. */
	CPU_SET(0, &all_cpus);
	mpidr = READ_SPECIALREG(mpidr_el1) & CPU_AFF_MASK;
	cpuid_to_pcpu[0]->pc_mpidr = mpidr;

	cpu_desc_init();

	switch (arm64_bus_method) {
#ifdef DEV_ACPI
	case ARM64_BUS_ACPI:
		mp_quirks = MP_QUIRK_CPULIST;
		cpu_init_acpi();
		break;
#endif
#ifdef FDT
	case ARM64_BUS_FDT:
		cpu_init_fdt();
		break;
#endif
	default:
		break;
	}
}

/* Introduce the rest of the cores to the world */
void
cpu_mp_announce(void)
{
}

#ifdef DEV_ACPI
static void
cpu_count_acpi_handler(ACPI_SUBTABLE_HEADER *entry, void *arg)
{
	u_int *cores = arg;

	switch (entry->Type) {
	case ACPI_MADT_TYPE_GENERIC_INTERRUPT:
		(*cores)++;
		break;
	default:
		break;
	}
}

static u_int
cpu_count_acpi(void)
{
	ACPI_TABLE_MADT *madt;
	vm_paddr_t physaddr;
	u_int cores;

	physaddr = acpi_find_table(ACPI_SIG_MADT);
	if (physaddr == 0)
		return (0);

	madt = acpi_map_table(physaddr, ACPI_SIG_MADT);
	if (madt == NULL) {
		printf("Unable to map the MADT, not starting APs\n");
		return (0);
	}

	cores = 0;
	acpi_walk_subtables(madt + 1, (char *)madt + madt->Header.Length,
	    cpu_count_acpi_handler, &cores);

	acpi_unmap_table(madt);

	return (cores);
}
#endif

void
cpu_mp_setmaxid(void)
{
	int cores;

	mp_ncpus = 1;
	mp_maxid = 0;

	switch (arm64_bus_method) {
#ifdef DEV_ACPI
	case ARM64_BUS_ACPI:
		cores = cpu_count_acpi();
		if (cores > 0) {
			cores = MIN(cores, MAXCPU);
			if (bootverbose)
				printf("Found %d CPUs in the ACPI tables\n",
				    cores);
			mp_ncpus = cores;
			mp_maxid = cores - 1;
		}
		break;
#endif
#ifdef FDT
	case ARM64_BUS_FDT:
		cores = ofw_cpu_early_foreach(NULL, false);
		if (cores > 0) {
			cores = MIN(cores, MAXCPU);
			if (bootverbose)
				printf("Found %d CPUs in the device tree\n",
				    cores);
			mp_ncpus = cores;
			mp_maxid = cores - 1;
		}
		break;
#endif
	default:
		if (bootverbose)
			printf("No CPU data, limiting to 1 core\n");
		break;
	}

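	/* Allow the hw.ncpu tunable to lower the CPU count, never raise it. */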
	if (TUNABLE_INT_FETCH("hw.ncpu", &cores)) {
		if (cores > 0 && cores < mp_ncpus) {
			mp_ncpus = cores;
			mp_maxid = cores - 1;
		}
	}
}

/* Sending IPIs */
void
ipi_all_but_self(u_int ipi)
{
	cpuset_t cpus;

	cpus = all_cpus;
	CPU_CLR(PCPU_GET(cpuid), &cpus);
	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	intr_ipi_send(cpus, ipi);
}

void
ipi_cpu(int cpu, u_int ipi)
{
	cpuset_t cpus;

	CPU_ZERO(&cpus);
	CPU_SET(cpu, &cpus);

	CTR3(KTR_SMP, "%s: cpu: %d, ipi: %x", __func__, cpu, ipi);
	intr_ipi_send(cpus, ipi);
}

void
ipi_selected(cpuset_t cpus, u_int ipi)
{

	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	intr_ipi_send(cpus, ipi);
}
836