/*-
 * Copyright (c) 2015-2016 The FreeBSD Foundation
 *
 * This software was developed by Andrew Turner under
 * sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include "opt_acpi.h"
#include "opt_ddb.h"
#include "opt_kstack_pages.h"
#include "opt_platform.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/csan.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>

#include <machine/machdep.h>
#include <machine/debug_monitor.h>
#include <machine/intr.h>
#include <machine/smp.h>
#ifdef VFP
#include <machine/vfp.h>
#endif

#ifdef DEV_ACPI
#include <contrib/dev/acpica/include/acpi.h>
#include <dev/acpica/acpivar.h>
#endif

#ifdef FDT
#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#include <dev/ofw/ofw_cpu.h>
#endif

#include <dev/psci/psci.h>

#include "pic_if.h"

#define	MP_QUIRK_CPULIST	0x01	/* The list of cpus may be wrong, */
					/* don't panic if one fails to start */
static uint32_t mp_quirks;

#ifdef FDT
static struct {
	const char *compat;
	uint32_t quirks;
} fdt_quirks[] = {
	{ "arm,foundation-aarch64",	MP_QUIRK_CPULIST },
	{ "arm,fvp-base",		MP_QUIRK_CPULIST },
	/* This is incorrect in some DTS files */
	{ "arm,vfp-base",		MP_QUIRK_CPULIST },
	{ NULL, 0 },
};
#endif

typedef void intr_ipi_send_t(void *, cpuset_t, u_int);
typedef void intr_ipi_handler_t(void *);

#define INTR_IPI_NAMELEN	(MAXCOMLEN + 1)
struct intr_ipi {
	intr_ipi_handler_t *	ii_handler;
	void *			ii_handler_arg;
	intr_ipi_send_t *	ii_send;
	void *			ii_send_arg;
	char			ii_name[INTR_IPI_NAMELEN];
	u_long *		ii_count;
};

static struct intr_ipi ipi_sources[INTR_IPI_COUNT];

static struct intr_ipi *intr_ipi_lookup(u_int);
static void intr_pic_ipi_setup(u_int, const char *, intr_ipi_handler_t *,
    void *);

static void ipi_ast(void *);
static void ipi_hardclock(void *);
static void ipi_preempt(void *);
static void ipi_rendezvous(void *);
static void ipi_stop(void *);

struct pcb stoppcbs[MAXCPU];

#ifdef FDT
static u_int fdt_cpuid;
#endif

void mpentry(unsigned long cpuid);
void init_secondary(uint64_t);

/* Synchronize AP startup. */
static struct mtx ap_boot_mtx;

/* Stacks for AP initialization, discarded once idle threads are started. */
void *bootstack;
static void *bootstacks[MAXCPU];

/* Count of started APs, used to synchronize access to bootstack. */
static volatile int aps_started;

/* Set to 1 once we're ready to let the APs out of the pen. */
static volatile int aps_ready;

/* Temporary variables for init_secondary() */
void *dpcpu[MAXCPU - 1];

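/* Check if the affinity bits of an MPIDR value identify the boot CPU. */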
static bool
is_boot_cpu(uint64_t target_cpu)
{

	return (__pcpu[0].pc_mpidr == (target_cpu & CPU_AFF_MASK));
}

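/*
 * Register the IPI handlers and release the APs spinning in
 * init_secondary(), then wait for them all to report in.
 */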
static void
release_aps(void *dummy __unused)
{
	int i, started;

	/* Only release CPUs if they exist */
	if (mp_ncpus == 1)
		return;

	intr_pic_ipi_setup(IPI_AST, "ast", ipi_ast, NULL);
	intr_pic_ipi_setup(IPI_PREEMPT, "preempt", ipi_preempt, NULL);
	intr_pic_ipi_setup(IPI_RENDEZVOUS, "rendezvous", ipi_rendezvous, NULL);
	intr_pic_ipi_setup(IPI_STOP, "stop", ipi_stop, NULL);
	intr_pic_ipi_setup(IPI_STOP_HARD, "stop hard", ipi_stop, NULL);
	intr_pic_ipi_setup(IPI_HARDCLOCK, "hardclock", ipi_hardclock, NULL);

	atomic_store_rel_int(&aps_ready, 1);
	/* Wake up the other CPUs */
	__asm __volatile(
	    "dsb ishst	\n"
	    "sev	\n"
	    ::: "memory");

	printf("Release APs...");

	started = 0;
	for (i = 0; i < 2000; i++) {
		if (smp_started) {
			printf("done\n");
			return;
		}
		/*
		 * Don't time out while we are making progress. Some large
		 * systems can take a while to start all CPUs.
		 */
		if (smp_cpus > started) {
			i = 0;
			started = smp_cpus;
		}
		DELAY(1000);
	}

	printf("APs not started\n");
}
SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);

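/*
 * Secondary CPU initialization, reached via mpentry(), which the boot CPU
 * passed to psci_cpu_on() along with this CPU's index as the context_id.
 */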
void
init_secondary(uint64_t cpu)
{
	struct pcpu *pcpup;
	pmap_t pmap0;
	u_int mpidr;

	/*
	 * Verify that the value passed in the 'cpu' argument (aka context_id)
	 * is valid. Some older U-Boot based PSCI implementations are buggy
	 * and may pass a random value in it.
	 */
	mpidr = READ_SPECIALREG(mpidr_el1) & CPU_AFF_MASK;
	if (cpu >= MAXCPU || __pcpu[cpu].pc_mpidr != mpidr) {
		for (cpu = 0; cpu < mp_maxid; cpu++)
			if (__pcpu[cpu].pc_mpidr == mpidr)
				break;
		if (cpu >= MAXCPU)
			panic("MPIDR for this CPU is not in pcpu table");
	}

	pcpup = &__pcpu[cpu];
	/*
	 * Set the pcpu pointer with a backup in tpidr_el1 to be
	 * loaded when entering the kernel from userland.
	 */
	__asm __volatile(
	    "mov x18, %0 \n"
	    "msr tpidr_el1, %0" :: "r"(pcpup));

	/*
	 * Identify the current CPU. This is necessary to set up
	 * affinity registers and to provide support for
	 * runtime chip identification.
	 *
	 * We need this before signalling the CPU is ready so
	 * that the boot CPU can use the results.
	 */
	pcpup->pc_midr = get_midr();
	identify_cpu(cpu);

	/* Ensure the stores in identify_cpu have completed */
	atomic_thread_fence_acq_rel();

	/* Signal the BSP and spin until it has released all APs. */
	atomic_add_int(&aps_started, 1);
	while (!atomic_load_int(&aps_ready))
		__asm __volatile("wfe");

	/* Initialize curthread */
	KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
	pcpup->pc_curthread = pcpup->pc_idlethread;

	/* Initialize curpmap to match TTBR0's current setting. */
	pmap0 = vmspace_pmap(&vmspace0);
	KASSERT(pmap_to_ttbr0(pmap0) == READ_SPECIALREG(ttbr0_el1),
	    ("pmap0 doesn't match cpu %ld's ttbr0", cpu));
	pcpup->pc_curpmap = pmap0;

	install_cpu_errata();

	intr_pic_init_secondary();

	/* Start per-CPU event timers. */
	cpu_initclocks_ap();

#ifdef VFP
	vfp_init();
#endif

	dbg_init();
	pan_enable();

	mtx_lock_spin(&ap_boot_mtx);
	atomic_add_rel_32(&smp_cpus, 1);
	if (smp_cpus == mp_ncpus) {
		/* Enable IPIs, TLB shootdowns, freezes, etc. */
		atomic_store_rel_int(&smp_started, 1);
	}
	mtx_unlock_spin(&ap_boot_mtx);

	kcsan_cpu_init(cpu);

	/*
	 * Assert that smp_after_idle_runnable condition is reasonable.
	 */
	MPASS(PCPU_GET(curpcb) == NULL);

	/* Enter the scheduler */
	sched_throw(NULL);

	panic("scheduler returned us to init_secondary");
	/* NOTREACHED */
}

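/*
 * Free the AP boot stacks, but only once each AP has set its curpcb and
 * therefore no longer needs its boot stack.
 */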
static void
smp_after_idle_runnable(void *arg __unused)
{
	struct pcpu *pc;
	int cpu;

	for (cpu = 1; cpu < mp_ncpus; cpu++) {
		if (bootstacks[cpu] != NULL) {
			pc = pcpu_find(cpu);
			while (atomic_load_ptr(&pc->pc_curpcb) == NULL)
				cpu_spinwait();
			kmem_free((vm_offset_t)bootstacks[cpu], PAGE_SIZE);
		}
	}
}
SYSINIT(smp_after_idle_runnable, SI_SUB_SMP, SI_ORDER_ANY,
    smp_after_idle_runnable, NULL);

/*
 *  Send an IPI through the interrupt controller.
 */
static void
pic_ipi_send(void *arg, cpuset_t cpus, u_int ipi)
{

	KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__));

	/*
	 * Ensure that this CPU's stores will be visible to IPI
	 * recipients before starting to send the interrupts.
	 */
	dsb(ishst);

	PIC_IPI_SEND(intr_irq_root_dev, arg, cpus, ipi);
}

/*
 *  Set up an IPI handler on the interrupt controller.
 *
 *  Not SMP coherent.
 */
static void
intr_pic_ipi_setup(u_int ipi, const char *name, intr_ipi_handler_t *hand,
    void *arg)
{
	struct intr_irqsrc *isrc;
	struct intr_ipi *ii;
	int error;

	KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__));
	KASSERT(hand != NULL, ("%s: ipi %u no handler", __func__, ipi));

	error = PIC_IPI_SETUP(intr_irq_root_dev, ipi, &isrc);
	if (error != 0)
		return;

	isrc->isrc_handlers++;

	ii = intr_ipi_lookup(ipi);
	KASSERT(ii->ii_count == NULL, ("%s: ipi %u reused", __func__, ipi));

	ii->ii_handler = hand;
	ii->ii_handler_arg = arg;
	ii->ii_send = pic_ipi_send;
	ii->ii_send_arg = isrc;
	strlcpy(ii->ii_name, name, INTR_IPI_NAMELEN);
	ii->ii_count = intr_ipi_setup_counters(name);

	PIC_ENABLE_INTR(intr_irq_root_dev, isrc);
}

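/*
 * Deliver an IPI to a set of CPUs using the send method registered for it.
 */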
static void
intr_ipi_send(cpuset_t cpus, u_int ipi)
{
	struct intr_ipi *ii;

	ii = intr_ipi_lookup(ipi);
	if (ii->ii_count == NULL)
		panic("%s: not setup IPI %u", __func__, ipi);

	ii->ii_send(ii->ii_send_arg, cpus, ipi);
}

static void
ipi_ast(void *dummy __unused)
{

	CTR0(KTR_SMP, "IPI_AST");
}

static void
ipi_hardclock(void *dummy __unused)
{

	CTR1(KTR_SMP, "%s: IPI_HARDCLOCK", __func__);
	hardclockintr();
}

static void
ipi_preempt(void *dummy __unused)
{
	CTR1(KTR_SMP, "%s: IPI_PREEMPT", __func__);
	sched_preempt(curthread);
}

static void
ipi_rendezvous(void *dummy __unused)
{

	CTR0(KTR_SMP, "IPI_RENDEZVOUS");
	smp_rendezvous_action();
}

static void
ipi_stop(void *dummy __unused)
{
	u_int cpu;

	CTR0(KTR_SMP, "IPI_STOP");

	cpu = PCPU_GET(cpuid);
	savectx(&stoppcbs[cpu]);

	/* Indicate we are stopped */
	CPU_SET_ATOMIC(cpu, &stopped_cpus);

	/* Wait for restart */
	while (!CPU_ISSET(cpu, &started_cpus))
		cpu_spinwait();

#ifdef DDB
	dbg_register_sync(NULL);
#endif

	CPU_CLR_ATOMIC(cpu, &started_cpus);
	CPU_CLR_ATOMIC(cpu, &stopped_cpus);
	CTR0(KTR_SMP, "IPI_STOP (restart)");
}

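/*
 * Describe the CPU topology to the scheduler: a single root group covering
 * all CPUs, with one child group per memory domain sharing an L3 cache.
 */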
struct cpu_group *
cpu_topo(void)
{
	struct cpu_group *dom, *root;
	int i;

	root = smp_topo_alloc(1);
	dom = smp_topo_alloc(vm_ndomains);

	root->cg_parent = NULL;
	root->cg_child = dom;
	CPU_COPY(&all_cpus, &root->cg_mask);
	root->cg_count = mp_ncpus;
	root->cg_children = vm_ndomains;
	root->cg_level = CG_SHARE_NONE;
	root->cg_flags = 0;

	/*
	 * Redundant layers will be collapsed by the caller so we don't need a
	 * special case for a single domain.
	 */
	for (i = 0; i < vm_ndomains; i++, dom++) {
		dom->cg_parent = root;
		dom->cg_child = NULL;
		CPU_COPY(&cpuset_domain[i], &dom->cg_mask);
		dom->cg_count = CPU_COUNT(&dom->cg_mask);
		dom->cg_children = 0;
		dom->cg_level = CG_SHARE_L3;
		dom->cg_flags = 0;
	}

	return (root);
}

/* Determine if we are running on an MP machine. */
int
cpu_mp_probe(void)
{

	/* ARM64TODO: Read the u bit of mpidr_el1 to determine this */
	return (1);
}

/*
 * Starts a given CPU. If the CPU is already running, i.e. it is the boot CPU,
 * do nothing. Returns true if the CPU is present and running.
 */
static bool
start_cpu(u_int cpuid, uint64_t target_cpu)
{
	struct pcpu *pcpup;
	vm_paddr_t pa;
	int err, naps;

	/* Check we are able to start this cpu */
	if (cpuid > mp_maxid)
		return (false);

	/* Skip boot CPU */
	if (is_boot_cpu(target_cpu))
		return (true);

	KASSERT(cpuid < MAXCPU, ("Too many CPUs"));

	pcpup = &__pcpu[cpuid];
	pcpu_init(pcpup, cpuid, sizeof(struct pcpu));
	pcpup->pc_mpidr = target_cpu & CPU_AFF_MASK;

	dpcpu[cpuid - 1] = (void *)kmem_malloc(DPCPU_SIZE, M_WAITOK | M_ZERO);
	dpcpu_init(dpcpu[cpuid - 1], cpuid);

	bootstacks[cpuid] = (void *)kmem_malloc(PAGE_SIZE, M_WAITOK | M_ZERO);

	naps = atomic_load_int(&aps_started);
	bootstack = (char *)bootstacks[cpuid] + PAGE_SIZE;

	printf("Starting CPU %u (%lx)\n", cpuid, target_cpu);
	pa = pmap_extract(kernel_pmap, (vm_offset_t)mpentry);
	err = psci_cpu_on(target_cpu, pa, cpuid);
	if (err != PSCI_RETVAL_SUCCESS) {
		/*
		 * Panic here if INVARIANTS are enabled and PSCI failed to
		 * start the requested CPU.  psci_cpu_on() returns PSCI_MISSING
		 * to indicate we are unable to use it to start the given CPU.
		 */
		KASSERT(err == PSCI_MISSING ||
		    (mp_quirks & MP_QUIRK_CPULIST) == MP_QUIRK_CPULIST,
		    ("Failed to start CPU %u (%lx), error %d\n",
		    cpuid, target_cpu, err));

		pcpu_destroy(pcpup);
		kmem_free((vm_offset_t)dpcpu[cpuid - 1], DPCPU_SIZE);
		dpcpu[cpuid - 1] = NULL;
		kmem_free((vm_offset_t)bootstacks[cpuid], PAGE_SIZE);
		bootstacks[cpuid] = NULL;
		mp_ncpus--;
		return (false);
	}

	/* Wait for the AP to switch to its boot stack. */
	while (atomic_load_int(&aps_started) < naps + 1)
		cpu_spinwait();
	CPU_SET(cpuid, &all_cpus);

	return (true);
}

#ifdef DEV_ACPI
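/*
 * MADT walker callback: start the CPU described by each GIC CPU interface
 * (GICC) entry and record its ACPI processor UID.
 */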
static void
madt_handler(ACPI_SUBTABLE_HEADER *entry, void *arg)
{
	ACPI_MADT_GENERIC_INTERRUPT *intr;
	u_int *cpuid;
	u_int id;

	switch(entry->Type) {
	case ACPI_MADT_TYPE_GENERIC_INTERRUPT:
		intr = (ACPI_MADT_GENERIC_INTERRUPT *)entry;
		cpuid = arg;

		if (is_boot_cpu(intr->ArmMpidr))
			id = 0;
		else
			id = *cpuid;

		if (start_cpu(id, intr->ArmMpidr)) {
			__pcpu[id].pc_acpi_id = intr->Uid;
			/*
			 * Don't increment for the boot CPU, its CPU ID is
			 * reserved.
			 */
			if (!is_boot_cpu(intr->ArmMpidr))
				(*cpuid)++;
		}

		break;
	default:
		break;
	}
}

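/* Enumerate and start the APs listed in the ACPI MADT. */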
static void
cpu_init_acpi(void)
{
	ACPI_TABLE_MADT *madt;
	vm_paddr_t physaddr;
	u_int cpuid;

	physaddr = acpi_find_table(ACPI_SIG_MADT);
	if (physaddr == 0)
		return;

	madt = acpi_map_table(physaddr, ACPI_SIG_MADT);
	if (madt == NULL) {
		printf("Unable to map the MADT, not starting APs\n");
		return;
	}
	/* Boot CPU is always 0 */
	cpuid = 1;
	acpi_walk_subtables(madt + 1, (char *)madt + madt->Header.Length,
	    madt_handler, &cpuid);

	acpi_unmap_table(madt);

#if MAXMEMDOM > 1
	acpi_pxm_set_cpu_locality();
#endif
}
#endif

#ifdef FDT
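/*
 * ofw_cpu_early_foreach() callback: start the CPU described by a device
 * tree cpu node and record its NUMA domain.
 */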
static boolean_t
start_cpu_fdt(u_int id, phandle_t node, u_int addr_size, pcell_t *reg)
{
	uint64_t target_cpu;
	int domain;
	int cpuid;

	target_cpu = reg[0];
	if (addr_size == 2) {
		target_cpu <<= 32;
		target_cpu |= reg[1];
	}

	if (is_boot_cpu(target_cpu))
		cpuid = 0;
	else
		cpuid = fdt_cpuid;

	if (!start_cpu(cpuid, target_cpu))
		return (FALSE);

	/*
	 * Don't increment for the boot CPU, its CPU ID is reserved.
	 */
	if (!is_boot_cpu(target_cpu))
		fdt_cpuid++;

	/* Try to read the NUMA node of this CPU */
	if (vm_ndomains == 1 ||
	    OF_getencprop(node, "numa-node-id", &domain, sizeof(domain)) <= 0)
		domain = 0;
	__pcpu[cpuid].pc_domain = domain;
	if (domain < MAXMEMDOM)
		CPU_SET(cpuid, &cpuset_domain[domain]);
	return (TRUE);
}
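
/*
 * Apply any FDT quirks for this board, then start the APs listed in the
 * device tree.
 */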
static void
cpu_init_fdt(void)
{
	phandle_t node;
	int i;

	node = OF_peer(0);
	for (i = 0; fdt_quirks[i].compat != NULL; i++) {
		if (ofw_bus_node_is_compatible(node,
		    fdt_quirks[i].compat) != 0) {
			mp_quirks = fdt_quirks[i].quirks;
		}
	}
	fdt_cpuid = 1;
	ofw_cpu_early_foreach(start_cpu_fdt, true);
}
#endif

/* Initialize and fire up non-boot processors */
void
cpu_mp_start(void)
{
	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);

	/* CPU 0 is always the boot CPU. */
	CPU_SET(0, &all_cpus);
	__pcpu[0].pc_mpidr = READ_SPECIALREG(mpidr_el1) & CPU_AFF_MASK;

	switch(arm64_bus_method) {
#ifdef DEV_ACPI
	case ARM64_BUS_ACPI:
		mp_quirks = MP_QUIRK_CPULIST;
		cpu_init_acpi();
		break;
#endif
#ifdef FDT
	case ARM64_BUS_FDT:
		cpu_init_fdt();
		break;
#endif
	default:
		break;
	}
}

/* Introduce the rest of the cores to the world. */
void
cpu_mp_announce(void)
{
}

#ifdef DEV_ACPI
static void
cpu_count_acpi_handler(ACPI_SUBTABLE_HEADER *entry, void *arg)
{
	ACPI_MADT_GENERIC_INTERRUPT *intr;
	u_int *cores = arg;

	switch(entry->Type) {
	case ACPI_MADT_TYPE_GENERIC_INTERRUPT:
		intr = (ACPI_MADT_GENERIC_INTERRUPT *)entry;
		(*cores)++;
		break;
	default:
		break;
	}
}

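/* Count the CPUs listed in the ACPI MADT. */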
static u_int
cpu_count_acpi(void)
{
	ACPI_TABLE_MADT *madt;
	vm_paddr_t physaddr;
	u_int cores;

	physaddr = acpi_find_table(ACPI_SIG_MADT);
	if (physaddr == 0)
		return (0);

	madt = acpi_map_table(physaddr, ACPI_SIG_MADT);
	if (madt == NULL) {
		printf("Unable to map the MADT, not starting APs\n");
		return (0);
	}

	cores = 0;
	acpi_walk_subtables(madt + 1, (char *)madt + madt->Header.Length,
	    cpu_count_acpi_handler, &cores);

	acpi_unmap_table(madt);

	return (cores);
}
#endif

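/*
 * Set mp_ncpus and mp_maxid from the firmware-provided CPU list, clamped to
 * MAXCPU and optionally limited further by the hw.ncpu tunable.
 */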
void
cpu_mp_setmaxid(void)
{
	int cores;

	mp_ncpus = 1;
	mp_maxid = 0;

	switch(arm64_bus_method) {
#ifdef DEV_ACPI
	case ARM64_BUS_ACPI:
		cores = cpu_count_acpi();
		if (cores > 0) {
			cores = MIN(cores, MAXCPU);
			if (bootverbose)
				printf("Found %d CPUs in the ACPI tables\n",
				    cores);
			mp_ncpus = cores;
			mp_maxid = cores - 1;
		}
		break;
#endif
#ifdef FDT
	case ARM64_BUS_FDT:
		cores = ofw_cpu_early_foreach(NULL, false);
		if (cores > 0) {
			cores = MIN(cores, MAXCPU);
			if (bootverbose)
				printf("Found %d CPUs in the device tree\n",
				    cores);
			mp_ncpus = cores;
			mp_maxid = cores - 1;
		}
		break;
#endif
	default:
		if (bootverbose)
			printf("No CPU data, limiting to 1 core\n");
		break;
	}

	if (TUNABLE_INT_FETCH("hw.ncpu", &cores)) {
		if (cores > 0 && cores < mp_ncpus) {
			mp_ncpus = cores;
			mp_maxid = cores - 1;
		}
	}
}

/*
 *  Lookup IPI source.
 */
static struct intr_ipi *
intr_ipi_lookup(u_int ipi)
{

	if (ipi >= INTR_IPI_COUNT)
		panic("%s: no such IPI %u", __func__, ipi);

	return (&ipi_sources[ipi]);
}

/*
 *  Interrupt controller dispatch function for IPIs. It should be called
 *  straight from the interrupt controller, when the associated interrupt
 *  source is learned, or from anybody who has an interrupt source mapped.
 */
void
intr_ipi_dispatch(u_int ipi, struct trapframe *tf)
{
	void *arg;
	struct intr_ipi *ii;

	ii = intr_ipi_lookup(ipi);
	if (ii->ii_count == NULL)
		panic("%s: not setup IPI %u", __func__, ipi);

	intr_ipi_increment_count(ii->ii_count, PCPU_GET(cpuid));

	/*
	 * Supply the IPI handler with the trapframe argument
	 * if no handler argument is registered.
	 */
	arg = ii->ii_handler_arg != NULL ? ii->ii_handler_arg : tf;
	ii->ii_handler(arg);
}

#ifdef notyet
/*
 *  Map IPI into interrupt controller.
 *
 *  Not SMP coherent.
 */
static int
ipi_map(struct intr_irqsrc *isrc, u_int ipi)
{
	boolean_t is_percpu;
	int error;

	if (ipi >= INTR_IPI_COUNT)
		panic("%s: no such IPI %u", __func__, ipi);

	KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__));

	isrc->isrc_type = INTR_ISRCT_NAMESPACE;
	isrc->isrc_nspc_type = INTR_IRQ_NSPC_IPI;
	isrc->isrc_nspc_num = ipi_next_num;

	error = PIC_REGISTER(intr_irq_root_dev, isrc, &is_percpu);
	if (error == 0) {
		isrc->isrc_dev = intr_irq_root_dev;
		ipi_next_num++;
	}
	return (error);
}

/*
 *  Set up an IPI handler on an interrupt source.
 *
 *  Note that there could be other ways to send and receive IPIs on a
 *  platform, for example fast interrupts. In that case, one can call this
 *  function with the AISHF_NOALLOC flag set and then call
 *  intr_ipi_dispatch() when appropriate.
 *
 *  Not SMP coherent.
 */
int
intr_ipi_set_handler(u_int ipi, const char *name, intr_ipi_filter_t *filter,
    void *arg, u_int flags)
{
	struct intr_irqsrc *isrc;
	int error;

	if (filter == NULL)
		return (EINVAL);

	isrc = intr_ipi_lookup(ipi);
	if (isrc->isrc_ipifilter != NULL)
		return (EEXIST);

	if ((flags & AISHF_NOALLOC) == 0) {
		error = ipi_map(isrc, ipi);
		if (error != 0)
			return (error);
	}

	isrc->isrc_ipifilter = filter;
	isrc->isrc_arg = arg;
	isrc->isrc_handlers = 1;
	isrc->isrc_count = intr_ipi_setup_counters(name);
	isrc->isrc_index = 0; /* it should not be used in IPI case */

	if (isrc->isrc_dev != NULL) {
		PIC_ENABLE_INTR(isrc->isrc_dev, isrc);
		PIC_ENABLE_SOURCE(isrc->isrc_dev, isrc);
	}
	return (0);
}
#endif

/* Sending IPI */
void
ipi_all_but_self(u_int ipi)
{
	cpuset_t cpus;

	cpus = all_cpus;
	CPU_CLR(PCPU_GET(cpuid), &cpus);
	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	intr_ipi_send(cpus, ipi);
}

void
ipi_cpu(int cpu, u_int ipi)
{
	cpuset_t cpus;

	CPU_ZERO(&cpus);
	CPU_SET(cpu, &cpus);

	CTR3(KTR_SMP, "%s: cpu: %d, ipi: %x", __func__, cpu, ipi);
	intr_ipi_send(cpus, ipi);
}

void
ipi_selected(cpuset_t cpus, u_int ipi)
{

	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	intr_ipi_send(cpus, ipi);
}
931