/*-
 * Copyright (c) 2015-2016 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Andrew Turner under
 * sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include "opt_acpi.h"
#include "opt_kstack_pages.h"
#include "opt_platform.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>

#include <machine/machdep.h>
#include <machine/intr.h>
#include <machine/smp.h>
#ifdef VFP
#include <machine/vfp.h>
#endif

#ifdef DEV_ACPI
#include <contrib/dev/acpica/include/acpi.h>
#include <dev/acpica/acpivar.h>
#endif

#ifdef FDT
#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#include <dev/ofw/ofw_cpu.h>
#endif

#include <dev/psci/psci.h>

#include "pic_if.h"

#define	MP_QUIRK_CPULIST	0x01	/* The list of cpus may be wrong, */
					/* don't panic if one fails to start */
static uint32_t mp_quirks;

#ifdef FDT
static struct {
	const char *compat;
	uint32_t quirks;
} fdt_quirks[] = {
	{ "arm,foundation-aarch64",	MP_QUIRK_CPULIST },
	{ "arm,fvp-base",		MP_QUIRK_CPULIST },
	/* This is incorrect in some DTS files */
	{ "arm,vfp-base",		MP_QUIRK_CPULIST },
	{ NULL, 0 },
};
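/*
 * A matching quirk is applied in cpu_mp_start().  For example, the FVP
 * and Foundation model DTS files may list CPUs that fail to start, so
 * MP_QUIRK_CPULIST makes start_cpu() just report the failure rather
 * than assert (under INVARIANTS) that the CPU started.
 */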
#endif

typedef void intr_ipi_send_t(void *, cpuset_t, u_int);
typedef void intr_ipi_handler_t(void *);

#define INTR_IPI_NAMELEN	(MAXCOMLEN + 1)
struct intr_ipi {
	intr_ipi_handler_t *	ii_handler;	/* IPI handler function */
	void *			ii_handler_arg;	/* argument passed to it */
	intr_ipi_send_t *	ii_send;	/* PIC method used to send */
	void *			ii_send_arg;	/* interrupt source to send on */
	char			ii_name[INTR_IPI_NAMELEN];
	u_long *		ii_count;	/* per-CPU delivery counters */
};

static struct intr_ipi ipi_sources[INTR_IPI_COUNT];

static struct intr_ipi *intr_ipi_lookup(u_int);
static void intr_pic_ipi_setup(u_int, const char *, intr_ipi_handler_t *,
    void *);

extern struct pcpu __pcpu[];

static device_identify_t arm64_cpu_identify;
static device_probe_t arm64_cpu_probe;
static device_attach_t arm64_cpu_attach;

static void ipi_ast(void *);
static void ipi_hardclock(void *);
static void ipi_preempt(void *);
static void ipi_rendezvous(void *);
static void ipi_stop(void *);

struct mtx ap_boot_mtx;
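/* Per-CPU context saved by ipi_stop() */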
struct pcb stoppcbs[MAXCPU];

static device_t cpu_list[MAXCPU];

/*
 * Not all systems boot from the first CPU in the device tree. To work around
 * this we need to find which CPU we have booted from so when we later
 * enable the secondary CPUs we skip this one.
 */
static int cpu0 = -1;

void mpentry(unsigned long cpuid);
void init_secondary(uint64_t);

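/* Bootstrap kernel stacks, one per AP; the boot CPU already has a stack */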
uint8_t secondary_stacks[MAXCPU - 1][PAGE_SIZE * KSTACK_PAGES] __aligned(16);

/* Set to 1 once we're ready to let the APs out of the pen. */
volatile int aps_ready = 0;

/* Temporary variables for init_secondary() */
void *dpcpu[MAXCPU - 1];

static device_method_t arm64_cpu_methods[] = {
	/* Device interface */
	DEVMETHOD(device_identify,	arm64_cpu_identify),
	DEVMETHOD(device_probe,		arm64_cpu_probe),
	DEVMETHOD(device_attach,	arm64_cpu_attach),

	DEVMETHOD_END
};

static devclass_t arm64_cpu_devclass;
static driver_t arm64_cpu_driver = {
	"arm64_cpu",
	arm64_cpu_methods,
	0
};

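/* Register the arm64_cpu driver on the "cpu" bus */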
DRIVER_MODULE(arm64_cpu, cpu, arm64_cpu_driver, arm64_cpu_devclass, 0, 0);

static void
arm64_cpu_identify(driver_t *driver, device_t parent)
{

	if (device_find_child(parent, "arm64_cpu", -1) != NULL)
		return;
	if (BUS_ADD_CHILD(parent, 0, "arm64_cpu", -1) == NULL)
		device_printf(parent, "add child failed\n");
}

static int
arm64_cpu_probe(device_t dev)
{
	u_int cpuid;

	cpuid = device_get_unit(dev);
	if (cpuid >= MAXCPU || cpuid > mp_maxid)
		return (EINVAL);

	device_quiet(dev);
	return (0);
}

static int
arm64_cpu_attach(device_t dev)
{
	const uint32_t *reg;
	size_t reg_size;
	u_int cpuid;
	int i;

	cpuid = device_get_unit(dev);

	if (cpuid >= MAXCPU || cpuid > mp_maxid)
		return (EINVAL);
	KASSERT(cpu_list[cpuid] == NULL, ("Already have cpu %u", cpuid));

	reg = cpu_get_cpuid(dev, &reg_size);
	if (reg == NULL)
		return (EINVAL);

	if (bootverbose) {
		device_printf(dev, "register <");
		for (i = 0; i < reg_size; i++)
			printf("%s%x", (i == 0) ? "" : " ", reg[i]);
		printf(">\n");
	}

	/* Save the device so we can start the CPU later */
	cpu_list[cpuid] = dev;

	return (0);
}

static void
release_aps(void *dummy __unused)
{
	int i, started;

	/* Only release CPUs if they exist */
	if (mp_ncpus == 1)
		return;

	intr_pic_ipi_setup(IPI_AST, "ast", ipi_ast, NULL);
	intr_pic_ipi_setup(IPI_PREEMPT, "preempt", ipi_preempt, NULL);
	intr_pic_ipi_setup(IPI_RENDEZVOUS, "rendezvous", ipi_rendezvous, NULL);
	intr_pic_ipi_setup(IPI_STOP, "stop", ipi_stop, NULL);
	intr_pic_ipi_setup(IPI_STOP_HARD, "stop hard", ipi_stop, NULL);
	intr_pic_ipi_setup(IPI_HARDCLOCK, "hardclock", ipi_hardclock, NULL);

	atomic_store_rel_int(&aps_ready, 1);
	/*
	 * Wake up the other CPUs. The "dsb ishst" ensures the store to
	 * aps_ready is visible before "sev" wakes the APs spinning in
	 * init_secondary()'s "wfe" loop.
	 */
	__asm __volatile(
	    "dsb ishst	\n"
	    "sev	\n"
	    ::: "memory");

	printf("Release APs...");

	started = 0;
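	/* Wait up to 2 seconds (2000 iterations of 1 ms), resetting on progress */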
	for (i = 0; i < 2000; i++) {
		if (smp_started) {
			printf("done\n");
			return;
		}
		/*
		 * Don't time out while we are making progress. Some large
		 * systems can take a while to start all CPUs.
		 */
		if (smp_cpus > started) {
			i = 0;
			started = smp_cpus;
		}
		DELAY(1000);
	}

	printf("APs not started\n");
}
SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);

void
init_secondary(uint64_t cpu)
{
	struct pcpu *pcpup;

	pcpup = &__pcpu[cpu];
	/*
	 * Set the pcpu pointer with a backup in tpidr_el1 to be
	 * loaded when entering the kernel from userland.
	 */
	__asm __volatile(
	    "mov x18, %0 \n"
	    "msr tpidr_el1, %0" :: "r"(pcpup));

	/* Spin until the BSP releases the APs */
	while (!aps_ready)
		__asm __volatile("wfe");

	/* Initialize curthread */
	KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
	pcpup->pc_curthread = pcpup->pc_idlethread;
	pcpup->pc_curpcb = pcpup->pc_idlethread->td_pcb;

	/*
	 * Identify the current CPU. This is necessary to set up
	 * affinity registers and to provide support for
	 * runtime chip identification.
	 */
	identify_cpu();
	install_cpu_errata();

	intr_pic_init_secondary();

	/* Start per-CPU event timers. */
	cpu_initclocks_ap();

#ifdef VFP
	vfp_init();
#endif

	dbg_init();
	pan_enable();

	/* Enable interrupts */
	intr_enable();

	mtx_lock_spin(&ap_boot_mtx);

	atomic_add_rel_32(&smp_cpus, 1);

	if (smp_cpus == mp_ncpus) {
		/* Enable IPIs, TLB shootdowns, freezes, etc. */
		atomic_store_rel_int(&smp_started, 1);
	}

	mtx_unlock_spin(&ap_boot_mtx);

	/* Enter the scheduler */
	sched_throw(NULL);

	panic("scheduler returned us to init_secondary");
	/* NOTREACHED */
}

/*
 *  Send an IPI through the interrupt controller.
 */
static void
pic_ipi_send(void *arg, cpuset_t cpus, u_int ipi)
{

	KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__));
	PIC_IPI_SEND(intr_irq_root_dev, arg, cpus, ipi);
}

/*
 *  Set up an IPI handler on the interrupt controller.
 *
 *  Not SMP coherent.
 */
static void
intr_pic_ipi_setup(u_int ipi, const char *name, intr_ipi_handler_t *hand,
    void *arg)
{
	struct intr_irqsrc *isrc;
	struct intr_ipi *ii;
	int error;

	KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__));
	KASSERT(hand != NULL, ("%s: ipi %u no handler", __func__, ipi));

	error = PIC_IPI_SETUP(intr_irq_root_dev, ipi, &isrc);
	if (error != 0)
		return;

	isrc->isrc_handlers++;

	ii = intr_ipi_lookup(ipi);
	KASSERT(ii->ii_count == NULL, ("%s: ipi %u reused", __func__, ipi));

	ii->ii_handler = hand;
	ii->ii_handler_arg = arg;
	ii->ii_send = pic_ipi_send;
	ii->ii_send_arg = isrc;
	strlcpy(ii->ii_name, name, INTR_IPI_NAMELEN);
	ii->ii_count = intr_ipi_setup_counters(name);
}
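
/*
 * For illustration, the setup/send pairing as used in this file
 * (release_aps() registers the handler, ipi_all_but_self() sends):
 *
 *	intr_pic_ipi_setup(IPI_AST, "ast", ipi_ast, NULL);
 *	ipi_all_but_self(IPI_AST);
 */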

static void
intr_ipi_send(cpuset_t cpus, u_int ipi)
{
	struct intr_ipi *ii;

	ii = intr_ipi_lookup(ipi);
	if (ii->ii_count == NULL)
		panic("%s: not setup IPI %u", __func__, ipi);

	ii->ii_send(ii->ii_send_arg, cpus, ipi);
}

static void
ipi_ast(void *dummy __unused)
{

	CTR0(KTR_SMP, "IPI_AST");
}

static void
ipi_hardclock(void *dummy __unused)
{

	CTR1(KTR_SMP, "%s: IPI_HARDCLOCK", __func__);
	hardclockintr();
}

static void
ipi_preempt(void *dummy __unused)
{
	CTR1(KTR_SMP, "%s: IPI_PREEMPT", __func__);
	sched_preempt(curthread);
}

static void
ipi_rendezvous(void *dummy __unused)
{

	CTR0(KTR_SMP, "IPI_RENDEZVOUS");
	smp_rendezvous_action();
}

static void
ipi_stop(void *dummy __unused)
{
	u_int cpu;

	CTR0(KTR_SMP, "IPI_STOP");

	cpu = PCPU_GET(cpuid);
	savectx(&stoppcbs[cpu]);

	/* Indicate we are stopped */
	CPU_SET_ATOMIC(cpu, &stopped_cpus);

	/* Wait for restart */
	while (!CPU_ISSET(cpu, &started_cpus))
		cpu_spinwait();

	CPU_CLR_ATOMIC(cpu, &started_cpus);
	CPU_CLR_ATOMIC(cpu, &stopped_cpus);
	CTR0(KTR_SMP, "IPI_STOP (restart)");
}

struct cpu_group *
cpu_topo(void)
{

	return (smp_topo_none());
}

/* Determine if we are running on an MP machine */
int
cpu_mp_probe(void)
{

	/* ARM64TODO: Read the U bit of mpidr_el1 to determine this */
	return (1);
}

static bool
start_cpu(u_int id, uint64_t target_cpu)
{
	struct pcpu *pcpup;
	vm_paddr_t pa;
	u_int cpuid;
	int err;

	/* Check we are able to start this cpu */
	if (id > mp_maxid)
		return (false);

	KASSERT(id < MAXCPU, ("Too many CPUs"));

	/* We are already running on cpu 0 */
	if (id == cpu0)
		return (true);

	/*
	 * Rotate the CPU IDs to put the boot CPU as CPU 0. We keep the other
	 * CPUs ordered as they are likely grouped into clusters, and it can
	 * be useful to keep that property, e.g. for the GICv3 driver to send
	 * an IPI to all CPUs in a cluster.
	 */
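	/*
	 * For example, with mp_maxid == 3 and the boot CPU at hardware
	 * id 2, ids 2, 3, 0 and 1 map to logical cpuids 0, 1, 2 and 3.
	 */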
	cpuid = id;
	if (cpuid < cpu0)
		cpuid += mp_maxid + 1;
	cpuid -= cpu0;

	pcpup = &__pcpu[cpuid];
	pcpu_init(pcpup, cpuid, sizeof(struct pcpu));

	dpcpu[cpuid - 1] = (void *)kmem_malloc(kernel_arena, DPCPU_SIZE,
	    M_WAITOK | M_ZERO);
	dpcpu_init(dpcpu[cpuid - 1], cpuid);

	printf("Starting CPU %u (%lx)\n", cpuid, target_cpu);
	pa = pmap_extract(kernel_pmap, (vm_offset_t)mpentry);

	err = psci_cpu_on(target_cpu, pa, cpuid);
	if (err != PSCI_RETVAL_SUCCESS) {
		/*
		 * Panic here if INVARIANTS are enabled and PSCI failed to
		 * start the requested CPU.  psci_cpu_on() returns
		 * PSCI_MISSING to indicate we are unable to use it to start
		 * the given CPU; that case is tolerated, as is a CPU-list
		 * quirk.
		 */
		KASSERT(err == PSCI_MISSING ||
		    (mp_quirks & MP_QUIRK_CPULIST) == MP_QUIRK_CPULIST,
		    ("Failed to start CPU %u (%lx)\n", id, target_cpu));

		pcpu_destroy(pcpup);
		kmem_free(kernel_arena, (vm_offset_t)dpcpu[cpuid - 1],
		    DPCPU_SIZE);
		dpcpu[cpuid - 1] = NULL;
		mp_ncpus--;

		/* Notify the user that the CPU failed to start */
		printf("Failed to start CPU %u (%lx)\n", id, target_cpu);
	} else
		CPU_SET(cpuid, &all_cpus);

	return (true);
}

#ifdef DEV_ACPI
static void
madt_handler(ACPI_SUBTABLE_HEADER *entry, void *arg)
{
	ACPI_MADT_GENERIC_INTERRUPT *intr;
	u_int *cpuid;

	switch(entry->Type) {
	case ACPI_MADT_TYPE_GENERIC_INTERRUPT:
		intr = (ACPI_MADT_GENERIC_INTERRUPT *)entry;
		cpuid = arg;

		start_cpu((*cpuid), intr->ArmMpidr);
		(*cpuid)++;
		break;
	default:
		break;
	}
}

static void
cpu_init_acpi(void)
{
	ACPI_TABLE_MADT *madt;
	vm_paddr_t physaddr;
	u_int cpuid;

	physaddr = acpi_find_table(ACPI_SIG_MADT);
	if (physaddr == 0)
		return;

	madt = acpi_map_table(physaddr, ACPI_SIG_MADT);
	if (madt == NULL) {
		printf("Unable to map the MADT, not starting APs\n");
		return;
	}

	cpuid = 0;
	acpi_walk_subtables(madt + 1, (char *)madt + madt->Header.Length,
	    madt_handler, &cpuid);

	acpi_unmap_table(madt);
}
#endif

#ifdef FDT
static boolean_t
cpu_init_fdt(u_int id, phandle_t node, u_int addr_size, pcell_t *reg)
{
	uint64_t target_cpu;
	int domain;

	target_cpu = reg[0];
	if (addr_size == 2) {
		target_cpu <<= 32;
		target_cpu |= reg[1];
	}

	if (!start_cpu(id, target_cpu))
		return (FALSE);

	/* Try to read the NUMA node of this CPU */
	if (OF_getencprop(node, "numa-node-id", &domain, sizeof(domain)) > 0) {
		__pcpu[id].pc_domain = domain;
		if (domain < MAXMEMDOM)
			CPU_SET(id, &cpuset_domain[domain]);
	}

	return (TRUE);
}
#endif

/* Initialize and fire up non-boot processors */
void
cpu_mp_start(void)
{
#ifdef FDT
	phandle_t node;
	int i;
#endif

	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);

	CPU_SET(0, &all_cpus);

	switch(arm64_bus_method) {
#ifdef DEV_ACPI
	case ARM64_BUS_ACPI:
		KASSERT(cpu0 >= 0, ("Current CPU was not found"));
		cpu_init_acpi();
		break;
#endif
#ifdef FDT
	case ARM64_BUS_FDT:
		node = OF_peer(0);
		for (i = 0; fdt_quirks[i].compat != NULL; i++) {
			if (ofw_bus_node_is_compatible(node,
			    fdt_quirks[i].compat) != 0) {
				mp_quirks = fdt_quirks[i].quirks;
			}
		}
		KASSERT(cpu0 >= 0, ("Current CPU was not found"));
		ofw_cpu_early_foreach(cpu_init_fdt, true);
		break;
#endif
	default:
		break;
	}
}

/* Introduce the rest of the cores to the world */
void
cpu_mp_announce(void)
{
}

#ifdef DEV_ACPI
static void
cpu_count_acpi_handler(ACPI_SUBTABLE_HEADER *entry, void *arg)
{
	ACPI_MADT_GENERIC_INTERRUPT *intr;
	u_int *cores = arg;
	uint64_t mpidr_reg;

	switch(entry->Type) {
	case ACPI_MADT_TYPE_GENERIC_INTERRUPT:
		intr = (ACPI_MADT_GENERIC_INTERRUPT *)entry;
		if (cpu0 < 0) {
			mpidr_reg = READ_SPECIALREG(mpidr_el1);
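			/*
			 * Compare only the MPIDR affinity fields,
			 * Aff3..Aff0 (bits 39:32 and 23:0).
			 */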
			if ((mpidr_reg & 0xff00fffffful) == intr->ArmMpidr)
				cpu0 = *cores;
		}
		(*cores)++;
		break;
	default:
		break;
	}
}

static u_int
cpu_count_acpi(void)
{
	ACPI_TABLE_MADT *madt;
	vm_paddr_t physaddr;
	u_int cores;

	physaddr = acpi_find_table(ACPI_SIG_MADT);
	if (physaddr == 0)
		return (0);

	madt = acpi_map_table(physaddr, ACPI_SIG_MADT);
	if (madt == NULL) {
		printf("Unable to map the MADT, not starting APs\n");
		return (0);
	}

	cores = 0;
	acpi_walk_subtables(madt + 1, (char *)madt + madt->Header.Length,
	    cpu_count_acpi_handler, &cores);

	acpi_unmap_table(madt);

	return (cores);
}
#endif

#ifdef FDT
static boolean_t
cpu_find_cpu0_fdt(u_int id, phandle_t node, u_int addr_size, pcell_t *reg)
{
	uint64_t mpidr_fdt, mpidr_reg;

	if (cpu0 < 0) {
		mpidr_fdt = reg[0];
		if (addr_size == 2) {
			mpidr_fdt <<= 32;
			mpidr_fdt |= reg[1];
		}

		mpidr_reg = READ_SPECIALREG(mpidr_el1);

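		/* Compare only the MPIDR affinity fields, as in cpu_count_acpi_handler() */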
		if ((mpidr_reg & 0xff00fffffful) == mpidr_fdt)
			cpu0 = id;
	}

	return (TRUE);
}
#endif

void
cpu_mp_setmaxid(void)
{
#if defined(DEV_ACPI) || defined(FDT)
	int cores;
#endif

	switch(arm64_bus_method) {
#ifdef DEV_ACPI
	case ARM64_BUS_ACPI:
		cores = cpu_count_acpi();
		if (cores > 0) {
			cores = MIN(cores, MAXCPU);
			if (bootverbose)
				printf("Found %d CPUs in the ACPI tables\n",
				    cores);
			mp_ncpus = cores;
			mp_maxid = cores - 1;
			return;
		}
		break;
#endif
#ifdef FDT
	case ARM64_BUS_FDT:
		cores = ofw_cpu_early_foreach(cpu_find_cpu0_fdt, false);
		if (cores > 0) {
			cores = MIN(cores, MAXCPU);
			if (bootverbose)
				printf("Found %d CPUs in the device tree\n",
				    cores);
			mp_ncpus = cores;
			mp_maxid = cores - 1;
			return;
		}
		break;
#endif
	default:
		break;
	}

	if (bootverbose)
		printf("No CPU data, limiting to 1 core\n");
	mp_ncpus = 1;
	mp_maxid = 0;
}

/*
 *  Look up an IPI source.
 */
static struct intr_ipi *
intr_ipi_lookup(u_int ipi)
{

	if (ipi >= INTR_IPI_COUNT)
		panic("%s: no such IPI %u", __func__, ipi);

	return (&ipi_sources[ipi]);
}

/*
 *  Interrupt controller dispatch function for IPIs.  It should
 *  be called straight from the interrupt controller, when the
 *  associated interrupt source is learned, or from anybody who
 *  has an interrupt source mapped.
 */
void
intr_ipi_dispatch(u_int ipi, struct trapframe *tf)
{
	void *arg;
	struct intr_ipi *ii;

	ii = intr_ipi_lookup(ipi);
	if (ii->ii_count == NULL)
		panic("%s: not setup IPI %u", __func__, ipi);

	intr_ipi_increment_count(ii->ii_count, PCPU_GET(cpuid));

	/*
	 * Supply the IPI handler with the trapframe as its argument
	 * if no handler argument was registered.
	 */
	arg = ii->ii_handler_arg != NULL ? ii->ii_handler_arg : tf;
	ii->ii_handler(arg);
}

#ifdef notyet
/*
 *  Map IPI into interrupt controller.
 *
 *  Not SMP coherent.
 */
static int
ipi_map(struct intr_irqsrc *isrc, u_int ipi)
{
	boolean_t is_percpu;
	int error;

	if (ipi >= INTR_IPI_COUNT)
		panic("%s: no such IPI %u", __func__, ipi);

	KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__));

	isrc->isrc_type = INTR_ISRCT_NAMESPACE;
	isrc->isrc_nspc_type = INTR_IRQ_NSPC_IPI;
	isrc->isrc_nspc_num = ipi_next_num;

	error = PIC_REGISTER(intr_irq_root_dev, isrc, &is_percpu);
	if (error == 0) {
		isrc->isrc_dev = intr_irq_root_dev;
		ipi_next_num++;
	}
	return (error);
}

/*
 *  Set up an IPI handler on an interrupt source.
 *
 *  Note that there could be more ways to send and receive IPIs on a
 *  platform, for example fast interrupts. In that case, one can call
 *  this function with the AISHF_NOALLOC flag set and then call
 *  intr_ipi_dispatch() when appropriate.
 *
 *  Not SMP coherent.
 */
int
intr_ipi_set_handler(u_int ipi, const char *name, intr_ipi_filter_t *filter,
    void *arg, u_int flags)
{
	struct intr_irqsrc *isrc;
	int error;

	if (filter == NULL)
		return (EINVAL);

	isrc = intr_ipi_lookup(ipi);
	if (isrc->isrc_ipifilter != NULL)
		return (EEXIST);

	if ((flags & AISHF_NOALLOC) == 0) {
		error = ipi_map(isrc, ipi);
		if (error != 0)
			return (error);
	}

	isrc->isrc_ipifilter = filter;
	isrc->isrc_arg = arg;
	isrc->isrc_handlers = 1;
	isrc->isrc_count = intr_ipi_setup_counters(name);
	isrc->isrc_index = 0; /* it should not be used in IPI case */

	if (isrc->isrc_dev != NULL) {
		PIC_ENABLE_INTR(isrc->isrc_dev, isrc);
		PIC_ENABLE_SOURCE(isrc->isrc_dev, isrc);
	}
	return (0);
}
#endif

/* Sending IPIs */
void
ipi_all_but_self(u_int ipi)
{
	cpuset_t cpus;

	cpus = all_cpus;
	CPU_CLR(PCPU_GET(cpuid), &cpus);
	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	intr_ipi_send(cpus, ipi);
}

void
ipi_cpu(int cpu, u_int ipi)
{
	cpuset_t cpus;

	CPU_ZERO(&cpus);
	CPU_SET(cpu, &cpus);

	CTR3(KTR_SMP, "%s: cpu: %d, ipi: %x", __func__, cpu, ipi);
	intr_ipi_send(cpus, ipi);
}

void
ipi_selected(cpuset_t cpus, u_int ipi)
{

	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	intr_ipi_send(cpus, ipi);
}
897