xref: /freebsd/sys/arm64/arm64/mp_machdep.c (revision a3557ef0)
/*-
 * Copyright (c) 2015-2016 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Andrew Turner under
 * sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include "opt_acpi.h"
#include "opt_ddb.h"
#include "opt_kstack_pages.h"
#include "opt_platform.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/csan.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>

#include <machine/machdep.h>
#include <machine/debug_monitor.h>
#include <machine/intr.h>
#include <machine/smp.h>
#ifdef VFP
#include <machine/vfp.h>
#endif

#ifdef DEV_ACPI
#include <contrib/dev/acpica/include/acpi.h>
#include <dev/acpica/acpivar.h>
#endif

#ifdef FDT
#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#include <dev/ofw/ofw_cpu.h>
#endif

#include <dev/psci/psci.h>

#include "pic_if.h"

#define	MP_QUIRK_CPULIST	0x01	/* The list of cpus may be wrong, */
					/* don't panic if one fails to start */
static uint32_t mp_quirks;

#ifdef FDT
static struct {
	const char *compat;
	uint32_t quirks;
} fdt_quirks[] = {
	{ "arm,foundation-aarch64",	MP_QUIRK_CPULIST },
	{ "arm,fvp-base",		MP_QUIRK_CPULIST },
	/* This is incorrect in some DTS files */
	{ "arm,vfp-base",		MP_QUIRK_CPULIST },
	{ NULL, 0 },
};
#endif

typedef void intr_ipi_send_t(void *, cpuset_t, u_int);
typedef void intr_ipi_handler_t(void *);

#define INTR_IPI_NAMELEN	(MAXCOMLEN + 1)
struct intr_ipi {
	intr_ipi_handler_t *	ii_handler;
	void *			ii_handler_arg;
	intr_ipi_send_t *	ii_send;
	void *			ii_send_arg;
	char			ii_name[INTR_IPI_NAMELEN];
	u_long *		ii_count;
};

static struct intr_ipi ipi_sources[INTR_IPI_COUNT];

static struct intr_ipi *intr_ipi_lookup(u_int);
static void intr_pic_ipi_setup(u_int, const char *, intr_ipi_handler_t *,
    void *);

static void ipi_ast(void *);
static void ipi_hardclock(void *);
static void ipi_preempt(void *);
static void ipi_rendezvous(void *);
static void ipi_stop(void *);

struct pcb stoppcbs[MAXCPU];

/*
 * Not all systems boot from the first CPU in the device tree. To work around
 * this we need to find which CPU we booted from, so that when we later
 * enable the secondary CPUs we can skip it.
 */
static int cpu0 = -1;

void mpentry(unsigned long cpuid);
void init_secondary(uint64_t);

/* Synchronize AP startup. */
static struct mtx ap_boot_mtx;

/* Stacks for AP initialization, discarded once idle threads are started. */
void *bootstack;
static void *bootstacks[MAXCPU];

/* Count of started APs, used to synchronize access to bootstack. */
static volatile int aps_started;

/* Set to 1 once we're ready to let the APs out of the pen. */
static volatile int aps_ready;

/* Temporary variables for init_secondary() */
void *dpcpu[MAXCPU - 1];

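/*
 * Register the IPI handlers and release the APs from the spin loop they
 * entered in init_secondary().  Wait for them all to report in, restarting
 * the timeout as long as new CPUs keep appearing.
 */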
static void
release_aps(void *dummy __unused)
{
	int i, started;

	/* Only release CPUs if they exist */
	if (mp_ncpus == 1)
		return;

	intr_pic_ipi_setup(IPI_AST, "ast", ipi_ast, NULL);
	intr_pic_ipi_setup(IPI_PREEMPT, "preempt", ipi_preempt, NULL);
	intr_pic_ipi_setup(IPI_RENDEZVOUS, "rendezvous", ipi_rendezvous, NULL);
	intr_pic_ipi_setup(IPI_STOP, "stop", ipi_stop, NULL);
	intr_pic_ipi_setup(IPI_STOP_HARD, "stop hard", ipi_stop, NULL);
	intr_pic_ipi_setup(IPI_HARDCLOCK, "hardclock", ipi_hardclock, NULL);

	atomic_store_rel_int(&aps_ready, 1);
	/* Wake up the other CPUs */
	__asm __volatile(
	    "dsb ishst	\n"
	    "sev	\n"
	    ::: "memory");

	printf("Release APs...");

	started = 0;
	for (i = 0; i < 2000; i++) {
		if (smp_started) {
			printf("done\n");
			return;
		}
		/*
		 * Don't time out while we are making progress. Some large
		 * systems can take a while to start all CPUs.
		 */
		if (smp_cpus > started) {
			i = 0;
			started = smp_cpus;
		}
		DELAY(1000);
	}

	printf("APs not started\n");
}
SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);

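/*
 * Secondary CPU entry point, reached from the mpentry() startup code.
 * Set up the per-CPU data, wait for the BSP to release the APs, finish
 * per-CPU initialization and hand this CPU to the scheduler.
 */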
void
init_secondary(uint64_t cpu)
{
	struct pcpu *pcpup;
	pmap_t pmap0;

	pcpup = &__pcpu[cpu];
	/*
	 * Set the pcpu pointer with a backup in tpidr_el1 to be
	 * loaded when entering the kernel from userland.
	 */
	__asm __volatile(
	    "mov x18, %0 \n"
	    "msr tpidr_el1, %0" :: "r"(pcpup));

	/* Signal the BSP and spin until it has released all APs. */
	atomic_add_int(&aps_started, 1);
	while (!atomic_load_int(&aps_ready))
		__asm __volatile("wfe");

	/* Initialize curthread */
	KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
	pcpup->pc_curthread = pcpup->pc_idlethread;

	/* Initialize curpmap to match TTBR0's current setting. */
	pmap0 = vmspace_pmap(&vmspace0);
	KASSERT(pmap_to_ttbr0(pmap0) == READ_SPECIALREG(ttbr0_el1),
	    ("pmap0 doesn't match cpu %ld's ttbr0", cpu));
	pcpup->pc_curpmap = pmap0;

	/*
	 * Identify current CPU. This is necessary to set up
	 * affinity registers and to provide support for
	 * runtime chip identification.
	 */
	identify_cpu();
	install_cpu_errata();

	intr_pic_init_secondary();

	/* Start per-CPU event timers. */
	cpu_initclocks_ap();

#ifdef VFP
	vfp_init();
#endif

	dbg_init();
	pan_enable();

	mtx_lock_spin(&ap_boot_mtx);
	atomic_add_rel_32(&smp_cpus, 1);
	if (smp_cpus == mp_ncpus) {
		/* Enable IPIs, TLB shootdown, freezes, etc. */
		atomic_store_rel_int(&smp_started, 1);
	}
	mtx_unlock_spin(&ap_boot_mtx);

	kcsan_cpu_init(cpu);

	/*
	 * Assert that smp_after_idle_runnable condition is reasonable.
	 */
	MPASS(PCPU_GET(curpcb) == NULL);

	/* Enter the scheduler */
	sched_throw(NULL);

	panic("scheduler returned us to init_secondary");
	/* NOTREACHED */
}

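/*
 * Free each AP's bootstrap stack once that CPU has switched to its idle
 * thread, indicated by pc_curpcb becoming non-NULL.
 */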
static void
smp_after_idle_runnable(void *arg __unused)
{
	struct pcpu *pc;
	int cpu;

	for (cpu = 1; cpu < mp_ncpus; cpu++) {
		if (bootstacks[cpu] != NULL) {
			pc = pcpu_find(cpu);
			while (atomic_load_ptr(&pc->pc_curpcb) == NULL)
				cpu_spinwait();
			kmem_free((vm_offset_t)bootstacks[cpu], PAGE_SIZE);
		}
	}
}
SYSINIT(smp_after_idle_runnable, SI_SUB_SMP, SI_ORDER_ANY,
    smp_after_idle_runnable, NULL);

/*
 *  Send an IPI through the interrupt controller.
 */
static void
pic_ipi_send(void *arg, cpuset_t cpus, u_int ipi)
{

	KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__));
	PIC_IPI_SEND(intr_irq_root_dev, arg, cpus, ipi);
}

/*
 *  Set up an IPI handler on the interrupt controller.
 *
 *  Not SMP coherent.
 */
static void
intr_pic_ipi_setup(u_int ipi, const char *name, intr_ipi_handler_t *hand,
    void *arg)
{
	struct intr_irqsrc *isrc;
	struct intr_ipi *ii;
	int error;

	KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__));
	KASSERT(hand != NULL, ("%s: ipi %u no handler", __func__, ipi));

	error = PIC_IPI_SETUP(intr_irq_root_dev, ipi, &isrc);
	if (error != 0)
		return;

	isrc->isrc_handlers++;

	ii = intr_ipi_lookup(ipi);
	KASSERT(ii->ii_count == NULL, ("%s: ipi %u reused", __func__, ipi));

	ii->ii_handler = hand;
	ii->ii_handler_arg = arg;
	ii->ii_send = pic_ipi_send;
	ii->ii_send_arg = isrc;
	strlcpy(ii->ii_name, name, INTR_IPI_NAMELEN);
	ii->ii_count = intr_ipi_setup_counters(name);
}

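/*
 * Deliver an IPI to the given set of CPUs; panics if the IPI was never
 * set up with intr_pic_ipi_setup().
 */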
static void
intr_ipi_send(cpuset_t cpus, u_int ipi)
{
	struct intr_ipi *ii;

	ii = intr_ipi_lookup(ipi);
	if (ii->ii_count == NULL)
		panic("%s: IPI %u not set up", __func__, ipi);

	ii->ii_send(ii->ii_send_arg, cpus, ipi);
}

static void
ipi_ast(void *dummy __unused)
{

	CTR0(KTR_SMP, "IPI_AST");
}

static void
ipi_hardclock(void *dummy __unused)
{

	CTR1(KTR_SMP, "%s: IPI_HARDCLOCK", __func__);
	hardclockintr();
}

static void
ipi_preempt(void *dummy __unused)
{
	CTR1(KTR_SMP, "%s: IPI_PREEMPT", __func__);
	sched_preempt(curthread);
}

static void
ipi_rendezvous(void *dummy __unused)
{

	CTR0(KTR_SMP, "IPI_RENDEZVOUS");
	smp_rendezvous_action();
}

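/*
 * Save the current CPU's context and park it in a spin loop until the
 * BSP marks it runnable again via started_cpus.
 */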
static void
ipi_stop(void *dummy __unused)
{
	u_int cpu;

	CTR0(KTR_SMP, "IPI_STOP");

	cpu = PCPU_GET(cpuid);
	savectx(&stoppcbs[cpu]);

	/* Indicate we are stopped */
	CPU_SET_ATOMIC(cpu, &stopped_cpus);

	/* Wait for restart */
	while (!CPU_ISSET(cpu, &started_cpus))
		cpu_spinwait();

#ifdef DDB
	dbg_register_sync(NULL);
#endif

	CPU_CLR_ATOMIC(cpu, &started_cpus);
	CPU_CLR_ATOMIC(cpu, &stopped_cpus);
	CTR0(KTR_SMP, "IPI_STOP (restart)");
}

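/* Report a flat SMP topology to the scheduler. */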
struct cpu_group *
cpu_topo(void)
{

	return (smp_topo_none());
}

/* Determine if we are running on an MP machine */
int
cpu_mp_probe(void)
{

	/* ARM64TODO: Read the u bit of mpidr_el1 to determine this */
	return (1);
}

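/*
 * Allocate the per-CPU data and a bootstrap stack for the given CPU and
 * ask PSCI to start it at mpentry().  Returns false only if the CPU id is
 * out of range; a PSCI failure is reported and the allocations are undone,
 * but starting the remaining CPUs continues.
 */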
static bool
start_cpu(u_int id, uint64_t target_cpu)
{
	struct pcpu *pcpup;
	vm_paddr_t pa;
	u_int cpuid;
	int err, naps;

	/* Check we are able to start this cpu */
	if (id > mp_maxid)
		return (false);

	KASSERT(id < MAXCPU, ("Too many CPUs"));

	/* We are already running on cpu 0 */
	if (id == cpu0)
		return (true);

	/*
	 * Rotate the CPU IDs to put the boot CPU as CPU 0. We keep the other
	 * CPUs ordered as they are likely grouped into clusters so it can be
	 * useful to keep that property, e.g. for the GICv3 driver to send
	 * an IPI to all CPUs in the cluster.
	 */
	cpuid = id;
	if (cpuid < cpu0)
		cpuid += mp_maxid + 1;
	cpuid -= cpu0;

	pcpup = &__pcpu[cpuid];
	pcpu_init(pcpup, cpuid, sizeof(struct pcpu));

	dpcpu[cpuid - 1] = (void *)kmem_malloc(DPCPU_SIZE, M_WAITOK | M_ZERO);
	dpcpu_init(dpcpu[cpuid - 1], cpuid);

	bootstacks[cpuid] = (void *)kmem_malloc(PAGE_SIZE, M_WAITOK | M_ZERO);

	naps = atomic_load_int(&aps_started);
	bootstack = (char *)bootstacks[cpuid] + PAGE_SIZE;

	printf("Starting CPU %u (%lx)\n", cpuid, target_cpu);
	pa = pmap_extract(kernel_pmap, (vm_offset_t)mpentry);
	err = psci_cpu_on(target_cpu, pa, cpuid);
	if (err != PSCI_RETVAL_SUCCESS) {
		/*
		 * Panic here if INVARIANTS are enabled and PSCI failed to
		 * start the requested CPU.  psci_cpu_on() returns PSCI_MISSING
		 * to indicate we are unable to use it to start the given CPU.
		 */
		KASSERT(err == PSCI_MISSING ||
		    (mp_quirks & MP_QUIRK_CPULIST) == MP_QUIRK_CPULIST,
		    ("Failed to start CPU %u (%lx), error %d\n",
		    id, target_cpu, err));

		pcpu_destroy(pcpup);
		kmem_free((vm_offset_t)dpcpu[cpuid - 1], DPCPU_SIZE);
		dpcpu[cpuid - 1] = NULL;
		kmem_free((vm_offset_t)bootstacks[cpuid], PAGE_SIZE);
		bootstacks[cpuid] = NULL;
		mp_ncpus--;

		/* Notify the user that the CPU failed to start */
		printf("Failed to start CPU %u (%lx), error %d\n",
		    id, target_cpu, err);
	} else {
		/* Wait for the AP to switch to its boot stack. */
		while (atomic_load_int(&aps_started) < naps + 1)
			cpu_spinwait();
		CPU_SET(cpuid, &all_cpus);
	}

	return (true);
}

#ifdef DEV_ACPI
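/*
 * Handle a single MADT subtable: start the CPU described by each GIC CPU
 * interface (GICC) entry and record its ACPI processor UID.
 */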
static void
madt_handler(ACPI_SUBTABLE_HEADER *entry, void *arg)
{
	ACPI_MADT_GENERIC_INTERRUPT *intr;
	u_int *cpuid;
	u_int id;

	switch (entry->Type) {
	case ACPI_MADT_TYPE_GENERIC_INTERRUPT:
		intr = (ACPI_MADT_GENERIC_INTERRUPT *)entry;
		cpuid = arg;
		id = *cpuid;
		start_cpu(id, intr->ArmMpidr);
		__pcpu[id].pc_acpi_id = intr->Uid;
		(*cpuid)++;
		break;
	default:
		break;
	}
}

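/*
 * Map the MADT and start the application processors it describes by
 * walking its subtables with madt_handler().
 */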
static void
cpu_init_acpi(void)
{
	ACPI_TABLE_MADT *madt;
	vm_paddr_t physaddr;
	u_int cpuid;

	physaddr = acpi_find_table(ACPI_SIG_MADT);
	if (physaddr == 0)
		return;

	madt = acpi_map_table(physaddr, ACPI_SIG_MADT);
	if (madt == NULL) {
		printf("Unable to map the MADT, not starting APs\n");
		return;
	}

	cpuid = 0;
	acpi_walk_subtables(madt + 1, (char *)madt + madt->Header.Length,
	    madt_handler, &cpuid);

	acpi_unmap_table(madt);

#if MAXMEMDOM > 1
	acpi_pxm_set_cpu_locality();
#endif
}
#endif

#ifdef FDT
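/*
 * Start the CPU described by the given device tree node and record its
 * NUMA domain, if any.
 */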
static boolean_t
cpu_init_fdt(u_int id, phandle_t node, u_int addr_size, pcell_t *reg)
{
	uint64_t target_cpu;
	int domain;

	target_cpu = reg[0];
	if (addr_size == 2) {
		target_cpu <<= 32;
		target_cpu |= reg[1];
	}

	if (!start_cpu(id, target_cpu))
		return (FALSE);

	/* Try to read the NUMA node of this CPU */
	if (vm_ndomains == 1 ||
	    OF_getencprop(node, "numa-node-id", &domain, sizeof(domain)) <= 0)
		domain = 0;
	__pcpu[id].pc_domain = domain;
	if (domain < MAXMEMDOM)
		CPU_SET(id, &cpuset_domain[domain]);

	return (TRUE);
}
#endif

/* Initialize and fire up non-boot processors */
void
cpu_mp_start(void)
{
#ifdef FDT
	phandle_t node;
	int i;
#endif

	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);

	CPU_SET(0, &all_cpus);

	switch (arm64_bus_method) {
#ifdef DEV_ACPI
	case ARM64_BUS_ACPI:
		mp_quirks = MP_QUIRK_CPULIST;
		KASSERT(cpu0 >= 0, ("Current CPU was not found"));
		cpu_init_acpi();
		break;
#endif
#ifdef FDT
	case ARM64_BUS_FDT:
		node = OF_peer(0);
		for (i = 0; fdt_quirks[i].compat != NULL; i++) {
			if (ofw_bus_node_is_compatible(node,
			    fdt_quirks[i].compat) != 0) {
				mp_quirks = fdt_quirks[i].quirks;
			}
		}
		KASSERT(cpu0 >= 0, ("Current CPU was not found"));
		ofw_cpu_early_foreach(cpu_init_fdt, true);
		break;
#endif
	default:
		break;
	}
}

/* Introduce the rest of the cores to the world */
void
cpu_mp_announce(void)
{
}

#ifdef DEV_ACPI
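/*
 * Count a CPU for each GICC subtable in the MADT, and note which entry
 * matches the boot CPU's MPIDR so it can be skipped later.
 */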
static void
cpu_count_acpi_handler(ACPI_SUBTABLE_HEADER *entry, void *arg)
{
	ACPI_MADT_GENERIC_INTERRUPT *intr;
	u_int *cores = arg;
	uint64_t mpidr_reg;

	switch (entry->Type) {
	case ACPI_MADT_TYPE_GENERIC_INTERRUPT:
		intr = (ACPI_MADT_GENERIC_INTERRUPT *)entry;
		if (cpu0 < 0) {
			mpidr_reg = READ_SPECIALREG(mpidr_el1);
			if ((mpidr_reg & 0xff00fffffful) == intr->ArmMpidr)
				cpu0 = *cores;
		}
		(*cores)++;
		break;
	default:
		break;
	}
}

static u_int
cpu_count_acpi(void)
{
	ACPI_TABLE_MADT *madt;
	vm_paddr_t physaddr;
	u_int cores;

	physaddr = acpi_find_table(ACPI_SIG_MADT);
	if (physaddr == 0)
		return (0);

	madt = acpi_map_table(physaddr, ACPI_SIG_MADT);
	if (madt == NULL) {
		printf("Unable to map the MADT, not starting APs\n");
		return (0);
	}

	cores = 0;
	acpi_walk_subtables(madt + 1, (char *)madt + madt->Header.Length,
	    cpu_count_acpi_handler, &cores);

	acpi_unmap_table(madt);

	return (cores);
}
#endif

#ifdef FDT
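/*
 * Callback for ofw_cpu_early_foreach(): find the boot CPU by comparing
 * each CPU node's reg value against this CPU's mpidr_el1.
 */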
static boolean_t
cpu_find_cpu0_fdt(u_int id, phandle_t node, u_int addr_size, pcell_t *reg)
{
	uint64_t mpidr_fdt, mpidr_reg;

	if (cpu0 < 0) {
		mpidr_fdt = reg[0];
		if (addr_size == 2) {
			mpidr_fdt <<= 32;
			mpidr_fdt |= reg[1];
		}

		mpidr_reg = READ_SPECIALREG(mpidr_el1);

		if ((mpidr_reg & 0xff00fffffful) == mpidr_fdt)
			cpu0 = id;
	}

	return (TRUE);
}
#endif

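/*
 * Determine mp_ncpus and mp_maxid from the ACPI tables or the device
 * tree, clamped to MAXCPU and optionally capped by the hw.ncpu tunable.
 */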
void
cpu_mp_setmaxid(void)
{
	int cores;

	mp_ncpus = 1;
	mp_maxid = 0;

	switch (arm64_bus_method) {
#ifdef DEV_ACPI
	case ARM64_BUS_ACPI:
		cores = cpu_count_acpi();
		if (cores > 0) {
			cores = MIN(cores, MAXCPU);
			if (bootverbose)
				printf("Found %d CPUs in the ACPI tables\n",
				    cores);
			mp_ncpus = cores;
			mp_maxid = cores - 1;
		}
		break;
#endif
#ifdef FDT
	case ARM64_BUS_FDT:
		cores = ofw_cpu_early_foreach(cpu_find_cpu0_fdt, false);
		if (cores > 0) {
			cores = MIN(cores, MAXCPU);
			if (bootverbose)
				printf("Found %d CPUs in the device tree\n",
				    cores);
			mp_ncpus = cores;
			mp_maxid = cores - 1;
		}
		break;
#endif
	default:
		if (bootverbose)
			printf("No CPU data, limiting to 1 core\n");
		break;
	}

	if (TUNABLE_INT_FETCH("hw.ncpu", &cores)) {
		if (cores > 0 && cores < mp_ncpus) {
			mp_ncpus = cores;
			mp_maxid = cores - 1;
		}
	}
}

/*
 *  Look up an IPI source.
 */
static struct intr_ipi *
intr_ipi_lookup(u_int ipi)
{

	if (ipi >= INTR_IPI_COUNT)
		panic("%s: no such IPI %u", __func__, ipi);

	return (&ipi_sources[ipi]);
}

/*
 *  Interrupt-controller dispatch function for IPIs. It should be called
 *  straight from the interrupt controller when the associated interrupt
 *  source is learned, or from anybody who has an interrupt source mapped.
 */
void
intr_ipi_dispatch(u_int ipi, struct trapframe *tf)
{
	void *arg;
	struct intr_ipi *ii;

	ii = intr_ipi_lookup(ipi);
	if (ii->ii_count == NULL)
		panic("%s: IPI %u not set up", __func__, ipi);

	intr_ipi_increment_count(ii->ii_count, PCPU_GET(cpuid));

	/*
	 * Supply the IPI handler with the trapframe as its
	 * argument if no argument was registered.
	 */
	arg = ii->ii_handler_arg != NULL ? ii->ii_handler_arg : tf;
	ii->ii_handler(arg);
}

#ifdef notyet
/*
 *  Map IPI into interrupt controller.
 *
 *  Not SMP coherent.
 */
static int
ipi_map(struct intr_irqsrc *isrc, u_int ipi)
{
	boolean_t is_percpu;
	int error;

	if (ipi >= INTR_IPI_COUNT)
		panic("%s: no such IPI %u", __func__, ipi);

	KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__));

	isrc->isrc_type = INTR_ISRCT_NAMESPACE;
	isrc->isrc_nspc_type = INTR_IRQ_NSPC_IPI;
	isrc->isrc_nspc_num = ipi_next_num;

	error = PIC_REGISTER(intr_irq_root_dev, isrc, &is_percpu);
	if (error == 0) {
		isrc->isrc_dev = intr_irq_root_dev;
		ipi_next_num++;
	}
	return (error);
}

/*
 *  Set up an IPI handler for an interrupt source.
 *
 *  Note that a platform may provide other ways to send and receive IPIs,
 *  fast interrupts for example. In that case, one can call this function
 *  with the AISHF_NOALLOC flag set and then call intr_ipi_dispatch() when
 *  appropriate.
 *
 *  Not SMP coherent.
 */
int
intr_ipi_set_handler(u_int ipi, const char *name, intr_ipi_filter_t *filter,
    void *arg, u_int flags)
{
	struct intr_irqsrc *isrc;
	int error;

	if (filter == NULL)
		return (EINVAL);

	isrc = intr_ipi_lookup(ipi);
	if (isrc->isrc_ipifilter != NULL)
		return (EEXIST);

	if ((flags & AISHF_NOALLOC) == 0) {
		error = ipi_map(isrc, ipi);
		if (error != 0)
			return (error);
	}

	isrc->isrc_ipifilter = filter;
	isrc->isrc_arg = arg;
	isrc->isrc_handlers = 1;
	isrc->isrc_count = intr_ipi_setup_counters(name);
	isrc->isrc_index = 0; /* it should not be used in IPI case */

	if (isrc->isrc_dev != NULL) {
		PIC_ENABLE_INTR(isrc->isrc_dev, isrc);
		PIC_ENABLE_SOURCE(isrc->isrc_dev, isrc);
	}
	return (0);
}
#endif

/* Sending IPIs */
void
ipi_all_but_self(u_int ipi)
{
	cpuset_t cpus;

	cpus = all_cpus;
	CPU_CLR(PCPU_GET(cpuid), &cpus);
	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	intr_ipi_send(cpus, ipi);
}

void
ipi_cpu(int cpu, u_int ipi)
{
	cpuset_t cpus;

	CPU_ZERO(&cpus);
	CPU_SET(cpu, &cpus);

	CTR3(KTR_SMP, "%s: cpu: %d, ipi: %x", __func__, cpu, ipi);
	intr_ipi_send(cpus, ipi);
}

void
ipi_selected(cpuset_t cpus, u_int ipi)
{

	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	intr_ipi_send(cpus, ipi);
}
881