xref: /freebsd/sys/arm64/arm64/mp_machdep.c (revision 325151a3)
/*-
 * Copyright (c) 2015 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Andrew Turner under
 * sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include "opt_kstack_pages.h"
#include "opt_platform.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>

#include <machine/intr.h>
#include <machine/smp.h>
#ifdef VFP
#include <machine/vfp.h>
#endif

#ifdef FDT
#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_cpu.h>
#endif

#include <dev/psci/psci.h>

boolean_t ofw_cpu_reg(phandle_t node, u_int, cell_t *);

extern struct pcpu __pcpu[];

static enum {
	CPUS_UNKNOWN,
#ifdef FDT
	CPUS_FDT,
#endif
} cpu_enum_method;

static device_identify_t arm64_cpu_identify;
static device_probe_t arm64_cpu_probe;
static device_attach_t arm64_cpu_attach;

static int ipi_handler(void *arg);

struct mtx ap_boot_mtx;
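/* Saved register context for each CPU while it is stopped by IPI_STOP. */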
struct pcb stoppcbs[MAXCPU];

#ifdef INVARIANTS
static uint32_t cpu_reg[MAXCPU][2];
#endif
static device_t cpu_list[MAXCPU];

void mpentry(unsigned long cpuid);
void init_secondary(uint64_t);

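/* Boot-time kernel stacks for the application processors. */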
uint8_t secondary_stacks[MAXCPU - 1][PAGE_SIZE * KSTACK_PAGES] __aligned(16);

/* Set to 1 once we're ready to let the APs out of the pen. */
volatile int aps_ready = 0;

/* Temporary variables for init_secondary() */
void *dpcpu[MAXCPU - 1];

static device_method_t arm64_cpu_methods[] = {
	/* Device interface */
	DEVMETHOD(device_identify,	arm64_cpu_identify),
	DEVMETHOD(device_probe,		arm64_cpu_probe),
	DEVMETHOD(device_attach,	arm64_cpu_attach),

	DEVMETHOD_END
};

static devclass_t arm64_cpu_devclass;
static driver_t arm64_cpu_driver = {
	"arm64_cpu",
	arm64_cpu_methods,
	0
};

DRIVER_MODULE(arm64_cpu, cpu, arm64_cpu_driver, arm64_cpu_devclass, 0, 0);

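/*
 * Identify method: add a single arm64_cpu child below the cpu device unless
 * one is already present.
 */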
static void
arm64_cpu_identify(driver_t *driver, device_t parent)
{

	if (device_find_child(parent, "arm64_cpu", -1) != NULL)
		return;
	if (BUS_ADD_CHILD(parent, 0, "arm64_cpu", -1) == NULL)
		device_printf(parent, "add child failed\n");
}

static int
arm64_cpu_probe(device_t dev)
{
	u_int cpuid;

	cpuid = device_get_unit(dev);
	if (cpuid >= MAXCPU || cpuid > mp_maxid)
		return (EINVAL);

	return (0);
}

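/*
 * Attach method: validate the unit number, print the CPU's "reg" value and
 * record the device in cpu_list so the CPU can be started later.
 */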
static int
arm64_cpu_attach(device_t dev)
{
	const uint32_t *reg;
	size_t reg_size;
	u_int cpuid;
	int i;

	cpuid = device_get_unit(dev);

	if (cpuid >= MAXCPU || cpuid > mp_maxid)
		return (EINVAL);
	KASSERT(cpu_list[cpuid] == NULL, ("Already have cpu %u", cpuid));

	reg = cpu_get_cpuid(dev, &reg_size);
	if (reg == NULL)
		return (EINVAL);

	device_printf(dev, "Found register:");
	for (i = 0; i < reg_size; i++)
		printf(" %x", reg[i]);
	printf("\n");

	/* Save the device so we can start the CPU later */
	cpu_list[cpuid] = dev;

	return (0);
}

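/*
 * Run from a SYSINIT at SI_SUB_SMP: install the IPI handlers, release the
 * spinning APs by setting aps_ready and issuing an SEV, then wait up to two
 * seconds for them to report in via smp_started.
 */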
static void
release_aps(void *dummy __unused)
{
	int i;

	/* Setup the IPI handler */
	for (i = 0; i < COUNT_IPI; i++)
		arm_setup_ipihandler(ipi_handler, i);

	atomic_store_rel_int(&aps_ready, 1);
	/* Wake up the other CPUs */
	__asm __volatile("sev");

	printf("Release APs\n");

	for (i = 0; i < 2000; i++) {
		if (smp_started)
			return;
		DELAY(1000);
	}

	printf("APs not started\n");
}
SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);

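/*
 * Secondary (AP) entry point, reached from mpentry with the CPU index as
 * argument.  Sets up the per-CPU data pointer, spins until the BSP sets
 * aps_ready, then completes per-CPU initialization and enters the scheduler.
 */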
void
init_secondary(uint64_t cpu)
{
	struct pcpu *pcpup;
	int i;

	pcpup = &__pcpu[cpu];
	/*
	 * Set the pcpu pointer with a backup in tpidr_el1 to be
	 * loaded when entering the kernel from userland.
	 */
	__asm __volatile(
	    "mov x18, %0 \n"
	    "msr tpidr_el1, %0" :: "r"(pcpup));

	/* Spin until the BSP releases the APs */
	while (!aps_ready)
		__asm __volatile("wfe");

	/* Initialize curthread */
	KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
	pcpup->pc_curthread = pcpup->pc_idlethread;
	pcpup->pc_curpcb = pcpup->pc_idlethread->td_pcb;

	/*
	 * Identify current CPU. This is necessary to setup
	 * affinity registers and to provide support for
	 * runtime chip identification.
	 */
	identify_cpu();

	/* Configure the interrupt controller */
	arm_init_secondary();

	for (i = 0; i < COUNT_IPI; i++)
		arm_unmask_ipi(i);

	/* Start per-CPU event timers. */
	cpu_initclocks_ap();

#ifdef VFP
	vfp_init();
#endif

	/* Enable interrupts */
	intr_enable();

	mtx_lock_spin(&ap_boot_mtx);

	atomic_add_rel_32(&smp_cpus, 1);

	if (smp_cpus == mp_ncpus) {
		/* Enable IPIs, TLB shootdowns, freezes, etc. */
		atomic_store_rel_int(&smp_started, 1);
	}

	mtx_unlock_spin(&ap_boot_mtx);

	/* Enter the scheduler */
	sched_throw(NULL);

	panic("scheduler returned us to init_secondary");
	/* NOTREACHED */
}

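/*
 * Interrupt filter run when an inter-processor interrupt arrives.  The IPI
 * number is carried in the handler argument; dispatch on it and return
 * FILTER_HANDLED.
 */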
static int
ipi_handler(void *arg)
{
	u_int cpu, ipi;

	arg = (void *)((uintptr_t)arg & ~(1 << 16));
	KASSERT((uintptr_t)arg < COUNT_IPI,
	    ("Invalid IPI %ju", (uintptr_t)arg));

	cpu = PCPU_GET(cpuid);
	ipi = (uintptr_t)arg;

	switch(ipi) {
	case IPI_AST:
		CTR0(KTR_SMP, "IPI_AST");
		break;
	case IPI_PREEMPT:
		CTR1(KTR_SMP, "%s: IPI_PREEMPT", __func__);
		sched_preempt(curthread);
		break;
	case IPI_RENDEZVOUS:
		CTR0(KTR_SMP, "IPI_RENDEZVOUS");
		smp_rendezvous_action();
		break;
	case IPI_STOP:
	case IPI_STOP_HARD:
		CTR0(KTR_SMP, (ipi == IPI_STOP) ? "IPI_STOP" : "IPI_STOP_HARD");
		savectx(&stoppcbs[cpu]);

		/* Indicate we are stopped */
		CPU_SET_ATOMIC(cpu, &stopped_cpus);

		/* Wait for restart */
		while (!CPU_ISSET(cpu, &started_cpus))
			cpu_spinwait();

		CPU_CLR_ATOMIC(cpu, &started_cpus);
		CPU_CLR_ATOMIC(cpu, &stopped_cpus);
		CTR0(KTR_SMP, "IPI_STOP (restart)");
		break;
	case IPI_HARDCLOCK:
		CTR1(KTR_SMP, "%s: IPI_HARDCLOCK", __func__);
		hardclockintr();
		break;
	default:
		panic("Unknown IPI %#0x on cpu %d", ipi, curcpu);
	}

	return (FILTER_HANDLED);
}

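/* Report a flat topology; core and cache sharing is not described yet. */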
struct cpu_group *
cpu_topo(void)
{

	return (smp_topo_none());
}

/* Determine whether we are running on an MP machine */
int
cpu_mp_probe(void)
{

	/* ARM64TODO: Read the u bit of mpidr_el1 to determine this */
	return (1);
}

#ifdef FDT
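/*
 * Per-CPU callback for ofw_cpu_early_foreach(): allocate the pcpu and dpcpu
 * data for this CPU, combine the FDT "reg" cells into the target CPU id and
 * ask PSCI to start that CPU at mpentry's physical address.
 */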
static boolean_t
cpu_init_fdt(u_int id, phandle_t node, u_int addr_size, pcell_t *reg)
{
	uint64_t target_cpu;
	struct pcpu *pcpup;
	vm_paddr_t pa;
	int err;

	/* Check that we are able to start this CPU */
	if (id > mp_maxid)
		return (0);

	KASSERT(id < MAXCPU, ("Too many CPUs"));

	KASSERT(addr_size == 1 || addr_size == 2, ("Invalid register size"));
#ifdef INVARIANTS
	cpu_reg[id][0] = reg[0];
	if (addr_size == 2)
		cpu_reg[id][1] = reg[1];
#endif

	/* We are already running on cpu 0 */
	if (id == 0)
		return (1);

	pcpup = &__pcpu[id];
	pcpu_init(pcpup, id, sizeof(struct pcpu));

	dpcpu[id - 1] = (void *)kmem_malloc(kernel_arena, DPCPU_SIZE,
	    M_WAITOK | M_ZERO);
	dpcpu_init(dpcpu[id - 1], id);

	target_cpu = reg[0];
	if (addr_size == 2) {
		target_cpu <<= 32;
		target_cpu |= reg[1];
	}

	printf("Starting CPU %u (%lx)\n", id, target_cpu);
	pa = pmap_extract(kernel_pmap, (vm_offset_t)mpentry);

	err = psci_cpu_on(target_cpu, pa, id);
	if (err != PSCI_RETVAL_SUCCESS) {
		/* Panic here if INVARIANTS are enabled */
		KASSERT(0, ("Failed to start CPU %u (%lx)\n", id, target_cpu));

		pcpu_destroy(pcpup);
		kmem_free(kernel_arena, (vm_offset_t)dpcpu[id - 1], DPCPU_SIZE);
		dpcpu[id - 1] = NULL;
		/* Notify the user that the CPU failed to start */
		printf("Failed to start CPU %u (%lx)\n", id, target_cpu);
	} else
		CPU_SET(id, &all_cpus);

	return (1);
}
#endif

/* Initialize and fire up non-boot processors */
void
cpu_mp_start(void)
{

	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);

	CPU_SET(0, &all_cpus);

	switch(cpu_enum_method) {
#ifdef FDT
	case CPUS_FDT:
		ofw_cpu_early_foreach(cpu_init_fdt, true);
		break;
#endif
	case CPUS_UNKNOWN:
		break;
	}
}

/* Introduce rest of cores to the world */
void
cpu_mp_announce(void)
{
}

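/*
 * Set mp_ncpus and mp_maxid, using the CPU count from the device tree when
 * one is available and falling back to a single CPU otherwise.
 */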
void
cpu_mp_setmaxid(void)
{
#ifdef FDT
	int cores;

	cores = ofw_cpu_early_foreach(NULL, false);
	if (cores > 0) {
		cores = MIN(cores, MAXCPU);
		if (bootverbose)
			printf("Found %d CPUs in the device tree\n", cores);
		mp_ncpus = cores;
		mp_maxid = cores - 1;
		cpu_enum_method = CPUS_FDT;
		return;
	}
#endif

	if (bootverbose)
		printf("No CPU data, limiting to 1 core\n");
	mp_ncpus = 1;
	mp_maxid = 0;
}