/*-
 * Copyright (c) 2015 The FreeBSD Foundation
 * Copyright (c) 2016 Ruslan Bukin <br@bsdpad.com>
 * All rights reserved.
 *
 * Portions of this software were developed by Andrew Turner under
 * sponsorship from the FreeBSD Foundation.
 *
 * Portions of this software were developed by SRI International and the
 * University of Cambridge Computer Laboratory under DARPA/AFRL contract
 * FA8750-10-C-0237 ("CTSRD"), as part of the DARPA CRASH research programme.
 *
 * Portions of this software were developed by the University of Cambridge
 * Computer Laboratory as part of the CTSRD Project, with support from the
 * UK Higher Education Innovation Fund (HEIF).
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_kstack_pages.h"
#include "opt_platform.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/cpuset.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>

#include <machine/intr.h>
#include <machine/smp.h>
#include <machine/sbi.h>

#ifdef FDT
#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_cpu.h>
#endif

#define	MP_BOOTSTACK_SIZE	(kstack_pages * PAGE_SIZE)

boolean_t ofw_cpu_reg(phandle_t node, u_int, cell_t *);
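
/* Per-hart release flags; each AP spins in locore.S until its flag is set. */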
uint32_t __riscv_boot_ap[MAXCPU];

static enum {
	CPUS_UNKNOWN,
#ifdef FDT
	CPUS_FDT,
#endif
} cpu_enum_method;

static device_identify_t riscv64_cpu_identify;
static device_probe_t riscv64_cpu_probe;
static device_attach_t riscv64_cpu_attach;

static int ipi_handler(void *);
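
/* Register state of stopped CPUs, saved by savectx() on IPI_STOP. */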
struct pcb stoppcbs[MAXCPU];

extern uint32_t boot_hart;
extern cpuset_t all_harts;

#ifdef INVARIANTS
static uint32_t cpu_reg[MAXCPU][2];
#endif
static device_t cpu_list[MAXCPU];

void mpentry(u_long hartid);
void init_secondary(uint64_t);

static struct mtx ap_boot_mtx;

/* Stacks for AP initialization, discarded once idle threads are started. */
void *bootstack;
static void *bootstacks[MAXCPU];

/* Count of started APs, used to synchronize access to bootstack. */
static volatile int aps_started;

/* Set to 1 once we're ready to let the APs out of the pen. */
static volatile int aps_ready;

/* Temporary variables for init_secondary() */
void *dpcpu[MAXCPU - 1];

static device_method_t riscv64_cpu_methods[] = {
	/* Device interface */
	DEVMETHOD(device_identify,	riscv64_cpu_identify),
	DEVMETHOD(device_probe,		riscv64_cpu_probe),
	DEVMETHOD(device_attach,	riscv64_cpu_attach),

	DEVMETHOD_END
};

static driver_t riscv64_cpu_driver = {
	"riscv64_cpu",
	riscv64_cpu_methods,
	0
};

DRIVER_MODULE(riscv64_cpu, cpu, riscv64_cpu_driver, 0, 0);

static void
riscv64_cpu_identify(driver_t *driver, device_t parent)
{

	if (device_find_child(parent, "riscv64_cpu", -1) != NULL)
		return;
	if (BUS_ADD_CHILD(parent, 0, "riscv64_cpu", -1) == NULL)
		device_printf(parent, "add child failed\n");
}

static int
riscv64_cpu_probe(device_t dev)
{
	u_int cpuid;

	cpuid = device_get_unit(dev);
	if (cpuid >= MAXCPU || cpuid > mp_maxid)
		return (EINVAL);

	device_quiet(dev);
	return (0);
}

static int
riscv64_cpu_attach(device_t dev)
{
	const uint32_t *reg;
	size_t reg_size;
	u_int cpuid;
	int i;

	cpuid = device_get_unit(dev);

	if (cpuid >= MAXCPU || cpuid > mp_maxid)
		return (EINVAL);
	KASSERT(cpu_list[cpuid] == NULL, ("Already have cpu %u", cpuid));

	reg = cpu_get_cpuid(dev, &reg_size);
	if (reg == NULL)
		return (EINVAL);

	if (bootverbose) {
		device_printf(dev, "register <");
		for (i = 0; i < reg_size; i++)
			printf("%s%x", (i == 0) ? "" : " ", reg[i]);
		printf(">\n");
	}

	/* Record the device so we can start it later. */
	cpu_list[cpuid] = dev;

	return (0);
}

static void
release_aps(void *dummy __unused)
{
	cpuset_t mask;
	int i;

	if (mp_ncpus == 1)
		return;

	/* Set up the IPI handler */
	riscv_setup_ipihandler(ipi_handler);

	atomic_store_rel_int(&aps_ready, 1);

	/* Wake up the other CPUs */
	mask = all_harts;
	CPU_CLR(boot_hart, &mask);

	printf("Release APs\n");

	sbi_send_ipi(mask.__bits);
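
	/* Wait up to two seconds (2000 * 1 ms) for the APs to come up. */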
	for (i = 0; i < 2000; i++) {
		if (atomic_load_acq_int(&smp_started))
			return;
		DELAY(1000);
	}

	printf("APs not started\n");
}
SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);

void
init_secondary(uint64_t hart)
{
	struct pcpu *pcpup;
	u_int cpuid;

	/* Renumber this CPU: rotate hart IDs so the boot hart becomes CPU 0. */
	cpuid = hart;
	if (cpuid < boot_hart)
		cpuid += mp_maxid + 1;
	cpuid -= boot_hart;

	/* Set up the pcpu pointer; the kernel keeps it in the tp register. */
	pcpup = &__pcpu[cpuid];
	__asm __volatile("mv tp, %0" :: "r"(pcpup));

	/*
	 * Workaround: make sure wfi doesn't halt the hart.  An enabled,
	 * pending software interrupt gives wfi a wakeup source.
	 */
	csr_set(sie, SIE_SSIE);
	csr_set(sip, SIE_SSIE);

	/* Signal the BSP and spin until it has released all APs. */
	atomic_add_int(&aps_started, 1);
	while (!atomic_load_int(&aps_ready))
		__asm __volatile("wfi");

	/* Initialize curthread */
	KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
	pcpup->pc_curthread = pcpup->pc_idlethread;
	schedinit_ap();

	/*
	 * Identify the current CPU.  This is necessary to set up
	 * affinity registers and to provide support for runtime chip
	 * identification.
	 */
	identify_cpu();

	/* Enable software interrupts */
	riscv_unmask_ipi();

#ifndef EARLY_AP_STARTUP
	/* Start per-CPU event timers. */
	cpu_initclocks_ap();
#endif

	/* Enable external (PLIC) interrupts */
	csr_set(sie, SIE_SEIE);

	/* Activate this hart in the kernel pmap. */
	CPU_SET_ATOMIC(hart, &kernel_pmap->pm_active);

	/* Activate process 0's pmap. */
	pmap_activate_boot(vmspace_pmap(proc0.p_vmspace));

	mtx_lock_spin(&ap_boot_mtx);

	atomic_add_rel_32(&smp_cpus, 1);

	if (smp_cpus == mp_ncpus) {
		/* Enable IPIs, TLB shootdowns, freezes, etc. */
		atomic_store_rel_int(&smp_started, 1);
	}

	mtx_unlock_spin(&ap_boot_mtx);

	/* Enter the scheduler */
	sched_ap_entry();

	panic("scheduler returned us to init_secondary");
	/* NOTREACHED */
}

static void
smp_after_idle_runnable(void *arg __unused)
{
	int cpu;

	if (mp_ncpus == 1)
		return;

	KASSERT(smp_started != 0, ("%s: SMP not started yet", __func__));

	/*
	 * Wait for all APs to handle an interrupt.  After that, we know that
	 * the APs have entered the scheduler at least once, so the boot stacks
	 * are safe to free.
	 */
	smp_rendezvous(smp_no_rendezvous_barrier, NULL,
	    smp_no_rendezvous_barrier, NULL);

	for (cpu = 1; cpu <= mp_maxid; cpu++) {
		if (bootstacks[cpu] != NULL)
			kmem_free(bootstacks[cpu], MP_BOOTSTACK_SIZE);
	}
}
SYSINIT(smp_after_idle_runnable, SI_SUB_SMP, SI_ORDER_ANY,
    smp_after_idle_runnable, NULL);

static int
ipi_handler(void *arg)
{
	u_int ipi_bitmap;
	u_int cpu, ipi;
	int bit;
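
	/* Clear the supervisor software interrupt pending for this IPI. */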
	csr_clear(sip, SIP_SSIP);

	cpu = PCPU_GET(cpuid);

	mb();

	ipi_bitmap = atomic_readandclear_int(PCPU_PTR(pending_ipis));
	if (ipi_bitmap == 0)
		return (FILTER_HANDLED);
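
	/* ffs() returns the 1-based index of the lowest set bit, or 0. */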
	while ((bit = ffs(ipi_bitmap))) {
		bit = (bit - 1);
		ipi = (1 << bit);
		ipi_bitmap &= ~ipi;

		mb();

		switch (ipi) {
		case IPI_AST:
			CTR0(KTR_SMP, "IPI_AST");
			break;
		case IPI_PREEMPT:
			CTR1(KTR_SMP, "%s: IPI_PREEMPT", __func__);
			sched_preempt(curthread);
			break;
		case IPI_RENDEZVOUS:
			CTR0(KTR_SMP, "IPI_RENDEZVOUS");
			smp_rendezvous_action();
			break;
		case IPI_STOP:
		case IPI_STOP_HARD:
			CTR0(KTR_SMP,
			    (ipi == IPI_STOP) ? "IPI_STOP" : "IPI_STOP_HARD");
			savectx(&stoppcbs[cpu]);

			/* Indicate we are stopped */
			CPU_SET_ATOMIC(cpu, &stopped_cpus);

			/* Wait for restart */
			while (!CPU_ISSET(cpu, &started_cpus))
				cpu_spinwait();

			CPU_CLR_ATOMIC(cpu, &started_cpus);
			CPU_CLR_ATOMIC(cpu, &stopped_cpus);
			CTR0(KTR_SMP, "IPI_STOP (restart)");

			/*
			 * The kernel debugger might have set a breakpoint,
			 * so flush the instruction cache.
			 */
			fence_i();
			break;
		case IPI_HARDCLOCK:
			CTR1(KTR_SMP, "%s: IPI_HARDCLOCK", __func__);
			hardclockintr();
			break;
		default:
			panic("Unknown IPI %#0x on cpu %d", ipi, curcpu);
		}
	}

	return (FILTER_HANDLED);
}

struct cpu_group *
cpu_topo(void)
{

	return (smp_topo_none());
}

/* Determine if we are running on an MP machine. */
int
cpu_mp_probe(void)
{

	return (mp_ncpus > 1);
}

#ifdef FDT
static boolean_t
cpu_init_fdt(u_int id, phandle_t node, u_int addr_size, pcell_t *reg)
{
	struct pcpu *pcpup;
	vm_paddr_t start_addr;
	uint64_t hart;
	u_int cpuid;
	int naps;
	int error;

	/* Check if this hart supports an MMU. */
	if (OF_getproplen(node, "mmu-type") < 0)
		return (0);

	KASSERT(id < MAXCPU, ("Too many CPUs"));

	KASSERT(addr_size == 1 || addr_size == 2, ("Invalid register size"));
#ifdef INVARIANTS
	cpu_reg[id][0] = reg[0];
	if (addr_size == 2)
		cpu_reg[id][1] = reg[1];
#endif

	hart = reg[0];
	if (addr_size == 2) {
		hart <<= 32;
		hart |= reg[1];
	}

	KASSERT(hart < MAXCPU, ("Too many harts."));

	/* We are already running on this CPU. */
	if (hart == boot_hart)
		return (1);

	/*
	 * Rotate the CPU IDs so that the boot CPU becomes CPU 0.
	 * We keep the other CPUs ordered.
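	 * For example, with boot_hart == 2 and mp_maxid == 3, harts 2 and 3
	 * become CPUs 0 and 1 while harts 0 and 1 wrap to CPUs 2 and 3.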
	 */
	cpuid = hart;
	if (cpuid < boot_hart)
		cpuid += mp_maxid + 1;
	cpuid -= boot_hart;

	/* Check if we are able to start this CPU. */
	if (cpuid > mp_maxid)
		return (0);

	/*
	 * Depending on the SBI implementation, APs are waiting either in
	 * locore.S or to be activated explicitly, via an SBI call.
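	 * A nonzero return from sbi_probe_extension() means the HSM (Hart
	 * State Management) extension is implemented, so the hart must be
	 * started explicitly.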
	 */
	if (sbi_probe_extension(SBI_EXT_ID_HSM) != 0) {
		start_addr = pmap_kextract((vm_offset_t)mpentry);
		error = sbi_hsm_hart_start(hart, start_addr, 0);
		if (error != 0) {
			mp_ncpus--;

			/* Send a warning to the user and continue. */
			printf("AP %u (hart %lu) failed to start, error %d\n",
			    cpuid, hart, error);
			return (0);
		}
	}

	pcpup = &__pcpu[cpuid];
	pcpu_init(pcpup, cpuid, sizeof(struct pcpu));
	pcpup->pc_hart = hart;

	dpcpu[cpuid - 1] = kmem_malloc(DPCPU_SIZE, M_WAITOK | M_ZERO);
	dpcpu_init(dpcpu[cpuid - 1], cpuid);

	bootstacks[cpuid] = kmem_malloc(MP_BOOTSTACK_SIZE, M_WAITOK | M_ZERO);

	naps = atomic_load_int(&aps_started);
	bootstack = (char *)bootstacks[cpuid] + MP_BOOTSTACK_SIZE;
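
	/* Release the AP spinning in locore.S; it will take the boot stack. */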
	printf("Starting CPU %u (hart %lx)\n", cpuid, hart);
	atomic_store_32(&__riscv_boot_ap[hart], 1);

	/* Wait for the AP to switch to its boot stack. */
	while (atomic_load_int(&aps_started) < naps + 1)
		cpu_spinwait();

	CPU_SET(cpuid, &all_cpus);
	CPU_SET(hart, &all_harts);

	return (1);
}
#endif

/* Initialize and fire up non-boot processors */
void
cpu_mp_start(void)
{

	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);

	CPU_SET(0, &all_cpus);
	CPU_SET(boot_hart, &all_harts);

	switch (cpu_enum_method) {
#ifdef FDT
	case CPUS_FDT:
		ofw_cpu_early_foreach(cpu_init_fdt, true);
		break;
#endif
	case CPUS_UNKNOWN:
		break;
	}
}

/* Introduce the rest of the cores to the world. */
void
cpu_mp_announce(void)
{
}

static boolean_t
cpu_check_mmu(u_int id, phandle_t node, u_int addr_size, pcell_t *reg)
{

	/* Check if this hart supports an MMU. */
	if (OF_getproplen(node, "mmu-type") < 0)
		return (0);

	return (1);
}

void
cpu_mp_setmaxid(void)
{
	int cores;

#ifdef FDT
	cores = ofw_cpu_early_foreach(cpu_check_mmu, true);
	if (cores > 0) {
		cores = MIN(cores, MAXCPU);
		if (bootverbose)
			printf("Found %d CPUs in the device tree\n", cores);
		mp_ncpus = cores;
		mp_maxid = cores - 1;
		cpu_enum_method = CPUS_FDT;
	} else
#endif
	{
		if (bootverbose)
			printf("No CPU data, limiting to 1 core\n");
		mp_ncpus = 1;
		mp_maxid = 0;
	}
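
	/* The hw.ncpu tunable can lower the CPU count, but never raise it. */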
	if (TUNABLE_INT_FETCH("hw.ncpu", &cores)) {
		if (cores > 0 && cores < mp_ncpus) {
			mp_ncpus = cores;
			mp_maxid = cores - 1;
		}
	}
}
567