1 /*- 2 * Copyright (c) 2015 The FreeBSD Foundation 3 * Copyright (c) 2016 Ruslan Bukin <br@bsdpad.com> 4 * All rights reserved. 5 * 6 * Portions of this software were developed by Andrew Turner under 7 * sponsorship from the FreeBSD Foundation. 8 * 9 * Portions of this software were developed by SRI International and the 10 * University of Cambridge Computer Laboratory under DARPA/AFRL contract 11 * FA8750-10-C-0237 ("CTSRD"), as part of the DARPA CRASH research programme. 12 * 13 * Portions of this software were developed by the University of Cambridge 14 * Computer Laboratory as part of the CTSRD Project, with support from the 15 * UK Higher Education Innovation Fund (HEIF). 16 * 17 * Redistribution and use in source and binary forms, with or without 18 * modification, are permitted provided that the following conditions 19 * are met: 20 * 1. Redistributions of source code must retain the above copyright 21 * notice, this list of conditions and the following disclaimer. 22 * 2. Redistributions in binary form must reproduce the above copyright 23 * notice, this list of conditions and the following disclaimer in the 24 * documentation and/or other materials provided with the distribution. 25 * 26 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 29 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 36 * SUCH DAMAGE. 37 */ 38 39 #include "opt_kstack_pages.h" 40 #include "opt_platform.h" 41 42 #include <sys/cdefs.h> 43 __FBSDID("$FreeBSD$"); 44 45 #include <sys/param.h> 46 #include <sys/systm.h> 47 #include <sys/bus.h> 48 #include <sys/cpu.h> 49 #include <sys/cpuset.h> 50 #include <sys/kernel.h> 51 #include <sys/ktr.h> 52 #include <sys/malloc.h> 53 #include <sys/module.h> 54 #include <sys/mutex.h> 55 #include <sys/proc.h> 56 #include <sys/sched.h> 57 #include <sys/smp.h> 58 59 #include <vm/vm.h> 60 #include <vm/pmap.h> 61 #include <vm/vm_extern.h> 62 #include <vm/vm_kern.h> 63 #include <vm/vm_map.h> 64 65 #include <machine/intr.h> 66 #include <machine/smp.h> 67 #include <machine/sbi.h> 68 69 #ifdef FDT 70 #include <dev/ofw/openfirm.h> 71 #include <dev/ofw/ofw_cpu.h> 72 #endif 73 74 #define MP_BOOTSTACK_SIZE (kstack_pages * PAGE_SIZE) 75 76 uint32_t __riscv_boot_ap[MAXCPU]; 77 78 static enum { 79 CPUS_UNKNOWN, 80 #ifdef FDT 81 CPUS_FDT, 82 #endif 83 } cpu_enum_method; 84 85 static device_identify_t riscv64_cpu_identify; 86 static device_probe_t riscv64_cpu_probe; 87 static device_attach_t riscv64_cpu_attach; 88 89 static int ipi_handler(void *); 90 91 struct pcb stoppcbs[MAXCPU]; 92 93 extern uint32_t boot_hart; 94 extern cpuset_t all_harts; 95 96 #ifdef INVARIANTS 97 static uint32_t cpu_reg[MAXCPU][2]; 98 #endif 99 static device_t cpu_list[MAXCPU]; 100 101 void mpentry(u_long hartid); 102 void init_secondary(uint64_t); 103 104 static struct mtx ap_boot_mtx; 105 106 /* Stacks for AP initialization, discarded once idle threads are started. */ 107 void *bootstack; 108 static void *bootstacks[MAXCPU]; 109 110 /* Count of started APs, used to synchronize access to bootstack. */ 111 static volatile int aps_started; 112 113 /* Set to 1 once we're ready to let the APs out of the pen. */ 114 static volatile int aps_ready; 115 116 /* Temporary variables for init_secondary() */ 117 void *dpcpu[MAXCPU - 1]; 118 119 static device_method_t riscv64_cpu_methods[] = { 120 /* Device interface */ 121 DEVMETHOD(device_identify, riscv64_cpu_identify), 122 DEVMETHOD(device_probe, riscv64_cpu_probe), 123 DEVMETHOD(device_attach, riscv64_cpu_attach), 124 125 DEVMETHOD_END 126 }; 127 128 static driver_t riscv64_cpu_driver = { 129 "riscv64_cpu", 130 riscv64_cpu_methods, 131 0 132 }; 133 134 DRIVER_MODULE(riscv64_cpu, cpu, riscv64_cpu_driver, 0, 0); 135 136 static void 137 riscv64_cpu_identify(driver_t *driver, device_t parent) 138 { 139 140 if (device_find_child(parent, "riscv64_cpu", -1) != NULL) 141 return; 142 if (BUS_ADD_CHILD(parent, 0, "riscv64_cpu", -1) == NULL) 143 device_printf(parent, "add child failed\n"); 144 } 145 146 static int 147 riscv64_cpu_probe(device_t dev) 148 { 149 u_int cpuid; 150 151 cpuid = device_get_unit(dev); 152 if (cpuid >= MAXCPU || cpuid > mp_maxid) 153 return (EINVAL); 154 155 device_quiet(dev); 156 return (0); 157 } 158 159 static int 160 riscv64_cpu_attach(device_t dev) 161 { 162 const uint32_t *reg; 163 size_t reg_size; 164 u_int cpuid; 165 int i; 166 167 cpuid = device_get_unit(dev); 168 169 if (cpuid >= MAXCPU || cpuid > mp_maxid) 170 return (EINVAL); 171 KASSERT(cpu_list[cpuid] == NULL, ("Already have cpu %u", cpuid)); 172 173 reg = cpu_get_cpuid(dev, ®_size); 174 if (reg == NULL) 175 return (EINVAL); 176 177 if (bootverbose) { 178 device_printf(dev, "register <"); 179 for (i = 0; i < reg_size; i++) 180 printf("%s%x", (i == 0) ? "" : " ", reg[i]); 181 printf(">\n"); 182 } 183 184 /* Set the device to start it later */ 185 cpu_list[cpuid] = dev; 186 187 return (0); 188 } 189 190 static void 191 release_aps(void *dummy __unused) 192 { 193 cpuset_t mask; 194 int i; 195 196 if (mp_ncpus == 1) 197 return; 198 199 /* Setup the IPI handler */ 200 riscv_setup_ipihandler(ipi_handler); 201 202 atomic_store_rel_int(&aps_ready, 1); 203 204 /* Wake up the other CPUs */ 205 mask = all_harts; 206 CPU_CLR(boot_hart, &mask); 207 208 printf("Release APs\n"); 209 210 sbi_send_ipi(mask.__bits); 211 212 for (i = 0; i < 2000; i++) { 213 if (atomic_load_acq_int(&smp_started)) 214 return; 215 DELAY(1000); 216 } 217 218 printf("APs not started\n"); 219 } 220 SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL); 221 222 void 223 init_secondary(uint64_t hart) 224 { 225 struct pcpu *pcpup; 226 u_int cpuid; 227 228 /* Renumber this cpu */ 229 cpuid = hart; 230 if (cpuid < boot_hart) 231 cpuid += mp_maxid + 1; 232 cpuid -= boot_hart; 233 234 /* Setup the pcpu pointer */ 235 pcpup = &__pcpu[cpuid]; 236 __asm __volatile("mv tp, %0" :: "r"(pcpup)); 237 238 /* Workaround: make sure wfi doesn't halt the hart */ 239 csr_set(sie, SIE_SSIE); 240 csr_set(sip, SIE_SSIE); 241 242 /* Signal the BSP and spin until it has released all APs. */ 243 atomic_add_int(&aps_started, 1); 244 while (!atomic_load_int(&aps_ready)) 245 __asm __volatile("wfi"); 246 247 /* Initialize curthread */ 248 KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread")); 249 pcpup->pc_curthread = pcpup->pc_idlethread; 250 schedinit_ap(); 251 252 /* 253 * Identify current CPU. This is necessary to setup 254 * affinity registers and to provide support for 255 * runtime chip identification. 256 */ 257 identify_cpu(); 258 259 /* Enable software interrupts */ 260 riscv_unmask_ipi(); 261 262 #ifndef EARLY_AP_STARTUP 263 /* Start per-CPU event timers. */ 264 cpu_initclocks_ap(); 265 #endif 266 267 /* Enable external (PLIC) interrupts */ 268 csr_set(sie, SIE_SEIE); 269 270 /* Activate this hart in the kernel pmap. */ 271 CPU_SET_ATOMIC(hart, &kernel_pmap->pm_active); 272 273 /* Activate process 0's pmap. */ 274 pmap_activate_boot(vmspace_pmap(proc0.p_vmspace)); 275 276 mtx_lock_spin(&ap_boot_mtx); 277 278 atomic_add_rel_32(&smp_cpus, 1); 279 280 if (smp_cpus == mp_ncpus) { 281 /* enable IPI's, tlb shootdown, freezes etc */ 282 atomic_store_rel_int(&smp_started, 1); 283 } 284 285 mtx_unlock_spin(&ap_boot_mtx); 286 287 /* Enter the scheduler */ 288 sched_ap_entry(); 289 290 panic("scheduler returned us to init_secondary"); 291 /* NOTREACHED */ 292 } 293 294 static void 295 smp_after_idle_runnable(void *arg __unused) 296 { 297 int cpu; 298 299 if (mp_ncpus == 1) 300 return; 301 302 KASSERT(smp_started != 0, ("%s: SMP not started yet", __func__)); 303 304 /* 305 * Wait for all APs to handle an interrupt. After that, we know that 306 * the APs have entered the scheduler at least once, so the boot stacks 307 * are safe to free. 308 */ 309 smp_rendezvous(smp_no_rendezvous_barrier, NULL, 310 smp_no_rendezvous_barrier, NULL); 311 312 for (cpu = 1; cpu <= mp_maxid; cpu++) { 313 if (bootstacks[cpu] != NULL) 314 kmem_free(bootstacks[cpu], MP_BOOTSTACK_SIZE); 315 } 316 } 317 SYSINIT(smp_after_idle_runnable, SI_SUB_SMP, SI_ORDER_ANY, 318 smp_after_idle_runnable, NULL); 319 320 static int 321 ipi_handler(void *arg) 322 { 323 u_int ipi_bitmap; 324 u_int cpu, ipi; 325 int bit; 326 327 csr_clear(sip, SIP_SSIP); 328 329 cpu = PCPU_GET(cpuid); 330 331 mb(); 332 333 ipi_bitmap = atomic_readandclear_int(PCPU_PTR(pending_ipis)); 334 if (ipi_bitmap == 0) 335 return (FILTER_HANDLED); 336 337 while ((bit = ffs(ipi_bitmap))) { 338 bit = (bit - 1); 339 ipi = (1 << bit); 340 ipi_bitmap &= ~ipi; 341 342 mb(); 343 344 switch (ipi) { 345 case IPI_AST: 346 CTR0(KTR_SMP, "IPI_AST"); 347 break; 348 case IPI_PREEMPT: 349 CTR1(KTR_SMP, "%s: IPI_PREEMPT", __func__); 350 sched_preempt(curthread); 351 break; 352 case IPI_RENDEZVOUS: 353 CTR0(KTR_SMP, "IPI_RENDEZVOUS"); 354 smp_rendezvous_action(); 355 break; 356 case IPI_STOP: 357 case IPI_STOP_HARD: 358 CTR0(KTR_SMP, (ipi == IPI_STOP) ? "IPI_STOP" : "IPI_STOP_HARD"); 359 savectx(&stoppcbs[cpu]); 360 361 /* Indicate we are stopped */ 362 CPU_SET_ATOMIC(cpu, &stopped_cpus); 363 364 /* Wait for restart */ 365 while (!CPU_ISSET(cpu, &started_cpus)) 366 cpu_spinwait(); 367 368 CPU_CLR_ATOMIC(cpu, &started_cpus); 369 CPU_CLR_ATOMIC(cpu, &stopped_cpus); 370 CTR0(KTR_SMP, "IPI_STOP (restart)"); 371 372 /* 373 * The kernel debugger might have set a breakpoint, 374 * so flush the instruction cache. 375 */ 376 fence_i(); 377 break; 378 case IPI_HARDCLOCK: 379 CTR1(KTR_SMP, "%s: IPI_HARDCLOCK", __func__); 380 hardclockintr(); 381 break; 382 default: 383 panic("Unknown IPI %#0x on cpu %d", ipi, curcpu); 384 } 385 } 386 387 return (FILTER_HANDLED); 388 } 389 390 struct cpu_group * 391 cpu_topo(void) 392 { 393 394 return (smp_topo_none()); 395 } 396 397 /* Determine if we running MP machine */ 398 int 399 cpu_mp_probe(void) 400 { 401 402 return (mp_ncpus > 1); 403 } 404 405 #ifdef FDT 406 static boolean_t 407 cpu_check_mmu(u_int id __unused, phandle_t node, u_int addr_size __unused, 408 pcell_t *reg __unused) 409 { 410 char type[32]; 411 412 /* Check if this hart supports MMU. */ 413 if (OF_getprop(node, "mmu-type", (void *)type, sizeof(type)) == -1 || 414 strncmp(type, "riscv,none", 10) == 0) 415 return (0); 416 417 return (1); 418 } 419 420 static boolean_t 421 cpu_init_fdt(u_int id, phandle_t node, u_int addr_size, pcell_t *reg) 422 { 423 struct pcpu *pcpup; 424 vm_paddr_t start_addr; 425 uint64_t hart; 426 u_int cpuid; 427 int naps; 428 int error; 429 430 if (!cpu_check_mmu(id, node, addr_size, reg)) 431 return (0); 432 433 KASSERT(id < MAXCPU, ("Too many CPUs")); 434 435 KASSERT(addr_size == 1 || addr_size == 2, ("Invalid register size")); 436 #ifdef INVARIANTS 437 cpu_reg[id][0] = reg[0]; 438 if (addr_size == 2) 439 cpu_reg[id][1] = reg[1]; 440 #endif 441 442 hart = reg[0]; 443 if (addr_size == 2) { 444 hart <<= 32; 445 hart |= reg[1]; 446 } 447 448 KASSERT(hart < MAXCPU, ("Too many harts.")); 449 450 /* We are already running on this cpu */ 451 if (hart == boot_hart) 452 return (1); 453 454 /* 455 * Rotate the CPU IDs to put the boot CPU as CPU 0. 456 * We keep the other CPUs ordered. 457 */ 458 cpuid = hart; 459 if (cpuid < boot_hart) 460 cpuid += mp_maxid + 1; 461 cpuid -= boot_hart; 462 463 /* Check if we are able to start this cpu */ 464 if (cpuid > mp_maxid) 465 return (0); 466 467 /* 468 * Depending on the SBI implementation, APs are waiting either in 469 * locore.S or to be activated explicitly, via SBI call. 470 */ 471 if (sbi_probe_extension(SBI_EXT_ID_HSM) != 0) { 472 start_addr = pmap_kextract((vm_offset_t)mpentry); 473 error = sbi_hsm_hart_start(hart, start_addr, 0); 474 if (error != 0) { 475 mp_ncpus--; 476 477 /* Send a warning to the user and continue. */ 478 printf("AP %u (hart %lu) failed to start, error %d\n", 479 cpuid, hart, error); 480 return (0); 481 } 482 } 483 484 pcpup = &__pcpu[cpuid]; 485 pcpu_init(pcpup, cpuid, sizeof(struct pcpu)); 486 pcpup->pc_hart = hart; 487 488 dpcpu[cpuid - 1] = kmem_malloc(DPCPU_SIZE, M_WAITOK | M_ZERO); 489 dpcpu_init(dpcpu[cpuid - 1], cpuid); 490 491 bootstacks[cpuid] = kmem_malloc(MP_BOOTSTACK_SIZE, M_WAITOK | M_ZERO); 492 493 naps = atomic_load_int(&aps_started); 494 bootstack = (char *)bootstacks[cpuid] + MP_BOOTSTACK_SIZE; 495 496 printf("Starting CPU %u (hart %lx)\n", cpuid, hart); 497 atomic_store_32(&__riscv_boot_ap[hart], 1); 498 499 /* Wait for the AP to switch to its boot stack. */ 500 while (atomic_load_int(&aps_started) < naps + 1) 501 cpu_spinwait(); 502 503 CPU_SET(cpuid, &all_cpus); 504 CPU_SET(hart, &all_harts); 505 506 return (1); 507 } 508 #endif 509 510 /* Initialize and fire up non-boot processors */ 511 void 512 cpu_mp_start(void) 513 { 514 515 mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN); 516 517 CPU_SET(0, &all_cpus); 518 CPU_SET(boot_hart, &all_harts); 519 520 switch(cpu_enum_method) { 521 #ifdef FDT 522 case CPUS_FDT: 523 ofw_cpu_early_foreach(cpu_init_fdt, true); 524 break; 525 #endif 526 case CPUS_UNKNOWN: 527 break; 528 } 529 } 530 531 /* Introduce rest of cores to the world */ 532 void 533 cpu_mp_announce(void) 534 { 535 } 536 537 void 538 cpu_mp_setmaxid(void) 539 { 540 int cores; 541 542 #ifdef FDT 543 cores = ofw_cpu_early_foreach(cpu_check_mmu, true); 544 if (cores > 0) { 545 cores = MIN(cores, MAXCPU); 546 if (bootverbose) 547 printf("Found %d CPUs in the device tree\n", cores); 548 mp_ncpus = cores; 549 mp_maxid = cores - 1; 550 cpu_enum_method = CPUS_FDT; 551 } else 552 #endif 553 { 554 if (bootverbose) 555 printf("No CPU data, limiting to 1 core\n"); 556 mp_ncpus = 1; 557 mp_maxid = 0; 558 } 559 560 if (TUNABLE_INT_FETCH("hw.ncpu", &cores)) { 561 if (cores > 0 && cores < mp_ncpus) { 562 mp_ncpus = cores; 563 mp_maxid = cores - 1; 564 } 565 } 566 } 567