/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011 Semihalf.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_ddb.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/pcpu.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/ktr.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>

#include <machine/armreg.h>
#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/debug_monitor.h>
#include <machine/smp.h>
#include <machine/pcb.h>
#include <machine/intr.h>
#include <machine/vmparam.h>
#ifdef VFP
#include <machine/vfp.h>
#endif
#ifdef CPU_MV_PJ4B
#include <arm/mv/mvwin.h>
#endif

/* Used to hold the APs until we are ready to release them. */
struct mtx ap_boot_mtx;
struct pcb stoppcbs[MAXCPU];

/* # of application processors */
volatile int mp_naps;

/* Set to 1 once we're ready to let the APs out of the pen. */
volatile int aps_ready = 0;
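
/*
 * Overview of AP bring-up in this file: cpu_mp_start() on the BSP
 * allocates the per-CPU data, calls platform_mp_start_ap() to kick the
 * secondary cores, and waits in check_ap() for each core to bump
 * mp_naps from init_secondary().  The APs in turn spin on aps_ready
 * until release_aps() (run from the SI_SUB_SMP SYSINIT below) has
 * installed the IPI handlers, after which they enter the scheduler.
 */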

void set_stackptrs(int cpu);

/* Temporary variables for init_secondary() */
void *dpcpu[MAXCPU - 1];

/* Determine if we are running on an MP machine. */
int
cpu_mp_probe(void)
{

	KASSERT(mp_ncpus != 0, ("cpu_mp_probe: mp_ncpus is unset"));

	CPU_SETOF(0, &all_cpus);

	return (mp_ncpus > 1);
}

/* Wait for the APs, started via the platform-specific code, to check in. */
static int
check_ap(void)
{
	uint32_t ms;

	for (ms = 0; ms < 2000; ++ms) {
		if ((mp_naps + 1) == mp_ncpus)
			return (0);		/* success */
		else
			DELAY(1000);
	}

	return (-2);
}

/* Initialize and fire up non-boot processors. */
void
cpu_mp_start(void)
{
	int error, i;

	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);

	/* Reserve memory for application processors */
	for (i = 0; i < (mp_ncpus - 1); i++)
		dpcpu[i] = kmem_malloc(DPCPU_SIZE, M_WAITOK | M_ZERO);

	dcache_wbinv_poc_all();

	/* Initialize boot code and start up processors */
	platform_mp_start_ap();

	/* Check if the APs started properly. */
	error = check_ap();
	if (error)
		printf("WARNING: Some APs failed to start\n");
	else
		for (i = 1; i < mp_ncpus; i++)
			CPU_SET(i, &all_cpus);
}

/* Introduce the rest of the cores to the world. */
void
cpu_mp_announce(void)
{

}

void
init_secondary(int cpu)
{
	struct pcpu *pc;
	uint32_t loop_counter;

	pmap_set_tex();
	cpuinfo_reinit_mmu(pmap_kern_ttb);
	cpu_setup();

	/* Provide stack pointers for other processor modes. */
	set_stackptrs(cpu);

	enable_interrupts(PSR_A);
	pc = &__pcpu[cpu];

	/*
	 * pcpu_init() updates the global pcpu queue, so it must not be
	 * executed in parallel on several cores.
	 */
	while (mp_naps < (cpu - 1))
		;

	pcpu_init(pc, cpu, sizeof(struct pcpu));
	pc->pc_mpidr = cp15_mpidr_get() & 0xFFFFFF;
	dpcpu_init(dpcpu[cpu - 1], cpu);
#if defined(DDB)
	dbg_monitor_init_secondary();
#endif
	/* Signal our startup to the BSP. */
	atomic_add_rel_32(&mp_naps, 1);

	/* Spin until the BSP releases the APs. */
	while (!atomic_load_acq_int(&aps_ready)) {
#if __ARM_ARCH >= 7
		__asm __volatile("wfe");
#endif
	}

	/* Initialize curthread */
	KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
	pc->pc_curthread = pc->pc_idlethread;
	pc->pc_curpcb = pc->pc_idlethread->td_pcb;
	set_curthread(pc->pc_idlethread);
	schedinit_ap();
#ifdef VFP
	vfp_init();
#endif

	/* Configure the interrupt controller */
	intr_pic_init_secondary();

	/* Apply possible BP hardening */
	cpuinfo_init_bp_hardening();

	mtx_lock_spin(&ap_boot_mtx);

	atomic_add_rel_32(&smp_cpus, 1);

	if (smp_cpus == mp_ncpus) {
		/* Enable IPIs, TLB shootdowns, freezes, etc. */
		atomic_store_rel_int(&smp_started, 1);
	}

	mtx_unlock_spin(&ap_boot_mtx);

	loop_counter = 0;
	while (smp_started == 0) {
		DELAY(100);
		loop_counter++;
		if (loop_counter == 1000)
			CTR0(KTR_SMP, "AP still waiting for smp_started");
	}

	/* Start per-CPU event timers. */
	cpu_initclocks_ap();

	CTR0(KTR_SMP, "go into scheduler");

	/* Enter the scheduler */
	sched_ap_entry();

	panic("scheduler returned us to %s", __func__);
	/* NOTREACHED */
}

static void
ipi_rendezvous(void *dummy __unused)
{

	CTR0(KTR_SMP, "IPI_RENDEZVOUS");
	smp_rendezvous_action();
}

static void
ipi_ast(void *dummy __unused)
{

	CTR0(KTR_SMP, "IPI_AST");
}

static void
ipi_stop(void *dummy __unused)
{
	u_int cpu;

	/*
	 * IPI_STOP_HARD is mapped to IPI_STOP.
	 */
	CTR0(KTR_SMP, "IPI_STOP or IPI_STOP_HARD");

	cpu = PCPU_GET(cpuid);
	savectx(&stoppcbs[cpu]);

	/*
	 * CPUs are stopped when entering the debugger and at
	 * system shutdown, both events which can precede a
	 * panic dump.  For the dump to be correct, all caches
	 * must be flushed and invalidated, but on ARM there's
	 * no way to broadcast a wbinv_all to other cores.
	 * Instead, we have each core do the local wbinv_all as
	 * part of stopping the core.  The core requesting the
	 * stop will do the l2 cache flush after all other cores
	 * have done their l1 flushes and stopped.
	 */
	dcache_wbinv_poc_all();

	/* Indicate we are stopped */
	CPU_SET_ATOMIC(cpu, &stopped_cpus);

	/* Wait for restart */
	while (!CPU_ISSET(cpu, &started_cpus))
		cpu_spinwait();

	CPU_CLR_ATOMIC(cpu, &started_cpus);
	CPU_CLR_ATOMIC(cpu, &stopped_cpus);
#ifdef DDB
	dbg_resume_dbreg();
#endif
	CTR0(KTR_SMP, "IPI_STOP (restart)");
}

static void
ipi_preempt(void *arg)
{
	struct trapframe *oldframe;
	struct thread *td;

	critical_enter();
	td = curthread;
	td->td_intr_nesting_level++;
	oldframe = td->td_intr_frame;
	td->td_intr_frame = (struct trapframe *)arg;

	CTR1(KTR_SMP, "%s: IPI_PREEMPT", __func__);
	sched_preempt(td);

	td->td_intr_frame = oldframe;
	td->td_intr_nesting_level--;
	critical_exit();
}

static void
ipi_hardclock(void *arg)
{
	struct trapframe *oldframe;
	struct thread *td;

	critical_enter();
	td = curthread;
	td->td_intr_nesting_level++;
	oldframe = td->td_intr_frame;
	td->td_intr_frame = (struct trapframe *)arg;

	CTR1(KTR_SMP, "%s: IPI_HARDCLOCK", __func__);
	hardclockintr();

	td->td_intr_frame = oldframe;
	td->td_intr_nesting_level--;
	critical_exit();
}

static void
release_aps(void *dummy __unused)
{
	uint32_t loop_counter;

	if (mp_ncpus == 1)
		return;

	intr_pic_ipi_setup(IPI_RENDEZVOUS, "rendezvous", ipi_rendezvous, NULL);
	intr_pic_ipi_setup(IPI_AST, "ast", ipi_ast, NULL);
	intr_pic_ipi_setup(IPI_STOP, "stop", ipi_stop, NULL);
	intr_pic_ipi_setup(IPI_PREEMPT, "preempt", ipi_preempt, NULL);
	intr_pic_ipi_setup(IPI_HARDCLOCK, "hardclock", ipi_hardclock, NULL);

	atomic_store_rel_int(&aps_ready, 1);
	/* Wake the other threads up */
	dsb();
	sev();

	printf("Release APs\n");

	for (loop_counter = 0; loop_counter < 2000; loop_counter++) {
		if (smp_started)
			return;
		DELAY(1000);
	}
	printf("APs not started\n");
}

SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);

struct cpu_group *
cpu_topo(void)
{

	return (smp_topo_1level(CG_SHARE_L2, mp_ncpus, 0));
}

void
cpu_mp_setmaxid(void)
{

	platform_mp_setmaxid();
}

/* Sending IPIs */
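/*
 * The senders below only build the target cpuset and hand it to
 * intr_ipi_send(); the registered interrupt controller driver (e.g.
 * the GIC) is what actually raises the interrupt on the target CPUs.
 */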
void
ipi_all_but_self(u_int ipi)
{
	cpuset_t other_cpus;

	other_cpus = all_cpus;
	CPU_CLR(PCPU_GET(cpuid), &other_cpus);
	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	intr_ipi_send(other_cpus, ipi);
}

void
ipi_cpu(int cpu, u_int ipi)
{
	cpuset_t cpus;

	CPU_ZERO(&cpus);
	CPU_SET(cpu, &cpus);

	CTR3(KTR_SMP, "%s: cpu: %d, ipi: %x", __func__, cpu, ipi);
	intr_ipi_send(cpus, ipi);
}

void
ipi_selected(cpuset_t cpus, u_int ipi)
{

	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	intr_ipi_send(cpus, ipi);
}