/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011 Semihalf.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_ddb.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/pcpu.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/ktr.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>

#include <machine/armreg.h>
#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/debug_monitor.h>
#include <machine/smp.h>
#include <machine/pcb.h>
#include <machine/physmem.h>
#include <machine/intr.h>
#include <machine/vmparam.h>
#ifdef VFP
#include <machine/vfp.h>
#endif
#ifdef CPU_MV_PJ4B
#include <arm/mv/mvwin.h>
#endif

extern struct pcpu __pcpu[];

/* Used to hold the APs until we are ready to release them. */
struct mtx ap_boot_mtx;
struct pcb stoppcbs[MAXCPU];

/* # of Application processors */
volatile int mp_naps;

/* Set to 1 once we're ready to let the APs out of the pen. */
volatile int aps_ready = 0;
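
/*
 * AP bring-up, as implemented below: cpu_mp_start() reserves the per-CPU
 * data areas, flushes the data cache and calls platform_mp_start_ap() to
 * kick the secondary cores.  Each AP runs init_secondary(), sets up its
 * MMU, stacks and pcpu data, increments mp_naps and then spins (using WFE
 * on ARMv7) until the BSP sets aps_ready from release_aps() at SI_SUB_SMP.
 * The last AP to check in sets smp_started, which lets every core proceed
 * into the scheduler.
 */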

void set_stackptrs(int cpu);

/* Temporary variables for init_secondary(). */
void *dpcpu[MAXCPU - 1];

/* Determine if we are running on an MP machine. */
int
cpu_mp_probe(void)
{

	KASSERT(mp_ncpus != 0, ("cpu_mp_probe: mp_ncpus is unset"));

	CPU_SETOF(0, &all_cpus);

	return (mp_ncpus > 1);
}

/* Wait for the APs started by the platform code to check in. */
static int
check_ap(void)
{
	uint32_t ms;

	for (ms = 0; ms < 2000; ++ms) {
		if ((mp_naps + 1) == mp_ncpus)
			return (0);	/* success */
		else
			DELAY(1000);
	}

	return (-2);
}

extern unsigned char _end[];

/* Initialize and fire up non-boot processors. */
void
cpu_mp_start(void)
{
	int error, i;

	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);

	/* Reserve memory for application processors. */
	for (i = 0; i < (mp_ncpus - 1); i++)
		dpcpu[i] = (void *)kmem_malloc(DPCPU_SIZE, M_WAITOK | M_ZERO);

	dcache_wbinv_poc_all();

	/* Initialize boot code and start up processors. */
	platform_mp_start_ap();

	/* Check if APs started properly. */
	error = check_ap();
	if (error)
		printf("WARNING: Some AP's failed to start\n");
	else
		for (i = 1; i < mp_ncpus; i++)
			CPU_SET(i, &all_cpus);
}

/* Introduce the rest of the cores to the world. */
void
cpu_mp_announce(void)
{

}

extern vm_paddr_t pmap_pa;

void
init_secondary(int cpu)
{
	struct pcpu *pc;
	uint32_t loop_counter;

	pmap_set_tex();
	cpuinfo_reinit_mmu(pmap_kern_ttb);
	cpu_setup();

	/* Provide stack pointers for other processor modes. */
	set_stackptrs(cpu);

	enable_interrupts(PSR_A);
	pc = &__pcpu[cpu];

	/*
	 * pcpu_init() updates the queue, so it should not be executed in
	 * parallel on several cores.
	 */
	while (mp_naps < (cpu - 1))
		;

	pcpu_init(pc, cpu, sizeof(struct pcpu));
	dpcpu_init(dpcpu[cpu - 1], cpu);
#if __ARM_ARCH >= 6 && defined(DDB)
	dbg_monitor_init_secondary();
#endif
	/* Signal our startup to the BSP. */
	atomic_add_rel_32(&mp_naps, 1);

	/* Spin until the BSP releases the APs. */
	while (!atomic_load_acq_int(&aps_ready)) {
#if __ARM_ARCH >= 7
		__asm __volatile("wfe");
#endif
	}

	/* Initialize curthread. */
	KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
	pc->pc_curthread = pc->pc_idlethread;
	pc->pc_curpcb = pc->pc_idlethread->td_pcb;
	set_curthread(pc->pc_idlethread);
#ifdef VFP
	vfp_init();
#endif

	/* Configure the interrupt controller. */
	intr_pic_init_secondary();

	/* Apply possible BP hardening. */
	cpuinfo_init_bp_hardening();

	mtx_lock_spin(&ap_boot_mtx);

	atomic_add_rel_32(&smp_cpus, 1);

	if (smp_cpus == mp_ncpus) {
		/* Enable IPIs, TLB shootdowns, freezes, etc. */
		atomic_store_rel_int(&smp_started, 1);
	}

	mtx_unlock_spin(&ap_boot_mtx);

	enable_interrupts(PSR_I);

	loop_counter = 0;
	while (smp_started == 0) {
		DELAY(100);
		loop_counter++;
		if (loop_counter == 1000)
			CTR0(KTR_SMP, "AP still wait for smp_started");
	}

	/* Start per-CPU event timers. */
	cpu_initclocks_ap();

	CTR0(KTR_SMP, "go into scheduler");

	/* Enter the scheduler. */
	sched_throw(NULL);

	panic("scheduler returned us to %s", __func__);
	/* NOTREACHED */
}

static void
ipi_rendezvous(void *dummy __unused)
{

	CTR0(KTR_SMP, "IPI_RENDEZVOUS");
	smp_rendezvous_action();
}

static void
ipi_ast(void *dummy __unused)
{

	CTR0(KTR_SMP, "IPI_AST");
}

static void
ipi_stop(void *dummy __unused)
{
	u_int cpu;

	/*
	 * IPI_STOP_HARD is mapped to IPI_STOP.
	 */
	CTR0(KTR_SMP, "IPI_STOP or IPI_STOP_HARD");

	cpu = PCPU_GET(cpuid);
	savectx(&stoppcbs[cpu]);

	/*
	 * CPUs are stopped when entering the debugger and at system
	 * shutdown, both of which can precede a panic dump.  For the dump
	 * to be correct, all caches must be flushed and invalidated, but
	 * on ARM there's no way to broadcast a wbinv_all to other cores.
	 * Instead, we have each core do the local wbinv_all as part of
	 * stopping the core.  The core requesting the stop will do the L2
	 * cache flush after all other cores have done their L1 flushes
	 * and stopped.
	 */
	dcache_wbinv_poc_all();

	/* Indicate we are stopped. */
	CPU_SET_ATOMIC(cpu, &stopped_cpus);

	/* Wait for restart. */
	while (!CPU_ISSET(cpu, &started_cpus))
		cpu_spinwait();

	CPU_CLR_ATOMIC(cpu, &started_cpus);
	CPU_CLR_ATOMIC(cpu, &stopped_cpus);
#ifdef DDB
	dbg_resume_dbreg();
#endif
	CTR0(KTR_SMP, "IPI_STOP (restart)");
}

static void
ipi_preempt(void *arg)
{
	struct trapframe *oldframe;
	struct thread *td;

	critical_enter();
	td = curthread;
	td->td_intr_nesting_level++;
	oldframe = td->td_intr_frame;
	td->td_intr_frame = (struct trapframe *)arg;

	CTR1(KTR_SMP, "%s: IPI_PREEMPT", __func__);
	sched_preempt(td);

	td->td_intr_frame = oldframe;
	td->td_intr_nesting_level--;
	critical_exit();
}

static void
ipi_hardclock(void *arg)
{
	struct trapframe *oldframe;
	struct thread *td;

	critical_enter();
	td = curthread;
	td->td_intr_nesting_level++;
	oldframe = td->td_intr_frame;
	td->td_intr_frame = (struct trapframe *)arg;

	CTR1(KTR_SMP, "%s: IPI_HARDCLOCK", __func__);
	hardclockintr();

	td->td_intr_frame = oldframe;
	td->td_intr_nesting_level--;
	critical_exit();
}

static void
release_aps(void *dummy __unused)
{
	uint32_t loop_counter;

	if (mp_ncpus == 1)
		return;

	intr_pic_ipi_setup(IPI_RENDEZVOUS, "rendezvous", ipi_rendezvous, NULL);
	intr_pic_ipi_setup(IPI_AST, "ast", ipi_ast, NULL);
	intr_pic_ipi_setup(IPI_STOP, "stop", ipi_stop, NULL);
	intr_pic_ipi_setup(IPI_PREEMPT, "preempt", ipi_preempt, NULL);
	intr_pic_ipi_setup(IPI_HARDCLOCK, "hardclock", ipi_hardclock, NULL);

	atomic_store_rel_int(&aps_ready, 1);
	/* Wake the other cores up. */
	dsb();
	sev();

	printf("Release APs\n");

	for (loop_counter = 0; loop_counter < 2000; loop_counter++) {
		if (smp_started)
			return;
		DELAY(1000);
	}
	printf("AP's not started\n");
}

SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);

struct cpu_group *
cpu_topo(void)
{

	return (smp_topo_1level(CG_SHARE_L2, mp_ncpus, 0));
}

void
cpu_mp_setmaxid(void)
{

	platform_mp_setmaxid();
}

/* Sending IPIs */
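/*
 * The helpers below build a target cpuset and hand it, together with the
 * IPI number, to intr_ipi_send().  The matching handlers (rendezvous, ast,
 * stop, preempt, hardclock) are registered by release_aps() via
 * intr_pic_ipi_setup().
 */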
void
ipi_all_but_self(u_int ipi)
{
	cpuset_t other_cpus;

	other_cpus = all_cpus;
	CPU_CLR(PCPU_GET(cpuid), &other_cpus);
	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	intr_ipi_send(other_cpus, ipi);
}

void
ipi_cpu(int cpu, u_int ipi)
{
	cpuset_t cpus;

	CPU_ZERO(&cpus);
	CPU_SET(cpu, &cpus);

	CTR3(KTR_SMP, "%s: cpu: %d, ipi: %x", __func__, cpu, ipi);
	intr_ipi_send(cpus, ipi);
}

void
ipi_selected(cpuset_t cpus, u_int ipi)
{

	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	intr_ipi_send(cpus, ipi);
}