/*
 * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/memrange.h>
#include <sys/tls.h>
#include <sys/types.h>
#include <sys/vmm.h>

#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>

#include <sys/mplock2.h>
#include <sys/thread2.h>

#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/cpumask.h>
#include <machine/globaldata.h>
#include <machine/md_var.h>
#include <machine/pmap.h>
#include <machine/smp.h>
#include <machine/tls.h>
#include <machine/param.h>

#include <unistd.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <sys/mman.h>		/* mmap() for AP stacks when vmm is enabled */

extern pt_entry_t *KPTphys;

extern int vmm_enabled;

volatile cpumask_t stopped_cpus;
/* which cpus are ready for IPIs etc? */
cpumask_t smp_active_mask = CPUMASK_INITIALIZER_ONLYONE;
static int boot_address;
/* which cpus have been started */
static cpumask_t smp_startup_mask = CPUMASK_INITIALIZER_ONLYONE;
static int mp_finish;

/* Local data for detecting CPU TOPOLOGY */
static int core_bits = 0;
static int logical_CPU_bits = 0;

/* function prototypes XXX these should go elsewhere */
void bootstrap_idle(void);
void single_cpu_ipi(int, int, int);
void selected_cpu_ipi(cpumask_t, int, int);
#if 0
void ipi_handler(int);
#endif

pt_entry_t *SMPpt;

/* AP uses this during bootstrap.  Do not staticize.  */
char *bootSTK;
static int bootAP;

/* XXX these need to go into the appropriate header file */
static int start_all_aps(u_int);
void init_secondary(void);
void *start_ap(void *);

/*
 * Get SMP fully working before we start initializing devices.
 */
static
void
ap_finish(void)
{
	mp_finish = 1;
	if (bootverbose)
		kprintf("Finish MP startup\n");

	/* build our map of 'other' CPUs */
	mycpu->gd_other_cpus = smp_startup_mask;
	CPUMASK_NANDBIT(mycpu->gd_other_cpus, mycpu->gd_cpuid);

	/*
	 * Let the other cpus finish initializing and build their map
	 * of 'other' CPUs.
	 */
	rel_mplock();
	while (CPUMASK_CMPMASKNEQ(smp_active_mask, smp_startup_mask)) {
		DELAY(100000);
		cpu_lfence();
	}

	while (try_mplock() == 0)
		DELAY(100000);
	if (bootverbose)
		kprintf("Active CPU Mask: %08lx\n",
			(long)CPUMASK_LOWMASK(smp_active_mask));
}

SYSINIT(finishsmp, SI_BOOT2_FINISH_SMP, SI_ORDER_FIRST, ap_finish, NULL);

void *
start_ap(void *arg __unused)
{
	init_secondary();
	setrealcpu();
	bootstrap_idle();

	return(NULL); /* NOTREACHED */
}

/* storage for AP thread IDs */
pthread_t ap_tids[MAXCPU];

int naps;

void
mp_start(void)
{
	size_t ipiq_size;
	int shift;

	ncpus = optcpus;
	naps = ncpus - 1;

	for (shift = 0; (1 << shift) <= ncpus; ++shift)
		;
	--shift;

	/* ncpus_fit -- ncpus rounded up to the nearest power of 2 */
	if ((1 << shift) < ncpus)
		++shift;
	ncpus_fit = 1 << shift;
	ncpus_fit_mask = ncpus_fit - 1;

	/*
	 * cpu0 initialization
	 */
	ipiq_size = sizeof(struct lwkt_ipiq) * ncpus;
	mycpu->gd_ipiq = (void *)kmem_alloc(&kernel_map, ipiq_size,
					    VM_SUBSYS_IPIQ);
	bzero(mycpu->gd_ipiq, ipiq_size);

	/* initialize arc4random. */
	arc4_init_pcpu(0);

	/*
	 * cpu 1-(n-1)
	 */
	start_all_aps(boot_address);
}

void
mp_announce(void)
{
	int x;

	kprintf("DragonFly/MP: Multiprocessor\n");
	kprintf(" cpu0 (BSP)\n");

	for (x = 1; x <= naps; ++x)
		kprintf(" cpu%d (AP)\n", x);
}

void
cpu_send_ipiq(int dcpu)
{
	if (CPUMASK_TESTBIT(smp_active_mask, dcpu)) {
		if (pthread_kill(ap_tids[dcpu], SIGUSR1) != 0)
			panic("pthread_kill failed in cpu_send_ipiq");
	}
#if 0
	panic("XXX cpu_send_ipiq()");
#endif
}

void
single_cpu_ipi(int cpu, int vector, int delivery_mode)
{
	kprintf("XXX single_cpu_ipi\n");
}

void
selected_cpu_ipi(cpumask_t target, int vector, int delivery_mode)
{
	crit_enter();
	while (CPUMASK_TESTNZERO(target)) {
		int n = BSFCPUMASK(target);
		CPUMASK_NANDBIT(target, n);
		single_cpu_ipi(n, vector, delivery_mode);
	}
	crit_exit();
}

int
stop_cpus(cpumask_t map)
{
	CPUMASK_ANDMASK(map, smp_active_mask);

	crit_enter();
	while (CPUMASK_TESTNZERO(map)) {
		int n = BSFCPUMASK(map);
		CPUMASK_NANDBIT(map, n);
		ATOMIC_CPUMASK_ORBIT(stopped_cpus, n);
		if (pthread_kill(ap_tids[n], SIGXCPU) != 0)
			panic("stop_cpus: pthread_kill failed");
	}
	crit_exit();
#if 0
	panic("XXX stop_cpus()");
#endif

	return(1);
}

int
restart_cpus(cpumask_t map)
{
	CPUMASK_ANDMASK(map, smp_active_mask);

	crit_enter();
	while (CPUMASK_TESTNZERO(map)) {
		int n = BSFCPUMASK(map);
		CPUMASK_NANDBIT(map, n);
		ATOMIC_CPUMASK_NANDBIT(stopped_cpus, n);
		if (pthread_kill(ap_tids[n], SIGXCPU) != 0)
			panic("restart_cpus: pthread_kill failed");
	}
	crit_exit();
#if 0
	panic("XXX restart_cpus()");
#endif

	return(1);
}

void
ap_init(void)
{
	/*
	 * Adjust smp_startup_mask to signal the BSP that we have started
	 * up successfully.  Note that we do not yet hold the BGL.  The BSP
	 * is waiting for our signal.
	 *
	 * We can't set our bit in smp_active_mask yet because we are holding
	 * interrupts physically disabled and remote cpus could deadlock
	 * trying to send us an IPI.
	 */
	ATOMIC_CPUMASK_ORBIT(smp_startup_mask, mycpu->gd_cpuid);
	cpu_mfence();

	/*
	 * Interlock for finalization.  Wait until mp_finish is non-zero,
	 * then get the MP lock.
	 *
	 * Note: We are in a critical section.
	 *
	 * Note: we are the idle thread; we can only spin.
	 *
	 * Note: The load fence is a compiler and memory barrier; it prevents
	 * both the compiler and the cpu from improperly caching mp_finish.
	 */
	while (mp_finish == 0) {
		cpu_lfence();
		DELAY(500000);
	}
	while (try_mplock() == 0)
		DELAY(100000);

	/* BSP may have changed PTD while we're waiting for the lock */
	cpu_invltlb();

	/* Build our map of 'other' CPUs. */
	mycpu->gd_other_cpus = smp_startup_mask;
	CPUMASK_NANDBIT(mycpu->gd_other_cpus, mycpu->gd_cpuid);

	kprintf("SMP: AP CPU #%d Launched!\n", mycpu->gd_cpuid);

	/* Set memory range attributes for this CPU to match the BSP */
	mem_range_AP_init();

	/*
	 * Once we go active we must process any IPIQ messages that may
	 * have been queued, because no actual IPI will occur until we
	 * set our bit in the smp_active_mask.  If we don't, the IPI
	 * message interlock could be left set which would also prevent
	 * further IPIs.
	 *
	 * The idle loop doesn't expect the BGL to be held and, while
	 * lwkt_switch() normally cleans things up, this is a special case
	 * because we are returning almost directly into the idle loop.
	 *
	 * The idle thread is never placed on the runq, make sure
	 * nothing we've done has put it there.
	 */
	KKASSERT(get_mplock_count(curthread) == 1);
	ATOMIC_CPUMASK_ORBIT(smp_active_mask, mycpu->gd_cpuid);

	mdcpu->gd_fpending = 0;
	mdcpu->gd_ipending = 0;
	initclocks_pcpu();	/* clock interrupts (via IPIs) */

	/*
	 * Since we may have cleaned up the interrupt triggers, manually
	 * process any pending IPIs before exiting our critical section.
	 * Once the critical section has exited, normal interrupt processing
	 * may occur.
	 */
	atomic_swap_int(&mycpu->gd_npoll, 0);
	lwkt_process_ipiq();

	/*
	 * Releasing the mp lock lets the BSP finish up the SMP init
	 */
	rel_mplock();
	KKASSERT((curthread->td_flags & TDF_RUNQ) == 0);
}

void
init_secondary(void)
{
	int myid = bootAP;
	struct mdglobaldata *md;
	struct privatespace *ps;

	ps = &CPU_prvspace[myid];

	KKASSERT(ps->mdglobaldata.mi.gd_prvspace == ps);

	/*
	 * Setup the %gs for cpu #n.  The mycpu macro works after this
	 * point.  Note that %fs is used by pthreads.
	 */
	tls_set_gs(&CPU_prvspace[myid], sizeof(struct privatespace));

	md = mdcpu;	/* loaded through %gs:0 (mdglobaldata.mi.gd_prvspace) */

	/* JG */
	md->gd_common_tss.tss_rsp0 = 0;	/* not used until after switch */
	//md->gd_common_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
	//md->gd_common_tss.tss_ioopt = (sizeof md->gd_common_tss) << 16;

	/*
	 * Set to a known state:
	 *	Set by mpboot.s: CR0_PG, CR0_PE
	 *	Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
	 */
}

static int
start_all_aps(u_int boot_addr)
{
	int x, i;
	struct mdglobaldata *gd;
	struct privatespace *ps;
	vm_page_t m;
	vm_offset_t va;
	void *stack;
	pthread_attr_t attr;
	size_t ipiq_size;
#if 0
	struct lwp_params params;
#endif

	/*
	 * needed for ipis to initial thread
	 * FIXME: rename ap_tids?
	 */
	ap_tids[0] = pthread_self();
	pthread_attr_init(&attr);

	vm_object_hold(&kernel_object);
	for (x = 1; x <= naps; ++x) {
		/* Allocate space for the CPU's private space. */
		for (i = 0; i < sizeof(struct mdglobaldata); i += PAGE_SIZE) {
			va = (vm_offset_t)&CPU_prvspace[x].mdglobaldata + i;
			m = vm_page_alloc(&kernel_object, va, VM_ALLOC_SYSTEM);
			pmap_kenter_quick(va, m->phys_addr);
		}

		for (i = 0; i < sizeof(CPU_prvspace[x].idlestack); i += PAGE_SIZE) {
			va = (vm_offset_t)&CPU_prvspace[x].idlestack + i;
			m = vm_page_alloc(&kernel_object, va, VM_ALLOC_SYSTEM);
			pmap_kenter_quick(va, m->phys_addr);
		}

		gd = &CPU_prvspace[x].mdglobaldata;	/* official location */
		bzero(gd, sizeof(*gd));
		gd->mi.gd_prvspace = ps = &CPU_prvspace[x];

		/* prime data page for it to use */
		mi_gdinit(&gd->mi, x);
		cpu_gdinit(gd, x);

#if 0
		gd->gd_CMAP1 = pmap_kpte((vm_offset_t)CPU_prvspace[x].CPAGE1);
		gd->gd_CMAP2 = pmap_kpte((vm_offset_t)CPU_prvspace[x].CPAGE2);
		gd->gd_CMAP3 = pmap_kpte((vm_offset_t)CPU_prvspace[x].CPAGE3);
		gd->gd_PMAP1 = pmap_kpte((vm_offset_t)CPU_prvspace[x].PPAGE1);
		gd->gd_CADDR1 = ps->CPAGE1;
		gd->gd_CADDR2 = ps->CPAGE2;
		gd->gd_CADDR3 = ps->CPAGE3;
		gd->gd_PADDR1 = (vpte_t *)ps->PPAGE1;
#endif

		ipiq_size = sizeof(struct lwkt_ipiq) * (naps + 1);
		gd->mi.gd_ipiq = (void *)kmem_alloc(&kernel_map, ipiq_size,
						    VM_SUBSYS_IPIQ);
		bzero(gd->mi.gd_ipiq, ipiq_size);

		/* initialize arc4random. */
		arc4_init_pcpu(x);

		/*
		 * Setup the AP boot stack
		 */
		bootSTK = &ps->idlestack[UPAGES * PAGE_SIZE / 2];
		bootAP = x;

		/*
		 * Setup the AP's lwp, this is the 'cpu'
		 *
		 * We have to make sure our signals are masked or the new LWP
		 * may pick up a signal that it isn't ready for yet.  SMP
		 * startup occurs after SI_BOOT2_LEAVE_CRIT so interrupts
		 * have already been enabled.
		 */
		cpu_disable_intr();

		if (vmm_enabled) {
			stack = mmap(NULL, KERNEL_STACK_SIZE,
				     PROT_READ|PROT_WRITE|PROT_EXEC,
				     MAP_ANON, -1, 0);
			if (stack == MAP_FAILED) {
				panic("Unable to allocate stack for thread %d\n", x);
			}
			pthread_attr_setstack(&attr, stack, KERNEL_STACK_SIZE);
		}

		pthread_create(&ap_tids[x], &attr, start_ap, NULL);
		cpu_enable_intr();

		while (CPUMASK_TESTBIT(smp_startup_mask, x) == 0) {
			cpu_lfence(); /* XXX spin until the AP has started */
			DELAY(1000);
		}
	}
	vm_object_drop(&kernel_object);
	pthread_attr_destroy(&attr);

	return(ncpus - 1);
}

/*
 * CPU TOPOLOGY DETECTION FUNCTIONS.
 */
void
detect_cpu_topology(void)
{
	logical_CPU_bits = vkernel_b_arg;
	core_bits = vkernel_B_arg;
}

int
get_chip_ID(int cpuid)
{
	return get_apicid_from_cpuid(cpuid) >>
	    (logical_CPU_bits + core_bits);
}

int
get_chip_ID_from_APICID(int apicid)
{
	return apicid >> (logical_CPU_bits + core_bits);
}

int
get_core_number_within_chip(int cpuid)
{
	return ((get_apicid_from_cpuid(cpuid) >> logical_CPU_bits) &
	    ((1 << core_bits) - 1));
}

int
get_logical_CPU_number_within_core(int cpuid)
{
	return (get_apicid_from_cpuid(cpuid) &
	    ((1 << logical_CPU_bits) - 1));
}