/*
 * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/memrange.h>
#include <sys/tls.h>
#include <sys/types.h>
#include <sys/vmm.h>

#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>

#include <sys/mplock2.h>

#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/globaldata.h>
#include <machine/md_var.h>
#include <machine/pmap.h>
#include <machine/smp.h>
#include <machine/tls.h>
#include <machine/param.h>

#include <sys/mman.h>	/* mmap()/MAP_FAILED, used for VMM thread stacks */
#include <unistd.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>

extern pt_entry_t *KPTphys;

extern int vmm_enabled;

volatile cpumask_t stopped_cpus;
/* which cpus are ready for IPIs etc? */
cpumask_t smp_active_mask = CPUMASK_INITIALIZER_ONLYONE;
static int boot_address;
/* which cpus have been started */
static cpumask_t smp_startup_mask = CPUMASK_INITIALIZER_ONLYONE;
static int mp_finish;

/* Local data for detecting CPU TOPOLOGY */
static int core_bits = 0;
static int logical_CPU_bits = 0;

/* function prototypes XXX these should go elsewhere */
void bootstrap_idle(void);
void single_cpu_ipi(int, int, int);
void selected_cpu_ipi(cpumask_t, int, int);
#if 0
void ipi_handler(int);
#endif

pt_entry_t *SMPpt;

/* AP uses this during bootstrap.  Do not staticize.  */
char *bootSTK;
static int bootAP;

/* XXX these need to go into the appropriate header file */
static int start_all_aps(u_int);
void init_secondary(void);
void *start_ap(void *);
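
/*
 * In the vkernel each AP is a host pthread (see start_all_aps() and
 * start_ap()), and "IPIs" are delivered by signaling the target thread:
 * SIGUSR1 drives the IPI queue (cpu_send_ipiq()) and SIGXCPU implements
 * stop_cpus()/restart_cpus().
 */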

/*
 * Get SMP fully working before we start initializing devices.
 */
static
void
ap_finish(void)
{
	mp_finish = 1;
	if (bootverbose)
		kprintf("Finish MP startup\n");

	/* build our map of 'other' CPUs */
	mycpu->gd_other_cpus = smp_startup_mask;
	CPUMASK_NANDBIT(mycpu->gd_other_cpus, mycpu->gd_cpuid);

	/*
	 * Let the other cpus finish initializing and build their map
	 * of 'other' CPUs.
	 */
	rel_mplock();
	while (CPUMASK_CMPMASKNEQ(smp_active_mask, smp_startup_mask)) {
		DELAY(100000);
		cpu_lfence();
	}

	while (try_mplock() == 0)
		DELAY(100000);
	if (bootverbose)
		kprintf("Active CPU Mask: %08lx\n",
			(long)CPUMASK_LOWMASK(smp_active_mask));
}

SYSINIT(finishsmp, SI_BOOT2_FINISH_SMP, SI_ORDER_FIRST, ap_finish, NULL);

void *
start_ap(void *arg __unused)
{
	init_secondary();
	setrealcpu();
	bootstrap_idle();

	return(NULL); /* NOTREACHED */
}

/* storage for AP thread IDs */
pthread_t ap_tids[MAXCPU];

int naps;

void
mp_start(void)
{
	size_t ipiq_size;
	int shift;

	ncpus = optcpus;
	naps = ncpus - 1;

	for (shift = 0; (1 << shift) <= ncpus; ++shift)
		;
	--shift;

	/* ncpus_fit -- ncpus rounded up to the nearest power of 2 */
	if ((1 << shift) < ncpus)
		++shift;
	ncpus_fit = 1 << shift;
	ncpus_fit_mask = ncpus_fit - 1;

	/*
	 * cpu0 initialization
	 */
	ipiq_size = sizeof(struct lwkt_ipiq) * ncpus;
	mycpu->gd_ipiq = (void *)kmem_alloc(&kernel_map, ipiq_size,
					    VM_SUBSYS_IPIQ);
	bzero(mycpu->gd_ipiq, ipiq_size);

	/* initialize arc4random */
	arc4_init_pcpu(0);

	/*
	 * cpu 1-(n-1)
	 */
	start_all_aps(boot_address);
}

void
mp_announce(void)
{
	int x;

	kprintf("DragonFly/MP: Multiprocessor\n");
	kprintf(" cpu0 (BSP)\n");

	for (x = 1; x <= naps; ++x)
		kprintf(" cpu%d (AP)\n", x);
}

void
cpu_send_ipiq(int dcpu)
{
	if (CPUMASK_TESTBIT(smp_active_mask, dcpu)) {
		if (pthread_kill(ap_tids[dcpu], SIGUSR1) != 0)
			panic("pthread_kill failed in cpu_send_ipiq");
	}
#if 0
	panic("XXX cpu_send_ipiq()");
#endif
}

void
single_cpu_ipi(int cpu, int vector, int delivery_mode)
{
	kprintf("XXX single_cpu_ipi\n");
}

void
selected_cpu_ipi(cpumask_t target, int vector, int delivery_mode)
{
	crit_enter();
	while (CPUMASK_TESTNZERO(target)) {
		int n = BSFCPUMASK(target);
		CPUMASK_NANDBIT(target, n);
		single_cpu_ipi(n, vector, delivery_mode);
	}
	crit_exit();
}

int
stop_cpus(cpumask_t map)
{
	CPUMASK_ANDMASK(map, smp_active_mask);

	crit_enter();
	while (CPUMASK_TESTNZERO(map)) {
		int n = BSFCPUMASK(map);
		CPUMASK_NANDBIT(map, n);
		ATOMIC_CPUMASK_ORBIT(stopped_cpus, n);
		if (pthread_kill(ap_tids[n], SIGXCPU) != 0)
			panic("stop_cpus: pthread_kill failed");
	}
	crit_exit();
#if 0
	panic("XXX stop_cpus()");
#endif

	return(1);
}

int
restart_cpus(cpumask_t map)
{
	CPUMASK_ANDMASK(map, smp_active_mask);

	crit_enter();
	while (CPUMASK_TESTNZERO(map)) {
		int n = BSFCPUMASK(map);
		CPUMASK_NANDBIT(map, n);
		ATOMIC_CPUMASK_NANDBIT(stopped_cpus, n);
		if (pthread_kill(ap_tids[n], SIGXCPU) != 0)
			panic("restart_cpus: pthread_kill failed");
	}
	crit_exit();
#if 0
	panic("XXX restart_cpus()");
#endif

	return(1);
}
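
/*
 * ap_init() runs on each AP from its idle thread during bootstrap.  It
 * signals the BSP via smp_startup_mask, waits for mp_finish, acquires
 * the BGL, and finally marks itself usable in smp_active_mask.
 */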
void
ap_init(void)
{
	/*
	 * Adjust smp_startup_mask to signal the BSP that we have started
	 * up successfully.  Note that we do not yet hold the BGL.  The BSP
	 * is waiting for our signal.
	 *
	 * We can't set our bit in smp_active_mask yet because we are holding
	 * interrupts physically disabled and remote cpus could deadlock
	 * trying to send us an IPI.
	 */
	ATOMIC_CPUMASK_ORBIT(smp_startup_mask, mycpu->gd_cpuid);
	cpu_mfence();

	/*
	 * Interlock for finalization.  Wait until mp_finish is non-zero,
	 * then get the MP lock.
	 *
	 * Note: We are in a critical section.
	 *
	 * Note: we are the idle thread, we can only spin.
	 *
	 * Note: The load fence keeps the compiler from caching mp_finish
	 * in a register and keeps the cpu from using a stale cached value.
	 */
	while (mp_finish == 0) {
		cpu_lfence();
		DELAY(500000);
	}
	while (try_mplock() == 0)
		DELAY(100000);

	/* BSP may have changed PTD while we're waiting for the lock */
	cpu_invltlb();

	/* Build our map of 'other' CPUs. */
	mycpu->gd_other_cpus = smp_startup_mask;
	CPUMASK_NANDBIT(mycpu->gd_other_cpus, mycpu->gd_cpuid);

	kprintf("SMP: AP CPU #%d Launched!\n", mycpu->gd_cpuid);

	/* Set memory range attributes for this CPU to match the BSP */
	mem_range_AP_init();

	/*
	 * Once we go active we must process any IPIQ messages that may
	 * have been queued, because no actual IPI will occur until we
	 * set our bit in the smp_active_mask.  If we don't, the IPI
	 * message interlock could be left set which would also prevent
	 * further IPIs.
	 *
	 * The idle loop doesn't expect the BGL to be held and while
	 * lwkt_switch() normally cleans things up this is a special case
	 * because we are returning almost directly into the idle loop.
	 *
	 * The idle thread is never placed on the runq, make sure
	 * nothing we've done has put it there.
	 */
	KKASSERT(get_mplock_count(curthread) == 1);
	ATOMIC_CPUMASK_ORBIT(smp_active_mask, mycpu->gd_cpuid);

	mdcpu->gd_fpending = 0;
	mdcpu->gd_ipending = 0;
	initclocks_pcpu();	/* clock interrupts (via IPIs) */

	/*
	 * Since we may have cleaned up the interrupt triggers, manually
	 * process any pending IPIs before exiting our critical section.
	 * Once the critical section has exited, normal interrupt processing
	 * may occur.
	 */
	atomic_swap_int(&mycpu->gd_npoll, 0);
	lwkt_process_ipiq();

	/*
	 * Releasing the mp lock lets the BSP finish up the SMP init
	 */
	rel_mplock();
	KKASSERT((curthread->td_flags & TDF_RUNQ) == 0);
}
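
/*
 * Low level per-cpu setup for an AP: point %gs at this cpu's private
 * space so the mycpu/mdcpu macros resolve to the correct globaldata.
 */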
void
init_secondary(void)
{
	int myid = bootAP;
	struct mdglobaldata *md;
	struct privatespace *ps;

	ps = &CPU_prvspace[myid];

	KKASSERT(ps->mdglobaldata.mi.gd_prvspace == ps);

	/*
	 * Setup the %gs for cpu #n.  The mycpu macro works after this
	 * point.  Note that %fs is used by pthreads.
	 */
	tls_set_gs(&CPU_prvspace[myid], sizeof(struct privatespace));

	md = mdcpu;	/* loaded through %gs:0 (mdglobaldata.mi.gd_prvspace) */

	/* JG */
	md->gd_common_tss.tss_rsp0 = 0;	/* not used until after switch */
	//md->gd_common_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
	//md->gd_common_tss.tss_ioopt = (sizeof md->gd_common_tss) << 16;

	/*
	 * Set to a known state:
	 * Set by mpboot.s: CR0_PG, CR0_PE
	 * Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
	 */
}

static int
start_all_aps(u_int boot_addr)
{
	int x, i;
	struct mdglobaldata *gd;
	struct privatespace *ps;
	vm_page_t m;
	vm_offset_t va;
	void *stack;
	pthread_attr_t attr;
	size_t ipiq_size;
#if 0
	struct lwp_params params;
#endif

	/*
	 * needed for ipis to initial thread
	 * FIXME: rename ap_tids?
	 */
	ap_tids[0] = pthread_self();
	pthread_attr_init(&attr);

	vm_object_hold(&kernel_object);
	for (x = 1; x <= naps; ++x) {
		/* Allocate space for the CPU's private space. */
		for (i = 0; i < sizeof(struct mdglobaldata); i += PAGE_SIZE) {
			va = (vm_offset_t)&CPU_prvspace[x].mdglobaldata + i;
			m = vm_page_alloc(&kernel_object, va, VM_ALLOC_SYSTEM);
			pmap_kenter_quick(va, m->phys_addr);
		}

		for (i = 0; i < sizeof(CPU_prvspace[x].idlestack); i += PAGE_SIZE) {
			va = (vm_offset_t)&CPU_prvspace[x].idlestack + i;
			m = vm_page_alloc(&kernel_object, va, VM_ALLOC_SYSTEM);
			pmap_kenter_quick(va, m->phys_addr);
		}

		gd = &CPU_prvspace[x].mdglobaldata;	/* official location */
		bzero(gd, sizeof(*gd));
		gd->mi.gd_prvspace = ps = &CPU_prvspace[x];

		/* prime data page for it to use */
		mi_gdinit(&gd->mi, x);
		cpu_gdinit(gd, x);

#if 0
		gd->gd_CMAP1 = pmap_kpte((vm_offset_t)CPU_prvspace[x].CPAGE1);
		gd->gd_CMAP2 = pmap_kpte((vm_offset_t)CPU_prvspace[x].CPAGE2);
		gd->gd_CMAP3 = pmap_kpte((vm_offset_t)CPU_prvspace[x].CPAGE3);
		gd->gd_PMAP1 = pmap_kpte((vm_offset_t)CPU_prvspace[x].PPAGE1);
		gd->gd_CADDR1 = ps->CPAGE1;
		gd->gd_CADDR2 = ps->CPAGE2;
		gd->gd_CADDR3 = ps->CPAGE3;
		gd->gd_PADDR1 = (vpte_t *)ps->PPAGE1;
#endif

		ipiq_size = sizeof(struct lwkt_ipiq) * (naps + 1);
		gd->mi.gd_ipiq = (void *)kmem_alloc(&kernel_map, ipiq_size,
						    VM_SUBSYS_IPIQ);
		bzero(gd->mi.gd_ipiq, ipiq_size);

		/* initialize arc4random */
		arc4_init_pcpu(x);

		/*
		 * Setup the AP boot stack
		 */
		bootSTK = &ps->idlestack[UPAGES * PAGE_SIZE / 2];
		bootAP = x;
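
		/*
		 * NOTE: bootSTK and bootAP are plain globals.  The
		 * smp_startup_mask wait at the bottom of this loop keeps
		 * them stable until the new AP has consumed them in
		 * init_secondary().
		 */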

		/*
		 * Setup the AP's lwp, this is the 'cpu'
		 *
		 * We have to make sure our signals are masked or the new LWP
		 * may pick up a signal that it isn't ready for yet.  SMP
		 * startup occurs after SI_BOOT2_LEAVE_CRIT so interrupts
		 * have already been enabled.
		 */
		cpu_disable_intr();

		if (vmm_enabled) {
			stack = mmap(NULL, KERNEL_STACK_SIZE,
				     PROT_READ|PROT_WRITE|PROT_EXEC,
				     MAP_ANON, -1, 0);
			if (stack == MAP_FAILED) {
				panic("Unable to allocate stack for thread %d\n", x);
			}
			pthread_attr_setstack(&attr, stack, KERNEL_STACK_SIZE);
		}

		pthread_create(&ap_tids[x], &attr, start_ap, NULL);
		cpu_enable_intr();

		while (CPUMASK_TESTBIT(smp_startup_mask, x) == 0) {
			cpu_lfence(); /* XXX spin until the AP has started */
			DELAY(1000);
		}
	}
	vm_object_drop(&kernel_object);
	pthread_attr_destroy(&attr);

	return(ncpus - 1);
}

/*
 * CPU TOPOLOGY DETECTION FUNCTIONS.
 */
void
detect_cpu_topology(void)
{
	logical_CPU_bits = vkernel_b_arg;
	core_bits = vkernel_B_arg;
}

int
get_chip_ID(int cpuid)
{
	return get_apicid_from_cpuid(cpuid) >>
	    (logical_CPU_bits + core_bits);
}

int
get_chip_ID_from_APICID(int apicid)
{
	return apicid >> (logical_CPU_bits + core_bits);
}

int
get_core_number_within_chip(int cpuid)
{
	return ((get_apicid_from_cpuid(cpuid) >> logical_CPU_bits) &
	    ((1 << core_bits) - 1));
}

int
get_logical_CPU_number_within_core(int cpuid)
{
	return (get_apicid_from_cpuid(cpuid) &
	    ((1 << logical_CPU_bits) - 1));
}
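
/*
 * Worked example for the decoding above (hypothetical tunable values):
 * with vkernel_b_arg = 1 (logical_CPU_bits) and vkernel_B_arg = 2
 * (core_bits), APIC ID 13 (binary 1101) decodes to chip 1 (13 >> 3),
 * core 2 ((13 >> 1) & 3), and logical cpu 1 (13 & 1).
 */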