/*
 * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/memrange.h>
#include <sys/tls.h>
#include <sys/types.h>
#include <sys/vmm.h>

#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>

#include <sys/mplock2.h>

#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/globaldata.h>
#include <machine/md_var.h>
#include <machine/pmap.h>
#include <machine/smp.h>
#include <machine/tls.h>
#include <machine/param.h>

#include <unistd.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <sys/mman.h>           /* mmap() used for the VMM thread stacks below */

extern pt_entry_t *KPTphys;

extern int vmm_enabled;

volatile cpumask_t stopped_cpus;
/* which cpus are ready for IPIs etc? */
cpumask_t smp_active_mask = CPUMASK_INITIALIZER_ONLYONE;
static int boot_address;
/* which cpus have been started */
static cpumask_t smp_startup_mask = CPUMASK_INITIALIZER_ONLYONE;
static int mp_finish;

/* Local data for detecting CPU TOPOLOGY */
static int core_bits = 0;
static int logical_CPU_bits = 0;

/* function prototypes XXX these should go elsewhere */
void bootstrap_idle(void);
void single_cpu_ipi(int, int, int);
void selected_cpu_ipi(cpumask_t, int, int);
#if 0
void ipi_handler(int);
#endif

pt_entry_t *SMPpt;

/* AP uses this during bootstrap.  Do not staticize.  */
char *bootSTK;
static int bootAP;

/* XXX these need to go into the appropriate header file */
static int start_all_aps(u_int);
void init_secondary(void);
void *start_ap(void *);

/*
 * Get SMP fully working before we start initializing devices.
 */
static
void
ap_finish(void)
{
        mp_finish = 1;
        if (bootverbose)
                kprintf("Finish MP startup\n");

        /* build our map of 'other' CPUs */
        mycpu->gd_other_cpus = smp_startup_mask;
        CPUMASK_NANDBIT(mycpu->gd_other_cpus, mycpu->gd_cpuid);

        /*
         * Let the other cpus finish initializing and build their map
         * of 'other' CPUs.
         */
        rel_mplock();
        while (CPUMASK_CMPMASKNEQ(smp_active_mask, smp_startup_mask)) {
                DELAY(100000);
                cpu_lfence();
        }

        while (try_mplock() == 0)
                DELAY(100000);
        if (bootverbose)
                kprintf("Active CPU Mask: %08lx\n",
                        (long)CPUMASK_LOWMASK(smp_active_mask));
}

SYSINIT(finishsmp, SI_BOOT2_FINISH_SMP, SI_ORDER_FIRST, ap_finish, NULL);

void *
start_ap(void *arg __unused)
{
        init_secondary();
        setrealcpu();
        bootstrap_idle();

        return(NULL); /* NOTREACHED */
}

/* storage for AP thread IDs */
pthread_t ap_tids[MAXCPU];

int naps;

void
mp_start(void)
{
        size_t ipiq_size;
        int shift;

        ncpus = optcpus;
        naps = ncpus - 1;

        /* ncpus2 -- ncpus rounded down to the nearest power of 2 (e.g. 6 -> 4) */
        for (shift = 0; (1 << shift) <= ncpus; ++shift)
                ;
        --shift;
        ncpus2_shift = shift;
        ncpus2 = 1 << shift;
        ncpus2_mask = ncpus2 - 1;

        /* ncpus_fit -- ncpus rounded up to the nearest power of 2 (e.g. 6 -> 8) */
        if ((1 << shift) < ncpus)
                ++shift;
        ncpus_fit = 1 << shift;
        ncpus_fit_mask = ncpus_fit - 1;

        /*
         * cpu0 initialization
         */
        ipiq_size = sizeof(struct lwkt_ipiq) * ncpus;
        mycpu->gd_ipiq = (void *)kmem_alloc(&kernel_map, ipiq_size,
                                            VM_SUBSYS_IPIQ);
        bzero(mycpu->gd_ipiq, ipiq_size);

        /*
         * cpu 1-(n-1)
         */
        start_all_aps(boot_address);
}

void
mp_announce(void)
{
        int x;

        kprintf("DragonFly/MP: Multiprocessor\n");
        kprintf(" cpu0 (BSP)\n");

        for (x = 1; x <= naps; ++x)
                kprintf(" cpu%d (AP)\n", x);
}

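/*
 * IPI emulation.  The vkernel has no real local APIC, so an "IPI" is
 * delivered by signalling the target cpu's host pthread with
 * pthread_kill(): SIGUSR1 for normal IPIQ processing (cpu_send_ipiq)
 * and SIGXCPU for stop_cpus()/restart_cpus(), as implemented by the
 * functions below.
 */
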
void
cpu_send_ipiq(int dcpu)
{
        if (CPUMASK_TESTBIT(smp_active_mask, dcpu)) {
                if (pthread_kill(ap_tids[dcpu], SIGUSR1) != 0)
                        panic("pthread_kill failed in cpu_send_ipiq");
        }
#if 0
        panic("XXX cpu_send_ipiq()");
#endif
}

void
single_cpu_ipi(int cpu, int vector, int delivery_mode)
{
        kprintf("XXX single_cpu_ipi\n");
}

void
selected_cpu_ipi(cpumask_t target, int vector, int delivery_mode)
{
        crit_enter();
        while (CPUMASK_TESTNZERO(target)) {
                int n = BSFCPUMASK(target);
                CPUMASK_NANDBIT(target, n);
                single_cpu_ipi(n, vector, delivery_mode);
        }
        crit_exit();
}

int
stop_cpus(cpumask_t map)
{
        CPUMASK_ANDMASK(map, smp_active_mask);

        crit_enter();
        while (CPUMASK_TESTNZERO(map)) {
                int n = BSFCPUMASK(map);
                CPUMASK_NANDBIT(map, n);
                ATOMIC_CPUMASK_ORBIT(stopped_cpus, n);
                if (pthread_kill(ap_tids[n], SIGXCPU) != 0)
                        panic("stop_cpus: pthread_kill failed");
        }
        crit_exit();
#if 0
        panic("XXX stop_cpus()");
#endif

        return(1);
}

int
restart_cpus(cpumask_t map)
{
        CPUMASK_ANDMASK(map, smp_active_mask);

        crit_enter();
        while (CPUMASK_TESTNZERO(map)) {
                int n = BSFCPUMASK(map);
                CPUMASK_NANDBIT(map, n);
                ATOMIC_CPUMASK_NANDBIT(stopped_cpus, n);
                if (pthread_kill(ap_tids[n], SIGXCPU) != 0)
                        panic("restart_cpus: pthread_kill failed");
        }
        crit_exit();
#if 0
        panic("XXX restart_cpus()");
#endif

        return(1);
}

void
ap_init(void)
{
        /*
         * Adjust smp_startup_mask to signal the BSP that we have started
         * up successfully.  Note that we do not yet hold the BGL.  The BSP
         * is waiting for our signal.
         *
         * We can't set our bit in smp_active_mask yet because we are holding
         * interrupts physically disabled and remote cpus could deadlock
         * trying to send us an IPI.
         */
        ATOMIC_CPUMASK_ORBIT(smp_startup_mask, mycpu->gd_cpuid);
        cpu_mfence();

        /*
         * Interlock for finalization.  Wait until mp_finish is non-zero,
         * then get the MP lock.
         *
         * Note: We are in a critical section.
         *
         * Note: we are the idle thread, we can only spin.
         *
         * Note: The load fence is memory volatile and prevents the compiler
         * from improperly caching mp_finish, and the cpu from improperly
         * caching it.
         */
        while (mp_finish == 0) {
                cpu_lfence();
                DELAY(500000);
        }
        while (try_mplock() == 0)
                DELAY(100000);

        /* BSP may have changed PTD while we're waiting for the lock */
        cpu_invltlb();

        /* Build our map of 'other' CPUs. */
        mycpu->gd_other_cpus = smp_startup_mask;
        CPUMASK_NANDBIT(mycpu->gd_other_cpus, mycpu->gd_cpuid);

        kprintf("SMP: AP CPU #%d Launched!\n", mycpu->gd_cpuid);

        /* Set memory range attributes for this CPU to match the BSP */
        mem_range_AP_init();

        /*
         * Once we go active we must process any IPIQ messages that may
         * have been queued, because no actual IPI will occur until we
         * set our bit in the smp_active_mask.  If we don't, the IPI
         * message interlock could be left set which would also prevent
         * further IPIs.
         *
         * The idle loop doesn't expect the BGL to be held and while
         * lwkt_switch() normally cleans things up this is a special case
         * because we are returning almost directly into the idle loop.
         *
         * The idle thread is never placed on the runq, make sure
         * nothing we've done put it there.
         */
        KKASSERT(get_mplock_count(curthread) == 1);
        ATOMIC_CPUMASK_ORBIT(smp_active_mask, mycpu->gd_cpuid);

        mdcpu->gd_fpending = 0;
        mdcpu->gd_ipending = 0;
        initclocks_pcpu();      /* clock interrupts (via IPIs) */

        /*
         * Since we may have cleaned up the interrupt triggers, manually
         * process any pending IPIs before exiting our critical section.
         * Once the critical section has exited, normal interrupt processing
         * may occur.
         */
        atomic_swap_int(&mycpu->gd_npoll, 0);
        lwkt_process_ipiq();

        /*
         * Releasing the MP lock lets the BSP finish up the SMP init
         */
        rel_mplock();
        KKASSERT((curthread->td_flags & TDF_RUNQ) == 0);
}

void
init_secondary(void)
{
        int myid = bootAP;
        struct mdglobaldata *md;
        struct privatespace *ps;

        ps = &CPU_prvspace[myid];

        KKASSERT(ps->mdglobaldata.mi.gd_prvspace == ps);

        /*
         * Setup the %gs for cpu #n.  The mycpu macro works after this
         * point.  Note that %fs is used by pthreads.
         */
        tls_set_gs(&CPU_prvspace[myid], sizeof(struct privatespace));

        md = mdcpu;     /* loaded through %gs:0 (mdglobaldata.mi.gd_prvspace) */

        /* JG */
        md->gd_common_tss.tss_rsp0 = 0; /* not used until after switch */
        //md->gd_common_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
        //md->gd_common_tss.tss_ioopt = (sizeof md->gd_common_tss) << 16;

        /*
         * Set to a known state:
         *      Set by mpboot.s: CR0_PG, CR0_PE
         *      Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
         */
}

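/*
 * Start the "APs".  Each AP is a host pthread: for every AP we allocate
 * and map its private globaldata space and idle stack, prime the
 * globaldata, create the thread (which runs start_ap() -> init_secondary(),
 * setrealcpu(), bootstrap_idle()), and then spin until the AP sets its
 * bit in smp_startup_mask.
 */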
static int
start_all_aps(u_int boot_addr)
{
        int x, i;
        struct mdglobaldata *gd;
        struct privatespace *ps;
        vm_page_t m;
        vm_offset_t va;
        void *stack;
        pthread_attr_t attr;
        size_t ipiq_size;
#if 0
        struct lwp_params params;
#endif

        /*
         * needed for ipis to initial thread
         * FIXME: rename ap_tids?
         */
        ap_tids[0] = pthread_self();
        pthread_attr_init(&attr);

        vm_object_hold(&kernel_object);
        for (x = 1; x <= naps; ++x) {
                /* Allocate space for the CPU's private space. */
                for (i = 0; i < sizeof(struct mdglobaldata); i += PAGE_SIZE) {
                        va = (vm_offset_t)&CPU_prvspace[x].mdglobaldata + i;
                        m = vm_page_alloc(&kernel_object, va, VM_ALLOC_SYSTEM);
                        pmap_kenter_quick(va, m->phys_addr);
                }

                for (i = 0; i < sizeof(CPU_prvspace[x].idlestack); i += PAGE_SIZE) {
                        va = (vm_offset_t)&CPU_prvspace[x].idlestack + i;
                        m = vm_page_alloc(&kernel_object, va, VM_ALLOC_SYSTEM);
                        pmap_kenter_quick(va, m->phys_addr);
                }

                gd = &CPU_prvspace[x].mdglobaldata;     /* official location */
                bzero(gd, sizeof(*gd));
                gd->mi.gd_prvspace = ps = &CPU_prvspace[x];

                /* prime data page for it to use */
                mi_gdinit(&gd->mi, x);
                cpu_gdinit(gd, x);

#if 0
                gd->gd_CMAP1 = pmap_kpte((vm_offset_t)CPU_prvspace[x].CPAGE1);
                gd->gd_CMAP2 = pmap_kpte((vm_offset_t)CPU_prvspace[x].CPAGE2);
                gd->gd_CMAP3 = pmap_kpte((vm_offset_t)CPU_prvspace[x].CPAGE3);
                gd->gd_PMAP1 = pmap_kpte((vm_offset_t)CPU_prvspace[x].PPAGE1);
                gd->gd_CADDR1 = ps->CPAGE1;
                gd->gd_CADDR2 = ps->CPAGE2;
                gd->gd_CADDR3 = ps->CPAGE3;
                gd->gd_PADDR1 = (vpte_t *)ps->PPAGE1;
#endif

                ipiq_size = sizeof(struct lwkt_ipiq) * (naps + 1);
                gd->mi.gd_ipiq = (void *)kmem_alloc(&kernel_map, ipiq_size,
                                                    VM_SUBSYS_IPIQ);
                bzero(gd->mi.gd_ipiq, ipiq_size);

                /*
                 * Setup the AP boot stack
                 */
                bootSTK = &ps->idlestack[UPAGES * PAGE_SIZE / 2];
                bootAP = x;

                /*
                 * Setup the AP's lwp, this is the 'cpu'
                 *
                 * We have to make sure our signals are masked or the new LWP
                 * may pick up a signal that it isn't ready for yet.  SMP
                 * startup occurs after SI_BOOT2_LEAVE_CRIT so interrupts
                 * have already been enabled.
                 */
                cpu_disable_intr();

                if (vmm_enabled) {
                        stack = mmap(NULL, KERNEL_STACK_SIZE,
                                     PROT_READ|PROT_WRITE|PROT_EXEC,
                                     MAP_ANON, -1, 0);
                        if (stack == MAP_FAILED) {
                                panic("Unable to allocate stack for thread %d\n", x);
                        }
                        pthread_attr_setstack(&attr, stack, KERNEL_STACK_SIZE);
                }

                pthread_create(&ap_tids[x], &attr, start_ap, NULL);
                cpu_enable_intr();

                while (CPUMASK_TESTBIT(smp_startup_mask, x) == 0) {
                        cpu_lfence(); /* XXX spin until the AP has started */
                        DELAY(1000);
                }
        }
        vm_object_drop(&kernel_object);
        pthread_attr_destroy(&attr);

        return(ncpus - 1);
}

/*
 * CPU TOPOLOGY DETECTION FUNCTIONS.
 */
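/*
 * The accessors below assume the APIC ID returned by
 * get_apicid_from_cpuid() is laid out as
 *
 *      [ chip ID | core (core_bits) | logical cpu (logical_CPU_bits) ]
 *
 * For example, with logical_CPU_bits = 1 and core_bits = 2, an APIC ID
 * of 11 (binary 1011) decodes to chip 1, core 1, logical cpu 1.  The
 * bit widths come from the vkernel topology arguments (vkernel_b_arg /
 * vkernel_B_arg) via detect_cpu_topology().
 */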
void
detect_cpu_topology(void)
{
        logical_CPU_bits = vkernel_b_arg;
        core_bits = vkernel_B_arg;
}

int
get_chip_ID(int cpuid)
{
        return get_apicid_from_cpuid(cpuid) >>
            (logical_CPU_bits + core_bits);
}

int
get_chip_ID_from_APICID(int apicid)
{
        return apicid >> (logical_CPU_bits + core_bits);
}

int
get_core_number_within_chip(int cpuid)
{
        return ((get_apicid_from_cpuid(cpuid) >> logical_CPU_bits) &
            ((1 << core_bits) - 1));
}

int
get_logical_CPU_number_within_core(int cpuid)
{
        return (get_apicid_from_cpuid(cpuid) &
            ((1 << logical_CPU_bits) - 1));
}