/*
 * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/memrange.h>
#include <sys/mman.h>		/* mmap() for the vmm stack below */
#include <sys/tls.h>
#include <sys/types.h>
#include <sys/vmm.h>

#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>

#include <sys/mplock2.h>

#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/globaldata.h>
#include <machine/md_var.h>
#include <machine/pmap.h>
#include <machine/smp.h>
#include <machine/tls.h>
#include <machine/param.h>

#include <unistd.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>

extern pt_entry_t *KPTphys;

extern int vmm_enabled;

volatile cpumask_t stopped_cpus;
/* which cpus are ready for IPIs etc? */
cpumask_t smp_active_mask = CPUMASK_INITIALIZER_ONLYONE;
static int boot_address;
/* which cpus have been started */
static cpumask_t smp_startup_mask = CPUMASK_INITIALIZER_ONLYONE;
int mp_naps;				/* # of Application Processors */
static int mp_finish;

/* Local data for detecting CPU TOPOLOGY */
static int core_bits = 0;
static int logical_CPU_bits = 0;

/* function prototypes XXX these should go elsewhere */
void bootstrap_idle(void);
void single_cpu_ipi(int, int, int);
void selected_cpu_ipi(cpumask_t, int, int);
#if 0
void ipi_handler(int);
#endif

pt_entry_t *SMPpt;

/* AP uses this during bootstrap.  Do not staticize.  */
char *bootSTK;
static int bootAP;

/* XXX these need to go into the appropriate header file */
static int start_all_aps(u_int);
void init_secondary(void);
void *start_ap(void *);
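
/*
 * Overview of the BSP <-> AP startup handshake, as a descriptive sketch
 * inferred from the code in this file.  In the vkernel each AP is a host
 * pthread rather than real hardware:
 *
 *      BSP                                AP (one pthread per cpu)
 *      ---                                ---
 *      start_all_aps():
 *        pthread_create()  ---------->    start_ap():
 *        spin until the AP sets its         init_secondary()
 *        smp_startup_mask bit  <------      bootstrap_idle() -> ap_init():
 *                                             set smp_startup_mask bit
 *      ap_finish() (SYSINIT):                 spin until mp_finish != 0
 *        mp_finish = 1  ------------->        acquire BGL
 *        rel_mplock()                         set smp_active_mask bit
 *        spin until smp_active_mask           rel_mplock()
 *        catches up to smp_startup_mask
 */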

/*
 * Get SMP fully working before we start initializing devices.
 */
static
void
ap_finish(void)
{
	mp_finish = 1;
	if (bootverbose)
		kprintf("Finish MP startup\n");

	/* build our map of 'other' CPUs */
	mycpu->gd_other_cpus = smp_startup_mask;
	CPUMASK_NANDBIT(mycpu->gd_other_cpus, mycpu->gd_cpuid);

	/*
	 * Let the other cpus finish initializing and build their map
	 * of 'other' CPUs.
	 */
	rel_mplock();
	while (CPUMASK_CMPMASKNEQ(smp_active_mask, smp_startup_mask)) {
		DELAY(100000);
		cpu_lfence();
	}

	while (try_mplock() == 0)
		DELAY(100000);
	if (bootverbose)
		kprintf("Active CPU Mask: %08lx\n",
			(long)CPUMASK_LOWMASK(smp_active_mask));
}

SYSINIT(finishsmp, SI_BOOT2_FINISH_SMP, SI_ORDER_FIRST, ap_finish, NULL);

void *
start_ap(void *arg __unused)
{
	init_secondary();
	setrealcpu();
	bootstrap_idle();

	return(NULL); /* NOTREACHED */
}

/* storage for AP thread IDs */
pthread_t ap_tids[MAXCPU];

void
mp_start(void)
{
	size_t ipiq_size;
	int shift;

	ncpus = optcpus;

	mp_naps = ncpus - 1;

	/*
	 * ncpus2 -- ncpus rounded down to the nearest power of 2
	 * (e.g. ncpus == 6 gives ncpus2_shift == 2, ncpus2 == 4).
	 */
	for (shift = 0; (1 << shift) <= ncpus; ++shift)
		;
	--shift;
	ncpus2_shift = shift;
	ncpus2 = 1 << shift;
	ncpus2_mask = ncpus2 - 1;

	/*
	 * ncpus_fit -- ncpus rounded up to the nearest power of 2
	 * (e.g. ncpus == 6 gives ncpus_fit == 8).
	 */
	if ((1 << shift) < ncpus)
		++shift;
	ncpus_fit = 1 << shift;
	ncpus_fit_mask = ncpus_fit - 1;

	/*
	 * cpu0 initialization
	 */
	ipiq_size = sizeof(struct lwkt_ipiq) * ncpus;
	mycpu->gd_ipiq = (void *)kmem_alloc(&kernel_map, ipiq_size);
	bzero(mycpu->gd_ipiq, ipiq_size);

	/*
	 * cpu 1-(n-1)
	 */
	start_all_aps(boot_address);
}

void
mp_announce(void)
{
	int x;

	kprintf("DragonFly/MP: Multiprocessor\n");
	kprintf(" cpu0 (BSP)\n");

	for (x = 1; x <= mp_naps; ++x)
		kprintf(" cpu%d (AP)\n", x);
}

/*
 * Deliver an IPI to a single target cpu by signalling that cpu's
 * pthread; SIGUSR1 stands in for a hardware IPI here.
 */
void
cpu_send_ipiq(int dcpu)
{
	if (CPUMASK_TESTBIT(smp_active_mask, dcpu)) {
		if (pthread_kill(ap_tids[dcpu], SIGUSR1) != 0)
			panic("pthread_kill failed in cpu_send_ipiq");
	}
#if 0
	panic("XXX cpu_send_ipiq()");
#endif
}

void
single_cpu_ipi(int cpu, int vector, int delivery_mode)
{
	kprintf("XXX single_cpu_ipi\n");	/* XXX stub, not implemented */
}

void
selected_cpu_ipi(cpumask_t target, int vector, int delivery_mode)
{
	crit_enter();
	while (CPUMASK_TESTNZERO(target)) {
		int n = BSFCPUMASK(target);
		CPUMASK_NANDBIT(target, n);
		single_cpu_ipi(n, vector, delivery_mode);
	}
	crit_exit();
}
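
/*
 * selected_cpu_ipi() above and stop_cpus()/restart_cpus() below all share
 * the same cpumask iteration idiom: BSFCPUMASK() returns the lowest set
 * cpu id and CPUMASK_NANDBIT() clears that bit, so each pass consumes
 * exactly one cpu until the mask is empty.  A minimal sketch of the
 * pattern:
 *
 *      while (CPUMASK_TESTNZERO(mask)) {
 *              n = BSFCPUMASK(mask);           <- lowest cpu id in mask
 *              CPUMASK_NANDBIT(mask, n);       <- strip it from the mask
 *              operate on cpu n
 *      }
 */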

int
stop_cpus(cpumask_t map)
{
	CPUMASK_ANDMASK(map, smp_active_mask);

	crit_enter();
	while (CPUMASK_TESTNZERO(map)) {
		int n = BSFCPUMASK(map);
		CPUMASK_NANDBIT(map, n);
		ATOMIC_CPUMASK_ORBIT(stopped_cpus, n);
		if (pthread_kill(ap_tids[n], SIGXCPU) != 0)
			panic("stop_cpus: pthread_kill failed");
	}
	crit_exit();
#if 0
	panic("XXX stop_cpus()");
#endif

	return(1);
}

int
restart_cpus(cpumask_t map)
{
	CPUMASK_ANDMASK(map, smp_active_mask);

	crit_enter();
	while (CPUMASK_TESTNZERO(map)) {
		int n = BSFCPUMASK(map);
		CPUMASK_NANDBIT(map, n);
		ATOMIC_CPUMASK_NANDBIT(stopped_cpus, n);
		if (pthread_kill(ap_tids[n], SIGXCPU) != 0)
			panic("restart_cpus: pthread_kill failed");
	}
	crit_exit();
#if 0
	panic("XXX restart_cpus()");
#endif

	return(1);
}
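
/*
 * Hypothetical caller of the stop/restart pair above (illustration only,
 * not code from this file).  The target cpus are parked via SIGXCPU and
 * released the same way:
 *
 *      cpumask_t mask = mycpu->gd_other_cpus;
 *
 *      stop_cpus(mask);        park every other active cpu
 *      ...inspect or modify shared state...
 *      restart_cpus(mask);     let them run again
 */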

void
ap_init(void)
{
	/*
	 * Adjust smp_startup_mask to signal the BSP that we have started
	 * up successfully.  Note that we do not yet hold the BGL.  The BSP
	 * is waiting for our signal.
	 *
	 * We can't set our bit in smp_active_mask yet because we are holding
	 * interrupts physically disabled and remote cpus could deadlock
	 * trying to send us an IPI.
	 */
	ATOMIC_CPUMASK_ORBIT(smp_startup_mask, mycpu->gd_cpuid);
	cpu_mfence();

	/*
	 * Interlock for finalization.  Wait until mp_finish is non-zero,
	 * then get the MP lock.
	 *
	 * Note: We are in a critical section.
	 *
	 * Note: we are the idle thread, we can only spin.
	 *
	 * Note: The load fence acts as both a compiler and a cpu barrier,
	 * so neither can improperly cache a stale copy of mp_finish.
	 */
	while (mp_finish == 0) {
		cpu_lfence();
		DELAY(500000);
	}
	while (try_mplock() == 0)
		DELAY(100000);

	/* BSP may have changed PTD while we're waiting for the lock */
	cpu_invltlb();

	/* Build our map of 'other' CPUs. */
	mycpu->gd_other_cpus = smp_startup_mask;
	CPUMASK_NANDBIT(mycpu->gd_other_cpus, mycpu->gd_cpuid);

	kprintf("SMP: AP CPU #%d Launched!\n", mycpu->gd_cpuid);

	/* Set memory range attributes for this CPU to match the BSP */
	mem_range_AP_init();

	/*
	 * Once we go active we must process any IPIQ messages that may
	 * have been queued, because no actual IPI will occur until we
	 * set our bit in the smp_active_mask.  If we don't, the IPI
	 * message interlock could be left set which would also prevent
	 * further IPIs.
	 *
	 * The idle loop doesn't expect the BGL to be held and while
	 * lwkt_switch() normally cleans things up, this is a special case
	 * because we are returning almost directly into the idle loop.
	 *
	 * The idle thread is never placed on the runq, so make sure
	 * nothing we've done has put it there.
	 */
	KKASSERT(get_mplock_count(curthread) == 1);
	ATOMIC_CPUMASK_ORBIT(smp_active_mask, mycpu->gd_cpuid);

	mdcpu->gd_fpending = 0;
	mdcpu->gd_ipending = 0;
	initclocks_pcpu();	/* clock interrupts (via IPIs) */

	/*
	 * Since we may have cleaned up the interrupt triggers, manually
	 * process any pending IPIs before exiting our critical section.
	 * Once the critical section has exited, normal interrupt processing
	 * may occur.
	 */
	atomic_swap_int(&mycpu->gd_npoll, 0);
	lwkt_process_ipiq();

	/*
	 * Releasing the mp lock lets the BSP finish up the SMP init
	 */
	rel_mplock();
	KKASSERT((curthread->td_flags & TDF_RUNQ) == 0);
}

void
init_secondary(void)
{
	int myid = bootAP;
	struct mdglobaldata *md;
	struct privatespace *ps;

	ps = &CPU_prvspace[myid];

	KKASSERT(ps->mdglobaldata.mi.gd_prvspace == ps);

	/*
	 * Setup the %gs for cpu #n.  The mycpu macro works after this
	 * point.  Note that %fs is used by pthreads.
	 */
	tls_set_gs(&CPU_prvspace[myid], sizeof(struct privatespace));

	md = mdcpu;	/* loaded through %gs:0 (mdglobaldata.mi.gd_prvspace) */

	/* JG */
	md->gd_common_tss.tss_rsp0 = 0;	/* not used until after switch */
	//md->gd_common_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
	//md->gd_common_tss.tss_ioopt = (sizeof md->gd_common_tss) << 16;

	/*
	 * Set to a known state:
	 * Set by mpboot.s: CR0_PG, CR0_PE
	 * Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
	 */
}

static int
start_all_aps(u_int boot_addr)
{
	int x, i;
	struct mdglobaldata *gd;
	struct privatespace *ps;
	vm_page_t m;
	vm_offset_t va;
	void *stack;
	pthread_attr_t attr;
	size_t ipiq_size;
#if 0
	struct lwp_params params;
#endif

	/*
	 * needed for ipis to initial thread
	 * FIXME: rename ap_tids?
	 */
	ap_tids[0] = pthread_self();
	pthread_attr_init(&attr);

	vm_object_hold(&kernel_object);
	for (x = 1; x <= mp_naps; x++) {
		/* Allocate space for the CPU's private space. */
		for (i = 0; i < sizeof(struct mdglobaldata); i += PAGE_SIZE) {
			va = (vm_offset_t)&CPU_prvspace[x].mdglobaldata + i;
			m = vm_page_alloc(&kernel_object, va, VM_ALLOC_SYSTEM);
			pmap_kenter_quick(va, m->phys_addr);
		}

		for (i = 0; i < sizeof(CPU_prvspace[x].idlestack); i += PAGE_SIZE) {
			va = (vm_offset_t)&CPU_prvspace[x].idlestack + i;
			m = vm_page_alloc(&kernel_object, va, VM_ALLOC_SYSTEM);
			pmap_kenter_quick(va, m->phys_addr);
		}

		gd = &CPU_prvspace[x].mdglobaldata;	/* official location */
		bzero(gd, sizeof(*gd));
		gd->mi.gd_prvspace = ps = &CPU_prvspace[x];

		/* prime data page for it to use */
		mi_gdinit(&gd->mi, x);
		cpu_gdinit(gd, x);

#if 0
		gd->gd_CMAP1 = pmap_kpte((vm_offset_t)CPU_prvspace[x].CPAGE1);
		gd->gd_CMAP2 = pmap_kpte((vm_offset_t)CPU_prvspace[x].CPAGE2);
		gd->gd_CMAP3 = pmap_kpte((vm_offset_t)CPU_prvspace[x].CPAGE3);
		gd->gd_PMAP1 = pmap_kpte((vm_offset_t)CPU_prvspace[x].PPAGE1);
		gd->gd_CADDR1 = ps->CPAGE1;
		gd->gd_CADDR2 = ps->CPAGE2;
		gd->gd_CADDR3 = ps->CPAGE3;
		gd->gd_PADDR1 = (vpte_t *)ps->PPAGE1;
#endif

		ipiq_size = sizeof(struct lwkt_ipiq) * (mp_naps + 1);
		gd->mi.gd_ipiq = (void *)kmem_alloc(&kernel_map, ipiq_size);
		bzero(gd->mi.gd_ipiq, ipiq_size);

		/*
		 * Setup the AP boot stack
		 */
		bootSTK = &ps->idlestack[UPAGES * PAGE_SIZE / 2];
		bootAP = x;

		/*
		 * Setup the AP's lwp, this is the 'cpu'.
		 *
		 * We have to make sure our signals are masked or the new LWP
		 * may pick up a signal that it isn't ready for yet.  SMP
		 * startup occurs after SI_BOOT2_LEAVE_CRIT so interrupts
		 * have already been enabled.
		 */
		cpu_disable_intr();

		if (vmm_enabled) {
			stack = mmap(NULL, KERNEL_STACK_SIZE,
				     PROT_READ|PROT_WRITE|PROT_EXEC,
				     MAP_ANON, -1, 0);
			if (stack == MAP_FAILED)
				panic("Unable to allocate stack for thread %d\n", x);
			pthread_attr_setstack(&attr, stack, KERNEL_STACK_SIZE);
		}

		pthread_create(&ap_tids[x], &attr, start_ap, NULL);
		cpu_enable_intr();

		while (CPUMASK_TESTBIT(smp_startup_mask, x) == 0) {
			cpu_lfence(); /* XXX spin until the AP has started */
			DELAY(1000);
		}
	}
	vm_object_drop(&kernel_object);
	pthread_attr_destroy(&attr);

	return(ncpus - 1);
}

/*
 * CPU TOPOLOGY DETECTION FUNCTIONS.
 */

/*
 * The topology is not probed; it is taken from the externally supplied
 * vkernel_b_arg (logical cpu bits) and vkernel_B_arg (core bits) values.
 */
void
detect_cpu_topology(void)
{
	logical_CPU_bits = vkernel_b_arg;
	core_bits = vkernel_B_arg;
}
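
/*
 * Worked example with hypothetical values vkernel_b_arg == 1
 * (logical_CPU_bits) and vkernel_B_arg == 2 (core_bits).  The accessors
 * below then slice an APIC ID of 22 (binary 10110) as:
 *
 *      chip ID:      10110 >> (1 + 2)    = 10 (binary) = 2
 *      core number:  (10110 >> 1) & 11   = 11 (binary) = 3
 *      logical cpu:  10110 & 1           = 0
 */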

int
get_chip_ID(int cpuid)
{
	return get_apicid_from_cpuid(cpuid) >>
	    (logical_CPU_bits + core_bits);
}

int
get_core_number_within_chip(int cpuid)
{
	return (get_apicid_from_cpuid(cpuid) >> logical_CPU_bits) &
	    ((1 << core_bits) - 1);
}

int
get_logical_CPU_number_within_core(int cpuid)
{
	return get_apicid_from_cpuid(cpuid) &
	    ((1 << logical_CPU_bits) - 1);
}
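
/*
 * Usage sketch (hypothetical, not part of this file): dump the detected
 * topology once detect_cpu_topology() has run:
 *
 *      int i;
 *
 *      for (i = 0; i < ncpus; ++i) {
 *              kprintf("cpu%d: chip %d core %d thread %d\n", i,
 *                      get_chip_ID(i),
 *                      get_core_number_within_chip(i),
 *                      get_logical_CPU_number_within_core(i));
 *      }
 */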