xref: /dragonfly/sys/platform/vkernel64/x86_64/mp.c (revision 655933d6)
/*
 * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cpumask.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/tls.h>
#include <sys/types.h>

#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>

#include <sys/mplock2.h>
#include <sys/thread2.h>

#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/globaldata.h>
#include <machine/md_var.h>
#include <machine/pmap.h>
#include <machine/smp.h>
#include <machine/tls.h>
#include <machine/param.h>

#include <unistd.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>

extern pt_entry_t *KPTphys;

volatile cpumask_t stopped_cpus;
/* which cpus are ready for IPIs etc? */
cpumask_t	smp_active_mask = CPUMASK_INITIALIZER_ONLYONE;
static int	boot_address;
/* which cpus have been started */
static cpumask_t smp_startup_mask = CPUMASK_INITIALIZER_ONLYONE;
static int	mp_finish;

/* Local data for detecting CPU TOPOLOGY */
static int core_bits = 0;
static int logical_CPU_bits = 0;

/* function prototypes XXX these should go elsewhere */
void bootstrap_idle(void);
void single_cpu_ipi(int, int, int);
void selected_cpu_ipi(cpumask_t, int, int);
#if 0
void ipi_handler(int);
#endif

pt_entry_t *SMPpt;

/* AP uses this during bootstrap.  Do not staticize.  */
char *bootSTK;
static int bootAP;

/* XXX these need to go into the appropriate header file */
static int start_all_aps(u_int);
void init_secondary(void);
void *start_ap(void *);
/*
 * Get SMP fully working before we start initializing devices.
 */
static
void
ap_finish(void)
{
	mp_finish = 1;
	if (bootverbose)
		kprintf("Finish MP startup\n");

	/* build our map of 'other' CPUs */
	mycpu->gd_other_cpus = smp_startup_mask;
	CPUMASK_NANDBIT(mycpu->gd_other_cpus, mycpu->gd_cpuid);

	/*
	 * Let the other cpus finish initializing and build their map
	 * of 'other' CPUs.
	 */
	rel_mplock();
	while (CPUMASK_CMPMASKNEQ(smp_active_mask, smp_startup_mask)) {
		DELAY(100000);
		cpu_lfence();
	}

	while (try_mplock() == 0)
		DELAY(100000);
	if (bootverbose)
		kprintf("Active CPU Mask: %08lx\n",
			(long)CPUMASK_LOWMASK(smp_active_mask));
}

SYSINIT(finishsmp, SI_BOOT2_FINISH_SMP, SI_ORDER_FIRST, ap_finish, NULL);

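/*
 * Entry point for each AP's backing pthread, created by start_all_aps().
 * It performs the low level per-cpu setup (init_secondary()), calls
 * setrealcpu(), and then drops into bootstrap_idle(), which never
 * returns.
 */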
void *
start_ap(void *arg __unused)
{
	init_secondary();
	setrealcpu();
	bootstrap_idle();

	return(NULL); /* NOTREACHED */
}

/* storage for AP thread IDs */
pthread_t ap_tids[MAXCPU];

int naps;

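/*
 * Bring the configured number of cpus (optcpus) online.  This sizes
 * ncpus/ncpus_fit, allocates cpu0's IPI queues, seeds the per-cpu
 * arc4random state, and then calls start_all_aps() to spin up one
 * pthread per AP.  Each AP handshakes with ap_finish() above through
 * smp_startup_mask and smp_active_mask.
 */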
void
mp_start(void)
{
	size_t ipiq_size;
	int shift;

	ncpus = optcpus;
	naps = ncpus - 1;

	for (shift = 0; (1 << shift) <= ncpus; ++shift)
		;
	--shift;

	/* ncpus_fit -- ncpus rounded up to the nearest power of 2 */
	if ((1 << shift) < ncpus)
		++shift;
	ncpus_fit = 1 << shift;
	ncpus_fit_mask = ncpus_fit - 1;
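	/*
	 * A worked example of the rounding above: with optcpus = 6 the
	 * for loop exits with shift = 3, --shift yields 2, and since
	 * (1 << 2) is still < 6 the shift is bumped back to 3, giving
	 * ncpus_fit = 8 and ncpus_fit_mask = 7.
	 */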

	malloc_reinit_ncpus();

	/*
	 * cpu0 initialization
	 */
	ipiq_size = sizeof(struct lwkt_ipiq) * ncpus;
	mycpu->gd_ipiq = (void *)kmem_alloc(kernel_map, ipiq_size,
					    VM_SUBSYS_IPIQ);
	bzero(mycpu->gd_ipiq, ipiq_size);

	/* initialize arc4random. */
	arc4_init_pcpu(0);

	/*
	 * cpu 1-(n-1)
	 */
	start_all_aps(boot_address);
}

void
mp_announce(void)
{
	int x;

	kprintf("DragonFly/MP: Multiprocessor\n");
	kprintf(" cpu0 (BSP)\n");

	for (x = 1; x <= naps; ++x)
		kprintf(" cpu%d (AP)\n", x);
}

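/*
 * Send an IPI to a single target cpu.  In the vkernel a cross-cpu IPI
 * is simply a SIGUSR1 delivered to the pthread backing the target cpu;
 * nothing is sent unless the target has reached smp_active_mask.  The
 * target's SIGUSR1 handling (set up elsewhere in the vkernel platform
 * code) is what eventually drains its IPI queue.
 */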
void
cpu_send_ipiq(int dcpu)
{
	if (CPUMASK_TESTBIT(smp_active_mask, dcpu)) {
		if (pthread_kill(ap_tids[dcpu], SIGUSR1) != 0)
			panic("pthread_kill failed in cpu_send_ipiq");
	}
#if 0
	panic("XXX cpu_send_ipiq()");
#endif
}

void
single_cpu_ipi(int cpu, int vector, int delivery_mode)
{
	kprintf("XXX single_cpu_ipi\n");
}

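/*
 * Send an IPI to every cpu in the target mask.  This uses the common
 * cpumask iteration idiom: BSFCPUMASK() finds the lowest set bit and
 * CPUMASK_NANDBIT() clears it, so each cpu in the local copy of the
 * mask is visited exactly once.  Note that single_cpu_ipi() is still a
 * stub in the vkernel (it only kprintf()s), so this path currently
 * does nothing beyond the message.
 */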
void
selected_cpu_ipi(cpumask_t target, int vector, int delivery_mode)
{
	crit_enter();
	while (CPUMASK_TESTNZERO(target)) {
		int n = BSFCPUMASK(target);
		CPUMASK_NANDBIT(target, n);
		single_cpu_ipi(n, vector, delivery_mode);
	}
	crit_exit();
}

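/*
 * Stop and restart cpus: the requested map is first limited to
 * smp_active_mask, each cpu's bit is set (stop) or cleared (restart)
 * in stopped_cpus, and the backing pthread is poked with SIGXCPU.
 * The SIGXCPU handling on the target (set up elsewhere in the vkernel)
 * presumably parks or resumes the cpu based on its stopped_cpus bit.
 */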
int
stop_cpus(cpumask_t map)
{
	CPUMASK_ANDMASK(map, smp_active_mask);

	crit_enter();
	while (CPUMASK_TESTNZERO(map)) {
		int n = BSFCPUMASK(map);
		CPUMASK_NANDBIT(map, n);
		ATOMIC_CPUMASK_ORBIT(stopped_cpus, n);
		if (pthread_kill(ap_tids[n], SIGXCPU) != 0)
			panic("stop_cpus: pthread_kill failed");
	}
	crit_exit();
#if 0
	panic("XXX stop_cpus()");
#endif

	return(1);
}

int
restart_cpus(cpumask_t map)
{
	CPUMASK_ANDMASK(map, smp_active_mask);

	crit_enter();
	while (CPUMASK_TESTNZERO(map)) {
		int n = BSFCPUMASK(map);
		CPUMASK_NANDBIT(map, n);
		ATOMIC_CPUMASK_NANDBIT(stopped_cpus, n);
		if (pthread_kill(ap_tids[n], SIGXCPU) != 0)
			panic("restart_cpus: pthread_kill failed");
	}
	crit_exit();
#if 0
	panic("XXX restart_cpus()");
#endif

	return(1);
}
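
/*
 * ap_init() runs on each AP once its pthread is up (presumably reached
 * via bootstrap_idle(), which start_ap() calls).  It is the AP side of
 * the handshake with ap_finish(): announce ourselves in
 * smp_startup_mask, wait for the BSP to set mp_finish, take the MP
 * lock, and only then set our bit in smp_active_mask so other cpus may
 * IPI us.
 */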
void
ap_init(void)
{
	/*
	 * Adjust smp_startup_mask to signal the BSP that we have started
	 * up successfully.  Note that we do not yet hold the BGL.  The BSP
	 * is waiting for our signal.
	 *
	 * We can't set our bit in smp_active_mask yet because we are holding
	 * interrupts physically disabled and remote cpus could deadlock
	 * trying to send us an IPI.
	 */
	ATOMIC_CPUMASK_ORBIT(smp_startup_mask, mycpu->gd_cpuid);
	cpu_mfence();

	/*
	 * Interlock for finalization.  Wait until mp_finish is non-zero,
	 * then get the MP lock.
	 *
	 * Note: We are in a critical section.
	 *
	 * Note: we are the idle thread; we can only spin.
	 *
	 * Note: The load fence is memory volatile and prevents the compiler
	 * from improperly caching mp_finish, and the cpu from improperly
	 * caching it.
	 */
	while (mp_finish == 0) {
		cpu_lfence();
		DELAY(500000);
	}
	while (try_mplock() == 0)
		DELAY(100000);

	/* BSP may have changed PTD while we're waiting for the lock */
	cpu_invltlb();

	/* Build our map of 'other' CPUs. */
	mycpu->gd_other_cpus = smp_startup_mask;
	CPUMASK_NANDBIT(mycpu->gd_other_cpus, mycpu->gd_cpuid);

	kprintf("SMP: AP CPU #%d Launched!\n", mycpu->gd_cpuid);

	/* Set memory range attributes for this CPU to match the BSP */
	mem_range_AP_init();

	/*
	 * Once we go active we must process any IPIQ messages that may
	 * have been queued, because no actual IPI will occur until we
	 * set our bit in the smp_active_mask.  If we don't, the IPI
	 * message interlock could be left set, which would also prevent
	 * further IPIs.
	 *
	 * The idle loop doesn't expect the BGL to be held, and while
	 * lwkt_switch() normally cleans things up, this is a special case
	 * because we are returning almost directly into the idle loop.
	 *
	 * The idle thread is never placed on the runq; make sure
	 * nothing we've done has put it there.
	 */
	KKASSERT(get_mplock_count(curthread) == 1);
	ATOMIC_CPUMASK_ORBIT(smp_active_mask, mycpu->gd_cpuid);

	mdcpu->gd_fpending = 0;
	mdcpu->gd_ipending = 0;
	initclocks_pcpu();	/* clock interrupts (via IPIs) */

	/*
	 * Since we may have cleaned up the interrupt triggers, manually
	 * process any pending IPIs before exiting our critical section.
	 * Once the critical section has exited, normal interrupt processing
	 * may occur.
	 */
	atomic_swap_int(&mycpu->gd_npoll, 0);
	lwkt_process_ipiq();

	/*
	 * Releasing the mp lock lets the BSP finish up the SMP init
	 */
	rel_mplock();
	KKASSERT((curthread->td_flags & TDF_RUNQ) == 0);
}

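/*
 * Per-AP low level setup, run from start_ap() before bootstrap_idle().
 * This wires up the cpu's globaldata: %gs is pointed at the cpu's
 * privatespace via the host TLS (the vkernel keeps %fs for pthreads),
 * after which the mycpu/mdcpu macros work on this cpu.
 */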
void
init_secondary(void)
{
	int	myid = bootAP;
	struct mdglobaldata *md;
	struct privatespace *ps;

	ps = &CPU_prvspace[myid];

	KKASSERT(ps->mdglobaldata.mi.gd_prvspace == ps);

	/*
	 * Setup the %gs for cpu #n.  The mycpu macro works after this
	 * point.  Note that %fs is used by pthreads.
	 */
	tls_set_gs(&CPU_prvspace[myid], sizeof(struct privatespace));

	md = mdcpu;	/* loaded through %gs:0 (mdglobaldata.mi.gd_prvspace) */

	/* JG */
	md->gd_common_tss.tss_rsp0 = 0;	/* not used until after switch */
	//md->gd_common_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
	//md->gd_common_tss.tss_ioopt = (sizeof md->gd_common_tss) << 16;

	/*
	 * Set to a known state:
	 * Set by mpboot.s: CR0_PG, CR0_PE
	 * Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
	 */
}

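/*
 * Start up all the APs.  For each AP this wires physical pages behind
 * the cpu's CPU_prvspace (globaldata and idle stack), initializes its
 * globaldata and per-cpu IPI queues, then creates a host pthread
 * running start_ap() and spins until the AP sets its bit in
 * smp_startup_mask.  Signals are masked via cpu_disable_intr() around
 * pthread_create() so the new thread does not take an IPI signal
 * before it is ready.  Returns the number of APs started.
 */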
static int
start_all_aps(u_int boot_addr)
{
	int x, i;
	struct mdglobaldata *gd;
	struct privatespace *ps;
	vm_page_t m;
	vm_offset_t va;
	pthread_attr_t attr;
	size_t ipiq_size;
#if 0
	struct lwp_params params;
#endif

	/*
	 * needed for IPIs to the initial thread
	 * FIXME: rename ap_tids?
	 */
	ap_tids[0] = pthread_self();
	pthread_attr_init(&attr);

	vm_object_hold(kernel_object);
	for (x = 1; x <= naps; ++x) {
		/* Allocate space for the CPU's private space. */
		for (i = 0; i < sizeof(struct mdglobaldata); i += PAGE_SIZE) {
			va = (vm_offset_t)&CPU_prvspace[x].mdglobaldata + i;
			m = vm_page_alloc(kernel_object, va, VM_ALLOC_SYSTEM);
			pmap_kenter_quick(va, m->phys_addr);
		}

		for (i = 0; i < sizeof(CPU_prvspace[x].idlestack); i += PAGE_SIZE) {
			va = (vm_offset_t)&CPU_prvspace[x].idlestack + i;
			m = vm_page_alloc(kernel_object, va, VM_ALLOC_SYSTEM);
			pmap_kenter_quick(va, m->phys_addr);
		}

		gd = &CPU_prvspace[x].mdglobaldata;	/* official location */
		bzero(gd, sizeof(*gd));
		gd->mi.gd_prvspace = ps = &CPU_prvspace[x];

		/* prime data page for it to use */
		mi_gdinit(&gd->mi, x);
		cpu_gdinit(gd, x);

#if 0
		gd->gd_CMAP1 = pmap_kpte((vm_offset_t)CPU_prvspace[x].CPAGE1);
		gd->gd_CMAP2 = pmap_kpte((vm_offset_t)CPU_prvspace[x].CPAGE2);
		gd->gd_CMAP3 = pmap_kpte((vm_offset_t)CPU_prvspace[x].CPAGE3);
		gd->gd_PMAP1 = pmap_kpte((vm_offset_t)CPU_prvspace[x].PPAGE1);
		gd->gd_CADDR1 = ps->CPAGE1;
		gd->gd_CADDR2 = ps->CPAGE2;
		gd->gd_CADDR3 = ps->CPAGE3;
		gd->gd_PADDR1 = (vpte_t *)ps->PPAGE1;
#endif

		ipiq_size = sizeof(struct lwkt_ipiq) * (naps + 1);
		gd->mi.gd_ipiq = (void *)kmem_alloc(kernel_map, ipiq_size,
						    VM_SUBSYS_IPIQ);
		bzero(gd->mi.gd_ipiq, ipiq_size);

		/* initialize arc4random. */
		arc4_init_pcpu(x);

		/*
		 * Setup the AP boot stack
		 */
		bootSTK = &ps->idlestack[UPAGES*PAGE_SIZE/2];
		bootAP = x;

		/*
		 * Setup the AP's lwp; this is the 'cpu'.
		 *
		 * We have to make sure our signals are masked or the new LWP
		 * may pick up a signal that it isn't ready for yet.  SMP
		 * startup occurs after SI_BOOT2_LEAVE_CRIT so interrupts
		 * have already been enabled.
		 */
		cpu_disable_intr();

		pthread_create(&ap_tids[x], &attr, start_ap, NULL);
		cpu_enable_intr();

		while (CPUMASK_TESTBIT(smp_startup_mask, x) == 0) {
			cpu_lfence(); /* XXX spin until the AP has started */
			DELAY(1000);
		}
	}
	vm_object_drop(kernel_object);
	pthread_attr_destroy(&attr);

	return(ncpus - 1);
}

/*
 * CPU TOPOLOGY DETECTION FUNCTIONS.
 */
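
/*
 * The accessors below assume a fixed APIC ID layout, with the logical
 * (SMT) bits in the low logical_CPU_bits, the core bits above them,
 * and the remaining high bits identifying the chip:
 *
 *	apicid = (chip << (core_bits + logical_CPU_bits)) |
 *		 (core << logical_CPU_bits) | logical
 *
 * Worked example: with logical_CPU_bits = 1 and core_bits = 2, an
 * apicid of 13 (0b1101) decodes to chip 1, core 2, logical cpu 1.
 * The bit widths come from vkernel_b_arg and vkernel_B_arg, which are
 * presumably taken from the vkernel command line.
 */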
void
detect_cpu_topology(void)
{
	logical_CPU_bits = vkernel_b_arg;
	core_bits = vkernel_B_arg;
}

int
get_chip_ID(int cpuid)
{
	return get_apicid_from_cpuid(cpuid) >>
	    (logical_CPU_bits + core_bits);
}

int
get_chip_ID_from_APICID(int apicid)
{
	return apicid >> (logical_CPU_bits + core_bits);
}

int
get_core_number_within_chip(int cpuid)
{
	return ((get_apicid_from_cpuid(cpuid) >> logical_CPU_bits) &
		((1 << core_bits) - 1));
}

int
get_logical_CPU_number_within_core(int cpuid)
{
	return (get_apicid_from_cpuid(cpuid) &
		((1 << logical_CPU_bits) - 1));
}