xref: /dragonfly/sys/platform/vkernel64/x86_64/mp.c (revision 07a2f99c)
/*
 * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */


#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/memrange.h>
#include <sys/tls.h>
#include <sys/types.h>

#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>

#include <sys/mplock2.h>

#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/globaldata.h>
#include <machine/md_var.h>
#include <machine/pmap.h>
#include <machine/smp.h>
#include <machine/tls.h>

#include <unistd.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>

extern pt_entry_t *KPTphys;

volatile cpumask_t stopped_cpus;
cpumask_t	smp_active_mask = 1;	/* which cpus are ready for IPIs etc? */
static int	boot_address;
static cpumask_t smp_startup_mask = 1;	/* which cpus have been started */
int		mp_naps;		/* # of Application Processors (APs) */
static int	mp_finish;

/* Local data for detecting CPU TOPOLOGY */
static int core_bits = 0;
static int logical_CPU_bits = 0;

/* function prototypes XXX these should go elsewhere */
void bootstrap_idle(void);
void single_cpu_ipi(int, int, int);
void selected_cpu_ipi(cpumask_t, int, int);
#if 0
void ipi_handler(int);
#endif

pt_entry_t *SMPpt;

/* AP uses this during bootstrap.  Do not staticize.  */
char *bootSTK;
static int bootAP;


/* XXX these need to go into the appropriate header file */
static int start_all_aps(u_int);
void init_secondary(void);
void *start_ap(void *);

/*
 * Get SMP fully working before we start initializing devices.
 */
static
void
ap_finish(void)
{
	int i;
	cpumask_t ncpus_mask = 0;

	for (i = 1; i <= ncpus; i++)
		ncpus_mask |= CPUMASK(i);

	mp_finish = 1;
	if (bootverbose)
		kprintf("Finish MP startup\n");

	/* build our map of 'other' CPUs */
	mycpu->gd_other_cpus = smp_startup_mask & ~CPUMASK(mycpu->gd_cpuid);

	/*
	 * Let the other cpus finish initializing and build their map
	 * of 'other' CPUs.
	 */
	rel_mplock();
	while (smp_active_mask != smp_startup_mask) {
		DELAY(100000);
		cpu_lfence();
	}

	while (try_mplock() == 0)
		DELAY(100000);
	if (bootverbose)
		kprintf("Active CPU Mask: %08lx\n", (long)smp_active_mask);
}

SYSINIT(finishsmp, SI_BOOT2_FINISH_SMP, SI_ORDER_FIRST, ap_finish, NULL)

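/*
 * Entry point for each AP's host pthread (created by start_all_aps()).
 * Runs the AP-side initialization and then drops into the bootstrap
 * idle loop; it never returns.
 */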
void *
start_ap(void *arg __unused)
{
	init_secondary();
	setrealcpu();
	bootstrap_idle();

	return(NULL); /* NOTREACHED */
}

/* storage for AP thread IDs */
pthread_t ap_tids[MAXCPU];

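/*
 * Size the system from the configured cpu count: derive the ncpus2 and
 * ncpus_fit power-of-2 helpers, allocate cpu0's IPI queues, and start
 * the APs.
 */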
void
mp_start(void)
{
	int shift;

	ncpus = optcpus;

	mp_naps = ncpus - 1;

	/* ncpus2 -- ncpus rounded down to the nearest power of 2 */
	for (shift = 0; (1 << shift) <= ncpus; ++shift)
		;
	--shift;
	ncpus2_shift = shift;
	ncpus2 = 1 << shift;
	ncpus2_mask = ncpus2 - 1;

	/* ncpus_fit -- ncpus rounded up to the nearest power of 2 */
	if ((1 << shift) < ncpus)
		++shift;
	ncpus_fit = 1 << shift;
	ncpus_fit_mask = ncpus_fit - 1;

	/*
	 * cpu0 initialization
	 */
	mycpu->gd_ipiq = (void *)kmem_alloc(&kernel_map,
					    sizeof(lwkt_ipiq) * ncpus);
	bzero(mycpu->gd_ipiq, sizeof(lwkt_ipiq) * ncpus);

	/*
	 * cpu 1-(n-1)
	 */
	start_all_aps(boot_address);
}

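/*
 * Report the cpu configuration: cpu0 is the BSP, the remaining cpus
 * are APs.
 */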
void
mp_announce(void)
{
	int x;

	kprintf("DragonFly/MP: Multiprocessor\n");
	kprintf(" cpu0 (BSP)\n");

	for (x = 1; x <= mp_naps; ++x)
		kprintf(" cpu%d (AP)\n", x);
}

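/*
 * Deliver a virtual IPI to the target cpu by signalling its host
 * pthread with SIGUSR1.  Only cpus in smp_active_mask are signalled.
 */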
void
cpu_send_ipiq(int dcpu)
{
	if (CPUMASK(dcpu) & smp_active_mask) {
		if (pthread_kill(ap_tids[dcpu], SIGUSR1) != 0)
			panic("pthread_kill failed in cpu_send_ipiq");
	}
#if 0
	panic("XXX cpu_send_ipiq()");
#endif
}

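/*
 * Global TLB invalidation entry point; nothing to do here in the
 * vkernel.
 */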
void
smp_invltlb(void)
{
}

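/*
 * Send an IPI with the given vector and delivery mode to a single cpu.
 * Not yet implemented for the vkernel; this is a stub.
 */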
void
single_cpu_ipi(int cpu, int vector, int delivery_mode)
{
	kprintf("XXX single_cpu_ipi\n");
}

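/*
 * Send an IPI to every cpu set in the target mask.
 */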
void
selected_cpu_ipi(cpumask_t target, int vector, int delivery_mode)
{
	crit_enter();
	while (target) {
		int n = BSFCPUMASK(target);
		target &= ~CPUMASK(n);
		single_cpu_ipi(n, vector, delivery_mode);
	}
	crit_exit();
}

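/*
 * Stop the cpus in the given mask by signalling their host pthreads
 * with SIGXCPU.  Only cpus in smp_active_mask are affected.
 */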
int
stop_cpus(cpumask_t map)
{
	map &= smp_active_mask;

	crit_enter();
	while (map) {
		int n = BSFCPUMASK(map);
		map &= ~CPUMASK(n);
		stopped_cpus |= CPUMASK(n);
		if (pthread_kill(ap_tids[n], SIGXCPU) != 0)
			panic("stop_cpus: pthread_kill failed");
	}
	crit_exit();
#if 0
	panic("XXX stop_cpus()");
#endif

	return(1);
}

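/*
 * Restart previously stopped cpus: clear their bits in stopped_cpus
 * and signal their host pthreads with SIGXCPU again.
 */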
int
restart_cpus(cpumask_t map)
{
	map &= smp_active_mask;

	crit_enter();
	while (map) {
		int n = BSFCPUMASK(map);
		map &= ~CPUMASK(n);
		stopped_cpus &= ~CPUMASK(n);
		if (pthread_kill(ap_tids[n], SIGXCPU) != 0)
			panic("restart_cpus: pthread_kill failed");
	}
	crit_exit();
#if 0
	panic("XXX restart_cpus()");
#endif

	return(1);
}

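/*
 * Per-AP initialization, run once the AP is up: signal the BSP via
 * smp_startup_mask, wait for mp_finish, then join smp_active_mask and
 * start processing IPIs and per-cpu clock interrupts.
 */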
void
ap_init(void)
{
	/*
	 * Adjust smp_startup_mask to signal the BSP that we have started
	 * up successfully.  Note that we do not yet hold the BGL.  The BSP
	 * is waiting for our signal.
	 *
	 * We can't set our bit in smp_active_mask yet because we are holding
	 * interrupts physically disabled and remote cpus could deadlock
	 * trying to send us an IPI.
	 */
	smp_startup_mask |= CPUMASK(mycpu->gd_cpuid);
	cpu_mfence();

	/*
	 * Interlock for finalization.  Wait until mp_finish is non-zero,
	 * then get the MP lock.
	 *
	 * Note: We are in a critical section.
	 *
	 * Note: we are the idle thread, we can only spin.
	 *
	 * Note: The load fence acts as a compiler and cpu memory barrier
	 * and prevents mp_finish from being improperly cached by either.
	 */
	while (mp_finish == 0) {
		cpu_lfence();
		DELAY(500000);
	}
	while (try_mplock() == 0)
		DELAY(100000);

	/* BSP may have changed PTD while we were waiting for the lock */
	cpu_invltlb();

	/* Build our map of 'other' CPUs. */
	mycpu->gd_other_cpus = smp_startup_mask & ~CPUMASK(mycpu->gd_cpuid);

	kprintf("SMP: AP CPU #%d Launched!\n", mycpu->gd_cpuid);

	/* Set memory range attributes for this CPU to match the BSP */
	mem_range_AP_init();

	/*
	 * Once we go active we must process any IPIQ messages that may
	 * have been queued, because no actual IPI will occur until we
	 * set our bit in the smp_active_mask.  If we don't, the IPI
	 * message interlock could be left set which would also prevent
	 * further IPIs.
	 *
	 * The idle loop doesn't expect the BGL to be held and while
	 * lwkt_switch() normally cleans things up this is a special case
	 * because we are returning almost directly into the idle loop.
	 *
	 * The idle thread is never placed on the runq; make sure
	 * nothing we've done has put it there.
	 */
	KKASSERT(get_mplock_count(curthread) == 1);
	smp_active_mask |= CPUMASK(mycpu->gd_cpuid);

	mdcpu->gd_fpending = 0;
	mdcpu->gd_ipending = 0;
	initclocks_pcpu();	/* clock interrupts (via IPIs) */
	lwkt_process_ipiq();

	/*
	 * Releasing the mp lock lets the BSP finish up the SMP init
	 */
	rel_mplock();
	KKASSERT((curthread->td_flags & TDF_RUNQ) == 0);
}

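/*
 * Set up the per-cpu private space for the AP being started (bootAP):
 * point %gs at its privatespace so the mycpu/mdcpu macros work, and
 * give the common TSS a known initial state.
 */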
void
init_secondary(void)
{
	int	myid = bootAP;
	struct mdglobaldata *md;
	struct privatespace *ps;

	ps = &CPU_prvspace[myid];

	KKASSERT(ps->mdglobaldata.mi.gd_prvspace == ps);

	/*
	 * Set up %gs for cpu #n.  The mycpu macro works after this
	 * point.  Note that %fs is used by pthreads.
	 */
	tls_set_gs(&CPU_prvspace[myid], sizeof(struct privatespace));

	md = mdcpu;	/* loaded through %gs:0 (mdglobaldata.mi.gd_prvspace) */

	/* JG */
	md->gd_common_tss.tss_rsp0 = 0;	/* not used until after switch */
	//md->gd_common_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
	//md->gd_common_tss.tss_ioopt = (sizeof md->gd_common_tss) << 16;

	/*
	 * Set to a known state:
	 * Set by mpboot.s: CR0_PG, CR0_PE
	 * Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
	 */
}

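/*
 * Allocate and map backing pages for each AP's privatespace and idle
 * stack, initialize its globaldata and IPI queues, then create a host
 * pthread for the AP and wait for it to set its bit in
 * smp_startup_mask.
 */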
static int
start_all_aps(u_int boot_addr)
{
	int x, i;
	struct mdglobaldata *gd;
	struct privatespace *ps;
	vm_page_t m;
	vm_offset_t va;
#if 0
	struct lwp_params params;
#endif

	/*
	 * Needed for IPIs to the initial (BSP) thread.
	 * FIXME: rename ap_tids?
	 */
	ap_tids[0] = pthread_self();

	vm_object_hold(&kernel_object);
	for (x = 1; x <= mp_naps; x++) {
		/* Allocate space for the CPU's private space. */
		for (i = 0; i < sizeof(struct mdglobaldata); i += PAGE_SIZE) {
			va = (vm_offset_t)&CPU_prvspace[x].mdglobaldata + i;
			m = vm_page_alloc(&kernel_object, va, VM_ALLOC_SYSTEM);
			pmap_kenter_quick(va, m->phys_addr);
		}

		for (i = 0; i < sizeof(CPU_prvspace[x].idlestack); i += PAGE_SIZE) {
			va = (vm_offset_t)&CPU_prvspace[x].idlestack + i;
			m = vm_page_alloc(&kernel_object, va, VM_ALLOC_SYSTEM);
			pmap_kenter_quick(va, m->phys_addr);
		}

		gd = &CPU_prvspace[x].mdglobaldata;	/* official location */
		bzero(gd, sizeof(*gd));
		gd->mi.gd_prvspace = ps = &CPU_prvspace[x];

		/* prime data page for it to use */
		mi_gdinit(&gd->mi, x);
		cpu_gdinit(gd, x);

#if 0
		gd->gd_CMAP1 = pmap_kpte((vm_offset_t)CPU_prvspace[x].CPAGE1);
		gd->gd_CMAP2 = pmap_kpte((vm_offset_t)CPU_prvspace[x].CPAGE2);
		gd->gd_CMAP3 = pmap_kpte((vm_offset_t)CPU_prvspace[x].CPAGE3);
		gd->gd_PMAP1 = pmap_kpte((vm_offset_t)CPU_prvspace[x].PPAGE1);
		gd->gd_CADDR1 = ps->CPAGE1;
		gd->gd_CADDR2 = ps->CPAGE2;
		gd->gd_CADDR3 = ps->CPAGE3;
		gd->gd_PADDR1 = (vpte_t *)ps->PPAGE1;
#endif

		gd->mi.gd_ipiq = (void *)kmem_alloc(&kernel_map,
					    sizeof(lwkt_ipiq) * (mp_naps + 1));
		bzero(gd->mi.gd_ipiq, sizeof(lwkt_ipiq) * (mp_naps + 1));

		/*
		 * Set up the AP boot stack
		 */
		bootSTK = &ps->idlestack[UPAGES*PAGE_SIZE/2];
		bootAP = x;

		/*
		 * Set up the AP's lwp; this is the 'cpu'.
		 *
		 * We have to make sure our signals are masked or the new LWP
		 * may pick up a signal that it isn't ready for yet.  SMP
		 * startup occurs after SI_BOOT2_LEAVE_CRIT so interrupts
		 * have already been enabled.
		 */
		cpu_disable_intr();
		pthread_create(&ap_tids[x], NULL, start_ap, NULL);
		cpu_enable_intr();

		while ((smp_startup_mask & CPUMASK(x)) == 0) {
			cpu_lfence(); /* XXX spin until the AP has started */
			DELAY(1000);
		}
	}
	vm_object_drop(&kernel_object);

	return(ncpus - 1);
}

/*
 * CPU TOPOLOGY DETECTION FUNCTIONS.
 */

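/*
 * The vkernel does not probe cpuid for topology; the logical-cpu and
 * core bit widths are taken from the vkernel's configuration
 * (vkernel_b_arg and vkernel_B_arg).
 */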
void
detect_cpu_topology(void)
{
	logical_CPU_bits = vkernel_b_arg;
	core_bits = vkernel_B_arg;
}

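/*
 * The APIC ID returned by get_apicid_from_cpuid() is decoded as a bit
 * field of [ chip | core | logical cpu ], where logical_CPU_bits and
 * core_bits give the widths of the two low-order fields.
 */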
int
get_chip_ID(int cpuid)
{
	return get_apicid_from_cpuid(cpuid) >>
	    (logical_CPU_bits + core_bits);
}

int
get_core_number_within_chip(int cpuid)
{
	return (get_apicid_from_cpuid(cpuid) >> logical_CPU_bits) &
	    ((1 << core_bits) - 1);
}

int
get_logical_CPU_number_within_core(int cpuid)
{
	return get_apicid_from_cpuid(cpuid) &
	    ((1 << logical_CPU_bits) - 1);
}