/*
 * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/platform/vkernel/i386/mp.c,v 1.8 2008/05/07 17:19:46 dillon Exp $
 */


#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/memrange.h>
#include <sys/tls.h>
#include <sys/types.h>

#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>

#include <sys/mplock2.h>

#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/globaldata.h>
#include <machine/md_var.h>
#include <machine/pmap.h>
#include <machine/smp.h>
#include <machine/tls.h>

#include <unistd.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>

extern pt_entry_t *KPTphys;

volatile u_int	stopped_cpus;
cpumask_t	smp_active_mask = 1;	/* which cpus are ready for IPIs etc? */
static int	boot_address;
static cpumask_t smp_startup_mask = 1;	/* which cpus have been started */
int		mp_naps;		/* # of Application Processors */
static int	mp_finish;

/* function prototypes XXX these should go elsewhere */
void bootstrap_idle(void);
void single_cpu_ipi(int, int, int);
void selected_cpu_ipi(u_int, int, int);
#if 0
void ipi_handler(int);
#endif

pt_entry_t *SMPpt;

/* AP uses this during bootstrap.  Do not staticize.  */
char *bootSTK;
static int bootAP;


/* XXX these need to go into the appropriate header file */
static int start_all_aps(u_int);
void init_secondary(void);
void *start_ap(void *);

/*
 * Get SMP fully working before we start initializing devices.
 */
static
void
ap_finish(void)
{
	int i;
	cpumask_t ncpus_mask = 0;

	for (i = 0; i < ncpus; i++)
		ncpus_mask |= (1 << i);

	mp_finish = 1;
	if (bootverbose)
		kprintf("Finish MP startup\n");

	/* build our map of 'other' CPUs */
	mycpu->gd_other_cpus = smp_startup_mask & ~(1 << mycpu->gd_cpuid);

	/*
	 * Let the other cpus finish initializing and build their map
	 * of 'other' CPUs.
	 */
	rel_mplock();
	while (smp_active_mask != smp_startup_mask) {
		DELAY(100000);
		cpu_lfence();
	}

	while (try_mplock() == 0)
		DELAY(100000);
	if (bootverbose)
		kprintf("Active CPU Mask: %08x\n", smp_active_mask);
}

SYSINIT(finishsmp, SI_BOOT2_FINISH_SMP, SI_ORDER_FIRST, ap_finish, NULL)

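/*
 * Entry point for each AP's pthread (created by start_all_aps()).  The
 * thread sets up its per-cpu state via init_secondary(), calls
 * setrealcpu(), and then drops into bootstrap_idle(); it never returns.
 */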
void *
start_ap(void *arg __unused)
{
	init_secondary();
	setrealcpu();
	bootstrap_idle();

	return(NULL); /* NOTREACHED */
}

/* storage for AP thread IDs */
pthread_t ap_tids[MAXCPU];

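/*
 * Bring the configured number of cpus online.  ncpus is taken from optcpus
 * (the cpu count the vkernel was configured with), ncpus2/ncpus_fit are
 * the nearest powers of two below/above it, cpu0's IPI queues are
 * allocated, and the remaining cpus are started via start_all_aps().
 */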
void
mp_start(void)
{
	int shift;

	ncpus = optcpus;

	mp_naps = ncpus - 1;

	/* ncpus2 -- ncpus rounded down to the nearest power of 2 */
	for (shift = 0; (1 << shift) <= ncpus; ++shift)
		;
	--shift;
	ncpus2_shift = shift;
	ncpus2 = 1 << shift;
	ncpus2_mask = ncpus2 - 1;

	/* ncpus_fit -- ncpus rounded up to the nearest power of 2 */
	if ((1 << shift) < ncpus)
		++shift;
	ncpus_fit = 1 << shift;
	ncpus_fit_mask = ncpus_fit - 1;

	/*
	 * cpu0 initialization
	 */
	mycpu->gd_ipiq = (void *)kmem_alloc(&kernel_map,
					    sizeof(lwkt_ipiq) * ncpus);
	bzero(mycpu->gd_ipiq, sizeof(lwkt_ipiq) * ncpus);

	/*
	 * cpu 1-(n-1)
	 */
	start_all_aps(boot_address);
}

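/*
 * Report the cpu configuration: cpu0 is the BSP, the remaining cpus
 * are APs.
 */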
void
mp_announce(void)
{
	int x;

	kprintf("DragonFly/MP: Multiprocessor\n");
	kprintf(" cpu0 (BSP)\n");

	for (x = 1; x <= mp_naps; ++x)
		kprintf(" cpu%d (AP)\n", x);
}

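/*
 * Fast interrupt forwarding is not implemented for the vkernel; this
 * should never be reached.
 */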
void
forward_fastint_remote(void *arg)
{
	panic("XXX forward_fastint_remote()");
}

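/*
 * Send an IPI to the target cpu.  Each vkernel cpu is backed by a
 * pthread, so the IPI is delivered by sending SIGUSR1 to that thread.
 * Only cpus in smp_active_mask are signalled.
 */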
void
cpu_send_ipiq(int dcpu)
{
	if ((1 << dcpu) & smp_active_mask)
		if (pthread_kill(ap_tids[dcpu], SIGUSR1) != 0)
			panic("pthread_kill failed in cpu_send_ipiq");
#if 0
	panic("XXX cpu_send_ipiq()");
#endif
}

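/*
 * Cross-cpu TLB invalidation is currently a no-op stub on the vkernel.
 */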
void
smp_invltlb(void)
{
#ifdef SMP
#endif
}

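/*
 * Send a directed IPI with the given vector and delivery mode to one cpu.
 * Not yet implemented here; the call is merely logged.
 */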
void
single_cpu_ipi(int cpu, int vector, int delivery_mode)
{
	kprintf("XXX single_cpu_ipi\n");
}

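/*
 * Send an IPI to every cpu whose bit is set in the target mask by
 * walking the mask and issuing a single_cpu_ipi() per cpu.
 */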
void
selected_cpu_ipi(u_int target, int vector, int delivery_mode)
{
	crit_enter();
	while (target) {
		int n = bsfl(target);
		target &= ~(1 << n);
		single_cpu_ipi(n, vector, delivery_mode);
	}
	crit_exit();
}

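/*
 * Stop the cpus in the map (restricted to currently active cpus).  Each
 * target cpu is marked in stopped_cpus and signalled with SIGXCPU.
 */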
int
stop_cpus(u_int map)
{
	map &= smp_active_mask;

	crit_enter();
	while (map) {
		int n = bsfl(map);
		map &= ~(1 << n);
		stopped_cpus |= 1 << n;
		if (pthread_kill(ap_tids[n], SIGXCPU) != 0)
			panic("stop_cpus: pthread_kill failed");
	}
	crit_exit();
#if 0
	panic("XXX stop_cpus()");
#endif

	return(1);
}

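/*
 * Restart previously stopped cpus in the map.  The stopped_cpus bits are
 * cleared and each target cpu is signalled with SIGXCPU again.
 */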
int
restart_cpus(u_int map)
{
	map &= smp_active_mask;

	crit_enter();
	while (map) {
		int n = bsfl(map);
		map &= ~(1 << n);
		stopped_cpus &= ~(1 << n);
		if (pthread_kill(ap_tids[n], SIGXCPU) != 0)
			panic("restart_cpus: pthread_kill failed");
	}
	crit_exit();
#if 0
	panic("XXX restart_cpus()");
#endif

	return(1);
}

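/*
 * Final per-AP initialization.  The AP signals the BSP through
 * smp_startup_mask, waits for the BSP to set mp_finish, acquires the
 * MP lock, and only then places itself in smp_active_mask and begins
 * processing IPIs and per-cpu clock interrupts.
 */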
void
ap_init(void)
{
	/*
	 * Adjust smp_startup_mask to signal the BSP that we have started
	 * up successfully.  Note that we do not yet hold the BGL.  The BSP
	 * is waiting for our signal.
	 *
	 * We can't set our bit in smp_active_mask yet because we are holding
	 * interrupts physically disabled and remote cpus could deadlock
	 * trying to send us an IPI.
	 */
	smp_startup_mask |= 1 << mycpu->gd_cpuid;
	cpu_mfence();

	/*
	 * Interlock for finalization.  Wait until mp_finish is non-zero,
	 * then get the MP lock.
	 *
	 * Note: We are in a critical section.
	 *
	 * Note: We have to synchronize td_mpcount to our desired MP state
	 * before calling cpu_try_mplock().
	 *
	 * Note: we are the idle thread, we can only spin.
	 *
	 * Note: The load fence is memory volatile and prevents the compiler
	 * from improperly caching mp_finish, and the cpu from improperly
	 * caching it.
	 */

	while (mp_finish == 0) {
		cpu_lfence();
		DELAY(500000);
	}
	++curthread->td_mpcount;
	while (cpu_try_mplock() == 0)
		DELAY(100000);

	/* BSP may have changed PTD while we're waiting for the lock */
	cpu_invltlb();

	/* Build our map of 'other' CPUs. */
	mycpu->gd_other_cpus = smp_startup_mask & ~(1 << mycpu->gd_cpuid);

	kprintf("SMP: AP CPU #%d Launched!\n", mycpu->gd_cpuid);

	/* Set memory range attributes for this CPU to match the BSP */
	mem_range_AP_init();

	/*
	 * Once we go active we must process any IPIQ messages that may
	 * have been queued, because no actual IPI will occur until we
	 * set our bit in the smp_active_mask.  If we don't the IPI
	 * message interlock could be left set which would also prevent
	 * further IPIs.
	 *
	 * The idle loop doesn't expect the BGL to be held and while
	 * lwkt_switch() normally cleans things up this is a special case
	 * because we are returning almost directly into the idle loop.
	 *
	 * The idle thread is never placed on the runq, make sure
	 * nothing we've done put it there.
	 */
	KKASSERT(curthread->td_mpcount == 1);
	smp_active_mask |= 1 << mycpu->gd_cpuid;

	mdcpu->gd_fpending = 0;
	mdcpu->gd_ipending = 0;
	initclocks_pcpu();	/* clock interrupts (via IPIs) */
	lwkt_process_ipiq();

	/*
	 * Releasing the mp lock lets the BSP finish up the SMP init
	 */
	rel_mplock();
	KKASSERT((curthread->td_flags & TDF_RUNQ) == 0);
}

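/*
 * Per-AP low level setup, run on the AP's thread before ap_init().  The
 * cpu id comes from bootAP (set by the BSP in start_all_aps()).  %gs is
 * pointed at the cpu's privatespace so the mycpu/mdcpu accessors work;
 * %fs is left alone because pthreads uses it.
 */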
void
init_secondary(void)
{
	int	myid = bootAP;
	struct mdglobaldata *md;
	struct privatespace *ps;

	ps = &CPU_prvspace[myid];

	KKASSERT(ps->mdglobaldata.mi.gd_prvspace == ps);

	/*
	 * Setup the %gs for cpu #n.  The mycpu macro works after this
	 * point.  Note that %fs is used by pthreads.
	 */
	tls_set_gs(&CPU_prvspace[myid], sizeof(struct privatespace));

	md = mdcpu;	/* loaded through %gs:0 (mdglobaldata.mi.gd_prvspace) */

	/* JG */
	md->gd_common_tss.tss_rsp0 = 0;	/* not used until after switch */
	//md->gd_common_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
	//md->gd_common_tss.tss_ioopt = (sizeof md->gd_common_tss) << 16;

	/*
	 * Set to a known state:
	 * Set by mpboot.s: CR0_PG, CR0_PE
	 * Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
	 */
}

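/*
 * Start all of the other cpus.  For each AP this allocates and maps the
 * pages backing its privatespace (globaldata and idle stack), initializes
 * the globaldata and IPI queues, records the AP id in bootAP/bootSTK, and
 * then creates the pthread running start_ap().  The BSP spins until the
 * AP announces itself in smp_startup_mask before moving on to the next.
 */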
static int
start_all_aps(u_int boot_addr)
{
	int x, i;
	struct mdglobaldata *gd;
	struct privatespace *ps;
	vm_page_t m;
	vm_offset_t va;
#if 0
	struct lwp_params params;
#endif

	/*
	 * needed for ipis to initial thread
	 * FIXME: rename ap_tids?
	 */
	ap_tids[0] = pthread_self();

	for (x = 1; x <= mp_naps; x++) {
		/* Allocate space for the CPU's private space. */
		va = (vm_offset_t)&CPU_prvspace[x];
		for (i = 0; i < sizeof(struct mdglobaldata); i += PAGE_SIZE) {
			va = (vm_offset_t)&CPU_prvspace[x].mdglobaldata + i;
			m = vm_page_alloc(&kernel_object, va, VM_ALLOC_SYSTEM);
			pmap_kenter_quick(va, m->phys_addr);
		}

		for (i = 0; i < sizeof(CPU_prvspace[x].idlestack); i += PAGE_SIZE) {
			va = (vm_offset_t)&CPU_prvspace[x].idlestack + i;
			m = vm_page_alloc(&kernel_object, va, VM_ALLOC_SYSTEM);
			pmap_kenter_quick(va, m->phys_addr);
		}

		gd = &CPU_prvspace[x].mdglobaldata;	/* official location */
		bzero(gd, sizeof(*gd));
		gd->mi.gd_prvspace = ps = &CPU_prvspace[x];

		/* prime data page for it to use */
		mi_gdinit(&gd->mi, x);
		cpu_gdinit(gd, x);

#if 0
		gd->gd_CMAP1 = pmap_kpte((vm_offset_t)CPU_prvspace[x].CPAGE1);
		gd->gd_CMAP2 = pmap_kpte((vm_offset_t)CPU_prvspace[x].CPAGE2);
		gd->gd_CMAP3 = pmap_kpte((vm_offset_t)CPU_prvspace[x].CPAGE3);
		gd->gd_PMAP1 = pmap_kpte((vm_offset_t)CPU_prvspace[x].PPAGE1);
		gd->gd_CADDR1 = ps->CPAGE1;
		gd->gd_CADDR2 = ps->CPAGE2;
		gd->gd_CADDR3 = ps->CPAGE3;
		gd->gd_PADDR1 = (vpte_t *)ps->PPAGE1;
#endif

		gd->mi.gd_ipiq = (void *)kmem_alloc(&kernel_map,
						    sizeof(lwkt_ipiq) * (mp_naps + 1));
		bzero(gd->mi.gd_ipiq, sizeof(lwkt_ipiq) * (mp_naps + 1));

		/*
		 * Setup the AP boot stack
		 */
		bootSTK = &ps->idlestack[UPAGES*PAGE_SIZE/2];
		bootAP = x;

		/*
		 * Setup the AP's lwp, this is the 'cpu'
		 *
		 * We have to make sure our signals are masked or the new LWP
		 * may pick up a signal that it isn't ready for yet.  SMP
		 * startup occurs after SI_BOOT2_LEAVE_CRIT so interrupts
		 * have already been enabled.
		 */
		cpu_disable_intr();
		pthread_create(&ap_tids[x], NULL, start_ap, NULL);
		cpu_enable_intr();

		while ((smp_startup_mask & (1 << x)) == 0) {
			cpu_lfence(); /* XXX spin until the AP has started */
			DELAY(1000);
		}
	}

	return(ncpus - 1);
}
470