xref: /dragonfly/sys/platform/vkernel64/x86_64/mp.c (revision f7df6c8e)
/*
 * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/memrange.h>
#include <sys/mman.h>		/* mmap()/MAP_ANON for the vmm stack setup;
				 * possibly already pulled in indirectly */
#include <sys/tls.h>
#include <sys/types.h>
#include <sys/vmm.h>

#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>

#include <sys/mplock2.h>

#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/globaldata.h>
#include <machine/md_var.h>
#include <machine/pmap.h>
#include <machine/smp.h>
#include <machine/tls.h>
#include <machine/param.h>

#include <unistd.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>

extern pt_entry_t *KPTphys;

extern int vmm_enabled;

volatile cpumask_t stopped_cpus;
/* which cpus are ready for IPIs etc? */
cpumask_t	smp_active_mask = CPUMASK_INITIALIZER_ONLYONE;
static int	boot_address;
/* which cpus have been started */
static cpumask_t smp_startup_mask = CPUMASK_INITIALIZER_ONLYONE;
int		mp_naps;                /* # of Application Processors (APs) */
static int	mp_finish;

/* Local data for detecting CPU TOPOLOGY */
static int core_bits = 0;
static int logical_CPU_bits = 0;

/* function prototypes XXX these should go elsewhere */
void bootstrap_idle(void);
void single_cpu_ipi(int, int, int);
void selected_cpu_ipi(cpumask_t, int, int);
#if 0
void ipi_handler(int);
#endif

pt_entry_t *SMPpt;

/* AP uses this during bootstrap.  Do not staticize.  */
char *bootSTK;
static int bootAP;


/* XXX these need to go into the appropriate header file */
static int start_all_aps(u_int);
void init_secondary(void);
void *start_ap(void *);

/*
 * Get SMP fully working before we start initializing devices.
 */
static
void
ap_finish(void)
{
        mp_finish = 1;
        if (bootverbose)
                kprintf("Finish MP startup\n");

	/* build our map of 'other' CPUs */
	mycpu->gd_other_cpus = smp_startup_mask;
	CPUMASK_NANDBIT(mycpu->gd_other_cpus, mycpu->gd_cpuid);

	/*
	 * Let the other cpus finish initializing and build their map
	 * of 'other' CPUs.
	 */
        rel_mplock();
        while (CPUMASK_CMPMASKNEQ(smp_active_mask, smp_startup_mask)) {
		DELAY(100000);
                cpu_lfence();
	}

        while (try_mplock() == 0)
		DELAY(100000);
        if (bootverbose)
                kprintf("Active CPU Mask: %08lx\n",
			(long)CPUMASK_LOWMASK(smp_active_mask));
}

SYSINIT(finishsmp, SI_BOOT2_FINISH_SMP, SI_ORDER_FIRST, ap_finish, NULL);

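/*
 * Per-AP pthread entry point: run the secondary cpu initialization,
 * establish the cpu's identity via setrealcpu() and drop into the
 * bootstrap idle loop.  Never returns.
 */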
void *
start_ap(void *arg __unused)
{
	init_secondary();
	setrealcpu();
	bootstrap_idle();

	return(NULL); /* NOTREACHED */
}

/* storage for AP thread IDs */
pthread_t ap_tids[MAXCPU];

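/*
 * Size the cpu set, compute the ncpus2/ncpus_fit derivatives, allocate
 * cpu0's IPI queues and start the APs.  optcpus is the requested cpu
 * count (presumably taken from the vkernel's command line).
 */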
void
mp_start(void)
{
	size_t ipiq_size;
	int shift;

	ncpus = optcpus;

	mp_naps = ncpus - 1;

	/* ncpus2 -- ncpus rounded down to the nearest power of 2 */
	for (shift = 0; (1 << shift) <= ncpus; ++shift)
		;
	--shift;
	ncpus2_shift = shift;
	ncpus2 = 1 << shift;
	ncpus2_mask = ncpus2 - 1;

        /* ncpus_fit -- ncpus rounded up to the nearest power of 2 */
        if ((1 << shift) < ncpus)
                ++shift;
        ncpus_fit = 1 << shift;
        ncpus_fit_mask = ncpus_fit - 1;
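	/*
	 * Example: optcpus = 6 gives ncpus2 = 4 (ncpus2_shift = 2,
	 * ncpus2_mask = 3) and ncpus_fit = 8 (ncpus_fit_mask = 7).
	 */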

	/*
	 * cpu0 initialization
	 */
	ipiq_size = sizeof(struct lwkt_ipiq) * ncpus;
	mycpu->gd_ipiq = (void *)kmem_alloc(&kernel_map, ipiq_size);
	bzero(mycpu->gd_ipiq, ipiq_size);

	/*
	 * cpu 1-(n-1)
	 */
	start_all_aps(boot_address);
}

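/*
 * Announce the cpu configuration.  In the vkernel every AP is backed by
 * a host pthread rather than real hardware.
 */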
void
mp_announce(void)
{
	int x;

	kprintf("DragonFly/MP: Multiprocessor\n");
	kprintf(" cpu0 (BSP)\n");

	for (x = 1; x <= mp_naps; ++x)
		kprintf(" cpu%d (AP)\n", x);
}

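/*
 * Send an IPI to a single target cpu.  The vkernel has no real APIC;
 * an IPI is simulated by delivering SIGUSR1 to the pthread backing the
 * target cpu.  Only cpus in smp_active_mask may be signalled.
 */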
void
cpu_send_ipiq(int dcpu)
{
	if (CPUMASK_TESTBIT(smp_active_mask, dcpu)) {
		if (pthread_kill(ap_tids[dcpu], SIGUSR1) != 0)
			panic("pthread_kill failed in cpu_send_ipiq");
	}
#if 0
	panic("XXX cpu_send_ipiq()");
#endif
}

void
single_cpu_ipi(int cpu, int vector, int delivery_mode)
{
	kprintf("XXX single_cpu_ipi\n");
}

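/*
 * Deliver an IPI to every cpu set in the target mask, one cpu at a
 * time.  single_cpu_ipi() is still a stub, so vectored delivery is
 * effectively a placeholder here.
 */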
void
selected_cpu_ipi(cpumask_t target, int vector, int delivery_mode)
{
	crit_enter();
	while (CPUMASK_TESTNZERO(target)) {
		int n = BSFCPUMASK(target);
		CPUMASK_NANDBIT(target, n);
		single_cpu_ipi(n, vector, delivery_mode);
	}
	crit_exit();
}

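/*
 * Stop the cpus in the map (restricted to the active set).  Each target
 * is marked in stopped_cpus and its backing pthread is signalled with
 * SIGXCPU; the signal handler is expected to park the cpu until
 * restart_cpus() clears its bit.
 */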
int
stop_cpus(cpumask_t map)
{
	CPUMASK_ANDMASK(map, smp_active_mask);

	crit_enter();
	while (CPUMASK_TESTNZERO(map)) {
		int n = BSFCPUMASK(map);
		CPUMASK_NANDBIT(map, n);
		ATOMIC_CPUMASK_ORBIT(stopped_cpus, n);
		if (pthread_kill(ap_tids[n], SIGXCPU) != 0)
			panic("stop_cpus: pthread_kill failed");
	}
	crit_exit();
#if 0
	panic("XXX stop_cpus()");
#endif

	return(1);
}

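/*
 * Restart previously stopped cpus: clear their bits in stopped_cpus and
 * poke each backing pthread with SIGXCPU again so it re-evaluates its
 * stopped state.
 */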
int
restart_cpus(cpumask_t map)
{
	CPUMASK_ANDMASK(map, smp_active_mask);

	crit_enter();
	while (CPUMASK_TESTNZERO(map)) {
		int n = BSFCPUMASK(map);
		CPUMASK_NANDBIT(map, n);
		ATOMIC_CPUMASK_NANDBIT(stopped_cpus, n);
		if (pthread_kill(ap_tids[n], SIGXCPU) != 0)
			panic("restart_cpus: pthread_kill failed");
	}
	crit_exit();
#if 0
	panic("XXX restart_cpus()");
#endif

	return(1);
}

void
ap_init(void)
{
        /*
         * Adjust smp_startup_mask to signal the BSP that we have started
         * up successfully.  Note that we do not yet hold the BGL.  The BSP
         * is waiting for our signal.
         *
         * We can't set our bit in smp_active_mask yet because we are holding
         * interrupts physically disabled and remote cpus could deadlock
         * trying to send us an IPI.
         */
	ATOMIC_CPUMASK_ORBIT(smp_startup_mask, mycpu->gd_cpuid);
	cpu_mfence();

        /*
         * Interlock for finalization.  Wait until mp_finish is non-zero,
         * then get the MP lock.
         *
         * Note: We are in a critical section.
         *
         * Note: we are the idle thread, we can only spin.
         *
         * Note: The load fence is memory volatile and prevents the compiler
         * from improperly caching mp_finish, and the cpu from improperly
         * caching it.
         */

	while (mp_finish == 0) {
		cpu_lfence();
		DELAY(500000);
	}
        while (try_mplock() == 0)
		DELAY(100000);

        /* BSP may have changed PTD while we're waiting for the lock */
        cpu_invltlb();

        /* Build our map of 'other' CPUs. */
        mycpu->gd_other_cpus = smp_startup_mask;
	CPUMASK_NANDBIT(mycpu->gd_other_cpus, mycpu->gd_cpuid);

        kprintf("SMP: AP CPU #%d Launched!\n", mycpu->gd_cpuid);

        /* Set memory range attributes for this CPU to match the BSP */
        mem_range_AP_init();
        /*
         * Once we go active we must process any IPIQ messages that may
         * have been queued, because no actual IPI will occur until we
         * set our bit in the smp_active_mask.  If we don't the IPI
         * message interlock could be left set which would also prevent
         * further IPIs.
         *
         * The idle loop doesn't expect the BGL to be held and while
         * lwkt_switch() normally cleans things up this is a special case
         * because we are returning almost directly into the idle loop.
         *
         * The idle thread is never placed on the runq, make sure
         * nothing we've done has put it there.
         */
	KKASSERT(get_mplock_count(curthread) == 1);
	ATOMIC_CPUMASK_ORBIT(smp_active_mask, mycpu->gd_cpuid);

	mdcpu->gd_fpending = 0;
	mdcpu->gd_ipending = 0;
	initclocks_pcpu();	/* clock interrupts (via IPIs) */
	lwkt_process_ipiq();

        /*
         * Releasing the mp lock lets the BSP finish up the SMP init
         */
        rel_mplock();
        KKASSERT((curthread->td_flags & TDF_RUNQ) == 0);
}

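/*
 * Low level initialization run on the AP's own thread before it enters
 * the scheduler.  The cpu id is handed over through the global bootAP,
 * which the BSP sets just before creating the thread.
 */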
void
init_secondary(void)
{
        int     myid = bootAP;
        struct mdglobaldata *md;
        struct privatespace *ps;

        ps = &CPU_prvspace[myid];

	KKASSERT(ps->mdglobaldata.mi.gd_prvspace == ps);

	/*
	 * Setup the %gs for cpu #n.  The mycpu macro works after this
	 * point.  Note that %fs is used by pthreads.
	 */
	tls_set_gs(&CPU_prvspace[myid], sizeof(struct privatespace));

        md = mdcpu;     /* loaded through %gs:0 (mdglobaldata.mi.gd_prvspace) */

	/* JG */
        md->gd_common_tss.tss_rsp0 = 0; /* not used until after switch */
        //md->gd_common_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
        //md->gd_common_tss.tss_ioopt = (sizeof md->gd_common_tss) << 16;

        /*
         * Set to a known state:
         * Set by mpboot.s: CR0_PG, CR0_PE
         * Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
         */
}

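/*
 * Start up all the APs (cpus 1..ncpus-1).  For each AP the private
 * space and idle stack are backed with freshly allocated kernel pages,
 * the per-cpu globaldata is initialized, and a host pthread is created
 * to run start_ap().  The BSP then spins until the AP checks in via
 * smp_startup_mask.
 */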
static int
start_all_aps(u_int boot_addr)
{
	int x, i;
	struct mdglobaldata *gd;
	struct privatespace *ps;
	vm_page_t m;
	vm_offset_t va;
	void *stack;
	pthread_attr_t attr;
	size_t ipiq_size;
#if 0
	struct lwp_params params;
#endif

	/*
	 * Needed for IPIs to the initial (BSP) thread.
	 * FIXME: rename ap_tids?
	 */
	ap_tids[0] = pthread_self();
	pthread_attr_init(&attr);

	vm_object_hold(&kernel_object);
	for (x = 1; x <= mp_naps; x++) {
		/* Allocate space for the CPU's private space. */
		for (i = 0; i < sizeof(struct mdglobaldata); i += PAGE_SIZE) {
			va = (vm_offset_t)&CPU_prvspace[x].mdglobaldata + i;
			m = vm_page_alloc(&kernel_object, va, VM_ALLOC_SYSTEM);
			pmap_kenter_quick(va, m->phys_addr);
		}

		for (i = 0; i < sizeof(CPU_prvspace[x].idlestack); i += PAGE_SIZE) {
			va = (vm_offset_t)&CPU_prvspace[x].idlestack + i;
			m = vm_page_alloc(&kernel_object, va, VM_ALLOC_SYSTEM);
			pmap_kenter_quick(va, m->phys_addr);
		}

                gd = &CPU_prvspace[x].mdglobaldata;     /* official location */
                bzero(gd, sizeof(*gd));
                gd->mi.gd_prvspace = ps = &CPU_prvspace[x];

                /* prime data page for it to use */
                mi_gdinit(&gd->mi, x);
                cpu_gdinit(gd, x);

#if 0
                gd->gd_CMAP1 = pmap_kpte((vm_offset_t)CPU_prvspace[x].CPAGE1);
                gd->gd_CMAP2 = pmap_kpte((vm_offset_t)CPU_prvspace[x].CPAGE2);
                gd->gd_CMAP3 = pmap_kpte((vm_offset_t)CPU_prvspace[x].CPAGE3);
                gd->gd_PMAP1 = pmap_kpte((vm_offset_t)CPU_prvspace[x].PPAGE1);
                gd->gd_CADDR1 = ps->CPAGE1;
                gd->gd_CADDR2 = ps->CPAGE2;
                gd->gd_CADDR3 = ps->CPAGE3;
                gd->gd_PADDR1 = (vpte_t *)ps->PPAGE1;
#endif

		ipiq_size = sizeof(struct lwkt_ipiq) * (mp_naps + 1);
                gd->mi.gd_ipiq = (void *)kmem_alloc(&kernel_map, ipiq_size);
                bzero(gd->mi.gd_ipiq, ipiq_size);

                /*
                 * Setup the AP boot stack
                 */
                bootSTK = &ps->idlestack[UPAGES*PAGE_SIZE/2];
                bootAP = x;

		/*
		 * Setup the AP's lwp; this is the 'cpu'.
		 *
		 * We have to make sure our signals are masked or the new LWP
		 * may pick up a signal that it isn't ready for yet.  SMP
		 * startup occurs after SI_BOOT2_LEAVE_CRIT so interrupts
		 * have already been enabled.
		 */
		cpu_disable_intr();

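		/*
		 * Under VMM the AP thread is given an explicitly mmap()ed,
		 * executable stack via pthread_attr_setstack() instead of
		 * the pthread default.
		 */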
		if (vmm_enabled) {
			stack = mmap(NULL, KERNEL_STACK_SIZE,
			    PROT_READ|PROT_WRITE|PROT_EXEC,
			    MAP_ANON, -1, 0);
			if (stack == MAP_FAILED) {
				panic("Unable to allocate stack for thread %d\n", x);
			}
			pthread_attr_setstack(&attr, stack, KERNEL_STACK_SIZE);
		}

		pthread_create(&ap_tids[x], &attr, start_ap, NULL);
		cpu_enable_intr();

		while (CPUMASK_TESTBIT(smp_startup_mask, x) == 0) {
			cpu_lfence(); /* XXX spin until the AP has started */
			DELAY(1000);
		}
	}
	vm_object_drop(&kernel_object);
	pthread_attr_destroy(&attr);

	return(ncpus - 1);
}

/*
 * CPU TOPOLOGY DETECTION FUNCTIONS.
 */

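/*
 * The vkernel does not probe CPUID for its topology; the number of
 * logical-cpu and core bits in the synthetic APIC ID is taken from
 * vkernel_b_arg and vkernel_B_arg (presumably the vkernel's -b/-B
 * command line options).
 */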
void
detect_cpu_topology(void)
{
	logical_CPU_bits = vkernel_b_arg;
	core_bits = vkernel_B_arg;
}

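/*
 * The synthetic APIC ID is laid out as
 *
 *	[ chip id | core id (core_bits) | logical cpu (logical_CPU_bits) ]
 *
 * so each accessor below shifts out and masks the appropriate field.
 */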
int
get_chip_ID(int cpuid)
{
	return get_apicid_from_cpuid(cpuid) >>
	    (logical_CPU_bits + core_bits);
}

int
get_core_number_within_chip(int cpuid)
{
	return (get_apicid_from_cpuid(cpuid) >> logical_CPU_bits) &
	    ((1 << core_bits) - 1);
}

int
get_logical_CPU_number_within_core(int cpuid)
{
	return get_apicid_from_cpuid(cpuid) &
	    ((1 << logical_CPU_bits) - 1);
}
509