/*
 * Copyright (c) 1996, by Steve Passe
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the developer may NOT be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/i386/mp_machdep.c,v 1.115.2.15 2003/03/14 21:22:35 jhb Exp $
 */

#include "opt_cpu.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/cons.h>	/* cngetc() */
#include <sys/machintr.h>
#include <sys/cpu_topology.h>

#include <sys/mplock2.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <sys/lock.h>
#include <vm/vm_map.h>

#include <machine/smp.h>
#include <machine_base/apic/apicreg.h>
#include <machine/atomic.h>
#include <machine/cpufunc.h>
#include <machine/cputypes.h>
#include <machine_base/apic/lapic.h>
#include <machine_base/apic/ioapic.h>
#include <machine_base/acpica/acpi_md_cpu.h>
#include <machine/psl.h>
#include <machine/segments.h>
#include <machine/tss.h>
#include <machine/specialreg.h>
#include <machine/globaldata.h>
#include <machine/pmap_inval.h>
#include <machine/clock.h>

#include <machine/md_var.h>		/* setidt() */
#include <machine_base/icu/icu.h>	/* IPIs */
#include <machine_base/icu/icu_var.h>
#include <machine_base/apic/ioapic_abi.h>
#include <machine/intr_machdep.h>	/* IPIs */

#define WARMBOOT_TARGET		0
#define WARMBOOT_OFF		(KERNBASE + 0x0467)
#define WARMBOOT_SEG		(KERNBASE + 0x0469)

#define CMOS_REG		(0x70)
#define CMOS_DATA		(0x71)
#define BIOS_RESET		(0x0f)
#define BIOS_WARM		(0x0a)

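/*
 * Background note (standard PC/AT behavior): writing BIOS_WARM (0x0a) to
 * the CMOS reset-code byte (BIOS_RESET, offset 0x0f) asks the BIOS to skip
 * POST on the next CPU reset and instead jump through the warm-boot vector
 * at 0040:0067 (WARMBOOT_OFF/WARMBOOT_SEG above).  start_all_aps() points
 * that vector at the AP trampoline, roughly:
 *
 *	outb(CMOS_REG, BIOS_RESET);
 *	outb(CMOS_DATA, BIOS_WARM);		// request warm start
 *	*(volatile u_short *)WARMBOOT_OFF = WARMBOOT_TARGET;
 *	*(volatile u_short *)WARMBOOT_SEG = boot_address >> 4;
 */
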
/*
 * This code MUST be enabled here and in mpboot.s.
 * It follows the very early stages of AP boot by placing values in CMOS ram.
 * It will NORMALLY never be needed, hence the primitive method for
 * enabling it.
 */
#if defined(CHECK_POINTS)
#define CHECK_READ(A)	 (outb(CMOS_REG, (A)), inb(CMOS_DATA))
#define CHECK_WRITE(A,D) (outb(CMOS_REG, (A)), outb(CMOS_DATA, (D)))

#define CHECK_INIT(D)				\
	CHECK_WRITE(0x34, (D));			\
	CHECK_WRITE(0x35, (D));			\
	CHECK_WRITE(0x36, (D));			\
	CHECK_WRITE(0x37, (D));			\
	CHECK_WRITE(0x38, (D));			\
	CHECK_WRITE(0x39, (D));

#define CHECK_PRINT(S)				\
	kprintf("%s: %d, %d, %d, %d, %d, %d\n",	\
	   (S),					\
	   CHECK_READ(0x34),			\
	   CHECK_READ(0x35),			\
	   CHECK_READ(0x36),			\
	   CHECK_READ(0x37),			\
	   CHECK_READ(0x38),			\
	   CHECK_READ(0x39));

#else				/* CHECK_POINTS */

#define CHECK_INIT(D)
#define CHECK_PRINT(S)

#endif				/* CHECK_POINTS */
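
/*
 * Usage sketch: start_ap() stamps CMOS 0x34-0x39 with a known value via
 * CHECK_INIT(99); the AP boot path (mpboot.s) is expected to overwrite
 * individual slots as it progresses, so CHECK_PRINT("trace") after a
 * failed start shows roughly how far the AP got.
 */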

/*
 * Values to send to the POST hardware.
 */
#define MP_BOOTADDRESS_POST	0x10
#define MP_PROBE_POST		0x11
#define MPTABLE_PASS1_POST	0x12

#define MP_START_POST		0x13
#define MP_ENABLE_POST		0x14
#define MPTABLE_PASS2_POST	0x15

#define START_ALL_APS_POST	0x16
#define INSTALL_AP_TRAMP_POST	0x17
#define START_AP_POST		0x18

#define MP_ANNOUNCE_POST	0x19

/** XXX FIXME: where does this really belong, isa.h/isa.c perhaps? */
int	current_postcode;

/** XXX FIXME: what system files declare these??? */

extern int naps;
extern int _udatasel;

int64_t tsc0_offset;
extern int64_t tsc_offsets[];

/* AP uses this during bootstrap.  Do not staticize.  */
char *bootSTK;
static int bootAP;

struct pcb stoppcbs[MAXCPU];

extern inthand_t IDTVEC(fast_syscall), IDTVEC(fast_syscall32);

/*
 * Local data and functions.
 */

static u_int	boot_address;
static int	mp_finish;
static int	mp_finish_lapic;

static int	start_all_aps(u_int boot_addr);
#if 0
static void	install_ap_tramp(u_int boot_addr);
#endif
static int	start_ap(struct mdglobaldata *gd, u_int boot_addr, int smibest);
static int	smitest(void);
static void	mp_bsp_simple_setup(void);

/* which cpus have been started */
static cpumask_t smp_startup_mask = CPUMASK_INITIALIZER_ONLYONE;
/* which cpus have had their lapic initialized */
static cpumask_t smp_lapic_mask = CPUMASK_INITIALIZER_ONLYONE;
/* which cpus are ready for IPIs etc? */
cpumask_t smp_active_mask = CPUMASK_INITIALIZER_ONLYONE;
cpumask_t smp_finalize_mask = CPUMASK_INITIALIZER_ONLYONE;

SYSCTL_OPAQUE(_machdep, OID_AUTO, smp_active, CTLFLAG_RD,
	      &smp_active_mask, sizeof(smp_active_mask), "LU", "");
static u_int	bootMP_size;
static u_int	report_invlpg_src;
SYSCTL_INT(_machdep, OID_AUTO, report_invlpg_src, CTLFLAG_RW,
	&report_invlpg_src, 0, "");
static u_int	report_invltlb_src;
SYSCTL_INT(_machdep, OID_AUTO, report_invltlb_src, CTLFLAG_RW,
	&report_invltlb_src, 0, "");
static int	optimized_invltlb;
SYSCTL_INT(_machdep, OID_AUTO, optimized_invltlb, CTLFLAG_RW,
	&optimized_invltlb, 0, "");
static int	all_but_self_ipi_enable = 1;
SYSCTL_INT(_machdep, OID_AUTO, all_but_self_ipi_enable, CTLFLAG_RW,
	&all_but_self_ipi_enable, 0, "");

/* Local data for detecting CPU TOPOLOGY */
static int core_bits = 0;
static int logical_CPU_bits = 0;


/*
 * Calculate usable address in base memory for AP trampoline code.
 */
u_int
mp_bootaddress(u_int basemem)
{
	POSTCODE(MP_BOOTADDRESS_POST);

	bootMP_size = mptramp_end - mptramp_start;
	boot_address = trunc_page(basemem * 1024); /* round down to 4k boundary */
	if (((basemem * 1024) - boot_address) < bootMP_size)
		boot_address -= PAGE_SIZE;	/* not enough, lower by 4k */
	/* 3 levels of page table pages */
	mptramp_pagetables = boot_address - (PAGE_SIZE * 3);

	return mptramp_pagetables;
}
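
/*
 * Illustrative example (values assumed, not from the source): with
 * basemem = 639 (KB), trunc_page() gives boot_address = 0x9f000, leaving
 * 0xc00 bytes of base memory above it; if the trampoline is larger than
 * that we back off one more page.  The three page table pages then sit
 * immediately below, so mp_bootaddress() returns
 * 0x9f000 - 3 * PAGE_SIZE = 0x9c000.
 */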

/*
 * Print various information about the SMP system hardware and setup.
 */
void
mp_announce(void)
{
	int     x;

	POSTCODE(MP_ANNOUNCE_POST);

	kprintf("DragonFly/MP: Multiprocessor motherboard\n");
	kprintf(" cpu0 (BSP): apic id: %2d\n", CPUID_TO_APICID(0));
	for (x = 1; x <= naps; ++x)
		kprintf(" cpu%d (AP):  apic id: %2d\n", x, CPUID_TO_APICID(x));

	if (!ioapic_enable)
		kprintf(" Warning: APIC I/O disabled\n");
}

/*
 * AP cpus call this to sync up protected mode.
 *
 * WARNING! %gs is not set up on entry.  This routine sets up %gs.
 */
void
init_secondary(void)
{
	int	gsel_tss;
	int	x, myid = bootAP;
	u_int64_t msr, cr0;
	struct mdglobaldata *md;
	struct privatespace *ps;

	ps = CPU_prvspace[myid];

	gdt_segs[GPROC0_SEL].ssd_base = (long)&ps->common_tss;
	ps->mdglobaldata.mi.gd_prvspace = ps;

	/* We fill the 32-bit segment descriptors */
	for (x = 0; x < NGDT; x++) {
		if (x != GPROC0_SEL && x != (GPROC0_SEL + 1))
			ssdtosd(&gdt_segs[x], &gdt[myid * NGDT + x]);
	}
	/* And now a 64-bit one */
	ssdtosyssd(&gdt_segs[GPROC0_SEL],
	    (struct system_segment_descriptor *)&gdt[myid * NGDT + GPROC0_SEL]);

	r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
	r_gdt.rd_base = (long) &gdt[myid * NGDT];
	lgdt(&r_gdt);			/* does magic intra-segment return */

	/* lgdt() destroys the GSBASE value, so we load GSBASE after lgdt() */
	wrmsr(MSR_FSBASE, 0);		/* User value */
	wrmsr(MSR_GSBASE, (u_int64_t)ps);
	wrmsr(MSR_KGSBASE, 0);		/* XXX User value while we're in the kernel */

	lidt(&r_idt_arr[mdcpu->mi.gd_cpuid]);

	load_ds(_udatasel);
	load_es(_udatasel);
	load_fs(_udatasel);

#if 0
	lldt(_default_ldt);
	mdcpu->gd_currentldt = _default_ldt;
#endif

	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	gdt[myid * NGDT + GPROC0_SEL].sd_type = SDT_SYSTSS;

	md = mdcpu;	/* loaded through %gs:0 (mdglobaldata.mi.gd_prvspace)*/

	/*
	 * TSS entry point for interrupts, traps, and exceptions
	 * (sans NMI).  This will always go to near the top of the pcpu
	 * trampoline area.  Hardware-pushed data will be copied into
	 * the trap-frame on entry, and (if necessary) returned to the
	 * trampoline on exit.
	 *
	 * We store some pcb data for the trampoline code above the
	 * stack the cpu hw pushes into, and arrange things so the
	 * address of tr_pcb_rsp is the same as the desired top of
	 * stack.
	 */
	ps->common_tss.tss_rsp0 = (register_t)&ps->trampoline.tr_pcb_rsp;
	ps->trampoline.tr_pcb_rsp = ps->common_tss.tss_rsp0;
	ps->trampoline.tr_pcb_gs_kernel = (register_t)md;
	ps->trampoline.tr_pcb_cr3 = KPML4phys;	/* adj to user cr3 live */
	ps->dbltramp.tr_pcb_gs_kernel = (register_t)md;
	ps->dbltramp.tr_pcb_cr3 = KPML4phys;
	ps->dbgtramp.tr_pcb_gs_kernel = (register_t)md;
	ps->dbgtramp.tr_pcb_cr3 = KPML4phys;

#if 0 /* JG XXX */
	ps->common_tss.tss_ioopt = (sizeof ps->common_tss) << 16;
#endif
	md->gd_tss_gdt = &gdt[myid * NGDT + GPROC0_SEL];
	md->gd_common_tssd = *md->gd_tss_gdt;

	/* double fault stack */
	ps->common_tss.tss_ist1 = (register_t)&ps->dbltramp.tr_pcb_rsp;
	ps->common_tss.tss_ist2 = (register_t)&ps->dbgtramp.tr_pcb_rsp;

	ltr(gsel_tss);

	/*
	 * Set to a known state:
	 * Set by mpboot.s: CR0_PG, CR0_PE
	 * Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
	 */
	cr0 = rcr0();
	cr0 &= ~(CR0_CD | CR0_NW | CR0_EM);
	load_cr0(cr0);

	/* Set up the fast syscall stuff */
	msr = rdmsr(MSR_EFER) | EFER_SCE;
	wrmsr(MSR_EFER, msr);
	wrmsr(MSR_LSTAR, (u_int64_t)IDTVEC(fast_syscall));
	wrmsr(MSR_CSTAR, (u_int64_t)IDTVEC(fast_syscall32));
	msr = ((u_int64_t)GSEL(GCODE_SEL, SEL_KPL) << 32) |
	      ((u_int64_t)GSEL(GUCODE32_SEL, SEL_UPL) << 48);
	wrmsr(MSR_STAR, msr);
	wrmsr(MSR_SF_MASK, PSL_NT|PSL_T|PSL_I|PSL_C|PSL_D|PSL_IOPL|PSL_AC);
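
	/*
	 * Layout note (architectural fact, for reference): MSR_STAR bits
	 * 47:32 supply the SYSCALL CS/SS base (kernel CS, with SS taken
	 * as that selector + 8), and bits 63:48 supply the SYSRET base,
	 * from which the cpu derives the user 32-bit CS, SS (base + 8),
	 * and 64-bit CS (base + 16).  Hence GCODE_SEL above goes in
	 * 47:32 and GUCODE32_SEL in 63:48.
	 */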

	pmap_set_opt();		/* PSE/4MB pages, etc */
	pmap_init_pat();	/* Page Attribute Table */

	/* set up CPU registers and state */
	cpu_setregs();

	/* set up SSE/NX registers */
	initializecpu(myid);

	/* set up FPU state on the AP */
	npxinit();

	/* If BSP is in the X2APIC mode, put the AP into the X2APIC mode. */
	if (x2apic_enable)
		lapic_x2apic_enter(FALSE);

	/* disable the APIC, just to be SURE */
	LAPIC_WRITE(svr, (LAPIC_READ(svr) & ~APIC_SVR_ENABLE));
}

/*******************************************************************
 * local functions and data
 */

/*
 * Start the SMP system
 */
static void
mp_start_aps(void *dummy __unused)
{
	if (lapic_enable) {
		/* start each Application Processor */
		start_all_aps(boot_address);
	} else {
		mp_bsp_simple_setup();
	}
}
SYSINIT(startaps, SI_BOOT2_START_APS, SI_ORDER_FIRST, mp_start_aps, NULL);

/*
 * start each AP in our list
 */
static int
start_all_aps(u_int boot_addr)
{
	vm_offset_t va = boot_address + KERNBASE;
	u_int64_t *pt4, *pt3, *pt2;
	int	pssize;
	int     x, i;
	int	shift;
	int	smicount;
	int	smibest;
	int	smilast;
	u_char  mpbiosreason;
	u_long  mpbioswarmvec;
	struct mdglobaldata *gd;
	struct privatespace *ps;
	size_t ipiq_size;

	POSTCODE(START_ALL_APS_POST);

	/* install the AP 1st level boot code */
	pmap_kenter(va, boot_address);
	cpu_invlpg((void *)va);		/* JG XXX */
	bcopy(mptramp_start, (void *)va, bootMP_size);

	/* Locate the page tables, they'll be below the trampoline */
	pt4 = (u_int64_t *)(uintptr_t)(mptramp_pagetables + KERNBASE);
	pt3 = pt4 + (PAGE_SIZE) / sizeof(u_int64_t);
	pt2 = pt3 + (PAGE_SIZE) / sizeof(u_int64_t);

	/* Create the initial 1GB replicated page tables */
	for (i = 0; i < 512; i++) {
		/* Each slot of the level 4 pages points to the same level 3 page */
		pt4[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables + PAGE_SIZE);
		pt4[i] |= kernel_pmap.pmap_bits[PG_V_IDX] |
		    kernel_pmap.pmap_bits[PG_RW_IDX] |
		    kernel_pmap.pmap_bits[PG_U_IDX];

		/* Each slot of the level 3 pages points to the same level 2 page */
		pt3[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables + (2 * PAGE_SIZE));
		pt3[i] |= kernel_pmap.pmap_bits[PG_V_IDX] |
		    kernel_pmap.pmap_bits[PG_RW_IDX] |
		    kernel_pmap.pmap_bits[PG_U_IDX];

		/* The level 2 page slots are mapped with 2MB pages for 1GB. */
		pt2[i] = i * (2 * 1024 * 1024);
		pt2[i] |= kernel_pmap.pmap_bits[PG_V_IDX] |
		    kernel_pmap.pmap_bits[PG_RW_IDX] |
		    kernel_pmap.pmap_bits[PG_PS_IDX] |
		    kernel_pmap.pmap_bits[PG_U_IDX];
	}
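
	/*
	 * Net effect (sketch): VA 0-1GB identity-maps PA 0-1GB through
	 * 512 2MB PDEs, and since every PML4 and PDP slot points at the
	 * same next level, any virtual address aliases into that low
	 * 1GB.  This lets the trampoline turn on paging while still
	 * fetching instructions from its low physical address.
	 */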

	/* save the current value of the warm-start vector */
	mpbioswarmvec = *((u_int32_t *) WARMBOOT_OFF);
	outb(CMOS_REG, BIOS_RESET);
	mpbiosreason = inb(CMOS_DATA);

	/* setup a vector to our boot code */
	*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
	*((volatile u_short *) WARMBOOT_SEG) = (boot_address >> 4);
	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, BIOS_WARM);	/* 'warm-start' */

	/*
	 * If we have a TSC we can figure out the SMI interrupt rate.
	 * The SMI does not necessarily use a constant rate.  Spend
	 * up to 250ms trying to figure it out.
	 */
	smibest = 0;
	if (cpu_feature & CPUID_TSC) {
		set_apic_timer(275000);
		smilast = read_apic_timer();
		for (x = 0; x < 20 && read_apic_timer(); ++x) {
			smicount = smitest();
			if (smibest == 0 || smilast - smicount < smibest)
				smibest = smilast - smicount;
			smilast = smicount;
		}
		if (smibest > 250000)
			smibest = 0;
	}
	if (smibest)
		kprintf("SMI Frequency (worst case): %d Hz (%d us)\n",
			1000000 / smibest, smibest);

	/* start each AP */
	for (x = 1; x <= naps; ++x) {
		/* This is a bit verbose, it will go away soon.  */

		pssize = sizeof(struct privatespace);
		ps = (void *)kmem_alloc3(&kernel_map, pssize, VM_SUBSYS_GD,
					 KM_CPU(x));
		CPU_prvspace[x] = ps;
#if 0
		kprintf("ps %d %p %d\n", x, ps, pssize);
#endif
		bzero(ps, pssize);
		gd = &ps->mdglobaldata;
		gd->mi.gd_prvspace = ps;

		/* prime data page for it to use */
		mi_gdinit(&gd->mi, x);
		cpu_gdinit(gd, x);
		ipiq_size = sizeof(struct lwkt_ipiq) * (naps + 1);
		gd->mi.gd_ipiq = (void *)kmem_alloc3(&kernel_map, ipiq_size,
						     VM_SUBSYS_IPIQ, KM_CPU(x));
		bzero(gd->mi.gd_ipiq, ipiq_size);

		gd->gd_acpi_id = CPUID_TO_ACPIID(gd->mi.gd_cpuid);

		/* initialize arc4random. */
		arc4_init_pcpu(x);

		/* setup a vector to our boot code */
		*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
		*((volatile u_short *) WARMBOOT_SEG) = (boot_addr >> 4);
		outb(CMOS_REG, BIOS_RESET);
		outb(CMOS_DATA, BIOS_WARM);	/* 'warm-start' */

		/*
		 * Setup the AP boot stack
		 */
		bootSTK = &ps->idlestack[UPAGES * PAGE_SIZE - PAGE_SIZE];
		bootAP = x;

		/* attempt to start the Application Processor */
		CHECK_INIT(99);	/* setup checkpoints */
		if (!start_ap(gd, boot_addr, smibest)) {
			kprintf("\nAP #%d (PHY# %d) failed!\n",
				x, CPUID_TO_APICID(x));
			CHECK_PRINT("trace");	/* show checkpoints */
			/* better panic as the AP may be running loose */
			kprintf("panic y/n? [y] ");
			cnpoll(TRUE);
			if (cngetc() != 'n')
				panic("bye-bye");
			cnpoll(FALSE);
		}
		CHECK_PRINT("trace");		/* show checkpoints */
	}

	/* set ncpus to 1 + highest logical cpu.  Not all may have come up */
	ncpus = x;

	for (shift = 0; (1 << shift) <= ncpus; ++shift)
		;
	--shift;

	/* ncpus_fit -- ncpus rounded up to the nearest power of 2 */
	if ((1 << shift) < ncpus)
		++shift;
	ncpus_fit = 1 << shift;
	ncpus_fit_mask = ncpus_fit - 1;
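
	/*
	 * e.g. (illustrative) ncpus = 6: the loop leaves shift = 2 after
	 * the decrement, the power-of-2 check bumps it to 3, giving
	 * ncpus_fit = 8 and ncpus_fit_mask = 7.
	 */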

	/* build our map of 'other' CPUs */
	mycpu->gd_other_cpus = smp_startup_mask;
	CPUMASK_NANDBIT(mycpu->gd_other_cpus, mycpu->gd_cpuid);

	gd = (struct mdglobaldata *)mycpu;
	gd->gd_acpi_id = CPUID_TO_ACPIID(mycpu->gd_cpuid);

	ipiq_size = sizeof(struct lwkt_ipiq) * ncpus;
	mycpu->gd_ipiq = (void *)kmem_alloc3(&kernel_map, ipiq_size,
					     VM_SUBSYS_IPIQ, KM_CPU(0));
	bzero(mycpu->gd_ipiq, ipiq_size);

	/* initialize arc4random. */
	arc4_init_pcpu(0);

	/* restore the warmstart vector */
	*(u_long *) WARMBOOT_OFF = mpbioswarmvec;
	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, mpbiosreason);

	/*
	 * NOTE!  The idlestack for the BSP was set up by locore.  Finish
	 * up by cleaning out the P==V mapping we did earlier.
	 */
	pmap_set_opt();

	/*
	 * Wait for all APs to finish initializing their LAPIC.
	 */
	if (bootverbose)
		kprintf("SMP: Waiting for APs to finish LAPIC initialization\n");
	if (cpu_feature & CPUID_TSC)
		tsc0_offset = rdtsc();
	tsc_offsets[0] = 0;
	mp_finish_lapic = 1;
	rel_mplock();

	while (CPUMASK_CMPMASKNEQ(smp_lapic_mask, smp_startup_mask)) {
		cpu_pause();
		cpu_lfence();
		if (cpu_feature & CPUID_TSC)
			tsc0_offset = rdtsc();
	}
	while (try_mplock() == 0) {
		cpu_pause();
		cpu_lfence();
	}

	/* number of APs actually started */
	return ncpus - 1;
}


/*
 * load the 1st level AP boot code into base memory.
 */

/* targets for relocation */
extern void bigJump(void);
extern void bootCodeSeg(void);
extern void bootDataSeg(void);
extern void MPentry(void);
extern u_int MP_GDT;
extern u_int mp_gdtbase;

#if 0

static void
install_ap_tramp(u_int boot_addr)
{
	int     x;
	int     size = *(int *) ((u_long) & bootMP_size);
	u_char *src = (u_char *) ((u_long) bootMP);
	u_char *dst = (u_char *) boot_addr + KERNBASE;
	u_int   boot_base = (u_int) bootMP;
	u_int8_t *dst8;
	u_int16_t *dst16;
	u_int32_t *dst32;

	POSTCODE(INSTALL_AP_TRAMP_POST);

	for (x = 0; x < size; ++x)
		*dst++ = *src++;

	/*
	 * modify addresses in code we just moved to basemem. unfortunately we
	 * need fairly detailed info about mpboot.s for this to work.  changes
	 * to mpboot.s might require changes here.
	 */

	/* boot code is located in KERNEL space */
	dst = (u_char *) boot_addr + KERNBASE;

	/* modify the lgdt arg */
	dst32 = (u_int32_t *) (dst + ((u_int) & mp_gdtbase - boot_base));
	*dst32 = boot_addr + ((u_int) & MP_GDT - boot_base);

	/* modify the ljmp target for MPentry() */
	dst32 = (u_int32_t *) (dst + ((u_int) bigJump - boot_base) + 1);
	*dst32 = ((u_int) MPentry - KERNBASE);

	/* modify the target for boot code segment */
	dst16 = (u_int16_t *) (dst + ((u_int) bootCodeSeg - boot_base));
	dst8 = (u_int8_t *) (dst16 + 1);
	*dst16 = (u_int) boot_addr & 0xffff;
	*dst8 = ((u_int) boot_addr >> 16) & 0xff;

	/* modify the target for boot data segment */
	dst16 = (u_int16_t *) (dst + ((u_int) bootDataSeg - boot_base));
	dst8 = (u_int8_t *) (dst16 + 1);
	*dst16 = (u_int) boot_addr & 0xffff;
	*dst8 = ((u_int) boot_addr >> 16) & 0xff;
}

#endif

/*
 * This function starts the AP (application processor) identified
 * by the APIC ID 'physicalCpu'.  It does quite a "song and dance"
 * to accomplish this.  This is necessary because of the nuances
 * of the different hardware we might encounter.  It ain't pretty,
 * but it seems to work.
 *
 * NOTE: eventually an AP gets to ap_init(), which is called just
 * before the AP goes into the LWKT scheduler's idle loop.
 */
static int
start_ap(struct mdglobaldata *gd, u_int boot_addr, int smibest)
{
	int     physical_cpu;
	int     vector;

	POSTCODE(START_AP_POST);

	/* get the PHYSICAL APIC ID# */
	physical_cpu = CPUID_TO_APICID(gd->mi.gd_cpuid);

	/* calculate the vector */
	vector = (boot_addr >> 12) & 0xff;
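
	/*
	 * Background (architectural): a STARTUP IPI makes the target
	 * begin executing in real mode at physical address vector << 12.
	 * For example, boot_addr 0x9f000 yields vector 0x9f and the AP
	 * starts at CS:IP = 09f0:0000, i.e. 0x9f000; the trampoline must
	 * therefore be page-aligned and below 1MB.
	 */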

	/* We don't want anything interfering */
	cpu_disable_intr();

	/* Make sure the target cpu sees everything */
	wbinvd();

	/*
	 * Try to detect when a SMI has occurred, wait up to 200ms.
	 *
	 * If a SMI occurs during an AP reset but before we issue
	 * the STARTUP command, the AP may brick.  To work around
	 * this problem we hold off doing the AP startup until
	 * after we have detected the SMI.  Hopefully another SMI
	 * will not occur before we finish the AP startup.
	 *
	 * Retries don't seem to help.  SMIs have a window of opportunity
	 * and if USB->legacy keyboard emulation is enabled in the BIOS
	 * the interrupt rate can be quite high.
	 *
	 * NOTE: Don't worry about the L1 cache load, it might bloat
	 *	 ldelta a little but ndelta will be so huge when the SMI
	 *	 occurs the detection logic will still work fine.
	 */
	if (smibest) {
		set_apic_timer(200000);
		smitest();
	}

	/*
	 * First we do an INIT/RESET IPI.  This INIT IPI might be run,
	 * resetting and running the target CPU.  OR this INIT IPI might
	 * be latched (P5 bug), with the CPU waiting for a STARTUP IPI.
	 * OR this INIT IPI might be ignored.
	 *
	 * See apic/apicreg.h for ICR bit definitions.
	 *
	 * TIME CRITICAL CODE, DO NOT DO ANY KPRINTFS IN THE HOT PATH.
	 */

	/*
	 * Do an INIT IPI: assert RESET
	 *
	 * Use edge triggered mode to assert INIT
	 */
	lapic_seticr_sync(physical_cpu,
	    APIC_DESTMODE_PHY |
	    APIC_DEST_DESTFLD |
	    APIC_TRIGMOD_EDGE |
	    APIC_LEVEL_ASSERT |
	    APIC_DELMODE_INIT);

	/*
	 * The spec calls for a 10ms delay but we may have to use a
	 * MUCH lower delay to avoid bricking an AP due to a fast SMI
	 * interrupt.  We have other loops here too and dividing by 2
	 * doesn't seem to be enough even after subtracting 350us,
	 * so we divide by 4.
	 *
	 * Our minimum delay is 150uS, maximum is 10ms.  If no SMI
	 * interrupt was detected we use the full 10ms.
	 */
	if (smibest == 0)
		u_sleep(10000);
	else if (smibest < 150 * 4 + 350)
		u_sleep(150);
	else if ((smibest - 350) / 4 < 10000)
		u_sleep((smibest - 350) / 4);
	else
		u_sleep(10000);
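
	/*
	 * e.g. (illustrative) a measured SMI period of 4350us yields
	 * u_sleep((4350 - 350) / 4) = 1000us, keeping this delay plus
	 * the other fixed delays in this path inside one SMI window.
	 */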

	/*
	 * Do an INIT IPI: deassert RESET
	 *
	 * Use level triggered mode to deassert.  It is unclear
	 * why we need to do this.
	 */
	lapic_seticr_sync(physical_cpu,
	    APIC_DESTMODE_PHY |
	    APIC_DEST_DESTFLD |
	    APIC_TRIGMOD_LEVEL |
	    APIC_LEVEL_DEASSERT |
	    APIC_DELMODE_INIT);
	u_sleep(150);				/* wait 150us */

	/*
	 * Next we do a STARTUP IPI: the previous INIT IPI might still be
	 * latched (P5 bug), in which case this 1st STARTUP would terminate
	 * immediately and the previously started INIT IPI would continue.
	 * OR the previous INIT IPI has already run, and this STARTUP IPI
	 * will run.  OR the previous INIT IPI was ignored, and this
	 * STARTUP IPI will run.
	 *
	 * XXX set APIC_LEVEL_ASSERT
	 */
	lapic_seticr_sync(physical_cpu,
	    APIC_DESTMODE_PHY |
	    APIC_DEST_DESTFLD |
	    APIC_DELMODE_STARTUP |
	    vector);
	u_sleep(200);		/* wait ~200uS */

	/*
	 * Finally we do a 2nd STARTUP IPI: this 2nd STARTUP IPI should run IF
	 * the previous STARTUP IPI was cancelled by a latched INIT IPI. OR
	 * this STARTUP IPI will be ignored, as only ONE STARTUP IPI is
	 * recognized after hardware RESET or INIT IPI.
	 *
	 * XXX set APIC_LEVEL_ASSERT
	 */
	lapic_seticr_sync(physical_cpu,
	    APIC_DESTMODE_PHY |
	    APIC_DEST_DESTFLD |
	    APIC_DELMODE_STARTUP |
	    vector);

	/* Resume normal operation */
	cpu_enable_intr();

	/* wait for it to start, see ap_init() */
	set_apic_timer(5000000);/* == 5 seconds */
	while (read_apic_timer()) {
		if (CPUMASK_TESTBIT(smp_startup_mask, gd->mi.gd_cpuid))
			return 1;	/* return SUCCESS */
	}

	return 0;		/* return FAILURE */
}

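/*
 * Heuristic sketch of what follows: spin sampling the TSC in short
 * bursts; each burst normally costs about the same number of cycles
 * (tracked as the minimum, ldelta), but when an SMI steals the cpu
 * mid-burst the observed delta (ndelta) jumps well past that, at
 * which point we stop and return the remaining apic timer count.
 */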
static
int
smitest(void)
{
	int64_t	ltsc;
	int64_t	ntsc;
	int64_t	ldelta;
	int64_t	ndelta;
	int count;

	ldelta = 0;
	ndelta = 0;
	while (read_apic_timer()) {
		ltsc = rdtsc();
		for (count = 0; count < 100; ++count)
			ntsc = rdtsc();	/* force loop to occur */
		if (ldelta) {
			ndelta = ntsc - ltsc;
			if (ldelta > ndelta)
				ldelta = ndelta;
			if (ndelta > ldelta * 2)
				break;
		} else {
			ldelta = ntsc - ltsc;
		}
	}
	return(read_apic_timer());
}

/*
 * Synchronously flush the TLB on all other CPUs.  The current cpu's
 * TLB is not flushed.  If the caller wishes to flush the current cpu's
 * TLB the caller must call cpu_invltlb() in addition to smp_invltlb().
 *
 * This routine may be called concurrently from multiple cpus.  When this
 * happens, smp_invltlb() can wind up sticking around in the confirmation
 * while() loop at the end as additional cpus are added to the global
 * cpumask, until they are acknowledged by another IPI.
 *
 * NOTE: If for some reason we were unable to start all cpus we cannot
 *	 safely use broadcast IPIs.
 */

cpumask_t smp_smurf_mask;
static cpumask_t smp_invltlb_mask;
#define LOOPRECOVER
#define LOOPMASK_IN
#ifdef LOOPMASK_IN
cpumask_t smp_in_mask;
#endif
cpumask_t smp_invmask;
extern cpumask_t smp_idleinvl_mask;
extern cpumask_t smp_idleinvl_reqs;

/*
 * Atomically OR bits in *mask into smp_smurf_mask.  Adjust *mask to remove
 * bits that do not need to be IPId.  These bits are still part of the command,
 * but the target cpus have already been signalled and do not need to be
 * signalled again.
 */
#include <sys/spinlock.h>
#include <sys/spinlock2.h>

static __noinline
void
smp_smurf_fetchset(cpumask_t *mask)
{
	cpumask_t omask;
	int i;
	__uint64_t obits;
	__uint64_t nbits;

	i = 0;
	while (i < CPUMASK_ELEMENTS) {
		obits = smp_smurf_mask.ary[i];
		cpu_ccfence();
		nbits = obits | mask->ary[i];
		if (atomic_cmpset_long(&smp_smurf_mask.ary[i], obits, nbits)) {
			omask.ary[i] = obits;
			++i;
		}
	}
	CPUMASK_NANDMASK(*mask, omask);
}
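
/*
 * e.g. (illustrative) if *mask requests cpus {2,5} and cpu 5's smurf
 * bit was already set by a still-pending earlier IPI, the cmpset loop
 * publishes both bits but returns *mask = {2}: only cpu 2 needs a
 * fresh IPI, while cpu 5 will pick the command up anyway.
 */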

/*
 * This is a mechanism which guarantees that cpu_invltlb() will be executed
 * on idle cpus without having to signal or wake them up.  The invltlb will be
 * executed when they wake up, prior to any scheduling or interrupt thread.
 *
 * (*mask) is modified to remove the cpus we successfully negotiate this
 * function with.  This function may only be used with semi-synchronous
 * commands (typically invltlb's or semi-synchronous invalidations which
 * are usually associated only with kernel memory).
 */
void
smp_smurf_idleinvlclr(cpumask_t *mask)
{
	if (optimized_invltlb) {
		ATOMIC_CPUMASK_ORMASK(smp_idleinvl_reqs, *mask);
		/* cpu_lfence() not needed */
		CPUMASK_NANDMASK(*mask, smp_idleinvl_mask);
	}
}
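
/*
 * Handshake sketch (assumes the idle loop advertises itself in
 * smp_idleinvl_mask and checks smp_idleinvl_reqs on wakeup): because
 * the request bits are posted *before* currently-idle cpus are removed
 * from the IPI set, a target either was idle and will see the request
 * when it wakes, or was not idle and still receives the IPI - there is
 * no window in which the invalidation can be lost.
 */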

/*
 * Issue cpu_invltlb() across all cpus except the current cpu.
 *
 * This function will arrange to avoid idle cpus, but still guarantee that
 * invltlb is run on them when they wake up prior to any scheduling or
 * nominal interrupt.
 */
void
smp_invltlb(void)
{
	struct mdglobaldata *md = mdcpu;
	cpumask_t mask;
	unsigned long rflags;
#ifdef LOOPRECOVER
	tsc_uclock_t tsc_base = rdtsc();
	int repeats = 0;
#endif

	if (report_invltlb_src > 0) {
		if (--report_invltlb_src <= 0)
			print_backtrace(8);
	}

	/*
	 * Disallow normal interrupts, set all active cpus except our own
	 * in the global smp_invltlb_mask.
	 */
	++md->mi.gd_cnt.v_smpinvltlb;
	crit_enter_gd(&md->mi);

	/*
	 * Bits we want to set in smp_invltlb_mask.  We do not want to signal
	 * our own cpu.  Also try to remove bits associated with idle cpus
	 * that we can flag for auto-invltlb.
	 */
	mask = smp_active_mask;
	CPUMASK_NANDBIT(mask, md->mi.gd_cpuid);
	smp_smurf_idleinvlclr(&mask);

	rflags = read_rflags();
	cpu_disable_intr();
	ATOMIC_CPUMASK_ORMASK(smp_invltlb_mask, mask);

	/*
	 * IPI non-idle cpus represented by mask.  The omask calculation
	 * removes cpus from the mask which already have a Xinvltlb IPI
	 * pending (avoid double-queueing the IPI).
	 *
	 * We must disable real interrupts when setting the smurf flags or
	 * we might race a XINVLTLB before we manage to send the ipi's for
	 * the bits we set.
	 *
	 * NOTE: We are not signalling ourselves, mask already does NOT
	 * include our own cpu.
	 */
	smp_smurf_fetchset(&mask);

	/*
	 * Issue the IPI.  Note that the XINVLTLB IPI runs regardless of
	 * the critical section count on the target cpus.
	 */
	CPUMASK_ORMASK(mask, md->mi.gd_cpumask);
	if (all_but_self_ipi_enable &&
	    (all_but_self_ipi_enable >= 2 ||
	     CPUMASK_CMPMASKEQ(smp_startup_mask, mask))) {
		all_but_self_ipi(XINVLTLB_OFFSET);
	} else {
		CPUMASK_NANDMASK(mask, md->mi.gd_cpumask);
		selected_apic_ipi(mask, XINVLTLB_OFFSET, APIC_DELMODE_FIXED);
	}

	/*
	 * Wait for acknowledgement by all cpus.  smp_inval_intr() will
	 * temporarily enable interrupts to avoid deadlocking the lapic,
	 * and will also handle running cpu_invltlb() and remote invlpg
	 * commands on our cpu if some other cpu requests it of us.
	 *
	 * WARNING! I originally tried to implement this as a hard loop
	 *	    checking only smp_invltlb_mask (and issuing a local
	 *	    cpu_invltlb() if requested), with interrupts enabled
	 *	    and without calling smp_inval_intr().  This DID NOT WORK.
	 *	    It resulted in weird races where smurf bits would get
	 *	    cleared without any action being taken.
	 */
	smp_inval_intr();
	CPUMASK_ASSZERO(mask);
	while (CPUMASK_CMPMASKNEQ(smp_invltlb_mask, mask)) {
		smp_inval_intr();
		cpu_pause();
#ifdef LOOPRECOVER
		if (tsc_frequency && rdtsc() - tsc_base > tsc_frequency) {
			/*
			 * cpuid 	- cpu doing the waiting
			 * invltlb_mask - IPI in progress
			 */
			kprintf("smp_invltlb %d: waited too long inv=%08jx "
				"smurf=%08jx "
#ifdef LOOPMASK_IN
				"in=%08jx "
#endif
				"idle=%08jx/%08jx\n",
				md->mi.gd_cpuid,
				smp_invltlb_mask.ary[0],
				smp_smurf_mask.ary[0],
#ifdef LOOPMASK_IN
				smp_in_mask.ary[0],
#endif
				smp_idleinvl_mask.ary[0],
				smp_idleinvl_reqs.ary[0]);
			mdcpu->gd_xinvaltlb = 0;
			ATOMIC_CPUMASK_NANDMASK(smp_smurf_mask,
						smp_invltlb_mask);
			smp_invlpg(&smp_active_mask);
			tsc_base = rdtsc();
			if (++repeats > 10) {
				kprintf("smp_invltlb: giving up\n");
				CPUMASK_ASSZERO(smp_invltlb_mask);
			}
		}
#endif
	}
	write_rflags(rflags);
	crit_exit_gd(&md->mi);
}
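
/*
 * Typical usage sketch (pmap_kmodify is a hypothetical caller, not a
 * real function here): after changing a kernel mapping,
 *
 *	pmap_kmodify(...);	// modify the PTE(s)
 *	smp_invltlb();		// shoot down remote TLBs
 *	cpu_invltlb();		// then flush our own
 *
 * since, per the comment above smp_invltlb(), the local TLB is
 * deliberately left alone.
 */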

/*
 * Called from a critical section with interrupts hard-disabled.
 * This function issues an XINVLTLB IPI and then executes any pending
 * command on the current cpu before returning.
 */
void
smp_invlpg(cpumask_t *cmdmask)
{
	struct mdglobaldata *md = mdcpu;
	cpumask_t mask;

	if (report_invlpg_src > 0) {
		if (--report_invlpg_src <= 0)
			print_backtrace(8);
	}

	/*
	 * Disallow normal interrupts, set all active cpus in the pmap,
	 * plus our own for completion processing (it might or might not
	 * be part of the set).
	 */
	mask = smp_active_mask;
	CPUMASK_ANDMASK(mask, *cmdmask);
	CPUMASK_ORMASK(mask, md->mi.gd_cpumask);

	/*
	 * Avoid double-queuing IPIs, which can deadlock us.  We must disable
	 * real interrupts when setting the smurf flags or we might race a
	 * XINVLTLB before we manage to send the ipi's for the bits we set.
	 *
	 * NOTE: We might be including our own cpu in the smurf mask.
	 */
	smp_smurf_fetchset(&mask);

	/*
	 * Issue the IPI.  Note that the XINVLTLB IPI runs regardless of
	 * the critical section count on the target cpus.
	 *
	 * We do not include our own cpu when issuing the IPI.
	 */
	if (all_but_self_ipi_enable &&
	    (all_but_self_ipi_enable >= 2 ||
	     CPUMASK_CMPMASKEQ(smp_startup_mask, mask))) {
		all_but_self_ipi(XINVLTLB_OFFSET);
	} else {
		CPUMASK_NANDMASK(mask, md->mi.gd_cpumask);
		selected_apic_ipi(mask, XINVLTLB_OFFSET, APIC_DELMODE_FIXED);
	}

	/*
	 * This will synchronously wait for our command to complete,
	 * as well as process commands from other cpus.  It also handles
	 * reentrancy.
	 *
	 * (interrupts are disabled and we are in a critical section here)
	 */
	smp_inval_intr();
}

void
smp_sniff(void)
{
	globaldata_t gd = mycpu;
	int dummy;
	register_t rflags;

	/*
	 * Ignore all_but_self_ipi_enable here and just use it.
	 */
	rflags = read_rflags();
	cpu_disable_intr();
	all_but_self_ipi(XSNIFF_OFFSET);
	gd->gd_sample_pc = smp_sniff;
	gd->gd_sample_sp = &dummy;
	write_rflags(rflags);
}

void
cpu_sniff(int dcpu)
{
	globaldata_t rgd = globaldata_find(dcpu);
	register_t rflags;
	int dummy;

	/*
	 * Ignore all_but_self_ipi_enable here and just use it.
	 */
	rflags = read_rflags();
	cpu_disable_intr();
	single_apic_ipi(dcpu, XSNIFF_OFFSET, APIC_DELMODE_FIXED);
	rgd->gd_sample_pc = cpu_sniff;
	rgd->gd_sample_sp = &dummy;
	write_rflags(rflags);
}

/*
 * Called from Xinvltlb assembly with interrupts hard-disabled and in a
 * critical section.  gd_intr_nesting_level may or may not be bumped
 * depending on entry.
 *
 * THIS CODE IS INTENDED TO EXPLICITLY IGNORE THE CRITICAL SECTION COUNT.
 * THAT IS, THE INTERRUPT IS INTENDED TO FUNCTION EVEN WHEN MAINLINE CODE
 * IS IN A CRITICAL SECTION.
 */
void
smp_inval_intr(void)
{
	struct mdglobaldata *md = mdcpu;
	cpumask_t cpumask;
#ifdef LOOPRECOVER
	tsc_uclock_t tsc_base = rdtsc();
#endif

#if 0
	/*
	 * The idle code is in a critical section, but that doesn't stop
	 * Xinvltlb from executing, so deal with the race which can occur
	 * in that situation.  Otherwise r-m-w operations by pmap_inval_intr()
	 * may have problems.
	 */
	if (ATOMIC_CPUMASK_TESTANDCLR(smp_idleinvl_reqs, md->mi.gd_cpuid)) {
		ATOMIC_CPUMASK_NANDBIT(smp_invltlb_mask, md->mi.gd_cpuid);
		cpu_invltlb();
		cpu_mfence();
	}
#endif

	/*
	 * This is a real mess.  I'd like to just leave interrupts disabled
	 * but it can cause the lapic to deadlock if too many interrupts queue
	 * to it, due to the idiotic design of the lapic.  So instead we have
	 * to enter a critical section so normal interrupts are made pending
	 * and track whether this one was reentered.
	 */
	if (md->gd_xinvaltlb) {		/* reentrant on cpu */
		md->gd_xinvaltlb = 2;
		return;
	}
	md->gd_xinvaltlb = 1;

	/*
	 * Check only those cpus with active Xinvl* commands pending.
	 *
	 * We are going to enable interrupts so make sure we are in a
	 * critical section.  This is necessary to avoid deadlocking
	 * the lapic and to ensure that we execute our commands prior to
	 * any nominal interrupt or preemption.
	 *
	 * WARNING! It is very important that we only clear our bit in
	 *	    smp_smurf_mask once for each interrupt we take.  In
	 *	    this case, we clear it on initial entry and only loop
	 *	    on the reentrancy detect (caused by another interrupt).
	 */
	cpumask = smp_invmask;
#ifdef LOOPMASK_IN
	ATOMIC_CPUMASK_ORBIT(smp_in_mask, md->mi.gd_cpuid);
#endif
loop:
	cpu_enable_intr();
	ATOMIC_CPUMASK_NANDBIT(smp_smurf_mask, md->mi.gd_cpuid);

	/*
	 * Specific page request(s), and we can't return until all bits
	 * are zero.
	 */
	for (;;) {
		int toolong;

		/*
		 * Also execute any pending full invalidation request in
		 * this loop.
		 */
		if (CPUMASK_TESTBIT(smp_invltlb_mask, md->mi.gd_cpuid)) {
			ATOMIC_CPUMASK_NANDBIT(smp_invltlb_mask,
					       md->mi.gd_cpuid);
			cpu_invltlb();
			cpu_mfence();
		}

#ifdef LOOPRECOVER
		if (tsc_frequency && rdtsc() - tsc_base > tsc_frequency) {
			/*
			 * cpuid 	- cpu doing the waiting
			 * invmask	- IPI in progress
			 * invltlb_mask - which ones are TLB invalidations?
			 */
			kprintf("smp_inval_intr %d inv=%08jx tlbm=%08jx "
				"smurf=%08jx "
#ifdef LOOPMASK_IN
				"in=%08jx "
#endif
				"idle=%08jx/%08jx\n",
				md->mi.gd_cpuid,
				smp_invmask.ary[0],
				smp_invltlb_mask.ary[0],
				smp_smurf_mask.ary[0],
#ifdef LOOPMASK_IN
				smp_in_mask.ary[0],
#endif
				smp_idleinvl_mask.ary[0],
				smp_idleinvl_reqs.ary[0]);
			tsc_base = rdtsc();
			toolong = 1;
		} else {
			toolong = 0;
		}
#else
		toolong = 0;
#endif

		/*
		 * We can only add bits to the cpumask to test during the
		 * loop because the smp_invmask bit is cleared once the
		 * originator completes the command (the targets may still
		 * be cycling their own completions in this loop, afterwards).
		 *
		 * lfence required prior to all tests as this Xinvltlb
		 * interrupt could race the originator (already be in progress
		 * when the originator decides to issue, due to an issue by
		 * another cpu).
		 */
		cpu_lfence();
		CPUMASK_ORMASK(cpumask, smp_invmask);
		/*cpumask = smp_active_mask;*/	/* XXX */
		cpu_lfence();

		if (pmap_inval_intr(&cpumask, toolong) == 0) {
			/*
			 * Clear our smurf mask to allow new IPIs, but deal
			 * with potential races.
			 */
			break;
		}

		/*
		 * Test if someone sent us another invalidation IPI, break
		 * out so we can take it to avoid deadlocking the lapic
		 * interrupt queue (? stupid intel, amd).
		 */
		if (md->gd_xinvaltlb == 2)
			break;
		/*
		if (CPUMASK_TESTBIT(smp_smurf_mask, md->mi.gd_cpuid))
			break;
		*/
	}

	/*
	 * Full invalidation request
	 */
	if (CPUMASK_TESTBIT(smp_invltlb_mask, md->mi.gd_cpuid)) {
		ATOMIC_CPUMASK_NANDBIT(smp_invltlb_mask,
				       md->mi.gd_cpuid);
		cpu_invltlb();
		cpu_mfence();
	}

	/*
	 * Check to see if another Xinvltlb interrupt occurred and loop up
	 * if it did.
	 */
	cpu_disable_intr();
	if (md->gd_xinvaltlb == 2) {
		md->gd_xinvaltlb = 1;
		goto loop;
	}
#ifdef LOOPMASK_IN
	ATOMIC_CPUMASK_NANDBIT(smp_in_mask, md->mi.gd_cpuid);
#endif
	md->gd_xinvaltlb = 0;
}

void
cpu_wbinvd_on_all_cpus_callback(void *arg)
{
	wbinvd();
}

/*
 * When called, the executing CPU will send an IPI to all other CPUs
 * requesting that they halt execution.
 *
 * Usually (but not necessarily) called with 'other_cpus' as its arg.
 *
 *  - Signals all CPUs in map to stop.
 *  - Waits for each to stop.
 *
 * Returns:
 *  -1: error
 *   0: NA
 *   1: ok
 *
 * XXX FIXME: this is not MP-safe, needs a lock to prevent multiple CPUs
 *            from executing at the same time.
 */
int
stop_cpus(cpumask_t map)
{
	cpumask_t mask;

	CPUMASK_ANDMASK(map, smp_active_mask);

	/* send the Xcpustop IPI to all CPUs in map */
	selected_apic_ipi(map, XCPUSTOP_OFFSET, APIC_DELMODE_FIXED);

	do {
		mask = stopped_cpus;
		CPUMASK_ANDMASK(mask, map);
		/* spin */
	} while (CPUMASK_CMPMASKNEQ(mask, map));

	return 1;
}


/*
 * Called by a CPU to restart stopped CPUs.
 *
 * Usually (but not necessarily) called with 'stopped_cpus' as its arg.
 *
 *  - Signals all CPUs in map to restart.
 *  - Waits for each to restart.
 *
 * Returns:
 *  -1: error
 *   0: NA
 *   1: ok
 */
int
restart_cpus(cpumask_t map)
{
	cpumask_t mask;

	/* signal other cpus to restart */
	mask = map;
	CPUMASK_ANDMASK(mask, smp_active_mask);
	cpu_ccfence();
	started_cpus = mask;
	cpu_ccfence();

	/* wait for each to clear its bit */
	while (CPUMASK_CMPMASKNEQ(stopped_cpus, map))
		cpu_pause();

	return 1;
}

/*
 * This is called once the mpboot code has gotten us properly relocated
 * and the MMU turned on, etc.   ap_init() is actually the idle thread,
 * and when it returns the scheduler will call the real cpu_idle() main
 * loop for the idlethread.  Interrupts are disabled on entry and should
 * remain disabled at return.
 */
void
ap_init(void)
{
	int	cpu_id;

	/*
	 * Adjust smp_startup_mask to signal the BSP that we have started
	 * up successfully.  Note that we do not yet hold the BGL.  The BSP
	 * is waiting for our signal.
	 *
	 * We can't set our bit in smp_active_mask yet because we are holding
	 * interrupts physically disabled and remote cpus could deadlock
	 * trying to send us an IPI.
	 */
	ATOMIC_CPUMASK_ORBIT(smp_startup_mask, mycpu->gd_cpuid);
	cpu_mfence();

	/*
	 * Interlock for LAPIC initialization.  Wait until mp_finish_lapic is
	 * non-zero, then get the MP lock.
	 *
	 * Note: We are in a critical section.
	 *
	 * Note: we are the idle thread, we can only spin.
	 *
	 * Note: The load fence is memory volatile and prevents the compiler
	 * from improperly caching mp_finish_lapic, and the cpu from improperly
	 * caching it.
	 */
	while (mp_finish_lapic == 0) {
		cpu_pause();
		cpu_lfence();
	}
#if 0
	while (try_mplock() == 0) {
		cpu_pause();
		cpu_lfence();
	}
#endif

	if (cpu_feature & CPUID_TSC) {
		/*
		 * The BSP is constantly updating tsc0_offset, figure out
		 * the relative difference to synchronize ktrdump.
		 */
		tsc_offsets[mycpu->gd_cpuid] = rdtsc() - tsc0_offset;
	}

	/* BSP may have changed PTD while we're waiting for the lock */
	cpu_invltlb();

	/* Build our map of 'other' CPUs. */
	mycpu->gd_other_cpus = smp_startup_mask;
	ATOMIC_CPUMASK_NANDBIT(mycpu->gd_other_cpus, mycpu->gd_cpuid);

	/* A quick check from sanity claus */
	cpu_id = APICID_TO_CPUID(LAPIC_READID);
	if (mycpu->gd_cpuid != cpu_id) {
		kprintf("SMP: assigned cpuid = %d\n", mycpu->gd_cpuid);
		kprintf("SMP: actual cpuid = %d lapicid %d\n",
			cpu_id, LAPIC_READID);
#if 0 /* JGXXX */
		kprintf("PTD[MPPTDI] = %p\n", (void *)PTD[MPPTDI]);
#endif
		panic("cpuid mismatch! boom!!");
	}

	/* Initialize AP's local APIC for irq's */
	lapic_init(FALSE);

	/* LAPIC initialization is done */
	ATOMIC_CPUMASK_ORBIT(smp_lapic_mask, mycpu->gd_cpuid);
	cpu_mfence();

#if 0
	/* Let BSP move onto the next initialization stage */
	rel_mplock();
#endif

	/*
	 * Interlock for finalization.  Wait until mp_finish is non-zero,
	 * then get the MP lock.
	 *
	 * Note: We are in a critical section.
	 *
	 * Note: we are the idle thread, we can only spin.
	 *
	 * Note: The load fence is memory volatile and prevents the compiler
	 * from improperly caching mp_finish, and the cpu from improperly
	 * caching it.
	 */
	while (mp_finish == 0) {
		cpu_pause();
		cpu_lfence();
	}

	/* BSP may have changed PTD while we're waiting for the lock */
	cpu_invltlb();

	/* Set memory range attributes for this CPU to match the BSP */
	mem_range_AP_init();

	/*
	 * Once we go active we must process any IPIQ messages that may
	 * have been queued, because no actual IPI will occur until we
	 * set our bit in the smp_active_mask.  If we don't the IPI
	 * message interlock could be left set which would also prevent
	 * further IPIs.
	 *
	 * The idle loop doesn't expect the BGL to be held and while
	 * lwkt_switch() normally cleans things up, this is a special case
	 * because we are returning almost directly into the idle loop.
	 *
	 * The idle thread is never placed on the runq, make sure
	 * nothing we've done put it there.
	 */

	/*
	 * Hold a critical section and allow real interrupts to occur.  Zero
	 * any spurious interrupts which have accumulated, then set our
	 * smp_active_mask indicating that we are fully operational.
	 */
	crit_enter();
	__asm __volatile("sti; pause; pause"::);
	bzero(mdcpu->gd_ipending, sizeof(mdcpu->gd_ipending));
	ATOMIC_CPUMASK_ORBIT(smp_active_mask, mycpu->gd_cpuid);

	/*
	 * Wait until all cpus have set their smp_active_mask and have fully
	 * operational interrupts before proceeding.
	 *
	 * We need a final cpu_invltlb() because we would not have received
	 * any until we set our bit in smp_active_mask.
	 */
	while (mp_finish == 1) {
		cpu_pause();
		cpu_lfence();
	}
	cpu_invltlb();

	/*
	 * Initialize per-cpu clocks and do other per-cpu initialization.
	 * At this point code is expected to be able to use the full kernel
	 * API.
	 */
	initclocks_pcpu();	/* clock interrupts (via IPIs) */

	/*
	 * Since we may have cleaned up the interrupt triggers, manually
	 * process any pending IPIs before exiting our critical section.
	 * Once the critical section has exited, normal interrupt processing
	 * may occur.
	 */
	atomic_swap_int(&mycpu->gd_npoll, 0);
	lwkt_process_ipiq();
	crit_exit();

	/*
	 * Final final, allow the waiting BSP to resume the boot process,
	 * return 'into' the idle thread bootstrap.
	 */
	ATOMIC_CPUMASK_ORBIT(smp_finalize_mask, mycpu->gd_cpuid);
	KKASSERT((curthread->td_flags & TDF_RUNQ) == 0);
}

/*
 * Get SMP fully working before we start initializing devices.
 */
static
void
ap_finish(void)
{
	if (bootverbose)
		kprintf("Finish MP startup\n");
	rel_mplock();

	/*
	 * Wait for the active mask to complete, after which all cpus will
	 * be accepting interrupts.
	 */
	mp_finish = 1;
	while (CPUMASK_CMPMASKNEQ(smp_active_mask, smp_startup_mask)) {
		cpu_pause();
		cpu_lfence();
	}

	/*
	 * Wait for the finalization mask to complete, after which all cpus
	 * have completely finished initializing and are entering or are in
	 * their idle thread.
	 *
	 * BSP should have received all required invltlbs but do another
	 * one just in case.
	 */
	cpu_invltlb();
	mp_finish = 2;
	while (CPUMASK_CMPMASKNEQ(smp_finalize_mask, smp_startup_mask)) {
		cpu_pause();
		cpu_lfence();
	}

	while (try_mplock() == 0) {
		cpu_pause();
		cpu_lfence();
	}

	if (bootverbose) {
		kprintf("Active CPU Mask: %016jx\n",
			(uintmax_t)CPUMASK_LOWMASK(smp_active_mask));
	}
}

SYSINIT(finishsmp, SI_BOOT2_FINISH_SMP, SI_ORDER_FIRST, ap_finish, NULL);

/*
 * Interrupts must be hard-disabled by caller
 */
void
cpu_send_ipiq(int dcpu)
{
	if (CPUMASK_TESTBIT(smp_active_mask, dcpu))
		single_apic_ipi(dcpu, XIPIQ_OFFSET, APIC_DELMODE_FIXED);
}

#if 0	/* single_apic_ipi_passive() not working yet */
/*
 * Returns 0 on failure, 1 on success
 */
int
cpu_send_ipiq_passive(int dcpu)
{
	int r = 0;
	if (CPUMASK_TESTBIT(smp_active_mask, dcpu)) {
		r = single_apic_ipi_passive(dcpu, XIPIQ_OFFSET,
					APIC_DELMODE_FIXED);
	}
	return(r);
}
#endif

static void
mp_bsp_simple_setup(void)
{
	struct mdglobaldata *gd;
	size_t ipiq_size;

	/* build our map of 'other' CPUs */
	mycpu->gd_other_cpus = smp_startup_mask;
	CPUMASK_NANDBIT(mycpu->gd_other_cpus, mycpu->gd_cpuid);

	gd = (struct mdglobaldata *)mycpu;
	gd->gd_acpi_id = CPUID_TO_ACPIID(mycpu->gd_cpuid);

	ipiq_size = sizeof(struct lwkt_ipiq) * ncpus;
	mycpu->gd_ipiq = (void *)kmem_alloc(&kernel_map, ipiq_size,
					    VM_SUBSYS_IPIQ);
	bzero(mycpu->gd_ipiq, ipiq_size);

	/* initialize arc4random. */
	arc4_init_pcpu(0);

	pmap_set_opt();

	if (cpu_feature & CPUID_TSC)
		tsc0_offset = rdtsc();
}


/*
 * CPU TOPOLOGY DETECTION FUNCTIONS
 */

/* Detect intel topology using CPUID
 * Ref: http://www.intel.com/Assets/PDF/appnote/241618.pdf, pg 41
 */
static void
detect_intel_topology(int count_htt_cores)
{
	int shift = 0;
	int ecx_index = 0;
	int core_plus_logical_bits = 0;
	int cores_per_package;
	int logical_per_package;
	int logical_per_core;
	unsigned int p[4];

	if (cpu_high >= 0xb) {
		goto FUNC_B;

	} else if (cpu_high >= 0x4) {
		goto FUNC_4;

	} else {
		core_bits = 0;
		for (shift = 0; (1 << shift) < count_htt_cores; ++shift)
			;
		logical_CPU_bits = shift;
		return;
	}

FUNC_B:
	cpuid_count(0xb, FUNC_B_THREAD_LEVEL, p);

	/* if 0xb not supported - fallback to 0x4 */
	if (p[1] == 0 || (FUNC_B_TYPE(p[2]) != FUNC_B_THREAD_TYPE)) {
		goto FUNC_4;
	}

	logical_CPU_bits = FUNC_B_BITS_SHIFT_NEXT_LEVEL(p[0]);

	ecx_index = FUNC_B_THREAD_LEVEL + 1;
	do {
		cpuid_count(0xb, ecx_index, p);

		/* Check for the Core type in the implemented sub leaves. */
		if (FUNC_B_TYPE(p[2]) == FUNC_B_CORE_TYPE) {
			core_plus_logical_bits = FUNC_B_BITS_SHIFT_NEXT_LEVEL(p[0]);
			break;
		}

		ecx_index++;

	} while (FUNC_B_TYPE(p[2]) != FUNC_B_INVALID_TYPE);

	core_bits = core_plus_logical_bits - logical_CPU_bits;

	return;

FUNC_4:
	cpuid_count(0x4, 0, p);
	cores_per_package = FUNC_4_MAX_CORE_NO(p[0]) + 1;

	logical_per_package = count_htt_cores;
	logical_per_core = logical_per_package / cores_per_package;

	for (shift = 0; (1 << shift) < logical_per_core; ++shift)
		;
	logical_CPU_bits = shift;

	for (shift = 0; (1 << shift) < cores_per_package; ++shift)
		;
	core_bits = shift;

	return;
}

/* Detect AMD topology using CPUID
 * Ref: http://support.amd.com/us/Embedded_TechDocs/25481.pdf, last page
 */
static void
detect_amd_topology(int count_htt_cores)
{
	int shift = 0;
	if ((cpu_feature & CPUID_HTT) && (amd_feature2 & AMDID2_CMP)) {
		if (cpu_procinfo2 & AMDID_COREID_SIZE) {
			core_bits = (cpu_procinfo2 & AMDID_COREID_SIZE) >>
				    AMDID_COREID_SIZE_SHIFT;
		} else {
			core_bits = (cpu_procinfo2 & AMDID_CMP_CORES) + 1;
			for (shift = 0; (1 << shift) < core_bits; ++shift)
				;
			core_bits = shift;
		}
		logical_CPU_bits = count_htt_cores >> core_bits;
		for (shift = 0; (1 << shift) < logical_CPU_bits; ++shift)
			;
		logical_CPU_bits = shift;

		kprintf("core_bits %d logical_CPU_bits %d\n",
			core_bits - logical_CPU_bits, logical_CPU_bits);

		if (amd_feature2 & AMDID2_TOPOEXT) {
			u_int p[4];	/* eax,ebx,ecx,edx */
			int nodes;

			cpuid_count(0x8000001e, 0, p);

			switch(((p[1] >> 8) & 3) + 1) {
			case 1:
				logical_CPU_bits = 0;
				break;
			case 2:
				logical_CPU_bits = 1;
				break;
			case 3:
			case 4:
				logical_CPU_bits = 2;
				break;
			}

			/*
			 * Nodes are kind of a stand-in for packages*sockets,
			 * but can be thought of in terms of NUMA domains.
			 */
			nodes = ((p[2] >> 8) & 7) + 1;
			switch(nodes) {
			case 8:
			case 7:
			case 6:
			case 5:
				--core_bits;
				/* fallthrough */
			case 4:
			case 3:
				--core_bits;
				/* fallthrough */
			case 2:
				--core_bits;
				/* fallthrough */
			case 1:
				break;
			}
			core_bits -= logical_CPU_bits;
			kprintf("%d-way htt, %d Nodes, %d cores/node\n",
				(int)(((p[1] >> 8) & 3) + 1),
				nodes,
				1 << core_bits);

		}
#if 0
		if (amd_feature2 & AMDID2_TOPOEXT) {
			u_int p[4];
			int i;
			int type;
			int level;
			int share_count;

			logical_CPU_bits = 0;
			core_bits = 0;

			for (i = 0; i < 256; ++i)  {
				cpuid_count(0x8000001d, i, p);
				type = p[0] & 0x1f;
				level = (p[0] >> 5) & 0x7;
				share_count = 1 + ((p[0] >> 14) & 0xfff);

				if (type == 0)
					break;
				kprintf("Topology probe i=%2d type=%d "
					"level=%d share_count=%d\n",
					i, type, level, share_count);
				shift = 0;
				while ((1 << shift) < share_count)
					++shift;

				switch(type) {
				case 1:
					/*
					 * CPUID_TYPE_SMT
					 *
					 * Logical CPU (SMT)
					 */
					logical_CPU_bits = shift;
					break;
				case 2:
					/*
					 * CPUID_TYPE_CORE
					 *
					 * Physical subdivision of a package
					 */
					core_bits = logical_CPU_bits +
						    shift;
					break;
				case 3:
					/*
					 * CPUID_TYPE_CACHE
					 *
					 * CPU L1/L2/L3 cache
					 */
					break;
				case 4:
					/*
					 * CPUID_TYPE_PKG
					 *
					 * Package aka chip, equivalent to
					 * socket
					 */
					break;
				}
			}
		}
#endif
	} else {
		for (shift = 0; (1 << shift) < count_htt_cores; ++shift)
			;
		core_bits = shift;
		logical_CPU_bits = 0;
	}
}

static void
amd_get_compute_unit_id(void *arg)
{
	u_int regs[4];

	do_cpuid(0x8000001e, regs);
	cpu_node_t * mynode = get_cpu_node_by_cpuid(mycpuid);

	/*
	 * AMD - CPUID Specification September 2010
	 * page 34 - //ComputeUnitID = ebx[0:7]//
	 */
	mynode->compute_unit_id = regs[1] & 0xff;
}

int
fix_amd_topology(void)
{
	cpumask_t mask;

	if (cpu_vendor_id != CPU_VENDOR_AMD)
		return -1;
	if ((amd_feature2 & AMDID2_TOPOEXT) == 0)
		return -1;

	CPUMASK_ASSALLONES(mask);
	lwkt_cpusync_simple(mask, amd_get_compute_unit_id, NULL);

	kprintf("Compute unit IDs:\n");
	int i;
	for (i = 0; i < ncpus; i++) {
		kprintf("%d-%d; \n",
			i, get_cpu_node_by_cpuid(i)->compute_unit_id);
	}
	return 0;
}

/*
 * Calculate
 * - logical_CPU_bits
 * - core_bits
 * With the values above (for AMD or INTEL) we are able to generally
 * detect the CPU topology (number of cores for each level):
 * Ref: http://wiki.osdev.org/Detecting_CPU_Topology_(80x86)
 * Ref: http://www.multicoreinfo.com/research/papers/whitepapers/Intel-detect-topology.pdf
 */
void
detect_cpu_topology(void)
{
	static int topology_detected = 0;
	int count = 0;

	if (topology_detected)
		goto OUT;
	if ((cpu_feature & CPUID_HTT) == 0) {
		core_bits = 0;
		logical_CPU_bits = 0;
		goto OUT;
	}
	count = (cpu_procinfo & CPUID_HTT_CORES) >> CPUID_HTT_CORE_SHIFT;

	if (cpu_vendor_id == CPU_VENDOR_INTEL)
		detect_intel_topology(count);
	else if (cpu_vendor_id == CPU_VENDOR_AMD)
		detect_amd_topology(count);
	topology_detected = 1;

OUT:
	if (bootverbose) {
		kprintf("Bits within APICID: logical_CPU_bits: %d; "
			"core_bits: %d\n",
			logical_CPU_bits, core_bits);
	}
}

/*
 * Interface functions to calculate chip_ID,
 * core_number and logical_number
 * Ref: http://wiki.osdev.org/Detecting_CPU_Topology_(80x86)
 */
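/*
 * Worked example (illustrative values): with logical_CPU_bits = 1 and
 * core_bits = 2 the APICID is laid out as
 *
 *	[ chip ID | core (2 bits) | SMT (1 bit) ]
 *
 * so APICID 11 (0b1011) decodes to chip 1, core 1 within the chip,
 * and logical cpu 1 within the core.
 */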
int
get_chip_ID(int cpuid)
{
	return get_apicid_from_cpuid(cpuid) >>
	    (logical_CPU_bits + core_bits);
}

int
get_chip_ID_from_APICID(int apicid)
{
	return apicid >> (logical_CPU_bits + core_bits);
}

int
get_core_number_within_chip(int cpuid)
{
	return ((get_apicid_from_cpuid(cpuid) >> logical_CPU_bits) &
		((1 << core_bits) - 1));
}

int
get_logical_CPU_number_within_core(int cpuid)
{
	return (get_apicid_from_cpuid(cpuid) &
		((1 << logical_CPU_bits) - 1));
}