1 /*
2  * Copyright (c) 1996, by Steve Passe
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. The name of the developer may NOT be used to endorse or promote products
11  *    derived from this software without specific prior written permission.
12  *
13  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23  * SUCH DAMAGE.
24  *
25  * $FreeBSD: src/sys/i386/i386/mp_machdep.c,v 1.115.2.15 2003/03/14 21:22:35 jhb Exp $
26  * $DragonFly: src/sys/platform/pc32/i386/mp_machdep.c,v 1.60 2008/06/07 12:03:52 mneumann Exp $
27  */
28 
29 #include "opt_cpu.h"
30 
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/kernel.h>
34 #include <sys/sysctl.h>
35 #include <sys/malloc.h>
36 #include <sys/memrange.h>
37 #include <sys/cons.h>	/* cngetc() */
38 #include <sys/machintr.h>
39 
40 #include <sys/mplock2.h>
41 
42 #include <vm/vm.h>
43 #include <vm/vm_param.h>
44 #include <vm/pmap.h>
45 #include <vm/vm_kern.h>
46 #include <vm/vm_extern.h>
47 #include <sys/lock.h>
48 #include <vm/vm_map.h>
49 #include <sys/user.h>
50 #ifdef GPROF
51 #include <sys/gmon.h>
52 #endif
53 
54 #include <machine/smp.h>
55 #include <machine_base/apic/apicreg.h>
56 #include <machine/atomic.h>
57 #include <machine/cpufunc.h>
58 #include <machine_base/apic/mpapic.h>
59 #include <machine/psl.h>
60 #include <machine/segments.h>
61 #include <machine/tss.h>
62 #include <machine/specialreg.h>
63 #include <machine/globaldata.h>
64 
65 #include <machine/md_var.h>		/* setidt() */
66 #include <machine_base/icu/icu.h>		/* IPIs */
67 #include <machine_base/isa/intr_machdep.h>	/* IPIs */
68 
69 #define FIXUP_EXTRA_APIC_INTS	8	/* additional entries we may create */
70 
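/*
 * Warm-boot hook (background note): the BIOS jumps indirectly through the
 * real-mode vector stored at 0x40:0x67 when the CMOS shutdown status byte
 * (offset 0x0F) contains 0x0A ("JMP via 40:67 without EOI").  WARMBOOT_OFF
 * and WARMBOOT_SEG below are the kernel-virtual addresses of that vector's
 * offset and segment words, and BIOS_RESET/BIOS_WARM are the CMOS register
 * index and value used to arm it; see start_all_aps() for the actual use.
 */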
71 #define WARMBOOT_TARGET		0
72 #define WARMBOOT_OFF		(KERNBASE + 0x0467)
73 #define WARMBOOT_SEG		(KERNBASE + 0x0469)
74 
75 #define BIOS_BASE		(0xf0000)
76 #define BIOS_SIZE		(0x10000)
77 #define BIOS_COUNT		(BIOS_SIZE/4)
78 
79 #define CMOS_REG		(0x70)
80 #define CMOS_DATA		(0x71)
81 #define BIOS_RESET		(0x0f)
82 #define BIOS_WARM		(0x0a)
83 
84 #define PROCENTRY_FLAG_EN	0x01
85 #define PROCENTRY_FLAG_BP	0x02
86 #define IOAPICENTRY_FLAG_EN	0x01
87 
88 
89 /* MP Floating Pointer Structure */
90 typedef struct MPFPS {
91 	char    signature[4];
92 	u_int32_t pap;
93 	u_char  length;
94 	u_char  spec_rev;
95 	u_char  checksum;
96 	u_char  mpfb1;
97 	u_char  mpfb2;
98 	u_char  mpfb3;
99 	u_char  mpfb4;
100 	u_char  mpfb5;
101 }      *mpfps_t;
102 
103 /* MP Configuration Table Header */
104 typedef struct MPCTH {
105 	char    signature[4];
106 	u_short base_table_length;
107 	u_char  spec_rev;
108 	u_char  checksum;
109 	u_char  oem_id[8];
110 	u_char  product_id[12];
111 	u_int32_t oem_table_pointer;
112 	u_short oem_table_size;
113 	u_short entry_count;
114 	u_int32_t apic_address;
115 	u_short extended_table_length;
116 	u_char  extended_table_checksum;
117 	u_char  reserved;
118 }      *mpcth_t;
119 
120 
121 typedef struct PROCENTRY {
122 	u_char  type;
123 	u_char  apic_id;
124 	u_char  apic_version;
125 	u_char  cpu_flags;
126 	u_int32_t cpu_signature;
127 	u_int32_t feature_flags;
128 	u_int32_t reserved1;
129 	u_int32_t reserved2;
130 }      *proc_entry_ptr;
131 
132 typedef struct BUSENTRY {
133 	u_char  type;
134 	u_char  bus_id;
135 	char    bus_type[6];
136 }      *bus_entry_ptr;
137 
138 typedef struct IOAPICENTRY {
139 	u_char  type;
140 	u_char  apic_id;
141 	u_char  apic_version;
142 	u_char  apic_flags;
143 	u_int32_t apic_address;
144 }      *io_apic_entry_ptr;
145 
146 typedef struct INTENTRY {
147 	u_char  type;
148 	u_char  int_type;
149 	u_short int_flags;
150 	u_char  src_bus_id;
151 	u_char  src_bus_irq;
152 	u_char  dst_apic_id;
153 	u_char  dst_apic_int;
154 }      *int_entry_ptr;
155 
156 /* descriptions of MP basetable entries */
157 typedef struct BASETABLE_ENTRY {
158 	u_char  type;
159 	u_char  length;
160 	char    name[16];
161 }       basetable_entry;
162 
163 /*
164  * This code MUST be enabled both here and in mpboot.s.
165  * It follows the very early stages of AP boot by placing values in CMOS RAM.
166  * It will NORMALLY never be needed, hence the primitive method used to
167  * enable it.
168  */
169 #if defined(CHECK_POINTS)
170 #define CHECK_READ(A)	 (outb(CMOS_REG, (A)), inb(CMOS_DATA))
171 #define CHECK_WRITE(A,D) (outb(CMOS_REG, (A)), outb(CMOS_DATA, (D)))
172 
173 #define CHECK_INIT(D);				\
174 	CHECK_WRITE(0x34, (D));			\
175 	CHECK_WRITE(0x35, (D));			\
176 	CHECK_WRITE(0x36, (D));			\
177 	CHECK_WRITE(0x37, (D));			\
178 	CHECK_WRITE(0x38, (D));			\
179 	CHECK_WRITE(0x39, (D));
180 
181 #define CHECK_PRINT(S);				\
182 	kprintf("%s: %d, %d, %d, %d, %d, %d\n",	\
183 	   (S),					\
184 	   CHECK_READ(0x34),			\
185 	   CHECK_READ(0x35),			\
186 	   CHECK_READ(0x36),			\
187 	   CHECK_READ(0x37),			\
188 	   CHECK_READ(0x38),			\
189 	   CHECK_READ(0x39));
190 
191 #else				/* CHECK_POINTS */
192 
193 #define CHECK_INIT(D)
194 #define CHECK_PRINT(S)
195 
196 #endif				/* CHECK_POINTS */
197 
198 /*
199  * Values to send to the POST hardware.
200  */
201 #define MP_BOOTADDRESS_POST	0x10
202 #define MP_PROBE_POST		0x11
203 #define MPTABLE_PASS1_POST	0x12
204 
205 #define MP_START_POST		0x13
206 #define MP_ENABLE_POST		0x14
207 #define MPTABLE_PASS2_POST	0x15
208 
209 #define START_ALL_APS_POST	0x16
210 #define INSTALL_AP_TRAMP_POST	0x17
211 #define START_AP_POST		0x18
212 
213 #define MP_ANNOUNCE_POST	0x19
214 
215 static int need_hyperthreading_fixup;
216 static u_int logical_cpus;
217 u_int	logical_cpus_mask;
218 
219 /** XXX FIXME: where does this really belong, isa.h/isa.c perhaps? */
220 int	current_postcode;
221 
222 /** XXX FIXME: what system files declare these??? */
223 extern struct region_descriptor r_gdt, r_idt;
224 
225 int	bsp_apic_ready = 0;	/* flags usability of BSP APIC */
226 int	mp_naps;		/* # of Application Processors (APs) */
227 int	mp_nbusses;		/* # of busses */
228 #ifdef APIC_IO
229 int	mp_napics;		/* # of IO APICs */
230 #endif
231 int	boot_cpu_id;		/* designated BSP */
232 vm_offset_t cpu_apic_address;
233 #ifdef APIC_IO
234 vm_offset_t io_apic_address[NAPICID];	/* NAPICID is more than enough */
235 u_int32_t *io_apic_versions;
236 #endif
237 extern	int nkpt;
238 
239 u_int32_t cpu_apic_versions[MAXCPU];
240 int64_t tsc0_offset;
241 extern int64_t tsc_offsets[];
242 
243 #ifdef APIC_IO
244 struct apic_intmapinfo	int_to_apicintpin[APIC_INTMAPSIZE];
245 #endif
246 
247 /*
248  * APIC ID logical/physical mapping structures.
249  * We oversize these to simplify boot-time config.
250  */
251 int     cpu_num_to_apic_id[NAPICID];
252 #ifdef APIC_IO
253 int     io_num_to_apic_id[NAPICID];
254 #endif
255 int     apic_id_to_logical[NAPICID];
256 
257 /* AP uses this during bootstrap.  Do not staticize.  */
258 char *bootSTK;
259 static int bootAP;
260 
261 /*
262  * SMP page table page.  Set up by locore to point to a page table
263  * page from which we allocate per-cpu privatespace areas, io_apic
264  * mappings, and so forth.
265  */
266 
267 #define IO_MAPPING_START_INDEX	\
268 		(SMP_MAXCPU * sizeof(struct privatespace) / PAGE_SIZE)
269 
270 extern pt_entry_t *SMPpt;
271 
272 struct pcb stoppcbs[MAXCPU];
273 
274 extern inthand_t IDTVEC(fast_syscall), IDTVEC(fast_syscall32);
275 
276 /*
277  * Local data and functions.
278  */
279 
280 static int	mp_capable;
281 static u_int	boot_address;
282 static u_int	base_memory;
283 static int	mp_finish;
284 
285 static mpfps_t	mpfps;
286 static long	search_for_sig(u_int32_t target, int count);
287 static void	mp_enable(u_int boot_addr);
288 
289 static void	mptable_hyperthread_fixup(u_int id_mask);
290 static void	mptable_pass1(void);
291 static int	mptable_pass2(void);
292 static void	default_mp_table(int type);
293 static void	fix_mp_table(void);
294 #ifdef APIC_IO
295 static void	setup_apic_irq_mapping(void);
296 static int	apic_int_is_bus_type(int intr, int bus_type);
297 #endif
298 static int	start_all_aps(u_int boot_addr);
299 #if 0
300 static void	install_ap_tramp(u_int boot_addr);
301 #endif
302 static int	start_ap(struct mdglobaldata *gd, u_int boot_addr, int smibest);
303 static int	smitest(void);
304 
305 static cpumask_t smp_startup_mask = 1;	/* which cpus have been started */
306 cpumask_t smp_active_mask = 1;	/* which cpus are ready for IPIs etc? */
307 SYSCTL_INT(_machdep, OID_AUTO, smp_active, CTLFLAG_RD, &smp_active_mask, 0, "");
308 static u_int	bootMP_size;
309 
310 /*
311  * Calculate usable address in base memory for AP trampoline code.
312  */
313 u_int
314 mp_bootaddress(u_int basemem)
315 {
316 	POSTCODE(MP_BOOTADDRESS_POST);
317 
318 	base_memory = basemem;
319 
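	/*
	 * Layout (a sketch of what the code below computes): the AP
	 * trampoline is placed in the last free page of base memory,
	 * and the three page-table pages it needs sit immediately below
	 * it, so the value returned (mptramp_pagetables) is also the
	 * lowest address consumed.
	 */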
320 	bootMP_size = mptramp_end - mptramp_start;
321 	boot_address = trunc_page(basemem * 1024); /* round down to 4k boundary */
322 	if (((basemem * 1024) - boot_address) < bootMP_size)
323 		boot_address -= PAGE_SIZE;	/* not enough, lower by 4k */
324 	/* 3 levels of page table pages */
325 	mptramp_pagetables = boot_address - (PAGE_SIZE * 3);
326 
327 	return mptramp_pagetables;
328 }
329 
330 
331 /*
332  * Look for an Intel MP spec table (ie, SMP capable hardware).
333  */
334 int
335 mp_probe(void)
336 {
337 	long    x;
338 	u_long  segment;
339 	u_int32_t target;
340 
341 	/*
342 	 * Make sure our SMPpt[] page table is big enough to hold all the
343 	 * mappings we need.
344 	 */
345 	KKASSERT(IO_MAPPING_START_INDEX < NPTEPG - 2);
346 
347 	POSTCODE(MP_PROBE_POST);
348 
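	/*
	 * Per MP spec v1.4 the floating pointer structure may live in the
	 * first KB of the EBDA, in the last KB of system base memory, or
	 * in the BIOS ROM between 0xF0000 and 0xFFFFF; the searches below
	 * follow that order.
	 */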
349 	/* see if EBDA exists */
350 	if ((segment = (u_long) * (u_short *) (KERNBASE + 0x40e)) != 0) {
351 		/* search first 1K of EBDA */
352 		target = (u_int32_t) (segment << 4);
353 		if ((x = search_for_sig(target, 1024 / 4)) != -1L)
354 			goto found;
355 	} else {
356 		/* last 1K of base memory, effective 'top of base' passed in */
357 		target = (u_int32_t) (base_memory - 0x400);
358 		if ((x = search_for_sig(target, 1024 / 4)) != -1L)
359 			goto found;
360 	}
361 
362 	/* search the BIOS */
363 	target = (u_int32_t) BIOS_BASE;
364 	if ((x = search_for_sig(target, BIOS_COUNT)) != -1L)
365 		goto found;
366 
367 	/* nothing found */
368 	mpfps = (mpfps_t)0;
369 	mp_capable = 0;
370 	return 0;
371 
372 found:
373 	/*
374 	 * Calculate needed resources.  We can safely map physical
375 	 * memory into SMPpt after mptable_pass1() completes.
376 	 */
377 	mpfps = (mpfps_t)x;
378 	mptable_pass1();
379 
380 	/* flag fact that we are running multiple processors */
381 	mp_capable = 1;
382 	return 1;
383 }
384 
385 
386 /*
387  * Startup the SMP processors.
388  */
389 void
390 mp_start(void)
391 {
392 	POSTCODE(MP_START_POST);
393 
394 	/* look for MP capable motherboard */
395 	if (mp_capable)
396 		mp_enable(boot_address);
397 	else
398 		panic("MP hardware not found!");
399 }
400 
401 
402 /*
403  * Print various information about the SMP system hardware and setup.
404  */
405 void
406 mp_announce(void)
407 {
408 	int     x;
409 
410 	POSTCODE(MP_ANNOUNCE_POST);
411 
412 	kprintf("DragonFly/MP: Multiprocessor motherboard\n");
413 	kprintf(" cpu0 (BSP): apic id: %2d", CPU_TO_ID(0));
414 	kprintf(", version: 0x%08x", cpu_apic_versions[0]);
415 	kprintf(", at 0x%08jx\n", (intmax_t)cpu_apic_address);
416 	for (x = 1; x <= mp_naps; ++x) {
417 		kprintf(" cpu%d (AP):  apic id: %2d", x, CPU_TO_ID(x));
418 		kprintf(", version: 0x%08x", cpu_apic_versions[x]);
419 		kprintf(", at 0x%08jx\n", (intmax_t)cpu_apic_address);
420 	}
421 
422 #if defined(APIC_IO)
423 	for (x = 0; x < mp_napics; ++x) {
424 		kprintf(" io%d (APIC): apic id: %2d", x, IO_TO_ID(x));
425 		kprintf(", version: 0x%08x", io_apic_versions[x]);
426 		kprintf(", at 0x%08lx\n", io_apic_address[x]);
427 	}
428 #else
429 	kprintf(" Warning: APIC I/O disabled\n");
430 #endif	/* APIC_IO */
431 }
432 
433 /*
434  * AP cpus call this to sync up protected mode.
435  *
436  * WARNING! %gs is not set up on entry.  This routine sets up %gs.
437  */
438 void
439 init_secondary(void)
440 {
441 	int	gsel_tss;
442 	int	x, myid = bootAP;
443 	u_int64_t msr, cr0;
444 	struct mdglobaldata *md;
445 	struct privatespace *ps;
446 
447 	ps = &CPU_prvspace[myid];
448 
449 	gdt_segs[GPROC0_SEL].ssd_base =
450 		(long) &ps->mdglobaldata.gd_common_tss;
451 	ps->mdglobaldata.mi.gd_prvspace = ps;
452 
453 	/* We fill the 32-bit segment descriptors */
454 	for (x = 0; x < NGDT; x++) {
455 		if (x != GPROC0_SEL && x != (GPROC0_SEL + 1))
456 			ssdtosd(&gdt_segs[x], &gdt[myid * NGDT + x]);
457 	}
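	/*
	 * GPROC0_SEL and the slot after it are skipped above because the
	 * 64-bit TSS descriptor is 16 bytes wide and spans two 8-byte GDT
	 * entries; it is installed separately via ssdtosyssd() below.
	 */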
458 	/* And now a 64-bit one */
459 	ssdtosyssd(&gdt_segs[GPROC0_SEL],
460 	    (struct system_segment_descriptor *)&gdt[myid * NGDT + GPROC0_SEL]);
461 
462 	r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
463 	r_gdt.rd_base = (long) &gdt[myid * NGDT];
464 	lgdt(&r_gdt);			/* does magic intra-segment return */
465 
466 	/* lgdt() destroys the GSBASE value, so we load GSBASE after lgdt() */
467 	wrmsr(MSR_FSBASE, 0);		/* User value */
468 	wrmsr(MSR_GSBASE, (u_int64_t)ps);
469 	wrmsr(MSR_KGSBASE, 0);		/* XXX User value while we're in the kernel */
470 
471 	lidt(&r_idt);
472 
473 #if 0
474 	lldt(_default_ldt);
475 	mdcpu->gd_currentldt = _default_ldt;
476 #endif
477 
478 	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
479 	gdt[myid * NGDT + GPROC0_SEL].sd_type = SDT_SYSTSS;
480 
481 	md = mdcpu;	/* loaded through %gs:0 (mdglobaldata.mi.gd_prvspace)*/
482 
483 	md->gd_common_tss.tss_rsp0 = 0;	/* not used until after switch */
484 #if 0 /* JG XXX */
485 	md->gd_common_tss.tss_ioopt = (sizeof md->gd_common_tss) << 16;
486 #endif
487 	md->gd_tss_gdt = &gdt[myid * NGDT + GPROC0_SEL];
488 	md->gd_common_tssd = *md->gd_tss_gdt;
489 #if 0 /* JG XXX */
490 	md->gd_common_tss.tss_ist1 = (long)&doublefault_stack[PAGE_SIZE];
491 #endif
492 	ltr(gsel_tss);
493 
494 	/*
495 	 * Set to a known state:
496 	 * Set by mpboot.s: CR0_PG, CR0_PE
497 	 * Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
498 	 */
499 	cr0 = rcr0();
500 	cr0 &= ~(CR0_CD | CR0_NW | CR0_EM);
501 	load_cr0(cr0);
502 
503 	/* Set up the fast syscall stuff */
504 	msr = rdmsr(MSR_EFER) | EFER_SCE;
505 	wrmsr(MSR_EFER, msr);
506 	wrmsr(MSR_LSTAR, (u_int64_t)IDTVEC(fast_syscall));
507 	wrmsr(MSR_CSTAR, (u_int64_t)IDTVEC(fast_syscall32));
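	/*
	 * MSR_STAR layout: bits 47:32 supply the kernel CS selector loaded
	 * by SYSCALL (SS is derived from it), and bits 63:48 the user
	 * selector base used by SYSRET, which is why the 32-bit user code
	 * selector is loaded there.
	 */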
508 	msr = ((u_int64_t)GSEL(GCODE_SEL, SEL_KPL) << 32) |
509 	      ((u_int64_t)GSEL(GUCODE32_SEL, SEL_UPL) << 48);
510 	wrmsr(MSR_STAR, msr);
511 	wrmsr(MSR_SF_MASK, PSL_NT|PSL_T|PSL_I|PSL_C|PSL_D);
512 
513 	pmap_set_opt();		/* PSE/4MB pages, etc */
514 #if JGXXX
515 	/* Initialize the PAT MSR. */
516 	pmap_init_pat();
517 #endif
518 
519 	/* set up CPU registers and state */
520 	cpu_setregs();
521 
522 	/* set up SSE/NX registers */
523 	initializecpu();
524 
525 	/* set up FPU state on the AP */
526 	npxinit(__INITIAL_NPXCW__);
527 
528 	/* disable the APIC, just to be SURE */
529 	lapic->svr &= ~APIC_SVR_ENABLE;
530 
531 	/* data returned to BSP */
532 	cpu_apic_versions[0] = lapic->version;
533 }
534 
535 /*******************************************************************
536  * local functions and data
537  */
538 
539 /*
540  * start the SMP system
541  */
542 static void
543 mp_enable(u_int boot_addr)
544 {
545 	int     x;
546 #if defined(APIC_IO)
547 	int     apic;
548 	u_int   ux;
549 #endif	/* APIC_IO */
550 
551 	POSTCODE(MP_ENABLE_POST);
552 
553 #if 0 /* JGXXX */
554 	/* turn on 4MB of V == P addressing so we can get to MP table */
555 	*(int *)PTD = PG_V | PG_RW | ((uintptr_t)(void *)KPTphys & PG_FRAME);
556 	cpu_invltlb();
557 #endif
558 
559 	/* examine the MP table for needed info, uses physical addresses */
560 	x = mptable_pass2();
561 
562 #if 0 /* JGXXX */
563 	*(int *)PTD = 0;
564 	cpu_invltlb();
565 #endif /* 0 JGXXX */
566 
567 	/* can't process default configs until the CPU APIC is pmapped */
568 	if (x)
569 		default_mp_table(x);
570 
571 	/* post scan cleanup */
572 	fix_mp_table();
573 
574 #if defined(APIC_IO)
575 
576 	setup_apic_irq_mapping();
577 
578 	/* fill the LOGICAL io_apic_versions table */
579 	for (apic = 0; apic < mp_napics; ++apic) {
580 		ux = io_apic_read(apic, IOAPIC_VER);
581 		io_apic_versions[apic] = ux;
582 		io_apic_set_id(apic, IO_TO_ID(apic));
583 	}
584 
585 	/* program each IO APIC in the system */
586 	for (apic = 0; apic < mp_napics; ++apic)
587 		if (io_apic_setup(apic) < 0)
588 			panic("IO APIC setup failure");
589 
590 #endif	/* APIC_IO */
591 
592 	/*
593 	 * These are required for SMP operation
594 	 */
595 
596 	/* install a 'Spurious INTerrupt' vector */
597 	setidt(XSPURIOUSINT_OFFSET, Xspuriousint,
598 	       SDT_SYSIGT, SEL_KPL, 0);
599 
600 	/* install an inter-CPU IPI for TLB invalidation */
601 	setidt(XINVLTLB_OFFSET, Xinvltlb,
602 	       SDT_SYSIGT, SEL_KPL, 0);
603 
604 	/* install an inter-CPU IPI for IPIQ messaging */
605 	setidt(XIPIQ_OFFSET, Xipiq,
606 	       SDT_SYSIGT, SEL_KPL, 0);
607 
608 	/* install a timer vector */
609 	setidt(XTIMER_OFFSET, Xtimer,
610 	       SDT_SYSIGT, SEL_KPL, 0);
611 
612 	/* install an inter-CPU IPI for CPU stop/restart */
613 	setidt(XCPUSTOP_OFFSET, Xcpustop,
614 	       SDT_SYSIGT, SEL_KPL, 0);
615 
616 	/* start each Application Processor */
617 	start_all_aps(boot_addr);
618 }
619 
620 
621 /*
622  * look for the MP spec signature
623  */
624 
625 /* string defined by the Intel MP Spec as identifying the MP table */
626 #define MP_SIG		0x5f504d5f	/* _MP_ */
627 #define NEXT(X)		((X) += 4)
628 static long
629 search_for_sig(u_int32_t target, int count)
630 {
631 	int     x;
632 	u_int32_t *addr = (u_int32_t *) (KERNBASE + target);
633 
634 	for (x = 0; x < count; NEXT(x))
635 		if (addr[x] == MP_SIG)
636 			/* return the kernel virtual address of the match */
637 			return (long)(&addr[x]);
638 
639 	return -1;
640 }
641 
642 
643 static basetable_entry basetable_entry_types[] =
644 {
645 	{0, 20, "Processor"},
646 	{1, 8, "Bus"},
647 	{2, 8, "I/O APIC"},
648 	{3, 8, "I/O INT"},
649 	{4, 8, "Local INT"}
650 };
651 
652 typedef struct BUSDATA {
653 	u_char  bus_id;
654 	enum busTypes bus_type;
655 }       bus_datum;
656 
657 typedef struct INTDATA {
658 	u_char  int_type;
659 	u_short int_flags;
660 	u_char  src_bus_id;
661 	u_char  src_bus_irq;
662 	u_char  dst_apic_id;
663 	u_char  dst_apic_int;
664 	u_char	int_vector;
665 }       io_int, local_int;
666 
667 typedef struct BUSTYPENAME {
668 	u_char  type;
669 	char    name[7];
670 }       bus_type_name;
671 
672 static bus_type_name bus_type_table[] =
673 {
674 	{CBUS, "CBUS"},
675 	{CBUSII, "CBUSII"},
676 	{EISA, "EISA"},
677 	{MCA, "MCA"},
678 	{UNKNOWN_BUSTYPE, "---"},
679 	{ISA, "ISA"},
680 	{MCA, "MCA"},
681 	{UNKNOWN_BUSTYPE, "---"},
682 	{UNKNOWN_BUSTYPE, "---"},
683 	{UNKNOWN_BUSTYPE, "---"},
684 	{UNKNOWN_BUSTYPE, "---"},
685 	{UNKNOWN_BUSTYPE, "---"},
686 	{PCI, "PCI"},
687 	{UNKNOWN_BUSTYPE, "---"},
688 	{UNKNOWN_BUSTYPE, "---"},
689 	{UNKNOWN_BUSTYPE, "---"},
690 	{UNKNOWN_BUSTYPE, "---"},
691 	{XPRESS, "XPRESS"},
692 	{UNKNOWN_BUSTYPE, "---"}
693 };
694 /* from MP spec v1.4, table 5-1 */
695 static int default_data[7][5] =
696 {
697 /*   nbus, id0, type0, id1, type1 */
698 	{1, 0, ISA, 255, 255},
699 	{1, 0, EISA, 255, 255},
700 	{1, 0, EISA, 255, 255},
701 	{1, 0, MCA, 255, 255},
702 	{2, 0, ISA, 1, PCI},
703 	{2, 0, EISA, 1, PCI},
704 	{2, 0, MCA, 1, PCI}
705 };
706 
707 
708 /* the bus data */
709 static bus_datum *bus_data;
710 
711 #ifdef APIC_IO
712 /* the IO INT data, one entry per possible APIC INTerrupt */
713 static io_int  *io_apic_ints;
714 static int nintrs;
715 #endif
716 
717 static int processor_entry	(proc_entry_ptr entry, int cpu);
718 static int bus_entry		(bus_entry_ptr entry, int bus);
719 #ifdef APIC_IO
720 static int io_apic_entry	(io_apic_entry_ptr entry, int apic);
721 static int int_entry		(int_entry_ptr entry, int intr);
722 #endif
723 static int lookup_bus_type	(char *name);
724 
725 
726 /*
727  * 1st pass on motherboard's Intel MP specification table.
728  *
729  * initializes:
730  *	ncpus = 1
731  *
732  * determines:
733  *	cpu_apic_address (common to all CPUs)
734  *	io_apic_address[N]
735  *	mp_naps
736  *	mp_nbusses
737  *	mp_napics
738  *	nintrs
739  */
740 static void
741 mptable_pass1(void)
742 {
743 #ifdef APIC_IO
744 	int	x;
745 #endif
746 	mpcth_t	cth;
747 	int	totalSize;
748 	void*	position;
749 	int	count;
750 	int	type;
751 	u_int	id_mask;
752 
753 	POSTCODE(MPTABLE_PASS1_POST);
754 
755 #ifdef APIC_IO
756 	/* clear various tables */
757 	for (x = 0; x < NAPICID; ++x) {
758 		io_apic_address[x] = ~0;	/* IO APIC address table */
759 	}
760 #endif
761 
762 	/* init everything to empty */
763 	mp_naps = 0;
764 	mp_nbusses = 0;
765 #ifdef APIC_IO
766 	mp_napics = 0;
767 	nintrs = 0;
768 #endif
769 	id_mask = 0;
770 
771 	/* check for use of 'default' configuration */
772 	if (mpfps->mpfb1 != 0) {
773 		/* use default addresses */
774 		cpu_apic_address = DEFAULT_APIC_BASE;
775 #ifdef APIC_IO
776 		io_apic_address[0] = DEFAULT_IO_APIC_BASE;
777 #endif
778 
779 		/* fill in with defaults */
780 		mp_naps = 2;		/* includes BSP */
781 		mp_nbusses = default_data[mpfps->mpfb1 - 1][0];
782 #if defined(APIC_IO)
783 		mp_napics = 1;
784 		nintrs = 16;
785 #endif	/* APIC_IO */
786 	}
787 	else {
788 		if (mpfps->pap == 0)
789 			panic("MP Configuration Table Header MISSING!");
790 		cth = (void *)PHYS_TO_DMAP(mpfps->pap);
791 
792 		cpu_apic_address = (vm_offset_t) cth->apic_address;
793 
794 		/* walk the table, recording info of interest */
795 		totalSize = cth->base_table_length - sizeof(struct MPCTH);
796 		position = (u_char *) cth + sizeof(struct MPCTH);
797 		count = cth->entry_count;
798 
799 		while (count--) {
800 			switch (type = *(u_char *) position) {
801 			case 0: /* processor_entry */
802 				if (((proc_entry_ptr)position)->cpu_flags
803 				    & PROCENTRY_FLAG_EN) {
804 					++mp_naps;
805 					id_mask |= 1 <<
806 					    ((proc_entry_ptr)position)->apic_id;
807 				}
808 				break;
809 			case 1: /* bus_entry */
810 				++mp_nbusses;
811 				break;
812 			case 2: /* io_apic_entry */
813 #ifdef APIC_IO
814 				if (((io_apic_entry_ptr)position)->apic_flags
815 					& IOAPICENTRY_FLAG_EN)
816 					io_apic_address[mp_napics++] =
817 					    (vm_offset_t)((io_apic_entry_ptr)
818 						position)->apic_address;
819 #endif
820 				break;
821 			case 3: /* int_entry */
822 #ifdef APIC_IO
823 				++nintrs;
824 #endif
825 				break;
826 			case 4:	/* local_int_entry */
827 				break;
828 			default:
829 				panic("mpfps Base Table HOSED!");
830 				/* NOTREACHED */
831 			}
832 
833 			totalSize -= basetable_entry_types[type].length;
834 			position = (uint8_t *)position +
835 			    basetable_entry_types[type].length;
836 		}
837 	}
838 
839 	/* qualify the numbers */
840 	if (mp_naps > MAXCPU) {
841 		kprintf("Warning: only using %d of %d available CPUs!\n",
842 			MAXCPU, mp_naps);
843 		mp_naps = MAXCPU;
844 	}
845 
846 	/* See if we need to fixup HT logical CPUs. */
847 	mptable_hyperthread_fixup(id_mask);
848 
849 	/*
850 	 * Count the BSP.
851 	 * This is also used as a counter while starting the APs.
852 	 */
853 	ncpus = 1;
854 
855 	--mp_naps;	/* subtract the BSP */
856 }
857 
858 
859 /*
860  * 2nd pass on motherboard's Intel MP specification table.
861  *
862  * sets:
863  *	boot_cpu_id
864  *	ID_TO_IO(N), phy APIC ID to log CPU/IO table
865  *	CPU_TO_ID(N), logical CPU to APIC ID table
866  *	IO_TO_ID(N), logical IO to APIC ID table
867  *	bus_data[N]
868  *	io_apic_ints[N]
869  */
870 static int
871 mptable_pass2(void)
872 {
873 	struct PROCENTRY proc;
874 	int     x;
875 	mpcth_t cth;
876 	int     totalSize;
877 	void*   position;
878 	int     count;
879 	int     type;
880 	int     apic, bus, cpu, intr;
881 	int	i;
882 
883 	POSTCODE(MPTABLE_PASS2_POST);
884 
885 	/* Initialize fake proc entry for use with HT fixup. */
886 	bzero(&proc, sizeof(proc));
887 	proc.type = 0;
888 	proc.cpu_flags = PROCENTRY_FLAG_EN;
889 
890 #ifdef APIC_IO
891 	MALLOC(io_apic_versions, u_int32_t *, sizeof(u_int32_t) * mp_napics,
892 	    M_DEVBUF, M_WAITOK);
893 	MALLOC(ioapic, volatile ioapic_t **, sizeof(ioapic_t *) * mp_napics,
894 	    M_DEVBUF, M_WAITOK | M_ZERO);
895 	MALLOC(io_apic_ints, io_int *, sizeof(io_int) * (nintrs + FIXUP_EXTRA_APIC_INTS),
896 	    M_DEVBUF, M_WAITOK);
897 #endif
898 	MALLOC(bus_data, bus_datum *, sizeof(bus_datum) * mp_nbusses,
899 	    M_DEVBUF, M_WAITOK);
900 
901 #ifdef APIC_IO
902 	for (i = 0; i < mp_napics; i++) {
903 		ioapic[i] = permanent_io_mapping(io_apic_address[i]);
904 	}
905 #endif
906 
907 	/* clear various tables */
908 	for (x = 0; x < NAPICID; ++x) {
909 		CPU_TO_ID(x) = -1;	/* logical CPU to APIC ID table */
910 #ifdef APIC_IO
911 		ID_TO_IO(x) = -1;	/* phy APIC ID to log CPU/IO table */
912 		IO_TO_ID(x) = -1;	/* logical IO to APIC ID table */
913 #endif
914 	}
915 
916 	/* clear bus data table */
917 	for (x = 0; x < mp_nbusses; ++x)
918 		bus_data[x].bus_id = 0xff;
919 
920 #ifdef APIC_IO
921 	/* clear IO APIC INT table */
922 	for (x = 0; x < (nintrs + 1); ++x) {
923 		io_apic_ints[x].int_type = 0xff;
924 		io_apic_ints[x].int_vector = 0xff;
925 	}
926 #endif
927 
928 	/* setup the cpu/apic mapping arrays */
929 	boot_cpu_id = -1;
930 
931 	/* record whether PIC (IMCR present, MP feature byte 2 bit 7) or virtual-wire mode */
932 	machintr_setvar_simple(MACHINTR_VAR_IMCR_PRESENT, mpfps->mpfb2 & 0x80);
933 
934 	/* check for use of 'default' configuration */
935 	if (mpfps->mpfb1 != 0)
936 		return mpfps->mpfb1;	/* return default configuration type */
937 
938 	if (mpfps->pap == 0)
939 		panic("MP Configuration Table Header MISSING!");
940 
941 	cth = (void *)PHYS_TO_DMAP(mpfps->pap);
942 	/* walk the table, recording info of interest */
943 	totalSize = cth->base_table_length - sizeof(struct MPCTH);
944 	position = (u_char *) cth + sizeof(struct MPCTH);
945 	count = cth->entry_count;
946 	apic = bus = intr = 0;
947 	cpu = 1;				/* pre-count the BSP */
948 
949 	while (count--) {
950 		switch (type = *(u_char *) position) {
951 		case 0:
952 			if (processor_entry(position, cpu))
953 				++cpu;
954 
955 			if (need_hyperthreading_fixup) {
956 				/*
957 				 * Create fake mptable processor entries
958 				 * and feed them to processor_entry() to
959 				 * enumerate the logical CPUs.
960 				 */
961 				proc.apic_id = ((proc_entry_ptr)position)->apic_id;
962 				for (i = 1; i < logical_cpus; i++) {
963 					proc.apic_id++;
964 					processor_entry(&proc, cpu);
965 					logical_cpus_mask |= (1 << cpu);
966 					cpu++;
967 				}
968 			}
969 			break;
970 		case 1:
971 			if (bus_entry(position, bus))
972 				++bus;
973 			break;
974 		case 2:
975 #ifdef APIC_IO
976 			if (io_apic_entry(position, apic))
977 				++apic;
978 #endif
979 			break;
980 		case 3:
981 #ifdef APIC_IO
982 			if (int_entry(position, intr))
983 				++intr;
984 #endif
985 			break;
986 		case 4:
987 			/* int_entry(position); */
988 			break;
989 		default:
990 			panic("mpfps Base Table HOSED!");
991 			/* NOTREACHED */
992 		}
993 
994 		totalSize -= basetable_entry_types[type].length;
995 		position = (uint8_t *)position + basetable_entry_types[type].length;
996 	}
997 
998 	if (boot_cpu_id == -1)
999 		panic("NO BSP found!");
1000 
1001 	/* report fact that it's NOT a default configuration */
1002 	return 0;
1003 }
1004 
1005 /*
1006  * Check if we should perform a hyperthreading "fix-up" to
1007  * enumerate any logical CPUs that aren't already listed
1008  * in the table.
1009  *
1010  * XXX: We assume that all of the physical CPUs in the
1011  * system have the same number of logical CPUs.
1012  *
1013  * XXX: We assume that APIC IDs are allocated such that
1014  * the APIC IDs for a physical processor are aligned
1015  * with the number of logical CPUs in the processor.
1016  */
1017 static void
1018 mptable_hyperthread_fixup(u_int id_mask)
1019 {
1020 	u_int i, id;
1021 
1022 	/* Nothing to do if there is no HTT support. */
1023 	if ((cpu_feature & CPUID_HTT) == 0)
1024 		return;
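	/*
	 * CPUID leaf 1, EBX bits 23:16 (held in cpu_procinfo here) report
	 * the number of logical processors per physical package when HTT
	 * is advertised.
	 */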
1025 	logical_cpus = (cpu_procinfo & CPUID_HTT_CORES) >> 16;
1026 	if (logical_cpus <= 1)
1027 		return;
1028 
1029 	/*
1030 	 * For each APIC ID of a CPU that is set in the mask,
1031 	 * scan the other candidate APIC ID's for this
1032 	 * physical processor.  If any of those ID's are
1033 	 * already in the table, then kill the fixup.
1034 	 */
1035 	for (id = 0; id <= MAXCPU; id++) {
1036 		if ((id_mask & 1 << id) == 0)
1037 			continue;
1038 		/* First, make sure we are on a logical_cpus boundary. */
1039 		if (id % logical_cpus != 0)
1040 			return;
1041 		for (i = id + 1; i < id + logical_cpus; i++)
1042 			if ((id_mask & 1 << i) != 0)
1043 				return;
1044 	}
1045 
1046 	/*
1047 	 * OK, the IDs checked out, so enable the fixup.  We have to fix up
1048 	 * mp_naps right now.
1049 	 */
1050 	need_hyperthreading_fixup = 1;
1051 	mp_naps *= logical_cpus;
1052 }
1053 
1054 #ifdef APIC_IO
1055 
1056 void
1057 assign_apic_irq(int apic, int intpin, int irq)
1058 {
1059 	int x;
1060 
1061 	if (int_to_apicintpin[irq].ioapic != -1)
1062 		panic("assign_apic_irq: inconsistent table");
1063 
1064 	int_to_apicintpin[irq].ioapic = apic;
1065 	int_to_apicintpin[irq].int_pin = intpin;
1066 	int_to_apicintpin[irq].apic_address = ioapic[apic];
1067 	int_to_apicintpin[irq].redirindex = IOAPIC_REDTBL + 2 * intpin;
1068 
1069 	for (x = 0; x < nintrs; x++) {
1070 		if ((io_apic_ints[x].int_type == 0 ||
1071 		     io_apic_ints[x].int_type == 3) &&
1072 		    io_apic_ints[x].int_vector == 0xff &&
1073 		    io_apic_ints[x].dst_apic_id == IO_TO_ID(apic) &&
1074 		    io_apic_ints[x].dst_apic_int == intpin)
1075 			io_apic_ints[x].int_vector = irq;
1076 	}
1077 }
1078 
1079 void
1080 revoke_apic_irq(int irq)
1081 {
1082 	int x;
1083 	int oldapic;
1084 	int oldintpin;
1085 
1086 	if (int_to_apicintpin[irq].ioapic == -1)
1087 		panic("revoke_apic_irq: inconsistent table");
1088 
1089 	oldapic = int_to_apicintpin[irq].ioapic;
1090 	oldintpin = int_to_apicintpin[irq].int_pin;
1091 
1092 	int_to_apicintpin[irq].ioapic = -1;
1093 	int_to_apicintpin[irq].int_pin = 0;
1094 	int_to_apicintpin[irq].apic_address = NULL;
1095 	int_to_apicintpin[irq].redirindex = 0;
1096 
1097 	for (x = 0; x < nintrs; x++) {
1098 		if ((io_apic_ints[x].int_type == 0 ||
1099 		     io_apic_ints[x].int_type == 3) &&
1100 		    io_apic_ints[x].int_vector != 0xff &&
1101 		    io_apic_ints[x].dst_apic_id == IO_TO_ID(oldapic) &&
1102 		    io_apic_ints[x].dst_apic_int == oldintpin)
1103 			io_apic_ints[x].int_vector = 0xff;
1104 	}
1105 }
1106 
1107 /*
1108  * Allocate an IRQ
1109  */
1110 static void
1111 allocate_apic_irq(int intr)
1112 {
1113 	int apic;
1114 	int intpin;
1115 	int irq;
1116 
1117 	if (io_apic_ints[intr].int_vector != 0xff)
1118 		return;		/* Interrupt handler already assigned */
1119 
1120 	if (io_apic_ints[intr].int_type != 0 &&
1121 	    (io_apic_ints[intr].int_type != 3 ||
1122 	     (io_apic_ints[intr].dst_apic_id == IO_TO_ID(0) &&
1123 	      io_apic_ints[intr].dst_apic_int == 0)))
1124 		return;		/* Not INT or ExtInt on != (0, 0) */
1125 
1126 	irq = 0;
1127 	while (irq < APIC_INTMAPSIZE &&
1128 	       int_to_apicintpin[irq].ioapic != -1)
1129 		irq++;
1130 
1131 	if (irq >= APIC_INTMAPSIZE)
1132 		return;		/* No free interrupt handlers */
1133 
1134 	apic = ID_TO_IO(io_apic_ints[intr].dst_apic_id);
1135 	intpin = io_apic_ints[intr].dst_apic_int;
1136 
1137 	assign_apic_irq(apic, intpin, irq);
1138 }
1139 
1140 
1141 static void
1142 swap_apic_id(int apic, int oldid, int newid)
1143 {
1144 	int x;
1145 	int oapic;
1146 
1147 
1148 	if (oldid == newid)
1149 		return;			/* Nothing to do */
1150 
1151 	kprintf("Changing APIC ID for IO APIC #%d from %d to %d in MP table\n",
1152 	       apic, oldid, newid);
1153 
1154 	/* Swap physical APIC IDs in interrupt entries */
1155 	for (x = 0; x < nintrs; x++) {
1156 		if (io_apic_ints[x].dst_apic_id == oldid)
1157 			io_apic_ints[x].dst_apic_id = newid;
1158 		else if (io_apic_ints[x].dst_apic_id == newid)
1159 			io_apic_ints[x].dst_apic_id = oldid;
1160 	}
1161 
1162 	/* Swap physical APIC IDs in IO_TO_ID mappings */
1163 	for (oapic = 0; oapic < mp_napics; oapic++)
1164 		if (IO_TO_ID(oapic) == newid)
1165 			break;
1166 
1167 	if (oapic < mp_napics) {
1168 		kprintf("Changing APIC ID for IO APIC #%d from "
1169 		       "%d to %d in MP table\n",
1170 		       oapic, newid, oldid);
1171 		IO_TO_ID(oapic) = oldid;
1172 	}
1173 	IO_TO_ID(apic) = newid;
1174 }
1175 
1176 
1177 static void
1178 fix_id_to_io_mapping(void)
1179 {
1180 	int x;
1181 
1182 	for (x = 0; x < NAPICID; x++)
1183 		ID_TO_IO(x) = -1;
1184 
1185 	for (x = 0; x <= mp_naps; x++)
1186 		if (CPU_TO_ID(x) < NAPICID)
1187 			ID_TO_IO(CPU_TO_ID(x)) = x;
1188 
1189 	for (x = 0; x < mp_napics; x++)
1190 		if (IO_TO_ID(x) < NAPICID)
1191 			ID_TO_IO(IO_TO_ID(x)) = x;
1192 }
1193 
1194 
1195 static int
1196 first_free_apic_id(void)
1197 {
1198 	int freeid, x;
1199 
1200 	for (freeid = 0; freeid < NAPICID; freeid++) {
1201 		for (x = 0; x <= mp_naps; x++)
1202 			if (CPU_TO_ID(x) == freeid)
1203 				break;
1204 		if (x <= mp_naps)
1205 			continue;
1206 		for (x = 0; x < mp_napics; x++)
1207 			if (IO_TO_ID(x) == freeid)
1208 				break;
1209 		if (x < mp_napics)
1210 			continue;
1211 		return freeid;
1212 	}
1213 	return freeid;
1214 }
1215 
1216 
1217 static int
1218 io_apic_id_acceptable(int apic, int id)
1219 {
1220 	int cpu;		/* Logical CPU number */
1221 	int oapic;		/* Logical IO APIC number for other IO APIC */
1222 
1223 	if (id >= NAPICID)
1224 		return 0;	/* Out of range */
1225 
1226 	for (cpu = 0; cpu <= mp_naps; cpu++)
1227 		if (CPU_TO_ID(cpu) == id)
1228 			return 0;	/* Conflict with CPU */
1229 
1230 	for (oapic = 0; oapic < mp_napics && oapic < apic; oapic++)
1231 		if (IO_TO_ID(oapic) == id)
1232 			return 0;	/* Conflict with other APIC */
1233 
1234 	return 1;		/* ID is acceptable for IO APIC */
1235 }
1236 
1237 static
1238 io_int *
1239 io_apic_find_int_entry(int apic, int pin)
1240 {
1241 	int     x;
1242 
1243 	/* search each of the possible INTerrupt sources */
1244 	for (x = 0; x < nintrs; ++x) {
1245 		if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1246 		    (pin == io_apic_ints[x].dst_apic_int))
1247 			return (&io_apic_ints[x]);
1248 	}
1249 	return NULL;
1250 }
1251 
1252 #endif
1253 
1254 /*
1255  * parse an Intel MP specification table
1256  */
1257 static void
1258 fix_mp_table(void)
1259 {
1260 	int	x;
1261 #ifdef APIC_IO
1262 	int	id;
1263 	int	apic;		/* IO APIC unit number */
1264 	int     freeid;		/* Free physical APIC ID */
1265 	int	physid;		/* Current physical IO APIC ID */
1266 	io_int *io14;
1267 #endif
1268 	int	bus_0 = 0;	/* Stop GCC warning */
1269 	int	bus_pci = 0;	/* Stop GCC warning */
1270 	int	num_pci_bus;
1271 
1272 	/*
1273 	 * Fix mis-numbering of the PCI bus and its INT entries if the BIOS
1274 	 * did it wrong.  The MP spec says that when more than 1 PCI bus
1275 	 * exists the BIOS must begin with bus entries for the PCI bus and use
1276 	 * actual PCI bus numbering.  This implies that when only 1 PCI bus
1277 	 * exists the BIOS can choose to ignore this ordering, and indeed many
1278 	 * MP motherboards do ignore it.  This causes a problem when the PCI
1279 	 * sub-system makes requests of the MP sub-system based on PCI bus
1280 	 * numbers.	So here we look for the situation and renumber the
1281 	 * busses and associated INTs in an effort to "make it right".
1282 	 */
1283 
1284 	/* find bus 0, PCI bus, count the number of PCI busses */
1285 	for (num_pci_bus = 0, x = 0; x < mp_nbusses; ++x) {
1286 		if (bus_data[x].bus_id == 0) {
1287 			bus_0 = x;
1288 		}
1289 		if (bus_data[x].bus_type == PCI) {
1290 			++num_pci_bus;
1291 			bus_pci = x;
1292 		}
1293 	}
1294 	/*
1295 	 * bus_0 == slot of bus with ID of 0
1296 	 * bus_pci == slot of last PCI bus encountered
1297 	 */
1298 
1299 	/* check the 1 PCI bus case for sanity */
1300 	/* if it is number 0 all is well */
1301 	if (num_pci_bus == 1 &&
1302 	    bus_data[bus_pci].bus_id != 0) {
1303 
1304 		/* mis-numbered, swap with whichever bus uses slot 0 */
1305 
1306 		/* swap the bus entry types */
1307 		bus_data[bus_pci].bus_type = bus_data[bus_0].bus_type;
1308 		bus_data[bus_0].bus_type = PCI;
1309 
1310 #ifdef APIC_IO
1311 		/* swap each relevant INTerrupt entry */
1312 		id = bus_data[bus_pci].bus_id;
1313 		for (x = 0; x < nintrs; ++x) {
1314 			if (io_apic_ints[x].src_bus_id == id) {
1315 				io_apic_ints[x].src_bus_id = 0;
1316 			}
1317 			else if (io_apic_ints[x].src_bus_id == 0) {
1318 				io_apic_ints[x].src_bus_id = id;
1319 			}
1320 		}
1321 #endif
1322 	}
1323 
1324 #ifdef APIC_IO
1325 	/* Assign IO APIC IDs.
1326 	 *
1327 	 * First try the existing ID. If a conflict is detected, try
1328 	 * the ID in the MP table.  If a conflict is still detected, find
1329 	 * a free id.
1330 	 *
1331 	 * We cannot use the ID_TO_IO table before all conflicts have been
1332 	 * resolved and the table has been corrected.
1333 	 */
1334 	for (apic = 0; apic < mp_napics; ++apic) { /* For all IO APICs */
1335 
1336 		/* First try to use the value set by the BIOS */
1337 		physid = io_apic_get_id(apic);
1338 		if (io_apic_id_acceptable(apic, physid)) {
1339 			if (IO_TO_ID(apic) != physid)
1340 				swap_apic_id(apic, IO_TO_ID(apic), physid);
1341 			continue;
1342 		}
1343 
1344 		/* Then check if the value in the MP table is acceptable */
1345 		if (io_apic_id_acceptable(apic, IO_TO_ID(apic)))
1346 			continue;
1347 
1348 		/* Last resort, find a free APIC ID and use it */
1349 		freeid = first_free_apic_id();
1350 		if (freeid >= NAPICID)
1351 			panic("No free physical APIC IDs found");
1352 
1353 		if (io_apic_id_acceptable(apic, freeid)) {
1354 			swap_apic_id(apic, IO_TO_ID(apic), freeid);
1355 			continue;
1356 		}
1357 		panic("Free physical APIC ID not usable");
1358 	}
1359 	fix_id_to_io_mapping();
1360 #endif
1361 
1362 #ifdef APIC_IO
1363 	/* detect and fix broken Compaq MP table */
1364 	if (apic_int_type(0, 0) == -1) {
1365 		kprintf("APIC_IO: MP table broken: 8259->APIC entry missing!\n");
1366 		io_apic_ints[nintrs].int_type = 3;	/* ExtInt */
1367 		io_apic_ints[nintrs].int_vector = 0xff;	/* Unassigned */
1368 		/* XXX fixme, set src bus id etc, but it doesn't seem to hurt */
1369 		io_apic_ints[nintrs].dst_apic_id = IO_TO_ID(0);
1370 		io_apic_ints[nintrs].dst_apic_int = 0;	/* Pin 0 */
1371 		nintrs++;
1372 	} else if (apic_int_type(0, 0) == 0) {
1373 		kprintf("APIC_IO: MP table broken: ExtINT entry corrupt!\n");
1374 		for (x = 0; x < nintrs; ++x)
1375 			if ((0 == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1376 			    (0 == io_apic_ints[x].dst_apic_int)) {
1377 				io_apic_ints[x].int_type = 3;
1378 				io_apic_ints[x].int_vector = 0xff;
1379 				break;
1380 			}
1381 	}
1382 
1383 	/*
1384 	 * Fix missing IRQ 15 when IRQ 14 is an ISA interrupt.  IDE
1385 	 * controllers universally come in pairs.  If IRQ 14 is specified
1386 	 * as an ISA interrupt, then IRQ 15 had better be too.
1387 	 *
1388 	 * [ Shuttle XPC / AMD Athlon X2 ]
1389 	 *	The MPTable is missing an entry for IRQ 15.  Note that the
1390 	 *	ACPI table has an entry for both 14 and 15.
1391 	 */
1392 	if (apic_int_type(0, 14) == 0 && apic_int_type(0, 15) == -1) {
1393 		kprintf("APIC_IO: MP table broken: IRQ 15 not ISA when IRQ 14 is!\n");
1394 		io14 = io_apic_find_int_entry(0, 14);
1395 		io_apic_ints[nintrs] = *io14;
1396 		io_apic_ints[nintrs].src_bus_irq = 15;
1397 		io_apic_ints[nintrs].dst_apic_int = 15;
1398 		nintrs++;
1399 	}
1400 #endif
1401 }
1402 
1403 #ifdef APIC_IO
1404 
1405 /* Assign low level interrupt handlers */
1406 static void
1407 setup_apic_irq_mapping(void)
1408 {
1409 	int	x;
1410 	int	int_vector;
1411 
1412 	/* Clear array */
1413 	for (x = 0; x < APIC_INTMAPSIZE; x++) {
1414 		int_to_apicintpin[x].ioapic = -1;
1415 		int_to_apicintpin[x].int_pin = 0;
1416 		int_to_apicintpin[x].apic_address = NULL;
1417 		int_to_apicintpin[x].redirindex = 0;
1418 	}
1419 
1420 	/* First assign ISA/EISA interrupts */
1421 	for (x = 0; x < nintrs; x++) {
1422 		int_vector = io_apic_ints[x].src_bus_irq;
1423 		if (int_vector < APIC_INTMAPSIZE &&
1424 		    io_apic_ints[x].int_vector == 0xff &&
1425 		    int_to_apicintpin[int_vector].ioapic == -1 &&
1426 		    (apic_int_is_bus_type(x, ISA) ||
1427 		     apic_int_is_bus_type(x, EISA)) &&
1428 		    io_apic_ints[x].int_type == 0) {
1429 			assign_apic_irq(ID_TO_IO(io_apic_ints[x].dst_apic_id),
1430 					io_apic_ints[x].dst_apic_int,
1431 					int_vector);
1432 		}
1433 	}
1434 
1435 	/* Assign ExtInt entry if no ISA/EISA interrupt 0 entry */
1436 	for (x = 0; x < nintrs; x++) {
1437 		if (io_apic_ints[x].dst_apic_int == 0 &&
1438 		    io_apic_ints[x].dst_apic_id == IO_TO_ID(0) &&
1439 		    io_apic_ints[x].int_vector == 0xff &&
1440 		    int_to_apicintpin[0].ioapic == -1 &&
1441 		    io_apic_ints[x].int_type == 3) {
1442 			assign_apic_irq(0, 0, 0);
1443 			break;
1444 		}
1445 	}
1446 
1447 	/* Assign PCI interrupts */
1448 	for (x = 0; x < nintrs; ++x) {
1449 		if (io_apic_ints[x].int_type == 0 &&
1450 		    io_apic_ints[x].int_vector == 0xff &&
1451 		    apic_int_is_bus_type(x, PCI))
1452 			allocate_apic_irq(x);
1453 	}
1454 }
1455 
1456 #endif
1457 
1458 static int
1459 processor_entry(proc_entry_ptr entry, int cpu)
1460 {
1461 	/* check for usability */
1462 	if (!(entry->cpu_flags & PROCENTRY_FLAG_EN))
1463 		return 0;
1464 
1465 	if (entry->apic_id >= NAPICID)
1466 		panic("CPU APIC ID out of range (0..%d)", NAPICID - 1);
1467 	/* check for BSP flag */
1468 	if (entry->cpu_flags & PROCENTRY_FLAG_BP) {
1469 		boot_cpu_id = entry->apic_id;
1470 		CPU_TO_ID(0) = entry->apic_id;
1471 		ID_TO_CPU(entry->apic_id) = 0;
1472 		return 0;	/* it's already been counted */
1473 	}
1474 
1475 	/* add another AP to list, if less than max number of CPUs */
1476 	else if (cpu < MAXCPU) {
1477 		CPU_TO_ID(cpu) = entry->apic_id;
1478 		ID_TO_CPU(entry->apic_id) = cpu;
1479 		return 1;
1480 	}
1481 
1482 	return 0;
1483 }
1484 
1485 
1486 static int
1487 bus_entry(bus_entry_ptr entry, int bus)
1488 {
1489 	int     x;
1490 	char    c, name[8];
1491 
1492 	/* encode the name into an index */
1493 	for (x = 0; x < 6; ++x) {
1494 		if ((c = entry->bus_type[x]) == ' ')
1495 			break;
1496 		name[x] = c;
1497 	}
1498 	name[x] = '\0';
1499 
1500 	if ((x = lookup_bus_type(name)) == UNKNOWN_BUSTYPE)
1501 		panic("unknown bus type: '%s'", name);
1502 
1503 	bus_data[bus].bus_id = entry->bus_id;
1504 	bus_data[bus].bus_type = x;
1505 
1506 	return 1;
1507 }
1508 
1509 #ifdef APIC_IO
1510 
1511 static int
1512 io_apic_entry(io_apic_entry_ptr entry, int apic)
1513 {
1514 	if (!(entry->apic_flags & IOAPICENTRY_FLAG_EN))
1515 		return 0;
1516 
1517 	IO_TO_ID(apic) = entry->apic_id;
1518 	if (entry->apic_id < NAPICID)
1519 		ID_TO_IO(entry->apic_id) = apic;
1520 
1521 	return 1;
1522 }
1523 
1524 #endif
1525 
1526 static int
1527 lookup_bus_type(char *name)
1528 {
1529 	int     x;
1530 
1531 	for (x = 0; x < MAX_BUSTYPE; ++x)
1532 		if (strcmp(bus_type_table[x].name, name) == 0)
1533 			return bus_type_table[x].type;
1534 
1535 	return UNKNOWN_BUSTYPE;
1536 }
1537 
1538 #ifdef APIC_IO
1539 
1540 static int
1541 int_entry(int_entry_ptr entry, int intr)
1542 {
1543 	int apic;
1544 
1545 	io_apic_ints[intr].int_type = entry->int_type;
1546 	io_apic_ints[intr].int_flags = entry->int_flags;
1547 	io_apic_ints[intr].src_bus_id = entry->src_bus_id;
1548 	io_apic_ints[intr].src_bus_irq = entry->src_bus_irq;
1549 	if (entry->dst_apic_id == 255) {
1550 		/* This signal goes to all IO APICs.  Select an IO APIC
1551 		   with a sufficient number of interrupt pins */
1552 		for (apic = 0; apic < mp_napics; apic++)
1553 			if (((io_apic_read(apic, IOAPIC_VER) &
1554 			      IOART_VER_MAXREDIR) >> MAXREDIRSHIFT) >=
1555 			    entry->dst_apic_int)
1556 				break;
1557 		if (apic < mp_napics)
1558 			io_apic_ints[intr].dst_apic_id = IO_TO_ID(apic);
1559 		else
1560 			io_apic_ints[intr].dst_apic_id = entry->dst_apic_id;
1561 	} else
1562 		io_apic_ints[intr].dst_apic_id = entry->dst_apic_id;
1563 	io_apic_ints[intr].dst_apic_int = entry->dst_apic_int;
1564 
1565 	return 1;
1566 }
1567 
1568 static int
1569 apic_int_is_bus_type(int intr, int bus_type)
1570 {
1571 	int     bus;
1572 
1573 	for (bus = 0; bus < mp_nbusses; ++bus)
1574 		if ((bus_data[bus].bus_id == io_apic_ints[intr].src_bus_id)
1575 		    && ((int) bus_data[bus].bus_type == bus_type))
1576 			return 1;
1577 
1578 	return 0;
1579 }
1580 
1581 /*
1582  * Given a traditional ISA INT mask, return an APIC mask.
1583  */
1584 u_int
1585 isa_apic_mask(u_int isa_mask)
1586 {
1587 	int isa_irq;
1588 	int apic_pin;
1589 
1590 #if defined(SKIP_IRQ15_REDIRECT)
1591 	if (isa_mask == (1 << 15)) {
1592 		kprintf("skipping ISA IRQ15 redirect\n");
1593 		return isa_mask;
1594 	}
1595 #endif  /* SKIP_IRQ15_REDIRECT */
1596 
1597 	isa_irq = ffs(isa_mask);		/* find its bit position */
1598 	if (isa_irq == 0)			/* doesn't exist */
1599 		return 0;
1600 	--isa_irq;				/* make it zero based */
1601 
1602 	apic_pin = isa_apic_irq(isa_irq);	/* look for APIC connection */
1603 	if (apic_pin == -1)
1604 		return 0;
1605 
1606 	return (1 << apic_pin);			/* convert pin# to a mask */
1607 }
1608 
1609 /*
1610  * Determine which APIC pin an ISA/EISA INT is attached to.
1611  */
1612 #define INTTYPE(I)	(io_apic_ints[(I)].int_type)
1613 #define INTPIN(I)	(io_apic_ints[(I)].dst_apic_int)
1614 #define INTIRQ(I)	(io_apic_ints[(I)].int_vector)
1615 #define INTAPIC(I)	(ID_TO_IO(io_apic_ints[(I)].dst_apic_id))
1616 
1617 #define SRCBUSIRQ(I)	(io_apic_ints[(I)].src_bus_irq)
1618 int
1619 isa_apic_irq(int isa_irq)
1620 {
1621 	int     intr;
1622 
1623 	for (intr = 0; intr < nintrs; ++intr) {		/* check each record */
1624 		if (INTTYPE(intr) == 0) {		/* standard INT */
1625 			if (SRCBUSIRQ(intr) == isa_irq) {
1626 				if (apic_int_is_bus_type(intr, ISA) ||
1627 			            apic_int_is_bus_type(intr, EISA)) {
1628 					if (INTIRQ(intr) == 0xff)
1629 						return -1; /* unassigned */
1630 					return INTIRQ(intr);	/* found */
1631 				}
1632 			}
1633 		}
1634 	}
1635 	return -1;					/* NOT found */
1636 }
1637 
1638 
1639 /*
1640  * Determine which APIC pin a PCI INT is attached to.
1641  */
1642 #define SRCBUSID(I)	(io_apic_ints[(I)].src_bus_id)
1643 #define SRCBUSDEVICE(I)	((io_apic_ints[(I)].src_bus_irq >> 2) & 0x1f)
1644 #define SRCBUSLINE(I)	(io_apic_ints[(I)].src_bus_irq & 0x03)
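/*
 * For PCI source buses the MP spec encodes the interrupt in src_bus_irq:
 * bits 6:2 are the PCI device number and bits 1:0 the interrupt line
 * (0 = INT_A# .. 3 = INT_D#), which is what the two macros above decode.
 * For example, a src_bus_irq of 0x0d means device 3, INT_B#.
 */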
1645 int
1646 pci_apic_irq(int pciBus, int pciDevice, int pciInt)
1647 {
1648 	int     intr;
1649 
1650 	--pciInt;					/* zero based */
1651 
1652 	for (intr = 0; intr < nintrs; ++intr) {		/* check each record */
1653 		if ((INTTYPE(intr) == 0)		/* standard INT */
1654 		    && (SRCBUSID(intr) == pciBus)
1655 		    && (SRCBUSDEVICE(intr) == pciDevice)
1656 		    && (SRCBUSLINE(intr) == pciInt)) {	/* a candidate IRQ */
1657 			if (apic_int_is_bus_type(intr, PCI)) {
1658 				if (INTIRQ(intr) == 0xff) {
1659 					kprintf("IOAPIC: pci_apic_irq() "
1660 						"failed\n");
1661 					return -1;	/* unassigned */
1662 				}
1663 				return INTIRQ(intr);	/* exact match */
1664 			}
1665 		}
1666 	}
1667 
1668 	return -1;					/* NOT found */
1669 }
1670 
1671 int
1672 next_apic_irq(int irq)
1673 {
1674 	int intr, ointr;
1675 	int bus, bustype;
1676 
1677 	bus = 0;
1678 	bustype = 0;
1679 	for (intr = 0; intr < nintrs; intr++) {
1680 		if (INTIRQ(intr) != irq || INTTYPE(intr) != 0)
1681 			continue;
1682 		bus = SRCBUSID(intr);
1683 		bustype = apic_bus_type(bus);
1684 		if (bustype != ISA &&
1685 		    bustype != EISA &&
1686 		    bustype != PCI)
1687 			continue;
1688 		break;
1689 	}
1690 	if (intr >= nintrs) {
1691 		return -1;
1692 	}
1693 	for (ointr = intr + 1; ointr < nintrs; ointr++) {
1694 		if (INTTYPE(ointr) != 0)
1695 			continue;
1696 		if (bus != SRCBUSID(ointr))
1697 			continue;
1698 		if (bustype == PCI) {
1699 			if (SRCBUSDEVICE(intr) != SRCBUSDEVICE(ointr))
1700 				continue;
1701 			if (SRCBUSLINE(intr) != SRCBUSLINE(ointr))
1702 				continue;
1703 		}
1704 		if (bustype == ISA || bustype == EISA) {
1705 			if (SRCBUSIRQ(intr) != SRCBUSIRQ(ointr))
1706 				continue;
1707 		}
1708 		if (INTPIN(intr) == INTPIN(ointr))
1709 			continue;
1710 		break;
1711 	}
1712 	if (ointr >= nintrs) {
1713 		return -1;
1714 	}
1715 	return INTIRQ(ointr);
1716 }
1717 #undef SRCBUSLINE
1718 #undef SRCBUSDEVICE
1719 #undef SRCBUSID
1720 #undef SRCBUSIRQ
1721 
1722 #undef INTPIN
1723 #undef INTIRQ
1724 #undef INTAPIC
1725 #undef INTTYPE
1726 
1727 #endif
1728 
1729 /*
1730  * Reprogram the MB chipset to NOT redirect an ISA INTerrupt.
1731  *
1732  * XXX FIXME:
1733  *  Exactly what this means is unclear at this point.  It is a solution
1734  *  for motherboards that redirect the MBIRQ0 pin.  Generically a motherboard
1735  *  could route any of the ISA INTs to upper (>15) IRQ values.  But most would
1736  *  NOT be redirected via MBIRQ0, thus "undirect()ing" them would NOT be an
1737  *  option.
1738  */
1739 int
1740 undirect_isa_irq(int rirq)
1741 {
1742 #if defined(READY)
1743 	if (bootverbose)
1744 	    kprintf("Freeing redirected ISA irq %d.\n", rirq);
1745 	/** FIXME: tickle the MB redirector chip */
1746 	return /* XXX */;
1747 #else
1748 	if (bootverbose)
1749 	    kprintf("Freeing (NOT implemented) redirected ISA irq %d.\n", rirq);
1750 	return 0;
1751 #endif  /* READY */
1752 }
1753 
1754 
1755 /*
1756  * Reprogram the MB chipset to NOT redirect a PCI INTerrupt
1757  */
1758 int
1759 undirect_pci_irq(int rirq)
1760 {
1761 #if defined(READY)
1762 	if (bootverbose)
1763 		kprintf("Freeing redirected PCI irq %d.\n", rirq);
1764 
1765 	/** FIXME: tickle the MB redirector chip */
1766 	return /* XXX */;
1767 #else
1768 	if (bootverbose)
1769 		kprintf("Freeing (NOT implemented) redirected PCI irq %d.\n",
1770 		       rirq);
1771 	return 0;
1772 #endif  /* READY */
1773 }
1774 
1775 
1776 /*
1777  * given a bus ID, return:
1778  *  the bus type if found
1779  *  -1 if NOT found
1780  */
1781 int
1782 apic_bus_type(int id)
1783 {
1784 	int     x;
1785 
1786 	for (x = 0; x < mp_nbusses; ++x)
1787 		if (bus_data[x].bus_id == id)
1788 			return bus_data[x].bus_type;
1789 
1790 	return -1;
1791 }
1792 
1793 #ifdef APIC_IO
1794 
1795 /*
1796  * given a LOGICAL APIC# and pin#, return:
1797  *  the associated src bus ID if found
1798  *  -1 if NOT found
1799  */
1800 int
1801 apic_src_bus_id(int apic, int pin)
1802 {
1803 	int     x;
1804 
1805 	/* search each of the possible INTerrupt sources */
1806 	for (x = 0; x < nintrs; ++x)
1807 		if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1808 		    (pin == io_apic_ints[x].dst_apic_int))
1809 			return (io_apic_ints[x].src_bus_id);
1810 
1811 	return -1;		/* NOT found */
1812 }
1813 
1814 /*
1815  * given a LOGICAL APIC# and pin#, return:
1816  *  the associated src bus IRQ if found
1817  *  -1 if NOT found
1818  */
1819 int
1820 apic_src_bus_irq(int apic, int pin)
1821 {
1822 	int     x;
1823 
1824 	for (x = 0; x < nintrs; x++)
1825 		if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1826 		    (pin == io_apic_ints[x].dst_apic_int))
1827 			return (io_apic_ints[x].src_bus_irq);
1828 
1829 	return -1;		/* NOT found */
1830 }
1831 
1832 
1833 /*
1834  * given a LOGICAL APIC# and pin#, return:
1835  *  the associated INTerrupt type if found
1836  *  -1 if NOT found
1837  */
1838 int
1839 apic_int_type(int apic, int pin)
1840 {
1841 	int     x;
1842 
1843 	/* search each of the possible INTerrupt sources */
1844 	for (x = 0; x < nintrs; ++x) {
1845 		if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1846 		    (pin == io_apic_ints[x].dst_apic_int))
1847 			return (io_apic_ints[x].int_type);
1848 	}
1849 	return -1;		/* NOT found */
1850 }
1851 
1852 /*
1853  * Return the IRQ associated with an APIC pin
1854  */
1855 int
1856 apic_irq(int apic, int pin)
1857 {
1858 	int x;
1859 	int res;
1860 
1861 	for (x = 0; x < nintrs; ++x) {
1862 		if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1863 		    (pin == io_apic_ints[x].dst_apic_int)) {
1864 			res = io_apic_ints[x].int_vector;
1865 			if (res == 0xff)
1866 				return -1;
1867 			if (apic != int_to_apicintpin[res].ioapic)
1868 				panic("apic_irq: inconsistent table %d/%d", apic, int_to_apicintpin[res].ioapic);
1869 			if (pin != int_to_apicintpin[res].int_pin)
1870 				panic("apic_irq inconsistent table (2)");
1871 			return res;
1872 		}
1873 	}
1874 	return -1;
1875 }
1876 
1877 
1878 /*
1879  * given a LOGICAL APIC# and pin#, return:
1880  *  the associated trigger mode if found
1881  *  -1 if NOT found
1882  */
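/*
 * int_flags follows the MP spec I/O interrupt entry encoding: bits 1:0 are
 * the polarity (PO) field and bits 3:2 the trigger mode (EL) field, with
 * 00 meaning "conforms to bus", 01 active-high/edge and 11 active-low/level.
 * apic_trigger() and apic_polarity() simply extract those fields.
 */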
1883 int
1884 apic_trigger(int apic, int pin)
1885 {
1886 	int     x;
1887 
1888 	/* search each of the possible INTerrupt sources */
1889 	for (x = 0; x < nintrs; ++x)
1890 		if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1891 		    (pin == io_apic_ints[x].dst_apic_int))
1892 			return ((io_apic_ints[x].int_flags >> 2) & 0x03);
1893 
1894 	return -1;		/* NOT found */
1895 }
1896 
1897 
1898 /*
1899  * given a LOGICAL APIC# and pin#, return:
1900  *  the associated 'active' level if found
1901  *  -1 if NOT found
1902  */
1903 int
1904 apic_polarity(int apic, int pin)
1905 {
1906 	int     x;
1907 
1908 	/* search each of the possible INTerrupt sources */
1909 	for (x = 0; x < nintrs; ++x)
1910 		if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
1911 		    (pin == io_apic_ints[x].dst_apic_int))
1912 			return (io_apic_ints[x].int_flags & 0x03);
1913 
1914 	return -1;		/* NOT found */
1915 }
1916 
1917 #endif
1918 
1919 /*
1920  * set data according to MP defaults
1921  * FIXME: probably not complete yet...
1922  */
1923 static void
1924 default_mp_table(int type)
1925 {
1926 	int     ap_cpu_id;
1927 #if defined(APIC_IO)
1928 	int     io_apic_id;
1929 	int     pin;
1930 #endif	/* APIC_IO */
1931 
1932 #if 0
1933 	kprintf("  MP default config type: %d\n", type);
1934 	switch (type) {
1935 	case 1:
1936 		kprintf("   bus: ISA, APIC: 82489DX\n");
1937 		break;
1938 	case 2:
1939 		kprintf("   bus: EISA, APIC: 82489DX\n");
1940 		break;
1941 	case 3:
1942 		kprintf("   bus: EISA, APIC: 82489DX\n");
1943 		break;
1944 	case 4:
1945 		kprintf("   bus: MCA, APIC: 82489DX\n");
1946 		break;
1947 	case 5:
1948 		kprintf("   bus: ISA+PCI, APIC: Integrated\n");
1949 		break;
1950 	case 6:
1951 		kprintf("   bus: EISA+PCI, APIC: Integrated\n");
1952 		break;
1953 	case 7:
1954 		kprintf("   bus: MCA+PCI, APIC: Integrated\n");
1955 		break;
1956 	default:
1957 		kprintf("   future type\n");
1958 		break;
1959 		/* NOTREACHED */
1960 	}
1961 #endif	/* 0 */
1962 
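	/*
	 * The local APIC ID register keeps the APIC ID in bits 31:24,
	 * hence the mask-and-shift when reading the BSP's ID below.
	 */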
1963 	boot_cpu_id = (lapic->id & APIC_ID_MASK) >> 24;
1964 	ap_cpu_id = (boot_cpu_id == 0) ? 1 : 0;
1965 
1966 	/* BSP */
1967 	CPU_TO_ID(0) = boot_cpu_id;
1968 	ID_TO_CPU(boot_cpu_id) = 0;
1969 
1970 	/* one and only AP */
1971 	CPU_TO_ID(1) = ap_cpu_id;
1972 	ID_TO_CPU(ap_cpu_id) = 1;
1973 
1974 #if defined(APIC_IO)
1975 	/* one and only IO APIC */
1976 	io_apic_id = (io_apic_read(0, IOAPIC_ID) & APIC_ID_MASK) >> 24;
1977 
1978 	/*
1979 	 * Sanity check; refer to the MP spec, section 3.6.6, last paragraph.
1980 	 * Necessary because some hardware doesn't properly set up the IO APIC.
1981 	 */
1982 #if defined(REALLY_ANAL_IOAPICID_VALUE)
1983 	if (io_apic_id != 2) {
1984 #else
1985 	if ((io_apic_id == 0) || (io_apic_id == 1) || (io_apic_id == 15)) {
1986 #endif	/* REALLY_ANAL_IOAPICID_VALUE */
1987 		io_apic_set_id(0, 2);
1988 		io_apic_id = 2;
1989 	}
1990 	IO_TO_ID(0) = io_apic_id;
1991 	ID_TO_IO(io_apic_id) = 0;
1992 #endif	/* APIC_IO */
1993 
1994 	/* fill out bus entries */
1995 	switch (type) {
1996 	case 1:
1997 	case 2:
1998 	case 3:
1999 	case 4:
2000 	case 5:
2001 	case 6:
2002 	case 7:
2003 		bus_data[0].bus_id = default_data[type - 1][1];
2004 		bus_data[0].bus_type = default_data[type - 1][2];
2005 		bus_data[1].bus_id = default_data[type - 1][3];
2006 		bus_data[1].bus_type = default_data[type - 1][4];
2007 		break;
2008 
2009 	/* case 4: case 7:		   MCA NOT supported */
2010 	default:		/* illegal/reserved */
2011 		panic("BAD default MP config: %d", type);
2012 		/* NOTREACHED */
2013 	}
2014 
2015 #if defined(APIC_IO)
2016 	/* general cases from MP v1.4, table 5-2 */
2017 	for (pin = 0; pin < 16; ++pin) {
2018 		io_apic_ints[pin].int_type = 0;
2019 		io_apic_ints[pin].int_flags = 0x05;	/* edge/active-hi */
2020 		io_apic_ints[pin].src_bus_id = 0;
2021 		io_apic_ints[pin].src_bus_irq = pin;	/* IRQ2 caught below */
2022 		io_apic_ints[pin].dst_apic_id = io_apic_id;
2023 		io_apic_ints[pin].dst_apic_int = pin;	/* 1-to-1 */
2024 	}
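	/*
	 * int_flags 0x05 decodes, per the MP spec, as polarity bits 0-1
	 * == 01 (active high) and trigger bits 2-3 == 01 (edge); these
	 * are the fields that apic_polarity() and apic_trigger() extract
	 * earlier in this file.
	 */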
2025 
2026 	/* special cases from MP v1.4, table 5-2 */
2027 	if (type == 2) {
2028 		io_apic_ints[2].int_type = 0xff;	/* N/C */
2029 		io_apic_ints[13].int_type = 0xff;	/* N/C */
2030 #if !defined(APIC_MIXED_MODE)
2031 		/** FIXME: ??? */
2032 		panic("sorry, can't support type 2 default yet");
2033 #endif	/* APIC_MIXED_MODE */
2034 	}
2035 	else
2036 		io_apic_ints[2].src_bus_irq = 0;	/* ISA IRQ0 is on APIC INT 2 */
2037 
2038 	if (type == 7)
2039 		io_apic_ints[0].int_type = 0xff;	/* N/C */
2040 	else
2041 		io_apic_ints[0].int_type = 3;	/* vectored 8259 */
2042 #endif	/* APIC_IO */
2043 }
2044 
2045 /*
2046  * Map a physical memory address representing I/O into KVA.  The I/O
2047  * block is assumed not to cross a page boundary.
2048  */
2049 void *
2050 permanent_io_mapping(vm_paddr_t pa)
2051 {
2052 	KKASSERT(pa < 0x100000000LL);
2053 
2054 	return pmap_mapdev_uncacheable(pa, PAGE_SIZE);
2055 }
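
/*
 * Example usage (hypothetical): mapping the default local APIC base,
 * which on x86 normally lives at physical address 0xfee00000:
 *
 *	void *lapic_va = permanent_io_mapping(0xfee00000);
 */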
2056 
2057 /*
2058  * start each AP in our list
2059  */
2060 static int
2061 start_all_aps(u_int boot_addr)
2062 {
2063 	vm_offset_t va = boot_address + KERNBASE;
2064 	u_int64_t *pt4, *pt3, *pt2;
2065 	int     x, i, pg;
2066 	int	shift;
2067 	int	smicount;
2068 	int	smibest;
2069 	int	smilast;
2070 	u_char  mpbiosreason;
2071 	u_long  mpbioswarmvec;
2072 	struct mdglobaldata *gd;
2073 	struct privatespace *ps;
2074 
2075 	POSTCODE(START_ALL_APS_POST);
2076 
2077 	/* Initialize BSP's local APIC */
2078 	apic_initialize(TRUE);
2079 	bsp_apic_ready = 1;
2080 
2081 	/* install the AP 1st level boot code */
2082 	pmap_kenter(va, boot_address);
2083 	cpu_invlpg((void *)va);		/* JG XXX */
2084 	bcopy(mptramp_start, (void *)va, bootMP_size);
2085 
	/* Locate the page tables; they'll be below the trampoline */
2087 	pt4 = (u_int64_t *)(uintptr_t)(mptramp_pagetables + KERNBASE);
2088 	pt3 = pt4 + (PAGE_SIZE) / sizeof(u_int64_t);
2089 	pt2 = pt3 + (PAGE_SIZE) / sizeof(u_int64_t);
2090 
2091 	/* Create the initial 1GB replicated page tables */
2092 	for (i = 0; i < 512; i++) {
2093 		/* Each slot of the level 4 pages points to the same level 3 page */
2094 		pt4[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables + PAGE_SIZE);
2095 		pt4[i] |= PG_V | PG_RW | PG_U;
2096 
2097 		/* Each slot of the level 3 pages points to the same level 2 page */
2098 		pt3[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables + (2 * PAGE_SIZE));
2099 		pt3[i] |= PG_V | PG_RW | PG_U;
2100 
2101 		/* The level 2 page slots are mapped with 2MB pages for 1GB. */
2102 		pt2[i] = i * (2 * 1024 * 1024);
2103 		pt2[i] |= PG_V | PG_RW | PG_PS | PG_U;
2104 	}
2105 
2106 	/* save the current value of the warm-start vector */
2107 	mpbioswarmvec = *((u_int32_t *) WARMBOOT_OFF);
2108 	outb(CMOS_REG, BIOS_RESET);
2109 	mpbiosreason = inb(CMOS_DATA);
2110 
2111 	/* setup a vector to our boot code */
2112 	*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
2113 	*((volatile u_short *) WARMBOOT_SEG) = (boot_address >> 4);
2114 	outb(CMOS_REG, BIOS_RESET);
2115 	outb(CMOS_DATA, BIOS_WARM);	/* 'warm-start' */
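	/*
	 * With the shutdown status byte (CMOS 0x0f) set to BIOS_WARM, a
	 * CPU coming out of reset via the BIOS jumps through the warm-boot
	 * vector at 0x467/0x469 (pointed at our trampoline above) instead
	 * of performing a full POST.
	 */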
2116 
2117 	/*
2118 	 * If we have a TSC we can figure out the SMI interrupt rate.
2119 	 * The SMI does not necessarily use a constant rate.  Spend
2120 	 * up to 250ms trying to figure it out.
2121 	 */
2122 	smibest = 0;
2123 	if (cpu_feature & CPUID_TSC) {
2124 		set_apic_timer(275000);
2125 		smilast = read_apic_timer();
2126 		for (x = 0; x < 20 && read_apic_timer(); ++x) {
2127 			smicount = smitest();
2128 			if (smibest == 0 || smilast - smicount < smibest)
2129 				smibest = smilast - smicount;
2130 			smilast = smicount;
2131 		}
2132 		if (smibest > 250000)
2133 			smibest = 0;
2134 		if (smibest) {
2135 			smibest = smibest * (int64_t)1000000 /
2136 				  get_apic_timer_frequency();
2137 		}
2138 	}
2139 	if (smibest)
2140 		kprintf("SMI Frequency (worst case): %d Hz (%d us)\n",
2141 			1000000 / smibest, smibest);
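	/*
	 * smibest now holds the shortest observed interval between SMIs,
	 * in microseconds, or 0 if no significant SMI activity was
	 * detected.
	 */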
2142 
2143 	/* start each AP */
2144 	for (x = 1; x <= mp_naps; ++x) {
2145 
		/* This is a bit verbose; it will go away soon.  */
2147 
2148 		/* first page of AP's private space */
2149 		pg = x * x86_64_btop(sizeof(struct privatespace));
2150 
2151 		/* allocate new private data page(s) */
2152 		gd = (struct mdglobaldata *)kmem_alloc(&kernel_map,
2153 				MDGLOBALDATA_BASEALLOC_SIZE);
2154 
2155 		gd = &CPU_prvspace[x].mdglobaldata;	/* official location */
2156 		bzero(gd, sizeof(*gd));
2157 		gd->mi.gd_prvspace = ps = &CPU_prvspace[x];
2158 
2159 		/* prime data page for it to use */
2160 		mi_gdinit(&gd->mi, x);
2161 		cpu_gdinit(gd, x);
2162 		gd->gd_CMAP1 = &SMPpt[pg + 0];
2163 		gd->gd_CMAP2 = &SMPpt[pg + 1];
2164 		gd->gd_CMAP3 = &SMPpt[pg + 2];
2165 		gd->gd_PMAP1 = &SMPpt[pg + 3];
2166 		gd->gd_CADDR1 = ps->CPAGE1;
2167 		gd->gd_CADDR2 = ps->CPAGE2;
2168 		gd->gd_CADDR3 = ps->CPAGE3;
2169 		gd->gd_PADDR1 = (pt_entry_t *)ps->PPAGE1;
2170 		gd->mi.gd_ipiq = (void *)kmem_alloc(&kernel_map, sizeof(lwkt_ipiq) * (mp_naps + 1));
2171 		bzero(gd->mi.gd_ipiq, sizeof(lwkt_ipiq) * (mp_naps + 1));
2172 
2173 		/* setup a vector to our boot code */
2174 		*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
2175 		*((volatile u_short *) WARMBOOT_SEG) = (boot_addr >> 4);
2176 		outb(CMOS_REG, BIOS_RESET);
2177 		outb(CMOS_DATA, BIOS_WARM);	/* 'warm-start' */
2178 
2179 		/*
2180 		 * Setup the AP boot stack
2181 		 */
2182 		bootSTK = &ps->idlestack[UPAGES*PAGE_SIZE/2];
2183 		bootAP = x;
2184 
2185 		/* attempt to start the Application Processor */
2186 		CHECK_INIT(99);	/* setup checkpoints */
2187 		if (!start_ap(gd, boot_addr, smibest)) {
2188 			kprintf("AP #%d (PHY# %d) failed!\n", x, CPU_TO_ID(x));
2189 			CHECK_PRINT("trace");	/* show checkpoints */
2190 			/* better panic as the AP may be running loose */
2191 			kprintf("panic y/n? [y] ");
2192 			if (cngetc() != 'n')
2193 				panic("bye-bye");
2194 		}
2195 		CHECK_PRINT("trace");		/* show checkpoints */
2196 
2197 		/* record its version info */
2198 		cpu_apic_versions[x] = cpu_apic_versions[0];
2199 	}
2200 
2201 	/* set ncpus to 1 + highest logical cpu.  Not all may have come up */
2202 	ncpus = x;
2203 
2204 	/* ncpus2 -- ncpus rounded down to the nearest power of 2 */
2205 	for (shift = 0; (1 << shift) <= ncpus; ++shift)
2206 		;
2207 	--shift;
2208 	ncpus2_shift = shift;
2209 	ncpus2 = 1 << shift;
2210 	ncpus2_mask = ncpus2 - 1;
2211 
2212 	/* ncpus_fit -- ncpus rounded up to the nearest power of 2 */
2213 	if ((1 << shift) < ncpus)
2214 		++shift;
2215 	ncpus_fit = 1 << shift;
2216 	ncpus_fit_mask = ncpus_fit - 1;
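	/*
	 * Example: with ncpus == 6 the loops above yield ncpus2_shift == 2,
	 * ncpus2 == 4 (ncpus2_mask == 3) and ncpus_fit == 8
	 * (ncpus_fit_mask == 7).
	 */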
2217 
2218 	/* build our map of 'other' CPUs */
2219 	mycpu->gd_other_cpus = smp_startup_mask & ~(1 << mycpu->gd_cpuid);
2220 	mycpu->gd_ipiq = (void *)kmem_alloc(&kernel_map, sizeof(lwkt_ipiq) * ncpus);
2221 	bzero(mycpu->gd_ipiq, sizeof(lwkt_ipiq) * ncpus);
2222 
2223 	/* fill in our (BSP) APIC version */
2224 	cpu_apic_versions[0] = lapic->version;
2225 
2226 	/* restore the warmstart vector */
	*(u_int32_t *) WARMBOOT_OFF = mpbioswarmvec;
2228 	outb(CMOS_REG, BIOS_RESET);
2229 	outb(CMOS_DATA, mpbiosreason);
2230 
2231 	/*
	 * NOTE!  The idlestack for the BSP was set up by locore.  Finish
2233 	 * up, clean out the P==V mapping we did earlier.
2234 	 */
2235 #if JGXXX
2236 	for (x = 0; x < NKPT; x++)
2237 		PTD[x] = 0;
2238 #endif
2239 	pmap_set_opt();
2240 
2241 	/* number of APs actually started */
2242 	return ncpus - 1;
2243 }
2244 
2245 
2246 /*
2247  * load the 1st level AP boot code into base memory.
2248  */
2249 
2250 /* targets for relocation */
2251 extern void bigJump(void);
2252 extern void bootCodeSeg(void);
2253 extern void bootDataSeg(void);
2254 extern void MPentry(void);
2255 extern u_int MP_GDT;
2256 extern u_int mp_gdtbase;
2257 
2258 #if 0
2259 
2260 static void
2261 install_ap_tramp(u_int boot_addr)
2262 {
2263 	int     x;
2264 	int     size = *(int *) ((u_long) & bootMP_size);
2265 	u_char *src = (u_char *) ((u_long) bootMP);
2266 	u_char *dst = (u_char *) boot_addr + KERNBASE;
2267 	u_int   boot_base = (u_int) bootMP;
2268 	u_int8_t *dst8;
2269 	u_int16_t *dst16;
2270 	u_int32_t *dst32;
2271 
2272 	POSTCODE(INSTALL_AP_TRAMP_POST);
2273 
2274 	for (x = 0; x < size; ++x)
2275 		*dst++ = *src++;
2276 
2277 	/*
2278 	 * modify addresses in code we just moved to basemem. unfortunately we
2279 	 * need fairly detailed info about mpboot.s for this to work.  changes
2280 	 * to mpboot.s might require changes here.
2281 	 */
2282 
2283 	/* boot code is located in KERNEL space */
2284 	dst = (u_char *) boot_addr + KERNBASE;
2285 
2286 	/* modify the lgdt arg */
2287 	dst32 = (u_int32_t *) (dst + ((u_int) & mp_gdtbase - boot_base));
2288 	*dst32 = boot_addr + ((u_int) & MP_GDT - boot_base);
2289 
2290 	/* modify the ljmp target for MPentry() */
2291 	dst32 = (u_int32_t *) (dst + ((u_int) bigJump - boot_base) + 1);
2292 	*dst32 = ((u_int) MPentry - KERNBASE);
2293 
2294 	/* modify the target for boot code segment */
2295 	dst16 = (u_int16_t *) (dst + ((u_int) bootCodeSeg - boot_base));
2296 	dst8 = (u_int8_t *) (dst16 + 1);
2297 	*dst16 = (u_int) boot_addr & 0xffff;
2298 	*dst8 = ((u_int) boot_addr >> 16) & 0xff;
2299 
2300 	/* modify the target for boot data segment */
2301 	dst16 = (u_int16_t *) (dst + ((u_int) bootDataSeg - boot_base));
2302 	dst8 = (u_int8_t *) (dst16 + 1);
2303 	*dst16 = (u_int) boot_addr & 0xffff;
2304 	*dst8 = ((u_int) boot_addr >> 16) & 0xff;
2305 }
2306 
2307 #endif
2308 
2309 /*
2310  * This function starts the AP (application processor) identified
 * by the APIC ID 'physical_cpu'.  It does quite a "song and dance"
2312  * to accomplish this.  This is necessary because of the nuances
2313  * of the different hardware we might encounter.  It ain't pretty,
2314  * but it seems to work.
2315  *
2316  * NOTE: eventually an AP gets to ap_init(), which is called just
2317  * before the AP goes into the LWKT scheduler's idle loop.
2318  */
2319 static int
2320 start_ap(struct mdglobaldata *gd, u_int boot_addr, int smibest)
2321 {
2322 	int     physical_cpu;
2323 	int     vector;
2324 	u_long  icr_lo, icr_hi;
2325 
2326 	POSTCODE(START_AP_POST);
2327 
2328 	/* get the PHYSICAL APIC ID# */
2329 	physical_cpu = CPU_TO_ID(gd->mi.gd_cpuid);
2330 
2331 	/* calculate the vector */
2332 	vector = (boot_addr >> 12) & 0xff;
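	/*
	 * The STARTUP IPI vector is the physical page number of the boot
	 * code; the AP begins execution at (vector << 12), so boot_addr
	 * must be page aligned and below 1MB.
	 */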
2333 
2334 	/* We don't want anything interfering */
2335 	cpu_disable_intr();
2336 
2337 	/* Make sure the target cpu sees everything */
2338 	wbinvd();
2339 
2340 	/*
	 * Try to detect when a SMI has occurred; wait up to 200ms.
2342 	 *
2343 	 * If a SMI occurs during an AP reset but before we issue
2344 	 * the STARTUP command, the AP may brick.  To work around
2345 	 * this problem we hold off doing the AP startup until
2346 	 * after we have detected the SMI.  Hopefully another SMI
2347 	 * will not occur before we finish the AP startup.
2348 	 *
2349 	 * Retries don't seem to help.  SMIs have a window of opportunity
2350 	 * and if USB->legacy keyboard emulation is enabled in the BIOS
2351 	 * the interrupt rate can be quite high.
2352 	 *
	 * NOTE: Don't worry about the L1 cache load; it might bloat
	 *	 ldelta a little, but ndelta will be so huge when the SMI
	 *	 occurs that the detection logic will still work fine.
2356 	 */
2357 	if (smibest) {
2358 		set_apic_timer(200000);
2359 		smitest();
2360 	}
2361 
2362 	/*
	 * First we do an INIT/RESET IPI.  This INIT IPI might be run,
	 * resetting and running the target CPU.  OR this INIT IPI might be
	 * latched (P5 bug), the CPU waiting for a STARTUP IPI.  OR this
	 * INIT IPI might be ignored.
2367 	 *
2368 	 * see apic/apicreg.h for icr bit definitions.
2369 	 *
2370 	 * TIME CRITICAL CODE, DO NOT DO ANY KPRINTFS IN THE HOT PATH.
2371 	 */
2372 
2373 	/*
2374 	 * Setup the address for the target AP.  We can setup
2375 	 * icr_hi once and then just trigger operations with
2376 	 * icr_lo.
2377 	 */
2378 	icr_hi = lapic->icr_hi & ~APIC_ID_MASK;
2379 	icr_hi |= (physical_cpu << 24);
2380 	icr_lo = lapic->icr_lo & 0xfff00000;
2381 	lapic->icr_hi = icr_hi;
2382 
2383 	/*
2384 	 * Do an INIT IPI: assert RESET
2385 	 *
2386 	 * Use edge triggered mode to assert INIT
2387 	 */
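	/* 0x00004500 = INIT (0x500) + assert (0x4000), edge trigger */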
2388 	lapic->icr_lo = icr_lo | 0x00004500;
2389 	while (lapic->icr_lo & APIC_DELSTAT_MASK)
2390 		 /* spin */ ;
2391 
2392 	/*
2393 	 * The spec calls for a 10ms delay but we may have to use a
2394 	 * MUCH lower delay to avoid bricking an AP due to a fast SMI
2395 	 * interrupt.  We have other loops here too and dividing by 2
2396 	 * doesn't seem to be enough even after subtracting 350us,
2397 	 * so we divide by 4.
2398 	 *
2399 	 * Our minimum delay is 150uS, maximum is 10ms.  If no SMI
2400 	 * interrupt was detected we use the full 10ms.
2401 	 */
2402 	if (smibest == 0)
2403 		u_sleep(10000);
2404 	else if (smibest < 150 * 4 + 350)
2405 		u_sleep(150);
2406 	else if ((smibest - 350) / 4 < 10000)
2407 		u_sleep((smibest - 350) / 4);
2408 	else
2409 		u_sleep(10000);
2410 
2411 	/*
2412 	 * Do an INIT IPI: deassert RESET
2413 	 *
2414 	 * Use level triggered mode to deassert.  It is unclear
2415 	 * why we need to do this.
2416 	 */
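	/* 0x00008500 = INIT (0x500) + level trigger (0x8000), deassert */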
2417 	lapic->icr_lo = icr_lo | 0x00008500;
2418 	while (lapic->icr_lo & APIC_DELSTAT_MASK)
2419 		 /* spin */ ;
2420 	u_sleep(150);				/* wait 150us */
2421 
2422 	/*
	 * Next we do a STARTUP IPI: the previous INIT IPI might still be
	 * latched (P5 bug), in which case this 1st STARTUP would terminate
	 * immediately and the previously started INIT IPI would continue.
	 * OR the previous INIT IPI has already run, and this STARTUP IPI
	 * will run.  OR the previous INIT IPI was ignored, and this STARTUP
	 * IPI will run.
2429 	 */
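	/* 0x00000600 = STARTUP (SIPI); low byte is the boot page vector */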
2430 	lapic->icr_lo = icr_lo | 0x00000600 | vector;
2431 	while (lapic->icr_lo & APIC_DELSTAT_MASK)
2432 		 /* spin */ ;
2433 	u_sleep(200);		/* wait ~200uS */
2434 
2435 	/*
2436 	 * Finally we do a 2nd STARTUP IPI: this 2nd STARTUP IPI should run IF
2437 	 * the previous STARTUP IPI was cancelled by a latched INIT IPI. OR
2438 	 * this STARTUP IPI will be ignored, as only ONE STARTUP IPI is
2439 	 * recognized after hardware RESET or INIT IPI.
2440 	 */
2441 	lapic->icr_lo = icr_lo | 0x00000600 | vector;
2442 	while (lapic->icr_lo & APIC_DELSTAT_MASK)
2443 		 /* spin */ ;
2444 
2445 	/* Resume normal operation */
2446 	cpu_enable_intr();
2447 
2448 	/* wait for it to start, see ap_init() */
	set_apic_timer(5000000);	/* == 5 seconds */
2450 	while (read_apic_timer()) {
2451 		if (smp_startup_mask & (1 << gd->mi.gd_cpuid))
2452 			return 1;	/* return SUCCESS */
2453 	}
2454 
2455 	return 0;		/* return FAILURE */
2456 }
2457 
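/*
 * Spin reading the TSC in short bursts while the previously armed APIC
 * timer counts down.  A burst that takes more than twice the smallest
 * delta seen so far is assumed to have been interrupted by an SMI.
 * Returns the remaining APIC timer count when such a gap is detected,
 * or 0 if the timer expired without one.
 */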
2458 static
2459 int
2460 smitest(void)
2461 {
2462 	int64_t	ltsc;
2463 	int64_t	ntsc;
2464 	int64_t	ldelta;
2465 	int64_t	ndelta;
2466 	int count;
2467 
2468 	ldelta = 0;
2469 	ndelta = 0;
2470 	while (read_apic_timer()) {
2471 		ltsc = rdtsc();
2472 		for (count = 0; count < 100; ++count)
2473 			ntsc = rdtsc();	/* force loop to occur */
2474 		if (ldelta) {
2475 			ndelta = ntsc - ltsc;
2476 			if (ldelta > ndelta)
2477 				ldelta = ndelta;
2478 			if (ndelta > ldelta * 2)
2479 				break;
2480 		} else {
2481 			ldelta = ntsc - ltsc;
2482 		}
2483 	}
2484 	return(read_apic_timer());
2485 }
2486 
2487 /*
 * Lazy flush the TLB on all other CPUs.  DEPRECATED.
2489  *
2490  * If for some reason we were unable to start all cpus we cannot safely
2491  * use broadcast IPIs.
2492  */
2493 void
2494 smp_invltlb(void)
2495 {
2496 #ifdef SMP
2497 	if (smp_startup_mask == smp_active_mask) {
2498 		all_but_self_ipi(XINVLTLB_OFFSET);
2499 	} else {
2500 		selected_apic_ipi(smp_active_mask, XINVLTLB_OFFSET,
2501 			APIC_DELMODE_FIXED);
2502 	}
2503 #endif
2504 }
2505 
2506 /*
 * When called, the executing CPU will send an IPI to all other CPUs
 * requesting that they halt execution.
2509  *
2510  * Usually (but not necessarily) called with 'other_cpus' as its arg.
2511  *
2512  *  - Signals all CPUs in map to stop.
2513  *  - Waits for each to stop.
2514  *
2515  * Returns:
2516  *  -1: error
2517  *   0: NA
2518  *   1: ok
2519  *
 * XXX FIXME: this is not MP-safe; it needs a lock to prevent multiple
 *            CPUs from executing it at the same time.
2522  */
2523 int
2524 stop_cpus(u_int map)
2525 {
2526 	map &= smp_active_mask;
2527 
2528 	/* send the Xcpustop IPI to all CPUs in map */
2529 	selected_apic_ipi(map, XCPUSTOP_OFFSET, APIC_DELMODE_FIXED);
2530 
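	/* each target's Xcpustop handler sets its bit in stopped_cpus */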
2531 	while ((stopped_cpus & map) != map)
2532 		/* spin */ ;
2533 
2534 	return 1;
2535 }
2536 
2537 
2538 /*
2539  * Called by a CPU to restart stopped CPUs.
2540  *
2541  * Usually (but not necessarily) called with 'stopped_cpus' as its arg.
2542  *
2543  *  - Signals all CPUs in map to restart.
2544  *  - Waits for each to restart.
2545  *
2546  * Returns:
2547  *  -1: error
2548  *   0: NA
2549  *   1: ok
2550  */
2551 int
2552 restart_cpus(u_int map)
2553 {
2554 	/* signal other cpus to restart */
2555 	started_cpus = map & smp_active_mask;
2556 
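	/* stopped cpus see their started_cpus bit and clear stopped_cpus */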
2557 	while ((stopped_cpus & map) != 0) /* wait for each to clear its bit */
2558 		/* spin */ ;
2559 
2560 	return 1;
2561 }
2562 
2563 /*
2564  * This is called once the mpboot code has gotten us properly relocated
2565  * and the MMU turned on, etc.   ap_init() is actually the idle thread,
2566  * and when it returns the scheduler will call the real cpu_idle() main
2567  * loop for the idlethread.  Interrupts are disabled on entry and should
2568  * remain disabled at return.
2569  */
2570 void
2571 ap_init(void)
2572 {
2573 	u_int	apic_id;
2574 
2575 	/*
2576 	 * Adjust smp_startup_mask to signal the BSP that we have started
2577 	 * up successfully.  Note that we do not yet hold the BGL.  The BSP
2578 	 * is waiting for our signal.
2579 	 *
2580 	 * We can't set our bit in smp_active_mask yet because we are holding
2581 	 * interrupts physically disabled and remote cpus could deadlock
2582 	 * trying to send us an IPI.
2583 	 */
2584 	smp_startup_mask |= 1 << mycpu->gd_cpuid;
2585 	cpu_mfence();
2586 
2587 	/*
2588 	 * Interlock for finalization.  Wait until mp_finish is non-zero,
2589 	 * then get the MP lock.
2590 	 *
2591 	 * Note: We are in a critical section.
2592 	 *
2593 	 * Note: We have to synchronize td_mpcount to our desired MP state
2594 	 * before calling cpu_try_mplock().
2595 	 *
2596 	 * Note: we are the idle thread, we can only spin.
2597 	 *
	 * Note: The load fence is a memory barrier; it prevents the compiler
	 * from improperly caching mp_finish and the cpu from using a stale
	 * cached value.
2601 	 */
2602 	while (mp_finish == 0)
2603 	    cpu_lfence();
2604 	++curthread->td_mpcount;
2605 	while (cpu_try_mplock() == 0)
2606 	    ;
2607 
2608 	if (cpu_feature & CPUID_TSC) {
2609 	    /*
	     * The BSP is constantly updating tsc0_offset; figure out the
	     * relative difference to synchronize ktrdump.
2612 	     */
2613 	    tsc_offsets[mycpu->gd_cpuid] = rdtsc() - tsc0_offset;
2614 	}
2615 
2616 	/* BSP may have changed PTD while we're waiting for the lock */
2617 	cpu_invltlb();
2618 
2619 #if defined(I586_CPU) && !defined(NO_F00F_HACK)
2620 	lidt(&r_idt);
2621 #endif
2622 
2623 	/* Build our map of 'other' CPUs. */
2624 	mycpu->gd_other_cpus = smp_startup_mask & ~(1 << mycpu->gd_cpuid);
2625 
2626 	kprintf("SMP: AP CPU #%d Launched!\n", mycpu->gd_cpuid);
2627 
2628 	/* A quick check from sanity claus */
2629 	apic_id = (apic_id_to_logical[(lapic->id & 0x0f000000) >> 24]);
2630 	if (mycpu->gd_cpuid != apic_id) {
2631 		kprintf("SMP: cpuid = %d\n", mycpu->gd_cpuid);
2632 		kprintf("SMP: apic_id = %d\n", apic_id);
2633 #if JGXXX
2634 		kprintf("PTD[MPPTDI] = %p\n", (void *)PTD[MPPTDI]);
2635 #endif
2636 		panic("cpuid mismatch! boom!!");
2637 	}
2638 
2639 	/* Initialize AP's local APIC for irq's */
2640 	apic_initialize(FALSE);
2641 
2642 	/* Set memory range attributes for this CPU to match the BSP */
2643 	mem_range_AP_init();
2644 
2645 	/*
2646 	 * Once we go active we must process any IPIQ messages that may
2647 	 * have been queued, because no actual IPI will occur until we
	 * set our bit in the smp_active_mask.  If we don't, the IPI
	 * message interlock could be left set, which would also prevent
	 * further IPIs.
2651 	 *
	 * The idle loop doesn't expect the BGL to be held, and while
	 * lwkt_switch() normally cleans things up, this is a special case
	 * because we are returning almost directly into the idle loop.
2655 	 *
	 * The idle thread is never placed on the runq; make sure
	 * nothing we've done has put it there.
2658 	 */
2659 	KKASSERT(curthread->td_mpcount == 1);
2660 	smp_active_mask |= 1 << mycpu->gd_cpuid;
2661 
2662 	/*
2663 	 * Enable interrupts here.  idle_restore will also do it, but
2664 	 * doing it here lets us clean up any strays that got posted to
2665 	 * the CPU during the AP boot while we are still in a critical
2666 	 * section.
2667 	 */
2668 	__asm __volatile("sti; pause; pause"::);
2669 	mdcpu->gd_fpending = 0;
2670 
2671 	initclocks_pcpu();	/* clock interrupts (via IPIs) */
2672 	lwkt_process_ipiq();
2673 
2674 	/*
2675 	 * Releasing the mp lock lets the BSP finish up the SMP init
2676 	 */
2677 	rel_mplock();
2678 	KKASSERT((curthread->td_flags & TDF_RUNQ) == 0);
2679 }
2680 
2681 /*
2682  * Get SMP fully working before we start initializing devices.
2683  */
2684 static
2685 void
2686 ap_finish(void)
2687 {
2688 	mp_finish = 1;
2689 	if (bootverbose)
2690 		kprintf("Finish MP startup\n");
2691 	if (cpu_feature & CPUID_TSC)
2692 		tsc0_offset = rdtsc();
2693 	tsc_offsets[0] = 0;
2694 	rel_mplock();
2695 	while (smp_active_mask != smp_startup_mask) {
2696 		cpu_lfence();
2697 		if (cpu_feature & CPUID_TSC)
2698 			tsc0_offset = rdtsc();
2699 	}
2700 	while (try_mplock() == 0)
2701 		;
2702 	if (bootverbose)
2703 		kprintf("Active CPU Mask: %08x\n", smp_active_mask);
2704 }
2705 
2706 SYSINIT(finishsmp, SI_BOOT2_FINISH_SMP, SI_ORDER_FIRST, ap_finish, NULL)
2707 
2708 void
2709 cpu_send_ipiq(int dcpu)
2710 {
2711         if ((1 << dcpu) & smp_active_mask)
2712                 single_apic_ipi(dcpu, XIPIQ_OFFSET, APIC_DELMODE_FIXED);
2713 }
2714 
2715 #if 0	/* single_apic_ipi_passive() not working yet */
2716 /*
2717  * Returns 0 on failure, 1 on success
2718  */
2719 int
2720 cpu_send_ipiq_passive(int dcpu)
2721 {
2722         int r = 0;
2723         if ((1 << dcpu) & smp_active_mask) {
2724                 r = single_apic_ipi_passive(dcpu, XIPIQ_OFFSET,
2725                                         APIC_DELMODE_FIXED);
2726         }
2727 	return(r);
2728 }
2729 #endif
2730 
2731