xref: /dragonfly/sys/platform/pc64/apic/lapic.c (revision 59b0b316)
1 /*
2  * Copyright (c) 1996, by Steve Passe
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. The name of the developer may NOT be used to endorse or promote products
11  *    derived from this software without specific prior written permission.
12  *
13  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23  * SUCH DAMAGE.
24  *
25  * $FreeBSD: src/sys/i386/i386/mpapic.c,v 1.37.2.7 2003/01/25 02:31:47 peter Exp $
26  */
27 
28 #include <sys/param.h>
29 #include <sys/systm.h>
30 #include <sys/kernel.h>
31 #include <sys/bus.h>
32 #include <sys/machintr.h>
33 #include <machine/globaldata.h>
34 #include <machine/clock.h>
35 #include <machine/smp.h>
36 #include <machine/md_var.h>
37 #include <machine/pmap.h>
38 #include <machine/specialreg.h>
39 #include <machine_base/apic/lapic.h>
40 #include <machine_base/apic/ioapic.h>
41 #include <machine_base/apic/ioapic_abi.h>
42 #include <machine_base/apic/apicvar.h>
43 #include <machine_base/icu/icu_var.h>
44 #include <machine/segments.h>
45 #include <sys/thread2.h>
46 #include <sys/spinlock2.h>
47 
48 #include <machine/cputypes.h>
49 #include <machine/intr_machdep.h>
50 
51 extern int naps;
52 
/* Memory-mapped local APIC register window; set up by lapic_map(). */
53 volatile lapic_t *lapic;
54 
55 static void	lapic_timer_calibrate(void);
56 static void	lapic_timer_set_divisor(int);
57 static void	lapic_timer_fixup_handler(void *);
58 static void	lapic_timer_restart_handler(void *);
59 
60 
/* Tunable: set hw.lapic_timer_enable=0 to avoid using the LAPIC timer. */
61 static int	lapic_timer_enable = 1;
62 TUNABLE_INT("hw.lapic_timer_enable", &lapic_timer_enable);
63 
64 static void	lapic_timer_intr_reload(struct cputimer_intr *, sysclock_t);
65 static void	lapic_timer_intr_enable(struct cputimer_intr *);
66 static void	lapic_timer_intr_restart(struct cputimer_intr *);
67 static void	lapic_timer_intr_pmfixup(struct cputimer_intr *);
68 
/*
 * cputimer_intr backend descriptor for the LAPIC timer.  .freq is 0 until
 * lapic_timer_calibrate() fills it in on the BSP.
 */
69 static struct cputimer_intr lapic_cputimer_intr = {
70 	.freq = 0,
71 	.reload = lapic_timer_intr_reload,
72 	.enable = lapic_timer_intr_enable,
73 	.config = cputimer_intr_default_config,
74 	.restart = lapic_timer_intr_restart,
75 	.pmfixup = lapic_timer_intr_pmfixup,
76 	.initclock = cputimer_intr_default_initclock,
77 	.pcpuhand = NULL,
78 	.next = SLIST_ENTRY_INITIALIZER,
79 	.name = "lapic",
80 	.type = CPUTIMER_INTR_LAPIC,
81 	.prio = CPUTIMER_INTR_PRIO_LAPIC,
82 	.caps = CPUTIMER_INTR_CAP_NONE,
83 	.priv = NULL
84 };
85 
/*
 * Divide Configuration Register values tried during calibration, smallest
 * divisor first; lapic_timer_divisor_idx is -1 until calibration runs.
 */
86 static int		lapic_timer_divisor_idx = -1;
87 static const uint32_t	lapic_timer_divisors[] = {
88 	APIC_TDCR_2,	APIC_TDCR_4,	APIC_TDCR_8,	APIC_TDCR_16,
89 	APIC_TDCR_32,	APIC_TDCR_64,	APIC_TDCR_128,	APIC_TDCR_1
90 };
91 #define APIC_TIMER_NDIVISORS (int)(NELEM(lapic_timer_divisors))
92 
93 /*
94  * APIC ID <-> CPU ID mapping structures.
95  */
96 int	cpu_id_to_apic_id[NAPICID];
97 int	apic_id_to_cpu_id[NAPICID];
98 int	lapic_enable = 1;
99 
100 /*
101  * Enable LAPIC, configure interrupts.
 *
 * Called once on the BSP (bsp == TRUE) and once per AP.  IDT vectors are
 * installed only on the BSP pass since the IDT is shared.  All register
 * programming goes through the volatile 'lapic' mapping; the statement
 * ordering here is deliberate.
102  */
103 void
104 lapic_init(boolean_t bsp)
105 {
106 	uint32_t timer;
107 	u_int   temp;
108 
109 	/*
110 	 * Install vectors
111 	 *
112 	 * Since IDT is shared between BSP and APs, these vectors
113 	 * only need to be installed once; we do it on BSP.
114 	 */
115 	if (bsp) {
116 		if (cpu_vendor_id == CPU_VENDOR_AMD &&
117 		    CPUID_TO_FAMILY(cpu_id) >= 0x0f &&
118 		    CPUID_TO_FAMILY(cpu_id) < 0x17) {	/* XXX */
119 			uint32_t tcr;
120 
121 			/*
122 			 * Set the LINTEN bit in the HyperTransport
123 			 * Transaction Control Register.
124 			 *
125 			 * This will cause EXTINT and NMI interrupts
126 			 * routed over the hypertransport bus to be
127 			 * fed into the LAPIC LINT0/LINT1.  If the bit
128 			 * isn't set, the interrupts will go to the
129 			 * general cpu INTR/NMI pins.  On a dual-core
130 			 * cpu the interrupt winds up going to BOTH cpus.
131 			 * The first cpu that does the interrupt ack
132 			 * cycle will get the correct interrupt.  The
133 			 * second cpu that does it will get a spurious
134 			 * interrupt vector (typically IRQ 7).
135 			 */
			/* PCI config mechanism #1: address dev 0x18/func 0/reg 0x68 */
136 			outl(0x0cf8,
137 			    (1 << 31) |	/* enable */
138 			    (0 << 16) |	/* bus */
139 			    (0x18 << 11) | /* dev (cpu + 0x18) */
140 			    (0 << 8) |	/* func */
141 			    0x68	/* reg */
142 			    );
143 			tcr = inl(0xcfc);
144 			if ((tcr & 0x00010000) == 0) {
145 				kprintf("LAPIC: AMD LINTEN on\n");
146 				outl(0xcfc, tcr|0x00010000);
147 			}
148 			outl(0x0cf8, 0);
149 		}
150 
151 		/* Install a 'Spurious INTerrupt' vector */
152 		setidt_global(XSPURIOUSINT_OFFSET, Xspuriousint,
153 		    SDT_SYSIGT, SEL_KPL, 0);
154 
155 		/* Install a timer vector */
156 		setidt_global(XTIMER_OFFSET, Xtimer,
157 		    SDT_SYSIGT, SEL_KPL, 0);
158 
159 		/* Install an inter-CPU IPI for TLB invalidation */
160 		setidt_global(XINVLTLB_OFFSET, Xinvltlb,
161 		    SDT_SYSIGT, SEL_KPL, 0);
162 
163 		/* Install an inter-CPU IPI for IPIQ messaging */
164 		setidt_global(XIPIQ_OFFSET, Xipiq,
165 		    SDT_SYSIGT, SEL_KPL, 0);
166 
167 		/* Install an inter-CPU IPI for CPU stop/restart */
168 		setidt_global(XCPUSTOP_OFFSET, Xcpustop,
169 		    SDT_SYSIGT, SEL_KPL, 0);
170 
171 		/* Install the inter-CPU IPI vector for Xsniff */
172 		setidt_global(XSNIFF_OFFSET, Xsniff,
173 		    SDT_SYSIGT, SEL_KPL, 0);
174 	}
175 
176 	/*
177 	 * Setup LINT0 as ExtINT on the BSP.  This is theoretically an
178 	 * aggregate interrupt input from the 8259.  The INTA cycle
179 	 * will be routed to the external controller (the 8259) which
180 	 * is expected to supply the vector.
181 	 *
182 	 * Must be setup edge triggered, active high.
183 	 *
184 	 * Disable LINT0 on BSP, if I/O APIC is enabled.
185 	 *
186 	 * Disable LINT0 on the APs.  It doesn't matter what delivery
187 	 * mode we use because we leave it masked.
188 	 */
189 	temp = lapic->lvt_lint0;
190 	temp &= ~(APIC_LVT_MASKED | APIC_LVT_TRIG_MASK |
191 		  APIC_LVT_POLARITY_MASK | APIC_LVT_DM_MASK);
192 	if (bsp) {
193 		temp |= APIC_LVT_DM_EXTINT;
194 		if (ioapic_enable)
195 			temp |= APIC_LVT_MASKED;
196 	} else {
197 		temp |= APIC_LVT_DM_FIXED | APIC_LVT_MASKED;
198 	}
199 	lapic->lvt_lint0 = temp;
200 
201 	/*
202 	 * Setup LINT1 as NMI.
203 	 *
204 	 * Must be setup edge trigger, active high.
205 	 *
206 	 * Enable LINT1 on BSP, if I/O APIC is enabled.
207 	 *
208 	 * Disable LINT1 on the APs.
209 	 */
210 	temp = lapic->lvt_lint1;
211 	temp &= ~(APIC_LVT_MASKED | APIC_LVT_TRIG_MASK |
212 		  APIC_LVT_POLARITY_MASK | APIC_LVT_DM_MASK);
213 	temp |= APIC_LVT_MASKED | APIC_LVT_DM_NMI;
214 	if (bsp && ioapic_enable)
215 		temp &= ~APIC_LVT_MASKED;
216 	lapic->lvt_lint1 = temp;
217 
218 	/*
219 	 * Mask the LAPIC error interrupt, LAPIC performance counter
220 	 * interrupt.
221 	 */
222 	lapic->lvt_error = lapic->lvt_error | APIC_LVT_MASKED;
223 	lapic->lvt_pcint = lapic->lvt_pcint | APIC_LVT_MASKED;
224 
225 	/*
226 	 * Set LAPIC timer vector and mask the LAPIC timer interrupt.
	 * The timer is unmasked later by lapic_timer_intr_enable().
227 	 */
228 	timer = lapic->lvt_timer;
229 	timer &= ~APIC_LVTT_VECTOR;
230 	timer |= XTIMER_OFFSET;
231 	timer |= APIC_LVTT_MASKED;
232 	lapic->lvt_timer = timer;
233 
234 	/*
235 	 * Set the Task Priority Register as needed.   At the moment allow
236 	 * interrupts on all cpus (the APs will remain CLId until they are
237 	 * ready to deal).
238 	 */
239 	temp = lapic->tpr;
240 	temp &= ~APIC_TPR_PRIO;		/* clear priority field */
241 	lapic->tpr = temp;
242 
243 	/*
244 	 * AMD specific setup
	 *
	 * Mask all extended LVT entries (count advertised in ext_feat),
	 * capped at the number of ext_lvt slots we map.
245 	 */
246 	if (cpu_vendor_id == CPU_VENDOR_AMD &&
247 	    (lapic->version & APIC_VER_AMD_EXT_SPACE)) {
248 		uint32_t ext_feat;
249 		uint32_t count;
250 		uint32_t max_count;
251 		uint32_t lvt;
252 		uint32_t i;
253 
254 		ext_feat = lapic->ext_feat;
255 		count = (ext_feat & APIC_EXTFEAT_MASK) >> APIC_EXTFEAT_SHIFT;
256 		max_count = sizeof(lapic->ext_lvt) / sizeof(lapic->ext_lvt[0]);
257 		if (count > max_count)
258 			count = max_count;
259 		for (i = 0; i < count; ++i) {
260 			lvt = lapic->ext_lvt[i].lvt;
261 
262 			lvt &= ~(APIC_LVT_POLARITY_MASK | APIC_LVT_TRIG_MASK |
263 				 APIC_LVT_DM_MASK | APIC_LVT_MASKED);
264 			lvt |= APIC_LVT_MASKED | APIC_LVT_DM_FIXED;
265 
			/* Placeholder for per-entry policy; all masked for now */
266 			switch(i) {
267 			case APIC_EXTLVT_IBS:
268 				break;
269 			case APIC_EXTLVT_MCA:
270 				break;
271 			case APIC_EXTLVT_DEI:
272 				break;
273 			case APIC_EXTLVT_SBI:
274 				break;
275 			default:
276 				break;
277 			}
278 			if (bsp) {
279 				kprintf("   LAPIC AMD elvt%d: 0x%08x",
280 					i, lapic->ext_lvt[i].lvt);
281 				if (lapic->ext_lvt[i].lvt != lvt)
282 					kprintf(" -> 0x%08x", lvt);
283 				kprintf("\n");
284 			}
285 			lapic->ext_lvt[i].lvt = lvt;
286 		}
287 	}
288 
289 	/*
290 	 * Enable the LAPIC
291 	 */
292 	temp = lapic->svr;
293 	temp |= APIC_SVR_ENABLE;	/* enable the LAPIC */
294 	temp &= ~APIC_SVR_FOCUS_DISABLE; /* enable lopri focus processor */
295 
	/* If broadcast-EOI suppression is supported and on, turn it off */
296 	if (lapic->version & APIC_VER_EOI_SUPP) {
297 		if (temp & APIC_SVR_EOI_SUPP) {
298 			temp &= ~APIC_SVR_EOI_SUPP;
299 			if (bsp)
300 				kprintf("    LAPIC disabling EOI supp\n");
301 		}
302 	}
303 
304 	/*
305 	 * Set the spurious interrupt vector.  The low 4 bits of the vector
306 	 * must be 1111.
307 	 */
308 	if ((XSPURIOUSINT_OFFSET & 0x0F) != 0x0F)
309 		panic("bad XSPURIOUSINT_OFFSET: 0x%08x", XSPURIOUSINT_OFFSET);
310 	temp &= ~APIC_SVR_VECTOR;
311 	temp |= XSPURIOUSINT_OFFSET;
312 
313 	lapic->svr = temp;
314 
315 	/*
316 	 * Pump out a few EOIs to clean out interrupts that got through
317 	 * before we were able to set the TPR.
318 	 */
319 	lapic->eoi = 0;
320 	lapic->eoi = 0;
321 	lapic->eoi = 0;
322 
	/*
	 * BSP: calibrate the timer and (optionally) register it as the
	 * system interrupt cputimer.  APs: reuse the divisor the BSP found.
	 */
323 	if (bsp) {
324 		lapic_timer_calibrate();
325 		if (lapic_timer_enable) {
326 			if (cpu_thermal_feature & CPUID_THERMAL_ARAT) {
327 				/*
328 				 * Local APIC timer will not stop
329 				 * in deep C-state.
330 				 */
331 				lapic_cputimer_intr.caps |=
332 				    CPUTIMER_INTR_CAP_PS;
333 			}
334 			cputimer_intr_register(&lapic_cputimer_intr);
335 			cputimer_intr_select(&lapic_cputimer_intr, 0);
336 		}
337 	} else {
338 		lapic_timer_set_divisor(lapic_timer_divisor_idx);
339 	}
340 
341 	if (bootverbose)
342 		apic_dump("apic_initialize()");
343 }
344 
/*
 * Program the LAPIC timer Divide Configuration Register from the
 * calibrated divisor table.  divisor_idx must be a valid index
 * (i.e. calibration must have run).
 */
345 static void
346 lapic_timer_set_divisor(int divisor_idx)
347 {
348 	KKASSERT(divisor_idx >= 0 && divisor_idx < APIC_TIMER_NDIVISORS);
349 	lapic->dcr_timer = lapic_timer_divisors[divisor_idx];
350 }
351 
/*
 * Arm the LAPIC timer in one-shot mode with the given initial count,
 * clearing periodic mode first.
 */
352 static void
353 lapic_timer_oneshot(u_int count)
354 {
355 	uint32_t value;
356 
357 	value = lapic->lvt_timer;
358 	value &= ~APIC_LVTT_PERIODIC;
359 	lapic->lvt_timer = value;
360 	lapic->icr_timer = count;
361 }
362 
/*
 * Rearm the timer count only; assumes one-shot mode is already set in
 * the LVT (see lapic_timer_oneshot()/lapic_timer_intr_enable()).
 */
363 static void
364 lapic_timer_oneshot_quick(u_int count)
365 {
366 	lapic->icr_timer = count;
367 }
368 
/*
 * Find a usable timer divisor and measure the timer frequency.
 *
 * For each divisor (smallest first) the timer is armed with the maximum
 * count and sampled after a 2 second DELAY; the first divisor whose
 * current-count register has moved is kept.  Since the measurement window
 * is 2 seconds, the frequency is the elapsed count / 2.  Sets
 * lapic_timer_divisor_idx and lapic_cputimer_intr.freq.
 */
369 static void
370 lapic_timer_calibrate(void)
371 {
372 	sysclock_t value;
373 
374 	/* Try to calibrate the local APIC timer. */
375 	for (lapic_timer_divisor_idx = 0;
376 	     lapic_timer_divisor_idx < APIC_TIMER_NDIVISORS;
377 	     lapic_timer_divisor_idx++) {
378 		lapic_timer_set_divisor(lapic_timer_divisor_idx);
379 		lapic_timer_oneshot(APIC_TIMER_MAX_COUNT);
380 		DELAY(2000000);
381 		value = APIC_TIMER_MAX_COUNT - lapic->ccr_timer;
			/* value == MAX_COUNT means the counter never moved */
382 		if (value != APIC_TIMER_MAX_COUNT)
383 			break;
384 	}
385 	if (lapic_timer_divisor_idx >= APIC_TIMER_NDIVISORS)
386 		panic("lapic: no proper timer divisor?!");
387 	lapic_cputimer_intr.freq = value / 2;
388 
389 	kprintf("lapic: divisor index %d, frequency %u Hz\n",
390 		lapic_timer_divisor_idx, lapic_cputimer_intr.freq);
391 }
392 
/*
 * cputimer_intr reload hook: rearm the per-cpu LAPIC timer.
 *
 * 'reload' arrives in sys_cputimer ticks and is rescaled to LAPIC timer
 * ticks (64-bit intermediate to avoid overflow), clamped to a minimum
 * of 2.  If the timer is already running we only shorten the current
 * countdown, never lengthen it.
 */
393 static void
394 lapic_timer_intr_reload(struct cputimer_intr *cti, sysclock_t reload)
395 {
396 	struct globaldata *gd = mycpu;
397 
398 	reload = (int64_t)reload * cti->freq / sys_cputimer->freq;
399 	if (reload < 2)
400 		reload = 2;
401 
402 	if (gd->gd_timer_running) {
		/* only shorten the pending countdown */
403 		if (reload < lapic->ccr_timer)
404 			lapic_timer_oneshot_quick(reload);
405 	} else {
406 		gd->gd_timer_running = 1;
407 		lapic_timer_oneshot_quick(reload);
408 	}
409 }
410 
/*
 * cputimer_intr enable hook: unmask the LAPIC timer LVT entry (one-shot
 * mode) on this cpu and immediately apply the AMD C1E fixup if needed.
 */
411 static void
412 lapic_timer_intr_enable(struct cputimer_intr *cti __unused)
413 {
414 	uint32_t timer;
415 
416 	timer = lapic->lvt_timer;
417 	timer &= ~(APIC_LVTT_MASKED | APIC_LVTT_PERIODIC);
418 	lapic->lvt_timer = timer;
419 
420 	lapic_timer_fixup_handler(NULL);
421 }
422 
/*
 * Per-cpu fixup for AMD C1E, which stops the LAPIC timer in halt.
 *
 * 'arg', if non-NULL, points to an int that is set to 1 when the fixup
 * actually fired (and the timer was kick-started), 0 otherwise.  Runs on
 * the target cpu (called directly or via IPI).
 */
423 static void
424 lapic_timer_fixup_handler(void *arg)
425 {
426 	int *started = arg;
427 
428 	if (started != NULL)
429 		*started = 0;
430 
431 	if (cpu_vendor_id == CPU_VENDOR_AMD) {
432 		/*
433 		 * Detect the presence of C1E capability mostly on latest
434 		 * dual-cores (or future) k8 family.  This feature renders
435 		 * the local APIC timer dead, so we disable it by reading
436 		 * the Interrupt Pending Message register and clearing both
437 		 * C1eOnCmpHalt (bit 28) and SmiOnCmpHalt (bit 27).
438 		 *
439 		 * Reference:
440 		 *   "BIOS and Kernel Developer's Guide for AMD NPT
441 		 *    Family 0Fh Processors"
442 		 *   #32559 revision 3.00
443 		 */
		/* family 0x0f, model/stepping >= rev F (0x40000) */
444 		if ((cpu_id & 0x00000f00) == 0x00000f00 &&
445 		    (cpu_id & 0x0fff0000) >= 0x00040000) {
446 			uint64_t msr;
447 
448 			msr = rdmsr(0xc0010055);
449 			if (msr & 0x18000000) {
450 				struct globaldata *gd = mycpu;
451 
452 				kprintf("cpu%d: AMD C1E detected\n",
453 					gd->gd_cpuid);
454 				wrmsr(0xc0010055, msr & ~0x18000000ULL);
455 
456 				/*
457 				 * We are kinda stalled;
458 				 * kick start again.
459 				 */
460 				gd->gd_timer_running = 1;
461 				lapic_timer_oneshot_quick(2);
462 
463 				if (started != NULL)
464 					*started = 1;
465 			}
466 		}
467 	}
468 }
469 
/*
 * Per-cpu restart handler: apply the C1E fixup, and if that did not
 * already kick the timer, restart it ourselves with a minimal count.
 */
470 static void
471 lapic_timer_restart_handler(void *dummy __unused)
472 {
473 	int started;
474 
475 	lapic_timer_fixup_handler(&started);
476 	if (!started) {
477 		struct globaldata *gd = mycpu;
478 
479 		gd->gd_timer_running = 1;
480 		lapic_timer_oneshot_quick(2);
481 	}
482 }
483 
484 /*
485  * This function is called only by ACPICA code currently:
486  * - AMD C1E fixup.  AMD C1E only seems to happen after ACPI
487  *   module controls PM.  So once ACPICA is attached, we try
488  *   to apply the fixup to prevent LAPIC timer from hanging.
 *
 * The fixup must run on each cpu, so it is broadcast as an IPI to
 * every active cpu.
489  */
490 static void
491 lapic_timer_intr_pmfixup(struct cputimer_intr *cti __unused)
492 {
493 	lwkt_send_ipiq_mask(smp_active_mask,
494 			    lapic_timer_fixup_handler, NULL);
495 }
496 
/*
 * cputimer_intr restart hook: restart the LAPIC timer on every active
 * cpu via IPI (each cpu must touch its own local APIC).
 */
497 static void
498 lapic_timer_intr_restart(struct cputimer_intr *cti __unused)
499 {
500 	lwkt_send_ipiq_mask(smp_active_mask, lapic_timer_restart_handler, NULL);
501 }
502 
503 
504 /*
505  * dump contents of local APIC registers
 *
 * Prints this cpu's LINT0/LINT1 LVT entries, TPR and SVR, prefixed
 * with the caller-supplied tag string.
506  */
507 void
508 apic_dump(char* str)
509 {
510 	kprintf("SMP: CPU%d %s:\n", mycpu->gd_cpuid, str);
511 	kprintf("     lint0: 0x%08x lint1: 0x%08x TPR: 0x%08x SVR: 0x%08x\n",
512 		lapic->lvt_lint0, lapic->lvt_lint1, lapic->tpr, lapic->svr);
513 }
514 
515 /*
516  * Inter Processor Interrupt functions.
517  */
518 
519 /*
520  * Send APIC IPI 'vector' to 'destType' via 'deliveryMode'.
521  *
522  *  destType is 1 of: APIC_DEST_SELF, APIC_DEST_ALLISELF, APIC_DEST_ALLESELF
523  *  vector is any valid SYSTEM INT vector
524  *  delivery_mode is 1 of: APIC_DELMODE_FIXED, APIC_DELMODE_LOWPRIO
525  *
526  * WARNINGS!
527  *
528  * We now implement a per-cpu interlock (gd->gd_npoll) to prevent more than
529  * one IPI from being sent to any given cpu at a time.  Thus we no longer
530  * have to process incoming IPIs while waiting for the status to clear.
531  * No deadlock should be possible.
532  *
533  * We now physically disable interrupts for the lapic ICR operation.  If
534  * we do not do this then it looks like an EOI sent to the lapic (which
535  * occurs even with a critical section) can interfere with the command
536  * register ready status and cause an IPI to be lost.
537  *
538  * e.g. an interrupt can occur, issue the EOI, IRET, and cause the command
539  * register to busy just before we write to icr_lo, resulting in a lost
540  * issuance.  This only appears to occur on Intel cpus and is not
541  * documented.  It could simply be that cpus are so fast these days that
542  * it was always an issue, but is only now rearing its ugly head.  This
543  * is conjecture.
544  */
/*
 * Send an IPI to the destination shorthand 'dest_type' (see the big
 * comment above).  Spins while a previous ICR command is still pending,
 * with a stall warning each tsc_frequency worth of cycles and a panic
 * after 30 such warnings.  Always returns 0.
 */
545 int
546 apic_ipi(int dest_type, int vector, int delivery_mode)
547 {
548 	uint32_t icr_hi;
549 	uint32_t icr_lo;
550 	int64_t tsc;
551 	int loops = 1;
552 
	/* wait for any prior IPI to be delivered (ICR delivery status) */
553 	if ((lapic->icr_lo & APIC_DELSTAT_MASK) != 0) {
554 		tsc = rdtsc();
555 		while ((lapic->icr_lo & APIC_DELSTAT_MASK) != 0) {
556 			cpu_pause();
557 			if ((int64_t)(rdtsc() - (tsc + tsc_frequency)) > 0) {
558 				kprintf("apic_ipi stall cpu %d (sing)\n",
559 					mycpuid);
560 				tsc = rdtsc();
561 				if (++loops > 30)
562 					panic("apic stall");
563 			}
564 		}
565 	}
566 	icr_hi = lapic->icr_hi & ~APIC_ID_MASK;
567 	icr_lo = (lapic->icr_lo & APIC_ICRLO_RESV_MASK) | dest_type |
568 		 APIC_LEVEL_ASSERT | delivery_mode | vector;
	/* write icr_hi first; the icr_lo write triggers the IPI */
569 	lapic->icr_hi = icr_hi;
570 	lapic->icr_lo = icr_lo;
571 
572 	return 0;
573 }
574 
575 /*
576  * Interrupts must be hard-disabled by caller
 *
 * Send an IPI with the given vector/delivery mode to exactly one cpu,
 * addressed by its APIC ID in ICR_HI.  Same busy-wait/stall-detection
 * logic as apic_ipi().
577  */
578 void
579 single_apic_ipi(int cpu, int vector, int delivery_mode)
580 {
581 	uint32_t  icr_lo;
582 	uint32_t  icr_hi;
583 	int64_t tsc;
584 	int loops = 1;
585 
586 	if ((lapic->icr_lo & APIC_DELSTAT_MASK) != 0) {
587 		tsc = rdtsc();
588 		while ((lapic->icr_lo & APIC_DELSTAT_MASK) != 0) {
589 			cpu_pause();
590 			if ((int64_t)(rdtsc() - (tsc + tsc_frequency)) > 0) {
591 				kprintf("single_apic_ipi stall cpu %d (sing)\n",
592 					mycpuid);
593 				tsc = rdtsc();
594 				if (++loops > 30)
595 					panic("apic stall");
596 			}
597 		}
598 	}
	/* destination APIC ID goes in bits 31:24 of ICR_HI */
599 	icr_hi = lapic->icr_hi & ~APIC_ID_MASK;
600 	icr_hi |= (CPUID_TO_APICID(cpu) << 24);
601 
602 	/* build ICR_LOW */
603 	icr_lo = (lapic->icr_lo & APIC_ICRLO_RESV_MASK) |
604 		 APIC_LEVEL_ASSERT | APIC_DEST_DESTFLD | delivery_mode | vector;
605 
606 	/* write APIC ICR */
607 	lapic->icr_hi = icr_hi;
608 	lapic->icr_lo = icr_lo;
609 }
610 
611 #if 0
612 
613 /*
614  * Returns 0 if the apic is busy, 1 if we were able to queue the request.
615  *
616  * NOT WORKING YET!  The code as-is may end up not queueing an IPI at all
617  * to the target, and the scheduler does not 'poll' for IPI messages.
 *
 * NOTE(review): unlike the active code above, this masks icr_lo with
 * APIC_RESV2_MASK (not APIC_ICRLO_RESV_MASK) and omits APIC_LEVEL_ASSERT;
 * verify before ever enabling this path.
618  */
619 int
620 single_apic_ipi_passive(int cpu, int vector, int delivery_mode)
621 {
622 	u_long  icr_lo;
623 	u_long  icr_hi;
624 	unsigned long rflags;
625 
626 	rflags = read_rflags();
627 	cpu_disable_intr();
	/* don't wait: bail out if a previous IPI is still pending */
628 	if ((lapic->icr_lo & APIC_DELSTAT_MASK) != 0) {
629 		write_rflags(rflags);
630 		return(0);
631 	}
632 	icr_hi = lapic->icr_hi & ~APIC_ID_MASK;
633 	icr_hi |= (CPUID_TO_APICID(cpu) << 24);
634 	lapic->icr_hi = icr_hi;
635 
636 	/* build ICR_LOW */
637 	icr_lo = (lapic->icr_lo & APIC_RESV2_MASK) |
638 		 APIC_DEST_DESTFLD | delivery_mode | vector;
639 
640 	/* write APIC ICR */
641 	lapic->icr_lo = icr_lo;
642 	write_rflags(rflags);
643 
644 	return(1);
645 }
646 
647 #endif
648 
649 /*
650  * Send APIC IPI 'vector' to 'target's via 'delivery_mode'.
651  *
652  * target is a bitmask of destination cpus.  Vector is any
653  * valid system INT vector.  Delivery mode may be either
654  * APIC_DELMODE_FIXED or APIC_DELMODE_LOWPRIO.
655  *
656  * Interrupts must be hard-disabled by caller
657  */
658 void
659 selected_apic_ipi(cpumask_t target, int vector, int delivery_mode)
660 {
	/* peel off each set bit and send a unicast IPI to that cpu */
661 	while (CPUMASK_TESTNZERO(target)) {
662 		int n = BSFCPUMASK(target);
663 		CPUMASK_NANDBIT(target, n);
664 		single_apic_ipi(n, vector, delivery_mode);
665 	}
666 }
667 
668 /*
669  * Timer code, in development...
670  *  - suggested by rgrimes@gndrsh.aac.dev.com
 *
 * Return the calibrated LAPIC timer frequency in Hz (0 if the timer
 * has not been calibrated yet).
671  */
672 int
673 get_apic_timer_frequency(void)
674 {
675 	return(lapic_cputimer_intr.freq);
676 }
677 
678 /*
679  * Load a 'downcount time' in uSeconds.
 *
 * Converts 'us' to timer ticks (rounding up) and arms the timer in
 * one-shot mode.  Requires prior calibration (asserted below).
680  */
681 void
682 set_apic_timer(int us)
683 {
684 	u_int count;
685 
686 	/*
687 	 * When we reach here, lapic timer's frequency
688 	 * must have been calculated as well as the
689 	 * divisor (lapic->dcr_timer is setup during the
690 	 * divisor calculation).
691 	 */
692 	KKASSERT(lapic_cputimer_intr.freq != 0 &&
693 		 lapic_timer_divisor_idx >= 0);
694 
	/* round up: us * freq / 1e6, 64-bit intermediate to avoid overflow */
695 	count = ((us * (int64_t)lapic_cputimer_intr.freq) + 999999) / 1000000;
696 	lapic_timer_oneshot(count);
697 }
698 
699 
700 /*
701  * Read remaining time in timer.
 *
 * Currently returns the raw remaining tick count from the current-count
 * register, not a time value (see the XXX below).
702  */
703 int
704 read_apic_timer(void)
705 {
706 #if 0
707 	/** XXX FIXME: we need to return the actual remaining time,
708          *         for now we just return the remaining count.
709          */
710 #else
711 	return lapic->ccr_timer;
712 #endif
713 }
714 
715 
716 /*
717  * Spin-style delay, set delay time in uS, spin till it drains.
 *
 * Busy-waits on the LAPIC timer; uses set_apic_timer(), so calibration
 * must already have happened.
718  */
719 void
720 u_sleep(int count)
721 {
722 	set_apic_timer(count);
723 	while (read_apic_timer())
724 		 /* spin */ ;
725 }
726 
/*
 * Return the first APIC ID >= 'start' that is not mapped to any cpu,
 * or NAPICID if every ID in [start, APICID_MAX) is taken.
 */
727 int
728 lapic_unused_apic_id(int start)
729 {
730 	int i;
731 
732 	for (i = start; i < APICID_MAX; ++i) {
733 		if (APICID_TO_CPUID(i) == -1)
734 			return i;
735 	}
736 	return NAPICID;
737 }
738 
/*
 * Map the local APIC register page (physical 'lapic_addr') uncacheable
 * and point the global 'lapic' at it.
 */
739 void
740 lapic_map(vm_paddr_t lapic_addr)
741 {
742 	lapic = pmap_mapdev_uncacheable(lapic_addr, sizeof(struct LAPIC));
743 }
744 
/* Priority-ordered list of registered LAPIC enumerators (MP table, ACPI). */
745 static TAILQ_HEAD(, lapic_enumerator) lapic_enumerators =
746 	TAILQ_HEAD_INITIALIZER(lapic_enumerators);
747 
/*
 * Probe and enumerate local APICs.
 *
 * Resets the APIC ID -> CPU ID map, runs the first enumerator whose
 * probe succeeds, then clamps the number of APs to hw.ap_max (at most
 * MAXCPU - 1).  Returns 0 on success or ENXIO if no enumerator finds
 * a LAPIC or enumeration fails.
 */
748 int
749 lapic_config(void)
750 {
751 	struct lapic_enumerator *e;
752 	int error, i, ap_max;
753 
754 	KKASSERT(lapic_enable);
755 
756 	for (i = 0; i < NAPICID; ++i)
757 		APICID_TO_CPUID(i) = -1;
758 
759 	TAILQ_FOREACH(e, &lapic_enumerators, lapic_link) {
760 		error = e->lapic_probe(e);
761 		if (!error)
762 			break;
763 	}
764 	if (e == NULL) {
765 		kprintf("LAPIC: Can't find LAPIC\n");
766 		return ENXIO;
767 	}
768 
769 	error = e->lapic_enumerate(e);
770 	if (error) {
771 		kprintf("LAPIC: enumeration failed\n");
772 		return ENXIO;
773 	}
774 
775 	ap_max = MAXCPU - 1;
776 	TUNABLE_INT_FETCH("hw.ap_max", &ap_max);
777 	if (ap_max > MAXCPU - 1)
778 		ap_max = MAXCPU - 1;
779 
780 	if (naps > ap_max) {
781 		kprintf("LAPIC: Warning use only %d out of %d "
782 			"available APs\n",
783 			ap_max, naps);
784 		naps = ap_max;
785 	}
786 
787 	return 0;
788 }
789 
/*
 * Insert a LAPIC enumerator into the global list, kept sorted by
 * ascending lapic_prio (lower value = earlier probe).
 */
790 void
791 lapic_enumerator_register(struct lapic_enumerator *ne)
792 {
793 	struct lapic_enumerator *e;
794 
795 	TAILQ_FOREACH(e, &lapic_enumerators, lapic_link) {
796 		if (e->lapic_prio < ne->lapic_prio) {
797 			TAILQ_INSERT_BEFORE(e, ne, lapic_link);
798 			return;
799 		}
800 	}
801 	TAILQ_INSERT_TAIL(&lapic_enumerators, ne, lapic_link);
802 }
803 
/* Record the bidirectional cpu_id <-> apic_id mapping. */
804 void
805 lapic_set_cpuid(int cpu_id, int apic_id)
806 {
807 	CPUID_TO_APICID(cpu_id) = apic_id;
808 	APICID_TO_CPUID(apic_id) = cpu_id;
809 }
810 
/*
 * Rewire the BSP's LINTs for operation without an I/O APIC: unmask
 * LINT0 (ExtINT path from the 8259) and mask LINT1 (NMI), i.e. the
 * inverse of the ioapic_enable configuration made in lapic_init().
 */
811 void
812 lapic_fixup_noioapic(void)
813 {
814 	u_int   temp;
815 
816 	/* Only allowed on BSP */
817 	KKASSERT(mycpuid == 0);
818 	KKASSERT(!ioapic_enable);
819 
820 	temp = lapic->lvt_lint0;
821 	temp &= ~APIC_LVT_MASKED;
822 	lapic->lvt_lint0 = temp;
823 
824 	temp = lapic->lvt_lint1;
825 	temp |= APIC_LVT_MASKED;
826 	lapic->lvt_lint1 = temp;
827 }
828 
/*
 * Boot-time hook (SI_BOOT2_LAPIC): configure the LAPIC if enabled, and
 * initialize the BSP's local APIC.  On failure (or lapic_enable == 0)
 * fall back to the legacy 8259 ICU, disabling the I/O APIC path.
 */
829 static void
830 lapic_sysinit(void *dummy __unused)
831 {
832 	if (lapic_enable) {
833 		int error;
834 
835 		error = lapic_config();
836 		if (error)
837 			lapic_enable = 0;
838 	}
839 
	/* lapic_enable may have just been cleared by a config failure */
840 	if (lapic_enable) {
841 		/* Initialize BSP's local APIC */
842 		lapic_init(TRUE);
843 	} else if (ioapic_enable) {
844 		ioapic_enable = 0;
845 		icu_reinit_noioapic();
846 	}
847 }
848 SYSINIT(lapic, SI_BOOT2_LAPIC, SI_ORDER_FIRST, lapic_sysinit, NULL);
849