xref: /dragonfly/sys/platform/pc64/apic/lapic.c (revision 8edfbc5e)
1 /*
2  * Copyright (c) 1996, by Steve Passe
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. The name of the developer may NOT be used to endorse or promote products
11  *    derived from this software without specific prior written permission.
12  *
13  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23  * SUCH DAMAGE.
24  *
25  * $FreeBSD: src/sys/i386/i386/mpapic.c,v 1.37.2.7 2003/01/25 02:31:47 peter Exp $
26  */
27 
28 #include <sys/param.h>
29 #include <sys/systm.h>
30 #include <sys/kernel.h>
31 #include <sys/bus.h>
32 #include <sys/machintr.h>
33 #include <machine/globaldata.h>
34 #include <machine/smp.h>
35 #include <machine/md_var.h>
36 #include <machine/pmap.h>
37 #include <machine/specialreg.h>
38 #include <machine_base/apic/lapic.h>
39 #include <machine_base/apic/ioapic.h>
40 #include <machine_base/apic/ioapic_abi.h>
41 #include <machine_base/apic/apicvar.h>
42 #include <machine_base/icu/icu_var.h>
43 #include <machine/segments.h>
44 #include <sys/thread2.h>
45 
46 #include <machine/cputypes.h>
47 #include <machine/intr_machdep.h>
48 
extern int naps;		/* number of available APs (see lapic_config()) */

/* Memory-mapped local APIC register window; set up by lapic_map(). */
volatile lapic_t *lapic;

static void	lapic_timer_calibrate(void);
static void	lapic_timer_set_divisor(int);
static void	lapic_timer_fixup_handler(void *);
static void	lapic_timer_restart_handler(void *);


/* Tunable: set hw.lapic_timer_enable to 0 to disable the LAPIC cputimer. */
static int	lapic_timer_enable = 1;
TUNABLE_INT("hw.lapic_timer_enable", &lapic_timer_enable);

static void	lapic_timer_intr_reload(struct cputimer_intr *, sysclock_t);
static void	lapic_timer_intr_enable(struct cputimer_intr *);
static void	lapic_timer_intr_restart(struct cputimer_intr *);
static void	lapic_timer_intr_pmfixup(struct cputimer_intr *);

/*
 * cputimer_intr descriptor for the LAPIC timer.  .freq is filled in
 * by lapic_timer_calibrate() on the BSP before registration.
 */
static struct cputimer_intr lapic_cputimer_intr = {
	.freq = 0,
	.reload = lapic_timer_intr_reload,
	.enable = lapic_timer_intr_enable,
	.config = cputimer_intr_default_config,
	.restart = lapic_timer_intr_restart,
	.pmfixup = lapic_timer_intr_pmfixup,
	.initclock = cputimer_intr_default_initclock,
	.pcpuhand = NULL,
	.next = SLIST_ENTRY_INITIALIZER,
	.name = "lapic",
	.type = CPUTIMER_INTR_LAPIC,
	.prio = CPUTIMER_INTR_PRIO_LAPIC,
	.caps = CPUTIMER_INTR_CAP_NONE,
	.priv = NULL
};

/*
 * Timer divide-configuration values, tried in order by
 * lapic_timer_calibrate().  lapic_timer_divisor_idx stays -1 until
 * calibration selects a working entry.
 */
static int		lapic_timer_divisor_idx = -1;
static const uint32_t	lapic_timer_divisors[] = {
	APIC_TDCR_2,	APIC_TDCR_4,	APIC_TDCR_8,	APIC_TDCR_16,
	APIC_TDCR_32,	APIC_TDCR_64,	APIC_TDCR_128,	APIC_TDCR_1
};
#define APIC_TIMER_NDIVISORS (int)(NELEM(lapic_timer_divisors))

/*
 * APIC ID <-> CPU ID mapping structures.
 */
int	cpu_id_to_apic_id[NAPICID];
int	apic_id_to_cpu_id[NAPICID];
int	lapic_enable = 1;	/* cleared by lapic_sysinit() on config failure */
/*
 * Enable LAPIC, configure interrupts.
 *
 * Called with bsp == TRUE on the boot processor and FALSE on each AP.
 * The IDT vectors are shared by all cpus and only installed during the
 * BSP pass; the per-cpu LVT/TPR/SVR programming below runs on every cpu.
 */
void
lapic_init(boolean_t bsp)
{
	uint32_t timer;
	u_int   temp;

	/*
	 * Install vectors
	 *
	 * Since IDT is shared between BSP and APs, these vectors
	 * only need to be installed once; we do it on BSP.
	 */
	if (bsp) {
		if (cpu_vendor_id == CPU_VENDOR_AMD &&
		    CPUID_TO_FAMILY(cpu_id) >= 0xf) {
			uint32_t tcr;

			/*
			 * Set the LINTEN bit in the HyperTransport
			 * Transaction Control Register.
			 *
			 * This will cause EXTINT and NMI interrupts
			 * routed over the hypertransport bus to be
			 * fed into the LAPIC LINT0/LINT1.  If the bit
			 * isn't set, the interrupts will go to the
			 * general cpu INTR/NMI pins.  On a dual-core
			 * cpu the interrupt winds up going to BOTH cpus.
			 * The first cpu that does the interrupt ack
			 * cycle will get the correct interrupt.  The
			 * second cpu that does it will get a spurious
			 * interrupt vector (typically IRQ 7).
			 */
			outl(0x0cf8,
			    (1 << 31) |	/* enable */
			    (0 << 16) |	/* bus */
			    (0x18 << 11) | /* dev (cpu + 0x18) */
			    (0 << 8) |	/* func */
			    0x68	/* reg */
			    );
			tcr = inl(0xcfc);
			if ((tcr & 0x00010000) == 0) {
				kprintf("LAPIC: AMD LINTEN on\n");
				outl(0xcfc, tcr|0x00010000);
			}
			outl(0x0cf8, 0);
		}

		/* Install a 'Spurious INTerrupt' vector */
		setidt_global(XSPURIOUSINT_OFFSET, Xspuriousint,
		    SDT_SYSIGT, SEL_KPL, 0);

		/* Install a timer vector */
		setidt_global(XTIMER_OFFSET, Xtimer,
		    SDT_SYSIGT, SEL_KPL, 0);

		/* Install an inter-CPU IPI for TLB invalidation */
		setidt_global(XINVLTLB_OFFSET, Xinvltlb,
		    SDT_SYSIGT, SEL_KPL, 0);

		/* Install an inter-CPU IPI for IPIQ messaging */
		setidt_global(XIPIQ_OFFSET, Xipiq,
		    SDT_SYSIGT, SEL_KPL, 0);

		/* Install an inter-CPU IPI for CPU stop/restart */
		setidt_global(XCPUSTOP_OFFSET, Xcpustop,
		    SDT_SYSIGT, SEL_KPL, 0);

		/* Install the inter-CPU IPI vector for Xsniff */
		setidt_global(XSNIFF_OFFSET, Xsniff,
		    SDT_SYSIGT, SEL_KPL, 0);
	}

	/*
	 * Setup LINT0 as ExtINT on the BSP.  This is theoretically an
	 * aggregate interrupt input from the 8259.  The INTA cycle
	 * will be routed to the external controller (the 8259) which
	 * is expected to supply the vector.
	 *
	 * Must be setup edge triggered, active high.
	 *
	 * Disable LINT0 on BSP, if I/O APIC is enabled.
	 *
	 * Disable LINT0 on the APs.  It doesn't matter what delivery
	 * mode we use because we leave it masked.
	 */
	temp = lapic->lvt_lint0;
	temp &= ~(APIC_LVT_MASKED | APIC_LVT_TRIG_MASK |
		  APIC_LVT_POLARITY_MASK | APIC_LVT_DM_MASK);
	if (bsp) {
		temp |= APIC_LVT_DM_EXTINT;
		if (ioapic_enable)
			temp |= APIC_LVT_MASKED;
	} else {
		temp |= APIC_LVT_DM_FIXED | APIC_LVT_MASKED;
	}
	lapic->lvt_lint0 = temp;

	/*
	 * Setup LINT1 as NMI.
	 *
	 * Must be setup edge trigger, active high.
	 *
	 * Enable LINT1 on BSP, if I/O APIC is enabled.
	 *
	 * Disable LINT1 on the APs.
	 */
	temp = lapic->lvt_lint1;
	temp &= ~(APIC_LVT_MASKED | APIC_LVT_TRIG_MASK |
		  APIC_LVT_POLARITY_MASK | APIC_LVT_DM_MASK);
	temp |= APIC_LVT_MASKED | APIC_LVT_DM_NMI;
	if (bsp && ioapic_enable)
		temp &= ~APIC_LVT_MASKED;
	lapic->lvt_lint1 = temp;

	/*
	 * Mask the LAPIC error interrupt, LAPIC performance counter
	 * interrupt.
	 */
	lapic->lvt_error = lapic->lvt_error | APIC_LVT_MASKED;
	lapic->lvt_pcint = lapic->lvt_pcint | APIC_LVT_MASKED;

	/*
	 * Set LAPIC timer vector and mask the LAPIC timer interrupt.
	 * The timer is unmasked later by lapic_timer_intr_enable().
	 */
	timer = lapic->lvt_timer;
	timer &= ~APIC_LVTT_VECTOR;
	timer |= XTIMER_OFFSET;
	timer |= APIC_LVTT_MASKED;
	lapic->lvt_timer = timer;

	/*
	 * Set the Task Priority Register as needed.   At the moment allow
	 * interrupts on all cpus (the APs will remain CLId until they are
	 * ready to deal).
	 */
	temp = lapic->tpr;
	temp &= ~APIC_TPR_PRIO;		/* clear priority field */
	lapic->tpr = temp;

	/*
	 * Enable the LAPIC
	 */
	temp = lapic->svr;
	temp |= APIC_SVR_ENABLE;	/* enable the LAPIC */
	temp &= ~APIC_SVR_FOCUS_DISABLE; /* enable lopri focus processor */

	/*
	 * Set the spurious interrupt vector.  The low 4 bits of the vector
	 * must be 1111.
	 */
	if ((XSPURIOUSINT_OFFSET & 0x0F) != 0x0F)
		panic("bad XSPURIOUSINT_OFFSET: 0x%08x", XSPURIOUSINT_OFFSET);
	temp &= ~APIC_SVR_VECTOR;
	temp |= XSPURIOUSINT_OFFSET;

	lapic->svr = temp;

	/*
	 * Pump out a few EOIs to clean out interrupts that got through
	 * before we were able to set the TPR.
	 */
	lapic->eoi = 0;
	lapic->eoi = 0;
	lapic->eoi = 0;

	if (bsp) {
		/* Calibrate once on the BSP; APs reuse the divisor below. */
		lapic_timer_calibrate();
		if (lapic_timer_enable) {
			if (cpu_thermal_feature & CPUID_THERMAL_ARAT) {
				/*
				 * Local APIC timer will not stop
				 * in deep C-state.
				 */
				lapic_cputimer_intr.caps |=
				    CPUTIMER_INTR_CAP_PS;
			}
			cputimer_intr_register(&lapic_cputimer_intr);
			cputimer_intr_select(&lapic_cputimer_intr, 0);
		}
	} else {
		/* Program the divisor the BSP's calibration selected. */
		lapic_timer_set_divisor(lapic_timer_divisor_idx);
	}

	if (bootverbose)
		apic_dump("apic_initialize()");
}
287 
288 static void
289 lapic_timer_set_divisor(int divisor_idx)
290 {
291 	KKASSERT(divisor_idx >= 0 && divisor_idx < APIC_TIMER_NDIVISORS);
292 	lapic->dcr_timer = lapic_timer_divisors[divisor_idx];
293 }
294 
295 static void
296 lapic_timer_oneshot(u_int count)
297 {
298 	uint32_t value;
299 
300 	value = lapic->lvt_timer;
301 	value &= ~APIC_LVTT_PERIODIC;
302 	lapic->lvt_timer = value;
303 	lapic->icr_timer = count;
304 }
305 
/*
 * Arm the timer with 'count' without touching the LVT mode bits; the
 * caller must already have one-shot mode configured.
 */
static void
lapic_timer_oneshot_quick(u_int count)
{
	lapic->icr_timer = count;
}
311 
/*
 * Calibrate the LAPIC timer against DELAY() (2 wall-clock seconds per
 * attempt).  Walks the divisor table until a divisor is found for which
 * the max count does not fully drain within the window, then derives
 * the tick frequency from the ticks consumed.  Panics if no divisor
 * works.  Sets lapic_timer_divisor_idx and lapic_cputimer_intr.freq.
 */
static void
lapic_timer_calibrate(void)
{
	sysclock_t value;

	/* Try to calibrate the local APIC timer. */
	for (lapic_timer_divisor_idx = 0;
	     lapic_timer_divisor_idx < APIC_TIMER_NDIVISORS;
	     lapic_timer_divisor_idx++) {
		lapic_timer_set_divisor(lapic_timer_divisor_idx);
		lapic_timer_oneshot(APIC_TIMER_MAX_COUNT);
		/* Let the timer run for 2 seconds of wall time. */
		DELAY(2000000);
		/* Ticks consumed during the window. */
		value = APIC_TIMER_MAX_COUNT - lapic->ccr_timer;
		/*
		 * If the counter drained all the way (ccr == 0) this
		 * divisor is too small; try the next, larger one.
		 */
		if (value != APIC_TIMER_MAX_COUNT)
			break;
	}
	if (lapic_timer_divisor_idx >= APIC_TIMER_NDIVISORS)
		panic("lapic: no proper timer divisor?!");
	/* Ticks over 2 seconds -> ticks per second. */
	lapic_cputimer_intr.freq = value / 2;

	kprintf("lapic: divisor index %d, frequency %u Hz\n",
		lapic_timer_divisor_idx, lapic_cputimer_intr.freq);
}
335 
336 static void
337 lapic_timer_intr_reload(struct cputimer_intr *cti, sysclock_t reload)
338 {
339 	struct globaldata *gd = mycpu;
340 
341 	reload = (int64_t)reload * cti->freq / sys_cputimer->freq;
342 	if (reload < 2)
343 		reload = 2;
344 
345 	if (gd->gd_timer_running) {
346 		if (reload < lapic->ccr_timer)
347 			lapic_timer_oneshot_quick(reload);
348 	} else {
349 		gd->gd_timer_running = 1;
350 		lapic_timer_oneshot_quick(reload);
351 	}
352 }
353 
354 static void
355 lapic_timer_intr_enable(struct cputimer_intr *cti __unused)
356 {
357 	uint32_t timer;
358 
359 	timer = lapic->lvt_timer;
360 	timer &= ~(APIC_LVTT_MASKED | APIC_LVTT_PERIODIC);
361 	lapic->lvt_timer = timer;
362 
363 	lapic_timer_fixup_handler(NULL);
364 }
365 
/*
 * Detect and work around AMD C1E, which stops the LAPIC timer while
 * the cpu is halted.  If 'arg' is non-NULL it points to an int that is
 * set to 1 when the workaround restarted this cpu's timer, else 0.
 */
static void
lapic_timer_fixup_handler(void *arg)
{
	int *started = arg;

	if (started != NULL)
		*started = 0;

	if (cpu_vendor_id == CPU_VENDOR_AMD) {
		/*
		 * Detect the presence of C1E capability mostly on latest
		 * dual-cores (or future) k8 family.  This feature renders
		 * the local APIC timer dead, so we disable it by reading
		 * the Interrupt Pending Message register and clearing both
		 * C1eOnCmpHalt (bit 28) and SmiOnCmpHalt (bit 27).
		 *
		 * Reference:
		 *   "BIOS and Kernel Developer's Guide for AMD NPT
		 *    Family 0Fh Processors"
		 *   #32559 revision 3.00
		 */
		/* Family 0Fh, sufficiently new model (per the BKDG above). */
		if ((cpu_id & 0x00000f00) == 0x00000f00 &&
		    (cpu_id & 0x0fff0000) >= 0x00040000) {
			uint64_t msr;

			/* 0xc0010055: Interrupt Pending Message register. */
			msr = rdmsr(0xc0010055);
			if (msr & 0x18000000) {
				struct globaldata *gd = mycpu;

				kprintf("cpu%d: AMD C1E detected\n",
					gd->gd_cpuid);
				/* Clear C1eOnCmpHalt and SmiOnCmpHalt. */
				wrmsr(0xc0010055, msr & ~0x18000000ULL);

				/*
				 * We are kinda stalled;
				 * kick start again.
				 */
				gd->gd_timer_running = 1;
				lapic_timer_oneshot_quick(2);

				if (started != NULL)
					*started = 1;
			}
		}
	}
}
412 
413 static void
414 lapic_timer_restart_handler(void *dummy __unused)
415 {
416 	int started;
417 
418 	lapic_timer_fixup_handler(&started);
419 	if (!started) {
420 		struct globaldata *gd = mycpu;
421 
422 		gd->gd_timer_running = 1;
423 		lapic_timer_oneshot_quick(2);
424 	}
425 }
426 
427 /*
428  * This function is called only by ACPICA code currently:
429  * - AMD C1E fixup.  AMD C1E only seems to happen after ACPI
430  *   module controls PM.  So once ACPICA is attached, we try
431  *   to apply the fixup to prevent LAPIC timer from hanging.
432  */
static void
lapic_timer_intr_pmfixup(struct cputimer_intr *cti __unused)
{
	/* Run the C1E fixup on every active cpu via IPI. */
	lwkt_send_ipiq_mask(smp_active_mask,
			    lapic_timer_fixup_handler, NULL);
}
439 
/* Restart the LAPIC timer on every active cpu via IPI. */
static void
lapic_timer_intr_restart(struct cputimer_intr *cti __unused)
{
	lwkt_send_ipiq_mask(smp_active_mask, lapic_timer_restart_handler, NULL);
}
445 
446 
447 /*
448  * dump contents of local APIC registers
449  */
/* Print this cpu's LINT0/LINT1/TPR/SVR registers, tagged with 'str'. */
void
apic_dump(char* str)
{
	kprintf("SMP: CPU%d %s:\n", mycpu->gd_cpuid, str);
	kprintf("     lint0: 0x%08x lint1: 0x%08x TPR: 0x%08x SVR: 0x%08x\n",
		lapic->lvt_lint0, lapic->lvt_lint1, lapic->tpr, lapic->svr);
}
457 
458 /*
459  * Inter Processor Interrupt functions.
460  */
461 
462 /*
463  * Send APIC IPI 'vector' to 'destType' via 'deliveryMode'.
464  *
465  *  destType is 1 of: APIC_DEST_SELF, APIC_DEST_ALLISELF, APIC_DEST_ALLESELF
466  *  vector is any valid SYSTEM INT vector
467  *  delivery_mode is 1 of: APIC_DELMODE_FIXED, APIC_DELMODE_LOWPRIO
468  *
469  * WARNINGS!
470  *
471  * We now implement a per-cpu interlock (gd->gd_npoll) to prevent more than
472  * one IPI from being sent to any given cpu at a time.  Thus we no longer
473  * have to process incoming IPIs while waiting for the status to clear.
474  * No deadlock should be possible.
475  *
476  * We now physically disable interrupts for the lapic ICR operation.  If
477  * we do not do this then it looks like an EOI sent to the lapic (which
478  * occurs even with a critical section) can interfere with the command
479  * register ready status and cause an IPI to be lost.
480  *
481  * e.g. an interrupt can occur, issue the EOI, IRET, and cause the command
482  * register to busy just before we write to icr_lo, resulting in a lost
483  * issuance.  This only appears to occur on Intel cpus and is not
484  * documented.  It could simply be that cpus are so fast these days that
485  * it was always an issue, but is only now rearing its ugly head.  This
486  * is conjecture.
487  */
/*
 * Issue a shorthand IPI (see the WARNINGS above).  Spins until any
 * previous ICR command has been delivered, then issues the new one
 * with hardware interrupts physically disabled.  Always returns 0.
 */
int
apic_ipi(int dest_type, int vector, int delivery_mode)
{
	unsigned long rflags;
	u_long  icr_lo;
	int loops = 1;

	rflags = read_rflags();
	cpu_disable_intr();
	/* Wait for delivery of any previously issued IPI. */
	while ((lapic->icr_lo & APIC_DELSTAT_MASK) != 0) {
		cpu_pause();
		if (++loops == 10000000)
			kprintf("apic_ipi stall cpu %d\n", mycpuid);
	}
	/* Preserve reserved bits, merge dest/mode/vector, and fire. */
	icr_lo = (lapic->icr_lo & APIC_ICRLO_RESV_MASK) | dest_type |
		delivery_mode | vector;
	lapic->icr_lo = icr_lo;
	write_rflags(rflags);

	return 0;
}
509 
/*
 * Send an IPI with 'vector'/'delivery_mode' to a single target cpu.
 *
 * Interrupts are physically disabled around the ICR access (see the
 * WARNINGS above apic_ipi()).  icr_hi (the destination APIC ID) is
 * programmed before icr_lo, since the icr_lo write issues the command.
 */
void
single_apic_ipi(int cpu, int vector, int delivery_mode)
{
	unsigned long rflags;
	u_long  icr_lo;
	u_long  icr_hi;
	int loops = 1;

	rflags = read_rflags();
	cpu_disable_intr();
	/* Wait for delivery of any previously issued IPI. */
	while ((lapic->icr_lo & APIC_DELSTAT_MASK) != 0) {
		cpu_pause();
		if (++loops == 10000000)
			kprintf("apic_ipi stall cpu %d (sing)\n", mycpuid);
	}
	/* Set the destination field to the target cpu's APIC ID. */
	icr_hi = lapic->icr_hi & ~APIC_ID_MASK;
	icr_hi |= (CPUID_TO_APICID(cpu) << 24);
	lapic->icr_hi = icr_hi;

	/* build ICR_LOW */
	icr_lo = (lapic->icr_lo & APIC_ICRLO_RESV_MASK) |
		 APIC_DEST_DESTFLD | delivery_mode | vector;

	/* write APIC ICR */
	lapic->icr_lo = icr_lo;
	write_rflags(rflags);
}
537 
538 #if 0
539 
/*
 * Returns 0 if the apic is busy, 1 if we were able to queue the request.
 *
 * NOT WORKING YET!  The code as-is may end up not queueing an IPI at all
 * to the target, and the scheduler does not 'poll' for IPI messages.
 *
 * NOTE(review): this disabled code masks icr_lo with APIC_RESV2_MASK
 * while the active single_apic_ipi() uses APIC_ICRLO_RESV_MASK --
 * verify before ever enabling it.
 */
int
single_apic_ipi_passive(int cpu, int vector, int delivery_mode)
{
	u_long  icr_lo;
	u_long  icr_hi;
	unsigned long rflags;

	rflags = read_rflags();
	cpu_disable_intr();
	/* ICR still busy from a previous IPI: bail out without spinning. */
	if ((lapic->icr_lo & APIC_DELSTAT_MASK) != 0) {
		write_rflags(rflags);
		return(0);
	}
	/* Set the destination field to the target cpu's APIC ID. */
	icr_hi = lapic->icr_hi & ~APIC_ID_MASK;
	icr_hi |= (CPUID_TO_APICID(cpu) << 24);
	lapic->icr_hi = icr_hi;

	/* build ICR_LOW */
	icr_lo = (lapic->icr_lo & APIC_RESV2_MASK) |
		 APIC_DEST_DESTFLD | delivery_mode | vector;

	/* write APIC ICR */
	lapic->icr_lo = icr_lo;
	write_rflags(rflags);

	return(1);
}
573 
574 #endif
575 
576 /*
577  * Send APIC IPI 'vector' to 'target's via 'delivery_mode'.
578  *
579  * target is a bitmask of destination cpus.  Vector is any
580  * valid system INT vector.  Delivery mode may be either
581  * APIC_DELMODE_FIXED or APIC_DELMODE_LOWPRIO.
582  */
583 void
584 selected_apic_ipi(cpumask_t target, int vector, int delivery_mode)
585 {
586 	crit_enter();
587 	while (CPUMASK_TESTNZERO(target)) {
588 		int n = BSFCPUMASK(target);
589 		CPUMASK_NANDBIT(target, n);
590 		single_apic_ipi(n, vector, delivery_mode);
591 	}
592 	crit_exit();
593 }
594 
595 /*
596  * Timer code, in development...
597  *  - suggested by rgrimes@gndrsh.aac.dev.com
598  */
599 int
600 get_apic_timer_frequency(void)
601 {
602 	return(lapic_cputimer_intr.freq);
603 }
604 
605 /*
606  * Load a 'downcount time' in uSeconds.
607  */
608 void
609 set_apic_timer(int us)
610 {
611 	u_int count;
612 
613 	/*
614 	 * When we reach here, lapic timer's frequency
615 	 * must have been calculated as well as the
616 	 * divisor (lapic->dcr_timer is setup during the
617 	 * divisor calculation).
618 	 */
619 	KKASSERT(lapic_cputimer_intr.freq != 0 &&
620 		 lapic_timer_divisor_idx >= 0);
621 
622 	count = ((us * (int64_t)lapic_cputimer_intr.freq) + 999999) / 1000000;
623 	lapic_timer_oneshot(count);
624 }
625 
626 
627 /*
628  * Read remaining time in timer.
629  */
630 int
631 read_apic_timer(void)
632 {
633 #if 0
634 	/** XXX FIXME: we need to return the actual remaining time,
635          *         for now we just return the remaining count.
636          */
637 #else
638 	return lapic->ccr_timer;
639 #endif
640 }
641 
642 
643 /*
644  * Spin-style delay, set delay time in uS, spin till it drains.
645  */
/*
 * Spin-style delay: arm the LAPIC timer for 'count' uS and busy-wait
 * until the count drains to zero.
 */
void
u_sleep(int count)
{
	set_apic_timer(count);
	for (;;) {
		if (read_apic_timer() == 0)
			break;
	}
}
653 
654 int
655 lapic_unused_apic_id(int start)
656 {
657 	int i;
658 
659 	for (i = start; i < APICID_MAX; ++i) {
660 		if (APICID_TO_CPUID(i) == -1)
661 			return i;
662 	}
663 	return NAPICID;
664 }
665 
/*
 * Map the LAPIC register page uncacheable at the given physical
 * address and point the global 'lapic' at it.
 */
void
lapic_map(vm_paddr_t lapic_addr)
{
	lapic = pmap_mapdev_uncacheable(lapic_addr, sizeof(struct LAPIC));
}
671 
/* Registered enumerators, kept in priority order by lapic_enumerator_register(). */
static TAILQ_HEAD(, lapic_enumerator) lapic_enumerators =
	TAILQ_HEAD_INITIALIZER(lapic_enumerators);
674 
675 int
676 lapic_config(void)
677 {
678 	struct lapic_enumerator *e;
679 	int error, i, ap_max;
680 
681 	KKASSERT(lapic_enable);
682 
683 	for (i = 0; i < NAPICID; ++i)
684 		APICID_TO_CPUID(i) = -1;
685 
686 	TAILQ_FOREACH(e, &lapic_enumerators, lapic_link) {
687 		error = e->lapic_probe(e);
688 		if (!error)
689 			break;
690 	}
691 	if (e == NULL) {
692 		kprintf("LAPIC: Can't find LAPIC\n");
693 		return ENXIO;
694 	}
695 
696 	error = e->lapic_enumerate(e);
697 	if (error) {
698 		kprintf("LAPIC: enumeration failed\n");
699 		return ENXIO;
700 	}
701 
702 	ap_max = MAXCPU - 1;
703 	TUNABLE_INT_FETCH("hw.ap_max", &ap_max);
704 	if (ap_max > MAXCPU - 1)
705 		ap_max = MAXCPU - 1;
706 
707 	if (naps > ap_max) {
708 		kprintf("LAPIC: Warning use only %d out of %d "
709 			"available APs\n",
710 			ap_max, naps);
711 		naps = ap_max;
712 	}
713 
714 	return 0;
715 }
716 
717 void
718 lapic_enumerator_register(struct lapic_enumerator *ne)
719 {
720 	struct lapic_enumerator *e;
721 
722 	TAILQ_FOREACH(e, &lapic_enumerators, lapic_link) {
723 		if (e->lapic_prio < ne->lapic_prio) {
724 			TAILQ_INSERT_BEFORE(e, ne, lapic_link);
725 			return;
726 		}
727 	}
728 	TAILQ_INSERT_TAIL(&lapic_enumerators, ne, lapic_link);
729 }
730 
/* Record a bidirectional CPU ID <-> APIC ID mapping. */
void
lapic_set_cpuid(int cpu_id, int apic_id)
{
	CPUID_TO_APICID(cpu_id) = apic_id;
	APICID_TO_CPUID(apic_id) = cpu_id;
}
737 
738 void
739 lapic_fixup_noioapic(void)
740 {
741 	u_int   temp;
742 
743 	/* Only allowed on BSP */
744 	KKASSERT(mycpuid == 0);
745 	KKASSERT(!ioapic_enable);
746 
747 	temp = lapic->lvt_lint0;
748 	temp &= ~APIC_LVT_MASKED;
749 	lapic->lvt_lint0 = temp;
750 
751 	temp = lapic->lvt_lint1;
752 	temp |= APIC_LVT_MASKED;
753 	lapic->lvt_lint1 = temp;
754 }
755 
756 static void
757 lapic_sysinit(void *dummy __unused)
758 {
759 	if (lapic_enable) {
760 		int error;
761 
762 		error = lapic_config();
763 		if (error)
764 			lapic_enable = 0;
765 	}
766 
767 	if (lapic_enable) {
768 		/* Initialize BSP's local APIC */
769 		lapic_init(TRUE);
770 	} else if (ioapic_enable) {
771 		ioapic_enable = 0;
772 		icu_reinit_noioapic();
773 	}
774 }
775 SYSINIT(lapic, SI_BOOT2_LAPIC, SI_ORDER_FIRST, lapic_sysinit, NULL);
776