xref: /dragonfly/sys/platform/pc64/apic/lapic.c (revision 92fc8b5c)
1 /*
2  * Copyright (c) 1996, by Steve Passe
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. The name of the developer may NOT be used to endorse or promote products
11  *    derived from this software without specific prior written permission.
12  *
13  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23  * SUCH DAMAGE.
24  *
25  * $FreeBSD: src/sys/i386/i386/mpapic.c,v 1.37.2.7 2003/01/25 02:31:47 peter Exp $
26  */
27 
28 #include <sys/param.h>
29 #include <sys/systm.h>
30 #include <sys/kernel.h>
31 #include <sys/bus.h>
32 #include <sys/machintr.h>
33 #include <machine/globaldata.h>
34 #include <machine/smp.h>
35 #include <machine/md_var.h>
36 #include <machine/pmap.h>
37 #include <machine_base/apic/lapic.h>
38 #include <machine_base/apic/ioapic.h>
39 #include <machine_base/apic/ioapic_abi.h>
40 #include <machine/segments.h>
41 #include <sys/thread2.h>
42 
43 #include <machine/intr_machdep.h>
44 
45 #include "apicvar.h"
46 
47 extern int naps;
48 
49 volatile lapic_t *lapic;
50 
51 static void	lapic_timer_calibrate(void);
52 static void	lapic_timer_set_divisor(int);
53 static void	lapic_timer_fixup_handler(void *);
54 static void	lapic_timer_restart_handler(void *);
55 
56 void		lapic_timer_process(void);
57 void		lapic_timer_process_frame(struct intrframe *);
58 void		lapic_timer_always(struct intrframe *);
59 
60 static int	lapic_timer_enable = 1;
61 TUNABLE_INT("hw.lapic_timer_enable", &lapic_timer_enable);
62 
63 static void	lapic_timer_intr_reload(struct cputimer_intr *, sysclock_t);
64 static void	lapic_timer_intr_enable(struct cputimer_intr *);
65 static void	lapic_timer_intr_restart(struct cputimer_intr *);
66 static void	lapic_timer_intr_pmfixup(struct cputimer_intr *);
67 
68 static struct cputimer_intr lapic_cputimer_intr = {
69 	.freq = 0,
70 	.reload = lapic_timer_intr_reload,
71 	.enable = lapic_timer_intr_enable,
72 	.config = cputimer_intr_default_config,
73 	.restart = lapic_timer_intr_restart,
74 	.pmfixup = lapic_timer_intr_pmfixup,
75 	.initclock = cputimer_intr_default_initclock,
76 	.next = SLIST_ENTRY_INITIALIZER,
77 	.name = "lapic",
78 	.type = CPUTIMER_INTR_LAPIC,
79 	.prio = CPUTIMER_INTR_PRIO_LAPIC,
80 	.caps = CPUTIMER_INTR_CAP_NONE
81 };
82 
83 static int		lapic_timer_divisor_idx = -1;
84 static const uint32_t	lapic_timer_divisors[] = {
85 	APIC_TDCR_2,	APIC_TDCR_4,	APIC_TDCR_8,	APIC_TDCR_16,
86 	APIC_TDCR_32,	APIC_TDCR_64,	APIC_TDCR_128,	APIC_TDCR_1
87 };
88 #define APIC_TIMER_NDIVISORS (int)(NELEM(lapic_timer_divisors))
89 
90 /*
91  * APIC ID <-> CPU ID mapping structures.
92  */
93 int	cpu_id_to_apic_id[NAPICID];
94 int	apic_id_to_cpu_id[NAPICID];
95 
/*
 * Signal End-Of-Interrupt to the local APIC.  Any write to the EOI
 * register retires the highest-priority in-service interrupt.
 */
void
lapic_eoi(void)
{

	lapic->eoi = 0;
}
102 
103 /*
104  * Enable LAPIC, configure interrupts.
105  */
/*
 * Enable LAPIC, configure interrupts.
 *
 * Called on the BSP (bsp == TRUE) during early boot and on each AP as
 * it is brought up.  IDT vector installation happens only on the BSP
 * because the IDT is shared by all cpus; the per-LAPIC register setup
 * (LINT0/LINT1, timer LVT, TPR, SVR) runs on every cpu.
 */
void
lapic_init(boolean_t bsp)
{
	uint32_t timer;
	u_int   temp;

	/*
	 * Install vectors
	 *
	 * Since IDT is shared between BSP and APs, these vectors
	 * only need to be installed once; we do it on BSP.
	 */
	if (bsp) {
		/* Install a 'Spurious INTerrupt' vector */
		setidt(XSPURIOUSINT_OFFSET, Xspuriousint,
		    SDT_SYSIGT, SEL_KPL, 0);

		/* Install an inter-CPU IPI for TLB invalidation */
		setidt(XINVLTLB_OFFSET, Xinvltlb,
		    SDT_SYSIGT, SEL_KPL, 0);

		/* Install an inter-CPU IPI for IPIQ messaging */
		setidt(XIPIQ_OFFSET, Xipiq,
		    SDT_SYSIGT, SEL_KPL, 0);

		/* Install a timer vector */
		setidt(XTIMER_OFFSET, Xtimer,
		    SDT_SYSIGT, SEL_KPL, 0);

		/* Install an inter-CPU IPI for CPU stop/restart */
		setidt(XCPUSTOP_OFFSET, Xcpustop,
		    SDT_SYSIGT, SEL_KPL, 0);
	}

	/*
	 * Setup LINT0 as ExtINT on the BSP.  This is theoretically an
	 * aggregate interrupt input from the 8259.  The INTA cycle
	 * will be routed to the external controller (the 8259) which
	 * is expected to supply the vector.
	 *
	 * Must be setup edge triggered, active high.
	 *
	 * Disable LINT0 on BSP, if I/O APIC is enabled.
	 *
	 * Disable LINT0 on the APs.  It doesn't matter what delivery
	 * mode we use because we leave it masked.
	 */
	temp = lapic->lvt_lint0;
	temp &= ~(APIC_LVT_MASKED | APIC_LVT_TRIG_MASK |
		  APIC_LVT_POLARITY_MASK | APIC_LVT_DM_MASK);
	if (bsp) {
		temp |= APIC_LVT_DM_EXTINT;
		if (ioapic_enable)
			temp |= APIC_LVT_MASKED;
	} else {
		temp |= APIC_LVT_DM_FIXED | APIC_LVT_MASKED;
	}
	lapic->lvt_lint0 = temp;

	/*
	 * Setup LINT1 as NMI.
	 *
	 * Must be setup edge trigger, active high.
	 *
	 * Enable LINT1 on BSP, if I/O APIC is enabled.
	 *
	 * Disable LINT1 on the APs.
	 */
	temp = lapic->lvt_lint1;
	temp &= ~(APIC_LVT_MASKED | APIC_LVT_TRIG_MASK |
		  APIC_LVT_POLARITY_MASK | APIC_LVT_DM_MASK);
	temp |= APIC_LVT_MASKED | APIC_LVT_DM_NMI;
	if (bsp && ioapic_enable)
		temp &= ~APIC_LVT_MASKED;
	lapic->lvt_lint1 = temp;

	/*
	 * Mask the LAPIC error interrupt, LAPIC performance counter
	 * interrupt.
	 */
	lapic->lvt_error = lapic->lvt_error | APIC_LVT_MASKED;
	lapic->lvt_pcint = lapic->lvt_pcint | APIC_LVT_MASKED;

	/*
	 * Set LAPIC timer vector and mask the LAPIC timer interrupt.
	 * The timer is unmasked later by lapic_timer_intr_enable().
	 */
	timer = lapic->lvt_timer;
	timer &= ~APIC_LVTT_VECTOR;
	timer |= XTIMER_OFFSET;
	timer |= APIC_LVTT_MASKED;
	lapic->lvt_timer = timer;

	/*
	 * Set the Task Priority Register as needed.   At the moment allow
	 * interrupts on all cpus (the APs will remain CLId until they are
	 * ready to deal).
	 */
	temp = lapic->tpr;
	temp &= ~APIC_TPR_PRIO;		/* clear priority field */
	lapic->tpr = temp;

	/*
	 * Enable the LAPIC
	 */
	temp = lapic->svr;
	temp |= APIC_SVR_ENABLE;	/* enable the LAPIC */
	temp &= ~APIC_SVR_FOCUS_DISABLE; /* enable lopri focus processor */

	/*
	 * Set the spurious interrupt vector.  The low 4 bits of the vector
	 * must be 1111.
	 */
	if ((XSPURIOUSINT_OFFSET & 0x0F) != 0x0F)
		panic("bad XSPURIOUSINT_OFFSET: 0x%08x", XSPURIOUSINT_OFFSET);
	temp &= ~APIC_SVR_VECTOR;
	temp |= XSPURIOUSINT_OFFSET;

	lapic->svr = temp;

	/*
	 * Pump out a few EOIs to clean out interrupts that got through
	 * before we were able to set the TPR.
	 */
	lapic_eoi();
	lapic_eoi();
	lapic_eoi();

	/*
	 * BSP calibrates the timer once and optionally registers the
	 * LAPIC as the interrupt cputimer; APs just reuse the divisor
	 * the BSP settled on.
	 */
	if (bsp) {
		lapic_timer_calibrate();
		if (lapic_timer_enable) {
			cputimer_intr_register(&lapic_cputimer_intr);
			cputimer_intr_select(&lapic_cputimer_intr, 0);
		}
	} else {
		lapic_timer_set_divisor(lapic_timer_divisor_idx);
	}

	if (bootverbose)
		apic_dump("apic_initialize()");
}
246 
/*
 * Program the LAPIC timer divide configuration register.
 * divisor_idx must be a valid index into lapic_timer_divisors[].
 */
static void
lapic_timer_set_divisor(int divisor_idx)
{
	KKASSERT(divisor_idx >= 0 && divisor_idx < APIC_TIMER_NDIVISORS);
	lapic->dcr_timer = lapic_timer_divisors[divisor_idx];
}
253 
/*
 * Arm the LAPIC timer in one-shot mode with the given initial count.
 * The periodic bit is cleared first; writing the initial-count
 * register then starts the countdown.
 */
static void
lapic_timer_oneshot(u_int count)
{
	uint32_t value;

	value = lapic->lvt_timer;
	value &= ~APIC_LVTT_PERIODIC;
	lapic->lvt_timer = value;
	lapic->icr_timer = count;
}
264 
/*
 * Fast-path re-arm: write only the initial count, assuming one-shot
 * mode was already set up in the timer LVT.
 */
static void
lapic_timer_oneshot_quick(u_int count)
{
	lapic->icr_timer = count;
}
270 
/*
 * Calibrate the LAPIC timer.
 *
 * Walk the divisor table, arm a maximum-count one-shot and busy-wait
 * two seconds; accept the first divisor for which the counter has not
 * run all the way down (i.e. the elapsed ticks stayed below the max
 * count).  The ticks consumed over the 2 second DELAY, halved, give
 * the timer frequency in Hz.  Panics if no divisor works.
 */
static void
lapic_timer_calibrate(void)
{
	sysclock_t value;

	/* Try to calibrate the local APIC timer. */
	for (lapic_timer_divisor_idx = 0;
	     lapic_timer_divisor_idx < APIC_TIMER_NDIVISORS;
	     lapic_timer_divisor_idx++) {
		lapic_timer_set_divisor(lapic_timer_divisor_idx);
		lapic_timer_oneshot(APIC_TIMER_MAX_COUNT);
		DELAY(2000000);
		value = APIC_TIMER_MAX_COUNT - lapic->ccr_timer;
		if (value != APIC_TIMER_MAX_COUNT)
			break;
	}
	if (lapic_timer_divisor_idx >= APIC_TIMER_NDIVISORS)
		panic("lapic: no proper timer divisor?!\n");
	lapic_cputimer_intr.freq = value / 2;	/* ticks over 2 seconds */

	kprintf("lapic: divisor index %d, frequency %u Hz\n",
		lapic_timer_divisor_idx, lapic_cputimer_intr.freq);
}
294 
/*
 * Per-cpu LAPIC timer interrupt processing: mark the one-shot as no
 * longer running (a fresh reload must re-arm it) and dispatch pending
 * systimers for this cpu against the current sys_cputimer count.
 * 'frame' may be NULL when not called from an interrupt frame.
 */
static void
lapic_timer_process_oncpu(struct globaldata *gd, struct intrframe *frame)
{
	sysclock_t count;

	gd->gd_timer_running = 0;

	count = sys_cputimer->count();
	if (TAILQ_FIRST(&gd->gd_systimerq) != NULL)
		systimer_intr(&count, 0, frame);
}
306 
307 void
308 lapic_timer_process(void)
309 {
310 	lapic_timer_process_oncpu(mycpu, NULL);
311 }
312 
313 void
314 lapic_timer_process_frame(struct intrframe *frame)
315 {
316 	lapic_timer_process_oncpu(mycpu, frame);
317 }
318 
319 /*
320  * This manual debugging code is called unconditionally from Xtimer
321  * (the lapic timer interrupt) whether the current thread is in a
322  * critical section or not) and can be useful in tracking down lockups.
323  *
324  * NOTE: MANUAL DEBUG CODE
325  */
326 #if 0
327 static int saveticks[SMP_MAXCPU];
328 static int savecounts[SMP_MAXCPU];
329 #endif
330 
/*
 * Called unconditionally from Xtimer; the body is a no-op unless the
 * manual debug code above is enabled (#if 0).  When enabled it paints
 * per-cpu status directly into VGA text memory and can watchdog stuck
 * cpus via the tick counters.
 */
void
lapic_timer_always(struct intrframe *frame)
{
#if 0
	globaldata_t gd = mycpu;
	int cpu = gd->gd_cpuid;
	char buf[64];
	short *gptr;
	int i;

	if (cpu <= 20) {
		gptr = (short *)0xFFFFFFFF800b8000 + 80 * cpu;
		*gptr = ((*gptr + 1) & 0x00FF) | 0x0700;
		++gptr;

		ksnprintf(buf, sizeof(buf), " %p %16s %d %16s ",
		    (void *)frame->if_rip, gd->gd_curthread->td_comm, ticks,
		    gd->gd_infomsg);
		for (i = 0; buf[i]; ++i) {
			gptr[i] = 0x0700 | (unsigned char)buf[i];
		}
	}
#if 0
	if (saveticks[gd->gd_cpuid] != ticks) {
		saveticks[gd->gd_cpuid] = ticks;
		savecounts[gd->gd_cpuid] = 0;
	}
	++savecounts[gd->gd_cpuid];
	if (savecounts[gd->gd_cpuid] > 2000 && panicstr == NULL) {
		panic("cpud %d panicing on ticks failure",
			gd->gd_cpuid);
	}
	for (i = 0; i < ncpus; ++i) {
		int delta;
		if (saveticks[i] && panicstr == NULL) {
			delta = saveticks[i] - ticks;
			if (delta < -10 || delta > 10) {
				panic("cpu %d panicing on cpu %d watchdog",
				      gd->gd_cpuid, i);
			}
		}
	}
#endif
#endif
}
376 
377 static void
378 lapic_timer_intr_reload(struct cputimer_intr *cti, sysclock_t reload)
379 {
380 	struct globaldata *gd = mycpu;
381 
382 	reload = (int64_t)reload * cti->freq / sys_cputimer->freq;
383 	if (reload < 2)
384 		reload = 2;
385 
386 	if (gd->gd_timer_running) {
387 		if (reload < lapic->ccr_timer)
388 			lapic_timer_oneshot_quick(reload);
389 	} else {
390 		gd->gd_timer_running = 1;
391 		lapic_timer_oneshot_quick(reload);
392 	}
393 }
394 
/*
 * cputimer_intr enable hook: unmask the LAPIC timer LVT, force
 * one-shot mode, then apply the AMD C1E fixup on this cpu if needed.
 */
static void
lapic_timer_intr_enable(struct cputimer_intr *cti __unused)
{
	uint32_t timer;

	timer = lapic->lvt_timer;
	timer &= ~(APIC_LVTT_MASKED | APIC_LVTT_PERIODIC);
	lapic->lvt_timer = timer;

	lapic_timer_fixup_handler(NULL);
}
406 
/*
 * Work around cpu errata that break the LAPIC timer; currently only
 * the AMD C1E problem is handled.  If 'arg' is non-NULL it points to
 * an int that is set to 1 when the fixup had to kick-start the timer,
 * 0 otherwise.  Runs on the cpu being fixed up (called directly or
 * via IPI).
 */
static void
lapic_timer_fixup_handler(void *arg)
{
	int *started = arg;

	if (started != NULL)
		*started = 0;

	if (strcmp(cpu_vendor, "AuthenticAMD") == 0) {
		/*
		 * Detect the presence of C1E capability mostly on latest
		 * dual-cores (or future) k8 family.  This feature renders
		 * the local APIC timer dead, so we disable it by reading
		 * the Interrupt Pending Message register and clearing both
		 * C1eOnCmpHalt (bit 28) and SmiOnCmpHalt (bit 27).
		 *
		 * Reference:
		 *   "BIOS and Kernel Developer's Guide for AMD NPT
		 *    Family 0Fh Processors"
		 *   #32559 revision 3.00
		 */
		if ((cpu_id & 0x00000f00) == 0x00000f00 &&
		    (cpu_id & 0x0fff0000) >= 0x00040000) {
			uint64_t msr;

			/* 0xc0010055: Interrupt Pending Message register */
			msr = rdmsr(0xc0010055);
			if (msr & 0x18000000) {
				struct globaldata *gd = mycpu;

				kprintf("cpu%d: AMD C1E detected\n",
					gd->gd_cpuid);
				wrmsr(0xc0010055, msr & ~0x18000000ULL);

				/*
				 * We are kinda stalled;
				 * kick start again.
				 */
				gd->gd_timer_running = 1;
				lapic_timer_oneshot_quick(2);

				if (started != NULL)
					*started = 1;
			}
		}
	}
}
453 
454 static void
455 lapic_timer_restart_handler(void *dummy __unused)
456 {
457 	int started;
458 
459 	lapic_timer_fixup_handler(&started);
460 	if (!started) {
461 		struct globaldata *gd = mycpu;
462 
463 		gd->gd_timer_running = 1;
464 		lapic_timer_oneshot_quick(2);
465 	}
466 }
467 
468 /*
469  * This function is called only by ACPI-CA code currently:
470  * - AMD C1E fixup.  AMD C1E only seems to happen after ACPI
471  *   module controls PM.  So once ACPI-CA is attached, we try
472  *   to apply the fixup to prevent LAPIC timer from hanging.
473  */
474 static void
475 lapic_timer_intr_pmfixup(struct cputimer_intr *cti __unused)
476 {
477 	lwkt_send_ipiq_mask(smp_active_mask,
478 			    lapic_timer_fixup_handler, NULL);
479 }
480 
481 static void
482 lapic_timer_intr_restart(struct cputimer_intr *cti __unused)
483 {
484 	lwkt_send_ipiq_mask(smp_active_mask, lapic_timer_restart_handler, NULL);
485 }
486 
487 
488 /*
489  * dump contents of local APIC registers
490  */
void
apic_dump(char* str)
{
	/* 'str' identifies the caller in the dump output */
	kprintf("SMP: CPU%d %s:\n", mycpu->gd_cpuid, str);
	kprintf("     lint0: 0x%08x lint1: 0x%08x TPR: 0x%08x SVR: 0x%08x\n",
		lapic->lvt_lint0, lapic->lvt_lint1, lapic->tpr, lapic->svr);
}
498 
499 /*
500  * Inter Processor Interrupt functions.
501  */
502 
503 /*
504  * Send APIC IPI 'vector' to 'destType' via 'deliveryMode'.
505  *
506  *  destType is 1 of: APIC_DEST_SELF, APIC_DEST_ALLISELF, APIC_DEST_ALLESELF
507  *  vector is any valid SYSTEM INT vector
508  *  delivery_mode is 1 of: APIC_DELMODE_FIXED, APIC_DELMODE_LOWPRIO
509  *
510  * A backlog of requests can create a deadlock between cpus.  To avoid this
511  * we have to be able to accept IPIs at the same time we are trying to send
512  * them.  The critical section prevents us from attempting to send additional
513  * IPIs reentrantly, but also prevents IPIQ processing so we have to call
514  * lwkt_process_ipiq() manually.  It's rather messy and expensive for this
515  * to occur but fortunately it does not happen too often.
516  */
int
apic_ipi(int dest_type, int vector, int delivery_mode)
{
	u_long  icr_lo;

	crit_enter();
	if ((lapic->icr_lo & APIC_DELSTAT_MASK) != 0) {
	    /*
	     * Previous IPI still pending: enable interrupts and drain
	     * our own IPIQ while waiting, to avoid a cross-cpu send
	     * deadlock (see the function header comment).
	     */
	    unsigned long rflags = read_rflags();
	    cpu_enable_intr();
	    DEBUG_PUSH_INFO("apic_ipi");
	    while ((lapic->icr_lo & APIC_DELSTAT_MASK) != 0) {
		lwkt_process_ipiq();
	    }
	    DEBUG_POP_INFO();
	    write_rflags(rflags);
	}

	/* Program ICR low; this write actually sends the IPI */
	icr_lo = (lapic->icr_lo & APIC_ICRLO_RESV_MASK) | dest_type |
		delivery_mode | vector;
	lapic->icr_lo = icr_lo;
	crit_exit();
	return 0;
}
540 
/*
 * Send IPI 'vector' to a single cpu via 'delivery_mode'.  Waits (while
 * draining our own IPIQ) for any previous IPI to finish delivery, then
 * programs ICR high with the target's APIC ID before triggering the
 * send through ICR low.
 */
void
single_apic_ipi(int cpu, int vector, int delivery_mode)
{
	u_long  icr_lo;
	u_long  icr_hi;

	crit_enter();
	if ((lapic->icr_lo & APIC_DELSTAT_MASK) != 0) {
	    /* See apic_ipi(): avoid cross-cpu IPI send deadlock */
	    unsigned long rflags = read_rflags();
	    cpu_enable_intr();
	    DEBUG_PUSH_INFO("single_apic_ipi");
	    while ((lapic->icr_lo & APIC_DELSTAT_MASK) != 0) {
		lwkt_process_ipiq();
	    }
	    DEBUG_POP_INFO();
	    write_rflags(rflags);
	}
	/* Select the destination APIC ID (bits 24-31 of ICR high) */
	icr_hi = lapic->icr_hi & ~APIC_ID_MASK;
	icr_hi |= (CPUID_TO_APICID(cpu) << 24);
	lapic->icr_hi = icr_hi;

	/* build ICR_LOW */
	icr_lo = (lapic->icr_lo & APIC_ICRLO_RESV_MASK)
	    | APIC_DEST_DESTFLD | delivery_mode | vector;

	/* write APIC ICR */
	lapic->icr_lo = icr_lo;
	crit_exit();
}
570 
571 #if 0
572 
573 /*
574  * Returns 0 if the apic is busy, 1 if we were able to queue the request.
575  *
576  * NOT WORKING YET!  The code as-is may end up not queueing an IPI at all
577  * to the target, and the scheduler does not 'poll' for IPI messages.
578  */
int
single_apic_ipi_passive(int cpu, int vector, int delivery_mode)
{
	u_long  icr_lo;
	u_long  icr_hi;

	crit_enter();
	/* Non-blocking: bail out instead of waiting for delivery */
	if ((lapic->icr_lo & APIC_DELSTAT_MASK) != 0) {
	    crit_exit();
	    return(0);
	}
	/* Select the destination APIC ID (bits 24-31 of ICR high) */
	icr_hi = lapic->icr_hi & ~APIC_ID_MASK;
	icr_hi |= (CPUID_TO_APICID(cpu) << 24);
	lapic->icr_hi = icr_hi;

	/*
	 * build ICR_LOW
	 *
	 * NOTE(review): uses APIC_RESV2_MASK where single_apic_ipi()
	 * uses APIC_ICRLO_RESV_MASK -- confirm which is intended if
	 * this disabled code is ever enabled.
	 */
	icr_lo = (lapic->icr_lo & APIC_RESV2_MASK)
	    | APIC_DEST_DESTFLD | delivery_mode | vector;

	/* write APIC ICR */
	lapic->icr_lo = icr_lo;
	crit_exit();
	return(1);
}
603 
604 #endif
605 
606 /*
607  * Send APIC IPI 'vector' to 'target's via 'delivery_mode'.
608  *
609  * target is a bitmask of destination cpus.  Vector is any
610  * valid system INT vector.  Delivery mode may be either
611  * APIC_DELMODE_FIXED or APIC_DELMODE_LOWPRIO.
612  */
613 void
614 selected_apic_ipi(cpumask_t target, int vector, int delivery_mode)
615 {
616 	crit_enter();
617 	while (target) {
618 		int n = BSFCPUMASK(target);
619 		target &= ~CPUMASK(n);
620 		single_apic_ipi(n, vector, delivery_mode);
621 	}
622 	crit_exit();
623 }
624 
625 /*
626  * Timer code, in development...
627  *  - suggested by rgrimes@gndrsh.aac.dev.com
628  */
629 int
630 get_apic_timer_frequency(void)
631 {
632 	return(lapic_cputimer_intr.freq);
633 }
634 
635 /*
636  * Load a 'downcount time' in uSeconds.
637  */
638 void
639 set_apic_timer(int us)
640 {
641 	u_int count;
642 
643 	/*
644 	 * When we reach here, lapic timer's frequency
645 	 * must have been calculated as well as the
646 	 * divisor (lapic->dcr_timer is setup during the
647 	 * divisor calculation).
648 	 */
649 	KKASSERT(lapic_cputimer_intr.freq != 0 &&
650 		 lapic_timer_divisor_idx >= 0);
651 
652 	count = ((us * (int64_t)lapic_cputimer_intr.freq) + 999999) / 1000000;
653 	lapic_timer_oneshot(count);
654 }
655 
656 
657 /*
658  * Read remaining time in timer.
659  */
660 int
661 read_apic_timer(void)
662 {
663 #if 0
664 	/** XXX FIXME: we need to return the actual remaining time,
665          *         for now we just return the remaining count.
666          */
667 #else
668 	return lapic->ccr_timer;
669 #endif
670 }
671 
672 
673 /*
674  * Spin-style delay, set delay time in uS, spin till it drains.
675  */
/*
 * Spin-style delay: arm the LAPIC timer for 'count' microseconds and
 * busy-wait until the countdown reaches zero.
 */
void
u_sleep(int count)
{
	set_apic_timer(count);
	while (read_apic_timer() != 0)
		;	/* spin */
}
683 
684 int
685 lapic_unused_apic_id(int start)
686 {
687 	int i;
688 
689 	for (i = start; i < NAPICID; ++i) {
690 		if (APICID_TO_CPUID(i) == -1)
691 			return i;
692 	}
693 	return NAPICID;
694 }
695 
/*
 * Map the LAPIC register window at physical address 'lapic_addr' into
 * kernel virtual memory (uncacheable) and point the global 'lapic' at
 * it.
 */
void
lapic_map(vm_offset_t lapic_addr)
{
	lapic = pmap_mapdev_uncacheable(lapic_addr, sizeof(struct LAPIC));

	kprintf("lapic: at 0x%08lx\n", lapic_addr);
}
703 
704 static TAILQ_HEAD(, lapic_enumerator) lapic_enumerators =
705 	TAILQ_HEAD_INITIALIZER(lapic_enumerators);
706 
707 int
708 lapic_config(void)
709 {
710 	struct lapic_enumerator *e;
711 	int error, i, enable, ap_max;
712 
713 	for (i = 0; i < NAPICID; ++i)
714 		APICID_TO_CPUID(i) = -1;
715 
716 	enable = 1;
717 	TUNABLE_INT_FETCH("hw.lapic_enable", &enable);
718 	if (!enable) {
719 		kprintf("LAPIC: Warning LAPIC is disabled\n");
720 		return ENXIO;
721 	}
722 
723 	TAILQ_FOREACH(e, &lapic_enumerators, lapic_link) {
724 		error = e->lapic_probe(e);
725 		if (!error)
726 			break;
727 	}
728 	if (e == NULL) {
729 		kprintf("LAPIC: Can't find LAPIC\n");
730 		return ENXIO;
731 	}
732 
733 	e->lapic_enumerate(e);
734 
735 	ap_max = MAXCPU - 1;
736 	TUNABLE_INT_FETCH("hw.ap_max", &ap_max);
737 	if (ap_max > MAXCPU - 1)
738 		ap_max = MAXCPU - 1;
739 
740 	if (naps > ap_max) {
741 		kprintf("LAPIC: Warning use only %d out of %d "
742 			"available APs\n",
743 			ap_max, naps);
744 		naps = ap_max;
745 	}
746 
747 	return 0;
748 }
749 
750 void
751 lapic_enumerator_register(struct lapic_enumerator *ne)
752 {
753 	struct lapic_enumerator *e;
754 
755 	TAILQ_FOREACH(e, &lapic_enumerators, lapic_link) {
756 		if (e->lapic_prio < ne->lapic_prio) {
757 			TAILQ_INSERT_BEFORE(e, ne, lapic_link);
758 			return;
759 		}
760 	}
761 	TAILQ_INSERT_TAIL(&lapic_enumerators, ne, lapic_link);
762 }
763 
764 void
765 lapic_set_cpuid(int cpu_id, int apic_id)
766 {
767 	CPUID_TO_APICID(cpu_id) = apic_id;
768 	APICID_TO_CPUID(apic_id) = cpu_id;
769 }
770 
/*
 * Adjust the LINT0/LINT1 setup from lapic_init() for systems running
 * without an I/O APIC: unmask LINT0 (the 8259 ExtINT path) and mask
 * LINT1 (NMI) on the BSP.  Must only be called on the BSP with the
 * I/O APIC disabled.
 */
void
lapic_fixup_noioapic(void)
{
	u_int   temp;

	/* Only allowed on BSP */
	KKASSERT(mycpuid == 0);
	KKASSERT(!ioapic_enable);

	temp = lapic->lvt_lint0;
	temp &= ~APIC_LVT_MASKED;
	lapic->lvt_lint0 = temp;

	temp = lapic->lvt_lint1;
	temp |= APIC_LVT_MASKED;
	lapic->lvt_lint1 = temp;
}
788