xref: /dragonfly/sys/platform/pc64/apic/lapic.c (revision fb151170)
1 /*
2  * Copyright (c) 1996, by Steve Passe
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. The name of the developer may NOT be used to endorse or promote products
11  *    derived from this software without specific prior written permission.
12  *
13  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23  * SUCH DAMAGE.
24  *
25  * $FreeBSD: src/sys/i386/i386/mpapic.c,v 1.37.2.7 2003/01/25 02:31:47 peter Exp $
26  */
27 
28 #include <sys/param.h>
29 #include <sys/systm.h>
30 #include <sys/kernel.h>
31 #include <sys/bus.h>
32 #include <sys/machintr.h>
33 #include <machine/globaldata.h>
34 #include <machine/smp.h>
35 #include <machine/md_var.h>
36 #include <machine/pmap.h>
37 #include <machine/specialreg.h>
38 #include <machine_base/apic/lapic.h>
39 #include <machine_base/apic/ioapic.h>
40 #include <machine_base/apic/ioapic_abi.h>
41 #include <machine_base/apic/apicvar.h>
42 #include <machine_base/icu/icu_var.h>
43 #include <machine/segments.h>
44 #include <sys/thread2.h>
45 
46 #include <machine/cputypes.h>
47 #include <machine/intr_machdep.h>
48 
49 extern int naps;
50 
51 volatile lapic_t *lapic;
52 
53 static void	lapic_timer_calibrate(void);
54 static void	lapic_timer_set_divisor(int);
55 static void	lapic_timer_fixup_handler(void *);
56 static void	lapic_timer_restart_handler(void *);
57 
58 void		lapic_timer_process(void);
59 void		lapic_timer_process_frame(struct intrframe *);
60 void		lapic_timer_always(struct intrframe *);
61 
62 static int	lapic_timer_enable = 1;
63 TUNABLE_INT("hw.lapic_timer_enable", &lapic_timer_enable);
64 
65 static void	lapic_timer_intr_reload(struct cputimer_intr *, sysclock_t);
66 static void	lapic_timer_intr_enable(struct cputimer_intr *);
67 static void	lapic_timer_intr_restart(struct cputimer_intr *);
68 static void	lapic_timer_intr_pmfixup(struct cputimer_intr *);
69 
70 static struct cputimer_intr lapic_cputimer_intr = {
71 	.freq = 0,
72 	.reload = lapic_timer_intr_reload,
73 	.enable = lapic_timer_intr_enable,
74 	.config = cputimer_intr_default_config,
75 	.restart = lapic_timer_intr_restart,
76 	.pmfixup = lapic_timer_intr_pmfixup,
77 	.initclock = cputimer_intr_default_initclock,
78 	.next = SLIST_ENTRY_INITIALIZER,
79 	.name = "lapic",
80 	.type = CPUTIMER_INTR_LAPIC,
81 	.prio = CPUTIMER_INTR_PRIO_LAPIC,
82 	.caps = CPUTIMER_INTR_CAP_NONE
83 };
84 
85 static int		lapic_timer_divisor_idx = -1;
86 static const uint32_t	lapic_timer_divisors[] = {
87 	APIC_TDCR_2,	APIC_TDCR_4,	APIC_TDCR_8,	APIC_TDCR_16,
88 	APIC_TDCR_32,	APIC_TDCR_64,	APIC_TDCR_128,	APIC_TDCR_1
89 };
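/*
 * Note: lapic_timer_calibrate() walks this table in array order and settles
 * on the first divisor slow enough that APIC_TIMER_MAX_COUNT does not expire
 * within its ~2 second measurement window.
 */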
90 #define APIC_TIMER_NDIVISORS (int)(NELEM(lapic_timer_divisors))
91 
92 /*
93  * APIC ID <-> CPU ID mapping structures.
94  */
95 int	cpu_id_to_apic_id[NAPICID];
96 int	apic_id_to_cpu_id[NAPICID];
97 int	lapic_enable = 1;
98 
99 /*
100  * Enable LAPIC, configure interrupts.
101  */
102 void
103 lapic_init(boolean_t bsp)
104 {
105 	uint32_t timer;
106 	u_int   temp;
107 
108 	/*
109 	 * Install vectors
110 	 *
111 	 * Since IDT is shared between BSP and APs, these vectors
112 	 * only need to be installed once; we do it on BSP.
113 	 */
114 	if (bsp) {
115 		if (cpu_vendor_id == CPU_VENDOR_AMD &&
116 		    CPUID_TO_FAMILY(cpu_id) >= 0xf) {
117 			uint32_t tcr;
118 
119 			/*
120 			 * Set the LINTEN bit in the HyperTransport
121 			 * Transaction Control Register.
122 			 *
123 			 * This will cause EXTINT and NMI interrupts
124 			 * routed over the hypertransport bus to be
125 			 * fed into the LAPIC LINT0/LINT1.  If the bit
126 			 * isn't set, the interrupts will go to the
127 			 * general cpu INTR/NMI pins.  On a dual-core
128 			 * cpu the interrupt winds up going to BOTH cpus.
129 			 * The first cpu that does the interrupt ack
130 			 * cycle will get the correct interrupt.  The
131 			 * second cpu that does it will get a spurious
132 			 * interrupt vector (typically IRQ 7).
133 			 */
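			/*
			 * PCI config mechanism #1 access to node 0's
			 * HT configuration space: the dword written to
			 * 0x0cf8 below works out to 0x8000C068
			 * (enable | bus 0 | dev 0x18 | func 0 | reg 0x68).
			 */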
134 			outl(0x0cf8,
135 			    (1 << 31) |	/* enable */
136 			    (0 << 16) |	/* bus */
137 			    (0x18 << 11) | /* dev (cpu + 0x18) */
138 			    (0 << 8) |	/* func */
139 			    0x68	/* reg */
140 			    );
141 			tcr = inl(0xcfc);
142 			if ((tcr & 0x00010000) == 0) {
143 				kprintf("LAPIC: AMD LINTEN on\n");
144 				outl(0xcfc, tcr|0x00010000);
145 			}
146 			outl(0x0cf8, 0);
147 		}
148 
149 		/* Install a 'Spurious INTerrupt' vector */
150 		setidt_global(XSPURIOUSINT_OFFSET, Xspuriousint,
151 		    SDT_SYSIGT, SEL_KPL, 0);
152 
153 		/* Install a timer vector */
154 		setidt_global(XTIMER_OFFSET, Xtimer,
155 		    SDT_SYSIGT, SEL_KPL, 0);
156 
157 #ifdef SMP
158 		/* Install an inter-CPU IPI for TLB invalidation */
159 		setidt_global(XINVLTLB_OFFSET, Xinvltlb,
160 		    SDT_SYSIGT, SEL_KPL, 0);
161 
162 		/* Install an inter-CPU IPI for IPIQ messaging */
163 		setidt_global(XIPIQ_OFFSET, Xipiq,
164 		    SDT_SYSIGT, SEL_KPL, 0);
165 
166 		/* Install an inter-CPU IPI for CPU stop/restart */
167 		setidt_global(XCPUSTOP_OFFSET, Xcpustop,
168 		    SDT_SYSIGT, SEL_KPL, 0);
169 #endif
170 	}
171 
172 	/*
173 	 * Set up LINT0 as ExtINT on the BSP.  This is theoretically an
174 	 * aggregate interrupt input from the 8259.  The INTA cycle
175 	 * will be routed to the external controller (the 8259) which
176 	 * is expected to supply the vector.
177 	 *
178 	 * Must be set up edge triggered, active high.
179 	 *
180 	 * Disable LINT0 on BSP, if I/O APIC is enabled.
181 	 *
182 	 * Disable LINT0 on the APs.  It doesn't matter what delivery
183 	 * mode we use because we leave it masked.
184 	 */
185 	temp = lapic->lvt_lint0;
186 	temp &= ~(APIC_LVT_MASKED | APIC_LVT_TRIG_MASK |
187 		  APIC_LVT_POLARITY_MASK | APIC_LVT_DM_MASK);
188 	if (bsp) {
189 		temp |= APIC_LVT_DM_EXTINT;
190 		if (ioapic_enable)
191 			temp |= APIC_LVT_MASKED;
192 	} else {
193 		temp |= APIC_LVT_DM_FIXED | APIC_LVT_MASKED;
194 	}
195 	lapic->lvt_lint0 = temp;
196 
197 	/*
198 	 * Set up LINT1 as NMI.
199 	 *
200 	 * Must be set up edge triggered, active high.
201 	 *
202 	 * Enable LINT1 on BSP, if I/O APIC is enabled.
203 	 *
204 	 * Disable LINT1 on the APs.
205 	 */
206 	temp = lapic->lvt_lint1;
207 	temp &= ~(APIC_LVT_MASKED | APIC_LVT_TRIG_MASK |
208 		  APIC_LVT_POLARITY_MASK | APIC_LVT_DM_MASK);
209 	temp |= APIC_LVT_MASKED | APIC_LVT_DM_NMI;
210 	if (bsp && ioapic_enable)
211 		temp &= ~APIC_LVT_MASKED;
212 	lapic->lvt_lint1 = temp;
213 
214 	/*
215 	 * Mask the LAPIC error interrupt, LAPIC performance counter
216 	 * interrupt.
217 	 */
218 	lapic->lvt_error = lapic->lvt_error | APIC_LVT_MASKED;
219 	lapic->lvt_pcint = lapic->lvt_pcint | APIC_LVT_MASKED;
220 
221 	/*
222 	 * Set LAPIC timer vector and mask the LAPIC timer interrupt.
223 	 */
224 	timer = lapic->lvt_timer;
225 	timer &= ~APIC_LVTT_VECTOR;
226 	timer |= XTIMER_OFFSET;
227 	timer |= APIC_LVTT_MASKED;
228 	lapic->lvt_timer = timer;
229 
230 	/*
231 	 * Set the Task Priority Register as needed.  At the moment allow
232 	 * interrupts on all cpus (the APs will remain CLId until they are
233 	 * ready to deal with them).
234 	 */
235 	temp = lapic->tpr;
236 	temp &= ~APIC_TPR_PRIO;		/* clear priority field */
237 	lapic->tpr = temp;
238 
239 	/*
240 	 * Enable the LAPIC
241 	 */
242 	temp = lapic->svr;
243 	temp |= APIC_SVR_ENABLE;	/* enable the LAPIC */
244 	temp &= ~APIC_SVR_FOCUS_DISABLE; /* enable lopri focus processor */
245 
246 	/*
247 	 * Set the spurious interrupt vector.  The low 4 bits of the vector
248 	 * must be 1111.
249 	 */
250 	if ((XSPURIOUSINT_OFFSET & 0x0F) != 0x0F)
251 		panic("bad XSPURIOUSINT_OFFSET: 0x%08x", XSPURIOUSINT_OFFSET);
252 	temp &= ~APIC_SVR_VECTOR;
253 	temp |= XSPURIOUSINT_OFFSET;
254 
255 	lapic->svr = temp;
256 
257 	/*
258 	 * Pump out a few EOIs to clean out interrupts that got through
259 	 * before we were able to set the TPR.
260 	 */
261 	lapic->eoi = 0;
262 	lapic->eoi = 0;
263 	lapic->eoi = 0;
264 
265 	if (bsp) {
266 		lapic_timer_calibrate();
267 		if (lapic_timer_enable) {
268 			cputimer_intr_register(&lapic_cputimer_intr);
269 			cputimer_intr_select(&lapic_cputimer_intr, 0);
270 		}
271 	} else {
272 		lapic_timer_set_divisor(lapic_timer_divisor_idx);
273 	}
274 
275 	if (bootverbose)
276 		apic_dump("apic_initialize()");
277 }
278 
279 static void
280 lapic_timer_set_divisor(int divisor_idx)
281 {
282 	KKASSERT(divisor_idx >= 0 && divisor_idx < APIC_TIMER_NDIVISORS);
283 	lapic->dcr_timer = lapic_timer_divisors[divisor_idx];
284 }
285 
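/*
 * Arm the LAPIC timer for a single countdown: clear the periodic bit in
 * the timer LVT and load the initial count register.
 */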
286 static void
287 lapic_timer_oneshot(u_int count)
288 {
289 	uint32_t value;
290 
291 	value = lapic->lvt_timer;
292 	value &= ~APIC_LVTT_PERIODIC;
293 	lapic->lvt_timer = value;
294 	lapic->icr_timer = count;
295 }
296 
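/*
 * Fast-path variant: assumes the timer LVT is already programmed for
 * one-shot mode and only rearms the initial count register.
 */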
297 static void
298 lapic_timer_oneshot_quick(u_int count)
299 {
300 	lapic->icr_timer = count;
301 }
302 
303 static void
304 lapic_timer_calibrate(void)
305 {
306 	sysclock_t value;
307 
308 	/* Try to calibrate the local APIC timer. */
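	/*
	 * For each divisor, start a one-shot countdown from
	 * APIC_TIMER_MAX_COUNT, wait roughly two seconds (DELAY(2000000)
	 * is in microseconds), and read back how far the current count
	 * register advanced.  The ticks consumed over those two seconds
	 * give the timer frequency, hence the "value / 2" below.
	 */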
309 	for (lapic_timer_divisor_idx = 0;
310 	     lapic_timer_divisor_idx < APIC_TIMER_NDIVISORS;
311 	     lapic_timer_divisor_idx++) {
312 		lapic_timer_set_divisor(lapic_timer_divisor_idx);
313 		lapic_timer_oneshot(APIC_TIMER_MAX_COUNT);
314 		DELAY(2000000);
315 		value = APIC_TIMER_MAX_COUNT - lapic->ccr_timer;
316 		if (value != APIC_TIMER_MAX_COUNT)
317 			break;
318 	}
319 	if (lapic_timer_divisor_idx >= APIC_TIMER_NDIVISORS)
320 		panic("lapic: no proper timer divisor?!\n");
321 	lapic_cputimer_intr.freq = value / 2;
322 
323 	kprintf("lapic: divisor index %d, frequency %u Hz\n",
324 		lapic_timer_divisor_idx, lapic_cputimer_intr.freq);
325 }
326 
327 static void
328 lapic_timer_process_oncpu(struct globaldata *gd, struct intrframe *frame)
329 {
330 	sysclock_t count;
331 
332 	gd->gd_timer_running = 0;
333 
334 	count = sys_cputimer->count();
335 	if (TAILQ_FIRST(&gd->gd_systimerq) != NULL)
336 		systimer_intr(&count, 0, frame);
337 }
338 
339 void
340 lapic_timer_process(void)
341 {
342 	lapic_timer_process_oncpu(mycpu, NULL);
343 }
344 
345 void
346 lapic_timer_process_frame(struct intrframe *frame)
347 {
348 	lapic_timer_process_oncpu(mycpu, frame);
349 }
350 
351 /*
352  * This manual debugging code is called unconditionally from Xtimer
353  * (the lapic timer interrupt), whether or not the current thread is in
354  * a critical section, and can be useful in tracking down lockups.
355  *
356  * NOTE: MANUAL DEBUG CODE
357  */
358 #if 0
359 static int saveticks[SMP_MAXCPU];
360 static int savecounts[SMP_MAXCPU];
361 #endif
362 
363 void
364 lapic_timer_always(struct intrframe *frame)
365 {
366 #if 0
367 	globaldata_t gd = mycpu;
368 	int cpu = gd->gd_cpuid;
369 	char buf[64];
370 	short *gptr;
371 	int i;
372 
373 	if (cpu <= 20) {
374 		gptr = (short *)0xFFFFFFFF800b8000 + 80 * cpu;
375 		*gptr = ((*gptr + 1) & 0x00FF) | 0x0700;
376 		++gptr;
377 
378 		ksnprintf(buf, sizeof(buf), " %p %16s %d %16s ",
379 		    (void *)frame->if_rip, gd->gd_curthread->td_comm, ticks,
380 		    gd->gd_infomsg);
381 		for (i = 0; buf[i]; ++i) {
382 			gptr[i] = 0x0700 | (unsigned char)buf[i];
383 		}
384 	}
385 #if 0
386 	if (saveticks[gd->gd_cpuid] != ticks) {
387 		saveticks[gd->gd_cpuid] = ticks;
388 		savecounts[gd->gd_cpuid] = 0;
389 	}
390 	++savecounts[gd->gd_cpuid];
391 	if (savecounts[gd->gd_cpuid] > 2000 && panicstr == NULL) {
392 		panic("cpu %d panicking on ticks failure",
393 			gd->gd_cpuid);
394 	}
395 	for (i = 0; i < ncpus; ++i) {
396 		int delta;
397 		if (saveticks[i] && panicstr == NULL) {
398 			delta = saveticks[i] - ticks;
399 			if (delta < -10 || delta > 10) {
400 				panic("cpu %d panicking on cpu %d watchdog",
401 				      gd->gd_cpuid, i);
402 			}
403 		}
404 	}
405 #endif
406 #endif
407 }
408 
409 static void
410 lapic_timer_intr_reload(struct cputimer_intr *cti, sysclock_t reload)
411 {
412 	struct globaldata *gd = mycpu;
413 
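	/*
	 * 'reload' is expressed in sys_cputimer ticks; rescale it into
	 * LAPIC timer ticks using the frequency measured by
	 * lapic_timer_calibrate(), and clamp it so the one-shot always
	 * has a chance to fire.
	 */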
414 	reload = (int64_t)reload * cti->freq / sys_cputimer->freq;
415 	if (reload < 2)
416 		reload = 2;
417 
418 	if (gd->gd_timer_running) {
419 		if (reload < lapic->ccr_timer)
420 			lapic_timer_oneshot_quick(reload);
421 	} else {
422 		gd->gd_timer_running = 1;
423 		lapic_timer_oneshot_quick(reload);
424 	}
425 }
426 
427 static void
428 lapic_timer_intr_enable(struct cputimer_intr *cti __unused)
429 {
430 	uint32_t timer;
431 
432 	timer = lapic->lvt_timer;
433 	timer &= ~(APIC_LVTT_MASKED | APIC_LVTT_PERIODIC);
434 	lapic->lvt_timer = timer;
435 
436 	lapic_timer_fixup_handler(NULL);
437 }
438 
439 static void
440 lapic_timer_fixup_handler(void *arg)
441 {
442 	int *started = arg;
443 
444 	if (started != NULL)
445 		*started = 0;
446 
447 	if (cpu_vendor_id == CPU_VENDOR_AMD) {
448 		/*
449 		 * Detect the presence of the C1E capability, mostly on the
450 		 * latest dual-core (and later) K8 family CPUs.  This feature renders
451 		 * the local APIC timer dead, so we disable it by reading
452 		 * the Interrupt Pending Message register and clearing both
453 		 * C1eOnCmpHalt (bit 28) and SmiOnCmpHalt (bit 27).
454 		 *
455 		 * Reference:
456 		 *   "BIOS and Kernel Developer's Guide for AMD NPT
457 		 *    Family 0Fh Processors"
458 		 *   #32559 revision 3.00
459 		 */
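		/*
		 * This matches family 0Fh parts with extended model >= 4,
		 * i.e. roughly revision F (NPT) and later.
		 */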
460 		if ((cpu_id & 0x00000f00) == 0x00000f00 &&
461 		    (cpu_id & 0x0fff0000) >= 0x00040000) {
462 			uint64_t msr;
463 
464 			msr = rdmsr(0xc0010055);
465 			if (msr & 0x18000000) {
466 				struct globaldata *gd = mycpu;
467 
468 				kprintf("cpu%d: AMD C1E detected\n",
469 					gd->gd_cpuid);
470 				wrmsr(0xc0010055, msr & ~0x18000000ULL);
471 
472 				/*
473 				 * We are kinda stalled;
474 				 * kick start again.
475 				 */
476 				gd->gd_timer_running = 1;
477 				lapic_timer_oneshot_quick(2);
478 
479 				if (started != NULL)
480 					*started = 1;
481 			}
482 		}
483 	}
484 }
485 
486 static void
487 lapic_timer_restart_handler(void *dummy __unused)
488 {
489 	int started;
490 
491 	lapic_timer_fixup_handler(&started);
492 	if (!started) {
493 		struct globaldata *gd = mycpu;
494 
495 		gd->gd_timer_running = 1;
496 		lapic_timer_oneshot_quick(2);
497 	}
498 }
499 
500 /*
501  * This function is currently called only by ACPI-CA code:
502  * - AMD C1E fixup.  AMD C1E only seems to kick in after the ACPI
503  *   module takes control of PM.  So once ACPI-CA is attached, we try
504  *   to apply the fixup to prevent the LAPIC timer from hanging.
505  */
506 static void
507 lapic_timer_intr_pmfixup(struct cputimer_intr *cti __unused)
508 {
509 #ifdef SMP
510 	lwkt_send_ipiq_mask(smp_active_mask,
511 			    lapic_timer_fixup_handler, NULL);
512 #else
513 	lapic_timer_fixup_handler(NULL);
514 #endif
515 }
516 
517 static void
518 lapic_timer_intr_restart(struct cputimer_intr *cti __unused)
519 {
520 #ifdef SMP
521 	lwkt_send_ipiq_mask(smp_active_mask, lapic_timer_restart_handler, NULL);
522 #else
523 	lapic_timer_restart_handler(NULL);
524 #endif
525 }
526 
527 
528 /*
529  * dump contents of local APIC registers
530  */
531 void
532 apic_dump(char* str)
533 {
534 	kprintf("SMP: CPU%d %s:\n", mycpu->gd_cpuid, str);
535 	kprintf("     lint0: 0x%08x lint1: 0x%08x TPR: 0x%08x SVR: 0x%08x\n",
536 		lapic->lvt_lint0, lapic->lvt_lint1, lapic->tpr, lapic->svr);
537 }
538 
539 #ifdef SMP
540 
541 /*
542  * Inter Processor Interrupt functions.
543  */
544 
545 /*
546  * Inter-processor interrupt (IPI) functions.
547  * Send APIC IPI 'vector' to 'dest_type' via 'delivery_mode'.
548  *  dest_type is one of: APIC_DEST_SELF, APIC_DEST_ALLISELF, APIC_DEST_ALLESELF
549  *  vector is any valid SYSTEM INT vector
550  *  delivery_mode is one of: APIC_DELMODE_FIXED, APIC_DELMODE_LOWPRIO
551  *
552  * WARNINGS!
553  *
554  * We now implement a per-cpu interlock (gd->gd_npoll) to prevent more than
555  * one IPI from being sent to any given cpu at a time.  Thus we no longer
556  * have to process incoming IPIs while waiting for the status to clear.
557  * No deadlock should be possible.
558  *
559  * We now physically disable interrupts for the lapic ICR operation.  If
560  * we do not do this then it looks like an EOI sent to the lapic (which
561  * occurs even with a critical section) can interfere with the command
562  * register ready status and cause an IPI to be lost.
563  *
564  * e.g. an interrupt can occur, issue the EOI, IRET, and cause the command
565  * register to go busy just before we write to icr_lo, resulting in a lost
566  * issuance.  This only appears to occur on Intel cpus and is not
567  * documented.  It could simply be that cpus are so fast these days that
568  * it was always an issue, but is only now rearing its ugly head.  This
569  * is conjecture.
570  */
571 int
572 apic_ipi(int dest_type, int vector, int delivery_mode)
573 {
574 	unsigned long rflags;
575 	u_long  icr_lo;
576 
577 	rflags = read_rflags();
578 	cpu_disable_intr();
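	/*
	 * Wait for any previously issued IPI to be accepted, i.e. for the
	 * delivery status (send pending) bit in ICR_LO to clear.
	 */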
579 	while ((lapic->icr_lo & APIC_DELSTAT_MASK) != 0) {
580 		cpu_pause();
581 	}
582 	icr_lo = (lapic->icr_lo & APIC_ICRLO_RESV_MASK) | dest_type |
583 		delivery_mode | vector;
584 	lapic->icr_lo = icr_lo;
585 	write_rflags(rflags);
586 
587 	return 0;
588 }
589 
590 void
591 single_apic_ipi(int cpu, int vector, int delivery_mode)
592 {
593 	unsigned long rflags;
594 	u_long  icr_lo;
595 	u_long  icr_hi;
596 
597 	rflags = read_rflags();
598 	cpu_disable_intr();
599 	while ((lapic->icr_lo & APIC_DELSTAT_MASK) != 0) {
600 		cpu_pause();
601 	}
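	/* The destination APIC ID goes in the top byte of ICR_HI. */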
602 	icr_hi = lapic->icr_hi & ~APIC_ID_MASK;
603 	icr_hi |= (CPUID_TO_APICID(cpu) << 24);
604 	lapic->icr_hi = icr_hi;
605 
606 	/* build ICR_LOW */
607 	icr_lo = (lapic->icr_lo & APIC_ICRLO_RESV_MASK) |
608 		 APIC_DEST_DESTFLD | delivery_mode | vector;
609 
610 	/* write APIC ICR */
611 	lapic->icr_lo = icr_lo;
612 	write_rflags(rflags);
613 }
614 
615 #if 0
616 
617 /*
618  * Returns 0 if the apic is busy, 1 if we were able to queue the request.
619  *
620  * NOT WORKING YET!  The code as-is may end up not queueing an IPI at all
621  * to the target, and the scheduler does not 'poll' for IPI messages.
622  */
623 int
624 single_apic_ipi_passive(int cpu, int vector, int delivery_mode)
625 {
626 	u_long  icr_lo;
627 	u_long  icr_hi;
628 
629 	crit_enter();
630 	if ((lapic->icr_lo & APIC_DELSTAT_MASK) != 0) {
631 	    crit_exit();
632 	    return(0);
633 	}
634 	icr_hi = lapic->icr_hi & ~APIC_ID_MASK;
635 	icr_hi |= (CPUID_TO_APICID(cpu) << 24);
636 	lapic->icr_hi = icr_hi;
637 
638 	/* build ICR_LOW */
639 	icr_lo = (lapic->icr_lo & APIC_RESV2_MASK)
640 	    | APIC_DEST_DESTFLD | delivery_mode | vector;
641 
642 	/* write APIC ICR */
643 	lapic->icr_lo = icr_lo;
644 	crit_exit();
645 	return(1);
646 }
647 
648 #endif
649 
650 /*
651  * Send APIC IPI 'vector' to the cpus specified by 'target' via 'delivery_mode'.
652  *
653  * target is a bitmask of destination cpus.  Vector is any
654  * valid system INT vector.  Delivery mode may be either
655  * APIC_DELMODE_FIXED or APIC_DELMODE_LOWPRIO.
656  */
657 void
658 selected_apic_ipi(cpumask_t target, int vector, int delivery_mode)
659 {
660 	crit_enter();
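	/*
	 * Peel the target mask apart one cpu at a time (lowest set bit
	 * first) and send each one a directed IPI.
	 */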
661 	while (target) {
662 		int n = BSFCPUMASK(target);
663 		target &= ~CPUMASK(n);
664 		single_apic_ipi(n, vector, delivery_mode);
665 	}
666 	crit_exit();
667 }
668 
669 #endif	/* SMP */
670 
671 /*
672  * Timer code, in development...
673  *  - suggested by rgrimes@gndrsh.aac.dev.com
674  */
675 int
676 get_apic_timer_frequency(void)
677 {
678 	return(lapic_cputimer_intr.freq);
679 }
680 
681 /*
682  * Load a 'downcount time' in uSeconds.
683  */
684 void
685 set_apic_timer(int us)
686 {
687 	u_int count;
688 
689 	/*
690 	 * When we reach here, lapic timer's frequency
691 	 * must have been calculated as well as the
692 	 * divisor (lapic->dcr_timer is setup during the
693 	 * divisor calculation).
694 	 */
695 	KKASSERT(lapic_cputimer_intr.freq != 0 &&
696 		 lapic_timer_divisor_idx >= 0);
697 
698 	count = ((us * (int64_t)lapic_cputimer_intr.freq) + 999999) / 1000000;
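	/*
	 * Convert microseconds to timer ticks, rounding up; e.g. with a
	 * calibrated frequency of 25,000,000 Hz, us = 1000 yields
	 * count = 25000.
	 */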
699 	lapic_timer_oneshot(count);
700 }
701 
702 
703 /*
704  * Read remaining time in timer.
705  */
706 int
707 read_apic_timer(void)
708 {
709 #if 0
710 	/* XXX FIXME: we need to return the actual remaining time,
711 	 * for now we just return the remaining count.
712 	 */
713 #else
714 	return lapic->ccr_timer;
715 #endif
716 }
717 
718 
719 /*
720  * Spin-style delay, set delay time in uS, spin till it drains.
721  */
722 void
723 u_sleep(int count)
724 {
725 	set_apic_timer(count);
726 	while (read_apic_timer())
727 		 /* spin */ ;
728 }
729 
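/*
 * Return the first APIC ID >= 'start' that is not mapped to any cpu,
 * or NAPICID if every slot is in use.
 */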
730 int
731 lapic_unused_apic_id(int start)
732 {
733 	int i;
734 
735 	for (i = start; i < NAPICID; ++i) {
736 		if (APICID_TO_CPUID(i) == -1)
737 			return i;
738 	}
739 	return NAPICID;
740 }
741 
742 void
743 lapic_map(vm_paddr_t lapic_addr)
744 {
745 	lapic = pmap_mapdev_uncacheable(lapic_addr, sizeof(struct LAPIC));
746 }
747 
748 static TAILQ_HEAD(, lapic_enumerator) lapic_enumerators =
749 	TAILQ_HEAD_INITIALIZER(lapic_enumerators);
750 
751 int
752 lapic_config(void)
753 {
754 	struct lapic_enumerator *e;
755 	int error, i, ap_max;
756 
757 	KKASSERT(lapic_enable);
758 
759 	for (i = 0; i < NAPICID; ++i)
760 		APICID_TO_CPUID(i) = -1;
761 
762 	TAILQ_FOREACH(e, &lapic_enumerators, lapic_link) {
763 		error = e->lapic_probe(e);
764 		if (!error)
765 			break;
766 	}
767 	if (e == NULL) {
768 		kprintf("LAPIC: Can't find LAPIC\n");
769 		return ENXIO;
770 	}
771 
772 	e->lapic_enumerate(e);
773 
774 	ap_max = MAXCPU - 1;
775 	TUNABLE_INT_FETCH("hw.ap_max", &ap_max);
776 	if (ap_max > MAXCPU - 1)
777 		ap_max = MAXCPU - 1;
778 
779 	if (naps > ap_max) {
780 		kprintf("LAPIC: Warning: only using %d of %d "
781 			"available APs\n",
782 			ap_max, naps);
783 		naps = ap_max;
784 	}
785 
786 	return 0;
787 }
788 
789 void
790 lapic_enumerator_register(struct lapic_enumerator *ne)
791 {
792 	struct lapic_enumerator *e;
793 
794 	TAILQ_FOREACH(e, &lapic_enumerators, lapic_link) {
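	/*
	 * Keep the enumerator list sorted by descending priority; the
	 * probe loop in lapic_config() uses the first one that succeeds.
	 */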
795 		if (e->lapic_prio < ne->lapic_prio) {
796 			TAILQ_INSERT_BEFORE(e, ne, lapic_link);
797 			return;
798 		}
799 	}
800 	TAILQ_INSERT_TAIL(&lapic_enumerators, ne, lapic_link);
801 }
802 
803 void
804 lapic_set_cpuid(int cpu_id, int apic_id)
805 {
806 	CPUID_TO_APICID(cpu_id) = apic_id;
807 	APICID_TO_CPUID(apic_id) = cpu_id;
808 }
809 
810 void
811 lapic_fixup_noioapic(void)
812 {
813 	u_int   temp;
814 
815 	/* Only allowed on BSP */
816 	KKASSERT(mycpuid == 0);
817 	KKASSERT(!ioapic_enable);
818 
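	/*
	 * With the I/O APIC out of the picture, take 8259 interrupts via
	 * LINT0 (ExtINT) again and re-mask the LINT1 NMI that lapic_init()
	 * enabled for the I/O APIC case.
	 */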
819 	temp = lapic->lvt_lint0;
820 	temp &= ~APIC_LVT_MASKED;
821 	lapic->lvt_lint0 = temp;
822 
823 	temp = lapic->lvt_lint1;
824 	temp |= APIC_LVT_MASKED;
825 	lapic->lvt_lint1 = temp;
826 }
827 
828 static void
829 lapic_sysinit(void *dummy __unused)
830 {
831 	if (lapic_enable) {
832 		int error;
833 
834 		error = lapic_config();
835 		if (error)
836 			lapic_enable = 0;
837 	}
838 
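	/*
	 * The I/O APIC path needs a working local APIC, so if the LAPIC
	 * could not be configured fall back to the legacy 8259 ICU
	 * interrupt routing.
	 */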
839 	if (lapic_enable) {
840 		/* Initialize BSP's local APIC */
841 		lapic_init(TRUE);
842 	} else if (ioapic_enable) {
843 		ioapic_enable = 0;
844 		icu_reinit_noioapic();
845 	}
846 }
847 SYSINIT(lapic, SI_BOOT2_LAPIC, SI_ORDER_FIRST, lapic_sysinit, NULL)
848