xref: /dragonfly/sys/platform/pc64/apic/lapic.c (revision 9317c2d0)
1 /*
2  * Copyright (c) 1996, by Steve Passe
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. The name of the developer may NOT be used to endorse or promote products
11  *    derived from this software without specific prior written permission.
12  *
13  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23  * SUCH DAMAGE.
24  *
25  * $FreeBSD: src/sys/i386/i386/mpapic.c,v 1.37.2.7 2003/01/25 02:31:47 peter Exp $
26  */
27 
28 #include <sys/param.h>
29 #include <sys/systm.h>
30 #include <sys/kernel.h>
31 #include <sys/ktr.h>
32 #include <sys/bus.h>
33 #include <sys/machintr.h>
34 #include <sys/malloc.h>
35 #include <sys/sysctl.h>
36 #include <machine/globaldata.h>
37 #include <machine/clock.h>
38 #include <machine/limits.h>
39 #include <machine/smp.h>
40 #include <machine/md_var.h>
41 #include <machine/pmap.h>
42 #include <machine/specialreg.h>
43 #include <machine_base/apic/lapic.h>
44 #include <machine_base/apic/ioapic.h>
45 #include <machine_base/apic/ioapic_abi.h>
46 #include <machine_base/apic/apicvar.h>
47 #include <machine_base/icu/icu_var.h>
48 #include <machine/segments.h>
49 #include <sys/spinlock2.h>
50 
51 #include <machine/cputypes.h>
52 #include <machine/intr_machdep.h>
53 
54 #if !defined(KTR_LAPIC)
55 #define KTR_LAPIC	KTR_ALL
56 #endif
57 KTR_INFO_MASTER(lapic);
58 KTR_INFO(KTR_LAPIC, lapic, mem_eoi, 0, "mem_eoi");
59 KTR_INFO(KTR_LAPIC, lapic, msr_eoi, 0, "msr_eoi");
60 #define log_lapic(name)     KTR_LOG(lapic_ ## name)
61 
62 extern int naps;
63 
64 volatile lapic_t *lapic_mem;
65 
66 static void	lapic_timer_calibrate(void);
67 static void	lapic_timer_set_divisor(int);
68 static void	lapic_timer_fixup_handler(void *);
69 static void	lapic_timer_restart_handler(void *);
70 
71 static int	lapic_timer_c1e_test = -1;	/* auto-detect */
72 TUNABLE_INT("hw.lapic_timer_c1e_test", &lapic_timer_c1e_test);
73 
74 static int	lapic_timer_enable = 1;
75 TUNABLE_INT("hw.lapic_timer_enable", &lapic_timer_enable);
76 
77 static int	lapic_timer_tscdeadline = 1;
78 TUNABLE_INT("hw.lapic_timer_tscdeadline", &lapic_timer_tscdeadline);
79 
80 static int	lapic_calibrate_test = 0;
81 TUNABLE_INT("hw.lapic_calibrate_test", &lapic_calibrate_test);
82 
83 static int	lapic_calibrate_fast = 1;
84 TUNABLE_INT("hw.lapic_calibrate_fast", &lapic_calibrate_fast);
85 
86 static void	lapic_timer_tscdlt_reload(struct cputimer_intr *, sysclock_t);
87 static void	lapic_mem_timer_intr_reload(struct cputimer_intr *, sysclock_t);
88 static void	lapic_msr_timer_intr_reload(struct cputimer_intr *, sysclock_t);
89 static void	lapic_timer_intr_enable(struct cputimer_intr *);
90 static void	lapic_timer_intr_restart(struct cputimer_intr *);
91 static void	lapic_timer_intr_pmfixup(struct cputimer_intr *);
92 
93 static struct cputimer_intr lapic_cputimer_intr = {
94 	.freq = 0,
95 	.reload = lapic_mem_timer_intr_reload,
96 	.enable = lapic_timer_intr_enable,
97 	.config = cputimer_intr_default_config,
98 	.restart = lapic_timer_intr_restart,
99 	.pmfixup = lapic_timer_intr_pmfixup,
100 	.initclock = cputimer_intr_default_initclock,
101 	.pcpuhand = NULL,
102 	.next = SLIST_ENTRY_INITIALIZER,
103 	.name = "lapic",
104 	.type = CPUTIMER_INTR_LAPIC,
105 	.prio = CPUTIMER_INTR_PRIO_LAPIC,
106 	.caps = CPUTIMER_INTR_CAP_NONE,
107 	.priv = NULL
108 };
109 
110 static int		lapic_timer_divisor_idx = -1;
111 static const uint32_t	lapic_timer_divisors[] = {
112 	APIC_TDCR_2,	APIC_TDCR_4,	APIC_TDCR_8,	APIC_TDCR_16,
113 	APIC_TDCR_32,	APIC_TDCR_64,	APIC_TDCR_128,	APIC_TDCR_1
114 };
115 #define APIC_TIMER_NDIVISORS (int)(NELEM(lapic_timer_divisors))
116 
117 static int	lapic_use_tscdeadline = 0;
118 
119 /*
120  * APIC ID <-> CPU ID mapping structures.
121  */
122 int	cpu_id_to_apic_id[NAPICID];
123 int	apic_id_to_cpu_id[NAPICID];
124 int	lapic_enable = 1;
125 int	lapic_usable = 0;
126 int	x2apic_enable = 1;
127 
128 SYSCTL_INT(_hw, OID_AUTO, x2apic_enable, CTLFLAG_RD, &x2apic_enable, 0, "");
129 
130 /* Separate cachelines for each cpu's info. */
131 struct deadlines {
132 	uint64_t timestamp;
133 	uint64_t downcount_time;
134 	uint64_t padding[6];
135 };
136 struct deadlines *tsc_deadlines = NULL;
137 
138 static void	lapic_mem_eoi(void);
139 static int	lapic_mem_ipi(int dest_type, int vector, int delivery_mode);
140 static void	lapic_mem_single_ipi(int cpu, int vector, int delivery_mode);
141 
142 static void	lapic_msr_eoi(void);
143 static int	lapic_msr_ipi(int dest_type, int vector, int delivery_mode);
144 static void	lapic_msr_single_ipi(int cpu, int vector, int delivery_mode);
145 
146 void		(*lapic_eoi)(void);
147 int		(*apic_ipi)(int dest_type, int vector, int delivery_mode);
148 void		(*single_apic_ipi)(int cpu, int vector, int delivery_mode);
149 
150 static __inline void
151 lapic_mem_icr_set(uint32_t apic_id, uint32_t icr_lo_val)
152 {
153 	uint32_t icr_lo, icr_hi;
154 
155 	icr_hi = (LAPIC_MEM_READ(icr_hi) & ~APIC_ID_MASK) |
156 	    (apic_id << APIC_ID_SHIFT);
157 	icr_lo = (LAPIC_MEM_READ(icr_lo) & APIC_ICRLO_RESV_MASK) | icr_lo_val;
158 
159 	LAPIC_MEM_WRITE(icr_hi, icr_hi);
160 	LAPIC_MEM_WRITE(icr_lo, icr_lo);
161 }
162 
163 static __inline void
164 lapic_msr_icr_set(uint32_t apic_id, uint32_t icr_lo_val)
165 {
166 	LAPIC_MSR_WRITE(MSR_X2APIC_ICR,
167 	    ((uint64_t)apic_id << 32) | ((uint64_t)icr_lo_val));
168 }
169 
170 /*
171  * Enable LAPIC, configure interrupts.
172  */
173 void
174 lapic_init(boolean_t bsp)
175 {
176 	uint32_t timer;
177 	u_int   temp;
178 
179 	if (bsp) {
180 		/* Decide whether we want to use TSC Deadline mode. */
181 		if (lapic_timer_tscdeadline != 0 &&
182 		    (cpu_feature2 & CPUID2_TSCDLT) &&
183 		    tsc_invariant && tsc_frequency != 0) {
184 			lapic_use_tscdeadline = 1;
185 			tsc_deadlines =
186 				kmalloc(sizeof(struct deadlines) * (naps + 1),
187 					M_DEVBUF,
188 					M_WAITOK | M_ZERO | M_CACHEALIGN);
189 		}
190 	}
191 
192 	/*
193 	 * Install vectors
194 	 *
195 	 * Since IDT is shared between BSP and APs, these vectors
196 	 * only need to be installed once; we do it on BSP.
197 	 */
198 	if (bsp) {
199 		if (cpu_vendor_id == CPU_VENDOR_AMD &&
200 		    CPUID_TO_FAMILY(cpu_id) >= 0x0f &&
201 		    CPUID_TO_FAMILY(cpu_id) < 0x17) {	/* XXX */
202 			uint32_t tcr;
203 
204 			/*
205 			 * Set the LINTEN bit in the HyperTransport
206 			 * Transaction Control Register.
207 			 *
208 			 * This will cause EXTINT and NMI interrupts
209 			 * routed over the hypertransport bus to be
210 			 * fed into the LAPIC LINT0/LINT1.  If the bit
211 			 * isn't set, the interrupts will go to the
212 			 * general cpu INTR/NMI pins.  On a dual-core
213 			 * cpu the interrupt winds up going to BOTH cpus.
214 			 * The first cpu that does the interrupt ack
215 			 * cycle will get the correct interrupt.  The
216 			 * second cpu that does it will get a spurious
217 			 * interrupt vector (typically IRQ 7).
218 			 */
219 			outl(0x0cf8,
220 			    (1 << 31) |	/* enable */
221 			    (0 << 16) |	/* bus */
222 			    (0x18 << 11) | /* dev (cpu + 0x18) */
223 			    (0 << 8) |	/* func */
224 			    0x68	/* reg */
225 			    );
226 			tcr = inl(0xcfc);
227 			if ((tcr & 0x00010000) == 0) {
228 				kprintf("LAPIC: AMD LINTEN on\n");
229 				outl(0xcfc, tcr|0x00010000);
230 			}
231 			outl(0x0cf8, 0);
232 		}
233 
234 		/* Install a 'Spurious INTerrupt' vector */
235 		setidt_global(XSPURIOUSINT_OFFSET, Xspuriousint,
236 		    SDT_SYSIGT, SEL_KPL, 0);
237 
238 		/* Install a timer vector */
239 		setidt_global(XTIMER_OFFSET, Xtimer,
240 		    SDT_SYSIGT, SEL_KPL, 0);
241 
242 		/* Install an inter-CPU IPI for TLB invalidation */
243 		setidt_global(XINVLTLB_OFFSET, Xinvltlb,
244 		    SDT_SYSIGT, SEL_KPL, 0);
245 
246 		/* Install an inter-CPU IPI for IPIQ messaging */
247 		setidt_global(XIPIQ_OFFSET, Xipiq,
248 		    SDT_SYSIGT, SEL_KPL, 0);
249 
250 		/* Install an inter-CPU IPI for CPU stop/restart */
251 		setidt_global(XCPUSTOP_OFFSET, Xcpustop,
252 		    SDT_SYSIGT, SEL_KPL, 0);
253 
254 		/* Install an inter-CPU IPI for TLB invalidation */
255 		setidt_global(XSNIFF_OFFSET, Xsniff,
256 		    SDT_SYSIGT, SEL_KPL, 0);
257 	}
258 
259 	/*
260 	 * Setup LINT0 as ExtINT on the BSP.  This is theoretically an
261 	 * aggregate interrupt input from the 8259.  The INTA cycle
262 	 * will be routed to the external controller (the 8259) which
263 	 * is expected to supply the vector.
264 	 *
265 	 * Must be setup edge triggered, active high.
266 	 *
267 	 * Disable LINT0 on BSP, if I/O APIC is enabled.
268 	 *
269 	 * Disable LINT0 on the APs.  It doesn't matter what delivery
270 	 * mode we use because we leave it masked.
271 	 */
272 	temp = LAPIC_READ(lvt_lint0);
273 	temp &= ~(APIC_LVT_MASKED | APIC_LVT_TRIG_MASK |
274 		  APIC_LVT_POLARITY_MASK | APIC_LVT_DM_MASK);
275 	if (bsp) {
276 		temp |= APIC_LVT_DM_EXTINT;
277 		if (ioapic_enable)
278 			temp |= APIC_LVT_MASKED;
279 	} else {
280 		temp |= APIC_LVT_DM_FIXED | APIC_LVT_MASKED;
281 	}
282 	LAPIC_WRITE(lvt_lint0, temp);
283 
284 	/*
285 	 * Setup LINT1 as NMI.
286 	 *
287 	 * Must be setup edge trigger, active high.
288 	 *
289 	 * Enable LINT1 on BSP, if I/O APIC is enabled.
290 	 *
291 	 * Disable LINT1 on the APs.
292 	 */
293 	temp = LAPIC_READ(lvt_lint1);
294 	temp &= ~(APIC_LVT_MASKED | APIC_LVT_TRIG_MASK |
295 		  APIC_LVT_POLARITY_MASK | APIC_LVT_DM_MASK);
296 	temp |= APIC_LVT_MASKED | APIC_LVT_DM_NMI;
297 	if (bsp && ioapic_enable)
298 		temp &= ~APIC_LVT_MASKED;
299 	LAPIC_WRITE(lvt_lint1, temp);
300 
301 	/*
302 	 * Mask the LAPIC error interrupt, LAPIC performance counter
303 	 * interrupt.
304 	 */
305 	LAPIC_WRITE(lvt_error, LAPIC_READ(lvt_error) | APIC_LVT_MASKED);
306 	LAPIC_WRITE(lvt_pcint, LAPIC_READ(lvt_pcint) | APIC_LVT_MASKED);
307 
308 	/*
309 	 * Set LAPIC timer vector and mask the LAPIC timer interrupt.
310 	 */
311 	timer = LAPIC_READ(lvt_timer);
312 	timer &= ~APIC_LVTT_VECTOR;
313 	timer |= XTIMER_OFFSET;
314 	timer |= APIC_LVTT_MASKED;
315 	LAPIC_WRITE(lvt_timer, timer);
316 
317 	/*
318 	 * Set the Task Priority Register as needed.   At the moment allow
319 	 * interrupts on all cpus (the APs will remain CLId until they are
320 	 * ready to deal).
321 	 */
322 	temp = LAPIC_READ(tpr);
323 	temp &= ~APIC_TPR_PRIO;		/* clear priority field */
324 	LAPIC_WRITE(tpr, temp);
325 
326 	/*
327 	 * AMD specific setup
328 	 */
329 	if (cpu_vendor_id == CPU_VENDOR_AMD && lapic_mem != NULL &&
330 	    (LAPIC_MEM_READ(version) & APIC_VER_AMD_EXT_SPACE)) {
331 		uint32_t ext_feat;
332 		uint32_t count;
333 		uint32_t max_count;
334 		uint32_t lvt;
335 		uint32_t i;
336 
337 		ext_feat = LAPIC_MEM_READ(ext_feat);
338 		count = (ext_feat & APIC_EXTFEAT_MASK) >> APIC_EXTFEAT_SHIFT;
339 		max_count = sizeof(lapic_mem->ext_lvt) /
340 		    sizeof(lapic_mem->ext_lvt[0]);
341 		if (count > max_count)
342 			count = max_count;
343 		for (i = 0; i < count; ++i) {
344 			lvt = LAPIC_MEM_READ(ext_lvt[i].lvt);
345 
346 			lvt &= ~(APIC_LVT_POLARITY_MASK | APIC_LVT_TRIG_MASK |
347 				 APIC_LVT_DM_MASK | APIC_LVT_MASKED);
348 			lvt |= APIC_LVT_MASKED | APIC_LVT_DM_FIXED;
349 
350 			switch(i) {
351 			case APIC_EXTLVT_IBS:
352 				break;
353 			case APIC_EXTLVT_MCA:
354 				break;
355 			case APIC_EXTLVT_DEI:
356 				break;
357 			case APIC_EXTLVT_SBI:
358 				break;
359 			default:
360 				break;
361 			}
362 			if (bsp) {
363 				kprintf("   LAPIC AMD elvt%d: 0x%08x",
364 					i, LAPIC_MEM_READ(ext_lvt[i].lvt));
365 				if (LAPIC_MEM_READ(ext_lvt[i].lvt) != lvt)
366 					kprintf(" -> 0x%08x", lvt);
367 				kprintf("\n");
368 			}
369 			LAPIC_MEM_WRITE(ext_lvt[i].lvt, lvt);
370 		}
371 	}
372 
373 	/*
374 	 * Enable the LAPIC
375 	 */
376 	temp = LAPIC_READ(svr);
377 	temp |= APIC_SVR_ENABLE;	/* enable the LAPIC */
378 	temp &= ~APIC_SVR_FOCUS_DISABLE; /* enable lopri focus processor */
379 
380 	if (LAPIC_READ(version) & APIC_VER_EOI_SUPP) {
381 		if (temp & APIC_SVR_EOI_SUPP) {
382 			temp &= ~APIC_SVR_EOI_SUPP;
383 			if (bsp)
384 				kprintf("    LAPIC disabling EOI supp\n");
385 		}
386 	}
387 
388 	/*
389 	 * Set the spurious interrupt vector.  The low 4 bits of the vector
390 	 * must be 1111.
391 	 */
392 	if ((XSPURIOUSINT_OFFSET & 0x0F) != 0x0F)
393 		panic("bad XSPURIOUSINT_OFFSET: 0x%08x", XSPURIOUSINT_OFFSET);
394 	temp &= ~APIC_SVR_VECTOR;
395 	temp |= XSPURIOUSINT_OFFSET;
396 
397 	LAPIC_WRITE(svr, temp);
398 
399 	/*
400 	 * Pump out a few EOIs to clean out interrupts that got through
401 	 * before we were able to set the TPR.
402 	 */
403 	LAPIC_WRITE(eoi, 0);
404 	LAPIC_WRITE(eoi, 0);
405 	LAPIC_WRITE(eoi, 0);
406 
407 	if (bsp) {
408 		lapic_timer_calibrate();
409 		if (lapic_timer_enable) {
410 			if (cpu_thermal_feature & CPUID_THERMAL_ARAT) {
411 				/*
412 				 * Local APIC timer will not stop
413 				 * in deep C-state.
414 				 */
415 				lapic_cputimer_intr.caps |=
416 				    CPUTIMER_INTR_CAP_PS;
417 			}
418 			if (lapic_use_tscdeadline) {
419 				lapic_cputimer_intr.reload =
420 				    lapic_timer_tscdlt_reload;
421 			}
422 			cputimer_intr_register(&lapic_cputimer_intr);
423 			cputimer_intr_select(&lapic_cputimer_intr, 0);
424 		}
425 	} else if (!lapic_use_tscdeadline) {
426 		lapic_timer_set_divisor(lapic_timer_divisor_idx);
427 	}
428 
429 	if (bootverbose)
430 		apic_dump("apic_initialize()");
431 }
432 
433 static void
434 lapic_timer_set_divisor(int divisor_idx)
435 {
436 	KKASSERT(divisor_idx >= 0 && divisor_idx < APIC_TIMER_NDIVISORS);
437 	LAPIC_WRITE(dcr_timer, lapic_timer_divisors[divisor_idx]);
438 }
439 
440 static void
441 lapic_timer_oneshot(u_int count)
442 {
443 	uint32_t value;
444 
445 	value = LAPIC_READ(lvt_timer);
446 	value &= ~(APIC_LVTT_PERIODIC | APIC_LVTT_TSCDLT);
447 	LAPIC_WRITE(lvt_timer, value);
448 	LAPIC_WRITE(icr_timer, count);
449 }
450 
451 static void
452 lapic_timer_oneshot_quick(u_int count)
453 {
454 	LAPIC_WRITE(icr_timer, count);
455 }
456 
457 static void
458 lapic_timer_tscdeadline_quick(uint64_t diff)
459 {
460 	uint64_t val = rdtsc() + diff;
461 
462 	wrmsr(MSR_TSC_DEADLINE, val);
463 	tsc_deadlines[mycpuid].timestamp = val;
464 }
465 
466 static uint64_t
467 lapic_scale_to_tsc(unsigned value, unsigned scale)
468 {
469 	uint64_t val;
470 
471 	val = value;
472 	val *= tsc_frequency;
473 	val += (scale - 1);
474 	val /= scale;
475 	return val;
476 }
477 
478 #define MAX_MEASURE_RETRIES	100
479 
480 static u_int64_t
481 do_tsc_calibration(u_int us, u_int64_t apic_delay_tsc)
482 {
483 	u_int64_t old_tsc1, old_tsc2, new_tsc1, new_tsc2;
484 	u_int64_t diff, count;
485 	u_int64_t a;
486 	u_int32_t start, end;
487 	int retries1 = 0, retries2 = 0;
488 
489 retry1:
490 	lapic_timer_oneshot_quick(APIC_TIMER_MAX_COUNT);
491 	old_tsc1 = rdtsc_ordered();
492 	start = LAPIC_READ(ccr_timer);
493 	old_tsc2 = rdtsc_ordered();
494 	if (apic_delay_tsc > 0 && retries1 < MAX_MEASURE_RETRIES &&
495 	    old_tsc2 - old_tsc1 > 2 * apic_delay_tsc) {
496 		retries1++;
497 		goto retry1;
498 	}
499 	DELAY(us);
500 retry2:
501 	new_tsc1 = rdtsc_ordered();
502 	end = LAPIC_READ(ccr_timer);
503 	new_tsc2 = rdtsc_ordered();
504 	if (apic_delay_tsc > 0 && retries2 < MAX_MEASURE_RETRIES &&
505 	    new_tsc2 - new_tsc1 > 2 * apic_delay_tsc) {
506 		retries2++;
507 		goto retry2;
508 	}
509 	if (end == 0)
510 		return 0;
511 
512 	count = start - end;
513 
514 	/* Make sure the lapic can count for up to 2s */
515 	a = (unsigned)APIC_TIMER_MAX_COUNT;
516 	if (us < 2000000 && (u_int64_t)count * 2000000 >= a * us)
517 		return 0;
518 
519 	if (lapic_calibrate_test > 0 && (retries1 > 0 || retries2 > 0)) {
520 		kprintf("%s: retries1=%d retries2=%d\n",
521 		    __func__, retries1, retries2);
522 	}
523 
524 	diff = (new_tsc1 - old_tsc1) + (new_tsc2 - old_tsc2);
525 	/* XXX First estimate if the total TSC diff value makes sense */
526 	/* This will almost overflow, but only almost :) */
527 	count = (2 * count * tsc_frequency) / diff;
528 
529 	return count;
530 }
531 
532 static uint64_t
533 do_cputimer_calibration(u_int us)
534 {
535 	sysclock_t value;
536 	sysclock_t start, end;
537 	uint32_t beginning, finish;
538 
539 	lapic_timer_oneshot(APIC_TIMER_MAX_COUNT);
540 	beginning = LAPIC_READ(ccr_timer);
541 	start = sys_cputimer->count();
542 	DELAY(us);
543 	end = sys_cputimer->count();
544 	finish = LAPIC_READ(ccr_timer);
545 	if (finish == 0)
546 		return 0;
547 	/* value is the LAPIC timer difference. */
548 	value = (uint32_t)(beginning - finish);
549 	/* end is the sys_cputimer difference. */
550 	end -= start;
551 	if (end == 0)
552 		return 0;
553 	value = muldivu64(value, sys_cputimer->freq, end);
554 
555 	return value;
556 }
557 
558 static void
559 lapic_timer_calibrate(void)
560 {
561 	sysclock_t value;
562 	u_int64_t apic_delay_tsc = 0;
563 	int use_tsc_calibration = 0;
564 
565 	/* No need to calibrate lapic_timer, if we will use TSC Deadline mode */
566 	if (lapic_use_tscdeadline) {
567 		lapic_cputimer_intr.freq = tsc_frequency;
568 		kprintf(
569 		    "lapic: TSC Deadline Mode: frequency %lu Hz\n",
570 		    lapic_cputimer_intr.freq);
571 		return;
572 	}
573 
574 	/*
575 	 * On real hardware, tsc_invariant == 0 wouldn't be an issue, but in
576 	 * a virtual machine the frequency may get changed by the host.
577 	 */
578 	if (tsc_frequency != 0 && tsc_invariant && lapic_calibrate_fast)
579 		use_tsc_calibration = 1;
580 
581 	if (use_tsc_calibration) {
582 		u_int64_t min_apic_tsc = 0, max_apic_tsc = 0;
583 		u_int64_t old_tsc, new_tsc;
584 		uint32_t val;
585 		int i;
586 
587 		/* warm up */
588 		lapic_timer_oneshot(APIC_TIMER_MAX_COUNT);
589 		for (i = 0; i < 10; i++)
590 			val = LAPIC_READ(ccr_timer);
591 
592 		for (i = 0; i < 100; i++) {
593 			old_tsc = rdtsc_ordered();
594 			val = LAPIC_READ(ccr_timer);
595 			new_tsc = rdtsc_ordered();
596 			new_tsc -= old_tsc;
597 			apic_delay_tsc += new_tsc;
598 			if (min_apic_tsc == 0 ||
599 			    min_apic_tsc > new_tsc) {
600 				min_apic_tsc = new_tsc;
601 			}
602 			if (max_apic_tsc < new_tsc)
603 				max_apic_tsc = new_tsc;
604 		}
605 		apic_delay_tsc /= 100;
606 		kprintf(
607 		    "LAPIC latency (in TSC ticks): %lu min: %lu max: %lu\n",
608 		    apic_delay_tsc, min_apic_tsc, max_apic_tsc);
609 		apic_delay_tsc = min_apic_tsc;
610 	}
611 
612 	if (!use_tsc_calibration) {
613 		int i;
614 
615 		/*
616 		 * Do some exercising of the lapic timer access. This improves
617 		 * precision of the subsequent calibration run in at least some
618 		 * virtualization cases.
619 		 */
620 		lapic_timer_set_divisor(0);
621 		for (i = 0; i < 10; i++)
622 			(void)do_cputimer_calibration(100);
623 	}
624 	/* Try to calibrate the local APIC timer. */
625 	for (lapic_timer_divisor_idx = 0;
626 	     lapic_timer_divisor_idx < APIC_TIMER_NDIVISORS;
627 	     lapic_timer_divisor_idx++) {
628 		lapic_timer_set_divisor(lapic_timer_divisor_idx);
629 		if (use_tsc_calibration) {
630 			value = do_tsc_calibration(200*1000, apic_delay_tsc);
631 		} else {
632 			value = do_cputimer_calibration(2*1000*1000);
633 		}
634 		if (value != 0)
635 			break;
636 	}
637 	if (lapic_timer_divisor_idx >= APIC_TIMER_NDIVISORS)
638 		panic("lapic: no proper timer divisor?!");
639 	lapic_cputimer_intr.freq = value;
640 
641 	kprintf("lapic: divisor index %d, frequency %lu Hz\n",
642 		lapic_timer_divisor_idx, lapic_cputimer_intr.freq);
643 
644 	if (lapic_calibrate_test > 0) {
645 		uint64_t freq;
646 		int i;
647 
648 		for (i = 1; i <= 20; i++) {
649 			if (use_tsc_calibration) {
650 				freq = do_tsc_calibration(i*100*1000,
651 							  apic_delay_tsc);
652 			} else {
653 				freq = do_cputimer_calibration(i*100*1000);
654 			}
655 			if (freq != 0)
656 				kprintf("%ums: %lu\n", i * 100, freq);
657 		}
658 	}
659 }
660 
661 static void
662 lapic_timer_tscdlt_reload(struct cputimer_intr *cti, sysclock_t reload)
663 {
664 	struct globaldata *gd = mycpu;
665 	uint64_t diff, now, val;
666 
667 	/*
668 	 * Set maximum deadline to 60 seconds
669 	 */
670 	if (reload > sys_cputimer->freq * 60)
671 		reload = sys_cputimer->freq * 60;
672 	diff = muldivu64(reload, tsc_frequency, sys_cputimer->freq);
673 	if (diff < 4)
674 		diff = 4;
675 	if (cpu_vendor_id == CPU_VENDOR_INTEL)
676 		cpu_lfence();
677 	else
678 		cpu_mfence();
679 	now = rdtsc();
680 	val = now + diff;
681 	if (gd->gd_timer_running) {
682 		uint64_t deadline = tsc_deadlines[mycpuid].timestamp;
683 		if (deadline == 0 || now > deadline || val < deadline) {
684 			wrmsr(MSR_TSC_DEADLINE, val);
685 			tsc_deadlines[mycpuid].timestamp = val;
686 		}
687 	} else {
688 		gd->gd_timer_running = 1;
689 		wrmsr(MSR_TSC_DEADLINE, val);
690 		tsc_deadlines[mycpuid].timestamp = val;
691 	}
692 }
693 
694 static void
695 lapic_mem_timer_intr_reload(struct cputimer_intr *cti, sysclock_t reload)
696 {
697 	struct globaldata *gd = mycpu;
698 
699 	if ((ssysclock_t)reload < 0)
700 		reload = 1;
701 	reload = muldivu64(reload, cti->freq, sys_cputimer->freq);
702 	if (reload < 2)
703 		reload = 2;
704 	if (reload > 0xFFFFFFFF)
705 		reload = 0xFFFFFFFF;
706 
707 	if (gd->gd_timer_running) {
708 		if (reload < LAPIC_MEM_READ(ccr_timer))
709 			LAPIC_MEM_WRITE(icr_timer, (uint32_t)reload);
710 	} else {
711 		gd->gd_timer_running = 1;
712 		LAPIC_MEM_WRITE(icr_timer, (uint32_t)reload);
713 	}
714 }
715 
716 static void
717 lapic_msr_timer_intr_reload(struct cputimer_intr *cti, sysclock_t reload)
718 {
719 	struct globaldata *gd = mycpu;
720 
721 	if ((ssysclock_t)reload < 0)
722 		reload = 1;
723 	reload = muldivu64(reload, cti->freq, sys_cputimer->freq);
724 	if (reload < 2)
725 		reload = 2;
726 	if (reload > 0xFFFFFFFF)
727 		reload = 0xFFFFFFFF;
728 
729 	if (gd->gd_timer_running) {
730 		if (reload < LAPIC_MSR_READ(MSR_X2APIC_CCR_TIMER))
731 			LAPIC_MSR_WRITE(MSR_X2APIC_ICR_TIMER, (uint32_t)reload);
732 	} else {
733 		gd->gd_timer_running = 1;
734 		LAPIC_MSR_WRITE(MSR_X2APIC_ICR_TIMER, (uint32_t)reload);
735 	}
736 }
737 
738 static void
739 lapic_timer_intr_enable(struct cputimer_intr *cti __unused)
740 {
741 	uint32_t timer;
742 
743 	timer = LAPIC_READ(lvt_timer);
744 	timer &= ~(APIC_LVTT_MASKED | APIC_LVTT_PERIODIC | APIC_LVTT_TSCDLT);
745 	if (lapic_use_tscdeadline)
746 		timer |= APIC_LVTT_TSCDLT;
747 	LAPIC_WRITE(lvt_timer, timer);
748 	if (lapic_use_tscdeadline)
749 		cpu_mfence();
750 
751 	lapic_timer_fixup_handler(NULL);
752 }
753 
754 static void
755 lapic_timer_fixup_handler(void *arg)
756 {
757 	int *started = arg;
758 
759 	if (started != NULL)
760 		*started = 0;
761 
762 	if (cpu_vendor_id == CPU_VENDOR_AMD) {
763 		int c1e_test = lapic_timer_c1e_test;
764 
765 		if (c1e_test < 0) {
766 			if (vmm_guest == VMM_GUEST_NONE) {
767 				c1e_test = 1;
768 			} else {
769 				/*
770 				 * Don't do this C1E testing and adjustment
771 				 * on virtual machines, the best case for
772 				 * accessing this MSR is a NOOP; the worst
773 				 * cases could be pretty nasty, e.g. crash.
774 				 */
775 				c1e_test = 0;
776 			}
777 		}
778 
779 		/*
780 		 * Detect the presence of C1E capability mostly on latest
781 		 * dual-cores (or future) k8 family.  This feature renders
782 		 * the local APIC timer dead, so we disable it by reading
783 		 * the Interrupt Pending Message register and clearing both
784 		 * C1eOnCmpHalt (bit 28) and SmiOnCmpHalt (bit 27).
785 		 *
786 		 * Reference:
787 		 *   "BIOS and Kernel Developer's Guide for AMD NPT
788 		 *    Family 0Fh Processors"
789 		 *   #32559 revision 3.00
790 		 */
791 		if ((cpu_id & 0x00000f00) == 0x00000f00 &&
792 		    (cpu_id & 0x0fff0000) >= 0x00040000 &&
793 		    c1e_test) {
794 			uint64_t msr;
795 
796 			msr = rdmsr(0xc0010055);
797 			if (msr & 0x18000000) {
798 				struct globaldata *gd = mycpu;
799 
800 				kprintf("cpu%d: AMD C1E detected\n",
801 					gd->gd_cpuid);
802 				wrmsr(0xc0010055, msr & ~0x18000000ULL);
803 
804 				/*
805 				 * We are kinda stalled;
806 				 * kick start again.
807 				 */
808 				gd->gd_timer_running = 1;
809 				if (lapic_use_tscdeadline) {
810 					/* Maybe reached in Virtual Machines? */
811 					lapic_timer_tscdeadline_quick(5000);
812 				} else {
813 					lapic_timer_oneshot_quick(2);
814 				}
815 
816 				if (started != NULL)
817 					*started = 1;
818 			}
819 		}
820 	}
821 }
822 
823 static void
824 lapic_timer_restart_handler(void *dummy __unused)
825 {
826 	int started;
827 
828 	lapic_timer_fixup_handler(&started);
829 	if (!started) {
830 		struct globaldata *gd = mycpu;
831 
832 		gd->gd_timer_running = 1;
833 		if (lapic_use_tscdeadline) {
834 			/* Maybe reached in Virtual Machines? */
835 			lapic_timer_tscdeadline_quick(5000);
836 		} else {
837 			lapic_timer_oneshot_quick(2);
838 		}
839 	}
840 }
841 
842 /*
843  * This function is called only by ACPICA code currently:
844  * - AMD C1E fixup.  AMD C1E only seems to happen after ACPI
845  *   module controls PM.  So once ACPICA is attached, we try
846  *   to apply the fixup to prevent LAPIC timer from hanging.
847  */
848 static void
849 lapic_timer_intr_pmfixup(struct cputimer_intr *cti __unused)
850 {
851 	lwkt_send_ipiq_mask(smp_active_mask,
852 			    lapic_timer_fixup_handler, NULL);
853 }
854 
855 static void
856 lapic_timer_intr_restart(struct cputimer_intr *cti __unused)
857 {
858 	lwkt_send_ipiq_mask(smp_active_mask, lapic_timer_restart_handler, NULL);
859 }
860 
861 
862 /*
863  * dump contents of local APIC registers
864  */
865 void
866 apic_dump(char* str)
867 {
868 	kprintf("SMP: CPU%d %s:\n", mycpu->gd_cpuid, str);
869 	kprintf("     lint0: 0x%08x lint1: 0x%08x TPR: 0x%08x SVR: 0x%08x\n",
870 		LAPIC_READ(lvt_lint0), LAPIC_READ(lvt_lint1), LAPIC_READ(tpr),
871 		LAPIC_READ(svr));
872 }
873 
874 /*
875  * Inter Processor Interrupt functions.
876  */
877 
878 static __inline void
879 lapic_mem_icr_unpend(const char *func)
880 {
881 	if (LAPIC_MEM_READ(icr_lo) & APIC_DELSTAT_PEND) {
882 		int64_t tsc;
883 		int loops = 1;
884 
885 		tsc = rdtsc();
886 		while (LAPIC_MEM_READ(icr_lo) & APIC_DELSTAT_PEND) {
887 			cpu_pause();
888 			if ((tsc_sclock_t)(rdtsc() -
889 					   (tsc + tsc_frequency)) > 0) {
890 				tsc = rdtsc();
891 				if (++loops > 30) {
892 					panic("%s: cpu%d apic stalled",
893 					    func, mycpuid);
894 				} else {
895 					kprintf("%s: cpu%d apic stalled\n",
896 					    func, mycpuid);
897 				}
898 			}
899 		}
900 	}
901 }
902 
903 /*
904  * Send APIC IPI 'vector' to 'destType' via 'deliveryMode'.
905  *
906  *  destType is 1 of: APIC_DEST_SELF, APIC_DEST_ALLISELF, APIC_DEST_ALLESELF
907  *  vector is any valid SYSTEM INT vector
908  *  delivery_mode is 1 of: APIC_DELMODE_FIXED, APIC_DELMODE_LOWPRIO
909  *
910  * WARNINGS!
911  *
912  * We now implement a per-cpu interlock (gd->gd_npoll) to prevent more than
913  * one IPI from being sent to any given cpu at a time.  Thus we no longer
914  * have to process incoming IPIs while waiting for the status to clear.
915  * No deadlock should be possible.
916  *
917  * We now physically disable interrupts for the lapic ICR operation.  If
918  * we do not do this then it looks like an EOI sent to the lapic (which
919  * occurs even with a critical section) can interfere with the command
920  * register ready status and cause an IPI to be lost.
921  *
922  * e.g. an interrupt can occur, issue the EOI, IRET, and cause the command
923  * register to busy just before we write to icr_lo, resulting in a lost
924  * issuance.  This only appears to occur on Intel cpus and is not
925  * documented.  It could simply be that cpus are so fast these days that
926  * it was always an issue, but is only now rearing its ugly head.  This
927  * is conjecture.
928  */
929 static int
930 lapic_mem_ipi(int dest_type, int vector, int delivery_mode)
931 {
932 	lapic_mem_icr_unpend(__func__);
933 	lapic_mem_icr_set(0,
934 	    dest_type | APIC_LEVEL_ASSERT | delivery_mode | vector);
935 	return 0;
936 }
937 
938 static int
939 lapic_msr_ipi(int dest_type, int vector, int delivery_mode)
940 {
941 	lapic_msr_icr_set(0,
942 	    dest_type | APIC_LEVEL_ASSERT | delivery_mode | vector);
943 	return 0;
944 }
945 
946 /*
947  * Interrupts must be hard-disabled by caller
948  */
949 static void
950 lapic_mem_single_ipi(int cpu, int vector, int delivery_mode)
951 {
952 	lapic_mem_icr_unpend(__func__);
953 	lapic_mem_icr_set(CPUID_TO_APICID(cpu),
954 	    APIC_DEST_DESTFLD | APIC_LEVEL_ASSERT | delivery_mode | vector);
955 }
956 
957 static void
958 lapic_msr_single_ipi(int cpu, int vector, int delivery_mode)
959 {
960 	lapic_msr_icr_set(CPUID_TO_APICID(cpu),
961 	    APIC_DEST_DESTFLD | APIC_LEVEL_ASSERT | delivery_mode | vector);
962 }
963 
964 /*
965  * Send APIC IPI 'vector' to 'target's via 'delivery_mode'.
966  *
967  * target is a bitmask of destination cpus.  Vector is any
968  * valid system INT vector.  Delivery mode may be either
969  * APIC_DELMODE_FIXED or APIC_DELMODE_LOWPRIO.
970  *
971  * Interrupts must be hard-disabled by caller
972  */
973 void
974 selected_apic_ipi(cpumask_t target, int vector, int delivery_mode)
975 {
976 	while (CPUMASK_TESTNZERO(target)) {
977 		int n = BSFCPUMASK(target);
978 		CPUMASK_NANDBIT(target, n);
979 		single_apic_ipi(n, vector, delivery_mode);
980 	}
981 }
982 
983 /*
984  * Load a 'downcount time' in uSeconds.
985  */
986 void
987 set_apic_timer(int us)
988 {
989 	u_int count;
990 
991 	if (lapic_use_tscdeadline) {
992 		uint64_t val;
993 
994 		val = lapic_scale_to_tsc(us, 1000000);
995 		val += rdtsc();
996 		/* No need to arm the lapic here, just track the timeout. */
997 		tsc_deadlines[mycpuid].downcount_time = val;
998 		return;
999 	}
1000 
1001 	/*
1002 	 * When we reach here, lapic timer's frequency
1003 	 * must have been calculated as well as the
1004 	 * divisor (lapic->dcr_timer is setup during the
1005 	 * divisor calculation).
1006 	 */
1007 	KKASSERT(lapic_cputimer_intr.freq != 0 &&
1008 		 lapic_timer_divisor_idx >= 0);
1009 
1010 	count = ((us * (int64_t)lapic_cputimer_intr.freq) + 999999) / 1000000;
1011 	lapic_timer_oneshot(count);
1012 }
1013 
1014 
1015 /*
1016  * Read remaining time in timer, in microseconds (rounded up).
1017  */
1018 int
1019 read_apic_timer(void)
1020 {
1021 	uint64_t val;
1022 
1023 	if (lapic_use_tscdeadline) {
1024 		uint64_t now;
1025 
1026 		val = tsc_deadlines[mycpuid].downcount_time;
1027 		now = rdtsc();
1028 		if (val == 0 || now > val) {
1029 			return 0;
1030 		} else {
1031 			val -= now;
1032 			val *= 1000000;
1033 			val += (tsc_frequency - 1);
1034 			val /= tsc_frequency;
1035 			if (val > INT_MAX)
1036 				val = INT_MAX;
1037 			return val;
1038 		}
1039 	}
1040 
1041 	val = LAPIC_READ(ccr_timer);
1042 	if (val == 0)
1043 		return 0;
1044 
1045 	KKASSERT(lapic_cputimer_intr.freq > 0);
1046 	val *= 1000000;
1047 	val += (lapic_cputimer_intr.freq - 1);
1048 	val /= lapic_cputimer_intr.freq;
1049 	if (val > INT_MAX)
1050 		val = INT_MAX;
1051 	return val;
1052 }
1053 
1054 
1055 /*
1056  * Spin-style delay, set delay time in uS, spin till it drains.
1057  */
/*
 * Busy-wait delay: arm a 'count' microsecond downcount and spin
 * until read_apic_timer() reports it has drained.
 */
void
u_sleep(int count)
{
	set_apic_timer(count);
	for (;;) {
		if (read_apic_timer() == 0)
			break;
		/* spin */
	}
}
1065 
1066 int
1067 lapic_unused_apic_id(int start)
1068 {
1069 	int i;
1070 
1071 	for (i = start; i < APICID_MAX; ++i) {
1072 		if (APICID_TO_CPUID(i) == -1)
1073 			return i;
1074 	}
1075 	return NAPICID;
1076 }
1077 
/*
 * Map the local APIC register block at physical address 'lapic_addr'
 * into the kernel, saving the virtual address in lapic_mem.  The
 * mapping is uncacheable since these are memory-mapped device registers.
 */
void
lapic_map(vm_paddr_t lapic_addr)
{
	lapic_mem = pmap_mapdev_uncacheable(lapic_addr, sizeof(struct LAPIC));
}
1083 
/*
 * Put the current CPU's local APIC into x2APIC mode.
 *
 * Called with bsp=TRUE exactly once on the boot processor, which also
 * redirects the EOI/IPI/timer-reload hooks from their memory-mapped
 * implementations to the MSR-based x2APIC ones.  APs call with
 * bsp=FALSE and only flip the mode bit in MSR_APICBASE.
 */
void
lapic_x2apic_enter(boolean_t bsp)
{
	uint64_t apic_base;

	KASSERT(x2apic_enable, ("X2APIC mode is not enabled"));

	/*
	 * X2APIC mode is requested, if it has not been enabled by the BIOS,
	 * enable it now.
	 */
	apic_base = rdmsr(MSR_APICBASE);
	if ((apic_base & APICBASE_X2APIC) == 0) {
		wrmsr(MSR_APICBASE,
		    apic_base | APICBASE_X2APIC | APICBASE_ENABLED);
	}
	if (bsp) {
		/* Switch global access hooks to the MSR-based variants. */
		lapic_eoi = lapic_msr_eoi;
		apic_ipi = lapic_msr_ipi;
		single_apic_ipi = lapic_msr_single_ipi;
		lapic_cputimer_intr.reload = lapic_msr_timer_intr_reload;
	}
}
1107 
1108 static TAILQ_HEAD(, lapic_enumerator) lapic_enumerators =
1109 	TAILQ_HEAD_INITIALIZER(lapic_enumerators);
1110 
1111 int
1112 lapic_config(void)
1113 {
1114 	struct lapic_enumerator *e;
1115 	uint64_t apic_base;
1116 	int error, i, ap_max;
1117 
1118 	KKASSERT(lapic_enable);
1119 
1120 	lapic_eoi = lapic_mem_eoi;
1121 	apic_ipi = lapic_mem_ipi;
1122 	single_apic_ipi = lapic_mem_single_ipi;
1123 
1124 	TUNABLE_INT_FETCH("hw.x2apic_enable", &x2apic_enable);
1125 	if (x2apic_enable < 0)
1126 		x2apic_enable = 1;
1127 
1128 	if ((cpu_feature2 & CPUID2_X2APIC) == 0) {
1129 		/* X2APIC is not supported. */
1130 		x2apic_enable = 0;
1131 	} else if (!x2apic_enable) {
1132 		/*
1133 		 * If the BIOS enabled the X2APIC mode, then we would stick
1134 		 * with the X2APIC mode.
1135 		 */
1136 		apic_base = rdmsr(MSR_APICBASE);
1137 		if (apic_base & APICBASE_X2APIC) {
1138 			kprintf("LAPIC: BIOS enabled X2APIC mode\n");
1139 			x2apic_enable = 1;
1140 		}
1141 	}
1142 
1143 	if (x2apic_enable) {
1144 		/*
1145 		 * Enter X2APIC mode.
1146 		 */
1147 		kprintf("LAPIC: enter X2APIC mode\n");
1148 		lapic_x2apic_enter(TRUE);
1149 	}
1150 
1151 	for (i = 0; i < NAPICID; ++i)
1152 		APICID_TO_CPUID(i) = -1;
1153 
1154 	TAILQ_FOREACH(e, &lapic_enumerators, lapic_link) {
1155 		error = e->lapic_probe(e);
1156 		if (!error)
1157 			break;
1158 	}
1159 	if (e == NULL) {
1160 		kprintf("LAPIC: Can't find LAPIC\n");
1161 		return ENXIO;
1162 	}
1163 
1164 	error = e->lapic_enumerate(e);
1165 	if (error) {
1166 		kprintf("LAPIC: enumeration failed\n");
1167 		return ENXIO;
1168 	}
1169 
1170 	/* LAPIC is usable now. */
1171 	lapic_usable = 1;
1172 
1173 	ap_max = MAXCPU - 1;
1174 	TUNABLE_INT_FETCH("hw.ap_max", &ap_max);
1175 	if (ap_max > MAXCPU - 1)
1176 		ap_max = MAXCPU - 1;
1177 
1178 	if (naps > ap_max) {
1179 		kprintf("LAPIC: Warning use only %d out of %d "
1180 			"available APs\n",
1181 			ap_max, naps);
1182 		naps = ap_max;
1183 	}
1184 
1185 	return 0;
1186 }
1187 
1188 void
1189 lapic_enumerator_register(struct lapic_enumerator *ne)
1190 {
1191 	struct lapic_enumerator *e;
1192 
1193 	TAILQ_FOREACH(e, &lapic_enumerators, lapic_link) {
1194 		if (e->lapic_prio < ne->lapic_prio) {
1195 			TAILQ_INSERT_BEFORE(e, ne, lapic_link);
1196 			return;
1197 		}
1198 	}
1199 	TAILQ_INSERT_TAIL(&lapic_enumerators, ne, lapic_link);
1200 }
1201 
/*
 * Record the bidirectional mapping between a cpu id and its APIC id.
 */
void
lapic_set_cpuid(int cpu_id, int apic_id)
{
	CPUID_TO_APICID(cpu_id) = apic_id;
	APICID_TO_CPUID(apic_id) = cpu_id;
}
1208 
1209 void
1210 lapic_fixup_noioapic(void)
1211 {
1212 	u_int   temp;
1213 
1214 	/* Only allowed on BSP */
1215 	KKASSERT(mycpuid == 0);
1216 	KKASSERT(!ioapic_enable);
1217 
1218 	temp = LAPIC_READ(lvt_lint0);
1219 	temp &= ~APIC_LVT_MASKED;
1220 	LAPIC_WRITE(lvt_lint0, temp);
1221 
1222 	temp = LAPIC_READ(lvt_lint1);
1223 	temp |= APIC_LVT_MASKED;
1224 	LAPIC_WRITE(lvt_lint1, temp);
1225 }
1226 
/*
 * Signal end-of-interrupt through the memory-mapped LAPIC EOI register.
 */
static void
lapic_mem_eoi(void)
{
	log_lapic(mem_eoi);
	LAPIC_MEM_WRITE(eoi, 0);
}
1233 
/*
 * Signal end-of-interrupt through the x2APIC EOI MSR.
 */
static void
lapic_msr_eoi(void)
{
	log_lapic(msr_eoi);
	LAPIC_MSR_WRITE(MSR_X2APIC_EOI, 0);
}
1240 
/*
 * Program the memory-mapped ICR with the target apic id and command,
 * then busy-wait until the delivery-status (pending) bit clears.
 */
static void
lapic_mem_seticr_sync(uint32_t apic_id, uint32_t icr_lo_val)
{
	lapic_mem_icr_set(apic_id, icr_lo_val);
	while (LAPIC_MEM_READ(icr_lo) & APIC_DELSTAT_PEND)
		/* spin */;
}
1248 
1249 void
1250 lapic_seticr_sync(uint32_t apic_id, uint32_t icr_lo_val)
1251 {
1252 	if (x2apic_enable)
1253 		lapic_msr_icr_set(apic_id, icr_lo_val);
1254 	else
1255 		lapic_mem_seticr_sync(apic_id, icr_lo_val);
1256 }
1257 
/*
 * Boot-time LAPIC setup (SI_BOOT2_LAPIC).
 *
 * Runs lapic_config() when the LAPIC is enabled, falling back to
 * LAPIC-less operation if configuration fails: lapic_enable and
 * x2apic_enable are cleared, and if the I/O APIC was requested it is
 * disabled and the legacy 8259 ICU path is re-initialized instead.
 */
static void
lapic_sysinit(void *dummy __unused)
{
	if (lapic_enable) {
		int error;

		error = lapic_config();
		if (error)
			lapic_enable = 0;	/* fall back: no LAPIC */
	}
	/* x2APIC makes no sense without a working LAPIC. */
	if (!lapic_enable)
		x2apic_enable = 0;

	if (lapic_enable) {
		/* Initialize BSP's local APIC */
		lapic_init(TRUE);
	} else if (ioapic_enable) {
		ioapic_enable = 0;
		icu_reinit_noioapic();
	}
}
SYSINIT(lapic, SI_BOOT2_LAPIC, SI_ORDER_FIRST, lapic_sysinit, NULL);
1280