xref: /freebsd/sys/x86/x86/local_apic.c (revision 5b9c547c)
1 /*-
2  * Copyright (c) 2003 John Baldwin <jhb@FreeBSD.org>
3  * Copyright (c) 1996, by Steve Passe
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. The name of the developer may NOT be used to endorse or promote products
12  *    derived from this software without specific prior written permission.
13  * 3. Neither the name of the author nor the names of any co-contributors
14  *    may be used to endorse or promote products derived from this software
15  *    without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  */
29 
30 /*
31  * Local APIC support on Pentium and later processors.
32  */
33 
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
36 
37 #include "opt_atpic.h"
38 #include "opt_hwpmc_hooks.h"
39 
40 #include "opt_ddb.h"
41 
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/bus.h>
45 #include <sys/kernel.h>
46 #include <sys/lock.h>
47 #include <sys/mutex.h>
48 #include <sys/pcpu.h>
49 #include <sys/proc.h>
50 #include <sys/sched.h>
51 #include <sys/smp.h>
52 #include <sys/sysctl.h>
53 #include <sys/timeet.h>
54 
55 #include <vm/vm.h>
56 #include <vm/pmap.h>
57 
58 #include <x86/apicreg.h>
59 #include <machine/cpufunc.h>
60 #include <machine/cputypes.h>
61 #include <machine/frame.h>
62 #include <machine/intr_machdep.h>
63 #include <x86/apicvar.h>
64 #include <x86/mca.h>
65 #include <machine/md_var.h>
66 #include <machine/smp.h>
67 #include <machine/specialreg.h>
68 #include <x86/init.h>
69 
70 #ifdef DDB
71 #include <sys/interrupt.h>
72 #include <ddb/ddb.h>
73 #endif
74 
75 #ifdef __amd64__
76 #define	SDT_APIC	SDT_SYSIGT
77 #define	SDT_APICT	SDT_SYSIGT
78 #define	GSEL_APIC	0
79 #else
80 #define	SDT_APIC	SDT_SYS386IGT
81 #define	SDT_APICT	SDT_SYS386TGT
82 #define	GSEL_APIC	GSEL(GCODE_SEL, SEL_KPL)
83 #endif
84 
85 /* Sanity checks on IDT vectors. */
86 CTASSERT(APIC_IO_INTS + APIC_NUM_IOINTS == APIC_TIMER_INT);
87 CTASSERT(APIC_TIMER_INT < APIC_LOCAL_INTS);
88 CTASSERT(APIC_LOCAL_INTS == 240);
89 CTASSERT(IPI_STOP < APIC_SPURIOUS_INT);
90 
91 /* Magic IRQ values for the timer, syscalls, and other special vectors. */
92 #define	IRQ_TIMER	(NUM_IO_INTS + 1)
93 #define	IRQ_SYSCALL	(NUM_IO_INTS + 2)
94 #define	IRQ_DTRACE_RET	(NUM_IO_INTS + 3)
95 #define	IRQ_EVTCHN	(NUM_IO_INTS + 4)
96 
97 /*
98  * Support for local APICs.  Local APICs manage interrupts on each
99  * individual processor as opposed to I/O APICs which receive interrupts
100  * from I/O devices and then forward them on to the local APICs.
101  *
102  * Local APICs can also send interrupts to each other thus providing the
103  * mechanism for IPIs.
104  */
105 
106 struct lvt {
107 	u_int lvt_edgetrigger:1;
108 	u_int lvt_activehi:1;
109 	u_int lvt_masked:1;
110 	u_int lvt_active:1;
111 	u_int lvt_mode:16;
112 	u_int lvt_vector:8;
113 };
114 
115 struct lapic {
116 	struct lvt la_lvts[APIC_LVT_MAX + 1];
117 	u_int la_id:8;
118 	u_int la_cluster:4;
119 	u_int la_cluster_id:2;
120 	u_int la_present:1;
121 	u_long *la_timer_count;
122 	u_long la_timer_period;
123 	u_int la_timer_mode;
124 	uint32_t lvt_timer_cache;
125 	/* Include IDT_SYSCALL to make indexing easier. */
126 	int la_ioint_irqs[APIC_NUM_IOINTS + 1];
127 } static lapics[MAX_APIC_ID + 1];
128 
129 /* Global defaults for local APIC LVT entries. */
130 static struct lvt lvts[APIC_LVT_MAX + 1] = {
131 	{ 1, 1, 1, 1, APIC_LVT_DM_EXTINT, 0 },	/* LINT0: masked ExtINT */
132 	{ 1, 1, 0, 1, APIC_LVT_DM_NMI, 0 },	/* LINT1: NMI */
133 	{ 1, 1, 1, 1, APIC_LVT_DM_FIXED, APIC_TIMER_INT },	/* Timer */
134 	{ 1, 1, 0, 1, APIC_LVT_DM_FIXED, APIC_ERROR_INT },	/* Error */
135 	{ 1, 1, 1, 1, APIC_LVT_DM_NMI, 0 },	/* PMC */
136 	{ 1, 1, 1, 1, APIC_LVT_DM_FIXED, APIC_THERMAL_INT },	/* Thermal */
137 	{ 1, 1, 1, 1, APIC_LVT_DM_FIXED, APIC_CMC_INT },	/* CMCI */
138 };
139 
140 static inthand_t *ioint_handlers[] = {
141 	NULL,			/* 0 - 31 */
142 	IDTVEC(apic_isr1),	/* 32 - 63 */
143 	IDTVEC(apic_isr2),	/* 64 - 95 */
144 	IDTVEC(apic_isr3),	/* 96 - 127 */
145 	IDTVEC(apic_isr4),	/* 128 - 159 */
146 	IDTVEC(apic_isr5),	/* 160 - 191 */
147 	IDTVEC(apic_isr6),	/* 192 - 223 */
148 	IDTVEC(apic_isr7),	/* 224 - 255 */
149 };
150 
151 
152 static u_int32_t lapic_timer_divisors[] = {
153 	APIC_TDCR_1, APIC_TDCR_2, APIC_TDCR_4, APIC_TDCR_8, APIC_TDCR_16,
154 	APIC_TDCR_32, APIC_TDCR_64, APIC_TDCR_128
155 };
156 
157 extern inthand_t IDTVEC(rsvd);
158 
159 volatile char *lapic_map;
160 vm_paddr_t lapic_paddr;
161 int x2apic_mode;
162 int lapic_eoi_suppression;
163 static u_long lapic_timer_divisor;
164 static struct eventtimer lapic_et;
165 
166 SYSCTL_NODE(_hw, OID_AUTO, apic, CTLFLAG_RD, 0, "APIC options");
167 SYSCTL_INT(_hw_apic, OID_AUTO, x2apic_mode, CTLFLAG_RD, &x2apic_mode, 0, "");
168 SYSCTL_INT(_hw_apic, OID_AUTO, eoi_suppression, CTLFLAG_RD,
169     &lapic_eoi_suppression, 0, "");
170 
171 static uint32_t
172 lapic_read32(enum LAPIC_REGISTERS reg)
173 {
174 	uint32_t res;
175 
176 	if (x2apic_mode) {
177 		res = rdmsr32(MSR_APIC_000 + reg);
178 	} else {
179 		res = *(volatile uint32_t *)(lapic_map + reg * LAPIC_MEM_MUL);
180 	}
181 	return (res);
182 }
183 
184 static void
185 lapic_write32(enum LAPIC_REGISTERS reg, uint32_t val)
186 {
187 
188 	if (x2apic_mode) {
189 		mfence();
190 		wrmsr(MSR_APIC_000 + reg, val);
191 	} else {
192 		*(volatile uint32_t *)(lapic_map + reg * LAPIC_MEM_MUL) = val;
193 	}
194 }
195 
196 static void
197 lapic_write32_nofence(enum LAPIC_REGISTERS reg, uint32_t val)
198 {
199 
200 	if (x2apic_mode) {
201 		wrmsr(MSR_APIC_000 + reg, val);
202 	} else {
203 		*(volatile uint32_t *)(lapic_map + reg * LAPIC_MEM_MUL) = val;
204 	}
205 }
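
/*
 * Illustrative sketch (annotation, not part of the original source):
 * the accessors above address the same architectural registers two
 * ways.  In xAPIC mode a register is a 32-bit load/store at a
 * 16-byte-aligned offset within the memory-mapped page (LAPIC_MEM_MUL
 * is that stride); in x2APIC mode the same register index becomes an
 * MSR:
 *
 *	xAPIC:  v = *(volatile uint32_t *)(lapic_map + reg * LAPIC_MEM_MUL);
 *	x2APIC: v = rdmsr32(MSR_APIC_000 + reg);
 */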
206 
207 static uint64_t
208 lapic_read_icr(void)
209 {
210 	uint64_t v;
211 	uint32_t vhi, vlo;
212 
213 	if (x2apic_mode) {
214 		v = rdmsr(MSR_APIC_000 + LAPIC_ICR_LO);
215 	} else {
216 		vhi = lapic_read32(LAPIC_ICR_HI);
217 		vlo = lapic_read32(LAPIC_ICR_LO);
218 		v = ((uint64_t)vhi << 32) | vlo;
219 	}
220 	return (v);
221 }
222 
223 static uint64_t
224 lapic_read_icr_lo(void)
225 {
226 
227 	return (lapic_read32(LAPIC_ICR_LO));
228 }
229 
230 static void
231 lapic_write_icr(uint32_t vhi, uint32_t vlo)
232 {
233 	uint64_t v;
234 
235 	if (x2apic_mode) {
236 		v = ((uint64_t)vhi << 32) | vlo;
237 		mfence();
238 		wrmsr(MSR_APIC_000 + LAPIC_ICR_LO, v);
239 	} else {
240 		lapic_write32(LAPIC_ICR_HI, vhi);
241 		lapic_write32(LAPIC_ICR_LO, vlo);
242 	}
243 }
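
/*
 * Worked example (sketch): in xAPIC mode the 64-bit ICR is split
 * across two 32-bit registers, with the destination APIC ID in the
 * high dword (bits 56-63 of the full ICR).  A fixed IPI on vector
 * 0x50 to APIC ID 3 would be programmed roughly as:
 *
 *	lapic_write_icr(3 << APIC_ID_SHIFT,	(high dword 0x03000000)
 *	    APIC_DELMODE_FIXED | 0x50);
 *
 * In x2APIC mode the ICR is a single 64-bit MSR write and the high
 * dword holds the full 32-bit x2APIC destination ID.
 */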
244 
245 static void
246 native_lapic_enable_x2apic(void)
247 {
248 	uint64_t apic_base;
249 
250 	apic_base = rdmsr(MSR_APICBASE);
251 	apic_base |= APICBASE_X2APIC | APICBASE_ENABLED;
252 	wrmsr(MSR_APICBASE, apic_base);
253 }
254 
255 static void	lapic_enable(void);
256 static void	lapic_resume(struct pic *pic, bool suspend_cancelled);
257 static void	lapic_timer_oneshot(struct lapic *,
258 		    u_int count, int enable_int);
259 static void	lapic_timer_periodic(struct lapic *,
260 		    u_int count, int enable_int);
261 static void	lapic_timer_stop(struct lapic *);
262 static void	lapic_timer_set_divisor(u_int divisor);
263 static uint32_t	lvt_mode(struct lapic *la, u_int pin, uint32_t value);
264 static int	lapic_et_start(struct eventtimer *et,
265 		    sbintime_t first, sbintime_t period);
266 static int	lapic_et_stop(struct eventtimer *et);
267 static u_int	apic_idt_to_irq(u_int apic_id, u_int vector);
268 static void	lapic_set_tpr(u_int vector);
269 
270 struct pic lapic_pic = { .pic_resume = lapic_resume };
271 
272 /* Forward declarations for apic_ops */
273 static void	native_lapic_create(u_int apic_id, int boot_cpu);
274 static void	native_lapic_init(vm_paddr_t addr);
275 static void	native_lapic_xapic_mode(void);
276 static void	native_lapic_setup(int boot);
277 static void	native_lapic_dump(const char *str);
278 static void	native_lapic_disable(void);
279 static void	native_lapic_eoi(void);
280 static int	native_lapic_id(void);
281 static int	native_lapic_intr_pending(u_int vector);
282 static u_int	native_apic_cpuid(u_int apic_id);
283 static u_int	native_apic_alloc_vector(u_int apic_id, u_int irq);
284 static u_int	native_apic_alloc_vectors(u_int apic_id, u_int *irqs,
285 		    u_int count, u_int align);
286 static void 	native_apic_disable_vector(u_int apic_id, u_int vector);
287 static void 	native_apic_enable_vector(u_int apic_id, u_int vector);
288 static void 	native_apic_free_vector(u_int apic_id, u_int vector, u_int irq);
289 static void 	native_lapic_set_logical_id(u_int apic_id, u_int cluster,
290 		    u_int cluster_id);
291 static int 	native_lapic_enable_pmc(void);
292 static void 	native_lapic_disable_pmc(void);
293 static void 	native_lapic_reenable_pmc(void);
294 static void 	native_lapic_enable_cmc(void);
295 static void 	native_lapic_ipi_raw(register_t icrlo, u_int dest);
296 static void 	native_lapic_ipi_vectored(u_int vector, int dest);
297 static int 	native_lapic_ipi_wait(int delay);
298 static int 	native_lapic_set_lvt_mask(u_int apic_id, u_int lvt,
299 		    u_char masked);
300 static int 	native_lapic_set_lvt_mode(u_int apic_id, u_int lvt,
301 		    uint32_t mode);
302 static int 	native_lapic_set_lvt_polarity(u_int apic_id, u_int lvt,
303 		    enum intr_polarity pol);
304 static int 	native_lapic_set_lvt_triggermode(u_int apic_id, u_int lvt,
305 		    enum intr_trigger trigger);
306 static int	native_lapic_ipi_alloc(inthand_t *ipifunc);
307 static void	native_lapic_ipi_free(int vector);
308 
309 struct apic_ops apic_ops = {
310 	.create			= native_lapic_create,
311 	.init			= native_lapic_init,
312 	.xapic_mode		= native_lapic_xapic_mode,
313 	.setup			= native_lapic_setup,
314 	.dump			= native_lapic_dump,
315 	.disable		= native_lapic_disable,
316 	.eoi			= native_lapic_eoi,
317 	.id			= native_lapic_id,
318 	.intr_pending		= native_lapic_intr_pending,
319 	.set_logical_id		= native_lapic_set_logical_id,
320 	.cpuid			= native_apic_cpuid,
321 	.alloc_vector		= native_apic_alloc_vector,
322 	.alloc_vectors		= native_apic_alloc_vectors,
323 	.enable_vector		= native_apic_enable_vector,
324 	.disable_vector		= native_apic_disable_vector,
325 	.free_vector		= native_apic_free_vector,
326 	.enable_pmc		= native_lapic_enable_pmc,
327 	.disable_pmc		= native_lapic_disable_pmc,
328 	.reenable_pmc		= native_lapic_reenable_pmc,
329 	.enable_cmc		= native_lapic_enable_cmc,
330 #ifdef SMP
331 	.ipi_raw		= native_lapic_ipi_raw,
332 	.ipi_vectored		= native_lapic_ipi_vectored,
333 	.ipi_wait		= native_lapic_ipi_wait,
334 	.ipi_alloc		= native_lapic_ipi_alloc,
335 	.ipi_free		= native_lapic_ipi_free,
336 #endif
337 	.set_lvt_mask		= native_lapic_set_lvt_mask,
338 	.set_lvt_mode		= native_lapic_set_lvt_mode,
339 	.set_lvt_polarity	= native_lapic_set_lvt_polarity,
340 	.set_lvt_triggermode	= native_lapic_set_lvt_triggermode,
341 };
342 
343 static uint32_t
344 lvt_mode(struct lapic *la, u_int pin, uint32_t value)
345 {
346 	struct lvt *lvt;
347 
348 	KASSERT(pin <= APIC_LVT_MAX, ("%s: pin %u out of range", __func__, pin));
349 	if (la->la_lvts[pin].lvt_active)
350 		lvt = &la->la_lvts[pin];
351 	else
352 		lvt = &lvts[pin];
353 
354 	value &= ~(APIC_LVT_M | APIC_LVT_TM | APIC_LVT_IIPP | APIC_LVT_DM |
355 	    APIC_LVT_VECTOR);
356 	if (lvt->lvt_edgetrigger == 0)
357 		value |= APIC_LVT_TM;
358 	if (lvt->lvt_activehi == 0)
359 		value |= APIC_LVT_IIPP_INTALO;
360 	if (lvt->lvt_masked)
361 		value |= APIC_LVT_M;
362 	value |= lvt->lvt_mode;
363 	switch (lvt->lvt_mode) {
364 	case APIC_LVT_DM_NMI:
365 	case APIC_LVT_DM_SMI:
366 	case APIC_LVT_DM_INIT:
367 	case APIC_LVT_DM_EXTINT:
368 		if (!lvt->lvt_edgetrigger && bootverbose) {
369 			printf("lapic%u: Forcing LINT%u to edge trigger\n",
370 			    la->la_id, pin);
371 			value |= APIC_LVT_TM;
372 		}
373 		/* Use a vector of 0. */
374 		break;
375 	case APIC_LVT_DM_FIXED:
376 		value |= lvt->lvt_vector;
377 		break;
378 	default:
379 		panic("bad APIC LVT delivery mode: %#x\n", value);
380 	}
381 	return (value);
382 }
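
/*
 * Worked example (sketch): for the default timer LVT above
 * (edge-triggered, active-high, masked, fixed delivery), lvt_mode()
 * clears the trigger/polarity/mask/mode/vector fields of the current
 * register value and rebuilds them as
 * APIC_LVT_M | APIC_LVT_DM_FIXED | APIC_TIMER_INT; APIC_LVT_TM and
 * APIC_LVT_IIPP_INTALO stay clear because the entry is edge-triggered
 * and active-high.
 */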
383 
384 /*
385  * Map the local APIC and setup necessary interrupt vectors.
386  */
387 static void
388 native_lapic_init(vm_paddr_t addr)
389 {
390 	uint32_t ver;
391 	u_int regs[4];
392 	int i, arat;
393 
394 	/*
395 	 * Enable x2APIC mode if possible. Map the local APIC
396 	 * registers page.
397 	 *
398 	 * Keep the LAPIC registers page mapped uncached for x2APIC
399 	 * mode too, so that the direct map page attribute for it is
400 	 * set to uncached.  This is needed to work around CPU errata
401 	 * present on all Intel processors.
402 	 */
403 	KASSERT(trunc_page(addr) == addr,
404 	    ("local APIC not aligned on a page boundary"));
405 	lapic_paddr = addr;
406 	lapic_map = pmap_mapdev(addr, PAGE_SIZE);
407 	if (x2apic_mode) {
408 		native_lapic_enable_x2apic();
409 		lapic_map = NULL;
410 	}
411 
412 	/* Setup the spurious interrupt handler. */
413 	setidt(APIC_SPURIOUS_INT, IDTVEC(spuriousint), SDT_APIC, SEL_KPL,
414 	    GSEL_APIC);
415 
416 	/* Perform basic initialization of the BSP's local APIC. */
417 	lapic_enable();
418 
419 	/* Set BSP's per-CPU local APIC ID. */
420 	PCPU_SET(apic_id, lapic_id());
421 
422 	/* Local APIC timer interrupt. */
423 	setidt(APIC_TIMER_INT, IDTVEC(timerint), SDT_APIC, SEL_KPL, GSEL_APIC);
424 
425 	/* Local APIC error interrupt. */
426 	setidt(APIC_ERROR_INT, IDTVEC(errorint), SDT_APIC, SEL_KPL, GSEL_APIC);
427 
428 	/* XXX: Thermal interrupt */
429 
430 	/* Local APIC CMCI. */
431 	setidt(APIC_CMC_INT, IDTVEC(cmcint), SDT_APICT, SEL_KPL, GSEL_APIC);
432 
433 	if ((resource_int_value("apic", 0, "clock", &i) != 0 || i != 0)) {
434 		arat = 0;
435 		/* Intel CPUID 0x06 EAX[2] set if APIC timer runs in C3. */
436 		if (cpu_vendor_id == CPU_VENDOR_INTEL && cpu_high >= 6) {
437 			do_cpuid(0x06, regs);
438 			if ((regs[0] & CPUTPM1_ARAT) != 0)
439 				arat = 1;
440 		}
441 		bzero(&lapic_et, sizeof(lapic_et));
442 		lapic_et.et_name = "LAPIC";
443 		lapic_et.et_flags = ET_FLAGS_PERIODIC | ET_FLAGS_ONESHOT |
444 		    ET_FLAGS_PERCPU;
445 		lapic_et.et_quality = 600;
446 		if (!arat) {
447 			lapic_et.et_flags |= ET_FLAGS_C3STOP;
448 			lapic_et.et_quality -= 200;
449 		}
450 		lapic_et.et_frequency = 0;
451 		/* We don't know the frequency yet, so start with a guess. */
452 		lapic_et.et_min_period = 0x00001000LL;
453 		lapic_et.et_max_period = SBT_1S;
454 		lapic_et.et_start = lapic_et_start;
455 		lapic_et.et_stop = lapic_et_stop;
456 		lapic_et.et_priv = NULL;
457 		et_register(&lapic_et);
458 	}
459 
460 	/*
461 	 * Set lapic_eoi_suppression after lapic_enable(), so that
462 	 * suppression is not enabled in the hardware prematurely.
463 	 * Note that we enable suppression by default even when the
464 	 * system has only one I/O APIC, since otherwise an EOI is
465 	 * broadcast to all APIC agents, including CPUs.
466 	 */
467 	ver = lapic_read32(LAPIC_VERSION);
468 	if ((ver & APIC_VER_EOI_SUPPRESSION) != 0) {
469 		lapic_eoi_suppression = 1;
470 		TUNABLE_INT_FETCH("hw.lapic_eoi_suppression",
471 		    &lapic_eoi_suppression);
472 	}
473 }
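
/*
 * Usage note (sketch): the TUNABLE_INT_FETCH() above means EOI
 * suppression can be overridden from the loader on CPUs that
 * advertise APIC_VER_EOI_SUPPRESSION, e.g. in /boot/loader.conf:
 *
 *	hw.lapic_eoi_suppression="0"
 */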
474 
475 /*
476  * Create a local APIC instance.
477  */
478 static void
479 native_lapic_create(u_int apic_id, int boot_cpu)
480 {
481 	int i;
482 
483 	if (apic_id > MAX_APIC_ID) {
484 		printf("APIC: Ignoring local APIC with ID %d\n", apic_id);
485 		if (boot_cpu)
486 			panic("Can't ignore BSP");
487 		return;
488 	}
489 	KASSERT(!lapics[apic_id].la_present, ("duplicate local APIC %u",
490 	    apic_id));
491 
492 	/*
493 	 * Assume no local LVT overrides and a cluster of 0 and
494 	 * intra-cluster ID of 0.
495 	 */
496 	lapics[apic_id].la_present = 1;
497 	lapics[apic_id].la_id = apic_id;
498 	for (i = 0; i <= APIC_LVT_MAX; i++) {
499 		lapics[apic_id].la_lvts[i] = lvts[i];
500 		lapics[apic_id].la_lvts[i].lvt_active = 0;
501 	}
502 	for (i = 0; i <= APIC_NUM_IOINTS; i++)
503 	    lapics[apic_id].la_ioint_irqs[i] = -1;
504 	lapics[apic_id].la_ioint_irqs[IDT_SYSCALL - APIC_IO_INTS] = IRQ_SYSCALL;
505 	lapics[apic_id].la_ioint_irqs[APIC_TIMER_INT - APIC_IO_INTS] =
506 	    IRQ_TIMER;
507 #ifdef KDTRACE_HOOKS
508 	lapics[apic_id].la_ioint_irqs[IDT_DTRACE_RET - APIC_IO_INTS] =
509 	    IRQ_DTRACE_RET;
510 #endif
511 #ifdef XENHVM
512 	lapics[apic_id].la_ioint_irqs[IDT_EVTCHN - APIC_IO_INTS] = IRQ_EVTCHN;
513 #endif
514 
515 
516 #ifdef SMP
517 	cpu_add(apic_id, boot_cpu);
518 #endif
519 }
520 
521 /*
522  * Dump contents of local APIC registers
523  */
524 static void
525 native_lapic_dump(const char* str)
526 {
527 	uint32_t maxlvt;
528 
529 	maxlvt = (lapic_read32(LAPIC_VERSION) & APIC_VER_MAXLVT) >> MAXLVTSHIFT;
530 	printf("cpu%d %s:\n", PCPU_GET(cpuid), str);
531 	printf("     ID: 0x%08x   VER: 0x%08x LDR: 0x%08x DFR: 0x%08x",
532 	    lapic_read32(LAPIC_ID), lapic_read32(LAPIC_VERSION),
533 	    lapic_read32(LAPIC_LDR), x2apic_mode ? 0 : lapic_read32(LAPIC_DFR));
534 	if ((cpu_feature2 & CPUID2_X2APIC) != 0)
535 		printf(" x2APIC: %d", x2apic_mode);
536 	printf("\n  lint0: 0x%08x lint1: 0x%08x TPR: 0x%08x SVR: 0x%08x\n",
537 	    lapic_read32(LAPIC_LVT_LINT0), lapic_read32(LAPIC_LVT_LINT1),
538 	    lapic_read32(LAPIC_TPR), lapic_read32(LAPIC_SVR));
539 	printf("  timer: 0x%08x therm: 0x%08x err: 0x%08x",
540 	    lapic_read32(LAPIC_LVT_TIMER), lapic_read32(LAPIC_LVT_THERMAL),
541 	    lapic_read32(LAPIC_LVT_ERROR));
542 	if (maxlvt >= APIC_LVT_PMC)
543 		printf(" pmc: 0x%08x", lapic_read32(LAPIC_LVT_PCINT));
544 	printf("\n");
545 	if (maxlvt >= APIC_LVT_CMCI)
546 		printf("   cmci: 0x%08x\n", lapic_read32(LAPIC_LVT_CMCI));
547 }
548 
549 static void
550 native_lapic_xapic_mode(void)
551 {
552 	register_t saveintr;
553 
554 	saveintr = intr_disable();
555 	if (x2apic_mode)
556 		native_lapic_enable_x2apic();
557 	intr_restore(saveintr);
558 }
559 
560 static void
561 native_lapic_setup(int boot)
562 {
563 	struct lapic *la;
564 	uint32_t maxlvt;
565 	register_t saveintr;
566 	char buf[MAXCOMLEN + 1];
567 
568 	saveintr = intr_disable();
569 
570 	la = &lapics[lapic_id()];
571 	KASSERT(la->la_present, ("missing APIC structure"));
572 	maxlvt = (lapic_read32(LAPIC_VERSION) & APIC_VER_MAXLVT) >> MAXLVTSHIFT;
573 
574 	/* Initialize the TPR to allow all interrupts. */
575 	lapic_set_tpr(0);
576 
577 	/* Setup spurious vector and enable the local APIC. */
578 	lapic_enable();
579 
580 	/* Program LINT[01] LVT entries. */
581 	lapic_write32(LAPIC_LVT_LINT0, lvt_mode(la, APIC_LVT_LINT0,
582 	    lapic_read32(LAPIC_LVT_LINT0)));
583 	lapic_write32(LAPIC_LVT_LINT1, lvt_mode(la, APIC_LVT_LINT1,
584 	    lapic_read32(LAPIC_LVT_LINT1)));
585 
586 	/* Program the PMC LVT entry if present. */
587 	if (maxlvt >= APIC_LVT_PMC) {
588 		lapic_write32(LAPIC_LVT_PCINT, lvt_mode(la, APIC_LVT_PMC,
589 		    lapic_read32(LAPIC_LVT_PCINT)));
590 	}
591 
592 	/* Program timer LVT and setup handler. */
593 	la->lvt_timer_cache = lvt_mode(la, APIC_LVT_TIMER,
594 	    lapic_read32(LAPIC_LVT_TIMER));
595 	lapic_write32(LAPIC_LVT_TIMER, la->lvt_timer_cache);
596 	if (boot) {
597 		snprintf(buf, sizeof(buf), "cpu%d:timer", PCPU_GET(cpuid));
598 		intrcnt_add(buf, &la->la_timer_count);
599 	}
600 
601 	/* Setup the timer if configured. */
602 	if (la->la_timer_mode != 0) {
603 		KASSERT(la->la_timer_period != 0, ("lapic%u: zero divisor",
604 		    lapic_id()));
605 		lapic_timer_set_divisor(lapic_timer_divisor);
606 		if (la->la_timer_mode == 1)
607 			lapic_timer_periodic(la, la->la_timer_period, 1);
608 		else
609 			lapic_timer_oneshot(la, la->la_timer_period, 1);
610 	}
611 
612 	/* Program error LVT and clear any existing errors. */
613 	lapic_write32(LAPIC_LVT_ERROR, lvt_mode(la, APIC_LVT_ERROR,
614 	    lapic_read32(LAPIC_LVT_ERROR)));
615 	lapic_write32(LAPIC_ESR, 0);
616 
617 	/* XXX: Thermal LVT */
618 
619 	/* Program the CMCI LVT entry if present. */
620 	if (maxlvt >= APIC_LVT_CMCI) {
621 		lapic_write32(LAPIC_LVT_CMCI, lvt_mode(la, APIC_LVT_CMCI,
622 		    lapic_read32(LAPIC_LVT_CMCI)));
623 	}
624 
625 	intr_restore(saveintr);
626 }
627 
628 static void
629 native_lapic_reenable_pmc(void)
630 {
631 #ifdef HWPMC_HOOKS
632 	uint32_t value;
633 
634 	value = lapic_read32(LAPIC_LVT_PCINT);
635 	value &= ~APIC_LVT_M;
636 	lapic_write32(LAPIC_LVT_PCINT, value);
637 #endif
638 }
639 
640 #ifdef HWPMC_HOOKS
641 static void
642 lapic_update_pmc(void *dummy)
643 {
644 	struct lapic *la;
645 
646 	la = &lapics[lapic_id()];
647 	lapic_write32(LAPIC_LVT_PCINT, lvt_mode(la, APIC_LVT_PMC,
648 	    lapic_read32(LAPIC_LVT_PCINT)));
649 }
650 #endif
651 
652 static int
653 native_lapic_enable_pmc(void)
654 {
655 #ifdef HWPMC_HOOKS
656 	u_int32_t maxlvt;
657 
658 	/* Fail if the local APIC is not present. */
659 	if (!x2apic_mode && lapic_map == NULL)
660 		return (0);
661 
662 	/* Fail if the PMC LVT is not present. */
663 	maxlvt = (lapic_read32(LAPIC_VERSION) & APIC_VER_MAXLVT) >> MAXLVTSHIFT;
664 	if (maxlvt < APIC_LVT_PMC)
665 		return (0);
666 
667 	lvts[APIC_LVT_PMC].lvt_masked = 0;
668 
669 #ifdef SMP
670 	/*
671 	 * If hwpmc was loaded at boot time then the APs may not be
672 	 * started yet.  In that case, don't forward the request to
673 	 * them as they will program the lvt when they start.
674 	 */
675 	if (smp_started)
676 		smp_rendezvous(NULL, lapic_update_pmc, NULL, NULL);
677 	else
678 #endif
679 		lapic_update_pmc(NULL);
680 	return (1);
681 #else
682 	return (0);
683 #endif
684 }
685 
686 static void
687 native_lapic_disable_pmc(void)
688 {
689 #ifdef HWPMC_HOOKS
690 	u_int32_t maxlvt;
691 
692 	/* Fail if the local APIC is not present. */
693 	if (!x2apic_mode && lapic_map == NULL)
694 		return;
695 
696 	/* Fail if the PMC LVT is not present. */
697 	maxlvt = (lapic_read32(LAPIC_VERSION) & APIC_VER_MAXLVT) >> MAXLVTSHIFT;
698 	if (maxlvt < APIC_LVT_PMC)
699 		return;
700 
701 	lvts[APIC_LVT_PMC].lvt_masked = 1;
702 
703 #ifdef SMP
704 	/* The APs should always be started when hwpmc is unloaded. */
705 	KASSERT(mp_ncpus == 1 || smp_started, ("hwpmc unloaded too early"));
706 #endif
707 	smp_rendezvous(NULL, lapic_update_pmc, NULL, NULL);
708 #endif
709 }
710 
711 static int
712 lapic_et_start(struct eventtimer *et, sbintime_t first, sbintime_t period)
713 {
714 	struct lapic *la;
715 	u_long value;
716 
717 	la = &lapics[PCPU_GET(apic_id)];
718 	if (et->et_frequency == 0) {
719 		/* Start off with a divisor of 2 (power on reset default). */
720 		lapic_timer_divisor = 2;
721 		/* Try to calibrate the local APIC timer. */
722 		do {
723 			lapic_timer_set_divisor(lapic_timer_divisor);
724 			lapic_timer_oneshot(la, APIC_TIMER_MAX_COUNT, 0);
725 			DELAY(1000000);
726 			value = APIC_TIMER_MAX_COUNT -
727 			    lapic_read32(LAPIC_CCR_TIMER);
728 			if (value != APIC_TIMER_MAX_COUNT)
729 				break;
730 			lapic_timer_divisor <<= 1;
731 		} while (lapic_timer_divisor <= 128);
732 		if (lapic_timer_divisor > 128)
733 			panic("lapic: Divisor too big");
734 		if (bootverbose)
735 			printf("lapic: Divisor %lu, Frequency %lu Hz\n",
736 			    lapic_timer_divisor, value);
737 		et->et_frequency = value;
738 		et->et_min_period = (0x00000002LLU << 32) / et->et_frequency;
739 		et->et_max_period = (0xfffffffeLLU << 32) / et->et_frequency;
740 	}
741 	if (la->la_timer_mode == 0)
742 		lapic_timer_set_divisor(lapic_timer_divisor);
743 	if (period != 0) {
744 		la->la_timer_mode = 1;
745 		la->la_timer_period = ((uint32_t)et->et_frequency * period) >> 32;
746 		lapic_timer_periodic(la, la->la_timer_period, 1);
747 	} else {
748 		la->la_timer_mode = 2;
749 		la->la_timer_period = ((uint32_t)et->et_frequency * first) >> 32;
750 		lapic_timer_oneshot(la, la->la_timer_period, 1);
751 	}
752 	return (0);
753 }
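
/*
 * Worked example (sketch): sbintime_t is a 32.32 fixed-point count of
 * seconds, so ((uint32_t)et->et_frequency * period) >> 32 above
 * converts a period into timer ticks.  With a 100 MHz timer and a
 * 1 ms period (SBT_1S / 1000, about 2^32 / 1000), the programmed
 * count comes out to roughly 100000 ticks.
 */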
754 
755 static int
756 lapic_et_stop(struct eventtimer *et)
757 {
758 	struct lapic *la = &lapics[PCPU_GET(apic_id)];
759 
760 	la->la_timer_mode = 0;
761 	lapic_timer_stop(la);
762 	return (0);
763 }
764 
765 static void
766 native_lapic_disable(void)
767 {
768 	uint32_t value;
769 
770 	/* Software disable the local APIC. */
771 	value = lapic_read32(LAPIC_SVR);
772 	value &= ~APIC_SVR_SWEN;
773 	lapic_write32(LAPIC_SVR, value);
774 }
775 
776 static void
777 lapic_enable(void)
778 {
779 	uint32_t value;
780 
781 	/* Program the spurious vector to enable the local APIC. */
782 	value = lapic_read32(LAPIC_SVR);
783 	value &= ~(APIC_SVR_VECTOR | APIC_SVR_FOCUS);
784 	value |= APIC_SVR_FEN | APIC_SVR_SWEN | APIC_SPURIOUS_INT;
785 	if (lapic_eoi_suppression)
786 		value |= APIC_SVR_EOI_SUPPRESSION;
787 	lapic_write32(LAPIC_SVR, value);
788 }
789 
790 /* Reset the local APIC on the BSP during resume. */
791 static void
792 lapic_resume(struct pic *pic, bool suspend_cancelled)
793 {
794 
795 	lapic_setup(0);
796 }
797 
798 static int
799 native_lapic_id(void)
800 {
801 	uint32_t v;
802 
803 	KASSERT(x2apic_mode || lapic_map != NULL, ("local APIC is not mapped"));
804 	v = lapic_read32(LAPIC_ID);
805 	if (!x2apic_mode)
806 		v >>= APIC_ID_SHIFT;
807 	return (v);
808 }
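
/*
 * Worked example (sketch): in xAPIC mode the 8-bit APIC ID sits in
 * bits 24-31 of the ID register, so a raw value of 0x05000000 yields
 * ID 5 after the APIC_ID_SHIFT shift above; in x2APIC mode the
 * register already holds the full 32-bit ID.
 */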
809 
810 static int
811 native_lapic_intr_pending(u_int vector)
812 {
813 	uint32_t irr;
814 
815 	/*
816 	 * The IRR registers are an array of registers each of which
817 	 * only describes 32 interrupts in the low 32 bits.  Thus, we
818 	 * divide the vector by 32 to get the register index.
819 	 * Finally, we take the vector modulo 32 to determine the
820 	 * individual bit to test.
821 	 */
822 	irr = lapic_read32(LAPIC_IRR0 + vector / 32);
823 	return (irr & 1 << (vector % 32));
824 }
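
/*
 * Worked example (sketch): for vector 0x41 (65), the test above reads
 * LAPIC_IRR0 + 2 (65 / 32 == 2) and checks bit 1 (65 % 32 == 1).
 */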
825 
826 static void
827 native_lapic_set_logical_id(u_int apic_id, u_int cluster, u_int cluster_id)
828 {
829 	struct lapic *la;
830 
831 	KASSERT(lapics[apic_id].la_present, ("%s: APIC %u doesn't exist",
832 	    __func__, apic_id));
833 	KASSERT(cluster <= APIC_MAX_CLUSTER, ("%s: cluster %u too big",
834 	    __func__, cluster));
835 	KASSERT(cluster_id <= APIC_MAX_INTRACLUSTER_ID,
836 	    ("%s: intra cluster id %u too big", __func__, cluster_id));
837 	la = &lapics[apic_id];
838 	la->la_cluster = cluster;
839 	la->la_cluster_id = cluster_id;
840 }
841 
842 static int
843 native_lapic_set_lvt_mask(u_int apic_id, u_int pin, u_char masked)
844 {
845 
846 	if (pin > APIC_LVT_MAX)
847 		return (EINVAL);
848 	if (apic_id == APIC_ID_ALL) {
849 		lvts[pin].lvt_masked = masked;
850 		if (bootverbose)
851 			printf("lapic:");
852 	} else {
853 		KASSERT(lapics[apic_id].la_present,
854 		    ("%s: missing APIC %u", __func__, apic_id));
855 		lapics[apic_id].la_lvts[pin].lvt_masked = masked;
856 		lapics[apic_id].la_lvts[pin].lvt_active = 1;
857 		if (bootverbose)
858 			printf("lapic%u:", apic_id);
859 	}
860 	if (bootverbose)
861 		printf(" LINT%u %s\n", pin, masked ? "masked" : "unmasked");
862 	return (0);
863 }
864 
865 static int
866 native_lapic_set_lvt_mode(u_int apic_id, u_int pin, u_int32_t mode)
867 {
868 	struct lvt *lvt;
869 
870 	if (pin > APIC_LVT_MAX)
871 		return (EINVAL);
872 	if (apic_id == APIC_ID_ALL) {
873 		lvt = &lvts[pin];
874 		if (bootverbose)
875 			printf("lapic:");
876 	} else {
877 		KASSERT(lapics[apic_id].la_present,
878 		    ("%s: missing APIC %u", __func__, apic_id));
879 		lvt = &lapics[apic_id].la_lvts[pin];
880 		lvt->lvt_active = 1;
881 		if (bootverbose)
882 			printf("lapic%u:", apic_id);
883 	}
884 	lvt->lvt_mode = mode;
885 	switch (mode) {
886 	case APIC_LVT_DM_NMI:
887 	case APIC_LVT_DM_SMI:
888 	case APIC_LVT_DM_INIT:
889 	case APIC_LVT_DM_EXTINT:
890 		lvt->lvt_edgetrigger = 1;
891 		lvt->lvt_activehi = 1;
892 		if (mode == APIC_LVT_DM_EXTINT)
893 			lvt->lvt_masked = 1;
894 		else
895 			lvt->lvt_masked = 0;
896 		break;
897 	default:
898 		panic("Unsupported delivery mode: 0x%x\n", mode);
899 	}
900 	if (bootverbose) {
901 		printf(" Routing ");
902 		switch (mode) {
903 		case APIC_LVT_DM_NMI:
904 			printf("NMI");
905 			break;
906 		case APIC_LVT_DM_SMI:
907 			printf("SMI");
908 			break;
909 		case APIC_LVT_DM_INIT:
910 			printf("INIT");
911 			break;
912 		case APIC_LVT_DM_EXTINT:
913 			printf("ExtINT");
914 			break;
915 		}
916 		printf(" -> LINT%u\n", pin);
917 	}
918 	return (0);
919 }
920 
921 static int
922 native_lapic_set_lvt_polarity(u_int apic_id, u_int pin, enum intr_polarity pol)
923 {
924 
925 	if (pin > APIC_LVT_MAX || pol == INTR_POLARITY_CONFORM)
926 		return (EINVAL);
927 	if (apic_id == APIC_ID_ALL) {
928 		lvts[pin].lvt_activehi = (pol == INTR_POLARITY_HIGH);
929 		if (bootverbose)
930 			printf("lapic:");
931 	} else {
932 		KASSERT(lapics[apic_id].la_present,
933 		    ("%s: missing APIC %u", __func__, apic_id));
934 		lapics[apic_id].la_lvts[pin].lvt_active = 1;
935 		lapics[apic_id].la_lvts[pin].lvt_activehi =
936 		    (pol == INTR_POLARITY_HIGH);
937 		if (bootverbose)
938 			printf("lapic%u:", apic_id);
939 	}
940 	if (bootverbose)
941 		printf(" LINT%u polarity: %s\n", pin,
942 		    pol == INTR_POLARITY_HIGH ? "high" : "low");
943 	return (0);
944 }
945 
946 static int
947 native_lapic_set_lvt_triggermode(u_int apic_id, u_int pin,
948      enum intr_trigger trigger)
949 {
950 
951 	if (pin > APIC_LVT_MAX || trigger == INTR_TRIGGER_CONFORM)
952 		return (EINVAL);
953 	if (apic_id == APIC_ID_ALL) {
954 		lvts[pin].lvt_edgetrigger = (trigger == INTR_TRIGGER_EDGE);
955 		if (bootverbose)
956 			printf("lapic:");
957 	} else {
958 		KASSERT(lapics[apic_id].la_present,
959 		    ("%s: missing APIC %u", __func__, apic_id));
960 		lapics[apic_id].la_lvts[pin].lvt_edgetrigger =
961 		    (trigger == INTR_TRIGGER_EDGE);
962 		lapics[apic_id].la_lvts[pin].lvt_active = 1;
963 		if (bootverbose)
964 			printf("lapic%u:", apic_id);
965 	}
966 	if (bootverbose)
967 		printf(" LINT%u trigger: %s\n", pin,
968 		    trigger == INTR_TRIGGER_EDGE ? "edge" : "level");
969 	return (0);
970 }
971 
972 /*
973  * Adjust the TPR of the current CPU so that it blocks all interrupts below
974  * the passed in vector.
975  */
976 static void
977 lapic_set_tpr(u_int vector)
978 {
979 #ifdef CHEAP_TPR
980 	lapic_write32(LAPIC_TPR, vector);
981 #else
982 	uint32_t tpr;
983 
984 	tpr = lapic_read32(LAPIC_TPR) & ~APIC_TPR_PRIO;
985 	tpr |= vector;
986 	lapic_write32(LAPIC_TPR, tpr);
987 #endif
988 }
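
/*
 * Worked example (sketch): the priority class of an interrupt is the
 * upper nibble of its vector, so lapic_set_tpr(0x20) would block
 * delivery of vectors 0x2f and below, while the lapic_set_tpr(0) call
 * in native_lapic_setup() accepts everything.
 */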
989 
990 static void
991 native_lapic_eoi(void)
992 {
993 
994 	lapic_write32_nofence(LAPIC_EOI, 0);
995 }
996 
997 void
998 lapic_handle_intr(int vector, struct trapframe *frame)
999 {
1000 	struct intsrc *isrc;
1001 
1002 	isrc = intr_lookup_source(apic_idt_to_irq(PCPU_GET(apic_id),
1003 	    vector));
1004 	intr_execute_handlers(isrc, frame);
1005 }
1006 
1007 void
1008 lapic_handle_timer(struct trapframe *frame)
1009 {
1010 	struct lapic *la;
1011 	struct trapframe *oldframe;
1012 	struct thread *td;
1013 
1014 	/* Send EOI first thing. */
1015 	lapic_eoi();
1016 
1017 #if defined(SMP) && !defined(SCHED_ULE)
1018 	/*
1019 	 * Don't do any accounting for disabled HTT cores, since it
1020 	 * would provide misleading numbers to userland.
1021 	 *
1022 	 * No locking is necessary here, since even if we lose the race
1023 	 * when hlt_cpus_mask changes it is not a big deal, really.
1024 	 *
1025 	 * Don't do that for ULE, since ULE doesn't consider hlt_cpus_mask
1026 	 * and unlike other schedulers it actually schedules threads to
1027 	 * those CPUs.
1028 	 */
1029 	if (CPU_ISSET(PCPU_GET(cpuid), &hlt_cpus_mask))
1030 		return;
1031 #endif
1032 
1033 	/* Look up our local APIC structure for the tick counters. */
1034 	la = &lapics[PCPU_GET(apic_id)];
1035 	(*la->la_timer_count)++;
1036 	critical_enter();
1037 	if (lapic_et.et_active) {
1038 		td = curthread;
1039 		td->td_intr_nesting_level++;
1040 		oldframe = td->td_intr_frame;
1041 		td->td_intr_frame = frame;
1042 		lapic_et.et_event_cb(&lapic_et, lapic_et.et_arg);
1043 		td->td_intr_frame = oldframe;
1044 		td->td_intr_nesting_level--;
1045 	}
1046 	critical_exit();
1047 }
1048 
1049 static void
1050 lapic_timer_set_divisor(u_int divisor)
1051 {
1052 
1053 	KASSERT(powerof2(divisor), ("lapic: invalid divisor %u", divisor));
1054 	KASSERT(ffs(divisor) <= sizeof(lapic_timer_divisors) /
1055 	    sizeof(u_int32_t), ("lapic: invalid divisor %u", divisor));
1056 	lapic_write32(LAPIC_DCR_TIMER, lapic_timer_divisors[ffs(divisor) - 1]);
1057 }
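
/*
 * Worked example (sketch): the divisor table is indexed by log2 of
 * the divisor, so lapic_timer_set_divisor(8) computes ffs(8) - 1 == 3
 * and programs APIC_TDCR_8 into the divide configuration register.
 */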
1058 
1059 static void
1060 lapic_timer_oneshot(struct lapic *la, u_int count, int enable_int)
1061 {
1062 	uint32_t value;
1063 
1064 	value = la->lvt_timer_cache;
1065 	value &= ~APIC_LVTT_TM;
1066 	value |= APIC_LVTT_TM_ONE_SHOT;
1067 	if (enable_int)
1068 		value &= ~APIC_LVT_M;
1069 	lapic_write32(LAPIC_LVT_TIMER, value);
1070 	lapic_write32(LAPIC_ICR_TIMER, count);
1071 }
1072 
1073 static void
1074 lapic_timer_periodic(struct lapic *la, u_int count, int enable_int)
1075 {
1076 	uint32_t value;
1077 
1078 	value = la->lvt_timer_cache;
1079 	value &= ~APIC_LVTT_TM;
1080 	value |= APIC_LVTT_TM_PERIODIC;
1081 	if (enable_int)
1082 		value &= ~APIC_LVT_M;
1083 	lapic_write32(LAPIC_LVT_TIMER, value);
1084 	lapic_write32(LAPIC_ICR_TIMER, count);
1085 }
1086 
1087 static void
1088 lapic_timer_stop(struct lapic *la)
1089 {
1090 	uint32_t value;
1091 
1092 	value = la->lvt_timer_cache;
1093 	value &= ~APIC_LVTT_TM;
1094 	value |= APIC_LVT_M;
1095 	lapic_write32(LAPIC_LVT_TIMER, value);
1096 }
1097 
1098 void
1099 lapic_handle_cmc(void)
1100 {
1101 
1102 	lapic_eoi();
1103 	cmc_intr();
1104 }
1105 
1106 /*
1107  * Called from the mca_init() to activate the CMC interrupt if this CPU is
1108  * responsible for monitoring any MC banks for CMC events.  Since mca_init()
1109  * is called prior to lapic_setup() during boot, this just needs to unmask
1110  * this CPU's LVT_CMCI entry.
1111  */
1112 static void
1113 native_lapic_enable_cmc(void)
1114 {
1115 	u_int apic_id;
1116 
1117 #ifdef DEV_ATPIC
1118 	if (!x2apic_mode && lapic_map == NULL)
1119 		return;
1120 #endif
1121 	apic_id = PCPU_GET(apic_id);
1122 	KASSERT(lapics[apic_id].la_present,
1123 	    ("%s: missing APIC %u", __func__, apic_id));
1124 	lapics[apic_id].la_lvts[APIC_LVT_CMCI].lvt_masked = 0;
1125 	lapics[apic_id].la_lvts[APIC_LVT_CMCI].lvt_active = 1;
1126 	if (bootverbose)
1127 		printf("lapic%u: CMCI unmasked\n", apic_id);
1128 }
1129 
1130 void
1131 lapic_handle_error(void)
1132 {
1133 	uint32_t esr;
1134 
1135 	/*
1136 	 * Read the contents of the error status register.  Write to
1137 	 * the register first before reading from it to force the APIC
1138 	 * to update its value to indicate any errors that have
1139 	 * occurred since the previous write to the register.
1140 	 */
1141 	lapic_write32(LAPIC_ESR, 0);
1142 	esr = lapic_read32(LAPIC_ESR);
1143 
1144 	printf("CPU%d: local APIC error 0x%x\n", PCPU_GET(cpuid), esr);
1145 	lapic_eoi();
1146 }
1147 
1148 static u_int
1149 native_apic_cpuid(u_int apic_id)
1150 {
1151 #ifdef SMP
1152 	return apic_cpuids[apic_id];
1153 #else
1154 	return 0;
1155 #endif
1156 }
1157 
1158 /* Request a free IDT vector to be used by the specified IRQ. */
1159 static u_int
1160 native_apic_alloc_vector(u_int apic_id, u_int irq)
1161 {
1162 	u_int vector;
1163 
1164 	KASSERT(irq < NUM_IO_INTS, ("Invalid IRQ %u", irq));
1165 
1166 	/*
1167 	 * Search for a free vector.  Currently we just use a very simple
1168 	 * algorithm to find the first free vector.
1169 	 */
1170 	mtx_lock_spin(&icu_lock);
1171 	for (vector = 0; vector < APIC_NUM_IOINTS; vector++) {
1172 		if (lapics[apic_id].la_ioint_irqs[vector] != -1)
1173 			continue;
1174 		lapics[apic_id].la_ioint_irqs[vector] = irq;
1175 		mtx_unlock_spin(&icu_lock);
1176 		return (vector + APIC_IO_INTS);
1177 	}
1178 	mtx_unlock_spin(&icu_lock);
1179 	return (0);
1180 }
1181 
1182 /*
1183  * Request 'count' free contiguous IDT vectors to be used by 'count'
1184  * IRQs.  'count' must be a power of two and the vectors will be
1185  * aligned on a boundary of 'align'.  If the request cannot be
1186  * satisfied, 0 is returned.
1187  */
1188 static u_int
1189 native_apic_alloc_vectors(u_int apic_id, u_int *irqs, u_int count, u_int align)
1190 {
1191 	u_int first, run, vector;
1192 
1193 	KASSERT(powerof2(count), ("bad count"));
1194 	KASSERT(powerof2(align), ("bad align"));
1195 	KASSERT(align >= count, ("align < count"));
1196 #ifdef INVARIANTS
1197 	for (run = 0; run < count; run++)
1198 		KASSERT(irqs[run] < NUM_IO_INTS, ("Invalid IRQ %u at index %u",
1199 		    irqs[run], run));
1200 #endif
1201 
1202 	/*
1203 	 * Search for 'count' free vectors.  As with apic_alloc_vector(),
1204 	 * this just uses a simple first fit algorithm.
1205 	 */
1206 	run = 0;
1207 	first = 0;
1208 	mtx_lock_spin(&icu_lock);
1209 	for (vector = 0; vector < APIC_NUM_IOINTS; vector++) {
1210 
1211 		/* Vector is in use, end run. */
1212 		if (lapics[apic_id].la_ioint_irqs[vector] != -1) {
1213 			run = 0;
1214 			first = 0;
1215 			continue;
1216 		}
1217 
1218 		/* Start a new run if run == 0 and vector is aligned. */
1219 		if (run == 0) {
1220 			if ((vector & (align - 1)) != 0)
1221 				continue;
1222 			first = vector;
1223 		}
1224 		run++;
1225 
1226 		/* Keep looping if the run isn't long enough yet. */
1227 		if (run < count)
1228 			continue;
1229 
1230 		/* Found a run, assign IRQs and return the first vector. */
1231 		for (vector = 0; vector < count; vector++)
1232 			lapics[apic_id].la_ioint_irqs[first + vector] =
1233 			    irqs[vector];
1234 		mtx_unlock_spin(&icu_lock);
1235 		return (first + APIC_IO_INTS);
1236 	}
1237 	mtx_unlock_spin(&icu_lock);
1238 	printf("APIC: Couldn't find APIC vectors for %u IRQs\n", count);
1239 	return (0);
1240 }
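
/*
 * Worked example (sketch): a request with count = 4, align = 4 scans
 * for the first run of four free la_ioint_irqs[] slots whose starting
 * index is a multiple of four, assigns irqs[0..3] to those slots and
 * returns first + APIC_IO_INTS.  Multi-message MSI is the typical
 * consumer of such aligned allocations.
 */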
1241 
1242 /*
1243  * Enable a vector for a particular apic_id.  Since all lapics share idt
1244  * entries and ioint_handlers this enables the vector on all lapics.  lapics
1245  * which do not have the vector configured would report spurious interrupts
1246  * should it fire.
1247  */
1248 static void
1249 native_apic_enable_vector(u_int apic_id, u_int vector)
1250 {
1251 
1252 	KASSERT(vector != IDT_SYSCALL, ("Attempt to overwrite syscall entry"));
1253 	KASSERT(ioint_handlers[vector / 32] != NULL,
1254 	    ("No ISR handler for vector %u", vector));
1255 #ifdef KDTRACE_HOOKS
1256 	KASSERT(vector != IDT_DTRACE_RET,
1257 	    ("Attempt to overwrite DTrace entry"));
1258 #endif
1259 	setidt(vector, ioint_handlers[vector / 32], SDT_APIC, SEL_KPL,
1260 	    GSEL_APIC);
1261 }
1262 
1263 static void
1264 native_apic_disable_vector(u_int apic_id, u_int vector)
1265 {
1266 
1267 	KASSERT(vector != IDT_SYSCALL, ("Attempt to overwrite syscall entry"));
1268 #ifdef KDTRACE_HOOKS
1269 	KASSERT(vector != IDT_DTRACE_RET,
1270 	    ("Attempt to overwrite DTrace entry"));
1271 #endif
1272 	KASSERT(ioint_handlers[vector / 32] != NULL,
1273 	    ("No ISR handler for vector %u", vector));
1274 #ifdef notyet
1275 	/*
1276 	 * We can not currently clear the idt entry because other cpus
1277 	 * may have a valid vector at this offset.
1278 	 */
1279 	setidt(vector, &IDTVEC(rsvd), SDT_APICT, SEL_KPL, GSEL_APIC);
1280 #endif
1281 }
1282 
1283 /* Release an APIC vector when it's no longer in use. */
1284 static void
1285 native_apic_free_vector(u_int apic_id, u_int vector, u_int irq)
1286 {
1287 	struct thread *td;
1288 
1289 	KASSERT(vector >= APIC_IO_INTS && vector != IDT_SYSCALL &&
1290 	    vector <= APIC_IO_INTS + APIC_NUM_IOINTS,
1291 	    ("Vector %u does not map to an IRQ line", vector));
1292 	KASSERT(irq < NUM_IO_INTS, ("Invalid IRQ %u", irq));
1293 	KASSERT(lapics[apic_id].la_ioint_irqs[vector - APIC_IO_INTS] ==
1294 	    irq, ("IRQ mismatch"));
1295 #ifdef KDTRACE_HOOKS
1296 	KASSERT(vector != IDT_DTRACE_RET,
1297 	    ("Attempt to overwrite DTrace entry"));
1298 #endif
1299 
1300 	/*
1301 	 * Bind us to the cpu that owned the vector before freeing it so
1302 	 * we don't lose an interrupt delivery race.
1303 	 */
1304 	td = curthread;
1305 	if (!rebooting) {
1306 		thread_lock(td);
1307 		if (sched_is_bound(td))
1308 			panic("apic_free_vector: Thread already bound.\n");
1309 		sched_bind(td, apic_cpuid(apic_id));
1310 		thread_unlock(td);
1311 	}
1312 	mtx_lock_spin(&icu_lock);
1313 	lapics[apic_id].la_ioint_irqs[vector - APIC_IO_INTS] = -1;
1314 	mtx_unlock_spin(&icu_lock);
1315 	if (!rebooting) {
1316 		thread_lock(td);
1317 		sched_unbind(td);
1318 		thread_unlock(td);
1319 	}
1320 }
1321 
1322 /* Map an IDT vector (APIC) to an IRQ (interrupt source). */
1323 static u_int
1324 apic_idt_to_irq(u_int apic_id, u_int vector)
1325 {
1326 	int irq;
1327 
1328 	KASSERT(vector >= APIC_IO_INTS && vector != IDT_SYSCALL &&
1329 	    vector <= APIC_IO_INTS + APIC_NUM_IOINTS,
1330 	    ("Vector %u does not map to an IRQ line", vector));
1331 #ifdef KDTRACE_HOOKS
1332 	KASSERT(vector != IDT_DTRACE_RET,
1333 	    ("Attempt to overwrite DTrace entry"));
1334 #endif
1335 	irq = lapics[apic_id].la_ioint_irqs[vector - APIC_IO_INTS];
1336 	if (irq < 0)
1337 		irq = 0;
1338 	return (irq);
1339 }
1340 
1341 #ifdef DDB
1342 /*
1343  * Dump data about APIC IDT vector mappings.
1344  */
1345 DB_SHOW_COMMAND(apic, db_show_apic)
1346 {
1347 	struct intsrc *isrc;
1348 	int i, verbose;
1349 	u_int apic_id;
1350 	u_int irq;
1351 
1352 	if (strcmp(modif, "vv") == 0)
1353 		verbose = 2;
1354 	else if (strcmp(modif, "v") == 0)
1355 		verbose = 1;
1356 	else
1357 		verbose = 0;
1358 	for (apic_id = 0; apic_id <= MAX_APIC_ID; apic_id++) {
1359 		if (lapics[apic_id].la_present == 0)
1360 			continue;
1361 		db_printf("Interrupts bound to lapic %u\n", apic_id);
1362 		for (i = 0; i < APIC_NUM_IOINTS + 1 && !db_pager_quit; i++) {
1363 			irq = lapics[apic_id].la_ioint_irqs[i];
1364 			if (irq == -1 || irq == IRQ_SYSCALL)
1365 				continue;
1366 #ifdef KDTRACE_HOOKS
1367 			if (irq == IRQ_DTRACE_RET)
1368 				continue;
1369 #endif
1370 #ifdef XENHVM
1371 			if (irq == IRQ_EVTCHN)
1372 				continue;
1373 #endif
1374 			db_printf("vec 0x%2x -> ", i + APIC_IO_INTS);
1375 			if (irq == IRQ_TIMER)
1376 				db_printf("lapic timer\n");
1377 			else if (irq < NUM_IO_INTS) {
1378 				isrc = intr_lookup_source(irq);
1379 				if (isrc == NULL || verbose == 0)
1380 					db_printf("IRQ %u\n", irq);
1381 				else
1382 					db_dump_intr_event(isrc->is_event,
1383 					    verbose == 2);
1384 			} else
1385 				db_printf("IRQ %u ???\n", irq);
1386 		}
1387 	}
1388 }
1389 
1390 static void
1391 dump_mask(const char *prefix, uint32_t v, int base)
1392 {
1393 	int i, first;
1394 
1395 	first = 1;
1396 	for (i = 0; i < 32; i++)
1397 		if (v & (1 << i)) {
1398 			if (first) {
1399 				db_printf("%s:", prefix);
1400 				first = 0;
1401 			}
1402 			db_printf(" %02x", base + i);
1403 		}
1404 	if (!first)
1405 		db_printf("\n");
1406 }
1407 
1408 /* Show info from the lapic regs for this CPU. */
1409 DB_SHOW_COMMAND(lapic, db_show_lapic)
1410 {
1411 	uint32_t v;
1412 
1413 	db_printf("lapic ID = %d\n", lapic_id());
1414 	v = lapic_read32(LAPIC_VERSION);
1415 	db_printf("version  = %d.%d\n", (v & APIC_VER_VERSION) >> 4,
1416 	    v & 0xf);
1417 	db_printf("max LVT  = %d\n", (v & APIC_VER_MAXLVT) >> MAXLVTSHIFT);
1418 	v = lapic_read32(LAPIC_SVR);
1419 	db_printf("SVR      = %02x (%s)\n", v & APIC_SVR_VECTOR,
1420 	    v & APIC_SVR_ENABLE ? "enabled" : "disabled");
1421 	db_printf("TPR      = %02x\n", lapic_read32(LAPIC_TPR));
1422 
1423 #define dump_field(prefix, regn, index)					\
1424 	dump_mask(__XSTRING(prefix ## index), 				\
1425 	    lapic_read32(LAPIC_ ## regn ## index),			\
1426 	    index * 32)
1427 
1428 	db_printf("In-service Interrupts:\n");
1429 	dump_field(isr, ISR, 0);
1430 	dump_field(isr, ISR, 1);
1431 	dump_field(isr, ISR, 2);
1432 	dump_field(isr, ISR, 3);
1433 	dump_field(isr, ISR, 4);
1434 	dump_field(isr, ISR, 5);
1435 	dump_field(isr, ISR, 6);
1436 	dump_field(isr, ISR, 7);
1437 
1438 	db_printf("TMR Interrupts:\n");
1439 	dump_field(tmr, TMR, 0);
1440 	dump_field(tmr, TMR, 1);
1441 	dump_field(tmr, TMR, 2);
1442 	dump_field(tmr, TMR, 3);
1443 	dump_field(tmr, TMR, 4);
1444 	dump_field(tmr, TMR, 5);
1445 	dump_field(tmr, TMR, 6);
1446 	dump_field(tmr, TMR, 7);
1447 
1448 	db_printf("IRR Interrupts:\n");
1449 	dump_field(irr, IRR, 0);
1450 	dump_field(irr, IRR, 1);
1451 	dump_field(irr, IRR, 2);
1452 	dump_field(irr, IRR, 3);
1453 	dump_field(irr, IRR, 4);
1454 	dump_field(irr, IRR, 5);
1455 	dump_field(irr, IRR, 6);
1456 	dump_field(irr, IRR, 7);
1457 
1458 #undef dump_field
1459 }
1460 #endif
1461 
1462 /*
1463  * APIC probing support code.  This includes code to manage enumerators.
1464  */
1465 
1466 static SLIST_HEAD(, apic_enumerator) enumerators =
1467 	SLIST_HEAD_INITIALIZER(enumerators);
1468 static struct apic_enumerator *best_enum;
1469 
1470 void
1471 apic_register_enumerator(struct apic_enumerator *enumerator)
1472 {
1473 #ifdef INVARIANTS
1474 	struct apic_enumerator *apic_enum;
1475 
1476 	SLIST_FOREACH(apic_enum, &enumerators, apic_next) {
1477 		if (apic_enum == enumerator)
1478 			panic("%s: Duplicate register of %s", __func__,
1479 			    enumerator->apic_name);
1480 	}
1481 #endif
1482 	SLIST_INSERT_HEAD(&enumerators, enumerator, apic_next);
1483 }
1484 
1485 /*
1486  * We have to look for CPUs very, very early because certain subsystems
1487  * want to know how many CPUs we have extremely early on in the boot
1488  * process.
1489  */
1490 static void
1491 apic_init(void *dummy __unused)
1492 {
1493 	struct apic_enumerator *enumerator;
1494 	int retval, best;
1495 
1496 	/* We only support built in local APICs. */
1497 	if (!(cpu_feature & CPUID_APIC))
1498 		return;
1499 
1500 	/* Don't probe if APIC mode is disabled. */
1501 	if (resource_disabled("apic", 0))
1502 		return;
1503 
1504 	/* Probe all the enumerators to find the best match. */
1505 	best_enum = NULL;
1506 	best = 0;
1507 	SLIST_FOREACH(enumerator, &enumerators, apic_next) {
1508 		retval = enumerator->apic_probe();
1509 		if (retval > 0)
1510 			continue;
1511 		if (best_enum == NULL || best < retval) {
1512 			best_enum = enumerator;
1513 			best = retval;
1514 		}
1515 	}
1516 	if (best_enum == NULL) {
1517 		if (bootverbose)
1518 			printf("APIC: Could not find any APICs.\n");
1519 #ifndef DEV_ATPIC
1520 		panic("running without device atpic requires a local APIC");
1521 #endif
1522 		return;
1523 	}
1524 
1525 	if (bootverbose)
1526 		printf("APIC: Using the %s enumerator.\n",
1527 		    best_enum->apic_name);
1528 
1529 #ifdef I686_CPU
1530 	/*
1531 	 * To work around an erratum, we disable the local APIC on some
1532 	 * CPUs during early startup.  We need to re-enable the local
1533 	 * APIC on such CPUs now.
1534 	 */
1535 	ppro_reenable_apic();
1536 #endif
1537 
1538 	/* Probe the CPUs in the system. */
1539 	retval = best_enum->apic_probe_cpus();
1540 	if (retval != 0)
1541 		printf("%s: Failed to probe CPUs: returned %d\n",
1542 		    best_enum->apic_name, retval);
1543 
1544 }
1545 SYSINIT(apic_init, SI_SUB_TUNABLES - 1, SI_ORDER_SECOND, apic_init, NULL);
1546 
1547 /*
1548  * Setup the local APIC.  We have to do this prior to starting up the APs
1549  * in the SMP case.
1550  */
1551 static void
1552 apic_setup_local(void *dummy __unused)
1553 {
1554 	int retval;
1555 
1556 	if (best_enum == NULL)
1557 		return;
1558 
1559 	/* Initialize the local APIC. */
1560 	retval = best_enum->apic_setup_local();
1561 	if (retval != 0)
1562 		printf("%s: Failed to setup the local APIC: returned %d\n",
1563 		    best_enum->apic_name, retval);
1564 }
1565 SYSINIT(apic_setup_local, SI_SUB_CPU, SI_ORDER_SECOND, apic_setup_local, NULL);
1566 
1567 /*
1568  * Setup the I/O APICs.
1569  */
1570 static void
1571 apic_setup_io(void *dummy __unused)
1572 {
1573 	int retval;
1574 
1575 	if (best_enum == NULL)
1576 		return;
1577 
1578 	/*
1579 	 * Local APIC must be registered before other PICs and pseudo PICs
1580 	 * for proper suspend/resume order.
1581 	 */
1582 #ifndef XEN
1583 	intr_register_pic(&lapic_pic);
1584 #endif
1585 
1586 	retval = best_enum->apic_setup_io();
1587 	if (retval != 0)
1588 		printf("%s: Failed to setup I/O APICs: returned %d\n",
1589 		    best_enum->apic_name, retval);
1590 #ifdef XEN
1591 	return;
1592 #endif
1593 	/*
1594 	 * Finish setting up the local APIC on the BSP once we know
1595 	 * how to properly program the LINT pins.  In particular, this
1596 	 * enables EOI suppression mode, if the LAPIC supports it and
1597 	 * the user did not disable it.
1598 	 */
1599 	lapic_setup(1);
1600 	if (bootverbose)
1601 		lapic_dump("BSP");
1602 
1603 	/* Enable the MSI "pic". */
1604 	init_ops.msi_init();
1605 }
1606 SYSINIT(apic_setup_io, SI_SUB_INTR, SI_ORDER_THIRD, apic_setup_io, NULL);
1607 
1608 #ifdef SMP
1609 /*
1610  * Inter Processor Interrupt functions.  The lapic_ipi_*() functions are
1611  * private to the MD code.  The public interface for the rest of the
1612  * kernel is defined in mp_machdep.c.
1613  */
1614 static int
1615 native_lapic_ipi_wait(int delay)
1616 {
1617 	int x;
1618 
1619 	/* LAPIC_ICR.APIC_DELSTAT_MASK is undefined in x2APIC mode */
1620 	if (x2apic_mode)
1621 		return (1);
1622 
1623 	/*
1624 	 * Wait delay microseconds for IPI to be sent.  If delay is
1625 	 * -1, we wait forever.
1626 	 */
1627 	if (delay == -1) {
1628 		while ((lapic_read_icr_lo() & APIC_DELSTAT_MASK) !=
1629 		    APIC_DELSTAT_IDLE)
1630 			ia32_pause();
1631 		return (1);
1632 	}
1633 
1634 	for (x = 0; x < delay; x += 5) {
1635 		if ((lapic_read_icr_lo() & APIC_DELSTAT_MASK) ==
1636 		    APIC_DELSTAT_IDLE)
1637 			return (1);
1638 		DELAY(5);
1639 	}
1640 	return (0);
1641 }
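
/*
 * Worked example (sketch): lapic_ipi_wait(BEFORE_SPIN) below polls
 * the delivery status bit in 5 microsecond steps, so it gives a
 * previous IPI up to 50 ms to drain before the caller panics.
 */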
1642 
1643 static void
1644 native_lapic_ipi_raw(register_t icrlo, u_int dest)
1645 {
1646 	uint64_t icr;
1647 	uint32_t vhi, vlo;
1648 	register_t saveintr;
1649 
1650 	/* XXX: Need more sanity checking of icrlo? */
1651 	KASSERT(x2apic_mode || lapic_map != NULL,
1652 	    ("%s called too early", __func__));
1653 	KASSERT(x2apic_mode ||
1654 	    (dest & ~(APIC_ID_MASK >> APIC_ID_SHIFT)) == 0,
1655 	    ("%s: invalid dest field", __func__));
1656 	KASSERT((icrlo & APIC_ICRLO_RESV_MASK) == 0,
1657 	    ("%s: reserved bits set in ICR LO register", __func__));
1658 
1659 	/* Set destination in ICR HI register if it is being used. */
1660 	saveintr = intr_disable();
1661 	if (!x2apic_mode)
1662 		icr = lapic_read_icr();
1663 
1664 	if ((icrlo & APIC_DEST_MASK) == APIC_DEST_DESTFLD) {
1665 		if (x2apic_mode) {
1666 			vhi = dest;
1667 		} else {
1668 			vhi = icr >> 32;
1669 			vhi &= ~APIC_ID_MASK;
1670 			vhi |= dest << APIC_ID_SHIFT;
1671 		}
1672 	} else {
1673 		vhi = 0;
1674 	}
1675 
1676 	/* Program the contents of the IPI and dispatch it. */
1677 	if (x2apic_mode) {
1678 		vlo = icrlo;
1679 	} else {
1680 		vlo = icr;
1681 		vlo &= APIC_ICRLO_RESV_MASK;
1682 		vlo |= icrlo;
1683 	}
1684 	lapic_write_icr(vhi, vlo);
1685 	intr_restore(saveintr);
1686 }
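
/*
 * Usage sketch (from memory of the AP startup path, treat as
 * illustrative): the INIT IPI sent to wake an AP looks roughly like:
 *
 *	lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
 *	    APIC_LEVEL_ASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_INIT,
 *	    apic_id);
 *
 * where the destination field addresses a single target local APIC.
 */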
1687 
1688 #define	BEFORE_SPIN	50000
1689 #ifdef DETECT_DEADLOCK
1690 #define	AFTER_SPIN	50
1691 #endif
1692 
1693 static void
1694 native_lapic_ipi_vectored(u_int vector, int dest)
1695 {
1696 	register_t icrlo, destfield;
1697 
1698 	KASSERT((vector & ~APIC_VECTOR_MASK) == 0,
1699 	    ("%s: invalid vector %d", __func__, vector));
1700 
1701 	icrlo = APIC_DESTMODE_PHY | APIC_TRIGMOD_EDGE | APIC_LEVEL_ASSERT;
1702 
1703 	/*
1704 	 * IPI_STOP_HARD is just a "fake" vector used to send an NMI.
1705 	 * Use the NMI delivery mode if it is passed; otherwise specify
1706 	 * the vector.
1707 	 */
1708 	if (vector == IPI_STOP_HARD)
1709 		icrlo |= APIC_DELMODE_NMI;
1710 	else
1711 		icrlo |= vector | APIC_DELMODE_FIXED;
1712 	destfield = 0;
1713 	switch (dest) {
1714 	case APIC_IPI_DEST_SELF:
1715 		icrlo |= APIC_DEST_SELF;
1716 		break;
1717 	case APIC_IPI_DEST_ALL:
1718 		icrlo |= APIC_DEST_ALLISELF;
1719 		break;
1720 	case APIC_IPI_DEST_OTHERS:
1721 		icrlo |= APIC_DEST_ALLESELF;
1722 		break;
1723 	default:
1724 		KASSERT(x2apic_mode ||
1725 		    (dest & ~(APIC_ID_MASK >> APIC_ID_SHIFT)) == 0,
1726 		    ("%s: invalid destination 0x%x", __func__, dest));
1727 		destfield = dest;
1728 	}
1729 
1730 	/* Wait for an earlier IPI to finish. */
1731 	if (!lapic_ipi_wait(BEFORE_SPIN)) {
1732 		if (panicstr != NULL)
1733 			return;
1734 		else
1735 			panic("APIC: Previous IPI is stuck");
1736 	}
1737 
1738 	lapic_ipi_raw(icrlo, destfield);
1739 
1740 #ifdef DETECT_DEADLOCK
1741 	/* Wait for IPI to be delivered. */
1742 	if (!lapic_ipi_wait(AFTER_SPIN)) {
1743 #ifdef needsattention
1744 		/*
1745 		 * XXX FIXME:
1746 		 *
1747 		 * The above function waits for the message to actually be
1748 		 * delivered.  It breaks out after an arbitrary timeout
1749 		 * since the message should eventually be delivered (at
1750 		 * least in theory) and that if it wasn't we would catch
1751 		 * the failure with the check above when the next IPI is
1752 		 * sent.
1753 		 *
1754 		 * We could skip this wait entirely, EXCEPT it probably
1755 		 * protects us from other routines that assume that the
1756 		 * message was delivered and acted upon when this function
1757 		 * returns.
1758 		 */
1759 		printf("APIC: IPI might be stuck\n");
1760 #else /* !needsattention */
1761 		/* Wait without a timeout until the message is sent. */
1762 		while (lapic_read_icr_lo() & APIC_DELSTAT_PEND)
1763 			ia32_pause();
1764 #endif /* needsattention */
1765 	}
1766 #endif /* DETECT_DEADLOCK */
1767 }
1768 
1769 /*
1770  * Since the IDT is shared by all CPUs the IPI slot update needs to be globally
1771  * visible.
1772  *
1773  * Consider the case where an IPI is generated immediately after allocation:
1774  *     vector = lapic_ipi_alloc(ipifunc);
1775  *     ipi_selected(other_cpus, vector);
1776  *
1777  * In xAPIC mode a write to ICR_LO has serializing semantics because the
1778  * APIC page is mapped as an uncached region. In x2APIC mode there is an
1779  * explicit 'mfence' before the ICR MSR is written. Therefore in both cases
1780  * the IDT slot update is globally visible before the IPI is delivered.
1781  */
1782 static int
1783 native_lapic_ipi_alloc(inthand_t *ipifunc)
1784 {
1785 	struct gate_descriptor *ip;
1786 	long func;
1787 	int idx, vector;
1788 
1789 	KASSERT(ipifunc != &IDTVEC(rsvd), ("invalid ipifunc %p", ipifunc));
1790 
1791 	vector = -1;
1792 	mtx_lock_spin(&icu_lock);
1793 	for (idx = IPI_DYN_FIRST; idx <= IPI_DYN_LAST; idx++) {
1794 		ip = &idt[idx];
1795 		func = (ip->gd_hioffset << 16) | ip->gd_looffset;
1796 		if (func == (uintptr_t)&IDTVEC(rsvd)) {
1797 			vector = idx;
1798 			setidt(vector, ipifunc, SDT_APIC, SEL_KPL, GSEL_APIC);
1799 			break;
1800 		}
1801 	}
1802 	mtx_unlock_spin(&icu_lock);
1803 	return (vector);
1804 }
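
/*
 * Usage sketch (hypothetical handler name, assuming the usual
 * apic_ops wrappers from apicvar.h): a subsystem wanting a dynamic
 * IPI registers its interrupt entry point and then sends to it:
 *
 *	extern inthand_t IDTVEC(myipi);
 *	int vec = lapic_ipi_alloc(IDTVEC(myipi));
 *	if (vec != -1)
 *		ipi_selected(other_cpus, vec);
 *
 * A return of -1 means no slot in [IPI_DYN_FIRST, IPI_DYN_LAST] was
 * free.
 */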
1805 
1806 static void
1807 native_lapic_ipi_free(int vector)
1808 {
1809 	struct gate_descriptor *ip;
1810 	long func;
1811 
1812 	KASSERT(vector >= IPI_DYN_FIRST && vector <= IPI_DYN_LAST,
1813 	    ("%s: invalid vector %d", __func__, vector));
1814 
1815 	mtx_lock_spin(&icu_lock);
1816 	ip = &idt[vector];
1817 	func = (ip->gd_hioffset << 16) | ip->gd_looffset;
1818 	KASSERT(func != (uintptr_t)&IDTVEC(rsvd),
1819 	    ("invalid idtfunc %#lx", func));
1820 	setidt(vector, &IDTVEC(rsvd), SDT_APICT, SEL_KPL, GSEL_APIC);
1821 	mtx_unlock_spin(&icu_lock);
1822 }
1823 
1824 #endif /* SMP */
1825