xref: /freebsd/sys/x86/x86/local_apic.c (revision aa0a1e58)
1 /*-
2  * Copyright (c) 2003 John Baldwin <jhb@FreeBSD.org>
3  * Copyright (c) 1996, by Steve Passe
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. The name of the developer may NOT be used to endorse or promote products
12  *    derived from this software without specific prior written permission.
13  * 3. Neither the name of the author nor the names of any co-contributors
14  *    may be used to endorse or promote products derived from this software
15  *    without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  */
29 
30 /*
31  * Local APIC support on Pentium and later processors.
32  */
33 
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
36 
37 #include "opt_hwpmc_hooks.h"
38 #include "opt_kdtrace.h"
39 
40 #include "opt_ddb.h"
41 
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/bus.h>
45 #include <sys/kernel.h>
46 #include <sys/lock.h>
47 #include <sys/mutex.h>
48 #include <sys/pcpu.h>
49 #include <sys/proc.h>
50 #include <sys/sched.h>
51 #include <sys/smp.h>
52 #include <sys/timeet.h>
53 
54 #include <vm/vm.h>
55 #include <vm/pmap.h>
56 
57 #include <x86/apicreg.h>
58 #include <machine/cpu.h>
59 #include <machine/cputypes.h>
60 #include <machine/frame.h>
61 #include <machine/intr_machdep.h>
62 #include <machine/apicvar.h>
63 #include <x86/mca.h>
64 #include <machine/md_var.h>
65 #include <machine/smp.h>
66 #include <machine/specialreg.h>
67 
68 #ifdef DDB
69 #include <sys/interrupt.h>
70 #include <ddb/ddb.h>
71 #endif
72 
73 #ifdef __amd64__
74 #define	SDT_APIC	SDT_SYSIGT
75 #define	SDT_APICT	SDT_SYSIGT
76 #define	GSEL_APIC	0
77 #else
78 #define	SDT_APIC	SDT_SYS386IGT
79 #define	SDT_APICT	SDT_SYS386TGT
80 #define	GSEL_APIC	GSEL(GCODE_SEL, SEL_KPL)
81 #endif
82 
83 /* Sanity checks on IDT vectors. */
84 CTASSERT(APIC_IO_INTS + APIC_NUM_IOINTS == APIC_TIMER_INT);
85 CTASSERT(APIC_TIMER_INT < APIC_LOCAL_INTS);
86 CTASSERT(APIC_LOCAL_INTS == 240);
87 CTASSERT(IPI_STOP < APIC_SPURIOUS_INT);
88 
89 /* Magic IRQ values for the timer, syscalls, and the DTrace return probe. */
90 #define	IRQ_TIMER	(NUM_IO_INTS + 1)
91 #define	IRQ_SYSCALL	(NUM_IO_INTS + 2)
92 #define	IRQ_DTRACE_RET	(NUM_IO_INTS + 3)
93 
94 /*
95  * Support for local APICs.  Local APICs manage interrupts on each
96  * individual processor as opposed to I/O APICs which receive interrupts
97  * from I/O devices and then forward them on to the local APICs.
98  *
99  * Local APICs can also send interrupts to each other, thus providing the
100  * mechanism for inter-processor interrupts (IPIs).
101  */
102 
103 struct lvt {
104 	u_int lvt_edgetrigger:1;
105 	u_int lvt_activehi:1;
106 	u_int lvt_masked:1;
107 	u_int lvt_active:1;
108 	u_int lvt_mode:16;
109 	u_int lvt_vector:8;
110 };
111 
112 struct lapic {
113 	struct lvt la_lvts[LVT_MAX + 1];
114 	u_int la_id:8;
115 	u_int la_cluster:4;
116 	u_int la_cluster_id:2;
117 	u_int la_present:1;
118 	u_long *la_timer_count;
119 	u_long la_timer_period;
120 	u_int la_timer_mode;	/* 0: off, 1: periodic, 2: one-shot */
121 	/* Include IDT_SYSCALL to make indexing easier. */
122 	int la_ioint_irqs[APIC_NUM_IOINTS + 1];
123 } static lapics[MAX_APIC_ID + 1];
124 
125 /* Global defaults for local APIC LVT entries. */
126 static struct lvt lvts[LVT_MAX + 1] = {
127 	{ 1, 1, 1, 1, APIC_LVT_DM_EXTINT, 0 },	/* LINT0: masked ExtINT */
128 	{ 1, 1, 0, 1, APIC_LVT_DM_NMI, 0 },	/* LINT1: NMI */
129 	{ 1, 1, 1, 1, APIC_LVT_DM_FIXED, APIC_TIMER_INT },	/* Timer */
130 	{ 1, 1, 0, 1, APIC_LVT_DM_FIXED, APIC_ERROR_INT },	/* Error */
131 	{ 1, 1, 1, 1, APIC_LVT_DM_NMI, 0 },	/* PMC */
132 	{ 1, 1, 1, 1, APIC_LVT_DM_FIXED, APIC_THERMAL_INT },	/* Thermal */
133 	{ 1, 1, 1, 1, APIC_LVT_DM_FIXED, APIC_CMC_INT },	/* CMCI */
134 };
135 
136 static inthand_t *ioint_handlers[] = {
137 	NULL,			/* 0 - 31 */
138 	IDTVEC(apic_isr1),	/* 32 - 63 */
139 	IDTVEC(apic_isr2),	/* 64 - 95 */
140 	IDTVEC(apic_isr3),	/* 96 - 127 */
141 	IDTVEC(apic_isr4),	/* 128 - 159 */
142 	IDTVEC(apic_isr5),	/* 160 - 191 */
143 	IDTVEC(apic_isr6),	/* 192 - 223 */
144 	IDTVEC(apic_isr7),	/* 224 - 255 */
145 };
146 
147 
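/*
 * Timer divide configuration register encodings, indexed by log2() of the
 * divisor (e.g. index 3 is APIC_TDCR_8, a divide-by-8).
 */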
148 static u_int32_t lapic_timer_divisors[] = {
149 	APIC_TDCR_1, APIC_TDCR_2, APIC_TDCR_4, APIC_TDCR_8, APIC_TDCR_16,
150 	APIC_TDCR_32, APIC_TDCR_64, APIC_TDCR_128
151 };
152 
153 extern inthand_t IDTVEC(rsvd);
154 
155 volatile lapic_t *lapic;
156 vm_paddr_t lapic_paddr;
157 static u_long lapic_timer_divisor;
158 static struct eventtimer lapic_et;
159 
160 static void	lapic_enable(void);
161 static void	lapic_resume(struct pic *pic);
162 static void	lapic_timer_enable_intr(void);
163 static void	lapic_timer_oneshot(u_int count);
164 static void	lapic_timer_periodic(u_int count);
165 static void	lapic_timer_stop(void);
166 static void	lapic_timer_set_divisor(u_int divisor);
167 static uint32_t	lvt_mode(struct lapic *la, u_int pin, uint32_t value);
168 static int	lapic_et_start(struct eventtimer *et,
169     struct bintime *first, struct bintime *period);
170 static int	lapic_et_stop(struct eventtimer *et);
171 
172 struct pic lapic_pic = { .pic_resume = lapic_resume };
173 
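/*
 * Compute the value to program into an LVT register: start from the current
 * hardware value, clear the fields we manage, and refill them from either the
 * per-CPU override for this pin (if one is active) or the global default.
 */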
174 static uint32_t
175 lvt_mode(struct lapic *la, u_int pin, uint32_t value)
176 {
177 	struct lvt *lvt;
178 
179 	KASSERT(pin <= LVT_MAX, ("%s: pin %u out of range", __func__, pin));
180 	if (la->la_lvts[pin].lvt_active)
181 		lvt = &la->la_lvts[pin];
182 	else
183 		lvt = &lvts[pin];
184 
185 	value &= ~(APIC_LVT_M | APIC_LVT_TM | APIC_LVT_IIPP | APIC_LVT_DM |
186 	    APIC_LVT_VECTOR);
187 	if (lvt->lvt_edgetrigger == 0)
188 		value |= APIC_LVT_TM;
189 	if (lvt->lvt_activehi == 0)
190 		value |= APIC_LVT_IIPP_INTALO;
191 	if (lvt->lvt_masked)
192 		value |= APIC_LVT_M;
193 	value |= lvt->lvt_mode;
194 	switch (lvt->lvt_mode) {
195 	case APIC_LVT_DM_NMI:
196 	case APIC_LVT_DM_SMI:
197 	case APIC_LVT_DM_INIT:
198 	case APIC_LVT_DM_EXTINT:
199 		if (!lvt->lvt_edgetrigger) {
200 			printf("lapic%u: Forcing LINT%u to edge trigger\n",
201 			    la->la_id, pin);
202 			value |= APIC_LVT_TM;
203 		}
204 		/* Use a vector of 0. */
205 		break;
206 	case APIC_LVT_DM_FIXED:
207 		value |= lvt->lvt_vector;
208 		break;
209 	default:
210 		panic("bad APIC LVT delivery mode: %#x\n", value);
211 	}
212 	return (value);
213 }
214 
215 /*
216  * Map the local APIC and setup necessary interrupt vectors.
217  */
218 void
219 lapic_init(vm_paddr_t addr)
220 {
221 	u_int regs[4];
222 	int i, arat;
223 
224 	/* Map the local APIC and setup the spurious interrupt handler. */
225 	KASSERT(trunc_page(addr) == addr,
226 	    ("local APIC not aligned on a page boundary"));
227 	lapic = pmap_mapdev(addr, sizeof(lapic_t));
228 	lapic_paddr = addr;
229 	setidt(APIC_SPURIOUS_INT, IDTVEC(spuriousint), SDT_APIC, SEL_KPL,
230 	    GSEL_APIC);
231 
232 	/* Perform basic initialization of the BSP's local APIC. */
233 	lapic_enable();
234 
235 	/* Set BSP's per-CPU local APIC ID. */
236 	PCPU_SET(apic_id, lapic_id());
237 
238 	/* Local APIC timer interrupt. */
239 	setidt(APIC_TIMER_INT, IDTVEC(timerint), SDT_APIC, SEL_KPL, GSEL_APIC);
240 
241 	/* Local APIC error interrupt. */
242 	setidt(APIC_ERROR_INT, IDTVEC(errorint), SDT_APIC, SEL_KPL, GSEL_APIC);
243 
244 	/* XXX: Thermal interrupt */
245 
246 	/* Local APIC CMCI. */
247 	setidt(APIC_CMC_INT, IDTVEC(cmcint), SDT_APICT, SEL_KPL, GSEL_APIC);
248 
249 	if ((resource_int_value("apic", 0, "clock", &i) != 0 || i != 0)) {
250 		arat = 0;
251 		/* Intel CPUID 0x06 EAX[2] set if APIC timer runs in C3. */
252 		if (cpu_vendor_id == CPU_VENDOR_INTEL && cpu_high >= 6) {
253 			do_cpuid(0x06, regs);
254 			if ((regs[0] & CPUTPM1_ARAT) != 0)
255 				arat = 1;
256 		}
257 		bzero(&lapic_et, sizeof(lapic_et));
258 		lapic_et.et_name = "LAPIC";
259 		lapic_et.et_flags = ET_FLAGS_PERIODIC | ET_FLAGS_ONESHOT |
260 		    ET_FLAGS_PERCPU;
261 		lapic_et.et_quality = 600;
262 		if (!arat) {
263 			lapic_et.et_flags |= ET_FLAGS_C3STOP;
264 			lapic_et.et_quality -= 200;
265 		}
266 		lapic_et.et_frequency = 0;
267 		/* We don't know the frequency yet, so we have to guess. */
268 		lapic_et.et_min_period.sec = 0;
269 		lapic_et.et_min_period.frac = 0x00001000LL << 32;
270 		lapic_et.et_max_period.sec = 1;
271 		lapic_et.et_max_period.frac = 0;
272 		lapic_et.et_start = lapic_et_start;
273 		lapic_et.et_stop = lapic_et_stop;
274 		lapic_et.et_priv = NULL;
275 		et_register(&lapic_et);
276 	}
277 }
278 
279 /*
280  * Create a local APIC instance.
281  */
282 void
283 lapic_create(u_int apic_id, int boot_cpu)
284 {
285 	int i;
286 
287 	if (apic_id > MAX_APIC_ID) {
288 		printf("APIC: Ignoring local APIC with ID %d\n", apic_id);
289 		if (boot_cpu)
290 			panic("Can't ignore BSP");
291 		return;
292 	}
293 	KASSERT(!lapics[apic_id].la_present, ("duplicate local APIC %u",
294 	    apic_id));
295 
296 	/*
297 	 * Assume no local LVT overrides and a cluster of 0 and
298 	 * intra-cluster ID of 0.
299 	 */
300 	lapics[apic_id].la_present = 1;
301 	lapics[apic_id].la_id = apic_id;
302 	for (i = 0; i <= LVT_MAX; i++) {
303 		lapics[apic_id].la_lvts[i] = lvts[i];
304 		lapics[apic_id].la_lvts[i].lvt_active = 0;
305 	}
306 	for (i = 0; i <= APIC_NUM_IOINTS; i++)
307 	    lapics[apic_id].la_ioint_irqs[i] = -1;
308 	lapics[apic_id].la_ioint_irqs[IDT_SYSCALL - APIC_IO_INTS] = IRQ_SYSCALL;
309 	lapics[apic_id].la_ioint_irqs[APIC_TIMER_INT - APIC_IO_INTS] =
310 	    IRQ_TIMER;
311 #ifdef KDTRACE_HOOKS
312 	lapics[apic_id].la_ioint_irqs[IDT_DTRACE_RET - APIC_IO_INTS] = IRQ_DTRACE_RET;
313 #endif
314 
315 
316 #ifdef SMP
317 	cpu_add(apic_id, boot_cpu);
318 #endif
319 }
320 
321 /*
322  * Dump contents of local APIC registers
323  */
324 void
325 lapic_dump(const char* str)
326 {
327 	uint32_t maxlvt;
328 
329 	maxlvt = (lapic->version & APIC_VER_MAXLVT) >> MAXLVTSHIFT;
330 	printf("cpu%d %s:\n", PCPU_GET(cpuid), str);
331 	printf("     ID: 0x%08x   VER: 0x%08x LDR: 0x%08x DFR: 0x%08x\n",
332 	    lapic->id, lapic->version, lapic->ldr, lapic->dfr);
333 	printf("  lint0: 0x%08x lint1: 0x%08x TPR: 0x%08x SVR: 0x%08x\n",
334 	    lapic->lvt_lint0, lapic->lvt_lint1, lapic->tpr, lapic->svr);
335 	printf("  timer: 0x%08x therm: 0x%08x err: 0x%08x",
336 	    lapic->lvt_timer, lapic->lvt_thermal, lapic->lvt_error);
337 	if (maxlvt >= LVT_PMC)
338 		printf(" pmc: 0x%08x", lapic->lvt_pcint);
339 	printf("\n");
340 	if (maxlvt >= LVT_CMCI)
341 		printf("   cmci: 0x%08x\n", lapic->lvt_cmci);
342 }
343 
344 void
345 lapic_setup(int boot)
346 {
347 	struct lapic *la;
348 	u_int32_t maxlvt;
349 	register_t saveintr;
350 	char buf[MAXCOMLEN + 1];
351 
352 	la = &lapics[lapic_id()];
353 	KASSERT(la->la_present, ("missing APIC structure"));
354 	saveintr = intr_disable();
355 	maxlvt = (lapic->version & APIC_VER_MAXLVT) >> MAXLVTSHIFT;
356 
357 	/* Initialize the TPR to allow all interrupts. */
358 	lapic_set_tpr(0);
359 
360 	/* Setup spurious vector and enable the local APIC. */
361 	lapic_enable();
362 
363 	/* Program LINT[01] LVT entries. */
364 	lapic->lvt_lint0 = lvt_mode(la, LVT_LINT0, lapic->lvt_lint0);
365 	lapic->lvt_lint1 = lvt_mode(la, LVT_LINT1, lapic->lvt_lint1);
366 
367 	/* Program the PMC LVT entry if present. */
368 	if (maxlvt >= LVT_PMC)
369 		lapic->lvt_pcint = lvt_mode(la, LVT_PMC, lapic->lvt_pcint);
370 
371 	/* Program timer LVT and setup handler. */
372 	lapic->lvt_timer = lvt_mode(la, LVT_TIMER, lapic->lvt_timer);
373 	if (boot) {
374 		snprintf(buf, sizeof(buf), "cpu%d:timer", PCPU_GET(cpuid));
375 		intrcnt_add(buf, &la->la_timer_count);
376 	}
377 
378 	/* Setup the timer if configured. */
379 	if (la->la_timer_mode != 0) {
380 		KASSERT(la->la_timer_period != 0, ("lapic%u: zero timer period",
381 		    lapic_id()));
382 		lapic_timer_stop();
383 		lapic_timer_set_divisor(lapic_timer_divisor);
384 		lapic_timer_enable_intr();
385 		if (la->la_timer_mode == 1)
386 			lapic_timer_periodic(la->la_timer_period);
387 		else
388 			lapic_timer_oneshot(la->la_timer_period);
389 	}
390 
391 	/* Program error LVT and clear any existing errors. */
392 	lapic->lvt_error = lvt_mode(la, LVT_ERROR, lapic->lvt_error);
393 	lapic->esr = 0;
394 
395 	/* XXX: Thermal LVT */
396 
397 	/* Program the CMCI LVT entry if present. */
398 	if (maxlvt >= LVT_CMCI)
399 		lapic->lvt_cmci = lvt_mode(la, LVT_CMCI, lapic->lvt_cmci);
400 
401 	intr_restore(saveintr);
402 }
403 
404 void
405 lapic_reenable_pmc(void)
406 {
407 #ifdef HWPMC_HOOKS
408 	uint32_t value;
409 
410 	value = lapic->lvt_pcint;
411 	value &= ~APIC_LVT_M;
412 	lapic->lvt_pcint = value;
413 #endif
414 }
415 
416 #ifdef HWPMC_HOOKS
417 static void
418 lapic_update_pmc(void *dummy)
419 {
420 	struct lapic *la;
421 
422 	la = &lapics[lapic_id()];
423 	lapic->lvt_pcint = lvt_mode(la, LVT_PMC, lapic->lvt_pcint);
424 }
425 #endif
426 
427 int
428 lapic_enable_pmc(void)
429 {
430 #ifdef HWPMC_HOOKS
431 	u_int32_t maxlvt;
432 
433 	/* Fail if the local APIC is not present. */
434 	if (lapic == NULL)
435 		return (0);
436 
437 	/* Fail if the PMC LVT is not present. */
438 	maxlvt = (lapic->version & APIC_VER_MAXLVT) >> MAXLVTSHIFT;
439 	if (maxlvt < LVT_PMC)
440 		return (0);
441 
442 	lvts[LVT_PMC].lvt_masked = 0;
443 
444 #ifdef SMP
445 	/*
446 	 * If hwpmc was loaded at boot time then the APs may not be
447 	 * started yet.  In that case, don't forward the request to
448 	 * them as they will program the lvt when they start.
449 	 */
450 	if (smp_started)
451 		smp_rendezvous(NULL, lapic_update_pmc, NULL, NULL);
452 	else
453 #endif
454 		lapic_update_pmc(NULL);
455 	return (1);
456 #else
457 	return (0);
458 #endif
459 }
460 
461 void
462 lapic_disable_pmc(void)
463 {
464 #ifdef HWPMC_HOOKS
465 	u_int32_t maxlvt;
466 
467 	/* Fail if the local APIC is not present. */
468 	if (lapic == NULL)
469 		return;
470 
471 	/* Fail if the PMC LVT is not present. */
472 	maxlvt = (lapic->version & APIC_VER_MAXLVT) >> MAXLVTSHIFT;
473 	if (maxlvt < LVT_PMC)
474 		return;
475 
476 	lvts[LVT_PMC].lvt_masked = 1;
477 
478 #ifdef SMP
479 	/* The APs should always be started when hwpmc is unloaded. */
480 	KASSERT(mp_ncpus == 1 || smp_started, ("hwpmc unloaded too early"));
481 #endif
482 	smp_rendezvous(NULL, lapic_update_pmc, NULL, NULL);
483 #endif
484 }
485 
486 static int
487 lapic_et_start(struct eventtimer *et,
488     struct bintime *first, struct bintime *period)
489 {
490 	struct lapic *la;
491 	u_long value;
492 
493 	if (et->et_frequency == 0) {
494 		/* Start off with a divisor of 2 (power on reset default). */
495 		lapic_timer_divisor = 2;
496 		/* Try to calibrate the local APIC timer. */
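		/*
		 * Run the timer for one second at each divisor.  If the
		 * counter reaches zero within that second the divisor is
		 * too small, so double it and try again; otherwise the
		 * number of counts that elapsed is the timer frequency.
		 */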
497 		do {
498 			lapic_timer_set_divisor(lapic_timer_divisor);
499 			lapic_timer_oneshot(APIC_TIMER_MAX_COUNT);
500 			DELAY(1000000);
501 			value = APIC_TIMER_MAX_COUNT - lapic->ccr_timer;
502 			if (value != APIC_TIMER_MAX_COUNT)
503 				break;
504 			lapic_timer_divisor <<= 1;
505 		} while (lapic_timer_divisor <= 128);
506 		if (lapic_timer_divisor > 128)
507 			panic("lapic: Divisor too big");
508 		if (bootverbose)
509 			printf("lapic: Divisor %lu, Frequency %lu Hz\n",
510 			    lapic_timer_divisor, value);
511 		et->et_frequency = value;
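		/*
		 * Express the shortest supported period (two timer ticks)
		 * and the longest (2^32 - 2 ticks) as bintimes derived from
		 * the measured frequency.
		 */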
512 		et->et_min_period.sec = 0;
513 		et->et_min_period.frac =
514 		    ((0x00000002LLU << 32) / et->et_frequency) << 32;
515 		et->et_max_period.sec = 0xfffffffeLLU / et->et_frequency;
516 		et->et_max_period.frac =
517 		    ((0xfffffffeLLU << 32) / et->et_frequency) << 32;
518 	}
519 	lapic_timer_stop();
520 	lapic_timer_set_divisor(lapic_timer_divisor);
521 	lapic_timer_enable_intr();
522 	la = &lapics[lapic_id()];
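	/*
	 * Convert the requested bintime into timer ticks: frac is a 64-bit
	 * binary fraction of a second, so multiplying its upper 32 bits by
	 * the frequency and shifting right by 32 gives the ticks for the
	 * fractional part; whole seconds are added in separately.
	 */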
523 	if (period != NULL) {
524 		la->la_timer_mode = 1;
525 		la->la_timer_period =
526 		    (et->et_frequency * (period->frac >> 32)) >> 32;
527 		if (period->sec != 0)
528 			la->la_timer_period += et->et_frequency * period->sec;
529 		lapic_timer_periodic(la->la_timer_period);
530 	} else {
531 		la->la_timer_mode = 2;
532 		la->la_timer_period =
533 		    (et->et_frequency * (first->frac >> 32)) >> 32;
534 		if (first->sec != 0)
535 			la->la_timer_period += et->et_frequency * first->sec;
536 		lapic_timer_oneshot(la->la_timer_period);
537 	}
538 	return (0);
539 }
540 
541 static int
542 lapic_et_stop(struct eventtimer *et)
543 {
544 	struct lapic *la = &lapics[lapic_id()];
545 
546 	la->la_timer_mode = 0;
547 	lapic_timer_stop();
548 	return (0);
549 }
550 
551 void
552 lapic_disable(void)
553 {
554 	uint32_t value;
555 
556 	/* Software disable the local APIC. */
557 	value = lapic->svr;
558 	value &= ~APIC_SVR_SWEN;
559 	lapic->svr = value;
560 }
561 
562 static void
563 lapic_enable(void)
564 {
565 	u_int32_t value;
566 
567 	/* Program the spurious vector to enable the local APIC. */
568 	value = lapic->svr;
569 	value &= ~(APIC_SVR_VECTOR | APIC_SVR_FOCUS);
570 	value |= (APIC_SVR_FEN | APIC_SVR_SWEN | APIC_SPURIOUS_INT);
571 	lapic->svr = value;
572 }
573 
574 /* Reset the local APIC on the BSP during resume. */
575 static void
576 lapic_resume(struct pic *pic)
577 {
578 
579 	lapic_setup(0);
580 }
581 
582 int
583 lapic_id(void)
584 {
585 
586 	KASSERT(lapic != NULL, ("local APIC is not mapped"));
587 	return (lapic->id >> APIC_ID_SHIFT);
588 }
589 
590 int
591 lapic_intr_pending(u_int vector)
592 {
593 	volatile u_int32_t *irr;
594 
595 	/*
596 	 * The IRR registers are an array of 128-bit registers, each of
597 	 * which only describes 32 interrupts in its low 32 bits.  Thus,
598 	 * we divide the vector by 32 to get the 128-bit index.  We then
599 	 * multiply that index by 4 to get the equivalent index from
600 	 * treating the IRR as an array of 32-bit registers.  Finally, we
601 	 * take the vector modulo 32 to determine the individual bit to
602 	 * test.
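	 * For example, for vector 69 (0x45): 69 / 32 = 2, so the pending bit
	 * lives in the third IRR register, read below as irr[2 * 4], and the
	 * bit tested is 69 % 32 = 5.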
603 	 */
604 	irr = &lapic->irr0;
605 	return (irr[(vector / 32) * 4] & 1 << (vector % 32));
606 }
607 
608 void
609 lapic_set_logical_id(u_int apic_id, u_int cluster, u_int cluster_id)
610 {
611 	struct lapic *la;
612 
613 	KASSERT(lapics[apic_id].la_present, ("%s: APIC %u doesn't exist",
614 	    __func__, apic_id));
615 	KASSERT(cluster <= APIC_MAX_CLUSTER, ("%s: cluster %u too big",
616 	    __func__, cluster));
617 	KASSERT(cluster_id <= APIC_MAX_INTRACLUSTER_ID,
618 	    ("%s: intra cluster id %u too big", __func__, cluster_id));
619 	la = &lapics[apic_id];
620 	la->la_cluster = cluster;
621 	la->la_cluster_id = cluster_id;
622 }
623 
624 int
625 lapic_set_lvt_mask(u_int apic_id, u_int pin, u_char masked)
626 {
627 
628 	if (pin > LVT_MAX)
629 		return (EINVAL);
630 	if (apic_id == APIC_ID_ALL) {
631 		lvts[pin].lvt_masked = masked;
632 		if (bootverbose)
633 			printf("lapic:");
634 	} else {
635 		KASSERT(lapics[apic_id].la_present,
636 		    ("%s: missing APIC %u", __func__, apic_id));
637 		lapics[apic_id].la_lvts[pin].lvt_masked = masked;
638 		lapics[apic_id].la_lvts[pin].lvt_active = 1;
639 		if (bootverbose)
640 			printf("lapic%u:", apic_id);
641 	}
642 	if (bootverbose)
643 		printf(" LINT%u %s\n", pin, masked ? "masked" : "unmasked");
644 	return (0);
645 }
646 
647 int
648 lapic_set_lvt_mode(u_int apic_id, u_int pin, u_int32_t mode)
649 {
650 	struct lvt *lvt;
651 
652 	if (pin > LVT_MAX)
653 		return (EINVAL);
654 	if (apic_id == APIC_ID_ALL) {
655 		lvt = &lvts[pin];
656 		if (bootverbose)
657 			printf("lapic:");
658 	} else {
659 		KASSERT(lapics[apic_id].la_present,
660 		    ("%s: missing APIC %u", __func__, apic_id));
661 		lvt = &lapics[apic_id].la_lvts[pin];
662 		lvt->lvt_active = 1;
663 		if (bootverbose)
664 			printf("lapic%u:", apic_id);
665 	}
666 	lvt->lvt_mode = mode;
667 	switch (mode) {
668 	case APIC_LVT_DM_NMI:
669 	case APIC_LVT_DM_SMI:
670 	case APIC_LVT_DM_INIT:
671 	case APIC_LVT_DM_EXTINT:
672 		lvt->lvt_edgetrigger = 1;
673 		lvt->lvt_activehi = 1;
674 		if (mode == APIC_LVT_DM_EXTINT)
675 			lvt->lvt_masked = 1;
676 		else
677 			lvt->lvt_masked = 0;
678 		break;
679 	default:
680 		panic("Unsupported delivery mode: 0x%x\n", mode);
681 	}
682 	if (bootverbose) {
683 		printf(" Routing ");
684 		switch (mode) {
685 		case APIC_LVT_DM_NMI:
686 			printf("NMI");
687 			break;
688 		case APIC_LVT_DM_SMI:
689 			printf("SMI");
690 			break;
691 		case APIC_LVT_DM_INIT:
692 			printf("INIT");
693 			break;
694 		case APIC_LVT_DM_EXTINT:
695 			printf("ExtINT");
696 			break;
697 		}
698 		printf(" -> LINT%u\n", pin);
699 	}
700 	return (0);
701 }
702 
703 int
704 lapic_set_lvt_polarity(u_int apic_id, u_int pin, enum intr_polarity pol)
705 {
706 
707 	if (pin > LVT_MAX || pol == INTR_POLARITY_CONFORM)
708 		return (EINVAL);
709 	if (apic_id == APIC_ID_ALL) {
710 		lvts[pin].lvt_activehi = (pol == INTR_POLARITY_HIGH);
711 		if (bootverbose)
712 			printf("lapic:");
713 	} else {
714 		KASSERT(lapics[apic_id].la_present,
715 		    ("%s: missing APIC %u", __func__, apic_id));
716 		lapics[apic_id].la_lvts[pin].lvt_active = 1;
717 		lapics[apic_id].la_lvts[pin].lvt_activehi =
718 		    (pol == INTR_POLARITY_HIGH);
719 		if (bootverbose)
720 			printf("lapic%u:", apic_id);
721 	}
722 	if (bootverbose)
723 		printf(" LINT%u polarity: %s\n", pin,
724 		    pol == INTR_POLARITY_HIGH ? "high" : "low");
725 	return (0);
726 }
727 
728 int
729 lapic_set_lvt_triggermode(u_int apic_id, u_int pin, enum intr_trigger trigger)
730 {
731 
732 	if (pin > LVT_MAX || trigger == INTR_TRIGGER_CONFORM)
733 		return (EINVAL);
734 	if (apic_id == APIC_ID_ALL) {
735 		lvts[pin].lvt_edgetrigger = (trigger == INTR_TRIGGER_EDGE);
736 		if (bootverbose)
737 			printf("lapic:");
738 	} else {
739 		KASSERT(lapics[apic_id].la_present,
740 		    ("%s: missing APIC %u", __func__, apic_id));
741 		lapics[apic_id].la_lvts[pin].lvt_edgetrigger =
742 		    (trigger == INTR_TRIGGER_EDGE);
743 		lapics[apic_id].la_lvts[pin].lvt_active = 1;
744 		if (bootverbose)
745 			printf("lapic%u:", apic_id);
746 	}
747 	if (bootverbose)
748 		printf(" LINT%u trigger: %s\n", pin,
749 		    trigger == INTR_TRIGGER_EDGE ? "edge" : "level");
750 	return (0);
751 }
752 
753 /*
754  * Adjust the TPR of the current CPU so that it blocks all interrupts below
755  * the passed in vector.
756  */
757 void
758 lapic_set_tpr(u_int vector)
759 {
760 #ifdef CHEAP_TPR
761 	lapic->tpr = vector;
762 #else
763 	u_int32_t tpr;
764 
765 	tpr = lapic->tpr & ~APIC_TPR_PRIO;
766 	tpr |= vector;
767 	lapic->tpr = tpr;
768 #endif
769 }
770 
771 void
772 lapic_eoi(void)
773 {
774 
775 	lapic->eoi = 0;
776 }
777 
778 void
779 lapic_handle_intr(int vector, struct trapframe *frame)
780 {
781 	struct intsrc *isrc;
782 
783 	isrc = intr_lookup_source(apic_idt_to_irq(PCPU_GET(apic_id),
784 	    vector));
785 	intr_execute_handlers(isrc, frame);
786 }
787 
788 void
789 lapic_handle_timer(struct trapframe *frame)
790 {
791 	struct lapic *la;
792 	struct trapframe *oldframe;
793 	struct thread *td;
794 
795 	/* Send EOI first thing. */
796 	lapic_eoi();
797 
798 #if defined(SMP) && !defined(SCHED_ULE)
799 	/*
800 	 * Don't do any accounting for the disabled HTT cores, since it
801 	 * will provide misleading numbers for userland.
802 	 *
803 	 * No locking is necessary here, since even if we lose the race
804 	 * when hlt_cpus_mask changes it is not a big deal, really.
805 	 *
806 	 * Don't do that for ULE, since ULE doesn't consider hlt_cpus_mask
807 	 * and unlike other schedulers it actually schedules threads to
808 	 * those CPUs.
809 	 */
810 	if ((hlt_cpus_mask & (1 << PCPU_GET(cpuid))) != 0)
811 		return;
812 #endif
813 
814 	/* Look up our local APIC structure for the tick counters. */
815 	la = &lapics[PCPU_GET(apic_id)];
816 	(*la->la_timer_count)++;
817 	critical_enter();
818 	if (lapic_et.et_active) {
819 		td = curthread;
820 		td->td_intr_nesting_level++;
821 		oldframe = td->td_intr_frame;
822 		td->td_intr_frame = frame;
823 		lapic_et.et_event_cb(&lapic_et, lapic_et.et_arg);
824 		td->td_intr_frame = oldframe;
825 		td->td_intr_nesting_level--;
826 	}
827 	critical_exit();
828 }
829 
830 static void
831 lapic_timer_set_divisor(u_int divisor)
832 {
833 
834 	KASSERT(powerof2(divisor), ("lapic: invalid divisor %u", divisor));
835 	KASSERT(ffs(divisor) <= sizeof(lapic_timer_divisors) /
836 	    sizeof(u_int32_t), ("lapic: invalid divisor %u", divisor));
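	/* E.g. a divisor of 8 gives ffs(8) - 1 = 3, which selects APIC_TDCR_8. */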
837 	lapic->dcr_timer = lapic_timer_divisors[ffs(divisor) - 1];
838 }
839 
840 static void
841 lapic_timer_oneshot(u_int count)
842 {
843 	u_int32_t value;
844 
845 	value = lapic->lvt_timer;
846 	value &= ~APIC_LVTT_TM;
847 	value |= APIC_LVTT_TM_ONE_SHOT;
848 	lapic->lvt_timer = value;
849 	lapic->icr_timer = count;
850 }
851 
852 static void
853 lapic_timer_periodic(u_int count)
854 {
855 	u_int32_t value;
856 
857 	value = lapic->lvt_timer;
858 	value &= ~APIC_LVTT_TM;
859 	value |= APIC_LVTT_TM_PERIODIC;
860 	lapic->lvt_timer = value;
861 	lapic->icr_timer = count;
862 }
863 
864 static void
865 lapic_timer_stop(void)
866 {
867 	u_int32_t value;
868 
869 	value = lapic->lvt_timer;
870 	value &= ~APIC_LVTT_TM;
871 	value |= APIC_LVT_M;
872 	lapic->lvt_timer = value;
873 	lapic->icr_timer = 0;
874 }
875 
876 static void
877 lapic_timer_enable_intr(void)
878 {
879 	u_int32_t value;
880 
881 	value = lapic->lvt_timer;
882 	value &= ~APIC_LVT_M;
883 	lapic->lvt_timer = value;
884 }
885 
886 void
887 lapic_handle_cmc(void)
888 {
889 
890 	lapic_eoi();
891 	cmc_intr();
892 }
893 
894 /*
895  * Called from mca_init() to activate the CMC interrupt if this CPU is
896  * responsible for monitoring any MC banks for CMC events.  Since mca_init()
897  * is called prior to lapic_setup() during boot, this just needs to unmask
898  * this CPU's LVT_CMCI entry.
899  */
900 void
901 lapic_enable_cmc(void)
902 {
903 	u_int apic_id;
904 
905 	apic_id = PCPU_GET(apic_id);
906 	KASSERT(lapics[apic_id].la_present,
907 	    ("%s: missing APIC %u", __func__, apic_id));
908 	lapics[apic_id].la_lvts[LVT_CMCI].lvt_masked = 0;
909 	lapics[apic_id].la_lvts[LVT_CMCI].lvt_active = 1;
910 	if (bootverbose)
911 		printf("lapic%u: CMCI unmasked\n", apic_id);
912 }
913 
914 void
915 lapic_handle_error(void)
916 {
917 	u_int32_t esr;
918 
919 	/*
920 	 * Read the contents of the error status register.  Write to
921 	 * the register first before reading from it to force the APIC
922 	 * to update its value to indicate any errors that have
923 	 * occurred since the previous write to the register.
924 	 */
925 	lapic->esr = 0;
926 	esr = lapic->esr;
927 
928 	printf("CPU%d: local APIC error 0x%x\n", PCPU_GET(cpuid), esr);
929 	lapic_eoi();
930 }
931 
932 u_int
933 apic_cpuid(u_int apic_id)
934 {
935 #ifdef SMP
936 	return apic_cpuids[apic_id];
937 #else
938 	return 0;
939 #endif
940 }
941 
942 /* Request a free IDT vector to be used by the specified IRQ. */
943 u_int
944 apic_alloc_vector(u_int apic_id, u_int irq)
945 {
946 	u_int vector;
947 
948 	KASSERT(irq < NUM_IO_INTS, ("Invalid IRQ %u", irq));
949 
950 	/*
951 	 * Search for a free vector.  Currently we just use a very simple
952 	 * algorithm to find the first free vector.
953 	 */
954 	mtx_lock_spin(&icu_lock);
955 	for (vector = 0; vector < APIC_NUM_IOINTS; vector++) {
956 		if (lapics[apic_id].la_ioint_irqs[vector] != -1)
957 			continue;
958 		lapics[apic_id].la_ioint_irqs[vector] = irq;
959 		mtx_unlock_spin(&icu_lock);
960 		return (vector + APIC_IO_INTS);
961 	}
962 	mtx_unlock_spin(&icu_lock);
963 	return (0);
964 }
965 
966 /*
967  * Request 'count' free contiguous IDT vectors to be used by 'count'
968  * IRQs.  'count' must be a power of two and the vectors will be
969  * aligned on a boundary of 'align'.  If the request cannot be
970  * satisfied, 0 is returned.
971  */
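/*
 * For example, a request for four vectors aligned to four may only start at
 * offsets 0, 4, 8, ... within the I/O interrupt range.  Multi-message MSI is
 * the expected consumer of such aligned blocks, since a device may modify the
 * low bits of the vector to select among its messages.
 */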
972 u_int
973 apic_alloc_vectors(u_int apic_id, u_int *irqs, u_int count, u_int align)
974 {
975 	u_int first, run, vector;
976 
977 	KASSERT(powerof2(count), ("bad count"));
978 	KASSERT(powerof2(align), ("bad align"));
979 	KASSERT(align >= count, ("align < count"));
980 #ifdef INVARIANTS
981 	for (run = 0; run < count; run++)
982 		KASSERT(irqs[run] < NUM_IO_INTS, ("Invalid IRQ %u at index %u",
983 		    irqs[run], run));
984 #endif
985 
986 	/*
987 	 * Search for 'count' free vectors.  As with apic_alloc_vector(),
988 	 * this just uses a simple first fit algorithm.
989 	 */
990 	run = 0;
991 	first = 0;
992 	mtx_lock_spin(&icu_lock);
993 	for (vector = 0; vector < APIC_NUM_IOINTS; vector++) {
994 
995 		/* Vector is in use, end run. */
996 		if (lapics[apic_id].la_ioint_irqs[vector] != -1) {
997 			run = 0;
998 			first = 0;
999 			continue;
1000 		}
1001 
1002 		/* Start a new run if run == 0 and vector is aligned. */
1003 		if (run == 0) {
1004 			if ((vector & (align - 1)) != 0)
1005 				continue;
1006 			first = vector;
1007 		}
1008 		run++;
1009 
1010 		/* Keep looping if the run isn't long enough yet. */
1011 		if (run < count)
1012 			continue;
1013 
1014 		/* Found a run, assign IRQs and return the first vector. */
1015 		for (vector = 0; vector < count; vector++)
1016 			lapics[apic_id].la_ioint_irqs[first + vector] =
1017 			    irqs[vector];
1018 		mtx_unlock_spin(&icu_lock);
1019 		return (first + APIC_IO_INTS);
1020 	}
1021 	mtx_unlock_spin(&icu_lock);
1022 	printf("APIC: Couldn't find APIC vectors for %u IRQs\n", count);
1023 	return (0);
1024 }
1025 
1026 /*
1027  * Enable a vector for a particular apic_id.  Since all lapics share IDT
1028  * entries and ioint_handlers, this enables the vector on all lapics.  Lapics
1029  * which do not have the vector configured would report spurious interrupts
1030  * should it fire.
1031  */
1032 void
1033 apic_enable_vector(u_int apic_id, u_int vector)
1034 {
1035 
1036 	KASSERT(vector != IDT_SYSCALL, ("Attempt to overwrite syscall entry"));
1037 	KASSERT(ioint_handlers[vector / 32] != NULL,
1038 	    ("No ISR handler for vector %u", vector));
1039 #ifdef KDTRACE_HOOKS
1040 	KASSERT(vector != IDT_DTRACE_RET,
1041 	    ("Attempt to overwrite DTrace entry"));
1042 #endif
1043 	setidt(vector, ioint_handlers[vector / 32], SDT_APIC, SEL_KPL,
1044 	    GSEL_APIC);
1045 }
1046 
1047 void
1048 apic_disable_vector(u_int apic_id, u_int vector)
1049 {
1050 
1051 	KASSERT(vector != IDT_SYSCALL, ("Attempt to overwrite syscall entry"));
1052 #ifdef KDTRACE_HOOKS
1053 	KASSERT(vector != IDT_DTRACE_RET,
1054 	    ("Attempt to overwrite DTrace entry"));
1055 #endif
1056 	KASSERT(ioint_handlers[vector / 32] != NULL,
1057 	    ("No ISR handler for vector %u", vector));
1058 #ifdef notyet
1059 	/*
1060 	 * We cannot currently clear the IDT entry because other CPUs
1061 	 * may have a valid vector at this offset.
1062 	 */
1063 	setidt(vector, &IDTVEC(rsvd), SDT_APICT, SEL_KPL, GSEL_APIC);
1064 #endif
1065 }
1066 
1067 /* Release an APIC vector when it's no longer in use. */
1068 void
1069 apic_free_vector(u_int apic_id, u_int vector, u_int irq)
1070 {
1071 	struct thread *td;
1072 
1073 	KASSERT(vector >= APIC_IO_INTS && vector != IDT_SYSCALL &&
1074 	    vector <= APIC_IO_INTS + APIC_NUM_IOINTS,
1075 	    ("Vector %u does not map to an IRQ line", vector));
1076 	KASSERT(irq < NUM_IO_INTS, ("Invalid IRQ %u", irq));
1077 	KASSERT(lapics[apic_id].la_ioint_irqs[vector - APIC_IO_INTS] ==
1078 	    irq, ("IRQ mismatch"));
1079 #ifdef KDTRACE_HOOKS
1080 	KASSERT(vector != IDT_DTRACE_RET,
1081 	    ("Attempt to overwrite DTrace entry"));
1082 #endif
1083 
1084 	/*
1085 	 * Bind us to the CPU that owned the vector before freeing it so
1086 	 * we don't lose an interrupt delivery race.
1087 	 */
1088 	td = curthread;
1089 	if (!rebooting) {
1090 		thread_lock(td);
1091 		if (sched_is_bound(td))
1092 			panic("apic_free_vector: Thread already bound.\n");
1093 		sched_bind(td, apic_cpuid(apic_id));
1094 		thread_unlock(td);
1095 	}
1096 	mtx_lock_spin(&icu_lock);
1097 	lapics[apic_id].la_ioint_irqs[vector - APIC_IO_INTS] = -1;
1098 	mtx_unlock_spin(&icu_lock);
1099 	if (!rebooting) {
1100 		thread_lock(td);
1101 		sched_unbind(td);
1102 		thread_unlock(td);
1103 	}
1104 }
1105 
1106 /* Map an IDT vector (APIC) to an IRQ (interrupt source). */
1107 u_int
1108 apic_idt_to_irq(u_int apic_id, u_int vector)
1109 {
1110 	int irq;
1111 
1112 	KASSERT(vector >= APIC_IO_INTS && vector != IDT_SYSCALL &&
1113 	    vector <= APIC_IO_INTS + APIC_NUM_IOINTS,
1114 	    ("Vector %u does not map to an IRQ line", vector));
1115 #ifdef KDTRACE_HOOKS
1116 	KASSERT(vector != IDT_DTRACE_RET,
1117 	    ("Attempt to overwrite DTrace entry"));
1118 #endif
1119 	irq = lapics[apic_id].la_ioint_irqs[vector - APIC_IO_INTS];
1120 	if (irq < 0)
1121 		irq = 0;
1122 	return (irq);
1123 }
1124 
1125 #ifdef DDB
1126 /*
1127  * Dump data about APIC IDT vector mappings.
1128  */
1129 DB_SHOW_COMMAND(apic, db_show_apic)
1130 {
1131 	struct intsrc *isrc;
1132 	int i, verbose;
1133 	u_int apic_id;
1134 	u_int irq;
1135 
1136 	if (strcmp(modif, "vv") == 0)
1137 		verbose = 2;
1138 	else if (strcmp(modif, "v") == 0)
1139 		verbose = 1;
1140 	else
1141 		verbose = 0;
1142 	for (apic_id = 0; apic_id <= MAX_APIC_ID; apic_id++) {
1143 		if (lapics[apic_id].la_present == 0)
1144 			continue;
1145 		db_printf("Interrupts bound to lapic %u\n", apic_id);
1146 		for (i = 0; i < APIC_NUM_IOINTS + 1 && !db_pager_quit; i++) {
1147 			irq = lapics[apic_id].la_ioint_irqs[i];
1148 			if (irq == -1 || irq == IRQ_SYSCALL)
1149 				continue;
1150 #ifdef KDTRACE_HOOKS
1151 			if (irq == IRQ_DTRACE_RET)
1152 				continue;
1153 #endif
1154 			db_printf("vec 0x%2x -> ", i + APIC_IO_INTS);
1155 			if (irq == IRQ_TIMER)
1156 				db_printf("lapic timer\n");
1157 			else if (irq < NUM_IO_INTS) {
1158 				isrc = intr_lookup_source(irq);
1159 				if (isrc == NULL || verbose == 0)
1160 					db_printf("IRQ %u\n", irq);
1161 				else
1162 					db_dump_intr_event(isrc->is_event,
1163 					    verbose == 2);
1164 			} else
1165 				db_printf("IRQ %u ???\n", irq);
1166 		}
1167 	}
1168 }
1169 
1170 static void
1171 dump_mask(const char *prefix, uint32_t v, int base)
1172 {
1173 	int i, first;
1174 
1175 	first = 1;
1176 	for (i = 0; i < 32; i++)
1177 		if (v & (1 << i)) {
1178 			if (first) {
1179 				db_printf("%s:", prefix);
1180 				first = 0;
1181 			}
1182 			db_printf(" %02x", base + i);
1183 		}
1184 	if (!first)
1185 		db_printf("\n");
1186 }
1187 
1188 /* Show info from the lapic regs for this CPU. */
1189 DB_SHOW_COMMAND(lapic, db_show_lapic)
1190 {
1191 	uint32_t v;
1192 
1193 	db_printf("lapic ID = %d\n", lapic_id());
1194 	v = lapic->version;
1195 	db_printf("version  = %d.%d\n", (v & APIC_VER_VERSION) >> 4,
1196 	    v & 0xf);
1197 	db_printf("max LVT  = %d\n", (v & APIC_VER_MAXLVT) >> MAXLVTSHIFT);
1198 	v = lapic->svr;
1199 	db_printf("SVR      = %02x (%s)\n", v & APIC_SVR_VECTOR,
1200 	    v & APIC_SVR_ENABLE ? "enabled" : "disabled");
1201 	db_printf("TPR      = %02x\n", lapic->tpr);
1202 
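/*
 * Print one 32-bit slice of a 256-bit register set (ISR, TMR, or IRR):
 * dump_field() pastes the register prefix and index together, and dump_mask()
 * labels each set bit with its absolute vector number (index * 32 + bit).
 */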
1203 #define dump_field(prefix, index)					\
1204 	dump_mask(__XSTRING(prefix ## index), lapic->prefix ## index,	\
1205 	    index * 32)
1206 
1207 	db_printf("In-service Interrupts:\n");
1208 	dump_field(isr, 0);
1209 	dump_field(isr, 1);
1210 	dump_field(isr, 2);
1211 	dump_field(isr, 3);
1212 	dump_field(isr, 4);
1213 	dump_field(isr, 5);
1214 	dump_field(isr, 6);
1215 	dump_field(isr, 7);
1216 
1217 	db_printf("TMR Interrupts:\n");
1218 	dump_field(tmr, 0);
1219 	dump_field(tmr, 1);
1220 	dump_field(tmr, 2);
1221 	dump_field(tmr, 3);
1222 	dump_field(tmr, 4);
1223 	dump_field(tmr, 5);
1224 	dump_field(tmr, 6);
1225 	dump_field(tmr, 7);
1226 
1227 	db_printf("IRR Interrupts:\n");
1228 	dump_field(irr, 0);
1229 	dump_field(irr, 1);
1230 	dump_field(irr, 2);
1231 	dump_field(irr, 3);
1232 	dump_field(irr, 4);
1233 	dump_field(irr, 5);
1234 	dump_field(irr, 6);
1235 	dump_field(irr, 7);
1236 
1237 #undef dump_field
1238 }
1239 #endif
1240 
1241 /*
1242  * APIC probing support code.  This includes code to manage enumerators.
1243  */
1244 
1245 static SLIST_HEAD(, apic_enumerator) enumerators =
1246 	SLIST_HEAD_INITIALIZER(enumerators);
1247 static struct apic_enumerator *best_enum;
1248 
1249 void
1250 apic_register_enumerator(struct apic_enumerator *enumerator)
1251 {
1252 #ifdef INVARIANTS
1253 	struct apic_enumerator *apic_enum;
1254 
1255 	SLIST_FOREACH(apic_enum, &enumerators, apic_next) {
1256 		if (apic_enum == enumerator)
1257 			panic("%s: Duplicate register of %s", __func__,
1258 			    enumerator->apic_name);
1259 	}
1260 #endif
1261 	SLIST_INSERT_HEAD(&enumerators, enumerator, apic_next);
1262 }
1263 
1264 /*
1265  * We have to look for CPUs very, very early because certain subsystems
1266  * want to know how many CPUs we have extremely early in the boot
1267  * process.
1268  */
1269 static void
1270 apic_init(void *dummy __unused)
1271 {
1272 	struct apic_enumerator *enumerator;
1273 #ifndef __amd64__
1274 	uint64_t apic_base;
1275 #endif
1276 	int retval, best;
1277 
1278 	/* We only support built-in local APICs. */
1279 	if (!(cpu_feature & CPUID_APIC))
1280 		return;
1281 
1282 	/* Don't probe if APIC mode is disabled. */
1283 	if (resource_disabled("apic", 0))
1284 		return;
1285 
1286 	/* Probe all the enumerators to find the best match. */
1287 	best_enum = NULL;
1288 	best = 0;
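	/*
	 * A probe routine returns zero or a negative value when it finds
	 * hardware it can drive; positive values are skipped below, and
	 * among the rest the enumerator whose return value is closest to
	 * zero wins.
	 */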
1289 	SLIST_FOREACH(enumerator, &enumerators, apic_next) {
1290 		retval = enumerator->apic_probe();
1291 		if (retval > 0)
1292 			continue;
1293 		if (best_enum == NULL || best < retval) {
1294 			best_enum = enumerator;
1295 			best = retval;
1296 		}
1297 	}
1298 	if (best_enum == NULL) {
1299 		if (bootverbose)
1300 			printf("APIC: Could not find any APICs.\n");
1301 		return;
1302 	}
1303 
1304 	if (bootverbose)
1305 		printf("APIC: Using the %s enumerator.\n",
1306 		    best_enum->apic_name);
1307 
1308 #ifndef __amd64__
1309 	/*
1310 	 * To work around an erratum, we disable the local APIC on some
1311 	 * CPUs during early startup.  We need to turn the local APIC back
1312 	 * on for such CPUs now.
1313 	 */
1314 	if (cpu == CPU_686 && cpu_vendor_id == CPU_VENDOR_INTEL &&
1315 	    (cpu_id & 0xff0) == 0x610) {
1316 		apic_base = rdmsr(MSR_APICBASE);
1317 		apic_base |= APICBASE_ENABLED;
1318 		wrmsr(MSR_APICBASE, apic_base);
1319 	}
1320 #endif
1321 
1322 	/* Probe the CPUs in the system. */
1323 	retval = best_enum->apic_probe_cpus();
1324 	if (retval != 0)
1325 		printf("%s: Failed to probe CPUs: returned %d\n",
1326 		    best_enum->apic_name, retval);
1327 
1328 }
1329 SYSINIT(apic_init, SI_SUB_TUNABLES - 1, SI_ORDER_SECOND, apic_init, NULL);
1330 
1331 /*
1332  * Setup the local APIC.  We have to do this prior to starting up the APs
1333  * in the SMP case.
1334  */
1335 static void
1336 apic_setup_local(void *dummy __unused)
1337 {
1338 	int retval;
1339 
1340 	if (best_enum == NULL)
1341 		return;
1342 
1343 	/* Initialize the local APIC. */
1344 	retval = best_enum->apic_setup_local();
1345 	if (retval != 0)
1346 		printf("%s: Failed to setup the local APIC: returned %d\n",
1347 		    best_enum->apic_name, retval);
1348 }
1349 SYSINIT(apic_setup_local, SI_SUB_CPU, SI_ORDER_SECOND, apic_setup_local, NULL);
1350 
1351 /*
1352  * Setup the I/O APICs.
1353  */
1354 static void
1355 apic_setup_io(void *dummy __unused)
1356 {
1357 	int retval;
1358 
1359 	if (best_enum == NULL)
1360 		return;
1361 	retval = best_enum->apic_setup_io();
1362 	if (retval != 0)
1363 		printf("%s: Failed to setup I/O APICs: returned %d\n",
1364 		    best_enum->apic_name, retval);
1365 
1366 #ifdef XEN
1367 	return;
1368 #endif
1369 	/*
1370 	 * Finish setting up the local APIC on the BSP once we know how to
1371 	 * properly program the LINT pins.
1372 	 */
1373 	lapic_setup(1);
1374 	intr_register_pic(&lapic_pic);
1375 	if (bootverbose)
1376 		lapic_dump("BSP");
1377 
1378 	/* Enable the MSI "pic". */
1379 	msi_init();
1380 }
1381 SYSINIT(apic_setup_io, SI_SUB_INTR, SI_ORDER_SECOND, apic_setup_io, NULL);
1382 
1383 #ifdef SMP
1384 /*
1385  * Inter-processor interrupt functions.  The lapic_ipi_*() functions are
1386  * private to the MD code.  The public interface for the rest of the
1387  * kernel is defined in mp_machdep.c.
1388  */
1389 int
1390 lapic_ipi_wait(int delay)
1391 {
1392 	int x, incr;
1393 
1394 	/*
1395 	 * Wait up to 'delay' loops for the IPI to be sent.  This is highly bogus
1396 	 * since this is sensitive to CPU clock speed.  If delay is
1397 	 * -1, we wait forever.
1398 	 */
1399 	if (delay == -1) {
1400 		incr = 0;
1401 		delay = 1;
1402 	} else
1403 		incr = 1;
1404 	for (x = 0; x < delay; x += incr) {
1405 		if ((lapic->icr_lo & APIC_DELSTAT_MASK) == APIC_DELSTAT_IDLE)
1406 			return (1);
1407 		ia32_pause();
1408 	}
1409 	return (0);
1410 }
1411 
1412 void
1413 lapic_ipi_raw(register_t icrlo, u_int dest)
1414 {
1415 	register_t value, saveintr;
1416 
1417 	/* XXX: Need more sanity checking of icrlo? */
1418 	KASSERT(lapic != NULL, ("%s called too early", __func__));
1419 	KASSERT((dest & ~(APIC_ID_MASK >> APIC_ID_SHIFT)) == 0,
1420 	    ("%s: invalid dest field", __func__));
1421 	KASSERT((icrlo & APIC_ICRLO_RESV_MASK) == 0,
1422 	    ("%s: reserved bits set in ICR LO register", __func__));
1423 
1424 	/* Set destination in ICR HI register if it is being used. */
1425 	saveintr = intr_disable();
1426 	if ((icrlo & APIC_DEST_MASK) == APIC_DEST_DESTFLD) {
1427 		value = lapic->icr_hi;
1428 		value &= ~APIC_ID_MASK;
1429 		value |= dest << APIC_ID_SHIFT;
1430 		lapic->icr_hi = value;
1431 	}
1432 
1433 	/* Program the contents of the IPI and dispatch it. */
1434 	value = lapic->icr_lo;
1435 	value &= APIC_ICRLO_RESV_MASK;
1436 	value |= icrlo;
1437 	lapic->icr_lo = value;
1438 	intr_restore(saveintr);
1439 }
1440 
1441 #define	BEFORE_SPIN	1000000
1442 #ifdef DETECT_DEADLOCK
1443 #define	AFTER_SPIN	1000
1444 #endif
1445 
1446 void
1447 lapic_ipi_vectored(u_int vector, int dest)
1448 {
1449 	register_t icrlo, destfield;
1450 
1451 	KASSERT((vector & ~APIC_VECTOR_MASK) == 0,
1452 	    ("%s: invalid vector %d", __func__, vector));
1453 
1454 	icrlo = APIC_DESTMODE_PHY | APIC_TRIGMOD_EDGE;
1455 
1456 	/*
1457 	 * IPI_STOP_HARD is just a "fake" vector used to send an NMI.
1458 	 * Use the special NMI delivery rules if it is passed; otherwise
1459 	 * specify the requested vector.
1460 	 */
1461 	if (vector == IPI_STOP_HARD)
1462 		icrlo |= APIC_DELMODE_NMI | APIC_LEVEL_ASSERT;
1463 	else
1464 		icrlo |= vector | APIC_DELMODE_FIXED | APIC_LEVEL_DEASSERT;
1465 	destfield = 0;
1466 	switch (dest) {
1467 	case APIC_IPI_DEST_SELF:
1468 		icrlo |= APIC_DEST_SELF;
1469 		break;
1470 	case APIC_IPI_DEST_ALL:
1471 		icrlo |= APIC_DEST_ALLISELF;
1472 		break;
1473 	case APIC_IPI_DEST_OTHERS:
1474 		icrlo |= APIC_DEST_ALLESELF;
1475 		break;
1476 	default:
1477 		KASSERT((dest & ~(APIC_ID_MASK >> APIC_ID_SHIFT)) == 0,
1478 		    ("%s: invalid destination 0x%x", __func__, dest));
1479 		destfield = dest;
1480 	}
1481 
1482 	/* Wait for an earlier IPI to finish. */
1483 	if (!lapic_ipi_wait(BEFORE_SPIN)) {
1484 		if (panicstr != NULL)
1485 			return;
1486 		else
1487 			panic("APIC: Previous IPI is stuck");
1488 	}
1489 
1490 	lapic_ipi_raw(icrlo, destfield);
1491 
1492 #ifdef DETECT_DEADLOCK
1493 	/* Wait for IPI to be delivered. */
1494 	if (!lapic_ipi_wait(AFTER_SPIN)) {
1495 #ifdef needsattention
1496 		/*
1497 		 * XXX FIXME:
1498 		 *
1499 		 * The above function waits for the message to actually be
1500 		 * delivered.  It breaks out after an arbitrary timeout
1501 		 * since the message should eventually be delivered (at
1502 		 * least in theory) and that if it wasn't we would catch
1503 		 * least in theory), and if it wasn't we would catch
1504 		 * sent.
1505 		 *
1506 		 * We could skip this wait entirely, EXCEPT it probably
1507 		 * protects us from other routines that assume that the
1508 		 * message was delivered and acted upon when this function
1509 		 * returns.
1510 		 */
1511 		printf("APIC: IPI might be stuck\n");
1512 #else /* !needsattention */
1513 		/* Wait without a timeout until the message is sent. */
1514 		while (lapic->icr_lo & APIC_DELSTAT_PEND)
1515 			ia32_pause();
1516 #endif /* needsattention */
1517 	}
1518 #endif /* DETECT_DEADLOCK */
1519 }
1520 #endif /* SMP */
1521