xref: /freebsd/sys/x86/x86/intr_machdep.c (revision b0b1dbdd)
/*-
 * Copyright (c) 2003 John Baldwin <jhb@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * Machine-dependent interrupt code for x86.  On x86 we have to deal
 * with different PICs, so we use the passed-in vector to look up the
 * interrupt source associated with that vector.  The interrupt source
 * describes which PIC the source belongs to and includes methods to
 * handle that source.
 */

#include "opt_atpic.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/ktr.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/sx.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <machine/clock.h>
#include <machine/intr_machdep.h>
#include <machine/smp.h>
#ifdef DDB
#include <ddb/ddb.h>
#endif

#ifndef DEV_ATPIC
#include <machine/segments.h>
#include <machine/frame.h>
#include <dev/ic/i8259.h>
#include <x86/isa/icu.h>
#include <isa/isareg.h>
#endif

#define	MAX_STRAY_LOG	5

typedef void (*mask_fn)(void *);

static int intrcnt_index;
static struct intsrc *interrupt_sources[NUM_IO_INTS];
static struct sx intrsrc_lock;
static struct mtx intrpic_lock;
static struct mtx intrcnt_lock;
static TAILQ_HEAD(pics_head, pic) pics;

#if defined(SMP) && !defined(EARLY_AP_STARTUP)
static int assign_cpu;
#endif

u_long intrcnt[INTRCNT_COUNT];
char intrnames[INTRCNT_COUNT * (MAXCOMLEN + 1)];
size_t sintrcnt = sizeof(intrcnt);
size_t sintrnames = sizeof(intrnames);

static int	intr_assign_cpu(void *arg, int cpu);
static void	intr_disable_src(void *arg);
static void	intr_init(void *__dummy);
static int	intr_pic_registered(struct pic *pic);
static void	intrcnt_setname(const char *name, int index);
static void	intrcnt_updatename(struct intsrc *is);
static void	intrcnt_register(struct intsrc *is);

static int
intr_pic_registered(struct pic *pic)
{
	struct pic *p;

	TAILQ_FOREACH(p, &pics, pics) {
		if (p == pic)
			return (1);
	}
	return (0);
}

/*
 * Register a new interrupt controller (PIC).  This is to support suspend
 * and resume where we suspend/resume controllers rather than individual
 * sources.  This also allows controllers with no active sources (such as
 * 8259As in a system using the APICs) to participate in suspend and resume.
 */
int
intr_register_pic(struct pic *pic)
{
	int error;

	mtx_lock(&intrpic_lock);
	if (intr_pic_registered(pic))
		error = EBUSY;
	else {
		TAILQ_INSERT_TAIL(&pics, pic, pics);
		error = 0;
	}
	mtx_unlock(&intrpic_lock);
	return (error);
}
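
/*
 * Illustrative sketch (not part of the original file): a hedged example of
 * how a hypothetical interrupt controller driver might register itself so
 * that it participates in suspend and resume.  The "foo_pic" structure and
 * the foo_* handlers are invented names; only intr_register_pic() and the
 * struct pic hooks referenced elsewhere in this file are real.
 *
 *	static struct pic foo_pic = {
 *		.pic_enable_source = foo_enable_source,
 *		.pic_disable_source = foo_disable_source,
 *		.pic_eoi_source = foo_eoi_source,
 *		.pic_enable_intr = foo_enable_intr,
 *		.pic_disable_intr = foo_disable_intr,
 *		.pic_vector = foo_vector,
 *		.pic_suspend = foo_suspend,
 *		.pic_resume = foo_resume,
 *	};
 *
 *	error = intr_register_pic(&foo_pic);
 *	(EBUSY is returned if the PIC was already registered.)
 */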

/*
 * Register a new interrupt source with the global interrupt system.
 * Global interrupts must be disabled when this function is called.
 */
int
intr_register_source(struct intsrc *isrc)
{
	int error, vector;

	KASSERT(intr_pic_registered(isrc->is_pic), ("unregistered PIC"));
	vector = isrc->is_pic->pic_vector(isrc);
	if (interrupt_sources[vector] != NULL)
		return (EEXIST);
	error = intr_event_create(&isrc->is_event, isrc, 0, vector,
	    intr_disable_src, (mask_fn)isrc->is_pic->pic_enable_source,
	    (mask_fn)isrc->is_pic->pic_eoi_source, intr_assign_cpu, "irq%d:",
	    vector);
	if (error)
		return (error);
	sx_xlock(&intrsrc_lock);
	if (interrupt_sources[vector] != NULL) {
		sx_xunlock(&intrsrc_lock);
		intr_event_destroy(isrc->is_event);
		return (EEXIST);
	}
	intrcnt_register(isrc);
	interrupt_sources[vector] = isrc;
	isrc->is_handlers = 0;
	sx_xunlock(&intrsrc_lock);
	return (0);
}
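
/*
 * Illustrative sketch (not part of the original file): registering a source.
 * A hypothetical driver would embed or allocate a struct intsrc (the name
 * "foo_isrc" is invented), point is_pic at its already-registered PIC, and
 * hand the source to intr_register_source() while interrupts are still
 * disabled.  EEXIST means the vector reported by pic_vector() is already
 * owned by another source.
 *
 *	foo_isrc.is_pic = &foo_pic;
 *	error = intr_register_source(&foo_isrc);
 */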

struct intsrc *
intr_lookup_source(int vector)
{

	return (interrupt_sources[vector]);
}

int
intr_add_handler(const char *name, int vector, driver_filter_t filter,
    driver_intr_t handler, void *arg, enum intr_type flags, void **cookiep)
{
	struct intsrc *isrc;
	int error;

	isrc = intr_lookup_source(vector);
	if (isrc == NULL)
		return (EINVAL);
	error = intr_event_add_handler(isrc->is_event, name, filter, handler,
	    arg, intr_priority(flags), flags, cookiep);
	if (error == 0) {
		sx_xlock(&intrsrc_lock);
		intrcnt_updatename(isrc);
		isrc->is_handlers++;
		if (isrc->is_handlers == 1) {
			isrc->is_pic->pic_enable_intr(isrc);
			isrc->is_pic->pic_enable_source(isrc);
		}
		sx_xunlock(&intrsrc_lock);
	}
	return (error);
}
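
/*
 * Illustrative sketch (not part of the original file): attaching a handler
 * to a registered vector.  The names foo_filter/foo_ithread, the softc
 * pointer "sc", and the vector number are invented; the call itself matches
 * intr_add_handler() above.  Note that the first handler added to a source
 * is what causes the source to be enabled and unmasked.
 *
 *	void *cookie;
 *
 *	error = intr_add_handler("foo0", 17, foo_filter, foo_ithread, sc,
 *	    INTR_TYPE_MISC | INTR_MPSAFE, &cookie);
 *
 * Tearing down with intr_remove_handler(cookie) masks and disables the
 * source again once the last handler is gone.
 */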

int
intr_remove_handler(void *cookie)
{
	struct intsrc *isrc;
	int error;

	isrc = intr_handler_source(cookie);
	error = intr_event_remove_handler(cookie);
	if (error == 0) {
		sx_xlock(&intrsrc_lock);
		isrc->is_handlers--;
		if (isrc->is_handlers == 0) {
			isrc->is_pic->pic_disable_source(isrc, PIC_NO_EOI);
			isrc->is_pic->pic_disable_intr(isrc);
		}
		intrcnt_updatename(isrc);
		sx_xunlock(&intrsrc_lock);
	}
	return (error);
}

int
intr_config_intr(int vector, enum intr_trigger trig, enum intr_polarity pol)
{
	struct intsrc *isrc;

	isrc = intr_lookup_source(vector);
	if (isrc == NULL)
		return (EINVAL);
	return (isrc->is_pic->pic_config_intr(isrc, trig, pol));
}

static void
intr_disable_src(void *arg)
{
	struct intsrc *isrc;

	isrc = arg;
	isrc->is_pic->pic_disable_source(isrc, PIC_EOI);
}

void
intr_execute_handlers(struct intsrc *isrc, struct trapframe *frame)
{
	struct intr_event *ie;
	int vector;

	/*
	 * We count software interrupts when we process them.  The
	 * code here follows previous practice, but there's an
	 * argument for counting hardware interrupts when they're
	 * processed too.
	 */
	(*isrc->is_count)++;
	PCPU_INC(cnt.v_intr);

	ie = isrc->is_event;

	/*
	 * XXX: We assume that IRQ 0 is only used for the ISA timer
	 * device (clk).
	 */
	vector = isrc->is_pic->pic_vector(isrc);
	if (vector == 0)
		clkintr_pending = 1;

	/*
	 * For stray interrupts, mask and EOI the source, bump the
	 * stray count, and log the condition.
	 */
	if (intr_event_handle(ie, frame) != 0) {
		isrc->is_pic->pic_disable_source(isrc, PIC_EOI);
		(*isrc->is_straycount)++;
		if (*isrc->is_straycount < MAX_STRAY_LOG)
			log(LOG_ERR, "stray irq%d\n", vector);
		else if (*isrc->is_straycount == MAX_STRAY_LOG)
			log(LOG_CRIT,
			    "too many stray irq %d's: not logging anymore\n",
			    vector);
	}
}

void
intr_resume(bool suspend_cancelled)
{
	struct pic *pic;

#ifndef DEV_ATPIC
	atpic_reset();
#endif
	mtx_lock(&intrpic_lock);
	TAILQ_FOREACH(pic, &pics, pics) {
		if (pic->pic_resume != NULL)
			pic->pic_resume(pic, suspend_cancelled);
	}
	mtx_unlock(&intrpic_lock);
}

void
intr_suspend(void)
{
	struct pic *pic;

	mtx_lock(&intrpic_lock);
	TAILQ_FOREACH_REVERSE(pic, &pics, pics_head, pics) {
		if (pic->pic_suspend != NULL)
			pic->pic_suspend(pic);
	}
	mtx_unlock(&intrpic_lock);
}

static int
intr_assign_cpu(void *arg, int cpu)
{
#ifdef SMP
	struct intsrc *isrc;
	int error;

#ifdef EARLY_AP_STARTUP
	MPASS(mp_ncpus == 1 || smp_started);
	if (cpu != NOCPU) {
#else
	/*
	 * Don't do anything during early boot.  We will pick up the
	 * assignment once the APs are started.
	 */
	if (assign_cpu && cpu != NOCPU) {
#endif
		isrc = arg;
		sx_xlock(&intrsrc_lock);
		error = isrc->is_pic->pic_assign_cpu(isrc, cpu_apic_ids[cpu]);
		sx_xunlock(&intrsrc_lock);
	} else
		error = 0;
	return (error);
#else
	return (EOPNOTSUPP);
#endif
}

static void
intrcnt_setname(const char *name, int index)
{

	snprintf(intrnames + (MAXCOMLEN + 1) * index, MAXCOMLEN + 1, "%-*s",
	    MAXCOMLEN, name);
}

static void
intrcnt_updatename(struct intsrc *is)
{

	intrcnt_setname(is->is_event->ie_fullname, is->is_index);
}

static void
intrcnt_register(struct intsrc *is)
{
	char straystr[MAXCOMLEN + 1];

	KASSERT(is->is_event != NULL, ("%s: isrc with no event", __func__));
	mtx_lock_spin(&intrcnt_lock);
	is->is_index = intrcnt_index;
	intrcnt_index += 2;
	snprintf(straystr, MAXCOMLEN + 1, "stray irq%d",
	    is->is_pic->pic_vector(is));
	intrcnt_updatename(is);
	is->is_count = &intrcnt[is->is_index];
	intrcnt_setname(straystr, is->is_index + 1);
	is->is_straycount = &intrcnt[is->is_index + 1];
	mtx_unlock_spin(&intrcnt_lock);
}

void
intrcnt_add(const char *name, u_long **countp)
{

	mtx_lock_spin(&intrcnt_lock);
	*countp = &intrcnt[intrcnt_index];
	intrcnt_setname(name, intrcnt_index);
	intrcnt_index++;
	mtx_unlock_spin(&intrcnt_lock);
}
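
/*
 * Illustrative sketch (not part of the original file): intrcnt_add() hands
 * out a single intrcnt[]/intrnames[] slot for counters that are not tied to
 * an interrupt source (interrupt sources get two adjacent slots from
 * intrcnt_register() above: one for handled interrupts, one for strays).
 * The counter name below is invented.
 *
 *	static u_long *foo_count;
 *
 *	intrcnt_add("foo events", &foo_count);
 *	...
 *	(*foo_count)++;
 */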

static void
intr_init(void *dummy __unused)
{

	intrcnt_setname("???", 0);
	intrcnt_index = 1;
	TAILQ_INIT(&pics);
	mtx_init(&intrpic_lock, "intrpic", NULL, MTX_DEF);
	sx_init(&intrsrc_lock, "intrsrc");
	mtx_init(&intrcnt_lock, "intrcnt", NULL, MTX_SPIN);
}
SYSINIT(intr_init, SI_SUB_INTR, SI_ORDER_FIRST, intr_init, NULL);

static void
intr_init_final(void *dummy __unused)
{

	/*
	 * Enable interrupts on the BSP after all of the interrupt
	 * controllers are initialized.  Device interrupts are still
	 * disabled in the interrupt controllers until interrupt
	 * handlers are registered.  Interrupts are enabled on each AP
	 * after their first context switch.
	 */
	enable_intr();
}
SYSINIT(intr_init_final, SI_SUB_INTR, SI_ORDER_ANY, intr_init_final, NULL);

#ifndef DEV_ATPIC
/* Initialize the two 8259A's to a known-good shutdown state. */
void
atpic_reset(void)
{

	outb(IO_ICU1, ICW1_RESET | ICW1_IC4);
	outb(IO_ICU1 + ICU_IMR_OFFSET, IDT_IO_INTS);
	outb(IO_ICU1 + ICU_IMR_OFFSET, IRQ_MASK(ICU_SLAVEID));
	outb(IO_ICU1 + ICU_IMR_OFFSET, MASTER_MODE);
	outb(IO_ICU1 + ICU_IMR_OFFSET, 0xff);
	outb(IO_ICU1, OCW3_SEL | OCW3_RR);

	outb(IO_ICU2, ICW1_RESET | ICW1_IC4);
	outb(IO_ICU2 + ICU_IMR_OFFSET, IDT_IO_INTS + 8);
	outb(IO_ICU2 + ICU_IMR_OFFSET, ICU_SLAVEID);
	outb(IO_ICU2 + ICU_IMR_OFFSET, SLAVE_MODE);
	outb(IO_ICU2 + ICU_IMR_OFFSET, 0xff);
	outb(IO_ICU2, OCW3_SEL | OCW3_RR);
}
#endif

/* Add a description to an active interrupt handler. */
int
intr_describe(u_int vector, void *ih, const char *descr)
{
	struct intsrc *isrc;
	int error;

	isrc = intr_lookup_source(vector);
	if (isrc == NULL)
		return (EINVAL);
	error = intr_event_describe_handler(isrc->is_event, ih, descr);
	if (error)
		return (error);
	intrcnt_updatename(isrc);
	return (0);
}
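
/*
 * Illustrative sketch (not part of the original file): tagging a handler
 * with a description so the per-IRQ counter name (as exported through
 * intrnames[] and shown by tools such as vmstat -i) reflects what the
 * handler does.  The vector, cookie, and text are invented.
 *
 *	error = intr_describe(17, cookie, "rx0");
 */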

void
intr_reprogram(void)
{
	struct intsrc *is;
	int v;

	sx_xlock(&intrsrc_lock);
	for (v = 0; v < NUM_IO_INTS; v++) {
		is = interrupt_sources[v];
		if (is == NULL)
			continue;
		if (is->is_pic->pic_reprogram_pin != NULL)
			is->is_pic->pic_reprogram_pin(is);
	}
	sx_xunlock(&intrsrc_lock);
}

#ifdef DDB
/*
 * Dump data about interrupt handlers
 */
DB_SHOW_COMMAND(irqs, db_show_irqs)
{
	struct intsrc **isrc;
	int i, verbose;

	if (strcmp(modif, "v") == 0)
		verbose = 1;
	else
		verbose = 0;
	isrc = interrupt_sources;
	for (i = 0; i < NUM_IO_INTS && !db_pager_quit; i++, isrc++)
		if (*isrc != NULL)
			db_dump_intr_event((*isrc)->is_event, verbose);
}
#endif

#ifdef SMP
/*
 * Support for balancing interrupt sources across CPUs.  For now we just
 * allocate CPUs round-robin.
 */

cpuset_t intr_cpus = CPUSET_T_INITIALIZER(0x1);
static int current_cpu;

/*
 * Return the CPU that the next interrupt source should use.  For now
 * this just returns the next local APIC according to round-robin.
 */
u_int
intr_next_cpu(void)
{
	u_int apic_id;

#ifdef EARLY_AP_STARTUP
	MPASS(mp_ncpus == 1 || smp_started);
#else
	/* Leave all interrupts on the BSP during boot. */
	if (!assign_cpu)
		return (PCPU_GET(apic_id));
#endif

	mtx_lock_spin(&icu_lock);
	apic_id = cpu_apic_ids[current_cpu];
	do {
		current_cpu++;
		if (current_cpu > mp_maxid)
			current_cpu = 0;
	} while (!CPU_ISSET(current_cpu, &intr_cpus));
	mtx_unlock_spin(&icu_lock);
	return (apic_id);
}

/* Attempt to bind the specified IRQ to the specified CPU. */
int
intr_bind(u_int vector, u_char cpu)
{
	struct intsrc *isrc;

	isrc = intr_lookup_source(vector);
	if (isrc == NULL)
		return (EINVAL);
	return (intr_event_bind(isrc->is_event, cpu));
}
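
/*
 * Illustrative sketch (not part of the original file): pinning a vector to
 * a particular CPU instead of letting the round-robin shuffle choose one.
 * The numbers are invented; intr_bind() forwards to intr_event_bind(),
 * which calls back into the intr_assign_cpu() method registered above.
 *
 *	error = intr_bind(17, 2);	(bind vector 17's handlers to CPU 2)
 */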

/*
 * Add a CPU to our mask of valid CPUs that can be destinations of
 * interrupts.
 */
void
intr_add_cpu(u_int cpu)
{

	if (cpu >= MAXCPU)
		panic("%s: Invalid CPU ID", __func__);
	if (bootverbose)
		printf("INTR: Adding local APIC %d as a target\n",
		    cpu_apic_ids[cpu]);

	CPU_SET(cpu, &intr_cpus);
}

#ifndef EARLY_AP_STARTUP
/*
 * Distribute all the interrupt sources among the available CPUs once the
 * APs have been launched.
 */
static void
intr_shuffle_irqs(void *arg __unused)
{
	struct intsrc *isrc;
	int i;

	/* Don't bother on UP. */
	if (mp_ncpus == 1)
		return;

	/* Round-robin assign a CPU to each enabled source. */
	sx_xlock(&intrsrc_lock);
	assign_cpu = 1;
	for (i = 0; i < NUM_IO_INTS; i++) {
		isrc = interrupt_sources[i];
		if (isrc != NULL && isrc->is_handlers > 0) {
			/*
			 * If this event is already bound to a CPU,
			 * then assign the source to that CPU instead
			 * of picking one via round-robin.  Note that
			 * this is careful to only advance the
			 * round-robin if the CPU assignment succeeds.
			 */
			if (isrc->is_event->ie_cpu != NOCPU)
				(void)isrc->is_pic->pic_assign_cpu(isrc,
				    cpu_apic_ids[isrc->is_event->ie_cpu]);
			else if (isrc->is_pic->pic_assign_cpu(isrc,
				cpu_apic_ids[current_cpu]) == 0)
				(void)intr_next_cpu();

		}
	}
	sx_xunlock(&intrsrc_lock);
}
SYSINIT(intr_shuffle_irqs, SI_SUB_SMP, SI_ORDER_SECOND, intr_shuffle_irqs,
    NULL);
#endif
#else
/*
 * Always route interrupts to the current processor in the UP case.
 */
u_int
intr_next_cpu(void)
{

	return (PCPU_GET(apic_id));
}
#endif