/*	$NetBSD: intr.c,v 1.34 2022/02/16 23:49:27 riastradh Exp $	*/

/*-
 * Copyright (c) 2007 Michael Lorenz
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#define __INTR_PRIVATE

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.34 2022/02/16 23:49:27 riastradh Exp $");

#ifdef _KERNEL_OPT
#include "opt_interrupt.h"
#include "opt_multiprocessor.h"
#include "opt_pic.h"
#include "opt_ppcarch.h"
#endif

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/interrupt.h>

#include <powerpc/psl.h>
#include <powerpc/pic/picvar.h>

#if defined(PIC_I8259) || defined(PIC_PREPIVR)
#include <machine/isa_machdep.h>
#endif

#ifdef MULTIPROCESSOR
#include <powerpc/pic/ipivar.h>
#endif

#ifdef __HAVE_FAST_SOFTINTS
#include <powerpc/softint.h>
#endif

#define MAX_PICS	8	/* 8 PICs ought to be enough for everyone */

#define PIC_VIRQ_LEGAL_P(x)	((u_int)(x) < NVIRQ)

#if defined(PPC_IBM4XX) && !defined(PPC_IBM440)
/* eieio is implemented as sync */
#define REORDER_PROTECT()	__asm volatile("sync" ::: "memory")
#else
#define REORDER_PROTECT()	__asm volatile("sync; eieio" ::: "memory")
#endif

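/*
 * Global PIC bookkeeping.  Each registered PIC owns the contiguous range
 * [pic_intrbase, pic_intrbase + pic_numintrs) of the global hwirq space.
 * virq_map[] maps a hwirq to its virtual IRQ (virq); an entry of 0 means
 * "unmapped", so virq 0 is never handed out.  virq_mask holds the
 * still-unallocated virq bits, and imask[ipl] is the set of virqs blocked
 * at each priority level.
 */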
struct pic_ops *pics[MAX_PICS];
int num_pics = 0;
int max_base = 0;
uint8_t virq_map[NIRQ];
imask_t virq_mask = HWIRQ_MASK;
static imask_t imask[NIPL];
int primary_pic = 0;

static int fakeintr(void *);
static int mapirq(int);
static void intr_calculatemasks(void);
static struct pic_ops *find_pic_by_hwirq(int);

static struct intr_source intrsources[NVIRQ];

void
pic_init(void)
{
        /* everything is in bss, no reason to zero it. */
}

int
pic_add(struct pic_ops *pic)
{

        if (num_pics >= MAX_PICS)
                return -1;

        pics[num_pics] = pic;
        pic->pic_intrbase = max_base;
        max_base += pic->pic_numintrs;
        num_pics++;

        return pic->pic_intrbase;
}

void
pic_finish_setup(void)
{
        for (size_t i = 0; i < num_pics; i++) {
                struct pic_ops * const pic = pics[i];
                if (pic->pic_finish_setup != NULL)
                        pic->pic_finish_setup(pic);
        }
}

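/*
 * Return the PIC whose hwirq range contains the given hwirq, or NULL if
 * no registered PIC claims it.
 */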
static struct pic_ops *
find_pic_by_hwirq(int hwirq)
{
        for (u_int base = 0; base < num_pics; base++) {
                struct pic_ops * const pic = pics[base];
                if (pic->pic_intrbase <= hwirq
                    && hwirq < pic->pic_intrbase + pic->pic_numintrs) {
                        return pic;
                }
        }
        return NULL;
}

static int
fakeintr(void *arg)
{

        return 0;
}

/*
 * Register an interrupt handler.
 */
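/*
 * Typical driver usage, as a minimal sketch ("sc", mydev_intr() and the
 * hwirq value are hypothetical driver state, not part of this file):
 *
 *	sc->sc_ih = intr_establish_xname(hwirq, IST_LEVEL, IPL_BIO,
 *	    mydev_intr, sc, device_xname(self));
 *
 * The returned cookie identifies the handler and is the argument later
 * given to intr_disestablish().
 */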
void *
intr_establish(int hwirq, int type, int ipl, int (*ih_fun)(void *),
    void *ih_arg)
{
        return intr_establish_xname(hwirq, type, ipl, ih_fun, ih_arg, NULL);
}

void *
intr_establish_xname(int hwirq, int type, int ipl, int (*ih_fun)(void *),
    void *ih_arg, const char *xname)
{
        struct intrhand **p, *q, *ih;
        struct pic_ops *pic;
        static struct intrhand fakehand;
        int maxipl = ipl;

        if (maxipl == IPL_NONE)
                maxipl = IPL_HIGH;

        if (hwirq >= max_base) {
                panic("%s: bogus IRQ %d, max is %d", __func__, hwirq,
                    max_base - 1);
        }

        pic = find_pic_by_hwirq(hwirq);
        if (pic == NULL) {
                panic("%s: cannot find a pic for IRQ %d", __func__, hwirq);
        }

        const int virq = mapirq(hwirq);

        /* no point in sleeping unless someone can free memory. */
        ih = kmem_intr_alloc(sizeof(*ih), cold ? KM_NOSLEEP : KM_SLEEP);
        if (ih == NULL)
                panic("intr_establish: can't allocate handler info");

        if (!PIC_VIRQ_LEGAL_P(virq) || type == IST_NONE)
                panic("intr_establish: bogus irq (%d) or type (%d)",
                    hwirq, type);

        struct intr_source * const is = &intrsources[virq];

        switch (is->is_type) {
        case IST_NONE:
                is->is_type = type;
                break;
        case IST_EDGE_FALLING:
        case IST_EDGE_RISING:
        case IST_LEVEL_LOW:
        case IST_LEVEL_HIGH:
                if (type == is->is_type)
                        break;
                /* FALLTHROUGH */
        case IST_PULSE:
                if (type != IST_NONE)
                        panic("intr_establish: can't share %s with %s",
                            intr_typename(is->is_type),
                            intr_typename(type));
                break;
        }
        if (is->is_hand == NULL) {
                snprintf(is->is_intrid, sizeof(is->is_intrid), "%s irq %d",
                    pic->pic_name, is->is_hwirq);
                snprintf(is->is_evname, sizeof(is->is_evname), "irq %d",
                    is->is_hwirq);
                evcnt_attach_dynamic(&is->is_ev, EVCNT_TYPE_INTR, NULL,
                    pic->pic_name, is->is_evname);
        }

        /*
         * Figure out where to put the handler.
         * This is O(N^2), but we want to preserve the order, and N is
         * generally small.
         */
        for (p = &is->is_hand; (q = *p) != NULL; p = &q->ih_next) {
                maxipl = uimax(maxipl, q->ih_ipl);
        }

        /*
         * Actually install a fake handler momentarily, since we might be doing
         * this with interrupts enabled and don't want the real routine called
         * until masking is set up.
         */
        fakehand.ih_ipl = ipl;
        fakehand.ih_fun = fakeintr;
        *p = &fakehand;

        /*
         * Poke the real handler in now.
         */
        ih->ih_fun = ih_fun;
        ih->ih_arg = ih_arg;
        ih->ih_next = NULL;
        ih->ih_ipl = ipl;
        ih->ih_virq = virq;
        strlcpy(ih->ih_xname, xname != NULL ? xname : "unknown",
            sizeof(ih->ih_xname));
        *p = ih;

        if (pic->pic_establish_irq != NULL)
                pic->pic_establish_irq(pic, hwirq - pic->pic_intrbase,
                    is->is_type, maxipl);

        /*
         * Remember the highest IPL used by this handler.
         */
        is->is_ipl = maxipl;

        /*
         * now that the handler is established we're actually ready to
         * calculate the masks
         */
        intr_calculatemasks();

        return ih;
}

void
dummy_pic_establish_intr(struct pic_ops *pic, int irq, int type, int pri)
{
}

/*
 * Deregister an interrupt handler.
 */
void
intr_disestablish(void *arg)
{
        struct intrhand * const ih = arg;
        const int virq = ih->ih_virq;
        struct intr_source * const is = &intrsources[virq];
        struct intrhand **p, **q;
        int maxipl = IPL_NONE;

        if (!PIC_VIRQ_LEGAL_P(virq))
                panic("intr_disestablish: bogus virq %d", virq);

        /*
         * Remove the handler from the chain.
         * This is O(n^2), too.
         */
        for (p = &is->is_hand, q = NULL; (*p) != NULL; p = &(*p)->ih_next) {
                struct intrhand * const tmp_ih = *p;
                if (tmp_ih == ih) {
                        q = p;
                } else {
                        maxipl = uimax(maxipl, tmp_ih->ih_ipl);
                }
        }
        if (q)
                *q = ih->ih_next;
        else
                panic("intr_disestablish: handler not registered");
        kmem_intr_free((void *)ih, sizeof(*ih));

        /*
         * Reset the IPL for this source now that we've removed a handler.
         */
        is->is_ipl = maxipl;

        intr_calculatemasks();

        if (is->is_hand == NULL) {
                is->is_type = IST_NONE;
                evcnt_detach(&is->is_ev);
                /*
                 * Make the virtual IRQ available again.
                 */
                virq_map[virq] = 0;
                virq_mask |= PIC_VIRQ_TO_MASK(virq);
        }
}

/*
 * Map an IRQ in the global hwirq space (0..max_base-1) to one of the
 * NVIRQ bits of the 32-bit virtual IRQ masks.
 */
static int
mapirq(int hwirq)
{
        struct pic_ops *pic;

        if (hwirq >= max_base)
                panic("invalid irq %d", hwirq);

        if ((pic = find_pic_by_hwirq(hwirq)) == NULL)
                panic("%s: cannot find PIC for HWIRQ %d", __func__, hwirq);

        if (virq_map[hwirq])
                return virq_map[hwirq];

        if (virq_mask == 0)
                panic("virq overflow");

        const int virq = PIC_VIRQ_MS_PENDING(virq_mask);
        struct intr_source * const is = intrsources + virq;

        virq_mask &= ~PIC_VIRQ_TO_MASK(virq);

        is->is_hwirq = hwirq;
        is->is_pic = pic;
        virq_map[hwirq] = virq;
#ifdef PIC_DEBUG
        printf("mapping hwirq %d to virq %d\n", hwirq, virq);
#endif
        return virq;
}

static const char * const intr_typenames[] = {
        [IST_NONE] = "none",
        [IST_PULSE] = "pulsed",
        [IST_EDGE_FALLING] = "falling edge triggered",
        [IST_EDGE_RISING] = "rising edge triggered",
        [IST_LEVEL_LOW] = "low level triggered",
        [IST_LEVEL_HIGH] = "high level triggered",
};

const char *
intr_typename(int type)
{
        KASSERT((unsigned int) type < __arraycount(intr_typenames));
        KASSERT(intr_typenames[type] != NULL);
        return intr_typenames[type];
}

/*
 * Recalculate the interrupt masks from scratch.
 * We could code special registry and deregistry versions of this function that
 * would be faster, but the code would be nastier, and we don't expect this to
 * happen very much anyway.
 */
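/*
 * Worked example: with virq 3's only handler at IPL_VM and virq 5's at
 * IPL_SCHED, the first pass sets virq 3's bit in newmask[IPL_VM] and
 * virq 5's bit in newmask[IPL_SCHED]; the hierarchy pass then folds each
 * level into all higher ones, so imask[IPL_SCHED] blocks both sources
 * while imask[IPL_VM] blocks only virq 3.
 */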
static void
intr_calculatemasks(void)
{
        imask_t newmask[NIPL];
        struct intr_source *is;
        struct intrhand *ih;
        int irq;

        for (u_int ipl = IPL_NONE; ipl < NIPL; ipl++) {
                newmask[ipl] = 0;
        }

        /* First, figure out which ipl each IRQ uses. */
        for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++) {
                for (ih = is->is_hand; ih != NULL; ih = ih->ih_next) {
                        newmask[ih->ih_ipl] |= PIC_VIRQ_TO_MASK(irq);
                }
        }

        /*
         * IPL_NONE is used for hardware interrupts that are never blocked,
         * and do not block anything else.
         */
        newmask[IPL_NONE] = 0;

        /*
         * strict hierarchy - all IPLs block everything blocked by any lower
         * IPL
         */
        for (u_int ipl = 1; ipl < NIPL; ipl++) {
                newmask[ipl] |= newmask[ipl - 1];
        }

#ifdef PIC_DEBUG
        for (u_int ipl = 0; ipl < NIPL; ipl++) {
                printf("%u: %08x -> %08x\n", ipl, imask[ipl], newmask[ipl]);
        }
#endif

        /*
         * Disable all interrupts.
         */
        for (u_int base = 0; base < num_pics; base++) {
                struct pic_ops * const pic = pics[base];
                for (u_int i = 0; i < pic->pic_numintrs; i++) {
                        pic->pic_disable_irq(pic, i);
                }
        }

        /*
         * Now that all interrupts are disabled, update the ipl masks.
         */
        for (u_int ipl = 0; ipl < NIPL; ipl++) {
                imask[ipl] = newmask[ipl];
        }

        /*
         * Lastly, enable IRQs actually in use.
         */
        for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++) {
                if (is->is_hand)
                        pic_enable_irq(is->is_hwirq);
        }
}

void
pic_enable_irq(int hwirq)
{
        struct pic_ops * const pic = find_pic_by_hwirq(hwirq);
        if (pic == NULL)
                panic("%s: bogus IRQ %d", __func__, hwirq);
        const int type = intrsources[virq_map[hwirq]].is_type;
        (*pic->pic_enable_irq)(pic, hwirq - pic->pic_intrbase, type);
}

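/*
 * Mark a hardware interrupt pending in ci_ipending without delivering it;
 * the update is done with PSL_EE clear so a nested interrupt cannot
 * intervene.
 */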
void
pic_mark_pending(int hwirq)
{
        struct cpu_info * const ci = curcpu();

        const int virq = virq_map[hwirq];
        if (virq == 0)
                printf("IRQ %d maps to 0\n", hwirq);

        const register_t msr = mfmsr();
        mtmsr(msr & ~PSL_EE);
        ci->ci_ipending |= PIC_VIRQ_TO_MASK(virq);
        mtmsr(msr);
}

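/*
 * Call every handler chained on this source.  Handlers registered at
 * IPL_VM are treated as MP-unsafe and run under the big kernel lock; all
 * others run unlocked.  The event count is bumped once per delivery, not
 * once per handler.
 */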
static void
intr_deliver(struct intr_source *is, int virq)
{
        bool locked = false;
        for (struct intrhand *ih = is->is_hand; ih != NULL; ih = ih->ih_next) {
                KASSERTMSG(ih->ih_fun != NULL,
                    "%s: irq %d, hwirq %d, is %p ih %p: "
                    "NULL interrupt handler!\n", __func__,
                    virq, is->is_hwirq, is, ih);
                if (ih->ih_ipl == IPL_VM) {
                        if (!locked) {
                                KERNEL_LOCK(1, NULL);
                                locked = true;
                        }
                } else if (locked) {
                        KERNEL_UNLOCK_ONE(NULL);
                        locked = false;
                }
                (*ih->ih_fun)(ih->ih_arg);
        }
        if (locked) {
                KERNEL_UNLOCK_ONE(NULL);
        }
        is->is_ev.ev_count++;
}

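/*
 * Replay interrupts that were marked pending while masked.  Runs with
 * PSL_EE clear except around the handler calls themselves, uses
 * ci_iactive to prevent recursion, and finishes by running any fast soft
 * interrupts that the saved priority allows.
 */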
void
pic_do_pending_int(void)
{
        struct cpu_info * const ci = curcpu();
        imask_t vpend;

        if (ci->ci_iactive)
                return;

        ci->ci_iactive = 1;

        const register_t emsr = mfmsr();
        const register_t dmsr = emsr & ~PSL_EE;

        KASSERT(emsr & PSL_EE);
        mtmsr(dmsr);

        const int pcpl = ci->ci_cpl;
#ifdef __HAVE_FAST_SOFTINTS
again:
#endif

        /* Deliver anything that is pending and now unmasked. */
        while ((vpend = (ci->ci_ipending & ~imask[pcpl])) != 0) {
                ci->ci_idepth++;
                KASSERT((PIC_VIRQ_TO_MASK(0) & ci->ci_ipending) == 0);

                /* Get most significant pending bit */
                const int virq = PIC_VIRQ_MS_PENDING(vpend);
                ci->ci_ipending &= ~PIC_VIRQ_TO_MASK(virq);

                struct intr_source * const is = &intrsources[virq];
                struct pic_ops * const pic = is->is_pic;

                splraise(is->is_ipl);
                mtmsr(emsr);
                intr_deliver(is, virq);
                mtmsr(dmsr);
                ci->ci_cpl = pcpl; /* Don't use splx... we are here already! */

                pic->pic_reenable_irq(pic, is->is_hwirq - pic->pic_intrbase,
                    is->is_type);
                ci->ci_idepth--;
        }

#ifdef __HAVE_FAST_SOFTINTS
        const u_int softints = ci->ci_data.cpu_softints &
            (IPL_SOFTMASK << pcpl);

        /* make sure there are no bits to screw with the line above */
        KASSERT((ci->ci_data.cpu_softints & ~IPL_SOFTMASK) == 0);

        if (__predict_false(softints != 0)) {
                ci->ci_cpl = IPL_HIGH;
                mtmsr(emsr);
                powerpc_softint(ci, pcpl,
                    (vaddr_t)__builtin_return_address(0));
                mtmsr(dmsr);
                ci->ci_cpl = pcpl;
                if (__predict_false(ci->ci_ipending & ~imask[pcpl]))
                        goto again;
        }
#endif

        ci->ci_iactive = 0;
        mtmsr(emsr);
}

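/*
 * Main loop for one PIC: fetch the asserted interrupt, then either mark
 * it pending and disable it (if masked at the current priority) or raise
 * the priority and deliver it with interrupts re-enabled.  A return of
 * 255 from pic_get_irq means nothing is asserted.
 */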
int
pic_handle_intr(void *cookie)
{
        struct pic_ops *pic = cookie;
        struct cpu_info *ci = curcpu();
        int picirq;

        picirq = pic->pic_get_irq(pic, PIC_GET_IRQ);
        if (picirq == 255)
                return 0;

        const register_t msr = mfmsr();
        const int pcpl = ci->ci_cpl;

        do {
                const int virq = virq_map[picirq + pic->pic_intrbase];

                KASSERT(virq != 0);
                KASSERT(picirq < pic->pic_numintrs);
                imask_t v_imen = PIC_VIRQ_TO_MASK(virq);
                struct intr_source * const is = &intrsources[virq];

                if ((imask[pcpl] & v_imen) != 0) {
                        ci->ci_ipending |= v_imen; /* Masked! Mark this as pending */
                        pic->pic_disable_irq(pic, picirq);
                } else {
                        /* this interrupt is no longer pending */
                        ci->ci_ipending &= ~v_imen;
                        ci->ci_idepth++;

                        splraise(is->is_ipl);
                        mtmsr(msr | PSL_EE);
                        intr_deliver(is, virq);
                        mtmsr(msr);
                        ci->ci_cpl = pcpl;

                        ci->ci_data.cpu_nintr++;
                        ci->ci_idepth--;
                }
                pic->pic_ack_irq(pic, picirq);
        } while ((picirq = pic->pic_get_irq(pic, PIC_GET_RECHECK)) != 255);

        mtmsr(msr | PSL_EE);
        splx(pcpl);	/* Process pendings. */
        mtmsr(msr);

        return 0;
}

void
pic_ext_intr(void)
{

        KASSERT(pics[primary_pic] != NULL);
        pic_handle_intr(pics[primary_pic]);
}

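/*
 * spl primitives.  splraise() never lowers the current priority and
 * returns the previous level for a later splx(); splx() and spllower()
 * set the new level and replay anything that became deliverable.  A
 * minimal sketch of the usual pairing:
 *
 *	const int s = splraise(IPL_VM);
 *	... touch state shared with an IPL_VM interrupt handler ...
 *	splx(s);
 */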
int
splraise(int ncpl)
{
        struct cpu_info *ci = curcpu();
        int ocpl;

        if (ncpl == ci->ci_cpl)
                return ncpl;
        REORDER_PROTECT();
        ocpl = ci->ci_cpl;
        KASSERT(ncpl < NIPL);
        ci->ci_cpl = uimax(ncpl, ocpl);
        REORDER_PROTECT();
        __insn_barrier();
        return ocpl;
}

static inline bool
have_pending_intr_p(struct cpu_info *ci, int ncpl)
{
        if (ci->ci_ipending & ~imask[ncpl])
                return true;
#ifdef __HAVE_FAST_SOFTINTS
        if (ci->ci_data.cpu_softints & (IPL_SOFTMASK << ncpl))
                return true;
#endif
        return false;
}

void
splx(int ncpl)
{
        struct cpu_info *ci = curcpu();

        __insn_barrier();
        REORDER_PROTECT();
        ci->ci_cpl = ncpl;
        if (have_pending_intr_p(ci, ncpl))
                pic_do_pending_int();

        REORDER_PROTECT();
}

int
spllower(int ncpl)
{
        struct cpu_info *ci = curcpu();
        int ocpl;

        __insn_barrier();
        REORDER_PROTECT();
        ocpl = ci->ci_cpl;
        ci->ci_cpl = ncpl;
        if (have_pending_intr_p(ci, ncpl))
                pic_do_pending_int();
        REORDER_PROTECT();
        return ocpl;
}

void
genppc_cpu_configure(void)
{
        aprint_normal("vmmask %x schedmask %x highmask %x\n",
            (u_int)imask[IPL_VM] & 0x7fffffff,
            (u_int)imask[IPL_SCHED] & 0x7fffffff,
            (u_int)imask[IPL_HIGH] & 0x7fffffff);

        spl0();
}

#if defined(PIC_PREPIVR) || defined(PIC_I8259)
/*
 * isa_intr_alloc needs to be done here, because it needs direct access to
 * the various interrupt handler structures.
 */

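/*
 * Allocation policy: prefer an IRQ from the mask that is not mapped at
 * all, then one that is mapped but unconfigured (IST_NONE); failing that,
 * for level-triggered requests, fall back to the shareable level IRQ with
 * the fewest handlers already chained on it.
 */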
int
genppc_isa_intr_alloc(isa_chipset_tag_t ic, struct pic_ops *pic,
    int mask, int type, int *irq_p)
{
        int irq, vi;
        int maybe_irq = -1;
        int shared_depth = 0;
        struct intr_source *is;

        if (pic == NULL)
                return 1;

        for (irq = 0; (mask != 0 && irq < pic->pic_numintrs);
             mask >>= 1, irq++) {
                if ((mask & 1) == 0)
                        continue;
                vi = virq_map[irq + pic->pic_intrbase];
                if (!vi) {
                        *irq_p = irq;
                        return 0;
                }
                is = &intrsources[vi];
                if (is->is_type == IST_NONE) {
                        *irq_p = irq;
                        return 0;
                }
                /* Level interrupts can be shared */
                if (type == IST_LEVEL && is->is_type == IST_LEVEL) {
                        struct intrhand *ih = is->is_hand;
                        int depth;

                        for (depth = 0; ih != NULL; ih = ih->ih_next)
                                depth++;
                        /* Track the candidate with the fewest handlers. */
                        if (maybe_irq == -1 || depth < shared_depth) {
                                maybe_irq = irq;
                                shared_depth = depth;
                        }
                }
        }
        if (maybe_irq != -1) {
                *irq_p = maybe_irq;
                return 0;
        }
        return 1;
}
#endif

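/*
 * Look up an interrupt source by the "<picname> irq <hwirq>" identifier
 * built in intr_establish_xname().
 */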
static struct intr_source *
intr_get_source(const char *intrid)
{
        struct intr_source *is;
        int irq;

        for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++) {
                if (strcmp(intrid, is->is_intrid) == 0)
                        return is;
        }
        return NULL;
}

static struct intrhand *
intr_get_handler(const char *intrid)
{
        struct intr_source *is;

        is = intr_get_source(intrid);
        if (is != NULL)
                return is->is_hand;
        return NULL;
}

uint64_t
interrupt_get_count(const char *intrid, u_int cpu_idx)
{
        struct intr_source *is;

        /* XXX interrupt is always generated by CPU 0 */
        if (cpu_idx != 0)
                return 0;

        is = intr_get_source(intrid);
        if (is != NULL)
                return is->is_ev.ev_count;
        return 0;
}

void
interrupt_get_assigned(const char *intrid, kcpuset_t *cpuset)
{
        struct intr_source *is;

        kcpuset_zero(cpuset);

        is = intr_get_source(intrid);
        if (is != NULL)
                kcpuset_set(cpuset, 0);	/* XXX */
}

void
interrupt_get_available(kcpuset_t *cpuset)
{
        CPU_INFO_ITERATOR cii;
        struct cpu_info *ci;

        kcpuset_zero(cpuset);

        mutex_enter(&cpu_lock);
        for (CPU_INFO_FOREACH(cii, ci)) {
                if ((ci->ci_schedstate.spc_flags & SPCF_NOINTR) == 0)
                        kcpuset_set(cpuset, cpu_index(ci));
        }
        mutex_exit(&cpu_lock);
}

void
interrupt_get_devname(const char *intrid, char *buf, size_t len)
{
        struct intrhand *ih;

        if (len == 0)
                return;

        buf[0] = '\0';

        for (ih = intr_get_handler(intrid); ih != NULL; ih = ih->ih_next) {
                if (buf[0] != '\0')
                        strlcat(buf, ", ", len);
                strlcat(buf, ih->ih_xname, len);
        }
}

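/*
 * Build the list of intrids for all sources that currently have a
 * handler.  Only CPU 0 takes device interrupts here, so a cpuset that
 * excludes it yields NULL.  Free the result with
 * interrupt_destruct_intrids().
 */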
struct intrids_handler *
interrupt_construct_intrids(const kcpuset_t *cpuset)
{
        struct intr_source *is;
        struct intrids_handler *ii_handler;
        intrid_t *ids;
        int i, irq, count;

        if (kcpuset_iszero(cpuset))
                return NULL;
        if (!kcpuset_isset(cpuset, 0))	/* XXX */
                return NULL;

        count = 0;
        for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++) {
                if (is->is_hand != NULL)
                        count++;
        }

        ii_handler = kmem_zalloc(sizeof(int) + sizeof(intrid_t) * count,
            KM_SLEEP);
        if (ii_handler == NULL)
                return NULL;
        ii_handler->iih_nids = count;
        if (count == 0)
                return ii_handler;

        ids = ii_handler->iih_intrids;
        i = 0;
        for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++) {
                /* Stop once full; ignore sources attached after counting. */
                if (i >= count)
                        break;

                if (is->is_hand == NULL)
                        continue;

                strncpy(ids[i], is->is_intrid, sizeof(intrid_t));
                i++;
        }

        return ii_handler;
}

void
interrupt_destruct_intrids(struct intrids_handler *ii_handler)
{
        size_t iih_size;

        if (ii_handler == NULL)
                return;

        iih_size = sizeof(int) + sizeof(intrid_t) * ii_handler->iih_nids;
        kmem_free(ii_handler, iih_size);
}

int
interrupt_distribute(void *ich, const kcpuset_t *newset, kcpuset_t *oldset)
{
        return EOPNOTSUPP;
}

int
interrupt_distribute_handler(const char *intrid, const kcpuset_t *newset,
    kcpuset_t *oldset)
{
        return EOPNOTSUPP;
}

#undef REORDER_PROTECT