xref: /illumos-gate/usr/src/uts/common/io/avintr.c (revision 050c4bfe)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 /*
27  * Autovectored Interrupt Configuration and Deconfiguration
28  */
29 
30 #include <sys/param.h>
31 #include <sys/cmn_err.h>
32 #include <sys/trap.h>
33 #include <sys/t_lock.h>
34 #include <sys/avintr.h>
35 #include <sys/kmem.h>
36 #include <sys/machlock.h>
37 #include <sys/systm.h>
38 #include <sys/machsystm.h>
39 #include <sys/sunddi.h>
40 #include <sys/x_call.h>
41 #include <sys/cpuvar.h>
42 #include <sys/atomic.h>
43 #include <sys/smp_impldefs.h>
44 #include <sys/sdt.h>
45 #include <sys/stack.h>
46 #include <sys/ddi_impldefs.h>
47 #ifdef __xpv
48 #include <sys/evtchn_impl.h>
49 #endif
50 
/*
 * Per-handler soft interrupt pending state.  av_pending holds one bit
 * per CPU (indexed by cpu_seqid); a set bit means the softint has been
 * triggered on that CPU but not yet dispatched.
 */
typedef struct av_softinfo {
	cpuset_t	av_pending;	/* pending bitmasks */
} av_softinfo_t;

/* Insert a handler into an autovec chain (allocates/reuses an entry). */
static void insert_av(void *intr_id, struct av_head *vectp, avfunc f,
	caddr_t arg1, caddr_t arg2, uint64_t *ticksp, int pri_level,
	dev_info_t *dip);
/* Unhook a handler from an autovec chain and wait for it to drain. */
static void remove_av(void *intr_id, struct av_head *vectp, avfunc f,
	int pri_level, int vect);
60 
61 /*
62  * Arrange for a driver to be called when a particular
63  * auto-vectored interrupt occurs.
64  * NOTE: if a device can generate interrupts on more than
65  * one level, or if a driver services devices that interrupt
66  * on more than one level, then the driver should install
67  * itself on each of those levels.
68  */
/* Diagnostic message formats used by the registration routines below. */
static char badsoft[] =
	"add_avintr: bad soft interrupt level %d for driver '%s'\n";
static char multilevel[] =
	"!IRQ%d is being shared by drivers with different interrupt levels.\n"
	"This may result in reduced system performance.";
static char multilevel2[] =
	"Cannot register interrupt for '%s' device at IPL %d because it\n"
	"conflicts with another device using the same vector %d with an IPL\n"
	"of %d. Reconfigure the conflicting devices to use different vectors.";

/* Number of hardware interrupt vectors (event channels under Xen). */
#ifdef __xpv
#define	MAX_VECT	NR_IRQS
#else
#define	MAX_VECT	256
#endif

/* Chain of registered NMI handlers, kept sorted by descending level. */
struct autovec *nmivect = NULL;
/* Hardware autovector table: one handler chain per interrupt vector. */
struct av_head autovect[MAX_VECT];
/* Software interrupt handler chains, one per priority up to LOCK_LEVEL. */
struct av_head softvect[LOCK_LEVEL + 1];
/* Protects modification of all the handler chains above. */
kmutex_t av_lock;
/*
 * These are software interrupt handlers dedicated to ddi timer.
 * Interrupt levels 1 through 10 are supported; high-level interrupts
 * (above LOCK_LEVEL) must not be used here.
 */
ddi_softint_hdl_impl_t softlevel_hdl[DDI_IPL_10] = {
	{0, NULL, NULL, NULL, 0, NULL, NULL, NULL}, /* level 1 */
	{0, NULL, NULL, NULL, 0, NULL, NULL, NULL}, /* level 2 */
	{0, NULL, NULL, NULL, 0, NULL, NULL, NULL}, /* level 3 */
	{0, NULL, NULL, NULL, 0, NULL, NULL, NULL}, /* level 4 */
	{0, NULL, NULL, NULL, 0, NULL, NULL, NULL}, /* level 5 */
	{0, NULL, NULL, NULL, 0, NULL, NULL, NULL}, /* level 6 */
	{0, NULL, NULL, NULL, 0, NULL, NULL, NULL}, /* level 7 */
	{0, NULL, NULL, NULL, 0, NULL, NULL, NULL}, /* level 8 */
	{0, NULL, NULL, NULL, 0, NULL, NULL, NULL}, /* level 9 */
	{0, NULL, NULL, NULL, 0, NULL, NULL, NULL}, /* level 10 */
};
/* Handle used by siron()/kdi_siron() for the level-1 soft interrupt. */
ddi_softint_hdl_impl_t softlevel1_hdl =
	{0, NULL, NULL, NULL, 0, NULL, NULL, NULL};
108 
109 /*
110  * clear/check softint pending flag corresponding for
111  * the current CPU
112  */
void
av_clear_softint_pending(av_softinfo_t *infop)
{
	/* Atomically clear the current CPU's bit in the pending set. */
	CPUSET_ATOMIC_DEL(infop->av_pending, CPU->cpu_seqid);
}
118 
119 boolean_t
120 av_check_softint_pending(av_softinfo_t *infop, boolean_t check_all)
121 {
122 	if (check_all)
123 		return (!CPUSET_ISNULL(infop->av_pending));
124 	else
125 		return (CPU_IN_SET(infop->av_pending, CPU->cpu_seqid) != 0);
126 }
127 
128 /*
129  * This is the wrapper function which is generally used to set a softint
130  * pending
131  */
void
av_set_softint_pending(int pri, av_softinfo_t *infop)
{
	/*
	 * Simple traced wrapper; the real work is done by the kmdb-safe
	 * entry point so both paths share one implementation.
	 */
	kdi_av_set_softint_pending(pri, infop);
}
137 
138 /*
139  * This is kmdb's private entry point to setsoftint called from kdi_siron
140  * It first sets our av softint pending bit for the current CPU,
141  * then it sets the CPU softint pending bit for pri.
142  */
void
kdi_av_set_softint_pending(int pri, av_softinfo_t *infop)
{
	/* Mark this handler pending for the current CPU first ... */
	CPUSET_ATOMIC_ADD(infop->av_pending, CPU->cpu_seqid);

	/* ... then raise the CPU-wide pending bit for priority 'pri'. */
	atomic_or_32((uint32_t *)&CPU->cpu_softinfo.st_pending, 1 << pri);
}
150 
151 /*
152  * register nmi interrupt routine. The first arg is used only to order
153  * various nmi interrupt service routines in the chain. Higher lvls will
154  * be called first
155  */
int
add_nmintr(int lvl, avfunc nmintr, char *name, caddr_t arg)
{
	struct autovec  *mem;
	struct autovec *p, *prev = NULL;

	/* Refuse a NULL handler; nothing sensible could be dispatched. */
	if (nmintr == NULL) {
		printf("Attempt to add null vect for %s on nmi\n", name);
		return (0);

	}

	/*
	 * Build the new entry before taking av_lock so the allocation
	 * (which may sleep) happens outside the critical section.
	 */
	mem = kmem_zalloc(sizeof (struct autovec), KM_SLEEP);
	mem->av_vector = nmintr;
	mem->av_intarg1 = arg;
	mem->av_intarg2 = NULL;
	mem->av_intr_id = NULL;
	mem->av_prilevel = lvl;
	mem->av_dip = NULL;
	mem->av_link = NULL;

	mutex_enter(&av_lock);

	/* Empty chain: the new entry becomes the head. */
	if (!nmivect) {
		nmivect = mem;
		mutex_exit(&av_lock);
		return (1);
	}
	/* find where it goes in list */
	for (p = nmivect; p != NULL; p = p->av_link) {
		if (p->av_vector == nmintr && p->av_intarg1 == arg) {
			/*
			 * already in list
			 * So? Somebody added the same interrupt twice.
			 */
			cmn_err(CE_WARN, "Driver already registered '%s'",
			    name);
			kmem_free(mem, sizeof (struct autovec));
			mutex_exit(&av_lock);
			return (0);
		}
		/*
		 * Chain is kept sorted by descending level; insert in
		 * front of the first entry with a lower level.
		 */
		if (p->av_prilevel < lvl) {
			if (p == nmivect) {   /* it's at head of list */
				mem->av_link = p;
				nmivect = mem;
			} else {
				mem->av_link = p;
				prev->av_link = mem;
			}
			mutex_exit(&av_lock);
			return (1);
		}
		prev = p;

	}
	/* didn't find it, add it to the end */
	prev->av_link = mem;
	mutex_exit(&av_lock);
	return (1);

}
217 
218 /*
219  * register a hardware interrupt handler.
220  */
/*
 * Register xxintr for hardware vector 'vect' at interrupt level 'lvl'.
 * Returns 1 on success, 0 on failure (NULL handler or an IPL conflict
 * across the LOCK_LEVEL boundary with an existing handler on the
 * same vector).
 */
int
add_avintr(void *intr_id, int lvl, avfunc xxintr, char *name, int vect,
    caddr_t arg1, caddr_t arg2, uint64_t *ticksp, dev_info_t *dip)
{
	struct av_head *vecp = (struct av_head *)0;
	avfunc f;
	int s, vectindex;			/* save old spl value */
	ushort_t hi_pri;

	if ((f = xxintr) == NULL) {
		printf("Attempt to add null vect for %s on vector %d\n",
		    name, vect);
		return (0);

	}
	vectindex = vect % MAX_VECT;

	vecp = &autovect[vectindex];

	/*
	 * "hi_pri == 0" implies all entries on list are "unused",
	 * which means that it's OK to just insert this one.
	 */
	hi_pri = vecp->avh_hi_pri;
	if (vecp->avh_link && (hi_pri != 0)) {
		/*
		 * Reject mixing handlers above and below LOCK_LEVEL on
		 * the same vector; warn (only) when levels differ but
		 * are on the same side of the boundary.
		 */
		if (((hi_pri > LOCK_LEVEL) && (lvl < LOCK_LEVEL)) ||
		    ((hi_pri < LOCK_LEVEL) && (lvl > LOCK_LEVEL))) {
			cmn_err(CE_WARN, multilevel2, name, lvl, vect,
			    hi_pri);
			return (0);
		}
		if ((vecp->avh_lo_pri != lvl) || (hi_pri != lvl))
			cmn_err(CE_NOTE, multilevel, vect);
	}

	insert_av(intr_id, vecp, f, arg1, arg2, ticksp, lvl, dip);
	s = splhi();
	/*
	 * do what ever machine specific things are necessary
	 * to set priority level (e.g. set picmasks)
	 */
	mutex_enter(&av_lock);
	(*addspl)(vect, lvl, vecp->avh_lo_pri, vecp->avh_hi_pri);
	mutex_exit(&av_lock);
	splx(s);
	return (1);

}
269 
270 void
271 update_avsoftintr_args(void *intr_id, int lvl, caddr_t arg2)
272 {
273 	struct autovec *p;
274 	struct autovec *target = NULL;
275 	struct av_head *vectp = (struct av_head *)&softvect[lvl];
276 
277 	for (p = vectp->avh_link; p && p->av_vector; p = p->av_link) {
278 		if (p->av_intr_id == intr_id) {
279 			target = p;
280 			break;
281 		}
282 	}
283 
284 	if (target == NULL)
285 		return;
286 	target->av_intarg2 = arg2;
287 }
288 
289 /*
290  * Register a software interrupt handler
291  */
int
add_avsoftintr(void *intr_id, int lvl, avfunc xxintr, char *name,
    caddr_t arg1, caddr_t arg2)
{
	int slvl;
	ddi_softint_hdl_impl_t	*hdlp = (ddi_softint_hdl_impl_t *)intr_id;

	/*
	 * If this soft level maps to a dedicated hardware vector,
	 * register it through the hardware autovector path instead.
	 */
	if ((slvl = slvltovect(lvl)) != -1)
		return (add_avintr(intr_id, lvl, xxintr,
		    name, slvl, arg1, arg2, NULL, NULL));

	if (intr_id == NULL) {
		printf("Attempt to add null intr_id for %s on level %d\n",
		    name, lvl);
		return (0);
	}

	if (xxintr == NULL) {
		printf("Attempt to add null handler for %s on level %d\n",
		    name, lvl);
		return (0);
	}

	/* Soft levels are valid only in the range 1..LOCK_LEVEL. */
	if (lvl <= 0 || lvl > LOCK_LEVEL) {
		printf(badsoft, lvl, name);
		return (0);
	}

	/* First registration of this handle: allocate its pending state. */
	if (hdlp->ih_pending == NULL) {
		hdlp->ih_pending =
		    kmem_zalloc(sizeof (av_softinfo_t), KM_SLEEP);
	}

	insert_av(intr_id, &softvect[lvl], xxintr, arg1, arg2, NULL, lvl, NULL);

	return (1);
}
329 
330 /* insert an interrupt vector into chain */
static void
insert_av(void *intr_id, struct av_head *vectp, avfunc f, caddr_t arg1,
    caddr_t arg2, uint64_t *ticksp, int pri_level, dev_info_t *dip)
{
	/*
	 * Protect rewrites of the list
	 */
	struct autovec *p, *mem;

	/* Allocate outside the lock; may sleep under KM_SLEEP. */
	mem = kmem_zalloc(sizeof (struct autovec), KM_SLEEP);
	mem->av_vector = f;
	mem->av_intarg1 = arg1;
	mem->av_intarg2 = arg2;
	mem->av_ticksp = ticksp;
	mem->av_intr_id = intr_id;
	mem->av_prilevel = pri_level;
	mem->av_dip = dip;
	mem->av_link = NULL;

	mutex_enter(&av_lock);

	if (vectp->avh_link == NULL) {	/* Nothing on list - put it at head */
		vectp->avh_link = mem;
		vectp->avh_hi_pri = vectp->avh_lo_pri = (ushort_t)pri_level;

		mutex_exit(&av_lock);
		return;
	}

	/* find where it goes in list */
	for (p = vectp->avh_link; p != NULL; p = p->av_link) {
		if (p->av_vector == NULL) {	/* freed struct available */
			p->av_intarg1 = arg1;
			p->av_intarg2 = arg2;
			p->av_ticksp = ticksp;
			p->av_intr_id = intr_id;
			p->av_prilevel = pri_level;
			p->av_dip = dip;
			/* Widen the chain's priority bounds if needed. */
			if (pri_level > (int)vectp->avh_hi_pri) {
				vectp->avh_hi_pri = (ushort_t)pri_level;
			}
			if (pri_level < (int)vectp->avh_lo_pri) {
				vectp->avh_lo_pri = (ushort_t)pri_level;
			}
			/*
			 * To prevent calling service routine before args
			 * and ticksp are ready fill in vector last.
			 */
			p->av_vector = f;
			mutex_exit(&av_lock);
			/* Reused an existing slot; new entry not needed. */
			kmem_free(mem, sizeof (struct autovec));
			return;
		}
	}
	/* insert new intpt at beginning of chain */
	mem->av_link = vectp->avh_link;
	vectp->avh_link = mem;
	if (pri_level > (int)vectp->avh_hi_pri) {
		vectp->avh_hi_pri = (ushort_t)pri_level;
	}
	if (pri_level < (int)vectp->avh_lo_pri) {
		vectp->avh_lo_pri = (ushort_t)pri_level;
	}
	mutex_exit(&av_lock);
}
396 
397 static int
398 av_rem_softintr(void *intr_id, int lvl, avfunc xxintr, boolean_t rem_softinfo)
399 {
400 	struct av_head *vecp = (struct av_head *)0;
401 	int slvl;
402 	ddi_softint_hdl_impl_t	*hdlp = (ddi_softint_hdl_impl_t *)intr_id;
403 	av_softinfo_t *infop = (av_softinfo_t *)hdlp->ih_pending;
404 
405 	if (xxintr == NULL)
406 		return (0);
407 
408 	if ((slvl = slvltovect(lvl)) != -1) {
409 		rem_avintr(intr_id, lvl, xxintr, slvl);
410 		return (1);
411 	}
412 
413 	if (lvl <= 0 && lvl >= LOCK_LEVEL) {
414 		return (0);
415 	}
416 	vecp = &softvect[lvl];
417 	remove_av(intr_id, vecp, xxintr, lvl, 0);
418 
419 	if (rem_softinfo) {
420 		kmem_free(infop, sizeof (av_softinfo_t));
421 		hdlp->ih_pending = NULL;
422 	}
423 
424 	return (1);
425 }
426 
427 int
428 av_softint_movepri(void *intr_id, int old_lvl)
429 {
430 	int ret;
431 	ddi_softint_hdl_impl_t	*hdlp = (ddi_softint_hdl_impl_t *)intr_id;
432 
433 	ret = add_avsoftintr(intr_id, hdlp->ih_pri, hdlp->ih_cb_func,
434 	    DEVI(hdlp->ih_dip)->devi_name, hdlp->ih_cb_arg1, hdlp->ih_cb_arg2);
435 
436 	if (ret) {
437 		(void) av_rem_softintr(intr_id, old_lvl, hdlp->ih_cb_func,
438 		    B_FALSE);
439 	}
440 
441 	return (ret);
442 }
443 
444 /*
445  * Remove a driver from the autovector list.
446  */
int
rem_avsoftintr(void *intr_id, int lvl, avfunc xxintr)
{
	/* Full removal: also free the handle's per-CPU pending state. */
	return (av_rem_softintr(intr_id, lvl, xxintr, B_TRUE));
}
452 
/*
 * Remove a hardware interrupt handler from vector 'vect' and let the
 * platform (*delspl) recompute priority masks for the chain.
 */
void
rem_avintr(void *intr_id, int lvl, avfunc xxintr, int vect)
{
	struct av_head *vecp = (struct av_head *)0;
	avfunc f;
	int s, vectindex;			/* save old spl value */

	if ((f = xxintr) == NULL)
		return;

	vectindex = vect % MAX_VECT;
	vecp = &autovect[vectindex];
	remove_av(intr_id, vecp, f, lvl, vect);
	/* Block interrupts while the spl machinery is updated. */
	s = splhi();
	mutex_enter(&av_lock);
	(*delspl)(vect, lvl, vecp->avh_lo_pri, vecp->avh_hi_pri);
	mutex_exit(&av_lock);
	splx(s);
}
472 
473 
474 /*
475  * After having made a change to an autovector list, wait until we have
476  * seen each cpu not executing an interrupt at that level--so we know our
477  * change has taken effect completely (no old state in registers, etc).
478  */
479 static void
480 wait_till_seen(int ipl)
481 {
482 	int cpu_in_chain, cix;
483 	struct cpu *cpup;
484 	cpuset_t cpus_to_check;
485 
486 	CPUSET_ALL(cpus_to_check);
487 	do {
488 		cpu_in_chain = 0;
489 		for (cix = 0; cix < NCPU; cix++) {
490 			cpup = cpu[cix];
491 			if (cpup != NULL && CPU_IN_SET(cpus_to_check, cix)) {
492 				if (INTR_ACTIVE(cpup, ipl)) {
493 					cpu_in_chain = 1;
494 				} else {
495 					CPUSET_DEL(cpus_to_check, cix);
496 				}
497 			}
498 		}
499 	} while (cpu_in_chain);
500 }
501 
/* Safe target for a racing tick update after a handler is removed. */
static uint64_t dummy_tick;

/* remove an interrupt vector from the chain */
static void
remove_av(void *intr_id, struct av_head *vectp, avfunc f, int pri_level,
	int vect)
{
	struct autovec *p, *target;
	int	lo_pri, hi_pri;
	int	ipl;
	/*
	 * Protect rewrites of the list
	 */
	target = NULL;

	mutex_enter(&av_lock);
	ipl = pri_level;
	lo_pri = MAXIPL;
	hi_pri = 0;
	/*
	 * One pass: find the entry to remove while recomputing the
	 * lo/hi priority bounds from the remaining active entries.
	 */
	for (p = vectp->avh_link; p; p = p->av_link) {
		if ((p->av_vector == f) && (p->av_intr_id == intr_id)) {
			/* found the handler */
			target = p;
			continue;
		}
		if (p->av_vector != NULL) {
			if (p->av_prilevel > hi_pri)
				hi_pri = p->av_prilevel;
			if (p->av_prilevel < lo_pri)
				lo_pri = p->av_prilevel;
		}
	}
	/* Wait at the highest level still present on the chain. */
	if (ipl < hi_pri)
		ipl = hi_pri;
	if (target == NULL) {	/* not found */
		printf("Couldn't remove function %p at %d, %d\n",
		    (void *)f, vect, pri_level);
		mutex_exit(&av_lock);
		return;
	}

	/*
	 * This drops the handler from the chain, it can no longer be called.
	 * However, there is no guarantee that the handler is not currently
	 * still executing.
	 */
	target->av_vector = NULL;
	/*
	 * There is a race where we could be just about to pick up the ticksp
	 * pointer to increment it after returning from the service routine
	 * in av_dispatch_autovect.  Rather than NULL it out let's just point
	 * it off to something safe so that any final tick update attempt
	 * won't fault.
	 */
	target->av_ticksp = &dummy_tick;
	wait_till_seen(ipl);

	if (lo_pri > hi_pri) {	/* the chain is now empty */
		/* Leave the unused entries here for probable future use */
		vectp->avh_lo_pri = MAXIPL;
		vectp->avh_hi_pri = 0;
	} else {
		/* Tighten the cached bounds to the surviving handlers. */
		if ((int)vectp->avh_lo_pri < lo_pri)
			vectp->avh_lo_pri = (ushort_t)lo_pri;
		if ((int)vectp->avh_hi_pri > hi_pri)
			vectp->avh_hi_pri = (ushort_t)hi_pri;
	}
	mutex_exit(&av_lock);
	wait_till_seen(ipl);
}
572 
573 /*
574  * kmdb uses siron (and thus setsoftint) while the world is stopped in order to
575  * inform its driver component that there's work to be done.  We need to keep
576  * DTrace from instrumenting kmdb's siron and setsoftint.  We duplicate siron,
577  * giving kmdb's version a kdi prefix to keep DTrace at bay.   We also
578  * provide a version of the various setsoftint functions available for kmdb to
579  * use using a kdi_ prefix while the main *setsoftint() functionality is
580  * implemented as a wrapper.  This allows tracing, while still providing a
581  * way for kmdb to sneak in unmolested.
582  */
void
kdi_siron(void)
{
	/* kmdb path: use the kdi setsoftint vector to stay DTrace-free. */
	(*kdisetsoftint)(1, softlevel1_hdl.ih_pending);
}
588 
589 /*
590  * Trigger a soft interrupt.
591  */
void
siron(void)
{
	/* Level 1 software interrupt */
	(*setsoftint)(1, softlevel1_hdl.ih_pending);
}
598 
599 /*
600  * Trigger software interrupts dedicated to ddi timer.
601  */
void
sir_on(int level)
{
	/* Levels are 1-based; softlevel_hdl[] is indexed from 0. */
	ASSERT(level >= DDI_IPL_1 && level <= DDI_IPL_10);
	(*setsoftint)(level, softlevel_hdl[level-1].ih_pending);
}
608 
609 /*
610  * The handler which is executed on the target CPU.
611  */
612 /*ARGSUSED*/
static int
siron_poke_intr(xc_arg_t a1, xc_arg_t a2, xc_arg_t a3)
{
	/* Cross-call target: raise a level-1 softint on this CPU. */
	siron();
	return (0);
}
619 
620 /*
621  * May get called from softcall to poke CPUs.
622  */
623 void
624 siron_poke_cpu(cpuset_t poke)
625 {
626 	int cpuid = CPU->cpu_id;
627 
628 	/*
629 	 * If we are poking to ourself then we can simply
630 	 * generate level1 using siron()
631 	 */
632 	if (CPU_IN_SET(poke, cpuid)) {
633 		siron();
634 		CPUSET_DEL(poke, cpuid);
635 		if (CPUSET_ISNULL(poke))
636 			return;
637 	}
638 
639 	xc_call(0, 0, 0, CPUSET2BV(poke), (xc_func_t)siron_poke_intr);
640 }
641 
642 /*
643  * Walk the autovector table for this vector, invoking each
644  * interrupt handler as we go.
645  */
646 
647 extern uint64_t intr_get_time(void);
648 
void
av_dispatch_autovect(uint_t vec)
{
	struct autovec *av;

	ASSERT_STACK_ALIGNED();

	/*
	 * Re-walk the chain as long as the vector is shared (more than
	 * one handler was called) and someone claimed the interrupt, so
	 * a device that re-asserted during dispatch is not missed.
	 */
	while ((av = autovect[vec].avh_link) != NULL) {
		uint_t numcalled = 0;
		uint_t claimed = 0;

		for (; av; av = av->av_link) {
			uint_t r;
			uint_t (*intr)() = av->av_vector;
			caddr_t arg1 = av->av_intarg1;
			caddr_t arg2 = av->av_intarg2;
			dev_info_t *dip = av->av_dip;

			/*
			 * We must walk the entire chain.  Removed handlers
			 * may be anywhere in the chain.
			 */
			if (intr == NULL)
				continue;

			DTRACE_PROBE4(interrupt__start, dev_info_t *, dip,
			    void *, intr, caddr_t, arg1, caddr_t, arg2);
			r = (*intr)(arg1, arg2);
			DTRACE_PROBE4(interrupt__complete, dev_info_t *, dip,
			    void *, intr, caddr_t, arg1, uint_t, r);
			numcalled++;
			claimed |= r;
			/* Accumulate interrupt time for non-high-level PILs. */
			if (av->av_ticksp && av->av_prilevel <= LOCK_LEVEL)
				atomic_add_64(av->av_ticksp, intr_get_time());
		}

		/*
		 * If there's only one interrupt handler in the chain,
		 * or if no-one claimed the interrupt at all give up now.
		 */
		if (numcalled == 1 || claimed == 0)
			break;
	}
}
693 
694 /*
695  * Call every soft interrupt handler we can find at this level once.
696  */
697 void
698 av_dispatch_softvect(uint_t pil)
699 {
700 	struct autovec *av;
701 	ddi_softint_hdl_impl_t	*hdlp;
702 	uint_t (*intr)();
703 	caddr_t arg1;
704 	caddr_t arg2;
705 
706 	ASSERT_STACK_ALIGNED();
707 	ASSERT(pil >= 0 && pil <= PIL_MAX);
708 
709 	for (av = softvect[pil].avh_link; av; av = av->av_link) {
710 		/*
711 		 * We must walk the entire chain.  Removed handlers
712 		 * may be anywhere in the chain.
713 		 */
714 		if ((intr = av->av_vector) == NULL)
715 			continue;
716 		arg1 = av->av_intarg1;
717 		arg2 = av->av_intarg2;
718 
719 		hdlp = (ddi_softint_hdl_impl_t *)av->av_intr_id;
720 		ASSERT(hdlp);
721 
722 		/*
723 		 * Each cpu has its own pending bit in hdlp->ih_pending,
724 		 * here av_check/clear_softint_pending is just checking
725 		 * and clearing the pending bit for the current cpu, who
726 		 * has just triggered a softint.
727 		 */
728 		if (av_check_softint_pending(hdlp->ih_pending, B_FALSE)) {
729 			av_clear_softint_pending(hdlp->ih_pending);
730 			(void) (*intr)(arg1, arg2);
731 		}
732 	}
733 }
734 
735 struct regs;
736 
737 /*
738  * Call every NMI handler we know of once.
739  */
void
av_dispatch_nmivect(struct regs *rp)
{
	struct autovec *av;

	ASSERT_STACK_ALIGNED();

	/* The chain is ordered by descending level (see add_nmintr). */
	for (av = nmivect; av; av = av->av_link)
		(void) (av->av_vector)(av->av_intarg1, rp);
}
750