xref: /netbsd/sys/arch/sparc/sparc/intr.c (revision 6550d01e)
1 /*	$NetBSD: intr.c,v 1.111 2011/01/27 06:24:59 mrg Exp $ */
2 
3 /*
4  * Copyright (c) 1992, 1993
5  *	The Regents of the University of California.  All rights reserved.
6  *
7  * This software was developed by the Computer Systems Engineering group
8  * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
9  * contributed to Berkeley.
10  *
11  * All advertising materials mentioning features or use of this software
12  * must display the following acknowledgement:
13  *	This product includes software developed by the University of
14  *	California, Lawrence Berkeley Laboratory.
15  *
16  * Redistribution and use in source and binary forms, with or without
17  * modification, are permitted provided that the following conditions
18  * are met:
19  * 1. Redistributions of source code must retain the above copyright
20  *    notice, this list of conditions and the following disclaimer.
21  * 2. Redistributions in binary form must reproduce the above copyright
22  *    notice, this list of conditions and the following disclaimer in the
23  *    documentation and/or other materials provided with the distribution.
24  * 3. Neither the name of the University nor the names of its contributors
25  *    may be used to endorse or promote products derived from this software
26  *    without specific prior written permission.
27  *
28  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38  * SUCH DAMAGE.
39  *
40  *	@(#)intr.c	8.3 (Berkeley) 11/11/93
41  */
42 
43 #include <sys/cdefs.h>
44 __KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.111 2011/01/27 06:24:59 mrg Exp $");
45 
46 #include "opt_multiprocessor.h"
47 #include "opt_sparc_arch.h"
48 
49 #include <sys/param.h>
50 #include <sys/systm.h>
51 #include <sys/kernel.h>
52 #include <sys/malloc.h>
53 #include <sys/cpu.h>
54 #include <sys/intr.h>
55 #include <sys/simplelock.h>
56 
57 #include <uvm/uvm_extern.h>
58 
59 #include <dev/cons.h>
60 
61 #include <machine/ctlreg.h>
62 #include <machine/instr.h>
63 #include <machine/trap.h>
64 #include <machine/promlib.h>
65 
66 #include <sparc/sparc/asm.h>
67 #include <sparc/sparc/cpuvar.h>
68 
69 #if defined(MULTIPROCESSOR) && defined(DDB)
70 #include <machine/db_machdep.h>
71 #endif
72 
73 #if defined(MULTIPROCESSOR)
74 static int intr_biglock_wrapper(void *);
75 
76 void *xcall_cookie;
77 #endif
78 
79 void	strayintr(struct clockframe *);
80 #ifdef DIAGNOSTIC
81 void	bogusintr(struct clockframe *);
82 #endif
83 
84 /*
85  * Stray interrupt handler.  Clear it if possible.
86  * If not, and if we get 10 interrupts in 10 seconds, panic.
87  * XXXSMP: We are holding the kernel lock at entry & exit.
88  */
89 void
90 strayintr(struct clockframe *fp)
91 {
92 	static int straytime, nstray;
93 	char bits[64];
94 	int timesince;
95 
96 #if defined(MULTIPROCESSOR)
97 	/*
98 	 * XXX
99 	 *
100 	 * Don't whine about zs interrupts on MP.  We sometimes get
101 	 * stray interrupts when polled kernel output on cpu>0 eats
102 	 * the interrupt and cpu0 sees it.
103 	 */
104 #define ZS_INTR_IPL	12
105 	if (fp->ipl == ZS_INTR_IPL)
106 		return;
107 #endif
108 
109 	snprintb(bits, sizeof(bits), PSR_BITS, fp->psr);
110 	printf("stray interrupt cpu%d ipl 0x%x pc=0x%x npc=0x%x psr=%s\n",
111 	    cpu_number(), fp->ipl, fp->pc, fp->npc, bits);
112 
113 	timesince = time_uptime - straytime;
114 	if (timesince <= 10) {
115 		if (++nstray > 10)
116 			panic("crazy interrupts");
117 	} else {
118 		straytime = time_uptime;
119 		nstray = 1;
120 	}
121 }
122 
123 
124 #ifdef DIAGNOSTIC
125 /*
126  * Bogus interrupt for which neither hard nor soft interrupt bit in
127  * the IPR was set.
128  */
129 void
130 bogusintr(struct clockframe *fp)
131 {
132 	char bits[64];
133 
134 	snprintb(bits, sizeof(bits), PSR_BITS, fp->psr);
135 	printf("cpu%d: bogus interrupt ipl 0x%x pc=0x%x npc=0x%x psr=%s\n",
136 	    cpu_number(), fp->ipl, fp->pc, fp->npc, bits);
137 }
138 #endif /* DIAGNOSTIC */
139 
140 /*
141  * Get module ID of interrupt target.
142  */
143 u_int
144 getitr(void)
145 {
146 #if defined(MULTIPROCESSOR)
147 	u_int v;
148 
149 	if (!CPU_ISSUN4M || sparc_ncpus <= 1)
150 		return (0);
151 
152 	v = *((u_int *)ICR_ITR);
153 	return (v + 8);
154 #else
155 	return (0);
156 #endif
157 }
158 
159 /*
160  * Set interrupt target.
161  * Return previous value.
162  */
163 u_int
164 setitr(u_int mid)
165 {
166 #if defined(MULTIPROCESSOR)
167 	u_int v;
168 
169 	if (!CPU_ISSUN4M || sparc_ncpus <= 1)
170 		return (0);
171 
172 	v = *((u_int *)ICR_ITR);
173 	*((u_int *)ICR_ITR) = CPU_MID2CPUNO(mid);
174 	return (v + 8);
175 #else
176 	return (0);
177 #endif
178 }
179 
180 #if (defined(SUN4M) && !defined(MSIIEP)) || defined(SUN4D)
void	nmi_hard(void);
void	nmi_soft(struct trapframe *);

/*
 * Optional asynchronous-error handlers, hooked by the respective
 * memory/bus controller drivers.  nmi_hard() calls each one whose
 * pending bit is set; a nonzero return marks the fault fatal.
 */
int	(*memerr_handler)(void);
int	(*sbuserr_handler)(void);
int	(*vmeerr_handler)(void);
int	(*moduleerr_handler)(void);

#if defined(MULTIPROCESSOR)
/* Number of CPUs that have entered nmi_hard(); updated under nmihard_lock. */
volatile int nmi_hard_wait = 0;
struct simplelock nmihard_lock = SIMPLELOCK_INITIALIZER;
/* If set, a fatal NMI drops into the PROM instead of panicking. */
int drop_into_rom_on_fatal = 1;
#endif
194 
void
nmi_hard(void)
{
	/*
	 * A level 15 hard interrupt.
	 */
	int fatal = 0;
	uint32_t si;
	char bits[64];
	u_int afsr, afva;

	/* Tally */
	cpuinfo.ci_intrcnt[15].ev_count++;

	/* Report this module's asynchronous fault status, if retrievable. */
	afsr = afva = 0;
	if ((*cpuinfo.get_asyncflt)(&afsr, &afva) == 0) {
		snprintb(bits, sizeof(bits), AFSR_BITS, afsr);
		printf("Async registers (mid %d): afsr=%s; afva=0x%x%x\n",
			cpuinfo.mid, bits,
			(afsr & AFSR_AFA) >> AFSR_AFA_RSHIFT, afva);
	}

#if defined(MULTIPROCESSOR)
	/*
	 * Increase nmi_hard_wait.  If we aren't the master, loop while this
	 * variable is non-zero.  If we are the master, loop while this
	 * variable is less than the number of cpus.
	 */
	simple_lock(&nmihard_lock);
	nmi_hard_wait++;
	simple_unlock(&nmihard_lock);

	if (cpuinfo.master == 0) {
		/* Spin until the master resets nmi_hard_wait below. */
		while (nmi_hard_wait)
			;
		return;
	} else {
		int n = 100000;

		/* Bounded wait for the other CPUs to check in. */
		while (nmi_hard_wait < sparc_ncpus) {
			DELAY(1);
			if (n-- > 0)
				continue;
			printf("nmi_hard: SMP botch.");
			break;
		}
	}
#endif

	/*
	 * Examine pending system interrupts.
	 */
	si = *((uint32_t *)ICR_SI_PEND);
	snprintb(bits, sizeof(bits), SINTR_BITS, si);
	printf("cpu%d: NMI: system interrupts: %s\n", cpu_number(), bits);

	/*
	 * Dispatch each pending error to its registered handler, if any;
	 * a nonzero return means the error is fatal.
	 */
	if ((si & SINTR_M) != 0) {
		/* ECC memory error */
		if (memerr_handler != NULL)
			fatal |= (*memerr_handler)();
	}
	if ((si & SINTR_I) != 0) {
		/* MBus/SBus async error */
		if (sbuserr_handler != NULL)
			fatal |= (*sbuserr_handler)();
	}
	if ((si & SINTR_V) != 0) {
		/* VME async error */
		if (vmeerr_handler != NULL)
			fatal |= (*vmeerr_handler)();
	}
	if ((si & SINTR_ME) != 0) {
		/* Module async error */
		if (moduleerr_handler != NULL)
			fatal |= (*moduleerr_handler)();
	}

#if defined(MULTIPROCESSOR)
	/*
	 * Tell everyone else we've finished dealing with the hard NMI.
	 */
	simple_lock(&nmihard_lock);
	nmi_hard_wait = 0;
	simple_unlock(&nmihard_lock);
	if (fatal && drop_into_rom_on_fatal) {
		prom_abort();
		return;
	}
#endif

	if (fatal)
		panic("nmi");
}
289 
290 /*
291  * Non-maskable soft interrupt level 15 handler
292  */
void
nmi_soft(struct trapframe *tf)
{

	/* Tally */
	cpuinfo.ci_sintrcnt[15].ev_count++;

	if (cpuinfo.mailbox) {
		/* Check PROM messages */
		uint8_t msg = *(uint8_t *)cpuinfo.mailbox;
		switch (msg) {
		case OPENPROM_MBX_STOP:
		case OPENPROM_MBX_WD:
			/* In case there's an xcall in progress (unlikely) */
			spl0();
			/* Mark this CPU unavailable before parking it. */
			cpuinfo.flags &= ~CPUFLG_READY;
#ifdef MULTIPROCESSOR
			cpu_ready_mask &= ~(1 << cpu_number());
#endif
			prom_cpustop(0);
			break;
		case OPENPROM_MBX_ABORT:
		case OPENPROM_MBX_BPT:
			prom_cpuidle(0);
			/*
			 * We emerge here after someone does a
			 * prom_resumecpu(ournode).
			 */
			return;
		default:
			break;
		}
	}

#if defined(MULTIPROCESSOR)
	/* Handle level-15 cross-call requests from other CPUs. */
	switch (cpuinfo.msg_lev15.tag) {
	case XPMSG15_PAUSECPU:
		/* XXX - assumes DDB is the only user of mp_pause_cpu() */
		cpuinfo.flags |= CPUFLG_PAUSED;
#if defined(DDB)
		/* trap(T_DBPAUSE) */
		__asm("ta 0x8b");
#else
		while (cpuinfo.flags & CPUFLG_PAUSED)
			/* spin */;
#endif /* DDB */
	}
	cpuinfo.msg_lev15.tag = 0;
#endif /* MULTIPROCESSOR */
}
343 
344 #if defined(MULTIPROCESSOR)
345 /*
346  * Respond to an xcall() request from another CPU.
347  *
348  * This is also called directly from xcall() if we notice an
349  * incoming message while we're waiting to grab the xpmsg_lock.
350  * We pass the address of xcallintr() itself to indicate that
351  * this is not a real interrupt.
352  */
353 void
354 xcallintr(void *v)
355 {
356 
357 	/* Tally */
358 	if (v != xcallintr)
359 		cpuinfo.ci_sintrcnt[13].ev_count++;
360 
361 	/* notyet - cpuinfo.msg.received = 1; */
362 	switch (cpuinfo.msg.tag) {
363 	case XPMSG_FUNC:
364 	    {
365 		volatile struct xpmsg_func *p = &cpuinfo.msg.u.xpmsg_func;
366 
367 		if (p->func)
368 			(*p->func)(p->arg0, p->arg1, p->arg2);
369 		break;
370 	    }
371 	}
372 	cpuinfo.msg.tag = 0;
373 	cpuinfo.msg.complete = 1;
374 }
375 #endif /* MULTIPROCESSOR */
376 #endif /* SUN4M || SUN4D */
377 
378 
379 #ifdef MSIIEP
380 /*
381  * It's easier to make this separate so that not to further obscure
382  * SUN4M case with more ifdefs.  There's no common functionality
383  * anyway.
384  */
385 
386 #include <sparc/sparc/msiiepreg.h>
387 
388 void	nmi_hard_msiiep(void);
389 void	nmi_soft_msiiep(void);
390 
391 
392 void
393 nmi_hard_msiiep(void)
394 {
395 	uint32_t si;
396 	char bits[128];
397 	int fatal = 0;
398 
399 	si = mspcic_read_4(pcic_sys_ipr);
400 	snprintb(bits, sizeof(bits), MSIIEP_SYS_IPR_BITS, si);
401 	printf("NMI: system interrupts: %s\n", bits);
402 
403 
404 	if (si & MSIIEP_SYS_IPR_MEM_FAULT) {
405 		uint32_t afsr, afar, mfsr, mfar;
406 
407 		afar = *(volatile uint32_t *)MSIIEP_AFAR;
408 		afsr = *(volatile uint32_t *)MSIIEP_AFSR;
409 
410 		mfar = *(volatile uint32_t *)MSIIEP_MFAR;
411 		mfsr = *(volatile uint32_t *)MSIIEP_MFSR;
412 
413 		if (afsr & MSIIEP_AFSR_ERR) {
414 			snprintb(bits, sizeof(bits), MSIIEP_AFSR_BITS, afsr);
415 			printf("async fault: afsr=%s; afar=%08x\n", bits, afsr);
416 		}
417 
418 		if (mfsr & MSIIEP_MFSR_ERR) {
419 			snprintb(bits, sizeof(bits), MSIIEP_MFSR_BITS, mfsr);
420 			printf("mem fault: mfsr=%s; mfar=%08x\n", bits, mfsr);
421 		}
422 
423 		fatal = 0;
424 	}
425 
426 	if (si & MSIIEP_SYS_IPR_SERR) {	/* XXX */
427 		printf("serr#\n");
428 		fatal = 0;
429 	}
430 
431 	if (si & MSIIEP_SYS_IPR_DMA_ERR) {
432 		printf("dma: %08x\n",
433 		       mspcic_read_stream_4(pcic_iotlb_err_addr));
434 		fatal = 0;
435 	}
436 
437 	if (si & MSIIEP_SYS_IPR_PIO_ERR) {
438 		printf("pio: addr=%08x, cmd=%x\n",
439 		       mspcic_read_stream_4(pcic_pio_err_addr),
440 		       mspcic_read_stream_1(pcic_pio_err_cmd));
441 		fatal = 0;
442 	}
443 
444 	if (fatal)
445 		panic("nmi");
446 
447 	/* Clear the NMI if it was PCIC related */
448 	mspcic_write_1(pcic_sys_ipr_clr, MSIIEP_SYS_IPR_CLR_ALL);
449 }
450 
451 
/* ms-IIep soft NMI: no known legitimate source, so treat it as fatal. */
void
nmi_soft_msiiep(void)
{

	panic("soft nmi");
}
458 
459 #endif /* MSIIEP */
460 
461 
462 /*
463  * Level 15 interrupts are special, and not vectored here.
464  * Only `prewired' interrupts appear here; boot-time configured devices
465  * are attached via intr_establish() below.
466  */
/* Hardware interrupt handler chains, indexed by IPL 1-14 (15 is the NMI). */
struct intrhand *intrhand[15] = {
	NULL,			/*  0 = error */
	NULL,			/*  1 = software level 1 + Sbus */
	NULL,	 		/*  2 = Sbus level 2 (4m: Sbus L1) */
	NULL,			/*  3 = SCSI + DMA + Sbus level 3 (4m: L2,lpt)*/
	NULL,			/*  4 = software level 4 (tty softint) (scsi) */
	NULL,			/*  5 = Ethernet + Sbus level 4 (4m: Sbus L3) */
	NULL,			/*  6 = software level 6 (not used) (4m: enet)*/
	NULL,			/*  7 = video + Sbus level 5 */
	NULL,			/*  8 = Sbus level 6 */
	NULL,			/*  9 = Sbus level 7 */
	NULL, 			/* 10 = counter 0 = clock */
	NULL,			/* 11 = floppy */
	NULL,			/* 12 = zs hardware interrupt */
	NULL,			/* 13 = audio chip */
	NULL, 			/* 14 = counter 1 = profiling timer */
};
484 
485 /*
486  * Soft interrupts use a separate set of handler chains.
487  * This is necessary since soft interrupt handlers do not return a value
488  * and therefore cannot be mixed with hardware interrupt handlers on a
489  * shared handler chain.
490  */
491 struct intrhand *sintrhand[15] = { NULL };
492 
493 static void
494 ih_insert(struct intrhand **head, struct intrhand *ih)
495 {
496 	struct intrhand **p, *q;
497 	/*
498 	 * This is O(N^2) for long chains, but chains are never long
499 	 * and we do want to preserve order.
500 	 */
501 	for (p = head; (q = *p) != NULL; p = &q->ih_next)
502 		continue;
503 	*p = ih;
504 	ih->ih_next = NULL;
505 }
506 
507 static void
508 ih_remove(struct intrhand **head, struct intrhand *ih)
509 {
510 	struct intrhand **p, *q;
511 
512 	for (p = head; (q = *p) != ih; p = &q->ih_next)
513 		continue;
514 	if (q == NULL)
515 		panic("intr_remove: intrhand %p fun %p arg %p",
516 			ih, ih->ih_fun, ih->ih_arg);
517 
518 	*p = q->ih_next;
519 	q->ih_next = NULL;
520 }
521 
static int fastvec;		/* marks fast vectors (see below) */
/* Standard interrupt entry points (defined in assembler, elsewhere). */
extern int sparc_interrupt4m[];
extern int sparc_interrupt44c[];
525 
526 #ifdef DIAGNOSTIC
/*
 * Sanity-check that the trap vector for hardware interrupt `level'
 * still holds the standard three-instruction stub before we patch it.
 */
static void
check_tv(int level)
{
	struct trapvec *tv;
	int displ;

	/* double check for legal hardware interrupt */
	tv = &trapbase[T_L1INT - 1 + level];
	/* Branch displacement, in instruction words, from the vector's
	 * second slot to the common interrupt entry point. */
	displ = (CPU_ISSUN4M || CPU_ISSUN4D)
		? &sparc_interrupt4m[0] - &tv->tv_instr[1]
		: &sparc_interrupt44c[0] - &tv->tv_instr[1];

	/* has to be `mov level,%l3; ba _sparc_interrupt; rdpsr %l0' */
	if (tv->tv_instr[0] != I_MOVi(I_L3, level) ||
	    tv->tv_instr[1] != I_BA(0, displ) ||
	    tv->tv_instr[2] != I_RDPSR(I_L0))
		panic("intr_establish(%d)\n0x%x 0x%x 0x%x != 0x%x 0x%x 0x%x",
		    level,
		    tv->tv_instr[0], tv->tv_instr[1], tv->tv_instr[2],
		    I_MOVi(I_L3, level), I_BA(0, displ), I_RDPSR(I_L0));
}
548 #endif
549 
550 /*
551  * Wire a fast trap vector.  Only one such fast trap is legal for any
552  * interrupt, and it must be a hardware interrupt.
553  */
static void
inst_fasttrap(int level, void (*vec)(void))
{
	struct trapvec *tv;
	u_long hi22, lo10;
	int s;

	if (CPU_ISSUN4 || CPU_ISSUN4C) {
		/* Can't wire to softintr slots */
		if (level == 1 || level == 4 || level == 6)
			return;
	}

#ifdef DIAGNOSTIC
	check_tv(level);
#endif

	/* Split the handler address for the sethi/jmpl pair below. */
	tv = &trapbase[T_L1INT - 1 + level];
	hi22 = ((u_long)vec) >> 10;
	lo10 = ((u_long)vec) & 0x3ff;
	s = splhigh();

	/* kernel text is write protected -- let us in for a moment */
	pmap_kprotect((vaddr_t)tv & -PAGE_SIZE, PAGE_SIZE,
	    VM_PROT_READ|VM_PROT_WRITE);
	cpuinfo.cache_flush_all();
	tv->tv_instr[0] = I_SETHI(I_L3, hi22);	/* sethi %hi(vec),%l3 */
	tv->tv_instr[1] = I_JMPLri(I_G0, I_L3, lo10);/* jmpl %l3+%lo(vec),%g0 */
	tv->tv_instr[2] = I_RDPSR(I_L0);	/* mov %psr, %l0 */
	pmap_kprotect((vaddr_t)tv & -PAGE_SIZE, PAGE_SIZE, VM_PROT_READ);
	cpuinfo.cache_flush_all();
	/* Record that this level now has a fast vector wired. */
	fastvec |= 1 << level;
	splx(s);
}
588 
589 /*
590  * Uninstall a fast trap handler.
591  */
static void
uninst_fasttrap(int level)
{
	struct trapvec *tv;
	int displ;	/* suspenders, belt, and buttons too */
	int s;

	tv = &trapbase[T_L1INT - 1 + level];
	s = splhigh();
	/* Branch displacement, in instruction words, back to the common
	 * interrupt entry point (see check_tv() above). */
	displ = (CPU_ISSUN4M || CPU_ISSUN4D)
		? &sparc_interrupt4m[0] - &tv->tv_instr[1]
		: &sparc_interrupt44c[0] - &tv->tv_instr[1];

	/* kernel text is write protected -- let us in for a moment */
	pmap_kprotect((vaddr_t)tv & -PAGE_SIZE, PAGE_SIZE,
	    VM_PROT_READ|VM_PROT_WRITE);
	cpuinfo.cache_flush_all();
	/* Restore the standard `mov level,%l3; ba; rdpsr' stub. */
	tv->tv_instr[0] = I_MOVi(I_L3, level);
	tv->tv_instr[1] = I_BA(0, displ);
	tv->tv_instr[2] = I_RDPSR(I_L0);
	pmap_kprotect((vaddr_t)tv & -PAGE_SIZE, PAGE_SIZE, VM_PROT_READ);
	cpuinfo.cache_flush_all();
	fastvec &= ~(1 << level);
	splx(s);
}
617 
618 /*
619  * Attach an interrupt handler to the vector chain for the given level.
620  * This is not possible if it has been taken away as a fast vector.
621  */
void
intr_establish(int level, int classipl,
	       struct intrhand *ih, void (*vec)(void),
	       bool maybe_mpsafe)
{
	int s = splhigh();
#ifdef MULTIPROCESSOR
	bool mpsafe;
#endif /* MULTIPROCESSOR */
	/* classipl == 0 means: block at the hardware interrupt level. */
	if (classipl == 0)
		classipl = level;

#ifdef MULTIPROCESSOR
	/* Handlers above IPL_VM are implicitly MP-safe here. */
	mpsafe = (classipl != IPL_VM) || maybe_mpsafe;
#endif

#ifdef DIAGNOSTIC
	if (CPU_ISSUN4C) {
		/*
		 * Check reserved softintr slots on SUN4C only.
		 * No check for SUN4, as 4/300's have
		 * esp0 at level 4 and le0 at level 6.
		 */
		if (level == 1 || level == 4 || level == 6)
			panic("intr_establish: reserved softintr level");
	}
#endif

	/*
	 * If a `fast vector' is currently tied to this level, we must
	 * first undo that.
	 */
	if (fastvec & (1 << level)) {
		printf("intr_establish: untie fast vector at level %d\n",
		    level);
		uninst_fasttrap(level);
	} else if (vec != NULL &&
		   intrhand[level] == NULL && sintrhand[level] == NULL) {
		/* First handler at this level: it may go in as a fast trap. */
		inst_fasttrap(level, vec);
	}

	/* A requested IPL cannot exceed its device class level */
	if (classipl < level)
		panic("intr_establish: class lvl (%d) < pil (%d)\n",
			classipl, level);

	/* pre-shift to PIL field in %psr */
	ih->ih_classipl = (classipl << 8) & PSR_PIL;

#ifdef MULTIPROCESSOR
	/* Non-MP-safe handlers get wrapped to take the big kernel lock. */
	if (!mpsafe) {
		ih->ih_realfun = ih->ih_fun;
		ih->ih_realarg = ih->ih_arg;
		ih->ih_fun = intr_biglock_wrapper;
		ih->ih_arg = ih;
	}
#endif /* MULTIPROCESSOR */

	ih_insert(&intrhand[level], ih);
	splx(s);
}
683 
/*
 * Detach handler `ih' from the chain for hardware interrupt `level'.
 * Note that any fast trap vector displaced by intr_establish() is not
 * re-installed here.
 */
void
intr_disestablish(int level, struct intrhand *ih)
{

	ih_remove(&intrhand[level], ih);
}
690 
691 /*
692  * This is a softintr cookie.  NB that sic_pilreq MUST be the
693  * first element in the struct, because the softintr_schedule()
694  * macro in intr.h casts cookies to int * to get it.  On a
695  * sun4m, sic_pilreq is an actual processor interrupt level that
696  * is passed to raise(), and on a sun4 or sun4c sic_pilreq is a
697  * bit to set in the interrupt enable register with ienab_bis().
698  */
struct softintr_cookie {
	int sic_pilreq;		/* CPU-specific bits; MUST be first! */
	int sic_pil;		/* Actual machine PIL that is used */
	struct intrhand sic_hand;	/* entry linked onto sintrhand[sic_pil] */
};
704 
705 /*
706  * softintr_init(): initialise the MI softintr system.
707  */
void
sparc_softintr_init(void)
{

#if defined(MULTIPROCESSOR) && (defined(SUN4M) || defined(SUN4D))
	/* Establish a standard soft interrupt handler for cross calls */
	xcall_cookie = sparc_softintr_establish(13, xcallintr, NULL);
#endif
	/* Nothing else to set up on other configurations. */
}
717 
718 /*
719  * softintr_establish(): MI interface.  establish a func(arg) as a
720  * software interrupt.
721  */
722 void *
723 sparc_softintr_establish(int level, void (*fun)(void *), void *arg)
724 {
725 	struct softintr_cookie *sic;
726 	struct intrhand *ih;
727 	int pilreq;
728 	int pil;
729 #ifdef MULTIPROCESSOR
730 	bool mpsafe = (level != IPL_VM);
731 #endif /* MULTIPROCESSOR */
732 
733 	/*
734 	 * On a sun4m, the processor interrupt level is stored
735 	 * in the softintr cookie to be passed to raise().
736 	 *
737 	 * On a sun4 or sun4c the appropriate bit to set
738 	 * in the interrupt enable register is stored in
739 	 * the softintr cookie to be passed to ienab_bis().
740 	 */
741 	pil = pilreq = level;
742 	if (CPU_ISSUN4 || CPU_ISSUN4C) {
743 		/* Select the most suitable of three available softint levels */
744 		if (level >= 1 && level < 4) {
745 			pil = 1;
746 			pilreq = IE_L1;
747 		} else if (level >= 4 && level < 6) {
748 			pil = 4;
749 			pilreq = IE_L4;
750 		} else {
751 			pil = 6;
752 			pilreq = IE_L6;
753 		}
754 	}
755 
756 	sic = malloc(sizeof(*sic), M_DEVBUF, 0);
757 	sic->sic_pil = pil;
758 	sic->sic_pilreq = pilreq;
759 	ih = &sic->sic_hand;
760 #ifdef MULTIPROCESSOR
761 	if (!mpsafe) {
762 		ih->ih_realfun = (int (*)(void *))fun;
763 		ih->ih_realarg = arg;
764 		ih->ih_fun = intr_biglock_wrapper;
765 		ih->ih_arg = ih;
766 	} else
767 #endif /* MULTIPROCESSOR */
768 	{
769 		ih->ih_fun = (int (*)(void *))fun;
770 		ih->ih_arg = arg;
771 	}
772 
773 	/*
774 	 * Always run the handler at the requested level, which might
775 	 * be higher than the hardware can provide.
776 	 *
777 	 * pre-shift to PIL field in %psr
778 	 */
779 	ih->ih_classipl = (level << 8) & PSR_PIL;
780 
781 	if (fastvec & (1 << pil)) {
782 		printf("softintr_establish: untie fast vector at level %d\n",
783 		    pil);
784 		uninst_fasttrap(level);
785 	}
786 
787 	ih_insert(&sintrhand[pil], ih);
788 	return (void *)sic;
789 }
790 
791 /*
792  * softintr_disestablish(): MI interface.  disestablish the specified
793  * software interrupt.
794  */
void
sparc_softintr_disestablish(void *cookie)
{
	struct softintr_cookie *sic = cookie;

	/* Unhook from the chain it was inserted on (sic_pil, which may
	 * differ from the level originally requested) and free the cookie. */
	ih_remove(&sintrhand[sic->sic_pil], &sic->sic_hand);
	free(cookie, M_DEVBUF);
}
803 
#if 0
/*
 * Dormant reference implementation; the live softintr_schedule() is
 * presumably the macro in <machine/intr.h> mentioned above, which
 * reads sic_pilreq through an int cast — verify before enabling.
 */
void
sparc_softintr_schedule(void *cookie)
{
	struct softintr_cookie *sic = cookie;
	if (CPU_ISSUN4M || CPU_ISSUN4D) {
#if defined(SUN4M) || defined(SUN4D)
		extern void raise(int,int);
		raise(0, sic->sic_pilreq);
#endif
	} else {
#if defined(SUN4) || defined(SUN4C)
		ienab_bis(sic->sic_pilreq);
#endif
	}
}
#endif
821 
822 #ifdef MULTIPROCESSOR
823 
824 /*
825  * intr_biglock_wrapper: grab biglock and call a real interrupt handler.
826  */
827 
828 static int
829 intr_biglock_wrapper(void *vp)
830 {
831 	struct intrhand *ih = vp;
832 	int ret;
833 
834 	KERNEL_LOCK(1, NULL);
835 
836 	ret = (*ih->ih_realfun)(ih->ih_realarg);
837 
838 	KERNEL_UNLOCK_ONE(NULL);
839 
840 	return ret;
841 }
842 #endif /* MULTIPROCESSOR */
843 
844 bool
845 cpu_intr_p(void)
846 {
847 
848 	return curcpu()->ci_idepth != 0;
849 }
850