xref: /netbsd/sys/arch/sparc/sparc/intr.c (revision bf9ec67e)
1 /*	$NetBSD: intr.c,v 1.58 2001/12/04 00:05:06 darrenr Exp $ */
2 
3 /*
4  * Copyright (c) 1992, 1993
5  *	The Regents of the University of California.  All rights reserved.
6  *
7  * This software was developed by the Computer Systems Engineering group
8  * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
9  * contributed to Berkeley.
10  *
11  * All advertising materials mentioning features or use of this software
12  * must display the following acknowledgement:
13  *	This product includes software developed by the University of
14  *	California, Lawrence Berkeley Laboratory.
15  *
16  * Redistribution and use in source and binary forms, with or without
17  * modification, are permitted provided that the following conditions
18  * are met:
19  * 1. Redistributions of source code must retain the above copyright
20  *    notice, this list of conditions and the following disclaimer.
21  * 2. Redistributions in binary form must reproduce the above copyright
22  *    notice, this list of conditions and the following disclaimer in the
23  *    documentation and/or other materials provided with the distribution.
24  * 3. All advertising materials mentioning features or use of this software
25  *    must display the following acknowledgement:
26  *	This product includes software developed by the University of
27  *	California, Berkeley and its contributors.
28  * 4. Neither the name of the University nor the names of its contributors
29  *    may be used to endorse or promote products derived from this software
30  *    without specific prior written permission.
31  *
32  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
33  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
34  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
35  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
36  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
37  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
38  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
39  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
40  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
41  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
42  * SUCH DAMAGE.
43  *
44  *	@(#)intr.c	8.3 (Berkeley) 11/11/93
45  */
46 
47 #include "opt_multiprocessor.h"
48 #include "opt_sparc_arch.h"
49 
50 #include <sys/param.h>
51 #include <sys/systm.h>
52 #include <sys/kernel.h>
53 #include <sys/malloc.h>
54 
55 #include <uvm/uvm_extern.h>
56 
57 #include <dev/cons.h>
58 
59 #include <net/netisr.h>
60 
61 #include <machine/cpu.h>
62 #include <machine/ctlreg.h>
63 #include <machine/instr.h>
64 #include <machine/intr.h>
65 #include <machine/trap.h>
66 #include <machine/promlib.h>
67 
68 #include <sparc/sparc/asm.h>
69 #include <sparc/sparc/cpuvar.h>
70 
71 #if defined(MULTIPROCESSOR) && defined(DDB)
72 #include <machine/db_machdep.h>
73 #endif
74 
/* Cookie returned by softintr_establish() for the softnet handler;
 * used by the schednetisr() machinery to schedule softnet(). */
void *softnet_cookie;

void	strayintr __P((struct clockframe *));
void	softnet __P((void *));
79 
80 /*
81  * Stray interrupt handler.  Clear it if possible.
82  * If not, and if we get 10 interrupts in 10 seconds, panic.
83  * XXXSMP: We are holding the kernel lock at entry & exit.
84  */
85 void
86 strayintr(fp)
87 	struct clockframe *fp;
88 {
89 	static int straytime, nstray;
90 	char bits[64];
91 	int timesince;
92 
93 	printf("stray interrupt ipl 0x%x pc=0x%x npc=0x%x psr=%s\n",
94 		fp->ipl, fp->pc, fp->npc, bitmask_snprintf(fp->psr,
95 		       PSR_BITS, bits, sizeof(bits)));
96 
97 	timesince = time.tv_sec - straytime;
98 	if (timesince <= 10) {
99 		if (++nstray > 9)
100 			panic("crazy interrupts");
101 	} else {
102 		straytime = time.tv_sec;
103 		nstray = 1;
104 	}
105 }
106 
107 /*
108  * Process software network interrupts.
109  */
/*
 * Process software network interrupts: atomically snapshot and clear
 * the pending-protocol bitmask `netisr', then run the per-protocol
 * handlers for every bit that was set.
 *
 * fp: softintr cookie argument (unused).
 */
void
softnet(fp)
	void *fp;
{
	int n, s;

	/* Snapshot and clear netisr with interrupts blocked so bits
	 * posted concurrently by hardware interrupts are not lost. */
	s = splhigh();
	n = netisr;
	netisr = 0;
	splx(s);

	if (n == 0)
		return;

	/* netisr_dispatch.h expands to one DONETISR(bit, fn) per
	 * configured protocol; each invocation below tests its bit in
	 * the snapshot and calls the protocol's handler if set. */
#define DONETISR(bit, fn) do {		\
	if (n & (1 << bit))		\
		fn();			\
	} while (0)

#include <net/netisr_dispatch.h>

#undef DONETISR
}
133 
134 #if defined(SUN4M)
135 void	nmi_hard __P((void));
136 void	nmi_soft __P((struct trapframe *));
137 
138 int	(*memerr_handler) __P((void));
139 int	(*sbuserr_handler) __P((void));
140 int	(*vmeerr_handler) __P((void));
141 int	(*moduleerr_handler) __P((void));
142 
143 #if defined(MULTIPROCESSOR)
144 volatile int nmi_hard_wait = 0;
145 struct simplelock nmihard_lock = SIMPLELOCK_INITIALIZER;
146 #endif
147 
/*
 * Handle a level 15 (non-maskable) hard interrupt: report per-CPU
 * asynchronous fault state, rendezvous all CPUs (MULTIPROCESSOR),
 * then let the master examine the pending system interrupt register
 * and dispatch to the registered async-error handlers.  Panics if any
 * handler reports the fault as fatal.
 */
void
nmi_hard()
{
	/*
	 * A level 15 hard interrupt.
	 */
	int fatal = 0;
	u_int32_t si;
	char bits[64];
	u_int afsr, afva;

	/* Read and report this CPU's async fault status/address, if
	 * the module-specific hook can retrieve them. */
	afsr = afva = 0;
	if ((*cpuinfo.get_asyncflt)(&afsr, &afva) == 0) {
		printf("Async registers (mid %d): afsr=%s; afva=0x%x%x\n",
			cpuinfo.mid,
			bitmask_snprintf(afsr, AFSR_BITS, bits, sizeof(bits)),
			(afsr & AFSR_AFA) >> AFSR_AFA_RSHIFT, afva);
	}

#if defined(MULTIPROCESSOR)
	/*
	 * Increase nmi_hard_wait.  If we aren't the master, loop while this
	 * variable is non-zero.  If we are the master, loop while this
	 * variable is less than the number of cpus.
	 */
	simple_lock(&nmihard_lock);
	nmi_hard_wait++;
	simple_unlock(&nmihard_lock);

	if (cpuinfo.master == 0) {
		/* Slave: spin until the master clears nmi_hard_wait
		 * below, then return without touching ICR_SI_PEND. */
		while (nmi_hard_wait)
			;
		return;
	} else {
		int n = 0;

		/* Master: wait (bounded) for every CPU to check in. */
		while (nmi_hard_wait < ncpu)
			if (n++ > 100000)
				panic("nmi_hard: SMP botch.");
	}
#endif

	/*
	 * Examine pending system interrupts.
	 */
	si = *((u_int32_t *)ICR_SI_PEND);
	printf("cpu%d: NMI: system interrupts: %s\n", cpu_number(),
		bitmask_snprintf(si, SINTR_BITS, bits, sizeof(bits)));

	/* Dispatch each pending async-error source to its registered
	 * handler (if any); a handler's non-zero return marks the
	 * condition fatal. */
	if ((si & SINTR_M) != 0) {
		/* ECC memory error */
		if (memerr_handler != NULL)
			fatal |= (*memerr_handler)();
	}
	if ((si & SINTR_I) != 0) {
		/* MBus/SBus async error */
		if (sbuserr_handler != NULL)
			fatal |= (*sbuserr_handler)();
	}
	if ((si & SINTR_V) != 0) {
		/* VME async error */
		if (vmeerr_handler != NULL)
			fatal |= (*vmeerr_handler)();
	}
	if ((si & SINTR_ME) != 0) {
		/* Module async error */
		if (moduleerr_handler != NULL)
			fatal |= (*moduleerr_handler)();
	}

#if defined(MULTIPROCESSOR)
	/*
	 * Tell everyone else we've finished dealing with the hard NMI.
	 */
	simple_lock(&nmihard_lock);
	nmi_hard_wait = 0;
	simple_unlock(&nmihard_lock);
#endif

	if (fatal)
		panic("nmi");
}
230 
/*
 * Handle a level 15 soft interrupt.  On MULTIPROCESSOR kernels this is
 * the receive side of the cross-call (XPMSG_*) mechanism: execute the
 * request another CPU left in cpuinfo.msg, then set CPUFLG_GOTMSG to
 * acknowledge.  tf is the trap frame of the interrupted context (used
 * only by the DDB pause case).
 */
void
nmi_soft(tf)
	struct trapframe *tf;
{

#ifdef MULTIPROCESSOR
	switch (cpuinfo.msg.tag) {
	case XPMSG_SAVEFPU:
		/* Flush this CPU's FPU state to its owner's pcb and
		 * disassociate the FPU from that process. */
		savefpstate(cpuinfo.fpproc->p_md.md_fpstate);
		cpuinfo.fpproc->p_md.md_fpumid = -1;
		cpuinfo.fpproc = NULL;
		break;
	case XPMSG_PAUSECPU:
	    {
#if defined(DDB)
		/* Publish this CPU's registers so DDB on the master
		 * can inspect us while we are paused. */
		db_regs_t regs;

		regs.db_tf = *tf;
		regs.db_fr = *(struct frame *)tf->tf_out[6];
		cpuinfo.ci_ddb_regs = &regs;
#endif
		/* GOTMSG is set *before* spinning so the sender is not
		 * blocked waiting on an ack while we are paused; hence
		 * the early return skips the common ack below. */
		cpuinfo.flags |= CPUFLG_PAUSED|CPUFLG_GOTMSG;
		while (cpuinfo.flags & CPUFLG_PAUSED)
			cpuinfo.cache_flush((caddr_t)&cpuinfo.flags,
			    sizeof(cpuinfo.flags));
#if defined(DDB)
		cpuinfo.ci_ddb_regs = 0;
#endif
		return;
	    }
	case XPMSG_FUNC:
	    {
		/* Run an arbitrary function on behalf of the sender
		 * and pass its return value back in the message. */
		struct xpmsg_func *p = &cpuinfo.msg.u.xpmsg_func;

		p->retval = (*p->func)(p->arg0, p->arg1, p->arg2, p->arg3);
		break;
	    }
	/*
	 * The remaining cases perform a cache flush or TLB demap in the
	 * sender's MMU context: save our context, switch to the one in
	 * the message, do the operation, and switch back.
	 */
	case XPMSG_VCACHE_FLUSH_PAGE:
	    {
		struct xpmsg_flush_page *p = &cpuinfo.msg.u.xpmsg_flush_page;
		int ctx = getcontext();

		setcontext(p->ctx);
		cpuinfo.sp_vcache_flush_page(p->va);
		setcontext(ctx);
		break;
	    }
	case XPMSG_VCACHE_FLUSH_SEGMENT:
	    {
		struct xpmsg_flush_segment *p = &cpuinfo.msg.u.xpmsg_flush_segment;
		int ctx = getcontext();

		setcontext(p->ctx);
		cpuinfo.sp_vcache_flush_segment(p->vr, p->vs);
		setcontext(ctx);
		break;
	    }
	case XPMSG_VCACHE_FLUSH_REGION:
	    {
		struct xpmsg_flush_region *p = &cpuinfo.msg.u.xpmsg_flush_region;
		int ctx = getcontext();

		setcontext(p->ctx);
		cpuinfo.sp_vcache_flush_region(p->vr);
		setcontext(ctx);
		break;
	    }
	case XPMSG_VCACHE_FLUSH_CONTEXT:
	    {
		struct xpmsg_flush_context *p = &cpuinfo.msg.u.xpmsg_flush_context;
		int ctx = getcontext();

		setcontext(p->ctx);
		cpuinfo.sp_vcache_flush_context();
		setcontext(ctx);
		break;
	    }
	case XPMSG_VCACHE_FLUSH_RANGE:
	    {
		struct xpmsg_flush_range *p = &cpuinfo.msg.u.xpmsg_flush_range;
		int ctx = getcontext();

		setcontext(p->ctx);
		cpuinfo.sp_cache_flush(p->va, p->size);
		setcontext(ctx);
		break;
	    }
	case XPMSG_DEMAP_TLB_PAGE:
	    {
		struct xpmsg_flush_page *p = &cpuinfo.msg.u.xpmsg_flush_page;
		int ctx = getcontext();

		setcontext(p->ctx);
		tlb_flush_page_real(p->va);
		setcontext(ctx);
		break;
	    }
	case XPMSG_DEMAP_TLB_SEGMENT:
	    {
		struct xpmsg_flush_segment *p = &cpuinfo.msg.u.xpmsg_flush_segment;
		int ctx = getcontext();

		setcontext(p->ctx);
		tlb_flush_segment_real(p->vr, p->vs);
		setcontext(ctx);
		break;
	    }
	case XPMSG_DEMAP_TLB_REGION:
	    {
		struct xpmsg_flush_region *p = &cpuinfo.msg.u.xpmsg_flush_region;
		int ctx = getcontext();

		setcontext(p->ctx);
		tlb_flush_region_real(p->vr);
		setcontext(ctx);
		break;
	    }
	case XPMSG_DEMAP_TLB_CONTEXT:
	    {
		struct xpmsg_flush_context *p = &cpuinfo.msg.u.xpmsg_flush_context;
		int ctx = getcontext();

		setcontext(p->ctx);
		tlb_flush_context_real();
		setcontext(ctx);
		break;
	    }
	case XPMSG_DEMAP_TLB_ALL:
		tlb_flush_all_real();
		break;
	}
	/* Acknowledge: tell the sender the message has been handled. */
	cpuinfo.flags |= CPUFLG_GOTMSG;
#endif
}
365 #endif
366 
367 /*
368  * Level 15 interrupts are special, and not vectored here.
369  * Only `prewired' interrupts appear here; boot-time configured devices
370  * are attached via intr_establish() below.
371  */
/* Per-IPL chains of interrupt handlers, indexed by interrupt level
 * 1..14 (level 15 / NMI is handled separately; entry 0 is unused). */
struct intrhand *intrhand[15] = {
	NULL,			/*  0 = error */
	NULL,			/*  1 = software level 1 + Sbus */
	NULL,	 		/*  2 = Sbus level 2 (4m: Sbus L1) */
	NULL,			/*  3 = SCSI + DMA + Sbus level 3 (4m: L2,lpt)*/
	NULL,			/*  4 = software level 4 (tty softint) (scsi) */
	NULL,			/*  5 = Ethernet + Sbus level 4 (4m: Sbus L3) */
	NULL,			/*  6 = software level 6 (not used) (4m: enet)*/
	NULL,			/*  7 = video + Sbus level 5 */
	NULL,			/*  8 = Sbus level 6 */
	NULL,			/*  9 = Sbus level 7 */
	NULL, 			/* 10 = counter 0 = clock */
	NULL,			/* 11 = floppy */
	NULL,			/* 12 = zs hardware interrupt */
	NULL,			/* 13 = audio chip */
	NULL, 			/* 14 = counter 1 = profiling timer */
};
389 
static int fastvec;		/* bitmask of levels whose trap vector has
				 * been taken over by intr_fasttrap() */
#ifdef DIAGNOSTIC
extern int sparc_interrupt4m[];
extern int sparc_interrupt44c[];
#endif
395 
396 /*
397  * Attach an interrupt handler to the vector chain for the given level.
398  * This is not possible if it has been taken away as a fast vector.
399  */
400 void
401 intr_establish(level, ih)
402 	int level;
403 	struct intrhand *ih;
404 {
405 	struct intrhand **p, *q;
406 #ifdef DIAGNOSTIC
407 	struct trapvec *tv;
408 	int displ;
409 #endif
410 	int s;
411 
412 	s = splhigh();
413 	if (fastvec & (1 << level))
414 		panic("intr_establish: level %d interrupt tied to fast vector",
415 		    level);
416 #ifdef DIAGNOSTIC
417 	/* double check for legal hardware interrupt */
418 	if ((level != 1 && level != 4 && level != 6) || CPU_ISSUN4M) {
419 		tv = &trapbase[T_L1INT - 1 + level];
420 		displ = (CPU_ISSUN4M)
421 			? &sparc_interrupt4m[0] - &tv->tv_instr[1]
422 			: &sparc_interrupt44c[0] - &tv->tv_instr[1];
423 
424 		/* has to be `mov level,%l3; ba _sparc_interrupt; rdpsr %l0' */
425 		if (tv->tv_instr[0] != I_MOVi(I_L3, level) ||
426 		    tv->tv_instr[1] != I_BA(0, displ) ||
427 		    tv->tv_instr[2] != I_RDPSR(I_L0))
428 			panic("intr_establish(%d, %p)\n0x%x 0x%x 0x%x != 0x%x 0x%x 0x%x",
429 			    level, ih,
430 			    tv->tv_instr[0], tv->tv_instr[1], tv->tv_instr[2],
431 			    I_MOVi(I_L3, level), I_BA(0, displ), I_RDPSR(I_L0));
432 	}
433 #endif
434 	/*
435 	 * This is O(N^2) for long chains, but chains are never long
436 	 * and we do want to preserve order.
437 	 */
438 	for (p = &intrhand[level]; (q = *p) != NULL; p = &q->ih_next)
439 		continue;
440 	*p = ih;
441 	ih->ih_next = NULL;
442 	splx(s);
443 }
444 
445 void
446 intr_disestablish(level, ih)
447 	int level;
448 	struct intrhand *ih;
449 {
450 	struct intrhand **p, *q;
451 
452 	for (p = &intrhand[level]; (q = *p) != ih; p = &q->ih_next)
453 		continue;
454 	if (q == NULL)
455 		panic("intr_disestablish: level %d intrhand %p fun %p arg %p\n",
456 		    level, ih, ih->ih_fun, ih->ih_arg);
457 
458 	*p = q->ih_next;
459 	q->ih_next = NULL;
460 }
461 
462 /*
463  * Like intr_establish, but wires a fast trap vector.  Only one such fast
464  * trap is legal for any interrupt, and it must be a hardware interrupt.
465  */
/*
 * Like intr_establish, but wires a fast trap vector.  Only one such fast
 * trap is legal for any interrupt, and it must be a hardware interrupt.
 * The trap table entry for `level' is patched in place to jump directly
 * to `vec', bypassing the normal sparc_interrupt dispatch path.
 */
void
intr_fasttrap(level, vec)
	int level;
	void (*vec) __P((void));
{
	struct trapvec *tv;
	u_long hi22, lo10;	/* sethi/jmpl halves of vec's address */
#ifdef DIAGNOSTIC
	int displ;	/* suspenders, belt, and buttons too */
#endif
	int s;

	tv = &trapbase[T_L1INT - 1 + level];
	hi22 = ((u_long)vec) >> 10;
	lo10 = ((u_long)vec) & 0x3ff;
	s = splhigh();
	/* Refuse if the level already has a fast vector or chained
	 * handlers -- a fast trap must own the level exclusively. */
	if ((fastvec & (1 << level)) != 0 || intrhand[level] != NULL)
		panic("intr_fasttrap: already handling level %d interrupts",
		    level);
#ifdef DIAGNOSTIC
	/* Verify the vector still holds the stock dispatch sequence
	 * before overwriting it. */
	displ = (CPU_ISSUN4M)
		? &sparc_interrupt4m[0] - &tv->tv_instr[1]
		: &sparc_interrupt44c[0] - &tv->tv_instr[1];

	/* has to be `mov level,%l3; ba _sparc_interrupt; rdpsr %l0' */
	if (tv->tv_instr[0] != I_MOVi(I_L3, level) ||
	    tv->tv_instr[1] != I_BA(0, displ) ||
	    tv->tv_instr[2] != I_RDPSR(I_L0))
		panic("intr_fasttrap(%d, %p)\n0x%x 0x%x 0x%x != 0x%x 0x%x 0x%x",
		    level, vec,
		    tv->tv_instr[0], tv->tv_instr[1], tv->tv_instr[2],
		    I_MOVi(I_L3, level), I_BA(0, displ), I_RDPSR(I_L0));
#endif
	/* kernel text is write protected -- let us in for a moment */
	pmap_changeprot(pmap_kernel(), (vaddr_t)tv,
	    VM_PROT_READ|VM_PROT_WRITE, 1);
	cpuinfo.cache_flush_all();
	tv->tv_instr[0] = I_SETHI(I_L3, hi22);	/* sethi %hi(vec),%l3 */
	tv->tv_instr[1] = I_JMPLri(I_G0, I_L3, lo10);/* jmpl %l3+%lo(vec),%g0 */
	tv->tv_instr[2] = I_RDPSR(I_L0);	/* mov %psr, %l0 */
	pmap_changeprot(pmap_kernel(), (vaddr_t)tv, VM_PROT_READ, 1);
	/* flush again so the patched instructions are what gets fetched */
	cpuinfo.cache_flush_all();
	fastvec |= 1 << level;
	splx(s);
}
511 
512 /*
513  * softintr_init(): initialise the MI softintr system.
514  */
/*
 * softintr_init(): initialise the MI softintr system.
 * Registers softnet() as the handler behind softnet_cookie.
 */
void
softintr_init()
{

	softnet_cookie = softintr_establish(IPL_SOFTNET, softnet, NULL);
}
521 
522 /*
523  * softintr_establish(): MI interface.  establish a func(arg) as a
524  * software interrupt.
525  */
526 void *
527 softintr_establish(level, fun, arg)
528 	int level;
529 	void (*fun) __P((void *));
530 	void *arg;
531 {
532 	struct intrhand *ih;
533 
534 	ih = malloc(sizeof(*ih), M_DEVBUF, 0);
535 	bzero(ih, sizeof(*ih));
536 	ih->ih_fun = (int (*) __P((void *)))fun;
537 	ih->ih_arg = arg;
538 	ih->ih_next = 0;
539 	intr_establish(1, ih);
540 	return (void *)ih;
541 }
542 
543 /*
544  * softintr_disestablish(): MI interface.  disestablish the specified
545  * software interrupt.
546  */
/*
 * softintr_disestablish(): MI interface.  disestablish the specified
 * software interrupt.
 * `cookie' must be a value returned by softintr_establish(); its
 * intrhand is unlinked from the level 1 chain and freed.
 */
void
softintr_disestablish(cookie)
	void *cookie;
{

	intr_disestablish(1, cookie);
	free(cookie, M_DEVBUF);
}
555 
556 #ifdef MULTIPROCESSOR
557 /*
558  * Called by interrupt stubs, etc., to lock/unlock the kernel.
559  */
/*
 * Called by interrupt stubs, etc., to lock/unlock the kernel.
 * Takes the big-lock exclusively; LK_CANRECURSE allows nested
 * acquisition from within an already-locked context.
 */
void
intr_lock_kernel()
{

	KERNEL_LOCK(LK_CANRECURSE|LK_EXCLUSIVE);
}
566 
/* Release the kernel big-lock taken by intr_lock_kernel(). */
void
intr_unlock_kernel()
{

	KERNEL_UNLOCK();
}
573 #endif
574