xref: /netbsd/sys/arch/alpha/alpha/interrupt.c (revision bf9ec67e)
1 /* $NetBSD: interrupt.c,v 1.63 2001/07/27 00:25:18 thorpej Exp $ */
2 
3 /*-
4  * Copyright (c) 2000, 2001 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Jason R. Thorpe.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *	This product includes software developed by the NetBSD
21  *	Foundation, Inc. and its contributors.
22  * 4. Neither the name of The NetBSD Foundation nor the names of its
23  *    contributors may be used to endorse or promote products derived
24  *    from this software without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36  * POSSIBILITY OF SUCH DAMAGE.
37  */
38 
39 /*
40  * Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
41  * All rights reserved.
42  *
43  * Authors: Keith Bostic, Chris G. Demetriou
44  *
45  * Permission to use, copy, modify and distribute this software and
46  * its documentation is hereby granted, provided that both the copyright
47  * notice and this permission notice appear in all copies of the
48  * software, derivative works or modified versions, and any portions
49  * thereof, and that both notices appear in supporting documentation.
50  *
51  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
52  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
53  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
54  *
55  * Carnegie Mellon requests users of this software to return to
56  *
57  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
58  *  School of Computer Science
59  *  Carnegie Mellon University
60  *  Pittsburgh PA 15213-3890
61  *
62  * any improvements or extensions that they make and grant Carnegie the
63  * rights to redistribute these changes.
64  */
65 /*
66  * Additional Copyright (c) 1997 by Matthew Jacob for NASA/Ames Research Center.
67  * Redistribute and modify at will, leaving only this additional copyright
68  * notice.
69  */
70 
71 #include "opt_multiprocessor.h"
72 
73 #include <sys/cdefs.h>			/* RCS ID & Copyright macro defns */
74 
75 __KERNEL_RCSID(0, "$NetBSD: interrupt.c,v 1.63 2001/07/27 00:25:18 thorpej Exp $");
76 
77 #include <sys/param.h>
78 #include <sys/systm.h>
79 #include <sys/proc.h>
80 #include <sys/vmmeter.h>
81 #include <sys/sched.h>
82 #include <sys/malloc.h>
83 #include <sys/kernel.h>
84 #include <sys/time.h>
85 
86 #include <machine/cpuvar.h>
87 
88 /* XXX Network interrupts should be converted to new softintrs */
89 #include <net/netisr.h>
90 
91 #include <uvm/uvm_extern.h>
92 
93 #include <machine/atomic.h>
94 #include <machine/autoconf.h>
95 #include <machine/cpu.h>
96 #include <machine/reg.h>
97 #include <machine/rpb.h>
98 #include <machine/frame.h>
99 #include <machine/cpuconf.h>
100 #include <machine/alpha.h>
101 
102 #if defined(MULTIPROCESSOR)
103 #include <sys/device.h>
104 #endif
105 
106 struct scbvec scb_iovectab[SCB_VECTOIDX(SCB_SIZE - SCB_IOVECBASE)];
107 
108 void	netintr(void);
109 
110 void	scb_stray(void *, u_long);
111 
112 void
113 scb_init(void)
114 {
115 	u_long i;
116 
117 	for (i = 0; i < SCB_NIOVECS; i++) {
118 		scb_iovectab[i].scb_func = scb_stray;
119 		scb_iovectab[i].scb_arg = NULL;
120 	}
121 }
122 
123 void
124 scb_stray(void *arg, u_long vec)
125 {
126 
127 	printf("WARNING: stray interrupt, vector 0x%lx\n", vec);
128 }
129 
130 void
131 scb_set(u_long vec, void (*func)(void *, u_long), void *arg)
132 {
133 	u_long idx;
134 	int s;
135 
136 	s = splhigh();
137 
138 	if (vec < SCB_IOVECBASE || vec >= SCB_SIZE ||
139 	    (vec & (SCB_VECSIZE - 1)) != 0)
140 		panic("scb_set: bad vector 0x%lx", vec);
141 
142 	idx = SCB_VECTOIDX(vec - SCB_IOVECBASE);
143 
144 	if (scb_iovectab[idx].scb_func != scb_stray)
145 		panic("scb_set: vector 0x%lx already occupied", vec);
146 
147 	scb_iovectab[idx].scb_func = func;
148 	scb_iovectab[idx].scb_arg = arg;
149 
150 	splx(s);
151 }
152 
153 u_long
154 scb_alloc(void (*func)(void *, u_long), void *arg)
155 {
156 	u_long vec, idx;
157 	int s;
158 
159 	s = splhigh();
160 
161 	/*
162 	 * Allocate "downwards", to avoid bumping into
163 	 * interrupts which are likely to be at the lower
164 	 * vector numbers.
165 	 */
166 	for (vec = SCB_SIZE - SCB_VECSIZE;
167 	     vec >= SCB_IOVECBASE; vec -= SCB_VECSIZE) {
168 		idx = SCB_VECTOIDX(vec - SCB_IOVECBASE);
169 		if (scb_iovectab[idx].scb_func == scb_stray) {
170 			scb_iovectab[idx].scb_func = func;
171 			scb_iovectab[idx].scb_arg = arg;
172 			splx(s);
173 			return (vec);
174 		}
175 	}
176 
177 	splx(s);
178 
179 	return (SCB_ALLOC_FAILED);
180 }
181 
182 void
183 scb_free(u_long vec)
184 {
185 	u_long idx;
186 	int s;
187 
188 	s = splhigh();
189 
190 	if (vec < SCB_IOVECBASE || vec >= SCB_SIZE ||
191 	    (vec & (SCB_VECSIZE - 1)) != 0)
192 		panic("scb_free: bad vector 0x%lx", vec);
193 
194 	idx = SCB_VECTOIDX(vec - SCB_IOVECBASE);
195 
196 	if (scb_iovectab[idx].scb_func == scb_stray)
197 		panic("scb_free: vector 0x%lx is empty", vec);
198 
199 	scb_iovectab[idx].scb_func = scb_stray;
200 	scb_iovectab[idx].scb_arg = (void *) vec;
201 
202 	splx(s);
203 }
204 
/*
 * interrupt:
 *
 *	Main interrupt dispatcher, called from the PALcode entry stub.
 *	`a0' selects the interrupt class (IPI, clock, error, device,
 *	performance counter, passive release); `a1'/`a2' are
 *	class-specific parameters (for devices, a1 is the SCB vector);
 *	`framep' is the trap frame pushed on entry.
 */
void
interrupt(unsigned long a0, unsigned long a1, unsigned long a2,
    struct trapframe *framep)
{
	static int microset_iter;	/* call microset() once per sec. */
	struct cpu_info *ci = curcpu();
	struct cpu_softc *sc = ci->ci_softc;
	struct proc *p;

	switch (a0) {
	case ALPHA_INTR_XPROC:	/* interprocessor interrupt */
#if defined(MULTIPROCESSOR)
		/* Track depth so IPI time is accounted as interrupt time. */
		atomic_add_ulong(&ci->ci_intrdepth, 1);

		alpha_ipi_process(ci, framep);

		/*
		 * Handle inter-console messages if we're the primary
		 * CPU.
		 */
		if (ci->ci_cpuid == hwrpb->rpb_primary_cpu_id &&
		    hwrpb->rpb_txrdy != 0)
			cpu_iccb_receive();

		atomic_sub_ulong(&ci->ci_intrdepth, 1);
#else
		printf("WARNING: received interprocessor interrupt!\n");
#endif /* MULTIPROCESSOR */
		break;

	case ALPHA_INTR_CLOCK:	/* clock interrupt */
		/*
		 * We don't increment the interrupt depth for the
		 * clock interrupt, since it is *sampled* from
		 * the clock interrupt, so if we did, all system
		 * time would be counted as interrupt time.
		 */
		sc->sc_evcnt_clock.ev_count++;
		uvmexp.intrs++;
		/*
		 * Update the PCC frequency for use by microtime().
		 * Only the primary CPU drives the once-per-second
		 * resync; on MP it also kicks the others via IPI.
		 */
		if (
#if defined(MULTIPROCESSOR)
		    CPU_IS_PRIMARY(ci) &&
#endif

		    microset_iter-- == 0) {
			microset_iter = hz-1;
			microset_time = time;
#if defined(MULTIPROCESSOR)
			alpha_multicast_ipi(cpus_running,
			    ALPHA_IPI_MICROSET);
#endif
			microset(ci, framep);
		}
		if (platform.clockintr) {
			/*
			 * Call hardclock().  This will also call
			 * statclock(). On the primary CPU, it
			 * will also deal with time-of-day stuff.
			 */
			(*platform.clockintr)((struct clockframe *)framep);

			/*
			 * If it's time to call the scheduler clock,
			 * do so.
			 */
			if ((++ci->ci_schedstate.spc_schedticks & 0x3f) == 0 &&
			    (p = ci->ci_curproc) != NULL && schedhz != 0)
				schedclock(p);
		}
		break;

	case ALPHA_INTR_ERROR:	/* Machine Check or Correctable Error */
		atomic_add_ulong(&ci->ci_intrdepth, 1);
		/* Re-read the machine check/error summary from PAL. */
		a0 = alpha_pal_rdmces();
		/*
		 * Faults whose PC is the arithmetic-trap entry point
		 * bypass the platform handler (see machine_check()'s
		 * matching XentArith check).
		 */
		if (platform.mcheck_handler != NULL &&
		    (void *)framep->tf_regs[FRAME_PC] != XentArith)
			(*platform.mcheck_handler)(a0, framep, a1, a2);
		else
			machine_check(a0, framep, a1, a2);
		atomic_sub_ulong(&ci->ci_intrdepth, 1);
		break;

	case ALPHA_INTR_DEVICE:	/* I/O device interrupt */
	    {
		struct scbvec *scb;

		KDASSERT(a1 >= SCB_IOVECBASE && a1 < SCB_SIZE);

		atomic_add_ulong(&sc->sc_evcnt_device.ev_count, 1);
		atomic_add_ulong(&ci->ci_intrdepth, 1);

		/* Device handlers run under the big kernel lock. */
		KERNEL_LOCK(LK_CANRECURSE|LK_EXCLUSIVE);

		uvmexp.intrs++;

		/* Dispatch through the SCB vector table (see scb_set()). */
		scb = &scb_iovectab[SCB_VECTOIDX(a1 - SCB_IOVECBASE)];
		(*scb->scb_func)(scb->scb_arg, a1);

		KERNEL_UNLOCK();

		atomic_sub_ulong(&ci->ci_intrdepth, 1);
		break;
	    }

	case ALPHA_INTR_PERF:	/* performance counter interrupt */
		printf("WARNING: received performance counter interrupt!\n");
		break;

	case ALPHA_INTR_PASSIVE:
#if 0
		printf("WARNING: received passive release interrupt vec "
		    "0x%lx\n", a1);
#endif
		break;

	default:
		printf("unexpected interrupt: type 0x%lx vec 0x%lx "
		    "a2 0x%lx"
#if defined(MULTIPROCESSOR)
		    " cpu %lu"
#endif
		    "\n", a0, a1, a2
#if defined(MULTIPROCESSOR)
		    , ci->ci_cpuid
#endif
		    );
		panic("interrupt");
		/* NOTREACHED */
	}
}
338 
/*
 * machine_check:
 *
 *	Default machine check / correctable error handler.  `mces' is
 *	the PAL machine-check/error summary.  Expected machine checks
 *	(flagged by badaddr_read() via ci_mcinfo) are absorbed;
 *	correctable errors are logged; anything else is fatal and
 *	panics after dumping the trap frame.
 */
void
machine_check(unsigned long mces, struct trapframe *framep,
    unsigned long vector, unsigned long param)
{
	const char *type;
	struct mchkinfo *mcp;
	static struct timeval ratelimit[1];

	mcp = &curcpu()->ci_mcinfo;
	/* Make sure it's an error we know about. */
	if ((mces & (ALPHA_MCES_MIP|ALPHA_MCES_SCE|ALPHA_MCES_PCE)) == 0) {
		type = "fatal machine check or error (unknown type)";
		goto fatal;
	}

	/* Machine checks. */
	if (mces & ALPHA_MCES_MIP) {
		/* If we weren't expecting it, then we punt. */
		if (!mcp->mc_expected) {
			type = "unexpected machine check";
			goto fatal;
		}
		/* Expected (badaddr probe): just note that it arrived. */
		mcp->mc_expected = 0;
		mcp->mc_received = 1;
	}

	/* System correctable errors. */
	if (mces & ALPHA_MCES_SCE)
		printf("Warning: received system correctable error.\n");

	/* Processor correctable errors. */
	if (mces & ALPHA_MCES_PCE)
		printf("Warning: received processor correctable error.\n");

	/* Clear pending machine checks and correctable errors */
	alpha_pal_wrmces(mces);
	return;

fatal:
	/* Acknowledge the error before reporting, so we don't re-trap. */
	alpha_pal_wrmces(mces);
	if ((void *)framep->tf_regs[FRAME_PC] == XentArith) {
		/* Stray check from the arithmetic-trap path: rate-limit. */
		rlprintf(ratelimit, "Stray machine check\n");
		return;
	}

	printf("\n");
	printf("%s:\n", type);
	printf("\n");
	printf("    mces    = 0x%lx\n", mces);
	printf("    vector  = 0x%lx\n", vector);
	printf("    param   = 0x%lx\n", param);
	printf("    pc      = 0x%lx\n", framep->tf_regs[FRAME_PC]);
	printf("    ra      = 0x%lx\n", framep->tf_regs[FRAME_RA]);
	printf("    code    = 0x%lx\n", *(unsigned long *)(param + 0x10));
	printf("    curproc = %p\n", curproc);
	if (curproc != NULL)
		printf("        pid = %d, comm = %s\n", curproc->p_pid,
		    curproc->p_comm);
	printf("\n");
	panic("machine check");
}
400 
401 int
402 badaddr(void *addr, size_t size)
403 {
404 
405 	return (badaddr_read(addr, size, NULL));
406 }
407 
/*
 * badaddr_read:
 *
 *	Probe `addr' with a read of `size' bytes (1, 2, 4, or 8),
 *	arranging for machine_check() to treat a resulting machine
 *	check as expected.  On success (return 0) the value read is
 *	stored through `rptr' if non-NULL.  Returns non-zero if the
 *	access machine-checked (i.e. the address is bad).
 */
int
badaddr_read(void *addr, size_t size, void *rptr)
{
	struct mchkinfo *mcp = &curcpu()->ci_mcinfo;
	long rcpt;
	int rv;

	/* Get rid of any stale machine checks that have been waiting.  */
	alpha_pal_draina();

	/* Tell the trap code to expect a machine check. */
	mcp->mc_received = 0;
	mcp->mc_expected = 1;

	/* Read from the test address, and make sure the read happens. */
	alpha_mb();
	switch (size) {
	case sizeof (u_int8_t):
		rcpt = *(volatile u_int8_t *)addr;
		break;

	case sizeof (u_int16_t):
		rcpt = *(volatile u_int16_t *)addr;
		break;

	case sizeof (u_int32_t):
		rcpt = *(volatile u_int32_t *)addr;
		break;

	case sizeof (u_int64_t):
		rcpt = *(volatile u_int64_t *)addr;
		break;

	default:
		panic("badaddr: invalid size (%ld)\n", size);
	}
	alpha_mb();
	alpha_mb();	/* MAGIC ON SOME SYSTEMS */

	/* Make sure we took the machine check, if we caused one. */
	alpha_pal_draina();

	/* disallow further machine checks */
	mcp->mc_expected = 0;

	/* mc_received was set by machine_check() if the probe faulted. */
	rv = mcp->mc_received;
	mcp->mc_received = 0;

	/*
	 * And copy back read results (if no fault occurred).
	 */
	if (rptr && rv == 0) {
		switch (size) {
		case sizeof (u_int8_t):
			*(volatile u_int8_t *)rptr = rcpt;
			break;

		case sizeof (u_int16_t):
			*(volatile u_int16_t *)rptr = rcpt;
			break;

		case sizeof (u_int32_t):
			*(volatile u_int32_t *)rptr = rcpt;
			break;

		case sizeof (u_int64_t):
			*(volatile u_int64_t *)rptr = rcpt;
			break;
		}
	}
	/* Return non-zero (i.e. true) if it's a bad address. */
	return (rv);
}
481 
/*
 * netintr:
 *
 *	Legacy network software interrupt.  Atomically (under splhigh)
 *	fetches and clears the global netisr bit mask, then invokes the
 *	handler for every protocol whose bit was set.  The handler list
 *	is generated by <net/netisr_dispatch.h>, which expands the
 *	DONETISR() macro once per configured protocol.
 */
void
netintr()
{
	int n, s;

	s = splhigh();
	n = netisr;
	netisr = 0;
	splx(s);

#define	DONETISR(bit, fn)						\
	do {								\
		if (n & (1 << (bit)))					\
			fn();						\
	} while (0)

#include <net/netisr_dispatch.h>

#undef DONETISR
}
502 
503 struct alpha_soft_intr alpha_soft_intrs[IPL_NSOFT];
504 __volatile unsigned long ssir;
505 
506 /* XXX For legacy software interrupts. */
507 struct alpha_soft_intrhand *softnet_intrhand;
508 
509 /*
510  * spl0:
511  *
512  *	Lower interrupt priority to IPL 0 -- must check for
513  *	software interrupts.
514  */
/*
 * spl0:
 *
 *	Lower interrupt priority to IPL 0 -- must check for
 *	software interrupts.
 */
void
spl0(void)
{

	/*
	 * If soft interrupts are pending, drop only to the soft IPL
	 * first and run them, so they execute before we fully open up.
	 */
	if (ssir) {
		(void) alpha_pal_swpipl(ALPHA_PSL_IPL_SOFT);
		softintr_dispatch();
	}

	(void) alpha_pal_swpipl(ALPHA_PSL_IPL_0);
}
526 
527 /*
528  * softintr_init:
529  *
530  *	Initialize the software interrupt system.
531  */
532 void
533 softintr_init()
534 {
535 	static const char *softintr_names[] = IPL_SOFTNAMES;
536 	struct alpha_soft_intr *asi;
537 	int i;
538 
539 	for (i = 0; i < IPL_NSOFT; i++) {
540 		asi = &alpha_soft_intrs[i];
541 		TAILQ_INIT(&asi->softintr_q);
542 		simple_lock_init(&asi->softintr_slock);
543 		asi->softintr_ipl = i;
544 		evcnt_attach_dynamic(&asi->softintr_evcnt, EVCNT_TYPE_INTR,
545 		    NULL, "soft", softintr_names[i]);
546 	}
547 
548 	/* XXX Establish legacy software interrupt handlers. */
549 	softnet_intrhand = softintr_establish(IPL_SOFTNET,
550 	    (void (*)(void *))netintr, NULL);
551 
552 	assert(softnet_intrhand != NULL);
553 }
554 
555 /*
556  * softintr_dispatch:
557  *
558  *	Process pending software interrupts.
559  */
/*
 * softintr_dispatch:
 *
 *	Process pending software interrupts.  Entered at
 *	ALPHA_PSL_IPL_SOFT; drains the ssir pending-bit word, and for
 *	each set bit runs every queued handler at that soft IPL.
 */
void
softintr_dispatch()
{
	struct alpha_soft_intr *asi;
	struct alpha_soft_intrhand *sih;
	u_int64_t n, i;

#ifdef DEBUG
	n = alpha_pal_rdps() & ALPHA_PSL_IPL_MASK;
	if (n != ALPHA_PSL_IPL_SOFT)
		panic("softintr_dispatch: entry at ipl %ld", n);
#endif

	/* Handlers run under the big kernel lock. */
	KERNEL_LOCK(LK_CANRECURSE|LK_EXCLUSIVE);

#ifdef DEBUG
	n = alpha_pal_rdps() & ALPHA_PSL_IPL_MASK;
	if (n != ALPHA_PSL_IPL_SOFT)
		panic("softintr_dispatch: after kernel lock at ipl %ld", n);
#endif

	/* Atomically fetch-and-clear the pending mask; loop until empty. */
	while ((n = atomic_loadlatch_ulong(&ssir, 0)) != 0) {
		for (i = 0; i < IPL_NSOFT; i++) {
			if ((n & (1 << i)) == 0)
				continue;

			asi = &alpha_soft_intrs[i];

			asi->softintr_evcnt.ev_count++;

			for (;;) {
				/*
				 * Raise to IPL_HIGH while manipulating
				 * the queue, so scheduling (from hard
				 * interrupt context) can't race us.
				 */
				(void) alpha_pal_swpipl(ALPHA_PSL_IPL_HIGH);
				simple_lock(&asi->softintr_slock);

				sih = TAILQ_FIRST(&asi->softintr_q);
				if (sih != NULL) {
					TAILQ_REMOVE(&asi->softintr_q, sih,
					    sih_q);
					sih->sih_pending = 0;
				}

				simple_unlock(&asi->softintr_slock);
				(void) alpha_pal_swpipl(ALPHA_PSL_IPL_SOFT);

				if (sih == NULL)
					break;

				/* Call the handler back at soft IPL. */
				uvmexp.softs++;
				(*sih->sih_fn)(sih->sih_arg);
			}
		}
	}

	KERNEL_UNLOCK();
}
615 
616 /*
617  * softintr_establish:		[interface]
618  *
619  *	Register a software interrupt handler.
620  */
621 void *
622 softintr_establish(int ipl, void (*func)(void *), void *arg)
623 {
624 	struct alpha_soft_intr *asi;
625 	struct alpha_soft_intrhand *sih;
626 
627 	if (__predict_false(ipl >= IPL_NSOFT || ipl < 0))
628 		panic("softintr_establish");
629 
630 	asi = &alpha_soft_intrs[ipl];
631 
632 	sih = malloc(sizeof(*sih), M_DEVBUF, M_NOWAIT);
633 	if (__predict_true(sih != NULL)) {
634 		sih->sih_intrhead = asi;
635 		sih->sih_fn = func;
636 		sih->sih_arg = arg;
637 		sih->sih_pending = 0;
638 	}
639 	return (sih);
640 }
641 
642 /*
643  * softintr_disestablish:	[interface]
644  *
645  *	Unregister a software interrupt handler.
646  */
647 void
648 softintr_disestablish(void *arg)
649 {
650 	struct alpha_soft_intrhand *sih = arg;
651 	struct alpha_soft_intr *asi = sih->sih_intrhead;
652 	int s;
653 
654 	s = splhigh();
655 	simple_lock(&asi->softintr_slock);
656 	if (sih->sih_pending) {
657 		TAILQ_REMOVE(&asi->softintr_q, sih, sih_q);
658 		sih->sih_pending = 0;
659 	}
660 	simple_unlock(&asi->softintr_slock);
661 	splx(s);
662 
663 	free(sih, M_DEVBUF);
664 }
665 
666 /*
667  * Security sensitive rate limiting printf
668  */
669 void
670 rlprintf(struct timeval *t, const char *fmt, ...)
671 {
672 	va_list ap;
673 	static const struct timeval msgperiod[1] = {{ 5, 0 }};
674 
675 	if (ratecheck(t, msgperiod))
676 		vprintf(fmt, ap);
677 }
678