xref: /netbsd/sys/arch/evbmips/evbmips/interrupt.c (revision c4a72b64)
/*	$NetBSD: interrupt.c,v 1.4 2002/11/10 15:21:51 simonb Exp $	*/

/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/device.h>

#include <uvm/uvm_extern.h>

#include <machine/intr.h>
#include <machine/locore.h>

#include <evbmips/evbmips/clockvar.h>

struct evbmips_soft_intrhand *softnet_intrhand;
struct evbmips_soft_intr evbmips_soft_intrs[_IPL_NSOFT];

struct evcnt mips_int5_evcnt =
    EVCNT_INITIALIZER(EVCNT_TYPE_INTR, NULL, "mips", "int 5 (clock)");

uint32_t last_cp0_count;	/* used by microtime() */
uint32_t next_cp0_clk_intr;	/* used to schedule hard clock interrupts */
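
/*
 * Informal sketch of the clock mechanism: the CP0 count register
 * increments at a CPU-dependent rate and raises hard interrupt 5 when
 * it matches the CP0 compare register; each tick the compare value is
 * advanced by ci_cycles_per_hz.  For example, with a 100 MHz count rate
 * and hz = 100, ci_cycles_per_hz would be roughly 1000000 counts per tick.
 */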

void
intr_init(void)
{

	evcnt_attach_static(&mips_int5_evcnt);
	evbmips_intr_init();	/* board specific stuff */

	softintr_init();
}

void
cpu_intr(u_int32_t status, u_int32_t cause, u_int32_t pc, u_int32_t ipending)
{
	struct clockframe cf;
	struct evbmips_soft_intr *si;
	struct evbmips_soft_intrhand *sih;
	uint32_t new_cnt;
	int i, s;

	uvmexp.intrs++;

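	/*
	 * Hard interrupt 5 is the CP0 count/compare (timer) interrupt
	 * on MIPS3-class CPUs; it drives hardclock().
	 */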
	if (ipending & MIPS_INT_MASK_5) {
		last_cp0_count = next_cp0_clk_intr;
		next_cp0_clk_intr += curcpu()->ci_cycles_per_hz;
		mips3_cp0_compare_write(next_cp0_clk_intr);

		/* Check for lost clock interrupts. */
		new_cnt = mips3_cp0_count_read();

		/*
		 * If the new compare value is already behind the current
		 * count (i.e. the modulo-2^32 difference has its sign bit
		 * set), we missed one or more clock interrupts; start
		 * counting again from the current value.
		 */
		if ((next_cp0_clk_intr - new_cnt) & 0x80000000) {
#if 0	/* XXX - should add an event counter for this */
			missed_clk_intrs++;
#endif

			next_cp0_clk_intr = new_cnt +
			    curcpu()->ci_cycles_per_hz;
			mips3_cp0_compare_write(next_cp0_clk_intr);
		}

		cf.pc = pc;
		cf.sr = status;
		hardclock(&cf);

		mips_int5_evcnt.ev_count++;

		/*
		 * Re-enable clock interrupts: drop int 5 from the pending
		 * causes, then unmask every hard interrupt that was enabled
		 * on entry and is not still pending.
		 */
		cause &= ~MIPS_INT_MASK_5;
		_splset(MIPS_SR_INT_IE |
		    ((status & ~cause) & MIPS_HARD_INT_MASK));
	}

	if (ipending & (MIPS_INT_MASK_0|MIPS_INT_MASK_1|MIPS_INT_MASK_2|
			MIPS_INT_MASK_3|MIPS_INT_MASK_4)) {
		/* Process I/O and error interrupts (board-specific). */
		evbmips_iointr(status, cause, pc, ipending);
	}

	ipending &= (MIPS_SOFT_INT_MASK_1|MIPS_SOFT_INT_MASK_0);
	if (ipending == 0)
		return;

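	/*
	 * Acknowledge the pending soft interrupts before dispatching, so
	 * that requests posted while the handlers run are not lost.
	 */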
	_clrsoftintr(ipending);

	for (i = _IPL_NSOFT - 1; i >= 0; i--) {
		if ((ipending & ipl_si_to_sr[i]) == 0)
			continue;

		si = &evbmips_soft_intrs[i];

		if (TAILQ_FIRST(&si->softintr_q) != NULL)
			si->softintr_evcnt.ev_count++;

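		/*
		 * Drain the handler queue one entry at a time, raising
		 * the priority level only while the queue is touched.
		 */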
		for (;;) {
			s = splhigh();

			sih = TAILQ_FIRST(&si->softintr_q);
			if (sih != NULL) {
				TAILQ_REMOVE(&si->softintr_q, sih, sih_q);
				sih->sih_pending = 0;
			}

			splx(s);

			if (sih == NULL)
				break;

			uvmexp.softs++;
			(*sih->sih_fn)(sih->sih_arg);
		}
	}
}

/*
 * softintr_init:
 *
 *	Initialize the software interrupt system.
 */
void
softintr_init(void)
{
	static const char *softintr_names[] = IPL_SOFTNAMES;
	struct evbmips_soft_intr *si;
	int i;

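	/* One handler queue and event counter per soft interrupt level. */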
	for (i = 0; i < _IPL_NSOFT; i++) {
		si = &evbmips_soft_intrs[i];
		TAILQ_INIT(&si->softintr_q);
		simple_lock_init(&si->softintr_slock);
		si->softintr_ipl = IPL_SOFT + i;
		evcnt_attach_dynamic(&si->softintr_evcnt, EVCNT_TYPE_INTR,
		    NULL, "soft", softintr_names[i]);
	}

	/* XXX Establish legacy software interrupt handlers. */
	softnet_intrhand = softintr_establish(IPL_SOFTNET,
	    (void (*)(void *))netintr, NULL);

	assert(softnet_intrhand != NULL);
}

/*
 * softintr_establish:		[interface]
 *
 *	Register a software interrupt handler.
 */
void *
softintr_establish(int ipl, void (*func)(void *), void *arg)
{
	struct evbmips_soft_intr *si;
	struct evbmips_soft_intrhand *sih;

	if (__predict_false(ipl >= (IPL_SOFT + _IPL_NSOFT) ||
			    ipl < IPL_SOFT))
		panic("softintr_establish");

	si = &evbmips_soft_intrs[ipl - IPL_SOFT];

	sih = malloc(sizeof(*sih), M_DEVBUF, M_NOWAIT);
	if (__predict_true(sih != NULL)) {
		sih->sih_intrhead = si;
		sih->sih_fn = func;
		sih->sih_arg = arg;
		sih->sih_pending = 0;
	}
	return (sih);
}
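
#if 0
/*
 * Usage sketch: how a hypothetical driver might hook into this interface.
 * The example_* names and struct example_softc are illustrative only;
 * softintr_schedule() is the usual way to request that an established
 * handler be run.
 */
struct example_softc {
	void *sc_sih;			/* soft interrupt cookie */
};

static void
example_softintr(void *arg)
{
	struct example_softc *sc = arg;

	/* Deferred, low-priority half of the interrupt work goes here. */
}

static void
example_attach(struct example_softc *sc)
{

	sc->sc_sih = softintr_establish(IPL_SOFTNET, example_softintr, sc);
	if (sc->sc_sih == NULL)
		panic("example_attach: unable to establish soft interrupt");
}

static void
example_hardintr(void *arg)
{
	struct example_softc *sc = arg;

	/* Do the minimum at hard interrupt level, then defer the rest. */
	softintr_schedule(sc->sc_sih);
}
#endif	/* usage sketch */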

/*
 * softintr_disestablish:	[interface]
 *
 *	Unregister a software interrupt handler.
 */
void
softintr_disestablish(void *arg)
{
	struct evbmips_soft_intrhand *sih = arg;
	struct evbmips_soft_intr *si = sih->sih_intrhead;
	int s;

	s = splhigh();
	simple_lock(&si->softintr_slock);
	if (sih->sih_pending) {
		TAILQ_REMOVE(&si->softintr_q, sih, sih_q);
		sih->sih_pending = 0;
	}
	simple_unlock(&si->softintr_slock);
	splx(s);

	free(sih, M_DEVBUF);
}
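
#if 0
/*
 * Usage sketch (continuing the hypothetical driver above): teardown.
 * softintr_disestablish() dequeues a still-pending handler before
 * freeing it, so it is safe to call at detach time.
 */
static void
example_detach(struct example_softc *sc)
{

	softintr_disestablish(sc->sc_sih);
	sc->sc_sih = NULL;
}
#endif	/* usage sketch */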