/*	$NetBSD: interrupt.c,v 1.2 2002/04/08 14:08:26 simonb Exp $	*/

/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/device.h>

#include <uvm/uvm_extern.h>

#include <machine/intr.h>
#include <machine/locore.h>

#include <evbmips/evbmips/clockvar.h>

/* Legacy soft interrupt handler for the network stack; see softintr_init(). */
struct evbmips_soft_intrhand *softnet_intrhand;

/*
 * This is a mask of bits to clear in the SR when we go to a
 * given software interrupt priority level.
 * Hardware ipls are port/board specific.
 */

const u_int32_t ipl_si_to_sr[_IPL_NSOFT] = {
	MIPS_SOFT_INT_MASK_0,			/* IPL_SOFT */
	MIPS_SOFT_INT_MASK_0,			/* IPL_SOFTCLOCK */
	MIPS_SOFT_INT_MASK_1,			/* IPL_SOFTNET */
	MIPS_SOFT_INT_MASK_1,			/* IPL_SOFTSERIAL */
};
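
/*
 * (For example, IPL_SOFTNET and IPL_SOFTSERIAL both map to
 * MIPS_SOFT_INT_MASK_1; cpu_intr() below checks these entries against
 * the pending soft interrupt bits when deciding which handler queues
 * to run.)
 */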

/* Per-level soft interrupt state: handler queue and event counter. */
struct evbmips_soft_intr evbmips_soft_intrs[_IPL_NSOFT];

struct evcnt mips_int5_evcnt =
    EVCNT_INITIALIZER(EVCNT_TYPE_INTR, NULL, "mips", "int 5 (clock)");

uint32_t last_cp0_count;	/* used by microtime() */
uint32_t next_cp0_clk_intr;	/* used to schedule hard clock interrupts */

void
intr_init(void)
{

	evbmips_intr_init();	/* board specific stuff */

	softintr_init();
}

void
cpu_intr(u_int32_t status, u_int32_t cause, u_int32_t pc, u_int32_t ipending)
{
	struct clockframe cf;
	struct evbmips_soft_intr *si;
	struct evbmips_soft_intrhand *sih;
	uint32_t new_cnt;
	int i, s;

	uvmexp.intrs++;

	if (ipending & MIPS_INT_MASK_5) {
		last_cp0_count = next_cp0_clk_intr;
		next_cp0_clk_intr += curcpu()->ci_cycles_per_hz;
		mips3_cp0_compare_write(next_cp0_clk_intr);

		/* Check for lost clock interrupts */
		new_cnt = mips3_cp0_count_read();

		/*
		 * If we missed one or more clock interrupts, start
		 * counting again from the current COUNT value.  (The
		 * unsigned difference below has its sign bit set when
		 * the COMPARE value programmed above already lags the
		 * COUNT register.)
		 */
		if ((next_cp0_clk_intr - new_cnt) & 0x80000000) {
#if 0
			missed_clk_intrs++;
#endif

			next_cp0_clk_intr = new_cnt +
			    curcpu()->ci_cycles_per_hz;
			mips3_cp0_compare_write(next_cp0_clk_intr);
		}

		cf.pc = pc;
		cf.sr = status;
		hardclock(&cf);

		mips_int5_evcnt.ev_count++;

		/* Re-enable clock interrupts. */
		cause &= ~MIPS_INT_MASK_5;
		_splset(MIPS_SR_INT_IE |
		    ((status & ~cause) & MIPS_HARD_INT_MASK));
	}

	if (ipending & (MIPS_INT_MASK_0|MIPS_INT_MASK_1|MIPS_INT_MASK_2|
			MIPS_INT_MASK_3|MIPS_INT_MASK_4)) {
		/* Process I/O and error interrupts. */
		evbmips_iointr(status, cause, pc, ipending);
	}

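	/* Only software interrupts are left to service at this point. */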
	ipending &= (MIPS_SOFT_INT_MASK_1|MIPS_SOFT_INT_MASK_0);
	if (ipending == 0)
		return;

	_clrsoftintr(ipending);

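	/* Run the handlers for each pending level, highest priority first. */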
	for (i = _IPL_NSOFT - 1; i >= 0; i--) {
		if ((ipending & ipl_si_to_sr[i]) == 0)
			continue;

		si = &evbmips_soft_intrs[i];

		if (TAILQ_FIRST(&si->softintr_q) != NULL)
			si->softintr_evcnt.ev_count++;

		for (;;) {
			s = splhigh();

			sih = TAILQ_FIRST(&si->softintr_q);
			if (sih != NULL) {
				TAILQ_REMOVE(&si->softintr_q, sih, sih_q);
				sih->sih_pending = 0;
			}

			splx(s);

			if (sih == NULL)
				break;

			uvmexp.softs++;
			(*sih->sih_fn)(sih->sih_arg);
		}
	}
}

/*
 * softintr_init:
 *
 *	Initialize the software interrupt system.
 */
void
softintr_init(void)
{
	static const char *softintr_names[] = IPL_SOFTNAMES;
	struct evbmips_soft_intr *si;
	int i;

	for (i = 0; i < _IPL_NSOFT; i++) {
		si = &evbmips_soft_intrs[i];
		TAILQ_INIT(&si->softintr_q);
		si->softintr_ipl = IPL_SOFT + i;
		evcnt_attach_dynamic(&si->softintr_evcnt, EVCNT_TYPE_INTR,
		    NULL, "soft", softintr_names[i]);
	}

	/* XXX Establish legacy soft interrupt handlers. */
	softnet_intrhand = softintr_establish(IPL_SOFTNET,
	    (void (*)(void *))netintr, NULL);

	assert(softnet_intrhand != NULL);
}

/*
 * softintr_establish:		[interface]
 *
 *	Register a software interrupt handler.
 */
void *
softintr_establish(int ipl, void (*func)(void *), void *arg)
{
	struct evbmips_soft_intr *si;
	struct evbmips_soft_intrhand *sih;

	if (__predict_false(ipl >= (IPL_SOFT + _IPL_NSOFT) ||
			    ipl < IPL_SOFT))
		panic("softintr_establish");

	si = &evbmips_soft_intrs[ipl - IPL_SOFT];

	sih = malloc(sizeof(*sih), M_DEVBUF, M_NOWAIT);
	if (__predict_true(sih != NULL)) {
		sih->sih_intrhead = si;
		sih->sih_fn = func;
		sih->sih_arg = arg;
		sih->sih_pending = 0;
	}
	return (sih);
}
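
/*
 * Usage sketch (illustrative only, not part of the original file): a
 * driver registers a handler with softintr_establish() and later
 * triggers it, typically from its hardware interrupt routine.  The
 * softintr_schedule() call (normally provided by <machine/intr.h>) and
 * the "xyz" names below are assumptions made for this example; only
 * softintr_establish() and softintr_disestablish() are defined here.
 *
 *	void *xyz_sih;
 *
 *	xyz_sih = softintr_establish(IPL_SOFTNET, xyz_softintr, sc);
 *	if (xyz_sih == NULL)
 *		panic("xyz_attach: unable to establish soft interrupt");
 *
 *	softintr_schedule(xyz_sih);	(from the hardware interrupt handler)
 *
 *	softintr_disestablish(xyz_sih);	(at detach time)
 */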

/*
 * softintr_disestablish:	[interface]
 *
 *	Unregister a software interrupt handler.
 */
void
softintr_disestablish(void *arg)
{
	struct evbmips_soft_intrhand *sih = arg;
	struct evbmips_soft_intr *si = sih->sih_intrhead;
	int s;

	s = splhigh();
	if (sih->sih_pending) {
		TAILQ_REMOVE(&si->softintr_q, sih, sih_q);
		sih->sih_pending = 0;
	}
	splx(s);

	free(sih, M_DEVBUF);
}