xref: /openbsd/sys/arch/landisk/landisk/intr.c (revision 3c0e3384)
1 /*	$OpenBSD: intr.c,v 1.13 2024/11/06 18:59:09 miod Exp $	*/
2 /*	$NetBSD: intr.c,v 1.1 2006/09/01 21:26:18 uwe Exp $	*/
3 
4 /*-
5  * Copyright (c) 2005 NONAKA Kimihiro
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  */
29 
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/kernel.h>
33 #include <sys/malloc.h>
34 #include <sys/device.h>
35 #include <sys/evcount.h>
36 
37 #include <sh/trap.h>
38 
39 #include <machine/intr.h>
40 
41 #define	_N_EXTINTR		8
42 
43 #define	LANDISK_INTEN		0xb0000005
44 #define	INTEN_ALL_MASK		0x00
45 
/*
 * Per-handler state for one external-interrupt consumer.  Handlers
 * sharing an IRQ are chained through ih_next and invoked in
 * registration order by extintr_intr_handler().
 */
struct intrhand {
	int	(*ih_fun)(void *);	/* handler function */
	void	*ih_arg;		/* argument passed to ih_fun */
	struct	intrhand *ih_next;	/* next handler on the same IRQ */
	int	ih_enable;		/* nonzero if handler may be called */
	int	ih_level;		/* IPL the handler runs at */
	int	ih_irq;			/* external IRQ number (5..12) */
	struct evcount	ih_count;	/* interrupt statistics */
	const char	*ih_name;	/* name used for evcount attachment */
};
56 
/*
 * Per-IRQ dispatch state: the INTC hook established for the IRQ,
 * the chain of registered handlers and the number of entries on it.
 */
struct extintr_handler {
	int		(*eih_func)(void *eih_arg);	/* INTC hook/cookie */
	void		*eih_arg;
	struct intrhand	*eih_ih;	/* head of handler chain */
	int		eih_nih;	/* number of handlers on chain */
};
63 
64 static struct extintr_handler extintr_handler[_N_EXTINTR];
65 
66 static int fakeintr(void *arg);
67 static int extintr_intr_handler(void *arg);
68 
/*
 * Main interrupt dispatcher, entered from the trap vector with the
 * saved SSR/SPC/SSP of the interrupted context.  Looks up the handler
 * registered for the pending INTEVT code, re-enables interrupts above
 * the handler's IPL and invokes it.
 */
void
intc_intr(int ssr, int spc, int ssp)
{
	struct intc_intrhand *ih;
	struct clockframe cf;
	int evtcode;

	curcpu()->ci_idepth++;

	evtcode = _reg_read_4(SH4_INTEVT);
	ih = EVTCODE_IH(evtcode);
	KDASSERT(ih->ih_func);

	/* NOTE: the default label deliberately precedes the cases. */
	switch (evtcode) {
#if 0	/* disabled IRL path that temporarily masks the board INTEN */
#define	IRL(irq)	(0x200 + ((irq) << 5))
	case IRL(5): case IRL(6): case IRL(7): case IRL(8):
	case IRL(9): case IRL(10): case IRL(11): case IRL(12):
	{
		int level;
		uint8_t inten, bit;

		bit = 1 << (EVTCODE_TO_MAP_INDEX(evtcode) - 5);
		inten = _reg_read_1(LANDISK_INTEN);
		_reg_write_1(LANDISK_INTEN, inten & ~bit);
		level = (_IPL_NSOFT + 1) << 4;	/* disable softintr */
		ssr &= 0xf0;
		if (level < ssr)
			level = ssr;
		(void)_cpu_intr_resume(level);
		if ((*ih->ih_func)(ih->ih_arg) != 0)
			ih->ih_count.ec_count++;
		_reg_write_1(LANDISK_INTEN, inten);
		break;
	}
#endif
	default:
		/* Run the handler with interrupts above its IPL enabled. */
		(void)_cpu_intr_resume(ih->ih_level);
		if ((*ih->ih_func)(ih->ih_arg) != 0)
			ih->ih_count.ec_count++;
		break;

	case SH_INTEVT_TMU0_TUNI0:
		/* Clock interrupt: the handler gets a clockframe instead. */
		(void)_cpu_intr_resume(ih->ih_level);
		cf.ssr = ssr;
		cf.spc = spc;
		if ((*ih->ih_func)(&cf) != 0)
			ih->ih_count.ec_count++;
		break;

	case SH_INTEVT_NMI:
		printf("NMI ignored.\n");
		break;
	}

	curcpu()->ci_idepth--;
}
126 
/*
 * Mask all board external interrupts; individual sources are
 * unmasked later as handlers are established.
 */
void
intr_init(void)
{
	_reg_write_1(LANDISK_INTEN, INTEN_ALL_MASK);
}
132 
/*
 * Register a handler for external IRQ `irq' (5..12) at `level'.
 * The first handler on an IRQ hooks the matching INTEVT code into
 * the INTC and unmasks the IRQ in the board register.  Returns an
 * opaque cookie for extintr_disestablish().
 */
void *
extintr_establish(int irq, int level, int (*ih_fun)(void *), void *ih_arg,
    const char *ih_name)
{
	static struct intrhand fakehand = {fakeintr};
	struct extintr_handler *eih;
	struct intrhand **p, *q, *ih;
	int evtcode;
	int s;

	KDASSERT(irq >= 5 && irq < 13);

	ih = malloc(sizeof(*ih), M_DEVBUF, cold ? M_NOWAIT : M_WAITOK);
	if (ih == NULL)
		panic("intr_establish: can't malloc handler info");

	s = _cpu_intr_suspend();

	/* Only IPL_BIO/IPL_NET/IPL_TTY are expected here. */
	switch (level) {
	default:
#if defined(DEBUG)
		panic("extintr_establish: unknown level %d", level);
		/*NOTREACHED*/
#endif
	case IPL_BIO:
	case IPL_NET:
	case IPL_TTY:
		break;
	}

	eih = &extintr_handler[irq - 5];
	if (eih->eih_func == NULL) {
		/* First handler on this IRQ: hook it into the INTC. */
		evtcode = 0x200 + (irq << 5);
		eih->eih_func = intc_intr_establish(evtcode, IST_LEVEL, level,
		    extintr_intr_handler, eih, NULL);
	}

	/*
	 * Figure out where to put the handler.
	 * This is O(N^2), but we want to preserve the order, and N is
	 * generally small.
	 */
	for (p = &eih->eih_ih; (q = *p) != NULL; p = &q->ih_next)
		continue;

	/*
	 * Actually install a fake handler momentarily, since we might be doing
	 * this with interrupts enabled and don't want the real routine called
	 * until masking is set up.
	 */
	fakehand.ih_level = level;
	*p = &fakehand;

	/*
	 * Poke the real handler in now.
	 */
	memset(ih, 0, sizeof(*ih));
	ih->ih_fun = ih_fun;
	ih->ih_arg = ih_arg;
	ih->ih_next = NULL;
	ih->ih_enable = 1;
	ih->ih_level = level;
	ih->ih_irq = irq;
	ih->ih_name = ih_name;

	if (ih_name != NULL)
		evcount_attach(&ih->ih_count, ih_name, &ih->ih_irq);
	*p = ih;

	if (++eih->eih_nih == 1) {
		/* Unmask interrupt */
		_reg_bset_1(LANDISK_INTEN, (1 << (irq - 5)));
	}

	_cpu_intr_resume(s);

	return (ih);
}
211 
/*
 * Remove a handler previously registered with extintr_establish().
 * When the last handler on an IRQ goes away, the INTC hook is torn
 * down and the IRQ is masked in the board register.
 */
void
extintr_disestablish(void *aux)
{
	struct intrhand *ih = aux;
	struct intrhand **p, *q;
	struct extintr_handler *eih;
	int irq;
	int s;

	KDASSERT(ih != NULL);

	s = _cpu_intr_suspend();

	irq = ih->ih_irq - 5;
	eih = &extintr_handler[irq];
	/*
	 * Remove the handler from the chain.
	 * This is O(n^2), too.
	 */
	for (p = &eih->eih_ih; (q = *p) != NULL && q != ih; p = &q->ih_next)
		continue;
	if (q == NULL)
		panic("extintr_disestablish: handler not registered");

	*p = q->ih_next;

#if 0	/* NOTE(review): evcount never detached — counters leak on removal */
	if (ih->ih_name != NULL)
		evcount_detach(&ih->ih_count);
#endif

	free(ih, M_DEVBUF, sizeof *ih);

	if (--eih->eih_nih == 0) {
		intc_intr_disestablish(eih->eih_func);

		/* Mask interrupt */
		_reg_bclr_1(LANDISK_INTEN, (1 << irq));
	}

	_cpu_intr_resume(s);
}
254 
/*
 * Re-enable a handler.  If no handler on the IRQ was enabled before
 * this call, unmask the IRQ in the board register.
 */
void
extintr_enable(void *aux)
{
	struct intrhand *ih = aux;
	struct intrhand *p, *q;
	struct extintr_handler *eih;
	int irq;
	int cnt;
	int s;

	KDASSERT(ih != NULL);

	s = _cpu_intr_suspend();

	irq = ih->ih_irq - 5;
	KDASSERT(irq >= 0 && irq < 8);
	eih = &extintr_handler[irq];
	/*
	 * cnt counts handlers that were already enabled before this call
	 * (the enable-check runs before the target's flag is set).
	 */
	for (cnt = 0, p = eih->eih_ih, q = NULL; p != NULL; p = p->ih_next) {
		if (p->ih_enable) {
			cnt++;
		}
		if (p == ih) {
			q = p;
			p->ih_enable = 1;
		}
	}
	KDASSERT(q != NULL);	/* ih must be on this IRQ's chain */

	if (cnt == 0) {
		/* We are the first enabled handler: unmask interrupt */
		_reg_bset_1(LANDISK_INTEN, (1 << irq));
	}

	_cpu_intr_resume(s);
}
290 
291 void
extintr_disable(void * aux)292 extintr_disable(void *aux)
293 {
294 	struct intrhand *ih = aux;
295 	struct intrhand *p, *q;
296 	struct extintr_handler *eih;
297 	int irq;
298 	int cnt;
299 	int s;
300 
301 	KDASSERT(ih != NULL);
302 
303 	s = _cpu_intr_suspend();
304 
305 	irq = ih->ih_irq - 5;
306 	KDASSERT(irq >= 0 && irq < 8);
307 	eih = &extintr_handler[irq];
308 	for (cnt = 0, p = eih->eih_ih, q = NULL; p != NULL; p = p->ih_next) {
309 		if (p == ih) {
310 			q = p;
311 			p->ih_enable = 0;
312 		}
313 		if (!ih->ih_enable) {
314 			cnt++;
315 		}
316 	}
317 	KDASSERT(q != NULL);
318 
319 	if (cnt == 0) {
320 		/* Mask interrupt */
321 		_reg_bclr_1(LANDISK_INTEN, (1 << irq));
322 	}
323 
324 	_cpu_intr_resume(s);
325 }
326 
327 void
extintr_disable_by_num(int irq)328 extintr_disable_by_num(int irq)
329 {
330 	struct extintr_handler *eih;
331 	struct intrhand *ih;
332 	int s;
333 
334 	irq -= 5;
335 	KDASSERT(irq >= 0 && irq < 8);
336 
337 	s = _cpu_intr_suspend();
338 	eih = &extintr_handler[irq];
339 	for (ih = eih->eih_ih; ih != NULL; ih = ih->ih_next) {
340 		ih->ih_enable = 0;
341 	}
342 	/* Mask interrupt */
343 	_reg_bclr_1(LANDISK_INTEN, (1 << irq));
344 	_cpu_intr_resume(s);
345 }
346 
/*
 * Placeholder handler installed while a real handler is being set
 * up; claims nothing.
 */
static int
fakeintr(void *arg)
{
	(void)arg;	/* unused */
	return (0);
}
352 
353 static int
extintr_intr_handler(void * arg)354 extintr_intr_handler(void *arg)
355 {
356 	struct extintr_handler *eih = arg;
357 	struct intrhand *ih;
358 	int r;
359 
360 	if (__predict_true(eih != NULL)) {
361 		for (ih = eih->eih_ih; ih != NULL; ih = ih->ih_next) {
362 			if (__predict_true(ih->ih_enable)) {
363 				r = (*ih->ih_fun)(ih->ih_arg);
364 				if (__predict_true(r != 0)) {
365 					ih->ih_count.ec_count++;
366 					if (r == 1)
367 						break;
368 				}
369 			}
370 		}
371 		return 1;
372 	}
373 	return 0;
374 }
375 
376 #ifdef DIAGNOSTIC
377 void
splassert_check(int wantipl,const char * func)378 splassert_check(int wantipl, const char *func)
379 {
380 	register_t sr;
381         int oldipl;
382 
383 	__asm__ volatile ("stc sr,%0" : "=r" (sr));
384 
385 	oldipl = (sr & 0xf0) >> 4;
386         if (oldipl < wantipl) {
387                 splassert_fail(wantipl, oldipl, func);
388                 /*
389                  * If the splassert_ctl is set to not panic, raise the ipl
390                  * in a feeble attempt to reduce damage.
391                  */
392 		_cpu_intr_raise(wantipl << 4);
393         }
394 }
395 #endif
396