/*	$NetBSD: iq80310_intr.c,v 1.36 2020/11/21 15:30:07 thorpej Exp $	*/

/*
 * Copyright (c) 2001, 2002 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: iq80310_intr.c,v 1.36 2020/11/21 15:30:07 thorpej Exp $");

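/*
 * Define EVBARM_SPL_NOINLINE so that the spl operations are provided
 * out of line by this file (see _splraise(), splx() and _spllower()
 * below) rather than inlined at their call sites.
 */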
#ifndef EVBARM_SPL_NOINLINE
#define	EVBARM_SPL_NOINLINE
#endif

/*
 * Interrupt support for the Intel IQ80310.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kmem.h>

#include <sys/bus.h>
#include <machine/intr.h>

#include <arm/cpufunc.h>

#include <arm/xscale/i80200reg.h>
#include <arm/xscale/i80200var.h>

#include <evbarm/iq80310/iq80310reg.h>
#include <evbarm/iq80310/iq80310var.h>
#include <evbarm/iq80310/obiovar.h>

/* Interrupt handler queues. */
struct intrq intrq[NIRQ];

/* Interrupts to mask at each level. */
int iq80310_imask[NIPL];

/* Interrupts pending. */
volatile int iq80310_ipending;

/* Software copy of the IRQs we have enabled. */
uint32_t intr_enabled;

#ifdef __HAVE_FAST_SOFTINTS
/*
 * Map a software interrupt queue index to an IRQ bit (placed at the
 * top of the word, so that the highest priority softintr is
 * encountered first in an ffs()).
 */
#define	SI_TO_IRQBIT(si)	(1U << (31 - (si)))
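/*
 * With the queue numbering used by si_to_ipl[] below, SI_SOFT maps to
 * bit 31 and SI_SOFTSERIAL to bit 28, well above the hardware IRQ bits.
 */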

/*
 * Map a software interrupt queue to an interrupt priority level.
 */
static const int si_to_ipl[SI_NQUEUES] = {
	IPL_SOFT,		/* SI_SOFT */
	IPL_SOFTCLOCK,		/* SI_SOFTCLOCK */
	IPL_SOFTNET,		/* SI_SOFTNET */
	IPL_SOFTSERIAL,		/* SI_SOFTSERIAL */
};
#endif

void	iq80310_intr_dispatch(struct trapframe *frame);

static inline uint32_t
iq80310_intstat_read(void)
{
	uint32_t intstat;

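	/*
	 * XINT3 status supplies the low five IRQ bits; where XINT0 can
	 * be read, it supplies three more bits above them.
	 */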
	intstat = CPLD_READ(IQ80310_XINT3_STATUS) & 0x1f;
#if defined(IRQ_READ_XINT0)
	if (IRQ_READ_XINT0)
		intstat |= (CPLD_READ(IQ80310_XINT0_STATUS) & 0x7) << 5;
#endif

	/* XXX Why do we have to mask off? */
	return (intstat & intr_enabled);
}

static inline void
iq80310_set_intrmask(void)
{
	uint32_t disabled;

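	/*
	 * Some interrupts can never be masked in the CPLD, so always
	 * treat them as enabled.
	 */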
	intr_enabled |= IRQ_BITS_ALWAYS_ON;

	/* The XINT_MASK register sets a bit to *disable*. */
	disabled = (~intr_enabled) & IRQ_BITS;

	CPLD_WRITE(IQ80310_XINT_MASK, disabled & 0x1f);
}

static inline void
iq80310_enable_irq(int irq)
{

	intr_enabled |= (1U << irq);
	iq80310_set_intrmask();
}

static inline void
iq80310_disable_irq(int irq)
{

	intr_enabled &= ~(1U << irq);
	iq80310_set_intrmask();
}

/*
 * NOTE: This routine must be called with interrupts disabled in the CPSR.
 */
static void
iq80310_intr_calculate_masks(void)
{
	struct intrq *iq;
	struct intrhand *ih;
	int irq, ipl;

	/* First, figure out which IPLs each IRQ has. */
	for (irq = 0; irq < NIRQ; irq++) {
		int levels = 0;
		iq = &intrq[irq];
		iq80310_disable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			levels |= (1U << ih->ih_ipl);
		iq->iq_levels = levels;
	}

	/* Next, figure out which IRQs are used by each IPL. */
	for (ipl = 0; ipl < NIPL; ipl++) {
		int irqs = 0;
		for (irq = 0; irq < NIRQ; irq++) {
			if (intrq[irq].iq_levels & (1U << ipl))
				irqs |= (1U << irq);
		}
		iq80310_imask[ipl] = irqs;
	}

	iq80310_imask[IPL_NONE] = 0;
	iq80310_imask[IPL_SOFTCLOCK] = 0;
	iq80310_imask[IPL_SOFTNET] = 0;
	iq80310_imask[IPL_SOFTSERIAL] = 0;

	/*
	 * splsoftnet() must also block splsoftclock(), since we don't
	 * want timer-driven network events to occur while we're
	 * processing incoming packets.
	 */
	iq80310_imask[IPL_SOFTNET] |= iq80310_imask[IPL_SOFTCLOCK];

	/*
	 * Enforce a hierarchy that gives "slow" devices (or devices with
	 * limited input buffer space/"real-time" requirements) a better
	 * chance at not dropping data.
	 */
	iq80310_imask[IPL_BIO] |= iq80310_imask[IPL_SOFTNET];
	iq80310_imask[IPL_NET] |= iq80310_imask[IPL_BIO];
	iq80310_imask[IPL_SOFTSERIAL] |= iq80310_imask[IPL_NET];
	iq80310_imask[IPL_TTY] |= iq80310_imask[IPL_SOFTSERIAL];

	/*
	 * splvm() blocks all interrupts that use the kernel memory
	 * allocation facilities.
	 */
	iq80310_imask[IPL_VM] |= iq80310_imask[IPL_TTY];

	/*
	 * Audio devices are not allowed to perform memory allocation
	 * in their interrupt routines, and they have fairly "real-time"
	 * requirements, so give them a high interrupt priority.
	 */
	iq80310_imask[IPL_AUDIO] |= iq80310_imask[IPL_VM];

	/*
	 * splclock() must block anything that uses the scheduler.
	 */
	iq80310_imask[IPL_CLOCK] |= iq80310_imask[IPL_AUDIO];

	/*
	 * No separate statclock on the IQ80310.
	 */
#ifdef IPL_STATCLOCK
	iq80310_imask[IPL_STATCLOCK] |= iq80310_imask[IPL_CLOCK];
#endif

	/*
	 * splhigh() must block "everything".
	 */
#ifdef IPL_STATCLOCK
	iq80310_imask[IPL_HIGH] |= iq80310_imask[IPL_STATCLOCK];
#else
	iq80310_imask[IPL_HIGH] |= iq80310_imask[IPL_CLOCK];
#endif

	/*
	 * XXX We need serial drivers to run at the absolute highest priority
	 * in order to avoid overruns, so serial > high.
	 */
	iq80310_imask[IPL_SERIAL] |= iq80310_imask[IPL_HIGH];

	/*
	 * Now compute which IRQs must be blocked when servicing any
	 * given IRQ.
	 */
	for (irq = 0; irq < NIRQ; irq++) {
		int irqs = (1U << irq);
		iq = &intrq[irq];
		if (TAILQ_FIRST(&iq->iq_list) != NULL)
			iq80310_enable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			irqs |= iq80310_imask[ih->ih_ipl];
		iq->iq_mask = irqs;
	}
}

#ifdef __HAVE_FAST_SOFTINTS
void
iq80310_do_soft(void)
{
	static __cpu_simple_lock_t processing = __SIMPLELOCK_UNLOCKED;
	struct cpu_info * const ci = curcpu();
	int new, oldirqstate;

	if (__cpu_simple_lock_try(&processing) == 0)
		return;

	new = ci->ci_cpl;

	oldirqstate = disable_interrupts(I32_bit);

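/*
 * If the given queue's soft interrupt is pending and not masked by the
 * saved IPL, clear its pending bit, raise ci_cpl to that queue's level,
 * and run its handlers with hardware interrupts briefly re-enabled.
 */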
#define	DO_SOFTINT(si)							\
	if ((iq80310_ipending & ~new) & SI_TO_IRQBIT(si)) {		\
		iq80310_ipending &= ~SI_TO_IRQBIT(si);			\
		ci->ci_cpl |= iq80310_imask[si_to_ipl[(si)]];		\
		restore_interrupts(oldirqstate);			\
		softintr_dispatch(si);					\
		oldirqstate = disable_interrupts(I32_bit);		\
		ci->ci_cpl = new;					\
	}

	DO_SOFTINT(SI_SOFTSERIAL);
	DO_SOFTINT(SI_SOFTNET);
	DO_SOFTINT(SI_SOFTCLOCK);
	DO_SOFTINT(SI_SOFT);

	__cpu_simple_unlock(&processing);

	restore_interrupts(oldirqstate);
}
#endif	/* __HAVE_FAST_SOFTINTS */

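/*
 * Out-of-line spl entry points; these simply wrap the corresponding
 * iq80310_spl*() operations.
 */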
int
_splraise(int ipl)
{

	return (iq80310_splraise(ipl));
}

inline void
splx(int new)
{

	return (iq80310_splx(new));
}

int
_spllower(int ipl)
{

	return (iq80310_spllower(ipl));
}

#ifdef __HAVE_FAST_SOFTINTS
void
_setsoftintr(int si)
{
	int oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);
	iq80310_ipending |= SI_TO_IRQBIT(si);
	restore_interrupts(oldirqstate);

	/* Process unmasked pending soft interrupts. */
	if ((iq80310_ipending & ~IRQ_BITS) & ~curcpl())
		iq80310_do_soft();
}
#endif

void
iq80310_intr_init(void)
{
	struct intrq *iq;
	int i;

	/*
	 * The Secondary PCI interrupts INTA, INTB, and INTC
	 * are always enabled, since they cannot be masked
	 * in the CPLD.
	 */
	intr_enabled |= IRQ_BITS_ALWAYS_ON;

	for (i = 0; i < NIRQ; i++) {
		iq = &intrq[i];
		TAILQ_INIT(&iq->iq_list);

		snprintf(iq->iq_name, sizeof(iq->iq_name), "irq %d", i);
	}

	iq80310_intr_calculate_masks();

	/* Enable external interrupts on the i80200. */
	i80200_extirq_dispatch = iq80310_intr_dispatch;
	i80200_intr_enable(INTCTL_IM | INTCTL_PM);

	/* Enable IRQs (don't yet use FIQs). */
	enable_interrupts(I32_bit);
}

void
iq80310_intr_evcnt_attach(void)
{
	struct intrq *iq;
	int i;

	for (i = 0; i < NIRQ; i++) {
		iq = &intrq[i];
		evcnt_attach_dynamic(&iq->iq_ev, EVCNT_TYPE_INTR,
		    NULL, "iq80310", iq->iq_name);
	}
}

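/*
 * Establish a handler for the given IRQ at the given IPL.  The cookie
 * returned here is what is later handed to iq80310_intr_disestablish().
 */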
void *
iq80310_intr_establish(int irq, int ipl, int (*func)(void *), void *arg)
{
	struct intrq *iq;
	struct intrhand *ih;
	u_int oldirqstate;

	if (irq < 0 || irq >= NIRQ)
		panic("iq80310_intr_establish: IRQ %d out of range", irq);

	ih = kmem_alloc(sizeof(*ih), KM_SLEEP);
	ih->ih_func = func;
	ih->ih_arg = arg;
	ih->ih_ipl = ipl;
	ih->ih_irq = irq;

	iq = &intrq[irq];

	/* All IQ80310 interrupts are level-triggered. */
	iq->iq_ist = IST_LEVEL;

	oldirqstate = disable_interrupts(I32_bit);

	TAILQ_INSERT_TAIL(&iq->iq_list, ih, ih_list);

	iq80310_intr_calculate_masks();

	restore_interrupts(oldirqstate);

	return (ih);
}

void
iq80310_intr_disestablish(void *cookie)
{
	struct intrhand *ih = cookie;
	struct intrq *iq = &intrq[ih->ih_irq];
	int oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);

	TAILQ_REMOVE(&iq->iq_list, ih, ih_list);

	iq80310_intr_calculate_masks();

	restore_interrupts(oldirqstate);
}

void
iq80310_intr_dispatch(struct trapframe *frame)
{
	struct intrq *iq;
	struct intrhand *ih;
	int oldirqstate, pcpl, irq, ibit, hwpend, rv;
	struct cpu_info * const ci = curcpu();
#if 0
	int stray;

	stray = 1;
#endif

	/* First, disable external IRQs. */
	i80200_intr_disable(INTCTL_IM | INTCTL_PM);

	pcpl = ci->ci_cpl;

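	/*
	 * Walk the pending IRQ bits read from the CPLD: IRQs masked at
	 * the current IPL are recorded in iq80310_ipending for later;
	 * everything else is dispatched with ci_cpl raised to the IRQ's
	 * mask and CPU interrupts re-enabled.
	 */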
	for (hwpend = iq80310_intstat_read(); hwpend != 0;) {
		irq = ffs(hwpend) - 1;
		ibit = (1U << irq);

#if 0
		stray = 0;
#endif

		hwpend &= ~ibit;

		if (pcpl & ibit) {
			/*
			 * IRQ is masked; mark it as pending and check
			 * the next one.  Note: external IRQs are already
			 * disabled.
			 */
			iq80310_ipending |= ibit;
			continue;
		}

		iq80310_ipending &= ~ibit;
		rv = 0;

		iq = &intrq[irq];
		iq->iq_ev.ev_count++;
		ci->ci_data.cpu_nintr++;
		ci->ci_cpl |= iq->iq_mask;
		oldirqstate = enable_interrupts(I32_bit);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list)) {
			rv |= (*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
		}
		restore_interrupts(oldirqstate);

		ci->ci_cpl = pcpl;

#if 0 /* XXX */
		if (rv == 0)
			printf("Stray interrupt: IRQ %d\n", irq);
#endif
	}

#if 0 /* XXX */
	if (stray)
		printf("Stray external interrupt\n");
#endif

#ifdef __HAVE_FAST_SOFTINTS
	/* Check for pending soft interrupts. */
	if ((iq80310_ipending & ~IRQ_BITS) & ~ci->ci_cpl) {
		oldirqstate = enable_interrupts(I32_bit);
		iq80310_do_soft();
		restore_interrupts(oldirqstate);
	}
#endif

	/*
	 * If no hardware interrupts are still pending (deferred above
	 * because they were masked), re-enable external interrupts.
	 */
	if ((iq80310_ipending & IRQ_BITS) == 0)
		i80200_intr_enable(INTCTL_IM | INTCTL_PM);
}
495