xref: /netbsd/sys/arch/arm/ep93xx/ep93xx_intr.c (revision 6550d01e)
1 /* $NetBSD: ep93xx_intr.c,v 1.15 2010/12/20 00:25:27 matt Exp $ */
2 
3 /*
4  * Copyright (c) 2002 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Jesse Off
9  *
10  * This code is derived from software contributed to The NetBSD Foundation
11  * by Ichiro FUKUHARA and Naoto Shimazaki.
12  *
13  * Redistribution and use in source and binary forms, with or without
14  * modification, are permitted provided that the following conditions
15  * are met:
16  * 1. Redistributions of source code must retain the above copyright
17  *    notice, this list of conditions and the following disclaimer.
18  * 2. Redistributions in binary form must reproduce the above copyright
19  *    notice, this list of conditions and the following disclaimer in the
20  *    documentation and/or other materials provided with the distribution.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
23  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
24  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
25  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
26  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32  * POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 #include <sys/cdefs.h>
36 __KERNEL_RCSID(0, "$NetBSD: ep93xx_intr.c,v 1.15 2010/12/20 00:25:27 matt Exp $");
37 
38 /*
39  * Interrupt support for the Cirrus Logic EP93XX
40  */
41 
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/malloc.h>
45 #include <sys/termios.h>
46 
47 #include <machine/bus.h>
48 #include <machine/intr.h>
49 
50 #include <arm/cpufunc.h>
51 
52 #include <arm/ep93xx/ep93xxreg.h>
53 #include <arm/ep93xx/ep93xxvar.h>
54 
55 /* Interrupt handler queues. */
56 struct intrq intrq[NIRQ];
57 
58 /* Interrupts to mask at each level. */
59 static u_int32_t vic1_imask[NIPL];
60 static u_int32_t vic2_imask[NIPL];
61 
62 /* Current interrupt priority level. */
63 volatile int hardware_spl_level;
64 
65 /* Software copy of the IRQs we have enabled. */
66 volatile u_int32_t vic1_intr_enabled;
67 volatile u_int32_t vic2_intr_enabled;
68 
69 /* Interrupts pending. */
70 static volatile int ipending;
71 
72 void	ep93xx_intr_dispatch(struct irqframe *frame);
73 
74 #define VIC1REG(reg)	*((volatile u_int32_t*) (EP93XX_AHB_VBASE + \
75 	EP93XX_AHB_VIC1 + (reg)))
76 #define VIC2REG(reg)	*((volatile u_int32_t*) (EP93XX_AHB_VBASE + \
77 	EP93XX_AHB_VIC2 + (reg)))
78 
79 static void
80 ep93xx_set_intrmask(u_int32_t vic1_irqs, u_int32_t vic2_irqs)
81 {
82 	VIC1REG(EP93XX_VIC_IntEnClear) = vic1_irqs;
83 	VIC1REG(EP93XX_VIC_IntEnable) = vic1_intr_enabled & ~vic1_irqs;
84 	VIC2REG(EP93XX_VIC_IntEnClear) = vic2_irqs;
85 	VIC2REG(EP93XX_VIC_IntEnable) = vic2_intr_enabled & ~vic2_irqs;
86 }
87 
88 static void
89 ep93xx_enable_irq(int irq)
90 {
91 	if (irq < VIC_NIRQ) {
92 		vic1_intr_enabled |= (1U << irq);
93 		VIC1REG(EP93XX_VIC_IntEnable) = (1U << irq);
94 	} else {
95 		vic2_intr_enabled |= (1U << (irq - VIC_NIRQ));
96 		VIC2REG(EP93XX_VIC_IntEnable) = (1U << (irq - VIC_NIRQ));
97 	}
98 }
99 
100 static inline void
101 ep93xx_disable_irq(int irq)
102 {
103 	if (irq < VIC_NIRQ) {
104 		vic1_intr_enabled &= ~(1U << irq);
105 		VIC1REG(EP93XX_VIC_IntEnClear) = (1U << irq);
106 	} else {
107 		vic2_intr_enabled &= ~(1U << (irq - VIC_NIRQ));
108 		VIC2REG(EP93XX_VIC_IntEnClear) = (1U << (irq - VIC_NIRQ));
109 	}
110 }
111 
112 /*
113  * NOTE: This routine must be called with interrupts disabled in the CPSR.
114  */
/*
 * Recompute all interrupt masking state from the current handler lists:
 *   - per-IRQ set of IPLs with a registered handler (iq_levels),
 *   - per-IPL VIC mask words (vic1_imask[]/vic2_imask[]),
 *   - per-IRQ VIC mask words applied while that IRQ is serviced
 *     (iq_vic1_mask/iq_vic2_mask).
 * IRQs with at least one handler are re-enabled at the VIC; all others
 * are left disabled.
 */
static void
ep93xx_intr_calculate_masks(void)
{
	struct intrq *iq;
	struct intrhand *ih;
	int irq, ipl;

	/* First, figure out which IPLs each IRQ has. */
	for (irq = 0; irq < NIRQ; irq++) {
		int levels = 0;
		iq = &intrq[irq];
		/* Disable while we rebuild; re-enabled below if in use. */
		ep93xx_disable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			levels |= (1U << ih->ih_ipl);
		iq->iq_levels = levels;
	}

	/* Next, figure out which IRQs are used by each IPL. */
	for (ipl = 0; ipl < NIPL; ipl++) {
		int vic1_irqs = 0;
		int vic2_irqs = 0;
		/* VIC1 carries IRQs 0..VIC_NIRQ-1 ... */
		for (irq = 0; irq < VIC_NIRQ; irq++) {
			if (intrq[irq].iq_levels & (1U << ipl))
				vic1_irqs |= (1U << irq);
		}
		vic1_imask[ipl] = vic1_irqs;
		/* ... and VIC2 carries IRQs VIC_NIRQ..NIRQ-1. */
		for (irq = 0; irq < VIC_NIRQ; irq++) {
			if (intrq[irq + VIC_NIRQ].iq_levels & (1U << ipl))
				vic2_irqs |= (1U << irq);
		}
		vic2_imask[ipl] = vic2_irqs;
	}

	/* Software-interrupt levels must never mask hardware IRQs. */
	KASSERT(vic1_imask[IPL_NONE] == 0);
	KASSERT(vic2_imask[IPL_NONE] == 0);
	KASSERT(vic1_imask[IPL_SOFTCLOCK] == 0);
	KASSERT(vic2_imask[IPL_SOFTCLOCK] == 0);
	KASSERT(vic1_imask[IPL_SOFTBIO] == 0);
	KASSERT(vic2_imask[IPL_SOFTBIO] == 0);
	KASSERT(vic1_imask[IPL_SOFTNET] == 0);
	KASSERT(vic2_imask[IPL_SOFTNET] == 0);
	KASSERT(vic1_imask[IPL_SOFTSERIAL] == 0);
	KASSERT(vic2_imask[IPL_SOFTSERIAL] == 0);

	/*
	 * splsched() must block anything that uses the scheduler.
	 */
	vic1_imask[IPL_SCHED] |= vic1_imask[IPL_VM];
	vic2_imask[IPL_SCHED] |= vic2_imask[IPL_VM];

	/*
	 * splhigh() must block "everything".
	 */
	vic1_imask[IPL_HIGH] |= vic1_imask[IPL_SCHED];
	vic2_imask[IPL_HIGH] |= vic2_imask[IPL_SCHED];

	/*
	 * Now compute which IRQs must be blocked when servicing any
	 * given IRQ.
	 */
	for (irq = 0; irq < NIRQ; irq++) {
		int	vic1_irqs;
		int	vic2_irqs;

		/* Start with the IRQ's own bit so it can't recurse. */
		if (irq < VIC_NIRQ) {
			vic1_irqs = (1U << irq);
			vic2_irqs = 0;
		} else {
			vic1_irqs = 0;
			vic2_irqs = (1U << (irq - VIC_NIRQ));
		}
		iq = &intrq[irq];
		/* Only IRQs with at least one handler get enabled. */
		if (TAILQ_FIRST(&iq->iq_list) != NULL)
			ep93xx_enable_irq(irq);
		/* Add in everything masked by each handler's IPL. */
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list)) {
			vic1_irqs |= vic1_imask[ih->ih_ipl];
			vic2_irqs |= vic2_imask[ih->ih_ipl];
		}
		iq->iq_vic1_mask = vic1_irqs;
		iq->iq_vic2_mask = vic2_irqs;
	}
}
199 
200 inline void
201 splx(int new)
202 {
203 	int	old;
204 	u_int	oldirqstate;
205 
206 	oldirqstate = disable_interrupts(I32_bit);
207 	old = curcpl();
208 	set_curcpl(new);
209 	if (new != hardware_spl_level) {
210 		hardware_spl_level = new;
211 		ep93xx_set_intrmask(vic1_imask[new], vic2_imask[new]);
212 	}
213 	restore_interrupts(oldirqstate);
214 
215 #ifdef __HAVE_FAST_SOFTINTS
216 	cpu_dosoftints();
217 #endif
218 }
219 
220 int
221 _splraise(int ipl)
222 {
223 	int	old;
224 	u_int	oldirqstate;
225 
226 	oldirqstate = disable_interrupts(I32_bit);
227 	old = curcpl();
228 	set_curcpl(ipl);
229 	restore_interrupts(oldirqstate);
230 	return (old);
231 }
232 
/*
 * Lower the interrupt priority level to 'ipl' (no-op if the current
 * level is already at or below it) and return the previous level.
 */
int
_spllower(int ipl)
{
	const int old = curcpl();

	if (ipl < old)
		splx(ipl);

	return old;
}
243 
244 /*
245  * ep93xx_intr_init:
246  *
247  *	Initialize the rest of the interrupt subsystem, making it
248  *	ready to handle interrupts from devices.
249  */
250 void
251 ep93xx_intr_init(void)
252 {
253 	struct intrq *iq;
254 	int i;
255 
256 	vic1_intr_enabled = 0;
257 	vic2_intr_enabled = 0;
258 
259 	for (i = 0; i < NIRQ; i++) {
260 		iq = &intrq[i];
261 		TAILQ_INIT(&iq->iq_list);
262 
263 		sprintf(iq->iq_name, "irq %d", i);
264 		evcnt_attach_dynamic(&iq->iq_ev, EVCNT_TYPE_INTR,
265 				     NULL, (i < VIC_NIRQ ? "vic1" : "vic2"),
266 		                     iq->iq_name);
267 	}
268 	curcpu()->ci_intr_depth = 0;
269 	set_curcpl(0);
270 	hardware_spl_level = 0;
271 
272 	/* All interrupts should use IRQ not FIQ */
273 	VIC1REG(EP93XX_VIC_IntSelect) = 0;
274 	VIC2REG(EP93XX_VIC_IntSelect) = 0;
275 
276 	ep93xx_intr_calculate_masks();
277 
278 	/* Enable IRQs (don't yet use FIQs). */
279 	enable_interrupts(I32_bit);
280 }
281 
282 void *
283 ep93xx_intr_establish(int irq, int ipl, int (*ih_func)(void *), void *arg)
284 {
285 	struct intrq*		iq;
286 	struct intrhand*	ih;
287 	u_int			oldirqstate;
288 
289 	if (irq < 0 || irq > NIRQ)
290 		panic("ep93xx_intr_establish: IRQ %d out of range", irq);
291 	if (ipl < 0 || ipl > NIPL)
292 		panic("ep93xx_intr_establish: IPL %d out of range", ipl);
293 
294 	ih = malloc(sizeof(*ih), M_DEVBUF, M_NOWAIT);
295 	if (ih == NULL)
296 		return (NULL);
297 
298 	ih->ih_func = ih_func;
299 	ih->ih_arg = arg;
300 	ih->ih_irq = irq;
301 	ih->ih_ipl = ipl;
302 
303 	iq = &intrq[irq];
304 
305 	oldirqstate = disable_interrupts(I32_bit);
306 	TAILQ_INSERT_TAIL(&iq->iq_list, ih, ih_list);
307 	ep93xx_intr_calculate_masks();
308 	restore_interrupts(oldirqstate);
309 
310 	return (ih);
311 }
312 
313 void
314 ep93xx_intr_disestablish(void *cookie)
315 {
316 	struct intrhand*	ih = cookie;
317 	struct intrq*		iq = &intrq[ih->ih_irq];
318 	u_int			oldirqstate;
319 
320 	oldirqstate = disable_interrupts(I32_bit);
321 	TAILQ_REMOVE(&iq->iq_list, ih, ih_list);
322 	ep93xx_intr_calculate_masks();
323 	restore_interrupts(oldirqstate);
324 }
325 
/*
 * IRQ dispatcher, entered from the ARM IRQ trap with interrupts
 * disabled.  Services at most ONE interrupt source per invocation:
 * the lowest-numbered pending IRQ on VIC1, or failing that on VIC2
 * (so VIC1 sources are preferred; any remaining pending sources
 * re-trigger on return since they stay asserted at the VIC).
 */
void
ep93xx_intr_dispatch(struct irqframe *frame)
{
	struct intrq*		iq;
	struct intrhand*	ih;
	u_int			oldirqstate;
	int			pcpl;
	u_int32_t		vic1_hwpend;
	u_int32_t		vic2_hwpend;
	int			irq;

	pcpl = curcpl();

	/* Snapshot the pending-IRQ status of both VICs. */
	vic1_hwpend = VIC1REG(EP93XX_VIC_IRQStatus);
	vic2_hwpend = VIC2REG(EP93XX_VIC_IRQStatus);

	/*
	 * Block everything masked at the entry IPL plus everything
	 * currently pending, so re-enabling CPU IRQs below cannot
	 * re-enter for the same (or a lower-priority) source.
	 */
	hardware_spl_level = pcpl;
	ep93xx_set_intrmask(vic1_imask[pcpl] | vic1_hwpend,
			     vic2_imask[pcpl] | vic2_hwpend);

	/* Ignore pending sources already blocked at the entry IPL. */
	vic1_hwpend &= ~vic1_imask[pcpl];
	vic2_hwpend &= ~vic2_imask[pcpl];

	if (vic1_hwpend) {
		/* ffs() picks the lowest-numbered pending IRQ. */
		irq = ffs(vic1_hwpend) - 1;

		iq = &intrq[irq];
		iq->iq_ev.ev_count++;
		curcpu()->ci_data.cpu_nintr++;
		TAILQ_FOREACH(ih, &iq->iq_list, ih_list) {
			/* Run each handler at its own IPL, IRQs on. */
			set_curcpl(ih->ih_ipl);
			oldirqstate = enable_interrupts(I32_bit);
			(void) (*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
			restore_interrupts(oldirqstate);
		}
	} else if (vic2_hwpend) {
		irq = ffs(vic2_hwpend) - 1;

		/* VIC2 sources occupy intrq[VIC_NIRQ..NIRQ-1]. */
		iq = &intrq[irq + VIC_NIRQ];
		iq->iq_ev.ev_count++;
		curcpu()->ci_data.cpu_nintr++;
		TAILQ_FOREACH(ih, &iq->iq_list, ih_list) {
			set_curcpl(ih->ih_ipl);
			oldirqstate = enable_interrupts(I32_bit);
			(void) (*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
			restore_interrupts(oldirqstate);
		}
	}

	/* Restore the entry IPL and its hardware mask. */
	set_curcpl(pcpl);
	hardware_spl_level = pcpl;
	ep93xx_set_intrmask(vic1_imask[pcpl], vic2_imask[pcpl]);

#ifdef __HAVE_FAST_SOFTINTS
	cpu_dosoftints();
#endif
}
383