/*	$Id: at91aic.c,v 1.5 2010/12/20 00:25:27 matt Exp $	*/
/*	$NetBSD: at91aic.c,v 1.5 2010/12/20 00:25:27 matt Exp $	*/

/*
 * Copyright (c) 2007 Embedtronics Oy.
 * All rights reserved.
 *
 * Based on ep93xx_intr.c
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jesse Off
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Ichiro FUKUHARA and Naoto Shimazaki.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Interrupt support for Atmel's AT91xx9xxx family of controllers.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/termios.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <arm/cpufunc.h>

#include <arm/at91/at91reg.h>
#include <arm/at91/at91var.h>
#include <arm/at91/at91aicreg.h>
#include <arm/at91/at91aicvar.h>

#define	NIRQ	32

/* Interrupt handler queues. */
struct intrq intrq[NIRQ];

/* Interrupts to mask at each level. */
static u_int32_t aic_imask[NIPL];

/* Software copy of the IRQs we have enabled. */
volatile u_int32_t aic_intr_enabled;

#define	AICREG(reg)	*((volatile u_int32_t*) (AT91AIC_BASE + (reg)))
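
/*
 * Illustrative only: a write such as AICREG(AIC_IECR) = (1U << irq)
 * expands to a single volatile 32-bit store at AT91AIC_BASE + AIC_IECR,
 * i.e. the AIC registers are accessed directly through what is presumably
 * a fixed mapping of the controller, rather than via bus_space(9).
 */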

static int	at91aic_match(device_t, cfdata_t, void *);
static void	at91aic_attach(device_t, device_t, void *);

CFATTACH_DECL(at91aic, sizeof(struct device),
	      at91aic_match, at91aic_attach, NULL, NULL);

static int
at91aic_match(device_t parent, cfdata_t match, void *aux)
{
	if (strcmp(match->cf_name, "at91aic") == 0)
		return 2;
	return 0;
}

static void
at91aic_attach(device_t parent, device_t self, void *aux)
{
	(void)parent; (void)self; (void)aux;
	printf("\n");
}

static inline void
at91_set_intrmask(u_int32_t aic_irqs)
{
	AICREG(AIC_IDCR)	= aic_irqs;
	AICREG(AIC_IECR)	= aic_intr_enabled & ~aic_irqs;
}

static inline void
at91_enable_irq(int irq)
{
	aic_intr_enabled       |= (1U << irq);
	AICREG(AIC_IECR)	= (1U << irq);
}

static inline void
at91_disable_irq(int irq)
{
	aic_intr_enabled       &= ~(1U << irq);
	AICREG(AIC_IDCR)	=  (1U << irq);
}

/*
 * NOTE: This routine must be called with interrupts disabled in the CPSR.
 */
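/*
 * Worked example (hypothetical IRQ/IPL assignment, not taken from any
 * board configuration): if intrq[9] holds one handler registered at
 * IPL_VM, the first loop below sets bit IPL_VM in intrq[9].iq_levels,
 * the second loop sets bit 9 in aic_imask[IPL_VM], and the hierarchy
 * step then folds that bit into aic_imask[IPL_SCHED] and
 * aic_imask[IPL_HIGH], so raising the IPL to IPL_SCHED or above also
 * masks IRQ 9.
 */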
static void
at91aic_calculate_masks(void)
{
	struct intrq *iq;
	struct intrhand *ih;
	int irq, ipl;

	/* First, figure out which IPLs each IRQ has. */
	for (irq = 0; irq < NIRQ; irq++) {
		int levels = 0;
		iq = &intrq[irq];
		at91_disable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			levels |= (1U << ih->ih_ipl);
		iq->iq_levels = levels;
	}

	/* Next, figure out which IRQs are used by each IPL. */
	for (ipl = 0; ipl < NIPL; ipl++) {
		int aic_irqs = 0;
		for (irq = 0; irq < AIC_NIRQ; irq++) {
			if (intrq[irq].iq_levels & (1U << ipl))
				aic_irqs |= (1U << irq);
		}
		aic_imask[ipl] = aic_irqs;
	}

	/* IPL_NONE must open up all interrupts */
	KASSERT(aic_imask[IPL_NONE] == 0);
	KASSERT(aic_imask[IPL_SOFTCLOCK] == 0);
	KASSERT(aic_imask[IPL_SOFTBIO] == 0);
	KASSERT(aic_imask[IPL_SOFTNET] == 0);
	KASSERT(aic_imask[IPL_SOFTSERIAL] == 0);

	/*
	 * Enforce a hierarchy that gives "slow" devices (or devices with
	 * limited input buffer space/"real-time" requirements) a better
	 * chance at not dropping data.
	 */
	aic_imask[IPL_SCHED] |= aic_imask[IPL_VM];
	aic_imask[IPL_HIGH] |= aic_imask[IPL_SCHED];

	/*
	 * Finally, enable the IRQs that have at least one handler
	 * attached; everything else stays disabled.
	 */
	for (irq = 0; irq < MIN(NIRQ, AIC_NIRQ); irq++) {
		iq = &intrq[irq];
		if (TAILQ_FIRST(&iq->iq_list) != NULL)
			at91_enable_irq(irq);
	}
	/* Update the current mask. */
	at91_set_intrmask(aic_imask[curcpl()]);
}

inline void
splx(int new)
{
	int	old;
	u_int	oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);
	old = curcpl();
	if (old != new) {
		set_curcpl(new);
		at91_set_intrmask(aic_imask[new]);
	}
	restore_interrupts(oldirqstate);
#ifdef __HAVE_FAST_SOFTINTS
	cpu_dosoftints();
#endif
}

int
_splraise(int ipl)
{
	int	old;
	u_int	oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);
	old = curcpl();
	if (old != ipl) {
		set_curcpl(ipl);
		at91_set_intrmask(aic_imask[ipl]);
	}
	restore_interrupts(oldirqstate);

	return (old);
}

int
_spllower(int ipl)
{
	int	old = curcpl();

	if (old <= ipl)
		return (old);
	splx(ipl);
#ifdef __HAVE_FAST_SOFTINTS
	cpu_dosoftints();
#endif
	return (old);
}
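
/*
 * Typical use of the spl(9) calls built on the primitives above (sketch
 * only; the splvm()/driver pairing is illustrative, not something this
 * file mandates):
 *
 *	int s = splvm();	// raises the IPL, loads aic_imask[IPL_VM]
 *	... touch state shared with an IPL_VM interrupt handler ...
 *	splx(s);		// drops back, restoring the previous mask
 */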

/*
 * at91aic_init:
 *
 *	Initialize the rest of the interrupt subsystem, making it
 *	ready to handle interrupts from devices.
 */
void
at91aic_init(void)
{
	struct intrq *iq;
	int i;

	aic_intr_enabled = 0;

	// disable all interrupts:
	AICREG(AIC_IDCR)	= -1;

	for (i = 0; i < NIRQ; i++) {
		iq = &intrq[i];
		TAILQ_INIT(&iq->iq_list);

		sprintf(iq->iq_name, "irq %d", i);
		evcnt_attach_dynamic(&iq->iq_ev, EVCNT_TYPE_INTR,
				     NULL, "aic", iq->iq_name);
	}

	/* All interrupts should use IRQ not FIQ */

	AICREG(AIC_IDCR)	= -1;	/* disable interrupts	*/
	AICREG(AIC_ICCR)	= -1;	/* clear all interrupts	*/
	AICREG(AIC_DCR)		= 0;	/* not in debug mode, just to make sure */
	for (i = 0; i < NIRQ; i++) {
		AICREG(AIC_SMR(i))	= 0;	/* disable interrupt */
		AICREG(AIC_SVR(i))	= (u_int32_t)&intrq[i];	// address of interrupt queue
	}
	AICREG(AIC_FVR)		= 0;	// fast interrupt vector
	AICREG(AIC_SPU)		= 0;	// spurious interrupt vector

	AICREG(AIC_EOICR)	= 0;	/* clear logic... */
	AICREG(AIC_EOICR)	= 0;	/* clear logic... */

	at91aic_calculate_masks();

	/* Enable IRQs (don't yet use FIQs). */
	enable_interrupts(I32_bit);
}
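
/*
 * at91aic_init() is expected to run exactly once during early platform
 * bring-up (presumably from the board's initarm()/startup path), before
 * any at91aic_intr_establish() calls are made.
 */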

void *
at91aic_intr_establish(int irq, int ipl, int type, int (*ih_func)(void *), void *arg)
{
	struct intrq*		iq;
	struct intrhand*	ih;
	u_int			oldirqstate;
	unsigned		ok;
	uint32_t		smr;

	if (irq < 0 || irq >= NIRQ)
		panic("intr_establish: IRQ %d out of range", irq);
	if (ipl < 0 || ipl >= NIPL)
		panic("intr_establish: IPL %d out of range", ipl);

	smr = 1;		// all interrupts get priority one for now
	switch (type) {
	case _INTR_LOW_LEVEL:
		smr |= AIC_SMR_SRCTYPE_LVL_LO;
		break;
	case INTR_HIGH_LEVEL:
		smr |= AIC_SMR_SRCTYPE_LVL_HI;
		break;
	case INTR_FALLING_EDGE:
		smr |= AIC_SMR_SRCTYPE_FALLING;
		break;
	case INTR_RISING_EDGE:
		smr |= AIC_SMR_SRCTYPE_RISING;
		break;
	default:
		panic("intr_establish: interrupt type %d is invalid", type);
	}

	ih = malloc(sizeof(*ih), M_DEVBUF, M_NOWAIT);
	if (ih == NULL)
		return (NULL);

	ih->ih_func = ih_func;
	ih->ih_arg = arg;
	ih->ih_irq = irq;
	ih->ih_ipl = ipl;

	iq = &intrq[irq];

	oldirqstate = disable_interrupts(I32_bit);
	if (TAILQ_FIRST(&iq->iq_list) == NULL || (iq->iq_type & ~type) == 0) {
		AICREG(AIC_SMR(irq)) = smr;
		iq->iq_type = type;
		TAILQ_INSERT_TAIL(&iq->iq_list, ih, ih_list);
		at91aic_calculate_masks();
		ok = 1;
	} else
		ok = 0;
	restore_interrupts(oldirqstate);

	if (ok) {
#ifdef	AT91AIC_DEBUG
		int i;
		printf("\n");
		for (i = 0; i < NIPL; i++) {
			printf("IPL%d: aic_imask=0x%08X\n", i, aic_imask[i]);
		}
#endif
	} else {
		free(ih, M_DEVBUF);
		ih = NULL;
	}

	return (ih);
}
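
/*
 * Illustrative driver usage (the "foo" driver, its softc and sa_irq are
 * hypothetical names, not part of this file):
 *
 *	sc->sc_ih = at91aic_intr_establish(sa->sa_irq, IPL_NET,
 *	    INTR_HIGH_LEVEL, foo_intr, sc);
 *	if (sc->sc_ih == NULL)
 *		aprint_error_dev(self, "couldn't establish interrupt\n");
 *
 * Passing a NULL arg makes the handler receive the interrupt frame
 * instead; see intr_process() below.
 */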

void
at91aic_intr_disestablish(void *cookie)
{
	struct intrhand*	ih = cookie;
	struct intrq*		iq = &intrq[ih->ih_irq];
	u_int			oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);
	TAILQ_REMOVE(&iq->iq_list, ih, ih_list);
	at91aic_calculate_masks();
	restore_interrupts(oldirqstate);
}

#include <arm/at91/at91reg.h>
#include <arm/at91/at91dbgureg.h>
#include <arm/at91/at91pdcreg.h>

static inline void intr_process(struct intrq *iq, int pcpl, struct irqframe *frame);

static inline void
intr_process(struct intrq *iq, int pcpl, struct irqframe *frame)
{
	struct intrhand*	ih;
	u_int			oldirqstate, intr;

	intr = iq - intrq;

	iq->iq_ev.ev_count++;
	curcpu()->ci_data.cpu_nintr++;

	if ((1U << intr) & aic_imask[pcpl]) {
		panic("interrupt %d should be masked! (aic_imask=0x%X)", intr, aic_imask[pcpl]);
	}

	if (iq->iq_busy) {
		panic("interrupt %d busy!", intr);
	}

	iq->iq_busy = 1;

	for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
	     ih = TAILQ_NEXT(ih, ih_list)) {
		set_curcpl(ih->ih_ipl);
		at91_set_intrmask(aic_imask[ih->ih_ipl]);
		oldirqstate = enable_interrupts(I32_bit);
		(void) (*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
		restore_interrupts(oldirqstate);
	}

	if (!iq->iq_busy) {
		panic("interrupt %d not busy!", intr);
	}
	iq->iq_busy = 0;

	set_curcpl(pcpl);
	at91_set_intrmask(aic_imask[pcpl]);
}

void
at91aic_intr_dispatch(struct irqframe *frame)
{
	struct intrq*		iq;
	int			pcpl = curcpl();

	iq = (struct intrq *)AICREG(AIC_IVR);	// get current queue

	// OK, service interrupt
	if (iq)
		intr_process(iq, pcpl, frame);

	AICREG(AIC_EOICR) = 0;			// end of interrupt
}
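
/*
 * Note on the vectoring above: at91aic_init() loads AIC_SVR(i) with the
 * address of intrq[i], so the AIC_IVR read in at91aic_intr_dispatch()
 * returns a pointer to the queue of the highest-priority pending source,
 * and the trailing AIC_EOICR write tells the controller that servicing
 * of that source is complete.
 */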

#if 0
void
at91aic_intr_poll(int irq)
{
	u_int		oldirqstate;
	uint32_t	ipr;
	int		pcpl = curcpl();

	oldirqstate = disable_interrupts(I32_bit);
	ipr = AICREG(AIC_IPR);
	if ((ipr & (1U << irq) & ~aic_imask[pcpl]))
		intr_process(&intrq[irq], pcpl, NULL);
	restore_interrupts(oldirqstate);
#ifdef __HAVE_FAST_SOFTINTS
	cpu_dosoftints();
#endif
}
#endif

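/*
 * Polling variant: runs the given handler by hand when its interrupt is
 * pending, optionally (flags != 0) even while the current IPL would mask
 * it.  The assumed use case is code that must make progress with normal
 * interrupt delivery unavailable, e.g. a polled console path; that use is
 * an assumption, not something stated in this file.
 */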
void
at91aic_intr_poll(void *ihp, int flags)
{
	struct intrhand* ih = ihp;
	u_int		oldirqstate, irq = ih->ih_irq;
	uint32_t	ipr;
	int		pcpl = curcpl();

	oldirqstate = disable_interrupts(I32_bit);
	ipr = AICREG(AIC_IPR);
	if ((ipr & (1U << irq))
	    && (flags || !(aic_imask[pcpl] & (1U << irq)))) {
		set_curcpl(ih->ih_ipl);
		at91_set_intrmask(aic_imask[ih->ih_ipl]);
		(void)enable_interrupts(I32_bit);
		(void)(*ih->ih_func)(ih->ih_arg ? ih->ih_arg : NULL);
		(void)disable_interrupts(I32_bit);
		set_curcpl(pcpl);
		at91_set_intrmask(aic_imask[pcpl]);
	}
	restore_interrupts(oldirqstate);

#ifdef __HAVE_FAST_SOFTINTS
	cpu_dosoftints();
#endif
}