xref: /netbsd/sys/arch/arm/ixp12x0/ixp12x0_intr.c (revision 6550d01e)
1 /* $NetBSD: ixp12x0_intr.c,v 1.21 2010/12/20 00:25:28 matt Exp $ */
2 
3 /*
4  * Copyright (c) 2002 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Ichiro FUKUHARA and Naoto Shimazaki.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: ixp12x0_intr.c,v 1.21 2010/12/20 00:25:28 matt Exp $");
34 
35 /*
36  * Interrupt support for the Intel ixp12x0
37  */
38 
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/malloc.h>
42 #include <sys/simplelock.h>
43 #include <sys/termios.h>
44 
45 #include <machine/bus.h>
46 #include <machine/intr.h>
47 
48 #include <arm/cpufunc.h>
49 
50 #include <arm/ixp12x0/ixp12x0reg.h>
51 #include <arm/ixp12x0/ixp12x0var.h>
52 #include <arm/ixp12x0/ixp12x0_comreg.h>
53 #include <arm/ixp12x0/ixp12x0_comvar.h>
54 #include <arm/ixp12x0/ixp12x0_pcireg.h>
55 
56 
/* Shadow copies of the ixpcom console's control register and interrupt
 * mask, shared with ixp12x0_com.c so the UART IRQ can be (un)masked here. */
extern u_int32_t	ixpcom_cr;	/* current cr from *_com.c */
extern u_int32_t	ixpcom_imask;	/* tell mask to *_com.c */

/* Interrupt handler queues, one per IRQ (system IRQs first, then PCI). */
struct intrq intrq[NIRQ];

/* Interrupts to mask at each level (system and PCI sources separately). */
static u_int32_t imask[NIPL];
static u_int32_t pci_imask[NIPL];

/* Current interrupt priority level actually programmed into hardware. */
volatile int hardware_spl_level;

/* Software copy of the IRQs we have enabled. */
volatile u_int32_t intr_enabled;
volatile u_int32_t pci_intr_enabled;

/* Interrupts pending. */
/* NOTE(review): ipending is declared but not referenced in this file —
 * presumably vestigial; confirm before removing. */
static volatile int ipending;

void	ixp12x0_intr_dispatch(struct irqframe *frame);

/* Dereference a memory-mapped register at virtual address `reg'. */
#define IXPREG(reg)	*((volatile u_int32_t*) (reg))
80 
81 static inline u_int32_t
82 ixp12x0_irq_read(void)
83 {
84 	return IXPREG(IXP12X0_IRQ_VBASE) & IXP12X0_INTR_MASK;
85 }
86 
87 static inline u_int32_t
88 ixp12x0_pci_irq_read(void)
89 {
90 	return IXPREG(IXPPCI_IRQ_STATUS);
91 }
92 
93 static void
94 ixp12x0_enable_uart_irq(void)
95 {
96 	ixpcom_imask = 0;
97 	if (ixpcom_sc)
98 		bus_space_write_4(ixpcom_sc->sc_iot, ixpcom_sc->sc_ioh,
99 				  IXPCOM_CR, ixpcom_cr & ~ixpcom_imask);
100 }
101 
102 static void
103 ixp12x0_disable_uart_irq(void)
104 {
105 	ixpcom_imask = CR_RIE | CR_XIE;
106 	if (ixpcom_sc)
107 		bus_space_write_4(ixpcom_sc->sc_iot, ixpcom_sc->sc_ioh,
108 				  IXPCOM_CR, ixpcom_cr & ~ixpcom_imask);
109 }
110 
111 static void
112 ixp12x0_set_intrmask(u_int32_t irqs, u_int32_t pci_irqs)
113 {
114 	if (irqs & (1U << IXP12X0_INTR_UART)) {
115 		ixp12x0_disable_uart_irq();
116 	} else {
117 		ixp12x0_enable_uart_irq();
118 	}
119 	IXPREG(IXPPCI_IRQ_ENABLE_CLEAR) = pci_irqs;
120 	IXPREG(IXPPCI_IRQ_ENABLE_SET) = pci_intr_enabled & ~pci_irqs;
121 }
122 
123 static void
124 ixp12x0_enable_irq(int irq)
125 {
126 	if (irq < SYS_NIRQ) {
127 		intr_enabled |= (1U << irq);
128 		switch (irq) {
129 		case IXP12X0_INTR_UART:
130 			ixp12x0_enable_uart_irq();
131 			break;
132 
133 		case IXP12X0_INTR_PCI:
134 			/* nothing to do */
135 			break;
136 		default:
137 			panic("enable_irq:bad IRQ %d", irq);
138 		}
139 	} else {
140 		pci_intr_enabled |= (1U << (irq - SYS_NIRQ));
141 		IXPREG(IXPPCI_IRQ_ENABLE_SET) = (1U << (irq - SYS_NIRQ));
142 	}
143 }
144 
145 static inline void
146 ixp12x0_disable_irq(int irq)
147 {
148 	if (irq < SYS_NIRQ) {
149 		intr_enabled ^= ~(1U << irq);
150 		switch (irq) {
151 		case IXP12X0_INTR_UART:
152 			ixp12x0_disable_uart_irq();
153 			break;
154 
155 		case IXP12X0_INTR_PCI:
156 			/* nothing to do */
157 			break;
158 		default:
159 			/* nothing to do */
160 			break;
161 		}
162 	} else {
163 		pci_intr_enabled &= ~(1U << (irq - SYS_NIRQ));
164 		IXPREG(IXPPCI_IRQ_ENABLE_CLEAR) = (1U << (irq - SYS_NIRQ));
165 	}
166 }
167 
/*
 * NOTE: This routine must be called with interrupts disabled in the CPSR.
 */
static void
ixp12x0_intr_calculate_masks(void)
{
	struct intrq *iq;
	struct intrhand *ih;
	int irq, ipl;

	/* First, figure out which IPLs each IRQ has. */
	for (irq = 0; irq < NIRQ; irq++) {
		int levels = 0;
		iq = &intrq[irq];
		/* Disable while recomputing; re-enabled below if handlers exist. */
		ixp12x0_disable_irq(irq);
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list))
			levels |= (1U << ih->ih_ipl);
		iq->iq_levels = levels;
	}

	/* Next, figure out which IRQs are used by each IPL. */
	for (ipl = 0; ipl < NIPL; ipl++) {
		int irqs = 0;
		int pci_irqs = 0;
		/* System sources occupy intrq[0..SYS_NIRQ-1]... */
		for (irq = 0; irq < SYS_NIRQ; irq++) {
			if (intrq[irq].iq_levels & (1U << ipl))
				irqs |= (1U << irq);
		}
		imask[ipl] = irqs;
		/* ...and PCI sources occupy intrq[SYS_NIRQ..NIRQ-1]. */
		for (irq = 0; irq < SYS_NIRQ; irq++) {
			if (intrq[irq + SYS_NIRQ].iq_levels & (1U << ipl))
				pci_irqs |= (1U << irq);
		}
		pci_imask[ipl] = pci_irqs;
	}

	/* Software interrupt levels must never mask hardware sources. */
	KASSERT(imask[IPL_NONE] == 0);
	KASSERT(pci_imask[IPL_NONE] == 0);
	KASSERT(imask[IPL_SOFTCLOCK] == 0);
	KASSERT(pci_imask[IPL_SOFTCLOCK] == 0);
	KASSERT(imask[IPL_SOFTBIO] == 0);
	KASSERT(pci_imask[IPL_SOFTBIO] == 0);
	KASSERT(imask[IPL_SOFTNET] == 0);
	KASSERT(pci_imask[IPL_SOFTNET] == 0);
	KASSERT(imask[IPL_SOFTSERIAL] == 0);
	KASSERT(pci_imask[IPL_SOFTSERIAL] == 0);

	KASSERT(imask[IPL_VM] != 0);
	KASSERT(pci_imask[IPL_VM] != 0);

	/*
	 * splsched() must block anything that uses the scheduler.
	 */
	imask[IPL_SCHED] |= imask[IPL_VM];
	pci_imask[IPL_SCHED] |= pci_imask[IPL_VM];

	/*
	 * splhigh() must block "everything".
	 */
	imask[IPL_HIGH] |= imask[IPL_SCHED];
	pci_imask[IPL_HIGH] |= pci_imask[IPL_SCHED];

	/*
	 * Now compute which IRQs must be blocked when servicing any
	 * given IRQ.
	 */
	for (irq = 0; irq < NIRQ; irq++) {
		int	irqs;
		int	pci_irqs;

		/* An IRQ always blocks itself... */
		if (irq < SYS_NIRQ) {
			irqs = (1U << irq);
			pci_irqs = 0;
		} else {
			irqs = 0;
			pci_irqs = (1U << (irq - SYS_NIRQ));
		}
		iq = &intrq[irq];
		/* Only re-enable sources that actually have handlers. */
		if (TAILQ_FIRST(&iq->iq_list) != NULL)
			ixp12x0_enable_irq(irq);
		/* ...plus everything masked at each of its handlers' IPLs. */
		for (ih = TAILQ_FIRST(&iq->iq_list); ih != NULL;
		     ih = TAILQ_NEXT(ih, ih_list)) {
			irqs |= imask[ih->ih_ipl];
			pci_irqs |= pci_imask[ih->ih_ipl];
		}
		iq->iq_mask = irqs;
		iq->iq_pci_mask = pci_irqs;
	}
}
258 
259 inline void
260 splx(int new)
261 {
262 	int	old;
263 	u_int	oldirqstate;
264 
265 	oldirqstate = disable_interrupts(I32_bit);
266 	old = curcpl();
267 	set_curcpl(new);
268 	if (new != hardware_spl_level) {
269 		hardware_spl_level = new;
270 		ixp12x0_set_intrmask(imask[new], pci_imask[new]);
271 	}
272 	restore_interrupts(oldirqstate);
273 
274 #ifdef __HAVE_FAST_SOFTINTS
275 	cpu_dosoftints();
276 #endif
277 }
278 
279 int
280 _splraise(int ipl)
281 {
282 	int	old;
283 	u_int	oldirqstate;
284 
285 	oldirqstate = disable_interrupts(I32_bit);
286 	old = curcpl();
287 	set_curcpl(ipl);
288 	restore_interrupts(oldirqstate);
289 	return (old);
290 }
291 
/*
 * Lower the current interrupt priority level to `ipl' (a no-op if the
 * current level is already at or below it) and return the previous
 * level.
 */
int
_spllower(int ipl)
{
	const int old = curcpl();

	if (old > ipl)
		splx(ipl);
	return old;
}
302 
/*
 * ixp12x0_intr_init:
 *
 *	Initialize the rest of the interrupt subsystem, making it
 *	ready to handle interrupts from devices.
 */
void
ixp12x0_intr_init(void)
{
	struct intrq *iq;
	int i;

	/* Start with every source masked in software. */
	intr_enabled = 0;
	pci_intr_enabled = 0;

	for (i = 0; i < NIRQ; i++) {
		iq = &intrq[i];
		TAILQ_INIT(&iq->iq_list);

		/* NOTE(review): the label is formatted from the IRQ index,
		 * not an IPL — "ipl %d" looks like a misnomer; confirm
		 * before renaming (the string is visible via vmstat -e). */
		sprintf(iq->iq_name, "ipl %d", i);
		evcnt_attach_dynamic(&iq->iq_ev, EVCNT_TYPE_INTR,
				     NULL, "ixpintr", iq->iq_name);
	}
	curcpu()->ci_intr_depth = 0;
	curcpu()->ci_cpl = 0;
	hardware_spl_level = 0;

	/* Program hardware masks to match the (empty) handler queues. */
	ixp12x0_intr_calculate_masks();

	/* Enable IRQs (don't yet use FIQs). */
	enable_interrupts(I32_bit);
}
335 
336 void *
337 ixp12x0_intr_establish(int irq, int ipl, int (*ih_func)(void *), void *arg)
338 {
339 	struct intrq*		iq;
340 	struct intrhand*	ih;
341 	u_int			oldirqstate;
342 #ifdef DEBUG
343 	printf("ixp12x0_intr_establish(irq=%d, ipl=%d, ih_func=%08x, arg=%08x)\n",
344 	       irq, ipl, (u_int32_t) ih_func, (u_int32_t) arg);
345 #endif
346 	if (irq < 0 || irq > NIRQ)
347 		panic("ixp12x0_intr_establish: IRQ %d out of range", ipl);
348 	if (ipl < 0 || ipl > NIPL)
349 		panic("ixp12x0_intr_establish: IPL %d out of range", ipl);
350 
351 	ih = malloc(sizeof(*ih), M_DEVBUF, M_NOWAIT);
352 	if (ih == NULL)
353 		return (NULL);
354 
355 	ih->ih_func = ih_func;
356 	ih->ih_arg = arg;
357 	ih->ih_irq = irq;
358 	ih->ih_ipl = ipl;
359 
360 	iq = &intrq[irq];
361 	iq->iq_ist = IST_LEVEL;
362 
363 	oldirqstate = disable_interrupts(I32_bit);
364 	TAILQ_INSERT_TAIL(&iq->iq_list, ih, ih_list);
365 	ixp12x0_intr_calculate_masks();
366 	restore_interrupts(oldirqstate);
367 
368 	return (ih);
369 }
370 
371 void
372 ixp12x0_intr_disestablish(void *cookie)
373 {
374 	struct intrhand*	ih = cookie;
375 	struct intrq*		iq = &intrq[ih->ih_ipl];
376 	u_int			oldirqstate;
377 
378 	oldirqstate = disable_interrupts(I32_bit);
379 	TAILQ_REMOVE(&iq->iq_list, ih, ih_list);
380 	ixp12x0_intr_calculate_masks();
381 	restore_interrupts(oldirqstate);
382 }
383 
384 void
385 ixp12x0_intr_dispatch(struct irqframe *frame)
386 {
387 	struct intrq*		iq;
388 	struct intrhand*	ih;
389 	struct cpu_info* const	ci = curcpu();
390 	const int		ppl = ci->ci_cpl;
391 	u_int			oldirqstate;
392 	u_int32_t		hwpend;
393 	u_int32_t		pci_hwpend;
394 	int			irq;
395 	u_int32_t		ibit;
396 
397 
398 	hwpend = ixp12x0_irq_read();
399 	pci_hwpend = ixp12x0_pci_irq_read();
400 
401 	hardware_spl_level = ppl;
402 	ixp12x0_set_intrmask(imask[ppl] | hwpend, pci_imask[ppl] | pci_hwpend);
403 
404 	hwpend &= ~imask[ppl];
405 	pci_hwpend &= ~pci_imask[ppl];
406 
407 	while (hwpend) {
408 		irq = ffs(hwpend) - 1;
409 		ibit = (1U << irq);
410 
411 		iq = &intrq[irq];
412 		iq->iq_ev.ev_count++;
413 		ci->ci_data.cpu_nintr++;
414 		TAILQ_FOREACH(ih, &iq->iq_list, ih_list) {
415 			ci->ci_cpl = ih->ih_ipl;
416 			oldirqstate = enable_interrupts(I32_bit);
417 			(void) (*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
418 			restore_interrupts(oldirqstate);
419 			hwpend &= ~ibit;
420 		}
421 	}
422 	while (pci_hwpend) {
423 		irq = ffs(pci_hwpend) - 1;
424 		ibit = (1U << irq);
425 
426 		iq = &intrq[irq + SYS_NIRQ];
427 		iq->iq_ev.ev_count++;
428 		ci->ci_data.cpu_nintr++;
429 		TAILQ_FOREACH(ih, &iq->iq_list, ih_list) {
430 			ci->ci_cpl = ih->ih_ipl;
431 			oldirqstate = enable_interrupts(I32_bit);
432 			(void) (*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
433 			restore_interrupts(oldirqstate);
434 		}
435 		pci_hwpend &= ~ibit;
436 	}
437 
438 	ci->ci_cpl = ppl;
439 	hardware_spl_level = ppl;
440 	ixp12x0_set_intrmask(imask[ppl], pci_imask[ppl]);
441 
442 #ifdef __HAVE_FAST_SOFTINTS
443 	cpu_dosoftints();
444 #endif
445 }
446