/*	$NetBSD: footbridge_irqhandler.c,v 1.28 2021/08/13 11:40:43 skrll Exp $	*/

/*
 * Copyright (c) 2001, 2002 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef ARM_SPL_NOINLINE
#define	ARM_SPL_NOINLINE
#endif

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: footbridge_irqhandler.c,v 1.28 2021/08/13 11:40:43 skrll Exp $");

#include "opt_irqstats.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kmem.h>

#include <machine/intr.h>
#include <machine/cpu.h>
#include <arm/footbridge/dc21285mem.h>
#include <arm/footbridge/dc21285reg.h>

#include <dev/pci/pcivar.h>

#include "isa.h"
#if NISA > 0
#include <dev/isa/isavar.h>
#endif

/* Interrupt handler queues. */
static struct intrq footbridge_intrq[NIRQ];

/* Interrupts to mask at each level. */
int footbridge_imask[NIPL];

/* Software copy of the IRQs we have enabled. */
volatile uint32_t intr_enabled;

/* Interrupts pending. */
volatile int footbridge_ipending;

void footbridge_intr_dispatch(struct clockframe *frame);

const struct evcnt *footbridge_pci_intr_evcnt(void *, pci_intr_handle_t);

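/*
 * Map a PCI interrupt handle to its event counter.  Handles in the
 * 0x80-0x8f range are routed through the ISA interrupt code.
 */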
const struct evcnt *
footbridge_pci_intr_evcnt(void *pcv, pci_intr_handle_t ih)
{
	/* XXX check range is valid */
#if NISA > 0
	if (ih >= 0x80 && ih <= 0x8f) {
		return isa_intr_evcnt(NULL, (ih & 0x0f));
	}
#endif
	return &footbridge_intrq[ih].iq_ev;
}

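/* Enable an IRQ source in the software mask and push it to the hardware. */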
static inline void
footbridge_enable_irq(int irq)
{
	intr_enabled |= (1U << irq);
	footbridge_set_intrmask();
}

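/* Disable an IRQ source in the software mask and push it to the hardware. */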
static inline void
footbridge_disable_irq(int irq)
{
	intr_enabled &= ~(1U << irq);
	footbridge_set_intrmask();
}

/*
 * Recompute iq_levels, footbridge_imask[] and iq_mask from the list of
 * established handlers.
 *
 * NOTE: This routine must be called with interrupts disabled in the CPSR.
 */
static void
footbridge_intr_calculate_masks(void)
{
	struct intrq *iq;
	struct intrhand *ih;
	int irq, ipl;

	/* First, figure out which IPLs each IRQ has. */
	for (irq = 0; irq < NIRQ; irq++) {
		int levels = 0;
		iq = &footbridge_intrq[irq];
		footbridge_disable_irq(irq);
		TAILQ_FOREACH(ih, &iq->iq_list, ih_list) {
			levels |= (1U << ih->ih_ipl);
		}
		iq->iq_levels = levels;
	}

	/* Next, figure out which IRQs are used by each IPL. */
	for (ipl = 0; ipl < NIPL; ipl++) {
		int irqs = 0;
		for (irq = 0; irq < NIRQ; irq++) {
			if (footbridge_intrq[irq].iq_levels & (1U << ipl))
				irqs |= (1U << irq);
		}
		footbridge_imask[ipl] = irqs;
	}

	/* IPL_NONE must open up all interrupts. */
	KASSERT(footbridge_imask[IPL_NONE] == 0);
	KASSERT(footbridge_imask[IPL_SOFTCLOCK] == 0);
	KASSERT(footbridge_imask[IPL_SOFTBIO] == 0);
	KASSERT(footbridge_imask[IPL_SOFTNET] == 0);
	KASSERT(footbridge_imask[IPL_SOFTSERIAL] == 0);

	/*
	 * Enforce a hierarchy that gives "slow" devices (or devices with
	 * limited input buffer space/"real-time" requirements) a better
	 * chance at not dropping data.
	 */
	footbridge_imask[IPL_SCHED] |= footbridge_imask[IPL_VM];
	footbridge_imask[IPL_HIGH] |= footbridge_imask[IPL_SCHED];

	/*
	 * Calculate the mask of IRQs to block while handling each
	 * interrupt, and re-enable any IRQ that has handlers attached.
	 */
	for (irq = 0, iq = footbridge_intrq; irq < NIRQ; irq++, iq++) {
		int irqs = (1U << irq);
		if (!TAILQ_EMPTY(&iq->iq_list)) {
			footbridge_enable_irq(irq);
			TAILQ_FOREACH(ih, &iq->iq_list, ih_list) {
				irqs |= footbridge_imask[ih->ih_ipl];
			}
		}
		iq->iq_mask = irqs;
	}
}

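/* Out-of-line spl routines (selected by ARM_SPL_NOINLINE, defined above). */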
int
_splraise(int ipl)
{
	return (footbridge_splraise(ipl));
}

/* this will always take us to the ipl passed in */
void
splx(int new)
{
	footbridge_splx(new);
}

int
_spllower(int ipl)
{
	return (footbridge_spllower(ipl));
}

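/*
 * Initialise the interrupt system: clear the software mask, push it to
 * the hardware, and enable IRQs at the CPU.  Event counters are attached
 * separately by footbridge_intr_evcnt_attach().
 */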
void
footbridge_intr_init(void)
{
	struct intrq *iq;
	int i;

	intr_enabled = 0;
	set_curcpl(0xffffffff);
	footbridge_ipending = 0;
	footbridge_set_intrmask();

	for (i = 0, iq = footbridge_intrq; i < NIRQ; i++, iq++) {
		TAILQ_INIT(&iq->iq_list);
	}

	footbridge_intr_calculate_masks();

	/* Enable IRQs; we don't have any FIQs. */
	enable_interrupts(I32_bit);
}

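/* Attach a generic event counter ("irq N") for each interrupt queue. */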
void
footbridge_intr_evcnt_attach(void)
{
	struct intrq *iq;
	int i;

	for (i = 0, iq = footbridge_intrq; i < NIRQ; i++, iq++) {
		snprintf(iq->iq_name, sizeof(iq->iq_name), "irq %d", i);
		evcnt_attach_dynamic(&iq->iq_ev, EVCNT_TYPE_INTR,
		    NULL, "footbridge", iq->iq_name);
	}
}

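/*
 * Establish an interrupt handler for the given IRQ at the given IPL.
 * Returns an opaque cookie for footbridge_intr_disestablish().
 *
 * A typical call from a driver attach routine might look like this
 * (hypothetical names, for illustration only):
 *
 *	sc->sc_ih = footbridge_intr_claim(irq, IPL_NET, "mydev",
 *	    mydev_intr, sc);
 */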
void *
footbridge_intr_claim(int irq, int ipl, const char *name, int (*func)(void *),
    void *arg)
{
	struct intrq *iq;
	struct intrhand *ih;
	u_int oldirqstate;

	if (irq < 0 || irq >= NIRQ)
		panic("footbridge_intr_claim: IRQ %d out of range", irq);

	ih = kmem_alloc(sizeof(*ih), KM_SLEEP);
	ih->ih_func = func;
	ih->ih_arg = arg;
	ih->ih_ipl = ipl;
	ih->ih_irq = irq;

	iq = &footbridge_intrq[irq];

	iq->iq_ist = IST_LEVEL;

	oldirqstate = disable_interrupts(I32_bit);

	TAILQ_INSERT_TAIL(&iq->iq_list, ih, ih_list);

	footbridge_intr_calculate_masks();

	/* Detach the existing event counter and re-attach with the new name. */
	evcnt_detach(&iq->iq_ev);
	evcnt_attach_dynamic(&iq->iq_ev, EVCNT_TYPE_INTR,
	    NULL, "footbridge", name);

	restore_interrupts(oldirqstate);

	return ih;
}

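/* Remove a previously established interrupt handler. */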
void
footbridge_intr_disestablish(void *cookie)
{
	struct intrhand *ih = cookie;
	struct intrq *iq = &footbridge_intrq[ih->ih_irq];
	int oldirqstate;

	oldirqstate = disable_interrupts(I32_bit);

	TAILQ_REMOVE(&iq->iq_list, ih, ih_list);

	footbridge_intr_calculate_masks();

	restore_interrupts(oldirqstate);

	/* Free the handler now that it is off the list. */
	kmem_free(ih, sizeof(*ih));
}

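/* Read the raw IRQ status register from the 21285. */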
static inline uint32_t
footbridge_intstatus(void)
{
	return ((volatile uint32_t *)(DC21285_ARMCSR_VBASE))[IRQ_STATUS >> 2];
}

/* Called with external interrupts disabled. */
void
footbridge_intr_dispatch(struct clockframe *frame)
{
	struct intrq *iq;
	struct intrhand *ih;
	int oldirqstate, irq, ibit, hwpend;
	struct cpu_info * const ci = curcpu();
	const int ppl = ci->ci_cpl;
	const int imask = footbridge_imask[ppl];

	hwpend = footbridge_intstatus();

	/*
	 * Disable all the interrupts that are pending.  We will
	 * re-enable them once they are processed and not masked.
	 */
	intr_enabled &= ~hwpend;
	footbridge_set_intrmask();

	while (hwpend != 0) {
		int intr_rc = 0;
		irq = ffs(hwpend) - 1;
		ibit = (1U << irq);

		hwpend &= ~ibit;

		if (imask & ibit) {
			/*
			 * IRQ is masked; mark it as pending and check
			 * the next one.  Note: the IRQ is already disabled.
			 */
			footbridge_ipending |= ibit;
			continue;
		}

		footbridge_ipending &= ~ibit;

		iq = &footbridge_intrq[irq];
		iq->iq_ev.ev_count++;
		ci->ci_data.cpu_nintr++;
		TAILQ_FOREACH(ih, &iq->iq_list, ih_list) {
			ci->ci_cpl = ih->ih_ipl;
			oldirqstate = enable_interrupts(I32_bit);
			intr_rc = (*ih->ih_func)(ih->ih_arg ? ih->ih_arg : frame);
			restore_interrupts(oldirqstate);
			if (intr_rc != 1)
				break;
		}

		ci->ci_cpl = ppl;

		/* Re-enable this interrupt now that it has been cleared. */
		intr_enabled |= ibit;
		footbridge_set_intrmask();

		/*
		 * Also check for any new interrupts that may have occurred
		 * that we can handle at this spl level.
		 */
		hwpend |= (footbridge_ipending & ICU_INT_HWMASK) & ~imask;
	}

#ifdef __HAVE_FAST_SOFTINTS
	cpu_dosoftints();
#endif /* __HAVE_FAST_SOFTINTS */
}