/*	$OpenBSD: sxiintc.c,v 1.12 2024/04/29 12:33:17 jsg Exp $	*/
/*
 * Copyright (c) 2007,2009 Dale Rahn <drahn@openbsd.org>
 * Copyright (c) 2013 Artturi Alm
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/queue.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/evcount.h>

#include <machine/bus.h>
#include <machine/fdt.h>

#include <armv7/sunxi/sxiintc.h>

#include <dev/ofw/openfirm.h>
#include <dev/ofw/fdt.h>

#ifdef DEBUG_INTC
#define DPRINTF(x)	do { if (sxiintcdebug) printf x; } while (0)
#define DPRINTFN(n,x)	do { if (sxiintcdebug>(n)) printf x; } while (0)
int	sxiintcdebug = 10;
char *ipl_strtbl[NIPL] = {
	"IPL_NONE",
	"IPL_SOFT",
	"IPL_SOFTCLOCK",
	"IPL_SOFTNET",
	"IPL_SOFTTTY",
	"IPL_BIO|IPL_USB",
	"IPL_NET",
	"IPL_TTY",
	"IPL_VM",
	"IPL_AUDIO",
	"IPL_CLOCK",
	"IPL_STATCLOCK",
	"IPL_SCHED|IPL_HIGH"
};
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

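/*
 * The A10 ("sun4i") interrupt controller handles 96 interrupt sources,
 * grouped into three banks of 32-bit registers.
 */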
#define NIRQ			96
#define NBANKS			3
#define NIRQPRIOREGS		5

/* registers */
#define INTC_VECTOR_REG		0x00
#define INTC_BASE_ADR_REG	0x04
#define INTC_PROTECTION_REG	0x08
#define INTC_NMI_CTRL_REG	0x0c

#define INTC_IRQ_PENDING_REG0	0x10
#define INTC_IRQ_PENDING_REG1	0x14
#define INTC_IRQ_PENDING_REG2	0x18

#define INTC_SELECT_REG0	0x30
#define INTC_SELECT_REG1	0x34
#define INTC_SELECT_REG2	0x38

#define INTC_ENABLE_REG0	0x40
#define INTC_ENABLE_REG1	0x44
#define INTC_ENABLE_REG2	0x48

#define INTC_MASK_REG0		0x50
#define INTC_MASK_REG1		0x54
#define INTC_MASK_REG2		0x58

#define INTC_RESP_REG0		0x60
#define INTC_RESP_REG1		0x64
#define INTC_RESP_REG2		0x68

#define INTC_PRIO_REG0		0x80
#define INTC_PRIO_REG1		0x84
#define INTC_PRIO_REG2		0x88
#define INTC_PRIO_REG3		0x8c
#define INTC_PRIO_REG4		0x90

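/*
 * Per-bank register accessors: bank _b (0-2) selects the 32-bit register
 * covering IRQs 32*_b .. 32*_b+31.
 */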
#define INTC_IRQ_PENDING_REG(_b)	(0x10 + ((_b) * 4))
#define INTC_FIQ_PENDING_REG(_b)	(0x20 + ((_b) * 4))
#define INTC_SELECT_REG(_b)		(0x30 + ((_b) * 4))
#define INTC_ENABLE_REG(_b)		(0x40 + ((_b) * 4))
#define INTC_MASK_REG(_b)		(0x50 + ((_b) * 4))
#define INTC_RESP_REG(_b)		(0x60 + ((_b) * 4))
#define INTC_PRIO_REG(_b)		(0x80 + ((_b) * 4))

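/* Map an IRQ number to its 32-bit bank and to its bit within that bank. */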
#define IRQ2REG32(i)		(((i) >> 5) & 0x3)
#define IRQ2BIT32(i)		((i) & 0x1f)

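/*
 * 16-entries-per-register helpers for the priority registers.  The
 * INTC_PRIO* macros below build on them but are currently unused in this
 * driver; the 0x5 mask in IRQ2REG16() looks suspect and would need
 * another look before being put to use.
 */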
#define IRQ2REG16(i)		(((i) >> 4) & 0x5)
#define IRQ2BIT16(i)		(((i) & 0x0f) * 2)

#define INTC_IRQ_HIPRIO		0x3
#define INTC_IRQ_ENABLED	0x2
#define INTC_IRQ_DISABLED	0x1
#define INTC_IRQ_LOWPRIO	0x0
#define INTC_PRIOCLEAR(i)	(~(INTC_IRQ_HIPRIO << IRQ2BIT16((i))))
#define INTC_PRIOENABLE(i)	(INTC_IRQ_ENABLED << IRQ2BIT16((i)))
#define INTC_PRIOHI(i)		(INTC_IRQ_HIPRIO << IRQ2BIT16((i)))


struct intrhand {
	TAILQ_ENTRY(intrhand) ih_list;	/* link on intrq list */
	int (*ih_func)(void *);		/* handler */
	void *ih_arg;			/* arg for handler */
	int ih_ipl;			/* IPL_* */
	int ih_irq;			/* IRQ number */
	struct evcount	ih_count;
	char *ih_name;
};

struct intrq {
	TAILQ_HEAD(, intrhand) iq_list;	/* handler list */
	int iq_irq;			/* IRQ to mask while handling */
	int iq_levels;			/* IPL_*'s this IRQ has */
	int iq_ist;			/* share type */
};

struct intrq sxiintc_handler[NIRQ];
u_int32_t sxiintc_smask[NIPL];
u_int32_t sxiintc_imask[NBANKS][NIPL];
struct interrupt_controller sxiintc_ic;

bus_space_tag_t		sxiintc_iot;
bus_space_handle_t	sxiintc_ioh;
int			sxiintc_nirq;

int	sxiintc_match(struct device *, void *, void *);
void	sxiintc_attach(struct device *, struct device *, void *);
int	sxiintc_spllower(int);
int	sxiintc_splraise(int);
void	sxiintc_setipl(int);
void	sxiintc_calc_masks(void);
void	*sxiintc_intr_establish_fdt(void *, int *, int, struct cpu_info *,
	    int (*)(void *), void *, char *);

const struct cfattach	sxiintc_ca = {
	sizeof (struct device), sxiintc_match, sxiintc_attach
};

struct cfdriver sxiintc_cd = {
	NULL, "sxiintc", DV_DULL
};

int sxiintc_attached = 0;

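/* Match the FDT node for the Allwinner A10 ("sun4i") interrupt controller. */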
int
sxiintc_match(struct device *parent, void *match, void *aux)
{
	struct fdt_attach_args *faa = aux;

	return OF_is_compatible(faa->fa_node, "allwinner,sun4i-a10-ic");
}

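/*
 * Map the controller, quiesce all sources, install this driver as the
 * platform interrupt dispatcher and register it with the FDT framework.
 */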
void
sxiintc_attach(struct device *parent, struct device *self, void *aux)
{
	struct fdt_attach_args *faa = aux;
	int i, j;

	sxiintc_iot = faa->fa_iot;
	if (bus_space_map(sxiintc_iot, faa->fa_reg[0].addr,
	    faa->fa_reg[0].size, 0, &sxiintc_ioh))
		panic("sxiintc_attach: bus_space_map failed!");

	/* disable/mask/clear all interrupts */
	for (i = 0; i < NBANKS; i++) {
		bus_space_write_4(sxiintc_iot, sxiintc_ioh, INTC_ENABLE_REG(i), 0);
		bus_space_write_4(sxiintc_iot, sxiintc_ioh, INTC_MASK_REG(i), 0);
		bus_space_write_4(sxiintc_iot, sxiintc_ioh, INTC_IRQ_PENDING_REG(i),
		    0xffffffff);
		for (j = 0; j < NIPL; j++)
			sxiintc_imask[i][j] = 0;
	}

	/* XXX */
	bus_space_write_4(sxiintc_iot, sxiintc_ioh, INTC_PROTECTION_REG, 1);
	bus_space_write_4(sxiintc_iot, sxiintc_ioh, INTC_NMI_CTRL_REG, 0);

	for (i = 0; i < NIRQ; i++)
		TAILQ_INIT(&sxiintc_handler[i].iq_list);

	sxiintc_calc_masks();

	arm_init_smask();
	sxiintc_attached = 1;

	/* insert self as interrupt handler */
	arm_set_intr_handler(sxiintc_splraise, sxiintc_spllower, sxiintc_splx,
	    sxiintc_setipl,
	    sxiintc_intr_establish, sxiintc_intr_disestablish, sxiintc_intr_string,
	    sxiintc_irq_handler);
	sxiintc_setipl(IPL_HIGH);  /* XXX ??? */
	enable_interrupts(PSR_I);
	printf("\n");

	sxiintc_ic.ic_node = faa->fa_node;
	sxiintc_ic.ic_establish = sxiintc_intr_establish_fdt;
	arm_intr_register_fdt(&sxiintc_ic);
}

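/*
 * Recompute the per-IPL mask tables.  For every IRQ, iq_irq becomes the
 * highest IPL of any attached handler.  The IRQ's bit in
 * sxiintc_imask[bank][ipl] is clear for levels below the lowest attached
 * handler and set from that level up, so sxiintc_setipl() can load these
 * words straight into the mask registers.
 */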
void
sxiintc_calc_masks(void)
{
	struct cpu_info *ci = curcpu();
	int irq;
	struct intrhand *ih;
	int i;

	for (irq = 0; irq < NIRQ; irq++) {
		int max = IPL_NONE;
		int min = IPL_HIGH;
		TAILQ_FOREACH(ih, &sxiintc_handler[irq].iq_list, ih_list) {
			if (ih->ih_ipl > max)
				max = ih->ih_ipl;
			if (ih->ih_ipl < min)
				min = ih->ih_ipl;
		}

		sxiintc_handler[irq].iq_irq = max;

		if (max == IPL_NONE)
			min = IPL_NONE;

#ifdef DEBUG_INTC
		if (min != IPL_NONE) {
			printf("irq %d to block at %d %d reg %d bit %d\n",
			    irq, max, min, IRQ2REG32(irq),
			    IRQ2BIT32(irq));
		}
#endif
		/* Enable interrupts at lower levels, clear -> enable */
		for (i = 0; i < min; i++)
			sxiintc_imask[IRQ2REG32(irq)][i] &=
			    ~(1 << IRQ2BIT32(irq));
		for (; i < NIPL; i++)
			sxiintc_imask[IRQ2REG32(irq)][i] |=
			    (1 << IRQ2BIT32(irq));
		/* XXX - set enable/disable, priority */
	}

	sxiintc_setipl(ci->ci_cpl);
}

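/*
 * Drop to a lower interrupt priority level and run any soft interrupts
 * that became runnable at the new level.
 */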
void
sxiintc_splx(int new)
{
	struct cpu_info *ci = curcpu();
	sxiintc_setipl(new);

	if (ci->ci_ipending & arm_smask[ci->ci_cpl])
		arm_do_pending_intr(ci->ci_cpl);
}

int
sxiintc_spllower(int new)
{
	struct cpu_info *ci = curcpu();
	int old = ci->ci_cpl;
	sxiintc_splx(new);
	return (old);
}

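/*
 * Raise the interrupt priority level, never lowering it, and return the
 * previous level so the caller can restore it with splx().
 */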
int
sxiintc_splraise(int new)
{
	struct cpu_info *ci = curcpu();
	int old;
	old = ci->ci_cpl;

	/*
	 * setipl must always be called because there is a race window
	 * where the variable is updated before the mask is set.  If an
	 * interrupt occurs in that window and the mask is not rewritten,
	 * the hardware might not get updated on the next splraise,
	 * completely messing up spl protection.
	 */
	if (old > new)
		new = old;

	sxiintc_setipl(new);

	return (old);
}

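/*
 * Switch to the given interrupt priority level by loading the
 * precomputed mask word for each bank into the hardware mask registers.
 */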
void
sxiintc_setipl(int new)
{
	struct cpu_info *ci = curcpu();
	int i, psw;
#if 1
	/*
	 * XXX Possibly not needed: all interrupts are disabled by
	 * default, so touching the mask registers should have no effect.
	 */
	if (sxiintc_attached == 0) {
		ci->ci_cpl = new;
		return;
	}
#endif
	psw = disable_interrupts(PSR_I);
	ci->ci_cpl = new;
	for (i = 0; i < NBANKS; i++)
		bus_space_write_4(sxiintc_iot, sxiintc_ioh,
		    INTC_MASK_REG(i), sxiintc_imask[i][new]);
	restore_interrupts(psw);
}

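/*
 * Main IRQ dispatcher.  Read the active vector, raise spl to the highest
 * level of the handlers attached to that IRQ, briefly disable the source
 * while acknowledging its pending bit, then run every handler on the
 * chain and restore the previous level.
 */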
void
sxiintc_irq_handler(void *frame)
{
	struct intrhand *ih;
	void *arg;
	uint32_t pr;
	int irq, prio, s;

	irq = bus_space_read_4(sxiintc_iot, sxiintc_ioh, INTC_VECTOR_REG) >> 2;
	if (irq == 0)
		return;

	prio = sxiintc_handler[irq].iq_irq;
	s = sxiintc_splraise(prio);
	splassert(prio);

	pr = bus_space_read_4(sxiintc_iot, sxiintc_ioh,
	    INTC_ENABLE_REG(IRQ2REG32(irq)));
	bus_space_write_4(sxiintc_iot, sxiintc_ioh,
	    INTC_ENABLE_REG(IRQ2REG32(irq)),
	    pr & ~(1 << IRQ2BIT32(irq)));

	/* clear pending */
	pr = bus_space_read_4(sxiintc_iot, sxiintc_ioh,
	    INTC_IRQ_PENDING_REG(IRQ2REG32(irq)));
	bus_space_write_4(sxiintc_iot, sxiintc_ioh,
	    INTC_IRQ_PENDING_REG(IRQ2REG32(irq)),
	    pr | (1 << IRQ2BIT32(irq)));

	pr = bus_space_read_4(sxiintc_iot, sxiintc_ioh,
	    INTC_ENABLE_REG(IRQ2REG32(irq)));
	bus_space_write_4(sxiintc_iot, sxiintc_ioh,
	    INTC_ENABLE_REG(IRQ2REG32(irq)),
	    pr | (1 << IRQ2BIT32(irq)));

	TAILQ_FOREACH(ih, &sxiintc_handler[irq].iq_list, ih_list) {
		if (ih->ih_arg)
			arg = ih->ih_arg;
		else
			arg = frame;

		if (ih->ih_func(arg))
			ih->ih_count.ec_count++;
	}
	sxiintc_splx(s);
}

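/*
 * Attach a handler to an IRQ: link it onto the per-IRQ list, hook up
 * event counting, unmask the source in the enable register and rebuild
 * the IPL mask tables.
 */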
void *
sxiintc_intr_establish(int irq, int level, struct cpu_info *ci,
    int (*func)(void *), void *arg, char *name)
{
	int psw;
	struct intrhand *ih;
	uint32_t er;

	if (irq <= 0 || irq >= NIRQ)
		panic("intr_establish: bogus irq %d %s", irq, name);

	if (ci == NULL)
		ci = &cpu_info_primary;
	else if (!CPU_IS_PRIMARY(ci))
		return NULL;

	DPRINTF(("intr_establish: irq %d level %d [%s]\n", irq, level,
	    name != NULL ? name : "NULL"));

	psw = disable_interrupts(PSR_I);

	ih = malloc(sizeof(*ih), M_DEVBUF, M_WAITOK);
	ih->ih_func = func;
	ih->ih_arg = arg;
	ih->ih_ipl = level & IPL_IRQMASK;
	ih->ih_irq = irq;
	ih->ih_name = name;

	TAILQ_INSERT_TAIL(&sxiintc_handler[irq].iq_list, ih, ih_list);

	if (name != NULL)
		evcount_attach(&ih->ih_count, name, &ih->ih_irq);

	er = bus_space_read_4(sxiintc_iot, sxiintc_ioh,
	    INTC_ENABLE_REG(IRQ2REG32(irq)));
	bus_space_write_4(sxiintc_iot, sxiintc_ioh,
	    INTC_ENABLE_REG(IRQ2REG32(irq)),
	    er | (1 << IRQ2BIT32(irq)));

	sxiintc_calc_masks();

	restore_interrupts(psw);
	return (ih);
}

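/* FDT glue: the first interrupt cell is the plain IRQ number. */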
void *
sxiintc_intr_establish_fdt(void *cookie, int *cell, int level,
    struct cpu_info *ci, int (*func)(void *), void *arg, char *name)
{
	return sxiintc_intr_establish(cell[0], level, ci, func, arg, name);
}

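/*
 * Detach a handler: unlink it, detach its event counter, disable the
 * source in the enable register and rebuild the IPL mask tables.
 */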
void
sxiintc_intr_disestablish(void *cookie)
{
	struct intrhand *ih = cookie;
	int irq = ih->ih_irq;
	int psw;
	uint32_t er;

	psw = disable_interrupts(PSR_I);

	TAILQ_REMOVE(&sxiintc_handler[irq].iq_list, ih, ih_list);

	if (ih->ih_name != NULL)
		evcount_detach(&ih->ih_count);

	free(ih, M_DEVBUF, 0);

	er = bus_space_read_4(sxiintc_iot, sxiintc_ioh,
	    INTC_ENABLE_REG(IRQ2REG32(irq)));
	bus_space_write_4(sxiintc_iot, sxiintc_ioh,
	    INTC_ENABLE_REG(IRQ2REG32(irq)),
	    er & ~(1 << IRQ2BIT32(irq)));

	sxiintc_calc_masks();

	restore_interrupts(psw);
}

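/* Placeholder interrupt string; no meaningful name is generated yet. */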
const char *
sxiintc_intr_string(void *cookie)
{
	return "asd?";
}