xref: /openbsd/sys/arch/arm64/dev/bcm2836_intr.c (revision 4cfece93)
1 /* $OpenBSD: bcm2836_intr.c,v 1.9 2020/07/14 15:34:14 patrick Exp $ */
2 /*
3  * Copyright (c) 2007,2009 Dale Rahn <drahn@openbsd.org>
4  * Copyright (c) 2015 Patrick Wildt <patrick@blueri.se>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <sys/param.h>
20 #include <sys/systm.h>
21 #include <sys/queue.h>
22 #include <sys/malloc.h>
23 #include <sys/device.h>
24 #include <sys/evcount.h>
25 
26 #include <machine/bus.h>
27 #include <machine/fdt.h>
28 
29 #include <dev/ofw/openfirm.h>
30 #include <dev/ofw/fdt.h>
31 
32 /* registers */
33 #define	INTC_PENDING_BANK0	0x00
34 #define	INTC_PENDING_BANK1	0x04
35 #define	INTC_PENDING_BANK2	0x08
36 #define	INTC_FIQ_CONTROL	0x0C
37 #define	INTC_ENABLE_BANK1	0x10
38 #define	INTC_ENABLE_BANK2	0x14
39 #define	INTC_ENABLE_BANK0	0x18
40 #define	INTC_DISABLE_BANK1	0x1C
41 #define	INTC_DISABLE_BANK2	0x20
42 #define	INTC_DISABLE_BANK0	0x24
43 
44 /* arm local */
45 #define	ARM_LOCAL_CONTROL		0x00
46 #define	ARM_LOCAL_PRESCALER		0x08
47 #define	 PRESCALER_19_2			0x80000000 /* 19.2 MHz */
48 #define	ARM_LOCAL_INT_TIMER(n)		(0x40 + (n) * 4)
49 #define	ARM_LOCAL_INT_MAILBOX(n)	(0x50 + (n) * 4)
50 #define	ARM_LOCAL_INT_PENDING(n)	(0x60 + (n) * 4)
51 #define	 ARM_LOCAL_INT_PENDING_MASK	0x0f
52 #define	ARM_LOCAL_INT_MAILBOX_SET(n)	(0x80 + (n) * 16)
53 #define	ARM_LOCAL_INT_MAILBOX_CLR(n)	(0xc0 + (n) * 16)
54 
55 #define	BANK0_START	64
56 #define	BANK0_END	(BANK0_START + 32 - 1)
57 #define	BANK1_START	0
58 #define	BANK1_END	(BANK1_START + 32 - 1)
59 #define	BANK2_START	32
60 #define	BANK2_END	(BANK2_START + 32 - 1)
61 #define	LOCAL_START	96
62 #define	LOCAL_END	(LOCAL_START + 32 - 1)
63 
64 #define	IS_IRQ_BANK0(n)	(((n) >= BANK0_START) && ((n) <= BANK0_END))
65 #define	IS_IRQ_BANK1(n)	(((n) >= BANK1_START) && ((n) <= BANK1_END))
66 #define	IS_IRQ_BANK2(n)	(((n) >= BANK2_START) && ((n) <= BANK2_END))
67 #define	IS_IRQ_LOCAL(n)	(((n) >= LOCAL_START) && ((n) <= LOCAL_END))
68 #define	IRQ_BANK0(n)	((n) - BANK0_START)
69 #define	IRQ_BANK1(n)	((n) - BANK1_START)
70 #define	IRQ_BANK2(n)	((n) - BANK2_START)
71 #define	IRQ_LOCAL(n)	((n) - LOCAL_START)
72 
73 #define ARM_LOCAL_IRQ_MAILBOX(n) (4 + (n))
74 
75 #define	INTC_NIRQ	128
76 #define	INTC_NBANK	4
77 
78 #define INTC_IRQ_TO_REG(i)	(((i) >> 5) & 0x3)
79 #define INTC_IRQ_TO_REGi(i)	((i) & 0x1f)
80 
/*
 * One established interrupt handler.  Several handlers may share an
 * IRQ line; they are linked through ih_list on the line's intrsource.
 */
struct intrhand {
	TAILQ_ENTRY(intrhand) ih_list;	/* link on intrq list */
	int (*ih_func)(void *);		/* handler */
	void *ih_arg;			/* arg for handler */
	int ih_ipl;			/* IPL_* */
	int ih_flags;			/* IPL_MPSAFE etc. */
	int ih_irq;			/* IRQ number */
	struct evcount ih_count;	/* interrupt counter */
	char *ih_name;			/* device name */
};
91 
/*
 * Per-IRQ state: the handlers sharing the line, and the highest IPL
 * among them (cached by bcm_intc_calc_mask(), raised to while the
 * handlers run).
 */
struct intrsource {
	TAILQ_HEAD(, intrhand) is_list;	/* handler list */
	int is_irq;			/* highest IPL of attached handlers */
};
96 
/* Softc for the BCM2835 armctrl IC plus the BCM2836 ARM-local block. */
struct bcm_intc_softc {
	struct device		 sc_dev;
	/* per-IRQ handler lists and cached dispatch IPL */
	struct intrsource	 sc_bcm_intc_handler[INTC_NIRQ];
	/* precomputed enable mask, per bank and per system IPL */
	uint32_t		 sc_bcm_intc_imask[INTC_NBANK][NIPL];
	/* per-core enable mask for the ARM-local (bank 3) interrupts */
	int32_t			 sc_localcoremask[MAXCPUS];
	bus_space_tag_t		 sc_iot;
	bus_space_handle_t	 sc_ioh;	/* BCM2835 armctrl registers */
	bus_space_handle_t	 sc_lioh;	/* ARM-local control registers */
	struct interrupt_controller sc_intc;	/* fdt IC for the 2835 banks */
	struct interrupt_controller sc_l1_intc;	/* fdt IC for ARM-local irqs */
};
/* single instance, cached for the spl and dispatch entry points */
struct bcm_intc_softc *bcm_intc;
109 
110 int	 bcm_intc_match(struct device *, void *, void *);
111 void	 bcm_intc_attach(struct device *, struct device *, void *);
112 void	 bcm_intc_splx(int new);
113 int	 bcm_intc_spllower(int new);
114 int	 bcm_intc_splraise(int new);
115 void	 bcm_intc_setipl(int new);
116 void	 bcm_intc_calc_mask(void);
117 void	*bcm_intc_intr_establish(int, int, struct cpu_info *,
118     int (*)(void *), void *, char *);
119 void	*bcm_intc_intr_establish_fdt(void *, int *, int, struct cpu_info *,
120     int (*)(void *), void *, char *);
121 void	*l1_intc_intr_establish_fdt(void *, int *, int, struct cpu_info *,
122     int (*)(void *), void *, char *);
123 void	 bcm_intc_intr_disestablish(void *);
124 void	 bcm_intc_irq_handler(void *);
125 void	 bcm_intc_intr_route(void *, int , struct cpu_info *);
126 void	 bcm_intc_handle_ipi(void);
127 void	 bcm_intc_send_ipi(struct cpu_info *, int);
128 
/* autoconf(9) glue */
struct cfattach	bcmintc_ca = {
	sizeof (struct bcm_intc_softc), bcm_intc_match, bcm_intc_attach
};

struct cfdriver bcmintc_cd = {
	NULL, "bcmintc", DV_DULL
};
136 
137 int
138 bcm_intc_match(struct device *parent, void *cfdata, void *aux)
139 {
140 	struct fdt_attach_args *faa = aux;
141 
142 	if (OF_is_compatible(faa->fa_node, "brcm,bcm2836-armctrl-ic"))
143 		return 1;
144 
145 	return 0;
146 }
147 
/*
 * Attach the BCM2835 interrupt controller together with the BCM2836
 * ARM-local control block: map both register windows, mask everything,
 * install ourselves as the platform spl/dispatch entry points and
 * register two fdt interrupt controllers.
 */
void
bcm_intc_attach(struct device *parent, struct device *self, void *aux)
{
	struct bcm_intc_softc *sc = (struct bcm_intc_softc *)self;
	struct fdt_attach_args *faa = aux;
	uint32_t reg[2];
	int node;
	int i;

	if (faa->fa_nreg < 1)
		return;

	/* single instance; cache for the spl and dispatch entry points */
	bcm_intc = sc;

	sc->sc_iot = faa->fa_iot;

	/* map the BCM2835 armctrl interrupt controller registers */
	if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr,
	    faa->fa_reg[0].size, 0, &sc->sc_ioh))
		panic("%s: bus_space_map failed!", __func__);

	/*
	 * ARM control logic.
	 *
	 * XXX Should really be implemented as a separate interrupt
	 * controller, but for now it is easier to handle it together
	 * with its BCM2835 partner.
	 */
	node = OF_finddevice("/soc/local_intc");
	if (node == -1)
		panic("%s: can't find ARM control logic", __func__);

	if (OF_getpropintarray(node, "reg", reg, sizeof(reg)) != sizeof(reg))
		panic("%s: can't map ARM control logic", __func__);

	if (bus_space_map(sc->sc_iot, reg[0], reg[1], 0, &sc->sc_lioh))
		panic("%s: bus_space_map failed!", __func__);

	printf("\n");

	/* mask all interrupts */
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, INTC_DISABLE_BANK0,
	    0xffffffff);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, INTC_DISABLE_BANK1,
	    0xffffffff);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, INTC_DISABLE_BANK2,
	    0xffffffff);

	/* ARM control specific: set the 19.2 MHz prescaler and clear the
	 * per-core timer and mailbox interrupt enables */
	bus_space_write_4(sc->sc_iot, sc->sc_lioh, ARM_LOCAL_CONTROL, 0);
	bus_space_write_4(sc->sc_iot, sc->sc_lioh, ARM_LOCAL_PRESCALER,
	    PRESCALER_19_2);
	for (i = 0; i < 4; i++)
		bus_space_write_4(sc->sc_iot, sc->sc_lioh,
		    ARM_LOCAL_INT_TIMER(i), 0);
	for (i = 0; i < 4; i++)
		bus_space_write_4(sc->sc_iot, sc->sc_lioh,
		    ARM_LOCAL_INT_MAILBOX(i), 0);

	for (i = 0; i < INTC_NIRQ; i++) {
		TAILQ_INIT(&sc->sc_bcm_intc_handler[i].is_list);
	}

	/* compute the (empty) per-IPL masks and load them */
	bcm_intc_calc_mask();

	/* insert self as interrupt handler */
	arm_set_intr_handler(bcm_intc_splraise, bcm_intc_spllower,
	    bcm_intc_splx, bcm_intc_setipl, bcm_intc_irq_handler);

	/* fdt interrupt controller for the BCM2835 banks */
	sc->sc_intc.ic_node = faa->fa_node;
	sc->sc_intc.ic_cookie = sc;
	sc->sc_intc.ic_establish = bcm_intc_intr_establish_fdt;
	sc->sc_intc.ic_disestablish = bcm_intc_intr_disestablish;
	sc->sc_intc.ic_route = bcm_intc_intr_route;
	arm_intr_register_fdt(&sc->sc_intc);

	/* fdt interrupt controller for the ARM-local interrupts */
	sc->sc_l1_intc.ic_node = node;
	sc->sc_l1_intc.ic_cookie = sc;
	sc->sc_l1_intc.ic_establish = l1_intc_intr_establish_fdt;
	sc->sc_l1_intc.ic_disestablish = bcm_intc_intr_disestablish;
	sc->sc_l1_intc.ic_route = bcm_intc_intr_route;
	arm_intr_register_fdt(&sc->sc_l1_intc);

	intr_send_ipi_func = bcm_intc_send_ipi;

	bcm_intc_setipl(IPL_HIGH);  /* XXX ??? */
	enable_interrupts();
}
235 
236 void
237 bcm_intc_intr_enable(int irq, int ipl)
238 {
239 	struct bcm_intc_softc	*sc = bcm_intc;
240 
241 	if (IS_IRQ_BANK0(irq))
242 		sc->sc_bcm_intc_imask[0][ipl] |= (1 << IRQ_BANK0(irq));
243 	else if (IS_IRQ_BANK1(irq))
244 		sc->sc_bcm_intc_imask[1][ipl] |= (1 << IRQ_BANK1(irq));
245 	else if (IS_IRQ_BANK2(irq))
246 		sc->sc_bcm_intc_imask[2][ipl] |= (1 << IRQ_BANK2(irq));
247 	else if (IS_IRQ_LOCAL(irq))
248 		sc->sc_bcm_intc_imask[3][ipl] |= (1 << IRQ_LOCAL(irq));
249 	else
250 		printf("%s: invalid irq number: %d\n", __func__, irq);
251 }
252 
253 void
254 bcm_intc_intr_disable(int irq, int ipl)
255 {
256 	struct bcm_intc_softc	*sc = bcm_intc;
257 
258 	if (IS_IRQ_BANK0(irq))
259 		sc->sc_bcm_intc_imask[0][ipl] &= ~(1 << IRQ_BANK0(irq));
260 	else if (IS_IRQ_BANK1(irq))
261 		sc->sc_bcm_intc_imask[1][ipl] &= ~(1 << IRQ_BANK1(irq));
262 	else if (IS_IRQ_BANK2(irq))
263 		sc->sc_bcm_intc_imask[2][ipl] &= ~(1 << IRQ_BANK2(irq));
264 	else if (IS_IRQ_LOCAL(irq))
265 		sc->sc_bcm_intc_imask[3][ipl] &= ~(1 << IRQ_LOCAL(irq));
266 	else
267 		printf("%s: invalid irq number: %d\n", __func__, irq);
268 }
269 
/*
 * Recompute the per-IPL enable masks from the established handlers.
 * For each IRQ the highest (max) and lowest (min) handler IPLs are
 * found; the IRQ is enabled at every system IPL below min and masked
 * at min and above.  max is cached in is_irq as the level the
 * dispatcher raises to while running the handlers.
 */
void
bcm_intc_calc_mask(void)
{
	struct cpu_info *ci = curcpu();
	struct bcm_intc_softc *sc = bcm_intc;
	int irq;
	struct intrhand *ih;
	int i;

	for (irq = 0; irq < INTC_NIRQ; irq++) {
		int max = IPL_NONE;
		int min = IPL_HIGH;
		TAILQ_FOREACH(ih, &sc->sc_bcm_intc_handler[irq].is_list,
		    ih_list) {
			if (ih->ih_ipl > max)
				max = ih->ih_ipl;

			if (ih->ih_ipl < min)
				min = ih->ih_ipl;
		}

		sc->sc_bcm_intc_handler[irq].is_irq = max;

		/* no handlers attached: keep the IRQ masked at every IPL */
		if (max == IPL_NONE)
			min = IPL_NONE;

#ifdef DEBUG_INTC
		if (min != IPL_NONE) {
			printf("irq %d to block at %d %d reg %d bit %d\n",
			    irq, max, min, INTC_IRQ_TO_REG(irq),
			    INTC_IRQ_TO_REGi(irq));
		}
#endif
		/* Enable interrupts at lower levels, clear -> enable */
		for (i = 0; i < min; i++)
			bcm_intc_intr_enable(irq, i);
		for (; i <= IPL_HIGH; i++)
			bcm_intc_intr_disable(irq, i);
	}
	arm_init_smask();
	/* reload the hardware with the masks for the current IPL */
	bcm_intc_setipl(ci->ci_cpl);
}
312 
313 void
314 bcm_intc_splx(int new)
315 {
316 	struct cpu_info *ci = curcpu();
317 
318 	if (ci->ci_ipending & arm_smask[new])
319 		arm_do_pending_intr(new);
320 
321 	bcm_intc_setipl(new);
322 }
323 
324 int
325 bcm_intc_spllower(int new)
326 {
327 	struct cpu_info *ci = curcpu();
328 	int old = ci->ci_cpl;
329 	bcm_intc_splx(new);
330 	return (old);
331 }
332 
333 int
334 bcm_intc_splraise(int new)
335 {
336 	struct cpu_info *ci = curcpu();
337 	int old;
338 	old = ci->ci_cpl;
339 
340 	/*
341 	 * setipl must always be called because there is a race window
342 	 * where the variable is updated before the mask is set
343 	 * an interrupt occurs in that window without the mask always
344 	 * being set, the hardware might not get updated on the next
345 	 * splraise completely messing up spl protection.
346 	 */
347 	if (old > new)
348 		new = old;
349 
350 	bcm_intc_setipl(new);
351 
352 	return (old);
353 }
354 
/*
 * Set the current IPL and load the precomputed enable masks for that
 * level into the hardware.  Only core 0 reprograms the shared BCM2835
 * banks; the per-core local timer enables are updated on every core.
 */
void
bcm_intc_setipl(int new)
{
	struct cpu_info *ci = curcpu();
	struct bcm_intc_softc *sc = bcm_intc;
	int psw;

	psw = disable_interrupts();
	ci->ci_cpl = new;
	if (cpu_number() == 0) {
		/* disable everything first, then enable what the IPL allows */
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, INTC_DISABLE_BANK0,
		    0xffffffff);
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, INTC_DISABLE_BANK1,
		    0xffffffff);
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, INTC_DISABLE_BANK2,
		    0xffffffff);
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, INTC_ENABLE_BANK0,
		    sc->sc_bcm_intc_imask[0][new]);
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, INTC_ENABLE_BANK1,
		    sc->sc_bcm_intc_imask[1][new]);
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, INTC_ENABLE_BANK2,
		    sc->sc_bcm_intc_imask[2][new]);
	}
	/* timer for current core */
	bus_space_write_4(sc->sc_iot, sc->sc_lioh,
	    ARM_LOCAL_INT_TIMER(cpu_number()),
	    sc->sc_bcm_intc_imask[3][ci->ci_cpl] &
	    sc->sc_localcoremask[cpu_number()]);
	restore_interrupts(psw);
}
385 
/*
 * Return the number of the next pending IRQ greater than `last_irq',
 * scanning bank 1, bank 2, bank 0 and finally this core's ARM-local
 * pending bits, or -1 if nothing further is pending.
 */
int
bcm_intc_get_next_irq(int last_irq)
{
	struct bcm_intc_softc *sc = bcm_intc;
	uint32_t pending;
	int32_t irq = last_irq + 1;

	/* Sanity check */
	if (irq < 0)
		irq = 0;

	/* We need to keep this order. */
	/* TODO: should we mask last_irq? */
	if (IS_IRQ_BANK1(irq)) {
		pending = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
		    INTC_PENDING_BANK1);
		if (pending == 0) {
			irq = BANK2_START;	/* skip to next bank */
		} else do {
			if (pending & (1 << IRQ_BANK1(irq)))
				return irq;
			irq++;
		} while (IS_IRQ_BANK1(irq));
	}
	if (IS_IRQ_BANK2(irq)) {
		pending = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
		    INTC_PENDING_BANK2);
		if (pending == 0) {
			irq = BANK0_START;	/* skip to next bank */
		} else do {
			if (pending & (1 << IRQ_BANK2(irq)))
				return irq;
			irq++;
		} while (IS_IRQ_BANK2(irq));
	}
	if (IS_IRQ_BANK0(irq)) {
		pending = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
		    INTC_PENDING_BANK0);
		if (pending == 0) {
			irq = LOCAL_START;	/* skip to next bank */
		} else do {
			if (pending & (1 << IRQ_BANK0(irq)))
				return irq;
			irq++;
		} while (IS_IRQ_BANK0(irq));
	}
	/* per-core ARM-local pending bits, checked last */
	if (IS_IRQ_LOCAL(irq)) {
		pending = bus_space_read_4(sc->sc_iot, sc->sc_lioh,
		    ARM_LOCAL_INT_PENDING(cpu_number()));
		pending &= ARM_LOCAL_INT_PENDING_MASK;
		if (pending != 0) do {
			if (pending & (1 << IRQ_LOCAL(irq)))
				return irq;
			irq++;
		} while (IS_IRQ_LOCAL(irq));
	}
	return (-1);
}
444 
/*
 * Run every handler attached to `irq' at the IPL cached for the line.
 * Non-MPSAFE handlers run under the kernel lock, and interrupts are
 * re-enabled around each handler call.
 */
static void
bcm_intc_call_handler(int irq, void *frame)
{
	struct bcm_intc_softc *sc = bcm_intc;
	struct intrhand *ih;
	int pri, s, handled;
	void *arg;

#ifdef DEBUG_INTC
	if (irq != 99)
		printf("irq  %d fired\n", irq);
	else {
		static int cnt = 0;
		if ((cnt++ % 100) == 0) {
			printf("irq  %d fired * _100\n", irq);
#ifdef DDB
			db_enter();
#endif
		}
	}
#endif

	/* raise to the highest IPL among the attached handlers */
	pri = sc->sc_bcm_intc_handler[irq].is_irq;
	s = bcm_intc_splraise(pri);
	TAILQ_FOREACH(ih, &sc->sc_bcm_intc_handler[irq].is_list, ih_list) {
#ifdef MULTIPROCESSOR
		int need_lock;

		if (ih->ih_flags & IPL_MPSAFE)
			need_lock = 0;
		else
			need_lock = s < IPL_SCHED;

		if (need_lock)
			KERNEL_LOCK();
#endif

		/* handlers established without an argument get the frame */
		if (ih->ih_arg != 0)
			arg = ih->ih_arg;
		else
			arg = frame;

		enable_interrupts();
		handled = ih->ih_func(arg);
		disable_interrupts();
		if (handled)
			ih->ih_count.ec_count++;

#ifdef MULTIPROCESSOR
		if (need_lock)
			KERNEL_UNLOCK();
#endif
	}

	bcm_intc_splx(s);
}
501 
502 void
503 bcm_intc_irq_handler(void *frame)
504 {
505 	int irq = (cpu_number() == 0 ? 0 : LOCAL_START) - 1;
506 
507 	while ((irq = bcm_intc_get_next_irq(irq)) != -1) {
508 #ifdef MULTIPROCESSOR
509 		if (irq == ARM_LOCAL_IRQ_MAILBOX(cpu_number())) {
510 			bcm_intc_handle_ipi();
511 			continue;
512 		}
513 #endif
514 		bcm_intc_call_handler(irq, frame);
515 	}
516 }
517 
518 void *
519 bcm_intc_intr_establish_fdt(void *cookie, int *cell, int level,
520     struct cpu_info *ci, int (*func)(void *), void *arg, char *name)
521 {
522 	struct bcm_intc_softc	*sc = (struct bcm_intc_softc *)cookie;
523 	int irq;
524 
525 	irq = cell[1];
526 	if (cell[0] == 0)
527 		irq += BANK0_START;
528 	else if (cell[0] == 1)
529 		irq += BANK1_START;
530 	else if (cell[0] == 2)
531 		irq += BANK2_START;
532 	else if (cell[0] == 3)
533 		irq += LOCAL_START;
534 	else
535 		panic("%s: bogus interrupt type", sc->sc_dev.dv_xname);
536 
537 	return bcm_intc_intr_establish(irq, level, ci, func, arg, name);
538 }
539 
540 void *
541 l1_intc_intr_establish_fdt(void *cookie, int *cell, int level,
542     struct cpu_info *ci, int (*func)(void *), void *arg, char *name)
543 {
544 	int irq;
545 
546 	irq = cell[0] + LOCAL_START;
547 	return bcm_intc_intr_establish(irq, level, ci, func, arg, name);
548 }
549 
550 void *
551 bcm_intc_intr_establish(int irqno, int level, struct cpu_info *ci,
552     int (*func)(void *), void *arg, char *name)
553 {
554 	struct bcm_intc_softc *sc = bcm_intc;
555 	struct intrhand *ih;
556 	int psw;
557 
558 	if (irqno < 0 || irqno >= INTC_NIRQ)
559 		panic("bcm_intc_intr_establish: bogus irqnumber %d: %s",
560 		     irqno, name);
561 
562 	if (ci != NULL && !CPU_IS_PRIMARY(ci))
563 		return NULL;
564 
565 	psw = disable_interrupts();
566 
567 	ih = malloc(sizeof *ih, M_DEVBUF, M_WAITOK);
568 	ih->ih_func = func;
569 	ih->ih_arg = arg;
570 	ih->ih_ipl = level & IPL_IRQMASK;
571 	ih->ih_flags = level & IPL_FLAGMASK;
572 	ih->ih_irq = irqno;
573 	ih->ih_name = name;
574 
575 	if (IS_IRQ_LOCAL(irqno))
576 		sc->sc_localcoremask[0] |= (1 << IRQ_LOCAL(irqno));
577 
578 	TAILQ_INSERT_TAIL(&sc->sc_bcm_intc_handler[irqno].is_list, ih, ih_list);
579 
580 	if (name != NULL)
581 		evcount_attach(&ih->ih_count, name, &ih->ih_irq);
582 
583 #ifdef DEBUG_INTC
584 	printf("%s irq %d level %d [%s]\n", __func__, irqno, level,
585 	    name);
586 #endif
587 	bcm_intc_calc_mask();
588 
589 	restore_interrupts(psw);
590 	return (ih);
591 }
592 
593 void
594 bcm_intc_intr_disestablish(void *cookie)
595 {
596 	struct bcm_intc_softc *sc = bcm_intc;
597 	struct intrhand *ih = cookie;
598 	int irqno = ih->ih_irq;
599 	int psw;
600 
601 	psw = disable_interrupts();
602 	TAILQ_REMOVE(&sc->sc_bcm_intc_handler[irqno].is_list, ih, ih_list);
603 	if (ih->ih_name != NULL)
604 		evcount_detach(&ih->ih_count);
605 	free(ih, M_DEVBUF, 0);
606 	restore_interrupts(psw);
607 }
608 
/*
 * Route an established ARM-local interrupt to (or away from) a core.
 * Only the per-core mask is recorded here; the hardware is reloaded
 * immediately when `ci' is the running cpu, otherwise on that core's
 * next bcm_intc_setipl().
 * NOTE(review): IRQ_LOCAL() is applied unconditionally — this assumes
 * ih_irq is a local (bank 3) interrupt; confirm against callers.
 */
void
bcm_intc_intr_route(void *cookie, int enable, struct cpu_info *ci)
{
	struct bcm_intc_softc *sc = bcm_intc;
	struct intrhand *ih = cookie;
	int lirq = IRQ_LOCAL(ih->ih_irq);

	if (enable)
		sc->sc_localcoremask[ci->ci_cpuid] |= (1 << lirq);
	else
		sc->sc_localcoremask[ci->ci_cpuid] &= ~(1 << lirq);

	if (ci == curcpu()) {
		bus_space_write_4(sc->sc_iot, sc->sc_lioh,
		    ARM_LOCAL_INT_TIMER(cpu_number()),
		    sc->sc_bcm_intc_imask[3][ci->ci_cpl] &
		    sc->sc_localcoremask[cpu_number()]);
#ifdef MULTIPROCESSOR
		bus_space_write_4(sc->sc_iot, sc->sc_lioh,
		    ARM_LOCAL_INT_MAILBOX(cpu_number()),
		    sc->sc_bcm_intc_imask[3][ci->ci_cpl] &
		    sc->sc_localcoremask[cpu_number()]);
#endif
	}
}
634 
635 void
636 bcm_intc_handle_ipi(void)
637 {
638 	struct bcm_intc_softc *sc = bcm_intc;
639 	int cpuno = cpu_number();
640 	uint32_t mbox_val;
641 	int ipi;
642 
643 	mbox_val = bus_space_read_4(sc->sc_iot, sc->sc_lioh,
644 		ARM_LOCAL_INT_MAILBOX_CLR(cpuno));
645 	ipi = ffs(mbox_val) - 1;
646 	bus_space_write_4(sc->sc_iot, sc->sc_lioh,
647 	    ARM_LOCAL_INT_MAILBOX_CLR(cpuno), 1 << ipi);
648 	switch (ipi) {
649 	case ARM_IPI_DDB:
650 		/* XXX */
651 #ifdef DDB
652 		db_enter();
653 #endif
654 		break;
655 	case ARM_IPI_NOP:
656 		break;
657 	}
658 }
659 
/*
 * Send IPI `id' to cpu `ci' by setting the matching bit in the target
 * core's mailbox 0 SET register.
 */
void
bcm_intc_send_ipi(struct cpu_info *ci, int id)
{
	struct bcm_intc_softc *sc = bcm_intc;

	/* order prior stores before ringing the doorbell */
	__asm volatile("dsb sy"); /* XXX */

	bus_space_write_4(sc->sc_iot, sc->sc_lioh,
	    ARM_LOCAL_INT_MAILBOX_SET(ci->ci_cpuid), 1 << id);
}
670