xref: /openbsd/sys/arch/arm/cortex/ampintc.c (revision 4cfece93)
1 /* $OpenBSD: ampintc.c,v 1.28 2020/07/14 15:34:14 patrick Exp $ */
2 /*
3  * Copyright (c) 2007,2009,2011 Dale Rahn <drahn@openbsd.org>
4  *
5  * Permission to use, copy, modify, and distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 
18 /*
19  * This driver implements the interrupt controller as specified in
20  * DDI0407E_cortex_a9_mpcore_r2p0_trm with the
21  * IHI0048A_gic_architecture_spec_v1_0 underlying specification
22  */
23 #include <sys/param.h>
24 #include <sys/systm.h>
25 #include <sys/queue.h>
26 #include <sys/malloc.h>
27 #include <sys/device.h>
28 #include <sys/evcount.h>
29 
30 #include <uvm/uvm_extern.h>
31 
32 #include <machine/bus.h>
33 #include <machine/fdt.h>
34 
35 #include <arm/cpufunc.h>
36 #include <arm/cortex/cortex.h>
37 
38 #include <dev/ofw/fdt.h>
39 #include <dev/ofw/openfirm.h>
40 
41 #include <arm/simplebus/simplebusvar.h>
42 
43 /* registers */
44 #define	ICD_DCR			0x000
45 #define		ICD_DCR_ES		0x00000001
46 #define		ICD_DCR_ENS		0x00000002
47 
48 #define ICD_ICTR			0x004
49 #define		ICD_ICTR_LSPI_SH	11
50 #define		ICD_ICTR_LSPI_M		0x1f
51 #define		ICD_ICTR_CPU_SH		5
52 #define		ICD_ICTR_CPU_M		0x07
53 #define		ICD_ICTR_ITL_SH		0
54 #define		ICD_ICTR_ITL_M		0x1f
55 #define ICD_IDIR			0x008
56 #define 	ICD_DIR_PROD_SH		24
57 #define 	ICD_DIR_PROD_M		0xff
58 #define 	ICD_DIR_REV_SH		12
59 #define 	ICD_DIR_REV_M		0xfff
60 #define 	ICD_DIR_IMP_SH		0
61 #define 	ICD_DIR_IMP_M		0xfff
62 
63 #define IRQ_TO_REG32(i)		(((i) >> 5) & 0x1f)
64 #define IRQ_TO_REG32BIT(i)	((i) & 0x1f)
65 #define IRQ_TO_REG4(i)		(((i) >> 2) & 0xff)
66 #define IRQ_TO_REG4BIT(i)	((i) & 0x3)
67 #define IRQ_TO_REG16(i)		(((i) >> 4) & 0x3f)
68 #define IRQ_TO_REG16BIT(i)	((i) & 0xf)
69 #define IRQ_TO_REGBIT_S(i)	8
70 #define IRQ_TO_REG4BIT_M(i)	8
71 
72 #define ICD_ISRn(i)		(0x080 + (IRQ_TO_REG32(i) * 4))
73 #define ICD_ISERn(i)		(0x100 + (IRQ_TO_REG32(i) * 4))
74 #define ICD_ICERn(i)		(0x180 + (IRQ_TO_REG32(i) * 4))
75 #define ICD_ISPRn(i)		(0x200 + (IRQ_TO_REG32(i) * 4))
76 #define ICD_ICPRn(i)		(0x280 + (IRQ_TO_REG32(i) * 4))
77 #define ICD_ABRn(i)		(0x300 + (IRQ_TO_REG32(i) * 4))
78 #define ICD_IPRn(i)		(0x400 + (i))
79 #define ICD_IPTRn(i)		(0x800 + (i))
80 #define ICD_ICRn(i)		(0xC00 + (IRQ_TO_REG16(i) * 4))
81 #define 	ICD_ICR_TRIG_LEVEL(i)	(0x0 << (IRQ_TO_REG16BIT(i) * 2))
82 #define 	ICD_ICR_TRIG_EDGE(i)	(0x2 << (IRQ_TO_REG16BIT(i) * 2))
83 #define 	ICD_ICR_TRIG_MASK(i)	(0x2 << (IRQ_TO_REG16BIT(i) * 2))
84 
85 /*
86  * what about (ppi|spi)_status
87  */
88 #define ICD_PPI			0xD00
89 #define 	ICD_PPI_GTIMER	(1 << 11)
90 #define 	ICD_PPI_FIQ		(1 << 12)
91 #define 	ICD_PPI_PTIMER	(1 << 13)
92 #define 	ICD_PPI_PWDOG	(1 << 14)
93 #define 	ICD_PPI_IRQ		(1 << 15)
94 #define ICD_SPI_BASE		0xD04
95 #define ICD_SPIn(i)			(ICD_SPI_BASE + ((i) * 4))
96 
97 
98 #define ICD_SGIR			0xF00
99 
100 #define ICD_PERIPH_ID_0			0xFD0
101 #define ICD_PERIPH_ID_1			0xFD4
102 #define ICD_PERIPH_ID_2			0xFD8
103 #define ICD_PERIPH_ID_3			0xFDC
104 #define ICD_PERIPH_ID_4			0xFE0
105 #define ICD_PERIPH_ID_5			0xFE4
106 #define ICD_PERIPH_ID_6			0xFE8
107 #define ICD_PERIPH_ID_7			0xFEC
108 
/*
 * PrimeCell component ID registers.  The previous values had all four
 * pointing at 0xFEC (copy-paste error), which also collided with
 * ICD_PERIPH_ID_7; the component ID block sits at 0xFF0-0xFFC.
 * These macros are currently unused by the driver.
 */
#define ICD_COMP_ID_0			0xFF0
#define ICD_COMP_ID_1			0xFF4
#define ICD_COMP_ID_2			0xFF8
#define ICD_COMP_ID_3			0xFFC
113 
114 
115 #define ICPICR				0x00
116 #define ICPIPMR				0x04
117 /* XXX - must left justify bits to  0 - 7  */
118 #define 	ICMIPMR_SH 		4
119 #define ICPBPR				0x08
120 #define ICPIAR				0x0C
121 #define 	ICPIAR_IRQ_SH		0
122 #define 	ICPIAR_IRQ_M		0x3ff
123 #define 	ICPIAR_CPUID_SH		10
124 #define 	ICPIAR_CPUID_M		0x7
125 #define 	ICPIAR_NO_PENDING_IRQ	ICPIAR_IRQ_M
126 #define ICPEOIR				0x10
127 #define ICPPRP				0x14
128 #define ICPHPIR				0x18
129 #define ICPIIR				0xFC
130 
131 /*
132  * what about periph_id and component_id
133  */
134 
135 #define IRQ_ENABLE	1
136 #define IRQ_DISABLE	0
137 
/*
 * Softc for the GIC: distributor (ICD) and CPU interface (ICP)
 * mappings plus per-interrupt dispatch state.
 */
struct ampintc_softc {
	struct simplebus_softc	 sc_sbus;
	struct intrq 		*sc_handler;	/* sc_nintr per-irq handler queues */
	int			 sc_nintr;	/* number of interrupt sources */
	bus_space_tag_t		 sc_iot;
	bus_space_handle_t	 sc_d_ioh, sc_p_ioh;	/* distributor / cpu interface */
	uint8_t			 sc_cpu_mask[ICD_ICTR_CPU_M + 1];	/* GIC target mask, per cpu */
	struct evcount		 sc_spur;	/* spurious (irq 1023) counter */
	struct interrupt_controller sc_ic;
	int			 sc_ipi_reason[ICD_ICTR_CPU_M + 1];	/* pending IPI type, per cpu */
	int			 sc_ipi_num[2];	/* irq used for ARM_IPI_{NOP,DDB} */
};
struct ampintc_softc *ampintc;	/* single instance, set at attach */
151 
152 
/*
 * One established interrupt handler; linked on the owning irq's
 * intrq list (interrupts may be shared).
 */
struct intrhand {
	TAILQ_ENTRY(intrhand) ih_list;	/* link on intrq list */
	int (*ih_func)(void *);		/* handler */
	void *ih_arg;			/* arg for handler */
	int ih_ipl;			/* IPL_* */
	int ih_flags;			/* IPL_MPSAFE etc. */
	int ih_irq;			/* IRQ number */
	struct evcount	ih_count;	/* per-handler event counter */
	char *ih_name;			/* name passed at establish time */
};
163 
/*
 * Per-interrupt handler queue with the cached IPL range of its
 * handlers, maintained by ampintc_calc_mask().
 */
struct intrq {
	TAILQ_HEAD(, intrhand) iq_list;	/* handler list */
	int iq_irq_max;			/* IRQ to mask while handling */
	int iq_irq_min;			/* lowest IRQ when shared */
	int iq_ist;			/* share type */
};
170 
171 
172 int		 ampintc_match(struct device *, void *, void *);
173 void		 ampintc_attach(struct device *, struct device *, void *);
174 void		 ampintc_cpuinit(void);
175 int		 ampintc_spllower(int);
176 void		 ampintc_splx(int);
177 int		 ampintc_splraise(int);
178 void		 ampintc_setipl(int);
179 void		 ampintc_calc_mask(void);
180 void		*ampintc_intr_establish(int, int, int, struct cpu_info *,
181 		    int (*)(void *), void *, char *);
182 void		*ampintc_intr_establish_ext(int, int, struct cpu_info *,
183 		    int (*)(void *), void *, char *);
184 void		*ampintc_intr_establish_fdt(void *, int *, int,
185 		    struct cpu_info *, int (*)(void *), void *, char *);
186 void		 ampintc_intr_disestablish(void *);
187 void		 ampintc_irq_handler(void *);
188 const char	*ampintc_intr_string(void *);
189 uint32_t	 ampintc_iack(void);
190 void		 ampintc_eoi(uint32_t);
191 void		 ampintc_set_priority(int, int);
192 void		 ampintc_intr_enable(int);
193 void		 ampintc_intr_disable(int);
194 void		 ampintc_intr_config(int, int);
195 void		 ampintc_route(int, int, struct cpu_info *);
196 void		 ampintc_route_irq(void *, int, struct cpu_info *);
197 
198 int		 ampintc_ipi_combined(void *);
199 int		 ampintc_ipi_nop(void *);
200 int		 ampintc_ipi_ddb(void *);
201 void		 ampintc_send_ipi(struct cpu_info *, int);
202 
203 struct cfattach	ampintc_ca = {
204 	sizeof (struct ampintc_softc), ampintc_match, ampintc_attach
205 };
206 
207 struct cfdriver ampintc_cd = {
208 	NULL, "ampintc", DV_DULL
209 };
210 
211 static char *ampintc_compatibles[] = {
212 	"arm,cortex-a7-gic",
213 	"arm,cortex-a9-gic",
214 	"arm,cortex-a15-gic",
215 	"arm,gic-400",
216 	NULL
217 };
218 
219 int
220 ampintc_match(struct device *parent, void *cfdata, void *aux)
221 {
222 	struct fdt_attach_args *faa = aux;
223 	int i;
224 
225 	for (i = 0; ampintc_compatibles[i]; i++)
226 		if (OF_is_compatible(faa->fa_node, ampintc_compatibles[i]))
227 			return (1);
228 
229 	return (0);
230 }
231 
/*
 * Attach the GIC: map the distributor and CPU interface, size the
 * controller, mask and drain all sources, install the spl glue, and
 * on MP kernels probe for usable SGIs to use as IPIs.
 */
void
ampintc_attach(struct device *parent, struct device *self, void *aux)
{
	struct ampintc_softc *sc = (struct ampintc_softc *)self;
	struct fdt_attach_args *faa = aux;
	int i, nintr, ncpu;
	uint32_t ictr;
#ifdef MULTIPROCESSOR
	int nipi, ipiirq[2];
#endif

	ampintc = sc;

	arm_init_smask();

	sc->sc_iot = faa->fa_iot;

	/* First row: ICD (distributor) */
	if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr,
	    faa->fa_reg[0].size, 0, &sc->sc_d_ioh))
		panic("%s: ICD bus_space_map failed!", __func__);

	/* Second row: ICP (CPU interface) */
	if (bus_space_map(sc->sc_iot, faa->fa_reg[1].addr,
	    faa->fa_reg[1].size, 0, &sc->sc_p_ioh))
		panic("%s: ICP bus_space_map failed!", __func__);

	evcount_attach(&sc->sc_spur, "irq1023/spur", NULL);

	/* Size the controller from the ICTR "it lines" field. */
	ictr = bus_space_read_4(sc->sc_iot, sc->sc_d_ioh, ICD_ICTR);
	nintr = 32 * ((ictr >> ICD_ICTR_ITL_SH) & ICD_ICTR_ITL_M);
	nintr += 32; /* ICD_ICTR + 1, irq 0-31 is SGI, 32+ is PPI */
	sc->sc_nintr = nintr;
	ncpu = ((ictr >> ICD_ICTR_CPU_SH) & ICD_ICTR_CPU_M) + 1;
	printf(" nirq %d, ncpu %d", nintr, ncpu);

	/* The banked target register for irq 0 reads back our CPU's mask. */
	KASSERT(curcpu()->ci_cpuid <= ICD_ICTR_CPU_M);
	sc->sc_cpu_mask[curcpu()->ci_cpuid] =
	    bus_space_read_1(sc->sc_iot, sc->sc_d_ioh, ICD_IPTRn(0));

	/* Disable all interrupts, clear all pending */
	for (i = 0; i < nintr/32; i++) {
		bus_space_write_4(sc->sc_iot, sc->sc_d_ioh,
		    ICD_ICERn(i*32), ~0);
		bus_space_write_4(sc->sc_iot, sc->sc_d_ioh,
		    ICD_ICPRn(i*32), ~0);
	}
	for (i = 0; i < nintr; i++) {
		/* lowest priority ?? */
		bus_space_write_1(sc->sc_iot, sc->sc_d_ioh, ICD_IPRn(i), 0xff);
		/* target no cpus */
		bus_space_write_1(sc->sc_iot, sc->sc_d_ioh, ICD_IPTRn(i), 0);
	}
	/* Trigger config starts at i = 2: irq 0-31 (SGI/PPI) are fixed. */
	for (i = 2; i < nintr/16; i++) {
		/* irq 32 - N */
		bus_space_write_4(sc->sc_iot, sc->sc_d_ioh, ICD_ICRn(i*16), 0);
	}

	/* software reset of the part? */
	/* set protection bit (kernel only)? */

	/* XXX - check power saving bit */

	/* NOTE(review): M_NOWAIT result is not checked before use below. */
	sc->sc_handler = mallocarray(nintr, sizeof(*sc->sc_handler), M_DEVBUF,
	    M_ZERO | M_NOWAIT);
	for (i = 0; i < nintr; i++) {
		TAILQ_INIT(&sc->sc_handler[i].iq_list);
	}

	ampintc_setipl(IPL_HIGH);  /* XXX ??? */
	ampintc_calc_mask();

	/* insert self as interrupt handler */
	arm_set_intr_handler(ampintc_splraise, ampintc_spllower, ampintc_splx,
	    ampintc_setipl, ampintc_intr_establish_ext,
	    ampintc_intr_disestablish, ampintc_intr_string, ampintc_irq_handler);

#ifdef MULTIPROCESSOR
	/* setup IPI interrupts */

	/*
	 * Ideally we want two IPI interrupts, one for NOP and one for
	 * DDB, however we can survive if only one is available it is
	 * possible that most are not available to the non-secure OS.
	 */
	nipi = 0;
	for (i = 0; i < 16; i++) {
		int reg, oldreg;

		/*
		 * Probe writability: flip a priority bit and see if it
		 * sticks.  SGIs reserved for the secure world read back
		 * unchanged and are skipped.
		 */
		oldreg = bus_space_read_1(sc->sc_iot, sc->sc_d_ioh,
		    ICD_IPRn(i));
		bus_space_write_1(sc->sc_iot, sc->sc_d_ioh, ICD_IPRn(i),
		    oldreg ^ 0x20);

		/* if this interrupt is not usable, route will be zero */
		reg = bus_space_read_1(sc->sc_iot, sc->sc_d_ioh, ICD_IPRn(i));
		if (reg == oldreg)
			continue;

		/* return to original value, will be set when used */
		bus_space_write_1(sc->sc_iot, sc->sc_d_ioh, ICD_IPRn(i),
		    oldreg);

		if (nipi == 0)
			printf(" ipi: %d", i);
		else
			printf(", %d", i);
		ipiirq[nipi++] = i;
		if (nipi == 2)
			break;
	}

	if (nipi == 0)
		panic ("no irq available for IPI");

	/* With a single usable SGI, NOP and DDB share a combined handler. */
	switch (nipi) {
	case 1:
		ampintc_intr_establish(ipiirq[0], IST_EDGE_RISING,
		    IPL_IPI|IPL_MPSAFE, ampintc_ipi_combined, sc, "ipi");
		sc->sc_ipi_num[ARM_IPI_NOP] = ipiirq[0];
		sc->sc_ipi_num[ARM_IPI_DDB] = ipiirq[0];
		break;
	case 2:
		ampintc_intr_establish(ipiirq[0], IST_EDGE_RISING,
		    IPL_IPI|IPL_MPSAFE, ampintc_ipi_nop, sc, "ipinop");
		sc->sc_ipi_num[ARM_IPI_NOP] = ipiirq[0];
		ampintc_intr_establish(ipiirq[1], IST_EDGE_RISING,
		    IPL_IPI|IPL_MPSAFE, ampintc_ipi_ddb, sc, "ipiddb");
		sc->sc_ipi_num[ARM_IPI_DDB] = ipiirq[1];
		break;
	default:
		panic("nipi unexpected number %d", nipi);
	}

	intr_send_ipi_func = ampintc_send_ipi;
#endif

	/* enable interrupts */
	bus_space_write_4(sc->sc_iot, sc->sc_d_ioh, ICD_DCR, 3);
	bus_space_write_4(sc->sc_iot, sc->sc_p_ioh, ICPICR, 1);
	enable_interrupts(PSR_I);

	/* register with the FDT interrupt framework */
	sc->sc_ic.ic_node = faa->fa_node;
	sc->sc_ic.ic_cookie = self;
	sc->sc_ic.ic_establish = ampintc_intr_establish_fdt;
	sc->sc_ic.ic_disestablish = ampintc_intr_disestablish;
	sc->sc_ic.ic_route = ampintc_route_irq;
	sc->sc_ic.ic_cpu_enable = ampintc_cpuinit;
	arm_intr_register_fdt(&sc->sc_ic);

	/* attach GICv2M frame controller */
	simplebus_attach(parent, &sc->sc_sbus.sc_dev, faa);
}
385 
/*
 * Program the distributor priority byte for an interrupt from an
 * IPL level.
 */
void
ampintc_set_priority(int irq, int pri)
{
	struct ampintc_softc	*sc = ampintc;
	uint32_t		 prival;

	/*
	 * We only use 16 (13 really) interrupt priorities,
	 * and a CPU is only required to implement bit 4-7 of each field
	 * so shift into the top bits.
	 * also low values are higher priority thus IPL_HIGH - pri
	 */
	prival = (IPL_HIGH - pri) << ICMIPMR_SH;
	bus_space_write_1(sc->sc_iot, sc->sc_d_ioh, ICD_IPRn(irq), prival);
}
401 
/*
 * Set the current interrupt priority level: record it in ci_cpl and
 * program the CPU interface priority mask register to match.
 */
void
ampintc_setipl(int new)
{
	struct cpu_info		*ci = curcpu();
	struct ampintc_softc	*sc = ampintc;
	int			 psw;

	/* disable here is only to keep hardware in sync with ci->ci_cpl */
	psw = disable_interrupts(PSR_I);
	ci->ci_cpl = new;

	/* low values are higher priority thus IPL_HIGH - pri */
	bus_space_write_4(sc->sc_iot, sc->sc_p_ioh, ICPIPMR,
	    (IPL_HIGH - new) << ICMIPMR_SH);
	restore_interrupts(psw);
}
418 
/*
 * Unmask an interrupt source via the distributor set-enable register.
 */
void
ampintc_intr_enable(int irq)
{
	struct ampintc_softc	*sc = ampintc;

#ifdef DEBUG
	printf("enable irq %d register %x bitmask %08x\n",
	    irq, ICD_ISERn(irq), 1 << IRQ_TO_REG32BIT(irq));
#endif

	bus_space_write_4(sc->sc_iot, sc->sc_d_ioh, ICD_ISERn(irq),
	    1 << IRQ_TO_REG32BIT(irq));
}
432 
/*
 * Mask an interrupt source via the distributor clear-enable register.
 */
void
ampintc_intr_disable(int irq)
{
	struct ampintc_softc	*sc = ampintc;

	bus_space_write_4(sc->sc_iot, sc->sc_d_ioh, ICD_ICERn(irq),
	    1 << IRQ_TO_REG32BIT(irq));
}
441 
/*
 * Program the trigger mode (rising edge vs. high level) for an
 * interrupt in the distributor configuration registers.
 */
void
ampintc_intr_config(int irqno, int type)
{
	struct ampintc_softc	*sc = ampintc;
	uint32_t		 ctrl;

	ctrl = bus_space_read_4(sc->sc_iot, sc->sc_d_ioh, ICD_ICRn(irqno));

	/* two config bits per interrupt: clear, then set the trigger bit */
	ctrl &= ~ICD_ICR_TRIG_MASK(irqno);
	if (type == IST_EDGE_RISING)
		ctrl |= ICD_ICR_TRIG_EDGE(irqno);
	else
		ctrl |= ICD_ICR_TRIG_LEVEL(irqno);

	bus_space_write_4(sc->sc_iot, sc->sc_d_ioh, ICD_ICRn(irqno), ctrl);
}
458 
459 void
460 ampintc_calc_mask(void)
461 {
462 	struct cpu_info		*ci = curcpu();
463         struct ampintc_softc	*sc = ampintc;
464 	struct intrhand		*ih;
465 	int			 irq;
466 
467 	for (irq = 0; irq < sc->sc_nintr; irq++) {
468 		int max = IPL_NONE;
469 		int min = IPL_HIGH;
470 		TAILQ_FOREACH(ih, &sc->sc_handler[irq].iq_list, ih_list) {
471 			if (ih->ih_ipl > max)
472 				max = ih->ih_ipl;
473 
474 			if (ih->ih_ipl < min)
475 				min = ih->ih_ipl;
476 		}
477 
478 		if (max == IPL_NONE)
479 			min = IPL_NONE;
480 
481 		if (sc->sc_handler[irq].iq_irq_max == max &&
482 		    sc->sc_handler[irq].iq_irq_min == min)
483 			continue;
484 
485 		sc->sc_handler[irq].iq_irq_max = max;
486 		sc->sc_handler[irq].iq_irq_min = min;
487 
488 		/* Enable interrupts at lower levels, clear -> enable */
489 		/* Set interrupt priority/enable */
490 		if (min != IPL_NONE) {
491 			ampintc_set_priority(irq, min);
492 			ampintc_intr_enable(irq);
493 			ampintc_route(irq, IRQ_ENABLE, ci);
494 		} else {
495 			ampintc_intr_disable(irq);
496 			ampintc_route(irq, IRQ_DISABLE, ci);
497 		}
498 	}
499 	ampintc_setipl(ci->ci_cpl);
500 }
501 
502 void
503 ampintc_splx(int new)
504 {
505 	struct cpu_info *ci = curcpu();
506 
507 	if (ci->ci_ipending & arm_smask[new])
508 		arm_do_pending_intr(new);
509 
510 	ampintc_setipl(new);
511 }
512 
513 int
514 ampintc_spllower(int new)
515 {
516 	struct cpu_info *ci = curcpu();
517 	int old = ci->ci_cpl;
518 	ampintc_splx(new);
519 	return (old);
520 }
521 
522 int
523 ampintc_splraise(int new)
524 {
525 	struct cpu_info *ci = curcpu();
526 	int old;
527 	old = ci->ci_cpl;
528 
529 	/*
530 	 * setipl must always be called because there is a race window
531 	 * where the variable is updated before the mask is set
532 	 * an interrupt occurs in that window without the mask always
533 	 * being set, the hardware might not get updated on the next
534 	 * splraise completely messing up spl protection.
535 	 */
536 	if (old > new)
537 		new = old;
538 
539 	ampintc_setipl(new);
540 
541 	return (old);
542 }
543 
544 
545 uint32_t
546 ampintc_iack(void)
547 {
548 	uint32_t intid;
549 	struct ampintc_softc	*sc = ampintc;
550 
551 	intid = bus_space_read_4(sc->sc_iot, sc->sc_p_ioh, ICPIAR);
552 
553 	return (intid);
554 }
555 
/*
 * Signal end-of-interrupt; eoi is the value previously returned by
 * ampintc_iack().
 */
void
ampintc_eoi(uint32_t eoi)
{
	struct ampintc_softc	*sc = ampintc;

	bus_space_write_4(sc->sc_iot, sc->sc_p_ioh, ICPEOIR, eoi);
}
563 
564 void
565 ampintc_route(int irq, int enable, struct cpu_info *ci)
566 {
567 	struct ampintc_softc	*sc = ampintc;
568 	uint8_t			 mask, val;
569 
570 	KASSERT(ci->ci_cpuid <= ICD_ICTR_CPU_M);
571 	mask = sc->sc_cpu_mask[ci->ci_cpuid];
572 
573 	val = bus_space_read_1(sc->sc_iot, sc->sc_d_ioh, ICD_IPTRn(irq));
574 	if (enable == IRQ_ENABLE)
575 		val |= mask;
576 	else
577 		val &= ~mask;
578 	bus_space_write_1(sc->sc_iot, sc->sc_d_ioh, ICD_IPTRn(irq), val);
579 }
580 
/*
 * Per-CPU initialization hook: determine this CPU's GIC target mask
 * by scanning the banked target registers for irq 0-31, which read
 * back the mask of the reading CPU.
 */
void
ampintc_cpuinit(void)
{
	struct ampintc_softc    *sc = ampintc;
	int			 i;

	/* XXX - this is the only cpu specific call to set this */
	if (sc->sc_cpu_mask[cpu_number()] == 0) {
		for (i = 0; i < 32; i++) {
			int cpumask =
			    bus_space_read_1(sc->sc_iot, sc->sc_d_ioh,
			        ICD_IPTRn(i));

			if (cpumask != 0) {
				sc->sc_cpu_mask[cpu_number()] = cpumask;
				break;
			}
		}
	}

	if (sc->sc_cpu_mask[cpu_number()] == 0)
		panic("could not determine cpu target mask");
}
604 
/*
 * interrupt_controller route hook: (re)route an established handler's
 * interrupt to the given CPU, restoring priority/enable if enabling.
 */
void
ampintc_route_irq(void *v, int enable, struct cpu_info *ci)
{
	struct ampintc_softc    *sc = ampintc;
	struct intrhand         *ih = v;

	/* make sure the CPU interface is enabled */
	bus_space_write_4(sc->sc_iot, sc->sc_p_ioh, ICPICR, 1);
	/*
	 * NOTE(review): this zeroes the entire 16-irq trigger config
	 * register containing ih_irq (level trigger for the whole
	 * group), not just this interrupt's two bits — confirm intended.
	 */
	bus_space_write_4(sc->sc_iot, sc->sc_d_ioh, ICD_ICRn(ih->ih_irq), 0);
	if (enable) {
		ampintc_set_priority(ih->ih_irq,
		    sc->sc_handler[ih->ih_irq].iq_irq_min);
		ampintc_intr_enable(ih->ih_irq);
	}

	ampintc_route(ih->ih_irq, enable, ci);
}
621 
/*
 * Main interrupt dispatch: acknowledge the pending interrupt, raise
 * the IPL to the line's cached maximum, run every handler on the
 * shared list (with the kernel lock unless marked MPSAFE), then EOI
 * and restore the previous level.
 */
void
ampintc_irq_handler(void *frame)
{
	struct ampintc_softc	*sc = ampintc;
	struct intrhand		*ih;
	void			*arg;
	uint32_t		 iack_val;
	int			 irq, pri, s, handled;

	iack_val = ampintc_iack();
#ifdef DEBUG_INTC
	if (iack_val != 27)
		printf("irq  %d fired\n", iack_val);
	else {
		static int cnt = 0;
		if ((cnt++ % 100) == 0) {
			printf("irq  %d fired * _100\n", iack_val);
#ifdef DDB
			db_enter();
#endif
		}

	}
#endif

	irq = iack_val & ICPIAR_IRQ_M;

	/* 1023 is the GIC spurious interrupt ID; no EOI required */
	if (irq == 1023) {
		sc->sc_spur.ec_count++;
		return;
	}

	if (irq >= sc->sc_nintr)
		return;

	/* block this line (and lower-priority ones) while dispatching */
	pri = sc->sc_handler[irq].iq_irq_max;
	s = ampintc_splraise(pri);
	TAILQ_FOREACH(ih, &sc->sc_handler[irq].iq_list, ih_list) {
#ifdef MULTIPROCESSOR
		int need_lock;

		if (ih->ih_flags & IPL_MPSAFE)
			need_lock = 0;
		else
			need_lock = s < IPL_SCHED;

		if (need_lock)
			KERNEL_LOCK();
#endif

		/* handlers established without an arg get the trapframe */
		if (ih->ih_arg != 0)
			arg = ih->ih_arg;
		else
			arg = frame;

		/* run the handler with interrupts enabled for nesting */
		enable_interrupts(PSR_I);
		handled = ih->ih_func(arg);
		disable_interrupts(PSR_I);
		if (handled)
			ih->ih_count.ec_count++;

#ifdef MULTIPROCESSOR
		if (need_lock)
			KERNEL_UNLOCK();
#endif
	}
	ampintc_eoi(iack_val);

	ampintc_splx(s);
}
692 
/*
 * Legacy (non-FDT) establish interface: callers pass SPI numbers
 * relative to 0, so add 32 to get the GIC interrupt number.
 */
void *
ampintc_intr_establish_ext(int irqno, int level, struct cpu_info *ci,
    int (*func)(void *), void *arg, char *name)
{
	return ampintc_intr_establish(irqno+32, IST_LEVEL_HIGH, level,
	    ci, func, arg, name);
}
700 
701 void *
702 ampintc_intr_establish_fdt(void *cookie, int *cell, int level,
703     struct cpu_info *ci, int (*func)(void *), void *arg, char *name)
704 {
705 	struct ampintc_softc	*sc = (struct ampintc_softc *)cookie;
706 	int			 irq;
707 	int			 type;
708 
709 	/* 2nd cell contains the interrupt number */
710 	irq = cell[1];
711 
712 	/* 1st cell contains type: 0 SPI (32-X), 1 PPI (16-31) */
713 	if (cell[0] == 0)
714 		irq += 32;
715 	else if (cell[0] == 1)
716 		irq += 16;
717 	else
718 		panic("%s: bogus interrupt type", sc->sc_sbus.sc_dev.dv_xname);
719 
720 	/* SPIs are only active-high level or low-to-high edge */
721 	if (cell[2] & 0x3)
722 		type = IST_EDGE_RISING;
723 	else
724 		type = IST_LEVEL_HIGH;
725 
726 	return ampintc_intr_establish(irq, type, level, ci, func, arg, name);
727 }
728 
/*
 * Establish an interrupt handler on a GIC irq: allocate and link an
 * intrhand, program the trigger type and recompute masks.  Routing to
 * secondary CPUs is not supported; returns the handler cookie, or
 * NULL when a non-primary CPU is requested.
 */
void *
ampintc_intr_establish(int irqno, int type, int level, struct cpu_info *ci,
    int (*func)(void *), void *arg, char *name)
{
	struct ampintc_softc	*sc = ampintc;
	struct intrhand		*ih;
	int			 psw;

	if (irqno < 0 || irqno >= sc->sc_nintr)
		panic("ampintc_intr_establish: bogus irqnumber %d: %s",
		     irqno, name);

	if (ci == NULL)
		ci = &cpu_info_primary;
	else if (!CPU_IS_PRIMARY(ci))
		return NULL;

	/* SGI/PPI trigger types are fixed by the architecture */
	if (irqno < 16) {
		/* SGI are only EDGE */
		type = IST_EDGE_RISING;
	} else if (irqno < 32) {
		/* PPI are only LEVEL */
		type = IST_LEVEL_HIGH;
	}

	ih = malloc(sizeof(*ih), M_DEVBUF, M_WAITOK);
	ih->ih_func = func;
	ih->ih_arg = arg;
	ih->ih_ipl = level & IPL_IRQMASK;
	ih->ih_flags = level & IPL_FLAGMASK;
	ih->ih_irq = irqno;
	ih->ih_name = name;

	/* block interrupts while the handler list and masks change */
	psw = disable_interrupts(PSR_I);

	TAILQ_INSERT_TAIL(&sc->sc_handler[irqno].iq_list, ih, ih_list);

	if (name != NULL)
		evcount_attach(&ih->ih_count, name, &ih->ih_irq);

#ifdef DEBUG_INTC
	printf("ampintc_intr_establish irq %d level %d [%s]\n", irqno, level,
	    name);
#endif

	ampintc_intr_config(irqno, type);
	ampintc_calc_mask();

	restore_interrupts(psw);
	return (ih);
}
780 
/*
 * Tear down a handler established by ampintc_intr_establish(): unlink
 * it, detach its event counter, free it and recompute masks.
 */
void
ampintc_intr_disestablish(void *cookie)
{
	struct ampintc_softc	*sc = ampintc;
	struct intrhand		*ih = cookie;
	int			 psw;

#ifdef DEBUG_INTC
	printf("ampintc_intr_disestablish irq %d level %d [%s]\n",
	    ih->ih_irq, ih->ih_ipl, ih->ih_name);
#endif

	psw = disable_interrupts(PSR_I);

	TAILQ_REMOVE(&sc->sc_handler[ih->ih_irq].iq_list, ih, ih_list);
	if (ih->ih_name != NULL)
		evcount_detach(&ih->ih_count);
	free(ih, M_DEVBUF, sizeof(*ih));

	/* disables the source again if this was the last handler */
	ampintc_calc_mask();

	restore_interrupts(psw);
}
804 
/*
 * Return a printable name for an established interrupt.  Uses a
 * static buffer, so the result is only valid until the next call.
 */
const char *
ampintc_intr_string(void *cookie)
{
	struct intrhand *ih = (struct intrhand *)cookie;
	static char irqstr[1 + sizeof("ampintc irq ") + 4];

	snprintf(irqstr, sizeof irqstr, "ampintc irq %d", ih->ih_irq);
	return irqstr;
}
814 
815 /*
816  * GICv2m frame controller for MSI interrupts.
817  */
818 #define GICV2M_TYPER		0x008
819 #define  GICV2M_TYPER_SPI_BASE(x)	(((x) >> 16) & 0x3ff)
820 #define  GICV2M_TYPER_SPI_COUNT(x)	(((x) >> 0) & 0x3ff)
821 #define GICV2M_SETSPI_NS	0x040
822 
823 int	 ampintc_msi_match(struct device *, void *, void *);
824 void	 ampintc_msi_attach(struct device *, struct device *, void *);
825 void	*ampintc_intr_establish_msi(void *, uint64_t *, uint64_t *,
826 	    int , struct cpu_info *, int (*)(void *), void *, char *);
827 void	 ampintc_intr_disestablish_msi(void *);
828 
/* Softc for one GICv2M MSI frame. */
struct ampintc_msi_softc {
	struct device			 sc_dev;
	bus_space_tag_t			 sc_iot;
	bus_space_handle_t		 sc_ioh;
	paddr_t				 sc_addr;	/* frame physical address (doorbell base) */
	int				 sc_bspi;	/* first SPI owned by this frame */
	int				 sc_nspi;	/* number of SPIs in this frame */
	void				**sc_spi;	/* per-SPI handler cookie, NULL = free */
	struct interrupt_controller	 sc_ic;
};
839 
840 struct cfattach	ampintcmsi_ca = {
841 	sizeof (struct ampintc_msi_softc), ampintc_msi_match, ampintc_msi_attach
842 };
843 
844 struct cfdriver ampintcmsi_cd = {
845 	NULL, "ampintcmsi", DV_DULL
846 };
847 
/* Match a GICv2M MSI frame node. */
int
ampintc_msi_match(struct device *parent, void *cfdata, void *aux)
{
	struct fdt_attach_args *faa = aux;

	return OF_is_compatible(faa->fa_node, "arm,gic-v2m-frame");
}
855 
/*
 * Attach a GICv2M MSI frame: map it, read (or override from the
 * device tree) the SPI range it owns, and register as an MSI-capable
 * interrupt controller.
 */
void
ampintc_msi_attach(struct device *parent, struct device *self, void *aux)
{
	struct ampintc_msi_softc *sc = (struct ampintc_msi_softc *)self;
	struct fdt_attach_args *faa = aux;
	uint32_t typer;

	sc->sc_iot = faa->fa_iot;
	if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr,
	    faa->fa_reg[0].size, 0, &sc->sc_ioh))
		panic("%s: bus_space_map failed!", __func__);

	/* XXX: Hack to retrieve the physical address (from a CPU PoV). */
	if (!pmap_extract(pmap_kernel(), sc->sc_ioh, &sc->sc_addr)) {
		printf(": cannot retrieve msi addr\n");
		return;
	}

	/* SPI base/count come from the TYPER register... */
	typer = bus_space_read_4(sc->sc_iot, sc->sc_ioh, GICV2M_TYPER);
	sc->sc_bspi = GICV2M_TYPER_SPI_BASE(typer);
	sc->sc_nspi = GICV2M_TYPER_SPI_COUNT(typer);

	/* ...unless the device tree overrides them. */
	sc->sc_bspi = OF_getpropint(faa->fa_node,
	    "arm,msi-base-spi", sc->sc_bspi);
	sc->sc_nspi = OF_getpropint(faa->fa_node,
	    "arm,msi-num-spis", sc->sc_nspi);

	printf(": nspi %d\n", sc->sc_nspi);

	sc->sc_spi = mallocarray(sc->sc_nspi, sizeof(void *), M_DEVBUF,
	    M_WAITOK|M_ZERO);

	sc->sc_ic.ic_node = faa->fa_node;
	sc->sc_ic.ic_cookie = sc;
	sc->sc_ic.ic_establish_msi = ampintc_intr_establish_msi;
	sc->sc_ic.ic_disestablish = ampintc_intr_disestablish_msi;
	arm_intr_register_fdt(&sc->sc_ic);
}
894 
895 void *
896 ampintc_intr_establish_msi(void *self, uint64_t *addr, uint64_t *data,
897     int level, struct cpu_info *ci, int (*func)(void *), void *arg, char *name)
898 {
899 	struct ampintc_msi_softc *sc = (struct ampintc_msi_softc *)self;
900 	void *cookie;
901 	int i;
902 
903 	for (i = 0; i < sc->sc_nspi; i++) {
904 		if (sc->sc_spi[i] != NULL)
905 			continue;
906 
907 		cookie = ampintc_intr_establish(sc->sc_bspi + i,
908 		    IST_EDGE_RISING, level, ci, func, arg, name);
909 		if (cookie == NULL)
910 			return NULL;
911 
912 		*addr = sc->sc_addr + GICV2M_SETSPI_NS;
913 		*data = sc->sc_bspi + i;
914 		sc->sc_spi[i] = cookie;
915 		return &sc->sc_spi[i];
916 	}
917 
918 	return NULL;
919 }
920 
/*
 * Tear down an MSI; cookie points into sc_spi[], so clearing the slot
 * frees it for reuse by a later establish.
 */
void
ampintc_intr_disestablish_msi(void *cookie)
{
	ampintc_intr_disestablish(*(void **)cookie);
	*(void **)cookie = NULL;
}
927 
928 #ifdef MULTIPROCESSOR
/* IPI handler: drop the receiving CPU into the kernel debugger. */
int
ampintc_ipi_ddb(void *v)
{
	/* XXX */
	db_enter();
	return 1;
}
936 
/* IPI handler that does nothing; the interrupt itself wakes the CPU. */
int
ampintc_ipi_nop(void *v)
{
	/* Nothing to do here, just enough to wake up from WFI */
	return 1;
}
943 
944 int
945 ampintc_ipi_combined(void *v)
946 {
947 	struct ampintc_softc *sc = (struct ampintc_softc *)v;
948 
949 	if (sc->sc_ipi_reason[cpu_number()] == ARM_IPI_DDB) {
950 		sc->sc_ipi_reason[cpu_number()] = ARM_IPI_NOP;
951 		return ampintc_ipi_ddb(v);
952 	} else {
953 		return ampintc_ipi_nop(v);
954 	}
955 }
956 
/*
 * Send an IPI to a single CPU through the software generated
 * interrupt register, recording the reason for the combined handler.
 */
void
ampintc_send_ipi(struct cpu_info *ci, int id)
{
	struct ampintc_softc	*sc = ampintc;
	int sendmask;

	/* no point waking ourselves with a NOP */
	if (ci == curcpu() && id == ARM_IPI_NOP)
		return;

	/* never overwrite IPI_DDB with IPI_NOP */
	if (id == ARM_IPI_DDB)
		sc->sc_ipi_reason[ci->ci_cpuid] = id;

	/* currently will only send to one cpu */
	sendmask = 1 << (16 + ci->ci_cpuid);
	sendmask |= sc->sc_ipi_num[id];

	bus_space_write_4(sc->sc_iot, sc->sc_d_ioh, ICD_SGIR, sendmask);
}
976 #endif
977