xref: /openbsd/sys/arch/arm64/dev/ampintc.c (revision 097a140d)
1 /* $OpenBSD: ampintc.c,v 1.21 2021/02/17 12:11:45 kettenis Exp $ */
2 /*
3  * Copyright (c) 2007,2009,2011 Dale Rahn <drahn@openbsd.org>
4  *
5  * Permission to use, copy, modify, and distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 
18 /*
19  * This driver implements the interrupt controller as specified in
20  * DDI0407E_cortex_a9_mpcore_r2p0_trm with the
21  * IHI0048A_gic_architecture_spec_v1_0 underlying specification
22  */
23 #include <sys/param.h>
24 #include <sys/systm.h>
25 #include <sys/queue.h>
26 #include <sys/malloc.h>
27 #include <sys/device.h>
28 #include <sys/evcount.h>
29 
30 #include <uvm/uvm_extern.h>
31 
32 #include <machine/bus.h>
33 #include <machine/fdt.h>
34 
35 #include <dev/ofw/fdt.h>
36 #include <dev/ofw/openfirm.h>
37 
38 #include <arm64/dev/simplebusvar.h>
39 
40 /* registers */
41 #define	ICD_DCR			0x000
42 #define		ICD_DCR_ES		0x00000001
43 #define		ICD_DCR_ENS		0x00000002
44 
45 #define ICD_ICTR			0x004
46 #define		ICD_ICTR_LSPI_SH	11
47 #define		ICD_ICTR_LSPI_M		0x1f
48 #define		ICD_ICTR_CPU_SH		5
49 #define		ICD_ICTR_CPU_M		0x07
50 #define		ICD_ICTR_ITL_SH		0
51 #define		ICD_ICTR_ITL_M		0x1f
52 #define ICD_IDIR			0x008
53 #define 	ICD_DIR_PROD_SH		24
54 #define 	ICD_DIR_PROD_M		0xff
55 #define 	ICD_DIR_REV_SH		12
56 #define 	ICD_DIR_REV_M		0xfff
57 #define 	ICD_DIR_IMP_SH		0
58 #define 	ICD_DIR_IMP_M		0xfff
59 
60 #define IRQ_TO_REG32(i)		(((i) >> 5) & 0x1f)
61 #define IRQ_TO_REG32BIT(i)	((i) & 0x1f)
62 #define IRQ_TO_REG4(i)		(((i) >> 2) & 0xff)
63 #define IRQ_TO_REG4BIT(i)	((i) & 0x3)
64 #define IRQ_TO_REG16(i)		(((i) >> 4) & 0x3f)
65 #define IRQ_TO_REG16BIT(i)	((i) & 0xf)
66 #define IRQ_TO_REGBIT_S(i)	8
67 #define IRQ_TO_REG4BIT_M(i)	8
68 
69 #define ICD_ISRn(i)		(0x080 + (IRQ_TO_REG32(i) * 4))
70 #define ICD_ISERn(i)		(0x100 + (IRQ_TO_REG32(i) * 4))
71 #define ICD_ICERn(i)		(0x180 + (IRQ_TO_REG32(i) * 4))
72 #define ICD_ISPRn(i)		(0x200 + (IRQ_TO_REG32(i) * 4))
73 #define ICD_ICPRn(i)		(0x280 + (IRQ_TO_REG32(i) * 4))
74 #define ICD_ABRn(i)		(0x300 + (IRQ_TO_REG32(i) * 4))
75 #define ICD_IPRn(i)		(0x400 + (i))
76 #define ICD_IPTRn(i)		(0x800 + (i))
77 #define ICD_ICRn(i)		(0xC00 + (IRQ_TO_REG16(i) * 4))
78 #define 	ICD_ICR_TRIG_LEVEL(i)	(0x0 << (IRQ_TO_REG16BIT(i) * 2))
79 #define 	ICD_ICR_TRIG_EDGE(i)	(0x2 << (IRQ_TO_REG16BIT(i) * 2))
80 #define 	ICD_ICR_TRIG_MASK(i)	(0x2 << (IRQ_TO_REG16BIT(i) * 2))
81 
82 /*
83  * what about (ppi|spi)_status
84  */
85 #define ICD_PPI			0xD00
86 #define 	ICD_PPI_GTIMER	(1 << 11)
87 #define 	ICD_PPI_FIQ		(1 << 12)
88 #define 	ICD_PPI_PTIMER	(1 << 13)
89 #define 	ICD_PPI_PWDOG	(1 << 14)
90 #define 	ICD_PPI_IRQ		(1 << 15)
91 #define ICD_SPI_BASE		0xD04
92 #define ICD_SPIn(i)			(ICD_SPI_BASE + ((i) * 4))
93 
94 
95 #define ICD_SGIR			0xF00
96 
97 #define ICD_PERIPH_ID_0			0xFD0
98 #define ICD_PERIPH_ID_1			0xFD4
99 #define ICD_PERIPH_ID_2			0xFD8
100 #define ICD_PERIPH_ID_3			0xFDC
101 #define ICD_PERIPH_ID_4			0xFE0
102 #define ICD_PERIPH_ID_5			0xFE4
103 #define ICD_PERIPH_ID_6			0xFE8
104 #define ICD_PERIPH_ID_7			0xFEC
105 
106 #define ICD_COMP_ID_0			0xFEC
107 #define ICD_COMP_ID_1			0xFEC
108 #define ICD_COMP_ID_2			0xFEC
109 #define ICD_COMP_ID_3			0xFEC
110 
111 
112 #define ICPICR				0x00
113 #define ICPIPMR				0x04
114 /* XXX - must left justify bits to  0 - 7  */
115 #define 	ICMIPMR_SH 		4
116 #define ICPBPR				0x08
117 #define ICPIAR				0x0C
118 #define 	ICPIAR_IRQ_SH		0
119 #define 	ICPIAR_IRQ_M		0x3ff
120 #define 	ICPIAR_CPUID_SH		10
121 #define 	ICPIAR_CPUID_M		0x7
122 #define 	ICPIAR_NO_PENDING_IRQ	ICPIAR_IRQ_M
123 #define ICPEOIR				0x10
124 #define ICPPRP				0x14
125 #define ICPHPIR				0x18
126 #define ICPIIR				0xFC
127 
128 /*
129  * what about periph_id and component_id
130  */
131 
132 #define IRQ_ENABLE	1
133 #define IRQ_DISABLE	0
134 
/* Per-device state for the GIC (ampintc) interrupt controller. */
struct ampintc_softc {
	struct simplebus_softc	 sc_sbus;
	struct intrq 		*sc_handler;	/* per-IRQ handler queues; sc_nintr entries */
	int			 sc_nintr;	/* number of interrupt lines supported */
	bus_space_tag_t		 sc_iot;
	bus_space_handle_t	 sc_d_ioh, sc_p_ioh;	/* distributor (ICD) and CPU interface (ICP) */
	uint8_t			 sc_cpu_mask[ICD_ICTR_CPU_M + 1];	/* GIC target mask per CPU id */
	struct evcount		 sc_spur;	/* spurious interrupt (irq 1023) counter */
	struct interrupt_controller sc_ic;
	int			 sc_ipi_reason[ICD_ICTR_CPU_M + 1];	/* pending IPI type per CPU */
	int			 sc_ipi_num[2];	/* SGI numbers assigned to ARM_IPI_{NOP,DDB} */
};
/* Singleton: the spl/IRQ entry points below have no cookie argument. */
struct ampintc_softc *ampintc;
148 
149 
/* One established interrupt handler; several may share one IRQ line. */
struct intrhand {
	TAILQ_ENTRY(intrhand) ih_list;	/* link on intrq list */
	int (*ih_func)(void *);		/* handler */
	void *ih_arg;			/* arg for handler */
	int ih_ipl;			/* IPL_* */
	int ih_flags;			/* IPL_MPSAFE etc. (level & IPL_FLAGMASK) */
	int ih_irq;			/* IRQ number */
	struct evcount	ih_count;	/* per-handler dispatch statistics */
	char *ih_name;
	struct cpu_info *ih_ci;		/* CPU the IRQ runs on */
};

/* Per-IRQ queue head plus cached priority range of its handlers. */
struct intrq {
	TAILQ_HEAD(, intrhand) iq_list;	/* handler list */
	struct cpu_info *iq_ci;		/* CPU the IRQ runs on */
	int iq_irq_max;			/* IRQ to mask while handling */
	int iq_irq_min;			/* lowest IRQ when shared */
	int iq_ist;			/* share type */
};
169 
170 
171 int		 ampintc_match(struct device *, void *, void *);
172 void		 ampintc_attach(struct device *, struct device *, void *);
173 void		 ampintc_cpuinit(void);
174 int		 ampintc_spllower(int);
175 void		 ampintc_splx(int);
176 int		 ampintc_splraise(int);
177 void		 ampintc_setipl(int);
178 void		 ampintc_calc_mask(void);
179 void		 ampintc_calc_irq(struct ampintc_softc *, int);
180 void		*ampintc_intr_establish(int, int, int, struct cpu_info *,
181 		    int (*)(void *), void *, char *);
182 void		*ampintc_intr_establish_fdt(void *, int *, int,
183 		    struct cpu_info *, int (*)(void *), void *, char *);
184 void		 ampintc_intr_disestablish(void *);
185 void		 ampintc_irq_handler(void *);
186 const char	*ampintc_intr_string(void *);
187 uint32_t	 ampintc_iack(void);
188 void		 ampintc_eoi(uint32_t);
189 void		 ampintc_set_priority(int, int);
190 void		 ampintc_intr_enable(int);
191 void		 ampintc_intr_disable(int);
192 void		 ampintc_intr_config(int, int);
193 void		 ampintc_route(int, int, struct cpu_info *);
194 void		 ampintc_route_irq(void *, int, struct cpu_info *);
195 void		 ampintc_intr_barrier(void *);
196 
197 int		 ampintc_ipi_combined(void *);
198 int		 ampintc_ipi_nop(void *);
199 int		 ampintc_ipi_ddb(void *);
200 void		 ampintc_send_ipi(struct cpu_info *, int);
201 
/* autoconf(9) glue for the "ampintc" device. */
struct cfattach	ampintc_ca = {
	sizeof (struct ampintc_softc), ampintc_match, ampintc_attach
};

struct cfdriver ampintc_cd = {
	NULL, "ampintc", DV_DULL
};

/* FDT "compatible" strings this driver attaches to (GICv1/v2 variants). */
static char *ampintc_compatibles[] = {
	"arm,cortex-a7-gic",
	"arm,cortex-a9-gic",
	"arm,cortex-a15-gic",
	"arm,gic-400",
	NULL
};
217 
218 int
219 ampintc_match(struct device *parent, void *cfdata, void *aux)
220 {
221 	struct fdt_attach_args *faa = aux;
222 	int i;
223 
224 	for (i = 0; ampintc_compatibles[i]; i++)
225 		if (OF_is_compatible(faa->fa_node, ampintc_compatibles[i]))
226 			return (1);
227 
228 	return (0);
229 }
230 
/*
 * Attach the GIC: map the distributor and CPU interface, size the
 * controller, quiesce all interrupts, probe usable SGIs for IPIs,
 * install the spl backend and register as an FDT interrupt controller.
 */
void
ampintc_attach(struct device *parent, struct device *self, void *aux)
{
	struct ampintc_softc *sc = (struct ampintc_softc *)self;
	struct fdt_attach_args *faa = aux;
	int i, nintr, ncpu;
	uint32_t ictr;
#ifdef MULTIPROCESSOR
	int nipi, ipiirq[2];
#endif

	/* Publish the singleton used by the spl/handler entry points. */
	ampintc = sc;

	arm_init_smask();

	sc->sc_iot = faa->fa_iot;

	/* First row: ICD */
	if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr,
	    faa->fa_reg[0].size, 0, &sc->sc_d_ioh))
		panic("%s: ICD bus_space_map failed!", __func__);

	/* Second row: ICP */
	if (bus_space_map(sc->sc_iot, faa->fa_reg[1].addr,
	    faa->fa_reg[1].size, 0, &sc->sc_p_ioh))
		panic("%s: ICP bus_space_map failed!", __func__);

	evcount_attach(&sc->sc_spur, "irq1023/spur", NULL);

	/* ICTR encodes line count (in units of 32) and number of CPUs. */
	ictr = bus_space_read_4(sc->sc_iot, sc->sc_d_ioh, ICD_ICTR);
	nintr = 32 * ((ictr >> ICD_ICTR_ITL_SH) & ICD_ICTR_ITL_M);
	nintr += 32; /* ICD_ICTR + 1, irq 0-31 is SGI, 32+ is PPI */
	sc->sc_nintr = nintr;
	ncpu = ((ictr >> ICD_ICTR_CPU_SH) & ICD_ICTR_CPU_M) + 1;
	printf(" nirq %d, ncpu %d", nintr, ncpu);

	/*
	 * Learn this CPU's target mask from the banked per-CPU target
	 * register of interrupt 0 (SGI targets always include self).
	 */
	KASSERT(curcpu()->ci_cpuid <= ICD_ICTR_CPU_M);
	sc->sc_cpu_mask[curcpu()->ci_cpuid] =
	    bus_space_read_1(sc->sc_iot, sc->sc_d_ioh, ICD_IPTRn(0));

	/* Disable all interrupts, clear all pending */
	for (i = 0; i < nintr/32; i++) {
		bus_space_write_4(sc->sc_iot, sc->sc_d_ioh,
		    ICD_ICERn(i*32), ~0);
		bus_space_write_4(sc->sc_iot, sc->sc_d_ioh,
		    ICD_ICPRn(i*32), ~0);
	}
	for (i = 0; i < nintr; i++) {
		/* lowest priority ?? */
		bus_space_write_1(sc->sc_iot, sc->sc_d_ioh, ICD_IPRn(i), 0xff);
		/* target no cpus */
		bus_space_write_1(sc->sc_iot, sc->sc_d_ioh, ICD_IPTRn(i), 0);
	}
	for (i = 2; i < nintr/16; i++) {
		/* irq 32 - N */
		bus_space_write_4(sc->sc_iot, sc->sc_d_ioh, ICD_ICRn(i*16), 0);
	}

	/* software reset of the part? */
	/* set protection bit (kernel only)? */

	/* XXX - check power saving bit */

	/* One handler queue per interrupt line. */
	sc->sc_handler = mallocarray(nintr, sizeof(*sc->sc_handler), M_DEVBUF,
	    M_ZERO | M_NOWAIT);
	for (i = 0; i < nintr; i++) {
		TAILQ_INIT(&sc->sc_handler[i].iq_list);
	}

	ampintc_setipl(IPL_HIGH);  /* XXX ??? */
	ampintc_calc_mask();

	/* insert self as interrupt handler */
	arm_set_intr_handler(ampintc_splraise, ampintc_spllower, ampintc_splx,
	    ampintc_setipl, ampintc_irq_handler, NULL);

#ifdef MULTIPROCESSOR
	/* setup IPI interrupts */

	/*
	 * Ideally we want two IPI interrupts, one for NOP and one for
	 * DDB, however we can survive if only one is available it is
	 * possible that most are not available to the non-secure OS.
	 */
	nipi = 0;
	for (i = 0; i < 16; i++) {
		int reg, oldreg;

		/*
		 * Probe whether this SGI's priority register is
		 * writable: flip a bit and read it back.  A read-only
		 * register means the SGI is not usable (likely
		 * reserved for the secure world).
		 */
		oldreg = bus_space_read_1(sc->sc_iot, sc->sc_d_ioh,
		    ICD_IPRn(i));
		bus_space_write_1(sc->sc_iot, sc->sc_d_ioh, ICD_IPRn(i),
		    oldreg ^ 0x20);

		/* if this interrupt is not usable, route will be zero */
		reg = bus_space_read_1(sc->sc_iot, sc->sc_d_ioh, ICD_IPRn(i));
		if (reg == oldreg)
			continue;

		/* return to original value, will be set when used */
		bus_space_write_1(sc->sc_iot, sc->sc_d_ioh, ICD_IPRn(i),
		    oldreg);

		if (nipi == 0)
			printf(" ipi: %d", i);
		else
			printf(", %d", i);
		ipiirq[nipi++] = i;
		if (nipi == 2)
			break;
	}

	if (nipi == 0)
		panic ("no irq available for IPI");

	/* With one usable SGI the NOP and DDB IPIs share a handler. */
	switch (nipi) {
	case 1:
		ampintc_intr_establish(ipiirq[0], IST_EDGE_RISING,
		    IPL_IPI|IPL_MPSAFE, NULL, ampintc_ipi_combined, sc, "ipi");
		sc->sc_ipi_num[ARM_IPI_NOP] = ipiirq[0];
		sc->sc_ipi_num[ARM_IPI_DDB] = ipiirq[0];
		break;
	case 2:
		ampintc_intr_establish(ipiirq[0], IST_EDGE_RISING,
		    IPL_IPI|IPL_MPSAFE, NULL, ampintc_ipi_nop, sc, "ipinop");
		sc->sc_ipi_num[ARM_IPI_NOP] = ipiirq[0];
		ampintc_intr_establish(ipiirq[1], IST_EDGE_RISING,
		    IPL_IPI|IPL_MPSAFE, NULL, ampintc_ipi_ddb, sc, "ipiddb");
		sc->sc_ipi_num[ARM_IPI_DDB] = ipiirq[1];
		break;
	default:
		panic("nipi unexpected number %d", nipi);
	}

	intr_send_ipi_func = ampintc_send_ipi;
#endif

	/* enable interrupts */
	bus_space_write_4(sc->sc_iot, sc->sc_d_ioh, ICD_DCR, 3);
	bus_space_write_4(sc->sc_iot, sc->sc_p_ioh, ICPICR, 1);
	enable_interrupts();

	/* Register with the FDT interrupt framework. */
	sc->sc_ic.ic_node = faa->fa_node;
	sc->sc_ic.ic_cookie = self;
	sc->sc_ic.ic_establish = ampintc_intr_establish_fdt;
	sc->sc_ic.ic_disestablish = ampintc_intr_disestablish;
	sc->sc_ic.ic_route = ampintc_route_irq;
	sc->sc_ic.ic_cpu_enable = ampintc_cpuinit;
	sc->sc_ic.ic_barrier = ampintc_intr_barrier;
	arm_intr_register_fdt(&sc->sc_ic);

	/* attach GICv2M frame controller */
	simplebus_attach(parent, &sc->sc_sbus.sc_dev, faa);
}
384 
385 void
386 ampintc_set_priority(int irq, int pri)
387 {
388 	struct ampintc_softc	*sc = ampintc;
389 	uint32_t		 prival;
390 
391 	/*
392 	 * We only use 16 (13 really) interrupt priorities,
393 	 * and a CPU is only required to implement bit 4-7 of each field
394 	 * so shift into the top bits.
395 	 * also low values are higher priority thus IPL_HIGH - pri
396 	 */
397 	prival = (IPL_HIGH - pri) << ICMIPMR_SH;
398 	bus_space_write_1(sc->sc_iot, sc->sc_d_ioh, ICD_IPRn(irq), prival);
399 }
400 
/*
 * Set the current interrupt priority level: record it in ci_cpl and
 * program the CPU interface priority mask to match.
 */
void
ampintc_setipl(int new)
{
	struct cpu_info		*ci = curcpu();
	struct ampintc_softc	*sc = ampintc;
	int			 psw;

	/* disable here is only to keep hardware in sync with ci->ci_cpl */
	psw = disable_interrupts();
	ci->ci_cpl = new;

	/* low values are higher priority thus IPL_HIGH - pri */
	bus_space_write_4(sc->sc_iot, sc->sc_p_ioh, ICPIPMR,
	    (IPL_HIGH - new) << ICMIPMR_SH);
	restore_interrupts(psw);
}
417 
418 void
419 ampintc_intr_enable(int irq)
420 {
421 	struct ampintc_softc	*sc = ampintc;
422 
423 #ifdef DEBUG
424 	printf("enable irq %d register %x bitmask %08x\n",
425 	    irq, ICD_ISERn(irq), 1 << IRQ_TO_REG32BIT(irq));
426 #endif
427 
428 	bus_space_write_4(sc->sc_iot, sc->sc_d_ioh, ICD_ISERn(irq),
429 	    1 << IRQ_TO_REG32BIT(irq));
430 }
431 
432 void
433 ampintc_intr_disable(int irq)
434 {
435 	struct ampintc_softc	*sc = ampintc;
436 
437 	bus_space_write_4(sc->sc_iot, sc->sc_d_ioh, ICD_ICERn(irq),
438 	    1 << IRQ_TO_REG32BIT(irq));
439 }
440 
441 void
442 ampintc_intr_config(int irqno, int type)
443 {
444 	struct ampintc_softc	*sc = ampintc;
445 	uint32_t		 ctrl;
446 
447 	ctrl = bus_space_read_4(sc->sc_iot, sc->sc_d_ioh, ICD_ICRn(irqno));
448 
449 	ctrl &= ~ICD_ICR_TRIG_MASK(irqno);
450 	if (type == IST_EDGE_RISING)
451 		ctrl |= ICD_ICR_TRIG_EDGE(irqno);
452 	else
453 		ctrl |= ICD_ICR_TRIG_LEVEL(irqno);
454 
455 	bus_space_write_4(sc->sc_iot, sc->sc_d_ioh, ICD_ICRn(irqno), ctrl);
456 }
457 
458 void
459 ampintc_calc_mask(void)
460 {
461 	struct ampintc_softc	*sc = ampintc;
462 	int			 irq;
463 
464 	for (irq = 0; irq < sc->sc_nintr; irq++)
465 		ampintc_calc_irq(sc, irq);
466 }
467 
/*
 * Recompute the cached min/max IPL of the handlers sharing one IRQ
 * and reprogram the hardware if the range changed: the line runs at
 * the lowest shared IPL and is masked up to the highest.
 */
void
ampintc_calc_irq(struct ampintc_softc *sc, int irq)
{
	struct cpu_info		*ci = sc->sc_handler[irq].iq_ci;
	struct intrhand		*ih;
	int			max = IPL_NONE;
	int			min = IPL_HIGH;

	/* Scan all handlers on this line for their IPL extremes. */
	TAILQ_FOREACH(ih, &sc->sc_handler[irq].iq_list, ih_list) {
		if (ih->ih_ipl > max)
			max = ih->ih_ipl;

		if (ih->ih_ipl < min)
			min = ih->ih_ipl;
	}

	/* No handlers at all: mark the line unused. */
	if (max == IPL_NONE)
		min = IPL_NONE;

	/* Nothing changed; avoid touching the hardware. */
	if (sc->sc_handler[irq].iq_irq_max == max &&
	    sc->sc_handler[irq].iq_irq_min == min)
		return;

	sc->sc_handler[irq].iq_irq_max = max;
	sc->sc_handler[irq].iq_irq_min = min;

	/* Enable interrupts at lower levels, clear -> enable */
	/* Set interrupt priority/enable */
	if (min != IPL_NONE) {
		ampintc_set_priority(irq, min);
		ampintc_intr_enable(irq);
		ampintc_route(irq, IRQ_ENABLE, ci);
	} else {
		ampintc_intr_disable(irq);
		ampintc_route(irq, IRQ_DISABLE, ci);
	}
}
505 
/*
 * Lower the IPL to 'new', running any soft interrupts that the new
 * level unmasks before reprogramming the hardware mask.
 */
void
ampintc_splx(int new)
{
	struct cpu_info *ci = curcpu();

	/* Deliver pending soft interrupts now unmasked at 'new'. */
	if (ci->ci_ipending & arm_smask[new])
		arm_do_pending_intr(new);

	ampintc_setipl(new);
}
516 
517 int
518 ampintc_spllower(int new)
519 {
520 	struct cpu_info *ci = curcpu();
521 	int old = ci->ci_cpl;
522 	ampintc_splx(new);
523 	return (old);
524 }
525 
/*
 * Raise the IPL to at least 'new'; never lowers it.  Returns the
 * previous level for the matching splx().
 */
int
ampintc_splraise(int new)
{
	struct cpu_info *ci = curcpu();
	int old;
	old = ci->ci_cpl;

	/*
	 * setipl must always be called because there is a race window
	 * where the variable is updated before the mask is set
	 * an interrupt occurs in that window without the mask always
	 * being set, the hardware might not get updated on the next
	 * splraise completely messing up spl protection.
	 */
	if (old > new)
		new = old;

	ampintc_setipl(new);

	return (old);
}
547 
548 
549 uint32_t
550 ampintc_iack(void)
551 {
552 	uint32_t intid;
553 	struct ampintc_softc	*sc = ampintc;
554 
555 	intid = bus_space_read_4(sc->sc_iot, sc->sc_p_ioh, ICPIAR);
556 
557 	return (intid);
558 }
559 
560 void
561 ampintc_eoi(uint32_t eoi)
562 {
563 	struct ampintc_softc	*sc = ampintc;
564 
565 	bus_space_write_4(sc->sc_iot, sc->sc_p_ioh, ICPEOIR, eoi);
566 }
567 
568 void
569 ampintc_route(int irq, int enable, struct cpu_info *ci)
570 {
571 	struct ampintc_softc	*sc = ampintc;
572 	uint8_t			 mask, val;
573 
574 	KASSERT(ci->ci_cpuid <= ICD_ICTR_CPU_M);
575 	mask = sc->sc_cpu_mask[ci->ci_cpuid];
576 
577 	val = bus_space_read_1(sc->sc_iot, sc->sc_d_ioh, ICD_IPTRn(irq));
578 	if (enable == IRQ_ENABLE)
579 		val |= mask;
580 	else
581 		val &= ~mask;
582 	bus_space_write_1(sc->sc_iot, sc->sc_d_ioh, ICD_IPTRn(irq), val);
583 }
584 
/*
 * Per-CPU bring-up: discover this CPU's GIC target mask (by scanning
 * the banked target registers of the first 32 interrupts) and route
 * every interrupt assigned to this CPU.
 */
void
ampintc_cpuinit(void)
{
	struct ampintc_softc	*sc = ampintc;
	int			 i, irq;

	/* XXX - this is the only cpu specific call to set this */
	if (sc->sc_cpu_mask[cpu_number()] == 0) {
		for (i = 0; i < 32; i++) {
			int cpumask =
			    bus_space_read_1(sc->sc_iot, sc->sc_d_ioh,
			        ICD_IPTRn(i));

			/* first non-zero banked target is our own mask */
			if (cpumask != 0) {
				sc->sc_cpu_mask[cpu_number()] = cpumask;
				break;
			}
		}
	}

	if (sc->sc_cpu_mask[cpu_number()] == 0)
		panic("could not determine cpu target mask");

	/* (Re)apply routing for every interrupt owned by this CPU. */
	for (irq = 0; irq < sc->sc_nintr; irq++) {
		if (sc->sc_handler[irq].iq_ci != curcpu())
			continue;
		if (sc->sc_handler[irq].iq_irq_min != IPL_NONE)
			ampintc_route(irq, IRQ_ENABLE, curcpu());
		else
			ampintc_route(irq, IRQ_DISABLE, curcpu());
	}
}
617 
/*
 * FDT ic_route hook: (re)route one established handler's interrupt to
 * a CPU, reprogramming the CPU interface enable, trigger config,
 * priority and enable state along the way.
 */
void
ampintc_route_irq(void *v, int enable, struct cpu_info *ci)
{
	struct ampintc_softc    *sc = ampintc;
	struct intrhand         *ih = v;

	bus_space_write_4(sc->sc_iot, sc->sc_p_ioh, ICPICR, 1);
	/* writes 0 to the config register — NOTE(review): this resets
	 * the trigger to level; presumably acceptable here, confirm */
	bus_space_write_4(sc->sc_iot, sc->sc_d_ioh, ICD_ICRn(ih->ih_irq), 0);
	if (enable) {
		ampintc_set_priority(ih->ih_irq,
		    sc->sc_handler[ih->ih_irq].iq_irq_min);
		ampintc_intr_enable(ih->ih_irq);
	}

	ampintc_route(ih->ih_irq, enable, ci);
}
634 
635 void
636 ampintc_intr_barrier(void *cookie)
637 {
638 	struct intrhand		*ih = cookie;
639 
640 	sched_barrier(ih->ih_ci);
641 }
642 
/*
 * Main interrupt dispatch: acknowledge the pending interrupt, raise
 * the IPL to the line's maximum, run every handler on its queue
 * (taking the kernel lock for non-MPSAFE handlers), then signal EOI
 * and restore the previous IPL.
 */
void
ampintc_irq_handler(void *frame)
{
	struct ampintc_softc	*sc = ampintc;
	struct intrhand		*ih;
	void			*arg;
	uint32_t		 iack_val;
	int			 irq, pri, s, handled;

	iack_val = ampintc_iack();
#ifdef DEBUG_INTC
	if (iack_val != 27)
		printf("irq  %d fired\n", iack_val);
	else {
		static int cnt = 0;
		if ((cnt++ % 100) == 0) {
			printf("irq  %d fired * _100\n", iack_val);
#ifdef DDB
			db_enter();
#endif
		}

	}
#endif

	irq = iack_val & ICPIAR_IRQ_M;

	/* 1023 is the GIC spurious-interrupt ID; count it and bail. */
	if (irq == 1023) {
		sc->sc_spur.ec_count++;
		return;
	}

	if (irq >= sc->sc_nintr)
		return;

	/* Mask up to the highest IPL of the handlers sharing the line. */
	pri = sc->sc_handler[irq].iq_irq_max;
	s = ampintc_splraise(pri);
	TAILQ_FOREACH(ih, &sc->sc_handler[irq].iq_list, ih_list) {
#ifdef MULTIPROCESSOR
		int need_lock;

		/* Non-MPSAFE handlers below IPL_SCHED need the big lock. */
		if (ih->ih_flags & IPL_MPSAFE)
			need_lock = 0;
		else
			need_lock = s < IPL_SCHED;

		if (need_lock)
			KERNEL_LOCK();
#endif

		/* Handlers without an argument get the trap frame. */
		if (ih->ih_arg != 0)
			arg = ih->ih_arg;
		else
			arg = frame;

		/* Run with interrupts enabled; the IPL mask protects us. */
		enable_interrupts();
		handled = ih->ih_func(arg);
		disable_interrupts();
		if (handled)
			ih->ih_count.ec_count++;

#ifdef MULTIPROCESSOR
		if (need_lock)
			KERNEL_UNLOCK();
#endif
	}
	/* EOI with the full iack value (includes source CPU for SGIs). */
	ampintc_eoi(iack_val);

	ampintc_splx(s);
}
713 
714 void *
715 ampintc_intr_establish_fdt(void *cookie, int *cell, int level,
716     struct cpu_info *ci, int (*func)(void *), void *arg, char *name)
717 {
718 	struct ampintc_softc	*sc = (struct ampintc_softc *)cookie;
719 	int			 irq;
720 	int			 type;
721 
722 	/* 2nd cell contains the interrupt number */
723 	irq = cell[1];
724 
725 	/* 1st cell contains type: 0 SPI (32-X), 1 PPI (16-31) */
726 	if (cell[0] == 0)
727 		irq += 32;
728 	else if (cell[0] == 1)
729 		irq += 16;
730 	else
731 		panic("%s: bogus interrupt type", sc->sc_sbus.sc_dev.dv_xname);
732 
733 	/* SPIs are only active-high level or low-to-high edge */
734 	if (cell[2] & 0x3)
735 		type = IST_EDGE_RISING;
736 	else
737 		type = IST_LEVEL_HIGH;
738 
739 	return ampintc_intr_establish(irq, type, level, ci, func, arg, name);
740 }
741 
/*
 * Establish an interrupt handler on hardware IRQ 'irqno' at 'level'.
 * Returns an opaque cookie for disestablish, or NULL if the line is
 * already bound to a different CPU than requested.
 */
void *
ampintc_intr_establish(int irqno, int type, int level, struct cpu_info *ci,
    int (*func)(void *), void *arg, char *name)
{
	struct ampintc_softc	*sc = ampintc;
	struct intrhand		*ih;
	int			 psw;

	if (irqno < 0 || irqno >= sc->sc_nintr)
		panic("ampintc_intr_establish: bogus irqnumber %d: %s",
		     irqno, name);

	if (ci == NULL)
		ci = &cpu_info_primary;

	/* SGI and PPI trigger modes are fixed by the architecture. */
	if (irqno < 16) {
		/* SGI are only EDGE */
		type = IST_EDGE_RISING;
	} else if (irqno < 32) {
		/* PPI are only LEVEL */
		type = IST_LEVEL_HIGH;
	}

	ih = malloc(sizeof(*ih), M_DEVBUF, M_WAITOK);
	ih->ih_func = func;
	ih->ih_arg = arg;
	ih->ih_ipl = level & IPL_IRQMASK;
	ih->ih_flags = level & IPL_FLAGMASK;
	ih->ih_irq = irqno;
	ih->ih_name = name;
	ih->ih_ci = ci;

	psw = disable_interrupts();

	/* All handlers sharing a line must run on the same CPU. */
	if (!TAILQ_EMPTY(&sc->sc_handler[irqno].iq_list) &&
	    sc->sc_handler[irqno].iq_ci != ci) {
		free(ih, M_DEVBUF, sizeof(*ih));
		restore_interrupts(psw);
		return NULL;
	}

	TAILQ_INSERT_TAIL(&sc->sc_handler[irqno].iq_list, ih, ih_list);
	sc->sc_handler[irqno].iq_ci = ci;

	if (name != NULL)
		evcount_attach(&ih->ih_count, name, &ih->ih_irq);

#ifdef DEBUG_INTC
	printf("ampintc_intr_establish irq %d level %d [%s]\n", irqno, level,
	    name);
#endif

	/* Program trigger mode and recompute priority/enable/routing. */
	ampintc_intr_config(irqno, type);
	ampintc_calc_mask();

	restore_interrupts(psw);
	return (ih);
}
800 
/*
 * Remove a previously established handler and recompute the line's
 * priority/enable state.
 */
void
ampintc_intr_disestablish(void *cookie)
{
	struct ampintc_softc	*sc = ampintc;
	struct intrhand		*ih = cookie;
	int			 psw;

#ifdef DEBUG_INTC
	printf("ampintc_intr_disestablish irq %d level %d [%s]\n",
	    ih->ih_irq, ih->ih_ipl, ih->ih_name);
#endif

	psw = disable_interrupts();

	TAILQ_REMOVE(&sc->sc_handler[ih->ih_irq].iq_list, ih, ih_list);
	if (ih->ih_name != NULL)
		evcount_detach(&ih->ih_count);
	free(ih, M_DEVBUF, sizeof(*ih));

	/* May disable the line if this was its last handler. */
	ampintc_calc_mask();

	restore_interrupts(psw);
}
824 
825 const char *
826 ampintc_intr_string(void *cookie)
827 {
828 	struct intrhand *ih = (struct intrhand *)cookie;
829 	static char irqstr[1 + sizeof("ampintc irq ") + 4];
830 
831 	snprintf(irqstr, sizeof irqstr, "ampintc irq %d", ih->ih_irq);
832 	return irqstr;
833 }
834 
835 /*
836  * GICv2m frame controller for MSI interrupts.
837  */
838 #define GICV2M_TYPER		0x008
839 #define  GICV2M_TYPER_SPI_BASE(x)	(((x) >> 16) & 0x3ff)
840 #define  GICV2M_TYPER_SPI_COUNT(x)	(((x) >> 0) & 0x3ff)
841 #define GICV2M_SETSPI_NS	0x040
842 
843 int	 ampintc_msi_match(struct device *, void *, void *);
844 void	 ampintc_msi_attach(struct device *, struct device *, void *);
845 void	*ampintc_intr_establish_msi(void *, uint64_t *, uint64_t *,
846 	    int , struct cpu_info *, int (*)(void *), void *, char *);
847 void	 ampintc_intr_disestablish_msi(void *);
848 void	 ampintc_intr_barrier_msi(void *);
849 
/* Per-device state for one GICv2M MSI frame. */
struct ampintc_msi_softc {
	struct device			 sc_dev;
	bus_space_tag_t			 sc_iot;
	bus_space_handle_t		 sc_ioh;
	paddr_t				 sc_addr;	/* physical address of the frame */
	int				 sc_bspi;	/* first SPI backing an MSI */
	int				 sc_nspi;	/* number of SPIs available */
	void				**sc_spi;	/* per-SPI established cookie, or NULL */
	struct interrupt_controller	 sc_ic;
};
860 
/* autoconf(9) glue for the "ampintcmsi" (GICv2M frame) device. */
struct cfattach	ampintcmsi_ca = {
	sizeof (struct ampintc_msi_softc), ampintc_msi_match, ampintc_msi_attach
};

struct cfdriver ampintcmsi_cd = {
	NULL, "ampintcmsi", DV_DULL
};
868 
869 int
870 ampintc_msi_match(struct device *parent, void *cfdata, void *aux)
871 {
872 	struct fdt_attach_args *faa = aux;
873 
874 	return OF_is_compatible(faa->fa_node, "arm,gic-v2m-frame");
875 }
876 
/*
 * Attach a GICv2M MSI frame: map its registers, determine the SPI
 * range it can signal, and register as an MSI-capable interrupt
 * controller.
 */
void
ampintc_msi_attach(struct device *parent, struct device *self, void *aux)
{
	struct ampintc_msi_softc *sc = (struct ampintc_msi_softc *)self;
	struct fdt_attach_args *faa = aux;
	uint32_t typer;

	sc->sc_iot = faa->fa_iot;
	if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr,
	    faa->fa_reg[0].size, 0, &sc->sc_ioh))
		panic("%s: bus_space_map failed!", __func__);

	/* XXX: Hack to retrieve the physical address (from a CPU PoV). */
	if (!pmap_extract(pmap_kernel(), sc->sc_ioh, &sc->sc_addr)) {
		printf(": cannot retrieve msi addr\n");
		return;
	}

	/* TYPER advertises the base SPI and how many SPIs the frame has. */
	typer = bus_space_read_4(sc->sc_iot, sc->sc_ioh, GICV2M_TYPER);
	sc->sc_bspi = GICV2M_TYPER_SPI_BASE(typer);
	sc->sc_nspi = GICV2M_TYPER_SPI_COUNT(typer);

	/* Device tree properties override the hardware values. */
	sc->sc_bspi = OF_getpropint(faa->fa_node,
	    "arm,msi-base-spi", sc->sc_bspi);
	sc->sc_nspi = OF_getpropint(faa->fa_node,
	    "arm,msi-num-spis", sc->sc_nspi);

	printf(": nspi %d\n", sc->sc_nspi);

	/* Allocation table: one cookie slot per SPI. */
	sc->sc_spi = mallocarray(sc->sc_nspi, sizeof(void *), M_DEVBUF,
	    M_WAITOK|M_ZERO);

	sc->sc_ic.ic_node = faa->fa_node;
	sc->sc_ic.ic_cookie = sc;
	sc->sc_ic.ic_establish_msi = ampintc_intr_establish_msi;
	sc->sc_ic.ic_disestablish = ampintc_intr_disestablish_msi;
	sc->sc_ic.ic_barrier = ampintc_intr_barrier_msi;
	arm_intr_register_fdt(&sc->sc_ic);
}
916 
/*
 * Allocate a free SPI from the frame and establish an edge-triggered
 * handler on it.  On success, *addr/*data are filled with the doorbell
 * address and payload the device must write to raise the MSI, and a
 * pointer to the slot (not the handler cookie itself) is returned so
 * disestablish/barrier can find and clear it.
 */
void *
ampintc_intr_establish_msi(void *self, uint64_t *addr, uint64_t *data,
    int level, struct cpu_info *ci, int (*func)(void *), void *arg, char *name)
{
	struct ampintc_msi_softc *sc = (struct ampintc_msi_softc *)self;
	void *cookie;
	int i;

	/* Find the first unused SPI slot. */
	for (i = 0; i < sc->sc_nspi; i++) {
		if (sc->sc_spi[i] != NULL)
			continue;

		cookie = ampintc_intr_establish(sc->sc_bspi + i,
		    IST_EDGE_RISING, level, ci, func, arg, name);
		if (cookie == NULL)
			return NULL;

		/* MSI doorbell: write the SPI number to SETSPI_NS. */
		*addr = sc->sc_addr + GICV2M_SETSPI_NS;
		*data = sc->sc_bspi + i;
		sc->sc_spi[i] = cookie;
		return &sc->sc_spi[i];
	}

	/* All SPIs in the frame are in use. */
	return NULL;
}
942 
943 void
944 ampintc_intr_disestablish_msi(void *cookie)
945 {
946 	ampintc_intr_disestablish(*(void **)cookie);
947 	*(void **)cookie = NULL;
948 }
949 
/*
 * Barrier for an MSI handler: forward to the wrapped SPI handler.
 */
void
ampintc_intr_barrier_msi(void *cookie)
{
	void **slot = (void **)cookie;

	ampintc_intr_barrier(*slot);
}
955 
956 #ifdef MULTIPROCESSOR
/*
 * DDB IPI handler: drop this CPU into the kernel debugger.
 */
int
ampintc_ipi_ddb(void *v)
{
	/* XXX */
#ifdef DDB
	db_enter();
#endif
	return 1;
}
966 
/*
 * NOP IPI handler.
 */
int
ampintc_ipi_nop(void *v)
{
	/* Nothing to do here, just enough to wake up from WFI */
	return 1;
}
973 
974 int
975 ampintc_ipi_combined(void *v)
976 {
977 	struct ampintc_softc *sc = (struct ampintc_softc *)v;
978 
979 	if (sc->sc_ipi_reason[cpu_number()] == ARM_IPI_DDB) {
980 		sc->sc_ipi_reason[cpu_number()] = ARM_IPI_NOP;
981 		return ampintc_ipi_ddb(v);
982 	} else {
983 		return ampintc_ipi_nop(v);
984 	}
985 }
986 
/*
 * Send an IPI of type 'id' (ARM_IPI_NOP or ARM_IPI_DDB) to CPU 'ci'
 * by writing the software-generated interrupt register.
 */
void
ampintc_send_ipi(struct cpu_info *ci, int id)
{
	struct ampintc_softc	*sc = ampintc;
	int sendmask;

	/* A NOP to ourselves is pointless; we are clearly awake. */
	if (ci == curcpu() && id == ARM_IPI_NOP)
		return;

	/* never overwrite IPI_DDB with IPI_NOP */
	if (id == ARM_IPI_DDB)
		sc->sc_ipi_reason[ci->ci_cpuid] = id;

	/* currently will only send to one cpu */
	/* target CPU list in bits 16-23, SGI number in bits 0-3 */
	sendmask = 1 << (16 + ci->ci_cpuid);
	sendmask |= sc->sc_ipi_num[id];

	bus_space_write_4(sc->sc_iot, sc->sc_d_ioh, ICD_SGIR, sendmask);
}
1006 #endif
1007