xref: /freebsd/sys/arm/arm/gic.c (revision 81ad6265)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 2011 The FreeBSD Foundation
5  * All rights reserved.
6  *
7  * Developed by Damjan Marion <damjan.marion@gmail.com>
8  *
9  * Based on OMAP4 GIC code by Ben Gray
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  * 3. The name of the company nor the name of the author may be used to
20  *    endorse or promote products derived from this software without specific
21  *    prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
27  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  */
35 
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD$");
38 
39 #include "opt_acpi.h"
40 #include "opt_ddb.h"
41 #include "opt_platform.h"
42 
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/bus.h>
46 #include <sys/kernel.h>
47 #include <sys/ktr.h>
48 #include <sys/module.h>
49 #include <sys/malloc.h>
50 #include <sys/rman.h>
51 #include <sys/pcpu.h>
52 #include <sys/proc.h>
53 #include <sys/cpuset.h>
54 #include <sys/lock.h>
55 #include <sys/mutex.h>
56 #include <sys/smp.h>
57 #include <sys/sched.h>
58 
59 #include <vm/vm.h>
60 #include <vm/pmap.h>
61 
62 #include <machine/bus.h>
63 #include <machine/intr.h>
64 #include <machine/smp.h>
65 
66 #ifdef FDT
67 #include <dev/fdt/fdt_intr.h>
68 #include <dev/ofw/ofw_bus_subr.h>
69 #endif
70 
71 #ifdef DEV_ACPI
72 #include <contrib/dev/acpica/include/acpi.h>
73 #include <dev/acpica/acpivar.h>
74 #endif
75 
76 #ifdef DDB
77 #include <ddb/ddb.h>
78 #include <ddb/db_lex.h>
79 #endif
80 
81 #include <arm/arm/gic.h>
82 #include <arm/arm/gic_common.h>
83 
84 #include "gic_if.h"
85 #include "pic_if.h"
86 #include "msi_if.h"
87 
88 /* We are using GICv2 register naming */
89 
90 /* Distributor Registers */
91 
92 /* CPU Registers */
93 #define GICC_CTLR		0x0000			/* v1 ICCICR */
94 #define GICC_PMR		0x0004			/* v1 ICCPMR */
95 #define GICC_BPR		0x0008			/* v1 ICCBPR */
96 #define GICC_IAR		0x000C			/* v1 ICCIAR */
97 #define GICC_EOIR		0x0010			/* v1 ICCEOIR */
98 #define GICC_RPR		0x0014			/* v1 ICCRPR */
99 #define GICC_HPPIR		0x0018			/* v1 ICCHPIR */
100 #define GICC_ABPR		0x001C			/* v1 ICCABPR */
101 #define GICC_IIDR		0x00FC			/* v1 ICCIIDR */
102 
103 /* TYPER Registers */
104 #define	GICD_TYPER_SECURITYEXT	0x400
105 #define	GIC_SUPPORT_SECEXT(_sc)	\
106     ((_sc->typer & GICD_TYPER_SECURITYEXT) == GICD_TYPER_SECURITYEXT)
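/* GICD_TYPER bit 10 is set when the GIC implements the Security Extensions. */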
107 
108 #ifndef	GIC_DEFAULT_ICFGR_INIT
109 #define	GIC_DEFAULT_ICFGR_INIT	0x00000000
110 #endif
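/*
 * A zero ICFGR value programs every 2-bit configuration field as
 * level-sensitive; the #ifndef above lets a platform supply a different
 * compile-time default.
 */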
111 
112 struct gic_irqsrc {
113 	struct intr_irqsrc	gi_isrc;
114 	uint32_t		gi_irq;
115 	enum intr_polarity	gi_pol;
116 	enum intr_trigger	gi_trig;
117 #define GI_FLAG_EARLY_EOI	(1 << 0)
118 #define GI_FLAG_MSI		(1 << 1) /* This interrupt source should only */
119 					 /* be used for MSI/MSI-X interrupts */
120 #define GI_FLAG_MSI_USED	(1 << 2) /* This irq is already allocated */
121 					 /* for an MSI/MSI-X interrupt */
122 	u_int			gi_flags;
123 };
124 
125 static u_int gic_irq_cpu;
126 static int arm_gic_bind_intr(device_t dev, struct intr_irqsrc *isrc);
127 
128 #ifdef SMP
129 static u_int sgi_to_ipi[GIC_LAST_SGI - GIC_FIRST_SGI + 1];
130 static u_int sgi_first_unused = GIC_FIRST_SGI;
131 #endif
132 
133 #define GIC_INTR_ISRC(sc, irq)	(&sc->gic_irqs[irq].gi_isrc)
134 
135 static struct resource_spec arm_gic_spec[] = {
136 	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },	/* Distributor registers */
137 	{ SYS_RES_MEMORY,	1,	RF_ACTIVE },	/* CPU Interrupt Intf. registers */
138 	{ SYS_RES_IRQ,	  0, RF_ACTIVE | RF_OPTIONAL }, /* Parent interrupt */
139 	{ -1, 0 }
140 };
141 
142 #if defined(__arm__) && defined(INVARIANTS)
143 static int gic_debug_spurious = 1;
144 #else
145 static int gic_debug_spurious = 0;
146 #endif
147 TUNABLE_INT("hw.gic.debug_spurious", &gic_debug_spurious);
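/*
 * The tunable can be set from loader.conf(5), e.g. hw.gic.debug_spurious=1,
 * to enable spurious-interrupt reporting on kernels without INVARIANTS.
 */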
148 
149 static u_int arm_gic_map[MAXCPU];
150 
151 static struct arm_gic_softc *gic_sc = NULL;
152 
153 /* CPU Interface */
154 #define	gic_c_read_4(_sc, _reg)		\
155     bus_read_4((_sc)->gic_res[GIC_RES_CPU], (_reg))
156 #define	gic_c_write_4(_sc, _reg, _val)		\
157     bus_write_4((_sc)->gic_res[GIC_RES_CPU], (_reg), (_val))
158 /* Distributor Interface */
159 #define	gic_d_read_4(_sc, _reg)		\
160     bus_read_4((_sc)->gic_res[GIC_RES_DIST], (_reg))
161 #define	gic_d_write_1(_sc, _reg, _val)		\
162     bus_write_1((_sc)->gic_res[GIC_RES_DIST], (_reg), (_val))
163 #define	gic_d_write_4(_sc, _reg, _val)		\
164     bus_write_4((_sc)->gic_res[GIC_RES_DIST], (_reg), (_val))
165 
166 static inline void
167 gic_irq_unmask(struct arm_gic_softc *sc, u_int irq)
168 {
169 
170 	gic_d_write_4(sc, GICD_ISENABLER(irq), GICD_I_MASK(irq));
171 }
172 
173 static inline void
174 gic_irq_mask(struct arm_gic_softc *sc, u_int irq)
175 {
176 
177 	gic_d_write_4(sc, GICD_ICENABLER(irq), GICD_I_MASK(irq));
178 }
179 
180 static uint8_t
181 gic_cpu_mask(struct arm_gic_softc *sc)
182 {
183 	uint32_t mask;
184 	int i;
185 
186 	/* Read the current cpuid mask by reading ITARGETSR{0..7} */
187 	for (i = 0; i < 8; i++) {
188 		mask = gic_d_read_4(sc, GICD_ITARGETSR(4 * i));
189 		if (mask != 0)
190 			break;
191 	}
192 	/* No mask found, assume we are on CPU interface 0 */
193 	if (mask == 0)
194 		return (1);
195 
196 	/* Collect the mask in the lower byte */
197 	mask |= mask >> 16;
198 	mask |= mask >> 8;
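	/*
	 * Example: if each byte of the banked ITARGETSR reads back as 0x04
	 * (CPU interface 2), the two OR-folds above leave 0x04 in the low
	 * byte, which is what we return and later store in arm_gic_map[].
	 */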
199 
200 	return (mask);
201 }
202 
203 #ifdef SMP
204 static void
205 arm_gic_init_secondary(device_t dev)
206 {
207 	struct arm_gic_softc *sc = device_get_softc(dev);
208 	u_int irq, cpu;
209 
210 	/* Set the mask so we can find this CPU to send it IPIs */
211 	cpu = PCPU_GET(cpuid);
212 	arm_gic_map[cpu] = gic_cpu_mask(sc);
213 
214 	for (irq = 0; irq < sc->nirqs; irq += 4)
215 		gic_d_write_4(sc, GICD_IPRIORITYR(irq), 0);
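	/*
	 * Priority 0 is the highest priority; together with the GICC_PMR
	 * value of 0xff programmed below, no interrupt is masked by priority.
	 */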
216 
217 	/* Set all the interrupts to be in Group 0 (secure) */
218 	for (irq = 0; GIC_SUPPORT_SECEXT(sc) && irq < sc->nirqs; irq += 32) {
219 		gic_d_write_4(sc, GICD_IGROUPR(irq), 0);
220 	}
221 
222 	/* Enable CPU interface */
223 	gic_c_write_4(sc, GICC_CTLR, 1);
224 
225 	/* Set priority mask register. */
226 	gic_c_write_4(sc, GICC_PMR, 0xff);
227 
228 	/* Enable interrupt distribution */
229 	gic_d_write_4(sc, GICD_CTLR, 0x01);
230 
231 	/* Unmask attached SGI interrupts. */
232 	for (irq = GIC_FIRST_SGI; irq <= GIC_LAST_SGI; irq++)
233 		if (intr_isrc_init_on_cpu(GIC_INTR_ISRC(sc, irq), cpu))
234 			gic_irq_unmask(sc, irq);
235 
236 	/* Unmask attached PPI interrupts. */
237 	for (irq = GIC_FIRST_PPI; irq <= GIC_LAST_PPI; irq++)
238 		if (intr_isrc_init_on_cpu(GIC_INTR_ISRC(sc, irq), cpu))
239 			gic_irq_unmask(sc, irq);
240 }
241 #endif /* SMP */
242 
243 static int
244 arm_gic_register_isrcs(struct arm_gic_softc *sc, uint32_t num)
245 {
246 	int error;
247 	uint32_t irq;
248 	struct gic_irqsrc *irqs;
249 	struct intr_irqsrc *isrc;
250 	const char *name;
251 
252 	irqs = malloc(num * sizeof(struct gic_irqsrc), M_DEVBUF,
253 	    M_WAITOK | M_ZERO);
254 
255 	name = device_get_nameunit(sc->gic_dev);
256 	for (irq = 0; irq < num; irq++) {
257 		irqs[irq].gi_irq = irq;
258 		irqs[irq].gi_pol = INTR_POLARITY_CONFORM;
259 		irqs[irq].gi_trig = INTR_TRIGGER_CONFORM;
260 
261 		isrc = &irqs[irq].gi_isrc;
262 		if (irq <= GIC_LAST_SGI) {
263 			error = intr_isrc_register(isrc, sc->gic_dev,
264 			    INTR_ISRCF_IPI, "%s,i%u", name, irq - GIC_FIRST_SGI);
265 		} else if (irq <= GIC_LAST_PPI) {
266 			error = intr_isrc_register(isrc, sc->gic_dev,
267 			    INTR_ISRCF_PPI, "%s,p%u", name, irq - GIC_FIRST_PPI);
268 		} else {
269 			error = intr_isrc_register(isrc, sc->gic_dev, 0,
270 			    "%s,s%u", name, irq - GIC_FIRST_SPI);
271 		}
272 		if (error != 0) {
273 			/* XXX call intr_isrc_deregister() */
274 			free(irqs, M_DEVBUF);
275 			return (error);
276 		}
277 	}
278 	sc->gic_irqs = irqs;
279 	sc->nirqs = num;
280 	return (0);
281 }
282 
283 static void
284 arm_gic_reserve_msi_range(device_t dev, u_int start, u_int count)
285 {
286 	struct arm_gic_softc *sc;
287 	int i;
288 
289 	sc = device_get_softc(dev);
290 
291 	KASSERT((start + count) <= sc->nirqs,
292 	    ("%s: Trying to allocate too many MSI IRQs: %d + %d > %d", __func__,
293 	    start, count, sc->nirqs));
294 	for (i = 0; i < count; i++) {
295 		KASSERT(sc->gic_irqs[start + i].gi_isrc.isrc_handlers == 0,
296 		    ("%s: MSI interrupt %d already has a handler", __func__,
297 		    start + i));
298 		KASSERT(sc->gic_irqs[start + i].gi_pol == INTR_POLARITY_CONFORM,
299 		    ("%s: MSI interrupt %d already has a polarity", __func__,
300 		    start + i));
301 		KASSERT(sc->gic_irqs[start + i].gi_trig == INTR_TRIGGER_CONFORM,
302 		    ("%s: MSI interrupt %d already has a trigger", __func__,
303 		    start + i));
304 		sc->gic_irqs[start + i].gi_pol = INTR_POLARITY_HIGH;
305 		sc->gic_irqs[start + i].gi_trig = INTR_TRIGGER_EDGE;
306 		sc->gic_irqs[start + i].gi_flags |= GI_FLAG_MSI;
307 	}
308 }
309 
310 int
311 arm_gic_attach(device_t dev)
312 {
313 	struct		arm_gic_softc *sc;
314 	int		i;
315 	uint32_t	icciidr, mask, nirqs;
316 
317 	if (gic_sc)
318 		return (ENXIO);
319 
320 	sc = device_get_softc(dev);
321 
322 	if (bus_alloc_resources(dev, arm_gic_spec, sc->gic_res)) {
323 		device_printf(dev, "could not allocate resources\n");
324 		return (ENXIO);
325 	}
326 
327 	sc->gic_dev = dev;
328 	gic_sc = sc;
329 
330 	/* Initialize mutex */
331 	mtx_init(&sc->mutex, "GIC lock", NULL, MTX_SPIN);
332 
333 	/* Disable interrupt forwarding to the CPU interface */
334 	gic_d_write_4(sc, GICD_CTLR, 0x00);
335 
336 	/* Get the number of interrupts */
337 	sc->typer = gic_d_read_4(sc, GICD_TYPER);
338 	nirqs = GICD_TYPER_I_NUM(sc->typer);
339 
340 	if (arm_gic_register_isrcs(sc, nirqs)) {
341 		device_printf(dev, "could not register irqs\n");
342 		goto cleanup;
343 	}
344 
345 	icciidr = gic_c_read_4(sc, GICC_IIDR);
346 	device_printf(dev,
347 	    "pn 0x%x, arch 0x%x, rev 0x%x, implementer 0x%x, irqs %u\n",
348 	    GICD_IIDR_PROD(icciidr), GICD_IIDR_VAR(icciidr),
349 	    GICD_IIDR_REV(icciidr), GICD_IIDR_IMPL(icciidr), sc->nirqs);
350 	sc->gic_iidr = icciidr;
351 
352 	/* Set all global interrupts to be level triggered, active low. */
353 	for (i = 32; i < sc->nirqs; i += 16) {
354 		gic_d_write_4(sc, GICD_ICFGR(i), GIC_DEFAULT_ICFGR_INIT);
355 	}
356 
357 	/* Disable all interrupts. */
358 	for (i = 32; i < sc->nirqs; i += 32) {
359 		gic_d_write_4(sc, GICD_ICENABLER(i), 0xFFFFFFFF);
360 	}
361 
362 	/* Find the current cpu mask */
363 	mask = gic_cpu_mask(sc);
364 	/* Set the mask so we can find this CPU to send it IPIs */
365 	arm_gic_map[PCPU_GET(cpuid)] = mask;
366 	/* Replicate the mask so each 32-bit ITARGETSR write targets this cpu */
367 	mask |= mask << 8;
368 	mask |= mask << 16;
369 
370 	for (i = 0; i < sc->nirqs; i += 4) {
371 		gic_d_write_4(sc, GICD_IPRIORITYR(i), 0);
372 		if (i > 32) {
373 			gic_d_write_4(sc, GICD_ITARGETSR(i), mask);
374 		}
375 	}
376 
377 	/* Set all the interrupts to be in Group 0 (secure) */
378 	for (i = 0; GIC_SUPPORT_SECEXT(sc) && i < sc->nirqs; i += 32) {
379 		gic_d_write_4(sc, GICD_IGROUPR(i), 0);
380 	}
381 
382 	/* Enable CPU interface */
383 	gic_c_write_4(sc, GICC_CTLR, 1);
384 
385 	/* Set priority mask register. */
386 	gic_c_write_4(sc, GICC_PMR, 0xff);
387 
388 	/* Enable interrupt distribution */
389 	gic_d_write_4(sc, GICD_CTLR, 0x01);
390 	return (0);
391 
392 cleanup:
393 	arm_gic_detach(dev);
394 	return (ENXIO);
395 }
396 
397 int
398 arm_gic_detach(device_t dev)
399 {
400 	struct arm_gic_softc *sc;
401 
402 	sc = device_get_softc(dev);
403 
404 	if (sc->gic_irqs != NULL)
405 		free(sc->gic_irqs, M_DEVBUF);
406 
407 	bus_release_resources(dev, arm_gic_spec, sc->gic_res);
408 
409 	return (0);
410 }
411 
412 static int
413 arm_gic_print_child(device_t bus, device_t child)
414 {
415 	struct resource_list *rl;
416 	int rv;
417 
418 	rv = bus_print_child_header(bus, child);
419 
420 	rl = BUS_GET_RESOURCE_LIST(bus, child);
421 	if (rl != NULL) {
422 		rv += resource_list_print_type(rl, "mem", SYS_RES_MEMORY,
423 		    "%#jx");
424 		rv += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd");
425 	}
426 
427 	rv += bus_print_child_footer(bus, child);
428 
429 	return (rv);
430 }
431 
432 static struct resource *
433 arm_gic_alloc_resource(device_t bus, device_t child, int type, int *rid,
434     rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
435 {
436 	struct arm_gic_softc *sc;
437 	struct resource_list_entry *rle;
438 	struct resource_list *rl;
439 	int j;
440 
441 	KASSERT(type == SYS_RES_MEMORY, ("Invalid resource type %x", type));
442 
443 	sc = device_get_softc(bus);
444 
445 	/*
446 	 * Request for the default allocation with a given rid: use resource
447 	 * list stored in the local device info.
448 	 */
449 	if (RMAN_IS_DEFAULT_RANGE(start, end)) {
450 		rl = BUS_GET_RESOURCE_LIST(bus, child);
451 
452 		if (type == SYS_RES_IOPORT)
453 			type = SYS_RES_MEMORY;
454 
455 		rle = resource_list_find(rl, type, *rid);
456 		if (rle == NULL) {
457 			if (bootverbose)
458 				device_printf(bus, "no default resources for "
459 				    "rid = %d, type = %d\n", *rid, type);
460 			return (NULL);
461 		}
462 		start = rle->start;
463 		end = rle->end;
464 		count = rle->count;
465 	}
466 
467 	/* Remap through ranges property */
468 	for (j = 0; j < sc->nranges; j++) {
469 		if (start >= sc->ranges[j].bus && end <
470 		    sc->ranges[j].bus + sc->ranges[j].size) {
471 			start -= sc->ranges[j].bus;
472 			start += sc->ranges[j].host;
473 			end -= sc->ranges[j].bus;
474 			end += sc->ranges[j].host;
475 			break;
476 		}
477 	}
478 	if (j == sc->nranges && sc->nranges != 0) {
479 		if (bootverbose)
480 			device_printf(bus, "Could not map resource "
481 			    "%#jx-%#jx\n", (uintmax_t)start, (uintmax_t)end);
482 
483 		return (NULL);
484 	}
485 
486 	return (bus_generic_alloc_resource(bus, child, type, rid, start, end,
487 	    count, flags));
488 }
489 
490 static int
491 arm_gic_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
492 {
493 	struct arm_gic_softc *sc;
494 
495 	sc = device_get_softc(dev);
496 
497 	switch(which) {
498 	case GIC_IVAR_HW_REV:
499 		KASSERT(GICD_IIDR_VAR(sc->gic_iidr) < 3,
500 		    ("arm_gic_read_ivar: Unknown IIDR revision %u (%.08x)",
501 		     GICD_IIDR_VAR(sc->gic_iidr), sc->gic_iidr));
502 		*result = GICD_IIDR_VAR(sc->gic_iidr);
503 		return (0);
504 	case GIC_IVAR_BUS:
505 		KASSERT(sc->gic_bus != GIC_BUS_UNKNOWN,
506 		    ("arm_gic_read_ivar: Unknown bus type"));
507 		KASSERT(sc->gic_bus <= GIC_BUS_MAX,
508 		    ("arm_gic_read_ivar: Invalid bus type %u", sc->gic_bus));
509 		*result = sc->gic_bus;
510 		return (0);
511 	}
512 
513 	return (ENOENT);
514 }
515 
516 static int
517 arm_gic_write_ivar(device_t dev, device_t child, int which, uintptr_t value)
518 {
519 	switch(which) {
520 	case GIC_IVAR_HW_REV:
521 	case GIC_IVAR_BUS:
522 		return (EINVAL);
523 	}
524 
525 	return (ENOENT);
526 }
527 
528 int
529 arm_gic_intr(void *arg)
530 {
531 	struct arm_gic_softc *sc = arg;
532 	struct gic_irqsrc *gi;
533 	uint32_t irq_active_reg, irq;
534 	struct trapframe *tf;
535 
536 	irq_active_reg = gic_c_read_4(sc, GICC_IAR);
537 	irq = irq_active_reg & 0x3FF;
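	/*
	 * Bits [9:0] of GICC_IAR carry the interrupt ID; the special IDs
	 * 1020-1023 (e.g. 1023 for "no pending interrupt") fail the
	 * irq >= sc->nirqs check below and are treated as spurious.
	 */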
538 
539 	/*
540 	 * 1. We do the EOI here because the value just read from the
541 	 *    interrupt acknowledge register must be used for it.  Another
542 	 *    approach would be to save that value in the interrupt source.
543 	 * 2. The EOI must be done on the same CPU where the interrupt fired,
544 	 *    so we must ensure that the interrupted thread does not migrate
545 	 *    to another CPU.
546 	 * 3. The EOI cannot be delayed by any preemption that could happen
547 	 *    in critical_exit(), used in the MI interrupt code when an
548 	 *    interrupt thread is scheduled.  See the next point.
549 	 * 4. IPI_RENDEZVOUS assumes that no preemption is permitted during
550 	 *    an action, and any use of critical_exit() could break this
551 	 *    assumption.  See the comments in smp_rendezvous_action().
552 	 * 5. We always return FILTER_HANDLED as this is an interrupt
553 	 *    controller dispatch function.  Otherwise, in the cascaded
554 	 *    interrupt case, the whole interrupt subtree would be masked.
555 	 */
556 
557 	if (irq >= sc->nirqs) {
558 		if (gic_debug_spurious)
559 			device_printf(sc->gic_dev,
560 			    "Spurious interrupt detected: last irq: %d on CPU%d\n",
561 			    sc->last_irq[PCPU_GET(cpuid)], PCPU_GET(cpuid));
562 		return (FILTER_HANDLED);
563 	}
564 
565 	tf = curthread->td_intr_frame;
566 dispatch_irq:
567 	gi = sc->gic_irqs + irq;
568 	/*
569 	 * Note that GIC_FIRST_SGI is zero and is omitted from the 'if' below,
570 	 * as the compiler complains that comparing a u_int >= 0 is always true.
571 	 */
572 	if (irq <= GIC_LAST_SGI) {
573 #ifdef SMP
574 		/* Call EOI for all IPI before dispatch. */
575 		gic_c_write_4(sc, GICC_EOIR, irq_active_reg);
576 		intr_ipi_dispatch(sgi_to_ipi[gi->gi_irq], tf);
577 		goto next_irq;
578 #else
579 		device_printf(sc->gic_dev, "SGI %u on UP system detected\n",
580 		    irq - GIC_FIRST_SGI);
581 		gic_c_write_4(sc, GICC_EOIR, irq_active_reg);
582 		goto next_irq;
583 #endif
584 	}
585 
586 	if (gic_debug_spurious)
587 		sc->last_irq[PCPU_GET(cpuid)] = irq;
588 	if ((gi->gi_flags & GI_FLAG_EARLY_EOI) == GI_FLAG_EARLY_EOI)
589 		gic_c_write_4(sc, GICC_EOIR, irq_active_reg);
590 
591 	if (intr_isrc_dispatch(&gi->gi_isrc, tf) != 0) {
592 		gic_irq_mask(sc, irq);
593 		if ((gi->gi_flags & GI_FLAG_EARLY_EOI) != GI_FLAG_EARLY_EOI)
594 			gic_c_write_4(sc, GICC_EOIR, irq_active_reg);
595 		device_printf(sc->gic_dev, "Stray irq %u disabled\n", irq);
596 	}
597 
598 next_irq:
599 	arm_irq_memory_barrier(irq);
600 	irq_active_reg = gic_c_read_4(sc, GICC_IAR);
601 	irq = irq_active_reg & 0x3FF;
602 	if (irq < sc->nirqs)
603 		goto dispatch_irq;
604 
605 	return (FILTER_HANDLED);
606 }
607 
608 static void
609 gic_config(struct arm_gic_softc *sc, u_int irq, enum intr_trigger trig,
610     enum intr_polarity pol)
611 {
612 	uint32_t reg;
613 	uint32_t mask;
614 
615 	if (irq < GIC_FIRST_SPI)
616 		return;
617 
618 	mtx_lock_spin(&sc->mutex);
619 
620 	reg = gic_d_read_4(sc, GICD_ICFGR(irq));
621 	mask = (reg >> 2*(irq % 16)) & 0x3;
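	/*
	 * Each GICD_ICFGR register describes 16 interrupts, two bits per
	 * interrupt: e.g. for irq 42, 42 % 16 == 10, so its configuration
	 * field occupies bits [21:20] of the register read above.
	 */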
622 
623 	if (pol == INTR_POLARITY_LOW) {
624 		mask &= ~GICD_ICFGR_POL_MASK;
625 		mask |= GICD_ICFGR_POL_LOW;
626 	} else if (pol == INTR_POLARITY_HIGH) {
627 		mask &= ~GICD_ICFGR_POL_MASK;
628 		mask |= GICD_ICFGR_POL_HIGH;
629 	}
630 
631 	if (trig == INTR_TRIGGER_LEVEL) {
632 		mask &= ~GICD_ICFGR_TRIG_MASK;
633 		mask |= GICD_ICFGR_TRIG_LVL;
634 	} else if (trig == INTR_TRIGGER_EDGE) {
635 		mask &= ~GICD_ICFGR_TRIG_MASK;
636 		mask |= GICD_ICFGR_TRIG_EDGE;
637 	}
638 
639 	/* Set mask */
640 	reg = reg & ~(0x3 << 2*(irq % 16));
641 	reg = reg | (mask << 2*(irq % 16));
642 	gic_d_write_4(sc, GICD_ICFGR(irq), reg);
643 
644 	mtx_unlock_spin(&sc->mutex);
645 }
646 
647 static int
648 gic_bind(struct arm_gic_softc *sc, u_int irq, cpuset_t *cpus)
649 {
650 	uint32_t cpu, end, mask;
651 
652 	end = min(mp_ncpus, 8);
653 	for (cpu = end; cpu < MAXCPU; cpu++)
654 		if (CPU_ISSET(cpu, cpus))
655 			return (EINVAL);
656 
657 	for (mask = 0, cpu = 0; cpu < end; cpu++)
658 		if (CPU_ISSET(cpu, cpus))
659 			mask |= arm_gic_map[cpu];
660 
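	/*
	 * GICv2 makes GICD_ITARGETSRn byte-accessible, so a single one-byte
	 * write at offset irq updates just this interrupt's CPU targets
	 * field; e.g. a mask of 0x03 routes the SPI to CPU interfaces 0 and 1.
	 */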
661 	gic_d_write_1(sc, GICD_ITARGETSR(0) + irq, mask);
662 	return (0);
663 }
664 
665 #ifdef FDT
666 static int
667 gic_map_fdt(device_t dev, u_int ncells, pcell_t *cells, u_int *irqp,
668     enum intr_polarity *polp, enum intr_trigger *trigp)
669 {
670 
671 	if (ncells == 1) {
672 		*irqp = cells[0];
673 		*polp = INTR_POLARITY_CONFORM;
674 		*trigp = INTR_TRIGGER_CONFORM;
675 		return (0);
676 	}
677 	if (ncells == 3) {
678 		u_int irq, tripol;
679 
680 		/*
681 		 * The 1st cell is the interrupt type:
682 		 *	0 = SPI
683 		 *	1 = PPI
684 		 * The 2nd cell contains the interrupt number:
685 		 *	[0 - 987] for SPI
686 		 *	[0 -  15] for PPI
687 		 * The 3rd cell is the flags, encoded as follows:
688 		 *   bits[3:0] trigger type and level flags
689 		 *	1 = low-to-high edge triggered
690 		 *	2 = high-to-low edge triggered
691 		 *	4 = active high level-sensitive
692 		 *	8 = active low level-sensitive
693 		 *   bits[15:8] PPI interrupt cpu mask
694 		 *	Each bit corresponds to one of the 8 possible CPUs
695 		 *	attached to the GIC.  A bit set to '1' indicates
696 		 *	the interrupt is wired to that CPU.
697 		 */
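		/*
		 * Example: the specifier <0 25 4> describes SPI 25 (GIC
		 * interrupt ID 32 + 25 = 57), active-high level-sensitive.
		 */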
698 		switch (cells[0]) {
699 		case 0:
700 			irq = GIC_FIRST_SPI + cells[1];
701 			/* SPI irq is checked later. */
702 			break;
703 		case 1:
704 			irq = GIC_FIRST_PPI + cells[1];
705 			if (irq > GIC_LAST_PPI) {
706 				device_printf(dev, "unsupported PPI interrupt "
707 				    "number %u\n", cells[1]);
708 				return (EINVAL);
709 			}
710 			break;
711 		default:
712 			device_printf(dev, "unsupported interrupt type "
713 			    "configuration %u\n", cells[0]);
714 			return (EINVAL);
715 		}
716 
717 		tripol = cells[2] & 0xff;
718 		if (tripol & 0xf0 || (tripol & FDT_INTR_LOW_MASK &&
719 		    cells[0] == 0))
720 			device_printf(dev, "unsupported trigger/polarity "
721 			    "configuration 0x%02x\n", tripol);
722 
723 		*irqp = irq;
724 		*polp = INTR_POLARITY_CONFORM;
725 		*trigp = tripol & FDT_INTR_EDGE_MASK ?
726 		    INTR_TRIGGER_EDGE : INTR_TRIGGER_LEVEL;
727 		return (0);
728 	}
729 	return (EINVAL);
730 }
731 #endif
732 
733 static int
734 gic_map_msi(device_t dev, struct intr_map_data_msi *msi_data, u_int *irqp,
735     enum intr_polarity *polp, enum intr_trigger *trigp)
736 {
737 	struct gic_irqsrc *gi;
738 
739 	/* Map a non-GICv2m MSI */
740 	gi = (struct gic_irqsrc *)msi_data->isrc;
741 	if (gi == NULL)
742 		return (ENXIO);
743 
744 	*irqp = gi->gi_irq;
745 
746 	/* MSI/MSI-X interrupts are always edge triggered with high polarity */
747 	*polp = INTR_POLARITY_HIGH;
748 	*trigp = INTR_TRIGGER_EDGE;
749 
750 	return (0);
751 }
752 
753 static int
754 gic_map_intr(device_t dev, struct intr_map_data *data, u_int *irqp,
755     enum intr_polarity *polp, enum intr_trigger *trigp)
756 {
757 	u_int irq;
758 	enum intr_polarity pol;
759 	enum intr_trigger trig;
760 	struct arm_gic_softc *sc;
761 	struct intr_map_data_msi *dam;
762 #ifdef FDT
763 	struct intr_map_data_fdt *daf;
764 #endif
765 #ifdef DEV_ACPI
766 	struct intr_map_data_acpi *daa;
767 #endif
768 
769 	sc = device_get_softc(dev);
770 	switch (data->type) {
771 #ifdef FDT
772 	case INTR_MAP_DATA_FDT:
773 		daf = (struct intr_map_data_fdt *)data;
774 		if (gic_map_fdt(dev, daf->ncells, daf->cells, &irq, &pol,
775 		    &trig) != 0)
776 			return (EINVAL);
777 		KASSERT(irq >= sc->nirqs ||
778 		    (sc->gic_irqs[irq].gi_flags & GI_FLAG_MSI) == 0,
779 		    ("%s: Attempting to map a MSI interrupt from FDT",
780 		    __func__));
781 		break;
782 #endif
783 #ifdef DEV_ACPI
784 	case INTR_MAP_DATA_ACPI:
785 		daa = (struct intr_map_data_acpi *)data;
786 		irq = daa->irq;
787 		pol = daa->pol;
788 		trig = daa->trig;
789 		break;
790 #endif
791 	case INTR_MAP_DATA_MSI:
792 		/* Non-GICv2m MSI */
793 		dam = (struct intr_map_data_msi *)data;
794 		if (gic_map_msi(dev, dam, &irq, &pol, &trig) != 0)
795 			return (EINVAL);
796 		break;
797 	default:
798 		return (ENOTSUP);
799 	}
800 
801 	if (irq >= sc->nirqs)
802 		return (EINVAL);
803 	if (pol != INTR_POLARITY_CONFORM && pol != INTR_POLARITY_LOW &&
804 	    pol != INTR_POLARITY_HIGH)
805 		return (EINVAL);
806 	if (trig != INTR_TRIGGER_CONFORM && trig != INTR_TRIGGER_EDGE &&
807 	    trig != INTR_TRIGGER_LEVEL)
808 		return (EINVAL);
809 
810 	*irqp = irq;
811 	if (polp != NULL)
812 		*polp = pol;
813 	if (trigp != NULL)
814 		*trigp = trig;
815 	return (0);
816 }
817 
818 static int
819 arm_gic_map_intr(device_t dev, struct intr_map_data *data,
820     struct intr_irqsrc **isrcp)
821 {
822 	int error;
823 	u_int irq;
824 	struct arm_gic_softc *sc;
825 
826 	error = gic_map_intr(dev, data, &irq, NULL, NULL);
827 	if (error == 0) {
828 		sc = device_get_softc(dev);
829 		*isrcp = GIC_INTR_ISRC(sc, irq);
830 	}
831 	return (error);
832 }
833 
834 static int
835 arm_gic_setup_intr(device_t dev, struct intr_irqsrc *isrc,
836     struct resource *res, struct intr_map_data *data)
837 {
838 	struct arm_gic_softc *sc = device_get_softc(dev);
839 	struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
840 	enum intr_trigger trig;
841 	enum intr_polarity pol;
842 
843 	if ((gi->gi_flags & GI_FLAG_MSI) == GI_FLAG_MSI) {
844 		/* GICv2m MSI */
845 		pol = gi->gi_pol;
846 		trig = gi->gi_trig;
847 		KASSERT(pol == INTR_POLARITY_HIGH,
848 		    ("%s: MSI interrupts must be active-high", __func__));
849 		KASSERT(trig == INTR_TRIGGER_EDGE,
850 		    ("%s: MSI interrupts must be edge triggered", __func__));
851 	} else if (data != NULL) {
852 		u_int irq;
853 
854 		/* Get config for resource. */
855 		if (gic_map_intr(dev, data, &irq, &pol, &trig) ||
856 		    gi->gi_irq != irq)
857 			return (EINVAL);
858 	} else {
859 		pol = INTR_POLARITY_CONFORM;
860 		trig = INTR_TRIGGER_CONFORM;
861 	}
862 
863 	/* Compare config if this is not first setup. */
864 	if (isrc->isrc_handlers != 0) {
865 		if ((pol != INTR_POLARITY_CONFORM && pol != gi->gi_pol) ||
866 		    (trig != INTR_TRIGGER_CONFORM && trig != gi->gi_trig))
867 			return (EINVAL);
868 		else
869 			return (0);
870 	}
871 
872 	/* For MSI/MSI-X we should have already configured these */
873 	if ((gi->gi_flags & GI_FLAG_MSI) == 0) {
874 		if (pol == INTR_POLARITY_CONFORM)
875 			pol = INTR_POLARITY_LOW;	/* just pick some */
876 		if (trig == INTR_TRIGGER_CONFORM)
877 			trig = INTR_TRIGGER_EDGE;	/* just pick some */
878 
879 		gi->gi_pol = pol;
880 		gi->gi_trig = trig;
881 
882 		/* Edge triggered interrupts need an early EOI sent */
883 		if (gi->gi_trig == INTR_TRIGGER_EDGE)
884 			gi->gi_flags |= GI_FLAG_EARLY_EOI;
885 	}
886 
887 	/*
888 	 * XXX - If a per-CPU interrupt is enabled after SMP has already
889 	 *       started, we need an IPI call that enables it on the other
890 	 *       CPUs.  Further, it's more complicated, as
891 	 *       pic_enable_source() and pic_disable_source() should act on
892 	 *       a per-CPU basis only.  Thus, it should be solved here somehow.
893 	 */
894 	if (isrc->isrc_flags & INTR_ISRCF_PPI)
895 		CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu);
896 
897 	gic_config(sc, gi->gi_irq, gi->gi_trig, gi->gi_pol);
898 	arm_gic_bind_intr(dev, isrc);
899 	return (0);
900 }
901 
902 static int
903 arm_gic_teardown_intr(device_t dev, struct intr_irqsrc *isrc,
904     struct resource *res, struct intr_map_data *data)
905 {
906 	struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
907 
908 	if (isrc->isrc_handlers == 0 && (gi->gi_flags & GI_FLAG_MSI) == 0) {
909 		gi->gi_pol = INTR_POLARITY_CONFORM;
910 		gi->gi_trig = INTR_TRIGGER_CONFORM;
911 	}
912 	return (0);
913 }
914 
915 static void
916 arm_gic_enable_intr(device_t dev, struct intr_irqsrc *isrc)
917 {
918 	struct arm_gic_softc *sc = device_get_softc(dev);
919 	struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
920 
921 	arm_irq_memory_barrier(gi->gi_irq);
922 	gic_irq_unmask(sc, gi->gi_irq);
923 }
924 
925 static void
926 arm_gic_disable_intr(device_t dev, struct intr_irqsrc *isrc)
927 {
928 	struct arm_gic_softc *sc = device_get_softc(dev);
929 	struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
930 
931 	gic_irq_mask(sc, gi->gi_irq);
932 }
933 
934 static void
935 arm_gic_pre_ithread(device_t dev, struct intr_irqsrc *isrc)
936 {
937 	struct arm_gic_softc *sc = device_get_softc(dev);
938 	struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
939 
940 	arm_gic_disable_intr(dev, isrc);
941 	gic_c_write_4(sc, GICC_EOIR, gi->gi_irq);
942 }
943 
944 static void
945 arm_gic_post_ithread(device_t dev, struct intr_irqsrc *isrc)
946 {
947 
948 	arm_irq_memory_barrier(0);
949 	arm_gic_enable_intr(dev, isrc);
950 }
951 
952 static void
953 arm_gic_post_filter(device_t dev, struct intr_irqsrc *isrc)
954 {
955 	struct arm_gic_softc *sc = device_get_softc(dev);
956 	struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
957 
958 	/* EOI for edge-triggered done earlier. */
959 	if ((gi->gi_flags & GI_FLAG_EARLY_EOI) == GI_FLAG_EARLY_EOI)
960 		return;
961 
962 	arm_irq_memory_barrier(0);
963 	gic_c_write_4(sc, GICC_EOIR, gi->gi_irq);
964 }
965 
966 static int
967 arm_gic_bind_intr(device_t dev, struct intr_irqsrc *isrc)
968 {
969 	struct arm_gic_softc *sc = device_get_softc(dev);
970 	struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
971 
972 	if (gi->gi_irq < GIC_FIRST_SPI)
973 		return (EINVAL);
974 
975 	if (CPU_EMPTY(&isrc->isrc_cpu)) {
976 		gic_irq_cpu = intr_irq_next_cpu(gic_irq_cpu, &all_cpus);
977 		CPU_SETOF(gic_irq_cpu, &isrc->isrc_cpu);
978 	}
979 	return (gic_bind(sc, gi->gi_irq, &isrc->isrc_cpu));
980 }
981 
982 #ifdef SMP
983 static void
984 arm_gic_ipi_send(device_t dev, struct intr_irqsrc *isrc, cpuset_t cpus,
985     u_int ipi)
986 {
987 	struct arm_gic_softc *sc = device_get_softc(dev);
988 	struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
989 	uint32_t val = 0, i;
990 
991 	for (i = 0; i < MAXCPU; i++)
992 		if (CPU_ISSET(i, &cpus))
993 			val |= arm_gic_map[i] << GICD_SGI_TARGET_SHIFT;
994 
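	/*
	 * GICD_SGIR: bits [23:16] carry the CPU target list and bits [3:0]
	 * the SGI number; e.g. if arm_gic_map[1] is 0x02, an IPI on SGI 2
	 * to CPU 1 writes 0x00020002 (target list filter 0 in bits [25:24]).
	 */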
995 	gic_d_write_4(sc, GICD_SGIR, val | gi->gi_irq);
996 }
997 
998 static int
999 arm_gic_ipi_setup(device_t dev, u_int ipi, struct intr_irqsrc **isrcp)
1000 {
1001 	struct intr_irqsrc *isrc;
1002 	struct arm_gic_softc *sc = device_get_softc(dev);
1003 
1004 	if (sgi_first_unused > GIC_LAST_SGI)
1005 		return (ENOSPC);
1006 
1007 	isrc = GIC_INTR_ISRC(sc, sgi_first_unused);
1008 	sgi_to_ipi[sgi_first_unused++] = ipi;
1009 
1010 	CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu);
1011 
1012 	*isrcp = isrc;
1013 	return (0);
1014 }
1015 #endif
1016 
1017 static int
1018 arm_gic_alloc_msi(device_t dev, u_int mbi_start, u_int mbi_count, int count,
1019     int maxcount, struct intr_irqsrc **isrc)
1020 {
1021 	struct arm_gic_softc *sc;
1022 	int i, irq, end_irq;
1023 	bool found;
1024 
1025 	KASSERT(powerof2(count), ("%s: bad count", __func__));
1026 	KASSERT(powerof2(maxcount), ("%s: bad maxcount", __func__));
1027 
1028 	sc = device_get_softc(dev);
1029 
1030 	mtx_lock_spin(&sc->mutex);
1031 
1032 	found = false;
1033 	for (irq = mbi_start; irq < mbi_start + mbi_count; irq++) {
1034 		/* Start on an aligned interrupt */
1035 		if ((irq & (maxcount - 1)) != 0)
1036 			continue;
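		/*
		 * The alignment matters because a PCI function derives each
		 * extra vector by modifying the low bits of the MSI data, so
		 * the block of SPIs must start on a maxcount boundary.
		 */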
1037 
1038 		/* Assume we found a valid range until shown otherwise */
1039 		found = true;
1040 
1041 		/* Check this range is valid */
1042 		for (end_irq = irq; end_irq != irq + count; end_irq++) {
1043 			/* No free interrupts */
1044 			if (end_irq == mbi_start + mbi_count) {
1045 				found = false;
1046 				break;
1047 			}
1048 
1049 			KASSERT((sc->gic_irqs[end_irq].gi_flags & GI_FLAG_MSI) != 0,
1050 			    ("%s: Non-MSI interrupt found", __func__));
1051 
1052 			/* This is already used */
1053 			if ((sc->gic_irqs[end_irq].gi_flags & GI_FLAG_MSI_USED) ==
1054 			    GI_FLAG_MSI_USED) {
1055 				found = false;
1056 				break;
1057 			}
1058 		}
1059 		if (found)
1060 			break;
1061 	}
1062 
1063 	/* Not enough interrupts were found */
1064 	if (!found || irq == mbi_start + mbi_count) {
1065 		mtx_unlock_spin(&sc->mutex);
1066 		return (ENXIO);
1067 	}
1068 
1069 	for (i = 0; i < count; i++) {
1070 		/* Mark the interrupt as used */
1071 		sc->gic_irqs[irq + i].gi_flags |= GI_FLAG_MSI_USED;
1072 	}
1073 	mtx_unlock_spin(&sc->mutex);
1074 
1075 	for (i = 0; i < count; i++)
1076 		isrc[i] = (struct intr_irqsrc *)&sc->gic_irqs[irq + i];
1077 
1078 	return (0);
1079 }
1080 
1081 static int
1082 arm_gic_release_msi(device_t dev, int count, struct intr_irqsrc **isrc)
1083 {
1084 	struct arm_gic_softc *sc;
1085 	struct gic_irqsrc *gi;
1086 	int i;
1087 
1088 	sc = device_get_softc(dev);
1089 
1090 	mtx_lock_spin(&sc->mutex);
1091 	for (i = 0; i < count; i++) {
1092 		gi = (struct gic_irqsrc *)isrc[i];
1093 
1094 		KASSERT((gi->gi_flags & GI_FLAG_MSI_USED) == GI_FLAG_MSI_USED,
1095 		    ("%s: Trying to release an unused MSI interrupt",
1096 		    __func__));
1097 
1098 		gi->gi_flags &= ~GI_FLAG_MSI_USED;
1099 	}
1100 	mtx_unlock_spin(&sc->mutex);
1101 
1102 	return (0);
1103 }
1104 
1105 static int
1106 arm_gic_alloc_msix(device_t dev, u_int mbi_start, u_int mbi_count,
1107     struct intr_irqsrc **isrc)
1108 {
1109 	struct arm_gic_softc *sc;
1110 	int irq;
1111 
1112 	sc = device_get_softc(dev);
1113 
1114 	mtx_lock_spin(&sc->mutex);
1115 	/* Find an unused interrupt */
1116 	for (irq = mbi_start; irq < mbi_start + mbi_count; irq++) {
1117 		KASSERT((sc->gic_irqs[irq].gi_flags & GI_FLAG_MSI) != 0,
1118 		    ("%s: Non-MSI interrupt found", __func__));
1119 		if ((sc->gic_irqs[irq].gi_flags & GI_FLAG_MSI_USED) == 0)
1120 			break;
1121 	}
1122 	/* No free interrupt was found */
1123 	if (irq == mbi_start + mbi_count) {
1124 		mtx_unlock_spin(&sc->mutex);
1125 		return (ENXIO);
1126 	}
1127 
1128 	/* Mark the interrupt as used */
1129 	sc->gic_irqs[irq].gi_flags |= GI_FLAG_MSI_USED;
1130 	mtx_unlock_spin(&sc->mutex);
1131 
1132 	*isrc = (struct intr_irqsrc *)&sc->gic_irqs[irq];
1133 
1134 	return (0);
1135 }
1136 
1137 static int
1138 arm_gic_release_msix(device_t dev, struct intr_irqsrc *isrc)
1139 {
1140 	struct arm_gic_softc *sc;
1141 	struct gic_irqsrc *gi;
1142 
1143 	sc = device_get_softc(dev);
1144 	gi = (struct gic_irqsrc *)isrc;
1145 
1146 	KASSERT((gi->gi_flags & GI_FLAG_MSI_USED) == GI_FLAG_MSI_USED,
1147 	    ("%s: Trying to release an unused MSI-X interrupt", __func__));
1148 
1149 	mtx_lock_spin(&sc->mutex);
1150 	gi->gi_flags &= ~GI_FLAG_MSI_USED;
1151 	mtx_unlock_spin(&sc->mutex);
1152 
1153 	return (0);
1154 }
1155 
1156 #ifdef DDB
1157 static void
1158 arm_gic_db_show(device_t dev)
1159 {
1160 	struct arm_gic_softc *sc = device_get_softc(dev);
1161 	uint32_t val;
1162 	u_int i;
1163 
1164 	db_printf("%s CPU registers:\n", device_get_nameunit(dev));
1165 	db_printf(" CTLR: %08x   PMR: %08x   BPR: %08x   RPR: %08x\n",
1166 	    gic_c_read_4(sc, GICC_CTLR), gic_c_read_4(sc, GICC_PMR),
1167 	    gic_c_read_4(sc, GICC_BPR), gic_c_read_4(sc, GICC_RPR));
1168 	db_printf("HPPIR: %08x  IIDR: %08x\n", gic_c_read_4(sc, GICC_HPPIR),
1169 	    gic_c_read_4(sc, GICC_IIDR));
1170 
1171 	db_printf("%s Distributor registers:\n", device_get_nameunit(dev));
1172 	db_printf(" CTLR: %08x TYPER: %08x  IIDR: %08x\n",
1173 	    gic_d_read_4(sc, GICD_CTLR), gic_d_read_4(sc, GICD_TYPER),
1174 	    gic_d_read_4(sc, GICD_IIDR));
1175 	for (i = 0; i < sc->nirqs; i++) {
1176 		if (i <= GIC_LAST_SGI)
1177 			db_printf("SGI %2u ", i);
1178 		else if (i <= GIC_LAST_PPI)
1179 			db_printf("PPI %2u ", i - GIC_FIRST_PPI);
1180 		else
1181 			db_printf("SPI %2u ", i - GIC_FIRST_SPI);
1182 		db_printf(" grp:%u",
1183 		    !!(gic_d_read_4(sc, GICD_IGROUPR(i)) & GICD_I_MASK(i)));
1184 		db_printf(" enable:%u pend:%u active:%u",
1185 		    !!(gic_d_read_4(sc, GICD_ISENABLER(i)) & GICD_I_MASK(i)),
1186 		    !!(gic_d_read_4(sc, GICD_ISPENDR(i)) & GICD_I_MASK(i)),
1187 		    !!(gic_d_read_4(sc, GICD_ISACTIVER(i)) & GICD_I_MASK(i)));
1188 		db_printf(" pri:%u",
1189 		    (gic_d_read_4(sc, GICD_IPRIORITYR(i)) >> 8 * (i & 0x3)) &
1190 		    0xff);
1191 		db_printf(" trg:%u",
1192 		    (gic_d_read_4(sc, GICD_ITARGETSR(i)) >> 8 * (i & 0x3)) &
1193 		    0xff);
1194 		val = gic_d_read_4(sc, GICD_ICFGR(i)) >> 2 * (i & 0xf);
1195 		if ((val & GICD_ICFGR_POL_MASK) == GICD_ICFGR_POL_LOW)
1196 			db_printf(" LO");
1197 		else
1198 			db_printf(" HI");
1199 		if ((val & GICD_ICFGR_TRIG_MASK) == GICD_ICFGR_TRIG_LVL)
1200 			db_printf(" LV");
1201 		else
1202 			db_printf(" ED");
1203 		db_printf("\n");
1204 	}
1205 }
1206 #endif
1207 
1208 static device_method_t arm_gic_methods[] = {
1209 	/* Bus interface */
1210 	DEVMETHOD(bus_print_child,	arm_gic_print_child),
1211 	DEVMETHOD(bus_add_child,	bus_generic_add_child),
1212 	DEVMETHOD(bus_alloc_resource,	arm_gic_alloc_resource),
1213 	DEVMETHOD(bus_release_resource,	bus_generic_release_resource),
1214 	DEVMETHOD(bus_activate_resource,bus_generic_activate_resource),
1215 	DEVMETHOD(bus_read_ivar,	arm_gic_read_ivar),
1216 	DEVMETHOD(bus_write_ivar,	arm_gic_write_ivar),
1217 
1218 	/* Interrupt controller interface */
1219 	DEVMETHOD(pic_disable_intr,	arm_gic_disable_intr),
1220 	DEVMETHOD(pic_enable_intr,	arm_gic_enable_intr),
1221 	DEVMETHOD(pic_map_intr,		arm_gic_map_intr),
1222 	DEVMETHOD(pic_setup_intr,	arm_gic_setup_intr),
1223 	DEVMETHOD(pic_teardown_intr,	arm_gic_teardown_intr),
1224 	DEVMETHOD(pic_post_filter,	arm_gic_post_filter),
1225 	DEVMETHOD(pic_post_ithread,	arm_gic_post_ithread),
1226 	DEVMETHOD(pic_pre_ithread,	arm_gic_pre_ithread),
1227 #ifdef SMP
1228 	DEVMETHOD(pic_bind_intr,	arm_gic_bind_intr),
1229 	DEVMETHOD(pic_init_secondary,	arm_gic_init_secondary),
1230 	DEVMETHOD(pic_ipi_send,		arm_gic_ipi_send),
1231 	DEVMETHOD(pic_ipi_setup,	arm_gic_ipi_setup),
1232 #endif
1233 
1234 	/* GIC */
1235 	DEVMETHOD(gic_reserve_msi_range, arm_gic_reserve_msi_range),
1236 	DEVMETHOD(gic_alloc_msi,	arm_gic_alloc_msi),
1237 	DEVMETHOD(gic_release_msi,	arm_gic_release_msi),
1238 	DEVMETHOD(gic_alloc_msix,	arm_gic_alloc_msix),
1239 	DEVMETHOD(gic_release_msix,	arm_gic_release_msix),
1240 #ifdef DDB
1241 	DEVMETHOD(gic_db_show,		arm_gic_db_show),
1242 #endif
1243 
1244 	{ 0, 0 }
1245 };
1246 
1247 DEFINE_CLASS_0(gic, arm_gic_driver, arm_gic_methods,
1248     sizeof(struct arm_gic_softc));
1249 
1250 #ifdef DDB
1251 DB_SHOW_COMMAND_FLAGS(gic, db_show_gic, CS_OWN)
1252 {
1253 	device_t dev;
1254 	int t;
1255 	bool valid;
1256 
1257 	valid = false;
1258 	t = db_read_token();
1259 	if (t == tIDENT) {
1260 		dev = device_lookup_by_name(db_tok_string);
1261 		valid = true;
1262 	}
1263 	db_skip_to_eol();
1264 	if (!valid) {
1265 		db_printf("usage: show gic <name>\n");
1266 		return;
1267 	}
1268 
1269 	if (dev == NULL) {
1270 		db_printf("device not found\n");
1271 		return;
1272 	}
1273 
1274 	GIC_DB_SHOW(dev);
1275 }
1276 
1277 DB_SHOW_ALL_COMMAND(gics, db_show_all_gics)
1278 {
1279 	devclass_t dc;
1280 	device_t dev;
1281 	int i;
1282 
1283 	dc = devclass_find("gic");
1284 	if (dc == NULL)
1285 		return;
1286 
1287 	for (i = 0; i < devclass_get_maxunit(dc); i++) {
1288 		dev = devclass_get_device(dc, i);
1289 		if (dev != NULL)
1290 			GIC_DB_SHOW(dev);
1291 		if (db_pager_quit)
1292 			break;
1293 	}
1294 }
1295 
1296 #endif
1297 
1298 /*
1299  * GICv2m support -- the GICv2 MSI/MSI-X controller.
1300  */
1301 
1302 #define	GICV2M_MSI_TYPER	0x008
1303 #define	 MSI_TYPER_SPI_BASE(x)	(((x) >> 16) & 0x3ff)
1304 #define	 MSI_TYPER_SPI_COUNT(x)	(((x) >> 0) & 0x3ff)
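/* e.g. a GICV2M_MSI_TYPER value of 0x00400020 advertises SPIs 64-95. */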
1305 #define	GICv2M_MSI_SETSPI_NS	0x040
1306 #define	GICV2M_MSI_IIDR		0xFCC
1307 
1308 int
1309 arm_gicv2m_attach(device_t dev)
1310 {
1311 	struct arm_gicv2m_softc *sc;
1312 	uint32_t typer;
1313 	int rid;
1314 
1315 	sc = device_get_softc(dev);
1316 
1317 	rid = 0;
1318 	sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1319 	    RF_ACTIVE);
1320 	if (sc->sc_mem == NULL) {
1321 		device_printf(dev, "Unable to allocate resources\n");
1322 		return (ENXIO);
1323 	}
1324 
1325 	typer = bus_read_4(sc->sc_mem, GICV2M_MSI_TYPER);
1326 	sc->sc_spi_start = MSI_TYPER_SPI_BASE(typer);
1327 	sc->sc_spi_count = MSI_TYPER_SPI_COUNT(typer);
1328 
1329 	/* Reserve these interrupts for MSI/MSI-X use */
1330 	GIC_RESERVE_MSI_RANGE(device_get_parent(dev), sc->sc_spi_start,
1331 	    sc->sc_spi_count);
1332 
1333 	intr_msi_register(dev, sc->sc_xref);
1334 
1335 	if (bootverbose)
1336 		device_printf(dev, "using spi %u to %u\n", sc->sc_spi_start,
1337 		    sc->sc_spi_start + sc->sc_spi_count - 1);
1338 
1339 	return (0);
1340 }
1341 
1342 static int
1343 arm_gicv2m_alloc_msi(device_t dev, device_t child, int count, int maxcount,
1344     device_t *pic, struct intr_irqsrc **srcs)
1345 {
1346 	struct arm_gicv2m_softc *sc;
1347 	int error;
1348 
1349 	sc = device_get_softc(dev);
1350 	error = GIC_ALLOC_MSI(device_get_parent(dev), sc->sc_spi_start,
1351 	    sc->sc_spi_count, count, maxcount, srcs);
1352 	if (error != 0)
1353 		return (error);
1354 
1355 	*pic = dev;
1356 	return (0);
1357 }
1358 
1359 static int
1360 arm_gicv2m_release_msi(device_t dev, device_t child, int count,
1361     struct intr_irqsrc **isrc)
1362 {
1363 	return (GIC_RELEASE_MSI(device_get_parent(dev), count, isrc));
1364 }
1365 
1366 static int
1367 arm_gicv2m_alloc_msix(device_t dev, device_t child, device_t *pic,
1368     struct intr_irqsrc **isrcp)
1369 {
1370 	struct arm_gicv2m_softc *sc;
1371 	int error;
1372 
1373 	sc = device_get_softc(dev);
1374 	error = GIC_ALLOC_MSIX(device_get_parent(dev), sc->sc_spi_start,
1375 	    sc->sc_spi_count, isrcp);
1376 	if (error != 0)
1377 		return (error);
1378 
1379 	*pic = dev;
1380 	return (0);
1381 }
1382 
1383 static int
1384 arm_gicv2m_release_msix(device_t dev, device_t child, struct intr_irqsrc *isrc)
1385 {
1386 	return (GIC_RELEASE_MSIX(device_get_parent(dev), isrc));
1387 }
1388 
1389 static int
1390 arm_gicv2m_map_msi(device_t dev, device_t child, struct intr_irqsrc *isrc,
1391     uint64_t *addr, uint32_t *data)
1392 {
1393 	struct arm_gicv2m_softc *sc = device_get_softc(dev);
1394 	struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
1395 
1396 	*addr = vtophys(rman_get_virtual(sc->sc_mem)) + GICv2M_MSI_SETSPI_NS;
1397 	*data = gi->gi_irq;
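	/*
	 * The device raises the interrupt by writing this SPI number to the
	 * MSI_SETSPI_NS doorbell register at the physical address above.
	 */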
1398 
1399 	return (0);
1400 }
1401 
1402 static device_method_t arm_gicv2m_methods[] = {
1403 	/* Device interface */
1404 	DEVMETHOD(device_attach,	arm_gicv2m_attach),
1405 
1406 	/* MSI/MSI-X */
1407 	DEVMETHOD(msi_alloc_msi,	arm_gicv2m_alloc_msi),
1408 	DEVMETHOD(msi_release_msi,	arm_gicv2m_release_msi),
1409 	DEVMETHOD(msi_alloc_msix,	arm_gicv2m_alloc_msix),
1410 	DEVMETHOD(msi_release_msix,	arm_gicv2m_release_msix),
1411 	DEVMETHOD(msi_map_msi,		arm_gicv2m_map_msi),
1412 
1413 	/* End */
1414 	DEVMETHOD_END
1415 };
1416 
1417 DEFINE_CLASS_0(gicv2m, arm_gicv2m_driver, arm_gicv2m_methods,
1418     sizeof(struct arm_gicv2m_softc));
1419