xref: /freebsd/sys/arm/arm/gic.c (revision 53b70c86)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 2011 The FreeBSD Foundation
5  * All rights reserved.
6  *
7  * Developed by Damjan Marion <damjan.marion@gmail.com>
8  *
9  * Based on OMAP4 GIC code by Ben Gray
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  * 3. The name of the company nor the name of the author may be used to
20  *    endorse or promote products derived from this software without specific
21  *    prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
27  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  */
35 
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD$");
38 
39 #include "opt_acpi.h"
40 #include "opt_platform.h"
41 
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/bus.h>
45 #include <sys/kernel.h>
46 #include <sys/ktr.h>
47 #include <sys/module.h>
48 #include <sys/malloc.h>
49 #include <sys/rman.h>
50 #include <sys/pcpu.h>
51 #include <sys/proc.h>
52 #include <sys/cpuset.h>
53 #include <sys/lock.h>
54 #include <sys/mutex.h>
55 #include <sys/smp.h>
56 #include <sys/sched.h>
57 
58 #include <vm/vm.h>
59 #include <vm/pmap.h>
60 
61 #include <machine/bus.h>
62 #include <machine/intr.h>
63 #include <machine/smp.h>
64 
65 #ifdef FDT
66 #include <dev/fdt/fdt_intr.h>
67 #include <dev/ofw/ofw_bus_subr.h>
68 #endif
69 
70 #ifdef DEV_ACPI
71 #include <contrib/dev/acpica/include/acpi.h>
72 #include <dev/acpica/acpivar.h>
73 #endif
74 
75 #include <arm/arm/gic.h>
76 #include <arm/arm/gic_common.h>
77 
78 #include "pic_if.h"
79 #include "msi_if.h"
80 
81 /* We are using GICv2 register naming */
82 
83 /* Distributor Registers */
84 
85 /* CPU Registers */
86 #define GICC_CTLR		0x0000			/* v1 ICCICR */
87 #define GICC_PMR		0x0004			/* v1 ICCPMR */
88 #define GICC_BPR		0x0008			/* v1 ICCBPR */
89 #define GICC_IAR		0x000C			/* v1 ICCIAR */
90 #define GICC_EOIR		0x0010			/* v1 ICCEOIR */
91 #define GICC_RPR		0x0014			/* v1 ICCRPR */
92 #define GICC_HPPIR		0x0018			/* v1 ICCHPIR */
93 #define GICC_ABPR		0x001C			/* v1 ICCABPR */
94 #define GICC_IIDR		0x00FC			/* v1 ICCIIDR*/
95 
96 /* TYPER Registers */
97 #define	GICD_TYPER_SECURITYEXT	0x400
98 #define	GIC_SUPPORT_SECEXT(_sc)	\
99     ((_sc->typer & GICD_TYPER_SECURITYEXT) == GICD_TYPER_SECURITYEXT)
100 
101 #ifndef	GIC_DEFAULT_ICFGR_INIT
102 #define	GIC_DEFAULT_ICFGR_INIT	0x00000000
103 #endif
104 
/*
 * Per-interrupt software state; one instance per interrupt line handled
 * by this controller.  gi_isrc must be the first member: the PIC methods
 * cast struct intr_irqsrc * directly to struct gic_irqsrc *.
 */
struct gic_irqsrc {
	struct intr_irqsrc	gi_isrc;	/* generic INTRNG source (first!) */
	uint32_t		gi_irq;		/* GIC interrupt number (SGI/PPI/SPI) */
	enum intr_polarity	gi_pol;		/* configured polarity or CONFORM */
	enum intr_trigger	gi_trig;	/* configured trigger or CONFORM */
#define GI_FLAG_EARLY_EOI	(1 << 0) /* write EOI before dispatch (edge) */
#define GI_FLAG_MSI		(1 << 1) /* This interrupt source should only */
					 /* be used for MSI/MSI-X interrupts */
#define GI_FLAG_MSI_USED	(1 << 2) /* This irq is already allocated */
					 /* for a MSI/MSI-X interrupt */
	u_int			gi_flags;
};
117 
118 static u_int gic_irq_cpu;
119 static int arm_gic_bind_intr(device_t dev, struct intr_irqsrc *isrc);
120 
121 #ifdef SMP
122 static u_int sgi_to_ipi[GIC_LAST_SGI - GIC_FIRST_SGI + 1];
123 static u_int sgi_first_unused = GIC_FIRST_SGI;
124 #endif
125 
126 #define GIC_INTR_ISRC(sc, irq)	(&sc->gic_irqs[irq].gi_isrc)
127 
128 static struct resource_spec arm_gic_spec[] = {
129 	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },	/* Distributor registers */
130 	{ SYS_RES_MEMORY,	1,	RF_ACTIVE },	/* CPU Interrupt Intf. registers */
131 	{ SYS_RES_IRQ,	  0, RF_ACTIVE | RF_OPTIONAL }, /* Parent interrupt */
132 	{ -1, 0 }
133 };
134 
135 #if defined(__arm__) && defined(INVARIANTS)
136 static int gic_debug_spurious = 1;
137 #else
138 static int gic_debug_spurious = 0;
139 #endif
140 TUNABLE_INT("hw.gic.debug_spurious", &gic_debug_spurious);
141 
142 static u_int arm_gic_map[MAXCPU];
143 
144 static struct arm_gic_softc *gic_sc = NULL;
145 
146 /* CPU Interface */
147 #define	gic_c_read_4(_sc, _reg)		\
148     bus_read_4((_sc)->gic_res[GIC_RES_CPU], (_reg))
149 #define	gic_c_write_4(_sc, _reg, _val)		\
150     bus_write_4((_sc)->gic_res[GIC_RES_CPU], (_reg), (_val))
151 /* Distributor Interface */
152 #define	gic_d_read_4(_sc, _reg)		\
153     bus_read_4((_sc)->gic_res[GIC_RES_DIST], (_reg))
154 #define	gic_d_write_1(_sc, _reg, _val)		\
155     bus_write_1((_sc)->gic_res[GIC_RES_DIST], (_reg), (_val))
156 #define	gic_d_write_4(_sc, _reg, _val)		\
157     bus_write_4((_sc)->gic_res[GIC_RES_DIST], (_reg), (_val))
158 
/*
 * Enable delivery of 'irq' by setting its bit in the distributor's
 * Interrupt Set-Enable register bank.
 */
static inline void
gic_irq_unmask(struct arm_gic_softc *sc, u_int irq)
{

	gic_d_write_4(sc, GICD_ISENABLER(irq), GICD_I_MASK(irq));
}
165 
/*
 * Disable delivery of 'irq' by setting its bit in the distributor's
 * Interrupt Clear-Enable register bank.
 */
static inline void
gic_irq_mask(struct arm_gic_softc *sc, u_int irq)
{

	gic_d_write_4(sc, GICD_ICENABLER(irq), GICD_I_MASK(irq));
}
172 
173 static uint8_t
174 gic_cpu_mask(struct arm_gic_softc *sc)
175 {
176 	uint32_t mask;
177 	int i;
178 
179 	/* Read the current cpuid mask by reading ITARGETSR{0..7} */
180 	for (i = 0; i < 8; i++) {
181 		mask = gic_d_read_4(sc, GICD_ITARGETSR(4 * i));
182 		if (mask != 0)
183 			break;
184 	}
185 	/* No mask found, assume we are on CPU interface 0 */
186 	if (mask == 0)
187 		return (1);
188 
189 	/* Collect the mask in the lower byte */
190 	mask |= mask >> 16;
191 	mask |= mask >> 8;
192 
193 	return (mask);
194 }
195 
196 #ifdef SMP
static void
arm_gic_init_secondary(device_t dev)
{
	struct arm_gic_softc *sc = device_get_softc(dev);
	u_int irq, cpu;

	/* Set the mask so we can find this CPU to send it IPIs */
	cpu = PCPU_GET(cpuid);
	arm_gic_map[cpu] = gic_cpu_mask(sc);

	/* Set every interrupt priority to the highest value (0). */
	for (irq = 0; irq < sc->nirqs; irq += 4)
		gic_d_write_4(sc, GICD_IPRIORITYR(irq), 0);

	/* Set all the interrupts to be in Group 0 (secure) */
	for (irq = 0; GIC_SUPPORT_SECEXT(sc) && irq < sc->nirqs; irq += 32) {
		gic_d_write_4(sc, GICD_IGROUPR(irq), 0);
	}

	/* Enable CPU interface */
	gic_c_write_4(sc, GICC_CTLR, 1);

	/* Set priority mask register: pass interrupts of all priorities. */
	gic_c_write_4(sc, GICC_PMR, 0xff);

	/* Enable interrupt distribution */
	gic_d_write_4(sc, GICD_CTLR, 0x01);

	/*
	 * Re-enable the banked per-CPU interrupts that already have
	 * handlers bound; SGI/PPI enable state is private to each CPU
	 * interface and so must be replayed here.
	 */
	/* Unmask attached SGI interrupts. */
	for (irq = GIC_FIRST_SGI; irq <= GIC_LAST_SGI; irq++)
		if (intr_isrc_init_on_cpu(GIC_INTR_ISRC(sc, irq), cpu))
			gic_irq_unmask(sc, irq);

	/* Unmask attached PPI interrupts. */
	for (irq = GIC_FIRST_PPI; irq <= GIC_LAST_PPI; irq++)
		if (intr_isrc_init_on_cpu(GIC_INTR_ISRC(sc, irq), cpu))
			gic_irq_unmask(sc, irq);
}
234 #endif /* SMP */
235 
236 static int
237 arm_gic_register_isrcs(struct arm_gic_softc *sc, uint32_t num)
238 {
239 	int error;
240 	uint32_t irq;
241 	struct gic_irqsrc *irqs;
242 	struct intr_irqsrc *isrc;
243 	const char *name;
244 
245 	irqs = malloc(num * sizeof(struct gic_irqsrc), M_DEVBUF,
246 	    M_WAITOK | M_ZERO);
247 
248 	name = device_get_nameunit(sc->gic_dev);
249 	for (irq = 0; irq < num; irq++) {
250 		irqs[irq].gi_irq = irq;
251 		irqs[irq].gi_pol = INTR_POLARITY_CONFORM;
252 		irqs[irq].gi_trig = INTR_TRIGGER_CONFORM;
253 
254 		isrc = &irqs[irq].gi_isrc;
255 		if (irq <= GIC_LAST_SGI) {
256 			error = intr_isrc_register(isrc, sc->gic_dev,
257 			    INTR_ISRCF_IPI, "%s,i%u", name, irq - GIC_FIRST_SGI);
258 		} else if (irq <= GIC_LAST_PPI) {
259 			error = intr_isrc_register(isrc, sc->gic_dev,
260 			    INTR_ISRCF_PPI, "%s,p%u", name, irq - GIC_FIRST_PPI);
261 		} else {
262 			error = intr_isrc_register(isrc, sc->gic_dev, 0,
263 			    "%s,s%u", name, irq - GIC_FIRST_SPI);
264 		}
265 		if (error != 0) {
266 			/* XXX call intr_isrc_deregister() */
267 			free(irqs, M_DEVBUF);
268 			return (error);
269 		}
270 	}
271 	sc->gic_irqs = irqs;
272 	sc->nirqs = num;
273 	return (0);
274 }
275 
276 static void
277 arm_gic_reserve_msi_range(device_t dev, u_int start, u_int count)
278 {
279 	struct arm_gic_softc *sc;
280 	int i;
281 
282 	sc = device_get_softc(dev);
283 
284 	KASSERT((start + count) < sc->nirqs,
285 	    ("%s: Trying to allocate too many MSI IRQs: %d + %d > %d", __func__,
286 	    start, count, sc->nirqs));
287 	for (i = 0; i < count; i++) {
288 		KASSERT(sc->gic_irqs[start + i].gi_isrc.isrc_handlers == 0,
289 		    ("%s: MSI interrupt %d already has a handler", __func__,
290 		    count + i));
291 		KASSERT(sc->gic_irqs[start + i].gi_pol == INTR_POLARITY_CONFORM,
292 		    ("%s: MSI interrupt %d already has a polarity", __func__,
293 		    count + i));
294 		KASSERT(sc->gic_irqs[start + i].gi_trig == INTR_TRIGGER_CONFORM,
295 		    ("%s: MSI interrupt %d already has a trigger", __func__,
296 		    count + i));
297 		sc->gic_irqs[start + i].gi_pol = INTR_POLARITY_HIGH;
298 		sc->gic_irqs[start + i].gi_trig = INTR_TRIGGER_EDGE;
299 		sc->gic_irqs[start + i].gi_flags |= GI_FLAG_MSI;
300 	}
301 }
302 
/*
 * Common attach routine shared by the FDT and ACPI front ends.  Maps
 * the distributor and CPU interface register windows, discovers the
 * number of interrupt lines, registers the interrupt sources, and
 * programs both interfaces into a sane initial state (all interrupts
 * disabled, highest priority, targeted at the boot CPU).
 */
int
arm_gic_attach(device_t dev)
{
	struct		arm_gic_softc *sc;
	int		i;
	uint32_t	icciidr, mask, nirqs;

	/* Only one GIC instance is supported (gic_sc is a singleton). */
	if (gic_sc)
		return (ENXIO);

	sc = device_get_softc(dev);

	if (bus_alloc_resources(dev, arm_gic_spec, sc->gic_res)) {
		device_printf(dev, "could not allocate resources\n");
		return (ENXIO);
	}

	sc->gic_dev = dev;
	gic_sc = sc;

	/* Initialize mutex */
	mtx_init(&sc->mutex, "GIC lock", NULL, MTX_SPIN);

	/* Disable interrupt forwarding to the CPU interface */
	gic_d_write_4(sc, GICD_CTLR, 0x00);

	/* Get the number of interrupts */
	sc->typer = gic_d_read_4(sc, GICD_TYPER);
	nirqs = GICD_TYPER_I_NUM(sc->typer);

	if (arm_gic_register_isrcs(sc, nirqs)) {
		device_printf(dev, "could not register irqs\n");
		goto cleanup;
	}

	icciidr = gic_c_read_4(sc, GICC_IIDR);
	device_printf(dev,
	    "pn 0x%x, arch 0x%x, rev 0x%x, implementer 0x%x irqs %u\n",
	    GICD_IIDR_PROD(icciidr), GICD_IIDR_VAR(icciidr),
	    GICD_IIDR_REV(icciidr), GICD_IIDR_IMPL(icciidr), sc->nirqs);
	sc->gic_iidr = icciidr;

	/* Set all global interrupts to be level triggered, active low. */
	for (i = 32; i < sc->nirqs; i += 16) {
		gic_d_write_4(sc, GICD_ICFGR(i), GIC_DEFAULT_ICFGR_INIT);
	}

	/* Disable all interrupts. */
	for (i = 32; i < sc->nirqs; i += 32) {
		gic_d_write_4(sc, GICD_ICENABLER(i), 0xFFFFFFFF);
	}

	/* Find the current cpu mask */
	mask = gic_cpu_mask(sc);
	/* Set the mask so we can find this CPU to send it IPIs */
	arm_gic_map[PCPU_GET(cpuid)] = mask;
	/* Set all four targets to this cpu */
	mask |= mask << 8;
	mask |= mask << 16;

	for (i = 0; i < sc->nirqs; i += 4) {
		gic_d_write_4(sc, GICD_IPRIORITYR(i), 0);
		/*
		 * NOTE(review): this skips GICD_ITARGETSR for i == 32, so
		 * SPIs 32-35 never have their target set here — looks like
		 * it should be "i >= 32" (SPIs start at 32; ITARGETSR0-7
		 * are read-only/banked).  Confirm against the GICv2 spec
		 * before changing.
		 */
		if (i > 32) {
			gic_d_write_4(sc, GICD_ITARGETSR(i), mask);
		}
	}

	/* Set all the interrupts to be in Group 0 (secure) */
	for (i = 0; GIC_SUPPORT_SECEXT(sc) && i < sc->nirqs; i += 32) {
		gic_d_write_4(sc, GICD_IGROUPR(i), 0);
	}

	/* Enable CPU interface */
	gic_c_write_4(sc, GICC_CTLR, 1);

	/* Set priority mask register. */
	gic_c_write_4(sc, GICC_PMR, 0xff);

	/* Enable interrupt distribution */
	gic_d_write_4(sc, GICD_CTLR, 0x01);
	return (0);

cleanup:
	arm_gic_detach(dev);
	return(ENXIO);
}
389 
390 int
391 arm_gic_detach(device_t dev)
392 {
393 	struct arm_gic_softc *sc;
394 
395 	sc = device_get_softc(dev);
396 
397 	if (sc->gic_irqs != NULL)
398 		free(sc->gic_irqs, M_DEVBUF);
399 
400 	bus_release_resources(dev, arm_gic_spec, sc->gic_res);
401 
402 	return (0);
403 }
404 
405 static int
406 arm_gic_print_child(device_t bus, device_t child)
407 {
408 	struct resource_list *rl;
409 	int rv;
410 
411 	rv = bus_print_child_header(bus, child);
412 
413 	rl = BUS_GET_RESOURCE_LIST(bus, child);
414 	if (rl != NULL) {
415 		rv += resource_list_print_type(rl, "mem", SYS_RES_MEMORY,
416 		    "%#jx");
417 		rv += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd");
418 	}
419 
420 	rv += bus_print_child_footer(bus, child);
421 
422 	return (rv);
423 }
424 
/*
 * Bus method: allocate a memory resource for a child (e.g. a GICv2m
 * frame), honouring the child's default resource list and remapping
 * bus addresses through the 'ranges' translation table.
 */
static struct resource *
arm_gic_alloc_resource(device_t bus, device_t child, int type, int *rid,
    rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
	struct arm_gic_softc *sc;
	struct resource_list_entry *rle;
	struct resource_list *rl;
	int j;

	KASSERT(type == SYS_RES_MEMORY, ("Invalid resoure type %x", type));

	sc = device_get_softc(bus);

	/*
	 * Request for the default allocation with a given rid: use resource
	 * list stored in the local device info.
	 */
	if (RMAN_IS_DEFAULT_RANGE(start, end)) {
		rl = BUS_GET_RESOURCE_LIST(bus, child);

		/*
		 * Redundant under INVARIANTS (the KASSERT above already
		 * rejects non-memory types) but live when KASSERT compiles
		 * out, so keep it: treat IOPORT requests as memory.
		 */
		if (type == SYS_RES_IOPORT)
			type = SYS_RES_MEMORY;

		rle = resource_list_find(rl, type, *rid);
		if (rle == NULL) {
			if (bootverbose)
				device_printf(bus, "no default resources for "
				    "rid = %d, type = %d\n", *rid, type);
			return (NULL);
		}
		start = rle->start;
		end = rle->end;
		count = rle->count;
	}

	/* Remap through ranges property */
	for (j = 0; j < sc->nranges; j++) {
		if (start >= sc->ranges[j].bus && end <
		    sc->ranges[j].bus + sc->ranges[j].size) {
			start -= sc->ranges[j].bus;
			start += sc->ranges[j].host;
			end -= sc->ranges[j].bus;
			end += sc->ranges[j].host;
			break;
		}
	}
	/* A non-empty ranges table with no match is a hard failure. */
	if (j == sc->nranges && sc->nranges != 0) {
		if (bootverbose)
			device_printf(bus, "Could not map resource "
			    "%#jx-%#jx\n", (uintmax_t)start, (uintmax_t)end);

		return (NULL);
	}

	return (bus_generic_alloc_resource(bus, child, type, rid, start, end,
	    count, flags));
}
482 
483 static int
484 arm_gic_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
485 {
486 	struct arm_gic_softc *sc;
487 
488 	sc = device_get_softc(dev);
489 
490 	switch(which) {
491 	case GIC_IVAR_HW_REV:
492 		KASSERT(GICD_IIDR_VAR(sc->gic_iidr) < 3,
493 		    ("arm_gic_read_ivar: Unknown IIDR revision %u (%.08x)",
494 		     GICD_IIDR_VAR(sc->gic_iidr), sc->gic_iidr));
495 		*result = GICD_IIDR_VAR(sc->gic_iidr);
496 		return (0);
497 	case GIC_IVAR_BUS:
498 		KASSERT(sc->gic_bus != GIC_BUS_UNKNOWN,
499 		    ("arm_gic_read_ivar: Unknown bus type"));
500 		KASSERT(sc->gic_bus <= GIC_BUS_MAX,
501 		    ("arm_gic_read_ivar: Invalid bus type %u", sc->gic_bus));
502 		*result = sc->gic_bus;
503 		return (0);
504 	case GIC_IVAR_MBI_START:
505 		*result = sc->sc_spi_start;
506 		return (0);
507 	case GIC_IVAR_MBI_COUNT:
508 		*result = sc->sc_spi_count;
509 		return (0);
510 	}
511 
512 	return (ENOENT);
513 }
514 
/*
 * Bus method: let a child (the GICv2m frame driver) configure the
 * message-based-interrupt SPI window.  MBI_START must be written
 * exactly once and before MBI_COUNT; writing MBI_COUNT reserves the
 * whole window for MSI/MSI-X use.  Revision and bus type are
 * read-only.
 */
static int
arm_gic_write_ivar(device_t dev, device_t child, int which, uintptr_t value)
{
	struct arm_gic_softc *sc;

	sc = device_get_softc(dev);

	switch(which) {
	case GIC_IVAR_HW_REV:
	case GIC_IVAR_BUS:
		return (EINVAL);
	case GIC_IVAR_MBI_START:
		/*
		 * GIC_IVAR_MBI_START must be set once and first. This allows
		 * us to reserve the registers when GIC_IVAR_MBI_COUNT is set.
		 */
		MPASS(sc->sc_spi_start == 0);
		MPASS(sc->sc_spi_count == 0);
		MPASS(value >= GIC_FIRST_SPI);
		MPASS(value < sc->nirqs);

		sc->sc_spi_start = value;
		return (0);
	case GIC_IVAR_MBI_COUNT:
		/* MBI_START must already have been written (and only once). */
		MPASS(sc->sc_spi_start != 0);
		MPASS(sc->sc_spi_count == 0);

		sc->sc_spi_count = value;
		sc->sc_spi_end = sc->sc_spi_start + sc->sc_spi_count;

		MPASS(sc->sc_spi_end <= sc->nirqs);

		/* Reserve these interrupts for MSI/MSI-X use */
		arm_gic_reserve_msi_range(dev, sc->sc_spi_start,
		    sc->sc_spi_count);

		return (0);
	}

	return (ENOENT);
}
556 
/*
 * Top-level interrupt filter: acknowledge and dispatch pending
 * interrupts until the GIC reports none remain.  EOI ordering is
 * delicate — see the numbered comment below before changing anything.
 */
int
arm_gic_intr(void *arg)
{
	struct arm_gic_softc *sc = arg;
	struct gic_irqsrc *gi;
	uint32_t irq_active_reg, irq;
	struct trapframe *tf;

	/* Acknowledge: low 10 bits of IAR are the interrupt ID. */
	irq_active_reg = gic_c_read_4(sc, GICC_IAR);
	irq = irq_active_reg & 0x3FF;

	/*
	 * 1. We do EOI here because recent read value from active interrupt
	 *    register must be used for it. Another approach is to save this
	 *    value into associated interrupt source.
	 * 2. EOI must be done on same CPU where interrupt has fired. Thus
	 *    we must ensure that interrupted thread does not migrate to
	 *    another CPU.
	 * 3. EOI cannot be delayed by any preemption which could happen on
	 *    critical_exit() used in MI intr code, when interrupt thread is
	 *    scheduled. See next point.
	 * 4. IPI_RENDEZVOUS assumes that no preemption is permitted during
	 *    an action and any use of critical_exit() could break this
	 *    assumption. See comments within smp_rendezvous_action().
	 * 5. We always return FILTER_HANDLED as this is an interrupt
	 *    controller dispatch function. Otherwise, in cascaded interrupt
	 *    case, the whole interrupt subtree would be masked.
	 */

	/* IDs >= nirqs (e.g. 1023) mean spurious / nothing pending. */
	if (irq >= sc->nirqs) {
		if (gic_debug_spurious)
			device_printf(sc->gic_dev,
			    "Spurious interrupt detected: last irq: %d on CPU%d\n",
			    sc->last_irq[PCPU_GET(cpuid)], PCPU_GET(cpuid));
		return (FILTER_HANDLED);
	}

	tf = curthread->td_intr_frame;
dispatch_irq:
	gi = sc->gic_irqs + irq;
	/*
	 * Note that GIC_FIRST_SGI is zero and is not used in 'if' statement
	 * as compiler complains that comparing u_int >= 0 is always true.
	 */
	if (irq <= GIC_LAST_SGI) {
#ifdef SMP
		/* Call EOI for all IPI before dispatch. */
		gic_c_write_4(sc, GICC_EOIR, irq_active_reg);
		intr_ipi_dispatch(sgi_to_ipi[gi->gi_irq], tf);
		goto next_irq;
#else
		device_printf(sc->gic_dev, "SGI %u on UP system detected\n",
		    irq - GIC_FIRST_SGI);
		gic_c_write_4(sc, GICC_EOIR, irq_active_reg);
		goto next_irq;
#endif
	}

	if (gic_debug_spurious)
		sc->last_irq[PCPU_GET(cpuid)] = irq;
	/* Edge-triggered sources are EOI'd before dispatch (see point 1). */
	if ((gi->gi_flags & GI_FLAG_EARLY_EOI) == GI_FLAG_EARLY_EOI)
		gic_c_write_4(sc, GICC_EOIR, irq_active_reg);

	/* Dispatch failure => no handler: mask the stray source. */
	if (intr_isrc_dispatch(&gi->gi_isrc, tf) != 0) {
		gic_irq_mask(sc, irq);
		if ((gi->gi_flags & GI_FLAG_EARLY_EOI) != GI_FLAG_EARLY_EOI)
			gic_c_write_4(sc, GICC_EOIR, irq_active_reg);
		device_printf(sc->gic_dev, "Stray irq %u disabled\n", irq);
	}

next_irq:
	/* Re-read IAR and loop while further interrupts are pending. */
	arm_irq_memory_barrier(irq);
	irq_active_reg = gic_c_read_4(sc, GICC_IAR);
	irq = irq_active_reg & 0x3FF;
	if (irq < sc->nirqs)
		goto dispatch_irq;

	return (FILTER_HANDLED);
}
636 
637 static void
638 gic_config(struct arm_gic_softc *sc, u_int irq, enum intr_trigger trig,
639     enum intr_polarity pol)
640 {
641 	uint32_t reg;
642 	uint32_t mask;
643 
644 	if (irq < GIC_FIRST_SPI)
645 		return;
646 
647 	mtx_lock_spin(&sc->mutex);
648 
649 	reg = gic_d_read_4(sc, GICD_ICFGR(irq));
650 	mask = (reg >> 2*(irq % 16)) & 0x3;
651 
652 	if (pol == INTR_POLARITY_LOW) {
653 		mask &= ~GICD_ICFGR_POL_MASK;
654 		mask |= GICD_ICFGR_POL_LOW;
655 	} else if (pol == INTR_POLARITY_HIGH) {
656 		mask &= ~GICD_ICFGR_POL_MASK;
657 		mask |= GICD_ICFGR_POL_HIGH;
658 	}
659 
660 	if (trig == INTR_TRIGGER_LEVEL) {
661 		mask &= ~GICD_ICFGR_TRIG_MASK;
662 		mask |= GICD_ICFGR_TRIG_LVL;
663 	} else if (trig == INTR_TRIGGER_EDGE) {
664 		mask &= ~GICD_ICFGR_TRIG_MASK;
665 		mask |= GICD_ICFGR_TRIG_EDGE;
666 	}
667 
668 	/* Set mask */
669 	reg = reg & ~(0x3 << 2*(irq % 16));
670 	reg = reg | (mask << 2*(irq % 16));
671 	gic_d_write_4(sc, GICD_ICFGR(irq), reg);
672 
673 	mtx_unlock_spin(&sc->mutex);
674 }
675 
676 static int
677 gic_bind(struct arm_gic_softc *sc, u_int irq, cpuset_t *cpus)
678 {
679 	uint32_t cpu, end, mask;
680 
681 	end = min(mp_ncpus, 8);
682 	for (cpu = end; cpu < MAXCPU; cpu++)
683 		if (CPU_ISSET(cpu, cpus))
684 			return (EINVAL);
685 
686 	for (mask = 0, cpu = 0; cpu < end; cpu++)
687 		if (CPU_ISSET(cpu, cpus))
688 			mask |= arm_gic_map[cpu];
689 
690 	gic_d_write_1(sc, GICD_ITARGETSR(0) + irq, mask);
691 	return (0);
692 }
693 
694 #ifdef FDT
/*
 * Decode an FDT interrupt specifier into a GIC interrupt number and
 * trigger mode.  One-cell specifiers carry a raw interrupt number;
 * three-cell specifiers follow the standard GIC binding described
 * below.  Note the polarity is always returned as CONFORM — only the
 * trigger mode is extracted from the flags cell.
 */
static int
gic_map_fdt(device_t dev, u_int ncells, pcell_t *cells, u_int *irqp,
    enum intr_polarity *polp, enum intr_trigger *trigp)
{

	if (ncells == 1) {
		*irqp = cells[0];
		*polp = INTR_POLARITY_CONFORM;
		*trigp = INTR_TRIGGER_CONFORM;
		return (0);
	}
	if (ncells == 3) {
		u_int irq, tripol;

		/*
		 * The 1st cell is the interrupt type:
		 *	0 = SPI
		 *	1 = PPI
		 * The 2nd cell contains the interrupt number:
		 *	[0 - 987] for SPI
		 *	[0 -  15] for PPI
		 * The 3rd cell is the flags, encoded as follows:
		 *   bits[3:0] trigger type and level flags
		 *	1 = low-to-high edge triggered
		 *	2 = high-to-low edge triggered
		 *	4 = active high level-sensitive
		 *	8 = active low level-sensitive
		 *   bits[15:8] PPI interrupt cpu mask
		 *	Each bit corresponds to each of the 8 possible cpus
		 *	attached to the GIC.  A bit set to '1' indicates
		 *	the interrupt is wired to that CPU.
		 */
		switch (cells[0]) {
		case 0:
			irq = GIC_FIRST_SPI + cells[1];
			/* SPI irq is checked later. */
			break;
		case 1:
			irq = GIC_FIRST_PPI + cells[1];
			if (irq > GIC_LAST_PPI) {
				device_printf(dev, "unsupported PPI interrupt "
				    "number %u\n", cells[1]);
				return (EINVAL);
			}
			break;
		default:
			device_printf(dev, "unsupported interrupt type "
			    "configuration %u\n", cells[0]);
			return (EINVAL);
		}

		/* Warn (but proceed) on combinations the GIC can't do. */
		tripol = cells[2] & 0xff;
		if (tripol & 0xf0 || (tripol & FDT_INTR_LOW_MASK &&
		    cells[0] == 0))
			device_printf(dev, "unsupported trigger/polarity "
			    "configuration 0x%02x\n", tripol);

		*irqp = irq;
		*polp = INTR_POLARITY_CONFORM;
		*trigp = tripol & FDT_INTR_EDGE_MASK ?
		    INTR_TRIGGER_EDGE : INTR_TRIGGER_LEVEL;
		return (0);
	}
	return (EINVAL);
}
760 #endif
761 
762 static int
763 gic_map_msi(device_t dev, struct intr_map_data_msi *msi_data, u_int *irqp,
764     enum intr_polarity *polp, enum intr_trigger *trigp)
765 {
766 	struct gic_irqsrc *gi;
767 
768 	/* Map a non-GICv2m MSI */
769 	gi = (struct gic_irqsrc *)msi_data->isrc;
770 	if (gi == NULL)
771 		return (ENXIO);
772 
773 	*irqp = gi->gi_irq;
774 
775 	/* MSI/MSI-X interrupts are always edge triggered with high polarity */
776 	*polp = INTR_POLARITY_HIGH;
777 	*trigp = INTR_TRIGGER_EDGE;
778 
779 	return (0);
780 }
781 
/*
 * Decode any supported interrupt map data (FDT specifier, ACPI
 * description, or MSI record) into an interrupt number plus polarity
 * and trigger mode, then validate the results.  polp/trigp may be NULL
 * when the caller only needs the interrupt number.
 */
static int
gic_map_intr(device_t dev, struct intr_map_data *data, u_int *irqp,
    enum intr_polarity *polp, enum intr_trigger *trigp)
{
	u_int irq;
	enum intr_polarity pol;
	enum intr_trigger trig;
	struct arm_gic_softc *sc;
	struct intr_map_data_msi *dam;
#ifdef FDT
	struct intr_map_data_fdt *daf;
#endif
#ifdef DEV_ACPI
	struct intr_map_data_acpi *daa;
#endif

	sc = device_get_softc(dev);
	switch (data->type) {
#ifdef FDT
	case INTR_MAP_DATA_FDT:
		daf = (struct intr_map_data_fdt *)data;
		if (gic_map_fdt(dev, daf->ncells, daf->cells, &irq, &pol,
		    &trig) != 0)
			return (EINVAL);
		/* MSI-reserved interrupts must never appear in FDT data. */
		KASSERT(irq >= sc->nirqs ||
		    (sc->gic_irqs[irq].gi_flags & GI_FLAG_MSI) == 0,
		    ("%s: Attempting to map a MSI interrupt from FDT",
		    __func__));
		break;
#endif
#ifdef DEV_ACPI
	case INTR_MAP_DATA_ACPI:
		daa = (struct intr_map_data_acpi *)data;
		irq = daa->irq;
		pol = daa->pol;
		trig = daa->trig;
		break;
#endif
	case INTR_MAP_DATA_MSI:
		/* Non-GICv2m MSI */
		dam = (struct intr_map_data_msi *)data;
		if (gic_map_msi(dev, dam, &irq, &pol, &trig) != 0)
			return (EINVAL);
		break;
	default:
		return (ENOTSUP);
	}

	/* Sanity-check the decoded values before handing them back. */
	if (irq >= sc->nirqs)
		return (EINVAL);
	if (pol != INTR_POLARITY_CONFORM && pol != INTR_POLARITY_LOW &&
	    pol != INTR_POLARITY_HIGH)
		return (EINVAL);
	if (trig != INTR_TRIGGER_CONFORM && trig != INTR_TRIGGER_EDGE &&
	    trig != INTR_TRIGGER_LEVEL)
		return (EINVAL);

	*irqp = irq;
	if (polp != NULL)
		*polp = pol;
	if (trigp != NULL)
		*trigp = trig;
	return (0);
}
846 
847 static int
848 arm_gic_map_intr(device_t dev, struct intr_map_data *data,
849     struct intr_irqsrc **isrcp)
850 {
851 	int error;
852 	u_int irq;
853 	struct arm_gic_softc *sc;
854 
855 	error = gic_map_intr(dev, data, &irq, NULL, NULL);
856 	if (error == 0) {
857 		sc = device_get_softc(dev);
858 		*isrcp = GIC_INTR_ISRC(sc, irq);
859 	}
860 	return (error);
861 }
862 
/*
 * PIC method: configure an interrupt source when a handler is attached.
 * MSI sources keep their fixed edge/high configuration; otherwise the
 * polarity/trigger come from the map data (if any).  A second setup on
 * an already-active source merely checks that the requested
 * configuration is compatible with the existing one.
 */
static int
arm_gic_setup_intr(device_t dev, struct intr_irqsrc *isrc,
    struct resource *res, struct intr_map_data *data)
{
	struct arm_gic_softc *sc = device_get_softc(dev);
	struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
	enum intr_trigger trig;
	enum intr_polarity pol;

	if ((gi->gi_flags & GI_FLAG_MSI) == GI_FLAG_MSI) {
		/* GICv2m MSI */
		pol = gi->gi_pol;
		trig = gi->gi_trig;
		KASSERT(pol == INTR_POLARITY_HIGH,
		    ("%s: MSI interrupts must be active-high", __func__));
		KASSERT(trig == INTR_TRIGGER_EDGE,
		    ("%s: MSI interrupts must be edge triggered", __func__));
	} else if (data != NULL) {
		u_int irq;

		/* Get config for resource. */
		if (gic_map_intr(dev, data, &irq, &pol, &trig) ||
		    gi->gi_irq != irq)
			return (EINVAL);
	} else {
		pol = INTR_POLARITY_CONFORM;
		trig = INTR_TRIGGER_CONFORM;
	}

	/* Compare config if this is not first setup. */
	if (isrc->isrc_handlers != 0) {
		if ((pol != INTR_POLARITY_CONFORM && pol != gi->gi_pol) ||
		    (trig != INTR_TRIGGER_CONFORM && trig != gi->gi_trig))
			return (EINVAL);
		else
			return (0);
	}

	/* For MSI/MSI-X we should have already configured these */
	if ((gi->gi_flags & GI_FLAG_MSI) == 0) {
		if (pol == INTR_POLARITY_CONFORM)
			pol = INTR_POLARITY_LOW;	/* just pick some */
		if (trig == INTR_TRIGGER_CONFORM)
			trig = INTR_TRIGGER_EDGE;	/* just pick some */

		gi->gi_pol = pol;
		gi->gi_trig = trig;

		/* Edge triggered interrupts need an early EOI sent */
		if (gi->gi_trig == INTR_TRIGGER_EDGE)
			gi->gi_flags |= GI_FLAG_EARLY_EOI;
	}

	/*
	 * XXX - In case that per CPU interrupt is going to be enabled in time
	 *       when SMP is already started, we need some IPI call which
	 *       enables it on others CPUs. Further, it's more complicated as
	 *       pic_enable_source() and pic_disable_source() should act on
	 *       per CPU basis only. Thus, it should be solved here somehow.
	 */
	if (isrc->isrc_flags & INTR_ISRCF_PPI)
		CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu);

	/* Program the hardware and route the interrupt to a CPU. */
	gic_config(sc, gi->gi_irq, gi->gi_trig, gi->gi_pol);
	arm_gic_bind_intr(dev, isrc);
	return (0);
}
930 
/*
 * PIC method: called when a handler is detached.  Once the last handler
 * is gone, reset the recorded configuration so the next consumer may
 * choose its own; MSI sources keep their fixed edge/high settings.
 */
static int
arm_gic_teardown_intr(device_t dev, struct intr_irqsrc *isrc,
    struct resource *res, struct intr_map_data *data)
{
	struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;

	if (isrc->isrc_handlers == 0 && (gi->gi_flags & GI_FLAG_MSI) == 0) {
		gi->gi_pol = INTR_POLARITY_CONFORM;
		gi->gi_trig = INTR_TRIGGER_CONFORM;
	}
	return (0);
}
943 
/*
 * PIC method: unmask the interrupt.  The memory barrier ensures prior
 * stores (e.g. device register writes) are visible before the
 * interrupt can fire.
 */
static void
arm_gic_enable_intr(device_t dev, struct intr_irqsrc *isrc)
{
	struct arm_gic_softc *sc = device_get_softc(dev);
	struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;

	arm_irq_memory_barrier(gi->gi_irq);
	gic_irq_unmask(sc, gi->gi_irq);
}
953 
/* PIC method: mask the interrupt at the distributor. */
static void
arm_gic_disable_intr(device_t dev, struct intr_irqsrc *isrc)
{
	struct arm_gic_softc *sc = device_get_softc(dev);
	struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;

	gic_irq_mask(sc, gi->gi_irq);
}
962 
/*
 * PIC method: run before scheduling the interrupt thread — mask the
 * source, then signal EOI so the CPU interface can deliver other
 * interrupts while the ithread runs.
 * NOTE(review): the EOI is written with gi_irq rather than the saved
 * IAR value; for SPIs the extra IAR bits are zero — confirm this holds
 * for every source that reaches here.
 */
static void
arm_gic_pre_ithread(device_t dev, struct intr_irqsrc *isrc)
{
	struct arm_gic_softc *sc = device_get_softc(dev);
	struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;

	arm_gic_disable_intr(dev, isrc);
	gic_c_write_4(sc, GICC_EOIR, gi->gi_irq);
}
972 
/*
 * PIC method: run after the interrupt thread finishes — re-enable the
 * source masked by arm_gic_pre_ithread().
 */
static void
arm_gic_post_ithread(device_t dev, struct intr_irqsrc *isrc)
{

	arm_irq_memory_barrier(0);
	arm_gic_enable_intr(dev, isrc);
}
980 
/*
 * PIC method: run after a filter handler completes — signal EOI unless
 * it was already sent before dispatch (edge-triggered sources).
 */
static void
arm_gic_post_filter(device_t dev, struct intr_irqsrc *isrc)
{
	struct arm_gic_softc *sc = device_get_softc(dev);
	struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;

	/* EOI for edge-triggered done earlier. */
	if ((gi->gi_flags & GI_FLAG_EARLY_EOI) == GI_FLAG_EARLY_EOI)
		return;

	arm_irq_memory_barrier(0);
	gic_c_write_4(sc, GICC_EOIR, gi->gi_irq);
}
994 
/*
 * PIC method: route an SPI to the CPUs in isrc_cpu.  SGIs and PPIs are
 * banked per CPU and cannot be re-routed.  When no explicit affinity
 * was requested, pick the next CPU round-robin via gic_irq_cpu.
 */
static int
arm_gic_bind_intr(device_t dev, struct intr_irqsrc *isrc)
{
	struct arm_gic_softc *sc = device_get_softc(dev);
	struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;

	if (gi->gi_irq < GIC_FIRST_SPI)
		return (EINVAL);

	if (CPU_EMPTY(&isrc->isrc_cpu)) {
		gic_irq_cpu = intr_irq_next_cpu(gic_irq_cpu, &all_cpus);
		CPU_SETOF(gic_irq_cpu, &isrc->isrc_cpu);
	}
	return (gic_bind(sc, gi->gi_irq, &isrc->isrc_cpu));
}
1010 
1011 #ifdef SMP
1012 static void
1013 arm_gic_ipi_send(device_t dev, struct intr_irqsrc *isrc, cpuset_t cpus,
1014     u_int ipi)
1015 {
1016 	struct arm_gic_softc *sc = device_get_softc(dev);
1017 	struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
1018 	uint32_t val = 0, i;
1019 
1020 	for (i = 0; i < MAXCPU; i++)
1021 		if (CPU_ISSET(i, &cpus))
1022 			val |= arm_gic_map[i] << GICD_SGI_TARGET_SHIFT;
1023 
1024 	gic_d_write_4(sc, GICD_SGIR, val | gi->gi_irq);
1025 }
1026 
/*
 * PIC method: allocate the next free SGI for IPI 'ipi'.  SGIs are
 * handed out in ascending order from the file-static sgi_first_unused
 * cursor; the sgi_to_ipi table lets arm_gic_intr() translate a fired
 * SGI back to its IPI number.  Fails with ENOSPC once all 16 SGIs are
 * taken.
 */
static int
arm_gic_ipi_setup(device_t dev, u_int ipi, struct intr_irqsrc **isrcp)
{
	struct intr_irqsrc *isrc;
	struct arm_gic_softc *sc = device_get_softc(dev);

	if (sgi_first_unused > GIC_LAST_SGI)
		return (ENOSPC);

	isrc = GIC_INTR_ISRC(sc, sgi_first_unused);
	sgi_to_ipi[sgi_first_unused++] = ipi;

	CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu);

	*isrcp = isrc;
	return (0);
}
1044 #endif
1045 
/*
 * MSI method: allocate 'count' consecutive interrupts from the reserved
 * SPI window for MSI/MSI-X, aligned to 'maxcount' (both powers of two,
 * as MSI requires).  Scans the window for a free, suitably aligned run,
 * marks it used under the softc lock, and returns the interrupt
 * sources.
 *
 * NOTE(review): sc->mutex is created with MTX_SPIN in arm_gic_attach()
 * but taken here with mtx_lock()/mtx_unlock() (the sleep-mutex API) —
 * confirm which lock discipline is intended.
 */
static int
arm_gic_alloc_msi(device_t dev, device_t child, int count, int maxcount,
    device_t *pic, struct intr_irqsrc **srcs)
{
	struct arm_gic_softc *sc;
	int i, irq, end_irq;
	bool found;

	KASSERT(powerof2(count), ("%s: bad count", __func__));
	KASSERT(powerof2(maxcount), ("%s: bad maxcount", __func__));

	sc = device_get_softc(dev);

	mtx_lock(&sc->mutex);

	found = false;
	for (irq = sc->sc_spi_start; irq < sc->sc_spi_end; irq++) {
		/* Start on an aligned interrupt */
		if ((irq & (maxcount - 1)) != 0)
			continue;

		/* Assume we found a valid range until shown otherwise */
		found = true;

		/* Check this range is valid */
		for (end_irq = irq; end_irq != irq + count; end_irq++) {
			/* No free interrupts */
			if (end_irq == sc->sc_spi_end) {
				found = false;
				break;
			}

			KASSERT((sc->gic_irqs[end_irq].gi_flags & GI_FLAG_MSI)!= 0,
			    ("%s: Non-MSI interrupt found", __func__));

			/* This is already used */
			if ((sc->gic_irqs[end_irq].gi_flags & GI_FLAG_MSI_USED) ==
			    GI_FLAG_MSI_USED) {
				found = false;
				break;
			}
		}
		if (found)
			break;
	}

	/* Not enough interrupts were found */
	if (!found || irq == sc->sc_spi_end) {
		mtx_unlock(&sc->mutex);
		return (ENXIO);
	}

	for (i = 0; i < count; i++) {
		/* Mark the interrupt as used */
		sc->gic_irqs[irq + i].gi_flags |= GI_FLAG_MSI_USED;
	}
	mtx_unlock(&sc->mutex);

	for (i = 0; i < count; i++)
		srcs[i] = (struct intr_irqsrc *)&sc->gic_irqs[irq + i];
	*pic = dev;

	return (0);
}
1110 
1111 static int
1112 arm_gic_release_msi(device_t dev, device_t child, int count,
1113     struct intr_irqsrc **isrc)
1114 {
1115 	struct arm_gic_softc *sc;
1116 	struct gic_irqsrc *gi;
1117 	int i;
1118 
1119 	sc = device_get_softc(dev);
1120 
1121 	mtx_lock(&sc->mutex);
1122 	for (i = 0; i < count; i++) {
1123 		gi = (struct gic_irqsrc *)isrc[i];
1124 
1125 		KASSERT((gi->gi_flags & GI_FLAG_MSI_USED) == GI_FLAG_MSI_USED,
1126 		    ("%s: Trying to release an unused MSI-X interrupt",
1127 		    __func__));
1128 
1129 		gi->gi_flags &= ~GI_FLAG_MSI_USED;
1130 	}
1131 	mtx_unlock(&sc->mutex);
1132 
1133 	return (0);
1134 }
1135 
1136 static int
1137 arm_gic_alloc_msix(device_t dev, device_t child, device_t *pic,
1138     struct intr_irqsrc **isrcp)
1139 {
1140 	struct arm_gic_softc *sc;
1141 	int irq;
1142 
1143 	sc = device_get_softc(dev);
1144 
1145 	mtx_lock(&sc->mutex);
1146 	/* Find an unused interrupt */
1147 	for (irq = sc->sc_spi_start; irq < sc->sc_spi_end; irq++) {
1148 		KASSERT((sc->gic_irqs[irq].gi_flags & GI_FLAG_MSI) != 0,
1149 		    ("%s: Non-MSI interrupt found", __func__));
1150 		if ((sc->gic_irqs[irq].gi_flags & GI_FLAG_MSI_USED) == 0)
1151 			break;
1152 	}
1153 	/* No free interrupt was found */
1154 	if (irq == sc->sc_spi_end) {
1155 		mtx_unlock(&sc->mutex);
1156 		return (ENXIO);
1157 	}
1158 
1159 	/* Mark the interrupt as used */
1160 	sc->gic_irqs[irq].gi_flags |= GI_FLAG_MSI_USED;
1161 	mtx_unlock(&sc->mutex);
1162 
1163 	*isrcp = (struct intr_irqsrc *)&sc->gic_irqs[irq];
1164 	*pic = dev;
1165 
1166 	return (0);
1167 }
1168 
1169 static int
1170 arm_gic_release_msix(device_t dev, device_t child, struct intr_irqsrc *isrc)
1171 {
1172 	struct arm_gic_softc *sc;
1173 	struct gic_irqsrc *gi;
1174 
1175 	sc = device_get_softc(dev);
1176 	gi = (struct gic_irqsrc *)isrc;
1177 
1178 	KASSERT((gi->gi_flags & GI_FLAG_MSI_USED) == GI_FLAG_MSI_USED,
1179 	    ("%s: Trying to release an unused MSI-X interrupt", __func__));
1180 
1181 	mtx_lock(&sc->mutex);
1182 	gi->gi_flags &= ~GI_FLAG_MSI_USED;
1183 	mtx_unlock(&sc->mutex);
1184 
1185 	return (0);
1186 }
1187 
1188 static device_method_t arm_gic_methods[] = {
1189 	/* Bus interface */
1190 	DEVMETHOD(bus_print_child,	arm_gic_print_child),
1191 	DEVMETHOD(bus_add_child,	bus_generic_add_child),
1192 	DEVMETHOD(bus_alloc_resource,	arm_gic_alloc_resource),
1193 	DEVMETHOD(bus_release_resource,	bus_generic_release_resource),
1194 	DEVMETHOD(bus_activate_resource,bus_generic_activate_resource),
1195 	DEVMETHOD(bus_read_ivar,	arm_gic_read_ivar),
1196 	DEVMETHOD(bus_write_ivar,	arm_gic_write_ivar),
1197 
1198 	/* Interrupt controller interface */
1199 	DEVMETHOD(pic_disable_intr,	arm_gic_disable_intr),
1200 	DEVMETHOD(pic_enable_intr,	arm_gic_enable_intr),
1201 	DEVMETHOD(pic_map_intr,		arm_gic_map_intr),
1202 	DEVMETHOD(pic_setup_intr,	arm_gic_setup_intr),
1203 	DEVMETHOD(pic_teardown_intr,	arm_gic_teardown_intr),
1204 	DEVMETHOD(pic_post_filter,	arm_gic_post_filter),
1205 	DEVMETHOD(pic_post_ithread,	arm_gic_post_ithread),
1206 	DEVMETHOD(pic_pre_ithread,	arm_gic_pre_ithread),
1207 #ifdef SMP
1208 	DEVMETHOD(pic_bind_intr,	arm_gic_bind_intr),
1209 	DEVMETHOD(pic_init_secondary,	arm_gic_init_secondary),
1210 	DEVMETHOD(pic_ipi_send,		arm_gic_ipi_send),
1211 	DEVMETHOD(pic_ipi_setup,	arm_gic_ipi_setup),
1212 #endif
1213 
1214 	/* MSI/MSI-X */
1215 	DEVMETHOD(msi_alloc_msi,	arm_gic_alloc_msi),
1216 	DEVMETHOD(msi_release_msi,	arm_gic_release_msi),
1217 	DEVMETHOD(msi_alloc_msix,	arm_gic_alloc_msix),
1218 	DEVMETHOD(msi_release_msix,	arm_gic_release_msix),
1219 
1220 	{ 0, 0 }
1221 };
1222 
1223 DEFINE_CLASS_0(gic, arm_gic_driver, arm_gic_methods,
1224     sizeof(struct arm_gic_softc));
1225 
1226 /*
1227  * GICv2m support -- the GICv2 MSI/MSI-X controller.
1228  */
1229 
1230 #define	GICV2M_MSI_TYPER	0x008
1231 #define	 MSI_TYPER_SPI_BASE(x)	(((x) >> 16) & 0x3ff)
1232 #define	 MSI_TYPER_SPI_COUNT(x)	(((x) >> 0) & 0x3ff)
1233 #define	GICv2M_MSI_SETSPI_NS	0x040
1234 #define	GICV2M_MSI_IIDR		0xFCC
1235 
1236 int
1237 arm_gicv2m_attach(device_t dev)
1238 {
1239 	struct arm_gicv2m_softc *sc;
1240 	uint32_t typer;
1241 	u_int spi_start, spi_count;
1242 	int rid;
1243 
1244 	sc = device_get_softc(dev);
1245 
1246 	rid = 0;
1247 	sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1248 	    RF_ACTIVE);
1249 	if (sc->sc_mem == NULL) {
1250 		device_printf(dev, "Unable to allocate resources\n");
1251 		return (ENXIO);
1252 	}
1253 
1254 	typer = bus_read_4(sc->sc_mem, GICV2M_MSI_TYPER);
1255 	spi_start = MSI_TYPER_SPI_BASE(typer);
1256 	spi_count = MSI_TYPER_SPI_COUNT(typer);
1257 	gic_set_mbi_start(dev, spi_start);
1258 	gic_set_mbi_count(dev, spi_count);
1259 
1260 	intr_msi_register(dev, sc->sc_xref);
1261 
1262 	if (bootverbose)
1263 		device_printf(dev, "using spi %u to %u\n", spi_start,
1264 		    spi_start + spi_count - 1);
1265 
1266 	return (0);
1267 }
1268 
1269 static int
1270 arm_gicv2m_alloc_msi(device_t dev, device_t child, int count, int maxcount,
1271     device_t *pic, struct intr_irqsrc **srcs)
1272 {
1273 	return (MSI_ALLOC_MSI(device_get_parent(dev), child, count, maxcount,
1274 	    pic, srcs));
1275 }
1276 
1277 static int
1278 arm_gicv2m_release_msi(device_t dev, device_t child, int count,
1279     struct intr_irqsrc **isrc)
1280 {
1281 	return (MSI_RELEASE_MSI(device_get_parent(dev), child, count, isrc));
1282 }
1283 
1284 static int
1285 arm_gicv2m_alloc_msix(device_t dev, device_t child, device_t *pic,
1286     struct intr_irqsrc **isrcp)
1287 {
1288 	return (MSI_ALLOC_MSIX(device_get_parent(dev), child, pic, isrcp));
1289 }
1290 
1291 static int
1292 arm_gicv2m_release_msix(device_t dev, device_t child, struct intr_irqsrc *isrc)
1293 {
1294 	return (MSI_RELEASE_MSIX(device_get_parent(dev), child, isrc));
1295 }
1296 
1297 static int
1298 arm_gicv2m_map_msi(device_t dev, device_t child, struct intr_irqsrc *isrc,
1299     uint64_t *addr, uint32_t *data)
1300 {
1301 	struct arm_gicv2m_softc *sc = device_get_softc(dev);
1302 	struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
1303 
1304 	*addr = vtophys(rman_get_virtual(sc->sc_mem)) + GICv2M_MSI_SETSPI_NS;
1305 	*data = gi->gi_irq;
1306 
1307 	return (0);
1308 }
1309 
/*
 * Method table for the GICv2m MSI frame driver.  Probe methods are
 * supplied by the bus-specific front-ends elsewhere -- TODO confirm.
 */
static device_method_t arm_gicv2m_methods[] = {
	/* Device interface */
	DEVMETHOD(device_attach,	arm_gicv2m_attach),

	/* MSI/MSI-X */
	DEVMETHOD(msi_alloc_msi,	arm_gicv2m_alloc_msi),
	DEVMETHOD(msi_release_msi,	arm_gicv2m_release_msi),
	DEVMETHOD(msi_alloc_msix,	arm_gicv2m_alloc_msix),
	DEVMETHOD(msi_release_msix,	arm_gicv2m_release_msix),
	DEVMETHOD(msi_map_msi,		arm_gicv2m_map_msi),

	/* End */
	DEVMETHOD_END
};

DEFINE_CLASS_0(gicv2m, arm_gicv2m_driver, arm_gicv2m_methods,
    sizeof(struct arm_gicv2m_softc));
1327