xref: /freebsd/sys/arm/arm/gic.c (revision 42249ef2)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 2011 The FreeBSD Foundation
5  * All rights reserved.
6  *
7  * Developed by Damjan Marion <damjan.marion@gmail.com>
8  *
9  * Based on OMAP4 GIC code by Ben Gray
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  * 3. The name of the company nor the name of the author may be used to
20  *    endorse or promote products derived from this software without specific
21  *    prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
27  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  */
35 
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD$");
38 
39 #include "opt_acpi.h"
40 #include "opt_platform.h"
41 
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/bus.h>
45 #include <sys/kernel.h>
46 #include <sys/ktr.h>
47 #include <sys/module.h>
48 #include <sys/malloc.h>
49 #include <sys/rman.h>
50 #include <sys/pcpu.h>
51 #include <sys/proc.h>
52 #include <sys/cpuset.h>
53 #include <sys/lock.h>
54 #include <sys/mutex.h>
55 #include <sys/smp.h>
56 #include <sys/sched.h>
57 
58 #include <vm/vm.h>
59 #include <vm/pmap.h>
60 
61 #include <machine/bus.h>
62 #include <machine/intr.h>
63 #include <machine/smp.h>
64 
65 #ifdef FDT
66 #include <dev/fdt/fdt_intr.h>
67 #include <dev/ofw/ofw_bus_subr.h>
68 #endif
69 
70 #ifdef DEV_ACPI
71 #include <contrib/dev/acpica/include/acpi.h>
72 #include <dev/acpica/acpivar.h>
73 #endif
74 
75 #include <arm/arm/gic.h>
76 #include <arm/arm/gic_common.h>
77 
78 #include "pic_if.h"
79 #include "msi_if.h"
80 
81 /* We are using GICv2 register naming */
82 
83 /* Distributor Registers */
84 
85 /* CPU Registers */
86 #define GICC_CTLR		0x0000			/* v1 ICCICR */
87 #define GICC_PMR		0x0004			/* v1 ICCPMR */
88 #define GICC_BPR		0x0008			/* v1 ICCBPR */
89 #define GICC_IAR		0x000C			/* v1 ICCIAR */
90 #define GICC_EOIR		0x0010			/* v1 ICCEOIR */
91 #define GICC_RPR		0x0014			/* v1 ICCRPR */
92 #define GICC_HPPIR		0x0018			/* v1 ICCHPIR */
93 #define GICC_ABPR		0x001C			/* v1 ICCABPR */
94 #define GICC_IIDR		0x00FC			/* v1 ICCIIDR*/
95 
96 /* TYPER Registers */
97 #define	GICD_TYPER_SECURITYEXT	0x400
98 #define	GIC_SUPPORT_SECEXT(_sc)	\
99     ((_sc->typer & GICD_TYPER_SECURITYEXT) == GICD_TYPER_SECURITYEXT)
100 
101 
102 #ifndef	GIC_DEFAULT_ICFGR_INIT
103 #define	GIC_DEFAULT_ICFGR_INIT	0x00000000
104 #endif
105 
106 struct gic_irqsrc {
107 	struct intr_irqsrc	gi_isrc;
108 	uint32_t		gi_irq;
109 	enum intr_polarity	gi_pol;
110 	enum intr_trigger	gi_trig;
111 #define GI_FLAG_EARLY_EOI	(1 << 0)
112 #define GI_FLAG_MSI		(1 << 1) /* This interrupt source should only */
113 					 /* be used for MSI/MSI-X interrupts */
114 #define GI_FLAG_MSI_USED	(1 << 2) /* This irq is already allocated */
115 					 /* for a MSI/MSI-X interrupt */
116 	u_int			gi_flags;
117 };
118 
119 static u_int gic_irq_cpu;
120 static int arm_gic_bind_intr(device_t dev, struct intr_irqsrc *isrc);
121 
122 #ifdef SMP
123 static u_int sgi_to_ipi[GIC_LAST_SGI - GIC_FIRST_SGI + 1];
124 static u_int sgi_first_unused = GIC_FIRST_SGI;
125 #endif
126 
127 #define GIC_INTR_ISRC(sc, irq)	(&sc->gic_irqs[irq].gi_isrc)
128 
129 static struct resource_spec arm_gic_spec[] = {
130 	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },	/* Distributor registers */
131 	{ SYS_RES_MEMORY,	1,	RF_ACTIVE },	/* CPU Interrupt Intf. registers */
132 	{ SYS_RES_IRQ,	  0, RF_ACTIVE | RF_OPTIONAL }, /* Parent interrupt */
133 	{ -1, 0 }
134 };
135 
136 
137 #if defined(__arm__) && defined(INVARIANTS)
138 static int gic_debug_spurious = 1;
139 #else
140 static int gic_debug_spurious = 0;
141 #endif
142 TUNABLE_INT("hw.gic.debug_spurious", &gic_debug_spurious);
143 
144 static u_int arm_gic_map[MAXCPU];
145 
146 static struct arm_gic_softc *gic_sc = NULL;
147 
148 #define	gic_c_read_4(_sc, _reg)		\
149     bus_space_read_4((_sc)->gic_c_bst, (_sc)->gic_c_bsh, (_reg))
150 #define	gic_c_write_4(_sc, _reg, _val)		\
151     bus_space_write_4((_sc)->gic_c_bst, (_sc)->gic_c_bsh, (_reg), (_val))
152 #define	gic_d_read_4(_sc, _reg)		\
153     bus_space_read_4((_sc)->gic_d_bst, (_sc)->gic_d_bsh, (_reg))
154 #define	gic_d_write_1(_sc, _reg, _val)		\
155     bus_space_write_1((_sc)->gic_d_bst, (_sc)->gic_d_bsh, (_reg), (_val))
156 #define	gic_d_write_4(_sc, _reg, _val)		\
157     bus_space_write_4((_sc)->gic_d_bst, (_sc)->gic_d_bsh, (_reg), (_val))
158 
/*
 * Enable forwarding of 'irq' to a CPU interface by setting its bit in
 * the distributor's set-enable (ISENABLER) register bank.
 */
static inline void
gic_irq_unmask(struct arm_gic_softc *sc, u_int irq)
{

	gic_d_write_4(sc, GICD_ISENABLER(irq), GICD_I_MASK(irq));
}
165 
/*
 * Disable forwarding of 'irq' by setting its bit in the distributor's
 * clear-enable (ICENABLER) register bank.
 */
static inline void
gic_irq_mask(struct arm_gic_softc *sc, u_int irq)
{

	gic_d_write_4(sc, GICD_ICENABLER(irq), GICD_I_MASK(irq));
}
172 
173 static uint8_t
174 gic_cpu_mask(struct arm_gic_softc *sc)
175 {
176 	uint32_t mask;
177 	int i;
178 
179 	/* Read the current cpuid mask by reading ITARGETSR{0..7} */
180 	for (i = 0; i < 8; i++) {
181 		mask = gic_d_read_4(sc, GICD_ITARGETSR(4 * i));
182 		if (mask != 0)
183 			break;
184 	}
185 	/* No mask found, assume we are on CPU interface 0 */
186 	if (mask == 0)
187 		return (1);
188 
189 	/* Collect the mask in the lower byte */
190 	mask |= mask >> 16;
191 	mask |= mask >> 8;
192 
193 	return (mask);
194 }
195 
#ifdef SMP
/*
 * Per-CPU GIC initialization, run on each application processor during
 * SMP startup.  The SGI/PPI enable and priority registers are banked
 * per CPU interface, so each CPU must program its own copies; the
 * distributor-global writes repeated here are harmless.
 */
static void
arm_gic_init_secondary(device_t dev)
{
	struct arm_gic_softc *sc = device_get_softc(dev);
	u_int irq, cpu;

	/* Set the mask so we can find this CPU to send it IPIs */
	cpu = PCPU_GET(cpuid);
	arm_gic_map[cpu] = gic_cpu_mask(sc);

	/* Set every interrupt priority to 0 (the highest). */
	for (irq = 0; irq < sc->nirqs; irq += 4)
		gic_d_write_4(sc, GICD_IPRIORITYR(irq), 0);

	/* Set all the interrupts to be in Group 0 (secure) */
	for (irq = 0; GIC_SUPPORT_SECEXT(sc) && irq < sc->nirqs; irq += 32) {
		gic_d_write_4(sc, GICD_IGROUPR(irq), 0);
	}

	/* Enable CPU interface */
	gic_c_write_4(sc, GICC_CTLR, 1);

	/* Set priority mask register.  0xff accepts all priorities. */
	gic_c_write_4(sc, GICC_PMR, 0xff);

	/* Enable interrupt distribution */
	gic_d_write_4(sc, GICD_CTLR, 0x01);

	/* Unmask attached SGI interrupts. */
	for (irq = GIC_FIRST_SGI; irq <= GIC_LAST_SGI; irq++)
		if (intr_isrc_init_on_cpu(GIC_INTR_ISRC(sc, irq), cpu))
			gic_irq_unmask(sc, irq);

	/* Unmask attached PPI interrupts. */
	for (irq = GIC_FIRST_PPI; irq <= GIC_LAST_PPI; irq++)
		if (intr_isrc_init_on_cpu(GIC_INTR_ISRC(sc, irq), cpu))
			gic_irq_unmask(sc, irq);
}
#endif /* SMP */
235 
236 static int
237 arm_gic_register_isrcs(struct arm_gic_softc *sc, uint32_t num)
238 {
239 	int error;
240 	uint32_t irq;
241 	struct gic_irqsrc *irqs;
242 	struct intr_irqsrc *isrc;
243 	const char *name;
244 
245 	irqs = malloc(num * sizeof(struct gic_irqsrc), M_DEVBUF,
246 	    M_WAITOK | M_ZERO);
247 
248 	name = device_get_nameunit(sc->gic_dev);
249 	for (irq = 0; irq < num; irq++) {
250 		irqs[irq].gi_irq = irq;
251 		irqs[irq].gi_pol = INTR_POLARITY_CONFORM;
252 		irqs[irq].gi_trig = INTR_TRIGGER_CONFORM;
253 
254 		isrc = &irqs[irq].gi_isrc;
255 		if (irq <= GIC_LAST_SGI) {
256 			error = intr_isrc_register(isrc, sc->gic_dev,
257 			    INTR_ISRCF_IPI, "%s,i%u", name, irq - GIC_FIRST_SGI);
258 		} else if (irq <= GIC_LAST_PPI) {
259 			error = intr_isrc_register(isrc, sc->gic_dev,
260 			    INTR_ISRCF_PPI, "%s,p%u", name, irq - GIC_FIRST_PPI);
261 		} else {
262 			error = intr_isrc_register(isrc, sc->gic_dev, 0,
263 			    "%s,s%u", name, irq - GIC_FIRST_SPI);
264 		}
265 		if (error != 0) {
266 			/* XXX call intr_isrc_deregister() */
267 			free(irqs, M_DEVBUF);
268 			return (error);
269 		}
270 	}
271 	sc->gic_irqs = irqs;
272 	sc->nirqs = num;
273 	return (0);
274 }
275 
276 static void
277 arm_gic_reserve_msi_range(device_t dev, u_int start, u_int count)
278 {
279 	struct arm_gic_softc *sc;
280 	int i;
281 
282 	sc = device_get_softc(dev);
283 
284 	KASSERT((start + count) < sc->nirqs,
285 	    ("%s: Trying to allocate too many MSI IRQs: %d + %d > %d", __func__,
286 	    start, count, sc->nirqs));
287 	for (i = 0; i < count; i++) {
288 		KASSERT(sc->gic_irqs[start + i].gi_isrc.isrc_handlers == 0,
289 		    ("%s: MSI interrupt %d already has a handler", __func__,
290 		    count + i));
291 		KASSERT(sc->gic_irqs[start + i].gi_pol == INTR_POLARITY_CONFORM,
292 		    ("%s: MSI interrupt %d already has a polarity", __func__,
293 		    count + i));
294 		KASSERT(sc->gic_irqs[start + i].gi_trig == INTR_TRIGGER_CONFORM,
295 		    ("%s: MSI interrupt %d already has a trigger", __func__,
296 		    count + i));
297 		sc->gic_irqs[start + i].gi_pol = INTR_POLARITY_HIGH;
298 		sc->gic_irqs[start + i].gi_trig = INTR_TRIGGER_EDGE;
299 		sc->gic_irqs[start + i].gi_flags |= GI_FLAG_MSI;
300 	}
301 }
302 
/*
 * Common attach body shared by the FDT and ACPI front ends: map the
 * distributor and CPU-interface register windows, register interrupt
 * sources, and bring the controller up with everything masked except
 * what init_secondary/enable_intr later unmask.  Only a single GIC
 * instance is supported (gic_sc singleton).
 */
int
arm_gic_attach(device_t dev)
{
	struct		arm_gic_softc *sc;
	int		i;
	uint32_t	icciidr, mask, nirqs;

	/* Refuse a second instance. */
	if (gic_sc)
		return (ENXIO);

	sc = device_get_softc(dev);

	if (bus_alloc_resources(dev, arm_gic_spec, sc->gic_res)) {
		device_printf(dev, "could not allocate resources\n");
		return (ENXIO);
	}

	sc->gic_dev = dev;
	gic_sc = sc;

	/* Initialize mutex */
	mtx_init(&sc->mutex, "GIC lock", NULL, MTX_SPIN);

	/* Distributor Interface */
	sc->gic_d_bst = rman_get_bustag(sc->gic_res[0]);
	sc->gic_d_bsh = rman_get_bushandle(sc->gic_res[0]);

	/* CPU Interface */
	sc->gic_c_bst = rman_get_bustag(sc->gic_res[1]);
	sc->gic_c_bsh = rman_get_bushandle(sc->gic_res[1]);

	/* Disable interrupt forwarding to the CPU interface */
	gic_d_write_4(sc, GICD_CTLR, 0x00);

	/* Get the number of interrupts */
	sc->typer = gic_d_read_4(sc, GICD_TYPER);
	nirqs = GICD_TYPER_I_NUM(sc->typer);

	if (arm_gic_register_isrcs(sc, nirqs)) {
		device_printf(dev, "could not register irqs\n");
		goto cleanup;
	}

	icciidr = gic_c_read_4(sc, GICC_IIDR);
	device_printf(dev,
	    "pn 0x%x, arch 0x%x, rev 0x%x, implementer 0x%x irqs %u\n",
	    GICD_IIDR_PROD(icciidr), GICD_IIDR_VAR(icciidr),
	    GICD_IIDR_REV(icciidr), GICD_IIDR_IMPL(icciidr), sc->nirqs);
	sc->gic_iidr = icciidr;

	/* Set all global interrupts to be level triggered, active low. */
	for (i = 32; i < sc->nirqs; i += 16) {
		gic_d_write_4(sc, GICD_ICFGR(i), GIC_DEFAULT_ICFGR_INIT);
	}

	/* Disable all interrupts. */
	for (i = 32; i < sc->nirqs; i += 32) {
		gic_d_write_4(sc, GICD_ICENABLER(i), 0xFFFFFFFF);
	}

	/* Find the current cpu mask */
	mask = gic_cpu_mask(sc);
	/* Set the mask so we can find this CPU to send it IPIs */
	arm_gic_map[PCPU_GET(cpuid)] = mask;
	/* Set all four targets to this cpu */
	mask |= mask << 8;
	mask |= mask << 16;

	/*
	 * NOTE(review): the "i > 32" test skips GICD_ITARGETSR for
	 * IRQs 32-35; presumably benign because gic_bind() re-targets
	 * each SPI on first use — confirm before changing.
	 */
	for (i = 0; i < sc->nirqs; i += 4) {
		gic_d_write_4(sc, GICD_IPRIORITYR(i), 0);
		if (i > 32) {
			gic_d_write_4(sc, GICD_ITARGETSR(i), mask);
		}
	}

	/* Set all the interrupts to be in Group 0 (secure) */
	for (i = 0; GIC_SUPPORT_SECEXT(sc) && i < sc->nirqs; i += 32) {
		gic_d_write_4(sc, GICD_IGROUPR(i), 0);
	}

	/* Enable CPU interface */
	gic_c_write_4(sc, GICC_CTLR, 1);

	/* Set priority mask register. */
	gic_c_write_4(sc, GICC_PMR, 0xff);

	/* Enable interrupt distribution */
	gic_d_write_4(sc, GICD_CTLR, 0x01);
	return (0);

cleanup:
	arm_gic_detach(dev);
	return(ENXIO);
}
397 
398 int
399 arm_gic_detach(device_t dev)
400 {
401 	struct arm_gic_softc *sc;
402 
403 	sc = device_get_softc(dev);
404 
405 	if (sc->gic_irqs != NULL)
406 		free(sc->gic_irqs, M_DEVBUF);
407 
408 	bus_release_resources(dev, arm_gic_spec, sc->gic_res);
409 
410 	return (0);
411 }
412 
413 static int
414 arm_gic_print_child(device_t bus, device_t child)
415 {
416 	struct resource_list *rl;
417 	int rv;
418 
419 	rv = bus_print_child_header(bus, child);
420 
421 	rl = BUS_GET_RESOURCE_LIST(bus, child);
422 	if (rl != NULL) {
423 		rv += resource_list_print_type(rl, "mem", SYS_RES_MEMORY,
424 		    "%#jx");
425 		rv += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd");
426 	}
427 
428 	rv += bus_print_child_footer(bus, child);
429 
430 	return (rv);
431 }
432 
/*
 * bus_alloc_resource method for children (e.g. a GICv2m frame): fill
 * in default start/end/count from the child's resource list, then
 * translate through the softc's ranges (bus -> host) before handing
 * off to bus_generic_alloc_resource().
 */
static struct resource *
arm_gic_alloc_resource(device_t bus, device_t child, int type, int *rid,
    rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
	struct arm_gic_softc *sc;
	struct resource_list_entry *rle;
	struct resource_list *rl;
	int j;

	KASSERT(type == SYS_RES_MEMORY, ("Invalid resoure type %x", type));

	sc = device_get_softc(bus);

	/*
	 * Request for the default allocation with a given rid: use resource
	 * list stored in the local device info.
	 */
	if (RMAN_IS_DEFAULT_RANGE(start, end)) {
		rl = BUS_GET_RESOURCE_LIST(bus, child);

		/*
		 * NOTE(review): unreachable when the KASSERT above
		 * holds (type is already SYS_RES_MEMORY); only has an
		 * effect in non-INVARIANTS kernels.
		 */
		if (type == SYS_RES_IOPORT)
			type = SYS_RES_MEMORY;

		rle = resource_list_find(rl, type, *rid);
		if (rle == NULL) {
			if (bootverbose)
				device_printf(bus, "no default resources for "
				    "rid = %d, type = %d\n", *rid, type);
			return (NULL);
		}
		start = rle->start;
		end = rle->end;
		count = rle->count;
	}

	/* Remap through ranges property */
	for (j = 0; j < sc->nranges; j++) {
		if (start >= sc->ranges[j].bus && end <
		    sc->ranges[j].bus + sc->ranges[j].size) {
			start -= sc->ranges[j].bus;
			start += sc->ranges[j].host;
			end -= sc->ranges[j].bus;
			end += sc->ranges[j].host;
			break;
		}
	}
	/* With a non-empty ranges table, an unmatched request is an error. */
	if (j == sc->nranges && sc->nranges != 0) {
		if (bootverbose)
			device_printf(bus, "Could not map resource "
			    "%#jx-%#jx\n", (uintmax_t)start, (uintmax_t)end);

		return (NULL);
	}

	return (bus_generic_alloc_resource(bus, child, type, rid, start, end,
	    count, flags));
}
490 
491 static int
492 arm_gic_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
493 {
494 	struct arm_gic_softc *sc;
495 
496 	sc = device_get_softc(dev);
497 
498 	switch(which) {
499 	case GIC_IVAR_HW_REV:
500 		KASSERT(GICD_IIDR_VAR(sc->gic_iidr) < 3,
501 		    ("arm_gic_read_ivar: Unknown IIDR revision %u (%.08x)",
502 		     GICD_IIDR_VAR(sc->gic_iidr), sc->gic_iidr));
503 		*result = GICD_IIDR_VAR(sc->gic_iidr);
504 		return (0);
505 	case GIC_IVAR_BUS:
506 		KASSERT(sc->gic_bus != GIC_BUS_UNKNOWN,
507 		    ("arm_gic_read_ivar: Unknown bus type"));
508 		KASSERT(sc->gic_bus <= GIC_BUS_MAX,
509 		    ("arm_gic_read_ivar: Invalid bus type %u", sc->gic_bus));
510 		*result = sc->gic_bus;
511 		return (0);
512 	}
513 
514 	return (ENOENT);
515 }
516 
/*
 * Main interrupt dispatch loop.  Acknowledge the highest-priority
 * pending interrupt via GICC_IAR and dispatch it, repeating until the
 * read returns an out-of-range (spurious) ID.  EOI placement is
 * subtle; see the numbered comment below.
 */
int
arm_gic_intr(void *arg)
{
	struct arm_gic_softc *sc = arg;
	struct gic_irqsrc *gi;
	uint32_t irq_active_reg, irq;
	struct trapframe *tf;

	irq_active_reg = gic_c_read_4(sc, GICC_IAR);
	irq = irq_active_reg & 0x3FF;

	/*
	 * 1. We do EOI here because recent read value from active interrupt
	 *    register must be used for it. Another approach is to save this
	 *    value into associated interrupt source.
	 * 2. EOI must be done on same CPU where interrupt has fired. Thus
	 *    we must ensure that interrupted thread does not migrate to
	 *    another CPU.
	 * 3. EOI cannot be delayed by any preemption which could happen on
	 *    critical_exit() used in MI intr code, when interrupt thread is
	 *    scheduled. See next point.
	 * 4. IPI_RENDEZVOUS assumes that no preemption is permitted during
	 *    an action and any use of critical_exit() could break this
	 *    assumption. See comments within smp_rendezvous_action().
	 * 5. We always return FILTER_HANDLED as this is an interrupt
	 *    controller dispatch function. Otherwise, in cascaded interrupt
	 *    case, the whole interrupt subtree would be masked.
	 */

	/* IDs >= nirqs (e.g. 1023) indicate no pending interrupt. */
	if (irq >= sc->nirqs) {
		if (gic_debug_spurious)
			device_printf(sc->gic_dev,
			    "Spurious interrupt detected: last irq: %d on CPU%d\n",
			    sc->last_irq[PCPU_GET(cpuid)], PCPU_GET(cpuid));
		return (FILTER_HANDLED);
	}

	tf = curthread->td_intr_frame;
dispatch_irq:
	gi = sc->gic_irqs + irq;
	/*
	 * Note that GIC_FIRST_SGI is zero and is not used in 'if' statement
	 * as compiler complains that comparing u_int >= 0 is always true.
	 */
	if (irq <= GIC_LAST_SGI) {
#ifdef SMP
		/* Call EOI for all IPI before dispatch. */
		gic_c_write_4(sc, GICC_EOIR, irq_active_reg);
		intr_ipi_dispatch(sgi_to_ipi[gi->gi_irq], tf);
		goto next_irq;
#else
		device_printf(sc->gic_dev, "SGI %u on UP system detected\n",
		    irq - GIC_FIRST_SGI);
		gic_c_write_4(sc, GICC_EOIR, irq_active_reg);
		goto next_irq;
#endif
	}

	if (gic_debug_spurious)
		sc->last_irq[PCPU_GET(cpuid)] = irq;
	/* Edge-triggered sources are EOId before the handler runs. */
	if ((gi->gi_flags & GI_FLAG_EARLY_EOI) == GI_FLAG_EARLY_EOI)
		gic_c_write_4(sc, GICC_EOIR, irq_active_reg);

	/* No handler claimed the interrupt: mask it so it cannot storm. */
	if (intr_isrc_dispatch(&gi->gi_isrc, tf) != 0) {
		gic_irq_mask(sc, irq);
		if ((gi->gi_flags & GI_FLAG_EARLY_EOI) != GI_FLAG_EARLY_EOI)
			gic_c_write_4(sc, GICC_EOIR, irq_active_reg);
		device_printf(sc->gic_dev, "Stray irq %u disabled\n", irq);
	}

next_irq:
	arm_irq_memory_barrier(irq);
	irq_active_reg = gic_c_read_4(sc, GICC_IAR);
	irq = irq_active_reg & 0x3FF;
	if (irq < sc->nirqs)
		goto dispatch_irq;

	return (FILTER_HANDLED);
}
596 
597 static void
598 gic_config(struct arm_gic_softc *sc, u_int irq, enum intr_trigger trig,
599     enum intr_polarity pol)
600 {
601 	uint32_t reg;
602 	uint32_t mask;
603 
604 	if (irq < GIC_FIRST_SPI)
605 		return;
606 
607 	mtx_lock_spin(&sc->mutex);
608 
609 	reg = gic_d_read_4(sc, GICD_ICFGR(irq));
610 	mask = (reg >> 2*(irq % 16)) & 0x3;
611 
612 	if (pol == INTR_POLARITY_LOW) {
613 		mask &= ~GICD_ICFGR_POL_MASK;
614 		mask |= GICD_ICFGR_POL_LOW;
615 	} else if (pol == INTR_POLARITY_HIGH) {
616 		mask &= ~GICD_ICFGR_POL_MASK;
617 		mask |= GICD_ICFGR_POL_HIGH;
618 	}
619 
620 	if (trig == INTR_TRIGGER_LEVEL) {
621 		mask &= ~GICD_ICFGR_TRIG_MASK;
622 		mask |= GICD_ICFGR_TRIG_LVL;
623 	} else if (trig == INTR_TRIGGER_EDGE) {
624 		mask &= ~GICD_ICFGR_TRIG_MASK;
625 		mask |= GICD_ICFGR_TRIG_EDGE;
626 	}
627 
628 	/* Set mask */
629 	reg = reg & ~(0x3 << 2*(irq % 16));
630 	reg = reg | (mask << 2*(irq % 16));
631 	gic_d_write_4(sc, GICD_ICFGR(irq), reg);
632 
633 	mtx_unlock_spin(&sc->mutex);
634 }
635 
636 static int
637 gic_bind(struct arm_gic_softc *sc, u_int irq, cpuset_t *cpus)
638 {
639 	uint32_t cpu, end, mask;
640 
641 	end = min(mp_ncpus, 8);
642 	for (cpu = end; cpu < MAXCPU; cpu++)
643 		if (CPU_ISSET(cpu, cpus))
644 			return (EINVAL);
645 
646 	for (mask = 0, cpu = 0; cpu < end; cpu++)
647 		if (CPU_ISSET(cpu, cpus))
648 			mask |= arm_gic_map[cpu];
649 
650 	gic_d_write_1(sc, GICD_ITARGETSR(0) + irq, mask);
651 	return (0);
652 }
653 
#ifdef FDT
/*
 * Decode an FDT "interrupts" specifier into a GIC interrupt number
 * plus trigger mode.  A one-cell specifier is a bare interrupt number;
 * the standard three-cell GIC binding is decoded below.
 */
static int
gic_map_fdt(device_t dev, u_int ncells, pcell_t *cells, u_int *irqp,
    enum intr_polarity *polp, enum intr_trigger *trigp)
{
	u_int hwirq, flags;

	if (ncells == 1) {
		*irqp = cells[0];
		*polp = INTR_POLARITY_CONFORM;
		*trigp = INTR_TRIGGER_CONFORM;
		return (0);
	}

	if (ncells != 3)
		return (EINVAL);

	/*
	 * The 1st cell is the interrupt type:
	 *	0 = SPI
	 *	1 = PPI
	 * The 2nd cell contains the interrupt number:
	 *	[0 - 987] for SPI
	 *	[0 -  15] for PPI
	 * The 3rd cell is the flags, encoded as follows:
	 *   bits[3:0] trigger type and level flags
	 *	1 = low-to-high edge triggered
	 *	2 = high-to-low edge triggered
	 *	4 = active high level-sensitive
	 *	8 = active low level-sensitive
	 *   bits[15:8] PPI interrupt cpu mask
	 *	Each bit corresponds to each of the 8 possible cpus
	 *	attached to the GIC.  A bit set to '1' indicated
	 *	the interrupt is wired to that CPU.
	 */
	if (cells[0] == 0) {
		hwirq = GIC_FIRST_SPI + cells[1];
		/* SPI irq is checked later. */
	} else if (cells[0] == 1) {
		hwirq = GIC_FIRST_PPI + cells[1];
		if (hwirq > GIC_LAST_PPI) {
			device_printf(dev, "unsupported PPI interrupt "
			    "number %u\n", cells[1]);
			return (EINVAL);
		}
	} else {
		device_printf(dev, "unsupported interrupt type "
		    "configuration %u\n", cells[0]);
		return (EINVAL);
	}

	flags = cells[2] & 0xff;
	/* Warn (but proceed) on flags this driver does not support. */
	if (flags & 0xf0 || (flags & FDT_INTR_LOW_MASK && cells[0] == 0))
		device_printf(dev, "unsupported trigger/polarity "
		    "configuration 0x%02x\n", flags);

	*irqp = hwirq;
	*polp = INTR_POLARITY_CONFORM;
	*trigp = (flags & FDT_INTR_EDGE_MASK) ?
	    INTR_TRIGGER_EDGE : INTR_TRIGGER_LEVEL;
	return (0);
}
#endif
721 
722 static int
723 gic_map_msi(device_t dev, struct intr_map_data_msi *msi_data, u_int *irqp,
724     enum intr_polarity *polp, enum intr_trigger *trigp)
725 {
726 	struct gic_irqsrc *gi;
727 
728 	/* Map a non-GICv2m MSI */
729 	gi = (struct gic_irqsrc *)msi_data->isrc;
730 	if (gi == NULL)
731 		return (ENXIO);
732 
733 	*irqp = gi->gi_irq;
734 
735 	/* MSI/MSI-X interrupts are always edge triggered with high polarity */
736 	*polp = INTR_POLARITY_HIGH;
737 	*trigp = INTR_TRIGGER_EDGE;
738 
739 	return (0);
740 }
741 
/*
 * Translate bus-specific interrupt map data (FDT, ACPI or MSI) into a
 * GIC interrupt number plus polarity/trigger, then validate the
 * result against this controller.  polp/trigp may be NULL when the
 * caller only needs the interrupt number.
 */
static int
gic_map_intr(device_t dev, struct intr_map_data *data, u_int *irqp,
    enum intr_polarity *polp, enum intr_trigger *trigp)
{
	u_int irq;
	enum intr_polarity pol;
	enum intr_trigger trig;
	struct arm_gic_softc *sc;
	struct intr_map_data_msi *dam;
#ifdef FDT
	struct intr_map_data_fdt *daf;
#endif
#ifdef DEV_ACPI
	struct intr_map_data_acpi *daa;
#endif

	sc = device_get_softc(dev);
	switch (data->type) {
#ifdef FDT
	case INTR_MAP_DATA_FDT:
		daf = (struct intr_map_data_fdt *)data;
		if (gic_map_fdt(dev, daf->ncells, daf->cells, &irq, &pol,
		    &trig) != 0)
			return (EINVAL);
		/* IRQs reserved for GICv2m MSI must not appear in FDT. */
		KASSERT(irq >= sc->nirqs ||
		    (sc->gic_irqs[irq].gi_flags & GI_FLAG_MSI) == 0,
		    ("%s: Attempting to map a MSI interrupt from FDT",
		    __func__));
		break;
#endif
#ifdef DEV_ACPI
	case INTR_MAP_DATA_ACPI:
		daa = (struct intr_map_data_acpi *)data;
		irq = daa->irq;
		pol = daa->pol;
		trig = daa->trig;
		break;
#endif
	case INTR_MAP_DATA_MSI:
		/* Non-GICv2m MSI */
		dam = (struct intr_map_data_msi *)data;
		if (gic_map_msi(dev, dam, &irq, &pol, &trig) != 0)
			return (EINVAL);
		break;
	default:
		return (ENOTSUP);
	}

	/* Reject out-of-range IRQs and nonsensical pol/trig values. */
	if (irq >= sc->nirqs)
		return (EINVAL);
	if (pol != INTR_POLARITY_CONFORM && pol != INTR_POLARITY_LOW &&
	    pol != INTR_POLARITY_HIGH)
		return (EINVAL);
	if (trig != INTR_TRIGGER_CONFORM && trig != INTR_TRIGGER_EDGE &&
	    trig != INTR_TRIGGER_LEVEL)
		return (EINVAL);

	*irqp = irq;
	if (polp != NULL)
		*polp = pol;
	if (trigp != NULL)
		*trigp = trig;
	return (0);
}
806 
807 static int
808 arm_gic_map_intr(device_t dev, struct intr_map_data *data,
809     struct intr_irqsrc **isrcp)
810 {
811 	int error;
812 	u_int irq;
813 	struct arm_gic_softc *sc;
814 
815 	error = gic_map_intr(dev, data, &irq, NULL, NULL);
816 	if (error == 0) {
817 		sc = device_get_softc(dev);
818 		*isrcp = GIC_INTR_ISRC(sc, irq);
819 	}
820 	return (error);
821 }
822 
/*
 * pic_setup_intr method: determine the polarity/trigger for this
 * source (from its MSI reservation, from the supplied map data, or
 * CONFORM), check consistency with any previous setup, then program
 * the distributor and bind the interrupt to a CPU.
 */
static int
arm_gic_setup_intr(device_t dev, struct intr_irqsrc *isrc,
    struct resource *res, struct intr_map_data *data)
{
	struct arm_gic_softc *sc = device_get_softc(dev);
	struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
	enum intr_trigger trig;
	enum intr_polarity pol;

	if ((gi->gi_flags & GI_FLAG_MSI) == GI_FLAG_MSI) {
		/* GICv2m MSI: config was fixed at reservation time. */
		pol = gi->gi_pol;
		trig = gi->gi_trig;
		KASSERT(pol == INTR_POLARITY_HIGH,
		    ("%s: MSI interrupts must be active-high", __func__));
		KASSERT(trig == INTR_TRIGGER_EDGE,
		    ("%s: MSI interrupts must be edge triggered", __func__));
	} else if (data != NULL) {
		u_int irq;

		/* Get config for resource. */
		if (gic_map_intr(dev, data, &irq, &pol, &trig) ||
		    gi->gi_irq != irq)
			return (EINVAL);
	} else {
		pol = INTR_POLARITY_CONFORM;
		trig = INTR_TRIGGER_CONFORM;
	}

	/* Compare config if this is not first setup. */
	if (isrc->isrc_handlers != 0) {
		if ((pol != INTR_POLARITY_CONFORM && pol != gi->gi_pol) ||
		    (trig != INTR_TRIGGER_CONFORM && trig != gi->gi_trig))
			return (EINVAL);
		else
			return (0);
	}

	/* For MSI/MSI-X we should have already configured these */
	if ((gi->gi_flags & GI_FLAG_MSI) == 0) {
		if (pol == INTR_POLARITY_CONFORM)
			pol = INTR_POLARITY_LOW;	/* just pick some */
		if (trig == INTR_TRIGGER_CONFORM)
			trig = INTR_TRIGGER_EDGE;	/* just pick some */

		gi->gi_pol = pol;
		gi->gi_trig = trig;

		/* Edge triggered interrupts need an early EOI sent */
		if (gi->gi_trig == INTR_TRIGGER_EDGE)
			gi->gi_flags |= GI_FLAG_EARLY_EOI;
	}

	/*
	 * XXX - In case that per CPU interrupt is going to be enabled in time
	 *       when SMP is already started, we need some IPI call which
	 *       enables it on others CPUs. Further, it's more complicated as
	 *       pic_enable_source() and pic_disable_source() should act on
	 *       per CPU basis only. Thus, it should be solved here somehow.
	 */
	if (isrc->isrc_flags & INTR_ISRCF_PPI)
		CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu);

	gic_config(sc, gi->gi_irq, gi->gi_trig, gi->gi_pol);
	arm_gic_bind_intr(dev, isrc);
	return (0);
}
890 
891 static int
892 arm_gic_teardown_intr(device_t dev, struct intr_irqsrc *isrc,
893     struct resource *res, struct intr_map_data *data)
894 {
895 	struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
896 
897 	if (isrc->isrc_handlers == 0 && (gi->gi_flags & GI_FLAG_MSI) == 0) {
898 		gi->gi_pol = INTR_POLARITY_CONFORM;
899 		gi->gi_trig = INTR_TRIGGER_CONFORM;
900 	}
901 	return (0);
902 }
903 
/*
 * pic_enable_intr method: unmask the source in the distributor.  The
 * barrier orders prior device writes before the unmask.
 */
static void
arm_gic_enable_intr(device_t dev, struct intr_irqsrc *isrc)
{
	struct arm_gic_softc *sc = device_get_softc(dev);
	struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;

	arm_irq_memory_barrier(gi->gi_irq);
	gic_irq_unmask(sc, gi->gi_irq);
}
913 
/*
 * pic_disable_intr method: mask the source in the distributor.
 */
static void
arm_gic_disable_intr(device_t dev, struct intr_irqsrc *isrc)
{
	struct arm_gic_softc *sc = device_get_softc(dev);
	struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;

	gic_irq_mask(sc, gi->gi_irq);
}
922 
/*
 * pic_pre_ithread method: before handing off to an interrupt thread,
 * mask the source (so a level interrupt cannot re-fire) and then EOI
 * it so other interrupts can be delivered while the thread runs.
 */
static void
arm_gic_pre_ithread(device_t dev, struct intr_irqsrc *isrc)
{
	struct arm_gic_softc *sc = device_get_softc(dev);
	struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;

	arm_gic_disable_intr(dev, isrc);
	gic_c_write_4(sc, GICC_EOIR, gi->gi_irq);
}
932 
/*
 * pic_post_ithread method: the ithread has finished, so re-enable the
 * source that arm_gic_pre_ithread() masked.
 */
static void
arm_gic_post_ithread(device_t dev, struct intr_irqsrc *isrc)
{

	arm_irq_memory_barrier(0);
	arm_gic_enable_intr(dev, isrc);
}
940 
/*
 * pic_post_filter method: signal completion (EOI) after a filter
 * handler, unless this source was already EOId early in
 * arm_gic_intr() (edge-triggered case).
 */
static void
arm_gic_post_filter(device_t dev, struct intr_irqsrc *isrc)
{
	struct arm_gic_softc *sc = device_get_softc(dev);
	struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;

	/* EOI for edge-triggered done earlier. */
	if ((gi->gi_flags & GI_FLAG_EARLY_EOI) == GI_FLAG_EARLY_EOI)
		return;

	arm_irq_memory_barrier(0);
	gic_c_write_4(sc, GICC_EOIR, gi->gi_irq);
}
954 
/*
 * pic_bind_intr method: route an SPI to the CPUs in isrc_cpu.  When
 * no CPU set was requested, pick the next CPU round-robin (tracked in
 * the file-scope gic_irq_cpu).  SGIs/PPIs are per-CPU and cannot be
 * re-routed.
 */
static int
arm_gic_bind_intr(device_t dev, struct intr_irqsrc *isrc)
{
	struct arm_gic_softc *sc = device_get_softc(dev);
	struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;

	if (gi->gi_irq < GIC_FIRST_SPI)
		return (EINVAL);

	if (CPU_EMPTY(&isrc->isrc_cpu)) {
		gic_irq_cpu = intr_irq_next_cpu(gic_irq_cpu, &all_cpus);
		CPU_SETOF(gic_irq_cpu, &isrc->isrc_cpu);
	}
	return (gic_bind(sc, gi->gi_irq, &isrc->isrc_cpu));
}
970 
971 #ifdef SMP
972 static void
973 arm_gic_ipi_send(device_t dev, struct intr_irqsrc *isrc, cpuset_t cpus,
974     u_int ipi)
975 {
976 	struct arm_gic_softc *sc = device_get_softc(dev);
977 	struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
978 	uint32_t val = 0, i;
979 
980 	for (i = 0; i < MAXCPU; i++)
981 		if (CPU_ISSET(i, &cpus))
982 			val |= arm_gic_map[i] << GICD_SGI_TARGET_SHIFT;
983 
984 	gic_d_write_4(sc, GICD_SGIR, val | gi->gi_irq);
985 }
986 
/*
 * pic_ipi_setup method: allocate the next unused SGI for the given
 * IPI number.  Allocation is a simple bump allocator over the
 * file-scope sgi_first_unused/sgi_to_ipi state; ENOSPC once all 16
 * SGIs are taken.
 */
static int
arm_gic_ipi_setup(device_t dev, u_int ipi, struct intr_irqsrc **isrcp)
{
	struct intr_irqsrc *isrc;
	struct arm_gic_softc *sc = device_get_softc(dev);

	if (sgi_first_unused > GIC_LAST_SGI)
		return (ENOSPC);

	isrc = GIC_INTR_ISRC(sc, sgi_first_unused);
	sgi_to_ipi[sgi_first_unused++] = ipi;

	CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu);

	*isrcp = isrc;
	return (0);
}
1004 #endif
1005 
/*
 * kobj method table for the base GIC class; the FDT and ACPI
 * attachments subclass this via DEFINE_CLASS_0 below.
 */
static device_method_t arm_gic_methods[] = {
	/* Bus interface */
	DEVMETHOD(bus_print_child,	arm_gic_print_child),
	DEVMETHOD(bus_add_child,	bus_generic_add_child),
	DEVMETHOD(bus_alloc_resource,	arm_gic_alloc_resource),
	DEVMETHOD(bus_release_resource,	bus_generic_release_resource),
	DEVMETHOD(bus_activate_resource,bus_generic_activate_resource),
	DEVMETHOD(bus_read_ivar,	arm_gic_read_ivar),

	/* Interrupt controller interface */
	DEVMETHOD(pic_disable_intr,	arm_gic_disable_intr),
	DEVMETHOD(pic_enable_intr,	arm_gic_enable_intr),
	DEVMETHOD(pic_map_intr,		arm_gic_map_intr),
	DEVMETHOD(pic_setup_intr,	arm_gic_setup_intr),
	DEVMETHOD(pic_teardown_intr,	arm_gic_teardown_intr),
	DEVMETHOD(pic_post_filter,	arm_gic_post_filter),
	DEVMETHOD(pic_post_ithread,	arm_gic_post_ithread),
	DEVMETHOD(pic_pre_ithread,	arm_gic_pre_ithread),
#ifdef SMP
	DEVMETHOD(pic_bind_intr,	arm_gic_bind_intr),
	DEVMETHOD(pic_init_secondary,	arm_gic_init_secondary),
	DEVMETHOD(pic_ipi_send,		arm_gic_ipi_send),
	DEVMETHOD(pic_ipi_setup,	arm_gic_ipi_setup),
#endif
	{ 0, 0 }
};

DEFINE_CLASS_0(gic, arm_gic_driver, arm_gic_methods,
    sizeof(struct arm_gic_softc));
1035 
1036 /*
1037  * GICv2m support -- the GICv2 MSI/MSI-X controller.
1038  */
1039 
/* MSI Type Register: reports the SPI range this frame may raise. */
#define	GICV2M_MSI_TYPER	0x008
#define	 MSI_TYPER_SPI_BASE(x)	(((x) >> 16) & 0x3ff)	/* first usable SPI */
#define	 MSI_TYPER_SPI_COUNT(x)	(((x) >> 0) & 0x3ff)	/* number of SPIs */
/*
 * Doorbell register: a write of an SPI number raises that interrupt.
 * NOTE(review): "GICv2M" capitalization is inconsistent with the other
 * GICV2M_ macros; left as-is since it is referenced below.
 */
#define	GICv2M_MSI_SETSPI_NS	0x040
#define	GICV2M_MSI_IIDR		0xFCC	/* MSI Interface Identification */
1045 
1046 int
1047 arm_gicv2m_attach(device_t dev)
1048 {
1049 	struct arm_gicv2m_softc *sc;
1050 	uint32_t typer;
1051 	int rid;
1052 
1053 	sc = device_get_softc(dev);
1054 
1055 	rid = 0;
1056 	sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1057 	    RF_ACTIVE);
1058 	if (sc->sc_mem == NULL) {
1059 		device_printf(dev, "Unable to allocate resources\n");
1060 		return (ENXIO);
1061 	}
1062 
1063 	typer = bus_read_4(sc->sc_mem, GICV2M_MSI_TYPER);
1064 	sc->sc_spi_start = MSI_TYPER_SPI_BASE(typer);
1065 	sc->sc_spi_count = MSI_TYPER_SPI_COUNT(typer);
1066 	sc->sc_spi_end = sc->sc_spi_start + sc->sc_spi_count;
1067 
1068 	/* Reserve these interrupts for MSI/MSI-X use */
1069 	arm_gic_reserve_msi_range(device_get_parent(dev), sc->sc_spi_start,
1070 	    sc->sc_spi_count);
1071 
1072 	mtx_init(&sc->sc_mutex, "GICv2m lock", NULL, MTX_DEF);
1073 
1074 	intr_msi_register(dev, sc->sc_xref);
1075 
1076 	if (bootverbose)
1077 		device_printf(dev, "using spi %u to %u\n", sc->sc_spi_start,
1078 		    sc->sc_spi_start + sc->sc_spi_count - 1);
1079 
1080 	return (0);
1081 }
1082 
1083 static int
1084 arm_gicv2m_alloc_msi(device_t dev, device_t child, int count, int maxcount,
1085     device_t *pic, struct intr_irqsrc **srcs)
1086 {
1087 	struct arm_gic_softc *psc;
1088 	struct arm_gicv2m_softc *sc;
1089 	int i, irq, end_irq;
1090 	bool found;
1091 
1092 	KASSERT(powerof2(count), ("%s: bad count", __func__));
1093 	KASSERT(powerof2(maxcount), ("%s: bad maxcount", __func__));
1094 
1095 	psc = device_get_softc(device_get_parent(dev));
1096 	sc = device_get_softc(dev);
1097 
1098 	mtx_lock(&sc->sc_mutex);
1099 
1100 	found = false;
1101 	for (irq = sc->sc_spi_start; irq < sc->sc_spi_end; irq++) {
1102 		/* Start on an aligned interrupt */
1103 		if ((irq & (maxcount - 1)) != 0)
1104 			continue;
1105 
1106 		/* Assume we found a valid range until shown otherwise */
1107 		found = true;
1108 
1109 		/* Check this range is valid */
1110 		for (end_irq = irq; end_irq != irq + count; end_irq++) {
1111 			/* No free interrupts */
1112 			if (end_irq == sc->sc_spi_end) {
1113 				found = false;
1114 				break;
1115 			}
1116 
1117 			KASSERT((psc->gic_irqs[end_irq].gi_flags & GI_FLAG_MSI)!= 0,
1118 			    ("%s: Non-MSI interrupt found", __func__));
1119 
1120 			/* This is already used */
1121 			if ((psc->gic_irqs[end_irq].gi_flags & GI_FLAG_MSI_USED) ==
1122 			    GI_FLAG_MSI_USED) {
1123 				found = false;
1124 				break;
1125 			}
1126 		}
1127 		if (found)
1128 			break;
1129 	}
1130 
1131 	/* Not enough interrupts were found */
1132 	if (!found || irq == sc->sc_spi_end) {
1133 		mtx_unlock(&sc->sc_mutex);
1134 		return (ENXIO);
1135 	}
1136 
1137 	for (i = 0; i < count; i++) {
1138 		/* Mark the interrupt as used */
1139 		psc->gic_irqs[irq + i].gi_flags |= GI_FLAG_MSI_USED;
1140 
1141 	}
1142 	mtx_unlock(&sc->sc_mutex);
1143 
1144 	for (i = 0; i < count; i++)
1145 		srcs[i] = (struct intr_irqsrc *)&psc->gic_irqs[irq + i];
1146 	*pic = device_get_parent(dev);
1147 
1148 	return (0);
1149 }
1150 
1151 static int
1152 arm_gicv2m_release_msi(device_t dev, device_t child, int count,
1153     struct intr_irqsrc **isrc)
1154 {
1155 	struct arm_gicv2m_softc *sc;
1156 	struct gic_irqsrc *gi;
1157 	int i;
1158 
1159 	sc = device_get_softc(dev);
1160 
1161 	mtx_lock(&sc->sc_mutex);
1162 	for (i = 0; i < count; i++) {
1163 		gi = (struct gic_irqsrc *)isrc[i];
1164 
1165 		KASSERT((gi->gi_flags & GI_FLAG_MSI_USED) == GI_FLAG_MSI_USED,
1166 		    ("%s: Trying to release an unused MSI-X interrupt",
1167 		    __func__));
1168 
1169 		gi->gi_flags &= ~GI_FLAG_MSI_USED;
1170 	}
1171 	mtx_unlock(&sc->sc_mutex);
1172 
1173 	return (0);
1174 }
1175 
1176 static int
1177 arm_gicv2m_alloc_msix(device_t dev, device_t child, device_t *pic,
1178     struct intr_irqsrc **isrcp)
1179 {
1180 	struct arm_gicv2m_softc *sc;
1181 	struct arm_gic_softc *psc;
1182 	int irq;
1183 
1184 	psc = device_get_softc(device_get_parent(dev));
1185 	sc = device_get_softc(dev);
1186 
1187 	mtx_lock(&sc->sc_mutex);
1188 	/* Find an unused interrupt */
1189 	for (irq = sc->sc_spi_start; irq < sc->sc_spi_end; irq++) {
1190 		KASSERT((psc->gic_irqs[irq].gi_flags & GI_FLAG_MSI) != 0,
1191 		    ("%s: Non-MSI interrupt found", __func__));
1192 		if ((psc->gic_irqs[irq].gi_flags & GI_FLAG_MSI_USED) == 0)
1193 			break;
1194 	}
1195 	/* No free interrupt was found */
1196 	if (irq == sc->sc_spi_end) {
1197 		mtx_unlock(&sc->sc_mutex);
1198 		return (ENXIO);
1199 	}
1200 
1201 	/* Mark the interrupt as used */
1202 	psc->gic_irqs[irq].gi_flags |= GI_FLAG_MSI_USED;
1203 	mtx_unlock(&sc->sc_mutex);
1204 
1205 	*isrcp = (struct intr_irqsrc *)&psc->gic_irqs[irq];
1206 	*pic = device_get_parent(dev);
1207 
1208 	return (0);
1209 }
1210 
1211 static int
1212 arm_gicv2m_release_msix(device_t dev, device_t child, struct intr_irqsrc *isrc)
1213 {
1214 	struct arm_gicv2m_softc *sc;
1215 	struct gic_irqsrc *gi;
1216 
1217 	sc = device_get_softc(dev);
1218 	gi = (struct gic_irqsrc *)isrc;
1219 
1220 	KASSERT((gi->gi_flags & GI_FLAG_MSI_USED) == GI_FLAG_MSI_USED,
1221 	    ("%s: Trying to release an unused MSI-X interrupt", __func__));
1222 
1223 	mtx_lock(&sc->sc_mutex);
1224 	gi->gi_flags &= ~GI_FLAG_MSI_USED;
1225 	mtx_unlock(&sc->sc_mutex);
1226 
1227 	return (0);
1228 }
1229 
1230 static int
1231 arm_gicv2m_map_msi(device_t dev, device_t child, struct intr_irqsrc *isrc,
1232     uint64_t *addr, uint32_t *data)
1233 {
1234 	struct arm_gicv2m_softc *sc = device_get_softc(dev);
1235 	struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
1236 
1237 	*addr = vtophys(rman_get_virtual(sc->sc_mem)) + GICv2M_MSI_SETSPI_NS;
1238 	*data = gi->gi_irq;
1239 
1240 	return (0);
1241 }
1242 
/*
 * kobj dispatch table for the GICv2m MSI frame: device attachment plus
 * the MSI_IF methods (allocation, release and address/data mapping of
 * MSI and MSI-X vectors).
 */
static device_method_t arm_gicv2m_methods[] = {
	/* Device interface */
	DEVMETHOD(device_attach,	arm_gicv2m_attach),

	/* MSI/MSI-X */
	DEVMETHOD(msi_alloc_msi,	arm_gicv2m_alloc_msi),
	DEVMETHOD(msi_release_msi,	arm_gicv2m_release_msi),
	DEVMETHOD(msi_alloc_msix,	arm_gicv2m_alloc_msix),
	DEVMETHOD(msi_release_msix,	arm_gicv2m_release_msix),
	DEVMETHOD(msi_map_msi,		arm_gicv2m_map_msi),

	/* End */
	DEVMETHOD_END
};
1257 
/*
 * Base "gicv2m" driver class.  NOTE(review): presumably attached by
 * bus-specific (FDT/ACPI) front ends elsewhere -- confirm.
 */
DEFINE_CLASS_0(gicv2m, arm_gicv2m_driver, arm_gicv2m_methods,
    sizeof(struct arm_gicv2m_softc));
1260