xref: /freebsd/sys/arm/arm/gic.c (revision 81b22a98)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 2011 The FreeBSD Foundation
5  * All rights reserved.
6  *
7  * Developed by Damjan Marion <damjan.marion@gmail.com>
8  *
9  * Based on OMAP4 GIC code by Ben Gray
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  * 3. The name of the company nor the name of the author may be used to
20  *    endorse or promote products derived from this software without specific
21  *    prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
27  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  */
35 
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD$");
38 
39 #include "opt_acpi.h"
40 #include "opt_platform.h"
41 
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/bus.h>
45 #include <sys/kernel.h>
46 #include <sys/ktr.h>
47 #include <sys/module.h>
48 #include <sys/malloc.h>
49 #include <sys/rman.h>
50 #include <sys/pcpu.h>
51 #include <sys/proc.h>
52 #include <sys/cpuset.h>
53 #include <sys/lock.h>
54 #include <sys/mutex.h>
55 #include <sys/smp.h>
56 #include <sys/sched.h>
57 
58 #include <vm/vm.h>
59 #include <vm/pmap.h>
60 
61 #include <machine/bus.h>
62 #include <machine/intr.h>
63 #include <machine/smp.h>
64 
65 #ifdef FDT
66 #include <dev/fdt/fdt_intr.h>
67 #include <dev/ofw/ofw_bus_subr.h>
68 #endif
69 
70 #ifdef DEV_ACPI
71 #include <contrib/dev/acpica/include/acpi.h>
72 #include <dev/acpica/acpivar.h>
73 #endif
74 
75 #include <arm/arm/gic.h>
76 #include <arm/arm/gic_common.h>
77 
78 #include "gic_if.h"
79 #include "pic_if.h"
80 #include "msi_if.h"
81 
82 /* We are using GICv2 register naming */
83 
84 /* Distributor Registers */
85 
86 /* CPU Registers */
87 #define GICC_CTLR		0x0000			/* v1 ICCICR */
88 #define GICC_PMR		0x0004			/* v1 ICCPMR */
89 #define GICC_BPR		0x0008			/* v1 ICCBPR */
90 #define GICC_IAR		0x000C			/* v1 ICCIAR */
91 #define GICC_EOIR		0x0010			/* v1 ICCEOIR */
92 #define GICC_RPR		0x0014			/* v1 ICCRPR */
93 #define GICC_HPPIR		0x0018			/* v1 ICCHPIR */
94 #define GICC_ABPR		0x001C			/* v1 ICCABPR */
95 #define GICC_IIDR		0x00FC			/* v1 ICCIIDR*/
96 
97 /* TYPER Registers */
98 #define	GICD_TYPER_SECURITYEXT	0x400
99 #define	GIC_SUPPORT_SECEXT(_sc)	\
100     ((_sc->typer & GICD_TYPER_SECURITYEXT) == GICD_TYPER_SECURITYEXT)
101 
102 #ifndef	GIC_DEFAULT_ICFGR_INIT
103 #define	GIC_DEFAULT_ICFGR_INIT	0x00000000
104 #endif
105 
/*
 * Per-interrupt software state, one element for every interrupt the
 * distributor reports (SGIs, PPIs and SPIs).  The driver freely casts
 * struct intr_irqsrc * to struct gic_irqsrc *, so gi_isrc must remain
 * the first member.
 */
struct gic_irqsrc {
	struct intr_irqsrc	gi_isrc;	/* Generic INTRNG source */
	uint32_t		gi_irq;		/* GIC interrupt number */
	enum intr_polarity	gi_pol;		/* Configured polarity */
	enum intr_trigger	gi_trig;	/* Configured trigger mode */
#define GI_FLAG_EARLY_EOI	(1 << 0) /* EOI is written before dispatch */
#define GI_FLAG_MSI		(1 << 1) /* This interrupt source should only */
					 /* be used for MSI/MSI-X interrupts */
#define GI_FLAG_MSI_USED	(1 << 2) /* This irq is already allocated */
					 /* for a MSI/MSI-X interrupt */
	u_int			gi_flags;	/* GI_FLAG_* bits above */
};
118 
119 static u_int gic_irq_cpu;
120 static int arm_gic_bind_intr(device_t dev, struct intr_irqsrc *isrc);
121 
122 #ifdef SMP
123 static u_int sgi_to_ipi[GIC_LAST_SGI - GIC_FIRST_SGI + 1];
124 static u_int sgi_first_unused = GIC_FIRST_SGI;
125 #endif
126 
127 #define GIC_INTR_ISRC(sc, irq)	(&sc->gic_irqs[irq].gi_isrc)
128 
129 static struct resource_spec arm_gic_spec[] = {
130 	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },	/* Distributor registers */
131 	{ SYS_RES_MEMORY,	1,	RF_ACTIVE },	/* CPU Interrupt Intf. registers */
132 	{ SYS_RES_IRQ,	  0, RF_ACTIVE | RF_OPTIONAL }, /* Parent interrupt */
133 	{ -1, 0 }
134 };
135 
136 #if defined(__arm__) && defined(INVARIANTS)
137 static int gic_debug_spurious = 1;
138 #else
139 static int gic_debug_spurious = 0;
140 #endif
141 TUNABLE_INT("hw.gic.debug_spurious", &gic_debug_spurious);
142 
143 static u_int arm_gic_map[MAXCPU];
144 
145 static struct arm_gic_softc *gic_sc = NULL;
146 
147 /* CPU Interface */
148 #define	gic_c_read_4(_sc, _reg)		\
149     bus_read_4((_sc)->gic_res[GIC_RES_CPU], (_reg))
150 #define	gic_c_write_4(_sc, _reg, _val)		\
151     bus_write_4((_sc)->gic_res[GIC_RES_CPU], (_reg), (_val))
152 /* Distributor Interface */
153 #define	gic_d_read_4(_sc, _reg)		\
154     bus_read_4((_sc)->gic_res[GIC_RES_DIST], (_reg))
155 #define	gic_d_write_1(_sc, _reg, _val)		\
156     bus_write_1((_sc)->gic_res[GIC_RES_DIST], (_reg), (_val))
157 #define	gic_d_write_4(_sc, _reg, _val)		\
158     bus_write_4((_sc)->gic_res[GIC_RES_DIST], (_reg), (_val))
159 
/*
 * Enable (unmask) interrupt 'irq' by setting its bit in the distributor
 * set-enable register bank that contains it.
 */
static inline void
gic_irq_unmask(struct arm_gic_softc *sc, u_int irq)
{

	gic_d_write_4(sc, GICD_ISENABLER(irq), GICD_I_MASK(irq));
}
166 
/*
 * Disable (mask) interrupt 'irq' by setting its bit in the distributor
 * clear-enable register bank that contains it.
 */
static inline void
gic_irq_mask(struct arm_gic_softc *sc, u_int irq)
{

	gic_d_write_4(sc, GICD_ICENABLER(irq), GICD_I_MASK(irq));
}
173 
174 static uint8_t
175 gic_cpu_mask(struct arm_gic_softc *sc)
176 {
177 	uint32_t mask;
178 	int i;
179 
180 	/* Read the current cpuid mask by reading ITARGETSR{0..7} */
181 	for (i = 0; i < 8; i++) {
182 		mask = gic_d_read_4(sc, GICD_ITARGETSR(4 * i));
183 		if (mask != 0)
184 			break;
185 	}
186 	/* No mask found, assume we are on CPU interface 0 */
187 	if (mask == 0)
188 		return (1);
189 
190 	/* Collect the mask in the lower byte */
191 	mask |= mask >> 16;
192 	mask |= mask >> 8;
193 
194 	return (mask);
195 }
196 
197 #ifdef SMP
/*
 * Per-CPU initialization run on each application processor: record this
 * CPU's interface mask for IPI delivery, program the banked (per-CPU)
 * distributor registers, enable the CPU interface and unmask any SGI/PPI
 * sources that already have handlers on this CPU.
 */
static void
arm_gic_init_secondary(device_t dev)
{
	struct arm_gic_softc *sc = device_get_softc(dev);
	u_int irq, cpu;

	/* Set the mask so we can find this CPU to send it IPIs */
	cpu = PCPU_GET(cpuid);
	arm_gic_map[cpu] = gic_cpu_mask(sc);

	/* Reset every interrupt to the highest priority (0). */
	for (irq = 0; irq < sc->nirqs; irq += 4)
		gic_d_write_4(sc, GICD_IPRIORITYR(irq), 0);

	/* Set all the interrupts to be in Group 0 (secure) */
	for (irq = 0; GIC_SUPPORT_SECEXT(sc) && irq < sc->nirqs; irq += 32) {
		gic_d_write_4(sc, GICD_IGROUPR(irq), 0);
	}

	/* Enable CPU interface */
	gic_c_write_4(sc, GICC_CTLR, 1);

	/* Set priority mask register: 0xff accepts all priorities. */
	gic_c_write_4(sc, GICC_PMR, 0xff);

	/* Enable interrupt distribution */
	gic_d_write_4(sc, GICD_CTLR, 0x01);

	/* Unmask attached SGI interrupts. */
	for (irq = GIC_FIRST_SGI; irq <= GIC_LAST_SGI; irq++)
		if (intr_isrc_init_on_cpu(GIC_INTR_ISRC(sc, irq), cpu))
			gic_irq_unmask(sc, irq);

	/* Unmask attached PPI interrupts. */
	for (irq = GIC_FIRST_PPI; irq <= GIC_LAST_PPI; irq++)
		if (intr_isrc_init_on_cpu(GIC_INTR_ISRC(sc, irq), cpu))
			gic_irq_unmask(sc, irq);
}
235 #endif /* SMP */
236 
237 static int
238 arm_gic_register_isrcs(struct arm_gic_softc *sc, uint32_t num)
239 {
240 	int error;
241 	uint32_t irq;
242 	struct gic_irqsrc *irqs;
243 	struct intr_irqsrc *isrc;
244 	const char *name;
245 
246 	irqs = malloc(num * sizeof(struct gic_irqsrc), M_DEVBUF,
247 	    M_WAITOK | M_ZERO);
248 
249 	name = device_get_nameunit(sc->gic_dev);
250 	for (irq = 0; irq < num; irq++) {
251 		irqs[irq].gi_irq = irq;
252 		irqs[irq].gi_pol = INTR_POLARITY_CONFORM;
253 		irqs[irq].gi_trig = INTR_TRIGGER_CONFORM;
254 
255 		isrc = &irqs[irq].gi_isrc;
256 		if (irq <= GIC_LAST_SGI) {
257 			error = intr_isrc_register(isrc, sc->gic_dev,
258 			    INTR_ISRCF_IPI, "%s,i%u", name, irq - GIC_FIRST_SGI);
259 		} else if (irq <= GIC_LAST_PPI) {
260 			error = intr_isrc_register(isrc, sc->gic_dev,
261 			    INTR_ISRCF_PPI, "%s,p%u", name, irq - GIC_FIRST_PPI);
262 		} else {
263 			error = intr_isrc_register(isrc, sc->gic_dev, 0,
264 			    "%s,s%u", name, irq - GIC_FIRST_SPI);
265 		}
266 		if (error != 0) {
267 			/* XXX call intr_isrc_deregister() */
268 			free(irqs, M_DEVBUF);
269 			return (error);
270 		}
271 	}
272 	sc->gic_irqs = irqs;
273 	sc->nirqs = num;
274 	return (0);
275 }
276 
277 static void
278 arm_gic_reserve_msi_range(device_t dev, u_int start, u_int count)
279 {
280 	struct arm_gic_softc *sc;
281 	int i;
282 
283 	sc = device_get_softc(dev);
284 
285 	KASSERT((start + count) < sc->nirqs,
286 	    ("%s: Trying to allocate too many MSI IRQs: %d + %d > %d", __func__,
287 	    start, count, sc->nirqs));
288 	for (i = 0; i < count; i++) {
289 		KASSERT(sc->gic_irqs[start + i].gi_isrc.isrc_handlers == 0,
290 		    ("%s: MSI interrupt %d already has a handler", __func__,
291 		    count + i));
292 		KASSERT(sc->gic_irqs[start + i].gi_pol == INTR_POLARITY_CONFORM,
293 		    ("%s: MSI interrupt %d already has a polarity", __func__,
294 		    count + i));
295 		KASSERT(sc->gic_irqs[start + i].gi_trig == INTR_TRIGGER_CONFORM,
296 		    ("%s: MSI interrupt %d already has a trigger", __func__,
297 		    count + i));
298 		sc->gic_irqs[start + i].gi_pol = INTR_POLARITY_HIGH;
299 		sc->gic_irqs[start + i].gi_trig = INTR_TRIGGER_EDGE;
300 		sc->gic_irqs[start + i].gi_flags |= GI_FLAG_MSI;
301 	}
302 }
303 
/*
 * Common attach body shared by the bus front ends: map the distributor
 * and CPU interface registers, register an interrupt source for every
 * IRQ the distributor reports, then bring the hardware to a known state
 * with all SPIs disabled and routed to the boot CPU.  Only a single GIC
 * instance is supported (tracked via the gic_sc global).
 */
int
arm_gic_attach(device_t dev)
{
	struct		arm_gic_softc *sc;
	int		i;
	uint32_t	icciidr, mask, nirqs;

	/* Refuse a second instance. */
	if (gic_sc)
		return (ENXIO);

	sc = device_get_softc(dev);

	if (bus_alloc_resources(dev, arm_gic_spec, sc->gic_res)) {
		device_printf(dev, "could not allocate resources\n");
		return (ENXIO);
	}

	sc->gic_dev = dev;
	gic_sc = sc;

	/* Initialize mutex */
	mtx_init(&sc->mutex, "GIC lock", NULL, MTX_SPIN);

	/* Disable interrupt forwarding to the CPU interface */
	gic_d_write_4(sc, GICD_CTLR, 0x00);

	/* Get the number of interrupts from GICD_TYPER */
	sc->typer = gic_d_read_4(sc, GICD_TYPER);
	nirqs = GICD_TYPER_I_NUM(sc->typer);

	if (arm_gic_register_isrcs(sc, nirqs)) {
		device_printf(dev, "could not register irqs\n");
		goto cleanup;
	}

	icciidr = gic_c_read_4(sc, GICC_IIDR);
	device_printf(dev,
	    "pn 0x%x, arch 0x%x, rev 0x%x, implementer 0x%x irqs %u\n",
	    GICD_IIDR_PROD(icciidr), GICD_IIDR_VAR(icciidr),
	    GICD_IIDR_REV(icciidr), GICD_IIDR_IMPL(icciidr), sc->nirqs);
	sc->gic_iidr = icciidr;

	/* Set all global interrupts to be level triggered, active low. */
	for (i = 32; i < sc->nirqs; i += 16) {
		gic_d_write_4(sc, GICD_ICFGR(i), GIC_DEFAULT_ICFGR_INIT);
	}

	/* Disable all interrupts. */
	for (i = 32; i < sc->nirqs; i += 32) {
		gic_d_write_4(sc, GICD_ICENABLER(i), 0xFFFFFFFF);
	}

	/* Find the current cpu mask */
	mask = gic_cpu_mask(sc);
	/* Set the mask so we can find this CPU to send it IPIs */
	arm_gic_map[PCPU_GET(cpuid)] = mask;
	/* Set all four targets to this cpu (replicate mask into each byte) */
	mask |= mask << 8;
	mask |= mask << 16;

	/* Highest priority everywhere; route SPIs to the boot CPU. */
	for (i = 0; i < sc->nirqs; i += 4) {
		gic_d_write_4(sc, GICD_IPRIORITYR(i), 0);
		/*
		 * NOTE(review): 'i > 32' skips GICD_ITARGETSR(32), leaving
		 * IRQs 32-35 without an explicit target even though SPIs
		 * start at 32; 'i >= 32' looks intended -- confirm before
		 * changing, targets may also be set later via gic_bind().
		 */
		if (i > 32) {
			gic_d_write_4(sc, GICD_ITARGETSR(i), mask);
		}
	}

	/* Set all the interrupts to be in Group 0 (secure) */
	for (i = 0; GIC_SUPPORT_SECEXT(sc) && i < sc->nirqs; i += 32) {
		gic_d_write_4(sc, GICD_IGROUPR(i), 0);
	}

	/* Enable CPU interface */
	gic_c_write_4(sc, GICC_CTLR, 1);

	/* Set priority mask register: accept all priorities. */
	gic_c_write_4(sc, GICC_PMR, 0xff);

	/* Enable interrupt distribution */
	gic_d_write_4(sc, GICD_CTLR, 0x01);
	return (0);

cleanup:
	arm_gic_detach(dev);
	return(ENXIO);
}
390 
391 int
392 arm_gic_detach(device_t dev)
393 {
394 	struct arm_gic_softc *sc;
395 
396 	sc = device_get_softc(dev);
397 
398 	if (sc->gic_irqs != NULL)
399 		free(sc->gic_irqs, M_DEVBUF);
400 
401 	bus_release_resources(dev, arm_gic_spec, sc->gic_res);
402 
403 	return (0);
404 }
405 
406 static int
407 arm_gic_print_child(device_t bus, device_t child)
408 {
409 	struct resource_list *rl;
410 	int rv;
411 
412 	rv = bus_print_child_header(bus, child);
413 
414 	rl = BUS_GET_RESOURCE_LIST(bus, child);
415 	if (rl != NULL) {
416 		rv += resource_list_print_type(rl, "mem", SYS_RES_MEMORY,
417 		    "%#jx");
418 		rv += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd");
419 	}
420 
421 	rv += bus_print_child_footer(bus, child);
422 
423 	return (rv);
424 }
425 
426 static struct resource *
427 arm_gic_alloc_resource(device_t bus, device_t child, int type, int *rid,
428     rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
429 {
430 	struct arm_gic_softc *sc;
431 	struct resource_list_entry *rle;
432 	struct resource_list *rl;
433 	int j;
434 
435 	KASSERT(type == SYS_RES_MEMORY, ("Invalid resoure type %x", type));
436 
437 	sc = device_get_softc(bus);
438 
439 	/*
440 	 * Request for the default allocation with a given rid: use resource
441 	 * list stored in the local device info.
442 	 */
443 	if (RMAN_IS_DEFAULT_RANGE(start, end)) {
444 		rl = BUS_GET_RESOURCE_LIST(bus, child);
445 
446 		if (type == SYS_RES_IOPORT)
447 			type = SYS_RES_MEMORY;
448 
449 		rle = resource_list_find(rl, type, *rid);
450 		if (rle == NULL) {
451 			if (bootverbose)
452 				device_printf(bus, "no default resources for "
453 				    "rid = %d, type = %d\n", *rid, type);
454 			return (NULL);
455 		}
456 		start = rle->start;
457 		end = rle->end;
458 		count = rle->count;
459 	}
460 
461 	/* Remap through ranges property */
462 	for (j = 0; j < sc->nranges; j++) {
463 		if (start >= sc->ranges[j].bus && end <
464 		    sc->ranges[j].bus + sc->ranges[j].size) {
465 			start -= sc->ranges[j].bus;
466 			start += sc->ranges[j].host;
467 			end -= sc->ranges[j].bus;
468 			end += sc->ranges[j].host;
469 			break;
470 		}
471 	}
472 	if (j == sc->nranges && sc->nranges != 0) {
473 		if (bootverbose)
474 			device_printf(bus, "Could not map resource "
475 			    "%#jx-%#jx\n", (uintmax_t)start, (uintmax_t)end);
476 
477 		return (NULL);
478 	}
479 
480 	return (bus_generic_alloc_resource(bus, child, type, rid, start, end,
481 	    count, flags));
482 }
483 
484 static int
485 arm_gic_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
486 {
487 	struct arm_gic_softc *sc;
488 
489 	sc = device_get_softc(dev);
490 
491 	switch(which) {
492 	case GIC_IVAR_HW_REV:
493 		KASSERT(GICD_IIDR_VAR(sc->gic_iidr) < 3,
494 		    ("arm_gic_read_ivar: Unknown IIDR revision %u (%.08x)",
495 		     GICD_IIDR_VAR(sc->gic_iidr), sc->gic_iidr));
496 		*result = GICD_IIDR_VAR(sc->gic_iidr);
497 		return (0);
498 	case GIC_IVAR_BUS:
499 		KASSERT(sc->gic_bus != GIC_BUS_UNKNOWN,
500 		    ("arm_gic_read_ivar: Unknown bus type"));
501 		KASSERT(sc->gic_bus <= GIC_BUS_MAX,
502 		    ("arm_gic_read_ivar: Invalid bus type %u", sc->gic_bus));
503 		*result = sc->gic_bus;
504 		return (0);
505 	}
506 
507 	return (ENOENT);
508 }
509 
510 static int
511 arm_gic_write_ivar(device_t dev, device_t child, int which, uintptr_t value)
512 {
513 	struct arm_gic_softc *sc;
514 
515 	sc = device_get_softc(dev);
516 
517 	switch(which) {
518 	case GIC_IVAR_HW_REV:
519 	case GIC_IVAR_BUS:
520 		return (EINVAL);
521 	}
522 
523 	return (ENOENT);
524 }
525 
/*
 * Top-level interrupt dispatch: acknowledge pending interrupts from
 * GICC_IAR and dispatch them until the returned interrupt number falls
 * outside the valid range.  The EOI ordering rules are documented in
 * the block comment below.
 */
int
arm_gic_intr(void *arg)
{
	struct arm_gic_softc *sc = arg;
	struct gic_irqsrc *gi;
	uint32_t irq_active_reg, irq;
	struct trapframe *tf;

	irq_active_reg = gic_c_read_4(sc, GICC_IAR);
	irq = irq_active_reg & 0x3FF;	/* low 10 bits hold the interrupt ID */

	/*
	 * 1. We do EOI here because recent read value from active interrupt
	 *    register must be used for it. Another approach is to save this
	 *    value into associated interrupt source.
	 * 2. EOI must be done on same CPU where interrupt has fired. Thus
	 *    we must ensure that interrupted thread does not migrate to
	 *    another CPU.
	 * 3. EOI cannot be delayed by any preemption which could happen on
	 *    critical_exit() used in MI intr code, when interrupt thread is
	 *    scheduled. See next point.
	 * 4. IPI_RENDEZVOUS assumes that no preemption is permitted during
	 *    an action and any use of critical_exit() could break this
	 *    assumption. See comments within smp_rendezvous_action().
	 * 5. We always return FILTER_HANDLED as this is an interrupt
	 *    controller dispatch function. Otherwise, in cascaded interrupt
	 *    case, the whole interrupt subtree would be masked.
	 */

	/* IDs >= nirqs (e.g. the 1023 "no interrupt" value) are spurious. */
	if (irq >= sc->nirqs) {
		if (gic_debug_spurious)
			device_printf(sc->gic_dev,
			    "Spurious interrupt detected: last irq: %d on CPU%d\n",
			    sc->last_irq[PCPU_GET(cpuid)], PCPU_GET(cpuid));
		return (FILTER_HANDLED);
	}

	tf = curthread->td_intr_frame;
dispatch_irq:
	gi = sc->gic_irqs + irq;
	/*
	 * Note that GIC_FIRST_SGI is zero and is not used in 'if' statement
	 * as compiler complains that comparing u_int >= 0 is always true.
	 */
	if (irq <= GIC_LAST_SGI) {
#ifdef SMP
		/* Call EOI for all IPI before dispatch. */
		gic_c_write_4(sc, GICC_EOIR, irq_active_reg);
		intr_ipi_dispatch(sgi_to_ipi[gi->gi_irq], tf);
		goto next_irq;
#else
		/* SGIs should not occur without SMP; EOI and move on. */
		device_printf(sc->gic_dev, "SGI %u on UP system detected\n",
		    irq - GIC_FIRST_SGI);
		gic_c_write_4(sc, GICC_EOIR, irq_active_reg);
		goto next_irq;
#endif
	}

	if (gic_debug_spurious)
		sc->last_irq[PCPU_GET(cpuid)] = irq;
	/* Edge-triggered sources get their EOI before dispatch (see flag). */
	if ((gi->gi_flags & GI_FLAG_EARLY_EOI) == GI_FLAG_EARLY_EOI)
		gic_c_write_4(sc, GICC_EOIR, irq_active_reg);

	if (intr_isrc_dispatch(&gi->gi_isrc, tf) != 0) {
		/* No handler accepted it: mask the source and EOI if needed. */
		gic_irq_mask(sc, irq);
		if ((gi->gi_flags & GI_FLAG_EARLY_EOI) != GI_FLAG_EARLY_EOI)
			gic_c_write_4(sc, GICC_EOIR, irq_active_reg);
		device_printf(sc->gic_dev, "Stray irq %u disabled\n", irq);
	}

next_irq:
	arm_irq_memory_barrier(irq);
	/* Keep acknowledging until no valid interrupt remains pending. */
	irq_active_reg = gic_c_read_4(sc, GICC_IAR);
	irq = irq_active_reg & 0x3FF;
	if (irq < sc->nirqs)
		goto dispatch_irq;

	return (FILTER_HANDLED);
}
605 
606 static void
607 gic_config(struct arm_gic_softc *sc, u_int irq, enum intr_trigger trig,
608     enum intr_polarity pol)
609 {
610 	uint32_t reg;
611 	uint32_t mask;
612 
613 	if (irq < GIC_FIRST_SPI)
614 		return;
615 
616 	mtx_lock_spin(&sc->mutex);
617 
618 	reg = gic_d_read_4(sc, GICD_ICFGR(irq));
619 	mask = (reg >> 2*(irq % 16)) & 0x3;
620 
621 	if (pol == INTR_POLARITY_LOW) {
622 		mask &= ~GICD_ICFGR_POL_MASK;
623 		mask |= GICD_ICFGR_POL_LOW;
624 	} else if (pol == INTR_POLARITY_HIGH) {
625 		mask &= ~GICD_ICFGR_POL_MASK;
626 		mask |= GICD_ICFGR_POL_HIGH;
627 	}
628 
629 	if (trig == INTR_TRIGGER_LEVEL) {
630 		mask &= ~GICD_ICFGR_TRIG_MASK;
631 		mask |= GICD_ICFGR_TRIG_LVL;
632 	} else if (trig == INTR_TRIGGER_EDGE) {
633 		mask &= ~GICD_ICFGR_TRIG_MASK;
634 		mask |= GICD_ICFGR_TRIG_EDGE;
635 	}
636 
637 	/* Set mask */
638 	reg = reg & ~(0x3 << 2*(irq % 16));
639 	reg = reg | (mask << 2*(irq % 16));
640 	gic_d_write_4(sc, GICD_ICFGR(irq), reg);
641 
642 	mtx_unlock_spin(&sc->mutex);
643 }
644 
645 static int
646 gic_bind(struct arm_gic_softc *sc, u_int irq, cpuset_t *cpus)
647 {
648 	uint32_t cpu, end, mask;
649 
650 	end = min(mp_ncpus, 8);
651 	for (cpu = end; cpu < MAXCPU; cpu++)
652 		if (CPU_ISSET(cpu, cpus))
653 			return (EINVAL);
654 
655 	for (mask = 0, cpu = 0; cpu < end; cpu++)
656 		if (CPU_ISSET(cpu, cpus))
657 			mask |= arm_gic_map[cpu];
658 
659 	gic_d_write_1(sc, GICD_ITARGETSR(0) + irq, mask);
660 	return (0);
661 }
662 
663 #ifdef FDT
/*
 * Decode an FDT "interrupts" specifier into a GIC interrupt number plus
 * trigger/polarity.  One-cell specifiers carry a raw IRQ number only;
 * three-cell specifiers follow the standard GIC binding decoded below.
 */
static int
gic_map_fdt(device_t dev, u_int ncells, pcell_t *cells, u_int *irqp,
    enum intr_polarity *polp, enum intr_trigger *trigp)
{

	if (ncells == 1) {
		*irqp = cells[0];
		*polp = INTR_POLARITY_CONFORM;
		*trigp = INTR_TRIGGER_CONFORM;
		return (0);
	}
	if (ncells == 3) {
		u_int irq, tripol;

		/*
		 * The 1st cell is the interrupt type:
		 *	0 = SPI
		 *	1 = PPI
		 * The 2nd cell contains the interrupt number:
		 *	[0 - 987] for SPI
		 *	[0 -  15] for PPI
		 * The 3rd cell is the flags, encoded as follows:
		 *   bits[3:0] trigger type and level flags
		 *	1 = low-to-high edge triggered
		 *	2 = high-to-low edge triggered
		 *	4 = active high level-sensitive
		 *	8 = active low level-sensitive
		 *   bits[15:8] PPI interrupt cpu mask
		 *	Each bit corresponds to each of the 8 possible cpus
		 *	attached to the GIC.  A bit set to '1' indicated
		 *	the interrupt is wired to that CPU.
		 */
		switch (cells[0]) {
		case 0:
			irq = GIC_FIRST_SPI + cells[1];
			/* SPI irq is checked later. */
			break;
		case 1:
			irq = GIC_FIRST_PPI + cells[1];
			if (irq > GIC_LAST_PPI) {
				device_printf(dev, "unsupported PPI interrupt "
				    "number %u\n", cells[1]);
				return (EINVAL);
			}
			break;
		default:
			device_printf(dev, "unsupported interrupt type "
			    "configuration %u\n", cells[0]);
			return (EINVAL);
		}

		/* Warn about flags we cannot honor (low polarity on SPIs). */
		tripol = cells[2] & 0xff;
		if (tripol & 0xf0 || (tripol & FDT_INTR_LOW_MASK &&
		    cells[0] == 0))
			device_printf(dev, "unsupported trigger/polarity "
			    "configuration 0x%02x\n", tripol);

		/*
		 * Polarity is deliberately reported as CONFORM -- only the
		 * edge/level distinction from the flags cell is honored.
		 */
		*irqp = irq;
		*polp = INTR_POLARITY_CONFORM;
		*trigp = tripol & FDT_INTR_EDGE_MASK ?
		    INTR_TRIGGER_EDGE : INTR_TRIGGER_LEVEL;
		return (0);
	}
	return (EINVAL);
}
729 #endif
730 
731 static int
732 gic_map_msi(device_t dev, struct intr_map_data_msi *msi_data, u_int *irqp,
733     enum intr_polarity *polp, enum intr_trigger *trigp)
734 {
735 	struct gic_irqsrc *gi;
736 
737 	/* Map a non-GICv2m MSI */
738 	gi = (struct gic_irqsrc *)msi_data->isrc;
739 	if (gi == NULL)
740 		return (ENXIO);
741 
742 	*irqp = gi->gi_irq;
743 
744 	/* MSI/MSI-X interrupts are always edge triggered with high polarity */
745 	*polp = INTR_POLARITY_HIGH;
746 	*trigp = INTR_TRIGGER_EDGE;
747 
748 	return (0);
749 }
750 
/*
 * Translate bus-specific interrupt map data (FDT, ACPI or MSI) into a
 * GIC interrupt number plus requested polarity and trigger mode, with
 * range/validity checks applied.  polp/trigp may be NULL when the
 * caller needs only the IRQ number.
 */
static int
gic_map_intr(device_t dev, struct intr_map_data *data, u_int *irqp,
    enum intr_polarity *polp, enum intr_trigger *trigp)
{
	u_int irq;
	enum intr_polarity pol;
	enum intr_trigger trig;
	struct arm_gic_softc *sc;
	struct intr_map_data_msi *dam;
#ifdef FDT
	struct intr_map_data_fdt *daf;
#endif
#ifdef DEV_ACPI
	struct intr_map_data_acpi *daa;
#endif

	sc = device_get_softc(dev);
	switch (data->type) {
#ifdef FDT
	case INTR_MAP_DATA_FDT:
		daf = (struct intr_map_data_fdt *)data;
		if (gic_map_fdt(dev, daf->ncells, daf->cells, &irq, &pol,
		    &trig) != 0)
			return (EINVAL);
		/* IRQs reserved for MSI must not be handed out via FDT. */
		KASSERT(irq >= sc->nirqs ||
		    (sc->gic_irqs[irq].gi_flags & GI_FLAG_MSI) == 0,
		    ("%s: Attempting to map a MSI interrupt from FDT",
		    __func__));
		break;
#endif
#ifdef DEV_ACPI
	case INTR_MAP_DATA_ACPI:
		daa = (struct intr_map_data_acpi *)data;
		irq = daa->irq;
		pol = daa->pol;
		trig = daa->trig;
		break;
#endif
	case INTR_MAP_DATA_MSI:
		/* Non-GICv2m MSI */
		dam = (struct intr_map_data_msi *)data;
		if (gic_map_msi(dev, dam, &irq, &pol, &trig) != 0)
			return (EINVAL);
		break;
	default:
		return (ENOTSUP);
	}

	/* Reject out-of-range IRQs and invalid polarity/trigger values. */
	if (irq >= sc->nirqs)
		return (EINVAL);
	if (pol != INTR_POLARITY_CONFORM && pol != INTR_POLARITY_LOW &&
	    pol != INTR_POLARITY_HIGH)
		return (EINVAL);
	if (trig != INTR_TRIGGER_CONFORM && trig != INTR_TRIGGER_EDGE &&
	    trig != INTR_TRIGGER_LEVEL)
		return (EINVAL);

	*irqp = irq;
	if (polp != NULL)
		*polp = pol;
	if (trigp != NULL)
		*trigp = trig;
	return (0);
}
815 
816 static int
817 arm_gic_map_intr(device_t dev, struct intr_map_data *data,
818     struct intr_irqsrc **isrcp)
819 {
820 	int error;
821 	u_int irq;
822 	struct arm_gic_softc *sc;
823 
824 	error = gic_map_intr(dev, data, &irq, NULL, NULL);
825 	if (error == 0) {
826 		sc = device_get_softc(dev);
827 		*isrcp = GIC_INTR_ISRC(sc, irq);
828 	}
829 	return (error);
830 }
831 
/*
 * PIC interface: configure an interrupt source when a handler is added.
 * Determines the requested polarity/trigger (fixed for MSI, from map
 * data otherwise), rejects conflicting reconfiguration of an already
 * active source, then programs the hardware and binds the interrupt.
 */
static int
arm_gic_setup_intr(device_t dev, struct intr_irqsrc *isrc,
    struct resource *res, struct intr_map_data *data)
{
	struct arm_gic_softc *sc = device_get_softc(dev);
	struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
	enum intr_trigger trig;
	enum intr_polarity pol;

	if ((gi->gi_flags & GI_FLAG_MSI) == GI_FLAG_MSI) {
		/* GICv2m MSI: configuration was fixed at reservation time. */
		pol = gi->gi_pol;
		trig = gi->gi_trig;
		KASSERT(pol == INTR_POLARITY_HIGH,
		    ("%s: MSI interrupts must be active-high", __func__));
		KASSERT(trig == INTR_TRIGGER_EDGE,
		    ("%s: MSI interrupts must be edge triggered", __func__));
	} else if (data != NULL) {
		u_int irq;

		/* Get config for resource; it must name this same IRQ. */
		if (gic_map_intr(dev, data, &irq, &pol, &trig) ||
		    gi->gi_irq != irq)
			return (EINVAL);
	} else {
		pol = INTR_POLARITY_CONFORM;
		trig = INTR_TRIGGER_CONFORM;
	}

	/* Compare config if this is not first setup. */
	if (isrc->isrc_handlers != 0) {
		if ((pol != INTR_POLARITY_CONFORM && pol != gi->gi_pol) ||
		    (trig != INTR_TRIGGER_CONFORM && trig != gi->gi_trig))
			return (EINVAL);
		else
			return (0);
	}

	/* For MSI/MSI-X we should have already configured these */
	if ((gi->gi_flags & GI_FLAG_MSI) == 0) {
		if (pol == INTR_POLARITY_CONFORM)
			pol = INTR_POLARITY_LOW;	/* just pick some */
		if (trig == INTR_TRIGGER_CONFORM)
			trig = INTR_TRIGGER_EDGE;	/* just pick some */

		gi->gi_pol = pol;
		gi->gi_trig = trig;

		/* Edge triggered interrupts need an early EOI sent */
		if (gi->gi_trig == INTR_TRIGGER_EDGE)
			gi->gi_flags |= GI_FLAG_EARLY_EOI;
	}

	/*
	 * XXX - In case that per CPU interrupt is going to be enabled in time
	 *       when SMP is already started, we need some IPI call which
	 *       enables it on others CPUs. Further, it's more complicated as
	 *       pic_enable_source() and pic_disable_source() should act on
	 *       per CPU basis only. Thus, it should be solved here somehow.
	 */
	if (isrc->isrc_flags & INTR_ISRCF_PPI)
		CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu);

	gic_config(sc, gi->gi_irq, gi->gi_trig, gi->gi_pol);
	arm_gic_bind_intr(dev, isrc);
	return (0);
}
899 
900 static int
901 arm_gic_teardown_intr(device_t dev, struct intr_irqsrc *isrc,
902     struct resource *res, struct intr_map_data *data)
903 {
904 	struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
905 
906 	if (isrc->isrc_handlers == 0 && (gi->gi_flags & GI_FLAG_MSI) == 0) {
907 		gi->gi_pol = INTR_POLARITY_CONFORM;
908 		gi->gi_trig = INTR_TRIGGER_CONFORM;
909 	}
910 	return (0);
911 }
912 
/*
 * PIC interface: unmask the interrupt.  The memory barrier ensures
 * prior stores are visible before the source is able to fire.
 */
static void
arm_gic_enable_intr(device_t dev, struct intr_irqsrc *isrc)
{
	struct arm_gic_softc *sc = device_get_softc(dev);
	struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;

	arm_irq_memory_barrier(gi->gi_irq);
	gic_irq_unmask(sc, gi->gi_irq);
}
922 
/*
 * PIC interface: mask the interrupt at the distributor.
 */
static void
arm_gic_disable_intr(device_t dev, struct intr_irqsrc *isrc)
{
	struct arm_gic_softc *sc = device_get_softc(dev);
	struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;

	gic_irq_mask(sc, gi->gi_irq);
}
931 
/*
 * PIC interface: run before handing a level-triggered interrupt to its
 * ithread.  Mask the source first, then EOI it so other interrupts can
 * be serviced while the ithread runs.
 */
static void
arm_gic_pre_ithread(device_t dev, struct intr_irqsrc *isrc)
{
	struct arm_gic_softc *sc = device_get_softc(dev);
	struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;

	arm_gic_disable_intr(dev, isrc);
	gic_c_write_4(sc, GICC_EOIR, gi->gi_irq);
}
941 
/*
 * PIC interface: run after the ithread finishes; re-enable the source
 * masked by arm_gic_pre_ithread().
 */
static void
arm_gic_post_ithread(device_t dev, struct intr_irqsrc *isrc)
{

	arm_irq_memory_barrier(0);
	arm_gic_enable_intr(dev, isrc);
}
949 
950 static void
951 arm_gic_post_filter(device_t dev, struct intr_irqsrc *isrc)
952 {
953 	struct arm_gic_softc *sc = device_get_softc(dev);
954 	struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
955 
956         /* EOI for edge-triggered done earlier. */
957 	if ((gi->gi_flags & GI_FLAG_EARLY_EOI) == GI_FLAG_EARLY_EOI)
958 		return;
959 
960 	arm_irq_memory_barrier(0);
961 	gic_c_write_4(sc, GICC_EOIR, gi->gi_irq);
962 }
963 
/*
 * PIC interface: bind an SPI to the CPUs in isrc_cpu.  When no CPU set
 * was requested, round-robin the interrupt onto the next CPU.  SGIs and
 * PPIs are banked per CPU and cannot be retargeted.
 */
static int
arm_gic_bind_intr(device_t dev, struct intr_irqsrc *isrc)
{
	struct arm_gic_softc *sc = device_get_softc(dev);
	struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;

	if (gi->gi_irq < GIC_FIRST_SPI)
		return (EINVAL);

	if (CPU_EMPTY(&isrc->isrc_cpu)) {
		gic_irq_cpu = intr_irq_next_cpu(gic_irq_cpu, &all_cpus);
		CPU_SETOF(gic_irq_cpu, &isrc->isrc_cpu);
	}
	return (gic_bind(sc, gi->gi_irq, &isrc->isrc_cpu));
}
979 
980 #ifdef SMP
981 static void
982 arm_gic_ipi_send(device_t dev, struct intr_irqsrc *isrc, cpuset_t cpus,
983     u_int ipi)
984 {
985 	struct arm_gic_softc *sc = device_get_softc(dev);
986 	struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
987 	uint32_t val = 0, i;
988 
989 	for (i = 0; i < MAXCPU; i++)
990 		if (CPU_ISSET(i, &cpus))
991 			val |= arm_gic_map[i] << GICD_SGI_TARGET_SHIFT;
992 
993 	gic_d_write_4(sc, GICD_SGIR, val | gi->gi_irq);
994 }
995 
996 static int
997 arm_gic_ipi_setup(device_t dev, u_int ipi, struct intr_irqsrc **isrcp)
998 {
999 	struct intr_irqsrc *isrc;
1000 	struct arm_gic_softc *sc = device_get_softc(dev);
1001 
1002 	if (sgi_first_unused > GIC_LAST_SGI)
1003 		return (ENOSPC);
1004 
1005 	isrc = GIC_INTR_ISRC(sc, sgi_first_unused);
1006 	sgi_to_ipi[sgi_first_unused++] = ipi;
1007 
1008 	CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu);
1009 
1010 	*isrcp = isrc;
1011 	return (0);
1012 }
1013 #endif
1014 
/*
 * Allocate 'count' contiguous MSI interrupts from the reserved range
 * [mbi_start, mbi_start + mbi_count).  The first interrupt is aligned
 * to 'maxcount' (both powers of two) as MSI requires; returns ENXIO
 * when no suitable free run exists.
 */
static int
arm_gic_alloc_msi(device_t dev, u_int mbi_start, u_int mbi_count, int count,
    int maxcount, struct intr_irqsrc **isrc)
{
	struct arm_gic_softc *sc;
	int i, irq, end_irq;
	bool found;

	KASSERT(powerof2(count), ("%s: bad count", __func__));
	KASSERT(powerof2(maxcount), ("%s: bad maxcount", __func__));

	sc = device_get_softc(dev);

	mtx_lock_spin(&sc->mutex);

	found = false;
	for (irq = mbi_start; irq < mbi_start + mbi_count; irq++) {
		/* Start on an aligned interrupt */
		if ((irq & (maxcount - 1)) != 0)
			continue;

		/* Assume we found a valid range until shown otherwise */
		found = true;

		/* Check this range is valid */
		for (end_irq = irq; end_irq != irq + count; end_irq++) {
			/* No free interrupts */
			if (end_irq == mbi_start + mbi_count) {
				found = false;
				break;
			}

			KASSERT((sc->gic_irqs[end_irq].gi_flags & GI_FLAG_MSI)!= 0,
			    ("%s: Non-MSI interrupt found", __func__));

			/* This is already used */
			if ((sc->gic_irqs[end_irq].gi_flags & GI_FLAG_MSI_USED) ==
			    GI_FLAG_MSI_USED) {
				found = false;
				break;
			}
		}
		if (found)
			break;
	}

	/* Not enough interrupts were found */
	if (!found || irq == mbi_start + mbi_count) {
		mtx_unlock_spin(&sc->mutex);
		return (ENXIO);
	}

	for (i = 0; i < count; i++) {
		/* Mark the interrupt as used */
		sc->gic_irqs[irq + i].gi_flags |= GI_FLAG_MSI_USED;
	}
	mtx_unlock_spin(&sc->mutex);

	/* Hand the sources back outside the spin lock. */
	for (i = 0; i < count; i++)
		isrc[i] = (struct intr_irqsrc *)&sc->gic_irqs[irq + i];

	return (0);
}
1078 
1079 static int
1080 arm_gic_release_msi(device_t dev, int count, struct intr_irqsrc **isrc)
1081 {
1082 	struct arm_gic_softc *sc;
1083 	struct gic_irqsrc *gi;
1084 	int i;
1085 
1086 	sc = device_get_softc(dev);
1087 
1088 	mtx_lock_spin(&sc->mutex);
1089 	for (i = 0; i < count; i++) {
1090 		gi = (struct gic_irqsrc *)isrc[i];
1091 
1092 		KASSERT((gi->gi_flags & GI_FLAG_MSI_USED) == GI_FLAG_MSI_USED,
1093 		    ("%s: Trying to release an unused MSI-X interrupt",
1094 		    __func__));
1095 
1096 		gi->gi_flags &= ~GI_FLAG_MSI_USED;
1097 	}
1098 	mtx_unlock_spin(&sc->mutex);
1099 
1100 	return (0);
1101 }
1102 
1103 static int
1104 arm_gic_alloc_msix(device_t dev, u_int mbi_start, u_int mbi_count,
1105     struct intr_irqsrc **isrc)
1106 {
1107 	struct arm_gic_softc *sc;
1108 	int irq;
1109 
1110 	sc = device_get_softc(dev);
1111 
1112 	mtx_lock_spin(&sc->mutex);
1113 	/* Find an unused interrupt */
1114 	for (irq = mbi_start; irq < mbi_start + mbi_count; irq++) {
1115 		KASSERT((sc->gic_irqs[irq].gi_flags & GI_FLAG_MSI) != 0,
1116 		    ("%s: Non-MSI interrupt found", __func__));
1117 		if ((sc->gic_irqs[irq].gi_flags & GI_FLAG_MSI_USED) == 0)
1118 			break;
1119 	}
1120 	/* No free interrupt was found */
1121 	if (irq == mbi_start + mbi_count) {
1122 		mtx_unlock_spin(&sc->mutex);
1123 		return (ENXIO);
1124 	}
1125 
1126 	/* Mark the interrupt as used */
1127 	sc->gic_irqs[irq].gi_flags |= GI_FLAG_MSI_USED;
1128 	mtx_unlock_spin(&sc->mutex);
1129 
1130 	*isrc = (struct intr_irqsrc *)&sc->gic_irqs[irq];
1131 
1132 	return (0);
1133 }
1134 
1135 static int
1136 arm_gic_release_msix(device_t dev, struct intr_irqsrc *isrc)
1137 {
1138 	struct arm_gic_softc *sc;
1139 	struct gic_irqsrc *gi;
1140 
1141 	sc = device_get_softc(dev);
1142 	gi = (struct gic_irqsrc *)isrc;
1143 
1144 	KASSERT((gi->gi_flags & GI_FLAG_MSI_USED) == GI_FLAG_MSI_USED,
1145 	    ("%s: Trying to release an unused MSI-X interrupt", __func__));
1146 
1147 	mtx_lock_spin(&sc->mutex);
1148 	gi->gi_flags &= ~GI_FLAG_MSI_USED;
1149 	mtx_unlock_spin(&sc->mutex);
1150 
1151 	return (0);
1152 }
1153 
/*
 * Kernel-object method table for the GIC interrupt controller itself;
 * the MSI/MSI-X doorbell frame (GICv2m) is a separate child driver below.
 */
static device_method_t arm_gic_methods[] = {
	/* Bus interface */
	DEVMETHOD(bus_print_child,	arm_gic_print_child),
	DEVMETHOD(bus_add_child,	bus_generic_add_child),
	DEVMETHOD(bus_alloc_resource,	arm_gic_alloc_resource),
	DEVMETHOD(bus_release_resource,	bus_generic_release_resource),
	DEVMETHOD(bus_activate_resource,bus_generic_activate_resource),
	DEVMETHOD(bus_read_ivar,	arm_gic_read_ivar),
	DEVMETHOD(bus_write_ivar,	arm_gic_write_ivar),

	/* Interrupt controller interface */
	DEVMETHOD(pic_disable_intr,	arm_gic_disable_intr),
	DEVMETHOD(pic_enable_intr,	arm_gic_enable_intr),
	DEVMETHOD(pic_map_intr,		arm_gic_map_intr),
	DEVMETHOD(pic_setup_intr,	arm_gic_setup_intr),
	DEVMETHOD(pic_teardown_intr,	arm_gic_teardown_intr),
	DEVMETHOD(pic_post_filter,	arm_gic_post_filter),
	DEVMETHOD(pic_post_ithread,	arm_gic_post_ithread),
	DEVMETHOD(pic_pre_ithread,	arm_gic_pre_ithread),
#ifdef SMP
	DEVMETHOD(pic_bind_intr,	arm_gic_bind_intr),
	DEVMETHOD(pic_init_secondary,	arm_gic_init_secondary),
	DEVMETHOD(pic_ipi_send,		arm_gic_ipi_send),
	DEVMETHOD(pic_ipi_setup,	arm_gic_ipi_setup),
#endif

	/* GIC */
	DEVMETHOD(gic_reserve_msi_range, arm_gic_reserve_msi_range),
	DEVMETHOD(gic_alloc_msi,	arm_gic_alloc_msi),
	DEVMETHOD(gic_release_msi,	arm_gic_release_msi),
	DEVMETHOD(gic_alloc_msix,	arm_gic_alloc_msix),
	DEVMETHOD(gic_release_msix,	arm_gic_release_msix),

	{ 0, 0 }
};

/* Base class; bus-specific front ends attach on top of this. */
DEFINE_CLASS_0(gic, arm_gic_driver, arm_gic_methods,
    sizeof(struct arm_gic_softc));
1192 
1193 /*
1194  * GICv2m support -- the GICv2 MSI/MSI-X controller.
1195  */
1196 
1197 #define	GICV2M_MSI_TYPER	0x008
1198 #define	 MSI_TYPER_SPI_BASE(x)	(((x) >> 16) & 0x3ff)
1199 #define	 MSI_TYPER_SPI_COUNT(x)	(((x) >> 0) & 0x3ff)
1200 #define	GICv2M_MSI_SETSPI_NS	0x040
1201 #define	GICV2M_MSI_IIDR		0xFCC
1202 
1203 int
1204 arm_gicv2m_attach(device_t dev)
1205 {
1206 	struct arm_gicv2m_softc *sc;
1207 	uint32_t typer;
1208 	int rid;
1209 
1210 	sc = device_get_softc(dev);
1211 
1212 	rid = 0;
1213 	sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1214 	    RF_ACTIVE);
1215 	if (sc->sc_mem == NULL) {
1216 		device_printf(dev, "Unable to allocate resources\n");
1217 		return (ENXIO);
1218 	}
1219 
1220 	typer = bus_read_4(sc->sc_mem, GICV2M_MSI_TYPER);
1221 	sc->sc_spi_start = MSI_TYPER_SPI_BASE(typer);
1222 	sc->sc_spi_count = MSI_TYPER_SPI_COUNT(typer);
1223 
1224 	/* Reserve these interrupts for MSI/MSI-X use */
1225 	GIC_RESERVE_MSI_RANGE(device_get_parent(dev), sc->sc_spi_start,
1226 	    sc->sc_spi_count);
1227 
1228 	intr_msi_register(dev, sc->sc_xref);
1229 
1230 	if (bootverbose)
1231 		device_printf(dev, "using spi %u to %u\n", sc->sc_spi_start,
1232 		    sc->sc_spi_start + sc->sc_spi_count - 1);
1233 
1234 	return (0);
1235 }
1236 
1237 static int
1238 arm_gicv2m_alloc_msi(device_t dev, device_t child, int count, int maxcount,
1239     device_t *pic, struct intr_irqsrc **srcs)
1240 {
1241 	struct arm_gicv2m_softc *sc;
1242 	int error;
1243 
1244 	sc = device_get_softc(dev);
1245 	error = GIC_ALLOC_MSI(device_get_parent(dev), sc->sc_spi_start,
1246 	    sc->sc_spi_count, count, maxcount, srcs);
1247 	if (error != 0)
1248 		return (error);
1249 
1250 	*pic = dev;
1251 	return (0);
1252 }
1253 
/* MSI release: simply forwarded to the parent GIC. */
static int
arm_gicv2m_release_msi(device_t dev, device_t child, int count,
    struct intr_irqsrc **isrc)
{
	return (GIC_RELEASE_MSI(device_get_parent(dev), count, isrc));
}
1260 
1261 static int
1262 arm_gicv2m_alloc_msix(device_t dev, device_t child, device_t *pic,
1263     struct intr_irqsrc **isrcp)
1264 {
1265 	struct arm_gicv2m_softc *sc;
1266 	int error;
1267 
1268 	sc = device_get_softc(dev);
1269 	error = GIC_ALLOC_MSIX(device_get_parent(dev), sc->sc_spi_start,
1270 	    sc->sc_spi_count, isrcp);
1271 	if (error != 0)
1272 		return (error);
1273 
1274 	*pic = dev;
1275 	return (0);
1276 }
1277 
/* MSI-X release: simply forwarded to the parent GIC. */
static int
arm_gicv2m_release_msix(device_t dev, device_t child, struct intr_irqsrc *isrc)
{
	return (GIC_RELEASE_MSIX(device_get_parent(dev), isrc));
}
1283 
1284 static int
1285 arm_gicv2m_map_msi(device_t dev, device_t child, struct intr_irqsrc *isrc,
1286     uint64_t *addr, uint32_t *data)
1287 {
1288 	struct arm_gicv2m_softc *sc = device_get_softc(dev);
1289 	struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
1290 
1291 	*addr = vtophys(rman_get_virtual(sc->sc_mem)) + GICv2M_MSI_SETSPI_NS;
1292 	*data = gi->gi_irq;
1293 
1294 	return (0);
1295 }
1296 
/* Kernel-object method table for the GICv2m MSI/MSI-X doorbell frame. */
static device_method_t arm_gicv2m_methods[] = {
	/* Device interface */
	DEVMETHOD(device_attach,	arm_gicv2m_attach),

	/* MSI/MSI-X */
	DEVMETHOD(msi_alloc_msi,	arm_gicv2m_alloc_msi),
	DEVMETHOD(msi_release_msi,	arm_gicv2m_release_msi),
	DEVMETHOD(msi_alloc_msix,	arm_gicv2m_alloc_msix),
	DEVMETHOD(msi_release_msix,	arm_gicv2m_release_msix),
	DEVMETHOD(msi_map_msi,		arm_gicv2m_map_msi),

	/* End */
	DEVMETHOD_END
};

/* Base class; bus-specific front ends attach on top of this. */
DEFINE_CLASS_0(gicv2m, arm_gicv2m_driver, arm_gicv2m_methods,
    sizeof(struct arm_gicv2m_softc));
1314