xref: /freebsd/sys/arm/arm/gic.c (revision f56f82e0)
1 /*-
2  * Copyright (c) 2011 The FreeBSD Foundation
3  * All rights reserved.
4  *
5  * Developed by Damjan Marion <damjan.marion@gmail.com>
6  *
7  * Based on OMAP4 GIC code by Ben Gray
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. The name of the company nor the name of the author may be used to
18  *    endorse or promote products derived from this software without specific
19  *    prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
25  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31  * SUCH DAMAGE.
32  */
33 
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
36 
37 #include "opt_platform.h"
38 
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/bus.h>
42 #include <sys/kernel.h>
43 #include <sys/ktr.h>
44 #include <sys/module.h>
45 #include <sys/malloc.h>
46 #include <sys/rman.h>
47 #include <sys/pcpu.h>
48 #include <sys/proc.h>
49 #include <sys/cpuset.h>
50 #include <sys/lock.h>
51 #include <sys/mutex.h>
52 #include <sys/smp.h>
53 #ifdef INTRNG
54 #include <sys/sched.h>
55 #endif
56 
57 #include <vm/vm.h>
58 #include <vm/pmap.h>
59 
60 #include <machine/bus.h>
61 #include <machine/intr.h>
62 #include <machine/smp.h>
63 
64 #ifdef FDT
65 #include <dev/fdt/fdt_intr.h>
66 #include <dev/ofw/ofw_bus_subr.h>
67 #endif
68 
69 #include <arm/arm/gic.h>
70 #include <arm/arm/gic_common.h>
71 
72 #ifdef INTRNG
73 #include "pic_if.h"
74 #include "msi_if.h"
75 #endif
76 
77 /* We are using GICv2 register naming */
78 
79 /* Distributor Registers (GICD_*) are defined in the GIC headers above */
80 
81 /* CPU Registers */
82 #define GICC_CTLR		0x0000			/* v1 ICCICR */
83 #define GICC_PMR		0x0004			/* v1 ICCPMR */
84 #define GICC_BPR		0x0008			/* v1 ICCBPR */
85 #define GICC_IAR		0x000C			/* v1 ICCIAR */
86 #define GICC_EOIR		0x0010			/* v1 ICCEOIR */
87 #define GICC_RPR		0x0014			/* v1 ICCRPR */
88 #define GICC_HPPIR		0x0018			/* v1 ICCHPIR */
89 #define GICC_ABPR		0x001C			/* v1 ICCABPR */
90 #define GICC_IIDR		0x00FC			/* v1 ICCIIDR */
91 
92 /* TYPER Registers */
93 #define	GICD_TYPER_SECURITYEXT	0x400
94 #define	GIC_SUPPORT_SECEXT(_sc)	\
95     ((_sc->typer & GICD_TYPER_SECURITYEXT) == GICD_TYPER_SECURITYEXT)
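/*
 * GICD_TYPER.SecurityExtn (bit 10) is set when the distributor implements
 * the GIC Security Extensions.  GIC_SUPPORT_SECEXT() is used below to
 * decide whether every interrupt has to be placed explicitly in Group 0
 * (Secure) via GICD_IGROUPR.
 */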
96 
97 
98 #ifndef	GIC_DEFAULT_ICFGR_INIT
99 #define	GIC_DEFAULT_ICFGR_INIT	0x00000000
100 #endif
101 
102 #ifdef INTRNG
103 struct gic_irqsrc {
104 	struct intr_irqsrc	gi_isrc;
105 	uint32_t		gi_irq;
106 	enum intr_polarity	gi_pol;
107 	enum intr_trigger	gi_trig;
108 #define GI_FLAG_EARLY_EOI	(1 << 0)
109 #define GI_FLAG_MSI		(1 << 1) /* This interrupt source should only */
110 					 /* be used for MSI/MSI-X interrupts */
111 #define GI_FLAG_MSI_USED	(1 << 2) /* This irq is already allocated */
112 					 /* for a MSI/MSI-X interrupt */
113 	u_int			gi_flags;
114 };
115 
116 static u_int gic_irq_cpu;
117 static int arm_gic_bind_intr(device_t dev, struct intr_irqsrc *isrc);
118 
119 #ifdef SMP
120 static u_int sgi_to_ipi[GIC_LAST_SGI - GIC_FIRST_SGI + 1];
121 static u_int sgi_first_unused = GIC_FIRST_SGI;
122 #endif
123 
124 #define GIC_INTR_ISRC(sc, irq)	(&sc->gic_irqs[irq].gi_isrc)
125 #else /* !INTRNG */
126 static struct ofw_compat_data compat_data[] = {
127 	{"arm,gic",		true},	/* Non-standard, used in FreeBSD dts. */
128 	{"arm,gic-400",		true},
129 	{"arm,cortex-a15-gic",	true},
130 	{"arm,cortex-a9-gic",	true},
131 	{"arm,cortex-a7-gic",	true},
132 	{"arm,arm11mp-gic",	true},
133 	{"brcm,brahma-b15-gic",	true},
134 	{"qcom,msm-qgic2",	true},
135 	{NULL,			false}
136 };
137 #endif
138 
139 static struct resource_spec arm_gic_spec[] = {
140 	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },	/* Distributor registers */
141 	{ SYS_RES_MEMORY,	1,	RF_ACTIVE },	/* CPU Interrupt Intf. registers */
142 #ifdef INTRNG
143 	{ SYS_RES_IRQ,	  0, RF_ACTIVE | RF_OPTIONAL }, /* Parent interrupt */
144 #endif
145 	{ -1, 0 }
146 };
147 
148 static u_int arm_gic_map[MAXCPU];
149 
150 static struct arm_gic_softc *gic_sc = NULL;
151 
152 #define	gic_c_read_4(_sc, _reg)		\
153     bus_space_read_4((_sc)->gic_c_bst, (_sc)->gic_c_bsh, (_reg))
154 #define	gic_c_write_4(_sc, _reg, _val)		\
155     bus_space_write_4((_sc)->gic_c_bst, (_sc)->gic_c_bsh, (_reg), (_val))
156 #define	gic_d_read_4(_sc, _reg)		\
157     bus_space_read_4((_sc)->gic_d_bst, (_sc)->gic_d_bsh, (_reg))
158 #define	gic_d_write_1(_sc, _reg, _val)		\
159     bus_space_write_1((_sc)->gic_d_bst, (_sc)->gic_d_bsh, (_reg), (_val))
160 #define	gic_d_write_4(_sc, _reg, _val)		\
161     bus_space_write_4((_sc)->gic_d_bst, (_sc)->gic_d_bsh, (_reg), (_val))
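/*
 * These accessors are used with the GICD_ and GICC_ offset macros shared
 * through gic.h and gic_common.h.  Registers such as GICD_ISENABLER and
 * GICD_ICENABLER pack one bit per interrupt into 32-bit words, so the
 * offset macros take an IRQ number and resolve to the word holding it,
 * while GICD_I_MASK(irq) supplies the bit within that word (see
 * gic_irq_unmask() below for a typical pairing).
 */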
162 
163 #ifndef INTRNG
164 static int gic_config_irq(int irq, enum intr_trigger trig,
165     enum intr_polarity pol);
166 static void gic_post_filter(void *);
167 #endif
168 
169 #ifdef INTRNG
170 static inline void
171 gic_irq_unmask(struct arm_gic_softc *sc, u_int irq)
172 {
173 
174 	gic_d_write_4(sc, GICD_ISENABLER(irq), GICD_I_MASK(irq));
175 }
176 
177 static inline void
178 gic_irq_mask(struct arm_gic_softc *sc, u_int irq)
179 {
180 
181 	gic_d_write_4(sc, GICD_ICENABLER(irq), GICD_I_MASK(irq));
182 }
183 #endif
184 
185 static uint8_t
186 gic_cpu_mask(struct arm_gic_softc *sc)
187 {
188 	uint32_t mask;
189 	int i;
190 
191 	/* Read the current cpuid mask by reading ITARGETSR{0..7} */
192 	for (i = 0; i < 8; i++) {
193 		mask = gic_d_read_4(sc, GICD_ITARGETSR(4 * i));
194 		if (mask != 0)
195 			break;
196 	}
197 	/* No mask found, assume we are on CPU interface 0 */
198 	if (mask == 0)
199 		return (1);
200 
201 	/* Collect the mask in the lower byte */
202 	mask |= mask >> 16;
203 	mask |= mask >> 8;
204 
205 	return (mask);
206 }
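/*
 * GICD_ITARGETSR0-7 cover interrupts 0-31 (SGIs and PPIs) and are banked
 * per CPU interface: each implemented byte-wide field reads back as the
 * mask of the CPU interface doing the read.  On CPU interface 2, for
 * example, a read typically returns 0x04040404, which the shifts above
 * collapse into the single-byte mask 0x04 that is later used for
 * GICD_SGIR and GICD_ITARGETSR writes.
 */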
207 
208 #ifdef SMP
209 #ifdef INTRNG
210 static void
211 arm_gic_init_secondary(device_t dev)
212 {
213 	struct arm_gic_softc *sc = device_get_softc(dev);
214 	u_int irq, cpu;
215 
216 	/* Set the mask so we can find this CPU to send it IPIs */
217 	cpu = PCPU_GET(cpuid);
218 	arm_gic_map[cpu] = gic_cpu_mask(sc);
219 
220 	for (irq = 0; irq < sc->nirqs; irq += 4)
221 		gic_d_write_4(sc, GICD_IPRIORITYR(irq), 0);
222 
223 	/* Set all the interrupts to be in Group 0 (secure) */
224 	for (irq = 0; GIC_SUPPORT_SECEXT(sc) && irq < sc->nirqs; irq += 32) {
225 		gic_d_write_4(sc, GICD_IGROUPR(irq), 0);
226 	}
227 
228 	/* Enable CPU interface */
229 	gic_c_write_4(sc, GICC_CTLR, 1);
230 
231 	/* Set priority mask register. */
232 	gic_c_write_4(sc, GICC_PMR, 0xff);
233 
234 	/* Enable interrupt distribution */
235 	gic_d_write_4(sc, GICD_CTLR, 0x01);
236 
237 	/* Unmask attached SGI interrupts. */
238 	for (irq = GIC_FIRST_SGI; irq <= GIC_LAST_SGI; irq++)
239 		if (intr_isrc_init_on_cpu(GIC_INTR_ISRC(sc, irq), cpu))
240 			gic_irq_unmask(sc, irq);
241 
242 	/* Unmask attached PPI interrupts. */
243 	for (irq = GIC_FIRST_PPI; irq <= GIC_LAST_PPI; irq++)
244 		if (intr_isrc_init_on_cpu(GIC_INTR_ISRC(sc, irq), cpu))
245 			gic_irq_unmask(sc, irq);
246 }
247 #else
248 static void
249 arm_gic_init_secondary(device_t dev)
250 {
251 	struct arm_gic_softc *sc = device_get_softc(dev);
252 	int i;
253 
254 	/* Set the mask so we can find this CPU to send it IPIs */
255 	arm_gic_map[PCPU_GET(cpuid)] = gic_cpu_mask(sc);
256 
257 	for (i = 0; i < sc->nirqs; i += 4)
258 		gic_d_write_4(sc, GICD_IPRIORITYR(i), 0);
259 
260 	/* Set all the interrupts to be in Group 0 (secure) */
261 	for (i = 0; GIC_SUPPORT_SECEXT(sc) && i < sc->nirqs; i += 32) {
262 		gic_d_write_4(sc, GICD_IGROUPR(i), 0);
263 	}
264 
265 	/* Enable CPU interface */
266 	gic_c_write_4(sc, GICC_CTLR, 1);
267 
268 	/* Set priority mask register. */
269 	gic_c_write_4(sc, GICC_PMR, 0xff);
270 
271 	/* Enable interrupt distribution */
272 	gic_d_write_4(sc, GICD_CTLR, 0x01);
273 
274 	/*
275 	 * Activate the timer interrupts: virtual, secure, and non-secure.
276 	 */
277 	gic_d_write_4(sc, GICD_ISENABLER(27), GICD_I_MASK(27));
278 	gic_d_write_4(sc, GICD_ISENABLER(29), GICD_I_MASK(29));
279 	gic_d_write_4(sc, GICD_ISENABLER(30), GICD_I_MASK(30));
280 }
281 #endif /* INTRNG */
282 #endif /* SMP */
283 
284 #ifndef INTRNG
285 int
286 gic_decode_fdt(phandle_t iparent, pcell_t *intr, int *interrupt,
287     int *trig, int *pol)
288 {
289 	static u_int num_intr_cells;
290 	static phandle_t self;
291 	struct ofw_compat_data *ocd;
292 
293 	if (self == 0) {
294 		for (ocd = compat_data; ocd->ocd_str != NULL; ocd++) {
295 			if (ofw_bus_node_is_compatible(iparent, ocd->ocd_str)) {
296 				self = iparent;
297 				break;
298 			}
299 		}
300 	}
301 	if (self != iparent)
302 		return (ENXIO);
303 
304 	if (num_intr_cells == 0) {
305 		if (OF_searchencprop(OF_node_from_xref(iparent),
306 		    "#interrupt-cells", &num_intr_cells,
307 		    sizeof(num_intr_cells)) == -1) {
308 			num_intr_cells = 1;
309 		}
310 	}
311 
312 	if (num_intr_cells == 1) {
313 		*interrupt = fdt32_to_cpu(intr[0]);
314 		*trig = INTR_TRIGGER_CONFORM;
315 		*pol = INTR_POLARITY_CONFORM;
316 	} else {
317 		if (fdt32_to_cpu(intr[0]) == 0)
318 			*interrupt = fdt32_to_cpu(intr[1]) + GIC_FIRST_SPI;
319 		else
320 			*interrupt = fdt32_to_cpu(intr[1]) + GIC_FIRST_PPI;
321 		/*
322 		 * In intr[2], bits[3:0] are trigger type and level flags.
323 		 *   1 = low-to-high edge triggered
324 		 *   2 = high-to-low edge triggered
325 		 *   4 = active high level-sensitive
326 		 *   8 = active low level-sensitive
327 		 * The hardware only supports active-high-level or rising-edge
328 		 * for SPIs
329 		 */
330 		if (*interrupt >= GIC_FIRST_SPI &&
331 		    fdt32_to_cpu(intr[2]) & 0x0a) {
332 			printf("unsupported trigger/polarity configuration "
333 			    "0x%02x\n", fdt32_to_cpu(intr[2]) & 0x0f);
334 		}
335 		*pol  = INTR_POLARITY_CONFORM;
336 		if (fdt32_to_cpu(intr[2]) & 0x03)
337 			*trig = INTR_TRIGGER_EDGE;
338 		else
339 			*trig = INTR_TRIGGER_LEVEL;
340 	}
341 	return (0);
342 }
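/*
 * For example, the common three-cell specifier
 *	interrupts = <0 77 4>;
 * describes SPI 77, active-high level-sensitive, and decodes above to
 * *interrupt = 77 + GIC_FIRST_SPI (109) with INTR_TRIGGER_LEVEL.
 */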
343 #endif
344 
345 #ifdef INTRNG
346 static int
347 arm_gic_register_isrcs(struct arm_gic_softc *sc, uint32_t num)
348 {
349 	int error;
350 	uint32_t irq;
351 	struct gic_irqsrc *irqs;
352 	struct intr_irqsrc *isrc;
353 	const char *name;
354 
355 	irqs = malloc(num * sizeof(struct gic_irqsrc), M_DEVBUF,
356 	    M_WAITOK | M_ZERO);
357 
358 	name = device_get_nameunit(sc->gic_dev);
359 	for (irq = 0; irq < num; irq++) {
360 		irqs[irq].gi_irq = irq;
361 		irqs[irq].gi_pol = INTR_POLARITY_CONFORM;
362 		irqs[irq].gi_trig = INTR_TRIGGER_CONFORM;
363 
364 		isrc = &irqs[irq].gi_isrc;
365 		if (irq <= GIC_LAST_SGI) {
366 			error = intr_isrc_register(isrc, sc->gic_dev,
367 			    INTR_ISRCF_IPI, "%s,i%u", name, irq - GIC_FIRST_SGI);
368 		} else if (irq <= GIC_LAST_PPI) {
369 			error = intr_isrc_register(isrc, sc->gic_dev,
370 			    INTR_ISRCF_PPI, "%s,p%u", name, irq - GIC_FIRST_PPI);
371 		} else {
372 			error = intr_isrc_register(isrc, sc->gic_dev, 0,
373 			    "%s,s%u", name, irq - GIC_FIRST_SPI);
374 		}
375 		if (error != 0) {
376 			/* XXX call intr_isrc_deregister() */
377 			free(irqs, M_DEVBUF);
378 			return (error);
379 		}
380 	}
381 	sc->gic_irqs = irqs;
382 	sc->nirqs = num;
383 	return (0);
384 }
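/*
 * The names encode the interrupt class relative to its base; with device
 * gic0, for example, IRQ 1 (an SGI) registers as "gic0,i1", IRQ 29 (a
 * PPI) as "gic0,p13" and IRQ 54 (an SPI) as "gic0,s22".
 */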
385 
386 static void
387 arm_gic_reserve_msi_range(device_t dev, u_int start, u_int count)
388 {
389 	struct arm_gic_softc *sc;
390 	int i;
391 
392 	sc = device_get_softc(dev);
393 
394 	KASSERT((start + count) < sc->nirqs,
395 	    ("%s: Trying to allocate too many MSI IRQs: %d + %d >= %d", __func__,
396 	    start, count, sc->nirqs));
397 	for (i = 0; i < count; i++) {
398 		KASSERT(sc->gic_irqs[start + i].gi_isrc.isrc_handlers == 0,
399 		    ("%s: MSI interrupt %d already has a handler", __func__,
400 		    start + i));
401 		KASSERT(sc->gic_irqs[start + i].gi_pol == INTR_POLARITY_CONFORM,
402 		    ("%s: MSI interrupt %d already has a polarity", __func__,
403 		    start + i));
404 		KASSERT(sc->gic_irqs[start + i].gi_trig == INTR_TRIGGER_CONFORM,
405 		    ("%s: MSI interrupt %d already has a trigger", __func__,
406 		    start + i));
407 		sc->gic_irqs[start + i].gi_pol = INTR_POLARITY_HIGH;
408 		sc->gic_irqs[start + i].gi_trig = INTR_TRIGGER_EDGE;
409 		sc->gic_irqs[start + i].gi_flags |= GI_FLAG_MSI;
410 	}
411 }
412 #endif
413 
414 int
415 arm_gic_attach(device_t dev)
416 {
417 	struct		arm_gic_softc *sc;
418 	int		i;
419 	uint32_t	icciidr, mask, nirqs;
420 
421 	if (gic_sc)
422 		return (ENXIO);
423 
424 	sc = device_get_softc(dev);
425 
426 	if (bus_alloc_resources(dev, arm_gic_spec, sc->gic_res)) {
427 		device_printf(dev, "could not allocate resources\n");
428 		return (ENXIO);
429 	}
430 
431 	sc->gic_dev = dev;
432 	gic_sc = sc;
433 
434 	/* Initialize mutex */
435 	mtx_init(&sc->mutex, "GIC lock", "", MTX_SPIN);
436 
437 	/* Distributor Interface */
438 	sc->gic_d_bst = rman_get_bustag(sc->gic_res[0]);
439 	sc->gic_d_bsh = rman_get_bushandle(sc->gic_res[0]);
440 
441 	/* CPU Interface */
442 	sc->gic_c_bst = rman_get_bustag(sc->gic_res[1]);
443 	sc->gic_c_bsh = rman_get_bushandle(sc->gic_res[1]);
444 
445 	/* Disable interrupt forwarding to the CPU interface */
446 	gic_d_write_4(sc, GICD_CTLR, 0x00);
447 
448 	/* Get the number of interrupts */
449 	sc->typer = gic_d_read_4(sc, GICD_TYPER);
450 	nirqs = GICD_TYPER_I_NUM(sc->typer);
451 
452 #ifdef INTRNG
453 	if (arm_gic_register_isrcs(sc, nirqs)) {
454 		device_printf(dev, "could not register irqs\n");
455 		goto cleanup;
456 	}
457 #else
458 	sc->nirqs = nirqs;
459 
460 	/* Set up function pointers */
461 	arm_post_filter = gic_post_filter;
462 	arm_config_irq = gic_config_irq;
463 #endif
464 
465 	icciidr = gic_c_read_4(sc, GICC_IIDR);
466 	device_printf(dev,
467 	    "pn 0x%x, arch 0x%x, rev 0x%x, implementer 0x%x, irqs %u\n",
468 	    GICD_IIDR_PROD(icciidr), GICD_IIDR_VAR(icciidr),
469 	    GICD_IIDR_REV(icciidr), GICD_IIDR_IMPL(icciidr), sc->nirqs);
470 #ifdef INTRNG
471 	sc->gic_iidr = icciidr;
472 #endif
473 
474 	/* Set all global interrupts to the default (level) trigger mode. */
475 	for (i = 32; i < sc->nirqs; i += 16) {
476 		gic_d_write_4(sc, GICD_ICFGR(i), GIC_DEFAULT_ICFGR_INIT);
477 	}
478 
479 	/* Disable all interrupts. */
480 	for (i = 32; i < sc->nirqs; i += 32) {
481 		gic_d_write_4(sc, GICD_ICENABLER(i), 0xFFFFFFFF);
482 	}
483 
484 	/* Find the current cpu mask */
485 	mask = gic_cpu_mask(sc);
486 	/* Set the mask so we can find this CPU to send it IPIs */
487 	arm_gic_map[PCPU_GET(cpuid)] = mask;
488 	/* Set all four targets to this cpu */
489 	mask |= mask << 8;
490 	mask |= mask << 16;
491 
492 	for (i = 0; i < sc->nirqs; i += 4) {
493 		gic_d_write_4(sc, GICD_IPRIORITYR(i), 0);
494 		if (i > 32) {
495 			gic_d_write_4(sc, GICD_ITARGETSR(i), mask);
496 		}
497 	}
498 
499 	/* Set all the interrupts to be in Group 0 (secure) */
500 	for (i = 0; GIC_SUPPORT_SECEXT(sc) && i < sc->nirqs; i += 32) {
501 		gic_d_write_4(sc, GICD_IGROUPR(i), 0);
502 	}
503 
504 	/* Enable CPU interface */
505 	gic_c_write_4(sc, GICC_CTLR, 1);
506 
507 	/* Set priority mask register. */
508 	gic_c_write_4(sc, GICC_PMR, 0xff);
509 
510 	/* Enable interrupt distribution */
511 	gic_d_write_4(sc, GICD_CTLR, 0x01);
512 	return (0);
513 
514 #ifdef INTRNG
515 cleanup:
516 	arm_gic_detach(dev);
517 	return(ENXIO);
518 #endif
519 }
520 
521 int
522 arm_gic_detach(device_t dev)
523 {
524 #ifdef INTRNG
525 	struct arm_gic_softc *sc;
526 
527 	sc = device_get_softc(dev);
528 
529 	if (sc->gic_irqs != NULL)
530 		free(sc->gic_irqs, M_DEVBUF);
531 
532 	bus_release_resources(dev, arm_gic_spec, sc->gic_res);
533 #endif
534 
535 	return (0);
536 }
537 
538 #ifdef INTRNG
539 static int
540 arm_gic_print_child(device_t bus, device_t child)
541 {
542 	struct resource_list *rl;
543 	int rv;
544 
545 	rv = bus_print_child_header(bus, child);
546 
547 	rl = BUS_GET_RESOURCE_LIST(bus, child);
548 	if (rl != NULL) {
549 		rv += resource_list_print_type(rl, "mem", SYS_RES_MEMORY,
550 		    "%#jx");
551 		rv += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd");
552 	}
553 
554 	rv += bus_print_child_footer(bus, child);
555 
556 	return (rv);
557 }
558 
559 static struct resource *
560 arm_gic_alloc_resource(device_t bus, device_t child, int type, int *rid,
561     rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
562 {
563 	struct arm_gic_softc *sc;
564 	struct resource_list_entry *rle;
565 	struct resource_list *rl;
566 	int j;
567 
568 	KASSERT(type == SYS_RES_MEMORY, ("Invalid resource type %x", type));
569 
570 	sc = device_get_softc(bus);
571 
572 	/*
573 	 * Request for the default allocation with a given rid: use resource
574 	 * list stored in the local device info.
575 	 */
576 	if (RMAN_IS_DEFAULT_RANGE(start, end)) {
577 		rl = BUS_GET_RESOURCE_LIST(bus, child);
578 
579 		if (type == SYS_RES_IOPORT)
580 			type = SYS_RES_MEMORY;
581 
582 		rle = resource_list_find(rl, type, *rid);
583 		if (rle == NULL) {
584 			if (bootverbose)
585 				device_printf(bus, "no default resources for "
586 				    "rid = %d, type = %d\n", *rid, type);
587 			return (NULL);
588 		}
589 		start = rle->start;
590 		end = rle->end;
591 		count = rle->count;
592 	}
593 
594 	/* Remap through ranges property */
595 	for (j = 0; j < sc->nranges; j++) {
596 		if (start >= sc->ranges[j].bus && end <
597 		    sc->ranges[j].bus + sc->ranges[j].size) {
598 			start -= sc->ranges[j].bus;
599 			start += sc->ranges[j].host;
600 			end -= sc->ranges[j].bus;
601 			end += sc->ranges[j].host;
602 			break;
603 		}
604 	}
605 	if (j == sc->nranges && sc->nranges != 0) {
606 		if (bootverbose)
607 			device_printf(bus, "Could not map resource "
608 			    "%#jx-%#jx\n", (uintmax_t)start, (uintmax_t)end);
609 
610 		return (NULL);
611 	}
612 
613 	return (bus_generic_alloc_resource(bus, child, type, rid, start, end,
614 	    count, flags));
615 }
616 
617 static int
618 arm_gic_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
619 {
620 	struct arm_gic_softc *sc;
621 
622 	sc = device_get_softc(dev);
623 
624 	switch(which) {
625 	case GIC_IVAR_HW_REV:
626 		KASSERT(GICD_IIDR_VAR(sc->gic_iidr) < 3 &&
627 		    GICD_IIDR_VAR(sc->gic_iidr) != 0,
628 		    ("arm_gic_read_ivar: Unknown IIDR revision %u (%.08x)",
629 		     GICD_IIDR_VAR(sc->gic_iidr), sc->gic_iidr));
630 		*result = GICD_IIDR_VAR(sc->gic_iidr);
631 		return (0);
632 	case GIC_IVAR_BUS:
633 		KASSERT(sc->gic_bus != GIC_BUS_UNKNOWN,
634 		    ("arm_gic_read_ivar: Unknown bus type"));
635 		KASSERT(sc->gic_bus <= GIC_BUS_MAX,
636 		    ("arm_gic_read_ivar: Invalid bus type %u", sc->gic_bus));
637 		*result = sc->gic_bus;
638 		return (0);
639 	}
640 
641 	return (ENOENT);
642 }
643 
644 int
645 arm_gic_intr(void *arg)
646 {
647 	struct arm_gic_softc *sc = arg;
648 	struct gic_irqsrc *gi;
649 	uint32_t irq_active_reg, irq;
650 	struct trapframe *tf;
651 
652 	irq_active_reg = gic_c_read_4(sc, GICC_IAR);
653 	irq = irq_active_reg & 0x3FF;
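	/*
	 * GICC_IAR returns the interrupt ID in bits [9:0]; for SGIs,
	 * bits [12:10] carry the ID of the requesting CPU and must be
	 * written back unchanged to GICC_EOIR, which is why the full
	 * irq_active_reg value is used for the EOI writes below.  An ID
	 * of 1023 means nothing is pending and fails the range check.
	 */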
654 
655 	/*
656 	 * 1. We do the EOI here because the value just read from the active
657 	 *    interrupt register must be used for it. Another approach is to
658 	 *    save this value in the associated interrupt source.
659 	 * 2. The EOI must be done on the same CPU where the interrupt fired,
660 	 *    thus we must ensure that the interrupted thread does not
661 	 *    migrate to another CPU.
662 	 * 3. The EOI cannot be delayed by any preemption which could happen
663 	 *    on critical_exit() used in MI intr code when the interrupt
664 	 *    thread is scheduled. See the next point.
665 	 * 4. IPI_RENDEZVOUS assumes that no preemption is permitted during
666 	 *    an action and any use of critical_exit() could break this
667 	 *    assumption. See the comments within smp_rendezvous_action().
668 	 * 5. We always return FILTER_HANDLED as this is an interrupt
669 	 *    controller dispatch function. Otherwise, in the cascaded
670 	 *    interrupt case, the whole interrupt subtree would be masked.
671 	 */
672 
673 	if (irq >= sc->nirqs) {
674 #ifdef GIC_DEBUG_SPURIOUS
675 		device_printf(sc->gic_dev,
676 		    "Spurious interrupt detected: last irq: %d on CPU%d\n",
677 		    sc->last_irq[PCPU_GET(cpuid)], PCPU_GET(cpuid));
678 #endif
679 		return (FILTER_HANDLED);
680 	}
681 
682 	tf = curthread->td_intr_frame;
683 dispatch_irq:
684 	gi = sc->gic_irqs + irq;
685 	/*
686 	 * Note that GIC_FIRST_SGI is zero and is not tested in the 'if'
687 	 * below, as the compiler warns that u_int >= 0 is always true.
688 	 */
689 	if (irq <= GIC_LAST_SGI) {
690 #ifdef SMP
691 		/* Call EOI for all IPI before dispatch. */
692 		gic_c_write_4(sc, GICC_EOIR, irq_active_reg);
693 		intr_ipi_dispatch(sgi_to_ipi[gi->gi_irq], tf);
694 		goto next_irq;
695 #else
696 		device_printf(sc->gic_dev, "SGI %u on UP system detected\n",
697 		    irq - GIC_FIRST_SGI);
698 		gic_c_write_4(sc, GICC_EOIR, irq_active_reg);
699 		goto next_irq;
700 #endif
701 	}
702 
703 #ifdef GIC_DEBUG_SPURIOUS
704 	sc->last_irq[PCPU_GET(cpuid)] = irq;
705 #endif
706 	if ((gi->gi_flags & GI_FLAG_EARLY_EOI) == GI_FLAG_EARLY_EOI)
707 		gic_c_write_4(sc, GICC_EOIR, irq_active_reg);
708 
709 	if (intr_isrc_dispatch(&gi->gi_isrc, tf) != 0) {
710 		gic_irq_mask(sc, irq);
711 		if ((gi->gi_flags & GI_FLAG_EARLY_EOI) != GI_FLAG_EARLY_EOI)
712 			gic_c_write_4(sc, GICC_EOIR, irq_active_reg);
713 		device_printf(sc->gic_dev, "Stray irq %u disabled\n", irq);
714 	}
715 
716 next_irq:
717 	arm_irq_memory_barrier(irq);
718 	irq_active_reg = gic_c_read_4(sc, GICC_IAR);
719 	irq = irq_active_reg & 0x3FF;
720 	if (irq < sc->nirqs)
721 		goto dispatch_irq;
722 
723 	return (FILTER_HANDLED);
724 }
725 
726 static void
727 gic_config(struct arm_gic_softc *sc, u_int irq, enum intr_trigger trig,
728     enum intr_polarity pol)
729 {
730 	uint32_t reg;
731 	uint32_t mask;
732 
733 	if (irq < GIC_FIRST_SPI)
734 		return;
735 
736 	mtx_lock_spin(&sc->mutex);
737 
738 	reg = gic_d_read_4(sc, GICD_ICFGR(irq));
739 	mask = (reg >> 2*(irq % 16)) & 0x3;
740 
741 	if (pol == INTR_POLARITY_LOW) {
742 		mask &= ~GICD_ICFGR_POL_MASK;
743 		mask |= GICD_ICFGR_POL_LOW;
744 	} else if (pol == INTR_POLARITY_HIGH) {
745 		mask &= ~GICD_ICFGR_POL_MASK;
746 		mask |= GICD_ICFGR_POL_HIGH;
747 	}
748 
749 	if (trig == INTR_TRIGGER_LEVEL) {
750 		mask &= ~GICD_ICFGR_TRIG_MASK;
751 		mask |= GICD_ICFGR_TRIG_LVL;
752 	} else if (trig == INTR_TRIGGER_EDGE) {
753 		mask &= ~GICD_ICFGR_TRIG_MASK;
754 		mask |= GICD_ICFGR_TRIG_EDGE;
755 	}
756 
757 	/* Set mask */
758 	reg = reg & ~(0x3 << 2*(irq % 16));
759 	reg = reg | (mask << 2*(irq % 16));
760 	gic_d_write_4(sc, GICD_ICFGR(irq), reg);
761 
762 	mtx_unlock_spin(&sc->mutex);
763 }
764 
765 static int
766 gic_bind(struct arm_gic_softc *sc, u_int irq, cpuset_t *cpus)
767 {
768 	uint32_t cpu, end, mask;
769 
770 	end = min(mp_ncpus, 8);
771 	for (cpu = end; cpu < MAXCPU; cpu++)
772 		if (CPU_ISSET(cpu, cpus))
773 			return (EINVAL);
774 
775 	for (mask = 0, cpu = 0; cpu < end; cpu++)
776 		if (CPU_ISSET(cpu, cpus))
777 			mask |= arm_gic_map[cpu];
778 
779 	gic_d_write_1(sc, GICD_ITARGETSR(0) + irq, mask);
780 	return (0);
781 }
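/*
 * GICD_ITARGETSR is byte-accessible, so the single-byte write above
 * retargets only this SPI without touching its neighbours.  Routing is
 * meaningful for SPIs only; GICD_ITARGETSR0-7 (SGIs and PPIs) are
 * read-only, and arm_gic_bind_intr() rejects those IRQs before getting
 * here.
 */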
782 
783 #ifdef FDT
784 static int
785 gic_map_fdt(device_t dev, u_int ncells, pcell_t *cells, u_int *irqp,
786     enum intr_polarity *polp, enum intr_trigger *trigp)
787 {
788 
789 	if (ncells == 1) {
790 		*irqp = cells[0];
791 		*polp = INTR_POLARITY_CONFORM;
792 		*trigp = INTR_TRIGGER_CONFORM;
793 		return (0);
794 	}
795 	if (ncells == 3) {
796 		u_int irq, tripol;
797 
798 		/*
799 		 * The 1st cell is the interrupt type:
800 		 *	0 = SPI
801 		 *	1 = PPI
802 		 * The 2nd cell contains the interrupt number:
803 		 *	[0 - 987] for SPI
804 		 *	[0 -  15] for PPI
805 		 * The 3rd cell is the flags, encoded as follows:
806 		 *   bits[3:0] trigger type and level flags
807 		 *	1 = low-to-high edge triggered
808 		 *	2 = high-to-low edge triggered
809 		 *	4 = active high level-sensitive
810 		 *	8 = active low level-sensitive
811 		 *   bits[15:8] PPI interrupt cpu mask
812 		 *	Each bit corresponds to each of the 8 possible cpus
813 		 *	attached to the GIC.  A bit set to '1' indicated
814 		 *	the interrupt is wired to that CPU.
815 		 */
816 		switch (cells[0]) {
817 		case 0:
818 			irq = GIC_FIRST_SPI + cells[1];
819 			/* SPI irq is checked later. */
820 			break;
821 		case 1:
822 			irq = GIC_FIRST_PPI + cells[1];
823 			if (irq > GIC_LAST_PPI) {
824 				device_printf(dev, "unsupported PPI interrupt "
825 				    "number %u\n", cells[1]);
826 				return (EINVAL);
827 			}
828 			break;
829 		default:
830 			device_printf(dev, "unsupported interrupt type "
831 			    "configuration %u\n", cells[0]);
832 			return (EINVAL);
833 		}
834 
835 		tripol = cells[2] & 0xff;
836 		if (tripol & 0xf0 || (tripol & FDT_INTR_LOW_MASK &&
837 		    cells[0] == 0))
838 			device_printf(dev, "unsupported trigger/polarity "
839 			    "configuration 0x%02x\n", tripol);
840 
841 		*irqp = irq;
842 		*polp = INTR_POLARITY_CONFORM;
843 		*trigp = tripol & FDT_INTR_EDGE_MASK ?
844 		    INTR_TRIGGER_EDGE : INTR_TRIGGER_LEVEL;
845 		return (0);
846 	}
847 	return (EINVAL);
848 }
849 #endif
850 
851 static int
852 gic_map_msi(device_t dev, struct intr_map_data_msi *msi_data, u_int *irqp,
853     enum intr_polarity *polp, enum intr_trigger *trigp)
854 {
855 	struct gic_irqsrc *gi;
856 
857 	/* Map a non-GICv2m MSI */
858 	gi = (struct gic_irqsrc *)msi_data->isrc;
859 	if (gi == NULL)
860 		return (ENXIO);
861 
862 	*irqp = gi->gi_irq;
863 
864 	/* MSI/MSI-X interrupts are always edge triggered with high polarity */
865 	*polp = INTR_POLARITY_HIGH;
866 	*trigp = INTR_TRIGGER_EDGE;
867 
868 	return (0);
869 }
870 
871 static int
872 gic_map_intr(device_t dev, struct intr_map_data *data, u_int *irqp,
873     enum intr_polarity *polp, enum intr_trigger *trigp)
874 {
875 	u_int irq;
876 	enum intr_polarity pol;
877 	enum intr_trigger trig;
878 	struct arm_gic_softc *sc;
879 	struct intr_map_data_msi *dam;
880 #ifdef FDT
881 	struct intr_map_data_fdt *daf;
882 #endif
883 
884 	sc = device_get_softc(dev);
885 	switch (data->type) {
886 #ifdef FDT
887 	case INTR_MAP_DATA_FDT:
888 		daf = (struct intr_map_data_fdt *)data;
889 		if (gic_map_fdt(dev, daf->ncells, daf->cells, &irq, &pol,
890 		    &trig) != 0)
891 			return (EINVAL);
892 		KASSERT(irq >= sc->nirqs ||
893 		    (sc->gic_irqs[irq].gi_flags & GI_FLAG_MSI) == 0,
894 		    ("%s: Attempting to map a MSI interrupt from FDT",
895 		    __func__));
896 		break;
897 #endif
898 	case INTR_MAP_DATA_MSI:
899 		/* Non-GICv2m MSI */
900 		dam = (struct intr_map_data_msi *)data;
901 		if (gic_map_msi(dev, dam, &irq, &pol, &trig) != 0)
902 			return (EINVAL);
903 		break;
904 	default:
905 		return (ENOTSUP);
906 	}
907 
908 	if (irq >= sc->nirqs)
909 		return (EINVAL);
910 	if (pol != INTR_POLARITY_CONFORM && pol != INTR_POLARITY_LOW &&
911 	    pol != INTR_POLARITY_HIGH)
912 		return (EINVAL);
913 	if (trig != INTR_TRIGGER_CONFORM && trig != INTR_TRIGGER_EDGE &&
914 	    trig != INTR_TRIGGER_LEVEL)
915 		return (EINVAL);
916 
917 	*irqp = irq;
918 	if (polp != NULL)
919 		*polp = pol;
920 	if (trigp != NULL)
921 		*trigp = trig;
922 	return (0);
923 }
924 
925 static int
926 arm_gic_map_intr(device_t dev, struct intr_map_data *data,
927     struct intr_irqsrc **isrcp)
928 {
929 	int error;
930 	u_int irq;
931 	struct arm_gic_softc *sc;
932 
933 	error = gic_map_intr(dev, data, &irq, NULL, NULL);
934 	if (error == 0) {
935 		sc = device_get_softc(dev);
936 		*isrcp = GIC_INTR_ISRC(sc, irq);
937 	}
938 	return (error);
939 }
940 
941 static int
942 arm_gic_setup_intr(device_t dev, struct intr_irqsrc *isrc,
943     struct resource *res, struct intr_map_data *data)
944 {
945 	struct arm_gic_softc *sc = device_get_softc(dev);
946 	struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
947 	enum intr_trigger trig;
948 	enum intr_polarity pol;
949 
950 	if ((gi->gi_flags & GI_FLAG_MSI) == GI_FLAG_MSI) {
951 		/* GICv2m MSI */
952 		pol = gi->gi_pol;
953 		trig = gi->gi_trig;
954 		KASSERT(pol == INTR_POLARITY_HIGH,
955 		    ("%s: MSI interrupts must be active-high", __func__));
956 		KASSERT(trig == INTR_TRIGGER_EDGE,
957 		    ("%s: MSI interrupts must be edge triggered", __func__));
958 	} else if (data != NULL) {
959 		u_int irq;
960 
961 		/* Get config for resource. */
962 		if (gic_map_intr(dev, data, &irq, &pol, &trig) ||
963 		    gi->gi_irq != irq)
964 			return (EINVAL);
965 	} else {
966 		pol = INTR_POLARITY_CONFORM;
967 		trig = INTR_TRIGGER_CONFORM;
968 	}
969 
970 	/* Compare config if this is not first setup. */
971 	if (isrc->isrc_handlers != 0) {
972 		if ((pol != INTR_POLARITY_CONFORM && pol != gi->gi_pol) ||
973 		    (trig != INTR_TRIGGER_CONFORM && trig != gi->gi_trig))
974 			return (EINVAL);
975 		else
976 			return (0);
977 	}
978 
979 	/* For MSI/MSI-X we should have already configured these */
980 	if ((gi->gi_flags & GI_FLAG_MSI) == 0) {
981 		if (pol == INTR_POLARITY_CONFORM)
982 			pol = INTR_POLARITY_LOW;	/* just pick some */
983 		if (trig == INTR_TRIGGER_CONFORM)
984 			trig = INTR_TRIGGER_EDGE;	/* just pick some */
985 
986 		gi->gi_pol = pol;
987 		gi->gi_trig = trig;
988 
989 		/* Edge triggered interrupts need an early EOI sent */
990 		if (gi->gi_trig == INTR_TRIGGER_EDGE)
991 			gi->gi_flags |= GI_FLAG_EARLY_EOI;
992 	}
993 
994 	/*
995 	 * XXX - If a per-CPU interrupt is enabled once SMP is already started,
996 	 *       we need an IPI call which enables it on the other CPUs as
997 	 *       well. Further, it is more complicated as pic_enable_source()
998 	 *       and pic_disable_source() should act on a per-CPU basis only.
999 	 *       Thus, it should be solved here somehow.
1000 	 */
1001 	if (isrc->isrc_flags & INTR_ISRCF_PPI)
1002 		CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu);
1003 
1004 	gic_config(sc, gi->gi_irq, gi->gi_trig, gi->gi_pol);
1005 	arm_gic_bind_intr(dev, isrc);
1006 	return (0);
1007 }
1008 
1009 static int
1010 arm_gic_teardown_intr(device_t dev, struct intr_irqsrc *isrc,
1011     struct resource *res, struct intr_map_data *data)
1012 {
1013 	struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
1014 
1015 	if (isrc->isrc_handlers == 0 && (gi->gi_flags & GI_FLAG_MSI) == 0) {
1016 		gi->gi_pol = INTR_POLARITY_CONFORM;
1017 		gi->gi_trig = INTR_TRIGGER_CONFORM;
1018 	}
1019 	return (0);
1020 }
1021 
1022 static void
1023 arm_gic_enable_intr(device_t dev, struct intr_irqsrc *isrc)
1024 {
1025 	struct arm_gic_softc *sc = device_get_softc(dev);
1026 	struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
1027 
1028 	arm_irq_memory_barrier(gi->gi_irq);
1029 	gic_irq_unmask(sc, gi->gi_irq);
1030 }
1031 
1032 static void
1033 arm_gic_disable_intr(device_t dev, struct intr_irqsrc *isrc)
1034 {
1035 	struct arm_gic_softc *sc = device_get_softc(dev);
1036 	struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
1037 
1038 	gic_irq_mask(sc, gi->gi_irq);
1039 }
1040 
1041 static void
1042 arm_gic_pre_ithread(device_t dev, struct intr_irqsrc *isrc)
1043 {
1044 	struct arm_gic_softc *sc = device_get_softc(dev);
1045 	struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
1046 
1047 	arm_gic_disable_intr(dev, isrc);
1048 	gic_c_write_4(sc, GICC_EOIR, gi->gi_irq);
1049 }
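/*
 * arm_gic_pre_ithread() can EOI with the bare IRQ number because the
 * CPUID field in GICC_EOIR only matters for SGIs, and SGIs are EOId
 * directly in arm_gic_intr() rather than through the ithread path.
 */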
1050 
1051 static void
1052 arm_gic_post_ithread(device_t dev, struct intr_irqsrc *isrc)
1053 {
1054 
1055 	arm_irq_memory_barrier(0);
1056 	arm_gic_enable_intr(dev, isrc);
1057 }
1058 
1059 static void
1060 arm_gic_post_filter(device_t dev, struct intr_irqsrc *isrc)
1061 {
1062 	struct arm_gic_softc *sc = device_get_softc(dev);
1063 	struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
1064 
1065 	/* EOI for edge-triggered interrupts was done earlier. */
1066 	if ((gi->gi_flags & GI_FLAG_EARLY_EOI) == GI_FLAG_EARLY_EOI)
1067 		return;
1068 
1069 	arm_irq_memory_barrier(0);
1070 	gic_c_write_4(sc, GICC_EOIR, gi->gi_irq);
1071 }
1072 
1073 static int
1074 arm_gic_bind_intr(device_t dev, struct intr_irqsrc *isrc)
1075 {
1076 	struct arm_gic_softc *sc = device_get_softc(dev);
1077 	struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
1078 
1079 	if (gi->gi_irq < GIC_FIRST_SPI)
1080 		return (EINVAL);
1081 
1082 	if (CPU_EMPTY(&isrc->isrc_cpu)) {
1083 		gic_irq_cpu = intr_irq_next_cpu(gic_irq_cpu, &all_cpus);
1084 		CPU_SETOF(gic_irq_cpu, &isrc->isrc_cpu);
1085 	}
1086 	return (gic_bind(sc, gi->gi_irq, &isrc->isrc_cpu));
1087 }
1088 
1089 #ifdef SMP
1090 static void
1091 arm_gic_ipi_send(device_t dev, struct intr_irqsrc *isrc, cpuset_t cpus,
1092     u_int ipi)
1093 {
1094 	struct arm_gic_softc *sc = device_get_softc(dev);
1095 	struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
1096 	uint32_t val = 0, i;
1097 
1098 	for (i = 0; i < MAXCPU; i++)
1099 		if (CPU_ISSET(i, &cpus))
1100 			val |= arm_gic_map[i] << GICD_SGI_TARGET_SHIFT;
1101 
1102 	gic_d_write_4(sc, GICD_SGIR, val | gi->gi_irq);
1103 }
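/*
 * GICD_SGIR layout: bits [3:0] select the SGI number, bits [23:16] the
 * CPU target list and bits [25:24] the target list filter (0 = use the
 * list).  Sending, say, SGI 2 to CPUs 0 and 2 therefore writes
 * (0x01 | 0x04) << 16 | 2 = 0x00050002.
 */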
1104 
1105 static int
1106 arm_gic_ipi_setup(device_t dev, u_int ipi, struct intr_irqsrc **isrcp)
1107 {
1108 	struct intr_irqsrc *isrc;
1109 	struct arm_gic_softc *sc = device_get_softc(dev);
1110 
1111 	if (sgi_first_unused > GIC_LAST_SGI)
1112 		return (ENOSPC);
1113 
1114 	isrc = GIC_INTR_ISRC(sc, sgi_first_unused);
1115 	sgi_to_ipi[sgi_first_unused++] = ipi;
1116 
1117 	CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu);
1118 
1119 	*isrcp = isrc;
1120 	return (0);
1121 }
1122 #endif
1123 #else
1124 static int
1125 arm_gic_next_irq(struct arm_gic_softc *sc, int last_irq)
1126 {
1127 	uint32_t active_irq;
1128 
1129 	active_irq = gic_c_read_4(sc, GICC_IAR);
1130 
1131 	/*
1132 	 * Immediately EOIR the SGIs, because doing so requires the other
1133 	 * bits (i.e., the CPU number), not just the IRQ number, and we do not
1134 	 * have this information later.
1135 	 */
1136 	if ((active_irq & 0x3ff) <= GIC_LAST_SGI)
1137 		gic_c_write_4(sc, GICC_EOIR, active_irq);
1138 	active_irq &= 0x3FF;
1139 
1140 	if (active_irq == 0x3FF) {
1141 		if (last_irq == -1)
1142 			device_printf(sc->gic_dev,
1143 			    "Spurious interrupt detected\n");
1144 		return -1;
1145 	}
1146 
1147 	return active_irq;
1148 }
1149 
1150 static int
1151 arm_gic_config(device_t dev, int irq, enum intr_trigger trig,
1152     enum intr_polarity pol)
1153 {
1154 	struct arm_gic_softc *sc = device_get_softc(dev);
1155 	uint32_t reg;
1156 	uint32_t mask;
1157 
1158 	/* Function is publicly accessible, so validate input arguments */
1159 	if ((irq < 0) || (irq >= sc->nirqs))
1160 		goto invalid_args;
1161 	if ((trig != INTR_TRIGGER_EDGE) && (trig != INTR_TRIGGER_LEVEL) &&
1162 	    (trig != INTR_TRIGGER_CONFORM))
1163 		goto invalid_args;
1164 	if ((pol != INTR_POLARITY_HIGH) && (pol != INTR_POLARITY_LOW) &&
1165 	    (pol != INTR_POLARITY_CONFORM))
1166 		goto invalid_args;
1167 
1168 	mtx_lock_spin(&sc->mutex);
1169 
1170 	reg = gic_d_read_4(sc, GICD_ICFGR(irq));
1171 	mask = (reg >> 2*(irq % 16)) & 0x3;
1172 
1173 	if (pol == INTR_POLARITY_LOW) {
1174 		mask &= ~GICD_ICFGR_POL_MASK;
1175 		mask |= GICD_ICFGR_POL_LOW;
1176 	} else if (pol == INTR_POLARITY_HIGH) {
1177 		mask &= ~GICD_ICFGR_POL_MASK;
1178 		mask |= GICD_ICFGR_POL_HIGH;
1179 	}
1180 
1181 	if (trig == INTR_TRIGGER_LEVEL) {
1182 		mask &= ~GICD_ICFGR_TRIG_MASK;
1183 		mask |= GICD_ICFGR_TRIG_LVL;
1184 	} else if (trig == INTR_TRIGGER_EDGE) {
1185 		mask &= ~GICD_ICFGR_TRIG_MASK;
1186 		mask |= GICD_ICFGR_TRIG_EDGE;
1187 	}
1188 
1189 	/* Set mask */
1190 	reg = reg & ~(0x3 << 2*(irq % 16));
1191 	reg = reg | (mask << 2*(irq % 16));
1192 	gic_d_write_4(sc, GICD_ICFGR(irq), reg);
1193 
1194 	mtx_unlock_spin(&sc->mutex);
1195 
1196 	return (0);
1197 
1198 invalid_args:
1199 	device_printf(dev, "gic_config_irq, invalid parameters\n");
1200 	return (EINVAL);
1201 }
1202 
1204 static void
1205 arm_gic_mask(device_t dev, int irq)
1206 {
1207 	struct arm_gic_softc *sc = device_get_softc(dev);
1208 
1209 	gic_d_write_4(sc, GICD_ICENABLER(irq), (1UL << (irq & 0x1F)));
1210 	gic_c_write_4(sc, GICC_EOIR, irq); /* XXX - not allowed */
1211 }
1212 
1213 static void
1214 arm_gic_unmask(device_t dev, int irq)
1215 {
1216 	struct arm_gic_softc *sc = device_get_softc(dev);
1217 
1218 	if (irq > GIC_LAST_SGI)
1219 		arm_irq_memory_barrier(irq);
1220 
1221 	gic_d_write_4(sc, GICD_ISENABLER(irq), (1UL << (irq & 0x1F)));
1222 }
1223 
1224 #ifdef SMP
1225 static void
1226 arm_gic_ipi_send(device_t dev, cpuset_t cpus, u_int ipi)
1227 {
1228 	struct arm_gic_softc *sc = device_get_softc(dev);
1229 	uint32_t val = 0, i;
1230 
1231 	for (i = 0; i < MAXCPU; i++)
1232 		if (CPU_ISSET(i, &cpus))
1233 			val |= arm_gic_map[i] << GICD_SGI_TARGET_SHIFT;
1234 
1235 	gic_d_write_4(sc, GICD_SGIR, val | ipi);
1236 }
1237 
1238 static int
1239 arm_gic_ipi_read(device_t dev, int i)
1240 {
1241 
1242 	if (i != -1) {
1243 		/*
1244 		 * The intr code will automagically give the frame pointer
1245 		 * if the interrupt argument is 0.
1246 		 */
1247 		if ((unsigned int)i > 16)
1248 			return (0);
1249 		return (i);
1250 	}
1251 
1252 	return (0x3ff);
1253 }
1254 
1255 static void
1256 arm_gic_ipi_clear(device_t dev, int ipi)
1257 {
1258 	/* no-op */
1259 }
1260 #endif
1261 
1262 static void
1263 gic_post_filter(void *arg)
1264 {
1265 	struct arm_gic_softc *sc = gic_sc;
1266 	uintptr_t irq = (uintptr_t) arg;
1267 
1268 	if (irq > GIC_LAST_SGI)
1269 		arm_irq_memory_barrier(irq);
1270 	gic_c_write_4(sc, GICC_EOIR, irq);
1271 }
1272 
1273 static int
1274 gic_config_irq(int irq, enum intr_trigger trig, enum intr_polarity pol)
1275 {
1276 
1277 	return (arm_gic_config(gic_sc->gic_dev, irq, trig, pol));
1278 }
1279 
1280 void
1281 arm_mask_irq(uintptr_t nb)
1282 {
1283 
1284 	arm_gic_mask(gic_sc->gic_dev, nb);
1285 }
1286 
1287 void
1288 arm_unmask_irq(uintptr_t nb)
1289 {
1290 
1291 	arm_gic_unmask(gic_sc->gic_dev, nb);
1292 }
1293 
1294 int
1295 arm_get_next_irq(int last_irq)
1296 {
1297 
1298 	return (arm_gic_next_irq(gic_sc, last_irq));
1299 }
1300 
1301 #ifdef SMP
1302 void
1303 intr_pic_init_secondary(void)
1304 {
1305 
1306 	arm_gic_init_secondary(gic_sc->gic_dev);
1307 }
1308 
1309 void
1310 pic_ipi_send(cpuset_t cpus, u_int ipi)
1311 {
1312 
1313 	arm_gic_ipi_send(gic_sc->gic_dev, cpus, ipi);
1314 }
1315 
1316 int
1317 pic_ipi_read(int i)
1318 {
1319 
1320 	return (arm_gic_ipi_read(gic_sc->gic_dev, i));
1321 }
1322 
1323 void
1324 pic_ipi_clear(int ipi)
1325 {
1326 
1327 	arm_gic_ipi_clear(gic_sc->gic_dev, ipi);
1328 }
1329 #endif
1330 #endif /* INTRNG */
1331 
1332 static device_method_t arm_gic_methods[] = {
1333 #ifdef INTRNG
1334 	/* Bus interface */
1335 	DEVMETHOD(bus_print_child,	arm_gic_print_child),
1336 	DEVMETHOD(bus_add_child,	bus_generic_add_child),
1337 	DEVMETHOD(bus_alloc_resource,	arm_gic_alloc_resource),
1338 	DEVMETHOD(bus_release_resource,	bus_generic_release_resource),
1339 	DEVMETHOD(bus_activate_resource,bus_generic_activate_resource),
1340 	DEVMETHOD(bus_read_ivar,	arm_gic_read_ivar),
1341 
1342 	/* Interrupt controller interface */
1343 	DEVMETHOD(pic_disable_intr,	arm_gic_disable_intr),
1344 	DEVMETHOD(pic_enable_intr,	arm_gic_enable_intr),
1345 	DEVMETHOD(pic_map_intr,		arm_gic_map_intr),
1346 	DEVMETHOD(pic_setup_intr,	arm_gic_setup_intr),
1347 	DEVMETHOD(pic_teardown_intr,	arm_gic_teardown_intr),
1348 	DEVMETHOD(pic_post_filter,	arm_gic_post_filter),
1349 	DEVMETHOD(pic_post_ithread,	arm_gic_post_ithread),
1350 	DEVMETHOD(pic_pre_ithread,	arm_gic_pre_ithread),
1351 #ifdef SMP
1352 	DEVMETHOD(pic_bind_intr,	arm_gic_bind_intr),
1353 	DEVMETHOD(pic_init_secondary,	arm_gic_init_secondary),
1354 	DEVMETHOD(pic_ipi_send,		arm_gic_ipi_send),
1355 	DEVMETHOD(pic_ipi_setup,	arm_gic_ipi_setup),
1356 #endif
1357 #endif
1358 	{ 0, 0 }
1359 };
1360 
1361 DEFINE_CLASS_0(gic, arm_gic_driver, arm_gic_methods,
1362     sizeof(struct arm_gic_softc));
1363 
1364 #ifdef INTRNG
1365 /*
1366  * GICv2m support -- the GICv2 MSI/MSI-X controller.
1367  */
1368 
1369 #define	GICV2M_MSI_TYPER	0x008
1370 #define	 MSI_TYPER_SPI_BASE(x)	(((x) >> 16) & 0x3ff)
1371 #define	 MSI_TYPER_SPI_COUNT(x)	(((x) >> 0) & 0x3ff)
1372 #define	GICv2M_MSI_SETSPI_NS	0x040
1373 #define	GICV2M_MSI_IIDR		0xFCC
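/*
 * MSI_TYPER advertises the block of SPIs this v2m frame can raise:
 * bits [25:16] hold the first SPI number and bits [9:0] the number of
 * SPIs that follow it, matching the field extractors above.
 */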
1374 
1375 int
1376 arm_gicv2m_attach(device_t dev)
1377 {
1378 	struct arm_gicv2m_softc *sc;
1379 	struct arm_gic_softc *psc;
1380 	uint32_t typer;
1381 	int rid;
1382 
1383 	psc = device_get_softc(device_get_parent(dev));
1384 	sc = device_get_softc(dev);
1385 
1386 	rid = 0;
1387 	sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1388 	    RF_ACTIVE);
1389 	if (sc->sc_mem == NULL) {
1390 		device_printf(dev, "Unable to allocate resources\n");
1391 		return (ENXIO);
1392 	}
1393 
1394 	typer = bus_read_4(sc->sc_mem, GICV2M_MSI_TYPER);
1395 	sc->sc_spi_start = MSI_TYPER_SPI_BASE(typer);
1396 	sc->sc_spi_count = MSI_TYPER_SPI_COUNT(typer);
1397 	sc->sc_spi_end = sc->sc_spi_start + sc->sc_spi_count;
1398 
1399 	/* Reserve these interrupts for MSI/MSI-X use */
1400 	arm_gic_reserve_msi_range(device_get_parent(dev), sc->sc_spi_start,
1401 	    sc->sc_spi_count);
1402 
1403 	mtx_init(&sc->sc_mutex, "GICv2m lock", "", MTX_DEF);
1404 
1405 	intr_msi_register(dev, sc->sc_xref);
1406 
1407 	if (bootverbose)
1408 		device_printf(dev, "using spi %u to %u\n", sc->sc_spi_start,
1409 		    sc->sc_spi_start + sc->sc_spi_count - 1);
1410 
1411 	return (0);
1412 }
1413 
1414 static int
1415 arm_gicv2m_alloc_msi(device_t dev, device_t child, int count, int maxcount,
1416     device_t *pic, struct intr_irqsrc **srcs)
1417 {
1418 	struct arm_gic_softc *psc;
1419 	struct arm_gicv2m_softc *sc;
1420 	int i, irq, end_irq;
1421 	bool found;
1422 
1423 	KASSERT(powerof2(count), ("%s: bad count", __func__));
1424 	KASSERT(powerof2(maxcount), ("%s: bad maxcount", __func__));
1425 
1426 	psc = device_get_softc(device_get_parent(dev));
1427 	sc = device_get_softc(dev);
1428 
1429 	mtx_lock(&sc->sc_mutex);
1430 
1431 	found = false;
1432 	for (irq = sc->sc_spi_start; irq < sc->sc_spi_end; irq++) {
1433 		/* Start on an aligned interrupt */
1434 		if ((irq & (maxcount - 1)) != 0)
1435 			continue;
1436 
1437 		/* Assume we found a valid range until shown otherwise */
1438 		found = true;
1439 
1440 		/* Check this range is valid */
1441 		for (end_irq = irq; end_irq != irq + count; end_irq++) {
1442 			/* No free interrupts */
1443 			if (end_irq == sc->sc_spi_end) {
1444 				found = false;
1445 				break;
1446 			}
1447 
1448 			KASSERT((psc->gic_irqs[end_irq].gi_flags & GI_FLAG_MSI) != 0,
1449 			    ("%s: Non-MSI interrupt found", __func__));
1450 
1451 			/* This is already used */
1452 			if ((psc->gic_irqs[end_irq].gi_flags & GI_FLAG_MSI_USED) ==
1453 			    GI_FLAG_MSI_USED) {
1454 				found = false;
1455 				break;
1456 			}
1457 		}
1458 		if (found)
1459 			break;
1460 	}
1461 
1462 	/* Not enough interrupts were found */
1463 	if (!found || irq == sc->sc_spi_end) {
1464 		mtx_unlock(&sc->sc_mutex);
1465 		return (ENXIO);
1466 	}
1467 
1468 	for (i = 0; i < count; i++) {
1469 		/* Mark the interrupt as used */
1470 		psc->gic_irqs[irq + i].gi_flags |= GI_FLAG_MSI_USED;
1472 	}
1473 	mtx_unlock(&sc->sc_mutex);
1474 
1475 	for (i = 0; i < count; i++)
1476 		srcs[i] = (struct intr_irqsrc *)&psc->gic_irqs[irq + i];
1477 	*pic = device_get_parent(dev);
1478 
1479 	return (0);
1480 }
1481 
1482 static int
1483 arm_gicv2m_release_msi(device_t dev, device_t child, int count,
1484     struct intr_irqsrc **isrc)
1485 {
1486 	struct arm_gicv2m_softc *sc;
1487 	struct gic_irqsrc *gi;
1488 	int i;
1489 
1490 	sc = device_get_softc(dev);
1491 
1492 	mtx_lock(&sc->sc_mutex);
1493 	for (i = 0; i < count; i++) {
1494 		gi = (struct gic_irqsrc *)isrc[i];
1495 
1496 		KASSERT((gi->gi_flags & GI_FLAG_MSI_USED) == GI_FLAG_MSI_USED,
1497 		    ("%s: Trying to release an unused MSI-X interrupt",
1498 		    __func__));
1499 
1500 		gi->gi_flags &= ~GI_FLAG_MSI_USED;
1501 	}
1502 	mtx_unlock(&sc->sc_mutex);
1503 
1504 	return (0);
1505 }
1506 
1507 static int
1508 arm_gicv2m_alloc_msix(device_t dev, device_t child, device_t *pic,
1509     struct intr_irqsrc **isrcp)
1510 {
1511 	struct arm_gicv2m_softc *sc;
1512 	struct arm_gic_softc *psc;
1513 	int irq;
1514 
1515 	psc = device_get_softc(device_get_parent(dev));
1516 	sc = device_get_softc(dev);
1517 
1518 	mtx_lock(&sc->sc_mutex);
1519 	/* Find an unused interrupt */
1520 	for (irq = sc->sc_spi_start; irq < sc->sc_spi_end; irq++) {
1521 		KASSERT((psc->gic_irqs[irq].gi_flags & GI_FLAG_MSI) != 0,
1522 		    ("%s: Non-MSI interrupt found", __func__));
1523 		if ((psc->gic_irqs[irq].gi_flags & GI_FLAG_MSI_USED) == 0)
1524 			break;
1525 	}
1526 	/* No free interrupt was found */
1527 	if (irq == sc->sc_spi_end) {
1528 		mtx_unlock(&sc->sc_mutex);
1529 		return (ENXIO);
1530 	}
1531 
1532 	/* Mark the interrupt as used */
1533 	psc->gic_irqs[irq].gi_flags |= GI_FLAG_MSI_USED;
1534 	mtx_unlock(&sc->sc_mutex);
1535 
1536 	*isrcp = (struct intr_irqsrc *)&psc->gic_irqs[irq];
1537 	*pic = device_get_parent(dev);
1538 
1539 	return (0);
1540 }
1541 
1542 static int
1543 arm_gicv2m_release_msix(device_t dev, device_t child, struct intr_irqsrc *isrc)
1544 {
1545 	struct arm_gicv2m_softc *sc;
1546 	struct gic_irqsrc *gi;
1547 
1548 	sc = device_get_softc(dev);
1549 	gi = (struct gic_irqsrc *)isrc;
1550 
1551 	KASSERT((gi->gi_flags & GI_FLAG_MSI_USED) == GI_FLAG_MSI_USED,
1552 	    ("%s: Trying to release an unused MSI-X interrupt", __func__));
1553 
1554 	mtx_lock(&sc->sc_mutex);
1555 	gi->gi_flags &= ~GI_FLAG_MSI_USED;
1556 	mtx_unlock(&sc->sc_mutex);
1557 
1558 	return (0);
1559 }
1560 
1561 static int
1562 arm_gicv2m_map_msi(device_t dev, device_t child, struct intr_irqsrc *isrc,
1563     uint64_t *addr, uint32_t *data)
1564 {
1565 	struct arm_gicv2m_softc *sc = device_get_softc(dev);
1566 	struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
1567 
1568 	*addr = vtophys(rman_get_virtual(sc->sc_mem)) + GICv2M_MSI_SETSPI_NS;
1569 	*data = gi->gi_irq;
1570 
1571 	return (0);
1572 }
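/*
 * The address/data pair handed to the device is the physical address of
 * the frame's MSI_SETSPI_NS doorbell plus the SPI number to write there;
 * a 32-bit write of that value by the device makes the GICv2m frame
 * raise the corresponding SPI, which then behaves like any other
 * edge-triggered interrupt in this driver.
 */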
1573 
1574 static device_method_t arm_gicv2m_methods[] = {
1575 	/* Device interface */
1576 	DEVMETHOD(device_attach,	arm_gicv2m_attach),
1577 
1578 	/* MSI/MSI-X */
1579 	DEVMETHOD(msi_alloc_msi,	arm_gicv2m_alloc_msi),
1580 	DEVMETHOD(msi_release_msi,	arm_gicv2m_release_msi),
1581 	DEVMETHOD(msi_alloc_msix,	arm_gicv2m_alloc_msix),
1582 	DEVMETHOD(msi_release_msix,	arm_gicv2m_release_msix),
1583 	DEVMETHOD(msi_map_msi,		arm_gicv2m_map_msi),
1584 
1585 	/* End */
1586 	DEVMETHOD_END
1587 };
1588 
1589 DEFINE_CLASS_0(gicv2m, arm_gicv2m_driver, arm_gicv2m_methods,
1590     sizeof(struct arm_gicv2m_softc));
1591 #endif
1592