xref: /freebsd/sys/arm64/arm64/gic_v3.c (revision 81b22a98)
1 /*-
2  * Copyright (c) 2015-2016 The FreeBSD Foundation
3  *
4  * This software was developed by Andrew Turner under
5  * the sponsorship of the FreeBSD Foundation.
6  *
7  * This software was developed by Semihalf under
8  * the sponsorship of the FreeBSD Foundation.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  */
31 
32 #include "opt_acpi.h"
33 #include "opt_platform.h"
34 
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37 
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/bitstring.h>
41 #include <sys/bus.h>
42 #include <sys/kernel.h>
43 #include <sys/ktr.h>
44 #include <sys/malloc.h>
45 #include <sys/module.h>
46 #include <sys/rman.h>
47 #include <sys/pcpu.h>
48 #include <sys/proc.h>
49 #include <sys/cpuset.h>
50 #include <sys/lock.h>
51 #include <sys/mutex.h>
52 #include <sys/smp.h>
53 #include <sys/interrupt.h>
54 
55 #include <vm/vm.h>
56 #include <vm/pmap.h>
57 
58 #include <machine/bus.h>
59 #include <machine/cpu.h>
60 #include <machine/intr.h>
61 
62 #ifdef FDT
63 #include <dev/fdt/fdt_intr.h>
64 #include <dev/ofw/ofw_bus_subr.h>
65 #endif
66 
67 #ifdef DEV_ACPI
68 #include <contrib/dev/acpica/include/acpi.h>
69 #include <dev/acpica/acpivar.h>
70 #endif
71 
72 #include "gic_if.h"
73 #include "pic_if.h"
74 #include "msi_if.h"
75 
76 #include <arm/arm/gic_common.h>
77 #include "gic_v3_reg.h"
78 #include "gic_v3_var.h"
79 
80 static bus_get_domain_t gic_v3_get_domain;
81 static bus_read_ivar_t gic_v3_read_ivar;
82 static bus_write_ivar_t gic_v3_write_ivar;
83 
84 static pic_disable_intr_t gic_v3_disable_intr;
85 static pic_enable_intr_t gic_v3_enable_intr;
86 static pic_map_intr_t gic_v3_map_intr;
87 static pic_setup_intr_t gic_v3_setup_intr;
88 static pic_teardown_intr_t gic_v3_teardown_intr;
89 static pic_post_filter_t gic_v3_post_filter;
90 static pic_post_ithread_t gic_v3_post_ithread;
91 static pic_pre_ithread_t gic_v3_pre_ithread;
92 static pic_bind_intr_t gic_v3_bind_intr;
93 #ifdef SMP
94 static pic_init_secondary_t gic_v3_init_secondary;
95 static pic_ipi_send_t gic_v3_ipi_send;
96 static pic_ipi_setup_t gic_v3_ipi_setup;
97 #endif
98 
99 static gic_reserve_msi_range_t gic_v3_reserve_msi_range;
100 static gic_alloc_msi_t gic_v3_gic_alloc_msi;
101 static gic_release_msi_t gic_v3_gic_release_msi;
102 static gic_alloc_msix_t gic_v3_gic_alloc_msix;
103 static gic_release_msix_t gic_v3_gic_release_msix;
104 
105 static msi_alloc_msi_t gic_v3_alloc_msi;
106 static msi_release_msi_t gic_v3_release_msi;
107 static msi_alloc_msix_t gic_v3_alloc_msix;
108 static msi_release_msix_t gic_v3_release_msix;
109 static msi_map_msi_t gic_v3_map_msi;
110 
/* Last CPU an SPI was routed to; advanced round-robin by gic_v3_bind_intr(). */
static u_int gic_irq_cpu;
#ifdef SMP
/* Map from SGI number to the IPI it delivers; filled by gic_v3_ipi_setup(). */
static u_int sgi_to_ipi[GIC_LAST_SGI - GIC_FIRST_SGI + 1];
/* Next SGI available for IPI allocation. */
static u_int sgi_first_unused = GIC_FIRST_SGI;
#endif
116 
/* Kernel-object method table for the GICv3 device class. */
static device_method_t gic_v3_methods[] = {
	/* Device interface */
	DEVMETHOD(device_detach,	gic_v3_detach),

	/* Bus interface */
	DEVMETHOD(bus_get_domain,	gic_v3_get_domain),
	DEVMETHOD(bus_read_ivar,	gic_v3_read_ivar),
	DEVMETHOD(bus_write_ivar,	gic_v3_write_ivar),

	/* Interrupt controller interface */
	DEVMETHOD(pic_disable_intr,	gic_v3_disable_intr),
	DEVMETHOD(pic_enable_intr,	gic_v3_enable_intr),
	DEVMETHOD(pic_map_intr,		gic_v3_map_intr),
	DEVMETHOD(pic_setup_intr,	gic_v3_setup_intr),
	DEVMETHOD(pic_teardown_intr,	gic_v3_teardown_intr),
	DEVMETHOD(pic_post_filter,	gic_v3_post_filter),
	DEVMETHOD(pic_post_ithread,	gic_v3_post_ithread),
	DEVMETHOD(pic_pre_ithread,	gic_v3_pre_ithread),
#ifdef SMP
	DEVMETHOD(pic_bind_intr,	gic_v3_bind_intr),
	DEVMETHOD(pic_init_secondary,	gic_v3_init_secondary),
	DEVMETHOD(pic_ipi_send,		gic_v3_ipi_send),
	DEVMETHOD(pic_ipi_setup,	gic_v3_ipi_setup),
#endif

	/* MSI/MSI-X */
	DEVMETHOD(msi_alloc_msi,        gic_v3_alloc_msi),
	DEVMETHOD(msi_release_msi,      gic_v3_release_msi),
	DEVMETHOD(msi_alloc_msix,       gic_v3_alloc_msix),
	DEVMETHOD(msi_release_msix,     gic_v3_release_msix),
	DEVMETHOD(msi_map_msi,          gic_v3_map_msi),

	/* GIC */
	DEVMETHOD(gic_reserve_msi_range, gic_v3_reserve_msi_range),
	DEVMETHOD(gic_alloc_msi,	gic_v3_gic_alloc_msi),
	DEVMETHOD(gic_release_msi,	gic_v3_gic_release_msi),
	DEVMETHOD(gic_alloc_msix,	gic_v3_gic_alloc_msix),
	DEVMETHOD(gic_release_msix,	gic_v3_gic_release_msix),

	/* End */
	DEVMETHOD_END
};
159 
/*
 * Base kobj class for the GICv3 driver; no device_probe/attach here, so
 * bus-specific front-ends are expected to subclass it (note the opt_acpi.h
 * and opt_platform.h includes above) — TODO confirm against the front-ends.
 */
DEFINE_CLASS_0(gic, gic_v3_driver, gic_v3_methods,
    sizeof(struct gic_v3_softc));

/*
 * Driver-specific definitions.
 */
MALLOC_DEFINE(M_GIC_V3, "GICv3", GIC_V3_DEVSTR);
167 
/*
 * Helper functions and definitions.
 */
/* Destination registers, either Distributor or Re-Distributor */
enum gic_v3_xdist {
	DIST = 0,
	REDIST,
};

/* Per-interrupt-source state; embeds the generic intr_irqsrc. */
struct gic_v3_irqsrc {
	struct intr_irqsrc	gi_isrc;
	uint32_t		gi_irq;		/* GIC interrupt number */
	enum intr_polarity	gi_pol;
	enum intr_trigger	gi_trig;
#define GI_FLAG_MSI		(1 << 1) /* This interrupt source should only */
					 /* be used for MSI/MSI-X interrupts */
#define GI_FLAG_MSI_USED	(1 << 2) /* This irq is already allocated */
					 /* for a MSI/MSI-X interrupt */
	u_int			gi_flags;
};

/* Helper routines starting with gic_v3_ */
static int gic_v3_dist_init(struct gic_v3_softc *);
static int gic_v3_redist_alloc(struct gic_v3_softc *);
static int gic_v3_redist_find(struct gic_v3_softc *);
static int gic_v3_redist_init(struct gic_v3_softc *);
static int gic_v3_cpu_init(struct gic_v3_softc *);
static void gic_v3_wait_for_rwp(struct gic_v3_softc *, enum gic_v3_xdist);

/* A sequence of init functions for primary (boot) CPU */
typedef int (*gic_v3_initseq_t) (struct gic_v3_softc *);
/* Primary CPU initialization sequence */
static gic_v3_initseq_t gic_v3_primary_init[] = {
	gic_v3_dist_init,
	gic_v3_redist_alloc,
	gic_v3_redist_init,
	gic_v3_cpu_init,
	NULL
};

#ifdef SMP
/* Secondary CPU initialization sequence */
static gic_v3_initseq_t gic_v3_secondary_init[] = {
	gic_v3_redist_init,
	gic_v3_cpu_init,
	NULL
};
#endif
216 
217 uint32_t
218 gic_r_read_4(device_t dev, bus_size_t offset)
219 {
220 	struct gic_v3_softc *sc;
221 	struct resource *rdist;
222 
223 	sc = device_get_softc(dev);
224 	rdist = &sc->gic_redists.pcpu[PCPU_GET(cpuid)]->res;
225 	return (bus_read_4(rdist, offset));
226 }
227 
228 uint64_t
229 gic_r_read_8(device_t dev, bus_size_t offset)
230 {
231 	struct gic_v3_softc *sc;
232 	struct resource *rdist;
233 
234 	sc = device_get_softc(dev);
235 	rdist = &sc->gic_redists.pcpu[PCPU_GET(cpuid)]->res;
236 	return (bus_read_8(rdist, offset));
237 }
238 
239 void
240 gic_r_write_4(device_t dev, bus_size_t offset, uint32_t val)
241 {
242 	struct gic_v3_softc *sc;
243 	struct resource *rdist;
244 
245 	sc = device_get_softc(dev);
246 	rdist = &sc->gic_redists.pcpu[PCPU_GET(cpuid)]->res;
247 	bus_write_4(rdist, offset, val);
248 }
249 
250 void
251 gic_r_write_8(device_t dev, bus_size_t offset, uint64_t val)
252 {
253 	struct gic_v3_softc *sc;
254 	struct resource *rdist;
255 
256 	sc = device_get_softc(dev);
257 	rdist = &sc->gic_redists.pcpu[PCPU_GET(cpuid)]->res;
258 	bus_write_8(rdist, offset, val);
259 }
260 
261 static void
262 gic_v3_reserve_msi_range(device_t dev, u_int start, u_int count)
263 {
264 	struct gic_v3_softc *sc;
265 	int i;
266 
267 	sc = device_get_softc(dev);
268 
269 	KASSERT((start + count) < sc->gic_nirqs,
270 	    ("%s: Trying to allocate too many MSI IRQs: %d + %d > %d", __func__,
271 	    start, count, sc->gic_nirqs));
272 	for (i = 0; i < count; i++) {
273 		KASSERT(sc->gic_irqs[start + i].gi_isrc.isrc_handlers == 0,
274 		    ("%s: MSI interrupt %d already has a handler", __func__,
275 		    count + i));
276 		KASSERT(sc->gic_irqs[start + i].gi_pol == INTR_POLARITY_CONFORM,
277 		    ("%s: MSI interrupt %d already has a polarity", __func__,
278 		    count + i));
279 		KASSERT(sc->gic_irqs[start + i].gi_trig == INTR_TRIGGER_CONFORM,
280 		    ("%s: MSI interrupt %d already has a trigger", __func__,
281 		    count + i));
282 		sc->gic_irqs[start + i].gi_pol = INTR_POLARITY_HIGH;
283 		sc->gic_irqs[start + i].gi_trig = INTR_TRIGGER_EDGE;
284 		sc->gic_irqs[start + i].gi_flags |= GI_FLAG_MSI;
285 	}
286 }
287 
288 /*
289  * Device interface.
290  */
291 int
292 gic_v3_attach(device_t dev)
293 {
294 	struct gic_v3_softc *sc;
295 	gic_v3_initseq_t *init_func;
296 	uint32_t typer;
297 	int rid;
298 	int err;
299 	size_t i;
300 	u_int irq;
301 	const char *name;
302 
303 	sc = device_get_softc(dev);
304 	sc->gic_registered = FALSE;
305 	sc->dev = dev;
306 	err = 0;
307 
308 	/* Initialize mutex */
309 	mtx_init(&sc->gic_mtx, "GICv3 lock", NULL, MTX_SPIN);
310 
311 	/*
312 	 * Allocate array of struct resource.
313 	 * One entry for Distributor and all remaining for Re-Distributor.
314 	 */
315 	sc->gic_res = malloc(
316 	    sizeof(*sc->gic_res) * (sc->gic_redists.nregions + 1),
317 	    M_GIC_V3, M_WAITOK);
318 
319 	/* Now allocate corresponding resources */
320 	for (i = 0, rid = 0; i < (sc->gic_redists.nregions + 1); i++, rid++) {
321 		sc->gic_res[rid] = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
322 		    &rid, RF_ACTIVE);
323 		if (sc->gic_res[rid] == NULL)
324 			return (ENXIO);
325 	}
326 
327 	/*
328 	 * Distributor interface
329 	 */
330 	sc->gic_dist = sc->gic_res[0];
331 
332 	/*
333 	 * Re-Dristributor interface
334 	 */
335 	/* Allocate space under region descriptions */
336 	sc->gic_redists.regions = malloc(
337 	    sizeof(*sc->gic_redists.regions) * sc->gic_redists.nregions,
338 	    M_GIC_V3, M_WAITOK);
339 
340 	/* Fill-up bus_space information for each region. */
341 	for (i = 0, rid = 1; i < sc->gic_redists.nregions; i++, rid++)
342 		sc->gic_redists.regions[i] = sc->gic_res[rid];
343 
344 	/* Get the number of supported SPI interrupts */
345 	typer = gic_d_read(sc, 4, GICD_TYPER);
346 	sc->gic_nirqs = GICD_TYPER_I_NUM(typer);
347 	if (sc->gic_nirqs > GIC_I_NUM_MAX)
348 		sc->gic_nirqs = GIC_I_NUM_MAX;
349 
350 	sc->gic_irqs = malloc(sizeof(*sc->gic_irqs) * sc->gic_nirqs,
351 	    M_GIC_V3, M_WAITOK | M_ZERO);
352 	name = device_get_nameunit(dev);
353 	for (irq = 0; irq < sc->gic_nirqs; irq++) {
354 		struct intr_irqsrc *isrc;
355 
356 		sc->gic_irqs[irq].gi_irq = irq;
357 		sc->gic_irqs[irq].gi_pol = INTR_POLARITY_CONFORM;
358 		sc->gic_irqs[irq].gi_trig = INTR_TRIGGER_CONFORM;
359 
360 		isrc = &sc->gic_irqs[irq].gi_isrc;
361 		if (irq <= GIC_LAST_SGI) {
362 			err = intr_isrc_register(isrc, sc->dev,
363 			    INTR_ISRCF_IPI, "%s,i%u", name, irq - GIC_FIRST_SGI);
364 		} else if (irq <= GIC_LAST_PPI) {
365 			err = intr_isrc_register(isrc, sc->dev,
366 			    INTR_ISRCF_PPI, "%s,p%u", name, irq - GIC_FIRST_PPI);
367 		} else {
368 			err = intr_isrc_register(isrc, sc->dev, 0,
369 			    "%s,s%u", name, irq - GIC_FIRST_SPI);
370 		}
371 		if (err != 0) {
372 			/* XXX call intr_isrc_deregister() */
373 			free(sc->gic_irqs, M_DEVBUF);
374 			return (err);
375 		}
376 	}
377 
378 	mtx_init(&sc->gic_mbi_mtx, "GICv3 mbi lock", NULL, MTX_DEF);
379 	if (sc->gic_mbi_start > 0) {
380 		gic_v3_reserve_msi_range(dev, sc->gic_mbi_start,
381 		    sc->gic_mbi_end - sc->gic_mbi_start);
382 
383 		if (bootverbose) {
384 			device_printf(dev, "using spi %u to %u\n", sc->gic_mbi_start,
385 					sc->gic_mbi_end);
386 		}
387 	}
388 
389 	/*
390 	 * Read the Peripheral ID2 register. This is an implementation
391 	 * defined register, but seems to be implemented in all GICv3
392 	 * parts and Linux expects it to be there.
393 	 */
394 	sc->gic_pidr2 = gic_d_read(sc, 4, GICD_PIDR2);
395 
396 	/* Get the number of supported interrupt identifier bits */
397 	sc->gic_idbits = GICD_TYPER_IDBITS(typer);
398 
399 	if (bootverbose) {
400 		device_printf(dev, "SPIs: %u, IDs: %u\n",
401 		    sc->gic_nirqs, (1 << sc->gic_idbits) - 1);
402 	}
403 
404 	/* Train init sequence for boot CPU */
405 	for (init_func = gic_v3_primary_init; *init_func != NULL; init_func++) {
406 		err = (*init_func)(sc);
407 		if (err != 0)
408 			return (err);
409 	}
410 
411 	return (0);
412 }
413 
414 int
415 gic_v3_detach(device_t dev)
416 {
417 	struct gic_v3_softc *sc;
418 	size_t i;
419 	int rid;
420 
421 	sc = device_get_softc(dev);
422 
423 	if (device_is_attached(dev)) {
424 		/*
425 		 * XXX: We should probably deregister PIC
426 		 */
427 		if (sc->gic_registered)
428 			panic("Trying to detach registered PIC");
429 	}
430 	for (rid = 0; rid < (sc->gic_redists.nregions + 1); rid++)
431 		bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->gic_res[rid]);
432 
433 	for (i = 0; i <= mp_maxid; i++)
434 		free(sc->gic_redists.pcpu[i], M_GIC_V3);
435 
436 	free(sc->gic_res, M_GIC_V3);
437 	free(sc->gic_redists.regions, M_GIC_V3);
438 
439 	return (0);
440 }
441 
442 static int
443 gic_v3_get_domain(device_t dev, device_t child, int *domain)
444 {
445 	struct gic_v3_devinfo *di;
446 
447 	di = device_get_ivars(child);
448 	if (di->gic_domain < 0)
449 		return (ENOENT);
450 
451 	*domain = di->gic_domain;
452 	return (0);
453 }
454 
/*
 * Export instance variables to child devices: the number of IRQs each
 * child may use, the current CPU's Re-Distributor, the GIC architecture
 * revision and the bus type.
 */
static int
gic_v3_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
{
	struct gic_v3_softc *sc;

	sc = device_get_softc(dev);

	switch (which) {
	case GICV3_IVAR_NIRQS:
		/* Split the non-GIC interrupt numbers evenly among children. */
		*result = (intr_nirq - sc->gic_nirqs) / sc->gic_nchildren;
		return (0);
	case GICV3_IVAR_REDIST:
		/* Re-Distributor of the CPU executing this call. */
		*result = (uintptr_t)sc->gic_redists.pcpu[PCPU_GET(cpuid)];
		return (0);
	case GIC_IVAR_HW_REV:
		KASSERT(
		    GICR_PIDR2_ARCH(sc->gic_pidr2) == GICR_PIDR2_ARCH_GICv3 ||
		    GICR_PIDR2_ARCH(sc->gic_pidr2) == GICR_PIDR2_ARCH_GICv4,
		    ("gic_v3_read_ivar: Invalid GIC architecture: %d (%.08X)",
		     GICR_PIDR2_ARCH(sc->gic_pidr2), sc->gic_pidr2));
		*result = GICR_PIDR2_ARCH(sc->gic_pidr2);
		return (0);
	case GIC_IVAR_BUS:
		KASSERT(sc->gic_bus != GIC_BUS_UNKNOWN,
		    ("gic_v3_read_ivar: Unknown bus type"));
		KASSERT(sc->gic_bus <= GIC_BUS_MAX,
		    ("gic_v3_read_ivar: Invalid bus type %u", sc->gic_bus));
		*result = sc->gic_bus;
		return (0);
	}

	return (ENOENT);
}
488 
489 static int
490 gic_v3_write_ivar(device_t dev, device_t child, int which, uintptr_t value)
491 {
492 	struct gic_v3_softc *sc;
493 
494 	sc = device_get_softc(dev);
495 
496 	switch(which) {
497 	case GICV3_IVAR_NIRQS:
498 	case GICV3_IVAR_REDIST:
499 	case GIC_IVAR_HW_REV:
500 	case GIC_IVAR_BUS:
501 		return (EINVAL);
502 	}
503 
504 	return (ENOENT);
505 }
506 
/*
 * Low-level interrupt handler.  Acknowledges interrupts from
 * ICC_IAR1_EL1 in a loop and dispatches each one: LPIs go to the child
 * PIC (e.g. the ITS), SGIs are dispatched as IPIs, and PPIs/SPIs are
 * handed to the generic interrupt framework.  The loop only exits when
 * the GIC returns an out-of-range (spurious/special) interrupt ID.
 */
int
arm_gic_v3_intr(void *arg)
{
	struct gic_v3_softc *sc = arg;
	struct gic_v3_irqsrc *gi;
	struct intr_pic *pic;
	uint64_t active_irq;
	struct trapframe *tf;

	pic = sc->gic_pic;

	while (1) {
		if (CPU_MATCH_ERRATA_CAVIUM_THUNDERX_1_1) {
			/*
			 * Hardware:		Cavium ThunderX
			 * Chip revision:	Pass 1.0 (early version)
			 *			Pass 1.1 (production)
			 * ERRATUM:		22978, 23154
			 */
			__asm __volatile(
			    "nop;nop;nop;nop;nop;nop;nop;nop;	\n"
			    "mrs %0, ICC_IAR1_EL1		\n"
			    "nop;nop;nop;nop;			\n"
			    "dsb sy				\n"
			    : "=&r" (active_irq));
		} else {
			active_irq = gic_icc_read(IAR1);
		}

		/* LPIs and above are owned by a child controller. */
		if (active_irq >= GIC_FIRST_LPI) {
			intr_child_irq_handler(pic, active_irq);
			continue;
		}

		/* Spurious/special IDs above gic_nirqs terminate the loop. */
		if (__predict_false(active_irq >= sc->gic_nirqs))
			return (FILTER_HANDLED);

		tf = curthread->td_intr_frame;
		gi = &sc->gic_irqs[active_irq];
		if (active_irq <= GIC_LAST_SGI) {
			/* Call EOI for all IPI before dispatch. */
			gic_icc_write(EOIR1, (uint64_t)active_irq);
#ifdef SMP
			intr_ipi_dispatch(sgi_to_ipi[gi->gi_irq], tf);
#else
			device_printf(sc->dev, "SGI %ju on UP system detected\n",
			    (uintmax_t)(active_irq - GIC_FIRST_SGI));
#endif
		} else if (active_irq >= GIC_FIRST_PPI &&
		    active_irq <= GIC_LAST_SPI) {
			/* Edge-triggered sources get EOI before dispatch. */
			if (gi->gi_trig == INTR_TRIGGER_EDGE)
				gic_icc_write(EOIR1, gi->gi_irq);

			if (intr_isrc_dispatch(&gi->gi_isrc, tf) != 0) {
				/* No handler claimed it: complete and mask it. */
				if (gi->gi_trig != INTR_TRIGGER_EDGE)
					gic_icc_write(EOIR1, gi->gi_irq);
				gic_v3_disable_intr(sc->dev, &gi->gi_isrc);
				device_printf(sc->dev,
				    "Stray irq %lu disabled\n", active_irq);
			}
		}
	}
}
570 
571 #ifdef FDT
/*
 * Decode a 3-cell FDT interrupt specifier into a GIC interrupt number
 * plus polarity and trigger mode.  Returns EINVAL for malformed or
 * unsupported specifiers.
 */
static int
gic_map_fdt(device_t dev, u_int ncells, pcell_t *cells, u_int *irqp,
    enum intr_polarity *polp, enum intr_trigger *trigp)
{
	u_int irq;

	if (ncells < 3)
		return (EINVAL);

	/*
	 * The 1st cell is the interrupt type:
	 *	0 = SPI
	 *	1 = PPI
	 * The 2nd cell contains the interrupt number:
	 *	[0 - 987] for SPI
	 *	[0 -  15] for PPI
	 * The 3rd cell is the flags, encoded as follows:
	 *   bits[3:0] trigger type and level flags
	 *	1 = edge triggered
	 *      2 = edge triggered (PPI only)
	 *	4 = level-sensitive
	 *	8 = level-sensitive (PPI only)
	 */
	switch (cells[0]) {
	case 0:
		irq = GIC_FIRST_SPI + cells[1];
		/* SPI irq is checked later. */
		break;
	case 1:
		irq = GIC_FIRST_PPI + cells[1];
		if (irq > GIC_LAST_PPI) {
			device_printf(dev, "unsupported PPI interrupt "
			    "number %u\n", cells[1]);
			return (EINVAL);
		}
		break;
	default:
		device_printf(dev, "unsupported interrupt type "
		    "configuration %u\n", cells[0]);
		return (EINVAL);
	}

	switch (cells[2] & FDT_INTR_MASK) {
	case FDT_INTR_EDGE_RISING:
		*trigp = INTR_TRIGGER_EDGE;
		*polp = INTR_POLARITY_HIGH;
		break;
	case FDT_INTR_EDGE_FALLING:
		*trigp = INTR_TRIGGER_EDGE;
		*polp = INTR_POLARITY_LOW;
		break;
	case FDT_INTR_LEVEL_HIGH:
		*trigp = INTR_TRIGGER_LEVEL;
		*polp = INTR_POLARITY_HIGH;
		break;
	case FDT_INTR_LEVEL_LOW:
		*trigp = INTR_TRIGGER_LEVEL;
		*polp = INTR_POLARITY_LOW;
		break;
	default:
		device_printf(dev, "unsupported trigger/polarity "
		    "configuration 0x%02x\n", cells[2]);
		return (EINVAL);
	}

	/* Check the interrupt is valid */
	if (irq >= GIC_FIRST_SPI && *polp != INTR_POLARITY_HIGH)
		return (EINVAL);

	*irqp = irq;
	return (0);
}
644 #endif
645 
646 static int
647 gic_map_msi(device_t dev, struct intr_map_data_msi *msi_data, u_int *irqp,
648     enum intr_polarity *polp, enum intr_trigger *trigp)
649 {
650 	struct gic_v3_irqsrc *gi;
651 
652 	/* SPI-mapped MSI */
653 	gi = (struct gic_v3_irqsrc *)msi_data->isrc;
654 	if (gi == NULL)
655 		return (ENXIO);
656 
657 	*irqp = gi->gi_irq;
658 
659 	/* MSI/MSI-X interrupts are always edge triggered with high polarity */
660 	*polp = INTR_POLARITY_HIGH;
661 	*trigp = INTR_TRIGGER_EDGE;
662 
663 	return (0);
664 }
665 
/*
 * Translate bus-specific interrupt map data (FDT, ACPI or MSI) into an
 * interrupt number, polarity and trigger, and validate all three against
 * what this controller supports.  Returns EINVAL on any failure.
 */
static int
do_gic_v3_map_intr(device_t dev, struct intr_map_data *data, u_int *irqp,
    enum intr_polarity *polp, enum intr_trigger *trigp)
{
	struct gic_v3_softc *sc;
	enum intr_polarity pol;
	enum intr_trigger trig;
	struct intr_map_data_msi *dam;
#ifdef FDT
	struct intr_map_data_fdt *daf;
#endif
#ifdef DEV_ACPI
	struct intr_map_data_acpi *daa;
#endif
	u_int irq;

	sc = device_get_softc(dev);

	switch (data->type) {
#ifdef FDT
	case INTR_MAP_DATA_FDT:
		daf = (struct intr_map_data_fdt *)data;
		if (gic_map_fdt(dev, daf->ncells, daf->cells, &irq, &pol,
		    &trig) != 0)
			return (EINVAL);
		break;
#endif
#ifdef DEV_ACPI
	case INTR_MAP_DATA_ACPI:
		daa = (struct intr_map_data_acpi *)data;
		irq = daa->irq;
		pol = daa->pol;
		trig = daa->trig;
		break;
#endif
	case INTR_MAP_DATA_MSI:
		/* SPI-mapped MSI */
		dam = (struct intr_map_data_msi *)data;
		if (gic_map_msi(dev, dam, &irq, &pol, &trig) != 0)
			return (EINVAL);
		break;
	default:
		return (EINVAL);
	}

	/* Reject interrupts this controller does not implement. */
	if (irq >= sc->gic_nirqs)
		return (EINVAL);
	switch (pol) {
	case INTR_POLARITY_CONFORM:
	case INTR_POLARITY_LOW:
	case INTR_POLARITY_HIGH:
		break;
	default:
		return (EINVAL);
	}
	switch (trig) {
	case INTR_TRIGGER_CONFORM:
	case INTR_TRIGGER_EDGE:
	case INTR_TRIGGER_LEVEL:
		break;
	default:
		return (EINVAL);
	}

	/* Polarity/trigger outputs are optional. */
	*irqp = irq;
	if (polp != NULL)
		*polp = pol;
	if (trigp != NULL)
		*trigp = trig;
	return (0);
}
737 
738 static int
739 gic_v3_map_intr(device_t dev, struct intr_map_data *data,
740     struct intr_irqsrc **isrcp)
741 {
742 	struct gic_v3_softc *sc;
743 	int error;
744 	u_int irq;
745 
746 	error = do_gic_v3_map_intr(dev, data, &irq, NULL, NULL);
747 	if (error == 0) {
748 		sc = device_get_softc(dev);
749 		*isrcp = GIC_INTR_ISRC(sc, irq);
750 	}
751 	return (error);
752 }
753 
/*
 * PIC_SETUP_INTR method: record (or verify) the polarity/trigger for an
 * interrupt source and, for PPIs/SPIs, program GICD_ICFGR accordingly.
 * SPIs are additionally routed to a CPU via gic_v3_bind_intr().
 */
static int
gic_v3_setup_intr(device_t dev, struct intr_irqsrc *isrc,
    struct resource *res, struct intr_map_data *data)
{
	struct gic_v3_softc *sc = device_get_softc(dev);
	struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
	enum intr_trigger trig;
	enum intr_polarity pol;
	uint32_t reg;
	u_int irq;
	int error;

	if (data == NULL)
		return (ENOTSUP);

	error = do_gic_v3_map_intr(dev, data, &irq, &pol, &trig);
	if (error != 0)
		return (error);

	/* Require a fully-specified configuration for this exact source. */
	if (gi->gi_irq != irq || pol == INTR_POLARITY_CONFORM ||
	    trig == INTR_TRIGGER_CONFORM)
		return (EINVAL);

	/* Compare config if this is not first setup. */
	if (isrc->isrc_handlers != 0) {
		if (pol != gi->gi_pol || trig != gi->gi_trig)
			return (EINVAL);
		else
			return (0);
	}

	/* For MSI/MSI-X we should have already configured these */
	if ((gi->gi_flags & GI_FLAG_MSI) == 0) {
		gi->gi_pol = pol;
		gi->gi_trig = trig;
	}

	/*
	 * XXX - In case that per CPU interrupt is going to be enabled in time
	 *       when SMP is already started, we need some IPI call which
	 *       enables it on others CPUs. Further, it's more complicated as
	 *       pic_enable_source() and pic_disable_source() should act on
	 *       per CPU basis only. Thus, it should be solved here somehow.
	 */
	if (isrc->isrc_flags & INTR_ISRCF_PPI)
		CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu);

	if (irq >= GIC_FIRST_PPI && irq <= GIC_LAST_SPI) {
		mtx_lock_spin(&sc->gic_mtx);

		/* Set the trigger and polarity */
		/* PPIs live in the Re-Distributor's SGI frame; SPIs in the Distributor. */
		if (irq <= GIC_LAST_PPI)
			reg = gic_r_read(sc, 4,
			    GICR_SGI_BASE_SIZE + GICD_ICFGR(irq));
		else
			reg = gic_d_read(sc, 4, GICD_ICFGR(irq));
		/* Each GICD_ICFGR covers 16 interrupts, 2 config bits each. */
		if (trig == INTR_TRIGGER_LEVEL)
			reg &= ~(2 << ((irq % 16) * 2));
		else
			reg |= 2 << ((irq % 16) * 2);

		if (irq <= GIC_LAST_PPI) {
			gic_r_write(sc, 4,
			    GICR_SGI_BASE_SIZE + GICD_ICFGR(irq), reg);
			gic_v3_wait_for_rwp(sc, REDIST);
		} else {
			gic_d_write(sc, 4, GICD_ICFGR(irq), reg);
			gic_v3_wait_for_rwp(sc, DIST);
		}

		mtx_unlock_spin(&sc->gic_mtx);

		gic_v3_bind_intr(dev, isrc);
	}

	return (0);
}
831 
832 static int
833 gic_v3_teardown_intr(device_t dev, struct intr_irqsrc *isrc,
834     struct resource *res, struct intr_map_data *data)
835 {
836 	struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
837 
838 	if (isrc->isrc_handlers == 0 && (gi->gi_flags & GI_FLAG_MSI) == 0) {
839 		gi->gi_pol = INTR_POLARITY_CONFORM;
840 		gi->gi_trig = INTR_TRIGGER_CONFORM;
841 	}
842 
843 	return (0);
844 }
845 
/*
 * Mask an interrupt source: SGIs/PPIs via the current CPU's
 * Re-Distributor (GICD_ICENABLER in the SGI frame), SPIs via the
 * Distributor.  Waits for the register write to complete.
 */
static void
gic_v3_disable_intr(device_t dev, struct intr_irqsrc *isrc)
{
	struct gic_v3_softc *sc;
	struct gic_v3_irqsrc *gi;
	u_int irq;

	sc = device_get_softc(dev);
	gi = (struct gic_v3_irqsrc *)isrc;
	irq = gi->gi_irq;

	if (irq <= GIC_LAST_PPI) {
		/* SGIs and PPIs in corresponding Re-Distributor */
		gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_ICENABLER(irq),
		    GICD_I_MASK(irq));
		gic_v3_wait_for_rwp(sc, REDIST);
	} else if (irq >= GIC_FIRST_SPI && irq <= GIC_LAST_SPI) {
		/* SPIs in distributor */
		gic_d_write(sc, 4, GICD_ICENABLER(irq), GICD_I_MASK(irq));
		gic_v3_wait_for_rwp(sc, DIST);
	} else
		panic("%s: Unsupported IRQ %u", __func__, irq);
}
869 
/*
 * Unmask an interrupt source: SGIs/PPIs via the current CPU's
 * Re-Distributor (GICD_ISENABLER in the SGI frame), SPIs via the
 * Distributor.  Waits for the register write to complete.
 */
static void
gic_v3_enable_intr(device_t dev, struct intr_irqsrc *isrc)
{
	struct gic_v3_softc *sc;
	struct gic_v3_irqsrc *gi;
	u_int irq;

	sc = device_get_softc(dev);
	gi = (struct gic_v3_irqsrc *)isrc;
	irq = gi->gi_irq;

	if (irq <= GIC_LAST_PPI) {
		/* SGIs and PPIs in corresponding Re-Distributor */
		gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_ISENABLER(irq),
		    GICD_I_MASK(irq));
		gic_v3_wait_for_rwp(sc, REDIST);
	} else if (irq >= GIC_FIRST_SPI && irq <= GIC_LAST_SPI) {
		/* SPIs in distributor */
		gic_d_write(sc, 4, GICD_ISENABLER(irq), GICD_I_MASK(irq));
		gic_v3_wait_for_rwp(sc, DIST);
	} else
		panic("%s: Unsupported IRQ %u", __func__, irq);
}
893 
/*
 * Called before scheduling an ithread: mask the source and signal EOI so
 * other interrupts may be delivered while the ithread runs.
 */
static void
gic_v3_pre_ithread(device_t dev, struct intr_irqsrc *isrc)
{
	struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;

	gic_v3_disable_intr(dev, isrc);
	gic_icc_write(EOIR1, gi->gi_irq);
}
902 
/* Called after the ithread has run: unmask the source again. */
static void
gic_v3_post_ithread(device_t dev, struct intr_irqsrc *isrc)
{

	gic_v3_enable_intr(dev, isrc);
}
909 
910 static void
911 gic_v3_post_filter(device_t dev, struct intr_irqsrc *isrc)
912 {
913 	struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
914 
915 	if (gi->gi_trig == INTR_TRIGGER_EDGE)
916 		return;
917 
918 	gic_icc_write(EOIR1, gi->gi_irq);
919 }
920 
/*
 * Route an SPI to a CPU by programming GICD_IROUTER.  With no explicit
 * CPU set requested, pick the next CPU round-robin; otherwise route to
 * the first CPU in the set (an SPI can only target a single CPU here).
 * SGIs/PPIs are per-CPU and cannot be bound; returns EINVAL for them.
 */
static int
gic_v3_bind_intr(device_t dev, struct intr_irqsrc *isrc)
{
	struct gic_v3_softc *sc;
	struct gic_v3_irqsrc *gi;
	int cpu;

	gi = (struct gic_v3_irqsrc *)isrc;
	if (gi->gi_irq <= GIC_LAST_PPI)
		return (EINVAL);

	KASSERT(gi->gi_irq >= GIC_FIRST_SPI && gi->gi_irq <= GIC_LAST_SPI,
	    ("%s: Attempting to bind an invalid IRQ", __func__));

	sc = device_get_softc(dev);

	if (CPU_EMPTY(&isrc->isrc_cpu)) {
		gic_irq_cpu = intr_irq_next_cpu(gic_irq_cpu, &all_cpus);
		CPU_SETOF(gic_irq_cpu, &isrc->isrc_cpu);
		gic_d_write(sc, 8, GICD_IROUTER(gi->gi_irq),
		    CPU_AFFINITY(gic_irq_cpu));
	} else {
		/*
		 * We can only bind to a single CPU so select
		 * the first CPU found.
		 */
		cpu = CPU_FFS(&isrc->isrc_cpu) - 1;
		gic_d_write(sc, 8, GICD_IROUTER(gi->gi_irq), CPU_AFFINITY(cpu));
	}

	return (0);
}
953 
954 #ifdef SMP
/*
 * Bring up the GIC on a secondary (AP) CPU: run the per-CPU init
 * sequence, unmask SGIs/PPIs that already have handlers on this CPU, and
 * propagate the call to child PICs (e.g. the ITS).
 */
static void
gic_v3_init_secondary(device_t dev)
{
	device_t child;
	struct gic_v3_softc *sc;
	gic_v3_initseq_t *init_func;
	struct intr_irqsrc *isrc;
	u_int cpu, irq;
	int err, i;

	sc = device_get_softc(dev);
	cpu = PCPU_GET(cpuid);

	/* Run the init sequence for this secondary CPU. */
	for (init_func = gic_v3_secondary_init; *init_func != NULL;
	    init_func++) {
		err = (*init_func)(sc);
		if (err != 0) {
			device_printf(dev,
			    "Could not initialize GIC for CPU%u\n", cpu);
			return;
		}
	}

	/* Unmask attached SGI interrupts. */
	for (irq = GIC_FIRST_SGI; irq <= GIC_LAST_SGI; irq++) {
		isrc = GIC_INTR_ISRC(sc, irq);
		if (intr_isrc_init_on_cpu(isrc, cpu))
			gic_v3_enable_intr(dev, isrc);
	}

	/* Unmask attached PPI interrupts. */
	for (irq = GIC_FIRST_PPI; irq <= GIC_LAST_PPI; irq++) {
		isrc = GIC_INTR_ISRC(sc, irq);
		if (intr_isrc_init_on_cpu(isrc, cpu))
			gic_v3_enable_intr(dev, isrc);
	}

	for (i = 0; i < sc->gic_nchildren; i++) {
		child = sc->gic_children[i];
		PIC_INIT_SECONDARY(child);
	}
}
998 
/*
 * Send the SGI behind "isrc" to every CPU in "cpus" by writing
 * ICC_SGI1R_EL1.  CPUs sharing affinity levels 1-3 are batched into one
 * register write, with the targets encoded in the AFF0 bitmask.
 */
static void
gic_v3_ipi_send(device_t dev, struct intr_irqsrc *isrc, cpuset_t cpus,
    u_int ipi)
{
	struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
	uint64_t aff, val, irq;
	int i;

#define	GIC_AFF_MASK	(CPU_AFF3_MASK | CPU_AFF2_MASK | CPU_AFF1_MASK)
#define	GIC_AFFINITY(i)	(CPU_AFFINITY(i) & GIC_AFF_MASK)
	aff = GIC_AFFINITY(0);
	irq = gi->gi_irq;
	val = 0;

	/* Iterate through all CPUs in set */
	for (i = 0; i <= mp_maxid; i++) {
		/* Move to the next affinity group */
		if (aff != GIC_AFFINITY(i)) {
			/* Send the IPI */
			if (val != 0) {
				gic_icc_write(SGI1R, val);
				val = 0;
			}
			aff = GIC_AFFINITY(i);
		}

		/* Send the IPI to this cpu */
		if (CPU_ISSET(i, &cpus)) {
#define	ICC_SGI1R_AFFINITY(aff)					\
    (((uint64_t)CPU_AFF3(aff) << ICC_SGI1R_EL1_AFF3_SHIFT) |	\
     ((uint64_t)CPU_AFF2(aff) << ICC_SGI1R_EL1_AFF2_SHIFT) |	\
     ((uint64_t)CPU_AFF1(aff) << ICC_SGI1R_EL1_AFF1_SHIFT))
			/* Set the affinity when the first at this level */
			if (val == 0)
				val = ICC_SGI1R_AFFINITY(aff) |
				    irq << ICC_SGI1R_EL1_SGIID_SHIFT;
			/* Set the bit to send the IPI to the CPU */
			val |= 1 << CPU_AFF0(CPU_AFFINITY(i));
		}
	}

	/* Send the IPI to the last cpu affinity group */
	if (val != 0)
		gic_icc_write(SGI1R, val);
#undef GIC_AFF_MASK
#undef GIC_AFFINITY
}
1046 
/*
 * Allocate the next unused SGI for IPI "ipi" and return its interrupt
 * source.  Returns ENOSPC once all SGIs have been assigned.  The SGI->IPI
 * mapping is recorded in sgi_to_ipi[] for dispatch in arm_gic_v3_intr().
 */
static int
gic_v3_ipi_setup(device_t dev, u_int ipi, struct intr_irqsrc **isrcp)
{
	struct intr_irqsrc *isrc;
	struct gic_v3_softc *sc = device_get_softc(dev);

	if (sgi_first_unused > GIC_LAST_SGI)
		return (ENOSPC);

	isrc = GIC_INTR_ISRC(sc, sgi_first_unused);
	sgi_to_ipi[sgi_first_unused++] = ipi;

	/* Mark the source enabled on the CPU running this setup. */
	CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu);

	*isrcp = isrc;
	return (0);
}
1064 #endif /* SMP */
1065 
1066 /*
1067  * Helper routines
1068  */
/*
 * Spin until the Distributor's or current CPU's Re-Distributor's Register
 * Write Pending (RWP) bit clears; panics after ~1 second (1000000 x
 * DELAY(1)) without progress.
 */
static void
gic_v3_wait_for_rwp(struct gic_v3_softc *sc, enum gic_v3_xdist xdist)
{
	struct resource *res;
	u_int cpuid;
	size_t us_left = 1000000;

	cpuid = PCPU_GET(cpuid);

	switch (xdist) {
	case DIST:
		res = sc->gic_dist;
		break;
	case REDIST:
		res = &sc->gic_redists.pcpu[cpuid]->res;
		break;
	default:
		KASSERT(0, ("%s: Attempt to wait for unknown RWP", __func__));
		return;
	}

	while ((bus_read_4(res, GICD_CTLR) & GICD_CTLR_RWP) != 0) {
		DELAY(1);
		if (us_left-- == 0)
			panic("GICD Register write pending for too long");
	}
}
1096 
1097 /* CPU interface. */
/*
 * Program the CPU interface priority mask register; only interrupts
 * with priority higher than the mask value are signalled to this CPU.
 */
static __inline void
gic_v3_cpu_priority(uint64_t mask)
{

	/* Set priority mask */
	gic_icc_write(PMR, mask & ICC_PMR_EL1_PRIO_MASK);
}
1105 
/*
 * Enable access to the GIC CPU interface through the ICC_* system
 * registers (as opposed to the legacy memory-mapped interface) by
 * setting ICC_SRE_EL1.SRE.  Returns ENXIO when the bit does not stick,
 * which happens when a higher exception level has disabled SRE.
 */
static int
gic_v3_cpu_enable_sre(struct gic_v3_softc *sc)
{
	uint64_t sre;
	u_int cpuid;

	cpuid = PCPU_GET(cpuid);
	/*
	 * Set the SRE bit to enable access to GIC CPU interface
	 * via system registers.
	 */
	sre = READ_SPECIALREG(icc_sre_el1);
	sre |= ICC_SRE_EL1_SRE;
	WRITE_SPECIALREG(icc_sre_el1, sre);
	isb();
	/*
	 * Now ensure that the bit is set.  The write above is permitted
	 * to be ignored when SRE was disabled at a higher exception level.
	 */
	sre = READ_SPECIALREG(icc_sre_el1);
	if ((sre & ICC_SRE_EL1_SRE) == 0) {
		/* The write did not stick: SRE was disabled in EL2. */
		device_printf(sc->dev, "ERROR: CPU%u cannot enable CPU interface "
		    "via system registers\n", cpuid);
		return (ENXIO);
	} else if (bootverbose) {
		device_printf(sc->dev,
		    "CPU%u enabled CPU interface via system registers\n",
		    cpuid);
	}

	return (0);
}
1138 
/*
 * Per-CPU initialization of the GIC CPU interface: enable system
 * register access, open the priority mask, disable EOI mode and
 * enable Group 1 interrupt delivery.
 */
static int
gic_v3_cpu_init(struct gic_v3_softc *sc)
{
	int err;

	/* Enable access to CPU interface via system registers */
	err = gic_v3_cpu_enable_sre(sc);
	if (err != 0)
		return (err);
	/* Priority mask to minimum - accept all interrupts */
	gic_v3_cpu_priority(GIC_PRIORITY_MIN);
	/* Disable EOI mode (a single EOI write both drops and deactivates) */
	gic_icc_clear(CTLR, ICC_CTLR_EL1_EOIMODE);
	/*
	 * Enable Group 1 (Non-secure) interrupts.
	 * NOTE(review): IGRPEN1 is written with the ICC_IGRPEN0_EL1_EN
	 * constant — presumably both enable bits share the same position;
	 * confirm against the ICC_IGRPEN1_EL1 register definition.
	 */
	gic_icc_set(IGRPEN1, ICC_IGRPEN0_EL1_EN);

	return (0);
}
1157 
1158 /* Distributor */
/*
 * One-time Distributor bring-up: disable it, put all SPIs into a known
 * default state (Group 1 Non-secure, level-triggered, highest priority,
 * masked), then re-enable it with affinity routing and route every SPI
 * to the boot CPU.  The write ordering below follows the GICv3
 * initialization sequence and must not be rearranged.
 */
static int
gic_v3_dist_init(struct gic_v3_softc *sc)
{
	uint64_t aff;
	u_int i;

	/*
	 * 1. Disable the Distributor
	 */
	gic_d_write(sc, 4, GICD_CTLR, 0);
	gic_v3_wait_for_rwp(sc, DIST);

	/*
	 * 2. Configure the Distributor
	 */
	/* Set all SPIs to be Group 1 Non-secure */
	for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i += GICD_I_PER_IGROUPRn)
		gic_d_write(sc, 4, GICD_IGROUPR(i), 0xFFFFFFFF);

	/* Set all global interrupts to be level triggered, active low. */
	for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i += GICD_I_PER_ICFGRn)
		gic_d_write(sc, 4, GICD_ICFGR(i), 0x00000000);

	/* Set priority to all shared interrupts */
	for (i = GIC_FIRST_SPI;
	    i < sc->gic_nirqs; i += GICD_I_PER_IPRIORITYn) {
		/* Set highest priority */
		gic_d_write(sc, 4, GICD_IPRIORITYR(i), GIC_PRIORITY_MAX);
	}

	/*
	 * Disable all interrupts. Leave PPI and SGIs as they are enabled in
	 * Re-Distributor registers.
	 */
	for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i += GICD_I_PER_ISENABLERn)
		gic_d_write(sc, 4, GICD_ICENABLER(i), 0xFFFFFFFF);

	gic_v3_wait_for_rwp(sc, DIST);

	/*
	 * 3. Enable Distributor
	 */
	/* Enable Distributor with ARE, Group 1 */
	gic_d_write(sc, 4, GICD_CTLR, GICD_CTLR_ARE_NS | GICD_CTLR_G1A |
	    GICD_CTLR_G1);

	/*
	 * 4. Route all interrupts to boot CPU.
	 */
	aff = CPU_AFFINITY(0);
	for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i++)
		gic_d_write(sc, 8, GICD_IROUTER(i), aff);

	return (0);
}
1214 
1215 /* Re-Distributor */
1216 static int
1217 gic_v3_redist_alloc(struct gic_v3_softc *sc)
1218 {
1219 	u_int cpuid;
1220 
1221 	/* Allocate struct resource for all CPU's Re-Distributor registers */
1222 	for (cpuid = 0; cpuid <= mp_maxid; cpuid++)
1223 		if (CPU_ISSET(cpuid, &all_cpus) != 0)
1224 			sc->gic_redists.pcpu[cpuid] =
1225 				malloc(sizeof(*sc->gic_redists.pcpu[0]),
1226 				    M_GIC_V3, M_WAITOK);
1227 		else
1228 			sc->gic_redists.pcpu[cpuid] = NULL;
1229 	return (0);
1230 }
1231 
/*
 * Locate this CPU's Re-Distributor by walking every Re-Distributor
 * region and comparing the affinity field of each frame's GICR_TYPER
 * against the CPU's own affinity.  On a match, a copy of the region's
 * resource — with its bus handle advanced to the matching frame — is
 * stored in the per-CPU slot.  Returns ENODEV for a region that is not
 * a GICv3/GICv4 Re-Distributor, ENXIO when no frame matches.
 */
static int
gic_v3_redist_find(struct gic_v3_softc *sc)
{
	struct resource r_res;
	bus_space_handle_t r_bsh;
	uint64_t aff;
	uint64_t typer;
	uint32_t pidr2;
	u_int cpuid;
	size_t i;

	cpuid = PCPU_GET(cpuid);

	aff = CPU_AFFINITY(cpuid);
	/* Affinity in format for comparison with typer */
	aff = (CPU_AFF3(aff) << 24) | (CPU_AFF2(aff) << 16) |
	    (CPU_AFF1(aff) << 8) | CPU_AFF0(aff);

	if (bootverbose) {
		device_printf(sc->dev,
		    "Start searching for Re-Distributor\n");
	}
	/* Iterate through Re-Distributor regions */
	for (i = 0; i < sc->gic_redists.nregions; i++) {
		/* Take a copy of the region's resource */
		r_res = *sc->gic_redists.regions[i];
		r_bsh = rman_get_bushandle(&r_res);

		/* Reject regions that are not GICv3/GICv4 Re-Distributors. */
		pidr2 = bus_read_4(&r_res, GICR_PIDR2);
		switch (GICR_PIDR2_ARCH(pidr2)) {
		case GICR_PIDR2_ARCH_GICv3: /* fall through */
		case GICR_PIDR2_ARCH_GICv4:
			break;
		default:
			device_printf(sc->dev,
			    "No Re-Distributor found for CPU%u\n", cpuid);
			return (ENODEV);
		}

		/* Walk the frames of this region until the Last bit is set. */
		do {
			typer = bus_read_8(&r_res, GICR_TYPER);
			if ((typer >> GICR_TYPER_AFF_SHIFT) == aff) {
				KASSERT(sc->gic_redists.pcpu[cpuid] != NULL,
				    ("Invalid pointer to per-CPU redistributor"));
				/* Copy res contents to its final destination */
				sc->gic_redists.pcpu[cpuid]->res = r_res;
				sc->gic_redists.pcpu[cpuid]->lpi_enabled = false;
				if (bootverbose) {
					device_printf(sc->dev,
					    "CPU%u Re-Distributor has been found\n",
					    cpuid);
				}
				return (0);
			}

			/*
			 * Advance to the next frame: RD + SGI pages, plus
			 * two more pages when the frame supports VLPIs.
			 */
			r_bsh += (GICR_RD_BASE_SIZE + GICR_SGI_BASE_SIZE);
			if ((typer & GICR_TYPER_VLPIS) != 0) {
				r_bsh +=
				    (GICR_VLPI_BASE_SIZE + GICR_RESERVED_SIZE);
			}

			rman_set_bushandle(&r_res, r_bsh);
		} while ((typer & GICR_TYPER_LAST) == 0);
	}

	device_printf(sc->dev, "No Re-Distributor found for CPU%u\n", cpuid);
	return (ENXIO);
}
1300 
1301 static int
1302 gic_v3_redist_wake(struct gic_v3_softc *sc)
1303 {
1304 	uint32_t waker;
1305 	size_t us_left = 1000000;
1306 
1307 	waker = gic_r_read(sc, 4, GICR_WAKER);
1308 	/* Wake up Re-Distributor for this CPU */
1309 	waker &= ~GICR_WAKER_PS;
1310 	gic_r_write(sc, 4, GICR_WAKER, waker);
1311 	/*
1312 	 * When clearing ProcessorSleep bit it is required to wait for
1313 	 * ChildrenAsleep to become zero following the processor power-on.
1314 	 */
1315 	while ((gic_r_read(sc, 4, GICR_WAKER) & GICR_WAKER_CA) != 0) {
1316 		DELAY(1);
1317 		if (us_left-- == 0) {
1318 			panic("Could not wake Re-Distributor for CPU%u",
1319 			    PCPU_GET(cpuid));
1320 		}
1321 	}
1322 
1323 	if (bootverbose) {
1324 		device_printf(sc->dev, "CPU%u Re-Distributor woke up\n",
1325 		    PCPU_GET(cpuid));
1326 	}
1327 
1328 	return (0);
1329 }
1330 
/*
 * Per-CPU Re-Distributor initialization: find and wake this CPU's
 * Re-Distributor, then put SGIs and PPIs into a known state
 * (Group 1 Non-secure, highest priority, PPIs masked, SGIs enabled).
 */
static int
gic_v3_redist_init(struct gic_v3_softc *sc)
{
	int err;
	size_t i;

	err = gic_v3_redist_find(sc);
	if (err != 0)
		return (err);

	err = gic_v3_redist_wake(sc);
	if (err != 0)
		return (err);

	/* Configure SGIs and PPIs to be Group1 Non-secure */
	gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICR_IGROUPR0,
	    0xFFFFFFFF);

	/* Disable PPIs (the mask written covers only the PPI bits) */
	gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICR_ICENABLER0,
	    GICR_I_ENABLER_PPI_MASK);
	/* Enable SGIs */
	gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICR_ISENABLER0,
	    GICR_I_ENABLER_SGI_MASK);

	/* Set priority for SGIs and PPIs */
	for (i = 0; i <= GIC_LAST_PPI; i += GICR_I_PER_IPRIORITYn) {
		gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_IPRIORITYR(i),
		    GIC_PRIORITY_MAX);
	}

	gic_v3_wait_for_rwp(sc, REDIST);

	return (0);
}
1366 
1367 /*
1368  * SPI-mapped Message Based Interrupts -- a GICv3 MSI/MSI-X controller.
1369  */
1370 
/*
 * Allocate "count" contiguous MSI interrupts from the range
 * [mbi_start, mbi_start + mbi_count).  The start of the allocation is
 * aligned to "maxcount" (count and maxcount must both be powers of
 * two).  Returns ENXIO when no suitably aligned free run exists; on
 * success the allocated sources are returned through isrc[].
 */
static int
gic_v3_gic_alloc_msi(device_t dev, u_int mbi_start, u_int mbi_count,
    int count, int maxcount, struct intr_irqsrc **isrc)
{
	struct gic_v3_softc *sc;
	int i, irq, end_irq;
	bool found;

	KASSERT(powerof2(count), ("%s: bad count", __func__));
	KASSERT(powerof2(maxcount), ("%s: bad maxcount", __func__));

	sc = device_get_softc(dev);

	mtx_lock(&sc->gic_mbi_mtx);

	found = false;
	for (irq = mbi_start; irq < mbi_start + mbi_count; irq++) {
		/* Start on an aligned interrupt */
		if ((irq & (maxcount - 1)) != 0)
			continue;

		/* Assume we found a valid range until shown otherwise */
		found = true;

		/* Check this range is valid */
		for (end_irq = irq; end_irq != irq + count; end_irq++) {
			/* No free interrupts */
			if (end_irq == mbi_start + mbi_count) {
				found = false;
				break;
			}

			KASSERT((sc->gic_irqs[end_irq].gi_flags & GI_FLAG_MSI)!= 0,
			    ("%s: Non-MSI interrupt found", __func__));

			/* This is already used */
			if ((sc->gic_irqs[end_irq].gi_flags & GI_FLAG_MSI_USED) ==
			    GI_FLAG_MSI_USED) {
				found = false;
				break;
			}
		}
		if (found)
			break;
	}

	/* Not enough interrupts were found */
	if (!found || irq == mbi_start + mbi_count) {
		mtx_unlock(&sc->gic_mbi_mtx);
		return (ENXIO);
	}

	for (i = 0; i < count; i++) {
		/* Mark the interrupt as used */
		sc->gic_irqs[irq + i].gi_flags |= GI_FLAG_MSI_USED;
	}
	mtx_unlock(&sc->gic_mbi_mtx);

	/* Hand the allocated sources back after dropping the lock. */
	for (i = 0; i < count; i++)
		isrc[i] = (struct intr_irqsrc *)&sc->gic_irqs[irq + i];

	return (0);
}
1434 
1435 static int
1436 gic_v3_gic_release_msi(device_t dev, int count, struct intr_irqsrc **isrc)
1437 {
1438 	struct gic_v3_softc *sc;
1439 	struct gic_v3_irqsrc *gi;
1440 	int i;
1441 
1442 	sc = device_get_softc(dev);
1443 
1444 	mtx_lock(&sc->gic_mbi_mtx);
1445 	for (i = 0; i < count; i++) {
1446 		gi = (struct gic_v3_irqsrc *)isrc[i];
1447 
1448 		KASSERT((gi->gi_flags & GI_FLAG_MSI_USED) == GI_FLAG_MSI_USED,
1449 		    ("%s: Trying to release an unused MSI-X interrupt",
1450 		    __func__));
1451 
1452 		gi->gi_flags &= ~GI_FLAG_MSI_USED;
1453 	}
1454 	mtx_unlock(&sc->gic_mbi_mtx);
1455 
1456 	return (0);
1457 }
1458 
1459 static int
1460 gic_v3_gic_alloc_msix(device_t dev, u_int mbi_start, u_int mbi_count,
1461     struct intr_irqsrc **isrcp)
1462 {
1463 	struct gic_v3_softc *sc;
1464 	int irq;
1465 
1466 	sc = device_get_softc(dev);
1467 
1468 	mtx_lock(&sc->gic_mbi_mtx);
1469 	/* Find an unused interrupt */
1470 	for (irq = mbi_start; irq < mbi_start + mbi_count; irq++) {
1471 		KASSERT((sc->gic_irqs[irq].gi_flags & GI_FLAG_MSI) != 0,
1472 		    ("%s: Non-MSI interrupt found", __func__));
1473 		if ((sc->gic_irqs[irq].gi_flags & GI_FLAG_MSI_USED) == 0)
1474 			break;
1475 	}
1476 	/* No free interrupt was found */
1477 	if (irq == mbi_start + mbi_count) {
1478 		mtx_unlock(&sc->gic_mbi_mtx);
1479 		return (ENXIO);
1480 	}
1481 
1482 	/* Mark the interrupt as used */
1483 	sc->gic_irqs[irq].gi_flags |= GI_FLAG_MSI_USED;
1484 	mtx_unlock(&sc->gic_mbi_mtx);
1485 
1486 	*isrcp = (struct intr_irqsrc *)&sc->gic_irqs[irq];
1487 
1488 	return (0);
1489 }
1490 
1491 static int
1492 gic_v3_gic_release_msix(device_t dev, struct intr_irqsrc *isrc)
1493 {
1494 	struct gic_v3_softc *sc;
1495 	struct gic_v3_irqsrc *gi;
1496 
1497 	sc = device_get_softc(dev);
1498 	gi = (struct gic_v3_irqsrc *)isrc;
1499 
1500 	KASSERT((gi->gi_flags & GI_FLAG_MSI_USED) == GI_FLAG_MSI_USED,
1501 	    ("%s: Trying to release an unused MSI-X interrupt", __func__));
1502 
1503 	mtx_lock(&sc->gic_mbi_mtx);
1504 	gi->gi_flags &= ~GI_FLAG_MSI_USED;
1505 	mtx_unlock(&sc->gic_mbi_mtx);
1506 
1507 	return (0);
1508 }
1509 
1510 static int
1511 gic_v3_alloc_msi(device_t dev, device_t child, int count, int maxcount,
1512     device_t *pic, struct intr_irqsrc **isrc)
1513 {
1514 	struct gic_v3_softc *sc;
1515 	int error;
1516 
1517 	sc = device_get_softc(dev);
1518 	error = gic_v3_gic_alloc_msi(dev, sc->gic_mbi_start,
1519 	    sc->gic_mbi_end - sc->gic_mbi_start, count, maxcount, isrc);
1520 	if (error != 0)
1521 		return (error);
1522 
1523 	*pic = dev;
1524 	return (0);
1525 }
1526 
1527 static int
1528 gic_v3_release_msi(device_t dev, device_t child, int count,
1529     struct intr_irqsrc **isrc)
1530 {
1531 	return (gic_v3_gic_release_msi(dev, count, isrc));
1532 }
1533 
1534 static int
1535 gic_v3_alloc_msix(device_t dev, device_t child, device_t *pic,
1536     struct intr_irqsrc **isrc)
1537 {
1538 	struct gic_v3_softc *sc;
1539 	int error;
1540 
1541 	sc = device_get_softc(dev);
1542 	error = gic_v3_gic_alloc_msix(dev, sc->gic_mbi_start,
1543 	    sc->gic_mbi_end - sc->gic_mbi_start, isrc);
1544 	if (error != 0)
1545 		return (error);
1546 
1547 	*pic = dev;
1548 
1549 	return (0);
1550 }
1551 
1552 static int
1553 gic_v3_release_msix(device_t dev, device_t child, struct intr_irqsrc *isrc)
1554 {
1555 	return (gic_v3_gic_release_msix(dev, isrc));
1556 }
1557 
1558 static int
1559 gic_v3_map_msi(device_t dev, device_t child, struct intr_irqsrc *isrc,
1560     uint64_t *addr, uint32_t *data)
1561 {
1562 	struct gic_v3_softc *sc = device_get_softc(dev);
1563 	struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
1564 
1565 	*addr = vtophys(rman_get_virtual(sc->gic_dist)) + GICD_SETSPI_NSR;
1566 	*data = gi->gi_irq;
1567 
1568 	return (0);
1569 }
1570