xref: /freebsd/sys/arm64/arm64/gic_v3.c (revision 53b70c86)
1 /*-
2  * Copyright (c) 2015-2016 The FreeBSD Foundation
3  *
4  * This software was developed by Andrew Turner under
5  * the sponsorship of the FreeBSD Foundation.
6  *
7  * This software was developed by Semihalf under
8  * the sponsorship of the FreeBSD Foundation.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  */
31 
32 #include "opt_acpi.h"
33 #include "opt_platform.h"
34 
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37 
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/bitstring.h>
41 #include <sys/bus.h>
42 #include <sys/kernel.h>
43 #include <sys/ktr.h>
44 #include <sys/malloc.h>
45 #include <sys/module.h>
46 #include <sys/rman.h>
47 #include <sys/pcpu.h>
48 #include <sys/proc.h>
49 #include <sys/cpuset.h>
50 #include <sys/lock.h>
51 #include <sys/mutex.h>
52 #include <sys/smp.h>
53 #include <sys/interrupt.h>
54 
55 #include <vm/vm.h>
56 #include <vm/pmap.h>
57 
58 #include <machine/bus.h>
59 #include <machine/cpu.h>
60 #include <machine/intr.h>
61 
62 #ifdef FDT
63 #include <dev/fdt/fdt_intr.h>
64 #include <dev/ofw/ofw_bus_subr.h>
65 #endif
66 
67 #ifdef DEV_ACPI
68 #include <contrib/dev/acpica/include/acpi.h>
69 #include <dev/acpica/acpivar.h>
70 #endif
71 
72 #include "pic_if.h"
73 #include "msi_if.h"
74 
75 #include <arm/arm/gic_common.h>
76 #include "gic_v3_reg.h"
77 #include "gic_v3_var.h"
78 
/* Bus interface methods. */
static bus_get_domain_t gic_v3_get_domain;
static bus_read_ivar_t gic_v3_read_ivar;
static bus_write_ivar_t gic_v3_write_ivar;

/* Interrupt controller (INTRNG PIC) interface methods. */
static pic_disable_intr_t gic_v3_disable_intr;
static pic_enable_intr_t gic_v3_enable_intr;
static pic_map_intr_t gic_v3_map_intr;
static pic_setup_intr_t gic_v3_setup_intr;
static pic_teardown_intr_t gic_v3_teardown_intr;
static pic_post_filter_t gic_v3_post_filter;
static pic_post_ithread_t gic_v3_post_ithread;
static pic_pre_ithread_t gic_v3_pre_ithread;
static pic_bind_intr_t gic_v3_bind_intr;
#ifdef SMP
static pic_init_secondary_t gic_v3_init_secondary;
static pic_ipi_send_t gic_v3_ipi_send;
static pic_ipi_setup_t gic_v3_ipi_setup;
#endif

/* MSI/MSI-X interface methods. */
static msi_alloc_msi_t gic_v3_alloc_msi;
static msi_release_msi_t gic_v3_release_msi;
static msi_alloc_msix_t gic_v3_alloc_msix;
static msi_release_msix_t gic_v3_release_msix;
static msi_map_msi_t gic_v3_map_msi;

/* Last CPU an SPI was routed to by gic_v3_bind_intr(); advanced round-robin. */
static u_int gic_irq_cpu;
#ifdef SMP
/* Mapping from SGI number to the IPI it was allocated for. */
static u_int sgi_to_ipi[GIC_LAST_SGI - GIC_FIRST_SGI + 1];
/* Next SGI available for gic_v3_ipi_setup() to hand out. */
static u_int sgi_first_unused = GIC_FIRST_SGI;
#endif
109 
static device_method_t gic_v3_methods[] = {
	/* Device interface */
	DEVMETHOD(device_detach,	gic_v3_detach),

	/* Bus interface */
	DEVMETHOD(bus_get_domain,	gic_v3_get_domain),
	DEVMETHOD(bus_read_ivar,	gic_v3_read_ivar),
	DEVMETHOD(bus_write_ivar,	gic_v3_write_ivar),

	/* Interrupt controller interface */
	DEVMETHOD(pic_disable_intr,	gic_v3_disable_intr),
	DEVMETHOD(pic_enable_intr,	gic_v3_enable_intr),
	DEVMETHOD(pic_map_intr,		gic_v3_map_intr),
	DEVMETHOD(pic_setup_intr,	gic_v3_setup_intr),
	DEVMETHOD(pic_teardown_intr,	gic_v3_teardown_intr),
	DEVMETHOD(pic_post_filter,	gic_v3_post_filter),
	DEVMETHOD(pic_post_ithread,	gic_v3_post_ithread),
	DEVMETHOD(pic_pre_ithread,	gic_v3_pre_ithread),
#ifdef SMP
	DEVMETHOD(pic_bind_intr,	gic_v3_bind_intr),
	DEVMETHOD(pic_init_secondary,	gic_v3_init_secondary),
	DEVMETHOD(pic_ipi_send,		gic_v3_ipi_send),
	DEVMETHOD(pic_ipi_setup,	gic_v3_ipi_setup),
#endif

	/* MSI/MSI-X */
	DEVMETHOD(msi_alloc_msi,        gic_v3_alloc_msi),
	DEVMETHOD(msi_release_msi,      gic_v3_release_msi),
	DEVMETHOD(msi_alloc_msix,       gic_v3_alloc_msix),
	DEVMETHOD(msi_release_msix,     gic_v3_release_msix),
	DEVMETHOD(msi_map_msi,          gic_v3_map_msi),

	/* End */
	DEVMETHOD_END
};

/* Base class; the FDT and ACPI front-ends subclass this driver. */
DEFINE_CLASS_0(gic, gic_v3_driver, gic_v3_methods,
    sizeof(struct gic_v3_softc));
148 
/*
 * Driver-specific definitions.
 */
MALLOC_DEFINE(M_GIC_V3, "GICv3", GIC_V3_DEVSTR);

/*
 * Helper functions and definitions.
 */
/* Destination registers, either Distributor or Re-Distributor */
enum gic_v3_xdist {
	DIST = 0,
	REDIST,
};

/* Per-interrupt-source state; embeds the generic INTRNG source. */
struct gic_v3_irqsrc {
	struct intr_irqsrc	gi_isrc;
	uint32_t		gi_irq;		/* GIC interrupt ID */
	enum intr_polarity	gi_pol;
	enum intr_trigger	gi_trig;
#define GI_FLAG_MSI		(1 << 1) /* This interrupt source should only */
					 /* be used for MSI/MSI-X interrupts */
#define GI_FLAG_MSI_USED	(1 << 2) /* This irq is already allocated */
					 /* for a MSI/MSI-X interrupt */
	u_int			gi_flags;
};

/* Helper routines starting with gic_v3_ */
static int gic_v3_dist_init(struct gic_v3_softc *);
static int gic_v3_redist_alloc(struct gic_v3_softc *);
static int gic_v3_redist_find(struct gic_v3_softc *);
static int gic_v3_redist_init(struct gic_v3_softc *);
static int gic_v3_cpu_init(struct gic_v3_softc *);
static void gic_v3_wait_for_rwp(struct gic_v3_softc *, enum gic_v3_xdist);

/* A sequence of init functions for primary (boot) CPU */
typedef int (*gic_v3_initseq_t) (struct gic_v3_softc *);
/* Primary CPU initialization sequence */
static gic_v3_initseq_t gic_v3_primary_init[] = {
	gic_v3_dist_init,
	gic_v3_redist_alloc,
	gic_v3_redist_init,
	gic_v3_cpu_init,
	NULL
};

#ifdef SMP
/* Secondary CPU initialization sequence */
static gic_v3_initseq_t gic_v3_secondary_init[] = {
	gic_v3_redist_init,
	gic_v3_cpu_init,
	NULL
};
#endif
202 
203 uint32_t
204 gic_r_read_4(device_t dev, bus_size_t offset)
205 {
206 	struct gic_v3_softc *sc;
207 	struct resource *rdist;
208 
209 	sc = device_get_softc(dev);
210 	rdist = &sc->gic_redists.pcpu[PCPU_GET(cpuid)]->res;
211 	return (bus_read_4(rdist, offset));
212 }
213 
214 uint64_t
215 gic_r_read_8(device_t dev, bus_size_t offset)
216 {
217 	struct gic_v3_softc *sc;
218 	struct resource *rdist;
219 
220 	sc = device_get_softc(dev);
221 	rdist = &sc->gic_redists.pcpu[PCPU_GET(cpuid)]->res;
222 	return (bus_read_8(rdist, offset));
223 }
224 
225 void
226 gic_r_write_4(device_t dev, bus_size_t offset, uint32_t val)
227 {
228 	struct gic_v3_softc *sc;
229 	struct resource *rdist;
230 
231 	sc = device_get_softc(dev);
232 	rdist = &sc->gic_redists.pcpu[PCPU_GET(cpuid)]->res;
233 	bus_write_4(rdist, offset, val);
234 }
235 
236 void
237 gic_r_write_8(device_t dev, bus_size_t offset, uint64_t val)
238 {
239 	struct gic_v3_softc *sc;
240 	struct resource *rdist;
241 
242 	sc = device_get_softc(dev);
243 	rdist = &sc->gic_redists.pcpu[PCPU_GET(cpuid)]->res;
244 	bus_write_8(rdist, offset, val);
245 }
246 
247 static void
248 gic_v3_reserve_msi_range(device_t dev, u_int start, u_int count)
249 {
250 	struct gic_v3_softc *sc;
251 	int i;
252 
253 	sc = device_get_softc(dev);
254 
255 	KASSERT((start + count) < sc->gic_nirqs,
256 	    ("%s: Trying to allocate too many MSI IRQs: %d + %d > %d", __func__,
257 	    start, count, sc->gic_nirqs));
258 	for (i = 0; i < count; i++) {
259 		KASSERT(sc->gic_irqs[start + i].gi_isrc.isrc_handlers == 0,
260 		    ("%s: MSI interrupt %d already has a handler", __func__,
261 		    count + i));
262 		KASSERT(sc->gic_irqs[start + i].gi_pol == INTR_POLARITY_CONFORM,
263 		    ("%s: MSI interrupt %d already has a polarity", __func__,
264 		    count + i));
265 		KASSERT(sc->gic_irqs[start + i].gi_trig == INTR_TRIGGER_CONFORM,
266 		    ("%s: MSI interrupt %d already has a trigger", __func__,
267 		    count + i));
268 		sc->gic_irqs[start + i].gi_pol = INTR_POLARITY_HIGH;
269 		sc->gic_irqs[start + i].gi_trig = INTR_TRIGGER_EDGE;
270 		sc->gic_irqs[start + i].gi_flags |= GI_FLAG_MSI;
271 	}
272 }
273 
274 /*
275  * Device interface.
276  */
277 int
278 gic_v3_attach(device_t dev)
279 {
280 	struct gic_v3_softc *sc;
281 	gic_v3_initseq_t *init_func;
282 	uint32_t typer;
283 	int rid;
284 	int err;
285 	size_t i;
286 	u_int irq;
287 	const char *name;
288 
289 	sc = device_get_softc(dev);
290 	sc->gic_registered = FALSE;
291 	sc->dev = dev;
292 	err = 0;
293 
294 	/* Initialize mutex */
295 	mtx_init(&sc->gic_mtx, "GICv3 lock", NULL, MTX_SPIN);
296 
297 	/*
298 	 * Allocate array of struct resource.
299 	 * One entry for Distributor and all remaining for Re-Distributor.
300 	 */
301 	sc->gic_res = malloc(
302 	    sizeof(*sc->gic_res) * (sc->gic_redists.nregions + 1),
303 	    M_GIC_V3, M_WAITOK);
304 
305 	/* Now allocate corresponding resources */
306 	for (i = 0, rid = 0; i < (sc->gic_redists.nregions + 1); i++, rid++) {
307 		sc->gic_res[rid] = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
308 		    &rid, RF_ACTIVE);
309 		if (sc->gic_res[rid] == NULL)
310 			return (ENXIO);
311 	}
312 
313 	/*
314 	 * Distributor interface
315 	 */
316 	sc->gic_dist = sc->gic_res[0];
317 
318 	/*
319 	 * Re-Dristributor interface
320 	 */
321 	/* Allocate space under region descriptions */
322 	sc->gic_redists.regions = malloc(
323 	    sizeof(*sc->gic_redists.regions) * sc->gic_redists.nregions,
324 	    M_GIC_V3, M_WAITOK);
325 
326 	/* Fill-up bus_space information for each region. */
327 	for (i = 0, rid = 1; i < sc->gic_redists.nregions; i++, rid++)
328 		sc->gic_redists.regions[i] = sc->gic_res[rid];
329 
330 	/* Get the number of supported SPI interrupts */
331 	typer = gic_d_read(sc, 4, GICD_TYPER);
332 	sc->gic_nirqs = GICD_TYPER_I_NUM(typer);
333 	if (sc->gic_nirqs > GIC_I_NUM_MAX)
334 		sc->gic_nirqs = GIC_I_NUM_MAX;
335 
336 	sc->gic_irqs = malloc(sizeof(*sc->gic_irqs) * sc->gic_nirqs,
337 	    M_GIC_V3, M_WAITOK | M_ZERO);
338 	name = device_get_nameunit(dev);
339 	for (irq = 0; irq < sc->gic_nirqs; irq++) {
340 		struct intr_irqsrc *isrc;
341 
342 		sc->gic_irqs[irq].gi_irq = irq;
343 		sc->gic_irqs[irq].gi_pol = INTR_POLARITY_CONFORM;
344 		sc->gic_irqs[irq].gi_trig = INTR_TRIGGER_CONFORM;
345 
346 		isrc = &sc->gic_irqs[irq].gi_isrc;
347 		if (irq <= GIC_LAST_SGI) {
348 			err = intr_isrc_register(isrc, sc->dev,
349 			    INTR_ISRCF_IPI, "%s,i%u", name, irq - GIC_FIRST_SGI);
350 		} else if (irq <= GIC_LAST_PPI) {
351 			err = intr_isrc_register(isrc, sc->dev,
352 			    INTR_ISRCF_PPI, "%s,p%u", name, irq - GIC_FIRST_PPI);
353 		} else {
354 			err = intr_isrc_register(isrc, sc->dev, 0,
355 			    "%s,s%u", name, irq - GIC_FIRST_SPI);
356 		}
357 		if (err != 0) {
358 			/* XXX call intr_isrc_deregister() */
359 			free(sc->gic_irqs, M_DEVBUF);
360 			return (err);
361 		}
362 	}
363 
364 	mtx_init(&sc->gic_mbi_mtx, "GICv3 mbi lock", NULL, MTX_DEF);
365 	if (sc->gic_mbi_start > 0) {
366 		gic_v3_reserve_msi_range(dev, sc->gic_mbi_start,
367 		    sc->gic_mbi_end - sc->gic_mbi_start);
368 
369 		if (bootverbose) {
370 			device_printf(dev, "using spi %u to %u\n", sc->gic_mbi_start,
371 					sc->gic_mbi_end);
372 		}
373 	}
374 
375 	/*
376 	 * Read the Peripheral ID2 register. This is an implementation
377 	 * defined register, but seems to be implemented in all GICv3
378 	 * parts and Linux expects it to be there.
379 	 */
380 	sc->gic_pidr2 = gic_d_read(sc, 4, GICD_PIDR2);
381 
382 	/* Get the number of supported interrupt identifier bits */
383 	sc->gic_idbits = GICD_TYPER_IDBITS(typer);
384 
385 	if (bootverbose) {
386 		device_printf(dev, "SPIs: %u, IDs: %u\n",
387 		    sc->gic_nirqs, (1 << sc->gic_idbits) - 1);
388 	}
389 
390 	/* Train init sequence for boot CPU */
391 	for (init_func = gic_v3_primary_init; *init_func != NULL; init_func++) {
392 		err = (*init_func)(sc);
393 		if (err != 0)
394 			return (err);
395 	}
396 
397 	return (0);
398 }
399 
/*
 * Release the resources acquired by gic_v3_attach(). Panics if the PIC
 * is still registered with the interrupt framework, since there is no
 * deregistration path yet (see XXX below).
 */
int
gic_v3_detach(device_t dev)
{
	struct gic_v3_softc *sc;
	size_t i;
	int rid;

	sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		/*
		 * XXX: We should probably deregister PIC
		 */
		if (sc->gic_registered)
			panic("Trying to detach registered PIC");
	}
	/* Release the Distributor (rid 0) and Re-Distributor resources. */
	for (rid = 0; rid < (sc->gic_redists.nregions + 1); rid++)
		bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->gic_res[rid]);

	/* Free the per-CPU Re-Distributor bookkeeping. */
	for (i = 0; i <= mp_maxid; i++)
		free(sc->gic_redists.pcpu[i], M_GIC_V3);

	free(sc->gic_res, M_GIC_V3);
	free(sc->gic_redists.regions, M_GIC_V3);

	return (0);
}
427 
428 static int
429 gic_v3_get_domain(device_t dev, device_t child, int *domain)
430 {
431 	struct gic_v3_devinfo *di;
432 
433 	di = device_get_ivars(child);
434 	if (di->gic_domain < 0)
435 		return (ENOENT);
436 
437 	*domain = di->gic_domain;
438 	return (0);
439 }
440 
/*
 * Report instance variables to child devices (e.g. the ITS).
 */
static int
gic_v3_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
{
	struct gic_v3_softc *sc;

	sc = device_get_softc(dev);

	switch (which) {
	case GICV3_IVAR_NIRQS:
		/* IRQ numbers left over after the GIC's own, split per child. */
		*result = (intr_nirq - sc->gic_nirqs) / sc->gic_nchildren;
		return (0);
	case GICV3_IVAR_REDIST:
		/* The calling CPU's Re-Distributor bookkeeping. */
		*result = (uintptr_t)sc->gic_redists.pcpu[PCPU_GET(cpuid)];
		return (0);
	case GIC_IVAR_HW_REV:
		KASSERT(
		    GICR_PIDR2_ARCH(sc->gic_pidr2) == GICR_PIDR2_ARCH_GICv3 ||
		    GICR_PIDR2_ARCH(sc->gic_pidr2) == GICR_PIDR2_ARCH_GICv4,
		    ("gic_v3_read_ivar: Invalid GIC architecture: %d (%.08X)",
		     GICR_PIDR2_ARCH(sc->gic_pidr2), sc->gic_pidr2));
		*result = GICR_PIDR2_ARCH(sc->gic_pidr2);
		return (0);
	case GIC_IVAR_BUS:
		KASSERT(sc->gic_bus != GIC_BUS_UNKNOWN,
		    ("gic_v3_read_ivar: Unknown bus type"));
		KASSERT(sc->gic_bus <= GIC_BUS_MAX,
		    ("gic_v3_read_ivar: Invalid bus type %u", sc->gic_bus));
		*result = sc->gic_bus;
		return (0);
	case GIC_IVAR_MBI_START:
		*result = sc->gic_mbi_start;
		return (0);
	case GIC_IVAR_MBI_COUNT:
		/* gic_mbi_end is exclusive, so this yields the SPI count. */
		*result = sc->gic_mbi_end - sc->gic_mbi_start;
		return (0);
	}

	return (ENOENT);
}
480 
481 static int
482 gic_v3_write_ivar(device_t dev, device_t child, int which, uintptr_t value)
483 {
484 	struct gic_v3_softc *sc;
485 
486 	sc = device_get_softc(dev);
487 
488 	switch(which) {
489 	case GICV3_IVAR_NIRQS:
490 	case GICV3_IVAR_REDIST:
491 	case GIC_IVAR_HW_REV:
492 	case GIC_IVAR_BUS:
493 		return (EINVAL);
494 	case GIC_IVAR_MBI_START:
495 		/*
496 		 * GIC_IVAR_MBI_START must be set once and first. This allows
497 		 * us to reserve the registers when GIC_IVAR_MBI_COUNT is set.
498 		 */
499 		MPASS(sc->gic_mbi_start == 0);
500 		MPASS(sc->gic_mbi_end == 0);
501 		MPASS(value >= GIC_FIRST_SPI);
502 		MPASS(value < sc->gic_nirqs);
503 
504 		sc->gic_mbi_start = value;
505 		return (0);
506 	case GIC_IVAR_MBI_COUNT:
507 		MPASS(sc->gic_mbi_start != 0);
508 		MPASS(sc->gic_mbi_end == 0);
509 
510 		sc->gic_mbi_end = value - sc->gic_mbi_start;
511 
512 		MPASS(sc->gic_mbi_end <= sc->gic_nirqs);
513 
514 		/* Reserve these interrupts for MSI/MSI-X use */
515 		gic_v3_reserve_msi_range(dev, sc->gic_mbi_start, value);
516 
517 		return (0);
518 	}
519 
520 	return (ENOENT);
521 }
522 
/*
 * Top-level interrupt filter: repeatedly acknowledge, dispatch and
 * (where needed) EOI interrupts until the CPU interface returns an
 * out-of-range (spurious) ID. LPIs are forwarded to the child PIC
 * (e.g. the ITS); SGIs are dispatched as IPIs; PPIs/SPIs go through
 * the INTRNG source dispatch.
 */
int
arm_gic_v3_intr(void *arg)
{
	struct gic_v3_softc *sc = arg;
	struct gic_v3_irqsrc *gi;
	struct intr_pic *pic;
	uint64_t active_irq;
	struct trapframe *tf;

	pic = sc->gic_pic;

	while (1) {
		if (CPU_MATCH_ERRATA_CAVIUM_THUNDERX_1_1) {
			/*
			 * Hardware:		Cavium ThunderX
			 * Chip revision:	Pass 1.0 (early version)
			 *			Pass 1.1 (production)
			 * ERRATUM:		22978, 23154
			 */
			__asm __volatile(
			    "nop;nop;nop;nop;nop;nop;nop;nop;	\n"
			    "mrs %0, ICC_IAR1_EL1		\n"
			    "nop;nop;nop;nop;			\n"
			    "dsb sy				\n"
			    : "=&r" (active_irq));
		} else {
			/* Acknowledge the highest-priority pending interrupt. */
			active_irq = gic_icc_read(IAR1);
		}

		/* LPIs are owned by a child interrupt controller. */
		if (active_irq >= GIC_FIRST_LPI) {
			intr_child_irq_handler(pic, active_irq);
			continue;
		}

		/* A spurious/out-of-range ID means nothing is pending. */
		if (__predict_false(active_irq >= sc->gic_nirqs))
			return (FILTER_HANDLED);

		tf = curthread->td_intr_frame;
		gi = &sc->gic_irqs[active_irq];
		if (active_irq <= GIC_LAST_SGI) {
			/* Call EOI for all IPI before dispatch. */
			gic_icc_write(EOIR1, (uint64_t)active_irq);
#ifdef SMP
			intr_ipi_dispatch(sgi_to_ipi[gi->gi_irq], tf);
#else
			device_printf(sc->dev, "SGI %ju on UP system detected\n",
			    (uintmax_t)(active_irq - GIC_FIRST_SGI));
#endif
		} else if (active_irq >= GIC_FIRST_PPI &&
		    active_irq <= GIC_LAST_SPI) {
			/* Edge interrupts are EOId before dispatch... */
			if (gi->gi_trig == INTR_TRIGGER_EDGE)
				gic_icc_write(EOIR1, gi->gi_irq);

			if (intr_isrc_dispatch(&gi->gi_isrc, tf) != 0) {
				/* ...level interrupts only if dispatch failed. */
				if (gi->gi_trig != INTR_TRIGGER_EDGE)
					gic_icc_write(EOIR1, gi->gi_irq);
				gic_v3_disable_intr(sc->dev, &gi->gi_isrc);
				device_printf(sc->dev,
				    "Stray irq %lu disabled\n", active_irq);
			}
		}
	}
}
586 
587 #ifdef FDT
/*
 * Decode a 3-cell FDT interrupt specifier into a GIC interrupt number
 * plus trigger/polarity. Returns EINVAL for malformed or unsupported
 * specifiers (bad type, out-of-range PPI, or a low-polarity SPI).
 */
static int
gic_map_fdt(device_t dev, u_int ncells, pcell_t *cells, u_int *irqp,
    enum intr_polarity *polp, enum intr_trigger *trigp)
{
	u_int irq;

	if (ncells < 3)
		return (EINVAL);

	/*
	 * The 1st cell is the interrupt type:
	 *	0 = SPI
	 *	1 = PPI
	 * The 2nd cell contains the interrupt number:
	 *	[0 - 987] for SPI
	 *	[0 -  15] for PPI
	 * The 3rd cell is the flags, encoded as follows:
	 *   bits[3:0] trigger type and level flags
	 *	1 = edge triggered
	 *      2 = edge triggered (PPI only)
	 *	4 = level-sensitive
	 *	8 = level-sensitive (PPI only)
	 */
	switch (cells[0]) {
	case 0:
		irq = GIC_FIRST_SPI + cells[1];
		/* SPI irq is checked later. */
		break;
	case 1:
		irq = GIC_FIRST_PPI + cells[1];
		if (irq > GIC_LAST_PPI) {
			device_printf(dev, "unsupported PPI interrupt "
			    "number %u\n", cells[1]);
			return (EINVAL);
		}
		break;
	default:
		device_printf(dev, "unsupported interrupt type "
		    "configuration %u\n", cells[0]);
		return (EINVAL);
	}

	switch (cells[2] & FDT_INTR_MASK) {
	case FDT_INTR_EDGE_RISING:
		*trigp = INTR_TRIGGER_EDGE;
		*polp = INTR_POLARITY_HIGH;
		break;
	case FDT_INTR_EDGE_FALLING:
		*trigp = INTR_TRIGGER_EDGE;
		*polp = INTR_POLARITY_LOW;
		break;
	case FDT_INTR_LEVEL_HIGH:
		*trigp = INTR_TRIGGER_LEVEL;
		*polp = INTR_POLARITY_HIGH;
		break;
	case FDT_INTR_LEVEL_LOW:
		*trigp = INTR_TRIGGER_LEVEL;
		*polp = INTR_POLARITY_LOW;
		break;
	default:
		device_printf(dev, "unsupported trigger/polarity "
		    "configuration 0x%02x\n", cells[2]);
		return (EINVAL);
	}

	/* Check the interrupt is valid */
	if (irq >= GIC_FIRST_SPI && *polp != INTR_POLARITY_HIGH)
		return (EINVAL);

	*irqp = irq;
	return (0);
}
660 #endif
661 
662 static int
663 gic_map_msi(device_t dev, struct intr_map_data_msi *msi_data, u_int *irqp,
664     enum intr_polarity *polp, enum intr_trigger *trigp)
665 {
666 	struct gic_v3_irqsrc *gi;
667 
668 	/* SPI-mapped MSI */
669 	gi = (struct gic_v3_irqsrc *)msi_data->isrc;
670 	if (gi == NULL)
671 		return (ENXIO);
672 
673 	*irqp = gi->gi_irq;
674 
675 	/* MSI/MSI-X interrupts are always edge triggered with high polarity */
676 	*polp = INTR_POLARITY_HIGH;
677 	*trigp = INTR_TRIGGER_EDGE;
678 
679 	return (0);
680 }
681 
/*
 * Common interrupt-map translation for FDT, ACPI and SPI-mapped MSI map
 * data. Validates the resulting interrupt number, polarity and trigger
 * before returning them through the out parameters; polp and trigp may
 * be NULL when the caller only needs the interrupt number.
 */
static int
do_gic_v3_map_intr(device_t dev, struct intr_map_data *data, u_int *irqp,
    enum intr_polarity *polp, enum intr_trigger *trigp)
{
	struct gic_v3_softc *sc;
	enum intr_polarity pol;
	enum intr_trigger trig;
	struct intr_map_data_msi *dam;
#ifdef FDT
	struct intr_map_data_fdt *daf;
#endif
#ifdef DEV_ACPI
	struct intr_map_data_acpi *daa;
#endif
	u_int irq;

	sc = device_get_softc(dev);

	switch (data->type) {
#ifdef FDT
	case INTR_MAP_DATA_FDT:
		daf = (struct intr_map_data_fdt *)data;
		if (gic_map_fdt(dev, daf->ncells, daf->cells, &irq, &pol,
		    &trig) != 0)
			return (EINVAL);
		break;
#endif
#ifdef DEV_ACPI
	case INTR_MAP_DATA_ACPI:
		/* ACPI map data carries the values directly. */
		daa = (struct intr_map_data_acpi *)data;
		irq = daa->irq;
		pol = daa->pol;
		trig = daa->trig;
		break;
#endif
	case INTR_MAP_DATA_MSI:
		/* SPI-mapped MSI */
		dam = (struct intr_map_data_msi *)data;
		if (gic_map_msi(dev, dam, &irq, &pol, &trig) != 0)
			return (EINVAL);
		break;
	default:
		return (EINVAL);
	}

	/* Reject out-of-range IRQs and nonsense enum values. */
	if (irq >= sc->gic_nirqs)
		return (EINVAL);
	switch (pol) {
	case INTR_POLARITY_CONFORM:
	case INTR_POLARITY_LOW:
	case INTR_POLARITY_HIGH:
		break;
	default:
		return (EINVAL);
	}
	switch (trig) {
	case INTR_TRIGGER_CONFORM:
	case INTR_TRIGGER_EDGE:
	case INTR_TRIGGER_LEVEL:
		break;
	default:
		return (EINVAL);
	}

	*irqp = irq;
	if (polp != NULL)
		*polp = pol;
	if (trigp != NULL)
		*trigp = trig;
	return (0);
}
753 
754 static int
755 gic_v3_map_intr(device_t dev, struct intr_map_data *data,
756     struct intr_irqsrc **isrcp)
757 {
758 	struct gic_v3_softc *sc;
759 	int error;
760 	u_int irq;
761 
762 	error = do_gic_v3_map_intr(dev, data, &irq, NULL, NULL);
763 	if (error == 0) {
764 		sc = device_get_softc(dev);
765 		*isrcp = GIC_INTR_ISRC(sc, irq);
766 	}
767 	return (error);
768 }
769 
/*
 * PIC setup_intr method: on the first setup, record the trigger and
 * polarity, program GICD_ICFGR (or the Re-Distributor's copy for PPIs)
 * and route the interrupt to a CPU. On subsequent setups, only verify
 * the requested configuration matches the recorded one.
 */
static int
gic_v3_setup_intr(device_t dev, struct intr_irqsrc *isrc,
    struct resource *res, struct intr_map_data *data)
{
	struct gic_v3_softc *sc = device_get_softc(dev);
	struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
	enum intr_trigger trig;
	enum intr_polarity pol;
	uint32_t reg;
	u_int irq;
	int error;

	if (data == NULL)
		return (ENOTSUP);

	error = do_gic_v3_map_intr(dev, data, &irq, &pol, &trig);
	if (error != 0)
		return (error);

	/* The map data must resolve to this source with a full config. */
	if (gi->gi_irq != irq || pol == INTR_POLARITY_CONFORM ||
	    trig == INTR_TRIGGER_CONFORM)
		return (EINVAL);

	/* Compare config if this is not first setup. */
	if (isrc->isrc_handlers != 0) {
		if (pol != gi->gi_pol || trig != gi->gi_trig)
			return (EINVAL);
		else
			return (0);
	}

	/* For MSI/MSI-X we should have already configured these */
	if ((gi->gi_flags & GI_FLAG_MSI) == 0) {
		gi->gi_pol = pol;
		gi->gi_trig = trig;
	}

	/*
	 * XXX - In case that per CPU interrupt is going to be enabled in time
	 *       when SMP is already started, we need some IPI call which
	 *       enables it on others CPUs. Further, it's more complicated as
	 *       pic_enable_source() and pic_disable_source() should act on
	 *       per CPU basis only. Thus, it should be solved here somehow.
	 */
	if (isrc->isrc_flags & INTR_ISRCF_PPI)
		CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu);

	if (irq >= GIC_FIRST_PPI && irq <= GIC_LAST_SPI) {
		mtx_lock_spin(&sc->gic_mtx);

		/* Set the trigger and polarity */
		if (irq <= GIC_LAST_PPI)
			reg = gic_r_read(sc, 4,
			    GICR_SGI_BASE_SIZE + GICD_ICFGR(irq));
		else
			reg = gic_d_read(sc, 4, GICD_ICFGR(irq));
		/* Each IRQ owns a 2-bit field; bit 1 set selects edge. */
		if (trig == INTR_TRIGGER_LEVEL)
			reg &= ~(2 << ((irq % 16) * 2));
		else
			reg |= 2 << ((irq % 16) * 2);

		if (irq <= GIC_LAST_PPI) {
			gic_r_write(sc, 4,
			    GICR_SGI_BASE_SIZE + GICD_ICFGR(irq), reg);
			gic_v3_wait_for_rwp(sc, REDIST);
		} else {
			gic_d_write(sc, 4, GICD_ICFGR(irq), reg);
			gic_v3_wait_for_rwp(sc, DIST);
		}

		mtx_unlock_spin(&sc->gic_mtx);

		gic_v3_bind_intr(dev, isrc);
	}

	return (0);
}
847 
848 static int
849 gic_v3_teardown_intr(device_t dev, struct intr_irqsrc *isrc,
850     struct resource *res, struct intr_map_data *data)
851 {
852 	struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
853 
854 	if (isrc->isrc_handlers == 0 && (gi->gi_flags & GI_FLAG_MSI) == 0) {
855 		gi->gi_pol = INTR_POLARITY_CONFORM;
856 		gi->gi_trig = INTR_TRIGGER_CONFORM;
857 	}
858 
859 	return (0);
860 }
861 
862 static void
863 gic_v3_disable_intr(device_t dev, struct intr_irqsrc *isrc)
864 {
865 	struct gic_v3_softc *sc;
866 	struct gic_v3_irqsrc *gi;
867 	u_int irq;
868 
869 	sc = device_get_softc(dev);
870 	gi = (struct gic_v3_irqsrc *)isrc;
871 	irq = gi->gi_irq;
872 
873 	if (irq <= GIC_LAST_PPI) {
874 		/* SGIs and PPIs in corresponding Re-Distributor */
875 		gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_ICENABLER(irq),
876 		    GICD_I_MASK(irq));
877 		gic_v3_wait_for_rwp(sc, REDIST);
878 	} else if (irq >= GIC_FIRST_SPI && irq <= GIC_LAST_SPI) {
879 		/* SPIs in distributor */
880 		gic_d_write(sc, 4, GICD_ICENABLER(irq), GICD_I_MASK(irq));
881 		gic_v3_wait_for_rwp(sc, DIST);
882 	} else
883 		panic("%s: Unsupported IRQ %u", __func__, irq);
884 }
885 
886 static void
887 gic_v3_enable_intr(device_t dev, struct intr_irqsrc *isrc)
888 {
889 	struct gic_v3_softc *sc;
890 	struct gic_v3_irqsrc *gi;
891 	u_int irq;
892 
893 	sc = device_get_softc(dev);
894 	gi = (struct gic_v3_irqsrc *)isrc;
895 	irq = gi->gi_irq;
896 
897 	if (irq <= GIC_LAST_PPI) {
898 		/* SGIs and PPIs in corresponding Re-Distributor */
899 		gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_ISENABLER(irq),
900 		    GICD_I_MASK(irq));
901 		gic_v3_wait_for_rwp(sc, REDIST);
902 	} else if (irq >= GIC_FIRST_SPI && irq <= GIC_LAST_SPI) {
903 		/* SPIs in distributor */
904 		gic_d_write(sc, 4, GICD_ISENABLER(irq), GICD_I_MASK(irq));
905 		gic_v3_wait_for_rwp(sc, DIST);
906 	} else
907 		panic("%s: Unsupported IRQ %u", __func__, irq);
908 }
909 
910 static void
911 gic_v3_pre_ithread(device_t dev, struct intr_irqsrc *isrc)
912 {
913 	struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
914 
915 	gic_v3_disable_intr(dev, isrc);
916 	gic_icc_write(EOIR1, gi->gi_irq);
917 }
918 
919 static void
920 gic_v3_post_ithread(device_t dev, struct intr_irqsrc *isrc)
921 {
922 
923 	gic_v3_enable_intr(dev, isrc);
924 }
925 
926 static void
927 gic_v3_post_filter(device_t dev, struct intr_irqsrc *isrc)
928 {
929 	struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
930 
931 	if (gi->gi_trig == INTR_TRIGGER_EDGE)
932 		return;
933 
934 	gic_icc_write(EOIR1, gi->gi_irq);
935 }
936 
/*
 * Route an SPI to a single CPU. With no explicit binding requested,
 * spread SPIs across all CPUs round-robin; otherwise use the first CPU
 * of the requested set, as GICD_IROUTER targets exactly one CPU.
 * SGIs and PPIs are inherently per-CPU and cannot be re-routed.
 */
static int
gic_v3_bind_intr(device_t dev, struct intr_irqsrc *isrc)
{
	struct gic_v3_softc *sc;
	struct gic_v3_irqsrc *gi;
	int cpu;

	gi = (struct gic_v3_irqsrc *)isrc;
	if (gi->gi_irq <= GIC_LAST_PPI)
		return (EINVAL);

	KASSERT(gi->gi_irq >= GIC_FIRST_SPI && gi->gi_irq <= GIC_LAST_SPI,
	    ("%s: Attempting to bind an invalid IRQ", __func__));

	sc = device_get_softc(dev);

	if (CPU_EMPTY(&isrc->isrc_cpu)) {
		/* No binding requested: pick the next CPU round-robin. */
		gic_irq_cpu = intr_irq_next_cpu(gic_irq_cpu, &all_cpus);
		CPU_SETOF(gic_irq_cpu, &isrc->isrc_cpu);
		gic_d_write(sc, 8, GICD_IROUTER(gi->gi_irq),
		    CPU_AFFINITY(gic_irq_cpu));
	} else {
		/*
		 * We can only bind to a single CPU so select
		 * the first CPU found.
		 */
		cpu = CPU_FFS(&isrc->isrc_cpu) - 1;
		gic_d_write(sc, 8, GICD_IROUTER(gi->gi_irq), CPU_AFFINITY(cpu));
	}

	return (0);
}
969 
970 #ifdef SMP
971 static void
972 gic_v3_init_secondary(device_t dev)
973 {
974 	device_t child;
975 	struct gic_v3_softc *sc;
976 	gic_v3_initseq_t *init_func;
977 	struct intr_irqsrc *isrc;
978 	u_int cpu, irq;
979 	int err, i;
980 
981 	sc = device_get_softc(dev);
982 	cpu = PCPU_GET(cpuid);
983 
984 	/* Train init sequence for boot CPU */
985 	for (init_func = gic_v3_secondary_init; *init_func != NULL;
986 	    init_func++) {
987 		err = (*init_func)(sc);
988 		if (err != 0) {
989 			device_printf(dev,
990 			    "Could not initialize GIC for CPU%u\n", cpu);
991 			return;
992 		}
993 	}
994 
995 	/* Unmask attached SGI interrupts. */
996 	for (irq = GIC_FIRST_SGI; irq <= GIC_LAST_SGI; irq++) {
997 		isrc = GIC_INTR_ISRC(sc, irq);
998 		if (intr_isrc_init_on_cpu(isrc, cpu))
999 			gic_v3_enable_intr(dev, isrc);
1000 	}
1001 
1002 	/* Unmask attached PPI interrupts. */
1003 	for (irq = GIC_FIRST_PPI; irq <= GIC_LAST_PPI; irq++) {
1004 		isrc = GIC_INTR_ISRC(sc, irq);
1005 		if (intr_isrc_init_on_cpu(isrc, cpu))
1006 			gic_v3_enable_intr(dev, isrc);
1007 	}
1008 
1009 	for (i = 0; i < sc->gic_nchildren; i++) {
1010 		child = sc->gic_children[i];
1011 		PIC_INIT_SECONDARY(child);
1012 	}
1013 }
1014 
/*
 * Send the SGI backing 'isrc' to every CPU in 'cpus'. ICC_SGI1R_EL1
 * takes one cluster affinity plus a 16-bit target list per write, so
 * CPUs are batched by affinity group (AFF3..AFF1) and one register
 * write is issued per group that has targets.
 */
static void
gic_v3_ipi_send(device_t dev, struct intr_irqsrc *isrc, cpuset_t cpus,
    u_int ipi)
{
	struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
	uint64_t aff, val, irq;
	int i;

#define	GIC_AFF_MASK	(CPU_AFF3_MASK | CPU_AFF2_MASK | CPU_AFF1_MASK)
#define	GIC_AFFINITY(i)	(CPU_AFFINITY(i) & GIC_AFF_MASK)
	aff = GIC_AFFINITY(0);
	irq = gi->gi_irq;
	val = 0;

	/* Iterate through all CPUs in set */
	for (i = 0; i <= mp_maxid; i++) {
		/* Move to the next affinity group */
		if (aff != GIC_AFFINITY(i)) {
			/* Send the IPI */
			if (val != 0) {
				gic_icc_write(SGI1R, val);
				val = 0;
			}
			aff = GIC_AFFINITY(i);
		}

		/* Send the IPI to this cpu */
		if (CPU_ISSET(i, &cpus)) {
#define	ICC_SGI1R_AFFINITY(aff)					\
    (((uint64_t)CPU_AFF3(aff) << ICC_SGI1R_EL1_AFF3_SHIFT) |	\
     ((uint64_t)CPU_AFF2(aff) << ICC_SGI1R_EL1_AFF2_SHIFT) |	\
     ((uint64_t)CPU_AFF1(aff) << ICC_SGI1R_EL1_AFF1_SHIFT))
			/* Set the affinity when the first at this level */
			if (val == 0)
				val = ICC_SGI1R_AFFINITY(aff) |
				    irq << ICC_SGI1R_EL1_SGIID_SHIFT;
			/* Set the bit to send the IPI to the CPU */
			val |= 1 << CPU_AFF0(CPU_AFFINITY(i));
		}
	}

	/* Send the IPI to the last cpu affinity group */
	if (val != 0)
		gic_icc_write(SGI1R, val);
#undef GIC_AFF_MASK
#undef GIC_AFFINITY
}
1062 
1063 static int
1064 gic_v3_ipi_setup(device_t dev, u_int ipi, struct intr_irqsrc **isrcp)
1065 {
1066 	struct intr_irqsrc *isrc;
1067 	struct gic_v3_softc *sc = device_get_softc(dev);
1068 
1069 	if (sgi_first_unused > GIC_LAST_SGI)
1070 		return (ENOSPC);
1071 
1072 	isrc = GIC_INTR_ISRC(sc, sgi_first_unused);
1073 	sgi_to_ipi[sgi_first_unused++] = ipi;
1074 
1075 	CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu);
1076 
1077 	*isrcp = isrc;
1078 	return (0);
1079 }
1080 #endif /* SMP */
1081 
1082 /*
1083  * Helper routines
1084  */
1085 static void
1086 gic_v3_wait_for_rwp(struct gic_v3_softc *sc, enum gic_v3_xdist xdist)
1087 {
1088 	struct resource *res;
1089 	u_int cpuid;
1090 	size_t us_left = 1000000;
1091 
1092 	cpuid = PCPU_GET(cpuid);
1093 
1094 	switch (xdist) {
1095 	case DIST:
1096 		res = sc->gic_dist;
1097 		break;
1098 	case REDIST:
1099 		res = &sc->gic_redists.pcpu[cpuid]->res;
1100 		break;
1101 	default:
1102 		KASSERT(0, ("%s: Attempt to wait for unknown RWP", __func__));
1103 		return;
1104 	}
1105 
1106 	while ((bus_read_4(res, GICD_CTLR) & GICD_CTLR_RWP) != 0) {
1107 		DELAY(1);
1108 		if (us_left-- == 0)
1109 			panic("GICD Register write pending for too long");
1110 	}
1111 }
1112 
1113 /* CPU interface. */
1114 static __inline void
1115 gic_v3_cpu_priority(uint64_t mask)
1116 {
1117 
1118 	/* Set prority mask */
1119 	gic_icc_write(PMR, mask & ICC_PMR_EL1_PRIO_MASK);
1120 }
1121 
1122 static int
1123 gic_v3_cpu_enable_sre(struct gic_v3_softc *sc)
1124 {
1125 	uint64_t sre;
1126 	u_int cpuid;
1127 
1128 	cpuid = PCPU_GET(cpuid);
1129 	/*
1130 	 * Set the SRE bit to enable access to GIC CPU interface
1131 	 * via system registers.
1132 	 */
1133 	sre = READ_SPECIALREG(icc_sre_el1);
1134 	sre |= ICC_SRE_EL1_SRE;
1135 	WRITE_SPECIALREG(icc_sre_el1, sre);
1136 	isb();
1137 	/*
1138 	 * Now ensure that the bit is set.
1139 	 */
1140 	sre = READ_SPECIALREG(icc_sre_el1);
1141 	if ((sre & ICC_SRE_EL1_SRE) == 0) {
1142 		/* We are done. This was disabled in EL2 */
1143 		device_printf(sc->dev, "ERROR: CPU%u cannot enable CPU interface "
1144 		    "via system registers\n", cpuid);
1145 		return (ENXIO);
1146 	} else if (bootverbose) {
1147 		device_printf(sc->dev,
1148 		    "CPU%u enabled CPU interface via system registers\n",
1149 		    cpuid);
1150 	}
1151 
1152 	return (0);
1153 }
1154 
1155 static int
1156 gic_v3_cpu_init(struct gic_v3_softc *sc)
1157 {
1158 	int err;
1159 
1160 	/* Enable access to CPU interface via system registers */
1161 	err = gic_v3_cpu_enable_sre(sc);
1162 	if (err != 0)
1163 		return (err);
1164 	/* Priority mask to minimum - accept all interrupts */
1165 	gic_v3_cpu_priority(GIC_PRIORITY_MIN);
1166 	/* Disable EOI mode */
1167 	gic_icc_clear(CTLR, ICC_CTLR_EL1_EOIMODE);
1168 	/* Enable group 1 (insecure) interrups */
1169 	gic_icc_set(IGRPEN1, ICC_IGRPEN0_EL1_EN);
1170 
1171 	return (0);
1172 }
1173 
1174 /* Distributor */
1175 static int
1176 gic_v3_dist_init(struct gic_v3_softc *sc)
1177 {
1178 	uint64_t aff;
1179 	u_int i;
1180 
1181 	/*
1182 	 * 1. Disable the Distributor
1183 	 */
1184 	gic_d_write(sc, 4, GICD_CTLR, 0);
1185 	gic_v3_wait_for_rwp(sc, DIST);
1186 
1187 	/*
1188 	 * 2. Configure the Distributor
1189 	 */
1190 	/* Set all SPIs to be Group 1 Non-secure */
1191 	for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i += GICD_I_PER_IGROUPRn)
1192 		gic_d_write(sc, 4, GICD_IGROUPR(i), 0xFFFFFFFF);
1193 
1194 	/* Set all global interrupts to be level triggered, active low. */
1195 	for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i += GICD_I_PER_ICFGRn)
1196 		gic_d_write(sc, 4, GICD_ICFGR(i), 0x00000000);
1197 
1198 	/* Set priority to all shared interrupts */
1199 	for (i = GIC_FIRST_SPI;
1200 	    i < sc->gic_nirqs; i += GICD_I_PER_IPRIORITYn) {
1201 		/* Set highest priority */
1202 		gic_d_write(sc, 4, GICD_IPRIORITYR(i), GIC_PRIORITY_MAX);
1203 	}
1204 
1205 	/*
1206 	 * Disable all interrupts. Leave PPI and SGIs as they are enabled in
1207 	 * Re-Distributor registers.
1208 	 */
1209 	for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i += GICD_I_PER_ISENABLERn)
1210 		gic_d_write(sc, 4, GICD_ICENABLER(i), 0xFFFFFFFF);
1211 
1212 	gic_v3_wait_for_rwp(sc, DIST);
1213 
1214 	/*
1215 	 * 3. Enable Distributor
1216 	 */
1217 	/* Enable Distributor with ARE, Group 1 */
1218 	gic_d_write(sc, 4, GICD_CTLR, GICD_CTLR_ARE_NS | GICD_CTLR_G1A |
1219 	    GICD_CTLR_G1);
1220 
1221 	/*
1222 	 * 4. Route all interrupts to boot CPU.
1223 	 */
1224 	aff = CPU_AFFINITY(0);
1225 	for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i++)
1226 		gic_d_write(sc, 8, GICD_IROUTER(i), aff);
1227 
1228 	return (0);
1229 }
1230 
1231 /* Re-Distributor */
1232 static int
1233 gic_v3_redist_alloc(struct gic_v3_softc *sc)
1234 {
1235 	u_int cpuid;
1236 
1237 	/* Allocate struct resource for all CPU's Re-Distributor registers */
1238 	for (cpuid = 0; cpuid <= mp_maxid; cpuid++)
1239 		if (CPU_ISSET(cpuid, &all_cpus) != 0)
1240 			sc->gic_redists.pcpu[cpuid] =
1241 				malloc(sizeof(*sc->gic_redists.pcpu[0]),
1242 				    M_GIC_V3, M_WAITOK);
1243 		else
1244 			sc->gic_redists.pcpu[cpuid] = NULL;
1245 	return (0);
1246 }
1247 
/*
 * Locate the Re-Distributor frame belonging to the calling CPU by walking
 * every Re-Distributor region and matching the affinity field of
 * GICR_TYPER against this CPU's affinity.  On success the matching
 * frame's resource is copied into the per-CPU slot.  Returns ENODEV for
 * an unrecognized GIC architecture revision and ENXIO when no frame
 * matches.
 */
static int
gic_v3_redist_find(struct gic_v3_softc *sc)
{
	struct resource r_res;
	bus_space_handle_t r_bsh;
	uint64_t aff;
	uint64_t typer;
	uint32_t pidr2;
	u_int cpuid;
	size_t i;

	cpuid = PCPU_GET(cpuid);

	aff = CPU_AFFINITY(cpuid);
	/* Affinity in format for comparison with typer */
	aff = (CPU_AFF3(aff) << 24) | (CPU_AFF2(aff) << 16) |
	    (CPU_AFF1(aff) << 8) | CPU_AFF0(aff);

	if (bootverbose) {
		device_printf(sc->dev,
		    "Start searching for Re-Distributor\n");
	}
	/* Iterate through Re-Distributor regions */
	for (i = 0; i < sc->gic_redists.nregions; i++) {
		/* Take a copy of the region's resource */
		r_res = *sc->gic_redists.regions[i];
		r_bsh = rman_get_bushandle(&r_res);

		/* Reject regions that are not GICv3/GICv4 Re-Distributors. */
		pidr2 = bus_read_4(&r_res, GICR_PIDR2);
		switch (GICR_PIDR2_ARCH(pidr2)) {
		case GICR_PIDR2_ARCH_GICv3: /* fall through */
		case GICR_PIDR2_ARCH_GICv4:
			break;
		default:
			device_printf(sc->dev,
			    "No Re-Distributor found for CPU%u\n", cpuid);
			return (ENODEV);
		}

		/* Walk the frames in this region until the LAST bit is set. */
		do {
			typer = bus_read_8(&r_res, GICR_TYPER);
			if ((typer >> GICR_TYPER_AFF_SHIFT) == aff) {
				KASSERT(sc->gic_redists.pcpu[cpuid] != NULL,
				    ("Invalid pointer to per-CPU redistributor"));
				/* Copy res contents to its final destination */
				sc->gic_redists.pcpu[cpuid]->res = r_res;
				sc->gic_redists.pcpu[cpuid]->lpi_enabled = false;
				if (bootverbose) {
					device_printf(sc->dev,
					    "CPU%u Re-Distributor has been found\n",
					    cpuid);
				}
				return (0);
			}

			/* Advance past this frame's RD and SGI pages. */
			r_bsh += (GICR_RD_BASE_SIZE + GICR_SGI_BASE_SIZE);
			/* GICv4 frames carry two additional (VLPI) pages. */
			if ((typer & GICR_TYPER_VLPIS) != 0) {
				r_bsh +=
				    (GICR_VLPI_BASE_SIZE + GICR_RESERVED_SIZE);
			}

			rman_set_bushandle(&r_res, r_bsh);
		} while ((typer & GICR_TYPER_LAST) == 0);
	}

	device_printf(sc->dev, "No Re-Distributor found for CPU%u\n", cpuid);
	return (ENXIO);
}
1316 
1317 static int
1318 gic_v3_redist_wake(struct gic_v3_softc *sc)
1319 {
1320 	uint32_t waker;
1321 	size_t us_left = 1000000;
1322 
1323 	waker = gic_r_read(sc, 4, GICR_WAKER);
1324 	/* Wake up Re-Distributor for this CPU */
1325 	waker &= ~GICR_WAKER_PS;
1326 	gic_r_write(sc, 4, GICR_WAKER, waker);
1327 	/*
1328 	 * When clearing ProcessorSleep bit it is required to wait for
1329 	 * ChildrenAsleep to become zero following the processor power-on.
1330 	 */
1331 	while ((gic_r_read(sc, 4, GICR_WAKER) & GICR_WAKER_CA) != 0) {
1332 		DELAY(1);
1333 		if (us_left-- == 0) {
1334 			panic("Could not wake Re-Distributor for CPU%u",
1335 			    PCPU_GET(cpuid));
1336 		}
1337 	}
1338 
1339 	if (bootverbose) {
1340 		device_printf(sc->dev, "CPU%u Re-Distributor woke up\n",
1341 		    PCPU_GET(cpuid));
1342 	}
1343 
1344 	return (0);
1345 }
1346 
/*
 * Per-CPU Re-Distributor bring-up: find and wake this CPU's
 * Re-Distributor, then configure the banked SGI/PPI registers.
 */
static int
gic_v3_redist_init(struct gic_v3_softc *sc)
{
	int err;
	size_t i;

	err = gic_v3_redist_find(sc);
	if (err != 0)
		return (err);

	err = gic_v3_redist_wake(sc);
	if (err != 0)
		return (err);

	/* Configure SGIs and PPIs to be Group1 Non-secure */
	gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICR_IGROUPR0,
	    0xFFFFFFFF);

	/* Disable PPIs (ICENABLER0 with the PPI mask masks PPIs, not SPIs) */
	gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICR_ICENABLER0,
	    GICR_I_ENABLER_PPI_MASK);
	/* Enable SGIs */
	gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICR_ISENABLER0,
	    GICR_I_ENABLER_SGI_MASK);

	/*
	 * Set priority for SGIs and PPIs.
	 * NOTE(review): the GICD_IPRIORITYR offset macro is used here;
	 * assumes the Re-Distributor's IPRIORITYR layout matches the
	 * Distributor's — confirm against the register definitions.
	 */
	for (i = 0; i <= GIC_LAST_PPI; i += GICR_I_PER_IPRIORITYn) {
		gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_IPRIORITYR(i),
		    GIC_PRIORITY_MAX);
	}

	gic_v3_wait_for_rwp(sc, REDIST);

	return (0);
}
1382 
1383 /*
1384  * SPI-mapped Message Based Interrupts -- a GICv3 MSI/MSI-X controller.
1385  */
1386 
1387 static int
1388 gic_v3_alloc_msi(device_t dev, device_t child, int count, int maxcount,
1389     device_t *pic, struct intr_irqsrc **srcs)
1390 {
1391 	struct gic_v3_softc *sc;
1392 	int i, irq, end_irq;
1393 	bool found;
1394 
1395 	KASSERT(powerof2(count), ("%s: bad count", __func__));
1396 	KASSERT(powerof2(maxcount), ("%s: bad maxcount", __func__));
1397 
1398 	sc = device_get_softc(dev);
1399 
1400 	mtx_lock(&sc->gic_mbi_mtx);
1401 
1402 	found = false;
1403 	for (irq = sc->gic_mbi_start; irq < sc->gic_mbi_end; irq++) {
1404 		/* Start on an aligned interrupt */
1405 		if ((irq & (maxcount - 1)) != 0)
1406 			continue;
1407 
1408 		/* Assume we found a valid range until shown otherwise */
1409 		found = true;
1410 
1411 		/* Check this range is valid */
1412 		for (end_irq = irq; end_irq != irq + count; end_irq++) {
1413 			/* No free interrupts */
1414 			if (end_irq == sc->gic_mbi_end) {
1415 				found = false;
1416 				break;
1417 			}
1418 
1419 			KASSERT((sc->gic_irqs[end_irq].gi_flags & GI_FLAG_MSI)!= 0,
1420 			    ("%s: Non-MSI interrupt found", __func__));
1421 
1422 			/* This is already used */
1423 			if ((sc->gic_irqs[end_irq].gi_flags & GI_FLAG_MSI_USED) ==
1424 			    GI_FLAG_MSI_USED) {
1425 				found = false;
1426 				break;
1427 			}
1428 		}
1429 		if (found)
1430 			break;
1431 	}
1432 
1433 	/* Not enough interrupts were found */
1434 	if (!found || irq == sc->gic_mbi_end) {
1435 		mtx_unlock(&sc->gic_mbi_mtx);
1436 		return (ENXIO);
1437 	}
1438 
1439 	for (i = 0; i < count; i++) {
1440 		/* Mark the interrupt as used */
1441 		sc->gic_irqs[irq + i].gi_flags |= GI_FLAG_MSI_USED;
1442 	}
1443 	mtx_unlock(&sc->gic_mbi_mtx);
1444 
1445 	for (i = 0; i < count; i++)
1446 		srcs[i] = (struct intr_irqsrc *)&sc->gic_irqs[irq + i];
1447 	*pic = dev;
1448 
1449 	return (0);
1450 }
1451 
1452 static int
1453 gic_v3_release_msi(device_t dev, device_t child, int count,
1454     struct intr_irqsrc **isrc)
1455 {
1456 	struct gic_v3_softc *sc;
1457 	struct gic_v3_irqsrc *gi;
1458 	int i;
1459 
1460 	sc = device_get_softc(dev);
1461 
1462 	mtx_lock(&sc->gic_mbi_mtx);
1463 	for (i = 0; i < count; i++) {
1464 		gi = (struct gic_v3_irqsrc *)isrc[i];
1465 
1466 		KASSERT((gi->gi_flags & GI_FLAG_MSI_USED) == GI_FLAG_MSI_USED,
1467 		    ("%s: Trying to release an unused MSI-X interrupt",
1468 		    __func__));
1469 
1470 		gi->gi_flags &= ~GI_FLAG_MSI_USED;
1471 	}
1472 	mtx_unlock(&sc->gic_mbi_mtx);
1473 
1474 	return (0);
1475 }
1476 
1477 static int
1478 gic_v3_alloc_msix(device_t dev, device_t child, device_t *pic,
1479     struct intr_irqsrc **isrcp)
1480 {
1481 	struct gic_v3_softc *sc;
1482 	int irq;
1483 
1484 	sc = device_get_softc(dev);
1485 
1486 	mtx_lock(&sc->gic_mbi_mtx);
1487 	/* Find an unused interrupt */
1488 	for (irq = sc->gic_mbi_start; irq < sc->gic_mbi_end; irq++) {
1489 		KASSERT((sc->gic_irqs[irq].gi_flags & GI_FLAG_MSI) != 0,
1490 		    ("%s: Non-MSI interrupt found", __func__));
1491 		if ((sc->gic_irqs[irq].gi_flags & GI_FLAG_MSI_USED) == 0)
1492 			break;
1493 	}
1494 	/* No free interrupt was found */
1495 	if (irq == sc->gic_mbi_end) {
1496 		mtx_unlock(&sc->gic_mbi_mtx);
1497 		return (ENXIO);
1498 	}
1499 
1500 	/* Mark the interrupt as used */
1501 	sc->gic_irqs[irq].gi_flags |= GI_FLAG_MSI_USED;
1502 	mtx_unlock(&sc->gic_mbi_mtx);
1503 
1504 	*isrcp = (struct intr_irqsrc *)&sc->gic_irqs[irq];
1505 	*pic = dev;
1506 
1507 	return (0);
1508 }
1509 
1510 static int
1511 gic_v3_release_msix(device_t dev, device_t child, struct intr_irqsrc *isrc)
1512 {
1513 	struct gic_v3_softc *sc;
1514 	struct gic_v3_irqsrc *gi;
1515 
1516 	sc = device_get_softc(dev);
1517 	gi = (struct gic_v3_irqsrc *)isrc;
1518 
1519 	KASSERT((gi->gi_flags & GI_FLAG_MSI_USED) == GI_FLAG_MSI_USED,
1520 	    ("%s: Trying to release an unused MSI-X interrupt", __func__));
1521 
1522 	mtx_lock(&sc->gic_mbi_mtx);
1523 	gi->gi_flags &= ~GI_FLAG_MSI_USED;
1524 	mtx_unlock(&sc->gic_mbi_mtx);
1525 
1526 	return (0);
1527 }
1528 
1529 static int
1530 gic_v3_map_msi(device_t dev, device_t child, struct intr_irqsrc *isrc,
1531     uint64_t *addr, uint32_t *data)
1532 {
1533 	struct gic_v3_softc *sc = device_get_softc(dev);
1534 	struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
1535 
1536 	*addr = vtophys(rman_get_virtual(sc->gic_dist)) + GICD_SETSPI_NSR;
1537 	*data = gi->gi_irq;
1538 
1539 	return (0);
1540 }
1541