xref: /freebsd/sys/arm64/arm64/gic_v3.c (revision 315ee00f)
1 /*-
2  * Copyright (c) 2015-2016 The FreeBSD Foundation
3  *
4  * This software was developed by Andrew Turner under
5  * the sponsorship of the FreeBSD Foundation.
6  *
7  * This software was developed by Semihalf under
8  * the sponsorship of the FreeBSD Foundation.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  */
31 
32 #include "opt_acpi.h"
33 #include "opt_platform.h"
34 
35 #include <sys/cdefs.h>
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/bitstring.h>
39 #include <sys/bus.h>
40 #include <sys/kernel.h>
41 #include <sys/ktr.h>
42 #include <sys/malloc.h>
43 #include <sys/module.h>
44 #include <sys/rman.h>
45 #include <sys/pcpu.h>
46 #include <sys/proc.h>
47 #include <sys/cpuset.h>
48 #include <sys/lock.h>
49 #include <sys/mutex.h>
50 #include <sys/smp.h>
51 #include <sys/interrupt.h>
52 
53 #include <vm/vm.h>
54 #include <vm/pmap.h>
55 
56 #include <machine/bus.h>
57 #include <machine/cpu.h>
58 #include <machine/intr.h>
59 
60 #ifdef FDT
61 #include <dev/fdt/fdt_intr.h>
62 #include <dev/ofw/ofw_bus_subr.h>
63 #endif
64 
65 #ifdef DEV_ACPI
66 #include <contrib/dev/acpica/include/acpi.h>
67 #include <dev/acpica/acpivar.h>
68 #endif
69 
70 #include "gic_if.h"
71 #include "pic_if.h"
72 #include "msi_if.h"
73 
74 #include <arm/arm/gic_common.h>
75 #include "gic_v3_reg.h"
76 #include "gic_v3_var.h"
77 
78 static bus_print_child_t gic_v3_print_child;
79 static bus_get_domain_t gic_v3_get_domain;
80 static bus_read_ivar_t gic_v3_read_ivar;
81 static bus_write_ivar_t gic_v3_write_ivar;
82 static bus_alloc_resource_t gic_v3_alloc_resource;
83 
84 static pic_disable_intr_t gic_v3_disable_intr;
85 static pic_enable_intr_t gic_v3_enable_intr;
86 static pic_map_intr_t gic_v3_map_intr;
87 static pic_setup_intr_t gic_v3_setup_intr;
88 static pic_teardown_intr_t gic_v3_teardown_intr;
89 static pic_post_filter_t gic_v3_post_filter;
90 static pic_post_ithread_t gic_v3_post_ithread;
91 static pic_pre_ithread_t gic_v3_pre_ithread;
92 static pic_bind_intr_t gic_v3_bind_intr;
93 #ifdef SMP
94 static pic_init_secondary_t gic_v3_init_secondary;
95 static pic_ipi_send_t gic_v3_ipi_send;
96 static pic_ipi_setup_t gic_v3_ipi_setup;
97 #endif
98 
99 static gic_reserve_msi_range_t gic_v3_reserve_msi_range;
100 static gic_alloc_msi_t gic_v3_gic_alloc_msi;
101 static gic_release_msi_t gic_v3_gic_release_msi;
102 static gic_alloc_msix_t gic_v3_gic_alloc_msix;
103 static gic_release_msix_t gic_v3_gic_release_msix;
104 
105 static msi_alloc_msi_t gic_v3_alloc_msi;
106 static msi_release_msi_t gic_v3_release_msi;
107 static msi_alloc_msix_t gic_v3_alloc_msix;
108 static msi_release_msix_t gic_v3_release_msix;
109 static msi_map_msi_t gic_v3_map_msi;
110 
111 static u_int gic_irq_cpu;
112 #ifdef SMP
113 static u_int sgi_to_ipi[GIC_LAST_SGI - GIC_FIRST_SGI + 1];
114 static u_int sgi_first_unused = GIC_FIRST_SGI;
115 #endif
116 
/*
 * Method table shared by the FDT and ACPI front-ends; device_attach is
 * supplied by the subclassed drivers, so only device_detach appears here.
 */
static device_method_t gic_v3_methods[] = {
	/* Device interface */
	DEVMETHOD(device_detach,	gic_v3_detach),

	/* Bus interface */
	DEVMETHOD(bus_print_child,	gic_v3_print_child),
	DEVMETHOD(bus_get_domain,	gic_v3_get_domain),
	DEVMETHOD(bus_read_ivar,	gic_v3_read_ivar),
	DEVMETHOD(bus_write_ivar,	gic_v3_write_ivar),
	DEVMETHOD(bus_alloc_resource,	gic_v3_alloc_resource),
	DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),

	/* Interrupt controller interface */
	DEVMETHOD(pic_disable_intr,	gic_v3_disable_intr),
	DEVMETHOD(pic_enable_intr,	gic_v3_enable_intr),
	DEVMETHOD(pic_map_intr,		gic_v3_map_intr),
	DEVMETHOD(pic_setup_intr,	gic_v3_setup_intr),
	DEVMETHOD(pic_teardown_intr,	gic_v3_teardown_intr),
	DEVMETHOD(pic_post_filter,	gic_v3_post_filter),
	DEVMETHOD(pic_post_ithread,	gic_v3_post_ithread),
	DEVMETHOD(pic_pre_ithread,	gic_v3_pre_ithread),
#ifdef SMP
	DEVMETHOD(pic_bind_intr,	gic_v3_bind_intr),
	DEVMETHOD(pic_init_secondary,	gic_v3_init_secondary),
	DEVMETHOD(pic_ipi_send,		gic_v3_ipi_send),
	DEVMETHOD(pic_ipi_setup,	gic_v3_ipi_setup),
#endif

	/* MSI/MSI-X */
	DEVMETHOD(msi_alloc_msi,        gic_v3_alloc_msi),
	DEVMETHOD(msi_release_msi,      gic_v3_release_msi),
	DEVMETHOD(msi_alloc_msix,       gic_v3_alloc_msix),
	DEVMETHOD(msi_release_msix,     gic_v3_release_msix),
	DEVMETHOD(msi_map_msi,          gic_v3_map_msi),

	/* GIC */
	DEVMETHOD(gic_reserve_msi_range, gic_v3_reserve_msi_range),
	DEVMETHOD(gic_alloc_msi,	gic_v3_gic_alloc_msi),
	DEVMETHOD(gic_release_msi,	gic_v3_gic_release_msi),
	DEVMETHOD(gic_alloc_msix,	gic_v3_gic_alloc_msix),
	DEVMETHOD(gic_release_msix,	gic_v3_gic_release_msix),

	/* End */
	DEVMETHOD_END
};
162 
163 DEFINE_CLASS_0(gic, gic_v3_driver, gic_v3_methods,
164     sizeof(struct gic_v3_softc));
165 
166 /*
167  * Driver-specific definitions.
168  */
169 MALLOC_DEFINE(M_GIC_V3, "GICv3", GIC_V3_DEVSTR);
170 
171 /*
172  * Helper functions and definitions.
173  */
174 /* Destination registers, either Distributor or Re-Distributor */
175 enum gic_v3_xdist {
176 	DIST = 0,
177 	REDIST,
178 };
179 
/*
 * Per-interrupt software state.  gi_isrc must stay the first member:
 * the PIC methods cast struct intr_irqsrc pointers straight to
 * struct gic_v3_irqsrc.
 */
struct gic_v3_irqsrc {
	struct intr_irqsrc	gi_isrc;	/* generic INTRNG source */
	uint32_t		gi_irq;		/* GIC interrupt ID */
	enum intr_polarity	gi_pol;		/* configured polarity */
	enum intr_trigger	gi_trig;	/* configured trigger mode */
#define GI_FLAG_MSI		(1 << 1) /* This interrupt source should only */
					 /* be used for MSI/MSI-X interrupts */
#define GI_FLAG_MSI_USED	(1 << 2) /* This irq is already allocated */
					 /* for a MSI/MSI-X interrupt */
	u_int			gi_flags;	/* GI_FLAG_* bits */
};
191 
192 /* Helper routines starting with gic_v3_ */
193 static int gic_v3_dist_init(struct gic_v3_softc *);
194 static int gic_v3_redist_alloc(struct gic_v3_softc *);
195 static int gic_v3_redist_find(struct gic_v3_softc *);
196 static int gic_v3_redist_init(struct gic_v3_softc *);
197 static int gic_v3_cpu_init(struct gic_v3_softc *);
198 static void gic_v3_wait_for_rwp(struct gic_v3_softc *, enum gic_v3_xdist);
199 
200 /* A sequence of init functions for primary (boot) CPU */
201 typedef int (*gic_v3_initseq_t) (struct gic_v3_softc *);
202 /* Primary CPU initialization sequence */
203 static gic_v3_initseq_t gic_v3_primary_init[] = {
204 	gic_v3_dist_init,
205 	gic_v3_redist_alloc,
206 	gic_v3_redist_init,
207 	gic_v3_cpu_init,
208 	NULL
209 };
210 
211 #ifdef SMP
212 /* Secondary CPU initialization sequence */
213 static gic_v3_initseq_t gic_v3_secondary_init[] = {
214 	gic_v3_redist_init,
215 	gic_v3_cpu_init,
216 	NULL
217 };
218 #endif
219 
220 uint32_t
221 gic_r_read_4(device_t dev, bus_size_t offset)
222 {
223 	struct gic_v3_softc *sc;
224 	struct resource *rdist;
225 
226 	sc = device_get_softc(dev);
227 	rdist = sc->gic_redists.pcpu[PCPU_GET(cpuid)].res;
228 	offset += sc->gic_redists.pcpu[PCPU_GET(cpuid)].offset;
229 	return (bus_read_4(rdist, offset));
230 }
231 
232 uint64_t
233 gic_r_read_8(device_t dev, bus_size_t offset)
234 {
235 	struct gic_v3_softc *sc;
236 	struct resource *rdist;
237 
238 	sc = device_get_softc(dev);
239 	rdist = sc->gic_redists.pcpu[PCPU_GET(cpuid)].res;
240 	offset += sc->gic_redists.pcpu[PCPU_GET(cpuid)].offset;
241 	return (bus_read_8(rdist, offset));
242 }
243 
244 void
245 gic_r_write_4(device_t dev, bus_size_t offset, uint32_t val)
246 {
247 	struct gic_v3_softc *sc;
248 	struct resource *rdist;
249 
250 	sc = device_get_softc(dev);
251 	rdist = sc->gic_redists.pcpu[PCPU_GET(cpuid)].res;
252 	offset += sc->gic_redists.pcpu[PCPU_GET(cpuid)].offset;
253 	bus_write_4(rdist, offset, val);
254 }
255 
256 void
257 gic_r_write_8(device_t dev, bus_size_t offset, uint64_t val)
258 {
259 	struct gic_v3_softc *sc;
260 	struct resource *rdist;
261 
262 	sc = device_get_softc(dev);
263 	rdist = sc->gic_redists.pcpu[PCPU_GET(cpuid)].res;
264 	offset += sc->gic_redists.pcpu[PCPU_GET(cpuid)].offset;
265 	bus_write_8(rdist, offset, val);
266 }
267 
268 static void
269 gic_v3_reserve_msi_range(device_t dev, u_int start, u_int count)
270 {
271 	struct gic_v3_softc *sc;
272 	int i;
273 
274 	sc = device_get_softc(dev);
275 
276 	KASSERT((start + count) < sc->gic_nirqs,
277 	    ("%s: Trying to allocate too many MSI IRQs: %d + %d > %d", __func__,
278 	    start, count, sc->gic_nirqs));
279 	for (i = 0; i < count; i++) {
280 		KASSERT(sc->gic_irqs[start + i].gi_isrc.isrc_handlers == 0,
281 		    ("%s: MSI interrupt %d already has a handler", __func__,
282 		    count + i));
283 		KASSERT(sc->gic_irqs[start + i].gi_pol == INTR_POLARITY_CONFORM,
284 		    ("%s: MSI interrupt %d already has a polarity", __func__,
285 		    count + i));
286 		KASSERT(sc->gic_irqs[start + i].gi_trig == INTR_TRIGGER_CONFORM,
287 		    ("%s: MSI interrupt %d already has a trigger", __func__,
288 		    count + i));
289 		sc->gic_irqs[start + i].gi_pol = INTR_POLARITY_HIGH;
290 		sc->gic_irqs[start + i].gi_trig = INTR_TRIGGER_EDGE;
291 		sc->gic_irqs[start + i].gi_flags |= GI_FLAG_MSI;
292 	}
293 }
294 
295 /*
296  * Device interface.
297  */
298 int
299 gic_v3_attach(device_t dev)
300 {
301 	struct gic_v3_softc *sc;
302 	gic_v3_initseq_t *init_func;
303 	uint32_t typer;
304 	int rid;
305 	int err;
306 	size_t i;
307 	u_int irq;
308 	const char *name;
309 
310 	sc = device_get_softc(dev);
311 	sc->gic_registered = FALSE;
312 	sc->dev = dev;
313 	err = 0;
314 
315 	/* Initialize mutex */
316 	mtx_init(&sc->gic_mtx, "GICv3 lock", NULL, MTX_SPIN);
317 
318 	/*
319 	 * Allocate array of struct resource.
320 	 * One entry for Distributor and all remaining for Re-Distributor.
321 	 */
322 	sc->gic_res = malloc(
323 	    sizeof(*sc->gic_res) * (sc->gic_redists.nregions + 1),
324 	    M_GIC_V3, M_WAITOK);
325 
326 	/* Now allocate corresponding resources */
327 	for (i = 0, rid = 0; i < (sc->gic_redists.nregions + 1); i++, rid++) {
328 		sc->gic_res[rid] = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
329 		    &rid, RF_ACTIVE);
330 		if (sc->gic_res[rid] == NULL)
331 			return (ENXIO);
332 	}
333 
334 	/*
335 	 * Distributor interface
336 	 */
337 	sc->gic_dist = sc->gic_res[0];
338 
339 	/*
340 	 * Re-Dristributor interface
341 	 */
342 	/* Allocate space under region descriptions */
343 	sc->gic_redists.regions = malloc(
344 	    sizeof(*sc->gic_redists.regions) * sc->gic_redists.nregions,
345 	    M_GIC_V3, M_WAITOK);
346 
347 	/* Fill-up bus_space information for each region. */
348 	for (i = 0, rid = 1; i < sc->gic_redists.nregions; i++, rid++)
349 		sc->gic_redists.regions[i] = sc->gic_res[rid];
350 
351 	/* Get the number of supported SPI interrupts */
352 	typer = gic_d_read(sc, 4, GICD_TYPER);
353 	sc->gic_nirqs = GICD_TYPER_I_NUM(typer);
354 	if (sc->gic_nirqs > GIC_I_NUM_MAX)
355 		sc->gic_nirqs = GIC_I_NUM_MAX;
356 
357 	sc->gic_irqs = malloc(sizeof(*sc->gic_irqs) * sc->gic_nirqs,
358 	    M_GIC_V3, M_WAITOK | M_ZERO);
359 	name = device_get_nameunit(dev);
360 	for (irq = 0; irq < sc->gic_nirqs; irq++) {
361 		struct intr_irqsrc *isrc;
362 
363 		sc->gic_irqs[irq].gi_irq = irq;
364 		sc->gic_irqs[irq].gi_pol = INTR_POLARITY_CONFORM;
365 		sc->gic_irqs[irq].gi_trig = INTR_TRIGGER_CONFORM;
366 
367 		isrc = &sc->gic_irqs[irq].gi_isrc;
368 		if (irq <= GIC_LAST_SGI) {
369 			err = intr_isrc_register(isrc, sc->dev,
370 			    INTR_ISRCF_IPI, "%s,i%u", name, irq - GIC_FIRST_SGI);
371 		} else if (irq <= GIC_LAST_PPI) {
372 			err = intr_isrc_register(isrc, sc->dev,
373 			    INTR_ISRCF_PPI, "%s,p%u", name, irq - GIC_FIRST_PPI);
374 		} else {
375 			err = intr_isrc_register(isrc, sc->dev, 0,
376 			    "%s,s%u", name, irq - GIC_FIRST_SPI);
377 		}
378 		if (err != 0) {
379 			/* XXX call intr_isrc_deregister() */
380 			free(sc->gic_irqs, M_DEVBUF);
381 			return (err);
382 		}
383 	}
384 
385 	mtx_init(&sc->gic_mbi_mtx, "GICv3 mbi lock", NULL, MTX_DEF);
386 	if (sc->gic_mbi_start > 0) {
387 		if (!sc->gic_mbi_end) {
388 			/*
389 			 * This is to address SPI based msi ranges, where
390 			 * SPI range is not specified in ACPI
391 			 */
392 			sc->gic_mbi_end = sc->gic_nirqs - 1;
393 		}
394 		gic_v3_reserve_msi_range(dev, sc->gic_mbi_start,
395 		    sc->gic_mbi_end - sc->gic_mbi_start);
396 
397 		if (bootverbose) {
398 			device_printf(dev, "using spi %u to %u\n", sc->gic_mbi_start,
399 					sc->gic_mbi_end);
400 		}
401 	}
402 
403 	/*
404 	 * Read the Peripheral ID2 register. This is an implementation
405 	 * defined register, but seems to be implemented in all GICv3
406 	 * parts and Linux expects it to be there.
407 	 */
408 	sc->gic_pidr2 = gic_d_read(sc, 4, GICD_PIDR2);
409 
410 	/* Get the number of supported interrupt identifier bits */
411 	sc->gic_idbits = GICD_TYPER_IDBITS(typer);
412 
413 	if (bootverbose) {
414 		device_printf(dev, "SPIs: %u, IDs: %u\n",
415 		    sc->gic_nirqs, (1 << sc->gic_idbits) - 1);
416 	}
417 
418 	/* Train init sequence for boot CPU */
419 	for (init_func = gic_v3_primary_init; *init_func != NULL; init_func++) {
420 		err = (*init_func)(sc);
421 		if (err != 0)
422 			return (err);
423 	}
424 
425 	return (0);
426 }
427 
428 int
429 gic_v3_detach(device_t dev)
430 {
431 	struct gic_v3_softc *sc;
432 	int rid;
433 
434 	sc = device_get_softc(dev);
435 
436 	if (device_is_attached(dev)) {
437 		/*
438 		 * XXX: We should probably deregister PIC
439 		 */
440 		if (sc->gic_registered)
441 			panic("Trying to detach registered PIC");
442 	}
443 	for (rid = 0; rid < (sc->gic_redists.nregions + 1); rid++)
444 		bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->gic_res[rid]);
445 
446 	free(sc->gic_redists.pcpu, M_GIC_V3);
447 
448 	free(sc->ranges, M_GIC_V3);
449 	free(sc->gic_res, M_GIC_V3);
450 	free(sc->gic_redists.regions, M_GIC_V3);
451 
452 	return (0);
453 }
454 
455 static int
456 gic_v3_print_child(device_t bus, device_t child)
457 {
458 	struct resource_list *rl;
459 	int retval = 0;
460 
461 	rl = BUS_GET_RESOURCE_LIST(bus, child);
462 	KASSERT(rl != NULL, ("%s: No resource list", __func__));
463 	retval += bus_print_child_header(bus, child);
464 	retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#jx");
465 	retval += bus_print_child_footer(bus, child);
466 
467 	return (retval);
468 }
469 
470 static int
471 gic_v3_get_domain(device_t dev, device_t child, int *domain)
472 {
473 	struct gic_v3_devinfo *di;
474 
475 	di = device_get_ivars(child);
476 	if (di->gic_domain < 0)
477 		return (ENOENT);
478 
479 	*domain = di->gic_domain;
480 	return (0);
481 }
482 
/*
 * Give child devices read access to our instance variables.
 * Returns ENOENT for ivars we do not implement.
 */
static int
gic_v3_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
{
	struct gic_v3_softc *sc;
	struct gic_v3_devinfo *di;

	sc = device_get_softc(dev);

	switch (which) {
	case GICV3_IVAR_NIRQS:
		/* Split the IRQ numbers above ours evenly between children. */
		*result = (intr_nirq - sc->gic_nirqs) / sc->gic_nchildren;
		return (0);
	case GICV3_IVAR_REDIST:
		/* Re-Distributor state of the CPU we are running on. */
		*result = (uintptr_t)&sc->gic_redists.pcpu[PCPU_GET(cpuid)];
		return (0);
	case GIC_IVAR_HW_REV:
		/* Architecture revision decoded from GICD_PIDR2. */
		KASSERT(
		    GICR_PIDR2_ARCH(sc->gic_pidr2) == GICR_PIDR2_ARCH_GICv3 ||
		    GICR_PIDR2_ARCH(sc->gic_pidr2) == GICR_PIDR2_ARCH_GICv4,
		    ("gic_v3_read_ivar: Invalid GIC architecture: %d (%.08X)",
		     GICR_PIDR2_ARCH(sc->gic_pidr2), sc->gic_pidr2));
		*result = GICR_PIDR2_ARCH(sc->gic_pidr2);
		return (0);
	case GIC_IVAR_BUS:
		/* Whether we were enumerated via FDT or ACPI. */
		KASSERT(sc->gic_bus != GIC_BUS_UNKNOWN,
		    ("gic_v3_read_ivar: Unknown bus type"));
		KASSERT(sc->gic_bus <= GIC_BUS_MAX,
		    ("gic_v3_read_ivar: Invalid bus type %u", sc->gic_bus));
		*result = sc->gic_bus;
		return (0);
	case GIC_IVAR_VGIC:
		/* Whether this child is the virtual GIC interface. */
		di = device_get_ivars(child);
		if (di == NULL)
			return (EINVAL);
		*result = di->is_vgic;
		return (0);
	}

	return (ENOENT);
}
523 
524 static int
525 gic_v3_write_ivar(device_t dev, device_t child, int which, uintptr_t value)
526 {
527 	switch(which) {
528 	case GICV3_IVAR_NIRQS:
529 	case GICV3_IVAR_REDIST:
530 	case GIC_IVAR_HW_REV:
531 	case GIC_IVAR_BUS:
532 		return (EINVAL);
533 	}
534 
535 	return (ENOENT);
536 }
537 
/*
 * Allocate a memory resource for a child (e.g. the ITS), translating
 * child bus addresses to host addresses through the ranges table when
 * one was provided by the front-end.
 */
static struct resource *
gic_v3_alloc_resource(device_t bus, device_t child, int type, int *rid,
    rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
	struct gic_v3_softc *sc;
	struct resource_list_entry *rle;
	struct resource_list *rl;
	int j;

	/* We only allocate memory */
	if (type != SYS_RES_MEMORY)
		return (NULL);

	sc = device_get_softc(bus);

	/* A wildcard request: look up the child's defaults for this rid. */
	if (RMAN_IS_DEFAULT_RANGE(start, end)) {
		rl = BUS_GET_RESOURCE_LIST(bus, child);
		if (rl == NULL)
			return (NULL);

		/* Find defaults for this rid */
		rle = resource_list_find(rl, type, *rid);
		if (rle == NULL)
			return (NULL);

		start = rle->start;
		end = rle->end;
		count = rle->count;
	}

	/* Remap through ranges property */
	for (j = 0; j < sc->nranges; j++) {
		if (start >= sc->ranges[j].bus && end <
		    sc->ranges[j].bus + sc->ranges[j].size) {
			/* Rebase from the bus window into the host window. */
			start -= sc->ranges[j].bus;
			start += sc->ranges[j].host;
			end -= sc->ranges[j].bus;
			end += sc->ranges[j].host;
			break;
		}
	}
	/* With a non-empty ranges table, an unmatched request is an error. */
	if (j == sc->nranges && sc->nranges != 0) {
		if (bootverbose)
			device_printf(bus, "Could not map resource "
			    "%#jx-%#jx\n", (uintmax_t)start, (uintmax_t)end);

		return (NULL);
	}

	return (bus_generic_alloc_resource(bus, child, type, rid, start, end,
	    count, flags));
}
590 
/*
 * Top-level interrupt filter.  Repeatedly acknowledges the highest
 * priority pending interrupt from ICC_IAR1_EL1 and dispatches it:
 * LPIs go to the child PIC (ITS), SGIs become IPIs, and PPIs/SPIs are
 * dispatched through INTRNG.  Only returns once a spurious interrupt
 * ID (>= gic_nirqs but below the LPI range) is read.
 */
int
arm_gic_v3_intr(void *arg)
{
	struct gic_v3_softc *sc = arg;
	struct gic_v3_irqsrc *gi;
	struct intr_pic *pic;
	uint64_t active_irq;
	struct trapframe *tf;

	pic = sc->gic_pic;

	while (1) {
		if (CPU_MATCH_ERRATA_CAVIUM_THUNDERX_1_1) {
			/*
			 * Hardware:		Cavium ThunderX
			 * Chip revision:	Pass 1.0 (early version)
			 *			Pass 1.1 (production)
			 * ERRATUM:		22978, 23154
			 */
			__asm __volatile(
			    "nop;nop;nop;nop;nop;nop;nop;nop;	\n"
			    "mrs %0, ICC_IAR1_EL1		\n"
			    "nop;nop;nop;nop;			\n"
			    "dsb sy				\n"
			    : "=&r" (active_irq));
		} else {
			active_irq = gic_icc_read(IAR1);
		}

		/* LPIs are owned by the child interrupt controller (ITS). */
		if (active_irq >= GIC_FIRST_LPI) {
			intr_child_irq_handler(pic, active_irq);
			continue;
		}

		/* Spurious interrupt ID: nothing left pending. */
		if (__predict_false(active_irq >= sc->gic_nirqs))
			return (FILTER_HANDLED);

		tf = curthread->td_intr_frame;
		gi = &sc->gic_irqs[active_irq];
		if (active_irq <= GIC_LAST_SGI) {
			/* Call EOI for all IPI before dispatch. */
			gic_icc_write(EOIR1, (uint64_t)active_irq);
#ifdef SMP
			intr_ipi_dispatch(sgi_to_ipi[gi->gi_irq]);
#else
			device_printf(sc->dev, "SGI %ju on UP system detected\n",
			    (uintmax_t)(active_irq - GIC_FIRST_SGI));
#endif
		} else if (active_irq >= GIC_FIRST_PPI &&
		    active_irq <= GIC_LAST_SPI) {
			/* Edge interrupts are EOI'd before dispatch ... */
			if (gi->gi_trig == INTR_TRIGGER_EDGE)
				gic_icc_write(EOIR1, gi->gi_irq);

			if (intr_isrc_dispatch(&gi->gi_isrc, tf) != 0) {
				/* ... level interrupts after a failed one. */
				if (gi->gi_trig != INTR_TRIGGER_EDGE)
					gic_icc_write(EOIR1, gi->gi_irq);
				gic_v3_disable_intr(sc->dev, &gi->gi_isrc);
				device_printf(sc->dev,
				    "Stray irq %lu disabled\n", active_irq);
			}
		}
	}
}
654 
655 #ifdef FDT
656 static int
657 gic_map_fdt(device_t dev, u_int ncells, pcell_t *cells, u_int *irqp,
658     enum intr_polarity *polp, enum intr_trigger *trigp)
659 {
660 	u_int irq;
661 
662 	if (ncells < 3)
663 		return (EINVAL);
664 
665 	/*
666 	 * The 1st cell is the interrupt type:
667 	 *	0 = SPI
668 	 *	1 = PPI
669 	 * The 2nd cell contains the interrupt number:
670 	 *	[0 - 987] for SPI
671 	 *	[0 -  15] for PPI
672 	 * The 3rd cell is the flags, encoded as follows:
673 	 *   bits[3:0] trigger type and level flags
674 	 *	1 = edge triggered
675 	 *      2 = edge triggered (PPI only)
676 	 *	4 = level-sensitive
677 	 *	8 = level-sensitive (PPI only)
678 	 */
679 	switch (cells[0]) {
680 	case 0:
681 		irq = GIC_FIRST_SPI + cells[1];
682 		/* SPI irq is checked later. */
683 		break;
684 	case 1:
685 		irq = GIC_FIRST_PPI + cells[1];
686 		if (irq > GIC_LAST_PPI) {
687 			device_printf(dev, "unsupported PPI interrupt "
688 			    "number %u\n", cells[1]);
689 			return (EINVAL);
690 		}
691 		break;
692 	default:
693 		device_printf(dev, "unsupported interrupt type "
694 		    "configuration %u\n", cells[0]);
695 		return (EINVAL);
696 	}
697 
698 	switch (cells[2] & FDT_INTR_MASK) {
699 	case FDT_INTR_EDGE_RISING:
700 		*trigp = INTR_TRIGGER_EDGE;
701 		*polp = INTR_POLARITY_HIGH;
702 		break;
703 	case FDT_INTR_EDGE_FALLING:
704 		*trigp = INTR_TRIGGER_EDGE;
705 		*polp = INTR_POLARITY_LOW;
706 		break;
707 	case FDT_INTR_LEVEL_HIGH:
708 		*trigp = INTR_TRIGGER_LEVEL;
709 		*polp = INTR_POLARITY_HIGH;
710 		break;
711 	case FDT_INTR_LEVEL_LOW:
712 		*trigp = INTR_TRIGGER_LEVEL;
713 		*polp = INTR_POLARITY_LOW;
714 		break;
715 	default:
716 		device_printf(dev, "unsupported trigger/polarity "
717 		    "configuration 0x%02x\n", cells[2]);
718 		return (EINVAL);
719 	}
720 
721 	/* Check the interrupt is valid */
722 	if (irq >= GIC_FIRST_SPI && *polp != INTR_POLARITY_HIGH)
723 		return (EINVAL);
724 
725 	*irqp = irq;
726 	return (0);
727 }
728 #endif
729 
730 static int
731 gic_map_msi(device_t dev, struct intr_map_data_msi *msi_data, u_int *irqp,
732     enum intr_polarity *polp, enum intr_trigger *trigp)
733 {
734 	struct gic_v3_irqsrc *gi;
735 
736 	/* SPI-mapped MSI */
737 	gi = (struct gic_v3_irqsrc *)msi_data->isrc;
738 	if (gi == NULL)
739 		return (ENXIO);
740 
741 	*irqp = gi->gi_irq;
742 
743 	/* MSI/MSI-X interrupts are always edge triggered with high polarity */
744 	*polp = INTR_POLARITY_HIGH;
745 	*trigp = INTR_TRIGGER_EDGE;
746 
747 	return (0);
748 }
749 
/*
 * Decode bus-specific interrupt map data (FDT, ACPI or MSI) into an
 * interrupt number, polarity and trigger, then validate all three.
 * polp/trigp may be NULL when the caller only needs the number.
 */
static int
do_gic_v3_map_intr(device_t dev, struct intr_map_data *data, u_int *irqp,
    enum intr_polarity *polp, enum intr_trigger *trigp)
{
	struct gic_v3_softc *sc;
	enum intr_polarity pol;
	enum intr_trigger trig;
	struct intr_map_data_msi *dam;
#ifdef FDT
	struct intr_map_data_fdt *daf;
#endif
#ifdef DEV_ACPI
	struct intr_map_data_acpi *daa;
#endif
	u_int irq;

	sc = device_get_softc(dev);

	switch (data->type) {
#ifdef FDT
	case INTR_MAP_DATA_FDT:
		daf = (struct intr_map_data_fdt *)data;
		if (gic_map_fdt(dev, daf->ncells, daf->cells, &irq, &pol,
		    &trig) != 0)
			return (EINVAL);
		break;
#endif
#ifdef DEV_ACPI
	case INTR_MAP_DATA_ACPI:
		/* ACPI map data carries the decoded values directly. */
		daa = (struct intr_map_data_acpi *)data;
		irq = daa->irq;
		pol = daa->pol;
		trig = daa->trig;
		break;
#endif
	case INTR_MAP_DATA_MSI:
		/* SPI-mapped MSI */
		dam = (struct intr_map_data_msi *)data;
		if (gic_map_msi(dev, dam, &irq, &pol, &trig) != 0)
			return (EINVAL);
		break;
	default:
		return (EINVAL);
	}

	/* Reject out-of-range interrupts and bogus enum values. */
	if (irq >= sc->gic_nirqs)
		return (EINVAL);
	switch (pol) {
	case INTR_POLARITY_CONFORM:
	case INTR_POLARITY_LOW:
	case INTR_POLARITY_HIGH:
		break;
	default:
		return (EINVAL);
	}
	switch (trig) {
	case INTR_TRIGGER_CONFORM:
	case INTR_TRIGGER_EDGE:
	case INTR_TRIGGER_LEVEL:
		break;
	default:
		return (EINVAL);
	}

	*irqp = irq;
	if (polp != NULL)
		*polp = pol;
	if (trigp != NULL)
		*trigp = trig;
	return (0);
}
821 
822 static int
823 gic_v3_map_intr(device_t dev, struct intr_map_data *data,
824     struct intr_irqsrc **isrcp)
825 {
826 	struct gic_v3_softc *sc;
827 	int error;
828 	u_int irq;
829 
830 	error = do_gic_v3_map_intr(dev, data, &irq, NULL, NULL);
831 	if (error == 0) {
832 		sc = device_get_softc(dev);
833 		*isrcp = GIC_INTR_ISRC(sc, irq);
834 	}
835 	return (error);
836 }
837 
/* Argument bundle for the per-CPU setup/enable rendezvous callbacks. */
struct gic_v3_setup_periph_args {
	device_t		 dev;	/* our device, for softc lookup */
	struct intr_irqsrc	*isrc;	/* interrupt source to configure */
};
842 
/*
 * Per-CPU part of interrupt setup: records PPI CPU affinity and
 * programs the trigger mode into GICD_ICFGR (SPIs, via the
 * Distributor) or its Re-Distributor alias (SGIs/PPIs).  Runs on each
 * CPU via smp_rendezvous(), so it must not sleep.
 */
static void
gic_v3_setup_intr_periph(void *argp)
{
	struct gic_v3_setup_periph_args *args = argp;
	struct intr_irqsrc *isrc = args->isrc;
	struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
	device_t dev = args->dev;
	u_int irq = gi->gi_irq;
	struct gic_v3_softc *sc = device_get_softc(dev);
	uint32_t reg;

	MPASS(irq <= GIC_LAST_SPI);

	/*
	 * We need the lock for both SGIs and PPIs for an atomic CPU_SET() at a
	 * minimum, but we also need it below for SPIs.
	 */
	mtx_lock_spin(&sc->gic_mtx);

	if (isrc->isrc_flags & INTR_ISRCF_PPI)
		CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu);

	if (irq >= GIC_FIRST_PPI && irq <= GIC_LAST_SPI) {
		/* Set the trigger and polarity */
		/* Each ICFGR register holds 2 config bits for 16 interrupts. */
		if (irq <= GIC_LAST_PPI)
			reg = gic_r_read(sc, 4,
			    GICR_SGI_BASE_SIZE + GICD_ICFGR(irq));
		else
			reg = gic_d_read(sc, 4, GICD_ICFGR(irq));
		/* Bit 1 of the field: set selects edge, clear selects level. */
		if (gi->gi_trig == INTR_TRIGGER_LEVEL)
			reg &= ~(2 << ((irq % 16) * 2));
		else
			reg |= 2 << ((irq % 16) * 2);

		if (irq <= GIC_LAST_PPI) {
			gic_r_write(sc, 4,
			    GICR_SGI_BASE_SIZE + GICD_ICFGR(irq), reg);
			gic_v3_wait_for_rwp(sc, REDIST);
		} else {
			gic_d_write(sc, 4, GICD_ICFGR(irq), reg);
			gic_v3_wait_for_rwp(sc, DIST);
		}
	}

	mtx_unlock_spin(&sc->gic_mtx);
}
889 
/*
 * PIC_SETUP_INTR method: validate the requested configuration against
 * this source, record polarity/trigger on first setup and program the
 * hardware.  PPIs are configured on every CPU via smp_rendezvous();
 * SPIs are configured once and bound to a CPU.
 */
static int
gic_v3_setup_intr(device_t dev, struct intr_irqsrc *isrc,
    struct resource *res, struct intr_map_data *data)
{
	struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
	struct gic_v3_setup_periph_args pargs;
	enum intr_trigger trig;
	enum intr_polarity pol;
	u_int irq;
	int error;

	if (data == NULL)
		return (ENOTSUP);

	error = do_gic_v3_map_intr(dev, data, &irq, &pol, &trig);
	if (error != 0)
		return (error);

	/* The map data must match this source and be fully specified. */
	if (gi->gi_irq != irq || pol == INTR_POLARITY_CONFORM ||
	    trig == INTR_TRIGGER_CONFORM)
		return (EINVAL);

	/* Compare config if this is not first setup. */
	if (isrc->isrc_handlers != 0) {
		if (pol != gi->gi_pol || trig != gi->gi_trig)
			return (EINVAL);
		else
			return (0);
	}

	/* For MSI/MSI-X we should have already configured these */
	if ((gi->gi_flags & GI_FLAG_MSI) == 0) {
		gi->gi_pol = pol;
		gi->gi_trig = trig;
	}

	pargs.dev = dev;
	pargs.isrc = isrc;

	if (isrc->isrc_flags & INTR_ISRCF_PPI) {
		/*
		 * If APs haven't been fired up yet, smp_rendezvous() will just
		 * execute it on the single CPU and gic_v3_init_secondary() will
		 * clean up afterwards.
		 */
		smp_rendezvous(NULL, gic_v3_setup_intr_periph, NULL, &pargs);
	} else if (irq >= GIC_FIRST_SPI && irq <= GIC_LAST_SPI) {
		gic_v3_setup_intr_periph(&pargs);
		gic_v3_bind_intr(dev, isrc);
	}

	return (0);
}
943 
944 static int
945 gic_v3_teardown_intr(device_t dev, struct intr_irqsrc *isrc,
946     struct resource *res, struct intr_map_data *data)
947 {
948 	struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
949 
950 	if (isrc->isrc_handlers == 0 && (gi->gi_flags & GI_FLAG_MSI) == 0) {
951 		gi->gi_pol = INTR_POLARITY_CONFORM;
952 		gi->gi_trig = INTR_TRIGGER_CONFORM;
953 	}
954 
955 	return (0);
956 }
957 
/*
 * PIC_DISABLE_INTR method: mask the interrupt by writing its bit to
 * the appropriate ICENABLER register and wait for the write to take
 * effect (register-write-pending clears).
 */
static void
gic_v3_disable_intr(device_t dev, struct intr_irqsrc *isrc)
{
	struct gic_v3_softc *sc;
	struct gic_v3_irqsrc *gi;
	u_int irq;

	sc = device_get_softc(dev);
	gi = (struct gic_v3_irqsrc *)isrc;
	irq = gi->gi_irq;

	if (irq <= GIC_LAST_PPI) {
		/* SGIs and PPIs in corresponding Re-Distributor */
		gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_ICENABLER(irq),
		    GICD_I_MASK(irq));
		gic_v3_wait_for_rwp(sc, REDIST);
	} else if (irq >= GIC_FIRST_SPI && irq <= GIC_LAST_SPI) {
		/* SPIs in distributor */
		gic_d_write(sc, 4, GICD_ICENABLER(irq), GICD_I_MASK(irq));
		gic_v3_wait_for_rwp(sc, DIST);
	} else
		panic("%s: Unsupported IRQ %u", __func__, irq);
}
981 
/*
 * Per-CPU enable of an SGI or PPI: set the interrupt's bit in this
 * CPU's Re-Distributor ISENABLER register.  Runs on each CPU via
 * smp_rendezvous(), so it must not sleep.
 */
static void
gic_v3_enable_intr_periph(void *argp)
{
	struct gic_v3_setup_periph_args *args = argp;
	struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)args->isrc;
	device_t dev = args->dev;
	struct gic_v3_softc *sc = device_get_softc(dev);
	u_int irq = gi->gi_irq;

	/* SGIs and PPIs in corresponding Re-Distributor */
	gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_ISENABLER(irq),
	    GICD_I_MASK(irq));
	gic_v3_wait_for_rwp(sc, REDIST);
}
996 
/*
 * PIC_ENABLE_INTR method: unmask the interrupt.  SGIs are enabled on
 * the current CPU only, PPIs on every CPU via smp_rendezvous(), and
 * SPIs once through the Distributor.
 */
static void
gic_v3_enable_intr(device_t dev, struct intr_irqsrc *isrc)
{
	struct gic_v3_setup_periph_args pargs;
	struct gic_v3_softc *sc;
	struct gic_v3_irqsrc *gi;
	u_int irq;

	gi = (struct gic_v3_irqsrc *)isrc;
	irq = gi->gi_irq;
	pargs.isrc = isrc;
	pargs.dev = dev;

	if (irq <= GIC_LAST_PPI) {
		/*
		 * SGIs only need configured on the current AP.  We'll setup and
		 * enable IPIs as APs come online.
		 */
		if (irq <= GIC_LAST_SGI)
			gic_v3_enable_intr_periph(&pargs);
		else
			smp_rendezvous(NULL, gic_v3_enable_intr_periph, NULL,
			    &pargs);
		return;
	}

	sc = device_get_softc(dev);

	if (irq >= GIC_FIRST_SPI && irq <= GIC_LAST_SPI) {
		/* SPIs in distributor */
		gic_d_write(sc, 4, GICD_ISENABLER(irq), GICD_I_MASK(irq));
		gic_v3_wait_for_rwp(sc, DIST);
	} else
		panic("%s: Unsupported IRQ %u", __func__, irq);
}
1032 
1033 static void
1034 gic_v3_pre_ithread(device_t dev, struct intr_irqsrc *isrc)
1035 {
1036 	struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
1037 
1038 	gic_v3_disable_intr(dev, isrc);
1039 	gic_icc_write(EOIR1, gi->gi_irq);
1040 }
1041 
1042 static void
1043 gic_v3_post_ithread(device_t dev, struct intr_irqsrc *isrc)
1044 {
1045 
1046 	gic_v3_enable_intr(dev, isrc);
1047 }
1048 
1049 static void
1050 gic_v3_post_filter(device_t dev, struct intr_irqsrc *isrc)
1051 {
1052 	struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
1053 
1054 	if (gi->gi_trig == INTR_TRIGGER_EDGE)
1055 		return;
1056 
1057 	gic_icc_write(EOIR1, gi->gi_irq);
1058 }
1059 
/*
 * PIC_BIND_INTR method: route an SPI to a CPU by writing its affinity
 * into GICD_IROUTER.  With no explicit CPU set, CPUs are assigned
 * round-robin via the gic_irq_cpu cursor; otherwise the first CPU in
 * the set is used (the hardware routes to a single target here).
 */
static int
gic_v3_bind_intr(device_t dev, struct intr_irqsrc *isrc)
{
	struct gic_v3_softc *sc;
	struct gic_v3_irqsrc *gi;
	int cpu;

	gi = (struct gic_v3_irqsrc *)isrc;

	KASSERT(gi->gi_irq >= GIC_FIRST_SPI && gi->gi_irq <= GIC_LAST_SPI,
	    ("%s: Attempting to bind an invalid IRQ", __func__));

	sc = device_get_softc(dev);

	if (CPU_EMPTY(&isrc->isrc_cpu)) {
		/* No explicit binding: pick the next CPU round-robin. */
		gic_irq_cpu = intr_irq_next_cpu(gic_irq_cpu, &all_cpus);
		CPU_SETOF(gic_irq_cpu, &isrc->isrc_cpu);
		gic_d_write(sc, 8, GICD_IROUTER(gi->gi_irq),
		    CPU_AFFINITY(gic_irq_cpu));
	} else {
		/*
		 * We can only bind to a single CPU so select
		 * the first CPU found.
		 */
		cpu = CPU_FFS(&isrc->isrc_cpu) - 1;
		gic_d_write(sc, 8, GICD_IROUTER(gi->gi_irq), CPU_AFFINITY(cpu));
	}

	return (0);
}
1090 
1091 #ifdef SMP
/*
 * Bring up the GIC on a secondary (application) CPU: run the per-CPU
 * init sequence, unmask any SGIs/PPIs already attached on this CPU, and
 * let child controllers (e.g. ITS) initialize themselves.
 */
static void
gic_v3_init_secondary(device_t dev)
{
	struct gic_v3_setup_periph_args pargs;
	device_t child;
	struct gic_v3_softc *sc;
	gic_v3_initseq_t *init_func;
	struct intr_irqsrc *isrc;
	u_int cpu, irq;
	int err, i;

	sc = device_get_softc(dev);
	cpu = PCPU_GET(cpuid);

	/* Run the secondary init sequence for this CPU. */
	for (init_func = gic_v3_secondary_init; *init_func != NULL;
	    init_func++) {
		err = (*init_func)(sc);
		if (err != 0) {
			device_printf(dev,
			    "Could not initialize GIC for CPU%u\n", cpu);
			return;
		}
	}

	pargs.dev = dev;

	/* Unmask attached SGI interrupts. */
	for (irq = GIC_FIRST_SGI; irq <= GIC_LAST_SGI; irq++) {
		isrc = GIC_INTR_ISRC(sc, irq);
		if (intr_isrc_init_on_cpu(isrc, cpu)) {
			pargs.isrc = isrc;
			gic_v3_enable_intr_periph(&pargs);
		}
	}

	/* Configure and unmask attached PPI interrupts. */
	for (irq = GIC_FIRST_PPI; irq <= GIC_LAST_PPI; irq++) {
		isrc = GIC_INTR_ISRC(sc, irq);
		if (intr_isrc_init_on_cpu(isrc, cpu)) {
			pargs.isrc = isrc;
			/* PPIs also need per-CPU setup before enabling. */
			gic_v3_setup_intr_periph(&pargs);
			gic_v3_enable_intr_periph(&pargs);
		}
	}

	/* Propagate secondary-CPU init to child interrupt controllers. */
	for (i = 0; i < sc->gic_nchildren; i++) {
		child = sc->gic_children[i];
		PIC_INIT_SECONDARY(child);
	}
}
1143 
/*
 * Send the IPI backing 'isrc' (an SGI) to every CPU in 'cpus'.
 *
 * ICC_SGI1R_EL1 can only target CPUs sharing Aff3.Aff2.Aff1, with a
 * 16-bit target list of Aff0 values, so CPUs are batched by affinity
 * group: accumulate target bits in 'val' and write the register each
 * time the group changes.  This relies on CPU_AFFINITY() enumerating
 * CPUs so that members of a group are contiguous — assumption; confirm
 * against the CPU topology enumeration.
 */
static void
gic_v3_ipi_send(device_t dev, struct intr_irqsrc *isrc, cpuset_t cpus,
    u_int ipi)
{
	struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
	uint64_t aff, val, irq;
	int i;

#define	GIC_AFF_MASK	(CPU_AFF3_MASK | CPU_AFF2_MASK | CPU_AFF1_MASK)
#define	GIC_AFFINITY(i)	(CPU_AFFINITY(i) & GIC_AFF_MASK)
	aff = GIC_AFFINITY(0);
	irq = gi->gi_irq;
	val = 0;

	/* Iterate through all CPUs in set */
	for (i = 0; i <= mp_maxid; i++) {
		/* Move to the next affinity group */
		if (aff != GIC_AFFINITY(i)) {
			/* Send the IPI accumulated for the previous group */
			if (val != 0) {
				gic_icc_write(SGI1R, val);
				val = 0;
			}
			aff = GIC_AFFINITY(i);
		}

		/* Send the IPI to this cpu */
		if (CPU_ISSET(i, &cpus)) {
#define	ICC_SGI1R_AFFINITY(aff)					\
    (((uint64_t)CPU_AFF3(aff) << ICC_SGI1R_EL1_AFF3_SHIFT) |	\
     ((uint64_t)CPU_AFF2(aff) << ICC_SGI1R_EL1_AFF2_SHIFT) |	\
     ((uint64_t)CPU_AFF1(aff) << ICC_SGI1R_EL1_AFF1_SHIFT))
			/* Set the affinity when the first at this level */
			if (val == 0)
				val = ICC_SGI1R_AFFINITY(aff) |
				    irq << ICC_SGI1R_EL1_SGIID_SHIFT;
			/* Set the target-list bit to send the IPI to the CPU */
			val |= 1 << CPU_AFF0(CPU_AFFINITY(i));
		}
	}

	/* Send the IPI to the last cpu affinity group */
	if (val != 0)
		gic_icc_write(SGI1R, val);
#undef GIC_AFF_MASK
#undef GIC_AFFINITY
}
1191 
1192 static int
1193 gic_v3_ipi_setup(device_t dev, u_int ipi, struct intr_irqsrc **isrcp)
1194 {
1195 	struct intr_irqsrc *isrc;
1196 	struct gic_v3_softc *sc = device_get_softc(dev);
1197 
1198 	if (sgi_first_unused > GIC_LAST_SGI)
1199 		return (ENOSPC);
1200 
1201 	isrc = GIC_INTR_ISRC(sc, sgi_first_unused);
1202 	sgi_to_ipi[sgi_first_unused++] = ipi;
1203 
1204 	CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu);
1205 
1206 	*isrcp = isrc;
1207 	return (0);
1208 }
1209 #endif /* SMP */
1210 
1211 /*
1212  * Helper routines
1213  */
1214 static void
1215 gic_v3_wait_for_rwp(struct gic_v3_softc *sc, enum gic_v3_xdist xdist)
1216 {
1217 	struct resource *res;
1218 	bus_size_t offset;
1219 	u_int cpuid;
1220 	size_t us_left = 1000000;
1221 
1222 	cpuid = PCPU_GET(cpuid);
1223 
1224 	switch (xdist) {
1225 	case DIST:
1226 		res = sc->gic_dist;
1227 		offset = 0;
1228 		break;
1229 	case REDIST:
1230 		res = sc->gic_redists.pcpu[cpuid].res;
1231 		offset = sc->gic_redists.pcpu[PCPU_GET(cpuid)].offset;
1232 		break;
1233 	default:
1234 		KASSERT(0, ("%s: Attempt to wait for unknown RWP", __func__));
1235 		return;
1236 	}
1237 
1238 	while ((bus_read_4(res, offset + GICD_CTLR) & GICD_CTLR_RWP) != 0) {
1239 		DELAY(1);
1240 		if (us_left-- == 0)
1241 			panic("GICD Register write pending for too long");
1242 	}
1243 }
1244 
1245 /* CPU interface. */
1246 static __inline void
1247 gic_v3_cpu_priority(uint64_t mask)
1248 {
1249 
1250 	/* Set prority mask */
1251 	gic_icc_write(PMR, mask & ICC_PMR_EL1_PRIO_MASK);
1252 }
1253 
/*
 * Enable system-register access (ICC_SRE_EL1.SRE) to the GIC CPU
 * interface on the current CPU.  Returns 0 on success or ENXIO when the
 * bit cannot be set (access disabled at a higher exception level).
 */
static int
gic_v3_cpu_enable_sre(struct gic_v3_softc *sc)
{
	uint64_t sre;
	u_int cpuid;

	cpuid = PCPU_GET(cpuid);
	/*
	 * Set the SRE bit to enable access to GIC CPU interface
	 * via system registers.
	 */
	sre = READ_SPECIALREG(icc_sre_el1);
	sre |= ICC_SRE_EL1_SRE;
	WRITE_SPECIALREG(icc_sre_el1, sre);
	isb();
	/*
	 * Read the register back: SRE is allowed to be RAZ/WI when a
	 * higher exception level has disabled system-register access,
	 * so the write may not have taken effect.
	 */
	sre = READ_SPECIALREG(icc_sre_el1);
	if ((sre & ICC_SRE_EL1_SRE) == 0) {
		/* We are done. This was disabled in EL2 */
		device_printf(sc->dev, "ERROR: CPU%u cannot enable CPU interface "
		    "via system registers\n", cpuid);
		return (ENXIO);
	} else if (bootverbose) {
		device_printf(sc->dev,
		    "CPU%u enabled CPU interface via system registers\n",
		    cpuid);
	}

	return (0);
}
1286 
/*
 * Per-CPU initialization of the GIC CPU interface: enable system
 * register access, open the priority mask, select drop+deactivate EOI
 * mode and enable Group 1 interrupts.
 */
static int
gic_v3_cpu_init(struct gic_v3_softc *sc)
{
	int err;

	/* Enable access to CPU interface via system registers */
	err = gic_v3_cpu_enable_sre(sc);
	if (err != 0)
		return (err);
	/* Priority mask to minimum - accept all interrupts */
	gic_v3_cpu_priority(GIC_PRIORITY_MIN);
	/* Disable EOI mode (EOI both drops priority and deactivates) */
	gic_icc_clear(CTLR, ICC_CTLR_EL1_EOIMODE);
	/*
	 * Enable group 1 (non-secure) interrupts.
	 * NOTE(review): ICC_IGRPEN0_EL1_EN is written to IGRPEN1 here; the
	 * enable bit is bit 0 in both IGRPEN registers, but confirm whether
	 * an IGRPEN1-specific constant is available and should be used.
	 */
	gic_icc_set(IGRPEN1, ICC_IGRPEN0_EL1_EN);

	return (0);
}
1305 
1306 /* Distributor */
/*
 * One-time initialization of the GIC Distributor: disable it, configure
 * all SPIs to a known state (Group 1 NS, level-triggered, highest
 * priority, masked), then re-enable it with affinity routing and route
 * every SPI to the boot CPU.
 */
static int
gic_v3_dist_init(struct gic_v3_softc *sc)
{
	uint64_t aff;
	u_int i;

	/*
	 * 1. Disable the Distributor
	 */
	gic_d_write(sc, 4, GICD_CTLR, 0);
	gic_v3_wait_for_rwp(sc, DIST);

	/*
	 * 2. Configure the Distributor
	 */
	/* Set all SPIs to be Group 1 Non-secure */
	for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i += GICD_I_PER_IGROUPRn)
		gic_d_write(sc, 4, GICD_IGROUPR(i), 0xFFFFFFFF);

	/* Set all global interrupts to be level triggered, active low. */
	for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i += GICD_I_PER_ICFGRn)
		gic_d_write(sc, 4, GICD_ICFGR(i), 0x00000000);

	/* Set priority to all shared interrupts */
	for (i = GIC_FIRST_SPI;
	    i < sc->gic_nirqs; i += GICD_I_PER_IPRIORITYn) {
		/* Set highest priority */
		gic_d_write(sc, 4, GICD_IPRIORITYR(i), GIC_PRIORITY_MAX);
	}

	/*
	 * Disable all interrupts (write-1-to-clear via ICENABLER).  Leave
	 * PPI and SGIs as they are enabled in Re-Distributor registers.
	 */
	for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i += GICD_I_PER_ISENABLERn)
		gic_d_write(sc, 4, GICD_ICENABLER(i), 0xFFFFFFFF);

	gic_v3_wait_for_rwp(sc, DIST);

	/*
	 * 3. Enable Distributor
	 */
	/* Enable Distributor with ARE, Group 1 */
	gic_d_write(sc, 4, GICD_CTLR, GICD_CTLR_ARE_NS | GICD_CTLR_G1A |
	    GICD_CTLR_G1);

	/*
	 * 4. Route all interrupts to boot CPU.
	 */
	aff = CPU_AFFINITY(0);
	for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i++)
		gic_d_write(sc, 8, GICD_IROUTER(i), aff);

	return (0);
}
1362 
1363 /* Re-Distributor */
1364 static int
1365 gic_v3_redist_alloc(struct gic_v3_softc *sc)
1366 {
1367 	sc->gic_redists.pcpu = mallocarray(mp_maxid + 1,
1368 	    sizeof(sc->gic_redists.pcpu[0]), M_GIC_V3, M_WAITOK);
1369 	return (0);
1370 }
1371 
/*
 * Locate the Re-Distributor frame belonging to the current CPU by
 * walking every Re-Distributor region and matching GICR_TYPER's
 * affinity field against this CPU's affinity.  On success the matching
 * resource/offset pair is recorded in sc->gic_redists.pcpu[cpuid].
 * Returns 0 on success, ENODEV for an unrecognized GIC architecture,
 * or ENXIO when no frame matches.
 */
static int
gic_v3_redist_find(struct gic_v3_softc *sc)
{
	struct resource *r_res;
	bus_size_t offset;
	uint64_t aff;
	uint64_t typer;
	uint32_t pidr2;
	u_int cpuid;
	size_t i;

	cpuid = PCPU_GET(cpuid);

	aff = CPU_AFFINITY(cpuid);
	/* Pack Aff3..Aff0 into the layout used by GICR_TYPER's field */
	aff = (CPU_AFF3(aff) << 24) | (CPU_AFF2(aff) << 16) |
	    (CPU_AFF1(aff) << 8) | CPU_AFF0(aff);

	if (bootverbose) {
		device_printf(sc->dev,
		    "Start searching for Re-Distributor\n");
	}
	/* Iterate through Re-Distributor regions */
	for (i = 0; i < sc->gic_redists.nregions; i++) {
		/* Take a copy of the region's resource */
		r_res = sc->gic_redists.regions[i];

		/* Sanity-check that the region really is a GICv3/v4 GICR */
		pidr2 = bus_read_4(r_res, GICR_PIDR2);
		switch (GICR_PIDR2_ARCH(pidr2)) {
		case GICR_PIDR2_ARCH_GICv3: /* fall through */
		case GICR_PIDR2_ARCH_GICv4:
			break;
		default:
			device_printf(sc->dev,
			    "No Re-Distributor found for CPU%u\n", cpuid);
			return (ENODEV);
		}

		/* Walk the per-CPU frames within this region */
		offset = 0;
		do {
			typer = bus_read_8(r_res, offset + GICR_TYPER);
			if ((typer >> GICR_TYPER_AFF_SHIFT) == aff) {
				KASSERT(cpuid <= mp_maxid,
				    ("Invalid pointer to per-CPU redistributor"));
				/* Copy res contents to its final destination */
				sc->gic_redists.pcpu[cpuid].res = r_res;
				sc->gic_redists.pcpu[cpuid].offset = offset;
				sc->gic_redists.pcpu[cpuid].lpi_enabled = false;
				if (bootverbose) {
					device_printf(sc->dev,
					    "CPU%u Re-Distributor has been found\n",
					    cpuid);
				}
				return (0);
			}

			/*
			 * Advance by one frame: RD + SGI pages, plus the
			 * VLPI pages when the implementation has them.
			 */
			offset += (GICR_RD_BASE_SIZE + GICR_SGI_BASE_SIZE);
			if ((typer & GICR_TYPER_VLPIS) != 0) {
				offset +=
				    (GICR_VLPI_BASE_SIZE + GICR_RESERVED_SIZE);
			}
			/* Stop at the region end or the frame marked Last */
		} while (offset < rman_get_size(r_res) &&
		    (typer & GICR_TYPER_LAST) == 0);
	}

	device_printf(sc->dev, "No Re-Distributor found for CPU%u\n", cpuid);
	return (ENXIO);
}
1440 
/*
 * Wake this CPU's Re-Distributor by clearing GICR_WAKER.ProcessorSleep
 * and waiting for ChildrenAsleep to clear.  Panics if the wake-up does
 * not complete within ~1 second.  Always returns 0.
 */
static int
gic_v3_redist_wake(struct gic_v3_softc *sc)
{
	uint32_t waker;
	size_t us_left = 1000000;

	waker = gic_r_read(sc, 4, GICR_WAKER);
	/* Wake up Re-Distributor for this CPU */
	waker &= ~GICR_WAKER_PS;
	gic_r_write(sc, 4, GICR_WAKER, waker);
	/*
	 * When clearing ProcessorSleep bit it is required to wait for
	 * ChildrenAsleep to become zero following the processor power-on.
	 */
	while ((gic_r_read(sc, 4, GICR_WAKER) & GICR_WAKER_CA) != 0) {
		DELAY(1);
		if (us_left-- == 0) {
			panic("Could not wake Re-Distributor for CPU%u",
			    PCPU_GET(cpuid));
		}
	}

	if (bootverbose) {
		device_printf(sc->dev, "CPU%u Re-Distributor woke up\n",
		    PCPU_GET(cpuid));
	}

	return (0);
}
1470 
/*
 * Per-CPU Re-Distributor initialization: locate and wake this CPU's
 * Re-Distributor, then configure the banked SGI/PPI registers (group,
 * enables, priority).
 */
static int
gic_v3_redist_init(struct gic_v3_softc *sc)
{
	int err;
	size_t i;

	err = gic_v3_redist_find(sc);
	if (err != 0)
		return (err);

	err = gic_v3_redist_wake(sc);
	if (err != 0)
		return (err);

	/* Configure SGIs and PPIs to be Group1 Non-secure */
	gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICR_IGROUPR0,
	    0xFFFFFFFF);

	/* Disable PPIs (ICENABLER0 written with the PPI bit mask) */
	gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICR_ICENABLER0,
	    GICR_I_ENABLER_PPI_MASK);
	/* Enable SGIs */
	gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICR_ISENABLER0,
	    GICR_I_ENABLER_SGI_MASK);

	/*
	 * Set priority for SGIs and PPIs.  GICD_IPRIORITYR is used for
	 * the offset computation; presumably it shares the layout of
	 * GICR_IPRIORITYR — confirm against the register definitions.
	 */
	for (i = 0; i <= GIC_LAST_PPI; i += GICR_I_PER_IPRIORITYn) {
		gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_IPRIORITYR(i),
		    GIC_PRIORITY_MAX);
	}

	gic_v3_wait_for_rwp(sc, REDIST);

	return (0);
}
1506 
1507 /*
1508  * SPI-mapped Message Based Interrupts -- a GICv3 MSI/MSI-X controller.
1509  */
1510 
/*
 * Allocate 'count' consecutive MSI interrupts from the MBI range
 * [mbi_start, mbi_start + mbi_count), aligned to 'maxcount' as MSI
 * requires.  Both count and maxcount must be powers of two.  On
 * success the interrupts are marked used and returned through 'isrc';
 * returns ENXIO when no suitable free range exists.
 */
static int
gic_v3_gic_alloc_msi(device_t dev, u_int mbi_start, u_int mbi_count,
    int count, int maxcount, struct intr_irqsrc **isrc)
{
	struct gic_v3_softc *sc;
	int i, irq, end_irq;
	bool found;

	KASSERT(powerof2(count), ("%s: bad count", __func__));
	KASSERT(powerof2(maxcount), ("%s: bad maxcount", __func__));

	sc = device_get_softc(dev);

	mtx_lock(&sc->gic_mbi_mtx);

	found = false;
	for (irq = mbi_start; irq < mbi_start + mbi_count; irq++) {
		/* Start on an aligned interrupt */
		if ((irq & (maxcount - 1)) != 0)
			continue;

		/* Assume we found a valid range until shown otherwise */
		found = true;

		/* Check this range is valid */
		for (end_irq = irq; end_irq != irq + count; end_irq++) {
			/* No free interrupts */
			if (end_irq == mbi_start + mbi_count) {
				found = false;
				break;
			}

			KASSERT((sc->gic_irqs[end_irq].gi_flags & GI_FLAG_MSI)!= 0,
			    ("%s: Non-MSI interrupt found", __func__));

			/* This is already used */
			if ((sc->gic_irqs[end_irq].gi_flags & GI_FLAG_MSI_USED) ==
			    GI_FLAG_MSI_USED) {
				found = false;
				break;
			}
		}
		if (found)
			break;
	}

	/* Not enough interrupts were found */
	if (!found || irq == mbi_start + mbi_count) {
		mtx_unlock(&sc->gic_mbi_mtx);
		return (ENXIO);
	}

	/* Claim the whole range while still holding the lock */
	for (i = 0; i < count; i++) {
		/* Mark the interrupt as used */
		sc->gic_irqs[irq + i].gi_flags |= GI_FLAG_MSI_USED;
	}
	mtx_unlock(&sc->gic_mbi_mtx);

	/* Hand the interrupt sources back to the caller */
	for (i = 0; i < count; i++)
		isrc[i] = (struct intr_irqsrc *)&sc->gic_irqs[irq + i];

	return (0);
}
1574 
1575 static int
1576 gic_v3_gic_release_msi(device_t dev, int count, struct intr_irqsrc **isrc)
1577 {
1578 	struct gic_v3_softc *sc;
1579 	struct gic_v3_irqsrc *gi;
1580 	int i;
1581 
1582 	sc = device_get_softc(dev);
1583 
1584 	mtx_lock(&sc->gic_mbi_mtx);
1585 	for (i = 0; i < count; i++) {
1586 		gi = (struct gic_v3_irqsrc *)isrc[i];
1587 
1588 		KASSERT((gi->gi_flags & GI_FLAG_MSI_USED) == GI_FLAG_MSI_USED,
1589 		    ("%s: Trying to release an unused MSI-X interrupt",
1590 		    __func__));
1591 
1592 		gi->gi_flags &= ~GI_FLAG_MSI_USED;
1593 	}
1594 	mtx_unlock(&sc->gic_mbi_mtx);
1595 
1596 	return (0);
1597 }
1598 
1599 static int
1600 gic_v3_gic_alloc_msix(device_t dev, u_int mbi_start, u_int mbi_count,
1601     struct intr_irqsrc **isrcp)
1602 {
1603 	struct gic_v3_softc *sc;
1604 	int irq;
1605 
1606 	sc = device_get_softc(dev);
1607 
1608 	mtx_lock(&sc->gic_mbi_mtx);
1609 	/* Find an unused interrupt */
1610 	for (irq = mbi_start; irq < mbi_start + mbi_count; irq++) {
1611 		KASSERT((sc->gic_irqs[irq].gi_flags & GI_FLAG_MSI) != 0,
1612 		    ("%s: Non-MSI interrupt found", __func__));
1613 		if ((sc->gic_irqs[irq].gi_flags & GI_FLAG_MSI_USED) == 0)
1614 			break;
1615 	}
1616 	/* No free interrupt was found */
1617 	if (irq == mbi_start + mbi_count) {
1618 		mtx_unlock(&sc->gic_mbi_mtx);
1619 		return (ENXIO);
1620 	}
1621 
1622 	/* Mark the interrupt as used */
1623 	sc->gic_irqs[irq].gi_flags |= GI_FLAG_MSI_USED;
1624 	mtx_unlock(&sc->gic_mbi_mtx);
1625 
1626 	*isrcp = (struct intr_irqsrc *)&sc->gic_irqs[irq];
1627 
1628 	return (0);
1629 }
1630 
1631 static int
1632 gic_v3_gic_release_msix(device_t dev, struct intr_irqsrc *isrc)
1633 {
1634 	struct gic_v3_softc *sc;
1635 	struct gic_v3_irqsrc *gi;
1636 
1637 	sc = device_get_softc(dev);
1638 	gi = (struct gic_v3_irqsrc *)isrc;
1639 
1640 	KASSERT((gi->gi_flags & GI_FLAG_MSI_USED) == GI_FLAG_MSI_USED,
1641 	    ("%s: Trying to release an unused MSI-X interrupt", __func__));
1642 
1643 	mtx_lock(&sc->gic_mbi_mtx);
1644 	gi->gi_flags &= ~GI_FLAG_MSI_USED;
1645 	mtx_unlock(&sc->gic_mbi_mtx);
1646 
1647 	return (0);
1648 }
1649 
1650 static int
1651 gic_v3_alloc_msi(device_t dev, device_t child, int count, int maxcount,
1652     device_t *pic, struct intr_irqsrc **isrc)
1653 {
1654 	struct gic_v3_softc *sc;
1655 	int error;
1656 
1657 	sc = device_get_softc(dev);
1658 	error = gic_v3_gic_alloc_msi(dev, sc->gic_mbi_start,
1659 	    sc->gic_mbi_end - sc->gic_mbi_start, count, maxcount, isrc);
1660 	if (error != 0)
1661 		return (error);
1662 
1663 	*pic = dev;
1664 	return (0);
1665 }
1666 
1667 static int
1668 gic_v3_release_msi(device_t dev, device_t child, int count,
1669     struct intr_irqsrc **isrc)
1670 {
1671 	return (gic_v3_gic_release_msi(dev, count, isrc));
1672 }
1673 
1674 static int
1675 gic_v3_alloc_msix(device_t dev, device_t child, device_t *pic,
1676     struct intr_irqsrc **isrc)
1677 {
1678 	struct gic_v3_softc *sc;
1679 	int error;
1680 
1681 	sc = device_get_softc(dev);
1682 	error = gic_v3_gic_alloc_msix(dev, sc->gic_mbi_start,
1683 	    sc->gic_mbi_end - sc->gic_mbi_start, isrc);
1684 	if (error != 0)
1685 		return (error);
1686 
1687 	*pic = dev;
1688 
1689 	return (0);
1690 }
1691 
1692 static int
1693 gic_v3_release_msix(device_t dev, device_t child, struct intr_irqsrc *isrc)
1694 {
1695 	return (gic_v3_gic_release_msix(dev, isrc));
1696 }
1697 
1698 static int
1699 gic_v3_map_msi(device_t dev, device_t child, struct intr_irqsrc *isrc,
1700     uint64_t *addr, uint32_t *data)
1701 {
1702 	struct gic_v3_softc *sc = device_get_softc(dev);
1703 	struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
1704 
1705 	*addr = vtophys(rman_get_virtual(sc->gic_dist)) + GICD_SETSPI_NSR;
1706 	*data = gi->gi_irq;
1707 
1708 	return (0);
1709 }
1710