xref: /freebsd/sys/arm64/arm64/gic_v3.c (revision 4e8d558c)
1 /*-
2  * Copyright (c) 2015-2016 The FreeBSD Foundation
3  *
4  * This software was developed by Andrew Turner under
5  * the sponsorship of the FreeBSD Foundation.
6  *
7  * This software was developed by Semihalf under
8  * the sponsorship of the FreeBSD Foundation.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  */
31 
32 #include "opt_acpi.h"
33 #include "opt_platform.h"
34 
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37 
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/bitstring.h>
41 #include <sys/bus.h>
42 #include <sys/kernel.h>
43 #include <sys/ktr.h>
44 #include <sys/malloc.h>
45 #include <sys/module.h>
46 #include <sys/rman.h>
47 #include <sys/pcpu.h>
48 #include <sys/proc.h>
49 #include <sys/cpuset.h>
50 #include <sys/lock.h>
51 #include <sys/mutex.h>
52 #include <sys/smp.h>
53 #include <sys/interrupt.h>
54 
55 #include <vm/vm.h>
56 #include <vm/pmap.h>
57 
58 #include <machine/bus.h>
59 #include <machine/cpu.h>
60 #include <machine/intr.h>
61 
62 #ifdef FDT
63 #include <dev/fdt/fdt_intr.h>
64 #include <dev/ofw/ofw_bus_subr.h>
65 #endif
66 
67 #ifdef DEV_ACPI
68 #include <contrib/dev/acpica/include/acpi.h>
69 #include <dev/acpica/acpivar.h>
70 #endif
71 
72 #include "gic_if.h"
73 #include "pic_if.h"
74 #include "msi_if.h"
75 
76 #include <arm/arm/gic_common.h>
77 #include "gic_v3_reg.h"
78 #include "gic_v3_var.h"
79 
80 static bus_print_child_t gic_v3_print_child;
81 static bus_get_domain_t gic_v3_get_domain;
82 static bus_read_ivar_t gic_v3_read_ivar;
83 static bus_write_ivar_t gic_v3_write_ivar;
84 static bus_alloc_resource_t gic_v3_alloc_resource;
85 
86 static pic_disable_intr_t gic_v3_disable_intr;
87 static pic_enable_intr_t gic_v3_enable_intr;
88 static pic_map_intr_t gic_v3_map_intr;
89 static pic_setup_intr_t gic_v3_setup_intr;
90 static pic_teardown_intr_t gic_v3_teardown_intr;
91 static pic_post_filter_t gic_v3_post_filter;
92 static pic_post_ithread_t gic_v3_post_ithread;
93 static pic_pre_ithread_t gic_v3_pre_ithread;
94 static pic_bind_intr_t gic_v3_bind_intr;
95 #ifdef SMP
96 static pic_init_secondary_t gic_v3_init_secondary;
97 static pic_ipi_send_t gic_v3_ipi_send;
98 static pic_ipi_setup_t gic_v3_ipi_setup;
99 #endif
100 
101 static gic_reserve_msi_range_t gic_v3_reserve_msi_range;
102 static gic_alloc_msi_t gic_v3_gic_alloc_msi;
103 static gic_release_msi_t gic_v3_gic_release_msi;
104 static gic_alloc_msix_t gic_v3_gic_alloc_msix;
105 static gic_release_msix_t gic_v3_gic_release_msix;
106 
107 static msi_alloc_msi_t gic_v3_alloc_msi;
108 static msi_release_msi_t gic_v3_release_msi;
109 static msi_alloc_msix_t gic_v3_alloc_msix;
110 static msi_release_msix_t gic_v3_release_msix;
111 static msi_map_msi_t gic_v3_map_msi;
112 
113 static u_int gic_irq_cpu;
114 #ifdef SMP
115 static u_int sgi_to_ipi[GIC_LAST_SGI - GIC_FIRST_SGI + 1];
116 static u_int sgi_first_unused = GIC_FIRST_SGI;
117 #endif
118 
/*
 * Kernel object method table for the base GICv3 driver class.  The
 * FDT/ACPI front-ends provide device_probe/device_attach and inherit
 * everything here (bus, PIC, MSI and GIC interfaces).
 */
static device_method_t gic_v3_methods[] = {
	/* Device interface */
	DEVMETHOD(device_detach,	gic_v3_detach),

	/* Bus interface */
	DEVMETHOD(bus_print_child,	gic_v3_print_child),
	DEVMETHOD(bus_get_domain,	gic_v3_get_domain),
	DEVMETHOD(bus_read_ivar,	gic_v3_read_ivar),
	DEVMETHOD(bus_write_ivar,	gic_v3_write_ivar),
	DEVMETHOD(bus_alloc_resource,	gic_v3_alloc_resource),
	DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),

	/* Interrupt controller interface */
	DEVMETHOD(pic_disable_intr,	gic_v3_disable_intr),
	DEVMETHOD(pic_enable_intr,	gic_v3_enable_intr),
	DEVMETHOD(pic_map_intr,		gic_v3_map_intr),
	DEVMETHOD(pic_setup_intr,	gic_v3_setup_intr),
	DEVMETHOD(pic_teardown_intr,	gic_v3_teardown_intr),
	DEVMETHOD(pic_post_filter,	gic_v3_post_filter),
	DEVMETHOD(pic_post_ithread,	gic_v3_post_ithread),
	DEVMETHOD(pic_pre_ithread,	gic_v3_pre_ithread),
#ifdef SMP
	DEVMETHOD(pic_bind_intr,	gic_v3_bind_intr),
	DEVMETHOD(pic_init_secondary,	gic_v3_init_secondary),
	DEVMETHOD(pic_ipi_send,		gic_v3_ipi_send),
	DEVMETHOD(pic_ipi_setup,	gic_v3_ipi_setup),
#endif

	/* MSI/MSI-X */
	DEVMETHOD(msi_alloc_msi,        gic_v3_alloc_msi),
	DEVMETHOD(msi_release_msi,      gic_v3_release_msi),
	DEVMETHOD(msi_alloc_msix,       gic_v3_alloc_msix),
	DEVMETHOD(msi_release_msix,     gic_v3_release_msix),
	DEVMETHOD(msi_map_msi,          gic_v3_map_msi),

	/* GIC */
	DEVMETHOD(gic_reserve_msi_range, gic_v3_reserve_msi_range),
	DEVMETHOD(gic_alloc_msi,	gic_v3_gic_alloc_msi),
	DEVMETHOD(gic_release_msi,	gic_v3_gic_release_msi),
	DEVMETHOD(gic_alloc_msix,	gic_v3_gic_alloc_msix),
	DEVMETHOD(gic_release_msix,	gic_v3_gic_release_msix),

	/* End */
	DEVMETHOD_END
};
164 
/* Base class "gic"; bus front-ends subclass it via DEFINE_CLASS_1. */
DEFINE_CLASS_0(gic, gic_v3_driver, gic_v3_methods,
    sizeof(struct gic_v3_softc));

/*
 * Driver-specific definitions.
 */
/* malloc(9) type used for all allocations made by this driver. */
MALLOC_DEFINE(M_GIC_V3, "GICv3", GIC_V3_DEVSTR);

/*
 * Helper functions and definitions.
 */
/* Destination registers, either Distributor or Re-Distributor */
enum gic_v3_xdist {
	DIST = 0,
	REDIST,
};
181 
/* Per-interrupt state; one entry per SGI/PPI/SPI in softc's gic_irqs[]. */
struct gic_v3_irqsrc {
	struct intr_irqsrc	gi_isrc;	/* Generic intrng source (must be first) */
	uint32_t		gi_irq;		/* GIC interrupt ID */
	enum intr_polarity	gi_pol;		/* Configured polarity (CONFORM if unset) */
	enum intr_trigger	gi_trig;	/* Configured trigger mode (CONFORM if unset) */
#define GI_FLAG_MSI		(1 << 1) /* This interrupt source should only */
					 /* be used for MSI/MSI-X interrupts */
#define GI_FLAG_MSI_USED	(1 << 2) /* This irq is already allocated */
					 /* for a MSI/MSI-X interrupt */
	u_int			gi_flags;	/* GI_FLAG_* bits */
};
193 
/* Helper routines starting with gic_v3_ */
static int gic_v3_dist_init(struct gic_v3_softc *);
static int gic_v3_redist_alloc(struct gic_v3_softc *);
static int gic_v3_redist_find(struct gic_v3_softc *);
static int gic_v3_redist_init(struct gic_v3_softc *);
static int gic_v3_cpu_init(struct gic_v3_softc *);
static void gic_v3_wait_for_rwp(struct gic_v3_softc *, enum gic_v3_xdist);

/* A sequence of init functions for primary (boot) CPU */
typedef int (*gic_v3_initseq_t) (struct gic_v3_softc *);
/*
 * Primary CPU initialization sequence: Distributor first, then the
 * per-CPU Re-Distributor array, that CPU's Re-Distributor, and finally
 * the CPU interface.  NULL-terminated so callers can iterate.
 */
static gic_v3_initseq_t gic_v3_primary_init[] = {
	gic_v3_dist_init,
	gic_v3_redist_alloc,
	gic_v3_redist_init,
	gic_v3_cpu_init,
	NULL
};

#ifdef SMP
/* Secondary CPU initialization sequence (Distributor is shared, skip it). */
static gic_v3_initseq_t gic_v3_secondary_init[] = {
	gic_v3_redist_init,
	gic_v3_cpu_init,
	NULL
};
#endif
221 
222 uint32_t
223 gic_r_read_4(device_t dev, bus_size_t offset)
224 {
225 	struct gic_v3_softc *sc;
226 	struct resource *rdist;
227 
228 	sc = device_get_softc(dev);
229 	rdist = sc->gic_redists.pcpu[PCPU_GET(cpuid)]->res;
230 	offset += sc->gic_redists.pcpu[PCPU_GET(cpuid)]->offset;
231 	return (bus_read_4(rdist, offset));
232 }
233 
234 uint64_t
235 gic_r_read_8(device_t dev, bus_size_t offset)
236 {
237 	struct gic_v3_softc *sc;
238 	struct resource *rdist;
239 
240 	sc = device_get_softc(dev);
241 	rdist = sc->gic_redists.pcpu[PCPU_GET(cpuid)]->res;
242 	offset += sc->gic_redists.pcpu[PCPU_GET(cpuid)]->offset;
243 	return (bus_read_8(rdist, offset));
244 }
245 
246 void
247 gic_r_write_4(device_t dev, bus_size_t offset, uint32_t val)
248 {
249 	struct gic_v3_softc *sc;
250 	struct resource *rdist;
251 
252 	sc = device_get_softc(dev);
253 	rdist = sc->gic_redists.pcpu[PCPU_GET(cpuid)]->res;
254 	offset += sc->gic_redists.pcpu[PCPU_GET(cpuid)]->offset;
255 	bus_write_4(rdist, offset, val);
256 }
257 
258 void
259 gic_r_write_8(device_t dev, bus_size_t offset, uint64_t val)
260 {
261 	struct gic_v3_softc *sc;
262 	struct resource *rdist;
263 
264 	sc = device_get_softc(dev);
265 	rdist = sc->gic_redists.pcpu[PCPU_GET(cpuid)]->res;
266 	offset += sc->gic_redists.pcpu[PCPU_GET(cpuid)]->offset;
267 	bus_write_8(rdist, offset, val);
268 }
269 
270 static void
271 gic_v3_reserve_msi_range(device_t dev, u_int start, u_int count)
272 {
273 	struct gic_v3_softc *sc;
274 	int i;
275 
276 	sc = device_get_softc(dev);
277 
278 	KASSERT((start + count) < sc->gic_nirqs,
279 	    ("%s: Trying to allocate too many MSI IRQs: %d + %d > %d", __func__,
280 	    start, count, sc->gic_nirqs));
281 	for (i = 0; i < count; i++) {
282 		KASSERT(sc->gic_irqs[start + i].gi_isrc.isrc_handlers == 0,
283 		    ("%s: MSI interrupt %d already has a handler", __func__,
284 		    count + i));
285 		KASSERT(sc->gic_irqs[start + i].gi_pol == INTR_POLARITY_CONFORM,
286 		    ("%s: MSI interrupt %d already has a polarity", __func__,
287 		    count + i));
288 		KASSERT(sc->gic_irqs[start + i].gi_trig == INTR_TRIGGER_CONFORM,
289 		    ("%s: MSI interrupt %d already has a trigger", __func__,
290 		    count + i));
291 		sc->gic_irqs[start + i].gi_pol = INTR_POLARITY_HIGH;
292 		sc->gic_irqs[start + i].gi_trig = INTR_TRIGGER_EDGE;
293 		sc->gic_irqs[start + i].gi_flags |= GI_FLAG_MSI;
294 	}
295 }
296 
297 /*
298  * Device interface.
299  */
300 int
301 gic_v3_attach(device_t dev)
302 {
303 	struct gic_v3_softc *sc;
304 	gic_v3_initseq_t *init_func;
305 	uint32_t typer;
306 	int rid;
307 	int err;
308 	size_t i;
309 	u_int irq;
310 	const char *name;
311 
312 	sc = device_get_softc(dev);
313 	sc->gic_registered = FALSE;
314 	sc->dev = dev;
315 	err = 0;
316 
317 	/* Initialize mutex */
318 	mtx_init(&sc->gic_mtx, "GICv3 lock", NULL, MTX_SPIN);
319 
320 	/*
321 	 * Allocate array of struct resource.
322 	 * One entry for Distributor and all remaining for Re-Distributor.
323 	 */
324 	sc->gic_res = malloc(
325 	    sizeof(*sc->gic_res) * (sc->gic_redists.nregions + 1),
326 	    M_GIC_V3, M_WAITOK);
327 
328 	/* Now allocate corresponding resources */
329 	for (i = 0, rid = 0; i < (sc->gic_redists.nregions + 1); i++, rid++) {
330 		sc->gic_res[rid] = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
331 		    &rid, RF_ACTIVE);
332 		if (sc->gic_res[rid] == NULL)
333 			return (ENXIO);
334 	}
335 
336 	/*
337 	 * Distributor interface
338 	 */
339 	sc->gic_dist = sc->gic_res[0];
340 
341 	/*
342 	 * Re-Dristributor interface
343 	 */
344 	/* Allocate space under region descriptions */
345 	sc->gic_redists.regions = malloc(
346 	    sizeof(*sc->gic_redists.regions) * sc->gic_redists.nregions,
347 	    M_GIC_V3, M_WAITOK);
348 
349 	/* Fill-up bus_space information for each region. */
350 	for (i = 0, rid = 1; i < sc->gic_redists.nregions; i++, rid++)
351 		sc->gic_redists.regions[i] = sc->gic_res[rid];
352 
353 	/* Get the number of supported SPI interrupts */
354 	typer = gic_d_read(sc, 4, GICD_TYPER);
355 	sc->gic_nirqs = GICD_TYPER_I_NUM(typer);
356 	if (sc->gic_nirqs > GIC_I_NUM_MAX)
357 		sc->gic_nirqs = GIC_I_NUM_MAX;
358 
359 	sc->gic_irqs = malloc(sizeof(*sc->gic_irqs) * sc->gic_nirqs,
360 	    M_GIC_V3, M_WAITOK | M_ZERO);
361 	name = device_get_nameunit(dev);
362 	for (irq = 0; irq < sc->gic_nirqs; irq++) {
363 		struct intr_irqsrc *isrc;
364 
365 		sc->gic_irqs[irq].gi_irq = irq;
366 		sc->gic_irqs[irq].gi_pol = INTR_POLARITY_CONFORM;
367 		sc->gic_irqs[irq].gi_trig = INTR_TRIGGER_CONFORM;
368 
369 		isrc = &sc->gic_irqs[irq].gi_isrc;
370 		if (irq <= GIC_LAST_SGI) {
371 			err = intr_isrc_register(isrc, sc->dev,
372 			    INTR_ISRCF_IPI, "%s,i%u", name, irq - GIC_FIRST_SGI);
373 		} else if (irq <= GIC_LAST_PPI) {
374 			err = intr_isrc_register(isrc, sc->dev,
375 			    INTR_ISRCF_PPI, "%s,p%u", name, irq - GIC_FIRST_PPI);
376 		} else {
377 			err = intr_isrc_register(isrc, sc->dev, 0,
378 			    "%s,s%u", name, irq - GIC_FIRST_SPI);
379 		}
380 		if (err != 0) {
381 			/* XXX call intr_isrc_deregister() */
382 			free(sc->gic_irqs, M_DEVBUF);
383 			return (err);
384 		}
385 	}
386 
387 	mtx_init(&sc->gic_mbi_mtx, "GICv3 mbi lock", NULL, MTX_DEF);
388 	if (sc->gic_mbi_start > 0) {
389 		if (!sc->gic_mbi_end) {
390 			/*
391 			 * This is to address SPI based msi ranges, where
392 			 * SPI range is not specified in ACPI
393 			 */
394 			sc->gic_mbi_end = sc->gic_nirqs - 1;
395 		}
396 		gic_v3_reserve_msi_range(dev, sc->gic_mbi_start,
397 		    sc->gic_mbi_end - sc->gic_mbi_start);
398 
399 		if (bootverbose) {
400 			device_printf(dev, "using spi %u to %u\n", sc->gic_mbi_start,
401 					sc->gic_mbi_end);
402 		}
403 	}
404 
405 	/*
406 	 * Read the Peripheral ID2 register. This is an implementation
407 	 * defined register, but seems to be implemented in all GICv3
408 	 * parts and Linux expects it to be there.
409 	 */
410 	sc->gic_pidr2 = gic_d_read(sc, 4, GICD_PIDR2);
411 
412 	/* Get the number of supported interrupt identifier bits */
413 	sc->gic_idbits = GICD_TYPER_IDBITS(typer);
414 
415 	if (bootverbose) {
416 		device_printf(dev, "SPIs: %u, IDs: %u\n",
417 		    sc->gic_nirqs, (1 << sc->gic_idbits) - 1);
418 	}
419 
420 	/* Train init sequence for boot CPU */
421 	for (init_func = gic_v3_primary_init; *init_func != NULL; init_func++) {
422 		err = (*init_func)(sc);
423 		if (err != 0)
424 			return (err);
425 	}
426 
427 	return (0);
428 }
429 
/*
 * Detach the GICv3.  Only legal while the PIC has not been registered
 * with intrng (sc->gic_registered); a registered root controller can
 * never be torn down, so that case panics.  Releases all Distributor/
 * Re-Distributor memory resources and per-CPU bookkeeping.
 */
int
gic_v3_detach(device_t dev)
{
	struct gic_v3_softc *sc;
	size_t i;
	int rid;

	sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		/*
		 * XXX: We should probably deregister PIC
		 */
		if (sc->gic_registered)
			panic("Trying to detach registered PIC");
	}
	/* One resource per Re-Distributor region plus one for the Distributor. */
	for (rid = 0; rid < (sc->gic_redists.nregions + 1); rid++)
		bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->gic_res[rid]);

	/* Free per-CPU Re-Distributor state for every possible CPU id. */
	for (i = 0; i <= mp_maxid; i++)
		free(sc->gic_redists.pcpu[i], M_GIC_V3);

	free(sc->ranges, M_GIC_V3);
	free(sc->gic_res, M_GIC_V3);
	free(sc->gic_redists.regions, M_GIC_V3);

	return (0);
}
458 
459 static int
460 gic_v3_print_child(device_t bus, device_t child)
461 {
462 	struct resource_list *rl;
463 	int retval = 0;
464 
465 	rl = BUS_GET_RESOURCE_LIST(bus, child);
466 	KASSERT(rl != NULL, ("%s: No resource list", __func__));
467 	retval += bus_print_child_header(bus, child);
468 	retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#jx");
469 	retval += bus_print_child_footer(bus, child);
470 
471 	return (retval);
472 }
473 
474 static int
475 gic_v3_get_domain(device_t dev, device_t child, int *domain)
476 {
477 	struct gic_v3_devinfo *di;
478 
479 	di = device_get_ivars(child);
480 	if (di->gic_domain < 0)
481 		return (ENOENT);
482 
483 	*domain = di->gic_domain;
484 	return (0);
485 }
486 
/*
 * Read instance variables exported to children (e.g. the ITS).
 * Returns ENOENT for unknown ivars.
 */
static int
gic_v3_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
{
	struct gic_v3_softc *sc;
	struct gic_v3_devinfo *di;

	sc = device_get_softc(dev);

	switch (which) {
	case GICV3_IVAR_NIRQS:
		/* Split the IRQ space beyond our own among the children. */
		*result = (intr_nirq - sc->gic_nirqs) / sc->gic_nchildren;
		return (0);
	case GICV3_IVAR_REDIST:
		/* Per-CPU Re-Distributor bookkeeping for the calling CPU. */
		*result = (uintptr_t)sc->gic_redists.pcpu[PCPU_GET(cpuid)];
		return (0);
	case GIC_IVAR_HW_REV:
		/* Architecture revision decoded from the PIDR2 register. */
		KASSERT(
		    GICR_PIDR2_ARCH(sc->gic_pidr2) == GICR_PIDR2_ARCH_GICv3 ||
		    GICR_PIDR2_ARCH(sc->gic_pidr2) == GICR_PIDR2_ARCH_GICv4,
		    ("gic_v3_read_ivar: Invalid GIC architecture: %d (%.08X)",
		     GICR_PIDR2_ARCH(sc->gic_pidr2), sc->gic_pidr2));
		*result = GICR_PIDR2_ARCH(sc->gic_pidr2);
		return (0);
	case GIC_IVAR_BUS:
		/* Which bus glue (FDT/ACPI) attached us. */
		KASSERT(sc->gic_bus != GIC_BUS_UNKNOWN,
		    ("gic_v3_read_ivar: Unknown bus type"));
		KASSERT(sc->gic_bus <= GIC_BUS_MAX,
		    ("gic_v3_read_ivar: Invalid bus type %u", sc->gic_bus));
		*result = sc->gic_bus;
		return (0);
	case GIC_IVAR_VGIC:
		/* Whether this child is the virtual GIC interface. */
		di = device_get_ivars(child);
		if (di == NULL)
			return (EINVAL);
		*result = di->is_vgic;
		return (0);
	}

	return (ENOENT);
}
527 
528 static int
529 gic_v3_write_ivar(device_t dev, device_t child, int which, uintptr_t value)
530 {
531 	switch(which) {
532 	case GICV3_IVAR_NIRQS:
533 	case GICV3_IVAR_REDIST:
534 	case GIC_IVAR_HW_REV:
535 	case GIC_IVAR_BUS:
536 		return (EINVAL);
537 	}
538 
539 	return (ENOENT);
540 }
541 
/*
 * Allocate a memory resource for a child device.  Default ranges are
 * resolved via the child's resource list, then translated from the
 * child bus address space to the host address space using the
 * "ranges"-style table in the softc before delegating to the generic
 * allocator.  Only SYS_RES_MEMORY is supported.
 */
static struct resource *
gic_v3_alloc_resource(device_t bus, device_t child, int type, int *rid,
    rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
	struct gic_v3_softc *sc;
	struct resource_list_entry *rle;
	struct resource_list *rl;
	int j;

	/* We only allocate memory */
	if (type != SYS_RES_MEMORY)
		return (NULL);

	sc = device_get_softc(bus);

	if (RMAN_IS_DEFAULT_RANGE(start, end)) {
		rl = BUS_GET_RESOURCE_LIST(bus, child);
		if (rl == NULL)
			return (NULL);

		/* Find defaults for this rid */
		rle = resource_list_find(rl, type, *rid);
		if (rle == NULL)
			return (NULL);

		start = rle->start;
		end = rle->end;
		count = rle->count;
	}

	/* Remap through ranges property */
	for (j = 0; j < sc->nranges; j++) {
		/* The request must fall entirely within one range entry. */
		if (start >= sc->ranges[j].bus && end <
		    sc->ranges[j].bus + sc->ranges[j].size) {
			start -= sc->ranges[j].bus;
			start += sc->ranges[j].host;
			end -= sc->ranges[j].bus;
			end += sc->ranges[j].host;
			break;
		}
	}
	/* With a non-empty ranges table, an unmatched request is an error. */
	if (j == sc->nranges && sc->nranges != 0) {
		if (bootverbose)
			device_printf(bus, "Could not map resource "
			    "%#jx-%#jx\n", (uintmax_t)start, (uintmax_t)end);

		return (NULL);
	}

	return (bus_generic_alloc_resource(bus, child, type, rid, start, end,
	    count, flags));
}
594 
/*
 * Main interrupt filter.  Acknowledges interrupts from ICC_IAR1_EL1 in
 * a loop and dispatches them: LPIs go to the child controller (ITS),
 * SGIs are dispatched as IPIs, PPIs/SPIs through intrng.  The loop
 * terminates when the acknowledged ID is out of range, which covers
 * the special "no pending interrupt" IDs (1020-1023).
 */
int
arm_gic_v3_intr(void *arg)
{
	struct gic_v3_softc *sc = arg;
	struct gic_v3_irqsrc *gi;
	struct intr_pic *pic;
	uint64_t active_irq;
	struct trapframe *tf;

	pic = sc->gic_pic;

	while (1) {
		if (CPU_MATCH_ERRATA_CAVIUM_THUNDERX_1_1) {
			/*
			 * Hardware:		Cavium ThunderX
			 * Chip revision:	Pass 1.0 (early version)
			 *			Pass 1.1 (production)
			 * ERRATUM:		22978, 23154
			 */
			__asm __volatile(
			    "nop;nop;nop;nop;nop;nop;nop;nop;	\n"
			    "mrs %0, ICC_IAR1_EL1		\n"
			    "nop;nop;nop;nop;			\n"
			    "dsb sy				\n"
			    : "=&r" (active_irq));
		} else {
			active_irq = gic_icc_read(IAR1);
		}

		/* LPIs are owned by the child interrupt controller (ITS). */
		if (active_irq >= GIC_FIRST_LPI) {
			intr_child_irq_handler(pic, active_irq);
			continue;
		}

		/* Out-of-range IDs (incl. 1020-1023 specials): nothing pending. */
		if (__predict_false(active_irq >= sc->gic_nirqs))
			return (FILTER_HANDLED);

		tf = curthread->td_intr_frame;
		gi = &sc->gic_irqs[active_irq];
		if (active_irq <= GIC_LAST_SGI) {
			/* Call EOI for all IPI before dispatch. */
			gic_icc_write(EOIR1, (uint64_t)active_irq);
#ifdef SMP
			intr_ipi_dispatch(sgi_to_ipi[gi->gi_irq]);
#else
			device_printf(sc->dev, "SGI %ju on UP system detected\n",
			    (uintmax_t)(active_irq - GIC_FIRST_SGI));
#endif
		} else if (active_irq >= GIC_FIRST_PPI &&
		    active_irq <= GIC_LAST_SPI) {
			/* Edge interrupts are EOId up front; level ones in post_filter. */
			if (gi->gi_trig == INTR_TRIGGER_EDGE)
				gic_icc_write(EOIR1, gi->gi_irq);

			if (intr_isrc_dispatch(&gi->gi_isrc, tf) != 0) {
				/* No handler: EOI (if still owed) and mask the stray IRQ. */
				if (gi->gi_trig != INTR_TRIGGER_EDGE)
					gic_icc_write(EOIR1, gi->gi_irq);
				gic_v3_disable_intr(sc->dev, &gi->gi_isrc);
				device_printf(sc->dev,
				    "Stray irq %lu disabled\n", active_irq);
			}
		}
	}
}
658 
659 #ifdef FDT
/*
 * Decode a devicetree interrupt specifier (GIC binding) into a GIC
 * interrupt number, polarity and trigger mode.  Returns EINVAL for
 * malformed or unsupported specifiers.
 */
static int
gic_map_fdt(device_t dev, u_int ncells, pcell_t *cells, u_int *irqp,
    enum intr_polarity *polp, enum intr_trigger *trigp)
{
	u_int irq;

	if (ncells < 3)
		return (EINVAL);

	/*
	 * The 1st cell is the interrupt type:
	 *	0 = SPI
	 *	1 = PPI
	 * The 2nd cell contains the interrupt number:
	 *	[0 - 987] for SPI
	 *	[0 -  15] for PPI
	 * The 3rd cell is the flags, encoded as follows:
	 *   bits[3:0] trigger type and level flags
	 *	1 = edge triggered
	 *      2 = edge triggered (PPI only)
	 *	4 = level-sensitive
	 *	8 = level-sensitive (PPI only)
	 */
	switch (cells[0]) {
	case 0:
		irq = GIC_FIRST_SPI + cells[1];
		/* SPI irq is checked later. */
		break;
	case 1:
		irq = GIC_FIRST_PPI + cells[1];
		if (irq > GIC_LAST_PPI) {
			device_printf(dev, "unsupported PPI interrupt "
			    "number %u\n", cells[1]);
			return (EINVAL);
		}
		break;
	default:
		device_printf(dev, "unsupported interrupt type "
		    "configuration %u\n", cells[0]);
		return (EINVAL);
	}

	switch (cells[2] & FDT_INTR_MASK) {
	case FDT_INTR_EDGE_RISING:
		*trigp = INTR_TRIGGER_EDGE;
		*polp = INTR_POLARITY_HIGH;
		break;
	case FDT_INTR_EDGE_FALLING:
		*trigp = INTR_TRIGGER_EDGE;
		*polp = INTR_POLARITY_LOW;
		break;
	case FDT_INTR_LEVEL_HIGH:
		*trigp = INTR_TRIGGER_LEVEL;
		*polp = INTR_POLARITY_HIGH;
		break;
	case FDT_INTR_LEVEL_LOW:
		*trigp = INTR_TRIGGER_LEVEL;
		*polp = INTR_POLARITY_LOW;
		break;
	default:
		device_printf(dev, "unsupported trigger/polarity "
		    "configuration 0x%02x\n", cells[2]);
		return (EINVAL);
	}

	/* Check the interrupt is valid: SPIs must be active-high. */
	if (irq >= GIC_FIRST_SPI && *polp != INTR_POLARITY_HIGH)
		return (EINVAL);

	*irqp = irq;
	return (0);
}
732 #endif
733 
734 static int
735 gic_map_msi(device_t dev, struct intr_map_data_msi *msi_data, u_int *irqp,
736     enum intr_polarity *polp, enum intr_trigger *trigp)
737 {
738 	struct gic_v3_irqsrc *gi;
739 
740 	/* SPI-mapped MSI */
741 	gi = (struct gic_v3_irqsrc *)msi_data->isrc;
742 	if (gi == NULL)
743 		return (ENXIO);
744 
745 	*irqp = gi->gi_irq;
746 
747 	/* MSI/MSI-X interrupts are always edge triggered with high polarity */
748 	*polp = INTR_POLARITY_HIGH;
749 	*trigp = INTR_TRIGGER_EDGE;
750 
751 	return (0);
752 }
753 
/*
 * Common map-interrupt helper: translate FDT, ACPI or MSI map data into
 * a validated (irq, polarity, trigger) triple.  polp/trigp may be NULL
 * when the caller only needs the interrupt number.
 */
static int
do_gic_v3_map_intr(device_t dev, struct intr_map_data *data, u_int *irqp,
    enum intr_polarity *polp, enum intr_trigger *trigp)
{
	struct gic_v3_softc *sc;
	enum intr_polarity pol;
	enum intr_trigger trig;
	struct intr_map_data_msi *dam;
#ifdef FDT
	struct intr_map_data_fdt *daf;
#endif
#ifdef DEV_ACPI
	struct intr_map_data_acpi *daa;
#endif
	u_int irq;

	sc = device_get_softc(dev);

	switch (data->type) {
#ifdef FDT
	case INTR_MAP_DATA_FDT:
		daf = (struct intr_map_data_fdt *)data;
		if (gic_map_fdt(dev, daf->ncells, daf->cells, &irq, &pol,
		    &trig) != 0)
			return (EINVAL);
		break;
#endif
#ifdef DEV_ACPI
	case INTR_MAP_DATA_ACPI:
		/* ACPI map data already carries decoded values. */
		daa = (struct intr_map_data_acpi *)data;
		irq = daa->irq;
		pol = daa->pol;
		trig = daa->trig;
		break;
#endif
	case INTR_MAP_DATA_MSI:
		/* SPI-mapped MSI */
		dam = (struct intr_map_data_msi *)data;
		if (gic_map_msi(dev, dam, &irq, &pol, &trig) != 0)
			return (EINVAL);
		break;
	default:
		return (EINVAL);
	}

	/* Validate the decoded values before handing them back. */
	if (irq >= sc->gic_nirqs)
		return (EINVAL);
	switch (pol) {
	case INTR_POLARITY_CONFORM:
	case INTR_POLARITY_LOW:
	case INTR_POLARITY_HIGH:
		break;
	default:
		return (EINVAL);
	}
	switch (trig) {
	case INTR_TRIGGER_CONFORM:
	case INTR_TRIGGER_EDGE:
	case INTR_TRIGGER_LEVEL:
		break;
	default:
		return (EINVAL);
	}

	*irqp = irq;
	if (polp != NULL)
		*polp = pol;
	if (trigp != NULL)
		*trigp = trig;
	return (0);
}
825 
826 static int
827 gic_v3_map_intr(device_t dev, struct intr_map_data *data,
828     struct intr_irqsrc **isrcp)
829 {
830 	struct gic_v3_softc *sc;
831 	int error;
832 	u_int irq;
833 
834 	error = do_gic_v3_map_intr(dev, data, &irq, NULL, NULL);
835 	if (error == 0) {
836 		sc = device_get_softc(dev);
837 		*isrcp = GIC_INTR_ISRC(sc, irq);
838 	}
839 	return (error);
840 }
841 
/* Argument bundle for the smp_rendezvous() per-CPU setup/enable callbacks. */
struct gic_v3_setup_periph_args {
	device_t		 dev;	/* GICv3 device */
	struct intr_irqsrc	*isrc;	/* Interrupt source to configure */
};
846 
/*
 * Per-CPU part of interrupt setup, run via smp_rendezvous() for PPIs
 * and directly for SPIs.  Programs the trigger mode in GICD_ICFGR (or
 * the Re-Distributor's SGI frame for SGIs/PPIs) and, for PPIs, records
 * this CPU in the source's CPU set.
 */
static void
gic_v3_setup_intr_periph(void *argp)
{
	struct gic_v3_setup_periph_args *args = argp;
	struct intr_irqsrc *isrc = args->isrc;
	struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
	device_t dev = args->dev;
	u_int irq = gi->gi_irq;
	struct gic_v3_softc *sc = device_get_softc(dev);
	uint32_t reg;

	MPASS(irq <= GIC_LAST_SPI);

	/*
	 * We need the lock for both SGIs and PPIs for an atomic CPU_SET() at a
	 * minimum, but we also need it below for SPIs.
	 */
	mtx_lock_spin(&sc->gic_mtx);

	if (isrc->isrc_flags & INTR_ISRCF_PPI)
		CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu);

	if (irq >= GIC_FIRST_PPI && irq <= GIC_LAST_SPI) {
		/* Set the trigger and polarity */
		if (irq <= GIC_LAST_PPI)
			reg = gic_r_read(sc, 4,
			    GICR_SGI_BASE_SIZE + GICD_ICFGR(irq));
		else
			reg = gic_d_read(sc, 4, GICD_ICFGR(irq));
		/* ICFGR: 2 bits per interrupt; bit 1 set = edge, clear = level. */
		if (gi->gi_trig == INTR_TRIGGER_LEVEL)
			reg &= ~(2 << ((irq % 16) * 2));
		else
			reg |= 2 << ((irq % 16) * 2);

		if (irq <= GIC_LAST_PPI) {
			gic_r_write(sc, 4,
			    GICR_SGI_BASE_SIZE + GICD_ICFGR(irq), reg);
			gic_v3_wait_for_rwp(sc, REDIST);
		} else {
			gic_d_write(sc, 4, GICD_ICFGR(irq), reg);
			gic_v3_wait_for_rwp(sc, DIST);
		}
	}

	mtx_unlock_spin(&sc->gic_mtx);
}
893 
/*
 * pic_setup_intr method: validate the requested configuration against
 * the interrupt source and program the hardware.  PPIs are configured
 * on all online CPUs via smp_rendezvous(); SPIs are configured once and
 * bound to a CPU.
 */
static int
gic_v3_setup_intr(device_t dev, struct intr_irqsrc *isrc,
    struct resource *res, struct intr_map_data *data)
{
	struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
	struct gic_v3_setup_periph_args pargs;
	enum intr_trigger trig;
	enum intr_polarity pol;
	u_int irq;
	int error;

	if (data == NULL)
		return (ENOTSUP);

	error = do_gic_v3_map_intr(dev, data, &irq, &pol, &trig);
	if (error != 0)
		return (error);

	/* The map data must resolve to this source with a concrete config. */
	if (gi->gi_irq != irq || pol == INTR_POLARITY_CONFORM ||
	    trig == INTR_TRIGGER_CONFORM)
		return (EINVAL);

	/* Compare config if this is not first setup. */
	if (isrc->isrc_handlers != 0) {
		if (pol != gi->gi_pol || trig != gi->gi_trig)
			return (EINVAL);
		else
			return (0);
	}

	/* For MSI/MSI-X we should have already configured these */
	if ((gi->gi_flags & GI_FLAG_MSI) == 0) {
		gi->gi_pol = pol;
		gi->gi_trig = trig;
	}

	pargs.dev = dev;
	pargs.isrc = isrc;

	if (isrc->isrc_flags & INTR_ISRCF_PPI) {
		/*
		 * If APs haven't been fired up yet, smp_rendezvous() will just
		 * execute it on the single CPU and gic_v3_init_secondary() will
		 * clean up afterwards.
		 */
		smp_rendezvous(NULL, gic_v3_setup_intr_periph, NULL, &pargs);
	} else if (irq >= GIC_FIRST_SPI && irq <= GIC_LAST_SPI) {
		gic_v3_setup_intr_periph(&pargs);
		gic_v3_bind_intr(dev, isrc);
	}

	return (0);
}
947 
948 static int
949 gic_v3_teardown_intr(device_t dev, struct intr_irqsrc *isrc,
950     struct resource *res, struct intr_map_data *data)
951 {
952 	struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
953 
954 	if (isrc->isrc_handlers == 0 && (gi->gi_flags & GI_FLAG_MSI) == 0) {
955 		gi->gi_pol = INTR_POLARITY_CONFORM;
956 		gi->gi_trig = INTR_TRIGGER_CONFORM;
957 	}
958 
959 	return (0);
960 }
961 
/*
 * pic_disable_intr method: mask the interrupt by writing its bit to the
 * appropriate ICENABLER register — the Re-Distributor's SGI frame for
 * SGIs/PPIs, the Distributor for SPIs.
 */
static void
gic_v3_disable_intr(device_t dev, struct intr_irqsrc *isrc)
{
	struct gic_v3_softc *sc;
	struct gic_v3_irqsrc *gi;
	u_int irq;

	sc = device_get_softc(dev);
	gi = (struct gic_v3_irqsrc *)isrc;
	irq = gi->gi_irq;

	if (irq <= GIC_LAST_PPI) {
		/* SGIs and PPIs in corresponding Re-Distributor */
		gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_ICENABLER(irq),
		    GICD_I_MASK(irq));
		gic_v3_wait_for_rwp(sc, REDIST);
	} else if (irq >= GIC_FIRST_SPI && irq <= GIC_LAST_SPI) {
		/* SPIs in distributor */
		gic_d_write(sc, 4, GICD_ICENABLER(irq), GICD_I_MASK(irq));
		gic_v3_wait_for_rwp(sc, DIST);
	} else
		panic("%s: Unsupported IRQ %u", __func__, irq);
}
985 
986 static void
987 gic_v3_enable_intr_periph(void *argp)
988 {
989 	struct gic_v3_setup_periph_args *args = argp;
990 	struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)args->isrc;
991 	device_t dev = args->dev;
992 	struct gic_v3_softc *sc = device_get_softc(dev);
993 	u_int irq = gi->gi_irq;
994 
995 	/* SGIs and PPIs in corresponding Re-Distributor */
996 	gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_ISENABLER(irq),
997 	    GICD_I_MASK(irq));
998 	gic_v3_wait_for_rwp(sc, REDIST);
999 }
1000 
/*
 * pic_enable_intr method: unmask the interrupt.  SGIs are enabled only
 * on the current CPU (APs are handled as they come online); PPIs are
 * enabled on every CPU via smp_rendezvous(); SPIs via the Distributor.
 */
static void
gic_v3_enable_intr(device_t dev, struct intr_irqsrc *isrc)
{
	struct gic_v3_setup_periph_args pargs;
	struct gic_v3_softc *sc;
	struct gic_v3_irqsrc *gi;
	u_int irq;

	gi = (struct gic_v3_irqsrc *)isrc;
	irq = gi->gi_irq;
	pargs.isrc = isrc;
	pargs.dev = dev;

	if (irq <= GIC_LAST_PPI) {
		/*
		 * SGIs only need configured on the current AP.  We'll setup and
		 * enable IPIs as APs come online.
		 */
		if (irq <= GIC_LAST_SGI)
			gic_v3_enable_intr_periph(&pargs);
		else
			smp_rendezvous(NULL, gic_v3_enable_intr_periph, NULL,
			    &pargs);
		return;
	}

	sc = device_get_softc(dev);

	if (irq >= GIC_FIRST_SPI && irq <= GIC_LAST_SPI) {
		/* SPIs in distributor */
		gic_d_write(sc, 4, GICD_ISENABLER(irq), GICD_I_MASK(irq));
		gic_v3_wait_for_rwp(sc, DIST);
	} else
		panic("%s: Unsupported IRQ %u", __func__, irq);
}
1036 
1037 static void
1038 gic_v3_pre_ithread(device_t dev, struct intr_irqsrc *isrc)
1039 {
1040 	struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
1041 
1042 	gic_v3_disable_intr(dev, isrc);
1043 	gic_icc_write(EOIR1, gi->gi_irq);
1044 }
1045 
1046 static void
1047 gic_v3_post_ithread(device_t dev, struct intr_irqsrc *isrc)
1048 {
1049 
1050 	gic_v3_enable_intr(dev, isrc);
1051 }
1052 
1053 static void
1054 gic_v3_post_filter(device_t dev, struct intr_irqsrc *isrc)
1055 {
1056 	struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
1057 
1058 	if (gi->gi_trig == INTR_TRIGGER_EDGE)
1059 		return;
1060 
1061 	gic_icc_write(EOIR1, gi->gi_irq);
1062 }
1063 
/*
 * pic_bind_intr method: route an SPI to a CPU by programming
 * GICD_IROUTER with that CPU's affinity.  With no explicit CPU set,
 * round-robin over all CPUs; otherwise use the first CPU in the set
 * (the hardware routes to a single target in affinity mode).
 */
static int
gic_v3_bind_intr(device_t dev, struct intr_irqsrc *isrc)
{
	struct gic_v3_softc *sc;
	struct gic_v3_irqsrc *gi;
	int cpu;

	gi = (struct gic_v3_irqsrc *)isrc;

	KASSERT(gi->gi_irq >= GIC_FIRST_SPI && gi->gi_irq <= GIC_LAST_SPI,
	    ("%s: Attempting to bind an invalid IRQ", __func__));

	sc = device_get_softc(dev);

	if (CPU_EMPTY(&isrc->isrc_cpu)) {
		/* No affinity requested: spread SPIs round-robin. */
		gic_irq_cpu = intr_irq_next_cpu(gic_irq_cpu, &all_cpus);
		CPU_SETOF(gic_irq_cpu, &isrc->isrc_cpu);
		gic_d_write(sc, 8, GICD_IROUTER(gi->gi_irq),
		    CPU_AFFINITY(gic_irq_cpu));
	} else {
		/*
		 * We can only bind to a single CPU so select
		 * the first CPU found.
		 */
		cpu = CPU_FFS(&isrc->isrc_cpu) - 1;
		gic_d_write(sc, 8, GICD_IROUTER(gi->gi_irq), CPU_AFFINITY(cpu));
	}

	return (0);
}
1094 
1095 #ifdef SMP
/*
 * Per-CPU GIC initialization, run on each application processor as it
 * comes online: run the registered secondary init functions, unmask
 * any SGI/PPI sources already configured for this CPU, and give child
 * interrupt controllers a chance to do their own per-CPU setup.
 */
static void
gic_v3_init_secondary(device_t dev)
{
	struct gic_v3_setup_periph_args pargs;
	device_t child;
	struct gic_v3_softc *sc;
	gic_v3_initseq_t *init_func;
	struct intr_irqsrc *isrc;
	u_int cpu, irq;
	int err, i;

	sc = device_get_softc(dev);
	cpu = PCPU_GET(cpuid);

	/* Run the init sequence for this (secondary) CPU. */
	for (init_func = gic_v3_secondary_init; *init_func != NULL;
	    init_func++) {
		err = (*init_func)(sc);
		if (err != 0) {
			device_printf(dev,
			    "Could not initialize GIC for CPU%u\n", cpu);
			return;
		}
	}

	pargs.dev = dev;

	/* Unmask attached SGI interrupts. */
	for (irq = GIC_FIRST_SGI; irq <= GIC_LAST_SGI; irq++) {
		isrc = GIC_INTR_ISRC(sc, irq);
		if (intr_isrc_init_on_cpu(isrc, cpu)) {
			pargs.isrc = isrc;
			gic_v3_enable_intr_periph(&pargs);
		}
	}

	/* Configure and unmask attached PPI interrupts. */
	for (irq = GIC_FIRST_PPI; irq <= GIC_LAST_PPI; irq++) {
		isrc = GIC_INTR_ISRC(sc, irq);
		if (intr_isrc_init_on_cpu(isrc, cpu)) {
			pargs.isrc = isrc;
			gic_v3_setup_intr_periph(&pargs);
			gic_v3_enable_intr_periph(&pargs);
		}
	}

	/* Let child interrupt controllers run their per-CPU setup. */
	for (i = 0; i < sc->gic_nchildren; i++) {
		child = sc->gic_children[i];
		PIC_INIT_SECONDARY(child);
	}
}
1147 
1148 static void
1149 gic_v3_ipi_send(device_t dev, struct intr_irqsrc *isrc, cpuset_t cpus,
1150     u_int ipi)
1151 {
1152 	struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
1153 	uint64_t aff, val, irq;
1154 	int i;
1155 
1156 #define	GIC_AFF_MASK	(CPU_AFF3_MASK | CPU_AFF2_MASK | CPU_AFF1_MASK)
1157 #define	GIC_AFFINITY(i)	(CPU_AFFINITY(i) & GIC_AFF_MASK)
1158 	aff = GIC_AFFINITY(0);
1159 	irq = gi->gi_irq;
1160 	val = 0;
1161 
1162 	/* Iterate through all CPUs in set */
1163 	for (i = 0; i <= mp_maxid; i++) {
1164 		/* Move to the next affinity group */
1165 		if (aff != GIC_AFFINITY(i)) {
1166 			/* Send the IPI */
1167 			if (val != 0) {
1168 				gic_icc_write(SGI1R, val);
1169 				val = 0;
1170 			}
1171 			aff = GIC_AFFINITY(i);
1172 		}
1173 
1174 		/* Send the IPI to this cpu */
1175 		if (CPU_ISSET(i, &cpus)) {
1176 #define	ICC_SGI1R_AFFINITY(aff)					\
1177     (((uint64_t)CPU_AFF3(aff) << ICC_SGI1R_EL1_AFF3_SHIFT) |	\
1178      ((uint64_t)CPU_AFF2(aff) << ICC_SGI1R_EL1_AFF2_SHIFT) |	\
1179      ((uint64_t)CPU_AFF1(aff) << ICC_SGI1R_EL1_AFF1_SHIFT))
1180 			/* Set the affinity when the first at this level */
1181 			if (val == 0)
1182 				val = ICC_SGI1R_AFFINITY(aff) |
1183 				    irq << ICC_SGI1R_EL1_SGIID_SHIFT;
1184 			/* Set the bit to send the IPI to te CPU */
1185 			val |= 1 << CPU_AFF0(CPU_AFFINITY(i));
1186 		}
1187 	}
1188 
1189 	/* Send the IPI to the last cpu affinity group */
1190 	if (val != 0)
1191 		gic_icc_write(SGI1R, val);
1192 #undef GIC_AFF_MASK
1193 #undef GIC_AFFINITY
1194 }
1195 
1196 static int
1197 gic_v3_ipi_setup(device_t dev, u_int ipi, struct intr_irqsrc **isrcp)
1198 {
1199 	struct intr_irqsrc *isrc;
1200 	struct gic_v3_softc *sc = device_get_softc(dev);
1201 
1202 	if (sgi_first_unused > GIC_LAST_SGI)
1203 		return (ENOSPC);
1204 
1205 	isrc = GIC_INTR_ISRC(sc, sgi_first_unused);
1206 	sgi_to_ipi[sgi_first_unused++] = ipi;
1207 
1208 	CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu);
1209 
1210 	*isrcp = isrc;
1211 	return (0);
1212 }
1213 #endif /* SMP */
1214 
1215 /*
1216  * Helper routines
1217  */
1218 static void
1219 gic_v3_wait_for_rwp(struct gic_v3_softc *sc, enum gic_v3_xdist xdist)
1220 {
1221 	struct resource *res;
1222 	bus_size_t offset;
1223 	u_int cpuid;
1224 	size_t us_left = 1000000;
1225 
1226 	cpuid = PCPU_GET(cpuid);
1227 
1228 	switch (xdist) {
1229 	case DIST:
1230 		res = sc->gic_dist;
1231 		offset = 0;
1232 		break;
1233 	case REDIST:
1234 		res = sc->gic_redists.pcpu[cpuid]->res;
1235 		offset = sc->gic_redists.pcpu[PCPU_GET(cpuid)]->offset;
1236 		break;
1237 	default:
1238 		KASSERT(0, ("%s: Attempt to wait for unknown RWP", __func__));
1239 		return;
1240 	}
1241 
1242 	while ((bus_read_4(res, offset + GICD_CTLR) & GICD_CTLR_RWP) != 0) {
1243 		DELAY(1);
1244 		if (us_left-- == 0)
1245 			panic("GICD Register write pending for too long");
1246 	}
1247 }
1248 
1249 /* CPU interface. */
/*
 * Set the CPU interface priority mask register (ICC_PMR_EL1) for the
 * current CPU.
 */
static __inline void
gic_v3_cpu_priority(uint64_t mask)
{

	/* Set priority mask */
	gic_icc_write(PMR, mask & ICC_PMR_EL1_PRIO_MASK);
}
1257 
/*
 * Enable ICC_SRE_EL1.SRE so the GIC CPU interface is driven through
 * system registers rather than memory-mapped accesses.  Returns 0 on
 * success or ENXIO if the bit cannot be set.
 */
static int
gic_v3_cpu_enable_sre(struct gic_v3_softc *sc)
{
	uint64_t sre;
	u_int cpuid;

	cpuid = PCPU_GET(cpuid);
	/*
	 * Set the SRE bit to enable access to GIC CPU interface
	 * via system registers.
	 */
	sre = READ_SPECIALREG(icc_sre_el1);
	sre |= ICC_SRE_EL1_SRE;
	WRITE_SPECIALREG(icc_sre_el1, sre);
	isb();
	/*
	 * Now ensure that the bit is set.
	 */
	sre = READ_SPECIALREG(icc_sre_el1);
	if ((sre & ICC_SRE_EL1_SRE) == 0) {
		/*
		 * The write did not stick: system-register access was
		 * presumably disabled at a higher exception level (EL2),
		 * so this GIC cannot be used from here.
		 */
		device_printf(sc->dev, "ERROR: CPU%u cannot enable CPU interface "
		    "via system registers\n", cpuid);
		return (ENXIO);
	} else if (bootverbose) {
		device_printf(sc->dev,
		    "CPU%u enabled CPU interface via system registers\n",
		    cpuid);
	}

	return (0);
}
1290 
/*
 * Per-CPU GIC CPU interface initialization: enable system-register
 * access, open the priority mask, clear EOI mode and enable Group 1
 * interrupt signaling.  Returns 0 or an error from the SRE enable.
 */
static int
gic_v3_cpu_init(struct gic_v3_softc *sc)
{
	int err;

	/* Enable access to CPU interface via system registers */
	err = gic_v3_cpu_enable_sre(sc);
	if (err != 0)
		return (err);
	/* Priority mask to minimum - accept all interrupts */
	gic_v3_cpu_priority(GIC_PRIORITY_MIN);
	/* Disable EOI mode */
	gic_icc_clear(CTLR, ICC_CTLR_EL1_EOIMODE);
	/*
	 * Enable group 1 (Non-secure) interrupts.
	 * NOTE(review): the ICC_IGRPEN0_EL1_EN constant is written to the
	 * IGRPEN1 register; presumably the enable bit occupies the same
	 * position in both IGRPENn registers -- confirm against the header.
	 */
	gic_icc_set(IGRPEN1, ICC_IGRPEN0_EL1_EN);

	return (0);
}
1309 
/* Distributor */

/*
 * One-time distributor initialization performed with the distributor
 * disabled: group, trigger, priority and enable state are programmed
 * for all SPIs, then the distributor is re-enabled with affinity
 * routing and all SPIs routed to the boot CPU.  Always returns 0.
 */
static int
gic_v3_dist_init(struct gic_v3_softc *sc)
{
	uint64_t aff;
	u_int i;

	/*
	 * 1. Disable the Distributor
	 */
	gic_d_write(sc, 4, GICD_CTLR, 0);
	gic_v3_wait_for_rwp(sc, DIST);

	/*
	 * 2. Configure the Distributor
	 */
	/* Set all SPIs to be Group 1 Non-secure */
	for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i += GICD_I_PER_IGROUPRn)
		gic_d_write(sc, 4, GICD_IGROUPR(i), 0xFFFFFFFF);

	/* Set all global interrupts to be level triggered, active low. */
	for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i += GICD_I_PER_ICFGRn)
		gic_d_write(sc, 4, GICD_ICFGR(i), 0x00000000);

	/* Set priority to all shared interrupts */
	for (i = GIC_FIRST_SPI;
	    i < sc->gic_nirqs; i += GICD_I_PER_IPRIORITYn) {
		/* Set highest priority */
		gic_d_write(sc, 4, GICD_IPRIORITYR(i), GIC_PRIORITY_MAX);
	}

	/*
	 * Disable all interrupts. Leave PPI and SGIs as they are enabled in
	 * Re-Distributor registers.
	 */
	for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i += GICD_I_PER_ISENABLERn)
		gic_d_write(sc, 4, GICD_ICENABLER(i), 0xFFFFFFFF);

	gic_v3_wait_for_rwp(sc, DIST);

	/*
	 * 3. Enable Distributor
	 */
	/* Enable Distributor with ARE, Group 1 */
	gic_d_write(sc, 4, GICD_CTLR, GICD_CTLR_ARE_NS | GICD_CTLR_G1A |
	    GICD_CTLR_G1);

	/*
	 * 4. Route all interrupts to boot CPU.
	 */
	aff = CPU_AFFINITY(0);
	for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i++)
		gic_d_write(sc, 8, GICD_IROUTER(i), aff);

	return (0);
}
1366 
1367 /* Re-Distributor */
1368 static int
1369 gic_v3_redist_alloc(struct gic_v3_softc *sc)
1370 {
1371 	u_int cpuid;
1372 
1373 	/* Allocate struct resource for all CPU's Re-Distributor registers */
1374 	for (cpuid = 0; cpuid <= mp_maxid; cpuid++)
1375 		if (CPU_ISSET(cpuid, &all_cpus) != 0)
1376 			sc->gic_redists.pcpu[cpuid] =
1377 				malloc(sizeof(*sc->gic_redists.pcpu[0]),
1378 				    M_GIC_V3, M_WAITOK);
1379 		else
1380 			sc->gic_redists.pcpu[cpuid] = NULL;
1381 	return (0);
1382 }
1383 
/*
 * Locate the current CPU's re-distributor by walking every
 * re-distributor region and comparing each frame's GICR_TYPER affinity
 * field against this CPU's affinity.  On a match the resource, frame
 * offset and LPI state are recorded in the per-CPU slot and 0 is
 * returned; otherwise ENODEV (bad region architecture) or ENXIO (not
 * found) is returned.
 */
static int
gic_v3_redist_find(struct gic_v3_softc *sc)
{
	struct resource *r_res;
	bus_size_t offset;
	uint64_t aff;
	uint64_t typer;
	uint32_t pidr2;
	u_int cpuid;
	size_t i;

	cpuid = PCPU_GET(cpuid);

	aff = CPU_AFFINITY(cpuid);
	/* Affinity in format for comparison with typer */
	aff = (CPU_AFF3(aff) << 24) | (CPU_AFF2(aff) << 16) |
	    (CPU_AFF1(aff) << 8) | CPU_AFF0(aff);

	if (bootverbose) {
		device_printf(sc->dev,
		    "Start searching for Re-Distributor\n");
	}
	/* Iterate through Re-Distributor regions */
	for (i = 0; i < sc->gic_redists.nregions; i++) {
		/* Take a copy of the region's resource */
		r_res = sc->gic_redists.regions[i];

		/* Reject regions that do not identify as GICv3/GICv4. */
		pidr2 = bus_read_4(r_res, GICR_PIDR2);
		switch (GICR_PIDR2_ARCH(pidr2)) {
		case GICR_PIDR2_ARCH_GICv3: /* fall through */
		case GICR_PIDR2_ARCH_GICv4:
			break;
		default:
			device_printf(sc->dev,
			    "No Re-Distributor found for CPU%u\n", cpuid);
			return (ENODEV);
		}

		/* Walk the frames within the region. */
		offset = 0;
		do {
			typer = bus_read_8(r_res, offset + GICR_TYPER);
			if ((typer >> GICR_TYPER_AFF_SHIFT) == aff) {
				KASSERT(sc->gic_redists.pcpu[cpuid] != NULL,
				    ("Invalid pointer to per-CPU redistributor"));
				/* Copy res contents to its final destination */
				sc->gic_redists.pcpu[cpuid]->res = r_res;
				sc->gic_redists.pcpu[cpuid]->offset = offset;
				sc->gic_redists.pcpu[cpuid]->lpi_enabled = false;
				if (bootverbose) {
					device_printf(sc->dev,
					    "CPU%u Re-Distributor has been found\n",
					    cpuid);
				}
				return (0);
			}

			/*
			 * Each frame has RD and SGI pages; with VLPI
			 * support (GICR_TYPER.VLPIS) two more follow.
			 */
			offset += (GICR_RD_BASE_SIZE + GICR_SGI_BASE_SIZE);
			if ((typer & GICR_TYPER_VLPIS) != 0) {
				offset +=
				    (GICR_VLPI_BASE_SIZE + GICR_RESERVED_SIZE);
			}
		} while (offset < rman_get_size(r_res) &&
		    (typer & GICR_TYPER_LAST) == 0);
	}

	device_printf(sc->dev, "No Re-Distributor found for CPU%u\n", cpuid);
	return (ENXIO);
}
1452 
/*
 * Wake the current CPU's re-distributor by clearing
 * GICR_WAKER.ProcessorSleep and waiting (up to ~1s) for
 * GICR_WAKER.ChildrenAsleep to clear.  Panics on timeout; otherwise
 * returns 0.
 */
static int
gic_v3_redist_wake(struct gic_v3_softc *sc)
{
	uint32_t waker;
	size_t us_left = 1000000;

	waker = gic_r_read(sc, 4, GICR_WAKER);
	/* Wake up Re-Distributor for this CPU */
	waker &= ~GICR_WAKER_PS;
	gic_r_write(sc, 4, GICR_WAKER, waker);
	/*
	 * When clearing ProcessorSleep bit it is required to wait for
	 * ChildrenAsleep to become zero following the processor power-on.
	 */
	while ((gic_r_read(sc, 4, GICR_WAKER) & GICR_WAKER_CA) != 0) {
		DELAY(1);
		if (us_left-- == 0) {
			panic("Could not wake Re-Distributor for CPU%u",
			    PCPU_GET(cpuid));
		}
	}

	if (bootverbose) {
		device_printf(sc->dev, "CPU%u Re-Distributor woke up\n",
		    PCPU_GET(cpuid));
	}

	return (0);
}
1482 
/*
 * Per-CPU re-distributor initialization: locate and wake this CPU's
 * re-distributor, then program group, enable state and priority for
 * the banked SGI/PPI interrupts.  Returns 0 or an error from the
 * find/wake steps.
 */
static int
gic_v3_redist_init(struct gic_v3_softc *sc)
{
	int err;
	size_t i;

	err = gic_v3_redist_find(sc);
	if (err != 0)
		return (err);

	err = gic_v3_redist_wake(sc);
	if (err != 0)
		return (err);

	/* Configure SGIs and PPIs to be Group1 Non-secure */
	gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICR_IGROUPR0,
	    0xFFFFFFFF);

	/* Disable PPIs (the mask written covers the PPI bits) */
	gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICR_ICENABLER0,
	    GICR_I_ENABLER_PPI_MASK);
	/* Enable SGIs */
	gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICR_ISENABLER0,
	    GICR_I_ENABLER_SGI_MASK);

	/* Set priority for SGIs and PPIs */
	for (i = 0; i <= GIC_LAST_PPI; i += GICR_I_PER_IPRIORITYn) {
		gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_IPRIORITYR(i),
		    GIC_PRIORITY_MAX);
	}

	gic_v3_wait_for_rwp(sc, REDIST);

	return (0);
}
1518 
1519 /*
1520  * SPI-mapped Message Based Interrupts -- a GICv3 MSI/MSI-X controller.
1521  */
1522 
/*
 * Allocate 'count' consecutive MBI interrupts from the
 * [mbi_start, mbi_start + mbi_count) window, starting on an interrupt
 * aligned to 'maxcount' (both are powers of two).  On success the
 * matching irqsrc pointers are stored in 'isrc' and 0 is returned;
 * ENXIO if no suitably aligned free run exists.
 */
static int
gic_v3_gic_alloc_msi(device_t dev, u_int mbi_start, u_int mbi_count,
    int count, int maxcount, struct intr_irqsrc **isrc)
{
	struct gic_v3_softc *sc;
	int i, irq, end_irq;
	bool found;

	KASSERT(powerof2(count), ("%s: bad count", __func__));
	KASSERT(powerof2(maxcount), ("%s: bad maxcount", __func__));

	sc = device_get_softc(dev);

	mtx_lock(&sc->gic_mbi_mtx);

	found = false;
	for (irq = mbi_start; irq < mbi_start + mbi_count; irq++) {
		/* Start on an aligned interrupt */
		if ((irq & (maxcount - 1)) != 0)
			continue;

		/* Assume we found a valid range until shown otherwise */
		found = true;

		/* Check this range is valid */
		for (end_irq = irq; end_irq != irq + count; end_irq++) {
			/* No free interrupts */
			if (end_irq == mbi_start + mbi_count) {
				found = false;
				break;
			}

			KASSERT((sc->gic_irqs[end_irq].gi_flags & GI_FLAG_MSI)!= 0,
			    ("%s: Non-MSI interrupt found", __func__));

			/* This is already used */
			if ((sc->gic_irqs[end_irq].gi_flags & GI_FLAG_MSI_USED) ==
			    GI_FLAG_MSI_USED) {
				found = false;
				break;
			}
		}
		if (found)
			break;
	}

	/* Not enough interrupts were found */
	if (!found || irq == mbi_start + mbi_count) {
		mtx_unlock(&sc->gic_mbi_mtx);
		return (ENXIO);
	}

	/* Claim the whole run while still holding the lock. */
	for (i = 0; i < count; i++) {
		/* Mark the interrupt as used */
		sc->gic_irqs[irq + i].gi_flags |= GI_FLAG_MSI_USED;
	}
	mtx_unlock(&sc->gic_mbi_mtx);

	/* Hand the claimed sources back to the caller. */
	for (i = 0; i < count; i++)
		isrc[i] = (struct intr_irqsrc *)&sc->gic_irqs[irq + i];

	return (0);
}
1586 
1587 static int
1588 gic_v3_gic_release_msi(device_t dev, int count, struct intr_irqsrc **isrc)
1589 {
1590 	struct gic_v3_softc *sc;
1591 	struct gic_v3_irqsrc *gi;
1592 	int i;
1593 
1594 	sc = device_get_softc(dev);
1595 
1596 	mtx_lock(&sc->gic_mbi_mtx);
1597 	for (i = 0; i < count; i++) {
1598 		gi = (struct gic_v3_irqsrc *)isrc[i];
1599 
1600 		KASSERT((gi->gi_flags & GI_FLAG_MSI_USED) == GI_FLAG_MSI_USED,
1601 		    ("%s: Trying to release an unused MSI-X interrupt",
1602 		    __func__));
1603 
1604 		gi->gi_flags &= ~GI_FLAG_MSI_USED;
1605 	}
1606 	mtx_unlock(&sc->gic_mbi_mtx);
1607 
1608 	return (0);
1609 }
1610 
1611 static int
1612 gic_v3_gic_alloc_msix(device_t dev, u_int mbi_start, u_int mbi_count,
1613     struct intr_irqsrc **isrcp)
1614 {
1615 	struct gic_v3_softc *sc;
1616 	int irq;
1617 
1618 	sc = device_get_softc(dev);
1619 
1620 	mtx_lock(&sc->gic_mbi_mtx);
1621 	/* Find an unused interrupt */
1622 	for (irq = mbi_start; irq < mbi_start + mbi_count; irq++) {
1623 		KASSERT((sc->gic_irqs[irq].gi_flags & GI_FLAG_MSI) != 0,
1624 		    ("%s: Non-MSI interrupt found", __func__));
1625 		if ((sc->gic_irqs[irq].gi_flags & GI_FLAG_MSI_USED) == 0)
1626 			break;
1627 	}
1628 	/* No free interrupt was found */
1629 	if (irq == mbi_start + mbi_count) {
1630 		mtx_unlock(&sc->gic_mbi_mtx);
1631 		return (ENXIO);
1632 	}
1633 
1634 	/* Mark the interrupt as used */
1635 	sc->gic_irqs[irq].gi_flags |= GI_FLAG_MSI_USED;
1636 	mtx_unlock(&sc->gic_mbi_mtx);
1637 
1638 	*isrcp = (struct intr_irqsrc *)&sc->gic_irqs[irq];
1639 
1640 	return (0);
1641 }
1642 
1643 static int
1644 gic_v3_gic_release_msix(device_t dev, struct intr_irqsrc *isrc)
1645 {
1646 	struct gic_v3_softc *sc;
1647 	struct gic_v3_irqsrc *gi;
1648 
1649 	sc = device_get_softc(dev);
1650 	gi = (struct gic_v3_irqsrc *)isrc;
1651 
1652 	KASSERT((gi->gi_flags & GI_FLAG_MSI_USED) == GI_FLAG_MSI_USED,
1653 	    ("%s: Trying to release an unused MSI-X interrupt", __func__));
1654 
1655 	mtx_lock(&sc->gic_mbi_mtx);
1656 	gi->gi_flags &= ~GI_FLAG_MSI_USED;
1657 	mtx_unlock(&sc->gic_mbi_mtx);
1658 
1659 	return (0);
1660 }
1661 
1662 static int
1663 gic_v3_alloc_msi(device_t dev, device_t child, int count, int maxcount,
1664     device_t *pic, struct intr_irqsrc **isrc)
1665 {
1666 	struct gic_v3_softc *sc;
1667 	int error;
1668 
1669 	sc = device_get_softc(dev);
1670 	error = gic_v3_gic_alloc_msi(dev, sc->gic_mbi_start,
1671 	    sc->gic_mbi_end - sc->gic_mbi_start, count, maxcount, isrc);
1672 	if (error != 0)
1673 		return (error);
1674 
1675 	*pic = dev;
1676 	return (0);
1677 }
1678 
/*
 * MSI release method: forward to the generic MBI release code.
 */
static int
gic_v3_release_msi(device_t dev, device_t child, int count,
    struct intr_irqsrc **isrc)
{
	return (gic_v3_gic_release_msi(dev, count, isrc));
}
1685 
1686 static int
1687 gic_v3_alloc_msix(device_t dev, device_t child, device_t *pic,
1688     struct intr_irqsrc **isrc)
1689 {
1690 	struct gic_v3_softc *sc;
1691 	int error;
1692 
1693 	sc = device_get_softc(dev);
1694 	error = gic_v3_gic_alloc_msix(dev, sc->gic_mbi_start,
1695 	    sc->gic_mbi_end - sc->gic_mbi_start, isrc);
1696 	if (error != 0)
1697 		return (error);
1698 
1699 	*pic = dev;
1700 
1701 	return (0);
1702 }
1703 
/*
 * MSI-X release method: forward to the generic MBI release code.
 */
static int
gic_v3_release_msix(device_t dev, device_t child, struct intr_irqsrc *isrc)
{
	return (gic_v3_gic_release_msix(dev, isrc));
}
1709 
1710 static int
1711 gic_v3_map_msi(device_t dev, device_t child, struct intr_irqsrc *isrc,
1712     uint64_t *addr, uint32_t *data)
1713 {
1714 	struct gic_v3_softc *sc = device_get_softc(dev);
1715 	struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
1716 
1717 	*addr = vtophys(rman_get_virtual(sc->gic_dist)) + GICD_SETSPI_NSR;
1718 	*data = gi->gi_irq;
1719 
1720 	return (0);
1721 }
1722