xref: /freebsd/sys/arm64/arm64/gic_v3.c (revision e17f5b1d)
1 /*-
2  * Copyright (c) 2015-2016 The FreeBSD Foundation
3  * All rights reserved.
4  *
5  * This software was developed by Andrew Turner under
6  * the sponsorship of the FreeBSD Foundation.
7  *
8  * This software was developed by Semihalf under
9  * the sponsorship of the FreeBSD Foundation.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  */
32 
33 #include "opt_acpi.h"
34 #include "opt_platform.h"
35 
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD$");
38 
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/bitstring.h>
42 #include <sys/bus.h>
43 #include <sys/kernel.h>
44 #include <sys/ktr.h>
45 #include <sys/malloc.h>
46 #include <sys/module.h>
47 #include <sys/rman.h>
48 #include <sys/pcpu.h>
49 #include <sys/proc.h>
50 #include <sys/cpuset.h>
51 #include <sys/lock.h>
52 #include <sys/mutex.h>
53 #include <sys/smp.h>
54 
55 #include <vm/vm.h>
56 #include <vm/pmap.h>
57 
58 #include <machine/bus.h>
59 #include <machine/cpu.h>
60 #include <machine/intr.h>
61 
62 #ifdef FDT
63 #include <dev/fdt/fdt_intr.h>
64 #include <dev/ofw/ofw_bus_subr.h>
65 #endif
66 
67 #ifdef DEV_ACPI
68 #include <contrib/dev/acpica/include/acpi.h>
69 #include <dev/acpica/acpivar.h>
70 #endif
71 
72 #include "pic_if.h"
73 
74 #include <arm/arm/gic_common.h>
75 #include "gic_v3_reg.h"
76 #include "gic_v3_var.h"
77 
/* Bus interface methods implemented by this driver. */
static bus_get_domain_t gic_v3_get_domain;
static bus_read_ivar_t gic_v3_read_ivar;

/* Interrupt controller (PIC) interface methods implemented here. */
static pic_disable_intr_t gic_v3_disable_intr;
static pic_enable_intr_t gic_v3_enable_intr;
static pic_map_intr_t gic_v3_map_intr;
static pic_setup_intr_t gic_v3_setup_intr;
static pic_teardown_intr_t gic_v3_teardown_intr;
static pic_post_filter_t gic_v3_post_filter;
static pic_post_ithread_t gic_v3_post_ithread;
static pic_pre_ithread_t gic_v3_pre_ithread;
static pic_bind_intr_t gic_v3_bind_intr;
#ifdef SMP
static pic_init_secondary_t gic_v3_init_secondary;
static pic_ipi_send_t gic_v3_ipi_send;
static pic_ipi_setup_t gic_v3_ipi_setup;
#endif

/* CPU that most recently received a round-robin routed SPI. */
static u_int gic_irq_cpu;
#ifdef SMP
/* SGI number -> IPI number mapping, filled in by gic_v3_ipi_setup(). */
static u_int sgi_to_ipi[GIC_LAST_SGI - GIC_FIRST_SGI + 1];
/* Next SGI available for allocation as an IPI. */
static u_int sgi_first_unused = GIC_FIRST_SGI;
#endif
101 
static device_method_t gic_v3_methods[] = {
	/*
	 * Device interface.
	 * NOTE(review): no device_attach entry here; gic_v3_attach() is
	 * presumably called directly by bus-specific front-ends — confirm.
	 */
	DEVMETHOD(device_detach,	gic_v3_detach),

	/* Bus interface */
	DEVMETHOD(bus_get_domain,	gic_v3_get_domain),
	DEVMETHOD(bus_read_ivar,	gic_v3_read_ivar),

	/* Interrupt controller interface */
	DEVMETHOD(pic_disable_intr,	gic_v3_disable_intr),
	DEVMETHOD(pic_enable_intr,	gic_v3_enable_intr),
	DEVMETHOD(pic_map_intr,		gic_v3_map_intr),
	DEVMETHOD(pic_setup_intr,	gic_v3_setup_intr),
	DEVMETHOD(pic_teardown_intr,	gic_v3_teardown_intr),
	DEVMETHOD(pic_post_filter,	gic_v3_post_filter),
	DEVMETHOD(pic_post_ithread,	gic_v3_post_ithread),
	DEVMETHOD(pic_pre_ithread,	gic_v3_pre_ithread),
#ifdef SMP
	DEVMETHOD(pic_bind_intr,	gic_v3_bind_intr),
	DEVMETHOD(pic_init_secondary,	gic_v3_init_secondary),
	DEVMETHOD(pic_ipi_send,		gic_v3_ipi_send),
	DEVMETHOD(pic_ipi_setup,	gic_v3_ipi_setup),
#endif

	/* End */
	DEVMETHOD_END
};

/* Base "gic" class shared by the bus-specific attachments. */
DEFINE_CLASS_0(gic, gic_v3_driver, gic_v3_methods,
    sizeof(struct gic_v3_softc));
132 
/*
 * Driver-specific definitions.
 */
/* Malloc type for all of this driver's dynamic allocations. */
MALLOC_DEFINE(M_GIC_V3, "GICv3", GIC_V3_DEVSTR);
137 
/*
 * Helper functions and definitions.
 */
/*
 * Destination registers, either Distributor or Re-Distributor.
 * Used to select which block's register-write-pending bit to poll
 * in gic_v3_wait_for_rwp().
 */
enum gic_v3_xdist {
	DIST = 0,
	REDIST,
};
146 
/*
 * Per-interrupt source state.  The embedded intr_irqsrc must stay the
 * first member: the PIC methods cast between the two pointer types.
 */
struct gic_v3_irqsrc {
	struct intr_irqsrc	gi_isrc;	/* generic framework state */
	uint32_t		gi_irq;		/* GIC interrupt ID (SGI/PPI/SPI) */
	enum intr_polarity	gi_pol;		/* configured polarity */
	enum intr_trigger	gi_trig;	/* configured trigger mode */
};
153 
/* Helper routines starting with gic_v3_ */
static int gic_v3_dist_init(struct gic_v3_softc *);
static int gic_v3_redist_alloc(struct gic_v3_softc *);
static int gic_v3_redist_find(struct gic_v3_softc *);
static int gic_v3_redist_init(struct gic_v3_softc *);
static int gic_v3_cpu_init(struct gic_v3_softc *);
static void gic_v3_wait_for_rwp(struct gic_v3_softc *, enum gic_v3_xdist);

/* A sequence of init functions for primary (boot) CPU */
typedef int (*gic_v3_initseq_t) (struct gic_v3_softc *);
/*
 * Primary CPU initialization sequence; a NULL-terminated table run in
 * order by gic_v3_attach().
 */
static gic_v3_initseq_t gic_v3_primary_init[] = {
	gic_v3_dist_init,
	gic_v3_redist_alloc,
	gic_v3_redist_init,
	gic_v3_cpu_init,
	NULL
};

#ifdef SMP
/*
 * Secondary CPU initialization sequence, run from gic_v3_init_secondary().
 * The Distributor is already set up, so only per-CPU state is initialized.
 */
static gic_v3_initseq_t gic_v3_secondary_init[] = {
	gic_v3_redist_init,
	gic_v3_cpu_init,
	NULL
};
#endif
181 
182 uint32_t
183 gic_r_read_4(device_t dev, bus_size_t offset)
184 {
185 	struct gic_v3_softc *sc;
186 	struct resource *rdist;
187 
188 	sc = device_get_softc(dev);
189 	rdist = &sc->gic_redists.pcpu[PCPU_GET(cpuid)]->res;
190 	return (bus_read_4(rdist, offset));
191 }
192 
193 uint64_t
194 gic_r_read_8(device_t dev, bus_size_t offset)
195 {
196 	struct gic_v3_softc *sc;
197 	struct resource *rdist;
198 
199 	sc = device_get_softc(dev);
200 	rdist = &sc->gic_redists.pcpu[PCPU_GET(cpuid)]->res;
201 	return (bus_read_8(rdist, offset));
202 }
203 
204 void
205 gic_r_write_4(device_t dev, bus_size_t offset, uint32_t val)
206 {
207 	struct gic_v3_softc *sc;
208 	struct resource *rdist;
209 
210 	sc = device_get_softc(dev);
211 	rdist = &sc->gic_redists.pcpu[PCPU_GET(cpuid)]->res;
212 	bus_write_4(rdist, offset, val);
213 }
214 
215 void
216 gic_r_write_8(device_t dev, bus_size_t offset, uint64_t val)
217 {
218 	struct gic_v3_softc *sc;
219 	struct resource *rdist;
220 
221 	sc = device_get_softc(dev);
222 	rdist = &sc->gic_redists.pcpu[PCPU_GET(cpuid)]->res;
223 	bus_write_8(rdist, offset, val);
224 }
225 
226 /*
227  * Device interface.
228  */
229 int
230 gic_v3_attach(device_t dev)
231 {
232 	struct gic_v3_softc *sc;
233 	gic_v3_initseq_t *init_func;
234 	uint32_t typer;
235 	int rid;
236 	int err;
237 	size_t i;
238 	u_int irq;
239 	const char *name;
240 
241 	sc = device_get_softc(dev);
242 	sc->gic_registered = FALSE;
243 	sc->dev = dev;
244 	err = 0;
245 
246 	/* Initialize mutex */
247 	mtx_init(&sc->gic_mtx, "GICv3 lock", NULL, MTX_SPIN);
248 
249 	/*
250 	 * Allocate array of struct resource.
251 	 * One entry for Distributor and all remaining for Re-Distributor.
252 	 */
253 	sc->gic_res = malloc(
254 	    sizeof(*sc->gic_res) * (sc->gic_redists.nregions + 1),
255 	    M_GIC_V3, M_WAITOK);
256 
257 	/* Now allocate corresponding resources */
258 	for (i = 0, rid = 0; i < (sc->gic_redists.nregions + 1); i++, rid++) {
259 		sc->gic_res[rid] = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
260 		    &rid, RF_ACTIVE);
261 		if (sc->gic_res[rid] == NULL)
262 			return (ENXIO);
263 	}
264 
265 	/*
266 	 * Distributor interface
267 	 */
268 	sc->gic_dist = sc->gic_res[0];
269 
270 	/*
271 	 * Re-Dristributor interface
272 	 */
273 	/* Allocate space under region descriptions */
274 	sc->gic_redists.regions = malloc(
275 	    sizeof(*sc->gic_redists.regions) * sc->gic_redists.nregions,
276 	    M_GIC_V3, M_WAITOK);
277 
278 	/* Fill-up bus_space information for each region. */
279 	for (i = 0, rid = 1; i < sc->gic_redists.nregions; i++, rid++)
280 		sc->gic_redists.regions[i] = sc->gic_res[rid];
281 
282 	/* Get the number of supported SPI interrupts */
283 	typer = gic_d_read(sc, 4, GICD_TYPER);
284 	sc->gic_nirqs = GICD_TYPER_I_NUM(typer);
285 	if (sc->gic_nirqs > GIC_I_NUM_MAX)
286 		sc->gic_nirqs = GIC_I_NUM_MAX;
287 
288 	sc->gic_irqs = malloc(sizeof(*sc->gic_irqs) * sc->gic_nirqs,
289 	    M_GIC_V3, M_WAITOK | M_ZERO);
290 	name = device_get_nameunit(dev);
291 	for (irq = 0; irq < sc->gic_nirqs; irq++) {
292 		struct intr_irqsrc *isrc;
293 
294 		sc->gic_irqs[irq].gi_irq = irq;
295 		sc->gic_irqs[irq].gi_pol = INTR_POLARITY_CONFORM;
296 		sc->gic_irqs[irq].gi_trig = INTR_TRIGGER_CONFORM;
297 
298 		isrc = &sc->gic_irqs[irq].gi_isrc;
299 		if (irq <= GIC_LAST_SGI) {
300 			err = intr_isrc_register(isrc, sc->dev,
301 			    INTR_ISRCF_IPI, "%s,i%u", name, irq - GIC_FIRST_SGI);
302 		} else if (irq <= GIC_LAST_PPI) {
303 			err = intr_isrc_register(isrc, sc->dev,
304 			    INTR_ISRCF_PPI, "%s,p%u", name, irq - GIC_FIRST_PPI);
305 		} else {
306 			err = intr_isrc_register(isrc, sc->dev, 0,
307 			    "%s,s%u", name, irq - GIC_FIRST_SPI);
308 		}
309 		if (err != 0) {
310 			/* XXX call intr_isrc_deregister() */
311 			free(sc->gic_irqs, M_DEVBUF);
312 			return (err);
313 		}
314 	}
315 
316 	/*
317 	 * Read the Peripheral ID2 register. This is an implementation
318 	 * defined register, but seems to be implemented in all GICv3
319 	 * parts and Linux expects it to be there.
320 	 */
321 	sc->gic_pidr2 = gic_d_read(sc, 4, GICD_PIDR2);
322 
323 	/* Get the number of supported interrupt identifier bits */
324 	sc->gic_idbits = GICD_TYPER_IDBITS(typer);
325 
326 	if (bootverbose) {
327 		device_printf(dev, "SPIs: %u, IDs: %u\n",
328 		    sc->gic_nirqs, (1 << sc->gic_idbits) - 1);
329 	}
330 
331 	/* Train init sequence for boot CPU */
332 	for (init_func = gic_v3_primary_init; *init_func != NULL; init_func++) {
333 		err = (*init_func)(sc);
334 		if (err != 0)
335 			return (err);
336 	}
337 
338 	return (0);
339 }
340 
/*
 * Release all bus resources and driver allocations.  Panics rather than
 * detach while the driver is still registered as the system PIC.
 */
int
gic_v3_detach(device_t dev)
{
	struct gic_v3_softc *sc;
	size_t i;
	int rid;

	sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		/*
		 * XXX: We should probably deregister PIC
		 */
		if (sc->gic_registered)
			panic("Trying to detach registered PIC");
	}
	/* Release the Distributor (rid 0) and all Re-Distributor regions. */
	for (rid = 0; rid < (sc->gic_redists.nregions + 1); rid++)
		bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->gic_res[rid]);

	/* Entries for absent CPUs are NULL; free() tolerates that. */
	for (i = 0; i <= mp_maxid; i++)
		free(sc->gic_redists.pcpu[i], M_GIC_V3);

	free(sc->gic_res, M_GIC_V3);
	free(sc->gic_redists.regions, M_GIC_V3);

	return (0);
}
368 
369 static int
370 gic_v3_get_domain(device_t dev, device_t child, int *domain)
371 {
372 	struct gic_v3_devinfo *di;
373 
374 	di = device_get_ivars(child);
375 	if (di->gic_domain < 0)
376 		return (ENOENT);
377 
378 	*domain = di->gic_domain;
379 	return (0);
380 }
381 
/*
 * Export driver state to children via instance variables.
 */
static int
gic_v3_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
{
	struct gic_v3_softc *sc;

	sc = device_get_softc(dev);

	switch (which) {
	case GICV3_IVAR_NIRQS:
		/*
		 * Number of interrupt IDs above our own range, split
		 * evenly between the child devices.
		 */
		*result = (NIRQ - sc->gic_nirqs) / sc->gic_nchildren;
		return (0);
	case GICV3_IVAR_REDIST:
		/* Hand out the calling CPU's Re-Distributor state. */
		*result = (uintptr_t)sc->gic_redists.pcpu[PCPU_GET(cpuid)];
		return (0);
	case GIC_IVAR_HW_REV:
		/* Architecture revision (v3/v4) decoded from PIDR2. */
		KASSERT(
		    GICR_PIDR2_ARCH(sc->gic_pidr2) == GICR_PIDR2_ARCH_GICv3 ||
		    GICR_PIDR2_ARCH(sc->gic_pidr2) == GICR_PIDR2_ARCH_GICv4,
		    ("gic_v3_read_ivar: Invalid GIC architecture: %d (%.08X)",
		     GICR_PIDR2_ARCH(sc->gic_pidr2), sc->gic_pidr2));
		*result = GICR_PIDR2_ARCH(sc->gic_pidr2);
		return (0);
	case GIC_IVAR_BUS:
		/* Which bus (FDT/ACPI) attached us. */
		KASSERT(sc->gic_bus != GIC_BUS_UNKNOWN,
		    ("gic_v3_read_ivar: Unknown bus type"));
		KASSERT(sc->gic_bus <= GIC_BUS_MAX,
		    ("gic_v3_read_ivar: Invalid bus type %u", sc->gic_bus));
		*result = sc->gic_bus;
		return (0);
	}

	return (ENOENT);
}
415 
/*
 * Main interrupt filter.  Acknowledges interrupts from ICC_IAR1_EL1 in a
 * loop and dispatches each one; the loop only exits when an out-of-range
 * (spurious) interrupt ID is read.
 */
int
arm_gic_v3_intr(void *arg)
{
	struct gic_v3_softc *sc = arg;
	struct gic_v3_irqsrc *gi;
	struct intr_pic *pic;
	uint64_t active_irq;
	struct trapframe *tf;

	pic = sc->gic_pic;

	while (1) {
		if (CPU_MATCH_ERRATA_CAVIUM_THUNDERX_1_1) {
			/*
			 * Hardware:		Cavium ThunderX
			 * Chip revision:	Pass 1.0 (early version)
			 *			Pass 1.1 (production)
			 * ERRATUM:		22978, 23154
			 */
			__asm __volatile(
			    "nop;nop;nop;nop;nop;nop;nop;nop;	\n"
			    "mrs %0, ICC_IAR1_EL1		\n"
			    "nop;nop;nop;nop;			\n"
			    "dsb sy				\n"
			    : "=&r" (active_irq));
		} else {
			active_irq = gic_icc_read(IAR1);
		}

		/* LPIs and above belong to a child PIC (e.g. the ITS). */
		if (active_irq >= GIC_FIRST_LPI) {
			intr_child_irq_handler(pic, active_irq);
			continue;
		}

		/*
		 * Any ID past our range — including the spurious
		 * interrupt ID — terminates the acknowledge loop.
		 */
		if (__predict_false(active_irq >= sc->gic_nirqs))
			return (FILTER_HANDLED);

		tf = curthread->td_intr_frame;
		gi = &sc->gic_irqs[active_irq];
		if (active_irq <= GIC_LAST_SGI) {
			/* Call EOI for all IPI before dispatch. */
			gic_icc_write(EOIR1, (uint64_t)active_irq);
#ifdef SMP
			intr_ipi_dispatch(sgi_to_ipi[gi->gi_irq], tf);
#else
			device_printf(sc->dev, "SGI %ju on UP system detected\n",
			    (uintmax_t)(active_irq - GIC_FIRST_SGI));
#endif
		} else if (active_irq >= GIC_FIRST_PPI &&
		    active_irq <= GIC_LAST_SPI) {
			/* Edge interrupts are EOId before dispatch. */
			if (gi->gi_trig == INTR_TRIGGER_EDGE)
				gic_icc_write(EOIR1, gi->gi_irq);

			if (intr_isrc_dispatch(&gi->gi_isrc, tf) != 0) {
				/* Stray: complete (if pending) and mask it. */
				if (gi->gi_trig != INTR_TRIGGER_EDGE)
					gic_icc_write(EOIR1, gi->gi_irq);
				gic_v3_disable_intr(sc->dev, &gi->gi_isrc);
				device_printf(sc->dev,
				    "Stray irq %lu disabled\n", active_irq);
			}
		}
	}
}
479 
480 #ifdef FDT
/*
 * Decode an FDT interrupt specifier ("interrupts" property cells) into a
 * GIC interrupt number, polarity, and trigger mode.
 */
static int
gic_map_fdt(device_t dev, u_int ncells, pcell_t *cells, u_int *irqp,
    enum intr_polarity *polp, enum intr_trigger *trigp)
{
	u_int irq;

	if (ncells < 3)
		return (EINVAL);

	/*
	 * The 1st cell is the interrupt type:
	 *	0 = SPI
	 *	1 = PPI
	 * The 2nd cell contains the interrupt number:
	 *	[0 - 987] for SPI
	 *	[0 -  15] for PPI
	 * The 3rd cell is the flags, encoded as follows:
	 *   bits[3:0] trigger type and level flags
	 *	1 = edge triggered
	 *      2 = edge triggered (PPI only)
	 *	4 = level-sensitive
	 *	8 = level-sensitive (PPI only)
	 */
	switch (cells[0]) {
	case 0:
		irq = GIC_FIRST_SPI + cells[1];
		/* SPI irq is checked later. */
		break;
	case 1:
		irq = GIC_FIRST_PPI + cells[1];
		if (irq > GIC_LAST_PPI) {
			device_printf(dev, "unsupported PPI interrupt "
			    "number %u\n", cells[1]);
			return (EINVAL);
		}
		break;
	default:
		device_printf(dev, "unsupported interrupt type "
		    "configuration %u\n", cells[0]);
		return (EINVAL);
	}

	switch (cells[2] & FDT_INTR_MASK) {
	case FDT_INTR_EDGE_RISING:
		*trigp = INTR_TRIGGER_EDGE;
		*polp = INTR_POLARITY_HIGH;
		break;
	case FDT_INTR_EDGE_FALLING:
		*trigp = INTR_TRIGGER_EDGE;
		*polp = INTR_POLARITY_LOW;
		break;
	case FDT_INTR_LEVEL_HIGH:
		*trigp = INTR_TRIGGER_LEVEL;
		*polp = INTR_POLARITY_HIGH;
		break;
	case FDT_INTR_LEVEL_LOW:
		*trigp = INTR_TRIGGER_LEVEL;
		*polp = INTR_POLARITY_LOW;
		break;
	default:
		device_printf(dev, "unsupported trigger/polarity "
		    "configuration 0x%02x\n", cells[2]);
		return (EINVAL);
	}

	/* Check the interrupt is valid: SPIs must be active-high. */
	if (irq >= GIC_FIRST_SPI && *polp != INTR_POLARITY_HIGH)
		return (EINVAL);

	*irqp = irq;
	return (0);
}
553 #endif
554 
555 static int
556 gic_map_msi(device_t dev, struct intr_map_data_msi *msi_data, u_int *irqp,
557     enum intr_polarity *polp, enum intr_trigger *trigp)
558 {
559 	struct gic_v3_irqsrc *gi;
560 
561 	/* SPI-mapped MSI */
562 	gi = (struct gic_v3_irqsrc *)msi_data->isrc;
563 	if (gi == NULL)
564 		return (ENXIO);
565 
566 	*irqp = gi->gi_irq;
567 
568 	/* MSI/MSI-X interrupts are always edge triggered with high polarity */
569 	*polp = INTR_POLARITY_HIGH;
570 	*trigp = INTR_TRIGGER_EDGE;
571 
572 	return (0);
573 }
574 
/*
 * Common mapping helper: translate FDT, ACPI, or MSI mapping data into an
 * in-range interrupt number with validated polarity and trigger values.
 * polp/trigp may be NULL when the caller only needs the IRQ number.
 */
static int
do_gic_v3_map_intr(device_t dev, struct intr_map_data *data, u_int *irqp,
    enum intr_polarity *polp, enum intr_trigger *trigp)
{
	struct gic_v3_softc *sc;
	enum intr_polarity pol;
	enum intr_trigger trig;
	struct intr_map_data_msi *dam;
#ifdef FDT
	struct intr_map_data_fdt *daf;
#endif
#ifdef DEV_ACPI
	struct intr_map_data_acpi *daa;
#endif
	u_int irq;

	sc = device_get_softc(dev);

	switch (data->type) {
#ifdef FDT
	case INTR_MAP_DATA_FDT:
		daf = (struct intr_map_data_fdt *)data;
		if (gic_map_fdt(dev, daf->ncells, daf->cells, &irq, &pol,
		    &trig) != 0)
			return (EINVAL);
		break;
#endif
#ifdef DEV_ACPI
	case INTR_MAP_DATA_ACPI:
		/* ACPI data already carries decoded values. */
		daa = (struct intr_map_data_acpi *)data;
		irq = daa->irq;
		pol = daa->pol;
		trig = daa->trig;
		break;
#endif
	case INTR_MAP_DATA_MSI:
		/* SPI-mapped MSI */
		dam = (struct intr_map_data_msi *)data;
		if (gic_map_msi(dev, dam, &irq, &pol, &trig) != 0)
			return (EINVAL);
		break;
	default:
		return (EINVAL);
	}

	/* Reject IRQs outside this GIC's range and bogus enum values. */
	if (irq >= sc->gic_nirqs)
		return (EINVAL);
	switch (pol) {
	case INTR_POLARITY_CONFORM:
	case INTR_POLARITY_LOW:
	case INTR_POLARITY_HIGH:
		break;
	default:
		return (EINVAL);
	}
	switch (trig) {
	case INTR_TRIGGER_CONFORM:
	case INTR_TRIGGER_EDGE:
	case INTR_TRIGGER_LEVEL:
		break;
	default:
		return (EINVAL);
	}

	*irqp = irq;
	if (polp != NULL)
		*polp = pol;
	if (trigp != NULL)
		*trigp = trig;
	return (0);
}
646 
647 static int
648 gic_v3_map_intr(device_t dev, struct intr_map_data *data,
649     struct intr_irqsrc **isrcp)
650 {
651 	struct gic_v3_softc *sc;
652 	int error;
653 	u_int irq;
654 
655 	error = do_gic_v3_map_intr(dev, data, &irq, NULL, NULL);
656 	if (error == 0) {
657 		sc = device_get_softc(dev);
658 		*isrcp = GIC_INTR_ISRC(sc, irq);
659 	}
660 	return (error);
661 }
662 
/*
 * Configure an interrupt source's trigger and polarity on first setup;
 * later setups must request the identical configuration.
 */
static int
gic_v3_setup_intr(device_t dev, struct intr_irqsrc *isrc,
    struct resource *res, struct intr_map_data *data)
{
	struct gic_v3_softc *sc = device_get_softc(dev);
	struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
	enum intr_trigger trig;
	enum intr_polarity pol;
	uint32_t reg;
	u_int irq;
	int error;

	/* Nothing to configure without mapping data. */
	if (data == NULL)
		return (ENOTSUP);

	error = do_gic_v3_map_intr(dev, data, &irq, &pol, &trig);
	if (error != 0)
		return (error);

	/* Data must match this isrc and be fully specified. */
	if (gi->gi_irq != irq || pol == INTR_POLARITY_CONFORM ||
	    trig == INTR_TRIGGER_CONFORM)
		return (EINVAL);

	/* Compare config if this is not first setup. */
	if (isrc->isrc_handlers != 0) {
		if (pol != gi->gi_pol || trig != gi->gi_trig)
			return (EINVAL);
		else
			return (0);
	}

	gi->gi_pol = pol;
	gi->gi_trig = trig;

	/*
	 * XXX - In case that per CPU interrupt is going to be enabled in time
	 *       when SMP is already started, we need some IPI call which
	 *       enables it on others CPUs. Further, it's more complicated as
	 *       pic_enable_source() and pic_disable_source() should act on
	 *       per CPU basis only. Thus, it should be solved here somehow.
	 */
	if (isrc->isrc_flags & INTR_ISRCF_PPI)
		CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu);

	if (irq >= GIC_FIRST_PPI && irq <= GIC_LAST_SPI) {
		mtx_lock_spin(&sc->gic_mtx);

		/*
		 * Set the trigger and polarity.  PPIs live in the
		 * Re-Distributor's SGI page, SPIs in the Distributor.
		 */
		if (irq <= GIC_LAST_PPI)
			reg = gic_r_read(sc, 4,
			    GICR_SGI_BASE_SIZE + GICD_ICFGR(irq));
		else
			reg = gic_d_read(sc, 4, GICD_ICFGR(irq));
		/* 2 config bits per interrupt; the upper bit selects edge. */
		if (trig == INTR_TRIGGER_LEVEL)
			reg &= ~(2 << ((irq % 16) * 2));
		else
			reg |= 2 << ((irq % 16) * 2);

		if (irq <= GIC_LAST_PPI) {
			gic_r_write(sc, 4,
			    GICR_SGI_BASE_SIZE + GICD_ICFGR(irq), reg);
			gic_v3_wait_for_rwp(sc, REDIST);
		} else {
			gic_d_write(sc, 4, GICD_ICFGR(irq), reg);
			gic_v3_wait_for_rwp(sc, DIST);
		}

		mtx_unlock_spin(&sc->gic_mtx);

		gic_v3_bind_intr(dev, isrc);
	}

	return (0);
}
737 
738 static int
739 gic_v3_teardown_intr(device_t dev, struct intr_irqsrc *isrc,
740     struct resource *res, struct intr_map_data *data)
741 {
742 	struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
743 
744 	if (isrc->isrc_handlers == 0) {
745 		gi->gi_pol = INTR_POLARITY_CONFORM;
746 		gi->gi_trig = INTR_TRIGGER_CONFORM;
747 	}
748 
749 	return (0);
750 }
751 
/*
 * Mask an interrupt source via the appropriate ICENABLER register:
 * SGIs/PPIs in this CPU's Re-Distributor, SPIs in the Distributor.
 */
static void
gic_v3_disable_intr(device_t dev, struct intr_irqsrc *isrc)
{
	struct gic_v3_softc *sc;
	struct gic_v3_irqsrc *gi;
	u_int irq;

	sc = device_get_softc(dev);
	gi = (struct gic_v3_irqsrc *)isrc;
	irq = gi->gi_irq;

	if (irq <= GIC_LAST_PPI) {
		/* SGIs and PPIs in corresponding Re-Distributor */
		gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_ICENABLER(irq),
		    GICD_I_MASK(irq));
		gic_v3_wait_for_rwp(sc, REDIST);
	} else if (irq >= GIC_FIRST_SPI && irq <= GIC_LAST_SPI) {
		/* SPIs in distributor */
		gic_d_write(sc, 4, GICD_ICENABLER(irq), GICD_I_MASK(irq));
		gic_v3_wait_for_rwp(sc, DIST);
	} else
		panic("%s: Unsupported IRQ %u", __func__, irq);
}
775 
/*
 * Unmask an interrupt source via the appropriate ISENABLER register:
 * SGIs/PPIs in this CPU's Re-Distributor, SPIs in the Distributor.
 */
static void
gic_v3_enable_intr(device_t dev, struct intr_irqsrc *isrc)
{
	struct gic_v3_softc *sc;
	struct gic_v3_irqsrc *gi;
	u_int irq;

	sc = device_get_softc(dev);
	gi = (struct gic_v3_irqsrc *)isrc;
	irq = gi->gi_irq;

	if (irq <= GIC_LAST_PPI) {
		/* SGIs and PPIs in corresponding Re-Distributor */
		gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_ISENABLER(irq),
		    GICD_I_MASK(irq));
		gic_v3_wait_for_rwp(sc, REDIST);
	} else if (irq >= GIC_FIRST_SPI && irq <= GIC_LAST_SPI) {
		/* SPIs in distributor */
		gic_d_write(sc, 4, GICD_ISENABLER(irq), GICD_I_MASK(irq));
		gic_v3_wait_for_rwp(sc, DIST);
	} else
		panic("%s: Unsupported IRQ %u", __func__, irq);
}
799 
800 static void
801 gic_v3_pre_ithread(device_t dev, struct intr_irqsrc *isrc)
802 {
803 	struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
804 
805 	gic_v3_disable_intr(dev, isrc);
806 	gic_icc_write(EOIR1, gi->gi_irq);
807 }
808 
/* After the ithread completes: re-enable the source masked in pre_ithread. */
static void
gic_v3_post_ithread(device_t dev, struct intr_irqsrc *isrc)
{

	gic_v3_enable_intr(dev, isrc);
}
815 
816 static void
817 gic_v3_post_filter(device_t dev, struct intr_irqsrc *isrc)
818 {
819 	struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
820 
821 	if (gi->gi_trig == INTR_TRIGGER_EDGE)
822 		return;
823 
824 	gic_icc_write(EOIR1, gi->gi_irq);
825 }
826 
/*
 * Route an SPI to a CPU via GICD_IROUTER.  With no explicit binding the
 * SPI is spread round-robin over all CPUs; otherwise the first CPU in
 * the requested set is used (the hardware routes to a single target).
 */
static int
gic_v3_bind_intr(device_t dev, struct intr_irqsrc *isrc)
{
	struct gic_v3_softc *sc;
	struct gic_v3_irqsrc *gi;
	int cpu;

	gi = (struct gic_v3_irqsrc *)isrc;
	/* SGIs/PPIs are per-CPU by nature and cannot be rerouted. */
	if (gi->gi_irq <= GIC_LAST_PPI)
		return (EINVAL);

	KASSERT(gi->gi_irq >= GIC_FIRST_SPI && gi->gi_irq <= GIC_LAST_SPI,
	    ("%s: Attempting to bind an invalid IRQ", __func__));

	sc = device_get_softc(dev);

	if (CPU_EMPTY(&isrc->isrc_cpu)) {
		gic_irq_cpu = intr_irq_next_cpu(gic_irq_cpu, &all_cpus);
		CPU_SETOF(gic_irq_cpu, &isrc->isrc_cpu);
		gic_d_write(sc, 4, GICD_IROUTER(gi->gi_irq),
		    CPU_AFFINITY(gic_irq_cpu));
	} else {
		/*
		 * We can only bind to a single CPU so select
		 * the first CPU found.
		 */
		cpu = CPU_FFS(&isrc->isrc_cpu) - 1;
		gic_d_write(sc, 4, GICD_IROUTER(gi->gi_irq), CPU_AFFINITY(cpu));
	}

	return (0);
}
859 
860 #ifdef SMP
/*
 * Bring up the GIC on an application (secondary) CPU: run the per-CPU
 * init sequence, unmask any SGI/PPI sources already in use, then let
 * child PICs (e.g. the ITS) initialize their per-CPU state.
 */
static void
gic_v3_init_secondary(device_t dev)
{
	device_t child;
	struct gic_v3_softc *sc;
	gic_v3_initseq_t *init_func;
	struct intr_irqsrc *isrc;
	u_int cpu, irq;
	int err, i;

	sc = device_get_softc(dev);
	cpu = PCPU_GET(cpuid);

	/* Run the init sequence for this secondary CPU. */
	for (init_func = gic_v3_secondary_init; *init_func != NULL;
	    init_func++) {
		err = (*init_func)(sc);
		if (err != 0) {
			device_printf(dev,
			    "Could not initialize GIC for CPU%u\n", cpu);
			return;
		}
	}

	/* Unmask attached SGI interrupts. */
	for (irq = GIC_FIRST_SGI; irq <= GIC_LAST_SGI; irq++) {
		isrc = GIC_INTR_ISRC(sc, irq);
		if (intr_isrc_init_on_cpu(isrc, cpu))
			gic_v3_enable_intr(dev, isrc);
	}

	/* Unmask attached PPI interrupts. */
	for (irq = GIC_FIRST_PPI; irq <= GIC_LAST_PPI; irq++) {
		isrc = GIC_INTR_ISRC(sc, irq);
		if (intr_isrc_init_on_cpu(isrc, cpu))
			gic_v3_enable_intr(dev, isrc);
	}

	for (i = 0; i < sc->gic_nchildren; i++) {
		child = sc->gic_children[i];
		PIC_INIT_SECONDARY(child);
	}
}
904 
/*
 * Send an IPI (SGI) to a set of CPUs via ICC_SGI1R_EL1.  One register
 * write covers up to 16 CPUs sharing the same Aff3.Aff2.Aff1 affinity
 * group, so writes are batched per group.
 */
static void
gic_v3_ipi_send(device_t dev, struct intr_irqsrc *isrc, cpuset_t cpus,
    u_int ipi)
{
	struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
	uint64_t aff, val, irq;
	int i;

#define	GIC_AFF_MASK	(CPU_AFF3_MASK | CPU_AFF2_MASK | CPU_AFF1_MASK)
#define	GIC_AFFINITY(i)	(CPU_AFFINITY(i) & GIC_AFF_MASK)
	aff = GIC_AFFINITY(0);
	irq = gi->gi_irq;
	val = 0;

	/* Iterate through all CPUs in set */
	for (i = 0; i <= mp_maxid; i++) {
		/* Move to the next affinity group */
		if (aff != GIC_AFFINITY(i)) {
			/* Send the IPI accumulated for the previous group */
			if (val != 0) {
				gic_icc_write(SGI1R, val);
				val = 0;
			}
			aff = GIC_AFFINITY(i);
		}

		/* Send the IPI to this cpu */
		if (CPU_ISSET(i, &cpus)) {
#define	ICC_SGI1R_AFFINITY(aff)					\
    (((uint64_t)CPU_AFF3(aff) << ICC_SGI1R_EL1_AFF3_SHIFT) |	\
     ((uint64_t)CPU_AFF2(aff) << ICC_SGI1R_EL1_AFF2_SHIFT) |	\
     ((uint64_t)CPU_AFF1(aff) << ICC_SGI1R_EL1_AFF1_SHIFT))
			/* Set the affinity when the first at this level */
			if (val == 0)
				val = ICC_SGI1R_AFFINITY(aff) |
				    irq << ICC_SGI1R_EL1_SGIID_SHIFT;
			/* Set the bit to send the IPI to the CPU */
			val |= 1 << CPU_AFF0(CPU_AFFINITY(i));
		}
	}

	/* Send the IPI to the last cpu affinity group */
	if (val != 0)
		gic_icc_write(SGI1R, val);
#undef GIC_AFF_MASK
#undef GIC_AFFINITY
}
952 
/*
 * Allocate the next free SGI for use as the given IPI and record the
 * SGI -> IPI mapping consumed by arm_gic_v3_intr().
 */
static int
gic_v3_ipi_setup(device_t dev, u_int ipi, struct intr_irqsrc **isrcp)
{
	struct intr_irqsrc *isrc;
	struct gic_v3_softc *sc = device_get_softc(dev);

	/* Only 16 SGIs exist; fail once they are all claimed. */
	if (sgi_first_unused > GIC_LAST_SGI)
		return (ENOSPC);

	isrc = GIC_INTR_ISRC(sc, sgi_first_unused);
	sgi_to_ipi[sgi_first_unused++] = ipi;

	CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu);

	*isrcp = isrc;
	return (0);
}
970 #endif /* SMP */
971 
972 /*
973  * Helper routines
974  */
/*
 * Spin until the selected block's register-write-pending (RWP) bit
 * clears, with a ~1 second timeout before panicking.
 *
 * NOTE(review): the GICD_CTLR offset/GICD_CTLR_RWP mask is also used
 * for the REDIST case — this assumes GICR_CTLR shares that offset and
 * bit layout; confirm against gic_v3_reg.h and the GIC architecture
 * specification.
 */
static void
gic_v3_wait_for_rwp(struct gic_v3_softc *sc, enum gic_v3_xdist xdist)
{
	struct resource *res;
	u_int cpuid;
	size_t us_left = 1000000;

	cpuid = PCPU_GET(cpuid);

	switch (xdist) {
	case DIST:
		res = sc->gic_dist;
		break;
	case REDIST:
		res = &sc->gic_redists.pcpu[cpuid]->res;
		break;
	default:
		KASSERT(0, ("%s: Attempt to wait for unknown RWP", __func__));
		return;
	}

	while ((bus_read_4(res, GICD_CTLR) & GICD_CTLR_RWP) != 0) {
		DELAY(1);
		if (us_left-- == 0)
			panic("GICD Register write pending for too long");
	}
}
1002 
/* CPU interface. */
/* Program the CPU interface priority mask (ICC_PMR_EL1). */
static __inline void
gic_v3_cpu_priority(uint64_t mask)
{

	/* Set priority mask */
	gic_icc_write(PMR, mask & ICC_PMR_EL1_PRIO_MASK);
}
1011 
/*
 * Enable the system-register interface to the GIC CPU interface
 * (ICC_SRE_EL1.SRE).  Fails if a hypervisor keeps SRE disabled.
 */
static int
gic_v3_cpu_enable_sre(struct gic_v3_softc *sc)
{
	uint64_t sre;
	u_int cpuid;

	cpuid = PCPU_GET(cpuid);
	/*
	 * Set the SRE bit to enable access to GIC CPU interface
	 * via system registers.
	 */
	sre = READ_SPECIALREG(icc_sre_el1);
	sre |= ICC_SRE_EL1_SRE;
	WRITE_SPECIALREG(icc_sre_el1, sre);
	isb();
	/*
	 * Now ensure that the bit is set.  If it reads back as zero the
	 * write was ignored, i.e. SRE was disabled at a higher EL.
	 */
	sre = READ_SPECIALREG(icc_sre_el1);
	if ((sre & ICC_SRE_EL1_SRE) == 0) {
		/* We are done. This was disabled in EL2 */
		device_printf(sc->dev, "ERROR: CPU%u cannot enable CPU interface "
		    "via system registers\n", cpuid);
		return (ENXIO);
	} else if (bootverbose) {
		device_printf(sc->dev,
		    "CPU%u enabled CPU interface via system registers\n",
		    cpuid);
	}

	return (0);
}
1044 
/*
 * Per-CPU interface initialization: enable the system-register
 * interface, open the priority mask, and enable Group 1 delivery.
 */
static int
gic_v3_cpu_init(struct gic_v3_softc *sc)
{
	int err;

	/* Enable access to CPU interface via system registers */
	err = gic_v3_cpu_enable_sre(sc);
	if (err != 0)
		return (err);
	/* Priority mask to minimum - accept all interrupts */
	gic_v3_cpu_priority(GIC_PRIORITY_MIN);
	/* Disable EOI mode */
	gic_icc_clear(CTLR, ICC_CTLR_EL1_EOIMODE);
	/*
	 * Enable group 1 (insecure) interrupts.
	 * NOTE(review): the ICC_IGRPEN0_EL1_EN constant is written to
	 * IGRPEN1; presumably the enable bit is bit 0 in both
	 * registers — confirm against gic_v3_reg.h.
	 */
	gic_icc_set(IGRPEN1, ICC_IGRPEN0_EL1_EN);

	return (0);
}
1063 
/* Distributor */
/*
 * One-time Distributor initialization, run on the boot CPU: disable,
 * configure all SPIs to a known state, re-enable with affinity routing,
 * and route every SPI to the boot CPU.
 */
static int
gic_v3_dist_init(struct gic_v3_softc *sc)
{
	uint64_t aff;
	u_int i;

	/*
	 * 1. Disable the Distributor
	 */
	gic_d_write(sc, 4, GICD_CTLR, 0);
	gic_v3_wait_for_rwp(sc, DIST);

	/*
	 * 2. Configure the Distributor
	 */
	/* Set all SPIs to be Group 1 Non-secure */
	for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i += GICD_I_PER_IGROUPRn)
		gic_d_write(sc, 4, GICD_IGROUPR(i), 0xFFFFFFFF);

	/* Set all global interrupts to be level triggered, active low. */
	for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i += GICD_I_PER_ICFGRn)
		gic_d_write(sc, 4, GICD_ICFGR(i), 0x00000000);

	/* Set priority to all shared interrupts */
	for (i = GIC_FIRST_SPI;
	    i < sc->gic_nirqs; i += GICD_I_PER_IPRIORITYn) {
		/* Set highest priority */
		gic_d_write(sc, 4, GICD_IPRIORITYR(i), GIC_PRIORITY_MAX);
	}

	/*
	 * Disable all interrupts. Leave PPI and SGIs as they are enabled in
	 * Re-Distributor registers.
	 */
	for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i += GICD_I_PER_ISENABLERn)
		gic_d_write(sc, 4, GICD_ICENABLER(i), 0xFFFFFFFF);

	gic_v3_wait_for_rwp(sc, DIST);

	/*
	 * 3. Enable Distributor
	 */
	/* Enable Distributor with ARE, Group 1 */
	gic_d_write(sc, 4, GICD_CTLR, GICD_CTLR_ARE_NS | GICD_CTLR_G1A |
	    GICD_CTLR_G1);

	/*
	 * 4. Route all interrupts to boot CPU.
	 */
	aff = CPU_AFFINITY(0);
	for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i++)
		gic_d_write(sc, 4, GICD_IROUTER(i), aff);

	return (0);
}
1120 
1121 /* Re-Distributor */
1122 static int
1123 gic_v3_redist_alloc(struct gic_v3_softc *sc)
1124 {
1125 	u_int cpuid;
1126 
1127 	/* Allocate struct resource for all CPU's Re-Distributor registers */
1128 	for (cpuid = 0; cpuid <= mp_maxid; cpuid++)
1129 		if (CPU_ISSET(cpuid, &all_cpus) != 0)
1130 			sc->gic_redists.pcpu[cpuid] =
1131 				malloc(sizeof(*sc->gic_redists.pcpu[0]),
1132 				    M_GIC_V3, M_WAITOK);
1133 		else
1134 			sc->gic_redists.pcpu[cpuid] = NULL;
1135 	return (0);
1136 }
1137 
static int
gic_v3_redist_find(struct gic_v3_softc *sc)
{
	struct resource r_res;
	bus_space_handle_t r_bsh;
	uint64_t aff;
	uint64_t typer;
	uint32_t pidr2;
	u_int cpuid;
	size_t i;

	cpuid = PCPU_GET(cpuid);

	aff = CPU_AFFINITY(cpuid);
	/*
	 * Pack Aff3..Aff0 into one byte each (Aff3 in bits 31:24 down to
	 * Aff0 in bits 7:0) to match the layout of GICR_TYPER's affinity
	 * field after the shift below.
	 */
	aff = (CPU_AFF3(aff) << 24) | (CPU_AFF2(aff) << 16) |
	    (CPU_AFF1(aff) << 8) | CPU_AFF0(aff);

	if (bootverbose) {
		device_printf(sc->dev,
		    "Start searching for Re-Distributor\n");
	}
	/* Iterate through Re-Distributor regions */
	for (i = 0; i < sc->gic_redists.nregions; i++) {
		/*
		 * Take a copy of the region's resource so the bus handle
		 * can be advanced frame-by-frame without touching the
		 * original resource.
		 */
		r_res = *sc->gic_redists.regions[i];
		r_bsh = rman_get_bushandle(&r_res);

		/* Only GICv3/GICv4 Re-Distributor frames are supported. */
		pidr2 = bus_read_4(&r_res, GICR_PIDR2);
		switch (GICR_PIDR2_ARCH(pidr2)) {
		case GICR_PIDR2_ARCH_GICv3: /* fall through */
		case GICR_PIDR2_ARCH_GICv4:
			break;
		default:
			device_printf(sc->dev,
			    "No Re-Distributor found for CPU%u\n", cpuid);
			return (ENODEV);
		}

		/*
		 * Walk the consecutive per-CPU Re-Distributor frames in
		 * this region until one whose TYPER affinity matches this
		 * CPU is found, or the frame marked Last is passed.
		 */
		do {
			typer = bus_read_8(&r_res, GICR_TYPER);
			if ((typer >> GICR_TYPER_AFF_SHIFT) == aff) {
				KASSERT(sc->gic_redists.pcpu[cpuid] != NULL,
				    ("Invalid pointer to per-CPU redistributor"));
				/* Copy res contents to its final destination */
				sc->gic_redists.pcpu[cpuid]->res = r_res;
				sc->gic_redists.pcpu[cpuid]->lpi_enabled = false;
				if (bootverbose) {
					device_printf(sc->dev,
					    "CPU%u Re-Distributor has been found\n",
					    cpuid);
				}
				return (0);
			}

			/*
			 * Advance to the next CPU's frame: RD + SGI frames
			 * always, plus two extra frames when the
			 * Re-Distributor implements virtual LPIs (GICv4).
			 */
			r_bsh += (GICR_RD_BASE_SIZE + GICR_SGI_BASE_SIZE);
			if ((typer & GICR_TYPER_VLPIS) != 0) {
				r_bsh +=
				    (GICR_VLPI_BASE_SIZE + GICR_RESERVED_SIZE);
			}

			rman_set_bushandle(&r_res, r_bsh);
		} while ((typer & GICR_TYPER_LAST) == 0);
	}

	device_printf(sc->dev, "No Re-Distributor found for CPU%u\n", cpuid);
	return (ENXIO);
}
1206 
1207 static int
1208 gic_v3_redist_wake(struct gic_v3_softc *sc)
1209 {
1210 	uint32_t waker;
1211 	size_t us_left = 1000000;
1212 
1213 	waker = gic_r_read(sc, 4, GICR_WAKER);
1214 	/* Wake up Re-Distributor for this CPU */
1215 	waker &= ~GICR_WAKER_PS;
1216 	gic_r_write(sc, 4, GICR_WAKER, waker);
1217 	/*
1218 	 * When clearing ProcessorSleep bit it is required to wait for
1219 	 * ChildrenAsleep to become zero following the processor power-on.
1220 	 */
1221 	while ((gic_r_read(sc, 4, GICR_WAKER) & GICR_WAKER_CA) != 0) {
1222 		DELAY(1);
1223 		if (us_left-- == 0) {
1224 			panic("Could not wake Re-Distributor for CPU%u",
1225 			    PCPU_GET(cpuid));
1226 		}
1227 	}
1228 
1229 	if (bootverbose) {
1230 		device_printf(sc->dev, "CPU%u Re-Distributor woke up\n",
1231 		    PCPU_GET(cpuid));
1232 	}
1233 
1234 	return (0);
1235 }
1236 
static int
gic_v3_redist_init(struct gic_v3_softc *sc)
{
	int err;
	size_t i;

	/* Locate this CPU's Re-Distributor frame before touching it. */
	err = gic_v3_redist_find(sc);
	if (err != 0)
		return (err);

	err = gic_v3_redist_wake(sc);
	if (err != 0)
		return (err);

	/* Configure SGIs and PPIs to be Group1 Non-secure */
	gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICR_IGROUPR0,
	    0xFFFFFFFF);

	/*
	 * Disable PPIs (GICR_ICENABLER0 covers INTIDs 0-31; only the PPI
	 * bits are written here).
	 */
	gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICR_ICENABLER0,
	    GICR_I_ENABLER_PPI_MASK);
	/* Enable SGIs */
	gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICR_ISENABLER0,
	    GICR_I_ENABLER_SGI_MASK);

	/*
	 * Set priority for SGIs and PPIs (one byte per interrupt, four
	 * per IPRIORITYR write).
	 */
	for (i = 0; i <= GIC_LAST_PPI; i += GICR_I_PER_IPRIORITYn) {
		gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_IPRIORITYR(i),
		    GIC_PRIORITY_MAX);
	}

	/* Wait until the register writes above have taken effect. */
	gic_v3_wait_for_rwp(sc, REDIST);

	return (0);
}
1272