xref: /freebsd/sys/arm64/arm64/gic_v3.c (revision d93a896e)
1 /*-
2  * Copyright (c) 2015-2016 The FreeBSD Foundation
3  * All rights reserved.
4  *
5  * This software was developed by Andrew Turner under
6  * the sponsorship of the FreeBSD Foundation.
7  *
8  * This software was developed by Semihalf under
9  * the sponsorship of the FreeBSD Foundation.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  */
32 
33 #include "opt_platform.h"
34 
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37 
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/bitstring.h>
41 #include <sys/bus.h>
42 #include <sys/kernel.h>
43 #include <sys/ktr.h>
44 #include <sys/malloc.h>
45 #include <sys/module.h>
46 #include <sys/rman.h>
47 #include <sys/pcpu.h>
48 #include <sys/proc.h>
49 #include <sys/cpuset.h>
50 #include <sys/lock.h>
51 #include <sys/mutex.h>
52 #include <sys/smp.h>
53 
54 #include <vm/vm.h>
55 #include <vm/pmap.h>
56 
57 #include <machine/bus.h>
58 #include <machine/cpu.h>
59 #include <machine/intr.h>
60 
61 #ifdef FDT
62 #include <dev/fdt/fdt_intr.h>
63 #include <dev/ofw/ofw_bus_subr.h>
64 #endif
65 
66 #include "pic_if.h"
67 
68 #include <arm/arm/gic_common.h>
69 #include "gic_v3_reg.h"
70 #include "gic_v3_var.h"
71 
72 static bus_read_ivar_t gic_v3_read_ivar;
73 
74 static pic_disable_intr_t gic_v3_disable_intr;
75 static pic_enable_intr_t gic_v3_enable_intr;
76 static pic_map_intr_t gic_v3_map_intr;
77 static pic_setup_intr_t gic_v3_setup_intr;
78 static pic_teardown_intr_t gic_v3_teardown_intr;
79 static pic_post_filter_t gic_v3_post_filter;
80 static pic_post_ithread_t gic_v3_post_ithread;
81 static pic_pre_ithread_t gic_v3_pre_ithread;
82 static pic_bind_intr_t gic_v3_bind_intr;
83 #ifdef SMP
84 static pic_init_secondary_t gic_v3_init_secondary;
85 static pic_ipi_send_t gic_v3_ipi_send;
86 static pic_ipi_setup_t gic_v3_ipi_setup;
87 #endif
88 
89 static u_int gic_irq_cpu;
90 #ifdef SMP
91 static u_int sgi_to_ipi[GIC_LAST_SGI - GIC_FIRST_SGI + 1];
92 static u_int sgi_first_unused = GIC_FIRST_SGI;
93 #endif
94 
95 static device_method_t gic_v3_methods[] = {
96 	/* Device interface */
97 	DEVMETHOD(device_detach,	gic_v3_detach),
98 
99 	/* Bus interface */
100 	DEVMETHOD(bus_read_ivar,	gic_v3_read_ivar),
101 
102 	/* Interrupt controller interface */
103 	DEVMETHOD(pic_disable_intr,	gic_v3_disable_intr),
104 	DEVMETHOD(pic_enable_intr,	gic_v3_enable_intr),
105 	DEVMETHOD(pic_map_intr,		gic_v3_map_intr),
106 	DEVMETHOD(pic_setup_intr,	gic_v3_setup_intr),
107 	DEVMETHOD(pic_teardown_intr,	gic_v3_teardown_intr),
108 	DEVMETHOD(pic_post_filter,	gic_v3_post_filter),
109 	DEVMETHOD(pic_post_ithread,	gic_v3_post_ithread),
110 	DEVMETHOD(pic_pre_ithread,	gic_v3_pre_ithread),
111 #ifdef SMP
112 	DEVMETHOD(pic_bind_intr,	gic_v3_bind_intr),
113 	DEVMETHOD(pic_init_secondary,	gic_v3_init_secondary),
114 	DEVMETHOD(pic_ipi_send,		gic_v3_ipi_send),
115 	DEVMETHOD(pic_ipi_setup,	gic_v3_ipi_setup),
116 #endif
117 
118 	/* End */
119 	DEVMETHOD_END
120 };
121 
122 DEFINE_CLASS_0(gic, gic_v3_driver, gic_v3_methods,
123     sizeof(struct gic_v3_softc));
124 
125 /*
126  * Driver-specific definitions.
127  */
128 MALLOC_DEFINE(M_GIC_V3, "GICv3", GIC_V3_DEVSTR);
129 
130 /*
131  * Helper functions and definitions.
132  */
133 /* Destination registers, either Distributor or Re-Distributor */
134 enum gic_v3_xdist {
135 	DIST = 0,
136 	REDIST,
137 };
138 
139 struct gic_v3_irqsrc {
140 	struct intr_irqsrc	gi_isrc;
141 	uint32_t		gi_irq;
142 	enum intr_polarity	gi_pol;
143 	enum intr_trigger	gi_trig;
144 };
145 
146 /* Helper routines starting with gic_v3_ */
147 static int gic_v3_dist_init(struct gic_v3_softc *);
148 static int gic_v3_redist_alloc(struct gic_v3_softc *);
149 static int gic_v3_redist_find(struct gic_v3_softc *);
150 static int gic_v3_redist_init(struct gic_v3_softc *);
151 static int gic_v3_cpu_init(struct gic_v3_softc *);
152 static void gic_v3_wait_for_rwp(struct gic_v3_softc *, enum gic_v3_xdist);
153 
154 /* A sequence of init functions for primary (boot) CPU */
155 typedef int (*gic_v3_initseq_t) (struct gic_v3_softc *);
156 /* Primary CPU initialization sequence */
157 static gic_v3_initseq_t gic_v3_primary_init[] = {
158 	gic_v3_dist_init,
159 	gic_v3_redist_alloc,
160 	gic_v3_redist_init,
161 	gic_v3_cpu_init,
162 	NULL
163 };
164 
165 #ifdef SMP
166 /* Secondary CPU initialization sequence */
167 static gic_v3_initseq_t gic_v3_secondary_init[] = {
168 	gic_v3_redist_init,
169 	gic_v3_cpu_init,
170 	NULL
171 };
172 #endif
173 
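/*
 * Accessors for the current CPU's Re-Distributor frame; the per-CPU
 * resource they use is discovered in gic_v3_redist_find().
 */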
174 uint32_t
175 gic_r_read_4(device_t dev, bus_size_t offset)
176 {
177 	struct gic_v3_softc *sc;
178 
179 	sc = device_get_softc(dev);
180 	return (bus_read_4(sc->gic_redists.pcpu[PCPU_GET(cpuid)], offset));
181 }
182 
183 uint64_t
184 gic_r_read_8(device_t dev, bus_size_t offset)
185 {
186 	struct gic_v3_softc *sc;
187 
188 	sc = device_get_softc(dev);
189 	return (bus_read_8(sc->gic_redists.pcpu[PCPU_GET(cpuid)], offset));
190 }
191 
192 void
193 gic_r_write_4(device_t dev, bus_size_t offset, uint32_t val)
194 {
195 	struct gic_v3_softc *sc;
196 
197 	sc = device_get_softc(dev);
198 	bus_write_4(sc->gic_redists.pcpu[PCPU_GET(cpuid)], offset, val);
199 }
200 
201 void
202 gic_r_write_8(device_t dev, bus_size_t offset, uint64_t val)
203 {
204 	struct gic_v3_softc *sc;
205 
206 	sc = device_get_softc(dev);
207 	bus_write_8(sc->gic_redists.pcpu[PCPU_GET(cpuid)], offset, val);
208 }
209 
210 /*
211  * Device interface.
212  */
213 int
214 gic_v3_attach(device_t dev)
215 {
216 	struct gic_v3_softc *sc;
217 	gic_v3_initseq_t *init_func;
218 	uint32_t typer;
219 	int rid;
220 	int err;
221 	size_t i;
222 	u_int irq;
223 	const char *name;
224 
225 	sc = device_get_softc(dev);
226 	sc->gic_registered = FALSE;
227 	sc->dev = dev;
228 	err = 0;
229 
230 	/* Initialize mutex */
231 	mtx_init(&sc->gic_mtx, "GICv3 lock", NULL, MTX_SPIN);
232 
233 	/*
234 	 * Allocate an array of struct resource entries:
235 	 * one for the Distributor and the remaining ones for the Re-Distributor regions.
236 	 */
237 	sc->gic_res = malloc(
238 	    sizeof(*sc->gic_res) * (sc->gic_redists.nregions + 1),
239 	    M_GIC_V3, M_WAITOK);
240 
241 	/* Now allocate corresponding resources */
242 	for (i = 0, rid = 0; i < (sc->gic_redists.nregions + 1); i++, rid++) {
243 		sc->gic_res[rid] = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
244 		    &rid, RF_ACTIVE);
245 		if (sc->gic_res[rid] == NULL)
246 			return (ENXIO);
247 	}
248 
249 	/*
250 	 * Distributor interface
251 	 */
252 	sc->gic_dist = sc->gic_res[0];
253 
254 	/*
255 	 * Re-Distributor interface
256 	 */
257 	/* Allocate space for the region descriptions */
258 	sc->gic_redists.regions = malloc(
259 	    sizeof(*sc->gic_redists.regions) * sc->gic_redists.nregions,
260 	    M_GIC_V3, M_WAITOK);
261 
262 	/* Fill in the bus_space information for each region. */
263 	for (i = 0, rid = 1; i < sc->gic_redists.nregions; i++, rid++)
264 		sc->gic_redists.regions[i] = sc->gic_res[rid];
265 
266 	/* Get the number of supported SPI interrupts */
267 	typer = gic_d_read(sc, 4, GICD_TYPER);
268 	sc->gic_nirqs = GICD_TYPER_I_NUM(typer);
269 	if (sc->gic_nirqs > GIC_I_NUM_MAX)
270 		sc->gic_nirqs = GIC_I_NUM_MAX;
271 
272 	sc->gic_irqs = malloc(sizeof(*sc->gic_irqs) * sc->gic_nirqs,
273 	    M_GIC_V3, M_WAITOK | M_ZERO);
274 	name = device_get_nameunit(dev);
275 	for (irq = 0; irq < sc->gic_nirqs; irq++) {
276 		struct intr_irqsrc *isrc;
277 
278 		sc->gic_irqs[irq].gi_irq = irq;
279 		sc->gic_irqs[irq].gi_pol = INTR_POLARITY_CONFORM;
280 		sc->gic_irqs[irq].gi_trig = INTR_TRIGGER_CONFORM;
281 
282 		isrc = &sc->gic_irqs[irq].gi_isrc;
283 		if (irq <= GIC_LAST_SGI) {
284 			err = intr_isrc_register(isrc, sc->dev,
285 			    INTR_ISRCF_IPI, "%s,i%u", name, irq - GIC_FIRST_SGI);
286 		} else if (irq <= GIC_LAST_PPI) {
287 			err = intr_isrc_register(isrc, sc->dev,
288 			    INTR_ISRCF_PPI, "%s,p%u", name, irq - GIC_FIRST_PPI);
289 		} else {
290 			err = intr_isrc_register(isrc, sc->dev, 0,
291 			    "%s,s%u", name, irq - GIC_FIRST_SPI);
292 		}
293 		if (err != 0) {
294 			/* XXX call intr_isrc_deregister() */
295 			free(sc->gic_irqs, M_GIC_V3);
296 			return (err);
297 		}
298 	}
299 
300 	/*
301 	 * Read the Peripheral ID2 register. This is an implementation
302 	 * defined register, but seems to be implemented in all GICv3
303 	 * parts and Linux expects it to be there.
304 	 */
305 	sc->gic_pidr2 = gic_d_read(sc, 4, GICD_PIDR2);
306 
307 	/* Get the number of supported interrupt identifier bits */
308 	sc->gic_idbits = GICD_TYPER_IDBITS(typer);
309 
310 	if (bootverbose) {
311 		device_printf(dev, "SPIs: %u, IDs: %u\n",
312 		    sc->gic_nirqs, (1 << sc->gic_idbits) - 1);
313 	}
314 
315 	/* Run the initialization sequence on the boot CPU */
316 	for (init_func = gic_v3_primary_init; *init_func != NULL; init_func++) {
317 		err = (*init_func)(sc);
318 		if (err != 0)
319 			return (err);
320 	}
321 
322 	return (0);
323 }
324 
325 int
326 gic_v3_detach(device_t dev)
327 {
328 	struct gic_v3_softc *sc;
329 	size_t i;
330 	int rid;
331 
332 	sc = device_get_softc(dev);
333 
334 	if (device_is_attached(dev)) {
335 		/*
336 		 * XXX: We should probably deregister PIC
337 		 */
338 		if (sc->gic_registered)
339 			panic("Trying to detach registered PIC");
340 	}
341 	for (rid = 0; rid < (sc->gic_redists.nregions + 1); rid++)
342 		bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->gic_res[rid]);
343 
344 	for (i = 0; i < mp_ncpus; i++)
345 		free(sc->gic_redists.pcpu[i], M_GIC_V3);
346 
347 	free(sc->gic_res, M_GIC_V3);
348 	free(sc->gic_redists.regions, M_GIC_V3);
349 
350 	return (0);
351 }
352 
353 static int
354 gic_v3_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
355 {
356 	struct gic_v3_softc *sc;
357 
358 	sc = device_get_softc(dev);
359 
360 	switch (which) {
361 	case GICV3_IVAR_NIRQS:
362 		*result = sc->gic_nirqs;
363 		return (0);
364 	case GICV3_IVAR_REDIST_VADDR:
365 		*result = (uintptr_t)rman_get_virtual(
366 		    sc->gic_redists.pcpu[PCPU_GET(cpuid)]);
367 		return (0);
368 	case GIC_IVAR_HW_REV:
369 		KASSERT(
370 		    GICR_PIDR2_ARCH(sc->gic_pidr2) == GICR_PIDR2_ARCH_GICv3 ||
371 		    GICR_PIDR2_ARCH(sc->gic_pidr2) == GICR_PIDR2_ARCH_GICv4,
372 		    ("gic_v3_read_ivar: Invalid GIC architecture: %d (%.08X)",
373 		     GICR_PIDR2_ARCH(sc->gic_pidr2), sc->gic_pidr2));
374 		*result = GICR_PIDR2_ARCH(sc->gic_pidr2);
375 		return (0);
376 	case GIC_IVAR_BUS:
377 		KASSERT(sc->gic_bus != GIC_BUS_UNKNOWN,
378 		    ("gic_v3_read_ivar: Unknown bus type"));
379 		KASSERT(sc->gic_bus <= GIC_BUS_MAX,
380 		    ("gic_v3_read_ivar: Invalid bus type %u", sc->gic_bus));
381 		*result = sc->gic_bus;
382 		return (0);
383 	}
384 
385 	return (ENOENT);
386 }
387 
388 int
389 arm_gic_v3_intr(void *arg)
390 {
391 	struct gic_v3_softc *sc = arg;
392 	struct gic_v3_irqsrc *gi;
393 	struct intr_pic *pic;
394 	uint64_t active_irq;
395 	struct trapframe *tf;
396 	bool first;
397 
398 	first = true;
399 	pic = sc->gic_pic;
400 
401 	while (1) {
402 		if (CPU_MATCH_ERRATA_CAVIUM_THUNDER_1_1) {
403 			/*
404 			 * Hardware:		Cavium ThunderX
405 			 * Chip revision:	Pass 1.0 (early version)
406 			 *			Pass 1.1 (production)
407 			 * ERRATUM:		22978, 23154
408 			 */
409 			__asm __volatile(
410 			    "nop;nop;nop;nop;nop;nop;nop;nop;	\n"
411 			    "mrs %0, ICC_IAR1_EL1		\n"
412 			    "nop;nop;nop;nop;			\n"
413 			    "dsb sy				\n"
414 			    : "=&r" (active_irq));
415 		} else {
416 			active_irq = gic_icc_read(IAR1);
417 		}
418 
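		/*
		 * Interrupt IDs at or above GIC_FIRST_LPI are LPIs owned by
		 * a child interrupt controller (e.g. the ITS); hand them
		 * straight to it.
		 */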
419 		if (active_irq >= GIC_FIRST_LPI) {
420 			intr_child_irq_handler(pic, active_irq);
421 			continue;
422 		}
423 
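		/*
		 * A read of ICC_IAR1_EL1 returns a special INTID (1020-1023,
		 * 1023 meaning "no pending interrupt") when there is nothing
		 * left to service; treat those, and anything outside the
		 * supported range, as handled and return.
		 */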
424 		if (__predict_false(active_irq >= sc->gic_nirqs))
425 			return (FILTER_HANDLED);
426 
427 		tf = curthread->td_intr_frame;
428 		gi = &sc->gic_irqs[active_irq];
429 		if (active_irq <= GIC_LAST_SGI) {
430 			/* Call EOI for all IPIs before dispatching them. */
431 			gic_icc_write(EOIR1, (uint64_t)active_irq);
432 #ifdef SMP
433 			intr_ipi_dispatch(sgi_to_ipi[gi->gi_irq], tf);
434 #else
435 			device_printf(sc->dev, "SGI %ju on UP system detected\n",
436 			    (uintmax_t)(active_irq - GIC_FIRST_SGI));
437 #endif
438 		} else if (active_irq >= GIC_FIRST_PPI &&
439 		    active_irq <= GIC_LAST_SPI) {
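			/*
			 * Edge-triggered interrupts are acknowledged (EOI)
			 * immediately so that a new edge is not lost while the
			 * handler runs; level-triggered interrupts are EOId
			 * later in the post_filter or pre_ithread hooks.
			 */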
440 			if (gi->gi_trig == INTR_TRIGGER_EDGE)
441 				gic_icc_write(EOIR1, gi->gi_irq);
442 
443 			if (intr_isrc_dispatch(&gi->gi_isrc, tf) != 0) {
444 				if (gi->gi_trig != INTR_TRIGGER_EDGE)
445 					gic_icc_write(EOIR1, gi->gi_irq);
446 				gic_v3_disable_intr(sc->dev, &gi->gi_isrc);
447 				device_printf(sc->dev,
448 				    "Stray irq %lu disabled\n", active_irq);
449 			}
450 		}
451 	}
452 }
453 
454 #ifdef FDT
455 static int
456 gic_map_fdt(device_t dev, u_int ncells, pcell_t *cells, u_int *irqp,
457     enum intr_polarity *polp, enum intr_trigger *trigp)
458 {
459 	u_int irq;
460 
461 	if (ncells < 3)
462 		return (EINVAL);
463 
464 	/*
465 	 * The 1st cell is the interrupt type:
466 	 *	0 = SPI
467 	 *	1 = PPI
468 	 * The 2nd cell contains the interrupt number:
469 	 *	[0 - 987] for SPI
470 	 *	[0 -  15] for PPI
471 	 * The 3rd cell is the flags, encoded as follows:
472 	 *   bits[3:0] trigger type and level flags
473  *	1 = edge triggered, rising edge
474  *	2 = edge triggered, falling edge (invalid for SPIs)
475  *	4 = level-sensitive, active high
476  *	8 = level-sensitive, active low (invalid for SPIs)
477 	 */
478 	switch (cells[0]) {
479 	case 0:
480 		irq = GIC_FIRST_SPI + cells[1];
481 		/* SPI irq is checked later. */
482 		break;
483 	case 1:
484 		irq = GIC_FIRST_PPI + cells[1];
485 		if (irq > GIC_LAST_PPI) {
486 			device_printf(dev, "unsupported PPI interrupt "
487 			    "number %u\n", cells[1]);
488 			return (EINVAL);
489 		}
490 		break;
491 	default:
492 		device_printf(dev, "unsupported interrupt type "
493 		    "configuration %u\n", cells[0]);
494 		return (EINVAL);
495 	}
496 
497 	switch (cells[2] & FDT_INTR_MASK) {
498 	case FDT_INTR_EDGE_RISING:
499 		*trigp = INTR_TRIGGER_EDGE;
500 		*polp = INTR_POLARITY_HIGH;
501 		break;
502 	case FDT_INTR_EDGE_FALLING:
503 		*trigp = INTR_TRIGGER_EDGE;
504 		*polp = INTR_POLARITY_LOW;
505 		break;
506 	case FDT_INTR_LEVEL_HIGH:
507 		*trigp = INTR_TRIGGER_LEVEL;
508 		*polp = INTR_POLARITY_HIGH;
509 		break;
510 	case FDT_INTR_LEVEL_LOW:
511 		*trigp = INTR_TRIGGER_LEVEL;
512 		*polp = INTR_POLARITY_LOW;
513 		break;
514 	default:
515 		device_printf(dev, "unsupported trigger/polarity "
516 		    "configuration 0x%02x\n", cells[2]);
517 		return (EINVAL);
518 	}
519 
520 	/* Check the interrupt is valid: SPIs must be active-high */
521 	if (irq >= GIC_FIRST_SPI && *polp != INTR_POLARITY_HIGH)
522 		return (EINVAL);
523 
524 	*irqp = irq;
525 	return (0);
526 }
527 #endif
528 
529 static int
530 gic_map_msi(device_t dev, struct intr_map_data_msi *msi_data, u_int *irqp,
531     enum intr_polarity *polp, enum intr_trigger *trigp)
532 {
533 	struct gic_v3_irqsrc *gi;
534 
535 	/* SPI-mapped MSI */
536 	gi = (struct gic_v3_irqsrc *)msi_data->isrc;
537 	if (gi == NULL)
538 		return (ENXIO);
539 
540 	*irqp = gi->gi_irq;
541 
542 	/* MSI/MSI-X interrupts are always edge triggered with high polarity */
543 	*polp = INTR_POLARITY_HIGH;
544 	*trigp = INTR_TRIGGER_EDGE;
545 
546 	return (0);
547 }
548 
549 static int
550 do_gic_v3_map_intr(device_t dev, struct intr_map_data *data, u_int *irqp,
551     enum intr_polarity *polp, enum intr_trigger *trigp)
552 {
553 	struct gic_v3_softc *sc;
554 	enum intr_polarity pol;
555 	enum intr_trigger trig;
556 	struct intr_map_data_msi *dam;
557 #ifdef FDT
558 	struct intr_map_data_fdt *daf;
559 #endif
560 	u_int irq;
561 
562 	sc = device_get_softc(dev);
563 
564 	switch (data->type) {
565 #ifdef FDT
566 	case INTR_MAP_DATA_FDT:
567 		daf = (struct intr_map_data_fdt *)data;
568 		if (gic_map_fdt(dev, daf->ncells, daf->cells, &irq, &pol,
569 		    &trig) != 0)
570 			return (EINVAL);
571 		break;
572 #endif
573 	case INTR_MAP_DATA_MSI:
574 		/* SPI-mapped MSI */
575 		dam = (struct intr_map_data_msi *)data;
576 		if (gic_map_msi(dev, dam, &irq, &pol, &trig) != 0)
577 			return (EINVAL);
578 		break;
579 	default:
580 		return (EINVAL);
581 	}
582 
583 	if (irq >= sc->gic_nirqs)
584 		return (EINVAL);
585 	switch (pol) {
586 	case INTR_POLARITY_CONFORM:
587 	case INTR_POLARITY_LOW:
588 	case INTR_POLARITY_HIGH:
589 		break;
590 	default:
591 		return (EINVAL);
592 	}
593 	switch (trig) {
594 	case INTR_TRIGGER_CONFORM:
595 	case INTR_TRIGGER_EDGE:
596 	case INTR_TRIGGER_LEVEL:
597 		break;
598 	default:
599 		return (EINVAL);
600 	}
601 
602 	*irqp = irq;
603 	if (polp != NULL)
604 		*polp = pol;
605 	if (trigp != NULL)
606 		*trigp = trig;
607 	return (0);
608 }
609 
610 static int
611 gic_v3_map_intr(device_t dev, struct intr_map_data *data,
612     struct intr_irqsrc **isrcp)
613 {
614 	struct gic_v3_softc *sc;
615 	int error;
616 	u_int irq;
617 
618 	error = do_gic_v3_map_intr(dev, data, &irq, NULL, NULL);
619 	if (error == 0) {
620 		sc = device_get_softc(dev);
621 		*isrcp = GIC_INTR_ISRC(sc, irq);
622 	}
623 	return (error);
624 }
625 
626 static int
627 gic_v3_setup_intr(device_t dev, struct intr_irqsrc *isrc,
628     struct resource *res, struct intr_map_data *data)
629 {
630 	struct gic_v3_softc *sc = device_get_softc(dev);
631 	struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
632 	enum intr_trigger trig;
633 	enum intr_polarity pol;
634 	uint32_t reg;
635 	u_int irq;
636 	int error;
637 
638 	if (data == NULL)
639 		return (ENOTSUP);
640 
641 	error = do_gic_v3_map_intr(dev, data, &irq, &pol, &trig);
642 	if (error != 0)
643 		return (error);
644 
645 	if (gi->gi_irq != irq || pol == INTR_POLARITY_CONFORM ||
646 	    trig == INTR_TRIGGER_CONFORM)
647 		return (EINVAL);
648 
649 	/* Compare the config if this is not the first setup. */
650 	if (isrc->isrc_handlers != 0) {
651 		if (pol != gi->gi_pol || trig != gi->gi_trig)
652 			return (EINVAL);
653 		else
654 			return (0);
655 	}
656 
657 	gi->gi_pol = pol;
658 	gi->gi_trig = trig;
659 
660 	/*
661 	 * XXX - If a per-CPU interrupt is enabled after SMP has already
662 	 *       started, we need some IPI call that enables it on the other
663 	 *       CPUs as well. Further, it is more complicated because
664 	 *       pic_enable_source() and pic_disable_source() should act on a
665 	 *       per-CPU basis only. This still needs to be solved here somehow.
666 	 */
667 	if (isrc->isrc_flags & INTR_ISRCF_PPI)
668 		CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu);
669 
670 	if (irq >= GIC_FIRST_PPI && irq <= GIC_LAST_SPI) {
671 		mtx_lock_spin(&sc->gic_mtx);
672 
673 		/* Set the trigger mode; ICFGR only controls edge vs. level */
674 		if (irq <= GIC_LAST_PPI)
675 			reg = gic_r_read(sc, 4,
676 			    GICR_SGI_BASE_SIZE + GICD_ICFGR(irq));
677 		else
678 			reg = gic_d_read(sc, 4, GICD_ICFGR(irq));
679 		if (trig == INTR_TRIGGER_LEVEL)
680 			reg &= ~(2 << ((irq % 16) * 2));
681 		else
682 			reg |= 2 << ((irq % 16) * 2);
683 
684 		if (irq <= GIC_LAST_PPI) {
685 			gic_r_write(sc, 4,
686 			    GICR_SGI_BASE_SIZE + GICD_ICFGR(irq), reg);
687 			gic_v3_wait_for_rwp(sc, REDIST);
688 		} else {
689 			gic_d_write(sc, 4, GICD_ICFGR(irq), reg);
690 			gic_v3_wait_for_rwp(sc, DIST);
691 		}
692 
693 		mtx_unlock_spin(&sc->gic_mtx);
694 
695 		gic_v3_bind_intr(dev, isrc);
696 	}
697 
698 	return (0);
699 }
700 
701 static int
702 gic_v3_teardown_intr(device_t dev, struct intr_irqsrc *isrc,
703     struct resource *res, struct intr_map_data *data)
704 {
705 	struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
706 
707 	if (isrc->isrc_handlers == 0) {
708 		gi->gi_pol = INTR_POLARITY_CONFORM;
709 		gi->gi_trig = INTR_TRIGGER_CONFORM;
710 	}
711 
712 	return (0);
713 }
714 
715 static void
716 gic_v3_disable_intr(device_t dev, struct intr_irqsrc *isrc)
717 {
718 	struct gic_v3_softc *sc;
719 	struct gic_v3_irqsrc *gi;
720 	u_int irq;
721 
722 	sc = device_get_softc(dev);
723 	gi = (struct gic_v3_irqsrc *)isrc;
724 	irq = gi->gi_irq;
725 
726 	if (irq <= GIC_LAST_PPI) {
727 		/* SGIs and PPIs in corresponding Re-Distributor */
728 		gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_ICENABLER(irq),
729 		    GICD_I_MASK(irq));
730 		gic_v3_wait_for_rwp(sc, REDIST);
731 	} else if (irq >= GIC_FIRST_SPI && irq <= GIC_LAST_SPI) {
732 		/* SPIs in distributor */
733 		gic_d_write(sc, 4, GICD_ICENABLER(irq), GICD_I_MASK(irq));
734 		gic_v3_wait_for_rwp(sc, DIST);
735 	} else
736 		panic("%s: Unsupported IRQ %u", __func__, irq);
737 }
738 
739 static void
740 gic_v3_enable_intr(device_t dev, struct intr_irqsrc *isrc)
741 {
742 	struct gic_v3_softc *sc;
743 	struct gic_v3_irqsrc *gi;
744 	u_int irq;
745 
746 	sc = device_get_softc(dev);
747 	gi = (struct gic_v3_irqsrc *)isrc;
748 	irq = gi->gi_irq;
749 
750 	if (irq <= GIC_LAST_PPI) {
751 		/* SGIs and PPIs in corresponding Re-Distributor */
752 		gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_ISENABLER(irq),
753 		    GICD_I_MASK(irq));
754 		gic_v3_wait_for_rwp(sc, REDIST);
755 	} else if (irq >= GIC_FIRST_SPI && irq <= GIC_LAST_SPI) {
756 		/* SPIs in distributor */
757 		gic_d_write(sc, 4, GICD_ISENABLER(irq), GICD_I_MASK(irq));
758 		gic_v3_wait_for_rwp(sc, DIST);
759 	} else
760 		panic("%s: Unsupported IRQ %u", __func__, irq);
761 }
762 
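/*
 * For interrupts handed off to an ithread, mask the source and signal EOI
 * before the thread runs, then unmask it once the thread has finished.
 */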
763 static void
764 gic_v3_pre_ithread(device_t dev, struct intr_irqsrc *isrc)
765 {
766 	struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
767 
768 	gic_v3_disable_intr(dev, isrc);
769 	gic_icc_write(EOIR1, gi->gi_irq);
770 }
771 
772 static void
773 gic_v3_post_ithread(device_t dev, struct intr_irqsrc *isrc)
774 {
775 
776 	gic_v3_enable_intr(dev, isrc);
777 }
778 
779 static void
780 gic_v3_post_filter(device_t dev, struct intr_irqsrc *isrc)
781 {
782 	struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
783 
784 	if (gi->gi_trig == INTR_TRIGGER_EDGE)
785 		return;
786 
787 	gic_icc_write(EOIR1, gi->gi_irq);
788 }
789 
790 static int
791 gic_v3_bind_intr(device_t dev, struct intr_irqsrc *isrc)
792 {
793 	struct gic_v3_softc *sc;
794 	struct gic_v3_irqsrc *gi;
795 	int cpu;
796 
797 	gi = (struct gic_v3_irqsrc *)isrc;
798 	if (gi->gi_irq <= GIC_LAST_PPI)
799 		return (EINVAL);
800 
801 	KASSERT(gi->gi_irq >= GIC_FIRST_SPI && gi->gi_irq <= GIC_LAST_SPI,
802 	    ("%s: Attempting to bind an invalid IRQ", __func__));
803 
804 	sc = device_get_softc(dev);
805 
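	/*
	 * If no specific CPU set was requested, spread otherwise unbound SPIs
	 * across the available CPUs in a simple round-robin fashion.
	 */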
806 	if (CPU_EMPTY(&isrc->isrc_cpu)) {
807 		gic_irq_cpu = intr_irq_next_cpu(gic_irq_cpu, &all_cpus);
808 		CPU_SETOF(gic_irq_cpu, &isrc->isrc_cpu);
809 		gic_d_write(sc, 4, GICD_IROUTER(gi->gi_irq),
810 		    CPU_AFFINITY(gic_irq_cpu));
811 	} else {
812 		/*
813 		 * We can only bind to a single CPU so select
814 		 * the first CPU found.
815 		 */
816 		cpu = CPU_FFS(&isrc->isrc_cpu) - 1;
817 		gic_d_write(sc, 4, GICD_IROUTER(gi->gi_irq), CPU_AFFINITY(cpu));
818 	}
819 
820 	return (0);
821 }
822 
823 #ifdef SMP
824 static void
825 gic_v3_init_secondary(device_t dev)
826 {
827 	device_t child;
828 	struct gic_v3_softc *sc;
829 	gic_v3_initseq_t *init_func;
830 	struct intr_irqsrc *isrc;
831 	u_int cpu, irq;
832 	int err, i;
833 
834 	sc = device_get_softc(dev);
835 	cpu = PCPU_GET(cpuid);
836 
837 	/* Run the initialization sequence on this secondary CPU */
838 	for (init_func = gic_v3_secondary_init; *init_func != NULL;
839 	    init_func++) {
840 		err = (*init_func)(sc);
841 		if (err != 0) {
842 			device_printf(dev,
843 			    "Could not initialize GIC for CPU%u\n", cpu);
844 			return;
845 		}
846 	}
847 
848 	/* Unmask attached SGI interrupts. */
849 	for (irq = GIC_FIRST_SGI; irq <= GIC_LAST_SGI; irq++) {
850 		isrc = GIC_INTR_ISRC(sc, irq);
851 		if (intr_isrc_init_on_cpu(isrc, cpu))
852 			gic_v3_enable_intr(dev, isrc);
853 	}
854 
855 	/* Unmask attached PPI interrupts. */
856 	for (irq = GIC_FIRST_PPI; irq <= GIC_LAST_PPI; irq++) {
857 		isrc = GIC_INTR_ISRC(sc, irq);
858 		if (intr_isrc_init_on_cpu(isrc, cpu))
859 			gic_v3_enable_intr(dev, isrc);
860 	}
861 
862 	for (i = 0; i < sc->gic_nchildren; i++) {
863 		child = sc->gic_children[i];
864 		PIC_INIT_SECONDARY(child);
865 	}
866 }
867 
868 static void
869 gic_v3_ipi_send(device_t dev, struct intr_irqsrc *isrc, cpuset_t cpus,
870     u_int ipi)
871 {
872 	struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
873 	uint64_t aff, val, irq;
874 	int i;
875 
876 #define	GIC_AFF_MASK	(CPU_AFF3_MASK | CPU_AFF2_MASK | CPU_AFF1_MASK)
877 #define	GIC_AFFINITY(i)	(CPU_AFFINITY(i) & GIC_AFF_MASK)
878 	aff = GIC_AFFINITY(0);
879 	irq = gi->gi_irq;
880 	val = 0;
881 
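	/*
	 * A single write to ICC_SGI1R_EL1 can target at most 16 CPUs sharing
	 * the same Aff3.Aff2.Aff1 value: the low 16 bits form a target list
	 * indexed by Aff0. Batch the CPUs by affinity group and issue one
	 * write per group.
	 */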
882 	/* Iterate through all CPUs in set */
883 	for (i = 0; i < mp_ncpus; i++) {
884 		/* Move to the next affinity group */
885 		if (aff != GIC_AFFINITY(i)) {
886 			/* Send the IPI */
887 			if (val != 0) {
888 				gic_icc_write(SGI1R, val);
889 				val = 0;
890 			}
891 			aff = GIC_AFFINITY(i);
892 		}
893 
894 		/* Send the IPI to this cpu */
895 		if (CPU_ISSET(i, &cpus)) {
896 #define	ICC_SGI1R_AFFINITY(aff)					\
897     (((uint64_t)CPU_AFF3(aff) << ICC_SGI1R_EL1_AFF3_SHIFT) |	\
898      ((uint64_t)CPU_AFF2(aff) << ICC_SGI1R_EL1_AFF2_SHIFT) |	\
899      ((uint64_t)CPU_AFF1(aff) << ICC_SGI1R_EL1_AFF1_SHIFT))
900 			/* Set the affinity fields when this is the first CPU in the group */
901 			if (val == 0)
902 				val = ICC_SGI1R_AFFINITY(aff) |
903 				    irq << ICC_SGI1R_EL1_SGIID_SHIFT;
904 			/* Set the bit to send the IPI to the CPU */
905 			val |= 1 << CPU_AFF0(CPU_AFFINITY(i));
906 		}
907 	}
908 
909 	/* Send the IPI to the last cpu affinity group */
910 	if (val != 0)
911 		gic_icc_write(SGI1R, val);
912 #undef GIC_AFF_MASK
913 #undef GIC_AFFINITY
914 }
915 
916 static int
917 gic_v3_ipi_setup(device_t dev, u_int ipi, struct intr_irqsrc **isrcp)
918 {
919 	struct intr_irqsrc *isrc;
920 	struct gic_v3_softc *sc = device_get_softc(dev);
921 
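	/*
	 * IPIs are backed by SGIs, which are handed out in ascending order;
	 * once all 16 SGIs (GIC_FIRST_SGI .. GIC_LAST_SGI) are in use there
	 * is no room for another IPI.
	 */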
922 	if (sgi_first_unused > GIC_LAST_SGI)
923 		return (ENOSPC);
924 
925 	isrc = GIC_INTR_ISRC(sc, sgi_first_unused);
926 	sgi_to_ipi[sgi_first_unused++] = ipi;
927 
928 	CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu);
929 
930 	*isrcp = isrc;
931 	return (0);
932 }
933 #endif /* SMP */
934 
935 /*
936  * Helper routines
937  */
938 static void
939 gic_v3_wait_for_rwp(struct gic_v3_softc *sc, enum gic_v3_xdist xdist)
940 {
941 	struct resource *res;
942 	u_int cpuid;
943 	size_t us_left = 1000000;
944 
945 	cpuid = PCPU_GET(cpuid);
946 
947 	switch (xdist) {
948 	case DIST:
949 		res = sc->gic_dist;
950 		break;
951 	case REDIST:
952 		res = sc->gic_redists.pcpu[cpuid];
953 		break;
954 	default:
955 		KASSERT(0, ("%s: Attempt to wait for unknown RWP", __func__));
956 		return;
957 	}
958 
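	/*
	 * The register-write-pending bit stays set while changes made by
	 * earlier writes are still taking effect; poll until it clears.
	 */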
959 	while ((bus_read_4(res, GICD_CTLR) & GICD_CTLR_RWP) != 0) {
960 		DELAY(1);
961 		if (us_left-- == 0)
962 			panic("GIC register write pending for too long");
963 	}
964 }
965 
966 /* CPU interface. */
967 static __inline void
968 gic_v3_cpu_priority(uint64_t mask)
969 {
970 
971 	/* Set priority mask */
972 	gic_icc_write(PMR, mask & ICC_PMR_EL1_PRIO_MASK);
973 }
974 
975 static int
976 gic_v3_cpu_enable_sre(struct gic_v3_softc *sc)
977 {
978 	uint64_t sre;
979 	u_int cpuid;
980 
981 	cpuid = PCPU_GET(cpuid);
982 	/*
983 	 * Set the SRE bit to enable access to GIC CPU interface
984 	 * via system registers.
985 	 */
986 	sre = READ_SPECIALREG(icc_sre_el1);
987 	sre |= ICC_SRE_EL1_SRE;
988 	WRITE_SPECIALREG(icc_sre_el1, sre);
989 	isb();
990 	/*
991 	 * Now ensure that the bit is set.
992 	 */
993 	sre = READ_SPECIALREG(icc_sre_el1);
994 	if ((sre & ICC_SRE_EL1_SRE) == 0) {
995 		/* Give up; SRE access was disabled at a higher exception level (EL2/EL3) */
996 		device_printf(sc->dev, "ERROR: CPU%u cannot enable CPU interface "
997 		    "via system registers\n", cpuid);
998 		return (ENXIO);
999 	} else if (bootverbose) {
1000 		device_printf(sc->dev,
1001 		    "CPU%u enabled CPU interface via system registers\n",
1002 		    cpuid);
1003 	}
1004 
1005 	return (0);
1006 }
1007 
1008 static int
1009 gic_v3_cpu_init(struct gic_v3_softc *sc)
1010 {
1011 	int err;
1012 
1013 	/* Enable access to CPU interface via system registers */
1014 	err = gic_v3_cpu_enable_sre(sc);
1015 	if (err != 0)
1016 		return (err);
1017 	/* Priority mask to minimum - accept all interrupts */
1018 	gic_v3_cpu_priority(GIC_PRIORITY_MIN);
1019 	/* Disable EOI mode */
1020 	gic_icc_clear(CTLR, ICC_CTLR_EL1_EOIMODE);
1021 	/* Enable Group 1 (Non-secure) interrupts */
1022 	gic_icc_set(IGRPEN1, ICC_IGRPEN0_EL1_EN);
1023 
1024 	return (0);
1025 }
1026 
1027 /* Distributor */
1028 static int
1029 gic_v3_dist_init(struct gic_v3_softc *sc)
1030 {
1031 	uint64_t aff;
1032 	u_int i;
1033 
1034 	/*
1035 	 * 1. Disable the Distributor
1036 	 */
1037 	gic_d_write(sc, 4, GICD_CTLR, 0);
1038 	gic_v3_wait_for_rwp(sc, DIST);
1039 
1040 	/*
1041 	 * 2. Configure the Distributor
1042 	 */
1043 	/* Set all SPIs to be Group 1 Non-secure */
1044 	for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i += GICD_I_PER_IGROUPRn)
1045 		gic_d_write(sc, 4, GICD_IGROUPR(i), 0xFFFFFFFF);
1046 
1047 	/* Set all global interrupts (SPIs) to be level-sensitive. */
1048 	for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i += GICD_I_PER_ICFGRn)
1049 		gic_d_write(sc, 4, GICD_ICFGR(i), 0x00000000);
1050 
1051 	/* Set a default priority for all shared interrupts */
1052 	for (i = GIC_FIRST_SPI;
1053 	    i < sc->gic_nirqs; i += GICD_I_PER_IPRIORITYn) {
1054 		/* Set highest priority */
1055 		gic_d_write(sc, 4, GICD_IPRIORITYR(i), GIC_PRIORITY_MAX);
1056 	}
1057 
1058 	/*
1059 	 * Disable all SPIs. PPIs and SGIs are left alone as they are
1060 	 * enabled and disabled through the Re-Distributor registers.
1061 	 */
1062 	for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i += GICD_I_PER_ISENABLERn)
1063 		gic_d_write(sc, 4, GICD_ICENABLER(i), 0xFFFFFFFF);
1064 
1065 	gic_v3_wait_for_rwp(sc, DIST);
1066 
1067 	/*
1068 	 * 3. Enable Distributor
1069 	 */
1070 	/* Enable Distributor with ARE, Group 1 */
1071 	gic_d_write(sc, 4, GICD_CTLR, GICD_CTLR_ARE_NS | GICD_CTLR_G1A |
1072 	    GICD_CTLR_G1);
1073 
1074 	/*
1075 	 * 4. Route all interrupts to boot CPU.
1076 	 */
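	/*
	 * With affinity routing (ARE) enabled, SPI routing is controlled by
	 * the per-interrupt GICD_IROUTER registers, each holding the affinity
	 * value of the target CPU.
	 */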
1077 	aff = CPU_AFFINITY(0);
1078 	for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i++)
1079 		gic_d_write(sc, 4, GICD_IROUTER(i), aff);
1080 
1081 	return (0);
1082 }
1083 
1084 /* Re-Distributor */
1085 static int
1086 gic_v3_redist_alloc(struct gic_v3_softc *sc)
1087 {
1088 	u_int cpuid;
1089 
1090 	/* Allocate a struct resource for each CPU's Re-Distributor registers */
1091 	for (cpuid = 0; cpuid < mp_ncpus; cpuid++)
1092 		if (CPU_ISSET(cpuid, &all_cpus) != 0)
1093 			sc->gic_redists.pcpu[cpuid] =
1094 				malloc(sizeof(*sc->gic_redists.pcpu[0]),
1095 				    M_GIC_V3, M_WAITOK);
1096 		else
1097 			sc->gic_redists.pcpu[cpuid] = NULL;
1098 	return (0);
1099 }
1100 
1101 static int
1102 gic_v3_redist_find(struct gic_v3_softc *sc)
1103 {
1104 	struct resource r_res;
1105 	bus_space_handle_t r_bsh;
1106 	uint64_t aff;
1107 	uint64_t typer;
1108 	uint32_t pidr2;
1109 	u_int cpuid;
1110 	size_t i;
1111 
1112 	cpuid = PCPU_GET(cpuid);
1113 
1114 	aff = CPU_AFFINITY(cpuid);
1115 	/* Affinity in format for comparison with typer */
1116 	aff = (CPU_AFF3(aff) << 24) | (CPU_AFF2(aff) << 16) |
1117 	    (CPU_AFF1(aff) << 8) | CPU_AFF0(aff);
1118 
1119 	if (bootverbose) {
1120 		device_printf(sc->dev,
1121 		    "Start searching for Re-Distributor\n");
1122 	}
1123 	/* Iterate through Re-Distributor regions */
1124 	for (i = 0; i < sc->gic_redists.nregions; i++) {
1125 		/* Take a copy of the region's resource */
1126 		r_res = *sc->gic_redists.regions[i];
1127 		r_bsh = rman_get_bushandle(&r_res);
1128 
1129 		pidr2 = bus_read_4(&r_res, GICR_PIDR2);
1130 		switch (GICR_PIDR2_ARCH(pidr2)) {
1131 		case GICR_PIDR2_ARCH_GICv3: /* fall through */
1132 		case GICR_PIDR2_ARCH_GICv4:
1133 			break;
1134 		default:
1135 			device_printf(sc->dev,
1136 			    "No Re-Distributor found for CPU%u\n", cpuid);
1137 			return (ENODEV);
1138 		}
1139 
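		/*
		 * Each Re-Distributor occupies an RD_base frame and an
		 * SGI_base frame, plus two further frames when GICR_TYPER
		 * reports VLPI support (GICv4). Walk the frames in this
		 * region until one matches this CPU's affinity or the
		 * GICR_TYPER.Last bit marks the end of the region.
		 */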
1140 		do {
1141 			typer = bus_read_8(&r_res, GICR_TYPER);
1142 			if ((typer >> GICR_TYPER_AFF_SHIFT) == aff) {
1143 				KASSERT(sc->gic_redists.pcpu[cpuid] != NULL,
1144 				    ("Invalid pointer to per-CPU redistributor"));
1145 				/* Copy res contents to its final destination */
1146 				*sc->gic_redists.pcpu[cpuid] = r_res;
1147 				if (bootverbose) {
1148 					device_printf(sc->dev,
1149 					    "CPU%u Re-Distributor has been found\n",
1150 					    cpuid);
1151 				}
1152 				return (0);
1153 			}
1154 
1155 			r_bsh += (GICR_RD_BASE_SIZE + GICR_SGI_BASE_SIZE);
1156 			if ((typer & GICR_TYPER_VLPIS) != 0) {
1157 				r_bsh +=
1158 				    (GICR_VLPI_BASE_SIZE + GICR_RESERVED_SIZE);
1159 			}
1160 
1161 			rman_set_bushandle(&r_res, r_bsh);
1162 		} while ((typer & GICR_TYPER_LAST) == 0);
1163 	}
1164 
1165 	device_printf(sc->dev, "No Re-Distributor found for CPU%u\n", cpuid);
1166 	return (ENXIO);
1167 }
1168 
1169 static int
1170 gic_v3_redist_wake(struct gic_v3_softc *sc)
1171 {
1172 	uint32_t waker;
1173 	size_t us_left = 1000000;
1174 
1175 	waker = gic_r_read(sc, 4, GICR_WAKER);
1176 	/* Wake up Re-Distributor for this CPU */
1177 	waker &= ~GICR_WAKER_PS;
1178 	gic_r_write(sc, 4, GICR_WAKER, waker);
1179 	/*
1180 	 * After clearing the ProcessorSleep bit it is required to wait for
1181 	 * ChildrenAsleep to become zero following the processor power-on.
1182 	 */
1183 	while ((gic_r_read(sc, 4, GICR_WAKER) & GICR_WAKER_CA) != 0) {
1184 		DELAY(1);
1185 		if (us_left-- == 0) {
1186 			panic("Could not wake Re-Distributor for CPU%u",
1187 			    PCPU_GET(cpuid));
1188 		}
1189 	}
1190 
1191 	if (bootverbose) {
1192 		device_printf(sc->dev, "CPU%u Re-Distributor woke up\n",
1193 		    PCPU_GET(cpuid));
1194 	}
1195 
1196 	return (0);
1197 }
1198 
1199 static int
1200 gic_v3_redist_init(struct gic_v3_softc *sc)
1201 {
1202 	int err;
1203 	size_t i;
1204 
1205 	err = gic_v3_redist_find(sc);
1206 	if (err != 0)
1207 		return (err);
1208 
1209 	err = gic_v3_redist_wake(sc);
1210 	if (err != 0)
1211 		return (err);
1212 
1213 	/* Configure SGIs and PPIs to be Group1 Non-secure */
1214 	gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICR_IGROUPR0,
1215 	    0xFFFFFFFF);
1216 
1217 	/* Disable PPIs */
1218 	gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICR_ICENABLER0,
1219 	    GICR_I_ENABLER_PPI_MASK);
1220 	/* Enable SGIs */
1221 	gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICR_ISENABLER0,
1222 	    GICR_I_ENABLER_SGI_MASK);
1223 
1224 	/* Set priority for SGIs and PPIs */
1225 	for (i = 0; i <= GIC_LAST_PPI; i += GICR_I_PER_IPRIORITYn) {
1226 		gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_IPRIORITYR(i),
1227 		    GIC_PRIORITY_MAX);
1228 	}
1229 
1230 	gic_v3_wait_for_rwp(sc, REDIST);
1231 
1232 	return (0);
1233 }
1234