xref: /openbsd/sys/arch/arm64/dev/agintc.c (revision 53f700b0)
/* $OpenBSD: agintc.c,v 1.62 2025/01/24 20:17:28 kettenis Exp $ */
/*
 * Copyright (c) 2007, 2009, 2011, 2017 Dale Rahn <drahn@dalerahn.com>
 * Copyright (c) 2018 Mark Kettenis <kettenis@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This is a device driver for the GICv3/GICv4 IP from ARM as specified
 * in IHI0069C; an example of this hardware is the GIC 500.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/queue.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/evcount.h>

#include <machine/bus.h>
#include <machine/cpufunc.h>
#include <machine/fdt.h>

#include <dev/ofw/fdt.h>
#include <dev/ofw/openfirm.h>

#include <machine/simplebusvar.h>

#define ICC_PMR		s3_0_c4_c6_0
#define ICC_IAR0	s3_0_c12_c8_0
#define ICC_EOIR0	s3_0_c12_c8_1
#define ICC_HPPIR0	s3_0_c12_c8_2
#define ICC_BPR0	s3_0_c12_c8_3

#define ICC_DIR		s3_0_c12_c11_1
#define ICC_RPR		s3_0_c12_c11_3
#define ICC_SGI1R	s3_0_c12_c11_5
#define ICC_SGI0R	s3_0_c12_c11_7

#define ICC_IAR1	s3_0_c12_c12_0
#define ICC_EOIR1	s3_0_c12_c12_1
#define ICC_HPPIR1	s3_0_c12_c12_2
#define ICC_BPR1	s3_0_c12_c12_3
#define ICC_CTLR	s3_0_c12_c12_4
#define ICC_SRE_EL1	s3_0_c12_c12_5
#define  ICC_SRE_EL1_EN		0x7
#define ICC_IGRPEN0	s3_0_c12_c12_6
#define ICC_IGRPEN1	s3_0_c12_c12_7

#define _STR(x) #x
#define STR(x) _STR(x)
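
/*
 * The ICC_* system registers above are spelled with their raw
 * s<op0>_<op1>_<crn>_<crm>_<op2> encodings so they assemble even with
 * toolchains that don't know the GICv3 register names.  STR() expands
 * and stringifies an encoding for use in inline assembly, e.g.
 * STR(ICC_PMR) yields "s3_0_c4_c6_0" inside an "mrs"/"msr" instruction.
 */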

/* distributor registers */
#define GICD_CTLR		0x0000
/* non-secure */
#define  GICD_CTLR_RWP			(1U << 31)
#define  GICD_CTLR_EnableGrp1		(1 << 0)
#define  GICD_CTLR_EnableGrp1A		(1 << 1)
#define  GICD_CTLR_ARE_NS		(1 << 4)
#define  GICD_CTLR_DS			(1 << 6)
#define GICD_TYPER		0x0004
#define  GICD_TYPER_MBIS		(1 << 16)
#define  GICD_TYPER_LPIS		(1 << 17)
#define  GICD_TYPER_ITLINE_M		0x1f
#define GICD_IIDR		0x0008
#define GICD_SETSPI_NSR		0x0040
#define GICD_CLRSPI_NSR		0x0048
#define GICD_IGROUPR(i)		(0x0080 + (IRQ_TO_REG32(i) * 4))
#define GICD_ISENABLER(i)	(0x0100 + (IRQ_TO_REG32(i) * 4))
#define GICD_ICENABLER(i)	(0x0180 + (IRQ_TO_REG32(i) * 4))
#define GICD_ISPENDR(i)		(0x0200 + (IRQ_TO_REG32(i) * 4))
#define GICD_ICPENDR(i)		(0x0280 + (IRQ_TO_REG32(i) * 4))
#define GICD_ISACTIVER(i)	(0x0300 + (IRQ_TO_REG32(i) * 4))
#define GICD_ICACTIVER(i)	(0x0380 + (IRQ_TO_REG32(i) * 4))
#define GICD_IPRIORITYR(i)	(0x0400 + (i))
#define GICD_ICFGR(i)		(0x0c00 + (IRQ_TO_REG16(i) * 4))
#define  GICD_ICFGR_TRIG_LEVEL(i)	(0x0 << (IRQ_TO_REG16BIT(i) * 2))
#define  GICD_ICFGR_TRIG_EDGE(i)	(0x2 << (IRQ_TO_REG16BIT(i) * 2))
#define  GICD_ICFGR_TRIG_MASK(i)	(0x2 << (IRQ_TO_REG16BIT(i) * 2))
#define GICD_IGRPMODR(i)	(0x0d00 + (IRQ_TO_REG32(i) * 4))
#define GICD_NSACR(i)		(0x0e00 + (IRQ_TO_REG16(i) * 4))
#define GICD_IROUTER(i)		(0x6000 + ((i) * 8))

/* redistributor registers */
#define GICR_CTLR		0x00000
#define  GICR_CTLR_RWP			((1U << 31) | (1 << 3))
#define  GICR_CTLR_ENABLE_LPIS		(1 << 0)
#define GICR_IIDR		0x00004
#define GICR_TYPER		0x00008
#define  GICR_TYPER_LAST		(1 << 4)
#define  GICR_TYPER_VLPIS		(1 << 1)
#define GICR_WAKER		0x00014
#define  GICR_WAKER_X31			(1U << 31)
#define  GICR_WAKER_CHILDRENASLEEP	(1 << 2)
#define  GICR_WAKER_PROCESSORSLEEP	(1 << 1)
#define  GICR_WAKER_X0			(1 << 0)
#define GICR_PROPBASER		0x00070
#define  GICR_PROPBASER_ISH		(1ULL << 10)
#define  GICR_PROPBASER_IC_NORM_NC	(1ULL << 7)
#define GICR_PENDBASER		0x00078
#define  GICR_PENDBASER_PTZ		(1ULL << 62)
#define  GICR_PENDBASER_ISH		(1ULL << 10)
#define  GICR_PENDBASER_IC_NORM_NC	(1ULL << 7)
#define GICR_IGROUPR0		0x10080
#define GICR_ISENABLE0		0x10100
#define GICR_ICENABLE0		0x10180
#define GICR_ISPENDR0		0x10200
#define GICR_ICPENDR0		0x10280
#define GICR_ISACTIVE0		0x10300
#define GICR_ICACTIVE0		0x10380
#define GICR_IPRIORITYR(i)	(0x10400 + (i))
#define GICR_ICFGR0		0x10c00
#define GICR_ICFGR1		0x10c04
#define GICR_IGRPMODR0		0x10d00

#define GICR_PROP_SIZE		(64 * 1024)
#define  GICR_PROP_GROUP1	(1 << 1)
#define  GICR_PROP_ENABLE	(1 << 0)
#define GICR_PEND_SIZE		(64 * 1024)

#define PPI_BASE		16
#define SPI_BASE		32
#define LPI_BASE		8192

#define IRQ_TO_REG32(i)		(((i) >> 5) & 0x1f)
#define IRQ_TO_REG32BIT(i)	((i) & 0x1f)

#define IRQ_TO_REG16(i)		(((i) >> 4) & 0x3f)
#define IRQ_TO_REG16BIT(i)	((i) & 0xf)
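
/*
 * Most GICD register files pack one bit (or one 2-bit field) per
 * interrupt into 32-bit registers.  IRQ_TO_REG32() selects the register
 * index and IRQ_TO_REG32BIT() the bit within it; e.g. IRQ 39 is bit 7
 * of the second GICD_ISENABLER register.  The REG16 variants do the
 * same for the 2-bit trigger fields in GICD_ICFGR, 16 IRQs per register.
 */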

#define IRQ_ENABLE	1
#define IRQ_DISABLE	0

struct agintc_mbi_range {
	int			  mr_base;
	int			  mr_span;
	void			**mr_mbi;
};

struct agintc_lpi_info {
	struct agintc_msi_softc	*li_msic;
	struct cpu_info		*li_ci;
	uint32_t		 li_deviceid;
	uint32_t		 li_eventid;
	struct intrhand		*li_ih;
};

struct agintc_softc {
	struct simplebus_softc	 sc_sbus;
	struct intrq		*sc_handler;
	struct agintc_lpi_info	**sc_lpi;
	bus_space_tag_t		 sc_iot;
	bus_space_handle_t	 sc_d_ioh;
	bus_space_handle_t	*sc_r_ioh;
	bus_space_handle_t	*sc_rbase_ioh;
	bus_dma_tag_t		 sc_dmat;
	uint16_t		*sc_processor;
	int			 sc_cpuremap[MAXCPUS];
	int			 sc_nintr;
	int			 sc_nlpi;
	bus_addr_t		 sc_mbi_addr;
	int			 sc_mbi_nranges;
	struct agintc_mbi_range	*sc_mbi_ranges;
	int			 sc_prio_shift;
	int			 sc_pmr_shift;
	int			 sc_rk3399_quirk;
	struct evcount		 sc_spur;
	int			 sc_ncells;
	int			 sc_num_redist;
	int			 sc_num_redist_regions;
	struct agintc_dmamem	*sc_prop;
	struct agintc_dmamem	*sc_pend;
	struct interrupt_controller sc_ic;
	int			 sc_ipi_num[3]; /* id for each ipi */
	int			 sc_ipi_reason[MAXCPUS]; /* cause of ipi */
	void			*sc_ipi_irq[3]; /* irqhandle for each ipi */
};
struct agintc_softc *agintc_sc;

struct intrhand {
	TAILQ_ENTRY(intrhand)	 ih_list;		/* link on intrq list */
	int			(*ih_func)(void *);	/* handler */
	void			*ih_arg;		/* arg for handler */
	int			 ih_ipl;		/* IPL_* */
	int			 ih_flags;
	int			 ih_irq;		/* IRQ number */
	struct evcount		 ih_count;
	char			*ih_name;
	struct cpu_info		*ih_ci;			/* CPU the IRQ runs on */
};

struct intrq {
	TAILQ_HEAD(, intrhand)	iq_list;	/* handler list */
	struct cpu_info		*iq_ci;		/* CPU the IRQ runs on */
	int			iq_irq_max;	/* IRQ to mask while handling */
	int			iq_irq_min;	/* lowest IRQ when shared */
	int			iq_ist;		/* share type */
	int			iq_route;
};

struct agintc_dmamem {
	bus_dmamap_t		adm_map;
	bus_dma_segment_t	adm_seg;
	size_t			adm_size;
	caddr_t			adm_kva;
};

#define AGINTC_DMA_MAP(_adm)	((_adm)->adm_map)
#define AGINTC_DMA_LEN(_adm)	((_adm)->adm_size)
#define AGINTC_DMA_DVA(_adm)	((_adm)->adm_map->dm_segs[0].ds_addr)
#define AGINTC_DMA_KVA(_adm)	((void *)(_adm)->adm_kva)

struct agintc_dmamem *agintc_dmamem_alloc(bus_dma_tag_t, bus_size_t,
		    bus_size_t);
void		agintc_dmamem_free(bus_dma_tag_t, struct agintc_dmamem *);

int		agintc_match(struct device *, void *, void *);
void		agintc_attach(struct device *, struct device *, void *);
void		agintc_mbiinit(struct agintc_softc *, int, bus_addr_t);
void		agintc_cpuinit(void);
int		agintc_spllower(int);
void		agintc_splx(int);
int		agintc_splraise(int);
void		agintc_setipl(int);
void		agintc_enable_wakeup(void);
void		agintc_disable_wakeup(void);
void		agintc_calc_mask(void);
void		agintc_calc_irq(struct agintc_softc *sc, int irq);
void		*agintc_intr_establish(int, int, int, struct cpu_info *,
		    int (*)(void *), void *, char *);
void		*agintc_intr_establish_fdt(void *cookie, int *cell, int level,
		    struct cpu_info *, int (*func)(void *), void *arg, char *name);
void		*agintc_intr_establish_mbi(void *, uint64_t *, uint64_t *,
		    int, struct cpu_info *, int (*)(void *), void *, char *);
void		agintc_intr_disestablish(void *);
void		agintc_intr_set_wakeup(void *);
void		agintc_irq_handler(void *);
uint32_t	agintc_iack(void);
void		agintc_eoi(uint32_t);
void		agintc_set_priority(struct agintc_softc *sc, int, int);
void		agintc_intr_enable(struct agintc_softc *, int);
void		agintc_intr_disable(struct agintc_softc *, int);
void		agintc_intr_config(struct agintc_softc *, int, int);
void		agintc_route(struct agintc_softc *, int, int,
		    struct cpu_info *);
void		agintc_route_irq(void *, int, struct cpu_info *);
void		agintc_intr_barrier(void *);
void		agintc_r_wait_rwp(struct agintc_softc *sc);

int		agintc_ipi_ddb(void *v);
int		agintc_ipi_halt(void *v);
int		agintc_ipi_nop(void *v);
int		agintc_ipi_combined(void *);
void		agintc_send_ipi(struct cpu_info *, int);

void		agintc_msi_discard(struct agintc_lpi_info *);
void		agintc_msi_inv(struct agintc_lpi_info *);

const struct cfattach	agintc_ca = {
	sizeof (struct agintc_softc), agintc_match, agintc_attach
};

struct cfdriver agintc_cd = {
	NULL, "agintc", DV_DULL
};

static char *agintc_compatibles[] = {
	"arm,gic-v3",
	"arm,gic-v4",
	NULL
};

int
agintc_match(struct device *parent, void *cfdata, void *aux)
{
	struct fdt_attach_args *faa = aux;
	int i;

	for (i = 0; agintc_compatibles[i]; i++)
		if (OF_is_compatible(faa->fa_node, agintc_compatibles[i]))
			return (1);

	return (0);
}

static void
__isb(void)
{
	__asm volatile("isb");
}

void
agintc_attach(struct device *parent, struct device *self, void *aux)
{
	struct agintc_softc	*sc = (struct agintc_softc *)self;
	struct fdt_attach_args	*faa = aux;
	struct cpu_info		*ci;
	CPU_INFO_ITERATOR	 cii;
	u_long			 psw;
	uint32_t		 typer;
	uint32_t		 nsacr, oldnsacr;
	uint32_t		 pmr, oldpmr;
	uint32_t		 ctrl, bits;
	uint32_t		 affinity;
	uint64_t		 redist_stride;
	int			 i, nbits, nintr;
	int			 idx, offset, nredist;
#ifdef MULTIPROCESSOR
	int			 nipi, ipiirq[3];
#endif

	psw = intr_disable();
	arm_init_smask();

	sc->sc_iot = faa->fa_iot;
	sc->sc_dmat = faa->fa_dmat;

	sc->sc_num_redist_regions =
	    OF_getpropint(faa->fa_node, "#redistributor-regions", 1);

	if (faa->fa_nreg < sc->sc_num_redist_regions + 1)
		panic("%s: missing registers", __func__);

	if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr,
	    faa->fa_reg[0].size, 0, &sc->sc_d_ioh))
		panic("%s: GICD bus_space_map failed", __func__);

	sc->sc_rbase_ioh = mallocarray(sc->sc_num_redist_regions,
	    sizeof(*sc->sc_rbase_ioh), M_DEVBUF, M_WAITOK);
	for (idx = 0; idx < sc->sc_num_redist_regions; idx++) {
		if (bus_space_map(sc->sc_iot, faa->fa_reg[1 + idx].addr,
		    faa->fa_reg[1 + idx].size, 0, &sc->sc_rbase_ioh[idx]))
			panic("%s: GICR bus_space_map failed", __func__);
	}

	typer = bus_space_read_4(sc->sc_iot, sc->sc_d_ioh, GICD_TYPER);

	if (typer & GICD_TYPER_LPIS) {
		/* Allocate redistributor tables */
		sc->sc_prop = agintc_dmamem_alloc(sc->sc_dmat,
		    GICR_PROP_SIZE, GICR_PROP_SIZE);
		if (sc->sc_prop == NULL) {
			printf(": can't alloc LPI config table\n");
			goto unmap;
		}
		sc->sc_pend = agintc_dmamem_alloc(sc->sc_dmat,
		    GICR_PEND_SIZE, GICR_PEND_SIZE);
		if (sc->sc_pend == NULL) {
			printf(": can't alloc LPI pending table\n");
			goto unmap;
		}

		/* Minimum number of LPIs supported by any implementation. */
		sc->sc_nlpi = 8192;
	}

	if (typer & GICD_TYPER_MBIS)
		agintc_mbiinit(sc, faa->fa_node, faa->fa_reg[0].addr);

	/*
	 * We are guaranteed to have at least 16 priority levels, so
	 * in principle we just want to use the top 4 bits of the
	 * (non-secure) priority field.
	 */
	sc->sc_prio_shift = sc->sc_pmr_shift = 4;

	/*
	 * If the system supports two security states and SCR_EL3.FIQ
	 * is zero, the non-secure shifted view applies.  We detect
	 * this by checking whether the number of writable bits
	 * matches the number of implemented priority bits.  If that
	 * is the case we will need to adjust the priorities that we
	 * write into ICC_PMR_EL1 accordingly.
	 *
	 * On Ampere eMAG it appears as if there are five writable
	 * bits when we write 0xff.  But for higher priorities
	 * (smaller values) only the top 4 bits stick.  So we use 0xbf
	 * instead to determine the number of writable bits.
	 */
	ctrl = bus_space_read_4(sc->sc_iot, sc->sc_d_ioh, GICD_CTLR);
	if ((ctrl & GICD_CTLR_DS) == 0) {
		__asm volatile("mrs %x0, "STR(ICC_CTLR_EL1) : "=r"(ctrl));
		nbits = ICC_CTLR_EL1_PRIBITS(ctrl) + 1;
		__asm volatile("mrs %x0, "STR(ICC_PMR) : "=r"(oldpmr));
		__asm volatile("msr "STR(ICC_PMR)", %x0" :: "r"(0xbf));
		__asm volatile("mrs %x0, "STR(ICC_PMR) : "=r"(pmr));
		__asm volatile("msr "STR(ICC_PMR)", %x0" :: "r"(oldpmr));
		if (nbits == 8 - (ffs(pmr) - 1))
			sc->sc_pmr_shift--;
	}

	/*
	 * The Rockchip RK3399 is busted.  Its GIC-500 treats all
	 * access to its memory mapped registers as "secure".  As a
	 * result, several registers don't behave as expected.  For
	 * example, the GICD_IPRIORITYRn and GICR_IPRIORITYRn
	 * registers expose the full priority range available to
	 * secure interrupts.  We need to be aware of this and write
	 * an adjusted priority value into these registers.  We also
	 * need to be careful not to touch any bits that shouldn't be
	 * writable in non-secure mode.
	 *
	 * We check whether we have secure mode access to these
	 * registers by attempting to write to the GICD_NSACR register
	 * and checking whether its contents actually change.  In that
	 * case we need to adjust the priorities we write into
	 * GICD_IPRIORITYRn and GICR_IPRIORITYRn accordingly.
	 */
	oldnsacr = bus_space_read_4(sc->sc_iot, sc->sc_d_ioh, GICD_NSACR(32));
	bus_space_write_4(sc->sc_iot, sc->sc_d_ioh, GICD_NSACR(32),
	    oldnsacr ^ 0xffffffff);
	nsacr = bus_space_read_4(sc->sc_iot, sc->sc_d_ioh, GICD_NSACR(32));
	if (nsacr != oldnsacr) {
		bus_space_write_4(sc->sc_iot, sc->sc_d_ioh, GICD_NSACR(32),
		    oldnsacr);
		sc->sc_rk3399_quirk = 1;
		sc->sc_prio_shift--;
		printf(" sec");
	}

	printf(" shift %d:%d", sc->sc_prio_shift, sc->sc_pmr_shift);

	evcount_attach(&sc->sc_spur, "irq1023/spur", NULL);

	__asm volatile("msr "STR(ICC_SRE_EL1)", %x0" : : "r" (ICC_SRE_EL1_EN));
	__isb();

	nintr = 32 * (typer & GICD_TYPER_ITLINE_M);
	nintr += 32; /* ITLinesNumber + 1; irqs 0-15 are SGIs, 16-31 PPIs */
	sc->sc_nintr = nintr;

	agintc_sc = sc; /* save this for global access */

	/* find the redistributors. */
	idx = 0;
	offset = 0;
	redist_stride = OF_getpropint64(faa->fa_node, "redistributor-stride", 0);
	for (nredist = 0; idx < sc->sc_num_redist_regions; nredist++) {
		uint64_t typer;
		int32_t sz;

		typer = bus_space_read_8(sc->sc_iot, sc->sc_rbase_ioh[idx],
		    offset + GICR_TYPER);
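
		/*
		 * Without an explicit "redistributor-stride", each
		 * redistributor occupies two 64KB frames (RD_base and
		 * SGI_base), or four when the GICv4 VLPI frames are
		 * implemented.
		 */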
		if (redist_stride == 0) {
			sz = (64 * 1024 * 2);
			if (typer & GICR_TYPER_VLPIS)
				sz += (64 * 1024 * 2);
		} else
			sz = redist_stride;

#ifdef DEBUG_AGINTC
		printf("probing redistributor %d %x\n", nredist, offset);
#endif

		offset += sz;
		if (offset >= faa->fa_reg[1 + idx].size ||
		    typer & GICR_TYPER_LAST) {
			offset = 0;
			idx++;
		}
	}

	sc->sc_num_redist = nredist;
	printf(" nirq %d nredist %d", nintr, sc->sc_num_redist);

	sc->sc_r_ioh = mallocarray(sc->sc_num_redist,
	    sizeof(*sc->sc_r_ioh), M_DEVBUF, M_WAITOK);
	sc->sc_processor = mallocarray(sc->sc_num_redist,
	    sizeof(*sc->sc_processor), M_DEVBUF, M_WAITOK);

	/* submap and configure the redistributors. */
	idx = 0;
	offset = 0;
	for (nredist = 0; nredist < sc->sc_num_redist; nredist++) {
		uint64_t typer;
		int32_t sz;

		typer = bus_space_read_8(sc->sc_iot, sc->sc_rbase_ioh[idx],
		    offset + GICR_TYPER);

		if (redist_stride == 0) {
			sz = (64 * 1024 * 2);
			if (typer & GICR_TYPER_VLPIS)
				sz += (64 * 1024 * 2);
		} else
			sz = redist_stride;

		affinity = bus_space_read_8(sc->sc_iot,
		    sc->sc_rbase_ioh[idx], offset + GICR_TYPER) >> 32;
		CPU_INFO_FOREACH(cii, ci) {
			if (affinity == (((ci->ci_mpidr >> 8) & 0xff000000) |
			    (ci->ci_mpidr & 0x00ffffff)))
				break;
		}
		if (ci != NULL)
			sc->sc_cpuremap[ci->ci_cpuid] = nredist;

		sc->sc_processor[nredist] = bus_space_read_8(sc->sc_iot,
		    sc->sc_rbase_ioh[idx], offset + GICR_TYPER) >> 8;

		bus_space_subregion(sc->sc_iot, sc->sc_rbase_ioh[idx],
		    offset, sz, &sc->sc_r_ioh[nredist]);

		if (sc->sc_nlpi > 0) {
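			/*
			 * Point this redistributor at the shared LPI
			 * configuration table and the pending table.
			 * The low bits of GICR_PROPBASER hold the
			 * number of interrupt ID bits minus one, so
			 * fls(LPI_BASE + sc_nlpi - 1) - 1 covers every
			 * LPI we can use; PTZ advertises that the
			 * pending table starts out zeroed.
			 */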
			bus_space_write_8(sc->sc_iot, sc->sc_rbase_ioh[idx],
			    offset + GICR_PROPBASER,
			    AGINTC_DMA_DVA(sc->sc_prop) |
			    GICR_PROPBASER_ISH | GICR_PROPBASER_IC_NORM_NC |
			    fls(LPI_BASE + sc->sc_nlpi - 1) - 1);
			bus_space_write_8(sc->sc_iot, sc->sc_rbase_ioh[idx],
			    offset + GICR_PENDBASER,
			    AGINTC_DMA_DVA(sc->sc_pend) |
			    GICR_PENDBASER_ISH | GICR_PENDBASER_IC_NORM_NC |
			    GICR_PENDBASER_PTZ);
			bus_space_write_4(sc->sc_iot, sc->sc_rbase_ioh[idx],
			    offset + GICR_CTLR, GICR_CTLR_ENABLE_LPIS);
		}

		offset += sz;
		if (offset >= faa->fa_reg[1 + idx].size ||
		    typer & GICR_TYPER_LAST) {
			offset = 0;
			idx++;
		}
	}

	/* Disable all interrupts, clear all pending */
	for (i = 1; i < nintr / 32; i++) {
		bus_space_write_4(sc->sc_iot, sc->sc_d_ioh,
		    GICD_ICACTIVER(i * 32), ~0);
		bus_space_write_4(sc->sc_iot, sc->sc_d_ioh,
		    GICD_ICENABLER(i * 32), ~0);
	}

	for (i = 4; i < nintr; i += 4) {
		/* lowest priority ?? */
		bus_space_write_4(sc->sc_iot, sc->sc_d_ioh,
		    GICD_IPRIORITYR(i), 0xffffffff);
	}

	/* Set all interrupts to G1NS */
	for (i = 1; i < nintr / 32; i++) {
		bus_space_write_4(sc->sc_iot, sc->sc_d_ioh,
		    GICD_IGROUPR(i * 32), ~0);
		bus_space_write_4(sc->sc_iot, sc->sc_d_ioh,
		    GICD_IGRPMODR(i * 32), 0);
	}

	for (i = 2; i < nintr / 16; i++) {
		/* irq 32 - N */
		bus_space_write_4(sc->sc_iot, sc->sc_d_ioh,
		    GICD_ICFGR(i * 16), 0);
	}

	agintc_cpuinit();

	sc->sc_handler = mallocarray(nintr,
	    sizeof(*sc->sc_handler), M_DEVBUF, M_ZERO | M_WAITOK);
	for (i = 0; i < nintr; i++)
		TAILQ_INIT(&sc->sc_handler[i].iq_list);
	sc->sc_lpi = mallocarray(sc->sc_nlpi,
	    sizeof(*sc->sc_lpi), M_DEVBUF, M_ZERO | M_WAITOK);

	/* set priority to IPL_HIGH until configure lowers to desired IPL */
	agintc_setipl(IPL_HIGH);

	/* initialize all interrupts as disabled */
	agintc_calc_mask();

	/* insert self as interrupt handler */
	arm_set_intr_handler(agintc_splraise, agintc_spllower, agintc_splx,
	    agintc_setipl, agintc_irq_handler, NULL,
	    agintc_enable_wakeup, agintc_disable_wakeup);

	/* enable interrupts */
	ctrl = bus_space_read_4(sc->sc_iot, sc->sc_d_ioh, GICD_CTLR);
	bits = GICD_CTLR_ARE_NS | GICD_CTLR_EnableGrp1A | GICD_CTLR_EnableGrp1;
	if (sc->sc_rk3399_quirk) {
		bits &= ~GICD_CTLR_EnableGrp1A;
		bits <<= 1;
	}
	bus_space_write_4(sc->sc_iot, sc->sc_d_ioh, GICD_CTLR, ctrl | bits);

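	/*
	 * Unmask all priorities in ICC_PMR, select the most
	 * fine-grained binary point so the whole priority field takes
	 * part in preemption, and enable delivery of Group 1
	 * interrupts to this PE.
	 */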
	__asm volatile("msr "STR(ICC_PMR)", %x0" :: "r"(0xff));
	__asm volatile("msr "STR(ICC_BPR1)", %x0" :: "r"(0));
	__asm volatile("msr "STR(ICC_IGRPEN1)", %x0" :: "r"(1));

#ifdef MULTIPROCESSOR
	/* setup IPI interrupts */

	/*
	 * Ideally we want three IPI interrupts, one for NOP, one for
	 * DDB and one for HALT.  However we can survive if only one
	 * is available; it is possible that most are not available to
	 * the non-secure OS.
	 */
	nipi = 0;
	for (i = 0; i < 16; i++) {
		int hwcpu = sc->sc_cpuremap[cpu_number()];
		int reg, oldreg;

		oldreg = bus_space_read_1(sc->sc_iot, sc->sc_r_ioh[hwcpu],
		    GICR_IPRIORITYR(i));
		bus_space_write_1(sc->sc_iot, sc->sc_r_ioh[hwcpu],
		    GICR_IPRIORITYR(i), oldreg ^ 0x20);

		/* if this interrupt is not usable, pri will be unmodified */
		reg = bus_space_read_1(sc->sc_iot, sc->sc_r_ioh[hwcpu],
		    GICR_IPRIORITYR(i));
		if (reg == oldreg)
			continue;

		/* return to original value, will be set when used */
		bus_space_write_1(sc->sc_iot, sc->sc_r_ioh[hwcpu],
		    GICR_IPRIORITYR(i), oldreg);

		if (nipi == 0)
			printf(" ipi: %d", i);
		else
			printf(", %d", i);
		ipiirq[nipi++] = i;
		if (nipi == 3)
			break;
	}

	if (nipi == 0)
		panic("no irq available for IPI");

	switch (nipi) {
	case 1:
		sc->sc_ipi_irq[0] = agintc_intr_establish(ipiirq[0],
		    IST_EDGE_RISING, IPL_IPI|IPL_MPSAFE, NULL,
		    agintc_ipi_combined, sc, "ipi");
		sc->sc_ipi_num[ARM_IPI_NOP] = ipiirq[0];
		sc->sc_ipi_num[ARM_IPI_DDB] = ipiirq[0];
		sc->sc_ipi_num[ARM_IPI_HALT] = ipiirq[0];
		break;
	case 2:
		sc->sc_ipi_irq[0] = agintc_intr_establish(ipiirq[0],
		    IST_EDGE_RISING, IPL_IPI|IPL_MPSAFE, NULL,
		    agintc_ipi_nop, sc, "ipinop");
		sc->sc_ipi_num[ARM_IPI_NOP] = ipiirq[0];
		sc->sc_ipi_irq[1] = agintc_intr_establish(ipiirq[1],
		    IST_EDGE_RISING, IPL_IPI|IPL_MPSAFE, NULL,
		    agintc_ipi_combined, sc, "ipi");
		sc->sc_ipi_num[ARM_IPI_DDB] = ipiirq[1];
		sc->sc_ipi_num[ARM_IPI_HALT] = ipiirq[1];
		break;
	case 3:
		sc->sc_ipi_irq[0] = agintc_intr_establish(ipiirq[0],
		    IST_EDGE_RISING, IPL_IPI|IPL_MPSAFE, NULL,
		    agintc_ipi_nop, sc, "ipinop");
		sc->sc_ipi_num[ARM_IPI_NOP] = ipiirq[0];
		sc->sc_ipi_irq[1] = agintc_intr_establish(ipiirq[1],
		    IST_EDGE_RISING, IPL_IPI|IPL_MPSAFE, NULL,
		    agintc_ipi_ddb, sc, "ipiddb");
		sc->sc_ipi_num[ARM_IPI_DDB] = ipiirq[1];
		sc->sc_ipi_irq[2] = agintc_intr_establish(ipiirq[2],
		    IST_EDGE_RISING, IPL_IPI|IPL_MPSAFE, NULL,
		    agintc_ipi_halt, sc, "ipihalt");
		sc->sc_ipi_num[ARM_IPI_HALT] = ipiirq[2];
		break;
	default:
		panic("unexpected number of IPIs: %d", nipi);
	}

	intr_send_ipi_func = agintc_send_ipi;
#endif

	sc->sc_ic.ic_node = faa->fa_node;
	sc->sc_ic.ic_cookie = self;
	sc->sc_ic.ic_establish = agintc_intr_establish_fdt;
	sc->sc_ic.ic_disestablish = agintc_intr_disestablish;
	sc->sc_ic.ic_route = agintc_route_irq;
	sc->sc_ic.ic_cpu_enable = agintc_cpuinit;
	sc->sc_ic.ic_barrier = agintc_intr_barrier;
	if (sc->sc_mbi_nranges > 0)
		sc->sc_ic.ic_establish_msi = agintc_intr_establish_mbi;
	sc->sc_ic.ic_set_wakeup = agintc_intr_set_wakeup;
	arm_intr_register_fdt(&sc->sc_ic);

	intr_restore(psw);

	/* Attach ITS. */
	simplebus_attach(parent, &sc->sc_sbus.sc_dev, faa);

	return;

unmap:
	if (sc->sc_r_ioh) {
		free(sc->sc_r_ioh, M_DEVBUF,
		    sc->sc_num_redist * sizeof(*sc->sc_r_ioh));
	}
	if (sc->sc_processor) {
		free(sc->sc_processor, M_DEVBUF,
		     sc->sc_num_redist * sizeof(*sc->sc_processor));
	}

	if (sc->sc_pend)
		agintc_dmamem_free(sc->sc_dmat, sc->sc_pend);
	if (sc->sc_prop)
		agintc_dmamem_free(sc->sc_dmat, sc->sc_prop);

	for (idx = 0; idx < sc->sc_num_redist_regions; idx++) {
		bus_space_unmap(sc->sc_iot, sc->sc_rbase_ioh[idx],
		     faa->fa_reg[1 + idx].size);
	}
	free(sc->sc_rbase_ioh, M_DEVBUF,
	    sc->sc_num_redist_regions * sizeof(*sc->sc_rbase_ioh));

	bus_space_unmap(sc->sc_iot, sc->sc_d_ioh, faa->fa_reg[0].size);
}

void
agintc_mbiinit(struct agintc_softc *sc, int node, bus_addr_t addr)
{
	uint32_t *ranges;
	int i, len;

	if (OF_getproplen(node, "msi-controller") != 0)
		return;

	len = OF_getproplen(node, "mbi-ranges");
	if (len <= 0 || len % (2 * sizeof(uint32_t)) != 0)
		return;

	ranges = malloc(len, M_TEMP, M_WAITOK);
	OF_getpropintarray(node, "mbi-ranges", ranges, len);

	sc->sc_mbi_nranges = len / (2 * sizeof(uint32_t));
	sc->sc_mbi_ranges = mallocarray(sc->sc_mbi_nranges,
	    sizeof(struct agintc_mbi_range), M_DEVBUF, M_WAITOK);

	for (i = 0; i < sc->sc_mbi_nranges; i++) {
		sc->sc_mbi_ranges[i].mr_base = ranges[2 * i + 0];
		sc->sc_mbi_ranges[i].mr_span = ranges[2 * i + 1];
		sc->sc_mbi_ranges[i].mr_mbi =
		    mallocarray(sc->sc_mbi_ranges[i].mr_span,
			sizeof(void *), M_DEVBUF, M_WAITOK | M_ZERO);
	}

	free(ranges, M_TEMP, len);

	addr = OF_getpropint64(node, "mbi-alias", addr);
	sc->sc_mbi_addr = addr + GICD_SETSPI_NSR;

	printf(" mbi");
}

/* Initialize redistributors on each core. */
void
agintc_cpuinit(void)
{
	struct agintc_softc *sc = agintc_sc;
	uint32_t waker;
	int timeout = 100000;
	int hwcpu;
	int i;

	hwcpu = sc->sc_cpuremap[cpu_number()];
	waker = bus_space_read_4(sc->sc_iot, sc->sc_r_ioh[hwcpu],
	    GICR_WAKER);
	waker &= ~(GICR_WAKER_PROCESSORSLEEP);
	bus_space_write_4(sc->sc_iot, sc->sc_r_ioh[hwcpu], GICR_WAKER,
	    waker);

	do {
		waker = bus_space_read_4(sc->sc_iot, sc->sc_r_ioh[hwcpu],
		    GICR_WAKER);
	} while (--timeout && (waker & GICR_WAKER_CHILDRENASLEEP));
	if (timeout == 0)
		printf("%s: waker timed out\n", __func__);

	bus_space_write_4(sc->sc_iot, sc->sc_r_ioh[hwcpu],
	    GICR_ICENABLE0, ~0);
	bus_space_write_4(sc->sc_iot, sc->sc_r_ioh[hwcpu],
	    GICR_ICPENDR0, ~0);
	bus_space_write_4(sc->sc_iot, sc->sc_r_ioh[hwcpu],
	    GICR_ICACTIVE0, ~0);
	for (i = 0; i < 32; i += 4) {
		bus_space_write_4(sc->sc_iot, sc->sc_r_ioh[hwcpu],
		    GICR_IPRIORITYR(i), ~0);
	}
	bus_space_write_4(sc->sc_iot, sc->sc_r_ioh[hwcpu],
	    GICR_IGROUPR0, ~0);
	bus_space_write_4(sc->sc_iot, sc->sc_r_ioh[hwcpu],
	    GICR_IGRPMODR0, 0);

	if (sc->sc_ipi_irq[0] != NULL)
		agintc_route_irq(sc->sc_ipi_irq[0], IRQ_ENABLE, curcpu());
	if (sc->sc_ipi_irq[1] != NULL)
		agintc_route_irq(sc->sc_ipi_irq[1], IRQ_ENABLE, curcpu());
	if (sc->sc_ipi_irq[2] != NULL)
		agintc_route_irq(sc->sc_ipi_irq[2], IRQ_ENABLE, curcpu());

	__asm volatile("msr "STR(ICC_PMR)", %x0" :: "r"(0xff));
	__asm volatile("msr "STR(ICC_BPR1)", %x0" :: "r"(0));
	__asm volatile("msr "STR(ICC_IGRPEN1)", %x0" :: "r"(1));
	intr_enable();
}

void
agintc_set_priority(struct agintc_softc *sc, int irq, int ipl)
{
	struct cpu_info	*ci = curcpu();
	int		 hwcpu = sc->sc_cpuremap[ci->ci_cpuid];
	uint32_t	 prival;

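	/*
	 * The GIC treats numerically lower priority values as more
	 * urgent, so invert the IPL and shift it into the implemented
	 * (non-secure) priority bits; e.g. with sc_prio_shift == 4,
	 * IPL_NONE (0) becomes 0xf0.
	 */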
	prival = ((0xff - ipl) << sc->sc_prio_shift) & 0xff;

	if (irq >= SPI_BASE) {
		bus_space_write_1(sc->sc_iot, sc->sc_d_ioh,
		    GICD_IPRIORITYR(irq), prival);
	} else {
		/* only sets local redistributor */
		bus_space_write_1(sc->sc_iot, sc->sc_r_ioh[hwcpu],
		    GICR_IPRIORITYR(irq), prival);
	}
}

void
agintc_setipl(int ipl)
{
	struct agintc_softc	*sc = agintc_sc;
	struct cpu_info		*ci = curcpu();
	u_long			 psw;
	uint32_t		 prival;

	/* disable here is only to keep hardware in sync with ci->ci_cpl */
	psw = intr_disable();
	ci->ci_cpl = ipl;

	prival = ((0xff - ipl) << sc->sc_pmr_shift) & 0xff;
	__asm volatile("msr "STR(ICC_PMR)", %x0" : : "r" (prival));
	__isb();

	intr_restore(psw);
}

void
agintc_enable_wakeup(void)
{
	struct agintc_softc *sc = agintc_sc;
	struct intrhand *ih;
	uint8_t *prop;
	int irq, wakeup;

	for (irq = 0; irq < sc->sc_nintr; irq++) {
		/* No handler? Disabled already. */
		if (TAILQ_EMPTY(&sc->sc_handler[irq].iq_list))
			continue;
		/* Unless we're WAKEUP, disable. */
		wakeup = 0;
		TAILQ_FOREACH(ih, &sc->sc_handler[irq].iq_list, ih_list) {
			if (ih->ih_flags & IPL_WAKEUP) {
				wakeup = 1;
				break;
			}
		}
		if (!wakeup)
			agintc_intr_disable(sc, irq);
	}

	for (irq = 0; irq < sc->sc_nlpi; irq++) {
		if (sc->sc_lpi[irq] == NULL)
			continue;
		ih = sc->sc_lpi[irq]->li_ih;
		KASSERT(ih != NULL);
		if (ih->ih_flags & IPL_WAKEUP)
			continue;
		prop = AGINTC_DMA_KVA(sc->sc_prop);
		prop[irq] &= ~GICR_PROP_ENABLE;
		/* Make globally visible. */
		cpu_dcache_wb_range((vaddr_t)&prop[irq],
		    sizeof(*prop));
		__asm volatile("dsb sy");
		/* Invalidate cache */
		agintc_msi_inv(sc->sc_lpi[irq]);
	}
}

void
agintc_disable_wakeup(void)
{
	struct agintc_softc *sc = agintc_sc;
	struct intrhand *ih;
	uint8_t *prop;
	int irq, wakeup;

	for (irq = 0; irq < sc->sc_nintr; irq++) {
		/* No handler? Keep disabled. */
		if (TAILQ_EMPTY(&sc->sc_handler[irq].iq_list))
			continue;
		/* WAKEUPs are already enabled. */
		wakeup = 0;
		TAILQ_FOREACH(ih, &sc->sc_handler[irq].iq_list, ih_list) {
			if (ih->ih_flags & IPL_WAKEUP) {
				wakeup = 1;
				break;
			}
		}
		if (!wakeup)
			agintc_intr_enable(sc, irq);
	}

	for (irq = 0; irq < sc->sc_nlpi; irq++) {
		if (sc->sc_lpi[irq] == NULL)
			continue;
		ih = sc->sc_lpi[irq]->li_ih;
		KASSERT(ih != NULL);
		if (ih->ih_flags & IPL_WAKEUP)
			continue;
		prop = AGINTC_DMA_KVA(sc->sc_prop);
		prop[irq] |= GICR_PROP_ENABLE;
		/* Make globally visible. */
		cpu_dcache_wb_range((vaddr_t)&prop[irq],
		    sizeof(*prop));
		__asm volatile("dsb sy");
		/* Invalidate cache */
		agintc_msi_inv(sc->sc_lpi[irq]);
	}
}

void
agintc_intr_enable(struct agintc_softc *sc, int irq)
{
	struct cpu_info	*ci = curcpu();
	int hwcpu = sc->sc_cpuremap[ci->ci_cpuid];
	int bit = 1 << IRQ_TO_REG32BIT(irq);

	if (irq >= 32) {
		bus_space_write_4(sc->sc_iot, sc->sc_d_ioh,
		    GICD_ISENABLER(irq), bit);
	} else {
		bus_space_write_4(sc->sc_iot, sc->sc_r_ioh[hwcpu],
		    GICR_ISENABLE0, bit);
	}
}

void
agintc_intr_disable(struct agintc_softc *sc, int irq)
{
	struct cpu_info	*ci = curcpu();
	int hwcpu = sc->sc_cpuremap[ci->ci_cpuid];

	if (irq >= 32) {
		bus_space_write_4(sc->sc_iot, sc->sc_d_ioh,
		    GICD_ICENABLER(irq), 1 << IRQ_TO_REG32BIT(irq));
	} else {
		bus_space_write_4(sc->sc_iot, sc->sc_r_ioh[hwcpu],
		    GICR_ICENABLE0, 1 << IRQ_TO_REG32BIT(irq));
	}
}

void
agintc_intr_config(struct agintc_softc *sc, int irq, int type)
{
	uint32_t reg;

	/* Don't dare to change SGIs or PPIs (yet) */
	if (irq < 32)
		return;

	reg = bus_space_read_4(sc->sc_iot, sc->sc_d_ioh, GICD_ICFGR(irq));
	reg &= ~GICD_ICFGR_TRIG_MASK(irq);
	if (type == IST_EDGE_RISING)
		reg |= GICD_ICFGR_TRIG_EDGE(irq);
	else
		reg |= GICD_ICFGR_TRIG_LEVEL(irq);
	bus_space_write_4(sc->sc_iot, sc->sc_d_ioh, GICD_ICFGR(irq), reg);
}

void
agintc_calc_mask(void)
{
	struct agintc_softc	*sc = agintc_sc;
	int			 irq;

	for (irq = 0; irq < sc->sc_nintr; irq++)
		agintc_calc_irq(sc, irq);
}

void
agintc_calc_irq(struct agintc_softc *sc, int irq)
{
	struct cpu_info	*ci = sc->sc_handler[irq].iq_ci;
	struct intrhand	*ih;
	int max = IPL_NONE;
	int min = IPL_HIGH;

	TAILQ_FOREACH(ih, &sc->sc_handler[irq].iq_list, ih_list) {
		if (ih->ih_ipl > max)
			max = ih->ih_ipl;

		if (ih->ih_ipl < min)
			min = ih->ih_ipl;
	}

	if (max == IPL_NONE)
		min = IPL_NONE;

	if (sc->sc_handler[irq].iq_irq_max == max &&
	    sc->sc_handler[irq].iq_irq_min == min)
		return;

	sc->sc_handler[irq].iq_irq_max = max;
	sc->sc_handler[irq].iq_irq_min = min;

#ifdef DEBUG_AGINTC
	if (min != IPL_NONE)
		printf("irq %d to block at %d %d\n", irq, max, min);
#endif
	/* Enable interrupts at lower levels, clear -> enable */
	/* Set interrupt priority/enable */
	if (min != IPL_NONE) {
		agintc_set_priority(sc, irq, min);
		agintc_route(sc, irq, IRQ_ENABLE, ci);
		agintc_intr_enable(sc, irq);
	} else {
		agintc_intr_disable(sc, irq);
		agintc_route(sc, irq, IRQ_DISABLE, ci);
	}
}

void
agintc_splx(int new)
{
	struct cpu_info *ci = curcpu();

	if (ci->ci_ipending & arm_smask[new])
		arm_do_pending_intr(new);

	agintc_setipl(new);
}

int
agintc_spllower(int new)
{
	struct cpu_info *ci = curcpu();
	int old = ci->ci_cpl;

	agintc_splx(new);
	return (old);
}

int
agintc_splraise(int new)
{
	struct cpu_info	*ci = curcpu();
	int old = ci->ci_cpl;

	/*
	 * setipl must always be called because there is a race window
	 * where the variable is updated before the mask is set.  If an
	 * interrupt occurs in that window and the mask is not rewritten
	 * here, the hardware might not get updated on the next
	 * splraise, completely messing up spl protection.
	 */
	if (old > new)
		new = old;

	agintc_setipl(new);
	return (old);
}

uint32_t
agintc_iack(void)
{
	int irq;

	__asm volatile("mrs %x0, "STR(ICC_IAR1) : "=r" (irq));
	__asm volatile("dsb sy");
	return irq;
}

void
agintc_route_irq(void *v, int enable, struct cpu_info *ci)
{
	struct agintc_softc	*sc = agintc_sc;
	struct intrhand		*ih = v;

	if (enable) {
		agintc_set_priority(sc, ih->ih_irq,
		    sc->sc_handler[ih->ih_irq].iq_irq_min);
		agintc_route(sc, ih->ih_irq, IRQ_ENABLE, ci);
		agintc_intr_enable(sc, ih->ih_irq);
	}
}

void
agintc_route(struct agintc_softc *sc, int irq, int enable, struct cpu_info *ci)
{
	/* XXX does not yet support 'participating node' */
	if (irq >= 32) {
#ifdef DEBUG_AGINTC
		printf("router %x irq %d val %016llx\n", GICD_IROUTER(irq),
		    irq, ci->ci_mpidr & MPIDR_AFF);
#endif
		bus_space_write_8(sc->sc_iot, sc->sc_d_ioh,
		    GICD_IROUTER(irq), ci->ci_mpidr & MPIDR_AFF);
	}
}

void
agintc_intr_barrier(void *cookie)
{
	struct intrhand		*ih = cookie;

	sched_barrier(ih->ih_ci);
}

void
agintc_run_handler(struct intrhand *ih, void *frame, int s)
{
	void *arg;
	int handled;

#ifdef MULTIPROCESSOR
	int need_lock;

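	/*
	 * Handlers not marked IPL_MPSAFE must run under the big kernel
	 * lock; handlers running at IPL_SCHED or above are expected to
	 * be MP-safe and run unlocked.
	 */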
	if (ih->ih_flags & IPL_MPSAFE)
		need_lock = 0;
	else
		need_lock = s < IPL_SCHED;

	if (need_lock)
		KERNEL_LOCK();
#endif

	if (ih->ih_arg)
		arg = ih->ih_arg;
	else
		arg = frame;

	handled = ih->ih_func(arg);
	if (handled)
		ih->ih_count.ec_count++;

#ifdef MULTIPROCESSOR
	if (need_lock)
		KERNEL_UNLOCK();
#endif
}

void
agintc_irq_handler(void *frame)
{
	struct agintc_softc	*sc = agintc_sc;
	struct intrhand		*ih;
	int			 irq, pri, s;

	irq = agintc_iack();

#ifdef DEBUG_AGINTC
	if (irq != 30)
		printf("irq  %d fired\n", irq);
	else {
		static int cnt = 0;
		if ((cnt++ % 100) == 0) {
			printf("irq  %d fired * _100\n", irq);
#ifdef DDB
			db_enter();
#endif
		}
	}
#endif

	if (irq == 1023) {
		sc->sc_spur.ec_count++;
		return;
	}

	if ((irq >= sc->sc_nintr && irq < LPI_BASE) ||
	    irq >= LPI_BASE + sc->sc_nlpi) {
		return;
	}

	if (irq >= LPI_BASE) {
		if (sc->sc_lpi[irq - LPI_BASE] == NULL)
			return;
		ih = sc->sc_lpi[irq - LPI_BASE]->li_ih;
		KASSERT(ih != NULL);

		s = agintc_splraise(ih->ih_ipl);
		intr_enable();
		agintc_run_handler(ih, frame, s);
		intr_disable();
		agintc_eoi(irq);

		agintc_splx(s);
		return;
	}

	pri = sc->sc_handler[irq].iq_irq_max;
	s = agintc_splraise(pri);
	intr_enable();
	TAILQ_FOREACH(ih, &sc->sc_handler[irq].iq_list, ih_list) {
		agintc_run_handler(ih, frame, s);
	}
	intr_disable();
	agintc_eoi(irq);

	agintc_splx(s);
}

void *
agintc_intr_establish_fdt(void *cookie, int *cell, int level,
    struct cpu_info *ci, int (*func)(void *), void *arg, char *name)
{
	struct agintc_softc	*sc = agintc_sc;
	int			 irq;
	int			 type;

	/* 2nd cell contains the interrupt number */
	irq = cell[1];

	/* 1st cell contains type: 0 SPI (32-X), 1 PPI (16-31) */
	if (cell[0] == 0)
		irq += SPI_BASE;
	else if (cell[0] == 1)
		irq += PPI_BASE;
	else
		panic("%s: bogus interrupt type", sc->sc_sbus.sc_dev.dv_xname);

	/* SPIs are only active-high level or low-to-high edge */
	if (cell[2] & 0x3)
		type = IST_EDGE_RISING;
	else
		type = IST_LEVEL_HIGH;

	return agintc_intr_establish(irq, type, level, ci, func, arg, name);
}

void *
agintc_intr_establish(int irqno, int type, int level, struct cpu_info *ci,
    int (*func)(void *), void *arg, char *name)
{
	struct agintc_softc	*sc = agintc_sc;
	struct intrhand		*ih;
	u_long			 psw;

	if (irqno < 0 || (irqno >= sc->sc_nintr && irqno < LPI_BASE) ||
	    irqno >= LPI_BASE + sc->sc_nlpi)
		panic("agintc_intr_establish: bogus irqnumber %d: %s",
		    irqno, name);

	if (ci == NULL)
		ci = &cpu_info_primary;

	ih = malloc(sizeof *ih, M_DEVBUF, M_WAITOK);
	ih->ih_func = func;
	ih->ih_arg = arg;
	ih->ih_ipl = level & IPL_IRQMASK;
	ih->ih_flags = level & IPL_FLAGMASK;
	ih->ih_irq = irqno;
	ih->ih_name = name;
	ih->ih_ci = ci;

	psw = intr_disable();

	if (irqno < LPI_BASE) {
		if (!TAILQ_EMPTY(&sc->sc_handler[irqno].iq_list) &&
		    sc->sc_handler[irqno].iq_ci != ci) {
			intr_restore(psw);
			free(ih, M_DEVBUF, sizeof *ih);
			return NULL;
		}
		TAILQ_INSERT_TAIL(&sc->sc_handler[irqno].iq_list, ih, ih_list);
		sc->sc_handler[irqno].iq_ci = ci;
	}

	if (name != NULL)
		evcount_attach(&ih->ih_count, name, &ih->ih_irq);

#ifdef DEBUG_AGINTC
	printf("%s: irq %d level %d [%s]\n", __func__, irqno, level, name);
#endif

	if (irqno < LPI_BASE) {
		agintc_intr_config(sc, irqno, type);
		agintc_calc_irq(sc, irqno);
	} else {
		uint8_t *prop = AGINTC_DMA_KVA(sc->sc_prop);

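		/*
		 * LPIs have no per-interrupt distributor registers;
		 * each one is configured through its byte in the LPI
		 * configuration table: the priority in the upper bits
		 * plus the group and enable flags.
		 */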
		prop[irqno - LPI_BASE] = (((0xff - ih->ih_ipl) << 4) & 0xff) |
		    GICR_PROP_GROUP1 | GICR_PROP_ENABLE;

		/* Make globally visible. */
		cpu_dcache_wb_range((vaddr_t)&prop[irqno - LPI_BASE],
		    sizeof(*prop));
		__asm volatile("dsb sy");
	}

	intr_restore(psw);
	return (ih);
}

void
agintc_intr_disestablish(void *cookie)
{
	struct agintc_softc	*sc = agintc_sc;
	struct intrhand		*ih = cookie;
	int			 irqno = ih->ih_irq;
	u_long			 psw;
	struct agintc_mbi_range	*mr;
	int			 i;

	psw = intr_disable();

	if (irqno < LPI_BASE) {
		TAILQ_REMOVE(&sc->sc_handler[irqno].iq_list, ih, ih_list);
		agintc_calc_irq(sc, irqno);

		/* In case this is an MBI, free it */
		for (i = 0; i < sc->sc_mbi_nranges; i++) {
			mr = &sc->sc_mbi_ranges[i];
			if (irqno < mr->mr_base)
				continue;
			if (irqno >= mr->mr_base + mr->mr_span)
				break;
			if (mr->mr_mbi[irqno - mr->mr_base] != NULL)
				mr->mr_mbi[irqno - mr->mr_base] = NULL;
		}
	} else {
		uint8_t *prop = AGINTC_DMA_KVA(sc->sc_prop);

		prop[irqno - LPI_BASE] = 0;

		/* Make globally visible. */
		cpu_dcache_wb_range((vaddr_t)&prop[irqno - LPI_BASE],
		    sizeof(*prop));
		__asm volatile("dsb sy");
	}

	if (ih->ih_name != NULL)
		evcount_detach(&ih->ih_count);

	intr_restore(psw);

	free(ih, M_DEVBUF, 0);
}

void
agintc_intr_set_wakeup(void *cookie)
{
	struct intrhand *ih = cookie;

	ih->ih_flags |= IPL_WAKEUP;
}

void *
agintc_intr_establish_mbi(void *self, uint64_t *addr, uint64_t *data,
    int level, struct cpu_info *ci, int (*func)(void *), void *arg, char *name)
{
	struct agintc_softc *sc = agintc_sc;
	struct agintc_mbi_range *mr;
	void *cookie;
	int i, j, hwcpu;

	if (ci == NULL)
		ci = &cpu_info_primary;
	hwcpu = agintc_sc->sc_cpuremap[ci->ci_cpuid];

	for (i = 0; i < sc->sc_mbi_nranges; i++) {
		mr = &sc->sc_mbi_ranges[i];
		for (j = 0; j < mr->mr_span; j++) {
			if (mr->mr_mbi[j] != NULL)
				continue;

			cookie = agintc_intr_establish(mr->mr_base + j,
			    IST_EDGE_RISING, level, ci, func, arg, name);
			if (cookie == NULL)
				return NULL;

			*addr = sc->sc_mbi_addr;
			*data = mr->mr_base + j;

			mr->mr_mbi[j] = cookie;
			return cookie;
		}
	}

	return NULL;
}

void
agintc_eoi(uint32_t eoi)
{
	__asm volatile("msr "STR(ICC_EOIR1)", %x0" :: "r" (eoi));
	__isb();
}

void
agintc_d_wait_rwp(struct agintc_softc *sc)
{
	int count = 100000;
	uint32_t v;

	do {
		v = bus_space_read_4(sc->sc_iot, sc->sc_d_ioh, GICD_CTLR);
	} while (--count && (v & GICD_CTLR_RWP));

	if (count == 0)
		panic("%s: RWP timed out 0x%08x", __func__, v);
}

void
agintc_r_wait_rwp(struct agintc_softc *sc)
{
	struct cpu_info *ci = curcpu();
	int hwcpu = sc->sc_cpuremap[ci->ci_cpuid];
	int count = 100000;
	uint32_t v;

	do {
		v = bus_space_read_4(sc->sc_iot, sc->sc_r_ioh[hwcpu],
		    GICR_CTLR);
	} while (--count && (v & GICR_CTLR_RWP));

	if (count == 0)
		panic("%s: RWP timed out 0x%08x", __func__, v);
}

#ifdef MULTIPROCESSOR
int
agintc_ipi_ddb(void *v)
{
	/* XXX */
#ifdef DDB
	db_enter();
#endif
	return 1;
}

int
agintc_ipi_halt(void *v)
{
	struct agintc_softc *sc = v;
	int old = curcpu()->ci_cpl;

	intr_disable();
	agintc_eoi(sc->sc_ipi_num[ARM_IPI_HALT]);
	agintc_setipl(IPL_NONE);

	cpu_halt();

	agintc_setipl(old);
	intr_enable();
	return 1;
}

int
agintc_ipi_nop(void *v)
{
	/* Nothing to do here, just enough to wake up from WFI */
	return 1;
}

int
agintc_ipi_combined(void *v)
{
	struct agintc_softc *sc = v;

	if (sc->sc_ipi_reason[cpu_number()] == ARM_IPI_DDB) {
		sc->sc_ipi_reason[cpu_number()] = ARM_IPI_NOP;
		return agintc_ipi_ddb(v);
	} else if (sc->sc_ipi_reason[cpu_number()] == ARM_IPI_HALT) {
		sc->sc_ipi_reason[cpu_number()] = ARM_IPI_NOP;
		return agintc_ipi_halt(v);
	} else {
		return agintc_ipi_nop(v);
	}
}

void
agintc_send_ipi(struct cpu_info *ci, int id)
{
	struct agintc_softc	*sc = agintc_sc;
	uint64_t sendmask;

	if (ci == curcpu() && id == ARM_IPI_NOP)
		return;

	/* never overwrite IPI_DDB or IPI_HALT with IPI_NOP */
	if (id == ARM_IPI_DDB || id == ARM_IPI_HALT)
		sc->sc_ipi_reason[ci->ci_cpuid] = id;

	/* only send to a single cpu */
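	/*
	 * Assemble the ICC_SGI1R value: Aff3, Aff2 and Aff1 of the
	 * target end up in bits 55:48, 39:32 and 23:16, the SGI INTID
	 * goes in bits 27:24 and a one-hot target list in bits 15:0.
	 */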
	sendmask = (ci->ci_mpidr & MPIDR_AFF3) << 16;
	sendmask |= (ci->ci_mpidr & MPIDR_AFF2) << 16;
	sendmask |= (ci->ci_mpidr & MPIDR_AFF1) << 8;
	sendmask |= 1 << (ci->ci_mpidr & 0x0f);
	sendmask |= (sc->sc_ipi_num[id] << 24);

	__asm volatile ("msr " STR(ICC_SGI1R)", %x0" ::"r"(sendmask));
}
#endif

/*
 * GICv3 ITS controller for MSI interrupts.
 */
#define GITS_CTLR		0x0000
#define  GITS_CTLR_ENABLED	(1UL << 0)
#define GITS_TYPER		0x0008
#define  GITS_TYPER_CIL		(1ULL << 36)
#define  GITS_TYPER_CIDBITS(x)	(((x) >> 32) & 0xf)
#define  GITS_TYPER_HCC(x)	(((x) >> 24) & 0xff)
#define  GITS_TYPER_PTA		(1ULL << 19)
#define  GITS_TYPER_DEVBITS(x)	(((x) >> 13) & 0x1f)
#define  GITS_TYPER_ITE_SZ(x)	(((x) >> 4) & 0xf)
#define  GITS_TYPER_PHYS	(1ULL << 0)
#define GITS_CBASER		0x0080
#define  GITS_CBASER_VALID	(1ULL << 63)
#define  GITS_CBASER_IC_NORM_NC	(1ULL << 59)
#define  GITS_CBASER_MASK	0x1ffffffffff000ULL
#define GITS_CWRITER		0x0088
#define GITS_CREADR		0x0090
#define GITS_BASER(i)		(0x0100 + ((i) * 8))
#define  GITS_BASER_VALID	(1ULL << 63)
#define  GITS_BASER_INDIRECT	(1ULL << 62)
#define  GITS_BASER_IC_NORM_NC	(1ULL << 59)
#define  GITS_BASER_TYPE_MASK	(7ULL << 56)
#define  GITS_BASER_TYPE_DEVICE	(1ULL << 56)
#define  GITS_BASER_TYPE_COLL	(4ULL << 56)
#define  GITS_BASER_TTE_SZ(x)	(((x) >> 48) & 0x1f)
#define  GITS_BASER_PGSZ_MASK	(3ULL << 8)
#define  GITS_BASER_PGSZ_4K	(0ULL << 8)
#define  GITS_BASER_PGSZ_16K	(1ULL << 8)
#define  GITS_BASER_PGSZ_64K	(2ULL << 8)
#define  GITS_BASER_SZ_MASK	(0xffULL)
#define  GITS_BASER_PA_MASK	0x7ffffffff000ULL
#define GITS_TRANSLATER		0x10040

#define GITS_NUM_BASER		8

struct gits_cmd {
	uint8_t cmd;
	uint32_t deviceid;
	uint32_t eventid;
	uint32_t intid;
	uint64_t dw2;
	uint64_t dw3;
};

#define GITS_CMD_VALID		(1ULL << 63)

/* ITS commands */
#define SYNC	0x05
#define MAPD	0x08
#define MAPC	0x09
#define MAPTI	0x0a
#define INV	0x0c
#define INVALL	0x0d
#define DISCARD 0x0f

#define GITS_CMDQ_SIZE		(64 * 1024)
#define GITS_CMDQ_NENTRIES	(GITS_CMDQ_SIZE / sizeof(struct gits_cmd))
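
/*
 * ITS commands are 32 bytes each and are consumed from a ring:
 * software writes commands into the queue and advances GITS_CWRITER;
 * the ITS bumps GITS_CREADR as it executes them.
 */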
1570 
1571 struct agintc_msi_device {
1572 	LIST_ENTRY(agintc_msi_device) md_list;
1573 
1574 	uint32_t		md_deviceid;
1575 	uint32_t		md_events;
1576 	struct agintc_dmamem	*md_itt;
1577 };
1578 
1579 int	 agintc_msi_match(struct device *, void *, void *);
1580 void	 agintc_msi_attach(struct device *, struct device *, void *);
1581 void	*agintc_intr_establish_msi(void *, uint64_t *, uint64_t *,
1582 	    int , struct cpu_info *, int (*)(void *), void *, char *);
1583 void	 agintc_intr_disestablish_msi(void *);
1584 void	 agintc_intr_barrier_msi(void *);
1585 
1586 struct agintc_msi_softc {
1587 	struct device			sc_dev;
1588 	bus_space_tag_t			sc_iot;
1589 	bus_space_handle_t		sc_ioh;
1590 	bus_dma_tag_t			sc_dmat;
1591 
1592 	bus_addr_t			sc_msi_addr;
1593 	int				sc_msi_delta;
1594 
1595 	struct agintc_dmamem		*sc_cmdq;
1596 	uint16_t			sc_cmdidx;
1597 
1598 	int				sc_devbits;
1599 	uint32_t			sc_deviceid_max;
1600 	struct agintc_dmamem		*sc_dtt;
1601 	size_t				sc_dtt_pgsz;
1602 	uint8_t				sc_dte_sz;
1603 	int				sc_dtt_indirect;
1604 	int				sc_cidbits;
1605 	struct agintc_dmamem		*sc_ctt;
1606 	size_t				sc_ctt_pgsz;
1607 	uint8_t				sc_cte_sz;
1608 	uint8_t				sc_ite_sz;
1609 
1610 	LIST_HEAD(, agintc_msi_device)	sc_msi_devices;
1611 
1612 	struct interrupt_controller	sc_ic;
1613 };
1614 
1615 const struct cfattach	agintcmsi_ca = {
1616 	sizeof (struct agintc_msi_softc), agintc_msi_match, agintc_msi_attach
1617 };
1618 
1619 struct cfdriver agintcmsi_cd = {
1620 	NULL, "agintcmsi", DV_DULL
1621 };
1622 
1623 void	agintc_msi_send_cmd(struct agintc_msi_softc *, struct gits_cmd *);
1624 void	agintc_msi_wait_cmd(struct agintc_msi_softc *);
1625 
1626 #define CPU_IMPL(midr)  (((midr) >> 24) & 0xff)
1627 #define CPU_PART(midr)  (((midr) >> 4) & 0xfff)
1628 
1629 #define CPU_IMPL_QCOM		0x51
1630 #define CPU_PART_ORYON		0x001
1631 
1632 int
agintc_msi_match(struct device * parent,void * cfdata,void * aux)1633 agintc_msi_match(struct device *parent, void *cfdata, void *aux)
1634 {
1635 	struct fdt_attach_args *faa = aux;
1636 
1637 	/*
1638 	 * XXX For some reason MSIs don't work on Qualcomm X1E SoCs in
1639 	 * ACPI mode.  So skip attaching the ITS in that case.  MSIs
1640 	 * work fine when booting with a DTB.
1641 	 */
1642 	if (OF_is_compatible(OF_peer(0), "openbsd,acpi") &&
1643 	    CPU_IMPL(curcpu()->ci_midr) == CPU_IMPL_QCOM &&
1644 	    CPU_PART(curcpu()->ci_midr) == CPU_PART_ORYON)
1645 		return 0;
1646 
1647 	return OF_is_compatible(faa->fa_node, "arm,gic-v3-its");
1648 }
1649 
1650 void
agintc_msi_attach(struct device * parent,struct device * self,void * aux)1651 agintc_msi_attach(struct device *parent, struct device *self, void *aux)
1652 {
1653 	struct agintc_msi_softc *sc = (struct agintc_msi_softc *)self;
1654 	struct fdt_attach_args *faa = aux;
1655 	struct gits_cmd cmd;
1656 	uint32_t pre_its[2];
1657 	uint64_t typer;
1658 	int i, hwcpu;
1659 
1660 	if (faa->fa_nreg < 1) {
1661 		printf(": no registers\n");
1662 		return;
1663 	}
1664 
1665 	sc->sc_iot = faa->fa_iot;
1666 	if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr,
1667 	    faa->fa_reg[0].size, 0, &sc->sc_ioh)) {
1668 		printf(": can't map registers\n");
1669 		return;
1670 	}
1671 	sc->sc_dmat = faa->fa_dmat;
1672 
1673 	sc->sc_msi_addr = faa->fa_reg[0].addr + GITS_TRANSLATER;
1674 	if (OF_getpropintarray(faa->fa_node, "socionext,synquacer-pre-its",
1675 	    pre_its, sizeof(pre_its)) == sizeof(pre_its)) {
1676 		sc->sc_msi_addr = pre_its[0];
1677 		sc->sc_msi_delta = 4;
1678 	}
1679 
1680 	typer = bus_space_read_8(sc->sc_iot, sc->sc_ioh, GITS_TYPER);
1681 	if ((typer & GITS_TYPER_PHYS) == 0 || typer & GITS_TYPER_PTA) {
1682 		printf(": unsupported type 0x%016llx\n", typer);
1683 		goto unmap;
1684 	}
1685 	sc->sc_ite_sz = GITS_TYPER_ITE_SZ(typer) + 1;
1686 	sc->sc_devbits = GITS_TYPER_DEVBITS(typer) + 1;
1687 	if (typer & GITS_TYPER_CIL)
1688 		sc->sc_cidbits = GITS_TYPER_CIDBITS(typer) + 1;
1689 	else
1690 		sc->sc_cidbits = 16;
1691 
1692 	/* Set up command queue. */
1693 	sc->sc_cmdq = agintc_dmamem_alloc(sc->sc_dmat,
1694 	    GITS_CMDQ_SIZE, GITS_CMDQ_SIZE);
1695 	if (sc->sc_cmdq == NULL) {
1696 		printf(": can't alloc command queue\n");
1697 		goto unmap;
1698 	}
1699 	bus_space_write_8(sc->sc_iot, sc->sc_ioh, GITS_CBASER,
1700 	    AGINTC_DMA_DVA(sc->sc_cmdq) | GITS_CBASER_IC_NORM_NC |
1701 	    (GITS_CMDQ_SIZE / PAGE_SIZE) - 1 | GITS_CBASER_VALID);
1702 
1703 	/* Set up device translation table. */
1704 	for (i = 0; i < GITS_NUM_BASER; i++) {
1705 		uint64_t baser;
1706 		paddr_t dtt_pa;
1707 		size_t size;
1708 
1709 		baser = bus_space_read_8(sc->sc_iot, sc->sc_ioh, GITS_BASER(i));
1710 		if ((baser & GITS_BASER_TYPE_MASK) != GITS_BASER_TYPE_DEVICE)
1711 			continue;
1712 
1713 		/* Determine the maximum supported page size. */
1714 		bus_space_write_8(sc->sc_iot, sc->sc_ioh, GITS_BASER(i),
1715 		    (baser & ~GITS_BASER_PGSZ_MASK) | GITS_BASER_PGSZ_64K);
1716 		baser = bus_space_read_8(sc->sc_iot, sc->sc_ioh, GITS_BASER(i));
1717 		if ((baser & GITS_BASER_PGSZ_MASK) == GITS_BASER_PGSZ_64K)
1718 			goto dfound;
1719 
1720 		bus_space_write_8(sc->sc_iot, sc->sc_ioh, GITS_BASER(i),
1721 		    (baser & ~GITS_BASER_PGSZ_MASK) | GITS_BASER_PGSZ_16K);
1722 		baser = bus_space_read_8(sc->sc_iot, sc->sc_ioh, GITS_BASER(i));
1723 		if ((baser & GITS_BASER_PGSZ_MASK) == GITS_BASER_PGSZ_16K)
1724 			goto dfound;
1725 
1726 		bus_space_write_8(sc->sc_iot, sc->sc_ioh, GITS_BASER(i),
1727 		    (baser & ~GITS_BASER_PGSZ_MASK) | GITS_BASER_PGSZ_4K);
1728 		baser = bus_space_read_8(sc->sc_iot, sc->sc_ioh, GITS_BASER(i));
1729 
1730 	dfound:
1731 		switch (baser & GITS_BASER_PGSZ_MASK) {
1732 		case GITS_BASER_PGSZ_4K:
1733 			sc->sc_dtt_pgsz = PAGE_SIZE;
1734 			break;
1735 		case GITS_BASER_PGSZ_16K:
1736 			sc->sc_dtt_pgsz = 4 * PAGE_SIZE;
1737 			break;
1738 		case GITS_BASER_PGSZ_64K:
1739 			sc->sc_dtt_pgsz = 16 * PAGE_SIZE;
1740 			break;
1741 		}
1742 
1743 		/* Calculate table size. */
1744 		sc->sc_dte_sz = GITS_BASER_TTE_SZ(baser) + 1;
1745 		size = (1ULL << sc->sc_devbits) * sc->sc_dte_sz;
1746 		size = roundup(size, sc->sc_dtt_pgsz);
1747 
1748 		/* Might make sense to go indirect */
1749 		if (size > 2 * sc->sc_dtt_pgsz) {
1750 			bus_space_write_8(sc->sc_iot, sc->sc_ioh, GITS_BASER(i),
1751 			    baser | GITS_BASER_INDIRECT);
1752 			if (bus_space_read_8(sc->sc_iot, sc->sc_ioh,
1753 			    GITS_BASER(i)) & GITS_BASER_INDIRECT)
1754 				sc->sc_dtt_indirect = 1;
1755 		}
1756 		if (sc->sc_dtt_indirect) {
1757 			size = (1ULL << sc->sc_devbits);
1758 			size /= (sc->sc_dtt_pgsz / sc->sc_dte_sz);
1759 			size *= sizeof(uint64_t);
1760 			size = roundup(size, sc->sc_dtt_pgsz);
1761 		}
1762 
		/* Clamp to the maximum number of pages the Size field allows. */
		if (size / sc->sc_dtt_pgsz > GITS_BASER_SZ_MASK + 1)
			size = (GITS_BASER_SZ_MASK + 1) * sc->sc_dtt_pgsz;

		/* Calculate the maximum device ID from the configured size. */
		sc->sc_deviceid_max = (size / sc->sc_dte_sz) - 1;
		if (sc->sc_dtt_indirect)
			sc->sc_deviceid_max = ((size / sizeof(uint64_t)) *
			    (sc->sc_dtt_pgsz / sc->sc_dte_sz)) - 1;

		/* Allocate table. */
		sc->sc_dtt = agintc_dmamem_alloc(sc->sc_dmat,
		    size, sc->sc_dtt_pgsz);
		if (sc->sc_dtt == NULL) {
			printf(": can't alloc translation table\n");
			goto unmap;
		}

		/* Configure table. */
		dtt_pa = AGINTC_DMA_DVA(sc->sc_dtt);
		KASSERT((dtt_pa & GITS_BASER_PA_MASK) == dtt_pa);
		bus_space_write_8(sc->sc_iot, sc->sc_ioh, GITS_BASER(i),
		    GITS_BASER_IC_NORM_NC | baser & GITS_BASER_PGSZ_MASK |
		    dtt_pa | (size / sc->sc_dtt_pgsz) - 1 |
		    (sc->sc_dtt_indirect ? GITS_BASER_INDIRECT : 0) |
		    GITS_BASER_VALID);
	}

	/* Set up collection translation table. */
	for (i = 0; i < GITS_NUM_BASER; i++) {
		uint64_t baser;
		paddr_t ctt_pa;
		size_t size;

		baser = bus_space_read_8(sc->sc_iot, sc->sc_ioh, GITS_BASER(i));
		if ((baser & GITS_BASER_TYPE_MASK) != GITS_BASER_TYPE_COLL)
			continue;

		/* Determine the maximum supported page size. */
		bus_space_write_8(sc->sc_iot, sc->sc_ioh, GITS_BASER(i),
		    (baser & ~GITS_BASER_PGSZ_MASK) | GITS_BASER_PGSZ_64K);
		baser = bus_space_read_8(sc->sc_iot, sc->sc_ioh, GITS_BASER(i));
		if ((baser & GITS_BASER_PGSZ_MASK) == GITS_BASER_PGSZ_64K)
			goto cfound;

		bus_space_write_8(sc->sc_iot, sc->sc_ioh, GITS_BASER(i),
		    (baser & ~GITS_BASER_PGSZ_MASK) | GITS_BASER_PGSZ_16K);
		baser = bus_space_read_8(sc->sc_iot, sc->sc_ioh, GITS_BASER(i));
		if ((baser & GITS_BASER_PGSZ_MASK) == GITS_BASER_PGSZ_16K)
			goto cfound;

		bus_space_write_8(sc->sc_iot, sc->sc_ioh, GITS_BASER(i),
		    (baser & ~GITS_BASER_PGSZ_MASK) | GITS_BASER_PGSZ_4K);
		baser = bus_space_read_8(sc->sc_iot, sc->sc_ioh, GITS_BASER(i));

	cfound:
		switch (baser & GITS_BASER_PGSZ_MASK) {
		case GITS_BASER_PGSZ_4K:
			sc->sc_ctt_pgsz = PAGE_SIZE;
			break;
		case GITS_BASER_PGSZ_16K:
			sc->sc_ctt_pgsz = 4 * PAGE_SIZE;
			break;
		case GITS_BASER_PGSZ_64K:
			sc->sc_ctt_pgsz = 16 * PAGE_SIZE;
			break;
		}

		/* Calculate table size. */
		sc->sc_cte_sz = GITS_BASER_TTE_SZ(baser) + 1;
		size = (1ULL << sc->sc_cidbits) * sc->sc_cte_sz;
		size = roundup(size, sc->sc_ctt_pgsz);

		/* Allocate table. */
		sc->sc_ctt = agintc_dmamem_alloc(sc->sc_dmat,
		    size, sc->sc_ctt_pgsz);
		if (sc->sc_ctt == NULL) {
			printf(": can't alloc collection table\n");
			goto unmap;
		}

		/* Configure table. */
		ctt_pa = AGINTC_DMA_DVA(sc->sc_ctt);
		KASSERT((ctt_pa & GITS_BASER_PA_MASK) == ctt_pa);
		bus_space_write_8(sc->sc_iot, sc->sc_ioh, GITS_BASER(i),
		    GITS_BASER_IC_NORM_NC | baser & GITS_BASER_PGSZ_MASK |
		    ctt_pa | (size / sc->sc_ctt_pgsz) - 1 | GITS_BASER_VALID);
	}

	/* Enable ITS. */
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, GITS_CTLR,
	    GITS_CTLR_ENABLED);

	LIST_INIT(&sc->sc_msi_devices);

	/* Create one collection per core, targeting that core's redistributor. */
	KASSERT(ncpus <= agintc_sc->sc_num_redist);
	for (i = 0; i < ncpus; i++) {
		hwcpu = agintc_sc->sc_cpuremap[i];
		memset(&cmd, 0, sizeof(cmd));
		cmd.cmd = MAPC;
		cmd.dw2 = GITS_CMD_VALID |
		    (agintc_sc->sc_processor[hwcpu] << 16) | i;
		agintc_msi_send_cmd(sc, &cmd);
		agintc_msi_wait_cmd(sc);
	}

	printf("\n");

	sc->sc_ic.ic_node = faa->fa_node;
	sc->sc_ic.ic_cookie = sc;
	sc->sc_ic.ic_establish_msi = agintc_intr_establish_msi;
	sc->sc_ic.ic_disestablish = agintc_intr_disestablish_msi;
	sc->sc_ic.ic_barrier = agintc_intr_barrier_msi;
	sc->sc_ic.ic_gic_its_id = OF_getpropint(faa->fa_node,
	    "openbsd,gic-its-id", 0);
	arm_intr_register_fdt(&sc->sc_ic);
	return;

unmap:
	if (sc->sc_dtt)
		agintc_dmamem_free(sc->sc_dmat, sc->sc_dtt);
	if (sc->sc_cmdq)
		agintc_dmamem_free(sc->sc_dmat, sc->sc_cmdq);

	bus_space_unmap(sc->sc_iot, sc->sc_ioh, faa->fa_reg[0].size);
}

void
agintc_msi_send_cmd(struct agintc_msi_softc *sc, struct gits_cmd *cmd)
{
	struct gits_cmd *queue = AGINTC_DMA_KVA(sc->sc_cmdq);

	memcpy(&queue[sc->sc_cmdidx], cmd, sizeof(*cmd));

	/* Make globally visible. */
	cpu_dcache_wb_range((vaddr_t)&queue[sc->sc_cmdidx], sizeof(*cmd));
	__asm volatile("dsb sy");

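	/* Advance the ring tail and publish it to the ITS via GITS_CWRITER. */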
	sc->sc_cmdidx++;
	sc->sc_cmdidx %= GITS_CMDQ_NENTRIES;
	bus_space_write_8(sc->sc_iot, sc->sc_ioh, GITS_CWRITER,
	    sc->sc_cmdidx * sizeof(*cmd));
}

void
agintc_msi_wait_cmd(struct agintc_msi_softc *sc)
{
	uint64_t creadr;
	int timo;

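	/* Poll until GITS_CREADR catches up with our write pointer. */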
	for (timo = 1000; timo > 0; timo--) {
		creadr = bus_space_read_8(sc->sc_iot, sc->sc_ioh, GITS_CREADR);
		if (creadr == sc->sc_cmdidx * sizeof(struct gits_cmd))
			break;
		delay(1);
	}
	if (timo == 0)
		printf("%s: command queue timeout\n", sc->sc_dev.dv_xname);
}

int
agintc_msi_create_device_table(struct agintc_msi_softc *sc, uint32_t deviceid)
{
	uint64_t *table = AGINTC_DMA_KVA(sc->sc_dtt);
	uint32_t idx = deviceid / (sc->sc_dtt_pgsz / sc->sc_dte_sz);
	struct agintc_dmamem *dtt;
	paddr_t dtt_pa;

	/* Out of bounds */
	if (deviceid > sc->sc_deviceid_max)
		return ENXIO;

	/* Flat (non-indirect) table: nothing more to set up. */
	if (!sc->sc_dtt_indirect)
		return 0;

	/* Table already allocated */
	if (table[idx])
		return 0;

	/* FIXME: these level-2 pages are never freed. */
	dtt = agintc_dmamem_alloc(sc->sc_dmat,
	    sc->sc_dtt_pgsz, sc->sc_dtt_pgsz);
	if (dtt == NULL)
		return ENOMEM;

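	/* Install the level-2 page in the level-1 table and flush it out. */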
	dtt_pa = AGINTC_DMA_DVA(dtt);
	KASSERT((dtt_pa & GITS_BASER_PA_MASK) == dtt_pa);
	table[idx] = dtt_pa | GITS_BASER_VALID;
	cpu_dcache_wb_range((vaddr_t)&table[idx], sizeof(table[idx]));
	__asm volatile("dsb sy");
	return 0;
}

struct agintc_msi_device *
agintc_msi_create_device(struct agintc_msi_softc *sc, uint32_t deviceid)
{
	struct agintc_msi_device *md;
	struct gits_cmd cmd;

	if (deviceid > sc->sc_deviceid_max)
		return NULL;

	if (agintc_msi_create_device_table(sc, deviceid) != 0)
		return NULL;

	md = malloc(sizeof(*md), M_DEVBUF, M_ZERO | M_WAITOK);
	md->md_deviceid = deviceid;
	md->md_itt = agintc_dmamem_alloc(sc->sc_dmat,
	    32 * sc->sc_ite_sz, PAGE_SIZE);
	LIST_INSERT_HEAD(&sc->sc_msi_devices, md, md_list);

	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd = MAPD;
	cmd.deviceid = deviceid;
	cmd.eventid = 4;	/* ITT size: 2^(4+1) == 32 events */
	cmd.dw2 = AGINTC_DMA_DVA(md->md_itt) | GITS_CMD_VALID;
	agintc_msi_send_cmd(sc, &cmd);
	agintc_msi_wait_cmd(sc);

	return md;
}

struct agintc_msi_device *
agintc_msi_find_device(struct agintc_msi_softc *sc, uint32_t deviceid)
{
	struct agintc_msi_device *md;

	LIST_FOREACH(md, &sc->sc_msi_devices, md_list) {
		if (md->md_deviceid == deviceid)
			return md;
	}

	return agintc_msi_create_device(sc, deviceid);
}

void
agintc_msi_discard(struct agintc_lpi_info *li)
{
	struct agintc_msi_softc *sc;
	struct cpu_info *ci;
	struct gits_cmd cmd;
	int hwcpu;

	sc = li->li_msic;
	ci = li->li_ci;
	hwcpu = agintc_sc->sc_cpuremap[ci->ci_cpuid];

	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd = DISCARD;
	cmd.deviceid = li->li_deviceid;
	cmd.eventid = li->li_eventid;
	agintc_msi_send_cmd(sc, &cmd);

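	/* SYNC against the target redistributor so the DISCARD takes effect. */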
	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd = SYNC;
	cmd.dw2 = agintc_sc->sc_processor[hwcpu] << 16;
	agintc_msi_send_cmd(sc, &cmd);
	agintc_msi_wait_cmd(sc);
}

void
agintc_msi_inv(struct agintc_lpi_info *li)
{
	struct agintc_msi_softc *sc;
	struct cpu_info *ci;
	struct gits_cmd cmd;
	int hwcpu;

	sc = li->li_msic;
	ci = li->li_ci;
	hwcpu = agintc_sc->sc_cpuremap[ci->ci_cpuid];

	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd = INV;
	cmd.deviceid = li->li_deviceid;
	cmd.eventid = li->li_eventid;
	agintc_msi_send_cmd(sc, &cmd);

	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd = SYNC;
	cmd.dw2 = agintc_sc->sc_processor[hwcpu] << 16;
	agintc_msi_send_cmd(sc, &cmd);
	agintc_msi_wait_cmd(sc);
}

void *
agintc_intr_establish_msi(void *self, uint64_t *addr, uint64_t *data,
    int level, struct cpu_info *ci, int (*func)(void *), void *arg, char *name)
{
	struct agintc_msi_softc *sc = (struct agintc_msi_softc *)self;
	struct agintc_msi_device *md;
	struct gits_cmd cmd;
	uint32_t deviceid = *data;
	uint32_t eventid;
	int i, hwcpu;

	if (ci == NULL)
		ci = &cpu_info_primary;
	hwcpu = agintc_sc->sc_cpuremap[ci->ci_cpuid];

	md = agintc_msi_find_device(sc, deviceid);
	if (md == NULL)
		return NULL;

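	/*
	 * The caller passes the device ID in *data and may request a
	 * starting event ID in *addr; pick the first free event from there.
	 */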
	eventid = *addr;
	if (eventid > 0 && (md->md_events & (1U << eventid)))
		return NULL;
	for (; eventid < 32; eventid++) {
		if ((md->md_events & (1U << eventid)) == 0) {
			md->md_events |= (1U << eventid);
			break;
		}
	}
	if (eventid >= 32)
		return NULL;

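	/* Find a free LPI slot and bind it to this device/event pair. */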
	for (i = 0; i < agintc_sc->sc_nlpi; i++) {
		if (agintc_sc->sc_lpi[i] != NULL)
			continue;

		agintc_sc->sc_lpi[i] = malloc(sizeof(struct agintc_lpi_info),
		    M_DEVBUF, M_WAITOK | M_ZERO);
		agintc_sc->sc_lpi[i]->li_msic = sc;
		agintc_sc->sc_lpi[i]->li_ci = ci;
		agintc_sc->sc_lpi[i]->li_deviceid = deviceid;
		agintc_sc->sc_lpi[i]->li_eventid = eventid;
		agintc_sc->sc_lpi[i]->li_ih =
		    agintc_intr_establish(LPI_BASE + i,
		    IST_EDGE_RISING, level, ci, func, arg, name);
		if (agintc_sc->sc_lpi[i]->li_ih == NULL) {
			free(agintc_sc->sc_lpi[i], M_DEVBUF,
			    sizeof(struct agintc_lpi_info));
			agintc_sc->sc_lpi[i] = NULL;
			return NULL;
		}

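		/*
		 * MAPTI binds (deviceid, eventid) to this LPI and to the
		 * target CPU's collection.
		 */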
		memset(&cmd, 0, sizeof(cmd));
		cmd.cmd = MAPTI;
		cmd.deviceid = deviceid;
		cmd.eventid = eventid;
		cmd.intid = LPI_BASE + i;
		cmd.dw2 = ci->ci_cpuid;
		agintc_msi_send_cmd(sc, &cmd);

		memset(&cmd, 0, sizeof(cmd));
		cmd.cmd = SYNC;
		cmd.dw2 = agintc_sc->sc_processor[hwcpu] << 16;
		agintc_msi_send_cmd(sc, &cmd);
		agintc_msi_wait_cmd(sc);

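		/* Return the MSI doorbell address and event ID to the caller. */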
		*addr = sc->sc_msi_addr + deviceid * sc->sc_msi_delta;
		*data = eventid;
		return &agintc_sc->sc_lpi[i];
	}

	return NULL;
}

void
agintc_intr_disestablish_msi(void *cookie)
{
	struct agintc_lpi_info *li = *(void **)cookie;

	agintc_intr_disestablish(li->li_ih);
	agintc_msi_discard(li);
	agintc_msi_inv(li);

	free(li, M_DEVBUF, sizeof(*li));
	*(void **)cookie = NULL;
}

void
agintc_intr_barrier_msi(void *cookie)
{
	struct agintc_lpi_info *li = *(void **)cookie;

	agintc_intr_barrier(li->li_ih);
}

struct agintc_dmamem *
agintc_dmamem_alloc(bus_dma_tag_t dmat, bus_size_t size, bus_size_t align)
{
	struct agintc_dmamem *adm;
	int nsegs;

	adm = malloc(sizeof(*adm), M_DEVBUF, M_WAITOK | M_ZERO);
	adm->adm_size = size;

	if (bus_dmamap_create(dmat, size, 1, size, 0,
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &adm->adm_map) != 0)
		goto admfree;

	if (bus_dmamem_alloc(dmat, size, align, 0, &adm->adm_seg, 1,
	    &nsegs, BUS_DMA_WAITOK | BUS_DMA_ZERO) != 0)
		goto destroy;

	if (bus_dmamem_map(dmat, &adm->adm_seg, nsegs, size,
	    &adm->adm_kva, BUS_DMA_WAITOK | BUS_DMA_NOCACHE) != 0)
		goto free;

	if (bus_dmamap_load_raw(dmat, adm->adm_map, &adm->adm_seg,
	    nsegs, size, BUS_DMA_WAITOK) != 0)
		goto unmap;

	/* Make globally visible. */
	cpu_dcache_wb_range((vaddr_t)adm->adm_kva, size);
	__asm volatile("dsb sy");
	return adm;

unmap:
	bus_dmamem_unmap(dmat, adm->adm_kva, size);
free:
	bus_dmamem_free(dmat, &adm->adm_seg, 1);
destroy:
	bus_dmamap_destroy(dmat, adm->adm_map);
admfree:
	free(adm, M_DEVBUF, sizeof(*adm));

	return NULL;
}

void
agintc_dmamem_free(bus_dma_tag_t dmat, struct agintc_dmamem *adm)
{
	bus_dmamem_unmap(dmat, adm->adm_kva, adm->adm_size);
	bus_dmamem_free(dmat, &adm->adm_seg, 1);
	bus_dmamap_destroy(dmat, adm->adm_map);
	free(adm, M_DEVBUF, sizeof(*adm));
}