xref: /openbsd/sys/arch/arm64/dev/agintc.c (revision 097a140d)
1 /* $OpenBSD: agintc.c,v 1.30 2021/02/17 12:11:45 kettenis Exp $ */
2 /*
3  * Copyright (c) 2007, 2009, 2011, 2017 Dale Rahn <drahn@dalerahn.com>
4  * Copyright (c) 2018 Mark Kettenis <kettenis@openbsd.org>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /*
20  * This is a device driver for the GICv3/GICv4 IP from ARM as specified
21  * in IHI0069C, an example of this hardware is the GIC 500.
22  */
23 
24 #include <sys/param.h>
25 #include <sys/systm.h>
26 #include <sys/queue.h>
27 #include <sys/malloc.h>
28 #include <sys/device.h>
29 #include <sys/evcount.h>
30 
31 #include <machine/bus.h>
32 #include <machine/cpufunc.h>
33 #include <machine/fdt.h>
34 
35 #include <dev/ofw/fdt.h>
36 #include <dev/ofw/openfirm.h>
37 
38 #include <arm64/dev/simplebusvar.h>
39 
40 #define ICC_PMR		s3_0_c4_c6_0
41 #define ICC_IAR0	s3_0_c12_c8_0
42 #define ICC_EOIR0	s3_0_c12_c8_1
43 #define ICC_HPPIR0	s3_0_c12_c8_2
44 #define ICC_BPR0	s3_0_c12_c8_3
45 
46 #define ICC_DIR		s3_0_c12_c11_1
47 #define ICC_RPR		s3_0_c12_c11_3
48 #define ICC_SGI1R	s3_0_c12_c11_5
49 #define ICC_SGI0R	s3_0_c12_c11_7
50 
51 #define ICC_IAR1	s3_0_c12_c12_0
52 #define ICC_EOIR1	s3_0_c12_c12_1
53 #define ICC_HPPIR1	s3_0_c12_c12_2
54 #define ICC_BPR1	s3_0_c12_c12_3
55 #define ICC_CTLR	s3_0_c12_c12_4
56 #define ICC_SRE_EL1	s3_0_c12_c12_5
57 #define  ICC_SRE_EL1_EN		0x7
58 #define ICC_IGRPEN0	s3_0_c12_c12_6
59 #define ICC_IGRPEN1	s3_0_c12_c12_7
60 
61 #define _STR(x) #x
62 #define STR(x) _STR(x)
63 
64 /* distributor registers */
65 #define GICD_CTLR		0x0000
66 /* non-secure */
67 #define  GICD_CTLR_RWP			(1U << 31)
68 #define  GICD_CTLR_EnableGrp1		(1 << 0)
69 #define  GICD_CTLR_EnableGrp1A		(1 << 1)
70 #define  GICD_CTLR_ARE_NS		(1 << 4)
71 #define  GICD_CTLR_DS			(1 << 6)
72 #define GICD_TYPER		0x0004
73 #define  GICD_TYPER_LPIS		(1 << 16)
74 #define  GICD_TYPER_ITLINE_M		0x1f
75 #define GICD_IIDR		0x0008
76 #define GICD_ISENABLER(i)	(0x0100 + (IRQ_TO_REG32(i) * 4))
77 #define GICD_ICENABLER(i)	(0x0180 + (IRQ_TO_REG32(i) * 4))
78 #define GICD_ISPENDR(i)		(0x0200 + (IRQ_TO_REG32(i) * 4))
79 #define GICD_ICPENDR(i)		(0x0280 + (IRQ_TO_REG32(i) * 4))
80 #define GICD_ISACTIVER(i)	(0x0300 + (IRQ_TO_REG32(i) * 4))
81 #define GICD_ICACTIVER(i)	(0x0380 + (IRQ_TO_REG32(i) * 4))
82 #define GICD_IPRIORITYR(i)	(0x0400 + (i))
83 #define GICD_ICFGR(i)		(0x0c00 + (IRQ_TO_REG16(i) * 4))
84 #define  GICD_ICFGR_TRIG_LEVEL(i)	(0x0 << (IRQ_TO_REG16BIT(i) * 2))
85 #define  GICD_ICFGR_TRIG_EDGE(i)	(0x2 << (IRQ_TO_REG16BIT(i) * 2))
86 #define  GICD_ICFGR_TRIG_MASK(i)	(0x2 << (IRQ_TO_REG16BIT(i) * 2))
87 #define GICD_NSACR(i)		(0x0e00 + (IRQ_TO_REG16(i) * 4))
88 #define GICD_IROUTER(i)		(0x6000 + ((i) * 8))
89 
90 /* redistributor registers */
91 #define GICR_CTLR		0x00000
92 #define  GICR_CTLR_RWP			((1U << 31) | (1 << 3))
93 #define  GICR_CTLR_ENABLE_LPIS		(1 << 0)
94 #define GICR_IIDR		0x00004
95 #define GICR_TYPER		0x00008
96 #define  GICR_TYPER_LAST		(1 << 4)
97 #define  GICR_TYPER_VLPIS		(1 << 1)
98 #define GICR_WAKER		0x00014
99 #define  GICR_WAKER_X31			(1U << 31)
100 #define  GICR_WAKER_CHILDRENASLEEP	(1 << 2)
101 #define  GICR_WAKER_PROCESSORSLEEP	(1 << 1)
102 #define  GICR_WAKER_X0			(1 << 0)
103 #define GICR_PROPBASER		0x00070
104 #define  GICR_PROPBASER_ISH		(1ULL << 10)
105 #define  GICR_PROPBASER_IC_NORM_NC	(1ULL << 7)
106 #define GICR_PENDBASER		0x00078
107 #define  GICR_PENDBASER_PTZ		(1ULL << 62)
108 #define  GICR_PENDBASER_ISH		(1ULL << 10)
109 #define  GICR_PENDBASER_IC_NORM_NC	(1ULL << 7)
110 #define GICR_IGROUP0		0x10080
111 #define GICR_ISENABLE0		0x10100
112 #define GICR_ICENABLE0		0x10180
113 #define GICR_ISPENDR0		0x10200
114 #define GICR_ICPENDR0		0x10280
115 #define GICR_ISACTIVE0		0x10300
116 #define GICR_ICACTIVE0		0x10380
117 #define GICR_IPRIORITYR(i)	(0x10400 + (i))
118 #define GICR_ICFGR0		0x10c00
119 #define GICR_ICFGR1		0x10c04
120 
121 #define GICR_PROP_SIZE		(64 * 1024)
122 #define  GICR_PROP_GROUP1	(1 << 1)
123 #define  GICR_PROP_ENABLE	(1 << 0)
124 #define GICR_PEND_SIZE		(64 * 1024)
125 
126 #define PPI_BASE		16
127 #define SPI_BASE		32
128 #define LPI_BASE		8192
129 
130 #define IRQ_TO_REG32(i)		(((i) >> 5) & 0x7)
131 #define IRQ_TO_REG32BIT(i)	((i) & 0x1f)
132 
133 #define IRQ_TO_REG16(i)		(((i) >> 4) & 0xf)
134 #define IRQ_TO_REG16BIT(i)	((i) & 0xf)
135 
136 #define IRQ_ENABLE	1
137 #define IRQ_DISABLE	0
138 
/*
 * Per-controller state.  One distributor frame (sc_d_ioh) plus one
 * redistributor frame per CPU (sc_r_ioh[]), the latter carved out of
 * the single mapped redistributor region (sc_redist_base).
 */
struct agintc_softc {
	struct simplebus_softc	 sc_sbus;	/* base device; attaches ITS children */
	struct intrq		*sc_handler;	/* per-irq handler queues (SGI/PPI/SPI) */
	struct intrhand		**sc_lpi_handler; /* one handler per LPI */
	bus_space_tag_t		 sc_iot;
	bus_space_handle_t	 sc_d_ioh;	/* distributor registers */
	bus_space_handle_t	*sc_r_ioh;	/* per-redistributor subregions */
	bus_space_handle_t	 sc_redist_base; /* whole redistributor region */
	bus_dma_tag_t		 sc_dmat;
	uint16_t		*sc_processor;	/* GICR_TYPER processor numbers */
	int			 sc_cpuremap[MAXCPUS]; /* cpuid -> redistributor index */
	int			 sc_nintr;	/* number of SGI/PPI/SPI interrupt ids */
	int			 sc_nlpi;	/* number of LPIs (0 when unsupported) */
	int			 sc_prio_shift;	/* shift for GICD/GICR_IPRIORITYR values */
	int			 sc_pmr_shift;	/* shift for ICC_PMR writes */
	int			 sc_rk3399_quirk; /* MMIO regs behave as "secure" */
	struct evcount		 sc_spur;	/* spurious interrupt (1023) counter */
	int			 sc_ncells;
	int			 sc_num_redist;	/* number of redistributors found */
	struct agintc_dmamem	*sc_prop;	/* LPI configuration table */
	struct agintc_dmamem	*sc_pend;	/* LPI pending table */
	struct interrupt_controller sc_ic;
	int			 sc_ipi_num[2]; /* id for NOP and DDB ipi */
	int			 sc_ipi_reason[MAXCPUS]; /* NOP or DDB caused */
	void			*sc_ipi_irq[2]; /* irqhandle for each ipi */
};
struct agintc_softc *agintc_sc;		/* global handle for spl/handler paths */
166 
/* One established interrupt handler. */
struct intrhand {
	TAILQ_ENTRY(intrhand)	 ih_list;		/* link on intrq list */
	int			(*ih_func)(void *);	/* handler */
	void			*ih_arg;		/* arg for handler */
	int			 ih_ipl;		/* IPL_* */
	int			 ih_flags;		/* IPL_MPSAFE etc. */
	int			 ih_irq;		/* IRQ number */
	struct evcount		 ih_count;		/* dispatch statistics */
	char			*ih_name;
	struct cpu_info		*ih_ci;			/* CPU the IRQ runs on */
};

/* Per-irq queue of handlers; all sharers must target the same CPU. */
struct intrq {
	TAILQ_HEAD(, intrhand)	iq_list;	/* handler list */
	struct cpu_info		*iq_ci;		/* CPU the IRQ runs on */
	int			iq_irq_max;	/* IRQ to mask while handling */
	int			iq_irq_min;	/* lowest IRQ when shared */
	int			iq_ist;		/* share type */
	int			iq_route;
};

/* DMA-able memory for the LPI configuration and pending tables. */
struct agintc_dmamem {
	bus_dmamap_t		adm_map;
	bus_dma_segment_t	adm_seg;
	size_t			adm_size;
	caddr_t			adm_kva;
};
194 
195 #define AGINTC_DMA_MAP(_adm)	((_adm)->adm_map)
196 #define AGINTC_DMA_LEN(_adm)	((_adm)->adm_size)
197 #define AGINTC_DMA_DVA(_adm)	((_adm)->adm_map->dm_segs[0].ds_addr)
198 #define AGINTC_DMA_KVA(_adm)	((void *)(_adm)->adm_kva)
199 
200 struct agintc_dmamem *agintc_dmamem_alloc(bus_dma_tag_t, bus_size_t,
201 		    bus_size_t);
202 void		agintc_dmamem_free(bus_dma_tag_t, struct agintc_dmamem *);
203 
204 int		agintc_match(struct device *, void *, void *);
205 void		agintc_attach(struct device *, struct device *, void *);
206 void		agintc_cpuinit(void);
207 int		agintc_spllower(int);
208 void		agintc_splx(int);
209 int		agintc_splraise(int);
210 void		agintc_setipl(int);
211 void		agintc_calc_mask(void);
212 void		agintc_calc_irq(struct agintc_softc *sc, int irq);
213 void		*agintc_intr_establish(int, int, int, struct cpu_info *,
214 		    int (*)(void *), void *, char *);
215 void		*agintc_intr_establish_fdt(void *cookie, int *cell, int level,
216 		    struct cpu_info *, int (*func)(void *), void *arg, char *name);
217 void		agintc_intr_disestablish(void *);
218 void		agintc_irq_handler(void *);
219 uint32_t	agintc_iack(void);
220 void		agintc_eoi(uint32_t);
221 void		agintc_set_priority(struct agintc_softc *sc, int, int);
222 void		agintc_intr_enable(struct agintc_softc *, int);
223 void		agintc_intr_disable(struct agintc_softc *, int);
224 void		agintc_intr_config(struct agintc_softc *, int, int);
225 void		agintc_route(struct agintc_softc *, int, int,
226 		    struct cpu_info *);
227 void		agintc_route_irq(void *, int, struct cpu_info *);
228 void		agintc_intr_barrier(void *);
229 void		agintc_wait_rwp(struct agintc_softc *sc);
230 void		agintc_r_wait_rwp(struct agintc_softc *sc);
231 uint32_t	agintc_r_ictlr(void);
232 
233 int		agintc_ipi_ddb(void *v);
234 int		agintc_ipi_nop(void *v);
235 int		agintc_ipi_combined(void *);
236 void		agintc_send_ipi(struct cpu_info *, int);
237 
/* autoconf(9) glue */
struct cfattach	agintc_ca = {
	sizeof (struct agintc_softc), agintc_match, agintc_attach
};

struct cfdriver agintc_cd = {
	NULL, "agintc", DV_DULL
};

/* FDT "compatible" strings accepted by this driver. */
static char *agintc_compatibles[] = {
	"arm,gic-v3",
	"arm,gic-v4",
	NULL
};
251 
252 int
253 agintc_match(struct device *parent, void *cfdata, void *aux)
254 {
255 	struct fdt_attach_args *faa = aux;
256 	int i;
257 
258 	for (i = 0; agintc_compatibles[i]; i++)
259 		if (OF_is_compatible(faa->fa_node, agintc_compatibles[i]))
260 			return (1);
261 
262 	return (0);
263 }
264 
/*
 * Instruction synchronization barrier: ensure preceding system
 * register writes (e.g. ICC_*) take effect before later instructions.
 */
static void
__isb(void)
{
	__asm volatile("isb");
}
270 
271 void
272 agintc_attach(struct device *parent, struct device *self, void *aux)
273 {
274 	struct agintc_softc	*sc = (struct agintc_softc *)self;
275 	struct fdt_attach_args	*faa = aux;
276 	struct cpu_info		*ci;
277 	CPU_INFO_ITERATOR	 cii;
278 	uint32_t		 typer;
279 	uint32_t		 nsacr, oldnsacr;
280 	uint32_t		 pmr, oldpmr;
281 	uint32_t		 ctrl, bits;
282 	uint32_t		 affinity;
283 	int			 i, nbits, nintr;
284 	int			 psw;
285 	int			 offset, nredist;
286 #ifdef MULTIPROCESSOR
287 	int			 nipi, ipiirq[2];
288 #endif
289 
290 	psw = disable_interrupts();
291 	arm_init_smask();
292 
293 	sc->sc_iot = faa->fa_iot;
294 	sc->sc_dmat = faa->fa_dmat;
295 
296 	/* First row: distributor */
297 	if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr,
298 	    faa->fa_reg[0].size, 0, &sc->sc_d_ioh))
299 		panic("%s: ICD bus_space_map failed!", __func__);
300 
301 	/* Second row: redistributor */
302 	if (bus_space_map(sc->sc_iot, faa->fa_reg[1].addr,
303 	    faa->fa_reg[1].size, 0, &sc->sc_redist_base))
304 		panic("%s: ICP bus_space_map failed!", __func__);
305 
306 	typer = bus_space_read_4(sc->sc_iot, sc->sc_d_ioh, GICD_TYPER);
307 
308 	if (typer & GICD_TYPER_LPIS) {
309 		/* Allocate redistributor tables */
310 		sc->sc_prop = agintc_dmamem_alloc(sc->sc_dmat,
311 		    GICR_PROP_SIZE, GICR_PROP_SIZE);
312 		if (sc->sc_prop == NULL) {
313 			printf(": can't alloc LPI config table\n");
314 			goto unmap;
315 		}
316 		sc->sc_pend = agintc_dmamem_alloc(sc->sc_dmat,
317 		    GICR_PEND_SIZE, GICR_PEND_SIZE);
318 		if (sc->sc_prop == NULL) {
319 			printf(": can't alloc LPI pending table\n");
320 			goto unmap;
321 		}
322 
323 		/* Minimum number of LPIs supported by any implementation. */
324 		sc->sc_nlpi = 8192;
325 	}
326 
327 	/*
328 	 * We are guaranteed to have at least 16 priority levels, so
329 	 * in principle we just want to use the top 4 bits of the
330 	 * (non-secure) priority field.
331 	 */
332 	sc->sc_prio_shift = sc->sc_pmr_shift = 4;
333 
334 	/*
335 	 * If the system supports two security states and SCR_EL3.FIQ
336 	 * is zero, the non-secure shifted view applies.  We detect
337 	 * this by checking whether the number of writable bits
338 	 * matches the number of implemented priority bits.  If that
339 	 * is the case we will need to adjust the priorities that we
340 	 * write into ICC_PMR_EL1 accordingly.
341 	 *
342 	 * On Ampere eMAG it appears as if there are five writable
343 	 * bits when we write 0xff.  But for higher priorities
344 	 * (smaller values) only the top 4 bits stick.  So we use 0xbf
345 	 * instead to determine the number of writable bits.
346 	 */
347 	ctrl = bus_space_read_4(sc->sc_iot, sc->sc_d_ioh, GICD_CTLR);
348 	if ((ctrl & GICD_CTLR_DS) == 0) {
349 		__asm volatile("mrs %x0, "STR(ICC_CTLR_EL1) : "=r"(ctrl));
350 		nbits = ICC_CTLR_EL1_PRIBITS(ctrl) + 1;
351 		__asm volatile("mrs %x0, "STR(ICC_PMR) : "=r"(oldpmr));
352 		__asm volatile("msr "STR(ICC_PMR)", %x0" :: "r"(0xbf));
353 		__asm volatile("mrs %x0, "STR(ICC_PMR) : "=r"(pmr));
354 		__asm volatile("msr "STR(ICC_PMR)", %x0" :: "r"(oldpmr));
355 		if (nbits == 8 - (ffs(pmr) - 1))
356 			sc->sc_pmr_shift--;
357 	}
358 
359 	/*
360 	 * The Rockchip RK3399 is busted.  Its GIC-500 treats all
361 	 * access to its memory mapped registers as "secure".  As a
362 	 * result, several registers don't behave as expected.  For
363 	 * example, the GICD_IPRIORITYRn and GICR_IPRIORITYRn
364 	 * registers expose the full priority range available to
365 	 * secure interrupts.  We need to be aware of this and write
366 	 * an adjusted priority value into these registers.  We also
367 	 * need to be careful not to touch any bits that shouldn't be
368 	 * writable in non-secure mode.
369 	 *
370 	 * We check whether we have secure mode access to these
371 	 * registers by attempting to write to the GICD_NSACR register
372 	 * and check whether its contents actually change.  In that
373 	 * case we need to adjust the priorities we write into
374 	 * GICD_IPRIORITYRn and GICRIPRIORITYRn accordingly.
375 	 */
376 	oldnsacr = bus_space_read_4(sc->sc_iot, sc->sc_d_ioh, GICD_NSACR(32));
377 	bus_space_write_4(sc->sc_iot, sc->sc_d_ioh, GICD_NSACR(32),
378 	    oldnsacr ^ 0xffffffff);
379 	nsacr = bus_space_read_4(sc->sc_iot, sc->sc_d_ioh, GICD_NSACR(32));
380 	if (nsacr != oldnsacr) {
381 		bus_space_write_4(sc->sc_iot, sc->sc_d_ioh, GICD_NSACR(32),
382 		    oldnsacr);
383 		sc->sc_rk3399_quirk = 1;
384 		sc->sc_prio_shift--;
385 		printf(" sec");
386 	}
387 
388 	printf(" shift %d:%d", sc->sc_prio_shift, sc->sc_pmr_shift);
389 
390 	evcount_attach(&sc->sc_spur, "irq1023/spur", NULL);
391 
392 	__asm volatile("msr "STR(ICC_SRE_EL1)", %x0" : : "r" (ICC_SRE_EL1_EN));
393 	__isb();
394 
395 	nintr = 32 * (typer & GICD_TYPER_ITLINE_M);
396 	nintr += 32; /* ICD_ICTR + 1, irq 0-31 is SGI, 32+ is PPI */
397 	sc->sc_nintr = nintr;
398 
399 	agintc_sc = sc; /* save this for global access */
400 
401 	/* find the redistributors. */
402 	offset = 0;
403 	for (nredist = 0; ; nredist++) {
404 		int32_t sz = (64 * 1024 * 2);
405 		uint64_t typer;
406 
407 		typer = bus_space_read_8(sc->sc_iot, sc->sc_redist_base,
408 		    offset + GICR_TYPER);
409 
410 		if (typer & GICR_TYPER_VLPIS)
411 			sz += (64 * 1024 * 2);
412 
413 #ifdef DEBUG_AGINTC
414 		printf("probing redistributor %d %x\n", nredist, offset);
415 #endif
416 
417 		offset += sz;
418 
419 		if (typer & GICR_TYPER_LAST) {
420 			sc->sc_num_redist = nredist + 1;
421 			break;
422 		}
423 	}
424 
425 	printf(" nirq %d nredist %d", nintr, sc->sc_num_redist);
426 
427 	sc->sc_r_ioh = mallocarray(sc->sc_num_redist,
428 	    sizeof(*sc->sc_r_ioh), M_DEVBUF, M_WAITOK);
429 	sc->sc_processor = mallocarray(sc->sc_num_redist,
430 	    sizeof(*sc->sc_processor), M_DEVBUF, M_WAITOK);
431 
432 	/* submap and configure the redistributors. */
433 	offset = 0;
434 	for (nredist = 0; nredist < sc->sc_num_redist; nredist++) {
435 		int32_t sz = (64 * 1024 * 2);
436 		uint64_t typer;
437 
438 		typer = bus_space_read_8(sc->sc_iot, sc->sc_redist_base,
439 		    offset + GICR_TYPER);
440 
441 		if (typer & GICR_TYPER_VLPIS)
442 			sz += (64 * 1024 * 2);
443 
444 		affinity = bus_space_read_8(sc->sc_iot,
445 		    sc->sc_redist_base, offset + GICR_TYPER) >> 32;
446 		CPU_INFO_FOREACH(cii, ci) {
447 			if (affinity == (((ci->ci_mpidr >> 8) & 0xff000000) |
448 			    (ci->ci_mpidr & 0x00ffffff)))
449 				break;
450 		}
451 		if (ci != NULL)
452 			sc->sc_cpuremap[ci->ci_cpuid] = nredist;
453 #ifdef MULTIPROCESSOR
454 		else
455 			panic("%s: no CPU found for affinity %08x",
456 			    sc->sc_sbus.sc_dev.dv_xname, affinity);
457 #endif
458 
459 		sc->sc_processor[nredist] = bus_space_read_8(sc->sc_iot,
460 		    sc->sc_redist_base, offset + GICR_TYPER) >> 8;
461 
462 		bus_space_subregion(sc->sc_iot, sc->sc_redist_base,
463 		    offset, sz, &sc->sc_r_ioh[nredist]);
464 
465 		if (sc->sc_nlpi > 0) {
466 			bus_space_write_8(sc->sc_iot, sc->sc_redist_base,
467 			    offset + GICR_PROPBASER,
468 			    AGINTC_DMA_DVA(sc->sc_prop) |
469 			    GICR_PROPBASER_ISH | GICR_PROPBASER_IC_NORM_NC |
470 			    fls(LPI_BASE + sc->sc_nlpi - 1) - 1);
471 			bus_space_write_8(sc->sc_iot, sc->sc_redist_base,
472 			    offset + GICR_PENDBASER,
473 			    AGINTC_DMA_DVA(sc->sc_pend) |
474 			    GICR_PENDBASER_ISH | GICR_PENDBASER_IC_NORM_NC |
475 			    GICR_PENDBASER_PTZ);
476 			bus_space_write_4(sc->sc_iot, sc->sc_redist_base,
477 			    offset + GICR_CTLR, GICR_CTLR_ENABLE_LPIS);
478 		}
479 
480 		offset += sz;
481 	}
482 
483 	/* Disable all interrupts, clear all pending */
484 	for (i = 1; i < nintr / 32; i++) {
485 		bus_space_write_4(sc->sc_iot, sc->sc_d_ioh,
486 		    GICD_ICENABLER(i * 32), ~0);
487 	}
488 
489 	for (i = 4; i < nintr; i += 4) {
490 		/* lowest priority ?? */
491 		bus_space_write_4(sc->sc_iot, sc->sc_d_ioh,
492 		    GICD_IPRIORITYR(i), 0xffffffff);
493 	}
494 
495 	for (i = 2; i < nintr / 16; i++) {
496 		/* irq 32 - N */
497 		bus_space_write_4(sc->sc_iot, sc->sc_d_ioh,
498 		    GICD_ICFGR(i * 16), 0);
499 	}
500 
501 	agintc_cpuinit();
502 
503 	sc->sc_handler = mallocarray(nintr,
504 	    sizeof(*sc->sc_handler), M_DEVBUF, M_ZERO | M_WAITOK);
505 	for (i = 0; i < nintr; i++)
506 		TAILQ_INIT(&sc->sc_handler[i].iq_list);
507 	sc->sc_lpi_handler = mallocarray(sc->sc_nlpi,
508 	    sizeof(*sc->sc_lpi_handler), M_DEVBUF, M_ZERO | M_WAITOK);
509 
510 	/* set priority to IPL_HIGH until configure lowers to desired IPL */
511 	agintc_setipl(IPL_HIGH);
512 
513 	/* initialize all interrupts as disabled */
514 	agintc_calc_mask();
515 
516 	/* insert self as interrupt handler */
517 	arm_set_intr_handler(agintc_splraise, agintc_spllower, agintc_splx,
518 	    agintc_setipl, agintc_irq_handler, NULL);
519 
520 	/* enable interrupts */
521 	ctrl = bus_space_read_4(sc->sc_iot, sc->sc_d_ioh, GICD_CTLR);
522 	bits = GICD_CTLR_ARE_NS | GICD_CTLR_EnableGrp1A | GICD_CTLR_EnableGrp1;
523 	if (sc->sc_rk3399_quirk) {
524 		bits &= ~GICD_CTLR_EnableGrp1A;
525 		bits <<= 1;
526 	}
527 	bus_space_write_4(sc->sc_iot, sc->sc_d_ioh, GICD_CTLR, ctrl | bits);
528 
529 	__asm volatile("msr "STR(ICC_PMR)", %x0" :: "r"(0xff));
530 	__asm volatile("msr "STR(ICC_BPR1)", %x0" :: "r"(0));
531 	__asm volatile("msr "STR(ICC_IGRPEN1)", %x0" :: "r"(1));
532 
533 #ifdef MULTIPROCESSOR
534 	/* setup IPI interrupts */
535 
536 	/*
537 	 * Ideally we want two IPI interrupts, one for NOP and one for
538 	 * DDB, however we can survive if only one is available it is
539 	 * possible that most are not available to the non-secure OS.
540 	 */
541 	nipi = 0;
542 	for (i = 0; i < 16; i++) {
543 		int hwcpu = sc->sc_cpuremap[cpu_number()];
544 		int reg, oldreg;
545 
546 		oldreg = bus_space_read_1(sc->sc_iot, sc->sc_r_ioh[hwcpu],
547 		    GICR_IPRIORITYR(i));
548 		bus_space_write_1(sc->sc_iot, sc->sc_r_ioh[hwcpu],
549 		    GICR_IPRIORITYR(i), oldreg ^ 0x20);
550 
551 		/* if this interrupt is not usable, pri will be unmodified */
552 		reg = bus_space_read_1(sc->sc_iot, sc->sc_r_ioh[hwcpu],
553 		    GICR_IPRIORITYR(i));
554 		if (reg == oldreg)
555 			continue;
556 
557 		/* return to original value, will be set when used */
558 		bus_space_write_1(sc->sc_iot, sc->sc_r_ioh[hwcpu],
559 		    GICR_IPRIORITYR(i), oldreg);
560 
561 		if (nipi == 0)
562 			printf(" ipi: %d", i);
563 		else
564 			printf(", %d", i);
565 		ipiirq[nipi++] = i;
566 		if (nipi == 2)
567 			break;
568 	}
569 
570 	if (nipi == 0)
571 		panic("no irq available for IPI");
572 
573 	switch (nipi) {
574 	case 1:
575 		sc->sc_ipi_irq[0] = agintc_intr_establish(ipiirq[0],
576 		    IST_EDGE_RISING, IPL_IPI|IPL_MPSAFE, NULL,
577 		    agintc_ipi_combined, sc, "ipi");
578 		sc->sc_ipi_num[ARM_IPI_NOP] = ipiirq[0];
579 		sc->sc_ipi_num[ARM_IPI_DDB] = ipiirq[0];
580 		break;
581 	case 2:
582 		sc->sc_ipi_irq[0] = agintc_intr_establish(ipiirq[0],
583 		    IST_EDGE_RISING, IPL_IPI|IPL_MPSAFE, NULL,
584 		    agintc_ipi_nop, sc, "ipinop");
585 		sc->sc_ipi_num[ARM_IPI_NOP] = ipiirq[0];
586 		sc->sc_ipi_irq[1] = agintc_intr_establish(ipiirq[1],
587 		    IST_EDGE_RISING, IPL_IPI|IPL_MPSAFE, NULL,
588 		    agintc_ipi_ddb, sc, "ipiddb");
589 		sc->sc_ipi_num[ARM_IPI_DDB] = ipiirq[1];
590 		break;
591 	default:
592 		panic("nipi unexpected number %d", nipi);
593 	}
594 
595 	intr_send_ipi_func = agintc_send_ipi;
596 #endif
597 
598 	sc->sc_ic.ic_node = faa->fa_node;
599 	sc->sc_ic.ic_cookie = self;
600 	sc->sc_ic.ic_establish = agintc_intr_establish_fdt;
601 	sc->sc_ic.ic_disestablish = agintc_intr_disestablish;
602 	sc->sc_ic.ic_route = agintc_route_irq;
603 	sc->sc_ic.ic_cpu_enable = agintc_cpuinit;
604 	sc->sc_ic.ic_barrier = agintc_intr_barrier;
605 	arm_intr_register_fdt(&sc->sc_ic);
606 
607 	restore_interrupts(psw);
608 
609 	/* Attach ITS. */
610 	simplebus_attach(parent, &sc->sc_sbus.sc_dev, faa);
611 
612 	return;
613 
614 unmap:
615 	if (sc->sc_r_ioh) {
616 		free(sc->sc_r_ioh, M_DEVBUF,
617 		    sc->sc_num_redist * sizeof(*sc->sc_r_ioh));
618 	}
619 	if (sc->sc_processor) {
620 		free(sc->sc_processor, M_DEVBUF,
621 		     sc->sc_num_redist * sizeof(*sc->sc_processor));
622 	}
623 
624 	if (sc->sc_pend)
625 		agintc_dmamem_free(sc->sc_dmat, sc->sc_pend);
626 	if (sc->sc_prop)
627 		agintc_dmamem_free(sc->sc_dmat, sc->sc_prop);
628 
629 	bus_space_unmap(sc->sc_iot, sc->sc_redist_base, faa->fa_reg[1].size);
630 	bus_space_unmap(sc->sc_iot, sc->sc_d_ioh, faa->fa_reg[0].size);
631 }
632 
/* Initialize redistributors on each core. */
void
agintc_cpuinit(void)
{
	struct agintc_softc *sc = agintc_sc;
	uint32_t waker;
	int timeout = 100000;	/* bounded poll for ChildrenAsleep to clear */
	int hwcpu;
	int i;

	/* Wake this CPU's redistributor. */
	hwcpu = sc->sc_cpuremap[cpu_number()];
	waker = bus_space_read_4(sc->sc_iot, sc->sc_r_ioh[hwcpu],
	    GICR_WAKER);
	waker &= ~(GICR_WAKER_PROCESSORSLEEP);
	bus_space_write_4(sc->sc_iot, sc->sc_r_ioh[hwcpu], GICR_WAKER,
	    waker);

	/* Wait until the redistributor reports it is awake. */
	do {
		waker = bus_space_read_4(sc->sc_iot, sc->sc_r_ioh[hwcpu],
		    GICR_WAKER);
	} while (--timeout && (waker & GICR_WAKER_CHILDRENASLEEP));
	if (timeout == 0)
		printf("%s: waker timed out\n", __func__);

	/* Disable, clear pending and active state of all SGIs/PPIs. */
	bus_space_write_4(sc->sc_iot, sc->sc_r_ioh[hwcpu],
	    GICR_ICENABLE0, ~0);
	bus_space_write_4(sc->sc_iot, sc->sc_r_ioh[hwcpu],
	    GICR_ICPENDR0, ~0);
	bus_space_write_4(sc->sc_iot, sc->sc_r_ioh[hwcpu],
	    GICR_ICACTIVE0, ~0);
	/* Set all 32 SGI/PPI priorities to the lowest value. */
	for (i = 0; i < 32; i += 4) {
		bus_space_write_4(sc->sc_iot, sc->sc_r_ioh[hwcpu],
		    GICR_IPRIORITYR(i), ~0);
	}

	/* Re-enable the IPIs on this CPU, if they are set up already. */
	if (sc->sc_ipi_irq[0] != NULL)
		agintc_route_irq(sc->sc_ipi_irq[0], IRQ_ENABLE, curcpu());
	if (sc->sc_ipi_irq[1] != NULL)
		agintc_route_irq(sc->sc_ipi_irq[1], IRQ_ENABLE, curcpu());

	/* Unmask all priorities and enable group 1 delivery on this CPU. */
	__asm volatile("msr "STR(ICC_PMR)", %x0" :: "r"(0xff));
	__asm volatile("msr "STR(ICC_BPR1)", %x0" :: "r"(0));
	__asm volatile("msr "STR(ICC_IGRPEN1)", %x0" :: "r"(1));
	enable_interrupts();
}
678 
679 void
680 agintc_set_priority(struct agintc_softc *sc, int irq, int ipl)
681 {
682 	struct cpu_info	*ci = curcpu();
683 	int		 hwcpu = sc->sc_cpuremap[ci->ci_cpuid];
684 	uint32_t	 prival;
685 
686 	prival = ((0xff - ipl) << sc->sc_prio_shift) & 0xff;
687 
688 	if (irq >= SPI_BASE) {
689 		bus_space_write_1(sc->sc_iot, sc->sc_d_ioh,
690 		    GICD_IPRIORITYR(irq), prival);
691 	} else  {
692 		/* only sets local redistributor */
693 		bus_space_write_1(sc->sc_iot, sc->sc_r_ioh[hwcpu],
694 		    GICR_IPRIORITYR(irq), prival);
695 	}
696 }
697 
/*
 * spl backend: set the current CPU's interrupt priority level by
 * writing the corresponding mask value into ICC_PMR.
 */
void
agintc_setipl(int ipl)
{
	struct agintc_softc	*sc = agintc_sc;
	struct cpu_info		*ci = curcpu();
	int			 psw;
	uint32_t		 prival;

	/* disable here is only to keep hardware in sync with ci->ci_cpl */
	psw = disable_interrupts();
	ci->ci_cpl = ipl;

	/* higher IPL -> numerically smaller (more urgent) mask value */
	prival = ((0xff - ipl) << sc->sc_pmr_shift) & 0xff;
	__asm volatile("msr "STR(ICC_PMR)", %x0" : : "r" (prival));
	__isb();

	restore_interrupts(psw);
}
716 
717 void
718 agintc_intr_enable(struct agintc_softc *sc, int irq)
719 {
720 	struct cpu_info	*ci = curcpu();
721 	int hwcpu = sc->sc_cpuremap[ci->ci_cpuid];
722 	int bit = 1 << IRQ_TO_REG32BIT(irq);
723 
724 	if (irq >= 32) {
725 		bus_space_write_4(sc->sc_iot, sc->sc_d_ioh,
726 		    GICD_ISENABLER(irq), bit);
727 	} else {
728 		bus_space_write_4(sc->sc_iot, sc->sc_r_ioh[hwcpu],
729 		    GICR_ISENABLE0, bit);
730 	}
731 }
732 
733 void
734 agintc_intr_disable(struct agintc_softc *sc, int irq)
735 {
736 	struct cpu_info	*ci = curcpu();
737 	int hwcpu = sc->sc_cpuremap[ci->ci_cpuid];
738 
739 	if (irq >= 32) {
740 		bus_space_write_4(sc->sc_iot, sc->sc_d_ioh,
741 		    GICD_ICENABLER(irq), 1 << IRQ_TO_REG32BIT(irq));
742 	} else {
743 		bus_space_write_4(sc->sc_iot, sc->sc_r_ioh[hwcpu],
744 		    GICR_ICENABLE0, 1 << IRQ_TO_REG32BIT(irq));
745 	}
746 }
747 
748 void
749 agintc_intr_config(struct agintc_softc *sc, int irq, int type)
750 {
751 	uint32_t reg;
752 
753 	/* Don't dare to change SGIs or PPIs (yet) */
754 	if (irq < 32)
755 		return;
756 
757 	reg = bus_space_read_4(sc->sc_iot, sc->sc_d_ioh, GICD_ICFGR(irq));
758 	reg &= ~GICD_ICFGR_TRIG_MASK(irq);
759 	if (type == IST_EDGE_RISING)
760 		reg |= GICD_ICFGR_TRIG_EDGE(irq);
761 	else
762 		reg |= GICD_ICFGR_TRIG_LEVEL(irq);
763 	bus_space_write_4(sc->sc_iot, sc->sc_d_ioh, GICD_ICFGR(irq), reg);
764 }
765 
766 void
767 agintc_calc_mask(void)
768 {
769 	struct agintc_softc	*sc = agintc_sc;
770 	int			 irq;
771 
772 	for (irq = 0; irq < sc->sc_nintr; irq++)
773 		agintc_calc_irq(sc, irq);
774 }
775 
/*
 * Recompute the hardware state of `irq' from its handler list:
 * iq_irq_max is the IPL to raise to while dispatching (highest
 * sharer), iq_irq_min the priority programmed into the GIC (lowest
 * sharer).  Enables or disables the interrupt as appropriate.
 */
void
agintc_calc_irq(struct agintc_softc *sc, int irq)
{
	struct cpu_info	*ci = sc->sc_handler[irq].iq_ci;
	struct intrhand	*ih;
	int max = IPL_NONE;
	int min = IPL_HIGH;

	TAILQ_FOREACH(ih, &sc->sc_handler[irq].iq_list, ih_list) {
		if (ih->ih_ipl > max)
			max = ih->ih_ipl;

		if (ih->ih_ipl < min)
			min = ih->ih_ipl;
	}

	/* no handlers at all: leave min at IPL_NONE (disabled) too */
	if (max == IPL_NONE)
		min = IPL_NONE;

	/* nothing changed; avoid needless register writes */
	if (sc->sc_handler[irq].iq_irq_max == max &&
	    sc->sc_handler[irq].iq_irq_min == min)
		return;

	sc->sc_handler[irq].iq_irq_max = max;
	sc->sc_handler[irq].iq_irq_min = min;

#ifdef DEBUG_AGINTC
	if (min != IPL_NONE)
		printf("irq %d to block at %d %d \n", irq, max, min );
#endif
	/* Enable interrupts at lower levels, clear -> enable */
	/* Set interrupt priority/enable */
	if (min != IPL_NONE) {
		/* priority and routing are set before unmasking */
		agintc_set_priority(sc, irq, min);
		agintc_route(sc, irq, IRQ_ENABLE, ci);
		agintc_intr_enable(sc, irq);
	} else {
		agintc_intr_disable(sc, irq);
		agintc_route(sc, irq, IRQ_DISABLE, ci);
	}
}
817 
818 void
819 agintc_splx(int new)
820 {
821 	struct cpu_info *ci = curcpu();
822 
823 	if (ci->ci_ipending & arm_smask[new])
824 		arm_do_pending_intr(new);
825 
826 	agintc_setipl(new);
827 }
828 
829 int
830 agintc_spllower(int new)
831 {
832 	struct cpu_info *ci = curcpu();
833 	int old = ci->ci_cpl;
834 
835 	agintc_splx(new);
836 	return (old);
837 }
838 
839 int
840 agintc_splraise(int new)
841 {
842 	struct cpu_info	*ci = curcpu();
843 	int old = ci->ci_cpl;
844 
845 	/*
846 	 * setipl must always be called because there is a race window
847 	 * where the variable is updated before the mask is set
848 	 * an interrupt occurs in that window without the mask always
849 	 * being set, the hardware might not get updated on the next
850 	 * splraise completely messing up spl protection.
851 	 */
852 	if (old > new)
853 		new = old;
854 
855 	agintc_setipl(new);
856 	return (old);
857 }
858 
/*
 * Acknowledge the highest-priority pending group 1 interrupt by
 * reading ICC_IAR1; returns its interrupt id (1023 when spurious).
 */
uint32_t
agintc_iack(void)
{
	int irq;

	__asm volatile("mrs %x0, "STR(ICC_IAR1) : "=r" (irq));
	/* ensure the acknowledge completes before the handler runs */
	__asm volatile("dsb sy");
	return irq;
}
868 
869 void
870 agintc_route_irq(void *v, int enable, struct cpu_info *ci)
871 {
872 	struct agintc_softc	*sc = agintc_sc;
873 	struct intrhand		*ih = v;
874 
875 	if (enable) {
876 		agintc_set_priority(sc, ih->ih_irq,
877 		    sc->sc_handler[ih->ih_irq].iq_irq_min);
878 		agintc_route(sc, ih->ih_irq, IRQ_ENABLE, ci);
879 		agintc_intr_enable(sc, ih->ih_irq);
880 	}
881 }
882 
883 void
884 agintc_route(struct agintc_softc *sc, int irq, int enable, struct cpu_info *ci)
885 {
886 	/* XXX does not yet support 'participating node' */
887 	if (irq >= 32) {
888 #ifdef DEBUG_AGINTC
889 		printf("router %x irq %d val %016llx\n", GICD_IROUTER(irq),
890 		    irq, ci->ci_mpidr & MPIDR_AFF);
891 #endif
892 		bus_space_write_8(sc->sc_iot, sc->sc_d_ioh,
893 		    GICD_IROUTER(irq), ci->ci_mpidr & MPIDR_AFF);
894 	}
895 }
896 
897 void
898 agintc_intr_barrier(void *cookie)
899 {
900 	struct intrhand		*ih = cookie;
901 
902 	sched_barrier(ih->ih_ci);
903 }
904 
/*
 * Invoke one interrupt handler `ih' with interrupts re-enabled,
 * taking the kernel lock when the handler is not MPSAFE and the
 * entry IPL `s' is below IPL_SCHED.  `frame' is passed as the
 * argument when the handler registered none.
 */
void
agintc_run_handler(struct intrhand *ih, void *frame, int s)
{
	void *arg;
	int handled;

#ifdef MULTIPROCESSOR
	int need_lock;

	if (ih->ih_flags & IPL_MPSAFE)
		need_lock = 0;
	else
		need_lock = s < IPL_SCHED;

	if (need_lock)
		KERNEL_LOCK();
#endif

	/* handlers registered without an argument get the trap frame */
	if (ih->ih_arg != 0)
		arg = ih->ih_arg;
	else
		arg = frame;

	/* run the handler with interrupts enabled, restore on return */
	enable_interrupts();
	handled = ih->ih_func(arg);
	disable_interrupts();
	if (handled)
		ih->ih_count.ec_count++;

#ifdef MULTIPROCESSOR
	if (need_lock)
		KERNEL_UNLOCK();
#endif
}
939 
/*
 * Top-level interrupt dispatcher: acknowledge the pending interrupt,
 * raise the IPL, run the handler(s), signal end-of-interrupt and
 * restore the previous IPL.  Handles spurious (1023), out-of-range,
 * LPI and SGI/PPI/SPI interrupt ids.
 */
void
agintc_irq_handler(void *frame)
{
	struct agintc_softc	*sc = agintc_sc;
	struct intrhand		*ih;
	int			 irq, pri, s;

	irq = agintc_iack();

#ifdef DEBUG_AGINTC
	if (irq != 30)
		printf("irq  %d fired\n", irq);
	else {
		static int cnt = 0;
		if ((cnt++ % 100) == 0) {
			printf("irq  %d fired * _100\n", irq);
#ifdef DDB
			db_enter();
#endif
		}
	}
#endif

	/* 1023 means no pending interrupt (spurious) */
	if (irq == 1023) {
		sc->sc_spur.ec_count++;
		return;
	}

	/* discard ids outside both the SPI and the LPI ranges */
	if ((irq >= sc->sc_nintr && irq < LPI_BASE) ||
	    irq >= LPI_BASE + sc->sc_nlpi) {
		return;
	}

	if (irq >= LPI_BASE) {
		/* LPIs have a single handler, no sharing */
		ih = sc->sc_lpi_handler[irq - LPI_BASE];
		if (ih == NULL)
			return;

		s = agintc_splraise(ih->ih_ipl);
		agintc_run_handler(ih, frame, s);
		agintc_eoi(irq);

		agintc_splx(s);
		return;
	}

	/* SGI/PPI/SPI: block at the highest IPL among all sharers */
	pri = sc->sc_handler[irq].iq_irq_max;
	s = agintc_splraise(pri);
	TAILQ_FOREACH(ih, &sc->sc_handler[irq].iq_list, ih_list) {
		agintc_run_handler(ih, frame, s);
	}
	agintc_eoi(irq);

	agintc_splx(s);
}
995 
996 void *
997 agintc_intr_establish_fdt(void *cookie, int *cell, int level,
998     struct cpu_info *ci, int (*func)(void *), void *arg, char *name)
999 {
1000 	struct agintc_softc	*sc = agintc_sc;
1001 	int			 irq;
1002 	int			 type;
1003 
1004 	/* 2nd cell contains the interrupt number */
1005 	irq = cell[1];
1006 
1007 	/* 1st cell contains type: 0 SPI (32-X), 1 PPI (16-31) */
1008 	if (cell[0] == 0)
1009 		irq += SPI_BASE;
1010 	else if (cell[0] == 1)
1011 		irq += PPI_BASE;
1012 	else
1013 		panic("%s: bogus interrupt type", sc->sc_sbus.sc_dev.dv_xname);
1014 
1015 	/* SPIs are only active-high level or low-to-high edge */
1016 	if (cell[2] & 0x3)
1017 		type = IST_EDGE_RISING;
1018 	else
1019 		type = IST_LEVEL_HIGH;
1020 
1021 	return agintc_intr_establish(irq, type, level, ci, func, arg, name);
1022 }
1023 
/*
 * Establish a handler for interrupt `irqno' at IPL `level' on CPU
 * `ci' (primary CPU when NULL).  SGI/PPI/SPI handlers are queued and
 * may share an interrupt (all sharers must target the same CPU);
 * LPIs get a single handler and their priority/enable state is
 * written into the in-memory LPI configuration table instead of
 * registers.  Returns an opaque handle, or NULL on a CPU conflict.
 */
void *
agintc_intr_establish(int irqno, int type, int level, struct cpu_info *ci,
    int (*func)(void *), void *arg, char *name)
{
	struct agintc_softc	*sc = agintc_sc;
	struct intrhand		*ih;
	int			 psw;

	if (irqno < 0 || (irqno >= sc->sc_nintr && irqno < LPI_BASE) ||
	    irqno >= LPI_BASE + sc->sc_nlpi)
		panic("agintc_intr_establish: bogus irqnumber %d: %s",
		    irqno, name);

	if (ci == NULL)
		ci = &cpu_info_primary;

	ih = malloc(sizeof *ih, M_DEVBUF, M_WAITOK);
	ih->ih_func = func;
	ih->ih_arg = arg;
	ih->ih_ipl = level & IPL_IRQMASK;
	ih->ih_flags = level & IPL_FLAGMASK;
	ih->ih_irq = irqno;
	ih->ih_name = name;
	ih->ih_ci = ci;

	/* block interrupts while the handler lists are inconsistent */
	psw = disable_interrupts();

	if (irqno < LPI_BASE) {
		/* shared interrupts must all run on the same CPU */
		if (!TAILQ_EMPTY(&sc->sc_handler[irqno].iq_list) &&
		    sc->sc_handler[irqno].iq_ci != ci) {
			free(ih, M_DEVBUF, sizeof *ih);
			restore_interrupts(psw);
			return NULL;
		}
		TAILQ_INSERT_TAIL(&sc->sc_handler[irqno].iq_list, ih, ih_list);
		sc->sc_handler[irqno].iq_ci = ci;
	} else
		sc->sc_lpi_handler[irqno - LPI_BASE] = ih;

	if (name != NULL)
		evcount_attach(&ih->ih_count, name, &ih->ih_irq);

#ifdef DEBUG_AGINTC
	printf("%s: irq %d level %d [%s]\n", __func__, irqno, level, name);
#endif

	if (irqno < LPI_BASE) {
		/* program trigger mode, then priority/route/enable */
		agintc_intr_config(sc, irqno, type);
		agintc_calc_irq(sc, irqno);
	} else {
		/* LPI: set priority + enable in the configuration table */
		uint8_t *prop = AGINTC_DMA_KVA(sc->sc_prop);

		prop[irqno - LPI_BASE] = (((0xff - ih->ih_ipl) << 4) & 0xff) |
		    GICR_PROP_GROUP1 | GICR_PROP_ENABLE;

		/* Make globally visible. */
		cpu_dcache_wb_range((vaddr_t)prop, 1);
		__asm volatile("dsb sy");
	}

	restore_interrupts(psw);
	return (ih);
}
1087 
1088 void
1089 agintc_intr_disestablish(void *cookie)
1090 {
1091 	struct agintc_softc	*sc = agintc_sc;
1092 	struct intrhand		*ih = cookie;
1093 	int			 irqno = ih->ih_irq;
1094 	int			 psw;
1095 
1096 	psw = disable_interrupts();
1097 
1098 	TAILQ_REMOVE(&sc->sc_handler[irqno].iq_list, ih, ih_list);
1099 	if (ih->ih_name != NULL)
1100 		evcount_detach(&ih->ih_count);
1101 
1102 	agintc_calc_irq(sc, irqno);
1103 
1104 	restore_interrupts(psw);
1105 
1106 	free(ih, M_DEVBUF, 0);
1107 }
1108 
/*
 * Signal end-of-interrupt for `eoi' (the INTID previously read from
 * the Group 1 acknowledge register) by writing ICC_EOIR1_EL1.
 */
void
agintc_eoi(uint32_t eoi)
{
	__asm volatile("msr "STR(ICC_EOIR1)", %x0" :: "r" (eoi));
	__isb();
}
1115 
1116 void
1117 agintc_d_wait_rwp(struct agintc_softc *sc)
1118 {
1119 	int count = 100000;
1120 	uint32_t v;
1121 
1122 	do {
1123 		v = bus_space_read_4(sc->sc_iot, sc->sc_d_ioh, GICD_CTLR);
1124 	} while (--count && (v & GICD_CTLR_RWP));
1125 
1126 	if (count == 0)
1127 		panic("%s: RWP timed out 0x08%x", __func__, v);
1128 }
1129 
1130 void
1131 agintc_r_wait_rwp(struct agintc_softc *sc)
1132 {
1133 	struct cpu_info *ci = curcpu();
1134 	int hwcpu = sc->sc_cpuremap[ci->ci_cpuid];
1135 	int count = 100000;
1136 	uint32_t v;
1137 
1138 	do {
1139 		v = bus_space_read_4(sc->sc_iot, sc->sc_r_ioh[hwcpu],
1140 		    GICR_CTLR);
1141 	} while (--count && (v & GICR_CTLR_RWP));
1142 
1143 	if (count == 0)
1144 		panic("%s: RWP timed out 0x08%x", __func__, v);
1145 }
1146 
1147 #ifdef MULTIPROCESSOR
/*
 * IPI handler: drop the receiving CPU into the kernel debugger
 * (no-op when DDB is not compiled in).  Always claims the interrupt.
 */
int
agintc_ipi_ddb(void *v)
{
	/* XXX */
#ifdef DDB
	db_enter();
#endif
	return 1;
}
1157 
/*
 * IPI handler with no work attached: the interrupt itself is enough
 * to take the target CPU out of WFI.  Always claims the interrupt.
 */
int
agintc_ipi_nop(void *v)
{
	/* Nothing to do here, just enough to wake up from WFI */
	return 1;
}
1164 
1165 int
1166 agintc_ipi_combined(void *v)
1167 {
1168 	struct agintc_softc *sc = v;
1169 
1170 	if (sc->sc_ipi_reason[cpu_number()] == ARM_IPI_DDB) {
1171 		sc->sc_ipi_reason[cpu_number()] = ARM_IPI_NOP;
1172 		return agintc_ipi_ddb(v);
1173 	} else {
1174 		return agintc_ipi_nop(v);
1175 	}
1176 }
1177 
/*
 * Send IPI `id' to cpu `ci' by writing ICC_SGI1R_EL1.  The target is
 * addressed by its MPIDR affinity fields, with a single target-list
 * bit selecting one CPU within the Aff1 cluster.
 */
void
agintc_send_ipi(struct cpu_info *ci, int id)
{
	struct agintc_softc	*sc = agintc_sc;
	uint64_t sendmask;

	/* A NOP to ourselves is pointless; we are already running. */
	if (ci == curcpu() && id == ARM_IPI_NOP)
		return;

	/* never overwrite IPI_DDB with IPI_NOP */
	if (id == ARM_IPI_DDB)
		sc->sc_ipi_reason[ci->ci_cpuid] = id;

	/* will only send 1 cpu */
	sendmask = (ci->ci_mpidr & MPIDR_AFF3) << 16;	/* Aff3 field */
	sendmask |= (ci->ci_mpidr & MPIDR_AFF2) << 16;	/* Aff2 field */
	sendmask |= (ci->ci_mpidr & MPIDR_AFF1) << 8;	/* Aff1 field */
	sendmask |= 1 << (ci->ci_mpidr & 0x0f);		/* target list bit */
	sendmask |= (sc->sc_ipi_num[id] << 24);		/* SGI interrupt ID */

	__asm volatile ("msr " STR(ICC_SGI1R)", %x0" ::"r"(sendmask));
}
1200 #endif
1201 
1202 /*
1203  * GICv3 ITS controller for MSI interrupts.
1204  */
1205 #define GITS_CTLR		0x0000
1206 #define  GITS_CTLR_ENABLED	(1UL << 0)
1207 #define GITS_TYPER		0x0008
1208 #define  GITS_TYPER_CIL		(1ULL << 36)
1209 #define  GITS_TYPER_HCC(x)	(((x) >> 24) & 0xff)
1210 #define  GITS_TYPER_PTA		(1ULL << 19)
1211 #define  GITS_TYPER_DEVBITS(x)	(((x) >> 13) & 0x1f)
1212 #define  GITS_TYPER_ITE_SZ(x)	(((x) >> 4) & 0xf)
1213 #define  GITS_TYPER_PHYS	(1ULL << 0)
1214 #define GITS_CBASER		0x0080
1215 #define  GITS_CBASER_VALID	(1ULL << 63)
1216 #define  GITS_CBASER_IC_NORM_NC	(1ULL << 59)
1217 #define  GITS_CBASER_MASK	0x1ffffffffff000ULL
1218 #define GITS_CWRITER		0x0088
1219 #define GITS_CREADR		0x0090
1220 #define GITS_BASER(i)		(0x0100 + ((i) * 8))
1221 #define  GITS_BASER_VALID	(1ULL << 63)
1222 #define  GITS_BASER_INDIRECT	(1ULL << 62)
1223 #define  GITS_BASER_IC_NORM_NC	(1ULL << 59)
1224 #define  GITS_BASER_TYPE_MASK	(7ULL << 56)
1225 #define  GITS_BASER_TYPE_DEVICE	(1ULL << 56)
1226 #define  GITS_BASER_DTE_SZ(x)	(((x) >> 48) & 0x1f)
1227 #define  GITS_BASER_PGSZ_MASK	(3ULL << 8)
1228 #define  GITS_BASER_PGSZ_4K	(0ULL << 8)
1229 #define  GITS_BASER_PGSZ_16K	(1ULL << 8)
1230 #define  GITS_BASER_PGSZ_64K	(2ULL << 8)
1231 #define  GITS_BASER_PA_MASK	0x7ffffffff000ULL
1232 #define GITS_TRANSLATER		0x10040
1233 
1234 #define GITS_NUM_BASER		8
1235 
1236 struct gits_cmd {
1237 	uint8_t cmd;
1238 	uint32_t deviceid;
1239 	uint32_t eventid;
1240 	uint32_t intid;
1241 	uint64_t dw2;
1242 	uint64_t dw3;
1243 };
1244 
1245 #define GITS_CMD_VALID		(1ULL << 63)
1246 
1247 /* ITS commands */
1248 #define SYNC	0x05
1249 #define MAPD	0x08
1250 #define MAPC	0x09
1251 #define MAPTI	0x0a
1252 
1253 #define GITS_CMDQ_SIZE		(64 * 1024)
1254 #define GITS_CMDQ_NENTRIES	(GITS_CMDQ_SIZE / sizeof(struct gits_cmd))
1255 
/*
 * Per-deviceid state: the device's interrupt translation table and
 * the next unused event ID within it.
 */
struct agintc_msi_device {
	LIST_ENTRY(agintc_msi_device) md_list;

	uint32_t		md_deviceid;	/* requester/device ID */
	uint32_t		md_eventid;	/* next unallocated event ID */
	struct agintc_dmamem	*md_itt;	/* interrupt translation table */
};

int	 agintc_msi_match(struct device *, void *, void *);
void	 agintc_msi_attach(struct device *, struct device *, void *);
void	*agintc_intr_establish_msi(void *, uint64_t *, uint64_t *,
	    int , struct cpu_info *, int (*)(void *), void *, char *);
void	 agintc_intr_disestablish_msi(void *);
void	 agintc_intr_barrier_msi(void *);

struct agintc_msi_softc {
	struct device			sc_dev;
	bus_space_tag_t			sc_iot;
	bus_space_handle_t		sc_ioh;
	bus_dma_tag_t			sc_dmat;

	bus_addr_t			sc_msi_addr;	/* MSI doorbell PA */
	int				sc_msi_delta;	/* per-device stride */

	int				sc_nlpi;
	void				**sc_lpi;	/* cookie per LPI; NULL = free */

	struct agintc_dmamem		*sc_cmdq;	/* ITS command queue ring */
	uint16_t			sc_cmdidx;	/* next free ring slot */

	int				sc_devbits;	/* device ID width */
	struct agintc_dmamem		*sc_dtt;	/* device translation table */
	size_t				sc_dtt_pgsz;	/* DTT page size in bytes */
	uint8_t				sc_dte_sz;	/* DTT entry size */
	uint8_t				sc_ite_sz;	/* ITT entry size */

	LIST_HEAD(, agintc_msi_device)	sc_msi_devices;

	struct interrupt_controller	sc_ic;
};

struct cfattach	agintcmsi_ca = {
	sizeof (struct agintc_msi_softc), agintc_msi_match, agintc_msi_attach
};

struct cfdriver agintcmsi_cd = {
	NULL, "agintcmsi", DV_DULL
};

void	agintc_msi_send_cmd(struct agintc_msi_softc *, struct gits_cmd *);
void	agintc_msi_wait_cmd(struct agintc_msi_softc *);
1307 
1308 int
1309 agintc_msi_match(struct device *parent, void *cfdata, void *aux)
1310 {
1311 	struct fdt_attach_args *faa = aux;
1312 
1313 	return OF_is_compatible(faa->fa_node, "arm,gic-v3-its");
1314 }
1315 
/*
 * Attach the ITS: map its registers, sanity-check its capabilities,
 * set up the command queue and device translation table, enable it,
 * and create one collection per CPU.  Registers as an FDT MSI
 * interrupt controller on success.
 */
void
agintc_msi_attach(struct device *parent, struct device *self, void *aux)
{
	struct agintc_msi_softc *sc = (struct agintc_msi_softc *)self;
	struct fdt_attach_args *faa = aux;
	struct gits_cmd cmd;
	uint32_t pre_its[2];
	uint64_t typer;
	int i, hwcpu;

	if (faa->fa_nreg < 1) {
		printf(": no registers\n");
		return;
	}

	sc->sc_iot = faa->fa_iot;
	if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr,
	    faa->fa_reg[0].size, 0, &sc->sc_ioh)) {
		printf(": can't map registers\n");
		return;
	}
	sc->sc_dmat = faa->fa_dmat;

	/*
	 * MSI writes normally target GITS_TRANSLATER; the SynQuacer
	 * "pre-ITS" quirk uses a separate doorbell region with a
	 * 4-byte slot per device ID instead.
	 */
	sc->sc_msi_addr = faa->fa_reg[0].addr + GITS_TRANSLATER;
	if (OF_getpropintarray(faa->fa_node, "socionext,synquacer-pre-its",
	    pre_its, sizeof(pre_its)) == sizeof(pre_its)) {
		sc->sc_msi_addr = pre_its[0];
		sc->sc_msi_delta = 4;
	}

	/*
	 * We require physical LPIs, processor-number redistributor
	 * addressing (no PTA), at least one hardware collection per
	 * CPU, and no collection ID limit.
	 */
	typer = bus_space_read_8(sc->sc_iot, sc->sc_ioh, GITS_TYPER);
	if ((typer & GITS_TYPER_PHYS) == 0 || typer & GITS_TYPER_PTA ||
	    GITS_TYPER_HCC(typer) < ncpus || typer & GITS_TYPER_CIL) {
		printf(": unsupported type 0x%016llx\n", typer);
		goto unmap;
	}
	sc->sc_ite_sz = GITS_TYPER_ITE_SZ(typer) + 1;
	sc->sc_devbits = GITS_TYPER_DEVBITS(typer) + 1;

	sc->sc_nlpi = agintc_sc->sc_nlpi;
	sc->sc_lpi = mallocarray(sc->sc_nlpi, sizeof(void *), M_DEVBUF,
	    M_WAITOK|M_ZERO);

	/* Set up command queue. */
	sc->sc_cmdq = agintc_dmamem_alloc(sc->sc_dmat,
	    GITS_CMDQ_SIZE, GITS_CMDQ_SIZE);
	if (sc->sc_cmdq == NULL) {
		printf(": can't alloc command queue\n");
		goto unmap;
	}
	bus_space_write_8(sc->sc_iot, sc->sc_ioh, GITS_CBASER,
	    AGINTC_DMA_DVA(sc->sc_cmdq) | GITS_CBASER_IC_NORM_NC |
	    (GITS_CMDQ_SIZE / PAGE_SIZE) - 1 | GITS_CBASER_VALID);

	/* Set up device translation table. */
	for (i = 0; i < GITS_NUM_BASER; i++) {
		uint64_t baser;
		paddr_t dtt_pa;
		size_t size;

		baser = bus_space_read_8(sc->sc_iot, sc->sc_ioh, GITS_BASER(i));
		if ((baser & GITS_BASER_TYPE_MASK) != GITS_BASER_TYPE_DEVICE)
			continue;

		/*
		 * Determine the maximum supported page size by probing
		 * from largest to smallest; the PGSZ field reads back
		 * unchanged only if the size is supported.
		 */
		bus_space_write_8(sc->sc_iot, sc->sc_ioh, GITS_BASER(i),
		    (baser & ~GITS_BASER_PGSZ_MASK) | GITS_BASER_PGSZ_64K);
		baser = bus_space_read_8(sc->sc_iot, sc->sc_ioh, GITS_BASER(i));
		if ((baser & GITS_BASER_PGSZ_MASK) == GITS_BASER_PGSZ_64K)
			goto found;

		bus_space_write_8(sc->sc_iot, sc->sc_ioh, GITS_BASER(i),
		    (baser & ~GITS_BASER_PGSZ_MASK) | GITS_BASER_PGSZ_16K);
		baser = bus_space_read_8(sc->sc_iot, sc->sc_ioh, GITS_BASER(i));
		if ((baser & GITS_BASER_PGSZ_MASK) == GITS_BASER_PGSZ_16K)
			goto found;

		bus_space_write_8(sc->sc_iot, sc->sc_ioh, GITS_BASER(i),
		    (baser & ~GITS_BASER_PGSZ_MASK) | GITS_BASER_PGSZ_4K);
		baser = bus_space_read_8(sc->sc_iot, sc->sc_ioh, GITS_BASER(i));

	found:
		switch (baser & GITS_BASER_PGSZ_MASK) {
		case GITS_BASER_PGSZ_4K:
			sc->sc_dtt_pgsz = PAGE_SIZE;
			break;
		case GITS_BASER_PGSZ_16K:
			sc->sc_dtt_pgsz = 4 * PAGE_SIZE;
			break;
		case GITS_BASER_PGSZ_64K:
			sc->sc_dtt_pgsz = 16 * PAGE_SIZE;
			break;
		}

		/* Calculate table size. */
		sc->sc_dte_sz = GITS_BASER_DTE_SZ(baser) + 1;
		size = (1ULL << sc->sc_devbits) * sc->sc_dte_sz;
		size = roundup(size, sc->sc_dtt_pgsz);

		/* Allocate table. */
		sc->sc_dtt = agintc_dmamem_alloc(sc->sc_dmat,
		    size, sc->sc_dtt_pgsz);
		if (sc->sc_dtt == NULL) {
			printf(": can't alloc translation table\n");
			goto unmap;
		}

		/* Configure table. */
		dtt_pa = AGINTC_DMA_DVA(sc->sc_dtt);
		KASSERT((dtt_pa & GITS_BASER_PA_MASK) == dtt_pa);
		bus_space_write_8(sc->sc_iot, sc->sc_ioh, GITS_BASER(i),
		    GITS_BASER_IC_NORM_NC | baser & GITS_BASER_PGSZ_MASK |
		    dtt_pa | (size / sc->sc_dtt_pgsz) - 1 | GITS_BASER_VALID);
	}

	/* Enable ITS. */
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, GITS_CTLR,
	    GITS_CTLR_ENABLED);

	LIST_INIT(&sc->sc_msi_devices);

	/* Create one collection per core. */
	KASSERT(ncpus <= agintc_sc->sc_num_redist);
	for (i = 0; i < ncpus; i++) {
		hwcpu = agintc_sc->sc_cpuremap[i];
		memset(&cmd, 0, sizeof(cmd));
		cmd.cmd = MAPC;
		/* Collection ID is the cpu index `i'. */
		cmd.dw2 = GITS_CMD_VALID |
		    (agintc_sc->sc_processor[hwcpu] << 16) | i;
		agintc_msi_send_cmd(sc, &cmd);
		agintc_msi_wait_cmd(sc);
	}

	printf("\n");

	sc->sc_ic.ic_node = faa->fa_node;
	sc->sc_ic.ic_cookie = sc;
	sc->sc_ic.ic_establish_msi = agintc_intr_establish_msi;
	sc->sc_ic.ic_disestablish = agintc_intr_disestablish_msi;
	sc->sc_ic.ic_barrier = agintc_intr_barrier_msi;
	arm_intr_register_fdt(&sc->sc_ic);
	return;

unmap:
	if (sc->sc_dtt)
		agintc_dmamem_free(sc->sc_dmat, sc->sc_dtt);
	if (sc->sc_cmdq)
		agintc_dmamem_free(sc->sc_dmat, sc->sc_cmdq);

	if (sc->sc_lpi)
		free(sc->sc_lpi, M_DEVBUF, sc->sc_nlpi * sizeof(void *));

	bus_space_unmap(sc->sc_iot, sc->sc_ioh, faa->fa_reg[0].size);
}
1470 
1471 void
1472 agintc_msi_send_cmd(struct agintc_msi_softc *sc, struct gits_cmd *cmd)
1473 {
1474 	struct gits_cmd *queue = AGINTC_DMA_KVA(sc->sc_cmdq);
1475 
1476 	memcpy(&queue[sc->sc_cmdidx], cmd, sizeof(*cmd));
1477 
1478 	/* Make globally visible. */
1479 	cpu_dcache_wb_range((vaddr_t)&queue[sc->sc_cmdidx], sizeof(*cmd));
1480 	__asm volatile("dsb sy");
1481 
1482 	sc->sc_cmdidx++;
1483 	sc->sc_cmdidx %= GITS_CMDQ_NENTRIES;
1484 	bus_space_write_8(sc->sc_iot, sc->sc_ioh, GITS_CWRITER,
1485 	    sc->sc_cmdidx * sizeof(*cmd));
1486 }
1487 
1488 void
1489 agintc_msi_wait_cmd(struct agintc_msi_softc *sc)
1490 {
1491 	uint64_t creadr;
1492 	int timo;
1493 
1494 	for (timo = 1000; timo > 0; timo--) {
1495 		creadr = bus_space_read_8(sc->sc_iot, sc->sc_ioh, GITS_CREADR);
1496 		if (creadr == sc->sc_cmdidx * sizeof(struct gits_cmd))
1497 			break;
1498 		delay(1);
1499 	}
1500 	if (timo == 0)
1501 		printf("%s: command queue timeout\n", sc->sc_dev.dv_xname);
1502 }
1503 
1504 struct agintc_msi_device *
1505 agintc_msi_create_device(struct agintc_msi_softc *sc, uint32_t deviceid)
1506 {
1507 	struct agintc_msi_device *md;
1508 	struct gits_cmd cmd;
1509 
1510 	md = malloc(sizeof(*md), M_DEVBUF, M_ZERO | M_WAITOK);
1511 	md->md_deviceid = deviceid;
1512 	md->md_itt = agintc_dmamem_alloc(sc->sc_dmat,
1513 	    32 * sc->sc_ite_sz, PAGE_SIZE);
1514 	LIST_INSERT_HEAD(&sc->sc_msi_devices, md, md_list);
1515 
1516 	memset(&cmd, 0, sizeof(cmd));
1517 	cmd.cmd = MAPD;
1518 	cmd.deviceid = deviceid;
1519 	cmd.eventid = 4;	/* size */
1520 	cmd.dw2 = AGINTC_DMA_DVA(md->md_itt) | GITS_CMD_VALID;
1521 	agintc_msi_send_cmd(sc, &cmd);
1522 	agintc_msi_wait_cmd(sc);
1523 
1524 	return md;
1525 }
1526 
1527 struct agintc_msi_device *
1528 agintc_msi_find_device(struct agintc_msi_softc *sc, uint32_t deviceid)
1529 {
1530 	struct agintc_msi_device *md;
1531 
1532 	LIST_FOREACH(md, &sc->sc_msi_devices, md_list) {
1533 		if (md->md_deviceid == deviceid)
1534 			return md;
1535 	}
1536 
1537 	return agintc_msi_create_device(sc, deviceid);
1538 }
1539 
/*
 * Establish an MSI: allocate an event ID on the device and a free
 * LPI, then MAPTI the (deviceid, eventid) pair to that LPI on the
 * target CPU's collection.  On success *addr/*data are filled with
 * the MSI doorbell address and payload and a handle is returned.
 * NOTE(review): a consumed eventid is not returned on the failure
 * paths below — presumably acceptable since events are scarce only
 * per device; confirm before reuse elsewhere.
 */
void *
agintc_intr_establish_msi(void *self, uint64_t *addr, uint64_t *data,
    int level, struct cpu_info *ci, int (*func)(void *), void *arg, char *name)
{
	struct agintc_msi_softc *sc = (struct agintc_msi_softc *)self;
	struct agintc_msi_device *md;
	struct gits_cmd cmd;
	uint32_t deviceid = *data;	/* caller passes the device ID in *data */
	uint32_t eventid;
	void *cookie;
	int i, hwcpu;

	if (ci == NULL)
		ci = &cpu_info_primary;
	hwcpu = agintc_sc->sc_cpuremap[ci->ci_cpuid];

	md = agintc_msi_find_device(sc, deviceid);
	if (md == NULL)
		return NULL;

	/* Each device's ITT holds 32 events (see agintc_msi_create_device). */
	eventid = md->md_eventid++;
	if (eventid >= 32)
		return NULL;

	/* Find a free LPI slot and hook up the handler. */
	for (i = 0; i < sc->sc_nlpi; i++) {
		if (sc->sc_lpi[i] != NULL)
			continue;

		cookie = agintc_intr_establish(LPI_BASE + i,
		    IST_EDGE_RISING, level, ci, func, arg, name);
		if (cookie == NULL)
			return NULL;

		/* Map (deviceid, eventid) to this LPI on the CPU's collection. */
		memset(&cmd, 0, sizeof(cmd));
		cmd.cmd = MAPTI;
		cmd.deviceid = deviceid;
		cmd.eventid = eventid;
		cmd.intid = LPI_BASE + i;
		cmd.dw2 = ci->ci_cpuid;	/* collection ID == cpu index */
		agintc_msi_send_cmd(sc, &cmd);

		/* Synchronize against the target redistributor. */
		memset(&cmd, 0, sizeof(cmd));
		cmd.cmd = SYNC;
		cmd.dw2 = agintc_sc->sc_processor[hwcpu] << 16;
		agintc_msi_send_cmd(sc, &cmd);
		agintc_msi_wait_cmd(sc);

		*addr = sc->sc_msi_addr + deviceid * sc->sc_msi_delta;
		*data = eventid;
		sc->sc_lpi[i] = cookie;
		return &sc->sc_lpi[i];
	}

	return NULL;
}
1595 
1596 void
1597 agintc_intr_disestablish_msi(void *cookie)
1598 {
1599 	agintc_intr_disestablish(*(void **)cookie);
1600 	*(void **)cookie = NULL;
1601 }
1602 
/*
 * Interrupt barrier for an MSI: forward to the underlying LPI handle
 * stored in the sc_lpi[] slot.
 */
void
agintc_intr_barrier_msi(void *cookie)
{
	void **slot = cookie;

	agintc_intr_barrier(*slot);
}
1608 
1609 struct agintc_dmamem *
1610 agintc_dmamem_alloc(bus_dma_tag_t dmat, bus_size_t size, bus_size_t align)
1611 {
1612 	struct agintc_dmamem *adm;
1613 	int nsegs;
1614 
1615 	adm = malloc(sizeof(*adm), M_DEVBUF, M_WAITOK | M_ZERO);
1616 	adm->adm_size = size;
1617 
1618 	if (bus_dmamap_create(dmat, size, 1, size, 0,
1619 	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &adm->adm_map) != 0)
1620 		goto admfree;
1621 
1622 	if (bus_dmamem_alloc(dmat, size, align, 0, &adm->adm_seg, 1,
1623 	    &nsegs, BUS_DMA_WAITOK | BUS_DMA_ZERO) != 0)
1624 		goto destroy;
1625 
1626 	if (bus_dmamem_map(dmat, &adm->adm_seg, nsegs, size,
1627 	    &adm->adm_kva, BUS_DMA_WAITOK | BUS_DMA_NOCACHE) != 0)
1628 		goto free;
1629 
1630 	if (bus_dmamap_load_raw(dmat, adm->adm_map, &adm->adm_seg,
1631 	    nsegs, size, BUS_DMA_WAITOK) != 0)
1632 		goto unmap;
1633 
1634 	/* Make globally visible. */
1635 	cpu_dcache_wb_range((vaddr_t)adm->adm_kva, size);
1636 	__asm volatile("dsb sy");
1637 	return adm;
1638 
1639 unmap:
1640 	bus_dmamem_unmap(dmat, adm->adm_kva, size);
1641 free:
1642 	bus_dmamem_free(dmat, &adm->adm_seg, 1);
1643 destroy:
1644 	bus_dmamap_destroy(dmat, adm->adm_map);
1645 admfree:
1646 	free(adm, M_DEVBUF, sizeof(*adm));
1647 
1648 	return NULL;
1649 }
1650 
/*
 * Release memory obtained from agintc_dmamem_alloc(); teardown is
 * the reverse of the allocation order.
 */
void
agintc_dmamem_free(bus_dma_tag_t dmat, struct agintc_dmamem *adm)
{
	bus_dmamem_unmap(dmat, adm->adm_kva, adm->adm_size);
	bus_dmamem_free(dmat, &adm->adm_seg, 1);
	bus_dmamap_destroy(dmat, adm->adm_map);
	free(adm, M_DEVBUF, sizeof(*adm));
}
1659