xref: /freebsd/sys/arm64/arm64/gicv3_its.c (revision d1bdc282)
1 /*-
2  * Copyright (c) 2015-2016 The FreeBSD Foundation
3  * Copyright (c) 2023 Arm Ltd
4  *
5  * This software was developed by Andrew Turner under
6  * the sponsorship of the FreeBSD Foundation.
7  *
8  * This software was developed by Semihalf under
9  * the sponsorship of the FreeBSD Foundation.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  */
32 
33 #include "opt_acpi.h"
34 #include "opt_platform.h"
35 #include "opt_iommu.h"
36 
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/bus.h>
40 #include <sys/cpuset.h>
41 #include <sys/domainset.h>
42 #include <sys/endian.h>
43 #include <sys/kernel.h>
44 #include <sys/lock.h>
45 #include <sys/malloc.h>
46 #include <sys/module.h>
47 #include <sys/mutex.h>
48 #include <sys/physmem.h>
49 #include <sys/proc.h>
50 #include <sys/taskqueue.h>
51 #include <sys/tree.h>
52 #include <sys/queue.h>
53 #include <sys/rman.h>
54 #include <sys/sbuf.h>
55 #include <sys/smp.h>
56 #include <sys/sysctl.h>
57 #include <sys/vmem.h>
58 
59 #include <vm/vm.h>
60 #include <vm/pmap.h>
61 #include <vm/vm_page.h>
62 
63 #include <machine/bus.h>
64 #include <machine/intr.h>
65 
66 #include <arm/arm/gic_common.h>
67 #include <arm64/arm64/gic_v3_reg.h>
68 #include <arm64/arm64/gic_v3_var.h>
69 
70 #ifdef FDT
71 #include <dev/ofw/openfirm.h>
72 #include <dev/ofw/ofw_bus.h>
73 #include <dev/ofw/ofw_bus_subr.h>
74 #endif
75 #include <dev/pci/pcireg.h>
76 #include <dev/pci/pcivar.h>
77 
78 #ifdef IOMMU
79 #include <dev/iommu/iommu.h>
80 #include <dev/iommu/iommu_gas.h>
81 #endif
82 
83 #include "pcib_if.h"
84 #include "pic_if.h"
85 #include "msi_if.h"
86 
87 MALLOC_DEFINE(M_GICV3_ITS, "GICv3 ITS",
88     "ARM GICv3 Interrupt Translation Service");
89 
90 #define	LPI_NIRQS		(64 * 1024)
91 
92 /* The size and alignment of the command circular buffer */
93 #define	ITS_CMDQ_SIZE		(64 * 1024)	/* Must be a multiple of 4K */
94 #define	ITS_CMDQ_ALIGN		(64 * 1024)
95 
96 #define	LPI_CONFTAB_SIZE	LPI_NIRQS
97 #define	LPI_CONFTAB_ALIGN	(64 * 1024)
98 #define	LPI_CONFTAB_MAX_ADDR	((1ul << 48) - 1) /* We need a 47 bit PA */
99 
100 /* 1 bit per SPI, PPI, and SGI (8k), and 1 bit per LPI (LPI_CONFTAB_SIZE) */
101 #define	LPI_PENDTAB_SIZE	((LPI_NIRQS + GIC_FIRST_LPI) / 8)
102 #define	LPI_PENDTAB_ALIGN	(64 * 1024)
103 #define	LPI_PENDTAB_MAX_ADDR	((1ul << 48) - 1) /* We need a 47 bit PA */
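
/*
 * Editor's sketch (not in the original source): the pending table packs
 * one bit per interrupt ID, 8 per byte, covering the 8K SGI/PPI/SPI IDs
 * below GIC_FIRST_LPI plus every LPI.  Assuming GIC_FIRST_LPI is 8192,
 * as the comment above ("8k") implies:
 */
_Static_assert(LPI_PENDTAB_SIZE == (64 * 1024 + 8 * 1024) / 8,
    "pending table: (65536 LPIs + 8192 lower IDs) / 8 bits = 9216 bytes");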
104 
105 #define	LPI_INT_TRANS_TAB_ALIGN	256
106 #define	LPI_INT_TRANS_TAB_MAX_ADDR ((1ul << 48) - 1)
107 
108 /* ITS commands encoding */
109 #define	ITS_CMD_MOVI		(0x01)
110 #define	ITS_CMD_SYNC		(0x05)
111 #define	ITS_CMD_MAPD		(0x08)
112 #define	ITS_CMD_MAPC		(0x09)
113 #define	ITS_CMD_MAPTI		(0x0a)
114 #define	ITS_CMD_MAPI		(0x0b)
115 #define	ITS_CMD_INV		(0x0c)
116 #define	ITS_CMD_INVALL		(0x0d)
117 /* Command */
118 #define	CMD_COMMAND_MASK	(0xFFUL)
119 /* PCI device ID */
120 #define	CMD_DEVID_SHIFT		(32)
121 #define	CMD_DEVID_MASK		(0xFFFFFFFFUL << CMD_DEVID_SHIFT)
122 /* Size of IRQ ID bitfield */
123 #define	CMD_SIZE_MASK		(0xFFUL)
124 /* Virtual LPI ID */
125 #define	CMD_ID_MASK		(0xFFFFFFFFUL)
126 /* Physical LPI ID */
127 #define	CMD_PID_SHIFT		(32)
128 #define	CMD_PID_MASK		(0xFFFFFFFFUL << CMD_PID_SHIFT)
129 /* Collection */
130 #define	CMD_COL_MASK		(0xFFFFUL)
131 /* Target (CPU or Re-Distributor) */
132 #define	CMD_TARGET_SHIFT	(16)
133 #define	CMD_TARGET_MASK		(0xFFFFFFFFUL << CMD_TARGET_SHIFT)
134 /* Interrupt Translation Table address */
135 #define	CMD_ITT_MASK		(0xFFFFFFFFFF00UL)
136 /* Valid command bit */
137 #define	CMD_VALID_SHIFT		(63)
138 #define	CMD_VALID_MASK		(1UL << CMD_VALID_SHIFT)
139 
140 #define	ITS_TARGET_NONE		0xFBADBEEF
141 
142 /* LPI chunk owned by ITS device */
143 struct lpi_chunk {
144 	u_int	lpi_base;
145 	u_int	lpi_free;	/* First free LPI in set */
146 	u_int	lpi_num;	/* Total number of LPIs in chunk */
147 	u_int	lpi_busy;	/* Number of busy LPIs in chunk */
148 };
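
/*
 * Editor's note: the chunk accounting used below (its_device_get(),
 * gicv3_its_alloc_msi()) always hands out the next vector as
 * lpi_base + lpi_num - lpi_free; e.g. with lpi_num = 8 and three vectors
 * already handed out, lpi_free is 5 and the next IRQ is lpi_base + 3.
 */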
149 
150 /* ITS device */
151 struct its_dev {
152 	TAILQ_ENTRY(its_dev)	entry;
153 	/* PCI device */
154 	device_t		pci_dev;
155 	/* Device ID (i.e. PCI device ID) */
156 	uint32_t		devid;
157 	/* List of assigned LPIs */
158 	struct lpi_chunk	lpis;
159 	/* Virtual address of ITT */
160 	void			*itt;
161 };
162 
163 /*
164  * ITS command descriptor.
165  * Idea for command description passing taken from Linux.
166  */
167 struct its_cmd_desc {
168 	uint8_t cmd_type;
169 
170 	union {
171 		struct {
172 			struct its_dev *its_dev;
173 			struct its_col *col;
174 			uint32_t id;
175 		} cmd_desc_movi;
176 
177 		struct {
178 			struct its_col *col;
179 		} cmd_desc_sync;
180 
181 		struct {
182 			struct its_col *col;
183 			uint8_t valid;
184 		} cmd_desc_mapc;
185 
186 		struct {
187 			struct its_dev *its_dev;
188 			struct its_col *col;
189 			uint32_t pid;
190 			uint32_t id;
191 		} cmd_desc_mapvi;
192 
193 		struct {
194 			struct its_dev *its_dev;
195 			struct its_col *col;
196 			uint32_t pid;
197 		} cmd_desc_mapi;
198 
199 		struct {
200 			struct its_dev *its_dev;
201 			uint8_t valid;
202 		} cmd_desc_mapd;
203 
204 		struct {
205 			struct its_dev *its_dev;
206 			struct its_col *col;
207 			uint32_t pid;
208 		} cmd_desc_inv;
209 
210 		struct {
211 			struct its_col *col;
212 		} cmd_desc_invall;
213 	};
214 };
215 
216 /* ITS command. Each command is 32 bytes long */
217 struct its_cmd {
218 	uint64_t	cmd_dword[4];	/* ITS command double word */
219 };
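
/*
 * Editor's sketch (not in the original source): with 32-byte commands the
 * 64KB ring defined by ITS_CMDQ_SIZE holds 2048 slots; GITS_CREADR and
 * GITS_CWRITER step through it in sizeof(struct its_cmd) units, as
 * its_cmd_queue_full() at the end of this file assumes.
 */
_Static_assert(ITS_CMDQ_SIZE / sizeof(struct its_cmd) == 2048,
    "ITS command ring: 64KB / 32 bytes per command = 2048 slots");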
220 
221 /* An ITS private table */
222 struct its_ptable {
223 	void		*ptab_vaddr;
224 	/* Size of the L1 and L2 tables */
225 	size_t		ptab_l1_size;
226 	size_t		ptab_l2_size;
227 	/* Number of L1 and L2 entries */
228 	int		ptab_l1_nidents;
229 	int		ptab_l2_nidents;
230 
231 	int		ptab_page_size;
232 	int		ptab_share;
233 	bool		ptab_indirect;
234 };
235 
236 /* ITS collection description. */
237 struct its_col {
238 	uint64_t	col_target;	/* Target Re-Distributor */
239 	uint64_t	col_id;		/* Collection ID */
240 };
241 
242 struct gicv3_its_irqsrc {
243 	struct intr_irqsrc	gi_isrc;
244 	u_int			gi_id;
245 	u_int			gi_lpi;
246 	struct its_dev		*gi_its_dev;
247 	TAILQ_ENTRY(gicv3_its_irqsrc) gi_link;
248 };
249 
250 struct gicv3_its_softc {
251 	device_t	dev;
252 	struct intr_pic *sc_pic;
253 	struct resource *sc_its_res;
254 
255 	cpuset_t	sc_cpus;
256 	struct domainset *sc_ds;
257 	u_int		gic_irq_cpu;
258 	int		sc_devbits;
259 	int		sc_dev_table_idx;
260 
261 	struct its_ptable sc_its_ptab[GITS_BASER_NUM];
262 	struct its_col *sc_its_cols[MAXCPU];	/* Per-CPU collections */
263 
264 	/*
265 	 * TODO: We should get these from the parent as we only want a
266 	 * single copy of each across the interrupt controller.
267 	 */
268 	uint8_t		*sc_conf_base;
269 	void		*sc_pend_base[MAXCPU];
270 
271 	/* Command handling */
272 	struct mtx sc_its_cmd_lock;
273 	struct its_cmd *sc_its_cmd_base; /* Command circular buffer address */
274 	size_t sc_its_cmd_next_idx;
275 
276 	vmem_t *sc_irq_alloc;
277 	struct gicv3_its_irqsrc	**sc_irqs;
278 	u_int	sc_irq_base;
279 	u_int	sc_irq_length;
280 	u_int	sc_irq_count;
281 
282 	struct mtx sc_its_dev_lock;
283 	TAILQ_HEAD(its_dev_list, its_dev) sc_its_dev_list;
284 	TAILQ_HEAD(free_irqs, gicv3_its_irqsrc) sc_free_irqs;
285 
286 #define	ITS_FLAGS_CMDQ_FLUSH		0x00000001
287 #define	ITS_FLAGS_LPI_CONF_FLUSH	0x00000002
288 #define	ITS_FLAGS_ERRATA_CAVIUM_22375	0x00000004
289 #define	ITS_FLAGS_LPI_PREALLOC		0x00000008
290 	u_int sc_its_flags;
291 	bool	trace_enable;
292 	vm_page_t ma; /* fake msi page */
293 };
294 
295 typedef void (its_quirk_func_t)(device_t);
296 static its_quirk_func_t its_quirk_cavium_22375;
297 
298 static const struct {
299 	const char *desc;
300 	uint32_t iidr;
301 	uint32_t iidr_mask;
302 	its_quirk_func_t *func;
303 } its_quirks[] = {
304 	{
305 		/* Cavium ThunderX Pass 1.x */
306 		.desc = "Cavium ThunderX errata: 22375, 24313",
307 		.iidr = GITS_IIDR_RAW(GITS_IIDR_IMPL_CAVIUM,
308 		    GITS_IIDR_PROD_THUNDER, GITS_IIDR_VAR_THUNDER_1, 0),
309 		.iidr_mask = ~GITS_IIDR_REVISION_MASK,
310 		.func = its_quirk_cavium_22375,
311 	},
312 };
313 
314 #define	gic_its_read_4(sc, reg)			\
315     bus_read_4((sc)->sc_its_res, (reg))
316 #define	gic_its_read_8(sc, reg)			\
317     bus_read_8((sc)->sc_its_res, (reg))
318 
319 #define	gic_its_write_4(sc, reg, val)		\
320     bus_write_4((sc)->sc_its_res, (reg), (val))
321 #define	gic_its_write_8(sc, reg, val)		\
322     bus_write_8((sc)->sc_its_res, (reg), (val))
323 
324 static device_attach_t gicv3_its_attach;
325 static device_detach_t gicv3_its_detach;
326 
327 static pic_disable_intr_t gicv3_its_disable_intr;
328 static pic_enable_intr_t gicv3_its_enable_intr;
329 static pic_map_intr_t gicv3_its_map_intr;
330 static pic_setup_intr_t gicv3_its_setup_intr;
331 static pic_post_filter_t gicv3_its_post_filter;
332 static pic_post_ithread_t gicv3_its_post_ithread;
333 static pic_pre_ithread_t gicv3_its_pre_ithread;
334 static pic_bind_intr_t gicv3_its_bind_intr;
335 #ifdef SMP
336 static pic_init_secondary_t gicv3_its_init_secondary;
337 #endif
338 static msi_alloc_msi_t gicv3_its_alloc_msi;
339 static msi_release_msi_t gicv3_its_release_msi;
340 static msi_alloc_msix_t gicv3_its_alloc_msix;
341 static msi_release_msix_t gicv3_its_release_msix;
342 static msi_map_msi_t gicv3_its_map_msi;
343 #ifdef IOMMU
344 static msi_iommu_init_t gicv3_iommu_init;
345 static msi_iommu_deinit_t gicv3_iommu_deinit;
346 #endif
347 
348 static void its_cmd_movi(device_t, struct gicv3_its_irqsrc *);
349 static void its_cmd_mapc(device_t, struct its_col *, uint8_t);
350 static void its_cmd_mapti(device_t, struct gicv3_its_irqsrc *);
351 static void its_cmd_mapd(device_t, struct its_dev *, uint8_t);
352 static void its_cmd_inv(device_t, struct its_dev *, struct gicv3_its_irqsrc *);
353 static void its_cmd_invall(device_t, struct its_col *);
354 
355 static device_method_t gicv3_its_methods[] = {
356 	/* Device interface */
357 	DEVMETHOD(device_detach,	gicv3_its_detach),
358 
359 	/* Interrupt controller interface */
360 	DEVMETHOD(pic_disable_intr,	gicv3_its_disable_intr),
361 	DEVMETHOD(pic_enable_intr,	gicv3_its_enable_intr),
362 	DEVMETHOD(pic_map_intr,		gicv3_its_map_intr),
363 	DEVMETHOD(pic_setup_intr,	gicv3_its_setup_intr),
364 	DEVMETHOD(pic_post_filter,	gicv3_its_post_filter),
365 	DEVMETHOD(pic_post_ithread,	gicv3_its_post_ithread),
366 	DEVMETHOD(pic_pre_ithread,	gicv3_its_pre_ithread),
367 #ifdef SMP
368 	DEVMETHOD(pic_bind_intr,	gicv3_its_bind_intr),
369 	DEVMETHOD(pic_init_secondary,	gicv3_its_init_secondary),
370 #endif
371 
372 	/* MSI/MSI-X */
373 	DEVMETHOD(msi_alloc_msi,	gicv3_its_alloc_msi),
374 	DEVMETHOD(msi_release_msi,	gicv3_its_release_msi),
375 	DEVMETHOD(msi_alloc_msix,	gicv3_its_alloc_msix),
376 	DEVMETHOD(msi_release_msix,	gicv3_its_release_msix),
377 	DEVMETHOD(msi_map_msi,		gicv3_its_map_msi),
378 #ifdef IOMMU
379 	DEVMETHOD(msi_iommu_init,	gicv3_iommu_init),
380 	DEVMETHOD(msi_iommu_deinit,	gicv3_iommu_deinit),
381 #endif
382 
383 	/* End */
384 	DEVMETHOD_END
385 };
386 
387 static DEFINE_CLASS_0(gic, gicv3_its_driver, gicv3_its_methods,
388     sizeof(struct gicv3_its_softc));
389 
390 static void
391 gicv3_its_cmdq_init(struct gicv3_its_softc *sc)
392 {
393 	vm_paddr_t cmd_paddr;
394 	uint64_t reg, tmp;
395 
396 	/* Set up the command circular buffer */
397 	sc->sc_its_cmd_base = contigmalloc_domainset(ITS_CMDQ_SIZE, M_GICV3_ITS,
398 	    sc->sc_ds, M_WAITOK | M_ZERO, 0, (1ul << 48) - 1, ITS_CMDQ_ALIGN,
399 	    0);
400 	sc->sc_its_cmd_next_idx = 0;
401 
402 	cmd_paddr = vtophys(sc->sc_its_cmd_base);
403 
404 	/* Set the base of the command buffer */
405 	reg = GITS_CBASER_VALID |
406 	    (GITS_CBASER_CACHE_NIWAWB << GITS_CBASER_CACHE_SHIFT) |
407 	    cmd_paddr | (GITS_CBASER_SHARE_IS << GITS_CBASER_SHARE_SHIFT) |
408 	    (ITS_CMDQ_SIZE / 4096 - 1);
409 	gic_its_write_8(sc, GITS_CBASER, reg);
410 
411 	/* Read back to check for fixed value fields */
412 	tmp = gic_its_read_8(sc, GITS_CBASER);
413 
414 	if ((tmp & GITS_CBASER_SHARE_MASK) !=
415 	    (GITS_CBASER_SHARE_IS << GITS_CBASER_SHARE_SHIFT)) {
416 		/* Check if the hardware reported non-shareable */
417 		if ((tmp & GITS_CBASER_SHARE_MASK) ==
418 		    (GITS_CBASER_SHARE_NS << GITS_CBASER_SHARE_SHIFT)) {
419 			/* If so, remove the cache attribute */
420 			reg &= ~GITS_CBASER_CACHE_MASK;
421 			reg &= ~GITS_CBASER_SHARE_MASK;
422 			/* Set to Non-cacheable, Non-shareable */
423 			reg |= GITS_CBASER_CACHE_NIN << GITS_CBASER_CACHE_SHIFT;
424 			reg |= GITS_CBASER_SHARE_NS << GITS_CBASER_SHARE_SHIFT;
425 
426 			gic_its_write_8(sc, GITS_CBASER, reg);
427 		}
428 
429 		/* The command queue has to be flushed after each command */
430 		sc->sc_its_flags |= ITS_FLAGS_CMDQ_FLUSH;
431 	}
432 
433 	/* Get the next command from the start of the buffer */
434 	gic_its_write_8(sc, GITS_CWRITER, 0x0);
435 }
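
/*
 * Editor's note: GITS_CBASER[7:0] encodes the queue size as the number of
 * 4KB pages minus one, which is what the "ITS_CMDQ_SIZE / 4096 - 1" term
 * above (15, for the 64KB queue) provides.
 */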
436 
437 static int
438 gicv3_its_table_page_size(struct gicv3_its_softc *sc, int table)
439 {
440 	uint64_t reg, tmp;
441 	int page_size;
442 
443 	page_size = PAGE_SIZE_64K;
444 	reg = gic_its_read_8(sc, GITS_BASER(table));
445 
446 	while (1) {
447 		reg &= GITS_BASER_PSZ_MASK;
448 		switch (page_size) {
449 		case PAGE_SIZE_4K:	/* 4KB */
450 			reg |= GITS_BASER_PSZ_4K << GITS_BASER_PSZ_SHIFT;
451 			break;
452 		case PAGE_SIZE_16K:	/* 16KB */
453 			reg |= GITS_BASER_PSZ_16K << GITS_BASER_PSZ_SHIFT;
454 			break;
455 		case PAGE_SIZE_64K:	/* 64KB */
456 			reg |= GITS_BASER_PSZ_64K << GITS_BASER_PSZ_SHIFT;
457 			break;
458 		}
459 
460 		/* Write the new page size */
461 		gic_its_write_8(sc, GITS_BASER(table), reg);
462 
463 		/* Read back to check */
464 		tmp = gic_its_read_8(sc, GITS_BASER(table));
465 
466 		/* The page size is correct */
467 		if ((tmp & GITS_BASER_PSZ_MASK) == (reg & GITS_BASER_PSZ_MASK))
468 			return (page_size);
469 
470 		switch (page_size) {
471 		default:
472 			return (-1);
473 		case PAGE_SIZE_16K:
474 			page_size = PAGE_SIZE_4K;
475 			break;
476 		case PAGE_SIZE_64K:
477 			page_size = PAGE_SIZE_16K;
478 			break;
479 		}
480 	}
481 }
482 
483 static bool
484 gicv3_its_table_supports_indirect(struct gicv3_its_softc *sc, int table)
485 {
486 	uint64_t reg;
487 
488 	reg = gic_its_read_8(sc, GITS_BASER(table));
489 
490 	/* Try setting the indirect flag */
491 	reg |= GITS_BASER_INDIRECT;
492 	gic_its_write_8(sc, GITS_BASER(table), reg);
493 
494 	/* Read back to check */
495 	reg = gic_its_read_8(sc, GITS_BASER(table));
496 	return ((reg & GITS_BASER_INDIRECT) != 0);
497 }
498 
499 
500 static int
501 gicv3_its_table_init(device_t dev, struct gicv3_its_softc *sc)
502 {
503 	void *table;
504 	vm_paddr_t paddr;
505 	uint64_t cache, reg, share, tmp, type;
506 	size_t its_tbl_size, nitspages, npages;
507 	size_t l1_esize, l2_esize, l1_nidents, l2_nidents;
508 	int i, page_size;
509 	int devbits;
510 	bool indirect;
511 
512 	if ((sc->sc_its_flags & ITS_FLAGS_ERRATA_CAVIUM_22375) != 0) {
513 		/*
514 		 * GITS_TYPER[17:13] of ThunderX reports that device IDs
515 		 * are to be 21 bits in length. The entry size of the ITS
516 		 * table can be read from GITS_BASERn[52:48] and on ThunderX
517 		 * is supposed to be 8 bytes in length (for device table).
518 		 * Finally the page size that is to be used by ITS to access
519 		 * this table will be set to 64KB.
520 		 *
521 		 * This gives 0x200000 entries of size 0x8 bytes covered by
522 		 * 256 pages, each of which is 64KB in size. The number of pages
523 		 * (minus 1) should then be written to GITS_BASERn[7:0]. In
524 		 * that case this value would be 0xFF but on ThunderX the
525 		 * maximum value that HW accepts is 0xFD.
526 		 *
527 		 * Set an arbitrary number of device ID bits to 20 in order
528 		 * to limit the number of entries in ITS device table to
529 		 * 0x100000 and the table size to 8MB.
530 		 */
531 		devbits = 20;
532 		cache = 0;
533 	} else {
534 		devbits = GITS_TYPER_DEVB(gic_its_read_8(sc, GITS_TYPER));
535 		cache = GITS_BASER_CACHE_WAWB;
536 	}
537 	sc->sc_devbits = devbits;
538 	share = GITS_BASER_SHARE_IS;
539 
540 	for (i = 0; i < GITS_BASER_NUM; i++) {
541 		reg = gic_its_read_8(sc, GITS_BASER(i));
542 		/* The type of table */
543 		type = GITS_BASER_TYPE(reg);
544 		if (type == GITS_BASER_TYPE_UNIMPL)
545 			continue;
546 
547 		/* The table entry size */
548 		l1_esize = GITS_BASER_ESIZE(reg);
549 
550 		/* Find the table's page size */
551 		page_size = gicv3_its_table_page_size(sc, i);
552 		if (page_size == -1) {
553 			device_printf(dev, "No valid page size for table %d\n",
554 			    i);
555 			return (EINVAL);
556 		}
557 
558 		indirect = false;
559 		l2_nidents = 0;
560 		l2_esize = 0;
561 		switch(type) {
562 		case GITS_BASER_TYPE_DEV:
563 			if (sc->sc_dev_table_idx != -1)
564 				device_printf(dev,
565 				    "Warning: Multiple device tables found\n");
566 
567 			sc->sc_dev_table_idx = i;
568 			l1_nidents = (1 << devbits);
569 			if ((l1_esize * l1_nidents) > (page_size * 2)) {
570 				indirect =
571 				    gicv3_its_table_supports_indirect(sc, i);
572 				if (indirect) {
573 					/*
574 					 * Each l1 entry is 8 bytes and points
575 					 * to an l2 table of size page_size.
576 					 * Calculate how many entries this is
577 					 * and use this to find how many
578 					 * 8 byte l1 idents we need.
579 					 */
580 					l2_esize = l1_esize;
581 					l2_nidents = page_size / l2_esize;
582 					l1_nidents = l1_nidents / l2_nidents;
583 					l1_esize = GITS_INDIRECT_L1_ESIZE;
584 				}
585 			}
586 			its_tbl_size = l1_esize * l1_nidents;
587 			its_tbl_size = roundup2(its_tbl_size, page_size);
588 			break;
589 		case GITS_BASER_TYPE_VP:
590 		case GITS_BASER_TYPE_PP: /* Undocumented? */
591 		case GITS_BASER_TYPE_IC:
592 			its_tbl_size = page_size;
593 			break;
594 		default:
595 			if (bootverbose)
596 				device_printf(dev, "Unhandled table type %lx\n",
597 				    type);
598 			continue;
599 		}
600 		npages = howmany(its_tbl_size, PAGE_SIZE);
601 
602 		/* Allocate the table */
603 		table = contigmalloc_domainset(npages * PAGE_SIZE,
604 		    M_GICV3_ITS, sc->sc_ds, M_WAITOK | M_ZERO, 0,
605 		    (1ul << 48) - 1, PAGE_SIZE_64K, 0);
606 
607 		sc->sc_its_ptab[i].ptab_vaddr = table;
608 		sc->sc_its_ptab[i].ptab_l1_size = its_tbl_size;
609 		sc->sc_its_ptab[i].ptab_l1_nidents = l1_nidents;
610 		sc->sc_its_ptab[i].ptab_l2_size = page_size;
611 		sc->sc_its_ptab[i].ptab_l2_nidents = l2_nidents;
612 
613 		sc->sc_its_ptab[i].ptab_indirect = indirect;
614 		sc->sc_its_ptab[i].ptab_page_size = page_size;
615 
616 		paddr = vtophys(table);
617 
618 		while (1) {
619 			nitspages = howmany(its_tbl_size, page_size);
620 
621 			/* Clear the fields we will be setting */
622 			reg &= ~(GITS_BASER_VALID | GITS_BASER_INDIRECT |
623 			    GITS_BASER_CACHE_MASK | GITS_BASER_TYPE_MASK |
624 			    GITS_BASER_PA_MASK |
625 			    GITS_BASER_SHARE_MASK | GITS_BASER_PSZ_MASK |
626 			    GITS_BASER_SIZE_MASK);
627 			/* Set the new values */
628 			reg |= GITS_BASER_VALID |
629 			    (indirect ? GITS_BASER_INDIRECT : 0) |
630 			    (cache << GITS_BASER_CACHE_SHIFT) |
631 			    (type << GITS_BASER_TYPE_SHIFT) |
632 			    paddr | (share << GITS_BASER_SHARE_SHIFT) |
633 			    (nitspages - 1);
634 
635 			switch (page_size) {
636 			case PAGE_SIZE_4K:	/* 4KB */
637 				reg |=
638 				    GITS_BASER_PSZ_4K << GITS_BASER_PSZ_SHIFT;
639 				break;
640 			case PAGE_SIZE_16K:	/* 16KB */
641 				reg |=
642 				    GITS_BASER_PSZ_16K << GITS_BASER_PSZ_SHIFT;
643 				break;
644 			case PAGE_SIZE_64K:	/* 64KB */
645 				reg |=
646 				    GITS_BASER_PSZ_64K << GITS_BASER_PSZ_SHIFT;
647 				break;
648 			}
649 
650 			gic_its_write_8(sc, GITS_BASER(i), reg);
651 
652 			/* Read back to check */
653 			tmp = gic_its_read_8(sc, GITS_BASER(i));
654 
655 			/* Do the shareability masks line up? */
656 			if ((tmp & GITS_BASER_SHARE_MASK) !=
657 			    (reg & GITS_BASER_SHARE_MASK)) {
658 				share = (tmp & GITS_BASER_SHARE_MASK) >>
659 				    GITS_BASER_SHARE_SHIFT;
660 				continue;
661 			}
662 
663 			if (tmp != reg) {
664 				device_printf(dev, "GITS_BASER%d: "
665 				    "unable to be updated: %lx != %lx\n",
666 				    i, reg, tmp);
667 				return (ENXIO);
668 			}
669 
670 			sc->sc_its_ptab[i].ptab_share = share;
671 			/* We should have made all needed changes */
672 			break;
673 		}
674 	}
675 
676 	return (0);
677 }
678 
679 static void
680 gicv3_its_conftable_init(struct gicv3_its_softc *sc)
681 {
682 	/* note: we assume the ITS children are serialized by the parent */
683 	static void *conf_table;
684 	int extra_flags = 0;
685 	device_t gicv3;
686 	uint32_t ctlr;
687 	vm_paddr_t conf_pa;
688 	vm_offset_t conf_va;
689 
690 	/*
691 	 * The PROPBASER is a singleton in our parent. We only set it up the
692 	 * first time through. conf_table is effectively global to all the units
693 	 * and we rely on subr_bus to serialize probe/attach.
694 	 */
695 	if (conf_table != NULL) {
696 		sc->sc_conf_base = conf_table;
697 		return;
698 	}
699 
700 	gicv3 = device_get_parent(sc->dev);
701 	ctlr = gic_r_read_4(gicv3, GICR_CTLR);
702 	if ((ctlr & GICR_CTLR_LPI_ENABLE) != 0) {
703 		conf_pa = gic_r_read_8(gicv3, GICR_PROPBASER);
704 		conf_pa &= GICR_PROPBASER_PA_MASK;
705 		/*
706 		 * If there was a pre-existing PROPBASER, then we need to honor
707 		 * it because implementation defined behavior in gicv3 makes it
708 		 * impossible to quiesce to change it out. We will only see a
709 		 * pre-existing one when we've been kexec'd from a Linux kernel,
710 		 * or from a LinuxBoot environment.
711 		 *
712 		 * Linux provides us with a MEMRESERVE table that we put into
713 		 * the excluded physmem area. If PROPBASER isn't in this table,
714 		 * the system cannot run due to random memory corruption,
715 		 * so we panic for this case.
716 		 */
717 		if (!physmem_excluded(conf_pa, LPI_CONFTAB_SIZE))
718 			panic("gicv3 PROPBASER needs to reuse %#lx, but not reserved",
719 			    conf_pa);
720 		conf_va = PHYS_TO_DMAP(conf_pa);
721 		if (!pmap_klookup(conf_va, NULL))
722 			panic("Cannot map prior LPI mapping into KVA");
723 		conf_table = (void *)conf_va;
724 		extra_flags = ITS_FLAGS_LPI_PREALLOC | ITS_FLAGS_LPI_CONF_FLUSH;
725 		if (bootverbose)
726 			device_printf(sc->dev,
727 			    "LPI enabled, conf table using pa %#lx va %lx\n",
728 			    conf_pa, conf_va);
729 	} else {
730 		/*
731 		 * Otherwise just allocate contiguous pages. We'll configure the
732 		 * PROPBASER register later in its_init_cpu_lpi().
733 		 */
734 		conf_table = contigmalloc(LPI_CONFTAB_SIZE,
735 		    M_GICV3_ITS, M_WAITOK, 0, LPI_CONFTAB_MAX_ADDR,
736 		    LPI_CONFTAB_ALIGN, 0);
737 	}
738 	sc->sc_conf_base = conf_table;
739 	sc->sc_its_flags |= extra_flags;
740 
741 	/* Set the default configuration */
742 	memset(sc->sc_conf_base, GIC_PRIORITY_MAX | LPI_CONF_GROUP1,
743 	    LPI_CONFTAB_SIZE);
744 
745 	/* Flush the table to memory */
746 	cpu_dcache_wb_range(sc->sc_conf_base, LPI_CONFTAB_SIZE);
747 }
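
/*
 * Editor's note: the configuration table holds one byte per LPI, with the
 * priority in the upper bits, a group bit, and the enable bit in bit 0.
 * The memset() above therefore starts every LPI as group-1 at the default
 * priority with the enable bit clear; gicv3_its_enable_intr() and
 * gicv3_its_disable_intr() below only toggle LPI_CONF_ENABLE and then
 * push the change out to the ITS with an INV command.
 */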
748 
749 static void
750 gicv3_its_pendtables_init(struct gicv3_its_softc *sc)
751 {
752 
753 	if ((sc->sc_its_flags & ITS_FLAGS_LPI_PREALLOC) == 0) {
754 		for (int i = 0; i <= mp_maxid; i++) {
755 			if (CPU_ISSET(i, &sc->sc_cpus) == 0)
756 				continue;
757 
758 			sc->sc_pend_base[i] = contigmalloc(
759 			    LPI_PENDTAB_SIZE, M_GICV3_ITS, M_WAITOK | M_ZERO,
760 			    0, LPI_PENDTAB_MAX_ADDR, LPI_PENDTAB_ALIGN, 0);
761 
762 			/* Flush so the ITS can see the memory */
763 			cpu_dcache_wb_range(sc->sc_pend_base[i],
764 			    LPI_PENDTAB_SIZE);
765 		}
766 	}
767 }
768 
769 static void
770 its_init_cpu_lpi(device_t dev, struct gicv3_its_softc *sc)
771 {
772 	device_t gicv3;
773 	uint64_t xbaser, tmp, size;
774 	uint32_t ctlr;
775 	u_int cpuid;
776 
777 	gicv3 = device_get_parent(dev);
778 	cpuid = PCPU_GET(cpuid);
779 
780 	/*
781 	 * Set the redistributor base. If we're reusing what we found on boot
782 	 * since the gic was already running, then don't touch it here. We also
783 	 * don't need to disable / enable LPI if we're not changing PROPBASER,
784 	 * so only do that if we're not prealloced.
785 	 */
786 	if ((sc->sc_its_flags & ITS_FLAGS_LPI_PREALLOC) == 0) {
787 		/* Disable LPIs */
788 		ctlr = gic_r_read_4(gicv3, GICR_CTLR);
789 		ctlr &= ~GICR_CTLR_LPI_ENABLE;
790 		gic_r_write_4(gicv3, GICR_CTLR, ctlr);
791 
792 		/* Make sure changes are observable by the GIC */
793 		dsb(sy);
794 
795 		size = (flsl(LPI_CONFTAB_SIZE | GIC_FIRST_LPI) - 1);
796 
797 		xbaser = vtophys(sc->sc_conf_base) |
798 		    (GICR_PROPBASER_SHARE_IS << GICR_PROPBASER_SHARE_SHIFT) |
799 		    (GICR_PROPBASER_CACHE_NIWAWB << GICR_PROPBASER_CACHE_SHIFT) |
800 		    size;
801 
802 		gic_r_write_8(gicv3, GICR_PROPBASER, xbaser);
803 
804 		/* Check the cache attributes we set */
805 		tmp = gic_r_read_8(gicv3, GICR_PROPBASER);
806 
807 		if ((tmp & GICR_PROPBASER_SHARE_MASK) !=
808 		    (xbaser & GICR_PROPBASER_SHARE_MASK)) {
809 			if ((tmp & GICR_PROPBASER_SHARE_MASK) ==
810 			    (GICR_PROPBASER_SHARE_NS << GICR_PROPBASER_SHARE_SHIFT)) {
811 				/* We need to mark as non-cacheable */
812 				xbaser &= ~(GICR_PROPBASER_SHARE_MASK |
813 				    GICR_PROPBASER_CACHE_MASK);
814 				/* Non-cacheable */
815 				xbaser |= GICR_PROPBASER_CACHE_NIN <<
816 				    GICR_PROPBASER_CACHE_SHIFT;
817 				/* Non-shareable */
818 				xbaser |= GICR_PROPBASER_SHARE_NS <<
819 				    GICR_PROPBASER_SHARE_SHIFT;
820 				gic_r_write_8(gicv3, GICR_PROPBASER, xbaser);
821 			}
822 			sc->sc_its_flags |= ITS_FLAGS_LPI_CONF_FLUSH;
823 		}
824 
825 		/*
826 		 * Set the LPI pending table base
827 		 */
828 		xbaser = vtophys(sc->sc_pend_base[cpuid]) |
829 		    (GICR_PENDBASER_CACHE_NIWAWB << GICR_PENDBASER_CACHE_SHIFT) |
830 		    (GICR_PENDBASER_SHARE_IS << GICR_PENDBASER_SHARE_SHIFT);
831 
832 		gic_r_write_8(gicv3, GICR_PENDBASER, xbaser);
833 
834 		tmp = gic_r_read_8(gicv3, GICR_PENDBASER);
835 
836 		if ((tmp & GICR_PENDBASER_SHARE_MASK) ==
837 		    (GICR_PENDBASER_SHARE_NS << GICR_PENDBASER_SHARE_SHIFT)) {
838 			/* Clear the cache and shareability bits */
839 			xbaser &= ~(GICR_PENDBASER_CACHE_MASK |
840 			    GICR_PENDBASER_SHARE_MASK);
841 			/* Mark as non-shareable */
842 			xbaser |= GICR_PENDBASER_SHARE_NS << GICR_PENDBASER_SHARE_SHIFT;
843 			/* And non-cacheable */
844 			xbaser |= GICR_PENDBASER_CACHE_NIN <<
845 			    GICR_PENDBASER_CACHE_SHIFT;
846 		}
847 
848 		/* Enable LPIs */
849 		ctlr = gic_r_read_4(gicv3, GICR_CTLR);
850 		ctlr |= GICR_CTLR_LPI_ENABLE;
851 		gic_r_write_4(gicv3, GICR_CTLR, ctlr);
852 
853 		/* Make sure the GIC has seen everything */
854 		dsb(sy);
855 	} else {
856 		KASSERT(sc->sc_pend_base[cpuid] == NULL,
857 		    ("PREALLOC too soon cpuid %d", cpuid));
858 		tmp = gic_r_read_8(gicv3, GICR_PENDBASER);
859 		tmp &= GICR_PENDBASER_PA_MASK;
860 		if (!physmem_excluded(tmp, LPI_PENDTAB_SIZE))
861 			panic("gicv3 PENDBASER on cpu %d needs to reuse %#lx, but not reserved",
862 			    cpuid, tmp);
863 		sc->sc_pend_base[cpuid] = (void *)PHYS_TO_DMAP(tmp);
864 	}
865 
866 
867 	if (bootverbose)
868 		device_printf(gicv3, "using %sPENDBASE of %#lx on cpu %d\n",
869 		    (sc->sc_its_flags & ITS_FLAGS_LPI_PREALLOC) ? "pre-existing " : "",
870 		    vtophys(sc->sc_pend_base[cpuid]), cpuid);
871 }
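
/*
 * Editor's note: a worked check of the GICR_PROPBASER IDbits arithmetic
 * above, again assuming GIC_FIRST_LPI is 8192.  The redistributor must
 * cover interrupt IDs up to GIC_FIRST_LPI + LPI_NIRQS - 1 = 73727, which
 * needs 17 ID bits.  flsl(LPI_CONFTAB_SIZE | GIC_FIRST_LPI) - 1 =
 * flsl(0x10000 | 0x2000) - 1 = 17 - 1 = 16, and the field is encoded as
 * the number of ID bits minus one, so writing 16 requests 2^17 IDs.
 */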
872 
873 static int
874 its_init_cpu(device_t dev, struct gicv3_its_softc *sc)
875 {
876 	device_t gicv3;
877 	vm_paddr_t target;
878 	u_int cpuid;
879 	struct redist_pcpu *rpcpu;
880 
881 	gicv3 = device_get_parent(dev);
882 	cpuid = PCPU_GET(cpuid);
883 	if (!CPU_ISSET(cpuid, &sc->sc_cpus))
884 		return (0);
885 
886 	/* Check if the ITS is enabled on this CPU */
887 	if ((gic_r_read_8(gicv3, GICR_TYPER) & GICR_TYPER_PLPIS) == 0)
888 		return (ENXIO);
889 
890 	rpcpu = gicv3_get_redist(dev);
891 
892 	/* Do per-cpu LPI init once */
893 	if (!rpcpu->lpi_enabled) {
894 		its_init_cpu_lpi(dev, sc);
895 		rpcpu->lpi_enabled = true;
896 	}
897 
898 	if ((gic_its_read_8(sc, GITS_TYPER) & GITS_TYPER_PTA) != 0) {
899 		/* This ITS wants the redistributor physical address */
900 		target = vtophys((vm_offset_t)rman_get_virtual(rpcpu->res) +
901 		    rpcpu->offset);
902 	} else {
903 		/* This ITS wants the unique processor number */
904 		target = GICR_TYPER_CPUNUM(gic_r_read_8(gicv3, GICR_TYPER)) <<
905 		    CMD_TARGET_SHIFT;
906 	}
907 
908 	sc->sc_its_cols[cpuid]->col_target = target;
909 	sc->sc_its_cols[cpuid]->col_id = cpuid;
910 
911 	its_cmd_mapc(dev, sc->sc_its_cols[cpuid], 1);
912 	its_cmd_invall(dev, sc->sc_its_cols[cpuid]);
913 
914 	return (0);
915 }
916 
917 static int
918 gicv3_its_sysctl_trace_enable(SYSCTL_HANDLER_ARGS)
919 {
920 	struct gicv3_its_softc *sc;
921 	int rv;
922 
923 	sc = arg1;
924 
925 	rv = sysctl_handle_bool(oidp, &sc->trace_enable, 0, req);
926 	if (rv != 0 || req->newptr == NULL)
927 		return (rv);
928 	if (sc->trace_enable)
929 		gic_its_write_8(sc, GITS_TRKCTLR, 3);
930 	else
931 		gic_its_write_8(sc, GITS_TRKCTLR, 0);
932 
933 	return (0);
934 }
935 
936 static int
937 gicv3_its_sysctl_trace_regs(SYSCTL_HANDLER_ARGS)
938 {
939 	struct gicv3_its_softc *sc;
940 	struct sbuf *sb;
941 	int err;
942 
943 	sc = arg1;
944 	sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
945 	if (sb == NULL) {
946 		device_printf(sc->dev, "Could not allocate sbuf for output.\n");
947 		return (ENOMEM);
948 	}
949 	sbuf_cat(sb, "\n");
950 	sbuf_printf(sb, "GITS_TRKCTLR: 0x%08X\n",
951 	    gic_its_read_4(sc, GITS_TRKCTLR));
952 	sbuf_printf(sb, "GITS_TRKR:    0x%08X\n",
953 	    gic_its_read_4(sc, GITS_TRKR));
954 	sbuf_printf(sb, "GITS_TRKDIDR: 0x%08X\n",
955 	    gic_its_read_4(sc, GITS_TRKDIDR));
956 	sbuf_printf(sb, "GITS_TRKPIDR: 0x%08X\n",
957 	    gic_its_read_4(sc, GITS_TRKPIDR));
958 	sbuf_printf(sb, "GITS_TRKVIDR: 0x%08X\n",
959 	    gic_its_read_4(sc, GITS_TRKVIDR));
960 	sbuf_printf(sb, "GITS_TRKTGTR: 0x%08X\n",
961 	   gic_its_read_4(sc, GITS_TRKTGTR));
962 
963 	err = sbuf_finish(sb);
964 	if (err)
965 		device_printf(sc->dev, "Error finishing sbuf: %d\n", err);
966 	sbuf_delete(sb);
967 	return(err);
968 }
969 
970 static int
971 gicv3_its_init_sysctl(struct gicv3_its_softc *sc)
972 {
973 	struct sysctl_oid *oid, *child;
974 	struct sysctl_ctx_list *ctx_list;
975 
976 	ctx_list = device_get_sysctl_ctx(sc->dev);
977 	child = device_get_sysctl_tree(sc->dev);
978 	oid = SYSCTL_ADD_NODE(ctx_list,
979 	    SYSCTL_CHILDREN(child), OID_AUTO, "tracing",
980 	    CTLFLAG_RD| CTLFLAG_MPSAFE, NULL, "Messages tracing");
981 	if (oid == NULL)
982 		return (ENXIO);
983 
984 	/* Add registers */
985 	SYSCTL_ADD_PROC(ctx_list,
986 	    SYSCTL_CHILDREN(oid), OID_AUTO, "enable",
987 	    CTLTYPE_U8 | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
988 	    gicv3_its_sysctl_trace_enable, "CU", "Enable tracing");
989 	SYSCTL_ADD_PROC(ctx_list,
990 	    SYSCTL_CHILDREN(oid), OID_AUTO, "capture",
991 	    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
992 	    gicv3_its_sysctl_trace_regs, "", "Captured tracing registers.");
993 
994 	return (0);
995 }
996 
997 static int
998 gicv3_its_attach(device_t dev)
999 {
1000 	struct gicv3_its_softc *sc;
1001 	int domain, err, i, rid;
1002 	uint64_t phys;
1003 	uint32_t ctlr, iidr;
1004 
1005 	sc = device_get_softc(dev);
1006 
1007 	sc->sc_dev_table_idx = -1;
1008 	sc->sc_irq_length = gicv3_get_nirqs(dev);
1009 	sc->sc_irq_base = GIC_FIRST_LPI;
1010 	sc->sc_irq_base += device_get_unit(dev) * sc->sc_irq_length;
1011 
1012 	rid = 0;
1013 	sc->sc_its_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1014 	    RF_ACTIVE);
1015 	if (sc->sc_its_res == NULL) {
1016 		device_printf(dev, "Could not allocate memory\n");
1017 		return (ENXIO);
1018 	}
1019 
1020 	phys = rounddown2(vtophys(rman_get_virtual(sc->sc_its_res)) +
1021 	    GITS_TRANSLATER, PAGE_SIZE);
1022 	sc->ma = malloc(sizeof(struct vm_page), M_DEVBUF, M_WAITOK | M_ZERO);
1023 	vm_page_initfake(sc->ma, phys, VM_MEMATTR_DEFAULT);
1024 
1025 	CPU_COPY(&all_cpus, &sc->sc_cpus);
1026 	iidr = gic_its_read_4(sc, GITS_IIDR);
1027 	for (i = 0; i < nitems(its_quirks); i++) {
1028 		if ((iidr & its_quirks[i].iidr_mask) == its_quirks[i].iidr) {
1029 			if (bootverbose) {
1030 				device_printf(dev, "Applying %s\n",
1031 				    its_quirks[i].desc);
1032 			}
1033 			its_quirks[i].func(dev);
1034 			break;
1035 		}
1036 	}
1037 
1038 	if (bus_get_domain(dev, &domain) == 0 && domain < MAXMEMDOM) {
1039 		sc->sc_ds = DOMAINSET_PREF(domain);
1040 	} else {
1041 		sc->sc_ds = DOMAINSET_RR();
1042 	}
1043 
1044 	/*
1045 	 * GITS_CTLR_EN is mandated to reset to 0 on a Warm reset, but we may be
1046 	 * coming in via, for instance, a kexec/kboot style setup where a
1047 	 * previous kernel has configured then relinquished control.  Clear it
1048 	 * so that we can reconfigure GITS_BASER*.
1049 	 */
1050 	ctlr = gic_its_read_4(sc, GITS_CTLR);
1051 	if ((ctlr & GITS_CTLR_EN) != 0) {
1052 		ctlr &= ~GITS_CTLR_EN;
1053 		gic_its_write_4(sc, GITS_CTLR, ctlr);
1054 	}
1055 
1056 	/* Allocate the private tables */
1057 	err = gicv3_its_table_init(dev, sc);
1058 	if (err != 0)
1059 		return (err);
1060 
1061 	/* Protects access to the device list */
1062 	mtx_init(&sc->sc_its_dev_lock, "ITS device lock", NULL, MTX_SPIN);
1063 
1064 	/* Protects access to the ITS command circular buffer. */
1065 	mtx_init(&sc->sc_its_cmd_lock, "ITS cmd lock", NULL, MTX_SPIN);
1066 
1067 	/* Allocate the command circular buffer */
1068 	gicv3_its_cmdq_init(sc);
1069 
1070 	/* Allocate the per-CPU collections */
1071 	for (int cpu = 0; cpu <= mp_maxid; cpu++)
1072 		if (CPU_ISSET(cpu, &sc->sc_cpus) != 0)
1073 			sc->sc_its_cols[cpu] = malloc_domainset(
1074 			    sizeof(*sc->sc_its_cols[0]), M_GICV3_ITS,
1075 			    DOMAINSET_PREF(pcpu_find(cpu)->pc_domain),
1076 			    M_WAITOK | M_ZERO);
1077 		else
1078 			sc->sc_its_cols[cpu] = NULL;
1079 
1080 	/* Enable the ITS */
1081 	gic_its_write_4(sc, GITS_CTLR, ctlr | GITS_CTLR_EN);
1082 
1083 	/* Create the LPI configuration table */
1084 	gicv3_its_conftable_init(sc);
1085 
1086 	/* And the pending tables */
1087 	gicv3_its_pendtables_init(sc);
1088 
1089 	/* Enable LPIs on this CPU */
1090 	its_init_cpu(dev, sc);
1091 
1092 	TAILQ_INIT(&sc->sc_its_dev_list);
1093 	TAILQ_INIT(&sc->sc_free_irqs);
1094 
1095 	/*
1096 	 * Create the vmem object to allocate INTRNG IRQs from. We try to
1097 	 * use all IRQs not already used by the GICv3.
1098 	 * XXX: This assumes there are no other interrupt controllers in the
1099 	 * system.
1100 	 */
1101 	sc->sc_irq_alloc = vmem_create(device_get_nameunit(dev), 0,
1102 	    gicv3_get_nirqs(dev), 1, 0, M_FIRSTFIT | M_WAITOK);
1103 
1104 	sc->sc_irqs = malloc(sizeof(*sc->sc_irqs) * sc->sc_irq_length,
1105 	    M_GICV3_ITS, M_WAITOK | M_ZERO);
1106 
1107 	/* For GIC-500 install tracking sysctls. */
1108 	if ((iidr & (GITS_IIDR_PRODUCT_MASK | GITS_IIDR_IMPLEMENTOR_MASK)) ==
1109 	    GITS_IIDR_RAW(GITS_IIDR_IMPL_ARM, GITS_IIDR_PROD_GIC500, 0, 0))
1110 		gicv3_its_init_sysctl(sc);
1111 
1112 	return (0);
1113 }
1114 
1115 static int
1116 gicv3_its_detach(device_t dev)
1117 {
1118 
1119 	return (ENXIO);
1120 }
1121 
1122 static void
1123 its_quirk_cavium_22375(device_t dev)
1124 {
1125 	struct gicv3_its_softc *sc;
1126 	int domain;
1127 
1128 	sc = device_get_softc(dev);
1129 	sc->sc_its_flags |= ITS_FLAGS_ERRATA_CAVIUM_22375;
1130 
1131 	/*
1132 	 * We need to limit which CPUs we send these interrupts to on
1133 	 * the original dual socket ThunderX as it is unable to
1134 	 * forward them between the two sockets.
1135 	 */
1136 	if (bus_get_domain(dev, &domain) == 0) {
1137 		if (domain < MAXMEMDOM) {
1138 			CPU_COPY(&cpuset_domain[domain], &sc->sc_cpus);
1139 		} else {
1140 			CPU_ZERO(&sc->sc_cpus);
1141 		}
1142 	}
1143 }
1144 
1145 static void
1146 gicv3_its_disable_intr(device_t dev, struct intr_irqsrc *isrc)
1147 {
1148 	struct gicv3_its_softc *sc;
1149 	struct gicv3_its_irqsrc *girq;
1150 	uint8_t *conf;
1151 
1152 	sc = device_get_softc(dev);
1153 	girq = (struct gicv3_its_irqsrc *)isrc;
1154 	conf = sc->sc_conf_base;
1155 
1156 	conf[girq->gi_lpi] &= ~LPI_CONF_ENABLE;
1157 
1158 	if ((sc->sc_its_flags & ITS_FLAGS_LPI_CONF_FLUSH) != 0) {
1159 		/* Clean D-cache under command. */
1160 		cpu_dcache_wb_range(&conf[girq->gi_lpi], 1);
1161 	} else {
1162 		/* DSB inner shareable, store */
1163 		dsb(ishst);
1164 	}
1165 
1166 	its_cmd_inv(dev, girq->gi_its_dev, girq);
1167 }
1168 
1169 static void
1170 gicv3_its_enable_intr(device_t dev, struct intr_irqsrc *isrc)
1171 {
1172 	struct gicv3_its_softc *sc;
1173 	struct gicv3_its_irqsrc *girq;
1174 	uint8_t *conf;
1175 
1176 	sc = device_get_softc(dev);
1177 	girq = (struct gicv3_its_irqsrc *)isrc;
1178 	conf = sc->sc_conf_base;
1179 
1180 	conf[girq->gi_lpi] |= LPI_CONF_ENABLE;
1181 
1182 	if ((sc->sc_its_flags & ITS_FLAGS_LPI_CONF_FLUSH) != 0) {
1183 		/* Clean D-cache under command. */
1184 		cpu_dcache_wb_range(&conf[girq->gi_lpi], 1);
1185 	} else {
1186 		/* DSB inner shareable, store */
1187 		dsb(ishst);
1188 	}
1189 
1190 	its_cmd_inv(dev, girq->gi_its_dev, girq);
1191 }
1192 
1193 static int
1194 gicv3_its_intr(void *arg, uintptr_t irq)
1195 {
1196 	struct gicv3_its_softc *sc = arg;
1197 	struct gicv3_its_irqsrc *girq;
1198 	struct trapframe *tf;
1199 
1200 	irq -= sc->sc_irq_base;
1201 	girq = sc->sc_irqs[irq];
1202 	if (girq == NULL)
1203 		panic("gicv3_its_intr: Invalid interrupt %ld",
1204 		    irq + sc->sc_irq_base);
1205 
1206 	tf = curthread->td_intr_frame;
1207 	intr_isrc_dispatch(&girq->gi_isrc, tf);
1208 	return (FILTER_HANDLED);
1209 }
1210 
1211 static void
1212 gicv3_its_pre_ithread(device_t dev, struct intr_irqsrc *isrc)
1213 {
1214 	struct gicv3_its_irqsrc *girq;
1215 
1216 	girq = (struct gicv3_its_irqsrc *)isrc;
1217 	gic_icc_write(EOIR1, girq->gi_lpi + GIC_FIRST_LPI);
1218 }
1219 
1220 static void
1221 gicv3_its_post_ithread(device_t dev, struct intr_irqsrc *isrc)
1222 {
1223 
1224 }
1225 
1226 static void
1227 gicv3_its_post_filter(device_t dev, struct intr_irqsrc *isrc)
1228 {
1229 	struct gicv3_its_irqsrc *girq;
1230 
1231 	girq = (struct gicv3_its_irqsrc *)isrc;
1232 	gic_icc_write(EOIR1, girq->gi_lpi + GIC_FIRST_LPI);
1233 }
1234 
1235 static int
1236 gicv3_its_select_cpu(device_t dev, struct intr_irqsrc *isrc)
1237 {
1238 	struct gicv3_its_softc *sc;
1239 
1240 	sc = device_get_softc(dev);
1241 	if (CPU_EMPTY(&isrc->isrc_cpu)) {
1242 		sc->gic_irq_cpu = intr_irq_next_cpu(sc->gic_irq_cpu,
1243 		    &sc->sc_cpus);
1244 		CPU_SETOF(sc->gic_irq_cpu, &isrc->isrc_cpu);
1245 	}
1246 
1247 	return (0);
1248 }
1249 
1250 static int
1251 gicv3_its_bind_intr(device_t dev, struct intr_irqsrc *isrc)
1252 {
1253 	struct gicv3_its_irqsrc *girq;
1254 
1255 	gicv3_its_select_cpu(dev, isrc);
1256 
1257 	girq = (struct gicv3_its_irqsrc *)isrc;
1258 	its_cmd_movi(dev, girq);
1259 	return (0);
1260 }
1261 
1262 static int
1263 gicv3_its_map_intr(device_t dev, struct intr_map_data *data,
1264     struct intr_irqsrc **isrcp)
1265 {
1266 
1267 	/*
1268 	 * This should never happen, we only call this function to map
1269 	 * interrupts found before the controller driver is ready.
1270 	 */
1271 	panic("gicv3_its_map_intr: Unable to map a MSI interrupt");
1272 }
1273 
1274 static int
1275 gicv3_its_setup_intr(device_t dev, struct intr_irqsrc *isrc,
1276     struct resource *res, struct intr_map_data *data)
1277 {
1278 
1279 	/* Bind the interrupt to a CPU */
1280 	gicv3_its_bind_intr(dev, isrc);
1281 
1282 	return (0);
1283 }
1284 
1285 #ifdef SMP
1286 static void
1287 gicv3_its_init_secondary(device_t dev)
1288 {
1289 	struct gicv3_its_softc *sc;
1290 
1291 	sc = device_get_softc(dev);
1292 
1293 	/*
1294 	 * This is fatal as otherwise we may bind interrupts to this CPU.
1295 	 * We need a way to tell the interrupt framework to only bind to a
1296 	 * subset of given CPUs when it performs the shuffle.
1297 	 */
1298 	if (its_init_cpu(dev, sc) != 0)
1299 		panic("gicv3_its_init_secondary: No usable ITS on CPU%d",
1300 		    PCPU_GET(cpuid));
1301 }
1302 #endif
1303 
1304 static uint32_t
1305 its_get_devid(device_t pci_dev)
1306 {
1307 	uintptr_t id;
1308 
1309 	if (pci_get_id(pci_dev, PCI_ID_MSI, &id) != 0)
1310 		panic("%s: %s: Unable to get the MSI DeviceID", __func__,
1311 		    device_get_nameunit(pci_dev));
1312 
1313 	return (id);
1314 }
1315 
1316 static struct its_dev *
1317 its_device_find(device_t dev, device_t child)
1318 {
1319 	struct gicv3_its_softc *sc;
1320 	struct its_dev *its_dev = NULL;
1321 
1322 	sc = device_get_softc(dev);
1323 
1324 	mtx_lock_spin(&sc->sc_its_dev_lock);
1325 	TAILQ_FOREACH(its_dev, &sc->sc_its_dev_list, entry) {
1326 		if (its_dev->pci_dev == child)
1327 			break;
1328 	}
1329 	mtx_unlock_spin(&sc->sc_its_dev_lock);
1330 
1331 	return (its_dev);
1332 }
1333 
1334 static bool
1335 its_device_alloc(struct gicv3_its_softc *sc, int devid)
1336 {
1337 	struct its_ptable *ptable;
1338 	void *l2_table;
1339 	uint64_t *table;
1340 	uint32_t index;
1341 	bool shareable;
1342 
1343 	/* No device table */
1344 	if (sc->sc_dev_table_idx < 0) {
1345 		if (devid >= (1 << sc->sc_devbits)) {
1346 			if (bootverbose) {
1347 				device_printf(sc->dev,
1348 				    "%s: Device out of range for hardware "
1349 				    "(%x >= %x)\n", __func__, devid,
1350 				    1 << sc->sc_devbits);
1351 			}
1352 			return (false);
1353 		}
1354 		return (true);
1355 	}
1356 
1357 	ptable = &sc->sc_its_ptab[sc->sc_dev_table_idx];
1358 	/* Check the devid is within the table limit */
1359 	if (!ptable->ptab_indirect) {
1360 		if (devid >= ptable->ptab_l1_nidents) {
1361 			if (bootverbose) {
1362 				device_printf(sc->dev,
1363 				    "%s: Device out of range for table "
1364 				    "(%x >= %x)\n", __func__, devid,
1365 				    ptable->ptab_l1_nidents);
1366 			}
1367 			return (false);
1368 		}
1369 
1370 		return (true);
1371 	}
1372 
1373 	/* Check the devid is within the allocated range */
1374 	index = devid / ptable->ptab_l2_nidents;
1375 	if (index >= ptable->ptab_l1_nidents) {
1376 		if (bootverbose) {
1377 			device_printf(sc->dev,
1378 			    "%s: Index out of range for table (%x >= %x)\n",
1379 			    __func__, index, ptable->ptab_l1_nidents);
1380 		}
1381 		return (false);
1382 	}
1383 
1384 	table = (uint64_t *)ptable->ptab_vaddr;
1385 	/* We already have a second level table */
1386 	if ((table[index] & GITS_BASER_VALID) != 0)
1387 		return (true);
1388 
1389 	shareable = true;
1390 	if ((ptable->ptab_share & GITS_BASER_SHARE_MASK) == GITS_BASER_SHARE_NS)
1391 		shareable = false;
1392 
1393 	l2_table = contigmalloc_domainset(ptable->ptab_l2_size,
1394 	    M_GICV3_ITS, sc->sc_ds, M_WAITOK | M_ZERO, 0, (1ul << 48) - 1,
1395 	    ptable->ptab_page_size, 0);
1396 
1397 	if (!shareable)
1398 		cpu_dcache_wb_range(l2_table, ptable->ptab_l2_size);
1399 
1400 	table[index] = vtophys(l2_table) | GITS_BASER_VALID;
1401 	if (!shareable)
1402 		cpu_dcache_wb_range(&table[index], sizeof(table[index]));
1403 
1404 	dsb(sy);
1405 	return (true);
1406 }
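
/*
 * Editor's sketch of the indirect (two-level) device-table lookup above,
 * using illustrative numbers: with a 64KB ITS page and 8-byte device-table
 * entries, each L2 page covers ptab_l2_nidents = 65536 / 8 = 8192 DeviceIDs,
 * so DeviceID 0x12345 lands in L1 slot 0x12345 / 8192 = 9.  The first
 * device in that range allocates the L2 page, publishes its physical
 * address with GITS_BASER_VALID in the L1 entry, and later DeviceIDs in
 * the same range take the early "table[index] & GITS_BASER_VALID" exit.
 */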
1407 
1408 static struct its_dev *
1409 its_device_get(device_t dev, device_t child, u_int nvecs)
1410 {
1411 	struct gicv3_its_softc *sc;
1412 	struct its_dev *its_dev;
1413 	vmem_addr_t irq_base;
1414 	size_t esize, itt_size;
1415 
1416 	sc = device_get_softc(dev);
1417 
1418 	its_dev = its_device_find(dev, child);
1419 	if (its_dev != NULL)
1420 		return (its_dev);
1421 
1422 	its_dev = malloc(sizeof(*its_dev), M_GICV3_ITS, M_NOWAIT | M_ZERO);
1423 	if (its_dev == NULL)
1424 		return (NULL);
1425 
1426 	its_dev->pci_dev = child;
1427 	its_dev->devid = its_get_devid(child);
1428 
1429 	its_dev->lpis.lpi_busy = 0;
1430 	its_dev->lpis.lpi_num = nvecs;
1431 	its_dev->lpis.lpi_free = nvecs;
1432 
1433 	if (!its_device_alloc(sc, its_dev->devid)) {
1434 		free(its_dev, M_GICV3_ITS);
1435 		return (NULL);
1436 	}
1437 
1438 	if (vmem_alloc(sc->sc_irq_alloc, nvecs, M_FIRSTFIT | M_NOWAIT,
1439 	    &irq_base) != 0) {
1440 		free(its_dev, M_GICV3_ITS);
1441 		return (NULL);
1442 	}
1443 	its_dev->lpis.lpi_base = irq_base;
1444 
1445 	/* Get ITT entry size */
1446 	esize = GITS_TYPER_ITTES(gic_its_read_8(sc, GITS_TYPER));
1447 
1448 	/*
1449 	 * Allocate ITT for this device.
1450 	 * PA has to be 256 B aligned. At least two entries for device.
1451 	 */
1452 	itt_size = roundup2(MAX(nvecs, 2) * esize, 256);
1453 	its_dev->itt = contigmalloc_domainset(itt_size,
1454 	    M_GICV3_ITS, sc->sc_ds, M_NOWAIT | M_ZERO, 0,
1455 	    LPI_INT_TRANS_TAB_MAX_ADDR, LPI_INT_TRANS_TAB_ALIGN, 0);
1456 	if (its_dev->itt == NULL) {
1457 		vmem_free(sc->sc_irq_alloc, its_dev->lpis.lpi_base, nvecs);
1458 		free(its_dev, M_GICV3_ITS);
1459 		return (NULL);
1460 	}
1461 
1462 	/* Make sure device sees zeroed ITT. */
1463 	if ((sc->sc_its_flags & ITS_FLAGS_CMDQ_FLUSH) != 0)
1464 		cpu_dcache_wb_range(its_dev->itt, itt_size);
1465 
1466 	mtx_lock_spin(&sc->sc_its_dev_lock);
1467 	TAILQ_INSERT_TAIL(&sc->sc_its_dev_list, its_dev, entry);
1468 	mtx_unlock_spin(&sc->sc_its_dev_lock);
1469 
1470 	/* Map device to its ITT */
1471 	its_cmd_mapd(dev, its_dev, 1);
1472 
1473 	return (its_dev);
1474 }
1475 
1476 static void
1477 its_device_release(device_t dev, struct its_dev *its_dev)
1478 {
1479 	struct gicv3_its_softc *sc;
1480 
1481 	KASSERT(its_dev->lpis.lpi_busy == 0,
1482 	    ("its_device_release: Trying to release an inuse ITS device"));
1483 
1484 	/* Unmap device in ITS */
1485 	its_cmd_mapd(dev, its_dev, 0);
1486 
1487 	sc = device_get_softc(dev);
1488 
1489 	/* Remove the device from the list of devices */
1490 	mtx_lock_spin(&sc->sc_its_dev_lock);
1491 	TAILQ_REMOVE(&sc->sc_its_dev_list, its_dev, entry);
1492 	mtx_unlock_spin(&sc->sc_its_dev_lock);
1493 
1494 	/* Free ITT */
1495 	KASSERT(its_dev->itt != NULL, ("Invalid ITT in valid ITS device"));
1496 	free(its_dev->itt, M_GICV3_ITS);
1497 
1498 	/* Free the IRQ allocation */
1499 	vmem_free(sc->sc_irq_alloc, its_dev->lpis.lpi_base,
1500 	    its_dev->lpis.lpi_num);
1501 
1502 	free(its_dev, M_GICV3_ITS);
1503 }
1504 
1505 static struct gicv3_its_irqsrc *
1506 gicv3_its_alloc_irqsrc(device_t dev, struct gicv3_its_softc *sc, u_int irq)
1507 {
1508 	struct gicv3_its_irqsrc *girq = NULL;
1509 
1510 	KASSERT(sc->sc_irqs[irq] == NULL,
1511 	    ("%s: Interrupt %u already allocated", __func__, irq));
1512 	mtx_lock_spin(&sc->sc_its_dev_lock);
1513 	if (!TAILQ_EMPTY(&sc->sc_free_irqs)) {
1514 		girq = TAILQ_FIRST(&sc->sc_free_irqs);
1515 		TAILQ_REMOVE(&sc->sc_free_irqs, girq, gi_link);
1516 	}
1517 	mtx_unlock_spin(&sc->sc_its_dev_lock);
1518 	if (girq == NULL) {
1519 		girq = malloc(sizeof(*girq), M_GICV3_ITS,
1520 		    M_NOWAIT | M_ZERO);
1521 		if (girq == NULL)
1522 			return (NULL);
1523 		girq->gi_id = -1;
1524 		if (intr_isrc_register(&girq->gi_isrc, dev, 0,
1525 		    "%s,%u", device_get_nameunit(dev), irq) != 0) {
1526 			free(girq, M_GICV3_ITS);
1527 			return (NULL);
1528 		}
1529 	}
1530 	girq->gi_lpi = irq + sc->sc_irq_base - GIC_FIRST_LPI;
1531 	sc->sc_irqs[irq] = girq;
1532 
1533 	return (girq);
1534 }
1535 
1536 static void
1537 gicv3_its_release_irqsrc(struct gicv3_its_softc *sc,
1538     struct gicv3_its_irqsrc *girq)
1539 {
1540 	u_int irq;
1541 
1542 	mtx_assert(&sc->sc_its_dev_lock, MA_OWNED);
1543 
1544 	irq = girq->gi_lpi + GIC_FIRST_LPI - sc->sc_irq_base;
1545 	sc->sc_irqs[irq] = NULL;
1546 
1547 	girq->gi_id = -1;
1548 	girq->gi_its_dev = NULL;
1549 	TAILQ_INSERT_TAIL(&sc->sc_free_irqs, girq, gi_link);
1550 }
1551 
1552 static int
1553 gicv3_its_alloc_msi(device_t dev, device_t child, int count, int maxcount,
1554     device_t *pic, struct intr_irqsrc **srcs)
1555 {
1556 	struct gicv3_its_softc *sc;
1557 	struct gicv3_its_irqsrc *girq;
1558 	struct its_dev *its_dev;
1559 	u_int irq;
1560 	int i;
1561 
1562 	its_dev = its_device_get(dev, child, count);
1563 	if (its_dev == NULL)
1564 		return (ENXIO);
1565 
1566 	KASSERT(its_dev->lpis.lpi_free >= count,
1567 	    ("gicv3_its_alloc_msi: No free LPIs"));
1568 	sc = device_get_softc(dev);
1569 	irq = its_dev->lpis.lpi_base + its_dev->lpis.lpi_num -
1570 	    its_dev->lpis.lpi_free;
1571 
1572 	/* Allocate the irqsrc for each MSI */
1573 	for (i = 0; i < count; i++, irq++) {
1574 		its_dev->lpis.lpi_free--;
1575 		srcs[i] = (struct intr_irqsrc *)gicv3_its_alloc_irqsrc(dev,
1576 		    sc, irq);
1577 		if (srcs[i] == NULL)
1578 			break;
1579 	}
1580 
1581 	/* The allocation failed, release them */
1582 	if (i != count) {
1583 		mtx_lock_spin(&sc->sc_its_dev_lock);
1584 		for (i = 0; i < count; i++) {
1585 			girq = (struct gicv3_its_irqsrc *)srcs[i];
1586 			if (girq == NULL)
1587 				break;
1588 			gicv3_its_release_irqsrc(sc, girq);
1589 			srcs[i] = NULL;
1590 		}
1591 		mtx_unlock_spin(&sc->sc_its_dev_lock);
1592 		return (ENXIO);
1593 	}
1594 
1595 	/* Finish the allocation now we have all MSI irqsrcs */
1596 	for (i = 0; i < count; i++) {
1597 		girq = (struct gicv3_its_irqsrc *)srcs[i];
1598 		girq->gi_id = i;
1599 		girq->gi_its_dev = its_dev;
1600 
1601 		/* Map the message to the given IRQ */
1602 		gicv3_its_select_cpu(dev, (struct intr_irqsrc *)girq);
1603 		its_cmd_mapti(dev, girq);
1604 	}
1605 	its_dev->lpis.lpi_busy += count;
1606 	*pic = dev;
1607 
1608 	return (0);
1609 }
1610 
1611 static int
1612 gicv3_its_release_msi(device_t dev, device_t child, int count,
1613     struct intr_irqsrc **isrc)
1614 {
1615 	struct gicv3_its_softc *sc;
1616 	struct gicv3_its_irqsrc *girq;
1617 	struct its_dev *its_dev;
1618 	int i;
1619 
1620 	its_dev = its_device_find(dev, child);
1621 
1622 	KASSERT(its_dev != NULL,
1623 	    ("gicv3_its_release_msi: Releasing a MSI interrupt with "
1624 	     "no ITS device"));
1625 	KASSERT(its_dev->lpis.lpi_busy >= count,
1626 	    ("gicv3_its_release_msi: Releasing more interrupts than "
1627 	     "were allocated: releasing %d, allocated %d", count,
1628 	     its_dev->lpis.lpi_busy));
1629 
1630 	sc = device_get_softc(dev);
1631 	mtx_lock_spin(&sc->sc_its_dev_lock);
1632 	for (i = 0; i < count; i++) {
1633 		girq = (struct gicv3_its_irqsrc *)isrc[i];
1634 		gicv3_its_release_irqsrc(sc, girq);
1635 	}
1636 	mtx_unlock_spin(&sc->sc_its_dev_lock);
1637 	its_dev->lpis.lpi_busy -= count;
1638 
1639 	if (its_dev->lpis.lpi_busy == 0)
1640 		its_device_release(dev, its_dev);
1641 
1642 	return (0);
1643 }
1644 
1645 static int
1646 gicv3_its_alloc_msix(device_t dev, device_t child, device_t *pic,
1647     struct intr_irqsrc **isrcp)
1648 {
1649 	struct gicv3_its_softc *sc;
1650 	struct gicv3_its_irqsrc *girq;
1651 	struct its_dev *its_dev;
1652 	u_int nvecs, irq;
1653 
1654 	nvecs = pci_msix_count(child);
1655 	its_dev = its_device_get(dev, child, nvecs);
1656 	if (its_dev == NULL)
1657 		return (ENXIO);
1658 
1659 	KASSERT(its_dev->lpis.lpi_free > 0,
1660 	    ("gicv3_its_alloc_msix: No free LPIs"));
1661 	sc = device_get_softc(dev);
1662 	irq = its_dev->lpis.lpi_base + its_dev->lpis.lpi_num -
1663 	    its_dev->lpis.lpi_free;
1664 
1665 	girq = gicv3_its_alloc_irqsrc(dev, sc, irq);
1666 	if (girq == NULL)
1667 		return (ENXIO);
1668 	girq->gi_id = its_dev->lpis.lpi_busy;
1669 	girq->gi_its_dev = its_dev;
1670 
1671 	its_dev->lpis.lpi_free--;
1672 	its_dev->lpis.lpi_busy++;
1673 
1674 	/* Map the message to the given IRQ */
1675 	gicv3_its_select_cpu(dev, (struct intr_irqsrc *)girq);
1676 	its_cmd_mapti(dev, girq);
1677 
1678 	*pic = dev;
1679 	*isrcp = (struct intr_irqsrc *)girq;
1680 
1681 	return (0);
1682 }
1683 
1684 static int
1685 gicv3_its_release_msix(device_t dev, device_t child, struct intr_irqsrc *isrc)
1686 {
1687 	struct gicv3_its_softc *sc;
1688 	struct gicv3_its_irqsrc *girq;
1689 	struct its_dev *its_dev;
1690 
1691 	its_dev = its_device_find(dev, child);
1692 
1693 	KASSERT(its_dev != NULL,
1694 	    ("gicv3_its_release_msix: Releasing a MSI-X interrupt with "
1695 	     "no ITS device"));
1696 	KASSERT(its_dev->lpis.lpi_busy > 0,
1697 	    ("gicv3_its_release_msix: Releasing more interrupts than "
1698 	     "were allocated: allocated %d", its_dev->lpis.lpi_busy));
1699 
1700 	sc = device_get_softc(dev);
1701 	girq = (struct gicv3_its_irqsrc *)isrc;
1702 	mtx_lock_spin(&sc->sc_its_dev_lock);
1703 	gicv3_its_release_irqsrc(sc, girq);
1704 	mtx_unlock_spin(&sc->sc_its_dev_lock);
1705 	its_dev->lpis.lpi_busy--;
1706 
1707 	if (its_dev->lpis.lpi_busy == 0)
1708 		its_device_release(dev, its_dev);
1709 
1710 	return (0);
1711 }
1712 
1713 static int
1714 gicv3_its_map_msi(device_t dev, device_t child, struct intr_irqsrc *isrc,
1715     uint64_t *addr, uint32_t *data)
1716 {
1717 	struct gicv3_its_softc *sc;
1718 	struct gicv3_its_irqsrc *girq;
1719 
1720 	sc = device_get_softc(dev);
1721 	girq = (struct gicv3_its_irqsrc *)isrc;
1722 
1723 	*addr = vtophys(rman_get_virtual(sc->sc_its_res)) + GITS_TRANSLATER;
1724 	*data = girq->gi_id;
1725 
1726 	return (0);
1727 }
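
/*
 * Editor's note: the address/data pair returned above is what makes the
 * translation work end to end.  The PCI device writes *data (the per-device
 * EventID, girq->gi_id) to the GITS_TRANSLATER register; the ITS uses the
 * bus-provided DeviceID to find the device's ITT via the device table, the
 * EventID to find the (LPI, collection) pair programmed by MAPTI, and the
 * collection to pick the target redistributor.
 */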
1728 
1729 #ifdef IOMMU
1730 static int
1731 gicv3_iommu_init(device_t dev, device_t child, struct iommu_domain **domain)
1732 {
1733 	struct gicv3_its_softc *sc;
1734 	struct iommu_ctx *ctx;
1735 	int error;
1736 
1737 	sc = device_get_softc(dev);
1738 	ctx = iommu_get_dev_ctx(child);
1739 	if (ctx == NULL)
1740 		return (ENXIO);
1741 	/* Map the page containing the GITS_TRANSLATER register. */
1742 	error = iommu_map_msi(ctx, PAGE_SIZE, 0,
1743 	    IOMMU_MAP_ENTRY_WRITE, IOMMU_MF_CANWAIT, &sc->ma);
	*domain = iommu_get_ctx_domain(ctx);

	return (error);
}

static void
gicv3_iommu_deinit(device_t dev, device_t child)
{
	struct iommu_ctx *ctx;

	ctx = iommu_get_dev_ctx(child);
	if (ctx == NULL)
		return;

	iommu_unmap_msi(ctx);
}
#endif

/*
 * Commands handling.
 */

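/*
 * Each ITS command is 32 bytes: four 64-bit doublewords (DW0-DW3) that
 * the ITS reads as little-endian, hence the htole64() conversions in the
 * helpers below.  Every helper clears its field with the field mask
 * before or-ing in the new value, so a command may be built up
 * incrementally in any order.
 */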
static __inline void
cmd_format_command(struct its_cmd *cmd, uint8_t cmd_type)
{
	/* Command field: DW0 [7:0] */
	cmd->cmd_dword[0] &= htole64(~CMD_COMMAND_MASK);
	cmd->cmd_dword[0] |= htole64(cmd_type);
}

static __inline void
cmd_format_devid(struct its_cmd *cmd, uint32_t devid)
{
	/* Device ID field: DW0 [63:32] */
	cmd->cmd_dword[0] &= htole64(~CMD_DEVID_MASK);
	cmd->cmd_dword[0] |= htole64((uint64_t)devid << CMD_DEVID_SHIFT);
}

static __inline void
cmd_format_size(struct its_cmd *cmd, uint16_t size)
{
	/* Size field: DW1 [4:0] */
	cmd->cmd_dword[1] &= htole64(~CMD_SIZE_MASK);
	cmd->cmd_dword[1] |= htole64((size & CMD_SIZE_MASK));
}

static __inline void
cmd_format_id(struct its_cmd *cmd, uint32_t id)
{
	/* ID field: DW1 [31:0] */
	cmd->cmd_dword[1] &= htole64(~CMD_ID_MASK);
	cmd->cmd_dword[1] |= htole64(id);
}

static __inline void
cmd_format_pid(struct its_cmd *cmd, uint32_t pid)
{
	/* Physical ID field: DW1 [63:32] */
	cmd->cmd_dword[1] &= htole64(~CMD_PID_MASK);
	cmd->cmd_dword[1] |= htole64((uint64_t)pid << CMD_PID_SHIFT);
}

static __inline void
cmd_format_col(struct its_cmd *cmd, uint16_t col_id)
{
	/* Collection field: DW2 [15:0] */
	cmd->cmd_dword[2] &= htole64(~CMD_COL_MASK);
	cmd->cmd_dword[2] |= htole64(col_id);
}

static __inline void
cmd_format_target(struct its_cmd *cmd, uint64_t target)
{
	/* Target Address field: DW2 [47:16] */
	cmd->cmd_dword[2] &= htole64(~CMD_TARGET_MASK);
	cmd->cmd_dword[2] |= htole64(target & CMD_TARGET_MASK);
}

static __inline void
cmd_format_itt(struct its_cmd *cmd, uint64_t itt)
{
	/* ITT Address field: DW2 [47:8] */
	cmd->cmd_dword[2] &= htole64(~CMD_ITT_MASK);
	cmd->cmd_dword[2] |= htole64(itt & CMD_ITT_MASK);
}

static __inline void
cmd_format_valid(struct its_cmd *cmd, uint8_t valid)
{
	/* Valid field: DW2 [63] */
	cmd->cmd_dword[2] &= htole64(~CMD_VALID_MASK);
	cmd->cmd_dword[2] |= htole64((uint64_t)valid << CMD_VALID_SHIFT);
}

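/*
 * The command queue is a ring buffer.  One slot is always left unused:
 * if the write index were allowed to catch up with GITS_CREADR, a full
 * queue would be indistinguishable from an empty one.
 */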
static inline bool
its_cmd_queue_full(struct gicv3_its_softc *sc)
{
	size_t read_idx, next_write_idx;

	/* Get the index of the next command */
	next_write_idx = (sc->sc_its_cmd_next_idx + 1) %
	    (ITS_CMDQ_SIZE / sizeof(struct its_cmd));
	/* And the index of the current command being read */
	read_idx = gic_its_read_4(sc, GITS_CREADR) / sizeof(struct its_cmd);

	/*
	 * The queue is full when the write offset points
	 * at the command before the current read offset.
	 */
	return (next_write_idx == read_idx);
}

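/*
 * Make a newly written command visible to the ITS: when the ITS does not
 * snoop the CPU caches (ITS_FLAGS_CMDQ_FLUSH) the cache line holding the
 * command must be cleaned, otherwise an inner-shareable store barrier is
 * sufficient.
 */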
static inline void
its_cmd_sync(struct gicv3_its_softc *sc, struct its_cmd *cmd)
{

	if ((sc->sc_its_flags & ITS_FLAGS_CMDQ_FLUSH) != 0) {
		/* Clean D-cache under command. */
		cpu_dcache_wb_range(cmd, sizeof(*cmd));
	} else {
		/* DSB inner shareable, store */
		dsb(ishst);
	}
}

static inline uint64_t
its_cmd_cwriter_offset(struct gicv3_its_softc *sc, struct its_cmd *cmd)
{
	uint64_t off;

	off = (cmd - sc->sc_its_cmd_base) * sizeof(*cmd);

	return (off);
}

static void
its_cmd_wait_completion(device_t dev, struct its_cmd *cmd_first,
    struct its_cmd *cmd_last)
{
	struct gicv3_its_softc *sc;
	uint64_t first, last, read;
	size_t us_left;

	sc = device_get_softc(dev);

	/*
	 * XXX ARM64TODO: This is obviously a significant delay.
	 * The reason for that is that currently the time frames for
	 * the command to complete are not known.
	 */
	us_left = 1000000;

	first = its_cmd_cwriter_offset(sc, cmd_first);
	last = its_cmd_cwriter_offset(sc, cmd_last);

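	/*
	 * Poll GITS_CREADR until it leaves the busy region [first, last).
	 * If the region does not wrap, any read offset outside it means
	 * the commands have been consumed; if it wraps past the end of
	 * the queue, completion is instead a read offset in [last, first).
	 */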
	for (;;) {
		read = gic_its_read_8(sc, GITS_CREADR);
		if (first < last) {
			if (read < first || read >= last)
				break;
		} else if (read < first && read >= last)
			break;

		if (us_left-- == 0) {
			/* This means timeout */
			device_printf(dev,
			    "Timeout while waiting for CMD completion.\n");
			return;
		}
		DELAY(1);
	}
}

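/*
 * Reserve the command slot at the current write index.  The caller must
 * hold sc_its_cmd_lock.  May spin (bounded by a roughly one second
 * timeout) waiting for the ITS to advance GITS_CREADR when the ring is
 * full.
 */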
static struct its_cmd *
its_cmd_alloc_locked(device_t dev)
{
	struct gicv3_its_softc *sc;
	struct its_cmd *cmd;
	size_t us_left;

	sc = device_get_softc(dev);

	/*
	 * XXX ARM64TODO: This is obviously a significant delay.
	 * The reason for that is that currently the time frames for
	 * the command to complete (and therefore free the descriptor)
	 * are not known.
	 */
	us_left = 1000000;

	mtx_assert(&sc->sc_its_cmd_lock, MA_OWNED);
	while (its_cmd_queue_full(sc)) {
		if (us_left-- == 0) {
			/* Timeout while waiting for free command */
			device_printf(dev,
			    "Timeout while waiting for free command\n");
			return (NULL);
		}
		DELAY(1);
	}

	cmd = &sc->sc_its_cmd_base[sc->sc_its_cmd_next_idx];
	sc->sc_its_cmd_next_idx++;
	sc->sc_its_cmd_next_idx %= ITS_CMDQ_SIZE / sizeof(struct its_cmd);

	return (cmd);
}

static uint64_t
its_cmd_prepare(struct its_cmd *cmd, struct its_cmd_desc *desc)
{
	uint64_t target;
	uint8_t cmd_type;
	u_int size;

	cmd_type = desc->cmd_type;
	target = ITS_TARGET_NONE;

	switch (cmd_type) {
	case ITS_CMD_MOVI:	/* Move interrupt ID to another collection */
		target = desc->cmd_desc_movi.col->col_target;
		cmd_format_command(cmd, ITS_CMD_MOVI);
		cmd_format_id(cmd, desc->cmd_desc_movi.id);
		cmd_format_col(cmd, desc->cmd_desc_movi.col->col_id);
		cmd_format_devid(cmd, desc->cmd_desc_movi.its_dev->devid);
		break;
	case ITS_CMD_SYNC:	/* Wait for completion of previous commands */
		target = desc->cmd_desc_sync.col->col_target;
		cmd_format_command(cmd, ITS_CMD_SYNC);
		cmd_format_target(cmd, target);
		break;
	case ITS_CMD_MAPD:	/* Assign ITT to device */
		cmd_format_command(cmd, ITS_CMD_MAPD);
		cmd_format_itt(cmd, vtophys(desc->cmd_desc_mapd.its_dev->itt));
		/*
		 * Size describes the number of bits used to encode the
		 * interrupt IDs supported by the device, minus one.
		 * When the V (valid) bit is zero, this field should be
		 * written as zero.
		 */
		if (desc->cmd_desc_mapd.valid != 0) {
			size = fls(desc->cmd_desc_mapd.its_dev->lpis.lpi_num);
			size = MAX(1, size) - 1;
		} else
			size = 0;

		cmd_format_size(cmd, size);
		cmd_format_devid(cmd, desc->cmd_desc_mapd.its_dev->devid);
		cmd_format_valid(cmd, desc->cmd_desc_mapd.valid);
		break;
	case ITS_CMD_MAPC:	/* Map collection to Re-Distributor */
		target = desc->cmd_desc_mapc.col->col_target;
		cmd_format_command(cmd, ITS_CMD_MAPC);
		cmd_format_col(cmd, desc->cmd_desc_mapc.col->col_id);
		cmd_format_valid(cmd, desc->cmd_desc_mapc.valid);
		cmd_format_target(cmd, target);
		break;
	case ITS_CMD_MAPTI:
		target = desc->cmd_desc_mapvi.col->col_target;
		cmd_format_command(cmd, ITS_CMD_MAPTI);
		cmd_format_devid(cmd, desc->cmd_desc_mapvi.its_dev->devid);
		cmd_format_id(cmd, desc->cmd_desc_mapvi.id);
		cmd_format_pid(cmd, desc->cmd_desc_mapvi.pid);
		cmd_format_col(cmd, desc->cmd_desc_mapvi.col->col_id);
		break;
	case ITS_CMD_MAPI:
		target = desc->cmd_desc_mapi.col->col_target;
		cmd_format_command(cmd, ITS_CMD_MAPI);
		cmd_format_devid(cmd, desc->cmd_desc_mapi.its_dev->devid);
		cmd_format_id(cmd, desc->cmd_desc_mapi.pid);
		cmd_format_col(cmd, desc->cmd_desc_mapi.col->col_id);
		break;
	case ITS_CMD_INV:
		target = desc->cmd_desc_inv.col->col_target;
		cmd_format_command(cmd, ITS_CMD_INV);
		cmd_format_devid(cmd, desc->cmd_desc_inv.its_dev->devid);
		cmd_format_id(cmd, desc->cmd_desc_inv.pid);
		break;
	case ITS_CMD_INVALL:
		cmd_format_command(cmd, ITS_CMD_INVALL);
		cmd_format_col(cmd, desc->cmd_desc_invall.col->col_id);
		break;
	default:
		panic("its_cmd_prepare: Invalid command: %x", cmd_type);
	}

	return (target);
}

static int
its_cmd_send(device_t dev, struct its_cmd_desc *desc)
{
	struct gicv3_its_softc *sc;
	struct its_cmd *cmd, *cmd_sync, *cmd_write;
	struct its_col col_sync;
	struct its_cmd_desc desc_sync;
	uint64_t target, cwriter;

	sc = device_get_softc(dev);
	mtx_lock_spin(&sc->sc_its_cmd_lock);
	cmd = its_cmd_alloc_locked(dev);
	if (cmd == NULL) {
		device_printf(dev, "could not allocate ITS command\n");
		mtx_unlock_spin(&sc->sc_its_cmd_lock);
		return (EBUSY);
	}

	target = its_cmd_prepare(cmd, desc);
	its_cmd_sync(sc, cmd);

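	/*
	 * Chain a SYNC command targeting the same Redistributor, so that
	 * waiting for the queue to drain below also waits for the effects
	 * of the command to be observable at its target.
	 */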
	if (target != ITS_TARGET_NONE) {
		cmd_sync = its_cmd_alloc_locked(dev);
		if (cmd_sync != NULL) {
			desc_sync.cmd_type = ITS_CMD_SYNC;
			col_sync.col_target = target;
			desc_sync.cmd_desc_sync.col = &col_sync;
			its_cmd_prepare(cmd_sync, &desc_sync);
			its_cmd_sync(sc, cmd_sync);
		}
	}

	/* Update GITS_CWRITER */
	cwriter = sc->sc_its_cmd_next_idx * sizeof(struct its_cmd);
	gic_its_write_8(sc, GITS_CWRITER, cwriter);
	cmd_write = &sc->sc_its_cmd_base[sc->sc_its_cmd_next_idx];
	mtx_unlock_spin(&sc->sc_its_cmd_lock);

	its_cmd_wait_completion(dev, cmd, cmd_write);

	return (0);
}

/* Handlers to send commands */
static void
its_cmd_movi(device_t dev, struct gicv3_its_irqsrc *girq)
{
	struct gicv3_its_softc *sc;
	struct its_cmd_desc desc;
	struct its_col *col;

	sc = device_get_softc(dev);
	col = sc->sc_its_cols[CPU_FFS(&girq->gi_isrc.isrc_cpu) - 1];

	desc.cmd_type = ITS_CMD_MOVI;
	desc.cmd_desc_movi.its_dev = girq->gi_its_dev;
	desc.cmd_desc_movi.col = col;
	desc.cmd_desc_movi.id = girq->gi_id;

	its_cmd_send(dev, &desc);
}

static void
its_cmd_mapc(device_t dev, struct its_col *col, uint8_t valid)
{
	struct its_cmd_desc desc;

	desc.cmd_type = ITS_CMD_MAPC;
	desc.cmd_desc_mapc.col = col;
	/*
	 * Valid bit set - map the collection.
	 * Valid bit cleared - unmap the collection.
	 */
	desc.cmd_desc_mapc.valid = valid;

	its_cmd_send(dev, &desc);
}

static void
its_cmd_mapti(device_t dev, struct gicv3_its_irqsrc *girq)
{
	struct gicv3_its_softc *sc;
	struct its_cmd_desc desc;
	struct its_col *col;
	u_int col_id;

	sc = device_get_softc(dev);

	col_id = CPU_FFS(&girq->gi_isrc.isrc_cpu) - 1;
	col = sc->sc_its_cols[col_id];

	desc.cmd_type = ITS_CMD_MAPTI;
	desc.cmd_desc_mapvi.its_dev = girq->gi_its_dev;
	desc.cmd_desc_mapvi.col = col;
	/* The EventID sent to the device */
	desc.cmd_desc_mapvi.id = girq->gi_id;
	/* The physical interrupt presented to software */
	desc.cmd_desc_mapvi.pid = girq->gi_lpi + GIC_FIRST_LPI;

	its_cmd_send(dev, &desc);
}

static void
its_cmd_mapd(device_t dev, struct its_dev *its_dev, uint8_t valid)
{
	struct its_cmd_desc desc;

	desc.cmd_type = ITS_CMD_MAPD;
	desc.cmd_desc_mapd.its_dev = its_dev;
	desc.cmd_desc_mapd.valid = valid;

	its_cmd_send(dev, &desc);
}

static void
its_cmd_inv(device_t dev, struct its_dev *its_dev,
    struct gicv3_its_irqsrc *girq)
{
	struct gicv3_its_softc *sc;
	struct its_cmd_desc desc;
	struct its_col *col;

	sc = device_get_softc(dev);
	col = sc->sc_its_cols[CPU_FFS(&girq->gi_isrc.isrc_cpu) - 1];

	desc.cmd_type = ITS_CMD_INV;
	/* The EventID sent to the device */
	desc.cmd_desc_inv.pid = girq->gi_id;
	desc.cmd_desc_inv.its_dev = its_dev;
	desc.cmd_desc_inv.col = col;

	its_cmd_send(dev, &desc);
}

static void
its_cmd_invall(device_t dev, struct its_col *col)
{
	struct its_cmd_desc desc;

	desc.cmd_type = ITS_CMD_INVALL;
	desc.cmd_desc_invall.col = col;

	its_cmd_send(dev, &desc);
}

#ifdef FDT
static device_probe_t gicv3_its_fdt_probe;
static device_attach_t gicv3_its_fdt_attach;

static device_method_t gicv3_its_fdt_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		gicv3_its_fdt_probe),
	DEVMETHOD(device_attach,	gicv3_its_fdt_attach),

	/* End */
	DEVMETHOD_END
};

#define its_baseclasses its_fdt_baseclasses
DEFINE_CLASS_1(its, gicv3_its_fdt_driver, gicv3_its_fdt_methods,
    sizeof(struct gicv3_its_softc), gicv3_its_driver);
#undef its_baseclasses

EARLY_DRIVER_MODULE(its_fdt, gic, gicv3_its_fdt_driver, 0, 0,
    BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE);

static int
gicv3_its_fdt_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (!ofw_bus_is_compatible(dev, "arm,gic-v3-its"))
		return (ENXIO);

	if (!gicv3_get_support_lpis(dev))
		return (ENXIO);

	device_set_desc(dev, "ARM GIC Interrupt Translation Service");
	return (BUS_PROBE_DEFAULT);
}

static int
gicv3_its_fdt_attach(device_t dev)
{
	struct gicv3_its_softc *sc;
	phandle_t xref;
	int err;

	sc = device_get_softc(dev);
	sc->dev = dev;
	err = gicv3_its_attach(dev);
	if (err != 0)
		return (err);

	/* Register this device as an interrupt controller */
	xref = OF_xref_from_node(ofw_bus_get_node(dev));
	sc->sc_pic = intr_pic_register(dev, xref);
	err = intr_pic_add_handler(device_get_parent(dev), sc->sc_pic,
	    gicv3_its_intr, sc, sc->sc_irq_base, sc->sc_irq_length);
	if (err != 0) {
		device_printf(dev, "Failed to add PIC handler: %d\n", err);
		return (err);
	}

	/* Register this device to handle MSI interrupts */
	err = intr_msi_register(dev, xref);
	if (err != 0) {
		device_printf(dev, "Failed to register for MSIs: %d\n", err);
		return (err);
	}

	return (0);
}
#endif

#ifdef DEV_ACPI
static device_probe_t gicv3_its_acpi_probe;
static device_attach_t gicv3_its_acpi_attach;

static device_method_t gicv3_its_acpi_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		gicv3_its_acpi_probe),
	DEVMETHOD(device_attach,	gicv3_its_acpi_attach),

	/* End */
	DEVMETHOD_END
};

#define its_baseclasses its_acpi_baseclasses
DEFINE_CLASS_1(its, gicv3_its_acpi_driver, gicv3_its_acpi_methods,
    sizeof(struct gicv3_its_softc), gicv3_its_driver);
#undef its_baseclasses

EARLY_DRIVER_MODULE(its_acpi, gic, gicv3_its_acpi_driver, 0, 0,
    BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE);

static int
gicv3_its_acpi_probe(device_t dev)
{

	if (gic_get_bus(dev) != GIC_BUS_ACPI)
		return (EINVAL);

	if (gic_get_hw_rev(dev) < 3)
		return (EINVAL);

	if (!gicv3_get_support_lpis(dev))
		return (ENXIO);

	device_set_desc(dev, "ARM GIC Interrupt Translation Service");
	return (BUS_PROBE_DEFAULT);
}

static int
gicv3_its_acpi_attach(device_t dev)
{
	struct gicv3_its_softc *sc;
	struct gic_v3_devinfo *di;
	int err;

	sc = device_get_softc(dev);
	sc->dev = dev;
	err = gicv3_its_attach(dev);
	if (err != 0)
		return (err);

	di = device_get_ivars(dev);
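	/* Register this device as an interrupt controller */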
	sc->sc_pic = intr_pic_register(dev, di->msi_xref);
	err = intr_pic_add_handler(device_get_parent(dev), sc->sc_pic,
	    gicv3_its_intr, sc, sc->sc_irq_base, sc->sc_irq_length);
	if (err != 0) {
		device_printf(dev, "Failed to add PIC handler: %d\n", err);
		return (err);
	}

	/* Register this device to handle MSI interrupts */
	err = intr_msi_register(dev, di->msi_xref);
	if (err != 0) {
		device_printf(dev, "Failed to register for MSIs: %d\n", err);
		return (err);
	}

	return (0);
}
#endif