/*-
 * Copyright (c) 2015-2016 The FreeBSD Foundation
 *
 * This software was developed by Andrew Turner under
 * the sponsorship of the FreeBSD Foundation.
 *
 * This software was developed by Semihalf under
 * the sponsorship of the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Driver for the ARM GICv3 Interrupt Translation Service (ITS).
 * The ITS translates (DeviceID, EventID) pairs written to GITS_TRANSLATER
 * into LPIs delivered through the GICv3 redistributors; this file provides
 * the INTRNG PIC and MSI/MSI-X interfaces on top of that.
 */

#include "opt_acpi.h"
#include "opt_platform.h"
#include "opt_iommu.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cpuset.h>
#include <sys/domainset.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/taskqueue.h>
#include <sys/tree.h>
#include <sys/queue.h>
#include <sys/rman.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/vmem.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_page.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <arm/arm/gic_common.h>
#include <arm64/arm64/gic_v3_reg.h>
#include <arm64/arm64/gic_v3_var.h>

#ifdef FDT
#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#endif
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#ifdef IOMMU
#include <dev/iommu/iommu.h>
#include <dev/iommu/iommu_gas.h>
#endif

#include "pcib_if.h"
#include "pic_if.h"
#include "msi_if.h"

MALLOC_DEFINE(M_GICV3_ITS, "GICv3 ITS",
    "ARM GICv3 Interrupt Translation Service");

/* Number of LPIs managed by this driver */
#define	LPI_NIRQS		(64 * 1024)

/* The size and alignment of the command circular buffer */
#define	ITS_CMDQ_SIZE		(64 * 1024)	/* Must be a multiple of 4K */
#define	ITS_CMDQ_ALIGN		(64 * 1024)

/* One configuration byte per LPI */
#define	LPI_CONFTAB_SIZE	LPI_NIRQS
#define	LPI_CONFTAB_ALIGN	(64 * 1024)
/* Table must sit below this physical address (48-bit PA limit) */
#define	LPI_CONFTAB_MAX_ADDR	((1ul << 48) - 1)

/* 1 bit per SPI, PPI, and SGI (8k), and 1 bit per LPI (LPI_CONFTAB_SIZE) */
#define	LPI_PENDTAB_SIZE	((LPI_NIRQS + GIC_FIRST_LPI) / 8)
#define	LPI_PENDTAB_ALIGN	(64 * 1024)
/* Pending table must also be below the 48-bit PA limit */
#define	LPI_PENDTAB_MAX_ADDR	((1ul << 48) - 1)

/* Per-device Interrupt Translation Table placement constraints */
#define	LPI_INT_TRANS_TAB_ALIGN		256
#define	LPI_INT_TRANS_TAB_MAX_ADDR	((1ul << 48) - 1)

/* ITS commands encoding */
#define	ITS_CMD_MOVI		(0x01)
#define	ITS_CMD_SYNC		(0x05)
#define	ITS_CMD_MAPD		(0x08)
#define	ITS_CMD_MAPC		(0x09)
#define	ITS_CMD_MAPTI		(0x0a)
#define	ITS_CMD_MAPI		(0x0b)
#define	ITS_CMD_INV		(0x0c)
#define	ITS_CMD_INVALL		(0x0d)
/* Command */
#define	CMD_COMMAND_MASK	(0xFFUL)
/* PCI device ID */
#define	CMD_DEVID_SHIFT		(32)
#define	CMD_DEVID_MASK		(0xFFFFFFFFUL << CMD_DEVID_SHIFT)
/* Size of IRQ ID bitfield */
#define	CMD_SIZE_MASK		(0xFFUL)
/* Virtual LPI ID */
#define	CMD_ID_MASK		(0xFFFFFFFFUL)
/* Physical LPI ID */
#define	CMD_PID_SHIFT		(32)
#define	CMD_PID_MASK		(0xFFFFFFFFUL << CMD_PID_SHIFT)
/* Collection */
#define	CMD_COL_MASK		(0xFFFFUL)
/* Target (CPU or Re-Distributor) */
#define	CMD_TARGET_SHIFT	(16)
#define	CMD_TARGET_MASK		(0xFFFFFFFFUL << CMD_TARGET_SHIFT)
/* Interrupt Translation Table address */
#define	CMD_ITT_MASK		(0xFFFFFFFFFF00UL)
/* Valid command bit */
#define	CMD_VALID_SHIFT		(63)
#define	CMD_VALID_MASK		(1UL << CMD_VALID_SHIFT)

/* Sentinel used before an irqsrc has been bound to a collection */
#define	ITS_TARGET_NONE		0xFBADBEEF

/* LPI chunk owned by ITS device */
struct lpi_chunk {
	u_int	lpi_base;	/* First LPI ID in this chunk */
	u_int	lpi_free;	/* First free LPI in set */
	u_int	lpi_num;	/* Total number of LPIs in chunk */
	u_int	lpi_busy;	/* Number of busy LPIs in chunk */
};

/* ITS device */
struct its_dev {
	TAILQ_ENTRY(its_dev)	entry;
	/* PCI device */
	device_t		pci_dev;
	/* Device ID (i.e. PCI device ID) */
	uint32_t		devid;
	/* List of assigned LPIs */
	struct lpi_chunk	lpis;
	/* Virtual address of ITT */
	vm_offset_t		itt;
	/* Size in bytes of the ITT allocation */
	size_t			itt_size;
};

/*
 * ITS command descriptor.
 * Idea for command description passing taken from Linux.
 */
struct its_cmd_desc {
	uint8_t cmd_type;	/* One of the ITS_CMD_* encodings above */

	union {
		struct {
			struct its_dev *its_dev;
			struct its_col *col;
			uint32_t id;
		} cmd_desc_movi;

		struct {
			struct its_col *col;
		} cmd_desc_sync;

		struct {
			struct its_col *col;
			uint8_t valid;
		} cmd_desc_mapc;

		struct {
			struct its_dev *its_dev;
			struct its_col *col;
			uint32_t pid;
			uint32_t id;
		} cmd_desc_mapvi;

		struct {
			struct its_dev *its_dev;
			struct its_col *col;
			uint32_t pid;
		} cmd_desc_mapi;

		struct {
			struct its_dev *its_dev;
			uint8_t valid;
		} cmd_desc_mapd;

		struct {
			struct its_dev *its_dev;
			struct its_col *col;
			uint32_t pid;
		} cmd_desc_inv;

		struct {
			struct its_col *col;
		} cmd_desc_invall;
	};
};

/* ITS command. Each command is 32 bytes long */
struct its_cmd {
	uint64_t	cmd_dword[4];	/* ITS command double word */
};

/* An ITS private table */
struct its_ptable {
	vm_offset_t	ptab_vaddr;	/* KVA of the table memory */
	unsigned long	ptab_size;	/* Size in bytes */
};

/* ITS collection description. */
struct its_col {
	uint64_t	col_target;	/* Target Re-Distributor */
	uint64_t	col_id;		/* Collection ID */
};

/* Per-interrupt source state layered on the generic INTRNG irqsrc */
struct gicv3_its_irqsrc {
	struct intr_irqsrc	gi_isrc;
	u_int			gi_id;		/* EventID within the device */
	u_int			gi_lpi;		/* LPI number (0-based) */
	struct its_dev		*gi_its_dev;	/* Owning ITS device */
	TAILQ_ENTRY(gicv3_its_irqsrc) gi_link;
};

struct gicv3_its_softc {
	device_t	dev;
	struct intr_pic *sc_pic;
	struct resource *sc_its_res;	/* GITS register window */

	cpuset_t	sc_cpus;	/* CPUs this ITS may target */
	struct domainset *sc_ds;	/* NUMA domain for allocations */
	u_int		gic_irq_cpu;	/* Last CPU used for round-robin */

	struct its_ptable sc_its_ptab[GITS_BASER_NUM];
	struct its_col *sc_its_cols[MAXCPU];	/* Per-CPU collections */

	/*
	 * TODO: We should get these from the parent as we only want a
	 * single copy of each across the interrupt controller.
	 */
	uint8_t		*sc_conf_base;		/* LPI configuration table */
	vm_offset_t	sc_pend_base[MAXCPU];	/* Per-CPU pending tables */

	/* Command handling */
	struct mtx sc_its_cmd_lock;
	struct its_cmd *sc_its_cmd_base; /* Command circular buffer address */
	size_t sc_its_cmd_next_idx;

	vmem_t *sc_irq_alloc;		/* Allocator for LPI IRQ numbers */
	struct gicv3_its_irqsrc	**sc_irqs;
	u_int	sc_irq_base;
	u_int	sc_irq_length;
	u_int	sc_irq_count;

	struct mtx sc_its_dev_lock;
	TAILQ_HEAD(its_dev_list, its_dev) sc_its_dev_list;
	TAILQ_HEAD(free_irqs, gicv3_its_irqsrc) sc_free_irqs;

#define	ITS_FLAGS_CMDQ_FLUSH		0x00000001
#define	ITS_FLAGS_LPI_CONF_FLUSH	0x00000002
#define	ITS_FLAGS_ERRATA_CAVIUM_22375	0x00000004
	u_int sc_its_flags;
	bool	trace_enable;
	vm_page_t ma; /* fake msi page */
};

/* LPI configuration table shared between all ITS instances */
static void *conf_base;

typedef void (its_quirk_func_t)(device_t);
static its_quirk_func_t its_quirk_cavium_22375;

/* Hardware quirks matched against GITS_IIDR at attach time */
static const struct {
	const char *desc;
	uint32_t iidr;
	uint32_t iidr_mask;
	its_quirk_func_t *func;
} its_quirks[] = {
	{
		/* Cavium ThunderX Pass 1.x */
		.desc = "Cavium ThunderX errata: 22375, 24313",
		.iidr = GITS_IIDR_RAW(GITS_IIDR_IMPL_CAVIUM,
		    GITS_IIDR_PROD_THUNDER, GITS_IIDR_VAR_THUNDER_1, 0),
		.iidr_mask = ~GITS_IIDR_REVISION_MASK,
		.func = its_quirk_cavium_22375,
	},
};

/* Accessors for the memory-mapped GITS register window */
#define	gic_its_read_4(sc, reg)			\
    bus_read_4((sc)->sc_its_res, (reg))
#define	gic_its_read_8(sc, reg)			\
    bus_read_8((sc)->sc_its_res, (reg))

#define	gic_its_write_4(sc, reg, val)		\
    bus_write_4((sc)->sc_its_res, (reg), (val))
#define	gic_its_write_8(sc, reg, val)		\
    bus_write_8((sc)->sc_its_res, (reg), (val))

static device_attach_t gicv3_its_attach;
static device_detach_t gicv3_its_detach;

static pic_disable_intr_t gicv3_its_disable_intr;
static pic_enable_intr_t gicv3_its_enable_intr;
static pic_map_intr_t gicv3_its_map_intr;
static pic_setup_intr_t gicv3_its_setup_intr;
static pic_post_filter_t gicv3_its_post_filter;
static pic_post_ithread_t gicv3_its_post_ithread;
static pic_pre_ithread_t gicv3_its_pre_ithread;
static pic_bind_intr_t gicv3_its_bind_intr;
#ifdef SMP
static pic_init_secondary_t gicv3_its_init_secondary;
#endif
static msi_alloc_msi_t gicv3_its_alloc_msi;
static msi_release_msi_t gicv3_its_release_msi;
static msi_alloc_msix_t gicv3_its_alloc_msix;
static msi_release_msix_t gicv3_its_release_msix;
static msi_map_msi_t gicv3_its_map_msi;
#ifdef IOMMU
static msi_iommu_init_t gicv3_iommu_init;
static msi_iommu_deinit_t gicv3_iommu_deinit;
#endif

/* ITS command builders, implemented later in this file */
static void its_cmd_movi(device_t, struct gicv3_its_irqsrc *);
static void its_cmd_mapc(device_t, struct its_col *, uint8_t);
static void its_cmd_mapti(device_t, struct gicv3_its_irqsrc *);
static void its_cmd_mapd(device_t, struct its_dev *, uint8_t);
static void its_cmd_inv(device_t, struct its_dev *, struct gicv3_its_irqsrc *);
static void its_cmd_invall(device_t, struct its_col *);

static device_method_t gicv3_its_methods[] = {
	/* Device interface */
	DEVMETHOD(device_detach,	gicv3_its_detach),

	/* Interrupt controller interface */
	DEVMETHOD(pic_disable_intr,	gicv3_its_disable_intr),
	DEVMETHOD(pic_enable_intr,	gicv3_its_enable_intr),
	DEVMETHOD(pic_map_intr,		gicv3_its_map_intr),
	DEVMETHOD(pic_setup_intr,	gicv3_its_setup_intr),
	DEVMETHOD(pic_post_filter,	gicv3_its_post_filter),
	DEVMETHOD(pic_post_ithread,	gicv3_its_post_ithread),
	DEVMETHOD(pic_pre_ithread,	gicv3_its_pre_ithread),
#ifdef SMP
	DEVMETHOD(pic_bind_intr,	gicv3_its_bind_intr),
	DEVMETHOD(pic_init_secondary,	gicv3_its_init_secondary),
#endif

	/* MSI/MSI-X */
	DEVMETHOD(msi_alloc_msi,	gicv3_its_alloc_msi),
	DEVMETHOD(msi_release_msi,	gicv3_its_release_msi),
	DEVMETHOD(msi_alloc_msix,	gicv3_its_alloc_msix),
	DEVMETHOD(msi_release_msix,	gicv3_its_release_msix),
	DEVMETHOD(msi_map_msi,		gicv3_its_map_msi),
#ifdef IOMMU
	DEVMETHOD(msi_iommu_init,	gicv3_iommu_init),
	DEVMETHOD(msi_iommu_deinit,	gicv3_iommu_deinit),
#endif

	/* End */
	DEVMETHOD_END
};

static DEFINE_CLASS_0(gic, gicv3_its_driver, gicv3_its_methods,
    sizeof(struct gicv3_its_softc));

/*
 * Allocate the ITS command circular buffer and program its base address
 * into GITS_CBASER.  If the hardware downgrades the requested Inner
 * Shareable attribute, fall back to non-cacheable/non-shareable and
 * remember that the queue must be cache-flushed after each command.
 */
static void
gicv3_its_cmdq_init(struct gicv3_its_softc *sc)
{
	vm_paddr_t cmd_paddr;
	uint64_t reg, tmp;

	/* Set up the command circular buffer */
	sc->sc_its_cmd_base = contigmalloc_domainset(ITS_CMDQ_SIZE, M_GICV3_ITS,
	    sc->sc_ds, M_WAITOK | M_ZERO, 0, (1ul << 48) - 1, ITS_CMDQ_ALIGN,
	    0);
	sc->sc_its_cmd_next_idx = 0;

	cmd_paddr = vtophys(sc->sc_its_cmd_base);

	/* Set the base of the command buffer */
	reg = GITS_CBASER_VALID |
	    (GITS_CBASER_CACHE_NIWAWB << GITS_CBASER_CACHE_SHIFT) |
	    cmd_paddr | (GITS_CBASER_SHARE_IS << GITS_CBASER_SHARE_SHIFT) |
	    (ITS_CMDQ_SIZE / 4096 - 1);	/* Size field is in 4KB pages - 1 */
	gic_its_write_8(sc, GITS_CBASER, reg);

	/* Read back to check for fixed value fields */
	tmp = gic_its_read_8(sc, GITS_CBASER);

	if ((tmp & GITS_CBASER_SHARE_MASK) !=
	    (GITS_CBASER_SHARE_IS << GITS_CBASER_SHARE_SHIFT)) {
		/* Check if the hardware reported non-shareable */
		if ((tmp & GITS_CBASER_SHARE_MASK) ==
		    (GITS_CBASER_SHARE_NS << GITS_CBASER_SHARE_SHIFT)) {
			/* If so remove the cache attribute */
			reg &= ~GITS_CBASER_CACHE_MASK;
			reg &= ~GITS_CBASER_SHARE_MASK;
			/* Set to Non-cacheable, Non-shareable */
			reg |= GITS_CBASER_CACHE_NIN << GITS_CBASER_CACHE_SHIFT;
			reg |= GITS_CBASER_SHARE_NS << GITS_CBASER_SHARE_SHIFT;

			gic_its_write_8(sc, GITS_CBASER, reg);
		}

		/* The command queue has to be flushed after each command */
		sc->sc_its_flags |= ITS_FLAGS_CMDQ_FLUSH;
	}

	/* Get the next command from the start of the buffer */
	gic_its_write_8(sc, GITS_CWRITER, 0x0);
}

static int
gicv3_its_table_init(device_t dev, struct gicv3_its_softc *sc)
{
	vm_offset_t table;
	vm_paddr_t paddr;
	uint64_t cache, reg, share, tmp, type;
	size_t esize, its_tbl_size, nidents, nitspages, npages;
	int i, page_size;
	int devbits;

	if ((sc->sc_its_flags & ITS_FLAGS_ERRATA_CAVIUM_22375) != 0) {
		/*
		 * GITS_TYPER[17:13] of ThunderX reports that device IDs
		 * are to be 21 bits in length. The entry size of the ITS
		 * table can be read from GITS_BASERn[52:48] and on ThunderX
		 * is supposed to be 8 bytes in length (for device table).
		 * Finally the page size that is to be used by ITS to access
		 * this table will be set to 64KB.
		 *
		 * This gives 0x200000 entries of size 0x8 bytes covered by
		 * 256 pages each of which 64KB in size. The number of pages
		 * (minus 1) should then be written to GITS_BASERn[7:0]. In
		 * that case this value would be 0xFF but on ThunderX the
		 * maximum value that HW accepts is 0xFD.
		 *
		 * Set an arbitrary number of device ID bits to 20 in order
		 * to limit the number of entries in ITS device table to
		 * 0x100000 and the table size to 8MB.
		 */
		devbits = 20;
		cache = 0;
	} else {
		devbits = GITS_TYPER_DEVB(gic_its_read_8(sc, GITS_TYPER));
		cache = GITS_BASER_CACHE_WAWB;
	}
	share = GITS_BASER_SHARE_IS;
	page_size = PAGE_SIZE_64K;

	/* Allocate and program each private table the ITS advertises */
	for (i = 0; i < GITS_BASER_NUM; i++) {
		reg = gic_its_read_8(sc, GITS_BASER(i));
		/* The type of table */
		type = GITS_BASER_TYPE(reg);
		/* The table entry size */
		esize = GITS_BASER_ESIZE(reg);

		switch(type) {
		case GITS_BASER_TYPE_DEV:
			/* Device table: one entry per possible DeviceID */
			nidents = (1 << devbits);
			its_tbl_size = esize * nidents;
			its_tbl_size = roundup2(its_tbl_size, PAGE_SIZE_64K);
			break;
		case GITS_BASER_TYPE_VP:
		case GITS_BASER_TYPE_PP: /* Undocumented? */
		case GITS_BASER_TYPE_IC:
			its_tbl_size = page_size;
			break;
		default:
			/* Unimplemented/unknown table type: leave untouched */
			continue;
		}
		npages = howmany(its_tbl_size, PAGE_SIZE);

		/* Allocate the table */
		table = (vm_offset_t)contigmalloc_domainset(npages * PAGE_SIZE,
		    M_GICV3_ITS, sc->sc_ds, M_WAITOK | M_ZERO, 0,
		    (1ul << 48) - 1, PAGE_SIZE_64K, 0);

		sc->sc_its_ptab[i].ptab_vaddr = table;
		sc->sc_its_ptab[i].ptab_size = npages * PAGE_SIZE;

		paddr = vtophys(table);

		/*
		 * Program GITS_BASER(i), reading it back to see which
		 * attributes the hardware actually accepted; retry with
		 * downgraded shareability or a smaller ITS page size until
		 * the read-back matches what we wrote.
		 */
		while (1) {
			nitspages = howmany(its_tbl_size, page_size);

			/* Clear the fields we will be setting */
			reg &= ~(GITS_BASER_VALID | GITS_BASER_INDIRECT |
			    GITS_BASER_CACHE_MASK | GITS_BASER_TYPE_MASK |
			    GITS_BASER_ESIZE_MASK | GITS_BASER_PA_MASK |
			    GITS_BASER_SHARE_MASK | GITS_BASER_PSZ_MASK |
			    GITS_BASER_SIZE_MASK);
			/* Set the new values */
			reg |= GITS_BASER_VALID |
			    (cache << GITS_BASER_CACHE_SHIFT) |
			    (type << GITS_BASER_TYPE_SHIFT) |
			    ((esize - 1) << GITS_BASER_ESIZE_SHIFT) |
			    paddr | (share << GITS_BASER_SHARE_SHIFT) |
			    (nitspages - 1);

			switch (page_size) {
			case PAGE_SIZE_4K:	/* 4KB */
				reg |=
				    GITS_BASER_PSZ_4K << GITS_BASER_PSZ_SHIFT;
				break;
			case PAGE_SIZE_16K:	/* 16KB */
				reg |=
				    GITS_BASER_PSZ_16K << GITS_BASER_PSZ_SHIFT;
				break;
			case PAGE_SIZE_64K:	/* 64KB */
				reg |=
				    GITS_BASER_PSZ_64K << GITS_BASER_PSZ_SHIFT;
				break;
			}

			gic_its_write_8(sc, GITS_BASER(i), reg);

			/* Read back to check */
			tmp = gic_its_read_8(sc, GITS_BASER(i));

			/* Do the shareability masks line up? */
			if ((tmp & GITS_BASER_SHARE_MASK) !=
			    (reg & GITS_BASER_SHARE_MASK)) {
				/* Retry with what the hardware reported */
				share = (tmp & GITS_BASER_SHARE_MASK) >>
				    GITS_BASER_SHARE_SHIFT;
				continue;
			}

			if ((tmp & GITS_BASER_PSZ_MASK) !=
			    (reg & GITS_BASER_PSZ_MASK)) {
				/* Step down to the next smaller page size */
				switch (page_size) {
				case PAGE_SIZE_16K:
					page_size = PAGE_SIZE_4K;
					continue;
				case PAGE_SIZE_64K:
					page_size = PAGE_SIZE_16K;
					continue;
				}
			}

			if (tmp != reg) {
				device_printf(dev, "GITS_BASER%d: "
				    "unable to be updated: %lx != %lx\n",
				    i, reg, tmp);
				return (ENXIO);
			}

			/* We should have made all needed changes */
			break;
		}
	}

	return (0);
}

/*
 * Set up the LPI configuration table (one byte of priority/enable state per
 * LPI).  The table is shared between all ITS instances; the first caller
 * allocates it and publishes it via a lock-free compare-and-set on the
 * global conf_base, losers free their copy and adopt the winner's.
 */
static void
gicv3_its_conftable_init(struct gicv3_its_softc *sc)
{
	void *conf_table;

	conf_table = atomic_load_ptr(&conf_base);
	if (conf_table == NULL) {
		conf_table = contigmalloc(LPI_CONFTAB_SIZE,
		    M_GICV3_ITS, M_WAITOK, 0, LPI_CONFTAB_MAX_ADDR,
		    LPI_CONFTAB_ALIGN, 0);

		if (atomic_cmpset_ptr((uintptr_t *)&conf_base,
		    (uintptr_t)NULL, (uintptr_t)conf_table) == 0) {
			/* Another ITS beat us to it; use its table */
			contigfree(conf_table, LPI_CONFTAB_SIZE, M_GICV3_ITS);
			conf_table = atomic_load_ptr(&conf_base);
		}
	}
	sc->sc_conf_base = conf_table;

	/* Set the default configuration */
	memset(sc->sc_conf_base, GIC_PRIORITY_MAX | LPI_CONF_GROUP1,
	    LPI_CONFTAB_SIZE);

	/* Flush the table to memory */
	cpu_dcache_wb_range((vm_offset_t)sc->sc_conf_base, LPI_CONFTAB_SIZE);
}

/*
 * Allocate a per-CPU LPI pending table for each CPU this ITS may target
 * and flush it so the (possibly non-coherent) ITS sees the zeroed memory.
 */
static void
gicv3_its_pendtables_init(struct gicv3_its_softc *sc)
{
	int i;

	for (i = 0; i <= mp_maxid; i++) {
		if (CPU_ISSET(i, &sc->sc_cpus) == 0)
			continue;

		sc->sc_pend_base[i] = (vm_offset_t)contigmalloc(
		    LPI_PENDTAB_SIZE, M_GICV3_ITS, M_WAITOK | M_ZERO,
		    0, LPI_PENDTAB_MAX_ADDR, LPI_PENDTAB_ALIGN, 0);

		/* Flush so the ITS can see the memory */
		cpu_dcache_wb_range((vm_offset_t)sc->sc_pend_base[i],
		    LPI_PENDTAB_SIZE);
	}
}

/*
 * Program this CPU's redistributor for LPI delivery: point GICR_PROPBASER
 * at the shared LPI configuration table and GICR_PENDBASER at this CPU's
 * pending table, downgrading cacheability/shareability if the hardware
 * rejects the requested attributes, then enable LPIs in GICR_CTLR.
 */
static void
its_init_cpu_lpi(device_t dev, struct gicv3_its_softc *sc)
{
	device_t gicv3;
	uint64_t xbaser, tmp;
	uint32_t ctlr;
	u_int cpuid;

	gicv3 = device_get_parent(dev);
	cpuid = PCPU_GET(cpuid);

	/* Disable LPIs */
	ctlr = gic_r_read_4(gicv3, GICR_CTLR);
	ctlr &= ~GICR_CTLR_LPI_ENABLE;
	gic_r_write_4(gicv3, GICR_CTLR, ctlr);

	/* Make sure changes are observable by the GIC */
	dsb(sy);

	/*
	 * Set the redistributor base
	 */
	xbaser = vtophys(sc->sc_conf_base) |
	    (GICR_PROPBASER_SHARE_IS << GICR_PROPBASER_SHARE_SHIFT) |
	    (GICR_PROPBASER_CACHE_NIWAWB << GICR_PROPBASER_CACHE_SHIFT) |
	    (flsl(LPI_CONFTAB_SIZE | GIC_FIRST_LPI) - 1);
	gic_r_write_8(gicv3, GICR_PROPBASER, xbaser);

	/* Check the cache attributes we set */
	tmp = gic_r_read_8(gicv3, GICR_PROPBASER);

	if ((tmp & GICR_PROPBASER_SHARE_MASK) !=
	    (xbaser & GICR_PROPBASER_SHARE_MASK)) {
		if ((tmp & GICR_PROPBASER_SHARE_MASK) ==
		    (GICR_PROPBASER_SHARE_NS << GICR_PROPBASER_SHARE_SHIFT)) {
			/* We need to mark as non-cacheable */
			xbaser &= ~(GICR_PROPBASER_SHARE_MASK |
			    GICR_PROPBASER_CACHE_MASK);
			/* Non-cacheable */
			xbaser |= GICR_PROPBASER_CACHE_NIN <<
			    GICR_PROPBASER_CACHE_SHIFT;
			/* Non-shareable */
			xbaser |= GICR_PROPBASER_SHARE_NS <<
			    GICR_PROPBASER_SHARE_SHIFT;
			gic_r_write_8(gicv3, GICR_PROPBASER, xbaser);
		}
		/* Config table is not coherent; flush after each update */
		sc->sc_its_flags |= ITS_FLAGS_LPI_CONF_FLUSH;
	}

	/*
	 * Set the LPI pending table base
	 */
	xbaser = vtophys(sc->sc_pend_base[cpuid]) |
	    (GICR_PENDBASER_CACHE_NIWAWB << GICR_PENDBASER_CACHE_SHIFT) |
	    (GICR_PENDBASER_SHARE_IS << GICR_PENDBASER_SHARE_SHIFT);

	gic_r_write_8(gicv3, GICR_PENDBASER, xbaser);

	tmp = gic_r_read_8(gicv3, GICR_PENDBASER);

	if ((tmp & GICR_PENDBASER_SHARE_MASK) ==
	    (GICR_PENDBASER_SHARE_NS << GICR_PENDBASER_SHARE_SHIFT)) {
		/* Clear the cache and shareability bits */
		xbaser &= ~(GICR_PENDBASER_CACHE_MASK |
		    GICR_PENDBASER_SHARE_MASK);
		/* Mark as non-shareable */
		xbaser |= GICR_PENDBASER_SHARE_NS << GICR_PENDBASER_SHARE_SHIFT;
		/* And non-cacheable */
		xbaser |= GICR_PENDBASER_CACHE_NIN <<
		    GICR_PENDBASER_CACHE_SHIFT;
		/*
		 * NOTE(review): the adjusted xbaser is not written back to
		 * GICR_PENDBASER here, unlike the GICR_PROPBASER fallback
		 * above -- confirm this is intentional.
		 */
	}

	/* Enable LPIs */
	ctlr = gic_r_read_4(gicv3, GICR_CTLR);
	ctlr |= GICR_CTLR_LPI_ENABLE;
	gic_r_write_4(gicv3, GICR_CTLR, ctlr);

	/* Make sure the GIC has seen everything */
	dsb(sy);
}

/*
 * Per-CPU ITS initialisation: set up LPI delivery on this CPU's
 * redistributor (once) and map a collection for this CPU so the ITS can
 * route interrupts to it.  Returns 0 on success, ENXIO if the
 * redistributor does not support physical LPIs.
 */
static int
its_init_cpu(device_t dev, struct gicv3_its_softc *sc)
{
	device_t gicv3;
	vm_paddr_t target;
	u_int cpuid;
	struct redist_pcpu *rpcpu;

	gicv3 = device_get_parent(dev);
	cpuid = PCPU_GET(cpuid);
	if (!CPU_ISSET(cpuid, &sc->sc_cpus))
		return (0);

	/* Check if the ITS is enabled on this CPU */
	if ((gic_r_read_8(gicv3, GICR_TYPER) & GICR_TYPER_PLPIS) == 0)
		return (ENXIO);

	rpcpu = gicv3_get_redist(dev);

	/* Do per-cpu LPI init once */
	if (!rpcpu->lpi_enabled) {
		its_init_cpu_lpi(dev, sc);
		rpcpu->lpi_enabled = true;
	}

	if ((gic_its_read_8(sc, GITS_TYPER) & GITS_TYPER_PTA) != 0) {
		/* This ITS wants the redistributor physical address */
		target = vtophys(rman_get_virtual(&rpcpu->res));
	} else {
		/* This ITS wants the unique processor number */
		target = GICR_TYPER_CPUNUM(gic_r_read_8(gicv3, GICR_TYPER)) <<
		    CMD_TARGET_SHIFT;
	}

	sc->sc_its_cols[cpuid]->col_target = target;
	sc->sc_its_cols[cpuid]->col_id = cpuid;

	/* Map the collection and invalidate any cached LPI configuration */
	its_cmd_mapc(dev, sc->sc_its_cols[cpuid], 1);
	its_cmd_invall(dev, sc->sc_its_cols[cpuid]);

	return (0);
}

/*
 * Sysctl handler toggling the (GIC-500 specific) message tracing
 * facility via GITS_TRKCTLR.
 */
static int
gicv3_its_sysctl_trace_enable(SYSCTL_HANDLER_ARGS)
{
	struct gicv3_its_softc *sc;
	int rv;

	sc = arg1;

	rv = sysctl_handle_bool(oidp, &sc->trace_enable, 0, req);
	if (rv != 0 || req->newptr == NULL)
		return (rv);
	if (sc->trace_enable)
		gic_its_write_8(sc, GITS_TRKCTLR, 3);
	else
		gic_its_write_8(sc, GITS_TRKCTLR, 0);

	return (0);
}

/*
 * Sysctl handler dumping the captured tracing registers as a string.
 */
static int
gicv3_its_sysctl_trace_regs(SYSCTL_HANDLER_ARGS)
{
	struct gicv3_its_softc *sc;
	struct sbuf *sb;
	int err;

	sc = arg1;
	sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (sb == NULL) {
		device_printf(sc->dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}
	sbuf_cat(sb, "\n");
	sbuf_printf(sb, "GITS_TRKCTLR: 0x%08X\n",
	    gic_its_read_4(sc, GITS_TRKCTLR));
	sbuf_printf(sb, "GITS_TRKR:    0x%08X\n",
	    gic_its_read_4(sc, GITS_TRKR));
	sbuf_printf(sb, "GITS_TRKDIDR: 0x%08X\n",
	    gic_its_read_4(sc, GITS_TRKDIDR));
	sbuf_printf(sb, "GITS_TRKPIDR: 0x%08X\n",
	    gic_its_read_4(sc, GITS_TRKPIDR));
	sbuf_printf(sb, "GITS_TRKVIDR: 0x%08X\n",
	    gic_its_read_4(sc, GITS_TRKVIDR));
	sbuf_printf(sb, "GITS_TRKTGTR: 0x%08X\n",
	    gic_its_read_4(sc, GITS_TRKTGTR));

	err = sbuf_finish(sb);
	if (err)
		device_printf(sc->dev, "Error finishing sbuf: %d\n", err);
	sbuf_delete(sb);
	return(err);
}

/*
 * Create the "tracing" sysctl subtree with the enable/capture handlers
 * above.  Only called for GIC-500 implementations.
 */
static int
gicv3_its_init_sysctl(struct gicv3_its_softc *sc)
{
	struct sysctl_oid *oid, *child;
	struct sysctl_ctx_list *ctx_list;

	ctx_list = device_get_sysctl_ctx(sc->dev);
	child = device_get_sysctl_tree(sc->dev);
	oid = SYSCTL_ADD_NODE(ctx_list,
	    SYSCTL_CHILDREN(child), OID_AUTO, "tracing",
	    CTLFLAG_RD| CTLFLAG_MPSAFE, NULL, "Messages tracing");
	if (oid == NULL)
		return (ENXIO);

	/* Add registers */
	SYSCTL_ADD_PROC(ctx_list,
	    SYSCTL_CHILDREN(oid), OID_AUTO, "enable",
	    CTLTYPE_U8 | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
	    gicv3_its_sysctl_trace_enable, "CU", "Enable tracing");
	SYSCTL_ADD_PROC(ctx_list,
	    SYSCTL_CHILDREN(oid), OID_AUTO, "capture",
	    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
	    gicv3_its_sysctl_trace_regs, "", "Captured tracing registers.");

	return
(0); 820 } 821 822 static int 823 gicv3_its_attach(device_t dev) 824 { 825 struct gicv3_its_softc *sc; 826 int domain, err, i, rid; 827 uint64_t phys; 828 uint32_t ctlr, iidr; 829 830 sc = device_get_softc(dev); 831 832 sc->sc_irq_length = gicv3_get_nirqs(dev); 833 sc->sc_irq_base = GIC_FIRST_LPI; 834 sc->sc_irq_base += device_get_unit(dev) * sc->sc_irq_length; 835 836 rid = 0; 837 sc->sc_its_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 838 RF_ACTIVE); 839 if (sc->sc_its_res == NULL) { 840 device_printf(dev, "Could not allocate memory\n"); 841 return (ENXIO); 842 } 843 844 phys = rounddown2(vtophys(rman_get_virtual(sc->sc_its_res)) + 845 GITS_TRANSLATER, PAGE_SIZE); 846 sc->ma = malloc(sizeof(struct vm_page), M_DEVBUF, M_WAITOK | M_ZERO); 847 vm_page_initfake(sc->ma, phys, VM_MEMATTR_DEFAULT); 848 849 CPU_COPY(&all_cpus, &sc->sc_cpus); 850 iidr = gic_its_read_4(sc, GITS_IIDR); 851 for (i = 0; i < nitems(its_quirks); i++) { 852 if ((iidr & its_quirks[i].iidr_mask) == its_quirks[i].iidr) { 853 if (bootverbose) { 854 device_printf(dev, "Applying %s\n", 855 its_quirks[i].desc); 856 } 857 its_quirks[i].func(dev); 858 break; 859 } 860 } 861 862 if (bus_get_domain(dev, &domain) == 0 && domain < MAXMEMDOM) { 863 sc->sc_ds = DOMAINSET_PREF(domain); 864 } else { 865 sc->sc_ds = DOMAINSET_RR(); 866 } 867 868 /* 869 * GIT_CTLR_EN is mandated to reset to 0 on a Warm reset, but we may be 870 * coming in via, for instance, a kexec/kboot style setup where a 871 * previous kernel has configured then relinquished control. Clear it 872 * so that we can reconfigure GITS_BASER*. 
873 */ 874 ctlr = gic_its_read_4(sc, GITS_CTLR); 875 if ((ctlr & GITS_CTLR_EN) != 0) { 876 ctlr &= ~GITS_CTLR_EN; 877 gic_its_write_4(sc, GITS_CTLR, ctlr); 878 } 879 880 /* Allocate the private tables */ 881 err = gicv3_its_table_init(dev, sc); 882 if (err != 0) 883 return (err); 884 885 /* Protects access to the device list */ 886 mtx_init(&sc->sc_its_dev_lock, "ITS device lock", NULL, MTX_SPIN); 887 888 /* Protects access to the ITS command circular buffer. */ 889 mtx_init(&sc->sc_its_cmd_lock, "ITS cmd lock", NULL, MTX_SPIN); 890 891 /* Allocate the command circular buffer */ 892 gicv3_its_cmdq_init(sc); 893 894 /* Allocate the per-CPU collections */ 895 for (int cpu = 0; cpu <= mp_maxid; cpu++) 896 if (CPU_ISSET(cpu, &sc->sc_cpus) != 0) 897 sc->sc_its_cols[cpu] = malloc_domainset( 898 sizeof(*sc->sc_its_cols[0]), M_GICV3_ITS, 899 DOMAINSET_PREF(pcpu_find(cpu)->pc_domain), 900 M_WAITOK | M_ZERO); 901 else 902 sc->sc_its_cols[cpu] = NULL; 903 904 /* Enable the ITS */ 905 gic_its_write_4(sc, GITS_CTLR, ctlr | GITS_CTLR_EN); 906 907 /* Create the LPI configuration table */ 908 gicv3_its_conftable_init(sc); 909 910 /* And the pending tebles */ 911 gicv3_its_pendtables_init(sc); 912 913 /* Enable LPIs on this CPU */ 914 its_init_cpu(dev, sc); 915 916 TAILQ_INIT(&sc->sc_its_dev_list); 917 TAILQ_INIT(&sc->sc_free_irqs); 918 919 /* 920 * Create the vmem object to allocate INTRNG IRQs from. We try to 921 * use all IRQs not already used by the GICv3. 922 * XXX: This assumes there are no other interrupt controllers in the 923 * system. 924 */ 925 sc->sc_irq_alloc = vmem_create(device_get_nameunit(dev), 0, 926 gicv3_get_nirqs(dev), 1, 0, M_FIRSTFIT | M_WAITOK); 927 928 sc->sc_irqs = malloc(sizeof(*sc->sc_irqs) * sc->sc_irq_length, 929 M_GICV3_ITS, M_WAITOK | M_ZERO); 930 931 /* For GIC-500 install tracking sysctls. 
*/ 932 if ((iidr & (GITS_IIDR_PRODUCT_MASK | GITS_IIDR_IMPLEMENTOR_MASK)) == 933 GITS_IIDR_RAW(GITS_IIDR_IMPL_ARM, GITS_IIDR_PROD_GIC500, 0, 0)) 934 gicv3_its_init_sysctl(sc); 935 936 return (0); 937 } 938 939 static int 940 gicv3_its_detach(device_t dev) 941 { 942 943 return (ENXIO); 944 } 945 946 static void 947 its_quirk_cavium_22375(device_t dev) 948 { 949 struct gicv3_its_softc *sc; 950 int domain; 951 952 sc = device_get_softc(dev); 953 sc->sc_its_flags |= ITS_FLAGS_ERRATA_CAVIUM_22375; 954 955 /* 956 * We need to limit which CPUs we send these interrupts to on 957 * the original dual socket ThunderX as it is unable to 958 * forward them between the two sockets. 959 */ 960 if (bus_get_domain(dev, &domain) == 0) { 961 if (domain < MAXMEMDOM) { 962 CPU_COPY(&cpuset_domain[domain], &sc->sc_cpus); 963 } else { 964 CPU_ZERO(&sc->sc_cpus); 965 } 966 } 967 } 968 969 static void 970 gicv3_its_disable_intr(device_t dev, struct intr_irqsrc *isrc) 971 { 972 struct gicv3_its_softc *sc; 973 struct gicv3_its_irqsrc *girq; 974 uint8_t *conf; 975 976 sc = device_get_softc(dev); 977 girq = (struct gicv3_its_irqsrc *)isrc; 978 conf = sc->sc_conf_base; 979 980 conf[girq->gi_lpi] &= ~LPI_CONF_ENABLE; 981 982 if ((sc->sc_its_flags & ITS_FLAGS_LPI_CONF_FLUSH) != 0) { 983 /* Clean D-cache under command. */ 984 cpu_dcache_wb_range((vm_offset_t)&conf[girq->gi_lpi], 1); 985 } else { 986 /* DSB inner shareable, store */ 987 dsb(ishst); 988 } 989 990 its_cmd_inv(dev, girq->gi_its_dev, girq); 991 } 992 993 static void 994 gicv3_its_enable_intr(device_t dev, struct intr_irqsrc *isrc) 995 { 996 struct gicv3_its_softc *sc; 997 struct gicv3_its_irqsrc *girq; 998 uint8_t *conf; 999 1000 sc = device_get_softc(dev); 1001 girq = (struct gicv3_its_irqsrc *)isrc; 1002 conf = sc->sc_conf_base; 1003 1004 conf[girq->gi_lpi] |= LPI_CONF_ENABLE; 1005 1006 if ((sc->sc_its_flags & ITS_FLAGS_LPI_CONF_FLUSH) != 0) { 1007 /* Clean D-cache under command. 
*/
		cpu_dcache_wb_range((vm_offset_t)&conf[girq->gi_lpi], 1);
	} else {
		/* DSB inner shareable, store */
		dsb(ishst);
	}

	its_cmd_inv(dev, girq->gi_its_dev, girq);
}

/*
 * Interrupt filter for all LPIs owned by this ITS: look up the irqsrc for
 * the LPI and dispatch it through INTRNG.
 */
static int
gicv3_its_intr(void *arg, uintptr_t irq)
{
	struct gicv3_its_softc *sc = arg;
	struct gicv3_its_irqsrc *girq;
	struct trapframe *tf;

	irq -= sc->sc_irq_base;
	girq = sc->sc_irqs[irq];
	if (girq == NULL)
		panic("gicv3_its_intr: Invalid interrupt %ld",
		    irq + sc->sc_irq_base);

	tf = curthread->td_intr_frame;
	intr_isrc_dispatch(&girq->gi_isrc, tf);
	return (FILTER_HANDLED);
}

/* Signal EOI before handing the LPI off to its ithread */
static void
gicv3_its_pre_ithread(device_t dev, struct intr_irqsrc *isrc)
{
	struct gicv3_its_irqsrc *girq;

	girq = (struct gicv3_its_irqsrc *)isrc;
	gic_icc_write(EOIR1, girq->gi_lpi + GIC_FIRST_LPI);
}

/* Nothing to do after the ithread runs; EOI was issued in pre_ithread */
static void
gicv3_its_post_ithread(device_t dev, struct intr_irqsrc *isrc)
{

}

/* Signal EOI after a filter-only handler completes */
static void
gicv3_its_post_filter(device_t dev, struct intr_irqsrc *isrc)
{
	struct gicv3_its_irqsrc *girq;

	girq = (struct gicv3_its_irqsrc *)isrc;
	gic_icc_write(EOIR1, girq->gi_lpi + GIC_FIRST_LPI);
}

/*
 * If the irqsrc has no CPU affinity yet, pick the next CPU from
 * sc_cpus round-robin and record it in isrc_cpu.
 */
static int
gicv3_its_select_cpu(device_t dev, struct intr_irqsrc *isrc)
{
	struct gicv3_its_softc *sc;

	sc = device_get_softc(dev);
	if (CPU_EMPTY(&isrc->isrc_cpu)) {
		sc->gic_irq_cpu = intr_irq_next_cpu(sc->gic_irq_cpu,
		    &sc->sc_cpus);
		CPU_SETOF(sc->gic_irq_cpu, &isrc->isrc_cpu);
	}

	return (0);
}

/*
 * Bind the LPI to its selected CPU by issuing a MOVI command to the
 * corresponding collection.
 */
static int
gicv3_its_bind_intr(device_t dev, struct intr_irqsrc *isrc)
{
	struct gicv3_its_irqsrc *girq;

	gicv3_its_select_cpu(dev, isrc);

	girq = (struct gicv3_its_irqsrc *)isrc;
	its_cmd_movi(dev, girq);
	return (0);
}

static int
gicv3_its_map_intr(device_t dev, struct intr_map_data *data,
    struct intr_irqsrc **isrcp)
{

	/*
	 * This should never happen, we only call this function to map
	 * interrupts found before the controller driver is ready.
	 */
	panic("gicv3_its_map_intr: Unable to map a MSI interrupt");
}

static int
gicv3_its_setup_intr(device_t dev, struct intr_irqsrc *isrc,
    struct resource *res, struct intr_map_data *data)
{

	/* Bind the interrupt to a CPU */
	gicv3_its_bind_intr(dev, isrc);

	return (0);
}

#ifdef SMP
static void
gicv3_its_init_secondary(device_t dev)
{
	struct gicv3_its_softc *sc;

	sc = device_get_softc(dev);

	/*
	 * This is fatal as otherwise we may bind interrupts to this CPU.
	 * We need a way to tell the interrupt framework to only bind to a
	 * subset of given CPUs when it performs the shuffle.
	 */
	if (its_init_cpu(dev, sc) != 0)
		panic("gicv3_its_init_secondary: No usable ITS on CPU%d",
		    PCPU_GET(cpuid));
}
#endif

/*
 * Return the ITS DeviceID for a PCI device (derived from its
 * bus/slot/function by the PCI MSI mapping code).
 */
static uint32_t
its_get_devid(device_t pci_dev)
{
	uintptr_t id;

	if (pci_get_id(pci_dev, PCI_ID_MSI, &id) != 0)
		panic("%s: %s: Unable to get the MSI DeviceID", __func__,
		    device_get_nameunit(pci_dev));

	return (id);
}

/*
 * Look up the its_dev already created for a child device, or NULL if the
 * child has no ITS state yet.  The device list lock is held only during
 * the walk; the returned pointer is not reference counted.
 */
static struct its_dev *
its_device_find(device_t dev, device_t child)
{
	struct gicv3_its_softc *sc;
	struct its_dev *its_dev = NULL;

	sc = device_get_softc(dev);

	mtx_lock_spin(&sc->sc_its_dev_lock);
	TAILQ_FOREACH(its_dev, &sc->sc_its_dev_list, entry) {
		if (its_dev->pci_dev == child)
			break;
	}
	mtx_unlock_spin(&sc->sc_its_dev_lock);

	return (its_dev);
}

/*
 * Find or create the its_dev for a child, reserving nvecs LPIs and
 * allocating its Interrupt Translation Table.
 */
static struct its_dev *
its_device_get(device_t dev, device_t child, u_int nvecs)
{
	struct gicv3_its_softc *sc;
	struct its_dev *its_dev;
	vmem_addr_t irq_base;
	size_t esize;

	sc =
device_get_softc(dev); 1167 1168 its_dev = its_device_find(dev, child); 1169 if (its_dev != NULL) 1170 return (its_dev); 1171 1172 its_dev = malloc(sizeof(*its_dev), M_GICV3_ITS, M_NOWAIT | M_ZERO); 1173 if (its_dev == NULL) 1174 return (NULL); 1175 1176 its_dev->pci_dev = child; 1177 its_dev->devid = its_get_devid(child); 1178 1179 its_dev->lpis.lpi_busy = 0; 1180 its_dev->lpis.lpi_num = nvecs; 1181 its_dev->lpis.lpi_free = nvecs; 1182 1183 if (vmem_alloc(sc->sc_irq_alloc, nvecs, M_FIRSTFIT | M_NOWAIT, 1184 &irq_base) != 0) { 1185 free(its_dev, M_GICV3_ITS); 1186 return (NULL); 1187 } 1188 its_dev->lpis.lpi_base = irq_base; 1189 1190 /* Get ITT entry size */ 1191 esize = GITS_TYPER_ITTES(gic_its_read_8(sc, GITS_TYPER)); 1192 1193 /* 1194 * Allocate ITT for this device. 1195 * PA has to be 256 B aligned. At least two entries for device. 1196 */ 1197 its_dev->itt_size = roundup2(MAX(nvecs, 2) * esize, 256); 1198 its_dev->itt = (vm_offset_t)contigmalloc_domainset(its_dev->itt_size, 1199 M_GICV3_ITS, sc->sc_ds, M_NOWAIT | M_ZERO, 0, 1200 LPI_INT_TRANS_TAB_MAX_ADDR, LPI_INT_TRANS_TAB_ALIGN, 0); 1201 if (its_dev->itt == 0) { 1202 vmem_free(sc->sc_irq_alloc, its_dev->lpis.lpi_base, nvecs); 1203 free(its_dev, M_GICV3_ITS); 1204 return (NULL); 1205 } 1206 1207 /* Make sure device sees zeroed ITT. 
*/ 1208 if ((sc->sc_its_flags & ITS_FLAGS_CMDQ_FLUSH) != 0) 1209 cpu_dcache_wb_range(its_dev->itt, its_dev->itt_size); 1210 1211 mtx_lock_spin(&sc->sc_its_dev_lock); 1212 TAILQ_INSERT_TAIL(&sc->sc_its_dev_list, its_dev, entry); 1213 mtx_unlock_spin(&sc->sc_its_dev_lock); 1214 1215 /* Map device to its ITT */ 1216 its_cmd_mapd(dev, its_dev, 1); 1217 1218 return (its_dev); 1219 } 1220 1221 static void 1222 its_device_release(device_t dev, struct its_dev *its_dev) 1223 { 1224 struct gicv3_its_softc *sc; 1225 1226 KASSERT(its_dev->lpis.lpi_busy == 0, 1227 ("its_device_release: Trying to release an inuse ITS device")); 1228 1229 /* Unmap device in ITS */ 1230 its_cmd_mapd(dev, its_dev, 0); 1231 1232 sc = device_get_softc(dev); 1233 1234 /* Remove the device from the list of devices */ 1235 mtx_lock_spin(&sc->sc_its_dev_lock); 1236 TAILQ_REMOVE(&sc->sc_its_dev_list, its_dev, entry); 1237 mtx_unlock_spin(&sc->sc_its_dev_lock); 1238 1239 /* Free ITT */ 1240 KASSERT(its_dev->itt != 0, ("Invalid ITT in valid ITS device")); 1241 contigfree((void *)its_dev->itt, its_dev->itt_size, M_GICV3_ITS); 1242 1243 /* Free the IRQ allocation */ 1244 vmem_free(sc->sc_irq_alloc, its_dev->lpis.lpi_base, 1245 its_dev->lpis.lpi_num); 1246 1247 free(its_dev, M_GICV3_ITS); 1248 } 1249 1250 static struct gicv3_its_irqsrc * 1251 gicv3_its_alloc_irqsrc(device_t dev, struct gicv3_its_softc *sc, u_int irq) 1252 { 1253 struct gicv3_its_irqsrc *girq = NULL; 1254 1255 KASSERT(sc->sc_irqs[irq] == NULL, 1256 ("%s: Interrupt %u already allocated", __func__, irq)); 1257 mtx_lock_spin(&sc->sc_its_dev_lock); 1258 if (!TAILQ_EMPTY(&sc->sc_free_irqs)) { 1259 girq = TAILQ_FIRST(&sc->sc_free_irqs); 1260 TAILQ_REMOVE(&sc->sc_free_irqs, girq, gi_link); 1261 } 1262 mtx_unlock_spin(&sc->sc_its_dev_lock); 1263 if (girq == NULL) { 1264 girq = malloc(sizeof(*girq), M_GICV3_ITS, 1265 M_NOWAIT | M_ZERO); 1266 if (girq == NULL) 1267 return (NULL); 1268 girq->gi_id = -1; 1269 if (intr_isrc_register(&girq->gi_isrc, dev, 0, 
1270 "%s,%u", device_get_nameunit(dev), irq) != 0) { 1271 free(girq, M_GICV3_ITS); 1272 return (NULL); 1273 } 1274 } 1275 girq->gi_lpi = irq + sc->sc_irq_base - GIC_FIRST_LPI; 1276 sc->sc_irqs[irq] = girq; 1277 1278 return (girq); 1279 } 1280 1281 static void 1282 gicv3_its_release_irqsrc(struct gicv3_its_softc *sc, 1283 struct gicv3_its_irqsrc *girq) 1284 { 1285 u_int irq; 1286 1287 mtx_assert(&sc->sc_its_dev_lock, MA_OWNED); 1288 1289 irq = girq->gi_lpi + GIC_FIRST_LPI - sc->sc_irq_base; 1290 sc->sc_irqs[irq] = NULL; 1291 1292 girq->gi_id = -1; 1293 girq->gi_its_dev = NULL; 1294 TAILQ_INSERT_TAIL(&sc->sc_free_irqs, girq, gi_link); 1295 } 1296 1297 static int 1298 gicv3_its_alloc_msi(device_t dev, device_t child, int count, int maxcount, 1299 device_t *pic, struct intr_irqsrc **srcs) 1300 { 1301 struct gicv3_its_softc *sc; 1302 struct gicv3_its_irqsrc *girq; 1303 struct its_dev *its_dev; 1304 u_int irq; 1305 int i; 1306 1307 its_dev = its_device_get(dev, child, count); 1308 if (its_dev == NULL) 1309 return (ENXIO); 1310 1311 KASSERT(its_dev->lpis.lpi_free >= count, 1312 ("gicv3_its_alloc_msi: No free LPIs")); 1313 sc = device_get_softc(dev); 1314 irq = its_dev->lpis.lpi_base + its_dev->lpis.lpi_num - 1315 its_dev->lpis.lpi_free; 1316 1317 /* Allocate the irqsrc for each MSI */ 1318 for (i = 0; i < count; i++, irq++) { 1319 its_dev->lpis.lpi_free--; 1320 srcs[i] = (struct intr_irqsrc *)gicv3_its_alloc_irqsrc(dev, 1321 sc, irq); 1322 if (srcs[i] == NULL) 1323 break; 1324 } 1325 1326 /* The allocation failed, release them */ 1327 if (i != count) { 1328 mtx_lock_spin(&sc->sc_its_dev_lock); 1329 for (i = 0; i < count; i++) { 1330 girq = (struct gicv3_its_irqsrc *)srcs[i]; 1331 if (girq == NULL) 1332 break; 1333 gicv3_its_release_irqsrc(sc, girq); 1334 srcs[i] = NULL; 1335 } 1336 mtx_unlock_spin(&sc->sc_its_dev_lock); 1337 return (ENXIO); 1338 } 1339 1340 /* Finish the allocation now we have all MSI irqsrcs */ 1341 for (i = 0; i < count; i++) { 1342 girq = (struct 
gicv3_its_irqsrc *)srcs[i]; 1343 girq->gi_id = i; 1344 girq->gi_its_dev = its_dev; 1345 1346 /* Map the message to the given IRQ */ 1347 gicv3_its_select_cpu(dev, (struct intr_irqsrc *)girq); 1348 its_cmd_mapti(dev, girq); 1349 } 1350 its_dev->lpis.lpi_busy += count; 1351 *pic = dev; 1352 1353 return (0); 1354 } 1355 1356 static int 1357 gicv3_its_release_msi(device_t dev, device_t child, int count, 1358 struct intr_irqsrc **isrc) 1359 { 1360 struct gicv3_its_softc *sc; 1361 struct gicv3_its_irqsrc *girq; 1362 struct its_dev *its_dev; 1363 int i; 1364 1365 its_dev = its_device_find(dev, child); 1366 1367 KASSERT(its_dev != NULL, 1368 ("gicv3_its_release_msi: Releasing a MSI interrupt with " 1369 "no ITS device")); 1370 KASSERT(its_dev->lpis.lpi_busy >= count, 1371 ("gicv3_its_release_msi: Releasing more interrupts than " 1372 "were allocated: releasing %d, allocated %d", count, 1373 its_dev->lpis.lpi_busy)); 1374 1375 sc = device_get_softc(dev); 1376 mtx_lock_spin(&sc->sc_its_dev_lock); 1377 for (i = 0; i < count; i++) { 1378 girq = (struct gicv3_its_irqsrc *)isrc[i]; 1379 gicv3_its_release_irqsrc(sc, girq); 1380 } 1381 mtx_unlock_spin(&sc->sc_its_dev_lock); 1382 its_dev->lpis.lpi_busy -= count; 1383 1384 if (its_dev->lpis.lpi_busy == 0) 1385 its_device_release(dev, its_dev); 1386 1387 return (0); 1388 } 1389 1390 static int 1391 gicv3_its_alloc_msix(device_t dev, device_t child, device_t *pic, 1392 struct intr_irqsrc **isrcp) 1393 { 1394 struct gicv3_its_softc *sc; 1395 struct gicv3_its_irqsrc *girq; 1396 struct its_dev *its_dev; 1397 u_int nvecs, irq; 1398 1399 nvecs = pci_msix_count(child); 1400 its_dev = its_device_get(dev, child, nvecs); 1401 if (its_dev == NULL) 1402 return (ENXIO); 1403 1404 KASSERT(its_dev->lpis.lpi_free > 0, 1405 ("gicv3_its_alloc_msix: No free LPIs")); 1406 sc = device_get_softc(dev); 1407 irq = its_dev->lpis.lpi_base + its_dev->lpis.lpi_num - 1408 its_dev->lpis.lpi_free; 1409 1410 girq = gicv3_its_alloc_irqsrc(dev, sc, irq); 1411 if 
(girq == NULL) 1412 return (ENXIO); 1413 girq->gi_id = its_dev->lpis.lpi_busy; 1414 girq->gi_its_dev = its_dev; 1415 1416 its_dev->lpis.lpi_free--; 1417 its_dev->lpis.lpi_busy++; 1418 1419 /* Map the message to the given IRQ */ 1420 gicv3_its_select_cpu(dev, (struct intr_irqsrc *)girq); 1421 its_cmd_mapti(dev, girq); 1422 1423 *pic = dev; 1424 *isrcp = (struct intr_irqsrc *)girq; 1425 1426 return (0); 1427 } 1428 1429 static int 1430 gicv3_its_release_msix(device_t dev, device_t child, struct intr_irqsrc *isrc) 1431 { 1432 struct gicv3_its_softc *sc; 1433 struct gicv3_its_irqsrc *girq; 1434 struct its_dev *its_dev; 1435 1436 its_dev = its_device_find(dev, child); 1437 1438 KASSERT(its_dev != NULL, 1439 ("gicv3_its_release_msix: Releasing a MSI-X interrupt with " 1440 "no ITS device")); 1441 KASSERT(its_dev->lpis.lpi_busy > 0, 1442 ("gicv3_its_release_msix: Releasing more interrupts than " 1443 "were allocated: allocated %d", its_dev->lpis.lpi_busy)); 1444 1445 sc = device_get_softc(dev); 1446 girq = (struct gicv3_its_irqsrc *)isrc; 1447 mtx_lock_spin(&sc->sc_its_dev_lock); 1448 gicv3_its_release_irqsrc(sc, girq); 1449 mtx_unlock_spin(&sc->sc_its_dev_lock); 1450 its_dev->lpis.lpi_busy--; 1451 1452 if (its_dev->lpis.lpi_busy == 0) 1453 its_device_release(dev, its_dev); 1454 1455 return (0); 1456 } 1457 1458 static int 1459 gicv3_its_map_msi(device_t dev, device_t child, struct intr_irqsrc *isrc, 1460 uint64_t *addr, uint32_t *data) 1461 { 1462 struct gicv3_its_softc *sc; 1463 struct gicv3_its_irqsrc *girq; 1464 1465 sc = device_get_softc(dev); 1466 girq = (struct gicv3_its_irqsrc *)isrc; 1467 1468 *addr = vtophys(rman_get_virtual(sc->sc_its_res)) + GITS_TRANSLATER; 1469 *data = girq->gi_id; 1470 1471 return (0); 1472 } 1473 1474 #ifdef IOMMU 1475 static int 1476 gicv3_iommu_init(device_t dev, device_t child, struct iommu_domain **domain) 1477 { 1478 struct gicv3_its_softc *sc; 1479 struct iommu_ctx *ctx; 1480 int error; 1481 1482 sc = device_get_softc(dev); 1483 ctx 
= iommu_get_dev_ctx(child); 1484 if (ctx == NULL) 1485 return (ENXIO); 1486 /* Map the page containing the GITS_TRANSLATER register. */ 1487 error = iommu_map_msi(ctx, PAGE_SIZE, 0, 1488 IOMMU_MAP_ENTRY_WRITE, IOMMU_MF_CANWAIT, &sc->ma); 1489 *domain = iommu_get_ctx_domain(ctx); 1490 1491 return (error); 1492 } 1493 1494 static void 1495 gicv3_iommu_deinit(device_t dev, device_t child) 1496 { 1497 struct iommu_ctx *ctx; 1498 1499 ctx = iommu_get_dev_ctx(child); 1500 if (ctx == NULL) 1501 return; 1502 1503 iommu_unmap_msi(ctx); 1504 } 1505 #endif 1506 1507 /* 1508 * Commands handling. 1509 */ 1510 1511 static __inline void 1512 cmd_format_command(struct its_cmd *cmd, uint8_t cmd_type) 1513 { 1514 /* Command field: DW0 [7:0] */ 1515 cmd->cmd_dword[0] &= htole64(~CMD_COMMAND_MASK); 1516 cmd->cmd_dword[0] |= htole64(cmd_type); 1517 } 1518 1519 static __inline void 1520 cmd_format_devid(struct its_cmd *cmd, uint32_t devid) 1521 { 1522 /* Device ID field: DW0 [63:32] */ 1523 cmd->cmd_dword[0] &= htole64(~CMD_DEVID_MASK); 1524 cmd->cmd_dword[0] |= htole64((uint64_t)devid << CMD_DEVID_SHIFT); 1525 } 1526 1527 static __inline void 1528 cmd_format_size(struct its_cmd *cmd, uint16_t size) 1529 { 1530 /* Size field: DW1 [4:0] */ 1531 cmd->cmd_dword[1] &= htole64(~CMD_SIZE_MASK); 1532 cmd->cmd_dword[1] |= htole64((size & CMD_SIZE_MASK)); 1533 } 1534 1535 static __inline void 1536 cmd_format_id(struct its_cmd *cmd, uint32_t id) 1537 { 1538 /* ID field: DW1 [31:0] */ 1539 cmd->cmd_dword[1] &= htole64(~CMD_ID_MASK); 1540 cmd->cmd_dword[1] |= htole64(id); 1541 } 1542 1543 static __inline void 1544 cmd_format_pid(struct its_cmd *cmd, uint32_t pid) 1545 { 1546 /* Physical ID field: DW1 [63:32] */ 1547 cmd->cmd_dword[1] &= htole64(~CMD_PID_MASK); 1548 cmd->cmd_dword[1] |= htole64((uint64_t)pid << CMD_PID_SHIFT); 1549 } 1550 1551 static __inline void 1552 cmd_format_col(struct its_cmd *cmd, uint16_t col_id) 1553 { 1554 /* Collection field: DW2 [16:0] */ 1555 cmd->cmd_dword[2] &= 
htole64(~CMD_COL_MASK); 1556 cmd->cmd_dword[2] |= htole64(col_id); 1557 } 1558 1559 static __inline void 1560 cmd_format_target(struct its_cmd *cmd, uint64_t target) 1561 { 1562 /* Target Address field: DW2 [47:16] */ 1563 cmd->cmd_dword[2] &= htole64(~CMD_TARGET_MASK); 1564 cmd->cmd_dword[2] |= htole64(target & CMD_TARGET_MASK); 1565 } 1566 1567 static __inline void 1568 cmd_format_itt(struct its_cmd *cmd, uint64_t itt) 1569 { 1570 /* ITT Address field: DW2 [47:8] */ 1571 cmd->cmd_dword[2] &= htole64(~CMD_ITT_MASK); 1572 cmd->cmd_dword[2] |= htole64(itt & CMD_ITT_MASK); 1573 } 1574 1575 static __inline void 1576 cmd_format_valid(struct its_cmd *cmd, uint8_t valid) 1577 { 1578 /* Valid field: DW2 [63] */ 1579 cmd->cmd_dword[2] &= htole64(~CMD_VALID_MASK); 1580 cmd->cmd_dword[2] |= htole64((uint64_t)valid << CMD_VALID_SHIFT); 1581 } 1582 1583 static inline bool 1584 its_cmd_queue_full(struct gicv3_its_softc *sc) 1585 { 1586 size_t read_idx, next_write_idx; 1587 1588 /* Get the index of the next command */ 1589 next_write_idx = (sc->sc_its_cmd_next_idx + 1) % 1590 (ITS_CMDQ_SIZE / sizeof(struct its_cmd)); 1591 /* And the index of the current command being read */ 1592 read_idx = gic_its_read_4(sc, GITS_CREADR) / sizeof(struct its_cmd); 1593 1594 /* 1595 * The queue is full when the write offset points 1596 * at the command before the current read offset. 1597 */ 1598 return (next_write_idx == read_idx); 1599 } 1600 1601 static inline void 1602 its_cmd_sync(struct gicv3_its_softc *sc, struct its_cmd *cmd) 1603 { 1604 1605 if ((sc->sc_its_flags & ITS_FLAGS_CMDQ_FLUSH) != 0) { 1606 /* Clean D-cache under command. 
*/ 1607 cpu_dcache_wb_range((vm_offset_t)cmd, sizeof(*cmd)); 1608 } else { 1609 /* DSB inner shareable, store */ 1610 dsb(ishst); 1611 } 1612 1613 } 1614 1615 static inline uint64_t 1616 its_cmd_cwriter_offset(struct gicv3_its_softc *sc, struct its_cmd *cmd) 1617 { 1618 uint64_t off; 1619 1620 off = (cmd - sc->sc_its_cmd_base) * sizeof(*cmd); 1621 1622 return (off); 1623 } 1624 1625 static void 1626 its_cmd_wait_completion(device_t dev, struct its_cmd *cmd_first, 1627 struct its_cmd *cmd_last) 1628 { 1629 struct gicv3_its_softc *sc; 1630 uint64_t first, last, read; 1631 size_t us_left; 1632 1633 sc = device_get_softc(dev); 1634 1635 /* 1636 * XXX ARM64TODO: This is obviously a significant delay. 1637 * The reason for that is that currently the time frames for 1638 * the command to complete are not known. 1639 */ 1640 us_left = 1000000; 1641 1642 first = its_cmd_cwriter_offset(sc, cmd_first); 1643 last = its_cmd_cwriter_offset(sc, cmd_last); 1644 1645 for (;;) { 1646 read = gic_its_read_8(sc, GITS_CREADR); 1647 if (first < last) { 1648 if (read < first || read >= last) 1649 break; 1650 } else if (read < first && read >= last) 1651 break; 1652 1653 if (us_left-- == 0) { 1654 /* This means timeout */ 1655 device_printf(dev, 1656 "Timeout while waiting for CMD completion.\n"); 1657 return; 1658 } 1659 DELAY(1); 1660 } 1661 } 1662 1663 static struct its_cmd * 1664 its_cmd_alloc_locked(device_t dev) 1665 { 1666 struct gicv3_its_softc *sc; 1667 struct its_cmd *cmd; 1668 size_t us_left; 1669 1670 sc = device_get_softc(dev); 1671 1672 /* 1673 * XXX ARM64TODO: This is obviously a significant delay. 1674 * The reason for that is that currently the time frames for 1675 * the command to complete (and therefore free the descriptor) 1676 * are not known. 
1677 */ 1678 us_left = 1000000; 1679 1680 mtx_assert(&sc->sc_its_cmd_lock, MA_OWNED); 1681 while (its_cmd_queue_full(sc)) { 1682 if (us_left-- == 0) { 1683 /* Timeout while waiting for free command */ 1684 device_printf(dev, 1685 "Timeout while waiting for free command\n"); 1686 return (NULL); 1687 } 1688 DELAY(1); 1689 } 1690 1691 cmd = &sc->sc_its_cmd_base[sc->sc_its_cmd_next_idx]; 1692 sc->sc_its_cmd_next_idx++; 1693 sc->sc_its_cmd_next_idx %= ITS_CMDQ_SIZE / sizeof(struct its_cmd); 1694 1695 return (cmd); 1696 } 1697 1698 static uint64_t 1699 its_cmd_prepare(struct its_cmd *cmd, struct its_cmd_desc *desc) 1700 { 1701 uint64_t target; 1702 uint8_t cmd_type; 1703 u_int size; 1704 1705 cmd_type = desc->cmd_type; 1706 target = ITS_TARGET_NONE; 1707 1708 switch (cmd_type) { 1709 case ITS_CMD_MOVI: /* Move interrupt ID to another collection */ 1710 target = desc->cmd_desc_movi.col->col_target; 1711 cmd_format_command(cmd, ITS_CMD_MOVI); 1712 cmd_format_id(cmd, desc->cmd_desc_movi.id); 1713 cmd_format_col(cmd, desc->cmd_desc_movi.col->col_id); 1714 cmd_format_devid(cmd, desc->cmd_desc_movi.its_dev->devid); 1715 break; 1716 case ITS_CMD_SYNC: /* Wait for previous commands completion */ 1717 target = desc->cmd_desc_sync.col->col_target; 1718 cmd_format_command(cmd, ITS_CMD_SYNC); 1719 cmd_format_target(cmd, target); 1720 break; 1721 case ITS_CMD_MAPD: /* Assign ITT to device */ 1722 cmd_format_command(cmd, ITS_CMD_MAPD); 1723 cmd_format_itt(cmd, vtophys(desc->cmd_desc_mapd.its_dev->itt)); 1724 /* 1725 * Size describes number of bits to encode interrupt IDs 1726 * supported by the device minus one. 1727 * When V (valid) bit is zero, this field should be written 1728 * as zero. 
1729 */ 1730 if (desc->cmd_desc_mapd.valid != 0) { 1731 size = fls(desc->cmd_desc_mapd.its_dev->lpis.lpi_num); 1732 size = MAX(1, size) - 1; 1733 } else 1734 size = 0; 1735 1736 cmd_format_size(cmd, size); 1737 cmd_format_devid(cmd, desc->cmd_desc_mapd.its_dev->devid); 1738 cmd_format_valid(cmd, desc->cmd_desc_mapd.valid); 1739 break; 1740 case ITS_CMD_MAPC: /* Map collection to Re-Distributor */ 1741 target = desc->cmd_desc_mapc.col->col_target; 1742 cmd_format_command(cmd, ITS_CMD_MAPC); 1743 cmd_format_col(cmd, desc->cmd_desc_mapc.col->col_id); 1744 cmd_format_valid(cmd, desc->cmd_desc_mapc.valid); 1745 cmd_format_target(cmd, target); 1746 break; 1747 case ITS_CMD_MAPTI: 1748 target = desc->cmd_desc_mapvi.col->col_target; 1749 cmd_format_command(cmd, ITS_CMD_MAPTI); 1750 cmd_format_devid(cmd, desc->cmd_desc_mapvi.its_dev->devid); 1751 cmd_format_id(cmd, desc->cmd_desc_mapvi.id); 1752 cmd_format_pid(cmd, desc->cmd_desc_mapvi.pid); 1753 cmd_format_col(cmd, desc->cmd_desc_mapvi.col->col_id); 1754 break; 1755 case ITS_CMD_MAPI: 1756 target = desc->cmd_desc_mapi.col->col_target; 1757 cmd_format_command(cmd, ITS_CMD_MAPI); 1758 cmd_format_devid(cmd, desc->cmd_desc_mapi.its_dev->devid); 1759 cmd_format_id(cmd, desc->cmd_desc_mapi.pid); 1760 cmd_format_col(cmd, desc->cmd_desc_mapi.col->col_id); 1761 break; 1762 case ITS_CMD_INV: 1763 target = desc->cmd_desc_inv.col->col_target; 1764 cmd_format_command(cmd, ITS_CMD_INV); 1765 cmd_format_devid(cmd, desc->cmd_desc_inv.its_dev->devid); 1766 cmd_format_id(cmd, desc->cmd_desc_inv.pid); 1767 break; 1768 case ITS_CMD_INVALL: 1769 cmd_format_command(cmd, ITS_CMD_INVALL); 1770 cmd_format_col(cmd, desc->cmd_desc_invall.col->col_id); 1771 break; 1772 default: 1773 panic("its_cmd_prepare: Invalid command: %x", cmd_type); 1774 } 1775 1776 return (target); 1777 } 1778 1779 static int 1780 its_cmd_send(device_t dev, struct its_cmd_desc *desc) 1781 { 1782 struct gicv3_its_softc *sc; 1783 struct its_cmd *cmd, *cmd_sync, *cmd_write; 1784 
struct its_col col_sync; 1785 struct its_cmd_desc desc_sync; 1786 uint64_t target, cwriter; 1787 1788 sc = device_get_softc(dev); 1789 mtx_lock_spin(&sc->sc_its_cmd_lock); 1790 cmd = its_cmd_alloc_locked(dev); 1791 if (cmd == NULL) { 1792 device_printf(dev, "could not allocate ITS command\n"); 1793 mtx_unlock_spin(&sc->sc_its_cmd_lock); 1794 return (EBUSY); 1795 } 1796 1797 target = its_cmd_prepare(cmd, desc); 1798 its_cmd_sync(sc, cmd); 1799 1800 if (target != ITS_TARGET_NONE) { 1801 cmd_sync = its_cmd_alloc_locked(dev); 1802 if (cmd_sync != NULL) { 1803 desc_sync.cmd_type = ITS_CMD_SYNC; 1804 col_sync.col_target = target; 1805 desc_sync.cmd_desc_sync.col = &col_sync; 1806 its_cmd_prepare(cmd_sync, &desc_sync); 1807 its_cmd_sync(sc, cmd_sync); 1808 } 1809 } 1810 1811 /* Update GITS_CWRITER */ 1812 cwriter = sc->sc_its_cmd_next_idx * sizeof(struct its_cmd); 1813 gic_its_write_8(sc, GITS_CWRITER, cwriter); 1814 cmd_write = &sc->sc_its_cmd_base[sc->sc_its_cmd_next_idx]; 1815 mtx_unlock_spin(&sc->sc_its_cmd_lock); 1816 1817 its_cmd_wait_completion(dev, cmd, cmd_write); 1818 1819 return (0); 1820 } 1821 1822 /* Handlers to send commands */ 1823 static void 1824 its_cmd_movi(device_t dev, struct gicv3_its_irqsrc *girq) 1825 { 1826 struct gicv3_its_softc *sc; 1827 struct its_cmd_desc desc; 1828 struct its_col *col; 1829 1830 sc = device_get_softc(dev); 1831 col = sc->sc_its_cols[CPU_FFS(&girq->gi_isrc.isrc_cpu) - 1]; 1832 1833 desc.cmd_type = ITS_CMD_MOVI; 1834 desc.cmd_desc_movi.its_dev = girq->gi_its_dev; 1835 desc.cmd_desc_movi.col = col; 1836 desc.cmd_desc_movi.id = girq->gi_id; 1837 1838 its_cmd_send(dev, &desc); 1839 } 1840 1841 static void 1842 its_cmd_mapc(device_t dev, struct its_col *col, uint8_t valid) 1843 { 1844 struct its_cmd_desc desc; 1845 1846 desc.cmd_type = ITS_CMD_MAPC; 1847 desc.cmd_desc_mapc.col = col; 1848 /* 1849 * Valid bit set - map the collection. 1850 * Valid bit cleared - unmap the collection. 
1851 */ 1852 desc.cmd_desc_mapc.valid = valid; 1853 1854 its_cmd_send(dev, &desc); 1855 } 1856 1857 static void 1858 its_cmd_mapti(device_t dev, struct gicv3_its_irqsrc *girq) 1859 { 1860 struct gicv3_its_softc *sc; 1861 struct its_cmd_desc desc; 1862 struct its_col *col; 1863 u_int col_id; 1864 1865 sc = device_get_softc(dev); 1866 1867 col_id = CPU_FFS(&girq->gi_isrc.isrc_cpu) - 1; 1868 col = sc->sc_its_cols[col_id]; 1869 1870 desc.cmd_type = ITS_CMD_MAPTI; 1871 desc.cmd_desc_mapvi.its_dev = girq->gi_its_dev; 1872 desc.cmd_desc_mapvi.col = col; 1873 /* The EventID sent to the device */ 1874 desc.cmd_desc_mapvi.id = girq->gi_id; 1875 /* The physical interrupt presented to softeware */ 1876 desc.cmd_desc_mapvi.pid = girq->gi_lpi + GIC_FIRST_LPI; 1877 1878 its_cmd_send(dev, &desc); 1879 } 1880 1881 static void 1882 its_cmd_mapd(device_t dev, struct its_dev *its_dev, uint8_t valid) 1883 { 1884 struct its_cmd_desc desc; 1885 1886 desc.cmd_type = ITS_CMD_MAPD; 1887 desc.cmd_desc_mapd.its_dev = its_dev; 1888 desc.cmd_desc_mapd.valid = valid; 1889 1890 its_cmd_send(dev, &desc); 1891 } 1892 1893 static void 1894 its_cmd_inv(device_t dev, struct its_dev *its_dev, 1895 struct gicv3_its_irqsrc *girq) 1896 { 1897 struct gicv3_its_softc *sc; 1898 struct its_cmd_desc desc; 1899 struct its_col *col; 1900 1901 sc = device_get_softc(dev); 1902 col = sc->sc_its_cols[CPU_FFS(&girq->gi_isrc.isrc_cpu) - 1]; 1903 1904 desc.cmd_type = ITS_CMD_INV; 1905 /* The EventID sent to the device */ 1906 desc.cmd_desc_inv.pid = girq->gi_id; 1907 desc.cmd_desc_inv.its_dev = its_dev; 1908 desc.cmd_desc_inv.col = col; 1909 1910 its_cmd_send(dev, &desc); 1911 } 1912 1913 static void 1914 its_cmd_invall(device_t dev, struct its_col *col) 1915 { 1916 struct its_cmd_desc desc; 1917 1918 desc.cmd_type = ITS_CMD_INVALL; 1919 desc.cmd_desc_invall.col = col; 1920 1921 its_cmd_send(dev, &desc); 1922 } 1923 1924 #ifdef FDT 1925 static device_probe_t gicv3_its_fdt_probe; 1926 static device_attach_t 
gicv3_its_fdt_attach; 1927 1928 static device_method_t gicv3_its_fdt_methods[] = { 1929 /* Device interface */ 1930 DEVMETHOD(device_probe, gicv3_its_fdt_probe), 1931 DEVMETHOD(device_attach, gicv3_its_fdt_attach), 1932 1933 /* End */ 1934 DEVMETHOD_END 1935 }; 1936 1937 #define its_baseclasses its_fdt_baseclasses 1938 DEFINE_CLASS_1(its, gicv3_its_fdt_driver, gicv3_its_fdt_methods, 1939 sizeof(struct gicv3_its_softc), gicv3_its_driver); 1940 #undef its_baseclasses 1941 1942 EARLY_DRIVER_MODULE(its_fdt, gic, gicv3_its_fdt_driver, 0, 0, 1943 BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE); 1944 1945 static int 1946 gicv3_its_fdt_probe(device_t dev) 1947 { 1948 1949 if (!ofw_bus_status_okay(dev)) 1950 return (ENXIO); 1951 1952 if (!ofw_bus_is_compatible(dev, "arm,gic-v3-its")) 1953 return (ENXIO); 1954 1955 device_set_desc(dev, "ARM GIC Interrupt Translation Service"); 1956 return (BUS_PROBE_DEFAULT); 1957 } 1958 1959 static int 1960 gicv3_its_fdt_attach(device_t dev) 1961 { 1962 struct gicv3_its_softc *sc; 1963 phandle_t xref; 1964 int err; 1965 1966 sc = device_get_softc(dev); 1967 sc->dev = dev; 1968 err = gicv3_its_attach(dev); 1969 if (err != 0) 1970 return (err); 1971 1972 /* Register this device as a interrupt controller */ 1973 xref = OF_xref_from_node(ofw_bus_get_node(dev)); 1974 sc->sc_pic = intr_pic_register(dev, xref); 1975 err = intr_pic_add_handler(device_get_parent(dev), sc->sc_pic, 1976 gicv3_its_intr, sc, sc->sc_irq_base, sc->sc_irq_length); 1977 if (err != 0) { 1978 device_printf(dev, "Failed to add PIC handler: %d\n", err); 1979 return (err); 1980 } 1981 1982 /* Register this device to handle MSI interrupts */ 1983 err = intr_msi_register(dev, xref); 1984 if (err != 0) { 1985 device_printf(dev, "Failed to register for MSIs: %d\n", err); 1986 return (err); 1987 } 1988 1989 return (0); 1990 } 1991 #endif 1992 1993 #ifdef DEV_ACPI 1994 static device_probe_t gicv3_its_acpi_probe; 1995 static device_attach_t gicv3_its_acpi_attach; 1996 1997 static 
device_method_t gicv3_its_acpi_methods[] = { 1998 /* Device interface */ 1999 DEVMETHOD(device_probe, gicv3_its_acpi_probe), 2000 DEVMETHOD(device_attach, gicv3_its_acpi_attach), 2001 2002 /* End */ 2003 DEVMETHOD_END 2004 }; 2005 2006 #define its_baseclasses its_acpi_baseclasses 2007 DEFINE_CLASS_1(its, gicv3_its_acpi_driver, gicv3_its_acpi_methods, 2008 sizeof(struct gicv3_its_softc), gicv3_its_driver); 2009 #undef its_baseclasses 2010 2011 EARLY_DRIVER_MODULE(its_acpi, gic, gicv3_its_acpi_driver, 0, 0, 2012 BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE); 2013 2014 static int 2015 gicv3_its_acpi_probe(device_t dev) 2016 { 2017 2018 if (gic_get_bus(dev) != GIC_BUS_ACPI) 2019 return (EINVAL); 2020 2021 if (gic_get_hw_rev(dev) < 3) 2022 return (EINVAL); 2023 2024 device_set_desc(dev, "ARM GIC Interrupt Translation Service"); 2025 return (BUS_PROBE_DEFAULT); 2026 } 2027 2028 static int 2029 gicv3_its_acpi_attach(device_t dev) 2030 { 2031 struct gicv3_its_softc *sc; 2032 struct gic_v3_devinfo *di; 2033 int err; 2034 2035 sc = device_get_softc(dev); 2036 sc->dev = dev; 2037 err = gicv3_its_attach(dev); 2038 if (err != 0) 2039 return (err); 2040 2041 di = device_get_ivars(dev); 2042 sc->sc_pic = intr_pic_register(dev, di->msi_xref); 2043 err = intr_pic_add_handler(device_get_parent(dev), sc->sc_pic, 2044 gicv3_its_intr, sc, sc->sc_irq_base, sc->sc_irq_length); 2045 if (err != 0) { 2046 device_printf(dev, "Failed to add PIC handler: %d\n", err); 2047 return (err); 2048 } 2049 2050 /* Register this device to handle MSI interrupts */ 2051 err = intr_msi_register(dev, di->msi_xref); 2052 if (err != 0) { 2053 device_printf(dev, "Failed to register for MSIs: %d\n", err); 2054 return (err); 2055 } 2056 2057 return (0); 2058 } 2059 #endif 2060