1 /*- 2 * Copyright (c) 2015-2016 The FreeBSD Foundation 3 * Copyright (c) 2023 Arm Ltd 4 * 5 * This software was developed by Andrew Turner under 6 * the sponsorship of the FreeBSD Foundation. 7 * 8 * This software was developed by Semihalf under 9 * the sponsorship of the FreeBSD Foundation. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 30 * SUCH DAMAGE. 
31 */ 32 33 #include "opt_acpi.h" 34 #include "opt_platform.h" 35 #include "opt_iommu.h" 36 37 #include <sys/param.h> 38 #include <sys/systm.h> 39 #include <sys/bus.h> 40 #include <sys/cpuset.h> 41 #include <sys/domainset.h> 42 #include <sys/endian.h> 43 #include <sys/kernel.h> 44 #include <sys/lock.h> 45 #include <sys/malloc.h> 46 #include <sys/module.h> 47 #include <sys/mutex.h> 48 #include <sys/proc.h> 49 #include <sys/taskqueue.h> 50 #include <sys/tree.h> 51 #include <sys/queue.h> 52 #include <sys/rman.h> 53 #include <sys/sbuf.h> 54 #include <sys/smp.h> 55 #include <sys/sysctl.h> 56 #include <sys/vmem.h> 57 58 #include <vm/vm.h> 59 #include <vm/pmap.h> 60 #include <vm/vm_page.h> 61 62 #include <machine/bus.h> 63 #include <machine/intr.h> 64 65 #include <arm/arm/gic_common.h> 66 #include <arm64/arm64/gic_v3_reg.h> 67 #include <arm64/arm64/gic_v3_var.h> 68 69 #ifdef FDT 70 #include <dev/ofw/openfirm.h> 71 #include <dev/ofw/ofw_bus.h> 72 #include <dev/ofw/ofw_bus_subr.h> 73 #endif 74 #include <dev/pci/pcireg.h> 75 #include <dev/pci/pcivar.h> 76 77 #ifdef IOMMU 78 #include <dev/iommu/iommu.h> 79 #include <dev/iommu/iommu_gas.h> 80 #endif 81 82 #include "pcib_if.h" 83 #include "pic_if.h" 84 #include "msi_if.h" 85 86 MALLOC_DEFINE(M_GICV3_ITS, "GICv3 ITS", 87 "ARM GICv3 Interrupt Translation Service"); 88 89 #define LPI_NIRQS (64 * 1024) 90 91 /* The size and alignment of the command circular buffer */ 92 #define ITS_CMDQ_SIZE (64 * 1024) /* Must be a multiple of 4K */ 93 #define ITS_CMDQ_ALIGN (64 * 1024) 94 95 #define LPI_CONFTAB_SIZE LPI_NIRQS 96 #define LPI_CONFTAB_ALIGN (64 * 1024) 97 #define LPI_CONFTAB_MAX_ADDR ((1ul << 48) - 1) /* We need a 47 bit PA */ 98 99 /* 1 bit per SPI, PPI, and SGI (8k), and 1 bit per LPI (LPI_CONFTAB_SIZE) */ 100 #define LPI_PENDTAB_SIZE ((LPI_NIRQS + GIC_FIRST_LPI) / 8) 101 #define LPI_PENDTAB_ALIGN (64 * 1024) 102 #define LPI_PENDTAB_MAX_ADDR ((1ul << 48) - 1) /* We need a 47 bit PA */ 103 104 #define LPI_INT_TRANS_TAB_ALIGN 256 
#define	LPI_INT_TRANS_TAB_MAX_ADDR ((1ul << 48) - 1)

/* ITS commands encoding (opcode in byte 0 of the first command dword) */
#define	ITS_CMD_MOVI		(0x01)
#define	ITS_CMD_SYNC		(0x05)
#define	ITS_CMD_MAPD		(0x08)
#define	ITS_CMD_MAPC		(0x09)
#define	ITS_CMD_MAPTI		(0x0a)
#define	ITS_CMD_MAPI		(0x0b)
#define	ITS_CMD_INV		(0x0c)
#define	ITS_CMD_INVALL		(0x0d)
/* Command */
#define	CMD_COMMAND_MASK	(0xFFUL)
/* PCI device ID */
#define	CMD_DEVID_SHIFT		(32)
#define	CMD_DEVID_MASK		(0xFFFFFFFFUL << CMD_DEVID_SHIFT)
/* Size of IRQ ID bitfield */
#define	CMD_SIZE_MASK		(0xFFUL)
/* Virtual LPI ID */
#define	CMD_ID_MASK		(0xFFFFFFFFUL)
/* Physical LPI ID */
#define	CMD_PID_SHIFT		(32)
#define	CMD_PID_MASK		(0xFFFFFFFFUL << CMD_PID_SHIFT)
/* Collection */
#define	CMD_COL_MASK		(0xFFFFUL)
/* Target (CPU or Re-Distributor) */
#define	CMD_TARGET_SHIFT	(16)
#define	CMD_TARGET_MASK		(0xFFFFFFFFUL << CMD_TARGET_SHIFT)
/* Interrupt Translation Table address */
#define	CMD_ITT_MASK		(0xFFFFFFFFFF00UL)
/* Valid command bit */
#define	CMD_VALID_SHIFT		(63)
#define	CMD_VALID_MASK		(1UL << CMD_VALID_SHIFT)

/* Sentinel: collection not (yet) bound to any Re-Distributor */
#define	ITS_TARGET_NONE		0xFBADBEEF

/* LPI chunk owned by ITS device */
struct lpi_chunk {
	u_int	lpi_base;	/* First LPI number in this chunk */
	u_int	lpi_free;	/* First free LPI in set */
	u_int	lpi_num;	/* Total number of LPIs in chunk */
	u_int	lpi_busy;	/* Number of busy LPIs in chunk */
};

/* ITS device: one per requester (PCI function) that owns LPIs */
struct its_dev {
	TAILQ_ENTRY(its_dev)	entry;
	/* PCI device */
	device_t		pci_dev;
	/* Device ID (i.e. PCI device ID) */
	uint32_t		devid;
	/* List of assigned LPIs */
	struct lpi_chunk	lpis;
	/* Virtual address of ITT (Interrupt Translation Table) */
	vm_offset_t		itt;
	size_t			itt_size;
};

/*
 * ITS command descriptor.
 * Idea for command description passing taken from Linux.
 * One union arm per ITS command opcode; only the fields the
 * corresponding command encoder reads are filled in.
 */
struct its_cmd_desc {
	uint8_t cmd_type;	/* One of the ITS_CMD_* opcodes above */

	union {
		struct {
			struct its_dev *its_dev;
			struct its_col *col;
			uint32_t id;
		} cmd_desc_movi;

		struct {
			struct its_col *col;
		} cmd_desc_sync;

		struct {
			struct its_col *col;
			uint8_t valid;
		} cmd_desc_mapc;

		struct {
			struct its_dev *its_dev;
			struct its_col *col;
			uint32_t pid;
			uint32_t id;
		} cmd_desc_mapvi;

		struct {
			struct its_dev *its_dev;
			struct its_col *col;
			uint32_t pid;
		} cmd_desc_mapi;

		struct {
			struct its_dev *its_dev;
			uint8_t valid;
		} cmd_desc_mapd;

		struct {
			struct its_dev *its_dev;
			struct its_col *col;
			uint32_t pid;
		} cmd_desc_inv;

		struct {
			struct its_col *col;
		} cmd_desc_invall;
	};
};

/* ITS command. Each command is 32 bytes long */
struct its_cmd {
	uint64_t	cmd_dword[4];	/* ITS command double word */
};

/* An ITS private table (one per implemented GITS_BASER register) */
struct its_ptable {
	vm_offset_t	ptab_vaddr;	/* KVA of the (L1) table */
	/* Size of the L1 and L2 tables */
	size_t		ptab_l1_size;
	size_t		ptab_l2_size;
	/* Number of L1 and L2 entries */
	int		ptab_l1_nidents;
	int		ptab_l2_nidents;

	int		ptab_page_size;	/* Page size the ITS accepted */
	int		ptab_share;	/* Negotiated shareability attribute */
	bool		ptab_indirect;	/* Two-level (indirect) table in use */
};

/* ITS collection description. */
struct its_col {
	uint64_t	col_target;	/* Target Re-Distributor */
	uint64_t	col_id;		/* Collection ID */
};

/* Per-LPI interrupt source wrapper around the INTRNG irqsrc */
struct gicv3_its_irqsrc {
	struct intr_irqsrc	gi_isrc;
	u_int			gi_id;		/* ID within the owning device */
	u_int			gi_lpi;		/* LPI number (conf table index) */
	struct its_dev		*gi_its_dev;	/* Owning ITS device, if mapped */
	TAILQ_ENTRY(gicv3_its_irqsrc) gi_link;	/* Linkage on sc_free_irqs */
};

struct gicv3_its_softc {
	device_t	dev;
	struct intr_pic *sc_pic;
	struct resource *sc_its_res;	/* ITS MMIO register frame */

	cpuset_t	sc_cpus;	/* CPUs this ITS may target */
	struct domainset *sc_ds;	/* NUMA domain policy for allocations */
	u_int		gic_irq_cpu;	/* Last CPU used for round-robin binding */
	int		sc_devbits;	/* Number of device ID bits in use */
	int		sc_dev_table_idx; /* GITS_BASER index of the device table */

	struct its_ptable sc_its_ptab[GITS_BASER_NUM];
	struct its_col *sc_its_cols[MAXCPU];	/* Per-CPU collections */

	/*
	 * TODO: We should get these from the parent as we only want a
	 * single copy of each across the interrupt controller.
	 */
	uint8_t		*sc_conf_base;		/* LPI configuration table */
	vm_offset_t sc_pend_base[MAXCPU];	/* Per-CPU LPI pending tables */

	/* Command handling */
	struct mtx sc_its_cmd_lock;
	struct its_cmd *sc_its_cmd_base;	/* Command circular buffer address */
	size_t sc_its_cmd_next_idx;		/* Next free slot in the buffer */

	vmem_t *sc_irq_alloc;			/* Allocator for INTRNG IRQ numbers */
	struct gicv3_its_irqsrc	**sc_irqs;	/* IRQ number -> irqsrc lookup */
	u_int	sc_irq_base;
	u_int	sc_irq_length;
	u_int	sc_irq_count;

	struct mtx sc_its_dev_lock;
	TAILQ_HEAD(its_dev_list, its_dev) sc_its_dev_list;
	TAILQ_HEAD(free_irqs, gicv3_its_irqsrc) sc_free_irqs;

#define	ITS_FLAGS_CMDQ_FLUSH		0x00000001	/* Flush cmd queue writes */
#define	ITS_FLAGS_LPI_CONF_FLUSH	0x00000002	/* Flush conf table writes */
#define	ITS_FLAGS_ERRATA_CAVIUM_22375	0x00000004	/* ThunderX erratum active */
	u_int sc_its_flags;
	bool	trace_enable;	/* GIC-500 tracing sysctl state */
	vm_page_t ma; /* fake msi page */
};

/*
 * LPI configuration table shared between all ITS instances; the first
 * instance to attach allocates it (see gicv3_its_conftable_init()).
 */
static void *conf_base;

typedef void (its_quirk_func_t)(device_t);
static its_quirk_func_t its_quirk_cavium_22375;

/* Implementation quirks, matched against GITS_IIDR at attach time */
static const struct {
	const char *desc;
	uint32_t iidr;
	uint32_t iidr_mask;
	its_quirk_func_t *func;
} its_quirks[] = {
	{
		/* Cavium ThunderX Pass 1.x */
		.desc = "Cavium ThunderX errata: 22375, 24313",
		.iidr = GITS_IIDR_RAW(GITS_IIDR_IMPL_CAVIUM,
	    GITS_IIDR_PROD_THUNDER, GITS_IIDR_VAR_THUNDER_1, 0),
		.iidr_mask = ~GITS_IIDR_REVISION_MASK,
		.func = its_quirk_cavium_22375,
	},
};

/* Accessors for the ITS MMIO register frame (sc_its_res) */
#define	gic_its_read_4(sc, reg)			\
    bus_read_4((sc)->sc_its_res, (reg))
#define	gic_its_read_8(sc, reg)			\
    bus_read_8((sc)->sc_its_res, (reg))

#define	gic_its_write_4(sc, reg, val)		\
    bus_write_4((sc)->sc_its_res, (reg), (val))
#define	gic_its_write_8(sc, reg, val)		\
    bus_write_8((sc)->sc_its_res, (reg), (val))

static device_attach_t gicv3_its_attach;
static device_detach_t gicv3_its_detach;

static pic_disable_intr_t gicv3_its_disable_intr;
static pic_enable_intr_t gicv3_its_enable_intr;
static pic_map_intr_t gicv3_its_map_intr;
static pic_setup_intr_t gicv3_its_setup_intr;
static pic_post_filter_t gicv3_its_post_filter;
static pic_post_ithread_t gicv3_its_post_ithread;
static pic_pre_ithread_t gicv3_its_pre_ithread;
static pic_bind_intr_t gicv3_its_bind_intr;
#ifdef SMP
static pic_init_secondary_t gicv3_its_init_secondary;
#endif
static msi_alloc_msi_t gicv3_its_alloc_msi;
static msi_release_msi_t gicv3_its_release_msi;
static msi_alloc_msix_t gicv3_its_alloc_msix;
static msi_release_msix_t gicv3_its_release_msix;
static msi_map_msi_t gicv3_its_map_msi;
#ifdef IOMMU
static msi_iommu_init_t gicv3_iommu_init;
static msi_iommu_deinit_t gicv3_iommu_deinit;
#endif

/* ITS command queue helpers, one per command type used by this driver */
static void its_cmd_movi(device_t, struct gicv3_its_irqsrc *);
static void its_cmd_mapc(device_t, struct its_col *, uint8_t);
static void its_cmd_mapti(device_t, struct gicv3_its_irqsrc *);
static void its_cmd_mapd(device_t, struct its_dev *, uint8_t);
static void its_cmd_inv(device_t, struct its_dev *, struct gicv3_its_irqsrc *);
static void its_cmd_invall(device_t, struct its_col *);

static device_method_t gicv3_its_methods[] = {
	/* Device interface */
	DEVMETHOD(device_detach, gicv3_its_detach),

	/* Interrupt controller interface */
	DEVMETHOD(pic_disable_intr,	gicv3_its_disable_intr),
	DEVMETHOD(pic_enable_intr,	gicv3_its_enable_intr),
	DEVMETHOD(pic_map_intr,		gicv3_its_map_intr),
	DEVMETHOD(pic_setup_intr,	gicv3_its_setup_intr),
	DEVMETHOD(pic_post_filter,	gicv3_its_post_filter),
	DEVMETHOD(pic_post_ithread,	gicv3_its_post_ithread),
	DEVMETHOD(pic_pre_ithread,	gicv3_its_pre_ithread),
#ifdef SMP
	DEVMETHOD(pic_bind_intr,	gicv3_its_bind_intr),
	DEVMETHOD(pic_init_secondary,	gicv3_its_init_secondary),
#endif

	/* MSI/MSI-X */
	DEVMETHOD(msi_alloc_msi,	gicv3_its_alloc_msi),
	DEVMETHOD(msi_release_msi,	gicv3_its_release_msi),
	DEVMETHOD(msi_alloc_msix,	gicv3_its_alloc_msix),
	DEVMETHOD(msi_release_msix,	gicv3_its_release_msix),
	DEVMETHOD(msi_map_msi,		gicv3_its_map_msi),
#ifdef IOMMU
	DEVMETHOD(msi_iommu_init,	gicv3_iommu_init),
	DEVMETHOD(msi_iommu_deinit,	gicv3_iommu_deinit),
#endif

	/* End */
	DEVMETHOD_END
};

static DEFINE_CLASS_0(gic, gicv3_its_driver, gicv3_its_methods,
    sizeof(struct gicv3_its_softc));

/*
 * Allocate the ITS command circular buffer and program its base into
 * GITS_CBASER. The write is read back to discover which shareability
 * attribute the hardware actually accepted; if it downgraded to
 * non-shareable, the mapping is re-written as non-cacheable and
 * ITS_FLAGS_CMDQ_FLUSH is set so each queued command is later flushed
 * from the data cache before the ITS reads it.
 */
static void
gicv3_its_cmdq_init(struct gicv3_its_softc *sc)
{
	vm_paddr_t cmd_paddr;
	uint64_t reg, tmp;

	/* Set up the command circular buffer */
	sc->sc_its_cmd_base = contigmalloc_domainset(ITS_CMDQ_SIZE, M_GICV3_ITS,
	    sc->sc_ds, M_WAITOK | M_ZERO, 0, (1ul << 48) - 1, ITS_CMDQ_ALIGN,
	    0);
	sc->sc_its_cmd_next_idx = 0;

	cmd_paddr = vtophys(sc->sc_its_cmd_base);

	/* Set the base of the command buffer */
	reg = GITS_CBASER_VALID |
	    (GITS_CBASER_CACHE_NIWAWB << GITS_CBASER_CACHE_SHIFT) |
	    cmd_paddr | (GITS_CBASER_SHARE_IS << GITS_CBASER_SHARE_SHIFT) |
	    (ITS_CMDQ_SIZE / 4096 - 1);	/* Size field is in 4KB pages, minus 1 */
	gic_its_write_8(sc, GITS_CBASER, reg);

	/* Read back to check for fixed value fields */
	tmp = gic_its_read_8(sc, GITS_CBASER);

	if ((tmp & GITS_CBASER_SHARE_MASK) !=
	    (GITS_CBASER_SHARE_IS << GITS_CBASER_SHARE_SHIFT)) {
		/* Check if the hardware reported non-shareable */
		if ((tmp & GITS_CBASER_SHARE_MASK) ==
		    (GITS_CBASER_SHARE_NS << GITS_CBASER_SHARE_SHIFT)) {
			/* If so remove the cache attribute */
			reg &= ~GITS_CBASER_CACHE_MASK;
			reg &= ~GITS_CBASER_SHARE_MASK;
			/* Set to Non-cacheable, Non-shareable */
			reg |= GITS_CBASER_CACHE_NIN << GITS_CBASER_CACHE_SHIFT;
			reg |= GITS_CBASER_SHARE_NS << GITS_CBASER_SHARE_SHIFT;

			gic_its_write_8(sc, GITS_CBASER, reg);
		}

		/* The command queue has to be flushed after each command */
		sc->sc_its_flags |= ITS_FLAGS_CMDQ_FLUSH;
	}

	/* Get the next command from the start of the buffer */
	gic_its_write_8(sc, GITS_CWRITER, 0x0);
}

/*
 * Probe the largest page size GITS_BASER(table) accepts by writing the
 * page-size field and reading it back, trying 64KB, then 16KB, then 4KB.
 * Returns the accepted page size in bytes, or -1 if none sticks.
 */
static int
gicv3_its_table_page_size(struct gicv3_its_softc *sc, int table)
{
	uint64_t reg, tmp;
	int page_size;

	page_size = PAGE_SIZE_64K;
	reg = gic_its_read_8(sc, GITS_BASER(table));

	while (1) {
		reg &= GITS_BASER_PSZ_MASK;
		switch (page_size) {
		case PAGE_SIZE_4K:	/* 4KB */
			reg |= GITS_BASER_PSZ_4K << GITS_BASER_PSZ_SHIFT;
			break;
		case PAGE_SIZE_16K:	/* 16KB */
			reg |= GITS_BASER_PSZ_16K << GITS_BASER_PSZ_SHIFT;
			break;
		case PAGE_SIZE_64K:	/* 64KB */
			reg |= GITS_BASER_PSZ_64K << GITS_BASER_PSZ_SHIFT;
			break;
		}

		/* Write the new page size */
		gic_its_write_8(sc, GITS_BASER(table), reg);

		/* Read back to check */
		tmp = gic_its_read_8(sc, GITS_BASER(table));

		/* The page size is correct */
		if ((tmp & GITS_BASER_PSZ_MASK) == (reg & GITS_BASER_PSZ_MASK))
			return (page_size);

		/* Try the next smaller size; 4KB was the last resort */
		switch (page_size) {
		default:
			return (-1);
		case PAGE_SIZE_16K:
			page_size = PAGE_SIZE_4K;
			break;
		case PAGE_SIZE_64K:
			page_size = PAGE_SIZE_16K;
			break;
		}
	}
}

/*
 * Check whether GITS_BASER(table) supports two-level (indirect) tables:
 * set the INDIRECT bit and read it back; it reads as zero when
 * unsupported.
 */
static bool
gicv3_its_table_supports_indirect(struct gicv3_its_softc *sc, int table)
{
	uint64_t reg;

	reg = gic_its_read_8(sc, GITS_BASER(table));

	/* Try setting the indirect flag */
	reg |= GITS_BASER_INDIRECT;
	gic_its_write_8(sc, GITS_BASER(table), reg);

	/* Read back to check */
	reg = gic_its_read_8(sc, GITS_BASER(table));
	return ((reg & GITS_BASER_INDIRECT) != 0);
}

/*
 * Allocate and program the ITS private tables (one per implemented
 * GITS_BASER register): size the device table from the number of device
 * ID bits (possibly indirect/two-level when a flat table would be too
 * large), allocate physically-contiguous backing memory, and negotiate
 * the shareability attribute with the hardware by read-back, retrying
 * with whatever the ITS reports until the write sticks.
 * Returns 0 on success or an errno on failure.
 */
static int
gicv3_its_table_init(device_t dev, struct gicv3_its_softc *sc)
{
	vm_offset_t table;
	vm_paddr_t paddr;
	uint64_t cache, reg, share, tmp, type;
	size_t its_tbl_size, nitspages, npages;
	size_t l1_esize, l2_esize, l1_nidents, l2_nidents;
	int i, page_size;
	int devbits;
	bool indirect;

	if ((sc->sc_its_flags & ITS_FLAGS_ERRATA_CAVIUM_22375) != 0) {
		/*
		 * GITS_TYPER[17:13] of ThunderX reports that device IDs
		 * are to be 21 bits in length. The entry size of the ITS
		 * table can be read from GITS_BASERn[52:48] and on ThunderX
		 * is supposed to be 8 bytes in length (for device table).
		 * Finally the page size that is to be used by ITS to access
		 * this table will be set to 64KB.
		 *
		 * This gives 0x200000 entries of size 0x8 bytes covered by
		 * 256 pages each of which 64KB in size. The number of pages
		 * (minus 1) should then be written to GITS_BASERn[7:0]. In
		 * that case this value would be 0xFF but on ThunderX the
		 * maximum value that HW accepts is 0xFD.
		 *
		 * Set an arbitrary number of device ID bits to 20 in order
		 * to limit the number of entries in ITS device table to
		 * 0x100000 and the table size to 8MB.
		 */
		devbits = 20;
		cache = 0;
	} else {
		devbits = GITS_TYPER_DEVB(gic_its_read_8(sc, GITS_TYPER));
		cache = GITS_BASER_CACHE_WAWB;
	}
	sc->sc_devbits = devbits;
	share = GITS_BASER_SHARE_IS;

	for (i = 0; i < GITS_BASER_NUM; i++) {
		reg = gic_its_read_8(sc, GITS_BASER(i));
		/* The type of table */
		type = GITS_BASER_TYPE(reg);
		if (type == GITS_BASER_TYPE_UNIMPL)
			continue;

		/* The table entry size */
		l1_esize = GITS_BASER_ESIZE(reg);

		/* Find the tables page size */
		page_size = gicv3_its_table_page_size(sc, i);
		if (page_size == -1) {
			device_printf(dev, "No valid page size for table %d\n",
			    i);
			return (EINVAL);
		}

		indirect = false;
		l2_nidents = 0;
		l2_esize = 0;
		switch(type) {
		case GITS_BASER_TYPE_DEV:
			if (sc->sc_dev_table_idx != -1)
				device_printf(dev,
				    "Warning: Multiple device tables found\n");

			sc->sc_dev_table_idx = i;
			l1_nidents = (1 << devbits);
			/*
			 * Only go indirect when a flat table would exceed
			 * two ITS pages and the hardware supports it.
			 */
			if ((l1_esize * l1_nidents) > (page_size * 2)) {
				indirect =
				    gicv3_its_table_supports_indirect(sc, i);
				if (indirect) {
					/*
					 * Each l1 entry is 8 bytes and points
					 * to an l2 table of size page_size.
					 * Calculate how many entries this is
					 * and use this to find how many
					 * 8 byte l1 idents we need.
					 */
					l2_esize = l1_esize;
					l2_nidents = page_size / l2_esize;
					l1_nidents = l1_nidents / l2_nidents;
					l1_esize = GITS_INDIRECT_L1_ESIZE;
				}
			}
			its_tbl_size = l1_esize * l1_nidents;
			its_tbl_size = roundup2(its_tbl_size, page_size);
			break;
		case GITS_BASER_TYPE_VP:
		case GITS_BASER_TYPE_PP: /* Undocumented? */
		case GITS_BASER_TYPE_IC:
			its_tbl_size = page_size;
			break;
		default:
			if (bootverbose)
				device_printf(dev, "Unhandled table type %lx\n",
				    type);
			continue;
		}
		npages = howmany(its_tbl_size, PAGE_SIZE);

		/* Allocate the table */
		table = (vm_offset_t)contigmalloc_domainset(npages * PAGE_SIZE,
		    M_GICV3_ITS, sc->sc_ds, M_WAITOK | M_ZERO, 0,
		    (1ul << 48) - 1, PAGE_SIZE_64K, 0);

		sc->sc_its_ptab[i].ptab_vaddr = table;
		sc->sc_its_ptab[i].ptab_l1_size = its_tbl_size;
		sc->sc_its_ptab[i].ptab_l1_nidents = l1_nidents;
		sc->sc_its_ptab[i].ptab_l2_size = page_size;
		sc->sc_its_ptab[i].ptab_l2_nidents = l2_nidents;

		sc->sc_its_ptab[i].ptab_indirect = indirect;
		sc->sc_its_ptab[i].ptab_page_size = page_size;

		paddr = vtophys(table);

		while (1) {
			nitspages = howmany(its_tbl_size, page_size);

			/* Clear the fields we will be setting */
			reg &= ~(GITS_BASER_VALID | GITS_BASER_INDIRECT |
			    GITS_BASER_CACHE_MASK | GITS_BASER_TYPE_MASK |
			    GITS_BASER_PA_MASK |
			    GITS_BASER_SHARE_MASK | GITS_BASER_PSZ_MASK |
			    GITS_BASER_SIZE_MASK);
			/* Set the new values */
			reg |= GITS_BASER_VALID |
			    (indirect ? GITS_BASER_INDIRECT : 0) |
			    (cache << GITS_BASER_CACHE_SHIFT) |
			    (type << GITS_BASER_TYPE_SHIFT) |
			    paddr | (share << GITS_BASER_SHARE_SHIFT) |
			    (nitspages - 1);

			switch (page_size) {
			case PAGE_SIZE_4K:	/* 4KB */
				reg |=
				    GITS_BASER_PSZ_4K << GITS_BASER_PSZ_SHIFT;
				break;
			case PAGE_SIZE_16K:	/* 16KB */
				reg |=
				    GITS_BASER_PSZ_16K << GITS_BASER_PSZ_SHIFT;
				break;
			case PAGE_SIZE_64K:	/* 64KB */
				reg |=
				    GITS_BASER_PSZ_64K << GITS_BASER_PSZ_SHIFT;
				break;
			}

			gic_its_write_8(sc, GITS_BASER(i), reg);

			/* Read back to check */
			tmp = gic_its_read_8(sc, GITS_BASER(i));

			/* Do the shareability masks line up? */
			if ((tmp & GITS_BASER_SHARE_MASK) !=
			    (reg & GITS_BASER_SHARE_MASK)) {
				/* Retry with what the hardware reported */
				share = (tmp & GITS_BASER_SHARE_MASK) >>
				    GITS_BASER_SHARE_SHIFT;
				continue;
			}

			if (tmp != reg) {
				device_printf(dev, "GITS_BASER%d: "
				    "unable to be updated: %lx != %lx\n",
				    i, reg, tmp);
				return (ENXIO);
			}

			sc->sc_its_ptab[i].ptab_share = share;
			/* We should have made all needed changes */
			break;
		}
	}

	return (0);
}

/*
 * Create (or attach to) the LPI configuration table. The table is
 * shared between all ITS instances via the global conf_base pointer;
 * the race between two attaching instances is resolved with a CAS, and
 * the loser frees its allocation.
 */
static void
gicv3_its_conftable_init(struct gicv3_its_softc *sc)
{
	void *conf_table;

	conf_table = atomic_load_ptr(&conf_base);
	if (conf_table == NULL) {
		conf_table = contigmalloc(LPI_CONFTAB_SIZE,
		    M_GICV3_ITS, M_WAITOK, 0, LPI_CONFTAB_MAX_ADDR,
		    LPI_CONFTAB_ALIGN, 0);

		if (atomic_cmpset_ptr((uintptr_t *)&conf_base,
		    (uintptr_t)NULL, (uintptr_t)conf_table) == 0) {
			/* Lost the race; use the winner's table */
			contigfree(conf_table, LPI_CONFTAB_SIZE, M_GICV3_ITS);
			conf_table = atomic_load_ptr(&conf_base);
		}
	}
	sc->sc_conf_base = conf_table;

	/* Set the default configuration */
	memset(sc->sc_conf_base, GIC_PRIORITY_MAX | LPI_CONF_GROUP1,
	    LPI_CONFTAB_SIZE);

	/* Flush the table to memory */
	cpu_dcache_wb_range((vm_offset_t)sc->sc_conf_base, LPI_CONFTAB_SIZE);
}

/*
 * Allocate one LPI pending table per CPU this ITS may target and flush
 * each so the ITS observes the zeroed contents.
 */
static void
gicv3_its_pendtables_init(struct gicv3_its_softc *sc)
{
	int i;

	for (i = 0; i <= mp_maxid; i++) {
		if (CPU_ISSET(i, &sc->sc_cpus) == 0)
			continue;

		sc->sc_pend_base[i] = (vm_offset_t)contigmalloc(
		    LPI_PENDTAB_SIZE, M_GICV3_ITS, M_WAITOK | M_ZERO,
		    0, LPI_PENDTAB_MAX_ADDR, LPI_PENDTAB_ALIGN, 0);

		/* Flush so the ITS can see the memory */
		cpu_dcache_wb_range(sc->sc_pend_base[i], LPI_PENDTAB_SIZE);
	}
}

/*
 * Program the current CPU's Re-Distributor with the LPI configuration
 * (GICR_PROPBASER) and pending (GICR_PENDBASER) table bases, negotiating
 * cacheability/shareability by read-back, then enable LPI delivery.
 * Must run on the CPU being initialised (uses PCPU_GET(cpuid)).
 */
static void
its_init_cpu_lpi(device_t dev, struct gicv3_its_softc *sc)
{
	device_t gicv3;
	uint64_t xbaser, tmp;
	uint32_t ctlr;
	u_int cpuid;

	gicv3 = device_get_parent(dev);
	cpuid =
	    PCPU_GET(cpuid);

	/* Disable LPIs */
	ctlr = gic_r_read_4(gicv3, GICR_CTLR);
	ctlr &= ~GICR_CTLR_LPI_ENABLE;
	gic_r_write_4(gicv3, GICR_CTLR, ctlr);

	/* Make sure changes are observable by the GIC */
	dsb(sy);

	/*
	 * Set the redistributor base
	 */
	xbaser = vtophys(sc->sc_conf_base) |
	    (GICR_PROPBASER_SHARE_IS << GICR_PROPBASER_SHARE_SHIFT) |
	    (GICR_PROPBASER_CACHE_NIWAWB << GICR_PROPBASER_CACHE_SHIFT) |
	    (flsl(LPI_CONFTAB_SIZE | GIC_FIRST_LPI) - 1);
	gic_r_write_8(gicv3, GICR_PROPBASER, xbaser);

	/* Check the cache attributes we set */
	tmp = gic_r_read_8(gicv3, GICR_PROPBASER);

	if ((tmp & GICR_PROPBASER_SHARE_MASK) !=
	    (xbaser & GICR_PROPBASER_SHARE_MASK)) {
		if ((tmp & GICR_PROPBASER_SHARE_MASK) ==
		    (GICR_PROPBASER_SHARE_NS << GICR_PROPBASER_SHARE_SHIFT)) {
			/* We need to mark as non-cacheable */
			xbaser &= ~(GICR_PROPBASER_SHARE_MASK |
			    GICR_PROPBASER_CACHE_MASK);
			/* Non-cacheable */
			xbaser |= GICR_PROPBASER_CACHE_NIN <<
			    GICR_PROPBASER_CACHE_SHIFT;
			/* Non-shareable */
			xbaser |= GICR_PROPBASER_SHARE_NS <<
			    GICR_PROPBASER_SHARE_SHIFT;
			gic_r_write_8(gicv3, GICR_PROPBASER, xbaser);
		}
		/* Writes to the conf table must be flushed from the cache */
		sc->sc_its_flags |= ITS_FLAGS_LPI_CONF_FLUSH;
	}

	/*
	 * Set the LPI pending table base
	 */
	xbaser = vtophys(sc->sc_pend_base[cpuid]) |
	    (GICR_PENDBASER_CACHE_NIWAWB << GICR_PENDBASER_CACHE_SHIFT) |
	    (GICR_PENDBASER_SHARE_IS << GICR_PENDBASER_SHARE_SHIFT);

	gic_r_write_8(gicv3, GICR_PENDBASER, xbaser);

	tmp = gic_r_read_8(gicv3, GICR_PENDBASER);

	if ((tmp & GICR_PENDBASER_SHARE_MASK) ==
	    (GICR_PENDBASER_SHARE_NS << GICR_PENDBASER_SHARE_SHIFT)) {
		/* Clear the cache and shareability bits */
		xbaser &= ~(GICR_PENDBASER_CACHE_MASK |
		    GICR_PENDBASER_SHARE_MASK);
		/* Mark as non-shareable */
		xbaser |= GICR_PENDBASER_SHARE_NS << GICR_PENDBASER_SHARE_SHIFT;
		/* And non-cacheable */
		xbaser |= GICR_PENDBASER_CACHE_NIN <<
		    GICR_PENDBASER_CACHE_SHIFT;
		/*
		 * NOTE(review): unlike the PROPBASER path above, the
		 * downgraded xbaser value is never written back to
		 * GICR_PENDBASER here — confirm whether that is intentional
		 * (the register already reads as non-shareable) or an
		 * omission.
		 */
	}

	/* Enable LPIs */
	ctlr = gic_r_read_4(gicv3, GICR_CTLR);
	ctlr |= GICR_CTLR_LPI_ENABLE;
	gic_r_write_4(gicv3, GICR_CTLR, ctlr);

	/* Make sure the GIC has seen everything */
	dsb(sy);
}

/*
 * Per-CPU ITS initialisation: perform one-time LPI setup on this CPU's
 * Re-Distributor, then map this CPU's interrupt collection to its
 * target (either the Re-Distributor physical address or the processor
 * number, depending on GITS_TYPER.PTA) and invalidate any cached
 * configuration. Returns 0 on success, ENXIO if the Re-Distributor
 * does not support physical LPIs.
 */
static int
its_init_cpu(device_t dev, struct gicv3_its_softc *sc)
{
	device_t gicv3;
	vm_paddr_t target;
	u_int cpuid;
	struct redist_pcpu *rpcpu;

	gicv3 = device_get_parent(dev);
	cpuid = PCPU_GET(cpuid);
	if (!CPU_ISSET(cpuid, &sc->sc_cpus))
		return (0);

	/* Check if the ITS is enabled on this CPU */
	if ((gic_r_read_8(gicv3, GICR_TYPER) & GICR_TYPER_PLPIS) == 0)
		return (ENXIO);

	rpcpu = gicv3_get_redist(dev);

	/* Do per-cpu LPI init once */
	if (!rpcpu->lpi_enabled) {
		its_init_cpu_lpi(dev, sc);
		rpcpu->lpi_enabled = true;
	}

	if ((gic_its_read_8(sc, GITS_TYPER) & GITS_TYPER_PTA) != 0) {
		/* This ITS wants the redistributor physical address */
		target = vtophys((vm_offset_t)rman_get_virtual(rpcpu->res) +
		    rpcpu->offset);
	} else {
		/* This ITS wants the unique processor number */
		target = GICR_TYPER_CPUNUM(gic_r_read_8(gicv3, GICR_TYPER)) <<
		    CMD_TARGET_SHIFT;
	}

	sc->sc_its_cols[cpuid]->col_target = target;
	sc->sc_its_cols[cpuid]->col_id = cpuid;

	its_cmd_mapc(dev, sc->sc_its_cols[cpuid], 1);
	its_cmd_invall(dev, sc->sc_its_cols[cpuid]);

	return (0);
}

/*
 * Sysctl handler: enable/disable the GIC-500 message tracker by writing
 * GITS_TRKCTLR (value 3 enables, 0 disables — implementation-defined
 * register).
 */
static int
gicv3_its_sysctl_trace_enable(SYSCTL_HANDLER_ARGS)
{
	struct gicv3_its_softc *sc;
	int rv;

	sc = arg1;

	rv = sysctl_handle_bool(oidp, &sc->trace_enable, 0, req);
	if (rv != 0 || req->newptr == NULL)
		return (rv);
	if (sc->trace_enable)
		gic_its_write_8(sc, GITS_TRKCTLR, 3);
	else
		gic_its_write_8(sc, GITS_TRKCTLR, 0);

	return (0);
}

/*
 * Sysctl handler: dump the GIC-500 tracking registers into an sbuf for
 * the "capture" sysctl.
 */
static int
gicv3_its_sysctl_trace_regs(SYSCTL_HANDLER_ARGS)
{
	struct gicv3_its_softc *sc;
	struct sbuf *sb;
	int err;

	sc = arg1;
	sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (sb == NULL) {
		device_printf(sc->dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}
	sbuf_cat(sb, "\n");
	sbuf_printf(sb, "GITS_TRKCTLR: 0x%08X\n",
	    gic_its_read_4(sc, GITS_TRKCTLR));
	sbuf_printf(sb, "GITS_TRKR:    0x%08X\n",
	    gic_its_read_4(sc, GITS_TRKR));
	sbuf_printf(sb, "GITS_TRKDIDR: 0x%08X\n",
	    gic_its_read_4(sc, GITS_TRKDIDR));
	sbuf_printf(sb, "GITS_TRKPIDR: 0x%08X\n",
	    gic_its_read_4(sc, GITS_TRKPIDR));
	sbuf_printf(sb, "GITS_TRKVIDR: 0x%08X\n",
	    gic_its_read_4(sc, GITS_TRKVIDR));
	sbuf_printf(sb, "GITS_TRKTGTR: 0x%08X\n",
	    gic_its_read_4(sc, GITS_TRKTGTR));

	err = sbuf_finish(sb);
	if (err)
		device_printf(sc->dev, "Error finishing sbuf: %d\n", err);
	sbuf_delete(sb);
	return(err);
}

/*
 * Install the "tracing" sysctl node with the enable/capture handlers
 * above. Only called for GIC-500 (see the IIDR check in attach).
 */
static int
gicv3_its_init_sysctl(struct gicv3_its_softc *sc)
{
	struct sysctl_oid *oid, *child;
	struct sysctl_ctx_list *ctx_list;

	ctx_list = device_get_sysctl_ctx(sc->dev);
	child = device_get_sysctl_tree(sc->dev);
	oid = SYSCTL_ADD_NODE(ctx_list,
	    SYSCTL_CHILDREN(child), OID_AUTO, "tracing",
	    CTLFLAG_RD| CTLFLAG_MPSAFE, NULL, "Messages tracing");
	if (oid == NULL)
		return (ENXIO);

	/* Add registers */
	SYSCTL_ADD_PROC(ctx_list,
	    SYSCTL_CHILDREN(oid), OID_AUTO, "enable",
	    CTLTYPE_U8 | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
	    gicv3_its_sysctl_trace_enable, "CU", "Enable tracing");
	SYSCTL_ADD_PROC(ctx_list,
	    SYSCTL_CHILDREN(oid), OID_AUTO, "capture",
	    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
	    gicv3_its_sysctl_trace_regs, "", "Captured tracing registers.");

	return (0);
}

/*
 * Device attach: map the ITS register frame, apply implementation
 * quirks, allocate and program the private tables, command queue, LPI
 * configuration and pending tables, enable the ITS, and set up the IRQ
 * allocator for MSI/MSI-X.
 */
static int
gicv3_its_attach(device_t dev)
{
	struct gicv3_its_softc *sc;
	int domain, err, i, rid;
	uint64_t phys;
	uint32_t ctlr, iidr;

	sc =
	    device_get_softc(dev);

	sc->sc_dev_table_idx = -1;
	sc->sc_irq_length = gicv3_get_nirqs(dev);
	/* Each ITS unit gets its own contiguous window of IRQ numbers */
	sc->sc_irq_base = GIC_FIRST_LPI;
	sc->sc_irq_base += device_get_unit(dev) * sc->sc_irq_length;

	rid = 0;
	sc->sc_its_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->sc_its_res == NULL) {
		device_printf(dev, "Could not allocate memory\n");
		return (ENXIO);
	}

	/* Fake page backing the GITS_TRANSLATER doorbell for MSI mapping */
	phys = rounddown2(vtophys(rman_get_virtual(sc->sc_its_res)) +
	    GITS_TRANSLATER, PAGE_SIZE);
	sc->ma = malloc(sizeof(struct vm_page), M_DEVBUF, M_WAITOK | M_ZERO);
	vm_page_initfake(sc->ma, phys, VM_MEMATTR_DEFAULT);

	CPU_COPY(&all_cpus, &sc->sc_cpus);
	iidr = gic_its_read_4(sc, GITS_IIDR);
	for (i = 0; i < nitems(its_quirks); i++) {
		if ((iidr & its_quirks[i].iidr_mask) == its_quirks[i].iidr) {
			if (bootverbose) {
				device_printf(dev, "Applying %s\n",
				    its_quirks[i].desc);
			}
			its_quirks[i].func(dev);
			break;
		}
	}

	if (bus_get_domain(dev, &domain) == 0 && domain < MAXMEMDOM) {
		sc->sc_ds = DOMAINSET_PREF(domain);
	} else {
		sc->sc_ds = DOMAINSET_RR();
	}

	/*
	 * GITS_CTLR_EN is mandated to reset to 0 on a Warm reset, but we may
	 * be coming in via, for instance, a kexec/kboot style setup where a
	 * previous kernel has configured then relinquished control. Clear it
	 * so that we can reconfigure GITS_BASER*.
	 */
	ctlr = gic_its_read_4(sc, GITS_CTLR);
	if ((ctlr & GITS_CTLR_EN) != 0) {
		ctlr &= ~GITS_CTLR_EN;
		gic_its_write_4(sc, GITS_CTLR, ctlr);
	}

	/* Allocate the private tables */
	err = gicv3_its_table_init(dev, sc);
	if (err != 0)
		return (err);

	/* Protects access to the device list */
	mtx_init(&sc->sc_its_dev_lock, "ITS device lock", NULL, MTX_SPIN);

	/* Protects access to the ITS command circular buffer. */
	mtx_init(&sc->sc_its_cmd_lock, "ITS cmd lock", NULL, MTX_SPIN);

	/* Allocate the command circular buffer */
	gicv3_its_cmdq_init(sc);

	/* Allocate the per-CPU collections */
	for (int cpu = 0; cpu <= mp_maxid; cpu++)
		if (CPU_ISSET(cpu, &sc->sc_cpus) != 0)
			sc->sc_its_cols[cpu] = malloc_domainset(
			    sizeof(*sc->sc_its_cols[0]), M_GICV3_ITS,
			    DOMAINSET_PREF(pcpu_find(cpu)->pc_domain),
			    M_WAITOK | M_ZERO);
		else
			sc->sc_its_cols[cpu] = NULL;

	/* Enable the ITS */
	gic_its_write_4(sc, GITS_CTLR, ctlr | GITS_CTLR_EN);

	/* Create the LPI configuration table */
	gicv3_its_conftable_init(sc);

	/* And the pending tables */
	gicv3_its_pendtables_init(sc);

	/* Enable LPIs on this CPU */
	its_init_cpu(dev, sc);

	TAILQ_INIT(&sc->sc_its_dev_list);
	TAILQ_INIT(&sc->sc_free_irqs);

	/*
	 * Create the vmem object to allocate INTRNG IRQs from. We try to
	 * use all IRQs not already used by the GICv3.
	 * XXX: This assumes there are no other interrupt controllers in the
	 * system.
	 */
	sc->sc_irq_alloc = vmem_create(device_get_nameunit(dev), 0,
	    gicv3_get_nirqs(dev), 1, 0, M_FIRSTFIT | M_WAITOK);

	sc->sc_irqs = malloc(sizeof(*sc->sc_irqs) * sc->sc_irq_length,
	    M_GICV3_ITS, M_WAITOK | M_ZERO);

	/* For GIC-500 install tracking sysctls. */
	if ((iidr & (GITS_IIDR_PRODUCT_MASK | GITS_IIDR_IMPLEMENTOR_MASK)) ==
	    GITS_IIDR_RAW(GITS_IIDR_IMPL_ARM, GITS_IIDR_PROD_GIC500, 0, 0))
		gicv3_its_init_sysctl(sc);

	return (0);
}

/* Detach is not supported; the ITS cannot be torn down once enabled */
static int
gicv3_its_detach(device_t dev)
{

	return (ENXIO);
}

/*
 * Cavium ThunderX erratum 22375/24313 workaround: flag the erratum for
 * gicv3_its_table_init() and restrict interrupt targets to this ITS's
 * own NUMA domain, as the original dual-socket ThunderX cannot forward
 * LPIs between sockets.
 */
static void
its_quirk_cavium_22375(device_t dev)
{
	struct gicv3_its_softc *sc;
	int domain;

	sc = device_get_softc(dev);
	sc->sc_its_flags |= ITS_FLAGS_ERRATA_CAVIUM_22375;

	/*
	 * We need to limit which CPUs we send these interrupts to on
	 * the original dual socket ThunderX as it is unable to
	 * forward them between the two sockets.
	 */
	if (bus_get_domain(dev, &domain) == 0) {
		if (domain < MAXMEMDOM) {
			CPU_COPY(&cpuset_domain[domain], &sc->sc_cpus);
		} else {
			CPU_ZERO(&sc->sc_cpus);
		}
	}
}

/*
 * Mask an LPI: clear its enable bit in the LPI configuration table,
 * make the change visible to the ITS (cache clean or barrier depending
 * on the negotiated shareability), then issue an INV command so the
 * ITS drops any cached copy of the configuration.
 */
static void
gicv3_its_disable_intr(device_t dev, struct intr_irqsrc *isrc)
{
	struct gicv3_its_softc *sc;
	struct gicv3_its_irqsrc *girq;
	uint8_t *conf;

	sc = device_get_softc(dev);
	girq = (struct gicv3_its_irqsrc *)isrc;
	conf = sc->sc_conf_base;

	conf[girq->gi_lpi] &= ~LPI_CONF_ENABLE;

	if ((sc->sc_its_flags & ITS_FLAGS_LPI_CONF_FLUSH) != 0) {
		/* Clean D-cache under command.
*/ 1093 cpu_dcache_wb_range((vm_offset_t)&conf[girq->gi_lpi], 1); 1094 } else { 1095 /* DSB inner shareable, store */ 1096 dsb(ishst); 1097 } 1098 1099 its_cmd_inv(dev, girq->gi_its_dev, girq); 1100 } 1101 1102 static void 1103 gicv3_its_enable_intr(device_t dev, struct intr_irqsrc *isrc) 1104 { 1105 struct gicv3_its_softc *sc; 1106 struct gicv3_its_irqsrc *girq; 1107 uint8_t *conf; 1108 1109 sc = device_get_softc(dev); 1110 girq = (struct gicv3_its_irqsrc *)isrc; 1111 conf = sc->sc_conf_base; 1112 1113 conf[girq->gi_lpi] |= LPI_CONF_ENABLE; 1114 1115 if ((sc->sc_its_flags & ITS_FLAGS_LPI_CONF_FLUSH) != 0) { 1116 /* Clean D-cache under command. */ 1117 cpu_dcache_wb_range((vm_offset_t)&conf[girq->gi_lpi], 1); 1118 } else { 1119 /* DSB inner shareable, store */ 1120 dsb(ishst); 1121 } 1122 1123 its_cmd_inv(dev, girq->gi_its_dev, girq); 1124 } 1125 1126 static int 1127 gicv3_its_intr(void *arg, uintptr_t irq) 1128 { 1129 struct gicv3_its_softc *sc = arg; 1130 struct gicv3_its_irqsrc *girq; 1131 struct trapframe *tf; 1132 1133 irq -= sc->sc_irq_base; 1134 girq = sc->sc_irqs[irq]; 1135 if (girq == NULL) 1136 panic("gicv3_its_intr: Invalid interrupt %ld", 1137 irq + sc->sc_irq_base); 1138 1139 tf = curthread->td_intr_frame; 1140 intr_isrc_dispatch(&girq->gi_isrc, tf); 1141 return (FILTER_HANDLED); 1142 } 1143 1144 static void 1145 gicv3_its_pre_ithread(device_t dev, struct intr_irqsrc *isrc) 1146 { 1147 struct gicv3_its_irqsrc *girq; 1148 1149 girq = (struct gicv3_its_irqsrc *)isrc; 1150 gic_icc_write(EOIR1, girq->gi_lpi + GIC_FIRST_LPI); 1151 } 1152 1153 static void 1154 gicv3_its_post_ithread(device_t dev, struct intr_irqsrc *isrc) 1155 { 1156 1157 } 1158 1159 static void 1160 gicv3_its_post_filter(device_t dev, struct intr_irqsrc *isrc) 1161 { 1162 struct gicv3_its_irqsrc *girq; 1163 1164 girq = (struct gicv3_its_irqsrc *)isrc; 1165 gic_icc_write(EOIR1, girq->gi_lpi + GIC_FIRST_LPI); 1166 } 1167 1168 static int 1169 gicv3_its_select_cpu(device_t dev, struct 
    intr_irqsrc *isrc)
{
	struct gicv3_its_softc *sc;

	sc = device_get_softc(dev);
	/* Only choose a CPU if the framework has not already bound one. */
	if (CPU_EMPTY(&isrc->isrc_cpu)) {
		sc->gic_irq_cpu = intr_irq_next_cpu(sc->gic_irq_cpu,
		    &sc->sc_cpus);
		CPU_SETOF(sc->gic_irq_cpu, &isrc->isrc_cpu);
	}

	return (0);
}

/*
 * PIC interface: bind an LPI to a CPU.  Choose a target CPU if needed and
 * issue a MOVI command to retarget the interrupt's collection.
 */
static int
gicv3_its_bind_intr(device_t dev, struct intr_irqsrc *isrc)
{
	struct gicv3_its_irqsrc *girq;

	gicv3_its_select_cpu(dev, isrc);

	girq = (struct gicv3_its_irqsrc *)isrc;
	its_cmd_movi(dev, girq);
	return (0);
}

/*
 * PIC interface: map a pre-discovered interrupt.  MSIs are always
 * allocated through the MSI interface, so reaching this is a bug.
 */
static int
gicv3_its_map_intr(device_t dev, struct intr_map_data *data,
    struct intr_irqsrc **isrcp)
{

	/*
	 * This should never happen, we only call this function to map
	 * interrupts found before the controller driver is ready.
	 */
	panic("gicv3_its_map_intr: Unable to map a MSI interrupt");
}

/*
 * PIC interface: set up an LPI for use by binding it to a CPU.
 */
static int
gicv3_its_setup_intr(device_t dev, struct intr_irqsrc *isrc,
    struct resource *res, struct intr_map_data *data)
{

	/* Bind the interrupt to a CPU */
	gicv3_its_bind_intr(dev, isrc);

	return (0);
}

#ifdef SMP
/*
 * Bring up LPI support on a secondary CPU as it comes online.
 */
static void
gicv3_its_init_secondary(device_t dev)
{
	struct gicv3_its_softc *sc;

	sc = device_get_softc(dev);

	/*
	 * This is fatal as otherwise we may bind interrupts to this CPU.
	 * We need a way to tell the interrupt framework to only bind to a
	 * subset of given CPUs when it performs the shuffle.
	 */
	if (its_init_cpu(dev, sc) != 0)
		panic("gicv3_its_init_secondary: No usable ITS on CPU%d",
		    PCPU_GET(cpuid));
}
#endif

/*
 * Look up the MSI DeviceID the ITS uses to identify this PCI device.
 * Fatal if the bus cannot provide one, as no MSI can be routed without it.
 */
static uint32_t
its_get_devid(device_t pci_dev)
{
	uintptr_t id;

	if (pci_get_id(pci_dev, PCI_ID_MSI, &id) != 0)
		panic("%s: %s: Unable to get the MSI DeviceID", __func__,
		    device_get_nameunit(pci_dev));

	return (id);
}

/*
 * Find the ITS device state previously created for a child device, or
 * NULL if none exists.  Walks sc_its_dev_list under the device lock.
 */
static struct its_dev *
its_device_find(device_t dev, device_t child)
{
	struct gicv3_its_softc *sc;
	struct its_dev *its_dev = NULL;

	sc = device_get_softc(dev);

	mtx_lock_spin(&sc->sc_its_dev_lock);
	TAILQ_FOREACH(its_dev, &sc->sc_its_dev_list, entry) {
		if (its_dev->pci_dev == child)
			break;
	}
	mtx_unlock_spin(&sc->sc_its_dev_lock);

	return (its_dev);
}

/*
 * Ensure the ITS device table can hold an entry for devid, allocating a
 * second-level table when the device table is indirect.  Returns false if
 * the devid is out of range for the hardware or the table.
 */
static bool
its_device_alloc(struct gicv3_its_softc *sc, int devid)
{
	struct its_ptable *ptable;
	vm_offset_t l2_table;
	uint64_t *table;
	uint32_t index;
	bool shareable;

	/* No device table */
	if (sc->sc_dev_table_idx < 0) {
		if (devid >= (1 << sc->sc_devbits)) {
			if (bootverbose) {
				device_printf(sc->dev,
				    "%s: Device out of range for hardware "
				    "(%x >= %x)\n", __func__, devid,
				    1 << sc->sc_devbits);
			}
			return (false);
		}
		return (true);
	}

	ptable = &sc->sc_its_ptab[sc->sc_dev_table_idx];
	/* Check the devid is within the table limit */
	if (!ptable->ptab_indirect) {
		if (devid >= ptable->ptab_l1_nidents) {
			if (bootverbose) {
				device_printf(sc->dev,
				    "%s: Device out of range for table "
				    "(%x >= %x)\n", __func__, devid,
				    ptable->ptab_l1_nidents);
			}
			return (false);
		}

		return (true);
	}

	/* Check the devid is within the allocated range */
	index = devid / ptable->ptab_l2_nidents;
	if (index >= ptable->ptab_l1_nidents) {
		if
 (bootverbose) {
			device_printf(sc->dev,
			    "%s: Index out of range for table (%x >= %x)\n",
			    __func__, index, ptable->ptab_l1_nidents);
		}
		return (false);
	}

	table = (uint64_t *)ptable->ptab_vaddr;
	/* The second-level table for this index already exists. */
	if ((table[index] & GITS_BASER_VALID) != 0)
		return (true);

	shareable = true;
	if ((ptable->ptab_share & GITS_BASER_SHARE_MASK) == GITS_BASER_SHARE_NS)
		shareable = false;

	/* Allocate the second-level table; PA must fit in 48 bits. */
	l2_table = (vm_offset_t)contigmalloc_domainset(ptable->ptab_l2_size,
	    M_GICV3_ITS, sc->sc_ds, M_WAITOK | M_ZERO, 0, (1ul << 48) - 1,
	    ptable->ptab_page_size, 0);

	/* Non-shareable: the ITS may not snoop, so clean the cache. */
	if (!shareable)
		cpu_dcache_wb_range(l2_table, ptable->ptab_l2_size);

	table[index] = vtophys(l2_table) | GITS_BASER_VALID;
	if (!shareable)
		cpu_dcache_wb_range((vm_offset_t)&table[index],
		    sizeof(table[index]));

	/* Ensure the table update is visible before any ITS command uses it. */
	dsb(sy);
	return (true);
}

/*
 * Find or create the per-device ITS state for a child, reserving nvecs
 * LPIs, allocating its Interrupt Translation Table (ITT) and issuing a
 * MAPD command.  Returns NULL on allocation failure.
 */
static struct its_dev *
its_device_get(device_t dev, device_t child, u_int nvecs)
{
	struct gicv3_its_softc *sc;
	struct its_dev *its_dev;
	vmem_addr_t irq_base;
	size_t esize;

	sc = device_get_softc(dev);

	/* Reuse existing state if this child was already set up. */
	its_dev = its_device_find(dev, child);
	if (its_dev != NULL)
		return (its_dev);

	its_dev = malloc(sizeof(*its_dev), M_GICV3_ITS, M_NOWAIT | M_ZERO);
	if (its_dev == NULL)
		return (NULL);

	its_dev->pci_dev = child;
	its_dev->devid = its_get_devid(child);

	its_dev->lpis.lpi_busy = 0;
	its_dev->lpis.lpi_num = nvecs;
	its_dev->lpis.lpi_free = nvecs;

	if (!its_device_alloc(sc, its_dev->devid)) {
		free(its_dev, M_GICV3_ITS);
		return (NULL);
	}

	/* Reserve a contiguous range of IRQ numbers for this device. */
	if (vmem_alloc(sc->sc_irq_alloc, nvecs, M_FIRSTFIT | M_NOWAIT,
	    &irq_base) != 0) {
		free(its_dev, M_GICV3_ITS);
		return (NULL);
	}
	its_dev->lpis.lpi_base = irq_base;

	/* Get ITT entry size */
	esize =
 GITS_TYPER_ITTES(gic_its_read_8(sc, GITS_TYPER));

	/*
	 * Allocate ITT for this device.
	 * PA has to be 256 B aligned. At least two entries for device.
	 */
	its_dev->itt_size = roundup2(MAX(nvecs, 2) * esize, 256);
	its_dev->itt = (vm_offset_t)contigmalloc_domainset(its_dev->itt_size,
	    M_GICV3_ITS, sc->sc_ds, M_NOWAIT | M_ZERO, 0,
	    LPI_INT_TRANS_TAB_MAX_ADDR, LPI_INT_TRANS_TAB_ALIGN, 0);
	if (its_dev->itt == 0) {
		vmem_free(sc->sc_irq_alloc, its_dev->lpis.lpi_base, nvecs);
		free(its_dev, M_GICV3_ITS);
		return (NULL);
	}

	/* Make sure device sees zeroed ITT. */
	if ((sc->sc_its_flags & ITS_FLAGS_CMDQ_FLUSH) != 0)
		cpu_dcache_wb_range(its_dev->itt, its_dev->itt_size);

	mtx_lock_spin(&sc->sc_its_dev_lock);
	TAILQ_INSERT_TAIL(&sc->sc_its_dev_list, its_dev, entry);
	mtx_unlock_spin(&sc->sc_its_dev_lock);

	/* Map device to its ITT */
	its_cmd_mapd(dev, its_dev, 1);

	return (its_dev);
}

/*
 * Tear down the per-device ITS state created by its_device_get: unmap the
 * device in the ITS, drop it from the device list, and free its ITT and
 * IRQ range.  The caller must have released all of its LPIs first.
 */
static void
its_device_release(device_t dev, struct its_dev *its_dev)
{
	struct gicv3_its_softc *sc;

	KASSERT(its_dev->lpis.lpi_busy == 0,
	    ("its_device_release: Trying to release an inuse ITS device"));

	/* Unmap device in ITS */
	its_cmd_mapd(dev, its_dev, 0);

	sc = device_get_softc(dev);

	/* Remove the device from the list of devices */
	mtx_lock_spin(&sc->sc_its_dev_lock);
	TAILQ_REMOVE(&sc->sc_its_dev_list, its_dev, entry);
	mtx_unlock_spin(&sc->sc_its_dev_lock);

	/* Free ITT */
	KASSERT(its_dev->itt != 0, ("Invalid ITT in valid ITS device"));
	contigfree((void *)its_dev->itt, its_dev->itt_size, M_GICV3_ITS);

	/* Free the IRQ allocation */
	vmem_free(sc->sc_irq_alloc, its_dev->lpis.lpi_base,
	    its_dev->lpis.lpi_num);

	free(its_dev, M_GICV3_ITS);
}

/*
 * Allocate an interrupt source for IRQ number irq, reusing one from the
 * free list when possible to avoid re-registering with the framework.
 */
static struct gicv3_its_irqsrc *
gicv3_its_alloc_irqsrc(device_t dev, struct
    gicv3_its_softc *sc, u_int irq)
{
	struct gicv3_its_irqsrc *girq = NULL;

	KASSERT(sc->sc_irqs[irq] == NULL,
	    ("%s: Interrupt %u already allocated", __func__, irq));
	/* Prefer recycling a previously released irqsrc. */
	mtx_lock_spin(&sc->sc_its_dev_lock);
	if (!TAILQ_EMPTY(&sc->sc_free_irqs)) {
		girq = TAILQ_FIRST(&sc->sc_free_irqs);
		TAILQ_REMOVE(&sc->sc_free_irqs, girq, gi_link);
	}
	mtx_unlock_spin(&sc->sc_its_dev_lock);
	if (girq == NULL) {
		girq = malloc(sizeof(*girq), M_GICV3_ITS,
		    M_NOWAIT | M_ZERO);
		if (girq == NULL)
			return (NULL);
		girq->gi_id = -1;
		if (intr_isrc_register(&girq->gi_isrc, dev, 0,
		    "%s,%u", device_get_nameunit(dev), irq) != 0) {
			free(girq, M_GICV3_ITS);
			return (NULL);
		}
	}
	girq->gi_lpi = irq + sc->sc_irq_base - GIC_FIRST_LPI;
	sc->sc_irqs[irq] = girq;

	return (girq);
}

/*
 * Return an interrupt source to the free list for later reuse.  The
 * irqsrc stays registered with the framework.  Caller holds the device
 * lock.
 */
static void
gicv3_its_release_irqsrc(struct gicv3_its_softc *sc,
    struct gicv3_its_irqsrc *girq)
{
	u_int irq;

	mtx_assert(&sc->sc_its_dev_lock, MA_OWNED);

	irq = girq->gi_lpi + GIC_FIRST_LPI - sc->sc_irq_base;
	sc->sc_irqs[irq] = NULL;

	girq->gi_id = -1;
	girq->gi_its_dev = NULL;
	TAILQ_INSERT_TAIL(&sc->sc_free_irqs, girq, gi_link);
}

/*
 * MSI interface: allocate count MSI vectors for a child device.  Reserves
 * the LPIs, creates an irqsrc per vector and issues MAPTI commands.
 */
static int
gicv3_its_alloc_msi(device_t dev, device_t child, int count, int maxcount,
    device_t *pic, struct intr_irqsrc **srcs)
{
	struct gicv3_its_softc *sc;
	struct gicv3_its_irqsrc *girq;
	struct its_dev *its_dev;
	u_int irq;
	int i;

	its_dev = its_device_get(dev, child, count);
	if (its_dev == NULL)
		return (ENXIO);

	KASSERT(its_dev->lpis.lpi_free >= count,
	    ("gicv3_its_alloc_msi: No free LPIs"));
	sc = device_get_softc(dev);
	/* First unused IRQ in this device's reserved range. */
	irq = its_dev->lpis.lpi_base + its_dev->lpis.lpi_num -
	    its_dev->lpis.lpi_free;

	/* Allocate the irqsrc for each MSI */
	for (i = 0; i < count; i++, irq++) {
		its_dev->lpis.lpi_free--;
		srcs[i] = (struct intr_irqsrc *)gicv3_its_alloc_irqsrc(dev,
		    sc, irq);
		if (srcs[i] == NULL)
			break;
	}

	/* The allocation failed, release them */
	if (i != count) {
		mtx_lock_spin(&sc->sc_its_dev_lock);
		for (i = 0; i < count; i++) {
			girq = (struct gicv3_its_irqsrc *)srcs[i];
			if (girq == NULL)
				break;
			gicv3_its_release_irqsrc(sc, girq);
			srcs[i] = NULL;
		}
		mtx_unlock_spin(&sc->sc_its_dev_lock);
		return (ENXIO);
	}

	/* Finish the allocation now we have all MSI irqsrcs */
	for (i = 0; i < count; i++) {
		girq = (struct gicv3_its_irqsrc *)srcs[i];
		/* gi_id is the EventID sent by the device. */
		girq->gi_id = i;
		girq->gi_its_dev = its_dev;

		/* Map the message to the given IRQ */
		gicv3_its_select_cpu(dev, (struct intr_irqsrc *)girq);
		its_cmd_mapti(dev, girq);
	}
	its_dev->lpis.lpi_busy += count;
	*pic = dev;

	return (0);
}

/*
 * MSI interface: release count MSI vectors previously allocated for a
 * child device, tearing down the ITS device state once none remain busy.
 */
static int
gicv3_its_release_msi(device_t dev, device_t child, int count,
    struct intr_irqsrc **isrc)
{
	struct gicv3_its_softc *sc;
	struct gicv3_its_irqsrc *girq;
	struct its_dev *its_dev;
	int i;

	its_dev = its_device_find(dev, child);

	KASSERT(its_dev != NULL,
	    ("gicv3_its_release_msi: Releasing a MSI interrupt with "
	     "no ITS device"));
	KASSERT(its_dev->lpis.lpi_busy >= count,
	    ("gicv3_its_release_msi: Releasing more interrupts than "
	     "were allocated: releasing %d, allocated %d", count,
	     its_dev->lpis.lpi_busy));

	sc = device_get_softc(dev);
	mtx_lock_spin(&sc->sc_its_dev_lock);
	for (i = 0; i < count; i++) {
		girq = (struct gicv3_its_irqsrc *)isrc[i];
		gicv3_its_release_irqsrc(sc, girq);
	}
	mtx_unlock_spin(&sc->sc_its_dev_lock);
	its_dev->lpis.lpi_busy -= count;

	/* Last vector gone: unmap and free the whole device state. */
	if (its_dev->lpis.lpi_busy == 0)
		its_device_release(dev, its_dev);

	return (0);
}

static int
gicv3_its_alloc_msix(device_t dev, device_t child, device_t *pic,
    struct intr_irqsrc **isrcp)
{
	struct gicv3_its_softc *sc;
	struct gicv3_its_irqsrc *girq;
	struct its_dev *its_dev;
	u_int nvecs, irq;

	/*
	 * MSI interface: allocate one MSI-X vector.  The device state is
	 * sized for the child's full MSI-X table on first use.
	 */
	nvecs = pci_msix_count(child);
	its_dev = its_device_get(dev, child, nvecs);
	if (its_dev == NULL)
		return (ENXIO);

	KASSERT(its_dev->lpis.lpi_free > 0,
	    ("gicv3_its_alloc_msix: No free LPIs"));
	sc = device_get_softc(dev);
	/* Next unused IRQ in this device's reserved range. */
	irq = its_dev->lpis.lpi_base + its_dev->lpis.lpi_num -
	    its_dev->lpis.lpi_free;

	girq = gicv3_its_alloc_irqsrc(dev, sc, irq);
	if (girq == NULL)
		return (ENXIO);
	/* gi_id is the EventID sent by the device. */
	girq->gi_id = its_dev->lpis.lpi_busy;
	girq->gi_its_dev = its_dev;

	its_dev->lpis.lpi_free--;
	its_dev->lpis.lpi_busy++;

	/* Map the message to the given IRQ */
	gicv3_its_select_cpu(dev, (struct intr_irqsrc *)girq);
	its_cmd_mapti(dev, girq);

	*pic = dev;
	*isrcp = (struct intr_irqsrc *)girq;

	return (0);
}

/*
 * MSI interface: release one MSI-X vector, tearing down the ITS device
 * state once none remain busy.
 */
static int
gicv3_its_release_msix(device_t dev, device_t child, struct intr_irqsrc *isrc)
{
	struct gicv3_its_softc *sc;
	struct gicv3_its_irqsrc *girq;
	struct its_dev *its_dev;

	its_dev = its_device_find(dev, child);

	KASSERT(its_dev != NULL,
	    ("gicv3_its_release_msix: Releasing a MSI-X interrupt with "
	     "no ITS device"));
	KASSERT(its_dev->lpis.lpi_busy > 0,
	    ("gicv3_its_release_msix: Releasing more interrupts than "
	     "were allocated: allocated %d", its_dev->lpis.lpi_busy));

	sc = device_get_softc(dev);
	girq = (struct gicv3_its_irqsrc *)isrc;
	mtx_lock_spin(&sc->sc_its_dev_lock);
	gicv3_its_release_irqsrc(sc, girq);
	mtx_unlock_spin(&sc->sc_its_dev_lock);
	its_dev->lpis.lpi_busy--;

	/* Last vector gone: unmap and free the whole device state. */
	if (its_dev->lpis.lpi_busy == 0)
		its_device_release(dev, its_dev);

	return (0);
}

static int
gicv3_its_map_msi(device_t dev, device_t child, struct intr_irqsrc *isrc,
    uint64_t *addr, uint32_t *data)
{
	struct gicv3_its_softc *sc;
	struct gicv3_its_irqsrc *girq;

	/*
	 * MSI interface: report the doorbell address/data pair the child
	 * should program: the GITS_TRANSLATER register and the EventID.
	 */
	sc = device_get_softc(dev);
	girq = (struct gicv3_its_irqsrc *)isrc;

	*addr = vtophys(rman_get_virtual(sc->sc_its_res)) + GITS_TRANSLATER;
	*data = girq->gi_id;

	return (0);
}

#ifdef IOMMU
/*
 * Make the GITS_TRANSLATER doorbell reachable through the child's IOMMU
 * context and return the domain it was mapped in.
 */
static int
gicv3_iommu_init(device_t dev, device_t child, struct iommu_domain **domain)
{
	struct gicv3_its_softc *sc;
	struct iommu_ctx *ctx;
	int error;

	sc = device_get_softc(dev);
	ctx = iommu_get_dev_ctx(child);
	if (ctx == NULL)
		return (ENXIO);
	/* Map the page containing the GITS_TRANSLATER register. */
	error = iommu_map_msi(ctx, PAGE_SIZE, 0,
	    IOMMU_MAP_ENTRY_WRITE, IOMMU_MF_CANWAIT, &sc->ma);
	*domain = iommu_get_ctx_domain(ctx);

	return (error);
}

/*
 * Remove the doorbell mapping installed by gicv3_iommu_init.
 */
static void
gicv3_iommu_deinit(device_t dev, device_t child)
{
	struct iommu_ctx *ctx;

	ctx = iommu_get_dev_ctx(child);
	if (ctx == NULL)
		return;

	iommu_unmap_msi(ctx);
}
#endif

/*
 * Commands handling.
 */

/*
 * The cmd_format_* helpers below each read-modify-write one field of a
 * 32-byte ITS command, stored as four little-endian doublewords.
 */

static __inline void
cmd_format_command(struct its_cmd *cmd, uint8_t cmd_type)
{
	/* Command field: DW0 [7:0] */
	cmd->cmd_dword[0] &= htole64(~CMD_COMMAND_MASK);
	cmd->cmd_dword[0] |= htole64(cmd_type);
}

static __inline void
cmd_format_devid(struct its_cmd *cmd, uint32_t devid)
{
	/* Device ID field: DW0 [63:32] */
	cmd->cmd_dword[0] &= htole64(~CMD_DEVID_MASK);
	cmd->cmd_dword[0] |= htole64((uint64_t)devid << CMD_DEVID_SHIFT);
}

static __inline void
cmd_format_size(struct its_cmd *cmd, uint16_t size)
{
	/* Size field: DW1 [4:0] */
	cmd->cmd_dword[1] &= htole64(~CMD_SIZE_MASK);
	cmd->cmd_dword[1] |= htole64((size & CMD_SIZE_MASK));
}

static __inline void
cmd_format_id(struct its_cmd *cmd, uint32_t id)
{
	/* ID field: DW1 [31:0] */
	cmd->cmd_dword[1] &= htole64(~CMD_ID_MASK);
	cmd->cmd_dword[1] |= htole64(id);
}

static __inline void
cmd_format_pid(struct its_cmd *cmd, uint32_t pid)
{
	/* Physical ID field: DW1 [63:32] */
	cmd->cmd_dword[1] &= htole64(~CMD_PID_MASK);
	cmd->cmd_dword[1] |= htole64((uint64_t)pid << CMD_PID_SHIFT);
}

static __inline void
cmd_format_col(struct its_cmd *cmd, uint16_t col_id)
{
	/* Collection field: DW2 [15:0] (ICID is a 16-bit field) */
	cmd->cmd_dword[2] &= htole64(~CMD_COL_MASK);
	cmd->cmd_dword[2] |= htole64(col_id);
}

static __inline void
cmd_format_target(struct its_cmd *cmd, uint64_t target)
{
	/* Target Address field: DW2 [47:16] */
	cmd->cmd_dword[2] &= htole64(~CMD_TARGET_MASK);
	cmd->cmd_dword[2] |= htole64(target & CMD_TARGET_MASK);
}

static __inline void
cmd_format_itt(struct its_cmd *cmd, uint64_t itt)
{
	/* ITT Address field: DW2 [47:8] */
	cmd->cmd_dword[2] &= htole64(~CMD_ITT_MASK);
	cmd->cmd_dword[2] |= htole64(itt & CMD_ITT_MASK);
}

static __inline void
cmd_format_valid(struct its_cmd *cmd, uint8_t valid)
{
	/* Valid field: DW2 [63] */
	cmd->cmd_dword[2] &= htole64(~CMD_VALID_MASK);
	cmd->cmd_dword[2] |= htole64((uint64_t)valid << CMD_VALID_SHIFT);
}

/*
 * Return true when the command circular buffer has no free slot, i.e.
 * the next write index would catch up with the ITS read pointer.
 */
static inline bool
its_cmd_queue_full(struct gicv3_its_softc *sc)
{
	size_t read_idx, next_write_idx;

	/* Get the index of the next command */
	next_write_idx = (sc->sc_its_cmd_next_idx + 1) %
	    (ITS_CMDQ_SIZE / sizeof(struct its_cmd));
	/* And the index of the current command being read */
	read_idx = gic_its_read_4(sc, GITS_CREADR) / sizeof(struct its_cmd);

	/*
	 * The queue is full when the write offset points
	 * at the command before the current read offset.
	 */
	return (next_write_idx == read_idx);
}

/*
 * Make a freshly written command visible to the ITS, either by cleaning
 * the D-cache (non-coherent queue) or with a store barrier.
 */
static inline void
its_cmd_sync(struct gicv3_its_softc *sc, struct its_cmd *cmd)
{

	if ((sc->sc_its_flags & ITS_FLAGS_CMDQ_FLUSH) != 0) {
		/* Clean D-cache under command. */
		cpu_dcache_wb_range((vm_offset_t)cmd, sizeof(*cmd));
	} else {
		/* DSB inner shareable, store */
		dsb(ishst);
	}

}

/*
 * Convert a command pointer into its byte offset within the command
 * queue, the unit used by GITS_CREADR/GITS_CWRITER.
 */
static inline uint64_t
its_cmd_cwriter_offset(struct gicv3_its_softc *sc, struct its_cmd *cmd)
{
	uint64_t off;

	off = (cmd - sc->sc_its_cmd_base) * sizeof(*cmd);

	return (off);
}

/*
 * Poll GITS_CREADR until the ITS has consumed the commands between
 * cmd_first (inclusive) and cmd_last (exclusive), handling queue
 * wrap-around, with a timeout of roughly one second.
 */
static void
its_cmd_wait_completion(device_t dev, struct its_cmd *cmd_first,
    struct its_cmd *cmd_last)
{
	struct gicv3_its_softc *sc;
	uint64_t first, last, read;
	size_t us_left;

	sc = device_get_softc(dev);

	/*
	 * XXX ARM64TODO: This is obviously a significant delay.
	 * The reason for that is that currently the time frames for
	 * the command to complete are not known.
	 */
	us_left = 1000000;

	first = its_cmd_cwriter_offset(sc, cmd_first);
	last = its_cmd_cwriter_offset(sc, cmd_last);

	for (;;) {
		read = gic_its_read_8(sc, GITS_CREADR);
		if (first < last) {
			/* Contiguous span: done once read leaves [first, last). */
			if (read < first || read >= last)
				break;
		} else if (read < first && read >= last)
			/* Span wraps around the end of the queue. */
			break;

		if (us_left-- == 0) {
			/* This means timeout */
			device_printf(dev,
			    "Timeout while waiting for CMD completion.\n");
			return;
		}
		DELAY(1);
	}
}

/*
 * Reserve the next slot in the command circular buffer, spinning (with a
 * ~1s timeout) while the queue is full.  Caller holds sc_its_cmd_lock.
 */
static struct its_cmd *
its_cmd_alloc_locked(device_t dev)
{
	struct gicv3_its_softc *sc;
	struct its_cmd *cmd;
	size_t us_left;

	sc = device_get_softc(dev);

	/*
	 * XXX ARM64TODO: This is obviously a significant delay.
	 * The reason for that is that currently the time frames for
	 * the command to complete (and therefore free the descriptor)
	 * are not known.
	 */
	us_left = 1000000;

	mtx_assert(&sc->sc_its_cmd_lock, MA_OWNED);
	while (its_cmd_queue_full(sc)) {
		if (us_left-- == 0) {
			/* Timeout while waiting for free command */
			device_printf(dev,
			    "Timeout while waiting for free command\n");
			return (NULL);
		}
		DELAY(1);
	}

	cmd = &sc->sc_its_cmd_base[sc->sc_its_cmd_next_idx];
	sc->sc_its_cmd_next_idx++;
	sc->sc_its_cmd_next_idx %= ITS_CMDQ_SIZE / sizeof(struct its_cmd);

	return (cmd);
}

/*
 * Encode a command descriptor into an ITS command slot.  Returns the
 * target redistributor address for commands that need a following SYNC,
 * or ITS_TARGET_NONE when no SYNC is required.
 */
static uint64_t
its_cmd_prepare(struct its_cmd *cmd, struct its_cmd_desc *desc)
{
	uint64_t target;
	uint8_t cmd_type;
	u_int size;

	cmd_type = desc->cmd_type;
	target = ITS_TARGET_NONE;

	switch (cmd_type) {
	case ITS_CMD_MOVI:	/* Move interrupt ID to another collection */
		target = desc->cmd_desc_movi.col->col_target;
		cmd_format_command(cmd, ITS_CMD_MOVI);
		cmd_format_id(cmd, desc->cmd_desc_movi.id);
		cmd_format_col(cmd,
		    desc->cmd_desc_movi.col->col_id);
		cmd_format_devid(cmd, desc->cmd_desc_movi.its_dev->devid);
		break;
	case ITS_CMD_SYNC:	/* Wait for previous commands completion */
		target = desc->cmd_desc_sync.col->col_target;
		cmd_format_command(cmd, ITS_CMD_SYNC);
		cmd_format_target(cmd, target);
		break;
	case ITS_CMD_MAPD:	/* Assign ITT to device */
		cmd_format_command(cmd, ITS_CMD_MAPD);
		cmd_format_itt(cmd, vtophys(desc->cmd_desc_mapd.its_dev->itt));
		/*
		 * Size describes number of bits to encode interrupt IDs
		 * supported by the device minus one.
		 * When V (valid) bit is zero, this field should be written
		 * as zero.
		 */
		if (desc->cmd_desc_mapd.valid != 0) {
			size = fls(desc->cmd_desc_mapd.its_dev->lpis.lpi_num);
			/* Clamp so size never underflows for lpi_num == 0. */
			size = MAX(1, size) - 1;
		} else
			size = 0;

		cmd_format_size(cmd, size);
		cmd_format_devid(cmd, desc->cmd_desc_mapd.its_dev->devid);
		cmd_format_valid(cmd, desc->cmd_desc_mapd.valid);
		break;
	case ITS_CMD_MAPC:	/* Map collection to Re-Distributor */
		target = desc->cmd_desc_mapc.col->col_target;
		cmd_format_command(cmd, ITS_CMD_MAPC);
		cmd_format_col(cmd, desc->cmd_desc_mapc.col->col_id);
		cmd_format_valid(cmd, desc->cmd_desc_mapc.valid);
		cmd_format_target(cmd, target);
		break;
	case ITS_CMD_MAPTI:	/* Map EventID to a physical LPI + collection */
		target = desc->cmd_desc_mapvi.col->col_target;
		cmd_format_command(cmd, ITS_CMD_MAPTI);
		cmd_format_devid(cmd, desc->cmd_desc_mapvi.its_dev->devid);
		cmd_format_id(cmd, desc->cmd_desc_mapvi.id);
		cmd_format_pid(cmd, desc->cmd_desc_mapvi.pid);
		cmd_format_col(cmd, desc->cmd_desc_mapvi.col->col_id);
		break;
	case ITS_CMD_MAPI:	/* Map EventID to an LPI with the same number */
		target = desc->cmd_desc_mapi.col->col_target;
		cmd_format_command(cmd, ITS_CMD_MAPI);
		cmd_format_devid(cmd, desc->cmd_desc_mapi.its_dev->devid);
		cmd_format_id(cmd, desc->cmd_desc_mapi.pid);
		cmd_format_col(cmd, desc->cmd_desc_mapi.col->col_id);
		break;
	case ITS_CMD_INV:	/* Invalidate cached config for an EventID */
		target = desc->cmd_desc_inv.col->col_target;
		cmd_format_command(cmd, ITS_CMD_INV);
		cmd_format_devid(cmd, desc->cmd_desc_inv.its_dev->devid);
		cmd_format_id(cmd, desc->cmd_desc_inv.pid);
		break;
	case ITS_CMD_INVALL:	/* Invalidate all cached config for a collection */
		cmd_format_command(cmd, ITS_CMD_INVALL);
		cmd_format_col(cmd, desc->cmd_desc_invall.col->col_id);
		break;
	default:
		panic("its_cmd_prepare: Invalid command: %x", cmd_type);
	}

	return (target);
}

/*
 * Enqueue one command (plus a SYNC when it targets a redistributor),
 * advance GITS_CWRITER, and wait for the ITS to consume the commands.
 * Returns 0 on success or EBUSY if no command slot became free.
 */
static int
its_cmd_send(device_t dev, struct its_cmd_desc *desc)
{
	struct gicv3_its_softc *sc;
	struct its_cmd *cmd, *cmd_sync, *cmd_write;
	struct its_col col_sync;
	struct its_cmd_desc desc_sync;
	uint64_t target, cwriter;

	sc = device_get_softc(dev);
	mtx_lock_spin(&sc->sc_its_cmd_lock);
	cmd = its_cmd_alloc_locked(dev);
	if (cmd == NULL) {
		device_printf(dev, "could not allocate ITS command\n");
		mtx_unlock_spin(&sc->sc_its_cmd_lock);
		return (EBUSY);
	}

	target = its_cmd_prepare(cmd, desc);
	its_cmd_sync(sc, cmd);

	/* Follow with a SYNC so the effect is observable at the target. */
	if (target != ITS_TARGET_NONE) {
		cmd_sync = its_cmd_alloc_locked(dev);
		if (cmd_sync != NULL) {
			desc_sync.cmd_type = ITS_CMD_SYNC;
			col_sync.col_target = target;
			desc_sync.cmd_desc_sync.col = &col_sync;
			its_cmd_prepare(cmd_sync, &desc_sync);
			its_cmd_sync(sc, cmd_sync);
		}
	}

	/* Update GITS_CWRITER */
	cwriter = sc->sc_its_cmd_next_idx * sizeof(struct its_cmd);
	gic_its_write_8(sc, GITS_CWRITER, cwriter);
	cmd_write = &sc->sc_its_cmd_base[sc->sc_its_cmd_next_idx];
	mtx_unlock_spin(&sc->sc_its_cmd_lock);

	its_cmd_wait_completion(dev, cmd, cmd_write);

	return (0);
}

/* Handlers to send commands */

/*
 * Issue a MOVI command to retarget an LPI at the collection of the CPU
 * the interrupt source is bound to.
 */
static void
its_cmd_movi(device_t dev, struct gicv3_its_irqsrc *girq)
{
	struct gicv3_its_softc *sc;
	struct its_cmd_desc desc;
	struct
	    its_col *col;

	sc = device_get_softc(dev);
	/* Collection of the (single) CPU in the isrc's CPU set. */
	col = sc->sc_its_cols[CPU_FFS(&girq->gi_isrc.isrc_cpu) - 1];

	desc.cmd_type = ITS_CMD_MOVI;
	desc.cmd_desc_movi.its_dev = girq->gi_its_dev;
	desc.cmd_desc_movi.col = col;
	desc.cmd_desc_movi.id = girq->gi_id;

	its_cmd_send(dev, &desc);
}

/*
 * Issue a MAPC command to map (or unmap) a collection to its
 * redistributor.
 */
static void
its_cmd_mapc(device_t dev, struct its_col *col, uint8_t valid)
{
	struct its_cmd_desc desc;

	desc.cmd_type = ITS_CMD_MAPC;
	desc.cmd_desc_mapc.col = col;
	/*
	 * Valid bit set - map the collection.
	 * Valid bit cleared - unmap the collection.
	 */
	desc.cmd_desc_mapc.valid = valid;

	its_cmd_send(dev, &desc);
}

/*
 * Issue a MAPTI command binding a device EventID to a physical LPI and
 * the collection of the CPU the interrupt source is bound to.
 */
static void
its_cmd_mapti(device_t dev, struct gicv3_its_irqsrc *girq)
{
	struct gicv3_its_softc *sc;
	struct its_cmd_desc desc;
	struct its_col *col;
	u_int col_id;

	sc = device_get_softc(dev);

	col_id = CPU_FFS(&girq->gi_isrc.isrc_cpu) - 1;
	col = sc->sc_its_cols[col_id];

	desc.cmd_type = ITS_CMD_MAPTI;
	desc.cmd_desc_mapvi.its_dev = girq->gi_its_dev;
	desc.cmd_desc_mapvi.col = col;
	/* The EventID sent to the device */
	desc.cmd_desc_mapvi.id = girq->gi_id;
	/* The physical interrupt presented to software */
	desc.cmd_desc_mapvi.pid = girq->gi_lpi + GIC_FIRST_LPI;

	its_cmd_send(dev, &desc);
}

/*
 * Issue a MAPD command to attach (or detach) a device's ITT.
 */
static void
its_cmd_mapd(device_t dev, struct its_dev *its_dev, uint8_t valid)
{
	struct its_cmd_desc desc;

	desc.cmd_type = ITS_CMD_MAPD;
	desc.cmd_desc_mapd.its_dev = its_dev;
	desc.cmd_desc_mapd.valid = valid;

	its_cmd_send(dev, &desc);
}

/*
 * Issue an INV command so the ITS re-reads the LPI configuration for one
 * EventID of a device.
 */
static void
its_cmd_inv(device_t dev, struct its_dev *its_dev,
    struct gicv3_its_irqsrc *girq)
{
	struct gicv3_its_softc *sc;
	struct its_cmd_desc desc;
	struct its_col *col;

	sc = device_get_softc(dev);
	col =
	    sc->sc_its_cols[CPU_FFS(&girq->gi_isrc.isrc_cpu) - 1];

	desc.cmd_type = ITS_CMD_INV;
	/* The EventID sent to the device */
	desc.cmd_desc_inv.pid = girq->gi_id;
	desc.cmd_desc_inv.its_dev = its_dev;
	desc.cmd_desc_inv.col = col;

	its_cmd_send(dev, &desc);
}

/*
 * Issue an INVALL command so the ITS re-reads the LPI configuration for
 * every interrupt in a collection.
 */
static void
its_cmd_invall(device_t dev, struct its_col *col)
{
	struct its_cmd_desc desc;

	desc.cmd_type = ITS_CMD_INVALL;
	desc.cmd_desc_invall.col = col;

	its_cmd_send(dev, &desc);
}

#ifdef FDT
/* FDT front end: attach the ITS driver via the devicetree. */
static device_probe_t gicv3_its_fdt_probe;
static device_attach_t gicv3_its_fdt_attach;

static device_method_t gicv3_its_fdt_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		gicv3_its_fdt_probe),
	DEVMETHOD(device_attach,	gicv3_its_fdt_attach),

	/* End */
	DEVMETHOD_END
};

#define its_baseclasses its_fdt_baseclasses
DEFINE_CLASS_1(its, gicv3_its_fdt_driver, gicv3_its_fdt_methods,
    sizeof(struct gicv3_its_softc), gicv3_its_driver);
#undef its_baseclasses

EARLY_DRIVER_MODULE(its_fdt, gic, gicv3_its_fdt_driver, 0, 0,
    BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE);

/*
 * Probe: match the standard "arm,gic-v3-its" compatible string.
 */
static int
gicv3_its_fdt_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (!ofw_bus_is_compatible(dev, "arm,gic-v3-its"))
		return (ENXIO);

	device_set_desc(dev, "ARM GIC Interrupt Translation Service");
	return (BUS_PROBE_DEFAULT);
}

/*
 * Attach: run the common attach code, then register as an interrupt
 * controller and MSI provider under the devicetree xref.
 */
static int
gicv3_its_fdt_attach(device_t dev)
{
	struct gicv3_its_softc *sc;
	phandle_t xref;
	int err;

	sc = device_get_softc(dev);
	sc->dev = dev;
	err = gicv3_its_attach(dev);
	if (err != 0)
		return (err);

	/* Register this device as a interrupt controller */
	xref = OF_xref_from_node(ofw_bus_get_node(dev));
	sc->sc_pic = intr_pic_register(dev, xref);
	err =
	    intr_pic_add_handler(device_get_parent(dev), sc->sc_pic,
	    gicv3_its_intr, sc, sc->sc_irq_base, sc->sc_irq_length);
	if (err != 0) {
		device_printf(dev, "Failed to add PIC handler: %d\n", err);
		return (err);
	}

	/* Register this device to handle MSI interrupts */
	err = intr_msi_register(dev, xref);
	if (err != 0) {
		device_printf(dev, "Failed to register for MSIs: %d\n", err);
		return (err);
	}

	return (0);
}
#endif

#ifdef DEV_ACPI
/* ACPI front end: attach the ITS driver via MADT-provided ivars. */
static device_probe_t gicv3_its_acpi_probe;
static device_attach_t gicv3_its_acpi_attach;

static device_method_t gicv3_its_acpi_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		gicv3_its_acpi_probe),
	DEVMETHOD(device_attach,	gicv3_its_acpi_attach),

	/* End */
	DEVMETHOD_END
};

#define its_baseclasses its_acpi_baseclasses
DEFINE_CLASS_1(its, gicv3_its_acpi_driver, gicv3_its_acpi_methods,
    sizeof(struct gicv3_its_softc), gicv3_its_driver);
#undef its_baseclasses

EARLY_DRIVER_MODULE(its_acpi, gic, gicv3_its_acpi_driver, 0, 0,
    BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE);

/*
 * Probe: only attach on an ACPI-enumerated GIC of hardware revision 3+.
 */
static int
gicv3_its_acpi_probe(device_t dev)
{

	if (gic_get_bus(dev) != GIC_BUS_ACPI)
		return (EINVAL);

	if (gic_get_hw_rev(dev) < 3)
		return (EINVAL);

	device_set_desc(dev, "ARM GIC Interrupt Translation Service");
	return (BUS_PROBE_DEFAULT);
}

/*
 * Attach: run the common attach code, then register as an interrupt
 * controller and MSI provider under the MADT-assigned xref.
 */
static int
gicv3_its_acpi_attach(device_t dev)
{
	struct gicv3_its_softc *sc;
	struct gic_v3_devinfo *di;
	int err;

	sc = device_get_softc(dev);
	sc->dev = dev;
	err = gicv3_its_attach(dev);
	if (err != 0)
		return (err);

	di = device_get_ivars(dev);
	sc->sc_pic = intr_pic_register(dev, di->msi_xref);
	err = intr_pic_add_handler(device_get_parent(dev), sc->sc_pic,
	    gicv3_its_intr, sc, sc->sc_irq_base,
	    sc->sc_irq_length);
	if (err != 0) {
		device_printf(dev, "Failed to add PIC handler: %d\n", err);
		return (err);
	}

	/* Register this device to handle MSI interrupts */
	err = intr_msi_register(dev, di->msi_xref);
	if (err != 0) {
		device_printf(dev, "Failed to register for MSIs: %d\n", err);
		return (err);
	}

	return (0);
}
#endif