1 /*- 2 * Copyright (c) 2015-2016 The FreeBSD Foundation 3 * Copyright (c) 2023 Arm Ltd 4 * 5 * This software was developed by Andrew Turner under 6 * the sponsorship of the FreeBSD Foundation. 7 * 8 * This software was developed by Semihalf under 9 * the sponsorship of the FreeBSD Foundation. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 30 * SUCH DAMAGE. 
31 */ 32 33 #include "opt_acpi.h" 34 #include "opt_platform.h" 35 #include "opt_iommu.h" 36 37 #include <sys/param.h> 38 #include <sys/systm.h> 39 #include <sys/bus.h> 40 #include <sys/cpuset.h> 41 #include <sys/domainset.h> 42 #include <sys/endian.h> 43 #include <sys/kernel.h> 44 #include <sys/lock.h> 45 #include <sys/malloc.h> 46 #include <sys/module.h> 47 #include <sys/mutex.h> 48 #include <sys/proc.h> 49 #include <sys/taskqueue.h> 50 #include <sys/tree.h> 51 #include <sys/queue.h> 52 #include <sys/rman.h> 53 #include <sys/sbuf.h> 54 #include <sys/smp.h> 55 #include <sys/sysctl.h> 56 #include <sys/vmem.h> 57 58 #include <vm/vm.h> 59 #include <vm/pmap.h> 60 #include <vm/vm_page.h> 61 62 #include <machine/bus.h> 63 #include <machine/intr.h> 64 65 #include <arm/arm/gic_common.h> 66 #include <arm64/arm64/gic_v3_reg.h> 67 #include <arm64/arm64/gic_v3_var.h> 68 69 #ifdef FDT 70 #include <dev/ofw/openfirm.h> 71 #include <dev/ofw/ofw_bus.h> 72 #include <dev/ofw/ofw_bus_subr.h> 73 #endif 74 #include <dev/pci/pcireg.h> 75 #include <dev/pci/pcivar.h> 76 77 #ifdef IOMMU 78 #include <dev/iommu/iommu.h> 79 #include <dev/iommu/iommu_gas.h> 80 #endif 81 82 #include "pcib_if.h" 83 #include "pic_if.h" 84 #include "msi_if.h" 85 86 MALLOC_DEFINE(M_GICV3_ITS, "GICv3 ITS", 87 "ARM GICv3 Interrupt Translation Service"); 88 89 #define LPI_NIRQS (64 * 1024) 90 91 /* The size and alignment of the command circular buffer */ 92 #define ITS_CMDQ_SIZE (64 * 1024) /* Must be a multiple of 4K */ 93 #define ITS_CMDQ_ALIGN (64 * 1024) 94 95 #define LPI_CONFTAB_SIZE LPI_NIRQS 96 #define LPI_CONFTAB_ALIGN (64 * 1024) 97 #define LPI_CONFTAB_MAX_ADDR ((1ul << 48) - 1) /* We need a 47 bit PA */ 98 99 /* 1 bit per SPI, PPI, and SGI (8k), and 1 bit per LPI (LPI_CONFTAB_SIZE) */ 100 #define LPI_PENDTAB_SIZE ((LPI_NIRQS + GIC_FIRST_LPI) / 8) 101 #define LPI_PENDTAB_ALIGN (64 * 1024) 102 #define LPI_PENDTAB_MAX_ADDR ((1ul << 48) - 1) /* We need a 47 bit PA */ 103 104 #define LPI_INT_TRANS_TAB_ALIGN 256 
#define	LPI_INT_TRANS_TAB_MAX_ADDR ((1ul << 48) - 1)

/* ITS commands encoding */
#define	ITS_CMD_MOVI		(0x01)
#define	ITS_CMD_SYNC		(0x05)
#define	ITS_CMD_MAPD		(0x08)
#define	ITS_CMD_MAPC		(0x09)
#define	ITS_CMD_MAPTI		(0x0a)
#define	ITS_CMD_MAPI		(0x0b)
#define	ITS_CMD_INV		(0x0c)
#define	ITS_CMD_INVALL		(0x0d)
/* Command */
#define	CMD_COMMAND_MASK	(0xFFUL)
/* PCI device ID */
#define	CMD_DEVID_SHIFT		(32)
#define	CMD_DEVID_MASK		(0xFFFFFFFFUL << CMD_DEVID_SHIFT)
/* Size of IRQ ID bitfield */
#define	CMD_SIZE_MASK		(0xFFUL)
/* Virtual LPI ID */
#define	CMD_ID_MASK		(0xFFFFFFFFUL)
/* Physical LPI ID */
#define	CMD_PID_SHIFT		(32)
#define	CMD_PID_MASK		(0xFFFFFFFFUL << CMD_PID_SHIFT)
/* Collection */
#define	CMD_COL_MASK		(0xFFFFUL)
/* Target (CPU or Re-Distributor) */
#define	CMD_TARGET_SHIFT	(16)
#define	CMD_TARGET_MASK		(0xFFFFFFFFUL << CMD_TARGET_SHIFT)
/* Interrupt Translation Table address */
#define	CMD_ITT_MASK		(0xFFFFFFFFFF00UL)
/* Valid command bit */
#define	CMD_VALID_SHIFT		(63)
#define	CMD_VALID_MASK		(1UL << CMD_VALID_SHIFT)

/* Sentinel for a collection not yet bound to any target CPU */
#define	ITS_TARGET_NONE		0xFBADBEEF

/* LPI chunk owned by ITS device */
struct lpi_chunk {
	u_int	lpi_base;	/* First LPI number in this chunk */
	u_int	lpi_free;	/* First free LPI in set */
	u_int	lpi_num;	/* Total number of LPIs in chunk */
	u_int	lpi_busy;	/* Number of busy LPIs in chunk */
};

/* ITS device */
struct its_dev {
	TAILQ_ENTRY(its_dev)	entry;
	/* PCI device */
	device_t		pci_dev;
	/* Device ID (i.e. PCI device ID) */
	uint32_t		devid;
	/* List of assigned LPIs */
	struct lpi_chunk	lpis;
	/* Virtual address of ITT */
	vm_offset_t		itt;
	size_t			itt_size;
};

/*
 * ITS command descriptor.
 * Idea for command description passing taken from Linux.
 */
struct its_cmd_desc {
	uint8_t cmd_type;	/* One of the ITS_CMD_* encodings above */

	union {
		/* Move an interrupt to a new collection */
		struct {
			struct its_dev *its_dev;
			struct its_col *col;
			uint32_t id;
		} cmd_desc_movi;

		/* Wait for a collection's outstanding commands */
		struct {
			struct its_col *col;
		} cmd_desc_sync;

		/* Map (or unmap) a collection to a target */
		struct {
			struct its_col *col;
			uint8_t valid;
		} cmd_desc_mapc;

		/* Map an event ID to a physical LPI and collection */
		struct {
			struct its_dev *its_dev;
			struct its_col *col;
			uint32_t pid;
			uint32_t id;
		} cmd_desc_mapvi;

		/* Map an event ID where event == LPI */
		struct {
			struct its_dev *its_dev;
			struct its_col *col;
			uint32_t pid;
		} cmd_desc_mapi;

		/* Map (or unmap) a device's ITT */
		struct {
			struct its_dev *its_dev;
			uint8_t valid;
		} cmd_desc_mapd;

		/* Invalidate a single cached LPI configuration */
		struct {
			struct its_dev *its_dev;
			struct its_col *col;
			uint32_t pid;
		} cmd_desc_inv;

		/* Invalidate all cached LPI configuration for a collection */
		struct {
			struct its_col *col;
		} cmd_desc_invall;
	};
};

/* ITS command. Each command is 32 bytes long */
struct its_cmd {
	uint64_t	cmd_dword[4];	/* ITS command double word */
};

/* An ITS private table */
struct its_ptable {
	vm_offset_t	ptab_vaddr;	/* KVA of the (L1) table */
	/* Size of the L1 and L2 tables */
	size_t		ptab_l1_size;
	size_t		ptab_l2_size;
	/* Number of L1 and L2 entries */
	int		ptab_l1_nidents;
	int		ptab_l2_nidents;

	int		ptab_page_size;	/* Page size the ITS uses for this table */
	int		ptab_share;	/* Shareability attribute accepted by HW */
	bool		ptab_indirect;	/* Two-level (indirect) table in use */
};

/* ITS collection description.
 */
struct its_col {
	uint64_t	col_target;	/* Target Re-Distributor */
	uint64_t	col_id;		/* Collection ID */
};

/* Per-interrupt source state for one LPI */
struct gicv3_its_irqsrc {
	struct intr_irqsrc	gi_isrc;
	u_int			gi_id;		/* Event ID within the device */
	u_int			gi_lpi;		/* LPI number, relative to GIC_FIRST_LPI */
	struct its_dev		*gi_its_dev;	/* Owning ITS device, NULL when free */
	TAILQ_ENTRY(gicv3_its_irqsrc) gi_link;
};

/* Software state for one ITS instance */
struct gicv3_its_softc {
	device_t	dev;
	struct intr_pic *sc_pic;
	struct resource *sc_its_res;

	cpuset_t	sc_cpus;	/* CPUs this ITS may deliver to */
	struct domainset *sc_ds;	/* NUMA domain policy for allocations */
	u_int		gic_irq_cpu;	/* Last CPU used for round-robin binding */
	int		sc_devbits;	/* Number of device ID bits supported */
	int		sc_dev_table_idx; /* GITS_BASER index of the device table */

	struct its_ptable sc_its_ptab[GITS_BASER_NUM];
	struct its_col *sc_its_cols[MAXCPU];	/* Per-CPU collections */

	/*
	 * TODO: We should get these from the parent as we only want a
	 * single copy of each across the interrupt controller.
	 */
	uint8_t		*sc_conf_base;		/* LPI configuration table */
	vm_offset_t	sc_pend_base[MAXCPU];	/* Per-CPU LPI pending tables */

	/* Command handling */
	struct mtx sc_its_cmd_lock;
	struct its_cmd *sc_its_cmd_base;	/* Command circular buffer address */
	size_t sc_its_cmd_next_idx;

	vmem_t *sc_irq_alloc;			/* Allocator for INTRNG IRQ numbers */
	struct gicv3_its_irqsrc	**sc_irqs;
	u_int	sc_irq_base;
	u_int	sc_irq_length;
	u_int	sc_irq_count;

	struct mtx sc_its_dev_lock;
	TAILQ_HEAD(its_dev_list, its_dev) sc_its_dev_list;
	TAILQ_HEAD(free_irqs, gicv3_its_irqsrc) sc_free_irqs;

#define	ITS_FLAGS_CMDQ_FLUSH		0x00000001
#define	ITS_FLAGS_LPI_CONF_FLUSH	0x00000002
#define	ITS_FLAGS_ERRATA_CAVIUM_22375	0x00000004
	u_int sc_its_flags;
	bool	trace_enable;
	vm_page_t ma; /* fake msi page */
};

/* Shared LPI configuration table, common to all ITS instances */
static void *conf_base;

typedef void (its_quirk_func_t)(device_t);
static its_quirk_func_t its_quirk_cavium_22375;

/* Implementation quirks, matched against GITS_IIDR */
static const struct {
	const char *desc;
	uint32_t iidr;
	uint32_t iidr_mask;
	its_quirk_func_t *func;
} its_quirks[] = {
	{
		/* Cavium ThunderX Pass 1.x */
		.desc = "Cavium ThunderX errata: 22375, 24313",
		.iidr = GITS_IIDR_RAW(GITS_IIDR_IMPL_CAVIUM,
		    GITS_IIDR_PROD_THUNDER, GITS_IIDR_VAR_THUNDER_1, 0),
		.iidr_mask = ~GITS_IIDR_REVISION_MASK,
		.func = its_quirk_cavium_22375,
	},
};

/* Accessors for the memory-mapped ITS register frame */
#define	gic_its_read_4(sc, reg)			\
    bus_read_4((sc)->sc_its_res, (reg))
#define	gic_its_read_8(sc, reg)			\
    bus_read_8((sc)->sc_its_res, (reg))

#define	gic_its_write_4(sc, reg, val)		\
    bus_write_4((sc)->sc_its_res, (reg), (val))
#define	gic_its_write_8(sc, reg, val)		\
    bus_write_8((sc)->sc_its_res, (reg), (val))

static device_attach_t gicv3_its_attach;
static device_detach_t gicv3_its_detach;

static pic_disable_intr_t gicv3_its_disable_intr;
static pic_enable_intr_t gicv3_its_enable_intr;
static pic_map_intr_t gicv3_its_map_intr;
static pic_setup_intr_t gicv3_its_setup_intr;
static pic_post_filter_t gicv3_its_post_filter;
static pic_post_ithread_t gicv3_its_post_ithread;
static pic_pre_ithread_t gicv3_its_pre_ithread;
static pic_bind_intr_t gicv3_its_bind_intr;
#ifdef SMP
static pic_init_secondary_t gicv3_its_init_secondary;
#endif
static msi_alloc_msi_t gicv3_its_alloc_msi;
static msi_release_msi_t gicv3_its_release_msi;
static msi_alloc_msix_t gicv3_its_alloc_msix;
static msi_release_msix_t gicv3_its_release_msix;
static msi_map_msi_t gicv3_its_map_msi;
#ifdef IOMMU
static msi_iommu_init_t gicv3_iommu_init;
static msi_iommu_deinit_t gicv3_iommu_deinit;
#endif

/* ITS command builders, defined later in the file */
static void its_cmd_movi(device_t, struct gicv3_its_irqsrc *);
static void its_cmd_mapc(device_t, struct its_col *, uint8_t);
static void its_cmd_mapti(device_t, struct gicv3_its_irqsrc *);
static void its_cmd_mapd(device_t, struct its_dev *, uint8_t);
static void its_cmd_inv(device_t, struct its_dev *, struct gicv3_its_irqsrc *);
static void its_cmd_invall(device_t, struct its_col *);

static device_method_t gicv3_its_methods[] = {
	/* Device interface */
	DEVMETHOD(device_detach,
gicv3_its_detach), 359 360 /* Interrupt controller interface */ 361 DEVMETHOD(pic_disable_intr, gicv3_its_disable_intr), 362 DEVMETHOD(pic_enable_intr, gicv3_its_enable_intr), 363 DEVMETHOD(pic_map_intr, gicv3_its_map_intr), 364 DEVMETHOD(pic_setup_intr, gicv3_its_setup_intr), 365 DEVMETHOD(pic_post_filter, gicv3_its_post_filter), 366 DEVMETHOD(pic_post_ithread, gicv3_its_post_ithread), 367 DEVMETHOD(pic_pre_ithread, gicv3_its_pre_ithread), 368 #ifdef SMP 369 DEVMETHOD(pic_bind_intr, gicv3_its_bind_intr), 370 DEVMETHOD(pic_init_secondary, gicv3_its_init_secondary), 371 #endif 372 373 /* MSI/MSI-X */ 374 DEVMETHOD(msi_alloc_msi, gicv3_its_alloc_msi), 375 DEVMETHOD(msi_release_msi, gicv3_its_release_msi), 376 DEVMETHOD(msi_alloc_msix, gicv3_its_alloc_msix), 377 DEVMETHOD(msi_release_msix, gicv3_its_release_msix), 378 DEVMETHOD(msi_map_msi, gicv3_its_map_msi), 379 #ifdef IOMMU 380 DEVMETHOD(msi_iommu_init, gicv3_iommu_init), 381 DEVMETHOD(msi_iommu_deinit, gicv3_iommu_deinit), 382 #endif 383 384 /* End */ 385 DEVMETHOD_END 386 }; 387 388 static DEFINE_CLASS_0(gic, gicv3_its_driver, gicv3_its_methods, 389 sizeof(struct gicv3_its_softc)); 390 391 static void 392 gicv3_its_cmdq_init(struct gicv3_its_softc *sc) 393 { 394 vm_paddr_t cmd_paddr; 395 uint64_t reg, tmp; 396 397 /* Set up the command circular buffer */ 398 sc->sc_its_cmd_base = contigmalloc_domainset(ITS_CMDQ_SIZE, M_GICV3_ITS, 399 sc->sc_ds, M_WAITOK | M_ZERO, 0, (1ul << 48) - 1, ITS_CMDQ_ALIGN, 400 0); 401 sc->sc_its_cmd_next_idx = 0; 402 403 cmd_paddr = vtophys(sc->sc_its_cmd_base); 404 405 /* Set the base of the command buffer */ 406 reg = GITS_CBASER_VALID | 407 (GITS_CBASER_CACHE_NIWAWB << GITS_CBASER_CACHE_SHIFT) | 408 cmd_paddr | (GITS_CBASER_SHARE_IS << GITS_CBASER_SHARE_SHIFT) | 409 (ITS_CMDQ_SIZE / 4096 - 1); 410 gic_its_write_8(sc, GITS_CBASER, reg); 411 412 /* Read back to check for fixed value fields */ 413 tmp = gic_its_read_8(sc, GITS_CBASER); 414 415 if ((tmp & GITS_CBASER_SHARE_MASK) != 416 
(GITS_CBASER_SHARE_IS << GITS_CBASER_SHARE_SHIFT)) { 417 /* Check if the hardware reported non-shareable */ 418 if ((tmp & GITS_CBASER_SHARE_MASK) == 419 (GITS_CBASER_SHARE_NS << GITS_CBASER_SHARE_SHIFT)) { 420 /* If so remove the cache attribute */ 421 reg &= ~GITS_CBASER_CACHE_MASK; 422 reg &= ~GITS_CBASER_SHARE_MASK; 423 /* Set to Non-cacheable, Non-shareable */ 424 reg |= GITS_CBASER_CACHE_NIN << GITS_CBASER_CACHE_SHIFT; 425 reg |= GITS_CBASER_SHARE_NS << GITS_CBASER_SHARE_SHIFT; 426 427 gic_its_write_8(sc, GITS_CBASER, reg); 428 } 429 430 /* The command queue has to be flushed after each command */ 431 sc->sc_its_flags |= ITS_FLAGS_CMDQ_FLUSH; 432 } 433 434 /* Get the next command from the start of the buffer */ 435 gic_its_write_8(sc, GITS_CWRITER, 0x0); 436 } 437 438 static int 439 gicv3_its_table_page_size(struct gicv3_its_softc *sc, int table) 440 { 441 uint64_t reg, tmp; 442 int page_size; 443 444 page_size = PAGE_SIZE_64K; 445 reg = gic_its_read_8(sc, GITS_BASER(table)); 446 447 while (1) { 448 reg &= GITS_BASER_PSZ_MASK; 449 switch (page_size) { 450 case PAGE_SIZE_4K: /* 4KB */ 451 reg |= GITS_BASER_PSZ_4K << GITS_BASER_PSZ_SHIFT; 452 break; 453 case PAGE_SIZE_16K: /* 16KB */ 454 reg |= GITS_BASER_PSZ_16K << GITS_BASER_PSZ_SHIFT; 455 break; 456 case PAGE_SIZE_64K: /* 64KB */ 457 reg |= GITS_BASER_PSZ_64K << GITS_BASER_PSZ_SHIFT; 458 break; 459 } 460 461 /* Write the new page size */ 462 gic_its_write_8(sc, GITS_BASER(table), reg); 463 464 /* Read back to check */ 465 tmp = gic_its_read_8(sc, GITS_BASER(table)); 466 467 /* The page size is correct */ 468 if ((tmp & GITS_BASER_PSZ_MASK) == (reg & GITS_BASER_PSZ_MASK)) 469 return (page_size); 470 471 switch (page_size) { 472 default: 473 return (-1); 474 case PAGE_SIZE_16K: 475 page_size = PAGE_SIZE_4K; 476 break; 477 case PAGE_SIZE_64K: 478 page_size = PAGE_SIZE_16K; 479 break; 480 } 481 } 482 } 483 484 static bool 485 gicv3_its_table_supports_indirect(struct gicv3_its_softc *sc, int table) 486 { 487 
	uint64_t reg;

	reg = gic_its_read_8(sc, GITS_BASER(table));

	/* Try setting the indirect flag */
	reg |= GITS_BASER_INDIRECT;
	gic_its_write_8(sc, GITS_BASER(table), reg);

	/* Read back to check; the bit is RAZ/WI when unsupported */
	reg = gic_its_read_8(sc, GITS_BASER(table));
	return ((reg & GITS_BASER_INDIRECT) != 0);
}

/*
 * Allocate and program the ITS private tables (device, collection, ...)
 * described by the GITS_BASER registers.  For large device tables a
 * two-level (indirect) layout is used when the hardware supports it.
 * Returns 0 on success or an errno on failure.
 */
static int
gicv3_its_table_init(device_t dev, struct gicv3_its_softc *sc)
{
	vm_offset_t table;
	vm_paddr_t paddr;
	uint64_t cache, reg, share, tmp, type;
	size_t its_tbl_size, nitspages, npages;
	size_t l1_esize, l2_esize, l1_nidents, l2_nidents;
	int i, page_size;
	int devbits;
	bool indirect;

	if ((sc->sc_its_flags & ITS_FLAGS_ERRATA_CAVIUM_22375) != 0) {
		/*
		 * GITS_TYPER[17:13] of ThunderX reports that device IDs
		 * are to be 21 bits in length. The entry size of the ITS
		 * table can be read from GITS_BASERn[52:48] and on ThunderX
		 * is supposed to be 8 bytes in length (for device table).
		 * Finally the page size that is to be used by ITS to access
		 * this table will be set to 64KB.
		 *
		 * This gives 0x200000 entries of size 0x8 bytes covered by
		 * 256 pages each of which 64KB in size. The number of pages
		 * (minus 1) should then be written to GITS_BASERn[7:0]. In
		 * that case this value would be 0xFF but on ThunderX the
		 * maximum value that HW accepts is 0xFD.
		 *
		 * Set an arbitrary number of device ID bits to 20 in order
		 * to limit the number of entries in ITS device table to
		 * 0x100000 and the table size to 8MB.
		 */
		devbits = 20;
		cache = 0;
	} else {
		devbits = GITS_TYPER_DEVB(gic_its_read_8(sc, GITS_TYPER));
		cache = GITS_BASER_CACHE_WAWB;
	}
	sc->sc_devbits = devbits;
	share = GITS_BASER_SHARE_IS;

	for (i = 0; i < GITS_BASER_NUM; i++) {
		reg = gic_its_read_8(sc, GITS_BASER(i));
		/* The type of table */
		type = GITS_BASER_TYPE(reg);
		if (type == GITS_BASER_TYPE_UNIMPL)
			continue;

		/* The table entry size */
		l1_esize = GITS_BASER_ESIZE(reg);

		/* Find the tables page size */
		page_size = gicv3_its_table_page_size(sc, i);
		if (page_size == -1) {
			device_printf(dev, "No valid page size for table %d\n",
			    i);
			return (EINVAL);
		}

		indirect = false;
		l2_nidents = 0;
		l2_esize = 0;
		switch(type) {
		case GITS_BASER_TYPE_DEV:
			if (sc->sc_dev_table_idx != -1)
				device_printf(dev,
				    "Warning: Multiple device tables found\n");

			sc->sc_dev_table_idx = i;
			l1_nidents = (1 << devbits);
			/* Go indirect when a flat table would exceed 2 pages */
			if ((l1_esize * l1_nidents) > (page_size * 2)) {
				indirect =
				    gicv3_its_table_supports_indirect(sc, i);
				if (indirect) {
					/*
					 * Each l1 entry is 8 bytes and points
					 * to an l2 table of size page_size.
					 * Calculate how many entries this is
					 * and use this to find how many
					 * 8 byte l1 idents we need.
					 */
					l2_esize = l1_esize;
					l2_nidents = page_size / l2_esize;
					l1_nidents = l1_nidents / l2_nidents;
					l1_esize = GITS_INDIRECT_L1_ESIZE;
				}
			}
			its_tbl_size = l1_esize * l1_nidents;
			its_tbl_size = roundup2(its_tbl_size, page_size);
			break;
		case GITS_BASER_TYPE_VP:
		case GITS_BASER_TYPE_PP: /* Undocumented? */
		case GITS_BASER_TYPE_IC:
			its_tbl_size = page_size;
			break;
		default:
			if (bootverbose)
				device_printf(dev, "Unhandled table type %lx\n",
				    type);
			continue;
		}
		npages = howmany(its_tbl_size, PAGE_SIZE);

		/* Allocate the table */
		table = (vm_offset_t)contigmalloc_domainset(npages * PAGE_SIZE,
		    M_GICV3_ITS, sc->sc_ds, M_WAITOK | M_ZERO, 0,
		    (1ul << 48) - 1, PAGE_SIZE_64K, 0);

		sc->sc_its_ptab[i].ptab_vaddr = table;
		sc->sc_its_ptab[i].ptab_l1_size = its_tbl_size;
		sc->sc_its_ptab[i].ptab_l1_nidents = l1_nidents;
		sc->sc_its_ptab[i].ptab_l2_size = page_size;
		sc->sc_its_ptab[i].ptab_l2_nidents = l2_nidents;

		sc->sc_its_ptab[i].ptab_indirect = indirect;
		sc->sc_its_ptab[i].ptab_page_size = page_size;

		paddr = vtophys(table);

		/* Retry loop: downgrade shareability until the HW accepts */
		while (1) {
			nitspages = howmany(its_tbl_size, page_size);

			/* Clear the fields we will be setting */
			reg &= ~(GITS_BASER_VALID | GITS_BASER_INDIRECT |
			    GITS_BASER_CACHE_MASK | GITS_BASER_TYPE_MASK |
			    GITS_BASER_PA_MASK |
			    GITS_BASER_SHARE_MASK | GITS_BASER_PSZ_MASK |
			    GITS_BASER_SIZE_MASK);
			/* Set the new values */
			reg |= GITS_BASER_VALID |
			    (indirect ? GITS_BASER_INDIRECT : 0) |
			    (cache << GITS_BASER_CACHE_SHIFT) |
			    (type << GITS_BASER_TYPE_SHIFT) |
			    paddr | (share << GITS_BASER_SHARE_SHIFT) |
			    (nitspages - 1);

			switch (page_size) {
			case PAGE_SIZE_4K:	/* 4KB */
				reg |=
				    GITS_BASER_PSZ_4K << GITS_BASER_PSZ_SHIFT;
				break;
			case PAGE_SIZE_16K:	/* 16KB */
				reg |=
				    GITS_BASER_PSZ_16K << GITS_BASER_PSZ_SHIFT;
				break;
			case PAGE_SIZE_64K:	/* 64KB */
				reg |=
				    GITS_BASER_PSZ_64K << GITS_BASER_PSZ_SHIFT;
				break;
			}

			gic_its_write_8(sc, GITS_BASER(i), reg);

			/* Read back to check */
			tmp = gic_its_read_8(sc, GITS_BASER(i));

			/* Do the shareability masks line up? */
			if ((tmp & GITS_BASER_SHARE_MASK) !=
			    (reg & GITS_BASER_SHARE_MASK)) {
				share = (tmp & GITS_BASER_SHARE_MASK) >>
				    GITS_BASER_SHARE_SHIFT;
				continue;
			}

			if (tmp != reg) {
				device_printf(dev, "GITS_BASER%d: "
				    "unable to be updated: %lx != %lx\n",
				    i, reg, tmp);
				return (ENXIO);
			}

			sc->sc_its_ptab[i].ptab_share = share;
			/* We should have made all needed changes */
			break;
		}
	}

	return (0);
}

/*
 * Allocate (once, shared across all ITS instances) the LPI configuration
 * table and set every LPI to the default priority, group 1, disabled.
 */
static void
gicv3_its_conftable_init(struct gicv3_its_softc *sc)
{
	void *conf_table;

	conf_table = atomic_load_ptr(&conf_base);
	if (conf_table == NULL) {
		conf_table = contigmalloc(LPI_CONFTAB_SIZE,
		    M_GICV3_ITS, M_WAITOK, 0, LPI_CONFTAB_MAX_ADDR,
		    LPI_CONFTAB_ALIGN, 0);

		/* Another ITS may have won the race; use its table */
		if (atomic_cmpset_ptr((uintptr_t *)&conf_base,
		    (uintptr_t)NULL, (uintptr_t)conf_table) == 0) {
			contigfree(conf_table, LPI_CONFTAB_SIZE, M_GICV3_ITS);
			conf_table = atomic_load_ptr(&conf_base);
		}
	}
	sc->sc_conf_base = conf_table;

	/* Set the default configuration */
	memset(sc->sc_conf_base, GIC_PRIORITY_MAX | LPI_CONF_GROUP1,
	    LPI_CONFTAB_SIZE);

	/* Flush the table to memory */
	cpu_dcache_wb_range((vm_offset_t)sc->sc_conf_base, LPI_CONFTAB_SIZE);
}

/*
 * Allocate a zeroed LPI pending table for every CPU this ITS serves.
 */
static void
gicv3_its_pendtables_init(struct gicv3_its_softc *sc)
{
	int i;

	for (i = 0; i <= mp_maxid; i++) {
		if (CPU_ISSET(i, &sc->sc_cpus) == 0)
			continue;

		sc->sc_pend_base[i] = (vm_offset_t)contigmalloc(
		    LPI_PENDTAB_SIZE, M_GICV3_ITS, M_WAITOK | M_ZERO,
		    0, LPI_PENDTAB_MAX_ADDR, LPI_PENDTAB_ALIGN, 0);

		/* Flush so the ITS can see the memory */
		cpu_dcache_wb_range((vm_offset_t)sc->sc_pend_base[i],
		    LPI_PENDTAB_SIZE);
	}
}

/*
 * Program the current CPU's redistributor with the LPI configuration and
 * pending table bases (GICR_PROPBASER/GICR_PENDBASER) and enable LPIs.
 */
static void
its_init_cpu_lpi(device_t dev, struct gicv3_its_softc *sc)
{
	device_t gicv3;
	uint64_t xbaser, tmp;
	uint32_t ctlr;
	u_int cpuid;

	gicv3 =
	    device_get_parent(dev);
	cpuid = PCPU_GET(cpuid);

	/* Disable LPIs */
	ctlr = gic_r_read_4(gicv3, GICR_CTLR);
	ctlr &= ~GICR_CTLR_LPI_ENABLE;
	gic_r_write_4(gicv3, GICR_CTLR, ctlr);

	/* Make sure changes are observable by the GIC */
	dsb(sy);

	/*
	 * Set the redistributor base
	 */
	xbaser = vtophys(sc->sc_conf_base) |
	    (GICR_PROPBASER_SHARE_IS << GICR_PROPBASER_SHARE_SHIFT) |
	    (GICR_PROPBASER_CACHE_NIWAWB << GICR_PROPBASER_CACHE_SHIFT) |
	    (flsl(LPI_CONFTAB_SIZE | GIC_FIRST_LPI) - 1);
	gic_r_write_8(gicv3, GICR_PROPBASER, xbaser);

	/* Check the cache attributes we set */
	tmp = gic_r_read_8(gicv3, GICR_PROPBASER);

	if ((tmp & GICR_PROPBASER_SHARE_MASK) !=
	    (xbaser & GICR_PROPBASER_SHARE_MASK)) {
		if ((tmp & GICR_PROPBASER_SHARE_MASK) ==
		    (GICR_PROPBASER_SHARE_NS << GICR_PROPBASER_SHARE_SHIFT)) {
			/* We need to mark as non-cacheable */
			xbaser &= ~(GICR_PROPBASER_SHARE_MASK |
			    GICR_PROPBASER_CACHE_MASK);
			/* Non-cacheable */
			xbaser |= GICR_PROPBASER_CACHE_NIN <<
			    GICR_PROPBASER_CACHE_SHIFT;
			/* Non-shareable */
			xbaser |= GICR_PROPBASER_SHARE_NS <<
			    GICR_PROPBASER_SHARE_SHIFT;
			gic_r_write_8(gicv3, GICR_PROPBASER, xbaser);
		}
		/* Config table updates must be flushed from the D-cache */
		sc->sc_its_flags |= ITS_FLAGS_LPI_CONF_FLUSH;
	}

	/*
	 * Set the LPI pending table base
	 */
	xbaser = vtophys(sc->sc_pend_base[cpuid]) |
	    (GICR_PENDBASER_CACHE_NIWAWB << GICR_PENDBASER_CACHE_SHIFT) |
	    (GICR_PENDBASER_SHARE_IS << GICR_PENDBASER_SHARE_SHIFT);

	gic_r_write_8(gicv3, GICR_PENDBASER, xbaser);

	tmp = gic_r_read_8(gicv3, GICR_PENDBASER);

	if ((tmp & GICR_PENDBASER_SHARE_MASK) ==
	    (GICR_PENDBASER_SHARE_NS << GICR_PENDBASER_SHARE_SHIFT)) {
		/* Clear the cache and shareability bits */
		xbaser &= ~(GICR_PENDBASER_CACHE_MASK |
		    GICR_PENDBASER_SHARE_MASK);
		/* Mark as non-shareable */
		xbaser |= GICR_PENDBASER_SHARE_NS << GICR_PENDBASER_SHARE_SHIFT;
		/* And non-cacheable */
		xbaser |= GICR_PENDBASER_CACHE_NIN <<
		    GICR_PENDBASER_CACHE_SHIFT;
		/*
		 * NOTE(review): unlike the PROPBASER path above, the
		 * adjusted xbaser is never written back to GICR_PENDBASER
		 * here — confirm this is intentional.
		 */
	}

	/* Enable LPIs */
	ctlr = gic_r_read_4(gicv3, GICR_CTLR);
	ctlr |= GICR_CTLR_LPI_ENABLE;
	gic_r_write_4(gicv3, GICR_CTLR, ctlr);

	/* Make sure the GIC has seen everything */
	dsb(sy);
}

/*
 * Per-CPU ITS initialisation: program the redistributor LPI tables (once),
 * then map this CPU's collection to its target and invalidate any cached
 * configuration.  Returns 0 on success, ENXIO if physical LPIs are not
 * supported on this CPU, or 0 (no-op) if the CPU is outside sc_cpus.
 */
static int
its_init_cpu(device_t dev, struct gicv3_its_softc *sc)
{
	device_t gicv3;
	vm_paddr_t target;
	u_int cpuid;
	struct redist_pcpu *rpcpu;

	gicv3 = device_get_parent(dev);
	cpuid = PCPU_GET(cpuid);
	if (!CPU_ISSET(cpuid, &sc->sc_cpus))
		return (0);

	/* Check if the ITS is enabled on this CPU */
	if ((gic_r_read_8(gicv3, GICR_TYPER) & GICR_TYPER_PLPIS) == 0)
		return (ENXIO);

	rpcpu = gicv3_get_redist(dev);

	/* Do per-cpu LPI init once */
	if (!rpcpu->lpi_enabled) {
		its_init_cpu_lpi(dev, sc);
		rpcpu->lpi_enabled = true;
	}

	if ((gic_its_read_8(sc, GITS_TYPER) & GITS_TYPER_PTA) != 0) {
		/* This ITS wants the redistributor physical address */
		target = vtophys((vm_offset_t)rman_get_virtual(rpcpu->res) +
		    rpcpu->offset);
	} else {
		/* This ITS wants the unique processor number */
		target = GICR_TYPER_CPUNUM(gic_r_read_8(gicv3, GICR_TYPER)) <<
		    CMD_TARGET_SHIFT;
	}

	sc->sc_its_cols[cpuid]->col_target = target;
	sc->sc_its_cols[cpuid]->col_id = cpuid;

	its_cmd_mapc(dev, sc->sc_its_cols[cpuid], 1);
	its_cmd_invall(dev, sc->sc_its_cols[cpuid]);

	return (0);
}

/*
 * Sysctl handler: toggle the (GIC-500 specific) message tracing logic
 * via GITS_TRKCTLR.
 */
static int
gicv3_its_sysctl_trace_enable(SYSCTL_HANDLER_ARGS)
{
	struct gicv3_its_softc *sc;
	int rv;

	sc = arg1;

	rv = sysctl_handle_bool(oidp, &sc->trace_enable, 0, req);
	if (rv != 0 || req->newptr == NULL)
		return (rv);
	if (sc->trace_enable)
		gic_its_write_8(sc, GITS_TRKCTLR, 3);
	else
		gic_its_write_8(sc, GITS_TRKCTLR, 0);

	return (0);
}

static int
gicv3_its_sysctl_trace_regs(SYSCTL_HANDLER_ARGS)
{
	struct gicv3_its_softc *sc;
	struct sbuf *sb;
	int err;

	sc = arg1;
	sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (sb == NULL) {
		device_printf(sc->dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}
	sbuf_cat(sb, "\n");
	sbuf_printf(sb, "GITS_TRKCTLR: 0x%08X\n",
	    gic_its_read_4(sc, GITS_TRKCTLR));
	sbuf_printf(sb, "GITS_TRKR:    0x%08X\n",
	    gic_its_read_4(sc, GITS_TRKR));
	sbuf_printf(sb, "GITS_TRKDIDR: 0x%08X\n",
	    gic_its_read_4(sc, GITS_TRKDIDR));
	sbuf_printf(sb, "GITS_TRKPIDR: 0x%08X\n",
	    gic_its_read_4(sc, GITS_TRKPIDR));
	sbuf_printf(sb, "GITS_TRKVIDR: 0x%08X\n",
	    gic_its_read_4(sc, GITS_TRKVIDR));
	sbuf_printf(sb, "GITS_TRKTGTR: 0x%08X\n",
	    gic_its_read_4(sc, GITS_TRKTGTR));

	err = sbuf_finish(sb);
	if (err)
		device_printf(sc->dev, "Error finishing sbuf: %d\n", err);
	sbuf_delete(sb);
	return(err);
}

/*
 * Install the "tracing" sysctl node (GIC-500 message tracing controls).
 * Returns 0 on success, ENXIO if the node could not be created.
 */
static int
gicv3_its_init_sysctl(struct gicv3_its_softc *sc)
{
	struct sysctl_oid *oid, *child;
	struct sysctl_ctx_list *ctx_list;

	ctx_list = device_get_sysctl_ctx(sc->dev);
	child = device_get_sysctl_tree(sc->dev);
	oid = SYSCTL_ADD_NODE(ctx_list,
	    SYSCTL_CHILDREN(child), OID_AUTO, "tracing",
	    CTLFLAG_RD| CTLFLAG_MPSAFE, NULL, "Messages tracing");
	if (oid == NULL)
		return (ENXIO);

	/* Add registers */
	SYSCTL_ADD_PROC(ctx_list,
	    SYSCTL_CHILDREN(oid), OID_AUTO, "enable",
	    CTLTYPE_U8 | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
	    gicv3_its_sysctl_trace_enable, "CU", "Enable tracing");
	SYSCTL_ADD_PROC(ctx_list,
	    SYSCTL_CHILDREN(oid), OID_AUTO, "capture",
	    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
	    gicv3_its_sysctl_trace_regs, "", "Captured tracing registers.");

	return (0);
}

/*
 * Common attach: map the ITS register frame, apply implementation quirks,
 * allocate the private/configuration/pending tables and the command queue,
 * enable the ITS and set up the IRQ allocator.  Called by the FDT/ACPI
 * front-end attach routines.
 */
static int
gicv3_its_attach(device_t dev)
{
	struct gicv3_its_softc *sc;
	int domain, err, i, rid;
	uint64_t phys;
	uint32_t ctlr, iidr;

	sc = device_get_softc(dev);

	sc->sc_dev_table_idx = -1;
	sc->sc_irq_length = gicv3_get_nirqs(dev);
	sc->sc_irq_base = GIC_FIRST_LPI;
	sc->sc_irq_base += device_get_unit(dev) * sc->sc_irq_length;

	rid = 0;
	sc->sc_its_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->sc_its_res == NULL) {
		device_printf(dev, "Could not allocate memory\n");
		return (ENXIO);
	}

	/* Fake page backing GITS_TRANSLATER for MSI address mapping */
	phys = rounddown2(vtophys(rman_get_virtual(sc->sc_its_res)) +
	    GITS_TRANSLATER, PAGE_SIZE);
	sc->ma = malloc(sizeof(struct vm_page), M_DEVBUF, M_WAITOK | M_ZERO);
	vm_page_initfake(sc->ma, phys, VM_MEMATTR_DEFAULT);

	CPU_COPY(&all_cpus, &sc->sc_cpus);
	iidr = gic_its_read_4(sc, GITS_IIDR);
	for (i = 0; i < nitems(its_quirks); i++) {
		if ((iidr & its_quirks[i].iidr_mask) == its_quirks[i].iidr) {
			if (bootverbose) {
				device_printf(dev, "Applying %s\n",
				    its_quirks[i].desc);
			}
			its_quirks[i].func(dev);
			break;
		}
	}

	if (bus_get_domain(dev, &domain) == 0 && domain < MAXMEMDOM) {
		sc->sc_ds = DOMAINSET_PREF(domain);
	} else {
		sc->sc_ds = DOMAINSET_RR();
	}

	/*
	 * GITS_CTLR_EN is mandated to reset to 0 on a Warm reset, but we may
	 * be coming in via, for instance, a kexec/kboot style setup where a
	 * previous kernel has configured then relinquished control.  Clear it
	 * so that we can reconfigure GITS_BASER*.
	 */
	ctlr = gic_its_read_4(sc, GITS_CTLR);
	if ((ctlr & GITS_CTLR_EN) != 0) {
		ctlr &= ~GITS_CTLR_EN;
		gic_its_write_4(sc, GITS_CTLR, ctlr);
	}

	/* Allocate the private tables */
	err = gicv3_its_table_init(dev, sc);
	if (err != 0)
		return (err);

	/* Protects access to the device list */
	mtx_init(&sc->sc_its_dev_lock, "ITS device lock", NULL, MTX_SPIN);

	/* Protects access to the ITS command circular buffer. */
	mtx_init(&sc->sc_its_cmd_lock, "ITS cmd lock", NULL, MTX_SPIN);

	/* Allocate the command circular buffer */
	gicv3_its_cmdq_init(sc);

	/* Allocate the per-CPU collections */
	for (int cpu = 0; cpu <= mp_maxid; cpu++)
		if (CPU_ISSET(cpu, &sc->sc_cpus) != 0)
			sc->sc_its_cols[cpu] = malloc_domainset(
			    sizeof(*sc->sc_its_cols[0]), M_GICV3_ITS,
			    DOMAINSET_PREF(pcpu_find(cpu)->pc_domain),
			    M_WAITOK | M_ZERO);
		else
			sc->sc_its_cols[cpu] = NULL;

	/* Enable the ITS */
	gic_its_write_4(sc, GITS_CTLR, ctlr | GITS_CTLR_EN);

	/* Create the LPI configuration table */
	gicv3_its_conftable_init(sc);

	/* And the pending tables */
	gicv3_its_pendtables_init(sc);

	/* Enable LPIs on this CPU */
	its_init_cpu(dev, sc);

	TAILQ_INIT(&sc->sc_its_dev_list);
	TAILQ_INIT(&sc->sc_free_irqs);

	/*
	 * Create the vmem object to allocate INTRNG IRQs from. We try to
	 * use all IRQs not already used by the GICv3.
	 * XXX: This assumes there are no other interrupt controllers in the
	 * system.
	 */
	sc->sc_irq_alloc = vmem_create(device_get_nameunit(dev), 0,
	    gicv3_get_nirqs(dev), 1, 0, M_FIRSTFIT | M_WAITOK);

	sc->sc_irqs = malloc(sizeof(*sc->sc_irqs) * sc->sc_irq_length,
	    M_GICV3_ITS, M_WAITOK | M_ZERO);

	/* For GIC-500 install tracking sysctls. */
	if ((iidr & (GITS_IIDR_PRODUCT_MASK | GITS_IIDR_IMPLEMENTOR_MASK)) ==
	    GITS_IIDR_RAW(GITS_IIDR_IMPL_ARM, GITS_IIDR_PROD_GIC500, 0, 0))
		gicv3_its_init_sysctl(sc);

	return (0);
}

/* Detach is not supported; the ITS cannot be torn down once enabled. */
static int
gicv3_its_detach(device_t dev)
{

	return (ENXIO);
}

/*
 * Cavium ThunderX erratum 22375/24313 workaround: flag the erratum and
 * restrict LPI delivery to the ITS's own NUMA domain.
 */
static void
its_quirk_cavium_22375(device_t dev)
{
	struct gicv3_its_softc *sc;
	int domain;

	sc = device_get_softc(dev);
	sc->sc_its_flags |= ITS_FLAGS_ERRATA_CAVIUM_22375;

	/*
	 * We need to limit which CPUs we send these interrupts to on
	 * the original dual socket ThunderX as it is unable to
	 * forward them between the two sockets.
	 */
	if (bus_get_domain(dev, &domain) == 0) {
		if (domain < MAXMEMDOM) {
			CPU_COPY(&cpuset_domain[domain], &sc->sc_cpus);
		} else {
			CPU_ZERO(&sc->sc_cpus);
		}
	}
}

/*
 * Disable an LPI by clearing its enable bit in the configuration table,
 * making the change visible to the ITS, then invalidating any cached copy.
 */
static void
gicv3_its_disable_intr(device_t dev, struct intr_irqsrc *isrc)
{
	struct gicv3_its_softc *sc;
	struct gicv3_its_irqsrc *girq;
	uint8_t *conf;

	sc = device_get_softc(dev);
	girq = (struct gicv3_its_irqsrc *)isrc;
	conf = sc->sc_conf_base;

	conf[girq->gi_lpi] &= ~LPI_CONF_ENABLE;

	if ((sc->sc_its_flags & ITS_FLAGS_LPI_CONF_FLUSH) != 0) {
		/* Clean D-cache under command.
*/ 1094 cpu_dcache_wb_range((vm_offset_t)&conf[girq->gi_lpi], 1); 1095 } else { 1096 /* DSB inner shareable, store */ 1097 dsb(ishst); 1098 } 1099 1100 its_cmd_inv(dev, girq->gi_its_dev, girq); 1101 } 1102 1103 static void 1104 gicv3_its_enable_intr(device_t dev, struct intr_irqsrc *isrc) 1105 { 1106 struct gicv3_its_softc *sc; 1107 struct gicv3_its_irqsrc *girq; 1108 uint8_t *conf; 1109 1110 sc = device_get_softc(dev); 1111 girq = (struct gicv3_its_irqsrc *)isrc; 1112 conf = sc->sc_conf_base; 1113 1114 conf[girq->gi_lpi] |= LPI_CONF_ENABLE; 1115 1116 if ((sc->sc_its_flags & ITS_FLAGS_LPI_CONF_FLUSH) != 0) { 1117 /* Clean D-cache under command. */ 1118 cpu_dcache_wb_range((vm_offset_t)&conf[girq->gi_lpi], 1); 1119 } else { 1120 /* DSB inner shareable, store */ 1121 dsb(ishst); 1122 } 1123 1124 its_cmd_inv(dev, girq->gi_its_dev, girq); 1125 } 1126 1127 static int 1128 gicv3_its_intr(void *arg, uintptr_t irq) 1129 { 1130 struct gicv3_its_softc *sc = arg; 1131 struct gicv3_its_irqsrc *girq; 1132 struct trapframe *tf; 1133 1134 irq -= sc->sc_irq_base; 1135 girq = sc->sc_irqs[irq]; 1136 if (girq == NULL) 1137 panic("gicv3_its_intr: Invalid interrupt %ld", 1138 irq + sc->sc_irq_base); 1139 1140 tf = curthread->td_intr_frame; 1141 intr_isrc_dispatch(&girq->gi_isrc, tf); 1142 return (FILTER_HANDLED); 1143 } 1144 1145 static void 1146 gicv3_its_pre_ithread(device_t dev, struct intr_irqsrc *isrc) 1147 { 1148 struct gicv3_its_irqsrc *girq; 1149 1150 girq = (struct gicv3_its_irqsrc *)isrc; 1151 gic_icc_write(EOIR1, girq->gi_lpi + GIC_FIRST_LPI); 1152 } 1153 1154 static void 1155 gicv3_its_post_ithread(device_t dev, struct intr_irqsrc *isrc) 1156 { 1157 1158 } 1159 1160 static void 1161 gicv3_its_post_filter(device_t dev, struct intr_irqsrc *isrc) 1162 { 1163 struct gicv3_its_irqsrc *girq; 1164 1165 girq = (struct gicv3_its_irqsrc *)isrc; 1166 gic_icc_write(EOIR1, girq->gi_lpi + GIC_FIRST_LPI); 1167 } 1168 1169 static int 1170 gicv3_its_select_cpu(device_t dev, struct 
intr_irqsrc *isrc)
{
	struct gicv3_its_softc *sc;

	sc = device_get_softc(dev);
	/*
	 * No CPU bound yet: round-robin to the next CPU in the set this
	 * ITS is allowed to target and record it in the irqsrc.
	 */
	if (CPU_EMPTY(&isrc->isrc_cpu)) {
		sc->gic_irq_cpu = intr_irq_next_cpu(sc->gic_irq_cpu,
		    &sc->sc_cpus);
		CPU_SETOF(sc->gic_irq_cpu, &isrc->isrc_cpu);
	}

	return (0);
}

/* PIC interface: (re)bind an LPI to its CPU via an ITS MOVI command. */
static int
gicv3_its_bind_intr(device_t dev, struct intr_irqsrc *isrc)
{
	struct gicv3_its_irqsrc *girq;

	gicv3_its_select_cpu(dev, isrc);

	girq = (struct gicv3_its_irqsrc *)isrc;
	its_cmd_movi(dev, girq);
	return (0);
}

static int
gicv3_its_map_intr(device_t dev, struct intr_map_data *data,
    struct intr_irqsrc **isrcp)
{

	/*
	 * This should never happen, we only call this function to map
	 * interrupts found before the controller driver is ready.
	 */
	panic("gicv3_its_map_intr: Unable to map a MSI interrupt");
}

/* PIC interface: first setup of an MSI irqsrc simply binds it to a CPU. */
static int
gicv3_its_setup_intr(device_t dev, struct intr_irqsrc *isrc,
    struct resource *res, struct intr_map_data *data)
{

	/* Bind the interrupt to a CPU */
	gicv3_its_bind_intr(dev, isrc);

	return (0);
}

#ifdef SMP
/* Enable LPIs on a CPU coming online; fatal if the ITS cannot serve it. */
static void
gicv3_its_init_secondary(device_t dev)
{
	struct gicv3_its_softc *sc;

	sc = device_get_softc(dev);

	/*
	 * This is fatal as otherwise we may bind interrupts to this CPU.
	 * We need a way to tell the interrupt framework to only bind to a
	 * subset of given CPUs when it performs the shuffle.
 */
	if (its_init_cpu(dev, sc) != 0)
		panic("gicv3_its_init_secondary: No usable ITS on CPU%d",
		    PCPU_GET(cpuid));
}
#endif

/* Resolve the MSI DeviceID the ITS uses to identify this PCI device. */
static uint32_t
its_get_devid(device_t pci_dev)
{
	uintptr_t id;

	if (pci_get_id(pci_dev, PCI_ID_MSI, &id) != 0)
		panic("%s: %s: Unable to get the MSI DeviceID", __func__,
		    device_get_nameunit(pci_dev));

	return (id);
}

/*
 * Look up the per-device ITS state for 'child' on the device list.
 * Returns NULL when the device has no state yet.
 */
static struct its_dev *
its_device_find(device_t dev, device_t child)
{
	struct gicv3_its_softc *sc;
	struct its_dev *its_dev = NULL;

	sc = device_get_softc(dev);

	mtx_lock_spin(&sc->sc_its_dev_lock);
	TAILQ_FOREACH(its_dev, &sc->sc_its_dev_list, entry) {
		if (its_dev->pci_dev == child)
			break;
	}
	mtx_unlock_spin(&sc->sc_its_dev_lock);

	return (its_dev);
}

/*
 * Ensure the ITS device table can hold 'devid', allocating a
 * second-level table entry when the table is indirect.  Returns false
 * when the DeviceID is out of range for the hardware or the table.
 */
static bool
its_device_alloc(struct gicv3_its_softc *sc, int devid)
{
	struct its_ptable *ptable;
	vm_offset_t l2_table;
	uint64_t *table;
	uint32_t index;
	bool shareable;

	/* No device table */
	if (sc->sc_dev_table_idx < 0) {
		if (devid >= (1 << sc->sc_devbits)) {
			if (bootverbose) {
				device_printf(sc->dev,
				    "%s: Device out of range for hardware "
				    "(%x >= %x)\n", __func__, devid,
				    1 << sc->sc_devbits);
			}
			return (false);
		}
		return (true);
	}

	ptable = &sc->sc_its_ptab[sc->sc_dev_table_idx];
	/* Check the devid is within the table limit */
	if (!ptable->ptab_indirect) {
		if (devid >= ptable->ptab_l1_nidents) {
			if (bootverbose) {
				device_printf(sc->dev,
				    "%s: Device out of range for table "
				    "(%x >= %x)\n", __func__, devid,
				    ptable->ptab_l1_nidents);
			}
			return (false);
		}

		return (true);
	}

	/* Check the devid is within the allocated range */
	index = devid / ptable->ptab_l2_nidents;
	if
(bootverbose) {
			device_printf(sc->dev,
			    "%s: Index out of range for table (%x >= %x)\n",
			    __func__, index, ptable->ptab_l1_nidents);
		}
		return (false);
	}

	table = (uint64_t *)ptable->ptab_vaddr;
	/* We already have a second level table for this index */
	if ((table[index] & GITS_BASER_VALID) != 0)
		return (true);

	shareable = true;
	if ((ptable->ptab_share & GITS_BASER_SHARE_MASK) == GITS_BASER_SHARE_NS)
		shareable = false;

	/*
	 * M_WAITOK: sleeps rather than fail.  The physical address is
	 * constrained to 48 bits to fit the level-1 table entry.
	 */
	l2_table = (vm_offset_t)contigmalloc_domainset(ptable->ptab_l2_size,
	    M_GICV3_ITS, sc->sc_ds, M_WAITOK | M_ZERO, 0, (1ul << 48) - 1,
	    ptable->ptab_page_size, 0);

	/* Non-shareable memory must be cleaned so the ITS sees the zeroes. */
	if (!shareable)
		cpu_dcache_wb_range((vm_offset_t)l2_table,
		    ptable->ptab_l2_size);

	table[index] = vtophys(l2_table) | GITS_BASER_VALID;
	if (!shareable)
		cpu_dcache_wb_range((vm_offset_t)&table[index],
		    sizeof(table[index]));

	dsb(sy);
	return (true);
}

/*
 * Find or create the per-device ITS state for 'child': allocate its
 * DeviceID table entry, an LPI range of 'nvecs' vectors, and its
 * Interrupt Translation Table (ITT), then MAPD the device.
 * Returns NULL on allocation failure.
 */
static struct its_dev *
its_device_get(device_t dev, device_t child, u_int nvecs)
{
	struct gicv3_its_softc *sc;
	struct its_dev *its_dev;
	vmem_addr_t irq_base;
	size_t esize;

	sc = device_get_softc(dev);

	its_dev = its_device_find(dev, child);
	if (its_dev != NULL)
		return (its_dev);

	its_dev = malloc(sizeof(*its_dev), M_GICV3_ITS, M_NOWAIT | M_ZERO);
	if (its_dev == NULL)
		return (NULL);

	its_dev->pci_dev = child;
	its_dev->devid = its_get_devid(child);

	its_dev->lpis.lpi_busy = 0;
	its_dev->lpis.lpi_num = nvecs;
	its_dev->lpis.lpi_free = nvecs;

	if (!its_device_alloc(sc, its_dev->devid)) {
		free(its_dev, M_GICV3_ITS);
		return (NULL);
	}

	if (vmem_alloc(sc->sc_irq_alloc, nvecs, M_FIRSTFIT | M_NOWAIT,
	    &irq_base) != 0) {
		free(its_dev, M_GICV3_ITS);
		return (NULL);
	}
	its_dev->lpis.lpi_base = irq_base;

	/* Get ITT entry size */
	esize =
GITS_TYPER_ITTES(gic_its_read_8(sc, GITS_TYPER));

	/*
	 * Allocate ITT for this device.
	 * PA has to be 256 B aligned. At least two entries for device.
	 */
	its_dev->itt_size = roundup2(MAX(nvecs, 2) * esize, 256);
	its_dev->itt = (vm_offset_t)contigmalloc_domainset(its_dev->itt_size,
	    M_GICV3_ITS, sc->sc_ds, M_NOWAIT | M_ZERO, 0,
	    LPI_INT_TRANS_TAB_MAX_ADDR, LPI_INT_TRANS_TAB_ALIGN, 0);
	if (its_dev->itt == 0) {
		/* Unwind the LPI range reservation on failure. */
		vmem_free(sc->sc_irq_alloc, its_dev->lpis.lpi_base, nvecs);
		free(its_dev, M_GICV3_ITS);
		return (NULL);
	}

	/* Make sure device sees zeroed ITT. */
	if ((sc->sc_its_flags & ITS_FLAGS_CMDQ_FLUSH) != 0)
		cpu_dcache_wb_range(its_dev->itt, its_dev->itt_size);

	mtx_lock_spin(&sc->sc_its_dev_lock);
	TAILQ_INSERT_TAIL(&sc->sc_its_dev_list, its_dev, entry);
	mtx_unlock_spin(&sc->sc_its_dev_lock);

	/* Map device to its ITT */
	its_cmd_mapd(dev, its_dev, 1);

	return (its_dev);
}

/*
 * Release all resources of a per-device ITS state: unmap the device
 * (MAPD with valid clear), free its ITT and LPI range.  The caller
 * must have released every interrupt first (lpi_busy == 0).
 */
static void
its_device_release(device_t dev, struct its_dev *its_dev)
{
	struct gicv3_its_softc *sc;

	KASSERT(its_dev->lpis.lpi_busy == 0,
	    ("its_device_release: Trying to release an inuse ITS device"));

	/* Unmap device in ITS */
	its_cmd_mapd(dev, its_dev, 0);

	sc = device_get_softc(dev);

	/* Remove the device from the list of devices */
	mtx_lock_spin(&sc->sc_its_dev_lock);
	TAILQ_REMOVE(&sc->sc_its_dev_list, its_dev, entry);
	mtx_unlock_spin(&sc->sc_its_dev_lock);

	/* Free ITT */
	KASSERT(its_dev->itt != 0, ("Invalid ITT in valid ITS device"));
	contigfree((void *)its_dev->itt, its_dev->itt_size, M_GICV3_ITS);

	/* Free the IRQ allocation */
	vmem_free(sc->sc_irq_alloc, its_dev->lpis.lpi_base,
	    its_dev->lpis.lpi_num);

	free(its_dev, M_GICV3_ITS);
}

/*
 * Allocate (or recycle from the free list) an interrupt source for
 * INTRNG IRQ 'irq'.  Returns NULL on allocation failure.
 */
static struct gicv3_its_irqsrc *
gicv3_its_alloc_irqsrc(device_t dev, struct
gicv3_its_softc *sc, u_int irq)
{
	struct gicv3_its_irqsrc *girq = NULL;

	KASSERT(sc->sc_irqs[irq] == NULL,
	    ("%s: Interrupt %u already allocated", __func__, irq));
	/* Prefer recycling a previously registered, now-free irqsrc. */
	mtx_lock_spin(&sc->sc_its_dev_lock);
	if (!TAILQ_EMPTY(&sc->sc_free_irqs)) {
		girq = TAILQ_FIRST(&sc->sc_free_irqs);
		TAILQ_REMOVE(&sc->sc_free_irqs, girq, gi_link);
	}
	mtx_unlock_spin(&sc->sc_its_dev_lock);
	if (girq == NULL) {
		girq = malloc(sizeof(*girq), M_GICV3_ITS,
		    M_NOWAIT | M_ZERO);
		if (girq == NULL)
			return (NULL);
		girq->gi_id = -1;
		if (intr_isrc_register(&girq->gi_isrc, dev, 0,
		    "%s,%u", device_get_nameunit(dev), irq) != 0) {
			free(girq, M_GICV3_ITS);
			return (NULL);
		}
	}
	/* Record the LPI number relative to the start of the LPI space. */
	girq->gi_lpi = irq + sc->sc_irq_base - GIC_FIRST_LPI;
	sc->sc_irqs[irq] = girq;

	return (girq);
}

/*
 * Return an irqsrc to the free list.  The irqsrc stays registered with
 * INTRNG so it can be recycled by gicv3_its_alloc_irqsrc().
 * Caller holds sc_its_dev_lock.
 */
static void
gicv3_its_release_irqsrc(struct gicv3_its_softc *sc,
    struct gicv3_its_irqsrc *girq)
{
	u_int irq;

	mtx_assert(&sc->sc_its_dev_lock, MA_OWNED);

	irq = girq->gi_lpi + GIC_FIRST_LPI - sc->sc_irq_base;
	sc->sc_irqs[irq] = NULL;

	girq->gi_id = -1;
	girq->gi_its_dev = NULL;
	TAILQ_INSERT_TAIL(&sc->sc_free_irqs, girq, gi_link);
}

/*
 * MSI interface: allocate 'count' MSI vectors for 'child' and map each
 * EventID to an LPI on a selected CPU.
 */
static int
gicv3_its_alloc_msi(device_t dev, device_t child, int count, int maxcount,
    device_t *pic, struct intr_irqsrc **srcs)
{
	struct gicv3_its_softc *sc;
	struct gicv3_its_irqsrc *girq;
	struct its_dev *its_dev;
	u_int irq;
	int i;

	its_dev = its_device_get(dev, child, count);
	if (its_dev == NULL)
		return (ENXIO);

	KASSERT(its_dev->lpis.lpi_free >= count,
	    ("gicv3_its_alloc_msi: No free LPIs"));
	sc = device_get_softc(dev);
	/* First unused IRQ within the device's contiguous LPI range. */
	irq = its_dev->lpis.lpi_base + its_dev->lpis.lpi_num -
	    its_dev->lpis.lpi_free;

	/* Allocate the irqsrc for each MSI */
	for (i = 0; i < count; i++, irq++) {
its_dev->lpis.lpi_free--;
		srcs[i] = (struct intr_irqsrc *)gicv3_its_alloc_irqsrc(dev,
		    sc, irq);
		if (srcs[i] == NULL)
			break;
	}

	/* The allocation failed, release them */
	if (i != count) {
		mtx_lock_spin(&sc->sc_its_dev_lock);
		for (i = 0; i < count; i++) {
			girq = (struct gicv3_its_irqsrc *)srcs[i];
			if (girq == NULL)
				break;
			gicv3_its_release_irqsrc(sc, girq);
			srcs[i] = NULL;
		}
		mtx_unlock_spin(&sc->sc_its_dev_lock);
		/*
		 * NOTE(review): lpi_free decremented in the loop above is
		 * not restored here, so the device's range bookkeeping may
		 * leak on this path -- TODO confirm whether intentional.
		 */
		return (ENXIO);
	}

	/* Finish the allocation now we have all MSI irqsrcs */
	for (i = 0; i < count; i++) {
		girq = (struct gicv3_its_irqsrc *)srcs[i];
		girq->gi_id = i;
		girq->gi_its_dev = its_dev;

		/* Map the message to the given IRQ */
		gicv3_its_select_cpu(dev, (struct intr_irqsrc *)girq);
		its_cmd_mapti(dev, girq);
	}
	its_dev->lpis.lpi_busy += count;
	*pic = dev;

	return (0);
}

/*
 * MSI interface: release 'count' previously allocated MSI vectors.
 * Frees the whole per-device state once no interrupt remains busy.
 */
static int
gicv3_its_release_msi(device_t dev, device_t child, int count,
    struct intr_irqsrc **isrc)
{
	struct gicv3_its_softc *sc;
	struct gicv3_its_irqsrc *girq;
	struct its_dev *its_dev;
	int i;

	its_dev = its_device_find(dev, child);

	KASSERT(its_dev != NULL,
	    ("gicv3_its_release_msi: Releasing a MSI interrupt with "
	     "no ITS device"));
	KASSERT(its_dev->lpis.lpi_busy >= count,
	    ("gicv3_its_release_msi: Releasing more interrupts than "
	     "were allocated: releasing %d, allocated %d", count,
	     its_dev->lpis.lpi_busy));

	sc = device_get_softc(dev);
	mtx_lock_spin(&sc->sc_its_dev_lock);
	for (i = 0; i < count; i++) {
		girq = (struct gicv3_its_irqsrc *)isrc[i];
		gicv3_its_release_irqsrc(sc, girq);
	}
	mtx_unlock_spin(&sc->sc_its_dev_lock);
	its_dev->lpis.lpi_busy -= count;

	if (its_dev->lpis.lpi_busy == 0)
		its_device_release(dev, its_dev);

	return (0);
}

static int
gicv3_its_alloc_msix(device_t dev, device_t child, device_t *pic,
    struct intr_irqsrc **isrcp)
{
	struct gicv3_its_softc *sc;
	struct gicv3_its_irqsrc *girq;
	struct its_dev *its_dev;
	u_int nvecs, irq;

	/*
	 * MSI-X vectors are handed out one at a time; size the device's
	 * LPI range for the full MSI-X table on first use.
	 */
	nvecs = pci_msix_count(child);
	its_dev = its_device_get(dev, child, nvecs);
	if (its_dev == NULL)
		return (ENXIO);

	KASSERT(its_dev->lpis.lpi_free > 0,
	    ("gicv3_its_alloc_msix: No free LPIs"));
	sc = device_get_softc(dev);
	/* Next unused IRQ within the device's contiguous LPI range. */
	irq = its_dev->lpis.lpi_base + its_dev->lpis.lpi_num -
	    its_dev->lpis.lpi_free;

	girq = gicv3_its_alloc_irqsrc(dev, sc, irq);
	if (girq == NULL)
		return (ENXIO);
	girq->gi_id = its_dev->lpis.lpi_busy;
	girq->gi_its_dev = its_dev;

	its_dev->lpis.lpi_free--;
	its_dev->lpis.lpi_busy++;

	/* Map the message to the given IRQ */
	gicv3_its_select_cpu(dev, (struct intr_irqsrc *)girq);
	its_cmd_mapti(dev, girq);

	*pic = dev;
	*isrcp = (struct intr_irqsrc *)girq;

	return (0);
}

/*
 * MSI interface: release a single MSI-X vector; frees the whole
 * per-device state once no interrupt remains busy.
 */
static int
gicv3_its_release_msix(device_t dev, device_t child, struct intr_irqsrc *isrc)
{
	struct gicv3_its_softc *sc;
	struct gicv3_its_irqsrc *girq;
	struct its_dev *its_dev;

	its_dev = its_device_find(dev, child);

	KASSERT(its_dev != NULL,
	    ("gicv3_its_release_msix: Releasing a MSI-X interrupt with "
	     "no ITS device"));
	KASSERT(its_dev->lpis.lpi_busy > 0,
	    ("gicv3_its_release_msix: Releasing more interrupts than "
	     "were allocated: allocated %d", its_dev->lpis.lpi_busy));

	sc = device_get_softc(dev);
	girq = (struct gicv3_its_irqsrc *)isrc;
	mtx_lock_spin(&sc->sc_its_dev_lock);
	gicv3_its_release_irqsrc(sc, girq);
	mtx_unlock_spin(&sc->sc_its_dev_lock);
	its_dev->lpis.lpi_busy--;

	if (its_dev->lpis.lpi_busy == 0)
		its_device_release(dev, its_dev);

	return (0);
}

static int
gicv3_its_map_msi(device_t dev, device_t child, struct intr_irqsrc *isrc,
    uint64_t *addr, uint32_t *data)
{
	struct gicv3_its_softc *sc;
	struct gicv3_its_irqsrc *girq;

	sc = device_get_softc(dev);
	girq = (struct gicv3_its_irqsrc *)isrc;

	/*
	 * The device writes its per-interrupt EventID (gi_id) to the
	 * GITS_TRANSLATER doorbell register to raise the interrupt.
	 */
	*addr = vtophys(rman_get_virtual(sc->sc_its_res)) + GITS_TRANSLATER;
	*data = girq->gi_id;

	return (0);
}

#ifdef IOMMU
/*
 * Set up an IOMMU mapping so a translated device can reach the
 * GITS_TRANSLATER doorbell page.
 */
static int
gicv3_iommu_init(device_t dev, device_t child, struct iommu_domain **domain)
{
	struct gicv3_its_softc *sc;
	struct iommu_ctx *ctx;
	int error;

	sc = device_get_softc(dev);
	ctx = iommu_get_dev_ctx(child);
	if (ctx == NULL)
		return (ENXIO);
	/* Map the page containing the GITS_TRANSLATER register. */
	error = iommu_map_msi(ctx, PAGE_SIZE, 0,
	    IOMMU_MAP_ENTRY_WRITE, IOMMU_MF_CANWAIT, &sc->ma);
	*domain = iommu_get_ctx_domain(ctx);

	return (error);
}

/* Tear down the doorbell mapping installed by gicv3_iommu_init(). */
static void
gicv3_iommu_deinit(device_t dev, device_t child)
{
	struct iommu_ctx *ctx;

	ctx = iommu_get_dev_ctx(child);
	if (ctx == NULL)
		return;

	iommu_unmap_msi(ctx);
}
#endif

/*
 * Commands handling.
1700 */ 1701 1702 static __inline void 1703 cmd_format_command(struct its_cmd *cmd, uint8_t cmd_type) 1704 { 1705 /* Command field: DW0 [7:0] */ 1706 cmd->cmd_dword[0] &= htole64(~CMD_COMMAND_MASK); 1707 cmd->cmd_dword[0] |= htole64(cmd_type); 1708 } 1709 1710 static __inline void 1711 cmd_format_devid(struct its_cmd *cmd, uint32_t devid) 1712 { 1713 /* Device ID field: DW0 [63:32] */ 1714 cmd->cmd_dword[0] &= htole64(~CMD_DEVID_MASK); 1715 cmd->cmd_dword[0] |= htole64((uint64_t)devid << CMD_DEVID_SHIFT); 1716 } 1717 1718 static __inline void 1719 cmd_format_size(struct its_cmd *cmd, uint16_t size) 1720 { 1721 /* Size field: DW1 [4:0] */ 1722 cmd->cmd_dword[1] &= htole64(~CMD_SIZE_MASK); 1723 cmd->cmd_dword[1] |= htole64((size & CMD_SIZE_MASK)); 1724 } 1725 1726 static __inline void 1727 cmd_format_id(struct its_cmd *cmd, uint32_t id) 1728 { 1729 /* ID field: DW1 [31:0] */ 1730 cmd->cmd_dword[1] &= htole64(~CMD_ID_MASK); 1731 cmd->cmd_dword[1] |= htole64(id); 1732 } 1733 1734 static __inline void 1735 cmd_format_pid(struct its_cmd *cmd, uint32_t pid) 1736 { 1737 /* Physical ID field: DW1 [63:32] */ 1738 cmd->cmd_dword[1] &= htole64(~CMD_PID_MASK); 1739 cmd->cmd_dword[1] |= htole64((uint64_t)pid << CMD_PID_SHIFT); 1740 } 1741 1742 static __inline void 1743 cmd_format_col(struct its_cmd *cmd, uint16_t col_id) 1744 { 1745 /* Collection field: DW2 [16:0] */ 1746 cmd->cmd_dword[2] &= htole64(~CMD_COL_MASK); 1747 cmd->cmd_dword[2] |= htole64(col_id); 1748 } 1749 1750 static __inline void 1751 cmd_format_target(struct its_cmd *cmd, uint64_t target) 1752 { 1753 /* Target Address field: DW2 [47:16] */ 1754 cmd->cmd_dword[2] &= htole64(~CMD_TARGET_MASK); 1755 cmd->cmd_dword[2] |= htole64(target & CMD_TARGET_MASK); 1756 } 1757 1758 static __inline void 1759 cmd_format_itt(struct its_cmd *cmd, uint64_t itt) 1760 { 1761 /* ITT Address field: DW2 [47:8] */ 1762 cmd->cmd_dword[2] &= htole64(~CMD_ITT_MASK); 1763 cmd->cmd_dword[2] |= htole64(itt & CMD_ITT_MASK); 1764 } 1765 1766 
static __inline void
cmd_format_valid(struct its_cmd *cmd, uint8_t valid)
{
	/* Valid field: DW2 [63] */
	cmd->cmd_dword[2] &= htole64(~CMD_VALID_MASK);
	cmd->cmd_dword[2] |= htole64((uint64_t)valid << CMD_VALID_SHIFT);
}

/*
 * Return true when the circular command queue cannot accept another
 * entry, i.e. the next write slot is the one the ITS is reading.
 */
static inline bool
its_cmd_queue_full(struct gicv3_its_softc *sc)
{
	size_t read_idx, next_write_idx;

	/* Get the index of the next command */
	next_write_idx = (sc->sc_its_cmd_next_idx + 1) %
	    (ITS_CMDQ_SIZE / sizeof(struct its_cmd));
	/* And the index of the current command being read */
	read_idx = gic_its_read_4(sc, GITS_CREADR) / sizeof(struct its_cmd);

	/*
	 * The queue is full when the write offset points
	 * at the command before the current read offset.
	 */
	return (next_write_idx == read_idx);
}

/* Make a queued command visible to the ITS (cache clean or barrier). */
static inline void
its_cmd_sync(struct gicv3_its_softc *sc, struct its_cmd *cmd)
{

	if ((sc->sc_its_flags & ITS_FLAGS_CMDQ_FLUSH) != 0) {
		/* Clean D-cache under command. */
		cpu_dcache_wb_range((vm_offset_t)cmd, sizeof(*cmd));
	} else {
		/* DSB inner shareable, store */
		dsb(ishst);
	}

}

/* Byte offset of 'cmd' from the start of the command queue. */
static inline uint64_t
its_cmd_cwriter_offset(struct gicv3_its_softc *sc, struct its_cmd *cmd)
{
	uint64_t off;

	off = (cmd - sc->sc_its_cmd_base) * sizeof(*cmd);

	return (off);
}

/*
 * Poll GITS_CREADR until the ITS has consumed every command in the
 * half-open range [cmd_first, cmd_last), which may wrap around the
 * end of the circular queue.  Logs and returns on timeout.
 */
static void
its_cmd_wait_completion(device_t dev, struct its_cmd *cmd_first,
    struct its_cmd *cmd_last)
{
	struct gicv3_its_softc *sc;
	uint64_t first, last, read;
	size_t us_left;

	sc = device_get_softc(dev);

	/*
	 * XXX ARM64TODO: This is obviously a significant delay.
	 * The reason for that is that currently the time frames for
	 * the command to complete are not known.
 */
	us_left = 1000000;

	first = its_cmd_cwriter_offset(sc, cmd_first);
	last = its_cmd_cwriter_offset(sc, cmd_last);

	for (;;) {
		read = gic_its_read_8(sc, GITS_CREADR);
		/* Two cases: the pending range wraps, or it does not. */
		if (first < last) {
			if (read < first || read >= last)
				break;
		} else if (read < first && read >= last)
			break;

		if (us_left-- == 0) {
			/* This means timeout */
			device_printf(dev,
			    "Timeout while waiting for CMD completion.\n");
			return;
		}
		DELAY(1);
	}
}

/*
 * Reserve the next slot in the command queue, spinning (up to ~1s)
 * while the queue is full.  Returns NULL on timeout.
 * Caller holds sc_its_cmd_lock.
 */
static struct its_cmd *
its_cmd_alloc_locked(device_t dev)
{
	struct gicv3_its_softc *sc;
	struct its_cmd *cmd;
	size_t us_left;

	sc = device_get_softc(dev);

	/*
	 * XXX ARM64TODO: This is obviously a significant delay.
	 * The reason for that is that currently the time frames for
	 * the command to complete (and therefore free the descriptor)
	 * are not known.
	 */
	us_left = 1000000;

	mtx_assert(&sc->sc_its_cmd_lock, MA_OWNED);
	while (its_cmd_queue_full(sc)) {
		if (us_left-- == 0) {
			/* Timeout while waiting for free command */
			device_printf(dev,
			    "Timeout while waiting for free command\n");
			return (NULL);
		}
		DELAY(1);
	}

	cmd = &sc->sc_its_cmd_base[sc->sc_its_cmd_next_idx];
	sc->sc_its_cmd_next_idx++;
	sc->sc_its_cmd_next_idx %= ITS_CMDQ_SIZE / sizeof(struct its_cmd);

	return (cmd);
}

/*
 * Encode a command descriptor into the queue entry 'cmd'.  Returns the
 * target redistributor address for commands that should be followed by
 * a SYNC, or ITS_TARGET_NONE when no SYNC is needed.
 */
static uint64_t
its_cmd_prepare(struct its_cmd *cmd, struct its_cmd_desc *desc)
{
	uint64_t target;
	uint8_t cmd_type;
	u_int size;

	cmd_type = desc->cmd_type;
	target = ITS_TARGET_NONE;

	switch (cmd_type) {
	case ITS_CMD_MOVI:	/* Move interrupt ID to another collection */
		target = desc->cmd_desc_movi.col->col_target;
		cmd_format_command(cmd, ITS_CMD_MOVI);
		cmd_format_id(cmd, desc->cmd_desc_movi.id);
		cmd_format_col(cmd, desc->cmd_desc_movi.col->col_id);
		cmd_format_devid(cmd, desc->cmd_desc_movi.its_dev->devid);
		break;
	case ITS_CMD_SYNC:	/* Wait for previous commands completion */
		target = desc->cmd_desc_sync.col->col_target;
		cmd_format_command(cmd, ITS_CMD_SYNC);
		cmd_format_target(cmd, target);
		break;
	case ITS_CMD_MAPD:	/* Assign ITT to device */
		cmd_format_command(cmd, ITS_CMD_MAPD);
		cmd_format_itt(cmd, vtophys(desc->cmd_desc_mapd.its_dev->itt));
		/*
		 * Size describes number of bits to encode interrupt IDs
		 * supported by the device minus one.
		 * When V (valid) bit is zero, this field should be written
		 * as zero.
		 */
		if (desc->cmd_desc_mapd.valid != 0) {
			size = fls(desc->cmd_desc_mapd.its_dev->lpis.lpi_num);
			size = MAX(1, size) - 1;
		} else
			size = 0;

		cmd_format_size(cmd, size);
		cmd_format_devid(cmd, desc->cmd_desc_mapd.its_dev->devid);
		cmd_format_valid(cmd, desc->cmd_desc_mapd.valid);
		break;
	case ITS_CMD_MAPC:	/* Map collection to Re-Distributor */
		target = desc->cmd_desc_mapc.col->col_target;
		cmd_format_command(cmd, ITS_CMD_MAPC);
		cmd_format_col(cmd, desc->cmd_desc_mapc.col->col_id);
		cmd_format_valid(cmd, desc->cmd_desc_mapc.valid);
		cmd_format_target(cmd, target);
		break;
	case ITS_CMD_MAPTI:	/* Map EventID to an LPI and collection */
		target = desc->cmd_desc_mapvi.col->col_target;
		cmd_format_command(cmd, ITS_CMD_MAPTI);
		cmd_format_devid(cmd, desc->cmd_desc_mapvi.its_dev->devid);
		cmd_format_id(cmd, desc->cmd_desc_mapvi.id);
		cmd_format_pid(cmd, desc->cmd_desc_mapvi.pid);
		cmd_format_col(cmd, desc->cmd_desc_mapvi.col->col_id);
		break;
	case ITS_CMD_MAPI:	/* Map EventID to its own-numbered LPI */
		target = desc->cmd_desc_mapi.col->col_target;
		cmd_format_command(cmd, ITS_CMD_MAPI);
		cmd_format_devid(cmd, desc->cmd_desc_mapi.its_dev->devid);
		cmd_format_id(cmd, desc->cmd_desc_mapi.pid);
		cmd_format_col(cmd, desc->cmd_desc_mapi.col->col_id);
		break;
	case ITS_CMD_INV:	/* Invalidate cached LPI configuration */
		target = desc->cmd_desc_inv.col->col_target;
		cmd_format_command(cmd, ITS_CMD_INV);
		cmd_format_devid(cmd, desc->cmd_desc_inv.its_dev->devid);
		cmd_format_id(cmd, desc->cmd_desc_inv.pid);
		break;
	case ITS_CMD_INVALL:	/* Invalidate a whole collection's config */
		cmd_format_command(cmd, ITS_CMD_INVALL);
		cmd_format_col(cmd, desc->cmd_desc_invall.col->col_id);
		break;
	default:
		panic("its_cmd_prepare: Invalid command: %x", cmd_type);
	}

	return (target);
}

/*
 * Queue one command (plus a trailing SYNC when the command targets a
 * redistributor), advance GITS_CWRITER, and wait for the ITS to
 * consume the queued entries.
 */
static int
its_cmd_send(device_t dev, struct its_cmd_desc *desc)
{
	struct gicv3_its_softc *sc;
	struct its_cmd *cmd, *cmd_sync, *cmd_write;
	struct its_col col_sync;
	struct its_cmd_desc desc_sync;
	uint64_t target, cwriter;

	sc = device_get_softc(dev);
	mtx_lock_spin(&sc->sc_its_cmd_lock);
	cmd = its_cmd_alloc_locked(dev);
	if (cmd == NULL) {
		device_printf(dev, "could not allocate ITS command\n");
		mtx_unlock_spin(&sc->sc_its_cmd_lock);
		return (EBUSY);
	}

	target = its_cmd_prepare(cmd, desc);
	its_cmd_sync(sc, cmd);

	if (target != ITS_TARGET_NONE) {
		/* Best effort: skip the SYNC if no slot is available. */
		cmd_sync = its_cmd_alloc_locked(dev);
		if (cmd_sync != NULL) {
			desc_sync.cmd_type = ITS_CMD_SYNC;
			col_sync.col_target = target;
			desc_sync.cmd_desc_sync.col = &col_sync;
			its_cmd_prepare(cmd_sync, &desc_sync);
			its_cmd_sync(sc, cmd_sync);
		}
	}

	/* Update GITS_CWRITER */
	cwriter = sc->sc_its_cmd_next_idx * sizeof(struct its_cmd);
	gic_its_write_8(sc, GITS_CWRITER, cwriter);
	cmd_write = &sc->sc_its_cmd_base[sc->sc_its_cmd_next_idx];
	mtx_unlock_spin(&sc->sc_its_cmd_lock);

	its_cmd_wait_completion(dev, cmd, cmd_write);

	return (0);
}

/* Handlers to send commands */

/* MOVI: retarget an interrupt to the collection of its bound CPU. */
static void
its_cmd_movi(device_t dev, struct gicv3_its_irqsrc *girq)
{
	struct gicv3_its_softc *sc;
	struct its_cmd_desc desc;
	struct
its_col *col;

	sc = device_get_softc(dev);
	col = sc->sc_its_cols[CPU_FFS(&girq->gi_isrc.isrc_cpu) - 1];

	desc.cmd_type = ITS_CMD_MOVI;
	desc.cmd_desc_movi.its_dev = girq->gi_its_dev;
	desc.cmd_desc_movi.col = col;
	desc.cmd_desc_movi.id = girq->gi_id;

	its_cmd_send(dev, &desc);
}

/* MAPC: map (valid != 0) or unmap a collection to its redistributor. */
static void
its_cmd_mapc(device_t dev, struct its_col *col, uint8_t valid)
{
	struct its_cmd_desc desc;

	desc.cmd_type = ITS_CMD_MAPC;
	desc.cmd_desc_mapc.col = col;
	/*
	 * Valid bit set - map the collection.
	 * Valid bit cleared - unmap the collection.
	 */
	desc.cmd_desc_mapc.valid = valid;

	its_cmd_send(dev, &desc);
}

/* MAPTI: map a device's EventID to an LPI within a collection. */
static void
its_cmd_mapti(device_t dev, struct gicv3_its_irqsrc *girq)
{
	struct gicv3_its_softc *sc;
	struct its_cmd_desc desc;
	struct its_col *col;
	u_int col_id;

	sc = device_get_softc(dev);

	col_id = CPU_FFS(&girq->gi_isrc.isrc_cpu) - 1;
	col = sc->sc_its_cols[col_id];

	desc.cmd_type = ITS_CMD_MAPTI;
	desc.cmd_desc_mapvi.its_dev = girq->gi_its_dev;
	desc.cmd_desc_mapvi.col = col;
	/* The EventID sent to the device */
	desc.cmd_desc_mapvi.id = girq->gi_id;
	/* The physical interrupt presented to software */
	desc.cmd_desc_mapvi.pid = girq->gi_lpi + GIC_FIRST_LPI;

	its_cmd_send(dev, &desc);
}

/* MAPD: map (valid != 0) or unmap a device to its ITT. */
static void
its_cmd_mapd(device_t dev, struct its_dev *its_dev, uint8_t valid)
{
	struct its_cmd_desc desc;

	desc.cmd_type = ITS_CMD_MAPD;
	desc.cmd_desc_mapd.its_dev = its_dev;
	desc.cmd_desc_mapd.valid = valid;

	its_cmd_send(dev, &desc);
}

/* INV: tell the ITS to re-read one LPI's configuration table entry. */
static void
its_cmd_inv(device_t dev, struct its_dev *its_dev,
    struct gicv3_its_irqsrc *girq)
{
	struct gicv3_its_softc *sc;
	struct its_cmd_desc desc;
	struct its_col *col;

	sc = device_get_softc(dev);
	col =
sc->sc_its_cols[CPU_FFS(&girq->gi_isrc.isrc_cpu) - 1];

	desc.cmd_type = ITS_CMD_INV;
	/* The EventID sent to the device */
	desc.cmd_desc_inv.pid = girq->gi_id;
	desc.cmd_desc_inv.its_dev = its_dev;
	desc.cmd_desc_inv.col = col;

	its_cmd_send(dev, &desc);
}

/* INVALL: re-read the configuration of every LPI in a collection. */
static void
its_cmd_invall(device_t dev, struct its_col *col)
{
	struct its_cmd_desc desc;

	desc.cmd_type = ITS_CMD_INVALL;
	desc.cmd_desc_invall.col = col;

	its_cmd_send(dev, &desc);
}

#ifdef FDT
static device_probe_t gicv3_its_fdt_probe;
static device_attach_t gicv3_its_fdt_attach;

static device_method_t gicv3_its_fdt_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		gicv3_its_fdt_probe),
	DEVMETHOD(device_attach,	gicv3_its_fdt_attach),

	/* End */
	DEVMETHOD_END
};

#define its_baseclasses its_fdt_baseclasses
DEFINE_CLASS_1(its, gicv3_its_fdt_driver, gicv3_its_fdt_methods,
    sizeof(struct gicv3_its_softc), gicv3_its_driver);
#undef its_baseclasses

EARLY_DRIVER_MODULE(its_fdt, gic, gicv3_its_fdt_driver, 0, 0,
    BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE);

/* Probe the ITS from the FDT "arm,gic-v3-its" compatible string. */
static int
gicv3_its_fdt_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (!ofw_bus_is_compatible(dev, "arm,gic-v3-its"))
		return (ENXIO);

	device_set_desc(dev, "ARM GIC Interrupt Translation Service");
	return (BUS_PROBE_DEFAULT);
}

/* Attach the ITS and register it as an interrupt and MSI controller. */
static int
gicv3_its_fdt_attach(device_t dev)
{
	struct gicv3_its_softc *sc;
	phandle_t xref;
	int err;

	sc = device_get_softc(dev);
	sc->dev = dev;
	err = gicv3_its_attach(dev);
	if (err != 0)
		return (err);

	/* Register this device as an interrupt controller */
	xref = OF_xref_from_node(ofw_bus_get_node(dev));
	sc->sc_pic = intr_pic_register(dev, xref);
	err =
intr_pic_add_handler(device_get_parent(dev), sc->sc_pic,
	    gicv3_its_intr, sc, sc->sc_irq_base, sc->sc_irq_length);
	if (err != 0) {
		device_printf(dev, "Failed to add PIC handler: %d\n", err);
		return (err);
	}

	/* Register this device to handle MSI interrupts */
	err = intr_msi_register(dev, xref);
	if (err != 0) {
		device_printf(dev, "Failed to register for MSIs: %d\n", err);
		return (err);
	}

	return (0);
}
#endif

#ifdef DEV_ACPI
static device_probe_t gicv3_its_acpi_probe;
static device_attach_t gicv3_its_acpi_attach;

static device_method_t gicv3_its_acpi_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		gicv3_its_acpi_probe),
	DEVMETHOD(device_attach,	gicv3_its_acpi_attach),

	/* End */
	DEVMETHOD_END
};

#define its_baseclasses its_acpi_baseclasses
DEFINE_CLASS_1(its, gicv3_its_acpi_driver, gicv3_its_acpi_methods,
    sizeof(struct gicv3_its_softc), gicv3_its_driver);
#undef its_baseclasses

EARLY_DRIVER_MODULE(its_acpi, gic, gicv3_its_acpi_driver, 0, 0,
    BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE);

/* Probe the ITS as a child enumerated from the ACPI MADT. */
static int
gicv3_its_acpi_probe(device_t dev)
{

	if (gic_get_bus(dev) != GIC_BUS_ACPI)
		return (EINVAL);

	if (gic_get_hw_rev(dev) < 3)
		return (EINVAL);

	device_set_desc(dev, "ARM GIC Interrupt Translation Service");
	return (BUS_PROBE_DEFAULT);
}

/* Attach the ITS and register it as an interrupt and MSI controller. */
static int
gicv3_its_acpi_attach(device_t dev)
{
	struct gicv3_its_softc *sc;
	struct gic_v3_devinfo *di;
	int err;

	sc = device_get_softc(dev);
	sc->dev = dev;
	err = gicv3_its_attach(dev);
	if (err != 0)
		return (err);

	di = device_get_ivars(dev);
	sc->sc_pic = intr_pic_register(dev, di->msi_xref);
	err = intr_pic_add_handler(device_get_parent(dev), sc->sc_pic,
	    gicv3_its_intr, sc, sc->sc_irq_base,
sc->sc_irq_length);
	if (err != 0) {
		device_printf(dev, "Failed to add PIC handler: %d\n", err);
		return (err);
	}

	/* Register this device to handle MSI interrupts */
	err = intr_msi_register(dev, di->msi_xref);
	if (err != 0) {
		device_printf(dev, "Failed to register for MSIs: %d\n", err);
		return (err);
	}

	return (0);
}
#endif