1 /*- 2 * Copyright (c) 2015-2016 The FreeBSD Foundation 3 * 4 * This software was developed by Andrew Turner under 5 * the sponsorship of the FreeBSD Foundation. 6 * 7 * This software was developed by Semihalf under 8 * the sponsorship of the FreeBSD Foundation. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 29 * SUCH DAMAGE. 
 */

#include "opt_acpi.h"
#include "opt_platform.h"
#include "opt_iommu.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cpuset.h>
#include <sys/domainset.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/taskqueue.h>
#include <sys/tree.h>
#include <sys/queue.h>
#include <sys/rman.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/vmem.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_page.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <arm/arm/gic_common.h>
#include <arm64/arm64/gic_v3_reg.h>
#include <arm64/arm64/gic_v3_var.h>

#ifdef FDT
#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#endif
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#ifdef IOMMU
#include <dev/iommu/iommu.h>
#include <dev/iommu/iommu_gas.h>
#endif

#include "pcib_if.h"
#include "pic_if.h"
#include "msi_if.h"

MALLOC_DEFINE(M_GICV3_ITS, "GICv3 ITS",
    "ARM GICv3 Interrupt Translation Service");

/* Number of LPIs (MSI/MSI-X vectors) managed by this driver. */
#define	LPI_NIRQS		(64 * 1024)

/* The size and alignment of the command circular buffer */
#define	ITS_CMDQ_SIZE		(64 * 1024)	/* Must be a multiple of 4K */
#define	ITS_CMDQ_ALIGN		(64 * 1024)

/* LPI configuration table: one byte of priority/enable state per LPI. */
#define	LPI_CONFTAB_SIZE	LPI_NIRQS
#define	LPI_CONFTAB_ALIGN	(64 * 1024)
#define	LPI_CONFTAB_MAX_ADDR	((1ul << 48) - 1) /* We need a 47 bit PA */

/* 1 bit per SPI, PPI, and SGI (8k), and 1 bit per LPI (LPI_CONFTAB_SIZE) */
#define	LPI_PENDTAB_SIZE	((LPI_NIRQS + GIC_FIRST_LPI) / 8)
#define	LPI_PENDTAB_ALIGN	(64 * 1024)
#define	LPI_PENDTAB_MAX_ADDR	((1ul << 48) - 1) /* We need a 47 bit PA */

/* Per-device Interrupt Translation Table constraints. */
#define	LPI_INT_TRANS_TAB_ALIGN	256
#define	LPI_INT_TRANS_TAB_MAX_ADDR ((1ul << 48) - 1)

/* ITS commands encoding */
#define	ITS_CMD_MOVI		(0x01)
#define	ITS_CMD_SYNC		(0x05)
#define	ITS_CMD_MAPD		(0x08)
#define	ITS_CMD_MAPC		(0x09)
#define	ITS_CMD_MAPTI		(0x0a)
#define	ITS_CMD_MAPI		(0x0b)
#define	ITS_CMD_INV		(0x0c)
#define	ITS_CMD_INVALL		(0x0d)
/* Command */
#define	CMD_COMMAND_MASK	(0xFFUL)
/* PCI device ID */
#define	CMD_DEVID_SHIFT		(32)
#define	CMD_DEVID_MASK		(0xFFFFFFFFUL << CMD_DEVID_SHIFT)
/* Size of IRQ ID bitfield */
#define	CMD_SIZE_MASK		(0xFFUL)
/* Virtual LPI ID */
#define	CMD_ID_MASK		(0xFFFFFFFFUL)
/* Physical LPI ID */
#define	CMD_PID_SHIFT		(32)
#define	CMD_PID_MASK		(0xFFFFFFFFUL << CMD_PID_SHIFT)
/* Collection */
#define	CMD_COL_MASK		(0xFFFFUL)
/* Target (CPU or Re-Distributor) */
#define	CMD_TARGET_SHIFT	(16)
#define	CMD_TARGET_MASK		(0xFFFFFFFFUL << CMD_TARGET_SHIFT)
/* Interrupt Translation Table address */
#define	CMD_ITT_MASK		(0xFFFFFFFFFF00UL)
/* Valid command bit */
#define	CMD_VALID_SHIFT		(63)
#define	CMD_VALID_MASK		(1UL << CMD_VALID_SHIFT)

/* Sentinel collection target meaning "not yet bound to a CPU". */
#define	ITS_TARGET_NONE		0xFBADBEEF

/* LPI chunk owned by ITS device */
struct lpi_chunk {
	u_int	lpi_base;	/* First LPI in this chunk */
	u_int	lpi_free;	/* First free LPI in set */
	u_int	lpi_num;	/* Total number of LPIs in chunk */
	u_int	lpi_busy;	/* Number of busy LPIs in chunk */
};

/* ITS device */
struct its_dev {
	TAILQ_ENTRY(its_dev)	entry;
	/* PCI device */
	device_t		pci_dev;
	/* Device ID (i.e. PCI device ID) */
	uint32_t		devid;
	/* List of assigned LPIs */
	struct lpi_chunk	lpis;
	/* Virtual address of ITT */
	vm_offset_t		itt;
	size_t			itt_size;
};

/*
 * ITS command descriptor.
 * Idea for command description passing taken from Linux.
 */
struct its_cmd_desc {
	uint8_t cmd_type;	/* One of the ITS_CMD_* encodings above */

	union {
		struct {
			struct its_dev *its_dev;
			struct its_col *col;
			uint32_t id;
		} cmd_desc_movi;

		struct {
			struct its_col *col;
		} cmd_desc_sync;

		struct {
			struct its_col *col;
			uint8_t valid;
		} cmd_desc_mapc;

		struct {
			struct its_dev *its_dev;
			struct its_col *col;
			uint32_t pid;
			uint32_t id;
		} cmd_desc_mapvi;

		struct {
			struct its_dev *its_dev;
			struct its_col *col;
			uint32_t pid;
		} cmd_desc_mapi;

		struct {
			struct its_dev *its_dev;
			uint8_t valid;
		} cmd_desc_mapd;

		struct {
			struct its_dev *its_dev;
			struct its_col *col;
			uint32_t pid;
		} cmd_desc_inv;

		struct {
			struct its_col *col;
		} cmd_desc_invall;
	};
};

/* ITS command. Each command is 32 bytes long */
struct its_cmd {
	uint64_t	cmd_dword[4];	/* ITS command double word */
};

/* An ITS private table */
struct its_ptable {
	vm_offset_t	ptab_vaddr;	/* KVA of the table memory */
	unsigned long	ptab_size;	/* Size of the allocation in bytes */
};

/* ITS collection description. */
struct its_col {
	uint64_t	col_target;	/* Target Re-Distributor */
	uint64_t	col_id;		/* Collection ID */
};

/* Per-interrupt state; embeds the generic INTRNG irqsrc. */
struct gicv3_its_irqsrc {
	struct intr_irqsrc	gi_isrc;
	u_int			gi_id;
	u_int			gi_lpi;
	struct its_dev		*gi_its_dev;
	TAILQ_ENTRY(gicv3_its_irqsrc) gi_link;
};

struct gicv3_its_softc {
	device_t	dev;
	struct intr_pic *sc_pic;
	struct resource *sc_its_res;

	cpuset_t	sc_cpus;	/* CPUs this ITS may target */
	struct domainset *sc_ds;	/* NUMA policy for table allocations */
	u_int		gic_irq_cpu;	/* Last CPU used for round-robin binding */

	struct its_ptable sc_its_ptab[GITS_BASER_NUM];
	struct its_col *sc_its_cols[MAXCPU];	/* Per-CPU collections */

	/*
	 * TODO: We should get these from the parent as we only want a
	 * single copy of each across the interrupt controller.
	 */
	uint8_t		*sc_conf_base;
	vm_offset_t sc_pend_base[MAXCPU];

	/* Command handling */
	struct mtx sc_its_cmd_lock;
	struct its_cmd *sc_its_cmd_base; /* Command circular buffer address */
	size_t sc_its_cmd_next_idx;

	vmem_t *sc_irq_alloc;		/* Allocator for IRQ numbers */
	struct gicv3_its_irqsrc	**sc_irqs;
	u_int	sc_irq_base;
	u_int	sc_irq_length;
	u_int	sc_irq_count;

	struct mtx sc_its_dev_lock;
	TAILQ_HEAD(its_dev_list, its_dev) sc_its_dev_list;
	TAILQ_HEAD(free_irqs, gicv3_its_irqsrc) sc_free_irqs;

#define	ITS_FLAGS_CMDQ_FLUSH		0x00000001
#define	ITS_FLAGS_LPI_CONF_FLUSH	0x00000002
#define	ITS_FLAGS_ERRATA_CAVIUM_22375	0x00000004
	u_int sc_its_flags;
	bool	trace_enable;
	vm_page_t ma; /* fake msi page */
};

/* LPI configuration table shared by all ITS instances; set once with CAS. */
static void *conf_base;

typedef void (its_quirk_func_t)(device_t);
static its_quirk_func_t its_quirk_cavium_22375;

/* IIDR-matched hardware errata workarounds applied at attach time. */
static const struct {
	const char *desc;
	uint32_t iidr;
	uint32_t iidr_mask;
	its_quirk_func_t *func;
} its_quirks[] = {
	{
		/* Cavium ThunderX Pass 1.x */
		.desc = "Cavium ThunderX errata: 22375, 24313",
		.iidr = GITS_IIDR_RAW(GITS_IIDR_IMPL_CAVIUM,
		    GITS_IIDR_PROD_THUNDER, GITS_IIDR_VAR_THUNDER_1, 0),
		.iidr_mask = ~GITS_IIDR_REVISION_MASK,
		.func = its_quirk_cavium_22375,
	},
};

/* Accessors for the memory-mapped ITS register frame. */
#define	gic_its_read_4(sc, reg)			\
    bus_read_4((sc)->sc_its_res, (reg))
#define	gic_its_read_8(sc, reg)			\
    bus_read_8((sc)->sc_its_res, (reg))

#define	gic_its_write_4(sc, reg, val)		\
    bus_write_4((sc)->sc_its_res, (reg), (val))
#define	gic_its_write_8(sc, reg, val)		\
    bus_write_8((sc)->sc_its_res, (reg), (val))

static device_attach_t gicv3_its_attach;
static device_detach_t gicv3_its_detach;

static pic_disable_intr_t gicv3_its_disable_intr;
static pic_enable_intr_t gicv3_its_enable_intr;
static pic_map_intr_t gicv3_its_map_intr;
static pic_setup_intr_t gicv3_its_setup_intr;
static pic_post_filter_t gicv3_its_post_filter;
static pic_post_ithread_t gicv3_its_post_ithread;
static pic_pre_ithread_t gicv3_its_pre_ithread;
static pic_bind_intr_t gicv3_its_bind_intr;
#ifdef SMP
static pic_init_secondary_t gicv3_its_init_secondary;
#endif
static msi_alloc_msi_t gicv3_its_alloc_msi;
static msi_release_msi_t gicv3_its_release_msi;
static msi_alloc_msix_t gicv3_its_alloc_msix;
static msi_release_msix_t gicv3_its_release_msix;
static msi_map_msi_t gicv3_its_map_msi;
#ifdef IOMMU
static msi_iommu_init_t gicv3_iommu_init;
static msi_iommu_deinit_t gicv3_iommu_deinit;
#endif

/* ITS command-queue helpers, defined later in this file. */
static void its_cmd_movi(device_t, struct gicv3_its_irqsrc *);
static void its_cmd_mapc(device_t, struct its_col *, uint8_t);
static void its_cmd_mapti(device_t, struct gicv3_its_irqsrc *);
static void its_cmd_mapd(device_t, struct its_dev *, uint8_t);
static void its_cmd_inv(device_t, struct its_dev *, struct gicv3_its_irqsrc *);
static void its_cmd_invall(device_t, struct its_col *);

static device_method_t gicv3_its_methods[] = {
	/* Device interface */
	DEVMETHOD(device_detach, gicv3_its_detach),

	/* Interrupt controller interface */
	DEVMETHOD(pic_disable_intr, gicv3_its_disable_intr),
	DEVMETHOD(pic_enable_intr, gicv3_its_enable_intr),
	DEVMETHOD(pic_map_intr, gicv3_its_map_intr),
	DEVMETHOD(pic_setup_intr, gicv3_its_setup_intr),
	DEVMETHOD(pic_post_filter, gicv3_its_post_filter),
	DEVMETHOD(pic_post_ithread, gicv3_its_post_ithread),
	DEVMETHOD(pic_pre_ithread, gicv3_its_pre_ithread),
#ifdef SMP
	DEVMETHOD(pic_bind_intr, gicv3_its_bind_intr),
	DEVMETHOD(pic_init_secondary, gicv3_its_init_secondary),
#endif

	/* MSI/MSI-X */
	DEVMETHOD(msi_alloc_msi, gicv3_its_alloc_msi),
	DEVMETHOD(msi_release_msi, gicv3_its_release_msi),
	DEVMETHOD(msi_alloc_msix, gicv3_its_alloc_msix),
	DEVMETHOD(msi_release_msix, gicv3_its_release_msix),
	DEVMETHOD(msi_map_msi, gicv3_its_map_msi),
#ifdef IOMMU
	DEVMETHOD(msi_iommu_init, gicv3_iommu_init),
	DEVMETHOD(msi_iommu_deinit, gicv3_iommu_deinit),
#endif

	/* End */
	DEVMETHOD_END
};

static DEFINE_CLASS_0(gic, gicv3_its_driver, gicv3_its_methods,
    sizeof(struct gicv3_its_softc));

/*
 * Allocate the ITS command circular buffer and program GITS_CBASER with
 * its physical address and attributes.  If the hardware refuses the
 * inner-shareable/cacheable attributes we requested, fall back to
 * non-cacheable/non-shareable and remember that each command must be
 * explicitly flushed from the data cache (ITS_FLAGS_CMDQ_FLUSH).
 */
static void
gicv3_its_cmdq_init(struct gicv3_its_softc *sc)
{
	vm_paddr_t cmd_paddr;
	uint64_t reg, tmp;

	/* Set up the command circular buffer */
	sc->sc_its_cmd_base = contigmalloc_domainset(ITS_CMDQ_SIZE, M_GICV3_ITS,
	    sc->sc_ds, M_WAITOK | M_ZERO, 0, (1ul << 48) - 1, ITS_CMDQ_ALIGN,
	    0);
	sc->sc_its_cmd_next_idx = 0;

	cmd_paddr = vtophys(sc->sc_its_cmd_base);

	/* Set the base of the command buffer */
	reg = GITS_CBASER_VALID |
	    (GITS_CBASER_CACHE_NIWAWB << GITS_CBASER_CACHE_SHIFT) |
	    cmd_paddr | (GITS_CBASER_SHARE_IS << GITS_CBASER_SHARE_SHIFT) |
	    (ITS_CMDQ_SIZE / 4096 - 1);	/* Size field is in 4KB pages, minus one */
	gic_its_write_8(sc, GITS_CBASER, reg);

	/* Read back to check for fixed value fields */
	tmp = gic_its_read_8(sc, GITS_CBASER);

	if ((tmp & GITS_CBASER_SHARE_MASK) !=
	    (GITS_CBASER_SHARE_IS << GITS_CBASER_SHARE_SHIFT)) {
		/* Check if the hardware reported non-shareable */
		if ((tmp & GITS_CBASER_SHARE_MASK) ==
		    (GITS_CBASER_SHARE_NS << GITS_CBASER_SHARE_SHIFT)) {
			/* If so remove the cache attribute */
			reg &= ~GITS_CBASER_CACHE_MASK;
			reg &= ~GITS_CBASER_SHARE_MASK;
			/* Set to Non-cacheable, Non-shareable */
			reg |= GITS_CBASER_CACHE_NIN << GITS_CBASER_CACHE_SHIFT;
			reg |= GITS_CBASER_SHARE_NS << GITS_CBASER_SHARE_SHIFT;

			gic_its_write_8(sc, GITS_CBASER, reg);
		}

		/* The command queue has to be flushed after each command */
		sc->sc_its_flags |= ITS_FLAGS_CMDQ_FLUSH;
	}

	/* Get the next command from the start of the buffer */
	gic_its_write_8(sc, GITS_CWRITER, 0x0);
}

static int
gicv3_its_table_init(device_t dev, struct gicv3_its_softc *sc)
{
	vm_offset_t table;
	vm_paddr_t paddr;
	uint64_t cache, reg, share, tmp, type;
	size_t esize, its_tbl_size, nidents, nitspages, npages;
	int i, page_size;
	int devbits;

	if ((sc->sc_its_flags & ITS_FLAGS_ERRATA_CAVIUM_22375) != 0) {
		/*
		 * GITS_TYPER[17:13] of ThunderX reports that device IDs
		 * are to be 21 bits in length. The entry size of the ITS
		 * table can be read from GITS_BASERn[52:48] and on ThunderX
		 * is supposed to be 8 bytes in length (for device table).
		 * Finally the page size that is to be used by ITS to access
		 * this table will be set to 64KB.
		 *
		 * This gives 0x200000 entries of size 0x8 bytes covered by
		 * 256 pages each of which 64KB in size. The number of pages
		 * (minus 1) should then be written to GITS_BASERn[7:0]. In
		 * that case this value would be 0xFF but on ThunderX the
		 * maximum value that HW accepts is 0xFD.
		 *
		 * Set an arbitrary number of device ID bits to 20 in order
		 * to limit the number of entries in ITS device table to
		 * 0x100000 and the table size to 8MB.
		 */
		devbits = 20;
		cache = 0;
	} else {
		devbits = GITS_TYPER_DEVB(gic_its_read_8(sc, GITS_TYPER));
		cache = GITS_BASER_CACHE_WAWB;
	}
	share = GITS_BASER_SHARE_IS;
	page_size = PAGE_SIZE_64K;

	for (i = 0; i < GITS_BASER_NUM; i++) {
		reg = gic_its_read_8(sc, GITS_BASER(i));
		/* The type of table */
		type = GITS_BASER_TYPE(reg);
		/* The table entry size */
		esize = GITS_BASER_ESIZE(reg);

		/* Size the table by what it holds; skip unimplemented ones. */
		switch(type) {
		case GITS_BASER_TYPE_DEV:
			nidents = (1 << devbits);
			its_tbl_size = esize * nidents;
			its_tbl_size = roundup2(its_tbl_size, PAGE_SIZE_64K);
			break;
		case GITS_BASER_TYPE_VP:
		case GITS_BASER_TYPE_PP: /* Undocumented? */
		case GITS_BASER_TYPE_IC:
			its_tbl_size = page_size;
			break;
		default:
			continue;
		}
		npages = howmany(its_tbl_size, PAGE_SIZE);

		/* Allocate the table */
		table = (vm_offset_t)contigmalloc_domainset(npages * PAGE_SIZE,
		    M_GICV3_ITS, sc->sc_ds, M_WAITOK | M_ZERO, 0,
		    (1ul << 48) - 1, PAGE_SIZE_64K, 0);

		sc->sc_its_ptab[i].ptab_vaddr = table;
		sc->sc_its_ptab[i].ptab_size = npages * PAGE_SIZE;

		paddr = vtophys(table);

		/*
		 * Program GITS_BASER(i) and retry with downgraded
		 * shareability or a smaller ITS page size until the
		 * hardware accepts (reads back) what we wrote.
		 */
		while (1) {
			nitspages = howmany(its_tbl_size, page_size);

			/* Clear the fields we will be setting */
			reg &= ~(GITS_BASER_VALID |
			    GITS_BASER_CACHE_MASK | GITS_BASER_TYPE_MASK |
			    GITS_BASER_ESIZE_MASK | GITS_BASER_PA_MASK |
			    GITS_BASER_SHARE_MASK | GITS_BASER_PSZ_MASK |
			    GITS_BASER_SIZE_MASK);
			/* Set the new values */
			reg |= GITS_BASER_VALID |
			    (cache << GITS_BASER_CACHE_SHIFT) |
			    (type << GITS_BASER_TYPE_SHIFT) |
			    ((esize - 1) << GITS_BASER_ESIZE_SHIFT) |
			    paddr | (share << GITS_BASER_SHARE_SHIFT) |
			    (nitspages - 1);

			switch (page_size) {
			case PAGE_SIZE_4K:	/* 4KB */
				reg |=
				    GITS_BASER_PSZ_4K << GITS_BASER_PSZ_SHIFT;
				break;
			case PAGE_SIZE_16K:	/* 16KB */
				reg |=
				    GITS_BASER_PSZ_16K << GITS_BASER_PSZ_SHIFT;
				break;
			case PAGE_SIZE_64K:	/* 64KB */
				reg |=
				    GITS_BASER_PSZ_64K << GITS_BASER_PSZ_SHIFT;
				break;
			}

			gic_its_write_8(sc, GITS_BASER(i), reg);

			/* Read back to check */
			tmp = gic_its_read_8(sc, GITS_BASER(i));

			/* Do the shareability masks line up? */
			if ((tmp & GITS_BASER_SHARE_MASK) !=
			    (reg & GITS_BASER_SHARE_MASK)) {
				share = (tmp & GITS_BASER_SHARE_MASK) >>
				    GITS_BASER_SHARE_SHIFT;
				continue;
			}

			if ((tmp & GITS_BASER_PSZ_MASK) !=
			    (reg & GITS_BASER_PSZ_MASK)) {
				switch (page_size) {
				case PAGE_SIZE_16K:
					page_size = PAGE_SIZE_4K;
					continue;
				case PAGE_SIZE_64K:
					page_size = PAGE_SIZE_16K;
					continue;
				}
			}

			if (tmp != reg) {
				device_printf(dev, "GITS_BASER%d: "
				    "unable to be updated: %lx != %lx\n",
				    i, reg, tmp);
				return (ENXIO);
			}

			/* We should have made all needed changes */
			break;
		}
	}

	return (0);
}

/*
 * Set up the system-wide LPI configuration table.  A single table is
 * shared by all ITS instances (conf_base); the first caller allocates it
 * and publishes it with a compare-and-set, losers free their copy.
 */
static void
gicv3_its_conftable_init(struct gicv3_its_softc *sc)
{
	void *conf_table;

	conf_table = atomic_load_ptr(&conf_base);
	if (conf_table == NULL) {
		conf_table = contigmalloc(LPI_CONFTAB_SIZE,
		    M_GICV3_ITS, M_WAITOK, 0, LPI_CONFTAB_MAX_ADDR,
		    LPI_CONFTAB_ALIGN, 0);

		if (atomic_cmpset_ptr((uintptr_t *)&conf_base,
		    (uintptr_t)NULL, (uintptr_t)conf_table) == 0) {
			/* Another thread beat us to it; use its table. */
			contigfree(conf_table, LPI_CONFTAB_SIZE, M_GICV3_ITS);
			conf_table = atomic_load_ptr(&conf_base);
		}
	}
	sc->sc_conf_base = conf_table;

	/* Set the default configuration */
	memset(sc->sc_conf_base, GIC_PRIORITY_MAX | LPI_CONF_GROUP1,
	    LPI_CONFTAB_SIZE);

	/* Flush the table to memory */
	cpu_dcache_wb_range((vm_offset_t)sc->sc_conf_base, LPI_CONFTAB_SIZE);
}

/*
 * Allocate and flush one LPI pending table per CPU this ITS may target.
 */
static void
gicv3_its_pendtables_init(struct gicv3_its_softc *sc)
{
	int i;

	for (i = 0; i <= mp_maxid; i++) {
		if (CPU_ISSET(i, &sc->sc_cpus) == 0)
			continue;

		sc->sc_pend_base[i] = (vm_offset_t)contigmalloc(
		    LPI_PENDTAB_SIZE, M_GICV3_ITS, M_WAITOK | M_ZERO,
		    0, LPI_PENDTAB_MAX_ADDR, LPI_PENDTAB_ALIGN, 0);

		/* Flush so the ITS can see the memory */
		cpu_dcache_wb_range((vm_offset_t)sc->sc_pend_base[i],
		    LPI_PENDTAB_SIZE);
	}
} 617 618 static void 619 its_init_cpu_lpi(device_t dev, struct gicv3_its_softc *sc) 620 { 621 device_t gicv3; 622 uint64_t xbaser, tmp; 623 uint32_t ctlr; 624 u_int cpuid; 625 626 gicv3 = device_get_parent(dev); 627 cpuid = PCPU_GET(cpuid); 628 629 /* Disable LPIs */ 630 ctlr = gic_r_read_4(gicv3, GICR_CTLR); 631 ctlr &= ~GICR_CTLR_LPI_ENABLE; 632 gic_r_write_4(gicv3, GICR_CTLR, ctlr); 633 634 /* Make sure changes are observable my the GIC */ 635 dsb(sy); 636 637 /* 638 * Set the redistributor base 639 */ 640 xbaser = vtophys(sc->sc_conf_base) | 641 (GICR_PROPBASER_SHARE_IS << GICR_PROPBASER_SHARE_SHIFT) | 642 (GICR_PROPBASER_CACHE_NIWAWB << GICR_PROPBASER_CACHE_SHIFT) | 643 (flsl(LPI_CONFTAB_SIZE | GIC_FIRST_LPI) - 1); 644 gic_r_write_8(gicv3, GICR_PROPBASER, xbaser); 645 646 /* Check the cache attributes we set */ 647 tmp = gic_r_read_8(gicv3, GICR_PROPBASER); 648 649 if ((tmp & GICR_PROPBASER_SHARE_MASK) != 650 (xbaser & GICR_PROPBASER_SHARE_MASK)) { 651 if ((tmp & GICR_PROPBASER_SHARE_MASK) == 652 (GICR_PROPBASER_SHARE_NS << GICR_PROPBASER_SHARE_SHIFT)) { 653 /* We need to mark as non-cacheable */ 654 xbaser &= ~(GICR_PROPBASER_SHARE_MASK | 655 GICR_PROPBASER_CACHE_MASK); 656 /* Non-cacheable */ 657 xbaser |= GICR_PROPBASER_CACHE_NIN << 658 GICR_PROPBASER_CACHE_SHIFT; 659 /* Non-sareable */ 660 xbaser |= GICR_PROPBASER_SHARE_NS << 661 GICR_PROPBASER_SHARE_SHIFT; 662 gic_r_write_8(gicv3, GICR_PROPBASER, xbaser); 663 } 664 sc->sc_its_flags |= ITS_FLAGS_LPI_CONF_FLUSH; 665 } 666 667 /* 668 * Set the LPI pending table base 669 */ 670 xbaser = vtophys(sc->sc_pend_base[cpuid]) | 671 (GICR_PENDBASER_CACHE_NIWAWB << GICR_PENDBASER_CACHE_SHIFT) | 672 (GICR_PENDBASER_SHARE_IS << GICR_PENDBASER_SHARE_SHIFT); 673 674 gic_r_write_8(gicv3, GICR_PENDBASER, xbaser); 675 676 tmp = gic_r_read_8(gicv3, GICR_PENDBASER); 677 678 if ((tmp & GICR_PENDBASER_SHARE_MASK) == 679 (GICR_PENDBASER_SHARE_NS << GICR_PENDBASER_SHARE_SHIFT)) { 680 /* Clear the cahce and shareability bits */ 
681 xbaser &= ~(GICR_PENDBASER_CACHE_MASK | 682 GICR_PENDBASER_SHARE_MASK); 683 /* Mark as non-shareable */ 684 xbaser |= GICR_PENDBASER_SHARE_NS << GICR_PENDBASER_SHARE_SHIFT; 685 /* And non-cacheable */ 686 xbaser |= GICR_PENDBASER_CACHE_NIN << 687 GICR_PENDBASER_CACHE_SHIFT; 688 } 689 690 /* Enable LPIs */ 691 ctlr = gic_r_read_4(gicv3, GICR_CTLR); 692 ctlr |= GICR_CTLR_LPI_ENABLE; 693 gic_r_write_4(gicv3, GICR_CTLR, ctlr); 694 695 /* Make sure the GIC has seen everything */ 696 dsb(sy); 697 } 698 699 static int 700 its_init_cpu(device_t dev, struct gicv3_its_softc *sc) 701 { 702 device_t gicv3; 703 vm_paddr_t target; 704 u_int cpuid; 705 struct redist_pcpu *rpcpu; 706 707 gicv3 = device_get_parent(dev); 708 cpuid = PCPU_GET(cpuid); 709 if (!CPU_ISSET(cpuid, &sc->sc_cpus)) 710 return (0); 711 712 /* Check if the ITS is enabled on this CPU */ 713 if ((gic_r_read_8(gicv3, GICR_TYPER) & GICR_TYPER_PLPIS) == 0) 714 return (ENXIO); 715 716 rpcpu = gicv3_get_redist(dev); 717 718 /* Do per-cpu LPI init once */ 719 if (!rpcpu->lpi_enabled) { 720 its_init_cpu_lpi(dev, sc); 721 rpcpu->lpi_enabled = true; 722 } 723 724 if ((gic_its_read_8(sc, GITS_TYPER) & GITS_TYPER_PTA) != 0) { 725 /* This ITS wants the redistributor physical address */ 726 target = vtophys(rman_get_virtual(&rpcpu->res)); 727 } else { 728 /* This ITS wants the unique processor number */ 729 target = GICR_TYPER_CPUNUM(gic_r_read_8(gicv3, GICR_TYPER)) << 730 CMD_TARGET_SHIFT; 731 } 732 733 sc->sc_its_cols[cpuid]->col_target = target; 734 sc->sc_its_cols[cpuid]->col_id = cpuid; 735 736 its_cmd_mapc(dev, sc->sc_its_cols[cpuid], 1); 737 its_cmd_invall(dev, sc->sc_its_cols[cpuid]); 738 739 return (0); 740 } 741 742 static int 743 gicv3_its_sysctl_trace_enable(SYSCTL_HANDLER_ARGS) 744 { 745 struct gicv3_its_softc *sc; 746 int rv; 747 748 sc = arg1; 749 750 rv = sysctl_handle_bool(oidp, &sc->trace_enable, 0, req); 751 if (rv != 0 || req->newptr == NULL) 752 return (rv); 753 if (sc->trace_enable) 754 
		gic_its_write_8(sc, GITS_TRKCTLR, 3);
	else
		gic_its_write_8(sc, GITS_TRKCTLR, 0);

	return (0);
}

/*
 * Sysctl handler dumping the GIC-500 tracing registers into a string.
 */
static int
gicv3_its_sysctl_trace_regs(SYSCTL_HANDLER_ARGS)
{
	struct gicv3_its_softc *sc;
	struct sbuf *sb;
	int err;

	sc = arg1;
	sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (sb == NULL) {
		device_printf(sc->dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}
	sbuf_cat(sb, "\n");
	sbuf_printf(sb, "GITS_TRKCTLR: 0x%08X\n",
	    gic_its_read_4(sc, GITS_TRKCTLR));
	sbuf_printf(sb, "GITS_TRKR: 0x%08X\n",
	    gic_its_read_4(sc, GITS_TRKR));
	sbuf_printf(sb, "GITS_TRKDIDR: 0x%08X\n",
	    gic_its_read_4(sc, GITS_TRKDIDR));
	sbuf_printf(sb, "GITS_TRKPIDR: 0x%08X\n",
	    gic_its_read_4(sc, GITS_TRKPIDR));
	sbuf_printf(sb, "GITS_TRKVIDR: 0x%08X\n",
	    gic_its_read_4(sc, GITS_TRKVIDR));
	sbuf_printf(sb, "GITS_TRKTGTR: 0x%08X\n",
	    gic_its_read_4(sc, GITS_TRKTGTR));

	err = sbuf_finish(sb);
	if (err)
		device_printf(sc->dev, "Error finishing sbuf: %d\n", err);
	sbuf_delete(sb);
	return(err);
}

/*
 * Register the "tracing" sysctl node (enable + capture) under this
 * device's sysctl tree.  Only called for GIC-500 (see attach).
 */
static int
gicv3_its_init_sysctl(struct gicv3_its_softc *sc)
{
	struct sysctl_oid *oid, *child;
	struct sysctl_ctx_list *ctx_list;

	ctx_list = device_get_sysctl_ctx(sc->dev);
	child = device_get_sysctl_tree(sc->dev);
	oid = SYSCTL_ADD_NODE(ctx_list,
	    SYSCTL_CHILDREN(child), OID_AUTO, "tracing",
	    CTLFLAG_RD| CTLFLAG_MPSAFE, NULL, "Messages tracing");
	if (oid == NULL)
		return (ENXIO);

	/* Add registers */
	SYSCTL_ADD_PROC(ctx_list,
	    SYSCTL_CHILDREN(oid), OID_AUTO, "enable",
	    CTLTYPE_U8 | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
	    gicv3_its_sysctl_trace_enable, "CU", "Enable tracing");
	SYSCTL_ADD_PROC(ctx_list,
	    SYSCTL_CHILDREN(oid), OID_AUTO, "capture",
	    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
	    gicv3_its_sysctl_trace_regs, "", "Captured tracing registers.");

	return
	(0);
}

/*
 * Attach the ITS: map its register frame, apply errata quirks, allocate
 * the private tables, command queue, per-CPU collections and the LPI
 * configuration/pending tables, then enable the ITS and LPI delivery on
 * the boot CPU.  The interrupt-source array and vmem IRQ allocator are
 * created last.
 */
static int
gicv3_its_attach(device_t dev)
{
	struct gicv3_its_softc *sc;
	int domain, err, i, rid;
	uint64_t phys;
	uint32_t iidr;

	sc = device_get_softc(dev);

	/* Each ITS unit owns a disjoint range of IRQ numbers above the LPIs. */
	sc->sc_irq_length = gicv3_get_nirqs(dev);
	sc->sc_irq_base = GIC_FIRST_LPI;
	sc->sc_irq_base += device_get_unit(dev) * sc->sc_irq_length;

	rid = 0;
	sc->sc_its_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->sc_its_res == NULL) {
		device_printf(dev, "Could not allocate memory\n");
		return (ENXIO);
	}

	/*
	 * Fake page backing the GITS_TRANSLATER doorbell so the IOMMU MSI
	 * code can map it.
	 */
	phys = rounddown2(vtophys(rman_get_virtual(sc->sc_its_res)) +
	    GITS_TRANSLATER, PAGE_SIZE);
	sc->ma = malloc(sizeof(struct vm_page), M_DEVBUF, M_WAITOK | M_ZERO);
	vm_page_initfake(sc->ma, phys, VM_MEMATTR_DEFAULT);

	CPU_COPY(&all_cpus, &sc->sc_cpus);
	iidr = gic_its_read_4(sc, GITS_IIDR);
	for (i = 0; i < nitems(its_quirks); i++) {
		if ((iidr & its_quirks[i].iidr_mask) == its_quirks[i].iidr) {
			if (bootverbose) {
				device_printf(dev, "Applying %s\n",
				    its_quirks[i].desc);
			}
			its_quirks[i].func(dev);
			break;
		}
	}

	/* Prefer allocating ITS tables from the ITS's own NUMA domain. */
	if (bus_get_domain(dev, &domain) == 0 && domain < MAXMEMDOM) {
		sc->sc_ds = DOMAINSET_PREF(domain);
	} else {
		sc->sc_ds = DOMAINSET_RR();
	}

	/* Allocate the private tables */
	err = gicv3_its_table_init(dev, sc);
	if (err != 0)
		return (err);

	/* Protects access to the device list */
	mtx_init(&sc->sc_its_dev_lock, "ITS device lock", NULL, MTX_SPIN);

	/* Protects access to the ITS command circular buffer. */
	mtx_init(&sc->sc_its_cmd_lock, "ITS cmd lock", NULL, MTX_SPIN);

	/* Allocate the command circular buffer */
	gicv3_its_cmdq_init(sc);

	/* Allocate the per-CPU collections */
	for (int cpu = 0; cpu <= mp_maxid; cpu++)
		if (CPU_ISSET(cpu, &sc->sc_cpus) != 0)
			sc->sc_its_cols[cpu] = malloc_domainset(
			    sizeof(*sc->sc_its_cols[0]), M_GICV3_ITS,
			    DOMAINSET_PREF(pcpu_find(cpu)->pc_domain),
			    M_WAITOK | M_ZERO);
		else
			sc->sc_its_cols[cpu] = NULL;

	/* Enable the ITS */
	gic_its_write_4(sc, GITS_CTLR,
	    gic_its_read_4(sc, GITS_CTLR) | GITS_CTLR_EN);

	/* Create the LPI configuration table */
	gicv3_its_conftable_init(sc);

	/* And the pending tables */
	gicv3_its_pendtables_init(sc);

	/* Enable LPIs on this CPU */
	its_init_cpu(dev, sc);

	TAILQ_INIT(&sc->sc_its_dev_list);
	TAILQ_INIT(&sc->sc_free_irqs);

	/*
	 * Create the vmem object to allocate INTRNG IRQs from. We try to
	 * use all IRQs not already used by the GICv3.
	 * XXX: This assumes there are no other interrupt controllers in the
	 * system.
	 */
	sc->sc_irq_alloc = vmem_create(device_get_nameunit(dev), 0,
	    gicv3_get_nirqs(dev), 1, 0, M_FIRSTFIT | M_WAITOK);

	sc->sc_irqs = malloc(sizeof(*sc->sc_irqs) * sc->sc_irq_length,
	    M_GICV3_ITS, M_WAITOK | M_ZERO);

	/* For GIC-500 install tracking sysctls.
	 */
	if ((iidr & (GITS_IIDR_PRODUCT_MASK | GITS_IIDR_IMPLEMENTOR_MASK)) ==
	    GITS_IIDR_RAW(GITS_IIDR_IMPL_ARM, GITS_IIDR_PROD_GIC500, 0, 0))
		gicv3_its_init_sysctl(sc);

	return (0);
}

/* Detach is unsupported; resources are never torn down. */
static int
gicv3_its_detach(device_t dev)
{

	return (ENXIO);
}

/*
 * Cavium ThunderX erratum 22375/24313 workaround: flag the erratum (which
 * also alters table sizing in gicv3_its_table_init) and restrict the
 * target CPU set to this ITS's own socket.
 */
static void
its_quirk_cavium_22375(device_t dev)
{
	struct gicv3_its_softc *sc;
	int domain;

	sc = device_get_softc(dev);
	sc->sc_its_flags |= ITS_FLAGS_ERRATA_CAVIUM_22375;

	/*
	 * We need to limit which CPUs we send these interrupts to on
	 * the original dual socket ThunderX as it is unable to
	 * forward them between the two sockets.
	 */
	if (bus_get_domain(dev, &domain) == 0) {
		if (domain < MAXMEMDOM) {
			CPU_COPY(&cpuset_domain[domain], &sc->sc_cpus);
		} else {
			CPU_ZERO(&sc->sc_cpus);
		}
	}
}

/*
 * Disable an LPI: clear its enable bit in the configuration table, make
 * the change visible to the ITS, then issue an INV command.
 */
static void
gicv3_its_disable_intr(device_t dev, struct intr_irqsrc *isrc)
{
	struct gicv3_its_softc *sc;
	struct gicv3_its_irqsrc *girq;
	uint8_t *conf;

	sc = device_get_softc(dev);
	girq = (struct gicv3_its_irqsrc *)isrc;
	conf = sc->sc_conf_base;

	conf[girq->gi_lpi] &= ~LPI_CONF_ENABLE;

	if ((sc->sc_its_flags & ITS_FLAGS_LPI_CONF_FLUSH) != 0) {
		/* Clean D-cache under command. */
		cpu_dcache_wb_range((vm_offset_t)&conf[girq->gi_lpi], 1);
	} else {
		/* DSB inner shareable, store */
		dsb(ishst);
	}

	its_cmd_inv(dev, girq->gi_its_dev, girq);
}

/*
 * Enable an LPI: mirror image of gicv3_its_disable_intr.
 */
static void
gicv3_its_enable_intr(device_t dev, struct intr_irqsrc *isrc)
{
	struct gicv3_its_softc *sc;
	struct gicv3_its_irqsrc *girq;
	uint8_t *conf;

	sc = device_get_softc(dev);
	girq = (struct gicv3_its_irqsrc *)isrc;
	conf = sc->sc_conf_base;

	conf[girq->gi_lpi] |= LPI_CONF_ENABLE;

	if ((sc->sc_its_flags & ITS_FLAGS_LPI_CONF_FLUSH) != 0) {
		/* Clean D-cache under command. */
		cpu_dcache_wb_range((vm_offset_t)&conf[girq->gi_lpi], 1);
	} else {
		/* DSB inner shareable, store */
		dsb(ishst);
	}

	its_cmd_inv(dev, girq->gi_its_dev, girq);
}

/*
 * Interrupt filter: translate the raw IRQ number back to an irqsrc and
 * dispatch it to INTRNG.
 */
static int
gicv3_its_intr(void *arg, uintptr_t irq)
{
	struct gicv3_its_softc *sc = arg;
	struct gicv3_its_irqsrc *girq;
	struct trapframe *tf;

	irq -= sc->sc_irq_base;
	girq = sc->sc_irqs[irq];
	if (girq == NULL)
		panic("gicv3_its_intr: Invalid interrupt %ld",
		    irq + sc->sc_irq_base);

	tf = curthread->td_intr_frame;
	intr_isrc_dispatch(&girq->gi_isrc, tf);
	return (FILTER_HANDLED);
}

/* Signal end-of-interrupt before handing off to the ithread. */
static void
gicv3_its_pre_ithread(device_t dev, struct intr_irqsrc *isrc)
{
	struct gicv3_its_irqsrc *girq;
	struct gicv3_its_softc *sc;

	sc = device_get_softc(dev);
	girq = (struct gicv3_its_irqsrc *)isrc;
	gic_icc_write(EOIR1, girq->gi_lpi + GIC_FIRST_LPI);
}

/* Nothing to do after the ithread runs; EOI was done in pre_ithread. */
static void
gicv3_its_post_ithread(device_t dev, struct intr_irqsrc *isrc)
{

}

/* Signal end-of-interrupt for filter-only handlers. */
static void
gicv3_its_post_filter(device_t dev, struct intr_irqsrc *isrc)
{
	struct gicv3_its_irqsrc *girq;
	struct gicv3_its_softc *sc;

	sc = device_get_softc(dev);
	girq = (struct gicv3_its_irqsrc *)isrc;
	gic_icc_write(EOIR1, girq->gi_lpi + GIC_FIRST_LPI);
}

/*
 * Pick a target CPU for an interrupt that has none, round-robin over
 * the CPUs this ITS may target.
 */
static int
gicv3_its_select_cpu(device_t dev, struct intr_irqsrc *isrc)
{
	struct gicv3_its_softc *sc;

	sc = device_get_softc(dev);
	if (CPU_EMPTY(&isrc->isrc_cpu)) {
		sc->gic_irq_cpu = intr_irq_next_cpu(sc->gic_irq_cpu,
		    &sc->sc_cpus);
		CPU_SETOF(sc->gic_irq_cpu, &isrc->isrc_cpu);
	}

	return (0);
}

/*
 * Bind an interrupt to its target CPU by moving it to that CPU's
 * collection with a MOVI command.
 */
static int
gicv3_its_bind_intr(device_t dev, struct intr_irqsrc *isrc)
{
	struct gicv3_its_irqsrc *girq;

	gicv3_its_select_cpu(dev, isrc);

	girq = (struct gicv3_its_irqsrc *)isrc;
	its_cmd_movi(dev, girq);
	return (0);
}

static int
gicv3_its_map_intr(device_t dev, struct intr_map_data *data,
    struct intr_irqsrc **isrcp)
{

	/*
	 * This should never happen, we only call this function to map
	 * interrupts found before the controller driver is ready.
	 */
	panic("gicv3_its_map_intr: Unable to map a MSI interrupt");
}

static int
gicv3_its_setup_intr(device_t dev, struct intr_irqsrc *isrc,
    struct resource *res, struct intr_map_data *data)
{

	/* Bind the interrupt to a CPU */
	gicv3_its_bind_intr(dev, isrc);

	return (0);
}

#ifdef SMP
/*
 * Bring up LPIs on a secondary CPU as it comes online.
 */
static void
gicv3_its_init_secondary(device_t dev)
{
	struct gicv3_its_softc *sc;

	sc = device_get_softc(dev);

	/*
	 * This is fatal as otherwise we may bind interrupts to this CPU.
	 * We need a way to tell the interrupt framework to only bind to a
	 * subset of given CPUs when it performs the shuffle.
	 */
	if (its_init_cpu(dev, sc) != 0)
		panic("gicv3_its_init_secondary: No usable ITS on CPU%d",
		    PCPU_GET(cpuid));
}
#endif

/*
 * Return the ITS DeviceID for a PCI device (from the MSI mapping).
 */
static uint32_t
its_get_devid(device_t pci_dev)
{
	uintptr_t id;

	if (pci_get_id(pci_dev, PCI_ID_MSI, &id) != 0)
		panic("%s: %s: Unable to get the MSI DeviceID", __func__,
		    device_get_nameunit(pci_dev));

	return (id);
}

/*
 * Look up the its_dev for a child device, or NULL if not yet created.
 */
static struct its_dev *
its_device_find(device_t dev, device_t child)
{
	struct gicv3_its_softc *sc;
	struct its_dev *its_dev = NULL;

	sc = device_get_softc(dev);

	mtx_lock_spin(&sc->sc_its_dev_lock);
	TAILQ_FOREACH(its_dev, &sc->sc_its_dev_list, entry) {
		if (its_dev->pci_dev == child)
			break;
	}
	mtx_unlock_spin(&sc->sc_its_dev_lock);

	return (its_dev);
}

/*
 * Find or create the its_dev for a child device, reserving nvecs IRQ
 * numbers and an Interrupt Translation Table, and mapping the device
 * in the ITS (MAPD).  Returns NULL on allocation failure.  Uses
 * M_NOWAIT throughout as it may be called with non-sleepable locks
 * held by MSI allocation paths.
 */
static struct its_dev *
its_device_get(device_t dev, device_t child, u_int nvecs)
{
	struct gicv3_its_softc *sc;
	struct its_dev *its_dev;
	vmem_addr_t irq_base;
	size_t esize;

	sc = device_get_softc(dev);

	its_dev = its_device_find(dev, child);
	if (its_dev != NULL)
		return (its_dev);

	its_dev = malloc(sizeof(*its_dev), M_GICV3_ITS, M_NOWAIT | M_ZERO);
	if (its_dev == NULL)
		return (NULL);

	its_dev->pci_dev = child;
	its_dev->devid = its_get_devid(child);

	its_dev->lpis.lpi_busy = 0;
	its_dev->lpis.lpi_num = nvecs;
	its_dev->lpis.lpi_free = nvecs;

	if (vmem_alloc(sc->sc_irq_alloc, nvecs, M_FIRSTFIT | M_NOWAIT,
	    &irq_base) != 0) {
		free(its_dev, M_GICV3_ITS);
		return (NULL);
	}
	its_dev->lpis.lpi_base = irq_base;

	/* Get ITT entry size */
	esize = GITS_TYPER_ITTES(gic_its_read_8(sc, GITS_TYPER));

	/*
	 * Allocate ITT for this device.
	 * PA has to be 256 B aligned. At least two entries for device.
	 */
	its_dev->itt_size = roundup2(MAX(nvecs, 2) * esize, 256);
	its_dev->itt = (vm_offset_t)contigmalloc_domainset(its_dev->itt_size,
	    M_GICV3_ITS, sc->sc_ds, M_NOWAIT | M_ZERO, 0,
	    LPI_INT_TRANS_TAB_MAX_ADDR, LPI_INT_TRANS_TAB_ALIGN, 0);
	if (its_dev->itt == 0) {
		vmem_free(sc->sc_irq_alloc, its_dev->lpis.lpi_base, nvecs);
		free(its_dev, M_GICV3_ITS);
		return (NULL);
	}

	/* Make sure device sees zeroed ITT. */
	if ((sc->sc_its_flags & ITS_FLAGS_CMDQ_FLUSH) != 0)
		cpu_dcache_wb_range(its_dev->itt, its_dev->itt_size);

	mtx_lock_spin(&sc->sc_its_dev_lock);
	TAILQ_INSERT_TAIL(&sc->sc_its_dev_list, its_dev, entry);
	mtx_unlock_spin(&sc->sc_its_dev_lock);

	/* Map device to its ITT */
	its_cmd_mapd(dev, its_dev, 1);

	return (its_dev);
}

/*
 * Release an its_dev once all its LPIs are free: unmap it in the ITS,
 * drop it from the device list, and free the ITT, IRQ range and the
 * structure itself.
 */
static void
its_device_release(device_t dev, struct its_dev *its_dev)
{
	struct gicv3_its_softc *sc;

	KASSERT(its_dev->lpis.lpi_busy == 0,
	    ("its_device_release: Trying to release an inuse ITS device"));

	/* Unmap device in ITS */
	its_cmd_mapd(dev, its_dev, 0);

	sc = device_get_softc(dev);

	/* Remove the device from the list of devices */
	mtx_lock_spin(&sc->sc_its_dev_lock);
	TAILQ_REMOVE(&sc->sc_its_dev_list, its_dev, entry);
	mtx_unlock_spin(&sc->sc_its_dev_lock);

	/* Free ITT */
	KASSERT(its_dev->itt != 0, ("Invalid ITT in valid ITS device"));
	contigfree((void *)its_dev->itt, its_dev->itt_size, M_GICV3_ITS);

	/* Free the IRQ allocation */
	vmem_free(sc->sc_irq_alloc, its_dev->lpis.lpi_base,
	    its_dev->lpis.lpi_num);

	free(its_dev, M_GICV3_ITS);
}

static struct gicv3_its_irqsrc *
gicv3_its_alloc_irqsrc(device_t dev, struct gicv3_its_softc *sc, u_int irq)
{
	struct gicv3_its_irqsrc *girq = NULL;

	KASSERT(sc->sc_irqs[irq] == NULL,
	    ("%s: Interrupt %u already allocated",
	    __func__, irq));
	mtx_lock_spin(&sc->sc_its_dev_lock);
	if (!TAILQ_EMPTY(&sc->sc_free_irqs)) {
		girq = TAILQ_FIRST(&sc->sc_free_irqs);
		TAILQ_REMOVE(&sc->sc_free_irqs, girq, gi_link);
	}
	mtx_unlock_spin(&sc->sc_its_dev_lock);
	if (girq == NULL) {
		girq = malloc(sizeof(*girq), M_GICV3_ITS,
		    M_NOWAIT | M_ZERO);
		if (girq == NULL)
			return (NULL);
		girq->gi_id = -1;
		if (intr_isrc_register(&girq->gi_isrc, dev, 0,
		    "%s,%u", device_get_nameunit(dev), irq) != 0) {
			free(girq, M_GICV3_ITS);
			return (NULL);
		}
	}
	girq->gi_lpi = irq + sc->sc_irq_base - GIC_FIRST_LPI;
	sc->sc_irqs[irq] = girq;

	return (girq);
}

/*
 * Return an irqsrc to the free list.  The caller must hold
 * sc_its_dev_lock (asserted below); registered irqsrcs are recycled
 * rather than freed.
 */
static void
gicv3_its_release_irqsrc(struct gicv3_its_softc *sc,
    struct gicv3_its_irqsrc *girq)
{
	u_int irq;

	mtx_assert(&sc->sc_its_dev_lock, MA_OWNED);

	irq = girq->gi_lpi + GIC_FIRST_LPI - sc->sc_irq_base;
	sc->sc_irqs[irq] = NULL;

	girq->gi_id = -1;
	girq->gi_its_dev = NULL;
	TAILQ_INSERT_TAIL(&sc->sc_free_irqs, girq, gi_link);
}

/*
 * MSI allocation: reserve 'count' LPIs for 'child', allocate an irqsrc
 * for each, and issue MAPTI commands to map each EventID to its LPI.
 * All-or-nothing: on partial failure every allocated irqsrc is released
 * and ENXIO is returned.
 * NOTE(review): 'maxcount' is unused; only 'count' interrupts are
 * allocated.
 */
static int
gicv3_its_alloc_msi(device_t dev, device_t child, int count, int maxcount,
    device_t *pic, struct intr_irqsrc **srcs)
{
	struct gicv3_its_softc *sc;
	struct gicv3_its_irqsrc *girq;
	struct its_dev *its_dev;
	u_int irq;
	int i;

	its_dev = its_device_get(dev, child, count);
	if (its_dev == NULL)
		return (ENXIO);

	KASSERT(its_dev->lpis.lpi_free >= count,
	    ("gicv3_its_alloc_msi: No free LPIs"));
	sc = device_get_softc(dev);
	/* First unused interrupt index within this device's LPI range */
	irq = its_dev->lpis.lpi_base + its_dev->lpis.lpi_num -
	    its_dev->lpis.lpi_free;

	/* Allocate the irqsrc for each MSI */
	for (i = 0; i < count; i++, irq++) {
		its_dev->lpis.lpi_free--;
		srcs[i] = (struct intr_irqsrc *)gicv3_its_alloc_irqsrc(dev,
		    sc, irq);
		if (srcs[i] == NULL)
			break;
	}

	/* The allocation failed, release them */
	if (i != count) {
		mtx_lock_spin(&sc->sc_its_dev_lock);
		for (i = 0; i < count; i++) {
			girq = (struct gicv3_its_irqsrc *)srcs[i];
			if (girq == NULL)
				break;
			gicv3_its_release_irqsrc(sc, girq);
			srcs[i] = NULL;
		}
		mtx_unlock_spin(&sc->sc_its_dev_lock);
		return (ENXIO);
	}

	/* Finish the allocation now we have all MSI irqsrcs */
	for (i = 0; i < count; i++) {
		girq = (struct gicv3_its_irqsrc *)srcs[i];
		girq->gi_id = i;
		girq->gi_its_dev = its_dev;

		/* Map the message to the given IRQ */
		gicv3_its_select_cpu(dev, (struct intr_irqsrc *)girq);
		its_cmd_mapti(dev, girq);
	}
	its_dev->lpis.lpi_busy += count;
	*pic = dev;

	return (0);
}

/*
 * Release 'count' previously-allocated MSI interrupts for 'child'.
 * When the last interrupt is released the whole ITS device state is torn
 * down via its_device_release().
 */
static int
gicv3_its_release_msi(device_t dev, device_t child, int count,
    struct intr_irqsrc **isrc)
{
	struct gicv3_its_softc *sc;
	struct gicv3_its_irqsrc *girq;
	struct its_dev *its_dev;
	int i;

	its_dev = its_device_find(dev, child);

	KASSERT(its_dev != NULL,
	    ("gicv3_its_release_msi: Releasing a MSI interrupt with "
	     "no ITS device"));
	KASSERT(its_dev->lpis.lpi_busy >= count,
	    ("gicv3_its_release_msi: Releasing more interrupts than "
	     "were allocated: releasing %d, allocated %d", count,
	     its_dev->lpis.lpi_busy));

	sc = device_get_softc(dev);
	mtx_lock_spin(&sc->sc_its_dev_lock);
	for (i = 0; i < count; i++) {
		girq = (struct gicv3_its_irqsrc *)isrc[i];
		gicv3_its_release_irqsrc(sc, girq);
	}
	mtx_unlock_spin(&sc->sc_its_dev_lock);
	its_dev->lpis.lpi_busy -= count;

	if (its_dev->lpis.lpi_busy == 0)
		its_device_release(dev, its_dev);

	return (0);
}

/*
 * MSI-X allocation: allocate a single interrupt for 'child'.  The device
 * state is sized for the full MSI-X vector count on first use; each call
 * hands out the next free LPI and maps it with MAPTI.
 */
static int
gicv3_its_alloc_msix(device_t dev, device_t child, device_t *pic,
    struct intr_irqsrc **isrcp)
{
	struct gicv3_its_softc *sc;
	struct gicv3_its_irqsrc *girq;
	struct its_dev *its_dev;
	u_int nvecs, irq;

	nvecs = pci_msix_count(child);
	its_dev = its_device_get(dev, child, nvecs);
	if (its_dev == NULL)
		return (ENXIO);

	KASSERT(its_dev->lpis.lpi_free > 0,
	    ("gicv3_its_alloc_msix: No free LPIs"));
	sc = device_get_softc(dev);
	/* Next unused interrupt index within this device's LPI range */
	irq = its_dev->lpis.lpi_base + its_dev->lpis.lpi_num -
	    its_dev->lpis.lpi_free;

	girq = gicv3_its_alloc_irqsrc(dev, sc, irq);
	if (girq == NULL)
		return (ENXIO);
	girq->gi_id = its_dev->lpis.lpi_busy;
	girq->gi_its_dev = its_dev;

	its_dev->lpis.lpi_free--;
	its_dev->lpis.lpi_busy++;

	/* Map the message to the given IRQ */
	gicv3_its_select_cpu(dev, (struct intr_irqsrc *)girq);
	its_cmd_mapti(dev, girq);

	*pic = dev;
	*isrcp = (struct intr_irqsrc *)girq;

	return (0);
}

/*
 * Release a single MSI-X interrupt.  When the last one is released the
 * ITS device state is torn down via its_device_release().
 */
static int
gicv3_its_release_msix(device_t dev, device_t child, struct intr_irqsrc *isrc)
{
	struct gicv3_its_softc *sc;
	struct gicv3_its_irqsrc *girq;
	struct its_dev *its_dev;

	its_dev = its_device_find(dev, child);

	KASSERT(its_dev != NULL,
	    ("gicv3_its_release_msix: Releasing a MSI-X interrupt with "
	     "no ITS device"));
	KASSERT(its_dev->lpis.lpi_busy > 0,
	    ("gicv3_its_release_msix: Releasing more interrupts than "
	     "were allocated: allocated %d", its_dev->lpis.lpi_busy));

	sc = device_get_softc(dev);
	girq = (struct gicv3_its_irqsrc *)isrc;
	mtx_lock_spin(&sc->sc_its_dev_lock);
	gicv3_its_release_irqsrc(sc, girq);
	mtx_unlock_spin(&sc->sc_its_dev_lock);
	its_dev->lpis.lpi_busy--;

	if (its_dev->lpis.lpi_busy == 0)
		its_device_release(dev, its_dev);

	return (0);
}

/*
 * Fill in the MSI address/data pair a device should write to raise this
 * interrupt: the physical address of GITS_TRANSLATER and the per-device
 * EventID.
 */
static int
gicv3_its_map_msi(device_t dev, device_t child, struct intr_irqsrc *isrc,
    uint64_t *addr, uint32_t *data)
{
	struct gicv3_its_softc *sc;
	struct gicv3_its_irqsrc *girq;

	sc = device_get_softc(dev);
	girq = (struct gicv3_its_irqsrc *)isrc;

	*addr = vtophys(rman_get_virtual(sc->sc_its_res)) + GITS_TRANSLATER;
	*data = girq->gi_id;

	return (0);
}

#ifdef IOMMU
/*
 * Create an IOMMU mapping covering GITS_TRANSLATER so that devices
 * behind an IOMMU can deliver MSI writes to the ITS.
 */
static int
gicv3_iommu_init(device_t dev, device_t child, struct iommu_domain **domain)
{
	struct gicv3_its_softc *sc;
	struct iommu_ctx *ctx;
	int error;

	sc = device_get_softc(dev);
	ctx = iommu_get_dev_ctx(child);
	error = iommu_map_msi(ctx, PAGE_SIZE, GITS_TRANSLATER,
	    IOMMU_MAP_ENTRY_WRITE, IOMMU_MF_CANWAIT, &sc->ma);
	*domain = iommu_get_ctx_domain(ctx);

	return (error);
}

/* Remove the MSI mapping created by gicv3_iommu_init(). */
static void
gicv3_iommu_deinit(device_t dev, device_t child)
{
	struct iommu_ctx *ctx;

	ctx = iommu_get_dev_ctx(child);
	iommu_unmap_msi(ctx);
}
#endif

/*
 * Commands handling.
 *
 * The cmd_format_* helpers below pack fields into the four little-endian
 * doublewords of an ITS command descriptor.
 */

static __inline void
cmd_format_command(struct its_cmd *cmd, uint8_t cmd_type)
{
	/* Command field: DW0 [7:0] */
	cmd->cmd_dword[0] &= htole64(~CMD_COMMAND_MASK);
	cmd->cmd_dword[0] |= htole64(cmd_type);
}

static __inline void
cmd_format_devid(struct its_cmd *cmd, uint32_t devid)
{
	/* Device ID field: DW0 [63:32] */
	cmd->cmd_dword[0] &= htole64(~CMD_DEVID_MASK);
	cmd->cmd_dword[0] |= htole64((uint64_t)devid << CMD_DEVID_SHIFT);
}

static __inline void
cmd_format_size(struct its_cmd *cmd, uint16_t size)
{
	/* Size field: DW1 [4:0] */
	cmd->cmd_dword[1] &= htole64(~CMD_SIZE_MASK);
	cmd->cmd_dword[1] |= htole64((size & CMD_SIZE_MASK));
}

static __inline void
cmd_format_id(struct its_cmd *cmd, uint32_t id)
{
	/* ID field: DW1 [31:0] */
	cmd->cmd_dword[1] &= htole64(~CMD_ID_MASK);
	cmd->cmd_dword[1] |= htole64(id);
}

static __inline void
cmd_format_pid(struct its_cmd *cmd, uint32_t pid)
{
	/*
 Physical ID field: DW1 [63:32] */
	cmd->cmd_dword[1] &= htole64(~CMD_PID_MASK);
	cmd->cmd_dword[1] |= htole64((uint64_t)pid << CMD_PID_SHIFT);
}

static __inline void
cmd_format_col(struct its_cmd *cmd, uint16_t col_id)
{
	/* Collection field: DW2 [15:0] */
	cmd->cmd_dword[2] &= htole64(~CMD_COL_MASK);
	cmd->cmd_dword[2] |= htole64(col_id);
}

static __inline void
cmd_format_target(struct its_cmd *cmd, uint64_t target)
{
	/* Target Address field: DW2 [47:16] */
	cmd->cmd_dword[2] &= htole64(~CMD_TARGET_MASK);
	cmd->cmd_dword[2] |= htole64(target & CMD_TARGET_MASK);
}

static __inline void
cmd_format_itt(struct its_cmd *cmd, uint64_t itt)
{
	/* ITT Address field: DW2 [47:8] */
	cmd->cmd_dword[2] &= htole64(~CMD_ITT_MASK);
	cmd->cmd_dword[2] |= htole64(itt & CMD_ITT_MASK);
}

static __inline void
cmd_format_valid(struct its_cmd *cmd, uint8_t valid)
{
	/* Valid field: DW2 [63] */
	cmd->cmd_dword[2] &= htole64(~CMD_VALID_MASK);
	cmd->cmd_dword[2] |= htole64((uint64_t)valid << CMD_VALID_SHIFT);
}

/*
 * Return true when the command ring is full, i.e. advancing the write
 * index would collide with the hardware read pointer (GITS_CREADR).
 */
static inline bool
its_cmd_queue_full(struct gicv3_its_softc *sc)
{
	size_t read_idx, next_write_idx;

	/* Get the index of the next command */
	next_write_idx = (sc->sc_its_cmd_next_idx + 1) %
	    (ITS_CMDQ_SIZE / sizeof(struct its_cmd));
	/* And the index of the current command being read */
	read_idx = gic_its_read_4(sc, GITS_CREADR) / sizeof(struct its_cmd);

	/*
	 * The queue is full when the write offset points
	 * at the command before the current read offset.
	 */
	return (next_write_idx == read_idx);
}

/*
 * Make a written command visible to the ITS: either clean the D-cache
 * line (when the hardware requires explicit flushing) or issue a store
 * barrier.
 */
static inline void
its_cmd_sync(struct gicv3_its_softc *sc, struct its_cmd *cmd)
{

	if ((sc->sc_its_flags & ITS_FLAGS_CMDQ_FLUSH) != 0) {
		/* Clean D-cache under command. */
		cpu_dcache_wb_range((vm_offset_t)cmd, sizeof(*cmd));
	} else {
		/* DSB inner shareable, store */
		dsb(ishst);
	}

}

/* Byte offset of 'cmd' within the command queue, as used by CWRITER. */
static inline uint64_t
its_cmd_cwriter_offset(struct gicv3_its_softc *sc, struct its_cmd *cmd)
{
	uint64_t off;

	off = (cmd - sc->sc_its_cmd_base) * sizeof(*cmd);

	return (off);
}

/*
 * Poll GITS_CREADR until the hardware has consumed all commands in
 * [cmd_first, cmd_last), handling the ring wrap-around case.  Prints a
 * warning and gives up after the (iteration-counted) timeout.
 */
static void
its_cmd_wait_completion(device_t dev, struct its_cmd *cmd_first,
    struct its_cmd *cmd_last)
{
	struct gicv3_its_softc *sc;
	uint64_t first, last, read;
	size_t us_left;

	sc = device_get_softc(dev);

	/*
	 * XXX ARM64TODO: This is obviously a significant delay.
	 * The reason for that is that currently the time frames for
	 * the command to complete are not known.
	 */
	us_left = 1000000;

	first = its_cmd_cwriter_offset(sc, cmd_first);
	last = its_cmd_cwriter_offset(sc, cmd_last);

	for (;;) {
		read = gic_its_read_8(sc, GITS_CREADR);
		if (first < last) {
			if (read < first || read >= last)
				break;
		} else if (read < first && read >= last)
			break;

		if (us_left-- == 0) {
			/* This means timeout */
			device_printf(dev,
			    "Timeout while waiting for CMD completion.\n");
			return;
		}
		DELAY(1);
	}
}

/*
 * Claim the next free slot in the command ring, spinning (with timeout)
 * while the ring is full.  Caller must hold sc_its_cmd_lock.  Returns
 * NULL on timeout.
 */
static struct its_cmd *
its_cmd_alloc_locked(device_t dev)
{
	struct gicv3_its_softc *sc;
	struct its_cmd *cmd;
	size_t us_left;

	sc = device_get_softc(dev);

	/*
	 * XXX ARM64TODO: This is obviously a significant delay.
	 * The reason for that is that currently the time frames for
	 * the command to complete (and therefore free the descriptor)
	 * are not known.
	 */
	us_left = 1000000;

	mtx_assert(&sc->sc_its_cmd_lock, MA_OWNED);
	while (its_cmd_queue_full(sc)) {
		if (us_left-- == 0) {
			/* Timeout while waiting for free command */
			device_printf(dev,
			    "Timeout while waiting for free command\n");
			return (NULL);
		}
		DELAY(1);
	}

	cmd = &sc->sc_its_cmd_base[sc->sc_its_cmd_next_idx];
	sc->sc_its_cmd_next_idx++;
	sc->sc_its_cmd_next_idx %= ITS_CMDQ_SIZE / sizeof(struct its_cmd);

	return (cmd);
}

/*
 * Encode a command descriptor into the hardware command slot 'cmd'.
 * Returns the target Re-Distributor address for commands that need a
 * follow-up SYNC, or ITS_TARGET_NONE when no SYNC is required.
 */
static uint64_t
its_cmd_prepare(struct its_cmd *cmd, struct its_cmd_desc *desc)
{
	uint64_t target;
	uint8_t cmd_type;
	u_int size;

	cmd_type = desc->cmd_type;
	target = ITS_TARGET_NONE;

	switch (cmd_type) {
	case ITS_CMD_MOVI:	/* Move interrupt ID to another collection */
		target = desc->cmd_desc_movi.col->col_target;
		cmd_format_command(cmd, ITS_CMD_MOVI);
		cmd_format_id(cmd, desc->cmd_desc_movi.id);
		cmd_format_col(cmd, desc->cmd_desc_movi.col->col_id);
		cmd_format_devid(cmd, desc->cmd_desc_movi.its_dev->devid);
		break;
	case ITS_CMD_SYNC:	/* Wait for previous commands completion */
		target = desc->cmd_desc_sync.col->col_target;
		cmd_format_command(cmd, ITS_CMD_SYNC);
		cmd_format_target(cmd, target);
		break;
	case ITS_CMD_MAPD:	/* Assign ITT to device */
		cmd_format_command(cmd, ITS_CMD_MAPD);
		cmd_format_itt(cmd, vtophys(desc->cmd_desc_mapd.its_dev->itt));
		/*
		 * Size describes number of bits to encode interrupt IDs
		 * supported by the device minus one.
		 * When V (valid) bit is zero, this field should be written
		 * as zero.
		 */
		if (desc->cmd_desc_mapd.valid != 0) {
			size = fls(desc->cmd_desc_mapd.its_dev->lpis.lpi_num);
			size = MAX(1, size) - 1;
		} else
			size = 0;

		cmd_format_size(cmd, size);
		cmd_format_devid(cmd, desc->cmd_desc_mapd.its_dev->devid);
		cmd_format_valid(cmd, desc->cmd_desc_mapd.valid);
		break;
	case ITS_CMD_MAPC:	/* Map collection to Re-Distributor */
		target = desc->cmd_desc_mapc.col->col_target;
		cmd_format_command(cmd, ITS_CMD_MAPC);
		cmd_format_col(cmd, desc->cmd_desc_mapc.col->col_id);
		cmd_format_valid(cmd, desc->cmd_desc_mapc.valid);
		cmd_format_target(cmd, target);
		break;
	case ITS_CMD_MAPTI:
		target = desc->cmd_desc_mapvi.col->col_target;
		cmd_format_command(cmd, ITS_CMD_MAPTI);
		cmd_format_devid(cmd, desc->cmd_desc_mapvi.its_dev->devid);
		cmd_format_id(cmd, desc->cmd_desc_mapvi.id);
		cmd_format_pid(cmd, desc->cmd_desc_mapvi.pid);
		cmd_format_col(cmd, desc->cmd_desc_mapvi.col->col_id);
		break;
	case ITS_CMD_MAPI:
		target = desc->cmd_desc_mapi.col->col_target;
		cmd_format_command(cmd, ITS_CMD_MAPI);
		cmd_format_devid(cmd, desc->cmd_desc_mapi.its_dev->devid);
		cmd_format_id(cmd, desc->cmd_desc_mapi.pid);
		cmd_format_col(cmd, desc->cmd_desc_mapi.col->col_id);
		break;
	case ITS_CMD_INV:
		target = desc->cmd_desc_inv.col->col_target;
		cmd_format_command(cmd, ITS_CMD_INV);
		cmd_format_devid(cmd, desc->cmd_desc_inv.its_dev->devid);
		cmd_format_id(cmd, desc->cmd_desc_inv.pid);
		break;
	case ITS_CMD_INVALL:
		cmd_format_command(cmd, ITS_CMD_INVALL);
		cmd_format_col(cmd, desc->cmd_desc_invall.col->col_id);
		break;
	default:
		panic("its_cmd_prepare: Invalid command: %x", cmd_type);
	}

	return (target);
}

/*
 * Enqueue one command (plus a trailing SYNC when it targets a specific
 * Re-Distributor), publish the new write pointer in GITS_CWRITER, and
 * wait for the ITS to consume the commands.
 */
static int
its_cmd_send(device_t dev, struct its_cmd_desc *desc)
{
	struct gicv3_its_softc *sc;
	struct its_cmd *cmd, *cmd_sync, *cmd_write;
	struct its_col col_sync;
	struct its_cmd_desc desc_sync;
	uint64_t target, cwriter;

	sc = device_get_softc(dev);
	mtx_lock_spin(&sc->sc_its_cmd_lock);
	cmd = its_cmd_alloc_locked(dev);
	if (cmd == NULL) {
		device_printf(dev, "could not allocate ITS command\n");
		mtx_unlock_spin(&sc->sc_its_cmd_lock);
		return (EBUSY);
	}

	target = its_cmd_prepare(cmd, desc);
	its_cmd_sync(sc, cmd);

	/* Chase targeted commands with a SYNC on the same Re-Distributor */
	if (target != ITS_TARGET_NONE) {
		cmd_sync = its_cmd_alloc_locked(dev);
		if (cmd_sync != NULL) {
			desc_sync.cmd_type = ITS_CMD_SYNC;
			col_sync.col_target = target;
			desc_sync.cmd_desc_sync.col = &col_sync;
			its_cmd_prepare(cmd_sync, &desc_sync);
			its_cmd_sync(sc, cmd_sync);
		}
	}

	/* Update GITS_CWRITER */
	cwriter = sc->sc_its_cmd_next_idx * sizeof(struct its_cmd);
	gic_its_write_8(sc, GITS_CWRITER, cwriter);
	cmd_write = &sc->sc_its_cmd_base[sc->sc_its_cmd_next_idx];
	mtx_unlock_spin(&sc->sc_its_cmd_lock);

	its_cmd_wait_completion(dev, cmd, cmd_write);

	return (0);
}

/* Handlers to send commands */

/* MOVI: retarget an interrupt at the collection of its bound CPU. */
static void
its_cmd_movi(device_t dev, struct gicv3_its_irqsrc *girq)
{
	struct gicv3_its_softc *sc;
	struct its_cmd_desc desc;
	struct its_col *col;

	sc = device_get_softc(dev);
	col = sc->sc_its_cols[CPU_FFS(&girq->gi_isrc.isrc_cpu) - 1];

	desc.cmd_type = ITS_CMD_MOVI;
	desc.cmd_desc_movi.its_dev = girq->gi_its_dev;
	desc.cmd_desc_movi.col = col;
	desc.cmd_desc_movi.id = girq->gi_id;

	its_cmd_send(dev, &desc);
}

/* MAPC: map (valid != 0) or unmap (valid == 0) a collection. */
static void
its_cmd_mapc(device_t dev, struct its_col *col, uint8_t valid)
{
	struct its_cmd_desc desc;

	desc.cmd_type = ITS_CMD_MAPC;
	desc.cmd_desc_mapc.col = col;
	/*
	 * Valid bit set - map the collection.
	 * Valid bit cleared - unmap the collection.
	 */
	desc.cmd_desc_mapc.valid = valid;

	its_cmd_send(dev, &desc);
}

/* MAPTI: map a device EventID to a physical LPI and a collection. */
static void
its_cmd_mapti(device_t dev, struct gicv3_its_irqsrc *girq)
{
	struct gicv3_its_softc *sc;
	struct its_cmd_desc desc;
	struct its_col *col;
	u_int col_id;

	sc = device_get_softc(dev);

	col_id = CPU_FFS(&girq->gi_isrc.isrc_cpu) - 1;
	col = sc->sc_its_cols[col_id];

	desc.cmd_type = ITS_CMD_MAPTI;
	desc.cmd_desc_mapvi.its_dev = girq->gi_its_dev;
	desc.cmd_desc_mapvi.col = col;
	/* The EventID sent to the device */
	desc.cmd_desc_mapvi.id = girq->gi_id;
	/* The physical interrupt presented to software */
	desc.cmd_desc_mapvi.pid = girq->gi_lpi + GIC_FIRST_LPI;

	its_cmd_send(dev, &desc);
}

/* MAPD: attach (valid != 0) or detach (valid == 0) a device's ITT. */
static void
its_cmd_mapd(device_t dev, struct its_dev *its_dev, uint8_t valid)
{
	struct its_cmd_desc desc;

	desc.cmd_type = ITS_CMD_MAPD;
	desc.cmd_desc_mapd.its_dev = its_dev;
	desc.cmd_desc_mapd.valid = valid;

	its_cmd_send(dev, &desc);
}

/* INV: tell the ITS the cached configuration of one LPI is stale. */
static void
its_cmd_inv(device_t dev, struct its_dev *its_dev,
    struct gicv3_its_irqsrc *girq)
{
	struct gicv3_its_softc *sc;
	struct its_cmd_desc desc;
	struct its_col *col;

	sc = device_get_softc(dev);
	col = sc->sc_its_cols[CPU_FFS(&girq->gi_isrc.isrc_cpu) - 1];

	desc.cmd_type = ITS_CMD_INV;
	/* The EventID sent to the device */
	desc.cmd_desc_inv.pid = girq->gi_id;
	desc.cmd_desc_inv.its_dev = its_dev;
	desc.cmd_desc_inv.col = col;

	its_cmd_send(dev, &desc);
}

/* INVALL: invalidate all cached LPI configuration for a collection. */
static void
its_cmd_invall(device_t dev, struct its_col *col)
{
	struct its_cmd_desc desc;

	desc.cmd_type = ITS_CMD_INVALL;
	desc.cmd_desc_invall.col = col;

	its_cmd_send(dev, &desc);
}

/*
 * FDT attachment: probe on the "arm,gic-v3-its" compatible and register
 * the device as an interrupt controller and MSI provider.
 */
#ifdef FDT
static device_probe_t gicv3_its_fdt_probe;
static device_attach_t gicv3_its_fdt_attach;

static device_method_t gicv3_its_fdt_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		gicv3_its_fdt_probe),
	DEVMETHOD(device_attach,	gicv3_its_fdt_attach),

	/* End */
	DEVMETHOD_END
};

#define its_baseclasses its_fdt_baseclasses
DEFINE_CLASS_1(its, gicv3_its_fdt_driver, gicv3_its_fdt_methods,
    sizeof(struct gicv3_its_softc), gicv3_its_driver);
#undef its_baseclasses
static devclass_t gicv3_its_fdt_devclass;

EARLY_DRIVER_MODULE(its_fdt, gic, gicv3_its_fdt_driver,
    gicv3_its_fdt_devclass, 0, 0, BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE);

static int
gicv3_its_fdt_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (!ofw_bus_is_compatible(dev, "arm,gic-v3-its"))
		return (ENXIO);

	device_set_desc(dev, "ARM GIC Interrupt Translation Service");
	return (BUS_PROBE_DEFAULT);
}

static int
gicv3_its_fdt_attach(device_t dev)
{
	struct gicv3_its_softc *sc;
	phandle_t xref;
	int err;

	sc = device_get_softc(dev);
	sc->dev = dev;
	err = gicv3_its_attach(dev);
	if (err != 0)
		return (err);

	/* Register this device as a interrupt controller */
	xref = OF_xref_from_node(ofw_bus_get_node(dev));
	sc->sc_pic = intr_pic_register(dev, xref);
	intr_pic_add_handler(device_get_parent(dev), sc->sc_pic,
	    gicv3_its_intr, sc, sc->sc_irq_base, sc->sc_irq_length);

	/* Register this device to handle MSI interrupts */
	intr_msi_register(dev, xref);

	return (0);
}
#endif

/*
 * ACPI attachment: probe on an ACPI-enumerated GICv3 and register the
 * device as an interrupt controller and MSI provider via the MADT xref.
 */
#ifdef DEV_ACPI
static device_probe_t gicv3_its_acpi_probe;
static device_attach_t gicv3_its_acpi_attach;

static device_method_t gicv3_its_acpi_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		gicv3_its_acpi_probe),
	DEVMETHOD(device_attach,	gicv3_its_acpi_attach),

	/* End */
	DEVMETHOD_END
};

#define its_baseclasses its_acpi_baseclasses
DEFINE_CLASS_1(its, gicv3_its_acpi_driver, gicv3_its_acpi_methods,
    sizeof(struct gicv3_its_softc), gicv3_its_driver);
#undef its_baseclasses
static devclass_t gicv3_its_acpi_devclass;

EARLY_DRIVER_MODULE(its_acpi, gic, gicv3_its_acpi_driver,
    gicv3_its_acpi_devclass, 0, 0, BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE);

static int
gicv3_its_acpi_probe(device_t dev)
{

	if (gic_get_bus(dev) != GIC_BUS_ACPI)
		return (EINVAL);

	if (gic_get_hw_rev(dev) < 3)
		return (EINVAL);

	device_set_desc(dev, "ARM GIC Interrupt Translation Service");
	return (BUS_PROBE_DEFAULT);
}

static int
gicv3_its_acpi_attach(device_t dev)
{
	struct gicv3_its_softc *sc;
	struct gic_v3_devinfo *di;
	int err;

	sc = device_get_softc(dev);
	sc->dev = dev;
	err = gicv3_its_attach(dev);
	if (err != 0)
		return (err);

	di = device_get_ivars(dev);
	sc->sc_pic = intr_pic_register(dev, di->msi_xref);
	intr_pic_add_handler(device_get_parent(dev), sc->sc_pic,
	    gicv3_its_intr, sc, sc->sc_irq_base, sc->sc_irq_length);

	/* Register this device to handle MSI interrupts */
	intr_msi_register(dev, di->msi_xref);

	return (0);
}
#endif