1 /*- 2 * Copyright (c) 2015-2016 The FreeBSD Foundation 3 * All rights reserved. 4 * 5 * This software was developed by Andrew Turner under 6 * the sponsorship of the FreeBSD Foundation. 7 * 8 * This software was developed by Semihalf under 9 * the sponsorship of the FreeBSD Foundation. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 30 * SUCH DAMAGE. 
 */

#include "opt_platform.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bitstring.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/cpuset.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/smp.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/intr.h>

#ifdef FDT
#include <dev/fdt/fdt_intr.h>
#include <dev/ofw/ofw_bus_subr.h>
#endif

#include "pic_if.h"

#include <arm/arm/gic_common.h>
#include "gic_v3_reg.h"
#include "gic_v3_var.h"

static bus_get_domain_t gic_v3_get_domain;
static bus_read_ivar_t gic_v3_read_ivar;

static pic_disable_intr_t gic_v3_disable_intr;
static pic_enable_intr_t gic_v3_enable_intr;
static pic_map_intr_t gic_v3_map_intr;
static pic_setup_intr_t gic_v3_setup_intr;
static pic_teardown_intr_t gic_v3_teardown_intr;
static pic_post_filter_t gic_v3_post_filter;
static pic_post_ithread_t gic_v3_post_ithread;
static pic_pre_ithread_t gic_v3_pre_ithread;
static pic_bind_intr_t gic_v3_bind_intr;
#ifdef SMP
static pic_init_secondary_t gic_v3_init_secondary;
static pic_ipi_send_t gic_v3_ipi_send;
static pic_ipi_setup_t gic_v3_ipi_setup;
#endif

/* Last CPU an SPI was routed to by gic_v3_bind_intr()'s round-robin. */
static u_int gic_irq_cpu;
#ifdef SMP
/* Mapping from SGI number to the IPI it has been allocated for. */
static u_int sgi_to_ipi[GIC_LAST_SGI - GIC_FIRST_SGI + 1];
/* Next SGI available for IPI allocation in gic_v3_ipi_setup(). */
static u_int sgi_first_unused = GIC_FIRST_SGI;
#endif

static device_method_t gic_v3_methods[] = {
	/* Device interface */
	DEVMETHOD(device_detach,	gic_v3_detach),

	/* Bus interface */
	DEVMETHOD(bus_get_domain,	gic_v3_get_domain),
	DEVMETHOD(bus_read_ivar,	gic_v3_read_ivar),

	/* Interrupt controller interface */
	DEVMETHOD(pic_disable_intr,	gic_v3_disable_intr),
	DEVMETHOD(pic_enable_intr,	gic_v3_enable_intr),
	DEVMETHOD(pic_map_intr,		gic_v3_map_intr),
	DEVMETHOD(pic_setup_intr,	gic_v3_setup_intr),
	DEVMETHOD(pic_teardown_intr,	gic_v3_teardown_intr),
	DEVMETHOD(pic_post_filter,	gic_v3_post_filter),
	DEVMETHOD(pic_post_ithread,	gic_v3_post_ithread),
	DEVMETHOD(pic_pre_ithread,	gic_v3_pre_ithread),
#ifdef SMP
	DEVMETHOD(pic_bind_intr,	gic_v3_bind_intr),
	DEVMETHOD(pic_init_secondary,	gic_v3_init_secondary),
	DEVMETHOD(pic_ipi_send,		gic_v3_ipi_send),
	DEVMETHOD(pic_ipi_setup,	gic_v3_ipi_setup),
#endif

	/* End */
	DEVMETHOD_END
};

DEFINE_CLASS_0(gic, gic_v3_driver, gic_v3_methods,
    sizeof(struct gic_v3_softc));

/*
 * Driver-specific definitions.
 */
MALLOC_DEFINE(M_GIC_V3, "GICv3", GIC_V3_DEVSTR);

/*
 * Helper functions and definitions.
 */
/* Destination registers, either Distributor or Re-Distributor */
enum gic_v3_xdist {
	DIST = 0,
	REDIST,
};

/* Per-interrupt state: generic isrc plus GIC IRQ number and configuration. */
struct gic_v3_irqsrc {
	struct intr_irqsrc	gi_isrc;
	uint32_t		gi_irq;
	enum intr_polarity	gi_pol;
	enum intr_trigger	gi_trig;
};

/* Helper routines starting with gic_v3_ */
static int gic_v3_dist_init(struct gic_v3_softc *);
static int gic_v3_redist_alloc(struct gic_v3_softc *);
static int gic_v3_redist_find(struct gic_v3_softc *);
static int gic_v3_redist_init(struct gic_v3_softc *);
static int gic_v3_cpu_init(struct gic_v3_softc *);
static void gic_v3_wait_for_rwp(struct gic_v3_softc *, enum gic_v3_xdist);

/* A sequence of init functions for primary (boot) CPU */
typedef int (*gic_v3_initseq_t) (struct gic_v3_softc *);
/* Primary CPU initialization sequence */
static gic_v3_initseq_t gic_v3_primary_init[] = {
	gic_v3_dist_init,
	gic_v3_redist_alloc,
	gic_v3_redist_init,
	gic_v3_cpu_init,
	NULL
};

#ifdef SMP
/*
Secondary CPU initialization sequence */
static gic_v3_initseq_t gic_v3_secondary_init[] = {
	gic_v3_redist_init,
	gic_v3_cpu_init,
	NULL
};
#endif

/* Read a 32-bit register of the current CPU's Re-Distributor. */
uint32_t
gic_r_read_4(device_t dev, bus_size_t offset)
{
	struct gic_v3_softc *sc;

	sc = device_get_softc(dev);
	return (bus_read_4(sc->gic_redists.pcpu[PCPU_GET(cpuid)], offset));
}

/* Read a 64-bit register of the current CPU's Re-Distributor. */
uint64_t
gic_r_read_8(device_t dev, bus_size_t offset)
{
	struct gic_v3_softc *sc;

	sc = device_get_softc(dev);
	return (bus_read_8(sc->gic_redists.pcpu[PCPU_GET(cpuid)], offset));
}

/* Write a 32-bit register of the current CPU's Re-Distributor. */
void
gic_r_write_4(device_t dev, bus_size_t offset, uint32_t val)
{
	struct gic_v3_softc *sc;

	sc = device_get_softc(dev);
	bus_write_4(sc->gic_redists.pcpu[PCPU_GET(cpuid)], offset, val);
}

/* Write a 64-bit register of the current CPU's Re-Distributor. */
void
gic_r_write_8(device_t dev, bus_size_t offset, uint64_t val)
{
	struct gic_v3_softc *sc;

	sc = device_get_softc(dev);
	bus_write_8(sc->gic_redists.pcpu[PCPU_GET(cpuid)], offset, val);
}

/*
 * Device interface.
 */
/*
 * Common attach body shared by the FDT and ACPI front-ends; the caller is
 * expected to have filled in sc->gic_redists.nregions beforehand.
 */
int
gic_v3_attach(device_t dev)
{
	struct gic_v3_softc *sc;
	gic_v3_initseq_t *init_func;
	uint32_t typer;
	int rid;
	int err;
	size_t i;
	u_int irq;
	const char *name;

	sc = device_get_softc(dev);
	sc->gic_registered = FALSE;
	sc->dev = dev;
	err = 0;

	/* Initialize mutex */
	mtx_init(&sc->gic_mtx, "GICv3 lock", NULL, MTX_SPIN);

	/*
	 * Allocate array of struct resource.
	 * One entry for Distributor and all remaining for Re-Distributor
238 */ 239 sc->gic_res = malloc( 240 sizeof(*sc->gic_res) * (sc->gic_redists.nregions + 1), 241 M_GIC_V3, M_WAITOK); 242 243 /* Now allocate corresponding resources */ 244 for (i = 0, rid = 0; i < (sc->gic_redists.nregions + 1); i++, rid++) { 245 sc->gic_res[rid] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 246 &rid, RF_ACTIVE); 247 if (sc->gic_res[rid] == NULL) 248 return (ENXIO); 249 } 250 251 /* 252 * Distributor interface 253 */ 254 sc->gic_dist = sc->gic_res[0]; 255 256 /* 257 * Re-Dristributor interface 258 */ 259 /* Allocate space under region descriptions */ 260 sc->gic_redists.regions = malloc( 261 sizeof(*sc->gic_redists.regions) * sc->gic_redists.nregions, 262 M_GIC_V3, M_WAITOK); 263 264 /* Fill-up bus_space information for each region. */ 265 for (i = 0, rid = 1; i < sc->gic_redists.nregions; i++, rid++) 266 sc->gic_redists.regions[i] = sc->gic_res[rid]; 267 268 /* Get the number of supported SPI interrupts */ 269 typer = gic_d_read(sc, 4, GICD_TYPER); 270 sc->gic_nirqs = GICD_TYPER_I_NUM(typer); 271 if (sc->gic_nirqs > GIC_I_NUM_MAX) 272 sc->gic_nirqs = GIC_I_NUM_MAX; 273 274 sc->gic_irqs = malloc(sizeof(*sc->gic_irqs) * sc->gic_nirqs, 275 M_GIC_V3, M_WAITOK | M_ZERO); 276 name = device_get_nameunit(dev); 277 for (irq = 0; irq < sc->gic_nirqs; irq++) { 278 struct intr_irqsrc *isrc; 279 280 sc->gic_irqs[irq].gi_irq = irq; 281 sc->gic_irqs[irq].gi_pol = INTR_POLARITY_CONFORM; 282 sc->gic_irqs[irq].gi_trig = INTR_TRIGGER_CONFORM; 283 284 isrc = &sc->gic_irqs[irq].gi_isrc; 285 if (irq <= GIC_LAST_SGI) { 286 err = intr_isrc_register(isrc, sc->dev, 287 INTR_ISRCF_IPI, "%s,i%u", name, irq - GIC_FIRST_SGI); 288 } else if (irq <= GIC_LAST_PPI) { 289 err = intr_isrc_register(isrc, sc->dev, 290 INTR_ISRCF_PPI, "%s,p%u", name, irq - GIC_FIRST_PPI); 291 } else { 292 err = intr_isrc_register(isrc, sc->dev, 0, 293 "%s,s%u", name, irq - GIC_FIRST_SPI); 294 } 295 if (err != 0) { 296 /* XXX call intr_isrc_deregister() */ 297 free(sc->gic_irqs, M_DEVBUF); 298 return 
			    (err);
		}
	}

	/*
	 * Read the Peripheral ID2 register. This is an implementation
	 * defined register, but seems to be implemented in all GICv3
	 * parts and Linux expects it to be there.
	 */
	sc->gic_pidr2 = gic_d_read(sc, 4, GICD_PIDR2);

	/* Get the number of supported interrupt identifier bits */
	sc->gic_idbits = GICD_TYPER_IDBITS(typer);

	if (bootverbose) {
		device_printf(dev, "SPIs: %u, IDs: %u\n",
		    sc->gic_nirqs, (1 << sc->gic_idbits) - 1);
	}

	/* Train init sequence for boot CPU */
	for (init_func = gic_v3_primary_init; *init_func != NULL; init_func++) {
		err = (*init_func)(sc);
		if (err != 0)
			return (err);
	}

	return (0);
}

/*
 * Release the resources taken in gic_v3_attach(); refuses to run
 * while the PIC is still registered with the interrupt framework.
 */
int
gic_v3_detach(device_t dev)
{
	struct gic_v3_softc *sc;
	size_t i;
	int rid;

	sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		/*
		 * XXX: We should probably deregister PIC
		 */
		if (sc->gic_registered)
			panic("Trying to detach registered PIC");
	}
	for (rid = 0; rid < (sc->gic_redists.nregions + 1); rid++)
		bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->gic_res[rid]);

	for (i = 0; i <= mp_maxid; i++)
		free(sc->gic_redists.pcpu[i], M_GIC_V3);

	free(sc->gic_res, M_GIC_V3);
	free(sc->gic_redists.regions, M_GIC_V3);

	return (0);
}

/* Report the NUMA domain of a child device, if one was recorded in its ivars. */
static int
gic_v3_get_domain(device_t dev, device_t child, int *domain)
{
	struct gic_v3_devinfo *di;

	di = device_get_ivars(child);
	if (di->gic_domain < 0)
		return (ENOENT);

	*domain = di->gic_domain;
	return (0);
}

/* Export GIC properties (IRQ counts, redistributor address, HW revision). */
static int
gic_v3_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
{
	struct gic_v3_softc *sc;

	sc = device_get_softc(dev);

	switch (which) {
	case GICV3_IVAR_NIRQS:
		/* IRQ numbers above gic_nirqs are shared among the children. */
		*result = (NIRQ - sc->gic_nirqs) / sc->gic_nchildren;
		return (0);
	case GICV3_IVAR_REDIST_VADDR:
		*result =
		    (uintptr_t)rman_get_virtual(
		    sc->gic_redists.pcpu[PCPU_GET(cpuid)]);
		return (0);
	case GIC_IVAR_HW_REV:
		KASSERT(
		    GICR_PIDR2_ARCH(sc->gic_pidr2) == GICR_PIDR2_ARCH_GICv3 ||
		    GICR_PIDR2_ARCH(sc->gic_pidr2) == GICR_PIDR2_ARCH_GICv4,
		    ("gic_v3_read_ivar: Invalid GIC architecture: %d (%.08X)",
		     GICR_PIDR2_ARCH(sc->gic_pidr2), sc->gic_pidr2));
		*result = GICR_PIDR2_ARCH(sc->gic_pidr2);
		return (0);
	case GIC_IVAR_BUS:
		KASSERT(sc->gic_bus != GIC_BUS_UNKNOWN,
		    ("gic_v3_read_ivar: Unknown bus type"));
		KASSERT(sc->gic_bus <= GIC_BUS_MAX,
		    ("gic_v3_read_ivar: Invalid bus type %u", sc->gic_bus));
		*result = sc->gic_bus;
		return (0);
	}

	return (ENOENT);
}

/*
 * Top-level interrupt filter: acknowledge pending interrupts from
 * ICC_IAR1_EL1 and dispatch them until none remain.  LPIs are handed
 * to the child controller (ITS); SGIs/PPIs/SPIs are dispatched locally.
 */
int
arm_gic_v3_intr(void *arg)
{
	struct gic_v3_softc *sc = arg;
	struct gic_v3_irqsrc *gi;
	struct intr_pic *pic;
	uint64_t active_irq;
	struct trapframe *tf;
	bool first;

	first = true;
	pic = sc->gic_pic;

	while (1) {
		if (CPU_MATCH_ERRATA_CAVIUM_THUNDER_1_1) {
			/*
			 * Hardware:		Cavium ThunderX
			 * Chip revision:	Pass 1.0 (early version)
			 *			Pass 1.1 (production)
			 * ERRATUM:		22978, 23154
			 */
			__asm __volatile(
			    "nop;nop;nop;nop;nop;nop;nop;nop;	\n"
			    "mrs %0, ICC_IAR1_EL1		\n"
			    "nop;nop;nop;nop;			\n"
			    "dsb sy				\n"
			    : "=&r" (active_irq));
		} else {
			active_irq = gic_icc_read(IAR1);
		}

		if (active_irq >= GIC_FIRST_LPI) {
			intr_child_irq_handler(pic, active_irq);
			continue;
		}

		/* Spurious or out-of-range ID: nothing left to service. */
		if (__predict_false(active_irq >= sc->gic_nirqs))
			return (FILTER_HANDLED);

		tf = curthread->td_intr_frame;
		gi = &sc->gic_irqs[active_irq];
		if (active_irq <= GIC_LAST_SGI) {
			/* Call EOI for all IPI before dispatch.
			 */
			gic_icc_write(EOIR1, (uint64_t)active_irq);
#ifdef SMP
			intr_ipi_dispatch(sgi_to_ipi[gi->gi_irq], tf);
#else
			device_printf(sc->dev, "SGI %ju on UP system detected\n",
			    (uintmax_t)(active_irq - GIC_FIRST_SGI));
#endif
		} else if (active_irq >= GIC_FIRST_PPI &&
		    active_irq <= GIC_LAST_SPI) {
			/* Edge-triggered sources are EOId before dispatch. */
			if (gi->gi_trig == INTR_TRIGGER_EDGE)
				gic_icc_write(EOIR1, gi->gi_irq);

			if (intr_isrc_dispatch(&gi->gi_isrc, tf) != 0) {
				if (gi->gi_trig != INTR_TRIGGER_EDGE)
					gic_icc_write(EOIR1, gi->gi_irq);
				gic_v3_disable_intr(sc->dev, &gi->gi_isrc);
				device_printf(sc->dev,
				    "Stray irq %lu disabled\n", active_irq);
			}
		}
	}
}

#ifdef FDT
/*
 * Decode an FDT interrupt specifier (type, number, flags cells) into a
 * GIC IRQ number plus trigger/polarity.
 */
static int
gic_map_fdt(device_t dev, u_int ncells, pcell_t *cells, u_int *irqp,
    enum intr_polarity *polp, enum intr_trigger *trigp)
{
	u_int irq;

	if (ncells < 3)
		return (EINVAL);

	/*
	 * The 1st cell is the interrupt type:
	 *	0 = SPI
	 *	1 = PPI
	 * The 2nd cell contains the interrupt number:
	 *	[0 - 987] for SPI
	 *	[0 -  15] for PPI
	 * The 3rd cell is the flags, encoded as follows:
	 *   bits[3:0] trigger type and level flags
	 *	1 = edge triggered
	 *	2 = edge triggered (PPI only)
	 *	4 = level-sensitive
	 *	8 = level-sensitive (PPI only)
	 */
	switch (cells[0]) {
	case 0:
		irq = GIC_FIRST_SPI + cells[1];
		/* SPI irq is checked later.
		 */
		break;
	case 1:
		irq = GIC_FIRST_PPI + cells[1];
		if (irq > GIC_LAST_PPI) {
			device_printf(dev, "unsupported PPI interrupt "
			    "number %u\n", cells[1]);
			return (EINVAL);
		}
		break;
	default:
		device_printf(dev, "unsupported interrupt type "
		    "configuration %u\n", cells[0]);
		return (EINVAL);
	}

	switch (cells[2] & FDT_INTR_MASK) {
	case FDT_INTR_EDGE_RISING:
		*trigp = INTR_TRIGGER_EDGE;
		*polp = INTR_POLARITY_HIGH;
		break;
	case FDT_INTR_EDGE_FALLING:
		*trigp = INTR_TRIGGER_EDGE;
		*polp = INTR_POLARITY_LOW;
		break;
	case FDT_INTR_LEVEL_HIGH:
		*trigp = INTR_TRIGGER_LEVEL;
		*polp = INTR_POLARITY_HIGH;
		break;
	case FDT_INTR_LEVEL_LOW:
		*trigp = INTR_TRIGGER_LEVEL;
		*polp = INTR_POLARITY_LOW;
		break;
	default:
		device_printf(dev, "unsupported trigger/polarity "
		    "configuration 0x%02x\n", cells[2]);
		return (EINVAL);
	}

	/* Check the interrupt is valid; SPIs must be active-high. */
	if (irq >= GIC_FIRST_SPI && *polp != INTR_POLARITY_HIGH)
		return (EINVAL);

	*irqp = irq;
	return (0);
}
#endif

/* Translate MSI map data into the pre-allocated SPI backing the MSI. */
static int
gic_map_msi(device_t dev, struct intr_map_data_msi *msi_data, u_int *irqp,
    enum intr_polarity *polp, enum intr_trigger *trigp)
{
	struct gic_v3_irqsrc *gi;

	/* SPI-mapped MSI */
	gi = (struct gic_v3_irqsrc *)msi_data->isrc;
	if (gi == NULL)
		return (ENXIO);

	*irqp = gi->gi_irq;

	/* MSI/MSI-X interrupts are always edge triggered with high polarity */
	*polp = INTR_POLARITY_HIGH;
	*trigp = INTR_TRIGGER_EDGE;

	return (0);
}

/*
 * Common map-data decoder used by both gic_v3_map_intr() and
 * gic_v3_setup_intr(); validates the resulting IRQ/polarity/trigger.
 */
static int
do_gic_v3_map_intr(device_t dev, struct intr_map_data *data, u_int *irqp,
    enum intr_polarity *polp, enum intr_trigger *trigp)
{
	struct gic_v3_softc *sc;
	enum intr_polarity pol;
	enum intr_trigger trig;
	struct intr_map_data_msi *dam;
#ifdef FDT
	struct intr_map_data_fdt *daf;
#endif
	u_int irq;

	sc =
	    device_get_softc(dev);

	switch (data->type) {
#ifdef FDT
	case INTR_MAP_DATA_FDT:
		daf = (struct intr_map_data_fdt *)data;
		if (gic_map_fdt(dev, daf->ncells, daf->cells, &irq, &pol,
		    &trig) != 0)
			return (EINVAL);
		break;
#endif
	case INTR_MAP_DATA_MSI:
		/* SPI-mapped MSI */
		dam = (struct intr_map_data_msi *)data;
		if (gic_map_msi(dev, dam, &irq, &pol, &trig) != 0)
			return (EINVAL);
		break;
	default:
		return (EINVAL);
	}

	if (irq >= sc->gic_nirqs)
		return (EINVAL);
	switch (pol) {
	case INTR_POLARITY_CONFORM:
	case INTR_POLARITY_LOW:
	case INTR_POLARITY_HIGH:
		break;
	default:
		return (EINVAL);
	}
	switch (trig) {
	case INTR_TRIGGER_CONFORM:
	case INTR_TRIGGER_EDGE:
	case INTR_TRIGGER_LEVEL:
		break;
	default:
		return (EINVAL);
	}

	*irqp = irq;
	if (polp != NULL)
		*polp = pol;
	if (trigp != NULL)
		*trigp = trig;
	return (0);
}

/* PIC interface: resolve map data to the matching interrupt source. */
static int
gic_v3_map_intr(device_t dev, struct intr_map_data *data,
    struct intr_irqsrc **isrcp)
{
	struct gic_v3_softc *sc;
	int error;
	u_int irq;

	error = do_gic_v3_map_intr(dev, data, &irq, NULL, NULL);
	if (error == 0) {
		sc = device_get_softc(dev);
		*isrcp = GIC_INTR_ISRC(sc, irq);
	}
	return (error);
}

/*
 * PIC interface: record and program trigger/polarity for an interrupt
 * on first setup; subsequent setups must request an identical config.
 */
static int
gic_v3_setup_intr(device_t dev, struct intr_irqsrc *isrc,
    struct resource *res, struct intr_map_data *data)
{
	struct gic_v3_softc *sc = device_get_softc(dev);
	struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
	enum intr_trigger trig;
	enum intr_polarity pol;
	uint32_t reg;
	u_int irq;
	int error;

	if (data == NULL)
		return (ENOTSUP);

	error = do_gic_v3_map_intr(dev, data, &irq, &pol, &trig);
	if (error != 0)
		return (error);

	if (gi->gi_irq != irq || pol == INTR_POLARITY_CONFORM ||
	    trig == INTR_TRIGGER_CONFORM)
		return (EINVAL);

	/*
	 Compare config if this is not first setup. */
	if (isrc->isrc_handlers != 0) {
		if (pol != gi->gi_pol || trig != gi->gi_trig)
			return (EINVAL);
		else
			return (0);
	}

	gi->gi_pol = pol;
	gi->gi_trig = trig;

	/*
	 * XXX - In case that per CPU interrupt is going to be enabled in time
	 *       when SMP is already started, we need some IPI call which
	 *       enables it on others CPUs. Further, it's more complicated as
	 *       pic_enable_source() and pic_disable_source() should act on
	 *       per CPU basis only. Thus, it should be solved here somehow.
	 */
	if (isrc->isrc_flags & INTR_ISRCF_PPI)
		CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu);

	if (irq >= GIC_FIRST_PPI && irq <= GIC_LAST_SPI) {
		mtx_lock_spin(&sc->gic_mtx);

		/* Set the trigger and polarity */
		if (irq <= GIC_LAST_PPI)
			reg = gic_r_read(sc, 4,
			    GICR_SGI_BASE_SIZE + GICD_ICFGR(irq));
		else
			reg = gic_d_read(sc, 4, GICD_ICFGR(irq));
		/* Two config bits per interrupt; bit 1 selects edge. */
		if (trig == INTR_TRIGGER_LEVEL)
			reg &= ~(2 << ((irq % 16) * 2));
		else
			reg |= 2 << ((irq % 16) * 2);

		if (irq <= GIC_LAST_PPI) {
			gic_r_write(sc, 4,
			    GICR_SGI_BASE_SIZE + GICD_ICFGR(irq), reg);
			gic_v3_wait_for_rwp(sc, REDIST);
		} else {
			gic_d_write(sc, 4, GICD_ICFGR(irq), reg);
			gic_v3_wait_for_rwp(sc, DIST);
		}

		mtx_unlock_spin(&sc->gic_mtx);

		gic_v3_bind_intr(dev, isrc);
	}

	return (0);
}

/* PIC interface: forget recorded config once the last handler is gone. */
static int
gic_v3_teardown_intr(device_t dev, struct intr_irqsrc *isrc,
    struct resource *res, struct intr_map_data *data)
{
	struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;

	if (isrc->isrc_handlers == 0) {
		gi->gi_pol = INTR_POLARITY_CONFORM;
		gi->gi_trig = INTR_TRIGGER_CONFORM;
	}

	return (0);
}

/* Mask an interrupt in the Re-Distributor (SGI/PPI) or Distributor (SPI). */
static void
gic_v3_disable_intr(device_t dev, struct intr_irqsrc *isrc)
{
	struct gic_v3_softc *sc;
	struct gic_v3_irqsrc *gi;
	u_int irq;

	sc = device_get_softc(dev);
	gi =
	    (struct gic_v3_irqsrc *)isrc;
	irq = gi->gi_irq;

	if (irq <= GIC_LAST_PPI) {
		/* SGIs and PPIs in corresponding Re-Distributor */
		gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_ICENABLER(irq),
		    GICD_I_MASK(irq));
		gic_v3_wait_for_rwp(sc, REDIST);
	} else if (irq >= GIC_FIRST_SPI && irq <= GIC_LAST_SPI) {
		/* SPIs in distributor */
		gic_d_write(sc, 4, GICD_ICENABLER(irq), GICD_I_MASK(irq));
		gic_v3_wait_for_rwp(sc, DIST);
	} else
		panic("%s: Unsupported IRQ %u", __func__, irq);
}

/* Unmask an interrupt in the Re-Distributor (SGI/PPI) or Distributor (SPI). */
static void
gic_v3_enable_intr(device_t dev, struct intr_irqsrc *isrc)
{
	struct gic_v3_softc *sc;
	struct gic_v3_irqsrc *gi;
	u_int irq;

	sc = device_get_softc(dev);
	gi = (struct gic_v3_irqsrc *)isrc;
	irq = gi->gi_irq;

	if (irq <= GIC_LAST_PPI) {
		/* SGIs and PPIs in corresponding Re-Distributor */
		gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_ISENABLER(irq),
		    GICD_I_MASK(irq));
		gic_v3_wait_for_rwp(sc, REDIST);
	} else if (irq >= GIC_FIRST_SPI && irq <= GIC_LAST_SPI) {
		/* SPIs in distributor */
		gic_d_write(sc, 4, GICD_ISENABLER(irq), GICD_I_MASK(irq));
		gic_v3_wait_for_rwp(sc, DIST);
	} else
		panic("%s: Unsupported IRQ %u", __func__, irq);
}

/* Mask and EOI the interrupt before its ithread runs. */
static void
gic_v3_pre_ithread(device_t dev, struct intr_irqsrc *isrc)
{
	struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;

	gic_v3_disable_intr(dev, isrc);
	gic_icc_write(EOIR1, gi->gi_irq);
}

/* Re-enable the interrupt once its ithread has completed. */
static void
gic_v3_post_ithread(device_t dev, struct intr_irqsrc *isrc)
{

	gic_v3_enable_intr(dev, isrc);
}

/* EOI after a filter handler; edge interrupts were already EOId early. */
static void
gic_v3_post_filter(device_t dev, struct intr_irqsrc *isrc)
{
	struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;

	if (gi->gi_trig == INTR_TRIGGER_EDGE)
		return;

	gic_icc_write(EOIR1, gi->gi_irq);
}

/* Route an SPI to a single CPU (round-robin when no explicit binding). */
static int
gic_v3_bind_intr(device_t dev, struct intr_irqsrc *isrc)
{
	struct gic_v3_softc
	    *sc;
	struct gic_v3_irqsrc *gi;
	int cpu;

	gi = (struct gic_v3_irqsrc *)isrc;
	/* Only SPIs may be routed; SGIs/PPIs are per-CPU by design. */
	if (gi->gi_irq <= GIC_LAST_PPI)
		return (EINVAL);

	KASSERT(gi->gi_irq >= GIC_FIRST_SPI && gi->gi_irq <= GIC_LAST_SPI,
	    ("%s: Attempting to bind an invalid IRQ", __func__));

	sc = device_get_softc(dev);

	if (CPU_EMPTY(&isrc->isrc_cpu)) {
		gic_irq_cpu = intr_irq_next_cpu(gic_irq_cpu, &all_cpus);
		CPU_SETOF(gic_irq_cpu, &isrc->isrc_cpu);
		gic_d_write(sc, 4, GICD_IROUTER(gi->gi_irq),
		    CPU_AFFINITY(gic_irq_cpu));
	} else {
		/*
		 * We can only bind to a single CPU so select
		 * the first CPU found.
		 */
		cpu = CPU_FFS(&isrc->isrc_cpu) - 1;
		gic_d_write(sc, 4, GICD_IROUTER(gi->gi_irq), CPU_AFFINITY(cpu));
	}

	return (0);
}

#ifdef SMP
/*
 * Bring up the GIC on a secondary (AP) CPU: initialize its
 * Re-Distributor and CPU interface, then unmask per-CPU sources.
 */
static void
gic_v3_init_secondary(device_t dev)
{
	device_t child;
	struct gic_v3_softc *sc;
	gic_v3_initseq_t *init_func;
	struct intr_irqsrc *isrc;
	u_int cpu, irq;
	int err, i;

	sc = device_get_softc(dev);
	cpu = PCPU_GET(cpuid);

	/* Train init sequence for boot CPU */
	for (init_func = gic_v3_secondary_init; *init_func != NULL;
	    init_func++) {
		err = (*init_func)(sc);
		if (err != 0) {
			device_printf(dev,
			    "Could not initialize GIC for CPU%u\n", cpu);
			return;
		}
	}

	/* Unmask attached SGI interrupts. */
	for (irq = GIC_FIRST_SGI; irq <= GIC_LAST_SGI; irq++) {
		isrc = GIC_INTR_ISRC(sc, irq);
		if (intr_isrc_init_on_cpu(isrc, cpu))
			gic_v3_enable_intr(dev, isrc);
	}

	/* Unmask attached PPI interrupts.
	 */
	for (irq = GIC_FIRST_PPI; irq <= GIC_LAST_PPI; irq++) {
		isrc = GIC_INTR_ISRC(sc, irq);
		if (intr_isrc_init_on_cpu(isrc, cpu))
			gic_v3_enable_intr(dev, isrc);
	}

	/* Let child controllers (e.g. the ITS) set up this CPU too. */
	for (i = 0; i < sc->gic_nchildren; i++) {
		child = sc->gic_children[i];
		PIC_INIT_SECONDARY(child);
	}
}

/*
 * Send an SGI-based IPI to every CPU in "cpus" via ICC_SGI1R_EL1,
 * batching target CPUs that share an affinity group into one write.
 */
static void
gic_v3_ipi_send(device_t dev, struct intr_irqsrc *isrc, cpuset_t cpus,
    u_int ipi)
{
	struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
	uint64_t aff, val, irq;
	int i;

#define	GIC_AFF_MASK	(CPU_AFF3_MASK | CPU_AFF2_MASK | CPU_AFF1_MASK)
#define	GIC_AFFINITY(i)	(CPU_AFFINITY(i) & GIC_AFF_MASK)
	aff = GIC_AFFINITY(0);
	irq = gi->gi_irq;
	val = 0;

	/* Iterate through all CPUs in set */
	for (i = 0; i <= mp_maxid; i++) {
		/* Move to the next affinity group */
		if (aff != GIC_AFFINITY(i)) {
			/* Send the IPI */
			if (val != 0) {
				gic_icc_write(SGI1R, val);
				val = 0;
			}
			aff = GIC_AFFINITY(i);
		}

		/* Send the IPI to this cpu */
		if (CPU_ISSET(i, &cpus)) {
#define	ICC_SGI1R_AFFINITY(aff)					\
    (((uint64_t)CPU_AFF3(aff) << ICC_SGI1R_EL1_AFF3_SHIFT) |	\
     ((uint64_t)CPU_AFF2(aff) << ICC_SGI1R_EL1_AFF2_SHIFT) |	\
     ((uint64_t)CPU_AFF1(aff) << ICC_SGI1R_EL1_AFF1_SHIFT))
			/* Set the affinity when the first at this level */
			if (val == 0)
				val = ICC_SGI1R_AFFINITY(aff) |
				    irq << ICC_SGI1R_EL1_SGIID_SHIFT;
			/* Set the bit to send the IPI to the CPU */
			val |= 1 << CPU_AFF0(CPU_AFFINITY(i));
		}
	}

	/* Send the IPI to the last cpu affinity group */
	if (val != 0)
		gic_icc_write(SGI1R, val);
#undef GIC_AFF_MASK
#undef GIC_AFFINITY
}

/* Allocate the next unused SGI for the requested IPI number. */
static int
gic_v3_ipi_setup(device_t dev, u_int ipi, struct intr_irqsrc **isrcp)
{
	struct intr_irqsrc *isrc;
	struct gic_v3_softc *sc = device_get_softc(dev);

	if (sgi_first_unused > GIC_LAST_SGI)
		return (ENOSPC);

	isrc = GIC_INTR_ISRC(sc,
	    sgi_first_unused);
	sgi_to_ipi[sgi_first_unused++] = ipi;

	CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu);

	*isrcp = isrc;
	return (0);
}
#endif /* SMP */

/*
 * Helper routines
 */
/*
 * Spin (up to ~1s) until the Register Write Pending bit of the chosen
 * Distributor/Re-Distributor clears; panic on timeout.
 */
static void
gic_v3_wait_for_rwp(struct gic_v3_softc *sc, enum gic_v3_xdist xdist)
{
	struct resource *res;
	u_int cpuid;
	size_t us_left = 1000000;

	cpuid = PCPU_GET(cpuid);

	switch (xdist) {
	case DIST:
		res = sc->gic_dist;
		break;
	case REDIST:
		res = sc->gic_redists.pcpu[cpuid];
		break;
	default:
		KASSERT(0, ("%s: Attempt to wait for unknown RWP", __func__));
		return;
	}

	while ((bus_read_4(res, GICD_CTLR) & GICD_CTLR_RWP) != 0) {
		DELAY(1);
		if (us_left-- == 0)
			panic("GICD Register write pending for too long");
	}
}

/* CPU interface. */
static __inline void
gic_v3_cpu_priority(uint64_t mask)
{

	/* Set priority mask */
	gic_icc_write(PMR, mask & ICC_PMR_EL1_PRIO_MASK);
}

/*
 * Enable system-register access to the GIC CPU interface by setting
 * ICC_SRE_EL1.SRE, then verify the bit stuck (EL2 may disallow it).
 */
static int
gic_v3_cpu_enable_sre(struct gic_v3_softc *sc)
{
	uint64_t sre;
	u_int cpuid;

	cpuid = PCPU_GET(cpuid);
	/*
	 * Set the SRE bit to enable access to GIC CPU interface
	 * via system registers.
	 */
	sre = READ_SPECIALREG(icc_sre_el1);
	sre |= ICC_SRE_EL1_SRE;
	WRITE_SPECIALREG(icc_sre_el1, sre);
	isb();
	/*
	 * Now ensure that the bit is set.
	 */
	sre = READ_SPECIALREG(icc_sre_el1);
	if ((sre & ICC_SRE_EL1_SRE) == 0) {
		/* We are done.
		   This was disabled in EL2 */
		device_printf(sc->dev, "ERROR: CPU%u cannot enable CPU interface "
		    "via system registers\n", cpuid);
		return (ENXIO);
	} else if (bootverbose) {
		device_printf(sc->dev,
		    "CPU%u enabled CPU interface via system registers\n",
		    cpuid);
	}

	return (0);
}

/* Per-CPU interface setup: SRE, priority mask, EOI mode, group enable. */
static int
gic_v3_cpu_init(struct gic_v3_softc *sc)
{
	int err;

	/* Enable access to CPU interface via system registers */
	err = gic_v3_cpu_enable_sre(sc);
	if (err != 0)
		return (err);
	/* Priority mask to minimum - accept all interrupts */
	gic_v3_cpu_priority(GIC_PRIORITY_MIN);
	/* Disable EOI mode */
	gic_icc_clear(CTLR, ICC_CTLR_EL1_EOIMODE);
	/*
	 * Enable group 1 (insecure) interrupts.
	 * NOTE(review): the enable bit constant is named ICC_IGRPEN0_EL1_EN
	 * while the register written is IGRPEN1 — presumably both registers
	 * share the same EN bit position; confirm against gic_v3_reg.h.
	 */
	gic_icc_set(IGRPEN1, ICC_IGRPEN0_EL1_EN);

	return (0);
}

/* Distributor */
/*
 * One-time Distributor setup on the boot CPU: disable, configure all
 * SPIs (group, trigger, priority, masked), re-enable with ARE, and
 * route every SPI to the boot CPU.
 */
static int
gic_v3_dist_init(struct gic_v3_softc *sc)
{
	uint64_t aff;
	u_int i;

	/*
	 * 1. Disable the Distributor
	 */
	gic_d_write(sc, 4, GICD_CTLR, 0);
	gic_v3_wait_for_rwp(sc, DIST);

	/*
	 * 2. Configure the Distributor
	 */
	/* Set all SPIs to be Group 1 Non-secure */
	for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i += GICD_I_PER_IGROUPRn)
		gic_d_write(sc, 4, GICD_IGROUPR(i), 0xFFFFFFFF);

	/* Set all global interrupts to be level triggered, active low. */
	for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i += GICD_I_PER_ICFGRn)
		gic_d_write(sc, 4, GICD_ICFGR(i), 0x00000000);

	/* Set priority to all shared interrupts */
	for (i = GIC_FIRST_SPI;
	    i < sc->gic_nirqs; i += GICD_I_PER_IPRIORITYn) {
		/* Set highest priority */
		gic_d_write(sc, 4, GICD_IPRIORITYR(i), GIC_PRIORITY_MAX);
	}

	/*
	 * Disable all interrupts. Leave PPI and SGIs as they are enabled in
	 * Re-Distributor registers.
	 */
	for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i += GICD_I_PER_ISENABLERn)
		gic_d_write(sc, 4, GICD_ICENABLER(i), 0xFFFFFFFF);

	gic_v3_wait_for_rwp(sc, DIST);

	/*
	 * 3. Enable Distributor
	 */
	/* Enable Distributor with ARE, Group 1 */
	gic_d_write(sc, 4, GICD_CTLR, GICD_CTLR_ARE_NS | GICD_CTLR_G1A |
	    GICD_CTLR_G1);

	/*
	 * 4. Route all interrupts to boot CPU.
	 */
	aff = CPU_AFFINITY(0);
	for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i++)
		gic_d_write(sc, 4, GICD_IROUTER(i), aff);

	return (0);
}

/* Re-Distributor */
/*
 * Allocate a struct resource placeholder for every possible CPU's
 * Re-Distributor; gic_v3_redist_find() fills them in later.
 */
static int
gic_v3_redist_alloc(struct gic_v3_softc *sc)
{
	u_int cpuid;

	/* Allocate struct resource for all CPU's Re-Distributor registers */
	for (cpuid = 0; cpuid <= mp_maxid; cpuid++)
		if (CPU_ISSET(cpuid, &all_cpus) != 0)
			sc->gic_redists.pcpu[cpuid] =
				malloc(sizeof(*sc->gic_redists.pcpu[0]),
				    M_GIC_V3, M_WAITOK);
		else
			sc->gic_redists.pcpu[cpuid] = NULL;
	return (0);
}

/*
 * Walk the Re-Distributor regions looking for the frame whose GICR_TYPER
 * affinity matches the current CPU, and record it in pcpu[cpuid].
 */
static int
gic_v3_redist_find(struct gic_v3_softc *sc)
{
	struct resource r_res;
	bus_space_handle_t r_bsh;
	uint64_t aff;
	uint64_t typer;
	uint32_t pidr2;
	u_int cpuid;
	size_t i;

	cpuid = PCPU_GET(cpuid);

	aff = CPU_AFFINITY(cpuid);
	/* Affinity in format for comparison with typer */
	aff = (CPU_AFF3(aff) << 24) | (CPU_AFF2(aff) << 16) |
	    (CPU_AFF1(aff) << 8) | CPU_AFF0(aff);

	if (bootverbose) {
		device_printf(sc->dev,
		    "Start searching for Re-Distributor\n");
	}
	/* Iterate through Re-Distributor regions */
	for (i = 0; i < sc->gic_redists.nregions; i++) {
		/* Take a copy of the region's resource */
		r_res = *sc->gic_redists.regions[i];
		r_bsh = rman_get_bushandle(&r_res);

		pidr2 = bus_read_4(&r_res, GICR_PIDR2);
		switch (GICR_PIDR2_ARCH(pidr2)) {
		case GICR_PIDR2_ARCH_GICv3: /*
		    fall through */
		case GICR_PIDR2_ARCH_GICv4:
			break;
		default:
			device_printf(sc->dev,
			    "No Re-Distributor found for CPU%u\n", cpuid);
			return (ENODEV);
		}

		/* Step through the frames in this region until LAST is set. */
		do {
			typer = bus_read_8(&r_res, GICR_TYPER);
			if ((typer >> GICR_TYPER_AFF_SHIFT) == aff) {
				KASSERT(sc->gic_redists.pcpu[cpuid] != NULL,
				    ("Invalid pointer to per-CPU redistributor"));
				/* Copy res contents to its final destination */
				*sc->gic_redists.pcpu[cpuid] = r_res;
				if (bootverbose) {
					device_printf(sc->dev,
					    "CPU%u Re-Distributor has been found\n",
					    cpuid);
				}
				return (0);
			}

			r_bsh += (GICR_RD_BASE_SIZE + GICR_SGI_BASE_SIZE);
			if ((typer & GICR_TYPER_VLPIS) != 0) {
				r_bsh +=
				    (GICR_VLPI_BASE_SIZE + GICR_RESERVED_SIZE);
			}

			rman_set_bushandle(&r_res, r_bsh);
		} while ((typer & GICR_TYPER_LAST) == 0);
	}

	device_printf(sc->dev, "No Re-Distributor found for CPU%u\n", cpuid);
	return (ENXIO);
}

/*
 * Wake the current CPU's Re-Distributor by clearing ProcessorSleep and
 * waiting (up to ~1s) for ChildrenAsleep to clear; panic on timeout.
 */
static int
gic_v3_redist_wake(struct gic_v3_softc *sc)
{
	uint32_t waker;
	size_t us_left = 1000000;

	waker = gic_r_read(sc, 4, GICR_WAKER);
	/* Wake up Re-Distributor for this CPU */
	waker &= ~GICR_WAKER_PS;
	gic_r_write(sc, 4, GICR_WAKER, waker);
	/*
	 * When clearing ProcessorSleep bit it is required to wait for
	 * ChildrenAsleep to become zero following the processor power-on.
	 */
	while ((gic_r_read(sc, 4, GICR_WAKER) & GICR_WAKER_CA) != 0) {
		DELAY(1);
		if (us_left-- == 0) {
			panic("Could not wake Re-Distributor for CPU%u",
			    PCPU_GET(cpuid));
		}
	}

	if (bootverbose) {
		device_printf(sc->dev, "CPU%u Re-Distributor woke up\n",
		    PCPU_GET(cpuid));
	}

	return (0);
}

/*
 * Per-CPU Re-Distributor setup: locate and wake the frame, then set
 * group, enable state and priorities for this CPU's SGIs/PPIs.
 */
static int
gic_v3_redist_init(struct gic_v3_softc *sc)
{
	int err;
	size_t i;

	err = gic_v3_redist_find(sc);
	if (err != 0)
		return (err);

	err = gic_v3_redist_wake(sc);
	if (err != 0)
		return (err);

	/* Configure SGIs and PPIs to be Group1 Non-secure */
	gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICR_IGROUPR0,
	    0xFFFFFFFF);

	/* Disable PPIs; they are unmasked later per-handler. */
	gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICR_ICENABLER0,
	    GICR_I_ENABLER_PPI_MASK);
	/* Enable SGIs */
	gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICR_ISENABLER0,
	    GICR_I_ENABLER_SGI_MASK);

	/* Set priority for SGIs and PPIs */
	for (i = 0; i <= GIC_LAST_PPI; i += GICR_I_PER_IPRIORITYn) {
		gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_IPRIORITYR(i),
		    GIC_PRIORITY_MAX);
	}

	gic_v3_wait_for_rwp(sc, REDIST);

	return (0);
}