1 /*- 2 * Copyright (c) 2015, 2020 Ruslan Bukin <br@bsdpad.com> 3 * Copyright (c) 2014 The FreeBSD Foundation 4 * All rights reserved. 5 * 6 * This software was developed by Semihalf under 7 * the sponsorship of the FreeBSD Foundation. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 28 * SUCH DAMAGE. 29 */ 30 31 /* Generic ECAM PCIe driver */ 32 33 #include <sys/cdefs.h> 34 #include "opt_platform.h" 35 36 #include <sys/param.h> 37 #include <sys/systm.h> 38 #include <sys/malloc.h> 39 #include <sys/kernel.h> 40 #include <sys/rman.h> 41 #include <sys/module.h> 42 #include <sys/bus.h> 43 #include <sys/endian.h> 44 45 #include <dev/pci/pcivar.h> 46 #include <dev/pci/pcireg.h> 47 #include <dev/pci/pcib_private.h> 48 #include <dev/pci/pci_host_generic.h> 49 50 #include <machine/bus.h> 51 #include <machine/intr.h> 52 53 #include "pcib_if.h" 54 55 #if defined(VM_MEMATTR_DEVICE_NP) 56 #define PCI_UNMAPPED 57 #define PCI_RF_FLAGS RF_UNMAPPED 58 #else 59 #define PCI_RF_FLAGS 0 60 #endif 61 62 63 /* Forward prototypes */ 64 65 static uint32_t generic_pcie_read_config(device_t dev, u_int bus, u_int slot, 66 u_int func, u_int reg, int bytes); 67 static void generic_pcie_write_config(device_t dev, u_int bus, u_int slot, 68 u_int func, u_int reg, uint32_t val, int bytes); 69 static int generic_pcie_maxslots(device_t dev); 70 static int generic_pcie_read_ivar(device_t dev, device_t child, int index, 71 uintptr_t *result); 72 static int generic_pcie_write_ivar(device_t dev, device_t child, int index, 73 uintptr_t value); 74 75 int 76 pci_host_generic_core_attach(device_t dev) 77 { 78 #ifdef PCI_UNMAPPED 79 struct resource_map_request req; 80 struct resource_map map; 81 #endif 82 struct generic_pcie_core_softc *sc; 83 uint64_t phys_base; 84 uint64_t pci_base; 85 uint64_t size; 86 char buf[64]; 87 int domain, error; 88 int flags, rid, tuple, type; 89 90 sc = device_get_softc(dev); 91 sc->dev = dev; 92 93 /* Create the parent DMA tag to pass down the coherent flag */ 94 error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ 95 1, 0, /* alignment, bounds */ 96 BUS_SPACE_MAXADDR, /* lowaddr */ 97 BUS_SPACE_MAXADDR, /* highaddr */ 98 NULL, NULL, /* filter, filterarg */ 99 BUS_SPACE_MAXSIZE, /* maxsize */ 100 BUS_SPACE_UNRESTRICTED, /* nsegments */ 101 BUS_SPACE_MAXSIZE, /* maxsegsize */ 102 sc->coherent ? BUS_DMA_COHERENT : 0, /* flags */ 103 NULL, NULL, /* lockfunc, lockarg */ 104 &sc->dmat); 105 if (error != 0) 106 return (error); 107 108 /* 109 * Attempt to set the domain. If it's missing, or we are unable to 110 * set it then memory allocations may be placed in the wrong domain. 111 */ 112 if (bus_get_domain(dev, &domain) == 0) 113 (void)bus_dma_tag_set_domain(sc->dmat, domain); 114 115 if ((sc->quirks & PCIE_CUSTOM_CONFIG_SPACE_QUIRK) == 0) { 116 rid = 0; 117 sc->res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 118 PCI_RF_FLAGS | RF_ACTIVE); 119 if (sc->res == NULL) { 120 device_printf(dev, "could not allocate memory.\n"); 121 error = ENXIO; 122 goto err_resource; 123 } 124 #ifdef PCI_UNMAPPED 125 resource_init_map_request(&req); 126 req.memattr = VM_MEMATTR_DEVICE_NP; 127 error = bus_map_resource(dev, SYS_RES_MEMORY, sc->res, &req, 128 &map); 129 if (error != 0) { 130 device_printf(dev, "could not map memory.\n"); 131 return (error); 132 } 133 rman_set_mapping(sc->res, &map); 134 #endif 135 } 136 137 sc->has_pmem = false; 138 sc->pmem_rman.rm_type = RMAN_ARRAY; 139 snprintf(buf, sizeof(buf), "%s prefetch window", 140 device_get_nameunit(dev)); 141 sc->pmem_rman.rm_descr = strdup(buf, M_DEVBUF); 142 143 sc->mem_rman.rm_type = RMAN_ARRAY; 144 snprintf(buf, sizeof(buf), "%s memory window", 145 device_get_nameunit(dev)); 146 sc->mem_rman.rm_descr = strdup(buf, M_DEVBUF); 147 148 sc->io_rman.rm_type = RMAN_ARRAY; 149 snprintf(buf, sizeof(buf), "%s I/O port window", 150 device_get_nameunit(dev)); 151 sc->io_rman.rm_descr = strdup(buf, M_DEVBUF); 152 153 /* Initialize rman and allocate memory regions */ 154 error = rman_init(&sc->pmem_rman); 155 if (error) { 156 device_printf(dev, "rman_init() failed. error = %d\n", error); 157 goto err_pmem_rman; 158 } 159 160 error = rman_init(&sc->mem_rman); 161 if (error) { 162 device_printf(dev, "rman_init() failed. error = %d\n", error); 163 goto err_mem_rman; 164 } 165 166 error = rman_init(&sc->io_rman); 167 if (error) { 168 device_printf(dev, "rman_init() failed. error = %d\n", error); 169 goto err_io_rman; 170 } 171 172 for (tuple = 0; tuple < MAX_RANGES_TUPLES; tuple++) { 173 phys_base = sc->ranges[tuple].phys_base; 174 pci_base = sc->ranges[tuple].pci_base; 175 size = sc->ranges[tuple].size; 176 rid = tuple + 1; 177 if (size == 0) 178 continue; /* empty range element */ 179 switch (FLAG_TYPE(sc->ranges[tuple].flags)) { 180 case FLAG_TYPE_PMEM: 181 sc->has_pmem = true; 182 flags = RF_PREFETCHABLE; 183 type = SYS_RES_MEMORY; 184 error = rman_manage_region(&sc->pmem_rman, 185 pci_base, pci_base + size - 1); 186 break; 187 case FLAG_TYPE_MEM: 188 flags = 0; 189 type = SYS_RES_MEMORY; 190 error = rman_manage_region(&sc->mem_rman, 191 pci_base, pci_base + size - 1); 192 break; 193 case FLAG_TYPE_IO: 194 flags = 0; 195 type = SYS_RES_IOPORT; 196 error = rman_manage_region(&sc->io_rman, 197 pci_base, pci_base + size - 1); 198 break; 199 default: 200 continue; 201 } 202 if (error) { 203 device_printf(dev, "rman_manage_region() failed." 204 "error = %d\n", error); 205 goto err_rman_manage; 206 } 207 error = bus_set_resource(dev, type, rid, phys_base, size); 208 if (error != 0) { 209 device_printf(dev, 210 "failed to set resource for range %d: %d\n", tuple, 211 error); 212 goto err_rman_manage; 213 } 214 sc->ranges[tuple].res = bus_alloc_resource_any(dev, type, &rid, 215 RF_ACTIVE | RF_UNMAPPED | flags); 216 if (sc->ranges[tuple].res == NULL) { 217 device_printf(dev, 218 "failed to allocate resource for range %d\n", tuple); 219 goto err_rman_manage; 220 } 221 } 222 223 return (0); 224 225 err_rman_manage: 226 for (tuple = 0; tuple < MAX_RANGES_TUPLES; tuple++) { 227 if (sc->ranges[tuple].size == 0) 228 continue; /* empty range element */ 229 switch (FLAG_TYPE(sc->ranges[tuple].flags)) { 230 case FLAG_TYPE_PMEM: 231 case FLAG_TYPE_MEM: 232 type = SYS_RES_MEMORY; 233 break; 234 case FLAG_TYPE_IO: 235 type = SYS_RES_IOPORT; 236 break; 237 default: 238 continue; 239 } 240 if (sc->ranges[tuple].res != NULL) 241 bus_release_resource(dev, type, tuple + 1, 242 sc->ranges[tuple].res); 243 bus_delete_resource(dev, type, tuple + 1); 244 } 245 rman_fini(&sc->io_rman); 246 err_io_rman: 247 rman_fini(&sc->mem_rman); 248 err_mem_rman: 249 rman_fini(&sc->pmem_rman); 250 err_pmem_rman: 251 free(__DECONST(char *, sc->io_rman.rm_descr), M_DEVBUF); 252 free(__DECONST(char *, sc->mem_rman.rm_descr), M_DEVBUF); 253 free(__DECONST(char *, sc->pmem_rman.rm_descr), M_DEVBUF); 254 if (sc->res != NULL) 255 bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->res); 256 err_resource: 257 bus_dma_tag_destroy(sc->dmat); 258 return (error); 259 } 260 261 int 262 pci_host_generic_core_detach(device_t dev) 263 { 264 struct generic_pcie_core_softc *sc; 265 int error, tuple, type; 266 267 sc = device_get_softc(dev); 268 269 error = bus_generic_detach(dev); 270 if (error != 0) 271 return (error); 272 273 for (tuple = 0; tuple < MAX_RANGES_TUPLES; tuple++) { 274 if (sc->ranges[tuple].size == 0) 275 continue; /* empty range element */ 276 switch (FLAG_TYPE(sc->ranges[tuple].flags)) { 277 case FLAG_TYPE_PMEM: 278 case FLAG_TYPE_MEM: 279 type = SYS_RES_MEMORY; 280 break; 281 case FLAG_TYPE_IO: 282 type = SYS_RES_IOPORT; 283 break; 284 default: 285 continue; 286 } 287 if (sc->ranges[tuple].res != NULL) 288 bus_release_resource(dev, type, tuple + 1, 289 sc->ranges[tuple].res); 290 bus_delete_resource(dev, type, tuple + 1); 291 } 292 rman_fini(&sc->io_rman); 293 rman_fini(&sc->mem_rman); 294 rman_fini(&sc->pmem_rman); 295 free(__DECONST(char *, sc->io_rman.rm_descr), M_DEVBUF); 296 free(__DECONST(char *, sc->mem_rman.rm_descr), M_DEVBUF); 297 free(__DECONST(char *, sc->pmem_rman.rm_descr), M_DEVBUF); 298 if (sc->res != NULL) 299 bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->res); 300 bus_dma_tag_destroy(sc->dmat); 301 302 return (0); 303 } 304 305 static uint32_t 306 generic_pcie_read_config(device_t dev, u_int bus, u_int slot, 307 u_int func, u_int reg, int bytes) 308 { 309 struct generic_pcie_core_softc *sc; 310 uint64_t offset; 311 uint32_t data; 312 313 sc = device_get_softc(dev); 314 if ((bus < sc->bus_start) || (bus > sc->bus_end)) 315 return (~0U); 316 if ((slot > PCI_SLOTMAX) || (func > PCI_FUNCMAX) || 317 (reg > PCIE_REGMAX)) 318 return (~0U); 319 if ((sc->quirks & PCIE_ECAM_DESIGNWARE_QUIRK) && bus == 0 && slot > 0) 320 return (~0U); 321 322 offset = PCIE_ADDR_OFFSET(bus - sc->bus_start, slot, func, reg); 323 324 switch (bytes) { 325 case 1: 326 data = bus_read_1(sc->res, offset); 327 break; 328 case 2: 329 data = le16toh(bus_read_2(sc->res, offset)); 330 break; 331 case 4: 332 data = le32toh(bus_read_4(sc->res, offset)); 333 break; 334 default: 335 return (~0U); 336 } 337 338 return (data); 339 } 340 341 static void 342 generic_pcie_write_config(device_t dev, u_int bus, u_int slot, 343 u_int func, u_int reg, uint32_t val, int bytes) 344 { 345 struct generic_pcie_core_softc *sc; 346 uint64_t offset; 347 348 sc = device_get_softc(dev); 349 if ((bus < sc->bus_start) || (bus > sc->bus_end)) 350 return; 351 if ((slot > PCI_SLOTMAX) || (func > PCI_FUNCMAX) || 352 (reg > PCIE_REGMAX)) 353 return; 354 355 offset = PCIE_ADDR_OFFSET(bus - sc->bus_start, slot, func, reg); 356 357 switch (bytes) { 358 case 1: 359 bus_write_1(sc->res, offset, val); 360 break; 361 case 2: 362 bus_write_2(sc->res, offset, htole16(val)); 363 break; 364 case 4: 365 bus_write_4(sc->res, offset, htole32(val)); 366 break; 367 default: 368 return; 369 } 370 } 371 372 static int 373 generic_pcie_maxslots(device_t dev) 374 { 375 376 return (31); /* max slots per bus acc. to standard */ 377 } 378 379 static int 380 generic_pcie_read_ivar(device_t dev, device_t child, int index, 381 uintptr_t *result) 382 { 383 struct generic_pcie_core_softc *sc; 384 385 sc = device_get_softc(dev); 386 387 if (index == PCIB_IVAR_BUS) { 388 *result = sc->bus_start; 389 return (0); 390 } 391 392 if (index == PCIB_IVAR_DOMAIN) { 393 *result = sc->ecam; 394 return (0); 395 } 396 397 if (bootverbose) 398 device_printf(dev, "ERROR: Unknown index %d.\n", index); 399 return (ENOENT); 400 } 401 402 static int 403 generic_pcie_write_ivar(device_t dev, device_t child, int index, 404 uintptr_t value) 405 { 406 407 return (ENOENT); 408 } 409 410 static struct rman * 411 generic_pcie_get_rman(device_t dev, int type, u_int flags) 412 { 413 struct generic_pcie_core_softc *sc = device_get_softc(dev); 414 415 switch (type) { 416 case SYS_RES_IOPORT: 417 return (&sc->io_rman); 418 case SYS_RES_MEMORY: 419 if (sc->has_pmem && (flags & RF_PREFETCHABLE) != 0) 420 return (&sc->pmem_rman); 421 return (&sc->mem_rman); 422 default: 423 break; 424 } 425 426 return (NULL); 427 } 428 429 int 430 pci_host_generic_core_release_resource(device_t dev, device_t child, int type, 431 int rid, struct resource *res) 432 { 433 struct generic_pcie_core_softc *sc; 434 435 sc = device_get_softc(dev); 436 437 #if defined(NEW_PCIB) && defined(PCI_RES_BUS) 438 if (type == PCI_RES_BUS) { 439 return (pci_domain_release_bus(sc->ecam, child, rid, res)); 440 } 441 #endif 442 return (bus_generic_rman_release_resource(dev, child, type, rid, res)); 443 } 444 445 static struct pcie_range * 446 generic_pcie_containing_range(device_t dev, int type, rman_res_t start, 447 rman_res_t end) 448 { 449 struct generic_pcie_core_softc *sc = device_get_softc(dev); 450 uint64_t pci_base; 451 uint64_t size; 452 int i, space; 453 454 switch (type) { 455 case SYS_RES_IOPORT: 456 case SYS_RES_MEMORY: 457 break; 458 default: 459 return (NULL); 460 } 461 462 for (i = 0; i < MAX_RANGES_TUPLES; i++) { 463 pci_base = sc->ranges[i].pci_base; 464 size = sc->ranges[i].size; 465 if (size == 0) 466 continue; /* empty range element */ 467 468 if (start < pci_base || end >= pci_base + size) 469 continue; 470 471 switch (FLAG_TYPE(sc->ranges[i].flags)) { 472 case FLAG_TYPE_MEM: 473 case FLAG_TYPE_PMEM: 474 space = SYS_RES_MEMORY; 475 break; 476 case FLAG_TYPE_IO: 477 space = SYS_RES_IOPORT; 478 break; 479 default: 480 continue; 481 } 482 483 if (type == space) 484 return (&sc->ranges[i]); 485 } 486 return (NULL); 487 } 488 489 static int 490 generic_pcie_translate_resource_common(device_t dev, int type, rman_res_t start, 491 rman_res_t end, rman_res_t *new_start, rman_res_t *new_end) 492 { 493 struct pcie_range *range; 494 495 /* Translate the address from a PCI address to a physical address */ 496 switch (type) { 497 case SYS_RES_IOPORT: 498 case SYS_RES_MEMORY: 499 range = generic_pcie_containing_range(dev, type, start, end); 500 if (range == NULL) 501 return (ENOENT); 502 if (range != NULL) { 503 *new_start = start - range->pci_base + range->phys_base; 504 *new_end = end - range->pci_base + range->phys_base; 505 } 506 break; 507 default: 508 /* No translation for non-memory types */ 509 *new_start = start; 510 *new_end = end; 511 break; 512 } 513 514 return (0); 515 } 516 517 static int 518 generic_pcie_translate_resource(device_t bus, int type, 519 rman_res_t start, rman_res_t *newstart) 520 { 521 rman_res_t newend; /* unused */ 522 523 return (generic_pcie_translate_resource_common( 524 bus, type, start, 0, newstart, &newend)); 525 } 526 527 struct resource * 528 pci_host_generic_core_alloc_resource(device_t dev, device_t child, int type, 529 int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) 530 { 531 struct generic_pcie_core_softc *sc; 532 struct resource *res; 533 534 sc = device_get_softc(dev); 535 536 switch (type) { 537 #if defined(NEW_PCIB) && defined(PCI_RES_BUS) 538 case PCI_RES_BUS: 539 res = pci_domain_alloc_bus(sc->ecam, child, rid, start, end, 540 count, flags); 541 break; 542 #endif 543 case SYS_RES_IOPORT: 544 case SYS_RES_MEMORY: 545 res = bus_generic_rman_alloc_resource(dev, child, type, rid, 546 start, end, count, flags); 547 break; 548 default: 549 res = bus_generic_alloc_resource(dev, child, type, rid, start, 550 end, count, flags); 551 break; 552 } 553 if (res == NULL) { 554 device_printf(dev, "%s FAIL: type=%d, rid=%d, " 555 "start=%016jx, end=%016jx, count=%016jx, flags=%x\n", 556 __func__, type, *rid, start, end, count, flags); 557 } 558 return (res); 559 } 560 561 static int 562 generic_pcie_activate_resource(device_t dev, device_t child, int type, 563 int rid, struct resource *r) 564 { 565 #if defined(NEW_PCIB) && defined(PCI_RES_BUS) 566 struct generic_pcie_core_softc *sc; 567 568 sc = device_get_softc(dev); 569 #endif 570 switch (type) { 571 #if defined(NEW_PCIB) && defined(PCI_RES_BUS) 572 case PCI_RES_BUS: 573 return (pci_domain_activate_bus(sc->ecam, child, rid, r)); 574 #endif 575 case SYS_RES_IOPORT: 576 case SYS_RES_MEMORY: 577 return (bus_generic_rman_activate_resource(dev, child, type, 578 rid, r)); 579 default: 580 return (bus_generic_activate_resource(dev, child, type, rid, 581 r)); 582 } 583 } 584 585 static int 586 generic_pcie_deactivate_resource(device_t dev, device_t child, int type, 587 int rid, struct resource *r) 588 { 589 #if defined(NEW_PCIB) && defined(PCI_RES_BUS) 590 struct generic_pcie_core_softc *sc; 591 592 sc = device_get_softc(dev); 593 #endif 594 switch (type) { 595 #if defined(NEW_PCIB) && defined(PCI_RES_BUS) 596 case PCI_RES_BUS: 597 return (pci_domain_deactivate_bus(sc->ecam, child, rid, r)); 598 #endif 599 case SYS_RES_IOPORT: 600 case SYS_RES_MEMORY: 601 return (bus_generic_rman_deactivate_resource(dev, child, type, 602 rid, r)); 603 default: 604 return (bus_generic_deactivate_resource(dev, child, type, rid, 605 r)); 606 } 607 } 608 609 static int 610 generic_pcie_adjust_resource(device_t dev, device_t child, int type, 611 struct resource *res, rman_res_t start, rman_res_t end) 612 { 613 #if defined(NEW_PCIB) && defined(PCI_RES_BUS) 614 struct generic_pcie_core_softc *sc; 615 616 sc = device_get_softc(dev); 617 #endif 618 switch (type) { 619 #if defined(NEW_PCIB) && defined(PCI_RES_BUS) 620 case PCI_RES_BUS: 621 return (pci_domain_adjust_bus(sc->ecam, child, res, start, 622 end)); 623 #endif 624 case SYS_RES_IOPORT: 625 case SYS_RES_MEMORY: 626 return (bus_generic_rman_adjust_resource(dev, child, type, res, 627 start, end)); 628 default: 629 return (bus_generic_adjust_resource(dev, child, type, res, 630 start, end)); 631 } 632 } 633 634 static int 635 generic_pcie_map_resource(device_t dev, device_t child, int type, 636 struct resource *r, struct resource_map_request *argsp, 637 struct resource_map *map) 638 { 639 struct resource_map_request args; 640 struct pcie_range *range; 641 rman_res_t length, start; 642 int error; 643 644 switch (type) { 645 #if defined(NEW_PCIB) && defined(PCI_RES_BUS) 646 case PCI_RES_BUS: 647 return (EINVAL); 648 #endif 649 case SYS_RES_IOPORT: 650 case SYS_RES_MEMORY: 651 break; 652 default: 653 return (bus_generic_map_resource(dev, child, type, r, argsp, 654 map)); 655 } 656 657 /* Resources must be active to be mapped. */ 658 if (!(rman_get_flags(r) & RF_ACTIVE)) 659 return (ENXIO); 660 661 resource_init_map_request(&args); 662 error = resource_validate_map_request(r, argsp, &args, &start, &length); 663 if (error) 664 return (error); 665 666 range = generic_pcie_containing_range(dev, type, rman_get_start(r), 667 rman_get_end(r)); 668 if (range == NULL || range->res == NULL) 669 return (ENOENT); 670 671 args.offset = start - range->pci_base; 672 args.length = length; 673 return (bus_generic_map_resource(dev, child, type, range->res, &args, 674 map)); 675 } 676 677 static int 678 generic_pcie_unmap_resource(device_t dev, device_t child, int type, 679 struct resource *r, struct resource_map *map) 680 { 681 struct pcie_range *range; 682 683 switch (type) { 684 #if defined(NEW_PCIB) && defined(PCI_RES_BUS) 685 case PCI_RES_BUS: 686 return (EINVAL); 687 #endif 688 case SYS_RES_IOPORT: 689 case SYS_RES_MEMORY: 690 range = generic_pcie_containing_range(dev, type, 691 rman_get_start(r), rman_get_end(r)); 692 if (range == NULL || range->res == NULL) 693 return (ENOENT); 694 r = range->res; 695 break; 696 default: 697 break; 698 } 699 return (bus_generic_unmap_resource(dev, child, type, r, map)); 700 } 701 702 static bus_dma_tag_t 703 generic_pcie_get_dma_tag(device_t dev, device_t child) 704 { 705 struct generic_pcie_core_softc *sc; 706 707 sc = device_get_softc(dev); 708 return (sc->dmat); 709 } 710 711 static device_method_t generic_pcie_methods[] = { 712 DEVMETHOD(device_attach, pci_host_generic_core_attach), 713 DEVMETHOD(device_detach, pci_host_generic_core_detach), 714 715 DEVMETHOD(bus_get_rman, generic_pcie_get_rman), 716 DEVMETHOD(bus_read_ivar, generic_pcie_read_ivar), 717 DEVMETHOD(bus_write_ivar, generic_pcie_write_ivar), 718 DEVMETHOD(bus_alloc_resource, pci_host_generic_core_alloc_resource), 719 DEVMETHOD(bus_adjust_resource, generic_pcie_adjust_resource), 720 DEVMETHOD(bus_activate_resource, generic_pcie_activate_resource), 721 DEVMETHOD(bus_deactivate_resource, generic_pcie_deactivate_resource), 722 DEVMETHOD(bus_release_resource, pci_host_generic_core_release_resource), 723 DEVMETHOD(bus_translate_resource, generic_pcie_translate_resource), 724 DEVMETHOD(bus_map_resource, generic_pcie_map_resource), 725 DEVMETHOD(bus_unmap_resource, generic_pcie_unmap_resource), 726 DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), 727 DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), 728 729 DEVMETHOD(bus_get_dma_tag, generic_pcie_get_dma_tag), 730 731 /* pcib interface */ 732 DEVMETHOD(pcib_maxslots, generic_pcie_maxslots), 733 DEVMETHOD(pcib_read_config, generic_pcie_read_config), 734 DEVMETHOD(pcib_write_config, generic_pcie_write_config), 735 736 DEVMETHOD_END 737 }; 738 739 DEFINE_CLASS_0(pcib, generic_pcie_core_driver, 740 generic_pcie_methods, sizeof(struct generic_pcie_core_softc)); 741