/*-
 * Copyright (c) 2015-2016 Mellanox Technologies, Ltd.
 * All rights reserved.
 * Copyright (c) 2020-2022 The FreeBSD Foundation
 *
 * Portions of this software were developed by Björn Zeeb
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filio.h>
#include <sys/pciio.h>
#include <sys/pctrie.h>
#include <sys/rwlock.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/stdarg.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pci_private.h>
#include <dev/pci/pci_iov.h>
#include <dev/backlight/backlight.h>

#include <linux/kobject.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/file.h>
#include <linux/sysfs.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/pci.h>
#include <linux/compat.h>

#include <linux/backlight.h>

#include "backlight_if.h"
#include "pcib_if.h"

/* Undef the linux function macro defined in linux/pci.h */
#undef pci_get_class

static device_probe_t linux_pci_probe;
static device_attach_t linux_pci_attach;
static device_detach_t linux_pci_detach;
static device_suspend_t linux_pci_suspend;
static device_resume_t linux_pci_resume;
static device_shutdown_t linux_pci_shutdown;
static pci_iov_init_t linux_pci_iov_init;
static pci_iov_uninit_t linux_pci_iov_uninit;
static pci_iov_add_vf_t linux_pci_iov_add_vf;
static int linux_backlight_get_status(device_t dev, struct backlight_props *props);
static int linux_backlight_update_status(device_t dev, struct backlight_props *props);
static int linux_backlight_get_info(device_t dev, struct backlight_info *info);
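/*
 * The method table below is what turns a LinuxKPI pci_driver into a native
 * newbus PCI driver: each FreeBSD bus (and backlight) method is backed by a
 * linux_pci_*/linux_backlight_* shim that forwards into the Linux-style
 * driver callbacks.
 */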
static device_method_t pci_methods[] = {
	DEVMETHOD(device_probe, linux_pci_probe),
	DEVMETHOD(device_attach, linux_pci_attach),
	DEVMETHOD(device_detach, linux_pci_detach),
	DEVMETHOD(device_suspend, linux_pci_suspend),
	DEVMETHOD(device_resume, linux_pci_resume),
	DEVMETHOD(device_shutdown, linux_pci_shutdown),
	DEVMETHOD(pci_iov_init, linux_pci_iov_init),
	DEVMETHOD(pci_iov_uninit, linux_pci_iov_uninit),
	DEVMETHOD(pci_iov_add_vf, linux_pci_iov_add_vf),

	/* backlight interface */
	DEVMETHOD(backlight_update_status, linux_backlight_update_status),
	DEVMETHOD(backlight_get_status, linux_backlight_get_status),
	DEVMETHOD(backlight_get_info, linux_backlight_get_info),
	DEVMETHOD_END
};

struct linux_dma_priv {
	uint64_t	dma_mask;
	bus_dma_tag_t	dmat;
	uint64_t	dma_coherent_mask;
	bus_dma_tag_t	dmat_coherent;
	struct mtx	lock;
	struct pctrie	ptree;
};
#define	DMA_PRIV_LOCK(priv)	mtx_lock(&(priv)->lock)
#define	DMA_PRIV_UNLOCK(priv)	mtx_unlock(&(priv)->lock)

static int
linux_pdev_dma_uninit(struct pci_dev *pdev)
{
	struct linux_dma_priv *priv;

	priv = pdev->dev.dma_priv;
	if (priv->dmat)
		bus_dma_tag_destroy(priv->dmat);
	if (priv->dmat_coherent)
		bus_dma_tag_destroy(priv->dmat_coherent);
	mtx_destroy(&priv->lock);
	pdev->dev.dma_priv = NULL;
	free(priv, M_DEVBUF);
	return (0);
}

static int
linux_pdev_dma_init(struct pci_dev *pdev)
{
	struct linux_dma_priv *priv;
	int error;

	priv = malloc(sizeof(*priv), M_DEVBUF, M_WAITOK | M_ZERO);

	mtx_init(&priv->lock, "lkpi-priv-dma", NULL, MTX_DEF);
	pctrie_init(&priv->ptree);

	pdev->dev.dma_priv = priv;

	/* Create the default DMA tags. */
	error = linux_dma_tag_init(&pdev->dev, DMA_BIT_MASK(64));
	if (error != 0)
		goto err;
	/* Coherent is lower 32bit only by default in Linux. */
	error = linux_dma_tag_init_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (error != 0)
		goto err;

	return (error);

err:
	linux_pdev_dma_uninit(pdev);
	return (error);
}
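/*
 * (Re-)create the bus_dma tag backing the device's streaming DMA mask.
 * The Linux-style dma_set_mask()/pci_set_dma_mask() wrappers elsewhere in
 * the LinuxKPI headers are expected to end up here; the existing tag is
 * destroyed and rebuilt only when the requested mask actually changes.
 */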
int
linux_dma_tag_init(struct device *dev, u64 dma_mask)
{
	struct linux_dma_priv *priv;
	int error;

	priv = dev->dma_priv;

	if (priv->dmat) {
		if (priv->dma_mask == dma_mask)
			return (0);

		bus_dma_tag_destroy(priv->dmat);
	}

	priv->dma_mask = dma_mask;

	error = bus_dma_tag_create(bus_get_dma_tag(dev->bsddev),
	    1, 0,			/* alignment, boundary */
	    dma_mask,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    BUS_SPACE_MAXSIZE,		/* maxsize */
	    1,				/* nsegments */
	    BUS_SPACE_MAXSIZE,		/* maxsegsz */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &priv->dmat);
	return (-error);
}

int
linux_dma_tag_init_coherent(struct device *dev, u64 dma_mask)
{
	struct linux_dma_priv *priv;
	int error;

	priv = dev->dma_priv;

	if (priv->dmat_coherent) {
		if (priv->dma_coherent_mask == dma_mask)
			return (0);

		bus_dma_tag_destroy(priv->dmat_coherent);
	}

	priv->dma_coherent_mask = dma_mask;

	error = bus_dma_tag_create(bus_get_dma_tag(dev->bsddev),
	    1, 0,			/* alignment, boundary */
	    dma_mask,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    BUS_SPACE_MAXSIZE,		/* maxsize */
	    1,				/* nsegments */
	    BUS_SPACE_MAXSIZE,		/* maxsegsz */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &priv->dmat_coherent);
	return (-error);
}

static struct pci_driver *
linux_pci_find(device_t dev, const struct pci_device_id **idp)
{
	const struct pci_device_id *id;
	struct pci_driver *pdrv;
	uint16_t vendor;
	uint16_t device;
	uint16_t subvendor;
	uint16_t subdevice;

	vendor = pci_get_vendor(dev);
	device = pci_get_device(dev);
	subvendor = pci_get_subvendor(dev);
	subdevice = pci_get_subdevice(dev);

	spin_lock(&pci_lock);
	list_for_each_entry(pdrv, &pci_drivers, node) {
		for (id = pdrv->id_table; id->vendor != 0; id++) {
			if (vendor == id->vendor &&
			    (PCI_ANY_ID == id->device || device == id->device) &&
			    (PCI_ANY_ID == id->subvendor || subvendor == id->subvendor) &&
			    (PCI_ANY_ID == id->subdevice || subdevice == id->subdevice)) {
				*idp = id;
				spin_unlock(&pci_lock);
				return (pdrv);
			}
		}
	}
	spin_unlock(&pci_lock);
	return (NULL);
}

static void
lkpi_pci_dev_release(struct device *dev)
{

	lkpi_devres_release_free_list(dev);
	spin_lock_destroy(&dev->devres_lock);
}

static void
lkpifill_pci_dev(device_t dev, struct pci_dev *pdev)
{

	pdev->devfn = PCI_DEVFN(pci_get_slot(dev), pci_get_function(dev));
	pdev->vendor = pci_get_vendor(dev);
	pdev->device = pci_get_device(dev);
	pdev->subsystem_vendor = pci_get_subvendor(dev);
	pdev->subsystem_device = pci_get_subdevice(dev);
	pdev->class = pci_get_class(dev);
	pdev->revision = pci_get_revid(dev);
	pdev->bus = malloc(sizeof(*pdev->bus), M_DEVBUF, M_WAITOK | M_ZERO);
	pdev->bus->self = pdev;
	pdev->bus->number = pci_get_bus(dev);
	pdev->bus->domain = pci_get_domain(dev);
	pdev->dev.bsddev = dev;
	pdev->dev.parent = &linux_root_device;
	pdev->dev.release = lkpi_pci_dev_release;
	INIT_LIST_HEAD(&pdev->dev.irqents);
	kobject_init(&pdev->dev.kobj, &linux_dev_ktype);
	kobject_set_name(&pdev->dev.kobj, device_get_nameunit(dev));
	kobject_add(&pdev->dev.kobj, &linux_root_device.kobj,
	    kobject_name(&pdev->dev.kobj));
	spin_lock_init(&pdev->dev.devres_lock);
	INIT_LIST_HEAD(&pdev->dev.devres_head);
}
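/*
 * A pci_dev created via lkpinew_pci_dev() was not attached through
 * linux_pci_attach(), so it owns its own allocation and is torn down here
 * from the final put_device() rather than by the newbus detach path.
 */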
static void
lkpinew_pci_dev_release(struct device *dev)
{
	struct pci_dev *pdev;

	pdev = to_pci_dev(dev);
	if (pdev->root != NULL)
		pci_dev_put(pdev->root);
	free(pdev->bus, M_DEVBUF);
	free(pdev, M_DEVBUF);
}

struct pci_dev *
lkpinew_pci_dev(device_t dev)
{
	struct pci_dev *pdev;

	pdev = malloc(sizeof(*pdev), M_DEVBUF, M_WAITOK | M_ZERO);
	lkpifill_pci_dev(dev, pdev);
	pdev->dev.release = lkpinew_pci_dev_release;

	return (pdev);
}

struct pci_dev *
lkpi_pci_get_class(unsigned int class, struct pci_dev *from)
{
	device_t dev;
	device_t devfrom = NULL;
	struct pci_dev *pdev;

	if (from != NULL)
		devfrom = from->dev.bsddev;

	dev = pci_find_class_from(class >> 16, (class >> 8) & 0xFF, devfrom);
	if (dev == NULL)
		return (NULL);

	pdev = lkpinew_pci_dev(dev);
	return (pdev);
}

struct pci_dev *
lkpi_pci_get_domain_bus_and_slot(int domain, unsigned int bus,
    unsigned int devfn)
{
	device_t dev;
	struct pci_dev *pdev;

	dev = pci_find_dbsf(domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
	if (dev == NULL)
		return (NULL);

	pdev = lkpinew_pci_dev(dev);
	return (pdev);
}

static int
linux_pci_probe(device_t dev)
{
	const struct pci_device_id *id;
	struct pci_driver *pdrv;

	if ((pdrv = linux_pci_find(dev, &id)) == NULL)
		return (ENXIO);
	if (device_get_driver(dev) != &pdrv->bsddriver)
		return (ENXIO);
	device_set_desc(dev, pdrv->name);

	/* Assume BSS initialized (should never return BUS_PROBE_SPECIFIC). */
	if (pdrv->bsd_probe_return == 0)
		return (BUS_PROBE_DEFAULT);
	else
		return (pdrv->bsd_probe_return);
}
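/*
 * Attach glue.  For DRM drivers the softc hangs off the "drmn" child while
 * the actual PCI device is its vgapci parent, so the parent's ivars and RID
 * are borrowed when filling in the pci_dev below.
 */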
static int
linux_pci_attach(device_t dev)
{
	const struct pci_device_id *id;
	struct pci_driver *pdrv;
	struct pci_dev *pdev;

	pdrv = linux_pci_find(dev, &id);
	pdev = device_get_softc(dev);

	MPASS(pdrv != NULL);
	MPASS(pdev != NULL);

	return (linux_pci_attach_device(dev, pdrv, id, pdev));
}

int
linux_pci_attach_device(device_t dev, struct pci_driver *pdrv,
    const struct pci_device_id *id, struct pci_dev *pdev)
{
	struct resource_list_entry *rle;
	device_t parent;
	uintptr_t rid;
	int error;
	bool isdrm;

	linux_set_current(curthread);

	parent = device_get_parent(dev);
	isdrm = pdrv != NULL && pdrv->isdrm;

	if (isdrm) {
		struct pci_devinfo *dinfo;

		dinfo = device_get_ivars(parent);
		device_set_ivars(dev, dinfo);
	}

	lkpifill_pci_dev(dev, pdev);
	if (isdrm)
		PCI_GET_ID(device_get_parent(parent), parent, PCI_ID_RID, &rid);
	else
		PCI_GET_ID(parent, dev, PCI_ID_RID, &rid);
	pdev->devfn = rid;
	pdev->pdrv = pdrv;
	rle = linux_pci_get_rle(pdev, SYS_RES_IRQ, 0, false);
	if (rle != NULL)
		pdev->dev.irq = rle->start;
	else
		pdev->dev.irq = LINUX_IRQ_INVALID;
	pdev->irq = pdev->dev.irq;
	error = linux_pdev_dma_init(pdev);
	if (error)
		goto out_dma_init;

	TAILQ_INIT(&pdev->mmio);

	spin_lock(&pci_lock);
	list_add(&pdev->links, &pci_devices);
	spin_unlock(&pci_lock);

	if (pdrv != NULL) {
		error = pdrv->probe(pdev, id);
		if (error)
			goto out_probe;
	}
	return (0);

out_probe:
	free(pdev->bus, M_DEVBUF);
	linux_pdev_dma_uninit(pdev);
out_dma_init:
	spin_lock(&pci_lock);
	list_del(&pdev->links);
	spin_unlock(&pci_lock);
	put_device(&pdev->dev);
	return (-error);
}

static int
linux_pci_detach(device_t dev)
{
	struct pci_dev *pdev;

	pdev = device_get_softc(dev);

	MPASS(pdev != NULL);

	device_set_desc(dev, NULL);

	return (linux_pci_detach_device(pdev));
}

int
linux_pci_detach_device(struct pci_dev *pdev)
{

	linux_set_current(curthread);

	if (pdev->pdrv != NULL)
		pdev->pdrv->remove(pdev);

	if (pdev->root != NULL)
		pci_dev_put(pdev->root);
	free(pdev->bus, M_DEVBUF);
	linux_pdev_dma_uninit(pdev);

	spin_lock(&pci_lock);
	list_del(&pdev->links);
	spin_unlock(&pci_lock);
	put_device(&pdev->dev);

	return (0);
}

static int
lkpi_pci_disable_dev(struct device *dev)
{

	(void) pci_disable_io(dev->bsddev, SYS_RES_MEMORY);
	(void) pci_disable_io(dev->bsddev, SYS_RES_IOPORT);
	return (0);
}

void
lkpi_pci_devres_release(struct device *dev, void *p)
{
	struct pci_devres *dr;
	struct pci_dev *pdev;
	int bar;

	pdev = to_pci_dev(dev);
	dr = p;

	if (pdev->msix_enabled)
		lkpi_pci_disable_msix(pdev);
	if (pdev->msi_enabled)
		lkpi_pci_disable_msi(pdev);

	if (dr->enable_io && lkpi_pci_disable_dev(dev) == 0)
		dr->enable_io = false;

	if (dr->region_mask == 0)
		return;
	for (bar = PCIR_MAX_BAR_0; bar >= 0; bar--) {
		if ((dr->region_mask & (1 << bar)) == 0)
			continue;
		pci_release_region(pdev, bar);
	}
}

void
lkpi_pcim_iomap_table_release(struct device *dev, void *p)
{
	struct pcim_iomap_devres *dr;
	struct pci_dev *pdev;
	int bar;

	dr = p;
	pdev = to_pci_dev(dev);
	for (bar = PCIR_MAX_BAR_0; bar >= 0; bar--) {
		if (dr->mmio_table[bar] == NULL)
			continue;

		pci_iounmap(pdev, dr->mmio_table[bar]);
	}
}
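/*
 * Power-management entry points.  A legacy (struct pci_driver) suspend or
 * resume callback wins if present; otherwise the dev_pm_ops are run in
 * Linux order: suspend then suspend_late going down, resume_early then
 * resume coming back up.
 */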
static int
linux_pci_suspend(device_t dev)
{
	const struct dev_pm_ops *pmops;
	struct pm_message pm = { };
	struct pci_dev *pdev;
	int error;

	error = 0;
	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	pmops = pdev->pdrv->driver.pm;

	if (pdev->pdrv->suspend != NULL)
		error = -pdev->pdrv->suspend(pdev, pm);
	else if (pmops != NULL && pmops->suspend != NULL) {
		error = -pmops->suspend(&pdev->dev);
		if (error == 0 && pmops->suspend_late != NULL)
			error = -pmops->suspend_late(&pdev->dev);
	}
	return (error);
}

static int
linux_pci_resume(device_t dev)
{
	const struct dev_pm_ops *pmops;
	struct pci_dev *pdev;
	int error;

	error = 0;
	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	pmops = pdev->pdrv->driver.pm;

	if (pdev->pdrv->resume != NULL)
		error = -pdev->pdrv->resume(pdev);
	else if (pmops != NULL && pmops->resume != NULL) {
		if (pmops->resume_early != NULL)
			error = -pmops->resume_early(&pdev->dev);
		if (error == 0 && pmops->resume != NULL)
			error = -pmops->resume(&pdev->dev);
	}
	return (error);
}

static int
linux_pci_shutdown(device_t dev)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->shutdown != NULL)
		pdev->pdrv->shutdown(pdev);
	return (0);
}

static int
linux_pci_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *pf_config)
{
	struct pci_dev *pdev;
	int error;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->bsd_iov_init != NULL)
		error = pdev->pdrv->bsd_iov_init(dev, num_vfs, pf_config);
	else
		error = EINVAL;
	return (error);
}

static void
linux_pci_iov_uninit(device_t dev)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->bsd_iov_uninit != NULL)
		pdev->pdrv->bsd_iov_uninit(dev);
}

static int
linux_pci_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *vf_config)
{
	struct pci_dev *pdev;
	int error;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->bsd_iov_add_vf != NULL)
		error = pdev->pdrv->bsd_iov_add_vf(dev, vfnum, vf_config);
	else
		error = EINVAL;
	return (error);
}

static int
_linux_pci_register_driver(struct pci_driver *pdrv, devclass_t dc)
{
	int error;

	linux_set_current(curthread);
	spin_lock(&pci_lock);
	list_add(&pdrv->node, &pci_drivers);
	spin_unlock(&pci_lock);
	pdrv->bsddriver.name = pdrv->name;
	pdrv->bsddriver.methods = pci_methods;
	pdrv->bsddriver.size = sizeof(struct pci_dev);

	bus_topo_lock();
	error = devclass_add_driver(dc, &pdrv->bsddriver,
	    BUS_PASS_DEFAULT, &pdrv->bsdclass);
	bus_topo_unlock();
	return (-error);
}

int
linux_pci_register_driver(struct pci_driver *pdrv)
{
	devclass_t dc;

	dc = devclass_find("pci");
	if (dc == NULL)
		return (-ENXIO);
	pdrv->isdrm = false;
	return (_linux_pci_register_driver(pdrv, dc));
}
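/*
 * A minimal sketch of how a LinuxKPI consumer hooks in; "foo" is a
 * hypothetical driver used purely for illustration:
 *
 *	static struct pci_driver foo_driver = {
 *		.name = "foo",
 *		.id_table = foo_id_table,
 *		.probe = foo_probe,
 *		.remove = foo_remove,
 *	};
 *	...
 *	error = linux_pci_register_driver(&foo_driver);
 *
 * Registration links the driver onto pci_drivers (so linux_pci_find() can
 * match it at probe time) and adds the generated bsddriver to the "pci"
 * devclass.
 */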
struct resource_list_entry *
linux_pci_reserve_bar(struct pci_dev *pdev, struct resource_list *rl,
    int type, int rid)
{
	device_t dev;
	struct resource *res;

	KASSERT(type == SYS_RES_IOPORT || type == SYS_RES_MEMORY,
	    ("trying to reserve non-BAR type %d", type));

	dev = pdev->pdrv != NULL && pdev->pdrv->isdrm ?
	    device_get_parent(pdev->dev.bsddev) : pdev->dev.bsddev;
	res = pci_reserve_map(device_get_parent(dev), dev, type, &rid, 0, ~0,
	    1, 1, 0);
	if (res == NULL)
		return (NULL);
	return (resource_list_find(rl, type, rid));
}

unsigned long
pci_resource_start(struct pci_dev *pdev, int bar)
{
	struct resource_list_entry *rle;
	rman_res_t newstart;
	device_t dev;
	int error;

	if ((rle = linux_pci_get_bar(pdev, bar, true)) == NULL)
		return (0);
	dev = pdev->pdrv != NULL && pdev->pdrv->isdrm ?
	    device_get_parent(pdev->dev.bsddev) : pdev->dev.bsddev;
	error = bus_translate_resource(dev, rle->type, rle->start, &newstart);
	if (error != 0) {
		device_printf(pdev->dev.bsddev,
		    "translate of %#jx failed: %d\n",
		    (uintmax_t)rle->start, error);
		return (0);
	}
	return (newstart);
}

unsigned long
pci_resource_len(struct pci_dev *pdev, int bar)
{
	struct resource_list_entry *rle;

	if ((rle = linux_pci_get_bar(pdev, bar, true)) == NULL)
		return (0);
	return (rle->count);
}

int
linux_pci_register_drm_driver(struct pci_driver *pdrv)
{
	devclass_t dc;

	dc = devclass_create("vgapci");
	if (dc == NULL)
		return (-ENXIO);
	pdrv->isdrm = true;
	pdrv->name = "drmn";
	return (_linux_pci_register_driver(pdrv, dc));
}

void
linux_pci_unregister_driver(struct pci_driver *pdrv)
{
	devclass_t bus;

	bus = devclass_find("pci");

	spin_lock(&pci_lock);
	list_del(&pdrv->node);
	spin_unlock(&pci_lock);
	bus_topo_lock();
	if (bus != NULL)
		devclass_delete_driver(bus, &pdrv->bsddriver);
	bus_topo_unlock();
}

void
linux_pci_unregister_drm_driver(struct pci_driver *pdrv)
{
	devclass_t bus;

	bus = devclass_find("vgapci");

	spin_lock(&pci_lock);
	list_del(&pdrv->node);
	spin_unlock(&pci_lock);
	bus_topo_lock();
	if (bus != NULL)
		devclass_delete_driver(bus, &pdrv->bsddriver);
	bus_topo_unlock();
}

CTASSERT(sizeof(dma_addr_t) <= sizeof(uint64_t));

struct linux_dma_obj {
	void		*vaddr;
	uint64_t	dma_addr;
	bus_dmamap_t	dmamap;
	bus_dma_tag_t	dmat;
};

static uma_zone_t linux_dma_trie_zone;
static uma_zone_t linux_dma_obj_zone;

static void
linux_dma_init(void *arg)
{

	linux_dma_trie_zone = uma_zcreate("linux_dma_pctrie",
	    pctrie_node_size(), NULL, NULL, pctrie_zone_init, NULL,
	    UMA_ALIGN_PTR, 0);
	linux_dma_obj_zone = uma_zcreate("linux_dma_object",
	    sizeof(struct linux_dma_obj), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
}
SYSINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_init, NULL);

static void
linux_dma_uninit(void *arg)
{

	uma_zdestroy(linux_dma_obj_zone);
	uma_zdestroy(linux_dma_trie_zone);
}
SYSUNINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_uninit, NULL);

static void *
linux_dma_trie_alloc(struct pctrie *ptree)
{

	return (uma_zalloc(linux_dma_trie_zone, M_NOWAIT));
}

static void
linux_dma_trie_free(struct pctrie *ptree, void *node)
{

	uma_zfree(linux_dma_trie_zone, node);
}

PCTRIE_DEFINE(LINUX_DMA, linux_dma_obj, dma_addr, linux_dma_trie_alloc,
    linux_dma_trie_free);
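/*
 * Streaming DMA mappings that are not 1:1 with their physical address are
 * tracked in the per-device pctrie, keyed by the bus address handed back to
 * the driver, so that linux_dma_unmap() and linuxkpi_dma_sync() can recover
 * the bus_dmamap from nothing but a dma_addr_t.
 */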
#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
static dma_addr_t
linux_dma_map_phys_common(struct device *dev, vm_paddr_t phys, size_t len,
    bus_dma_tag_t dmat)
{
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;
	int error, nseg;
	bus_dma_segment_t seg;

	priv = dev->dma_priv;

	/*
	 * If the resultant mapping will be entirely 1:1 with the
	 * physical address, short-circuit the remainder of the
	 * bus_dma API.  This avoids tracking collisions in the pctrie
	 * with the additional benefit of reducing overhead.
	 */
	if (bus_dma_id_mapped(dmat, phys, len))
		return (phys);

	obj = uma_zalloc(linux_dma_obj_zone, M_NOWAIT);
	if (obj == NULL) {
		return (0);
	}
	obj->dmat = dmat;

	DMA_PRIV_LOCK(priv);
	if (bus_dmamap_create(obj->dmat, 0, &obj->dmamap) != 0) {
		DMA_PRIV_UNLOCK(priv);
		uma_zfree(linux_dma_obj_zone, obj);
		return (0);
	}

	nseg = -1;
	if (_bus_dmamap_load_phys(obj->dmat, obj->dmamap, phys, len,
	    BUS_DMA_NOWAIT, &seg, &nseg) != 0) {
		bus_dmamap_destroy(obj->dmat, obj->dmamap);
		DMA_PRIV_UNLOCK(priv);
		uma_zfree(linux_dma_obj_zone, obj);
		return (0);
	}

	KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg));
	obj->dma_addr = seg.ds_addr;

	error = LINUX_DMA_PCTRIE_INSERT(&priv->ptree, obj);
	if (error != 0) {
		bus_dmamap_unload(obj->dmat, obj->dmamap);
		bus_dmamap_destroy(obj->dmat, obj->dmamap);
		DMA_PRIV_UNLOCK(priv);
		uma_zfree(linux_dma_obj_zone, obj);
		return (0);
	}
	DMA_PRIV_UNLOCK(priv);
	return (obj->dma_addr);
}
#else
static dma_addr_t
linux_dma_map_phys_common(struct device *dev __unused, vm_paddr_t phys,
    size_t len __unused, bus_dma_tag_t dmat __unused)
{
	return (phys);
}
#endif

dma_addr_t
linux_dma_map_phys(struct device *dev, vm_paddr_t phys, size_t len)
{
	struct linux_dma_priv *priv;

	priv = dev->dma_priv;
	return (linux_dma_map_phys_common(dev, phys, len, priv->dmat));
}

#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
void
linux_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len)
{
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;

	priv = dev->dma_priv;

	if (pctrie_is_empty(&priv->ptree))
		return;

	DMA_PRIV_LOCK(priv);
	obj = LINUX_DMA_PCTRIE_LOOKUP(&priv->ptree, dma_addr);
	if (obj == NULL) {
		DMA_PRIV_UNLOCK(priv);
		return;
	}
	LINUX_DMA_PCTRIE_REMOVE(&priv->ptree, dma_addr);
	bus_dmamap_unload(obj->dmat, obj->dmamap);
	bus_dmamap_destroy(obj->dmat, obj->dmamap);
	DMA_PRIV_UNLOCK(priv);

	uma_zfree(linux_dma_obj_zone, obj);
}
#else
void
linux_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len)
{
}
#endif

void *
linux_dma_alloc_coherent(struct device *dev, size_t size,
    dma_addr_t *dma_handle, gfp_t flag)
{
	struct linux_dma_priv *priv;
	vm_paddr_t high;
	size_t align;
	void *mem;

	if (dev == NULL || dev->dma_priv == NULL) {
		*dma_handle = 0;
		return (NULL);
	}
	priv = dev->dma_priv;
	if (priv->dma_coherent_mask)
		high = priv->dma_coherent_mask;
	else
		/* Coherent is lower 32bit only by default in Linux. */
		high = BUS_SPACE_MAXADDR_32BIT;
	align = PAGE_SIZE << get_order(size);
	/* Always zero the allocation. */
	flag |= M_ZERO;
	mem = (void *)kmem_alloc_contig(size, flag & GFP_NATIVE_MASK, 0, high,
	    align, 0, VM_MEMATTR_DEFAULT);
	if (mem != NULL) {
		*dma_handle = linux_dma_map_phys_common(dev, vtophys(mem), size,
		    priv->dmat_coherent);
		if (*dma_handle == 0) {
			kmem_free((vm_offset_t)mem, size);
			mem = NULL;
		}
	} else {
		*dma_handle = 0;
	}
	return (mem);
}
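/*
 * Presumably reached via the dma_sync_single_for_cpu()/_for_device()
 * wrappers in the LinuxKPI headers: look the mapping up by bus address and
 * forward the requested bus_dmamap_sync() operation.  Identity (1:1)
 * mappings were never inserted into the pctrie and are deliberately
 * ignored here.
 */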
void
linuxkpi_dma_sync(struct device *dev, dma_addr_t dma_addr, size_t size,
    bus_dmasync_op_t op)
{
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;

	priv = dev->dma_priv;

	if (pctrie_is_empty(&priv->ptree))
		return;

	DMA_PRIV_LOCK(priv);
	obj = LINUX_DMA_PCTRIE_LOOKUP(&priv->ptree, dma_addr);
	if (obj == NULL) {
		DMA_PRIV_UNLOCK(priv);
		return;
	}

	bus_dmamap_sync(obj->dmat, obj->dmamap, op);
	DMA_PRIV_UNLOCK(priv);
}

int
linux_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl, int nents,
    enum dma_data_direction direction, unsigned long attrs __unused)
{
	struct linux_dma_priv *priv;
	struct scatterlist *sg;
	int i, nseg;
	bus_dma_segment_t seg;

	priv = dev->dma_priv;

	DMA_PRIV_LOCK(priv);

	/* create common DMA map in the first S/G entry */
	if (bus_dmamap_create(priv->dmat, 0, &sgl->dma_map) != 0) {
		DMA_PRIV_UNLOCK(priv);
		return (0);
	}

	/* load all S/G list entries */
	for_each_sg(sgl, sg, nents, i) {
		nseg = -1;
		if (_bus_dmamap_load_phys(priv->dmat, sgl->dma_map,
		    sg_phys(sg), sg->length, BUS_DMA_NOWAIT,
		    &seg, &nseg) != 0) {
			bus_dmamap_unload(priv->dmat, sgl->dma_map);
			bus_dmamap_destroy(priv->dmat, sgl->dma_map);
			DMA_PRIV_UNLOCK(priv);
			return (0);
		}
		KASSERT(nseg == 0,
		    ("More than one segment (nseg=%d)", nseg + 1));

		sg_dma_address(sg) = seg.ds_addr;
	}

	switch (direction) {
	case DMA_BIDIRECTIONAL:
		bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREWRITE);
		break;
	case DMA_TO_DEVICE:
		bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREREAD);
		break;
	case DMA_FROM_DEVICE:
		bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREWRITE);
		break;
	default:
		break;
	}

	DMA_PRIV_UNLOCK(priv);

	return (nents);
}

void
linux_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl,
    int nents __unused, enum dma_data_direction direction,
    unsigned long attrs __unused)
{
	struct linux_dma_priv *priv;

	priv = dev->dma_priv;

	DMA_PRIV_LOCK(priv);

	switch (direction) {
	case DMA_BIDIRECTIONAL:
		bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_POSTREAD);
		bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREREAD);
		break;
	case DMA_TO_DEVICE:
		bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_POSTWRITE);
		break;
	case DMA_FROM_DEVICE:
		bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_POSTREAD);
		break;
	default:
		break;
	}

	bus_dmamap_unload(priv->dmat, sgl->dma_map);
	bus_dmamap_destroy(priv->dmat, sgl->dma_map);
	DMA_PRIV_UNLOCK(priv);
}
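/*
 * dma_pool support.  The pool is layered on a UMA cache zone: the
 * import/release handlers allocate and free fixed-size bus_dmamem buffers,
 * the ctor/dtor load and unload their DMA maps, and a pctrie keyed by bus
 * address lets linux_dma_pool_free() find the object again from the
 * dma_addr_t the consumer hands back.
 */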
struct dma_pool {
	struct device	*pool_device;
	uma_zone_t	pool_zone;
	struct mtx	pool_lock;
	bus_dma_tag_t	pool_dmat;
	size_t		pool_entry_size;
	struct pctrie	pool_ptree;
};

#define	DMA_POOL_LOCK(pool)	mtx_lock(&(pool)->pool_lock)
#define	DMA_POOL_UNLOCK(pool)	mtx_unlock(&(pool)->pool_lock)

static inline int
dma_pool_obj_ctor(void *mem, int size, void *arg, int flags)
{
	struct linux_dma_obj *obj = mem;
	struct dma_pool *pool = arg;
	int error, nseg;
	bus_dma_segment_t seg;

	nseg = -1;
	DMA_POOL_LOCK(pool);
	error = _bus_dmamap_load_phys(pool->pool_dmat, obj->dmamap,
	    vtophys(obj->vaddr), pool->pool_entry_size, BUS_DMA_NOWAIT,
	    &seg, &nseg);
	DMA_POOL_UNLOCK(pool);
	if (error != 0) {
		return (error);
	}
	KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg));
	obj->dma_addr = seg.ds_addr;

	return (0);
}

static void
dma_pool_obj_dtor(void *mem, int size, void *arg)
{
	struct linux_dma_obj *obj = mem;
	struct dma_pool *pool = arg;

	DMA_POOL_LOCK(pool);
	bus_dmamap_unload(pool->pool_dmat, obj->dmamap);
	DMA_POOL_UNLOCK(pool);
}

static int
dma_pool_obj_import(void *arg, void **store, int count, int domain __unused,
    int flags)
{
	struct dma_pool *pool = arg;
	struct linux_dma_obj *obj;
	int error, i;

	for (i = 0; i < count; i++) {
		obj = uma_zalloc(linux_dma_obj_zone, flags);
		if (obj == NULL)
			break;

		error = bus_dmamem_alloc(pool->pool_dmat, &obj->vaddr,
		    BUS_DMA_NOWAIT, &obj->dmamap);
		if (error != 0) {
			uma_zfree(linux_dma_obj_zone, obj);
			break;
		}

		store[i] = obj;
	}

	return (i);
}

static void
dma_pool_obj_release(void *arg, void **store, int count)
{
	struct dma_pool *pool = arg;
	struct linux_dma_obj *obj;
	int i;

	for (i = 0; i < count; i++) {
		obj = store[i];
		bus_dmamem_free(pool->pool_dmat, obj->vaddr, obj->dmamap);
		uma_zfree(linux_dma_obj_zone, obj);
	}
}

struct dma_pool *
linux_dma_pool_create(char *name, struct device *dev, size_t size,
    size_t align, size_t boundary)
{
	struct linux_dma_priv *priv;
	struct dma_pool *pool;

	priv = dev->dma_priv;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	pool->pool_device = dev;
	pool->pool_entry_size = size;

	if (bus_dma_tag_create(bus_get_dma_tag(dev->bsddev),
	    align, boundary,		/* alignment, boundary */
	    priv->dma_mask,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    size,			/* maxsize */
	    1,				/* nsegments */
	    size,			/* maxsegsz */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &pool->pool_dmat)) {
		kfree(pool);
		return (NULL);
	}

	pool->pool_zone = uma_zcache_create(name, -1, dma_pool_obj_ctor,
	    dma_pool_obj_dtor, NULL, NULL, dma_pool_obj_import,
	    dma_pool_obj_release, pool, 0);

	mtx_init(&pool->pool_lock, "lkpi-dma-pool", NULL, MTX_DEF);
	pctrie_init(&pool->pool_ptree);

	return (pool);
}

void
linux_dma_pool_destroy(struct dma_pool *pool)
{

	uma_zdestroy(pool->pool_zone);
	bus_dma_tag_destroy(pool->pool_dmat);
	mtx_destroy(&pool->pool_lock);
	kfree(pool);
}

void
lkpi_dmam_pool_destroy(struct device *dev, void *p)
{
	struct dma_pool *pool;

	pool = *(struct dma_pool **)p;
	LINUX_DMA_PCTRIE_RECLAIM(&pool->pool_ptree);
	linux_dma_pool_destroy(pool);
}
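/*
 * A sketch of the expected consumer pattern, with hypothetical names:
 *
 *	pool = dma_pool_create("foo", &pdev->dev, 256, 256, 0);
 *	vaddr = dma_pool_alloc(pool, GFP_KERNEL, &busaddr);
 *	...
 *	dma_pool_free(pool, vaddr, busaddr);
 *	dma_pool_destroy(pool);
 *
 * The Linux-facing names are assumed to map onto the linux_dma_pool_*()
 * functions here via the LinuxKPI headers.
 */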
void *
linux_dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
    dma_addr_t *handle)
{
	struct linux_dma_obj *obj;

	obj = uma_zalloc_arg(pool->pool_zone, pool, mem_flags & GFP_NATIVE_MASK);
	if (obj == NULL)
		return (NULL);

	DMA_POOL_LOCK(pool);
	if (LINUX_DMA_PCTRIE_INSERT(&pool->pool_ptree, obj) != 0) {
		DMA_POOL_UNLOCK(pool);
		uma_zfree_arg(pool->pool_zone, obj, pool);
		return (NULL);
	}
	DMA_POOL_UNLOCK(pool);

	*handle = obj->dma_addr;
	return (obj->vaddr);
}

void
linux_dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma_addr)
{
	struct linux_dma_obj *obj;

	DMA_POOL_LOCK(pool);
	obj = LINUX_DMA_PCTRIE_LOOKUP(&pool->pool_ptree, dma_addr);
	if (obj == NULL) {
		DMA_POOL_UNLOCK(pool);
		return;
	}
	LINUX_DMA_PCTRIE_REMOVE(&pool->pool_ptree, dma_addr);
	DMA_POOL_UNLOCK(pool);

	uma_zfree_arg(pool->pool_zone, obj, pool);
}

static int
linux_backlight_get_status(device_t dev, struct backlight_props *props)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);

	props->brightness = pdev->dev.bd->props.brightness;
	props->brightness = props->brightness * 100 / pdev->dev.bd->props.max_brightness;
	props->nlevels = 0;

	return (0);
}

static int
linux_backlight_get_info(device_t dev, struct backlight_info *info)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);

	info->type = BACKLIGHT_TYPE_PANEL;
	strlcpy(info->name, pdev->dev.bd->name, BACKLIGHTMAXNAMELENGTH);
	return (0);
}

static int
linux_backlight_update_status(device_t dev, struct backlight_props *props)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);

	pdev->dev.bd->props.brightness = pdev->dev.bd->props.max_brightness *
	    props->brightness / 100;
	pdev->dev.bd->props.power = props->brightness == 0 ?
	    4/* FB_BLANK_POWERDOWN */ : 0/* FB_BLANK_UNBLANK */;
	return (pdev->dev.bd->ops->update_status(pdev->dev.bd));
}

struct backlight_device *
linux_backlight_device_register(const char *name, struct device *dev,
    void *data, const struct backlight_ops *ops, struct backlight_properties *props)
{

	dev->bd = malloc(sizeof(*dev->bd), M_DEVBUF, M_WAITOK | M_ZERO);
	dev->bd->ops = ops;
	dev->bd->props.type = props->type;
	dev->bd->props.max_brightness = props->max_brightness;
	dev->bd->props.brightness = props->brightness;
	dev->bd->props.power = props->power;
	dev->bd->data = data;
	dev->bd->dev = dev;
	dev->bd->name = strdup(name, M_DEVBUF);

	dev->backlight_dev = backlight_register(name, dev->bsddev);

	return (dev->bd);
}

void
linux_backlight_device_unregister(struct backlight_device *bd)
{

	backlight_destroy(bd->dev->backlight_dev);
	free(bd->name, M_DEVBUF);
	free(bd, M_DEVBUF);
}