// SPDX-License-Identifier: GPL-2.0
/*
 * (C) Copyright 2002-2004 Greg Kroah-Hartman <greg@kroah.com>
 * (C) Copyright 2002-2004 IBM Corp.
 * (C) Copyright 2003 Matthew Wilcox
 * (C) Copyright 2003 Hewlett-Packard
 * (C) Copyright 2004 Jon Smirl <jonsmirl@yahoo.com>
 * (C) Copyright 2004 Silicon Graphics, Inc. Jesse Barnes <jbarnes@sgi.com>
 *
 * File attributes for PCI devices
 *
 * Modeled after usb's driverfs.c
 */


#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/stat.h>
#include <linux/export.h>
#include <linux/topology.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/capability.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>
#include <linux/pm_runtime.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/aperture.h>
#include "pci.h"

static int sysfs_initialized;	/* = 0 */

/*
 * show configuration fields
 *
 * Generates a read-only sysfs show function for a scalar pci_dev field,
 * printed with the given format string.
 */
#define pci_config_attr(field, format_string)				\
static ssize_t								\
field##_show(struct device *dev, struct device_attribute *attr, char *buf)				\
{									\
	struct pci_dev *pdev;						\
									\
	pdev = to_pci_dev(dev);						\
	return sysfs_emit(buf, format_string, pdev->field);		\
}									\
static DEVICE_ATTR_RO(field)

pci_config_attr(vendor, "0x%04x\n");
pci_config_attr(device, "0x%04x\n");
pci_config_attr(subsystem_vendor, "0x%04x\n");
pci_config_attr(subsystem_device, "0x%04x\n");
pci_config_attr(revision, "0x%02x\n");
pci_config_attr(class, "0x%06x\n");

static ssize_t irq_show(struct device *dev,
			struct device_attribute *attr,
			char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

#ifdef CONFIG_PCI_MSI
	/*
	 * For MSI, show the first MSI IRQ; for all other cases including
	 * MSI-X, show the legacy INTx IRQ.
	 */
	if (pdev->msi_enabled)
		return sysfs_emit(buf, "%u\n", pci_irq_vector(pdev, 0));
#endif

	return sysfs_emit(buf, "%u\n", pdev->irq);
}
static DEVICE_ATTR_RO(irq);

/* Expose (and allow toggling of) the device's broken-parity quirk flag */
static ssize_t broken_parity_status_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	return sysfs_emit(buf, "%u\n", pdev->broken_parity_status);
}

static ssize_t broken_parity_status_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	unsigned long val;

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	/* normalize any nonzero value to 1 */
	pdev->broken_parity_status = !!val;

	return count;
}
static DEVICE_ATTR_RW(broken_parity_status);

/*
 * Helper for local_cpus/local_cpulist: the CPUs considered local to this
 * device (its NUMA node's CPUs when NUMA is configured, otherwise the
 * CPUs of the device's PCI bus).
 */
static ssize_t pci_dev_show_local_cpu(struct device *dev, bool list,
				      struct device_attribute *attr, char *buf)
{
	const struct cpumask *mask;

#ifdef CONFIG_NUMA
	if (dev_to_node(dev) == NUMA_NO_NODE)
		mask = cpu_online_mask;
	else
		mask = cpumask_of_node(dev_to_node(dev));
#else
	mask = cpumask_of_pcibus(to_pci_dev(dev)->bus);
#endif
	return cpumap_print_to_pagebuf(list, buf, mask);
}

static ssize_t local_cpus_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	return pci_dev_show_local_cpu(dev, false, attr, buf);
}
static DEVICE_ATTR_RO(local_cpus);

static ssize_t local_cpulist_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	return pci_dev_show_local_cpu(dev, true, attr, buf);
}
static DEVICE_ATTR_RO(local_cpulist);

/*
 * PCI Bus Class Devices
 */
static ssize_t cpuaffinity_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	const struct cpumask *cpumask = cpumask_of_pcibus(to_pci_bus(dev));

	return cpumap_print_to_pagebuf(false, buf, cpumask);
}
static DEVICE_ATTR_RO(cpuaffinity);

static ssize_t cpulistaffinity_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	const struct cpumask *cpumask = cpumask_of_pcibus(to_pci_bus(dev));

	return cpumap_print_to_pagebuf(true, buf, cpumask);
}
static DEVICE_ATTR_RO(cpulistaffinity);

static ssize_t power_state_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return sysfs_emit(buf, "%s\n", pci_power_name(pdev->current_state));
}
static DEVICE_ATTR_RO(power_state);

/* show resources */
static ssize_t resource_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	int i;
	int max;
	resource_size_t start, end;
	size_t len = 0;

	/* bridges additionally list their window resources */
	if (pci_dev->subordinate)
		max = DEVICE_COUNT_RESOURCE;
	else
		max = PCI_BRIDGE_RESOURCES;

	for (i = 0; i < max; i++) {
		struct resource *res = &pci_dev->resource[i];
		pci_resource_to_user(pci_dev, i, res, &start, &end);
		len += sysfs_emit_at(buf, len, "0x%016llx 0x%016llx 0x%016llx\n",
				     (unsigned long long)start,
				     (unsigned long long)end,
				     (unsigned long long)res->flags);
	}
	return len;
}
static DEVICE_ATTR_RO(resource);

static ssize_t max_link_speed_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return sysfs_emit(buf, "%s\n",
			  pci_speed_string(pcie_get_speed_cap(pdev)));
}
static DEVICE_ATTR_RO(max_link_speed);

static ssize_t max_link_width_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return sysfs_emit(buf, "%u\n", pcie_get_width_cap(pdev));
}
static DEVICE_ATTR_RO(max_link_width);

static ssize_t current_link_speed_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	u16 linkstat;
	int err;
	enum pci_bus_speed speed;

	err = pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &linkstat);
	if (err)
		return -EINVAL;

	/* Current Link Speed field of the Link Status register */
	speed = pcie_link_speed[linkstat & PCI_EXP_LNKSTA_CLS];

	return sysfs_emit(buf, "%s\n", pci_speed_string(speed));
}
static DEVICE_ATTR_RO(current_link_speed);

static ssize_t current_link_width_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	u16 linkstat;
	int err;

	err = pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &linkstat);
	if (err)
		return -EINVAL;

	return sysfs_emit(buf, "%u\n",
		(linkstat & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT);
}
static DEVICE_ATTR_RO(current_link_width);

static ssize_t secondary_bus_number_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	u8 sec_bus;
	int err;

	err = pci_read_config_byte(pci_dev, PCI_SECONDARY_BUS, &sec_bus);
	if (err)
		return -EINVAL;

	return sysfs_emit(buf, "%u\n", sec_bus);
}
static DEVICE_ATTR_RO(secondary_bus_number);

static ssize_t subordinate_bus_number_show(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	u8 sub_bus;
	int err;

	err = pci_read_config_byte(pci_dev, PCI_SUBORDINATE_BUS, &sub_bus);
	if (err)
		return -EINVAL;

	return sysfs_emit(buf, "%u\n", sub_bus);
}
static DEVICE_ATTR_RO(subordinate_bus_number);

static ssize_t ari_enabled_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);

	return sysfs_emit(buf, "%u\n", pci_ari_enabled(pci_dev->bus));
}
static DEVICE_ATTR_RO(ari_enabled);

/* modalias string used by userspace module loading (matches MODULE_DEVICE_TABLE format) */
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);

	return sysfs_emit(buf, "pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02X\n",
			  pci_dev->vendor, pci_dev->device,
			  pci_dev->subsystem_vendor, pci_dev->subsystem_device,
			  (u8)(pci_dev->class >> 16), (u8)(pci_dev->class >> 8),
			  (u8)(pci_dev->class));
}
static DEVICE_ATTR_RO(modalias);

/*
 * Writing a nonzero value enables the device, zero disables it; the write
 * is refused with -EBUSY while a driver is bound, and disabling an
 * already-disabled device returns -EIO.
 */
static ssize_t enable_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	unsigned long val;
	ssize_t result = 0;

	/* this can crash the machine when done on the "wrong" device */
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	device_lock(dev);
	if (dev->driver)
		result = -EBUSY;
	else if (val)
		result = pci_enable_device(pdev);
	else if (pci_is_enabled(pdev))
		pci_disable_device(pdev);
	else
		result = -EIO;
	device_unlock(dev);

	return result < 0 ? result : count;
}

static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct pci_dev *pdev;

	/* enable_cnt is a count, not a boolean; any nonzero value means enabled */
	pdev = to_pci_dev(dev);
	return sysfs_emit(buf, "%u\n", atomic_read(&pdev->enable_cnt));
}
static DEVICE_ATTR_RW(enable);

#ifdef CONFIG_NUMA
/* Firmware-workaround override of the device's NUMA node; taints the kernel */
static ssize_t numa_node_store(struct device *dev,
			       struct device_attribute *attr, const char *buf,
			       size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int node;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (kstrtoint(buf, 0, &node) < 0)
		return -EINVAL;

	if ((node < 0 && node != NUMA_NO_NODE) || node >= MAX_NUMNODES)
		return -EINVAL;

	if (node != NUMA_NO_NODE && !node_online(node))
		return -EINVAL;

	add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
	pci_alert(pdev, FW_BUG "Overriding NUMA node to %d.  Contact your vendor for updates.",
		  node);

	dev->numa_node = node;
	return count;
}

static ssize_t numa_node_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	return sysfs_emit(buf, "%d\n", dev->numa_node);
}
static DEVICE_ATTR_RW(numa_node);
#endif

static ssize_t dma_mask_bits_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	/* number of significant bits in the streaming DMA mask */
	return sysfs_emit(buf, "%d\n", fls64(pdev->dma_mask));
}
static DEVICE_ATTR_RO(dma_mask_bits);

static ssize_t consistent_dma_mask_bits_show(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	return sysfs_emit(buf, "%d\n", fls64(dev->coherent_dma_mask));
}
static DEVICE_ATTR_RO(consistent_dma_mask_bits);

static ssize_t msi_bus_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_bus *subordinate = pdev->subordinate;

	/* for bridges report the subordinate bus flag, otherwise the device flag */
	return sysfs_emit(buf, "%u\n", subordinate ?
		   !(subordinate->bus_flags & PCI_BUS_FLAGS_NO_MSI)
			: !pdev->no_msi);
}

static ssize_t msi_bus_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_bus *subordinate = pdev->subordinate;
	unsigned long val;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	/*
	 * "no_msi" and "bus_flags" only affect what happens when a driver
	 * requests MSI or MSI-X.  They don't affect any drivers that have
	 * already requested MSI or MSI-X.
	 */
	if (!subordinate) {
		pdev->no_msi = !val;
		pci_info(pdev, "MSI/MSI-X %s for future drivers\n",
			 val ? "allowed" : "disallowed");
		return count;
	}

	if (val)
		subordinate->bus_flags &= ~PCI_BUS_FLAGS_NO_MSI;
	else
		subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;

	dev_info(&subordinate->dev, "MSI/MSI-X %s for future drivers of devices on this bus\n",
		 val ? "allowed" : "disallowed");
	return count;
}
static DEVICE_ATTR_RW(msi_bus);

/* bus-type attribute: writing a nonzero value rescans all PCI buses */
static ssize_t rescan_store(struct bus_type *bus, const char *buf, size_t count)
{
	unsigned long val;
	struct pci_bus *b = NULL;

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	if (val) {
		pci_lock_rescan_remove();
		while ((b = pci_find_next_bus(b)) != NULL)
			pci_rescan_bus(b);
		pci_unlock_rescan_remove();
	}
	return count;
}
static BUS_ATTR_WO(rescan);

static struct attribute *pci_bus_attrs[] = {
	&bus_attr_rescan.attr,
	NULL,
};

static const struct attribute_group pci_bus_group = {
	.attrs = pci_bus_attrs,
};

const struct attribute_group *pci_bus_groups[] = {
	&pci_bus_group,
	NULL,
};

/* per-device "rescan": rescans the bus this device sits on */
static ssize_t dev_rescan_store(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	unsigned long val;
	struct pci_dev *pdev = to_pci_dev(dev);

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	if (val) {
		pci_lock_rescan_remove();
		pci_rescan_bus(pdev->bus);
		pci_unlock_rescan_remove();
	}
	return count;
}
static struct device_attribute dev_attr_dev_rescan = __ATTR(rescan, 0200, NULL,
							    dev_rescan_store);

/*
 * "remove": removes the file itself first (device_remove_file_self) so the
 * device can be torn down without deadlocking on its own sysfs entry.
 */
static ssize_t remove_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	unsigned long val;

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	if (val && device_remove_file_self(dev, attr))
		pci_stop_and_remove_bus_device_locked(to_pci_dev(dev));
	return count;
}
static DEVICE_ATTR_IGNORE_LOCKDEP(remove, 0220, NULL,
				  remove_store);

static ssize_t bus_rescan_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	unsigned long val;
	struct pci_bus *bus = to_pci_bus(dev);

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	if (val) {
		pci_lock_rescan_remove();
		/* an empty non-root bus may need its bridge windows resized first */
		if (!pci_is_root_bus(bus) && list_empty(&bus->devices))
			pci_rescan_bus_bridge_resize(bus->self);
		else
			pci_rescan_bus(bus);
		pci_unlock_rescan_remove();
	}
	return count;
}
static struct device_attribute dev_attr_bus_rescan = __ATTR(rescan, 0200, NULL,
							    bus_rescan_store);

#if defined(CONFIG_PM) && defined(CONFIG_ACPI)
static ssize_t d3cold_allowed_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	unsigned long val;

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	pdev->d3cold_allowed = !!val;
	if (pdev->d3cold_allowed)
		pci_d3cold_enable(pdev);
	else
		pci_d3cold_disable(pdev);

	/* resume so the new D3cold policy can take effect on next suspend */
	pm_runtime_resume(dev);

	return count;
}

static ssize_t d3cold_allowed_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	return sysfs_emit(buf, "%u\n", pdev->d3cold_allowed);
}
static DEVICE_ATTR_RW(d3cold_allowed);
#endif

#ifdef CONFIG_OF
/* devicetree path of the device, empty when it has no OF node */
static ssize_t devspec_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct device_node *np = pci_device_to_OF_node(pdev);

	if (np == NULL)
		return 0;
	return sysfs_emit(buf, "%pOF\n", np);
}
static DEVICE_ATTR_RO(devspec);
#endif

static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int ret;

	ret = driver_set_override(dev, &pdev->driver_override, buf, count);
	if (ret)
		return ret;

	return count;
}

static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	ssize_t len;

	/* device_lock guards against a concurrent driver_override_store() */
	device_lock(dev);
	len = sysfs_emit(buf, "%s\n", pdev->driver_override);
	device_unlock(dev);
	return len;
}
static DEVICE_ATTR_RW(driver_override);

static struct attribute *pci_dev_attrs[] = {
	&dev_attr_power_state.attr,
	&dev_attr_resource.attr,
	&dev_attr_vendor.attr,
	&dev_attr_device.attr,
	&dev_attr_subsystem_vendor.attr,
	&dev_attr_subsystem_device.attr,
	&dev_attr_revision.attr,
	&dev_attr_class.attr,
	&dev_attr_irq.attr,
	&dev_attr_local_cpus.attr,
	&dev_attr_local_cpulist.attr,
	&dev_attr_modalias.attr,
#ifdef CONFIG_NUMA
	&dev_attr_numa_node.attr,
#endif
	&dev_attr_dma_mask_bits.attr,
	&dev_attr_consistent_dma_mask_bits.attr,
	&dev_attr_enable.attr,
	&dev_attr_broken_parity_status.attr,
	&dev_attr_msi_bus.attr,
#if defined(CONFIG_PM) && defined(CONFIG_ACPI)
	&dev_attr_d3cold_allowed.attr,
#endif
#ifdef CONFIG_OF
	&dev_attr_devspec.attr,
#endif
	&dev_attr_driver_override.attr,
	&dev_attr_ari_enabled.attr,
	NULL,
};

static struct attribute *pci_bridge_attrs[] = {
	&dev_attr_subordinate_bus_number.attr,
	&dev_attr_secondary_bus_number.attr,
	NULL,
};

static struct attribute *pcie_dev_attrs[] = {
	&dev_attr_current_link_speed.attr,
	&dev_attr_current_link_width.attr,
	&dev_attr_max_link_width.attr,
	&dev_attr_max_link_speed.attr,
	NULL,
};

static struct attribute *pcibus_attrs[] = {
	&dev_attr_bus_rescan.attr,
	&dev_attr_cpuaffinity.attr,
	&dev_attr_cpulistaffinity.attr,
	NULL,
};

static const struct attribute_group pcibus_group = {
	.attrs = pcibus_attrs,
};

const struct attribute_group *pcibus_groups[] = {
	&pcibus_group,
	NULL,
};

static ssize_t boot_vga_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_dev *vga_dev = vga_default_device();

	if (vga_dev)
		return sysfs_emit(buf, "%u\n", (pdev == vga_dev));

	/* no arbitrated default: fall back to the shadowed-ROM heuristic */
	return sysfs_emit(buf, "%u\n",
			  !!(pdev->resource[PCI_ROM_RESOURCE].flags &
			     IORESOURCE_ROM_SHADOW));
}
static DEVICE_ATTR_RO(boot_vga);

static ssize_t pci_read_config(struct file *filp, struct kobject *kobj,
			       struct bin_attribute *bin_attr, char *buf,
			       loff_t off, size_t count)
{
	struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj));
	unsigned int size = 64;
	loff_t init_off = off;
	u8 *data = (u8 *) buf;

	/* Several chips lock up trying to read undefined config space */
	if (file_ns_capable(filp, &init_user_ns, CAP_SYS_ADMIN))
		size = dev->cfg_size;
	else if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
		size = 128;

	/* clamp the request to the readable window */
	if (off > size)
		return 0;
	if (off + count > size) {
		size -= off;
		count = size;
	} else {
		size = count;
	}

	pci_config_pm_runtime_get(dev);

	/* lead-in byte to reach word alignment */
	if ((off & 1) && size) {
		u8 val;
		pci_user_read_config_byte(dev, off, &val);
		data[off - init_off] = val;
		off++;
		size--;
	}

	/* lead-in word to reach dword alignment */
	if ((off & 3) && size > 2) {
		u16 val;
		pci_user_read_config_word(dev, off, &val);
		data[off - init_off] = val & 0xff;
		data[off - init_off + 1] = (val >> 8) & 0xff;
		off += 2;
		size -= 2;
	}

	/* bulk of the transfer as aligned dwords */
	while (size > 3) {
		u32 val;
		pci_user_read_config_dword(dev, off, &val);
		data[off - init_off] = val & 0xff;
		data[off - init_off + 1] = (val >> 8) & 0xff;
		data[off - init_off + 2] = (val >> 16) & 0xff;
		data[off - init_off + 3] = (val >> 24) & 0xff;
		off += 4;
		size -= 4;
		cond_resched();
	}

	/* trailing word and byte */
	if (size >= 2) {
		u16 val;
		pci_user_read_config_word(dev, off, &val);
		data[off - init_off] = val & 0xff;
		data[off - init_off + 1] = (val >> 8) & 0xff;
		off += 2;
		size -= 2;
	}

	if (size > 0) {
		u8 val;
		pci_user_read_config_byte(dev, off, &val);
		data[off - init_off] = val;
	}

	pci_config_pm_runtime_put(dev);

	return count;
}

static ssize_t pci_write_config(struct file *filp, struct kobject *kobj,
				struct bin_attribute *bin_attr, char *buf,
				loff_t off, size_t count)
{
	struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj));
	unsigned int size = count;
	loff_t init_off = off;
	u8 *data = (u8 *) buf;
	int ret;

	ret = security_locked_down(LOCKDOWN_PCI_ACCESS);
	if (ret)
		return ret;

	/* warn (and taint) on writes into driver-exclusive config ranges */
	if (resource_is_exclusive(&dev->driver_exclusive_resource, off,
				  count)) {
		pci_warn_once(dev, "%s: Unexpected write to kernel-exclusive config offset %llx",
			      current->comm, off);
		add_taint(TAINT_USER, LOCKDEP_STILL_OK);
	}

	if (off > dev->cfg_size)
		return 0;
	if (off + count > dev->cfg_size) {
		size = dev->cfg_size - off;
		count = size;
	}

	pci_config_pm_runtime_get(dev);

	/* same alignment dance as pci_read_config(): byte, word, dwords, word, byte */
	if ((off & 1) && size) {
		pci_user_write_config_byte(dev, off, data[off - init_off]);
		off++;
		size--;
	}

	if ((off & 3) && size > 2) {
		u16 val = data[off - init_off];
		val |= (u16) data[off - init_off + 1] << 8;
		pci_user_write_config_word(dev, off, val);
		off += 2;
		size -= 2;
	}

	while (size > 3) {
		u32 val = data[off - init_off];
		val |= (u32) data[off - init_off + 1] << 8;
		val |= (u32) data[off - init_off + 2] << 16;
		val |= (u32) data[off - init_off + 3] << 24;
		pci_user_write_config_dword(dev, off, val);
		off += 4;
		size -= 4;
	}

	if (size >= 2) {
		u16 val = data[off - init_off];
		val |= (u16) data[off - init_off + 1] << 8;
		pci_user_write_config_word(dev, off, val);
		off += 2;
		size -= 2;
	}

	if (size)
		pci_user_write_config_byte(dev, off, data[off - init_off]);

	pci_config_pm_runtime_put(dev);

	return count;
}
static BIN_ATTR(config, 0644, pci_read_config, pci_write_config, 0);

static struct bin_attribute *pci_dev_config_attrs[] = {
	&bin_attr_config,
	NULL,
};

/* size the "config" file to match the device's actual config space */
static umode_t pci_dev_config_attr_is_visible(struct kobject *kobj,
					      struct bin_attribute *a, int n)
{
	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));

	a->size = PCI_CFG_SPACE_SIZE;
	if (pdev->cfg_size > PCI_CFG_SPACE_SIZE)
		a->size = PCI_CFG_SPACE_EXP_SIZE;

	return a->attr.mode;
}

static const struct attribute_group pci_dev_config_attr_group = {
	.bin_attrs = pci_dev_config_attrs,
	.is_bin_visible = pci_dev_config_attr_is_visible,
};

#ifdef HAVE_PCI_LEGACY
/**
 * pci_read_legacy_io - read byte(s) from legacy I/O port space
 * @filp: open sysfs file
 * @kobj: kobject corresponding to file to read from
 * @bin_attr: struct bin_attribute for this file
 * @buf: buffer to store results
 * @off: offset into legacy I/O port space
 * @count: number of bytes to read
 *
 * Reads 1, 2, or 4 bytes from legacy I/O port space using an arch specific
 * callback routine (pci_legacy_read).
 */
static ssize_t pci_read_legacy_io(struct file *filp, struct kobject *kobj,
				  struct bin_attribute *bin_attr, char *buf,
				  loff_t off, size_t count)
{
	struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));

	/* Only support 1, 2 or 4 byte accesses */
	if (count != 1 && count != 2 && count != 4)
		return -EINVAL;

	return pci_legacy_read(bus, off, (u32 *)buf, count);
}

/**
 * pci_write_legacy_io - write byte(s) to legacy I/O port space
 * @filp: open sysfs file
 * @kobj: kobject corresponding to file to read from
 * @bin_attr: struct bin_attribute for this file
 * @buf: buffer containing value to be written
 * @off: offset into legacy I/O port space
 * @count: number of bytes to write
 *
 * Writes 1, 2, or 4 bytes from legacy I/O port space using an arch specific
 * callback routine (pci_legacy_write).
 */
static ssize_t pci_write_legacy_io(struct file *filp, struct kobject *kobj,
				   struct bin_attribute *bin_attr, char *buf,
				   loff_t off, size_t count)
{
	struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));

	/* Only support 1, 2 or 4 byte accesses */
	if (count != 1 && count != 2 && count != 4)
		return -EINVAL;

	return pci_legacy_write(bus, off, *(u32 *)buf, count);
}

/**
 * pci_mmap_legacy_mem - map legacy PCI memory into user memory space
 * @filp: open sysfs file
 * @kobj: kobject corresponding to device to be mapped
 * @attr: struct bin_attribute for this file
 * @vma: struct vm_area_struct passed to mmap
 *
 * Uses an arch specific callback, pci_mmap_legacy_mem_page_range, to mmap
 * legacy memory space (first meg of bus space) into application virtual
 * memory space.
 */
static int pci_mmap_legacy_mem(struct file *filp, struct kobject *kobj,
			       struct bin_attribute *attr,
			       struct vm_area_struct *vma)
{
	struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));

	return pci_mmap_legacy_page_range(bus, vma, pci_mmap_mem);
}

/**
 * pci_mmap_legacy_io - map legacy PCI IO into user memory space
 * @filp: open sysfs file
 * @kobj: kobject corresponding to device to be mapped
 * @attr: struct bin_attribute for this file
 * @vma: struct vm_area_struct passed to mmap
 *
 * Uses an arch specific callback, pci_mmap_legacy_io_page_range, to mmap
 * legacy IO space (first meg of bus space) into application virtual
 * memory space. Returns -ENOSYS if the operation isn't supported
 */
static int pci_mmap_legacy_io(struct file *filp, struct kobject *kobj,
			      struct bin_attribute *attr,
			      struct vm_area_struct *vma)
{
	struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));

	return pci_mmap_legacy_page_range(bus, vma, pci_mmap_io);
}

/**
 * pci_adjust_legacy_attr - adjustment of legacy file attributes
 * @b: bus to create files under
 * @mmap_type: I/O port or memory
 *
 * Stub implementation. Can be overridden by arch if necessary.
 */
void __weak pci_adjust_legacy_attr(struct pci_bus *b,
				   enum pci_mmap_state mmap_type)
{
}

/**
 * pci_create_legacy_files - create legacy I/O port and memory files
 * @b: bus to create files under
 *
 * Some platforms allow access to legacy I/O port and ISA memory space on
 * a per-bus basis.  This routine creates the files and ties them into
 * their associated read, write and mmap files from pci-sysfs.c
 *
 * On error unwind, but don't propagate the error to the caller
 * as it is ok to set up the PCI bus without these files.
 */
void pci_create_legacy_files(struct pci_bus *b)
{
	int error;

	if (!sysfs_initialized)
		return;

	/* one allocation holds both legacy_io and legacy_mem attributes */
	b->legacy_io = kcalloc(2, sizeof(struct bin_attribute),
			       GFP_ATOMIC);
	if (!b->legacy_io)
		goto kzalloc_err;

	sysfs_bin_attr_init(b->legacy_io);
	b->legacy_io->attr.name = "legacy_io";
	b->legacy_io->size = 0xffff;
	b->legacy_io->attr.mode = 0600;
	b->legacy_io->read = pci_read_legacy_io;
	b->legacy_io->write = pci_write_legacy_io;
	b->legacy_io->mmap = pci_mmap_legacy_io;
	b->legacy_io->f_mapping = iomem_get_mapping;
	pci_adjust_legacy_attr(b, pci_mmap_io);
	error = device_create_bin_file(&b->dev, b->legacy_io);
	if (error)
		goto legacy_io_err;

	/* Allocated above after the legacy_io struct */
	b->legacy_mem = b->legacy_io + 1;
	sysfs_bin_attr_init(b->legacy_mem);
	b->legacy_mem->attr.name = "legacy_mem";
	b->legacy_mem->size = 1024*1024;
	b->legacy_mem->attr.mode = 0600;
	b->legacy_mem->mmap = pci_mmap_legacy_mem;
	b->legacy_mem->f_mapping = iomem_get_mapping;
	pci_adjust_legacy_attr(b, pci_mmap_mem);
	error = device_create_bin_file(&b->dev, b->legacy_mem);
	if (error)
		goto legacy_mem_err;

	return;

legacy_mem_err:
	device_remove_bin_file(&b->dev, b->legacy_io);
legacy_io_err:
	kfree(b->legacy_io);
	b->legacy_io = NULL;
kzalloc_err:
	dev_warn(&b->dev, "could not create legacy I/O port and ISA memory resources in sysfs\n");
}

void pci_remove_legacy_files(struct pci_bus *b)
{
	if (b->legacy_io) {
		device_remove_bin_file(&b->dev, b->legacy_io);
		device_remove_bin_file(&b->dev, b->legacy_mem);
		kfree(b->legacy_io); /* both are allocated here */
	}
}
#endif /* HAVE_PCI_LEGACY */

#if defined(HAVE_PCI_MMAP) || defined(ARCH_GENERIC_PCI_MMAP_RESOURCE)

/*
 * Return 1 when the vma requested by userspace lies entirely within BAR
 * @resno of @pdev, 0 otherwise.  For procfs-style mmaps the user-visible
 * resource address is translated first.
 */
int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma,
		  enum pci_mmap_api mmap_api)
{
	unsigned long nr, start, size;
	resource_size_t pci_start = 0, pci_end;

	if (pci_resource_len(pdev, resno) == 0)
		return 0;
	nr = vma_pages(vma);
	start = vma->vm_pgoff;
	size = ((pci_resource_len(pdev, resno) - 1) >> PAGE_SHIFT) + 1;
	if (mmap_api == PCI_MMAP_PROCFS) {
		pci_resource_to_user(pdev, resno, &pdev->resource[resno],
				     &pci_start, &pci_end);
		pci_start >>= PAGE_SHIFT;
	}
	if (start >= pci_start && start < pci_start + size &&
			start + nr <= pci_start + size)
		return 1;
	return 0;
}

/**
 * pci_mmap_resource - map a PCI resource into user memory space
 * @kobj: kobject for mapping
 * @attr: struct bin_attribute for the file being mapped
 * @vma: struct vm_area_struct passed into the mmap
 * @write_combine: 1 for write_combine mapping
 *
 * Use the regular PCI mapping routines to map a PCI resource into userspace.
 */
static int pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr,
			     struct vm_area_struct *vma, int write_combine)
{
	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
	int bar = (unsigned long)attr->private;
	enum pci_mmap_state mmap_type;
	struct resource *res = &pdev->resource[bar];
	int ret;

	ret = security_locked_down(LOCKDOWN_PCI_ACCESS);
	if (ret)
		return ret;

	if (res->flags & IORESOURCE_MEM && iomem_is_exclusive(res->start))
		return -EINVAL;

	if (!pci_mmap_fits(pdev, bar, vma, PCI_MMAP_SYSFS))
		return -EINVAL;

	mmap_type = res->flags & IORESOURCE_MEM ? pci_mmap_mem : pci_mmap_io;

	return pci_mmap_resource_range(pdev, bar, vma, mmap_type, write_combine);
}

static int pci_mmap_resource_uc(struct file *filp, struct kobject *kobj,
				struct bin_attribute *attr,
				struct vm_area_struct *vma)
{
	return pci_mmap_resource(kobj, attr, vma, 0);
}

static int pci_mmap_resource_wc(struct file *filp, struct kobject *kobj,
				struct bin_attribute *attr,
				struct vm_area_struct *vma)
{
	return pci_mmap_resource(kobj, attr, vma, 1);
}

/* Perform a single 1/2/4-byte port I/O access at @off within an I/O BAR */
static ssize_t pci_resource_io(struct file *filp, struct kobject *kobj,
			       struct bin_attribute *attr, char *buf,
			       loff_t off, size_t count, bool write)
{
	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
	int bar = (unsigned long)attr->private;
	unsigned long port = off;

	port += pci_resource_start(pdev, bar);

	if (port > pci_resource_end(pdev, bar))
		return 0;

	if (port + count - 1 > pci_resource_end(pdev, bar))
		return -EINVAL;

	switch (count) {
	case 1:
		if (write)
			outb(*(u8 *)buf, port);
		else
			*(u8 *)buf = inb(port);
		return 1;
	case 2:
		if (write)
			outw(*(u16 *)buf, port);
		else
			*(u16 *)buf = inw(port);
		return 2;
	case 4:
		if (write)
			outl(*(u32 *)buf, port);
		else
			*(u32 *)buf = inl(port);
		return 4;
	}
	return -EINVAL;
}

static ssize_t pci_read_resource_io(struct file *filp, struct kobject *kobj,
				    struct bin_attribute *attr, char *buf,
				    loff_t off, size_t count)
{
	return pci_resource_io(filp, kobj, attr, buf, off, count, false);
}

static ssize_t pci_write_resource_io(struct file *filp, struct kobject *kobj,
				     struct bin_attribute *attr, char *buf,
				     loff_t off, size_t count)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_PCI_ACCESS);
	if (ret)
		return ret;

	return
	       pci_resource_io(filp, kobj, attr, buf, off, count, true);
}

/**
 * pci_remove_resource_files - cleanup resource files
 * @pdev: dev to cleanup
 *
 * If we created resource files for @pdev, remove them from sysfs and
 * free their resources.
 */
static void pci_remove_resource_files(struct pci_dev *pdev)
{
	int i;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		struct bin_attribute *res_attr;

		/* uncached "resourceN" file, if one was created */
		res_attr = pdev->res_attr[i];
		if (res_attr) {
			sysfs_remove_bin_file(&pdev->dev.kobj, res_attr);
			kfree(res_attr);
		}

		/* write-combining "resourceN_wc" file, if one was created */
		res_attr = pdev->res_attr_wc[i];
		if (res_attr) {
			sysfs_remove_bin_file(&pdev->dev.kobj, res_attr);
			kfree(res_attr);
		}
	}
}

/*
 * pci_create_attr - create a sysfs "resourceN" (or "resourceN_wc") file
 * @pdev: PCI device
 * @num: BAR number
 * @write_combine: nonzero to create the write-combining "_wc" variant
 *
 * Returns 0 on success, negative errno on failure.
 */
static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
{
	/* allocate attribute structure, piggyback attribute name */
	int name_len = write_combine ? 13 : 10;	/* "resourceN_wc"/"resourceN" + NUL */
	struct bin_attribute *res_attr;
	char *res_attr_name;
	int retval;

	/*
	 * NOTE(review): GFP_ATOMIC looks unnecessary here — this appears to
	 * run in process context; confirm before relaxing to GFP_KERNEL.
	 */
	res_attr = kzalloc(sizeof(*res_attr) + name_len, GFP_ATOMIC);
	if (!res_attr)
		return -ENOMEM;

	/* the name string lives immediately after the attribute struct */
	res_attr_name = (char *)(res_attr + 1);

	sysfs_bin_attr_init(res_attr);
	if (write_combine) {
		pdev->res_attr_wc[num] = res_attr;
		sprintf(res_attr_name, "resource%d_wc", num);
		res_attr->mmap = pci_mmap_resource_wc;
	} else {
		pdev->res_attr[num] = res_attr;
		sprintf(res_attr_name, "resource%d", num);
		if (pci_resource_flags(pdev, num) & IORESOURCE_IO) {
			/* I/O-port BARs get read()/write() accessors too */
			res_attr->read = pci_read_resource_io;
			res_attr->write = pci_write_resource_io;
			if (arch_can_pci_mmap_io())
				res_attr->mmap = pci_mmap_resource_uc;
		} else {
			res_attr->mmap = pci_mmap_resource_uc;
		}
	}
	if (res_attr->mmap)
		res_attr->f_mapping = iomem_get_mapping;
	res_attr->attr.name = res_attr_name;
	res_attr->attr.mode = 0600;
	res_attr->size = pci_resource_len(pdev, num);
	/* stash the BAR number for the read/write/mmap callbacks */
	res_attr->private = (void *)(unsigned long)num;
	retval = sysfs_create_bin_file(&pdev->dev.kobj, res_attr);
	if (retval)
		kfree(res_attr);

	return retval;
}

/**
 * pci_create_resource_files - create resource files in sysfs for @dev
 * @pdev: dev in question
 *
 * Walk the resources in @pdev creating files for each resource available.
 */
static int pci_create_resource_files(struct pci_dev *pdev)
{
	int i;
	int retval;

	/* Expose the PCI resources from this device as files */
	for (i = 0; i < PCI_STD_NUM_BARS; i++) {

		/* skip empty resources */
		if (!pci_resource_len(pdev, i))
			continue;

		retval = pci_create_attr(pdev, i, 0);
		/* for prefetchable resources, create a WC mappable file */
		if (!retval && arch_can_pci_mmap_wc() &&
		    pdev->resource[i].flags & IORESOURCE_PREFETCH)
			retval = pci_create_attr(pdev, i, 1);
		if (retval) {
			/* roll back whatever was created so far */
			pci_remove_resource_files(pdev);
			return retval;
		}
	}
	return 0;
}
#else /* !(defined(HAVE_PCI_MMAP) || defined(ARCH_GENERIC_PCI_MMAP_RESOURCE)) */
/* no mmap support on this arch: resource files are not created */
int __weak pci_create_resource_files(struct pci_dev *dev) { return 0; }
void __weak pci_remove_resource_files(struct pci_dev *dev) { return; }
#endif

/**
 * pci_write_rom - used to enable access to the PCI ROM display
 * @filp: sysfs file
 * @kobj: kernel object handle
 * @bin_attr: struct bin_attribute for this file
 * @buf: user input
 * @off: file offset
 * @count: number of bytes in input
 *
 * writing anything except 0 enables it
 */
static ssize_t pci_write_rom(struct file *filp, struct kobject *kobj,
			     struct bin_attribute *bin_attr, char *buf,
			     loff_t off, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));

	/* exactly "0\n" (e.g. echo 0) disables ROM access */
	if ((off == 0) && (*buf == '0') && (count == 2))
		pdev->rom_attr_enabled = 0;
	else
		pdev->rom_attr_enabled = 1;

	return count;
}

/**
 * pci_read_rom - read a PCI ROM
 * @filp: sysfs file
 * @kobj: kernel object handle
 * @bin_attr: struct bin_attribute for this file
 * @buf: where to put the data we read from the ROM
 * @off: file offset
 * @count: number of bytes to read
 *
 * Put @count bytes starting at @off into @buf from the ROM in the PCI
 * device corresponding to @kobj.
 */
static ssize_t pci_read_rom(struct file *filp, struct kobject *kobj,
			    struct bin_attribute *bin_attr, char *buf,
			    loff_t off, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
	void __iomem *rom;
	size_t size;

	/* reads fail until enabled by writing non-zero to this file */
	if (!pdev->rom_attr_enabled)
		return -EINVAL;

	rom = pci_map_rom(pdev, &size);	/* size starts out as PCI window size */
	if (!rom || !size)
		return -EIO;

	if (off >= size)
		count = 0;	/* EOF */
	else {
		/* clamp the read to the end of the ROM */
		if (off + count > size)
			count = size - off;

		memcpy_fromio(buf, rom + off, count);
	}
	pci_unmap_rom(pdev, rom);

	return count;
}
static BIN_ATTR(rom, 0600, pci_read_rom, pci_write_rom, 0);

static struct bin_attribute *pci_dev_rom_attrs[] = {
	&bin_attr_rom,
	NULL,
};

static umode_t pci_dev_rom_attr_is_visible(struct kobject *kobj,
					   struct bin_attribute *a, int n)
{
	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
	size_t rom_size;

	/* If the device has a ROM, try to expose it in sysfs.
	 */
	rom_size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
	if (!rom_size)
		return 0;

	/* size the "rom" file to the ROM window */
	a->size = rom_size;

	return a->attr.mode;
}

static const struct attribute_group pci_dev_rom_attr_group = {
	.bin_attrs = pci_dev_rom_attrs,
	.is_bin_visible = pci_dev_rom_attr_is_visible,
};

/* "reset" attribute: writing "1" triggers a function-level reset */
static ssize_t reset_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	unsigned long val;
	ssize_t result;

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	/* only "1" is a valid request */
	if (val != 1)
		return -EINVAL;

	/* keep the device powered up across the reset */
	pm_runtime_get_sync(dev);
	result = pci_reset_function(pdev);
	pm_runtime_put(dev);
	if (result < 0)
		return result;

	return count;
}
static DEVICE_ATTR_WO(reset);

static struct attribute *pci_dev_reset_attrs[] = {
	&dev_attr_reset.attr,
	NULL,
};

/* only expose "reset" when the device supports some reset method */
static umode_t pci_dev_reset_attr_is_visible(struct kobject *kobj,
					     struct attribute *a, int n)
{
	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));

	if (!pci_reset_supported(pdev))
		return 0;

	return a->mode;
}

static const struct attribute_group pci_dev_reset_attr_group = {
	.attrs = pci_dev_reset_attrs,
	.is_visible = pci_dev_reset_attr_is_visible,
};

/*
 * Per-BAR "resourceN_resize" attributes (Resizable BAR support).
 * Show reports the bitmask of possible BAR sizes; store resizes the
 * BAR.  Comments cannot live inside the macro body, so the store flow
 * is summarized here: refuse while a driver is bound; disable memory
 * decode; drop the resource files; release all same-type BARs; resize;
 * reassign bus resources; recreate the files; restore PCI_COMMAND.
 */
#define pci_dev_resource_resize_attr(n)					\
static ssize_t resource##n##_resize_show(struct device *dev,		\
					 struct device_attribute *attr,	\
					 char * buf)			\
{									\
	struct pci_dev *pdev = to_pci_dev(dev);				\
	ssize_t ret;							\
									\
	pci_config_pm_runtime_get(pdev);				\
									\
	ret = sysfs_emit(buf, "%016llx\n",				\
			 (u64)pci_rebar_get_possible_sizes(pdev, n));	\
									\
	pci_config_pm_runtime_put(pdev);				\
									\
	return ret;							\
}									\
									\
static ssize_t resource##n##_resize_store(struct device *dev,		\
					  struct device_attribute *attr,\
					  const char *buf, size_t count)\
{									\
	struct pci_dev *pdev = to_pci_dev(dev);				\
	unsigned long size, flags;					\
	int ret, i;							\
	u16 cmd;							\
									\
	if (kstrtoul(buf, 0, &size) < 0)				\
		return -EINVAL;						\
									\
	device_lock(dev);						\
	if (dev->driver) {						\
		ret = -EBUSY;						\
		goto unlock;						\
	}								\
									\
	pci_config_pm_runtime_get(pdev);				\
									\
	if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA) {		\
		ret = aperture_remove_conflicting_pci_devices(pdev,	\
						"resourceN_resize");	\
		if (ret)						\
			goto pm_put;					\
	}								\
									\
	pci_read_config_word(pdev, PCI_COMMAND, &cmd);			\
	pci_write_config_word(pdev, PCI_COMMAND,			\
			      cmd & ~PCI_COMMAND_MEMORY);		\
									\
	flags = pci_resource_flags(pdev, n);				\
									\
	pci_remove_resource_files(pdev);				\
									\
	for (i = 0; i < PCI_STD_NUM_BARS; i++) {			\
		if (pci_resource_len(pdev, i) &&			\
		    pci_resource_flags(pdev, i) == flags)		\
			pci_release_resource(pdev, i);			\
	}								\
									\
	ret = pci_resize_resource(pdev, n, size);			\
									\
	pci_assign_unassigned_bus_resources(pdev->bus);			\
									\
	if (pci_create_resource_files(pdev))				\
		pci_warn(pdev, "Failed to recreate resource files after BAR resizing\n");\
									\
	pci_write_config_word(pdev, PCI_COMMAND, cmd);			\
pm_put:									\
	pci_config_pm_runtime_put(pdev);				\
unlock:									\
	device_unlock(dev);						\
									\
	return ret ? ret : count;					\
}									\
static DEVICE_ATTR_RW(resource##n##_resize)

/* one resourceN_resize attribute per standard BAR */
pci_dev_resource_resize_attr(0);
pci_dev_resource_resize_attr(1);
pci_dev_resource_resize_attr(2);
pci_dev_resource_resize_attr(3);
pci_dev_resource_resize_attr(4);
pci_dev_resource_resize_attr(5);

static struct attribute *resource_resize_attrs[] = {
	&dev_attr_resource0_resize.attr,
	&dev_attr_resource1_resize.attr,
	&dev_attr_resource2_resize.attr,
	&dev_attr_resource3_resize.attr,
	&dev_attr_resource4_resize.attr,
	&dev_attr_resource5_resize.attr,
	NULL,
};

/* only visible for BARs that are actually resizable (have a ReBAR size) */
static umode_t resource_resize_is_visible(struct kobject *kobj,
					  struct attribute *a, int n)
{
	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));

	return pci_rebar_get_current_size(pdev, n) < 0 ? 0 : a->mode;
}

static const struct attribute_group pci_dev_resource_resize_group = {
	.attrs = resource_resize_attrs,
	.is_visible = resource_resize_is_visible,
};

/*
 * Create the per-BAR resource files for @pdev.  Fails with -EACCES if
 * called before pci_sysfs_init() has run.
 */
int __must_check pci_create_sysfs_dev_files(struct pci_dev *pdev)
{
	if (!sysfs_initialized)
		return -EACCES;

	return pci_create_resource_files(pdev);
}

/**
 * pci_remove_sysfs_dev_files - cleanup PCI specific sysfs files
 * @pdev: device whose entries we should free
 *
 * Cleanup when @pdev is removed from sysfs.
1503 */ 1504 void pci_remove_sysfs_dev_files(struct pci_dev *pdev) 1505 { 1506 if (!sysfs_initialized) 1507 return; 1508 1509 pci_remove_resource_files(pdev); 1510 } 1511 1512 static int __init pci_sysfs_init(void) 1513 { 1514 struct pci_dev *pdev = NULL; 1515 struct pci_bus *pbus = NULL; 1516 int retval; 1517 1518 sysfs_initialized = 1; 1519 for_each_pci_dev(pdev) { 1520 retval = pci_create_sysfs_dev_files(pdev); 1521 if (retval) { 1522 pci_dev_put(pdev); 1523 return retval; 1524 } 1525 } 1526 1527 while ((pbus = pci_find_next_bus(pbus))) 1528 pci_create_legacy_files(pbus); 1529 1530 return 0; 1531 } 1532 late_initcall(pci_sysfs_init); 1533 1534 static struct attribute *pci_dev_dev_attrs[] = { 1535 &dev_attr_boot_vga.attr, 1536 NULL, 1537 }; 1538 1539 static umode_t pci_dev_attrs_are_visible(struct kobject *kobj, 1540 struct attribute *a, int n) 1541 { 1542 struct device *dev = kobj_to_dev(kobj); 1543 struct pci_dev *pdev = to_pci_dev(dev); 1544 1545 if (a == &dev_attr_boot_vga.attr) 1546 if ((pdev->class >> 8) != PCI_CLASS_DISPLAY_VGA) 1547 return 0; 1548 1549 return a->mode; 1550 } 1551 1552 static struct attribute *pci_dev_hp_attrs[] = { 1553 &dev_attr_remove.attr, 1554 &dev_attr_dev_rescan.attr, 1555 NULL, 1556 }; 1557 1558 static umode_t pci_dev_hp_attrs_are_visible(struct kobject *kobj, 1559 struct attribute *a, int n) 1560 { 1561 struct device *dev = kobj_to_dev(kobj); 1562 struct pci_dev *pdev = to_pci_dev(dev); 1563 1564 if (pdev->is_virtfn) 1565 return 0; 1566 1567 return a->mode; 1568 } 1569 1570 static umode_t pci_bridge_attrs_are_visible(struct kobject *kobj, 1571 struct attribute *a, int n) 1572 { 1573 struct device *dev = kobj_to_dev(kobj); 1574 struct pci_dev *pdev = to_pci_dev(dev); 1575 1576 if (pci_is_bridge(pdev)) 1577 return a->mode; 1578 1579 return 0; 1580 } 1581 1582 static umode_t pcie_dev_attrs_are_visible(struct kobject *kobj, 1583 struct attribute *a, int n) 1584 { 1585 struct device *dev = kobj_to_dev(kobj); 1586 struct pci_dev *pdev = 
to_pci_dev(dev); 1587 1588 if (pci_is_pcie(pdev)) 1589 return a->mode; 1590 1591 return 0; 1592 } 1593 1594 static const struct attribute_group pci_dev_group = { 1595 .attrs = pci_dev_attrs, 1596 }; 1597 1598 const struct attribute_group *pci_dev_groups[] = { 1599 &pci_dev_group, 1600 &pci_dev_config_attr_group, 1601 &pci_dev_rom_attr_group, 1602 &pci_dev_reset_attr_group, 1603 &pci_dev_reset_method_attr_group, 1604 &pci_dev_vpd_attr_group, 1605 #ifdef CONFIG_DMI 1606 &pci_dev_smbios_attr_group, 1607 #endif 1608 #ifdef CONFIG_ACPI 1609 &pci_dev_acpi_attr_group, 1610 #endif 1611 &pci_dev_resource_resize_group, 1612 NULL, 1613 }; 1614 1615 static const struct attribute_group pci_dev_hp_attr_group = { 1616 .attrs = pci_dev_hp_attrs, 1617 .is_visible = pci_dev_hp_attrs_are_visible, 1618 }; 1619 1620 static const struct attribute_group pci_dev_attr_group = { 1621 .attrs = pci_dev_dev_attrs, 1622 .is_visible = pci_dev_attrs_are_visible, 1623 }; 1624 1625 static const struct attribute_group pci_bridge_attr_group = { 1626 .attrs = pci_bridge_attrs, 1627 .is_visible = pci_bridge_attrs_are_visible, 1628 }; 1629 1630 static const struct attribute_group pcie_dev_attr_group = { 1631 .attrs = pcie_dev_attrs, 1632 .is_visible = pcie_dev_attrs_are_visible, 1633 }; 1634 1635 static const struct attribute_group *pci_dev_attr_groups[] = { 1636 &pci_dev_attr_group, 1637 &pci_dev_hp_attr_group, 1638 #ifdef CONFIG_PCI_IOV 1639 &sriov_pf_dev_attr_group, 1640 &sriov_vf_dev_attr_group, 1641 #endif 1642 &pci_bridge_attr_group, 1643 &pcie_dev_attr_group, 1644 #ifdef CONFIG_PCIEAER 1645 &aer_stats_attr_group, 1646 #endif 1647 #ifdef CONFIG_PCIEASPM 1648 &aspm_ctrl_attr_group, 1649 #endif 1650 NULL, 1651 }; 1652 1653 const struct device_type pci_dev_type = { 1654 .groups = pci_dev_attr_groups, 1655 }; 1656