/* SPDX-License-Identifier: GPL-2.0 */
/*
 *	pci.h
 *
 *	PCI defines and function prototypes
 *	Copyright 1994, Drew Eckhardt
 *	Copyright 1997--1999 Martin Mares <mj@ucw.cz>
 *
 *	PCI Express ASPM defines and function prototypes
 *	Copyright (c) 2007 Intel Corp.
 *		Zhang Yanmin (yanmin.zhang@intel.com)
 *		Shaohua Li (shaohua.li@intel.com)
 *
 *	For more information, please consult the following manuals (look at
 *	http://www.pcisig.com/ for how to get them):
 *
 *	PCI BIOS Specification
 *	PCI Local Bus Specification
 *	PCI to PCI Bridge Specification
 *	PCI Express Specification
 *	PCI System Design Guide
 */
#ifndef LINUX_PCI_H
#define LINUX_PCI_H

#include <linux/args.h>
#include <linux/mod_devicetable.h>

#include <linux/types.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/list.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/kobject.h>
#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/resource_ext.h>
#include <linux/msi_api.h>
#include <uapi/linux/pci.h>

#include <linux/pci_ids.h>

#define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY | \
                               PCI_STATUS_SIG_SYSTEM_ERROR | \
                               PCI_STATUS_REC_MASTER_ABORT | \
                               PCI_STATUS_REC_TARGET_ABORT | \
                               PCI_STATUS_SIG_TARGET_ABORT | \
                               PCI_STATUS_PARITY)

/* Number of reset methods used in pci_reset_fn_methods array in pci.c */
#define PCI_NUM_RESET_METHODS 8

#define PCI_RESET_PROBE		true
#define PCI_RESET_DO_RESET	false

/*
 * The PCI interface treats multi-function devices as independent
 * devices. The slot/function address of each device is encoded
 * in a single byte as follows:
 *
 *	7:3 = slot
 *	2:0 = function
 *
 * PCI_DEVFN(), PCI_SLOT(), and PCI_FUNC() are defined in uapi/linux/pci.h.
 * In the interest of not exposing interfaces to user-space unnecessarily,
 * the following kernel-only defines are being added here.
 */
#define PCI_DEVID(bus, devfn) ((((u16)(bus)) << 8) | (devfn))
/* Return bus number from PCI devid = (((u16)bus_number) << 8) | devfn */
#define PCI_BUS_NUM(x) (((x) >> 8) & 0xff)
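
/*
 * Illustrative sketch (not part of this header): how the helpers above and
 * the uapi PCI_DEVFN()/PCI_SLOT()/PCI_FUNC() macros fit together.  The bus,
 * slot and function numbers below are made up for the example.
 *
 *	u8  devfn = PCI_DEVFN(0x02, 1);		// slot 2, function 1 -> 0x11
 *	u16 devid = PCI_DEVID(0x3a, devfn);	// 0x3a11
 *
 *	PCI_BUS_NUM(devid);			// 0x3a
 *	PCI_SLOT(devfn);			// 0x02
 *	PCI_FUNC(devfn);			// 0x01
 */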

/* pci_slot represents a physical slot */
struct pci_slot {
        struct pci_bus *bus;            /* Bus this slot is on */
        struct list_head list;          /* Node in list of slots */
        struct hotplug_slot *hotplug;   /* Hotplug info (move here) */
        unsigned char number;           /* PCI_SLOT(pci_dev->devfn) */
        struct kobject kobj;
};

static inline const char *pci_slot_name(const struct pci_slot *slot)
{
        return kobject_name(&slot->kobj);
}

/* File state for mmap()s on /proc/bus/pci/X/Y */
enum pci_mmap_state {
        pci_mmap_io,
        pci_mmap_mem
};

/* For PCI devices, the region numbers are assigned this way: */
enum {
        /* #0-5: standard PCI resources */
        PCI_STD_RESOURCES,
        PCI_STD_RESOURCE_END = PCI_STD_RESOURCES + PCI_STD_NUM_BARS - 1,

        /* #6: expansion ROM resource */
        PCI_ROM_RESOURCE,

        /* Device-specific resources */
#ifdef CONFIG_PCI_IOV
        PCI_IOV_RESOURCES,
        PCI_IOV_RESOURCE_END = PCI_IOV_RESOURCES + PCI_SRIOV_NUM_BARS - 1,
#endif

/* PCI-to-PCI (P2P) bridge windows */
#define PCI_BRIDGE_IO_WINDOW		(PCI_BRIDGE_RESOURCES + 0)
#define PCI_BRIDGE_MEM_WINDOW		(PCI_BRIDGE_RESOURCES + 1)
#define PCI_BRIDGE_PREF_MEM_WINDOW	(PCI_BRIDGE_RESOURCES + 2)

/* CardBus bridge windows */
#define PCI_CB_BRIDGE_IO_0_WINDOW	(PCI_BRIDGE_RESOURCES + 0)
#define PCI_CB_BRIDGE_IO_1_WINDOW	(PCI_BRIDGE_RESOURCES + 1)
#define PCI_CB_BRIDGE_MEM_0_WINDOW	(PCI_BRIDGE_RESOURCES + 2)
#define PCI_CB_BRIDGE_MEM_1_WINDOW	(PCI_BRIDGE_RESOURCES + 3)

/* Total number of bridge resources for P2P and CardBus */
#define PCI_BRIDGE_RESOURCE_NUM 4

        /* Resources assigned to buses behind the bridge */
        PCI_BRIDGE_RESOURCES,
        PCI_BRIDGE_RESOURCE_END = PCI_BRIDGE_RESOURCES +
                                  PCI_BRIDGE_RESOURCE_NUM - 1,

        /* Total resources associated with a PCI device */
        PCI_NUM_RESOURCES,

        /* Preserve this for compatibility */
        DEVICE_COUNT_RESOURCE = PCI_NUM_RESOURCES,
};

/**
 * enum pci_interrupt_pin - PCI INTx interrupt values
 * @PCI_INTERRUPT_UNKNOWN: Unknown or unassigned interrupt
 * @PCI_INTERRUPT_INTA: PCI INTA pin
 * @PCI_INTERRUPT_INTB: PCI INTB pin
 * @PCI_INTERRUPT_INTC: PCI INTC pin
 * @PCI_INTERRUPT_INTD: PCI INTD pin
 *
 * Corresponds to values for legacy PCI INTx interrupts, as can be found in the
 * PCI_INTERRUPT_PIN register.
 */
enum pci_interrupt_pin {
        PCI_INTERRUPT_UNKNOWN,
        PCI_INTERRUPT_INTA,
        PCI_INTERRUPT_INTB,
        PCI_INTERRUPT_INTC,
        PCI_INTERRUPT_INTD,
};

/* The number of legacy PCI INTx interrupts */
#define PCI_NUM_INTX 4

/*
 * Reading from a device that doesn't respond typically returns ~0. A
 * successful read from a device may also return ~0, so you need additional
 * information to reliably identify errors.
 */
#define PCI_ERROR_RESPONSE (~0ULL)
#define PCI_SET_ERROR_RESPONSE(val) (*(val) = ((typeof(*(val))) PCI_ERROR_RESPONSE))
#define PCI_POSSIBLE_ERROR(val) ((val) == ((typeof(val)) PCI_ERROR_RESPONSE))
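
/*
 * Illustrative sketch (not part of this header): checking a config read
 * result with PCI_POSSIBLE_ERROR(), e.g. in a driver's probe path.  "pdev"
 * is assumed to be a valid struct pci_dev pointer.
 *
 *	u32 val;
 *
 *	pci_read_config_dword(pdev, PCI_VENDOR_ID, &val);
 *	if (PCI_POSSIBLE_ERROR(val)) {
 *		// All-ones data: the device may be gone or the read failed.
 *		// Additional checks (e.g. pci_device_is_present()) are needed
 *		// to tell an error apart from a legitimate ~0 value.
 *	}
 */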

/*
 * pci_power_t values must match the bits in the Capabilities PME_Support
 * and Control/Status PowerState fields in the Power Management capability.
 */
typedef int __bitwise pci_power_t;

#define PCI_D0		((pci_power_t __force) 0)
#define PCI_D1		((pci_power_t __force) 1)
#define PCI_D2		((pci_power_t __force) 2)
#define PCI_D3hot	((pci_power_t __force) 3)
#define PCI_D3cold	((pci_power_t __force) 4)
#define PCI_UNKNOWN	((pci_power_t __force) 5)
#define PCI_POWER_ERROR	((pci_power_t __force) -1)

/* Remember to update this when the list above changes! */
extern const char *pci_power_names[];

static inline const char *pci_power_name(pci_power_t state)
{
        return pci_power_names[1 + (__force int) state];
}

/**
 * typedef pci_channel_state_t
 *
 * The pci_channel state describes connectivity between the CPU and
 * the PCI device. If some PCI bus between here and the PCI device
 * has crashed or locked up, this info is reflected here.
 */
typedef unsigned int __bitwise pci_channel_state_t;

enum {
        /* I/O channel is in normal state */
        pci_channel_io_normal = (__force pci_channel_state_t) 1,

        /* I/O to channel is blocked */
        pci_channel_io_frozen = (__force pci_channel_state_t) 2,

        /* PCI card is dead */
        pci_channel_io_perm_failure = (__force pci_channel_state_t) 3,
};

typedef unsigned int __bitwise pcie_reset_state_t;

enum pcie_reset_state {
        /* Reset is NOT asserted (Use to deassert reset) */
        pcie_deassert_reset = (__force pcie_reset_state_t) 1,

        /* Use #PERST to reset PCIe device */
        pcie_warm_reset = (__force pcie_reset_state_t) 2,

        /* Use PCIe Hot Reset to reset device */
        pcie_hot_reset = (__force pcie_reset_state_t) 3
};

typedef unsigned short __bitwise pci_dev_flags_t;
enum pci_dev_flags {
        /* INTX_DISABLE in PCI_COMMAND register disables MSI too */
        PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG = (__force pci_dev_flags_t) (1 << 0),
        /* Device configuration is irrevocably lost if disabled into D3 */
        PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) (1 << 1),
        /* Provide indication device is assigned by a Virtual Machine Manager */
        PCI_DEV_FLAGS_ASSIGNED = (__force pci_dev_flags_t) (1 << 2),
        /* Flag for quirk use to store if quirk-specific ACS is enabled */
        PCI_DEV_FLAGS_ACS_ENABLED_QUIRK = (__force pci_dev_flags_t) (1 << 3),
        /* Use a PCIe-to-PCI bridge alias even if !pci_is_pcie */
        PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS = (__force pci_dev_flags_t) (1 << 5),
        /* Do not use bus resets for device */
        PCI_DEV_FLAGS_NO_BUS_RESET = (__force pci_dev_flags_t) (1 << 6),
        /* Do not use PM reset even if device advertises NoSoftRst- */
        PCI_DEV_FLAGS_NO_PM_RESET = (__force pci_dev_flags_t) (1 << 7),
        /* Get VPD from function 0 VPD */
        PCI_DEV_FLAGS_VPD_REF_F0 = (__force pci_dev_flags_t) (1 << 8),
        /* A non-root bridge where translation occurs, stop alias search here */
        PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT = (__force pci_dev_flags_t) (1 << 9),
        /* Do not use FLR even if device advertises PCI_AF_CAP */
        PCI_DEV_FLAGS_NO_FLR_RESET = (__force pci_dev_flags_t) (1 << 10),
        /* Don't use Relaxed Ordering for TLPs directed at this device */
        PCI_DEV_FLAGS_NO_RELAXED_ORDERING = (__force pci_dev_flags_t) (1 << 11),
        /* Device does honor MSI masking despite saying otherwise */
        PCI_DEV_FLAGS_HAS_MSI_MASKING = (__force pci_dev_flags_t) (1 << 12),
};

enum pci_irq_reroute_variant {
        INTEL_IRQ_REROUTE_VARIANT = 1,
        MAX_IRQ_REROUTE_VARIANTS = 3
};

typedef unsigned short __bitwise pci_bus_flags_t;
enum pci_bus_flags {
        PCI_BUS_FLAGS_NO_MSI = (__force pci_bus_flags_t) 1,
        PCI_BUS_FLAGS_NO_MMRBC = (__force pci_bus_flags_t) 2,
        PCI_BUS_FLAGS_NO_AERSID = (__force pci_bus_flags_t) 4,
        PCI_BUS_FLAGS_NO_EXTCFG = (__force pci_bus_flags_t) 8,
};

/* Values from Link Status register, PCIe r3.1, sec 7.8.8 */
enum pcie_link_width {
        PCIE_LNK_WIDTH_RESRV = 0x00,
        PCIE_LNK_X1 = 0x01,
        PCIE_LNK_X2 = 0x02,
        PCIE_LNK_X4 = 0x04,
        PCIE_LNK_X8 = 0x08,
        PCIE_LNK_X12 = 0x0c,
        PCIE_LNK_X16 = 0x10,
        PCIE_LNK_X32 = 0x20,
        PCIE_LNK_WIDTH_UNKNOWN = 0xff,
};

/* See matching string table in pci_speed_string() */
enum pci_bus_speed {
        PCI_SPEED_33MHz = 0x00,
        PCI_SPEED_66MHz = 0x01,
        PCI_SPEED_66MHz_PCIX = 0x02,
        PCI_SPEED_100MHz_PCIX = 0x03,
        PCI_SPEED_133MHz_PCIX = 0x04,
        PCI_SPEED_66MHz_PCIX_ECC = 0x05,
        PCI_SPEED_100MHz_PCIX_ECC = 0x06,
        PCI_SPEED_133MHz_PCIX_ECC = 0x07,
        PCI_SPEED_66MHz_PCIX_266 = 0x09,
        PCI_SPEED_100MHz_PCIX_266 = 0x0a,
        PCI_SPEED_133MHz_PCIX_266 = 0x0b,
        AGP_UNKNOWN = 0x0c,
        AGP_1X = 0x0d,
        AGP_2X = 0x0e,
        AGP_4X = 0x0f,
        AGP_8X = 0x10,
        PCI_SPEED_66MHz_PCIX_533 = 0x11,
        PCI_SPEED_100MHz_PCIX_533 = 0x12,
        PCI_SPEED_133MHz_PCIX_533 = 0x13,
        PCIE_SPEED_2_5GT = 0x14,
        PCIE_SPEED_5_0GT = 0x15,
        PCIE_SPEED_8_0GT = 0x16,
        PCIE_SPEED_16_0GT = 0x17,
        PCIE_SPEED_32_0GT = 0x18,
        PCIE_SPEED_64_0GT = 0x19,
        PCI_SPEED_UNKNOWN = 0xff,
};

enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev);
enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev);

struct pci_vpd {
        struct mutex lock;
        unsigned int len;
        u8 cap;
};

struct irq_affinity;
struct pcie_link_state;
struct pci_sriov;
struct pci_p2pdma;
struct rcec_ea;

/* The pci_dev structure describes PCI devices */
struct pci_dev {
        struct list_head bus_list;      /* Node in per-bus list */
        struct pci_bus *bus;            /* Bus this device is on */
        struct pci_bus *subordinate;    /* Bus this device bridges to */

        void *sysdata;                  /* Hook for sys-specific extension */
        struct proc_dir_entry *procent; /* Device entry in /proc/bus/pci */
        struct pci_slot *slot;          /* Physical slot this device is in */

        unsigned int devfn;             /* Encoded device & function index */
        unsigned short vendor;
        unsigned short device;
        unsigned short subsystem_vendor;
        unsigned short subsystem_device;
        unsigned int class;             /* 3 bytes: (base,sub,prog-if) */
        u8 revision;                    /* PCI revision, low byte of class word */
        u8 hdr_type;                    /* PCI header type (`multi' flag masked out) */
#ifdef CONFIG_PCIEAER
        u16 aer_cap;                    /* AER capability offset */
        struct aer_stats *aer_stats;    /* AER stats for this device */
#endif
#ifdef CONFIG_PCIEPORTBUS
        struct rcec_ea *rcec_ea;        /* RCEC cached endpoint association */
        struct pci_dev *rcec;           /* Associated RCEC device */
#endif
        u32 devcap;                     /* PCIe Device Capabilities */
        u8 pcie_cap;                    /* PCIe capability offset */
        u8 msi_cap;                     /* MSI capability offset */
        u8 msix_cap;                    /* MSI-X capability offset */
        u8 pcie_mpss:3;                 /* PCIe Max Payload Size Supported */
        u8 rom_base_reg;                /* Config register controlling ROM */
        u8 pin;                         /* Interrupt pin this device uses */
        u16 pcie_flags_reg;             /* Cached PCIe Capabilities Register */
        unsigned long *dma_alias_mask;  /* Mask of enabled devfn aliases */

        struct pci_driver *driver;      /* Driver bound to this device */
        u64 dma_mask;                   /* Mask of the bits of bus address this
                                           device implements. Normally this is
                                           0xffffffff. You only need to change
                                           this if your device has broken DMA
                                           or supports 64-bit transfers. */

        struct device_dma_parameters dma_parms;

        pci_power_t current_state;      /* Current operating state. In ACPI,
                                           this is D0-D3, D0 being fully
                                           functional, and D3 being off. */
        u8 pm_cap;                      /* PM capability offset */
        unsigned int pme_support:5;     /* Bitmask of states from which PME#
                                           can be generated */
        unsigned int pme_poll:1;        /* Poll device's PME status bit */
        unsigned int pinned:1;          /* Whether this dev is pinned */
        unsigned int config_rrs_sv:1;   /* Config RRS software visibility */
        unsigned int imm_ready:1;       /* Supports Immediate Readiness */
        unsigned int d1_support:1;      /* Low power state D1 is supported */
        unsigned int d2_support:1;      /* Low power state D2 is supported */
        unsigned int no_d1d2:1;         /* D1 and D2 are forbidden */
        unsigned int no_d3cold:1;       /* D3cold is forbidden */
        unsigned int bridge_d3:1;       /* Allow D3 for bridge */
        unsigned int d3cold_allowed:1;  /* D3cold is allowed by user */
        unsigned int mmio_always_on:1;  /* Disallow turning off io/mem
                                           decoding during BAR sizing */
        unsigned int wakeup_prepared:1;
        unsigned int skip_bus_pm:1;     /* Internal: Skip bus-level PM */
        unsigned int ignore_hotplug:1;  /* Ignore hotplug events */
        unsigned int hotplug_user_indicators:1; /* SlotCtl indicators
                                                   controlled exclusively by
                                                   user sysfs */
        unsigned int clear_retrain_link:1; /* Need to clear Retrain Link
                                              bit manually */
        unsigned int d3hot_delay;       /* D3hot->D0 transition time in ms */
        unsigned int d3cold_delay;      /* D3cold->D0 transition time in ms */

        u16 l1ss;                       /* L1SS Capability pointer */
#ifdef CONFIG_PCIEASPM
        struct pcie_link_state *link_state; /* ASPM link state */
        unsigned int ltr_path:1;        /* Latency Tolerance Reporting
                                           supported from root to here */
#endif
        unsigned int pasid_no_tlp:1;    /* PASID works without TLP Prefix */
        unsigned int eetlp_prefix_path:1; /* End-to-End TLP Prefix */

        pci_channel_state_t error_state; /* Current connectivity state */
        struct device dev;              /* Generic device interface */

        int cfg_size;                   /* Size of config space */

        /*
         * Instead of touching interrupt line and base address registers
         * directly, use the values stored here. They might be different!
         */
        unsigned int irq;
        struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */
        struct resource driver_exclusive_resource; /* driver exclusive resource ranges */

        bool match_driver;              /* Skip attaching driver */

        unsigned int transparent:1;     /* Subtractive decode bridge */
        unsigned int io_window:1;       /* Bridge has I/O window */
        unsigned int pref_window:1;     /* Bridge has pref mem window */
        unsigned int pref_64_window:1;  /* Pref mem window is 64-bit */
        unsigned int multifunction:1;   /* Multi-function device */

        unsigned int is_busmaster:1;    /* Is busmaster */
        unsigned int no_msi:1;          /* May not use MSI */
        unsigned int no_64bit_msi:1;    /* May only use 32-bit MSIs */
        unsigned int block_cfg_access:1; /* Config space access blocked */
        unsigned int broken_parity_status:1; /* Generates false positive parity */
        unsigned int irq_reroute_variant:2; /* Needs IRQ rerouting variant */
        unsigned int msi_enabled:1;
        unsigned int msix_enabled:1;
        unsigned int ari_enabled:1;     /* ARI forwarding */
        unsigned int ats_enabled:1;     /* Address Translation Svc */
        unsigned int pasid_enabled:1;   /* Process Address Space ID */
        unsigned int pri_enabled:1;     /* Page Request Interface */
        unsigned int is_managed:1;      /* Managed via devres */
        unsigned int is_msi_managed:1;  /* MSI release via devres installed */
        unsigned int needs_freset:1;    /* Requires fundamental reset */
        unsigned int state_saved:1;
        unsigned int is_physfn:1;
        unsigned int is_virtfn:1;
        unsigned int is_hotplug_bridge:1;
        unsigned int shpc_managed:1;    /* SHPC owned by shpchp */
        unsigned int is_thunderbolt:1;  /* Thunderbolt controller */
        /*
         * Devices marked as untrusted are the ones that can potentially
         * execute DMA attacks and similar. They are typically connected
         * through external ports such as Thunderbolt, but are not limited
         * to that. When an IOMMU is enabled, they should get full mappings
         * to make sure they cannot access arbitrary memory.
         */
        unsigned int untrusted:1;
        /*
         * Info from the platform, e.g., ACPI or device tree, may mark a
         * device as "external-facing". An external-facing device is
         * itself internal but devices downstream from it are external.
         */
        unsigned int external_facing:1;
        unsigned int broken_intx_masking:1; /* INTx masking can't be used */
        unsigned int io_window_1k:1;    /* Intel bridge 1K I/O windows */
        unsigned int irq_managed:1;
        unsigned int non_compliant_bars:1; /* Broken BARs; ignore them */
        unsigned int is_probed:1;       /* Device probing in progress */
        unsigned int link_active_reporting:1; /* Device capable of reporting link active */
        unsigned int no_vf_scan:1;      /* Don't scan for VFs after IOV enablement */
        unsigned int no_command_memory:1; /* No PCI_COMMAND_MEMORY */
        unsigned int rom_bar_overlap:1; /* ROM BAR disable broken */
        unsigned int rom_attr_enabled:1; /* Display of ROM attribute enabled? */
        pci_dev_flags_t dev_flags;
        atomic_t enable_cnt;            /* pci_enable_device has been called */

        spinlock_t pcie_cap_lock;       /* Protects RMW ops in capability accessors */
        u32 saved_config_space[16];     /* Config space saved at suspend time */
        struct hlist_head saved_cap_space;
        struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */
        struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */

#ifdef CONFIG_HOTPLUG_PCI_PCIE
        unsigned int broken_cmd_compl:1; /* No compl for some cmds */
#endif
#ifdef CONFIG_PCIE_PTM
        u16 ptm_cap;                    /* PTM Capability */
        unsigned int ptm_root:1;
        unsigned int ptm_enabled:1;
        u8 ptm_granularity;
#endif
#ifdef CONFIG_PCI_MSI
        void __iomem *msix_base;
        raw_spinlock_t msi_lock;
#endif
        struct pci_vpd vpd;
#ifdef CONFIG_PCIE_DPC
        u16 dpc_cap;
        unsigned int dpc_rp_extensions:1;
        u8 dpc_rp_log_size;
#endif
#ifdef CONFIG_PCI_ATS
        union {
                struct pci_sriov *sriov; /* PF: SR-IOV info */
                struct pci_dev *physfn;  /* VF: related PF */
        };
        u16 ats_cap;                    /* ATS Capability offset */
        u8 ats_stu;                     /* ATS Smallest Translation Unit */
#endif
#ifdef CONFIG_PCI_PRI
        u16 pri_cap;                    /* PRI Capability offset */
        u32 pri_reqs_alloc;             /* Number of PRI requests allocated */
        unsigned int pasid_required:1;  /* PRG Response PASID Required */
#endif
#ifdef CONFIG_PCI_PASID
        u16 pasid_cap;                  /* PASID Capability offset */
        u16 pasid_features;
#endif
#ifdef CONFIG_PCI_P2PDMA
        struct pci_p2pdma __rcu *p2pdma;
#endif
#ifdef CONFIG_PCI_DOE
        struct xarray doe_mbs;          /* Data Object Exchange mailboxes */
#endif
#ifdef CONFIG_PCI_NPEM
        struct npem *npem;              /* Native PCIe Enclosure Management */
#endif
        u16 acs_cap;                    /* ACS Capability offset */
        phys_addr_t rom;                /* Physical address if not from BAR */
        size_t romlen;                  /* Length if not from BAR */
        /*
         * Driver name to force a match. Do not set directly, because core
         * frees it. Use driver_set_override() to set or clear it.
         */
        const char *driver_override;

        unsigned long priv_flags;       /* Private flags for the PCI driver */

        /* These methods index pci_reset_fn_methods[] */
        u8 reset_methods[PCI_NUM_RESET_METHODS]; /* In priority order */
};

static inline struct pci_dev *pci_physfn(struct pci_dev *dev)
{
#ifdef CONFIG_PCI_IOV
        if (dev->is_virtfn)
                dev = dev->physfn;
#endif
        return dev;
}

struct pci_dev *pci_alloc_dev(struct pci_bus *bus);

#define to_pci_dev(n) container_of(n, struct pci_dev, dev)
#define for_each_pci_dev(d) while ((d = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, d)) != NULL)

static inline int pci_channel_offline(struct pci_dev *pdev)
{
        return (pdev->error_state != pci_channel_io_normal);
}
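
/*
 * Illustrative sketch (not part of this header): drivers typically consult
 * pci_channel_offline() before touching hardware so that MMIO is not issued
 * to a frozen or dead device.  "priv" and "reg" are hypothetical driver
 * names used only for the example.
 *
 *	if (pci_channel_offline(priv->pdev))
 *		return -ENODEV;	// skip MMIO while error recovery is pending
 *	writel(val, priv->regs + reg);
 */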

/*
 * In the current ACPI spec, the PCI Segment Group number for each PCI host
 * bridge is limited to a 16-bit value, so (int)-1 is not a valid PCI domain
 * number and can be used as a sentinel value indicating that ->domain_nr
 * was not set by the driver (CONFIG_PCI_DOMAINS_GENERIC=y archs will set it
 * with pci_bus_find_domain_nr()).
 */
#define PCI_DOMAIN_NR_NOT_SET (-1)

struct pci_host_bridge {
        struct device dev;
        struct pci_bus *bus;            /* Root bus */
        struct pci_ops *ops;
        struct pci_ops *child_ops;
        void *sysdata;
        int busnr;
        int domain_nr;
        struct list_head windows;       /* resource_entry */
        struct list_head dma_ranges;    /* dma ranges resource list */
        u8 (*swizzle_irq)(struct pci_dev *, u8 *); /* Platform IRQ swizzler */
        int (*map_irq)(const struct pci_dev *, u8, u8);
        void (*release_fn)(struct pci_host_bridge *);
        void *release_data;
        unsigned int ignore_reset_delay:1; /* For entire hierarchy */
        unsigned int no_ext_tags:1;     /* No Extended Tags */
        unsigned int no_inc_mrrs:1;     /* No Increase MRRS */
        unsigned int native_aer:1;      /* OS may use PCIe AER */
        unsigned int native_pcie_hotplug:1; /* OS may use PCIe hotplug */
        unsigned int native_shpc_hotplug:1; /* OS may use SHPC hotplug */
        unsigned int native_pme:1;      /* OS may use PCIe PME */
        unsigned int native_ltr:1;      /* OS may use PCIe LTR */
        unsigned int native_dpc:1;      /* OS may use PCIe DPC */
        unsigned int native_cxl_error:1; /* OS may use CXL RAS/Events */
        unsigned int preserve_config:1; /* Preserve FW resource setup */
        unsigned int size_windows:1;    /* Enable root bus sizing */
        unsigned int msi_domain:1;      /* Bridge wants MSI domain */

        /* Resource alignment requirements */
        resource_size_t (*align_resource)(struct pci_dev *dev,
                        const struct resource *res,
                        resource_size_t start,
                        resource_size_t size,
                        resource_size_t align);
        unsigned long private[] ____cacheline_aligned;
};

#define to_pci_host_bridge(n) container_of(n, struct pci_host_bridge, dev)

static inline void *pci_host_bridge_priv(struct pci_host_bridge *bridge)
{
        return (void *)bridge->private;
}

static inline struct pci_host_bridge *pci_host_bridge_from_priv(void *priv)
{
        return container_of(priv, struct pci_host_bridge, private);
}
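
/*
 * Illustrative sketch (not part of this header): a host controller driver
 * carrying private data in the bridge allocation.  "struct my_pcie" is a
 * hypothetical driver structure, shown only to demonstrate pairing the
 * allocation helpers declared below with pci_host_bridge_priv().
 *
 *	struct pci_host_bridge *bridge;
 *	struct my_pcie *pcie;
 *
 *	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
 *	if (!bridge)
 *		return -ENOMEM;
 *	pcie = pci_host_bridge_priv(bridge);
 */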

struct pci_host_bridge *pci_alloc_host_bridge(size_t priv);
struct pci_host_bridge *devm_pci_alloc_host_bridge(struct device *dev,
                                                   size_t priv);
void pci_free_host_bridge(struct pci_host_bridge *bridge);
struct pci_host_bridge *pci_find_host_bridge(struct pci_bus *bus);

void pci_set_host_bridge_release(struct pci_host_bridge *bridge,
                                 void (*release_fn)(struct pci_host_bridge *),
                                 void *release_data);

int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge);

/*
 * The first PCI_BRIDGE_RESOURCE_NUM PCI bus resources (those that correspond
 * to P2P or CardBus bridge windows) go in a table. Additional ones (for
 * buses below host bridges or subtractive decode bridges) go in the list.
 * Use pci_bus_for_each_resource() to iterate through all the resources.
 */
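
/*
 * Illustrative sketch (not part of this header), assuming the two-argument
 * form of the pci_bus_for_each_resource() iterator used by recent kernels:
 *
 *	struct resource *res;
 *
 *	pci_bus_for_each_resource(bus, res) {
 *		if (res && (res->flags & IORESOURCE_MEM))
 *			dev_info(&bus->dev, "window %pR\n", res);
 *	}
 */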

/*
 * PCI_SUBTRACTIVE_DECODE means the bridge forwards the window implicitly
 * and there's no way to program the bridge with the details of the window.
 * This does not apply to ACPI _CRS windows, even with the _DEC subtractive-
 * decode bit set, because they are explicit and can be programmed with _SRS.
 */
#define PCI_SUBTRACTIVE_DECODE 0x1

struct pci_bus_resource {
        struct list_head list;
        struct resource *res;
        unsigned int flags;
};

#define PCI_REGION_FLAG_MASK 0x0fU /* These bits of resource flags tell us the PCI region flags */

struct pci_bus {
        struct list_head node;          /* Node in list of buses */
        struct pci_bus *parent;         /* Parent bus this bridge is on */
        struct list_head children;      /* List of child buses */
        struct list_head devices;       /* List of devices on this bus */
        struct pci_dev *self;           /* Bridge device as seen by parent */
        struct list_head slots;         /* List of slots on this bus;
                                           protected by pci_slot_mutex */
        struct resource *resource[PCI_BRIDGE_RESOURCE_NUM];
        struct list_head resources;     /* Address space routed to this bus */
        struct resource busn_res;       /* Bus numbers routed to this bus */

        struct pci_ops *ops;            /* Configuration access functions */
        void *sysdata;                  /* Hook for sys-specific extension */
        struct proc_dir_entry *procdir; /* Directory entry in /proc/bus/pci */

        unsigned char number;           /* Bus number */
        unsigned char primary;          /* Number of primary bridge */
        unsigned char max_bus_speed;    /* enum pci_bus_speed */
        unsigned char cur_bus_speed;    /* enum pci_bus_speed */
#ifdef CONFIG_PCI_DOMAINS_GENERIC
        int domain_nr;
#endif

        char name[48];

        unsigned short bridge_ctl;      /* Manage NO_ISA/FBB/et al behaviors */
        pci_bus_flags_t bus_flags;      /* Inherited by child buses */
        struct device *bridge;
        struct device dev;
        struct bin_attribute *legacy_io; /* Legacy I/O for this bus */
        struct bin_attribute *legacy_mem; /* Legacy mem */
        unsigned int is_added:1;
        unsigned int unsafe_warn:1;     /* warned about RW1C config write */
};

#define to_pci_bus(n) container_of(n, struct pci_bus, dev)

static inline u16 pci_dev_id(struct pci_dev *dev)
{
        return PCI_DEVID(dev->bus->number, dev->devfn);
}

/*
 * Returns true if the PCI bus is root (behind host-PCI bridge),
 * false otherwise
 *
 * Some code assumes that "bus->self == NULL" means that bus is a root bus.
 * This is incorrect because "virtual" buses added for SR-IOV (via
 * virtfn_add_bus()) have "bus->self == NULL" but are not root buses.
 */
static inline bool pci_is_root_bus(struct pci_bus *pbus)
{
        return !(pbus->parent);
}

/**
 * pci_is_bridge - check if the PCI device is a bridge
 * @dev: PCI device
 *
 * Return true if the PCI device is a bridge, whether or not it has a
 * subordinate bus.
 */
static inline bool pci_is_bridge(struct pci_dev *dev)
{
        return dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
               dev->hdr_type == PCI_HEADER_TYPE_CARDBUS;
}

/**
 * pci_is_vga - check if the PCI device is a VGA device
 * @pdev: PCI device
 *
 * The PCI Code and ID Assignment spec, r1.15, secs 1.4 and 1.1, define
 * VGA Base Class and Sub-Classes:
 *
 *   03 00  PCI_CLASS_DISPLAY_VGA      VGA-compatible or 8514-compatible
 *   00 01  PCI_CLASS_NOT_DEFINED_VGA  VGA-compatible (before Class Code)
 *
 * Return true if the PCI device is a VGA device and uses the legacy VGA
 * resources ([mem 0xa0000-0xbffff], [io 0x3b0-0x3bb], [io 0x3c0-0x3df] and
 * aliases).
 */
static inline bool pci_is_vga(struct pci_dev *pdev)
{
        if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
                return true;

        if ((pdev->class >> 8) == PCI_CLASS_NOT_DEFINED_VGA)
                return true;

        return false;
}

#define for_each_pci_bridge(dev, bus)				\
        list_for_each_entry(dev, &bus->devices, bus_list)	\
                if (!pci_is_bridge(dev)) {} else

static inline struct pci_dev *pci_upstream_bridge(struct pci_dev *dev)
{
        dev = pci_physfn(dev);
        if (pci_is_root_bus(dev->bus))
                return NULL;

        return dev->bus->self;
}
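
/*
 * Illustrative sketch (not part of this header): walking upstream from a
 * device to the bridge directly below the root bus (typically the Root Port
 * on PCIe).
 *
 *	struct pci_dev *bridge = pci_upstream_bridge(pdev);
 *
 *	while (bridge && !pci_is_root_bus(bridge->bus))
 *		bridge = pci_upstream_bridge(bridge);
 *	// "bridge" is now the topmost bridge, or NULL if pdev itself sits
 *	// on a root bus.
 */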

#ifdef CONFIG_PCI_MSI
static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev)
{
        return pci_dev->msi_enabled || pci_dev->msix_enabled;
}
#else
static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) { return false; }
#endif

/* Error values that may be returned by PCI functions */
#define PCIBIOS_SUCCESSFUL		0x00
#define PCIBIOS_FUNC_NOT_SUPPORTED	0x81
#define PCIBIOS_BAD_VENDOR_ID		0x83
#define PCIBIOS_DEVICE_NOT_FOUND	0x86
#define PCIBIOS_BAD_REGISTER_NUMBER	0x87
#define PCIBIOS_SET_FAILED		0x88
#define PCIBIOS_BUFFER_TOO_SMALL	0x89

/* Translate above to generic errno for passing back through non-PCI code */
static inline int pcibios_err_to_errno(int err)
{
        if (err <= PCIBIOS_SUCCESSFUL)
                return err; /* Assume already errno */

        switch (err) {
        case PCIBIOS_FUNC_NOT_SUPPORTED:
                return -ENOENT;
        case PCIBIOS_BAD_VENDOR_ID:
                return -ENOTTY;
        case PCIBIOS_DEVICE_NOT_FOUND:
                return -ENODEV;
        case PCIBIOS_BAD_REGISTER_NUMBER:
                return -EFAULT;
        case PCIBIOS_SET_FAILED:
                return -EIO;
        case PCIBIOS_BUFFER_TOO_SMALL:
                return -ENOSPC;
        }

        return -ERANGE;
}
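
/*
 * Illustrative sketch (not part of this header): config accessors return
 * PCIBIOS_* codes, so a driver that wants to pass an errno further up
 * converts with pcibios_err_to_errno().
 *
 *	int ret;
 *	u16 vendor;
 *
 *	ret = pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor);
 *	if (ret)
 *		return pcibios_err_to_errno(ret);
 */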

/* Low-level architecture-dependent routines */

struct pci_ops {
        int (*add_bus)(struct pci_bus *bus);
        void (*remove_bus)(struct pci_bus *bus);
        void __iomem *(*map_bus)(struct pci_bus *bus, unsigned int devfn, int where);
        int (*read)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val);
        int (*write)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val);
};

/*
 * ACPI needs to be able to access PCI config space before we've done a
 * PCI bus scan and created pci_bus structures.
 */
int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
                 int reg, int len, u32 *val);
int raw_pci_write(unsigned int domain, unsigned int bus, unsigned int devfn,
                  int reg, int len, u32 val);

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
typedef u64 pci_bus_addr_t;
#else
typedef u32 pci_bus_addr_t;
#endif

struct pci_bus_region {
        pci_bus_addr_t start;
        pci_bus_addr_t end;
};

struct pci_dynids {
        spinlock_t lock;        /* Protects list, index */
        struct list_head list;  /* For IDs added at runtime */
};


/*
 * PCI Error Recovery System (PCI-ERS). If a PCI device driver provides
 * a set of callbacks in struct pci_error_handlers, that device driver
 * will be notified of PCI bus errors, and will be driven to recovery
 * when an error occurs.
 */

typedef unsigned int __bitwise pci_ers_result_t;

enum pci_ers_result {
        /* No result/none/not supported in device driver */
        PCI_ERS_RESULT_NONE = (__force pci_ers_result_t) 1,

        /* Device driver can recover without slot reset */
        PCI_ERS_RESULT_CAN_RECOVER = (__force pci_ers_result_t) 2,

        /* Device driver wants slot to be reset */
        PCI_ERS_RESULT_NEED_RESET = (__force pci_ers_result_t) 3,

        /* Device has completely failed, is unrecoverable */
        PCI_ERS_RESULT_DISCONNECT = (__force pci_ers_result_t) 4,

        /* Device driver is fully recovered and operational */
        PCI_ERS_RESULT_RECOVERED = (__force pci_ers_result_t) 5,

        /* No AER capabilities registered for the driver */
        PCI_ERS_RESULT_NO_AER_DRIVER = (__force pci_ers_result_t) 6,
};

/* PCI bus error event callbacks */
struct pci_error_handlers {
        /* PCI bus error detected on this device */
        pci_ers_result_t (*error_detected)(struct pci_dev *dev,
                                           pci_channel_state_t error);

        /* MMIO has been re-enabled, but not DMA */
        pci_ers_result_t (*mmio_enabled)(struct pci_dev *dev);

        /* PCI slot has been reset */
        pci_ers_result_t (*slot_reset)(struct pci_dev *dev);

        /* PCI function reset prepare or completed */
        void (*reset_prepare)(struct pci_dev *dev);
        void (*reset_done)(struct pci_dev *dev);

        /* Device driver may resume normal operations */
        void (*resume)(struct pci_dev *dev);

        /* Allow device driver to record more details of a correctable error */
        void (*cor_error_detected)(struct pci_dev *dev);
};
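
/*
 * Illustrative sketch (not part of this header): a minimal set of error
 * handlers a driver might register.  The my_*() callbacks are hypothetical
 * names used only for the example.
 *
 *	static pci_ers_result_t my_error_detected(struct pci_dev *pdev,
 *						  pci_channel_state_t state)
 *	{
 *		if (state == pci_channel_io_perm_failure)
 *			return PCI_ERS_RESULT_DISCONNECT;
 *		return PCI_ERS_RESULT_NEED_RESET;	// ask for a slot reset
 *	}
 *
 *	static const struct pci_error_handlers my_err_handler = {
 *		.error_detected	= my_error_detected,
 *		.slot_reset	= my_slot_reset,	// hypothetical
 *		.resume		= my_resume,		// hypothetical
 *	};
 */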


struct module;

/**
 * struct pci_driver - PCI driver structure
 * @name: Driver name.
 * @id_table: Pointer to table of device IDs the driver is
 *		interested in. Most drivers should export this
 *		table using MODULE_DEVICE_TABLE(pci,...).
 * @probe: This probing function gets called (during execution
 *		of pci_register_driver() for already existing
 *		devices or later if a new device gets inserted) for
 *		all PCI devices which match the ID table and are not
 *		"owned" by the other drivers yet. This function gets
 *		passed a "struct pci_dev \*" for each device whose
 *		entry in the ID table matches the device. The probe
 *		function returns zero when the driver chooses to
 *		take "ownership" of the device or an error code
 *		(negative number) otherwise.
 *		The probe function always gets called from process
 *		context, so it can sleep.
 * @remove: The remove() function gets called whenever a device
 *		being handled by this driver is removed (either during
 *		deregistration of the driver or when it's manually
 *		pulled out of a hot-pluggable slot).
 *		The remove function always gets called from process
 *		context, so it can sleep.
 * @suspend: Put device into low power state.
 * @resume: Wake device from low power state.
 *		(Please see Documentation/power/pci.rst for descriptions
 *		of PCI Power Management and the related functions.)
 * @shutdown: Hook into reboot_notifier_list (kernel/sys.c).
 *		Intended to stop any idling DMA operations.
 *		Useful for enabling wake-on-lan (NIC) or changing
 *		the power state of a device before reboot.
 *		e.g. drivers/net/e100.c.
 * @sriov_configure: Optional driver callback to allow configuration of
 *		number of VFs to enable via sysfs "sriov_numvfs" file.
 * @sriov_set_msix_vec_count: PF Driver callback to change number of MSI-X
 *		vectors on a VF. Triggered via sysfs "sriov_vf_msix_count".
 *		This will change MSI-X Table Size in the VF Message Control
 *		registers.
 * @sriov_get_vf_total_msix: PF driver callback to get the total number of
 *		MSI-X vectors available for distribution to the VFs.
 * @err_handler: See Documentation/PCI/pci-error-recovery.rst
 * @groups: Sysfs attribute groups.
 * @dev_groups: Attributes attached to the device that will be
 *		created once it is bound to the driver.
 * @driver: Driver model structure.
 * @dynids: List of dynamically added device IDs.
 * @driver_managed_dma: Device driver doesn't use kernel DMA API for DMA.
 *		For most device drivers, no need to care about this flag
 *		as long as all DMAs are handled through the kernel DMA API.
 *		For some special ones, for example VFIO drivers, they know
 *		how to manage the DMA themselves and set this flag so that
 *		the IOMMU layer will allow them to setup and manage their
 *		own I/O address space.
 */
struct pci_driver {
        const char *name;
        const struct pci_device_id *id_table;   /* Must be non-NULL for probe to be called */
        int (*probe)(struct pci_dev *dev, const struct pci_device_id *id); /* New device inserted */
        void (*remove)(struct pci_dev *dev);    /* Device removed (NULL if not a hot-plug capable driver) */
        int (*suspend)(struct pci_dev *dev, pm_message_t state); /* Device suspended */
        int (*resume)(struct pci_dev *dev);     /* Device woken up */
        void (*shutdown)(struct pci_dev *dev);
        int (*sriov_configure)(struct pci_dev *dev, int num_vfs); /* On PF */
        int (*sriov_set_msix_vec_count)(struct pci_dev *vf, int msix_vec_count); /* On PF */
        u32 (*sriov_get_vf_total_msix)(struct pci_dev *pf);
        const struct pci_error_handlers *err_handler;
        const struct attribute_group **groups;
        const struct attribute_group **dev_groups;
        struct device_driver driver;
        struct pci_dynids dynids;
        bool driver_managed_dma;
};
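
/*
 * Illustrative sketch (not part of this header): the usual shape of a PCI
 * driver registration.  my_probe(), my_remove(), my_id_table and
 * my_err_handler are hypothetical names used only for the example.
 *
 *	static struct pci_driver my_driver = {
 *		.name		= "my_driver",
 *		.id_table	= my_id_table,
 *		.probe		= my_probe,
 *		.remove		= my_remove,
 *		.err_handler	= &my_err_handler,	// optional
 *	};
 *	module_pci_driver(my_driver);
 */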

#define to_pci_driver(__drv)	\
	( __drv ? container_of_const(__drv, struct pci_driver, driver) : NULL )

/**
 * PCI_DEVICE - macro used to describe a specific PCI device
 * @vend: the 16 bit PCI Vendor ID
 * @dev: the 16 bit PCI Device ID
 *
 * This macro is used to create a struct pci_device_id that matches a
 * specific device. The subvendor and subdevice fields will be set to
 * PCI_ANY_ID.
 */
#define PCI_DEVICE(vend,dev) \
	.vendor = (vend), .device = (dev), \
	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID

/**
 * PCI_DEVICE_DRIVER_OVERRIDE - macro used to describe a PCI device with
 *                              override_only flags.
 * @vend: the 16 bit PCI Vendor ID
 * @dev: the 16 bit PCI Device ID
 * @driver_override: the 32 bit PCI Device override_only
 *
 * This macro is used to create a struct pci_device_id that matches only a
 * driver_override device. The subvendor and subdevice fields will be set to
 * PCI_ANY_ID.
 */
#define PCI_DEVICE_DRIVER_OVERRIDE(vend, dev, driver_override) \
	.vendor = (vend), .device = (dev), .subvendor = PCI_ANY_ID, \
	.subdevice = PCI_ANY_ID, .override_only = (driver_override)

/**
 * PCI_DRIVER_OVERRIDE_DEVICE_VFIO - macro used to describe a VFIO
 *                                   "driver_override" PCI device.
 * @vend: the 16 bit PCI Vendor ID
 * @dev: the 16 bit PCI Device ID
 *
 * This macro is used to create a struct pci_device_id that matches a
 * specific device. The subvendor and subdevice fields will be set to
 * PCI_ANY_ID and the driver_override will be set to
 * PCI_ID_F_VFIO_DRIVER_OVERRIDE.
 */
#define PCI_DRIVER_OVERRIDE_DEVICE_VFIO(vend, dev) \
	PCI_DEVICE_DRIVER_OVERRIDE(vend, dev, PCI_ID_F_VFIO_DRIVER_OVERRIDE)

/**
 * PCI_DEVICE_SUB - macro used to describe a specific PCI device with subsystem
 * @vend: the 16 bit PCI Vendor ID
 * @dev: the 16 bit PCI Device ID
 * @subvend: the 16 bit PCI Subvendor ID
 * @subdev: the 16 bit PCI Subdevice ID
 *
 * This macro is used to create a struct pci_device_id that matches a
 * specific device with subsystem information.
 */
#define PCI_DEVICE_SUB(vend, dev, subvend, subdev) \
	.vendor = (vend), .device = (dev), \
	.subvendor = (subvend), .subdevice = (subdev)

/**
 * PCI_DEVICE_CLASS - macro used to describe a specific PCI device class
 * @dev_class: the class, subclass, prog-if triple for this device
 * @dev_class_mask: the class mask for this device
 *
 * This macro is used to create a struct pci_device_id that matches a
 * specific PCI class. The vendor, device, subvendor, and subdevice
 * fields will be set to PCI_ANY_ID.
 */
#define PCI_DEVICE_CLASS(dev_class,dev_class_mask) \
	.class = (dev_class), .class_mask = (dev_class_mask), \
	.vendor = PCI_ANY_ID, .device = PCI_ANY_ID, \
	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID

/**
 * PCI_VDEVICE - macro used to describe a specific PCI device in short form
 * @vend: the vendor name
 * @dev: the 16 bit PCI Device ID
 *
 * This macro is used to create a struct pci_device_id that matches a
 * specific PCI device. The subvendor, and subdevice fields will be set
 * to PCI_ANY_ID. The macro allows the next field to follow as the device
 * private data.
 */
#define PCI_VDEVICE(vend, dev) \
	.vendor = PCI_VENDOR_ID_##vend, .device = (dev), \
	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0

/**
 * PCI_DEVICE_DATA - macro used to describe a specific PCI device in very short form
 * @vend: the vendor name (without PCI_VENDOR_ID_ prefix)
 * @dev: the device name (without PCI_DEVICE_ID_<vend>_ prefix)
 * @data: the driver data to be filled
 *
 * This macro is used to create a struct pci_device_id that matches a
 * specific PCI device. The subvendor, and subdevice fields will be set
 * to PCI_ANY_ID.
 */
#define PCI_DEVICE_DATA(vend, dev, data) \
	.vendor = PCI_VENDOR_ID_##vend, .device = PCI_DEVICE_ID_##vend##_##dev, \
	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0, \
	.driver_data = (kernel_ulong_t)(data)
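
/*
 * Illustrative sketch (not part of this header): an ID table built from the
 * macros above.  The 0x1234 device ID is a placeholder; "my_id_table" is a
 * hypothetical name.
 *
 *	static const struct pci_device_id my_id_table[] = {
 *		{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x1234) },
 *		{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
 *		{ }	// terminating all-zero entry
 *	};
 *	MODULE_DEVICE_TABLE(pci, my_id_table);
 */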

enum {
        PCI_REASSIGN_ALL_RSRC	= 0x00000001,	/* Ignore firmware setup */
        PCI_REASSIGN_ALL_BUS	= 0x00000002,	/* Reassign all bus numbers */
        PCI_PROBE_ONLY		= 0x00000004,	/* Use existing setup */
        PCI_CAN_SKIP_ISA_ALIGN	= 0x00000008,	/* Don't do ISA alignment */
        PCI_ENABLE_PROC_DOMAINS	= 0x00000010,	/* Enable domains in /proc */
        PCI_COMPAT_DOMAIN_0	= 0x00000020,	/* ... except domain 0 */
        PCI_SCAN_ALL_PCIE_DEVS	= 0x00000040,	/* Scan all, not just dev 0 */
};

#define PCI_IRQ_INTX		(1 << 0) /* Allow INTx interrupts */
#define PCI_IRQ_MSI		(1 << 1) /* Allow MSI interrupts */
#define PCI_IRQ_MSIX		(1 << 2) /* Allow MSI-X interrupts */
#define PCI_IRQ_AFFINITY	(1 << 3) /* Auto-assign affinity */
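
/*
 * Illustrative sketch (not part of this header): the PCI_IRQ_* flags are
 * typically passed to pci_alloc_irq_vectors() (declared later in this
 * header) to express which interrupt types a driver can work with.  The
 * vector counts below are only an example.
 *
 *	int nvecs;
 *
 *	nvecs = pci_alloc_irq_vectors(pdev, 1, 8,
 *				      PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_INTX);
 *	if (nvecs < 0)
 *		return nvecs;
 */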

/* These external functions are only available when PCI support is enabled */
#ifdef CONFIG_PCI

extern unsigned int pci_flags;

static inline void pci_set_flags(int flags) { pci_flags = flags; }
static inline void pci_add_flags(int flags) { pci_flags |= flags; }
static inline void pci_clear_flags(int flags) { pci_flags &= ~flags; }
static inline int pci_has_flag(int flag) { return pci_flags & flag; }

void pcie_bus_configure_settings(struct pci_bus *bus);

enum pcie_bus_config_types {
        PCIE_BUS_TUNE_OFF,	/* Don't touch MPS at all */
        PCIE_BUS_DEFAULT,	/* Ensure MPS matches upstream bridge */
        PCIE_BUS_SAFE,		/* Use largest MPS boot-time devices support */
        PCIE_BUS_PERFORMANCE,	/* Use MPS and MRRS for best performance */
        PCIE_BUS_PEER2PEER,	/* Set MPS = 128 for all devices */
};

extern enum pcie_bus_config_types pcie_bus_config;

extern const struct bus_type pci_bus_type;

/* Do NOT directly access these two variables, unless you are arch-specific PCI
 * code, or PCI core code. */
extern struct list_head pci_root_buses; /* List of all known PCI buses */
/* Some device drivers need to know if PCI is initialized */
int no_pci_devices(void);

void pcibios_resource_survey_bus(struct pci_bus *bus);
void pcibios_bus_add_device(struct pci_dev *pdev);
void pcibios_add_bus(struct pci_bus *bus);
void pcibios_remove_bus(struct pci_bus *bus);
void pcibios_fixup_bus(struct pci_bus *);
int __must_check pcibios_enable_device(struct pci_dev *, int mask);
/* Architecture-specific versions may override this (weak) */
char *pcibios_setup(char *str);

/* Used only when drivers/pci/setup.c is used */
resource_size_t pcibios_align_resource(void *, const struct resource *,
                                       resource_size_t,
                                       resource_size_t);

/* Weak but can be overridden by arch */
void pci_fixup_cardbus(struct pci_bus *);

/* Generic PCI functions used internally */

void pcibios_resource_to_bus(struct pci_bus *bus, struct pci_bus_region *region,
                             struct resource *res);
void pcibios_bus_to_resource(struct pci_bus *bus, struct resource *res,
                             struct pci_bus_region *region);
void pcibios_scan_specific_bus(int busn);
struct pci_bus *pci_find_bus(int domain, int busnr);
void pci_bus_add_devices(const struct pci_bus *bus);
struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops, void *sysdata);
struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
                                    struct pci_ops *ops, void *sysdata,
                                    struct list_head *resources);
int pci_host_probe(struct pci_host_bridge *bridge);
int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int busmax);
int pci_bus_update_busn_res_end(struct pci_bus *b, int busmax);
void pci_bus_release_busn_res(struct pci_bus *b);
struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
                                  struct pci_ops *ops, void *sysdata,
                                  struct list_head *resources);
int pci_scan_root_bus_bridge(struct pci_host_bridge *bridge);
struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
                                int busnr);
struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr,
                                 const char *name,
                                 struct hotplug_slot *hotplug);
void pci_destroy_slot(struct pci_slot *slot);
#ifdef CONFIG_SYSFS
void pci_dev_assign_slot(struct pci_dev *dev);
#else
static inline void pci_dev_assign_slot(struct pci_dev *dev) { }
#endif
int pci_scan_slot(struct pci_bus *bus, int devfn);
struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn);
void pci_device_add(struct pci_dev *dev, struct pci_bus *bus);
unsigned int pci_scan_child_bus(struct pci_bus *bus);
void pci_bus_add_device(struct pci_dev *dev);
void pci_read_bridge_bases(struct pci_bus *child);
struct resource *pci_find_parent_resource(const struct pci_dev *dev,
                                          struct resource *res);
u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin);
int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge);
u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp);
struct pci_dev *pci_dev_get(struct pci_dev *dev);
void pci_dev_put(struct pci_dev *dev);
DEFINE_FREE(pci_dev_put, struct pci_dev *, if (_T) pci_dev_put(_T))
void pci_remove_bus(struct pci_bus *b);
void pci_stop_and_remove_bus_device(struct pci_dev *dev);
void pci_stop_and_remove_bus_device_locked(struct pci_dev *dev);
void pci_stop_root_bus(struct pci_bus *bus);
void pci_remove_root_bus(struct pci_bus *bus);
void pci_setup_cardbus(struct pci_bus *bus);
void pcibios_setup_bridge(struct pci_bus *bus, unsigned long type);
void pci_sort_breadthfirst(void);
#define dev_is_pci(d) ((d)->bus == &pci_bus_type)
#define dev_is_pf(d) ((dev_is_pci(d) ? to_pci_dev(d)->is_physfn : false))

/* Generic PCI functions exported to card drivers */

u8 pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap);
u8 pci_find_capability(struct pci_dev *dev, int cap);
u8 pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap);
u8 pci_find_ht_capability(struct pci_dev *dev, int ht_cap);
u8 pci_find_next_ht_capability(struct pci_dev *dev, u8 pos, int ht_cap);
u16 pci_find_ext_capability(struct pci_dev *dev, int cap);
u16 pci_find_next_ext_capability(struct pci_dev *dev, u16 pos, int cap);
struct pci_bus *pci_find_next_bus(const struct pci_bus *from);
u16 pci_find_vsec_capability(struct pci_dev *dev, u16 vendor, int cap);
u16 pci_find_dvsec_capability(struct pci_dev *dev, u16 vendor, u16 dvsec);

u64 pci_get_dsn(struct pci_dev *dev);

struct pci_dev *pci_get_device(unsigned int vendor, unsigned int device,
                               struct pci_dev *from);
struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device,
                               unsigned int ss_vendor, unsigned int ss_device,
                               struct pci_dev *from);
struct pci_dev *pci_get_slot(struct pci_bus *bus, unsigned int devfn);
struct pci_dev *pci_get_domain_bus_and_slot(int domain, unsigned int bus,
                                            unsigned int devfn);
struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from);
struct pci_dev *pci_get_base_class(unsigned int class, struct pci_dev *from);

int pci_dev_present(const struct pci_device_id *ids);

int pci_bus_read_config_byte(struct pci_bus *bus, unsigned int devfn,
                             int where, u8 *val);
int pci_bus_read_config_word(struct pci_bus *bus, unsigned int devfn,
                             int where, u16 *val);
int pci_bus_read_config_dword(struct pci_bus *bus, unsigned int devfn,
                              int where, u32 *val);
int pci_bus_write_config_byte(struct pci_bus *bus, unsigned int devfn,
                              int where, u8 val);
int pci_bus_write_config_word(struct pci_bus *bus, unsigned int devfn,
                              int where, u16 val);
int pci_bus_write_config_dword(struct pci_bus *bus, unsigned int devfn,
                               int where, u32 val);

int pci_generic_config_read(struct pci_bus *bus, unsigned int devfn,
                            int where, int size, u32 *val);
int pci_generic_config_write(struct pci_bus *bus, unsigned int devfn,
                             int where, int size, u32 val);
int pci_generic_config_read32(struct pci_bus *bus, unsigned int devfn,
                              int where, int size, u32 *val);
int pci_generic_config_write32(struct pci_bus *bus, unsigned int devfn,
                               int where, int size, u32 val);

struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops);

int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val);
int pci_read_config_word(const struct pci_dev *dev, int where, u16 *val);
int pci_read_config_dword(const struct pci_dev *dev, int where, u32 *val);
int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val);
int pci_write_config_word(const struct pci_dev *dev, int where, u16 val);
int pci_write_config_dword(const struct pci_dev *dev, int where, u32 val);
void pci_clear_and_set_config_dword(const struct pci_dev *dev, int pos,
                                    u32 clear, u32 set);

int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val);
int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val);
int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val);
int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val);
int pcie_capability_clear_and_set_word_unlocked(struct pci_dev *dev, int pos,
                                                u16 clear, u16 set);
int pcie_capability_clear_and_set_word_locked(struct pci_dev *dev, int pos,
                                              u16 clear, u16 set);
int pcie_capability_clear_and_set_dword(struct pci_dev *dev, int pos,
                                        u32 clear, u32 set);

/**
 * pcie_capability_clear_and_set_word - RMW accessor for PCI Express Capability Registers
 * @dev: PCI device structure of the PCI Express device
 * @pos: PCI Express Capability Register
 * @clear: Clear bitmask
 * @set: Set bitmask
 *
 * Perform a Read-Modify-Write (RMW) operation using @clear and @set
 * bitmasks on PCI Express Capability Register at @pos. Certain PCI Express
 * Capability Registers are accessed concurrently in RMW fashion, hence
 * require locking which is handled transparently to the caller.
 */
static inline int pcie_capability_clear_and_set_word(struct pci_dev *dev,
                                                     int pos,
                                                     u16 clear, u16 set)
{
        switch (pos) {
        case PCI_EXP_LNKCTL:
        case PCI_EXP_RTCTL:
                return pcie_capability_clear_and_set_word_locked(dev, pos,
                                                                 clear, set);
        default:
                return pcie_capability_clear_and_set_word_unlocked(dev, pos,
                                                                   clear, set);
        }
}

static inline int pcie_capability_set_word(struct pci_dev *dev, int pos,
                                           u16 set)
{
        return pcie_capability_clear_and_set_word(dev, pos, 0, set);
}

static inline int pcie_capability_set_dword(struct pci_dev *dev, int pos,
                                            u32 set)
{
        return pcie_capability_clear_and_set_dword(dev, pos, 0, set);
}

static inline int pcie_capability_clear_word(struct pci_dev *dev, int pos,
                                             u16 clear)
{
        return pcie_capability_clear_and_set_word(dev, pos, clear, 0);
}

static inline int pcie_capability_clear_dword(struct pci_dev *dev, int pos,
                                              u32 clear)
{
        return pcie_capability_clear_and_set_dword(dev, pos, clear, 0);
}
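
/*
 * Illustrative sketch (not part of this header): a RMW on the Link Control
 * register via the accessor above; the locked variant is chosen internally
 * for PCI_EXP_LNKCTL.  The bit values shown are only an example.
 *
 *	pcie_capability_clear_and_set_word(pdev, PCI_EXP_LNKCTL,
 *					   PCI_EXP_LNKCTL_ASPMC,
 *					   PCI_EXP_LNKCTL_ASPM_L1);
 */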
1309
1310 /* User-space driven config access */
1311 int pci_user_read_config_byte(struct pci_dev *dev, int where, u8 *val);
1312 int pci_user_read_config_word(struct pci_dev *dev, int where, u16 *val);
1313 int pci_user_read_config_dword(struct pci_dev *dev, int where, u32 *val);
1314 int pci_user_write_config_byte(struct pci_dev *dev, int where, u8 val);
1315 int pci_user_write_config_word(struct pci_dev *dev, int where, u16 val);
1316 int pci_user_write_config_dword(struct pci_dev *dev, int where, u32 val);
1317
1318 int __must_check pci_enable_device(struct pci_dev *dev);
1319 int __must_check pci_enable_device_mem(struct pci_dev *dev);
1320 int __must_check pci_reenable_device(struct pci_dev *);
1321 int __must_check pcim_enable_device(struct pci_dev *pdev);
1322 void pcim_pin_device(struct pci_dev *pdev);
1323
pci_intx_mask_supported(struct pci_dev * pdev)1324 static inline bool pci_intx_mask_supported(struct pci_dev *pdev)
1325 {
1326 /*
1327 * INTx masking is supported if PCI_COMMAND_INTX_DISABLE is
1328 * writable and no quirk has marked the feature broken.
1329 */
1330 return !pdev->broken_intx_masking;
1331 }
1332
pci_is_enabled(struct pci_dev * pdev)1333 static inline int pci_is_enabled(struct pci_dev *pdev)
1334 {
1335 return (atomic_read(&pdev->enable_cnt) > 0);
1336 }
1337
pci_is_managed(struct pci_dev * pdev)1338 static inline int pci_is_managed(struct pci_dev *pdev)
1339 {
1340 return pdev->is_managed;
1341 }
1342
1343 void pci_disable_device(struct pci_dev *dev);
1344
1345 extern unsigned int pcibios_max_latency;
1346 void pci_set_master(struct pci_dev *dev);
1347 void pci_clear_master(struct pci_dev *dev);
1348
1349 int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state);
1350 int pci_set_cacheline_size(struct pci_dev *dev);
1351 int __must_check pci_set_mwi(struct pci_dev *dev);
1352 int __must_check pcim_set_mwi(struct pci_dev *dev);
1353 int pci_try_set_mwi(struct pci_dev *dev);
1354 void pci_clear_mwi(struct pci_dev *dev);
1355 void pci_disable_parity(struct pci_dev *dev);
1356 void pci_intx(struct pci_dev *dev, int enable);
1357 bool pci_check_and_mask_intx(struct pci_dev *dev);
1358 bool pci_check_and_unmask_intx(struct pci_dev *dev);
1359 int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask);
1360 int pci_wait_for_pending_transaction(struct pci_dev *dev);
1361 int pcix_get_max_mmrbc(struct pci_dev *dev);
1362 int pcix_get_mmrbc(struct pci_dev *dev);
1363 int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc);
1364 int pcie_get_readrq(struct pci_dev *dev);
1365 int pcie_set_readrq(struct pci_dev *dev, int rq);
1366 int pcie_get_mps(struct pci_dev *dev);
1367 int pcie_set_mps(struct pci_dev *dev, int mps);
1368 u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
1369 enum pci_bus_speed *speed,
1370 enum pcie_link_width *width);
1371 int pcie_link_speed_mbps(struct pci_dev *pdev);
1372 void pcie_print_link_status(struct pci_dev *dev);
1373 int pcie_reset_flr(struct pci_dev *dev, bool probe);
1374 int pcie_flr(struct pci_dev *dev);
1375 int __pci_reset_function_locked(struct pci_dev *dev);
1376 int pci_reset_function(struct pci_dev *dev);
1377 int pci_reset_function_locked(struct pci_dev *dev);
1378 int pci_try_reset_function(struct pci_dev *dev);
1379 int pci_probe_reset_slot(struct pci_slot *slot);
1380 int pci_probe_reset_bus(struct pci_bus *bus);
1381 int pci_reset_bus(struct pci_dev *dev);
1382 void pci_reset_secondary_bus(struct pci_dev *dev);
1383 void pcibios_reset_secondary_bus(struct pci_dev *dev);
1384 void pci_update_resource(struct pci_dev *dev, int resno);
1385 int __must_check pci_assign_resource(struct pci_dev *dev, int i);
1386 int __must_check pci_reassign_resource(struct pci_dev *dev, int i, resource_size_t add_size, resource_size_t align);
1387 void pci_release_resource(struct pci_dev *dev, int resno);
1388 static inline int pci_rebar_bytes_to_size(u64 bytes)
1389 {
1390 bytes = roundup_pow_of_two(bytes);
1391
1392 /* Return BAR size as defined in the resizable BAR specification */
1393 return max(ilog2(bytes), 20) - 20;
1394 }
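/*
 * Illustrative note on the encoding above: the Resizable BAR capability
 * expresses sizes as log2(size) - 20, so 1 MB maps to 0, 256 MB to 8 and
 * 1 GB to 10; requests that are not a power of two are rounded up first.
 * For example (size constants assumed from <linux/sizes.h>):
 *
 *	pci_rebar_bytes_to_size(SZ_1M);		returns 0
 *	pci_rebar_bytes_to_size(SZ_256M);	returns 8
 *	pci_rebar_bytes_to_size(SZ_256M + 1);	rounds up to 512 MB, returns 9
 */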
1395
1396 u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar);
1397 int __must_check pci_resize_resource(struct pci_dev *dev, int i, int size);
1398 int pci_select_bars(struct pci_dev *dev, unsigned long flags);
1399 bool pci_device_is_present(struct pci_dev *pdev);
1400 void pci_ignore_hotplug(struct pci_dev *dev);
1401 struct pci_dev *pci_real_dma_dev(struct pci_dev *dev);
1402 int pci_status_get_and_clear_errors(struct pci_dev *pdev);
1403
1404 int __printf(6, 7) pci_request_irq(struct pci_dev *dev, unsigned int nr,
1405 irq_handler_t handler, irq_handler_t thread_fn, void *dev_id,
1406 const char *fmt, ...);
1407 void pci_free_irq(struct pci_dev *dev, unsigned int nr, void *dev_id);
1408
1409 /* ROM control related routines */
1410 int pci_enable_rom(struct pci_dev *pdev);
1411 void pci_disable_rom(struct pci_dev *pdev);
1412 void __iomem __must_check *pci_map_rom(struct pci_dev *pdev, size_t *size);
1413 void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom);
1414
1415 /* Power management related routines */
1416 int pci_save_state(struct pci_dev *dev);
1417 void pci_restore_state(struct pci_dev *dev);
1418 struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev);
1419 int pci_load_saved_state(struct pci_dev *dev,
1420 struct pci_saved_state *state);
1421 int pci_load_and_free_saved_state(struct pci_dev *dev,
1422 struct pci_saved_state **state);
1423 int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state);
1424 int pci_set_power_state(struct pci_dev *dev, pci_power_t state);
1425 int pci_set_power_state_locked(struct pci_dev *dev, pci_power_t state);
1426 pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state);
1427 bool pci_pme_capable(struct pci_dev *dev, pci_power_t state);
1428 void pci_pme_active(struct pci_dev *dev, bool enable);
1429 int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable);
1430 int pci_wake_from_d3(struct pci_dev *dev, bool enable);
1431 int pci_prepare_to_sleep(struct pci_dev *dev);
1432 int pci_back_from_sleep(struct pci_dev *dev);
1433 bool pci_dev_run_wake(struct pci_dev *dev);
1434 void pci_d3cold_enable(struct pci_dev *dev);
1435 void pci_d3cold_disable(struct pci_dev *dev);
1436 bool pcie_relaxed_ordering_enabled(struct pci_dev *dev);
1437 void pci_resume_bus(struct pci_bus *bus);
1438 void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state);
1439
1440 /* For use by arch with custom probe code */
1441 void set_pcie_port_type(struct pci_dev *pdev);
1442 void set_pcie_hotplug_bridge(struct pci_dev *pdev);
1443
1444 /* Functions for PCI Hotplug drivers to use */
1445 unsigned int pci_rescan_bus_bridge_resize(struct pci_dev *bridge);
1446 unsigned int pci_rescan_bus(struct pci_bus *bus);
1447 void pci_lock_rescan_remove(void);
1448 void pci_unlock_rescan_remove(void);
1449
1450 /* Vital Product Data routines */
1451 ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
1452 ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
1453 ssize_t pci_read_vpd_any(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
1454 ssize_t pci_write_vpd_any(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
1455
1456 /* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */
1457 resource_size_t pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx);
1458 void pci_bus_assign_resources(const struct pci_bus *bus);
1459 void pci_bus_claim_resources(struct pci_bus *bus);
1460 void pci_bus_size_bridges(struct pci_bus *bus);
1461 int pci_claim_resource(struct pci_dev *, int);
1462 int pci_claim_bridge_resource(struct pci_dev *bridge, int i);
1463 void pci_assign_unassigned_resources(void);
1464 void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge);
1465 void pci_assign_unassigned_bus_resources(struct pci_bus *bus);
1466 void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus);
1467 int pci_reassign_bridge_resources(struct pci_dev *bridge, unsigned long type);
1468 int pci_enable_resources(struct pci_dev *, int mask);
1469 void pci_assign_irq(struct pci_dev *dev);
1470 struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res);
1471 #define HAVE_PCI_REQ_REGIONS 2
1472 int __must_check pci_request_regions(struct pci_dev *, const char *);
1473 int __must_check pci_request_regions_exclusive(struct pci_dev *, const char *);
1474 void pci_release_regions(struct pci_dev *);
1475 int __must_check pci_request_region(struct pci_dev *, int, const char *);
1476 void pci_release_region(struct pci_dev *, int);
1477 int pci_request_selected_regions(struct pci_dev *, int, const char *);
1478 int pci_request_selected_regions_exclusive(struct pci_dev *, int, const char *);
1479 void pci_release_selected_regions(struct pci_dev *, int);
1480
1481 static inline __must_check struct resource *
1482 pci_request_config_region_exclusive(struct pci_dev *pdev, unsigned int offset,
1483 unsigned int len, const char *name)
1484 {
1485 return __request_region(&pdev->driver_exclusive_resource, offset, len,
1486 name, IORESOURCE_EXCLUSIVE);
1487 }
1488
1489 static inline void pci_release_config_region(struct pci_dev *pdev,
1490 unsigned int offset,
1491 unsigned int len)
1492 {
1493 __release_region(&pdev->driver_exclusive_resource, offset, len);
1494 }
1495
1496 /* drivers/pci/bus.c */
1497 void pci_add_resource(struct list_head *resources, struct resource *res);
1498 void pci_add_resource_offset(struct list_head *resources, struct resource *res,
1499 resource_size_t offset);
1500 void pci_free_resource_list(struct list_head *resources);
1501 void pci_bus_add_resource(struct pci_bus *bus, struct resource *res,
1502 unsigned int flags);
1503 struct resource *pci_bus_resource_n(const struct pci_bus *bus, int n);
1504 void pci_bus_remove_resources(struct pci_bus *bus);
1505 void pci_bus_remove_resource(struct pci_bus *bus, struct resource *res);
1506 int devm_request_pci_bus_resources(struct device *dev,
1507 struct list_head *resources);
1508
1509 /* Temporary until new and working PCI SBR API in place */
1510 int pci_bridge_secondary_bus_reset(struct pci_dev *dev);
1511
1512 #define __pci_bus_for_each_res0(bus, res, ...) \
1513 for (unsigned int __b = 0; \
1514 (res = pci_bus_resource_n(bus, __b)) || __b < PCI_BRIDGE_RESOURCE_NUM; \
1515 __b++)
1516
1517 #define __pci_bus_for_each_res1(bus, res, __b) \
1518 for (__b = 0; \
1519 (res = pci_bus_resource_n(bus, __b)) || __b < PCI_BRIDGE_RESOURCE_NUM; \
1520 __b++)
1521
1522 /**
1523 * pci_bus_for_each_resource - iterate over PCI bus resources
1524 * @bus: the PCI bus
1525 * @res: pointer to the current resource
1526 * @...: optional index of the current resource
1527 *
1528 * Iterate over PCI bus resources. The iteration first walks the PCI bus
1529 * resource array, which has at most %PCI_BRIDGE_RESOURCE_NUM entries,
1530 * and then continues with the separate list of additional resources, if
1531 * that list is not empty. That is why the logical OR is used.
1532 *
1533 * Possible usage:
1534 *
1535 * struct pci_bus *bus = ...;
1536 * struct resource *res;
1537 * unsigned int i;
1538 *
1539 * // With optional index
1540 * pci_bus_for_each_resource(bus, res, i)
1541 * pr_info("PCI bus resource[%u]: %pR\n", i, res);
1542 *
1543 * // Without index
1544 * pci_bus_for_each_resource(bus, res)
1545 * _do_something_(res);
1546 */
1547 #define pci_bus_for_each_resource(bus, res, ...) \
1548 CONCATENATE(__pci_bus_for_each_res, COUNT_ARGS(__VA_ARGS__)) \
1549 (bus, res, __VA_ARGS__)
1550
1551 int __must_check pci_bus_alloc_resource(struct pci_bus *bus,
1552 struct resource *res, resource_size_t size,
1553 resource_size_t align, resource_size_t min,
1554 unsigned long type_mask,
1555 resource_alignf alignf,
1556 void *alignf_data);
1557
1558
1559 int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr,
1560 resource_size_t size);
1561 unsigned long pci_address_to_pio(phys_addr_t addr);
1562 phys_addr_t pci_pio_to_address(unsigned long pio);
1563 int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr);
1564 int devm_pci_remap_iospace(struct device *dev, const struct resource *res,
1565 phys_addr_t phys_addr);
1566 void pci_unmap_iospace(struct resource *res);
1567 void __iomem *devm_pci_remap_cfgspace(struct device *dev,
1568 resource_size_t offset,
1569 resource_size_t size);
1570 void __iomem *devm_pci_remap_cfg_resource(struct device *dev,
1571 struct resource *res);
1572
1573 static inline pci_bus_addr_t pci_bus_address(struct pci_dev *pdev, int bar)
1574 {
1575 struct pci_bus_region region;
1576
1577 pcibios_resource_to_bus(pdev->bus, &region, &pdev->resource[bar]);
1578 return region.start;
1579 }
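/*
 * Minimal usage sketch (hypothetical driver code): pci_bus_address() is
 * useful when a device register must be programmed with the bus address
 * of one of the device's own BARs, which may differ from the CPU physical
 * address returned by pci_resource_start(). FOO_BAR0_BASE is made up.
 *
 *	pci_bus_addr_t bar0 = pci_bus_address(pdev, 0);
 *
 *	writeq(bar0, regs + FOO_BAR0_BASE);
 */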
1580
1581 /* Proper probing supporting hot-pluggable devices */
1582 int __must_check __pci_register_driver(struct pci_driver *, struct module *,
1583 const char *mod_name);
1584
1585 /* pci_register_driver() must be a macro so KBUILD_MODNAME can be expanded */
1586 #define pci_register_driver(driver) \
1587 __pci_register_driver(driver, THIS_MODULE, KBUILD_MODNAME)
1588
1589 void pci_unregister_driver(struct pci_driver *dev);
1590
1591 /**
1592 * module_pci_driver() - Helper macro for registering a PCI driver
1593 * @__pci_driver: pci_driver struct
1594 *
1595 * Helper macro for PCI drivers which do not do anything special in module
1596 * init/exit. This eliminates a lot of boilerplate. Each module may only
1597 * use this macro once, and calling it replaces module_init() and module_exit().
1598 */
1599 #define module_pci_driver(__pci_driver) \
1600 module_driver(__pci_driver, pci_register_driver, pci_unregister_driver)
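/*
 * Minimal usage sketch for module_pci_driver() (hypothetical driver; the
 * device ID and callback names are made up):
 *
 *	static const struct pci_device_id my_ids[] = {
 *		{ PCI_DEVICE(PCI_VENDOR_ID_REDHAT, 0x1234) },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(pci, my_ids);
 *
 *	static struct pci_driver my_pci_driver = {
 *		.name		= "my_pci_driver",
 *		.id_table	= my_ids,
 *		.probe		= my_probe,
 *		.remove		= my_remove,
 *	};
 *	module_pci_driver(my_pci_driver);
 */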
1601
1602 /**
1603 * builtin_pci_driver() - Helper macro for registering a PCI driver
1604 * @__pci_driver: pci_driver struct
1605 *
1606 * Helper macro for PCI drivers which do not do anything special in their
1607 * init code. This eliminates a lot of boilerplate. Each driver may only
1608 * use this macro once, and calling it replaces device_initcall(...).
1609 */
1610 #define builtin_pci_driver(__pci_driver) \
1611 builtin_driver(__pci_driver, pci_register_driver)
1612
1613 struct pci_driver *pci_dev_driver(const struct pci_dev *dev);
1614 int pci_add_dynid(struct pci_driver *drv,
1615 unsigned int vendor, unsigned int device,
1616 unsigned int subvendor, unsigned int subdevice,
1617 unsigned int class, unsigned int class_mask,
1618 unsigned long driver_data);
1619 const struct pci_device_id *pci_match_id(const struct pci_device_id *ids,
1620 struct pci_dev *dev);
1621 int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
1622 int pass);
1623
1624 void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
1625 void *userdata);
1626 void pci_walk_bus_locked(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
1627 void *userdata);
1628 int pci_cfg_space_size(struct pci_dev *dev);
1629 unsigned char pci_bus_max_busnr(struct pci_bus *bus);
1630 void pci_setup_bridge(struct pci_bus *bus);
1631 resource_size_t pcibios_window_alignment(struct pci_bus *bus,
1632 unsigned long type);
1633
1634 #define PCI_VGA_STATE_CHANGE_BRIDGE (1 << 0)
1635 #define PCI_VGA_STATE_CHANGE_DECODES (1 << 1)
1636
1637 int pci_set_vga_state(struct pci_dev *pdev, bool decode,
1638 unsigned int command_bits, u32 flags);
1639
1640 /*
1641 * Virtual interrupts allow more interrupts to be allocated than the
1642 * device actually supports. These are not programmed into the
1643 * device's MSI-X table and must be handled by some other means in
1644 * the driver.
1645 */
1646 #define PCI_IRQ_VIRTUAL (1 << 4)
1647
1648 #define PCI_IRQ_ALL_TYPES (PCI_IRQ_INTX | PCI_IRQ_MSI | PCI_IRQ_MSIX)
1649
1650 #include <linux/dmapool.h>
1651
1652 struct msix_entry {
1653 u32 vector; /* Kernel uses to write allocated vector */
1654 u16 entry; /* Driver uses to specify entry, OS writes */
1655 };
1656
1657 #ifdef CONFIG_PCI_MSI
1658 int pci_msi_vec_count(struct pci_dev *dev);
1659 void pci_disable_msi(struct pci_dev *dev);
1660 int pci_msix_vec_count(struct pci_dev *dev);
1661 void pci_disable_msix(struct pci_dev *dev);
1662 void pci_restore_msi_state(struct pci_dev *dev);
1663 int pci_msi_enabled(void);
1664 int pci_enable_msi(struct pci_dev *dev);
1665 int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
1666 int minvec, int maxvec);
1667 static inline int pci_enable_msix_exact(struct pci_dev *dev,
1668 struct msix_entry *entries, int nvec)
1669 {
1670 int rc = pci_enable_msix_range(dev, entries, nvec, nvec);
1671 if (rc < 0)
1672 return rc;
1673 return 0;
1674 }
1675 int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
1676 unsigned int max_vecs, unsigned int flags);
1677 int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
1678 unsigned int max_vecs, unsigned int flags,
1679 struct irq_affinity *affd);
1680
1681 bool pci_msix_can_alloc_dyn(struct pci_dev *dev);
1682 struct msi_map pci_msix_alloc_irq_at(struct pci_dev *dev, unsigned int index,
1683 const struct irq_affinity_desc *affdesc);
1684 void pci_msix_free_irq(struct pci_dev *pdev, struct msi_map map);
1685
1686 void pci_free_irq_vectors(struct pci_dev *dev);
1687 int pci_irq_vector(struct pci_dev *dev, unsigned int nr);
1688 const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev, int vec);
1689
1690 #else
1691 static inline int pci_msi_vec_count(struct pci_dev *dev) { return -ENOSYS; }
1692 static inline void pci_disable_msi(struct pci_dev *dev) { }
1693 static inline int pci_msix_vec_count(struct pci_dev *dev) { return -ENOSYS; }
1694 static inline void pci_disable_msix(struct pci_dev *dev) { }
1695 static inline void pci_restore_msi_state(struct pci_dev *dev) { }
1696 static inline int pci_msi_enabled(void) { return 0; }
1697 static inline int pci_enable_msi(struct pci_dev *dev)
1698 { return -ENOSYS; }
1699 static inline int pci_enable_msix_range(struct pci_dev *dev,
1700 struct msix_entry *entries, int minvec, int maxvec)
1701 { return -ENOSYS; }
1702 static inline int pci_enable_msix_exact(struct pci_dev *dev,
1703 struct msix_entry *entries, int nvec)
1704 { return -ENOSYS; }
1705
1706 static inline int
1707 pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
1708 unsigned int max_vecs, unsigned int flags,
1709 struct irq_affinity *aff_desc)
1710 {
1711 if ((flags & PCI_IRQ_INTX) && min_vecs == 1 && dev->irq)
1712 return 1;
1713 return -ENOSPC;
1714 }
1715 static inline int
1716 pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
1717 unsigned int max_vecs, unsigned int flags)
1718 {
1719 return pci_alloc_irq_vectors_affinity(dev, min_vecs, max_vecs,
1720 flags, NULL);
1721 }
1722
1723 static inline bool pci_msix_can_alloc_dyn(struct pci_dev *dev)
1724 { return false; }
1725 static inline struct msi_map pci_msix_alloc_irq_at(struct pci_dev *dev, unsigned int index,
1726 const struct irq_affinity_desc *affdesc)
1727 {
1728 struct msi_map map = { .index = -ENOSYS, };
1729
1730 return map;
1731 }
1732
1733 static inline void pci_msix_free_irq(struct pci_dev *pdev, struct msi_map map)
1734 {
1735 }
1736
1737 static inline void pci_free_irq_vectors(struct pci_dev *dev)
1738 {
1739 }
1740
1741 static inline int pci_irq_vector(struct pci_dev *dev, unsigned int nr)
1742 {
1743 if (WARN_ON_ONCE(nr > 0))
1744 return -EINVAL;
1745 return dev->irq;
1746 }
1747 static inline const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev,
1748 int vec)
1749 {
1750 return cpu_possible_mask;
1751 }
1752 #endif
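/*
 * Minimal usage sketch for the vector allocation API above (hypothetical
 * driver code): allocate between 1 and 8 vectors of any supported type,
 * then look up the Linux IRQ number of each allocated vector.
 *
 *	int i, nvec;
 *
 *	nvec = pci_alloc_irq_vectors(pdev, 1, 8, PCI_IRQ_ALL_TYPES);
 *	if (nvec < 0)
 *		return nvec;
 *
 *	for (i = 0; i < nvec; i++)
 *		request_irq(pci_irq_vector(pdev, i), my_handler, 0,
 *			    "my_driver", my_data);
 *
 *	On teardown, free the IRQs and then call pci_free_irq_vectors(pdev).
 */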
1753
1754 /**
1755 * pci_irqd_intx_xlate() - Translate PCI INTx value to an IRQ domain hwirq
1756 * @d: the INTx IRQ domain
1757 * @node: the DT node for the device whose interrupt we're translating
1758 * @intspec: the interrupt specifier data from the DT
1759 * @intsize: the number of entries in @intspec
1760 * @out_hwirq: pointer at which to write the hwirq number
1761 * @out_type: pointer at which to write the interrupt type
1762 *
1763 * Translate a PCI INTx interrupt number from device tree in the range 1-4, as
1764 * stored in the standard PCI_INTERRUPT_PIN register, to a value in the range
1765 * 0-3 suitable for use in a 4 entry IRQ domain. That is, subtract one from the
1766 * INTx value to obtain the hwirq number.
1767 *
1768 * Returns 0 on success, or -EINVAL if the interrupt specifier is out of range.
1769 */
1770 static inline int pci_irqd_intx_xlate(struct irq_domain *d,
1771 struct device_node *node,
1772 const u32 *intspec,
1773 unsigned int intsize,
1774 unsigned long *out_hwirq,
1775 unsigned int *out_type)
1776 {
1777 const u32 intx = intspec[0];
1778
1779 if (intx < PCI_INTERRUPT_INTA || intx > PCI_INTERRUPT_INTD)
1780 return -EINVAL;
1781
1782 *out_hwirq = intx - PCI_INTERRUPT_INTA;
1783 return 0;
1784 }
1785
1786 #ifdef CONFIG_PCIEPORTBUS
1787 extern bool pcie_ports_disabled;
1788 extern bool pcie_ports_native;
1789 #else
1790 #define pcie_ports_disabled true
1791 #define pcie_ports_native false
1792 #endif
1793
1794 #define PCIE_LINK_STATE_L0S (BIT(0) | BIT(1)) /* Upstr/dwnstr L0s */
1795 #define PCIE_LINK_STATE_L1 BIT(2) /* L1 state */
1796 #define PCIE_LINK_STATE_L1_1 BIT(3) /* ASPM L1.1 state */
1797 #define PCIE_LINK_STATE_L1_2 BIT(4) /* ASPM L1.2 state */
1798 #define PCIE_LINK_STATE_L1_1_PCIPM BIT(5) /* PCI-PM L1.1 state */
1799 #define PCIE_LINK_STATE_L1_2_PCIPM BIT(6) /* PCI-PM L1.2 state */
1800 #define PCIE_LINK_STATE_ASPM_ALL (PCIE_LINK_STATE_L0S |\
1801 PCIE_LINK_STATE_L1 |\
1802 PCIE_LINK_STATE_L1_1 |\
1803 PCIE_LINK_STATE_L1_2 |\
1804 PCIE_LINK_STATE_L1_1_PCIPM |\
1805 PCIE_LINK_STATE_L1_2_PCIPM)
1806 #define PCIE_LINK_STATE_CLKPM BIT(7)
1807 #define PCIE_LINK_STATE_ALL (PCIE_LINK_STATE_ASPM_ALL |\
1808 PCIE_LINK_STATE_CLKPM)
1809
1810 #ifdef CONFIG_PCIEASPM
1811 int pci_disable_link_state(struct pci_dev *pdev, int state);
1812 int pci_disable_link_state_locked(struct pci_dev *pdev, int state);
1813 int pci_enable_link_state(struct pci_dev *pdev, int state);
1814 int pci_enable_link_state_locked(struct pci_dev *pdev, int state);
1815 void pcie_no_aspm(void);
1816 bool pcie_aspm_support_enabled(void);
1817 bool pcie_aspm_enabled(struct pci_dev *pdev);
1818 #else
1819 static inline int pci_disable_link_state(struct pci_dev *pdev, int state)
1820 { return 0; }
1821 static inline int pci_disable_link_state_locked(struct pci_dev *pdev, int state)
1822 { return 0; }
1823 static inline int pci_enable_link_state(struct pci_dev *pdev, int state)
1824 { return 0; }
1825 static inline int pci_enable_link_state_locked(struct pci_dev *pdev, int state)
1826 { return 0; }
1827 static inline void pcie_no_aspm(void) { }
1828 static inline bool pcie_aspm_support_enabled(void) { return false; }
1829 static inline bool pcie_aspm_enabled(struct pci_dev *pdev) { return false; }
1830 #endif
1831
1832 #ifdef CONFIG_PCIEAER
1833 bool pci_aer_available(void);
1834 #else
1835 static inline bool pci_aer_available(void) { return false; }
1836 #endif
1837
1838 bool pci_ats_disabled(void);
1839
1840 #ifdef CONFIG_PCIE_PTM
1841 int pci_enable_ptm(struct pci_dev *dev, u8 *granularity);
1842 void pci_disable_ptm(struct pci_dev *dev);
1843 bool pcie_ptm_enabled(struct pci_dev *dev);
1844 #else
1845 static inline int pci_enable_ptm(struct pci_dev *dev, u8 *granularity)
1846 { return -EINVAL; }
1847 static inline void pci_disable_ptm(struct pci_dev *dev) { }
1848 static inline bool pcie_ptm_enabled(struct pci_dev *dev)
1849 { return false; }
1850 #endif
1851
1852 void pci_cfg_access_lock(struct pci_dev *dev);
1853 bool pci_cfg_access_trylock(struct pci_dev *dev);
1854 void pci_cfg_access_unlock(struct pci_dev *dev);
1855
1856 void pci_dev_lock(struct pci_dev *dev);
1857 int pci_dev_trylock(struct pci_dev *dev);
1858 void pci_dev_unlock(struct pci_dev *dev);
1859 DEFINE_GUARD(pci_dev, struct pci_dev *, pci_dev_lock(_T), pci_dev_unlock(_T))
1860
1861 /*
1862 * PCI domain support. Sometimes called PCI segment (e.g., by ACPI),
1863 * a PCI domain is defined to be a set of PCI buses which share
1864 * configuration space.
1865 */
1866 #ifdef CONFIG_PCI_DOMAINS
1867 extern int pci_domains_supported;
1868 #else
1869 enum { pci_domains_supported = 0 };
1870 static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
1871 static inline int pci_proc_domain(struct pci_bus *bus) { return 0; }
1872 #endif /* CONFIG_PCI_DOMAINS */
1873
1874 /*
1875 * Generic implementation for PCI domain support. If your
1876 * architecture does not need custom management of PCI
1877 * domains, then this implementation will be used.
1878 */
1879 #ifdef CONFIG_PCI_DOMAINS_GENERIC
1880 static inline int pci_domain_nr(struct pci_bus *bus)
1881 {
1882 return bus->domain_nr;
1883 }
1884 #ifdef CONFIG_ACPI
1885 int acpi_pci_bus_find_domain_nr(struct pci_bus *bus);
1886 #else
1887 static inline int acpi_pci_bus_find_domain_nr(struct pci_bus *bus)
1888 { return 0; }
1889 #endif
1890 int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent);
1891 void pci_bus_release_domain_nr(struct device *parent, int domain_nr);
1892 #endif
1893
1894 /* Some architectures require additional setup to direct VGA traffic */
1895 typedef int (*arch_set_vga_state_t)(struct pci_dev *pdev, bool decode,
1896 unsigned int command_bits, u32 flags);
1897 void pci_register_set_vga_state(arch_set_vga_state_t func);
1898
1899 static inline int
1900 pci_request_io_regions(struct pci_dev *pdev, const char *name)
1901 {
1902 return pci_request_selected_regions(pdev,
1903 pci_select_bars(pdev, IORESOURCE_IO), name);
1904 }
1905
1906 static inline void
1907 pci_release_io_regions(struct pci_dev *pdev)
1908 {
1909 return pci_release_selected_regions(pdev,
1910 pci_select_bars(pdev, IORESOURCE_IO));
1911 }
1912
1913 static inline int
1914 pci_request_mem_regions(struct pci_dev *pdev, const char *name)
1915 {
1916 return pci_request_selected_regions(pdev,
1917 pci_select_bars(pdev, IORESOURCE_MEM), name);
1918 }
1919
1920 static inline void
1921 pci_release_mem_regions(struct pci_dev *pdev)
1922 {
1923 return pci_release_selected_regions(pdev,
1924 pci_select_bars(pdev, IORESOURCE_MEM));
1925 }
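/*
 * Typical probe-time use of the region helpers above (hypothetical driver
 * code, error handling abbreviated):
 *
 *	err = pci_enable_device(pdev);
 *	if (err)
 *		return err;
 *
 *	err = pci_request_mem_regions(pdev, "my_driver");
 *	if (err)
 *		goto err_disable;
 *
 *	pci_set_master(pdev);
 *
 *	On remove, undo in reverse order: pci_release_mem_regions() and
 *	pci_disable_device().
 */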
1926
1927 #else /* CONFIG_PCI is not enabled */
1928
1929 static inline void pci_set_flags(int flags) { }
1930 static inline void pci_add_flags(int flags) { }
1931 static inline void pci_clear_flags(int flags) { }
1932 static inline int pci_has_flag(int flag) { return 0; }
1933
1934 /*
1935 * If the system does not have PCI, clearly these return errors. Define
1936 * these as simple inline functions to avoid hair in drivers.
1937 */
1938 #define _PCI_NOP(o, s, t) \
1939 static inline int pci_##o##_config_##s(struct pci_dev *dev, \
1940 int where, t val) \
1941 { return PCIBIOS_FUNC_NOT_SUPPORTED; }
1942
1943 #define _PCI_NOP_ALL(o, x) _PCI_NOP(o, byte, u8 x) \
1944 _PCI_NOP(o, word, u16 x) \
1945 _PCI_NOP(o, dword, u32 x)
1946 _PCI_NOP_ALL(read, *)
1947 _PCI_NOP_ALL(write,)
1948
1949 static inline struct pci_dev *pci_get_device(unsigned int vendor,
1950 unsigned int device,
1951 struct pci_dev *from)
1952 { return NULL; }
1953
1954 static inline struct pci_dev *pci_get_subsys(unsigned int vendor,
1955 unsigned int device,
1956 unsigned int ss_vendor,
1957 unsigned int ss_device,
1958 struct pci_dev *from)
1959 { return NULL; }
1960
1961 static inline struct pci_dev *pci_get_class(unsigned int class,
1962 struct pci_dev *from)
1963 { return NULL; }
1964
1965 static inline struct pci_dev *pci_get_base_class(unsigned int class,
1966 struct pci_dev *from)
1967 { return NULL; }
1968
1969 static inline int pci_dev_present(const struct pci_device_id *ids)
1970 { return 0; }
1971
1972 #define no_pci_devices() (1)
1973 #define pci_dev_put(dev) do { } while (0)
1974
1975 static inline void pci_set_master(struct pci_dev *dev) { }
1976 static inline void pci_clear_master(struct pci_dev *dev) { }
1977 static inline int pci_enable_device(struct pci_dev *dev) { return -EIO; }
1978 static inline void pci_disable_device(struct pci_dev *dev) { }
1979 static inline int pcim_enable_device(struct pci_dev *pdev) { return -EIO; }
1980 static inline int pci_assign_resource(struct pci_dev *dev, int i)
1981 { return -EBUSY; }
1982 static inline int __must_check __pci_register_driver(struct pci_driver *drv,
1983 struct module *owner,
1984 const char *mod_name)
1985 { return 0; }
1986 static inline int pci_register_driver(struct pci_driver *drv)
1987 { return 0; }
1988 static inline void pci_unregister_driver(struct pci_driver *drv) { }
1989 static inline u8 pci_find_capability(struct pci_dev *dev, int cap)
1990 { return 0; }
1991 static inline u8 pci_find_next_capability(struct pci_dev *dev, u8 post, int cap)
1992 { return 0; }
1993 static inline u16 pci_find_ext_capability(struct pci_dev *dev, int cap)
1994 { return 0; }
1995
1996 static inline u64 pci_get_dsn(struct pci_dev *dev)
1997 { return 0; }
1998
1999 /* Power management related routines */
2000 static inline int pci_save_state(struct pci_dev *dev) { return 0; }
2001 static inline void pci_restore_state(struct pci_dev *dev) { }
2002 static inline int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
2003 { return 0; }
2004 static inline int pci_set_power_state_locked(struct pci_dev *dev, pci_power_t state)
2005 { return 0; }
2006 static inline int pci_wake_from_d3(struct pci_dev *dev, bool enable)
2007 { return 0; }
2008 static inline pci_power_t pci_choose_state(struct pci_dev *dev,
2009 pm_message_t state)
2010 { return PCI_D0; }
2011 static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state,
2012 int enable)
2013 { return 0; }
2014
2015 static inline struct resource *pci_find_resource(struct pci_dev *dev,
2016 struct resource *res)
2017 { return NULL; }
2018 static inline int pci_request_regions(struct pci_dev *dev, const char *res_name)
2019 { return -EIO; }
2020 static inline void pci_release_regions(struct pci_dev *dev) { }
2021
2022 static inline int pci_register_io_range(struct fwnode_handle *fwnode,
2023 phys_addr_t addr, resource_size_t size)
2024 { return -EINVAL; }
2025
2026 static inline unsigned long pci_address_to_pio(phys_addr_t addr) { return -1; }
2027
2028 static inline struct pci_bus *pci_find_next_bus(const struct pci_bus *from)
2029 { return NULL; }
2030 static inline struct pci_dev *pci_get_slot(struct pci_bus *bus,
2031 unsigned int devfn)
2032 { return NULL; }
2033 static inline struct pci_dev *pci_get_domain_bus_and_slot(int domain,
2034 unsigned int bus, unsigned int devfn)
2035 { return NULL; }
2036
2037 static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
2038 static inline struct pci_dev *pci_dev_get(struct pci_dev *dev) { return NULL; }
2039
2040 #define dev_is_pci(d) (false)
2041 #define dev_is_pf(d) (false)
2042 static inline bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
2043 { return false; }
2044 static inline int pci_irqd_intx_xlate(struct irq_domain *d,
2045 struct device_node *node,
2046 const u32 *intspec,
2047 unsigned int intsize,
2048 unsigned long *out_hwirq,
2049 unsigned int *out_type)
2050 { return -EINVAL; }
2051
2052 static inline const struct pci_device_id *pci_match_id(const struct pci_device_id *ids,
2053 struct pci_dev *dev)
2054 { return NULL; }
2055 static inline bool pci_ats_disabled(void) { return true; }
2056
2057 static inline int pci_irq_vector(struct pci_dev *dev, unsigned int nr)
2058 {
2059 return -EINVAL;
2060 }
2061
2062 static inline int
2063 pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
2064 unsigned int max_vecs, unsigned int flags,
2065 struct irq_affinity *aff_desc)
2066 {
2067 return -ENOSPC;
2068 }
2069 static inline int
2070 pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
2071 unsigned int max_vecs, unsigned int flags)
2072 {
2073 return -ENOSPC;
2074 }
2075 #endif /* CONFIG_PCI */
2076
2077 /* Include architecture-dependent settings and functions */
2078
2079 #include <asm/pci.h>
2080
2081 /*
2082 * pci_mmap_resource_range() maps a specific BAR, and vm->vm_pgoff
2083 * is expected to be an offset within that region.
2084 *
2085 */
2086 int pci_mmap_resource_range(struct pci_dev *dev, int bar,
2087 struct vm_area_struct *vma,
2088 enum pci_mmap_state mmap_state, int write_combine);
2089
2090 #ifndef arch_can_pci_mmap_wc
2091 #define arch_can_pci_mmap_wc() 0
2092 #endif
2093
2094 #ifndef arch_can_pci_mmap_io
2095 #define arch_can_pci_mmap_io() 0
2096 #define pci_iobar_pfn(pdev, bar, vma) (-EINVAL)
2097 #else
2098 int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma);
2099 #endif
2100
2101 #ifndef pci_root_bus_fwnode
2102 #define pci_root_bus_fwnode(bus) NULL
2103 #endif
2104
2105 /*
2106 * These helpers provide future and backwards compatibility
2107 * for accessing popular PCI BAR info
2108 */
2109 #define pci_resource_n(dev, bar) (&(dev)->resource[(bar)])
2110 #define pci_resource_start(dev, bar) (pci_resource_n(dev, bar)->start)
2111 #define pci_resource_end(dev, bar) (pci_resource_n(dev, bar)->end)
2112 #define pci_resource_flags(dev, bar) (pci_resource_n(dev, bar)->flags)
2113 #define pci_resource_len(dev,bar) \
2114 (pci_resource_end((dev), (bar)) ? \
2115 resource_size(pci_resource_n((dev), (bar))) : 0)
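/*
 * Illustrative use of the BAR helpers above (hypothetical driver code):
 * check that BAR 0 is a memory BAR and map it; pci_ioremap_bar(), declared
 * later in this header, maps the whole pci_resource_len(pdev, 0) range.
 *
 *	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM))
 *		return -ENODEV;
 *
 *	regs = pci_ioremap_bar(pdev, 0);
 *	if (!regs)
 *		return -ENOMEM;
 */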
2116
2117 #define __pci_dev_for_each_res0(dev, res, ...) \
2118 for (unsigned int __b = 0; \
2119 __b < PCI_NUM_RESOURCES && (res = pci_resource_n(dev, __b)); \
2120 __b++)
2121
2122 #define __pci_dev_for_each_res1(dev, res, __b) \
2123 for (__b = 0; \
2124 __b < PCI_NUM_RESOURCES && (res = pci_resource_n(dev, __b)); \
2125 __b++)
2126
2127 #define pci_dev_for_each_resource(dev, res, ...) \
2128 CONCATENATE(__pci_dev_for_each_res, COUNT_ARGS(__VA_ARGS__)) \
2129 (dev, res, __VA_ARGS__)
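/*
 * Example, mirroring the pci_bus_for_each_resource() comment above
 * (hypothetical code):
 *
 *	struct resource *res;
 *	unsigned int i;
 *
 *	pci_dev_for_each_resource(pdev, res, i)
 *		if (res->flags & IORESOURCE_MEM)
 *			pr_info("BAR %u: %pR\n", i, res);
 */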
2130
2131 /*
2132 * Similar to the helpers above, these manipulate per-pci_dev
2133 * driver-specific data. They are really just wrappers around the
2134 * corresponding generic device structure functions.
2135 */
2136 static inline void *pci_get_drvdata(struct pci_dev *pdev)
2137 {
2138 return dev_get_drvdata(&pdev->dev);
2139 }
2140
2141 static inline void pci_set_drvdata(struct pci_dev *pdev, void *data)
2142 {
2143 dev_set_drvdata(&pdev->dev, data);
2144 }
2145
2146 static inline const char *pci_name(const struct pci_dev *pdev)
2147 {
2148 return dev_name(&pdev->dev);
2149 }
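/*
 * Usage sketch (hypothetical driver code): stash per-device state at probe
 * time and retrieve it later, e.g. in remove or a PM callback. struct
 * my_dev is made up.
 *
 *	In probe:
 *		struct my_dev *md = devm_kzalloc(&pdev->dev, sizeof(*md),
 *						 GFP_KERNEL);
 *		pci_set_drvdata(pdev, md);
 *
 *	In remove/suspend/resume:
 *		struct my_dev *md = pci_get_drvdata(pdev);
 */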
2150
2151 void pci_resource_to_user(const struct pci_dev *dev, int bar,
2152 const struct resource *rsrc,
2153 resource_size_t *start, resource_size_t *end);
2154
2155 /*
2156 * The world is not perfect and supplies us with broken PCI devices.
2157 * For at least a part of these bugs we need a work-around, so both
2158 * generic (drivers/pci/quirks.c) and per-architecture code can define
2159 * fixup hooks to be called for particular buggy devices.
2160 */
2161
2162 struct pci_fixup {
2163 u16 vendor; /* Or PCI_ANY_ID */
2164 u16 device; /* Or PCI_ANY_ID */
2165 u32 class; /* Or PCI_ANY_ID */
2166 unsigned int class_shift; /* should be 0, 8, 16 */
2167 #ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
2168 int hook_offset;
2169 #else
2170 void (*hook)(struct pci_dev *dev);
2171 #endif
2172 };
2173
2174 enum pci_fixup_pass {
2175 pci_fixup_early, /* Before probing BARs */
2176 pci_fixup_header, /* After reading configuration header */
2177 pci_fixup_final, /* Final phase of device fixups */
2178 pci_fixup_enable, /* pci_enable_device() time */
2179 pci_fixup_resume, /* pci_device_resume() */
2180 pci_fixup_suspend, /* pci_device_suspend() */
2181 pci_fixup_resume_early, /* pci_device_resume_early() */
2182 pci_fixup_suspend_late, /* pci_device_suspend_late() */
2183 };
2184
2185 #ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
2186 #define ___DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
2187 class_shift, hook) \
2188 __ADDRESSABLE(hook) \
2189 asm(".section " #sec ", \"a\" \n" \
2190 ".balign 16 \n" \
2191 ".short " #vendor ", " #device " \n" \
2192 ".long " #class ", " #class_shift " \n" \
2193 ".long " #hook " - . \n" \
2194 ".previous \n");
2195
2196 /*
2197 * Clang's LTO may rename static functions in C, but has no way to
2198 * handle such renamings when referenced from inline asm. To work
2199 * around this, create global C stubs for these cases.
2200 */
2201 #ifdef CONFIG_LTO_CLANG
2202 #define __DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
2203 class_shift, hook, stub) \
2204 void stub(struct pci_dev *dev); \
2205 void stub(struct pci_dev *dev) \
2206 { \
2207 hook(dev); \
2208 } \
2209 ___DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
2210 class_shift, stub)
2211 #else
2212 #define __DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
2213 class_shift, hook, stub) \
2214 ___DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
2215 class_shift, hook)
2216 #endif
2217
2218 #define DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
2219 class_shift, hook) \
2220 __DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
2221 class_shift, hook, __UNIQUE_ID(hook))
2222 #else
2223 /* Anonymous variables would be nice... */
2224 #define DECLARE_PCI_FIXUP_SECTION(section, name, vendor, device, class, \
2225 class_shift, hook) \
2226 static const struct pci_fixup __PASTE(__pci_fixup_##name,__LINE__) __used \
2227 __attribute__((__section__(#section), aligned((sizeof(void *))))) \
2228 = { vendor, device, class, class_shift, hook };
2229 #endif
2230
2231 #define DECLARE_PCI_FIXUP_CLASS_EARLY(vendor, device, class, \
2232 class_shift, hook) \
2233 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \
2234 hook, vendor, device, class, class_shift, hook)
2235 #define DECLARE_PCI_FIXUP_CLASS_HEADER(vendor, device, class, \
2236 class_shift, hook) \
2237 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header, \
2238 hook, vendor, device, class, class_shift, hook)
2239 #define DECLARE_PCI_FIXUP_CLASS_FINAL(vendor, device, class, \
2240 class_shift, hook) \
2241 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final, \
2242 hook, vendor, device, class, class_shift, hook)
2243 #define DECLARE_PCI_FIXUP_CLASS_ENABLE(vendor, device, class, \
2244 class_shift, hook) \
2245 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable, \
2246 hook, vendor, device, class, class_shift, hook)
2247 #define DECLARE_PCI_FIXUP_CLASS_RESUME(vendor, device, class, \
2248 class_shift, hook) \
2249 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \
2250 resume##hook, vendor, device, class, class_shift, hook)
2251 #define DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(vendor, device, class, \
2252 class_shift, hook) \
2253 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \
2254 resume_early##hook, vendor, device, class, class_shift, hook)
2255 #define DECLARE_PCI_FIXUP_CLASS_SUSPEND(vendor, device, class, \
2256 class_shift, hook) \
2257 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \
2258 suspend##hook, vendor, device, class, class_shift, hook)
2259 #define DECLARE_PCI_FIXUP_CLASS_SUSPEND_LATE(vendor, device, class, \
2260 class_shift, hook) \
2261 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend_late, \
2262 suspend_late##hook, vendor, device, class, class_shift, hook)
2263
2264 #define DECLARE_PCI_FIXUP_EARLY(vendor, device, hook) \
2265 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \
2266 hook, vendor, device, PCI_ANY_ID, 0, hook)
2267 #define DECLARE_PCI_FIXUP_HEADER(vendor, device, hook) \
2268 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header, \
2269 hook, vendor, device, PCI_ANY_ID, 0, hook)
2270 #define DECLARE_PCI_FIXUP_FINAL(vendor, device, hook) \
2271 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final, \
2272 hook, vendor, device, PCI_ANY_ID, 0, hook)
2273 #define DECLARE_PCI_FIXUP_ENABLE(vendor, device, hook) \
2274 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable, \
2275 hook, vendor, device, PCI_ANY_ID, 0, hook)
2276 #define DECLARE_PCI_FIXUP_RESUME(vendor, device, hook) \
2277 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \
2278 resume##hook, vendor, device, PCI_ANY_ID, 0, hook)
2279 #define DECLARE_PCI_FIXUP_RESUME_EARLY(vendor, device, hook) \
2280 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \
2281 resume_early##hook, vendor, device, PCI_ANY_ID, 0, hook)
2282 #define DECLARE_PCI_FIXUP_SUSPEND(vendor, device, hook) \
2283 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \
2284 suspend##hook, vendor, device, PCI_ANY_ID, 0, hook)
2285 #define DECLARE_PCI_FIXUP_SUSPEND_LATE(vendor, device, hook) \
2286 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend_late, \
2287 suspend_late##hook, vendor, device, PCI_ANY_ID, 0, hook)
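/*
 * Usage sketch for the fixup macros above (hypothetical quirk; the device
 * ID and the workaround itself are made up):
 *
 *	static void quirk_example_no_msi(struct pci_dev *dev)
 *	{
 *		dev->no_msi = 1;
 *		pci_info(dev, "working around broken MSI\n");
 *	}
 *	DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_REDHAT, 0x1234,
 *				quirk_example_no_msi);
 */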
2288
2289 #ifdef CONFIG_PCI_QUIRKS
2290 void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev);
2291 #else
2292 static inline void pci_fixup_device(enum pci_fixup_pass pass,
2293 struct pci_dev *dev) { }
2294 #endif
2295
2296 void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen);
2297 void __iomem *pcim_iomap_region(struct pci_dev *pdev, int bar,
2298 const char *name);
2299 void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr);
2300 void __iomem * const *pcim_iomap_table(struct pci_dev *pdev);
2301 int pcim_request_region(struct pci_dev *pdev, int bar, const char *name);
2302 int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name);
2303 int pcim_iomap_regions_request_all(struct pci_dev *pdev, int mask,
2304 const char *name);
2305 void pcim_iounmap_regions(struct pci_dev *pdev, int mask);
2306 void __iomem *pcim_iomap_range(struct pci_dev *pdev, int bar,
2307 unsigned long offset, unsigned long len);
2308
2309 extern int pci_pci_problems;
2310 #define PCIPCI_FAIL 1 /* No PCI PCI DMA */
2311 #define PCIPCI_TRITON 2
2312 #define PCIPCI_NATOMA 4
2313 #define PCIPCI_VIAETBF 8
2314 #define PCIPCI_VSFX 16
2315 #define PCIPCI_ALIMAGIK 32 /* Need low latency setting */
2316 #define PCIAGP_FAIL 64 /* No PCI to AGP DMA */
2317
2318 extern unsigned long pci_cardbus_io_size;
2319 extern unsigned long pci_cardbus_mem_size;
2320 extern u8 pci_dfl_cache_line_size;
2321 extern u8 pci_cache_line_size;
2322
2323 /* Architecture-specific versions may override these (weak) */
2324 void pcibios_disable_device(struct pci_dev *dev);
2325 void pcibios_set_master(struct pci_dev *dev);
2326 int pcibios_set_pcie_reset_state(struct pci_dev *dev,
2327 enum pcie_reset_state state);
2328 int pcibios_device_add(struct pci_dev *dev);
2329 void pcibios_release_device(struct pci_dev *dev);
2330 #ifdef CONFIG_PCI
2331 void pcibios_penalize_isa_irq(int irq, int active);
2332 #else
2333 static inline void pcibios_penalize_isa_irq(int irq, int active) {}
2334 #endif
2335 int pcibios_alloc_irq(struct pci_dev *dev);
2336 void pcibios_free_irq(struct pci_dev *dev);
2337 resource_size_t pcibios_default_alignment(void);
2338
2339 #if !defined(HAVE_PCI_MMAP) && !defined(ARCH_GENERIC_PCI_MMAP_RESOURCE)
2340 extern int pci_create_resource_files(struct pci_dev *dev);
2341 extern void pci_remove_resource_files(struct pci_dev *dev);
2342 #endif
2343
2344 #if defined(CONFIG_PCI_MMCONFIG) || defined(CONFIG_ACPI_MCFG)
2345 void __init pci_mmcfg_early_init(void);
2346 void __init pci_mmcfg_late_init(void);
2347 #else
2348 static inline void pci_mmcfg_early_init(void) { }
2349 static inline void pci_mmcfg_late_init(void) { }
2350 #endif
2351
2352 int pci_ext_cfg_avail(void);
2353
2354 void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar);
2355 void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar);
2356
2357 #ifdef CONFIG_PCI_IOV
2358 int pci_iov_virtfn_bus(struct pci_dev *dev, int id);
2359 int pci_iov_virtfn_devfn(struct pci_dev *dev, int id);
2360 int pci_iov_vf_id(struct pci_dev *dev);
2361 void *pci_iov_get_pf_drvdata(struct pci_dev *dev, struct pci_driver *pf_driver);
2362 int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn);
2363 void pci_disable_sriov(struct pci_dev *dev);
2364
2365 int pci_iov_sysfs_link(struct pci_dev *dev, struct pci_dev *virtfn, int id);
2366 int pci_iov_add_virtfn(struct pci_dev *dev, int id);
2367 void pci_iov_remove_virtfn(struct pci_dev *dev, int id);
2368 int pci_num_vf(struct pci_dev *dev);
2369 int pci_vfs_assigned(struct pci_dev *dev);
2370 int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs);
2371 int pci_sriov_get_totalvfs(struct pci_dev *dev);
2372 int pci_sriov_configure_simple(struct pci_dev *dev, int nr_virtfn);
2373 resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno);
2374 void pci_vf_drivers_autoprobe(struct pci_dev *dev, bool probe);
2375
2376 /* Arch may override these (weak) */
2377 int pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs);
2378 int pcibios_sriov_disable(struct pci_dev *pdev);
2379 resource_size_t pcibios_iov_resource_alignment(struct pci_dev *dev, int resno);
2380 #else
2381 static inline int pci_iov_virtfn_bus(struct pci_dev *dev, int id)
2382 {
2383 return -ENOSYS;
2384 }
2385 static inline int pci_iov_virtfn_devfn(struct pci_dev *dev, int id)
2386 {
2387 return -ENOSYS;
2388 }
2389
2390 static inline int pci_iov_vf_id(struct pci_dev *dev)
2391 {
2392 return -ENOSYS;
2393 }
2394
2395 static inline void *pci_iov_get_pf_drvdata(struct pci_dev *dev,
2396 struct pci_driver *pf_driver)
2397 {
2398 return ERR_PTR(-EINVAL);
2399 }
2400
2401 static inline int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
2402 { return -ENODEV; }
2403
2404 static inline int pci_iov_sysfs_link(struct pci_dev *dev,
2405 struct pci_dev *virtfn, int id)
2406 {
2407 return -ENODEV;
2408 }
2409 static inline int pci_iov_add_virtfn(struct pci_dev *dev, int id)
2410 {
2411 return -ENOSYS;
2412 }
2413 static inline void pci_iov_remove_virtfn(struct pci_dev *dev,
2414 int id) { }
2415 static inline void pci_disable_sriov(struct pci_dev *dev) { }
2416 static inline int pci_num_vf(struct pci_dev *dev) { return 0; }
2417 static inline int pci_vfs_assigned(struct pci_dev *dev)
2418 { return 0; }
2419 static inline int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs)
2420 { return 0; }
2421 static inline int pci_sriov_get_totalvfs(struct pci_dev *dev)
2422 { return 0; }
2423 #define pci_sriov_configure_simple NULL
2424 static inline resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno)
2425 { return 0; }
2426 static inline void pci_vf_drivers_autoprobe(struct pci_dev *dev, bool probe) { }
2427 #endif
2428
2429 #if defined(CONFIG_HOTPLUG_PCI) || defined(CONFIG_HOTPLUG_PCI_MODULE)
2430 void pci_hp_create_module_link(struct pci_slot *pci_slot);
2431 void pci_hp_remove_module_link(struct pci_slot *pci_slot);
2432 #endif
2433
2434 /**
2435 * pci_pcie_cap - get the saved PCIe capability offset
2436 * @dev: PCI device
2437 *
2438 * The PCIe capability offset is calculated at PCI device initialization
2439 * time and saved in the device structure. This function returns the saved
2440 * offset. Using it instead of pci_find_capability() avoids an unnecessary
2441 * search of PCI configuration space. If you need to recalculate the PCIe
2442 * capability offset from the raw device for some reason, use
2443 * pci_find_capability() instead.
2444 */
2445 static inline int pci_pcie_cap(struct pci_dev *dev)
2446 {
2447 return dev->pcie_cap;
2448 }
2449
2450 /**
2451 * pci_is_pcie - check if the PCI device is PCI Express capable
2452 * @dev: PCI device
2453 *
2454 * Returns: true if the PCI device is PCI Express capable, false otherwise.
2455 */
2456 static inline bool pci_is_pcie(struct pci_dev *dev)
2457 {
2458 return pci_pcie_cap(dev);
2459 }
2460
2461 /**
2462 * pcie_caps_reg - get the PCIe Capabilities Register
2463 * @dev: PCI device
2464 */
2465 static inline u16 pcie_caps_reg(const struct pci_dev *dev)
2466 {
2467 return dev->pcie_flags_reg;
2468 }
2469
2470 /**
2471 * pci_pcie_type - get the PCIe device/port type
2472 * @dev: PCI device
2473 */
2474 static inline int pci_pcie_type(const struct pci_dev *dev)
2475 {
2476 return (pcie_caps_reg(dev) & PCI_EXP_FLAGS_TYPE) >> 4;
2477 }
2478
2479 /**
2480 * pcie_find_root_port - Get the PCIe root port device
2481 * @dev: PCI device
2482 *
2483 * Traverse up the parent chain and return the PCIe Root Port PCI Device
2484 * for a given PCI/PCIe Device.
2485 */
2486 static inline struct pci_dev *pcie_find_root_port(struct pci_dev *dev)
2487 {
2488 while (dev) {
2489 if (pci_is_pcie(dev) &&
2490 pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
2491 return dev;
2492 dev = pci_upstream_bridge(dev);
2493 }
2494
2495 return NULL;
2496 }
2497
2498 static inline bool pci_dev_is_disconnected(const struct pci_dev *dev)
2499 {
2500 /*
2501 * error_state is set in pci_dev_set_io_state() using xchg/cmpxchg()
2502 * and read w/o common lock. READ_ONCE() ensures compiler cannot cache
2503 * the value (e.g. inside the loop in pci_dev_wait()).
2504 */
2505 return READ_ONCE(dev->error_state) == pci_channel_io_perm_failure;
2506 }
2507
2508 void pci_request_acs(void);
2509 bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags);
2510 bool pci_acs_path_enabled(struct pci_dev *start,
2511 struct pci_dev *end, u16 acs_flags);
2512 int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask);
2513
2514 #define PCI_VPD_LRDT 0x80 /* Large Resource Data Type */
2515 #define PCI_VPD_LRDT_ID(x) ((x) | PCI_VPD_LRDT)
2516
2517 /* Large Resource Data Type Tag Item Names */
2518 #define PCI_VPD_LTIN_ID_STRING 0x02 /* Identifier String */
2519 #define PCI_VPD_LTIN_RO_DATA 0x10 /* Read-Only Data */
2520 #define PCI_VPD_LTIN_RW_DATA 0x11 /* Read-Write Data */
2521
2522 #define PCI_VPD_LRDT_ID_STRING PCI_VPD_LRDT_ID(PCI_VPD_LTIN_ID_STRING)
2523 #define PCI_VPD_LRDT_RO_DATA PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RO_DATA)
2524 #define PCI_VPD_LRDT_RW_DATA PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RW_DATA)
2525
2526 #define PCI_VPD_RO_KEYWORD_PARTNO "PN"
2527 #define PCI_VPD_RO_KEYWORD_SERIALNO "SN"
2528 #define PCI_VPD_RO_KEYWORD_MFR_ID "MN"
2529 #define PCI_VPD_RO_KEYWORD_VENDOR0 "V0"
2530 #define PCI_VPD_RO_KEYWORD_CHKSUM "RV"
2531
2532 /**
2533 * pci_vpd_alloc - Allocate buffer and read VPD into it
2534 * @dev: PCI device
2535 * @size: pointer to field where VPD length is returned
2536 *
2537 * Returns pointer to allocated buffer or an ERR_PTR in case of failure
2538 */
2539 void *pci_vpd_alloc(struct pci_dev *dev, unsigned int *size);
2540
2541 /**
2542 * pci_vpd_find_id_string - Locate id string in VPD
2543 * @buf: Pointer to buffered VPD data
2544 * @len: The length of the buffer area in which to search
2545 * @size: Pointer to field where length of id string is returned
2546 *
2547 * Returns the index of the id string or -ENOENT if not found.
2548 */
2549 int pci_vpd_find_id_string(const u8 *buf, unsigned int len, unsigned int *size);
2550
2551 /**
2552 * pci_vpd_find_ro_info_keyword - Locate info field keyword in VPD RO section
2553 * @buf: Pointer to buffered VPD data
2554 * @len: The length of the buffer area in which to search
2555 * @kw: The keyword to search for
2556 * @size: Pointer to field where length of found keyword data is returned
2557 *
2558 * Returns the index of the information field keyword data or -ENOENT if
2559 * not found.
2560 */
2561 int pci_vpd_find_ro_info_keyword(const void *buf, unsigned int len,
2562 const char *kw, unsigned int *size);
2563
2564 /**
2565 * pci_vpd_check_csum - Check VPD checksum
2566 * @buf: Pointer to buffered VPD data
2567 * @len: VPD size
2568 *
2569 * Returns 1 if VPD has no checksum, otherwise 0 or an errno
2570 */
2571 int pci_vpd_check_csum(const void *buf, unsigned int len);
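/*
 * Usage sketch for the VPD helpers above (hypothetical driver code): read
 * the whole VPD image and extract the part number keyword.
 *
 *	unsigned int vpd_len, kw_len;
 *	int off;
 *	void *vpd = pci_vpd_alloc(pdev, &vpd_len);
 *
 *	if (IS_ERR(vpd))
 *		return PTR_ERR(vpd);
 *
 *	off = pci_vpd_find_ro_info_keyword(vpd, vpd_len,
 *					   PCI_VPD_RO_KEYWORD_PARTNO, &kw_len);
 *	if (off >= 0)
 *		pci_info(pdev, "part number: %.*s\n", kw_len, (char *)vpd + off);
 *	kfree(vpd);
 */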
2572
2573 /* PCI <-> OF binding helpers */
2574 #ifdef CONFIG_OF
2575 struct device_node;
2576 struct irq_domain;
2577 struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus);
2578 bool pci_host_of_has_msi_map(struct device *dev);
2579
2580 /* Arch may override this (weak) */
2581 struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus);
2582
2583 #else /* CONFIG_OF */
2584 static inline struct irq_domain *
2585 pci_host_bridge_of_msi_domain(struct pci_bus *bus) { return NULL; }
2586 static inline bool pci_host_of_has_msi_map(struct device *dev) { return false; }
2587 #endif /* CONFIG_OF */
2588
2589 static inline struct device_node *
2590 pci_device_to_OF_node(const struct pci_dev *pdev)
2591 {
2592 return pdev ? pdev->dev.of_node : NULL;
2593 }
2594
2595 static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus)
2596 {
2597 return bus ? bus->dev.of_node : NULL;
2598 }
2599
2600 #ifdef CONFIG_ACPI
2601 struct irq_domain *pci_host_bridge_acpi_msi_domain(struct pci_bus *bus);
2602
2603 void
2604 pci_msi_register_fwnode_provider(struct fwnode_handle *(*fn)(struct device *));
2605 bool pci_pr3_present(struct pci_dev *pdev);
2606 #else
2607 static inline struct irq_domain *
2608 pci_host_bridge_acpi_msi_domain(struct pci_bus *bus) { return NULL; }
2609 static inline bool pci_pr3_present(struct pci_dev *pdev) { return false; }
2610 #endif
2611
2612 #ifdef CONFIG_EEH
2613 static inline struct eeh_dev *pci_dev_to_eeh_dev(struct pci_dev *pdev)
2614 {
2615 return pdev->dev.archdata.edev;
2616 }
2617 #endif
2618
2619 void pci_add_dma_alias(struct pci_dev *dev, u8 devfn_from, unsigned nr_devfns);
2620 bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2);
2621 int pci_for_each_dma_alias(struct pci_dev *pdev,
2622 int (*fn)(struct pci_dev *pdev,
2623 u16 alias, void *data), void *data);
2624
2625 /* Helper functions for operation of device flag */
2626 static inline void pci_set_dev_assigned(struct pci_dev *pdev)
2627 {
2628 pdev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED;
2629 }
2630 static inline void pci_clear_dev_assigned(struct pci_dev *pdev)
2631 {
2632 pdev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED;
2633 }
2634 static inline bool pci_is_dev_assigned(struct pci_dev *pdev)
2635 {
2636 return (pdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED) == PCI_DEV_FLAGS_ASSIGNED;
2637 }
2638
2639 /**
2640 * pci_ari_enabled - query ARI forwarding status
2641 * @bus: the PCI bus
2642 *
2643 * Returns true if ARI forwarding is enabled.
2644 */
2645 static inline bool pci_ari_enabled(struct pci_bus *bus)
2646 {
2647 return bus->self && bus->self->ari_enabled;
2648 }
2649
2650 /**
2651 * pci_is_thunderbolt_attached - whether device is on a Thunderbolt daisy chain
2652 * @pdev: PCI device to check
2653 *
2654 * Walk upwards from @pdev and check each encountered bridge for being part
2655 * of a Thunderbolt controller. Reaching the host bridge means @pdev is not
2656 * Thunderbolt-attached (it is most likely soldered to the mainboard instead).
2657 */
2658 static inline bool pci_is_thunderbolt_attached(struct pci_dev *pdev)
2659 {
2660 struct pci_dev *parent = pdev;
2661
2662 if (pdev->is_thunderbolt)
2663 return true;
2664
2665 while ((parent = pci_upstream_bridge(parent)))
2666 if (parent->is_thunderbolt)
2667 return true;
2668
2669 return false;
2670 }
2671
2672 #if defined(CONFIG_PCIEPORTBUS) || defined(CONFIG_EEH)
2673 void pci_uevent_ers(struct pci_dev *pdev, enum pci_ers_result err_type);
2674 #endif
2675
2676 #include <linux/dma-mapping.h>
2677
2678 #define pci_printk(level, pdev, fmt, arg...) \
2679 dev_printk(level, &(pdev)->dev, fmt, ##arg)
2680
2681 #define pci_emerg(pdev, fmt, arg...) dev_emerg(&(pdev)->dev, fmt, ##arg)
2682 #define pci_alert(pdev, fmt, arg...) dev_alert(&(pdev)->dev, fmt, ##arg)
2683 #define pci_crit(pdev, fmt, arg...) dev_crit(&(pdev)->dev, fmt, ##arg)
2684 #define pci_err(pdev, fmt, arg...) dev_err(&(pdev)->dev, fmt, ##arg)
2685 #define pci_warn(pdev, fmt, arg...) dev_warn(&(pdev)->dev, fmt, ##arg)
2686 #define pci_warn_once(pdev, fmt, arg...) dev_warn_once(&(pdev)->dev, fmt, ##arg)
2687 #define pci_notice(pdev, fmt, arg...) dev_notice(&(pdev)->dev, fmt, ##arg)
2688 #define pci_info(pdev, fmt, arg...) dev_info(&(pdev)->dev, fmt, ##arg)
2689 #define pci_dbg(pdev, fmt, arg...) dev_dbg(&(pdev)->dev, fmt, ##arg)
2690
2691 #define pci_notice_ratelimited(pdev, fmt, arg...) \
2692 dev_notice_ratelimited(&(pdev)->dev, fmt, ##arg)
2693
2694 #define pci_info_ratelimited(pdev, fmt, arg...) \
2695 dev_info_ratelimited(&(pdev)->dev, fmt, ##arg)
2696
2697 #define pci_WARN(pdev, condition, fmt, arg...) \
2698 WARN(condition, "%s %s: " fmt, \
2699 dev_driver_string(&(pdev)->dev), pci_name(pdev), ##arg)
2700
2701 #define pci_WARN_ONCE(pdev, condition, fmt, arg...) \
2702 WARN_ONCE(condition, "%s %s: " fmt, \
2703 dev_driver_string(&(pdev)->dev), pci_name(pdev), ##arg)
2704
2705 #endif /* LINUX_PCI_H */
2706