/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2016 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#ifndef	_LINUX_PCI_H_
#define	_LINUX_PCI_H_

#define	CONFIG_PCI_MSI

#include <linux/types.h>

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/nv.h>
#include <sys/pciio.h>
#include <sys/rman.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pci_private.h>

#include <machine/resource.h>

#include <linux/list.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <asm/atomic.h>
#include <linux/device.h>

struct pci_device_id {
	uint32_t	vendor;
	uint32_t	device;
	uint32_t	subvendor;
	uint32_t	subdevice;
	uint32_t	class;
	uint32_t	class_mask;
	uintptr_t	driver_data;
};

#define	MODULE_DEVICE_TABLE(bus, table)

#define	PCI_BASE_CLASS_DISPLAY		0x03
#define	PCI_CLASS_DISPLAY_VGA		0x0300
#define	PCI_CLASS_DISPLAY_OTHER		0x0380
#define	PCI_BASE_CLASS_BRIDGE		0x06
#define	PCI_CLASS_BRIDGE_ISA		0x0601

#define	PCI_ANY_ID			-1U
#define	PCI_VENDOR_ID_APPLE		0x106b
#define	PCI_VENDOR_ID_ASUSTEK		0x1043
#define	PCI_VENDOR_ID_ATI		0x1002
#define	PCI_VENDOR_ID_DELL		0x1028
#define	PCI_VENDOR_ID_HP		0x103c
#define	PCI_VENDOR_ID_IBM		0x1014
#define	PCI_VENDOR_ID_INTEL		0x8086
#define	PCI_VENDOR_ID_MELLANOX			0x15b3
#define	PCI_VENDOR_ID_REDHAT_QUMRANET	0x1af4
#define	PCI_VENDOR_ID_SERVERWORKS	0x1166
#define	PCI_VENDOR_ID_SONY		0x104d
#define	PCI_VENDOR_ID_TOPSPIN			0x1867
#define	PCI_VENDOR_ID_VIA		0x1106
#define	PCI_SUBVENDOR_ID_REDHAT_QUMRANET	0x1af4
#define	PCI_DEVICE_ID_ATI_RADEON_QY	0x5159
#define	PCI_DEVICE_ID_MELLANOX_TAVOR		0x5a44
#define	PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE	0x5a46
#define	PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT	0x6278
#define	PCI_DEVICE_ID_MELLANOX_ARBEL		0x6282
#define	PCI_DEVICE_ID_MELLANOX_SINAI_OLD	0x5e8c
#define	PCI_DEVICE_ID_MELLANOX_SINAI		0x6274
#define	PCI_SUBDEVICE_ID_QEMU		0x1100

#define PCI_DEVFN(slot, func)   ((((slot) & 0x1f) << 3) | ((func) & 0x07))
#define PCI_SLOT(devfn)		(((devfn) >> 3) & 0x1f)
#define PCI_FUNC(devfn)		((devfn) & 0x07)
#define	PCI_BUS_NUM(devfn)	(((devfn) >> 8) & 0xff)
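/*
 * Worked example (illustrative): slot 3, function 2 encode as
 * PCI_DEVFN(3, 2) == ((3 & 0x1f) << 3) | (2 & 0x07) == 0x1a, and the
 * components are recovered with PCI_SLOT(0x1a) == 3 and PCI_FUNC(0x1a) == 2.
 */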

#define PCI_VDEVICE(_vendor, _device)					\
	    .vendor = PCI_VENDOR_ID_##_vendor, .device = (_device),	\
	    .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
#define	PCI_DEVICE(_vendor, _device)					\
	    .vendor = (_vendor), .device = (_device),			\
	    .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
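/*
 * Illustrative use of the ID macros in a driver match table; the device IDs
 * below are placeholders, and the table ends with a zeroed sentinel:
 *
 *	static const struct pci_device_id exdrv_ids[] = {
 *		{ PCI_VDEVICE(MELLANOX, 0x1003) },
 *		{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x1234) },
 *		{ 0, }
 *	};
 */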

#define	to_pci_dev(n)	container_of(n, struct pci_dev, dev)

#define	PCI_VENDOR_ID		PCIR_DEVVENDOR
#define	PCI_COMMAND		PCIR_COMMAND
#define	PCI_EXP_DEVCTL		PCIER_DEVICE_CTL		/* Device Control */
#define	PCI_EXP_LNKCTL		PCIER_LINK_CTL			/* Link Control */
#define	PCI_EXP_FLAGS_TYPE	PCIEM_FLAGS_TYPE		/* Device/Port type */
#define	PCI_EXP_DEVCAP		PCIER_DEVICE_CAP		/* Device capabilities */
#define	PCI_EXP_DEVSTA		PCIER_DEVICE_STA		/* Device Status */
#define	PCI_EXP_LNKCAP		PCIER_LINK_CAP			/* Link Capabilities */
#define	PCI_EXP_LNKSTA		PCIER_LINK_STA			/* Link Status */
#define	PCI_EXP_SLTCAP		PCIER_SLOT_CAP			/* Slot Capabilities */
#define	PCI_EXP_SLTCTL		PCIER_SLOT_CTL			/* Slot Control */
#define	PCI_EXP_SLTSTA		PCIER_SLOT_STA			/* Slot Status */
#define	PCI_EXP_RTCTL		PCIER_ROOT_CTL			/* Root Control */
#define	PCI_EXP_RTCAP		PCIER_ROOT_CAP			/* Root Capabilities */
#define	PCI_EXP_RTSTA		PCIER_ROOT_STA			/* Root Status */
#define	PCI_EXP_DEVCAP2		PCIER_DEVICE_CAP2		/* Device Capabilities 2 */
#define	PCI_EXP_DEVCTL2		PCIER_DEVICE_CTL2		/* Device Control 2 */
#define	PCI_EXP_LNKCAP2		PCIER_LINK_CAP2			/* Link Capabilities 2 */
#define	PCI_EXP_LNKCTL2		PCIER_LINK_CTL2			/* Link Control 2 */
#define	PCI_EXP_LNKSTA2		PCIER_LINK_STA2			/* Link Status 2 */
#define	PCI_EXP_FLAGS		PCIER_FLAGS			/* Capabilities register */
#define	PCI_EXP_FLAGS_VERS	PCIEM_FLAGS_VERSION		/* Capability version */
#define	PCI_EXP_TYPE_ROOT_PORT	PCIEM_TYPE_ROOT_PORT		/* Root Port */
#define	PCI_EXP_TYPE_ENDPOINT	PCIEM_TYPE_ENDPOINT		/* Express Endpoint */
#define	PCI_EXP_TYPE_LEG_END	PCIEM_TYPE_LEGACY_ENDPOINT	/* Legacy Endpoint */
#define	PCI_EXP_TYPE_DOWNSTREAM PCIEM_TYPE_DOWNSTREAM_PORT	/* Downstream Port */
#define	PCI_EXP_FLAGS_SLOT	PCIEM_FLAGS_SLOT		/* Slot implemented */
#define	PCI_EXP_TYPE_RC_EC	PCIEM_TYPE_ROOT_EC		/* Root Complex Event Collector */
#define	PCI_EXP_LNKCAP_SLS_2_5GB 0x01	/* Supported Link Speed 2.5GT/s */
#define	PCI_EXP_LNKCAP_SLS_5_0GB 0x02	/* Supported Link Speed 5.0GT/s */
#define	PCI_EXP_LNKCAP_SLS_8_0GB 0x04	/* Supported Link Speed 8.0GT/s */
#define	PCI_EXP_LNKCAP_SLS_16_0GB 0x08	/* Supported Link Speed 16.0GT/s */
#define	PCI_EXP_LNKCAP_MLW	0x03f0	/* Maximum Link Width */
#define	PCI_EXP_LNKCAP2_SLS_2_5GB 0x02	/* Supported Link Speed 2.5GT/s */
#define	PCI_EXP_LNKCAP2_SLS_5_0GB 0x04	/* Supported Link Speed 5.0GT/s */
#define	PCI_EXP_LNKCAP2_SLS_8_0GB 0x08	/* Supported Link Speed 8.0GT/s */
#define	PCI_EXP_LNKCAP2_SLS_16_0GB 0x10	/* Supported Link Speed 16.0GT/s */

#define PCI_EXP_LNKCTL_HAWD	PCIEM_LINK_CTL_HAWD
#define PCI_EXP_LNKCAP_CLKPM	0x00040000
#define PCI_EXP_DEVSTA_TRPND	0x0020

#define	IORESOURCE_MEM	(1 << SYS_RES_MEMORY)
#define	IORESOURCE_IO	(1 << SYS_RES_IOPORT)
#define	IORESOURCE_IRQ	(1 << SYS_RES_IRQ)

enum pci_bus_speed {
	PCI_SPEED_UNKNOWN = -1,
	PCIE_SPEED_2_5GT,
	PCIE_SPEED_5_0GT,
	PCIE_SPEED_8_0GT,
	PCIE_SPEED_16_0GT,
};

enum pcie_link_width {
	PCIE_LNK_WIDTH_RESRV	= 0x00,
	PCIE_LNK_X1		= 0x01,
	PCIE_LNK_X2		= 0x02,
	PCIE_LNK_X4		= 0x04,
	PCIE_LNK_X8		= 0x08,
	PCIE_LNK_X12		= 0x0c,
	PCIE_LNK_X16		= 0x10,
	PCIE_LNK_X32		= 0x20,
	PCIE_LNK_WIDTH_UNKNOWN	= 0xff,
};

typedef int pci_power_t;

#define PCI_D0	PCI_POWERSTATE_D0
#define PCI_D1	PCI_POWERSTATE_D1
#define PCI_D2	PCI_POWERSTATE_D2
#define PCI_D3hot	PCI_POWERSTATE_D3
#define PCI_D3cold	4

#define PCI_POWER_ERROR	PCI_POWERSTATE_UNKNOWN

struct pci_dev;

struct pci_driver {
	struct list_head		links;
	char				*name;
	const struct pci_device_id		*id_table;
	int  (*probe)(struct pci_dev *dev, const struct pci_device_id *id);
	void (*remove)(struct pci_dev *dev);
	int  (*suspend) (struct pci_dev *dev, pm_message_t state);	/* Device suspended */
	int  (*resume) (struct pci_dev *dev);		/* Device woken up */
	void (*shutdown) (struct pci_dev *dev);		/* Device shutdown */
	driver_t			bsddriver;
	devclass_t			bsdclass;
	struct device_driver		driver;
	const struct pci_error_handlers       *err_handler;
	bool				isdrm;
	int  (*bsd_iov_init)(device_t dev, uint16_t num_vfs,
	    const nvlist_t *pf_config);
	void  (*bsd_iov_uninit)(device_t dev);
	int  (*bsd_iov_add_vf)(device_t dev, uint16_t vfnum,
	    const nvlist_t *vf_config);
};
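/*
 * Minimal sketch of how a consumer typically fills in struct pci_driver;
 * "exdrv", its callbacks and its ID table are hypothetical:
 *
 *	static struct pci_driver exdrv_driver = {
 *		.name = "exdrv",
 *		.id_table = exdrv_ids,
 *		.probe = exdrv_probe,
 *		.remove = exdrv_remove,
 *	};
 *
 * The driver is registered with pci_register_driver(&exdrv_driver) and
 * removed with pci_unregister_driver(&exdrv_driver); both map to the
 * linux_pci_*() implementations declared further below.
 */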

struct pci_bus {
	struct pci_dev	*self;
	int		domain;
	int		number;
};

extern struct list_head pci_drivers;
extern struct list_head pci_devices;
extern spinlock_t pci_lock;

#define	__devexit_p(x)	x

struct pci_mmio_region {
	TAILQ_ENTRY(pci_mmio_region)	next;
	struct resource			*res;
	int				rid;
	int				type;
};

struct pci_dev {
	struct device		dev;
	struct list_head	links;
	struct pci_driver	*pdrv;
	struct pci_bus		*bus;
	uint16_t		device;
	uint16_t		vendor;
	uint16_t		subsystem_vendor;
	uint16_t		subsystem_device;
	unsigned int		irq;
	unsigned int		devfn;
	uint32_t		class;
	uint8_t			revision;
	bool			msi_enabled;

	TAILQ_HEAD(, pci_mmio_region)	mmio;
};

static inline struct resource_list_entry *
linux_pci_get_rle(struct pci_dev *pdev, int type, int rid)
{
	struct pci_devinfo *dinfo;
	struct resource_list *rl;

	dinfo = device_get_ivars(pdev->dev.bsddev);
	rl = &dinfo->resources;
	return resource_list_find(rl, type, rid);
}

static inline struct resource_list_entry *
linux_pci_get_bar(struct pci_dev *pdev, int bar)
{
	struct resource_list_entry *rle;

	bar = PCIR_BAR(bar);
	if ((rle = linux_pci_get_rle(pdev, SYS_RES_MEMORY, bar)) == NULL)
		rle = linux_pci_get_rle(pdev, SYS_RES_IOPORT, bar);
	return (rle);
}

static inline struct device *
linux_pci_find_irq_dev(unsigned int irq)
{
	struct pci_dev *pdev;
	struct device *found;

	found = NULL;
	spin_lock(&pci_lock);
	list_for_each_entry(pdev, &pci_devices, links) {
		if (irq == pdev->dev.irq ||
		    (irq >= pdev->dev.irq_start && irq < pdev->dev.irq_end)) {
			found = &pdev->dev;
			break;
		}
	}
	spin_unlock(&pci_lock);
	return (found);
}

static inline int
pci_resource_type(struct pci_dev *pdev, int bar)
{
	struct pci_map *pm;

	pm = pci_find_bar(pdev->dev.bsddev, PCIR_BAR(bar));
	if (!pm)
		return (-1);

	if (PCI_BAR_IO(pm->pm_value))
		return (SYS_RES_IOPORT);
	else
		return (SYS_RES_MEMORY);
}

/*
 * All drivers seem to inspect only the resource type, not the full flags.
 */
static inline int
pci_resource_flags(struct pci_dev *pdev, int bar)
{
	int type;

	type = pci_resource_type(pdev, bar);
	if (type < 0)
		return (0);
	return (1 << type);
}

static inline const char *
pci_name(struct pci_dev *d)
{

	return device_get_desc(d->dev.bsddev);
}

static inline void *
pci_get_drvdata(struct pci_dev *pdev)
{

	return dev_get_drvdata(&pdev->dev);
}

static inline void
pci_set_drvdata(struct pci_dev *pdev, void *data)
{

	dev_set_drvdata(&pdev->dev, data);
}

static inline int
pci_enable_device(struct pci_dev *pdev)
{

	pci_enable_io(pdev->dev.bsddev, SYS_RES_IOPORT);
	pci_enable_io(pdev->dev.bsddev, SYS_RES_MEMORY);
	return (0);
}

static inline void
pci_disable_device(struct pci_dev *pdev)
{

	pci_disable_busmaster(pdev->dev.bsddev);
}

static inline int
pci_set_master(struct pci_dev *pdev)
{

	pci_enable_busmaster(pdev->dev.bsddev);
	return (0);
}

static inline int
pci_set_power_state(struct pci_dev *pdev, int state)
{

	pci_set_powerstate(pdev->dev.bsddev, state);
	return (0);
}

static inline int
pci_clear_master(struct pci_dev *pdev)
{

	pci_disable_busmaster(pdev->dev.bsddev);
	return (0);
}

static inline int
pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
{
	int rid;
	int type;

	type = pci_resource_type(pdev, bar);
	if (type < 0)
		return (-ENODEV);
	rid = PCIR_BAR(bar);
	if (bus_alloc_resource_any(pdev->dev.bsddev, type, &rid,
	    RF_ACTIVE) == NULL)
		return (-EINVAL);
	return (0);
}

static inline void
pci_release_region(struct pci_dev *pdev, int bar)
{
	struct resource_list_entry *rle;

	if ((rle = linux_pci_get_bar(pdev, bar)) == NULL)
		return;
	bus_release_resource(pdev->dev.bsddev, rle->type, rle->rid, rle->res);
}

static inline void
pci_release_regions(struct pci_dev *pdev)
{
	int i;

	for (i = 0; i <= PCIR_MAX_BAR_0; i++)
		pci_release_region(pdev, i);
}

static inline int
pci_request_regions(struct pci_dev *pdev, const char *res_name)
{
	int error;
	int i;

	for (i = 0; i <= PCIR_MAX_BAR_0; i++) {
		error = pci_request_region(pdev, i, res_name);
		if (error && error != -ENODEV) {
			pci_release_regions(pdev);
			return (error);
		}
	}
	return (0);
}
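/*
 * Typical probe-time sequence built from the helpers above (sketch only;
 * error unwinding is abbreviated and "exdrv" is a placeholder name):
 *
 *	if (pci_enable_device(pdev) != 0)
 *		return (-ENODEV);
 *	pci_set_master(pdev);
 *	if (pci_request_regions(pdev, "exdrv") != 0) {
 *		pci_disable_device(pdev);
 *		return (-EBUSY);
 *	}
 */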

static inline void
pci_disable_msix(struct pci_dev *pdev)
{

	pci_release_msi(pdev->dev.bsddev);

	/*
	 * The MSI-X IRQ numbers associated with this PCI device are no
	 * longer valid and might be re-assigned.  Make sure
	 * linux_pci_find_irq_dev() no longer sees them by resetting
	 * their references to zero:
	 */
	pdev->dev.irq_start = 0;
	pdev->dev.irq_end = 0;
}

#define	pci_disable_msi(pdev) \
  linux_pci_disable_msi(pdev)

static inline void
linux_pci_disable_msi(struct pci_dev *pdev)
{

	pci_release_msi(pdev->dev.bsddev);

	pdev->dev.irq_start = 0;
	pdev->dev.irq_end = 0;
	pdev->irq = pdev->dev.irq;
	pdev->msi_enabled = false;
}

unsigned long	pci_resource_start(struct pci_dev *pdev, int bar);
unsigned long	pci_resource_len(struct pci_dev *pdev, int bar);

static inline bus_addr_t
pci_bus_address(struct pci_dev *pdev, int bar)
{

	return (pci_resource_start(pdev, bar));
}

#define	PCI_CAP_ID_EXP	PCIY_EXPRESS
#define	PCI_CAP_ID_PCIX	PCIY_PCIX
#define PCI_CAP_ID_AGP  PCIY_AGP
#define PCI_CAP_ID_PM   PCIY_PMG

#define PCI_EXP_DEVCTL		PCIER_DEVICE_CTL
#define PCI_EXP_DEVCTL_PAYLOAD	PCIEM_CTL_MAX_PAYLOAD
#define PCI_EXP_DEVCTL_READRQ	PCIEM_CTL_MAX_READ_REQUEST
#define PCI_EXP_LNKCTL		PCIER_LINK_CTL
#define PCI_EXP_LNKSTA		PCIER_LINK_STA

static inline int
pci_find_capability(struct pci_dev *pdev, int capid)
{
	int reg;

	if (pci_find_cap(pdev->dev.bsddev, capid, &reg))
		return (0);
	return (reg);
}

static inline int pci_pcie_cap(struct pci_dev *dev)
{
	return pci_find_capability(dev, PCI_CAP_ID_EXP);
}


static inline int
pci_read_config_byte(struct pci_dev *pdev, int where, u8 *val)
{

	*val = (u8)pci_read_config(pdev->dev.bsddev, where, 1);
	return (0);
}

static inline int
pci_read_config_word(struct pci_dev *pdev, int where, u16 *val)
{

	*val = (u16)pci_read_config(pdev->dev.bsddev, where, 2);
	return (0);
}

static inline int
pci_read_config_dword(struct pci_dev *pdev, int where, u32 *val)
{

	*val = (u32)pci_read_config(pdev->dev.bsddev, where, 4);
	return (0);
}

static inline int
pci_write_config_byte(struct pci_dev *pdev, int where, u8 val)
{

	pci_write_config(pdev->dev.bsddev, where, val, 1);
	return (0);
}

static inline int
pci_write_config_word(struct pci_dev *pdev, int where, u16 val)
{

	pci_write_config(pdev->dev.bsddev, where, val, 2);
	return (0);
}

static inline int
pci_write_config_dword(struct pci_dev *pdev, int where, u32 val)
{

	pci_write_config(pdev->dev.bsddev, where, val, 4);
	return (0);
}
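/*
 * The config-space accessors above always succeed and return 0; the
 * Linux-style return value is kept only for source compatibility.
 * Illustrative read of the vendor and device words:
 *
 *	u16 vid, did;
 *
 *	pci_read_config_word(pdev, PCI_VENDOR_ID, &vid);
 *	pci_read_config_word(pdev, PCI_VENDOR_ID + 2, &did);
 */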

int	linux_pci_register_driver(struct pci_driver *pdrv);
int	linux_pci_register_drm_driver(struct pci_driver *pdrv);
void	linux_pci_unregister_driver(struct pci_driver *pdrv);
void	linux_pci_unregister_drm_driver(struct pci_driver *pdrv);

#define	pci_register_driver(pdrv)	linux_pci_register_driver(pdrv)
#define	pci_unregister_driver(pdrv)	linux_pci_unregister_driver(pdrv)

struct msix_entry {
	int entry;
	int vector;
};

/*
 * Enable MSI-X.  A positive return value indicates the actual number of
 * available vectors (fewer than requested); a negative return value is an
 * error.
 *
 * NB: the define below prevents this definition of pci_enable_msix() from
 * clashing with the native FreeBSD version.
 */
#define	pci_enable_msix(...) \
  linux_pci_enable_msix(__VA_ARGS__)

static inline int
pci_enable_msix(struct pci_dev *pdev, struct msix_entry *entries, int nreq)
{
	struct resource_list_entry *rle;
	int error;
	int avail;
	int i;

	avail = pci_msix_count(pdev->dev.bsddev);
	if (avail < nreq) {
		if (avail == 0)
			return -EINVAL;
		return avail;
	}
	avail = nreq;
	if ((error = -pci_alloc_msix(pdev->dev.bsddev, &avail)) != 0)
		return error;
	/*
	 * Handle the case where "pci_alloc_msix()" may allocate fewer
	 * interrupts than requested and return with no error:
	 */
	if (avail < nreq) {
		pci_release_msi(pdev->dev.bsddev);
		return avail;
	}
	rle = linux_pci_get_rle(pdev, SYS_RES_IRQ, 1);
	pdev->dev.irq_start = rle->start;
	pdev->dev.irq_end = rle->start + avail;
	for (i = 0; i < nreq; i++)
		entries[i].vector = pdev->dev.irq_start + i;
	return (0);
}

#define	pci_enable_msix_range(...) \
  linux_pci_enable_msix_range(__VA_ARGS__)

static inline int
pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
    int minvec, int maxvec)
{
	int nvec = maxvec;
	int rc;

	if (maxvec < minvec)
		return (-ERANGE);

	do {
		rc = pci_enable_msix(dev, entries, nvec);
		if (rc < 0) {
			return (rc);
		} else if (rc > 0) {
			if (rc < minvec)
				return (-ENOSPC);
			nvec = rc;
		}
	} while (rc);
	return (nvec);
}
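/*
 * Illustrative MSI-X setup using the wrappers above; the vector count of 8
 * is arbitrary.  On success, entries[0..nvec-1].vector hold the allocated
 * IRQ numbers:
 *
 *	struct msix_entry entries[8];
 *	int i, nvec;
 *
 *	for (i = 0; i < 8; i++)
 *		entries[i].entry = i;
 *	nvec = pci_enable_msix_range(pdev, entries, 1, 8);
 *	if (nvec < 0)
 *		return (nvec);
 */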

#define	pci_enable_msi(pdev) \
  linux_pci_enable_msi(pdev)

static inline int
pci_enable_msi(struct pci_dev *pdev)
{
	struct resource_list_entry *rle;
	int error;
	int avail;

	avail = pci_msi_count(pdev->dev.bsddev);
	if (avail < 1)
		return -EINVAL;

	avail = 1;	/* this function only enables one MSI IRQ */
	if ((error = -pci_alloc_msi(pdev->dev.bsddev, &avail)) != 0)
		return error;

	rle = linux_pci_get_rle(pdev, SYS_RES_IRQ, 1);
	pdev->dev.irq_start = rle->start;
	pdev->dev.irq_end = rle->start + avail;
	pdev->irq = rle->start;
	pdev->msi_enabled = true;
	return (0);
}
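/*
 * pci_enable_msi() requests exactly one MSI vector and, on success, updates
 * pdev->irq, which the caller typically passes to request_irq() from
 * <linux/interrupt.h>.  Sketch ("exdrv_intr" and "sc" are hypothetical):
 *
 *	if (pci_enable_msi(pdev) == 0)
 *		error = request_irq(pdev->irq, exdrv_intr, 0, "exdrv", sc);
 */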

static inline int
pci_channel_offline(struct pci_dev *pdev)
{

	return (pci_read_config(pdev->dev.bsddev, PCIR_VENDOR, 2) == PCIV_INVALID);
}

static inline int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
{
	return -ENODEV;
}
static inline void pci_disable_sriov(struct pci_dev *dev)
{
}

static inline void *
pci_iomap(struct pci_dev *dev, int mmio_bar, int mmio_size __unused)
{
	struct pci_mmio_region *mmio;

	mmio = malloc(sizeof(*mmio), M_DEVBUF, M_WAITOK | M_ZERO);
	mmio->rid = PCIR_BAR(mmio_bar);
	mmio->type = pci_resource_type(dev, mmio_bar);
	mmio->res = bus_alloc_resource_any(dev->dev.bsddev, mmio->type,
	    &mmio->rid, RF_ACTIVE);
	if (mmio->res == NULL) {
		free(mmio, M_DEVBUF);
		return (NULL);
	}
	TAILQ_INSERT_TAIL(&dev->mmio, mmio, next);

	return ((void *)rman_get_bushandle(mmio->res));
}
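/*
 * pci_iomap() returns the bus handle of the mapped BAR and must be paired
 * with pci_iounmap() on the same pointer; the size argument is ignored.
 * Sketch (BAR 0 and register offset 0x10 are placeholders; readl() comes
 * from <linux/io.h>):
 *
 *	void *regs;
 *	uint32_t val;
 *
 *	regs = pci_iomap(pdev, 0, 0);
 *	if (regs == NULL)
 *		return (-ENOMEM);
 *	val = readl((char *)regs + 0x10);
 *	pci_iounmap(pdev, regs);
 */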

static inline void
pci_iounmap(struct pci_dev *dev, void *res)
{
	struct pci_mmio_region *mmio, *p;

	TAILQ_FOREACH_SAFE(mmio, &dev->mmio, next, p) {
		if (res != (void *)rman_get_bushandle(mmio->res))
			continue;
		bus_release_resource(dev->dev.bsddev,
		    mmio->type, mmio->rid, mmio->res);
		TAILQ_REMOVE(&dev->mmio, mmio, next);
		free(mmio, M_DEVBUF);
		return;
	}
}

#define DEFINE_PCI_DEVICE_TABLE(_table) \
	const struct pci_device_id _table[] __devinitdata


/* XXX This should not be necessary. */
#define	pcix_set_mmrbc(d, v)	0
#define	pcix_get_max_mmrbc(d)	0
#define	pcie_set_readrq(d, v)	pci_set_max_read_req(&(d)->dev, (v))

#define	PCI_DMA_BIDIRECTIONAL	0
#define	PCI_DMA_TODEVICE	1
#define	PCI_DMA_FROMDEVICE	2
#define	PCI_DMA_NONE		3

#define	pci_pool		dma_pool
#define	pci_pool_destroy(...)	dma_pool_destroy(__VA_ARGS__)
#define	pci_pool_alloc(...)	dma_pool_alloc(__VA_ARGS__)
#define	pci_pool_free(...)	dma_pool_free(__VA_ARGS__)
#define	pci_pool_create(_name, _pdev, _size, _align, _alloc)		\
	    dma_pool_create(_name, &(_pdev)->dev, _size, _align, _alloc)
#define	pci_free_consistent(_hwdev, _size, _vaddr, _dma_handle)		\
	    dma_free_coherent((_hwdev) == NULL ? NULL : &(_hwdev)->dev,	\
		_size, _vaddr, _dma_handle)
#define	pci_map_sg(_hwdev, _sg, _nents, _dir)				\
	    dma_map_sg((_hwdev) == NULL ? NULL : &(_hwdev->dev),	\
		_sg, _nents, (enum dma_data_direction)_dir)
#define	pci_map_single(_hwdev, _ptr, _size, _dir)			\
	    dma_map_single((_hwdev) == NULL ? NULL : &(_hwdev->dev),	\
		(_ptr), (_size), (enum dma_data_direction)_dir)
#define	pci_unmap_single(_hwdev, _addr, _size, _dir)			\
	    dma_unmap_single((_hwdev) == NULL ? NULL : &(_hwdev)->dev,	\
		_addr, _size, (enum dma_data_direction)_dir)
#define	pci_unmap_sg(_hwdev, _sg, _nents, _dir)				\
	    dma_unmap_sg((_hwdev) == NULL ? NULL : &(_hwdev)->dev,	\
		_sg, _nents, (enum dma_data_direction)_dir)
#define	pci_map_page(_hwdev, _page, _offset, _size, _dir)		\
	    dma_map_page((_hwdev) == NULL ? NULL : &(_hwdev)->dev, _page,\
		_offset, _size, (enum dma_data_direction)_dir)
#define	pci_unmap_page(_hwdev, _dma_address, _size, _dir)		\
	    dma_unmap_page((_hwdev) == NULL ? NULL : &(_hwdev)->dev,	\
		_dma_address, _size, (enum dma_data_direction)_dir)
#define	pci_set_dma_mask(_pdev, mask)	dma_set_mask(&(_pdev)->dev, (mask))
#define	pci_dma_mapping_error(_pdev, _dma_addr)				\
	    dma_mapping_error(&(_pdev)->dev, _dma_addr)
#define	pci_set_consistent_dma_mask(_pdev, _mask)			\
	    dma_set_coherent_mask(&(_pdev)->dev, (_mask))
#define	DECLARE_PCI_UNMAP_ADDR(x)	DEFINE_DMA_UNMAP_ADDR(x);
#define	DECLARE_PCI_UNMAP_LEN(x)	DEFINE_DMA_UNMAP_LEN(x);
#define	pci_unmap_addr		dma_unmap_addr
#define	pci_unmap_addr_set	dma_unmap_addr_set
#define	pci_unmap_len		dma_unmap_len
#define	pci_unmap_len_set	dma_unmap_len_set
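/*
 * The pci_* DMA wrappers above simply forward to the corresponding dma_*
 * API, tolerating a NULL device.  Illustrative single-buffer mapping
 * ("buf" and "len" are hypothetical):
 *
 *	dma_addr_t busaddr;
 *
 *	busaddr = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);
 *	if (pci_dma_mapping_error(pdev, busaddr))
 *		return (-ENOMEM);
 *	...
 *	pci_unmap_single(pdev, busaddr, len, PCI_DMA_TODEVICE);
 */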

typedef unsigned int __bitwise pci_channel_state_t;
typedef unsigned int __bitwise pci_ers_result_t;

enum pci_channel_state {
	pci_channel_io_normal = 1,
	pci_channel_io_frozen = 2,
	pci_channel_io_perm_failure = 3,
};

enum pci_ers_result {
	PCI_ERS_RESULT_NONE = 1,
	PCI_ERS_RESULT_CAN_RECOVER = 2,
	PCI_ERS_RESULT_NEED_RESET = 3,
	PCI_ERS_RESULT_DISCONNECT = 4,
	PCI_ERS_RESULT_RECOVERED = 5,
};


/* PCI bus error event callbacks */
struct pci_error_handlers {
	pci_ers_result_t (*error_detected)(struct pci_dev *dev,
	    enum pci_channel_state error);
	pci_ers_result_t (*mmio_enabled)(struct pci_dev *dev);
	pci_ers_result_t (*link_reset)(struct pci_dev *dev);
	pci_ers_result_t (*slot_reset)(struct pci_dev *dev);
	void (*resume)(struct pci_dev *dev);
};

/* FreeBSD does not support SRIOV - yet */
static inline struct pci_dev *pci_physfn(struct pci_dev *dev)
{
	return dev;
}

static inline bool pci_is_pcie(struct pci_dev *dev)
{
	return !!pci_pcie_cap(dev);
}

static inline u16 pcie_flags_reg(struct pci_dev *dev)
{
	int pos;
	u16 reg16;

	pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
	if (!pos)
		return 0;

	pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &reg16);

	return reg16;
}


static inline int pci_pcie_type(struct pci_dev *dev)
{
	return (pcie_flags_reg(dev) & PCI_EXP_FLAGS_TYPE) >> 4;
}

static inline int pcie_cap_version(struct pci_dev *dev)
{
	return pcie_flags_reg(dev) & PCI_EXP_FLAGS_VERS;
}

static inline bool pcie_cap_has_lnkctl(struct pci_dev *dev)
{
	int type = pci_pcie_type(dev);

	return pcie_cap_version(dev) > 1 ||
	       type == PCI_EXP_TYPE_ROOT_PORT ||
	       type == PCI_EXP_TYPE_ENDPOINT ||
	       type == PCI_EXP_TYPE_LEG_END;
}

static inline bool pcie_cap_has_devctl(const struct pci_dev *dev)
{
	return true;
}

static inline bool pcie_cap_has_sltctl(struct pci_dev *dev)
{
	int type = pci_pcie_type(dev);

	return pcie_cap_version(dev) > 1 || type == PCI_EXP_TYPE_ROOT_PORT ||
	    (type == PCI_EXP_TYPE_DOWNSTREAM &&
	    pcie_flags_reg(dev) & PCI_EXP_FLAGS_SLOT);
}

static inline bool pcie_cap_has_rtctl(struct pci_dev *dev)
{
	int type = pci_pcie_type(dev);

	return pcie_cap_version(dev) > 1 || type == PCI_EXP_TYPE_ROOT_PORT ||
	    type == PCI_EXP_TYPE_RC_EC;
}

static inline bool
pcie_capability_reg_implemented(struct pci_dev *dev, int pos)
{
	if (!pci_is_pcie(dev))
		return false;

	switch (pos) {
	case PCI_EXP_FLAGS_TYPE:
		return true;
	case PCI_EXP_DEVCAP:
	case PCI_EXP_DEVCTL:
	case PCI_EXP_DEVSTA:
		return pcie_cap_has_devctl(dev);
	case PCI_EXP_LNKCAP:
	case PCI_EXP_LNKCTL:
	case PCI_EXP_LNKSTA:
		return pcie_cap_has_lnkctl(dev);
	case PCI_EXP_SLTCAP:
	case PCI_EXP_SLTCTL:
	case PCI_EXP_SLTSTA:
		return pcie_cap_has_sltctl(dev);
	case PCI_EXP_RTCTL:
	case PCI_EXP_RTCAP:
	case PCI_EXP_RTSTA:
		return pcie_cap_has_rtctl(dev);
	case PCI_EXP_DEVCAP2:
	case PCI_EXP_DEVCTL2:
	case PCI_EXP_LNKCAP2:
	case PCI_EXP_LNKCTL2:
	case PCI_EXP_LNKSTA2:
		return pcie_cap_version(dev) > 1;
	default:
		return false;
	}
}

static inline int
pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *dst)
{
	if (pos & 3)
		return -EINVAL;

	if (!pcie_capability_reg_implemented(dev, pos))
		return -EINVAL;

	return pci_read_config_dword(dev, pci_pcie_cap(dev) + pos, dst);
}

static inline int
pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *dst)
{
	if (pos & 1)
		return -EINVAL;

	if (!pcie_capability_reg_implemented(dev, pos))
		return -EINVAL;

	return pci_read_config_word(dev, pci_pcie_cap(dev) + pos, dst);
}

static inline int
pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val)
{
	if (pos & 1)
		return -EINVAL;

	if (!pcie_capability_reg_implemented(dev, pos))
		return 0;

	return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val);
}

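/*
 * Illustrative use of the capability accessors above: read the negotiated
 * link status and extract the link width field ("width" is a local of the
 * caller; PCIEM_LINK_STA_WIDTH comes from <dev/pci/pcireg.h>):
 *
 *	u16 lnksta;
 *
 *	if (pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnksta) == 0)
 *		width = (lnksta & PCIEM_LINK_STA_WIDTH) >> 4;
 */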

static inline int pcie_get_minimum_link(struct pci_dev *dev,
    enum pci_bus_speed *speed, enum pcie_link_width *width)
{
	*speed = PCI_SPEED_UNKNOWN;
	*width = PCIE_LNK_WIDTH_UNKNOWN;
	return (0);
}

static inline int
pci_num_vf(struct pci_dev *dev)
{
	return (0);
}

static inline enum pci_bus_speed
pcie_get_speed_cap(struct pci_dev *dev)
{
	device_t root;
	uint32_t lnkcap, lnkcap2;
	int error, pos;

	root = device_get_parent(dev->dev.bsddev);
	if (root == NULL)
		return (PCI_SPEED_UNKNOWN);
	root = device_get_parent(root);
	if (root == NULL)
		return (PCI_SPEED_UNKNOWN);
	root = device_get_parent(root);
	if (root == NULL)
		return (PCI_SPEED_UNKNOWN);

	if (pci_get_vendor(root) == PCI_VENDOR_ID_VIA ||
	    pci_get_vendor(root) == PCI_VENDOR_ID_SERVERWORKS)
		return (PCI_SPEED_UNKNOWN);

	if ((error = pci_find_cap(root, PCIY_EXPRESS, &pos)) != 0)
		return (PCI_SPEED_UNKNOWN);

	lnkcap2 = pci_read_config(root, pos + PCIER_LINK_CAP2, 4);

	if (lnkcap2) {	/* PCIe r3.0-compliant */
		if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
			return (PCIE_SPEED_2_5GT);
		if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
			return (PCIE_SPEED_5_0GT);
		if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
			return (PCIE_SPEED_8_0GT);
		if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_16_0GB)
			return (PCIE_SPEED_16_0GT);
	} else {	/* pre-r3.0 */
		lnkcap = pci_read_config(root, pos + PCIER_LINK_CAP, 4);
		if (lnkcap & PCI_EXP_LNKCAP_SLS_2_5GB)
			return (PCIE_SPEED_2_5GT);
		if (lnkcap & PCI_EXP_LNKCAP_SLS_5_0GB)
			return (PCIE_SPEED_5_0GT);
		if (lnkcap & PCI_EXP_LNKCAP_SLS_8_0GB)
			return (PCIE_SPEED_8_0GT);
		if (lnkcap & PCI_EXP_LNKCAP_SLS_16_0GB)
			return (PCIE_SPEED_16_0GT);
	}
	return (PCI_SPEED_UNKNOWN);
}

static inline enum pcie_link_width
pcie_get_width_cap(struct pci_dev *dev)
{
	uint32_t lnkcap = 0;	/* remains zero if the capability read fails */

	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
	if (lnkcap)
		return ((lnkcap & PCI_EXP_LNKCAP_MLW) >> 4);

	return (PCIE_LNK_WIDTH_UNKNOWN);
}

static inline int
pcie_get_mps(struct pci_dev *dev)
{
	return (pci_get_max_payload(dev->dev.bsddev));
}

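/*
 * Convert a link speed to usable Mb/s per lane, subtracting line-encoding
 * overhead: 2.5 and 5.0 GT/s links use 8b/10b encoding (80% efficiency),
 * while 8.0 GT/s and faster links use 128b/130b.  For example, 8.0 GT/s
 * yields 8000 * 128 / 130 = 7876 Mb/s per lane.
 */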
static inline uint32_t
PCIE_SPEED2MBS_ENC(enum pci_bus_speed spd)
{

	switch (spd) {
	case PCIE_SPEED_16_0GT:
		return (16000 * 128 / 130);
	case PCIE_SPEED_8_0GT:
		return (8000 * 128 / 130);
	case PCIE_SPEED_5_0GT:
		return (5000 * 8 / 10);
	case PCIE_SPEED_2_5GT:
		return (2500 * 8 / 10);
	default:
		return (0);
	}
}

static inline uint32_t
pcie_bandwidth_available(struct pci_dev *pdev,
    struct pci_dev **limiting,
    enum pci_bus_speed *speed,
    enum pcie_link_width *width)
{
	enum pci_bus_speed nspeed = pcie_get_speed_cap(pdev);
	enum pcie_link_width nwidth = pcie_get_width_cap(pdev);

	if (speed)
		*speed = nspeed;
	if (width)
		*width = nwidth;

	return (nwidth * PCIE_SPEED2MBS_ENC(nspeed));
}

/*
 * The following functions can be used to attach/detach the LinuxKPI's
 * PCI device runtime.  The pci_driver and pci_device_id pointers are
 * allowed to be NULL; all other pointers must be valid.
 * The pci_dev structure should be zero-initialized before being passed
 * to linux_pci_attach_device().
 */
extern int linux_pci_attach_device(device_t, struct pci_driver *,
    const struct pci_device_id *, struct pci_dev *);
extern int linux_pci_detach_device(struct pci_dev *);
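/*
 * Sketch of attaching an already-probed newbus device to the LinuxKPI PCI
 * runtime (the enclosing attach routine, "dev" and the error handling are
 * hypothetical):
 *
 *	struct pci_dev *pdev;
 *	int error;
 *
 *	pdev = malloc(sizeof(*pdev), M_DEVBUF, M_WAITOK | M_ZERO);
 *	error = linux_pci_attach_device(dev, NULL, NULL, pdev);
 *	...
 *	error = linux_pci_detach_device(pdev);
 */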

static inline int
pci_dev_present(const struct pci_device_id *cur)
{
	while (cur != NULL && (cur->vendor || cur->device)) {
		if (pci_find_device(cur->vendor, cur->device) != NULL) {
			return (1);
		}
		cur++;
	}
	return (0);
}
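/*
 * Note: pci_dev_present() walks the table until it finds an entry whose
 * vendor and device are both zero, so the table passed in must be
 * terminated by a zeroed sentinel (as in the PCI_VDEVICE() example above).
 */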

#endif	/* _LINUX_PCI_H_ */