// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel IFC VF NIC driver for virtio dataplane offloading
 *
 * Copyright (C) 2020 Intel Corporation.
 *
 * Author: Zhu Lingshan <lingshan.zhu@intel.com>
 *
 */

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/sysfs.h>
#include "ifcvf_base.h"

#define DRIVER_AUTHOR   "Intel Corporation"
#define IFCVF_DRIVER_NAME       "ifcvf"

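/*
 * MSI-X handler for the device config change interrupt: forward to the
 * config callback registered by the vDPA bus, if any.
 */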
static irqreturn_t ifcvf_config_changed(int irq, void *arg)
{
	struct ifcvf_hw *vf = arg;

	if (vf->config_cb.callback)
		return vf->config_cb.callback(vf->config_cb.private);

	return IRQ_HANDLED;
}

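/*
 * MSI-X handler for a virtqueue interrupt: forward to the per-vq
 * callback set via set_vq_cb(), if any.
 */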
static irqreturn_t ifcvf_intr_handler(int irq, void *arg)
{
	struct vring_info *vring = arg;

	if (vring->cb.callback)
		return vring->cb.callback(vring->cb.private);

	return IRQ_HANDLED;
}

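/*
 * Wrapper around pci_free_irq_vectors(); used both as a devres action
 * (registered in ifcvf_probe()) and directly from ifcvf_free_irq().
 */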
static void ifcvf_free_irq_vectors(void *data)
{
	pci_free_irq_vectors(data);
}

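/*
 * Free the interrupts of the first @queues virtqueues, then the config
 * change interrupt and the MSI-X vectors themselves. Called with a
 * partial queue count on request failure, or with all queues on stop.
 */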
static void ifcvf_free_irq(struct ifcvf_adapter *adapter, int queues)
{
	struct pci_dev *pdev = adapter->pdev;
	struct ifcvf_hw *vf = &adapter->vf;
	int i;

	for (i = 0; i < queues; i++) {
		devm_free_irq(&pdev->dev, vf->vring[i].irq, &vf->vring[i]);
		vf->vring[i].irq = -EINVAL;
	}

	devm_free_irq(&pdev->dev, vf->config_irq, vf);
	ifcvf_free_irq_vectors(pdev);
}

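/*
 * Allocate IFCVF_MAX_INTR MSI-X vectors and request one interrupt for
 * the config change notification plus one per virtqueue. Vector 0
 * carries the config interrupt; queue vectors start at
 * IFCVF_MSI_QUEUE_OFF.
 */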
static int ifcvf_request_irq(struct ifcvf_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct ifcvf_hw *vf = &adapter->vf;
	int vector, i, ret, irq;

	ret = pci_alloc_irq_vectors(pdev, IFCVF_MAX_INTR,
				    IFCVF_MAX_INTR, PCI_IRQ_MSIX);
	if (ret < 0) {
		IFCVF_ERR(pdev, "Failed to alloc IRQ vectors\n");
		return ret;
	}

	snprintf(vf->config_msix_name, 256, "ifcvf[%s]-config",
		 pci_name(pdev));
	vector = 0;
	vf->config_irq = pci_irq_vector(pdev, vector);
	ret = devm_request_irq(&pdev->dev, vf->config_irq,
			       ifcvf_config_changed, 0,
			       vf->config_msix_name, vf);
	if (ret) {
		IFCVF_ERR(pdev, "Failed to request config irq\n");
		return ret;
	}

	for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++) {
		snprintf(vf->vring[i].msix_name, 256, "ifcvf[%s]-%d",
			 pci_name(pdev), i);
		vector = i + IFCVF_MSI_QUEUE_OFF;
		irq = pci_irq_vector(pdev, vector);
		ret = devm_request_irq(&pdev->dev, irq,
				       ifcvf_intr_handler, 0,
				       vf->vring[i].msix_name,
				       &vf->vring[i]);
		if (ret) {
			IFCVF_ERR(pdev,
				  "Failed to request irq for vq %d\n", i);
			ifcvf_free_irq(adapter, i);
			return ret;
		}

		vf->vring[i].irq = irq;
	}

	return 0;
}

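/*
 * Start the hardware datapath via ifcvf_start_hw(); on failure, set
 * the VIRTIO_CONFIG_S_FAILED status bit.
 */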
static int ifcvf_start_datapath(void *private)
{
	struct ifcvf_hw *vf = ifcvf_private_to_vf(private);
	u8 status;
	int ret;

	vf->nr_vring = IFCVF_MAX_QUEUE_PAIRS * 2;
	ret = ifcvf_start_hw(vf);
	if (ret < 0) {
		status = ifcvf_get_status(vf);
		status |= VIRTIO_CONFIG_S_FAILED;
		ifcvf_set_status(vf, status);
	}

	return ret;
}

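/*
 * Clear all virtqueue callbacks and stop the hardware datapath.
 */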
static int ifcvf_stop_datapath(void *private)
{
	struct ifcvf_hw *vf = ifcvf_private_to_vf(private);
	int i;

	for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++)
		vf->vring[i].cb.callback = NULL;

	ifcvf_stop_hw(vf);

	return 0;
}

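/*
 * Clear the software vring state (indices, ring addresses, callbacks)
 * for all virtqueues and reset the device.
 */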
static void ifcvf_reset_vring(struct ifcvf_adapter *adapter)
{
	struct ifcvf_hw *vf = ifcvf_private_to_vf(adapter);
	int i;

	for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++) {
		vf->vring[i].last_avail_idx = 0;
		vf->vring[i].desc = 0;
		vf->vring[i].avail = 0;
		vf->vring[i].used = 0;
		vf->vring[i].ready = 0;
		vf->vring[i].cb.callback = NULL;
		vf->vring[i].cb.private = NULL;
	}

	ifcvf_reset(vf);
}

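/* Helpers to get the adapter and hardware context from a vdpa_device */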
static struct ifcvf_adapter *vdpa_to_adapter(struct vdpa_device *vdpa_dev)
{
	return container_of(vdpa_dev, struct ifcvf_adapter, vdpa);
}

static struct ifcvf_hw *vdpa_to_vf(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);

	return &adapter->vf;
}

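/*
 * Report device features: net devices are masked by
 * IFCVF_NET_SUPPORTED_FEATURES, block devices expose the raw hardware
 * feature bits.
 */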
static u64 ifcvf_vdpa_get_features(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
	struct pci_dev *pdev = adapter->pdev;

	u64 features;

	switch (vf->dev_type) {
	case VIRTIO_ID_NET:
		features = ifcvf_get_features(vf) & IFCVF_NET_SUPPORTED_FEATURES;
		break;
	case VIRTIO_ID_BLOCK:
		features = ifcvf_get_features(vf);
		break;
	default:
		features = 0;
		IFCVF_ERR(pdev, "VIRTIO ID %u not supported\n", vf->dev_type);
	}

	return features;
}

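/*
 * Validate and remember the features negotiated by the guest; they are
 * programmed into hardware when the datapath is started.
 */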
static int ifcvf_vdpa_set_features(struct vdpa_device *vdpa_dev, u64 features)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
	int ret;

	ret = ifcvf_verify_min_features(vf, features);
	if (ret)
		return ret;

	vf->req_features = features;

	return 0;
}

static u8 ifcvf_vdpa_get_status(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	return ifcvf_get_status(vf);
}

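/*
 * Status state machine: dropping DRIVER_OK stops the datapath and
 * frees the interrupts, a zero status resets the vrings, and setting
 * DRIVER_OK requests interrupts and starts the datapath before the new
 * status is written to hardware.
 */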
static void ifcvf_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
{
	struct ifcvf_adapter *adapter;
	struct ifcvf_hw *vf;
	u8 status_old;
	int ret;

	vf = vdpa_to_vf(vdpa_dev);
	adapter = dev_get_drvdata(vdpa_dev->dev.parent);
	status_old = ifcvf_get_status(vf);

	if (status_old == status)
		return;

	if ((status_old & VIRTIO_CONFIG_S_DRIVER_OK) &&
	    !(status & VIRTIO_CONFIG_S_DRIVER_OK)) {
		ifcvf_stop_datapath(adapter);
		ifcvf_free_irq(adapter, IFCVF_MAX_QUEUE_PAIRS * 2);
	}

	if (status == 0) {
		ifcvf_reset_vring(adapter);
		return;
	}

	if ((status & VIRTIO_CONFIG_S_DRIVER_OK) &&
	    !(status_old & VIRTIO_CONFIG_S_DRIVER_OK)) {
		ret = ifcvf_request_irq(adapter);
		if (ret) {
			status = ifcvf_get_status(vf);
			status |= VIRTIO_CONFIG_S_FAILED;
			ifcvf_set_status(vf, status);
			return;
		}

		if (ifcvf_start_datapath(adapter) < 0)
			IFCVF_ERR(adapter->pdev,
				  "Failed to set ifcvf vdpa status %u\n",
				  status);
	}

	ifcvf_set_status(vf, status);
}

static u16 ifcvf_vdpa_get_vq_num_max(struct vdpa_device *vdpa_dev)
{
	return IFCVF_QUEUE_MAX;
}

static int ifcvf_vdpa_get_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
				   struct vdpa_vq_state *state)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	state->avail_index = ifcvf_get_vq_state(vf, qid);
	return 0;
}

static int ifcvf_vdpa_set_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
				   const struct vdpa_vq_state *state)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	return ifcvf_set_vq_state(vf, qid, state->avail_index);
}

static void ifcvf_vdpa_set_vq_cb(struct vdpa_device *vdpa_dev, u16 qid,
				 struct vdpa_callback *cb)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	vf->vring[qid].cb = *cb;
}

static void ifcvf_vdpa_set_vq_ready(struct vdpa_device *vdpa_dev,
				    u16 qid, bool ready)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	vf->vring[qid].ready = ready;
}

static bool ifcvf_vdpa_get_vq_ready(struct vdpa_device *vdpa_dev, u16 qid)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	return vf->vring[qid].ready;
}

static void ifcvf_vdpa_set_vq_num(struct vdpa_device *vdpa_dev, u16 qid,
				  u32 num)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	vf->vring[qid].size = num;
}

static int ifcvf_vdpa_set_vq_address(struct vdpa_device *vdpa_dev, u16 qid,
				     u64 desc_area, u64 driver_area,
				     u64 device_area)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	vf->vring[qid].desc = desc_area;
	vf->vring[qid].avail = driver_area;
	vf->vring[qid].used = device_area;

	return 0;
}

static void ifcvf_vdpa_kick_vq(struct vdpa_device *vdpa_dev, u16 qid)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	ifcvf_notify_queue(vf, qid);
}

static u32 ifcvf_vdpa_get_generation(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	return ioread8(&vf->common_cfg->config_generation);
}

static u32 ifcvf_vdpa_get_device_id(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	return vf->dev_type;
}

static u32 ifcvf_vdpa_get_vendor_id(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);
	struct pci_dev *pdev = adapter->pdev;

	return pdev->subsystem_vendor;
}

static u32 ifcvf_vdpa_get_vq_align(struct vdpa_device *vdpa_dev)
{
	return IFCVF_QUEUE_ALIGNMENT;
}

static size_t ifcvf_vdpa_get_config_size(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
	struct pci_dev *pdev = adapter->pdev;
	size_t size;

	switch (vf->dev_type) {
	case VIRTIO_ID_NET:
		size = sizeof(struct virtio_net_config);
		break;
	case VIRTIO_ID_BLOCK:
		size = sizeof(struct virtio_blk_config);
		break;
	default:
		size = 0;
		IFCVF_ERR(pdev, "VIRTIO ID %u not supported\n", vf->dev_type);
	}

	return size;
}

static void ifcvf_vdpa_get_config(struct vdpa_device *vdpa_dev,
				  unsigned int offset,
				  void *buf, unsigned int len)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	WARN_ON(offset + len > sizeof(struct virtio_net_config));
	ifcvf_read_net_config(vf, offset, buf, len);
}

static void ifcvf_vdpa_set_config(struct vdpa_device *vdpa_dev,
				  unsigned int offset, const void *buf,
				  unsigned int len)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	WARN_ON(offset + len > sizeof(struct virtio_net_config));
	ifcvf_write_net_config(vf, offset, buf, len);
}

static void ifcvf_vdpa_set_config_cb(struct vdpa_device *vdpa_dev,
				     struct vdpa_callback *cb)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	vf->config_cb.callback = cb->callback;
	vf->config_cb.private = cb->private;
}

static int ifcvf_vdpa_get_vq_irq(struct vdpa_device *vdpa_dev,
				 u16 qid)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	return vf->vring[qid].irq;
}

/*
 * IFCVF currently doesn't have an on-chip IOMMU, so the
 * set_map()/dma_map()/dma_unmap() ops are not implemented.
 */
static const struct vdpa_config_ops ifc_vdpa_ops = {
	.get_features	= ifcvf_vdpa_get_features,
	.set_features	= ifcvf_vdpa_set_features,
	.get_status	= ifcvf_vdpa_get_status,
	.set_status	= ifcvf_vdpa_set_status,
	.get_vq_num_max	= ifcvf_vdpa_get_vq_num_max,
	.get_vq_state	= ifcvf_vdpa_get_vq_state,
	.set_vq_state	= ifcvf_vdpa_set_vq_state,
	.set_vq_cb	= ifcvf_vdpa_set_vq_cb,
	.set_vq_ready	= ifcvf_vdpa_set_vq_ready,
	.get_vq_ready	= ifcvf_vdpa_get_vq_ready,
	.set_vq_num	= ifcvf_vdpa_set_vq_num,
	.set_vq_address	= ifcvf_vdpa_set_vq_address,
	.get_vq_irq	= ifcvf_vdpa_get_vq_irq,
	.kick_vq	= ifcvf_vdpa_kick_vq,
	.get_generation	= ifcvf_vdpa_get_generation,
	.get_device_id	= ifcvf_vdpa_get_device_id,
	.get_vendor_id	= ifcvf_vdpa_get_vendor_id,
	.get_vq_align	= ifcvf_vdpa_get_vq_align,
	.get_config_size	= ifcvf_vdpa_get_config_size,
	.get_config	= ifcvf_vdpa_get_config,
	.set_config	= ifcvf_vdpa_set_config,
	.set_config_cb  = ifcvf_vdpa_set_config_cb,
};

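/*
 * PCI probe: map BAR0/2/4, set up 64-bit DMA, allocate the vDPA
 * device, derive the virtio device type from the PCI IDs, initialize
 * the hardware layout and register with the vDPA bus.
 */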
static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct device *dev = &pdev->dev;
	struct ifcvf_adapter *adapter;
	struct ifcvf_hw *vf;
	int ret, i;

	ret = pcim_enable_device(pdev);
	if (ret) {
		IFCVF_ERR(pdev, "Failed to enable device\n");
		return ret;
	}

	ret = pcim_iomap_regions(pdev, BIT(0) | BIT(2) | BIT(4),
				 IFCVF_DRIVER_NAME);
	if (ret) {
		IFCVF_ERR(pdev, "Failed to request MMIO region\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret) {
		IFCVF_ERR(pdev, "No usable DMA configuration\n");
		return ret;
	}

	ret = devm_add_action_or_reset(dev, ifcvf_free_irq_vectors, pdev);
	if (ret) {
		IFCVF_ERR(pdev,
			  "Failed to add devres for freeing irq vectors\n");
		return ret;
	}

	adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa,
				    dev, &ifc_vdpa_ops, NULL);
	if (adapter == NULL) {
		IFCVF_ERR(pdev, "Failed to allocate vDPA structure");
		return -ENOMEM;
	}

	pci_set_master(pdev);
	pci_set_drvdata(pdev, adapter);

	vf = &adapter->vf;

	/* This driver drives both modern virtio devices and transitional
	 * devices in modern mode.
	 * vDPA requires the feature bit VIRTIO_F_ACCESS_PLATFORM, so legacy
	 * devices and transitional devices in legacy mode will not work for
	 * vDPA; this driver will not drive devices with a legacy interface.
	 * Modern device IDs start at 0x1040 and encode the virtio device
	 * type as an offset from it; transitional devices carry the type in
	 * the PCI subsystem device ID.
	 */
	if (pdev->device < 0x1040)
		vf->dev_type = pdev->subsystem_device;
	else
		vf->dev_type = pdev->device - 0x1040;

	vf->base = pcim_iomap_table(pdev);

	adapter->pdev = pdev;
	adapter->vdpa.dma_dev = &pdev->dev;

	ret = ifcvf_init_hw(vf, pdev);
	if (ret) {
		IFCVF_ERR(pdev, "Failed to init IFCVF hw\n");
		goto err;
	}

	for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++)
		vf->vring[i].irq = -EINVAL;

	vf->hw_features = ifcvf_get_hw_features(vf);

	ret = vdpa_register_device(&adapter->vdpa, IFCVF_MAX_QUEUE_PAIRS * 2);
	if (ret) {
		IFCVF_ERR(pdev, "Failed to register ifcvf to vdpa bus");
		goto err;
	}

	return 0;

err:
	put_device(&adapter->vdpa.dev);
	return ret;
}

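/*
 * PCI remove: unregister from the vDPA bus; managed (devm/pcim)
 * resources are released automatically afterwards.
 */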
static void ifcvf_remove(struct pci_dev *pdev)
{
	struct ifcvf_adapter *adapter = pci_get_drvdata(pdev);

	vdpa_unregister_device(&adapter->vdpa);
}

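/* PCI IDs of the IFC VF devices this driver supports */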
static struct pci_device_id ifcvf_pci_ids[] = {
	{ PCI_DEVICE_SUB(N3000_VENDOR_ID,
			 N3000_DEVICE_ID,
			 N3000_SUBSYS_VENDOR_ID,
			 N3000_SUBSYS_DEVICE_ID) },
	{ PCI_DEVICE_SUB(C5000X_PL_VENDOR_ID,
			 C5000X_PL_DEVICE_ID,
			 C5000X_PL_SUBSYS_VENDOR_ID,
			 C5000X_PL_SUBSYS_DEVICE_ID) },
	{ PCI_DEVICE_SUB(C5000X_PL_BLK_VENDOR_ID,
			 C5000X_PL_BLK_DEVICE_ID,
			 C5000X_PL_BLK_SUBSYS_VENDOR_ID,
			 C5000X_PL_BLK_SUBSYS_DEVICE_ID) },

	{ 0 },
};
MODULE_DEVICE_TABLE(pci, ifcvf_pci_ids);

static struct pci_driver ifcvf_driver = {
	.name     = IFCVF_DRIVER_NAME,
	.id_table = ifcvf_pci_ids,
	.probe    = ifcvf_probe,
	.remove   = ifcvf_remove,
};

module_pci_driver(ifcvf_driver);

MODULE_LICENSE("GPL v2");