1 /*
2 * (c) Copyright 2010 Stefan Hajnoczi <stefanha@gmail.com>
3 *
4 * based on the Etherboot virtio-net driver
5 *
6 * (c) Copyright 2008 Bull S.A.S.
7 *
8 * Author: Laurent Vivier <Laurent.Vivier@bull.net>
9 *
10 * some parts from Linux Virtio PCI driver
11 *
12 * Copyright IBM Corp. 2007
13 * Authors: Anthony Liguori <aliguori@us.ibm.com>
14 *
15 * some parts from Linux Virtio Ring
16 *
17 * Copyright Rusty Russell IBM Corporation 2007
18 *
19 * This work is licensed under the terms of the GNU GPL, version 2 or later.
20 * See the COPYING file in the top-level directory.
21 */
22
23 FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
24
25 #include <errno.h>
26 #include <stdlib.h>
27 #include <unistd.h>
28 #include <ipxe/list.h>
29 #include <ipxe/iobuf.h>
30 #include <ipxe/netdevice.h>
31 #include <ipxe/pci.h>
32 #include <ipxe/if_ether.h>
33 #include <ipxe/ethernet.h>
34 #include <ipxe/virtio-pci.h>
35 #include <ipxe/virtio-ring.h>
36 #include "virtio-net.h"
37
38 /*
39 * Virtio network device driver
40 *
41 * Specification:
42 * http://ozlabs.org/~rusty/virtio-spec/
43 *
44 * The virtio network device is supported by Linux virtualization software
45 * including QEMU/KVM and lguest. This driver supports the virtio over PCI
46 * transport; virtual machines have one virtio-net PCI adapter per NIC.
47 *
48 * Virtio-net is different from hardware NICs because virtio devices
49 * communicate with the hypervisor via virtqueues, not traditional descriptor
50 * rings. Virtqueues are unordered queues, they support add_buf() and
51 * get_buf() operations. To transmit a packet, the driver has to add the
52 * packet buffer onto the virtqueue. To receive a packet, the driver must
53 * first add an empty buffer to the virtqueue and then get the filled packet
54 * buffer on completion.
55 *
56 * Virtqueues are an abstraction that is commonly implemented using the vring
57 * descriptor ring layout. The vring is the actual shared memory structure
58 * that allows the virtual machine to communicate buffers with the hypervisor.
59 * Because the vring layout is optimized for flexibility and performance rather
60 * than space, it is heavy-weight and allocated like traditional descriptor
61 * rings in the open() function of the driver and not in probe().
62 *
63 * There is no true interrupt enable/disable. Virtqueues have callback
64 * enable/disable flags but these are only hints. The hypervisor may still
65 * raise an interrupt. Nevertheless, this driver disables callbacks in the
66 * hopes of avoiding interrupts.
67 */
68
/* Driver types are declared here so virtio-net.h can be easily synced with its
 * Linux source.
 */

/* Virtqueue indices
 *
 * Queue 0 is the receive queue and queue 1 is the transmit queue, as
 * mandated by the virtio-net device layout; QUEUE_NB doubles as the
 * number of virtqueues this driver uses (no control queue).
 */
enum {
	RX_INDEX = 0,
	TX_INDEX,
	QUEUE_NB
};

/** Max number of pending rx packets handed to the device at once */
#define NUM_RX_BUF 8
82
/** A virtio-net network interface card
 *
 * Per-NIC driver state, stored in net_device->priv.
 */
struct virtnet_nic {
	/** Base pio register address (legacy virtio 0.9.5 transport) */
	unsigned long ioaddr;

	/** 0 for legacy, 1 for virtio 1.0
	 *
	 * Selects between the vp_* (legacy) and vpm_* (modern) transport
	 * helpers throughout the driver.
	 */
	int virtio_version;

	/** Virtio 1.0 device data (mapped capabilities; unused for legacy) */
	struct virtio_pci_modern_device vdev;

	/** RX/TX virtqueues (array of QUEUE_NB, allocated in open()) */
	struct vring_virtqueue *virtqueue;

	/** RX packets handed to the NIC waiting to be filled in
	 *
	 * Tracked so close() can free buffers still owned by the device.
	 */
	struct list_head rx_iobufs;

	/** Pending rx packet count */
	unsigned int rx_num_iobufs;

	/** Virtio net dummy packet headers
	 *
	 * One per queue: some hosts write back to the RX header, so RX
	 * and TX must not share a header (see virtnet_enqueue_iob()).
	 */
	struct virtio_net_hdr_modern empty_header[QUEUE_NB];
};
105
106 /** Add an iobuf to a virtqueue
107 *
108 * @v netdev Network device
109 * @v vq_idx Virtqueue index (RX_INDEX or TX_INDEX)
110 * @v iobuf I/O buffer
111 *
112 * The virtqueue is kicked after the iobuf has been added.
113 */
virtnet_enqueue_iob(struct net_device * netdev,int vq_idx,struct io_buffer * iobuf)114 static void virtnet_enqueue_iob ( struct net_device *netdev,
115 int vq_idx, struct io_buffer *iobuf ) {
116 struct virtnet_nic *virtnet = netdev->priv;
117 struct vring_virtqueue *vq = &virtnet->virtqueue[vq_idx];
118 struct virtio_net_hdr_modern *header = &virtnet->empty_header[vq_idx];
119 unsigned int out = ( vq_idx == TX_INDEX ) ? 2 : 0;
120 unsigned int in = ( vq_idx == TX_INDEX ) ? 0 : 2;
121 size_t header_len = ( virtnet->virtio_version ?
122 sizeof ( *header ) : sizeof ( header->legacy ) );
123 struct vring_list list[] = {
124 {
125 /* Share a single zeroed virtio net header between all
126 * packets in a ring. This works because this driver
127 * does not use any advanced features so none of the
128 * header fields get used.
129 *
130 * Some host implementations (notably Google Compute
131 * Platform) are known to unconditionally write back
132 * to header->flags for received packets. Work around
133 * this by using separate RX and TX headers.
134 */
135 .addr = ( char* ) header,
136 .length = header_len,
137 },
138 {
139 .addr = ( char* ) iobuf->data,
140 .length = iob_len ( iobuf ),
141 },
142 };
143
144 DBGC2 ( virtnet, "VIRTIO-NET %p enqueuing iobuf %p on vq %d\n",
145 virtnet, iobuf, vq_idx );
146
147 vring_add_buf ( vq, list, out, in, iobuf, 0 );
148 vring_kick ( virtnet->virtio_version ? &virtnet->vdev : NULL,
149 virtnet->ioaddr, vq, 1 );
150 }
151
152 /** Try to keep rx virtqueue filled with iobufs
153 *
154 * @v netdev Network device
155 */
virtnet_refill_rx_virtqueue(struct net_device * netdev)156 static void virtnet_refill_rx_virtqueue ( struct net_device *netdev ) {
157 struct virtnet_nic *virtnet = netdev->priv;
158 size_t len = ( netdev->max_pkt_len + 4 /* VLAN */ );
159
160 while ( virtnet->rx_num_iobufs < NUM_RX_BUF ) {
161 struct io_buffer *iobuf;
162
163 /* Try to allocate a buffer, stop for now if out of memory */
164 iobuf = alloc_iob ( len );
165 if ( ! iobuf )
166 break;
167
168 /* Keep track of iobuf so close() can free it */
169 list_add ( &iobuf->list, &virtnet->rx_iobufs );
170
171 /* Mark packet length until we know the actual size */
172 iob_put ( iobuf, len );
173
174 virtnet_enqueue_iob ( netdev, RX_INDEX, iobuf );
175 virtnet->rx_num_iobufs++;
176 }
177 }
178
179 /** Helper to free all virtqueue memory
180 *
181 * @v netdev Network device
182 */
virtnet_free_virtqueues(struct net_device * netdev)183 static void virtnet_free_virtqueues ( struct net_device *netdev ) {
184 struct virtnet_nic *virtnet = netdev->priv;
185 int i;
186
187 for ( i = 0; i < QUEUE_NB; i++ ) {
188 virtio_pci_unmap_capability ( &virtnet->virtqueue[i].notification );
189 vp_free_vq ( &virtnet->virtqueue[i] );
190 }
191
192 free ( virtnet->virtqueue );
193 virtnet->virtqueue = NULL;
194 }
195
196 /** Open network device, legacy virtio 0.9.5
197 *
198 * @v netdev Network device
199 * @ret rc Return status code
200 */
virtnet_open_legacy(struct net_device * netdev)201 static int virtnet_open_legacy ( struct net_device *netdev ) {
202 struct virtnet_nic *virtnet = netdev->priv;
203 unsigned long ioaddr = virtnet->ioaddr;
204 u32 features;
205 int i;
206
207 /* Reset for sanity */
208 vp_reset ( ioaddr );
209
210 /* Allocate virtqueues */
211 virtnet->virtqueue = zalloc ( QUEUE_NB *
212 sizeof ( *virtnet->virtqueue ) );
213 if ( ! virtnet->virtqueue )
214 return -ENOMEM;
215
216 /* Initialize rx/tx virtqueues */
217 for ( i = 0; i < QUEUE_NB; i++ ) {
218 if ( vp_find_vq ( ioaddr, i, &virtnet->virtqueue[i] ) == -1 ) {
219 DBGC ( virtnet, "VIRTIO-NET %p cannot register queue %d\n",
220 virtnet, i );
221 virtnet_free_virtqueues ( netdev );
222 return -ENOENT;
223 }
224 }
225
226 /* Initialize rx packets */
227 INIT_LIST_HEAD ( &virtnet->rx_iobufs );
228 virtnet->rx_num_iobufs = 0;
229 virtnet_refill_rx_virtqueue ( netdev );
230
231 /* Disable interrupts before starting */
232 netdev_irq ( netdev, 0 );
233
234 /* Driver is ready */
235 features = vp_get_features ( ioaddr );
236 vp_set_features ( ioaddr, features & ( ( 1 << VIRTIO_NET_F_MAC ) |
237 ( 1 << VIRTIO_NET_F_MTU ) ) );
238 vp_set_status ( ioaddr, VIRTIO_CONFIG_S_DRIVER | VIRTIO_CONFIG_S_DRIVER_OK );
239 return 0;
240 }
241
242 /** Open network device, modern virtio 1.0
243 *
244 * @v netdev Network device
245 * @ret rc Return status code
246 */
virtnet_open_modern(struct net_device * netdev)247 static int virtnet_open_modern ( struct net_device *netdev ) {
248 struct virtnet_nic *virtnet = netdev->priv;
249 u64 features;
250 u8 status;
251
252 /* Negotiate features */
253 features = vpm_get_features ( &virtnet->vdev );
254 if ( ! ( features & VIRTIO_F_VERSION_1 ) ) {
255 vpm_add_status ( &virtnet->vdev, VIRTIO_CONFIG_S_FAILED );
256 return -EINVAL;
257 }
258 vpm_set_features ( &virtnet->vdev, features & (
259 ( 1ULL << VIRTIO_NET_F_MAC ) |
260 ( 1ULL << VIRTIO_NET_F_MTU ) |
261 ( 1ULL << VIRTIO_F_VERSION_1 ) |
262 ( 1ULL << VIRTIO_F_ANY_LAYOUT ) |
263 ( 1ULL << VIRTIO_F_IOMMU_PLATFORM ) ) );
264 vpm_add_status ( &virtnet->vdev, VIRTIO_CONFIG_S_FEATURES_OK );
265
266 status = vpm_get_status ( &virtnet->vdev );
267 if ( ! ( status & VIRTIO_CONFIG_S_FEATURES_OK ) ) {
268 DBGC ( virtnet, "VIRTIO-NET %p device didn't accept features\n",
269 virtnet );
270 vpm_add_status ( &virtnet->vdev, VIRTIO_CONFIG_S_FAILED );
271 return -EINVAL;
272 }
273
274 /* Allocate virtqueues */
275 virtnet->virtqueue = zalloc ( QUEUE_NB *
276 sizeof ( *virtnet->virtqueue ) );
277 if ( ! virtnet->virtqueue ) {
278 vpm_add_status ( &virtnet->vdev, VIRTIO_CONFIG_S_FAILED );
279 return -ENOMEM;
280 }
281
282 /* Initialize rx/tx virtqueues */
283 if ( vpm_find_vqs ( &virtnet->vdev, QUEUE_NB, virtnet->virtqueue ) ) {
284 DBGC ( virtnet, "VIRTIO-NET %p cannot register queues\n",
285 virtnet );
286 virtnet_free_virtqueues ( netdev );
287 vpm_add_status ( &virtnet->vdev, VIRTIO_CONFIG_S_FAILED );
288 return -ENOENT;
289 }
290
291 /* Disable interrupts before starting */
292 netdev_irq ( netdev, 0 );
293
294 vpm_add_status ( &virtnet->vdev, VIRTIO_CONFIG_S_DRIVER_OK );
295
296 /* Initialize rx packets */
297 INIT_LIST_HEAD ( &virtnet->rx_iobufs );
298 virtnet->rx_num_iobufs = 0;
299 virtnet_refill_rx_virtqueue ( netdev );
300 return 0;
301 }
302
303 /** Open network device
304 *
305 * @v netdev Network device
306 * @ret rc Return status code
307 */
virtnet_open(struct net_device * netdev)308 static int virtnet_open ( struct net_device *netdev ) {
309 struct virtnet_nic *virtnet = netdev->priv;
310
311 if ( virtnet->virtio_version ) {
312 return virtnet_open_modern ( netdev );
313 } else {
314 return virtnet_open_legacy ( netdev );
315 }
316 }
317
318 /** Close network device
319 *
320 * @v netdev Network device
321 */
virtnet_close(struct net_device * netdev)322 static void virtnet_close ( struct net_device *netdev ) {
323 struct virtnet_nic *virtnet = netdev->priv;
324 struct io_buffer *iobuf;
325 struct io_buffer *next_iobuf;
326
327 if ( virtnet->virtio_version ) {
328 vpm_reset ( &virtnet->vdev );
329 } else {
330 vp_reset ( virtnet->ioaddr );
331 }
332
333 /* Virtqueues can be freed now that NIC is reset */
334 virtnet_free_virtqueues ( netdev );
335
336 /* Free rx iobufs */
337 list_for_each_entry_safe ( iobuf, next_iobuf, &virtnet->rx_iobufs, list ) {
338 free_iob ( iobuf );
339 }
340 INIT_LIST_HEAD ( &virtnet->rx_iobufs );
341 virtnet->rx_num_iobufs = 0;
342 }
343
344 /** Transmit packet
345 *
346 * @v netdev Network device
347 * @v iobuf I/O buffer
348 * @ret rc Return status code
349 */
virtnet_transmit(struct net_device * netdev,struct io_buffer * iobuf)350 static int virtnet_transmit ( struct net_device *netdev,
351 struct io_buffer *iobuf ) {
352 virtnet_enqueue_iob ( netdev, TX_INDEX, iobuf );
353 return 0;
354 }
355
356 /** Complete packet transmission
357 *
358 * @v netdev Network device
359 */
virtnet_process_tx_packets(struct net_device * netdev)360 static void virtnet_process_tx_packets ( struct net_device *netdev ) {
361 struct virtnet_nic *virtnet = netdev->priv;
362 struct vring_virtqueue *tx_vq = &virtnet->virtqueue[TX_INDEX];
363
364 while ( vring_more_used ( tx_vq ) ) {
365 struct io_buffer *iobuf = vring_get_buf ( tx_vq, NULL );
366
367 DBGC2 ( virtnet, "VIRTIO-NET %p tx complete iobuf %p\n",
368 virtnet, iobuf );
369
370 netdev_tx_complete ( netdev, iobuf );
371 }
372 }
373
374 /** Complete packet reception
375 *
376 * @v netdev Network device
377 */
virtnet_process_rx_packets(struct net_device * netdev)378 static void virtnet_process_rx_packets ( struct net_device *netdev ) {
379 struct virtnet_nic *virtnet = netdev->priv;
380 struct vring_virtqueue *rx_vq = &virtnet->virtqueue[RX_INDEX];
381
382 while ( vring_more_used ( rx_vq ) ) {
383 unsigned int len;
384 struct io_buffer *iobuf = vring_get_buf ( rx_vq, &len );
385
386 /* Release ownership of iobuf */
387 list_del ( &iobuf->list );
388 virtnet->rx_num_iobufs--;
389
390 /* Update iobuf length */
391 iob_unput ( iobuf, iob_len ( iobuf ) );
392 iob_put ( iobuf, len - sizeof ( struct virtio_net_hdr ) );
393
394 DBGC2 ( virtnet, "VIRTIO-NET %p rx complete iobuf %p len %zd\n",
395 virtnet, iobuf, iob_len ( iobuf ) );
396
397 /* Pass completed packet to the network stack */
398 netdev_rx ( netdev, iobuf );
399 }
400
401 virtnet_refill_rx_virtqueue ( netdev );
402 }
403
404 /** Poll for completed and received packets
405 *
406 * @v netdev Network device
407 */
virtnet_poll(struct net_device * netdev)408 static void virtnet_poll ( struct net_device *netdev ) {
409 struct virtnet_nic *virtnet = netdev->priv;
410
411 /* Acknowledge interrupt. This is necessary for UNDI operation and
412 * interrupts that are raised despite VRING_AVAIL_F_NO_INTERRUPT being
413 * set (that flag is just a hint and the hypervisor does not have to
414 * honor it).
415 */
416 if ( virtnet->virtio_version ) {
417 vpm_get_isr ( &virtnet->vdev );
418 } else {
419 vp_get_isr ( virtnet->ioaddr );
420 }
421
422 virtnet_process_tx_packets ( netdev );
423 virtnet_process_rx_packets ( netdev );
424 }
425
426 /** Enable or disable interrupts
427 *
428 * @v netdev Network device
429 * @v enable Interrupts should be enabled
430 */
virtnet_irq(struct net_device * netdev,int enable)431 static void virtnet_irq ( struct net_device *netdev, int enable ) {
432 struct virtnet_nic *virtnet = netdev->priv;
433 int i;
434
435 for ( i = 0; i < QUEUE_NB; i++ ) {
436 if ( enable )
437 vring_enable_cb ( &virtnet->virtqueue[i] );
438 else
439 vring_disable_cb ( &virtnet->virtqueue[i] );
440 }
441 }
442
/** virtio-net device operations */
static struct net_device_operations virtnet_operations = {
	.open = virtnet_open,		/* allocate rings, start device */
	.close = virtnet_close,		/* reset device, free rings/iobufs */
	.transmit = virtnet_transmit,	/* enqueue packet on TX virtqueue */
	.poll = virtnet_poll,		/* ack ISR, reap TX/RX completions */
	.irq = virtnet_irq,		/* toggle vring callback hints */
};
451
452 /**
453 * Probe PCI device, legacy virtio 0.9.5
454 *
455 * @v pci PCI device
456 * @ret rc Return status code
457 */
virtnet_probe_legacy(struct pci_device * pci)458 static int virtnet_probe_legacy ( struct pci_device *pci ) {
459 unsigned long ioaddr = pci->ioaddr;
460 struct net_device *netdev;
461 struct virtnet_nic *virtnet;
462 u32 features;
463 u16 mtu;
464 int rc;
465
466 /* Allocate and hook up net device */
467 netdev = alloc_etherdev ( sizeof ( *virtnet ) );
468 if ( ! netdev )
469 return -ENOMEM;
470 netdev_init ( netdev, &virtnet_operations );
471 virtnet = netdev->priv;
472 virtnet->ioaddr = ioaddr;
473 pci_set_drvdata ( pci, netdev );
474 netdev->dev = &pci->dev;
475
476 DBGC ( virtnet, "VIRTIO-NET %p busaddr=%s ioaddr=%#lx irq=%d\n",
477 virtnet, pci->dev.name, ioaddr, pci->irq );
478
479 /* Enable PCI bus master and reset NIC */
480 adjust_pci_device ( pci );
481 vp_reset ( ioaddr );
482
483 /* Load MAC address and MTU */
484 features = vp_get_features ( ioaddr );
485 if ( features & ( 1 << VIRTIO_NET_F_MAC ) ) {
486 vp_get ( ioaddr, offsetof ( struct virtio_net_config, mac ),
487 netdev->hw_addr, ETH_ALEN );
488 DBGC ( virtnet, "VIRTIO-NET %p mac=%s\n", virtnet,
489 eth_ntoa ( netdev->hw_addr ) );
490 }
491 if ( features & ( 1ULL << VIRTIO_NET_F_MTU ) ) {
492 vp_get ( ioaddr, offsetof ( struct virtio_net_config, mtu ),
493 &mtu, sizeof ( mtu ) );
494 DBGC ( virtnet, "VIRTIO-NET %p mtu=%d\n", virtnet, mtu );
495 netdev->max_pkt_len = ( mtu + ETH_HLEN );
496 netdev->mtu = mtu;
497 }
498
499 /* Register network device */
500 if ( ( rc = register_netdev ( netdev ) ) != 0 )
501 goto err_register_netdev;
502
503 /* Mark link as up, control virtqueue is not used */
504 netdev_link_up ( netdev );
505
506 return 0;
507
508 unregister_netdev ( netdev );
509 err_register_netdev:
510 vp_reset ( ioaddr );
511 netdev_nullify ( netdev );
512 netdev_put ( netdev );
513 return rc;
514 }
515
516 /**
517 * Probe PCI device, modern virtio 1.0
518 *
519 * @v pci PCI device
520 * @v found_dev Set to non-zero if modern device was found (probe may still fail)
521 * @ret rc Return status code
522 */
virtnet_probe_modern(struct pci_device * pci,int * found_dev)523 static int virtnet_probe_modern ( struct pci_device *pci, int *found_dev ) {
524 struct net_device *netdev;
525 struct virtnet_nic *virtnet;
526 u64 features;
527 u16 mtu;
528 int rc, common, isr, notify, config, device;
529
530 common = virtio_pci_find_capability ( pci, VIRTIO_PCI_CAP_COMMON_CFG );
531 if ( ! common ) {
532 DBG ( "Common virtio capability not found!\n" );
533 return -ENODEV;
534 }
535 *found_dev = 1;
536
537 isr = virtio_pci_find_capability ( pci, VIRTIO_PCI_CAP_ISR_CFG );
538 notify = virtio_pci_find_capability ( pci, VIRTIO_PCI_CAP_NOTIFY_CFG );
539 config = virtio_pci_find_capability ( pci, VIRTIO_PCI_CAP_PCI_CFG );
540 if ( ! isr || ! notify || ! config ) {
541 DBG ( "Missing virtio capabilities %i/%i/%i/%i\n",
542 common, isr, notify, config );
543 return -EINVAL;
544 }
545 device = virtio_pci_find_capability ( pci, VIRTIO_PCI_CAP_DEVICE_CFG );
546
547 /* Allocate and hook up net device */
548 netdev = alloc_etherdev ( sizeof ( *virtnet ) );
549 if ( ! netdev )
550 return -ENOMEM;
551 netdev_init ( netdev, &virtnet_operations );
552 virtnet = netdev->priv;
553
554 pci_set_drvdata ( pci, netdev );
555 netdev->dev = &pci->dev;
556
557 DBGC ( virtnet, "VIRTIO-NET modern %p busaddr=%s irq=%d\n",
558 virtnet, pci->dev.name, pci->irq );
559
560 virtnet->vdev.pci = pci;
561 rc = virtio_pci_map_capability ( pci, common,
562 sizeof ( struct virtio_pci_common_cfg ), 4,
563 0, sizeof ( struct virtio_pci_common_cfg ),
564 &virtnet->vdev.common );
565 if ( rc )
566 goto err_map_common;
567
568 rc = virtio_pci_map_capability ( pci, isr, sizeof ( u8 ), 1,
569 0, 1,
570 &virtnet->vdev.isr );
571 if ( rc )
572 goto err_map_isr;
573
574 virtnet->vdev.notify_cap_pos = notify;
575 virtnet->vdev.cfg_cap_pos = config;
576
577 /* Map the device capability */
578 if ( device ) {
579 rc = virtio_pci_map_capability ( pci, device,
580 0, 4, 0, sizeof ( struct virtio_net_config ),
581 &virtnet->vdev.device );
582 if ( rc )
583 goto err_map_device;
584 }
585
586 /* Enable the PCI device */
587 adjust_pci_device ( pci );
588
589 /* Reset the device and set initial status bits */
590 vpm_reset ( &virtnet->vdev );
591 vpm_add_status ( &virtnet->vdev, VIRTIO_CONFIG_S_ACKNOWLEDGE );
592 vpm_add_status ( &virtnet->vdev, VIRTIO_CONFIG_S_DRIVER );
593
594 /* Load MAC address and MTU */
595 if ( device ) {
596 features = vpm_get_features ( &virtnet->vdev );
597 if ( features & ( 1ULL << VIRTIO_NET_F_MAC ) ) {
598 vpm_get ( &virtnet->vdev,
599 offsetof ( struct virtio_net_config, mac ),
600 netdev->hw_addr, ETH_ALEN );
601 DBGC ( virtnet, "VIRTIO-NET %p mac=%s\n", virtnet,
602 eth_ntoa ( netdev->hw_addr ) );
603 }
604 if ( features & ( 1ULL << VIRTIO_NET_F_MTU ) ) {
605 vpm_get ( &virtnet->vdev,
606 offsetof ( struct virtio_net_config, mtu ),
607 &mtu, sizeof ( mtu ) );
608 DBGC ( virtnet, "VIRTIO-NET %p mtu=%d\n", virtnet,
609 mtu );
610 netdev->max_pkt_len = ( mtu + ETH_HLEN );
611 }
612 }
613
614 /* We need a valid MAC address */
615 if ( ! is_valid_ether_addr ( netdev->hw_addr ) ) {
616 rc = -EADDRNOTAVAIL;
617 goto err_mac_address;
618 }
619
620 /* Register network device */
621 if ( ( rc = register_netdev ( netdev ) ) != 0 )
622 goto err_register_netdev;
623
624 /* Mark link as up, control virtqueue is not used */
625 netdev_link_up ( netdev );
626
627 virtnet->virtio_version = 1;
628 return 0;
629
630 unregister_netdev ( netdev );
631 err_register_netdev:
632 err_mac_address:
633 vpm_reset ( &virtnet->vdev );
634 netdev_nullify ( netdev );
635 netdev_put ( netdev );
636
637 virtio_pci_unmap_capability ( &virtnet->vdev.device );
638 err_map_device:
639 virtio_pci_unmap_capability ( &virtnet->vdev.isr );
640 err_map_isr:
641 virtio_pci_unmap_capability ( &virtnet->vdev.common );
642 err_map_common:
643 return rc;
644 }
645
646 /**
647 * Probe PCI device
648 *
649 * @v pci PCI device
650 * @ret rc Return status code
651 */
virtnet_probe(struct pci_device * pci)652 static int virtnet_probe ( struct pci_device *pci ) {
653 int found_modern = 0;
654 int rc = virtnet_probe_modern ( pci, &found_modern );
655 if ( ! found_modern && pci->device < 0x1040 ) {
656 /* fall back to the legacy probe */
657 rc = virtnet_probe_legacy ( pci );
658 }
659 return rc;
660 }
661
662 /**
663 * Remove device
664 *
665 * @v pci PCI device
666 */
virtnet_remove(struct pci_device * pci)667 static void virtnet_remove ( struct pci_device *pci ) {
668 struct net_device *netdev = pci_get_drvdata ( pci );
669 struct virtnet_nic *virtnet = netdev->priv;
670
671 virtio_pci_unmap_capability ( &virtnet->vdev.device );
672 virtio_pci_unmap_capability ( &virtnet->vdev.isr );
673 virtio_pci_unmap_capability ( &virtnet->vdev.common );
674
675 unregister_netdev ( netdev );
676 netdev_nullify ( netdev );
677 netdev_put ( netdev );
678 }
679
/** Supported PCI IDs: 0x1000 is the transitional (legacy-capable)
 * virtio-net device; 0x1041 is the modern virtio 1.0 device.
 */
static struct pci_device_id virtnet_nics[] = {
PCI_ROM(0x1af4, 0x1000, "virtio-net", "Virtio Network Interface", 0),
PCI_ROM(0x1af4, 0x1041, "virtio-net", "Virtio Network Interface 1.0", 0),
};

/** virtio-net PCI driver registration */
struct pci_driver virtnet_driver __pci_driver = {
	.ids = virtnet_nics,
	.id_count = ( sizeof ( virtnet_nics ) / sizeof ( virtnet_nics[0] ) ),
	.probe = virtnet_probe,
	.remove = virtnet_remove,
};
691