/******************************************************************************
 * Copyright (c) 2011 IBM Corporation
 * All rights reserved.
 * This program and the accompanying materials
 * are made available under the terms of the BSD License
 * which accompanies this distribution, and is available at
 * http://www.opensource.org/licenses/bsd-license.php
 *
 * Contributors:
 *     IBM Corporation - initial implementation
 *****************************************************************************/

/*
 * This is the implementation of the Virtio network device driver. Details
 * about the virtio-net interface can be found in Rusty Russell's "Virtio PCI
 * Card Specification v0.8.10", appendix C, which can be found here:
 *
 *        http://ozlabs.org/~rusty/virtio-spec/virtio-spec.pdf
 */

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <helpers.h>
#include <cache.h>
#include <byteorder.h>
#include "virtio-net.h"
#include "virtio-internal.h"

#undef DEBUG
//#define DEBUG
#ifdef DEBUG
# define dprintf(fmt...) do { printf(fmt); } while(0)
#else
# define dprintf(fmt...)
#endif

#define sync()  asm volatile (" sync \n" ::: "memory")

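/*
 * Feature bits this driver is willing to negotiate: the device-supplied MAC
 * address and the VIRTIO 1.0 ("modern") interface.
 */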
#define DRIVER_FEATURE_SUPPORT  (VIRTIO_NET_F_MAC | VIRTIO_F_VERSION_1)

/* See Virtio Spec, appendix C, "Device Operation" */
struct virtio_net_hdr {
	uint8_t  flags;
	uint8_t  gso_type;
	uint16_t  hdr_len;
	uint16_t  gso_size;
	uint16_t  csum_start;
	uint16_t  csum_offset;
	// uint16_t  num_buffers;	/* Only if VIRTIO_NET_F_MRG_RXBUF */
};

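/* Size of the virtio-net header in use (legacy or v1), chosen during init */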
static unsigned int net_hdr_size;

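/* Header layout for modern (VIRTIO 1.0) devices; fields are little-endian */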
struct virtio_net_hdr_v1 {
	uint8_t  flags;
	uint8_t  gso_type;
	le16  hdr_len;
	le16  gso_size;
	le16  csum_start;
	le16  csum_offset;
	le16  num_buffers;
};

static uint16_t last_rx_idx;	/* Last index in RX "used" ring */

/**
 * Module init for virtio via PCI.
 * Checks whether we're responsible for the given device and sets up
 * the virtqueue configuration.
 */
static int virtionet_init_pci(struct virtio_net *vnet, struct virtio_device *dev)
{
	struct virtio_device *vdev = &vnet->vdev;

	dprintf("virtionet: doing virtionet_init_pci!\n");

	if (!dev)
		return -1;

	/* make a copy of the device structure */
	memcpy(vdev, dev, sizeof(struct virtio_device));

	/* Reset device */
	virtio_reset_device(vdev);

	/* The queue information can be retrieved via the virtio header that
	 * can be found in the I/O BAR. The first queue is the receive queue,
	 * the second is the transmit queue, and the third is the control
	 * queue for networking options.
	 * We are only interested in the receive and transmit queues here. */
	if (virtio_queue_init_vq(vdev, &vnet->vq_rx, VQ_RX) ||
	    virtio_queue_init_vq(vdev, &vnet->vq_tx, VQ_TX)) {
		virtio_set_status(vdev, VIRTIO_STAT_ACKNOWLEDGE|VIRTIO_STAT_DRIVER
				  |VIRTIO_STAT_FAILED);
		return -1;
	}

	/* Acknowledge device. */
	virtio_set_status(vdev, VIRTIO_STAT_ACKNOWLEDGE);

	return 0;
}

/**
 * Initialize the virtio-net device.
 * See the Virtio Spec, chapter 2.2.1 and Appendix C "Device Initialization"
 * for details.
 */
static int virtionet_init(struct virtio_net *vnet)
{
	int i;
	int status = VIRTIO_STAT_ACKNOWLEDGE | VIRTIO_STAT_DRIVER;
	struct virtio_device *vdev = &vnet->vdev;
	net_driver_t *driver = &vnet->driver;
	struct vqs *vq_tx = &vnet->vq_tx;
	struct vqs *vq_rx = &vnet->vq_rx;

	dprintf("virtionet_init(%02x:%02x:%02x:%02x:%02x:%02x)\n",
		driver->mac_addr[0], driver->mac_addr[1],
		driver->mac_addr[2], driver->mac_addr[3],
		driver->mac_addr[4], driver->mac_addr[5]);

	if (driver->running != 0)
		return 0;

	/* Tell HV that we know how to drive the device. */
	virtio_set_status(vdev, status);

	/* Device specific setup */
	if (vdev->is_modern) {
		if (virtio_negotiate_guest_features(vdev, DRIVER_FEATURE_SUPPORT))
			goto dev_error;
		net_hdr_size = sizeof(struct virtio_net_hdr_v1);
		virtio_get_status(vdev, &status);
	} else {
		net_hdr_size = sizeof(struct virtio_net_hdr);
		virtio_set_guest_features(vdev,  0);
	}

	/* Allocate memory for one transmit and multiple receive buffers */
	vq_rx->buf_mem = SLOF_alloc_mem((BUFFER_ENTRY_SIZE+net_hdr_size)
				   * RX_QUEUE_SIZE);
	if (!vq_rx->buf_mem) {
		printf("virtionet: Failed to allocate buffers!\n");
		goto dev_error;
	}

	/* Prepare receive buffer queue */
	for (i = 0; i < RX_QUEUE_SIZE; i++) {
		uint64_t addr = (uint64_t)vq_rx->buf_mem
			+ i * (BUFFER_ENTRY_SIZE+net_hdr_size);
		uint32_t id = i*2;
		/* Descriptor for net_hdr: */
		virtio_fill_desc(&vq_rx->desc[id], vdev->is_modern, addr, net_hdr_size,
				 VRING_DESC_F_NEXT | VRING_DESC_F_WRITE, id + 1);

		/* Descriptor for data: */
		virtio_fill_desc(&vq_rx->desc[id+1], vdev->is_modern, addr + net_hdr_size,
				 BUFFER_ENTRY_SIZE, VRING_DESC_F_WRITE, 0);

		vq_rx->avail->ring[i] = virtio_cpu_to_modern16(vdev, id);
	}
	sync();

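	/* Hand all RX_QUEUE_SIZE buffer chains to the device in one go; the
	 * "used" interrupt is suppressed since this driver polls the ring. */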
	vq_rx->avail->flags = virtio_cpu_to_modern16(vdev, VRING_AVAIL_F_NO_INTERRUPT);
	vq_rx->avail->idx = virtio_cpu_to_modern16(vdev, RX_QUEUE_SIZE);

	last_rx_idx = virtio_modern16_to_cpu(vdev, vq_rx->used->idx);

	vq_tx->avail->flags = virtio_cpu_to_modern16(vdev, VRING_AVAIL_F_NO_INTERRUPT);
	vq_tx->avail->idx = 0;

	/* Tell HV that setup succeeded */
	status |= VIRTIO_STAT_DRIVER_OK;
	virtio_set_status(vdev, status);

	/* Tell HV that RX queues are ready */
	virtio_queue_notify(vdev, VQ_RX);

	driver->running = 1;
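	/* Read the MAC address from the device config space, one byte at a time */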
	for(i = 0; i < (int)sizeof(driver->mac_addr); i++) {
		driver->mac_addr[i] = virtio_get_config(vdev, i, 1);
	}
	return 0;

dev_error:
	status |= VIRTIO_STAT_FAILED;
	virtio_set_status(vdev, status);
	return -1;
}


/**
 * Shutdown driver.
 * We've got to make sure that the host stops all transfers since the buffers
 * in our main memory will become invalid after this module has been terminated.
 */
static int virtionet_term(struct virtio_net *vnet)
{
	struct virtio_device *vdev = &vnet->vdev;
	net_driver_t *driver = &vnet->driver;
	struct vqs *vq_rx = &vnet->vq_rx;
	struct vqs *vq_tx = &vnet->vq_tx;

	dprintf("virtionet_term()\n");

	if (driver->running == 0)
		return 0;

	/* Quiesce device */
	virtio_set_status(vdev, VIRTIO_STAT_FAILED);

	/* Reset device */
	virtio_reset_device(vdev);

	driver->running = 0;

	SLOF_free_mem(vq_rx->buf_mem,
		      (BUFFER_ENTRY_SIZE+net_hdr_size) * RX_QUEUE_SIZE);
	vq_rx->buf_mem = NULL;

	virtio_queue_term_vq(vdev, vq_rx, VQ_RX);
	virtio_queue_term_vq(vdev, vq_tx, VQ_TX);

	return 0;
}


/**
 * Transmit a packet
 */
static int virtionet_xmit(struct virtio_net *vnet, char *buf, int len)
{
	int id, idx;
	static struct virtio_net_hdr_v1 nethdr_v1;
	static struct virtio_net_hdr nethdr_legacy;
	void *nethdr = &nethdr_legacy;
	struct virtio_device *vdev = &vnet->vdev;
	struct vqs *vq_tx = &vnet->vq_tx;

	if (len > BUFFER_ENTRY_SIZE) {
		printf("virtionet: Packet too big!\n");
		return 0;
	}

	dprintf("\nvirtionet_xmit(packet at %p, %d bytes)\n", buf, len);

	if (vdev->is_modern)
		nethdr = &nethdr_v1;

	memset(nethdr, 0, net_hdr_size);

	/* Determine descriptor index */
	idx = virtio_modern16_to_cpu(vdev, vq_tx->avail->idx);
	id = (idx * 2) % vq_tx->size;
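	/* Each packet occupies two chained descriptors: one for the virtio-net
	 * header and one for the packet data, hence the factor of two above. */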

	/* Set up virtqueue descriptor for header */
	virtio_fill_desc(&vq_tx->desc[id], vdev->is_modern, (uint64_t)nethdr,
			 net_hdr_size, VRING_DESC_F_NEXT, id + 1);

	/* Set up virtqueue descriptor for data */
	virtio_fill_desc(&vq_tx->desc[id+1], vdev->is_modern, (uint64_t)buf, len, 0, 0);

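	/* Publish the chain head in the avail ring and then advance the avail
	 * index; the sync() barriers order the stores so the device never sees
	 * a partially filled entry. */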
	vq_tx->avail->ring[idx % vq_tx->size] = virtio_cpu_to_modern16(vdev, id);
	sync();
	vq_tx->avail->idx = virtio_cpu_to_modern16(vdev, idx + 1);
	sync();

	/* Tell HV that TX queue is ready */
	virtio_queue_notify(vdev, VQ_TX);

	return len;
}


/**
 * Receive a packet
 */
static int virtionet_receive(struct virtio_net *vnet, char *buf, int maxlen)
{
	uint32_t len = 0;
	uint32_t id, idx;
	uint16_t avail_idx;
	struct virtio_device *vdev = &vnet->vdev;
	struct vqs *vq_rx = &vnet->vq_rx;

	idx = virtio_modern16_to_cpu(vdev, vq_rx->used->idx);

	if (last_rx_idx == idx) {
		/* Nothing received yet */
		return 0;
	}

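	/* The used ring entry names the header descriptor of the chain; the
	 * packet data sits in the next descriptor, hence the "+ 1". The
	 * reported length includes the virtio-net header, so subtract it. */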
	id = (virtio_modern32_to_cpu(vdev, vq_rx->used->ring[last_rx_idx % vq_rx->size].id) + 1)
		% vq_rx->size;
	len = virtio_modern32_to_cpu(vdev, vq_rx->used->ring[last_rx_idx % vq_rx->size].len)
		- net_hdr_size;
	dprintf("virtionet_receive() last_rx_idx=%i, vq_rx->used->idx=%i,"
		" id=%i len=%i\n", last_rx_idx, vq_rx->used->idx, id, len);

	if (len > (uint32_t)maxlen) {
		printf("virtio-net: Receive buffer not big enough!\n");
		len = maxlen;
	}

#if 0
	/* Dump packet */
	printf("\n");
	int i;
	for (i=0; i<64; i++) {
		printf(" %02x", *(uint8_t*)(vq_rx->desc[id].addr+i));
		if ((i%16)==15)
			printf("\n");
	}
	printf("\n");
#endif

	/* Copy data to destination buffer */
	memcpy(buf, (void *)virtio_modern64_to_cpu(vdev, vq_rx->desc[id].addr), len);

	/* Move indices to next entries */
	last_rx_idx = last_rx_idx + 1;

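	/* Return the two-descriptor chain to the device: "id" is the data
	 * descriptor, so id - 1 is the chain head that goes back onto the
	 * avail ring. */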
	avail_idx = virtio_modern16_to_cpu(vdev, vq_rx->avail->idx);
	vq_rx->avail->ring[avail_idx % vq_rx->size] = virtio_cpu_to_modern16(vdev, id - 1);
	sync();
	vq_rx->avail->idx = virtio_cpu_to_modern16(vdev, avail_idx + 1);

	/* Tell HV that RX queue entry is ready */
	virtio_queue_notify(vdev, VQ_RX);

	return len;
}

struct virtio_net *virtionet_open(struct virtio_device *dev)
{
	struct virtio_net *vnet;

	vnet = SLOF_alloc_mem(sizeof(*vnet));
	if (!vnet) {
		printf("Unable to allocate virtio-net driver\n");
		return NULL;
	}

	vnet->driver.running = 0;

	if (virtionet_init_pci(vnet, dev))
		goto FAIL;

	if (virtionet_init(vnet))
		goto FAIL;

	return vnet;

FAIL:
	SLOF_free_mem(vnet, sizeof(*vnet));
	return NULL;
}

void virtionet_close(struct virtio_net *vnet)
{
	if (vnet) {
		virtionet_term(vnet);
		SLOF_free_mem(vnet, sizeof(*vnet));
	}
}

int virtionet_read(struct virtio_net *vnet, char *buf, int len)
{
	if (vnet && buf)
		return virtionet_receive(vnet, buf, len);
	return -1;
}

int virtionet_write(struct virtio_net *vnet, char *buf, int len)
{
	if (vnet && buf)
		return virtionet_xmit(vnet, buf, len);
	return -1;
}
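
/*
 * Minimal usage sketch of the public entry points (illustrative only;
 * "probed_vdev" is an assumption, standing in for a virtio_device found
 * elsewhere, and is not part of this driver):
 *
 *	struct virtio_net *vnet = virtionet_open(probed_vdev);
 *	if (vnet) {
 *		char pkt[BUFFER_ENTRY_SIZE];
 *		int n = virtionet_read(vnet, pkt, sizeof(pkt));
 *		if (n > 0)
 *			virtionet_write(vnet, pkt, n);
 *		virtionet_close(vnet);
 *	}
 */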