/* virtio net driver for MINIX 3
 *
 * Copyright (c) 2013, A. Welzel, <arne.welzel@gmail.com>
 *
 * This software is released under the BSD license. See the LICENSE file
 * included in the main directory of this source distribution for the
 * license terms and conditions.
 */

#include <assert.h>
#include <sys/types.h>

#include <net/gen/ether.h>
#include <net/gen/eth_io.h>

#include <minix/drivers.h>
#include <minix/netdriver.h>
#include <minix/sysutil.h>
#include <minix/virtio.h>

#include <sys/queue.h>

#include "virtio_net.h"

#define VERBOSE 0

#if VERBOSE
#define dput(s)		do { dprintf(s); printf("\n"); } while (0)
#define dprintf(s) do { \
	printf("%s: ", name); \
	printf s; \
} while (0)
#else
#define dput(s)
#define dprintf(s)
#endif

/* Handle for the underlying virtio device, set up in virtio_net_probe(). */
static struct virtio_device *net_dev;

static const char *const name = "virtio-net";

/* Virtqueue indices: receive, transmit, and (optional) control queue. */
enum queue {RX_Q, TX_Q, CTRL_Q};

/* Number of packets to work with */
/* TODO: This should be an argument to the driver and possibly also
 * depend on the queue sizes offered by this device.
 */
#define BUF_PACKETS 64
/* Maximum size of a packet */
#define MAX_PACK_SIZE ETH_MAX_PACK_SIZE
/* Buffer size needed for the payload of BUF_PACKETS */
#define PACKET_BUF_SZ (BUF_PACKETS * MAX_PACK_SIZE)

/*
 * Bookkeeping for one packet buffer: a virtio net header plus a payload
 * area, each tracked by both its virtual and its physical address so it
 * can be handed to the host through the virtqueues.
 */
struct packet {
	int idx;			/* index into the packets[] array */
	struct virtio_net_hdr *vhdr;	/* net header, virtual address */
	phys_bytes phdr;		/* net header, physical address */
	char *vdata;			/* payload area, virtual address */
	phys_bytes pdata;		/* payload area, physical address */
	size_t len;			/* length reported by the host on RX;
					 * includes the virtio net header */
	STAILQ_ENTRY(packet) next;
};

/* Allocated data chunks */
static char *data_vir;			/* payload buffers (virtual) */
static phys_bytes data_phys;		/* payload buffers (physical) */
static struct virtio_net_hdr *hdrs_vir;	/* net header array (virtual) */
static phys_bytes hdrs_phys;		/* net header array (physical) */
static struct packet *packets;		/* per-packet bookkeeping array */
static int in_rx;			/* # packets currently on the RX queue */

/* Packets on this list can be given to the host */
static STAILQ_HEAD(free_list, packet) free_list;

/* Packets on this list are to be given to inet */
static STAILQ_HEAD(recv_list, packet) recv_list;

/* Various state data */
static eth_stat_t virtio_net_stats;
static int spurious_interrupt;

/* Prototypes */
static int virtio_net_probe(unsigned int skip);
static void virtio_net_config(ether_addr_t *addr);
static int virtio_net_alloc_bufs(void);
static void virtio_net_init_queues(void);

static void virtio_net_refill_rx_queue(void);
static void virtio_net_check_queues(void);
static void virtio_net_check_pending(void);

static int virtio_net_init(unsigned int instance, ether_addr_t *addr);
static void virtio_net_stop(void);
static int virtio_net_send(struct netdriver_data *data, size_t len);
static ssize_t virtio_net_recv(struct netdriver_data *data, size_t max);
static void virtio_net_stat(eth_stat_t *stat);
static void virtio_net_intr(unsigned int mask);

/* Callback table handed to the netdriver framework in main(). */
static const struct netdriver virtio_net_table = {
	.ndr_init	= virtio_net_init,
	.ndr_stop	= virtio_net_stop,
	.ndr_recv	= virtio_net_recv,
	.ndr_send	= virtio_net_send,
	.ndr_stat	= virtio_net_stat,
	.ndr_intr	= virtio_net_intr,
};

/* TODO: Features are pretty much ignored */
static struct virtio_feature netf[] = {
	{ "partial csum",	VIRTIO_NET_F_CSUM,	0,	0	},
	{ "given mac",		VIRTIO_NET_F_MAC,	0,	1	},
	{ "status ",		VIRTIO_NET_F_STATUS,	0,	0	},
	{ "control channel",	VIRTIO_NET_F_CTRL_VQ,	0,	1	},
	{ "control channel rx",	VIRTIO_NET_F_CTRL_RX,	0,	0	}
};

/*
 * Find and set up the virtio-net device, skipping the first 'skip'
 * matching devices, and allocate its virtqueues.
 *
 * Returns OK on success, ENXIO if no device was found, or ENOMEM if
 * queue allocation failed (the device is freed again in that case).
 */
static int
virtio_net_probe(unsigned int skip)
{
	/* virtio-net has at least 2 queues */
	int queues = 2;
	net_dev= virtio_setup_device(0x00001, name, netf,
				     sizeof(netf) / sizeof(netf[0]),
				     1 /* threads */, skip);
	if (net_dev == NULL)
		return ENXIO;

	/* If the host supports the control queue, allocate it as well */
	if (virtio_host_supports(net_dev, VIRTIO_NET_F_CTRL_VQ))
		queues += 1;

	if (virtio_alloc_queues(net_dev, queues) != OK) {
		virtio_free_device(net_dev);
		return ENOMEM;
	}

	return OK;
}

/*
 * Read the device configuration: fetch the MAC address from the device
 * configuration space if the host provides one, and (verbosely) report
 * the link status and control-channel capabilities.
 */
static void
virtio_net_config(ether_addr_t * addr)
{
	u32_t mac14;	/* MAC bytes 0..3, config space offset 0 */
	u32_t mac56;	/* MAC bytes 4..5 (low half), config space offset 4 */
	int i;

	if (virtio_host_supports(net_dev, VIRTIO_NET_F_MAC)) {
		dprintf(("Mac set by host: "));
		mac14 = virtio_sread32(net_dev, 0);
		mac56 = virtio_sread32(net_dev, 4);
		/* Only 6 of the 8 bytes read form the MAC address. */
		memcpy(&addr->ea_addr[0], &mac14, 4);
		memcpy(&addr->ea_addr[4], &mac56, 2);

		for (i = 0; i < 6; i++)
			dprintf(("%02x%s", addr->ea_addr[i],
					 i == 5 ? "\n" : ":"));
	} else {
		dput(("No mac"));
	}

	if (virtio_host_supports(net_dev, VIRTIO_NET_F_STATUS)) {
		dput(("Current Status %x", (u32_t)virtio_sread16(net_dev, 6)));
	} else {
		dput(("No status"));
	}

	if (virtio_host_supports(net_dev, VIRTIO_NET_F_CTRL_VQ))
		dput(("Host supports control channel"));

	if (virtio_host_supports(net_dev, VIRTIO_NET_F_CTRL_RX))
		dput(("Host supports control channel for RX"));
}

/*
 * Allocate the shared buffer areas: one physically contiguous chunk for
 * all packet payloads, one for all virtio net headers, and the packet
 * bookkeeping array.  The physical addresses are kept as well, since the
 * host is given physical addresses through the virtqueues.
 *
 * Returns OK, or ENOMEM on allocation failure (with earlier allocations
 * released again).
 */
static int
virtio_net_alloc_bufs(void)
{
	data_vir = alloc_contig(PACKET_BUF_SZ, 0, &data_phys);

	if (!data_vir)
		return ENOMEM;

	hdrs_vir = alloc_contig(BUF_PACKETS * sizeof(hdrs_vir[0]),
				0, &hdrs_phys);

	if (!hdrs_vir) {
		free_contig(data_vir, PACKET_BUF_SZ);
		return ENOMEM;
	}

	packets = malloc(BUF_PACKETS * sizeof(packets[0]));

	if (!packets) {
		free_contig(data_vir, PACKET_BUF_SZ);
		free_contig(hdrs_vir, BUF_PACKETS * sizeof(hdrs_vir[0]));
		return ENOMEM;
	}

	memset(data_vir, 0, PACKET_BUF_SZ);
	memset(hdrs_vir, 0, BUF_PACKETS * sizeof(hdrs_vir[0]));
	memset(packets, 0, BUF_PACKETS * sizeof(packets[0]));

	return OK;
}

/*
 * Initialize the packet lists: carve the buffer areas allocated by
 * virtio_net_alloc_bufs() into BUF_PACKETS packets and put them all on
 * the free list.
 */
static void
virtio_net_init_queues(void)
{
	int i;
	STAILQ_INIT(&free_list);
	STAILQ_INIT(&recv_list);

	for (i = 0; i < BUF_PACKETS; i++) {
		packets[i].idx = i;
		packets[i].vhdr = &hdrs_vir[i];
		packets[i].phdr = hdrs_phys + i * sizeof(hdrs_vir[i]);
		packets[i].vdata = data_vir + i * MAX_PACK_SIZE;
		packets[i].pdata = data_phys + i * MAX_PACK_SIZE;
		STAILQ_INSERT_HEAD(&free_list, &packets[i], next);
	}
}

/*
 * Move packets from the free list onto the RX queue so the host has
 * buffers to receive into.  At most half of all packet buffers are kept
 * on the RX queue (presumably so the TX path cannot be starved of free
 * buffers -- TODO confirm rationale).
 */
static void
virtio_net_refill_rx_queue(void)
{
	struct vumap_phys phys[2];
	struct packet *p;

	while ((in_rx < BUF_PACKETS / 2) && !STAILQ_EMPTY(&free_list)) {
		/* peek */
		p = STAILQ_FIRST(&free_list);
		/* remove */
		STAILQ_REMOVE_HEAD(&free_list, next);

		/* The asserts check that bit 0 of each physical address is
		 * free, as it is used below as the "host may write" flag.
		 */
		phys[0].vp_addr = p->phdr;
		assert(!(phys[0].vp_addr & 1));
		phys[0].vp_size = sizeof(struct virtio_net_hdr);

		phys[1].vp_addr = p->pdata;
		assert(!(phys[1].vp_addr & 1));
		phys[1].vp_size = MAX_PACK_SIZE;

		/* RX queue needs write */
		phys[0].vp_addr |= 1;
		phys[1].vp_addr |= 1;

		virtio_to_queue(net_dev, RX_Q, phys, 2, p);
		in_rx++;
	}

	/* No buffers on the RX queue and none to add: incoming packets
	 * will be dropped until buffers are freed up again.
	 */
	if (in_rx == 0 && STAILQ_EMPTY(&free_list)) {
		dput(("warning: rx queue underflow!"));
		virtio_net_stats.ets_fifoUnder++;
	}
}

/*
 * Harvest buffers the host has finished with: completed RX buffers move
 * to the recv list (to be handed to inet), completed TX buffers are
 * scrubbed and returned to the free list.
 */
static void
virtio_net_check_queues(void)
{
	struct packet *p;
	size_t len;

	/* Put the received packets into the recv list */
	while (virtio_from_queue(net_dev, RX_Q, (void **)&p, &len) == 0) {
		p->len = len;
		STAILQ_INSERT_TAIL(&recv_list, p, next);
		in_rx--;
		virtio_net_stats.ets_packetR++;
	}

	/*
	 * Packets from the TX queue just indicated they are free to
	 * be reused now. inet already knows about them as being sent.
	 */
	while (virtio_from_queue(net_dev, TX_Q, (void **)&p, NULL) == 0) {
		/* Zero the buffers so stale data cannot leak into the
		 * next packet (virtio_net_recv relies on this; see the
		 * padding hack there).
		 */
		memset(p->vhdr, 0, sizeof(*p->vhdr));
		memset(p->vdata, 0, MAX_PACK_SIZE);
		STAILQ_INSERT_HEAD(&free_list, p, next);
		virtio_net_stats.ets_packetT++;
	}
}

/*
 * Tell the netdriver framework to retry any suspended receive/send
 * requests, now that packets and/or free buffers may have become
 * available.
 */
static void
virtio_net_check_pending(void)
{

	/* Pending read and something in recv_list? */
	if (!STAILQ_EMPTY(&recv_list))
		netdriver_recv();

	if (!STAILQ_EMPTY(&free_list))
		netdriver_send();
}

/*
 * Interrupt handler: acknowledge the device interrupt, harvest completed
 * buffers, resume pending requests, re-enable interrupts, and refill the
 * RX queue.
 */
static void
virtio_net_intr(unsigned int __unused mask)
{

	/* Check and clear interrupt flag */
	if (virtio_had_irq(net_dev)) {
		virtio_net_check_queues();
	} else {
		if (!spurious_interrupt)
			dput(("Spurious interrupt"));

		spurious_interrupt = 1;
	}

	virtio_net_check_pending();

	virtio_irq_enable(net_dev);

	/* Readd packets to the receive queue as necessary. */
	virtio_net_refill_rx_queue();
}

/*
 * Put user bytes into a free packet buffer, forward this packet to the TX
 * queue, and return OK. If there are no free packet buffers, return SUSPEND.
 */
static int
virtio_net_send(struct netdriver_data * data, size_t len)
{
	struct vumap_phys phys[2];
	struct packet *p;

	if (STAILQ_EMPTY(&free_list))
		return SUSPEND;

	p = STAILQ_FIRST(&free_list);
	STAILQ_REMOVE_HEAD(&free_list, next);

	if (len > MAX_PACK_SIZE)
		panic("%s: packet too large to send: %zu", name, len);

	netdriver_copyin(data, 0, p->vdata, len);

	/* Two descriptors per packet: the (all-zero) virtio net header
	 * followed by the payload.  Bit 0 of the physical addresses must
	 * be free; it is the write flag, which stays clear for TX.
	 */
	phys[0].vp_addr = p->phdr;
	assert(!(phys[0].vp_addr & 1));
	phys[0].vp_size = sizeof(struct virtio_net_hdr);
	phys[1].vp_addr = p->pdata;
	assert(!(phys[1].vp_addr & 1));
	phys[1].vp_size = len;
	virtio_to_queue(net_dev, TX_Q, phys, 2, p);

	return OK;
}

/*
 * Put a packet receive from the RX queue into a user buffer, and return the
 * packet length. If there are no received packets, return SUSPEND.
 */
static ssize_t
virtio_net_recv(struct netdriver_data * data, size_t max)
{
	struct packet *p;
	ssize_t len;

	/* Get the first received packet, if any. */
	if (STAILQ_EMPTY(&recv_list))
		return SUSPEND;

	p = STAILQ_FIRST(&recv_list);
	STAILQ_REMOVE_HEAD(&recv_list, next);

	/* Copy out the packet contents. */
	/* p->len includes the virtio net header; subtract it to get the
	 * ethernet frame length.
	 */
	len = p->len - sizeof(struct virtio_net_hdr);
	if (len > max)
		len = max;

	/*
	 * HACK: due to lack of padding, received packets may in fact be
	 * smaller than the minimum ethernet packet size. Inet will accept the
	 * packets just fine if we increase the length to its minimum. We
	 * already zeroed out the rest of the packet data, so this is safe.
	 */
	/* NOTE(review): if max < ETH_MIN_PACK_SIZE this copies out more than
	 * the caller asked for -- verify that netdriver_copyout bounds the
	 * copy by the caller's grant.
	 */
	if (len < ETH_MIN_PACK_SIZE)
		len = ETH_MIN_PACK_SIZE;

	netdriver_copyout(data, 0, p->vdata, len);

	/* Clean the packet. */
	memset(p->vhdr, 0, sizeof(*p->vhdr));
	memset(p->vdata, 0, MAX_PACK_SIZE);
	STAILQ_INSERT_HEAD(&free_list, p, next);

	/* Readd packets to the receive queue as necessary. */
	virtio_net_refill_rx_queue();

	return len;
}

/*
 * Return statistics.
 */
static void
virtio_net_stat(eth_stat_t *stat)
{

	memcpy(stat, &virtio_net_stats, sizeof(*stat));
}

/*
 * Initialize the driver and the virtual hardware.
 */
static int
virtio_net_init(unsigned int instance, ether_addr_t *addr)
{
	int r;

	if ((r = virtio_net_probe(instance)) != OK)
		return r;

	virtio_net_config(addr);

	if (virtio_net_alloc_bufs() != OK)
		panic("%s: Buffer allocation failed", name);

	virtio_net_init_queues();

	/* Add packets to the receive queue. */
	virtio_net_refill_rx_queue();

	virtio_device_ready(net_dev);

	virtio_irq_enable(net_dev);

	return(OK);
}

/*
 * The driver is terminating. Clean up.
 */
static void
virtio_net_stop(void)
{

	dput(("Terminating"));

	free_contig(data_vir, PACKET_BUF_SZ);
	free_contig(hdrs_vir, BUF_PACKETS * sizeof(hdrs_vir[0]));
	free(packets);

	virtio_reset_device(net_dev);
	virtio_free_queues(net_dev);
	virtio_free_device(net_dev);
	net_dev = NULL;
}

/*
 * The virtio-net device driver.
 */
int
main(int argc, char *argv[])
{

	env_setargs(argc, argv);

	netdriver_task(&virtio_net_table);

	return 0;
}