1 /*- 2 * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org> 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice unmodified, this list of conditions, and the following 10 * disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 */ 26 27 /* Driver for VirtIO network devices. */ 28 29 #include <sys/cdefs.h> 30 31 #include <sys/param.h> 32 #include <sys/systm.h> 33 #include <sys/kernel.h> 34 #include <sys/sockio.h> 35 #include <sys/mbuf.h> 36 #include <sys/malloc.h> 37 #include <sys/module.h> 38 #include <sys/socket.h> 39 #include <sys/sysctl.h> 40 #include <sys/taskqueue.h> 41 #include <sys/random.h> 42 #include <sys/sglist.h> 43 #include <sys/serialize.h> 44 #include <sys/bus.h> 45 #include <sys/rman.h> 46 47 #include <machine/limits.h> 48 49 #include <net/ethernet.h> 50 #include <net/if.h> 51 #include <net/if_arp.h> 52 #include <net/if_dl.h> 53 #include <net/if_types.h> 54 #include <net/if_media.h> 55 #include <net/vlan/if_vlan_var.h> 56 #include <net/vlan/if_vlan_ether.h> 57 #include <net/ifq_var.h> 58 59 #include <net/bpf.h> 60 61 #include <netinet/in_systm.h> 62 #include <netinet/in.h> 63 #include <netinet/ip.h> 64 #include <netinet/ip6.h> 65 #include <netinet/udp.h> 66 #include <netinet/tcp.h> 67 68 #include <dev/virtual/virtio/virtio/virtio.h> 69 #include <dev/virtual/virtio/virtio/virtqueue.h> 70 #include <dev/virtual/virtio/net/virtio_net.h> 71 #include <dev/virtual/virtio/net/if_vtnetvar.h> 72 73 MALLOC_DEFINE(M_VTNET, "VTNET_TX", "Outgoing VTNET TX frame header"); 74 75 static int vtnet_probe(device_t); 76 static int vtnet_attach(device_t); 77 static int vtnet_detach(device_t); 78 static int vtnet_suspend(device_t); 79 static int vtnet_resume(device_t); 80 static int vtnet_shutdown(device_t); 81 82 static void vtnet_negotiate_features(struct vtnet_softc *); 83 static int vtnet_alloc_intrs(struct vtnet_softc *); 84 static int vtnet_alloc_virtqueues(struct vtnet_softc *); 85 static void vtnet_get_hwaddr(struct vtnet_softc *); 86 static void vtnet_set_hwaddr(struct vtnet_softc *); 87 static int vtnet_is_link_up(struct vtnet_softc *); 88 static void vtnet_update_link_status(struct vtnet_softc *); 89 #if 0 90 static void vtnet_watchdog(struct vtnet_softc *); 91 #endif 92 static void vtnet_config_change_task(void *, int); 93 static int vtnet_setup_interface(struct vtnet_softc *); 94 static int vtnet_change_mtu(struct vtnet_softc *, int); 95 static int vtnet_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *); 96 97 static int vtnet_init_rx_vq(struct vtnet_softc *); 98 static void vtnet_free_rx_mbufs(struct vtnet_softc *); 99 static void vtnet_free_tx_mbufs(struct vtnet_softc *); 100 static void vtnet_free_ctrl_vq(struct vtnet_softc *); 101 102 static struct mbuf * vtnet_alloc_rxbuf(struct vtnet_softc *, int, 103 struct mbuf **); 104 static int vtnet_replace_rxbuf(struct vtnet_softc *, 105 struct mbuf *, int); 106 static int vtnet_newbuf(struct vtnet_softc *); 107 static void vtnet_discard_merged_rxbuf(struct vtnet_softc *, int); 108 static void vtnet_discard_rxbuf(struct vtnet_softc *, struct mbuf *); 109 static int vtnet_enqueue_rxbuf(struct vtnet_softc *, struct mbuf *); 110 static void vtnet_vlan_tag_remove(struct mbuf *); 111 static int vtnet_rx_csum(struct vtnet_softc *, struct mbuf *, 112 struct virtio_net_hdr *); 113 static int vtnet_rxeof_merged(struct vtnet_softc *, struct mbuf *, int); 114 static int vtnet_rxeof(struct vtnet_softc *, int, int *); 115 static void vtnet_rx_intr_task(void *); 116 static void vtnet_rx_vq_intr(void *); 117 118 static void vtnet_enqueue_txhdr(struct vtnet_softc *, 119 struct vtnet_tx_header *); 120 static void vtnet_txeof(struct vtnet_softc *); 121 static struct mbuf * vtnet_tx_offload(struct vtnet_softc *, struct mbuf *, 122 struct virtio_net_hdr *); 123 static int vtnet_enqueue_txbuf(struct vtnet_softc *, struct mbuf **, 124 struct vtnet_tx_header *); 125 static int vtnet_encap(struct vtnet_softc *, struct mbuf **); 126 static void vtnet_start_locked(struct ifnet *, struct ifaltq_subque *); 127 static void vtnet_start(struct ifnet *, struct ifaltq_subque *); 128 static void vtnet_tick(void *); 129 static void vtnet_tx_intr_task(void *); 130 static void vtnet_tx_vq_intr(void *); 131 132 static void vtnet_config_intr(void *); 133 134 static void vtnet_stop(struct vtnet_softc *); 135 static int vtnet_virtio_reinit(struct vtnet_softc *); 136 static void vtnet_init_locked(struct vtnet_softc *); 137 static void vtnet_init(void *); 138 139 static void vtnet_exec_ctrl_cmd(struct vtnet_softc *, void *, 140 struct sglist *, int, int); 141 142 static int vtnet_ctrl_mac_cmd(struct vtnet_softc *, uint8_t *); 143 static int vtnet_ctrl_rx_cmd(struct vtnet_softc *, int, int); 144 static int vtnet_set_promisc(struct vtnet_softc *, int); 145 static int vtnet_set_allmulti(struct vtnet_softc *, int); 146 static void vtnet_rx_filter(struct vtnet_softc *sc); 147 static void vtnet_rx_filter_mac(struct vtnet_softc *); 148 149 static int vtnet_exec_vlan_filter(struct vtnet_softc *, int, uint16_t); 150 static void vtnet_rx_filter_vlan(struct vtnet_softc *); 151 static void vtnet_update_vlan_filter(struct vtnet_softc *, int, uint16_t); 152 static void vtnet_register_vlan(void *, struct ifnet *, uint16_t); 153 static void vtnet_unregister_vlan(void *, struct ifnet *, uint16_t); 154 155 static int vtnet_ifmedia_upd(struct ifnet *); 156 static void vtnet_ifmedia_sts(struct ifnet *, struct ifmediareq *); 157 158 static void vtnet_add_statistics(struct vtnet_softc *); 159 160 static int vtnet_enable_rx_intr(struct vtnet_softc *); 161 static int vtnet_enable_tx_intr(struct vtnet_softc *); 162 static void vtnet_disable_rx_intr(struct vtnet_softc *); 163 static void vtnet_disable_tx_intr(struct vtnet_softc *); 164 165 /* Tunables. */ 166 static int vtnet_csum_disable = 0; 167 TUNABLE_INT("hw.vtnet.csum_disable", &vtnet_csum_disable); 168 static int vtnet_tso_disable = 1; 169 TUNABLE_INT("hw.vtnet.tso_disable", &vtnet_tso_disable); 170 static int vtnet_lro_disable = 0; 171 TUNABLE_INT("hw.vtnet.lro_disable", &vtnet_lro_disable); 172 173 /* 174 * Reducing the number of transmit completed interrupts can 175 * improve performance. To do so, the define below keeps the 176 * Tx vq interrupt disabled and adds calls to vtnet_txeof() 177 * in the start and watchdog paths. The price to pay for this 178 * is the m_free'ing of transmitted mbufs may be delayed until 179 * the watchdog fires. 180 */ 181 #define VTNET_TX_INTR_MODERATION 182 183 static struct virtio_feature_desc vtnet_feature_desc[] = { 184 { VIRTIO_NET_F_CSUM, "TxChecksum" }, 185 { VIRTIO_NET_F_GUEST_CSUM, "RxChecksum" }, 186 { VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, "DynOffload" }, 187 { VIRTIO_NET_F_MAC, "MacAddress" }, 188 { VIRTIO_NET_F_GSO, "TxAllGSO" }, 189 { VIRTIO_NET_F_GUEST_TSO4, "RxTSOv4" }, 190 { VIRTIO_NET_F_GUEST_TSO6, "RxTSOv6" }, 191 { VIRTIO_NET_F_GUEST_ECN, "RxECN" }, 192 { VIRTIO_NET_F_GUEST_UFO, "RxUFO" }, 193 { VIRTIO_NET_F_HOST_TSO4, "TxTSOv4" }, 194 { VIRTIO_NET_F_HOST_TSO6, "TxTSOv6" }, 195 { VIRTIO_NET_F_HOST_ECN, "TxTSOECN" }, 196 { VIRTIO_NET_F_HOST_UFO, "TxUFO" }, 197 { VIRTIO_NET_F_MRG_RXBUF, "MrgRxBuf" }, 198 { VIRTIO_NET_F_STATUS, "Status" }, 199 { VIRTIO_NET_F_CTRL_VQ, "ControlVq" }, 200 { VIRTIO_NET_F_CTRL_RX, "RxMode" }, 201 { VIRTIO_NET_F_CTRL_VLAN, "VLanFilter" }, 202 { VIRTIO_NET_F_CTRL_RX_EXTRA, "RxModeExtra" }, 203 { VIRTIO_NET_F_GUEST_ANNOUNCE, "GuestAnnounce" }, 204 { VIRTIO_NET_F_MQ, "RFS" }, 205 { VIRTIO_NET_F_CTRL_MAC_ADDR, "SetMacAddress" }, 206 { 0, NULL } 207 }; 208 209 static device_method_t vtnet_methods[] = { 210 /* Device methods. */ 211 DEVMETHOD(device_probe, vtnet_probe), 212 DEVMETHOD(device_attach, vtnet_attach), 213 DEVMETHOD(device_detach, vtnet_detach), 214 DEVMETHOD(device_suspend, vtnet_suspend), 215 DEVMETHOD(device_resume, vtnet_resume), 216 DEVMETHOD(device_shutdown, vtnet_shutdown), 217 218 DEVMETHOD_END 219 }; 220 221 static driver_t vtnet_driver = { 222 "vtnet", 223 vtnet_methods, 224 sizeof(struct vtnet_softc) 225 }; 226 227 static devclass_t vtnet_devclass; 228 229 DRIVER_MODULE(vtnet, virtio_pci, vtnet_driver, vtnet_devclass, NULL, NULL); 230 MODULE_VERSION(vtnet, 1); 231 MODULE_DEPEND(vtnet, virtio, 1, 1, 1); 232 233 static int 234 vtnet_probe(device_t dev) 235 { 236 if (virtio_get_device_type(dev) != VIRTIO_ID_NETWORK) 237 return (ENXIO); 238 239 device_set_desc(dev, "VirtIO Networking Adapter"); 240 241 return (BUS_PROBE_DEFAULT); 242 } 243 244 struct irqmap { 245 int irq; 246 driver_intr_t *handler; 247 }; 248 249 static int 250 vtnet_attach(device_t dev) 251 { 252 struct vtnet_softc *sc; 253 int i, error; 254 255 sc = device_get_softc(dev); 256 sc->vtnet_dev = dev; 257 258 lwkt_serialize_init(&sc->vtnet_slz); 259 callout_init(&sc->vtnet_tick_ch); 260 261 ifmedia_init(&sc->vtnet_media, IFM_IMASK, vtnet_ifmedia_upd, 262 vtnet_ifmedia_sts); 263 ifmedia_add(&sc->vtnet_media, VTNET_MEDIATYPE, 0, NULL); 264 ifmedia_set(&sc->vtnet_media, VTNET_MEDIATYPE); 265 266 vtnet_add_statistics(sc); 267 SLIST_INIT(&sc->vtnet_txhdr_free); 268 269 /* Register our feature descriptions. */ 270 virtio_set_feature_desc(dev, vtnet_feature_desc); 271 vtnet_negotiate_features(sc); 272 273 if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC)) 274 sc->vtnet_flags |= VTNET_FLAG_INDIRECT; 275 276 if (virtio_with_feature(dev, VIRTIO_NET_F_MAC)) { 277 /* This feature should always be negotiated. */ 278 sc->vtnet_flags |= VTNET_FLAG_MAC; 279 } 280 281 if (virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF)) { 282 sc->vtnet_flags |= VTNET_FLAG_MRG_RXBUFS; 283 sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf); 284 } else { 285 sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr); 286 } 287 288 sc->vtnet_rx_mbuf_size = MCLBYTES; 289 sc->vtnet_rx_mbuf_count = VTNET_NEEDED_RX_MBUFS(sc); 290 291 if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VQ)) { 292 sc->vtnet_flags |= VTNET_FLAG_CTRL_VQ; 293 294 if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_RX)) 295 sc->vtnet_flags |= VTNET_FLAG_CTRL_RX; 296 if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VLAN)) 297 sc->vtnet_flags |= VTNET_FLAG_VLAN_FILTER; 298 if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_MAC_ADDR) && 299 virtio_with_feature(dev, VIRTIO_NET_F_CTRL_RX)) 300 sc->vtnet_flags |= VTNET_FLAG_CTRL_MAC; 301 } 302 303 error = vtnet_alloc_intrs(sc); 304 if (error) { 305 device_printf(dev, "cannot allocate interrupts\n"); 306 goto fail; 307 } 308 309 error = vtnet_alloc_virtqueues(sc); 310 if (error) { 311 device_printf(dev, "cannot allocate virtqueues\n"); 312 goto fail; 313 } 314 315 /* XXX Separate function */ 316 struct irqmap info[2]; 317 318 /* Possible "Virtqueue <-> IRQ" configurations */ 319 switch (sc->vtnet_nintr) { 320 case 1: 321 info[0] = (struct irqmap){0, vtnet_rx_vq_intr}; 322 info[1] = (struct irqmap){0, vtnet_tx_vq_intr}; 323 break; 324 case 2: 325 if (virtio_with_feature(dev, VIRTIO_NET_F_STATUS)) { 326 info[0] = (struct irqmap){1, vtnet_rx_vq_intr}; 327 } else { 328 info[0] = (struct irqmap){0, vtnet_rx_vq_intr}; 329 } 330 info[1] = (struct irqmap){1, vtnet_tx_vq_intr}; 331 break; 332 case 3: 333 info[0] = (struct irqmap){1, vtnet_rx_vq_intr}; 334 info[1] = (struct irqmap){2, vtnet_tx_vq_intr}; 335 break; 336 default: 337 device_printf(dev, "Invalid interrupt vector count: %d\n", 338 sc->vtnet_nintr); 339 goto fail; 340 } 341 for (i = 0; i < 2; i++) { 342 error = virtio_bind_intr(dev, info[i].irq, i, 343 info[i].handler, sc); 344 if (error) { 345 device_printf(dev, "cannot bind virtqueue IRQs\n"); 346 goto fail; 347 } 348 } 349 if (virtio_with_feature(dev, VIRTIO_NET_F_STATUS)) { 350 error = virtio_bind_intr(dev, 0, -1, vtnet_config_intr, sc); 351 if (error) { 352 device_printf(dev, "cannot bind config_change IRQ\n"); 353 goto fail; 354 } 355 } 356 357 /* Read (or generate) the MAC address for the adapter. */ 358 vtnet_get_hwaddr(sc); 359 360 error = vtnet_setup_interface(sc); 361 if (error) { 362 device_printf(dev, "cannot setup interface\n"); 363 goto fail; 364 } 365 366 TASK_INIT(&sc->vtnet_cfgchg_task, 0, vtnet_config_change_task, sc); 367 368 for (i = 0; i < sc->vtnet_nintr; i++) { 369 error = virtio_setup_intr(dev, i, &sc->vtnet_slz); 370 if (error) { 371 device_printf(dev, "cannot setup virtqueue " 372 "interrupts\n"); 373 ether_ifdetach(sc->vtnet_ifp); 374 goto fail; 375 } 376 } 377 378 if ((sc->vtnet_flags & VTNET_FLAG_MAC) == 0) { 379 lwkt_serialize_enter(&sc->vtnet_slz); 380 vtnet_set_hwaddr(sc); 381 lwkt_serialize_exit(&sc->vtnet_slz); 382 } 383 384 /* 385 * Device defaults to promiscuous mode for backwards 386 * compatibility. Turn it off if possible. 387 */ 388 if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) { 389 lwkt_serialize_enter(&sc->vtnet_slz); 390 if (vtnet_set_promisc(sc, 0) != 0) { 391 sc->vtnet_ifp->if_flags |= IFF_PROMISC; 392 device_printf(dev, 393 "cannot disable promiscuous mode\n"); 394 } 395 lwkt_serialize_exit(&sc->vtnet_slz); 396 } else 397 sc->vtnet_ifp->if_flags |= IFF_PROMISC; 398 399 fail: 400 if (error) 401 vtnet_detach(dev); 402 403 return (error); 404 } 405 406 static int 407 vtnet_detach(device_t dev) 408 { 409 struct vtnet_softc *sc; 410 struct ifnet *ifp; 411 int i; 412 413 sc = device_get_softc(dev); 414 ifp = sc->vtnet_ifp; 415 416 for (i = 0; i < sc->vtnet_nintr; i++) 417 virtio_teardown_intr(dev, i); 418 419 if (device_is_attached(dev)) { 420 lwkt_serialize_enter(&sc->vtnet_slz); 421 vtnet_stop(sc); 422 lwkt_serialize_exit(&sc->vtnet_slz); 423 424 callout_stop(&sc->vtnet_tick_ch); 425 taskqueue_drain(taskqueue_swi, &sc->vtnet_cfgchg_task); 426 427 ether_ifdetach(ifp); 428 } 429 430 if (sc->vtnet_vlan_attach != NULL) { 431 EVENTHANDLER_DEREGISTER(vlan_config, sc->vtnet_vlan_attach); 432 sc->vtnet_vlan_attach = NULL; 433 } 434 if (sc->vtnet_vlan_detach != NULL) { 435 EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vtnet_vlan_detach); 436 sc->vtnet_vlan_detach = NULL; 437 } 438 439 if (ifp) { 440 if_free(ifp); 441 sc->vtnet_ifp = NULL; 442 } 443 444 if (sc->vtnet_rx_vq != NULL) 445 vtnet_free_rx_mbufs(sc); 446 if (sc->vtnet_tx_vq != NULL) 447 vtnet_free_tx_mbufs(sc); 448 if (sc->vtnet_ctrl_vq != NULL) 449 vtnet_free_ctrl_vq(sc); 450 451 if (sc->vtnet_txhdrarea != NULL) { 452 contigfree(sc->vtnet_txhdrarea, 453 sc->vtnet_txhdrcount * sizeof(struct vtnet_tx_header), 454 M_VTNET); 455 sc->vtnet_txhdrarea = NULL; 456 } 457 SLIST_INIT(&sc->vtnet_txhdr_free); 458 if (sc->vtnet_macfilter != NULL) { 459 contigfree(sc->vtnet_macfilter, 460 sizeof(struct vtnet_mac_filter), M_DEVBUF); 461 sc->vtnet_macfilter = NULL; 462 } 463 464 ifmedia_removeall(&sc->vtnet_media); 465 466 return (0); 467 } 468 469 static int 470 vtnet_suspend(device_t dev) 471 { 472 struct vtnet_softc *sc; 473 474 sc = device_get_softc(dev); 475 476 lwkt_serialize_enter(&sc->vtnet_slz); 477 vtnet_stop(sc); 478 sc->vtnet_flags |= VTNET_FLAG_SUSPENDED; 479 lwkt_serialize_exit(&sc->vtnet_slz); 480 481 return (0); 482 } 483 484 static int 485 vtnet_resume(device_t dev) 486 { 487 struct vtnet_softc *sc; 488 struct ifnet *ifp; 489 490 sc = device_get_softc(dev); 491 ifp = sc->vtnet_ifp; 492 493 lwkt_serialize_enter(&sc->vtnet_slz); 494 if (ifp->if_flags & IFF_UP) 495 vtnet_init_locked(sc); 496 sc->vtnet_flags &= ~VTNET_FLAG_SUSPENDED; 497 lwkt_serialize_exit(&sc->vtnet_slz); 498 499 return (0); 500 } 501 502 static int 503 vtnet_shutdown(device_t dev) 504 { 505 506 /* 507 * Suspend already does all of what we need to 508 * do here; we just never expect to be resumed. 509 */ 510 return (vtnet_suspend(dev)); 511 } 512 513 static void 514 vtnet_negotiate_features(struct vtnet_softc *sc) 515 { 516 device_t dev; 517 uint64_t mask, features; 518 519 dev = sc->vtnet_dev; 520 mask = 0; 521 522 if (vtnet_csum_disable) 523 mask |= VIRTIO_NET_F_CSUM | VIRTIO_NET_F_GUEST_CSUM; 524 525 /* 526 * XXX DragonFly doesn't support receive checksum offload for ipv6 yet, 527 * hence always disable the virtio feature for now. 528 * XXX We need to support the DynOffload feature, in order to 529 * dynamically enable/disable this feature. 530 */ 531 mask |= VIRTIO_NET_F_GUEST_CSUM; 532 533 /* 534 * TSO is only available when the tx checksum offload feature is also 535 * negotiated. 536 */ 537 if (vtnet_csum_disable || vtnet_tso_disable) 538 mask |= VIRTIO_NET_F_HOST_TSO4 | VIRTIO_NET_F_HOST_TSO6 | 539 VIRTIO_NET_F_HOST_ECN; 540 541 if (vtnet_lro_disable) 542 mask |= VTNET_LRO_FEATURES; 543 544 features = VTNET_FEATURES & ~mask; 545 features |= VIRTIO_F_NOTIFY_ON_EMPTY; 546 features |= VIRTIO_F_ANY_LAYOUT; 547 sc->vtnet_features = virtio_negotiate_features(dev, features); 548 549 if (virtio_with_feature(dev, VTNET_LRO_FEATURES) && 550 virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF) == 0) { 551 /* 552 * LRO without mergeable buffers requires special care. This 553 * is not ideal because every receive buffer must be large 554 * enough to hold the maximum TCP packet, the Ethernet header, 555 * and the header. This requires up to 34 descriptors with 556 * MCLBYTES clusters. If we do not have indirect descriptors, 557 * LRO is disabled since the virtqueue will not contain very 558 * many receive buffers. 559 */ 560 if (!virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC)) { 561 device_printf(dev, 562 "LRO disabled due to both mergeable buffers and " 563 "indirect descriptors not negotiated\n"); 564 565 features &= ~VTNET_LRO_FEATURES; 566 sc->vtnet_features = 567 virtio_negotiate_features(dev, features); 568 } else 569 sc->vtnet_flags |= VTNET_FLAG_LRO_NOMRG; 570 } 571 } 572 573 static int 574 vtnet_alloc_intrs(struct vtnet_softc *sc) 575 { 576 int cnt, error; 577 int intrcount = virtio_intr_count(sc->vtnet_dev); 578 int i; 579 int use_config; 580 581 if (virtio_with_feature(sc->vtnet_dev, VIRTIO_NET_F_STATUS)) { 582 use_config = 1; 583 /* We can use a maximum of 3 interrupt vectors. */ 584 intrcount = imin(intrcount, 3); 585 } else { 586 /* We can use a maximum of 2 interrupt vectors. */ 587 intrcount = imin(intrcount, 2); 588 } 589 590 if (intrcount < 1) 591 return (ENXIO); 592 593 /* 594 * XXX We should explicitly set the cpus for the rx/tx threads, to 595 * only use cpus, where the network stack is running. 596 */ 597 for (i = 0; i < intrcount; i++) 598 sc->vtnet_cpus[i] = -1; 599 600 cnt = intrcount; 601 error = virtio_intr_alloc(sc->vtnet_dev, &cnt, use_config, 602 sc->vtnet_cpus); 603 if (error != 0) { 604 virtio_intr_release(sc->vtnet_dev); 605 return (error); 606 } 607 sc->vtnet_nintr = cnt; 608 609 return (0); 610 } 611 612 static int 613 vtnet_alloc_virtqueues(struct vtnet_softc *sc) 614 { 615 device_t dev; 616 struct vq_alloc_info vq_info[3]; 617 int nvqs; 618 619 dev = sc->vtnet_dev; 620 nvqs = 2; 621 622 /* 623 * Indirect descriptors are not needed for the Rx 624 * virtqueue when mergeable buffers are negotiated. 625 * The header is placed inline with the data, not 626 * in a separate descriptor, and mbuf clusters are 627 * always physically contiguous. 628 */ 629 if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) { 630 sc->vtnet_rx_nsegs = (sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG) ? 631 VTNET_MAX_RX_SEGS : VTNET_MIN_RX_SEGS; 632 } else 633 sc->vtnet_rx_nsegs = VTNET_MRG_RX_SEGS; 634 635 if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4) || 636 virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6)) 637 sc->vtnet_tx_nsegs = VTNET_MAX_TX_SEGS; 638 else 639 sc->vtnet_tx_nsegs = VTNET_MIN_TX_SEGS; 640 641 VQ_ALLOC_INFO_INIT(&vq_info[0], sc->vtnet_rx_nsegs, &sc->vtnet_rx_vq, 642 "%s receive", device_get_nameunit(dev)); 643 644 VQ_ALLOC_INFO_INIT(&vq_info[1], sc->vtnet_tx_nsegs, &sc->vtnet_tx_vq, 645 "%s transmit", device_get_nameunit(dev)); 646 647 if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) { 648 nvqs++; 649 650 VQ_ALLOC_INFO_INIT(&vq_info[2], 0, &sc->vtnet_ctrl_vq, 651 "%s control", device_get_nameunit(dev)); 652 } 653 654 return (virtio_alloc_virtqueues(dev, nvqs, vq_info)); 655 } 656 657 static int 658 vtnet_setup_interface(struct vtnet_softc *sc) 659 { 660 device_t dev; 661 struct ifnet *ifp; 662 int i; 663 664 dev = sc->vtnet_dev; 665 666 ifp = sc->vtnet_ifp = if_alloc(IFT_ETHER); 667 if (ifp == NULL) { 668 device_printf(dev, "cannot allocate ifnet structure\n"); 669 return (ENOSPC); 670 } 671 672 ifp->if_softc = sc; 673 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 674 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 675 ifp->if_init = vtnet_init; 676 ifp->if_start = vtnet_start; 677 ifp->if_ioctl = vtnet_ioctl; 678 679 sc->vtnet_rx_process_limit = virtqueue_size(sc->vtnet_rx_vq); 680 sc->vtnet_tx_size = virtqueue_size(sc->vtnet_tx_vq); 681 if (sc->vtnet_flags & VTNET_FLAG_INDIRECT) 682 sc->vtnet_txhdrcount = sc->vtnet_tx_size; 683 else 684 sc->vtnet_txhdrcount = (sc->vtnet_tx_size / 2) + 1; 685 sc->vtnet_txhdrarea = contigmalloc( 686 sc->vtnet_txhdrcount * sizeof(struct vtnet_tx_header), 687 M_VTNET, M_WAITOK, 0, BUS_SPACE_MAXADDR, 4, 0); 688 if (sc->vtnet_txhdrarea == NULL) { 689 device_printf(dev, "cannot contigmalloc the tx headers\n"); 690 return (ENOMEM); 691 } 692 for (i = 0; i < sc->vtnet_txhdrcount; i++) 693 vtnet_enqueue_txhdr(sc, &sc->vtnet_txhdrarea[i]); 694 sc->vtnet_macfilter = contigmalloc( 695 sizeof(struct vtnet_mac_filter), 696 M_DEVBUF, M_WAITOK, 0, BUS_SPACE_MAXADDR, 4, 0); 697 if (sc->vtnet_macfilter == NULL) { 698 device_printf(dev, 699 "cannot contigmalloc the mac filter table\n"); 700 return (ENOMEM); 701 } 702 ifq_set_maxlen(&ifp->if_snd, sc->vtnet_tx_size - 1); 703 ifq_set_ready(&ifp->if_snd); 704 705 ether_ifattach(ifp, sc->vtnet_hwaddr, NULL); 706 707 /* The Tx IRQ is currently always the last allocated interrupt. */ 708 ifq_set_cpuid(&ifp->if_snd, sc->vtnet_cpus[sc->vtnet_nintr - 1]); 709 710 /* Tell the upper layer(s) we support long frames. */ 711 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 712 ifp->if_capabilities |= IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU; 713 714 if (virtio_with_feature(dev, VIRTIO_NET_F_CSUM)) { 715 ifp->if_capabilities |= IFCAP_TXCSUM; 716 717 if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4)) 718 ifp->if_capabilities |= IFCAP_TSO4; 719 if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6)) 720 ifp->if_capabilities |= IFCAP_TSO6; 721 if (ifp->if_capabilities & IFCAP_TSO) 722 ifp->if_capabilities |= IFCAP_VLAN_HWTSO; 723 724 if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_ECN)) 725 sc->vtnet_flags |= VTNET_FLAG_TSO_ECN; 726 } 727 728 if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_CSUM)) 729 ifp->if_capabilities |= IFCAP_RXCSUM; 730 731 #if 0 /* IFCAP_LRO doesn't exist in DragonFly. */ 732 if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO4) || 733 virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO6)) 734 ifp->if_capabilities |= IFCAP_LRO; 735 #endif 736 737 if ((ifp->if_capabilities & IFCAP_HWCSUM) == IFCAP_HWCSUM) { 738 /* 739 * VirtIO does not support VLAN tagging, but we can fake 740 * it by inserting and removing the 802.1Q header during 741 * transmit and receive. We are then able to do checksum 742 * offloading of VLAN frames. 743 */ 744 ifp->if_capabilities |= 745 IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM; 746 } 747 748 ifp->if_capenable = ifp->if_capabilities; 749 750 /* 751 * Capabilities after here are not enabled by default. 752 */ 753 754 if (sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER) { 755 ifp->if_capabilities |= IFCAP_VLAN_HWFILTER; 756 757 sc->vtnet_vlan_attach = EVENTHANDLER_REGISTER(vlan_config, 758 vtnet_register_vlan, sc, EVENTHANDLER_PRI_FIRST); 759 sc->vtnet_vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig, 760 vtnet_unregister_vlan, sc, EVENTHANDLER_PRI_FIRST); 761 } 762 763 return (0); 764 } 765 766 static void 767 vtnet_set_hwaddr(struct vtnet_softc *sc) 768 { 769 device_t dev; 770 771 dev = sc->vtnet_dev; 772 773 if ((sc->vtnet_flags & VTNET_FLAG_CTRL_MAC) && 774 (sc->vtnet_flags & VTNET_FLAG_CTRL_RX)) { 775 if (vtnet_ctrl_mac_cmd(sc, sc->vtnet_hwaddr) != 0) 776 device_printf(dev, "unable to set MAC address\n"); 777 } else if (sc->vtnet_flags & VTNET_FLAG_MAC) { 778 virtio_write_device_config(dev, 779 offsetof(struct virtio_net_config, mac), 780 sc->vtnet_hwaddr, ETHER_ADDR_LEN); 781 } 782 } 783 784 static void 785 vtnet_get_hwaddr(struct vtnet_softc *sc) 786 { 787 device_t dev; 788 789 dev = sc->vtnet_dev; 790 791 if ((sc->vtnet_flags & VTNET_FLAG_MAC) == 0) { 792 /* 793 * Generate a random locally administered unicast address. 794 * 795 * It would be nice to generate the same MAC address across 796 * reboots, but it seems all the hosts currently available 797 * support the MAC feature, so this isn't too important. 798 */ 799 sc->vtnet_hwaddr[0] = 0xB2; 800 karc4rand(&sc->vtnet_hwaddr[1], ETHER_ADDR_LEN - 1); 801 return; 802 } 803 804 virtio_read_device_config(dev, 805 offsetof(struct virtio_net_config, mac), 806 sc->vtnet_hwaddr, ETHER_ADDR_LEN); 807 } 808 809 static int 810 vtnet_is_link_up(struct vtnet_softc *sc) 811 { 812 device_t dev; 813 struct ifnet *ifp; 814 uint16_t status; 815 816 dev = sc->vtnet_dev; 817 ifp = sc->vtnet_ifp; 818 819 ASSERT_SERIALIZED(&sc->vtnet_slz); 820 821 if (virtio_with_feature(dev, VIRTIO_NET_F_STATUS)) { 822 status = virtio_read_dev_config_2(dev, 823 offsetof(struct virtio_net_config, status)); 824 } else { 825 status = VIRTIO_NET_S_LINK_UP; 826 } 827 828 return ((status & VIRTIO_NET_S_LINK_UP) != 0); 829 } 830 831 static void 832 vtnet_update_link_status(struct vtnet_softc *sc) 833 { 834 device_t dev; 835 struct ifnet *ifp; 836 struct ifaltq_subque *ifsq; 837 int link; 838 839 dev = sc->vtnet_dev; 840 ifp = sc->vtnet_ifp; 841 ifsq = ifq_get_subq_default(&ifp->if_snd); 842 843 link = vtnet_is_link_up(sc); 844 845 if (link && ((sc->vtnet_flags & VTNET_FLAG_LINK) == 0)) { 846 sc->vtnet_flags |= VTNET_FLAG_LINK; 847 if (bootverbose) 848 device_printf(dev, "Link is up\n"); 849 ifp->if_link_state = LINK_STATE_UP; 850 if_link_state_change(ifp); 851 if (!ifsq_is_empty(ifsq)) 852 vtnet_start_locked(ifp, ifsq); 853 } else if (!link && (sc->vtnet_flags & VTNET_FLAG_LINK)) { 854 sc->vtnet_flags &= ~VTNET_FLAG_LINK; 855 if (bootverbose) 856 device_printf(dev, "Link is down\n"); 857 858 ifp->if_link_state = LINK_STATE_DOWN; 859 if_link_state_change(ifp); 860 } 861 } 862 863 #if 0 864 static void 865 vtnet_watchdog(struct vtnet_softc *sc) 866 { 867 struct ifnet *ifp; 868 869 ifp = sc->vtnet_ifp; 870 871 #ifdef VTNET_TX_INTR_MODERATION 872 vtnet_txeof(sc); 873 #endif 874 875 if (sc->vtnet_watchdog_timer == 0 || --sc->vtnet_watchdog_timer) 876 return; 877 878 if_printf(ifp, "watchdog timeout -- resetting\n"); 879 #ifdef VTNET_DEBUG 880 virtqueue_dump(sc->vtnet_tx_vq); 881 #endif 882 ifp->if_oerrors++; 883 ifp->if_flags &= ~IFF_RUNNING; 884 vtnet_init_locked(sc); 885 } 886 #endif 887 888 static void 889 vtnet_config_change_task(void *arg, int pending) 890 { 891 struct vtnet_softc *sc; 892 893 sc = arg; 894 895 lwkt_serialize_enter(&sc->vtnet_slz); 896 vtnet_update_link_status(sc); 897 lwkt_serialize_exit(&sc->vtnet_slz); 898 } 899 900 static int 901 vtnet_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data,struct ucred *cr) 902 { 903 struct vtnet_softc *sc; 904 struct ifreq *ifr; 905 int reinit, mask, error; 906 907 sc = ifp->if_softc; 908 ifr = (struct ifreq *) data; 909 reinit = 0; 910 error = 0; 911 912 switch (cmd) { 913 case SIOCSIFMTU: 914 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > VTNET_MAX_MTU) 915 error = EINVAL; 916 else if (ifp->if_mtu != ifr->ifr_mtu) { 917 lwkt_serialize_enter(&sc->vtnet_slz); 918 error = vtnet_change_mtu(sc, ifr->ifr_mtu); 919 lwkt_serialize_exit(&sc->vtnet_slz); 920 } 921 break; 922 923 case SIOCSIFFLAGS: 924 lwkt_serialize_enter(&sc->vtnet_slz); 925 if ((ifp->if_flags & IFF_UP) == 0) { 926 if (ifp->if_flags & IFF_RUNNING) 927 vtnet_stop(sc); 928 } else if (ifp->if_flags & IFF_RUNNING) { 929 if ((ifp->if_flags ^ sc->vtnet_if_flags) & 930 (IFF_PROMISC | IFF_ALLMULTI)) { 931 if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) 932 vtnet_rx_filter(sc); 933 else 934 error = ENOTSUP; 935 } 936 } else 937 vtnet_init_locked(sc); 938 939 if (error == 0) 940 sc->vtnet_if_flags = ifp->if_flags; 941 lwkt_serialize_exit(&sc->vtnet_slz); 942 break; 943 944 case SIOCADDMULTI: 945 case SIOCDELMULTI: 946 lwkt_serialize_enter(&sc->vtnet_slz); 947 if ((sc->vtnet_flags & VTNET_FLAG_CTRL_RX) && 948 (ifp->if_flags & IFF_RUNNING)) 949 vtnet_rx_filter_mac(sc); 950 lwkt_serialize_exit(&sc->vtnet_slz); 951 break; 952 953 case SIOCSIFMEDIA: 954 case SIOCGIFMEDIA: 955 error = ifmedia_ioctl(ifp, ifr, &sc->vtnet_media, cmd); 956 break; 957 958 case SIOCSIFCAP: 959 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 960 961 lwkt_serialize_enter(&sc->vtnet_slz); 962 963 if (mask & IFCAP_TXCSUM) { 964 ifp->if_capenable ^= IFCAP_TXCSUM; 965 if (ifp->if_capenable & IFCAP_TXCSUM) 966 ifp->if_hwassist |= VTNET_CSUM_OFFLOAD; 967 else 968 ifp->if_hwassist &= ~VTNET_CSUM_OFFLOAD; 969 } 970 971 if (mask & IFCAP_TSO4) { 972 ifp->if_capenable ^= IFCAP_TSO4; 973 if (ifp->if_capenable & IFCAP_TSO4) 974 ifp->if_hwassist |= CSUM_TSO; 975 else 976 ifp->if_hwassist &= ~CSUM_TSO; 977 } 978 979 if (mask & IFCAP_RXCSUM) { 980 ifp->if_capenable ^= IFCAP_RXCSUM; 981 reinit = 1; 982 } 983 984 #if 0 /* IFCAP_LRO doesn't exist in DragonFly. */ 985 if (mask & IFCAP_LRO) { 986 ifp->if_capenable ^= IFCAP_LRO; 987 reinit = 1; 988 } 989 #endif 990 991 if (mask & IFCAP_VLAN_HWFILTER) { 992 ifp->if_capenable ^= IFCAP_VLAN_HWFILTER; 993 reinit = 1; 994 } 995 996 if (mask & IFCAP_VLAN_HWTSO) 997 ifp->if_capenable ^= IFCAP_VLAN_HWTSO; 998 999 if (mask & IFCAP_VLAN_HWTAGGING) 1000 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 1001 1002 if (reinit && (ifp->if_flags & IFF_RUNNING)) { 1003 ifp->if_flags &= ~IFF_RUNNING; 1004 vtnet_init_locked(sc); 1005 } 1006 //VLAN_CAPABILITIES(ifp); 1007 1008 lwkt_serialize_exit(&sc->vtnet_slz); 1009 break; 1010 1011 default: 1012 error = ether_ioctl(ifp, cmd, data); 1013 break; 1014 } 1015 1016 return (error); 1017 } 1018 1019 static int 1020 vtnet_change_mtu(struct vtnet_softc *sc, int new_mtu) 1021 { 1022 struct ifnet *ifp; 1023 int new_frame_size, clsize; 1024 1025 ifp = sc->vtnet_ifp; 1026 1027 if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) { 1028 new_frame_size = sizeof(struct vtnet_rx_header) + 1029 sizeof(struct ether_vlan_header) + new_mtu; 1030 1031 if (new_frame_size > MJUM9BYTES) 1032 return (EINVAL); 1033 1034 if (new_frame_size <= MCLBYTES) 1035 clsize = MCLBYTES; 1036 else 1037 clsize = MJUM9BYTES; 1038 } else { 1039 new_frame_size = sizeof(struct virtio_net_hdr_mrg_rxbuf) + 1040 sizeof(struct ether_vlan_header) + new_mtu; 1041 1042 if (new_frame_size <= MCLBYTES) 1043 clsize = MCLBYTES; 1044 else 1045 clsize = MJUMPAGESIZE; 1046 } 1047 1048 sc->vtnet_rx_mbuf_size = clsize; 1049 sc->vtnet_rx_mbuf_count = VTNET_NEEDED_RX_MBUFS(sc); 1050 KASSERT(sc->vtnet_rx_mbuf_count < VTNET_MAX_RX_SEGS, 1051 ("too many rx mbufs: %d", sc->vtnet_rx_mbuf_count)); 1052 1053 ifp->if_mtu = new_mtu; 1054 1055 if (ifp->if_flags & IFF_RUNNING) { 1056 ifp->if_flags &= ~IFF_RUNNING; 1057 vtnet_init_locked(sc); 1058 } 1059 1060 return (0); 1061 } 1062 1063 static int 1064 vtnet_init_rx_vq(struct vtnet_softc *sc) 1065 { 1066 struct virtqueue *vq; 1067 int nbufs, error; 1068 1069 vq = sc->vtnet_rx_vq; 1070 nbufs = 0; 1071 error = ENOSPC; 1072 1073 while (!virtqueue_full(vq)) { 1074 if ((error = vtnet_newbuf(sc)) != 0) 1075 break; 1076 nbufs++; 1077 } 1078 1079 if (nbufs > 0) { 1080 virtqueue_notify(vq, &sc->vtnet_slz); 1081 1082 /* 1083 * EMSGSIZE signifies the virtqueue did not have enough 1084 * entries available to hold the last mbuf. This is not 1085 * an error. We should not get ENOSPC since we check if 1086 * the virtqueue is full before attempting to add a 1087 * buffer. 1088 */ 1089 if (error == EMSGSIZE) 1090 error = 0; 1091 } 1092 1093 return (error); 1094 } 1095 1096 static void 1097 vtnet_free_rx_mbufs(struct vtnet_softc *sc) 1098 { 1099 struct virtqueue *vq; 1100 struct mbuf *m; 1101 int last; 1102 1103 vq = sc->vtnet_rx_vq; 1104 last = 0; 1105 1106 while ((m = virtqueue_drain(vq, &last)) != NULL) 1107 m_freem(m); 1108 1109 KASSERT(virtqueue_empty(vq), ("mbufs remaining in Rx Vq")); 1110 } 1111 1112 static void 1113 vtnet_free_tx_mbufs(struct vtnet_softc *sc) 1114 { 1115 struct virtqueue *vq; 1116 struct vtnet_tx_header *txhdr; 1117 int last; 1118 1119 vq = sc->vtnet_tx_vq; 1120 last = 0; 1121 1122 while ((txhdr = virtqueue_drain(vq, &last)) != NULL) { 1123 m_freem(txhdr->vth_mbuf); 1124 vtnet_enqueue_txhdr(sc, txhdr); 1125 } 1126 1127 KASSERT(virtqueue_empty(vq), ("mbufs remaining in Tx Vq")); 1128 } 1129 1130 static void 1131 vtnet_free_ctrl_vq(struct vtnet_softc *sc) 1132 { 1133 /* 1134 * The control virtqueue is only polled, therefore 1135 * it should already be empty. 1136 */ 1137 KASSERT(virtqueue_empty(sc->vtnet_ctrl_vq), 1138 ("Ctrl Vq not empty")); 1139 } 1140 1141 static struct mbuf * 1142 vtnet_alloc_rxbuf(struct vtnet_softc *sc, int nbufs, struct mbuf **m_tailp) 1143 { 1144 struct mbuf *m_head, *m_tail, *m; 1145 int i, clsize; 1146 1147 clsize = sc->vtnet_rx_mbuf_size; 1148 1149 /*use getcl instead of getjcl. see if_mxge.c comment line 2398*/ 1150 //m_head = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, clsize); 1151 m_head = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR ); 1152 if (m_head == NULL) 1153 goto fail; 1154 1155 m_head->m_len = clsize; 1156 m_tail = m_head; 1157 1158 if (nbufs > 1) { 1159 KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG, 1160 ("chained Rx mbuf requested without LRO_NOMRG")); 1161 1162 for (i = 0; i < nbufs - 1; i++) { 1163 //m = m_getjcl(M_DONTWAIT, MT_DATA, 0, clsize); 1164 m = m_getcl(M_NOWAIT, MT_DATA, 0); 1165 if (m == NULL) 1166 goto fail; 1167 1168 m->m_len = clsize; 1169 m_tail->m_next = m; 1170 m_tail = m; 1171 } 1172 } 1173 1174 if (m_tailp != NULL) 1175 *m_tailp = m_tail; 1176 1177 return (m_head); 1178 1179 fail: 1180 sc->vtnet_stats.mbuf_alloc_failed++; 1181 m_freem(m_head); 1182 1183 return (NULL); 1184 } 1185 1186 static int 1187 vtnet_replace_rxbuf(struct vtnet_softc *sc, struct mbuf *m0, int len0) 1188 { 1189 struct mbuf *m, *m_prev; 1190 struct mbuf *m_new, *m_tail; 1191 int len, clsize, nreplace, error; 1192 1193 m = m0; 1194 m_prev = NULL; 1195 len = len0; 1196 1197 m_tail = NULL; 1198 clsize = sc->vtnet_rx_mbuf_size; 1199 nreplace = 0; 1200 1201 if (m->m_next != NULL) 1202 KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG, 1203 ("chained Rx mbuf without LRO_NOMRG")); 1204 1205 /* 1206 * Since LRO_NOMRG mbuf chains are so large, we want to avoid 1207 * allocating an entire chain for each received frame. When 1208 * the received frame's length is less than that of the chain, 1209 * the unused mbufs are reassigned to the new chain. 1210 */ 1211 while (len > 0) { 1212 /* 1213 * Something is seriously wrong if we received 1214 * a frame larger than the mbuf chain. Drop it. 1215 */ 1216 if (m == NULL) { 1217 sc->vtnet_stats.rx_frame_too_large++; 1218 return (EMSGSIZE); 1219 } 1220 1221 KASSERT(m->m_len == clsize, 1222 ("mbuf length not expected cluster size: %d", 1223 m->m_len)); 1224 1225 m->m_len = MIN(m->m_len, len); 1226 len -= m->m_len; 1227 1228 m_prev = m; 1229 m = m->m_next; 1230 nreplace++; 1231 } 1232 1233 KASSERT(m_prev != NULL, ("m_prev == NULL")); 1234 KASSERT(nreplace <= sc->vtnet_rx_mbuf_count, 1235 ("too many replacement mbufs: %d/%d", nreplace, 1236 sc->vtnet_rx_mbuf_count)); 1237 1238 m_new = vtnet_alloc_rxbuf(sc, nreplace, &m_tail); 1239 if (m_new == NULL) { 1240 m_prev->m_len = clsize; 1241 return (ENOBUFS); 1242 } 1243 1244 /* 1245 * Move unused mbufs, if any, from the original chain 1246 * onto the end of the new chain. 1247 */ 1248 if (m_prev->m_next != NULL) { 1249 m_tail->m_next = m_prev->m_next; 1250 m_prev->m_next = NULL; 1251 } 1252 1253 error = vtnet_enqueue_rxbuf(sc, m_new); 1254 if (error) { 1255 /* 1256 * BAD! We could not enqueue the replacement mbuf chain. We 1257 * must restore the m0 chain to the original state if it was 1258 * modified so we can subsequently discard it. 1259 * 1260 * NOTE: The replacement is suppose to be an identical copy 1261 * to the one just dequeued so this is an unexpected error. 1262 */ 1263 sc->vtnet_stats.rx_enq_replacement_failed++; 1264 1265 if (m_tail->m_next != NULL) { 1266 m_prev->m_next = m_tail->m_next; 1267 m_tail->m_next = NULL; 1268 } 1269 1270 m_prev->m_len = clsize; 1271 m_freem(m_new); 1272 } 1273 1274 return (error); 1275 } 1276 1277 static int 1278 vtnet_newbuf(struct vtnet_softc *sc) 1279 { 1280 struct mbuf *m; 1281 int error; 1282 1283 m = vtnet_alloc_rxbuf(sc, sc->vtnet_rx_mbuf_count, NULL); 1284 if (m == NULL) 1285 return (ENOBUFS); 1286 1287 error = vtnet_enqueue_rxbuf(sc, m); 1288 if (error) 1289 m_freem(m); 1290 1291 return (error); 1292 } 1293 1294 static void 1295 vtnet_discard_merged_rxbuf(struct vtnet_softc *sc, int nbufs) 1296 { 1297 struct virtqueue *vq; 1298 struct mbuf *m; 1299 1300 vq = sc->vtnet_rx_vq; 1301 1302 while (--nbufs > 0) { 1303 if ((m = virtqueue_dequeue(vq, NULL)) == NULL) 1304 break; 1305 vtnet_discard_rxbuf(sc, m); 1306 } 1307 } 1308 1309 static void 1310 vtnet_discard_rxbuf(struct vtnet_softc *sc, struct mbuf *m) 1311 { 1312 int error; 1313 1314 /* 1315 * Requeue the discarded mbuf. This should always be 1316 * successful since it was just dequeued. 1317 */ 1318 error = vtnet_enqueue_rxbuf(sc, m); 1319 KASSERT(error == 0, ("cannot requeue discarded mbuf")); 1320 } 1321 1322 static int 1323 vtnet_enqueue_rxbuf(struct vtnet_softc *sc, struct mbuf *m) 1324 { 1325 struct sglist sg; 1326 struct sglist_seg segs[VTNET_MAX_RX_SEGS]; 1327 struct vtnet_rx_header *rxhdr; 1328 struct virtio_net_hdr *hdr; 1329 uint8_t *mdata; 1330 int offset, error; 1331 1332 ASSERT_SERIALIZED(&sc->vtnet_slz); 1333 if ((sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG) == 0) 1334 KASSERT(m->m_next == NULL, ("chained Rx mbuf")); 1335 1336 sglist_init(&sg, sc->vtnet_rx_nsegs, segs); 1337 1338 mdata = mtod(m, uint8_t *); 1339 offset = 0; 1340 1341 if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) { 1342 rxhdr = (struct vtnet_rx_header *) mdata; 1343 hdr = &rxhdr->vrh_hdr; 1344 offset += sizeof(struct vtnet_rx_header); 1345 1346 error = sglist_append(&sg, hdr, sc->vtnet_hdr_size); 1347 KASSERT(error == 0, ("cannot add header to sglist")); 1348 } 1349 1350 error = sglist_append(&sg, mdata + offset, m->m_len - offset); 1351 if (error) 1352 return (error); 1353 1354 if (m->m_next != NULL) { 1355 error = sglist_append_mbuf(&sg, m->m_next); 1356 if (error) 1357 return (error); 1358 } 1359 1360 return (virtqueue_enqueue(sc->vtnet_rx_vq, m, &sg, 0, sg.sg_nseg)); 1361 } 1362 1363 static void 1364 vtnet_vlan_tag_remove(struct mbuf *m) 1365 { 1366 struct ether_vlan_header *evl; 1367 1368 evl = mtod(m, struct ether_vlan_header *); 1369 1370 m->m_pkthdr.ether_vlantag = ntohs(evl->evl_tag); 1371 m->m_flags |= M_VLANTAG; 1372 1373 /* Strip the 802.1Q header. */ 1374 bcopy((char *) evl, (char *) evl + ETHER_VLAN_ENCAP_LEN, 1375 ETHER_HDR_LEN - ETHER_TYPE_LEN); 1376 m_adj(m, ETHER_VLAN_ENCAP_LEN); 1377 } 1378 1379 /* 1380 * Alternative method of doing receive checksum offloading. Rather 1381 * than parsing the received frame down to the IP header, use the 1382 * csum_offset to determine which CSUM_* flags are appropriate. We 1383 * can get by with doing this only because the checksum offsets are 1384 * unique for the things we care about. 1385 */ 1386 static int 1387 vtnet_rx_csum(struct vtnet_softc *sc, struct mbuf *m, 1388 struct virtio_net_hdr *hdr) 1389 { 1390 struct ether_header *eh; 1391 struct ether_vlan_header *evh; 1392 struct udphdr *udp; 1393 int csum_len; 1394 uint16_t eth_type; 1395 1396 csum_len = hdr->csum_start + hdr->csum_offset; 1397 1398 if (csum_len < sizeof(struct ether_header) + sizeof(struct ip)) 1399 return (1); 1400 if (m->m_len < csum_len) 1401 return (1); 1402 1403 eh = mtod(m, struct ether_header *); 1404 eth_type = ntohs(eh->ether_type); 1405 if (eth_type == ETHERTYPE_VLAN) { 1406 evh = mtod(m, struct ether_vlan_header *); 1407 eth_type = ntohs(evh->evl_proto); 1408 } 1409 1410 if (eth_type != ETHERTYPE_IP && eth_type != ETHERTYPE_IPV6) { 1411 sc->vtnet_stats.rx_csum_bad_ethtype++; 1412 return (1); 1413 } 1414 1415 /* Use the offset to determine the appropriate CSUM_* flags. */ 1416 switch (hdr->csum_offset) { 1417 case offsetof(struct udphdr, uh_sum): 1418 if (m->m_len < hdr->csum_start + sizeof(struct udphdr)) 1419 return (1); 1420 udp = (struct udphdr *)(mtod(m, uint8_t *) + hdr->csum_start); 1421 if (udp->uh_sum == 0) 1422 return (0); 1423 1424 /* FALLTHROUGH */ 1425 1426 case offsetof(struct tcphdr, th_sum): 1427 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 1428 m->m_pkthdr.csum_data = 0xFFFF; 1429 break; 1430 1431 default: 1432 sc->vtnet_stats.rx_csum_bad_offset++; 1433 return (1); 1434 } 1435 1436 sc->vtnet_stats.rx_csum_offloaded++; 1437 1438 return (0); 1439 } 1440 1441 static int 1442 vtnet_rxeof_merged(struct vtnet_softc *sc, struct mbuf *m_head, int nbufs) 1443 { 1444 struct ifnet *ifp; 1445 struct virtqueue *vq; 1446 struct mbuf *m, *m_tail; 1447 int len; 1448 1449 ifp = sc->vtnet_ifp; 1450 vq = sc->vtnet_rx_vq; 1451 m_tail = m_head; 1452 1453 while (--nbufs > 0) { 1454 m = virtqueue_dequeue(vq, &len); 1455 if (m == NULL) { 1456 ifp->if_ierrors++; 1457 goto fail; 1458 } 1459 1460 if (vtnet_newbuf(sc) != 0) { 1461 ifp->if_iqdrops++; 1462 vtnet_discard_rxbuf(sc, m); 1463 if (nbufs > 1) 1464 vtnet_discard_merged_rxbuf(sc, nbufs); 1465 goto fail; 1466 } 1467 1468 if (m->m_len < len) 1469 len = m->m_len; 1470 1471 m->m_len = len; 1472 m->m_flags &= ~M_PKTHDR; 1473 1474 m_head->m_pkthdr.len += len; 1475 m_tail->m_next = m; 1476 m_tail = m; 1477 } 1478 1479 return (0); 1480 1481 fail: 1482 sc->vtnet_stats.rx_mergeable_failed++; 1483 m_freem(m_head); 1484 1485 return (1); 1486 } 1487 1488 static int 1489 vtnet_rxeof(struct vtnet_softc *sc, int count, int *rx_npktsp) 1490 { 1491 struct virtio_net_hdr lhdr; 1492 struct ifnet *ifp; 1493 struct virtqueue *vq; 1494 struct mbuf *m; 1495 struct ether_header *eh; 1496 struct virtio_net_hdr *hdr; 1497 struct virtio_net_hdr_mrg_rxbuf *mhdr; 1498 int len, deq, nbufs, adjsz, rx_npkts; 1499 1500 ifp = sc->vtnet_ifp; 1501 vq = sc->vtnet_rx_vq; 1502 hdr = &lhdr; 1503 deq = 0; 1504 rx_npkts = 0; 1505 1506 ASSERT_SERIALIZED(&sc->vtnet_slz); 1507 1508 while (--count >= 0) { 1509 m = virtqueue_dequeue(vq, &len); 1510 if (m == NULL) 1511 break; 1512 deq++; 1513 1514 if (len < sc->vtnet_hdr_size + ETHER_HDR_LEN) { 1515 ifp->if_ierrors++; 1516 vtnet_discard_rxbuf(sc, m); 1517 continue; 1518 } 1519 1520 if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) { 1521 nbufs = 1; 1522 adjsz = sizeof(struct vtnet_rx_header); 1523 /* 1524 * Account for our pad between the header and 1525 * the actual start of the frame. 1526 */ 1527 len += VTNET_RX_HEADER_PAD; 1528 } else { 1529 mhdr = mtod(m, struct virtio_net_hdr_mrg_rxbuf *); 1530 nbufs = mhdr->num_buffers; 1531 adjsz = sizeof(struct virtio_net_hdr_mrg_rxbuf); 1532 } 1533 1534 if (vtnet_replace_rxbuf(sc, m, len) != 0) { 1535 ifp->if_iqdrops++; 1536 vtnet_discard_rxbuf(sc, m); 1537 if (nbufs > 1) 1538 vtnet_discard_merged_rxbuf(sc, nbufs); 1539 continue; 1540 } 1541 1542 m->m_pkthdr.len = len; 1543 m->m_pkthdr.rcvif = ifp; 1544 m->m_pkthdr.csum_flags = 0; 1545 1546 if (nbufs > 1) { 1547 if (vtnet_rxeof_merged(sc, m, nbufs) != 0) 1548 continue; 1549 } 1550 1551 ifp->if_ipackets++; 1552 1553 /* 1554 * Save copy of header before we strip it. For both mergeable 1555 * and non-mergeable, the VirtIO header is placed first in the 1556 * mbuf's data. We no longer need num_buffers, so always use a 1557 * virtio_net_hdr. 1558 */ 1559 memcpy(hdr, mtod(m, void *), sizeof(struct virtio_net_hdr)); 1560 m_adj(m, adjsz); 1561 1562 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) { 1563 eh = mtod(m, struct ether_header *); 1564 if (eh->ether_type == htons(ETHERTYPE_VLAN)) { 1565 vtnet_vlan_tag_remove(m); 1566 1567 /* 1568 * With the 802.1Q header removed, update the 1569 * checksum starting location accordingly. 1570 */ 1571 if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) 1572 hdr->csum_start -= 1573 ETHER_VLAN_ENCAP_LEN; 1574 } 1575 } 1576 1577 if (ifp->if_capenable & IFCAP_RXCSUM && 1578 hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { 1579 if (vtnet_rx_csum(sc, m, hdr) != 0) 1580 sc->vtnet_stats.rx_csum_failed++; 1581 } 1582 1583 lwkt_serialize_exit(&sc->vtnet_slz); 1584 rx_npkts++; 1585 ifp->if_input(ifp, m, NULL, -1); 1586 lwkt_serialize_enter(&sc->vtnet_slz); 1587 1588 /* 1589 * The interface may have been stopped while we were 1590 * passing the packet up the network stack. 1591 */ 1592 if ((ifp->if_flags & IFF_RUNNING) == 0) 1593 break; 1594 } 1595 1596 virtqueue_notify(vq, &sc->vtnet_slz); 1597 1598 if (rx_npktsp != NULL) 1599 *rx_npktsp = rx_npkts; 1600 1601 return (count > 0 ? 0 : EAGAIN); 1602 } 1603 1604 static void 1605 vtnet_rx_intr_task(void *arg) 1606 { 1607 struct vtnet_softc *sc; 1608 struct ifnet *ifp; 1609 int more; 1610 1611 sc = arg; 1612 ifp = sc->vtnet_ifp; 1613 1614 next: 1615 // lwkt_serialize_enter(&sc->vtnet_slz); 1616 1617 if ((ifp->if_flags & IFF_RUNNING) == 0) { 1618 vtnet_enable_rx_intr(sc); 1619 // lwkt_serialize_exit(&sc->vtnet_slz); 1620 return; 1621 } 1622 1623 more = vtnet_rxeof(sc, sc->vtnet_rx_process_limit, NULL); 1624 if (!more && vtnet_enable_rx_intr(sc) != 0) { 1625 vtnet_disable_rx_intr(sc); 1626 more = 1; 1627 } 1628 1629 // lwkt_serialize_exit(&sc->vtnet_slz); 1630 1631 if (more) { 1632 sc->vtnet_stats.rx_task_rescheduled++; 1633 goto next; 1634 } 1635 } 1636 1637 static void 1638 vtnet_rx_vq_intr(void *xsc) 1639 { 1640 struct vtnet_softc *sc; 1641 1642 sc = xsc; 1643 1644 if (!virtqueue_pending(sc->vtnet_rx_vq)) 1645 return; 1646 1647 vtnet_disable_rx_intr(sc); 1648 vtnet_rx_intr_task(sc); 1649 } 1650 1651 static void 1652 vtnet_enqueue_txhdr(struct vtnet_softc *sc, struct vtnet_tx_header *txhdr) 1653 { 1654 bzero(txhdr, sizeof(*txhdr)); 1655 SLIST_INSERT_HEAD(&sc->vtnet_txhdr_free, txhdr, link); 1656 } 1657 1658 static void 1659 vtnet_txeof(struct vtnet_softc *sc) 1660 { 1661 struct virtqueue *vq; 1662 struct ifnet *ifp; 1663 struct vtnet_tx_header *txhdr; 1664 int deq; 1665 1666 vq = sc->vtnet_tx_vq; 1667 ifp = sc->vtnet_ifp; 1668 deq = 0; 1669 1670 ASSERT_SERIALIZED(&sc->vtnet_slz); 1671 1672 while ((txhdr = virtqueue_dequeue(vq, NULL)) != NULL) { 1673 deq++; 1674 ifp->if_opackets++; 1675 m_freem(txhdr->vth_mbuf); 1676 vtnet_enqueue_txhdr(sc, txhdr); 1677 } 1678 1679 if (deq > 0) { 1680 ifq_clr_oactive(&ifp->if_snd); 1681 if (virtqueue_empty(vq)) 1682 sc->vtnet_watchdog_timer = 0; 1683 } 1684 } 1685 1686 static struct mbuf * 1687 vtnet_tx_offload(struct vtnet_softc *sc, struct mbuf *m, 1688 struct virtio_net_hdr *hdr) 1689 { 1690 struct ifnet *ifp; 1691 struct ether_header *eh; 1692 struct ether_vlan_header *evh; 1693 struct ip *ip; 1694 struct ip6_hdr *ip6; 1695 struct tcphdr *tcp; 1696 int ip_offset; 1697 uint16_t eth_type, csum_start; 1698 uint8_t ip_proto, gso_type; 1699 1700 ifp = sc->vtnet_ifp; 1701 M_ASSERTPKTHDR(m); 1702 1703 ip_offset = sizeof(struct ether_header); 1704 if (m->m_len < ip_offset) { 1705 if ((m = m_pullup(m, ip_offset)) == NULL) 1706 return (NULL); 1707 } 1708 1709 eh = mtod(m, struct ether_header *); 1710 eth_type = ntohs(eh->ether_type); 1711 if (eth_type == ETHERTYPE_VLAN) { 1712 ip_offset = sizeof(struct ether_vlan_header); 1713 if (m->m_len < ip_offset) { 1714 if ((m = m_pullup(m, ip_offset)) == NULL) 1715 return (NULL); 1716 } 1717 evh = mtod(m, struct ether_vlan_header *); 1718 eth_type = ntohs(evh->evl_proto); 1719 } 1720 1721 switch (eth_type) { 1722 case ETHERTYPE_IP: 1723 if (m->m_len < ip_offset + sizeof(struct ip)) { 1724 m = m_pullup(m, ip_offset + sizeof(struct ip)); 1725 if (m == NULL) 1726 return (NULL); 1727 } 1728 1729 ip = (struct ip *)(mtod(m, uint8_t *) + ip_offset); 1730 ip_proto = ip->ip_p; 1731 csum_start = ip_offset + (ip->ip_hl << 2); 1732 gso_type = VIRTIO_NET_HDR_GSO_TCPV4; 1733 break; 1734 1735 case ETHERTYPE_IPV6: 1736 if (m->m_len < ip_offset + sizeof(struct ip6_hdr)) { 1737 m = m_pullup(m, ip_offset + sizeof(struct ip6_hdr)); 1738 if (m == NULL) 1739 return (NULL); 1740 } 1741 1742 ip6 = (struct ip6_hdr *)(mtod(m, uint8_t *) + ip_offset); 1743 /* 1744 * XXX Assume no extension headers are present. Presently, 1745 * this will always be true in the case of TSO, and FreeBSD 1746 * does not perform checksum offloading of IPv6 yet. 1747 */ 1748 ip_proto = ip6->ip6_nxt; 1749 csum_start = ip_offset + sizeof(struct ip6_hdr); 1750 gso_type = VIRTIO_NET_HDR_GSO_TCPV6; 1751 break; 1752 1753 default: 1754 return (m); 1755 } 1756 1757 if (m->m_pkthdr.csum_flags & VTNET_CSUM_OFFLOAD) { 1758 hdr->flags |= VIRTIO_NET_HDR_F_NEEDS_CSUM; 1759 hdr->csum_start = csum_start; 1760 hdr->csum_offset = m->m_pkthdr.csum_data; 1761 1762 sc->vtnet_stats.tx_csum_offloaded++; 1763 } 1764 1765 if (m->m_pkthdr.csum_flags & CSUM_TSO) { 1766 if (ip_proto != IPPROTO_TCP) 1767 return (m); 1768 1769 if (m->m_len < csum_start + sizeof(struct tcphdr)) { 1770 m = m_pullup(m, csum_start + sizeof(struct tcphdr)); 1771 if (m == NULL) 1772 return (NULL); 1773 } 1774 1775 tcp = (struct tcphdr *)(mtod(m, uint8_t *) + csum_start); 1776 hdr->gso_type = gso_type; 1777 hdr->hdr_len = csum_start + (tcp->th_off << 2); 1778 hdr->gso_size = m->m_pkthdr.tso_segsz; 1779 1780 if (tcp->th_flags & TH_CWR) { 1781 /* 1782 * Drop if we did not negotiate VIRTIO_NET_F_HOST_ECN. 1783 * ECN support is only configurable globally with the 1784 * net.inet.tcp.ecn.enable sysctl knob. 1785 */ 1786 if ((sc->vtnet_flags & VTNET_FLAG_TSO_ECN) == 0) { 1787 if_printf(ifp, "TSO with ECN not supported " 1788 "by host\n"); 1789 m_freem(m); 1790 return (NULL); 1791 } 1792 1793 hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN; 1794 } 1795 1796 sc->vtnet_stats.tx_tso_offloaded++; 1797 } 1798 1799 return (m); 1800 } 1801 1802 static int 1803 vtnet_enqueue_txbuf(struct vtnet_softc *sc, struct mbuf **m_head, 1804 struct vtnet_tx_header *txhdr) 1805 { 1806 struct sglist sg; 1807 struct sglist_seg segs[VTNET_MAX_TX_SEGS]; 1808 struct virtqueue *vq; 1809 struct mbuf *m; 1810 int error; 1811 1812 vq = sc->vtnet_tx_vq; 1813 m = *m_head; 1814 1815 sglist_init(&sg, sc->vtnet_tx_nsegs, segs); 1816 error = sglist_append(&sg, &txhdr->vth_uhdr, sc->vtnet_hdr_size); 1817 KASSERT(error == 0 && sg.sg_nseg == 1, 1818 ("%s: error %d adding header to sglist", __func__, error)); 1819 1820 error = sglist_append_mbuf(&sg, m); 1821 if (error) { 1822 m = m_defrag(m, M_NOWAIT); 1823 if (m == NULL) 1824 goto fail; 1825 1826 *m_head = m; 1827 sc->vtnet_stats.tx_defragged++; 1828 1829 error = sglist_append_mbuf(&sg, m); 1830 if (error) 1831 goto fail; 1832 } 1833 1834 txhdr->vth_mbuf = m; 1835 error = virtqueue_enqueue(vq, txhdr, &sg, sg.sg_nseg, 0); 1836 1837 return (error); 1838 1839 fail: 1840 sc->vtnet_stats.tx_defrag_failed++; 1841 m_freem(*m_head); 1842 *m_head = NULL; 1843 1844 return (ENOBUFS); 1845 } 1846 1847 static struct mbuf * 1848 vtnet_vlan_tag_insert(struct mbuf *m) 1849 { 1850 struct mbuf *n; 1851 struct ether_vlan_header *evl; 1852 1853 if (M_WRITABLE(m) == 0) { 1854 n = m_dup(m, M_NOWAIT); 1855 m_freem(m); 1856 if ((m = n) == NULL) 1857 return (NULL); 1858 } 1859 1860 M_PREPEND(m, ETHER_VLAN_ENCAP_LEN, M_NOWAIT); 1861 if (m == NULL) 1862 return (NULL); 1863 if (m->m_len < sizeof(struct ether_vlan_header)) { 1864 m = m_pullup(m, sizeof(struct ether_vlan_header)); 1865 if (m == NULL) 1866 return (NULL); 1867 } 1868 1869 /* Insert 802.1Q header into the existing Ethernet header. */ 1870 evl = mtod(m, struct ether_vlan_header *); 1871 bcopy((char *) evl + ETHER_VLAN_ENCAP_LEN, 1872 (char *) evl, ETHER_HDR_LEN - ETHER_TYPE_LEN); 1873 evl->evl_encap_proto = htons(ETHERTYPE_VLAN); 1874 evl->evl_tag = htons(m->m_pkthdr.ether_vlantag); 1875 m->m_flags &= ~M_VLANTAG; 1876 1877 return (m); 1878 } 1879 1880 static int 1881 vtnet_encap(struct vtnet_softc *sc, struct mbuf **m_head) 1882 { 1883 struct vtnet_tx_header *txhdr; 1884 struct virtio_net_hdr *hdr; 1885 struct mbuf *m; 1886 int error; 1887 1888 txhdr = SLIST_FIRST(&sc->vtnet_txhdr_free); 1889 if (txhdr == NULL) 1890 return (ENOBUFS); 1891 SLIST_REMOVE_HEAD(&sc->vtnet_txhdr_free, link); 1892 1893 /* 1894 * Always use the non-mergeable header to simplify things. When 1895 * the mergeable feature is negotiated, the num_buffers field 1896 * must be set to zero. We use vtnet_hdr_size later to enqueue 1897 * the correct header size to the host. 1898 */ 1899 hdr = &txhdr->vth_uhdr.hdr; 1900 m = *m_head; 1901 1902 error = ENOBUFS; 1903 1904 if (m->m_flags & M_VLANTAG) { 1905 //m = ether_vlanencap(m, m->m_pkthdr.ether_vtag); 1906 m = vtnet_vlan_tag_insert(m); 1907 if ((*m_head = m) == NULL) 1908 goto fail; 1909 m->m_flags &= ~M_VLANTAG; 1910 } 1911 1912 if (m->m_pkthdr.csum_flags != 0) { 1913 m = vtnet_tx_offload(sc, m, hdr); 1914 if ((*m_head = m) == NULL) 1915 goto fail; 1916 } 1917 1918 error = vtnet_enqueue_txbuf(sc, m_head, txhdr); 1919 fail: 1920 if (error != 0) 1921 vtnet_enqueue_txhdr(sc, txhdr); 1922 return (error); 1923 } 1924 1925 static void 1926 vtnet_start(struct ifnet *ifp, struct ifaltq_subque *ifsq) 1927 { 1928 struct vtnet_softc *sc; 1929 1930 sc = ifp->if_softc; 1931 1932 ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq); 1933 lwkt_serialize_enter(&sc->vtnet_slz); 1934 vtnet_start_locked(ifp, ifsq); 1935 lwkt_serialize_exit(&sc->vtnet_slz); 1936 } 1937 1938 static void 1939 vtnet_start_locked(struct ifnet *ifp, struct ifaltq_subque *ifsq) 1940 { 1941 struct vtnet_softc *sc; 1942 struct virtqueue *vq; 1943 struct mbuf *m0; 1944 int enq; 1945 1946 sc = ifp->if_softc; 1947 vq = sc->vtnet_tx_vq; 1948 enq = 0; 1949 1950 ASSERT_SERIALIZED(&sc->vtnet_slz); 1951 1952 if ((ifp->if_flags & (IFF_RUNNING)) != 1953 IFF_RUNNING || ((sc->vtnet_flags & VTNET_FLAG_LINK) == 0)) 1954 return; 1955 1956 #ifdef VTNET_TX_INTR_MODERATION 1957 if (virtqueue_nused(vq) >= sc->vtnet_tx_size / 2) 1958 vtnet_txeof(sc); 1959 #endif 1960 1961 while (!ifsq_is_empty(ifsq)) { 1962 if (virtqueue_full(vq)) { 1963 ifq_set_oactive(&ifp->if_snd); 1964 break; 1965 } 1966 1967 m0 = ifq_dequeue(&ifp->if_snd); 1968 if (m0 == NULL) 1969 break; 1970 1971 if (vtnet_encap(sc, &m0) != 0) { 1972 if (m0 == NULL) 1973 break; 1974 ifq_prepend(&ifp->if_snd, m0); 1975 ifq_set_oactive(&ifp->if_snd); 1976 break; 1977 } 1978 1979 enq++; 1980 ETHER_BPF_MTAP(ifp, m0); 1981 } 1982 1983 if (enq > 0) { 1984 virtqueue_notify(vq, &sc->vtnet_slz); 1985 sc->vtnet_watchdog_timer = VTNET_WATCHDOG_TIMEOUT; 1986 } 1987 } 1988 1989 static void 1990 vtnet_tick(void *xsc) 1991 { 1992 struct vtnet_softc *sc; 1993 1994 sc = xsc; 1995 1996 #if 0 1997 ASSERT_SERIALIZED(&sc->vtnet_slz); 1998 #ifdef VTNET_DEBUG 1999 virtqueue_dump(sc->vtnet_rx_vq); 2000 virtqueue_dump(sc->vtnet_tx_vq); 2001 #endif 2002 2003 vtnet_watchdog(sc); 2004 callout_reset(&sc->vtnet_tick_ch, hz, vtnet_tick, sc); 2005 #endif 2006 } 2007 2008 static void 2009 vtnet_tx_intr_task(void *arg) 2010 { 2011 struct vtnet_softc *sc; 2012 struct ifnet *ifp; 2013 struct ifaltq_subque *ifsq; 2014 2015 sc = arg; 2016 ifp = sc->vtnet_ifp; 2017 ifsq = ifq_get_subq_default(&ifp->if_snd); 2018 2019 next: 2020 // lwkt_serialize_enter(&sc->vtnet_slz); 2021 2022 if ((ifp->if_flags & IFF_RUNNING) == 0) { 2023 vtnet_enable_tx_intr(sc); 2024 // lwkt_serialize_exit(&sc->vtnet_slz); 2025 return; 2026 } 2027 2028 vtnet_txeof(sc); 2029 2030 if (!ifsq_is_empty(ifsq)) 2031 vtnet_start_locked(ifp, ifsq); 2032 2033 if (vtnet_enable_tx_intr(sc) != 0) { 2034 vtnet_disable_tx_intr(sc); 2035 sc->vtnet_stats.tx_task_rescheduled++; 2036 // lwkt_serialize_exit(&sc->vtnet_slz); 2037 goto next; 2038 } 2039 2040 // lwkt_serialize_exit(&sc->vtnet_slz); 2041 } 2042 2043 static void 2044 vtnet_tx_vq_intr(void *xsc) 2045 { 2046 struct vtnet_softc *sc; 2047 2048 sc = xsc; 2049 2050 if (!virtqueue_pending(sc->vtnet_tx_vq)) 2051 return; 2052 2053 vtnet_disable_tx_intr(sc); 2054 vtnet_tx_intr_task(sc); 2055 } 2056 2057 static void 2058 vtnet_config_intr(void *arg) 2059 { 2060 struct vtnet_softc *sc; 2061 2062 sc = arg; 2063 2064 taskqueue_enqueue(taskqueue_thread[mycpuid], &sc->vtnet_cfgchg_task); 2065 } 2066 2067 static void 2068 vtnet_stop(struct vtnet_softc *sc) 2069 { 2070 device_t dev; 2071 struct ifnet *ifp; 2072 2073 dev = sc->vtnet_dev; 2074 ifp = sc->vtnet_ifp; 2075 2076 ASSERT_SERIALIZED(&sc->vtnet_slz); 2077 2078 sc->vtnet_watchdog_timer = 0; 2079 callout_stop(&sc->vtnet_tick_ch); 2080 ifq_clr_oactive(&ifp->if_snd); 2081 ifp->if_flags &= ~(IFF_RUNNING); 2082 2083 vtnet_disable_rx_intr(sc); 2084 vtnet_disable_tx_intr(sc); 2085 2086 /* 2087 * Stop the host VirtIO adapter. Note this will reset the host 2088 * adapter's state back to the pre-initialized state, so in 2089 * order to make the device usable again, we must drive it 2090 * through virtio_reinit() and virtio_reinit_complete(). 2091 */ 2092 virtio_stop(dev); 2093 2094 sc->vtnet_flags &= ~VTNET_FLAG_LINK; 2095 2096 vtnet_free_rx_mbufs(sc); 2097 vtnet_free_tx_mbufs(sc); 2098 } 2099 2100 static int 2101 vtnet_virtio_reinit(struct vtnet_softc *sc) 2102 { 2103 device_t dev; 2104 struct ifnet *ifp; 2105 uint64_t features; 2106 int error; 2107 2108 dev = sc->vtnet_dev; 2109 ifp = sc->vtnet_ifp; 2110 features = sc->vtnet_features; 2111 2112 /* 2113 * Re-negotiate with the host, removing any disabled receive 2114 * features. Transmit features are disabled only on our side 2115 * via if_capenable and if_hwassist. 2116 */ 2117 2118 if (ifp->if_capabilities & IFCAP_RXCSUM) { 2119 if ((ifp->if_capenable & IFCAP_RXCSUM) == 0) 2120 features &= ~VIRTIO_NET_F_GUEST_CSUM; 2121 } 2122 2123 #if 0 /* IFCAP_LRO doesn't exist in DragonFly. */ 2124 if (ifp->if_capabilities & IFCAP_LRO) { 2125 if ((ifp->if_capenable & IFCAP_LRO) == 0) 2126 features &= ~VTNET_LRO_FEATURES; 2127 } 2128 #endif 2129 2130 if (ifp->if_capabilities & IFCAP_VLAN_HWFILTER) { 2131 if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0) 2132 features &= ~VIRTIO_NET_F_CTRL_VLAN; 2133 } 2134 2135 error = virtio_reinit(dev, features); 2136 if (error) 2137 device_printf(dev, "virtio reinit error %d\n", error); 2138 2139 return (error); 2140 } 2141 2142 static void 2143 vtnet_init_locked(struct vtnet_softc *sc) 2144 { 2145 device_t dev; 2146 struct ifnet *ifp; 2147 int error; 2148 2149 dev = sc->vtnet_dev; 2150 ifp = sc->vtnet_ifp; 2151 2152 ASSERT_SERIALIZED(&sc->vtnet_slz); 2153 2154 if (ifp->if_flags & IFF_RUNNING) 2155 return; 2156 2157 /* Stop host's adapter, cancel any pending I/O. */ 2158 vtnet_stop(sc); 2159 2160 /* Reinitialize the host device. */ 2161 error = vtnet_virtio_reinit(sc); 2162 if (error) { 2163 device_printf(dev, 2164 "reinitialization failed, stopping device...\n"); 2165 vtnet_stop(sc); 2166 return; 2167 } 2168 2169 /* Update host with assigned MAC address. */ 2170 bcopy(IF_LLADDR(ifp), sc->vtnet_hwaddr, ETHER_ADDR_LEN); 2171 vtnet_set_hwaddr(sc); 2172 2173 ifp->if_hwassist = 0; 2174 if (ifp->if_capenable & IFCAP_TXCSUM) 2175 ifp->if_hwassist |= VTNET_CSUM_OFFLOAD; 2176 if (ifp->if_capenable & IFCAP_TSO4) 2177 ifp->if_hwassist |= CSUM_TSO; 2178 2179 error = vtnet_init_rx_vq(sc); 2180 if (error) { 2181 device_printf(dev, 2182 "cannot allocate mbufs for Rx virtqueue\n"); 2183 vtnet_stop(sc); 2184 return; 2185 } 2186 2187 if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) { 2188 if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) { 2189 /* Restore promiscuous and all-multicast modes. */ 2190 vtnet_rx_filter(sc); 2191 2192 /* Restore filtered MAC addresses. */ 2193 vtnet_rx_filter_mac(sc); 2194 } 2195 2196 /* Restore VLAN filters. */ 2197 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) 2198 vtnet_rx_filter_vlan(sc); 2199 } 2200 2201 { 2202 vtnet_enable_rx_intr(sc); 2203 vtnet_enable_tx_intr(sc); 2204 } 2205 2206 ifp->if_flags |= IFF_RUNNING; 2207 ifq_clr_oactive(&ifp->if_snd); 2208 2209 virtio_reinit_complete(dev); 2210 2211 vtnet_update_link_status(sc); 2212 callout_reset(&sc->vtnet_tick_ch, hz, vtnet_tick, sc); 2213 } 2214 2215 static void 2216 vtnet_init(void *xsc) 2217 { 2218 struct vtnet_softc *sc; 2219 2220 sc = xsc; 2221 2222 lwkt_serialize_enter(&sc->vtnet_slz); 2223 vtnet_init_locked(sc); 2224 lwkt_serialize_exit(&sc->vtnet_slz); 2225 } 2226 2227 static void 2228 vtnet_exec_ctrl_cmd(struct vtnet_softc *sc, void *cookie, 2229 struct sglist *sg, int readable, int writable) 2230 { 2231 struct virtqueue *vq; 2232 void *c; 2233 2234 vq = sc->vtnet_ctrl_vq; 2235 2236 ASSERT_SERIALIZED(&sc->vtnet_slz); 2237 KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_VQ, 2238 ("no control virtqueue")); 2239 KASSERT(virtqueue_empty(vq), 2240 ("control command already enqueued")); 2241 2242 if (virtqueue_enqueue(vq, cookie, sg, readable, writable) != 0) 2243 return; 2244 2245 virtqueue_notify(vq, &sc->vtnet_slz); 2246 2247 /* 2248 * Poll until the command is complete. Previously, we would 2249 * sleep until the control virtqueue interrupt handler woke 2250 * us up, but dropping the VTNET_MTX leads to serialization 2251 * difficulties. 2252 * 2253 * Furthermore, it appears QEMU/KVM only allocates three MSIX 2254 * vectors. Two of those vectors are needed for the Rx and Tx 2255 * virtqueues. We do not support sharing both a Vq and config 2256 * changed notification on the same MSIX vector. 2257 */ 2258 c = virtqueue_poll(vq, NULL); 2259 KASSERT(c == cookie, ("unexpected control command response")); 2260 } 2261 2262 static int 2263 vtnet_ctrl_mac_cmd(struct vtnet_softc *sc, uint8_t *hwaddr) 2264 { 2265 struct { 2266 struct virtio_net_ctrl_hdr hdr __aligned(2); 2267 uint8_t pad1; 2268 char aligned_hwaddr[ETHER_ADDR_LEN] __aligned(8); 2269 uint8_t pad2; 2270 uint8_t ack; 2271 } s; 2272 struct sglist_seg segs[3]; 2273 struct sglist sg; 2274 int error; 2275 2276 s.hdr.class = VIRTIO_NET_CTRL_MAC; 2277 s.hdr.cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET; 2278 s.ack = VIRTIO_NET_ERR; 2279 2280 /* Copy the mac address into physically contiguous memory */ 2281 memcpy(s.aligned_hwaddr, hwaddr, ETHER_ADDR_LEN); 2282 2283 sglist_init(&sg, 3, segs); 2284 error = 0; 2285 error |= sglist_append(&sg, &s.hdr, 2286 sizeof(struct virtio_net_ctrl_hdr)); 2287 error |= sglist_append(&sg, s.aligned_hwaddr, ETHER_ADDR_LEN); 2288 error |= sglist_append(&sg, &s.ack, sizeof(uint8_t)); 2289 KASSERT(error == 0 && sg.sg_nseg == 3, 2290 ("%s: error %d adding set MAC msg to sglist", __func__, error)); 2291 2292 vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1); 2293 2294 return (s.ack == VIRTIO_NET_OK ? 0 : EIO); 2295 } 2296 2297 static void 2298 vtnet_rx_filter(struct vtnet_softc *sc) 2299 { 2300 device_t dev; 2301 struct ifnet *ifp; 2302 2303 dev = sc->vtnet_dev; 2304 ifp = sc->vtnet_ifp; 2305 2306 ASSERT_SERIALIZED(&sc->vtnet_slz); 2307 KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX, 2308 ("CTRL_RX feature not negotiated")); 2309 2310 if (vtnet_set_promisc(sc, ifp->if_flags & IFF_PROMISC) != 0) 2311 device_printf(dev, "cannot %s promiscuous mode\n", 2312 (ifp->if_flags & IFF_PROMISC) ? "enable" : "disable"); 2313 2314 if (vtnet_set_allmulti(sc, ifp->if_flags & IFF_ALLMULTI) != 0) 2315 device_printf(dev, "cannot %s all-multicast mode\n", 2316 (ifp->if_flags & IFF_ALLMULTI) ? "enable" : "disable"); 2317 } 2318 2319 static int 2320 vtnet_ctrl_rx_cmd(struct vtnet_softc *sc, int cmd, int on) 2321 { 2322 struct sglist_seg segs[3]; 2323 struct sglist sg; 2324 struct { 2325 struct virtio_net_ctrl_hdr hdr __aligned(2); 2326 uint8_t pad1; 2327 uint8_t onoff; 2328 uint8_t pad2; 2329 uint8_t ack; 2330 } s; 2331 int error; 2332 2333 KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX, 2334 ("%s: CTRL_RX feature not negotiated", __func__)); 2335 2336 s.hdr.class = VIRTIO_NET_CTRL_RX; 2337 s.hdr.cmd = cmd; 2338 s.onoff = !!on; 2339 s.ack = VIRTIO_NET_ERR; 2340 2341 sglist_init(&sg, 3, segs); 2342 error = 0; 2343 error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr)); 2344 error |= sglist_append(&sg, &s.onoff, sizeof(uint8_t)); 2345 error |= sglist_append(&sg, &s.ack, sizeof(uint8_t)); 2346 KASSERT(error == 0 && sg.sg_nseg == 3, 2347 ("%s: error %d adding Rx message to sglist", __func__, error)); 2348 2349 vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1); 2350 2351 return (s.ack == VIRTIO_NET_OK ? 0 : EIO); 2352 } 2353 2354 static int 2355 vtnet_set_promisc(struct vtnet_softc *sc, int on) 2356 { 2357 2358 return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_PROMISC, on)); 2359 } 2360 2361 static int 2362 vtnet_set_allmulti(struct vtnet_softc *sc, int on) 2363 { 2364 2365 return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_ALLMULTI, on)); 2366 } 2367 2368 static void 2369 vtnet_rx_filter_mac(struct vtnet_softc *sc) 2370 { 2371 struct virtio_net_ctrl_hdr hdr __aligned(2); 2372 struct vtnet_mac_filter *filter; 2373 struct sglist_seg segs[4]; 2374 struct sglist sg; 2375 struct ifnet *ifp; 2376 struct ifaddr *ifa; 2377 struct ifaddr_container *ifac; 2378 struct ifmultiaddr *ifma; 2379 int ucnt, mcnt, promisc, allmulti, error; 2380 uint8_t ack; 2381 2382 ifp = sc->vtnet_ifp; 2383 ucnt = 0; 2384 mcnt = 0; 2385 promisc = 0; 2386 allmulti = 0; 2387 2388 ASSERT_SERIALIZED(&sc->vtnet_slz); 2389 KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX, 2390 ("%s: CTRL_RX feature not negotiated", __func__)); 2391 2392 /* Use the MAC filtering table allocated in vtnet_attach. */ 2393 filter = sc->vtnet_macfilter; 2394 memset(filter, 0, sizeof(struct vtnet_mac_filter)); 2395 2396 /* Unicast MAC addresses: */ 2397 //if_addr_rlock(ifp); 2398 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) { 2399 ifa = ifac->ifa; 2400 if (ifa->ifa_addr->sa_family != AF_LINK) 2401 continue; 2402 else if (memcmp(LLADDR((struct sockaddr_dl *)ifa->ifa_addr), 2403 sc->vtnet_hwaddr, ETHER_ADDR_LEN) == 0) 2404 continue; 2405 else if (ucnt == VTNET_MAX_MAC_ENTRIES) { 2406 promisc = 1; 2407 break; 2408 } 2409 2410 bcopy(LLADDR((struct sockaddr_dl *)ifa->ifa_addr), 2411 &filter->vmf_unicast.macs[ucnt], ETHER_ADDR_LEN); 2412 ucnt++; 2413 } 2414 //if_addr_runlock(ifp); 2415 2416 if (promisc != 0) { 2417 filter->vmf_unicast.nentries = 0; 2418 if_printf(ifp, "more than %d MAC addresses assigned, " 2419 "falling back to promiscuous mode\n", 2420 VTNET_MAX_MAC_ENTRIES); 2421 } else 2422 filter->vmf_unicast.nentries = ucnt; 2423 2424 /* Multicast MAC addresses: */ 2425 //if_maddr_rlock(ifp); 2426 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 2427 if (ifma->ifma_addr->sa_family != AF_LINK) 2428 continue; 2429 else if (mcnt == VTNET_MAX_MAC_ENTRIES) { 2430 allmulti = 1; 2431 break; 2432 } 2433 2434 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr), 2435 &filter->vmf_multicast.macs[mcnt], ETHER_ADDR_LEN); 2436 mcnt++; 2437 } 2438 //if_maddr_runlock(ifp); 2439 2440 if (allmulti != 0) { 2441 filter->vmf_multicast.nentries = 0; 2442 if_printf(ifp, "more than %d multicast MAC addresses " 2443 "assigned, falling back to all-multicast mode\n", 2444 VTNET_MAX_MAC_ENTRIES); 2445 } else 2446 filter->vmf_multicast.nentries = mcnt; 2447 2448 if (promisc != 0 && allmulti != 0) 2449 goto out; 2450 2451 hdr.class = VIRTIO_NET_CTRL_MAC; 2452 hdr.cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET; 2453 ack = VIRTIO_NET_ERR; 2454 2455 sglist_init(&sg, 4, segs); 2456 error = 0; 2457 error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr)); 2458 error |= sglist_append(&sg, &filter->vmf_unicast, 2459 sizeof(uint32_t) + filter->vmf_unicast.nentries * ETHER_ADDR_LEN); 2460 error |= sglist_append(&sg, &filter->vmf_multicast, 2461 sizeof(uint32_t) + filter->vmf_multicast.nentries * ETHER_ADDR_LEN); 2462 error |= sglist_append(&sg, &ack, sizeof(uint8_t)); 2463 KASSERT(error == 0 && sg.sg_nseg == 4, 2464 ("%s: error %d adding MAC filter msg to sglist", __func__, error)); 2465 2466 vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1); 2467 2468 if (ack != VIRTIO_NET_OK) 2469 if_printf(ifp, "error setting host MAC filter table\n"); 2470 2471 out: 2472 if (promisc != 0 && vtnet_set_promisc(sc, 1) != 0) 2473 if_printf(ifp, "cannot enable promiscuous mode\n"); 2474 if (allmulti != 0 && vtnet_set_allmulti(sc, 1) != 0) 2475 if_printf(ifp, "cannot enable all-multicast mode\n"); 2476 } 2477 2478 static int 2479 vtnet_exec_vlan_filter(struct vtnet_softc *sc, int add, uint16_t tag) 2480 { 2481 struct sglist_seg segs[3]; 2482 struct sglist sg; 2483 struct { 2484 struct virtio_net_ctrl_hdr hdr __aligned(2); 2485 uint8_t pad1; 2486 uint16_t tag; 2487 uint8_t pad2; 2488 uint8_t ack; 2489 } s; 2490 int error; 2491 2492 s.hdr.class = VIRTIO_NET_CTRL_VLAN; 2493 s.hdr.cmd = add ? VIRTIO_NET_CTRL_VLAN_ADD : VIRTIO_NET_CTRL_VLAN_DEL; 2494 s.tag = tag; 2495 s.ack = VIRTIO_NET_ERR; 2496 2497 sglist_init(&sg, 3, segs); 2498 error = 0; 2499 error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr)); 2500 error |= sglist_append(&sg, &s.tag, sizeof(uint16_t)); 2501 error |= sglist_append(&sg, &s.ack, sizeof(uint8_t)); 2502 KASSERT(error == 0 && sg.sg_nseg == 3, 2503 ("%s: error %d adding VLAN message to sglist", __func__, error)); 2504 2505 vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1); 2506 2507 return (s.ack == VIRTIO_NET_OK ? 0 : EIO); 2508 } 2509 2510 static void 2511 vtnet_rx_filter_vlan(struct vtnet_softc *sc) 2512 { 2513 uint32_t w; 2514 uint16_t tag; 2515 int i, bit, nvlans; 2516 2517 ASSERT_SERIALIZED(&sc->vtnet_slz); 2518 KASSERT(sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER, 2519 ("%s: VLAN_FILTER feature not negotiated", __func__)); 2520 2521 nvlans = sc->vtnet_nvlans; 2522 2523 /* Enable the filter for each configured VLAN. */ 2524 for (i = 0; i < VTNET_VLAN_SHADOW_SIZE && nvlans > 0; i++) { 2525 w = sc->vtnet_vlan_shadow[i]; 2526 while ((bit = ffs(w) - 1) != -1) { 2527 w &= ~(1 << bit); 2528 tag = sizeof(w) * CHAR_BIT * i + bit; 2529 nvlans--; 2530 2531 if (vtnet_exec_vlan_filter(sc, 1, tag) != 0) { 2532 device_printf(sc->vtnet_dev, 2533 "cannot enable VLAN %d filter\n", tag); 2534 } 2535 } 2536 } 2537 2538 KASSERT(nvlans == 0, ("VLAN count incorrect")); 2539 } 2540 2541 static void 2542 vtnet_update_vlan_filter(struct vtnet_softc *sc, int add, uint16_t tag) 2543 { 2544 struct ifnet *ifp; 2545 int idx, bit; 2546 2547 ifp = sc->vtnet_ifp; 2548 idx = (tag >> 5) & 0x7F; 2549 bit = tag & 0x1F; 2550 2551 if (tag == 0 || tag > 4095) 2552 return; 2553 2554 lwkt_serialize_enter(&sc->vtnet_slz); 2555 2556 /* Update shadow VLAN table. */ 2557 if (add) { 2558 sc->vtnet_nvlans++; 2559 sc->vtnet_vlan_shadow[idx] |= (1 << bit); 2560 } else { 2561 sc->vtnet_nvlans--; 2562 sc->vtnet_vlan_shadow[idx] &= ~(1 << bit); 2563 } 2564 2565 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER && 2566 vtnet_exec_vlan_filter(sc, add, tag) != 0) { 2567 device_printf(sc->vtnet_dev, 2568 "cannot %s VLAN %d %s the host filter table\n", 2569 add ? "add" : "remove", tag, add ? "to" : "from"); 2570 } 2571 2572 lwkt_serialize_exit(&sc->vtnet_slz); 2573 } 2574 2575 static void 2576 vtnet_register_vlan(void *arg, struct ifnet *ifp, uint16_t tag) 2577 { 2578 2579 if (ifp->if_softc != arg) 2580 return; 2581 2582 vtnet_update_vlan_filter(arg, 1, tag); 2583 } 2584 2585 static void 2586 vtnet_unregister_vlan(void *arg, struct ifnet *ifp, uint16_t tag) 2587 { 2588 2589 if (ifp->if_softc != arg) 2590 return; 2591 2592 vtnet_update_vlan_filter(arg, 0, tag); 2593 } 2594 2595 static int 2596 vtnet_ifmedia_upd(struct ifnet *ifp) 2597 { 2598 struct vtnet_softc *sc; 2599 struct ifmedia *ifm; 2600 2601 sc = ifp->if_softc; 2602 ifm = &sc->vtnet_media; 2603 2604 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 2605 return (EINVAL); 2606 2607 return (0); 2608 } 2609 2610 static void 2611 vtnet_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 2612 { 2613 struct vtnet_softc *sc; 2614 2615 sc = ifp->if_softc; 2616 2617 ifmr->ifm_status = IFM_AVALID; 2618 ifmr->ifm_active = IFM_ETHER; 2619 2620 lwkt_serialize_enter(&sc->vtnet_slz); 2621 if (vtnet_is_link_up(sc) != 0) { 2622 ifmr->ifm_status |= IFM_ACTIVE; 2623 ifmr->ifm_active |= VTNET_MEDIATYPE; 2624 } else 2625 ifmr->ifm_active |= IFM_NONE; 2626 lwkt_serialize_exit(&sc->vtnet_slz); 2627 } 2628 2629 static void 2630 vtnet_add_statistics(struct vtnet_softc *sc) 2631 { 2632 device_t dev; 2633 struct vtnet_statistics *stats; 2634 struct sysctl_ctx_list *ctx; 2635 struct sysctl_oid *tree; 2636 struct sysctl_oid_list *child; 2637 2638 dev = sc->vtnet_dev; 2639 stats = &sc->vtnet_stats; 2640 ctx = device_get_sysctl_ctx(dev); 2641 tree = device_get_sysctl_tree(dev); 2642 child = SYSCTL_CHILDREN(tree); 2643 2644 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "mbuf_alloc_failed", 2645 CTLFLAG_RD, &stats->mbuf_alloc_failed, 0, 2646 "Mbuf cluster allocation failures"); 2647 2648 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_frame_too_large", 2649 CTLFLAG_RD, &stats->rx_frame_too_large, 0, 2650 "Received frame larger than the mbuf chain"); 2651 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_enq_replacement_failed", 2652 CTLFLAG_RD, &stats->rx_enq_replacement_failed, 0, 2653 "Enqueuing the replacement receive mbuf failed"); 2654 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_mergeable_failed", 2655 CTLFLAG_RD, &stats->rx_mergeable_failed, 0, 2656 "Mergeable buffers receive failures"); 2657 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_ethtype", 2658 CTLFLAG_RD, &stats->rx_csum_bad_ethtype, 0, 2659 "Received checksum offloaded buffer with unsupported " 2660 "Ethernet type"); 2661 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_ipproto", 2662 CTLFLAG_RD, &stats->rx_csum_bad_ipproto, 0, 2663 "Received checksum offloaded buffer with incorrect IP protocol"); 2664 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_offset", 2665 CTLFLAG_RD, &stats->rx_csum_bad_offset, 0, 2666 "Received checksum offloaded buffer with incorrect offset"); 2667 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_failed", 2668 CTLFLAG_RD, &stats->rx_csum_failed, 0, 2669 "Received buffer checksum offload failed"); 2670 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_offloaded", 2671 CTLFLAG_RD, &stats->rx_csum_offloaded, 0, 2672 "Received buffer checksum offload succeeded"); 2673 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_task_rescheduled", 2674 CTLFLAG_RD, &stats->rx_task_rescheduled, 0, 2675 "Times the receive interrupt task rescheduled itself"); 2676 2677 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_bad_ethtype", 2678 CTLFLAG_RD, &stats->tx_csum_bad_ethtype, 0, 2679 "Aborted transmit of checksum offloaded buffer with unknown " 2680 "Ethernet type"); 2681 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_bad_ethtype", 2682 CTLFLAG_RD, &stats->tx_tso_bad_ethtype, 0, 2683 "Aborted transmit of TSO buffer with unknown Ethernet type"); 2684 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_defragged", 2685 CTLFLAG_RD, &stats->tx_defragged, 0, 2686 "Transmit mbufs defragged"); 2687 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_defrag_failed", 2688 CTLFLAG_RD, &stats->tx_defrag_failed, 0, 2689 "Aborted transmit of buffer because defrag failed"); 2690 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_offloaded", 2691 CTLFLAG_RD, &stats->tx_csum_offloaded, 0, 2692 "Offloaded checksum of transmitted buffer"); 2693 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_offloaded", 2694 CTLFLAG_RD, &stats->tx_tso_offloaded, 0, 2695 "Segmentation offload of transmitted buffer"); 2696 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_task_rescheduled", 2697 CTLFLAG_RD, &stats->tx_task_rescheduled, 0, 2698 "Times the transmit interrupt task rescheduled itself"); 2699 } 2700 2701 static int 2702 vtnet_enable_rx_intr(struct vtnet_softc *sc) 2703 { 2704 2705 return (virtqueue_enable_intr(sc->vtnet_rx_vq)); 2706 } 2707 2708 static void 2709 vtnet_disable_rx_intr(struct vtnet_softc *sc) 2710 { 2711 2712 virtqueue_disable_intr(sc->vtnet_rx_vq); 2713 } 2714 2715 static int 2716 vtnet_enable_tx_intr(struct vtnet_softc *sc) 2717 { 2718 2719 #ifdef VTNET_TX_INTR_MODERATION 2720 return (0); 2721 #else 2722 return (virtqueue_enable_intr(sc->vtnet_tx_vq)); 2723 #endif 2724 } 2725 2726 static void 2727 vtnet_disable_tx_intr(struct vtnet_softc *sc) 2728 { 2729 2730 virtqueue_disable_intr(sc->vtnet_tx_vq); 2731 } 2732