1 /*- 2 * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org> 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice unmodified, this list of conditions, and the following 10 * disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 */ 26 27 /* Driver for VirtIO network devices. */ 28 29 #include <sys/cdefs.h> 30 31 #include <sys/param.h> 32 #include <sys/systm.h> 33 #include <sys/kernel.h> 34 #include <sys/sockio.h> 35 #include <sys/mbuf.h> 36 #include <sys/malloc.h> 37 #include <sys/module.h> 38 #include <sys/socket.h> 39 #include <sys/sysctl.h> 40 #include <sys/taskqueue.h> 41 #include <sys/random.h> 42 #include <sys/sglist.h> 43 #include <sys/serialize.h> 44 #include <sys/bus.h> 45 #include <sys/rman.h> 46 47 #include <machine/limits.h> 48 49 #include <net/ethernet.h> 50 #include <net/if.h> 51 #include <net/if_arp.h> 52 #include <net/if_dl.h> 53 #include <net/if_types.h> 54 #include <net/if_media.h> 55 #include <net/vlan/if_vlan_var.h> 56 #include <net/vlan/if_vlan_ether.h> 57 #include <net/ifq_var.h> 58 59 #include <net/bpf.h> 60 61 #include <netinet/in_systm.h> 62 #include <netinet/in.h> 63 #include <netinet/ip.h> 64 #include <netinet/ip6.h> 65 #include <netinet/udp.h> 66 #include <netinet/tcp.h> 67 68 #include <dev/virtual/virtio/virtio/virtio.h> 69 #include <dev/virtual/virtio/virtio/virtqueue.h> 70 #include <dev/virtual/virtio/net/virtio_net.h> 71 #include <dev/virtual/virtio/net/if_vtnetvar.h> 72 73 MALLOC_DEFINE(M_VTNET, "VTNET_TX", "Outgoing VTNET TX frame header"); 74 75 static int vtnet_probe(device_t); 76 static int vtnet_attach(device_t); 77 static int vtnet_detach(device_t); 78 static int vtnet_suspend(device_t); 79 static int vtnet_resume(device_t); 80 static int vtnet_shutdown(device_t); 81 82 static void vtnet_negotiate_features(struct vtnet_softc *); 83 static int vtnet_alloc_intrs(struct vtnet_softc *); 84 static int vtnet_alloc_virtqueues(struct vtnet_softc *); 85 static void vtnet_get_hwaddr(struct vtnet_softc *); 86 static void vtnet_set_hwaddr(struct vtnet_softc *); 87 static int vtnet_is_link_up(struct vtnet_softc *); 88 static void vtnet_update_link_status(struct vtnet_softc *); 89 #if 0 90 static void vtnet_watchdog(struct vtnet_softc *); 91 #endif 92 static int vtnet_setup_interface(struct vtnet_softc *); 93 static int vtnet_change_mtu(struct vtnet_softc *, int); 94 static int vtnet_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *); 95 96 static int vtnet_init_rx_vq(struct vtnet_softc *); 97 static void vtnet_free_rx_mbufs(struct vtnet_softc *); 98 static void vtnet_free_tx_mbufs(struct vtnet_softc *); 99 static void vtnet_free_ctrl_vq(struct vtnet_softc *); 100 101 static struct mbuf * vtnet_alloc_rxbuf(struct vtnet_softc *, int, 102 struct mbuf **); 103 static int vtnet_replace_rxbuf(struct vtnet_softc *, 104 struct mbuf *, int); 105 static int vtnet_newbuf(struct vtnet_softc *); 106 static void vtnet_discard_merged_rxbuf(struct vtnet_softc *, int); 107 static void vtnet_discard_rxbuf(struct vtnet_softc *, struct mbuf *); 108 static int vtnet_enqueue_rxbuf(struct vtnet_softc *, struct mbuf *); 109 static void vtnet_vlan_tag_remove(struct mbuf *); 110 static int vtnet_rx_csum(struct vtnet_softc *, struct mbuf *, 111 struct virtio_net_hdr *); 112 static int vtnet_rxeof_merged(struct vtnet_softc *, struct mbuf *, int); 113 static int vtnet_rxeof(struct vtnet_softc *, int, int *); 114 static void vtnet_rx_intr_task(void *); 115 static void vtnet_rx_vq_intr(void *); 116 117 static void vtnet_enqueue_txhdr(struct vtnet_softc *, 118 struct vtnet_tx_header *); 119 static void vtnet_txeof(struct vtnet_softc *); 120 static struct mbuf * vtnet_tx_offload(struct vtnet_softc *, struct mbuf *, 121 struct virtio_net_hdr *); 122 static int vtnet_enqueue_txbuf(struct vtnet_softc *, struct mbuf **, 123 struct vtnet_tx_header *); 124 static int vtnet_encap(struct vtnet_softc *, struct mbuf **); 125 static void vtnet_start_locked(struct ifnet *, struct ifaltq_subque *); 126 static void vtnet_start(struct ifnet *, struct ifaltq_subque *); 127 static void vtnet_tx_intr_task(void *); 128 static void vtnet_tx_vq_intr(void *); 129 130 static void vtnet_config_intr(void *); 131 132 static void vtnet_stop(struct vtnet_softc *); 133 static int vtnet_virtio_reinit(struct vtnet_softc *); 134 static void vtnet_init_locked(struct vtnet_softc *); 135 static void vtnet_init(void *); 136 137 static void vtnet_exec_ctrl_cmd(struct vtnet_softc *, void *, 138 struct sglist *, int, int); 139 140 static int vtnet_ctrl_mac_cmd(struct vtnet_softc *, uint8_t *); 141 static int vtnet_ctrl_rx_cmd(struct vtnet_softc *, int, int); 142 static int vtnet_set_promisc(struct vtnet_softc *, int); 143 static int vtnet_set_allmulti(struct vtnet_softc *, int); 144 static void vtnet_rx_filter(struct vtnet_softc *sc); 145 static void vtnet_rx_filter_mac(struct vtnet_softc *); 146 147 static int vtnet_exec_vlan_filter(struct vtnet_softc *, int, uint16_t); 148 static void vtnet_rx_filter_vlan(struct vtnet_softc *); 149 static void vtnet_update_vlan_filter(struct vtnet_softc *, int, uint16_t); 150 static void vtnet_register_vlan(void *, struct ifnet *, uint16_t); 151 static void vtnet_unregister_vlan(void *, struct ifnet *, uint16_t); 152 153 static int vtnet_ifmedia_upd(struct ifnet *); 154 static void vtnet_ifmedia_sts(struct ifnet *, struct ifmediareq *); 155 156 static void vtnet_add_statistics(struct vtnet_softc *); 157 158 static int vtnet_enable_rx_intr(struct vtnet_softc *); 159 static int vtnet_enable_tx_intr(struct vtnet_softc *); 160 static void vtnet_disable_rx_intr(struct vtnet_softc *); 161 static void vtnet_disable_tx_intr(struct vtnet_softc *); 162 163 /* Tunables. */ 164 static int vtnet_csum_disable = 0; 165 TUNABLE_INT("hw.vtnet.csum_disable", &vtnet_csum_disable); 166 static int vtnet_tso_disable = 1; 167 TUNABLE_INT("hw.vtnet.tso_disable", &vtnet_tso_disable); 168 static int vtnet_lro_disable = 0; 169 TUNABLE_INT("hw.vtnet.lro_disable", &vtnet_lro_disable); 170 171 /* 172 * Reducing the number of transmit completed interrupts can 173 * improve performance. To do so, the define below keeps the 174 * Tx vq interrupt disabled and adds calls to vtnet_txeof() 175 * in the start path. The price to pay for this is the m_free'ing 176 * of transmitted mbufs may be delayed. 177 */ 178 #define VTNET_TX_INTR_MODERATION 179 180 static struct virtio_feature_desc vtnet_feature_desc[] = { 181 { VIRTIO_NET_F_CSUM, "TxChecksum" }, 182 { VIRTIO_NET_F_GUEST_CSUM, "RxChecksum" }, 183 { VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, "DynOffload" }, 184 { VIRTIO_NET_F_MAC, "MacAddress" }, 185 { VIRTIO_NET_F_GSO, "TxAllGSO" }, 186 { VIRTIO_NET_F_GUEST_TSO4, "RxTSOv4" }, 187 { VIRTIO_NET_F_GUEST_TSO6, "RxTSOv6" }, 188 { VIRTIO_NET_F_GUEST_ECN, "RxECN" }, 189 { VIRTIO_NET_F_GUEST_UFO, "RxUFO" }, 190 { VIRTIO_NET_F_HOST_TSO4, "TxTSOv4" }, 191 { VIRTIO_NET_F_HOST_TSO6, "TxTSOv6" }, 192 { VIRTIO_NET_F_HOST_ECN, "TxTSOECN" }, 193 { VIRTIO_NET_F_HOST_UFO, "TxUFO" }, 194 { VIRTIO_NET_F_MRG_RXBUF, "MrgRxBuf" }, 195 { VIRTIO_NET_F_STATUS, "Status" }, 196 { VIRTIO_NET_F_CTRL_VQ, "ControlVq" }, 197 { VIRTIO_NET_F_CTRL_RX, "RxMode" }, 198 { VIRTIO_NET_F_CTRL_VLAN, "VLanFilter" }, 199 { VIRTIO_NET_F_CTRL_RX_EXTRA, "RxModeExtra" }, 200 { VIRTIO_NET_F_GUEST_ANNOUNCE, "GuestAnnounce" }, 201 { VIRTIO_NET_F_MQ, "RFS" }, 202 { VIRTIO_NET_F_CTRL_MAC_ADDR, "SetMacAddress" }, 203 { 0, NULL } 204 }; 205 206 static device_method_t vtnet_methods[] = { 207 /* Device methods. */ 208 DEVMETHOD(device_probe, vtnet_probe), 209 DEVMETHOD(device_attach, vtnet_attach), 210 DEVMETHOD(device_detach, vtnet_detach), 211 DEVMETHOD(device_suspend, vtnet_suspend), 212 DEVMETHOD(device_resume, vtnet_resume), 213 DEVMETHOD(device_shutdown, vtnet_shutdown), 214 215 DEVMETHOD_END 216 }; 217 218 static driver_t vtnet_driver = { 219 "vtnet", 220 vtnet_methods, 221 sizeof(struct vtnet_softc) 222 }; 223 224 static devclass_t vtnet_devclass; 225 226 DRIVER_MODULE(vtnet, virtio_pci, vtnet_driver, vtnet_devclass, NULL, NULL); 227 MODULE_VERSION(vtnet, 1); 228 MODULE_DEPEND(vtnet, virtio, 1, 1, 1); 229 230 static int 231 vtnet_probe(device_t dev) 232 { 233 if (virtio_get_device_type(dev) != VIRTIO_ID_NETWORK) 234 return (ENXIO); 235 236 device_set_desc(dev, "VirtIO Networking Adapter"); 237 238 return (BUS_PROBE_DEFAULT); 239 } 240 241 struct irqmap { 242 int irq; 243 driver_intr_t *handler; 244 }; 245 246 static int 247 vtnet_attach(device_t dev) 248 { 249 struct vtnet_softc *sc; 250 int i, error; 251 252 sc = device_get_softc(dev); 253 sc->vtnet_dev = dev; 254 255 lwkt_serialize_init(&sc->vtnet_slz); 256 257 ifmedia_init(&sc->vtnet_media, IFM_IMASK, vtnet_ifmedia_upd, 258 vtnet_ifmedia_sts); 259 ifmedia_add(&sc->vtnet_media, VTNET_MEDIATYPE, 0, NULL); 260 ifmedia_set(&sc->vtnet_media, VTNET_MEDIATYPE); 261 262 vtnet_add_statistics(sc); 263 SLIST_INIT(&sc->vtnet_txhdr_free); 264 265 /* Register our feature descriptions. */ 266 virtio_set_feature_desc(dev, vtnet_feature_desc); 267 vtnet_negotiate_features(sc); 268 269 if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC)) 270 sc->vtnet_flags |= VTNET_FLAG_INDIRECT; 271 272 if (virtio_with_feature(dev, VIRTIO_NET_F_MAC)) { 273 /* This feature should always be negotiated. */ 274 sc->vtnet_flags |= VTNET_FLAG_MAC; 275 } 276 277 if (virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF)) { 278 sc->vtnet_flags |= VTNET_FLAG_MRG_RXBUFS; 279 sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf); 280 } else { 281 sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr); 282 } 283 284 sc->vtnet_rx_mbuf_size = MCLBYTES; 285 sc->vtnet_rx_mbuf_count = VTNET_NEEDED_RX_MBUFS(sc); 286 287 if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VQ)) { 288 sc->vtnet_flags |= VTNET_FLAG_CTRL_VQ; 289 290 if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_RX)) 291 sc->vtnet_flags |= VTNET_FLAG_CTRL_RX; 292 if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VLAN)) 293 sc->vtnet_flags |= VTNET_FLAG_VLAN_FILTER; 294 if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_MAC_ADDR) && 295 virtio_with_feature(dev, VIRTIO_NET_F_CTRL_RX)) 296 sc->vtnet_flags |= VTNET_FLAG_CTRL_MAC; 297 } 298 299 error = vtnet_alloc_intrs(sc); 300 if (error) { 301 device_printf(dev, "cannot allocate interrupts\n"); 302 goto fail; 303 } 304 305 error = vtnet_alloc_virtqueues(sc); 306 if (error) { 307 device_printf(dev, "cannot allocate virtqueues\n"); 308 goto fail; 309 } 310 311 /* XXX Separate function */ 312 struct irqmap info[2]; 313 314 /* Possible "Virtqueue <-> IRQ" configurations */ 315 switch (sc->vtnet_nintr) { 316 case 1: 317 info[0] = (struct irqmap){0, vtnet_rx_vq_intr}; 318 info[1] = (struct irqmap){0, vtnet_tx_vq_intr}; 319 break; 320 case 2: 321 if (virtio_with_feature(dev, VIRTIO_NET_F_STATUS)) { 322 info[0] = (struct irqmap){1, vtnet_rx_vq_intr}; 323 } else { 324 info[0] = (struct irqmap){0, vtnet_rx_vq_intr}; 325 } 326 info[1] = (struct irqmap){1, vtnet_tx_vq_intr}; 327 break; 328 case 3: 329 info[0] = (struct irqmap){1, vtnet_rx_vq_intr}; 330 info[1] = (struct irqmap){2, vtnet_tx_vq_intr}; 331 break; 332 default: 333 device_printf(dev, "Invalid interrupt vector count: %d\n", 334 sc->vtnet_nintr); 335 goto fail; 336 } 337 for (i = 0; i < 2; i++) { 338 error = virtio_bind_intr(dev, info[i].irq, i, 339 info[i].handler, sc); 340 if (error) { 341 device_printf(dev, "cannot bind virtqueue IRQs\n"); 342 goto fail; 343 } 344 } 345 if (virtio_with_feature(dev, VIRTIO_NET_F_STATUS)) { 346 error = virtio_bind_intr(dev, 0, -1, vtnet_config_intr, sc); 347 if (error) { 348 device_printf(dev, "cannot bind config_change IRQ\n"); 349 goto fail; 350 } 351 } 352 353 /* Read (or generate) the MAC address for the adapter. */ 354 vtnet_get_hwaddr(sc); 355 356 error = vtnet_setup_interface(sc); 357 if (error) { 358 device_printf(dev, "cannot setup interface\n"); 359 goto fail; 360 } 361 362 for (i = 0; i < sc->vtnet_nintr; i++) { 363 error = virtio_setup_intr(dev, i, &sc->vtnet_slz); 364 if (error) { 365 device_printf(dev, "cannot setup virtqueue " 366 "interrupts\n"); 367 ether_ifdetach(sc->vtnet_ifp); 368 goto fail; 369 } 370 } 371 372 if ((sc->vtnet_flags & VTNET_FLAG_MAC) == 0) { 373 lwkt_serialize_enter(&sc->vtnet_slz); 374 vtnet_set_hwaddr(sc); 375 lwkt_serialize_exit(&sc->vtnet_slz); 376 } 377 378 /* 379 * Device defaults to promiscuous mode for backwards 380 * compatibility. Turn it off if possible. 381 */ 382 if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) { 383 lwkt_serialize_enter(&sc->vtnet_slz); 384 if (vtnet_set_promisc(sc, 0) != 0) { 385 sc->vtnet_ifp->if_flags |= IFF_PROMISC; 386 device_printf(dev, 387 "cannot disable promiscuous mode\n"); 388 } 389 lwkt_serialize_exit(&sc->vtnet_slz); 390 } else 391 sc->vtnet_ifp->if_flags |= IFF_PROMISC; 392 393 fail: 394 if (error) 395 vtnet_detach(dev); 396 397 return (error); 398 } 399 400 static int 401 vtnet_detach(device_t dev) 402 { 403 struct vtnet_softc *sc; 404 struct ifnet *ifp; 405 int i; 406 407 sc = device_get_softc(dev); 408 ifp = sc->vtnet_ifp; 409 410 for (i = 0; i < sc->vtnet_nintr; i++) 411 virtio_teardown_intr(dev, i); 412 413 if (device_is_attached(dev)) { 414 lwkt_serialize_enter(&sc->vtnet_slz); 415 vtnet_stop(sc); 416 lwkt_serialize_exit(&sc->vtnet_slz); 417 418 taskqueue_drain(taskqueue_swi, &sc->vtnet_cfgchg_task); 419 420 ether_ifdetach(ifp); 421 } 422 423 if (sc->vtnet_vlan_attach != NULL) { 424 EVENTHANDLER_DEREGISTER(vlan_config, sc->vtnet_vlan_attach); 425 sc->vtnet_vlan_attach = NULL; 426 } 427 if (sc->vtnet_vlan_detach != NULL) { 428 EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vtnet_vlan_detach); 429 sc->vtnet_vlan_detach = NULL; 430 } 431 432 if (ifp) { 433 if_free(ifp); 434 sc->vtnet_ifp = NULL; 435 } 436 437 if (sc->vtnet_rx_vq != NULL) 438 vtnet_free_rx_mbufs(sc); 439 if (sc->vtnet_tx_vq != NULL) 440 vtnet_free_tx_mbufs(sc); 441 if (sc->vtnet_ctrl_vq != NULL) 442 vtnet_free_ctrl_vq(sc); 443 444 if (sc->vtnet_txhdrarea != NULL) { 445 contigfree(sc->vtnet_txhdrarea, 446 sc->vtnet_txhdrcount * sizeof(struct vtnet_tx_header), 447 M_VTNET); 448 sc->vtnet_txhdrarea = NULL; 449 } 450 SLIST_INIT(&sc->vtnet_txhdr_free); 451 if (sc->vtnet_macfilter != NULL) { 452 contigfree(sc->vtnet_macfilter, 453 sizeof(struct vtnet_mac_filter), M_DEVBUF); 454 sc->vtnet_macfilter = NULL; 455 } 456 457 ifmedia_removeall(&sc->vtnet_media); 458 459 return (0); 460 } 461 462 static int 463 vtnet_suspend(device_t dev) 464 { 465 struct vtnet_softc *sc; 466 467 sc = device_get_softc(dev); 468 469 lwkt_serialize_enter(&sc->vtnet_slz); 470 vtnet_stop(sc); 471 sc->vtnet_flags |= VTNET_FLAG_SUSPENDED; 472 lwkt_serialize_exit(&sc->vtnet_slz); 473 474 return (0); 475 } 476 477 static int 478 vtnet_resume(device_t dev) 479 { 480 struct vtnet_softc *sc; 481 struct ifnet *ifp; 482 483 sc = device_get_softc(dev); 484 ifp = sc->vtnet_ifp; 485 486 lwkt_serialize_enter(&sc->vtnet_slz); 487 if (ifp->if_flags & IFF_UP) 488 vtnet_init_locked(sc); 489 sc->vtnet_flags &= ~VTNET_FLAG_SUSPENDED; 490 lwkt_serialize_exit(&sc->vtnet_slz); 491 492 return (0); 493 } 494 495 static int 496 vtnet_shutdown(device_t dev) 497 { 498 499 /* 500 * Suspend already does all of what we need to 501 * do here; we just never expect to be resumed. 502 */ 503 return (vtnet_suspend(dev)); 504 } 505 506 static void 507 vtnet_negotiate_features(struct vtnet_softc *sc) 508 { 509 device_t dev; 510 uint64_t mask, features; 511 512 dev = sc->vtnet_dev; 513 mask = 0; 514 515 if (vtnet_csum_disable) 516 mask |= VIRTIO_NET_F_CSUM | VIRTIO_NET_F_GUEST_CSUM; 517 518 /* 519 * XXX DragonFly doesn't support receive checksum offload for ipv6 yet, 520 * hence always disable the virtio feature for now. 521 * XXX We need to support the DynOffload feature, in order to 522 * dynamically enable/disable this feature. 523 */ 524 mask |= VIRTIO_NET_F_GUEST_CSUM; 525 526 /* 527 * TSO is only available when the tx checksum offload feature is also 528 * negotiated. 529 */ 530 if (vtnet_csum_disable || vtnet_tso_disable) 531 mask |= VIRTIO_NET_F_HOST_TSO4 | VIRTIO_NET_F_HOST_TSO6 | 532 VIRTIO_NET_F_HOST_ECN; 533 534 if (vtnet_lro_disable) 535 mask |= VTNET_LRO_FEATURES; 536 537 features = VTNET_FEATURES & ~mask; 538 features |= VIRTIO_F_NOTIFY_ON_EMPTY; 539 features |= VIRTIO_F_ANY_LAYOUT; 540 sc->vtnet_features = virtio_negotiate_features(dev, features); 541 542 if (virtio_with_feature(dev, VTNET_LRO_FEATURES) && 543 virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF) == 0) { 544 /* 545 * LRO without mergeable buffers requires special care. This 546 * is not ideal because every receive buffer must be large 547 * enough to hold the maximum TCP packet, the Ethernet header, 548 * and the header. This requires up to 34 descriptors with 549 * MCLBYTES clusters. If we do not have indirect descriptors, 550 * LRO is disabled since the virtqueue will not contain very 551 * many receive buffers. 552 */ 553 if (!virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC)) { 554 device_printf(dev, 555 "LRO disabled due to both mergeable buffers and " 556 "indirect descriptors not negotiated\n"); 557 558 features &= ~VTNET_LRO_FEATURES; 559 sc->vtnet_features = 560 virtio_negotiate_features(dev, features); 561 } else 562 sc->vtnet_flags |= VTNET_FLAG_LRO_NOMRG; 563 } 564 } 565 566 static int 567 vtnet_alloc_intrs(struct vtnet_softc *sc) 568 { 569 int cnt, error; 570 int intrcount = virtio_intr_count(sc->vtnet_dev); 571 int i; 572 int use_config; 573 574 if (virtio_with_feature(sc->vtnet_dev, VIRTIO_NET_F_STATUS)) { 575 use_config = 1; 576 /* We can use a maximum of 3 interrupt vectors. */ 577 intrcount = imin(intrcount, 3); 578 } else { 579 /* We can use a maximum of 2 interrupt vectors. */ 580 intrcount = imin(intrcount, 2); 581 } 582 583 if (intrcount < 1) 584 return (ENXIO); 585 586 /* 587 * XXX We should explicitly set the cpus for the rx/tx threads, to 588 * only use cpus, where the network stack is running. 589 */ 590 for (i = 0; i < intrcount; i++) 591 sc->vtnet_cpus[i] = -1; 592 593 cnt = intrcount; 594 error = virtio_intr_alloc(sc->vtnet_dev, &cnt, use_config, 595 sc->vtnet_cpus); 596 if (error != 0) { 597 virtio_intr_release(sc->vtnet_dev); 598 return (error); 599 } 600 sc->vtnet_nintr = cnt; 601 602 return (0); 603 } 604 605 static int 606 vtnet_alloc_virtqueues(struct vtnet_softc *sc) 607 { 608 device_t dev; 609 struct vq_alloc_info vq_info[3]; 610 int nvqs; 611 612 dev = sc->vtnet_dev; 613 nvqs = 2; 614 615 /* 616 * Indirect descriptors are not needed for the Rx 617 * virtqueue when mergeable buffers are negotiated. 618 * The header is placed inline with the data, not 619 * in a separate descriptor, and mbuf clusters are 620 * always physically contiguous. 621 */ 622 if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) { 623 sc->vtnet_rx_nsegs = (sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG) ? 624 VTNET_MAX_RX_SEGS : VTNET_MIN_RX_SEGS; 625 } else 626 sc->vtnet_rx_nsegs = VTNET_MRG_RX_SEGS; 627 628 if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4) || 629 virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6)) 630 sc->vtnet_tx_nsegs = VTNET_MAX_TX_SEGS; 631 else 632 sc->vtnet_tx_nsegs = VTNET_MIN_TX_SEGS; 633 634 VQ_ALLOC_INFO_INIT(&vq_info[0], sc->vtnet_rx_nsegs, &sc->vtnet_rx_vq, 635 "%s receive", device_get_nameunit(dev)); 636 637 VQ_ALLOC_INFO_INIT(&vq_info[1], sc->vtnet_tx_nsegs, &sc->vtnet_tx_vq, 638 "%s transmit", device_get_nameunit(dev)); 639 640 if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) { 641 nvqs++; 642 643 VQ_ALLOC_INFO_INIT(&vq_info[2], 0, &sc->vtnet_ctrl_vq, 644 "%s control", device_get_nameunit(dev)); 645 } 646 647 return (virtio_alloc_virtqueues(dev, nvqs, vq_info)); 648 } 649 650 static int 651 vtnet_setup_interface(struct vtnet_softc *sc) 652 { 653 device_t dev; 654 struct ifnet *ifp; 655 int i; 656 657 dev = sc->vtnet_dev; 658 659 ifp = sc->vtnet_ifp = if_alloc(IFT_ETHER); 660 if (ifp == NULL) { 661 device_printf(dev, "cannot allocate ifnet structure\n"); 662 return (ENOSPC); 663 } 664 665 ifp->if_softc = sc; 666 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 667 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 668 ifp->if_init = vtnet_init; 669 ifp->if_start = vtnet_start; 670 ifp->if_ioctl = vtnet_ioctl; 671 672 sc->vtnet_rx_process_limit = virtqueue_size(sc->vtnet_rx_vq); 673 sc->vtnet_tx_size = virtqueue_size(sc->vtnet_tx_vq); 674 if (sc->vtnet_flags & VTNET_FLAG_INDIRECT) 675 sc->vtnet_txhdrcount = sc->vtnet_tx_size; 676 else 677 sc->vtnet_txhdrcount = (sc->vtnet_tx_size / 2) + 1; 678 sc->vtnet_txhdrarea = contigmalloc( 679 sc->vtnet_txhdrcount * sizeof(struct vtnet_tx_header), 680 M_VTNET, M_WAITOK, 0, BUS_SPACE_MAXADDR, 4, 0); 681 if (sc->vtnet_txhdrarea == NULL) { 682 device_printf(dev, "cannot contigmalloc the tx headers\n"); 683 return (ENOMEM); 684 } 685 for (i = 0; i < sc->vtnet_txhdrcount; i++) 686 vtnet_enqueue_txhdr(sc, &sc->vtnet_txhdrarea[i]); 687 sc->vtnet_macfilter = contigmalloc( 688 sizeof(struct vtnet_mac_filter), 689 M_DEVBUF, M_WAITOK, 0, BUS_SPACE_MAXADDR, 4, 0); 690 if (sc->vtnet_macfilter == NULL) { 691 device_printf(dev, 692 "cannot contigmalloc the mac filter table\n"); 693 return (ENOMEM); 694 } 695 ifq_set_maxlen(&ifp->if_snd, sc->vtnet_tx_size - 1); 696 ifq_set_ready(&ifp->if_snd); 697 698 ether_ifattach(ifp, sc->vtnet_hwaddr, NULL); 699 700 /* The Tx IRQ is currently always the last allocated interrupt. */ 701 ifq_set_cpuid(&ifp->if_snd, sc->vtnet_cpus[sc->vtnet_nintr - 1]); 702 703 /* Tell the upper layer(s) we support long frames. */ 704 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 705 ifp->if_capabilities |= IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU; 706 707 if (virtio_with_feature(dev, VIRTIO_NET_F_CSUM)) { 708 ifp->if_capabilities |= IFCAP_TXCSUM; 709 710 if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4)) 711 ifp->if_capabilities |= IFCAP_TSO4; 712 if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6)) 713 ifp->if_capabilities |= IFCAP_TSO6; 714 if (ifp->if_capabilities & IFCAP_TSO) 715 ifp->if_capabilities |= IFCAP_VLAN_HWTSO; 716 717 if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_ECN)) 718 sc->vtnet_flags |= VTNET_FLAG_TSO_ECN; 719 } 720 721 if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_CSUM)) 722 ifp->if_capabilities |= IFCAP_RXCSUM; 723 724 #if 0 /* IFCAP_LRO doesn't exist in DragonFly. */ 725 if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO4) || 726 virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO6)) 727 ifp->if_capabilities |= IFCAP_LRO; 728 #endif 729 730 if ((ifp->if_capabilities & IFCAP_HWCSUM) == IFCAP_HWCSUM) { 731 /* 732 * VirtIO does not support VLAN tagging, but we can fake 733 * it by inserting and removing the 802.1Q header during 734 * transmit and receive. We are then able to do checksum 735 * offloading of VLAN frames. 736 */ 737 ifp->if_capabilities |= 738 IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM; 739 } 740 741 ifp->if_capenable = ifp->if_capabilities; 742 743 /* 744 * Capabilities after here are not enabled by default. 745 */ 746 747 if (sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER) { 748 ifp->if_capabilities |= IFCAP_VLAN_HWFILTER; 749 750 sc->vtnet_vlan_attach = EVENTHANDLER_REGISTER(vlan_config, 751 vtnet_register_vlan, sc, EVENTHANDLER_PRI_FIRST); 752 sc->vtnet_vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig, 753 vtnet_unregister_vlan, sc, EVENTHANDLER_PRI_FIRST); 754 } 755 756 return (0); 757 } 758 759 static void 760 vtnet_set_hwaddr(struct vtnet_softc *sc) 761 { 762 device_t dev; 763 764 dev = sc->vtnet_dev; 765 766 if ((sc->vtnet_flags & VTNET_FLAG_CTRL_MAC) && 767 (sc->vtnet_flags & VTNET_FLAG_CTRL_RX)) { 768 if (vtnet_ctrl_mac_cmd(sc, sc->vtnet_hwaddr) != 0) 769 device_printf(dev, "unable to set MAC address\n"); 770 } else if (sc->vtnet_flags & VTNET_FLAG_MAC) { 771 virtio_write_device_config(dev, 772 offsetof(struct virtio_net_config, mac), 773 sc->vtnet_hwaddr, ETHER_ADDR_LEN); 774 } 775 } 776 777 static void 778 vtnet_get_hwaddr(struct vtnet_softc *sc) 779 { 780 device_t dev; 781 782 dev = sc->vtnet_dev; 783 784 if ((sc->vtnet_flags & VTNET_FLAG_MAC) == 0) { 785 /* 786 * Generate a random locally administered unicast address. 787 * 788 * It would be nice to generate the same MAC address across 789 * reboots, but it seems all the hosts currently available 790 * support the MAC feature, so this isn't too important. 791 */ 792 sc->vtnet_hwaddr[0] = 0xB2; 793 karc4rand(&sc->vtnet_hwaddr[1], ETHER_ADDR_LEN - 1); 794 return; 795 } 796 797 virtio_read_device_config(dev, 798 offsetof(struct virtio_net_config, mac), 799 sc->vtnet_hwaddr, ETHER_ADDR_LEN); 800 } 801 802 static int 803 vtnet_is_link_up(struct vtnet_softc *sc) 804 { 805 device_t dev; 806 struct ifnet *ifp; 807 uint16_t status; 808 809 dev = sc->vtnet_dev; 810 ifp = sc->vtnet_ifp; 811 812 ASSERT_SERIALIZED(&sc->vtnet_slz); 813 814 if (virtio_with_feature(dev, VIRTIO_NET_F_STATUS)) { 815 status = virtio_read_dev_config_2(dev, 816 offsetof(struct virtio_net_config, status)); 817 } else { 818 status = VIRTIO_NET_S_LINK_UP; 819 } 820 821 return ((status & VIRTIO_NET_S_LINK_UP) != 0); 822 } 823 824 static void 825 vtnet_update_link_status(struct vtnet_softc *sc) 826 { 827 device_t dev; 828 struct ifnet *ifp; 829 struct ifaltq_subque *ifsq; 830 int link; 831 832 dev = sc->vtnet_dev; 833 ifp = sc->vtnet_ifp; 834 ifsq = ifq_get_subq_default(&ifp->if_snd); 835 836 link = vtnet_is_link_up(sc); 837 838 if (link && ((sc->vtnet_flags & VTNET_FLAG_LINK) == 0)) { 839 sc->vtnet_flags |= VTNET_FLAG_LINK; 840 if (bootverbose) 841 device_printf(dev, "Link is up\n"); 842 ifp->if_link_state = LINK_STATE_UP; 843 if_link_state_change(ifp); 844 if (!ifsq_is_empty(ifsq)) 845 vtnet_start_locked(ifp, ifsq); 846 } else if (!link && (sc->vtnet_flags & VTNET_FLAG_LINK)) { 847 sc->vtnet_flags &= ~VTNET_FLAG_LINK; 848 if (bootverbose) 849 device_printf(dev, "Link is down\n"); 850 851 ifp->if_link_state = LINK_STATE_DOWN; 852 if_link_state_change(ifp); 853 } 854 } 855 856 #if 0 857 static void 858 vtnet_watchdog(struct vtnet_softc *sc) 859 { 860 struct ifnet *ifp; 861 862 ifp = sc->vtnet_ifp; 863 864 #ifdef VTNET_TX_INTR_MODERATION 865 vtnet_txeof(sc); 866 #endif 867 868 if (sc->vtnet_watchdog_timer == 0 || --sc->vtnet_watchdog_timer) 869 return; 870 871 if_printf(ifp, "watchdog timeout -- resetting\n"); 872 #ifdef VTNET_DEBUG 873 virtqueue_dump(sc->vtnet_tx_vq); 874 #endif 875 ifp->if_oerrors++; 876 ifp->if_flags &= ~IFF_RUNNING; 877 vtnet_init_locked(sc); 878 } 879 #endif 880 881 static int 882 vtnet_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data,struct ucred *cr) 883 { 884 struct vtnet_softc *sc; 885 struct ifreq *ifr; 886 int reinit, mask, error; 887 888 sc = ifp->if_softc; 889 ifr = (struct ifreq *) data; 890 reinit = 0; 891 error = 0; 892 893 switch (cmd) { 894 case SIOCSIFMTU: 895 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > VTNET_MAX_MTU) 896 error = EINVAL; 897 else if (ifp->if_mtu != ifr->ifr_mtu) { 898 lwkt_serialize_enter(&sc->vtnet_slz); 899 error = vtnet_change_mtu(sc, ifr->ifr_mtu); 900 lwkt_serialize_exit(&sc->vtnet_slz); 901 } 902 break; 903 904 case SIOCSIFFLAGS: 905 lwkt_serialize_enter(&sc->vtnet_slz); 906 if ((ifp->if_flags & IFF_UP) == 0) { 907 if (ifp->if_flags & IFF_RUNNING) 908 vtnet_stop(sc); 909 } else if (ifp->if_flags & IFF_RUNNING) { 910 if ((ifp->if_flags ^ sc->vtnet_if_flags) & 911 (IFF_PROMISC | IFF_ALLMULTI)) { 912 if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) 913 vtnet_rx_filter(sc); 914 else 915 error = ENOTSUP; 916 } 917 } else 918 vtnet_init_locked(sc); 919 920 if (error == 0) 921 sc->vtnet_if_flags = ifp->if_flags; 922 lwkt_serialize_exit(&sc->vtnet_slz); 923 break; 924 925 case SIOCADDMULTI: 926 case SIOCDELMULTI: 927 lwkt_serialize_enter(&sc->vtnet_slz); 928 if ((sc->vtnet_flags & VTNET_FLAG_CTRL_RX) && 929 (ifp->if_flags & IFF_RUNNING)) 930 vtnet_rx_filter_mac(sc); 931 lwkt_serialize_exit(&sc->vtnet_slz); 932 break; 933 934 case SIOCSIFMEDIA: 935 case SIOCGIFMEDIA: 936 error = ifmedia_ioctl(ifp, ifr, &sc->vtnet_media, cmd); 937 break; 938 939 case SIOCSIFCAP: 940 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 941 942 lwkt_serialize_enter(&sc->vtnet_slz); 943 944 if (mask & IFCAP_TXCSUM) { 945 ifp->if_capenable ^= IFCAP_TXCSUM; 946 if (ifp->if_capenable & IFCAP_TXCSUM) 947 ifp->if_hwassist |= VTNET_CSUM_OFFLOAD; 948 else 949 ifp->if_hwassist &= ~VTNET_CSUM_OFFLOAD; 950 } 951 952 if (mask & IFCAP_TSO4) { 953 ifp->if_capenable ^= IFCAP_TSO4; 954 if (ifp->if_capenable & IFCAP_TSO4) 955 ifp->if_hwassist |= CSUM_TSO; 956 else 957 ifp->if_hwassist &= ~CSUM_TSO; 958 } 959 960 if (mask & IFCAP_RXCSUM) { 961 ifp->if_capenable ^= IFCAP_RXCSUM; 962 reinit = 1; 963 } 964 965 #if 0 /* IFCAP_LRO doesn't exist in DragonFly. */ 966 if (mask & IFCAP_LRO) { 967 ifp->if_capenable ^= IFCAP_LRO; 968 reinit = 1; 969 } 970 #endif 971 972 if (mask & IFCAP_VLAN_HWFILTER) { 973 ifp->if_capenable ^= IFCAP_VLAN_HWFILTER; 974 reinit = 1; 975 } 976 977 if (mask & IFCAP_VLAN_HWTSO) 978 ifp->if_capenable ^= IFCAP_VLAN_HWTSO; 979 980 if (mask & IFCAP_VLAN_HWTAGGING) 981 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 982 983 if (reinit && (ifp->if_flags & IFF_RUNNING)) { 984 ifp->if_flags &= ~IFF_RUNNING; 985 vtnet_init_locked(sc); 986 } 987 //VLAN_CAPABILITIES(ifp); 988 989 lwkt_serialize_exit(&sc->vtnet_slz); 990 break; 991 992 default: 993 error = ether_ioctl(ifp, cmd, data); 994 break; 995 } 996 997 return (error); 998 } 999 1000 static int 1001 vtnet_change_mtu(struct vtnet_softc *sc, int new_mtu) 1002 { 1003 struct ifnet *ifp; 1004 int new_frame_size, clsize; 1005 1006 ifp = sc->vtnet_ifp; 1007 1008 if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) { 1009 new_frame_size = sizeof(struct vtnet_rx_header) + 1010 sizeof(struct ether_vlan_header) + new_mtu; 1011 1012 if (new_frame_size > MJUM9BYTES) 1013 return (EINVAL); 1014 1015 if (new_frame_size <= MCLBYTES) 1016 clsize = MCLBYTES; 1017 else 1018 clsize = MJUM9BYTES; 1019 } else { 1020 new_frame_size = sizeof(struct virtio_net_hdr_mrg_rxbuf) + 1021 sizeof(struct ether_vlan_header) + new_mtu; 1022 1023 if (new_frame_size <= MCLBYTES) 1024 clsize = MCLBYTES; 1025 else 1026 clsize = MJUMPAGESIZE; 1027 } 1028 1029 sc->vtnet_rx_mbuf_size = clsize; 1030 sc->vtnet_rx_mbuf_count = VTNET_NEEDED_RX_MBUFS(sc); 1031 KASSERT(sc->vtnet_rx_mbuf_count < VTNET_MAX_RX_SEGS, 1032 ("too many rx mbufs: %d", sc->vtnet_rx_mbuf_count)); 1033 1034 ifp->if_mtu = new_mtu; 1035 1036 if (ifp->if_flags & IFF_RUNNING) { 1037 ifp->if_flags &= ~IFF_RUNNING; 1038 vtnet_init_locked(sc); 1039 } 1040 1041 return (0); 1042 } 1043 1044 static int 1045 vtnet_init_rx_vq(struct vtnet_softc *sc) 1046 { 1047 struct virtqueue *vq; 1048 int nbufs, error; 1049 1050 vq = sc->vtnet_rx_vq; 1051 nbufs = 0; 1052 error = ENOSPC; 1053 1054 while (!virtqueue_full(vq)) { 1055 if ((error = vtnet_newbuf(sc)) != 0) 1056 break; 1057 nbufs++; 1058 } 1059 1060 if (nbufs > 0) { 1061 virtqueue_notify(vq, &sc->vtnet_slz); 1062 1063 /* 1064 * EMSGSIZE signifies the virtqueue did not have enough 1065 * entries available to hold the last mbuf. This is not 1066 * an error. We should not get ENOSPC since we check if 1067 * the virtqueue is full before attempting to add a 1068 * buffer. 1069 */ 1070 if (error == EMSGSIZE) 1071 error = 0; 1072 } 1073 1074 return (error); 1075 } 1076 1077 static void 1078 vtnet_free_rx_mbufs(struct vtnet_softc *sc) 1079 { 1080 struct virtqueue *vq; 1081 struct mbuf *m; 1082 int last; 1083 1084 vq = sc->vtnet_rx_vq; 1085 last = 0; 1086 1087 while ((m = virtqueue_drain(vq, &last)) != NULL) 1088 m_freem(m); 1089 1090 KASSERT(virtqueue_empty(vq), ("mbufs remaining in Rx Vq")); 1091 } 1092 1093 static void 1094 vtnet_free_tx_mbufs(struct vtnet_softc *sc) 1095 { 1096 struct virtqueue *vq; 1097 struct vtnet_tx_header *txhdr; 1098 int last; 1099 1100 vq = sc->vtnet_tx_vq; 1101 last = 0; 1102 1103 while ((txhdr = virtqueue_drain(vq, &last)) != NULL) { 1104 m_freem(txhdr->vth_mbuf); 1105 vtnet_enqueue_txhdr(sc, txhdr); 1106 } 1107 1108 KASSERT(virtqueue_empty(vq), ("mbufs remaining in Tx Vq")); 1109 } 1110 1111 static void 1112 vtnet_free_ctrl_vq(struct vtnet_softc *sc) 1113 { 1114 /* 1115 * The control virtqueue is only polled, therefore 1116 * it should already be empty. 1117 */ 1118 KASSERT(virtqueue_empty(sc->vtnet_ctrl_vq), 1119 ("Ctrl Vq not empty")); 1120 } 1121 1122 static struct mbuf * 1123 vtnet_alloc_rxbuf(struct vtnet_softc *sc, int nbufs, struct mbuf **m_tailp) 1124 { 1125 struct mbuf *m_head, *m_tail, *m; 1126 int i, clsize; 1127 1128 clsize = sc->vtnet_rx_mbuf_size; 1129 1130 /*use getcl instead of getjcl. see if_mxge.c comment line 2398*/ 1131 //m_head = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, clsize); 1132 m_head = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR ); 1133 if (m_head == NULL) 1134 goto fail; 1135 1136 m_head->m_len = clsize; 1137 m_tail = m_head; 1138 1139 if (nbufs > 1) { 1140 KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG, 1141 ("chained Rx mbuf requested without LRO_NOMRG")); 1142 1143 for (i = 0; i < nbufs - 1; i++) { 1144 //m = m_getjcl(M_DONTWAIT, MT_DATA, 0, clsize); 1145 m = m_getcl(M_NOWAIT, MT_DATA, 0); 1146 if (m == NULL) 1147 goto fail; 1148 1149 m->m_len = clsize; 1150 m_tail->m_next = m; 1151 m_tail = m; 1152 } 1153 } 1154 1155 if (m_tailp != NULL) 1156 *m_tailp = m_tail; 1157 1158 return (m_head); 1159 1160 fail: 1161 sc->vtnet_stats.mbuf_alloc_failed++; 1162 m_freem(m_head); 1163 1164 return (NULL); 1165 } 1166 1167 static int 1168 vtnet_replace_rxbuf(struct vtnet_softc *sc, struct mbuf *m0, int len0) 1169 { 1170 struct mbuf *m, *m_prev; 1171 struct mbuf *m_new, *m_tail; 1172 int len, clsize, nreplace, error; 1173 1174 m = m0; 1175 m_prev = NULL; 1176 len = len0; 1177 1178 m_tail = NULL; 1179 clsize = sc->vtnet_rx_mbuf_size; 1180 nreplace = 0; 1181 1182 if (m->m_next != NULL) 1183 KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG, 1184 ("chained Rx mbuf without LRO_NOMRG")); 1185 1186 /* 1187 * Since LRO_NOMRG mbuf chains are so large, we want to avoid 1188 * allocating an entire chain for each received frame. When 1189 * the received frame's length is less than that of the chain, 1190 * the unused mbufs are reassigned to the new chain. 1191 */ 1192 while (len > 0) { 1193 /* 1194 * Something is seriously wrong if we received 1195 * a frame larger than the mbuf chain. Drop it. 1196 */ 1197 if (m == NULL) { 1198 sc->vtnet_stats.rx_frame_too_large++; 1199 return (EMSGSIZE); 1200 } 1201 1202 KASSERT(m->m_len == clsize, 1203 ("mbuf length not expected cluster size: %d", 1204 m->m_len)); 1205 1206 m->m_len = MIN(m->m_len, len); 1207 len -= m->m_len; 1208 1209 m_prev = m; 1210 m = m->m_next; 1211 nreplace++; 1212 } 1213 1214 KASSERT(m_prev != NULL, ("m_prev == NULL")); 1215 KASSERT(nreplace <= sc->vtnet_rx_mbuf_count, 1216 ("too many replacement mbufs: %d/%d", nreplace, 1217 sc->vtnet_rx_mbuf_count)); 1218 1219 m_new = vtnet_alloc_rxbuf(sc, nreplace, &m_tail); 1220 if (m_new == NULL) { 1221 m_prev->m_len = clsize; 1222 return (ENOBUFS); 1223 } 1224 1225 /* 1226 * Move unused mbufs, if any, from the original chain 1227 * onto the end of the new chain. 1228 */ 1229 if (m_prev->m_next != NULL) { 1230 m_tail->m_next = m_prev->m_next; 1231 m_prev->m_next = NULL; 1232 } 1233 1234 error = vtnet_enqueue_rxbuf(sc, m_new); 1235 if (error) { 1236 /* 1237 * BAD! We could not enqueue the replacement mbuf chain. We 1238 * must restore the m0 chain to the original state if it was 1239 * modified so we can subsequently discard it. 1240 * 1241 * NOTE: The replacement is suppose to be an identical copy 1242 * to the one just dequeued so this is an unexpected error. 1243 */ 1244 sc->vtnet_stats.rx_enq_replacement_failed++; 1245 1246 if (m_tail->m_next != NULL) { 1247 m_prev->m_next = m_tail->m_next; 1248 m_tail->m_next = NULL; 1249 } 1250 1251 m_prev->m_len = clsize; 1252 m_freem(m_new); 1253 } 1254 1255 return (error); 1256 } 1257 1258 static int 1259 vtnet_newbuf(struct vtnet_softc *sc) 1260 { 1261 struct mbuf *m; 1262 int error; 1263 1264 m = vtnet_alloc_rxbuf(sc, sc->vtnet_rx_mbuf_count, NULL); 1265 if (m == NULL) 1266 return (ENOBUFS); 1267 1268 error = vtnet_enqueue_rxbuf(sc, m); 1269 if (error) 1270 m_freem(m); 1271 1272 return (error); 1273 } 1274 1275 static void 1276 vtnet_discard_merged_rxbuf(struct vtnet_softc *sc, int nbufs) 1277 { 1278 struct virtqueue *vq; 1279 struct mbuf *m; 1280 1281 vq = sc->vtnet_rx_vq; 1282 1283 while (--nbufs > 0) { 1284 if ((m = virtqueue_dequeue(vq, NULL)) == NULL) 1285 break; 1286 vtnet_discard_rxbuf(sc, m); 1287 } 1288 } 1289 1290 static void 1291 vtnet_discard_rxbuf(struct vtnet_softc *sc, struct mbuf *m) 1292 { 1293 int error; 1294 1295 /* 1296 * Requeue the discarded mbuf. This should always be 1297 * successful since it was just dequeued. 1298 */ 1299 error = vtnet_enqueue_rxbuf(sc, m); 1300 KASSERT(error == 0, ("cannot requeue discarded mbuf")); 1301 } 1302 1303 static int 1304 vtnet_enqueue_rxbuf(struct vtnet_softc *sc, struct mbuf *m) 1305 { 1306 struct sglist sg; 1307 struct sglist_seg segs[VTNET_MAX_RX_SEGS]; 1308 struct vtnet_rx_header *rxhdr; 1309 struct virtio_net_hdr *hdr; 1310 uint8_t *mdata; 1311 int offset, error; 1312 1313 ASSERT_SERIALIZED(&sc->vtnet_slz); 1314 if ((sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG) == 0) 1315 KASSERT(m->m_next == NULL, ("chained Rx mbuf")); 1316 1317 sglist_init(&sg, sc->vtnet_rx_nsegs, segs); 1318 1319 mdata = mtod(m, uint8_t *); 1320 offset = 0; 1321 1322 if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) { 1323 rxhdr = (struct vtnet_rx_header *) mdata; 1324 hdr = &rxhdr->vrh_hdr; 1325 offset += sizeof(struct vtnet_rx_header); 1326 1327 error = sglist_append(&sg, hdr, sc->vtnet_hdr_size); 1328 KASSERT(error == 0, ("cannot add header to sglist")); 1329 } 1330 1331 error = sglist_append(&sg, mdata + offset, m->m_len - offset); 1332 if (error) 1333 return (error); 1334 1335 if (m->m_next != NULL) { 1336 error = sglist_append_mbuf(&sg, m->m_next); 1337 if (error) 1338 return (error); 1339 } 1340 1341 return (virtqueue_enqueue(sc->vtnet_rx_vq, m, &sg, 0, sg.sg_nseg)); 1342 } 1343 1344 static void 1345 vtnet_vlan_tag_remove(struct mbuf *m) 1346 { 1347 struct ether_vlan_header *evl; 1348 1349 evl = mtod(m, struct ether_vlan_header *); 1350 1351 m->m_pkthdr.ether_vlantag = ntohs(evl->evl_tag); 1352 m->m_flags |= M_VLANTAG; 1353 1354 /* Strip the 802.1Q header. */ 1355 bcopy((char *) evl, (char *) evl + ETHER_VLAN_ENCAP_LEN, 1356 ETHER_HDR_LEN - ETHER_TYPE_LEN); 1357 m_adj(m, ETHER_VLAN_ENCAP_LEN); 1358 } 1359 1360 /* 1361 * Alternative method of doing receive checksum offloading. Rather 1362 * than parsing the received frame down to the IP header, use the 1363 * csum_offset to determine which CSUM_* flags are appropriate. We 1364 * can get by with doing this only because the checksum offsets are 1365 * unique for the things we care about. 1366 */ 1367 static int 1368 vtnet_rx_csum(struct vtnet_softc *sc, struct mbuf *m, 1369 struct virtio_net_hdr *hdr) 1370 { 1371 struct ether_header *eh; 1372 struct ether_vlan_header *evh; 1373 struct udphdr *udp; 1374 int csum_len; 1375 uint16_t eth_type; 1376 1377 csum_len = hdr->csum_start + hdr->csum_offset; 1378 1379 if (csum_len < sizeof(struct ether_header) + sizeof(struct ip)) 1380 return (1); 1381 if (m->m_len < csum_len) 1382 return (1); 1383 1384 eh = mtod(m, struct ether_header *); 1385 eth_type = ntohs(eh->ether_type); 1386 if (eth_type == ETHERTYPE_VLAN) { 1387 evh = mtod(m, struct ether_vlan_header *); 1388 eth_type = ntohs(evh->evl_proto); 1389 } 1390 1391 if (eth_type != ETHERTYPE_IP && eth_type != ETHERTYPE_IPV6) { 1392 sc->vtnet_stats.rx_csum_bad_ethtype++; 1393 return (1); 1394 } 1395 1396 /* Use the offset to determine the appropriate CSUM_* flags. */ 1397 switch (hdr->csum_offset) { 1398 case offsetof(struct udphdr, uh_sum): 1399 if (m->m_len < hdr->csum_start + sizeof(struct udphdr)) 1400 return (1); 1401 udp = (struct udphdr *)(mtod(m, uint8_t *) + hdr->csum_start); 1402 if (udp->uh_sum == 0) 1403 return (0); 1404 1405 /* FALLTHROUGH */ 1406 1407 case offsetof(struct tcphdr, th_sum): 1408 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 1409 m->m_pkthdr.csum_data = 0xFFFF; 1410 break; 1411 1412 default: 1413 sc->vtnet_stats.rx_csum_bad_offset++; 1414 return (1); 1415 } 1416 1417 sc->vtnet_stats.rx_csum_offloaded++; 1418 1419 return (0); 1420 } 1421 1422 static int 1423 vtnet_rxeof_merged(struct vtnet_softc *sc, struct mbuf *m_head, int nbufs) 1424 { 1425 struct ifnet *ifp; 1426 struct virtqueue *vq; 1427 struct mbuf *m, *m_tail; 1428 int len; 1429 1430 ifp = sc->vtnet_ifp; 1431 vq = sc->vtnet_rx_vq; 1432 m_tail = m_head; 1433 1434 while (--nbufs > 0) { 1435 m = virtqueue_dequeue(vq, &len); 1436 if (m == NULL) { 1437 ifp->if_ierrors++; 1438 goto fail; 1439 } 1440 1441 if (vtnet_newbuf(sc) != 0) { 1442 ifp->if_iqdrops++; 1443 vtnet_discard_rxbuf(sc, m); 1444 if (nbufs > 1) 1445 vtnet_discard_merged_rxbuf(sc, nbufs); 1446 goto fail; 1447 } 1448 1449 if (m->m_len < len) 1450 len = m->m_len; 1451 1452 m->m_len = len; 1453 m->m_flags &= ~M_PKTHDR; 1454 1455 m_head->m_pkthdr.len += len; 1456 m_tail->m_next = m; 1457 m_tail = m; 1458 } 1459 1460 return (0); 1461 1462 fail: 1463 sc->vtnet_stats.rx_mergeable_failed++; 1464 m_freem(m_head); 1465 1466 return (1); 1467 } 1468 1469 static int 1470 vtnet_rxeof(struct vtnet_softc *sc, int count, int *rx_npktsp) 1471 { 1472 struct virtio_net_hdr lhdr; 1473 struct ifnet *ifp; 1474 struct virtqueue *vq; 1475 struct mbuf *m; 1476 struct ether_header *eh; 1477 struct virtio_net_hdr *hdr; 1478 struct virtio_net_hdr_mrg_rxbuf *mhdr; 1479 int len, deq, nbufs, adjsz, rx_npkts; 1480 1481 ifp = sc->vtnet_ifp; 1482 vq = sc->vtnet_rx_vq; 1483 hdr = &lhdr; 1484 deq = 0; 1485 rx_npkts = 0; 1486 1487 ASSERT_SERIALIZED(&sc->vtnet_slz); 1488 1489 while (--count >= 0) { 1490 m = virtqueue_dequeue(vq, &len); 1491 if (m == NULL) 1492 break; 1493 deq++; 1494 1495 if (len < sc->vtnet_hdr_size + ETHER_HDR_LEN) { 1496 ifp->if_ierrors++; 1497 vtnet_discard_rxbuf(sc, m); 1498 continue; 1499 } 1500 1501 if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) { 1502 nbufs = 1; 1503 adjsz = sizeof(struct vtnet_rx_header); 1504 /* 1505 * Account for our pad between the header and 1506 * the actual start of the frame. 1507 */ 1508 len += VTNET_RX_HEADER_PAD; 1509 } else { 1510 mhdr = mtod(m, struct virtio_net_hdr_mrg_rxbuf *); 1511 nbufs = mhdr->num_buffers; 1512 adjsz = sizeof(struct virtio_net_hdr_mrg_rxbuf); 1513 } 1514 1515 if (vtnet_replace_rxbuf(sc, m, len) != 0) { 1516 ifp->if_iqdrops++; 1517 vtnet_discard_rxbuf(sc, m); 1518 if (nbufs > 1) 1519 vtnet_discard_merged_rxbuf(sc, nbufs); 1520 continue; 1521 } 1522 1523 m->m_pkthdr.len = len; 1524 m->m_pkthdr.rcvif = ifp; 1525 m->m_pkthdr.csum_flags = 0; 1526 1527 if (nbufs > 1) { 1528 if (vtnet_rxeof_merged(sc, m, nbufs) != 0) 1529 continue; 1530 } 1531 1532 ifp->if_ipackets++; 1533 1534 /* 1535 * Save copy of header before we strip it. For both mergeable 1536 * and non-mergeable, the VirtIO header is placed first in the 1537 * mbuf's data. We no longer need num_buffers, so always use a 1538 * virtio_net_hdr. 1539 */ 1540 memcpy(hdr, mtod(m, void *), sizeof(struct virtio_net_hdr)); 1541 m_adj(m, adjsz); 1542 1543 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) { 1544 eh = mtod(m, struct ether_header *); 1545 if (eh->ether_type == htons(ETHERTYPE_VLAN)) { 1546 vtnet_vlan_tag_remove(m); 1547 1548 /* 1549 * With the 802.1Q header removed, update the 1550 * checksum starting location accordingly. 1551 */ 1552 if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) 1553 hdr->csum_start -= 1554 ETHER_VLAN_ENCAP_LEN; 1555 } 1556 } 1557 1558 if (ifp->if_capenable & IFCAP_RXCSUM && 1559 hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { 1560 if (vtnet_rx_csum(sc, m, hdr) != 0) 1561 sc->vtnet_stats.rx_csum_failed++; 1562 } 1563 1564 lwkt_serialize_exit(&sc->vtnet_slz); 1565 rx_npkts++; 1566 ifp->if_input(ifp, m, NULL, -1); 1567 lwkt_serialize_enter(&sc->vtnet_slz); 1568 1569 /* 1570 * The interface may have been stopped while we were 1571 * passing the packet up the network stack. 1572 */ 1573 if ((ifp->if_flags & IFF_RUNNING) == 0) 1574 break; 1575 } 1576 1577 virtqueue_notify(vq, &sc->vtnet_slz); 1578 1579 if (rx_npktsp != NULL) 1580 *rx_npktsp = rx_npkts; 1581 1582 return (count > 0 ? 0 : EAGAIN); 1583 } 1584 1585 static void 1586 vtnet_rx_intr_task(void *arg) 1587 { 1588 struct vtnet_softc *sc; 1589 struct ifnet *ifp; 1590 int more; 1591 1592 sc = arg; 1593 ifp = sc->vtnet_ifp; 1594 1595 next: 1596 // lwkt_serialize_enter(&sc->vtnet_slz); 1597 1598 if ((ifp->if_flags & IFF_RUNNING) == 0) { 1599 vtnet_enable_rx_intr(sc); 1600 // lwkt_serialize_exit(&sc->vtnet_slz); 1601 return; 1602 } 1603 1604 more = vtnet_rxeof(sc, sc->vtnet_rx_process_limit, NULL); 1605 if (!more && vtnet_enable_rx_intr(sc) != 0) { 1606 vtnet_disable_rx_intr(sc); 1607 more = 1; 1608 } 1609 1610 // lwkt_serialize_exit(&sc->vtnet_slz); 1611 1612 if (more) { 1613 sc->vtnet_stats.rx_task_rescheduled++; 1614 goto next; 1615 } 1616 } 1617 1618 static void 1619 vtnet_rx_vq_intr(void *xsc) 1620 { 1621 struct vtnet_softc *sc; 1622 1623 sc = xsc; 1624 1625 if (!virtqueue_pending(sc->vtnet_rx_vq)) 1626 return; 1627 1628 vtnet_disable_rx_intr(sc); 1629 vtnet_rx_intr_task(sc); 1630 } 1631 1632 static void 1633 vtnet_enqueue_txhdr(struct vtnet_softc *sc, struct vtnet_tx_header *txhdr) 1634 { 1635 bzero(txhdr, sizeof(*txhdr)); 1636 SLIST_INSERT_HEAD(&sc->vtnet_txhdr_free, txhdr, link); 1637 } 1638 1639 static void 1640 vtnet_txeof(struct vtnet_softc *sc) 1641 { 1642 struct virtqueue *vq; 1643 struct ifnet *ifp; 1644 struct vtnet_tx_header *txhdr; 1645 int deq; 1646 1647 vq = sc->vtnet_tx_vq; 1648 ifp = sc->vtnet_ifp; 1649 deq = 0; 1650 1651 ASSERT_SERIALIZED(&sc->vtnet_slz); 1652 1653 while ((txhdr = virtqueue_dequeue(vq, NULL)) != NULL) { 1654 deq++; 1655 ifp->if_opackets++; 1656 m_freem(txhdr->vth_mbuf); 1657 vtnet_enqueue_txhdr(sc, txhdr); 1658 } 1659 1660 if (deq > 0) { 1661 ifq_clr_oactive(&ifp->if_snd); 1662 if (virtqueue_empty(vq)) 1663 sc->vtnet_watchdog_timer = 0; 1664 } 1665 } 1666 1667 static struct mbuf * 1668 vtnet_tx_offload(struct vtnet_softc *sc, struct mbuf *m, 1669 struct virtio_net_hdr *hdr) 1670 { 1671 struct ifnet *ifp; 1672 struct ether_header *eh; 1673 struct ether_vlan_header *evh; 1674 struct ip *ip; 1675 struct ip6_hdr *ip6; 1676 struct tcphdr *tcp; 1677 int ip_offset; 1678 uint16_t eth_type, csum_start; 1679 uint8_t ip_proto, gso_type; 1680 1681 ifp = sc->vtnet_ifp; 1682 M_ASSERTPKTHDR(m); 1683 1684 ip_offset = sizeof(struct ether_header); 1685 if (m->m_len < ip_offset) { 1686 if ((m = m_pullup(m, ip_offset)) == NULL) 1687 return (NULL); 1688 } 1689 1690 eh = mtod(m, struct ether_header *); 1691 eth_type = ntohs(eh->ether_type); 1692 if (eth_type == ETHERTYPE_VLAN) { 1693 ip_offset = sizeof(struct ether_vlan_header); 1694 if (m->m_len < ip_offset) { 1695 if ((m = m_pullup(m, ip_offset)) == NULL) 1696 return (NULL); 1697 } 1698 evh = mtod(m, struct ether_vlan_header *); 1699 eth_type = ntohs(evh->evl_proto); 1700 } 1701 1702 switch (eth_type) { 1703 case ETHERTYPE_IP: 1704 if (m->m_len < ip_offset + sizeof(struct ip)) { 1705 m = m_pullup(m, ip_offset + sizeof(struct ip)); 1706 if (m == NULL) 1707 return (NULL); 1708 } 1709 1710 ip = (struct ip *)(mtod(m, uint8_t *) + ip_offset); 1711 ip_proto = ip->ip_p; 1712 csum_start = ip_offset + (ip->ip_hl << 2); 1713 gso_type = VIRTIO_NET_HDR_GSO_TCPV4; 1714 break; 1715 1716 case ETHERTYPE_IPV6: 1717 if (m->m_len < ip_offset + sizeof(struct ip6_hdr)) { 1718 m = m_pullup(m, ip_offset + sizeof(struct ip6_hdr)); 1719 if (m == NULL) 1720 return (NULL); 1721 } 1722 1723 ip6 = (struct ip6_hdr *)(mtod(m, uint8_t *) + ip_offset); 1724 /* 1725 * XXX Assume no extension headers are present. Presently, 1726 * this will always be true in the case of TSO, and FreeBSD 1727 * does not perform checksum offloading of IPv6 yet. 1728 */ 1729 ip_proto = ip6->ip6_nxt; 1730 csum_start = ip_offset + sizeof(struct ip6_hdr); 1731 gso_type = VIRTIO_NET_HDR_GSO_TCPV6; 1732 break; 1733 1734 default: 1735 return (m); 1736 } 1737 1738 if (m->m_pkthdr.csum_flags & VTNET_CSUM_OFFLOAD) { 1739 hdr->flags |= VIRTIO_NET_HDR_F_NEEDS_CSUM; 1740 hdr->csum_start = csum_start; 1741 hdr->csum_offset = m->m_pkthdr.csum_data; 1742 1743 sc->vtnet_stats.tx_csum_offloaded++; 1744 } 1745 1746 if (m->m_pkthdr.csum_flags & CSUM_TSO) { 1747 if (ip_proto != IPPROTO_TCP) 1748 return (m); 1749 1750 if (m->m_len < csum_start + sizeof(struct tcphdr)) { 1751 m = m_pullup(m, csum_start + sizeof(struct tcphdr)); 1752 if (m == NULL) 1753 return (NULL); 1754 } 1755 1756 tcp = (struct tcphdr *)(mtod(m, uint8_t *) + csum_start); 1757 hdr->gso_type = gso_type; 1758 hdr->hdr_len = csum_start + (tcp->th_off << 2); 1759 hdr->gso_size = m->m_pkthdr.tso_segsz; 1760 1761 if (tcp->th_flags & TH_CWR) { 1762 /* 1763 * Drop if we did not negotiate VIRTIO_NET_F_HOST_ECN. 1764 * ECN support is only configurable globally with the 1765 * net.inet.tcp.ecn.enable sysctl knob. 1766 */ 1767 if ((sc->vtnet_flags & VTNET_FLAG_TSO_ECN) == 0) { 1768 if_printf(ifp, "TSO with ECN not supported " 1769 "by host\n"); 1770 m_freem(m); 1771 return (NULL); 1772 } 1773 1774 hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN; 1775 } 1776 1777 sc->vtnet_stats.tx_tso_offloaded++; 1778 } 1779 1780 return (m); 1781 } 1782 1783 static int 1784 vtnet_enqueue_txbuf(struct vtnet_softc *sc, struct mbuf **m_head, 1785 struct vtnet_tx_header *txhdr) 1786 { 1787 struct sglist sg; 1788 struct sglist_seg segs[VTNET_MAX_TX_SEGS]; 1789 struct virtqueue *vq; 1790 struct mbuf *m; 1791 int error; 1792 1793 vq = sc->vtnet_tx_vq; 1794 m = *m_head; 1795 1796 sglist_init(&sg, sc->vtnet_tx_nsegs, segs); 1797 error = sglist_append(&sg, &txhdr->vth_uhdr, sc->vtnet_hdr_size); 1798 KASSERT(error == 0 && sg.sg_nseg == 1, 1799 ("%s: error %d adding header to sglist", __func__, error)); 1800 1801 error = sglist_append_mbuf(&sg, m); 1802 if (error) { 1803 m = m_defrag(m, M_NOWAIT); 1804 if (m == NULL) 1805 goto fail; 1806 1807 *m_head = m; 1808 sc->vtnet_stats.tx_defragged++; 1809 1810 error = sglist_append_mbuf(&sg, m); 1811 if (error) 1812 goto fail; 1813 } 1814 1815 txhdr->vth_mbuf = m; 1816 error = virtqueue_enqueue(vq, txhdr, &sg, sg.sg_nseg, 0); 1817 1818 return (error); 1819 1820 fail: 1821 sc->vtnet_stats.tx_defrag_failed++; 1822 m_freem(*m_head); 1823 *m_head = NULL; 1824 1825 return (ENOBUFS); 1826 } 1827 1828 static struct mbuf * 1829 vtnet_vlan_tag_insert(struct mbuf *m) 1830 { 1831 struct mbuf *n; 1832 struct ether_vlan_header *evl; 1833 1834 if (M_WRITABLE(m) == 0) { 1835 n = m_dup(m, M_NOWAIT); 1836 m_freem(m); 1837 if ((m = n) == NULL) 1838 return (NULL); 1839 } 1840 1841 M_PREPEND(m, ETHER_VLAN_ENCAP_LEN, M_NOWAIT); 1842 if (m == NULL) 1843 return (NULL); 1844 if (m->m_len < sizeof(struct ether_vlan_header)) { 1845 m = m_pullup(m, sizeof(struct ether_vlan_header)); 1846 if (m == NULL) 1847 return (NULL); 1848 } 1849 1850 /* Insert 802.1Q header into the existing Ethernet header. */ 1851 evl = mtod(m, struct ether_vlan_header *); 1852 bcopy((char *) evl + ETHER_VLAN_ENCAP_LEN, 1853 (char *) evl, ETHER_HDR_LEN - ETHER_TYPE_LEN); 1854 evl->evl_encap_proto = htons(ETHERTYPE_VLAN); 1855 evl->evl_tag = htons(m->m_pkthdr.ether_vlantag); 1856 m->m_flags &= ~M_VLANTAG; 1857 1858 return (m); 1859 } 1860 1861 static int 1862 vtnet_encap(struct vtnet_softc *sc, struct mbuf **m_head) 1863 { 1864 struct vtnet_tx_header *txhdr; 1865 struct virtio_net_hdr *hdr; 1866 struct mbuf *m; 1867 int error; 1868 1869 txhdr = SLIST_FIRST(&sc->vtnet_txhdr_free); 1870 if (txhdr == NULL) 1871 return (ENOBUFS); 1872 SLIST_REMOVE_HEAD(&sc->vtnet_txhdr_free, link); 1873 1874 /* 1875 * Always use the non-mergeable header to simplify things. When 1876 * the mergeable feature is negotiated, the num_buffers field 1877 * must be set to zero. We use vtnet_hdr_size later to enqueue 1878 * the correct header size to the host. 1879 */ 1880 hdr = &txhdr->vth_uhdr.hdr; 1881 m = *m_head; 1882 1883 error = ENOBUFS; 1884 1885 if (m->m_flags & M_VLANTAG) { 1886 //m = ether_vlanencap(m, m->m_pkthdr.ether_vtag); 1887 m = vtnet_vlan_tag_insert(m); 1888 if ((*m_head = m) == NULL) 1889 goto fail; 1890 m->m_flags &= ~M_VLANTAG; 1891 } 1892 1893 if (m->m_pkthdr.csum_flags != 0) { 1894 m = vtnet_tx_offload(sc, m, hdr); 1895 if ((*m_head = m) == NULL) 1896 goto fail; 1897 } 1898 1899 error = vtnet_enqueue_txbuf(sc, m_head, txhdr); 1900 fail: 1901 if (error != 0) 1902 vtnet_enqueue_txhdr(sc, txhdr); 1903 return (error); 1904 } 1905 1906 static void 1907 vtnet_start(struct ifnet *ifp, struct ifaltq_subque *ifsq) 1908 { 1909 struct vtnet_softc *sc; 1910 1911 sc = ifp->if_softc; 1912 1913 ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq); 1914 lwkt_serialize_enter(&sc->vtnet_slz); 1915 vtnet_start_locked(ifp, ifsq); 1916 lwkt_serialize_exit(&sc->vtnet_slz); 1917 } 1918 1919 static void 1920 vtnet_start_locked(struct ifnet *ifp, struct ifaltq_subque *ifsq) 1921 { 1922 struct vtnet_softc *sc; 1923 struct virtqueue *vq; 1924 struct mbuf *m0; 1925 int enq; 1926 1927 sc = ifp->if_softc; 1928 vq = sc->vtnet_tx_vq; 1929 enq = 0; 1930 1931 ASSERT_SERIALIZED(&sc->vtnet_slz); 1932 1933 if ((ifp->if_flags & (IFF_RUNNING)) != 1934 IFF_RUNNING || ((sc->vtnet_flags & VTNET_FLAG_LINK) == 0)) 1935 return; 1936 1937 #ifdef VTNET_TX_INTR_MODERATION 1938 if (virtqueue_nused(vq) >= sc->vtnet_tx_size / 2) 1939 vtnet_txeof(sc); 1940 #endif 1941 1942 while (!ifsq_is_empty(ifsq)) { 1943 if (virtqueue_full(vq)) { 1944 ifq_set_oactive(&ifp->if_snd); 1945 break; 1946 } 1947 1948 m0 = ifq_dequeue(&ifp->if_snd); 1949 if (m0 == NULL) 1950 break; 1951 1952 if (vtnet_encap(sc, &m0) != 0) { 1953 if (m0 == NULL) 1954 break; 1955 ifq_prepend(&ifp->if_snd, m0); 1956 ifq_set_oactive(&ifp->if_snd); 1957 break; 1958 } 1959 1960 enq++; 1961 ETHER_BPF_MTAP(ifp, m0); 1962 } 1963 1964 if (enq > 0) { 1965 virtqueue_notify(vq, &sc->vtnet_slz); 1966 sc->vtnet_watchdog_timer = VTNET_WATCHDOG_TIMEOUT; 1967 } 1968 } 1969 1970 static void 1971 vtnet_tx_intr_task(void *arg) 1972 { 1973 struct vtnet_softc *sc; 1974 struct ifnet *ifp; 1975 struct ifaltq_subque *ifsq; 1976 1977 sc = arg; 1978 ifp = sc->vtnet_ifp; 1979 ifsq = ifq_get_subq_default(&ifp->if_snd); 1980 1981 next: 1982 // lwkt_serialize_enter(&sc->vtnet_slz); 1983 1984 if ((ifp->if_flags & IFF_RUNNING) == 0) { 1985 vtnet_enable_tx_intr(sc); 1986 // lwkt_serialize_exit(&sc->vtnet_slz); 1987 return; 1988 } 1989 1990 vtnet_txeof(sc); 1991 1992 if (!ifsq_is_empty(ifsq)) 1993 vtnet_start_locked(ifp, ifsq); 1994 1995 if (vtnet_enable_tx_intr(sc) != 0) { 1996 vtnet_disable_tx_intr(sc); 1997 sc->vtnet_stats.tx_task_rescheduled++; 1998 // lwkt_serialize_exit(&sc->vtnet_slz); 1999 goto next; 2000 } 2001 2002 // lwkt_serialize_exit(&sc->vtnet_slz); 2003 } 2004 2005 static void 2006 vtnet_tx_vq_intr(void *xsc) 2007 { 2008 struct vtnet_softc *sc; 2009 2010 sc = xsc; 2011 2012 if (!virtqueue_pending(sc->vtnet_tx_vq)) 2013 return; 2014 2015 vtnet_disable_tx_intr(sc); 2016 vtnet_tx_intr_task(sc); 2017 } 2018 2019 static void 2020 vtnet_config_intr(void *arg) 2021 { 2022 struct vtnet_softc *sc; 2023 2024 sc = arg; 2025 2026 vtnet_update_link_status(sc); 2027 } 2028 2029 static void 2030 vtnet_stop(struct vtnet_softc *sc) 2031 { 2032 device_t dev; 2033 struct ifnet *ifp; 2034 2035 dev = sc->vtnet_dev; 2036 ifp = sc->vtnet_ifp; 2037 2038 ASSERT_SERIALIZED(&sc->vtnet_slz); 2039 2040 sc->vtnet_watchdog_timer = 0; 2041 ifq_clr_oactive(&ifp->if_snd); 2042 ifp->if_flags &= ~(IFF_RUNNING); 2043 2044 vtnet_disable_rx_intr(sc); 2045 vtnet_disable_tx_intr(sc); 2046 2047 /* 2048 * Stop the host VirtIO adapter. Note this will reset the host 2049 * adapter's state back to the pre-initialized state, so in 2050 * order to make the device usable again, we must drive it 2051 * through virtio_reinit() and virtio_reinit_complete(). 2052 */ 2053 virtio_stop(dev); 2054 2055 sc->vtnet_flags &= ~VTNET_FLAG_LINK; 2056 2057 vtnet_free_rx_mbufs(sc); 2058 vtnet_free_tx_mbufs(sc); 2059 } 2060 2061 static int 2062 vtnet_virtio_reinit(struct vtnet_softc *sc) 2063 { 2064 device_t dev; 2065 struct ifnet *ifp; 2066 uint64_t features; 2067 int error; 2068 2069 dev = sc->vtnet_dev; 2070 ifp = sc->vtnet_ifp; 2071 features = sc->vtnet_features; 2072 2073 /* 2074 * Re-negotiate with the host, removing any disabled receive 2075 * features. Transmit features are disabled only on our side 2076 * via if_capenable and if_hwassist. 2077 */ 2078 2079 if (ifp->if_capabilities & IFCAP_RXCSUM) { 2080 if ((ifp->if_capenable & IFCAP_RXCSUM) == 0) 2081 features &= ~VIRTIO_NET_F_GUEST_CSUM; 2082 } 2083 2084 #if 0 /* IFCAP_LRO doesn't exist in DragonFly. */ 2085 if (ifp->if_capabilities & IFCAP_LRO) { 2086 if ((ifp->if_capenable & IFCAP_LRO) == 0) 2087 features &= ~VTNET_LRO_FEATURES; 2088 } 2089 #endif 2090 2091 if (ifp->if_capabilities & IFCAP_VLAN_HWFILTER) { 2092 if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0) 2093 features &= ~VIRTIO_NET_F_CTRL_VLAN; 2094 } 2095 2096 error = virtio_reinit(dev, features); 2097 if (error) 2098 device_printf(dev, "virtio reinit error %d\n", error); 2099 2100 return (error); 2101 } 2102 2103 static void 2104 vtnet_init_locked(struct vtnet_softc *sc) 2105 { 2106 device_t dev; 2107 struct ifnet *ifp; 2108 int error; 2109 2110 dev = sc->vtnet_dev; 2111 ifp = sc->vtnet_ifp; 2112 2113 ASSERT_SERIALIZED(&sc->vtnet_slz); 2114 2115 if (ifp->if_flags & IFF_RUNNING) 2116 return; 2117 2118 /* Stop host's adapter, cancel any pending I/O. */ 2119 vtnet_stop(sc); 2120 2121 /* Reinitialize the host device. */ 2122 error = vtnet_virtio_reinit(sc); 2123 if (error) { 2124 device_printf(dev, 2125 "reinitialization failed, stopping device...\n"); 2126 vtnet_stop(sc); 2127 return; 2128 } 2129 2130 /* Update host with assigned MAC address. */ 2131 bcopy(IF_LLADDR(ifp), sc->vtnet_hwaddr, ETHER_ADDR_LEN); 2132 vtnet_set_hwaddr(sc); 2133 2134 ifp->if_hwassist = 0; 2135 if (ifp->if_capenable & IFCAP_TXCSUM) 2136 ifp->if_hwassist |= VTNET_CSUM_OFFLOAD; 2137 if (ifp->if_capenable & IFCAP_TSO4) 2138 ifp->if_hwassist |= CSUM_TSO; 2139 2140 error = vtnet_init_rx_vq(sc); 2141 if (error) { 2142 device_printf(dev, 2143 "cannot allocate mbufs for Rx virtqueue\n"); 2144 vtnet_stop(sc); 2145 return; 2146 } 2147 2148 if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) { 2149 if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) { 2150 /* Restore promiscuous and all-multicast modes. */ 2151 vtnet_rx_filter(sc); 2152 2153 /* Restore filtered MAC addresses. */ 2154 vtnet_rx_filter_mac(sc); 2155 } 2156 2157 /* Restore VLAN filters. */ 2158 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) 2159 vtnet_rx_filter_vlan(sc); 2160 } 2161 2162 { 2163 vtnet_enable_rx_intr(sc); 2164 vtnet_enable_tx_intr(sc); 2165 } 2166 2167 ifp->if_flags |= IFF_RUNNING; 2168 ifq_clr_oactive(&ifp->if_snd); 2169 2170 virtio_reinit_complete(dev); 2171 2172 vtnet_update_link_status(sc); 2173 } 2174 2175 static void 2176 vtnet_init(void *xsc) 2177 { 2178 struct vtnet_softc *sc; 2179 2180 sc = xsc; 2181 2182 lwkt_serialize_enter(&sc->vtnet_slz); 2183 vtnet_init_locked(sc); 2184 lwkt_serialize_exit(&sc->vtnet_slz); 2185 } 2186 2187 static void 2188 vtnet_exec_ctrl_cmd(struct vtnet_softc *sc, void *cookie, 2189 struct sglist *sg, int readable, int writable) 2190 { 2191 struct virtqueue *vq; 2192 void *c; 2193 2194 vq = sc->vtnet_ctrl_vq; 2195 2196 ASSERT_SERIALIZED(&sc->vtnet_slz); 2197 KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_VQ, 2198 ("no control virtqueue")); 2199 KASSERT(virtqueue_empty(vq), 2200 ("control command already enqueued")); 2201 2202 if (virtqueue_enqueue(vq, cookie, sg, readable, writable) != 0) 2203 return; 2204 2205 virtqueue_notify(vq, &sc->vtnet_slz); 2206 2207 /* 2208 * Poll until the command is complete. Previously, we would 2209 * sleep until the control virtqueue interrupt handler woke 2210 * us up, but dropping the VTNET_MTX leads to serialization 2211 * difficulties. 2212 * 2213 * Furthermore, it appears QEMU/KVM only allocates three MSIX 2214 * vectors. Two of those vectors are needed for the Rx and Tx 2215 * virtqueues. We do not support sharing both a Vq and config 2216 * changed notification on the same MSIX vector. 2217 */ 2218 c = virtqueue_poll(vq, NULL); 2219 KASSERT(c == cookie, ("unexpected control command response")); 2220 } 2221 2222 static int 2223 vtnet_ctrl_mac_cmd(struct vtnet_softc *sc, uint8_t *hwaddr) 2224 { 2225 struct { 2226 struct virtio_net_ctrl_hdr hdr __aligned(2); 2227 uint8_t pad1; 2228 char aligned_hwaddr[ETHER_ADDR_LEN] __aligned(8); 2229 uint8_t pad2; 2230 uint8_t ack; 2231 } s; 2232 struct sglist_seg segs[3]; 2233 struct sglist sg; 2234 int error; 2235 2236 s.hdr.class = VIRTIO_NET_CTRL_MAC; 2237 s.hdr.cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET; 2238 s.ack = VIRTIO_NET_ERR; 2239 2240 /* Copy the mac address into physically contiguous memory */ 2241 memcpy(s.aligned_hwaddr, hwaddr, ETHER_ADDR_LEN); 2242 2243 sglist_init(&sg, 3, segs); 2244 error = 0; 2245 error |= sglist_append(&sg, &s.hdr, 2246 sizeof(struct virtio_net_ctrl_hdr)); 2247 error |= sglist_append(&sg, s.aligned_hwaddr, ETHER_ADDR_LEN); 2248 error |= sglist_append(&sg, &s.ack, sizeof(uint8_t)); 2249 KASSERT(error == 0 && sg.sg_nseg == 3, 2250 ("%s: error %d adding set MAC msg to sglist", __func__, error)); 2251 2252 vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1); 2253 2254 return (s.ack == VIRTIO_NET_OK ? 0 : EIO); 2255 } 2256 2257 static void 2258 vtnet_rx_filter(struct vtnet_softc *sc) 2259 { 2260 device_t dev; 2261 struct ifnet *ifp; 2262 2263 dev = sc->vtnet_dev; 2264 ifp = sc->vtnet_ifp; 2265 2266 ASSERT_SERIALIZED(&sc->vtnet_slz); 2267 KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX, 2268 ("CTRL_RX feature not negotiated")); 2269 2270 if (vtnet_set_promisc(sc, ifp->if_flags & IFF_PROMISC) != 0) 2271 device_printf(dev, "cannot %s promiscuous mode\n", 2272 (ifp->if_flags & IFF_PROMISC) ? "enable" : "disable"); 2273 2274 if (vtnet_set_allmulti(sc, ifp->if_flags & IFF_ALLMULTI) != 0) 2275 device_printf(dev, "cannot %s all-multicast mode\n", 2276 (ifp->if_flags & IFF_ALLMULTI) ? "enable" : "disable"); 2277 } 2278 2279 static int 2280 vtnet_ctrl_rx_cmd(struct vtnet_softc *sc, int cmd, int on) 2281 { 2282 struct sglist_seg segs[3]; 2283 struct sglist sg; 2284 struct { 2285 struct virtio_net_ctrl_hdr hdr __aligned(2); 2286 uint8_t pad1; 2287 uint8_t onoff; 2288 uint8_t pad2; 2289 uint8_t ack; 2290 } s; 2291 int error; 2292 2293 KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX, 2294 ("%s: CTRL_RX feature not negotiated", __func__)); 2295 2296 s.hdr.class = VIRTIO_NET_CTRL_RX; 2297 s.hdr.cmd = cmd; 2298 s.onoff = !!on; 2299 s.ack = VIRTIO_NET_ERR; 2300 2301 sglist_init(&sg, 3, segs); 2302 error = 0; 2303 error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr)); 2304 error |= sglist_append(&sg, &s.onoff, sizeof(uint8_t)); 2305 error |= sglist_append(&sg, &s.ack, sizeof(uint8_t)); 2306 KASSERT(error == 0 && sg.sg_nseg == 3, 2307 ("%s: error %d adding Rx message to sglist", __func__, error)); 2308 2309 vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1); 2310 2311 return (s.ack == VIRTIO_NET_OK ? 0 : EIO); 2312 } 2313 2314 static int 2315 vtnet_set_promisc(struct vtnet_softc *sc, int on) 2316 { 2317 2318 return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_PROMISC, on)); 2319 } 2320 2321 static int 2322 vtnet_set_allmulti(struct vtnet_softc *sc, int on) 2323 { 2324 2325 return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_ALLMULTI, on)); 2326 } 2327 2328 static void 2329 vtnet_rx_filter_mac(struct vtnet_softc *sc) 2330 { 2331 struct virtio_net_ctrl_hdr hdr __aligned(2); 2332 struct vtnet_mac_filter *filter; 2333 struct sglist_seg segs[4]; 2334 struct sglist sg; 2335 struct ifnet *ifp; 2336 struct ifaddr *ifa; 2337 struct ifaddr_container *ifac; 2338 struct ifmultiaddr *ifma; 2339 int ucnt, mcnt, promisc, allmulti, error; 2340 uint8_t ack; 2341 2342 ifp = sc->vtnet_ifp; 2343 ucnt = 0; 2344 mcnt = 0; 2345 promisc = 0; 2346 allmulti = 0; 2347 2348 ASSERT_SERIALIZED(&sc->vtnet_slz); 2349 KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX, 2350 ("%s: CTRL_RX feature not negotiated", __func__)); 2351 2352 /* Use the MAC filtering table allocated in vtnet_attach. */ 2353 filter = sc->vtnet_macfilter; 2354 memset(filter, 0, sizeof(struct vtnet_mac_filter)); 2355 2356 /* Unicast MAC addresses: */ 2357 //if_addr_rlock(ifp); 2358 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) { 2359 ifa = ifac->ifa; 2360 if (ifa->ifa_addr->sa_family != AF_LINK) 2361 continue; 2362 else if (memcmp(LLADDR((struct sockaddr_dl *)ifa->ifa_addr), 2363 sc->vtnet_hwaddr, ETHER_ADDR_LEN) == 0) 2364 continue; 2365 else if (ucnt == VTNET_MAX_MAC_ENTRIES) { 2366 promisc = 1; 2367 break; 2368 } 2369 2370 bcopy(LLADDR((struct sockaddr_dl *)ifa->ifa_addr), 2371 &filter->vmf_unicast.macs[ucnt], ETHER_ADDR_LEN); 2372 ucnt++; 2373 } 2374 //if_addr_runlock(ifp); 2375 2376 if (promisc != 0) { 2377 filter->vmf_unicast.nentries = 0; 2378 if_printf(ifp, "more than %d MAC addresses assigned, " 2379 "falling back to promiscuous mode\n", 2380 VTNET_MAX_MAC_ENTRIES); 2381 } else 2382 filter->vmf_unicast.nentries = ucnt; 2383 2384 /* Multicast MAC addresses: */ 2385 //if_maddr_rlock(ifp); 2386 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 2387 if (ifma->ifma_addr->sa_family != AF_LINK) 2388 continue; 2389 else if (mcnt == VTNET_MAX_MAC_ENTRIES) { 2390 allmulti = 1; 2391 break; 2392 } 2393 2394 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr), 2395 &filter->vmf_multicast.macs[mcnt], ETHER_ADDR_LEN); 2396 mcnt++; 2397 } 2398 //if_maddr_runlock(ifp); 2399 2400 if (allmulti != 0) { 2401 filter->vmf_multicast.nentries = 0; 2402 if_printf(ifp, "more than %d multicast MAC addresses " 2403 "assigned, falling back to all-multicast mode\n", 2404 VTNET_MAX_MAC_ENTRIES); 2405 } else 2406 filter->vmf_multicast.nentries = mcnt; 2407 2408 if (promisc != 0 && allmulti != 0) 2409 goto out; 2410 2411 hdr.class = VIRTIO_NET_CTRL_MAC; 2412 hdr.cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET; 2413 ack = VIRTIO_NET_ERR; 2414 2415 sglist_init(&sg, 4, segs); 2416 error = 0; 2417 error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr)); 2418 error |= sglist_append(&sg, &filter->vmf_unicast, 2419 sizeof(uint32_t) + filter->vmf_unicast.nentries * ETHER_ADDR_LEN); 2420 error |= sglist_append(&sg, &filter->vmf_multicast, 2421 sizeof(uint32_t) + filter->vmf_multicast.nentries * ETHER_ADDR_LEN); 2422 error |= sglist_append(&sg, &ack, sizeof(uint8_t)); 2423 KASSERT(error == 0 && sg.sg_nseg == 4, 2424 ("%s: error %d adding MAC filter msg to sglist", __func__, error)); 2425 2426 vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1); 2427 2428 if (ack != VIRTIO_NET_OK) 2429 if_printf(ifp, "error setting host MAC filter table\n"); 2430 2431 out: 2432 if (promisc != 0 && vtnet_set_promisc(sc, 1) != 0) 2433 if_printf(ifp, "cannot enable promiscuous mode\n"); 2434 if (allmulti != 0 && vtnet_set_allmulti(sc, 1) != 0) 2435 if_printf(ifp, "cannot enable all-multicast mode\n"); 2436 } 2437 2438 static int 2439 vtnet_exec_vlan_filter(struct vtnet_softc *sc, int add, uint16_t tag) 2440 { 2441 struct sglist_seg segs[3]; 2442 struct sglist sg; 2443 struct { 2444 struct virtio_net_ctrl_hdr hdr __aligned(2); 2445 uint8_t pad1; 2446 uint16_t tag; 2447 uint8_t pad2; 2448 uint8_t ack; 2449 } s; 2450 int error; 2451 2452 s.hdr.class = VIRTIO_NET_CTRL_VLAN; 2453 s.hdr.cmd = add ? VIRTIO_NET_CTRL_VLAN_ADD : VIRTIO_NET_CTRL_VLAN_DEL; 2454 s.tag = tag; 2455 s.ack = VIRTIO_NET_ERR; 2456 2457 sglist_init(&sg, 3, segs); 2458 error = 0; 2459 error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr)); 2460 error |= sglist_append(&sg, &s.tag, sizeof(uint16_t)); 2461 error |= sglist_append(&sg, &s.ack, sizeof(uint8_t)); 2462 KASSERT(error == 0 && sg.sg_nseg == 3, 2463 ("%s: error %d adding VLAN message to sglist", __func__, error)); 2464 2465 vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1); 2466 2467 return (s.ack == VIRTIO_NET_OK ? 0 : EIO); 2468 } 2469 2470 static void 2471 vtnet_rx_filter_vlan(struct vtnet_softc *sc) 2472 { 2473 uint32_t w; 2474 uint16_t tag; 2475 int i, bit, nvlans; 2476 2477 ASSERT_SERIALIZED(&sc->vtnet_slz); 2478 KASSERT(sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER, 2479 ("%s: VLAN_FILTER feature not negotiated", __func__)); 2480 2481 nvlans = sc->vtnet_nvlans; 2482 2483 /* Enable the filter for each configured VLAN. */ 2484 for (i = 0; i < VTNET_VLAN_SHADOW_SIZE && nvlans > 0; i++) { 2485 w = sc->vtnet_vlan_shadow[i]; 2486 while ((bit = ffs(w) - 1) != -1) { 2487 w &= ~(1 << bit); 2488 tag = sizeof(w) * CHAR_BIT * i + bit; 2489 nvlans--; 2490 2491 if (vtnet_exec_vlan_filter(sc, 1, tag) != 0) { 2492 device_printf(sc->vtnet_dev, 2493 "cannot enable VLAN %d filter\n", tag); 2494 } 2495 } 2496 } 2497 2498 KASSERT(nvlans == 0, ("VLAN count incorrect")); 2499 } 2500 2501 static void 2502 vtnet_update_vlan_filter(struct vtnet_softc *sc, int add, uint16_t tag) 2503 { 2504 struct ifnet *ifp; 2505 int idx, bit; 2506 2507 ifp = sc->vtnet_ifp; 2508 idx = (tag >> 5) & 0x7F; 2509 bit = tag & 0x1F; 2510 2511 if (tag == 0 || tag > 4095) 2512 return; 2513 2514 lwkt_serialize_enter(&sc->vtnet_slz); 2515 2516 /* Update shadow VLAN table. */ 2517 if (add) { 2518 sc->vtnet_nvlans++; 2519 sc->vtnet_vlan_shadow[idx] |= (1 << bit); 2520 } else { 2521 sc->vtnet_nvlans--; 2522 sc->vtnet_vlan_shadow[idx] &= ~(1 << bit); 2523 } 2524 2525 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER && 2526 vtnet_exec_vlan_filter(sc, add, tag) != 0) { 2527 device_printf(sc->vtnet_dev, 2528 "cannot %s VLAN %d %s the host filter table\n", 2529 add ? "add" : "remove", tag, add ? "to" : "from"); 2530 } 2531 2532 lwkt_serialize_exit(&sc->vtnet_slz); 2533 } 2534 2535 static void 2536 vtnet_register_vlan(void *arg, struct ifnet *ifp, uint16_t tag) 2537 { 2538 2539 if (ifp->if_softc != arg) 2540 return; 2541 2542 vtnet_update_vlan_filter(arg, 1, tag); 2543 } 2544 2545 static void 2546 vtnet_unregister_vlan(void *arg, struct ifnet *ifp, uint16_t tag) 2547 { 2548 2549 if (ifp->if_softc != arg) 2550 return; 2551 2552 vtnet_update_vlan_filter(arg, 0, tag); 2553 } 2554 2555 static int 2556 vtnet_ifmedia_upd(struct ifnet *ifp) 2557 { 2558 struct vtnet_softc *sc; 2559 struct ifmedia *ifm; 2560 2561 sc = ifp->if_softc; 2562 ifm = &sc->vtnet_media; 2563 2564 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 2565 return (EINVAL); 2566 2567 return (0); 2568 } 2569 2570 static void 2571 vtnet_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 2572 { 2573 struct vtnet_softc *sc; 2574 2575 sc = ifp->if_softc; 2576 2577 ifmr->ifm_status = IFM_AVALID; 2578 ifmr->ifm_active = IFM_ETHER; 2579 2580 lwkt_serialize_enter(&sc->vtnet_slz); 2581 if (vtnet_is_link_up(sc) != 0) { 2582 ifmr->ifm_status |= IFM_ACTIVE; 2583 ifmr->ifm_active |= VTNET_MEDIATYPE; 2584 } else 2585 ifmr->ifm_active |= IFM_NONE; 2586 lwkt_serialize_exit(&sc->vtnet_slz); 2587 } 2588 2589 static void 2590 vtnet_add_statistics(struct vtnet_softc *sc) 2591 { 2592 device_t dev; 2593 struct vtnet_statistics *stats; 2594 struct sysctl_ctx_list *ctx; 2595 struct sysctl_oid *tree; 2596 struct sysctl_oid_list *child; 2597 2598 dev = sc->vtnet_dev; 2599 stats = &sc->vtnet_stats; 2600 ctx = device_get_sysctl_ctx(dev); 2601 tree = device_get_sysctl_tree(dev); 2602 child = SYSCTL_CHILDREN(tree); 2603 2604 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "mbuf_alloc_failed", 2605 CTLFLAG_RD, &stats->mbuf_alloc_failed, 0, 2606 "Mbuf cluster allocation failures"); 2607 2608 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_frame_too_large", 2609 CTLFLAG_RD, &stats->rx_frame_too_large, 0, 2610 "Received frame larger than the mbuf chain"); 2611 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_enq_replacement_failed", 2612 CTLFLAG_RD, &stats->rx_enq_replacement_failed, 0, 2613 "Enqueuing the replacement receive mbuf failed"); 2614 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_mergeable_failed", 2615 CTLFLAG_RD, &stats->rx_mergeable_failed, 0, 2616 "Mergeable buffers receive failures"); 2617 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_ethtype", 2618 CTLFLAG_RD, &stats->rx_csum_bad_ethtype, 0, 2619 "Received checksum offloaded buffer with unsupported " 2620 "Ethernet type"); 2621 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_ipproto", 2622 CTLFLAG_RD, &stats->rx_csum_bad_ipproto, 0, 2623 "Received checksum offloaded buffer with incorrect IP protocol"); 2624 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_offset", 2625 CTLFLAG_RD, &stats->rx_csum_bad_offset, 0, 2626 "Received checksum offloaded buffer with incorrect offset"); 2627 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_failed", 2628 CTLFLAG_RD, &stats->rx_csum_failed, 0, 2629 "Received buffer checksum offload failed"); 2630 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_offloaded", 2631 CTLFLAG_RD, &stats->rx_csum_offloaded, 0, 2632 "Received buffer checksum offload succeeded"); 2633 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_task_rescheduled", 2634 CTLFLAG_RD, &stats->rx_task_rescheduled, 0, 2635 "Times the receive interrupt task rescheduled itself"); 2636 2637 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_bad_ethtype", 2638 CTLFLAG_RD, &stats->tx_csum_bad_ethtype, 0, 2639 "Aborted transmit of checksum offloaded buffer with unknown " 2640 "Ethernet type"); 2641 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_bad_ethtype", 2642 CTLFLAG_RD, &stats->tx_tso_bad_ethtype, 0, 2643 "Aborted transmit of TSO buffer with unknown Ethernet type"); 2644 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_defragged", 2645 CTLFLAG_RD, &stats->tx_defragged, 0, 2646 "Transmit mbufs defragged"); 2647 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_defrag_failed", 2648 CTLFLAG_RD, &stats->tx_defrag_failed, 0, 2649 "Aborted transmit of buffer because defrag failed"); 2650 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_offloaded", 2651 CTLFLAG_RD, &stats->tx_csum_offloaded, 0, 2652 "Offloaded checksum of transmitted buffer"); 2653 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_offloaded", 2654 CTLFLAG_RD, &stats->tx_tso_offloaded, 0, 2655 "Segmentation offload of transmitted buffer"); 2656 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_task_rescheduled", 2657 CTLFLAG_RD, &stats->tx_task_rescheduled, 0, 2658 "Times the transmit interrupt task rescheduled itself"); 2659 } 2660 2661 static int 2662 vtnet_enable_rx_intr(struct vtnet_softc *sc) 2663 { 2664 2665 return (virtqueue_enable_intr(sc->vtnet_rx_vq)); 2666 } 2667 2668 static void 2669 vtnet_disable_rx_intr(struct vtnet_softc *sc) 2670 { 2671 2672 virtqueue_disable_intr(sc->vtnet_rx_vq); 2673 } 2674 2675 static int 2676 vtnet_enable_tx_intr(struct vtnet_softc *sc) 2677 { 2678 2679 #ifdef VTNET_TX_INTR_MODERATION 2680 return (0); 2681 #else 2682 return (virtqueue_enable_intr(sc->vtnet_tx_vq)); 2683 #endif 2684 } 2685 2686 static void 2687 vtnet_disable_tx_intr(struct vtnet_softc *sc) 2688 { 2689 2690 virtqueue_disable_intr(sc->vtnet_tx_vq); 2691 } 2692