1 /*- 2 * Copyright (c) 2011, Bryan Venteicher <bryanv@daemoninthecloset.org> 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice unmodified, this list of conditions, and the following 10 * disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 */ 26 27 /* Driver for VirtIO network devices. */ 28 29 #include <sys/cdefs.h> 30 31 #include <sys/param.h> 32 #include <sys/systm.h> 33 #include <sys/kernel.h> 34 #include <sys/sockio.h> 35 #include <sys/mbuf.h> 36 #include <sys/malloc.h> 37 #include <sys/module.h> 38 #include <sys/socket.h> 39 #include <sys/sysctl.h> 40 #include <sys/taskqueue.h> 41 #include <sys/random.h> 42 #include <sys/sglist.h> 43 #include <sys/serialize.h> 44 #include <sys/bus.h> 45 #include <sys/rman.h> 46 47 #include <net/ethernet.h> 48 #include <net/if.h> 49 #include <net/if_arp.h> 50 #include <net/if_dl.h> 51 #include <net/if_types.h> 52 #include <net/if_media.h> 53 #include <net/vlan/if_vlan_var.h> 54 #include <net/vlan/if_vlan_ether.h> 55 #include <net/ifq_var.h> 56 57 #include <net/bpf.h> 58 59 #include <netinet/in_systm.h> 60 #include <netinet/in.h> 61 #include <netinet/ip.h> 62 #include <netinet/ip6.h> 63 #include <netinet/udp.h> 64 #include <netinet/tcp.h> 65 66 #include <dev/virtual/virtio/virtio/virtio.h> 67 #include <dev/virtual/virtio/virtio/virtqueue.h> 68 69 #include "virtio_net.h" 70 #include "virtio_if.h" 71 72 struct vtnet_statistics { 73 unsigned long mbuf_alloc_failed; 74 75 unsigned long rx_frame_too_large; 76 unsigned long rx_enq_replacement_failed; 77 unsigned long rx_mergeable_failed; 78 unsigned long rx_csum_bad_ethtype; 79 unsigned long rx_csum_bad_start; 80 unsigned long rx_csum_bad_ipproto; 81 unsigned long rx_csum_bad_offset; 82 unsigned long rx_csum_failed; 83 unsigned long rx_csum_offloaded; 84 unsigned long rx_task_rescheduled; 85 86 unsigned long tx_csum_offloaded; 87 unsigned long tx_tso_offloaded; 88 unsigned long tx_csum_bad_ethtype; 89 unsigned long tx_tso_bad_ethtype; 90 unsigned long tx_task_rescheduled; 91 }; 92 93 struct vtnet_softc { 94 device_t vtnet_dev; 95 struct ifnet *vtnet_ifp; 96 struct lwkt_serialize vtnet_slz; 97 98 uint32_t vtnet_flags; 99 #define VTNET_FLAG_LINK 0x0001 100 #define VTNET_FLAG_SUSPENDED 0x0002 101 #define VTNET_FLAG_CTRL_VQ 0x0004 102 #define VTNET_FLAG_CTRL_RX 0x0008 103 #define VTNET_FLAG_VLAN_FILTER 0x0010 104 #define VTNET_FLAG_TSO_ECN 0x0020 105 #define VTNET_FLAG_MRG_RXBUFS 0x0040 106 #define VTNET_FLAG_LRO_NOMRG 0x0080 107 108 struct virtqueue *vtnet_rx_vq; 109 struct virtqueue *vtnet_tx_vq; 110 struct virtqueue *vtnet_ctrl_vq; 111 112 struct vtnet_tx_header *vtnet_txhdrarea; 113 uint32_t vtnet_txhdridx; 114 struct vtnet_mac_filter *vtnet_macfilter; 115 116 int vtnet_hdr_size; 117 int vtnet_tx_size; 118 int vtnet_rx_size; 119 int vtnet_rx_process_limit; 120 int vtnet_rx_mbuf_size; 121 int vtnet_rx_mbuf_count; 122 int vtnet_if_flags; 123 int vtnet_watchdog_timer; 124 uint64_t vtnet_features; 125 126 struct task vtnet_cfgchg_task; 127 128 struct vtnet_statistics vtnet_stats; 129 130 struct callout vtnet_tick_ch; 131 132 eventhandler_tag vtnet_vlan_attach; 133 eventhandler_tag vtnet_vlan_detach; 134 135 struct ifmedia vtnet_media; 136 /* 137 * Fake media type; the host does not provide us with 138 * any real media information. 139 */ 140 #define VTNET_MEDIATYPE (IFM_ETHER | IFM_1000_T | IFM_FDX) 141 char vtnet_hwaddr[ETHER_ADDR_LEN]; 142 143 /* 144 * During reset, the host's VLAN filtering table is lost. The 145 * array below is used to restore all the VLANs configured on 146 * this interface after a reset. 147 */ 148 #define VTNET_VLAN_SHADOW_SIZE (4096 / 32) 149 int vtnet_nvlans; 150 uint32_t vtnet_vlan_shadow[VTNET_VLAN_SHADOW_SIZE]; 151 152 char vtnet_mtx_name[16]; 153 }; 154 155 /* 156 * When mergeable buffers are not negotiated, the vtnet_rx_header structure 157 * below is placed at the beginning of the mbuf data. Use 4 bytes of pad to 158 * both keep the VirtIO header and the data non-contiguous and to keep the 159 * frame's payload 4 byte aligned. 160 * 161 * When mergeable buffers are negotiated, the host puts the VirtIO header in 162 * the beginning of the first mbuf's data. 163 */ 164 #define VTNET_RX_HEADER_PAD 4 165 struct vtnet_rx_header { 166 struct virtio_net_hdr vrh_hdr; 167 char vrh_pad[VTNET_RX_HEADER_PAD]; 168 } __packed; 169 170 /* 171 * For each outgoing frame, the vtnet_tx_header below is allocated from 172 * the vtnet_tx_header_zone. 173 */ 174 struct vtnet_tx_header { 175 union { 176 struct virtio_net_hdr hdr; 177 struct virtio_net_hdr_mrg_rxbuf mhdr; 178 } vth_uhdr; 179 180 struct mbuf *vth_mbuf; 181 }; 182 183 MALLOC_DEFINE(M_VTNET, "VTNET_TX", "Outgoing VTNET TX frame header"); 184 185 /* 186 * The VirtIO specification does not place a limit on the number of MAC 187 * addresses the guest driver may request to be filtered. In practice, 188 * the host is constrained by available resources. To simplify this driver, 189 * impose a reasonably high limit of MAC addresses we will filter before 190 * falling back to promiscuous or all-multicast modes. 191 */ 192 #define VTNET_MAX_MAC_ENTRIES 128 193 194 struct vtnet_mac_table { 195 uint32_t nentries; 196 uint8_t macs[VTNET_MAX_MAC_ENTRIES][ETHER_ADDR_LEN]; 197 } __packed; 198 199 struct vtnet_mac_filter { 200 struct vtnet_mac_table vmf_unicast; 201 uint32_t vmf_pad; /* Make tables non-contiguous. */ 202 struct vtnet_mac_table vmf_multicast; 203 }; 204 205 #define VTNET_WATCHDOG_TIMEOUT 5 206 #define VTNET_CSUM_OFFLOAD (CSUM_TCP | CSUM_UDP) 207 208 /* Features desired/implemented by this driver. */ 209 #define VTNET_FEATURES \ 210 (VIRTIO_NET_F_MAC | \ 211 VIRTIO_NET_F_STATUS | \ 212 VIRTIO_NET_F_CTRL_VQ | \ 213 VIRTIO_NET_F_CTRL_RX | \ 214 VIRTIO_NET_F_CTRL_VLAN | \ 215 VIRTIO_NET_F_CSUM | \ 216 VIRTIO_NET_F_HOST_TSO4 | \ 217 VIRTIO_NET_F_HOST_TSO6 | \ 218 VIRTIO_NET_F_HOST_ECN | \ 219 VIRTIO_NET_F_GUEST_CSUM | \ 220 VIRTIO_NET_F_GUEST_TSO4 | \ 221 VIRTIO_NET_F_GUEST_TSO6 | \ 222 VIRTIO_NET_F_GUEST_ECN | \ 223 VIRTIO_NET_F_MRG_RXBUF) 224 225 /* 226 * The VIRTIO_NET_F_GUEST_TSO[46] features permit the host to send us 227 * frames larger than 1514 bytes. We do not yet support software LRO 228 * via tcp_lro_rx(). 229 */ 230 #define VTNET_LRO_FEATURES (VIRTIO_NET_F_GUEST_TSO4 | \ 231 VIRTIO_NET_F_GUEST_TSO6 | VIRTIO_NET_F_GUEST_ECN) 232 233 #define VTNET_MAX_MTU 65536 234 #define VTNET_MAX_RX_SIZE 65550 235 236 /* 237 * Used to preallocate the Vq indirect descriptors. The first segment 238 * is reserved for the header. 239 */ 240 #define VTNET_MIN_RX_SEGS 2 241 #define VTNET_MAX_RX_SEGS 34 242 #define VTNET_MAX_TX_SEGS 34 243 244 #define IFCAP_TSO4 0x00100 /* can do TCP Segmentation Offload */ 245 #define IFCAP_TSO6 0x00200 /* can do TCP6 Segmentation Offload */ 246 #define IFCAP_LRO 0x00400 /* can do Large Receive Offload */ 247 #define IFCAP_VLAN_HWFILTER 0x10000 /* interface hw can filter vlan tag */ 248 #define IFCAP_VLAN_HWTSO 0x40000 /* can do IFCAP_TSO on VLANs */ 249 250 251 /* 252 * Assert we can receive and transmit the maximum with regular 253 * size clusters. 254 */ 255 CTASSERT(((VTNET_MAX_RX_SEGS - 1) * MCLBYTES) >= VTNET_MAX_RX_SIZE); 256 CTASSERT(((VTNET_MAX_TX_SEGS - 1) * MCLBYTES) >= VTNET_MAX_MTU); 257 258 /* 259 * Determine how many mbufs are in each receive buffer. For LRO without 260 * mergeable descriptors, we must allocate an mbuf chain large enough to 261 * hold both the vtnet_rx_header and the maximum receivable data. 262 */ 263 #define VTNET_NEEDED_RX_MBUFS(_sc) \ 264 ((_sc)->vtnet_flags & VTNET_FLAG_LRO_NOMRG) == 0 ? 1 : \ 265 howmany(sizeof(struct vtnet_rx_header) + VTNET_MAX_RX_SIZE, \ 266 (_sc)->vtnet_rx_mbuf_size) 267 268 static int vtnet_modevent(module_t, int, void *); 269 270 static int vtnet_probe(device_t); 271 static int vtnet_attach(device_t); 272 static int vtnet_detach(device_t); 273 static int vtnet_suspend(device_t); 274 static int vtnet_resume(device_t); 275 static int vtnet_shutdown(device_t); 276 static int vtnet_config_change(device_t); 277 278 static void vtnet_negotiate_features(struct vtnet_softc *); 279 static int vtnet_alloc_virtqueues(struct vtnet_softc *); 280 static void vtnet_get_hwaddr(struct vtnet_softc *); 281 static void vtnet_set_hwaddr(struct vtnet_softc *); 282 static int vtnet_is_link_up(struct vtnet_softc *); 283 static void vtnet_update_link_status(struct vtnet_softc *); 284 #if 0 285 static void vtnet_watchdog(struct vtnet_softc *); 286 #endif 287 static void vtnet_config_change_task(void *, int); 288 static int vtnet_change_mtu(struct vtnet_softc *, int); 289 static int vtnet_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *); 290 291 static int vtnet_init_rx_vq(struct vtnet_softc *); 292 static void vtnet_free_rx_mbufs(struct vtnet_softc *); 293 static void vtnet_free_tx_mbufs(struct vtnet_softc *); 294 static void vtnet_free_ctrl_vq(struct vtnet_softc *); 295 296 static struct mbuf * vtnet_alloc_rxbuf(struct vtnet_softc *, int, 297 struct mbuf **); 298 static int vtnet_replace_rxbuf(struct vtnet_softc *, 299 struct mbuf *, int); 300 static int vtnet_newbuf(struct vtnet_softc *); 301 static void vtnet_discard_merged_rxbuf(struct vtnet_softc *, int); 302 static void vtnet_discard_rxbuf(struct vtnet_softc *, struct mbuf *); 303 static int vtnet_enqueue_rxbuf(struct vtnet_softc *, struct mbuf *); 304 static void vtnet_vlan_tag_remove(struct mbuf *); 305 static int vtnet_rx_csum(struct vtnet_softc *, struct mbuf *, 306 struct virtio_net_hdr *); 307 static int vtnet_rxeof_merged(struct vtnet_softc *, struct mbuf *, int); 308 static int vtnet_rxeof(struct vtnet_softc *, int, int *); 309 static void vtnet_rx_intr_task(void *); 310 static int vtnet_rx_vq_intr(void *); 311 312 static void vtnet_txeof(struct vtnet_softc *); 313 static struct mbuf * vtnet_tx_offload(struct vtnet_softc *, struct mbuf *, 314 struct virtio_net_hdr *); 315 static int vtnet_enqueue_txbuf(struct vtnet_softc *, struct mbuf **, 316 struct vtnet_tx_header *); 317 static int vtnet_encap(struct vtnet_softc *, struct mbuf **); 318 static void vtnet_start_locked(struct ifnet *, struct ifaltq_subque *); 319 static void vtnet_start(struct ifnet *, struct ifaltq_subque *); 320 static void vtnet_tick(void *); 321 static void vtnet_tx_intr_task(void *); 322 static int vtnet_tx_vq_intr(void *); 323 324 static void vtnet_stop(struct vtnet_softc *); 325 static int vtnet_reinit(struct vtnet_softc *); 326 static void vtnet_init_locked(struct vtnet_softc *); 327 static void vtnet_init(void *); 328 329 static void vtnet_exec_ctrl_cmd(struct vtnet_softc *, void *, 330 struct sglist *, int, int); 331 332 static void vtnet_rx_filter(struct vtnet_softc *sc); 333 static int vtnet_ctrl_rx_cmd(struct vtnet_softc *, int, int); 334 static int vtnet_set_promisc(struct vtnet_softc *, int); 335 static int vtnet_set_allmulti(struct vtnet_softc *, int); 336 static void vtnet_rx_filter_mac(struct vtnet_softc *); 337 338 static int vtnet_exec_vlan_filter(struct vtnet_softc *, int, uint16_t); 339 static void vtnet_rx_filter_vlan(struct vtnet_softc *); 340 static void vtnet_set_vlan_filter(struct vtnet_softc *, int, uint16_t); 341 static void vtnet_register_vlan(void *, struct ifnet *, uint16_t); 342 static void vtnet_unregister_vlan(void *, struct ifnet *, uint16_t); 343 344 static int vtnet_ifmedia_upd(struct ifnet *); 345 static void vtnet_ifmedia_sts(struct ifnet *, struct ifmediareq *); 346 347 static void vtnet_add_statistics(struct vtnet_softc *); 348 349 static int vtnet_enable_rx_intr(struct vtnet_softc *); 350 static int vtnet_enable_tx_intr(struct vtnet_softc *); 351 static void vtnet_disable_rx_intr(struct vtnet_softc *); 352 static void vtnet_disable_tx_intr(struct vtnet_softc *); 353 354 /* Tunables. */ 355 static int vtnet_csum_disable = 0; 356 TUNABLE_INT("hw.vtnet.csum_disable", &vtnet_csum_disable); 357 static int vtnet_tso_disable = 1; 358 TUNABLE_INT("hw.vtnet.tso_disable", &vtnet_tso_disable); 359 static int vtnet_lro_disable = 1; 360 TUNABLE_INT("hw.vtnet.lro_disable", &vtnet_lro_disable); 361 362 /* 363 * Reducing the number of transmit completed interrupts can 364 * improve performance. To do so, the define below keeps the 365 * Tx vq interrupt disabled and adds calls to vtnet_txeof() 366 * in the start and watchdog paths. The price to pay for this 367 * is the m_free'ing of transmitted mbufs may be delayed until 368 * the watchdog fires. 369 */ 370 #define VTNET_TX_INTR_MODERATION 371 372 static struct virtio_feature_desc vtnet_feature_desc[] = { 373 { VIRTIO_NET_F_CSUM, "TxChecksum" }, 374 { VIRTIO_NET_F_GUEST_CSUM, "RxChecksum" }, 375 { VIRTIO_NET_F_MAC, "MacAddress" }, 376 { VIRTIO_NET_F_GSO, "TxAllGSO" }, 377 { VIRTIO_NET_F_GUEST_TSO4, "RxTSOv4" }, 378 { VIRTIO_NET_F_GUEST_TSO6, "RxTSOv6" }, 379 { VIRTIO_NET_F_GUEST_ECN, "RxECN" }, 380 { VIRTIO_NET_F_GUEST_UFO, "RxUFO" }, 381 { VIRTIO_NET_F_HOST_TSO4, "TxTSOv4" }, 382 { VIRTIO_NET_F_HOST_TSO6, "TxTSOv6" }, 383 { VIRTIO_NET_F_HOST_ECN, "TxTSOECN" }, 384 { VIRTIO_NET_F_HOST_UFO, "TxUFO" }, 385 { VIRTIO_NET_F_MRG_RXBUF, "MrgRxBuf" }, 386 { VIRTIO_NET_F_STATUS, "Status" }, 387 { VIRTIO_NET_F_CTRL_VQ, "ControlVq" }, 388 { VIRTIO_NET_F_CTRL_RX, "RxMode" }, 389 { VIRTIO_NET_F_CTRL_VLAN, "VLanFilter" }, 390 { VIRTIO_NET_F_CTRL_RX_EXTRA, "RxModeExtra" }, 391 { VIRTIO_NET_F_MQ, "RFS" }, 392 { 0, NULL } 393 }; 394 395 static device_method_t vtnet_methods[] = { 396 /* Device methods. */ 397 DEVMETHOD(device_probe, vtnet_probe), 398 DEVMETHOD(device_attach, vtnet_attach), 399 DEVMETHOD(device_detach, vtnet_detach), 400 DEVMETHOD(device_suspend, vtnet_suspend), 401 DEVMETHOD(device_resume, vtnet_resume), 402 DEVMETHOD(device_shutdown, vtnet_shutdown), 403 404 /* VirtIO methods. */ 405 DEVMETHOD(virtio_config_change, vtnet_config_change), 406 407 { 0, 0 } 408 }; 409 410 static driver_t vtnet_driver = { 411 "vtnet", 412 vtnet_methods, 413 sizeof(struct vtnet_softc) 414 }; 415 416 static devclass_t vtnet_devclass; 417 418 DRIVER_MODULE(vtnet, virtio_pci, vtnet_driver, vtnet_devclass, 419 vtnet_modevent, 0); 420 MODULE_VERSION(vtnet, 1); 421 MODULE_DEPEND(vtnet, virtio, 1, 1, 1); 422 423 static int 424 vtnet_modevent(module_t mod, int type, void *unused) 425 { 426 int error; 427 428 error = 0; 429 430 switch (type) { 431 case MOD_LOAD: 432 break; 433 case MOD_UNLOAD: 434 break; 435 case MOD_SHUTDOWN: 436 break; 437 default: 438 error = EOPNOTSUPP; 439 break; 440 } 441 442 return (error); 443 } 444 445 static int 446 vtnet_probe(device_t dev) 447 { 448 if (virtio_get_device_type(dev) != VIRTIO_ID_NETWORK) 449 return (ENXIO); 450 451 device_set_desc(dev, "VirtIO Networking Adapter"); 452 453 return (BUS_PROBE_DEFAULT); 454 } 455 456 static int 457 vtnet_attach(device_t dev) 458 { 459 struct vtnet_softc *sc; 460 struct ifnet *ifp; 461 int tx_size, error; 462 463 sc = device_get_softc(dev); 464 sc->vtnet_dev = dev; 465 466 lwkt_serialize_init(&sc->vtnet_slz); 467 callout_init(&sc->vtnet_tick_ch); 468 469 ifmedia_init(&sc->vtnet_media, IFM_IMASK, vtnet_ifmedia_upd, 470 vtnet_ifmedia_sts); 471 ifmedia_add(&sc->vtnet_media, VTNET_MEDIATYPE, 0, NULL); 472 ifmedia_set(&sc->vtnet_media, VTNET_MEDIATYPE); 473 474 vtnet_add_statistics(sc); 475 476 virtio_set_feature_desc(dev, vtnet_feature_desc); 477 vtnet_negotiate_features(sc); 478 479 if (virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF)) { 480 sc->vtnet_flags |= VTNET_FLAG_MRG_RXBUFS; 481 sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf); 482 } else { 483 sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr); 484 } 485 486 sc->vtnet_rx_mbuf_size = MCLBYTES; 487 sc->vtnet_rx_mbuf_count = VTNET_NEEDED_RX_MBUFS(sc); 488 489 if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VQ)) { 490 sc->vtnet_flags |= VTNET_FLAG_CTRL_VQ; 491 492 if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_RX)) 493 sc->vtnet_flags |= VTNET_FLAG_CTRL_RX; 494 if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VLAN)) 495 sc->vtnet_flags |= VTNET_FLAG_VLAN_FILTER; 496 } 497 498 vtnet_get_hwaddr(sc); 499 500 error = vtnet_alloc_virtqueues(sc); 501 if (error) { 502 device_printf(dev, "cannot allocate virtqueues\n"); 503 goto fail; 504 } 505 506 ifp = sc->vtnet_ifp = if_alloc(IFT_ETHER); 507 if (ifp == NULL) { 508 device_printf(dev, "cannot allocate ifnet structure\n"); 509 error = ENOSPC; 510 goto fail; 511 } 512 513 ifp->if_softc = sc; 514 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 515 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 516 ifp->if_init = vtnet_init; 517 ifp->if_start = vtnet_start; 518 ifp->if_ioctl = vtnet_ioctl; 519 520 sc->vtnet_rx_size = virtqueue_size(sc->vtnet_rx_vq); 521 sc->vtnet_rx_process_limit = sc->vtnet_rx_size; 522 523 tx_size = virtqueue_size(sc->vtnet_tx_vq); 524 sc->vtnet_tx_size = tx_size; 525 sc->vtnet_txhdridx = 0; 526 sc->vtnet_txhdrarea = contigmalloc( 527 ((sc->vtnet_tx_size / 2) + 1) * sizeof(struct vtnet_tx_header), 528 M_VTNET, M_WAITOK, 0, BUS_SPACE_MAXADDR, 4, 0); 529 if (sc->vtnet_txhdrarea == NULL) { 530 device_printf(dev, "cannot contigmalloc the tx headers\n"); 531 goto fail; 532 } 533 sc->vtnet_macfilter = contigmalloc( 534 sizeof(struct vtnet_mac_filter), 535 M_DEVBUF, M_WAITOK, 0, BUS_SPACE_MAXADDR, 4, 0); 536 if (sc->vtnet_macfilter == NULL) { 537 device_printf(dev, 538 "cannot contigmalloc the mac filter table\n"); 539 goto fail; 540 } 541 ifq_set_maxlen(&ifp->if_snd, tx_size - 1); 542 ifq_set_ready(&ifp->if_snd); 543 544 ether_ifattach(ifp, sc->vtnet_hwaddr, NULL); 545 546 if (virtio_with_feature(dev, VIRTIO_NET_F_STATUS)){ 547 //ifp->if_capabilities |= IFCAP_LINKSTATE; 548 kprintf("add dynamic link state\n"); 549 } 550 551 /* Tell the upper layer(s) we support long frames. */ 552 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 553 ifp->if_capabilities |= IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU; 554 555 if (virtio_with_feature(dev, VIRTIO_NET_F_CSUM)) { 556 ifp->if_capabilities |= IFCAP_TXCSUM; 557 558 if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4)) 559 ifp->if_capabilities |= IFCAP_TSO4; 560 if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6)) 561 ifp->if_capabilities |= IFCAP_TSO6; 562 if (ifp->if_capabilities & IFCAP_TSO) 563 ifp->if_capabilities |= IFCAP_VLAN_HWTSO; 564 565 if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_ECN)) 566 sc->vtnet_flags |= VTNET_FLAG_TSO_ECN; 567 } 568 569 if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_CSUM)) { 570 ifp->if_capabilities |= IFCAP_RXCSUM; 571 572 if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO4) || 573 virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO6)) 574 ifp->if_capabilities |= IFCAP_LRO; 575 } 576 577 if (ifp->if_capabilities & IFCAP_HWCSUM) { 578 /* 579 * VirtIO does not support VLAN tagging, but we can fake 580 * it by inserting and removing the 802.1Q header during 581 * transmit and receive. We are then able to do checksum 582 * offloading of VLAN frames. 583 */ 584 ifp->if_capabilities |= 585 IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM; 586 } 587 588 ifp->if_capenable = ifp->if_capabilities; 589 590 /* 591 * Capabilities after here are not enabled by default. 592 */ 593 594 if (sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER) { 595 ifp->if_capabilities |= IFCAP_VLAN_HWFILTER; 596 597 sc->vtnet_vlan_attach = EVENTHANDLER_REGISTER(vlan_config, 598 vtnet_register_vlan, sc, EVENTHANDLER_PRI_FIRST); 599 sc->vtnet_vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig, 600 vtnet_unregister_vlan, sc, EVENTHANDLER_PRI_FIRST); 601 } 602 603 TASK_INIT(&sc->vtnet_cfgchg_task, 0, vtnet_config_change_task, sc); 604 605 error = virtio_setup_intr(dev, &sc->vtnet_slz); 606 if (error) { 607 device_printf(dev, "cannot setup virtqueue interrupts\n"); 608 ether_ifdetach(ifp); 609 goto fail; 610 } 611 612 /* 613 * Device defaults to promiscuous mode for backwards 614 * compatibility. Turn it off if possible. 615 */ 616 if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) { 617 lwkt_serialize_enter(&sc->vtnet_slz); 618 if (vtnet_set_promisc(sc, 0) != 0) { 619 ifp->if_flags |= IFF_PROMISC; 620 device_printf(dev, 621 "cannot disable promiscuous mode\n"); 622 } 623 lwkt_serialize_exit(&sc->vtnet_slz); 624 } else 625 ifp->if_flags |= IFF_PROMISC; 626 627 fail: 628 if (error) 629 vtnet_detach(dev); 630 631 return (error); 632 } 633 634 static int 635 vtnet_detach(device_t dev) 636 { 637 struct vtnet_softc *sc; 638 struct ifnet *ifp; 639 640 sc = device_get_softc(dev); 641 ifp = sc->vtnet_ifp; 642 643 if (device_is_attached(dev)) { 644 lwkt_serialize_enter(&sc->vtnet_slz); 645 vtnet_stop(sc); 646 lwkt_serialize_exit(&sc->vtnet_slz); 647 648 callout_stop(&sc->vtnet_tick_ch); 649 taskqueue_drain(taskqueue_swi, &sc->vtnet_cfgchg_task); 650 651 ether_ifdetach(ifp); 652 } 653 654 if (sc->vtnet_vlan_attach != NULL) { 655 EVENTHANDLER_DEREGISTER(vlan_config, sc->vtnet_vlan_attach); 656 sc->vtnet_vlan_attach = NULL; 657 } 658 if (sc->vtnet_vlan_detach != NULL) { 659 EVENTHANDLER_DEREGISTER(vlan_unconfg, sc->vtnet_vlan_detach); 660 sc->vtnet_vlan_detach = NULL; 661 } 662 663 if (ifp) { 664 if_free(ifp); 665 sc->vtnet_ifp = NULL; 666 } 667 668 if (sc->vtnet_rx_vq != NULL) 669 vtnet_free_rx_mbufs(sc); 670 if (sc->vtnet_tx_vq != NULL) 671 vtnet_free_tx_mbufs(sc); 672 if (sc->vtnet_ctrl_vq != NULL) 673 vtnet_free_ctrl_vq(sc); 674 675 if (sc->vtnet_txhdrarea != NULL) { 676 contigfree(sc->vtnet_txhdrarea, 677 ((sc->vtnet_tx_size / 2) + 1) * 678 sizeof(struct vtnet_tx_header), M_VTNET); 679 sc->vtnet_txhdrarea = NULL; 680 } 681 if (sc->vtnet_macfilter != NULL) { 682 contigfree(sc->vtnet_macfilter, 683 sizeof(struct vtnet_mac_filter), M_DEVBUF); 684 sc->vtnet_macfilter = NULL; 685 } 686 687 ifmedia_removeall(&sc->vtnet_media); 688 689 return (0); 690 } 691 692 static int 693 vtnet_suspend(device_t dev) 694 { 695 struct vtnet_softc *sc; 696 697 sc = device_get_softc(dev); 698 699 lwkt_serialize_enter(&sc->vtnet_slz); 700 vtnet_stop(sc); 701 sc->vtnet_flags |= VTNET_FLAG_SUSPENDED; 702 lwkt_serialize_exit(&sc->vtnet_slz); 703 704 return (0); 705 } 706 707 static int 708 vtnet_resume(device_t dev) 709 { 710 struct vtnet_softc *sc; 711 struct ifnet *ifp; 712 713 sc = device_get_softc(dev); 714 ifp = sc->vtnet_ifp; 715 716 lwkt_serialize_enter(&sc->vtnet_slz); 717 if (ifp->if_flags & IFF_UP) 718 vtnet_init_locked(sc); 719 sc->vtnet_flags &= ~VTNET_FLAG_SUSPENDED; 720 lwkt_serialize_exit(&sc->vtnet_slz); 721 722 return (0); 723 } 724 725 static int 726 vtnet_shutdown(device_t dev) 727 { 728 729 /* 730 * Suspend already does all of what we need to 731 * do here; we just never expect to be resumed. 732 */ 733 return (vtnet_suspend(dev)); 734 } 735 736 static int 737 vtnet_config_change(device_t dev) 738 { 739 struct vtnet_softc *sc; 740 741 sc = device_get_softc(dev); 742 743 taskqueue_enqueue(taskqueue_thread[mycpuid], &sc->vtnet_cfgchg_task); 744 745 return (1); 746 } 747 748 static void 749 vtnet_negotiate_features(struct vtnet_softc *sc) 750 { 751 device_t dev; 752 uint64_t mask, features; 753 754 dev = sc->vtnet_dev; 755 mask = 0; 756 757 if (vtnet_csum_disable) 758 mask |= VIRTIO_NET_F_CSUM | VIRTIO_NET_F_GUEST_CSUM; 759 760 /* 761 * TSO and LRO are only available when their corresponding 762 * checksum offload feature is also negotiated. 763 */ 764 765 if (vtnet_csum_disable || vtnet_tso_disable) 766 mask |= VIRTIO_NET_F_HOST_TSO4 | VIRTIO_NET_F_HOST_TSO6 | 767 VIRTIO_NET_F_HOST_ECN; 768 769 if (vtnet_csum_disable || vtnet_lro_disable) 770 mask |= VTNET_LRO_FEATURES; 771 772 features = VTNET_FEATURES & ~mask; 773 features |= VIRTIO_F_NOTIFY_ON_EMPTY; 774 sc->vtnet_features = virtio_negotiate_features(dev, features); 775 } 776 777 static int 778 vtnet_alloc_virtqueues(struct vtnet_softc *sc) 779 { 780 device_t dev; 781 struct vq_alloc_info vq_info[3]; 782 int nvqs, rxsegs; 783 784 dev = sc->vtnet_dev; 785 nvqs = 2; 786 787 /* 788 * Indirect descriptors are not needed for the Rx 789 * virtqueue when mergeable buffers are negotiated. 790 * The header is placed inline with the data, not 791 * in a separate descriptor, and mbuf clusters are 792 * always physically contiguous. 793 */ 794 if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) { 795 rxsegs = sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG ? 796 VTNET_MAX_RX_SEGS : VTNET_MIN_RX_SEGS; 797 } else 798 rxsegs = 0; 799 800 VQ_ALLOC_INFO_INIT(&vq_info[0], rxsegs, 801 vtnet_rx_vq_intr, sc, &sc->vtnet_rx_vq, 802 "%s receive", device_get_nameunit(dev)); 803 804 VQ_ALLOC_INFO_INIT(&vq_info[1], VTNET_MAX_TX_SEGS, 805 vtnet_tx_vq_intr, sc, &sc->vtnet_tx_vq, 806 "%s transmit", device_get_nameunit(dev)); 807 808 if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) { 809 nvqs++; 810 811 VQ_ALLOC_INFO_INIT(&vq_info[2], 0, NULL, NULL, 812 &sc->vtnet_ctrl_vq, "%s control", 813 device_get_nameunit(dev)); 814 } 815 816 return (virtio_alloc_virtqueues(dev, 0, nvqs, vq_info)); 817 } 818 819 static void 820 vtnet_get_hwaddr(struct vtnet_softc *sc) 821 { 822 device_t dev; 823 824 dev = sc->vtnet_dev; 825 826 if (virtio_with_feature(dev, VIRTIO_NET_F_MAC)) { 827 virtio_read_device_config(dev, 828 offsetof(struct virtio_net_config, mac), 829 sc->vtnet_hwaddr, ETHER_ADDR_LEN); 830 } else { 831 /* Generate random locally administered unicast address. */ 832 sc->vtnet_hwaddr[0] = 0xB2; 833 karc4rand(&sc->vtnet_hwaddr[1], ETHER_ADDR_LEN - 1); 834 835 vtnet_set_hwaddr(sc); 836 } 837 } 838 839 static void 840 vtnet_set_hwaddr(struct vtnet_softc *sc) 841 { 842 device_t dev; 843 844 dev = sc->vtnet_dev; 845 846 virtio_write_device_config(dev, 847 offsetof(struct virtio_net_config, mac), 848 sc->vtnet_hwaddr, ETHER_ADDR_LEN); 849 } 850 851 static int 852 vtnet_is_link_up(struct vtnet_softc *sc) 853 { 854 device_t dev; 855 struct ifnet *ifp; 856 uint16_t status; 857 858 dev = sc->vtnet_dev; 859 ifp = sc->vtnet_ifp; 860 861 ASSERT_SERIALIZED(&sc->vtnet_slz); 862 863 status = virtio_read_dev_config_2(dev, 864 offsetof(struct virtio_net_config, status)); 865 866 return ((status & VIRTIO_NET_S_LINK_UP) != 0); 867 } 868 869 static void 870 vtnet_update_link_status(struct vtnet_softc *sc) 871 { 872 device_t dev; 873 struct ifnet *ifp; 874 struct ifaltq_subque *ifsq; 875 int link; 876 877 dev = sc->vtnet_dev; 878 ifp = sc->vtnet_ifp; 879 ifsq = ifq_get_subq_default(&ifp->if_snd); 880 881 link = vtnet_is_link_up(sc); 882 883 if (link && ((sc->vtnet_flags & VTNET_FLAG_LINK) == 0)) { 884 sc->vtnet_flags |= VTNET_FLAG_LINK; 885 if (bootverbose) 886 device_printf(dev, "Link is up\n"); 887 ifp->if_link_state = LINK_STATE_UP; 888 if_link_state_change(ifp); 889 if (!ifsq_is_empty(ifsq)) 890 vtnet_start_locked(ifp, ifsq); 891 } else if (!link && (sc->vtnet_flags & VTNET_FLAG_LINK)) { 892 sc->vtnet_flags &= ~VTNET_FLAG_LINK; 893 if (bootverbose) 894 device_printf(dev, "Link is down\n"); 895 896 ifp->if_link_state = LINK_STATE_DOWN; 897 if_link_state_change(ifp); 898 } 899 } 900 901 #if 0 902 static void 903 vtnet_watchdog(struct vtnet_softc *sc) 904 { 905 struct ifnet *ifp; 906 907 ifp = sc->vtnet_ifp; 908 909 #ifdef VTNET_TX_INTR_MODERATION 910 vtnet_txeof(sc); 911 #endif 912 913 if (sc->vtnet_watchdog_timer == 0 || --sc->vtnet_watchdog_timer) 914 return; 915 916 if_printf(ifp, "watchdog timeout -- resetting\n"); 917 #ifdef VTNET_DEBUG 918 virtqueue_dump(sc->vtnet_tx_vq); 919 #endif 920 ifp->if_oerrors++; 921 ifp->if_flags &= ~IFF_RUNNING; 922 vtnet_init_locked(sc); 923 } 924 #endif 925 926 static void 927 vtnet_config_change_task(void *arg, int pending) 928 { 929 struct vtnet_softc *sc; 930 931 sc = arg; 932 933 lwkt_serialize_enter(&sc->vtnet_slz); 934 vtnet_update_link_status(sc); 935 lwkt_serialize_exit(&sc->vtnet_slz); 936 } 937 938 static int 939 vtnet_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data,struct ucred *cr) 940 { 941 struct vtnet_softc *sc; 942 struct ifreq *ifr; 943 int reinit, mask, error; 944 945 sc = ifp->if_softc; 946 ifr = (struct ifreq *) data; 947 reinit = 0; 948 error = 0; 949 950 switch (cmd) { 951 case SIOCSIFMTU: 952 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > VTNET_MAX_MTU) 953 error = EINVAL; 954 else if (ifp->if_mtu != ifr->ifr_mtu) { 955 lwkt_serialize_enter(&sc->vtnet_slz); 956 error = vtnet_change_mtu(sc, ifr->ifr_mtu); 957 lwkt_serialize_exit(&sc->vtnet_slz); 958 } 959 break; 960 961 case SIOCSIFFLAGS: 962 lwkt_serialize_enter(&sc->vtnet_slz); 963 if ((ifp->if_flags & IFF_UP) == 0) { 964 if (ifp->if_flags & IFF_RUNNING) 965 vtnet_stop(sc); 966 } else if (ifp->if_flags & IFF_RUNNING) { 967 if ((ifp->if_flags ^ sc->vtnet_if_flags) & 968 (IFF_PROMISC | IFF_ALLMULTI)) { 969 if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) 970 vtnet_rx_filter(sc); 971 else 972 error = ENOTSUP; 973 } 974 } else 975 vtnet_init_locked(sc); 976 977 if (error == 0) 978 sc->vtnet_if_flags = ifp->if_flags; 979 lwkt_serialize_exit(&sc->vtnet_slz); 980 break; 981 982 case SIOCADDMULTI: 983 case SIOCDELMULTI: 984 lwkt_serialize_enter(&sc->vtnet_slz); 985 if ((sc->vtnet_flags & VTNET_FLAG_CTRL_RX) && 986 (ifp->if_flags & IFF_RUNNING)) 987 vtnet_rx_filter_mac(sc); 988 lwkt_serialize_exit(&sc->vtnet_slz); 989 break; 990 991 case SIOCSIFMEDIA: 992 case SIOCGIFMEDIA: 993 error = ifmedia_ioctl(ifp, ifr, &sc->vtnet_media, cmd); 994 break; 995 996 case SIOCSIFCAP: 997 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 998 999 lwkt_serialize_enter(&sc->vtnet_slz); 1000 1001 if (mask & IFCAP_TXCSUM) { 1002 ifp->if_capenable ^= IFCAP_TXCSUM; 1003 if (ifp->if_capenable & IFCAP_TXCSUM) 1004 ifp->if_hwassist |= VTNET_CSUM_OFFLOAD; 1005 else 1006 ifp->if_hwassist &= ~VTNET_CSUM_OFFLOAD; 1007 } 1008 1009 if (mask & IFCAP_TSO4) { 1010 ifp->if_capenable ^= IFCAP_TSO4; 1011 if (ifp->if_capenable & IFCAP_TSO4) 1012 ifp->if_hwassist |= CSUM_TSO; 1013 else 1014 ifp->if_hwassist &= ~CSUM_TSO; 1015 } 1016 1017 if (mask & IFCAP_RXCSUM) { 1018 ifp->if_capenable ^= IFCAP_RXCSUM; 1019 reinit = 1; 1020 } 1021 1022 if (mask & IFCAP_LRO) { 1023 ifp->if_capenable ^= IFCAP_LRO; 1024 reinit = 1; 1025 } 1026 1027 if (mask & IFCAP_VLAN_HWFILTER) { 1028 ifp->if_capenable ^= IFCAP_VLAN_HWFILTER; 1029 reinit = 1; 1030 } 1031 1032 if (mask & IFCAP_VLAN_HWTSO) 1033 ifp->if_capenable ^= IFCAP_VLAN_HWTSO; 1034 1035 if (mask & IFCAP_VLAN_HWTAGGING) 1036 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 1037 1038 if (reinit && (ifp->if_flags & IFF_RUNNING)) { 1039 ifp->if_flags &= ~IFF_RUNNING; 1040 vtnet_init_locked(sc); 1041 } 1042 //VLAN_CAPABILITIES(ifp); 1043 1044 lwkt_serialize_exit(&sc->vtnet_slz); 1045 break; 1046 1047 default: 1048 error = ether_ioctl(ifp, cmd, data); 1049 break; 1050 } 1051 1052 return (error); 1053 } 1054 1055 static int 1056 vtnet_change_mtu(struct vtnet_softc *sc, int new_mtu) 1057 { 1058 struct ifnet *ifp; 1059 int new_frame_size, clsize; 1060 1061 ifp = sc->vtnet_ifp; 1062 1063 if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) { 1064 new_frame_size = sizeof(struct vtnet_rx_header) + 1065 sizeof(struct ether_vlan_header) + new_mtu; 1066 1067 if (new_frame_size > MJUM9BYTES) 1068 return (EINVAL); 1069 1070 if (new_frame_size <= MCLBYTES) 1071 clsize = MCLBYTES; 1072 else 1073 clsize = MJUM9BYTES; 1074 } else { 1075 new_frame_size = sizeof(struct virtio_net_hdr_mrg_rxbuf) + 1076 sizeof(struct ether_vlan_header) + new_mtu; 1077 1078 if (new_frame_size <= MCLBYTES) 1079 clsize = MCLBYTES; 1080 else 1081 clsize = MJUMPAGESIZE; 1082 } 1083 1084 sc->vtnet_rx_mbuf_size = clsize; 1085 sc->vtnet_rx_mbuf_count = VTNET_NEEDED_RX_MBUFS(sc); 1086 KASSERT(sc->vtnet_rx_mbuf_count < VTNET_MAX_RX_SEGS, 1087 ("too many rx mbufs: %d", sc->vtnet_rx_mbuf_count)); 1088 1089 ifp->if_mtu = new_mtu; 1090 1091 if (ifp->if_flags & IFF_RUNNING) { 1092 ifp->if_flags &= ~IFF_RUNNING; 1093 vtnet_init_locked(sc); 1094 } 1095 1096 return (0); 1097 } 1098 1099 static int 1100 vtnet_init_rx_vq(struct vtnet_softc *sc) 1101 { 1102 struct virtqueue *vq; 1103 int nbufs, error; 1104 1105 vq = sc->vtnet_rx_vq; 1106 nbufs = 0; 1107 error = ENOSPC; 1108 1109 while (!virtqueue_full(vq)) { 1110 if ((error = vtnet_newbuf(sc)) != 0) 1111 break; 1112 nbufs++; 1113 } 1114 1115 if (nbufs > 0) { 1116 virtqueue_notify(vq, &sc->vtnet_slz); 1117 1118 /* 1119 * EMSGSIZE signifies the virtqueue did not have enough 1120 * entries available to hold the last mbuf. This is not 1121 * an error. We should not get ENOSPC since we check if 1122 * the virtqueue is full before attempting to add a 1123 * buffer. 1124 */ 1125 if (error == EMSGSIZE) 1126 error = 0; 1127 } 1128 1129 return (error); 1130 } 1131 1132 static void 1133 vtnet_free_rx_mbufs(struct vtnet_softc *sc) 1134 { 1135 struct virtqueue *vq; 1136 struct mbuf *m; 1137 int last; 1138 1139 vq = sc->vtnet_rx_vq; 1140 last = 0; 1141 1142 while ((m = virtqueue_drain(vq, &last)) != NULL) 1143 m_freem(m); 1144 1145 KASSERT(virtqueue_empty(vq), ("mbufs remaining in Rx Vq")); 1146 } 1147 1148 static void 1149 vtnet_free_tx_mbufs(struct vtnet_softc *sc) 1150 { 1151 struct virtqueue *vq; 1152 struct vtnet_tx_header *txhdr; 1153 int last; 1154 1155 vq = sc->vtnet_tx_vq; 1156 last = 0; 1157 1158 while ((txhdr = virtqueue_drain(vq, &last)) != NULL) { 1159 m_freem(txhdr->vth_mbuf); 1160 } 1161 1162 KASSERT(virtqueue_empty(vq), ("mbufs remaining in Tx Vq")); 1163 } 1164 1165 static void 1166 vtnet_free_ctrl_vq(struct vtnet_softc *sc) 1167 { 1168 /* 1169 * The control virtqueue is only polled, therefore 1170 * it should already be empty. 1171 */ 1172 KASSERT(virtqueue_empty(sc->vtnet_ctrl_vq), 1173 ("Ctrl Vq not empty")); 1174 } 1175 1176 static struct mbuf * 1177 vtnet_alloc_rxbuf(struct vtnet_softc *sc, int nbufs, struct mbuf **m_tailp) 1178 { 1179 struct mbuf *m_head, *m_tail, *m; 1180 int i, clsize; 1181 1182 clsize = sc->vtnet_rx_mbuf_size; 1183 1184 /*use getcl instead of getjcl. see if_mxge.c comment line 2398*/ 1185 //m_head = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, clsize); 1186 m_head = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR ); 1187 if (m_head == NULL) 1188 goto fail; 1189 1190 m_head->m_len = clsize; 1191 m_tail = m_head; 1192 1193 if (nbufs > 1) { 1194 KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG, 1195 ("chained Rx mbuf requested without LRO_NOMRG")); 1196 1197 for (i = 0; i < nbufs - 1; i++) { 1198 //m = m_getjcl(M_DONTWAIT, MT_DATA, 0, clsize); 1199 m = m_getcl(MB_DONTWAIT, MT_DATA, 0); 1200 if (m == NULL) 1201 goto fail; 1202 1203 m->m_len = clsize; 1204 m_tail->m_next = m; 1205 m_tail = m; 1206 } 1207 } 1208 1209 if (m_tailp != NULL) 1210 *m_tailp = m_tail; 1211 1212 return (m_head); 1213 1214 fail: 1215 sc->vtnet_stats.mbuf_alloc_failed++; 1216 m_freem(m_head); 1217 1218 return (NULL); 1219 } 1220 1221 static int 1222 vtnet_replace_rxbuf(struct vtnet_softc *sc, struct mbuf *m0, int len0) 1223 { 1224 struct mbuf *m, *m_prev; 1225 struct mbuf *m_new, *m_tail; 1226 int len, clsize, nreplace, error; 1227 1228 m = m0; 1229 m_prev = NULL; 1230 len = len0; 1231 1232 m_tail = NULL; 1233 clsize = sc->vtnet_rx_mbuf_size; 1234 nreplace = 0; 1235 1236 if (m->m_next != NULL) 1237 KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG, 1238 ("chained Rx mbuf without LRO_NOMRG")); 1239 1240 /* 1241 * Since LRO_NOMRG mbuf chains are so large, we want to avoid 1242 * allocating an entire chain for each received frame. When 1243 * the received frame's length is less than that of the chain, 1244 * the unused mbufs are reassigned to the new chain. 1245 */ 1246 while (len > 0) { 1247 /* 1248 * Something is seriously wrong if we received 1249 * a frame larger than the mbuf chain. Drop it. 1250 */ 1251 if (m == NULL) { 1252 sc->vtnet_stats.rx_frame_too_large++; 1253 return (EMSGSIZE); 1254 } 1255 1256 KASSERT(m->m_len == clsize, 1257 ("mbuf length not expected cluster size: %d", 1258 m->m_len)); 1259 1260 m->m_len = MIN(m->m_len, len); 1261 len -= m->m_len; 1262 1263 m_prev = m; 1264 m = m->m_next; 1265 nreplace++; 1266 } 1267 1268 KASSERT(m_prev != NULL, ("m_prev == NULL")); 1269 KASSERT(nreplace <= sc->vtnet_rx_mbuf_count, 1270 ("too many replacement mbufs: %d/%d", nreplace, 1271 sc->vtnet_rx_mbuf_count)); 1272 1273 m_new = vtnet_alloc_rxbuf(sc, nreplace, &m_tail); 1274 if (m_new == NULL) { 1275 m_prev->m_len = clsize; 1276 return (ENOBUFS); 1277 } 1278 1279 /* 1280 * Move unused mbufs, if any, from the original chain 1281 * onto the end of the new chain. 1282 */ 1283 if (m_prev->m_next != NULL) { 1284 m_tail->m_next = m_prev->m_next; 1285 m_prev->m_next = NULL; 1286 } 1287 1288 error = vtnet_enqueue_rxbuf(sc, m_new); 1289 if (error) { 1290 /* 1291 * BAD! We could not enqueue the replacement mbuf chain. We 1292 * must restore the m0 chain to the original state if it was 1293 * modified so we can subsequently discard it. 1294 * 1295 * NOTE: The replacement is suppose to be an identical copy 1296 * to the one just dequeued so this is an unexpected error. 1297 */ 1298 sc->vtnet_stats.rx_enq_replacement_failed++; 1299 1300 if (m_tail->m_next != NULL) { 1301 m_prev->m_next = m_tail->m_next; 1302 m_tail->m_next = NULL; 1303 } 1304 1305 m_prev->m_len = clsize; 1306 m_freem(m_new); 1307 } 1308 1309 return (error); 1310 } 1311 1312 static int 1313 vtnet_newbuf(struct vtnet_softc *sc) 1314 { 1315 struct mbuf *m; 1316 int error; 1317 1318 m = vtnet_alloc_rxbuf(sc, sc->vtnet_rx_mbuf_count, NULL); 1319 if (m == NULL) 1320 return (ENOBUFS); 1321 1322 error = vtnet_enqueue_rxbuf(sc, m); 1323 if (error) 1324 m_freem(m); 1325 1326 return (error); 1327 } 1328 1329 static void 1330 vtnet_discard_merged_rxbuf(struct vtnet_softc *sc, int nbufs) 1331 { 1332 struct virtqueue *vq; 1333 struct mbuf *m; 1334 1335 vq = sc->vtnet_rx_vq; 1336 1337 while (--nbufs > 0) { 1338 if ((m = virtqueue_dequeue(vq, NULL)) == NULL) 1339 break; 1340 vtnet_discard_rxbuf(sc, m); 1341 } 1342 } 1343 1344 static void 1345 vtnet_discard_rxbuf(struct vtnet_softc *sc, struct mbuf *m) 1346 { 1347 int error; 1348 1349 /* 1350 * Requeue the discarded mbuf. This should always be 1351 * successful since it was just dequeued. 1352 */ 1353 error = vtnet_enqueue_rxbuf(sc, m); 1354 KASSERT(error == 0, ("cannot requeue discarded mbuf")); 1355 } 1356 1357 static int 1358 vtnet_enqueue_rxbuf(struct vtnet_softc *sc, struct mbuf *m) 1359 { 1360 struct sglist sg; 1361 struct sglist_seg segs[VTNET_MAX_RX_SEGS]; 1362 struct vtnet_rx_header *rxhdr; 1363 struct virtio_net_hdr *hdr; 1364 uint8_t *mdata; 1365 int offset, error; 1366 1367 ASSERT_SERIALIZED(&sc->vtnet_slz); 1368 if ((sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG) == 0) 1369 KASSERT(m->m_next == NULL, ("chained Rx mbuf")); 1370 1371 sglist_init(&sg, VTNET_MAX_RX_SEGS, segs); 1372 1373 mdata = mtod(m, uint8_t *); 1374 offset = 0; 1375 1376 if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) { 1377 rxhdr = (struct vtnet_rx_header *) mdata; 1378 hdr = &rxhdr->vrh_hdr; 1379 offset += sizeof(struct vtnet_rx_header); 1380 1381 error = sglist_append(&sg, hdr, sc->vtnet_hdr_size); 1382 KASSERT(error == 0, ("cannot add header to sglist")); 1383 } 1384 1385 error = sglist_append(&sg, mdata + offset, m->m_len - offset); 1386 if (error) 1387 return (error); 1388 1389 if (m->m_next != NULL) { 1390 error = sglist_append_mbuf(&sg, m->m_next); 1391 if (error) 1392 return (error); 1393 } 1394 1395 return (virtqueue_enqueue(sc->vtnet_rx_vq, m, &sg, 0, sg.sg_nseg)); 1396 } 1397 1398 static void 1399 vtnet_vlan_tag_remove(struct mbuf *m) 1400 { 1401 struct ether_vlan_header *evl; 1402 1403 evl = mtod(m, struct ether_vlan_header *); 1404 1405 m->m_pkthdr.ether_vlantag = ntohs(evl->evl_tag); 1406 m->m_flags |= M_VLANTAG; 1407 1408 /* Strip the 802.1Q header. */ 1409 bcopy((char *) evl, (char *) evl + ETHER_VLAN_ENCAP_LEN, 1410 ETHER_HDR_LEN - ETHER_TYPE_LEN); 1411 m_adj(m, ETHER_VLAN_ENCAP_LEN); 1412 } 1413 1414 /* 1415 * Alternative method of doing receive checksum offloading. Rather 1416 * than parsing the received frame down to the IP header, use the 1417 * csum_offset to determine which CSUM_* flags are appropriate. We 1418 * can get by with doing this only because the checksum offsets are 1419 * unique for the things we care about. 1420 */ 1421 static int 1422 vtnet_rx_csum(struct vtnet_softc *sc, struct mbuf *m, 1423 struct virtio_net_hdr *hdr) 1424 { 1425 struct ether_header *eh; 1426 struct ether_vlan_header *evh; 1427 struct udphdr *udp; 1428 int csum_len; 1429 uint16_t eth_type; 1430 1431 csum_len = hdr->csum_start + hdr->csum_offset; 1432 1433 if (csum_len < sizeof(struct ether_header) + sizeof(struct ip)) 1434 return (1); 1435 if (m->m_len < csum_len) 1436 return (1); 1437 1438 eh = mtod(m, struct ether_header *); 1439 eth_type = ntohs(eh->ether_type); 1440 if (eth_type == ETHERTYPE_VLAN) { 1441 evh = mtod(m, struct ether_vlan_header *); 1442 eth_type = ntohs(evh->evl_proto); 1443 } 1444 1445 if (eth_type != ETHERTYPE_IP && eth_type != ETHERTYPE_IPV6) { 1446 sc->vtnet_stats.rx_csum_bad_ethtype++; 1447 return (1); 1448 } 1449 1450 /* Use the offset to determine the appropriate CSUM_* flags. */ 1451 switch (hdr->csum_offset) { 1452 case offsetof(struct udphdr, uh_sum): 1453 if (m->m_len < hdr->csum_start + sizeof(struct udphdr)) 1454 return (1); 1455 udp = (struct udphdr *)(mtod(m, uint8_t *) + hdr->csum_start); 1456 if (udp->uh_sum == 0) 1457 return (0); 1458 1459 /* FALLTHROUGH */ 1460 1461 case offsetof(struct tcphdr, th_sum): 1462 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 1463 m->m_pkthdr.csum_data = 0xFFFF; 1464 break; 1465 1466 default: 1467 sc->vtnet_stats.rx_csum_bad_offset++; 1468 return (1); 1469 } 1470 1471 sc->vtnet_stats.rx_csum_offloaded++; 1472 1473 return (0); 1474 } 1475 1476 static int 1477 vtnet_rxeof_merged(struct vtnet_softc *sc, struct mbuf *m_head, int nbufs) 1478 { 1479 struct ifnet *ifp; 1480 struct virtqueue *vq; 1481 struct mbuf *m, *m_tail; 1482 int len; 1483 1484 ifp = sc->vtnet_ifp; 1485 vq = sc->vtnet_rx_vq; 1486 m_tail = m_head; 1487 1488 while (--nbufs > 0) { 1489 m = virtqueue_dequeue(vq, &len); 1490 if (m == NULL) { 1491 ifp->if_ierrors++; 1492 goto fail; 1493 } 1494 1495 if (vtnet_newbuf(sc) != 0) { 1496 ifp->if_iqdrops++; 1497 vtnet_discard_rxbuf(sc, m); 1498 if (nbufs > 1) 1499 vtnet_discard_merged_rxbuf(sc, nbufs); 1500 goto fail; 1501 } 1502 1503 if (m->m_len < len) 1504 len = m->m_len; 1505 1506 m->m_len = len; 1507 m->m_flags &= ~M_PKTHDR; 1508 1509 m_head->m_pkthdr.len += len; 1510 m_tail->m_next = m; 1511 m_tail = m; 1512 } 1513 1514 return (0); 1515 1516 fail: 1517 sc->vtnet_stats.rx_mergeable_failed++; 1518 m_freem(m_head); 1519 1520 return (1); 1521 } 1522 1523 static int 1524 vtnet_rxeof(struct vtnet_softc *sc, int count, int *rx_npktsp) 1525 { 1526 struct virtio_net_hdr lhdr; 1527 struct ifnet *ifp; 1528 struct virtqueue *vq; 1529 struct mbuf *m; 1530 struct ether_header *eh; 1531 struct virtio_net_hdr *hdr; 1532 struct virtio_net_hdr_mrg_rxbuf *mhdr; 1533 int len, deq, nbufs, adjsz, rx_npkts; 1534 1535 ifp = sc->vtnet_ifp; 1536 vq = sc->vtnet_rx_vq; 1537 hdr = &lhdr; 1538 deq = 0; 1539 rx_npkts = 0; 1540 1541 ASSERT_SERIALIZED(&sc->vtnet_slz); 1542 1543 while (--count >= 0) { 1544 m = virtqueue_dequeue(vq, &len); 1545 if (m == NULL) 1546 break; 1547 deq++; 1548 1549 if (len < sc->vtnet_hdr_size + ETHER_HDR_LEN) { 1550 ifp->if_ierrors++; 1551 vtnet_discard_rxbuf(sc, m); 1552 continue; 1553 } 1554 1555 if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) { 1556 nbufs = 1; 1557 adjsz = sizeof(struct vtnet_rx_header); 1558 /* 1559 * Account for our pad between the header and 1560 * the actual start of the frame. 1561 */ 1562 len += VTNET_RX_HEADER_PAD; 1563 } else { 1564 mhdr = mtod(m, struct virtio_net_hdr_mrg_rxbuf *); 1565 nbufs = mhdr->num_buffers; 1566 adjsz = sizeof(struct virtio_net_hdr_mrg_rxbuf); 1567 } 1568 1569 if (vtnet_replace_rxbuf(sc, m, len) != 0) { 1570 ifp->if_iqdrops++; 1571 vtnet_discard_rxbuf(sc, m); 1572 if (nbufs > 1) 1573 vtnet_discard_merged_rxbuf(sc, nbufs); 1574 continue; 1575 } 1576 1577 m->m_pkthdr.len = len; 1578 m->m_pkthdr.rcvif = ifp; 1579 m->m_pkthdr.csum_flags = 0; 1580 1581 if (nbufs > 1) { 1582 if (vtnet_rxeof_merged(sc, m, nbufs) != 0) 1583 continue; 1584 } 1585 1586 ifp->if_ipackets++; 1587 1588 /* 1589 * Save copy of header before we strip it. For both mergeable 1590 * and non-mergeable, the VirtIO header is placed first in the 1591 * mbuf's data. We no longer need num_buffers, so always use a 1592 * virtio_net_hdr. 1593 */ 1594 memcpy(hdr, mtod(m, void *), sizeof(struct virtio_net_hdr)); 1595 m_adj(m, adjsz); 1596 1597 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) { 1598 eh = mtod(m, struct ether_header *); 1599 if (eh->ether_type == htons(ETHERTYPE_VLAN)) { 1600 vtnet_vlan_tag_remove(m); 1601 1602 /* 1603 * With the 802.1Q header removed, update the 1604 * checksum starting location accordingly. 1605 */ 1606 if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) 1607 hdr->csum_start -= 1608 ETHER_VLAN_ENCAP_LEN; 1609 } 1610 } 1611 1612 if (ifp->if_capenable & IFCAP_RXCSUM && 1613 hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { 1614 if (vtnet_rx_csum(sc, m, hdr) != 0) 1615 sc->vtnet_stats.rx_csum_failed++; 1616 } 1617 1618 lwkt_serialize_exit(&sc->vtnet_slz); 1619 rx_npkts++; 1620 ifp->if_input(ifp, m, NULL, -1); 1621 lwkt_serialize_enter(&sc->vtnet_slz); 1622 1623 /* 1624 * The interface may have been stopped while we were 1625 * passing the packet up the network stack. 1626 */ 1627 if ((ifp->if_flags & IFF_RUNNING) == 0) 1628 break; 1629 } 1630 1631 virtqueue_notify(vq, &sc->vtnet_slz); 1632 1633 if (rx_npktsp != NULL) 1634 *rx_npktsp = rx_npkts; 1635 1636 return (count > 0 ? 0 : EAGAIN); 1637 } 1638 1639 static void 1640 vtnet_rx_intr_task(void *arg) 1641 { 1642 struct vtnet_softc *sc; 1643 struct ifnet *ifp; 1644 int more; 1645 1646 sc = arg; 1647 ifp = sc->vtnet_ifp; 1648 1649 next: 1650 // lwkt_serialize_enter(&sc->vtnet_slz); 1651 1652 if ((ifp->if_flags & IFF_RUNNING) == 0) { 1653 vtnet_enable_rx_intr(sc); 1654 // lwkt_serialize_exit(&sc->vtnet_slz); 1655 return; 1656 } 1657 1658 more = vtnet_rxeof(sc, sc->vtnet_rx_process_limit, NULL); 1659 if (!more && vtnet_enable_rx_intr(sc) != 0) { 1660 vtnet_disable_rx_intr(sc); 1661 more = 1; 1662 } 1663 1664 // lwkt_serialize_exit(&sc->vtnet_slz); 1665 1666 if (more) { 1667 sc->vtnet_stats.rx_task_rescheduled++; 1668 goto next; 1669 } 1670 } 1671 1672 static int 1673 vtnet_rx_vq_intr(void *xsc) 1674 { 1675 struct vtnet_softc *sc; 1676 1677 sc = xsc; 1678 1679 vtnet_disable_rx_intr(sc); 1680 vtnet_rx_intr_task(sc); 1681 1682 return (1); 1683 } 1684 1685 static void 1686 vtnet_txeof(struct vtnet_softc *sc) 1687 { 1688 struct virtqueue *vq; 1689 struct ifnet *ifp; 1690 struct vtnet_tx_header *txhdr; 1691 int deq; 1692 1693 vq = sc->vtnet_tx_vq; 1694 ifp = sc->vtnet_ifp; 1695 deq = 0; 1696 1697 ASSERT_SERIALIZED(&sc->vtnet_slz); 1698 1699 while ((txhdr = virtqueue_dequeue(vq, NULL)) != NULL) { 1700 deq++; 1701 ifp->if_opackets++; 1702 m_freem(txhdr->vth_mbuf); 1703 } 1704 1705 if (deq > 0) { 1706 ifq_clr_oactive(&ifp->if_snd); 1707 if (virtqueue_empty(vq)) 1708 sc->vtnet_watchdog_timer = 0; 1709 } 1710 } 1711 1712 static struct mbuf * 1713 vtnet_tx_offload(struct vtnet_softc *sc, struct mbuf *m, 1714 struct virtio_net_hdr *hdr) 1715 { 1716 struct ifnet *ifp; 1717 struct ether_header *eh; 1718 struct ether_vlan_header *evh; 1719 struct ip *ip; 1720 struct ip6_hdr *ip6; 1721 struct tcphdr *tcp; 1722 int ip_offset; 1723 uint16_t eth_type, csum_start; 1724 uint8_t ip_proto, gso_type; 1725 1726 ifp = sc->vtnet_ifp; 1727 M_ASSERTPKTHDR(m); 1728 1729 ip_offset = sizeof(struct ether_header); 1730 if (m->m_len < ip_offset) { 1731 if ((m = m_pullup(m, ip_offset)) == NULL) 1732 return (NULL); 1733 } 1734 1735 eh = mtod(m, struct ether_header *); 1736 eth_type = ntohs(eh->ether_type); 1737 if (eth_type == ETHERTYPE_VLAN) { 1738 ip_offset = sizeof(struct ether_vlan_header); 1739 if (m->m_len < ip_offset) { 1740 if ((m = m_pullup(m, ip_offset)) == NULL) 1741 return (NULL); 1742 } 1743 evh = mtod(m, struct ether_vlan_header *); 1744 eth_type = ntohs(evh->evl_proto); 1745 } 1746 1747 switch (eth_type) { 1748 case ETHERTYPE_IP: 1749 if (m->m_len < ip_offset + sizeof(struct ip)) { 1750 m = m_pullup(m, ip_offset + sizeof(struct ip)); 1751 if (m == NULL) 1752 return (NULL); 1753 } 1754 1755 ip = (struct ip *)(mtod(m, uint8_t *) + ip_offset); 1756 ip_proto = ip->ip_p; 1757 csum_start = ip_offset + (ip->ip_hl << 2); 1758 gso_type = VIRTIO_NET_HDR_GSO_TCPV4; 1759 break; 1760 1761 case ETHERTYPE_IPV6: 1762 if (m->m_len < ip_offset + sizeof(struct ip6_hdr)) { 1763 m = m_pullup(m, ip_offset + sizeof(struct ip6_hdr)); 1764 if (m == NULL) 1765 return (NULL); 1766 } 1767 1768 ip6 = (struct ip6_hdr *)(mtod(m, uint8_t *) + ip_offset); 1769 /* 1770 * XXX Assume no extension headers are present. Presently, 1771 * this will always be true in the case of TSO, and FreeBSD 1772 * does not perform checksum offloading of IPv6 yet. 1773 */ 1774 ip_proto = ip6->ip6_nxt; 1775 csum_start = ip_offset + sizeof(struct ip6_hdr); 1776 gso_type = VIRTIO_NET_HDR_GSO_TCPV6; 1777 break; 1778 1779 default: 1780 return (m); 1781 } 1782 1783 if (m->m_pkthdr.csum_flags & VTNET_CSUM_OFFLOAD) { 1784 hdr->flags |= VIRTIO_NET_HDR_F_NEEDS_CSUM; 1785 hdr->csum_start = csum_start; 1786 hdr->csum_offset = m->m_pkthdr.csum_data; 1787 1788 sc->vtnet_stats.tx_csum_offloaded++; 1789 } 1790 1791 if (m->m_pkthdr.csum_flags & CSUM_TSO) { 1792 if (ip_proto != IPPROTO_TCP) 1793 return (m); 1794 1795 if (m->m_len < csum_start + sizeof(struct tcphdr)) { 1796 m = m_pullup(m, csum_start + sizeof(struct tcphdr)); 1797 if (m == NULL) 1798 return (NULL); 1799 } 1800 1801 tcp = (struct tcphdr *)(mtod(m, uint8_t *) + csum_start); 1802 hdr->gso_type = gso_type; 1803 hdr->hdr_len = csum_start + (tcp->th_off << 2); 1804 hdr->gso_size = m->m_pkthdr.tso_segsz; 1805 1806 if (tcp->th_flags & TH_CWR) { 1807 /* 1808 * Drop if we did not negotiate VIRTIO_NET_F_HOST_ECN. 1809 * ECN support is only configurable globally with the 1810 * net.inet.tcp.ecn.enable sysctl knob. 1811 */ 1812 if ((sc->vtnet_flags & VTNET_FLAG_TSO_ECN) == 0) { 1813 if_printf(ifp, "TSO with ECN not supported " 1814 "by host\n"); 1815 m_freem(m); 1816 return (NULL); 1817 } 1818 1819 hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN; 1820 } 1821 1822 sc->vtnet_stats.tx_tso_offloaded++; 1823 } 1824 1825 return (m); 1826 } 1827 1828 static int 1829 vtnet_enqueue_txbuf(struct vtnet_softc *sc, struct mbuf **m_head, 1830 struct vtnet_tx_header *txhdr) 1831 { 1832 struct sglist sg; 1833 struct sglist_seg segs[VTNET_MAX_TX_SEGS]; 1834 struct virtqueue *vq; 1835 struct mbuf *m; 1836 int collapsed, error; 1837 1838 vq = sc->vtnet_tx_vq; 1839 m = *m_head; 1840 collapsed = 0; 1841 1842 sglist_init(&sg, VTNET_MAX_TX_SEGS, segs); 1843 error = sglist_append(&sg, &txhdr->vth_uhdr, sc->vtnet_hdr_size); 1844 KASSERT(error == 0 && sg.sg_nseg == 1, 1845 ("cannot add header to sglist")); 1846 1847 again: 1848 error = sglist_append_mbuf(&sg, m); 1849 if (error) { 1850 if (collapsed) 1851 goto fail; 1852 1853 //m = m_collapse(m, MB_DONTWAIT, VTNET_MAX_TX_SEGS - 1); 1854 m = m_defrag(m, MB_DONTWAIT); 1855 if (m == NULL) 1856 goto fail; 1857 1858 *m_head = m; 1859 collapsed = 1; 1860 goto again; 1861 } 1862 1863 txhdr->vth_mbuf = m; 1864 1865 return (virtqueue_enqueue(vq, txhdr, &sg, sg.sg_nseg, 0)); 1866 1867 fail: 1868 m_freem(*m_head); 1869 *m_head = NULL; 1870 1871 return (ENOBUFS); 1872 } 1873 1874 static struct mbuf * 1875 vtnet_vlan_tag_insert(struct mbuf *m) 1876 { 1877 struct mbuf *n; 1878 struct ether_vlan_header *evl; 1879 1880 if (M_WRITABLE(m) == 0) { 1881 n = m_dup(m, MB_DONTWAIT); 1882 m_freem(m); 1883 if ((m = n) == NULL) 1884 return (NULL); 1885 } 1886 1887 M_PREPEND(m, ETHER_VLAN_ENCAP_LEN, MB_DONTWAIT); 1888 if (m == NULL) 1889 return (NULL); 1890 if (m->m_len < sizeof(struct ether_vlan_header)) { 1891 m = m_pullup(m, sizeof(struct ether_vlan_header)); 1892 if (m == NULL) 1893 return (NULL); 1894 } 1895 1896 /* Insert 802.1Q header into the existing Ethernet header. */ 1897 evl = mtod(m, struct ether_vlan_header *); 1898 bcopy((char *) evl + ETHER_VLAN_ENCAP_LEN, 1899 (char *) evl, ETHER_HDR_LEN - ETHER_TYPE_LEN); 1900 evl->evl_encap_proto = htons(ETHERTYPE_VLAN); 1901 evl->evl_tag = htons(m->m_pkthdr.ether_vlantag); 1902 m->m_flags &= ~M_VLANTAG; 1903 1904 return (m); 1905 } 1906 1907 static int 1908 vtnet_encap(struct vtnet_softc *sc, struct mbuf **m_head) 1909 { 1910 struct vtnet_tx_header *txhdr; 1911 struct virtio_net_hdr *hdr; 1912 struct mbuf *m; 1913 int error; 1914 1915 txhdr = &sc->vtnet_txhdrarea[sc->vtnet_txhdridx]; 1916 memset(txhdr, 0, sizeof(struct vtnet_tx_header)); 1917 1918 /* 1919 * Always use the non-mergeable header to simplify things. When 1920 * the mergeable feature is negotiated, the num_buffers field 1921 * must be set to zero. We use vtnet_hdr_size later to enqueue 1922 * the correct header size to the host. 1923 */ 1924 hdr = &txhdr->vth_uhdr.hdr; 1925 m = *m_head; 1926 1927 error = ENOBUFS; 1928 1929 if (m->m_flags & M_VLANTAG) { 1930 //m = ether_vlanencap(m, m->m_pkthdr.ether_vtag); 1931 m = vtnet_vlan_tag_insert(m); 1932 if ((*m_head = m) == NULL) 1933 goto fail; 1934 m->m_flags &= ~M_VLANTAG; 1935 } 1936 1937 if (m->m_pkthdr.csum_flags != 0) { 1938 m = vtnet_tx_offload(sc, m, hdr); 1939 if ((*m_head = m) == NULL) 1940 goto fail; 1941 } 1942 1943 error = vtnet_enqueue_txbuf(sc, m_head, txhdr); 1944 if (error == 0) 1945 sc->vtnet_txhdridx = 1946 (sc->vtnet_txhdridx + 1) % ((sc->vtnet_tx_size / 2) + 1); 1947 fail: 1948 return (error); 1949 } 1950 1951 static void 1952 vtnet_start(struct ifnet *ifp, struct ifaltq_subque *ifsq) 1953 { 1954 struct vtnet_softc *sc; 1955 1956 sc = ifp->if_softc; 1957 1958 ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq); 1959 lwkt_serialize_enter(&sc->vtnet_slz); 1960 vtnet_start_locked(ifp, ifsq); 1961 lwkt_serialize_exit(&sc->vtnet_slz); 1962 } 1963 1964 static void 1965 vtnet_start_locked(struct ifnet *ifp, struct ifaltq_subque *ifsq) 1966 { 1967 struct vtnet_softc *sc; 1968 struct virtqueue *vq; 1969 struct mbuf *m0; 1970 int enq; 1971 1972 sc = ifp->if_softc; 1973 vq = sc->vtnet_tx_vq; 1974 enq = 0; 1975 1976 ASSERT_SERIALIZED(&sc->vtnet_slz); 1977 1978 if ((ifp->if_flags & (IFF_RUNNING)) != 1979 IFF_RUNNING || ((sc->vtnet_flags & VTNET_FLAG_LINK) == 0)) 1980 return; 1981 1982 #ifdef VTNET_TX_INTR_MODERATION 1983 if (virtqueue_nused(vq) >= sc->vtnet_tx_size / 2) 1984 vtnet_txeof(sc); 1985 #endif 1986 1987 while (!ifsq_is_empty(ifsq)) { 1988 if (virtqueue_full(vq)) { 1989 ifq_set_oactive(&ifp->if_snd); 1990 break; 1991 } 1992 1993 m0 = ifq_dequeue(&ifp->if_snd); 1994 if (m0 == NULL) 1995 break; 1996 1997 if (vtnet_encap(sc, &m0) != 0) { 1998 if (m0 == NULL) 1999 break; 2000 ifq_prepend(&ifp->if_snd, m0); 2001 ifq_set_oactive(&ifp->if_snd); 2002 break; 2003 } 2004 2005 enq++; 2006 ETHER_BPF_MTAP(ifp, m0); 2007 } 2008 2009 if (enq > 0) { 2010 virtqueue_notify(vq, &sc->vtnet_slz); 2011 sc->vtnet_watchdog_timer = VTNET_WATCHDOG_TIMEOUT; 2012 } 2013 } 2014 2015 static void 2016 vtnet_tick(void *xsc) 2017 { 2018 struct vtnet_softc *sc; 2019 2020 sc = xsc; 2021 2022 #if 0 2023 ASSERT_SERIALIZED(&sc->vtnet_slz); 2024 #ifdef VTNET_DEBUG 2025 virtqueue_dump(sc->vtnet_rx_vq); 2026 virtqueue_dump(sc->vtnet_tx_vq); 2027 #endif 2028 2029 vtnet_watchdog(sc); 2030 callout_reset(&sc->vtnet_tick_ch, hz, vtnet_tick, sc); 2031 #endif 2032 } 2033 2034 static void 2035 vtnet_tx_intr_task(void *arg) 2036 { 2037 struct vtnet_softc *sc; 2038 struct ifnet *ifp; 2039 struct ifaltq_subque *ifsq; 2040 2041 sc = arg; 2042 ifp = sc->vtnet_ifp; 2043 ifsq = ifq_get_subq_default(&ifp->if_snd); 2044 2045 next: 2046 // lwkt_serialize_enter(&sc->vtnet_slz); 2047 2048 if ((ifp->if_flags & IFF_RUNNING) == 0) { 2049 vtnet_enable_tx_intr(sc); 2050 // lwkt_serialize_exit(&sc->vtnet_slz); 2051 return; 2052 } 2053 2054 vtnet_txeof(sc); 2055 2056 if (!ifsq_is_empty(ifsq)) 2057 vtnet_start_locked(ifp, ifsq); 2058 2059 if (vtnet_enable_tx_intr(sc) != 0) { 2060 vtnet_disable_tx_intr(sc); 2061 sc->vtnet_stats.tx_task_rescheduled++; 2062 // lwkt_serialize_exit(&sc->vtnet_slz); 2063 goto next; 2064 } 2065 2066 // lwkt_serialize_exit(&sc->vtnet_slz); 2067 } 2068 2069 static int 2070 vtnet_tx_vq_intr(void *xsc) 2071 { 2072 struct vtnet_softc *sc; 2073 2074 sc = xsc; 2075 2076 vtnet_disable_tx_intr(sc); 2077 vtnet_tx_intr_task(sc); 2078 2079 return (1); 2080 } 2081 2082 static void 2083 vtnet_stop(struct vtnet_softc *sc) 2084 { 2085 device_t dev; 2086 struct ifnet *ifp; 2087 2088 dev = sc->vtnet_dev; 2089 ifp = sc->vtnet_ifp; 2090 2091 ASSERT_SERIALIZED(&sc->vtnet_slz); 2092 2093 sc->vtnet_watchdog_timer = 0; 2094 callout_stop(&sc->vtnet_tick_ch); 2095 ifq_clr_oactive(&ifp->if_snd); 2096 ifp->if_flags &= ~(IFF_RUNNING); 2097 2098 vtnet_disable_rx_intr(sc); 2099 vtnet_disable_tx_intr(sc); 2100 2101 /* 2102 * Stop the host VirtIO adapter. Note this will reset the host 2103 * adapter's state back to the pre-initialized state, so in 2104 * order to make the device usable again, we must drive it 2105 * through virtio_reinit() and virtio_reinit_complete(). 2106 */ 2107 virtio_stop(dev); 2108 2109 sc->vtnet_flags &= ~VTNET_FLAG_LINK; 2110 2111 vtnet_free_rx_mbufs(sc); 2112 vtnet_free_tx_mbufs(sc); 2113 } 2114 2115 static int 2116 vtnet_reinit(struct vtnet_softc *sc) 2117 { 2118 struct ifnet *ifp; 2119 uint64_t features; 2120 2121 ifp = sc->vtnet_ifp; 2122 features = sc->vtnet_features; 2123 2124 /* 2125 * Re-negotiate with the host, removing any disabled receive 2126 * features. Transmit features are disabled only on our side 2127 * via if_capenable and if_hwassist. 2128 */ 2129 2130 if (ifp->if_capabilities & IFCAP_RXCSUM) { 2131 if ((ifp->if_capenable & IFCAP_RXCSUM) == 0) 2132 features &= ~VIRTIO_NET_F_GUEST_CSUM; 2133 } 2134 2135 if (ifp->if_capabilities & IFCAP_LRO) { 2136 if ((ifp->if_capenable & IFCAP_LRO) == 0) 2137 features &= ~VTNET_LRO_FEATURES; 2138 } 2139 2140 if (ifp->if_capabilities & IFCAP_VLAN_HWFILTER) { 2141 if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0) 2142 features &= ~VIRTIO_NET_F_CTRL_VLAN; 2143 } 2144 2145 return (virtio_reinit(sc->vtnet_dev, features)); 2146 } 2147 2148 static void 2149 vtnet_init_locked(struct vtnet_softc *sc) 2150 { 2151 device_t dev; 2152 struct ifnet *ifp; 2153 int error; 2154 2155 dev = sc->vtnet_dev; 2156 ifp = sc->vtnet_ifp; 2157 2158 ASSERT_SERIALIZED(&sc->vtnet_slz); 2159 2160 if (ifp->if_flags & IFF_RUNNING) 2161 return; 2162 2163 /* Stop host's adapter, cancel any pending I/O. */ 2164 vtnet_stop(sc); 2165 2166 /* Reinitialize the host device. */ 2167 error = vtnet_reinit(sc); 2168 if (error) { 2169 device_printf(dev, 2170 "reinitialization failed, stopping device...\n"); 2171 vtnet_stop(sc); 2172 return; 2173 } 2174 2175 /* Update host with assigned MAC address. */ 2176 bcopy(IF_LLADDR(ifp), sc->vtnet_hwaddr, ETHER_ADDR_LEN); 2177 vtnet_set_hwaddr(sc); 2178 2179 ifp->if_hwassist = 0; 2180 if (ifp->if_capenable & IFCAP_TXCSUM) 2181 ifp->if_hwassist |= VTNET_CSUM_OFFLOAD; 2182 if (ifp->if_capenable & IFCAP_TSO4) 2183 ifp->if_hwassist |= CSUM_TSO; 2184 2185 error = vtnet_init_rx_vq(sc); 2186 if (error) { 2187 device_printf(dev, 2188 "cannot allocate mbufs for Rx virtqueue\n"); 2189 vtnet_stop(sc); 2190 return; 2191 } 2192 2193 if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) { 2194 if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) { 2195 /* Restore promiscuous and all-multicast modes. */ 2196 vtnet_rx_filter(sc); 2197 2198 /* Restore filtered MAC addresses. */ 2199 vtnet_rx_filter_mac(sc); 2200 } 2201 2202 /* Restore VLAN filters. */ 2203 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) 2204 vtnet_rx_filter_vlan(sc); 2205 } 2206 2207 { 2208 vtnet_enable_rx_intr(sc); 2209 vtnet_enable_tx_intr(sc); 2210 } 2211 2212 ifp->if_flags |= IFF_RUNNING; 2213 ifq_clr_oactive(&ifp->if_snd); 2214 2215 virtio_reinit_complete(dev); 2216 2217 vtnet_update_link_status(sc); 2218 callout_reset(&sc->vtnet_tick_ch, hz, vtnet_tick, sc); 2219 } 2220 2221 static void 2222 vtnet_init(void *xsc) 2223 { 2224 struct vtnet_softc *sc; 2225 2226 sc = xsc; 2227 2228 lwkt_serialize_enter(&sc->vtnet_slz); 2229 vtnet_init_locked(sc); 2230 lwkt_serialize_exit(&sc->vtnet_slz); 2231 } 2232 2233 static void 2234 vtnet_exec_ctrl_cmd(struct vtnet_softc *sc, void *cookie, 2235 struct sglist *sg, int readable, int writable) 2236 { 2237 struct virtqueue *vq; 2238 void *c; 2239 2240 vq = sc->vtnet_ctrl_vq; 2241 2242 ASSERT_SERIALIZED(&sc->vtnet_slz); 2243 KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_VQ, 2244 ("no control virtqueue")); 2245 KASSERT(virtqueue_empty(vq), 2246 ("control command already enqueued")); 2247 2248 if (virtqueue_enqueue(vq, cookie, sg, readable, writable) != 0) 2249 return; 2250 2251 virtqueue_notify(vq, &sc->vtnet_slz); 2252 2253 /* 2254 * Poll until the command is complete. Previously, we would 2255 * sleep until the control virtqueue interrupt handler woke 2256 * us up, but dropping the VTNET_MTX leads to serialization 2257 * difficulties. 2258 * 2259 * Furthermore, it appears QEMU/KVM only allocates three MSIX 2260 * vectors. Two of those vectors are needed for the Rx and Tx 2261 * virtqueues. We do not support sharing both a Vq and config 2262 * changed notification on the same MSIX vector. 2263 */ 2264 c = virtqueue_poll(vq, NULL); 2265 KASSERT(c == cookie, ("unexpected control command response")); 2266 } 2267 2268 static void 2269 vtnet_rx_filter(struct vtnet_softc *sc) 2270 { 2271 device_t dev; 2272 struct ifnet *ifp; 2273 2274 dev = sc->vtnet_dev; 2275 ifp = sc->vtnet_ifp; 2276 2277 ASSERT_SERIALIZED(&sc->vtnet_slz); 2278 KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX, 2279 ("CTRL_RX feature not negotiated")); 2280 2281 if (vtnet_set_promisc(sc, ifp->if_flags & IFF_PROMISC) != 0) 2282 device_printf(dev, "cannot %s promiscuous mode\n", 2283 ifp->if_flags & IFF_PROMISC ? "enable" : "disable"); 2284 2285 if (vtnet_set_allmulti(sc, ifp->if_flags & IFF_ALLMULTI) != 0) 2286 device_printf(dev, "cannot %s all-multicast mode\n", 2287 ifp->if_flags & IFF_ALLMULTI ? "enable" : "disable"); 2288 } 2289 2290 static int 2291 vtnet_ctrl_rx_cmd(struct vtnet_softc *sc, int cmd, int on) 2292 { 2293 struct virtio_net_ctrl_hdr hdr __aligned(2); 2294 struct sglist_seg segs[3]; 2295 struct sglist sg; 2296 uint8_t onoff, ack; 2297 int error; 2298 2299 if ((sc->vtnet_flags & VTNET_FLAG_CTRL_RX) == 0) 2300 return (ENOTSUP); 2301 2302 error = 0; 2303 2304 hdr.class = VIRTIO_NET_CTRL_RX; 2305 hdr.cmd = cmd; 2306 onoff = !!on; 2307 ack = VIRTIO_NET_ERR; 2308 2309 sglist_init(&sg, 3, segs); 2310 error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr)); 2311 error |= sglist_append(&sg, &onoff, sizeof(uint8_t)); 2312 error |= sglist_append(&sg, &ack, sizeof(uint8_t)); 2313 KASSERT(error == 0 && sg.sg_nseg == 3, 2314 ("error adding Rx filter message to sglist")); 2315 2316 vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1); 2317 2318 return (ack == VIRTIO_NET_OK ? 0 : EIO); 2319 } 2320 2321 static int 2322 vtnet_set_promisc(struct vtnet_softc *sc, int on) 2323 { 2324 2325 return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_PROMISC, on)); 2326 } 2327 2328 static int 2329 vtnet_set_allmulti(struct vtnet_softc *sc, int on) 2330 { 2331 2332 return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_ALLMULTI, on)); 2333 } 2334 2335 static void 2336 vtnet_rx_filter_mac(struct vtnet_softc *sc) 2337 { 2338 struct virtio_net_ctrl_hdr hdr __aligned(2); 2339 struct vtnet_mac_filter *filter; 2340 struct sglist_seg segs[4]; 2341 struct sglist sg; 2342 struct ifnet *ifp; 2343 struct ifaddr *ifa; 2344 struct ifaddr_container *ifac; 2345 struct ifmultiaddr *ifma; 2346 int ucnt, mcnt, promisc, allmulti, error; 2347 uint8_t ack; 2348 2349 ifp = sc->vtnet_ifp; 2350 ucnt = 0; 2351 mcnt = 0; 2352 promisc = 0; 2353 allmulti = 0; 2354 error = 0; 2355 2356 ASSERT_SERIALIZED(&sc->vtnet_slz); 2357 KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX, 2358 ("CTRL_RX feature not negotiated")); 2359 2360 /* Use the MAC filtering table allocated in vtnet_attach. */ 2361 filter = sc->vtnet_macfilter; 2362 memset(filter, 0, sizeof(struct vtnet_mac_filter)); 2363 2364 /* Unicast MAC addresses: */ 2365 //if_addr_rlock(ifp); 2366 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) { 2367 ifa = ifac->ifa; 2368 if (ifa->ifa_addr->sa_family != AF_LINK) 2369 continue; 2370 else if (ucnt == VTNET_MAX_MAC_ENTRIES) 2371 break; 2372 2373 bcopy(LLADDR((struct sockaddr_dl *)ifa->ifa_addr), 2374 &filter->vmf_unicast.macs[ucnt], ETHER_ADDR_LEN); 2375 ucnt++; 2376 } 2377 //if_addr_runlock(ifp); 2378 2379 if (ucnt >= VTNET_MAX_MAC_ENTRIES) { 2380 promisc = 1; 2381 filter->vmf_unicast.nentries = 0; 2382 2383 if_printf(ifp, "more than %d MAC addresses assigned, " 2384 "falling back to promiscuous mode\n", 2385 VTNET_MAX_MAC_ENTRIES); 2386 } else 2387 filter->vmf_unicast.nentries = ucnt; 2388 2389 /* Multicast MAC addresses: */ 2390 //if_maddr_rlock(ifp); 2391 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 2392 if (ifma->ifma_addr->sa_family != AF_LINK) 2393 continue; 2394 else if (mcnt == VTNET_MAX_MAC_ENTRIES) 2395 break; 2396 2397 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr), 2398 &filter->vmf_multicast.macs[mcnt], ETHER_ADDR_LEN); 2399 mcnt++; 2400 } 2401 //if_maddr_runlock(ifp); 2402 2403 if (mcnt >= VTNET_MAX_MAC_ENTRIES) { 2404 allmulti = 1; 2405 filter->vmf_multicast.nentries = 0; 2406 2407 if_printf(ifp, "more than %d multicast MAC addresses " 2408 "assigned, falling back to all-multicast mode\n", 2409 VTNET_MAX_MAC_ENTRIES); 2410 } else 2411 filter->vmf_multicast.nentries = mcnt; 2412 2413 if (promisc && allmulti) 2414 goto out; 2415 2416 hdr.class = VIRTIO_NET_CTRL_MAC; 2417 hdr.cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET; 2418 ack = VIRTIO_NET_ERR; 2419 2420 sglist_init(&sg, 4, segs); 2421 error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr)); 2422 error |= sglist_append(&sg, &filter->vmf_unicast, 2423 sizeof(struct vtnet_mac_table)); 2424 error |= sglist_append(&sg, &filter->vmf_multicast, 2425 sizeof(struct vtnet_mac_table)); 2426 error |= sglist_append(&sg, &ack, sizeof(uint8_t)); 2427 KASSERT(error == 0 && sg.sg_nseg == 4, 2428 ("error adding MAC filtering message to sglist")); 2429 2430 vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1); 2431 2432 if (ack != VIRTIO_NET_OK) 2433 if_printf(ifp, "error setting host MAC filter table\n"); 2434 2435 out: 2436 if (promisc) 2437 if (vtnet_set_promisc(sc, 1) != 0) 2438 if_printf(ifp, "cannot enable promiscuous mode\n"); 2439 if (allmulti) 2440 if (vtnet_set_allmulti(sc, 1) != 0) 2441 if_printf(ifp, "cannot enable all-multicast mode\n"); 2442 } 2443 2444 static int 2445 vtnet_exec_vlan_filter(struct vtnet_softc *sc, int add, uint16_t tag) 2446 { 2447 struct virtio_net_ctrl_hdr hdr __aligned(2); 2448 struct sglist_seg segs[3]; 2449 struct sglist sg; 2450 uint8_t ack; 2451 int error; 2452 2453 hdr.class = VIRTIO_NET_CTRL_VLAN; 2454 hdr.cmd = add ? VIRTIO_NET_CTRL_VLAN_ADD : VIRTIO_NET_CTRL_VLAN_DEL; 2455 ack = VIRTIO_NET_ERR; 2456 error = 0; 2457 2458 sglist_init(&sg, 3, segs); 2459 error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr)); 2460 error |= sglist_append(&sg, &tag, sizeof(uint16_t)); 2461 error |= sglist_append(&sg, &ack, sizeof(uint8_t)); 2462 KASSERT(error == 0 && sg.sg_nseg == 3, 2463 ("error adding VLAN control message to sglist")); 2464 2465 vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1); 2466 2467 return (ack == VIRTIO_NET_OK ? 0 : EIO); 2468 } 2469 2470 static void 2471 vtnet_rx_filter_vlan(struct vtnet_softc *sc) 2472 { 2473 device_t dev; 2474 uint32_t w, mask; 2475 uint16_t tag; 2476 int i, nvlans, error; 2477 2478 ASSERT_SERIALIZED(&sc->vtnet_slz); 2479 KASSERT(sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER, 2480 ("VLAN_FILTER feature not negotiated")); 2481 2482 dev = sc->vtnet_dev; 2483 nvlans = sc->vtnet_nvlans; 2484 error = 0; 2485 2486 /* Enable filtering for each configured VLAN. */ 2487 for (i = 0; i < VTNET_VLAN_SHADOW_SIZE && nvlans > 0; i++) { 2488 w = sc->vtnet_vlan_shadow[i]; 2489 for (mask = 1, tag = i * 32; w != 0; mask <<= 1, tag++) { 2490 if ((w & mask) != 0) { 2491 w &= ~mask; 2492 nvlans--; 2493 if (vtnet_exec_vlan_filter(sc, 1, tag) != 0) 2494 error++; 2495 } 2496 } 2497 } 2498 2499 KASSERT(nvlans == 0, ("VLAN count incorrect")); 2500 if (error) 2501 device_printf(dev, "cannot restore VLAN filter table\n"); 2502 } 2503 2504 static void 2505 vtnet_set_vlan_filter(struct vtnet_softc *sc, int add, uint16_t tag) 2506 { 2507 struct ifnet *ifp; 2508 int idx, bit; 2509 2510 KASSERT(sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER, 2511 ("VLAN_FILTER feature not negotiated")); 2512 2513 if ((tag == 0) || (tag > 4095)) 2514 return; 2515 2516 ifp = sc->vtnet_ifp; 2517 idx = (tag >> 5) & 0x7F; 2518 bit = tag & 0x1F; 2519 2520 lwkt_serialize_enter(&sc->vtnet_slz); 2521 2522 /* Update shadow VLAN table. */ 2523 if (add) { 2524 sc->vtnet_nvlans++; 2525 sc->vtnet_vlan_shadow[idx] |= (1 << bit); 2526 } else { 2527 sc->vtnet_nvlans--; 2528 sc->vtnet_vlan_shadow[idx] &= ~(1 << bit); 2529 } 2530 2531 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) { 2532 if (vtnet_exec_vlan_filter(sc, add, tag) != 0) { 2533 device_printf(sc->vtnet_dev, 2534 "cannot %s VLAN %d %s the host filter table\n", 2535 add ? "add" : "remove", tag, 2536 add ? "to" : "from"); 2537 } 2538 } 2539 2540 lwkt_serialize_exit(&sc->vtnet_slz); 2541 } 2542 2543 static void 2544 vtnet_register_vlan(void *arg, struct ifnet *ifp, uint16_t tag) 2545 { 2546 2547 if (ifp->if_softc != arg) 2548 return; 2549 2550 vtnet_set_vlan_filter(arg, 1, tag); 2551 } 2552 2553 static void 2554 vtnet_unregister_vlan(void *arg, struct ifnet *ifp, uint16_t tag) 2555 { 2556 2557 if (ifp->if_softc != arg) 2558 return; 2559 2560 vtnet_set_vlan_filter(arg, 0, tag); 2561 } 2562 2563 static int 2564 vtnet_ifmedia_upd(struct ifnet *ifp) 2565 { 2566 struct vtnet_softc *sc; 2567 struct ifmedia *ifm; 2568 2569 sc = ifp->if_softc; 2570 ifm = &sc->vtnet_media; 2571 2572 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 2573 return (EINVAL); 2574 2575 return (0); 2576 } 2577 2578 static void 2579 vtnet_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 2580 { 2581 struct vtnet_softc *sc; 2582 2583 sc = ifp->if_softc; 2584 2585 ifmr->ifm_status = IFM_AVALID; 2586 ifmr->ifm_active = IFM_ETHER; 2587 2588 lwkt_serialize_enter(&sc->vtnet_slz); 2589 if (vtnet_is_link_up(sc) != 0) { 2590 ifmr->ifm_status |= IFM_ACTIVE; 2591 ifmr->ifm_active |= VTNET_MEDIATYPE; 2592 } else 2593 ifmr->ifm_active |= IFM_NONE; 2594 lwkt_serialize_exit(&sc->vtnet_slz); 2595 } 2596 2597 static void 2598 vtnet_add_statistics(struct vtnet_softc *sc) 2599 { 2600 device_t dev; 2601 struct vtnet_statistics *stats; 2602 struct sysctl_ctx_list *ctx; 2603 struct sysctl_oid *tree; 2604 struct sysctl_oid_list *child; 2605 2606 dev = sc->vtnet_dev; 2607 stats = &sc->vtnet_stats; 2608 ctx = device_get_sysctl_ctx(dev); 2609 tree = device_get_sysctl_tree(dev); 2610 child = SYSCTL_CHILDREN(tree); 2611 2612 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_alloc_failed", 2613 CTLFLAG_RD, &stats->mbuf_alloc_failed, 2614 "Mbuf cluster allocation failures"); 2615 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_frame_too_large", 2616 CTLFLAG_RD, &stats->rx_frame_too_large, 2617 "Received frame larger than the mbuf chain"); 2618 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_enq_replacement_failed", 2619 CTLFLAG_RD, &stats->rx_enq_replacement_failed, 2620 "Enqueuing the replacement receive mbuf failed"); 2621 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_mergeable_failed", 2622 CTLFLAG_RD, &stats->rx_mergeable_failed, 2623 "Mergeable buffers receive failures"); 2624 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_csum_bad_ethtype", 2625 CTLFLAG_RD, &stats->rx_csum_bad_ethtype, 2626 "Received checksum offloaded buffer with unsupported " 2627 "Ethernet type"); 2628 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_csum_bad_start", 2629 CTLFLAG_RD, &stats->rx_csum_bad_start, 2630 "Received checksum offloaded buffer with incorrect start offset"); 2631 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_csum_bad_ipproto", 2632 CTLFLAG_RD, &stats->rx_csum_bad_ipproto, 2633 "Received checksum offloaded buffer with incorrect IP protocol"); 2634 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_csum_bad_offset", 2635 CTLFLAG_RD, &stats->rx_csum_bad_offset, 2636 "Received checksum offloaded buffer with incorrect offset"); 2637 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_csum_failed", 2638 CTLFLAG_RD, &stats->rx_csum_failed, 2639 "Received buffer checksum offload failed"); 2640 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_csum_offloaded", 2641 CTLFLAG_RD, &stats->rx_csum_offloaded, 2642 "Received buffer checksum offload succeeded"); 2643 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_task_rescheduled", 2644 CTLFLAG_RD, &stats->rx_task_rescheduled, 2645 "Times the receive interrupt task rescheduled itself"); 2646 2647 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_csum_offloaded", 2648 CTLFLAG_RD, &stats->tx_csum_offloaded, 2649 "Offloaded checksum of transmitted buffer"); 2650 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_tso_offloaded", 2651 CTLFLAG_RD, &stats->tx_tso_offloaded, 2652 "Segmentation offload of transmitted buffer"); 2653 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_csum_bad_ethtype", 2654 CTLFLAG_RD, &stats->tx_csum_bad_ethtype, 2655 "Aborted transmit of checksum offloaded buffer with unknown " 2656 "Ethernet type"); 2657 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_tso_bad_ethtype", 2658 CTLFLAG_RD, &stats->tx_tso_bad_ethtype, 2659 "Aborted transmit of TSO buffer with unknown Ethernet type"); 2660 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_task_rescheduled", 2661 CTLFLAG_RD, &stats->tx_task_rescheduled, 2662 "Times the transmit interrupt task rescheduled itself"); 2663 } 2664 2665 static int 2666 vtnet_enable_rx_intr(struct vtnet_softc *sc) 2667 { 2668 2669 return (virtqueue_enable_intr(sc->vtnet_rx_vq)); 2670 } 2671 2672 static void 2673 vtnet_disable_rx_intr(struct vtnet_softc *sc) 2674 { 2675 2676 virtqueue_disable_intr(sc->vtnet_rx_vq); 2677 } 2678 2679 static int 2680 vtnet_enable_tx_intr(struct vtnet_softc *sc) 2681 { 2682 2683 #ifdef VTNET_TX_INTR_MODERATION 2684 return (0); 2685 #else 2686 return (virtqueue_enable_intr(sc->vtnet_tx_vq)); 2687 #endif 2688 } 2689 2690 static void 2691 vtnet_disable_tx_intr(struct vtnet_softc *sc) 2692 { 2693 2694 virtqueue_disable_intr(sc->vtnet_tx_vq); 2695 } 2696