/*-
 * Copyright (c) 2011, Bryan Venteicher <bryanv@daemoninthecloset.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* Driver for VirtIO network devices. */

#include <sys/cdefs.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/random.h>
#include <sys/sglist.h>
#include <sys/serialize.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_media.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>
#include <net/ifq_var.h>

#include <net/bpf.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/udp.h>
#include <netinet/tcp.h>
#include <netinet/sctp.h>

#include <dev/virtual/virtio/virtio/virtio.h>
#include <dev/virtual/virtio/virtio/virtqueue.h>

#include "virtio_net.h"
#include "virtio_if.h"

struct vtnet_statistics {
	unsigned long		mbuf_alloc_failed;

	unsigned long		rx_frame_too_large;
	unsigned long		rx_enq_replacement_failed;
	unsigned long		rx_mergeable_failed;
	unsigned long		rx_csum_bad_ethtype;
	unsigned long		rx_csum_bad_start;
	unsigned long		rx_csum_bad_ipproto;
	unsigned long		rx_csum_bad_offset;
	unsigned long		rx_csum_failed;
	unsigned long		rx_csum_offloaded;
	unsigned long		rx_task_rescheduled;

	unsigned long		tx_csum_offloaded;
	unsigned long		tx_tso_offloaded;
	unsigned long		tx_csum_bad_ethtype;
	unsigned long		tx_tso_bad_ethtype;
	unsigned long		tx_task_rescheduled;
};

struct vtnet_softc {
	device_t		vtnet_dev;
	struct ifnet		*vtnet_ifp;
	struct lwkt_serialize	vtnet_slz;

	uint32_t		vtnet_flags;
#define VTNET_FLAG_LINK		0x0001
#define VTNET_FLAG_SUSPENDED	0x0002
#define VTNET_FLAG_CTRL_VQ	0x0004
#define VTNET_FLAG_CTRL_RX	0x0008
#define VTNET_FLAG_VLAN_FILTER	0x0010
#define VTNET_FLAG_TSO_ECN	0x0020
#define VTNET_FLAG_MRG_RXBUFS	0x0040
#define VTNET_FLAG_LRO_NOMRG	0x0080

	struct virtqueue	*vtnet_rx_vq;
	struct virtqueue	*vtnet_tx_vq;
	struct virtqueue	*vtnet_ctrl_vq;

	struct vtnet_tx_header	*vtnet_txhdrarea;
	uint32_t		vtnet_txhdridx;
	struct vtnet_mac_filter	*vtnet_macfilter;

	int			vtnet_hdr_size;
	int			vtnet_tx_size;
	int			vtnet_rx_size;
	int			vtnet_rx_process_limit;
	int			vtnet_rx_mbuf_size;
	int			vtnet_rx_mbuf_count;
	int			vtnet_if_flags;
	int			vtnet_watchdog_timer;
	uint64_t		vtnet_features;

	struct task		vtnet_cfgchg_task;

	struct vtnet_statistics	vtnet_stats;

	struct sysctl_ctx_list	vtnet_sysctl_ctx;
	struct sysctl_oid	*vtnet_sysctl_tree;

	struct callout		vtnet_tick_ch;

	eventhandler_tag	vtnet_vlan_attach;
	eventhandler_tag	vtnet_vlan_detach;

	struct ifmedia		vtnet_media;
	/*
	 * Fake media type; the host does not provide us with
	 * any real media information.
	 */
#define VTNET_MEDIATYPE		(IFM_ETHER | IFM_1000_T | IFM_FDX)
	char			vtnet_hwaddr[ETHER_ADDR_LEN];

	/*
	 * During reset, the host's VLAN filtering table is lost. The
	 * array below is used to restore all the VLANs configured on
	 * this interface after a reset.
	 */
#define VTNET_VLAN_SHADOW_SIZE	(4096 / 32)
	int			vtnet_nvlans;
	uint32_t		vtnet_vlan_shadow[VTNET_VLAN_SHADOW_SIZE];

	char			vtnet_mtx_name[16];
};

/*
 * When mergeable buffers are not negotiated, the vtnet_rx_header structure
 * below is placed at the beginning of the mbuf data. Use 4 bytes of pad to
 * both keep the VirtIO header and the data non-contiguous and to keep the
 * frame's payload 4 byte aligned.
 *
 * When mergeable buffers are negotiated, the host puts the VirtIO header in
 * the beginning of the first mbuf's data.
 */
#define VTNET_RX_HEADER_PAD	4
struct vtnet_rx_header {
	struct virtio_net_hdr	vrh_hdr;
	char			vrh_pad[VTNET_RX_HEADER_PAD];
} __packed;

/*
 * For each outgoing frame, the vtnet_tx_header below is allocated from
 * the vtnet_tx_header_zone.
 */
struct vtnet_tx_header {
	union {
		struct virtio_net_hdr		hdr;
		struct virtio_net_hdr_mrg_rxbuf	mhdr;
	} vth_uhdr;

	struct mbuf	*vth_mbuf;
};

MALLOC_DEFINE(M_VTNET, "VTNET_TX", "Outgoing VTNET TX frame header");

/*
 * The VirtIO specification does not place a limit on the number of MAC
 * addresses the guest driver may request to be filtered. In practice,
 * the host is constrained by available resources. To simplify this driver,
 * impose a reasonably high limit of MAC addresses we will filter before
 * falling back to promiscuous or all-multicast modes.
 */
#define VTNET_MAX_MAC_ENTRIES	128

struct vtnet_mac_table {
	uint32_t	nentries;
	uint8_t		macs[VTNET_MAX_MAC_ENTRIES][ETHER_ADDR_LEN];
} __packed;

struct vtnet_mac_filter {
	struct vtnet_mac_table	vmf_unicast;
	uint32_t		vmf_pad;	/* Make tables non-contiguous. */
	struct vtnet_mac_table	vmf_multicast;
};
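/*
 * Size sketch (from the definitions above): each vtnet_mac_table is
 * 4 + 128 * 6 = 772 bytes, so the whole vtnet_mac_filter is
 * 772 + 4 + 772 = 1548 bytes, small enough to satisfy with the single
 * contigmalloc() done at attach time.
 */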
#define VTNET_WATCHDOG_TIMEOUT	5
#define VTNET_CSUM_OFFLOAD	(CSUM_TCP | CSUM_UDP) /* | CSUM_SCTP */

/* Features desired/implemented by this driver. */
#define VTNET_FEATURES \
    (VIRTIO_NET_F_MAC		| \
     VIRTIO_NET_F_STATUS	| \
     VIRTIO_NET_F_CTRL_VQ	| \
     VIRTIO_NET_F_CTRL_RX	| \
     VIRTIO_NET_F_CTRL_VLAN	| \
     VIRTIO_NET_F_CSUM		| \
     VIRTIO_NET_F_HOST_TSO4	| \
     VIRTIO_NET_F_HOST_TSO6	| \
     VIRTIO_NET_F_HOST_ECN	| \
     VIRTIO_NET_F_GUEST_CSUM	| \
     VIRTIO_NET_F_GUEST_TSO4	| \
     VIRTIO_NET_F_GUEST_TSO6	| \
     VIRTIO_NET_F_GUEST_ECN	| \
     VIRTIO_NET_F_MRG_RXBUF)

/*
 * The VIRTIO_NET_F_GUEST_TSO[46] features permit the host to send us
 * frames larger than 1514 bytes. We do not yet support software LRO
 * via tcp_lro_rx().
 */
#define VTNET_LRO_FEATURES (VIRTIO_NET_F_GUEST_TSO4 | \
    VIRTIO_NET_F_GUEST_TSO6 | VIRTIO_NET_F_GUEST_ECN)

#define VTNET_MAX_MTU		65536
#define VTNET_MAX_RX_SIZE	65550

/*
 * Used to preallocate the Vq indirect descriptors. The first segment
 * is reserved for the header.
 */
#define VTNET_MIN_RX_SEGS	2
#define VTNET_MAX_RX_SEGS	34
#define VTNET_MAX_TX_SEGS	34

#define IFCAP_TSO4		0x00100	/* can do TCP Segmentation Offload */
#define IFCAP_TSO6		0x00200	/* can do TCP6 Segmentation Offload */
#define IFCAP_LRO		0x00400	/* can do Large Receive Offload */
#define IFCAP_VLAN_HWFILTER	0x10000	/* interface hw can filter vlan tag */
#define IFCAP_VLAN_HWTSO	0x40000	/* can do IFCAP_TSO on VLANs */

/*
 * Assert we can receive and transmit the maximum with regular
 * size clusters.
 */
CTASSERT(((VTNET_MAX_RX_SEGS - 1) * MCLBYTES) >= VTNET_MAX_RX_SIZE);
CTASSERT(((VTNET_MAX_TX_SEGS - 1) * MCLBYTES) >= VTNET_MAX_MTU);

/*
 * Determine how many mbufs are in each receive buffer. For LRO without
 * mergeable descriptors, we must allocate an mbuf chain large enough to
 * hold both the vtnet_rx_header and the maximum receivable data.
 */
#define VTNET_NEEDED_RX_MBUFS(_sc) \
	((_sc)->vtnet_flags & VTNET_FLAG_LRO_NOMRG) == 0 ? 1 : \
	howmany(sizeof(struct vtnet_rx_header) + VTNET_MAX_RX_SIZE, \
	    (_sc)->vtnet_rx_mbuf_size)
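/*
 * Worked example (a sketch, assuming 2KB MCLBYTES clusters): with
 * LRO_NOMRG negotiated, sizeof(struct vtnet_rx_header) is 14 bytes, so
 * howmany(14 + 65550, 2048) = 33 mbufs per receive buffer, which stays
 * within the VTNET_MAX_RX_SEGS = 34 limit asserted above.
 */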
static int	vtnet_modevent(module_t, int, void *);

static int	vtnet_probe(device_t);
static int	vtnet_attach(device_t);
static int	vtnet_detach(device_t);
static int	vtnet_suspend(device_t);
static int	vtnet_resume(device_t);
static int	vtnet_shutdown(device_t);
static int	vtnet_config_change(device_t);

static void	vtnet_negotiate_features(struct vtnet_softc *);
static int	vtnet_alloc_virtqueues(struct vtnet_softc *);
static void	vtnet_get_hwaddr(struct vtnet_softc *);
static void	vtnet_set_hwaddr(struct vtnet_softc *);
static int	vtnet_is_link_up(struct vtnet_softc *);
static void	vtnet_update_link_status(struct vtnet_softc *);
#if 0
static void	vtnet_watchdog(struct vtnet_softc *);
#endif
static void	vtnet_config_change_task(void *, int);
static int	vtnet_change_mtu(struct vtnet_softc *, int);
static int	vtnet_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);

static int	vtnet_init_rx_vq(struct vtnet_softc *);
static void	vtnet_free_rx_mbufs(struct vtnet_softc *);
static void	vtnet_free_tx_mbufs(struct vtnet_softc *);
static void	vtnet_free_ctrl_vq(struct vtnet_softc *);

static struct mbuf * vtnet_alloc_rxbuf(struct vtnet_softc *, int,
		    struct mbuf **);
static int	vtnet_replace_rxbuf(struct vtnet_softc *,
		    struct mbuf *, int);
static int	vtnet_newbuf(struct vtnet_softc *);
static void	vtnet_discard_merged_rxbuf(struct vtnet_softc *, int);
static void	vtnet_discard_rxbuf(struct vtnet_softc *, struct mbuf *);
static int	vtnet_enqueue_rxbuf(struct vtnet_softc *, struct mbuf *);
static void	vtnet_vlan_tag_remove(struct mbuf *);
static int	vtnet_rx_csum(struct vtnet_softc *, struct mbuf *,
		    struct virtio_net_hdr *);
static int	vtnet_rxeof_merged(struct vtnet_softc *, struct mbuf *, int);
static int	vtnet_rxeof(struct vtnet_softc *, int, int *);
static void	vtnet_rx_intr_task(void *);
static int	vtnet_rx_vq_intr(void *);

static void	vtnet_txeof(struct vtnet_softc *);
static struct mbuf * vtnet_tx_offload(struct vtnet_softc *, struct mbuf *,
		    struct virtio_net_hdr *);
static int	vtnet_enqueue_txbuf(struct vtnet_softc *, struct mbuf **,
		    struct vtnet_tx_header *);
static int	vtnet_encap(struct vtnet_softc *, struct mbuf **);
static void	vtnet_start_locked(struct ifnet *, struct ifaltq_subque *);
static void	vtnet_start(struct ifnet *, struct ifaltq_subque *);
static void	vtnet_tick(void *);
static void	vtnet_tx_intr_task(void *);
static int	vtnet_tx_vq_intr(void *);

static void	vtnet_stop(struct vtnet_softc *);
static int	vtnet_reinit(struct vtnet_softc *);
static void	vtnet_init_locked(struct vtnet_softc *);
static void	vtnet_init(void *);

static void	vtnet_exec_ctrl_cmd(struct vtnet_softc *, void *,
		    struct sglist *, int, int);

static void	vtnet_rx_filter(struct vtnet_softc *sc);
static int	vtnet_ctrl_rx_cmd(struct vtnet_softc *, int, int);
static int	vtnet_set_promisc(struct vtnet_softc *, int);
static int	vtnet_set_allmulti(struct vtnet_softc *, int);
static void	vtnet_rx_filter_mac(struct vtnet_softc *);

static int	vtnet_exec_vlan_filter(struct vtnet_softc *, int, uint16_t);
static void	vtnet_rx_filter_vlan(struct vtnet_softc *);
static void	vtnet_set_vlan_filter(struct vtnet_softc *, int, uint16_t);
static void	vtnet_register_vlan(void *, struct ifnet *, uint16_t);
static void	vtnet_unregister_vlan(void *, struct ifnet *, uint16_t);

static int	vtnet_ifmedia_upd(struct ifnet *);
static void	vtnet_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static void	vtnet_add_statistics(struct vtnet_softc *);

static int	vtnet_enable_rx_intr(struct vtnet_softc *);
static int	vtnet_enable_tx_intr(struct vtnet_softc *);
static void	vtnet_disable_rx_intr(struct vtnet_softc *);
static void	vtnet_disable_tx_intr(struct vtnet_softc *);

/* Tunables. */
static int vtnet_csum_disable = 0;
TUNABLE_INT("hw.vtnet.csum_disable", &vtnet_csum_disable);
static int vtnet_tso_disable = 1;
TUNABLE_INT("hw.vtnet.tso_disable", &vtnet_tso_disable);
static int vtnet_lro_disable = 1;
TUNABLE_INT("hw.vtnet.lro_disable", &vtnet_lro_disable);
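/*
 * Usage sketch (standard loader tunable mechanics, not specific to this
 * driver): the knobs above are read from the kernel environment at boot,
 * e.g. by adding to /boot/loader.conf:
 *
 *	hw.vtnet.tso_disable="0"
 */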
/*
 * Reducing the number of transmit completed interrupts can
 * improve performance. To do so, the define below keeps the
 * Tx vq interrupt disabled and adds calls to vtnet_txeof()
 * in the start and watchdog paths. The price to pay for this
 * is the m_free'ing of transmitted mbufs may be delayed until
 * the watchdog fires.
 */
#define VTNET_TX_INTR_MODERATION

static struct virtio_feature_desc vtnet_feature_desc[] = {
	{ VIRTIO_NET_F_CSUM,		"TxChecksum"	},
	{ VIRTIO_NET_F_GUEST_CSUM,	"RxChecksum"	},
	{ VIRTIO_NET_F_MAC,		"MacAddress"	},
	{ VIRTIO_NET_F_GSO,		"TxAllGSO"	},
	{ VIRTIO_NET_F_GUEST_TSO4,	"RxTSOv4"	},
	{ VIRTIO_NET_F_GUEST_TSO6,	"RxTSOv6"	},
	{ VIRTIO_NET_F_GUEST_ECN,	"RxECN"		},
	{ VIRTIO_NET_F_GUEST_UFO,	"RxUFO"		},
	{ VIRTIO_NET_F_HOST_TSO4,	"TxTSOv4"	},
	{ VIRTIO_NET_F_HOST_TSO6,	"TxTSOv6"	},
	{ VIRTIO_NET_F_HOST_ECN,	"TxTSOECN"	},
	{ VIRTIO_NET_F_HOST_UFO,	"TxUFO"		},
	{ VIRTIO_NET_F_MRG_RXBUF,	"MrgRxBuf"	},
	{ VIRTIO_NET_F_STATUS,		"Status"	},
	{ VIRTIO_NET_F_CTRL_VQ,		"ControlVq"	},
	{ VIRTIO_NET_F_CTRL_RX,		"RxMode"	},
	{ VIRTIO_NET_F_CTRL_VLAN,	"VLanFilter"	},
	{ VIRTIO_NET_F_CTRL_RX_EXTRA,	"RxModeExtra"	},
	{ VIRTIO_NET_F_MQ,		"RFS"		},
	{ 0, NULL }
};

static device_method_t vtnet_methods[] = {
	/* Device methods. */
	DEVMETHOD(device_probe,		vtnet_probe),
	DEVMETHOD(device_attach,	vtnet_attach),
	DEVMETHOD(device_detach,	vtnet_detach),
	DEVMETHOD(device_suspend,	vtnet_suspend),
	DEVMETHOD(device_resume,	vtnet_resume),
	DEVMETHOD(device_shutdown,	vtnet_shutdown),

	/* VirtIO methods. */
	DEVMETHOD(virtio_config_change, vtnet_config_change),

	{ 0, 0 }
};

static driver_t vtnet_driver = {
	"vtnet",
	vtnet_methods,
	sizeof(struct vtnet_softc)
};

static devclass_t vtnet_devclass;

DRIVER_MODULE(vtnet, virtio_pci, vtnet_driver, vtnet_devclass,
    vtnet_modevent, 0);
MODULE_VERSION(vtnet, 1);
MODULE_DEPEND(vtnet, virtio, 1, 1, 1);

static int
vtnet_modevent(module_t mod, int type, void *unused)
{
	int error;

	error = 0;

	switch (type) {
	case MOD_LOAD:
		break;
	case MOD_UNLOAD:
		break;
	case MOD_SHUTDOWN:
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}

	return (error);
}

static int
vtnet_probe(device_t dev)
{
	if (virtio_get_device_type(dev) != VIRTIO_ID_NETWORK)
		return (ENXIO);

	device_set_desc(dev, "VirtIO Networking Adapter");

	return (BUS_PROBE_DEFAULT);
}

static int
vtnet_attach(device_t dev)
{
	struct vtnet_softc *sc;
	struct ifnet *ifp;
	int tx_size, error;

	sc = device_get_softc(dev);
	sc->vtnet_dev = dev;

	lwkt_serialize_init(&sc->vtnet_slz);
	callout_init(&sc->vtnet_tick_ch);

	ifmedia_init(&sc->vtnet_media, IFM_IMASK, vtnet_ifmedia_upd,
	    vtnet_ifmedia_sts);
	ifmedia_add(&sc->vtnet_media, VTNET_MEDIATYPE, 0, NULL);
	ifmedia_set(&sc->vtnet_media, VTNET_MEDIATYPE);

	vtnet_add_statistics(sc);

	virtio_set_feature_desc(dev, vtnet_feature_desc);
	vtnet_negotiate_features(sc);

	if (virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF)) {
		sc->vtnet_flags |= VTNET_FLAG_MRG_RXBUFS;
		sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
	} else {
		sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr);
	}

	sc->vtnet_rx_mbuf_size = MCLBYTES;
	sc->vtnet_rx_mbuf_count = VTNET_NEEDED_RX_MBUFS(sc);

	if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VQ)) {
		sc->vtnet_flags |= VTNET_FLAG_CTRL_VQ;

		if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_RX))
			sc->vtnet_flags |= VTNET_FLAG_CTRL_RX;
		if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VLAN))
			sc->vtnet_flags |= VTNET_FLAG_VLAN_FILTER;
	}

	vtnet_get_hwaddr(sc);

	error = vtnet_alloc_virtqueues(sc);
	if (error) {
		device_printf(dev, "cannot allocate virtqueues\n");
		goto fail;
	}

	ifp = sc->vtnet_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "cannot allocate ifnet structure\n");
		error = ENOSPC;
		goto fail;
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = vtnet_init;
	ifp->if_start = vtnet_start;
	ifp->if_ioctl = vtnet_ioctl;

	sc->vtnet_rx_size = virtqueue_size(sc->vtnet_rx_vq);
	sc->vtnet_rx_process_limit = sc->vtnet_rx_size;

	tx_size = virtqueue_size(sc->vtnet_tx_vq);
	sc->vtnet_tx_size = tx_size;
	sc->vtnet_txhdridx = 0;
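	/*
	 * Header-array sizing note (an inference, not from the original
	 * sources): each in-flight packet needs a header descriptor in
	 * addition to at least one data descriptor, so no more than
	 * tx_size / 2 packets can be pending at once; the extra entry is
	 * slack for the modulo arithmetic in vtnet_encap().
	 */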
	sc->vtnet_txhdrarea = contigmalloc(
	    ((sc->vtnet_tx_size / 2) + 1) * sizeof(struct vtnet_tx_header),
	    M_VTNET, M_WAITOK, 0, BUS_SPACE_MAXADDR, 4, 0);
	if (sc->vtnet_txhdrarea == NULL) {
		device_printf(dev, "cannot contigmalloc the tx headers\n");
		error = ENOMEM;
		goto fail;
	}
	sc->vtnet_macfilter = contigmalloc(
	    sizeof(struct vtnet_mac_filter),
	    M_DEVBUF, M_WAITOK, 0, BUS_SPACE_MAXADDR, 4, 0);
	if (sc->vtnet_macfilter == NULL) {
		device_printf(dev,
		    "cannot contigmalloc the mac filter table\n");
		error = ENOMEM;
		goto fail;
	}
	ifq_set_maxlen(&ifp->if_snd, tx_size - 1);
	ifq_set_ready(&ifp->if_snd);

	ether_ifattach(ifp, sc->vtnet_hwaddr, NULL);

	if (virtio_with_feature(dev, VIRTIO_NET_F_STATUS)) {
		//ifp->if_capabilities |= IFCAP_LINKSTATE;
		kprintf("add dynamic link state\n");
	}

	/* Tell the upper layer(s) we support long frames. */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |= IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU;

	if (virtio_with_feature(dev, VIRTIO_NET_F_CSUM)) {
		ifp->if_capabilities |= IFCAP_TXCSUM;

		if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4))
			ifp->if_capabilities |= IFCAP_TSO4;
		if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6))
			ifp->if_capabilities |= IFCAP_TSO6;
		if (ifp->if_capabilities & IFCAP_TSO)
			ifp->if_capabilities |= IFCAP_VLAN_HWTSO;

		if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_ECN))
			sc->vtnet_flags |= VTNET_FLAG_TSO_ECN;
	}

	if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_CSUM)) {
		ifp->if_capabilities |= IFCAP_RXCSUM;

		if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO4) ||
		    virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO6))
			ifp->if_capabilities |= IFCAP_LRO;
	}

	if (ifp->if_capabilities & IFCAP_HWCSUM) {
		/*
		 * VirtIO does not support VLAN tagging, but we can fake
		 * it by inserting and removing the 802.1Q header during
		 * transmit and receive. We are then able to do checksum
		 * offloading of VLAN frames.
		 */
		ifp->if_capabilities |=
		    IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM;
	}

	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Capabilities after here are not enabled by default.
	 */

	if (sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER) {
		ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;

		sc->vtnet_vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
		    vtnet_register_vlan, sc, EVENTHANDLER_PRI_FIRST);
		sc->vtnet_vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
		    vtnet_unregister_vlan, sc, EVENTHANDLER_PRI_FIRST);
	}

	TASK_INIT(&sc->vtnet_cfgchg_task, 0, vtnet_config_change_task, sc);

	error = virtio_setup_intr(dev, &sc->vtnet_slz);
	if (error) {
		device_printf(dev, "cannot setup virtqueue interrupts\n");
		ether_ifdetach(ifp);
		goto fail;
	}
	/*
	 * Device defaults to promiscuous mode for backwards
	 * compatibility. Turn it off if possible.
	 */
	if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) {
		lwkt_serialize_enter(&sc->vtnet_slz);
		if (vtnet_set_promisc(sc, 0) != 0) {
			ifp->if_flags |= IFF_PROMISC;
			device_printf(dev,
			    "cannot disable promiscuous mode\n");
		}
		lwkt_serialize_exit(&sc->vtnet_slz);
	} else
		ifp->if_flags |= IFF_PROMISC;

fail:
	if (error)
		vtnet_detach(dev);

	return (error);
}

static int
vtnet_detach(device_t dev)
{
	struct vtnet_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	ifp = sc->vtnet_ifp;

	if (device_is_attached(dev)) {
		lwkt_serialize_enter(&sc->vtnet_slz);
		vtnet_stop(sc);
		lwkt_serialize_exit(&sc->vtnet_slz);

		callout_stop(&sc->vtnet_tick_ch);
		taskqueue_drain(taskqueue_swi, &sc->vtnet_cfgchg_task);

		ether_ifdetach(ifp);
	}

	if (sc->vtnet_vlan_attach != NULL) {
		EVENTHANDLER_DEREGISTER(vlan_config, sc->vtnet_vlan_attach);
		sc->vtnet_vlan_attach = NULL;
	}
	if (sc->vtnet_vlan_detach != NULL) {
		EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vtnet_vlan_detach);
		sc->vtnet_vlan_detach = NULL;
	}

	if (ifp) {
		if_free(ifp);
		sc->vtnet_ifp = NULL;
	}

	if (sc->vtnet_rx_vq != NULL)
		vtnet_free_rx_mbufs(sc);
	if (sc->vtnet_tx_vq != NULL)
		vtnet_free_tx_mbufs(sc);
	if (sc->vtnet_ctrl_vq != NULL)
		vtnet_free_ctrl_vq(sc);

	if (sc->vtnet_txhdrarea != NULL) {
		contigfree(sc->vtnet_txhdrarea,
		    ((sc->vtnet_tx_size / 2) + 1) *
		    sizeof(struct vtnet_tx_header), M_VTNET);
		sc->vtnet_txhdrarea = NULL;
	}
	if (sc->vtnet_macfilter != NULL) {
		contigfree(sc->vtnet_macfilter,
		    sizeof(struct vtnet_mac_filter), M_DEVBUF);
		sc->vtnet_macfilter = NULL;
	}

	ifmedia_removeall(&sc->vtnet_media);

	return (0);
}

static int
vtnet_suspend(device_t dev)
{
	struct vtnet_softc *sc;

	sc = device_get_softc(dev);

	lwkt_serialize_enter(&sc->vtnet_slz);
	vtnet_stop(sc);
	sc->vtnet_flags |= VTNET_FLAG_SUSPENDED;
	lwkt_serialize_exit(&sc->vtnet_slz);

	return (0);
}

static int
vtnet_resume(device_t dev)
{
	struct vtnet_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	ifp = sc->vtnet_ifp;

	lwkt_serialize_enter(&sc->vtnet_slz);
	if (ifp->if_flags & IFF_UP)
		vtnet_init_locked(sc);
	sc->vtnet_flags &= ~VTNET_FLAG_SUSPENDED;
	lwkt_serialize_exit(&sc->vtnet_slz);

	return (0);
}

static int
vtnet_shutdown(device_t dev)
{

	/*
	 * Suspend already does all of what we need to
	 * do here; we just never expect to be resumed.
	 */
	return (vtnet_suspend(dev));
}

static int
vtnet_config_change(device_t dev)
{
	struct vtnet_softc *sc;

	sc = device_get_softc(dev);

	taskqueue_enqueue(taskqueue_thread[mycpuid], &sc->vtnet_cfgchg_task);

	return (1);
}
static void
vtnet_negotiate_features(struct vtnet_softc *sc)
{
	device_t dev;
	uint64_t mask, features;

	dev = sc->vtnet_dev;
	mask = 0;

	if (vtnet_csum_disable)
		mask |= VIRTIO_NET_F_CSUM | VIRTIO_NET_F_GUEST_CSUM;

	/*
	 * TSO and LRO are only available when their corresponding
	 * checksum offload feature is also negotiated.
	 */

	if (vtnet_csum_disable || vtnet_tso_disable)
		mask |= VIRTIO_NET_F_HOST_TSO4 | VIRTIO_NET_F_HOST_TSO6 |
		    VIRTIO_NET_F_HOST_ECN;

	if (vtnet_csum_disable || vtnet_lro_disable)
		mask |= VTNET_LRO_FEATURES;

	features = VTNET_FEATURES & ~mask;
	features |= VIRTIO_F_NOTIFY_ON_EMPTY;
	sc->vtnet_features = virtio_negotiate_features(dev, features);
}

static int
vtnet_alloc_virtqueues(struct vtnet_softc *sc)
{
	device_t dev;
	struct vq_alloc_info vq_info[3];
	int nvqs, rxsegs;

	dev = sc->vtnet_dev;
	nvqs = 2;

	/*
	 * Indirect descriptors are not needed for the Rx
	 * virtqueue when mergeable buffers are negotiated.
	 * The header is placed inline with the data, not
	 * in a separate descriptor, and mbuf clusters are
	 * always physically contiguous.
	 */
	if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
		rxsegs = sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG ?
		    VTNET_MAX_RX_SEGS : VTNET_MIN_RX_SEGS;
	} else
		rxsegs = 0;

	VQ_ALLOC_INFO_INIT(&vq_info[0], rxsegs,
	    vtnet_rx_vq_intr, sc, &sc->vtnet_rx_vq,
	    "%s receive", device_get_nameunit(dev));

	VQ_ALLOC_INFO_INIT(&vq_info[1], VTNET_MAX_TX_SEGS,
	    vtnet_tx_vq_intr, sc, &sc->vtnet_tx_vq,
	    "%s transmit", device_get_nameunit(dev));

	if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) {
		nvqs++;

		VQ_ALLOC_INFO_INIT(&vq_info[2], 0, NULL, NULL,
		    &sc->vtnet_ctrl_vq, "%s control",
		    device_get_nameunit(dev));
	}

	return (virtio_alloc_virtqueues(dev, 0, nvqs, vq_info));
}

static void
vtnet_get_hwaddr(struct vtnet_softc *sc)
{
	device_t dev;

	dev = sc->vtnet_dev;

	if (virtio_with_feature(dev, VIRTIO_NET_F_MAC)) {
		virtio_read_device_config(dev,
		    offsetof(struct virtio_net_config, mac),
		    sc->vtnet_hwaddr, ETHER_ADDR_LEN);
	} else {
		/* Generate random locally administered unicast address. */
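		/*
		 * 0xB2 has the locally-administered bit (0x02) set and
		 * the multicast bit (0x01) clear, so the random address
		 * cannot collide with any vendor-assigned MAC.
		 */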
		sc->vtnet_hwaddr[0] = 0xB2;
		karc4rand(&sc->vtnet_hwaddr[1], ETHER_ADDR_LEN - 1);

		vtnet_set_hwaddr(sc);
	}
}

static void
vtnet_set_hwaddr(struct vtnet_softc *sc)
{
	device_t dev;

	dev = sc->vtnet_dev;

	virtio_write_device_config(dev,
	    offsetof(struct virtio_net_config, mac),
	    sc->vtnet_hwaddr, ETHER_ADDR_LEN);
}

static int
vtnet_is_link_up(struct vtnet_softc *sc)
{
	device_t dev;
	struct ifnet *ifp;
	uint16_t status;

	dev = sc->vtnet_dev;
	ifp = sc->vtnet_ifp;

	ASSERT_SERIALIZED(&sc->vtnet_slz);

	status = virtio_read_dev_config_2(dev,
	    offsetof(struct virtio_net_config, status));

	return ((status & VIRTIO_NET_S_LINK_UP) != 0);
}

static void
vtnet_update_link_status(struct vtnet_softc *sc)
{
	device_t dev;
	struct ifnet *ifp;
	struct ifaltq_subque *ifsq;
	int link;

	dev = sc->vtnet_dev;
	ifp = sc->vtnet_ifp;
	ifsq = ifq_get_subq_default(&ifp->if_snd);

	link = vtnet_is_link_up(sc);

	if (link && ((sc->vtnet_flags & VTNET_FLAG_LINK) == 0)) {
		sc->vtnet_flags |= VTNET_FLAG_LINK;
		if (bootverbose)
			device_printf(dev, "Link is up\n");
		ifp->if_link_state = LINK_STATE_UP;
		if_link_state_change(ifp);
		if (!ifsq_is_empty(ifsq))
			vtnet_start_locked(ifp, ifsq);
	} else if (!link && (sc->vtnet_flags & VTNET_FLAG_LINK)) {
		sc->vtnet_flags &= ~VTNET_FLAG_LINK;
		if (bootverbose)
			device_printf(dev, "Link is down\n");

		ifp->if_link_state = LINK_STATE_DOWN;
		if_link_state_change(ifp);
	}
}

#if 0
static void
vtnet_watchdog(struct vtnet_softc *sc)
{
	struct ifnet *ifp;

	ifp = sc->vtnet_ifp;

#ifdef VTNET_TX_INTR_MODERATION
	vtnet_txeof(sc);
#endif

	if (sc->vtnet_watchdog_timer == 0 || --sc->vtnet_watchdog_timer)
		return;

	if_printf(ifp, "watchdog timeout -- resetting\n");
#ifdef VTNET_DEBUG
	virtqueue_dump(sc->vtnet_tx_vq);
#endif
	ifp->if_oerrors++;
	ifp->if_flags &= ~IFF_RUNNING;
	vtnet_init_locked(sc);
}
#endif

static void
vtnet_config_change_task(void *arg, int pending)
{
	struct vtnet_softc *sc;

	sc = arg;

	lwkt_serialize_enter(&sc->vtnet_slz);
	vtnet_update_link_status(sc);
	lwkt_serialize_exit(&sc->vtnet_slz);
}

static int
vtnet_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
	struct vtnet_softc *sc;
	struct ifreq *ifr;
	int reinit, mask, error;

	sc = ifp->if_softc;
	ifr = (struct ifreq *) data;
	reinit = 0;
	error = 0;

	switch (cmd) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > VTNET_MAX_MTU)
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu) {
			lwkt_serialize_enter(&sc->vtnet_slz);
			error = vtnet_change_mtu(sc, ifr->ifr_mtu);
			lwkt_serialize_exit(&sc->vtnet_slz);
		}
		break;

	case SIOCSIFFLAGS:
		lwkt_serialize_enter(&sc->vtnet_slz);
		if ((ifp->if_flags & IFF_UP) == 0) {
			if (ifp->if_flags & IFF_RUNNING)
				vtnet_stop(sc);
		} else if (ifp->if_flags & IFF_RUNNING) {
			if ((ifp->if_flags ^ sc->vtnet_if_flags) &
			    (IFF_PROMISC | IFF_ALLMULTI)) {
				if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX)
					vtnet_rx_filter(sc);
				else
					error = ENOTSUP;
			}
		} else
			vtnet_init_locked(sc);

		if (error == 0)
			sc->vtnet_if_flags = ifp->if_flags;
		lwkt_serialize_exit(&sc->vtnet_slz);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		lwkt_serialize_enter(&sc->vtnet_slz);
		if ((sc->vtnet_flags & VTNET_FLAG_CTRL_RX) &&
		    (ifp->if_flags & IFF_RUNNING))
			vtnet_rx_filter_mac(sc);
		lwkt_serialize_exit(&sc->vtnet_slz);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->vtnet_media, cmd);
		break;

	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;

		lwkt_serialize_enter(&sc->vtnet_slz);

		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if (ifp->if_capenable & IFCAP_TXCSUM)
				ifp->if_hwassist |= VTNET_CSUM_OFFLOAD;
			else
				ifp->if_hwassist &= ~VTNET_CSUM_OFFLOAD;
		}

		if (mask & IFCAP_TSO4) {
			ifp->if_capenable ^= IFCAP_TSO4;
			if (ifp->if_capenable & IFCAP_TSO4)
				ifp->if_hwassist |= CSUM_TSO;
			else
				ifp->if_hwassist &= ~CSUM_TSO;
		}

		if (mask & IFCAP_RXCSUM) {
			ifp->if_capenable ^= IFCAP_RXCSUM;
			reinit = 1;
		}

		if (mask & IFCAP_LRO) {
			ifp->if_capenable ^= IFCAP_LRO;
			reinit = 1;
		}

		if (mask & IFCAP_VLAN_HWFILTER) {
			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
			reinit = 1;
		}

		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;

		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;

		if (reinit && (ifp->if_flags & IFF_RUNNING)) {
			ifp->if_flags &= ~IFF_RUNNING;
			vtnet_init_locked(sc);
		}
		//VLAN_CAPABILITIES(ifp);

		lwkt_serialize_exit(&sc->vtnet_slz);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}
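/*
 * Worked example for the cluster sizing below (a sketch, assuming 2KB
 * MCLBYTES): without mergeable buffers, an MTU of 9000 gives a frame
 * size of 14 (padded Rx header) + 18 (VLAN Ethernet header) + 9000 =
 * 9032 bytes, which exceeds MCLBYTES, so 9KB jumbo clusters are used.
 */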
static int
vtnet_change_mtu(struct vtnet_softc *sc, int new_mtu)
{
	struct ifnet *ifp;
	int new_frame_size, clsize;

	ifp = sc->vtnet_ifp;

	if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
		new_frame_size = sizeof(struct vtnet_rx_header) +
		    sizeof(struct ether_vlan_header) + new_mtu;

		if (new_frame_size > MJUM9BYTES)
			return (EINVAL);

		if (new_frame_size <= MCLBYTES)
			clsize = MCLBYTES;
		else
			clsize = MJUM9BYTES;
	} else {
		new_frame_size = sizeof(struct virtio_net_hdr_mrg_rxbuf) +
		    sizeof(struct ether_vlan_header) + new_mtu;

		if (new_frame_size <= MCLBYTES)
			clsize = MCLBYTES;
		else
			clsize = MJUMPAGESIZE;
	}

	sc->vtnet_rx_mbuf_size = clsize;
	sc->vtnet_rx_mbuf_count = VTNET_NEEDED_RX_MBUFS(sc);
	KASSERT(sc->vtnet_rx_mbuf_count < VTNET_MAX_RX_SEGS,
	    ("too many rx mbufs: %d", sc->vtnet_rx_mbuf_count));

	ifp->if_mtu = new_mtu;

	if (ifp->if_flags & IFF_RUNNING) {
		ifp->if_flags &= ~IFF_RUNNING;
		vtnet_init_locked(sc);
	}

	return (0);
}

static int
vtnet_init_rx_vq(struct vtnet_softc *sc)
{
	struct virtqueue *vq;
	int nbufs, error;

	vq = sc->vtnet_rx_vq;
	nbufs = 0;
	error = ENOSPC;

	while (!virtqueue_full(vq)) {
		if ((error = vtnet_newbuf(sc)) != 0)
			break;
		nbufs++;
	}

	if (nbufs > 0) {
		virtqueue_notify(vq, &sc->vtnet_slz);

		/*
		 * EMSGSIZE signifies the virtqueue did not have enough
		 * entries available to hold the last mbuf. This is not
		 * an error. We should not get ENOSPC since we check if
		 * the virtqueue is full before attempting to add a
		 * buffer.
		 */
		if (error == EMSGSIZE)
			error = 0;
	}

	return (error);
}

static void
vtnet_free_rx_mbufs(struct vtnet_softc *sc)
{
	struct virtqueue *vq;
	struct mbuf *m;
	int last;

	vq = sc->vtnet_rx_vq;
	last = 0;

	while ((m = virtqueue_drain(vq, &last)) != NULL)
		m_freem(m);

	KASSERT(virtqueue_empty(vq), ("mbufs remaining in Rx Vq"));
}

static void
vtnet_free_tx_mbufs(struct vtnet_softc *sc)
{
	struct virtqueue *vq;
	struct vtnet_tx_header *txhdr;
	int last;

	vq = sc->vtnet_tx_vq;
	last = 0;

	while ((txhdr = virtqueue_drain(vq, &last)) != NULL) {
		m_freem(txhdr->vth_mbuf);
	}

	KASSERT(virtqueue_empty(vq), ("mbufs remaining in Tx Vq"));
}

static void
vtnet_free_ctrl_vq(struct vtnet_softc *sc)
{
	/*
	 * The control virtqueue is only polled, therefore
	 * it should already be empty.
	 */
	KASSERT(virtqueue_empty(sc->vtnet_ctrl_vq),
	    ("Ctrl Vq not empty"));
}

static struct mbuf *
vtnet_alloc_rxbuf(struct vtnet_softc *sc, int nbufs, struct mbuf **m_tailp)
{
	struct mbuf *m_head, *m_tail, *m;
	int i, clsize;

	clsize = sc->vtnet_rx_mbuf_size;

	/* use getcl instead of getjcl. see if_mxge.c comment line 2398 */
	//m_head = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, clsize);
	m_head = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m_head == NULL)
		goto fail;

	m_head->m_len = clsize;
	m_tail = m_head;

	if (nbufs > 1) {
		KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG,
		    ("chained Rx mbuf requested without LRO_NOMRG"));

		for (i = 0; i < nbufs - 1; i++) {
			//m = m_getjcl(M_DONTWAIT, MT_DATA, 0, clsize);
			m = m_getcl(MB_DONTWAIT, MT_DATA, 0);
			if (m == NULL)
				goto fail;

			m->m_len = clsize;
			m_tail->m_next = m;
			m_tail = m;
		}
	}

	if (m_tailp != NULL)
		*m_tailp = m_tail;

	return (m_head);

fail:
	sc->vtnet_stats.mbuf_alloc_failed++;
	m_freem(m_head);

	return (NULL);
}

static int
vtnet_replace_rxbuf(struct vtnet_softc *sc, struct mbuf *m0, int len0)
{
	struct mbuf *m, *m_prev;
	struct mbuf *m_new, *m_tail;
	int len, clsize, nreplace, error;

	m = m0;
	m_prev = NULL;
	len = len0;

	m_tail = NULL;
	clsize = sc->vtnet_rx_mbuf_size;
	nreplace = 0;

	if (m->m_next != NULL)
		KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG,
		    ("chained Rx mbuf without LRO_NOMRG"));

	/*
	 * Since LRO_NOMRG mbuf chains are so large, we want to avoid
	 * allocating an entire chain for each received frame. When
	 * the received frame's length is less than that of the chain,
	 * the unused mbufs are reassigned to the new chain.
	 */
	while (len > 0) {
		/*
		 * Something is seriously wrong if we received
		 * a frame larger than the mbuf chain. Drop it.
		 */
		if (m == NULL) {
			sc->vtnet_stats.rx_frame_too_large++;
			return (EMSGSIZE);
		}

		KASSERT(m->m_len == clsize,
		    ("mbuf length not expected cluster size: %d",
		    m->m_len));

		m->m_len = MIN(m->m_len, len);
		len -= m->m_len;

		m_prev = m;
		m = m->m_next;
		nreplace++;
	}

	KASSERT(m_prev != NULL, ("m_prev == NULL"));
	KASSERT(nreplace <= sc->vtnet_rx_mbuf_count,
	    ("too many replacement mbufs: %d/%d", nreplace,
	    sc->vtnet_rx_mbuf_count));

	m_new = vtnet_alloc_rxbuf(sc, nreplace, &m_tail);
	if (m_new == NULL) {
		m_prev->m_len = clsize;
		return (ENOBUFS);
	}

	/*
	 * Move unused mbufs, if any, from the original chain
	 * onto the end of the new chain.
	 */
	if (m_prev->m_next != NULL) {
		m_tail->m_next = m_prev->m_next;
		m_prev->m_next = NULL;
	}

	error = vtnet_enqueue_rxbuf(sc, m_new);
	if (error) {
		/*
		 * BAD! We could not enqueue the replacement mbuf chain. We
		 * must restore the m0 chain to the original state if it was
		 * modified so we can subsequently discard it.
		 *
		 * NOTE: The replacement is supposed to be an identical copy
		 * of the one just dequeued, so this is an unexpected error.
		 */
		sc->vtnet_stats.rx_enq_replacement_failed++;

		if (m_tail->m_next != NULL) {
			m_prev->m_next = m_tail->m_next;
			m_tail->m_next = NULL;
		}

		m_prev->m_len = clsize;
		m_freem(m_new);
	}

	return (error);
}

static int
vtnet_newbuf(struct vtnet_softc *sc)
{
	struct mbuf *m;
	int error;

	m = vtnet_alloc_rxbuf(sc, sc->vtnet_rx_mbuf_count, NULL);
	if (m == NULL)
		return (ENOBUFS);

	error = vtnet_enqueue_rxbuf(sc, m);
	if (error)
		m_freem(m);

	return (error);
}

static void
vtnet_discard_merged_rxbuf(struct vtnet_softc *sc, int nbufs)
{
	struct virtqueue *vq;
	struct mbuf *m;

	vq = sc->vtnet_rx_vq;

	while (--nbufs > 0) {
		if ((m = virtqueue_dequeue(vq, NULL)) == NULL)
			break;
		vtnet_discard_rxbuf(sc, m);
	}
}

static void
vtnet_discard_rxbuf(struct vtnet_softc *sc, struct mbuf *m)
{
	int error;

	/*
	 * Requeue the discarded mbuf. This should always be
	 * successful since it was just dequeued.
	 */
	error = vtnet_enqueue_rxbuf(sc, m);
	KASSERT(error == 0, ("cannot requeue discarded mbuf"));
}
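/*
 * Rx descriptor layout used below (non-mergeable case): the first sg
 * entry covers only the virtio_net_hdr embedded in vtnet_rx_header, the
 * second covers the first mbuf's data past the full padded header, and
 * any chained LRO_NOMRG mbufs follow as additional entries.
 */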
1356 */ 1357 error = vtnet_enqueue_rxbuf(sc, m); 1358 KASSERT(error == 0, ("cannot requeue discarded mbuf")); 1359 } 1360 1361 static int 1362 vtnet_enqueue_rxbuf(struct vtnet_softc *sc, struct mbuf *m) 1363 { 1364 struct sglist sg; 1365 struct sglist_seg segs[VTNET_MAX_RX_SEGS]; 1366 struct vtnet_rx_header *rxhdr; 1367 struct virtio_net_hdr *hdr; 1368 uint8_t *mdata; 1369 int offset, error; 1370 1371 ASSERT_SERIALIZED(&sc->vtnet_slz); 1372 if ((sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG) == 0) 1373 KASSERT(m->m_next == NULL, ("chained Rx mbuf")); 1374 1375 sglist_init(&sg, VTNET_MAX_RX_SEGS, segs); 1376 1377 mdata = mtod(m, uint8_t *); 1378 offset = 0; 1379 1380 if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) { 1381 rxhdr = (struct vtnet_rx_header *) mdata; 1382 hdr = &rxhdr->vrh_hdr; 1383 offset += sizeof(struct vtnet_rx_header); 1384 1385 error = sglist_append(&sg, hdr, sc->vtnet_hdr_size); 1386 KASSERT(error == 0, ("cannot add header to sglist")); 1387 } 1388 1389 error = sglist_append(&sg, mdata + offset, m->m_len - offset); 1390 if (error) 1391 return (error); 1392 1393 if (m->m_next != NULL) { 1394 error = sglist_append_mbuf(&sg, m->m_next); 1395 if (error) 1396 return (error); 1397 } 1398 1399 return (virtqueue_enqueue(sc->vtnet_rx_vq, m, &sg, 0, sg.sg_nseg)); 1400 } 1401 1402 static void 1403 vtnet_vlan_tag_remove(struct mbuf *m) 1404 { 1405 struct ether_vlan_header *evl; 1406 1407 evl = mtod(m, struct ether_vlan_header *); 1408 1409 m->m_pkthdr.ether_vlantag = ntohs(evl->evl_tag); 1410 m->m_flags |= M_VLANTAG; 1411 1412 /* Strip the 802.1Q header. */ 1413 bcopy((char *) evl, (char *) evl + ETHER_VLAN_ENCAP_LEN, 1414 ETHER_HDR_LEN - ETHER_TYPE_LEN); 1415 m_adj(m, ETHER_VLAN_ENCAP_LEN); 1416 } 1417 1418 /* 1419 * Alternative method of doing receive checksum offloading. Rather 1420 * than parsing the received frame down to the IP header, use the 1421 * csum_offset to determine which CSUM_* flags are appropriate. We 1422 * can get by with doing this only because the checksum offsets are 1423 * unique for the things we care about. 1424 */ 1425 static int 1426 vtnet_rx_csum(struct vtnet_softc *sc, struct mbuf *m, 1427 struct virtio_net_hdr *hdr) 1428 { 1429 struct ether_header *eh; 1430 struct ether_vlan_header *evh; 1431 struct udphdr *udp; 1432 int csum_len; 1433 uint16_t eth_type; 1434 1435 csum_len = hdr->csum_start + hdr->csum_offset; 1436 1437 if (csum_len < sizeof(struct ether_header) + sizeof(struct ip)) 1438 return (1); 1439 if (m->m_len < csum_len) 1440 return (1); 1441 1442 eh = mtod(m, struct ether_header *); 1443 eth_type = ntohs(eh->ether_type); 1444 if (eth_type == ETHERTYPE_VLAN) { 1445 evh = mtod(m, struct ether_vlan_header *); 1446 eth_type = ntohs(evh->evl_proto); 1447 } 1448 1449 if (eth_type != ETHERTYPE_IP && eth_type != ETHERTYPE_IPV6) { 1450 sc->vtnet_stats.rx_csum_bad_ethtype++; 1451 return (1); 1452 } 1453 1454 /* Use the offset to determine the appropriate CSUM_* flags. 
static int
vtnet_rx_csum(struct vtnet_softc *sc, struct mbuf *m,
    struct virtio_net_hdr *hdr)
{
	struct ether_header *eh;
	struct ether_vlan_header *evh;
	struct udphdr *udp;
	int csum_len;
	uint16_t eth_type;

	csum_len = hdr->csum_start + hdr->csum_offset;

	if (csum_len < sizeof(struct ether_header) + sizeof(struct ip))
		return (1);
	if (m->m_len < csum_len)
		return (1);

	eh = mtod(m, struct ether_header *);
	eth_type = ntohs(eh->ether_type);
	if (eth_type == ETHERTYPE_VLAN) {
		evh = mtod(m, struct ether_vlan_header *);
		eth_type = ntohs(evh->evl_proto);
	}

	if (eth_type != ETHERTYPE_IP && eth_type != ETHERTYPE_IPV6) {
		sc->vtnet_stats.rx_csum_bad_ethtype++;
		return (1);
	}

	/* Use the offset to determine the appropriate CSUM_* flags. */
	switch (hdr->csum_offset) {
	case offsetof(struct udphdr, uh_sum):
		if (m->m_len < hdr->csum_start + sizeof(struct udphdr))
			return (1);
		udp = (struct udphdr *)(mtod(m, uint8_t *) + hdr->csum_start);
		if (udp->uh_sum == 0)
			return (0);

		/* FALLTHROUGH */

	case offsetof(struct tcphdr, th_sum):
		m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
		m->m_pkthdr.csum_data = 0xFFFF;
		break;

	case offsetof(struct sctphdr, checksum):
		//m->m_pkthdr.csum_flags |= CSUM_SCTP_VALID;
		break;

	default:
		sc->vtnet_stats.rx_csum_bad_offset++;
		return (1);
	}

	sc->vtnet_stats.rx_csum_offloaded++;

	return (0);
}

static int
vtnet_rxeof_merged(struct vtnet_softc *sc, struct mbuf *m_head, int nbufs)
{
	struct ifnet *ifp;
	struct virtqueue *vq;
	struct mbuf *m, *m_tail;
	int len;

	ifp = sc->vtnet_ifp;
	vq = sc->vtnet_rx_vq;
	m_tail = m_head;

	while (--nbufs > 0) {
		m = virtqueue_dequeue(vq, &len);
		if (m == NULL) {
			ifp->if_ierrors++;
			goto fail;
		}

		if (vtnet_newbuf(sc) != 0) {
			ifp->if_iqdrops++;
			vtnet_discard_rxbuf(sc, m);
			if (nbufs > 1)
				vtnet_discard_merged_rxbuf(sc, nbufs);
			goto fail;
		}

		if (m->m_len < len)
			len = m->m_len;

		m->m_len = len;
		m->m_flags &= ~M_PKTHDR;

		m_head->m_pkthdr.len += len;
		m_tail->m_next = m;
		m_tail = m;
	}

	return (0);

fail:
	sc->vtnet_stats.rx_mergeable_failed++;
	m_freem(m_head);

	return (1);
}
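/*
 * Rx path outline: dequeue a completed buffer, immediately replace it
 * in the ring so the ring stays full, collect any additional mergeable
 * buffers, strip the VirtIO header, and hand the frame to
 * ifp->if_input() with the serializer dropped.
 */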
static int
vtnet_rxeof(struct vtnet_softc *sc, int count, int *rx_npktsp)
{
	struct virtio_net_hdr lhdr;
	struct ifnet *ifp;
	struct virtqueue *vq;
	struct mbuf *m;
	struct ether_header *eh;
	struct virtio_net_hdr *hdr;
	struct virtio_net_hdr_mrg_rxbuf *mhdr;
	int len, deq, nbufs, adjsz, rx_npkts;

	ifp = sc->vtnet_ifp;
	vq = sc->vtnet_rx_vq;
	hdr = &lhdr;
	deq = 0;
	rx_npkts = 0;

	ASSERT_SERIALIZED(&sc->vtnet_slz);

	while (--count >= 0) {
		m = virtqueue_dequeue(vq, &len);
		if (m == NULL)
			break;
		deq++;

		if (len < sc->vtnet_hdr_size + ETHER_HDR_LEN) {
			ifp->if_ierrors++;
			vtnet_discard_rxbuf(sc, m);
			continue;
		}

		if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
			nbufs = 1;
			adjsz = sizeof(struct vtnet_rx_header);
			/*
			 * Account for our pad between the header and
			 * the actual start of the frame.
			 */
			len += VTNET_RX_HEADER_PAD;
		} else {
			mhdr = mtod(m, struct virtio_net_hdr_mrg_rxbuf *);
			nbufs = mhdr->num_buffers;
			adjsz = sizeof(struct virtio_net_hdr_mrg_rxbuf);
		}

		if (vtnet_replace_rxbuf(sc, m, len) != 0) {
			ifp->if_iqdrops++;
			vtnet_discard_rxbuf(sc, m);
			if (nbufs > 1)
				vtnet_discard_merged_rxbuf(sc, nbufs);
			continue;
		}

		m->m_pkthdr.len = len;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.csum_flags = 0;

		if (nbufs > 1) {
			if (vtnet_rxeof_merged(sc, m, nbufs) != 0)
				continue;
		}

		ifp->if_ipackets++;

		/*
		 * Save copy of header before we strip it. For both mergeable
		 * and non-mergeable, the VirtIO header is placed first in the
		 * mbuf's data. We no longer need num_buffers, so always use a
		 * virtio_net_hdr.
		 */
		memcpy(hdr, mtod(m, void *), sizeof(struct virtio_net_hdr));
		m_adj(m, adjsz);

		if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
			eh = mtod(m, struct ether_header *);
			if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
				vtnet_vlan_tag_remove(m);

				/*
				 * With the 802.1Q header removed, update the
				 * checksum starting location accordingly.
				 */
				if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
					hdr->csum_start -=
					    ETHER_VLAN_ENCAP_LEN;
			}
		}

		if (ifp->if_capenable & IFCAP_RXCSUM &&
		    hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
			if (vtnet_rx_csum(sc, m, hdr) != 0)
				sc->vtnet_stats.rx_csum_failed++;
		}

		lwkt_serialize_exit(&sc->vtnet_slz);
		rx_npkts++;
		ifp->if_input(ifp, m, NULL, -1);
		lwkt_serialize_enter(&sc->vtnet_slz);

		/*
		 * The interface may have been stopped while we were
		 * passing the packet up the network stack.
		 */
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			break;
	}

	virtqueue_notify(vq, &sc->vtnet_slz);

	if (rx_npktsp != NULL)
		*rx_npktsp = rx_npkts;

	return (count > 0 ? 0 : EAGAIN);
}

static void
vtnet_rx_intr_task(void *arg)
{
	struct vtnet_softc *sc;
	struct ifnet *ifp;
	int more;

	sc = arg;
	ifp = sc->vtnet_ifp;

next:
//	lwkt_serialize_enter(&sc->vtnet_slz);

	if ((ifp->if_flags & IFF_RUNNING) == 0) {
		vtnet_enable_rx_intr(sc);
//		lwkt_serialize_exit(&sc->vtnet_slz);
		return;
	}

	more = vtnet_rxeof(sc, sc->vtnet_rx_process_limit, NULL);
	if (!more && vtnet_enable_rx_intr(sc) != 0) {
		vtnet_disable_rx_intr(sc);
		more = 1;
	}

//	lwkt_serialize_exit(&sc->vtnet_slz);

	if (more) {
		sc->vtnet_stats.rx_task_rescheduled++;
		goto next;
	}
}

static int
vtnet_rx_vq_intr(void *xsc)
{
	struct vtnet_softc *sc;

	sc = xsc;

	vtnet_disable_rx_intr(sc);
	vtnet_rx_intr_task(sc);

	return (1);
}

static void
vtnet_txeof(struct vtnet_softc *sc)
{
	struct virtqueue *vq;
	struct ifnet *ifp;
	struct vtnet_tx_header *txhdr;
	int deq;

	vq = sc->vtnet_tx_vq;
	ifp = sc->vtnet_ifp;
	deq = 0;

	ASSERT_SERIALIZED(&sc->vtnet_slz);

	while ((txhdr = virtqueue_dequeue(vq, NULL)) != NULL) {
		deq++;
		ifp->if_opackets++;
		m_freem(txhdr->vth_mbuf);
	}

	if (deq > 0) {
		ifq_clr_oactive(&ifp->if_snd);
		if (virtqueue_empty(vq))
			sc->vtnet_watchdog_timer = 0;
	}
}
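/*
 * How the offload fields set below map onto the VirtIO header:
 * csum_start is the byte offset where the host begins checksumming,
 * csum_offset is where the result is stored relative to csum_start
 * (the stack hands us exactly that value in csum_data), and
 * hdr_len/gso_size describe the TSO segmentation.
 */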
static struct mbuf *
vtnet_tx_offload(struct vtnet_softc *sc, struct mbuf *m,
    struct virtio_net_hdr *hdr)
{
	struct ifnet *ifp;
	struct ether_header *eh;
	struct ether_vlan_header *evh;
	struct ip *ip;
	struct ip6_hdr *ip6;
	struct tcphdr *tcp;
	int ip_offset;
	uint16_t eth_type, csum_start;
	uint8_t ip_proto, gso_type;

	ifp = sc->vtnet_ifp;
	M_ASSERTPKTHDR(m);

	ip_offset = sizeof(struct ether_header);
	if (m->m_len < ip_offset) {
		if ((m = m_pullup(m, ip_offset)) == NULL)
			return (NULL);
	}

	eh = mtod(m, struct ether_header *);
	eth_type = ntohs(eh->ether_type);
	if (eth_type == ETHERTYPE_VLAN) {
		ip_offset = sizeof(struct ether_vlan_header);
		if (m->m_len < ip_offset) {
			if ((m = m_pullup(m, ip_offset)) == NULL)
				return (NULL);
		}
		evh = mtod(m, struct ether_vlan_header *);
		eth_type = ntohs(evh->evl_proto);
	}

	switch (eth_type) {
	case ETHERTYPE_IP:
		if (m->m_len < ip_offset + sizeof(struct ip)) {
			m = m_pullup(m, ip_offset + sizeof(struct ip));
			if (m == NULL)
				return (NULL);
		}

		ip = (struct ip *)(mtod(m, uint8_t *) + ip_offset);
		ip_proto = ip->ip_p;
		csum_start = ip_offset + (ip->ip_hl << 2);
		gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
		break;

	case ETHERTYPE_IPV6:
		if (m->m_len < ip_offset + sizeof(struct ip6_hdr)) {
			m = m_pullup(m, ip_offset + sizeof(struct ip6_hdr));
			if (m == NULL)
				return (NULL);
		}

		ip6 = (struct ip6_hdr *)(mtod(m, uint8_t *) + ip_offset);
		/*
		 * XXX Assume no extension headers are present. Presently,
		 * this will always be true in the case of TSO, and FreeBSD
		 * does not perform checksum offloading of IPv6 yet.
		 */
		ip_proto = ip6->ip6_nxt;
		csum_start = ip_offset + sizeof(struct ip6_hdr);
		gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
		break;

	default:
		return (m);
	}

	if (m->m_pkthdr.csum_flags & VTNET_CSUM_OFFLOAD) {
		hdr->flags |= VIRTIO_NET_HDR_F_NEEDS_CSUM;
		hdr->csum_start = csum_start;
		hdr->csum_offset = m->m_pkthdr.csum_data;

		sc->vtnet_stats.tx_csum_offloaded++;
	}

	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
		if (ip_proto != IPPROTO_TCP)
			return (m);

		if (m->m_len < csum_start + sizeof(struct tcphdr)) {
			m = m_pullup(m, csum_start + sizeof(struct tcphdr));
			if (m == NULL)
				return (NULL);
		}

		tcp = (struct tcphdr *)(mtod(m, uint8_t *) + csum_start);
		hdr->gso_type = gso_type;
		hdr->hdr_len = csum_start + (tcp->th_off << 2);
		hdr->gso_size = m->m_pkthdr.tso_segsz;

		if (tcp->th_flags & TH_CWR) {
			/*
			 * Drop if we did not negotiate VIRTIO_NET_F_HOST_ECN.
			 * ECN support is only configurable globally with the
			 * net.inet.tcp.ecn.enable sysctl knob.
			 */
			if ((sc->vtnet_flags & VTNET_FLAG_TSO_ECN) == 0) {
				if_printf(ifp, "TSO with ECN not supported "
				    "by host\n");
				m_freem(m);
				return (NULL);
			}

			hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
		}

		sc->vtnet_stats.tx_tso_offloaded++;
	}

	return (m);
}
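/*
 * If the frame has more fragments than the sglist can describe, it is
 * defragmented into a fresh chain once and retried; a second failure
 * drops the packet. (m_collapse() would be the lighter-weight choice,
 * see the commented-out call below.)
 */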
static int
vtnet_enqueue_txbuf(struct vtnet_softc *sc, struct mbuf **m_head,
    struct vtnet_tx_header *txhdr)
{
	struct sglist sg;
	struct sglist_seg segs[VTNET_MAX_TX_SEGS];
	struct virtqueue *vq;
	struct mbuf *m;
	int collapsed, error;

	vq = sc->vtnet_tx_vq;
	m = *m_head;
	collapsed = 0;

	sglist_init(&sg, VTNET_MAX_TX_SEGS, segs);
	error = sglist_append(&sg, &txhdr->vth_uhdr, sc->vtnet_hdr_size);
	KASSERT(error == 0 && sg.sg_nseg == 1,
	    ("cannot add header to sglist"));

again:
	error = sglist_append_mbuf(&sg, m);
	if (error) {
		if (collapsed)
			goto fail;

		//m = m_collapse(m, MB_DONTWAIT, VTNET_MAX_TX_SEGS - 1);
		m = m_defrag(m, MB_DONTWAIT);
		if (m == NULL)
			goto fail;

		*m_head = m;
		collapsed = 1;
		goto again;
	}

	txhdr->vth_mbuf = m;

	return (virtqueue_enqueue(vq, txhdr, &sg, sg.sg_nseg, 0));

fail:
	m_freem(*m_head);
	*m_head = NULL;

	return (ENOBUFS);
}

static struct mbuf *
vtnet_vlan_tag_insert(struct mbuf *m)
{
	struct mbuf *n;
	struct ether_vlan_header *evl;

	if (M_WRITABLE(m) == 0) {
		n = m_dup(m, MB_DONTWAIT);
		m_freem(m);
		if ((m = n) == NULL)
			return (NULL);
	}

	M_PREPEND(m, ETHER_VLAN_ENCAP_LEN, MB_DONTWAIT);
	if (m == NULL)
		return (NULL);
	if (m->m_len < sizeof(struct ether_vlan_header)) {
		m = m_pullup(m, sizeof(struct ether_vlan_header));
		if (m == NULL)
			return (NULL);
	}

	/* Insert 802.1Q header into the existing Ethernet header. */
	evl = mtod(m, struct ether_vlan_header *);
	bcopy((char *) evl + ETHER_VLAN_ENCAP_LEN,
	    (char *) evl, ETHER_HDR_LEN - ETHER_TYPE_LEN);
	evl->evl_encap_proto = htons(ETHERTYPE_VLAN);
	evl->evl_tag = htons(m->m_pkthdr.ether_vlantag);
	m->m_flags &= ~M_VLANTAG;

	return (m);
}

static int
vtnet_encap(struct vtnet_softc *sc, struct mbuf **m_head)
{
	struct vtnet_tx_header *txhdr;
	struct virtio_net_hdr *hdr;
	struct mbuf *m;
	int error;

	txhdr = &sc->vtnet_txhdrarea[sc->vtnet_txhdridx];
	memset(txhdr, 0, sizeof(struct vtnet_tx_header));

	/*
	 * Always use the non-mergeable header to simplify things. When
	 * the mergeable feature is negotiated, the num_buffers field
	 * must be set to zero. We use vtnet_hdr_size later to enqueue
	 * the correct header size to the host.
	 */
	hdr = &txhdr->vth_uhdr.hdr;
	m = *m_head;

	error = ENOBUFS;

	if (m->m_flags & M_VLANTAG) {
		//m = ether_vlanencap(m, m->m_pkthdr.ether_vtag);
		m = vtnet_vlan_tag_insert(m);
		if ((*m_head = m) == NULL)
			goto fail;
		m->m_flags &= ~M_VLANTAG;
	}

	if (m->m_pkthdr.csum_flags != 0) {
		m = vtnet_tx_offload(sc, m, hdr);
		if ((*m_head = m) == NULL)
			goto fail;
	}

	error = vtnet_enqueue_txbuf(sc, m_head, txhdr);
	if (error == 0)
		sc->vtnet_txhdridx =
		    (sc->vtnet_txhdridx + 1) % ((sc->vtnet_tx_size / 2) + 1);
fail:
	return (error);
}
1931 */ 1932 hdr = &txhdr->vth_uhdr.hdr; 1933 m = *m_head; 1934 1935 error = ENOBUFS; 1936 1937 if (m->m_flags & M_VLANTAG) { 1938 //m = ether_vlanencap(m, m->m_pkthdr.ether_vtag); 1939 m = vtnet_vlan_tag_insert(m); 1940 if ((*m_head = m) == NULL) 1941 goto fail; 1942 m->m_flags &= ~M_VLANTAG; 1943 } 1944 1945 if (m->m_pkthdr.csum_flags != 0) { 1946 m = vtnet_tx_offload(sc, m, hdr); 1947 if ((*m_head = m) == NULL) 1948 goto fail; 1949 } 1950 1951 error = vtnet_enqueue_txbuf(sc, m_head, txhdr); 1952 if (error == 0) 1953 sc->vtnet_txhdridx = 1954 (sc->vtnet_txhdridx + 1) % ((sc->vtnet_tx_size / 2) + 1); 1955 fail: 1956 return (error); 1957 } 1958 1959 static void 1960 vtnet_start(struct ifnet *ifp, struct ifaltq_subque *ifsq) 1961 { 1962 struct vtnet_softc *sc; 1963 1964 sc = ifp->if_softc; 1965 1966 ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq); 1967 lwkt_serialize_enter(&sc->vtnet_slz); 1968 vtnet_start_locked(ifp, ifsq); 1969 lwkt_serialize_exit(&sc->vtnet_slz); 1970 } 1971 1972 static void 1973 vtnet_start_locked(struct ifnet *ifp, struct ifaltq_subque *ifsq) 1974 { 1975 struct vtnet_softc *sc; 1976 struct virtqueue *vq; 1977 struct mbuf *m0; 1978 int enq; 1979 1980 sc = ifp->if_softc; 1981 vq = sc->vtnet_tx_vq; 1982 enq = 0; 1983 1984 ASSERT_SERIALIZED(&sc->vtnet_slz); 1985 1986 if ((ifp->if_flags & (IFF_RUNNING)) != 1987 IFF_RUNNING || ((sc->vtnet_flags & VTNET_FLAG_LINK) == 0)) 1988 return; 1989 1990 #ifdef VTNET_TX_INTR_MODERATION 1991 if (virtqueue_nused(vq) >= sc->vtnet_tx_size / 2) 1992 vtnet_txeof(sc); 1993 #endif 1994 1995 while (!ifsq_is_empty(ifsq)) { 1996 if (virtqueue_full(vq)) { 1997 ifq_set_oactive(&ifp->if_snd); 1998 break; 1999 } 2000 2001 m0 = ifq_dequeue(&ifp->if_snd); 2002 if (m0 == NULL) 2003 break; 2004 2005 if (vtnet_encap(sc, &m0) != 0) { 2006 if (m0 == NULL) 2007 break; 2008 ifq_prepend(&ifp->if_snd, m0); 2009 ifq_set_oactive(&ifp->if_snd); 2010 break; 2011 } 2012 2013 enq++; 2014 ETHER_BPF_MTAP(ifp, m0); 2015 } 2016 2017 if (enq > 0) { 2018 virtqueue_notify(vq, &sc->vtnet_slz); 2019 sc->vtnet_watchdog_timer = VTNET_WATCHDOG_TIMEOUT; 2020 } 2021 } 2022 2023 static void 2024 vtnet_tick(void *xsc) 2025 { 2026 struct vtnet_softc *sc; 2027 2028 sc = xsc; 2029 2030 #if 0 2031 ASSERT_SERIALIZED(&sc->vtnet_slz); 2032 #ifdef VTNET_DEBUG 2033 virtqueue_dump(sc->vtnet_rx_vq); 2034 virtqueue_dump(sc->vtnet_tx_vq); 2035 #endif 2036 2037 vtnet_watchdog(sc); 2038 callout_reset(&sc->vtnet_tick_ch, hz, vtnet_tick, sc); 2039 #endif 2040 } 2041 2042 static void 2043 vtnet_tx_intr_task(void *arg) 2044 { 2045 struct vtnet_softc *sc; 2046 struct ifnet *ifp; 2047 struct ifaltq_subque *ifsq; 2048 2049 sc = arg; 2050 ifp = sc->vtnet_ifp; 2051 ifsq = ifq_get_subq_default(&ifp->if_snd); 2052 2053 next: 2054 // lwkt_serialize_enter(&sc->vtnet_slz); 2055 2056 if ((ifp->if_flags & IFF_RUNNING) == 0) { 2057 vtnet_enable_tx_intr(sc); 2058 // lwkt_serialize_exit(&sc->vtnet_slz); 2059 return; 2060 } 2061 2062 vtnet_txeof(sc); 2063 2064 if (!ifsq_is_empty(ifsq)) 2065 vtnet_start_locked(ifp, ifsq); 2066 2067 if (vtnet_enable_tx_intr(sc) != 0) { 2068 vtnet_disable_tx_intr(sc); 2069 sc->vtnet_stats.tx_task_rescheduled++; 2070 // lwkt_serialize_exit(&sc->vtnet_slz); 2071 goto next; 2072 } 2073 2074 // lwkt_serialize_exit(&sc->vtnet_slz); 2075 } 2076 2077 static int 2078 vtnet_tx_vq_intr(void *xsc) 2079 { 2080 struct vtnet_softc *sc; 2081 2082 sc = xsc; 2083 2084 vtnet_disable_tx_intr(sc); 2085 vtnet_tx_intr_task(sc); 2086 2087 return (1); 2088 } 2089 2090 static void 2091 vtnet_stop(struct vtnet_softc *sc) 2092 
static void
vtnet_stop(struct vtnet_softc *sc)
{
	device_t dev;
	struct ifnet *ifp;

	dev = sc->vtnet_dev;
	ifp = sc->vtnet_ifp;

	ASSERT_SERIALIZED(&sc->vtnet_slz);

	sc->vtnet_watchdog_timer = 0;
	callout_stop(&sc->vtnet_tick_ch);
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_flags &= ~IFF_RUNNING;

	vtnet_disable_rx_intr(sc);
	vtnet_disable_tx_intr(sc);

	/*
	 * Stop the host VirtIO adapter.  Note this will reset the host
	 * adapter's state back to the pre-initialized state, so in
	 * order to make the device usable again, we must drive it
	 * through virtio_reinit() and virtio_reinit_complete().
	 */
	virtio_stop(dev);

	sc->vtnet_flags &= ~VTNET_FLAG_LINK;

	vtnet_free_rx_mbufs(sc);
	vtnet_free_tx_mbufs(sc);
}

static int
vtnet_reinit(struct vtnet_softc *sc)
{
	struct ifnet *ifp;
	uint64_t features;

	ifp = sc->vtnet_ifp;
	features = sc->vtnet_features;

	/*
	 * Re-negotiate with the host, removing any disabled receive
	 * features.  Transmit features are disabled only on our side
	 * via if_capenable and if_hwassist.
	 */

	if (ifp->if_capabilities & IFCAP_RXCSUM) {
		if ((ifp->if_capenable & IFCAP_RXCSUM) == 0)
			features &= ~VIRTIO_NET_F_GUEST_CSUM;
	}

	if (ifp->if_capabilities & IFCAP_LRO) {
		if ((ifp->if_capenable & IFCAP_LRO) == 0)
			features &= ~VTNET_LRO_FEATURES;
	}

	if (ifp->if_capabilities & IFCAP_VLAN_HWFILTER) {
		if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
			features &= ~VIRTIO_NET_F_CTRL_VLAN;
	}

	return (virtio_reinit(sc->vtnet_dev, features));
}

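/*
 * Bring the interface up: reset the device, renegotiate features,
 * reprogram the MAC address and offload settings, refill the Rx
 * virtqueue, and restore any control virtqueue state (Rx modes, MAC
 * table, VLAN filters) that the reset discarded.
 */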
static void
vtnet_init_locked(struct vtnet_softc *sc)
{
	device_t dev;
	struct ifnet *ifp;
	int error;

	dev = sc->vtnet_dev;
	ifp = sc->vtnet_ifp;

	ASSERT_SERIALIZED(&sc->vtnet_slz);

	if (ifp->if_flags & IFF_RUNNING)
		return;

	/* Stop host's adapter, cancel any pending I/O. */
	vtnet_stop(sc);

	/* Reinitialize the host device. */
	error = vtnet_reinit(sc);
	if (error) {
		device_printf(dev,
		    "reinitialization failed, stopping device...\n");
		vtnet_stop(sc);
		return;
	}

	/* Update host with assigned MAC address. */
	bcopy(IF_LLADDR(ifp), sc->vtnet_hwaddr, ETHER_ADDR_LEN);
	vtnet_set_hwaddr(sc);

	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist |= VTNET_CSUM_OFFLOAD;
	if (ifp->if_capenable & IFCAP_TSO4)
		ifp->if_hwassist |= CSUM_TSO;

	error = vtnet_init_rx_vq(sc);
	if (error) {
		device_printf(dev,
		    "cannot allocate mbufs for Rx virtqueue\n");
		vtnet_stop(sc);
		return;
	}

	if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) {
		if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) {
			/* Restore promiscuous and all-multicast modes. */
			vtnet_rx_filter(sc);

			/* Restore filtered MAC addresses. */
			vtnet_rx_filter_mac(sc);
		}

		/* Restore VLAN filters. */
		if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
			vtnet_rx_filter_vlan(sc);
	}

	vtnet_enable_rx_intr(sc);
	vtnet_enable_tx_intr(sc);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	virtio_reinit_complete(dev);

	vtnet_update_link_status(sc);
	callout_reset(&sc->vtnet_tick_ch, hz, vtnet_tick, sc);
}

static void
vtnet_init(void *xsc)
{
	struct vtnet_softc *sc;

	sc = xsc;

	lwkt_serialize_enter(&sc->vtnet_slz);
	vtnet_init_locked(sc);
	lwkt_serialize_exit(&sc->vtnet_slz);
}

static void
vtnet_exec_ctrl_cmd(struct vtnet_softc *sc, void *cookie,
    struct sglist *sg, int readable, int writable)
{
	struct virtqueue *vq;
	void *c;

	vq = sc->vtnet_ctrl_vq;

	ASSERT_SERIALIZED(&sc->vtnet_slz);
	KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_VQ,
	    ("no control virtqueue"));
	KASSERT(virtqueue_empty(vq),
	    ("control command already enqueued"));

	if (virtqueue_enqueue(vq, cookie, sg, readable, writable) != 0)
		return;

	virtqueue_notify(vq, &sc->vtnet_slz);

	/*
	 * Poll until the command is complete.  Previously, we would
	 * sleep until the control virtqueue interrupt handler woke us
	 * up, but dropping the serializer leads to serialization
	 * difficulties.
	 *
	 * Furthermore, it appears QEMU/KVM only allocates three MSIX
	 * vectors.  Two of those vectors are needed for the Rx and Tx
	 * virtqueues, and we do not support sharing a virtqueue and
	 * the configuration changed notification on the same MSIX
	 * vector.
	 */
	c = virtqueue_poll(vq, NULL);
	KASSERT(c == cookie, ("unexpected control command response"));
}

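/*
 * Every control virtqueue command below uses the same descriptor
 * layout: the readable segments carry the header (class and command)
 * plus any command-specific payload, and the single writable segment
 * is a one-byte ack the host sets to VIRTIO_NET_OK or VIRTIO_NET_ERR.
 * Callers therefore pass sg_nseg - 1 readable segments and one
 * writable segment to vtnet_exec_ctrl_cmd().
 */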
static void
vtnet_rx_filter(struct vtnet_softc *sc)
{
	device_t dev;
	struct ifnet *ifp;

	dev = sc->vtnet_dev;
	ifp = sc->vtnet_ifp;

	ASSERT_SERIALIZED(&sc->vtnet_slz);
	KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX,
	    ("CTRL_RX feature not negotiated"));

	if (vtnet_set_promisc(sc, ifp->if_flags & IFF_PROMISC) != 0)
		device_printf(dev, "cannot %s promiscuous mode\n",
		    ifp->if_flags & IFF_PROMISC ? "enable" : "disable");

	if (vtnet_set_allmulti(sc, ifp->if_flags & IFF_ALLMULTI) != 0)
		device_printf(dev, "cannot %s all-multicast mode\n",
		    ifp->if_flags & IFF_ALLMULTI ? "enable" : "disable");
}

static int
vtnet_ctrl_rx_cmd(struct vtnet_softc *sc, int cmd, int on)
{
	struct virtio_net_ctrl_hdr hdr __aligned(2);
	struct sglist_seg segs[3];
	struct sglist sg;
	uint8_t onoff, ack;
	int error;

	if ((sc->vtnet_flags & VTNET_FLAG_CTRL_RX) == 0)
		return (ENOTSUP);

	error = 0;

	hdr.class = VIRTIO_NET_CTRL_RX;
	hdr.cmd = cmd;
	onoff = !!on;
	ack = VIRTIO_NET_ERR;

	sglist_init(&sg, 3, segs);
	error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr));
	error |= sglist_append(&sg, &onoff, sizeof(uint8_t));
	error |= sglist_append(&sg, &ack, sizeof(uint8_t));
	KASSERT(error == 0 && sg.sg_nseg == 3,
	    ("error adding Rx filter message to sglist"));

	vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1);

	return (ack == VIRTIO_NET_OK ? 0 : EIO);
}

static int
vtnet_set_promisc(struct vtnet_softc *sc, int on)
{

	return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_PROMISC, on));
}

static int
vtnet_set_allmulti(struct vtnet_softc *sc, int on)
{

	return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_ALLMULTI, on));
}

/*
 * Program the host's unicast and multicast MAC filter tables from the
 * interface's current address lists.  If either list overflows
 * VTNET_MAX_MAC_ENTRIES, that table is sent empty and we fall back to
 * promiscuous or all-multicast mode instead.
 */
static void
vtnet_rx_filter_mac(struct vtnet_softc *sc)
{
	struct virtio_net_ctrl_hdr hdr __aligned(2);
	struct vtnet_mac_filter *filter;
	struct sglist_seg segs[4];
	struct sglist sg;
	struct ifnet *ifp;
	struct ifaddr *ifa;
	struct ifaddr_container *ifac;
	struct ifmultiaddr *ifma;
	int ucnt, mcnt, promisc, allmulti, error;
	uint8_t ack;

	ifp = sc->vtnet_ifp;
	ucnt = 0;
	mcnt = 0;
	promisc = 0;
	allmulti = 0;
	error = 0;

	ASSERT_SERIALIZED(&sc->vtnet_slz);
	KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX,
	    ("CTRL_RX feature not negotiated"));

	/* Use the MAC filtering table allocated in vtnet_attach. */
	filter = sc->vtnet_macfilter;
	memset(filter, 0, sizeof(struct vtnet_mac_filter));

	/* Unicast MAC addresses: */
	//if_addr_rlock(ifp);
	TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
		ifa = ifac->ifa;
		if (ifa->ifa_addr->sa_family != AF_LINK)
			continue;
		else if (ucnt == VTNET_MAX_MAC_ENTRIES)
			break;

		bcopy(LLADDR((struct sockaddr_dl *)ifa->ifa_addr),
		    &filter->vmf_unicast.macs[ucnt], ETHER_ADDR_LEN);
		ucnt++;
	}
	//if_addr_runlock(ifp);

	if (ucnt >= VTNET_MAX_MAC_ENTRIES) {
		promisc = 1;
		filter->vmf_unicast.nentries = 0;

		if_printf(ifp, "more than %d MAC addresses assigned, "
		    "falling back to promiscuous mode\n",
		    VTNET_MAX_MAC_ENTRIES);
	} else
		filter->vmf_unicast.nentries = ucnt;

	/* Multicast MAC addresses: */
	//if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		else if (mcnt == VTNET_MAX_MAC_ENTRIES)
			break;

		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    &filter->vmf_multicast.macs[mcnt], ETHER_ADDR_LEN);
		mcnt++;
	}
	//if_maddr_runlock(ifp);

	if (mcnt >= VTNET_MAX_MAC_ENTRIES) {
		allmulti = 1;
		filter->vmf_multicast.nentries = 0;

		if_printf(ifp, "more than %d multicast MAC addresses "
		    "assigned, falling back to all-multicast mode\n",
		    VTNET_MAX_MAC_ENTRIES);
	} else
		filter->vmf_multicast.nentries = mcnt;

	if (promisc && allmulti)
		goto out;

	hdr.class = VIRTIO_NET_CTRL_MAC;
	hdr.cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;
	ack = VIRTIO_NET_ERR;

	sglist_init(&sg, 4, segs);
	error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr));
	error |= sglist_append(&sg, &filter->vmf_unicast,
	    sizeof(struct vtnet_mac_table));
	error |= sglist_append(&sg, &filter->vmf_multicast,
	    sizeof(struct vtnet_mac_table));
	error |= sglist_append(&sg, &ack, sizeof(uint8_t));
	KASSERT(error == 0 && sg.sg_nseg == 4,
	    ("error adding MAC filtering message to sglist"));

	vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1);

	if (ack != VIRTIO_NET_OK)
		if_printf(ifp, "error setting host MAC filter table\n");

out:
	if (promisc)
		if (vtnet_set_promisc(sc, 1) != 0)
			if_printf(ifp, "cannot enable promiscuous mode\n");
	if (allmulti)
		if (vtnet_set_allmulti(sc, 1) != 0)
			if_printf(ifp, "cannot enable all-multicast mode\n");
}

static int
vtnet_exec_vlan_filter(struct vtnet_softc *sc, int add, uint16_t tag)
{
	struct virtio_net_ctrl_hdr hdr __aligned(2);
	struct sglist_seg segs[3];
	struct sglist sg;
	uint8_t ack;
	int error;

	hdr.class = VIRTIO_NET_CTRL_VLAN;
	hdr.cmd = add ? VIRTIO_NET_CTRL_VLAN_ADD : VIRTIO_NET_CTRL_VLAN_DEL;
	ack = VIRTIO_NET_ERR;
	error = 0;

	sglist_init(&sg, 3, segs);
	error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr));
	error |= sglist_append(&sg, &tag, sizeof(uint16_t));
	error |= sglist_append(&sg, &ack, sizeof(uint8_t));
	KASSERT(error == 0 && sg.sg_nseg == 3,
	    ("error adding VLAN control message to sglist"));

	vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1);

	return (ack == VIRTIO_NET_OK ? 0 : EIO);
}

static void
vtnet_rx_filter_vlan(struct vtnet_softc *sc)
{
	device_t dev;
	uint32_t w, mask;
	uint16_t tag;
	int i, nvlans, error;

	ASSERT_SERIALIZED(&sc->vtnet_slz);
	KASSERT(sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER,
	    ("VLAN_FILTER feature not negotiated"));

	dev = sc->vtnet_dev;
	nvlans = sc->vtnet_nvlans;
	error = 0;

	/* Enable filtering for each configured VLAN. */
	for (i = 0; i < VTNET_VLAN_SHADOW_SIZE && nvlans > 0; i++) {
		w = sc->vtnet_vlan_shadow[i];
		for (mask = 1, tag = i * 32; w != 0; mask <<= 1, tag++) {
			if ((w & mask) != 0) {
				w &= ~mask;
				nvlans--;
				if (vtnet_exec_vlan_filter(sc, 1, tag) != 0)
					error++;
			}
		}
	}

	KASSERT(nvlans == 0, ("VLAN count incorrect"));
	if (error)
		device_printf(dev, "cannot restore VLAN filter table\n");
}

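/*
 * The shadow VLAN table is a 4096-bit bitmap kept in 32-bit words:
 * VLAN tag N is tracked in word N >> 5, bit N & 0x1F.  For example,
 * tag 100 maps to word 3, bit 4 (100 = 3 * 32 + 4).  The shadow copy
 * is what lets vtnet_rx_filter_vlan() replay the filters after the
 * host device has been reset.
 */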
static void
vtnet_set_vlan_filter(struct vtnet_softc *sc, int add, uint16_t tag)
{
	struct ifnet *ifp;
	int idx, bit;

	KASSERT(sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER,
	    ("VLAN_FILTER feature not negotiated"));

	if ((tag == 0) || (tag > 4095))
		return;

	ifp = sc->vtnet_ifp;
	idx = (tag >> 5) & 0x7F;
	bit = tag & 0x1F;

	lwkt_serialize_enter(&sc->vtnet_slz);

	/* Update shadow VLAN table. */
	if (add) {
		sc->vtnet_nvlans++;
		sc->vtnet_vlan_shadow[idx] |= (1 << bit);
	} else {
		sc->vtnet_nvlans--;
		sc->vtnet_vlan_shadow[idx] &= ~(1 << bit);
	}

	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
		if (vtnet_exec_vlan_filter(sc, add, tag) != 0) {
			device_printf(sc->vtnet_dev,
			    "cannot %s VLAN %d %s the host filter table\n",
			    add ? "add" : "remove", tag,
			    add ? "to" : "from");
		}
	}

	lwkt_serialize_exit(&sc->vtnet_slz);
}

static void
vtnet_register_vlan(void *arg, struct ifnet *ifp, uint16_t tag)
{

	if (ifp->if_softc != arg)
		return;

	vtnet_set_vlan_filter(arg, 1, tag);
}

static void
vtnet_unregister_vlan(void *arg, struct ifnet *ifp, uint16_t tag)
{

	if (ifp->if_softc != arg)
		return;

	vtnet_set_vlan_filter(arg, 0, tag);
}

static int
vtnet_ifmedia_upd(struct ifnet *ifp)
{
	struct vtnet_softc *sc;
	struct ifmedia *ifm;

	sc = ifp->if_softc;
	ifm = &sc->vtnet_media;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	return (0);
}

static void
vtnet_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct vtnet_softc *sc;

	sc = ifp->if_softc;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	lwkt_serialize_enter(&sc->vtnet_slz);
	if (vtnet_is_link_up(sc) != 0) {
		ifmr->ifm_status |= IFM_ACTIVE;
		ifmr->ifm_active |= VTNET_MEDIATYPE;
	} else
		ifmr->ifm_active |= IFM_NONE;
	lwkt_serialize_exit(&sc->vtnet_slz);
}

/*
 * Export the counters in struct vtnet_statistics as read-only sysctl
 * leaves under a per-device node hanging off the static "hw" tree.
 */
static void
vtnet_add_statistics(struct vtnet_softc *sc)
{
	device_t dev;
	struct vtnet_statistics *stats;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *child;

	dev = sc->vtnet_dev;
	stats = &sc->vtnet_stats;
	ctx = &sc->vtnet_sysctl_ctx;

	sysctl_ctx_init(ctx);
	sc->vtnet_sysctl_tree = SYSCTL_ADD_NODE(ctx,
	    SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
	    device_get_nameunit(dev), CTLFLAG_RD, 0, "");
	if (sc->vtnet_sysctl_tree == NULL) {
		device_printf(dev, "can't add sysctl node\n");
		return;
	}
	child = SYSCTL_CHILDREN(sc->vtnet_sysctl_tree);

	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_alloc_failed",
	    CTLFLAG_RD, &stats->mbuf_alloc_failed,
	    "Mbuf cluster allocation failures");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_frame_too_large",
	    CTLFLAG_RD, &stats->rx_frame_too_large,
	    "Received frame larger than the mbuf chain");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_enq_replacement_failed",
	    CTLFLAG_RD, &stats->rx_enq_replacement_failed,
	    "Enqueuing the replacement receive mbuf failed");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_mergeable_failed",
	    CTLFLAG_RD, &stats->rx_mergeable_failed,
	    "Mergeable buffers receive failures");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_csum_bad_ethtype",
	    CTLFLAG_RD, &stats->rx_csum_bad_ethtype,
	    "Received checksum offloaded buffer with unsupported "
	    "Ethernet type");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_csum_bad_start",
	    CTLFLAG_RD, &stats->rx_csum_bad_start,
	    "Received checksum offloaded buffer with incorrect start offset");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_csum_bad_ipproto",
	    CTLFLAG_RD, &stats->rx_csum_bad_ipproto,
	    "Received checksum offloaded buffer with incorrect IP protocol");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_csum_bad_offset",
	    CTLFLAG_RD, &stats->rx_csum_bad_offset,
	    "Received checksum offloaded buffer with incorrect offset");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_csum_failed",
	    CTLFLAG_RD, &stats->rx_csum_failed,
	    "Received buffer checksum offload failed");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_csum_offloaded",
	    CTLFLAG_RD, &stats->rx_csum_offloaded,
	    "Received buffer checksum offload succeeded");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_task_rescheduled",
	    CTLFLAG_RD, &stats->rx_task_rescheduled,
	    "Times the receive interrupt task rescheduled itself");

	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_csum_offloaded",
	    CTLFLAG_RD, &stats->tx_csum_offloaded,
	    "Offloaded checksum of transmitted buffer");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_tso_offloaded",
	    CTLFLAG_RD, &stats->tx_tso_offloaded,
	    "Segmentation offload of transmitted buffer");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_csum_bad_ethtype",
	    CTLFLAG_RD, &stats->tx_csum_bad_ethtype,
	    "Aborted transmit of checksum offloaded buffer with unknown "
	    "Ethernet type");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_tso_bad_ethtype",
	    CTLFLAG_RD, &stats->tx_tso_bad_ethtype,
	    "Aborted transmit of TSO buffer with unknown Ethernet type");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_task_rescheduled",
	    CTLFLAG_RD, &stats->tx_task_rescheduled,
	    "Times the transmit interrupt task rescheduled itself");
}

static int
vtnet_enable_rx_intr(struct vtnet_softc *sc)
{

	return (virtqueue_enable_intr(sc->vtnet_rx_vq));
}

static void
vtnet_disable_rx_intr(struct vtnet_softc *sc)
{

	virtqueue_disable_intr(sc->vtnet_rx_vq);
}

static int
vtnet_enable_tx_intr(struct vtnet_softc *sc)
{

#ifdef VTNET_TX_INTR_MODERATION
	return (0);
#else
	return (virtqueue_enable_intr(sc->vtnet_tx_vq));
#endif
}

static void
vtnet_disable_tx_intr(struct vtnet_softc *sc)
{

	virtqueue_disable_intr(sc->vtnet_tx_vq);
}