/*-
 * Copyright (c) 2011, Bryan Venteicher <bryanv@daemoninthecloset.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* Driver for VirtIO network devices. */

#include <sys/cdefs.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/random.h>
#include <sys/sglist.h>
#include <sys/serialize.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_media.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>
#include <net/ifq_var.h>

#include <net/bpf.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/udp.h>
#include <netinet/tcp.h>
#include <netinet/sctp.h>

#include <dev/virtual/virtio/virtio/virtio.h>
#include <dev/virtual/virtio/virtio/virtqueue.h>

#include "virtio_net.h"
#include "virtio_if.h"

struct vtnet_statistics {
	unsigned long	mbuf_alloc_failed;

	unsigned long	rx_frame_too_large;
	unsigned long	rx_enq_replacement_failed;
	unsigned long	rx_mergeable_failed;
	unsigned long	rx_csum_bad_ethtype;
	unsigned long	rx_csum_bad_start;
	unsigned long	rx_csum_bad_ipproto;
	unsigned long	rx_csum_bad_offset;
	unsigned long	rx_csum_failed;
	unsigned long	rx_csum_offloaded;
	unsigned long	rx_task_rescheduled;

	unsigned long	tx_csum_offloaded;
	unsigned long	tx_tso_offloaded;
	unsigned long	tx_csum_bad_ethtype;
	unsigned long	tx_tso_bad_ethtype;
	unsigned long	tx_task_rescheduled;
};

struct vtnet_softc {
	device_t		vtnet_dev;
	struct ifnet		*vtnet_ifp;
	struct lwkt_serialize	vtnet_slz;

	uint32_t		vtnet_flags;
#define VTNET_FLAG_LINK		0x0001
#define VTNET_FLAG_SUSPENDED	0x0002
#define VTNET_FLAG_CTRL_VQ	0x0004
#define VTNET_FLAG_CTRL_RX	0x0008
#define VTNET_FLAG_VLAN_FILTER	0x0010
#define VTNET_FLAG_TSO_ECN	0x0020
#define VTNET_FLAG_MRG_RXBUFS	0x0040
#define VTNET_FLAG_LRO_NOMRG	0x0080

	struct virtqueue	*vtnet_rx_vq;
	struct virtqueue	*vtnet_tx_vq;
	struct virtqueue	*vtnet_ctrl_vq;

	struct vtnet_tx_header	*vtnet_txhdrarea;
	uint32_t		vtnet_txhdridx;
	struct vtnet_mac_filter	*vtnet_macfilter;

	int			vtnet_hdr_size;
	int			vtnet_tx_size;
	int			vtnet_rx_size;
	int			vtnet_rx_process_limit;
	int			vtnet_rx_mbuf_size;
	int			vtnet_rx_mbuf_count;
	int			vtnet_if_flags;
	int			vtnet_watchdog_timer;
	uint64_t		vtnet_features;

	struct task		vtnet_cfgchg_task;

	struct vtnet_statistics	vtnet_stats;

	struct callout		vtnet_tick_ch;

	eventhandler_tag	vtnet_vlan_attach;
	eventhandler_tag	vtnet_vlan_detach;

	struct ifmedia		vtnet_media;
	/*
	 * Fake media type; the host does not provide us with
	 * any real media information.
	 */
#define VTNET_MEDIATYPE		(IFM_ETHER | IFM_1000_T | IFM_FDX)
	char			vtnet_hwaddr[ETHER_ADDR_LEN];

	/*
	 * During reset, the host's VLAN filtering table is lost. The
	 * array below is used to restore all the VLANs configured on
	 * this interface after a reset.
	 */
#define VTNET_VLAN_SHADOW_SIZE	(4096 / 32)
	int			vtnet_nvlans;
	uint32_t		vtnet_vlan_shadow[VTNET_VLAN_SHADOW_SIZE];

	char			vtnet_mtx_name[16];
};

/*
 * When mergeable buffers are not negotiated, the vtnet_rx_header structure
 * below is placed at the beginning of the mbuf data. Use 4 bytes of pad to
 * both keep the VirtIO header and the data non-contiguous and to keep the
 * frame's payload 4 byte aligned.
 *
 * When mergeable buffers are negotiated, the host puts the VirtIO header in
 * the beginning of the first mbuf's data.
 */
#define VTNET_RX_HEADER_PAD	4
struct vtnet_rx_header {
	struct virtio_net_hdr	vrh_hdr;
	char			vrh_pad[VTNET_RX_HEADER_PAD];
} __packed;

/*
 * For each outgoing frame, the vtnet_tx_header below is taken from the
 * pre-allocated vtnet_txhdrarea array.
 */
struct vtnet_tx_header {
	union {
		struct virtio_net_hdr		hdr;
		struct virtio_net_hdr_mrg_rxbuf	mhdr;
	} vth_uhdr;

	struct mbuf	*vth_mbuf;
};

MALLOC_DEFINE(M_VTNET, "VTNET_TX", "Outgoing VTNET TX frame header");

/*
 * The VirtIO specification does not place a limit on the number of MAC
 * addresses the guest driver may request to be filtered. In practice,
 * the host is constrained by available resources. To simplify this driver,
 * impose a reasonably high limit of MAC addresses we will filter before
 * falling back to promiscuous or all-multicast modes.
 */
#define VTNET_MAX_MAC_ENTRIES	128

struct vtnet_mac_table {
	uint32_t	nentries;
	uint8_t		macs[VTNET_MAX_MAC_ENTRIES][ETHER_ADDR_LEN];
} __packed;

struct vtnet_mac_filter {
	struct vtnet_mac_table	vmf_unicast;
	uint32_t		vmf_pad; /* Make tables non-contiguous. */
	struct vtnet_mac_table	vmf_multicast;
};
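
/*
 * A plausible sketch of how the filter above is handed to the host (the
 * body of vtnet_rx_filter_mac() lies outside this excerpt): the unicast
 * and multicast tables are appended to the control-queue command as two
 * separate scatter-gather entries, e.g.
 *
 *	sglist_append(&sg, &filter->vmf_unicast,
 *	    sizeof(uint32_t) + uni_entries * ETHER_ADDR_LEN);
 *	sglist_append(&sg, &filter->vmf_multicast,
 *	    sizeof(uint32_t) + mul_entries * ETHER_ADDR_LEN);
 *
 * vmf_pad presumably exists so the two tables never form one contiguous
 * run of memory, keeping each append in its own segment.
 */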
#define VTNET_WATCHDOG_TIMEOUT	5
#define VTNET_CSUM_OFFLOAD	(CSUM_TCP | CSUM_UDP) // | CSUM_SCTP)

/*
 * Features desired/implemented by this driver.
 */
#define VTNET_FEATURES \
    (VIRTIO_NET_F_MAC		| \
     VIRTIO_NET_F_STATUS	| \
     VIRTIO_NET_F_CTRL_VQ	| \
     VIRTIO_NET_F_CTRL_RX	| \
     VIRTIO_NET_F_CTRL_VLAN	| \
     VIRTIO_NET_F_CSUM		| \
     VIRTIO_NET_F_HOST_TSO4	| \
     VIRTIO_NET_F_HOST_TSO6	| \
     VIRTIO_NET_F_HOST_ECN	| \
     VIRTIO_NET_F_GUEST_CSUM	| \
     VIRTIO_NET_F_GUEST_TSO4	| \
     VIRTIO_NET_F_GUEST_TSO6	| \
     VIRTIO_NET_F_GUEST_ECN	| \
     VIRTIO_NET_F_MRG_RXBUF)

/*
 * The VIRTIO_NET_F_GUEST_TSO[46] features permit the host to send us
 * frames larger than 1514 bytes. We do not yet support software LRO
 * via tcp_lro_rx().
 */
#define VTNET_LRO_FEATURES	(VIRTIO_NET_F_GUEST_TSO4 | \
				 VIRTIO_NET_F_GUEST_TSO6 | \
				 VIRTIO_NET_F_GUEST_ECN)

#define VTNET_MAX_MTU		65536
#define VTNET_MAX_RX_SIZE	65550

/*
 * Used to preallocate the Vq indirect descriptors. The first segment
 * is reserved for the header.
 */
#define VTNET_MIN_RX_SEGS	2
#define VTNET_MAX_RX_SEGS	34
#define VTNET_MAX_TX_SEGS	34

#define IFCAP_TSO4		0x00100	/* can do TCP Segmentation Offload */
#define IFCAP_TSO6		0x00200	/* can do TCP6 Segmentation Offload */
#define IFCAP_LRO		0x00400	/* can do Large Receive Offload */
#define IFCAP_VLAN_HWFILTER	0x10000	/* interface hw can filter vlan tag */
#define IFCAP_VLAN_HWTSO	0x40000	/* can do IFCAP_TSO on VLANs */

/*
 * Assert we can receive and transmit the maximum with regular
 * size clusters.
 */
CTASSERT(((VTNET_MAX_RX_SEGS - 1) * MCLBYTES) >= VTNET_MAX_RX_SIZE);
CTASSERT(((VTNET_MAX_TX_SEGS - 1) * MCLBYTES) >= VTNET_MAX_MTU);
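
/*
 * For reference (assuming the usual MCLBYTES of 2048): one segment is
 * reserved for the VirtIO header, leaving 33 data segments, and
 * 33 * 2048 = 67584 bytes, which covers both VTNET_MAX_RX_SIZE (65550)
 * and VTNET_MAX_MTU (65536).
 */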
/*
 * Determine how many mbufs are in each receive buffer. For LRO without
 * mergeable descriptors, we must allocate an mbuf chain large enough to
 * hold both the vtnet_rx_header and the maximum receivable data.
 */
#define VTNET_NEEDED_RX_MBUFS(_sc)					\
	((((_sc)->vtnet_flags & VTNET_FLAG_LRO_NOMRG) == 0 ? 1 :	\
	 howmany(sizeof(struct vtnet_rx_header) + VTNET_MAX_RX_SIZE,	\
	     (_sc)->vtnet_rx_mbuf_size)))

static int	vtnet_modevent(module_t, int, void *);

static int	vtnet_probe(device_t);
static int	vtnet_attach(device_t);
static int	vtnet_detach(device_t);
static int	vtnet_suspend(device_t);
static int	vtnet_resume(device_t);
static int	vtnet_shutdown(device_t);
static int	vtnet_config_change(device_t);

static void	vtnet_negotiate_features(struct vtnet_softc *);
static int	vtnet_alloc_virtqueues(struct vtnet_softc *);
static void	vtnet_get_hwaddr(struct vtnet_softc *);
static void	vtnet_set_hwaddr(struct vtnet_softc *);
static int	vtnet_is_link_up(struct vtnet_softc *);
static void	vtnet_update_link_status(struct vtnet_softc *);
#if 0
static void	vtnet_watchdog(struct vtnet_softc *);
#endif
static void	vtnet_config_change_task(void *, int);
static int	vtnet_change_mtu(struct vtnet_softc *, int);
static int	vtnet_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);

static int	vtnet_init_rx_vq(struct vtnet_softc *);
static void	vtnet_free_rx_mbufs(struct vtnet_softc *);
static void	vtnet_free_tx_mbufs(struct vtnet_softc *);
static void	vtnet_free_ctrl_vq(struct vtnet_softc *);

static struct mbuf * vtnet_alloc_rxbuf(struct vtnet_softc *, int,
		    struct mbuf **);
static int	vtnet_replace_rxbuf(struct vtnet_softc *,
		    struct mbuf *, int);
static int	vtnet_newbuf(struct vtnet_softc *);
static void	vtnet_discard_merged_rxbuf(struct vtnet_softc *, int);
static void	vtnet_discard_rxbuf(struct vtnet_softc *, struct mbuf *);
static int	vtnet_enqueue_rxbuf(struct vtnet_softc *, struct mbuf *);
static void	vtnet_vlan_tag_remove(struct mbuf *);
static int	vtnet_rx_csum(struct vtnet_softc *, struct mbuf *,
		    struct virtio_net_hdr *);
static int	vtnet_rxeof_merged(struct vtnet_softc *, struct mbuf *, int);
static int	vtnet_rxeof(struct vtnet_softc *, int, int *);
static void	vtnet_rx_intr_task(void *);
static int	vtnet_rx_vq_intr(void *);

static void	vtnet_txeof(struct vtnet_softc *);
static struct mbuf * vtnet_tx_offload(struct vtnet_softc *, struct mbuf *,
		    struct virtio_net_hdr *);
static int	vtnet_enqueue_txbuf(struct vtnet_softc *, struct mbuf **,
		    struct vtnet_tx_header *);
static int	vtnet_encap(struct vtnet_softc *, struct mbuf **);
static void	vtnet_start_locked(struct ifnet *, struct ifaltq_subque *);
static void	vtnet_start(struct ifnet *, struct ifaltq_subque *);
static void	vtnet_tick(void *);
static void	vtnet_tx_intr_task(void *);
static int	vtnet_tx_vq_intr(void *);

static void	vtnet_stop(struct vtnet_softc *);
static int	vtnet_reinit(struct vtnet_softc *);
static void	vtnet_init_locked(struct vtnet_softc *);
static void	vtnet_init(void *);

static void	vtnet_exec_ctrl_cmd(struct vtnet_softc *, void *,
		    struct sglist *, int, int);

static void	vtnet_rx_filter(struct vtnet_softc *sc);
static int	vtnet_ctrl_rx_cmd(struct vtnet_softc *, int, int);
static int	vtnet_set_promisc(struct vtnet_softc *, int);
static int	vtnet_set_allmulti(struct vtnet_softc *, int);
static void	vtnet_rx_filter_mac(struct vtnet_softc *);

static int	vtnet_exec_vlan_filter(struct vtnet_softc *, int, uint16_t);
static void	vtnet_rx_filter_vlan(struct vtnet_softc *);
static void	vtnet_set_vlan_filter(struct vtnet_softc *, int, uint16_t);
static void	vtnet_register_vlan(void *, struct ifnet *, uint16_t);
static void	vtnet_unregister_vlan(void *, struct ifnet *, uint16_t);

static int	vtnet_ifmedia_upd(struct ifnet *);
static void	vtnet_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static void	vtnet_add_statistics(struct vtnet_softc *);

static int	vtnet_enable_rx_intr(struct vtnet_softc *);
static int	vtnet_enable_tx_intr(struct vtnet_softc *);
static void	vtnet_disable_rx_intr(struct vtnet_softc *);
static void	vtnet_disable_tx_intr(struct vtnet_softc *);

/* Tunables. */
static int vtnet_csum_disable = 0;
TUNABLE_INT("hw.vtnet.csum_disable", &vtnet_csum_disable);
static int vtnet_tso_disable = 1;
TUNABLE_INT("hw.vtnet.tso_disable", &vtnet_tso_disable);
static int vtnet_lro_disable = 1;
TUNABLE_INT("hw.vtnet.lro_disable", &vtnet_lro_disable);
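
/*
 * The tunables above are read via TUNABLE_INT() at module load, so they
 * can be set from loader.conf(5), for example:
 *
 *	hw.vtnet.csum_disable="1"
 *	hw.vtnet.tso_disable="0"
 *
 * TSO and LRO default to disabled here; see vtnet_negotiate_features().
 */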
/*
 * Reducing the number of transmit completed interrupts can
 * improve performance. To do so, the define below keeps the
 * Tx vq interrupt disabled and adds calls to vtnet_txeof()
 * in the start and watchdog paths. The price to pay for this
 * is the m_free'ing of transmitted mbufs may be delayed until
 * the watchdog fires.
 */
#define VTNET_TX_INTR_MODERATION

static struct virtio_feature_desc vtnet_feature_desc[] = {
	{ VIRTIO_NET_F_CSUM,		"TxChecksum"	},
	{ VIRTIO_NET_F_GUEST_CSUM,	"RxChecksum"	},
	{ VIRTIO_NET_F_MAC,		"MacAddress"	},
	{ VIRTIO_NET_F_GSO,		"TxAllGSO"	},
	{ VIRTIO_NET_F_GUEST_TSO4,	"RxTSOv4"	},
	{ VIRTIO_NET_F_GUEST_TSO6,	"RxTSOv6"	},
	{ VIRTIO_NET_F_GUEST_ECN,	"RxECN"		},
	{ VIRTIO_NET_F_GUEST_UFO,	"RxUFO"		},
	{ VIRTIO_NET_F_HOST_TSO4,	"TxTSOv4"	},
	{ VIRTIO_NET_F_HOST_TSO6,	"TxTSOv6"	},
	{ VIRTIO_NET_F_HOST_ECN,	"TxTSOECN"	},
	{ VIRTIO_NET_F_HOST_UFO,	"TxUFO"		},
	{ VIRTIO_NET_F_MRG_RXBUF,	"MrgRxBuf"	},
	{ VIRTIO_NET_F_STATUS,		"Status"	},
	{ VIRTIO_NET_F_CTRL_VQ,		"ControlVq"	},
	{ VIRTIO_NET_F_CTRL_RX,		"RxMode"	},
	{ VIRTIO_NET_F_CTRL_VLAN,	"VLanFilter"	},
	{ VIRTIO_NET_F_CTRL_RX_EXTRA,	"RxModeExtra"	},
	{ VIRTIO_NET_F_MQ,		"RFS"		},
	{ 0, NULL }
};
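
/*
 * The table above only supplies human-readable names: registering it via
 * virtio_set_feature_desc() lets the bus print the negotiated feature bits
 * with these labels, rather than raw hex, when the device attaches.
 */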
static device_method_t vtnet_methods[] = {
	/* Device methods. */
	DEVMETHOD(device_probe,		vtnet_probe),
	DEVMETHOD(device_attach,	vtnet_attach),
	DEVMETHOD(device_detach,	vtnet_detach),
	DEVMETHOD(device_suspend,	vtnet_suspend),
	DEVMETHOD(device_resume,	vtnet_resume),
	DEVMETHOD(device_shutdown,	vtnet_shutdown),

	/* VirtIO methods. */
	DEVMETHOD(virtio_config_change, vtnet_config_change),

	{ 0, 0 }
};

static driver_t vtnet_driver = {
	"vtnet",
	vtnet_methods,
	sizeof(struct vtnet_softc)
};

static devclass_t vtnet_devclass;

DRIVER_MODULE(vtnet, virtio_pci, vtnet_driver, vtnet_devclass,
    vtnet_modevent, 0);
MODULE_VERSION(vtnet, 1);
MODULE_DEPEND(vtnet, virtio, 1, 1, 1);

static int
vtnet_modevent(module_t mod, int type, void *unused)
{
	int error;

	error = 0;

	switch (type) {
	case MOD_LOAD:
		break;
	case MOD_UNLOAD:
		break;
	case MOD_SHUTDOWN:
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}

	return (error);
}

static int
vtnet_probe(device_t dev)
{
	if (virtio_get_device_type(dev) != VIRTIO_ID_NETWORK)
		return (ENXIO);

	device_set_desc(dev, "VirtIO Networking Adapter");

	return (BUS_PROBE_DEFAULT);
}

static int
vtnet_attach(device_t dev)
{
	struct vtnet_softc *sc;
	struct ifnet *ifp;
	int tx_size, error;

	sc = device_get_softc(dev);
	sc->vtnet_dev = dev;

	lwkt_serialize_init(&sc->vtnet_slz);
	callout_init(&sc->vtnet_tick_ch);

	ifmedia_init(&sc->vtnet_media, IFM_IMASK, vtnet_ifmedia_upd,
	    vtnet_ifmedia_sts);
	ifmedia_add(&sc->vtnet_media, VTNET_MEDIATYPE, 0, NULL);
	ifmedia_set(&sc->vtnet_media, VTNET_MEDIATYPE);

	vtnet_add_statistics(sc);

	virtio_set_feature_desc(dev, vtnet_feature_desc);
	vtnet_negotiate_features(sc);

	if (virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF)) {
		sc->vtnet_flags |= VTNET_FLAG_MRG_RXBUFS;
		sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
	} else {
		sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr);
	}

	sc->vtnet_rx_mbuf_size = MCLBYTES;
	sc->vtnet_rx_mbuf_count = VTNET_NEEDED_RX_MBUFS(sc);

	if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VQ)) {
		sc->vtnet_flags |= VTNET_FLAG_CTRL_VQ;

		if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_RX))
			sc->vtnet_flags |= VTNET_FLAG_CTRL_RX;
		if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VLAN))
			sc->vtnet_flags |= VTNET_FLAG_VLAN_FILTER;
	}

	vtnet_get_hwaddr(sc);

	error = vtnet_alloc_virtqueues(sc);
	if (error) {
		device_printf(dev, "cannot allocate virtqueues\n");
		goto fail;
	}

	ifp = sc->vtnet_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "cannot allocate ifnet structure\n");
		error = ENOSPC;
		goto fail;
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = vtnet_init;
	ifp->if_start = vtnet_start;
	ifp->if_ioctl = vtnet_ioctl;

	sc->vtnet_rx_size = virtqueue_size(sc->vtnet_rx_vq);
	sc->vtnet_rx_process_limit = sc->vtnet_rx_size;

	tx_size = virtqueue_size(sc->vtnet_tx_vq);
	sc->vtnet_tx_size = tx_size;
	sc->vtnet_txhdridx = 0;
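	/*
	 * Pre-allocate one header per in-flight Tx frame. The
	 * (tx_size / 2) + 1 sizing is presumably because each frame
	 * consumes at least two descriptors (one for the header, one for
	 * the data), so at most half the ring can hold frames at any
	 * time; the +1 leaves a spare slot.
	 */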
	sc->vtnet_txhdrarea = contigmalloc(
	    ((sc->vtnet_tx_size / 2) + 1) * sizeof(struct vtnet_tx_header),
	    M_VTNET, M_WAITOK, 0, BUS_SPACE_MAXADDR, 4, 0);
	if (sc->vtnet_txhdrarea == NULL) {
		device_printf(dev, "cannot contigmalloc the tx headers\n");
		goto fail;
	}
	sc->vtnet_macfilter = contigmalloc(
	    sizeof(struct vtnet_mac_filter),
	    M_DEVBUF, M_WAITOK, 0, BUS_SPACE_MAXADDR, 4, 0);
	if (sc->vtnet_macfilter == NULL) {
		device_printf(dev,
		    "cannot contigmalloc the mac filter table\n");
		goto fail;
	}
	ifq_set_maxlen(&ifp->if_snd, tx_size - 1);
	ifq_set_ready(&ifp->if_snd);

	ether_ifattach(ifp, sc->vtnet_hwaddr, NULL);

	if (virtio_with_feature(dev, VIRTIO_NET_F_STATUS)) {
		//ifp->if_capabilities |= IFCAP_LINKSTATE;
		kprintf("add dynamic link state\n");
	}

	/* Tell the upper layer(s) we support long frames. */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |= IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU;

	if (virtio_with_feature(dev, VIRTIO_NET_F_CSUM)) {
		ifp->if_capabilities |= IFCAP_TXCSUM;

		if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4))
			ifp->if_capabilities |= IFCAP_TSO4;
		if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6))
			ifp->if_capabilities |= IFCAP_TSO6;
		if (ifp->if_capabilities & IFCAP_TSO)
			ifp->if_capabilities |= IFCAP_VLAN_HWTSO;

		if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_ECN))
			sc->vtnet_flags |= VTNET_FLAG_TSO_ECN;
	}

	if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_CSUM)) {
		ifp->if_capabilities |= IFCAP_RXCSUM;

		if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO4) ||
		    virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO6))
			ifp->if_capabilities |= IFCAP_LRO;
	}

	if (ifp->if_capabilities & IFCAP_HWCSUM) {
		/*
		 * VirtIO does not support VLAN tagging, but we can fake
		 * it by inserting and removing the 802.1Q header during
		 * transmit and receive. We are then able to do checksum
		 * offloading of VLAN frames.
		 */
		ifp->if_capabilities |=
		    IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM;
	}

	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Capabilities after here are not enabled by default.
	 */

	if (sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER) {
		ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;

		sc->vtnet_vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
		    vtnet_register_vlan, sc, EVENTHANDLER_PRI_FIRST);
		sc->vtnet_vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
		    vtnet_unregister_vlan, sc, EVENTHANDLER_PRI_FIRST);
	}

	TASK_INIT(&sc->vtnet_cfgchg_task, 0, vtnet_config_change_task, sc);

	error = virtio_setup_intr(dev, &sc->vtnet_slz);
	if (error) {
		device_printf(dev, "cannot setup virtqueue interrupts\n");
		ether_ifdetach(ifp);
		goto fail;
	}

	/*
	 * Device defaults to promiscuous mode for backwards
	 * compatibility. Turn it off if possible.
	 */
	if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) {
		lwkt_serialize_enter(&sc->vtnet_slz);
		if (vtnet_set_promisc(sc, 0) != 0) {
			ifp->if_flags |= IFF_PROMISC;
			device_printf(dev,
			    "cannot disable promiscuous mode\n");
		}
		lwkt_serialize_exit(&sc->vtnet_slz);
	} else
		ifp->if_flags |= IFF_PROMISC;

fail:
	if (error)
		vtnet_detach(dev);

	return (error);
}

static int
vtnet_detach(device_t dev)
{
	struct vtnet_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	ifp = sc->vtnet_ifp;

	if (device_is_attached(dev)) {
		lwkt_serialize_enter(&sc->vtnet_slz);
		vtnet_stop(sc);
		lwkt_serialize_exit(&sc->vtnet_slz);

		callout_stop(&sc->vtnet_tick_ch);
		taskqueue_drain(taskqueue_swi, &sc->vtnet_cfgchg_task);

		ether_ifdetach(ifp);
	}

	if (sc->vtnet_vlan_attach != NULL) {
		EVENTHANDLER_DEREGISTER(vlan_config, sc->vtnet_vlan_attach);
		sc->vtnet_vlan_attach = NULL;
	}
	if (sc->vtnet_vlan_detach != NULL) {
		EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vtnet_vlan_detach);
		sc->vtnet_vlan_detach = NULL;
	}

	if (ifp) {
		if_free(ifp);
		sc->vtnet_ifp = NULL;
	}

	if (sc->vtnet_rx_vq != NULL)
		vtnet_free_rx_mbufs(sc);
	if (sc->vtnet_tx_vq != NULL)
		vtnet_free_tx_mbufs(sc);
	if (sc->vtnet_ctrl_vq != NULL)
		vtnet_free_ctrl_vq(sc);

	if (sc->vtnet_txhdrarea != NULL) {
		contigfree(sc->vtnet_txhdrarea,
		    ((sc->vtnet_tx_size / 2) + 1) *
		    sizeof(struct vtnet_tx_header), M_VTNET);
		sc->vtnet_txhdrarea = NULL;
	}
	if (sc->vtnet_macfilter != NULL) {
		contigfree(sc->vtnet_macfilter,
		    sizeof(struct vtnet_mac_filter), M_DEVBUF);
		sc->vtnet_macfilter = NULL;
	}

	ifmedia_removeall(&sc->vtnet_media);

	return (0);
}

static int
vtnet_suspend(device_t dev)
{
	struct vtnet_softc *sc;

	sc = device_get_softc(dev);

	lwkt_serialize_enter(&sc->vtnet_slz);
	vtnet_stop(sc);
	sc->vtnet_flags |= VTNET_FLAG_SUSPENDED;
	lwkt_serialize_exit(&sc->vtnet_slz);

	return (0);
}

static int
vtnet_resume(device_t dev)
{
	struct vtnet_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	ifp = sc->vtnet_ifp;

	lwkt_serialize_enter(&sc->vtnet_slz);
	if (ifp->if_flags & IFF_UP)
		vtnet_init_locked(sc);
	sc->vtnet_flags &= ~VTNET_FLAG_SUSPENDED;
	lwkt_serialize_exit(&sc->vtnet_slz);

	return (0);
}

static int
vtnet_shutdown(device_t dev)
{
	/*
	 * Suspend already does all of what we need to
	 * do here; we just never expect to be resumed.
	 */
	return (vtnet_suspend(dev));
}

static int
vtnet_config_change(device_t dev)
{
	struct vtnet_softc *sc;

	sc = device_get_softc(dev);

	taskqueue_enqueue(taskqueue_thread[mycpuid], &sc->vtnet_cfgchg_task);

	return (1);
}

static void
vtnet_negotiate_features(struct vtnet_softc *sc)
{
	device_t dev;
	uint64_t mask, features;

	dev = sc->vtnet_dev;
	mask = 0;

	if (vtnet_csum_disable)
		mask |= VIRTIO_NET_F_CSUM | VIRTIO_NET_F_GUEST_CSUM;

	/*
	 * TSO and LRO are only available when their corresponding
	 * checksum offload feature is also negotiated.
764 */ 765 766 if (vtnet_csum_disable || vtnet_tso_disable) 767 mask |= VIRTIO_NET_F_HOST_TSO4 | VIRTIO_NET_F_HOST_TSO6 | 768 VIRTIO_NET_F_HOST_ECN; 769 770 if (vtnet_csum_disable || vtnet_lro_disable) 771 mask |= VTNET_LRO_FEATURES; 772 773 features = VTNET_FEATURES & ~mask; 774 features |= VIRTIO_F_NOTIFY_ON_EMPTY; 775 sc->vtnet_features = virtio_negotiate_features(dev, features); 776 } 777 778 static int 779 vtnet_alloc_virtqueues(struct vtnet_softc *sc) 780 { 781 device_t dev; 782 struct vq_alloc_info vq_info[3]; 783 int nvqs, rxsegs; 784 785 dev = sc->vtnet_dev; 786 nvqs = 2; 787 788 /* 789 * Indirect descriptors are not needed for the Rx 790 * virtqueue when mergeable buffers are negotiated. 791 * The header is placed inline with the data, not 792 * in a separate descriptor, and mbuf clusters are 793 * always physically contiguous. 794 */ 795 if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) { 796 rxsegs = sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG ? 797 VTNET_MAX_RX_SEGS : VTNET_MIN_RX_SEGS; 798 } else 799 rxsegs = 0; 800 801 VQ_ALLOC_INFO_INIT(&vq_info[0], rxsegs, 802 vtnet_rx_vq_intr, sc, &sc->vtnet_rx_vq, 803 "%s receive", device_get_nameunit(dev)); 804 805 VQ_ALLOC_INFO_INIT(&vq_info[1], VTNET_MAX_TX_SEGS, 806 vtnet_tx_vq_intr, sc, &sc->vtnet_tx_vq, 807 "%s transmit", device_get_nameunit(dev)); 808 809 if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) { 810 nvqs++; 811 812 VQ_ALLOC_INFO_INIT(&vq_info[2], 0, NULL, NULL, 813 &sc->vtnet_ctrl_vq, "%s control", 814 device_get_nameunit(dev)); 815 } 816 817 return (virtio_alloc_virtqueues(dev, 0, nvqs, vq_info)); 818 } 819 820 static void 821 vtnet_get_hwaddr(struct vtnet_softc *sc) 822 { 823 device_t dev; 824 825 dev = sc->vtnet_dev; 826 827 if (virtio_with_feature(dev, VIRTIO_NET_F_MAC)) { 828 virtio_read_device_config(dev, 829 offsetof(struct virtio_net_config, mac), 830 sc->vtnet_hwaddr, ETHER_ADDR_LEN); 831 } else { 832 /* Generate random locally administered unicast address. 
static void
vtnet_get_hwaddr(struct vtnet_softc *sc)
{
	device_t dev;

	dev = sc->vtnet_dev;

	if (virtio_with_feature(dev, VIRTIO_NET_F_MAC)) {
		virtio_read_device_config(dev,
		    offsetof(struct virtio_net_config, mac),
		    sc->vtnet_hwaddr, ETHER_ADDR_LEN);
	} else {
		/* Generate random locally administered unicast address. */
		sc->vtnet_hwaddr[0] = 0xB2;
		karc4rand(&sc->vtnet_hwaddr[1], ETHER_ADDR_LEN - 1);

		vtnet_set_hwaddr(sc);
	}
}

static void
vtnet_set_hwaddr(struct vtnet_softc *sc)
{
	device_t dev;

	dev = sc->vtnet_dev;

	virtio_write_device_config(dev,
	    offsetof(struct virtio_net_config, mac),
	    sc->vtnet_hwaddr, ETHER_ADDR_LEN);
}

static int
vtnet_is_link_up(struct vtnet_softc *sc)
{
	device_t dev;
	struct ifnet *ifp;
	uint16_t status;

	dev = sc->vtnet_dev;
	ifp = sc->vtnet_ifp;

	ASSERT_SERIALIZED(&sc->vtnet_slz);

	status = virtio_read_dev_config_2(dev,
	    offsetof(struct virtio_net_config, status));

	return ((status & VIRTIO_NET_S_LINK_UP) != 0);
}

static void
vtnet_update_link_status(struct vtnet_softc *sc)
{
	device_t dev;
	struct ifnet *ifp;
	struct ifaltq_subque *ifsq;
	int link;

	dev = sc->vtnet_dev;
	ifp = sc->vtnet_ifp;
	ifsq = ifq_get_subq_default(&ifp->if_snd);

	link = vtnet_is_link_up(sc);

	if (link && ((sc->vtnet_flags & VTNET_FLAG_LINK) == 0)) {
		sc->vtnet_flags |= VTNET_FLAG_LINK;
		if (bootverbose)
			device_printf(dev, "Link is up\n");
		ifp->if_link_state = LINK_STATE_UP;
		if_link_state_change(ifp);
		if (!ifsq_is_empty(ifsq))
			vtnet_start_locked(ifp, ifsq);
	} else if (!link && (sc->vtnet_flags & VTNET_FLAG_LINK)) {
		sc->vtnet_flags &= ~VTNET_FLAG_LINK;
		if (bootverbose)
			device_printf(dev, "Link is down\n");

		ifp->if_link_state = LINK_STATE_DOWN;
		if_link_state_change(ifp);
	}
}

#if 0
static void
vtnet_watchdog(struct vtnet_softc *sc)
{
	struct ifnet *ifp;

	ifp = sc->vtnet_ifp;

#ifdef VTNET_TX_INTR_MODERATION
	vtnet_txeof(sc);
#endif

	if (sc->vtnet_watchdog_timer == 0 || --sc->vtnet_watchdog_timer)
		return;

	if_printf(ifp, "watchdog timeout -- resetting\n");
#ifdef VTNET_DEBUG
	virtqueue_dump(sc->vtnet_tx_vq);
#endif
	ifp->if_oerrors++;
	ifp->if_flags &= ~IFF_RUNNING;
	vtnet_init_locked(sc);
}
#endif

static void
vtnet_config_change_task(void *arg, int pending)
{
	struct vtnet_softc *sc;

	sc = arg;

	lwkt_serialize_enter(&sc->vtnet_slz);
	vtnet_update_link_status(sc);
	lwkt_serialize_exit(&sc->vtnet_slz);
}

static int
vtnet_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
	struct vtnet_softc *sc;
	struct ifreq *ifr;
	int reinit, mask, error;

	sc = ifp->if_softc;
	ifr = (struct ifreq *) data;
	reinit = 0;
	error = 0;

	switch (cmd) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > VTNET_MAX_MTU)
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu) {
			lwkt_serialize_enter(&sc->vtnet_slz);
			error = vtnet_change_mtu(sc, ifr->ifr_mtu);
			lwkt_serialize_exit(&sc->vtnet_slz);
		}
		break;

	case SIOCSIFFLAGS:
		lwkt_serialize_enter(&sc->vtnet_slz);
		if ((ifp->if_flags & IFF_UP) == 0) {
			if (ifp->if_flags & IFF_RUNNING)
				vtnet_stop(sc);
		} else if (ifp->if_flags & IFF_RUNNING) {
			if ((ifp->if_flags ^ sc->vtnet_if_flags) &
			    (IFF_PROMISC | IFF_ALLMULTI)) {
				if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX)
					vtnet_rx_filter(sc);
				else
					error = ENOTSUP;
			}
		} else
			vtnet_init_locked(sc);

		if (error == 0)
			sc->vtnet_if_flags = ifp->if_flags;
		lwkt_serialize_exit(&sc->vtnet_slz);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		lwkt_serialize_enter(&sc->vtnet_slz);
		if ((sc->vtnet_flags & VTNET_FLAG_CTRL_RX) &&
		    (ifp->if_flags & IFF_RUNNING))
			vtnet_rx_filter_mac(sc);
		lwkt_serialize_exit(&sc->vtnet_slz);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->vtnet_media, cmd);
		break;

	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;

		lwkt_serialize_enter(&sc->vtnet_slz);

		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if (ifp->if_capenable & IFCAP_TXCSUM)
				ifp->if_hwassist |= VTNET_CSUM_OFFLOAD;
			else
				ifp->if_hwassist &= ~VTNET_CSUM_OFFLOAD;
		}

		if (mask & IFCAP_TSO4) {
			ifp->if_capenable ^= IFCAP_TSO4;
			if (ifp->if_capenable & IFCAP_TSO4)
				ifp->if_hwassist |= CSUM_TSO;
			else
				ifp->if_hwassist &= ~CSUM_TSO;
		}

		if (mask & IFCAP_RXCSUM) {
			ifp->if_capenable ^= IFCAP_RXCSUM;
			reinit = 1;
		}

		if (mask & IFCAP_LRO) {
			ifp->if_capenable ^= IFCAP_LRO;
			reinit = 1;
		}

		if (mask & IFCAP_VLAN_HWFILTER) {
			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
			reinit = 1;
		}

		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;

		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;

		if (reinit && (ifp->if_flags & IFF_RUNNING)) {
			ifp->if_flags &= ~IFF_RUNNING;
			vtnet_init_locked(sc);
		}
		//VLAN_CAPABILITIES(ifp);

		lwkt_serialize_exit(&sc->vtnet_slz);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}
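
/*
 * A worked example for the cluster sizing below (assuming the usual
 * MCLBYTES=2048, MJUMPAGESIZE=PAGE_SIZE and MJUM9BYTES=9216): without
 * mergeable buffers, an MTU of 9000 requires sizeof(struct vtnet_rx_header)
 * (14 bytes, packed) + sizeof(struct ether_vlan_header) (18 bytes) +
 * 9000 = 9032 bytes per frame, so 9KB clusters are selected.
 */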
static int
vtnet_change_mtu(struct vtnet_softc *sc, int new_mtu)
{
	struct ifnet *ifp;
	int new_frame_size, clsize;

	ifp = sc->vtnet_ifp;

	if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
		new_frame_size = sizeof(struct vtnet_rx_header) +
		    sizeof(struct ether_vlan_header) + new_mtu;

		if (new_frame_size > MJUM9BYTES)
			return (EINVAL);

		if (new_frame_size <= MCLBYTES)
			clsize = MCLBYTES;
		else
			clsize = MJUM9BYTES;
	} else {
		new_frame_size = sizeof(struct virtio_net_hdr_mrg_rxbuf) +
		    sizeof(struct ether_vlan_header) + new_mtu;

		if (new_frame_size <= MCLBYTES)
			clsize = MCLBYTES;
		else
			clsize = MJUMPAGESIZE;
	}

	sc->vtnet_rx_mbuf_size = clsize;
	sc->vtnet_rx_mbuf_count = VTNET_NEEDED_RX_MBUFS(sc);
	KASSERT(sc->vtnet_rx_mbuf_count < VTNET_MAX_RX_SEGS,
	    ("too many rx mbufs: %d", sc->vtnet_rx_mbuf_count));

	ifp->if_mtu = new_mtu;

	if (ifp->if_flags & IFF_RUNNING) {
		ifp->if_flags &= ~IFF_RUNNING;
		vtnet_init_locked(sc);
	}

	return (0);
}

static int
vtnet_init_rx_vq(struct vtnet_softc *sc)
{
	struct virtqueue *vq;
	int nbufs, error;

	vq = sc->vtnet_rx_vq;
	nbufs = 0;
	error = ENOSPC;

	while (!virtqueue_full(vq)) {
		if ((error = vtnet_newbuf(sc)) != 0)
			break;
		nbufs++;
	}

	if (nbufs > 0) {
		virtqueue_notify(vq, &sc->vtnet_slz);

		/*
		 * EMSGSIZE signifies the virtqueue did not have enough
		 * entries available to hold the last mbuf. This is not
		 * an error. We should not get ENOSPC since we check if
		 * the virtqueue is full before attempting to add a
		 * buffer.
		 */
		if (error == EMSGSIZE)
			error = 0;
	}

	return (error);
}

static void
vtnet_free_rx_mbufs(struct vtnet_softc *sc)
{
	struct virtqueue *vq;
	struct mbuf *m;
	int last;

	vq = sc->vtnet_rx_vq;
	last = 0;

	while ((m = virtqueue_drain(vq, &last)) != NULL)
		m_freem(m);

	KASSERT(virtqueue_empty(vq), ("mbufs remaining in Rx Vq"));
}

static void
vtnet_free_tx_mbufs(struct vtnet_softc *sc)
{
	struct virtqueue *vq;
	struct vtnet_tx_header *txhdr;
	int last;

	vq = sc->vtnet_tx_vq;
	last = 0;

	while ((txhdr = virtqueue_drain(vq, &last)) != NULL) {
		m_freem(txhdr->vth_mbuf);
	}

	KASSERT(virtqueue_empty(vq), ("mbufs remaining in Tx Vq"));
}

static void
vtnet_free_ctrl_vq(struct vtnet_softc *sc)
{
	/*
	 * The control virtqueue is only polled, therefore
	 * it should already be empty.
	 */
	KASSERT(virtqueue_empty(sc->vtnet_ctrl_vq),
	    ("Ctrl Vq not empty"));
}

static struct mbuf *
vtnet_alloc_rxbuf(struct vtnet_softc *sc, int nbufs, struct mbuf **m_tailp)
{
	struct mbuf *m_head, *m_tail, *m;
	int i, clsize;

	clsize = sc->vtnet_rx_mbuf_size;

	/* Use getcl instead of getjcl; see the if_mxge.c comment at line 2398. */
	//m_head = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, clsize);
	m_head = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m_head == NULL)
		goto fail;

	m_head->m_len = clsize;
	m_tail = m_head;

	if (nbufs > 1) {
		KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG,
		    ("chained Rx mbuf requested without LRO_NOMRG"));

		for (i = 0; i < nbufs - 1; i++) {
			//m = m_getjcl(M_DONTWAIT, MT_DATA, 0, clsize);
			m = m_getcl(MB_DONTWAIT, MT_DATA, 0);
			if (m == NULL)
				goto fail;

			m->m_len = clsize;
			m_tail->m_next = m;
			m_tail = m;
		}
	}

	if (m_tailp != NULL)
		*m_tailp = m_tail;

	return (m_head);

fail:
	sc->vtnet_stats.mbuf_alloc_failed++;
	m_freem(m_head);

	return (NULL);
}

static int
vtnet_replace_rxbuf(struct vtnet_softc *sc, struct mbuf *m0, int len0)
{
	struct mbuf *m, *m_prev;
	struct mbuf *m_new, *m_tail;
	int len, clsize, nreplace, error;

	m = m0;
	m_prev = NULL;
	len = len0;

	m_tail = NULL;
	clsize = sc->vtnet_rx_mbuf_size;
	nreplace = 0;

	if (m->m_next != NULL)
		KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG,
		    ("chained Rx mbuf without LRO_NOMRG"));

	/*
	 * Since LRO_NOMRG mbuf chains are so large, we want to avoid
	 * allocating an entire chain for each received frame. When
	 * the received frame's length is less than that of the chain,
	 * the unused mbufs are reassigned to the new chain.
	 */
	while (len > 0) {
		/*
		 * Something is seriously wrong if we received
		 * a frame larger than the mbuf chain. Drop it.
		 */
		if (m == NULL) {
			sc->vtnet_stats.rx_frame_too_large++;
			return (EMSGSIZE);
		}

		KASSERT(m->m_len == clsize,
		    ("mbuf length not expected cluster size: %d",
		    m->m_len));

		m->m_len = MIN(m->m_len, len);
		len -= m->m_len;

		m_prev = m;
		m = m->m_next;
		nreplace++;
	}

	KASSERT(m_prev != NULL, ("m_prev == NULL"));
	KASSERT(nreplace <= sc->vtnet_rx_mbuf_count,
	    ("too many replacement mbufs: %d/%d", nreplace,
	    sc->vtnet_rx_mbuf_count));

	m_new = vtnet_alloc_rxbuf(sc, nreplace, &m_tail);
	if (m_new == NULL) {
		m_prev->m_len = clsize;
		return (ENOBUFS);
	}

	/*
	 * Move unused mbufs, if any, from the original chain
	 * onto the end of the new chain.
	 */
	if (m_prev->m_next != NULL) {
		m_tail->m_next = m_prev->m_next;
		m_prev->m_next = NULL;
	}

	error = vtnet_enqueue_rxbuf(sc, m_new);
	if (error) {
		/*
		 * BAD! We could not enqueue the replacement mbuf chain. We
		 * must restore the m0 chain to the original state if it was
		 * modified so we can subsequently discard it.
		 *
		 * NOTE: The replacement is supposed to be an identical copy
		 * of the one just dequeued so this is an unexpected error.
		 */
		sc->vtnet_stats.rx_enq_replacement_failed++;

		if (m_tail->m_next != NULL) {
			m_prev->m_next = m_tail->m_next;
			m_tail->m_next = NULL;
		}

		m_prev->m_len = clsize;
		m_freem(m_new);
	}

	return (error);
}

static int
vtnet_newbuf(struct vtnet_softc *sc)
{
	struct mbuf *m;
	int error;

	m = vtnet_alloc_rxbuf(sc, sc->vtnet_rx_mbuf_count, NULL);
	if (m == NULL)
		return (ENOBUFS);

	error = vtnet_enqueue_rxbuf(sc, m);
	if (error)
		m_freem(m);

	return (error);
}

static void
vtnet_discard_merged_rxbuf(struct vtnet_softc *sc, int nbufs)
{
	struct virtqueue *vq;
	struct mbuf *m;

	vq = sc->vtnet_rx_vq;

	while (--nbufs > 0) {
		if ((m = virtqueue_dequeue(vq, NULL)) == NULL)
			break;
		vtnet_discard_rxbuf(sc, m);
	}
}

static void
vtnet_discard_rxbuf(struct vtnet_softc *sc, struct mbuf *m)
{
	int error;

	/*
	 * Requeue the discarded mbuf. This should always be
	 * successful since it was just dequeued.
	 */
	error = vtnet_enqueue_rxbuf(sc, m);
	KASSERT(error == 0, ("cannot requeue discarded mbuf"));
}

static int
vtnet_enqueue_rxbuf(struct vtnet_softc *sc, struct mbuf *m)
{
	struct sglist sg;
	struct sglist_seg segs[VTNET_MAX_RX_SEGS];
	struct vtnet_rx_header *rxhdr;
	struct virtio_net_hdr *hdr;
	uint8_t *mdata;
	int offset, error;

	ASSERT_SERIALIZED(&sc->vtnet_slz);
	if ((sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG) == 0)
		KASSERT(m->m_next == NULL, ("chained Rx mbuf"));

	sglist_init(&sg, VTNET_MAX_RX_SEGS, segs);

	mdata = mtod(m, uint8_t *);
	offset = 0;

	if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
		rxhdr = (struct vtnet_rx_header *) mdata;
		hdr = &rxhdr->vrh_hdr;
		offset += sizeof(struct vtnet_rx_header);

		error = sglist_append(&sg, hdr, sc->vtnet_hdr_size);
		KASSERT(error == 0, ("cannot add header to sglist"));
	}

	error = sglist_append(&sg, mdata + offset, m->m_len - offset);
	if (error)
		return (error);

	if (m->m_next != NULL) {
		error = sglist_append_mbuf(&sg, m->m_next);
		if (error)
			return (error);
	}

	return (virtqueue_enqueue(sc->vtnet_rx_vq, m, &sg, 0, sg.sg_nseg));
}

static void
vtnet_vlan_tag_remove(struct mbuf *m)
{
	struct ether_vlan_header *evl;

	evl = mtod(m, struct ether_vlan_header *);

	m->m_pkthdr.ether_vlantag = ntohs(evl->evl_tag);
	m->m_flags |= M_VLANTAG;

	/* Strip the 802.1Q header. */
	bcopy((char *) evl, (char *) evl + ETHER_VLAN_ENCAP_LEN,
	    ETHER_HDR_LEN - ETHER_TYPE_LEN);
	m_adj(m, ETHER_VLAN_ENCAP_LEN);
}

/*
 * Alternative method of doing receive checksum offloading. Rather
 * than parsing the received frame down to the IP header, use the
 * csum_offset to determine which CSUM_* flags are appropriate. We
 * can get by with doing this only because the checksum offsets are
 * unique for the things we care about.
 */
static int
vtnet_rx_csum(struct vtnet_softc *sc, struct mbuf *m,
    struct virtio_net_hdr *hdr)
{
	struct ether_header *eh;
	struct ether_vlan_header *evh;
	struct udphdr *udp;
	int csum_len;
	uint16_t eth_type;

	csum_len = hdr->csum_start + hdr->csum_offset;

	if (csum_len < sizeof(struct ether_header) + sizeof(struct ip))
		return (1);
	if (m->m_len < csum_len)
		return (1);

	eh = mtod(m, struct ether_header *);
	eth_type = ntohs(eh->ether_type);
	if (eth_type == ETHERTYPE_VLAN) {
		evh = mtod(m, struct ether_vlan_header *);
		eth_type = ntohs(evh->evl_proto);
	}

	if (eth_type != ETHERTYPE_IP && eth_type != ETHERTYPE_IPV6) {
		sc->vtnet_stats.rx_csum_bad_ethtype++;
		return (1);
	}

	/* Use the offset to determine the appropriate CSUM_* flags. */
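	/*
	 * For reference, the offsets that make this disambiguation work
	 * (standard BSD protocol headers): offsetof(struct udphdr, uh_sum)
	 * is 6, offsetof(struct sctphdr, checksum) is 8, and
	 * offsetof(struct tcphdr, th_sum) is 16, so no two cases below
	 * can collide.
	 */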
	switch (hdr->csum_offset) {
	case offsetof(struct udphdr, uh_sum):
		if (m->m_len < hdr->csum_start + sizeof(struct udphdr))
			return (1);
		udp = (struct udphdr *)(mtod(m, uint8_t *) + hdr->csum_start);
		if (udp->uh_sum == 0)
			return (0);

		/* FALLTHROUGH */

	case offsetof(struct tcphdr, th_sum):
		m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
		m->m_pkthdr.csum_data = 0xFFFF;
		break;

	case offsetof(struct sctphdr, checksum):
		//m->m_pkthdr.csum_flags |= CSUM_SCTP_VALID;
		break;

	default:
		sc->vtnet_stats.rx_csum_bad_offset++;
		return (1);
	}

	sc->vtnet_stats.rx_csum_offloaded++;

	return (0);
}

static int
vtnet_rxeof_merged(struct vtnet_softc *sc, struct mbuf *m_head, int nbufs)
{
	struct ifnet *ifp;
	struct virtqueue *vq;
	struct mbuf *m, *m_tail;
	int len;

	ifp = sc->vtnet_ifp;
	vq = sc->vtnet_rx_vq;
	m_tail = m_head;

	while (--nbufs > 0) {
		m = virtqueue_dequeue(vq, &len);
		if (m == NULL) {
			ifp->if_ierrors++;
			goto fail;
		}

		if (vtnet_newbuf(sc) != 0) {
			ifp->if_iqdrops++;
			vtnet_discard_rxbuf(sc, m);
			if (nbufs > 1)
				vtnet_discard_merged_rxbuf(sc, nbufs);
			goto fail;
		}

		if (m->m_len < len)
			len = m->m_len;

		m->m_len = len;
		m->m_flags &= ~M_PKTHDR;

		m_head->m_pkthdr.len += len;
		m_tail->m_next = m;
		m_tail = m;
	}

	return (0);

fail:
	sc->vtnet_stats.rx_mergeable_failed++;
	m_freem(m_head);

	return (1);
}
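
/*
 * Receive-side buffer layout, for orientation: without mergeable buffers,
 * each descriptor chain is one frame and begins with a vtnet_rx_header
 * (VirtIO header plus pad). With mergeable buffers, the first buffer of a
 * frame starts with a virtio_net_hdr_mrg_rxbuf whose num_buffers field
 * says how many ring entries the host consumed; vtnet_rxeof_merged() above
 * stitches those entries into a single mbuf chain.
 */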
static int
vtnet_rxeof(struct vtnet_softc *sc, int count, int *rx_npktsp)
{
	struct virtio_net_hdr lhdr;
	struct ifnet *ifp;
	struct virtqueue *vq;
	struct mbuf *m;
	struct ether_header *eh;
	struct virtio_net_hdr *hdr;
	struct virtio_net_hdr_mrg_rxbuf *mhdr;
	int len, deq, nbufs, adjsz, rx_npkts;

	ifp = sc->vtnet_ifp;
	vq = sc->vtnet_rx_vq;
	hdr = &lhdr;
	deq = 0;
	rx_npkts = 0;

	ASSERT_SERIALIZED(&sc->vtnet_slz);

	while (--count >= 0) {
		m = virtqueue_dequeue(vq, &len);
		if (m == NULL)
			break;
		deq++;

		if (len < sc->vtnet_hdr_size + ETHER_HDR_LEN) {
			ifp->if_ierrors++;
			vtnet_discard_rxbuf(sc, m);
			continue;
		}

		if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
			nbufs = 1;
			adjsz = sizeof(struct vtnet_rx_header);
			/*
			 * Account for our pad between the header and
			 * the actual start of the frame.
			 */
			len += VTNET_RX_HEADER_PAD;
		} else {
			mhdr = mtod(m, struct virtio_net_hdr_mrg_rxbuf *);
			nbufs = mhdr->num_buffers;
			adjsz = sizeof(struct virtio_net_hdr_mrg_rxbuf);
		}

		if (vtnet_replace_rxbuf(sc, m, len) != 0) {
			ifp->if_iqdrops++;
			vtnet_discard_rxbuf(sc, m);
			if (nbufs > 1)
				vtnet_discard_merged_rxbuf(sc, nbufs);
			continue;
		}

		m->m_pkthdr.len = len;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.csum_flags = 0;

		if (nbufs > 1) {
			if (vtnet_rxeof_merged(sc, m, nbufs) != 0)
				continue;
		}

		ifp->if_ipackets++;

		/*
		 * Save copy of header before we strip it. For both mergeable
		 * and non-mergeable, the VirtIO header is placed first in the
		 * mbuf's data. We no longer need num_buffers, so always use a
		 * virtio_net_hdr.
		 */
		memcpy(hdr, mtod(m, void *), sizeof(struct virtio_net_hdr));
		m_adj(m, adjsz);

		if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
			eh = mtod(m, struct ether_header *);
			if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
				vtnet_vlan_tag_remove(m);

				/*
				 * With the 802.1Q header removed, update the
				 * checksum starting location accordingly.
				 */
				if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
					hdr->csum_start -=
					    ETHER_VLAN_ENCAP_LEN;
			}
		}

		if (ifp->if_capenable & IFCAP_RXCSUM &&
		    hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
			if (vtnet_rx_csum(sc, m, hdr) != 0)
				sc->vtnet_stats.rx_csum_failed++;
		}

		lwkt_serialize_exit(&sc->vtnet_slz);
		rx_npkts++;
		ifp->if_input(ifp, m, NULL, -1);
		lwkt_serialize_enter(&sc->vtnet_slz);

		/*
		 * The interface may have been stopped while we were
		 * passing the packet up the network stack.
		 */
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			break;
	}

	virtqueue_notify(vq, &sc->vtnet_slz);

	if (rx_npktsp != NULL)
		*rx_npktsp = rx_npkts;

	return (count > 0 ? 0 : EAGAIN);
}

static void
vtnet_rx_intr_task(void *arg)
{
	struct vtnet_softc *sc;
	struct ifnet *ifp;
	int more;

	sc = arg;
	ifp = sc->vtnet_ifp;

next:
//	lwkt_serialize_enter(&sc->vtnet_slz);

	if ((ifp->if_flags & IFF_RUNNING) == 0) {
		vtnet_enable_rx_intr(sc);
//		lwkt_serialize_exit(&sc->vtnet_slz);
		return;
	}

	more = vtnet_rxeof(sc, sc->vtnet_rx_process_limit, NULL);
	if (!more && vtnet_enable_rx_intr(sc) != 0) {
		vtnet_disable_rx_intr(sc);
		more = 1;
	}

//	lwkt_serialize_exit(&sc->vtnet_slz);

	if (more) {
		sc->vtnet_stats.rx_task_rescheduled++;
		goto next;
	}
}

static int
vtnet_rx_vq_intr(void *xsc)
{
	struct vtnet_softc *sc;

	sc = xsc;

	vtnet_disable_rx_intr(sc);
	vtnet_rx_intr_task(sc);

	return (1);
}

static void
vtnet_txeof(struct vtnet_softc *sc)
{
	struct virtqueue *vq;
	struct ifnet *ifp;
	struct vtnet_tx_header *txhdr;
	int deq;

	vq = sc->vtnet_tx_vq;
	ifp = sc->vtnet_ifp;
	deq = 0;

	ASSERT_SERIALIZED(&sc->vtnet_slz);

	while ((txhdr = virtqueue_dequeue(vq, NULL)) != NULL) {
		deq++;
		ifp->if_opackets++;
		m_freem(txhdr->vth_mbuf);
	}

	if (deq > 0) {
		ifq_clr_oactive(&ifp->if_snd);
		if (virtqueue_empty(vq))
			sc->vtnet_watchdog_timer = 0;
	}
}
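
/*
 * Translate the mbuf's offload requests (CSUM_TCP/CSUM_UDP/CSUM_TSO, set
 * by the stack in m_pkthdr.csum_flags) into the VirtIO header fields the
 * host expects: csum_start/csum_offset for checksum offload, and
 * gso_type/hdr_len/gso_size for TSO. Returns the (possibly m_pullup'ed)
 * mbuf, or NULL if the mbuf was consumed on error.
 */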
static struct mbuf *
vtnet_tx_offload(struct vtnet_softc *sc, struct mbuf *m,
    struct virtio_net_hdr *hdr)
{
	struct ifnet *ifp;
	struct ether_header *eh;
	struct ether_vlan_header *evh;
	struct ip *ip;
	struct ip6_hdr *ip6;
	struct tcphdr *tcp;
	int ip_offset;
	uint16_t eth_type, csum_start;
	uint8_t ip_proto, gso_type;

	ifp = sc->vtnet_ifp;
	M_ASSERTPKTHDR(m);

	ip_offset = sizeof(struct ether_header);
	if (m->m_len < ip_offset) {
		if ((m = m_pullup(m, ip_offset)) == NULL)
			return (NULL);
	}

	eh = mtod(m, struct ether_header *);
	eth_type = ntohs(eh->ether_type);
	if (eth_type == ETHERTYPE_VLAN) {
		ip_offset = sizeof(struct ether_vlan_header);
		if (m->m_len < ip_offset) {
			if ((m = m_pullup(m, ip_offset)) == NULL)
				return (NULL);
		}
		evh = mtod(m, struct ether_vlan_header *);
		eth_type = ntohs(evh->evl_proto);
	}

	switch (eth_type) {
	case ETHERTYPE_IP:
		if (m->m_len < ip_offset + sizeof(struct ip)) {
			m = m_pullup(m, ip_offset + sizeof(struct ip));
			if (m == NULL)
				return (NULL);
		}

		ip = (struct ip *)(mtod(m, uint8_t *) + ip_offset);
		ip_proto = ip->ip_p;
		csum_start = ip_offset + (ip->ip_hl << 2);
		gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
		break;

	case ETHERTYPE_IPV6:
		if (m->m_len < ip_offset + sizeof(struct ip6_hdr)) {
			m = m_pullup(m, ip_offset + sizeof(struct ip6_hdr));
			if (m == NULL)
				return (NULL);
		}

		ip6 = (struct ip6_hdr *)(mtod(m, uint8_t *) + ip_offset);
		/*
		 * XXX Assume no extension headers are present. Presently,
		 * this will always be true in the case of TSO, and FreeBSD
		 * does not perform checksum offloading of IPv6 yet.
		 */
		ip_proto = ip6->ip6_nxt;
		csum_start = ip_offset + sizeof(struct ip6_hdr);
		gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
		break;

	default:
		return (m);
	}

	if (m->m_pkthdr.csum_flags & VTNET_CSUM_OFFLOAD) {
		hdr->flags |= VIRTIO_NET_HDR_F_NEEDS_CSUM;
		hdr->csum_start = csum_start;
		hdr->csum_offset = m->m_pkthdr.csum_data;

		sc->vtnet_stats.tx_csum_offloaded++;
	}

	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
		if (ip_proto != IPPROTO_TCP)
			return (m);

		if (m->m_len < csum_start + sizeof(struct tcphdr)) {
			m = m_pullup(m, csum_start + sizeof(struct tcphdr));
			if (m == NULL)
				return (NULL);
		}

		tcp = (struct tcphdr *)(mtod(m, uint8_t *) + csum_start);
		hdr->gso_type = gso_type;
		hdr->hdr_len = csum_start + (tcp->th_off << 2);
		hdr->gso_size = m->m_pkthdr.tso_segsz;

		if (tcp->th_flags & TH_CWR) {
			/*
			 * Drop if we did not negotiate VIRTIO_NET_F_HOST_ECN.
			 * ECN support is only configurable globally with the
			 * net.inet.tcp.ecn.enable sysctl knob.
			 */
			if ((sc->vtnet_flags & VTNET_FLAG_TSO_ECN) == 0) {
				if_printf(ifp, "TSO with ECN not supported "
				    "by host\n");
				m_freem(m);
				return (NULL);
			}

			hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
		}

		sc->vtnet_stats.tx_tso_offloaded++;
	}

	return (m);
}

static int
vtnet_enqueue_txbuf(struct vtnet_softc *sc, struct mbuf **m_head,
    struct vtnet_tx_header *txhdr)
{
	struct sglist sg;
	struct sglist_seg segs[VTNET_MAX_TX_SEGS];
	struct virtqueue *vq;
	struct mbuf *m;
	int collapsed, error;

	vq = sc->vtnet_tx_vq;
	m = *m_head;
	collapsed = 0;

	sglist_init(&sg, VTNET_MAX_TX_SEGS, segs);
	error = sglist_append(&sg, &txhdr->vth_uhdr, sc->vtnet_hdr_size);
	KASSERT(error == 0 && sg.sg_nseg == 1,
	    ("cannot add header to sglist"));

again:
	error = sglist_append_mbuf(&sg, m);
	if (error) {
		if (collapsed)
			goto fail;

		//m = m_collapse(m, MB_DONTWAIT, VTNET_MAX_TX_SEGS - 1);
		m = m_defrag(m, MB_DONTWAIT);
		if (m == NULL)
			goto fail;

		*m_head = m;
		collapsed = 1;
		goto again;
	}

	txhdr->vth_mbuf = m;

	return (virtqueue_enqueue(vq, txhdr, &sg, sg.sg_nseg, 0));

fail:
	m_freem(*m_head);
	*m_head = NULL;

	return (ENOBUFS);
}

static struct mbuf *
vtnet_vlan_tag_insert(struct mbuf *m)
{
	struct mbuf *n;
	struct ether_vlan_header *evl;

	if (M_WRITABLE(m) == 0) {
		n = m_dup(m, MB_DONTWAIT);
		m_freem(m);
		if ((m = n) == NULL)
			return (NULL);
	}

	M_PREPEND(m, ETHER_VLAN_ENCAP_LEN, MB_DONTWAIT);
	if (m == NULL)
		return (NULL);
	if (m->m_len < sizeof(struct ether_vlan_header)) {
		m = m_pullup(m, sizeof(struct ether_vlan_header));
		if (m == NULL)
			return (NULL);
	}

	/* Insert 802.1Q header into the existing Ethernet header. */
	evl = mtod(m, struct ether_vlan_header *);
	bcopy((char *) evl + ETHER_VLAN_ENCAP_LEN,
	    (char *) evl, ETHER_HDR_LEN - ETHER_TYPE_LEN);
	evl->evl_encap_proto = htons(ETHERTYPE_VLAN);
	evl->evl_tag = htons(m->m_pkthdr.ether_vlantag);
	m->m_flags &= ~M_VLANTAG;

	return (m);
}
1928 */ 1929 hdr = &txhdr->vth_uhdr.hdr; 1930 m = *m_head; 1931 1932 error = ENOBUFS; 1933 1934 if (m->m_flags & M_VLANTAG) { 1935 //m = ether_vlanencap(m, m->m_pkthdr.ether_vtag); 1936 m = vtnet_vlan_tag_insert(m); 1937 if ((*m_head = m) == NULL) 1938 goto fail; 1939 m->m_flags &= ~M_VLANTAG; 1940 } 1941 1942 if (m->m_pkthdr.csum_flags != 0) { 1943 m = vtnet_tx_offload(sc, m, hdr); 1944 if ((*m_head = m) == NULL) 1945 goto fail; 1946 } 1947 1948 error = vtnet_enqueue_txbuf(sc, m_head, txhdr); 1949 if (error == 0) 1950 sc->vtnet_txhdridx = 1951 (sc->vtnet_txhdridx + 1) % ((sc->vtnet_tx_size / 2) + 1); 1952 fail: 1953 return (error); 1954 } 1955 1956 static void 1957 vtnet_start(struct ifnet *ifp, struct ifaltq_subque *ifsq) 1958 { 1959 struct vtnet_softc *sc; 1960 1961 sc = ifp->if_softc; 1962 1963 ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq); 1964 lwkt_serialize_enter(&sc->vtnet_slz); 1965 vtnet_start_locked(ifp, ifsq); 1966 lwkt_serialize_exit(&sc->vtnet_slz); 1967 } 1968 1969 static void 1970 vtnet_start_locked(struct ifnet *ifp, struct ifaltq_subque *ifsq) 1971 { 1972 struct vtnet_softc *sc; 1973 struct virtqueue *vq; 1974 struct mbuf *m0; 1975 int enq; 1976 1977 sc = ifp->if_softc; 1978 vq = sc->vtnet_tx_vq; 1979 enq = 0; 1980 1981 ASSERT_SERIALIZED(&sc->vtnet_slz); 1982 1983 if ((ifp->if_flags & (IFF_RUNNING)) != 1984 IFF_RUNNING || ((sc->vtnet_flags & VTNET_FLAG_LINK) == 0)) 1985 return; 1986 1987 #ifdef VTNET_TX_INTR_MODERATION 1988 if (virtqueue_nused(vq) >= sc->vtnet_tx_size / 2) 1989 vtnet_txeof(sc); 1990 #endif 1991 1992 while (!ifsq_is_empty(ifsq)) { 1993 if (virtqueue_full(vq)) { 1994 ifq_set_oactive(&ifp->if_snd); 1995 break; 1996 } 1997 1998 m0 = ifq_dequeue(&ifp->if_snd); 1999 if (m0 == NULL) 2000 break; 2001 2002 if (vtnet_encap(sc, &m0) != 0) { 2003 if (m0 == NULL) 2004 break; 2005 ifq_prepend(&ifp->if_snd, m0); 2006 ifq_set_oactive(&ifp->if_snd); 2007 break; 2008 } 2009 2010 enq++; 2011 ETHER_BPF_MTAP(ifp, m0); 2012 } 2013 2014 if (enq > 0) { 2015 virtqueue_notify(vq, &sc->vtnet_slz); 2016 sc->vtnet_watchdog_timer = VTNET_WATCHDOG_TIMEOUT; 2017 } 2018 } 2019 2020 static void 2021 vtnet_tick(void *xsc) 2022 { 2023 struct vtnet_softc *sc; 2024 2025 sc = xsc; 2026 2027 #if 0 2028 ASSERT_SERIALIZED(&sc->vtnet_slz); 2029 #ifdef VTNET_DEBUG 2030 virtqueue_dump(sc->vtnet_rx_vq); 2031 virtqueue_dump(sc->vtnet_tx_vq); 2032 #endif 2033 2034 vtnet_watchdog(sc); 2035 callout_reset(&sc->vtnet_tick_ch, hz, vtnet_tick, sc); 2036 #endif 2037 } 2038 2039 static void 2040 vtnet_tx_intr_task(void *arg) 2041 { 2042 struct vtnet_softc *sc; 2043 struct ifnet *ifp; 2044 struct ifaltq_subque *ifsq; 2045 2046 sc = arg; 2047 ifp = sc->vtnet_ifp; 2048 ifsq = ifq_get_subq_default(&ifp->if_snd); 2049 2050 next: 2051 // lwkt_serialize_enter(&sc->vtnet_slz); 2052 2053 if ((ifp->if_flags & IFF_RUNNING) == 0) { 2054 vtnet_enable_tx_intr(sc); 2055 // lwkt_serialize_exit(&sc->vtnet_slz); 2056 return; 2057 } 2058 2059 vtnet_txeof(sc); 2060 2061 if (!ifsq_is_empty(ifsq)) 2062 vtnet_start_locked(ifp, ifsq); 2063 2064 if (vtnet_enable_tx_intr(sc) != 0) { 2065 vtnet_disable_tx_intr(sc); 2066 sc->vtnet_stats.tx_task_rescheduled++; 2067 // lwkt_serialize_exit(&sc->vtnet_slz); 2068 goto next; 2069 } 2070 2071 // lwkt_serialize_exit(&sc->vtnet_slz); 2072 } 2073 2074 static int 2075 vtnet_tx_vq_intr(void *xsc) 2076 { 2077 struct vtnet_softc *sc; 2078 2079 sc = xsc; 2080 2081 vtnet_disable_tx_intr(sc); 2082 vtnet_tx_intr_task(sc); 2083 2084 return (1); 2085 } 2086 2087 static void 2088 vtnet_stop(struct vtnet_softc *sc) 2089 

static void
vtnet_stop(struct vtnet_softc *sc)
{
	device_t dev;
	struct ifnet *ifp;

	dev = sc->vtnet_dev;
	ifp = sc->vtnet_ifp;

	ASSERT_SERIALIZED(&sc->vtnet_slz);

	sc->vtnet_watchdog_timer = 0;
	callout_stop(&sc->vtnet_tick_ch);
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_flags &= ~IFF_RUNNING;

	vtnet_disable_rx_intr(sc);
	vtnet_disable_tx_intr(sc);

	/*
	 * Stop the host VirtIO adapter.  Note this will reset the host
	 * adapter's state back to the pre-initialized state, so in
	 * order to make the device usable again, we must drive it
	 * through virtio_reinit() and virtio_reinit_complete().
	 */
	virtio_stop(dev);

	sc->vtnet_flags &= ~VTNET_FLAG_LINK;

	vtnet_free_rx_mbufs(sc);
	vtnet_free_tx_mbufs(sc);
}

static int
vtnet_reinit(struct vtnet_softc *sc)
{
	struct ifnet *ifp;
	uint64_t features;

	ifp = sc->vtnet_ifp;
	features = sc->vtnet_features;

	/*
	 * Re-negotiate with the host, removing any disabled receive
	 * features.  Transmit features are disabled only on our side
	 * via if_capenable and if_hwassist.
	 */
	if (ifp->if_capabilities & IFCAP_RXCSUM) {
		if ((ifp->if_capenable & IFCAP_RXCSUM) == 0)
			features &= ~VIRTIO_NET_F_GUEST_CSUM;
	}

	if (ifp->if_capabilities & IFCAP_LRO) {
		if ((ifp->if_capenable & IFCAP_LRO) == 0)
			features &= ~VTNET_LRO_FEATURES;
	}

	if (ifp->if_capabilities & IFCAP_VLAN_HWFILTER) {
		if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
			features &= ~VIRTIO_NET_F_CTRL_VLAN;
	}

	return (virtio_reinit(sc->vtnet_dev, features));
}
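
/*
 * Illustration of the renegotiation above, assuming the usual ioctl
 * path updates if_capenable: if RXCSUM was negotiated at attach time
 * and an administrator later runs "ifconfig vtnet0 -rxcsum", the next
 * vtnet_init() -> vtnet_reinit() pass strips VIRTIO_NET_F_GUEST_CSUM
 * from the feature mask before handing it to virtio_reinit(), so the
 * host stops offering receive checksum offload until the capability
 * is enabled again.
 */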

static void
vtnet_init_locked(struct vtnet_softc *sc)
{
	device_t dev;
	struct ifnet *ifp;
	int error;

	dev = sc->vtnet_dev;
	ifp = sc->vtnet_ifp;

	ASSERT_SERIALIZED(&sc->vtnet_slz);

	if (ifp->if_flags & IFF_RUNNING)
		return;

	/* Stop the host's adapter and cancel any pending I/O. */
	vtnet_stop(sc);

	/* Reinitialize the host device. */
	error = vtnet_reinit(sc);
	if (error) {
		device_printf(dev,
		    "reinitialization failed, stopping device...\n");
		vtnet_stop(sc);
		return;
	}

	/* Update the host with the assigned MAC address. */
	bcopy(IF_LLADDR(ifp), sc->vtnet_hwaddr, ETHER_ADDR_LEN);
	vtnet_set_hwaddr(sc);

	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist |= VTNET_CSUM_OFFLOAD;
	if (ifp->if_capenable & IFCAP_TSO4)
		ifp->if_hwassist |= CSUM_TSO;

	error = vtnet_init_rx_vq(sc);
	if (error) {
		device_printf(dev,
		    "cannot allocate mbufs for Rx virtqueue\n");
		vtnet_stop(sc);
		return;
	}

	if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) {
		if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) {
			/* Restore promiscuous and all-multicast modes. */
			vtnet_rx_filter(sc);

			/* Restore filtered MAC addresses. */
			vtnet_rx_filter_mac(sc);
		}

		/* Restore VLAN filters. */
		if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
			vtnet_rx_filter_vlan(sc);
	}

	vtnet_enable_rx_intr(sc);
	vtnet_enable_tx_intr(sc);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	virtio_reinit_complete(dev);

	vtnet_update_link_status(sc);
	callout_reset(&sc->vtnet_tick_ch, hz, vtnet_tick, sc);
}

static void
vtnet_init(void *xsc)
{
	struct vtnet_softc *sc;

	sc = xsc;

	lwkt_serialize_enter(&sc->vtnet_slz);
	vtnet_init_locked(sc);
	lwkt_serialize_exit(&sc->vtnet_slz);
}

static void
vtnet_exec_ctrl_cmd(struct vtnet_softc *sc, void *cookie,
    struct sglist *sg, int readable, int writable)
{
	struct virtqueue *vq;
	void *c;

	vq = sc->vtnet_ctrl_vq;

	ASSERT_SERIALIZED(&sc->vtnet_slz);
	KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_VQ,
	    ("no control virtqueue"));
	KASSERT(virtqueue_empty(vq),
	    ("control command already enqueued"));

	if (virtqueue_enqueue(vq, cookie, sg, readable, writable) != 0)
		return;

	virtqueue_notify(vq, &sc->vtnet_slz);

	/*
	 * Poll until the command is complete.  Previously, we would
	 * sleep until the control virtqueue interrupt handler woke
	 * us up, but dropping the serializer leads to serialization
	 * difficulties.
	 *
	 * Furthermore, it appears QEMU/KVM only allocates three MSIX
	 * vectors.  Two of those vectors are needed for the Rx and Tx
	 * virtqueues.  We do not support sharing a virtqueue and the
	 * configuration-change notification on the same MSIX vector.
	 */
	c = virtqueue_poll(vq, NULL);
	KASSERT(c == cookie, ("unexpected control command response"));
}

static void
vtnet_rx_filter(struct vtnet_softc *sc)
{
	device_t dev;
	struct ifnet *ifp;

	dev = sc->vtnet_dev;
	ifp = sc->vtnet_ifp;

	ASSERT_SERIALIZED(&sc->vtnet_slz);
	KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX,
	    ("CTRL_RX feature not negotiated"));

	if (vtnet_set_promisc(sc, ifp->if_flags & IFF_PROMISC) != 0)
		device_printf(dev, "cannot %s promiscuous mode\n",
		    ifp->if_flags & IFF_PROMISC ? "enable" : "disable");

	if (vtnet_set_allmulti(sc, ifp->if_flags & IFF_ALLMULTI) != 0)
		device_printf(dev, "cannot %s all-multicast mode\n",
		    ifp->if_flags & IFF_ALLMULTI ? "enable" : "disable");
}
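
/*
 * Layout of the control-queue message built by vtnet_ctrl_rx_cmd()
 * below, in the order the segments are appended (device-readable
 * segments must precede the device-writable ack):
 *
 *	sg[0]	struct virtio_net_ctrl_hdr	readable: class, cmd
 *	sg[1]	uint8_t onoff			readable: 1 = on, 0 = off
 *	sg[2]	uint8_t ack			writable: VIRTIO_NET_OK/ERR
 *
 * This is why vtnet_exec_ctrl_cmd() is invoked with "sg.sg_nseg - 1"
 * readable segments and a single writable segment.
 */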

static int
vtnet_ctrl_rx_cmd(struct vtnet_softc *sc, int cmd, int on)
{
	struct virtio_net_ctrl_hdr hdr __aligned(2);
	struct sglist_seg segs[3];
	struct sglist sg;
	uint8_t onoff, ack;
	int error;

	if ((sc->vtnet_flags & VTNET_FLAG_CTRL_RX) == 0)
		return (ENOTSUP);

	error = 0;

	hdr.class = VIRTIO_NET_CTRL_RX;
	hdr.cmd = cmd;
	onoff = !!on;
	ack = VIRTIO_NET_ERR;

	sglist_init(&sg, 3, segs);
	error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr));
	error |= sglist_append(&sg, &onoff, sizeof(uint8_t));
	error |= sglist_append(&sg, &ack, sizeof(uint8_t));
	KASSERT(error == 0 && sg.sg_nseg == 3,
	    ("error adding Rx filter message to sglist"));

	vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1);

	return (ack == VIRTIO_NET_OK ? 0 : EIO);
}

static int
vtnet_set_promisc(struct vtnet_softc *sc, int on)
{

	return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_PROMISC, on));
}

static int
vtnet_set_allmulti(struct vtnet_softc *sc, int on)
{

	return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_ALLMULTI, on));
}
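
/*
 * The MAC filter command sent by vtnet_rx_filter_mac() below carries
 * two tables, unicast then multicast, each with its own entry count.
 * If either table would exceed VTNET_MAX_MAC_ENTRIES, its count is
 * zeroed and the corresponding catch-all mode (promiscuous or
 * all-multicast) is enabled instead; the table command is skipped
 * entirely only when both tables overflow.
 */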

static void
vtnet_rx_filter_mac(struct vtnet_softc *sc)
{
	struct virtio_net_ctrl_hdr hdr __aligned(2);
	struct vtnet_mac_filter *filter;
	struct sglist_seg segs[4];
	struct sglist sg;
	struct ifnet *ifp;
	struct ifaddr *ifa;
	struct ifaddr_container *ifac;
	struct ifmultiaddr *ifma;
	int ucnt, mcnt, promisc, allmulti, error;
	uint8_t ack;

	ifp = sc->vtnet_ifp;
	ucnt = 0;
	mcnt = 0;
	promisc = 0;
	allmulti = 0;
	error = 0;

	ASSERT_SERIALIZED(&sc->vtnet_slz);
	KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX,
	    ("CTRL_RX feature not negotiated"));

	/* Use the MAC filtering table allocated in vtnet_attach. */
	filter = sc->vtnet_macfilter;
	memset(filter, 0, sizeof(struct vtnet_mac_filter));

	/* Unicast MAC addresses: */
	TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
		ifa = ifac->ifa;
		if (ifa->ifa_addr->sa_family != AF_LINK)
			continue;
		else if (ucnt == VTNET_MAX_MAC_ENTRIES)
			break;

		bcopy(LLADDR((struct sockaddr_dl *)ifa->ifa_addr),
		    &filter->vmf_unicast.macs[ucnt], ETHER_ADDR_LEN);
		ucnt++;
	}

	if (ucnt >= VTNET_MAX_MAC_ENTRIES) {
		promisc = 1;
		filter->vmf_unicast.nentries = 0;

		if_printf(ifp, "more than %d MAC addresses assigned, "
		    "falling back to promiscuous mode\n",
		    VTNET_MAX_MAC_ENTRIES);
	} else
		filter->vmf_unicast.nentries = ucnt;

	/* Multicast MAC addresses: */
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		else if (mcnt == VTNET_MAX_MAC_ENTRIES)
			break;

		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    &filter->vmf_multicast.macs[mcnt], ETHER_ADDR_LEN);
		mcnt++;
	}

	if (mcnt >= VTNET_MAX_MAC_ENTRIES) {
		allmulti = 1;
		filter->vmf_multicast.nentries = 0;

		if_printf(ifp, "more than %d multicast MAC addresses "
		    "assigned, falling back to all-multicast mode\n",
		    VTNET_MAX_MAC_ENTRIES);
	} else
		filter->vmf_multicast.nentries = mcnt;

	/* Nothing to program if we fell back on both counts. */
	if (promisc && allmulti)
		goto out;

	hdr.class = VIRTIO_NET_CTRL_MAC;
	hdr.cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;
	ack = VIRTIO_NET_ERR;

	sglist_init(&sg, 4, segs);
	error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr));
	error |= sglist_append(&sg, &filter->vmf_unicast,
	    sizeof(struct vtnet_mac_table));
	error |= sglist_append(&sg, &filter->vmf_multicast,
	    sizeof(struct vtnet_mac_table));
	error |= sglist_append(&sg, &ack, sizeof(uint8_t));
	KASSERT(error == 0 && sg.sg_nseg == 4,
	    ("error adding MAC filtering message to sglist"));

	vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1);

	if (ack != VIRTIO_NET_OK)
		if_printf(ifp, "error setting host MAC filter table\n");

out:
	if (promisc)
		if (vtnet_set_promisc(sc, 1) != 0)
			if_printf(ifp, "cannot enable promiscuous mode\n");
	if (allmulti)
		if (vtnet_set_allmulti(sc, 1) != 0)
			if_printf(ifp, "cannot enable all-multicast mode\n");
}

static int
vtnet_exec_vlan_filter(struct vtnet_softc *sc, int add, uint16_t tag)
{
	struct virtio_net_ctrl_hdr hdr __aligned(2);
	struct sglist_seg segs[3];
	struct sglist sg;
	uint8_t ack;
	int error;

	hdr.class = VIRTIO_NET_CTRL_VLAN;
	hdr.cmd = add ? VIRTIO_NET_CTRL_VLAN_ADD : VIRTIO_NET_CTRL_VLAN_DEL;
	ack = VIRTIO_NET_ERR;
	error = 0;

	sglist_init(&sg, 3, segs);
	error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr));
	error |= sglist_append(&sg, &tag, sizeof(uint16_t));
	error |= sglist_append(&sg, &ack, sizeof(uint8_t));
	KASSERT(error == 0 && sg.sg_nseg == 3,
	    ("error adding VLAN control message to sglist"));

	vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1);

	return (ack == VIRTIO_NET_OK ? 0 : EIO);
}

static void
vtnet_rx_filter_vlan(struct vtnet_softc *sc)
{
	device_t dev;
	uint32_t w, mask;
	uint16_t tag;
	int i, nvlans, error;

	ASSERT_SERIALIZED(&sc->vtnet_slz);
	KASSERT(sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER,
	    ("VLAN_FILTER feature not negotiated"));

	dev = sc->vtnet_dev;
	nvlans = sc->vtnet_nvlans;
	error = 0;

	/* Enable filtering for each configured VLAN. */
	for (i = 0; i < VTNET_VLAN_SHADOW_SIZE && nvlans > 0; i++) {
		w = sc->vtnet_vlan_shadow[i];
		for (mask = 1, tag = i * 32; w != 0; mask <<= 1, tag++) {
			if ((w & mask) != 0) {
				w &= ~mask;
				nvlans--;
				if (vtnet_exec_vlan_filter(sc, 1, tag) != 0)
					error++;
			}
		}
	}

	KASSERT(nvlans == 0, ("VLAN count incorrect"));
	if (error)
		device_printf(dev, "cannot restore VLAN filter table\n");
}
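
/*
 * Shadow VLAN table arithmetic, worked through for tag 100:
 *
 *	idx = (100 >> 5) & 0x7F = 3	(32 tags per uint32_t word)
 *	bit = 100 & 0x1F        = 4
 *
 * so vtnet_set_vlan_filter() below sets bit 4 of
 * vtnet_vlan_shadow[3].  vtnet_rx_filter_vlan() above inverts the
 * mapping when replaying the table: tag = i * 32 + bit.
 */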
"to" : "from"); 2542 } 2543 } 2544 2545 lwkt_serialize_exit(&sc->vtnet_slz); 2546 } 2547 2548 static void 2549 vtnet_register_vlan(void *arg, struct ifnet *ifp, uint16_t tag) 2550 { 2551 2552 if (ifp->if_softc != arg) 2553 return; 2554 2555 vtnet_set_vlan_filter(arg, 1, tag); 2556 } 2557 2558 static void 2559 vtnet_unregister_vlan(void *arg, struct ifnet *ifp, uint16_t tag) 2560 { 2561 2562 if (ifp->if_softc != arg) 2563 return; 2564 2565 vtnet_set_vlan_filter(arg, 0, tag); 2566 } 2567 2568 static int 2569 vtnet_ifmedia_upd(struct ifnet *ifp) 2570 { 2571 struct vtnet_softc *sc; 2572 struct ifmedia *ifm; 2573 2574 sc = ifp->if_softc; 2575 ifm = &sc->vtnet_media; 2576 2577 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 2578 return (EINVAL); 2579 2580 return (0); 2581 } 2582 2583 static void 2584 vtnet_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 2585 { 2586 struct vtnet_softc *sc; 2587 2588 sc = ifp->if_softc; 2589 2590 ifmr->ifm_status = IFM_AVALID; 2591 ifmr->ifm_active = IFM_ETHER; 2592 2593 lwkt_serialize_enter(&sc->vtnet_slz); 2594 if (vtnet_is_link_up(sc) != 0) { 2595 ifmr->ifm_status |= IFM_ACTIVE; 2596 ifmr->ifm_active |= VTNET_MEDIATYPE; 2597 } else 2598 ifmr->ifm_active |= IFM_NONE; 2599 lwkt_serialize_exit(&sc->vtnet_slz); 2600 } 2601 2602 static void 2603 vtnet_add_statistics(struct vtnet_softc *sc) 2604 { 2605 device_t dev; 2606 struct vtnet_statistics *stats; 2607 struct sysctl_ctx_list *ctx; 2608 struct sysctl_oid *tree; 2609 struct sysctl_oid_list *child; 2610 2611 dev = sc->vtnet_dev; 2612 stats = &sc->vtnet_stats; 2613 ctx = device_get_sysctl_ctx(dev); 2614 tree = device_get_sysctl_tree(dev); 2615 child = SYSCTL_CHILDREN(tree); 2616 2617 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_alloc_failed", 2618 CTLFLAG_RD, &stats->mbuf_alloc_failed, 2619 "Mbuf cluster allocation failures"); 2620 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_frame_too_large", 2621 CTLFLAG_RD, &stats->rx_frame_too_large, 2622 "Received frame larger than the mbuf chain"); 2623 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_enq_replacement_failed", 2624 CTLFLAG_RD, &stats->rx_enq_replacement_failed, 2625 "Enqueuing the replacement receive mbuf failed"); 2626 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_mergeable_failed", 2627 CTLFLAG_RD, &stats->rx_mergeable_failed, 2628 "Mergeable buffers receive failures"); 2629 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_csum_bad_ethtype", 2630 CTLFLAG_RD, &stats->rx_csum_bad_ethtype, 2631 "Received checksum offloaded buffer with unsupported " 2632 "Ethernet type"); 2633 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_csum_bad_start", 2634 CTLFLAG_RD, &stats->rx_csum_bad_start, 2635 "Received checksum offloaded buffer with incorrect start offset"); 2636 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_csum_bad_ipproto", 2637 CTLFLAG_RD, &stats->rx_csum_bad_ipproto, 2638 "Received checksum offloaded buffer with incorrect IP protocol"); 2639 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_csum_bad_offset", 2640 CTLFLAG_RD, &stats->rx_csum_bad_offset, 2641 "Received checksum offloaded buffer with incorrect offset"); 2642 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_csum_failed", 2643 CTLFLAG_RD, &stats->rx_csum_failed, 2644 "Received buffer checksum offload failed"); 2645 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_csum_offloaded", 2646 CTLFLAG_RD, &stats->rx_csum_offloaded, 2647 "Received buffer checksum offload succeeded"); 2648 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_task_rescheduled", 2649 CTLFLAG_RD, &stats->rx_task_rescheduled, 2650 "Times the receive interrupt task rescheduled 

static void
vtnet_add_statistics(struct vtnet_softc *sc)
{
	device_t dev;
	struct vtnet_statistics *stats;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree;
	struct sysctl_oid_list *child;

	dev = sc->vtnet_dev;
	stats = &sc->vtnet_stats;
	ctx = device_get_sysctl_ctx(dev);
	tree = device_get_sysctl_tree(dev);
	child = SYSCTL_CHILDREN(tree);

	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_alloc_failed",
	    CTLFLAG_RD, &stats->mbuf_alloc_failed,
	    "Mbuf cluster allocation failures");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_frame_too_large",
	    CTLFLAG_RD, &stats->rx_frame_too_large,
	    "Received frame larger than the mbuf chain");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_enq_replacement_failed",
	    CTLFLAG_RD, &stats->rx_enq_replacement_failed,
	    "Enqueuing the replacement receive mbuf failed");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_mergeable_failed",
	    CTLFLAG_RD, &stats->rx_mergeable_failed,
	    "Mergeable buffers receive failures");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_csum_bad_ethtype",
	    CTLFLAG_RD, &stats->rx_csum_bad_ethtype,
	    "Received checksum offloaded buffer with unsupported "
	    "Ethernet type");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_csum_bad_start",
	    CTLFLAG_RD, &stats->rx_csum_bad_start,
	    "Received checksum offloaded buffer with incorrect start offset");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_csum_bad_ipproto",
	    CTLFLAG_RD, &stats->rx_csum_bad_ipproto,
	    "Received checksum offloaded buffer with incorrect IP protocol");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_csum_bad_offset",
	    CTLFLAG_RD, &stats->rx_csum_bad_offset,
	    "Received checksum offloaded buffer with incorrect offset");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_csum_failed",
	    CTLFLAG_RD, &stats->rx_csum_failed,
	    "Received buffer checksum offload failed");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_csum_offloaded",
	    CTLFLAG_RD, &stats->rx_csum_offloaded,
	    "Received buffer checksum offload succeeded");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_task_rescheduled",
	    CTLFLAG_RD, &stats->rx_task_rescheduled,
	    "Times the receive interrupt task rescheduled itself");

	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_csum_offloaded",
	    CTLFLAG_RD, &stats->tx_csum_offloaded,
	    "Offloaded checksum of transmitted buffer");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_tso_offloaded",
	    CTLFLAG_RD, &stats->tx_tso_offloaded,
	    "Segmentation offload of transmitted buffer");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_csum_bad_ethtype",
	    CTLFLAG_RD, &stats->tx_csum_bad_ethtype,
	    "Aborted transmit of checksum offloaded buffer with unknown "
	    "Ethernet type");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_tso_bad_ethtype",
	    CTLFLAG_RD, &stats->tx_tso_bad_ethtype,
	    "Aborted transmit of TSO buffer with unknown Ethernet type");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_task_rescheduled",
	    CTLFLAG_RD, &stats->tx_task_rescheduled,
	    "Times the transmit interrupt task rescheduled itself");
}

static int
vtnet_enable_rx_intr(struct vtnet_softc *sc)
{

	return (virtqueue_enable_intr(sc->vtnet_rx_vq));
}

static void
vtnet_disable_rx_intr(struct vtnet_softc *sc)
{

	virtqueue_disable_intr(sc->vtnet_rx_vq);
}

static int
vtnet_enable_tx_intr(struct vtnet_softc *sc)
{

#ifdef VTNET_TX_INTR_MODERATION
	return (0);
#else
	return (virtqueue_enable_intr(sc->vtnet_tx_vq));
#endif
}

static void
vtnet_disable_tx_intr(struct vtnet_softc *sc)
{

	virtqueue_disable_intr(sc->vtnet_tx_vq);
}
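
/*
 * When VTNET_TX_INTR_MODERATION is defined, vtnet_enable_tx_intr()
 * above intentionally reports success without re-arming the transmit
 * virtqueue interrupt; completed buffers are instead reaped from
 * vtnet_start_locked() once the ring is at least half full.  This
 * trades transmit completion latency for fewer interrupts on busy
 * transmit paths.
 */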