/*-
 * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* Driver for VirtIO network devices. */

#include <sys/cdefs.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/random.h>
#include <sys/sglist.h>
#include <sys/serialize.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <machine/limits.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_media.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>
#include <net/ifq_var.h>

#include <net/bpf.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/udp.h>
#include <netinet/tcp.h>

#include <dev/virtual/virtio/virtio/virtio.h>
#include <dev/virtual/virtio/virtio/virtqueue.h>
#include <dev/virtual/virtio/net/virtio_net.h>
#include <dev/virtual/virtio/net/if_vtnetvar.h>

#include "virtio_if.h"

MALLOC_DEFINE(M_VTNET, "VTNET_TX", "Outgoing VTNET TX frame header");

static int	vtnet_probe(device_t);
static int	vtnet_attach(device_t);
static int	vtnet_detach(device_t);
static int	vtnet_suspend(device_t);
static int	vtnet_resume(device_t);
static int	vtnet_shutdown(device_t);
static int	vtnet_config_change(device_t);

static void	vtnet_negotiate_features(struct vtnet_softc *);
static int	vtnet_alloc_virtqueues(struct vtnet_softc *);
static void	vtnet_get_hwaddr(struct vtnet_softc *);
static void	vtnet_set_hwaddr(struct vtnet_softc *);
static int	vtnet_is_link_up(struct vtnet_softc *);
static void	vtnet_update_link_status(struct vtnet_softc *);
#if 0
static void	vtnet_watchdog(struct vtnet_softc *);
#endif
static void	vtnet_config_change_task(void *, int);
static int	vtnet_setup_interface(struct vtnet_softc *);
static int	vtnet_change_mtu(struct vtnet_softc *, int);
static int	vtnet_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);

static int	vtnet_init_rx_vq(struct vtnet_softc *);
static void	vtnet_free_rx_mbufs(struct vtnet_softc *);
static void	vtnet_free_tx_mbufs(struct vtnet_softc *);
static void	vtnet_free_ctrl_vq(struct vtnet_softc *);

static struct mbuf * vtnet_alloc_rxbuf(struct vtnet_softc *, int,
		    struct mbuf **);
static int	vtnet_replace_rxbuf(struct vtnet_softc *,
		    struct mbuf *, int);
static int	vtnet_newbuf(struct vtnet_softc *);
static void	vtnet_discard_merged_rxbuf(struct vtnet_softc *, int);
static void	vtnet_discard_rxbuf(struct vtnet_softc *, struct mbuf *);
static int	vtnet_enqueue_rxbuf(struct vtnet_softc *, struct mbuf *);
static void	vtnet_vlan_tag_remove(struct mbuf *);
static int	vtnet_rx_csum(struct vtnet_softc *, struct mbuf *,
		    struct virtio_net_hdr *);
static int	vtnet_rxeof_merged(struct vtnet_softc *, struct mbuf *, int);
static int	vtnet_rxeof(struct vtnet_softc *, int, int *);
static void	vtnet_rx_intr_task(void *);
static int	vtnet_rx_vq_intr(void *);

static void	vtnet_enqueue_txhdr(struct vtnet_softc *,
		    struct vtnet_tx_header *);
static void	vtnet_txeof(struct vtnet_softc *);
static struct mbuf * vtnet_tx_offload(struct vtnet_softc *, struct mbuf *,
		    struct virtio_net_hdr *);
static int	vtnet_enqueue_txbuf(struct vtnet_softc *, struct mbuf **,
		    struct vtnet_tx_header *);
static int	vtnet_encap(struct vtnet_softc *, struct mbuf **);
static void	vtnet_start_locked(struct ifnet *, struct ifaltq_subque *);
static void	vtnet_start(struct ifnet *, struct ifaltq_subque *);
static void	vtnet_tick(void *);
static void	vtnet_tx_intr_task(void *);
static int	vtnet_tx_vq_intr(void *);

static void	vtnet_stop(struct vtnet_softc *);
static int	vtnet_virtio_reinit(struct vtnet_softc *);
static void	vtnet_init_locked(struct vtnet_softc *);
static void	vtnet_init(void *);

static void	vtnet_exec_ctrl_cmd(struct vtnet_softc *, void *,
		    struct sglist *, int, int);

static int	vtnet_ctrl_mac_cmd(struct vtnet_softc *, uint8_t *);
static int	vtnet_ctrl_rx_cmd(struct vtnet_softc *, int, int);
static int	vtnet_set_promisc(struct vtnet_softc *, int);
static int	vtnet_set_allmulti(struct vtnet_softc *, int);
static void	vtnet_rx_filter(struct vtnet_softc *sc);
static void	vtnet_rx_filter_mac(struct vtnet_softc *);

static int	vtnet_exec_vlan_filter(struct vtnet_softc *, int, uint16_t);
static void	vtnet_rx_filter_vlan(struct vtnet_softc *);
static void	vtnet_update_vlan_filter(struct vtnet_softc *, int, uint16_t);
static void	vtnet_register_vlan(void *, struct ifnet *, uint16_t);
static void	vtnet_unregister_vlan(void *, struct ifnet *, uint16_t);

static int	vtnet_ifmedia_upd(struct ifnet *);
static void	vtnet_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static void	vtnet_add_statistics(struct vtnet_softc *);

static int	vtnet_enable_rx_intr(struct vtnet_softc *);
static int	vtnet_enable_tx_intr(struct vtnet_softc *);
static void	vtnet_disable_rx_intr(struct vtnet_softc *);
static void	vtnet_disable_tx_intr(struct vtnet_softc *);

/* Tunables. */
static int vtnet_csum_disable = 0;
TUNABLE_INT("hw.vtnet.csum_disable", &vtnet_csum_disable);
static int vtnet_tso_disable = 1;
TUNABLE_INT("hw.vtnet.tso_disable", &vtnet_tso_disable);
static int vtnet_lro_disable = 0;
TUNABLE_INT("hw.vtnet.lro_disable", &vtnet_lro_disable);

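/*
 * These are boot-time tunables read from the kernel environment;
 * e.g. setting hw.vtnet.tso_disable="0" in /boot/loader.conf should
 * allow TSO to be negotiated (an assumed usage example).
 */
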
/*
 * Reducing the number of transmit completed interrupts can
 * improve performance. To do so, the define below keeps the
 * Tx vq interrupt disabled and adds calls to vtnet_txeof()
 * in the start and watchdog paths. The price to pay for this
 * is that the freeing of transmitted mbufs may be delayed
 * until the watchdog fires.
 */
#define VTNET_TX_INTR_MODERATION

static struct virtio_feature_desc vtnet_feature_desc[] = {
	{ VIRTIO_NET_F_CSUM,		"TxChecksum"	},
	{ VIRTIO_NET_F_GUEST_CSUM,	"RxChecksum"	},
	{ VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, "DynOffload"	},
	{ VIRTIO_NET_F_MAC,		"MacAddress"	},
	{ VIRTIO_NET_F_GSO,		"TxAllGSO"	},
	{ VIRTIO_NET_F_GUEST_TSO4,	"RxTSOv4"	},
	{ VIRTIO_NET_F_GUEST_TSO6,	"RxTSOv6"	},
	{ VIRTIO_NET_F_GUEST_ECN,	"RxECN"		},
	{ VIRTIO_NET_F_GUEST_UFO,	"RxUFO"		},
	{ VIRTIO_NET_F_HOST_TSO4,	"TxTSOv4"	},
	{ VIRTIO_NET_F_HOST_TSO6,	"TxTSOv6"	},
	{ VIRTIO_NET_F_HOST_ECN,	"TxTSOECN"	},
	{ VIRTIO_NET_F_HOST_UFO,	"TxUFO"		},
	{ VIRTIO_NET_F_MRG_RXBUF,	"MrgRxBuf"	},
	{ VIRTIO_NET_F_STATUS,		"Status"	},
	{ VIRTIO_NET_F_CTRL_VQ,		"ControlVq"	},
	{ VIRTIO_NET_F_CTRL_RX,		"RxMode"	},
	{ VIRTIO_NET_F_CTRL_VLAN,	"VLanFilter"	},
	{ VIRTIO_NET_F_CTRL_RX_EXTRA,	"RxModeExtra"	},
	{ VIRTIO_NET_F_GUEST_ANNOUNCE,	"GuestAnnounce"	},
	{ VIRTIO_NET_F_MQ,		"RFS"		},
	{ VIRTIO_NET_F_CTRL_MAC_ADDR,	"SetMacAddress"	},
	{ 0, NULL }
};

static device_method_t vtnet_methods[] = {
	/* Device methods. */
	DEVMETHOD(device_probe,		vtnet_probe),
	DEVMETHOD(device_attach,	vtnet_attach),
	DEVMETHOD(device_detach,	vtnet_detach),
	DEVMETHOD(device_suspend,	vtnet_suspend),
	DEVMETHOD(device_resume,	vtnet_resume),
	DEVMETHOD(device_shutdown,	vtnet_shutdown),

	/* VirtIO methods. */
	DEVMETHOD(virtio_config_change, vtnet_config_change),

	DEVMETHOD_END
};

static driver_t vtnet_driver = {
	"vtnet",
	vtnet_methods,
	sizeof(struct vtnet_softc)
};

static devclass_t vtnet_devclass;

DRIVER_MODULE(vtnet, virtio_pci, vtnet_driver, vtnet_devclass, NULL, NULL);
MODULE_VERSION(vtnet, 1);
MODULE_DEPEND(vtnet, virtio, 1, 1, 1);

static int
vtnet_probe(device_t dev)
{
	if (virtio_get_device_type(dev) != VIRTIO_ID_NETWORK)
		return (ENXIO);

	device_set_desc(dev, "VirtIO Networking Adapter");

	return (BUS_PROBE_DEFAULT);
}

static int
vtnet_attach(device_t dev)
{
	struct vtnet_softc *sc;
	int error;

	sc = device_get_softc(dev);
	sc->vtnet_dev = dev;

	lwkt_serialize_init(&sc->vtnet_slz);
	callout_init(&sc->vtnet_tick_ch);

	ifmedia_init(&sc->vtnet_media, IFM_IMASK, vtnet_ifmedia_upd,
		     vtnet_ifmedia_sts);
	ifmedia_add(&sc->vtnet_media, VTNET_MEDIATYPE, 0, NULL);
	ifmedia_set(&sc->vtnet_media, VTNET_MEDIATYPE);

	vtnet_add_statistics(sc);
	SLIST_INIT(&sc->vtnet_txhdr_free);

	/* Register our feature descriptions. */
	virtio_set_feature_desc(dev, vtnet_feature_desc);
	vtnet_negotiate_features(sc);

	if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
		sc->vtnet_flags |= VTNET_FLAG_INDIRECT;

	if (virtio_with_feature(dev, VIRTIO_NET_F_MAC)) {
		/* This feature should always be negotiated. */
		sc->vtnet_flags |= VTNET_FLAG_MAC;
	}

	if (virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF)) {
		sc->vtnet_flags |= VTNET_FLAG_MRG_RXBUFS;
		sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
	} else {
		sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr);
	}

	sc->vtnet_rx_mbuf_size = MCLBYTES;
	sc->vtnet_rx_mbuf_count = VTNET_NEEDED_RX_MBUFS(sc);

	if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VQ)) {
		sc->vtnet_flags |= VTNET_FLAG_CTRL_VQ;

		if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_RX))
			sc->vtnet_flags |= VTNET_FLAG_CTRL_RX;
		if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VLAN))
			sc->vtnet_flags |= VTNET_FLAG_VLAN_FILTER;
		if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_MAC_ADDR) &&
		    virtio_with_feature(dev, VIRTIO_NET_F_CTRL_RX))
			sc->vtnet_flags |= VTNET_FLAG_CTRL_MAC;
	}

	/* Read (or generate) the MAC address for the adapter. */
	vtnet_get_hwaddr(sc);

	error = vtnet_alloc_virtqueues(sc);
	if (error) {
		device_printf(dev, "cannot allocate virtqueues\n");
		goto fail;
	}

	error = vtnet_setup_interface(sc);
	if (error) {
		device_printf(dev, "cannot setup interface\n");
		goto fail;
	}

	TASK_INIT(&sc->vtnet_cfgchg_task, 0, vtnet_config_change_task, sc);

	error = virtio_setup_intr(dev, &sc->vtnet_slz);
	if (error) {
		device_printf(dev, "cannot setup virtqueue interrupts\n");
		ether_ifdetach(sc->vtnet_ifp);
		goto fail;
	}

	/*
	 * Device defaults to promiscuous mode for backwards
	 * compatibility. Turn it off if possible.
	 */
	if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) {
		lwkt_serialize_enter(&sc->vtnet_slz);
		if (vtnet_set_promisc(sc, 0) != 0) {
			sc->vtnet_ifp->if_flags |= IFF_PROMISC;
			device_printf(dev,
			    "cannot disable promiscuous mode\n");
		}
		lwkt_serialize_exit(&sc->vtnet_slz);
	} else
		sc->vtnet_ifp->if_flags |= IFF_PROMISC;

fail:
	if (error)
		vtnet_detach(dev);

	return (error);
}

static int
vtnet_detach(device_t dev)
{
	struct vtnet_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	ifp = sc->vtnet_ifp;

	if (device_is_attached(dev)) {
		lwkt_serialize_enter(&sc->vtnet_slz);
		vtnet_stop(sc);
		lwkt_serialize_exit(&sc->vtnet_slz);

		callout_stop(&sc->vtnet_tick_ch);
		taskqueue_drain(taskqueue_swi, &sc->vtnet_cfgchg_task);

		ether_ifdetach(ifp);
	}

	if (sc->vtnet_vlan_attach != NULL) {
		EVENTHANDLER_DEREGISTER(vlan_config, sc->vtnet_vlan_attach);
		sc->vtnet_vlan_attach = NULL;
	}
	if (sc->vtnet_vlan_detach != NULL) {
		EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vtnet_vlan_detach);
		sc->vtnet_vlan_detach = NULL;
	}

	if (ifp) {
		if_free(ifp);
		sc->vtnet_ifp = NULL;
	}

	if (sc->vtnet_rx_vq != NULL)
		vtnet_free_rx_mbufs(sc);
	if (sc->vtnet_tx_vq != NULL)
		vtnet_free_tx_mbufs(sc);
	if (sc->vtnet_ctrl_vq != NULL)
		vtnet_free_ctrl_vq(sc);

	if (sc->vtnet_txhdrarea != NULL) {
		contigfree(sc->vtnet_txhdrarea,
		    sc->vtnet_txhdrcount * sizeof(struct vtnet_tx_header),
		    M_VTNET);
		sc->vtnet_txhdrarea = NULL;
	}
	SLIST_INIT(&sc->vtnet_txhdr_free);
	if (sc->vtnet_macfilter != NULL) {
		contigfree(sc->vtnet_macfilter,
		    sizeof(struct vtnet_mac_filter), M_DEVBUF);
		sc->vtnet_macfilter = NULL;
	}

	ifmedia_removeall(&sc->vtnet_media);

	return (0);
}

static int
vtnet_suspend(device_t dev)
{
	struct vtnet_softc *sc;

	sc = device_get_softc(dev);

	lwkt_serialize_enter(&sc->vtnet_slz);
	vtnet_stop(sc);
	sc->vtnet_flags |= VTNET_FLAG_SUSPENDED;
	lwkt_serialize_exit(&sc->vtnet_slz);

	return (0);
}

static int
vtnet_resume(device_t dev)
{
	struct vtnet_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	ifp = sc->vtnet_ifp;

	lwkt_serialize_enter(&sc->vtnet_slz);
	if (ifp->if_flags & IFF_UP)
		vtnet_init_locked(sc);
	sc->vtnet_flags &= ~VTNET_FLAG_SUSPENDED;
	lwkt_serialize_exit(&sc->vtnet_slz);

	return (0);
}

static int
vtnet_shutdown(device_t dev)
{

	/*
	 * Suspend already does all of what we need to
	 * do here; we just never expect to be resumed.
	 */
	return (vtnet_suspend(dev));
}

static int
vtnet_config_change(device_t dev)
{
	struct vtnet_softc *sc;

	sc = device_get_softc(dev);

	taskqueue_enqueue(taskqueue_thread[mycpuid], &sc->vtnet_cfgchg_task);

	return (1);
}

static void
vtnet_negotiate_features(struct vtnet_softc *sc)
{
	device_t dev;
	uint64_t mask, features;

	dev = sc->vtnet_dev;
	mask = 0;

	if (vtnet_csum_disable)
		mask |= VIRTIO_NET_F_CSUM | VIRTIO_NET_F_GUEST_CSUM;

	/*
	 * TSO and LRO are only available when their corresponding checksum
	 * offload feature is also negotiated.
	 */

	if (vtnet_csum_disable || vtnet_tso_disable)
		mask |= VIRTIO_NET_F_HOST_TSO4 | VIRTIO_NET_F_HOST_TSO6 |
		    VIRTIO_NET_F_HOST_ECN;

	if (vtnet_csum_disable || vtnet_lro_disable)
		mask |= VTNET_LRO_FEATURES;

	features = VTNET_FEATURES & ~mask;
	features |= VIRTIO_F_NOTIFY_ON_EMPTY;
	features |= VIRTIO_F_ANY_LAYOUT;
	sc->vtnet_features = virtio_negotiate_features(dev, features);

	if (virtio_with_feature(dev, VTNET_LRO_FEATURES) &&
	    virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF) == 0) {
		/*
		 * LRO without mergeable buffers requires special care. This
		 * is not ideal because every receive buffer must be large
		 * enough to hold the maximum TCP packet, the Ethernet header,
		 * and the VirtIO header. This requires up to 34 descriptors
		 * with MCLBYTES clusters. If we do not have indirect
		 * descriptors, LRO is disabled since the virtqueue will not
		 * contain very many receive buffers.
		 */
		if (!virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC)) {
			device_printf(dev,
			    "LRO disabled due to both mergeable buffers and "
			    "indirect descriptors not negotiated\n");

			features &= ~VTNET_LRO_FEATURES;
			sc->vtnet_features =
			    virtio_negotiate_features(dev, features);
		} else
			sc->vtnet_flags |= VTNET_FLAG_LRO_NOMRG;
	}
}

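/*
 * Allocate the receive and transmit virtqueues, plus the control
 * virtqueue when it was negotiated. The segment counts reflect the
 * worst-case sglist each queue must accept: chained LRO_NOMRG rx
 * buffers and TSO-sized tx frames.
 */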
static int
vtnet_alloc_virtqueues(struct vtnet_softc *sc)
{
	device_t dev;
	struct vq_alloc_info vq_info[3];
	int nvqs;

	dev = sc->vtnet_dev;
	nvqs = 2;

	/*
	 * Indirect descriptors are not needed for the Rx
	 * virtqueue when mergeable buffers are negotiated.
	 * The header is placed inline with the data, not
	 * in a separate descriptor, and mbuf clusters are
	 * always physically contiguous.
	 */
	if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
		sc->vtnet_rx_nsegs = (sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG) ?
		    VTNET_MAX_RX_SEGS : VTNET_MIN_RX_SEGS;
	} else
		sc->vtnet_rx_nsegs = VTNET_MRG_RX_SEGS;

	if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4) ||
	    virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6))
		sc->vtnet_tx_nsegs = VTNET_MAX_TX_SEGS;
	else
		sc->vtnet_tx_nsegs = VTNET_MIN_TX_SEGS;

	VQ_ALLOC_INFO_INIT(&vq_info[0], sc->vtnet_rx_nsegs,
	    vtnet_rx_vq_intr, sc, &sc->vtnet_rx_vq,
	    "%s receive", device_get_nameunit(dev));

	VQ_ALLOC_INFO_INIT(&vq_info[1], sc->vtnet_tx_nsegs,
	    vtnet_tx_vq_intr, sc, &sc->vtnet_tx_vq,
	    "%s transmit", device_get_nameunit(dev));

	if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) {
		nvqs++;

		VQ_ALLOC_INFO_INIT(&vq_info[2], 0, NULL, NULL,
		    &sc->vtnet_ctrl_vq, "%s control",
		    device_get_nameunit(dev));
	}

	return (virtio_alloc_virtqueues(dev, 0, nvqs, vq_info));
}

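/*
 * Allocate and configure the ifnet, the tx header pool, and the MAC
 * filter table, then advertise the offload capabilities implied by
 * the negotiated features.
 */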
static int
vtnet_setup_interface(struct vtnet_softc *sc)
{
	device_t dev;
	struct ifnet *ifp;
	int i;

	dev = sc->vtnet_dev;

	ifp = sc->vtnet_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "cannot allocate ifnet structure\n");
		return (ENOSPC);
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = vtnet_init;
	ifp->if_start = vtnet_start;
	ifp->if_ioctl = vtnet_ioctl;

	sc->vtnet_rx_process_limit = virtqueue_size(sc->vtnet_rx_vq);
	sc->vtnet_tx_size = virtqueue_size(sc->vtnet_tx_vq);
	if (sc->vtnet_flags & VTNET_FLAG_INDIRECT)
		sc->vtnet_txhdrcount = sc->vtnet_tx_size;
	else
		sc->vtnet_txhdrcount = (sc->vtnet_tx_size / 2) + 1;
	sc->vtnet_txhdrarea = contigmalloc(
	    sc->vtnet_txhdrcount * sizeof(struct vtnet_tx_header),
	    M_VTNET, M_WAITOK, 0, BUS_SPACE_MAXADDR, 4, 0);
	if (sc->vtnet_txhdrarea == NULL) {
		device_printf(dev, "cannot contigmalloc the tx headers\n");
		return (ENOMEM);
	}
	for (i = 0; i < sc->vtnet_txhdrcount; i++)
		vtnet_enqueue_txhdr(sc, &sc->vtnet_txhdrarea[i]);
	sc->vtnet_macfilter = contigmalloc(
	    sizeof(struct vtnet_mac_filter),
	    M_DEVBUF, M_WAITOK, 0, BUS_SPACE_MAXADDR, 4, 0);
	if (sc->vtnet_macfilter == NULL) {
		device_printf(dev,
		    "cannot contigmalloc the mac filter table\n");
		return (ENOMEM);
	}
	ifq_set_maxlen(&ifp->if_snd, sc->vtnet_tx_size - 1);
	ifq_set_ready(&ifp->if_snd);

	ether_ifattach(ifp, sc->vtnet_hwaddr, NULL);

	/* Tell the upper layer(s) we support long frames. */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |= IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU;

	if (virtio_with_feature(dev, VIRTIO_NET_F_CSUM)) {
		ifp->if_capabilities |= IFCAP_TXCSUM;

		if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4))
			ifp->if_capabilities |= IFCAP_TSO4;
		if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6))
			ifp->if_capabilities |= IFCAP_TSO6;
		if (ifp->if_capabilities & IFCAP_TSO)
			ifp->if_capabilities |= IFCAP_VLAN_HWTSO;

		if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_ECN))
			sc->vtnet_flags |= VTNET_FLAG_TSO_ECN;
	}

	if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_CSUM)) {
		ifp->if_capabilities |= IFCAP_RXCSUM;

		if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO4) ||
		    virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO6))
			ifp->if_capabilities |= IFCAP_LRO;
	}

	if (ifp->if_capabilities & IFCAP_HWCSUM) {
		/*
		 * VirtIO does not support VLAN tagging, but we can fake
		 * it by inserting and removing the 802.1Q header during
		 * transmit and receive. We are then able to do checksum
		 * offloading of VLAN frames.
		 */
		ifp->if_capabilities |=
			IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM;
	}

	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Capabilities after here are not enabled by default.
	 */

	if (sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER) {
		ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;

		sc->vtnet_vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
		    vtnet_register_vlan, sc, EVENTHANDLER_PRI_FIRST);
		sc->vtnet_vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
		    vtnet_unregister_vlan, sc, EVENTHANDLER_PRI_FIRST);
	}

	return (0);
}

static void
vtnet_set_hwaddr(struct vtnet_softc *sc)
{
	device_t dev;

	dev = sc->vtnet_dev;

	if ((sc->vtnet_flags & VTNET_FLAG_CTRL_MAC) &&
	    (sc->vtnet_flags & VTNET_FLAG_CTRL_RX)) {
		if (vtnet_ctrl_mac_cmd(sc, sc->vtnet_hwaddr) != 0)
			device_printf(dev, "unable to set MAC address\n");
	} else if (sc->vtnet_flags & VTNET_FLAG_MAC) {
		virtio_write_device_config(dev,
		    offsetof(struct virtio_net_config, mac),
		    sc->vtnet_hwaddr, ETHER_ADDR_LEN);
	}
}

static void
vtnet_get_hwaddr(struct vtnet_softc *sc)
{
	device_t dev;

	dev = sc->vtnet_dev;

	if ((sc->vtnet_flags & VTNET_FLAG_MAC) == 0) {
		/*
		 * Generate a random locally administered unicast address.
		 *
		 * It would be nice to generate the same MAC address across
		 * reboots, but it seems all the hosts currently available
		 * support the MAC feature, so this isn't too important.
		 */
		sc->vtnet_hwaddr[0] = 0xB2;
		karc4rand(&sc->vtnet_hwaddr[1], ETHER_ADDR_LEN - 1);
		vtnet_set_hwaddr(sc);
		return;
	}

	virtio_read_device_config(dev,
	    offsetof(struct virtio_net_config, mac),
	    sc->vtnet_hwaddr, ETHER_ADDR_LEN);
}

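/*
 * Read the link state from the device config space; hosts that do
 * not offer VIRTIO_NET_F_STATUS are assumed to always have link.
 */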
static int
vtnet_is_link_up(struct vtnet_softc *sc)
{
	device_t dev;
	struct ifnet *ifp;
	uint16_t status;

	dev = sc->vtnet_dev;
	ifp = sc->vtnet_ifp;

	ASSERT_SERIALIZED(&sc->vtnet_slz);

	if (virtio_with_feature(dev, VIRTIO_NET_F_STATUS)) {
		status = virtio_read_dev_config_2(dev,
				offsetof(struct virtio_net_config, status));
	} else {
		status = VIRTIO_NET_S_LINK_UP;
	}

	return ((status & VIRTIO_NET_S_LINK_UP) != 0);
}

static void
vtnet_update_link_status(struct vtnet_softc *sc)
{
	device_t dev;
	struct ifnet *ifp;
	struct ifaltq_subque *ifsq;
	int link;

	dev = sc->vtnet_dev;
	ifp = sc->vtnet_ifp;
	ifsq = ifq_get_subq_default(&ifp->if_snd);

	link = vtnet_is_link_up(sc);

	if (link && ((sc->vtnet_flags & VTNET_FLAG_LINK) == 0)) {
		sc->vtnet_flags |= VTNET_FLAG_LINK;
		if (bootverbose)
			device_printf(dev, "Link is up\n");
		ifp->if_link_state = LINK_STATE_UP;
		if_link_state_change(ifp);
		if (!ifsq_is_empty(ifsq))
			vtnet_start_locked(ifp, ifsq);
	} else if (!link && (sc->vtnet_flags & VTNET_FLAG_LINK)) {
		sc->vtnet_flags &= ~VTNET_FLAG_LINK;
		if (bootverbose)
			device_printf(dev, "Link is down\n");

		ifp->if_link_state = LINK_STATE_DOWN;
		if_link_state_change(ifp);
	}
}

#if 0
static void
vtnet_watchdog(struct vtnet_softc *sc)
{
	struct ifnet *ifp;

	ifp = sc->vtnet_ifp;

#ifdef VTNET_TX_INTR_MODERATION
	vtnet_txeof(sc);
#endif

	if (sc->vtnet_watchdog_timer == 0 || --sc->vtnet_watchdog_timer)
		return;

	if_printf(ifp, "watchdog timeout -- resetting\n");
#ifdef VTNET_DEBUG
	virtqueue_dump(sc->vtnet_tx_vq);
#endif
	ifp->if_oerrors++;
	ifp->if_flags &= ~IFF_RUNNING;
	vtnet_init_locked(sc);
}
#endif

static void
vtnet_config_change_task(void *arg, int pending)
{
	struct vtnet_softc *sc;

	sc = arg;

	lwkt_serialize_enter(&sc->vtnet_slz);
	vtnet_update_link_status(sc);
	lwkt_serialize_exit(&sc->vtnet_slz);
}

static int
vtnet_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
	struct vtnet_softc *sc;
	struct ifreq *ifr;
	int reinit, mask, error;

	sc = ifp->if_softc;
	ifr = (struct ifreq *) data;
	reinit = 0;
	error = 0;

	switch (cmd) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > VTNET_MAX_MTU)
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu) {
			lwkt_serialize_enter(&sc->vtnet_slz);
			error = vtnet_change_mtu(sc, ifr->ifr_mtu);
			lwkt_serialize_exit(&sc->vtnet_slz);
		}
		break;

	case SIOCSIFFLAGS:
		lwkt_serialize_enter(&sc->vtnet_slz);
		if ((ifp->if_flags & IFF_UP) == 0) {
			if (ifp->if_flags & IFF_RUNNING)
				vtnet_stop(sc);
		} else if (ifp->if_flags & IFF_RUNNING) {
			if ((ifp->if_flags ^ sc->vtnet_if_flags) &
			    (IFF_PROMISC | IFF_ALLMULTI)) {
				if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX)
					vtnet_rx_filter(sc);
				else
					error = ENOTSUP;
			}
		} else
			vtnet_init_locked(sc);

		if (error == 0)
			sc->vtnet_if_flags = ifp->if_flags;
		lwkt_serialize_exit(&sc->vtnet_slz);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		lwkt_serialize_enter(&sc->vtnet_slz);
		if ((sc->vtnet_flags & VTNET_FLAG_CTRL_RX) &&
		    (ifp->if_flags & IFF_RUNNING))
			vtnet_rx_filter_mac(sc);
		lwkt_serialize_exit(&sc->vtnet_slz);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->vtnet_media, cmd);
		break;

	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;

		lwkt_serialize_enter(&sc->vtnet_slz);

		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if (ifp->if_capenable & IFCAP_TXCSUM)
				ifp->if_hwassist |= VTNET_CSUM_OFFLOAD;
			else
				ifp->if_hwassist &= ~VTNET_CSUM_OFFLOAD;
		}

		if (mask & IFCAP_TSO4) {
			ifp->if_capenable ^= IFCAP_TSO4;
			if (ifp->if_capenable & IFCAP_TSO4)
				ifp->if_hwassist |= CSUM_TSO;
			else
				ifp->if_hwassist &= ~CSUM_TSO;
		}

		if (mask & IFCAP_RXCSUM) {
			ifp->if_capenable ^= IFCAP_RXCSUM;
			reinit = 1;
		}

		if (mask & IFCAP_LRO) {
			ifp->if_capenable ^= IFCAP_LRO;
			reinit = 1;
		}

		if (mask & IFCAP_VLAN_HWFILTER) {
			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
			reinit = 1;
		}

		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;

		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;

		if (reinit && (ifp->if_flags & IFF_RUNNING)) {
			ifp->if_flags &= ~IFF_RUNNING;
			vtnet_init_locked(sc);
		}
		//VLAN_CAPABILITIES(ifp);

		lwkt_serialize_exit(&sc->vtnet_slz);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}

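/*
 * Select an rx mbuf cluster size large enough for the new frame
 * size; without mergeable buffers, frames larger than MJUM9BYTES
 * are rejected outright.
 */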
static int
vtnet_change_mtu(struct vtnet_softc *sc, int new_mtu)
{
	struct ifnet *ifp;
	int new_frame_size, clsize;

	ifp = sc->vtnet_ifp;

	if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
		new_frame_size = sizeof(struct vtnet_rx_header) +
		    sizeof(struct ether_vlan_header) + new_mtu;

		if (new_frame_size > MJUM9BYTES)
			return (EINVAL);

		if (new_frame_size <= MCLBYTES)
			clsize = MCLBYTES;
		else
			clsize = MJUM9BYTES;
	} else {
		new_frame_size = sizeof(struct virtio_net_hdr_mrg_rxbuf) +
		    sizeof(struct ether_vlan_header) + new_mtu;

		if (new_frame_size <= MCLBYTES)
			clsize = MCLBYTES;
		else
			clsize = MJUMPAGESIZE;
	}

	sc->vtnet_rx_mbuf_size = clsize;
	sc->vtnet_rx_mbuf_count = VTNET_NEEDED_RX_MBUFS(sc);
	KASSERT(sc->vtnet_rx_mbuf_count < VTNET_MAX_RX_SEGS,
	    ("too many rx mbufs: %d", sc->vtnet_rx_mbuf_count));

	ifp->if_mtu = new_mtu;

	if (ifp->if_flags & IFF_RUNNING) {
		ifp->if_flags &= ~IFF_RUNNING;
		vtnet_init_locked(sc);
	}

	return (0);
}

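/* Populate the receive virtqueue with as many buffers as it will hold. */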
static int
vtnet_init_rx_vq(struct vtnet_softc *sc)
{
	struct virtqueue *vq;
	int nbufs, error;

	vq = sc->vtnet_rx_vq;
	nbufs = 0;
	error = ENOSPC;

	while (!virtqueue_full(vq)) {
		if ((error = vtnet_newbuf(sc)) != 0)
			break;
		nbufs++;
	}

	if (nbufs > 0) {
		virtqueue_notify(vq, &sc->vtnet_slz);

		/*
		 * EMSGSIZE signifies the virtqueue did not have enough
		 * entries available to hold the last mbuf. This is not
		 * an error. We should not get ENOSPC since we check if
		 * the virtqueue is full before attempting to add a
		 * buffer.
		 */
		if (error == EMSGSIZE)
			error = 0;
	}

	return (error);
}

static void
vtnet_free_rx_mbufs(struct vtnet_softc *sc)
{
	struct virtqueue *vq;
	struct mbuf *m;
	int last;

	vq = sc->vtnet_rx_vq;
	last = 0;

	while ((m = virtqueue_drain(vq, &last)) != NULL)
		m_freem(m);

	KASSERT(virtqueue_empty(vq), ("mbufs remaining in Rx Vq"));
}

static void
vtnet_free_tx_mbufs(struct vtnet_softc *sc)
{
	struct virtqueue *vq;
	struct vtnet_tx_header *txhdr;
	int last;

	vq = sc->vtnet_tx_vq;
	last = 0;

	while ((txhdr = virtqueue_drain(vq, &last)) != NULL) {
		m_freem(txhdr->vth_mbuf);
		vtnet_enqueue_txhdr(sc, txhdr);
	}

	KASSERT(virtqueue_empty(vq), ("mbufs remaining in Tx Vq"));
}

static void
vtnet_free_ctrl_vq(struct vtnet_softc *sc)
{
	/*
	 * The control virtqueue is only polled, therefore
	 * it should already be empty.
	 */
	KASSERT(virtqueue_empty(sc->vtnet_ctrl_vq),
		("Ctrl Vq not empty"));
}

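/*
 * Allocate a receive buffer of nbufs cluster mbufs; chains longer
 * than one are only used in the LRO_NOMRG case. The tail of the
 * chain is optionally returned through m_tailp.
 */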
static struct mbuf *
vtnet_alloc_rxbuf(struct vtnet_softc *sc, int nbufs, struct mbuf **m_tailp)
{
	struct mbuf *m_head, *m_tail, *m;
	int i, clsize;

	clsize = sc->vtnet_rx_mbuf_size;

	/*
	 * Use m_getcl() instead of m_getjcl(); see the comment around
	 * line 2398 of if_mxge.c.
	 */
	m_head = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m_head == NULL)
		goto fail;

	m_head->m_len = clsize;
	m_tail = m_head;

	if (nbufs > 1) {
		KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG,
			("chained Rx mbuf requested without LRO_NOMRG"));

		for (i = 0; i < nbufs - 1; i++) {
			m = m_getcl(M_NOWAIT, MT_DATA, 0);
			if (m == NULL)
				goto fail;

			m->m_len = clsize;
			m_tail->m_next = m;
			m_tail = m;
		}
	}

	if (m_tailp != NULL)
		*m_tailp = m_tail;

	return (m_head);

fail:
	sc->vtnet_stats.mbuf_alloc_failed++;
	m_freem(m_head);

	return (NULL);
}

static int
vtnet_replace_rxbuf(struct vtnet_softc *sc, struct mbuf *m0, int len0)
{
	struct mbuf *m, *m_prev;
	struct mbuf *m_new, *m_tail;
	int len, clsize, nreplace, error;

	m = m0;
	m_prev = NULL;
	len = len0;

	m_tail = NULL;
	clsize = sc->vtnet_rx_mbuf_size;
	nreplace = 0;

	if (m->m_next != NULL)
		KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG,
		    ("chained Rx mbuf without LRO_NOMRG"));

	/*
	 * Since LRO_NOMRG mbuf chains are so large, we want to avoid
	 * allocating an entire chain for each received frame. When
	 * the received frame's length is less than that of the chain,
	 * the unused mbufs are reassigned to the new chain.
	 */
	while (len > 0) {
		/*
		 * Something is seriously wrong if we received
		 * a frame larger than the mbuf chain. Drop it.
		 */
		if (m == NULL) {
			sc->vtnet_stats.rx_frame_too_large++;
			return (EMSGSIZE);
		}

		KASSERT(m->m_len == clsize,
		    ("mbuf length not expected cluster size: %d",
		    m->m_len));

		m->m_len = MIN(m->m_len, len);
		len -= m->m_len;

		m_prev = m;
		m = m->m_next;
		nreplace++;
	}

	KASSERT(m_prev != NULL, ("m_prev == NULL"));
	KASSERT(nreplace <= sc->vtnet_rx_mbuf_count,
		("too many replacement mbufs: %d/%d", nreplace,
		sc->vtnet_rx_mbuf_count));

	m_new = vtnet_alloc_rxbuf(sc, nreplace, &m_tail);
	if (m_new == NULL) {
		m_prev->m_len = clsize;
		return (ENOBUFS);
	}

	/*
	 * Move unused mbufs, if any, from the original chain
	 * onto the end of the new chain.
	 */
	if (m_prev->m_next != NULL) {
		m_tail->m_next = m_prev->m_next;
		m_prev->m_next = NULL;
	}

	error = vtnet_enqueue_rxbuf(sc, m_new);
	if (error) {
		/*
		 * BAD! We could not enqueue the replacement mbuf chain. We
		 * must restore the m0 chain to the original state if it was
		 * modified so we can subsequently discard it.
		 *
		 * NOTE: The replacement is supposed to be an identical copy
		 * of the one just dequeued, so this is an unexpected error.
		 */
		sc->vtnet_stats.rx_enq_replacement_failed++;

		if (m_tail->m_next != NULL) {
			m_prev->m_next = m_tail->m_next;
			m_tail->m_next = NULL;
		}

		m_prev->m_len = clsize;
		m_freem(m_new);
	}

	return (error);
}

static int
vtnet_newbuf(struct vtnet_softc *sc)
{
	struct mbuf *m;
	int error;

	m = vtnet_alloc_rxbuf(sc, sc->vtnet_rx_mbuf_count, NULL);
	if (m == NULL)
		return (ENOBUFS);

	error = vtnet_enqueue_rxbuf(sc, m);
	if (error)
		m_freem(m);

	return (error);
}

static void
vtnet_discard_merged_rxbuf(struct vtnet_softc *sc, int nbufs)
{
	struct virtqueue *vq;
	struct mbuf *m;

	vq = sc->vtnet_rx_vq;

	while (--nbufs > 0) {
		if ((m = virtqueue_dequeue(vq, NULL)) == NULL)
			break;
		vtnet_discard_rxbuf(sc, m);
	}
}

static void
vtnet_discard_rxbuf(struct vtnet_softc *sc, struct mbuf *m)
{
	int error;

	/*
	 * Requeue the discarded mbuf. This should always be
	 * successful since it was just dequeued.
	 */
	error = vtnet_enqueue_rxbuf(sc, m);
	KASSERT(error == 0, ("cannot requeue discarded mbuf"));
}

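/*
 * Add an rx mbuf (chain) to the virtqueue. Without mergeable buffers
 * the virtio header lives inside the vtnet_rx_header at the start of
 * the cluster and is appended as its own sglist segment; with them,
 * the header simply occupies the first bytes of the data area.
 */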
static int
vtnet_enqueue_rxbuf(struct vtnet_softc *sc, struct mbuf *m)
{
	struct sglist sg;
	struct sglist_seg segs[VTNET_MAX_RX_SEGS];
	struct vtnet_rx_header *rxhdr;
	struct virtio_net_hdr *hdr;
	uint8_t *mdata;
	int offset, error;

	ASSERT_SERIALIZED(&sc->vtnet_slz);
	if ((sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG) == 0)
		KASSERT(m->m_next == NULL, ("chained Rx mbuf"));

	sglist_init(&sg, sc->vtnet_rx_nsegs, segs);

	mdata = mtod(m, uint8_t *);
	offset = 0;

	if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
		rxhdr = (struct vtnet_rx_header *) mdata;
		hdr = &rxhdr->vrh_hdr;
		offset += sizeof(struct vtnet_rx_header);

		error = sglist_append(&sg, hdr, sc->vtnet_hdr_size);
		KASSERT(error == 0, ("cannot add header to sglist"));
	}

	error = sglist_append(&sg, mdata + offset, m->m_len - offset);
	if (error)
		return (error);

	if (m->m_next != NULL) {
		error = sglist_append_mbuf(&sg, m->m_next);
		if (error)
			return (error);
	}

	return (virtqueue_enqueue(sc->vtnet_rx_vq, m, &sg, 0, sg.sg_nseg));
}

static void
vtnet_vlan_tag_remove(struct mbuf *m)
{
	struct ether_vlan_header *evl;

	evl = mtod(m, struct ether_vlan_header *);

	m->m_pkthdr.ether_vlantag = ntohs(evl->evl_tag);
	m->m_flags |= M_VLANTAG;

	/* Strip the 802.1Q header. */
	bcopy((char *) evl, (char *) evl + ETHER_VLAN_ENCAP_LEN,
	    ETHER_HDR_LEN - ETHER_TYPE_LEN);
	m_adj(m, ETHER_VLAN_ENCAP_LEN);
}

/*
 * Alternative method of doing receive checksum offloading. Rather
 * than parsing the received frame down to the IP header, use the
 * csum_offset to determine which CSUM_* flags are appropriate. We
 * can get by with doing this only because the checksum offsets are
 * unique for the things we care about.
 */
static int
vtnet_rx_csum(struct vtnet_softc *sc, struct mbuf *m,
    struct virtio_net_hdr *hdr)
{
	struct ether_header *eh;
	struct ether_vlan_header *evh;
	struct udphdr *udp;
	int csum_len;
	uint16_t eth_type;

	csum_len = hdr->csum_start + hdr->csum_offset;

	if (csum_len < sizeof(struct ether_header) + sizeof(struct ip))
		return (1);
	if (m->m_len < csum_len)
		return (1);

	eh = mtod(m, struct ether_header *);
	eth_type = ntohs(eh->ether_type);
	if (eth_type == ETHERTYPE_VLAN) {
		evh = mtod(m, struct ether_vlan_header *);
		eth_type = ntohs(evh->evl_proto);
	}

	if (eth_type != ETHERTYPE_IP && eth_type != ETHERTYPE_IPV6) {
		sc->vtnet_stats.rx_csum_bad_ethtype++;
		return (1);
	}

	/* Use the offset to determine the appropriate CSUM_* flags. */
	switch (hdr->csum_offset) {
	case offsetof(struct udphdr, uh_sum):
		if (m->m_len < hdr->csum_start + sizeof(struct udphdr))
			return (1);
		udp = (struct udphdr *)(mtod(m, uint8_t *) + hdr->csum_start);
		if (udp->uh_sum == 0)
			return (0);

		/* FALLTHROUGH */

	case offsetof(struct tcphdr, th_sum):
		m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
		m->m_pkthdr.csum_data = 0xFFFF;
		break;

	default:
		sc->vtnet_stats.rx_csum_bad_offset++;
		return (1);
	}

	sc->vtnet_stats.rx_csum_offloaded++;

	return (0);
}

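/*
 * Collect the remaining nbufs - 1 buffers of a mergeable frame and
 * append them to m_head, replacing each dequeued buffer so the
 * virtqueue stays full.
 */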
static int
vtnet_rxeof_merged(struct vtnet_softc *sc, struct mbuf *m_head, int nbufs)
{
	struct ifnet *ifp;
	struct virtqueue *vq;
	struct mbuf *m, *m_tail;
	int len;

	ifp = sc->vtnet_ifp;
	vq = sc->vtnet_rx_vq;
	m_tail = m_head;

	while (--nbufs > 0) {
		m = virtqueue_dequeue(vq, &len);
		if (m == NULL) {
			ifp->if_ierrors++;
			goto fail;
		}

		if (vtnet_newbuf(sc) != 0) {
			ifp->if_iqdrops++;
			vtnet_discard_rxbuf(sc, m);
			if (nbufs > 1)
				vtnet_discard_merged_rxbuf(sc, nbufs);
			goto fail;
		}

		if (m->m_len < len)
			len = m->m_len;

		m->m_len = len;
		m->m_flags &= ~M_PKTHDR;

		m_head->m_pkthdr.len += len;
		m_tail->m_next = m;
		m_tail = m;
	}

	return (0);

fail:
	sc->vtnet_stats.rx_mergeable_failed++;
	m_freem(m_head);

	return (1);
}

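/*
 * Main receive loop: dequeue up to count frames, replace each buffer,
 * strip the virtio header, apply VLAN/checksum fixups, and pass the
 * packet up the stack. Returns EAGAIN when the limit was hit with
 * work possibly remaining.
 */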
static int
vtnet_rxeof(struct vtnet_softc *sc, int count, int *rx_npktsp)
{
	struct virtio_net_hdr lhdr;
	struct ifnet *ifp;
	struct virtqueue *vq;
	struct mbuf *m;
	struct ether_header *eh;
	struct virtio_net_hdr *hdr;
	struct virtio_net_hdr_mrg_rxbuf *mhdr;
	int len, deq, nbufs, adjsz, rx_npkts;

	ifp = sc->vtnet_ifp;
	vq = sc->vtnet_rx_vq;
	hdr = &lhdr;
	deq = 0;
	rx_npkts = 0;

	ASSERT_SERIALIZED(&sc->vtnet_slz);

	while (--count >= 0) {
		m = virtqueue_dequeue(vq, &len);
		if (m == NULL)
			break;
		deq++;

		if (len < sc->vtnet_hdr_size + ETHER_HDR_LEN) {
			ifp->if_ierrors++;
			vtnet_discard_rxbuf(sc, m);
			continue;
		}

		if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
			nbufs = 1;
			adjsz = sizeof(struct vtnet_rx_header);
			/*
			 * Account for our pad between the header and
			 * the actual start of the frame.
			 */
			len += VTNET_RX_HEADER_PAD;
		} else {
			mhdr = mtod(m, struct virtio_net_hdr_mrg_rxbuf *);
			nbufs = mhdr->num_buffers;
			adjsz = sizeof(struct virtio_net_hdr_mrg_rxbuf);
		}

		if (vtnet_replace_rxbuf(sc, m, len) != 0) {
			ifp->if_iqdrops++;
			vtnet_discard_rxbuf(sc, m);
			if (nbufs > 1)
				vtnet_discard_merged_rxbuf(sc, nbufs);
			continue;
		}

		m->m_pkthdr.len = len;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.csum_flags = 0;

		if (nbufs > 1) {
			if (vtnet_rxeof_merged(sc, m, nbufs) != 0)
				continue;
		}

		ifp->if_ipackets++;

		/*
		 * Save copy of header before we strip it. For both mergeable
		 * and non-mergeable, the VirtIO header is placed first in the
		 * mbuf's data. We no longer need num_buffers, so always use a
		 * virtio_net_hdr.
		 */
		memcpy(hdr, mtod(m, void *), sizeof(struct virtio_net_hdr));
		m_adj(m, adjsz);

		if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
			eh = mtod(m, struct ether_header *);
			if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
				vtnet_vlan_tag_remove(m);

				/*
				 * With the 802.1Q header removed, update the
				 * checksum starting location accordingly.
				 */
				if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
					hdr->csum_start -=
					    ETHER_VLAN_ENCAP_LEN;
			}
		}

		if (ifp->if_capenable & IFCAP_RXCSUM &&
		    hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
			if (vtnet_rx_csum(sc, m, hdr) != 0)
				sc->vtnet_stats.rx_csum_failed++;
		}

		lwkt_serialize_exit(&sc->vtnet_slz);
		rx_npkts++;
		ifp->if_input(ifp, m, NULL, -1);
		lwkt_serialize_enter(&sc->vtnet_slz);

		/*
		 * The interface may have been stopped while we were
		 * passing the packet up the network stack.
		 */
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			break;
	}

	virtqueue_notify(vq, &sc->vtnet_slz);

	if (rx_npktsp != NULL)
		*rx_npktsp = rx_npkts;

	return (count > 0 ? 0 : EAGAIN);
}

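/*
 * Deferred receive processing. Loop until a pass leaves the queue
 * quiescent; re-enabling the interrupt can race with late arrivals,
 * in which case vtnet_enable_rx_intr() fails and another pass is
 * made.
 */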
static void
vtnet_rx_intr_task(void *arg)
{
	struct vtnet_softc *sc;
	struct ifnet *ifp;
	int more;

	sc = arg;
	ifp = sc->vtnet_ifp;

next:
//	lwkt_serialize_enter(&sc->vtnet_slz);

	if ((ifp->if_flags & IFF_RUNNING) == 0) {
		vtnet_enable_rx_intr(sc);
//		lwkt_serialize_exit(&sc->vtnet_slz);
		return;
	}

	more = vtnet_rxeof(sc, sc->vtnet_rx_process_limit, NULL);
	if (!more && vtnet_enable_rx_intr(sc) != 0) {
		vtnet_disable_rx_intr(sc);
		more = 1;
	}

//	lwkt_serialize_exit(&sc->vtnet_slz);

	if (more) {
		sc->vtnet_stats.rx_task_rescheduled++;
		goto next;
	}
}

static int
vtnet_rx_vq_intr(void *xsc)
{
	struct vtnet_softc *sc;

	sc = xsc;

	vtnet_disable_rx_intr(sc);
	vtnet_rx_intr_task(sc);

	return (1);
}

static void
vtnet_enqueue_txhdr(struct vtnet_softc *sc, struct vtnet_tx_header *txhdr)
{
	bzero(txhdr, sizeof(*txhdr));
	SLIST_INSERT_HEAD(&sc->vtnet_txhdr_free, txhdr, link);
}

static void
vtnet_txeof(struct vtnet_softc *sc)
{
	struct virtqueue *vq;
	struct ifnet *ifp;
	struct vtnet_tx_header *txhdr;
	int deq;

	vq = sc->vtnet_tx_vq;
	ifp = sc->vtnet_ifp;
	deq = 0;

	ASSERT_SERIALIZED(&sc->vtnet_slz);

	while ((txhdr = virtqueue_dequeue(vq, NULL)) != NULL) {
		deq++;
		ifp->if_opackets++;
		m_freem(txhdr->vth_mbuf);
		vtnet_enqueue_txhdr(sc, txhdr);
	}

	if (deq > 0) {
		ifq_clr_oactive(&ifp->if_snd);
		if (virtqueue_empty(vq))
			sc->vtnet_watchdog_timer = 0;
	}
}

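/*
 * Populate the virtio header for checksum and TSO offload from the
 * mbuf's csum_flags. May m_pullup() the mbuf; returns NULL (with the
 * mbuf consumed) if the packet cannot be offloaded.
 */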
static struct mbuf *
vtnet_tx_offload(struct vtnet_softc *sc, struct mbuf *m,
    struct virtio_net_hdr *hdr)
{
	struct ifnet *ifp;
	struct ether_header *eh;
	struct ether_vlan_header *evh;
	struct ip *ip;
	struct ip6_hdr *ip6;
	struct tcphdr *tcp;
	int ip_offset;
	uint16_t eth_type, csum_start;
	uint8_t ip_proto, gso_type;

	ifp = sc->vtnet_ifp;
	M_ASSERTPKTHDR(m);

	ip_offset = sizeof(struct ether_header);
	if (m->m_len < ip_offset) {
		if ((m = m_pullup(m, ip_offset)) == NULL)
			return (NULL);
	}

	eh = mtod(m, struct ether_header *);
	eth_type = ntohs(eh->ether_type);
	if (eth_type == ETHERTYPE_VLAN) {
		ip_offset = sizeof(struct ether_vlan_header);
		if (m->m_len < ip_offset) {
			if ((m = m_pullup(m, ip_offset)) == NULL)
				return (NULL);
		}
		evh = mtod(m, struct ether_vlan_header *);
		eth_type = ntohs(evh->evl_proto);
	}

	switch (eth_type) {
	case ETHERTYPE_IP:
		if (m->m_len < ip_offset + sizeof(struct ip)) {
			m = m_pullup(m, ip_offset + sizeof(struct ip));
			if (m == NULL)
				return (NULL);
		}

		ip = (struct ip *)(mtod(m, uint8_t *) + ip_offset);
		ip_proto = ip->ip_p;
		csum_start = ip_offset + (ip->ip_hl << 2);
		gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
		break;

	case ETHERTYPE_IPV6:
		if (m->m_len < ip_offset + sizeof(struct ip6_hdr)) {
			m = m_pullup(m, ip_offset + sizeof(struct ip6_hdr));
			if (m == NULL)
				return (NULL);
		}

		ip6 = (struct ip6_hdr *)(mtod(m, uint8_t *) + ip_offset);
		/*
		 * XXX Assume no extension headers are present. Presently,
		 * this will always be true in the case of TSO, and FreeBSD
		 * does not perform checksum offloading of IPv6 yet.
		 */
		ip_proto = ip6->ip6_nxt;
		csum_start = ip_offset + sizeof(struct ip6_hdr);
		gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
		break;

	default:
		return (m);
	}

	if (m->m_pkthdr.csum_flags & VTNET_CSUM_OFFLOAD) {
		hdr->flags |= VIRTIO_NET_HDR_F_NEEDS_CSUM;
		hdr->csum_start = csum_start;
		hdr->csum_offset = m->m_pkthdr.csum_data;

		sc->vtnet_stats.tx_csum_offloaded++;
	}

	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
		if (ip_proto != IPPROTO_TCP)
			return (m);

		if (m->m_len < csum_start + sizeof(struct tcphdr)) {
			m = m_pullup(m, csum_start + sizeof(struct tcphdr));
			if (m == NULL)
				return (NULL);
		}

		tcp = (struct tcphdr *)(mtod(m, uint8_t *) + csum_start);
		hdr->gso_type = gso_type;
		hdr->hdr_len = csum_start + (tcp->th_off << 2);
		hdr->gso_size = m->m_pkthdr.tso_segsz;

		if (tcp->th_flags & TH_CWR) {
			/*
			 * Drop if we did not negotiate VIRTIO_NET_F_HOST_ECN.
			 * ECN support is only configurable globally with the
			 * net.inet.tcp.ecn.enable sysctl knob.
			 */
			if ((sc->vtnet_flags & VTNET_FLAG_TSO_ECN) == 0) {
				if_printf(ifp, "TSO with ECN not supported "
				    "by host\n");
				m_freem(m);
				return (NULL);
			}

			hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
		}

		sc->vtnet_stats.tx_tso_offloaded++;
	}

	return (m);
}

static int
vtnet_enqueue_txbuf(struct vtnet_softc *sc, struct mbuf **m_head,
    struct vtnet_tx_header *txhdr)
{
	struct sglist sg;
	struct sglist_seg segs[VTNET_MAX_TX_SEGS];
	struct virtqueue *vq;
	struct mbuf *m;
	int error;

	vq = sc->vtnet_tx_vq;
	m = *m_head;

	sglist_init(&sg, sc->vtnet_tx_nsegs, segs);
	error = sglist_append(&sg, &txhdr->vth_uhdr, sc->vtnet_hdr_size);
	KASSERT(error == 0 && sg.sg_nseg == 1,
	    ("%s: error %d adding header to sglist", __func__, error));

	error = sglist_append_mbuf(&sg, m);
	if (error) {
		m = m_defrag(m, M_NOWAIT);
		if (m == NULL)
			goto fail;

		*m_head = m;
		sc->vtnet_stats.tx_defragged++;

		error = sglist_append_mbuf(&sg, m);
		if (error)
			goto fail;
	}

	txhdr->vth_mbuf = m;
	error = virtqueue_enqueue(vq, txhdr, &sg, sg.sg_nseg, 0);

	return (error);

fail:
	sc->vtnet_stats.tx_defrag_failed++;
	m_freem(*m_head);
	*m_head = NULL;

	return (ENOBUFS);
}

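/*
 * The host does no VLAN tag insertion, so encapsulate the tag into
 * the frame in software before transmit.
 */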
static struct mbuf *
vtnet_vlan_tag_insert(struct mbuf *m)
{
	struct mbuf *n;
	struct ether_vlan_header *evl;

	if (M_WRITABLE(m) == 0) {
		n = m_dup(m, M_NOWAIT);
		m_freem(m);
		if ((m = n) == NULL)
			return (NULL);
	}

	M_PREPEND(m, ETHER_VLAN_ENCAP_LEN, M_NOWAIT);
	if (m == NULL)
		return (NULL);
	if (m->m_len < sizeof(struct ether_vlan_header)) {
		m = m_pullup(m, sizeof(struct ether_vlan_header));
		if (m == NULL)
			return (NULL);
	}

	/* Insert 802.1Q header into the existing Ethernet header. */
	evl = mtod(m, struct ether_vlan_header *);
	bcopy((char *) evl + ETHER_VLAN_ENCAP_LEN,
	      (char *) evl, ETHER_HDR_LEN - ETHER_TYPE_LEN);
	evl->evl_encap_proto = htons(ETHERTYPE_VLAN);
	evl->evl_tag = htons(m->m_pkthdr.ether_vlantag);
	m->m_flags &= ~M_VLANTAG;

	return (m);
}

static int
vtnet_encap(struct vtnet_softc *sc, struct mbuf **m_head)
{
	struct vtnet_tx_header *txhdr;
	struct virtio_net_hdr *hdr;
	struct mbuf *m;
	int error;

	txhdr = SLIST_FIRST(&sc->vtnet_txhdr_free);
	if (txhdr == NULL)
		return (ENOBUFS);
	SLIST_REMOVE_HEAD(&sc->vtnet_txhdr_free, link);

	/*
	 * Always use the non-mergeable header to simplify things. When
	 * the mergeable feature is negotiated, the num_buffers field
	 * must be set to zero. We use vtnet_hdr_size later to enqueue
	 * the correct header size to the host.
	 */
	hdr = &txhdr->vth_uhdr.hdr;
	m = *m_head;

	error = ENOBUFS;

	if (m->m_flags & M_VLANTAG) {
		//m = ether_vlanencap(m, m->m_pkthdr.ether_vtag);
		m = vtnet_vlan_tag_insert(m);
		if ((*m_head = m) == NULL)
			goto fail;
		m->m_flags &= ~M_VLANTAG;
	}

	if (m->m_pkthdr.csum_flags != 0) {
		m = vtnet_tx_offload(sc, m, hdr);
		if ((*m_head = m) == NULL)
			goto fail;
	}

	error = vtnet_enqueue_txbuf(sc, m_head, txhdr);
fail:
	if (error != 0)
		vtnet_enqueue_txhdr(sc, txhdr);
	return (error);
}

static void
vtnet_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
{
	struct vtnet_softc *sc;

	sc = ifp->if_softc;

	ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq);
	lwkt_serialize_enter(&sc->vtnet_slz);
	vtnet_start_locked(ifp, ifsq);
	lwkt_serialize_exit(&sc->vtnet_slz);
}

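/*
 * Drain the send queue into the tx virtqueue. With
 * VTNET_TX_INTR_MODERATION defined, completed transmits are also
 * reaped here once the ring is at least half full, instead of
 * relying on the tx interrupt.
 */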
static void
vtnet_start_locked(struct ifnet *ifp, struct ifaltq_subque *ifsq)
{
	struct vtnet_softc *sc;
	struct virtqueue *vq;
	struct mbuf *m0;
	int enq;

	sc = ifp->if_softc;
	vq = sc->vtnet_tx_vq;
	enq = 0;

	ASSERT_SERIALIZED(&sc->vtnet_slz);

	if ((ifp->if_flags & IFF_RUNNING) == 0 ||
	    (sc->vtnet_flags & VTNET_FLAG_LINK) == 0)
		return;

#ifdef VTNET_TX_INTR_MODERATION
	if (virtqueue_nused(vq) >= sc->vtnet_tx_size / 2)
		vtnet_txeof(sc);
#endif

	while (!ifsq_is_empty(ifsq)) {
		if (virtqueue_full(vq)) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		m0 = ifq_dequeue(&ifp->if_snd);
		if (m0 == NULL)
			break;

		if (vtnet_encap(sc, &m0) != 0) {
			if (m0 == NULL)
				break;
			ifq_prepend(&ifp->if_snd, m0);
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		enq++;
		ETHER_BPF_MTAP(ifp, m0);
	}

	if (enq > 0) {
		virtqueue_notify(vq, &sc->vtnet_slz);
		sc->vtnet_watchdog_timer = VTNET_WATCHDOG_TIMEOUT;
	}
}

static void
vtnet_tick(void *xsc)
{
	struct vtnet_softc *sc;

	sc = xsc;

#if 0
	ASSERT_SERIALIZED(&sc->vtnet_slz);
#ifdef VTNET_DEBUG
	virtqueue_dump(sc->vtnet_rx_vq);
	virtqueue_dump(sc->vtnet_tx_vq);
#endif

	vtnet_watchdog(sc);
	callout_reset(&sc->vtnet_tick_ch, hz, vtnet_tick, sc);
#endif
}

static void
vtnet_tx_intr_task(void *arg)
{
	struct vtnet_softc *sc;
	struct ifnet *ifp;
	struct ifaltq_subque *ifsq;

	sc = arg;
	ifp = sc->vtnet_ifp;
	ifsq = ifq_get_subq_default(&ifp->if_snd);

next:
//	lwkt_serialize_enter(&sc->vtnet_slz);

	if ((ifp->if_flags & IFF_RUNNING) == 0) {
		vtnet_enable_tx_intr(sc);
//		lwkt_serialize_exit(&sc->vtnet_slz);
		return;
	}

	vtnet_txeof(sc);

	if (!ifsq_is_empty(ifsq))
		vtnet_start_locked(ifp, ifsq);

	if (vtnet_enable_tx_intr(sc) != 0) {
		vtnet_disable_tx_intr(sc);
		sc->vtnet_stats.tx_task_rescheduled++;
//		lwkt_serialize_exit(&sc->vtnet_slz);
		goto next;
	}

//	lwkt_serialize_exit(&sc->vtnet_slz);
}

static int
vtnet_tx_vq_intr(void *xsc)
{
	struct vtnet_softc *sc;

	sc = xsc;

	vtnet_disable_tx_intr(sc);
	vtnet_tx_intr_task(sc);

	return (1);
}

1956 static void
1957 vtnet_stop(struct vtnet_softc *sc)
1958 {
1959 	device_t dev;
1960 	struct ifnet *ifp;
1961 
1962 	dev = sc->vtnet_dev;
1963 	ifp = sc->vtnet_ifp;
1964 
1965 	ASSERT_SERIALIZED(&sc->vtnet_slz);
1966 
1967 	sc->vtnet_watchdog_timer = 0;
1968 	callout_stop(&sc->vtnet_tick_ch);
1969 	ifq_clr_oactive(&ifp->if_snd);
1970 	ifp->if_flags &= ~(IFF_RUNNING);
1971 
1972 	vtnet_disable_rx_intr(sc);
1973 	vtnet_disable_tx_intr(sc);
1974 
1975 	/*
1976 	 * Stop the host VirtIO adapter. Note this will reset the host
1977 	 * adapter's state back to the pre-initialized state, so in
1978 	 * order to make the device usable again, we must drive it
1979 	 * through virtio_reinit() and virtio_reinit_complete().
1980 	 */
1981 	virtio_stop(dev);
1982 
1983 	sc->vtnet_flags &= ~VTNET_FLAG_LINK;
1984 
1985 	vtnet_free_rx_mbufs(sc);
1986 	vtnet_free_tx_mbufs(sc);
1987 }
1988 
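/*
 * Drive the host device back through reset and feature negotiation,
 * masking out any receive-side features the user has disabled via
 * if_capenable.
 */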
1989 static int
1990 vtnet_virtio_reinit(struct vtnet_softc *sc)
1991 {
1992 	device_t dev;
1993 	struct ifnet *ifp;
1994 	uint64_t features;
1995 	int error;
1996 
1997 	dev = sc->vtnet_dev;
1998 	ifp = sc->vtnet_ifp;
1999 	features = sc->vtnet_features;
2000 
2001 	/*
2002 	 * Re-negotiate with the host, removing any disabled receive
2003 	 * features. Transmit features are disabled only on our side
2004 	 * via if_capenable and if_hwassist.
2005 	 */
2006 
2007 	if (ifp->if_capabilities & IFCAP_RXCSUM) {
2008 		if ((ifp->if_capenable & IFCAP_RXCSUM) == 0)
2009 			features &= ~VIRTIO_NET_F_GUEST_CSUM;
2010 	}
2011 
2012 	if (ifp->if_capabilities & IFCAP_LRO) {
2013 		if ((ifp->if_capenable & IFCAP_LRO) == 0)
2014 			features &= ~VTNET_LRO_FEATURES;
2015 	}
2016 
2017 	if (ifp->if_capabilities & IFCAP_VLAN_HWFILTER) {
2018 		if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
2019 			features &= ~VIRTIO_NET_F_CTRL_VLAN;
2020 	}
2021 
2022 	error = virtio_reinit(dev, features);
2023 	if (error)
2024 		device_printf(dev, "virtio reinit error %d\n", error);
2025 
2026 	return (error);
2027 }
2028 
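/*
 * Bring the interface up: reset and reinitialize the host device,
 * reprogram the MAC address and offload flags, refill the Rx
 * virtqueue, restore any Rx/MAC/VLAN filters, and re-enable
 * interrupts.  Called with the serializer held.
 */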
2029 static void
2030 vtnet_init_locked(struct vtnet_softc *sc)
2031 {
2032 	device_t dev;
2033 	struct ifnet *ifp;
2034 	int error;
2035 
2036 	dev = sc->vtnet_dev;
2037 	ifp = sc->vtnet_ifp;
2038 
2039 	ASSERT_SERIALIZED(&sc->vtnet_slz);
2040 
2041 	if (ifp->if_flags & IFF_RUNNING)
2042 		return;
2043 
2044 	/* Stop host's adapter, cancel any pending I/O. */
2045 	vtnet_stop(sc);
2046 
2047 	/* Reinitialize the host device. */
2048 	error = vtnet_virtio_reinit(sc);
2049 	if (error) {
2050 		device_printf(dev,
2051 		    "reinitialization failed, stopping device...\n");
2052 		vtnet_stop(sc);
2053 		return;
2054 	}
2055 
2056 	/* Update host with assigned MAC address. */
2057 	bcopy(IF_LLADDR(ifp), sc->vtnet_hwaddr, ETHER_ADDR_LEN);
2058 	vtnet_set_hwaddr(sc);
2059 
2060 	ifp->if_hwassist = 0;
2061 	if (ifp->if_capenable & IFCAP_TXCSUM)
2062 		ifp->if_hwassist |= VTNET_CSUM_OFFLOAD;
2063 	if (ifp->if_capenable & IFCAP_TSO4)
2064 		ifp->if_hwassist |= CSUM_TSO;
2065 
2066 	error = vtnet_init_rx_vq(sc);
2067 	if (error) {
2068 		device_printf(dev,
2069 		    "cannot allocate mbufs for Rx virtqueue\n");
2070 		vtnet_stop(sc);
2071 		return;
2072 	}
2073 
2074 	if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) {
2075 		if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) {
2076 			/* Restore promiscuous and all-multicast modes. */
2077 			vtnet_rx_filter(sc);
2078 
2079 			/* Restore filtered MAC addresses. */
2080 			vtnet_rx_filter_mac(sc);
2081 		}
2082 
2083 		/* Restore VLAN filters. */
2084 		if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
2085 			vtnet_rx_filter_vlan(sc);
2086 	}
2087 
2088 	/* Re-enable the Rx and Tx virtqueue interrupts. */
2089 	vtnet_enable_rx_intr(sc);
2090 	vtnet_enable_tx_intr(sc);
2092 
2093 	ifp->if_flags |= IFF_RUNNING;
2094 	ifq_clr_oactive(&ifp->if_snd);
2095 
2096 	virtio_reinit_complete(dev);
2097 
2098 	vtnet_update_link_status(sc);
2099 	callout_reset(&sc->vtnet_tick_ch, hz, vtnet_tick, sc);
2100 }
2101 
2102 static void
2103 vtnet_init(void *xsc)
2104 {
2105 	struct vtnet_softc *sc;
2106 
2107 	sc = xsc;
2108 
2109 	lwkt_serialize_enter(&sc->vtnet_slz);
2110 	vtnet_init_locked(sc);
2111 	lwkt_serialize_exit(&sc->vtnet_slz);
2112 }
2113 
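/*
 * Enqueue a command on the control virtqueue, notify the host, and
 * busy-wait for its completion; see the comment below for why the
 * response is polled rather than slept on.
 */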
2114 static void
2115 vtnet_exec_ctrl_cmd(struct vtnet_softc *sc, void *cookie,
2116     struct sglist *sg, int readable, int writable)
2117 {
2118 	struct virtqueue *vq;
2119 	void *c;
2120 
2121 	vq = sc->vtnet_ctrl_vq;
2122 
2123 	ASSERT_SERIALIZED(&sc->vtnet_slz);
2124 	KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_VQ,
2125 	    ("no control virtqueue"));
2126 	KASSERT(virtqueue_empty(vq),
2127 	    ("control command already enqueued"));
2128 
2129 	if (virtqueue_enqueue(vq, cookie, sg, readable, writable) != 0)
2130 		return;
2131 
2132 	virtqueue_notify(vq, &sc->vtnet_slz);
2133 
2134 	/*
2135 	 * Poll until the command is complete. Previously, we would
2136 	 * sleep until the control virtqueue interrupt handler woke
2137 	 * us up, but dropping the vtnet_slz serializer around the
2138 	 * sleep leads to serialization difficulties.
2139 	 *
2140 	 * Furthermore, it appears QEMU/KVM only allocates three MSIX
2141 	 * vectors. Two of those vectors are needed for the Rx and Tx
2142 	 * virtqueues. We do not support sharing both a virtqueue and
2143 	 * the config changed notification on the same MSIX vector.
2144 	 */
2145 	c = virtqueue_poll(vq, NULL);
2146 	KASSERT(c == cookie, ("unexpected control command response"));
2147 }
2148 
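/*
 * Program the primary MAC address on the host with a
 * VIRTIO_NET_CTRL_MAC_ADDR_SET control command.
 */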
2149 static int
2150 vtnet_ctrl_mac_cmd(struct vtnet_softc *sc, uint8_t *hwaddr)
2151 {
2152 	struct {
2153 		struct virtio_net_ctrl_hdr hdr __aligned(2);
2154 		uint8_t pad1;
2155 		char aligned_hwaddr[ETHER_ADDR_LEN] __aligned(8);
2156 		uint8_t pad2;
2157 		uint8_t ack;
2158 	} s;
2159 	struct sglist_seg segs[3];
2160 	struct sglist sg;
2161 	int error;
2162 
2163 	s.hdr.class = VIRTIO_NET_CTRL_MAC;
2164 	s.hdr.cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET;
2165 	s.ack = VIRTIO_NET_ERR;
2166 
2167 	/* Copy the MAC address into physically contiguous memory. */
2168 	memcpy(s.aligned_hwaddr, hwaddr, ETHER_ADDR_LEN);
2169 
2170 	sglist_init(&sg, 3, segs);
2171 	error = 0;
2172 	error |= sglist_append(&sg, &s.hdr,
2173 	    sizeof(struct virtio_net_ctrl_hdr));
2174 	error |= sglist_append(&sg, s.aligned_hwaddr, ETHER_ADDR_LEN);
2175 	error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
2176 	KASSERT(error == 0 && sg.sg_nseg == 3,
2177 	    ("%s: error %d adding set MAC msg to sglist", __func__, error));
2178 
2179 	vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
2180 
2181 	return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
2182 }
2183 
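/* Push the current promiscuous and all-multicast state to the host. */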
2184 static void
2185 vtnet_rx_filter(struct vtnet_softc *sc)
2186 {
2187 	device_t dev;
2188 	struct ifnet *ifp;
2189 
2190 	dev = sc->vtnet_dev;
2191 	ifp = sc->vtnet_ifp;
2192 
2193 	ASSERT_SERIALIZED(&sc->vtnet_slz);
2194 	KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX,
2195 	    ("CTRL_RX feature not negotiated"));
2196 
2197 	if (vtnet_set_promisc(sc, ifp->if_flags & IFF_PROMISC) != 0)
2198 		device_printf(dev, "cannot %s promiscuous mode\n",
2199 		    (ifp->if_flags & IFF_PROMISC) ? "enable" : "disable");
2200 
2201 	if (vtnet_set_allmulti(sc, ifp->if_flags & IFF_ALLMULTI) != 0)
2202 		device_printf(dev, "cannot %s all-multicast mode\n",
2203 		    (ifp->if_flags & IFF_ALLMULTI) ? "enable" : "disable");
2204 }
2205 
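/*
 * Common helper for the VIRTIO_NET_CTRL_RX command class
 * (promiscuous and all-multicast on/off).
 */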
2206 static int
2207 vtnet_ctrl_rx_cmd(struct vtnet_softc *sc, int cmd, int on)
2208 {
2209 	struct sglist_seg segs[3];
2210 	struct sglist sg;
2211 	struct {
2212 		struct virtio_net_ctrl_hdr hdr __aligned(2);
2213 		uint8_t pad1;
2214 		uint8_t onoff;
2215 		uint8_t pad2;
2216 		uint8_t ack;
2217 	} s;
2218 	int error;
2219 
2220 	KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX,
2221 	    ("%s: CTRL_RX feature not negotiated", __func__));
2222 
2223 	s.hdr.class = VIRTIO_NET_CTRL_RX;
2224 	s.hdr.cmd = cmd;
2225 	s.onoff = !!on;
2226 	s.ack = VIRTIO_NET_ERR;
2227 
2228 	sglist_init(&sg, 3, segs);
2229 	error = 0;
2230 	error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
2231 	error |= sglist_append(&sg, &s.onoff, sizeof(uint8_t));
2232 	error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
2233 	KASSERT(error == 0 && sg.sg_nseg == 3,
2234 	    ("%s: error %d adding Rx message to sglist", __func__, error));
2235 
2236 	vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
2237 
2238 	return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
2239 }
2240 
2241 static int
2242 vtnet_set_promisc(struct vtnet_softc *sc, int on)
2243 {
2244 
2245 	return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_PROMISC, on));
2246 }
2247 
2248 static int
2249 vtnet_set_allmulti(struct vtnet_softc *sc, int on)
2250 {
2251 
2252 	return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_ALLMULTI, on));
2253 }
2254 
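/*
 * Load the host's unicast and multicast MAC filter tables from the
 * interface's address lists.  If either list exceeds
 * VTNET_MAX_MAC_ENTRIES, fall back to promiscuous or all-multicast
 * mode instead.
 */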
2255 static void
2256 vtnet_rx_filter_mac(struct vtnet_softc *sc)
2257 {
2258 	struct virtio_net_ctrl_hdr hdr __aligned(2);
2259 	struct vtnet_mac_filter *filter;
2260 	struct sglist_seg segs[4];
2261 	struct sglist sg;
2262 	struct ifnet *ifp;
2263 	struct ifaddr *ifa;
2264 	struct ifaddr_container *ifac;
2265 	struct ifmultiaddr *ifma;
2266 	int ucnt, mcnt, promisc, allmulti, error;
2267 	uint8_t ack;
2268 
2269 	ifp = sc->vtnet_ifp;
2270 	ucnt = 0;
2271 	mcnt = 0;
2272 	promisc = 0;
2273 	allmulti = 0;
2274 
2275 	ASSERT_SERIALIZED(&sc->vtnet_slz);
2276 	KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX,
2277 	    ("%s: CTRL_RX feature not negotiated", __func__));
2278 
2279 	/* Use the MAC filtering table allocated in vtnet_attach. */
2280 	filter = sc->vtnet_macfilter;
2281 	memset(filter, 0, sizeof(struct vtnet_mac_filter));
2282 
2283 	/* Unicast MAC addresses: */
2284 	//if_addr_rlock(ifp);
2285 	TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
2286 		ifa = ifac->ifa;
2287 		if (ifa->ifa_addr->sa_family != AF_LINK)
2288 			continue;
2289 		else if (memcmp(LLADDR((struct sockaddr_dl *)ifa->ifa_addr),
2290 		    sc->vtnet_hwaddr, ETHER_ADDR_LEN) == 0)
2291 			continue;
2292 		else if (ucnt == VTNET_MAX_MAC_ENTRIES) {
2293 			promisc = 1;
2294 			break;
2295 		}
2296 
2297 		bcopy(LLADDR((struct sockaddr_dl *)ifa->ifa_addr),
2298 		    &filter->vmf_unicast.macs[ucnt], ETHER_ADDR_LEN);
2299 		ucnt++;
2300 	}
2301 	//if_addr_runlock(ifp);
2302 
2303 	if (promisc != 0) {
2304 		filter->vmf_unicast.nentries = 0;
2305 		if_printf(ifp, "more than %d MAC addresses assigned, "
2306 		    "falling back to promiscuous mode\n",
2307 		    VTNET_MAX_MAC_ENTRIES);
2308 	} else
2309 		filter->vmf_unicast.nentries = ucnt;
2310 
2311 	/* Multicast MAC addresses: */
2312 	//if_maddr_rlock(ifp);
2313 	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2314 		if (ifma->ifma_addr->sa_family != AF_LINK)
2315 			continue;
2316 		else if (mcnt == VTNET_MAX_MAC_ENTRIES) {
2317 			allmulti = 1;
2318 			break;
2319 		}
2320 
2321 		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
2322 		    &filter->vmf_multicast.macs[mcnt], ETHER_ADDR_LEN);
2323 		mcnt++;
2324 	}
2325 	//if_maddr_runlock(ifp);
2326 
2327 	if (allmulti != 0) {
2328 		filter->vmf_multicast.nentries = 0;
2329 		if_printf(ifp, "more than %d multicast MAC addresses "
2330 		    "assigned, falling back to all-multicast mode\n",
2331 		    VTNET_MAX_MAC_ENTRIES);
2332 	} else
2333 		filter->vmf_multicast.nentries = mcnt;
2334 
2335 	if (promisc != 0 && allmulti != 0)
2336 		goto out;
2337 
2338 	hdr.class = VIRTIO_NET_CTRL_MAC;
2339 	hdr.cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;
2340 	ack = VIRTIO_NET_ERR;
2341 
2342 	sglist_init(&sg, 4, segs);
2343 	error = 0;
2344 	error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr));
2345 	error |= sglist_append(&sg, &filter->vmf_unicast,
2346 	    sizeof(uint32_t) + filter->vmf_unicast.nentries * ETHER_ADDR_LEN);
2347 	error |= sglist_append(&sg, &filter->vmf_multicast,
2348 	    sizeof(uint32_t) + filter->vmf_multicast.nentries * ETHER_ADDR_LEN);
2349 	error |= sglist_append(&sg, &ack, sizeof(uint8_t));
2350 	KASSERT(error == 0 && sg.sg_nseg == 4,
2351 	    ("%s: error %d adding MAC filter msg to sglist", __func__, error));
2352 
2353 	vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1);
2354 
2355 	if (ack != VIRTIO_NET_OK)
2356 		if_printf(ifp, "error setting host MAC filter table\n");
2357 
2358 out:
2359 	if (promisc != 0 && vtnet_set_promisc(sc, 1) != 0)
2360 		if_printf(ifp, "cannot enable promiscuous mode\n");
2361 	if (allmulti != 0 && vtnet_set_allmulti(sc, 1) != 0)
2362 		if_printf(ifp, "cannot enable all-multicast mode\n");
2363 }
2364 
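/*
 * Add or remove a single VLAN tag in the host filter with a
 * VIRTIO_NET_CTRL_VLAN command.
 */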
2365 static int
2366 vtnet_exec_vlan_filter(struct vtnet_softc *sc, int add, uint16_t tag)
2367 {
2368 	struct sglist_seg segs[3];
2369 	struct sglist sg;
2370 	struct {
2371 		struct virtio_net_ctrl_hdr hdr __aligned(2);
2372 		uint8_t pad1;
2373 		uint16_t tag;
2374 		uint8_t pad2;
2375 		uint8_t ack;
2376 	} s;
2377 	int error;
2378 
2379 	s.hdr.class = VIRTIO_NET_CTRL_VLAN;
2380 	s.hdr.cmd = add ? VIRTIO_NET_CTRL_VLAN_ADD : VIRTIO_NET_CTRL_VLAN_DEL;
2381 	s.tag = tag;
2382 	s.ack = VIRTIO_NET_ERR;
2383 
2384 	sglist_init(&sg, 3, segs);
2385 	error = 0;
2386 	error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
2387 	error |= sglist_append(&sg, &s.tag, sizeof(uint16_t));
2388 	error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
2389 	KASSERT(error == 0 && sg.sg_nseg == 3,
2390 	    ("%s: error %d adding VLAN message to sglist", __func__, error));
2391 
2392 	vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
2393 
2394 	return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
2395 }
2396 
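/*
 * Replay the shadow VLAN table into the host filter, e.g. after the
 * device has been reset.  Each 32-bit word of the shadow table
 * covers 32 consecutive VLAN IDs.
 */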
2397 static void
2398 vtnet_rx_filter_vlan(struct vtnet_softc *sc)
2399 {
2400 	uint32_t w;
2401 	uint16_t tag;
2402 	int i, bit, nvlans;
2403 
2404 	ASSERT_SERIALIZED(&sc->vtnet_slz);
2405 	KASSERT(sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER,
2406 	    ("%s: VLAN_FILTER feature not negotiated", __func__));
2407 
2408 	nvlans = sc->vtnet_nvlans;
2409 
2410 	/* Enable the filter for each configured VLAN. */
2411 	for (i = 0; i < VTNET_VLAN_SHADOW_SIZE && nvlans > 0; i++) {
2412 		w = sc->vtnet_vlan_shadow[i];
2413 		while ((bit = ffs(w) - 1) != -1) {
2414 			w &= ~(1 << bit);
2415 			tag = sizeof(w) * CHAR_BIT * i + bit;
2416 			nvlans--;
2417 
2418 			if (vtnet_exec_vlan_filter(sc, 1, tag) != 0) {
2419 				device_printf(sc->vtnet_dev,
2420 				    "cannot enable VLAN %d filter\n", tag);
2421 			}
2422 		}
2423 	}
2424 
2425 	KASSERT(nvlans == 0, ("VLAN count incorrect"));
2426 }
2427 
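/*
 * Update the shadow VLAN table and, if VLAN hardware filtering is
 * enabled, the host filter as well.  The 4096 possible tags map onto
 * 128 32-bit words: word index = tag >> 5, bit = tag & 0x1F, so tag
 * 100, for example, lives at word 3, bit 4.
 */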
2428 static void
2429 vtnet_update_vlan_filter(struct vtnet_softc *sc, int add, uint16_t tag)
2430 {
2431 	struct ifnet *ifp;
2432 	int idx, bit;
2433 
2434 	ifp = sc->vtnet_ifp;
2435 
2436 	if (tag == 0 || tag > 4095)
2437 		return;
2438 	idx = (tag >> 5) & 0x7F;
2439 	bit = tag & 0x1F;
2440 
2441 	lwkt_serialize_enter(&sc->vtnet_slz);
2442 
2443 	/* Update shadow VLAN table. */
2444 	if (add) {
2445 		sc->vtnet_nvlans++;
2446 		sc->vtnet_vlan_shadow[idx] |= (1 << bit);
2447 	} else {
2448 		sc->vtnet_nvlans--;
2449 		sc->vtnet_vlan_shadow[idx] &= ~(1 << bit);
2450 	}
2451 
2452 	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER &&
2453 	    vtnet_exec_vlan_filter(sc, add, tag) != 0) {
2454 		device_printf(sc->vtnet_dev,
2455 		    "cannot %s VLAN %d %s the host filter table\n",
2456 		    add ? "add" : "remove", tag, add ? "to" : "from");
2457 	}
2458 
2459 	lwkt_serialize_exit(&sc->vtnet_slz);
2460 }
2461 
2462 static void
2463 vtnet_register_vlan(void *arg, struct ifnet *ifp, uint16_t tag)
2464 {
2465 
2466 	if (ifp->if_softc != arg)
2467 		return;
2468 
2469 	vtnet_update_vlan_filter(arg, 1, tag);
2470 }
2471 
2472 static void
2473 vtnet_unregister_vlan(void *arg, struct ifnet *ifp, uint16_t tag)
2474 {
2475 
2476 	if (ifp->if_softc != arg)
2477 		return;
2478 
2479 	vtnet_update_vlan_filter(arg, 0, tag);
2480 }
2481 
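/*
 * Ifmedia handlers.  VirtIO exposes no real media selection, so the
 * change handler only validates the requested type and the status
 * handler reports the current link state.
 */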
2482 static int
2483 vtnet_ifmedia_upd(struct ifnet *ifp)
2484 {
2485 	struct vtnet_softc *sc;
2486 	struct ifmedia *ifm;
2487 
2488 	sc = ifp->if_softc;
2489 	ifm = &sc->vtnet_media;
2490 
2491 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2492 		return (EINVAL);
2493 
2494 	return (0);
2495 }
2496 
2497 static void
2498 vtnet_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2499 {
2500 	struct vtnet_softc *sc;
2501 
2502 	sc = ifp->if_softc;
2503 
2504 	ifmr->ifm_status = IFM_AVALID;
2505 	ifmr->ifm_active = IFM_ETHER;
2506 
2507 	lwkt_serialize_enter(&sc->vtnet_slz);
2508 	if (vtnet_is_link_up(sc) != 0) {
2509 		ifmr->ifm_status |= IFM_ACTIVE;
2510 		ifmr->ifm_active |= VTNET_MEDIATYPE;
2511 	} else
2512 		ifmr->ifm_active |= IFM_NONE;
2513 	lwkt_serialize_exit(&sc->vtnet_slz);
2514 }
2515 
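/* Register the driver statistics as read-only sysctl nodes. */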
2516 static void
2517 vtnet_add_statistics(struct vtnet_softc *sc)
2518 {
2519 	device_t dev;
2520 	struct vtnet_statistics *stats;
2521 	struct sysctl_ctx_list *ctx;
2522 	struct sysctl_oid *tree;
2523 	struct sysctl_oid_list *child;
2524 
2525 	dev = sc->vtnet_dev;
2526 	stats = &sc->vtnet_stats;
2527 	ctx = device_get_sysctl_ctx(dev);
2528 	tree = device_get_sysctl_tree(dev);
2529 	child = SYSCTL_CHILDREN(tree);
2530 
2531 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "mbuf_alloc_failed",
2532 	    CTLFLAG_RD, &stats->mbuf_alloc_failed, 0,
2533 	    "Mbuf cluster allocation failures");
2534 
2535 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_frame_too_large",
2536 	    CTLFLAG_RD, &stats->rx_frame_too_large, 0,
2537 	    "Received frame larger than the mbuf chain");
2538 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_enq_replacement_failed",
2539 	    CTLFLAG_RD, &stats->rx_enq_replacement_failed, 0,
2540 	    "Enqueuing the replacement receive mbuf failed");
2541 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_mergeable_failed",
2542 	    CTLFLAG_RD, &stats->rx_mergeable_failed, 0,
2543 	    "Mergeable buffers receive failures");
2544 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_ethtype",
2545 	    CTLFLAG_RD, &stats->rx_csum_bad_ethtype, 0,
2546 	    "Received checksum offloaded buffer with unsupported "
2547 	    "Ethernet type");
2548 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_ipproto",
2549 	    CTLFLAG_RD, &stats->rx_csum_bad_ipproto, 0,
2550 	    "Received checksum offloaded buffer with incorrect IP protocol");
2551 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_offset",
2552 	    CTLFLAG_RD, &stats->rx_csum_bad_offset, 0,
2553 	    "Received checksum offloaded buffer with incorrect offset");
2554 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_failed",
2555 	    CTLFLAG_RD, &stats->rx_csum_failed, 0,
2556 	    "Received buffer checksum offload failed");
2557 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_offloaded",
2558 	    CTLFLAG_RD, &stats->rx_csum_offloaded, 0,
2559 	    "Received buffer checksum offload succeeded");
2560 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_task_rescheduled",
2561 	    CTLFLAG_RD, &stats->rx_task_rescheduled, 0,
2562 	    "Times the receive interrupt task rescheduled itself");
2563 
2564 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_bad_ethtype",
2565 	    CTLFLAG_RD, &stats->tx_csum_bad_ethtype, 0,
2566 	    "Aborted transmit of checksum offloaded buffer with unknown "
2567 	    "Ethernet type");
2568 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_bad_ethtype",
2569 	    CTLFLAG_RD, &stats->tx_tso_bad_ethtype, 0,
2570 	    "Aborted transmit of TSO buffer with unknown Ethernet type");
2571 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_defragged",
2572 	    CTLFLAG_RD, &stats->tx_defragged, 0,
2573 	    "Transmit mbufs defragged");
2574 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_defrag_failed",
2575 	    CTLFLAG_RD, &stats->tx_defrag_failed, 0,
2576 	    "Aborted transmit of buffer because defrag failed");
2577 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_offloaded",
2578 	    CTLFLAG_RD, &stats->tx_csum_offloaded, 0,
2579 	    "Offloaded checksum of transmitted buffer");
2580 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_offloaded",
2581 	    CTLFLAG_RD, &stats->tx_tso_offloaded, 0,
2582 	    "Segmentation offload of transmitted buffer");
2583 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_task_rescheduled",
2584 	    CTLFLAG_RD, &stats->tx_task_rescheduled, 0,
2585 	    "Times the transmit interrupt task rescheduled itself");
2586 }
2587 
2588 static int
2589 vtnet_enable_rx_intr(struct vtnet_softc *sc)
2590 {
2591 
2592 	return (virtqueue_enable_intr(sc->vtnet_rx_vq));
2593 }
2594 
2595 static void
2596 vtnet_disable_rx_intr(struct vtnet_softc *sc)
2597 {
2598 
2599 	virtqueue_disable_intr(sc->vtnet_rx_vq);
2600 }
2601 
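/*
 * With Tx interrupt moderation the Tx interrupt stays masked and
 * completed descriptors are instead reclaimed from
 * vtnet_start_locked(), so report no pending work here.
 */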
2602 static int
2603 vtnet_enable_tx_intr(struct vtnet_softc *sc)
2604 {
2605 
2606 #ifdef VTNET_TX_INTR_MODERATION
2607 	return (0);
2608 #else
2609 	return (virtqueue_enable_intr(sc->vtnet_tx_vq));
2610 #endif
2611 }
2612 
2613 static void
2614 vtnet_disable_tx_intr(struct vtnet_softc *sc)
2615 {
2616 
2617 	virtqueue_disable_intr(sc->vtnet_tx_vq);
2618 }
2619