xref: /dragonfly/sys/dev/virtual/virtio/net/if_vtnet.c (revision 1fe7e945)
/*-
 * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* Driver for VirtIO network devices. */

#include <sys/cdefs.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/random.h>
#include <sys/sglist.h>
#include <sys/serialize.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <machine/limits.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_media.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>
#include <net/ifq_var.h>

#include <net/bpf.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/udp.h>
#include <netinet/tcp.h>

#include <dev/virtual/virtio/virtio/virtio.h>
#include <dev/virtual/virtio/virtio/virtqueue.h>
#include <dev/virtual/virtio/net/virtio_net.h>
#include <dev/virtual/virtio/net/if_vtnetvar.h>

#include "virtio_if.h"

MALLOC_DEFINE(M_VTNET, "VTNET_TX", "Outgoing VTNET TX frame header");

static int	vtnet_probe(device_t);
static int	vtnet_attach(device_t);
static int	vtnet_detach(device_t);
static int	vtnet_suspend(device_t);
static int	vtnet_resume(device_t);
static int	vtnet_shutdown(device_t);
static int	vtnet_config_change(device_t);

static void	vtnet_negotiate_features(struct vtnet_softc *);
static int	vtnet_alloc_virtqueues(struct vtnet_softc *);
static void	vtnet_get_hwaddr(struct vtnet_softc *);
static void	vtnet_set_hwaddr(struct vtnet_softc *);
static int	vtnet_is_link_up(struct vtnet_softc *);
static void	vtnet_update_link_status(struct vtnet_softc *);
#if 0
static void	vtnet_watchdog(struct vtnet_softc *);
#endif
static void	vtnet_config_change_task(void *, int);
static int	vtnet_setup_interface(struct vtnet_softc *);
static int	vtnet_change_mtu(struct vtnet_softc *, int);
static int	vtnet_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);

static int	vtnet_init_rx_vq(struct vtnet_softc *);
static void	vtnet_free_rx_mbufs(struct vtnet_softc *);
static void	vtnet_free_tx_mbufs(struct vtnet_softc *);
static void	vtnet_free_ctrl_vq(struct vtnet_softc *);

static struct mbuf * vtnet_alloc_rxbuf(struct vtnet_softc *, int,
		    struct mbuf **);
static int	vtnet_replace_rxbuf(struct vtnet_softc *,
		    struct mbuf *, int);
static int	vtnet_newbuf(struct vtnet_softc *);
static void	vtnet_discard_merged_rxbuf(struct vtnet_softc *, int);
static void	vtnet_discard_rxbuf(struct vtnet_softc *, struct mbuf *);
static int	vtnet_enqueue_rxbuf(struct vtnet_softc *, struct mbuf *);
static void	vtnet_vlan_tag_remove(struct mbuf *);
static int	vtnet_rx_csum(struct vtnet_softc *, struct mbuf *,
		    struct virtio_net_hdr *);
static int	vtnet_rxeof_merged(struct vtnet_softc *, struct mbuf *, int);
static int	vtnet_rxeof(struct vtnet_softc *, int, int *);
static void	vtnet_rx_intr_task(void *);
static int	vtnet_rx_vq_intr(void *);

static void	vtnet_enqueue_txhdr(struct vtnet_softc *,
		    struct vtnet_tx_header *);
static void	vtnet_txeof(struct vtnet_softc *);
static struct mbuf * vtnet_tx_offload(struct vtnet_softc *, struct mbuf *,
		    struct virtio_net_hdr *);
static int	vtnet_enqueue_txbuf(struct vtnet_softc *, struct mbuf **,
		    struct vtnet_tx_header *);
static int	vtnet_encap(struct vtnet_softc *, struct mbuf **);
static void	vtnet_start_locked(struct ifnet *, struct ifaltq_subque *);
static void	vtnet_start(struct ifnet *, struct ifaltq_subque *);
static void	vtnet_tick(void *);
static void	vtnet_tx_intr_task(void *);
static int	vtnet_tx_vq_intr(void *);

static void	vtnet_stop(struct vtnet_softc *);
static int	vtnet_virtio_reinit(struct vtnet_softc *);
static void	vtnet_init_locked(struct vtnet_softc *);
static void	vtnet_init(void *);

static void	vtnet_exec_ctrl_cmd(struct vtnet_softc *, void *,
		    struct sglist *, int, int);

static int	vtnet_ctrl_mac_cmd(struct vtnet_softc *, uint8_t *);
static int	vtnet_ctrl_rx_cmd(struct vtnet_softc *, int, int);
static int	vtnet_set_promisc(struct vtnet_softc *, int);
static int	vtnet_set_allmulti(struct vtnet_softc *, int);
static void	vtnet_rx_filter(struct vtnet_softc *sc);
static void	vtnet_rx_filter_mac(struct vtnet_softc *);

static int	vtnet_exec_vlan_filter(struct vtnet_softc *, int, uint16_t);
static void	vtnet_rx_filter_vlan(struct vtnet_softc *);
static void	vtnet_update_vlan_filter(struct vtnet_softc *, int, uint16_t);
static void	vtnet_register_vlan(void *, struct ifnet *, uint16_t);
static void	vtnet_unregister_vlan(void *, struct ifnet *, uint16_t);

static int	vtnet_ifmedia_upd(struct ifnet *);
static void	vtnet_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static void	vtnet_add_statistics(struct vtnet_softc *);

static int	vtnet_enable_rx_intr(struct vtnet_softc *);
static int	vtnet_enable_tx_intr(struct vtnet_softc *);
static void	vtnet_disable_rx_intr(struct vtnet_softc *);
static void	vtnet_disable_tx_intr(struct vtnet_softc *);

/* Tunables. */
static int vtnet_csum_disable = 0;
TUNABLE_INT("hw.vtnet.csum_disable", &vtnet_csum_disable);
static int vtnet_tso_disable = 1;
TUNABLE_INT("hw.vtnet.tso_disable", &vtnet_tso_disable);
static int vtnet_lro_disable = 0;
TUNABLE_INT("hw.vtnet.lro_disable", &vtnet_lro_disable);
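
/*
 * These are boot-time tunables read from the kernel environment;
 * e.g. setting hw.vtnet.tso_disable=0 in loader.conf(5) allows TSO
 * to be negotiated (it is disabled by default above).
 */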

/*
 * Reducing the number of transmit completed interrupts can
 * improve performance. To do so, the define below keeps the
 * Tx vq interrupt disabled and adds calls to vtnet_txeof()
 * in the start and watchdog paths. The price to pay for this
 * is that the m_free'ing of transmitted mbufs may be delayed
 * until the watchdog fires.
 */
#define VTNET_TX_INTR_MODERATION

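/*
 * Human-readable names for the VirtIO feature bits, registered with
 * the VirtIO layer in vtnet_attach() so negotiated features can be
 * printed symbolically.
 */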
static struct virtio_feature_desc vtnet_feature_desc[] = {
	{ VIRTIO_NET_F_CSUM,		"TxChecksum"	},
	{ VIRTIO_NET_F_GUEST_CSUM,	"RxChecksum"	},
	{ VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, "DynOffload"	},
	{ VIRTIO_NET_F_MAC,		"MacAddress"	},
	{ VIRTIO_NET_F_GSO,		"TxAllGSO"	},
	{ VIRTIO_NET_F_GUEST_TSO4,	"RxTSOv4"	},
	{ VIRTIO_NET_F_GUEST_TSO6,	"RxTSOv6"	},
	{ VIRTIO_NET_F_GUEST_ECN,	"RxECN"		},
	{ VIRTIO_NET_F_GUEST_UFO,	"RxUFO"		},
	{ VIRTIO_NET_F_HOST_TSO4,	"TxTSOv4"	},
	{ VIRTIO_NET_F_HOST_TSO6,	"TxTSOv6"	},
	{ VIRTIO_NET_F_HOST_ECN,	"TxTSOECN"	},
	{ VIRTIO_NET_F_HOST_UFO,	"TxUFO"		},
	{ VIRTIO_NET_F_MRG_RXBUF,	"MrgRxBuf"	},
	{ VIRTIO_NET_F_STATUS,		"Status"	},
	{ VIRTIO_NET_F_CTRL_VQ,		"ControlVq"	},
	{ VIRTIO_NET_F_CTRL_RX,		"RxMode"	},
	{ VIRTIO_NET_F_CTRL_VLAN,	"VLanFilter"	},
	{ VIRTIO_NET_F_CTRL_RX_EXTRA,	"RxModeExtra"	},
	{ VIRTIO_NET_F_GUEST_ANNOUNCE,	"GuestAnnounce"	},
	{ VIRTIO_NET_F_MQ,		"RFS"		},
	{ VIRTIO_NET_F_CTRL_MAC_ADDR,	"SetMacAddress"	},
	{ 0, NULL }
};

static device_method_t vtnet_methods[] = {
	/* Device methods. */
	DEVMETHOD(device_probe,		vtnet_probe),
	DEVMETHOD(device_attach,	vtnet_attach),
	DEVMETHOD(device_detach,	vtnet_detach),
	DEVMETHOD(device_suspend,	vtnet_suspend),
	DEVMETHOD(device_resume,	vtnet_resume),
	DEVMETHOD(device_shutdown,	vtnet_shutdown),

	/* VirtIO methods. */
	DEVMETHOD(virtio_config_change, vtnet_config_change),

	DEVMETHOD_END
};

static driver_t vtnet_driver = {
	"vtnet",
	vtnet_methods,
	sizeof(struct vtnet_softc)
};

static devclass_t vtnet_devclass;

DRIVER_MODULE(vtnet, virtio_pci, vtnet_driver, vtnet_devclass, NULL, NULL);
MODULE_VERSION(vtnet, 1);
MODULE_DEPEND(vtnet, virtio, 1, 1, 1);

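/*
 * The probe matches only devices the VirtIO bus layer has already
 * identified as network devices; anything else is rejected with ENXIO.
 */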
static int
vtnet_probe(device_t dev)
{
	if (virtio_get_device_type(dev) != VIRTIO_ID_NETWORK)
		return (ENXIO);

	device_set_desc(dev, "VirtIO Networking Adapter");

	return (BUS_PROBE_DEFAULT);
}

static int
vtnet_attach(device_t dev)
{
	struct vtnet_softc *sc;
	int error;

	sc = device_get_softc(dev);
	sc->vtnet_dev = dev;

	lwkt_serialize_init(&sc->vtnet_slz);
	callout_init(&sc->vtnet_tick_ch);

	ifmedia_init(&sc->vtnet_media, IFM_IMASK, vtnet_ifmedia_upd,
		     vtnet_ifmedia_sts);
	ifmedia_add(&sc->vtnet_media, VTNET_MEDIATYPE, 0, NULL);
	ifmedia_set(&sc->vtnet_media, VTNET_MEDIATYPE);

	vtnet_add_statistics(sc);
	SLIST_INIT(&sc->vtnet_txhdr_free);

	/* Register our feature descriptions. */
	virtio_set_feature_desc(dev, vtnet_feature_desc);
	vtnet_negotiate_features(sc);

	if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
		sc->vtnet_flags |= VTNET_FLAG_INDIRECT;

	if (virtio_with_feature(dev, VIRTIO_NET_F_MAC)) {
		/* This feature should always be negotiated. */
		sc->vtnet_flags |= VTNET_FLAG_MAC;
	}

	if (virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF)) {
		sc->vtnet_flags |= VTNET_FLAG_MRG_RXBUFS;
		sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
	} else {
		sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr);
	}

	sc->vtnet_rx_mbuf_size = MCLBYTES;
	sc->vtnet_rx_mbuf_count = VTNET_NEEDED_RX_MBUFS(sc);

	if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VQ)) {
		sc->vtnet_flags |= VTNET_FLAG_CTRL_VQ;

		if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_RX))
			sc->vtnet_flags |= VTNET_FLAG_CTRL_RX;
		if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VLAN))
			sc->vtnet_flags |= VTNET_FLAG_VLAN_FILTER;
		if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_MAC_ADDR) &&
		    virtio_with_feature(dev, VIRTIO_NET_F_CTRL_RX))
			sc->vtnet_flags |= VTNET_FLAG_CTRL_MAC;
	}

	/* Read (or generate) the MAC address for the adapter. */
	vtnet_get_hwaddr(sc);

	error = vtnet_alloc_virtqueues(sc);
	if (error) {
		device_printf(dev, "cannot allocate virtqueues\n");
		goto fail;
	}

	error = vtnet_setup_interface(sc);
	if (error) {
		device_printf(dev, "cannot setup interface\n");
		goto fail;
	}

	TASK_INIT(&sc->vtnet_cfgchg_task, 0, vtnet_config_change_task, sc);

	error = virtio_setup_intr(dev, &sc->vtnet_slz);
	if (error) {
		device_printf(dev, "cannot setup virtqueue interrupts\n");
		ether_ifdetach(sc->vtnet_ifp);
		goto fail;
	}

	if ((sc->vtnet_flags & VTNET_FLAG_MAC) == 0) {
		lwkt_serialize_enter(&sc->vtnet_slz);
		vtnet_set_hwaddr(sc);
		lwkt_serialize_exit(&sc->vtnet_slz);
	}

	/*
	 * Device defaults to promiscuous mode for backwards
	 * compatibility. Turn it off if possible.
	 */
	if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) {
		lwkt_serialize_enter(&sc->vtnet_slz);
		if (vtnet_set_promisc(sc, 0) != 0) {
			sc->vtnet_ifp->if_flags |= IFF_PROMISC;
			device_printf(dev,
			    "cannot disable promiscuous mode\n");
		}
		lwkt_serialize_exit(&sc->vtnet_slz);
	} else
		sc->vtnet_ifp->if_flags |= IFF_PROMISC;

fail:
	if (error)
		vtnet_detach(dev);

	return (error);
}

static int
vtnet_detach(device_t dev)
{
	struct vtnet_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	ifp = sc->vtnet_ifp;

	if (device_is_attached(dev)) {
		lwkt_serialize_enter(&sc->vtnet_slz);
		vtnet_stop(sc);
		lwkt_serialize_exit(&sc->vtnet_slz);

		callout_stop(&sc->vtnet_tick_ch);
		taskqueue_drain(taskqueue_swi, &sc->vtnet_cfgchg_task);

		ether_ifdetach(ifp);
	}

	if (sc->vtnet_vlan_attach != NULL) {
		EVENTHANDLER_DEREGISTER(vlan_config, sc->vtnet_vlan_attach);
		sc->vtnet_vlan_attach = NULL;
	}
	if (sc->vtnet_vlan_detach != NULL) {
		EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vtnet_vlan_detach);
		sc->vtnet_vlan_detach = NULL;
	}

	if (ifp) {
		if_free(ifp);
		sc->vtnet_ifp = NULL;
	}

	if (sc->vtnet_rx_vq != NULL)
		vtnet_free_rx_mbufs(sc);
	if (sc->vtnet_tx_vq != NULL)
		vtnet_free_tx_mbufs(sc);
	if (sc->vtnet_ctrl_vq != NULL)
		vtnet_free_ctrl_vq(sc);

	if (sc->vtnet_txhdrarea != NULL) {
		contigfree(sc->vtnet_txhdrarea,
		    sc->vtnet_txhdrcount * sizeof(struct vtnet_tx_header),
		    M_VTNET);
		sc->vtnet_txhdrarea = NULL;
	}
	SLIST_INIT(&sc->vtnet_txhdr_free);
	if (sc->vtnet_macfilter != NULL) {
		contigfree(sc->vtnet_macfilter,
		    sizeof(struct vtnet_mac_filter), M_DEVBUF);
		sc->vtnet_macfilter = NULL;
	}

	ifmedia_removeall(&sc->vtnet_media);

	return (0);
}

static int
vtnet_suspend(device_t dev)
{
	struct vtnet_softc *sc;

	sc = device_get_softc(dev);

	lwkt_serialize_enter(&sc->vtnet_slz);
	vtnet_stop(sc);
	sc->vtnet_flags |= VTNET_FLAG_SUSPENDED;
	lwkt_serialize_exit(&sc->vtnet_slz);

	return (0);
}

static int
vtnet_resume(device_t dev)
{
	struct vtnet_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	ifp = sc->vtnet_ifp;

	lwkt_serialize_enter(&sc->vtnet_slz);
	if (ifp->if_flags & IFF_UP)
		vtnet_init_locked(sc);
	sc->vtnet_flags &= ~VTNET_FLAG_SUSPENDED;
	lwkt_serialize_exit(&sc->vtnet_slz);

	return (0);
}

static int
vtnet_shutdown(device_t dev)
{

	/*
	 * Suspend already does all of what we need to
	 * do here; we just never expect to be resumed.
	 */
	return (vtnet_suspend(dev));
}

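/*
 * Invoked by the VirtIO layer when the device's configuration space
 * changes (e.g. the link status field). The real work is deferred to
 * a taskqueue so it runs outside of the interrupt handler.
 */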
static int
vtnet_config_change(device_t dev)
{
	struct vtnet_softc *sc;

	sc = device_get_softc(dev);

	taskqueue_enqueue(taskqueue_thread[mycpuid], &sc->vtnet_cfgchg_task);

	return (1);
}

static void
vtnet_negotiate_features(struct vtnet_softc *sc)
{
	device_t dev;
	uint64_t mask, features;

	dev = sc->vtnet_dev;
	mask = 0;

	if (vtnet_csum_disable)
		mask |= VIRTIO_NET_F_CSUM | VIRTIO_NET_F_GUEST_CSUM;

	/*
	 * TSO and LRO are only available when their corresponding checksum
	 * offload feature is also negotiated.
	 */

	if (vtnet_csum_disable || vtnet_tso_disable)
		mask |= VIRTIO_NET_F_HOST_TSO4 | VIRTIO_NET_F_HOST_TSO6 |
		    VIRTIO_NET_F_HOST_ECN;

	if (vtnet_csum_disable || vtnet_lro_disable)
		mask |= VTNET_LRO_FEATURES;

	features = VTNET_FEATURES & ~mask;
	features |= VIRTIO_F_NOTIFY_ON_EMPTY;
	features |= VIRTIO_F_ANY_LAYOUT;
	sc->vtnet_features = virtio_negotiate_features(dev, features);

	if (virtio_with_feature(dev, VTNET_LRO_FEATURES) &&
	    virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF) == 0) {
		/*
		 * LRO without mergeable buffers requires special care. This
		 * is not ideal because every receive buffer must be large
		 * enough to hold the maximum TCP packet, the Ethernet header,
		 * and the VirtIO net header. This requires up to 34
		 * descriptors with MCLBYTES clusters. If we do not have
		 * indirect descriptors, LRO is disabled since the virtqueue
		 * will not contain very many receive buffers.
		 */
		if (!virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC)) {
			device_printf(dev,
			    "LRO disabled since neither mergeable buffers "
			    "nor indirect descriptors were negotiated\n");

			features &= ~VTNET_LRO_FEATURES;
			sc->vtnet_features =
			    virtio_negotiate_features(dev, features);
		} else
			sc->vtnet_flags |= VTNET_FLAG_LRO_NOMRG;
	}
}

static int
vtnet_alloc_virtqueues(struct vtnet_softc *sc)
{
	device_t dev;
	struct vq_alloc_info vq_info[3];
	int nvqs;

	dev = sc->vtnet_dev;
	nvqs = 2;

	/*
	 * Indirect descriptors are not needed for the Rx
	 * virtqueue when mergeable buffers are negotiated.
	 * The header is placed inline with the data, not
	 * in a separate descriptor, and mbuf clusters are
	 * always physically contiguous.
	 */
	if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
		sc->vtnet_rx_nsegs = (sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG) ?
		    VTNET_MAX_RX_SEGS : VTNET_MIN_RX_SEGS;
	} else
		sc->vtnet_rx_nsegs = VTNET_MRG_RX_SEGS;

	if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4) ||
	    virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6))
		sc->vtnet_tx_nsegs = VTNET_MAX_TX_SEGS;
	else
		sc->vtnet_tx_nsegs = VTNET_MIN_TX_SEGS;

	VQ_ALLOC_INFO_INIT(&vq_info[0], sc->vtnet_rx_nsegs,
	    vtnet_rx_vq_intr, sc, &sc->vtnet_rx_vq,
	    "%s receive", device_get_nameunit(dev));

	VQ_ALLOC_INFO_INIT(&vq_info[1], sc->vtnet_tx_nsegs,
	    vtnet_tx_vq_intr, sc, &sc->vtnet_tx_vq,
	    "%s transmit", device_get_nameunit(dev));

	if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) {
		nvqs++;

		VQ_ALLOC_INFO_INIT(&vq_info[2], 0, NULL, NULL,
		    &sc->vtnet_ctrl_vq, "%s control",
		    device_get_nameunit(dev));
	}

	return (virtio_alloc_virtqueues(dev, 0, nvqs, vq_info));
}

static int
vtnet_setup_interface(struct vtnet_softc *sc)
{
	device_t dev;
	struct ifnet *ifp;
	int i;

	dev = sc->vtnet_dev;

	ifp = sc->vtnet_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "cannot allocate ifnet structure\n");
		return (ENOSPC);
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = vtnet_init;
	ifp->if_start = vtnet_start;
	ifp->if_ioctl = vtnet_ioctl;

	sc->vtnet_rx_process_limit = virtqueue_size(sc->vtnet_rx_vq);
	sc->vtnet_tx_size = virtqueue_size(sc->vtnet_tx_vq);
	if (sc->vtnet_flags & VTNET_FLAG_INDIRECT)
		sc->vtnet_txhdrcount = sc->vtnet_tx_size;
	else
		sc->vtnet_txhdrcount = (sc->vtnet_tx_size / 2) + 1;
	sc->vtnet_txhdrarea = contigmalloc(
	    sc->vtnet_txhdrcount * sizeof(struct vtnet_tx_header),
	    M_VTNET, M_WAITOK, 0, BUS_SPACE_MAXADDR, 4, 0);
	if (sc->vtnet_txhdrarea == NULL) {
		device_printf(dev, "cannot contigmalloc the tx headers\n");
		return (ENOMEM);
	}
	for (i = 0; i < sc->vtnet_txhdrcount; i++)
		vtnet_enqueue_txhdr(sc, &sc->vtnet_txhdrarea[i]);
	sc->vtnet_macfilter = contigmalloc(
	    sizeof(struct vtnet_mac_filter),
	    M_DEVBUF, M_WAITOK, 0, BUS_SPACE_MAXADDR, 4, 0);
	if (sc->vtnet_macfilter == NULL) {
		device_printf(dev,
		    "cannot contigmalloc the mac filter table\n");
		return (ENOMEM);
	}
	ifq_set_maxlen(&ifp->if_snd, sc->vtnet_tx_size - 1);
	ifq_set_ready(&ifp->if_snd);

	ether_ifattach(ifp, sc->vtnet_hwaddr, NULL);

	/* Tell the upper layer(s) we support long frames. */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |= IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU;

	if (virtio_with_feature(dev, VIRTIO_NET_F_CSUM)) {
		ifp->if_capabilities |= IFCAP_TXCSUM;

		if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4))
			ifp->if_capabilities |= IFCAP_TSO4;
		if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6))
			ifp->if_capabilities |= IFCAP_TSO6;
		if (ifp->if_capabilities & IFCAP_TSO)
			ifp->if_capabilities |= IFCAP_VLAN_HWTSO;

		if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_ECN))
			sc->vtnet_flags |= VTNET_FLAG_TSO_ECN;
	}

	if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_CSUM)) {
		ifp->if_capabilities |= IFCAP_RXCSUM;

		if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO4) ||
		    virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO6))
			ifp->if_capabilities |= IFCAP_LRO;
	}

	if (ifp->if_capabilities & IFCAP_HWCSUM) {
		/*
		 * VirtIO does not support VLAN tagging, but we can fake
		 * it by inserting and removing the 802.1Q header during
		 * transmit and receive. We are then able to do checksum
		 * offloading of VLAN frames.
		 */
		ifp->if_capabilities |=
			IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM;
	}

	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Capabilities after here are not enabled by default.
	 */

	if (sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER) {
		ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;

		sc->vtnet_vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
		    vtnet_register_vlan, sc, EVENTHANDLER_PRI_FIRST);
		sc->vtnet_vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
		    vtnet_unregister_vlan, sc, EVENTHANDLER_PRI_FIRST);
	}

	return (0);
}

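/*
 * Program our MAC address into the host. The CTRL_MAC_ADDR control
 * command is preferred when it was negotiated; otherwise the address
 * is written directly into the device config space, which is only
 * valid when the MAC feature is present.
 */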
static void
vtnet_set_hwaddr(struct vtnet_softc *sc)
{
	device_t dev;

	dev = sc->vtnet_dev;

	if ((sc->vtnet_flags & VTNET_FLAG_CTRL_MAC) &&
	    (sc->vtnet_flags & VTNET_FLAG_CTRL_RX)) {
		if (vtnet_ctrl_mac_cmd(sc, sc->vtnet_hwaddr) != 0)
			device_printf(dev, "unable to set MAC address\n");
	} else if (sc->vtnet_flags & VTNET_FLAG_MAC) {
		virtio_write_device_config(dev,
		    offsetof(struct virtio_net_config, mac),
		    sc->vtnet_hwaddr, ETHER_ADDR_LEN);
	}
}

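/*
 * Note on the generated address below: 0xB2 has the locally
 * administered bit set and the group bit clear, so the random
 * address is a valid unicast LAA.
 */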
static void
vtnet_get_hwaddr(struct vtnet_softc *sc)
{
	device_t dev;

	dev = sc->vtnet_dev;

	if ((sc->vtnet_flags & VTNET_FLAG_MAC) == 0) {
		/*
		 * Generate a random locally administered unicast address.
		 *
		 * It would be nice to generate the same MAC address across
		 * reboots, but it seems all the hosts currently available
		 * support the MAC feature, so this isn't too important.
		 */
		sc->vtnet_hwaddr[0] = 0xB2;
		karc4rand(&sc->vtnet_hwaddr[1], ETHER_ADDR_LEN - 1);
		return;
	}

	virtio_read_device_config(dev,
	    offsetof(struct virtio_net_config, mac),
	    sc->vtnet_hwaddr, ETHER_ADDR_LEN);
}

static int
vtnet_is_link_up(struct vtnet_softc *sc)
{
	device_t dev;
	struct ifnet *ifp;
	uint16_t status;

	dev = sc->vtnet_dev;
	ifp = sc->vtnet_ifp;

	ASSERT_SERIALIZED(&sc->vtnet_slz);

	if (virtio_with_feature(dev, VIRTIO_NET_F_STATUS)) {
		status = virtio_read_dev_config_2(dev,
				offsetof(struct virtio_net_config, status));
	} else {
		status = VIRTIO_NET_S_LINK_UP;
	}

	return ((status & VIRTIO_NET_S_LINK_UP) != 0);
}

static void
vtnet_update_link_status(struct vtnet_softc *sc)
{
	device_t dev;
	struct ifnet *ifp;
	struct ifaltq_subque *ifsq;
	int link;

	dev = sc->vtnet_dev;
	ifp = sc->vtnet_ifp;
	ifsq = ifq_get_subq_default(&ifp->if_snd);

	link = vtnet_is_link_up(sc);

	if (link && ((sc->vtnet_flags & VTNET_FLAG_LINK) == 0)) {
		sc->vtnet_flags |= VTNET_FLAG_LINK;
		if (bootverbose)
			device_printf(dev, "Link is up\n");
		ifp->if_link_state = LINK_STATE_UP;
		if_link_state_change(ifp);
		if (!ifsq_is_empty(ifsq))
			vtnet_start_locked(ifp, ifsq);
	} else if (!link && (sc->vtnet_flags & VTNET_FLAG_LINK)) {
		sc->vtnet_flags &= ~VTNET_FLAG_LINK;
		if (bootverbose)
			device_printf(dev, "Link is down\n");

		ifp->if_link_state = LINK_STATE_DOWN;
		if_link_state_change(ifp);
	}
}

#if 0
static void
vtnet_watchdog(struct vtnet_softc *sc)
{
	struct ifnet *ifp;

	ifp = sc->vtnet_ifp;

#ifdef VTNET_TX_INTR_MODERATION
	vtnet_txeof(sc);
#endif

	if (sc->vtnet_watchdog_timer == 0 || --sc->vtnet_watchdog_timer)
		return;

	if_printf(ifp, "watchdog timeout -- resetting\n");
#ifdef VTNET_DEBUG
	virtqueue_dump(sc->vtnet_tx_vq);
#endif
	ifp->if_oerrors++;
	ifp->if_flags &= ~IFF_RUNNING;
	vtnet_init_locked(sc);
}
#endif

static void
vtnet_config_change_task(void *arg, int pending)
{
	struct vtnet_softc *sc;

	sc = arg;

	lwkt_serialize_enter(&sc->vtnet_slz);
	vtnet_update_link_status(sc);
	lwkt_serialize_exit(&sc->vtnet_slz);
}

static int
vtnet_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
	struct vtnet_softc *sc;
	struct ifreq *ifr;
	int reinit, mask, error;

	sc = ifp->if_softc;
	ifr = (struct ifreq *) data;
	reinit = 0;
	error = 0;

	switch (cmd) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > VTNET_MAX_MTU)
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu) {
			lwkt_serialize_enter(&sc->vtnet_slz);
			error = vtnet_change_mtu(sc, ifr->ifr_mtu);
			lwkt_serialize_exit(&sc->vtnet_slz);
		}
		break;

	case SIOCSIFFLAGS:
		lwkt_serialize_enter(&sc->vtnet_slz);
		if ((ifp->if_flags & IFF_UP) == 0) {
			if (ifp->if_flags & IFF_RUNNING)
				vtnet_stop(sc);
		} else if (ifp->if_flags & IFF_RUNNING) {
			if ((ifp->if_flags ^ sc->vtnet_if_flags) &
			    (IFF_PROMISC | IFF_ALLMULTI)) {
				if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX)
					vtnet_rx_filter(sc);
				else
					error = ENOTSUP;
			}
		} else
			vtnet_init_locked(sc);

		if (error == 0)
			sc->vtnet_if_flags = ifp->if_flags;
		lwkt_serialize_exit(&sc->vtnet_slz);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		lwkt_serialize_enter(&sc->vtnet_slz);
		if ((sc->vtnet_flags & VTNET_FLAG_CTRL_RX) &&
		    (ifp->if_flags & IFF_RUNNING))
			vtnet_rx_filter_mac(sc);
		lwkt_serialize_exit(&sc->vtnet_slz);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->vtnet_media, cmd);
		break;

	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;

		lwkt_serialize_enter(&sc->vtnet_slz);

		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if (ifp->if_capenable & IFCAP_TXCSUM)
				ifp->if_hwassist |= VTNET_CSUM_OFFLOAD;
			else
				ifp->if_hwassist &= ~VTNET_CSUM_OFFLOAD;
		}

		if (mask & IFCAP_TSO4) {
			ifp->if_capenable ^= IFCAP_TSO4;
			if (ifp->if_capenable & IFCAP_TSO4)
				ifp->if_hwassist |= CSUM_TSO;
			else
				ifp->if_hwassist &= ~CSUM_TSO;
		}

		if (mask & IFCAP_RXCSUM) {
			ifp->if_capenable ^= IFCAP_RXCSUM;
			reinit = 1;
		}

		if (mask & IFCAP_LRO) {
			ifp->if_capenable ^= IFCAP_LRO;
			reinit = 1;
		}

		if (mask & IFCAP_VLAN_HWFILTER) {
			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
			reinit = 1;
		}

		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;

		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;

		if (reinit && (ifp->if_flags & IFF_RUNNING)) {
			ifp->if_flags &= ~IFF_RUNNING;
			vtnet_init_locked(sc);
		}
		//VLAN_CAPABILITIES(ifp);

		lwkt_serialize_exit(&sc->vtnet_slz);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}

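/*
 * An MTU change may require a different receive cluster size: pick
 * the smallest cluster that still fits the new frame, and restart
 * the interface if it is currently running.
 */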
static int
vtnet_change_mtu(struct vtnet_softc *sc, int new_mtu)
{
	struct ifnet *ifp;
	int new_frame_size, clsize;

	ifp = sc->vtnet_ifp;

	if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
		new_frame_size = sizeof(struct vtnet_rx_header) +
		    sizeof(struct ether_vlan_header) + new_mtu;

		if (new_frame_size > MJUM9BYTES)
			return (EINVAL);

		if (new_frame_size <= MCLBYTES)
			clsize = MCLBYTES;
		else
			clsize = MJUM9BYTES;
	} else {
		new_frame_size = sizeof(struct virtio_net_hdr_mrg_rxbuf) +
		    sizeof(struct ether_vlan_header) + new_mtu;

		if (new_frame_size <= MCLBYTES)
			clsize = MCLBYTES;
		else
			clsize = MJUMPAGESIZE;
	}

	sc->vtnet_rx_mbuf_size = clsize;
	sc->vtnet_rx_mbuf_count = VTNET_NEEDED_RX_MBUFS(sc);
	KASSERT(sc->vtnet_rx_mbuf_count < VTNET_MAX_RX_SEGS,
	    ("too many rx mbufs: %d", sc->vtnet_rx_mbuf_count));

	ifp->if_mtu = new_mtu;

	if (ifp->if_flags & IFF_RUNNING) {
		ifp->if_flags &= ~IFF_RUNNING;
		vtnet_init_locked(sc);
	}

	return (0);
}

static int
vtnet_init_rx_vq(struct vtnet_softc *sc)
{
	struct virtqueue *vq;
	int nbufs, error;

	vq = sc->vtnet_rx_vq;
	nbufs = 0;
	error = ENOSPC;

	while (!virtqueue_full(vq)) {
		if ((error = vtnet_newbuf(sc)) != 0)
			break;
		nbufs++;
	}

	if (nbufs > 0) {
		virtqueue_notify(vq, &sc->vtnet_slz);

		/*
		 * EMSGSIZE signifies the virtqueue did not have enough
		 * entries available to hold the last mbuf. This is not
		 * an error. We should not get ENOSPC since we check if
		 * the virtqueue is full before attempting to add a
		 * buffer.
		 */
		if (error == EMSGSIZE)
			error = 0;
	}

	return (error);
}

static void
vtnet_free_rx_mbufs(struct vtnet_softc *sc)
{
	struct virtqueue *vq;
	struct mbuf *m;
	int last;

	vq = sc->vtnet_rx_vq;
	last = 0;

	while ((m = virtqueue_drain(vq, &last)) != NULL)
		m_freem(m);

	KASSERT(virtqueue_empty(vq), ("mbufs remaining in Rx Vq"));
}

static void
vtnet_free_tx_mbufs(struct vtnet_softc *sc)
{
	struct virtqueue *vq;
	struct vtnet_tx_header *txhdr;
	int last;

	vq = sc->vtnet_tx_vq;
	last = 0;

	while ((txhdr = virtqueue_drain(vq, &last)) != NULL) {
		m_freem(txhdr->vth_mbuf);
		vtnet_enqueue_txhdr(sc, txhdr);
	}

	KASSERT(virtqueue_empty(vq), ("mbufs remaining in Tx Vq"));
}

static void
vtnet_free_ctrl_vq(struct vtnet_softc *sc)
{
	/*
	 * The control virtqueue is only polled, therefore
	 * it should already be empty.
	 */
	KASSERT(virtqueue_empty(sc->vtnet_ctrl_vq),
		("Ctrl Vq not empty"));
}

static struct mbuf *
vtnet_alloc_rxbuf(struct vtnet_softc *sc, int nbufs, struct mbuf **m_tailp)
{
	struct mbuf *m_head, *m_tail, *m;
	int i, clsize;

	clsize = sc->vtnet_rx_mbuf_size;

	/*
	 * Use m_getcl() instead of m_getjcl(); see the comment near
	 * line 2398 of if_mxge.c.
	 */
	//m_head = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, clsize);
	m_head = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m_head == NULL)
		goto fail;

	m_head->m_len = clsize;
	m_tail = m_head;

	if (nbufs > 1) {
		KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG,
			("chained Rx mbuf requested without LRO_NOMRG"));

		for (i = 0; i < nbufs - 1; i++) {
			//m = m_getjcl(M_DONTWAIT, MT_DATA, 0, clsize);
			m = m_getcl(M_NOWAIT, MT_DATA, 0);
			if (m == NULL)
				goto fail;

			m->m_len = clsize;
			m_tail->m_next = m;
			m_tail = m;
		}
	}

	if (m_tailp != NULL)
		*m_tailp = m_tail;

	return (m_head);

fail:
	sc->vtnet_stats.mbuf_alloc_failed++;
	m_freem(m_head);

	return (NULL);
}

static int
vtnet_replace_rxbuf(struct vtnet_softc *sc, struct mbuf *m0, int len0)
{
	struct mbuf *m, *m_prev;
	struct mbuf *m_new, *m_tail;
	int len, clsize, nreplace, error;

	m = m0;
	m_prev = NULL;
	len = len0;

	m_tail = NULL;
	clsize = sc->vtnet_rx_mbuf_size;
	nreplace = 0;

	if (m->m_next != NULL)
		KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG,
		    ("chained Rx mbuf without LRO_NOMRG"));

	/*
	 * Since LRO_NOMRG mbuf chains are so large, we want to avoid
	 * allocating an entire chain for each received frame. When
	 * the received frame's length is less than that of the chain,
	 * the unused mbufs are reassigned to the new chain.
	 */
	while (len > 0) {
		/*
		 * Something is seriously wrong if we received
		 * a frame larger than the mbuf chain. Drop it.
		 */
		if (m == NULL) {
			sc->vtnet_stats.rx_frame_too_large++;
			return (EMSGSIZE);
		}

		KASSERT(m->m_len == clsize,
		    ("mbuf length not expected cluster size: %d",
		    m->m_len));

		m->m_len = MIN(m->m_len, len);
		len -= m->m_len;

		m_prev = m;
		m = m->m_next;
		nreplace++;
	}

	KASSERT(m_prev != NULL, ("m_prev == NULL"));
	KASSERT(nreplace <= sc->vtnet_rx_mbuf_count,
		("too many replacement mbufs: %d/%d", nreplace,
		sc->vtnet_rx_mbuf_count));

	m_new = vtnet_alloc_rxbuf(sc, nreplace, &m_tail);
	if (m_new == NULL) {
		m_prev->m_len = clsize;
		return (ENOBUFS);
	}

	/*
	 * Move unused mbufs, if any, from the original chain
	 * onto the end of the new chain.
	 */
	if (m_prev->m_next != NULL) {
		m_tail->m_next = m_prev->m_next;
		m_prev->m_next = NULL;
	}

	error = vtnet_enqueue_rxbuf(sc, m_new);
	if (error) {
		/*
		 * BAD! We could not enqueue the replacement mbuf chain. We
		 * must restore the m0 chain to the original state if it was
		 * modified so we can subsequently discard it.
		 *
		 * NOTE: The replacement is supposed to be an identical copy
		 * of the one just dequeued, so this is an unexpected error.
		 */
		sc->vtnet_stats.rx_enq_replacement_failed++;

		if (m_tail->m_next != NULL) {
			m_prev->m_next = m_tail->m_next;
			m_tail->m_next = NULL;
		}

		m_prev->m_len = clsize;
		m_freem(m_new);
	}

	return (error);
}

static int
vtnet_newbuf(struct vtnet_softc *sc)
{
	struct mbuf *m;
	int error;

	m = vtnet_alloc_rxbuf(sc, sc->vtnet_rx_mbuf_count, NULL);
	if (m == NULL)
		return (ENOBUFS);

	error = vtnet_enqueue_rxbuf(sc, m);
	if (error)
		m_freem(m);

	return (error);
}

static void
vtnet_discard_merged_rxbuf(struct vtnet_softc *sc, int nbufs)
{
	struct virtqueue *vq;
	struct mbuf *m;

	vq = sc->vtnet_rx_vq;

	while (--nbufs > 0) {
		if ((m = virtqueue_dequeue(vq, NULL)) == NULL)
			break;
		vtnet_discard_rxbuf(sc, m);
	}
}

static void
vtnet_discard_rxbuf(struct vtnet_softc *sc, struct mbuf *m)
{
	int error;

	/*
	 * Requeue the discarded mbuf. This should always be
	 * successful since it was just dequeued.
	 */
	error = vtnet_enqueue_rxbuf(sc, m);
	KASSERT(error == 0, ("cannot requeue discarded mbuf"));
}

static int
vtnet_enqueue_rxbuf(struct vtnet_softc *sc, struct mbuf *m)
{
	struct sglist sg;
	struct sglist_seg segs[VTNET_MAX_RX_SEGS];
	struct vtnet_rx_header *rxhdr;
	struct virtio_net_hdr *hdr;
	uint8_t *mdata;
	int offset, error;

	ASSERT_SERIALIZED(&sc->vtnet_slz);
	if ((sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG) == 0)
		KASSERT(m->m_next == NULL, ("chained Rx mbuf"));

	sglist_init(&sg, sc->vtnet_rx_nsegs, segs);

	mdata = mtod(m, uint8_t *);
	offset = 0;

	if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
		rxhdr = (struct vtnet_rx_header *) mdata;
		hdr = &rxhdr->vrh_hdr;
		offset += sizeof(struct vtnet_rx_header);

		error = sglist_append(&sg, hdr, sc->vtnet_hdr_size);
		KASSERT(error == 0, ("cannot add header to sglist"));
	}

	error = sglist_append(&sg, mdata + offset, m->m_len - offset);
	if (error)
		return (error);

	if (m->m_next != NULL) {
		error = sglist_append_mbuf(&sg, m->m_next);
		if (error)
			return (error);
	}

	return (virtqueue_enqueue(sc->vtnet_rx_vq, m, &sg, 0, sg.sg_nseg));
}

static void
vtnet_vlan_tag_remove(struct mbuf *m)
{
	struct ether_vlan_header *evl;

	evl = mtod(m, struct ether_vlan_header *);

	m->m_pkthdr.ether_vlantag = ntohs(evl->evl_tag);
	m->m_flags |= M_VLANTAG;

	/* Strip the 802.1Q header. */
	bcopy((char *) evl, (char *) evl + ETHER_VLAN_ENCAP_LEN,
	    ETHER_HDR_LEN - ETHER_TYPE_LEN);
	m_adj(m, ETHER_VLAN_ENCAP_LEN);
}

/*
 * Alternative method of doing receive checksum offloading. Rather
 * than parsing the received frame down to the IP header, use the
 * csum_offset to determine which CSUM_* flags are appropriate. We
 * can get by with doing this only because the checksum offsets are
 * unique for the things we care about.
 */
static int
vtnet_rx_csum(struct vtnet_softc *sc, struct mbuf *m,
    struct virtio_net_hdr *hdr)
{
	struct ether_header *eh;
	struct ether_vlan_header *evh;
	struct udphdr *udp;
	int csum_len;
	uint16_t eth_type;

	csum_len = hdr->csum_start + hdr->csum_offset;

	if (csum_len < sizeof(struct ether_header) + sizeof(struct ip))
		return (1);
	if (m->m_len < csum_len)
		return (1);

	eh = mtod(m, struct ether_header *);
	eth_type = ntohs(eh->ether_type);
	if (eth_type == ETHERTYPE_VLAN) {
		evh = mtod(m, struct ether_vlan_header *);
		eth_type = ntohs(evh->evl_proto);
	}

	if (eth_type != ETHERTYPE_IP && eth_type != ETHERTYPE_IPV6) {
		sc->vtnet_stats.rx_csum_bad_ethtype++;
		return (1);
	}

	/* Use the offset to determine the appropriate CSUM_* flags. */
	switch (hdr->csum_offset) {
	case offsetof(struct udphdr, uh_sum):
		if (m->m_len < hdr->csum_start + sizeof(struct udphdr))
			return (1);
		udp = (struct udphdr *)(mtod(m, uint8_t *) + hdr->csum_start);
		if (udp->uh_sum == 0)
			return (0);

		/* FALLTHROUGH */

	case offsetof(struct tcphdr, th_sum):
		m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
		m->m_pkthdr.csum_data = 0xFFFF;
		break;

	default:
		sc->vtnet_stats.rx_csum_bad_offset++;
		return (1);
	}

	sc->vtnet_stats.rx_csum_offloaded++;

	return (0);
}

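/*
 * Gather the remaining buffers of a mergeable receive frame into a
 * single mbuf chain, replacing each dequeued buffer as we go.
 */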
static int
vtnet_rxeof_merged(struct vtnet_softc *sc, struct mbuf *m_head, int nbufs)
{
	struct ifnet *ifp;
	struct virtqueue *vq;
	struct mbuf *m, *m_tail;
	int len;

	ifp = sc->vtnet_ifp;
	vq = sc->vtnet_rx_vq;
	m_tail = m_head;

	while (--nbufs > 0) {
		m = virtqueue_dequeue(vq, &len);
		if (m == NULL) {
			ifp->if_ierrors++;
			goto fail;
		}

		if (vtnet_newbuf(sc) != 0) {
			ifp->if_iqdrops++;
			vtnet_discard_rxbuf(sc, m);
			if (nbufs > 1)
				vtnet_discard_merged_rxbuf(sc, nbufs);
			goto fail;
		}

		if (m->m_len < len)
			len = m->m_len;

		m->m_len = len;
		m->m_flags &= ~M_PKTHDR;

		m_head->m_pkthdr.len += len;
		m_tail->m_next = m;
		m_tail = m;
	}

	return (0);

fail:
	sc->vtnet_stats.rx_mergeable_failed++;
	m_freem(m_head);

	return (1);
}

static int
vtnet_rxeof(struct vtnet_softc *sc, int count, int *rx_npktsp)
{
	struct virtio_net_hdr lhdr;
	struct ifnet *ifp;
	struct virtqueue *vq;
	struct mbuf *m;
	struct ether_header *eh;
	struct virtio_net_hdr *hdr;
	struct virtio_net_hdr_mrg_rxbuf *mhdr;
	int len, deq, nbufs, adjsz, rx_npkts;

	ifp = sc->vtnet_ifp;
	vq = sc->vtnet_rx_vq;
	hdr = &lhdr;
	deq = 0;
	rx_npkts = 0;

	ASSERT_SERIALIZED(&sc->vtnet_slz);

	while (--count >= 0) {
		m = virtqueue_dequeue(vq, &len);
		if (m == NULL)
			break;
		deq++;

		if (len < sc->vtnet_hdr_size + ETHER_HDR_LEN) {
			ifp->if_ierrors++;
			vtnet_discard_rxbuf(sc, m);
			continue;
		}

		if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
			nbufs = 1;
			adjsz = sizeof(struct vtnet_rx_header);
			/*
			 * Account for our pad between the header and
			 * the actual start of the frame.
			 */
			len += VTNET_RX_HEADER_PAD;
		} else {
			mhdr = mtod(m, struct virtio_net_hdr_mrg_rxbuf *);
			nbufs = mhdr->num_buffers;
			adjsz = sizeof(struct virtio_net_hdr_mrg_rxbuf);
		}

		if (vtnet_replace_rxbuf(sc, m, len) != 0) {
			ifp->if_iqdrops++;
			vtnet_discard_rxbuf(sc, m);
			if (nbufs > 1)
				vtnet_discard_merged_rxbuf(sc, nbufs);
			continue;
		}

		m->m_pkthdr.len = len;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.csum_flags = 0;

		if (nbufs > 1) {
			if (vtnet_rxeof_merged(sc, m, nbufs) != 0)
				continue;
		}

		ifp->if_ipackets++;

		/*
		 * Save copy of header before we strip it. For both mergeable
		 * and non-mergeable, the VirtIO header is placed first in the
		 * mbuf's data. We no longer need num_buffers, so always use a
		 * virtio_net_hdr.
		 */
		memcpy(hdr, mtod(m, void *), sizeof(struct virtio_net_hdr));
		m_adj(m, adjsz);

		if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
			eh = mtod(m, struct ether_header *);
			if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
				vtnet_vlan_tag_remove(m);

				/*
				 * With the 802.1Q header removed, update the
				 * checksum starting location accordingly.
				 */
				if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
					hdr->csum_start -=
					    ETHER_VLAN_ENCAP_LEN;
			}
		}

		if (ifp->if_capenable & IFCAP_RXCSUM &&
		    hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
			if (vtnet_rx_csum(sc, m, hdr) != 0)
				sc->vtnet_stats.rx_csum_failed++;
		}

		lwkt_serialize_exit(&sc->vtnet_slz);
		rx_npkts++;
		ifp->if_input(ifp, m, NULL, -1);
		lwkt_serialize_enter(&sc->vtnet_slz);

		/*
		 * The interface may have been stopped while we were
		 * passing the packet up the network stack.
		 */
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			break;
	}

	virtqueue_notify(vq, &sc->vtnet_slz);

	if (rx_npktsp != NULL)
		*rx_npktsp = rx_npkts;

	return (count > 0 ? 0 : EAGAIN);
}

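/*
 * Deferred receive processing. If re-enabling the interrupt reports
 * more work pending, disable it again and loop so the new buffers
 * are processed here rather than by another interrupt.
 */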
static void
vtnet_rx_intr_task(void *arg)
{
	struct vtnet_softc *sc;
	struct ifnet *ifp;
	int more;

	sc = arg;
	ifp = sc->vtnet_ifp;

next:
//	lwkt_serialize_enter(&sc->vtnet_slz);

	if ((ifp->if_flags & IFF_RUNNING) == 0) {
		vtnet_enable_rx_intr(sc);
//		lwkt_serialize_exit(&sc->vtnet_slz);
		return;
	}

	more = vtnet_rxeof(sc, sc->vtnet_rx_process_limit, NULL);
	if (!more && vtnet_enable_rx_intr(sc) != 0) {
		vtnet_disable_rx_intr(sc);
		more = 1;
	}

//	lwkt_serialize_exit(&sc->vtnet_slz);

	if (more) {
		sc->vtnet_stats.rx_task_rescheduled++;
		goto next;
	}
}

static int
vtnet_rx_vq_intr(void *xsc)
{
	struct vtnet_softc *sc;

	sc = xsc;

	vtnet_disable_rx_intr(sc);
	vtnet_rx_intr_task(sc);

	return (1);
}

static void
vtnet_enqueue_txhdr(struct vtnet_softc *sc, struct vtnet_tx_header *txhdr)
{
	bzero(txhdr, sizeof(*txhdr));
	SLIST_INSERT_HEAD(&sc->vtnet_txhdr_free, txhdr, link);
}

static void
vtnet_txeof(struct vtnet_softc *sc)
{
	struct virtqueue *vq;
	struct ifnet *ifp;
	struct vtnet_tx_header *txhdr;
	int deq;

	vq = sc->vtnet_tx_vq;
	ifp = sc->vtnet_ifp;
	deq = 0;

	ASSERT_SERIALIZED(&sc->vtnet_slz);

	while ((txhdr = virtqueue_dequeue(vq, NULL)) != NULL) {
		deq++;
		ifp->if_opackets++;
		m_freem(txhdr->vth_mbuf);
		vtnet_enqueue_txhdr(sc, txhdr);
	}

	if (deq > 0) {
		ifq_clr_oactive(&ifp->if_snd);
		if (virtqueue_empty(vq))
			sc->vtnet_watchdog_timer = 0;
	}
}

static struct mbuf *
vtnet_tx_offload(struct vtnet_softc *sc, struct mbuf *m,
    struct virtio_net_hdr *hdr)
{
	struct ifnet *ifp;
	struct ether_header *eh;
	struct ether_vlan_header *evh;
	struct ip *ip;
	struct ip6_hdr *ip6;
	struct tcphdr *tcp;
	int ip_offset;
	uint16_t eth_type, csum_start;
	uint8_t ip_proto, gso_type;

	ifp = sc->vtnet_ifp;
	M_ASSERTPKTHDR(m);

	ip_offset = sizeof(struct ether_header);
	if (m->m_len < ip_offset) {
		if ((m = m_pullup(m, ip_offset)) == NULL)
			return (NULL);
	}

	eh = mtod(m, struct ether_header *);
	eth_type = ntohs(eh->ether_type);
	if (eth_type == ETHERTYPE_VLAN) {
		ip_offset = sizeof(struct ether_vlan_header);
		if (m->m_len < ip_offset) {
			if ((m = m_pullup(m, ip_offset)) == NULL)
				return (NULL);
		}
		evh = mtod(m, struct ether_vlan_header *);
		eth_type = ntohs(evh->evl_proto);
	}

	switch (eth_type) {
	case ETHERTYPE_IP:
		if (m->m_len < ip_offset + sizeof(struct ip)) {
			m = m_pullup(m, ip_offset + sizeof(struct ip));
			if (m == NULL)
				return (NULL);
		}

		ip = (struct ip *)(mtod(m, uint8_t *) + ip_offset);
		ip_proto = ip->ip_p;
		csum_start = ip_offset + (ip->ip_hl << 2);
		gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
		break;

	case ETHERTYPE_IPV6:
		if (m->m_len < ip_offset + sizeof(struct ip6_hdr)) {
			m = m_pullup(m, ip_offset + sizeof(struct ip6_hdr));
			if (m == NULL)
				return (NULL);
		}

		ip6 = (struct ip6_hdr *)(mtod(m, uint8_t *) + ip_offset);
		/*
		 * XXX Assume no extension headers are present. Presently,
		 * this will always be true in the case of TSO, and FreeBSD
		 * does not perform checksum offloading of IPv6 yet.
		 */
		ip_proto = ip6->ip6_nxt;
		csum_start = ip_offset + sizeof(struct ip6_hdr);
		gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
		break;

	default:
		return (m);
	}

	if (m->m_pkthdr.csum_flags & VTNET_CSUM_OFFLOAD) {
		hdr->flags |= VIRTIO_NET_HDR_F_NEEDS_CSUM;
		hdr->csum_start = csum_start;
		hdr->csum_offset = m->m_pkthdr.csum_data;

		sc->vtnet_stats.tx_csum_offloaded++;
	}

	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
		if (ip_proto != IPPROTO_TCP)
			return (m);

		if (m->m_len < csum_start + sizeof(struct tcphdr)) {
			m = m_pullup(m, csum_start + sizeof(struct tcphdr));
			if (m == NULL)
				return (NULL);
		}

		tcp = (struct tcphdr *)(mtod(m, uint8_t *) + csum_start);
		hdr->gso_type = gso_type;
		hdr->hdr_len = csum_start + (tcp->th_off << 2);
		hdr->gso_size = m->m_pkthdr.tso_segsz;

		if (tcp->th_flags & TH_CWR) {
			/*
			 * Drop if we did not negotiate VIRTIO_NET_F_HOST_ECN.
			 * ECN support is only configurable globally with the
			 * net.inet.tcp.ecn.enable sysctl knob.
			 */
			if ((sc->vtnet_flags & VTNET_FLAG_TSO_ECN) == 0) {
				if_printf(ifp, "TSO with ECN not supported "
				    "by host\n");
				m_freem(m);
				return (NULL);
			}

			hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
		}

		sc->vtnet_stats.tx_tso_offloaded++;
	}

	return (m);
}

static int
vtnet_enqueue_txbuf(struct vtnet_softc *sc, struct mbuf **m_head,
    struct vtnet_tx_header *txhdr)
{
	struct sglist sg;
	struct sglist_seg segs[VTNET_MAX_TX_SEGS];
	struct virtqueue *vq;
	struct mbuf *m;
	int error;

	vq = sc->vtnet_tx_vq;
	m = *m_head;

	sglist_init(&sg, sc->vtnet_tx_nsegs, segs);
	error = sglist_append(&sg, &txhdr->vth_uhdr, sc->vtnet_hdr_size);
	KASSERT(error == 0 && sg.sg_nseg == 1,
	    ("%s: error %d adding header to sglist", __func__, error));

	error = sglist_append_mbuf(&sg, m);
	if (error) {
		m = m_defrag(m, M_NOWAIT);
		if (m == NULL)
			goto fail;

		*m_head = m;
		sc->vtnet_stats.tx_defragged++;

		error = sglist_append_mbuf(&sg, m);
		if (error)
			goto fail;
	}

	txhdr->vth_mbuf = m;
	error = virtqueue_enqueue(vq, txhdr, &sg, sg.sg_nseg, 0);

	return (error);

fail:
	sc->vtnet_stats.tx_defrag_failed++;
	m_freem(*m_head);
	*m_head = NULL;

	return (ENOBUFS);
}

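/*
 * Soft-encapsulate an outgoing 802.1Q frame. The host performs no
 * VLAN tag insertion for us, so fold the tag into the Ethernet
 * header in place before the frame is enqueued.
 */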
static struct mbuf *
vtnet_vlan_tag_insert(struct mbuf *m)
{
	struct mbuf *n;
	struct ether_vlan_header *evl;

	if (M_WRITABLE(m) == 0) {
		n = m_dup(m, M_NOWAIT);
		m_freem(m);
		if ((m = n) == NULL)
			return (NULL);
	}

	M_PREPEND(m, ETHER_VLAN_ENCAP_LEN, M_NOWAIT);
	if (m == NULL)
		return (NULL);
	if (m->m_len < sizeof(struct ether_vlan_header)) {
		m = m_pullup(m, sizeof(struct ether_vlan_header));
		if (m == NULL)
			return (NULL);
	}

	/* Insert 802.1Q header into the existing Ethernet header. */
	evl = mtod(m, struct ether_vlan_header *);
	bcopy((char *) evl + ETHER_VLAN_ENCAP_LEN,
	      (char *) evl, ETHER_HDR_LEN - ETHER_TYPE_LEN);
	evl->evl_encap_proto = htons(ETHERTYPE_VLAN);
	evl->evl_tag = htons(m->m_pkthdr.ether_vlantag);
	m->m_flags &= ~M_VLANTAG;

	return (m);
}

static int
vtnet_encap(struct vtnet_softc *sc, struct mbuf **m_head)
{
	struct vtnet_tx_header *txhdr;
	struct virtio_net_hdr *hdr;
	struct mbuf *m;
	int error;

	txhdr = SLIST_FIRST(&sc->vtnet_txhdr_free);
	if (txhdr == NULL)
		return (ENOBUFS);
	SLIST_REMOVE_HEAD(&sc->vtnet_txhdr_free, link);

	/*
	 * Always use the non-mergeable header to simplify things. When
	 * the mergeable feature is negotiated, the num_buffers field
	 * must be set to zero. We use vtnet_hdr_size later to enqueue
	 * the correct header size to the host.
	 */
	hdr = &txhdr->vth_uhdr.hdr;
	m = *m_head;

	error = ENOBUFS;

	if (m->m_flags & M_VLANTAG) {
		//m = ether_vlanencap(m, m->m_pkthdr.ether_vtag);
		m = vtnet_vlan_tag_insert(m);
		if ((*m_head = m) == NULL)
			goto fail;
		m->m_flags &= ~M_VLANTAG;
	}

	if (m->m_pkthdr.csum_flags != 0) {
		m = vtnet_tx_offload(sc, m, hdr);
		if ((*m_head = m) == NULL)
			goto fail;
	}

	error = vtnet_enqueue_txbuf(sc, m_head, txhdr);
fail:
	if (error != 0)
		vtnet_enqueue_txhdr(sc, txhdr);
	return (error);
}

static void
vtnet_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
{
	struct vtnet_softc *sc;

	sc = ifp->if_softc;

	ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq);
	lwkt_serialize_enter(&sc->vtnet_slz);
	vtnet_start_locked(ifp, ifsq);
	lwkt_serialize_exit(&sc->vtnet_slz);
}

static void
vtnet_start_locked(struct ifnet *ifp, struct ifaltq_subque *ifsq)
{
	struct vtnet_softc *sc;
	struct virtqueue *vq;
	struct mbuf *m0;
	int enq;

	sc = ifp->if_softc;
	vq = sc->vtnet_tx_vq;
	enq = 0;

	ASSERT_SERIALIZED(&sc->vtnet_slz);

	if ((ifp->if_flags & IFF_RUNNING) == 0 ||
	    (sc->vtnet_flags & VTNET_FLAG_LINK) == 0)
		return;

#ifdef VTNET_TX_INTR_MODERATION
	if (virtqueue_nused(vq) >= sc->vtnet_tx_size / 2)
		vtnet_txeof(sc);
#endif

	while (!ifsq_is_empty(ifsq)) {
		if (virtqueue_full(vq)) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		m0 = ifq_dequeue(&ifp->if_snd);
		if (m0 == NULL)
			break;

		if (vtnet_encap(sc, &m0) != 0) {
			if (m0 == NULL)
				break;
			ifq_prepend(&ifp->if_snd, m0);
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		enq++;
		ETHER_BPF_MTAP(ifp, m0);
	}

	if (enq > 0) {
		virtqueue_notify(vq, &sc->vtnet_slz);
		sc->vtnet_watchdog_timer = VTNET_WATCHDOG_TIMEOUT;
	}
}

static void
vtnet_tick(void *xsc)
{
	struct vtnet_softc *sc;

	sc = xsc;

#if 0
	ASSERT_SERIALIZED(&sc->vtnet_slz);
#ifdef VTNET_DEBUG
	virtqueue_dump(sc->vtnet_rx_vq);
	virtqueue_dump(sc->vtnet_tx_vq);
#endif

	vtnet_watchdog(sc);
	callout_reset(&sc->vtnet_tick_ch, hz, vtnet_tick, sc);
#endif
}

static void
vtnet_tx_intr_task(void *arg)
{
	struct vtnet_softc *sc;
	struct ifnet *ifp;
	struct ifaltq_subque *ifsq;

	sc = arg;
	ifp = sc->vtnet_ifp;
	ifsq = ifq_get_subq_default(&ifp->if_snd);

next:
//	lwkt_serialize_enter(&sc->vtnet_slz);

	if ((ifp->if_flags & IFF_RUNNING) == 0) {
		vtnet_enable_tx_intr(sc);
//		lwkt_serialize_exit(&sc->vtnet_slz);
		return;
	}

	vtnet_txeof(sc);

	if (!ifsq_is_empty(ifsq))
		vtnet_start_locked(ifp, ifsq);

	if (vtnet_enable_tx_intr(sc) != 0) {
		vtnet_disable_tx_intr(sc);
		sc->vtnet_stats.tx_task_rescheduled++;
//		lwkt_serialize_exit(&sc->vtnet_slz);
		goto next;
	}

//	lwkt_serialize_exit(&sc->vtnet_slz);
}

static int
vtnet_tx_vq_intr(void *xsc)
{
	struct vtnet_softc *sc;

	sc = xsc;

	vtnet_disable_tx_intr(sc);
	vtnet_tx_intr_task(sc);

	return (1);
}

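/*
 * Bring the interface down: stop the tick callout, disable both
 * virtqueue interrupts, reset the host adapter, and reclaim any
 * in-flight mbufs.
 */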
static void
vtnet_stop(struct vtnet_softc *sc)
{
	device_t dev;
	struct ifnet *ifp;

	dev = sc->vtnet_dev;
	ifp = sc->vtnet_ifp;

	ASSERT_SERIALIZED(&sc->vtnet_slz);

	sc->vtnet_watchdog_timer = 0;
	callout_stop(&sc->vtnet_tick_ch);
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_flags &= ~(IFF_RUNNING);

	vtnet_disable_rx_intr(sc);
	vtnet_disable_tx_intr(sc);

	/*
	 * Stop the host VirtIO adapter. Note this will reset the host
	 * adapter's state back to the pre-initialized state, so in
	 * order to make the device usable again, we must drive it
	 * through virtio_reinit() and virtio_reinit_complete().
	 */
	virtio_stop(dev);

	sc->vtnet_flags &= ~VTNET_FLAG_LINK;

	vtnet_free_rx_mbufs(sc);
	vtnet_free_tx_mbufs(sc);
}

static int
vtnet_virtio_reinit(struct vtnet_softc *sc)
{
	device_t dev;
	struct ifnet *ifp;
	uint64_t features;
	int error;

	dev = sc->vtnet_dev;
	ifp = sc->vtnet_ifp;
	features = sc->vtnet_features;

	/*
	 * Re-negotiate with the host, removing any disabled receive
	 * features. Transmit features are disabled only on our side
	 * via if_capenable and if_hwassist.
	 */

	if (ifp->if_capabilities & IFCAP_RXCSUM) {
		if ((ifp->if_capenable & IFCAP_RXCSUM) == 0)
			features &= ~VIRTIO_NET_F_GUEST_CSUM;
	}

	if (ifp->if_capabilities & IFCAP_LRO) {
		if ((ifp->if_capenable & IFCAP_LRO) == 0)
			features &= ~VTNET_LRO_FEATURES;
	}

	if (ifp->if_capabilities & IFCAP_VLAN_HWFILTER) {
		if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
			features &= ~VIRTIO_NET_F_CTRL_VLAN;
	}

	error = virtio_reinit(dev, features);
	if (error)
		device_printf(dev, "virtio reinit error %d\n", error);

	return (error);
}

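/*
 * Bring the interface up: reset and renegotiate with the host,
 * reprogram the MAC address and offload assists, refill the receive
 * virtqueue, and restore the receive filters before marking the
 * interface running.
 */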
static void
vtnet_init_locked(struct vtnet_softc *sc)
{
	device_t dev;
	struct ifnet *ifp;
	int error;

	dev = sc->vtnet_dev;
	ifp = sc->vtnet_ifp;

	ASSERT_SERIALIZED(&sc->vtnet_slz);

	if (ifp->if_flags & IFF_RUNNING)
		return;

	/* Stop host's adapter, cancel any pending I/O. */
	vtnet_stop(sc);

	/* Reinitialize the host device. */
	error = vtnet_virtio_reinit(sc);
	if (error) {
		device_printf(dev,
		    "reinitialization failed, stopping device...\n");
		vtnet_stop(sc);
		return;
	}

	/* Update host with assigned MAC address. */
	bcopy(IF_LLADDR(ifp), sc->vtnet_hwaddr, ETHER_ADDR_LEN);
	vtnet_set_hwaddr(sc);

	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist |= VTNET_CSUM_OFFLOAD;
	if (ifp->if_capenable & IFCAP_TSO4)
		ifp->if_hwassist |= CSUM_TSO;

	error = vtnet_init_rx_vq(sc);
	if (error) {
		device_printf(dev,
		    "cannot allocate mbufs for Rx virtqueue\n");
		vtnet_stop(sc);
		return;
	}

	if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) {
		if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) {
			/* Restore promiscuous and all-multicast modes. */
			vtnet_rx_filter(sc);

			/* Restore filtered MAC addresses. */
			vtnet_rx_filter_mac(sc);
		}

		/* Restore VLAN filters. */
		if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
			vtnet_rx_filter_vlan(sc);
	}

	vtnet_enable_rx_intr(sc);
	vtnet_enable_tx_intr(sc);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	virtio_reinit_complete(dev);

	vtnet_update_link_status(sc);
	callout_reset(&sc->vtnet_tick_ch, hz, vtnet_tick, sc);
}

static void
vtnet_init(void *xsc)
{
	struct vtnet_softc *sc;

	sc = xsc;

	lwkt_serialize_enter(&sc->vtnet_slz);
	vtnet_init_locked(sc);
	lwkt_serialize_exit(&sc->vtnet_slz);
}

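/*
 * Execute a control virtqueue command: the first 'readable' sglist
 * segments are read by the host and the remaining 'writable' segments
 * (the one-byte ack) are written by it.
 */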
static void
vtnet_exec_ctrl_cmd(struct vtnet_softc *sc, void *cookie,
    struct sglist *sg, int readable, int writable)
{
	struct virtqueue *vq;
	void *c;

	vq = sc->vtnet_ctrl_vq;

	ASSERT_SERIALIZED(&sc->vtnet_slz);
	KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_VQ,
	    ("no control virtqueue"));
	KASSERT(virtqueue_empty(vq),
	    ("control command already enqueued"));

	if (virtqueue_enqueue(vq, cookie, sg, readable, writable) != 0)
		return;

	virtqueue_notify(vq, &sc->vtnet_slz);

	/*
	 * Poll until the command is complete. Previously, we would
	 * sleep until the control virtqueue interrupt handler woke
	 * us up, but dropping the serializer leads to serialization
	 * difficulties.
	 *
	 * Furthermore, it appears QEMU/KVM only allocates three MSIX
	 * vectors. Two of those vectors are needed for the Rx and Tx
	 * virtqueues, and we do not support sharing both a virtqueue
	 * interrupt and the config changed notification on the same
	 * MSIX vector.
	 */
	c = virtqueue_poll(vq, NULL);
	KASSERT(c == cookie, ("unexpected control command response"));
}

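/*
 * Set the primary MAC address via VIRTIO_NET_CTRL_MAC_ADDR_SET: the
 * header and address are readable by the host, the trailing ack byte
 * is writable.
 */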
static int
vtnet_ctrl_mac_cmd(struct vtnet_softc *sc, uint8_t *hwaddr)
{
	struct {
		struct virtio_net_ctrl_hdr hdr __aligned(2);
		uint8_t pad1;
		char aligned_hwaddr[ETHER_ADDR_LEN] __aligned(8);
		uint8_t pad2;
		uint8_t ack;
	} s;
	struct sglist_seg segs[3];
	struct sglist sg;
	int error;

	s.hdr.class = VIRTIO_NET_CTRL_MAC;
	s.hdr.cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET;
	s.ack = VIRTIO_NET_ERR;

	/* Copy the MAC address into physically contiguous memory. */
	memcpy(s.aligned_hwaddr, hwaddr, ETHER_ADDR_LEN);

	sglist_init(&sg, 3, segs);
	error = 0;
	error |= sglist_append(&sg, &s.hdr,
	    sizeof(struct virtio_net_ctrl_hdr));
	error |= sglist_append(&sg, s.aligned_hwaddr, ETHER_ADDR_LEN);
	error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
	KASSERT(error == 0 && sg.sg_nseg == 3,
	    ("%s: error %d adding set MAC msg to sglist", __func__, error));

	vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);

	return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
}

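/*
 * Synchronize the host's promiscuous and all-multicast state with the
 * current interface flags.
 */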
static void
vtnet_rx_filter(struct vtnet_softc *sc)
{
	device_t dev;
	struct ifnet *ifp;

	dev = sc->vtnet_dev;
	ifp = sc->vtnet_ifp;

	ASSERT_SERIALIZED(&sc->vtnet_slz);
	KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX,
	    ("CTRL_RX feature not negotiated"));

	if (vtnet_set_promisc(sc, ifp->if_flags & IFF_PROMISC) != 0)
		device_printf(dev, "cannot %s promiscuous mode\n",
		    (ifp->if_flags & IFF_PROMISC) ? "enable" : "disable");

	if (vtnet_set_allmulti(sc, ifp->if_flags & IFF_ALLMULTI) != 0)
		device_printf(dev, "cannot %s all-multicast mode\n",
		    (ifp->if_flags & IFF_ALLMULTI) ? "enable" : "disable");
}

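/*
 * Issue a VIRTIO_NET_CTRL_RX class command toggling 'cmd' on or off,
 * returning 0 if the host acknowledges it.
 */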
static int
vtnet_ctrl_rx_cmd(struct vtnet_softc *sc, int cmd, int on)
{
	struct sglist_seg segs[3];
	struct sglist sg;
	struct {
		struct virtio_net_ctrl_hdr hdr __aligned(2);
		uint8_t pad1;
		uint8_t onoff;
		uint8_t pad2;
		uint8_t ack;
	} s;
	int error;

	KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX,
	    ("%s: CTRL_RX feature not negotiated", __func__));

	s.hdr.class = VIRTIO_NET_CTRL_RX;
	s.hdr.cmd = cmd;
	s.onoff = !!on;
	s.ack = VIRTIO_NET_ERR;

	sglist_init(&sg, 3, segs);
	error = 0;
	error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
	error |= sglist_append(&sg, &s.onoff, sizeof(uint8_t));
	error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
	KASSERT(error == 0 && sg.sg_nseg == 3,
	    ("%s: error %d adding Rx message to sglist", __func__, error));

	vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);

	return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
}

static int
vtnet_set_promisc(struct vtnet_softc *sc, int on)
{

	return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_PROMISC, on));
}

static int
vtnet_set_allmulti(struct vtnet_softc *sc, int on)
{

	return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_ALLMULTI, on));
}

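/*
 * Program the host's unicast and multicast MAC filter tables from the
 * interface address lists, falling back to promiscuous or all-multicast
 * mode if either table would overflow.
 */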
static void
vtnet_rx_filter_mac(struct vtnet_softc *sc)
{
	struct virtio_net_ctrl_hdr hdr __aligned(2);
	struct vtnet_mac_filter *filter;
	struct sglist_seg segs[4];
	struct sglist sg;
	struct ifnet *ifp;
	struct ifaddr *ifa;
	struct ifaddr_container *ifac;
	struct ifmultiaddr *ifma;
	int ucnt, mcnt, promisc, allmulti, error;
	uint8_t ack;

	ifp = sc->vtnet_ifp;
	ucnt = 0;
	mcnt = 0;
	promisc = 0;
	allmulti = 0;

	ASSERT_SERIALIZED(&sc->vtnet_slz);
	KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX,
	    ("%s: CTRL_RX feature not negotiated", __func__));

	/* Use the MAC filtering table allocated in vtnet_attach. */
	filter = sc->vtnet_macfilter;
	memset(filter, 0, sizeof(struct vtnet_mac_filter));

	/* Unicast MAC addresses: */
	TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
		ifa = ifac->ifa;
		if (ifa->ifa_addr->sa_family != AF_LINK)
			continue;
		else if (memcmp(LLADDR((struct sockaddr_dl *)ifa->ifa_addr),
		    sc->vtnet_hwaddr, ETHER_ADDR_LEN) == 0)
			continue;
		else if (ucnt == VTNET_MAX_MAC_ENTRIES) {
			promisc = 1;
			break;
		}

		bcopy(LLADDR((struct sockaddr_dl *)ifa->ifa_addr),
		    &filter->vmf_unicast.macs[ucnt], ETHER_ADDR_LEN);
		ucnt++;
	}

	if (promisc != 0) {
		filter->vmf_unicast.nentries = 0;
		if_printf(ifp, "more than %d MAC addresses assigned, "
		    "falling back to promiscuous mode\n",
		    VTNET_MAX_MAC_ENTRIES);
	} else
		filter->vmf_unicast.nentries = ucnt;

	/* Multicast MAC addresses: */
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		else if (mcnt == VTNET_MAX_MAC_ENTRIES) {
			allmulti = 1;
			break;
		}

		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    &filter->vmf_multicast.macs[mcnt], ETHER_ADDR_LEN);
		mcnt++;
	}

	if (allmulti != 0) {
		filter->vmf_multicast.nentries = 0;
		if_printf(ifp, "more than %d multicast MAC addresses "
		    "assigned, falling back to all-multicast mode\n",
		    VTNET_MAX_MAC_ENTRIES);
	} else
		filter->vmf_multicast.nentries = mcnt;

	if (promisc != 0 && allmulti != 0)
		goto out;

	hdr.class = VIRTIO_NET_CTRL_MAC;
	hdr.cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;
	ack = VIRTIO_NET_ERR;

	sglist_init(&sg, 4, segs);
	error = 0;
	error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr));
	error |= sglist_append(&sg, &filter->vmf_unicast,
	    sizeof(uint32_t) + filter->vmf_unicast.nentries * ETHER_ADDR_LEN);
	error |= sglist_append(&sg, &filter->vmf_multicast,
	    sizeof(uint32_t) + filter->vmf_multicast.nentries * ETHER_ADDR_LEN);
	error |= sglist_append(&sg, &ack, sizeof(uint8_t));
	KASSERT(error == 0 && sg.sg_nseg == 4,
	    ("%s: error %d adding MAC filter msg to sglist", __func__, error));

	vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1);

	if (ack != VIRTIO_NET_OK)
		if_printf(ifp, "error setting host MAC filter table\n");

out:
	if (promisc != 0 && vtnet_set_promisc(sc, 1) != 0)
		if_printf(ifp, "cannot enable promiscuous mode\n");
	if (allmulti != 0 && vtnet_set_allmulti(sc, 1) != 0)
		if_printf(ifp, "cannot enable all-multicast mode\n");
}

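/*
 * Add or remove a single tag in the host's VLAN filter table via the
 * control virtqueue.
 */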
static int
vtnet_exec_vlan_filter(struct vtnet_softc *sc, int add, uint16_t tag)
{
	struct sglist_seg segs[3];
	struct sglist sg;
	struct {
		struct virtio_net_ctrl_hdr hdr __aligned(2);
		uint8_t pad1;
		uint16_t tag;
		uint8_t pad2;
		uint8_t ack;
	} s;
	int error;

	s.hdr.class = VIRTIO_NET_CTRL_VLAN;
	s.hdr.cmd = add ? VIRTIO_NET_CTRL_VLAN_ADD : VIRTIO_NET_CTRL_VLAN_DEL;
	s.tag = tag;
	s.ack = VIRTIO_NET_ERR;

	sglist_init(&sg, 3, segs);
	error = 0;
	error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
	error |= sglist_append(&sg, &s.tag, sizeof(uint16_t));
	error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
	KASSERT(error == 0 && sg.sg_nseg == 3,
	    ("%s: error %d adding VLAN message to sglist", __func__, error));

	vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);

	return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
}

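/*
 * Reprogram the host's VLAN filter table from the shadow bitmap, one
 * 32-bit word at a time.
 */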
static void
vtnet_rx_filter_vlan(struct vtnet_softc *sc)
{
	uint32_t w;
	uint16_t tag;
	int i, bit, nvlans;

	ASSERT_SERIALIZED(&sc->vtnet_slz);
	KASSERT(sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER,
	    ("%s: VLAN_FILTER feature not negotiated", __func__));

	nvlans = sc->vtnet_nvlans;

	/* Enable the filter for each configured VLAN. */
	for (i = 0; i < VTNET_VLAN_SHADOW_SIZE && nvlans > 0; i++) {
		w = sc->vtnet_vlan_shadow[i];
		while ((bit = ffs(w) - 1) != -1) {
			w &= ~(1 << bit);
			tag = sizeof(w) * CHAR_BIT * i + bit;
			nvlans--;

			if (vtnet_exec_vlan_filter(sc, 1, tag) != 0) {
				device_printf(sc->vtnet_dev,
				    "cannot enable VLAN %d filter\n", tag);
			}
		}
	}

	KASSERT(nvlans == 0, ("VLAN count incorrect"));
}

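/*
 * Track a tag in the shadow VLAN bitmap (word tag / 32, bit tag % 32)
 * and, when hardware VLAN filtering is enabled, mirror the change into
 * the host's filter table.
 */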
static void
vtnet_update_vlan_filter(struct vtnet_softc *sc, int add, uint16_t tag)
{
	struct ifnet *ifp;
	int idx, bit;

	ifp = sc->vtnet_ifp;
	idx = (tag >> 5) & 0x7F;
	bit = tag & 0x1F;

	if (tag == 0 || tag > 4095)
		return;

	lwkt_serialize_enter(&sc->vtnet_slz);

	/* Update shadow VLAN table. */
	if (add) {
		sc->vtnet_nvlans++;
		sc->vtnet_vlan_shadow[idx] |= (1 << bit);
	} else {
		sc->vtnet_nvlans--;
		sc->vtnet_vlan_shadow[idx] &= ~(1 << bit);
	}

	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER &&
	    vtnet_exec_vlan_filter(sc, add, tag) != 0) {
		device_printf(sc->vtnet_dev,
		    "cannot %s VLAN %d %s the host filter table\n",
		    add ? "add" : "remove", tag, add ? "to" : "from");
	}

	lwkt_serialize_exit(&sc->vtnet_slz);
}

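/*
 * VLAN configuration event handlers; events for other interfaces are
 * ignored.
 */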
static void
vtnet_register_vlan(void *arg, struct ifnet *ifp, uint16_t tag)
{

	if (ifp->if_softc != arg)
		return;

	vtnet_update_vlan_filter(arg, 1, tag);
}

static void
vtnet_unregister_vlan(void *arg, struct ifnet *ifp, uint16_t tag)
{

	if (ifp->if_softc != arg)
		return;

	vtnet_update_vlan_filter(arg, 0, tag);
}

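/*
 * The media is emulated: only Ethernet is valid, and there is nothing
 * to reconfigure on a media change, so only the link state is reported.
 */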
static int
vtnet_ifmedia_upd(struct ifnet *ifp)
{
	struct vtnet_softc *sc;
	struct ifmedia *ifm;

	sc = ifp->if_softc;
	ifm = &sc->vtnet_media;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	return (0);
}

static void
vtnet_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct vtnet_softc *sc;

	sc = ifp->if_softc;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	lwkt_serialize_enter(&sc->vtnet_slz);
	if (vtnet_is_link_up(sc) != 0) {
		ifmr->ifm_status |= IFM_ACTIVE;
		ifmr->ifm_active |= VTNET_MEDIATYPE;
	} else
		ifmr->ifm_active |= IFM_NONE;
	lwkt_serialize_exit(&sc->vtnet_slz);
}

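/*
 * Attach read-only statistics counters to the device's sysctl tree.
 */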
static void
vtnet_add_statistics(struct vtnet_softc *sc)
{
	device_t dev;
	struct vtnet_statistics *stats;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree;
	struct sysctl_oid_list *child;

	dev = sc->vtnet_dev;
	stats = &sc->vtnet_stats;
	ctx = device_get_sysctl_ctx(dev);
	tree = device_get_sysctl_tree(dev);
	child = SYSCTL_CHILDREN(tree);

	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "mbuf_alloc_failed",
	    CTLFLAG_RD, &stats->mbuf_alloc_failed, 0,
	    "Mbuf cluster allocation failures");

	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_frame_too_large",
	    CTLFLAG_RD, &stats->rx_frame_too_large, 0,
	    "Received frame larger than the mbuf chain");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_enq_replacement_failed",
	    CTLFLAG_RD, &stats->rx_enq_replacement_failed, 0,
	    "Enqueuing the replacement receive mbuf failed");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_mergeable_failed",
	    CTLFLAG_RD, &stats->rx_mergeable_failed, 0,
	    "Mergeable buffers receive failures");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_ethtype",
	    CTLFLAG_RD, &stats->rx_csum_bad_ethtype, 0,
	    "Received checksum offloaded buffer with unsupported "
	    "Ethernet type");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_ipproto",
	    CTLFLAG_RD, &stats->rx_csum_bad_ipproto, 0,
	    "Received checksum offloaded buffer with incorrect IP protocol");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_offset",
	    CTLFLAG_RD, &stats->rx_csum_bad_offset, 0,
	    "Received checksum offloaded buffer with incorrect offset");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_failed",
	    CTLFLAG_RD, &stats->rx_csum_failed, 0,
	    "Received buffer checksum offload failed");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_offloaded",
	    CTLFLAG_RD, &stats->rx_csum_offloaded, 0,
	    "Received buffer checksum offload succeeded");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_task_rescheduled",
	    CTLFLAG_RD, &stats->rx_task_rescheduled, 0,
	    "Times the receive interrupt task rescheduled itself");

	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_bad_ethtype",
	    CTLFLAG_RD, &stats->tx_csum_bad_ethtype, 0,
	    "Aborted transmit of checksum offloaded buffer with unknown "
	    "Ethernet type");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_bad_ethtype",
	    CTLFLAG_RD, &stats->tx_tso_bad_ethtype, 0,
	    "Aborted transmit of TSO buffer with unknown Ethernet type");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_defragged",
	    CTLFLAG_RD, &stats->tx_defragged, 0,
	    "Transmit mbufs defragged");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_defrag_failed",
	    CTLFLAG_RD, &stats->tx_defrag_failed, 0,
	    "Aborted transmit of buffer because defrag failed");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_offloaded",
	    CTLFLAG_RD, &stats->tx_csum_offloaded, 0,
	    "Offloaded checksum of transmitted buffer");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_offloaded",
	    CTLFLAG_RD, &stats->tx_tso_offloaded, 0,
	    "Segmentation offload of transmitted buffer");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_task_rescheduled",
	    CTLFLAG_RD, &stats->tx_task_rescheduled, 0,
	    "Times the transmit interrupt task rescheduled itself");
}

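/*
 * Virtqueue interrupt enable/disable helpers. virtqueue_enable_intr()
 * returns nonzero when entries are already pending, letting callers
 * reschedule instead of missing a completion. With transmit interrupt
 * moderation, transmit interrupts remain disabled and
 * vtnet_enable_tx_intr() always reports no pending work.
 */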
static int
vtnet_enable_rx_intr(struct vtnet_softc *sc)
{

	return (virtqueue_enable_intr(sc->vtnet_rx_vq));
}

static void
vtnet_disable_rx_intr(struct vtnet_softc *sc)
{

	virtqueue_disable_intr(sc->vtnet_rx_vq);
}

static int
vtnet_enable_tx_intr(struct vtnet_softc *sc)
{

#ifdef VTNET_TX_INTR_MODERATION
	return (0);
#else
	return (virtqueue_enable_intr(sc->vtnet_tx_vq));
#endif
}

static void
vtnet_disable_tx_intr(struct vtnet_softc *sc)
{

	virtqueue_disable_intr(sc->vtnet_tx_vq);
}