1 /*-
2  * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice unmodified, this list of conditions, and the following
10  *    disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  */
26 
27 /* Driver for VirtIO network devices. */
28 
29 #include <sys/cdefs.h>
30 
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/kernel.h>
34 #include <sys/sockio.h>
35 #include <sys/mbuf.h>
36 #include <sys/malloc.h>
37 #include <sys/module.h>
38 #include <sys/socket.h>
39 #include <sys/sysctl.h>
40 #include <sys/taskqueue.h>
41 #include <sys/random.h>
42 #include <sys/sglist.h>
43 #include <sys/serialize.h>
44 #include <sys/bus.h>
45 #include <sys/rman.h>
46 
47 #include <machine/limits.h>
48 
49 #include <net/ethernet.h>
50 #include <net/if.h>
51 #include <net/if_arp.h>
52 #include <net/if_dl.h>
53 #include <net/if_types.h>
54 #include <net/if_media.h>
55 #include <net/vlan/if_vlan_var.h>
56 #include <net/vlan/if_vlan_ether.h>
57 #include <net/ifq_var.h>
58 
59 #include <net/bpf.h>
60 
61 #include <netinet/in_systm.h>
62 #include <netinet/in.h>
63 #include <netinet/ip.h>
64 #include <netinet/ip6.h>
65 #include <netinet/udp.h>
66 #include <netinet/tcp.h>
67 
68 #include <dev/virtual/virtio/virtio/virtio.h>
69 #include <dev/virtual/virtio/virtio/virtqueue.h>
70 #include <dev/virtual/virtio/net/virtio_net.h>
71 #include <dev/virtual/virtio/net/if_vtnetvar.h>
72 
73 #include "virtio_if.h"
74 
75 MALLOC_DEFINE(M_VTNET, "VTNET_TX", "Outgoing VTNET TX frame header");
76 
77 static int	vtnet_modevent(module_t, int, void *);
78 
79 static int	vtnet_probe(device_t);
80 static int	vtnet_attach(device_t);
81 static int	vtnet_detach(device_t);
82 static int	vtnet_suspend(device_t);
83 static int	vtnet_resume(device_t);
84 static int	vtnet_shutdown(device_t);
85 static int	vtnet_config_change(device_t);
86 
87 static void	vtnet_negotiate_features(struct vtnet_softc *);
88 static int	vtnet_alloc_virtqueues(struct vtnet_softc *);
89 static void	vtnet_get_hwaddr(struct vtnet_softc *);
90 static void	vtnet_set_hwaddr(struct vtnet_softc *);
91 static int	vtnet_is_link_up(struct vtnet_softc *);
92 static void	vtnet_update_link_status(struct vtnet_softc *);
93 #if 0
94 static void	vtnet_watchdog(struct vtnet_softc *);
95 #endif
96 static void	vtnet_config_change_task(void *, int);
97 static int	vtnet_setup_interface(struct vtnet_softc *);
98 static int	vtnet_change_mtu(struct vtnet_softc *, int);
99 static int	vtnet_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
100 
101 static int	vtnet_init_rx_vq(struct vtnet_softc *);
102 static void	vtnet_free_rx_mbufs(struct vtnet_softc *);
103 static void	vtnet_free_tx_mbufs(struct vtnet_softc *);
104 static void	vtnet_free_ctrl_vq(struct vtnet_softc *);
105 
106 static struct mbuf * vtnet_alloc_rxbuf(struct vtnet_softc *, int,
107 		    struct mbuf **);
108 static int	vtnet_replace_rxbuf(struct vtnet_softc *,
109 		    struct mbuf *, int);
110 static int	vtnet_newbuf(struct vtnet_softc *);
111 static void	vtnet_discard_merged_rxbuf(struct vtnet_softc *, int);
112 static void	vtnet_discard_rxbuf(struct vtnet_softc *, struct mbuf *);
113 static int	vtnet_enqueue_rxbuf(struct vtnet_softc *, struct mbuf *);
114 static void	vtnet_vlan_tag_remove(struct mbuf *);
115 static int	vtnet_rx_csum(struct vtnet_softc *, struct mbuf *,
116 		    struct virtio_net_hdr *);
117 static int	vtnet_rxeof_merged(struct vtnet_softc *, struct mbuf *, int);
118 static int	vtnet_rxeof(struct vtnet_softc *, int, int *);
119 static void	vtnet_rx_intr_task(void *);
120 static int	vtnet_rx_vq_intr(void *);
121 
122 static void	vtnet_txeof(struct vtnet_softc *);
123 static struct mbuf * vtnet_tx_offload(struct vtnet_softc *, struct mbuf *,
124 		    struct virtio_net_hdr *);
125 static int	vtnet_enqueue_txbuf(struct vtnet_softc *, struct mbuf **,
126 		    struct vtnet_tx_header *);
127 static int	vtnet_encap(struct vtnet_softc *, struct mbuf **);
128 static void	vtnet_start_locked(struct ifnet *, struct ifaltq_subque *);
129 static void	vtnet_start(struct ifnet *, struct ifaltq_subque *);
130 static void	vtnet_tick(void *);
131 static void	vtnet_tx_intr_task(void *);
132 static int	vtnet_tx_vq_intr(void *);
133 
134 static void	vtnet_stop(struct vtnet_softc *);
135 static int	vtnet_virtio_reinit(struct vtnet_softc *);
136 static void	vtnet_init_locked(struct vtnet_softc *);
137 static void	vtnet_init(void *);
138 
139 static void	vtnet_exec_ctrl_cmd(struct vtnet_softc *, void *,
140 		    struct sglist *, int, int);
141 
142 static int	vtnet_ctrl_mac_cmd(struct vtnet_softc *, uint8_t *);
143 static int	vtnet_ctrl_rx_cmd(struct vtnet_softc *, int, int);
144 static int	vtnet_set_promisc(struct vtnet_softc *, int);
145 static int	vtnet_set_allmulti(struct vtnet_softc *, int);
146 static void	vtnet_rx_filter(struct vtnet_softc *sc);
147 static void	vtnet_rx_filter_mac(struct vtnet_softc *);
148 
149 static int	vtnet_exec_vlan_filter(struct vtnet_softc *, int, uint16_t);
150 static void	vtnet_rx_filter_vlan(struct vtnet_softc *);
151 static void	vtnet_update_vlan_filter(struct vtnet_softc *, int, uint16_t);
152 static void	vtnet_register_vlan(void *, struct ifnet *, uint16_t);
153 static void	vtnet_unregister_vlan(void *, struct ifnet *, uint16_t);
154 
155 static int	vtnet_ifmedia_upd(struct ifnet *);
156 static void	vtnet_ifmedia_sts(struct ifnet *, struct ifmediareq *);
157 
158 static void	vtnet_add_statistics(struct vtnet_softc *);
159 
160 static int	vtnet_enable_rx_intr(struct vtnet_softc *);
161 static int	vtnet_enable_tx_intr(struct vtnet_softc *);
162 static void	vtnet_disable_rx_intr(struct vtnet_softc *);
163 static void	vtnet_disable_tx_intr(struct vtnet_softc *);
164 
165 /* Tunables. */
166 static int vtnet_csum_disable = 0;
167 TUNABLE_INT("hw.vtnet.csum_disable", &vtnet_csum_disable);
168 static int vtnet_tso_disable = 1;
169 TUNABLE_INT("hw.vtnet.tso_disable", &vtnet_tso_disable);
170 static int vtnet_lro_disable = 0;
171 TUNABLE_INT("hw.vtnet.lro_disable", &vtnet_lro_disable);
172 
173 /*
174  * Reducing the number of transmit completed interrupts can
175  * improve performance. To do so, the define below keeps the
176  * Tx vq interrupt disabled and adds calls to vtnet_txeof()
177  * in the start and watchdog paths. The price to pay for this
178  * is that freeing transmitted mbufs may be delayed until
179  * the watchdog fires.
180  */
181 #define VTNET_TX_INTR_MODERATION
182 
183 static struct virtio_feature_desc vtnet_feature_desc[] = {
184 	{ VIRTIO_NET_F_CSUM,		"TxChecksum"	},
185 	{ VIRTIO_NET_F_GUEST_CSUM,	"RxChecksum"	},
186 	{ VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, "DynOffload"	},
187 	{ VIRTIO_NET_F_MAC,		"MacAddress"	},
188 	{ VIRTIO_NET_F_GSO,		"TxAllGSO"	},
189 	{ VIRTIO_NET_F_GUEST_TSO4,	"RxTSOv4"	},
190 	{ VIRTIO_NET_F_GUEST_TSO6,	"RxTSOv6"	},
191 	{ VIRTIO_NET_F_GUEST_ECN,	"RxECN"		},
192 	{ VIRTIO_NET_F_GUEST_UFO,	"RxUFO"		},
193 	{ VIRTIO_NET_F_HOST_TSO4,	"TxTSOv4"	},
194 	{ VIRTIO_NET_F_HOST_TSO6,	"TxTSOv6"	},
195 	{ VIRTIO_NET_F_HOST_ECN,	"TxTSOECN"	},
196 	{ VIRTIO_NET_F_HOST_UFO,	"TxUFO"		},
197 	{ VIRTIO_NET_F_MRG_RXBUF,	"MrgRxBuf"	},
198 	{ VIRTIO_NET_F_STATUS,		"Status"	},
199 	{ VIRTIO_NET_F_CTRL_VQ,		"ControlVq"	},
200 	{ VIRTIO_NET_F_CTRL_RX,		"RxMode"	},
201 	{ VIRTIO_NET_F_CTRL_VLAN,	"VLanFilter"	},
202 	{ VIRTIO_NET_F_CTRL_RX_EXTRA,	"RxModeExtra"	},
203 	{ VIRTIO_NET_F_GUEST_ANNOUNCE,	"GuestAnnounce"	},
204 	{ VIRTIO_NET_F_MQ,		"RFS"		},
205 	{ VIRTIO_NET_F_CTRL_MAC_ADDR,	"SetMacAddress"	},
206 	{ 0, NULL }
207 };
208 
209 static device_method_t vtnet_methods[] = {
210 	/* Device methods. */
211 	DEVMETHOD(device_probe,		vtnet_probe),
212 	DEVMETHOD(device_attach,	vtnet_attach),
213 	DEVMETHOD(device_detach,	vtnet_detach),
214 	DEVMETHOD(device_suspend,	vtnet_suspend),
215 	DEVMETHOD(device_resume,	vtnet_resume),
216 	DEVMETHOD(device_shutdown,	vtnet_shutdown),
217 
218 	/* VirtIO methods. */
219 	DEVMETHOD(virtio_config_change, vtnet_config_change),
220 
221 	{ 0, 0 }
222 };
223 
224 static driver_t vtnet_driver = {
225 	"vtnet",
226 	vtnet_methods,
227 	sizeof(struct vtnet_softc)
228 };
229 
230 static devclass_t vtnet_devclass;
231 
232 DRIVER_MODULE(vtnet, virtio_pci, vtnet_driver, vtnet_devclass,
233     vtnet_modevent, 0);
234 MODULE_VERSION(vtnet, 1);
235 MODULE_DEPEND(vtnet, virtio, 1, 1, 1);
236 
237 static int
238 vtnet_modevent(module_t mod, int type, void *unused)
239 {
240 	int error;
241 
242 	error = 0;
243 
244 	switch (type) {
245 	case MOD_LOAD:
246 		break;
247 	case MOD_UNLOAD:
248 		break;
249 	case MOD_SHUTDOWN:
250 		break;
251 	default:
252 		error = EOPNOTSUPP;
253 		break;
254 	}
255 
256 	return (error);
257 }
258 
259 static int
260 vtnet_probe(device_t dev)
261 {
262 	if (virtio_get_device_type(dev) != VIRTIO_ID_NETWORK)
263 		return (ENXIO);
264 
265 	device_set_desc(dev, "VirtIO Networking Adapter");
266 
267 	return (BUS_PROBE_DEFAULT);
268 }
269 
270 static int
271 vtnet_attach(device_t dev)
272 {
273 	struct vtnet_softc *sc;
274 	int error;
275 
276 	sc = device_get_softc(dev);
277 	sc->vtnet_dev = dev;
278 
279 	lwkt_serialize_init(&sc->vtnet_slz);
280 	callout_init(&sc->vtnet_tick_ch);
281 
282 	ifmedia_init(&sc->vtnet_media, IFM_IMASK, vtnet_ifmedia_upd,
283 		     vtnet_ifmedia_sts);
284 	ifmedia_add(&sc->vtnet_media, VTNET_MEDIATYPE, 0, NULL);
285 	ifmedia_set(&sc->vtnet_media, VTNET_MEDIATYPE);
286 
287 	vtnet_add_statistics(sc);
288 
289 	/* Register our feature descriptions. */
290 	virtio_set_feature_desc(dev, vtnet_feature_desc);
291 	vtnet_negotiate_features(sc);
292 
293 	if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
294 		sc->vtnet_flags |= VTNET_FLAG_INDIRECT;
295 
296 	if (virtio_with_feature(dev, VIRTIO_NET_F_MAC)) {
297 		/* This feature should always be negotiated. */
298 		sc->vtnet_flags |= VTNET_FLAG_MAC;
299 	}
300 
301 	if (virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF)) {
302 		sc->vtnet_flags |= VTNET_FLAG_MRG_RXBUFS;
303 		sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
304 	} else {
305 		sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr);
306 	}
307 
308 	sc->vtnet_rx_mbuf_size = MCLBYTES;
309 	sc->vtnet_rx_mbuf_count = VTNET_NEEDED_RX_MBUFS(sc);
310 
311 	if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VQ)) {
312 		sc->vtnet_flags |= VTNET_FLAG_CTRL_VQ;
313 
314 		if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_RX))
315 			sc->vtnet_flags |= VTNET_FLAG_CTRL_RX;
316 		if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VLAN))
317 			sc->vtnet_flags |= VTNET_FLAG_VLAN_FILTER;
318 		if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_MAC_ADDR) &&
319 		    virtio_with_feature(dev, VIRTIO_NET_F_CTRL_RX))
320 			sc->vtnet_flags |= VTNET_FLAG_CTRL_MAC;
321 	}
322 
323 	/* Read (or generate) the MAC address for the adapter. */
324 	vtnet_get_hwaddr(sc);
325 
326 	error = vtnet_alloc_virtqueues(sc);
327 	if (error) {
328 		device_printf(dev, "cannot allocate virtqueues\n");
329 		goto fail;
330 	}
331 
332 	error = vtnet_setup_interface(sc);
333 	if (error) {
334 		device_printf(dev, "cannot setup interface\n");
335 		goto fail;
336 	}
337 
338 	TASK_INIT(&sc->vtnet_cfgchg_task, 0, vtnet_config_change_task, sc);
339 
340 	error = virtio_setup_intr(dev, &sc->vtnet_slz);
341 	if (error) {
342 		device_printf(dev, "cannot setup virtqueue interrupts\n");
343 		ether_ifdetach(sc->vtnet_ifp);
344 		goto fail;
345 	}
346 
347 	/*
348 	 * Device defaults to promiscuous mode for backwards
349 	 * compatibility. Turn it off if possible.
350 	 */
351 	if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) {
352 		lwkt_serialize_enter(&sc->vtnet_slz);
353 		if (vtnet_set_promisc(sc, 0) != 0) {
354 			sc->vtnet_ifp->if_flags |= IFF_PROMISC;
355 			device_printf(dev,
356 			    "cannot disable promiscuous mode\n");
357 		}
358 		lwkt_serialize_exit(&sc->vtnet_slz);
359 	} else
360 		sc->vtnet_ifp->if_flags |= IFF_PROMISC;
361 
362 fail:
363 	if (error)
364 		vtnet_detach(dev);
365 
366 	return (error);
367 }
368 
369 static int
370 vtnet_detach(device_t dev)
371 {
372 	struct vtnet_softc *sc;
373 	struct ifnet *ifp;
374 
375 	sc = device_get_softc(dev);
376 	ifp = sc->vtnet_ifp;
377 
378 	if (device_is_attached(dev)) {
379 		lwkt_serialize_enter(&sc->vtnet_slz);
380 		vtnet_stop(sc);
381 		lwkt_serialize_exit(&sc->vtnet_slz);
382 
383 		callout_stop(&sc->vtnet_tick_ch);
384 		taskqueue_drain(taskqueue_thread[mycpuid], &sc->vtnet_cfgchg_task);
385 
386 		ether_ifdetach(ifp);
387 	}
388 
389 	if (sc->vtnet_vlan_attach != NULL) {
390 		EVENTHANDLER_DEREGISTER(vlan_config, sc->vtnet_vlan_attach);
391 		sc->vtnet_vlan_attach = NULL;
392 	}
393 	if (sc->vtnet_vlan_detach != NULL) {
394 		EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vtnet_vlan_detach);
395 		sc->vtnet_vlan_detach = NULL;
396 	}
397 
398 	if (ifp) {
399 		if_free(ifp);
400 		sc->vtnet_ifp = NULL;
401 	}
402 
403 	if (sc->vtnet_rx_vq != NULL)
404 		vtnet_free_rx_mbufs(sc);
405 	if (sc->vtnet_tx_vq != NULL)
406 		vtnet_free_tx_mbufs(sc);
407 	if (sc->vtnet_ctrl_vq != NULL)
408 		vtnet_free_ctrl_vq(sc);
409 
410 	if (sc->vtnet_txhdrarea != NULL) {
411 		contigfree(sc->vtnet_txhdrarea,
412 		    sc->vtnet_txhdrcount * sizeof(struct vtnet_tx_header),
413 		    M_VTNET);
414 		sc->vtnet_txhdrarea = NULL;
415 	}
416 	if (sc->vtnet_macfilter != NULL) {
417 		contigfree(sc->vtnet_macfilter,
418 		    sizeof(struct vtnet_mac_filter), M_DEVBUF);
419 		sc->vtnet_macfilter = NULL;
420 	}
421 
422 	ifmedia_removeall(&sc->vtnet_media);
423 
424 	return (0);
425 }
426 
427 static int
428 vtnet_suspend(device_t dev)
429 {
430 	struct vtnet_softc *sc;
431 
432 	sc = device_get_softc(dev);
433 
434 	lwkt_serialize_enter(&sc->vtnet_slz);
435 	vtnet_stop(sc);
436 	sc->vtnet_flags |= VTNET_FLAG_SUSPENDED;
437 	lwkt_serialize_exit(&sc->vtnet_slz);
438 
439 	return (0);
440 }
441 
442 static int
443 vtnet_resume(device_t dev)
444 {
445 	struct vtnet_softc *sc;
446 	struct ifnet *ifp;
447 
448 	sc = device_get_softc(dev);
449 	ifp = sc->vtnet_ifp;
450 
451 	lwkt_serialize_enter(&sc->vtnet_slz);
452 	if (ifp->if_flags & IFF_UP)
453 		vtnet_init_locked(sc);
454 	sc->vtnet_flags &= ~VTNET_FLAG_SUSPENDED;
455 	lwkt_serialize_exit(&sc->vtnet_slz);
456 
457 	return (0);
458 }
459 
460 static int
461 vtnet_shutdown(device_t dev)
462 {
463 
464 	/*
465 	 * Suspend already does all of what we need to
466 	 * do here; we just never expect to be resumed.
467 	 */
468 	return (vtnet_suspend(dev));
469 }
470 
471 static int
472 vtnet_config_change(device_t dev)
473 {
474 	struct vtnet_softc *sc;
475 
476 	sc = device_get_softc(dev);
477 
478 	taskqueue_enqueue(taskqueue_thread[mycpuid], &sc->vtnet_cfgchg_task);
479 
480 	return (1);
481 }
482 
483 static void
484 vtnet_negotiate_features(struct vtnet_softc *sc)
485 {
486 	device_t dev;
487 	uint64_t mask, features;
488 
489 	dev = sc->vtnet_dev;
490 	mask = 0;
491 
492 	if (vtnet_csum_disable)
493 		mask |= VIRTIO_NET_F_CSUM | VIRTIO_NET_F_GUEST_CSUM;
494 
495 	/*
496 	 * TSO and LRO are only available when their corresponding checksum
497 	 * offload feature is also negotiated.
498 	 */
499 
500 	if (vtnet_csum_disable || vtnet_tso_disable)
501 		mask |= VIRTIO_NET_F_HOST_TSO4 | VIRTIO_NET_F_HOST_TSO6 |
502 		    VIRTIO_NET_F_HOST_ECN;
503 
504 	if (vtnet_csum_disable || vtnet_lro_disable)
505 		mask |= VTNET_LRO_FEATURES;
506 
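	/*
	 * For example, setting the hw.vtnet.csum_disable tunable masks off
	 * VIRTIO_NET_F_CSUM and VIRTIO_NET_F_GUEST_CSUM above, and via the
	 * two conditions above it also masks the TSO and LRO features,
	 * since each depends on its corresponding checksum offload.
	 */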
507 	features = VTNET_FEATURES & ~mask;
508 	features |= VIRTIO_F_NOTIFY_ON_EMPTY;
509 	features |= VIRTIO_F_ANY_LAYOUT;
510 	sc->vtnet_features = virtio_negotiate_features(dev, features);
511 
512 	if (virtio_with_feature(dev, VTNET_LRO_FEATURES) &&
513 	    virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF) == 0) {
514 		/*
515 		 * LRO without mergeable buffers requires special care. This
516 		 * is not ideal because every receive buffer must be large
517 		 * enough to hold the maximum TCP packet, the Ethernet header,
518 		 * and the VirtIO net header. This requires up to 34 descriptors with
519 		 * MCLBYTES clusters. If we do not have indirect descriptors,
520 		 * LRO is disabled since the virtqueue will not contain very
521 		 * many receive buffers.
522 		 */
523 		if (!virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC)) {
524 			device_printf(dev,
525 			    "LRO disabled, neither mergeable buffers nor "
526 			    "indirect descriptors were negotiated\n");
527 
528 			features &= ~VTNET_LRO_FEATURES;
529 			sc->vtnet_features =
530 			    virtio_negotiate_features(dev, features);
531 		} else
532 			sc->vtnet_flags |= VTNET_FLAG_LRO_NOMRG;
533 	}
534 }
535 
536 static int
537 vtnet_alloc_virtqueues(struct vtnet_softc *sc)
538 {
539 	device_t dev;
540 	struct vq_alloc_info vq_info[3];
541 	int nvqs;
542 
543 	dev = sc->vtnet_dev;
544 	nvqs = 2;
545 
546 	/*
547 	 * Indirect descriptors are not needed for the Rx
548 	 * virtqueue when mergeable buffers are negotiated.
549 	 * The header is placed inline with the data, not
550 	 * in a separate descriptor, and mbuf clusters are
551 	 * always physically contiguous.
552 	 */
553 	if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
554 		sc->vtnet_rx_nsegs = (sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG) ?
555 		    VTNET_MAX_RX_SEGS : VTNET_MIN_RX_SEGS;
556 	} else
557 		sc->vtnet_rx_nsegs = VTNET_MRG_RX_SEGS;
558 
559 	if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4) ||
560 	    virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6))
561 		sc->vtnet_tx_nsegs = VTNET_MAX_TX_SEGS;
562 	else
563 		sc->vtnet_tx_nsegs = VTNET_MIN_TX_SEGS;
564 
565 	VQ_ALLOC_INFO_INIT(&vq_info[0], sc->vtnet_rx_nsegs,
566 	    vtnet_rx_vq_intr, sc, &sc->vtnet_rx_vq,
567 	    "%s receive", device_get_nameunit(dev));
568 
569 	VQ_ALLOC_INFO_INIT(&vq_info[1], sc->vtnet_tx_nsegs,
570 	    vtnet_tx_vq_intr, sc, &sc->vtnet_tx_vq,
571 	    "%s transmit", device_get_nameunit(dev));
572 
573 	if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) {
574 		nvqs++;
575 
576 		VQ_ALLOC_INFO_INIT(&vq_info[2], 0, NULL, NULL,
577 		    &sc->vtnet_ctrl_vq, "%s control",
578 		    device_get_nameunit(dev));
579 	}
580 
581 	return (virtio_alloc_virtqueues(dev, 0, nvqs, vq_info));
582 }
583 
584 static int
585 vtnet_setup_interface(struct vtnet_softc *sc)
586 {
587 	device_t dev;
588 	struct ifnet *ifp;
589 	int tx_size;
590 
591 	dev = sc->vtnet_dev;
592 
593 	ifp = sc->vtnet_ifp = if_alloc(IFT_ETHER);
594 	if (ifp == NULL) {
595 		device_printf(dev, "cannot allocate ifnet structure\n");
596 		return (ENOSPC);
597 	}
598 
599 	ifp->if_softc = sc;
600 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
601 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
602 	ifp->if_init = vtnet_init;
603 	ifp->if_start = vtnet_start;
604 	ifp->if_ioctl = vtnet_ioctl;
605 
606 	sc->vtnet_rx_size = virtqueue_size(sc->vtnet_rx_vq);
607 	sc->vtnet_rx_process_limit = sc->vtnet_rx_size;
608 
609 	tx_size = virtqueue_size(sc->vtnet_tx_vq);
610 	sc->vtnet_tx_size = tx_size;
611 	sc->vtnet_txhdridx = 0;
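	/*
	 * Size the Tx header pool for the worst-case number of in-flight
	 * packets: with indirect descriptors each packet consumes a single
	 * ring slot, otherwise it takes at least two slots (header plus
	 * data), so at most half the ring (plus one for slack) can be
	 * outstanding at once.
	 */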
612 	if (sc->vtnet_flags & VTNET_FLAG_INDIRECT)
613 		sc->vtnet_txhdrcount = sc->vtnet_tx_size;
614 	else
615 		sc->vtnet_txhdrcount = (sc->vtnet_tx_size / 2) + 1;
616 	sc->vtnet_txhdrarea = contigmalloc(
617 	    sc->vtnet_txhdrcount * sizeof(struct vtnet_tx_header),
618 	    M_VTNET, M_WAITOK, 0, BUS_SPACE_MAXADDR, 4, 0);
619 	if (sc->vtnet_txhdrarea == NULL) {
620 		device_printf(dev, "cannot contigmalloc the tx headers\n");
621 		return (ENOMEM);
622 	}
623 	sc->vtnet_macfilter = contigmalloc(
624 	    sizeof(struct vtnet_mac_filter),
625 	    M_DEVBUF, M_WAITOK, 0, BUS_SPACE_MAXADDR, 4, 0);
626 	if (sc->vtnet_macfilter == NULL) {
627 		device_printf(dev,
628 		    "cannot contigmalloc the mac filter table\n");
629 		return (ENOMEM);
630 	}
631 	ifq_set_maxlen(&ifp->if_snd, tx_size - 1);
632 	ifq_set_ready(&ifp->if_snd);
633 
634 	ether_ifattach(ifp, sc->vtnet_hwaddr, NULL);
635 
636 	if (virtio_with_feature(dev, VIRTIO_NET_F_STATUS)) {
637 		/* XXX: ifp->if_capabilities |= IFCAP_LINKSTATE; */
638 		kprintf("add dynamic link state\n");
639 	}
640 
641 	/* Tell the upper layer(s) we support long frames. */
642 	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
643 	ifp->if_capabilities |= IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU;
644 
645 	if (virtio_with_feature(dev, VIRTIO_NET_F_CSUM)) {
646 		ifp->if_capabilities |= IFCAP_TXCSUM;
647 
648 		if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4))
649 			ifp->if_capabilities |= IFCAP_TSO4;
650 		if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6))
651 			ifp->if_capabilities |= IFCAP_TSO6;
652 		if (ifp->if_capabilities & IFCAP_TSO)
653 			ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
654 
655 		if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_ECN))
656 			sc->vtnet_flags |= VTNET_FLAG_TSO_ECN;
657 	}
658 
659 	if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_CSUM)) {
660 		ifp->if_capabilities |= IFCAP_RXCSUM;
661 
662 		if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO4) ||
663 		    virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO6))
664 			ifp->if_capabilities |= IFCAP_LRO;
665 	}
666 
667 	if (ifp->if_capabilities & IFCAP_HWCSUM) {
668 		/*
669 		 * VirtIO does not support VLAN tagging, but we can fake
670 		 * it by inserting and removing the 802.1Q header during
671 		 * transmit and receive. We are then able to do checksum
672 		 * offloading of VLAN frames.
673 		 */
674 		ifp->if_capabilities |=
675 			IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM;
676 	}
677 
678 	ifp->if_capenable = ifp->if_capabilities;
679 
680 	/*
681 	 * Capabilities after here are not enabled by default.
682 	 */
683 
684 	if (sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER) {
685 		ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
686 
687 		sc->vtnet_vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
688 		    vtnet_register_vlan, sc, EVENTHANDLER_PRI_FIRST);
689 		sc->vtnet_vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
690 		    vtnet_unregister_vlan, sc, EVENTHANDLER_PRI_FIRST);
691 	}
692 
693 	return (0);
694 }
695 
696 static void
697 vtnet_set_hwaddr(struct vtnet_softc *sc)
698 {
699 	device_t dev;
700 
701 	dev = sc->vtnet_dev;
702 
703 	if ((sc->vtnet_flags & VTNET_FLAG_CTRL_MAC) &&
704 	    (sc->vtnet_flags & VTNET_FLAG_CTRL_RX)) {
705 		if (vtnet_ctrl_mac_cmd(sc, sc->vtnet_hwaddr) != 0)
706 			device_printf(dev, "unable to set MAC address\n");
707 	} else if (sc->vtnet_flags & VTNET_FLAG_MAC) {
708 		virtio_write_device_config(dev,
709 		    offsetof(struct virtio_net_config, mac),
710 		    sc->vtnet_hwaddr, ETHER_ADDR_LEN);
711 	}
712 }
713 
714 static void
715 vtnet_get_hwaddr(struct vtnet_softc *sc)
716 {
717 	device_t dev;
718 
719 	dev = sc->vtnet_dev;
720 
721 	if ((sc->vtnet_flags & VTNET_FLAG_MAC) == 0) {
722 		/*
723 		 * Generate a random locally administered unicast address.
724 		 *
725 		 * It would be nice to generate the same MAC address across
726 		 * reboots, but it seems all the hosts currently available
727 		 * support the MAC feature, so this isn't too important.
728 		 */
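		/*
		 * 0xB2 sets the locally-administered bit and leaves the
		 * multicast bit clear, yielding a valid unicast LAA.
		 */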
729 		sc->vtnet_hwaddr[0] = 0xB2;
730 		karc4rand(&sc->vtnet_hwaddr[1], ETHER_ADDR_LEN - 1);
731 		vtnet_set_hwaddr(sc);
732 		return;
733 	}
734 
735 	virtio_read_device_config(dev,
736 	    offsetof(struct virtio_net_config, mac),
737 	    sc->vtnet_hwaddr, ETHER_ADDR_LEN);
738 }
739 
740 static int
741 vtnet_is_link_up(struct vtnet_softc *sc)
742 {
743 	device_t dev;
744 	struct ifnet *ifp;
745 	uint16_t status;
746 
747 	dev = sc->vtnet_dev;
748 	ifp = sc->vtnet_ifp;
749 
750 	ASSERT_SERIALIZED(&sc->vtnet_slz);
751 
752 	if (virtio_with_feature(dev, VIRTIO_NET_F_STATUS) == 0) {
		/* The status field is only valid with VIRTIO_NET_F_STATUS. */
		status = VIRTIO_NET_S_LINK_UP;
	} else {
753 		status = virtio_read_dev_config_2(dev,
				offsetof(struct virtio_net_config, status));
	}
754 
755 	return ((status & VIRTIO_NET_S_LINK_UP) != 0);
756 }
757 
758 static void
759 vtnet_update_link_status(struct vtnet_softc *sc)
760 {
761 	device_t dev;
762 	struct ifnet *ifp;
763 	struct ifaltq_subque *ifsq;
764 	int link;
765 
766 	dev = sc->vtnet_dev;
767 	ifp = sc->vtnet_ifp;
768 	ifsq = ifq_get_subq_default(&ifp->if_snd);
769 
770 	link = vtnet_is_link_up(sc);
771 
772 	if (link && ((sc->vtnet_flags & VTNET_FLAG_LINK) == 0)) {
773 		sc->vtnet_flags |= VTNET_FLAG_LINK;
774 		if (bootverbose)
775 			device_printf(dev, "Link is up\n");
776 		ifp->if_link_state = LINK_STATE_UP;
777 		if_link_state_change(ifp);
778 		if (!ifsq_is_empty(ifsq))
779 			vtnet_start_locked(ifp, ifsq);
780 	} else if (!link && (sc->vtnet_flags & VTNET_FLAG_LINK)) {
781 		sc->vtnet_flags &= ~VTNET_FLAG_LINK;
782 		if (bootverbose)
783 			device_printf(dev, "Link is down\n");
784 
785 		ifp->if_link_state = LINK_STATE_DOWN;
786 		if_link_state_change(ifp);
787 	}
788 }
789 
790 #if 0
791 static void
792 vtnet_watchdog(struct vtnet_softc *sc)
793 {
794 	struct ifnet *ifp;
795 
796 	ifp = sc->vtnet_ifp;
797 
798 #ifdef VTNET_TX_INTR_MODERATION
799 	vtnet_txeof(sc);
800 #endif
801 
802 	if (sc->vtnet_watchdog_timer == 0 || --sc->vtnet_watchdog_timer)
803 		return;
804 
805 	if_printf(ifp, "watchdog timeout -- resetting\n");
806 #ifdef VTNET_DEBUG
807 	virtqueue_dump(sc->vtnet_tx_vq);
808 #endif
809 	ifp->if_oerrors++;
810 	ifp->if_flags &= ~IFF_RUNNING;
811 	vtnet_init_locked(sc);
812 }
813 #endif
814 
815 static void
816 vtnet_config_change_task(void *arg, int pending)
817 {
818 	struct vtnet_softc *sc;
819 
820 	sc = arg;
821 
822 	lwkt_serialize_enter(&sc->vtnet_slz);
823 	vtnet_update_link_status(sc);
824 	lwkt_serialize_exit(&sc->vtnet_slz);
825 }
826 
827 static int
828 vtnet_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data,struct ucred *cr)
829 {
830 	struct vtnet_softc *sc;
831 	struct ifreq *ifr;
832 	int reinit, mask, error;
833 
834 	sc = ifp->if_softc;
835 	ifr = (struct ifreq *) data;
836 	reinit = 0;
837 	error = 0;
838 
839 	switch (cmd) {
840 	case SIOCSIFMTU:
841 		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > VTNET_MAX_MTU)
842 			error = EINVAL;
843 		else if (ifp->if_mtu != ifr->ifr_mtu) {
844 			lwkt_serialize_enter(&sc->vtnet_slz);
845 			error = vtnet_change_mtu(sc, ifr->ifr_mtu);
846 			lwkt_serialize_exit(&sc->vtnet_slz);
847 		}
848 		break;
849 
850 	case SIOCSIFFLAGS:
851 		lwkt_serialize_enter(&sc->vtnet_slz);
852 		if ((ifp->if_flags & IFF_UP) == 0) {
853 			if (ifp->if_flags & IFF_RUNNING)
854 				vtnet_stop(sc);
855 		} else if (ifp->if_flags & IFF_RUNNING) {
856 			if ((ifp->if_flags ^ sc->vtnet_if_flags) &
857 			    (IFF_PROMISC | IFF_ALLMULTI)) {
858 				if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX)
859 					vtnet_rx_filter(sc);
860 				else
861 					error = ENOTSUP;
862 			}
863 		} else
864 			vtnet_init_locked(sc);
865 
866 		if (error == 0)
867 			sc->vtnet_if_flags = ifp->if_flags;
868 		lwkt_serialize_exit(&sc->vtnet_slz);
869 		break;
870 
871 	case SIOCADDMULTI:
872 	case SIOCDELMULTI:
873 		lwkt_serialize_enter(&sc->vtnet_slz);
874 		if ((sc->vtnet_flags & VTNET_FLAG_CTRL_RX) &&
875 		    (ifp->if_flags & IFF_RUNNING))
876 			vtnet_rx_filter_mac(sc);
877 		lwkt_serialize_exit(&sc->vtnet_slz);
878 		break;
879 
880 	case SIOCSIFMEDIA:
881 	case SIOCGIFMEDIA:
882 		error = ifmedia_ioctl(ifp, ifr, &sc->vtnet_media, cmd);
883 		break;
884 
885 	case SIOCSIFCAP:
886 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
887 
888 		lwkt_serialize_enter(&sc->vtnet_slz);
889 
890 		if (mask & IFCAP_TXCSUM) {
891 			ifp->if_capenable ^= IFCAP_TXCSUM;
892 			if (ifp->if_capenable & IFCAP_TXCSUM)
893 				ifp->if_hwassist |= VTNET_CSUM_OFFLOAD;
894 			else
895 				ifp->if_hwassist &= ~VTNET_CSUM_OFFLOAD;
896 		}
897 
898 		if (mask & IFCAP_TSO4) {
899 			ifp->if_capenable ^= IFCAP_TSO4;
900 			if (ifp->if_capenable & IFCAP_TSO4)
901 				ifp->if_hwassist |= CSUM_TSO;
902 			else
903 				ifp->if_hwassist &= ~CSUM_TSO;
904 		}
905 
906 		if (mask & IFCAP_RXCSUM) {
907 			ifp->if_capenable ^= IFCAP_RXCSUM;
908 			reinit = 1;
909 		}
910 
911 		if (mask & IFCAP_LRO) {
912 			ifp->if_capenable ^= IFCAP_LRO;
913 			reinit = 1;
914 		}
915 
916 		if (mask & IFCAP_VLAN_HWFILTER) {
917 			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
918 			reinit = 1;
919 		}
920 
921 		if (mask & IFCAP_VLAN_HWTSO)
922 			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
923 
924 		if (mask & IFCAP_VLAN_HWTAGGING)
925 			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
926 
927 		if (reinit && (ifp->if_flags & IFF_RUNNING)) {
928 			ifp->if_flags &= ~IFF_RUNNING;
929 			vtnet_init_locked(sc);
930 		}
931 		/* VLAN_CAPABILITIES(ifp); */
932 
933 		lwkt_serialize_exit(&sc->vtnet_slz);
934 		break;
935 
936 	default:
937 		error = ether_ioctl(ifp, cmd, data);
938 		break;
939 	}
940 
941 	return (error);
942 }
943 
944 static int
945 vtnet_change_mtu(struct vtnet_softc *sc, int new_mtu)
946 {
947 	struct ifnet *ifp;
948 	int new_frame_size, clsize;
949 
950 	ifp = sc->vtnet_ifp;
951 
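	/*
	 * Pick the smallest receive cluster size for the new MTU. Without
	 * mergeable buffers a frame must fit in a single buffer (chain), so
	 * e.g. a 9000-byte MTU needs MJUM9BYTES clusters; with mergeable
	 * buffers the host may split a frame across several buffers, so
	 * page-sized clusters suffice for large MTUs.
	 */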
952 	if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
953 		new_frame_size = sizeof(struct vtnet_rx_header) +
954 		    sizeof(struct ether_vlan_header) + new_mtu;
955 
956 		if (new_frame_size > MJUM9BYTES)
957 			return (EINVAL);
958 
959 		if (new_frame_size <= MCLBYTES)
960 			clsize = MCLBYTES;
961 		else
962 			clsize = MJUM9BYTES;
963 	} else {
964 		new_frame_size = sizeof(struct virtio_net_hdr_mrg_rxbuf) +
965 		    sizeof(struct ether_vlan_header) + new_mtu;
966 
967 		if (new_frame_size <= MCLBYTES)
968 			clsize = MCLBYTES;
969 		else
970 			clsize = MJUMPAGESIZE;
971 	}
972 
973 	sc->vtnet_rx_mbuf_size = clsize;
974 	sc->vtnet_rx_mbuf_count = VTNET_NEEDED_RX_MBUFS(sc);
975 	KASSERT(sc->vtnet_rx_mbuf_count < VTNET_MAX_RX_SEGS,
976 	    ("too many rx mbufs: %d", sc->vtnet_rx_mbuf_count));
977 
978 	ifp->if_mtu = new_mtu;
979 
980 	if (ifp->if_flags & IFF_RUNNING) {
981 		ifp->if_flags &= ~IFF_RUNNING;
982 		vtnet_init_locked(sc);
983 	}
984 
985 	return (0);
986 }
987 
988 static int
989 vtnet_init_rx_vq(struct vtnet_softc *sc)
990 {
991 	struct virtqueue *vq;
992 	int nbufs, error;
993 
994 	vq = sc->vtnet_rx_vq;
995 	nbufs = 0;
996 	error = ENOSPC;
997 
998 	while (!virtqueue_full(vq)) {
999 		if ((error = vtnet_newbuf(sc)) != 0)
1000 			break;
1001 		nbufs++;
1002 	}
1003 
1004 	if (nbufs > 0) {
1005 		virtqueue_notify(vq, &sc->vtnet_slz);
1006 
1007 		/*
1008 		 * EMSGSIZE signifies the virtqueue did not have enough
1009 		 * entries available to hold the last mbuf. This is not
1010 		 * an error. We should not get ENOSPC since we check if
1011 		 * the virtqueue is full before attempting to add a
1012 		 * buffer.
1013 		 */
1014 		if (error == EMSGSIZE)
1015 			error = 0;
1016 	}
1017 
1018 	return (error);
1019 }
1020 
1021 static void
1022 vtnet_free_rx_mbufs(struct vtnet_softc *sc)
1023 {
1024 	struct virtqueue *vq;
1025 	struct mbuf *m;
1026 	int last;
1027 
1028 	vq = sc->vtnet_rx_vq;
1029 	last = 0;
1030 
1031 	while ((m = virtqueue_drain(vq, &last)) != NULL)
1032 		m_freem(m);
1033 
1034 	KASSERT(virtqueue_empty(vq), ("mbufs remaining in Rx Vq"));
1035 }
1036 
1037 static void
1038 vtnet_free_tx_mbufs(struct vtnet_softc *sc)
1039 {
1040 	struct virtqueue *vq;
1041 	struct vtnet_tx_header *txhdr;
1042 	int last;
1043 
1044 	vq = sc->vtnet_tx_vq;
1045 	last = 0;
1046 
1047 	while ((txhdr = virtqueue_drain(vq, &last)) != NULL) {
1048 		m_freem(txhdr->vth_mbuf);
1049 	}
1050 
1051 	KASSERT(virtqueue_empty(vq), ("mbufs remaining in Tx Vq"));
1052 }
1053 
1054 static void
1055 vtnet_free_ctrl_vq(struct vtnet_softc *sc)
1056 {
1057 	/*
1058 	 * The control virtqueue is only polled, therefore
1059 	 * it should already be empty.
1060 	 */
1061 	KASSERT(virtqueue_empty(sc->vtnet_ctrl_vq),
1062 		("Ctrl Vq not empty"));
1063 }
1064 
1065 static struct mbuf *
1066 vtnet_alloc_rxbuf(struct vtnet_softc *sc, int nbufs, struct mbuf **m_tailp)
1067 {
1068 	struct mbuf *m_head, *m_tail, *m;
1069 	int i, clsize;
1070 
1071 	clsize = sc->vtnet_rx_mbuf_size;
1072 
1073 	/* Use m_getcl(), not m_getjcl(); see if_mxge.c comment line 2398. */
1074 	/* m_head = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, clsize); */
1075 	m_head = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1076 	if (m_head == NULL)
1077 		goto fail;
1078 
1079 	m_head->m_len = clsize;
1080 	m_tail = m_head;
1081 
1082 	if (nbufs > 1) {
1083 		KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG,
1084 			("chained Rx mbuf requested without LRO_NOMRG"));
1085 
1086 		for (i = 0; i < nbufs - 1; i++) {
1087 			/* m = m_getjcl(M_DONTWAIT, MT_DATA, 0, clsize); */
1088 			m = m_getcl(M_NOWAIT, MT_DATA, 0);
1089 			if (m == NULL)
1090 				goto fail;
1091 
1092 			m->m_len = clsize;
1093 			m_tail->m_next = m;
1094 			m_tail = m;
1095 		}
1096 	}
1097 
1098 	if (m_tailp != NULL)
1099 		*m_tailp = m_tail;
1100 
1101 	return (m_head);
1102 
1103 fail:
1104 	sc->vtnet_stats.mbuf_alloc_failed++;
1105 	m_freem(m_head);
1106 
1107 	return (NULL);
1108 }
1109 
1110 static int
1111 vtnet_replace_rxbuf(struct vtnet_softc *sc, struct mbuf *m0, int len0)
1112 {
1113 	struct mbuf *m, *m_prev;
1114 	struct mbuf *m_new, *m_tail;
1115 	int len, clsize, nreplace, error;
1116 
1117 	m = m0;
1118 	m_prev = NULL;
1119 	len = len0;
1120 
1121 	m_tail = NULL;
1122 	clsize = sc->vtnet_rx_mbuf_size;
1123 	nreplace = 0;
1124 
1125 	if (m->m_next != NULL)
1126 		KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG,
1127 		    ("chained Rx mbuf without LRO_NOMRG"));
1128 
1129 	/*
1130 	 * Since LRO_NOMRG mbuf chains are so large, we want to avoid
1131 	 * allocating an entire chain for each received frame. When
1132 	 * the received frame's length is less than that of the chain,
1133 	 * the unused mbufs are reassigned to the new chain.
1134 	 */
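	/*
	 * Illustration: with 2KB clusters and a 4-mbuf LRO_NOMRG chain, a
	 * 3000-byte frame consumes only the first two mbufs (nreplace == 2);
	 * the remaining two are moved onto the tail of the newly allocated
	 * two-mbuf replacement chain below.
	 */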
1135 	while (len > 0) {
1136 		/*
1137 		 * Something is seriously wrong if we received
1138 		 * a frame larger than the mbuf chain. Drop it.
1139 		 */
1140 		if (m == NULL) {
1141 			sc->vtnet_stats.rx_frame_too_large++;
1142 			return (EMSGSIZE);
1143 		}
1144 
1145 		KASSERT(m->m_len == clsize,
1146 		    ("mbuf length not expected cluster size: %d",
1147 		    m->m_len));
1148 
1149 		m->m_len = MIN(m->m_len, len);
1150 		len -= m->m_len;
1151 
1152 		m_prev = m;
1153 		m = m->m_next;
1154 		nreplace++;
1155 	}
1156 
1157 	KASSERT(m_prev != NULL, ("m_prev == NULL"));
1158 	KASSERT(nreplace <= sc->vtnet_rx_mbuf_count,
1159 		("too many replacement mbufs: %d/%d", nreplace,
1160 		sc->vtnet_rx_mbuf_count));
1161 
1162 	m_new = vtnet_alloc_rxbuf(sc, nreplace, &m_tail);
1163 	if (m_new == NULL) {
1164 		m_prev->m_len = clsize;
1165 		return (ENOBUFS);
1166 	}
1167 
1168 	/*
1169 	 * Move unused mbufs, if any, from the original chain
1170 	 * onto the end of the new chain.
1171 	 */
1172 	if (m_prev->m_next != NULL) {
1173 		m_tail->m_next = m_prev->m_next;
1174 		m_prev->m_next = NULL;
1175 	}
1176 
1177 	error = vtnet_enqueue_rxbuf(sc, m_new);
1178 	if (error) {
1179 		/*
1180 		 * BAD! We could not enqueue the replacement mbuf chain. We
1181 		 * must restore the m0 chain to the original state if it was
1182 		 * modified so we can subsequently discard it.
1183 		 * NOTE: The replacement is supposed to be an identical copy
1184 		 * of the one just dequeued, so this is an unexpected error.
1185 		 * to the one just dequeued so this is an unexpected error.
1186 		 */
1187 		sc->vtnet_stats.rx_enq_replacement_failed++;
1188 
1189 		if (m_tail->m_next != NULL) {
1190 			m_prev->m_next = m_tail->m_next;
1191 			m_tail->m_next = NULL;
1192 		}
1193 
1194 		m_prev->m_len = clsize;
1195 		m_freem(m_new);
1196 	}
1197 
1198 	return (error);
1199 }
1200 
1201 static int
1202 vtnet_newbuf(struct vtnet_softc *sc)
1203 {
1204 	struct mbuf *m;
1205 	int error;
1206 
1207 	m = vtnet_alloc_rxbuf(sc, sc->vtnet_rx_mbuf_count, NULL);
1208 	if (m == NULL)
1209 		return (ENOBUFS);
1210 
1211 	error = vtnet_enqueue_rxbuf(sc, m);
1212 	if (error)
1213 		m_freem(m);
1214 
1215 	return (error);
1216 }
1217 
1218 static void
1219 vtnet_discard_merged_rxbuf(struct vtnet_softc *sc, int nbufs)
1220 {
1221 	struct virtqueue *vq;
1222 	struct mbuf *m;
1223 
1224 	vq = sc->vtnet_rx_vq;
1225 
1226 	while (--nbufs > 0) {
1227 		if ((m = virtqueue_dequeue(vq, NULL)) == NULL)
1228 			break;
1229 		vtnet_discard_rxbuf(sc, m);
1230 	}
1231 }
1232 
1233 static void
1234 vtnet_discard_rxbuf(struct vtnet_softc *sc, struct mbuf *m)
1235 {
1236 	int error;
1237 
1238 	/*
1239 	 * Requeue the discarded mbuf. This should always be
1240 	 * successful since it was just dequeued.
1241 	 */
1242 	error = vtnet_enqueue_rxbuf(sc, m);
1243 	KASSERT(error == 0, ("cannot requeue discarded mbuf"));
1244 }
1245 
1246 static int
1247 vtnet_enqueue_rxbuf(struct vtnet_softc *sc, struct mbuf *m)
1248 {
1249 	struct sglist sg;
1250 	struct sglist_seg segs[VTNET_MAX_RX_SEGS];
1251 	struct vtnet_rx_header *rxhdr;
1252 	struct virtio_net_hdr *hdr;
1253 	uint8_t *mdata;
1254 	int offset, error;
1255 
1256 	ASSERT_SERIALIZED(&sc->vtnet_slz);
1257 	if ((sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG) == 0)
1258 		KASSERT(m->m_next == NULL, ("chained Rx mbuf"));
1259 
1260 	sglist_init(&sg, sc->vtnet_rx_nsegs, segs);
1261 
1262 	mdata = mtod(m, uint8_t *);
1263 	offset = 0;
1264 
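	/*
	 * Without mergeable buffers, the virtio_net_hdr lives inside the
	 * leading struct vtnet_rx_header at the front of the cluster, so the
	 * header is appended to the sglist as its own segment and the frame
	 * data starts at 'offset' within the same mbuf.
	 */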
1265 	if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
1266 		rxhdr = (struct vtnet_rx_header *) mdata;
1267 		hdr = &rxhdr->vrh_hdr;
1268 		offset += sizeof(struct vtnet_rx_header);
1269 
1270 		error = sglist_append(&sg, hdr, sc->vtnet_hdr_size);
1271 		KASSERT(error == 0, ("cannot add header to sglist"));
1272 	}
1273 
1274 	error = sglist_append(&sg, mdata + offset, m->m_len - offset);
1275 	if (error)
1276 		return (error);
1277 
1278 	if (m->m_next != NULL) {
1279 		error = sglist_append_mbuf(&sg, m->m_next);
1280 		if (error)
1281 			return (error);
1282 	}
1283 
1284 	return (virtqueue_enqueue(sc->vtnet_rx_vq, m, &sg, 0, sg.sg_nseg));
1285 }
1286 
1287 static void
1288 vtnet_vlan_tag_remove(struct mbuf *m)
1289 {
1290 	struct ether_vlan_header *evl;
1291 
1292 	evl = mtod(m, struct ether_vlan_header *);
1293 
1294 	m->m_pkthdr.ether_vlantag = ntohs(evl->evl_tag);
1295 	m->m_flags |= M_VLANTAG;
1296 
1297 	/* Strip the 802.1Q header. */
1298 	bcopy((char *) evl, (char *) evl + ETHER_VLAN_ENCAP_LEN,
1299 	    ETHER_HDR_LEN - ETHER_TYPE_LEN);
1300 	m_adj(m, ETHER_VLAN_ENCAP_LEN);
1301 }
1302 
1303 /*
1304  * Alternative method of doing receive checksum offloading. Rather
1305  * than parsing the received frame down to the IP header, use the
1306  * csum_offset to determine which CSUM_* flags are appropriate. We
1307  * can get by with doing this only because the checksum offsets are
1308  * unique for the things we care about.
1309  */
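/*
 * Concretely: for TCP the checksum field sits at offsetof(struct tcphdr,
 * th_sum) == 16, while for UDP it is offsetof(struct udphdr, uh_sum) == 6,
 * so hdr->csum_offset alone distinguishes the two protocols below.
 */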
1310 static int
1311 vtnet_rx_csum(struct vtnet_softc *sc, struct mbuf *m,
1312     struct virtio_net_hdr *hdr)
1313 {
1314 	struct ether_header *eh;
1315 	struct ether_vlan_header *evh;
1316 	struct udphdr *udp;
1317 	int csum_len;
1318 	uint16_t eth_type;
1319 
1320 	csum_len = hdr->csum_start + hdr->csum_offset;
1321 
1322 	if (csum_len < sizeof(struct ether_header) + sizeof(struct ip))
1323 		return (1);
1324 	if (m->m_len < csum_len)
1325 		return (1);
1326 
1327 	eh = mtod(m, struct ether_header *);
1328 	eth_type = ntohs(eh->ether_type);
1329 	if (eth_type == ETHERTYPE_VLAN) {
1330 		evh = mtod(m, struct ether_vlan_header *);
1331 		eth_type = ntohs(evh->evl_proto);
1332 	}
1333 
1334 	if (eth_type != ETHERTYPE_IP && eth_type != ETHERTYPE_IPV6) {
1335 		sc->vtnet_stats.rx_csum_bad_ethtype++;
1336 		return (1);
1337 	}
1338 
1339 	/* Use the offset to determine the appropriate CSUM_* flags. */
1340 	switch (hdr->csum_offset) {
1341 	case offsetof(struct udphdr, uh_sum):
1342 		if (m->m_len < hdr->csum_start + sizeof(struct udphdr))
1343 			return (1);
1344 		udp = (struct udphdr *)(mtod(m, uint8_t *) + hdr->csum_start);
1345 		if (udp->uh_sum == 0)
1346 			return (0);
1347 
1348 		/* FALLTHROUGH */
1349 
1350 	case offsetof(struct tcphdr, th_sum):
1351 		m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1352 		m->m_pkthdr.csum_data = 0xFFFF;
1353 		break;
1354 
1355 	default:
1356 		sc->vtnet_stats.rx_csum_bad_offset++;
1357 		return (1);
1358 	}
1359 
1360 	sc->vtnet_stats.rx_csum_offloaded++;
1361 
1362 	return (0);
1363 }
1364 
1365 static int
1366 vtnet_rxeof_merged(struct vtnet_softc *sc, struct mbuf *m_head, int nbufs)
1367 {
1368 	struct ifnet *ifp;
1369 	struct virtqueue *vq;
1370 	struct mbuf *m, *m_tail;
1371 	int len;
1372 
1373 	ifp = sc->vtnet_ifp;
1374 	vq = sc->vtnet_rx_vq;
1375 	m_tail = m_head;
1376 
1377 	while (--nbufs > 0) {
1378 		m = virtqueue_dequeue(vq, &len);
1379 		if (m == NULL) {
1380 			ifp->if_ierrors++;
1381 			goto fail;
1382 		}
1383 
1384 		if (vtnet_newbuf(sc) != 0) {
1385 			ifp->if_iqdrops++;
1386 			vtnet_discard_rxbuf(sc, m);
1387 			if (nbufs > 1)
1388 				vtnet_discard_merged_rxbuf(sc, nbufs);
1389 			goto fail;
1390 		}
1391 
1392 		if (m->m_len < len)
1393 			len = m->m_len;
1394 
1395 		m->m_len = len;
1396 		m->m_flags &= ~M_PKTHDR;
1397 
1398 		m_head->m_pkthdr.len += len;
1399 		m_tail->m_next = m;
1400 		m_tail = m;
1401 	}
1402 
1403 	return (0);
1404 
1405 fail:
1406 	sc->vtnet_stats.rx_mergeable_failed++;
1407 	m_freem(m_head);
1408 
1409 	return (1);
1410 }
1411 
1412 static int
1413 vtnet_rxeof(struct vtnet_softc *sc, int count, int *rx_npktsp)
1414 {
1415 	struct virtio_net_hdr lhdr;
1416 	struct ifnet *ifp;
1417 	struct virtqueue *vq;
1418 	struct mbuf *m;
1419 	struct ether_header *eh;
1420 	struct virtio_net_hdr *hdr;
1421 	struct virtio_net_hdr_mrg_rxbuf *mhdr;
1422 	int len, deq, nbufs, adjsz, rx_npkts;
1423 
1424 	ifp = sc->vtnet_ifp;
1425 	vq = sc->vtnet_rx_vq;
1426 	hdr = &lhdr;
1427 	deq = 0;
1428 	rx_npkts = 0;
1429 
1430 	ASSERT_SERIALIZED(&sc->vtnet_slz);
1431 
1432 	while (--count >= 0) {
1433 		m = virtqueue_dequeue(vq, &len);
1434 		if (m == NULL)
1435 			break;
1436 		deq++;
1437 
1438 		if (len < sc->vtnet_hdr_size + ETHER_HDR_LEN) {
1439 			ifp->if_ierrors++;
1440 			vtnet_discard_rxbuf(sc, m);
1441 			continue;
1442 		}
1443 
1444 		if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
1445 			nbufs = 1;
1446 			adjsz = sizeof(struct vtnet_rx_header);
1447 			/*
1448 			 * Account for our pad between the header and
1449 			 * the actual start of the frame.
1450 			 */
1451 			len += VTNET_RX_HEADER_PAD;
1452 		} else {
1453 			mhdr = mtod(m, struct virtio_net_hdr_mrg_rxbuf *);
1454 			nbufs = mhdr->num_buffers;
1455 			adjsz = sizeof(struct virtio_net_hdr_mrg_rxbuf);
1456 		}
1457 
1458 		if (vtnet_replace_rxbuf(sc, m, len) != 0) {
1459 			ifp->if_iqdrops++;
1460 			vtnet_discard_rxbuf(sc, m);
1461 			if (nbufs > 1)
1462 				vtnet_discard_merged_rxbuf(sc, nbufs);
1463 			continue;
1464 		}
1465 
1466 		m->m_pkthdr.len = len;
1467 		m->m_pkthdr.rcvif = ifp;
1468 		m->m_pkthdr.csum_flags = 0;
1469 
1470 		if (nbufs > 1) {
1471 			if (vtnet_rxeof_merged(sc, m, nbufs) != 0)
1472 				continue;
1473 		}
1474 
1475 		ifp->if_ipackets++;
1476 
1477 		/*
1478 		 * Save copy of header before we strip it. For both mergeable
1479 		 * and non-mergeable, the VirtIO header is placed first in the
1480 		 * mbuf's data. We no longer need num_buffers, so always use a
1481 		 * virtio_net_hdr.
1482 		 */
1483 		memcpy(hdr, mtod(m, void *), sizeof(struct virtio_net_hdr));
1484 		m_adj(m, adjsz);
1485 
1486 		if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
1487 			eh = mtod(m, struct ether_header *);
1488 			if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
1489 				vtnet_vlan_tag_remove(m);
1490 
1491 				/*
1492 				 * With the 802.1Q header removed, update the
1493 				 * checksum starting location accordingly.
1494 				 */
1495 				if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
1496 					hdr->csum_start -=
1497 					    ETHER_VLAN_ENCAP_LEN;
1498 			}
1499 		}
1500 
1501 		if (ifp->if_capenable & IFCAP_RXCSUM &&
1502 		    hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
1503 			if (vtnet_rx_csum(sc, m, hdr) != 0)
1504 				sc->vtnet_stats.rx_csum_failed++;
1505 		}
1506 
1507 		lwkt_serialize_exit(&sc->vtnet_slz);
1508 		rx_npkts++;
1509 		ifp->if_input(ifp, m, NULL, -1);
1510 		lwkt_serialize_enter(&sc->vtnet_slz);
1511 
1512 		/*
1513 		 * The interface may have been stopped while we were
1514 		 * passing the packet up the network stack.
1515 		 */
1516 		if ((ifp->if_flags & IFF_RUNNING) == 0)
1517 			break;
1518 	}
1519 
1520 	virtqueue_notify(vq, &sc->vtnet_slz);
1521 
1522 	if (rx_npktsp != NULL)
1523 		*rx_npktsp = rx_npkts;
1524 
1525 	return (count > 0 ? 0 : EAGAIN);
1526 }
1527 
1528 static void
1529 vtnet_rx_intr_task(void *arg)
1530 {
1531 	struct vtnet_softc *sc;
1532 	struct ifnet *ifp;
1533 	int more;
1534 
1535 	sc = arg;
1536 	ifp = sc->vtnet_ifp;
1537 
1538 next:
1539 	/* lwkt_serialize_enter(&sc->vtnet_slz); */
1540 
1541 	if ((ifp->if_flags & IFF_RUNNING) == 0) {
1542 		vtnet_enable_rx_intr(sc);
1543 		/* lwkt_serialize_exit(&sc->vtnet_slz); */
1544 		return;
1545 	}
1546 
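	/*
	 * Process up to the Rx limit, then re-arm the interrupt. A nonzero
	 * return from vtnet_enable_rx_intr() means more buffers arrived
	 * while the interrupt was disabled, so disarm it again and loop
	 * rather than lose the notification.
	 */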
1547 	more = vtnet_rxeof(sc, sc->vtnet_rx_process_limit, NULL);
1548 	if (!more && vtnet_enable_rx_intr(sc) != 0) {
1549 		vtnet_disable_rx_intr(sc);
1550 		more = 1;
1551 	}
1552 
1553 	/* lwkt_serialize_exit(&sc->vtnet_slz); */
1554 
1555 	if (more) {
1556 		sc->vtnet_stats.rx_task_rescheduled++;
1557 		goto next;
1558 	}
1559 }
1560 
1561 static int
1562 vtnet_rx_vq_intr(void *xsc)
1563 {
1564 	struct vtnet_softc *sc;
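	/*
	 * Note (an assumption about the virtqueue layer, not spelled out
	 * here): the segment counts passed to VQ_ALLOC_INFO_INIT() bound the
	 * sglist segments used per enqueue and size the indirect descriptor
	 * tables when VIRTIO_RING_F_INDIRECT_DESC is negotiated.
	 */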
1565 
1566 	sc = xsc;
1567 
1568 	vtnet_disable_rx_intr(sc);
1569 	vtnet_rx_intr_task(sc);
1570 
1571 	return (1);
1572 }
1573 
1574 static void
1575 vtnet_txeof(struct vtnet_softc *sc)
1576 {
1577 	struct virtqueue *vq;
1578 	struct ifnet *ifp;
1579 	struct vtnet_tx_header *txhdr;
1580 	int deq;
1581 
1582 	vq = sc->vtnet_tx_vq;
1583 	ifp = sc->vtnet_ifp;
1584 	deq = 0;
1585 
1586 	ASSERT_SERIALIZED(&sc->vtnet_slz);
1587 
1588 	while ((txhdr = virtqueue_dequeue(vq, NULL)) != NULL) {
1589 		deq++;
1590 		ifp->if_opackets++;
1591 		m_freem(txhdr->vth_mbuf);
1592 	}
1593 
1594 	if (deq > 0) {
1595 		ifq_clr_oactive(&ifp->if_snd);
1596 		if (virtqueue_empty(vq))
1597 			sc->vtnet_watchdog_timer = 0;
1598 	}
1599 }
1600 
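/*
 * Translate the mbuf's offload requests into virtio_net_hdr fields. As an
 * illustration (typical values, not part of the code): a TCP/IPv4 TSO packet
 * with a 14-byte Ethernet and 20-byte IP header gets csum_start = 34,
 * csum_offset from m_pkthdr.csum_data (the offset of th_sum), hdr_len
 * covering the headers through TCP, and gso_size = the MSS from
 * m_pkthdr.tso_segsz.
 */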
1601 static struct mbuf *
1602 vtnet_tx_offload(struct vtnet_softc *sc, struct mbuf *m,
1603     struct virtio_net_hdr *hdr)
1604 {
1605 	struct ifnet *ifp;
1606 	struct ether_header *eh;
1607 	struct ether_vlan_header *evh;
1608 	struct ip *ip;
1609 	struct ip6_hdr *ip6;
1610 	struct tcphdr *tcp;
1611 	int ip_offset;
1612 	uint16_t eth_type, csum_start;
1613 	uint8_t ip_proto, gso_type;
1614 
1615 	ifp = sc->vtnet_ifp;
1616 	M_ASSERTPKTHDR(m);
1617 
1618 	ip_offset = sizeof(struct ether_header);
1619 	if (m->m_len < ip_offset) {
1620 		if ((m = m_pullup(m, ip_offset)) == NULL)
1621 			return (NULL);
1622 	}
1623 
1624 	eh = mtod(m, struct ether_header *);
1625 	eth_type = ntohs(eh->ether_type);
1626 	if (eth_type == ETHERTYPE_VLAN) {
1627 		ip_offset = sizeof(struct ether_vlan_header);
1628 		if (m->m_len < ip_offset) {
1629 			if ((m = m_pullup(m, ip_offset)) == NULL)
1630 				return (NULL);
1631 		}
1632 		evh = mtod(m, struct ether_vlan_header *);
1633 		eth_type = ntohs(evh->evl_proto);
1634 	}
1635 
1636 	switch (eth_type) {
1637 	case ETHERTYPE_IP:
1638 		if (m->m_len < ip_offset + sizeof(struct ip)) {
1639 			m = m_pullup(m, ip_offset + sizeof(struct ip));
1640 			if (m == NULL)
1641 				return (NULL);
1642 		}
1643 
1644 		ip = (struct ip *)(mtod(m, uint8_t *) + ip_offset);
1645 		ip_proto = ip->ip_p;
1646 		csum_start = ip_offset + (ip->ip_hl << 2);
1647 		gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
1648 		break;
1649 
1650 	case ETHERTYPE_IPV6:
1651 		if (m->m_len < ip_offset + sizeof(struct ip6_hdr)) {
1652 			m = m_pullup(m, ip_offset + sizeof(struct ip6_hdr));
1653 			if (m == NULL)
1654 				return (NULL);
1655 		}
1656 
1657 		ip6 = (struct ip6_hdr *)(mtod(m, uint8_t *) + ip_offset);
1658 		/*
1659 		 * XXX Assume no extension headers are present. Presently,
1660 		 * this will always be true in the case of TSO, and FreeBSD
1661 		 * does not perform checksum offloading of IPv6 yet.
1662 		 */
1663 		ip_proto = ip6->ip6_nxt;
1664 		csum_start = ip_offset + sizeof(struct ip6_hdr);
1665 		gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
1666 		break;
1667 
1668 	default:
1669 		return (m);
1670 	}
1671 
1672 	if (m->m_pkthdr.csum_flags & VTNET_CSUM_OFFLOAD) {
1673 		hdr->flags |= VIRTIO_NET_HDR_F_NEEDS_CSUM;
1674 		hdr->csum_start = csum_start;
1675 		hdr->csum_offset = m->m_pkthdr.csum_data;
1676 
1677 		sc->vtnet_stats.tx_csum_offloaded++;
1678 	}
1679 
1680 	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
1681 		if (ip_proto != IPPROTO_TCP)
1682 			return (m);
1683 
1684 		if (m->m_len < csum_start + sizeof(struct tcphdr)) {
1685 			m = m_pullup(m, csum_start + sizeof(struct tcphdr));
1686 			if (m == NULL)
1687 				return (NULL);
1688 		}
1689 
1690 		tcp = (struct tcphdr *)(mtod(m, uint8_t *) + csum_start);
1691 		hdr->gso_type = gso_type;
1692 		hdr->hdr_len = csum_start + (tcp->th_off << 2);
1693 		hdr->gso_size = m->m_pkthdr.tso_segsz;
1694 
1695 		if (tcp->th_flags & TH_CWR) {
1696 			/*
1697 			 * Drop if we did not negotiate VIRTIO_NET_F_HOST_ECN.
1698 			 * ECN support is only configurable globally with the
1699 			 * net.inet.tcp.ecn.enable sysctl knob.
1700 			 */
1701 			if ((sc->vtnet_flags & VTNET_FLAG_TSO_ECN) == 0) {
1702 				if_printf(ifp, "TSO with ECN not supported "
1703 				    "by host\n");
1704 				m_freem(m);
1705 				return (NULL);
1706 			}
1707 
1708 			hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
1709 		}
1710 
1711 		sc->vtnet_stats.tx_tso_offloaded++;
1712 	}
1713 
1714 	return (m);
1715 }
1716 
1717 static int
1718 vtnet_enqueue_txbuf(struct vtnet_softc *sc, struct mbuf **m_head,
1719     struct vtnet_tx_header *txhdr)
1720 {
1721 	struct sglist sg;
1722 	struct sglist_seg segs[VTNET_MAX_TX_SEGS];
1723 	struct virtqueue *vq;
1724 	struct mbuf *m;
1725 	int error;
1726 
1727 	vq = sc->vtnet_tx_vq;
1728 	m = *m_head;
1729 
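	/*
	 * The sglist is built header-first, then the frame data. For
	 * transmit every segment is device-readable, hence sg.sg_nseg
	 * readable and zero writable segments in virtqueue_enqueue() below.
	 */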
1730 	sglist_init(&sg, sc->vtnet_tx_nsegs, segs);
1731 	error = sglist_append(&sg, &txhdr->vth_uhdr, sc->vtnet_hdr_size);
1732 	KASSERT(error == 0 && sg.sg_nseg == 1,
1733 	    ("%s: error %d adding header to sglist", __func__, error));
1734 
1735 	error = sglist_append_mbuf(&sg, m);
1736 	if (error) {
1737 		m = m_defrag(m, M_NOWAIT);
1738 		if (m == NULL)
1739 			goto fail;
1740 
1741 		*m_head = m;
1742 		sc->vtnet_stats.tx_defragged++;
1743 
1744 		error = sglist_append_mbuf(&sg, m);
1745 		if (error)
1746 			goto fail;
1747 	}
1748 
1749 	txhdr->vth_mbuf = m;
1750 	error = virtqueue_enqueue(vq, txhdr, &sg, sg.sg_nseg, 0);
1751 
1752 	return (error);
1753 
1754 fail:
1755 	sc->vtnet_stats.tx_defrag_failed++;
1756 	m_freem(*m_head);
1757 	*m_head = NULL;
1758 
1759 	return (ENOBUFS);
1760 }
1761 
1762 static struct mbuf *
1763 vtnet_vlan_tag_insert(struct mbuf *m)
1764 {
1765 	struct mbuf *n;
1766 	struct ether_vlan_header *evl;
1767 
1768 	if (M_WRITABLE(m) == 0) {
1769 		n = m_dup(m, M_NOWAIT);
1770 		m_freem(m);
1771 		if ((m = n) == NULL)
1772 			return (NULL);
1773 	}
1774 
1775 	M_PREPEND(m, ETHER_VLAN_ENCAP_LEN, M_NOWAIT);
1776 	if (m == NULL)
1777 		return (NULL);
1778 	if (m->m_len < sizeof(struct ether_vlan_header)) {
1779 		m = m_pullup(m, sizeof(struct ether_vlan_header));
1780 		if (m == NULL)
1781 			return (NULL);
1782 	}
1783 
1784 	/* Insert 802.1Q header into the existing Ethernet header. */
1785 	evl = mtod(m, struct ether_vlan_header *);
1786 	bcopy((char *) evl + ETHER_VLAN_ENCAP_LEN,
1787 	      (char *) evl, ETHER_HDR_LEN - ETHER_TYPE_LEN);
1788 	evl->evl_encap_proto = htons(ETHERTYPE_VLAN);
1789 	evl->evl_tag = htons(m->m_pkthdr.ether_vlantag);
1790 	m->m_flags &= ~M_VLANTAG;
1791 
1792 	return (m);
1793 }
1794 
1795 static int
1796 vtnet_encap(struct vtnet_softc *sc, struct mbuf **m_head)
1797 {
1798 	struct vtnet_tx_header *txhdr;
1799 	struct virtio_net_hdr *hdr;
1800 	struct mbuf *m;
1801 	int error;
1802 
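	/*
	 * Tx headers are recycled from the preallocated, physically
	 * contiguous vtnet_txhdrarea ring sized in vtnet_setup_interface().
	 * The index only advances on a successful enqueue at the bottom of
	 * this function, so a failed encapsulation reuses its slot.
	 */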
1803 	txhdr = &sc->vtnet_txhdrarea[sc->vtnet_txhdridx];
1804 	memset(txhdr, 0, sizeof(struct vtnet_tx_header));
1805 
1806 	/*
1807 	 * Always use the non-mergeable header to simplify things. When
1808 	 * the mergeable feature is negotiated, the num_buffers field
1809 	 * must be set to zero. We use vtnet_hdr_size later to enqueue
1810 	 * the correct header size to the host.
1811 	 */
1812 	hdr = &txhdr->vth_uhdr.hdr;
1813 	m = *m_head;
1814 
1815 	error = ENOBUFS;
1816 
1817 	if (m->m_flags & M_VLANTAG) {
1818 		/* m = ether_vlanencap(m, m->m_pkthdr.ether_vtag); */
1819 		m = vtnet_vlan_tag_insert(m);
1820 		if ((*m_head = m) == NULL)
1821 			goto fail;
1822 		m->m_flags &= ~M_VLANTAG;
1823 	}
1824 
1825 	if (m->m_pkthdr.csum_flags != 0) {
1826 		m = vtnet_tx_offload(sc, m, hdr);
1827 		if ((*m_head = m) == NULL)
1828 			goto fail;
1829 	}
1830 
1831 	error = vtnet_enqueue_txbuf(sc, m_head, txhdr);
1832 	if (error == 0)
1833 		sc->vtnet_txhdridx =
1834 		    (sc->vtnet_txhdridx + 1) % sc->vtnet_txhdrcount;
1835 fail:
1836 	return (error);
1837 }
1838 
1839 static void
1840 vtnet_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
1841 {
1842 	struct vtnet_softc *sc;
1843 
1844 	sc = ifp->if_softc;
1845 
1846 	ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq);
1847 	lwkt_serialize_enter(&sc->vtnet_slz);
1848 	vtnet_start_locked(ifp, ifsq);
1849 	lwkt_serialize_exit(&sc->vtnet_slz);
1850 }
1851 
1852 static void
1853 vtnet_start_locked(struct ifnet *ifp, struct ifaltq_subque *ifsq)
1854 {
1855 	struct vtnet_softc *sc;
1856 	struct virtqueue *vq;
1857 	struct mbuf *m0;
1858 	int enq;
1859 
1860 	sc = ifp->if_softc;
1861 	vq = sc->vtnet_tx_vq;
1862 	enq = 0;
1863 
1864 	ASSERT_SERIALIZED(&sc->vtnet_slz);
1865 
1866 	if ((ifp->if_flags & IFF_RUNNING) == 0 ||
1867 	    (sc->vtnet_flags & VTNET_FLAG_LINK) == 0)
1868 		return;
1869 
1870 #ifdef VTNET_TX_INTR_MODERATION
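	/*
	 * Under Tx interrupt moderation (see the comment at the
	 * VTNET_TX_INTR_MODERATION define), completed descriptors are also
	 * reaped here in the start path once the ring is at least half full.
	 */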
1871 	if (virtqueue_nused(vq) >= sc->vtnet_tx_size / 2)
1872 		vtnet_txeof(sc);
1873 #endif
1874 
1875 	while (!ifsq_is_empty(ifsq)) {
1876 		if (virtqueue_full(vq)) {
1877 			ifq_set_oactive(&ifp->if_snd);
1878 			break;
1879 		}
1880 
1881 		m0 = ifq_dequeue(&ifp->if_snd);
1882 		if (m0 == NULL)
1883 			break;
1884 
1885 		if (vtnet_encap(sc, &m0) != 0) {
1886 			if (m0 == NULL)
1887 				break;
1888 			ifq_prepend(&ifp->if_snd, m0);
1889 			ifq_set_oactive(&ifp->if_snd);
1890 			break;
1891 		}
1892 
1893 		enq++;
1894 		ETHER_BPF_MTAP(ifp, m0);
1895 	}
1896 
1897 	if (enq > 0) {
1898 		virtqueue_notify(vq, &sc->vtnet_slz);
1899 		sc->vtnet_watchdog_timer = VTNET_WATCHDOG_TIMEOUT;
1900 	}
1901 }
1902 
1903 static void
1904 vtnet_tick(void *xsc)
1905 {
1906 	struct vtnet_softc *sc;
1907 
1908 	sc = xsc;
1909 
1910 #if 0
1911 	ASSERT_SERIALIZED(&sc->vtnet_slz);
1912 #ifdef VTNET_DEBUG
1913 	virtqueue_dump(sc->vtnet_rx_vq);
1914 	virtqueue_dump(sc->vtnet_tx_vq);
1915 #endif
1916 
1917 	vtnet_watchdog(sc);
1918 	callout_reset(&sc->vtnet_tick_ch, hz, vtnet_tick, sc);
1919 #endif
1920 }
1921 
1922 static void
1923 vtnet_tx_intr_task(void *arg)
1924 {
1925 	struct vtnet_softc *sc;
1926 	struct ifnet *ifp;
1927 	struct ifaltq_subque *ifsq;
1928 
1929 	sc = arg;
1930 	ifp = sc->vtnet_ifp;
1931 	ifsq = ifq_get_subq_default(&ifp->if_snd);
1932 
1933 next:
1934 	/* lwkt_serialize_enter(&sc->vtnet_slz); */
1935 
1936 	if ((ifp->if_flags & IFF_RUNNING) == 0) {
1937 		vtnet_enable_tx_intr(sc);
1938 		/* lwkt_serialize_exit(&sc->vtnet_slz); */
1939 		return;
1940 	}
1941 
1942 	vtnet_txeof(sc);
1943 
1944 	if (!ifsq_is_empty(ifsq))
1945 		vtnet_start_locked(ifp, ifsq);
1946 
1947 	if (vtnet_enable_tx_intr(sc) != 0) {
1948 		vtnet_disable_tx_intr(sc);
1949 		sc->vtnet_stats.tx_task_rescheduled++;
1950 		/* lwkt_serialize_exit(&sc->vtnet_slz); */
1951 		goto next;
1952 	}
1953 
1954 	/* lwkt_serialize_exit(&sc->vtnet_slz); */
1955 }
1956 
1957 static int
1958 vtnet_tx_vq_intr(void *xsc)
1959 {
1960 	struct vtnet_softc *sc;
1961 
1962 	sc = xsc;
1963 
1964 	vtnet_disable_tx_intr(sc);
1965 	vtnet_tx_intr_task(sc);
1966 
1967 	return (1);
1968 }
1969 
1970 static void
1971 vtnet_stop(struct vtnet_softc *sc)
1972 {
1973 	device_t dev;
1974 	struct ifnet *ifp;
1975 
1976 	dev = sc->vtnet_dev;
1977 	ifp = sc->vtnet_ifp;
1978 
1979 	ASSERT_SERIALIZED(&sc->vtnet_slz);
1980 
1981 	sc->vtnet_watchdog_timer = 0;
1982 	callout_stop(&sc->vtnet_tick_ch);
1983 	ifq_clr_oactive(&ifp->if_snd);
1984 	ifp->if_flags &= ~(IFF_RUNNING);
1985 
1986 	vtnet_disable_rx_intr(sc);
1987 	vtnet_disable_tx_intr(sc);
1988 
1989 	/*
1990 	 * Stop the host VirtIO adapter. Note this will reset the host
1991 	 * adapter's state back to the pre-initialized state, so in
1992 	 * order to make the device usable again, we must drive it
1993 	 * through virtio_reinit() and virtio_reinit_complete().
1994 	 */
1995 	virtio_stop(dev);
1996 
1997 	sc->vtnet_flags &= ~VTNET_FLAG_LINK;
1998 
1999 	vtnet_free_rx_mbufs(sc);
2000 	vtnet_free_tx_mbufs(sc);
2001 }
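
/*
 * A minimal bring-up sketch after vtnet_stop(), condensed from what
 * vtnet_init_locked() below actually does:
 *
 *	vtnet_stop(sc);
 *	if (vtnet_virtio_reinit(sc) == 0) {
 *		vtnet_init_rx_vq(sc);
 *		virtio_reinit_complete(sc->vtnet_dev);
 *	}
 */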
2002 
2003 static int
2004 vtnet_virtio_reinit(struct vtnet_softc *sc)
2005 {
2006 	device_t dev;
2007 	struct ifnet *ifp;
2008 	uint64_t features;
2009 	int error;
2010 
2011 	dev = sc->vtnet_dev;
2012 	ifp = sc->vtnet_ifp;
2013 	features = sc->vtnet_features;
2014 
2015 	/*
2016 	 * Re-negotiate with the host, removing any disabled receive
2017 	 * features. Transmit features are disabled only on our side
2018 	 * via if_capenable and if_hwassist.
2019 	 */
2020 
2021 	if (ifp->if_capabilities & IFCAP_RXCSUM) {
2022 		if ((ifp->if_capenable & IFCAP_RXCSUM) == 0)
2023 			features &= ~VIRTIO_NET_F_GUEST_CSUM;
2024 	}
2025 
2026 	if (ifp->if_capabilities & IFCAP_LRO) {
2027 		if ((ifp->if_capenable & IFCAP_LRO) == 0)
2028 			features &= ~VTNET_LRO_FEATURES;
2029 	}
2030 
2031 	if (ifp->if_capabilities & IFCAP_VLAN_HWFILTER) {
2032 		if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
2033 			features &= ~VIRTIO_NET_F_CTRL_VLAN;
2034 	}
2035 
2036 	error = virtio_reinit(dev, features);
2037 	if (error)
2038 		device_printf(dev, "virtio reinit error %d\n", error);
2039 
2040 	return (error);
2041 }
2042 
2043 static void
2044 vtnet_init_locked(struct vtnet_softc *sc)
2045 {
2046 	device_t dev;
2047 	struct ifnet *ifp;
2048 	int error;
2049 
2050 	dev = sc->vtnet_dev;
2051 	ifp = sc->vtnet_ifp;
2052 
2053 	ASSERT_SERIALIZED(&sc->vtnet_slz);
2054 
2055 	if (ifp->if_flags & IFF_RUNNING)
2056 		return;
2057 
2058 	/* Stop host's adapter, cancel any pending I/O. */
2059 	vtnet_stop(sc);
2060 
2061 	/* Reinitialize the host device. */
2062 	error = vtnet_virtio_reinit(sc);
2063 	if (error) {
2064 		device_printf(dev,
2065 		    "reinitialization failed, stopping device...\n");
2066 		vtnet_stop(sc);
2067 		return;
2068 	}
2069 
2070 	/* Update host with assigned MAC address. */
2071 	bcopy(IF_LLADDR(ifp), sc->vtnet_hwaddr, ETHER_ADDR_LEN);
2072 	vtnet_set_hwaddr(sc);
2073 
2074 	ifp->if_hwassist = 0;
2075 	if (ifp->if_capenable & IFCAP_TXCSUM)
2076 		ifp->if_hwassist |= VTNET_CSUM_OFFLOAD;
2077 	if (ifp->if_capenable & IFCAP_TSO4)
2078 		ifp->if_hwassist |= CSUM_TSO;
2079 
2080 	error = vtnet_init_rx_vq(sc);
2081 	if (error) {
2082 		device_printf(dev,
2083 		    "cannot allocate mbufs for Rx virtqueue\n");
2084 		vtnet_stop(sc);
2085 		return;
2086 	}
2087 
2088 	if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) {
2089 		if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) {
2090 			/* Restore promiscuous and all-multicast modes. */
2091 			vtnet_rx_filter(sc);
2092 
2093 			/* Restore filtered MAC addresses. */
2094 			vtnet_rx_filter_mac(sc);
2095 		}
2096 
2097 		/* Restore VLAN filters. */
2098 		if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
2099 			vtnet_rx_filter_vlan(sc);
2100 	}
2101 
	vtnet_enable_rx_intr(sc);
	vtnet_enable_tx_intr(sc);
2106 
2107 	ifp->if_flags |= IFF_RUNNING;
2108 	ifq_clr_oactive(&ifp->if_snd);
2109 
2110 	virtio_reinit_complete(dev);
2111 
2112 	vtnet_update_link_status(sc);
2113 	callout_reset(&sc->vtnet_tick_ch, hz, vtnet_tick, sc);
2114 }
2115 
2116 static void
2117 vtnet_init(void *xsc)
2118 {
2119 	struct vtnet_softc *sc;
2120 
2121 	sc = xsc;
2122 
2123 	lwkt_serialize_enter(&sc->vtnet_slz);
2124 	vtnet_init_locked(sc);
2125 	lwkt_serialize_exit(&sc->vtnet_slz);
2126 }
2127 
2128 static void
2129 vtnet_exec_ctrl_cmd(struct vtnet_softc *sc, void *cookie,
2130     struct sglist *sg, int readable, int writable)
2131 {
2132 	struct virtqueue *vq;
2133 	void *c;
2134 
2135 	vq = sc->vtnet_ctrl_vq;
2136 
2137 	ASSERT_SERIALIZED(&sc->vtnet_slz);
2138 	KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_VQ,
2139 	    ("no control virtqueue"));
2140 	KASSERT(virtqueue_empty(vq),
2141 	    ("control command already enqueued"));
2142 
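	/*
	 * On enqueue failure simply return: each caller presets its
	 * ack byte to VIRTIO_NET_ERR and reports the command failed.
	 */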
2143 	if (virtqueue_enqueue(vq, cookie, sg, readable, writable) != 0)
2144 		return;
2145 
2146 	virtqueue_notify(vq, &sc->vtnet_slz);
2147 
	/*
	 * Poll until the command completes. Previously, we would
	 * sleep until the control virtqueue interrupt handler woke
	 * us up, but dropping the serializer leads to serialization
	 * difficulties.
	 *
	 * Furthermore, it appears QEMU/KVM only allocates three MSIX
	 * vectors. Two of those vectors are needed for the Rx and Tx
	 * virtqueues, and we do not support sharing a virtqueue and
	 * the config changed notification on the same MSIX vector.
	 */
2159 	c = virtqueue_poll(vq, NULL);
2160 	KASSERT(c == cookie, ("unexpected control command response"));
2161 }
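
/*
 * Each control command below uses the same sglist layout: a readable
 * virtio_net_ctrl_hdr, zero or more readable payload segments, and a
 * single writable ack byte the host sets to VIRTIO_NET_OK on success;
 * hence the "sg_nseg - 1, 1" readable/writable split passed to
 * vtnet_exec_ctrl_cmd().
 */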
2162 
2163 static int
2164 vtnet_ctrl_mac_cmd(struct vtnet_softc *sc, uint8_t *hwaddr)
2165 {
2166 	struct {
2167 		struct virtio_net_ctrl_hdr hdr __aligned(2);
2168 		uint8_t pad1;
2169 		char aligned_hwaddr[ETHER_ADDR_LEN] __aligned(8);
2170 		uint8_t pad2;
2171 		uint8_t ack;
2172 	} s;
2173 	struct sglist_seg segs[3];
2174 	struct sglist sg;
2175 	int error;
2176 
2177 	s.hdr.class = VIRTIO_NET_CTRL_MAC;
2178 	s.hdr.cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET;
2179 	s.ack = VIRTIO_NET_ERR;
2180 
	/* Copy the MAC address into physically contiguous memory. */
2182 	memcpy(s.aligned_hwaddr, hwaddr, ETHER_ADDR_LEN);
2183 
2184 	sglist_init(&sg, 3, segs);
2185 	error = 0;
2186 	error |= sglist_append(&sg, &s.hdr,
2187 	    sizeof(struct virtio_net_ctrl_hdr));
2188 	error |= sglist_append(&sg, s.aligned_hwaddr, ETHER_ADDR_LEN);
2189 	error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
2190 	KASSERT(error == 0 && sg.sg_nseg == 3,
2191 	    ("%s: error %d adding set MAC msg to sglist", __func__, error));
2192 
2193 	vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
2194 
2195 	return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
2196 }
2197 
2198 static void
2199 vtnet_rx_filter(struct vtnet_softc *sc)
2200 {
2201 	device_t dev;
2202 	struct ifnet *ifp;
2203 
2204 	dev = sc->vtnet_dev;
2205 	ifp = sc->vtnet_ifp;
2206 
2207 	ASSERT_SERIALIZED(&sc->vtnet_slz);
2208 	KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX,
2209 	    ("CTRL_RX feature not negotiated"));
2210 
2211 	if (vtnet_set_promisc(sc, ifp->if_flags & IFF_PROMISC) != 0)
2212 		device_printf(dev, "cannot %s promiscuous mode\n",
2213 		    (ifp->if_flags & IFF_PROMISC) ? "enable" : "disable");
2214 
2215 	if (vtnet_set_allmulti(sc, ifp->if_flags & IFF_ALLMULTI) != 0)
2216 		device_printf(dev, "cannot %s all-multicast mode\n",
2217 		    (ifp->if_flags & IFF_ALLMULTI) ? "enable" : "disable");
2218 }
2219 
2220 static int
2221 vtnet_ctrl_rx_cmd(struct vtnet_softc *sc, int cmd, int on)
2222 {
2223 	struct sglist_seg segs[3];
2224 	struct sglist sg;
2225 	struct {
2226 		struct virtio_net_ctrl_hdr hdr __aligned(2);
2227 		uint8_t pad1;
2228 		uint8_t onoff;
2229 		uint8_t pad2;
2230 		uint8_t ack;
2231 	} s;
2232 	int error;
2233 
2234 	KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX,
2235 	    ("%s: CTRL_RX feature not negotiated", __func__));
2236 
2237 	s.hdr.class = VIRTIO_NET_CTRL_RX;
2238 	s.hdr.cmd = cmd;
2239 	s.onoff = !!on;
2240 	s.ack = VIRTIO_NET_ERR;
2241 
2242 	sglist_init(&sg, 3, segs);
2243 	error = 0;
2244 	error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
2245 	error |= sglist_append(&sg, &s.onoff, sizeof(uint8_t));
2246 	error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
2247 	KASSERT(error == 0 && sg.sg_nseg == 3,
2248 	    ("%s: error %d adding Rx message to sglist", __func__, error));
2249 
2250 	vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
2251 
2252 	return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
2253 }
2254 
2255 static int
2256 vtnet_set_promisc(struct vtnet_softc *sc, int on)
2257 {
2258 
2259 	return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_PROMISC, on));
2260 }
2261 
2262 static int
2263 vtnet_set_allmulti(struct vtnet_softc *sc, int on)
2264 {
2265 
2266 	return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_ALLMULTI, on));
2267 }
2268 
2269 static void
2270 vtnet_rx_filter_mac(struct vtnet_softc *sc)
2271 {
2272 	struct virtio_net_ctrl_hdr hdr __aligned(2);
2273 	struct vtnet_mac_filter *filter;
2274 	struct sglist_seg segs[4];
2275 	struct sglist sg;
2276 	struct ifnet *ifp;
2277 	struct ifaddr *ifa;
	struct ifaddr_container *ifac;
2279 	struct ifmultiaddr *ifma;
2280 	int ucnt, mcnt, promisc, allmulti, error;
2281 	uint8_t ack;
2282 
2283 	ifp = sc->vtnet_ifp;
2284 	ucnt = 0;
2285 	mcnt = 0;
2286 	promisc = 0;
2287 	allmulti = 0;
2288 
2289 	ASSERT_SERIALIZED(&sc->vtnet_slz);
2290 	KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX,
2291 	    ("%s: CTRL_RX feature not negotiated", __func__));
2292 
2293 	/* Use the MAC filtering table allocated in vtnet_attach. */
2294 	filter = sc->vtnet_macfilter;
2295 	memset(filter, 0, sizeof(struct vtnet_mac_filter));
2296 
2297 	/* Unicast MAC addresses: */
	/*
	 * The FreeBSD address-list locking calls (if_addr_rlock() and
	 * friends) have no DragonFly equivalent here; both list walks
	 * below run under vtnet_slz, which is asserted held above.
	 */
2299 	TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
2300 		ifa = ifac->ifa;
2301 		if (ifa->ifa_addr->sa_family != AF_LINK)
2302 			continue;
2303 		else if (memcmp(LLADDR((struct sockaddr_dl *)ifa->ifa_addr),
2304 		    sc->vtnet_hwaddr, ETHER_ADDR_LEN) == 0)
2305 			continue;
2306 		else if (ucnt == VTNET_MAX_MAC_ENTRIES) {
2307 			promisc = 1;
2308 			break;
2309 		}
2310 
2311 		bcopy(LLADDR((struct sockaddr_dl *)ifa->ifa_addr),
2312 		    &filter->vmf_unicast.macs[ucnt], ETHER_ADDR_LEN);
2313 		ucnt++;
2314 	}
2316 
2317 	if (promisc != 0) {
2318 		filter->vmf_unicast.nentries = 0;
2319 		if_printf(ifp, "more than %d MAC addresses assigned, "
2320 		    "falling back to promiscuous mode\n",
2321 		    VTNET_MAX_MAC_ENTRIES);
2322 	} else
2323 		filter->vmf_unicast.nentries = ucnt;
2324 
2325 	/* Multicast MAC addresses: */
2327 	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2328 		if (ifma->ifma_addr->sa_family != AF_LINK)
2329 			continue;
2330 		else if (mcnt == VTNET_MAX_MAC_ENTRIES) {
2331 			allmulti = 1;
2332 			break;
2333 		}
2334 
2335 		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
2336 		    &filter->vmf_multicast.macs[mcnt], ETHER_ADDR_LEN);
2337 		mcnt++;
2338 	}
2340 
2341 	if (allmulti != 0) {
2342 		filter->vmf_multicast.nentries = 0;
2343 		if_printf(ifp, "more than %d multicast MAC addresses "
2344 		    "assigned, falling back to all-multicast mode\n",
2345 		    VTNET_MAX_MAC_ENTRIES);
2346 	} else
2347 		filter->vmf_multicast.nentries = mcnt;
2348 
2349 	if (promisc != 0 && allmulti != 0)
2350 		goto out;
2351 
2352 	hdr.class = VIRTIO_NET_CTRL_MAC;
2353 	hdr.cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;
2354 	ack = VIRTIO_NET_ERR;
2355 
2356 	sglist_init(&sg, 4, segs);
2357 	error = 0;
2358 	error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr));
2359 	error |= sglist_append(&sg, &filter->vmf_unicast,
2360 	    sizeof(uint32_t) + filter->vmf_unicast.nentries * ETHER_ADDR_LEN);
2361 	error |= sglist_append(&sg, &filter->vmf_multicast,
2362 	    sizeof(uint32_t) + filter->vmf_multicast.nentries * ETHER_ADDR_LEN);
2363 	error |= sglist_append(&sg, &ack, sizeof(uint8_t));
2364 	KASSERT(error == 0 && sg.sg_nseg == 4,
2365 	    ("%s: error %d adding MAC filter msg to sglist", __func__, error));
2366 
2367 	vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1);
2368 
2369 	if (ack != VIRTIO_NET_OK)
2370 		if_printf(ifp, "error setting host MAC filter table\n");
2371 
2372 out:
2373 	if (promisc != 0 && vtnet_set_promisc(sc, 1) != 0)
2374 		if_printf(ifp, "cannot enable promiscuous mode\n");
2375 	if (allmulti != 0 && vtnet_set_allmulti(sc, 1) != 0)
2376 		if_printf(ifp, "cannot enable all-multicast mode\n");
2377 }
2378 
2379 static int
2380 vtnet_exec_vlan_filter(struct vtnet_softc *sc, int add, uint16_t tag)
2381 {
2382 	struct sglist_seg segs[3];
2383 	struct sglist sg;
2384 	struct {
2385 		struct virtio_net_ctrl_hdr hdr __aligned(2);
2386 		uint8_t pad1;
2387 		uint16_t tag;
2388 		uint8_t pad2;
2389 		uint8_t ack;
2390 	} s;
2391 	int error;
2392 
2393 	s.hdr.class = VIRTIO_NET_CTRL_VLAN;
2394 	s.hdr.cmd = add ? VIRTIO_NET_CTRL_VLAN_ADD : VIRTIO_NET_CTRL_VLAN_DEL;
2395 	s.tag = tag;
2396 	s.ack = VIRTIO_NET_ERR;
2397 
2398 	sglist_init(&sg, 3, segs);
2399 	error = 0;
2400 	error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
2401 	error |= sglist_append(&sg, &s.tag, sizeof(uint16_t));
2402 	error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
2403 	KASSERT(error == 0 && sg.sg_nseg == 3,
2404 	    ("%s: error %d adding VLAN message to sglist", __func__, error));
2405 
2406 	vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
2407 
2408 	return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
2409 }
2410 
2411 static void
2412 vtnet_rx_filter_vlan(struct vtnet_softc *sc)
2413 {
2414 	uint32_t w;
2415 	uint16_t tag;
2416 	int i, bit, nvlans;
2417 
2418 	ASSERT_SERIALIZED(&sc->vtnet_slz);
2419 	KASSERT(sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER,
2420 	    ("%s: VLAN_FILTER feature not negotiated", __func__));
2421 
2422 	nvlans = sc->vtnet_nvlans;
2423 
2424 	/* Enable the filter for each configured VLAN. */
2425 	for (i = 0; i < VTNET_VLAN_SHADOW_SIZE && nvlans > 0; i++) {
2426 		w = sc->vtnet_vlan_shadow[i];
2427 		while ((bit = ffs(w) - 1) != -1) {
2428 			w &= ~(1 << bit);
2429 			tag = sizeof(w) * CHAR_BIT * i + bit;
2430 			nvlans--;
2431 
2432 			if (vtnet_exec_vlan_filter(sc, 1, tag) != 0) {
2433 				device_printf(sc->vtnet_dev,
2434 				    "cannot enable VLAN %d filter\n", tag);
2435 			}
2436 		}
2437 	}
2438 
2439 	KASSERT(nvlans == 0, ("VLAN count incorrect"));
2440 }
2441 
2442 static void
2443 vtnet_update_vlan_filter(struct vtnet_softc *sc, int add, uint16_t tag)
2444 {
2445 	struct ifnet *ifp;
2446 	int idx, bit;
2447 
2448 	ifp = sc->vtnet_ifp;
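	/*
	 * The shadow table is a 4096-bit map, one bit per possible
	 * tag: each 32-bit word covers 32 consecutive tags, so
	 * idx = tag / 32 and bit = tag % 32.
	 */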
2449 	idx = (tag >> 5) & 0x7F;
2450 	bit = tag & 0x1F;
2451 
2452 	if (tag == 0 || tag > 4095)
2453 		return;
2454 
2455 	lwkt_serialize_enter(&sc->vtnet_slz);
2456 
2457 	/* Update shadow VLAN table. */
2458 	if (add) {
2459 		sc->vtnet_nvlans++;
2460 		sc->vtnet_vlan_shadow[idx] |= (1 << bit);
2461 	} else {
2462 		sc->vtnet_nvlans--;
2463 		sc->vtnet_vlan_shadow[idx] &= ~(1 << bit);
2464 	}
2465 
2466 	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER &&
2467 	    vtnet_exec_vlan_filter(sc, add, tag) != 0) {
2468 		device_printf(sc->vtnet_dev,
2469 		    "cannot %s VLAN %d %s the host filter table\n",
2470 		    add ? "add" : "remove", tag, add ? "to" : "from");
2471 	}
2472 
2473 	lwkt_serialize_exit(&sc->vtnet_slz);
2474 }
2475 
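/*
 * VLAN config/unconfig event callbacks, registered at attach time;
 * the if_softc check skips events meant for other interfaces.
 */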
2476 static void
2477 vtnet_register_vlan(void *arg, struct ifnet *ifp, uint16_t tag)
2478 {
2479 
2480 	if (ifp->if_softc != arg)
2481 		return;
2482 
2483 	vtnet_update_vlan_filter(arg, 1, tag);
2484 }
2485 
2486 static void
2487 vtnet_unregister_vlan(void *arg, struct ifnet *ifp, uint16_t tag)
2488 {
2489 
2490 	if (ifp->if_softc != arg)
2491 		return;
2492 
2493 	vtnet_update_vlan_filter(arg, 0, tag);
2494 }
2495 
2496 static int
2497 vtnet_ifmedia_upd(struct ifnet *ifp)
2498 {
2499 	struct vtnet_softc *sc;
2500 	struct ifmedia *ifm;
2501 
2502 	sc = ifp->if_softc;
2503 	ifm = &sc->vtnet_media;
2504 
2505 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2506 		return (EINVAL);
2507 
2508 	return (0);
2509 }
2510 
2511 static void
2512 vtnet_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2513 {
2514 	struct vtnet_softc *sc;
2515 
2516 	sc = ifp->if_softc;
2517 
2518 	ifmr->ifm_status = IFM_AVALID;
2519 	ifmr->ifm_active = IFM_ETHER;
2520 
2521 	lwkt_serialize_enter(&sc->vtnet_slz);
2522 	if (vtnet_is_link_up(sc) != 0) {
2523 		ifmr->ifm_status |= IFM_ACTIVE;
2524 		ifmr->ifm_active |= VTNET_MEDIATYPE;
2525 	} else
2526 		ifmr->ifm_active |= IFM_NONE;
2527 	lwkt_serialize_exit(&sc->vtnet_slz);
2528 }
2529 
2530 static void
2531 vtnet_add_statistics(struct vtnet_softc *sc)
2532 {
2533 	device_t dev;
2534 	struct vtnet_statistics *stats;
2535 	struct sysctl_ctx_list *ctx;
2536 	struct sysctl_oid *tree;
2537 	struct sysctl_oid_list *child;
2538 
2539 	dev = sc->vtnet_dev;
2540 	stats = &sc->vtnet_stats;
2541 	ctx = device_get_sysctl_ctx(dev);
2542 	tree = device_get_sysctl_tree(dev);
2543 	child = SYSCTL_CHILDREN(tree);
2544 
2545 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "mbuf_alloc_failed",
2546 	    CTLFLAG_RD, &stats->mbuf_alloc_failed, 0,
2547 	    "Mbuf cluster allocation failures");
2548 
2549 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_frame_too_large",
2550 	    CTLFLAG_RD, &stats->rx_frame_too_large, 0,
2551 	    "Received frame larger than the mbuf chain");
2552 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_enq_replacement_failed",
2553 	    CTLFLAG_RD, &stats->rx_enq_replacement_failed, 0,
2554 	    "Enqueuing the replacement receive mbuf failed");
2555 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_mergeable_failed",
2556 	    CTLFLAG_RD, &stats->rx_mergeable_failed, 0,
2557 	    "Mergeable buffers receive failures");
2558 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_ethtype",
2559 	    CTLFLAG_RD, &stats->rx_csum_bad_ethtype, 0,
2560 	    "Received checksum offloaded buffer with unsupported "
2561 	    "Ethernet type");
2562 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_ipproto",
2563 	    CTLFLAG_RD, &stats->rx_csum_bad_ipproto, 0,
2564 	    "Received checksum offloaded buffer with incorrect IP protocol");
2565 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_offset",
2566 	    CTLFLAG_RD, &stats->rx_csum_bad_offset, 0,
2567 	    "Received checksum offloaded buffer with incorrect offset");
2568 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_failed",
2569 	    CTLFLAG_RD, &stats->rx_csum_failed, 0,
2570 	    "Received buffer checksum offload failed");
2571 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_offloaded",
2572 	    CTLFLAG_RD, &stats->rx_csum_offloaded, 0,
2573 	    "Received buffer checksum offload succeeded");
2574 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_task_rescheduled",
2575 	    CTLFLAG_RD, &stats->rx_task_rescheduled, 0,
2576 	    "Times the receive interrupt task rescheduled itself");
2577 
2578 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_bad_ethtype",
2579 	    CTLFLAG_RD, &stats->tx_csum_bad_ethtype, 0,
2580 	    "Aborted transmit of checksum offloaded buffer with unknown "
2581 	    "Ethernet type");
2582 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_bad_ethtype",
2583 	    CTLFLAG_RD, &stats->tx_tso_bad_ethtype, 0,
2584 	    "Aborted transmit of TSO buffer with unknown Ethernet type");
2585 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_defragged",
2586 	    CTLFLAG_RD, &stats->tx_defragged, 0,
2587 	    "Transmit mbufs defragged");
2588 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_defrag_failed",
2589 	    CTLFLAG_RD, &stats->tx_defrag_failed, 0,
2590 	    "Aborted transmit of buffer because defrag failed");
2591 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_offloaded",
2592 	    CTLFLAG_RD, &stats->tx_csum_offloaded, 0,
2593 	    "Offloaded checksum of transmitted buffer");
2594 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_offloaded",
2595 	    CTLFLAG_RD, &stats->tx_tso_offloaded, 0,
2596 	    "Segmentation offload of transmitted buffer");
2597 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_task_rescheduled",
2598 	    CTLFLAG_RD, &stats->tx_task_rescheduled, 0,
2599 	    "Times the transmit interrupt task rescheduled itself");
2600 }
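
/*
 * The counters above appear under the device's sysctl tree, e.g.
 * "dev.vtnet.0.tx_defragged" for unit 0 (node names illustrative;
 * see device_get_sysctl_tree(9)).
 */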
2601 
2602 static int
2603 vtnet_enable_rx_intr(struct vtnet_softc *sc)
2604 {
2605 
2606 	return (virtqueue_enable_intr(sc->vtnet_rx_vq));
2607 }
2608 
2609 static void
2610 vtnet_disable_rx_intr(struct vtnet_softc *sc)
2611 {
2612 
2613 	virtqueue_disable_intr(sc->vtnet_rx_vq);
2614 }
2615 
2616 static int
2617 vtnet_enable_tx_intr(struct vtnet_softc *sc)
2618 {
2619 
2620 #ifdef VTNET_TX_INTR_MODERATION
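	/* Moderation keeps Tx interrupts off; see vtnet_start_locked(). */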
2621 	return (0);
2622 #else
2623 	return (virtqueue_enable_intr(sc->vtnet_tx_vq));
2624 #endif
2625 }
2626 
2627 static void
2628 vtnet_disable_tx_intr(struct vtnet_softc *sc)
2629 {
2630 
2631 	virtqueue_disable_intr(sc->vtnet_tx_vq);
2632 }
2633