/*-
 * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* Driver for VirtIO network devices. */

#include "opt_ifpoll.h"

#include <sys/cdefs.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/random.h>
#include <sys/sglist.h>
#include <sys/serialize.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <machine/limits.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_media.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>
#include <net/if_poll.h>
#include <net/ifq_var.h>

#include <net/bpf.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/udp.h>
#include <netinet/tcp.h>

#include <dev/virtual/virtio/virtio/virtio.h>
#include <dev/virtual/virtio/virtio/virtqueue.h>
#include <dev/virtual/virtio/net/virtio_net.h>
#include <dev/virtual/virtio/net/if_vtnetvar.h>

MALLOC_DEFINE(M_VTNET, "VTNET_TX", "Outgoing VTNET TX frame header");

static int	vtnet_probe(device_t);
static int	vtnet_attach(device_t);
static int	vtnet_detach(device_t);
static int	vtnet_suspend(device_t);
static int	vtnet_resume(device_t);
static int	vtnet_shutdown(device_t);

static void	vtnet_negotiate_features(struct vtnet_softc *);
#ifdef IFPOLL_ENABLE
static void	vtnet_npoll(struct ifnet *, struct ifpoll_info *);
static void	vtnet_npoll_status(struct ifnet *);
static void	vtnet_npoll_rx(struct ifnet *, void *, int);
static void	vtnet_npoll_tx(struct ifnet *, void *, int);
#endif
static void	vtnet_serialize(struct ifnet *, enum ifnet_serialize);
static void	vtnet_deserialize(struct ifnet *, enum ifnet_serialize);
static int	vtnet_tryserialize(struct ifnet *, enum ifnet_serialize);
#ifdef INVARIANTS
static void	vtnet_serialize_assert(struct ifnet *, enum ifnet_serialize,
		    boolean_t);
#endif /* INVARIANTS */
static int	vtnet_alloc_intrs(struct vtnet_softc *);
static int	vtnet_alloc_virtqueues(struct vtnet_softc *);
static int	vtnet_bind_intrs(struct vtnet_softc *);
static void	vtnet_get_hwaddr(struct vtnet_softc *);
static void	vtnet_set_hwaddr(struct vtnet_softc *);
static int	vtnet_is_link_up(struct vtnet_softc *);
static void	vtnet_update_link_status(struct vtnet_softc *);
static void	vtnet_watchdog(struct ifaltq_subque *);
static int	vtnet_setup_interface(struct vtnet_softc *);
static int	vtnet_change_mtu(struct vtnet_softc *, int);
static int	vtnet_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);

static int	vtnet_init_rx_vq(struct vtnet_softc *);
static void	vtnet_free_rx_mbufs(struct vtnet_softc *);
static void	vtnet_free_tx_mbufs(struct vtnet_softc *);
static void	vtnet_free_ctrl_vq(struct vtnet_softc *);

static struct mbuf *vtnet_alloc_rxbuf(struct vtnet_softc *, int,
		    struct mbuf **);
static int	vtnet_replace_rxbuf(struct vtnet_softc *,
		    struct mbuf *, int);
static int	vtnet_newbuf(struct vtnet_softc *);
static void	vtnet_discard_merged_rxbuf(struct vtnet_softc *, int);
static void	vtnet_discard_rxbuf(struct vtnet_softc *, struct mbuf *);
static int	vtnet_enqueue_rxbuf(struct vtnet_softc *, struct mbuf *);
static void	vtnet_vlan_tag_remove(struct mbuf *);
static int	vtnet_rx_csum(struct vtnet_softc *, struct mbuf *,
		    struct virtio_net_hdr *);
static int	vtnet_rxeof_merged(struct vtnet_softc *, struct mbuf *, int);
static int	vtnet_rxeof(struct vtnet_softc *, int, int *);
static void	vtnet_rx_msix_intr(void *);
static void	vtnet_rx_vq_intr(void *);

static void	vtnet_enqueue_txhdr(struct vtnet_softc *,
		    struct vtnet_tx_header *);
static void	vtnet_txeof(struct vtnet_softc *);
static struct mbuf *vtnet_tx_offload(struct vtnet_softc *, struct mbuf *,
		    struct virtio_net_hdr *);
static int	vtnet_enqueue_txbuf(struct vtnet_softc *, struct mbuf **,
		    struct vtnet_tx_header *);
static int	vtnet_encap(struct vtnet_softc *, struct mbuf **);
static void	vtnet_start(struct ifnet *, struct ifaltq_subque *);

static void	vtnet_config_intr(void *);
static void	vtnet_tx_msix_intr(void *);
static void	vtnet_tx_vq_intr(void *);

static void	vtnet_stop(struct vtnet_softc *);
static int	vtnet_virtio_reinit(struct vtnet_softc *);
static void	vtnet_init(void *);

static void	vtnet_exec_ctrl_cmd(struct vtnet_softc *, void *,
		    struct sglist *, int, int);

static int	vtnet_ctrl_mac_cmd(struct vtnet_softc *, uint8_t *);
static int	vtnet_ctrl_rx_cmd(struct vtnet_softc *, int, int);
static int	vtnet_set_promisc(struct vtnet_softc *, int);
static int	vtnet_set_allmulti(struct vtnet_softc *, int);
static void	vtnet_rx_filter(struct vtnet_softc *sc);
static void	vtnet_rx_filter_mac(struct vtnet_softc *);

static int	vtnet_exec_vlan_filter(struct vtnet_softc *, int, uint16_t);
static void	vtnet_rx_filter_vlan(struct vtnet_softc *);
static void	vtnet_update_vlan_filter(struct vtnet_softc *, int, uint16_t);
static void	vtnet_register_vlan(void *, struct ifnet *, uint16_t);
static void	vtnet_unregister_vlan(void *, struct ifnet *, uint16_t);

static int	vtnet_ifmedia_upd(struct ifnet *);
static void	vtnet_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static void	vtnet_add_statistics(struct vtnet_softc *);

static int	vtnet_enable_rx_intr(struct vtnet_softc *);
static int	vtnet_enable_tx_intr(struct vtnet_softc *);
static void	vtnet_disable_rx_intr(struct vtnet_softc *);
static void	vtnet_disable_tx_intr(struct vtnet_softc *);

/* Tunables. */
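/* These are boot-time tunables; set them from loader.conf(5). */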
static int vtnet_csum_disable = 0;
TUNABLE_INT("hw.vtnet.csum_disable", &vtnet_csum_disable);
static int vtnet_tso_disable = 1;
TUNABLE_INT("hw.vtnet.tso_disable", &vtnet_tso_disable);
static int vtnet_lro_disable = 0;
TUNABLE_INT("hw.vtnet.lro_disable", &vtnet_lro_disable);

/*
 * Reducing the number of transmit completion interrupts can
 * improve performance.  To do so, the define below keeps the
 * Tx vq interrupt disabled and adds calls to vtnet_txeof()
 * in the start path.  The price to pay is that the m_free'ing
 * of transmitted mbufs may be delayed.
 */
#define VTNET_TX_INTR_MODERATION

static struct virtio_feature_desc vtnet_feature_desc[] = {
	{ VIRTIO_NET_F_CSUM, "TxChecksum" },
	{ VIRTIO_NET_F_GUEST_CSUM, "RxChecksum" },
	{ VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, "DynOffload" },
	{ VIRTIO_NET_F_MAC, "MacAddress" },
	{ VIRTIO_NET_F_GSO, "TxAllGSO" },
	{ VIRTIO_NET_F_GUEST_TSO4, "RxTSOv4" },
	{ VIRTIO_NET_F_GUEST_TSO6, "RxTSOv6" },
	{ VIRTIO_NET_F_GUEST_ECN, "RxECN" },
	{ VIRTIO_NET_F_GUEST_UFO, "RxUFO" },
	{ VIRTIO_NET_F_HOST_TSO4, "TxTSOv4" },
	{ VIRTIO_NET_F_HOST_TSO6, "TxTSOv6" },
	{ VIRTIO_NET_F_HOST_ECN, "TxTSOECN" },
	{ VIRTIO_NET_F_HOST_UFO, "TxUFO" },
	{ VIRTIO_NET_F_MRG_RXBUF, "MrgRxBuf" },
	{ VIRTIO_NET_F_STATUS, "Status" },
	{ VIRTIO_NET_F_CTRL_VQ, "ControlVq" },
	{ VIRTIO_NET_F_CTRL_RX, "RxMode" },
	{ VIRTIO_NET_F_CTRL_VLAN, "VLanFilter" },
	{ VIRTIO_NET_F_CTRL_RX_EXTRA, "RxModeExtra" },
	{ VIRTIO_NET_F_GUEST_ANNOUNCE, "GuestAnnounce" },
	{ VIRTIO_NET_F_MQ, "Multiqueue" },
	{ VIRTIO_NET_F_CTRL_MAC_ADDR, "SetMacAddress" },
	{ 0, NULL }
};

static device_method_t vtnet_methods[] = {
	/* Device methods. */
	DEVMETHOD(device_probe, vtnet_probe),
	DEVMETHOD(device_attach, vtnet_attach),
	DEVMETHOD(device_detach, vtnet_detach),
	DEVMETHOD(device_suspend, vtnet_suspend),
	DEVMETHOD(device_resume, vtnet_resume),
	DEVMETHOD(device_shutdown, vtnet_shutdown),

	DEVMETHOD_END
};

static driver_t vtnet_driver = {
	"vtnet",
	vtnet_methods,
	sizeof(struct vtnet_softc)
};

static devclass_t vtnet_devclass;

DRIVER_MODULE(vtnet, virtio_pci, vtnet_driver, vtnet_devclass, NULL, NULL);
MODULE_VERSION(vtnet, 1);
MODULE_DEPEND(vtnet, virtio, 1, 1, 1);

static int
vtnet_probe(device_t dev)
{
	if (virtio_get_device_type(dev) != VIRTIO_ID_NETWORK)
		return (ENXIO);

	device_set_desc(dev, "VirtIO Networking Adapter");

	return (BUS_PROBE_DEFAULT);
}

static int
vtnet_attach(device_t dev)
{
	struct vtnet_softc *sc;
	int i, error;

	sc = device_get_softc(dev);
	sc->vtnet_dev = dev;

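	/*
	 * Three serializers: one protecting device/config state and one
	 * each for the RX and TX paths, so the two data paths can run
	 * concurrently on different CPUs.
	 */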
	lwkt_serialize_init(&sc->vtnet_slz);
	lwkt_serialize_init(&sc->vtnet_rx_slz);
	lwkt_serialize_init(&sc->vtnet_tx_slz);
	sc->serializes[0] = &sc->vtnet_slz;
	sc->serializes[1] = &sc->vtnet_rx_slz;
	sc->serializes[2] = &sc->vtnet_tx_slz;

	ifmedia_init(&sc->vtnet_media, IFM_IMASK, vtnet_ifmedia_upd,
	    vtnet_ifmedia_sts);
	ifmedia_add(&sc->vtnet_media, VTNET_MEDIATYPE, 0, NULL);
	ifmedia_set(&sc->vtnet_media, VTNET_MEDIATYPE);

	vtnet_add_statistics(sc);
	SLIST_INIT(&sc->vtnet_txhdr_free);

	/* Register our feature descriptions. */
	virtio_set_feature_desc(dev, vtnet_feature_desc);
	vtnet_negotiate_features(sc);

	if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
		sc->vtnet_flags |= VTNET_FLAG_INDIRECT;

	if (virtio_with_feature(dev, VIRTIO_NET_F_MAC)) {
		/* This feature should always be negotiated. */
		sc->vtnet_flags |= VTNET_FLAG_MAC;
	}

	if (virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF)) {
		sc->vtnet_flags |= VTNET_FLAG_MRG_RXBUFS;
		sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
	} else {
		sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr);
	}

	sc->vtnet_rx_mbuf_size = MCLBYTES;
	sc->vtnet_rx_mbuf_count = VTNET_NEEDED_RX_MBUFS(sc);

	if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VQ)) {
		sc->vtnet_flags |= VTNET_FLAG_CTRL_VQ;

		if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_RX))
			sc->vtnet_flags |= VTNET_FLAG_CTRL_RX;
		if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VLAN))
			sc->vtnet_flags |= VTNET_FLAG_VLAN_FILTER;
		if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_MAC_ADDR) &&
		    virtio_with_feature(dev, VIRTIO_NET_F_CTRL_RX))
			sc->vtnet_flags |= VTNET_FLAG_CTRL_MAC;
	}

	error = vtnet_alloc_intrs(sc);
	if (error) {
		device_printf(dev, "cannot allocate interrupts\n");
		goto fail;
	}

	error = vtnet_alloc_virtqueues(sc);
	if (error) {
		device_printf(dev, "cannot allocate virtqueues\n");
		goto fail;
	}

	error = vtnet_bind_intrs(sc);
	if (error) {
		device_printf(dev, "cannot bind virtqueues to interrupts\n");
		goto fail;
	}

	/* Read (or generate) the MAC address for the adapter. */
	vtnet_get_hwaddr(sc);

	error = vtnet_setup_interface(sc);
	if (error) {
		device_printf(dev, "cannot setup interface\n");
		goto fail;
	}

	for (i = 0; i < sc->vtnet_nintr; i++) {
		error = virtio_setup_intr(dev, i, sc->vtnet_intr_slz[i]);
		if (error) {
			device_printf(dev, "cannot setup virtqueue "
			    "interrupts\n");
			ether_ifdetach(sc->vtnet_ifp);
			goto fail;
		}
	}

	if ((sc->vtnet_flags & VTNET_FLAG_MAC) == 0) {
		ifnet_serialize_all(sc->vtnet_ifp);
		vtnet_set_hwaddr(sc);
		ifnet_deserialize_all(sc->vtnet_ifp);
	}

	/*
	 * Device defaults to promiscuous mode for backwards
	 * compatibility.  Turn it off if possible.
	 */
	if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) {
		ifnet_serialize_all(sc->vtnet_ifp);
		if (vtnet_set_promisc(sc, 0) != 0) {
			sc->vtnet_ifp->if_flags |= IFF_PROMISC;
			device_printf(dev,
			    "cannot disable promiscuous mode\n");
		}
		ifnet_deserialize_all(sc->vtnet_ifp);
	} else
		sc->vtnet_ifp->if_flags |= IFF_PROMISC;

fail:
	if (error)
		vtnet_detach(dev);

	return (error);
}

static int
vtnet_detach(device_t dev)
{
	struct vtnet_softc *sc;
	struct ifnet *ifp;
	int i;

	sc = device_get_softc(dev);
	ifp = sc->vtnet_ifp;

	for (i = 0; i < sc->vtnet_nintr; i++)
		virtio_teardown_intr(dev, i);

	if (device_is_attached(dev)) {
		ifnet_serialize_all(ifp);
		vtnet_stop(sc);
		lwkt_serialize_handler_disable(&sc->vtnet_slz);
		lwkt_serialize_handler_disable(&sc->vtnet_rx_slz);
		lwkt_serialize_handler_disable(&sc->vtnet_tx_slz);
		ifnet_deserialize_all(ifp);

		ether_ifdetach(ifp);
	}

	if (sc->vtnet_vlan_attach != NULL) {
		EVENTHANDLER_DEREGISTER(vlan_config, sc->vtnet_vlan_attach);
		sc->vtnet_vlan_attach = NULL;
	}
	if (sc->vtnet_vlan_detach != NULL) {
		EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vtnet_vlan_detach);
		sc->vtnet_vlan_detach = NULL;
	}

	if (ifp) {
		if_free(ifp);
		sc->vtnet_ifp = NULL;
	}

	if (sc->vtnet_rx_vq != NULL)
		vtnet_free_rx_mbufs(sc);
	if (sc->vtnet_tx_vq != NULL)
		vtnet_free_tx_mbufs(sc);
	if (sc->vtnet_ctrl_vq != NULL)
		vtnet_free_ctrl_vq(sc);

	if (sc->vtnet_txhdrarea != NULL) {
		contigfree(sc->vtnet_txhdrarea,
		    sc->vtnet_txhdrcount * sizeof(struct vtnet_tx_header),
		    M_VTNET);
		sc->vtnet_txhdrarea = NULL;
	}
	SLIST_INIT(&sc->vtnet_txhdr_free);
	if (sc->vtnet_macfilter != NULL) {
		contigfree(sc->vtnet_macfilter,
		    sizeof(struct vtnet_mac_filter), M_DEVBUF);
		sc->vtnet_macfilter = NULL;
	}

	ifmedia_removeall(&sc->vtnet_media);

	return (0);
}

static int
vtnet_suspend(device_t dev)
{
	struct vtnet_softc *sc;

	sc = device_get_softc(dev);

	ifnet_serialize_all(sc->vtnet_ifp);
	vtnet_stop(sc);
	sc->vtnet_flags |= VTNET_FLAG_SUSPENDED;
	ifnet_deserialize_all(sc->vtnet_ifp);

	return (0);
}

static int
vtnet_resume(device_t dev)
{
	struct vtnet_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	ifp = sc->vtnet_ifp;

	ifnet_serialize_all(ifp);
	if (ifp->if_flags & IFF_UP)
		vtnet_init(sc);
	sc->vtnet_flags &= ~VTNET_FLAG_SUSPENDED;
	ifnet_deserialize_all(ifp);

	return (0);
}

static int
vtnet_shutdown(device_t dev)
{

	/*
	 * Suspend already does all of what we need to
	 * do here; we just never expect to be resumed.
	 */
	return (vtnet_suspend(dev));
}

static void
vtnet_negotiate_features(struct vtnet_softc *sc)
{
	device_t dev;
	uint64_t mask, features;

	dev = sc->vtnet_dev;
	mask = 0;

	if (vtnet_csum_disable)
		mask |= VIRTIO_NET_F_CSUM | VIRTIO_NET_F_GUEST_CSUM;

	/*
	 * XXX DragonFly doesn't support receive checksum offload for ipv6 yet,
	 * hence always disable the virtio feature for now.
	 * XXX We need to support the DynOffload feature, in order to
	 * dynamically enable/disable this feature.
	 */
	mask |= VIRTIO_NET_F_GUEST_CSUM;

	/*
	 * TSO is only available when the tx checksum offload feature is also
	 * negotiated.
	 */
	if (vtnet_csum_disable || vtnet_tso_disable)
		mask |= VIRTIO_NET_F_HOST_TSO4 | VIRTIO_NET_F_HOST_TSO6 |
		    VIRTIO_NET_F_HOST_ECN;

	if (vtnet_lro_disable)
		mask |= VTNET_LRO_FEATURES;

	features = VTNET_FEATURES & ~mask;
	features |= VIRTIO_F_NOTIFY_ON_EMPTY;
	features |= VIRTIO_F_ANY_LAYOUT;
	sc->vtnet_features = virtio_negotiate_features(dev, features);

	if (virtio_with_feature(dev, VTNET_LRO_FEATURES) &&
	    virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF) == 0) {
		/*
		 * LRO without mergeable buffers requires special care.  This
		 * is not ideal because every receive buffer must be large
		 * enough to hold the maximum TCP packet, the Ethernet header,
		 * and the VirtIO net header.  This requires up to 34
		 * descriptors with MCLBYTES clusters.  If we do not have
		 * indirect descriptors, LRO is disabled since the virtqueue
		 * will not contain very many receive buffers.
		 */
		if (!virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC)) {
			device_printf(dev,
			    "LRO disabled due to both mergeable buffers and "
			    "indirect descriptors not negotiated\n");

			features &= ~VTNET_LRO_FEATURES;
			sc->vtnet_features =
			    virtio_negotiate_features(dev, features);
		} else
			sc->vtnet_flags |= VTNET_FLAG_LRO_NOMRG;
	}
}

static void
vtnet_serialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
	struct vtnet_softc *sc = ifp->if_softc;

	ifnet_serialize_array_enter(sc->serializes, 3, slz);
}

static void
vtnet_deserialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
	struct vtnet_softc *sc = ifp->if_softc;

	ifnet_serialize_array_exit(sc->serializes, 3, slz);
}

static int
vtnet_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
	struct vtnet_softc *sc = ifp->if_softc;

	return ifnet_serialize_array_try(sc->serializes, 3, slz);
}

#ifdef INVARIANTS

static void
vtnet_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz,
    boolean_t serialized)
{
	struct vtnet_softc *sc = ifp->if_softc;

	ifnet_serialize_array_assert(sc->serializes, 3, slz, serialized);
}

#endif /* INVARIANTS */

static int
vtnet_alloc_intrs(struct vtnet_softc *sc)
{
	int cnt, error;
	int intrcount = virtio_intr_count(sc->vtnet_dev);
	int i;
	int use_config;

	if (virtio_with_feature(sc->vtnet_dev, VIRTIO_NET_F_STATUS)) {
		use_config = 1;
		/* We can use a maximum of 3 interrupt vectors. */
		intrcount = imin(intrcount, 3);
	} else {
		use_config = 0;
		/* We can use a maximum of 2 interrupt vectors. */
		intrcount = imin(intrcount, 2);
	}

	if (intrcount < 1)
		return (ENXIO);

	for (i = 0; i < intrcount; i++)
		sc->vtnet_cpus[i] = -1;

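	/*
	 * Request the vectors; on return, cnt holds the number actually
	 * allocated and vtnet_cpus[] which CPU each vector was assigned to.
	 */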
	cnt = intrcount;
	error = virtio_intr_alloc(sc->vtnet_dev, &cnt, use_config,
	    sc->vtnet_cpus);
	if (error != 0) {
		virtio_intr_release(sc->vtnet_dev);
		return (error);
	}
	sc->vtnet_nintr = cnt;

	return (0);
}

static int
vtnet_alloc_virtqueues(struct vtnet_softc *sc)
{
	device_t dev;
	struct vq_alloc_info vq_info[3];
	int nvqs;

	dev = sc->vtnet_dev;
	nvqs = 2;

	/*
	 * Indirect descriptors are not needed for the Rx
	 * virtqueue when mergeable buffers are negotiated.
	 * The header is placed inline with the data, not
	 * in a separate descriptor, and mbuf clusters are
	 * always physically contiguous.
	 */
	if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
		sc->vtnet_rx_nsegs = (sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG) ?
		    VTNET_MAX_RX_SEGS : VTNET_MIN_RX_SEGS;
	} else
		sc->vtnet_rx_nsegs = VTNET_MRG_RX_SEGS;

	if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4) ||
	    virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6))
		sc->vtnet_tx_nsegs = VTNET_MAX_TX_SEGS;
	else
		sc->vtnet_tx_nsegs = VTNET_MIN_TX_SEGS;

	VQ_ALLOC_INFO_INIT(&vq_info[0], sc->vtnet_rx_nsegs, &sc->vtnet_rx_vq,
	    "%s receive", device_get_nameunit(dev));

	VQ_ALLOC_INFO_INIT(&vq_info[1], sc->vtnet_tx_nsegs, &sc->vtnet_tx_vq,
	    "%s transmit", device_get_nameunit(dev));

	if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) {
		nvqs++;

		VQ_ALLOC_INFO_INIT(&vq_info[2], 0, &sc->vtnet_ctrl_vq,
		    "%s control", device_get_nameunit(dev));
	}

	return (virtio_alloc_virtqueues(dev, nvqs, vq_info));
}

static int
vtnet_bind_intrs(struct vtnet_softc *sc)
{
	int error = 0;
	int i;

	for (i = 0; i < 3; i++)
		sc->vtnet_intr_slz[i] = &sc->vtnet_slz;

	/* Possible "Virtqueue <-> IRQ" configurations */
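	/*
	 * 1 vector: config, RX, and TX all share IRQ 0.
	 * 2 vectors + STATUS: IRQ 0 handles config changes, IRQ 1 is
	 *   shared by RX and TX.
	 * 2 vectors, no STATUS: RX on IRQ 0, TX on IRQ 1.
	 * 3 vectors: config on IRQ 0, RX on IRQ 1, TX on IRQ 2.
	 */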
	switch (sc->vtnet_nintr) {
	case 1:
		sc->vtnet_irqmap[0] = (struct irqmap){0, vtnet_rx_vq_intr};
		sc->vtnet_irqmap[1] = (struct irqmap){0, vtnet_tx_vq_intr};
		break;
	case 2:
		if (virtio_with_feature(sc->vtnet_dev, VIRTIO_NET_F_STATUS)) {
			sc->vtnet_irqmap[0] =
			    (struct irqmap){1, vtnet_rx_vq_intr};
			sc->vtnet_irqmap[1] =
			    (struct irqmap){1, vtnet_tx_vq_intr};
		} else {
			sc->vtnet_irqmap[0] =
			    (struct irqmap){0, vtnet_rx_msix_intr};
			sc->vtnet_irqmap[1] =
			    (struct irqmap){1, vtnet_tx_msix_intr};
			sc->vtnet_intr_slz[0] = &sc->vtnet_rx_slz;
			sc->vtnet_intr_slz[1] = &sc->vtnet_tx_slz;
		}
		break;
	case 3:
		sc->vtnet_irqmap[0] = (struct irqmap){1, vtnet_rx_msix_intr};
		sc->vtnet_irqmap[1] = (struct irqmap){2, vtnet_tx_msix_intr};
		sc->vtnet_intr_slz[1] = &sc->vtnet_rx_slz;
		sc->vtnet_intr_slz[2] = &sc->vtnet_tx_slz;
		break;
	default:
		device_printf(sc->vtnet_dev,
		    "Invalid interrupt vector count: %d\n", sc->vtnet_nintr);
		error = EINVAL;
		goto fail;
	}

	for (i = 0; i < 2; i++) {
		error = virtio_bind_intr(sc->vtnet_dev,
		    sc->vtnet_irqmap[i].irq, i, sc->vtnet_irqmap[i].handler,
		    sc);
		if (error) {
			device_printf(sc->vtnet_dev,
			    "cannot bind virtqueue IRQs\n");
			goto fail;
		}
	}
	if (virtio_with_feature(sc->vtnet_dev, VIRTIO_NET_F_STATUS)) {
		error = virtio_bind_intr(sc->vtnet_dev, 0, -1,
		    vtnet_config_intr, sc);
		if (error) {
			device_printf(sc->vtnet_dev,
			    "cannot bind config_change IRQ\n");
			goto fail;
		}
	}

fail:
	return (error);
}

static int
vtnet_setup_interface(struct vtnet_softc *sc)
{
	device_t dev;
	struct ifnet *ifp;
	int i;

	dev = sc->vtnet_dev;

	ifp = sc->vtnet_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "cannot allocate ifnet structure\n");
		return (ENOSPC);
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = vtnet_init;
	ifp->if_start = vtnet_start;
#ifdef IFPOLL_ENABLE
	ifp->if_npoll = vtnet_npoll;
#endif
	ifp->if_serialize = vtnet_serialize;
	ifp->if_deserialize = vtnet_deserialize;
	ifp->if_tryserialize = vtnet_tryserialize;
#ifdef INVARIANTS
	ifp->if_serialize_assert = vtnet_serialize_assert;
#endif
	ifp->if_ioctl = vtnet_ioctl;

	sc->vtnet_rx_process_limit = virtqueue_size(sc->vtnet_rx_vq);
	sc->vtnet_tx_size = virtqueue_size(sc->vtnet_tx_vq);
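	/*
	 * Without indirect descriptors, each packet uses at least two
	 * descriptors (header + data), so no more than half the ring,
	 * plus one, can ever need a TX header at the same time.
	 */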
	if (sc->vtnet_flags & VTNET_FLAG_INDIRECT)
		sc->vtnet_txhdrcount = sc->vtnet_tx_size;
	else
		sc->vtnet_txhdrcount = (sc->vtnet_tx_size / 2) + 1;
	sc->vtnet_txhdrarea = contigmalloc(
	    sc->vtnet_txhdrcount * sizeof(struct vtnet_tx_header),
	    M_VTNET, M_WAITOK, 0, BUS_SPACE_MAXADDR, 4, 0);
	if (sc->vtnet_txhdrarea == NULL) {
		device_printf(dev, "cannot contigmalloc the tx headers\n");
		return (ENOMEM);
	}
	for (i = 0; i < sc->vtnet_txhdrcount; i++)
		vtnet_enqueue_txhdr(sc, &sc->vtnet_txhdrarea[i]);
	sc->vtnet_macfilter = contigmalloc(
	    sizeof(struct vtnet_mac_filter),
	    M_DEVBUF, M_WAITOK, 0, BUS_SPACE_MAXADDR, 4, 0);
	if (sc->vtnet_macfilter == NULL) {
		device_printf(dev,
		    "cannot contigmalloc the mac filter table\n");
		return (ENOMEM);
	}
	ifq_set_maxlen(&ifp->if_snd, sc->vtnet_tx_size - 1);
	ifq_set_ready(&ifp->if_snd);

	ether_ifattach(ifp, sc->vtnet_hwaddr, NULL);

	/* The Tx IRQ is currently always the last allocated interrupt. */
	ifq_set_cpuid(&ifp->if_snd, sc->vtnet_cpus[sc->vtnet_nintr - 1]);
	ifsq_watchdog_init(&sc->vtnet_tx_watchdog,
	    ifq_get_subq_default(&ifp->if_snd),
	    vtnet_watchdog,
	    IF_WDOG_LASTTICK);
	ifq_set_hw_serialize(&ifp->if_snd, &sc->vtnet_tx_slz);

	/* Tell the upper layer(s) we support long frames. */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |= IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU;

	if (virtio_with_feature(dev, VIRTIO_NET_F_CSUM)) {
		ifp->if_capabilities |= IFCAP_TXCSUM;

		if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4))
			ifp->if_capabilities |= IFCAP_TSO4;
		if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6))
			ifp->if_capabilities |= IFCAP_TSO6;
		if (ifp->if_capabilities & IFCAP_TSO)
			ifp->if_capabilities |= IFCAP_VLAN_HWTSO;

		if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_ECN))
			sc->vtnet_flags |= VTNET_FLAG_TSO_ECN;
	}

	if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_CSUM))
		ifp->if_capabilities |= IFCAP_RXCSUM;

#if 0 /* IFCAP_LRO doesn't exist in DragonFly. */
	if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO4) ||
	    virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO6))
		ifp->if_capabilities |= IFCAP_LRO;
#endif

	if ((ifp->if_capabilities & IFCAP_HWCSUM) == IFCAP_HWCSUM) {
		/*
		 * VirtIO does not support VLAN tagging, but we can fake
		 * it by inserting and removing the 802.1Q header during
		 * transmit and receive.  We are then able to do checksum
		 * offloading of VLAN frames.
		 */
		ifp->if_capabilities |=
		    IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM;
	}

	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Capabilities after here are not enabled by default.
	 */

	if (sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER) {
		ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;

		sc->vtnet_vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
		    vtnet_register_vlan, sc, EVENTHANDLER_PRI_FIRST);
		sc->vtnet_vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
		    vtnet_unregister_vlan, sc, EVENTHANDLER_PRI_FIRST);
	}

	return (0);
}

static void
vtnet_set_hwaddr(struct vtnet_softc *sc)
{
	device_t dev;

	dev = sc->vtnet_dev;

	if ((sc->vtnet_flags & VTNET_FLAG_CTRL_MAC) &&
	    (sc->vtnet_flags & VTNET_FLAG_CTRL_RX)) {
		if (vtnet_ctrl_mac_cmd(sc, sc->vtnet_hwaddr) != 0)
			device_printf(dev, "unable to set MAC address\n");
	} else if (sc->vtnet_flags & VTNET_FLAG_MAC) {
		virtio_write_device_config(dev,
		    offsetof(struct virtio_net_config, mac),
		    sc->vtnet_hwaddr, ETHER_ADDR_LEN);
	}
}

static void
vtnet_get_hwaddr(struct vtnet_softc *sc)
{
	device_t dev;

	dev = sc->vtnet_dev;

	if ((sc->vtnet_flags & VTNET_FLAG_MAC) == 0) {
		/*
		 * Generate a random locally administered unicast address.
		 *
		 * It would be nice to generate the same MAC address across
		 * reboots, but it seems all the hosts currently available
		 * support the MAC feature, so this isn't too important.
		 */
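		/* 0xB2: locally administered bit set, multicast bit clear. */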
		sc->vtnet_hwaddr[0] = 0xB2;
		karc4random_buf(&sc->vtnet_hwaddr[1], ETHER_ADDR_LEN - 1);
		return;
	}

	virtio_read_device_config(dev,
	    offsetof(struct virtio_net_config, mac),
	    sc->vtnet_hwaddr, ETHER_ADDR_LEN);
}

static int
vtnet_is_link_up(struct vtnet_softc *sc)
{
	device_t dev;
	struct ifnet *ifp;
	uint16_t status;

	dev = sc->vtnet_dev;
	ifp = sc->vtnet_ifp;

	ASSERT_SERIALIZED(&sc->vtnet_slz);

	if (virtio_with_feature(dev, VIRTIO_NET_F_STATUS)) {
		status = virtio_read_dev_config_2(dev,
		    offsetof(struct virtio_net_config, status));
	} else {
		status = VIRTIO_NET_S_LINK_UP;
	}

	return ((status & VIRTIO_NET_S_LINK_UP) != 0);
}

static void
vtnet_update_link_status(struct vtnet_softc *sc)
{
	device_t dev;
	struct ifnet *ifp;
	struct ifaltq_subque *ifsq;
	int link;

	dev = sc->vtnet_dev;
	ifp = sc->vtnet_ifp;
	ifsq = ifq_get_subq_default(&ifp->if_snd);

	link = vtnet_is_link_up(sc);

	if (link && ((sc->vtnet_flags & VTNET_FLAG_LINK) == 0)) {
		sc->vtnet_flags |= VTNET_FLAG_LINK;
		if (bootverbose)
			device_printf(dev, "Link is up\n");
		ifp->if_link_state = LINK_STATE_UP;
		if_link_state_change(ifp);
		if (!ifsq_is_empty(ifsq))
			ifsq_devstart_sched(ifsq);
	} else if (!link && (sc->vtnet_flags & VTNET_FLAG_LINK)) {
		sc->vtnet_flags &= ~VTNET_FLAG_LINK;
		if (bootverbose)
			device_printf(dev, "Link is down\n");

		ifp->if_link_state = LINK_STATE_DOWN;
		if_link_state_change(ifp);
	}
}

static void
vtnet_watchdog(struct ifaltq_subque *ifsq)
{
	struct ifnet *ifp;
	struct vtnet_softc *sc;

	ifp = ifsq_get_ifp(ifsq);
	sc = ifp->if_softc;
	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	/*
	 * Clean out expended tx buffers prior to terminal count.
	 *
	 * NOTE: vtnet_txeof() will set wd_timer to 0 if the virtqueue
	 * becomes empty, preventing further watchdog callbacks.
	 */
	if (sc->vtnet_tx_watchdog.wd_timer != 0) {
		vtnet_txeof(sc);
		if (!ifq_is_empty(&ifp->if_snd))
			if_devstart(ifp);
		return;
	}

	/*
	 * Check to see if there are any unexpended transmit descriptors.
	 */
	if (virtqueue_empty(sc->vtnet_tx_vq)) {
		if_printf(ifp, "Spurious TX watchdog timeout -- ignoring\n");
		ifsq_watchdog_set_count(&sc->vtnet_tx_watchdog, 0);
		return;
	}

	if_printf(ifp, "TX watchdog timeout -- resetting\n");
#ifdef VTNET_DEBUG
	virtqueue_dump(sc->vtnet_tx_vq);
#endif
	ifp->if_oerrors++;
	ifp->if_flags &= ~IFF_RUNNING;
	vtnet_init(sc);
	ifsq_devstart_sched(ifsq);
}

static int
vtnet_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
	struct vtnet_softc *sc;
	struct ifreq *ifr;
	int reinit, mask, error;

	sc = ifp->if_softc;
	ifr = (struct ifreq *) data;
	reinit = 0;
	error = 0;

	switch (cmd) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > VTNET_MAX_MTU)
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu)
			error = vtnet_change_mtu(sc, ifr->ifr_mtu);
		break;

	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) == 0) {
			if (ifp->if_flags & IFF_RUNNING)
				vtnet_stop(sc);
		} else if (ifp->if_flags & IFF_RUNNING) {
			if ((ifp->if_flags ^ sc->vtnet_if_flags) &
			    (IFF_PROMISC | IFF_ALLMULTI)) {
				if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX)
					vtnet_rx_filter(sc);
				else
					error = ENOTSUP;
			}
		} else {
			vtnet_init(sc);
		}

		if (error == 0)
			sc->vtnet_if_flags = ifp->if_flags;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if ((sc->vtnet_flags & VTNET_FLAG_CTRL_RX) &&
		    (ifp->if_flags & IFF_RUNNING))
			vtnet_rx_filter_mac(sc);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->vtnet_media, cmd);
		break;

	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;

		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if (ifp->if_capenable & IFCAP_TXCSUM)
				ifp->if_hwassist |= VTNET_CSUM_OFFLOAD;
			else
				ifp->if_hwassist &= ~VTNET_CSUM_OFFLOAD;
		}

		if (mask & IFCAP_TSO4) {
			ifp->if_capenable ^= IFCAP_TSO4;
			if (ifp->if_capenable & IFCAP_TSO4)
				ifp->if_hwassist |= CSUM_TSO;
			else
				ifp->if_hwassist &= ~CSUM_TSO;
		}

		if (mask & IFCAP_RXCSUM) {
			ifp->if_capenable ^= IFCAP_RXCSUM;
			reinit = 1;
		}

#if 0 /* IFCAP_LRO doesn't exist in DragonFly. */
		if (mask & IFCAP_LRO) {
			ifp->if_capenable ^= IFCAP_LRO;
			reinit = 1;
		}
#endif

		if (mask & IFCAP_VLAN_HWFILTER) {
			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
			reinit = 1;
		}

		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;

		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;

		if (reinit && (ifp->if_flags & IFF_RUNNING)) {
			ifp->if_flags &= ~IFF_RUNNING;
			vtnet_init(sc);
		}
		/* VLAN_CAPABILITIES(ifp); */
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}

static int
vtnet_change_mtu(struct vtnet_softc *sc, int new_mtu)
{
	struct ifnet *ifp;
	int new_frame_size, clsize;

	ifp = sc->vtnet_ifp;

	if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
		new_frame_size = sizeof(struct vtnet_rx_header) +
		    sizeof(struct ether_vlan_header) + new_mtu;

		if (new_frame_size > MJUM9BYTES)
			return (EINVAL);

		if (new_frame_size <= MCLBYTES)
			clsize = MCLBYTES;
		else
			clsize = MJUM9BYTES;
	} else {
		new_frame_size = sizeof(struct virtio_net_hdr_mrg_rxbuf) +
		    sizeof(struct ether_vlan_header) + new_mtu;

		if (new_frame_size <= MCLBYTES)
			clsize = MCLBYTES;
		else
			clsize = MJUMPAGESIZE;
	}

	sc->vtnet_rx_mbuf_size = clsize;
	sc->vtnet_rx_mbuf_count = VTNET_NEEDED_RX_MBUFS(sc);
	KASSERT(sc->vtnet_rx_mbuf_count < VTNET_MAX_RX_SEGS,
	    ("too many rx mbufs: %d", sc->vtnet_rx_mbuf_count));

	ifp->if_mtu = new_mtu;

	if (ifp->if_flags & IFF_RUNNING) {
		ifp->if_flags &= ~IFF_RUNNING;
		vtnet_init(sc);
	}

	return (0);
}

static int
vtnet_init_rx_vq(struct vtnet_softc *sc)
{
	struct virtqueue *vq;
	int nbufs, error;

	vq = sc->vtnet_rx_vq;
	nbufs = 0;
	error = ENOSPC;

	while (!virtqueue_full(vq)) {
		if ((error = vtnet_newbuf(sc)) != 0)
			break;
		nbufs++;
	}

	if (nbufs > 0) {
		virtqueue_notify(vq, NULL);

		/*
		 * EMSGSIZE signifies the virtqueue did not have enough
		 * entries available to hold the last mbuf.  This is not
		 * an error.  We should not get ENOSPC since we check if
		 * the virtqueue is full before attempting to add a
		 * buffer.
		 */
		if (error == EMSGSIZE)
			error = 0;
	}

	return (error);
}

static void
vtnet_free_rx_mbufs(struct vtnet_softc *sc)
{
	struct virtqueue *vq;
	struct mbuf *m;
	int last;

	vq = sc->vtnet_rx_vq;
	last = 0;

	while ((m = virtqueue_drain(vq, &last)) != NULL)
		m_freem(m);

	KASSERT(virtqueue_empty(vq), ("mbufs remaining in Rx Vq"));
}

static void
vtnet_free_tx_mbufs(struct vtnet_softc *sc)
{
	struct virtqueue *vq;
	struct vtnet_tx_header *txhdr;
	int last;

	vq = sc->vtnet_tx_vq;
	last = 0;

	while ((txhdr = virtqueue_drain(vq, &last)) != NULL) {
		m_freem(txhdr->vth_mbuf);
		vtnet_enqueue_txhdr(sc, txhdr);
	}

	KASSERT(virtqueue_empty(vq), ("mbufs remaining in Tx Vq"));
}

static void
vtnet_free_ctrl_vq(struct vtnet_softc *sc)
{
	/*
	 * The control virtqueue is only polled, therefore
	 * it should already be empty.
	 */
	KASSERT(virtqueue_empty(sc->vtnet_ctrl_vq),
	    ("Ctrl Vq not empty"));
}

static struct mbuf *
vtnet_alloc_rxbuf(struct vtnet_softc *sc, int nbufs, struct mbuf **m_tailp)
{
	struct mbuf *m_head, *m_tail, *m;
	int i, clsize;

	clsize = sc->vtnet_rx_mbuf_size;

	/*
	 * Use m_getcl() instead of m_getjcl(); see the comment in
	 * if_mxge.c at line 2398.
	 */
	if (clsize > MCLBYTES)
		m_head = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, clsize);
	else
		m_head = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m_head == NULL)
		goto fail;

	m_head->m_len = clsize;
	m_tail = m_head;

	if (nbufs > 1) {
		KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG,
		    ("chained Rx mbuf requested without LRO_NOMRG"));

		for (i = 0; i < nbufs - 1; i++) {
			if (clsize > MCLBYTES)
				m = m_getjcl(M_NOWAIT, MT_DATA, 0, clsize);
			else
				m = m_getcl(M_NOWAIT, MT_DATA, 0);
			if (m == NULL)
				goto fail;

			m->m_len = clsize;
			m_tail->m_next = m;
			m_tail = m;
		}
	}

	if (m_tailp != NULL)
		*m_tailp = m_tail;

	return (m_head);

fail:
	sc->vtnet_stats.mbuf_alloc_failed++;
	m_freem(m_head);

	return (NULL);
}

static int
vtnet_replace_rxbuf(struct vtnet_softc *sc, struct mbuf *m0, int len0)
{
	struct mbuf *m, *m_prev;
	struct mbuf *m_new, *m_tail;
	int len, clsize, nreplace, error;

	m = m0;
	m_prev = NULL;
	len = len0;

	m_tail = NULL;
	clsize = sc->vtnet_rx_mbuf_size;
	nreplace = 0;

	if (m->m_next != NULL)
		KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG,
		    ("chained Rx mbuf without LRO_NOMRG"));

	/*
	 * Since LRO_NOMRG mbuf chains are so large, we want to avoid
	 * allocating an entire chain for each received frame.  When
	 * the received frame's length is less than that of the chain,
	 * the unused mbufs are reassigned to the new chain.
	 */
	while (len > 0) {
		/*
		 * Something is seriously wrong if we received
		 * a frame larger than the mbuf chain.  Drop it.
		 */
		if (m == NULL) {
			sc->vtnet_stats.rx_frame_too_large++;
			return (EMSGSIZE);
		}

		KASSERT(m->m_len == clsize,
		    ("mbuf length not expected cluster size: %d",
		    m->m_len));

		m->m_len = MIN(m->m_len, len);
		len -= m->m_len;

		m_prev = m;
		m = m->m_next;
		nreplace++;
	}

	KASSERT(m_prev != NULL, ("m_prev == NULL"));
	KASSERT(nreplace <= sc->vtnet_rx_mbuf_count,
	    ("too many replacement mbufs: %d/%d", nreplace,
	    sc->vtnet_rx_mbuf_count));

	m_new = vtnet_alloc_rxbuf(sc, nreplace, &m_tail);
	if (m_new == NULL) {
		m_prev->m_len = clsize;
		return (ENOBUFS);
	}

	/*
	 * Move unused mbufs, if any, from the original chain
	 * onto the end of the new chain.
	 */
	if (m_prev->m_next != NULL) {
		m_tail->m_next = m_prev->m_next;
		m_prev->m_next = NULL;
	}

	error = vtnet_enqueue_rxbuf(sc, m_new);
	if (error) {
		/*
		 * BAD! We could not enqueue the replacement mbuf chain.  We
		 * must restore the m0 chain to the original state if it was
		 * modified so we can subsequently discard it.
		 *
		 * NOTE: The replacement is supposed to be an identical copy
		 * of the one just dequeued, so this is an unexpected error.
		 */
		sc->vtnet_stats.rx_enq_replacement_failed++;

		if (m_tail->m_next != NULL) {
			m_prev->m_next = m_tail->m_next;
			m_tail->m_next = NULL;
		}

		m_prev->m_len = clsize;
		m_freem(m_new);
	}

	return (error);
}

static int
vtnet_newbuf(struct vtnet_softc *sc)
{
	struct mbuf *m;
	int error;

	m = vtnet_alloc_rxbuf(sc, sc->vtnet_rx_mbuf_count, NULL);
	if (m == NULL)
		return (ENOBUFS);

	error = vtnet_enqueue_rxbuf(sc, m);
	if (error)
		m_freem(m);

	return (error);
}

static void
vtnet_discard_merged_rxbuf(struct vtnet_softc *sc, int nbufs)
{
	struct virtqueue *vq;
	struct mbuf *m;

	vq = sc->vtnet_rx_vq;

	while (--nbufs > 0) {
		if ((m = virtqueue_dequeue(vq, NULL)) == NULL)
			break;
		vtnet_discard_rxbuf(sc, m);
	}
}

static void
vtnet_discard_rxbuf(struct vtnet_softc *sc, struct mbuf *m)
{
	int error;

	/*
	 * Requeue the discarded mbuf.  This should always be
	 * successful since it was just dequeued.
	 */
	error = vtnet_enqueue_rxbuf(sc, m);
	KASSERT(error == 0, ("cannot requeue discarded mbuf"));
}

static int
vtnet_enqueue_rxbuf(struct vtnet_softc *sc, struct mbuf *m)
{
	struct sglist sg;
	struct sglist_seg segs[VTNET_MAX_RX_SEGS];
	struct vtnet_rx_header *rxhdr;
	struct virtio_net_hdr *hdr;
	uint8_t *mdata;
	int offset, error;

	ASSERT_SERIALIZED(&sc->vtnet_rx_slz);
	if ((sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG) == 0)
		KASSERT(m->m_next == NULL, ("chained Rx mbuf"));

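	/*
	 * For non-mergeable buffers, the virtio header lives in the
	 * vtnet_rx_header at the front of the first cluster and gets its
	 * own sg segment; the frame data follows at the pad offset.
	 */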
	sglist_init(&sg, sc->vtnet_rx_nsegs, segs);

	mdata = mtod(m, uint8_t *);
	offset = 0;

	if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
		rxhdr = (struct vtnet_rx_header *) mdata;
		hdr = &rxhdr->vrh_hdr;
		offset += sizeof(struct vtnet_rx_header);

		error = sglist_append(&sg, hdr, sc->vtnet_hdr_size);
		KASSERT(error == 0, ("cannot add header to sglist"));
	}

	error = sglist_append(&sg, mdata + offset, m->m_len - offset);
	if (error)
		return (error);

	if (m->m_next != NULL) {
		error = sglist_append_mbuf(&sg, m->m_next);
		if (error)
			return (error);
	}

	return (virtqueue_enqueue(sc->vtnet_rx_vq, m, &sg, 0, sg.sg_nseg));
}

#ifdef IFPOLL_ENABLE

static void
vtnet_npoll_status(struct ifnet *ifp)
{
	struct vtnet_softc *sc = ifp->if_softc;

	ASSERT_SERIALIZED(&sc->vtnet_slz);

	vtnet_update_link_status(sc);
}

static void
vtnet_npoll_rx(struct ifnet *ifp, void *arg __unused, int cycle)
{
	struct vtnet_softc *sc = ifp->if_softc;

	vtnet_rxeof(sc, cycle, NULL);
}

static void
vtnet_npoll_tx(struct ifnet *ifp, void *arg __unused, int cycle __unused)
{
	struct vtnet_softc *sc = ifp->if_softc;

	ASSERT_SERIALIZED(&sc->vtnet_tx_slz);

	vtnet_txeof(sc);
	if (!ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);
}

static void
vtnet_npoll(struct ifnet *ifp, struct ifpoll_info *info)
{
	struct vtnet_softc *sc = ifp->if_softc;
	int i;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if (info) {
		int cpu;

		info->ifpi_status.status_func = vtnet_npoll_status;
		info->ifpi_status.serializer = &sc->vtnet_slz;

		/* Use the same cpu for rx and tx. */
		cpu = device_get_unit(device_get_parent(sc->vtnet_dev));
		/* Shuffle a bit. */
		cpu = (cpu * 61) % netisr_ncpus;
		KKASSERT(cpu < netisr_ncpus);
		info->ifpi_tx[cpu].poll_func = vtnet_npoll_tx;
		info->ifpi_tx[cpu].arg = NULL;
		info->ifpi_tx[cpu].serializer = &sc->vtnet_tx_slz;
		ifq_set_cpuid(&ifp->if_snd, cpu);

		info->ifpi_rx[cpu].poll_func = vtnet_npoll_rx;
		info->ifpi_rx[cpu].arg = NULL;
		info->ifpi_rx[cpu].serializer = &sc->vtnet_rx_slz;

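		/*
		 * Entering polling mode: quiesce the interrupt handlers,
		 * mask the virtqueue interrupts, and tear down the MSI-X
		 * bindings; they are re-established when polling is
		 * disabled again below.
		 */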
		for (i = 0; i < 3; i++)
			lwkt_serialize_handler_disable(sc->serializes[i]);
		vtnet_disable_rx_intr(sc);
		vtnet_disable_tx_intr(sc);
		for (i = 0; i < sc->vtnet_nintr; i++)
			virtio_teardown_intr(sc->vtnet_dev, i);
		if (virtio_with_feature(sc->vtnet_dev, VIRTIO_NET_F_STATUS))
			virtio_unbind_intr(sc->vtnet_dev, -1);
		for (i = 0; i < 2; i++)
			virtio_unbind_intr(sc->vtnet_dev, i);
	} else {
		int error;

		ifq_set_cpuid(&ifp->if_snd,
		    sc->vtnet_cpus[sc->vtnet_nintr - 1]);
		for (i = 0; i < 3; i++)
			lwkt_serialize_handler_enable(sc->serializes[i]);
		for (i = 0; i < 2; i++) {
			error = virtio_bind_intr(sc->vtnet_dev,
			    sc->vtnet_irqmap[i].irq, i,
			    sc->vtnet_irqmap[i].handler, sc);
			if (error) {
				device_printf(sc->vtnet_dev,
				    "cannot re-bind virtqueue IRQs\n");
			}
		}
		if (virtio_with_feature(sc->vtnet_dev, VIRTIO_NET_F_STATUS)) {
			error = virtio_bind_intr(sc->vtnet_dev, 0, -1,
			    vtnet_config_intr, sc);
			if (error) {
				device_printf(sc->vtnet_dev,
				    "cannot re-bind config_change IRQ\n");
			}
		}
		for (i = 0; i < sc->vtnet_nintr; i++) {
			error = virtio_setup_intr(sc->vtnet_dev, i,
			    sc->vtnet_intr_slz[i]);
			if (error) {
				device_printf(sc->vtnet_dev,
				    "cannot setup virtqueue interrupts\n");
			}
		}
		vtnet_enable_rx_intr(sc);
		vtnet_enable_tx_intr(sc);
	}
}

#endif /* IFPOLL_ENABLE */

static void
vtnet_vlan_tag_remove(struct mbuf *m)
{
	struct ether_vlan_header *evl;

	evl = mtod(m, struct ether_vlan_header *);

	m->m_pkthdr.ether_vlantag = ntohs(evl->evl_tag);
	m->m_flags |= M_VLANTAG;

	/* Strip the 802.1Q header. */
	bcopy((char *) evl, (char *) evl + ETHER_VLAN_ENCAP_LEN,
	    ETHER_HDR_LEN - ETHER_TYPE_LEN);
	m_adj(m, ETHER_VLAN_ENCAP_LEN);
}

/*
 * Alternative method of doing receive checksum offloading.  Rather
 * than parsing the received frame down to the IP header, use the
 * csum_offset to determine which CSUM_* flags are appropriate.  We
 * can get by with doing this only because the checksum offsets are
 * unique for the things we care about.
 */
static int
vtnet_rx_csum(struct vtnet_softc *sc, struct mbuf *m,
    struct virtio_net_hdr *hdr)
{
	struct ether_header *eh;
	struct ether_vlan_header *evh;
	struct udphdr *udp;
	int csum_len;
	uint16_t eth_type;

	csum_len = hdr->csum_start + hdr->csum_offset;

	if (csum_len < sizeof(struct ether_header) + sizeof(struct ip))
		return (1);
	if (m->m_len < csum_len)
		return (1);

	eh = mtod(m, struct ether_header *);
	eth_type = ntohs(eh->ether_type);
	if (eth_type == ETHERTYPE_VLAN) {
		evh = mtod(m, struct ether_vlan_header *);
		eth_type = ntohs(evh->evl_proto);
	}

	if (eth_type != ETHERTYPE_IP && eth_type != ETHERTYPE_IPV6) {
		sc->vtnet_stats.rx_csum_bad_ethtype++;
		return (1);
	}

	/* Use the offset to determine the appropriate CSUM_* flags. */
	switch (hdr->csum_offset) {
	case offsetof(struct udphdr, uh_sum):
		if (m->m_len < hdr->csum_start + sizeof(struct udphdr))
			return (1);
		udp = (struct udphdr *)(mtod(m, uint8_t *) + hdr->csum_start);
		if (udp->uh_sum == 0)
			return (0);

		/* FALLTHROUGH */

	case offsetof(struct tcphdr, th_sum):
		m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
		m->m_pkthdr.csum_data = 0xFFFF;
		break;

	default:
		sc->vtnet_stats.rx_csum_bad_offset++;
		return (1);
	}

	sc->vtnet_stats.rx_csum_offloaded++;

	return (0);
}

static int
vtnet_rxeof_merged(struct vtnet_softc *sc, struct mbuf *m_head, int nbufs)
{
	struct ifnet *ifp;
	struct virtqueue *vq;
	struct mbuf *m, *m_tail;
	int len;

	ifp = sc->vtnet_ifp;
	vq = sc->vtnet_rx_vq;
	m_tail = m_head;

	while (--nbufs > 0) {
		m = virtqueue_dequeue(vq, &len);
		if (m == NULL) {
			ifp->if_ierrors++;
			goto fail;
		}

		if (vtnet_newbuf(sc) != 0) {
			ifp->if_iqdrops++;
			vtnet_discard_rxbuf(sc, m);
			if (nbufs > 1)
				vtnet_discard_merged_rxbuf(sc, nbufs);
			goto fail;
		}

		if (m->m_len < len)
			len = m->m_len;

		m->m_len = len;
		m->m_flags &= ~M_PKTHDR;

		m_head->m_pkthdr.len += len;
		m_tail->m_next = m;
		m_tail = m;
	}

	return (0);

fail:
	sc->vtnet_stats.rx_mergeable_failed++;
	m_freem(m_head);

	return (1);
}

static int
vtnet_rxeof(struct vtnet_softc *sc, int count, int *rx_npktsp)
{
	struct virtio_net_hdr lhdr;
	struct ifnet *ifp;
	struct virtqueue *vq;
	struct mbuf *m;
	struct ether_header *eh;
	struct virtio_net_hdr *hdr;
	struct virtio_net_hdr_mrg_rxbuf *mhdr;
	int len, deq, nbufs, adjsz, rx_npkts;

	ifp = sc->vtnet_ifp;
	vq = sc->vtnet_rx_vq;
	hdr = &lhdr;
	deq = 0;
	rx_npkts = 0;

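	/*
	 * Every dequeued buffer begins with the virtio header.  For
	 * mergeable buffers, its num_buffers field says how many
	 * descriptors the frame spans; otherwise a frame always occupies
	 * a single mbuf chain.
	 */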
	while (--count >= 0) {
		m = virtqueue_dequeue(vq, &len);
		if (m == NULL)
			break;
		deq++;

		if (len < sc->vtnet_hdr_size + ETHER_HDR_LEN) {
			ifp->if_ierrors++;
			vtnet_discard_rxbuf(sc, m);
			continue;
		}

		if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
			nbufs = 1;
			adjsz = sizeof(struct vtnet_rx_header);
			/*
			 * Account for our pad between the header and
			 * the actual start of the frame.
			 */
			len += VTNET_RX_HEADER_PAD;
		} else {
			mhdr = mtod(m, struct virtio_net_hdr_mrg_rxbuf *);
			nbufs = mhdr->num_buffers;
			adjsz = sizeof(struct virtio_net_hdr_mrg_rxbuf);
		}

		if (vtnet_replace_rxbuf(sc, m, len) != 0) {
			ifp->if_iqdrops++;
			vtnet_discard_rxbuf(sc, m);
			if (nbufs > 1)
				vtnet_discard_merged_rxbuf(sc, nbufs);
			continue;
		}

		m->m_pkthdr.len = len;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.csum_flags = 0;

		if (nbufs > 1) {
			if (vtnet_rxeof_merged(sc, m, nbufs) != 0)
				continue;
		}

		ifp->if_ipackets++;

		/*
		 * Save copy of header before we strip it.  For both
		 * mergeable and non-mergeable, the VirtIO header is placed
		 * first in the mbuf's data.  We no longer need num_buffers,
		 * so always use a virtio_net_hdr.
		 */
		memcpy(hdr, mtod(m, void *), sizeof(struct virtio_net_hdr));
		m_adj(m, adjsz);

		if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
			eh = mtod(m, struct ether_header *);
			if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
				vtnet_vlan_tag_remove(m);

				/*
				 * With the 802.1Q header removed, update the
				 * checksum starting location accordingly.
				 */
				if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
					hdr->csum_start -=
					    ETHER_VLAN_ENCAP_LEN;
			}
		}

		if (ifp->if_capenable & IFCAP_RXCSUM &&
		    hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
			if (vtnet_rx_csum(sc, m, hdr) != 0)
				sc->vtnet_stats.rx_csum_failed++;
		}

		rx_npkts++;
		ifp->if_input(ifp, m, NULL, mycpuid);

		/*
		 * The interface may have been stopped while we were
		 * passing the packet up the network stack.
		 */
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			break;
	}

	if (deq > 0)
		virtqueue_notify(vq, NULL);

	if (rx_npktsp != NULL)
		*rx_npktsp = rx_npkts;

	return (count > 0 ? 0 : EAGAIN);
}

static void
vtnet_rx_msix_intr(void *xsc)
{
	struct vtnet_softc *sc;
	struct ifnet *ifp;
	int more;

	sc = xsc;
	ifp = sc->vtnet_ifp;

	if (!virtqueue_pending(sc->vtnet_rx_vq))
		return;

	vtnet_disable_rx_intr(sc);
next:
	if ((ifp->if_flags & IFF_RUNNING) == 0) {
		vtnet_enable_rx_intr(sc);
		return;
	}

	more = vtnet_rxeof(sc, sc->vtnet_rx_process_limit, NULL);
	if (!more && vtnet_enable_rx_intr(sc) != 0) {
		vtnet_disable_rx_intr(sc);
		more = 1;
	}

	if (more) {
		sc->vtnet_stats.rx_task_rescheduled++;
		goto next;
	}
}

static void
vtnet_rx_vq_intr(void *xsc)
{
	struct vtnet_softc *sc = xsc;

	lwkt_serialize_enter(&sc->vtnet_rx_slz);
	vtnet_rx_msix_intr(xsc);
	lwkt_serialize_exit(&sc->vtnet_rx_slz);
}

static void
vtnet_enqueue_txhdr(struct vtnet_softc *sc, struct vtnet_tx_header *txhdr)
{
	bzero(txhdr, sizeof(*txhdr));
	SLIST_INSERT_HEAD(&sc->vtnet_txhdr_free, txhdr, link);
}

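/*
 * Reclaim transmit buffers that the host has completed, free the
 * mbufs, and return their headers to the free list.
 */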
1845 static void
vtnet_txeof(struct vtnet_softc * sc)1846 vtnet_txeof(struct vtnet_softc *sc)
1847 {
1848 struct virtqueue *vq;
1849 struct ifnet *ifp;
1850 struct vtnet_tx_header *txhdr;
1851 int deq;
1852
1853 vq = sc->vtnet_tx_vq;
1854 ifp = sc->vtnet_ifp;
1855 deq = 0;
1856
1857 while ((txhdr = virtqueue_dequeue(vq, NULL)) != NULL) {
1858 deq++;
1859 ifp->if_opackets++;
1860 m_freem(txhdr->vth_mbuf);
1861 vtnet_enqueue_txhdr(sc, txhdr);
1862 }
1863
1864 if (deq > 0) {
1865 ifq_clr_oactive(&ifp->if_snd);
1866 if (virtqueue_empty(vq))
1867 ifsq_watchdog_set_count(&sc->vtnet_tx_watchdog, 0);
1868 else
1869 ifsq_watchdog_set_count(&sc->vtnet_tx_watchdog,
1870 VTNET_WATCHDOG_TIMEOUT);
1871 }
1872 }
1873
1874 static struct mbuf *
vtnet_tx_offload(struct vtnet_softc * sc,struct mbuf * m,struct virtio_net_hdr * hdr)1875 vtnet_tx_offload(struct vtnet_softc *sc, struct mbuf *m,
1876 struct virtio_net_hdr *hdr)
1877 {
1878 struct ifnet *ifp;
1879 struct ether_header *eh;
1880 struct ether_vlan_header *evh;
1881 struct ip *ip;
1882 struct ip6_hdr *ip6;
1883 struct tcphdr *tcp;
1884 int ip_offset;
1885 uint16_t eth_type, csum_start;
1886 uint8_t ip_proto, gso_type;
1887
1888 ifp = sc->vtnet_ifp;
1889 M_ASSERTPKTHDR(m);
1890
1891 ip_offset = sizeof(struct ether_header);
1892 if (m->m_len < ip_offset) {
1893 if ((m = m_pullup(m, ip_offset)) == NULL)
1894 return (NULL);
1895 }
1896
1897 eh = mtod(m, struct ether_header *);
1898 eth_type = ntohs(eh->ether_type);
1899 if (eth_type == ETHERTYPE_VLAN) {
1900 ip_offset = sizeof(struct ether_vlan_header);
1901 if (m->m_len < ip_offset) {
1902 if ((m = m_pullup(m, ip_offset)) == NULL)
1903 return (NULL);
1904 }
1905 evh = mtod(m, struct ether_vlan_header *);
1906 eth_type = ntohs(evh->evl_proto);
1907 }
1908
1909 switch (eth_type) {
1910 case ETHERTYPE_IP:
1911 if (m->m_len < ip_offset + sizeof(struct ip)) {
1912 m = m_pullup(m, ip_offset + sizeof(struct ip));
1913 if (m == NULL)
1914 return (NULL);
1915 }
1916
1917 ip = (struct ip *)(mtod(m, uint8_t *) + ip_offset);
1918 ip_proto = ip->ip_p;
1919 csum_start = ip_offset + (ip->ip_hl << 2);
1920 gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
1921 break;
1922
1923 case ETHERTYPE_IPV6:
1924 if (m->m_len < ip_offset + sizeof(struct ip6_hdr)) {
1925 m = m_pullup(m, ip_offset + sizeof(struct ip6_hdr));
1926 if (m == NULL)
1927 return (NULL);
1928 }
1929
1930 ip6 = (struct ip6_hdr *)(mtod(m, uint8_t *) + ip_offset);
1931 /*
1932 * XXX Assume no extension headers are present. Presently,
1933 * this will always be true in the case of TSO, and FreeBSD
1934 * does not perform checksum offloading of IPv6 yet.
1935 */
1936 ip_proto = ip6->ip6_nxt;
1937 csum_start = ip_offset + sizeof(struct ip6_hdr);
1938 gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
1939 break;
1940
1941 default:
1942 return (m);
1943 }
1944
1945 if (m->m_pkthdr.csum_flags & VTNET_CSUM_OFFLOAD) {
1946 hdr->flags |= VIRTIO_NET_HDR_F_NEEDS_CSUM;
1947 hdr->csum_start = csum_start;
1948 hdr->csum_offset = m->m_pkthdr.csum_data;
1949
1950 sc->vtnet_stats.tx_csum_offloaded++;
1951 }
1952
1953 if (m->m_pkthdr.csum_flags & CSUM_TSO) {
1954 if (ip_proto != IPPROTO_TCP)
1955 return (m);
1956
1957 if (m->m_len < csum_start + sizeof(struct tcphdr)) {
1958 m = m_pullup(m, csum_start + sizeof(struct tcphdr));
1959 if (m == NULL)
1960 return (NULL);
1961 }
1962
1963 tcp = (struct tcphdr *)(mtod(m, uint8_t *) + csum_start);
1964 hdr->gso_type = gso_type;
1965 hdr->hdr_len = csum_start + (tcp->th_off << 2);
1966 hdr->gso_size = m->m_pkthdr.tso_segsz;
1967
1968 if (tcp->th_flags & TH_CWR) {
1969 /*
1970 * Drop if we did not negotiate VIRTIO_NET_F_HOST_ECN.
1971 * ECN support is only configurable globally with the
1972 * net.inet.tcp.ecn.enable sysctl knob.
1973 */
1974 if ((sc->vtnet_flags & VTNET_FLAG_TSO_ECN) == 0) {
1975 if_printf(ifp, "TSO with ECN not supported "
1976 "by host\n");
1977 m_freem(m);
1978 return (NULL);
1979 }
1980
1981 hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
1982 }
1983
1984 sc->vtnet_stats.tx_tso_offloaded++;
1985 }
1986
1987 return (m);
1988 }
1989
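/*
 * Enqueue a frame on the Tx virtqueue. The scatter/gather list is laid
 * out as:
 *
 *	seg[0]    virtio_net_hdr (vtnet_hdr_size bytes, host-readable)
 *	seg[1..n] mbuf chain data
 *
 * If the chain needs more than vtnet_tx_nsegs segments, it is
 * defragmented once and retried before giving up with ENOBUFS.
 */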
1990 static int
1991 vtnet_enqueue_txbuf(struct vtnet_softc *sc, struct mbuf **m_head,
1992 struct vtnet_tx_header *txhdr)
1993 {
1994 struct sglist sg;
1995 struct sglist_seg segs[VTNET_MAX_TX_SEGS];
1996 struct virtqueue *vq;
1997 struct mbuf *m;
1998 int error;
1999
2000 vq = sc->vtnet_tx_vq;
2001 m = *m_head;
2002
2003 sglist_init(&sg, sc->vtnet_tx_nsegs, segs);
2004 error = sglist_append(&sg, &txhdr->vth_uhdr, sc->vtnet_hdr_size);
2005 KASSERT(error == 0 && sg.sg_nseg == 1,
2006 ("%s: error %d adding header to sglist", __func__, error));
2007
2008 error = sglist_append_mbuf(&sg, m);
2009 if (error) {
2010 m = m_defrag(m, M_NOWAIT);
2011 if (m == NULL)
2012 goto fail;
2013
2014 *m_head = m;
2015 sc->vtnet_stats.tx_defragged++;
2016
2017 error = sglist_append_mbuf(&sg, m);
2018 if (error)
2019 goto fail;
2020 }
2021
2022 txhdr->vth_mbuf = m;
2023 error = virtqueue_enqueue(vq, txhdr, &sg, sg.sg_nseg, 0);
2024
2025 return (error);
2026
2027 fail:
2028 sc->vtnet_stats.tx_defrag_failed++;
2029 m_freem(*m_head);
2030 *m_head = NULL;
2031
2032 return (ENOBUFS);
2033 }
2034
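/*
 * Software VLAN encapsulation: virtio-net has no hardware tag
 * insertion, so prepend four bytes and slide the Ethernet addresses
 * down, leaving the 802.1Q tag between them and the original type
 * field.
 */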
2035 static struct mbuf *
2036 vtnet_vlan_tag_insert(struct mbuf *m)
2037 {
2038 struct mbuf *n;
2039 struct ether_vlan_header *evl;
2040
2041 if (M_WRITABLE(m) == 0) {
2042 n = m_dup(m, M_NOWAIT);
2043 m_freem(m);
2044 if ((m = n) == NULL)
2045 return (NULL);
2046 }
2047
2048 M_PREPEND(m, ETHER_VLAN_ENCAP_LEN, M_NOWAIT);
2049 if (m == NULL)
2050 return (NULL);
2051 if (m->m_len < sizeof(struct ether_vlan_header)) {
2052 m = m_pullup(m, sizeof(struct ether_vlan_header));
2053 if (m == NULL)
2054 return (NULL);
2055 }
2056
2057 /* Insert 802.1Q header into the existing Ethernet header. */
2058 evl = mtod(m, struct ether_vlan_header *);
2059 bcopy((char *) evl + ETHER_VLAN_ENCAP_LEN,
2060 (char *) evl, ETHER_HDR_LEN - ETHER_TYPE_LEN);
2061 evl->evl_encap_proto = htons(ETHERTYPE_VLAN);
2062 evl->evl_tag = htons(m->m_pkthdr.ether_vlantag);
2063 m->m_flags &= ~M_VLANTAG;
2064
2065 return (m);
2066 }
2067
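/*
 * Prepare one mbuf chain for transmit: take a free Tx header, apply
 * VLAN encapsulation and checksum/TSO offload as required, and enqueue
 * the result. On failure the header goes back to the free list and
 * *m_head may be NULL.
 */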
2068 static int
2069 vtnet_encap(struct vtnet_softc *sc, struct mbuf **m_head)
2070 {
2071 struct vtnet_tx_header *txhdr;
2072 struct virtio_net_hdr *hdr;
2073 struct mbuf *m;
2074 int error;
2075
2076 txhdr = SLIST_FIRST(&sc->vtnet_txhdr_free);
2077 if (txhdr == NULL)
2078 return (ENOBUFS);
2079 SLIST_REMOVE_HEAD(&sc->vtnet_txhdr_free, link);
2080
2081 /*
2082 * Always use the non-mergeable header to simplify things. When
2083 * the mergeable feature is negotiated, the num_buffers field
2084 * must be set to zero. vtnet_hdr_size is used later so that only
2085 * the negotiated header size is enqueued to the host.
2086 */
2087 hdr = &txhdr->vth_uhdr.hdr;
2088 m = *m_head;
2089
2090 error = ENOBUFS;
2091
2092 if (m->m_flags & M_VLANTAG) {
2094 m = vtnet_vlan_tag_insert(m);
2095 if ((*m_head = m) == NULL)
2096 goto fail;
2097 m->m_flags &= ~M_VLANTAG;
2098 }
2099
2100 if (m->m_pkthdr.csum_flags != 0) {
2101 m = vtnet_tx_offload(sc, m, hdr);
2102 if ((*m_head = m) == NULL)
2103 goto fail;
2104 }
2105
2106 error = vtnet_enqueue_txbuf(sc, m_head, txhdr);
2107 fail:
2108 if (error != 0)
2109 vtnet_enqueue_txhdr(sc, txhdr);
2110 return (error);
2111 }
2112
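/*
 * if_start method, called with the Tx serializer held. Frames are
 * dequeued until the virtqueue fills; the host is notified only once
 * for the whole batch.
 */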
2113 static void
2114 vtnet_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
2115 {
2116 struct vtnet_softc *sc;
2117 struct virtqueue *vq;
2118 struct mbuf *m0;
2119 int enq;
2120
2121 sc = ifp->if_softc;
2122 vq = sc->vtnet_tx_vq;
2123 enq = 0;
2124
2125 ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq);
2126 ASSERT_SERIALIZED(&sc->vtnet_tx_slz);
2127
2128 if ((ifp->if_flags & IFF_RUNNING) == 0 ||
2129     (sc->vtnet_flags & VTNET_FLAG_LINK) == 0)
2130 return;
2131
2132 #ifdef VTNET_TX_INTR_MODERATION
2133 if (virtqueue_nused(vq) >= sc->vtnet_tx_size / 2)
2134 vtnet_txeof(sc);
2135 #endif
2136
2137 while (!ifsq_is_empty(ifsq)) {
2138 if (virtqueue_full(vq)) {
2139 ifsq_set_oactive(ifsq);
2140 break;
2141 }
2142
2143 m0 = ifsq_dequeue(ifsq);
2144 if (m0 == NULL)
2145 break;
2146
2147 if (vtnet_encap(sc, &m0) != 0) {
2148 if (m0 == NULL)
2149 break;
2150 ifsq_prepend(ifsq, m0);
2151 ifsq_set_oactive(ifsq);
2152 break;
2153 }
2154
2155 enq++;
2156 ETHER_BPF_MTAP(ifp, m0);
2157 }
2158
2159 if (enq > 0) {
2160 virtqueue_notify(vq, NULL);
2161 ifsq_watchdog_set_count(&sc->vtnet_tx_watchdog,
2162 VTNET_WATCHDOG_TIMEOUT);
2163 }
2164 }
2165
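/*
 * MSI-X Tx interrupt handler: reclaim completed transmits, restart the
 * send queue, and loop if completions race with re-enabling the
 * interrupt.
 */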
2166 static void
2167 vtnet_tx_msix_intr(void *xsc)
2168 {
2169 struct vtnet_softc *sc;
2170 struct ifnet *ifp;
2171 struct ifaltq_subque *ifsq;
2172
2173 sc = xsc;
2174 ifp = sc->vtnet_ifp;
2175 ifsq = ifq_get_subq_default(&ifp->if_snd);
2176
2177 if (!virtqueue_pending(sc->vtnet_tx_vq))
2178 return;
2179
2180 vtnet_disable_tx_intr(sc);
2181 next:
2182 if ((ifp->if_flags & IFF_RUNNING) == 0) {
2183 vtnet_enable_tx_intr(sc);
2184 return;
2185 }
2186
2187 vtnet_txeof(sc);
2188
2189 if (!ifsq_is_empty(ifsq))
2190 ifsq_devstart(ifsq);
2191
2192 if (vtnet_enable_tx_intr(sc) != 0) {
2193 vtnet_disable_tx_intr(sc);
2194 sc->vtnet_stats.tx_task_rescheduled++;
2195 goto next;
2196 }
2197 }
2198
2199 static void
2200 vtnet_tx_vq_intr(void *xsc)
2201 {
2202 struct vtnet_softc *sc = xsc;
2203
2204 lwkt_serialize_enter(&sc->vtnet_tx_slz);
2205 vtnet_tx_msix_intr(xsc);
2206 lwkt_serialize_exit(&sc->vtnet_tx_slz);
2207 }
2208
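/* Configuration change interrupt; only the link state is of interest. */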
2209 static void
2210 vtnet_config_intr(void *arg)
2211 {
2212 struct vtnet_softc *sc;
2213
2214 sc = arg;
2215
2216 vtnet_update_link_status(sc);
2217 }
2218
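/*
 * Bring the interface down. Since virtio_stop() resets the device,
 * all pending Rx/Tx mbufs are freed here and both rings must be fully
 * repopulated by vtnet_init().
 */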
2219 static void
2220 vtnet_stop(struct vtnet_softc *sc)
2221 {
2222 device_t dev;
2223 struct ifnet *ifp;
2224
2225 dev = sc->vtnet_dev;
2226 ifp = sc->vtnet_ifp;
2227
2228 ASSERT_IFNET_SERIALIZED_ALL(ifp);
2229
2230 ifq_clr_oactive(&ifp->if_snd);
2231 ifsq_watchdog_stop(&sc->vtnet_tx_watchdog);
2232 ifp->if_flags &= ~(IFF_RUNNING);
2233
2234 vtnet_disable_rx_intr(sc);
2235 vtnet_disable_tx_intr(sc);
2236
2237 /*
2238 * Stop the host VirtIO adapter. Note this will reset the host
2239 * adapter's state back to the pre-initialized state, so in
2240 * order to make the device usable again, we must drive it
2241 * through virtio_reinit() and virtio_reinit_complete().
2242 */
2243 virtio_stop(dev);
2244
2245 sc->vtnet_flags &= ~VTNET_FLAG_LINK;
2246
2247 vtnet_free_rx_mbufs(sc);
2248 vtnet_free_tx_mbufs(sc);
2249 }
2250
2251 static int
2252 vtnet_virtio_reinit(struct vtnet_softc *sc)
2253 {
2254 device_t dev;
2255 struct ifnet *ifp;
2256 uint64_t features;
2257 int error;
2258
2259 dev = sc->vtnet_dev;
2260 ifp = sc->vtnet_ifp;
2261 features = sc->vtnet_features;
2262
2263 /*
2264 * Re-negotiate with the host, removing any disabled receive
2265 * features. Transmit features are disabled only on our side
2266 * via if_capenable and if_hwassist.
2267 */
2268
2269 if (ifp->if_capabilities & IFCAP_RXCSUM) {
2270 if ((ifp->if_capenable & IFCAP_RXCSUM) == 0)
2271 features &= ~VIRTIO_NET_F_GUEST_CSUM;
2272 }
2273
2274 #if 0 /* IFCAP_LRO doesn't exist in DragonFly. */
2275 if (ifp->if_capabilities & IFCAP_LRO) {
2276 if ((ifp->if_capenable & IFCAP_LRO) == 0)
2277 features &= ~VTNET_LRO_FEATURES;
2278 }
2279 #endif
2280
2281 if (ifp->if_capabilities & IFCAP_VLAN_HWFILTER) {
2282 if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
2283 features &= ~VIRTIO_NET_F_CTRL_VLAN;
2284 }
2285
2286 error = virtio_reinit(dev, features);
2287 if (error)
2288 device_printf(dev, "virtio reinit error %d\n", error);
2289
2290 return (error);
2291 }
2292
2293 static void
2294 vtnet_init(void *xsc)
2295 {
2296 struct vtnet_softc *sc;
2297 device_t dev;
2298 struct ifnet *ifp;
2299 int error;
2300
2301 sc = xsc;
2302 dev = sc->vtnet_dev;
2303 ifp = sc->vtnet_ifp;
2304
2305 ASSERT_IFNET_SERIALIZED_ALL(ifp);
2306
2307 if (ifp->if_flags & IFF_RUNNING)
2308 return;
2309
2310 /* Stop host's adapter, cancel any pending I/O. */
2311 vtnet_stop(sc);
2312
2313 /* Reinitialize the host device. */
2314 error = vtnet_virtio_reinit(sc);
2315 if (error) {
2316 device_printf(dev,
2317 "reinitialization failed, stopping device...\n");
2318 vtnet_stop(sc);
2319 return;
2320 }
2321
2322 /* Update host with assigned MAC address. */
2323 bcopy(IF_LLADDR(ifp), sc->vtnet_hwaddr, ETHER_ADDR_LEN);
2324 vtnet_set_hwaddr(sc);
2325
2326 ifp->if_hwassist = 0;
2327 if (ifp->if_capenable & IFCAP_TXCSUM)
2328 ifp->if_hwassist |= VTNET_CSUM_OFFLOAD;
2329 if (ifp->if_capenable & IFCAP_TSO4)
2330 ifp->if_hwassist |= CSUM_TSO;
2331
2332 error = vtnet_init_rx_vq(sc);
2333 if (error) {
2334 device_printf(dev,
2335 "cannot allocate mbufs for Rx virtqueue\n");
2336 vtnet_stop(sc);
2337 return;
2338 }
2339
2340 if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) {
2341 if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) {
2342 /* Restore promiscuous and all-multicast modes. */
2343 vtnet_rx_filter(sc);
2344
2345 /* Restore filtered MAC addresses. */
2346 vtnet_rx_filter_mac(sc);
2347 }
2348
2349 /* Restore VLAN filters. */
2350 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
2351 vtnet_rx_filter_vlan(sc);
2352 }
2353
2354 #ifdef IFPOLL_ENABLE
2355 if (!(ifp->if_flags & IFF_NPOLLING))
2356 #endif
2357 {
2358 vtnet_enable_rx_intr(sc);
2359 vtnet_enable_tx_intr(sc);
2360 }
2361
2362 ifp->if_flags |= IFF_RUNNING;
2363 ifq_clr_oactive(&ifp->if_snd);
2364 ifsq_watchdog_start(&sc->vtnet_tx_watchdog);
2365
2366 virtio_reinit_complete(dev);
2367
2368 vtnet_update_link_status(sc);
2369 }
2370
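/*
 * Synchronously execute a command on the control virtqueue: enqueue
 * the caller's sglist, notify the host, and poll until the host hands
 * the cookie back.
 */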
2371 static void
2372 vtnet_exec_ctrl_cmd(struct vtnet_softc *sc, void *cookie,
2373 struct sglist *sg, int readable, int writable)
2374 {
2375 struct virtqueue *vq;
2376 void *c;
2377
2378 vq = sc->vtnet_ctrl_vq;
2379
2380 ASSERT_IFNET_SERIALIZED_ALL(sc->vtnet_ifp);
2381 KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_VQ,
2382 ("no control virtqueue"));
2383 KASSERT(virtqueue_empty(vq),
2384 ("control command already enqueued"));
2385
2386 if (virtqueue_enqueue(vq, cookie, sg, readable, writable) != 0)
2387 return;
2388
2389 /*
2390 * XXX We could safely drop the serializer between here and the
2391 * end of the function once we are able to sleep until this
2392 * command has finished.
2393 */
2394 virtqueue_notify(vq, NULL);
2395
2396 /*
2397 * Poll until the command is complete. Previously, we would
2398 * sleep until the control virtqueue interrupt handler woke
2399 * us up, but dropping the serializer here leads to
2400 * serialization difficulties.
2401 *
2402 * Furthermore, it appears QEMU/KVM only allocates three MSI-X
2403 * vectors. Two of those vectors are needed for the Rx and Tx
2404 * virtqueues, and we do not support sharing a virtqueue and the
2405 * config changed notification on the same MSI-X vector.
2406 */
2407 c = virtqueue_poll(vq, NULL);
2408 KASSERT(c == cookie, ("unexpected control command response"));
2409 }
2410
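/*
 * Tell the host our MAC address via the control virtqueue. The command
 * occupies three descriptors:
 *
 *	[class/cmd header] [6-byte MAC] [1-byte ack]
 *
 * and is submitted with two readable segments and one writable segment;
 * the host writes VIRTIO_NET_OK into the ack byte on success.
 */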
2411 static int
2412 vtnet_ctrl_mac_cmd(struct vtnet_softc *sc, uint8_t *hwaddr)
2413 {
2414 struct {
2415 struct virtio_net_ctrl_hdr hdr __aligned(2);
2416 uint8_t pad1;
2417 char aligned_hwaddr[ETHER_ADDR_LEN] __aligned(8);
2418 uint8_t pad2;
2419 uint8_t ack;
2420 } s;
2421 struct sglist_seg segs[3];
2422 struct sglist sg;
2423 int error;
2424
2425 s.hdr.class = VIRTIO_NET_CTRL_MAC;
2426 s.hdr.cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET;
2427 s.ack = VIRTIO_NET_ERR;
2428
2429 /* Copy the MAC address into physically contiguous memory. */
2430 memcpy(s.aligned_hwaddr, hwaddr, ETHER_ADDR_LEN);
2431
2432 sglist_init(&sg, 3, segs);
2433 error = 0;
2434 error |= sglist_append(&sg, &s.hdr,
2435 sizeof(struct virtio_net_ctrl_hdr));
2436 error |= sglist_append(&sg, s.aligned_hwaddr, ETHER_ADDR_LEN);
2437 error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
2438 KASSERT(error == 0 && sg.sg_nseg == 3,
2439 ("%s: error %d adding set MAC msg to sglist", __func__, error));
2440
2441 vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
2442
2443 return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
2444 }
2445
2446 static void
2447 vtnet_rx_filter(struct vtnet_softc *sc)
2448 {
2449 device_t dev;
2450 struct ifnet *ifp;
2451
2452 dev = sc->vtnet_dev;
2453 ifp = sc->vtnet_ifp;
2454
2455 ASSERT_IFNET_SERIALIZED_ALL(ifp);
2456 KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX,
2457 ("CTRL_RX feature not negotiated"));
2458
2459 if (vtnet_set_promisc(sc, ifp->if_flags & IFF_PROMISC) != 0)
2460 device_printf(dev, "cannot %s promiscuous mode\n",
2461 (ifp->if_flags & IFF_PROMISC) ? "enable" : "disable");
2462
2463 if (vtnet_set_allmulti(sc, ifp->if_flags & IFF_ALLMULTI) != 0)
2464 device_printf(dev, "cannot %s all-multicast mode\n",
2465 (ifp->if_flags & IFF_ALLMULTI) ? "enable" : "disable");
2466 }
2467
2468 static int
2469 vtnet_ctrl_rx_cmd(struct vtnet_softc *sc, int cmd, int on)
2470 {
2471 struct sglist_seg segs[3];
2472 struct sglist sg;
2473 struct {
2474 struct virtio_net_ctrl_hdr hdr __aligned(2);
2475 uint8_t pad1;
2476 uint8_t onoff;
2477 uint8_t pad2;
2478 uint8_t ack;
2479 } s;
2480 int error;
2481
2482 KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX,
2483 ("%s: CTRL_RX feature not negotiated", __func__));
2484
2485 s.hdr.class = VIRTIO_NET_CTRL_RX;
2486 s.hdr.cmd = cmd;
2487 s.onoff = !!on;
2488 s.ack = VIRTIO_NET_ERR;
2489
2490 sglist_init(&sg, 3, segs);
2491 error = 0;
2492 error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
2493 error |= sglist_append(&sg, &s.onoff, sizeof(uint8_t));
2494 error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
2495 KASSERT(error == 0 && sg.sg_nseg == 3,
2496 ("%s: error %d adding Rx message to sglist", __func__, error));
2497
2498 vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
2499
2500 return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
2501 }
2502
2503 static int
2504 vtnet_set_promisc(struct vtnet_softc *sc, int on)
2505 {
2506
2507 return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_PROMISC, on));
2508 }
2509
2510 static int
2511 vtnet_set_allmulti(struct vtnet_softc *sc, int on)
2512 {
2513
2514 return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_ALLMULTI, on));
2515 }
2516
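/*
 * Program the host's unicast and multicast MAC filter tables from the
 * interface address lists. If either table would exceed
 * VTNET_MAX_MAC_ENTRIES, fall back to promiscuous or all-multicast
 * mode instead.
 */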
2517 static void
2518 vtnet_rx_filter_mac(struct vtnet_softc *sc)
2519 {
2520 struct virtio_net_ctrl_hdr hdr __aligned(2);
2521 struct vtnet_mac_filter *filter;
2522 struct sglist_seg segs[4];
2523 struct sglist sg;
2524 struct ifnet *ifp;
2525 struct ifaddr *ifa;
2526 struct ifaddr_container *ifac;
2527 struct ifmultiaddr *ifma;
2528 int ucnt, mcnt, promisc, allmulti, error;
2529 uint8_t ack;
2530
2531 ifp = sc->vtnet_ifp;
2532 ucnt = 0;
2533 mcnt = 0;
2534 promisc = 0;
2535 allmulti = 0;
2536
2537 ASSERT_IFNET_SERIALIZED_ALL(ifp);
2538 KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX,
2539 ("%s: CTRL_RX feature not negotiated", __func__));
2540
2541 /* Use the MAC filtering table allocated in vtnet_attach. */
2542 filter = sc->vtnet_macfilter;
2543 memset(filter, 0, sizeof(struct vtnet_mac_filter));
2544
2545 /* Unicast MAC addresses: */
2546 /* FreeBSD takes if_addr_rlock() here; this walk runs under the ifnet serializer. */
2547 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
2548 ifa = ifac->ifa;
2549 if (ifa->ifa_addr->sa_family != AF_LINK)
2550 continue;
2551 else if (memcmp(LLADDR((struct sockaddr_dl *)ifa->ifa_addr),
2552 sc->vtnet_hwaddr, ETHER_ADDR_LEN) == 0)
2553 continue;
2554 else if (ucnt == VTNET_MAX_MAC_ENTRIES) {
2555 promisc = 1;
2556 break;
2557 }
2558
2559 bcopy(LLADDR((struct sockaddr_dl *)ifa->ifa_addr),
2560 &filter->vmf_unicast.macs[ucnt], ETHER_ADDR_LEN);
2561 ucnt++;
2562 }
2564
2565 if (promisc != 0) {
2566 filter->vmf_unicast.nentries = 0;
2567 if_printf(ifp, "more than %d MAC addresses assigned, "
2568 "falling back to promiscuous mode\n",
2569 VTNET_MAX_MAC_ENTRIES);
2570 } else
2571 filter->vmf_unicast.nentries = ucnt;
2572
2573 /* Multicast MAC addresses: */
2574 /* FreeBSD takes if_maddr_rlock() here; likewise covered by the serializer. */
2575 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2576 if (ifma->ifma_addr->sa_family != AF_LINK)
2577 continue;
2578 else if (mcnt == VTNET_MAX_MAC_ENTRIES) {
2579 allmulti = 1;
2580 break;
2581 }
2582
2583 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
2584 &filter->vmf_multicast.macs[mcnt], ETHER_ADDR_LEN);
2585 mcnt++;
2586 }
2588
2589 if (allmulti != 0) {
2590 filter->vmf_multicast.nentries = 0;
2591 if_printf(ifp, "more than %d multicast MAC addresses "
2592 "assigned, falling back to all-multicast mode\n",
2593 VTNET_MAX_MAC_ENTRIES);
2594 } else
2595 filter->vmf_multicast.nentries = mcnt;
2596
2597 if (promisc != 0 && allmulti != 0)
2598 goto out;
2599
2600 hdr.class = VIRTIO_NET_CTRL_MAC;
2601 hdr.cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;
2602 ack = VIRTIO_NET_ERR;
2603
2604 sglist_init(&sg, 4, segs);
2605 error = 0;
2606 error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr));
2607 error |= sglist_append(&sg, &filter->vmf_unicast,
2608 sizeof(uint32_t) + filter->vmf_unicast.nentries * ETHER_ADDR_LEN);
2609 error |= sglist_append(&sg, &filter->vmf_multicast,
2610 sizeof(uint32_t) + filter->vmf_multicast.nentries * ETHER_ADDR_LEN);
2611 error |= sglist_append(&sg, &ack, sizeof(uint8_t));
2612 KASSERT(error == 0 && sg.sg_nseg == 4,
2613 ("%s: error %d adding MAC filter msg to sglist", __func__, error));
2614
2615 vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1);
2616
2617 if (ack != VIRTIO_NET_OK)
2618 if_printf(ifp, "error setting host MAC filter table\n");
2619
2620 out:
2621 if (promisc != 0 && vtnet_set_promisc(sc, 1) != 0)
2622 if_printf(ifp, "cannot enable promiscuous mode\n");
2623 if (allmulti != 0 && vtnet_set_allmulti(sc, 1) != 0)
2624 if_printf(ifp, "cannot enable all-multicast mode\n");
2625 }
2626
2627 static int
2628 vtnet_exec_vlan_filter(struct vtnet_softc *sc, int add, uint16_t tag)
2629 {
2630 struct sglist_seg segs[3];
2631 struct sglist sg;
2632 struct {
2633 struct virtio_net_ctrl_hdr hdr __aligned(2);
2634 uint8_t pad1;
2635 uint16_t tag;
2636 uint8_t pad2;
2637 uint8_t ack;
2638 } s;
2639 int error;
2640
2641 s.hdr.class = VIRTIO_NET_CTRL_VLAN;
2642 s.hdr.cmd = add ? VIRTIO_NET_CTRL_VLAN_ADD : VIRTIO_NET_CTRL_VLAN_DEL;
2643 s.tag = tag;
2644 s.ack = VIRTIO_NET_ERR;
2645
2646 sglist_init(&sg, 3, segs);
2647 error = 0;
2648 error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
2649 error |= sglist_append(&sg, &s.tag, sizeof(uint16_t));
2650 error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
2651 KASSERT(error == 0 && sg.sg_nseg == 3,
2652 ("%s: error %d adding VLAN message to sglist", __func__, error));
2653
2654 vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
2655
2656 return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
2657 }
2658
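/*
 * Replay the shadow VLAN table into the host filter, e.g. after a
 * device reset. Each set bit in vtnet_vlan_shadow corresponds to one
 * configured VLAN tag.
 */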
2659 static void
2660 vtnet_rx_filter_vlan(struct vtnet_softc *sc)
2661 {
2662 uint32_t w;
2663 uint16_t tag;
2664 int i, bit, nvlans;
2665
2666 ASSERT_IFNET_SERIALIZED_ALL(sc->vtnet_ifp);
2667 KASSERT(sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER,
2668 ("%s: VLAN_FILTER feature not negotiated", __func__));
2669
2670 nvlans = sc->vtnet_nvlans;
2671
2672 /* Enable the filter for each configured VLAN. */
2673 for (i = 0; i < VTNET_VLAN_SHADOW_SIZE && nvlans > 0; i++) {
2674 w = sc->vtnet_vlan_shadow[i];
2675 while ((bit = ffs(w) - 1) != -1) {
2676 w &= ~(1 << bit);
2677 tag = sizeof(w) * CHAR_BIT * i + bit;
2678 nvlans--;
2679
2680 if (vtnet_exec_vlan_filter(sc, 1, tag) != 0) {
2681 device_printf(sc->vtnet_dev,
2682 "cannot enable VLAN %d filter\n", tag);
2683 }
2684 }
2685 }
2686
2687 KASSERT(nvlans == 0, ("VLAN count incorrect"));
2688 }
2689
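/*
 * Add or remove a single VLAN tag. The shadow bitmap is indexed with
 * tag >> 5 selecting the word and tag & 0x1f the bit, matching the
 * layout vtnet_rx_filter_vlan() walks when it replays the table.
 */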
2690 static void
2691 vtnet_update_vlan_filter(struct vtnet_softc *sc, int add, uint16_t tag)
2692 {
2693 struct ifnet *ifp;
2694 int idx, bit;
2695
2696 ifp = sc->vtnet_ifp;
2697 idx = (tag >> 5) & 0x7F;
2698 bit = tag & 0x1F;
2699
2700 if (tag == 0 || tag > 4095)
2701 return;
2702
2703 ifnet_serialize_all(ifp);
2704
2705 /* Update shadow VLAN table. */
2706 if (add) {
2707 sc->vtnet_nvlans++;
2708 sc->vtnet_vlan_shadow[idx] |= (1 << bit);
2709 } else {
2710 sc->vtnet_nvlans--;
2711 sc->vtnet_vlan_shadow[idx] &= ~(1 << bit);
2712 }
2713
2714 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER &&
2715 vtnet_exec_vlan_filter(sc, add, tag) != 0) {
2716 device_printf(sc->vtnet_dev,
2717 "cannot %s VLAN %d %s the host filter table\n",
2718 add ? "add" : "remove", tag, add ? "to" : "from");
2719 }
2720
2721 ifnet_deserialize_all(ifp);
2722 }
2723
2724 static void
2725 vtnet_register_vlan(void *arg, struct ifnet *ifp, uint16_t tag)
2726 {
2727
2728 if (ifp->if_softc != arg)
2729 return;
2730
2731 vtnet_update_vlan_filter(arg, 1, tag);
2732 }
2733
2734 static void
2735 vtnet_unregister_vlan(void *arg, struct ifnet *ifp, uint16_t tag)
2736 {
2737
2738 if (ifp->if_softc != arg)
2739 return;
2740
2741 vtnet_update_vlan_filter(arg, 0, tag);
2742 }
2743
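/*
 * Media is emulated by the host, so there is nothing to program; the
 * update method only validates the media type and the status method
 * reports the current link state.
 */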
2744 static int
2745 vtnet_ifmedia_upd(struct ifnet *ifp)
2746 {
2747 struct vtnet_softc *sc;
2748 struct ifmedia *ifm;
2749
2750 sc = ifp->if_softc;
2751 ifm = &sc->vtnet_media;
2752
2753 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2754 return (EINVAL);
2755
2756 return (0);
2757 }
2758
2759 static void
2760 vtnet_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2761 {
2762 struct vtnet_softc *sc;
2763
2764 sc = ifp->if_softc;
2765
2766 ifmr->ifm_status = IFM_AVALID;
2767 ifmr->ifm_active = IFM_ETHER;
2768
2769 if (vtnet_is_link_up(sc) != 0) {
2770 ifmr->ifm_status |= IFM_ACTIVE;
2771 ifmr->ifm_active |= VTNET_MEDIATYPE;
2772 } else
2773 ifmr->ifm_active |= IFM_NONE;
2774 }
2775
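/* Publish the driver statistics as read-only sysctl nodes. */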
2776 static void
2777 vtnet_add_statistics(struct vtnet_softc *sc)
2778 {
2779 device_t dev;
2780 struct vtnet_statistics *stats;
2781 struct sysctl_ctx_list *ctx;
2782 struct sysctl_oid *tree;
2783 struct sysctl_oid_list *child;
2784
2785 dev = sc->vtnet_dev;
2786 stats = &sc->vtnet_stats;
2787 ctx = device_get_sysctl_ctx(dev);
2788 tree = device_get_sysctl_tree(dev);
2789 child = SYSCTL_CHILDREN(tree);
2790
2791 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "mbuf_alloc_failed",
2792 CTLFLAG_RD, &stats->mbuf_alloc_failed, 0,
2793 "Mbuf cluster allocation failures");
2794
2795 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_frame_too_large",
2796 CTLFLAG_RD, &stats->rx_frame_too_large, 0,
2797 "Received frame larger than the mbuf chain");
2798 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_enq_replacement_failed",
2799 CTLFLAG_RD, &stats->rx_enq_replacement_failed, 0,
2800 "Enqueuing the replacement receive mbuf failed");
2801 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_mergeable_failed",
2802 CTLFLAG_RD, &stats->rx_mergeable_failed, 0,
2803 "Mergeable buffers receive failures");
2804 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_ethtype",
2805 CTLFLAG_RD, &stats->rx_csum_bad_ethtype, 0,
2806 "Received checksum offloaded buffer with unsupported "
2807 "Ethernet type");
2808 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_ipproto",
2809 CTLFLAG_RD, &stats->rx_csum_bad_ipproto, 0,
2810 "Received checksum offloaded buffer with incorrect IP protocol");
2811 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_offset",
2812 CTLFLAG_RD, &stats->rx_csum_bad_offset, 0,
2813 "Received checksum offloaded buffer with incorrect offset");
2814 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_failed",
2815 CTLFLAG_RD, &stats->rx_csum_failed, 0,
2816 "Received buffer checksum offload failed");
2817 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_offloaded",
2818 CTLFLAG_RD, &stats->rx_csum_offloaded, 0,
2819 "Received buffer checksum offload succeeded");
2820 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_task_rescheduled",
2821 CTLFLAG_RD, &stats->rx_task_rescheduled, 0,
2822 "Times the receive interrupt task rescheduled itself");
2823
2824 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_bad_ethtype",
2825 CTLFLAG_RD, &stats->tx_csum_bad_ethtype, 0,
2826 "Aborted transmit of checksum offloaded buffer with unknown "
2827 "Ethernet type");
2828 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_bad_ethtype",
2829 CTLFLAG_RD, &stats->tx_tso_bad_ethtype, 0,
2830 "Aborted transmit of TSO buffer with unknown Ethernet type");
2831 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_defragged",
2832 CTLFLAG_RD, &stats->tx_defragged, 0,
2833 "Transmit mbufs defragged");
2834 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_defrag_failed",
2835 CTLFLAG_RD, &stats->tx_defrag_failed, 0,
2836 "Aborted transmit of buffer because defrag failed");
2837 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_offloaded",
2838 CTLFLAG_RD, &stats->tx_csum_offloaded, 0,
2839 "Offloaded checksum of transmitted buffer");
2840 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_offloaded",
2841 CTLFLAG_RD, &stats->tx_tso_offloaded, 0,
2842 "Segmentation offload of transmitted buffer");
2843 SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_task_rescheduled",
2844 CTLFLAG_RD, &stats->tx_task_rescheduled, 0,
2845 "Times the transmit interrupt task rescheduled itself");
2846 }
2847
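/*
 * Virtqueue interrupt toggles. With VTNET_TX_INTR_MODERATION defined,
 * the Tx interrupt is left disabled and completed transmits are
 * instead reaped from vtnet_start() once the ring is half full.
 */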
2848 static int
2849 vtnet_enable_rx_intr(struct vtnet_softc *sc)
2850 {
2851
2852 return (virtqueue_enable_intr(sc->vtnet_rx_vq));
2853 }
2854
2855 static void
2856 vtnet_disable_rx_intr(struct vtnet_softc *sc)
2857 {
2858
2859 virtqueue_disable_intr(sc->vtnet_rx_vq);
2860 }
2861
2862 static int
2863 vtnet_enable_tx_intr(struct vtnet_softc *sc)
2864 {
2865
2866 #ifdef VTNET_TX_INTR_MODERATION
2867 return (0);
2868 #else
2869 return (virtqueue_enable_intr(sc->vtnet_tx_vq));
2870 #endif
2871 }
2872
2873 static void
2874 vtnet_disable_tx_intr(struct vtnet_softc *sc)
2875 {
2876
2877 virtqueue_disable_intr(sc->vtnet_tx_vq);
2878 }
2879