1 /*
2 * Virtio Network Device
3 *
4 * Copyright IBM, Corp. 2007
5 *
6 * Authors:
7 * Anthony Liguori <aliguori@us.ibm.com>
8 *
9 * This work is licensed under the terms of the GNU GPL, version 2. See
10 * the COPYING file in the top-level directory.
11 *
12 */
13
14 #include "qemu/osdep.h"
15 #include "qemu/atomic.h"
16 #include "qemu/iov.h"
17 #include "qemu/log.h"
18 #include "qemu/main-loop.h"
19 #include "qemu/module.h"
20 #include "hw/virtio/virtio.h"
21 #include "net/net.h"
22 #include "net/checksum.h"
23 #include "net/tap.h"
24 #include "qemu/error-report.h"
25 #include "qemu/timer.h"
26 #include "qemu/option.h"
27 #include "qemu/option_int.h"
28 #include "qemu/config-file.h"
29 #include "qapi/qmp/qdict.h"
30 #include "hw/virtio/virtio-net.h"
31 #include "net/vhost_net.h"
32 #include "net/announce.h"
33 #include "hw/virtio/virtio-bus.h"
34 #include "qapi/error.h"
35 #include "qapi/qapi-events-net.h"
36 #include "hw/qdev-properties.h"
37 #include "qapi/qapi-types-migration.h"
38 #include "qapi/qapi-events-migration.h"
39 #include "hw/virtio/virtio-access.h"
40 #include "migration/misc.h"
41 #include "standard-headers/linux/ethtool.h"
42 #include "sysemu/sysemu.h"
43 #include "trace.h"
44 #include "monitor/qdev.h"
45 #include "monitor/monitor.h"
46 #include "hw/pci/pci_device.h"
47 #include "net_rx_pkt.h"
48 #include "hw/virtio/vhost.h"
49 #include "sysemu/qtest.h"
50
51 #define VIRTIO_NET_VM_VERSION 11
52
53 /* previously fixed value */
54 #define VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE 256
55 #define VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE 256
56
57 /* for now, only allow larger queue_pairs; with virtio-1, guest can downsize */
58 #define VIRTIO_NET_RX_QUEUE_MIN_SIZE VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE
59 #define VIRTIO_NET_TX_QUEUE_MIN_SIZE VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE
60
61 #define VIRTIO_NET_IP4_ADDR_SIZE 8 /* ipv4 saddr + daddr */
62
63 #define VIRTIO_NET_TCP_FLAG 0x3F
64 #define VIRTIO_NET_TCP_HDR_LENGTH 0xF000
65
66 /* IPv4 max payload, 16 bits in the header */
67 #define VIRTIO_NET_MAX_IP4_PAYLOAD (65535 - sizeof(struct ip_header))
68 #define VIRTIO_NET_MAX_TCP_PAYLOAD 65535
69
70 /* header length value in ip header without option */
71 #define VIRTIO_NET_IP4_HEADER_LENGTH 5
72
73 #define VIRTIO_NET_IP6_ADDR_SIZE 32 /* ipv6 saddr + daddr */
74 #define VIRTIO_NET_MAX_IP6_PAYLOAD VIRTIO_NET_MAX_TCP_PAYLOAD
75
76 /* Purge coalesced packets timer interval. This value affects performance
77 a lot and should be tuned carefully: '300000' (300us) is the recommended
78 value to pass the WHQL test, while '50000' can gain 2x netperf throughput
79 with tso/gso/gro 'off'. */
80 #define VIRTIO_NET_RSC_DEFAULT_INTERVAL 300000
81
82 #define VIRTIO_NET_RSS_SUPPORTED_HASHES (VIRTIO_NET_RSS_HASH_TYPE_IPv4 | \
83 VIRTIO_NET_RSS_HASH_TYPE_TCPv4 | \
84 VIRTIO_NET_RSS_HASH_TYPE_UDPv4 | \
85 VIRTIO_NET_RSS_HASH_TYPE_IPv6 | \
86 VIRTIO_NET_RSS_HASH_TYPE_TCPv6 | \
87 VIRTIO_NET_RSS_HASH_TYPE_UDPv6 | \
88 VIRTIO_NET_RSS_HASH_TYPE_IP_EX | \
89 VIRTIO_NET_RSS_HASH_TYPE_TCP_EX | \
90 VIRTIO_NET_RSS_HASH_TYPE_UDP_EX)
91
92 static const VirtIOFeature feature_sizes[] = {
93 {.flags = 1ULL << VIRTIO_NET_F_MAC,
94 .end = endof(struct virtio_net_config, mac)},
95 {.flags = 1ULL << VIRTIO_NET_F_STATUS,
96 .end = endof(struct virtio_net_config, status)},
97 {.flags = 1ULL << VIRTIO_NET_F_MQ,
98 .end = endof(struct virtio_net_config, max_virtqueue_pairs)},
99 {.flags = 1ULL << VIRTIO_NET_F_MTU,
100 .end = endof(struct virtio_net_config, mtu)},
101 {.flags = 1ULL << VIRTIO_NET_F_SPEED_DUPLEX,
102 .end = endof(struct virtio_net_config, duplex)},
103 {.flags = (1ULL << VIRTIO_NET_F_RSS) | (1ULL << VIRTIO_NET_F_HASH_REPORT),
104 .end = endof(struct virtio_net_config, supported_hash_types)},
105 {}
106 };
107
108 static const VirtIOConfigSizeParams cfg_size_params = {
109 .min_size = endof(struct virtio_net_config, mac),
110 .max_size = sizeof(struct virtio_net_config),
111 .feature_sizes = feature_sizes
112 };
113
114 static VirtIONetQueue *virtio_net_get_subqueue(NetClientState *nc)
115 {
116 VirtIONet *n = qemu_get_nic_opaque(nc);
117
118 return &n->vqs[nc->queue_index];
119 }
120
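/*
 * Virtqueues come in RX/TX pairs: queue pair i uses virtqueue 2*i for RX
 * and 2*i + 1 for TX, so dividing a virtqueue index by two yields the
 * queue pair index.
 */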
121 static int vq2q(int queue_index)
122 {
123 return queue_index / 2;
124 }
125
126 static void flush_or_purge_queued_packets(NetClientState *nc)
127 {
128 if (!nc->peer) {
129 return;
130 }
131
132 qemu_flush_or_purge_queued_packets(nc->peer, true);
133 assert(!virtio_net_get_subqueue(nc)->async_tx.elem);
134 }
135
136 /* TODO
137 * - we could suppress RX interrupt if we were so inclined.
138 */
139
140 static void virtio_net_get_config(VirtIODevice *vdev, uint8_t *config)
141 {
142 VirtIONet *n = VIRTIO_NET(vdev);
143 struct virtio_net_config netcfg;
144 NetClientState *nc = qemu_get_queue(n->nic);
145 static const MACAddr zero = { .a = { 0, 0, 0, 0, 0, 0 } };
146
147 int ret = 0;
148 memset(&netcfg, 0 , sizeof(struct virtio_net_config));
149 virtio_stw_p(vdev, &netcfg.status, n->status);
150 virtio_stw_p(vdev, &netcfg.max_virtqueue_pairs, n->max_queue_pairs);
151 virtio_stw_p(vdev, &netcfg.mtu, n->net_conf.mtu);
152 memcpy(netcfg.mac, n->mac, ETH_ALEN);
153 virtio_stl_p(vdev, &netcfg.speed, n->net_conf.speed);
154 netcfg.duplex = n->net_conf.duplex;
155 netcfg.rss_max_key_size = VIRTIO_NET_RSS_MAX_KEY_SIZE;
156 virtio_stw_p(vdev, &netcfg.rss_max_indirection_table_length,
157 virtio_host_has_feature(vdev, VIRTIO_NET_F_RSS) ?
158 VIRTIO_NET_RSS_MAX_TABLE_LEN : 1);
159 virtio_stl_p(vdev, &netcfg.supported_hash_types,
160 VIRTIO_NET_RSS_SUPPORTED_HASHES);
161 memcpy(config, &netcfg, n->config_size);
162
163 /*
164 * Is this VDPA? No peer means not VDPA: there's no way to
165 * disconnect/reconnect a VDPA peer.
166 */
167 if (nc->peer && nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_VDPA) {
168 ret = vhost_net_get_config(get_vhost_net(nc->peer), (uint8_t *)&netcfg,
169 n->config_size);
170 if (ret == -1) {
171 return;
172 }
173
174 /*
175 * Some NIC/kernel combinations present 0 as the mac address. As that
176 * is not a legal address, try to proceed with the address from the
177 * QEMU command line in the hope that the address has been configured
178 * correctly elsewhere - just not reported by the device.
179 */
180 if (memcmp(&netcfg.mac, &zero, sizeof(zero)) == 0) {
181 info_report("Zero hardware mac address detected. Ignoring.");
182 memcpy(netcfg.mac, n->mac, ETH_ALEN);
183 }
184
185 netcfg.status |= virtio_tswap16(vdev,
186 n->status & VIRTIO_NET_S_ANNOUNCE);
187 memcpy(config, &netcfg, n->config_size);
188 }
189 }
190
191 static void virtio_net_set_config(VirtIODevice *vdev, const uint8_t *config)
192 {
193 VirtIONet *n = VIRTIO_NET(vdev);
194 struct virtio_net_config netcfg = {};
195 NetClientState *nc = qemu_get_queue(n->nic);
196
197 memcpy(&netcfg, config, n->config_size);
198
199 if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR) &&
200 !virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1) &&
201 memcmp(netcfg.mac, n->mac, ETH_ALEN)) {
202 memcpy(n->mac, netcfg.mac, ETH_ALEN);
203 qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
204 }
205
206 /*
207 * Is this VDPA? No peer means not VDPA: there's no way to
208 * disconnect/reconnect a VDPA peer.
209 */
210 if (nc->peer && nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_VDPA) {
211 vhost_net_set_config(get_vhost_net(nc->peer),
212 (uint8_t *)&netcfg, 0, n->config_size,
213 VHOST_SET_CONFIG_TYPE_FRONTEND);
214 }
215 }
216
217 static bool virtio_net_started(VirtIONet *n, uint8_t status)
218 {
219 VirtIODevice *vdev = VIRTIO_DEVICE(n);
220 return (status & VIRTIO_CONFIG_S_DRIVER_OK) &&
221 (n->status & VIRTIO_NET_S_LINK_UP) && vdev->vm_running;
222 }
223
224 static void virtio_net_announce_notify(VirtIONet *net)
225 {
226 VirtIODevice *vdev = VIRTIO_DEVICE(net);
227 trace_virtio_net_announce_notify();
228
229 net->status |= VIRTIO_NET_S_ANNOUNCE;
230 virtio_notify_config(vdev);
231 }
232
233 static void virtio_net_announce_timer(void *opaque)
234 {
235 VirtIONet *n = opaque;
236 trace_virtio_net_announce_timer(n->announce_timer.round);
237
238 n->announce_timer.round--;
239 virtio_net_announce_notify(n);
240 }
241
242 static void virtio_net_announce(NetClientState *nc)
243 {
244 VirtIONet *n = qemu_get_nic_opaque(nc);
245 VirtIODevice *vdev = VIRTIO_DEVICE(n);
246
247 /*
248 * Make sure the virtio migration announcement timer isn't running.
249 * If it is, let it trigger the announcement so that we do not cause
250 * confusion.
251 */
252 if (n->announce_timer.round) {
253 return;
254 }
255
256 if (virtio_vdev_has_feature(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE) &&
257 virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) {
258 virtio_net_announce_notify(n);
259 }
260 }
261
262 static void virtio_net_vhost_status(VirtIONet *n, uint8_t status)
263 {
264 VirtIODevice *vdev = VIRTIO_DEVICE(n);
265 NetClientState *nc = qemu_get_queue(n->nic);
266 int queue_pairs = n->multiqueue ? n->max_queue_pairs : 1;
267 int cvq = virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) ?
268 n->max_ncs - n->max_queue_pairs : 0;
269
270 if (!get_vhost_net(nc->peer)) {
271 return;
272 }
273
274 if ((virtio_net_started(n, status) && !nc->peer->link_down) ==
275 !!n->vhost_started) {
276 return;
277 }
278 if (!n->vhost_started) {
279 int r, i;
280
281 if (n->needs_vnet_hdr_swap) {
282 error_report("backend does not support %s vnet headers; "
283 "falling back on userspace virtio",
284 virtio_is_big_endian(vdev) ? "BE" : "LE");
285 return;
286 }
287
288 /* Any packets outstanding? Purge them to avoid touching rings
289 * when vhost is running.
290 */
291 for (i = 0; i < queue_pairs; i++) {
292 NetClientState *qnc = qemu_get_subqueue(n->nic, i);
293
294 /* Purge both directions: TX and RX. */
295 qemu_net_queue_purge(qnc->peer->incoming_queue, qnc);
296 qemu_net_queue_purge(qnc->incoming_queue, qnc->peer);
297 }
298
299 if (virtio_has_feature(vdev->guest_features, VIRTIO_NET_F_MTU)) {
300 r = vhost_net_set_mtu(get_vhost_net(nc->peer), n->net_conf.mtu);
301 if (r < 0) {
302 error_report("%uBytes MTU not supported by the backend",
303 n->net_conf.mtu);
304
305 return;
306 }
307 }
308
309 n->vhost_started = 1;
310 r = vhost_net_start(vdev, n->nic->ncs, queue_pairs, cvq);
311 if (r < 0) {
312 error_report("unable to start vhost net: %d: "
313 "falling back on userspace virtio", -r);
314 n->vhost_started = 0;
315 }
316 } else {
317 vhost_net_stop(vdev, n->nic->ncs, queue_pairs, cvq);
318 n->vhost_started = 0;
319 }
320 }
321
322 static int virtio_net_set_vnet_endian_one(VirtIODevice *vdev,
323 NetClientState *peer,
324 bool enable)
325 {
326 if (virtio_is_big_endian(vdev)) {
327 return qemu_set_vnet_be(peer, enable);
328 } else {
329 return qemu_set_vnet_le(peer, enable);
330 }
331 }
332
333 static bool virtio_net_set_vnet_endian(VirtIODevice *vdev, NetClientState *ncs,
334 int queue_pairs, bool enable)
335 {
336 int i;
337
338 for (i = 0; i < queue_pairs; i++) {
339 if (virtio_net_set_vnet_endian_one(vdev, ncs[i].peer, enable) < 0 &&
340 enable) {
341 while (--i >= 0) {
342 virtio_net_set_vnet_endian_one(vdev, ncs[i].peer, false);
343 }
344
345 return true;
346 }
347 }
348
349 return false;
350 }
351
352 static void virtio_net_vnet_endian_status(VirtIONet *n, uint8_t status)
353 {
354 VirtIODevice *vdev = VIRTIO_DEVICE(n);
355 int queue_pairs = n->multiqueue ? n->max_queue_pairs : 1;
356
357 if (virtio_net_started(n, status)) {
358 /* Before using the device, we tell the network backend about the
359 * endianness to use when parsing vnet headers. If the backend
360 * can't do it, we fallback onto fixing the headers in the core
361 * virtio-net code.
362 */
363 n->needs_vnet_hdr_swap = n->has_vnet_hdr &&
364 virtio_net_set_vnet_endian(vdev, n->nic->ncs,
365 queue_pairs, true);
366 } else if (virtio_net_started(n, vdev->status)) {
367 /* After using the device, we need to reset the network backend to
368 * the default (guest native endianness), otherwise the guest may
369 * lose network connectivity if it is rebooted into a different
370 * endianness.
371 */
372 virtio_net_set_vnet_endian(vdev, n->nic->ncs, queue_pairs, false);
373 }
374 }
375
376 static void virtio_net_drop_tx_queue_data(VirtIODevice *vdev, VirtQueue *vq)
377 {
378 unsigned int dropped = virtqueue_drop_all(vq);
379 if (dropped) {
380 virtio_notify(vdev, vq);
381 }
382 }
383
384 static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
385 {
386 VirtIONet *n = VIRTIO_NET(vdev);
387 VirtIONetQueue *q;
388 int i;
389 uint8_t queue_status;
390
391 virtio_net_vnet_endian_status(n, status);
392 virtio_net_vhost_status(n, status);
393
394 for (i = 0; i < n->max_queue_pairs; i++) {
395 NetClientState *ncs = qemu_get_subqueue(n->nic, i);
396 bool queue_started;
397 q = &n->vqs[i];
398
399 if ((!n->multiqueue && i != 0) || i >= n->curr_queue_pairs) {
400 queue_status = 0;
401 } else {
402 queue_status = status;
403 }
404 queue_started =
405 virtio_net_started(n, queue_status) && !n->vhost_started;
406
407 if (queue_started) {
408 qemu_flush_queued_packets(ncs);
409 }
410
411 if (!q->tx_waiting) {
412 continue;
413 }
414
415 if (queue_started) {
416 if (q->tx_timer) {
417 timer_mod(q->tx_timer,
418 qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
419 } else {
420 qemu_bh_schedule(q->tx_bh);
421 }
422 } else {
423 if (q->tx_timer) {
424 timer_del(q->tx_timer);
425 } else {
426 qemu_bh_cancel(q->tx_bh);
427 }
428 if ((n->status & VIRTIO_NET_S_LINK_UP) == 0 &&
429 (queue_status & VIRTIO_CONFIG_S_DRIVER_OK) &&
430 vdev->vm_running) {
431 /* If tx is waiting, we likely have some packets in the tx queue
432  * and notification is disabled */
433 q->tx_waiting = 0;
434 virtio_queue_set_notification(q->tx_vq, 1);
435 virtio_net_drop_tx_queue_data(vdev, q->tx_vq);
436 }
437 }
438 }
439 }
440
441 static void virtio_net_set_link_status(NetClientState *nc)
442 {
443 VirtIONet *n = qemu_get_nic_opaque(nc);
444 VirtIODevice *vdev = VIRTIO_DEVICE(n);
445 uint16_t old_status = n->status;
446
447 if (nc->link_down)
448 n->status &= ~VIRTIO_NET_S_LINK_UP;
449 else
450 n->status |= VIRTIO_NET_S_LINK_UP;
451
452 if (n->status != old_status)
453 virtio_notify_config(vdev);
454
455 virtio_net_set_status(vdev, vdev->status);
456 }
457
458 static void rxfilter_notify(NetClientState *nc)
459 {
460 VirtIONet *n = qemu_get_nic_opaque(nc);
461
462 if (nc->rxfilter_notify_enabled) {
463 char *path = object_get_canonical_path(OBJECT(n->qdev));
464 qapi_event_send_nic_rx_filter_changed(n->netclient_name, path);
465 g_free(path);
466
467 /* disable event notification to avoid events flooding */
468 nc->rxfilter_notify_enabled = 0;
469 }
470 }
471
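/*
 * Build a QAPI intList of all VLAN ids currently allowed by the VLAN
 * filter bitmap, for reporting through the rx-filter query.
 */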
472 static intList *get_vlan_table(VirtIONet *n)
473 {
474 intList *list;
475 int i, j;
476
477 list = NULL;
478 for (i = 0; i < MAX_VLAN >> 5; i++) {
479 for (j = 0; n->vlans[i] && j <= 0x1f; j++) {
480 if (n->vlans[i] & (1U << j)) {
481 QAPI_LIST_PREPEND(list, (i << 5) + j);
482 }
483 }
484 }
485
486 return list;
487 }
488
489 static RxFilterInfo *virtio_net_query_rxfilter(NetClientState *nc)
490 {
491 VirtIONet *n = qemu_get_nic_opaque(nc);
492 VirtIODevice *vdev = VIRTIO_DEVICE(n);
493 RxFilterInfo *info;
494 strList *str_list;
495 int i;
496
497 info = g_malloc0(sizeof(*info));
498 info->name = g_strdup(nc->name);
499 info->promiscuous = n->promisc;
500
501 if (n->nouni) {
502 info->unicast = RX_STATE_NONE;
503 } else if (n->alluni) {
504 info->unicast = RX_STATE_ALL;
505 } else {
506 info->unicast = RX_STATE_NORMAL;
507 }
508
509 if (n->nomulti) {
510 info->multicast = RX_STATE_NONE;
511 } else if (n->allmulti) {
512 info->multicast = RX_STATE_ALL;
513 } else {
514 info->multicast = RX_STATE_NORMAL;
515 }
516
517 info->broadcast_allowed = n->nobcast;
518 info->multicast_overflow = n->mac_table.multi_overflow;
519 info->unicast_overflow = n->mac_table.uni_overflow;
520
521 info->main_mac = qemu_mac_strdup_printf(n->mac);
522
523 str_list = NULL;
524 for (i = 0; i < n->mac_table.first_multi; i++) {
525 QAPI_LIST_PREPEND(str_list,
526 qemu_mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN));
527 }
528 info->unicast_table = str_list;
529
530 str_list = NULL;
531 for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) {
532 QAPI_LIST_PREPEND(str_list,
533 qemu_mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN));
534 }
535 info->multicast_table = str_list;
536 info->vlan_table = get_vlan_table(n);
537
538 if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VLAN)) {
539 info->vlan = RX_STATE_ALL;
540 } else if (!info->vlan_table) {
541 info->vlan = RX_STATE_NONE;
542 } else {
543 info->vlan = RX_STATE_NORMAL;
544 }
545
546 /* enable event notification after query */
547 nc->rxfilter_notify_enabled = 1;
548
549 return info;
550 }
551
552 static void virtio_net_queue_reset(VirtIODevice *vdev, uint32_t queue_index)
553 {
554 VirtIONet *n = VIRTIO_NET(vdev);
555 NetClientState *nc;
556
557 /* validate queue_index and skip for cvq */
558 if (queue_index >= n->max_queue_pairs * 2) {
559 return;
560 }
561
562 nc = qemu_get_subqueue(n->nic, vq2q(queue_index));
563
564 if (!nc->peer) {
565 return;
566 }
567
568 if (get_vhost_net(nc->peer) &&
569 nc->peer->info->type == NET_CLIENT_DRIVER_TAP) {
570 vhost_net_virtqueue_reset(vdev, nc, queue_index);
571 }
572
573 flush_or_purge_queued_packets(nc);
574 }
575
576 static void virtio_net_queue_enable(VirtIODevice *vdev, uint32_t queue_index)
577 {
578 VirtIONet *n = VIRTIO_NET(vdev);
579 NetClientState *nc;
580 int r;
581
582 /* validate queue_index and skip for cvq */
583 if (queue_index >= n->max_queue_pairs * 2) {
584 return;
585 }
586
587 nc = qemu_get_subqueue(n->nic, vq2q(queue_index));
588
589 if (!nc->peer || !vdev->vhost_started) {
590 return;
591 }
592
593 if (get_vhost_net(nc->peer) &&
594 nc->peer->info->type == NET_CLIENT_DRIVER_TAP) {
595 r = vhost_net_virtqueue_restart(vdev, nc, queue_index);
596 if (r < 0) {
597 error_report("unable to restart vhost net virtqueue: %d, "
598 "when resetting the queue", queue_index);
599 }
600 }
601 }
602
603 static void peer_test_vnet_hdr(VirtIONet *n)
604 {
605 NetClientState *nc = qemu_get_queue(n->nic);
606 if (!nc->peer) {
607 return;
608 }
609
610 n->has_vnet_hdr = qemu_has_vnet_hdr(nc->peer);
611 }
612
613 static int peer_has_vnet_hdr(VirtIONet *n)
614 {
615 return n->has_vnet_hdr;
616 }
617
618 static int peer_has_ufo(VirtIONet *n)
619 {
620 if (!peer_has_vnet_hdr(n))
621 return 0;
622
623 n->has_ufo = qemu_has_ufo(qemu_get_queue(n->nic)->peer);
624
625 return n->has_ufo;
626 }
627
628 static int peer_has_uso(VirtIONet *n)
629 {
630 if (!peer_has_vnet_hdr(n)) {
631 return 0;
632 }
633
634 return qemu_has_uso(qemu_get_queue(n->nic)->peer);
635 }
636
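/*
 * Recompute the guest-visible vnet header length from the negotiated
 * mergeable-RX-buffers / VERSION_1 / hash-report features, and program the
 * same header length into each peer backend that supports it.
 */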
637 static void virtio_net_set_mrg_rx_bufs(VirtIONet *n, int mergeable_rx_bufs,
638 int version_1, int hash_report)
639 {
640 int i;
641 NetClientState *nc;
642
643 n->mergeable_rx_bufs = mergeable_rx_bufs;
644
645 if (version_1) {
646 n->guest_hdr_len = hash_report ?
647 sizeof(struct virtio_net_hdr_v1_hash) :
648 sizeof(struct virtio_net_hdr_mrg_rxbuf);
649 n->rss_data.populate_hash = !!hash_report;
650 } else {
651 n->guest_hdr_len = n->mergeable_rx_bufs ?
652 sizeof(struct virtio_net_hdr_mrg_rxbuf) :
653 sizeof(struct virtio_net_hdr);
654 n->rss_data.populate_hash = false;
655 }
656
657 for (i = 0; i < n->max_queue_pairs; i++) {
658 nc = qemu_get_subqueue(n->nic, i);
659
660 if (peer_has_vnet_hdr(n) &&
661 qemu_has_vnet_hdr_len(nc->peer, n->guest_hdr_len)) {
662 qemu_set_vnet_hdr_len(nc->peer, n->guest_hdr_len);
663 n->host_hdr_len = n->guest_hdr_len;
664 }
665 }
666 }
667
668 static int virtio_net_max_tx_queue_size(VirtIONet *n)
669 {
670 NetClientState *peer = n->nic_conf.peers.ncs[0];
671
672 /*
673 * Backends other than vhost-user or vhost-vdpa don't support max queue
674 * size.
675 */
676 if (!peer) {
677 return VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE;
678 }
679
680 switch(peer->info->type) {
681 case NET_CLIENT_DRIVER_VHOST_USER:
682 case NET_CLIENT_DRIVER_VHOST_VDPA:
683 return VIRTQUEUE_MAX_SIZE;
684 default:
685 return VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE;
686 };
687 }
688
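/*
 * Enable subqueue 'index' on the peer: turn the vring on for vhost-user
 * backends and enable the matching tap queue when more than one queue
 * pair is configured.
 */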
689 static int peer_attach(VirtIONet *n, int index)
690 {
691 NetClientState *nc = qemu_get_subqueue(n->nic, index);
692
693 if (!nc->peer) {
694 return 0;
695 }
696
697 if (nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_USER) {
698 vhost_set_vring_enable(nc->peer, 1);
699 }
700
701 if (nc->peer->info->type != NET_CLIENT_DRIVER_TAP) {
702 return 0;
703 }
704
705 if (n->max_queue_pairs == 1) {
706 return 0;
707 }
708
709 return tap_enable(nc->peer);
710 }
711
712 static int peer_detach(VirtIONet *n, int index)
713 {
714 NetClientState *nc = qemu_get_subqueue(n->nic, index);
715
716 if (!nc->peer) {
717 return 0;
718 }
719
720 if (nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_USER) {
721 vhost_set_vring_enable(nc->peer, 0);
722 }
723
724 if (nc->peer->info->type != NET_CLIENT_DRIVER_TAP) {
725 return 0;
726 }
727
728 return tap_disable(nc->peer);
729 }
730
731 static void virtio_net_set_queue_pairs(VirtIONet *n)
732 {
733 int i;
734 int r;
735
736 if (n->nic->peer_deleted) {
737 return;
738 }
739
740 for (i = 0; i < n->max_queue_pairs; i++) {
741 if (i < n->curr_queue_pairs) {
742 r = peer_attach(n, i);
743 assert(!r);
744 } else {
745 r = peer_detach(n, i);
746 assert(!r);
747 }
748 }
749 }
750
751 static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue);
752
753 static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features,
754 Error **errp)
755 {
756 VirtIONet *n = VIRTIO_NET(vdev);
757 NetClientState *nc = qemu_get_queue(n->nic);
758
759 /* First, sync all possible supported virtio-net features */
760 features |= n->host_features;
761
762 virtio_add_feature(&features, VIRTIO_NET_F_MAC);
763
764 if (!peer_has_vnet_hdr(n)) {
765 virtio_clear_feature(&features, VIRTIO_NET_F_CSUM);
766 virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO4);
767 virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO6);
768 virtio_clear_feature(&features, VIRTIO_NET_F_HOST_ECN);
769
770 virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_CSUM);
771 virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO4);
772 virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO6);
773 virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_ECN);
774
775 virtio_clear_feature(&features, VIRTIO_NET_F_HOST_USO);
776 virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_USO4);
777 virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_USO6);
778
779 virtio_clear_feature(&features, VIRTIO_NET_F_HASH_REPORT);
780 }
781
782 if (!peer_has_vnet_hdr(n) || !peer_has_ufo(n)) {
783 virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_UFO);
784 virtio_clear_feature(&features, VIRTIO_NET_F_HOST_UFO);
785 }
786
787 if (!peer_has_uso(n)) {
788 virtio_clear_feature(&features, VIRTIO_NET_F_HOST_USO);
789 virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_USO4);
790 virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_USO6);
791 }
792
793 if (!get_vhost_net(nc->peer)) {
794 return features;
795 }
796
797 if (!ebpf_rss_is_loaded(&n->ebpf_rss)) {
798 virtio_clear_feature(&features, VIRTIO_NET_F_RSS);
799 }
800 features = vhost_net_get_features(get_vhost_net(nc->peer), features);
801 vdev->backend_features = features;
802
803 if (n->mtu_bypass_backend &&
804 (n->host_features & 1ULL << VIRTIO_NET_F_MTU)) {
805 features |= (1ULL << VIRTIO_NET_F_MTU);
806 }
807
808 /*
809 * Since GUEST_ANNOUNCE is emulated, the feature bit could be set without
810 * CTRL_VQ being enabled. This happens in the vDPA case.
811 *
812 * Make sure the feature set is not incoherent, as the driver could refuse
813 * to start.
814 *
815 * TODO: QEMU is able to emulate a CVQ just for guest_announce purposes,
816 * helping the guest notify its new location with vDPA devices that do not
817 * support it.
818 */
819 if (!virtio_has_feature(vdev->backend_features, VIRTIO_NET_F_CTRL_VQ)) {
820 virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_ANNOUNCE);
821 }
822
823 return features;
824 }
825
826 static uint64_t virtio_net_bad_features(VirtIODevice *vdev)
827 {
828 uint64_t features = 0;
829
830 /* Linux kernel 2.6.25. It understood MAC (as everyone must),
831 * but also these: */
832 virtio_add_feature(&features, VIRTIO_NET_F_MAC);
833 virtio_add_feature(&features, VIRTIO_NET_F_CSUM);
834 virtio_add_feature(&features, VIRTIO_NET_F_HOST_TSO4);
835 virtio_add_feature(&features, VIRTIO_NET_F_HOST_TSO6);
836 virtio_add_feature(&features, VIRTIO_NET_F_HOST_ECN);
837
838 return features;
839 }
840
841 static void virtio_net_apply_guest_offloads(VirtIONet *n)
842 {
843 qemu_set_offload(qemu_get_queue(n->nic)->peer,
844 !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_CSUM)),
845 !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO4)),
846 !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO6)),
847 !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_ECN)),
848 !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_UFO)),
849 !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_USO4)),
850 !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_USO6)));
851 }
852
853 static uint64_t virtio_net_guest_offloads_by_features(uint64_t features)
854 {
855 static const uint64_t guest_offloads_mask =
856 (1ULL << VIRTIO_NET_F_GUEST_CSUM) |
857 (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
858 (1ULL << VIRTIO_NET_F_GUEST_TSO6) |
859 (1ULL << VIRTIO_NET_F_GUEST_ECN) |
860 (1ULL << VIRTIO_NET_F_GUEST_UFO) |
861 (1ULL << VIRTIO_NET_F_GUEST_USO4) |
862 (1ULL << VIRTIO_NET_F_GUEST_USO6);
863
864 return guest_offloads_mask & features;
865 }
866
867 uint64_t virtio_net_supported_guest_offloads(const VirtIONet *n)
868 {
869 VirtIODevice *vdev = VIRTIO_DEVICE(n);
870 return virtio_net_guest_offloads_by_features(vdev->guest_features);
871 }
872
873 typedef struct {
874 VirtIONet *n;
875 DeviceState *dev;
876 } FailoverDevice;
877
878 /**
879 * Set the failover primary device
880 *
881 * @dev: device being checked while walking the bus
882 * @opaque: FailoverDevice describing the virtio-net device; on a match,
883 *          its 'dev' field is set and the walk stops
884 */
885 static int failover_set_primary(DeviceState *dev, void *opaque)
886 {
887 FailoverDevice *fdev = opaque;
888 PCIDevice *pci_dev = (PCIDevice *)
889 object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE);
890
891 if (!pci_dev) {
892 return 0;
893 }
894
895 if (!g_strcmp0(pci_dev->failover_pair_id, fdev->n->netclient_name)) {
896 fdev->dev = dev;
897 return 1;
898 }
899
900 return 0;
901 }
902
903 /**
904 * Find the primary device for this failover virtio-net
905 *
906 * @n: VirtIONet device
907 * Returns: the primary DeviceState, or NULL if it was not found
908 */
909 static DeviceState *failover_find_primary_device(VirtIONet *n)
910 {
911 FailoverDevice fdev = {
912 .n = n,
913 };
914
915 qbus_walk_children(sysbus_get_default(), failover_set_primary, NULL,
916 NULL, NULL, &fdev);
917 return fdev.dev;
918 }
919
920 static void failover_add_primary(VirtIONet *n, Error **errp)
921 {
922 Error *err = NULL;
923 DeviceState *dev = failover_find_primary_device(n);
924
925 if (dev) {
926 return;
927 }
928
929 if (!n->primary_opts) {
930 error_setg(errp, "Primary device not found");
931 error_append_hint(errp, "Virtio-net failover will not work. Make "
932 "sure primary device has parameter"
933 " failover_pair_id=%s\n", n->netclient_name);
934 return;
935 }
936
937 dev = qdev_device_add_from_qdict(n->primary_opts,
938 n->primary_opts_from_json,
939 &err);
940 if (err) {
941 qobject_unref(n->primary_opts);
942 n->primary_opts = NULL;
943 } else {
944 object_unref(OBJECT(dev));
945 }
946 error_propagate(errp, err);
947 }
948
949 static void virtio_net_set_features(VirtIODevice *vdev, uint64_t features)
950 {
951 VirtIONet *n = VIRTIO_NET(vdev);
952 Error *err = NULL;
953 int i;
954
955 if (n->mtu_bypass_backend &&
956 !virtio_has_feature(vdev->backend_features, VIRTIO_NET_F_MTU)) {
957 features &= ~(1ULL << VIRTIO_NET_F_MTU);
958 }
959
960 virtio_net_set_multiqueue(n,
961 virtio_has_feature(features, VIRTIO_NET_F_RSS) ||
962 virtio_has_feature(features, VIRTIO_NET_F_MQ));
963
964 virtio_net_set_mrg_rx_bufs(n,
965 virtio_has_feature(features,
966 VIRTIO_NET_F_MRG_RXBUF),
967 virtio_has_feature(features,
968 VIRTIO_F_VERSION_1),
969 virtio_has_feature(features,
970 VIRTIO_NET_F_HASH_REPORT));
971
972 n->rsc4_enabled = virtio_has_feature(features, VIRTIO_NET_F_RSC_EXT) &&
973 virtio_has_feature(features, VIRTIO_NET_F_GUEST_TSO4);
974 n->rsc6_enabled = virtio_has_feature(features, VIRTIO_NET_F_RSC_EXT) &&
975 virtio_has_feature(features, VIRTIO_NET_F_GUEST_TSO6);
976 n->rss_data.redirect = virtio_has_feature(features, VIRTIO_NET_F_RSS);
977
978 if (n->has_vnet_hdr) {
979 n->curr_guest_offloads =
980 virtio_net_guest_offloads_by_features(features);
981 virtio_net_apply_guest_offloads(n);
982 }
983
984 for (i = 0; i < n->max_queue_pairs; i++) {
985 NetClientState *nc = qemu_get_subqueue(n->nic, i);
986
987 if (!get_vhost_net(nc->peer)) {
988 continue;
989 }
990 vhost_net_ack_features(get_vhost_net(nc->peer), features);
991
992 /*
993 * keep acked_features in NetVhostUserState up-to-date so it
994 * doesn't miss any features configured by the guest virtio driver.
995 */
996 vhost_net_save_acked_features(nc->peer);
997 }
998
999 if (!virtio_has_feature(features, VIRTIO_NET_F_CTRL_VLAN)) {
1000 memset(n->vlans, 0xff, MAX_VLAN >> 3);
1001 }
1002
1003 if (virtio_has_feature(features, VIRTIO_NET_F_STANDBY)) {
1004 qapi_event_send_failover_negotiated(n->netclient_name);
1005 qatomic_set(&n->failover_primary_hidden, false);
1006 failover_add_primary(n, &err);
1007 if (err) {
1008 if (!qtest_enabled()) {
1009 warn_report_err(err);
1010 } else {
1011 error_free(err);
1012 }
1013 }
1014 }
1015 }
1016
1017 static int virtio_net_handle_rx_mode(VirtIONet *n, uint8_t cmd,
1018 struct iovec *iov, unsigned int iov_cnt)
1019 {
1020 uint8_t on;
1021 size_t s;
1022 NetClientState *nc = qemu_get_queue(n->nic);
1023
1024 s = iov_to_buf(iov, iov_cnt, 0, &on, sizeof(on));
1025 if (s != sizeof(on)) {
1026 return VIRTIO_NET_ERR;
1027 }
1028
1029 if (cmd == VIRTIO_NET_CTRL_RX_PROMISC) {
1030 n->promisc = on;
1031 } else if (cmd == VIRTIO_NET_CTRL_RX_ALLMULTI) {
1032 n->allmulti = on;
1033 } else if (cmd == VIRTIO_NET_CTRL_RX_ALLUNI) {
1034 n->alluni = on;
1035 } else if (cmd == VIRTIO_NET_CTRL_RX_NOMULTI) {
1036 n->nomulti = on;
1037 } else if (cmd == VIRTIO_NET_CTRL_RX_NOUNI) {
1038 n->nouni = on;
1039 } else if (cmd == VIRTIO_NET_CTRL_RX_NOBCAST) {
1040 n->nobcast = on;
1041 } else {
1042 return VIRTIO_NET_ERR;
1043 }
1044
1045 rxfilter_notify(nc);
1046
1047 return VIRTIO_NET_OK;
1048 }
1049
1050 static int virtio_net_handle_offloads(VirtIONet *n, uint8_t cmd,
1051 struct iovec *iov, unsigned int iov_cnt)
1052 {
1053 VirtIODevice *vdev = VIRTIO_DEVICE(n);
1054 uint64_t offloads;
1055 size_t s;
1056
1057 if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
1058 return VIRTIO_NET_ERR;
1059 }
1060
1061 s = iov_to_buf(iov, iov_cnt, 0, &offloads, sizeof(offloads));
1062 if (s != sizeof(offloads)) {
1063 return VIRTIO_NET_ERR;
1064 }
1065
1066 if (cmd == VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET) {
1067 uint64_t supported_offloads;
1068
1069 offloads = virtio_ldq_p(vdev, &offloads);
1070
1071 if (!n->has_vnet_hdr) {
1072 return VIRTIO_NET_ERR;
1073 }
1074
1075 n->rsc4_enabled = virtio_has_feature(offloads, VIRTIO_NET_F_RSC_EXT) &&
1076 virtio_has_feature(offloads, VIRTIO_NET_F_GUEST_TSO4);
1077 n->rsc6_enabled = virtio_has_feature(offloads, VIRTIO_NET_F_RSC_EXT) &&
1078 virtio_has_feature(offloads, VIRTIO_NET_F_GUEST_TSO6);
1079 virtio_clear_feature(&offloads, VIRTIO_NET_F_RSC_EXT);
1080
1081 supported_offloads = virtio_net_supported_guest_offloads(n);
1082 if (offloads & ~supported_offloads) {
1083 return VIRTIO_NET_ERR;
1084 }
1085
1086 n->curr_guest_offloads = offloads;
1087 virtio_net_apply_guest_offloads(n);
1088
1089 return VIRTIO_NET_OK;
1090 } else {
1091 return VIRTIO_NET_ERR;
1092 }
1093 }
1094
1095 static int virtio_net_handle_mac(VirtIONet *n, uint8_t cmd,
1096 struct iovec *iov, unsigned int iov_cnt)
1097 {
1098 VirtIODevice *vdev = VIRTIO_DEVICE(n);
1099 struct virtio_net_ctrl_mac mac_data;
1100 size_t s;
1101 NetClientState *nc = qemu_get_queue(n->nic);
1102
1103 if (cmd == VIRTIO_NET_CTRL_MAC_ADDR_SET) {
1104 if (iov_size(iov, iov_cnt) != sizeof(n->mac)) {
1105 return VIRTIO_NET_ERR;
1106 }
1107 s = iov_to_buf(iov, iov_cnt, 0, &n->mac, sizeof(n->mac));
1108 assert(s == sizeof(n->mac));
1109 qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
1110 rxfilter_notify(nc);
1111
1112 return VIRTIO_NET_OK;
1113 }
1114
1115 if (cmd != VIRTIO_NET_CTRL_MAC_TABLE_SET) {
1116 return VIRTIO_NET_ERR;
1117 }
1118
1119 int in_use = 0;
1120 int first_multi = 0;
1121 uint8_t uni_overflow = 0;
1122 uint8_t multi_overflow = 0;
1123 uint8_t *macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN);
1124
1125 s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
1126 sizeof(mac_data.entries));
1127 mac_data.entries = virtio_ldl_p(vdev, &mac_data.entries);
1128 if (s != sizeof(mac_data.entries)) {
1129 goto error;
1130 }
1131 iov_discard_front(&iov, &iov_cnt, s);
1132
1133 if (mac_data.entries * ETH_ALEN > iov_size(iov, iov_cnt)) {
1134 goto error;
1135 }
1136
1137 if (mac_data.entries <= MAC_TABLE_ENTRIES) {
1138 s = iov_to_buf(iov, iov_cnt, 0, macs,
1139 mac_data.entries * ETH_ALEN);
1140 if (s != mac_data.entries * ETH_ALEN) {
1141 goto error;
1142 }
1143 in_use += mac_data.entries;
1144 } else {
1145 uni_overflow = 1;
1146 }
1147
1148 iov_discard_front(&iov, &iov_cnt, mac_data.entries * ETH_ALEN);
1149
1150 first_multi = in_use;
1151
1152 s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
1153 sizeof(mac_data.entries));
1154 mac_data.entries = virtio_ldl_p(vdev, &mac_data.entries);
1155 if (s != sizeof(mac_data.entries)) {
1156 goto error;
1157 }
1158
1159 iov_discard_front(&iov, &iov_cnt, s);
1160
1161 if (mac_data.entries * ETH_ALEN != iov_size(iov, iov_cnt)) {
1162 goto error;
1163 }
1164
1165 if (mac_data.entries <= MAC_TABLE_ENTRIES - in_use) {
1166 s = iov_to_buf(iov, iov_cnt, 0, &macs[in_use * ETH_ALEN],
1167 mac_data.entries * ETH_ALEN);
1168 if (s != mac_data.entries * ETH_ALEN) {
1169 goto error;
1170 }
1171 in_use += mac_data.entries;
1172 } else {
1173 multi_overflow = 1;
1174 }
1175
1176 n->mac_table.in_use = in_use;
1177 n->mac_table.first_multi = first_multi;
1178 n->mac_table.uni_overflow = uni_overflow;
1179 n->mac_table.multi_overflow = multi_overflow;
1180 memcpy(n->mac_table.macs, macs, MAC_TABLE_ENTRIES * ETH_ALEN);
1181 g_free(macs);
1182 rxfilter_notify(nc);
1183
1184 return VIRTIO_NET_OK;
1185
1186 error:
1187 g_free(macs);
1188 return VIRTIO_NET_ERR;
1189 }
1190
1191 static int virtio_net_handle_vlan_table(VirtIONet *n, uint8_t cmd,
1192 struct iovec *iov, unsigned int iov_cnt)
1193 {
1194 VirtIODevice *vdev = VIRTIO_DEVICE(n);
1195 uint16_t vid;
1196 size_t s;
1197 NetClientState *nc = qemu_get_queue(n->nic);
1198
1199 s = iov_to_buf(iov, iov_cnt, 0, &vid, sizeof(vid));
1200 vid = virtio_lduw_p(vdev, &vid);
1201 if (s != sizeof(vid)) {
1202 return VIRTIO_NET_ERR;
1203 }
1204
1205 if (vid >= MAX_VLAN)
1206 return VIRTIO_NET_ERR;
1207
1208 if (cmd == VIRTIO_NET_CTRL_VLAN_ADD)
1209 n->vlans[vid >> 5] |= (1U << (vid & 0x1f));
1210 else if (cmd == VIRTIO_NET_CTRL_VLAN_DEL)
1211 n->vlans[vid >> 5] &= ~(1U << (vid & 0x1f));
1212 else
1213 return VIRTIO_NET_ERR;
1214
1215 rxfilter_notify(nc);
1216
1217 return VIRTIO_NET_OK;
1218 }
1219
1220 static int virtio_net_handle_announce(VirtIONet *n, uint8_t cmd,
1221 struct iovec *iov, unsigned int iov_cnt)
1222 {
1223 trace_virtio_net_handle_announce(n->announce_timer.round);
1224 if (cmd == VIRTIO_NET_CTRL_ANNOUNCE_ACK &&
1225 n->status & VIRTIO_NET_S_ANNOUNCE) {
1226 n->status &= ~VIRTIO_NET_S_ANNOUNCE;
1227 if (n->announce_timer.round) {
1228 qemu_announce_timer_step(&n->announce_timer);
1229 }
1230 return VIRTIO_NET_OK;
1231 } else {
1232 return VIRTIO_NET_ERR;
1233 }
1234 }
1235
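/*
 * Pass the RSS steering eBPF program (or -1 to detach it) to the backend
 * of queue 0; returns false if there is no peer or it has no
 * set_steering_ebpf hook.
 */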
1236 static bool virtio_net_attach_ebpf_to_backend(NICState *nic, int prog_fd)
1237 {
1238 NetClientState *nc = qemu_get_peer(qemu_get_queue(nic), 0);
1239 if (nc == NULL || nc->info->set_steering_ebpf == NULL) {
1240 return false;
1241 }
1242
1243 return nc->info->set_steering_ebpf(nc, prog_fd);
1244 }
1245
1246 static void rss_data_to_rss_config(struct VirtioNetRssData *data,
1247 struct EBPFRSSConfig *config)
1248 {
1249 config->redirect = data->redirect;
1250 config->populate_hash = data->populate_hash;
1251 config->hash_types = data->hash_types;
1252 config->indirections_len = data->indirections_len;
1253 config->default_queue = data->default_queue;
1254 }
1255
1256 static bool virtio_net_attach_epbf_rss(VirtIONet *n)
1257 {
1258 struct EBPFRSSConfig config = {};
1259
1260 if (!ebpf_rss_is_loaded(&n->ebpf_rss)) {
1261 return false;
1262 }
1263
1264 rss_data_to_rss_config(&n->rss_data, &config);
1265
1266 if (!ebpf_rss_set_all(&n->ebpf_rss, &config,
1267 n->rss_data.indirections_table, n->rss_data.key)) {
1268 return false;
1269 }
1270
1271 if (!virtio_net_attach_ebpf_to_backend(n->nic, n->ebpf_rss.program_fd)) {
1272 return false;
1273 }
1274
1275 return true;
1276 }
1277
1278 static void virtio_net_detach_epbf_rss(VirtIONet *n)
1279 {
1280 virtio_net_attach_ebpf_to_backend(n->nic, -1);
1281 }
1282
1283 static void virtio_net_commit_rss_config(VirtIONet *n)
1284 {
1285 if (n->rss_data.enabled) {
1286 n->rss_data.enabled_software_rss = n->rss_data.populate_hash;
1287 if (n->rss_data.populate_hash) {
1288 virtio_net_detach_epbf_rss(n);
1289 } else if (!virtio_net_attach_epbf_rss(n)) {
1290 if (get_vhost_net(qemu_get_queue(n->nic)->peer)) {
1291 warn_report("Can't load eBPF RSS for vhost");
1292 } else {
1293 warn_report("Can't load eBPF RSS - fallback to software RSS");
1294 n->rss_data.enabled_software_rss = true;
1295 }
1296 }
1297
1298 trace_virtio_net_rss_enable(n->rss_data.hash_types,
1299 n->rss_data.indirections_len,
1300 sizeof(n->rss_data.key));
1301 } else {
1302 virtio_net_detach_epbf_rss(n);
1303 trace_virtio_net_rss_disable();
1304 }
1305 }
1306
1307 static void virtio_net_disable_rss(VirtIONet *n)
1308 {
1309 if (!n->rss_data.enabled) {
1310 return;
1311 }
1312
1313 n->rss_data.enabled = false;
1314 virtio_net_commit_rss_config(n);
1315 }
1316
1317 static bool virtio_net_load_ebpf_fds(VirtIONet *n)
1318 {
1319 int fds[EBPF_RSS_MAX_FDS] = { [0 ... EBPF_RSS_MAX_FDS - 1] = -1};
1320 int ret = true;
1321 int i = 0;
1322
1323 if (n->nr_ebpf_rss_fds != EBPF_RSS_MAX_FDS) {
1324 warn_report("Expected %d file descriptors but got %d",
1325 EBPF_RSS_MAX_FDS, n->nr_ebpf_rss_fds);
1326 return false;
1327 }
1328
1329 for (i = 0; i < n->nr_ebpf_rss_fds; i++) {
1330 fds[i] = monitor_fd_param(monitor_cur(), n->ebpf_rss_fds[i],
1331 &error_warn);
1332 if (fds[i] < 0) {
1333 ret = false;
1334 goto exit;
1335 }
1336 }
1337
1338 ret = ebpf_rss_load_fds(&n->ebpf_rss, fds[0], fds[1], fds[2], fds[3]);
1339
1340 exit:
1341 if (!ret) {
1342 for (i = 0; i < n->nr_ebpf_rss_fds && fds[i] != -1; i++) {
1343 close(fds[i]);
1344 }
1345 }
1346
1347 return ret;
1348 }
1349
1350 static bool virtio_net_load_ebpf(VirtIONet *n)
1351 {
1352 bool ret = false;
1353
1354 if (virtio_net_attach_ebpf_to_backend(n->nic, -1)) {
1355 if (!(n->ebpf_rss_fds && virtio_net_load_ebpf_fds(n))) {
1356 ret = ebpf_rss_load(&n->ebpf_rss);
1357 }
1358 }
1359
1360 return ret;
1361 }
1362
1363 static void virtio_net_unload_ebpf(VirtIONet *n)
1364 {
1365 virtio_net_attach_ebpf_to_backend(n->nic, -1);
1366 ebpf_rss_unload(&n->ebpf_rss);
1367 }
1368
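/*
 * Parse a VIRTIO_NET_CTRL_MQ_RSS_CONFIG (do_rss == true) or
 * VIRTIO_NET_CTRL_MQ_HASH_CONFIG command payload, validate it and update
 * n->rss_data. Returns the number of queue pairs to use, or 0 on error.
 */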
1369 static uint16_t virtio_net_handle_rss(VirtIONet *n,
1370 struct iovec *iov,
1371 unsigned int iov_cnt,
1372 bool do_rss)
1373 {
1374 VirtIODevice *vdev = VIRTIO_DEVICE(n);
1375 struct virtio_net_rss_config cfg;
1376 size_t s, offset = 0, size_get;
1377 uint16_t queue_pairs, i;
1378 struct {
1379 uint16_t us;
1380 uint8_t b;
1381 } QEMU_PACKED temp;
1382 const char *err_msg = "";
1383 uint32_t err_value = 0;
1384
1385 if (do_rss && !virtio_vdev_has_feature(vdev, VIRTIO_NET_F_RSS)) {
1386 err_msg = "RSS is not negotiated";
1387 goto error;
1388 }
1389 if (!do_rss && !virtio_vdev_has_feature(vdev, VIRTIO_NET_F_HASH_REPORT)) {
1390 err_msg = "Hash report is not negotiated";
1391 goto error;
1392 }
1393 size_get = offsetof(struct virtio_net_rss_config, indirection_table);
1394 s = iov_to_buf(iov, iov_cnt, offset, &cfg, size_get);
1395 if (s != size_get) {
1396 err_msg = "Short command buffer";
1397 err_value = (uint32_t)s;
1398 goto error;
1399 }
1400 n->rss_data.hash_types = virtio_ldl_p(vdev, &cfg.hash_types);
1401 n->rss_data.indirections_len =
1402 virtio_lduw_p(vdev, &cfg.indirection_table_mask);
1403 n->rss_data.indirections_len++;
1404 if (!do_rss) {
1405 n->rss_data.indirections_len = 1;
1406 }
1407 if (!is_power_of_2(n->rss_data.indirections_len)) {
1408 err_msg = "Invalid size of indirection table";
1409 err_value = n->rss_data.indirections_len;
1410 goto error;
1411 }
1412 if (n->rss_data.indirections_len > VIRTIO_NET_RSS_MAX_TABLE_LEN) {
1413 err_msg = "Too large indirection table";
1414 err_value = n->rss_data.indirections_len;
1415 goto error;
1416 }
1417 n->rss_data.default_queue = do_rss ?
1418 virtio_lduw_p(vdev, &cfg.unclassified_queue) : 0;
1419 if (n->rss_data.default_queue >= n->max_queue_pairs) {
1420 err_msg = "Invalid default queue";
1421 err_value = n->rss_data.default_queue;
1422 goto error;
1423 }
1424 offset += size_get;
1425 size_get = sizeof(uint16_t) * n->rss_data.indirections_len;
1426 g_free(n->rss_data.indirections_table);
1427 n->rss_data.indirections_table = g_malloc(size_get);
1428 if (!n->rss_data.indirections_table) {
1429 err_msg = "Can't allocate indirections table";
1430 err_value = n->rss_data.indirections_len;
1431 goto error;
1432 }
1433 s = iov_to_buf(iov, iov_cnt, offset,
1434 n->rss_data.indirections_table, size_get);
1435 if (s != size_get) {
1436 err_msg = "Short indirection table buffer";
1437 err_value = (uint32_t)s;
1438 goto error;
1439 }
1440 for (i = 0; i < n->rss_data.indirections_len; ++i) {
1441 uint16_t val = n->rss_data.indirections_table[i];
1442 n->rss_data.indirections_table[i] = virtio_lduw_p(vdev, &val);
1443 }
1444 offset += size_get;
1445 size_get = sizeof(temp);
1446 s = iov_to_buf(iov, iov_cnt, offset, &temp, size_get);
1447 if (s != size_get) {
1448 err_msg = "Can't get queue_pairs";
1449 err_value = (uint32_t)s;
1450 goto error;
1451 }
1452 queue_pairs = do_rss ? virtio_lduw_p(vdev, &temp.us) : n->curr_queue_pairs;
1453 if (queue_pairs == 0 || queue_pairs > n->max_queue_pairs) {
1454 err_msg = "Invalid number of queue_pairs";
1455 err_value = queue_pairs;
1456 goto error;
1457 }
1458 if (temp.b > VIRTIO_NET_RSS_MAX_KEY_SIZE) {
1459 err_msg = "Invalid key size";
1460 err_value = temp.b;
1461 goto error;
1462 }
1463 if (!temp.b && n->rss_data.hash_types) {
1464 err_msg = "No key provided";
1465 err_value = 0;
1466 goto error;
1467 }
1468 if (!temp.b && !n->rss_data.hash_types) {
1469 virtio_net_disable_rss(n);
1470 return queue_pairs;
1471 }
1472 offset += size_get;
1473 size_get = temp.b;
1474 s = iov_to_buf(iov, iov_cnt, offset, n->rss_data.key, size_get);
1475 if (s != size_get) {
1476 err_msg = "Can't get key buffer";
1477 err_value = (uint32_t)s;
1478 goto error;
1479 }
1480 n->rss_data.enabled = true;
1481 virtio_net_commit_rss_config(n);
1482 return queue_pairs;
1483 error:
1484 trace_virtio_net_rss_error(err_msg, err_value);
1485 virtio_net_disable_rss(n);
1486 return 0;
1487 }
1488
1489 static int virtio_net_handle_mq(VirtIONet *n, uint8_t cmd,
1490 struct iovec *iov, unsigned int iov_cnt)
1491 {
1492 VirtIODevice *vdev = VIRTIO_DEVICE(n);
1493 uint16_t queue_pairs;
1494 NetClientState *nc = qemu_get_queue(n->nic);
1495
1496 virtio_net_disable_rss(n);
1497 if (cmd == VIRTIO_NET_CTRL_MQ_HASH_CONFIG) {
1498 queue_pairs = virtio_net_handle_rss(n, iov, iov_cnt, false);
1499 return queue_pairs ? VIRTIO_NET_OK : VIRTIO_NET_ERR;
1500 }
1501 if (cmd == VIRTIO_NET_CTRL_MQ_RSS_CONFIG) {
1502 queue_pairs = virtio_net_handle_rss(n, iov, iov_cnt, true);
1503 } else if (cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
1504 struct virtio_net_ctrl_mq mq;
1505 size_t s;
1506 if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_MQ)) {
1507 return VIRTIO_NET_ERR;
1508 }
1509 s = iov_to_buf(iov, iov_cnt, 0, &mq, sizeof(mq));
1510 if (s != sizeof(mq)) {
1511 return VIRTIO_NET_ERR;
1512 }
1513 queue_pairs = virtio_lduw_p(vdev, &mq.virtqueue_pairs);
1514
1515 } else {
1516 return VIRTIO_NET_ERR;
1517 }
1518
1519 if (queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
1520 queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
1521 queue_pairs > n->max_queue_pairs ||
1522 !n->multiqueue) {
1523 return VIRTIO_NET_ERR;
1524 }
1525
1526 n->curr_queue_pairs = queue_pairs;
1527 if (nc->peer && nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_VDPA) {
1528 /*
1529 * Avoid updating the backend for a vdpa device: We're only interested
1530 * in updating the device model queues.
1531 */
1532 return VIRTIO_NET_OK;
1533 }
1534 /* stop the backend before changing the number of queue_pairs to avoid handling a
1535 * disabled queue */
1536 virtio_net_set_status(vdev, vdev->status);
1537 virtio_net_set_queue_pairs(n);
1538
1539 return VIRTIO_NET_OK;
1540 }
1541
1542 size_t virtio_net_handle_ctrl_iov(VirtIODevice *vdev,
1543 const struct iovec *in_sg, unsigned in_num,
1544 const struct iovec *out_sg,
1545 unsigned out_num)
1546 {
1547 VirtIONet *n = VIRTIO_NET(vdev);
1548 struct virtio_net_ctrl_hdr ctrl;
1549 virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
1550 size_t s;
1551 struct iovec *iov, *iov2;
1552
1553 if (iov_size(in_sg, in_num) < sizeof(status) ||
1554 iov_size(out_sg, out_num) < sizeof(ctrl)) {
1555 virtio_error(vdev, "virtio-net ctrl missing headers");
1556 return 0;
1557 }
1558
1559 iov2 = iov = g_memdup2(out_sg, sizeof(struct iovec) * out_num);
1560 s = iov_to_buf(iov, out_num, 0, &ctrl, sizeof(ctrl));
1561 iov_discard_front(&iov, &out_num, sizeof(ctrl));
1562 if (s != sizeof(ctrl)) {
1563 status = VIRTIO_NET_ERR;
1564 } else if (ctrl.class == VIRTIO_NET_CTRL_RX) {
1565 status = virtio_net_handle_rx_mode(n, ctrl.cmd, iov, out_num);
1566 } else if (ctrl.class == VIRTIO_NET_CTRL_MAC) {
1567 status = virtio_net_handle_mac(n, ctrl.cmd, iov, out_num);
1568 } else if (ctrl.class == VIRTIO_NET_CTRL_VLAN) {
1569 status = virtio_net_handle_vlan_table(n, ctrl.cmd, iov, out_num);
1570 } else if (ctrl.class == VIRTIO_NET_CTRL_ANNOUNCE) {
1571 status = virtio_net_handle_announce(n, ctrl.cmd, iov, out_num);
1572 } else if (ctrl.class == VIRTIO_NET_CTRL_MQ) {
1573 status = virtio_net_handle_mq(n, ctrl.cmd, iov, out_num);
1574 } else if (ctrl.class == VIRTIO_NET_CTRL_GUEST_OFFLOADS) {
1575 status = virtio_net_handle_offloads(n, ctrl.cmd, iov, out_num);
1576 }
1577
1578 s = iov_from_buf(in_sg, in_num, 0, &status, sizeof(status));
1579 assert(s == sizeof(status));
1580
1581 g_free(iov2);
1582 return sizeof(status);
1583 }
1584
1585 static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
1586 {
1587 VirtQueueElement *elem;
1588
1589 for (;;) {
1590 size_t written;
1591 elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
1592 if (!elem) {
1593 break;
1594 }
1595
1596 written = virtio_net_handle_ctrl_iov(vdev, elem->in_sg, elem->in_num,
1597 elem->out_sg, elem->out_num);
1598 if (written > 0) {
1599 virtqueue_push(vq, elem, written);
1600 virtio_notify(vdev, vq);
1601 g_free(elem);
1602 } else {
1603 virtqueue_detach_element(vq, elem, 0);
1604 g_free(elem);
1605 break;
1606 }
1607 }
1608 }
1609
1610 /* RX */
1611
1612 static void virtio_net_handle_rx(VirtIODevice *vdev, VirtQueue *vq)
1613 {
1614 VirtIONet *n = VIRTIO_NET(vdev);
1615 int queue_index = vq2q(virtio_get_queue_index(vq));
1616
1617 qemu_flush_queued_packets(qemu_get_subqueue(n->nic, queue_index));
1618 }
1619
1620 static bool virtio_net_can_receive(NetClientState *nc)
1621 {
1622 VirtIONet *n = qemu_get_nic_opaque(nc);
1623 VirtIODevice *vdev = VIRTIO_DEVICE(n);
1624 VirtIONetQueue *q = virtio_net_get_subqueue(nc);
1625
1626 if (!vdev->vm_running) {
1627 return false;
1628 }
1629
1630 if (nc->queue_index >= n->curr_queue_pairs) {
1631 return false;
1632 }
1633
1634 if (!virtio_queue_ready(q->rx_vq) ||
1635 !(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
1636 return false;
1637 }
1638
1639 return true;
1640 }
1641
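/*
 * Check whether the RX virtqueue has buffers available (and, with
 * mergeable RX buffers, room for at least 'bufsize' bytes). Returns 0 and
 * leaves guest notifications enabled if not, otherwise disables
 * notifications and returns 1.
 */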
1642 static int virtio_net_has_buffers(VirtIONetQueue *q, int bufsize)
1643 {
1644 VirtIONet *n = q->n;
1645 if (virtio_queue_empty(q->rx_vq) ||
1646 (n->mergeable_rx_bufs &&
1647 !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) {
1648 virtio_queue_set_notification(q->rx_vq, 1);
1649
1650 /* To avoid a race condition where the guest has made some buffers
1651 * available after the above check but before notification was
1652 * enabled, check for available buffers again.
1653 */
1654 if (virtio_queue_empty(q->rx_vq) ||
1655 (n->mergeable_rx_bufs &&
1656 !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) {
1657 return 0;
1658 }
1659 }
1660
1661 virtio_queue_set_notification(q->rx_vq, 0);
1662 return 1;
1663 }
1664
1665 static void virtio_net_hdr_swap(VirtIODevice *vdev, struct virtio_net_hdr *hdr)
1666 {
1667 virtio_tswap16s(vdev, &hdr->hdr_len);
1668 virtio_tswap16s(vdev, &hdr->gso_size);
1669 virtio_tswap16s(vdev, &hdr->csum_start);
1670 virtio_tswap16s(vdev, &hdr->csum_offset);
1671 }
1672
1673 /* dhclient uses AF_PACKET but doesn't pass auxdata to the kernel so
1674 * it never finds out that the packets don't have valid checksums. This
1675 * causes dhclient to get upset. Fedora's carried a patch for ages to
1676 * fix this with Xen but it hasn't appeared in an upstream release of
1677 * dhclient yet.
1678 *
1679 * To avoid breaking existing guests, we catch udp packets and add
1680 * checksums. This is terrible but it's better than hacking the guest
1681 * kernels.
1682 *
1683 * N.B. if we introduce a zero-copy API, this operation is no longer free so
1684 * we should provide a mechanism to disable it to avoid polluting the host
1685 * cache.
1686 */
1687 static void work_around_broken_dhclient(struct virtio_net_hdr *hdr,
1688 uint8_t *buf, size_t size)
1689 {
1690 if ((hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && /* missing csum */
1691 (size > 27 && size < 1500) && /* normal sized MTU */
1692 (buf[12] == 0x08 && buf[13] == 0x00) && /* ethertype == IPv4 */
1693 (buf[23] == 17) && /* ip.protocol == UDP */
1694 (buf[34] == 0 && buf[35] == 67)) { /* udp.srcport == bootps */
1695 net_checksum_calculate(buf, size, CSUM_UDP);
1696 hdr->flags &= ~VIRTIO_NET_HDR_F_NEEDS_CSUM;
1697 }
1698 }
1699
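/*
 * Write the virtio-net header into the guest receive buffer. When the
 * backend supplies a vnet header, apply the dhclient checksum workaround
 * and byte-swap the header if needed before copying it; otherwise
 * synthesize an empty header with VIRTIO_NET_HDR_GSO_NONE.
 */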
1700 static void receive_header(VirtIONet *n, const struct iovec *iov, int iov_cnt,
1701 const void *buf, size_t size)
1702 {
1703 if (n->has_vnet_hdr) {
1704 /* FIXME this cast is evil */
1705 void *wbuf = (void *)buf;
1706 work_around_broken_dhclient(wbuf, wbuf + n->host_hdr_len,
1707 size - n->host_hdr_len);
1708
1709 if (n->needs_vnet_hdr_swap) {
1710 virtio_net_hdr_swap(VIRTIO_DEVICE(n), wbuf);
1711 }
1712 iov_from_buf(iov, iov_cnt, 0, buf, sizeof(struct virtio_net_hdr));
1713 } else {
1714 struct virtio_net_hdr hdr = {
1715 .flags = 0,
1716 .gso_type = VIRTIO_NET_HDR_GSO_NONE
1717 };
1718 iov_from_buf(iov, iov_cnt, 0, &hdr, sizeof hdr);
1719 }
1720 }
1721
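/*
 * Decide whether an incoming packet passes the RX filter based on
 * promiscuous mode, the VLAN table and the unicast/multicast MAC tables.
 * Returns 1 to accept the packet, 0 to drop it.
 */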
1722 static int receive_filter(VirtIONet *n, const uint8_t *buf, int size)
1723 {
1724 static const uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
1725 static const uint8_t vlan[] = {0x81, 0x00};
1726 uint8_t *ptr = (uint8_t *)buf;
1727 int i;
1728
1729 if (n->promisc)
1730 return 1;
1731
1732 ptr += n->host_hdr_len;
1733
1734 if (!memcmp(&ptr[12], vlan, sizeof(vlan))) {
1735 int vid = lduw_be_p(ptr + 14) & 0xfff;
1736 if (!(n->vlans[vid >> 5] & (1U << (vid & 0x1f))))
1737 return 0;
1738 }
1739
1740 if (ptr[0] & 1) { // multicast
1741 if (!memcmp(ptr, bcast, sizeof(bcast))) {
1742 return !n->nobcast;
1743 } else if (n->nomulti) {
1744 return 0;
1745 } else if (n->allmulti || n->mac_table.multi_overflow) {
1746 return 1;
1747 }
1748
1749 for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) {
1750 if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
1751 return 1;
1752 }
1753 }
1754 } else { // unicast
1755 if (n->nouni) {
1756 return 0;
1757 } else if (n->alluni || n->mac_table.uni_overflow) {
1758 return 1;
1759 } else if (!memcmp(ptr, n->mac, ETH_ALEN)) {
1760 return 1;
1761 }
1762
1763 for (i = 0; i < n->mac_table.first_multi; i++) {
1764 if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
1765 return 1;
1766 }
1767 }
1768 }
1769
1770 return 0;
1771 }
1772
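/*
 * Map the parsed L3/L4 protocols of a packet to a NetPktRss* hash type
 * allowed by the negotiated 'types' mask; returns 0xff when no matching
 * hash type is enabled.
 */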
1773 static uint8_t virtio_net_get_hash_type(bool hasip4,
1774 bool hasip6,
1775 EthL4HdrProto l4hdr_proto,
1776 uint32_t types)
1777 {
1778 if (hasip4) {
1779 switch (l4hdr_proto) {
1780 case ETH_L4_HDR_PROTO_TCP:
1781 if (types & VIRTIO_NET_RSS_HASH_TYPE_TCPv4) {
1782 return NetPktRssIpV4Tcp;
1783 }
1784 break;
1785
1786 case ETH_L4_HDR_PROTO_UDP:
1787 if (types & VIRTIO_NET_RSS_HASH_TYPE_UDPv4) {
1788 return NetPktRssIpV4Udp;
1789 }
1790 break;
1791
1792 default:
1793 break;
1794 }
1795
1796 if (types & VIRTIO_NET_RSS_HASH_TYPE_IPv4) {
1797 return NetPktRssIpV4;
1798 }
1799 } else if (hasip6) {
1800 switch (l4hdr_proto) {
1801 case ETH_L4_HDR_PROTO_TCP:
1802 if (types & VIRTIO_NET_RSS_HASH_TYPE_TCP_EX) {
1803 return NetPktRssIpV6TcpEx;
1804 }
1805 if (types & VIRTIO_NET_RSS_HASH_TYPE_TCPv6) {
1806 return NetPktRssIpV6Tcp;
1807 }
1808 break;
1809
1810 case ETH_L4_HDR_PROTO_UDP:
1811 if (types & VIRTIO_NET_RSS_HASH_TYPE_UDP_EX) {
1812 return NetPktRssIpV6UdpEx;
1813 }
1814 if (types & VIRTIO_NET_RSS_HASH_TYPE_UDPv6) {
1815 return NetPktRssIpV6Udp;
1816 }
1817 break;
1818
1819 default:
1820 break;
1821 }
1822
1823 if (types & VIRTIO_NET_RSS_HASH_TYPE_IP_EX) {
1824 return NetPktRssIpV6Ex;
1825 }
1826 if (types & VIRTIO_NET_RSS_HASH_TYPE_IPv6) {
1827 return NetPktRssIpV6;
1828 }
1829 }
1830 return 0xff;
1831 }
1832
1833 static int virtio_net_process_rss(NetClientState *nc, const uint8_t *buf,
1834 size_t size,
1835 struct virtio_net_hdr_v1_hash *hdr)
1836 {
1837 VirtIONet *n = qemu_get_nic_opaque(nc);
1838 unsigned int index = nc->queue_index, new_index = index;
1839 struct NetRxPkt *pkt = n->rx_pkt;
1840 uint8_t net_hash_type;
1841 uint32_t hash;
1842 bool hasip4, hasip6;
1843 EthL4HdrProto l4hdr_proto;
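    /* Maps NetPktRss* hash types (used as the array index) to the
     * VIRTIO_NET_HASH_REPORT_* values exposed to the guest. */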
1844 static const uint8_t reports[NetPktRssIpV6UdpEx + 1] = {
1845 VIRTIO_NET_HASH_REPORT_IPv4,
1846 VIRTIO_NET_HASH_REPORT_TCPv4,
1847 VIRTIO_NET_HASH_REPORT_TCPv6,
1848 VIRTIO_NET_HASH_REPORT_IPv6,
1849 VIRTIO_NET_HASH_REPORT_IPv6_EX,
1850 VIRTIO_NET_HASH_REPORT_TCPv6_EX,
1851 VIRTIO_NET_HASH_REPORT_UDPv4,
1852 VIRTIO_NET_HASH_REPORT_UDPv6,
1853 VIRTIO_NET_HASH_REPORT_UDPv6_EX
1854 };
1855 struct iovec iov = {
1856 .iov_base = (void *)buf,
1857 .iov_len = size
1858 };
1859
1860 net_rx_pkt_set_protocols(pkt, &iov, 1, n->host_hdr_len);
1861 net_rx_pkt_get_protocols(pkt, &hasip4, &hasip6, &l4hdr_proto);
1862 net_hash_type = virtio_net_get_hash_type(hasip4, hasip6, l4hdr_proto,
1863 n->rss_data.hash_types);
1864 if (net_hash_type > NetPktRssIpV6UdpEx) {
1865 if (n->rss_data.populate_hash) {
1866 hdr->hash_value = VIRTIO_NET_HASH_REPORT_NONE;
1867 hdr->hash_report = 0;
1868 }
1869 return n->rss_data.redirect ? n->rss_data.default_queue : -1;
1870 }
1871
1872 hash = net_rx_pkt_calc_rss_hash(pkt, net_hash_type, n->rss_data.key);
1873
1874 if (n->rss_data.populate_hash) {
1875 hdr->hash_value = hash;
1876 hdr->hash_report = reports[net_hash_type];
1877 }
1878
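    /* The mask below works because the guest-programmed indirection table
     * length is constrained to a power of two when RSS is configured. */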
1879 if (n->rss_data.redirect) {
1880 new_index = hash & (n->rss_data.indirections_len - 1);
1881 new_index = n->rss_data.indirections_table[new_index];
1882 }
1883
1884 return (index == new_index) ? -1 : new_index;
1885 }
1886
1887 static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
1888 size_t size, bool no_rss)
1889 {
1890 VirtIONet *n = qemu_get_nic_opaque(nc);
1891 VirtIONetQueue *q = virtio_net_get_subqueue(nc);
1892 VirtIODevice *vdev = VIRTIO_DEVICE(n);
1893 VirtQueueElement *elems[VIRTQUEUE_MAX_SIZE];
1894 size_t lens[VIRTQUEUE_MAX_SIZE];
1895 struct iovec mhdr_sg[VIRTQUEUE_MAX_SIZE];
1896 struct virtio_net_hdr_v1_hash extra_hdr;
1897 unsigned mhdr_cnt = 0;
1898 size_t offset, i, guest_offset, j;
1899 ssize_t err;
1900
1901 if (!virtio_net_can_receive(nc)) {
1902 return -1;
1903 }
1904
1905 if (!no_rss && n->rss_data.enabled && n->rss_data.enabled_software_rss) {
1906 int index = virtio_net_process_rss(nc, buf, size, &extra_hdr);
1907 if (index >= 0) {
1908 NetClientState *nc2 = qemu_get_subqueue(n->nic, index);
1909 return virtio_net_receive_rcu(nc2, buf, size, true);
1910 }
1911 }
1912
1913 /* hdr_len refers to the header we supply to the guest */
1914 if (!virtio_net_has_buffers(q, size + n->guest_hdr_len - n->host_hdr_len)) {
1915 return 0;
1916 }
1917
1918 if (!receive_filter(n, buf, size))
1919 return size;
1920
1921 offset = i = 0;
1922
1923 while (offset < size) {
1924 VirtQueueElement *elem;
1925 int len, total;
1926 const struct iovec *sg;
1927
1928 total = 0;
1929
1930 if (i == VIRTQUEUE_MAX_SIZE) {
1931 virtio_error(vdev, "virtio-net unexpected long buffer chain");
1932 err = size;
1933 goto err;
1934 }
1935
1936 elem = virtqueue_pop(q->rx_vq, sizeof(VirtQueueElement));
1937 if (!elem) {
1938 if (i) {
1939 virtio_error(vdev, "virtio-net unexpected empty queue: "
1940 "i %zd mergeable %d offset %zd, size %zd, "
1941 "guest hdr len %zd, host hdr len %zd "
1942 "guest features 0x%" PRIx64,
1943 i, n->mergeable_rx_bufs, offset, size,
1944 n->guest_hdr_len, n->host_hdr_len,
1945 vdev->guest_features);
1946 }
1947 err = -1;
1948 goto err;
1949 }
1950
1951 if (elem->in_num < 1) {
1952 virtio_error(vdev,
1953 "virtio-net receive queue contains no in buffers");
1954 virtqueue_detach_element(q->rx_vq, elem, 0);
1955 g_free(elem);
1956 err = -1;
1957 goto err;
1958 }
1959
1960 sg = elem->in_sg;
1961 if (i == 0) {
1962 assert(offset == 0);
1963 if (n->mergeable_rx_bufs) {
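                /*
                 * Remember where hdr.num_buffers lives inside the first
                 * element's scatter list; it is patched further below once
                 * the number of descriptors used by this packet is known.
                 */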
1964 mhdr_cnt = iov_copy(mhdr_sg, ARRAY_SIZE(mhdr_sg),
1965 sg, elem->in_num,
1966 offsetof(typeof(extra_hdr), hdr.num_buffers),
1967 sizeof(extra_hdr.hdr.num_buffers));
1968 }
1969
1970 receive_header(n, sg, elem->in_num, buf, size);
1971 if (n->rss_data.populate_hash) {
1972 offset = offsetof(typeof(extra_hdr), hash_value);
1973 iov_from_buf(sg, elem->in_num, offset,
1974 (char *)&extra_hdr + offset,
1975 sizeof(extra_hdr.hash_value) +
1976 sizeof(extra_hdr.hash_report));
1977 }
1978 offset = n->host_hdr_len;
1979 total += n->guest_hdr_len;
1980 guest_offset = n->guest_hdr_len;
1981 } else {
1982 guest_offset = 0;
1983 }
1984
1985 /* copy in packet. ugh */
1986 len = iov_from_buf(sg, elem->in_num, guest_offset,
1987 buf + offset, size - offset);
1988 total += len;
1989 offset += len;
1990 /* If buffers can't be merged, at this point we
1991 * must have consumed the complete packet.
1992 * Otherwise, drop it. */
1993 if (!n->mergeable_rx_bufs && offset < size) {
1994 virtqueue_unpop(q->rx_vq, elem, total);
1995 g_free(elem);
1996 err = size;
1997 goto err;
1998 }
1999
2000 elems[i] = elem;
2001 lens[i] = total;
2002 i++;
2003 }
2004
2005 if (mhdr_cnt) {
2006 virtio_stw_p(vdev, &extra_hdr.hdr.num_buffers, i);
2007 iov_from_buf(mhdr_sg, mhdr_cnt,
2008 0,
2009 &extra_hdr.hdr.num_buffers,
2010 sizeof extra_hdr.hdr.num_buffers);
2011 }
2012
2013 for (j = 0; j < i; j++) {
2014 /* signal other side */
2015 virtqueue_fill(q->rx_vq, elems[j], lens[j], j);
2016 g_free(elems[j]);
2017 }
2018
2019 virtqueue_flush(q->rx_vq, i);
2020 virtio_notify(vdev, q->rx_vq);
2021
2022 return size;
2023
2024 err:
2025 for (j = 0; j < i; j++) {
2026 virtqueue_detach_element(q->rx_vq, elems[j], lens[j]);
2027 g_free(elems[j]);
2028 }
2029
2030 return err;
2031 }
2032
2033 static ssize_t virtio_net_do_receive(NetClientState *nc, const uint8_t *buf,
2034 size_t size)
2035 {
2036 RCU_READ_LOCK_GUARD();
2037
2038 return virtio_net_receive_rcu(nc, buf, size, false);
2039 }
2040
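/*
 * Software RSC (receive segment coalescing): consecutive TCP segments that
 * belong to the same flow are merged into one larger packet before being
 * handed to the guest, similar to what a hardware LRO engine would do.
 * The helpers below parse the relevant IPv4/IPv6 and TCP fields of a frame
 * into a VirtioNetRscUnit.
 */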
2041 static void virtio_net_rsc_extract_unit4(VirtioNetRscChain *chain,
2042 const uint8_t *buf,
2043 VirtioNetRscUnit *unit)
2044 {
2045 uint16_t ip_hdrlen;
2046 struct ip_header *ip;
2047
2048 ip = (struct ip_header *)(buf + chain->n->guest_hdr_len
2049 + sizeof(struct eth_header));
2050 unit->ip = (void *)ip;
2051 ip_hdrlen = (ip->ip_ver_len & 0xF) << 2;
2052 unit->ip_plen = &ip->ip_len;
2053 unit->tcp = (struct tcp_header *)(((uint8_t *)unit->ip) + ip_hdrlen);
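    /* The TCP data offset sits in the top 4 bits of th_offset_flags and is
     * counted in 32-bit words; ">> 12" followed by "* 4" collapses to ">> 10". */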
2054 unit->tcp_hdrlen = (htons(unit->tcp->th_offset_flags) & 0xF000) >> 10;
2055 unit->payload = htons(*unit->ip_plen) - ip_hdrlen - unit->tcp_hdrlen;
2056 }
2057
2058 static void virtio_net_rsc_extract_unit6(VirtioNetRscChain *chain,
2059 const uint8_t *buf,
2060 VirtioNetRscUnit *unit)
2061 {
2062 struct ip6_header *ip6;
2063
2064 ip6 = (struct ip6_header *)(buf + chain->n->guest_hdr_len
2065 + sizeof(struct eth_header));
2066 unit->ip = ip6;
2067 unit->ip_plen = &(ip6->ip6_ctlun.ip6_un1.ip6_un1_plen);
2068 unit->tcp = (struct tcp_header *)(((uint8_t *)unit->ip)
2069 + sizeof(struct ip6_header));
2070 unit->tcp_hdrlen = (htons(unit->tcp->th_offset_flags) & 0xF000) >> 10;
2071
2072     /* Unlike IPv4, the IPv6 payload length field does not include
2073        the IP header itself */
2074 unit->payload = htons(*unit->ip_plen) - unit->tcp_hdrlen;
2075 }
2076
2077 static size_t virtio_net_rsc_drain_seg(VirtioNetRscChain *chain,
2078 VirtioNetRscSeg *seg)
2079 {
2080 int ret;
2081 struct virtio_net_hdr_v1 *h;
2082
2083 h = (struct virtio_net_hdr_v1 *)seg->buf;
2084 h->flags = 0;
2085 h->gso_type = VIRTIO_NET_HDR_GSO_NONE;
2086
2087 if (seg->is_coalesced) {
2088 h->rsc.segments = seg->packets;
2089 h->rsc.dup_acks = seg->dup_ack;
2090 h->flags = VIRTIO_NET_HDR_F_RSC_INFO;
2091 if (chain->proto == ETH_P_IP) {
2092 h->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
2093 } else {
2094 h->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
2095 }
2096 }
2097
2098 ret = virtio_net_do_receive(seg->nc, seg->buf, seg->size);
2099 QTAILQ_REMOVE(&chain->buffers, seg, next);
2100 g_free(seg->buf);
2101 g_free(seg);
2102
2103 return ret;
2104 }
2105
2106 static void virtio_net_rsc_purge(void *opq)
2107 {
2108 VirtioNetRscSeg *seg, *rn;
2109 VirtioNetRscChain *chain = (VirtioNetRscChain *)opq;
2110
2111 QTAILQ_FOREACH_SAFE(seg, &chain->buffers, next, rn) {
2112 if (virtio_net_rsc_drain_seg(chain, seg) == 0) {
2113 chain->stat.purge_failed++;
2114 continue;
2115 }
2116 }
2117
2118 chain->stat.timer++;
2119 if (!QTAILQ_EMPTY(&chain->buffers)) {
2120 timer_mod(chain->drain_timer,
2121 qemu_clock_get_ns(QEMU_CLOCK_HOST) + chain->n->rsc_timeout);
2122 }
2123 }
2124
2125 static void virtio_net_rsc_cleanup(VirtIONet *n)
2126 {
2127 VirtioNetRscChain *chain, *rn_chain;
2128 VirtioNetRscSeg *seg, *rn_seg;
2129
2130 QTAILQ_FOREACH_SAFE(chain, &n->rsc_chains, next, rn_chain) {
2131 QTAILQ_FOREACH_SAFE(seg, &chain->buffers, next, rn_seg) {
2132 QTAILQ_REMOVE(&chain->buffers, seg, next);
2133 g_free(seg->buf);
2134 g_free(seg);
2135 }
2136
2137 timer_free(chain->drain_timer);
2138 QTAILQ_REMOVE(&n->rsc_chains, chain, next);
2139 g_free(chain);
2140 }
2141 }
2142
2143 static void virtio_net_rsc_cache_buf(VirtioNetRscChain *chain,
2144 NetClientState *nc,
2145 const uint8_t *buf, size_t size)
2146 {
2147 uint16_t hdr_len;
2148 VirtioNetRscSeg *seg;
2149
2150 hdr_len = chain->n->guest_hdr_len;
2151 seg = g_new(VirtioNetRscSeg, 1);
2152 seg->buf = g_malloc(hdr_len + sizeof(struct eth_header)
2153 + sizeof(struct ip6_header) + VIRTIO_NET_MAX_TCP_PAYLOAD);
2154 memcpy(seg->buf, buf, size);
2155 seg->size = size;
2156 seg->packets = 1;
2157 seg->dup_ack = 0;
2158 seg->is_coalesced = 0;
2159 seg->nc = nc;
2160
2161 QTAILQ_INSERT_TAIL(&chain->buffers, seg, next);
2162 chain->stat.cache++;
2163
2164 switch (chain->proto) {
2165 case ETH_P_IP:
2166 virtio_net_rsc_extract_unit4(chain, seg->buf, &seg->unit);
2167 break;
2168 case ETH_P_IPV6:
2169 virtio_net_rsc_extract_unit6(chain, seg->buf, &seg->unit);
2170 break;
2171 default:
2172 g_assert_not_reached();
2173 }
2174 }
2175
2176 static int32_t virtio_net_rsc_handle_ack(VirtioNetRscChain *chain,
2177 VirtioNetRscSeg *seg,
2178 const uint8_t *buf,
2179 struct tcp_header *n_tcp,
2180 struct tcp_header *o_tcp)
2181 {
2182 uint32_t nack, oack;
2183 uint16_t nwin, owin;
2184
2185 nack = htonl(n_tcp->th_ack);
2186 nwin = htons(n_tcp->th_win);
2187 oack = htonl(o_tcp->th_ack);
2188 owin = htons(o_tcp->th_win);
2189
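    /* Unsigned arithmetic copes with sequence wrap: an ACK that is behind the
     * cached one, or too far ahead of it, falls outside the window. */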
2190 if ((nack - oack) >= VIRTIO_NET_MAX_TCP_PAYLOAD) {
2191 chain->stat.ack_out_of_win++;
2192 return RSC_FINAL;
2193 } else if (nack == oack) {
2194 /* duplicated ack or window probe */
2195 if (nwin == owin) {
2196 /* duplicated ack, add dup ack count due to whql test up to 1 */
2197 chain->stat.dup_ack++;
2198 return RSC_FINAL;
2199 } else {
2200 /* Coalesce window update */
2201 o_tcp->th_win = n_tcp->th_win;
2202 chain->stat.win_update++;
2203 return RSC_COALESCE;
2204 }
2205 } else {
2206 /* pure ack, go to 'C', finalize*/
2207 chain->stat.pure_ack++;
2208 return RSC_FINAL;
2209 }
2210 }
2211
2212 static int32_t virtio_net_rsc_coalesce_data(VirtioNetRscChain *chain,
2213 VirtioNetRscSeg *seg,
2214 const uint8_t *buf,
2215 VirtioNetRscUnit *n_unit)
2216 {
2217 void *data;
2218 uint16_t o_ip_len;
2219 uint32_t nseq, oseq;
2220 VirtioNetRscUnit *o_unit;
2221
2222 o_unit = &seg->unit;
2223 o_ip_len = htons(*o_unit->ip_plen);
2224 nseq = htonl(n_unit->tcp->th_seq);
2225 oseq = htonl(o_unit->tcp->th_seq);
2226
2227 /* out of order or retransmitted. */
2228 if ((nseq - oseq) > VIRTIO_NET_MAX_TCP_PAYLOAD) {
2229 chain->stat.data_out_of_win++;
2230 return RSC_FINAL;
2231 }
2232
2233 data = ((uint8_t *)n_unit->tcp) + n_unit->tcp_hdrlen;
2234 if (nseq == oseq) {
2235 if ((o_unit->payload == 0) && n_unit->payload) {
2236 /* From no payload to payload, normal case, not a dup ack or etc */
2237 chain->stat.data_after_pure_ack++;
2238 goto coalesce;
2239 } else {
2240 return virtio_net_rsc_handle_ack(chain, seg, buf,
2241 n_unit->tcp, o_unit->tcp);
2242 }
2243 } else if ((nseq - oseq) != o_unit->payload) {
2244 /* Not a consistent packet, out of order */
2245 chain->stat.data_out_of_order++;
2246 return RSC_FINAL;
2247 } else {
2248 coalesce:
2249 if ((o_ip_len + n_unit->payload) > chain->max_payload) {
2250 chain->stat.over_size++;
2251 return RSC_FINAL;
2252 }
2253
2254 /* Here comes the right data, the payload length in v4/v6 is different,
2255 so use the field value to update and record the new data len */
2256 o_unit->payload += n_unit->payload; /* update new data len */
2257
2258 /* update field in ip header */
2259 *o_unit->ip_plen = htons(o_ip_len + n_unit->payload);
2260
2261         /* Take the latest 'PUSH' flag: the whql test guide says 'PUSH' can be
2262            coalesced for a windows guest, while this may change the behavior for
2263            a linux guest (only if it uses the RSC feature). */
2264 o_unit->tcp->th_offset_flags = n_unit->tcp->th_offset_flags;
2265
2266 o_unit->tcp->th_ack = n_unit->tcp->th_ack;
2267 o_unit->tcp->th_win = n_unit->tcp->th_win;
2268
2269 memmove(seg->buf + seg->size, data, n_unit->payload);
2270 seg->size += n_unit->payload;
2271 seg->packets++;
2272 chain->stat.coalesced++;
2273 return RSC_COALESCE;
2274 }
2275 }
2276
2277 static int32_t virtio_net_rsc_coalesce4(VirtioNetRscChain *chain,
2278 VirtioNetRscSeg *seg,
2279 const uint8_t *buf, size_t size,
2280 VirtioNetRscUnit *unit)
2281 {
2282 struct ip_header *ip1, *ip2;
2283
2284 ip1 = (struct ip_header *)(unit->ip);
2285 ip2 = (struct ip_header *)(seg->unit.ip);
2286 if ((ip1->ip_src ^ ip2->ip_src) || (ip1->ip_dst ^ ip2->ip_dst)
2287 || (unit->tcp->th_sport ^ seg->unit.tcp->th_sport)
2288 || (unit->tcp->th_dport ^ seg->unit.tcp->th_dport)) {
2289 chain->stat.no_match++;
2290 return RSC_NO_MATCH;
2291 }
2292
2293 return virtio_net_rsc_coalesce_data(chain, seg, buf, unit);
2294 }
2295
2296 static int32_t virtio_net_rsc_coalesce6(VirtioNetRscChain *chain,
2297 VirtioNetRscSeg *seg,
2298 const uint8_t *buf, size_t size,
2299 VirtioNetRscUnit *unit)
2300 {
2301 struct ip6_header *ip1, *ip2;
2302
2303 ip1 = (struct ip6_header *)(unit->ip);
2304 ip2 = (struct ip6_header *)(seg->unit.ip);
2305 if (memcmp(&ip1->ip6_src, &ip2->ip6_src, sizeof(struct in6_address))
2306 || memcmp(&ip1->ip6_dst, &ip2->ip6_dst, sizeof(struct in6_address))
2307 || (unit->tcp->th_sport ^ seg->unit.tcp->th_sport)
2308 || (unit->tcp->th_dport ^ seg->unit.tcp->th_dport)) {
2309 chain->stat.no_match++;
2310 return RSC_NO_MATCH;
2311 }
2312
2313 return virtio_net_rsc_coalesce_data(chain, seg, buf, unit);
2314 }
2315
2316 /* Packets with the SYN flag should bypass coalescing; packets with other
2317  * control flags should only be sent after a drain, to prevent reordering */
2318 static int virtio_net_rsc_tcp_ctrl_check(VirtioNetRscChain *chain,
2319 struct tcp_header *tcp)
2320 {
2321 uint16_t tcp_hdr;
2322 uint16_t tcp_flag;
2323
2324 tcp_flag = htons(tcp->th_offset_flags);
2325 tcp_hdr = (tcp_flag & VIRTIO_NET_TCP_HDR_LENGTH) >> 10;
2326 tcp_flag &= VIRTIO_NET_TCP_FLAG;
2327 if (tcp_flag & TH_SYN) {
2328 chain->stat.tcp_syn++;
2329 return RSC_BYPASS;
2330 }
2331
2332 if (tcp_flag & (TH_FIN | TH_URG | TH_RST | TH_ECE | TH_CWR)) {
2333 chain->stat.tcp_ctrl_drain++;
2334 return RSC_FINAL;
2335 }
2336
2337 if (tcp_hdr > sizeof(struct tcp_header)) {
2338 chain->stat.tcp_all_opt++;
2339 return RSC_FINAL;
2340 }
2341
2342 return RSC_CANDIDATE;
2343 }
2344
2345 static size_t virtio_net_rsc_do_coalesce(VirtioNetRscChain *chain,
2346 NetClientState *nc,
2347 const uint8_t *buf, size_t size,
2348 VirtioNetRscUnit *unit)
2349 {
2350 int ret;
2351 VirtioNetRscSeg *seg, *nseg;
2352
2353 if (QTAILQ_EMPTY(&chain->buffers)) {
2354 chain->stat.empty_cache++;
2355 virtio_net_rsc_cache_buf(chain, nc, buf, size);
2356 timer_mod(chain->drain_timer,
2357 qemu_clock_get_ns(QEMU_CLOCK_HOST) + chain->n->rsc_timeout);
2358 return size;
2359 }
2360
2361 QTAILQ_FOREACH_SAFE(seg, &chain->buffers, next, nseg) {
2362 if (chain->proto == ETH_P_IP) {
2363 ret = virtio_net_rsc_coalesce4(chain, seg, buf, size, unit);
2364 } else {
2365 ret = virtio_net_rsc_coalesce6(chain, seg, buf, size, unit);
2366 }
2367
2368 if (ret == RSC_FINAL) {
2369 if (virtio_net_rsc_drain_seg(chain, seg) == 0) {
2370 /* Send failed */
2371 chain->stat.final_failed++;
2372 return 0;
2373 }
2374
2375 /* Send current packet */
2376 return virtio_net_do_receive(nc, buf, size);
2377 } else if (ret == RSC_NO_MATCH) {
2378 continue;
2379 } else {
2380 /* Coalesced, mark coalesced flag to tell calc cksum for ipv4 */
2381 seg->is_coalesced = 1;
2382 return size;
2383 }
2384 }
2385
2386 chain->stat.no_match_cache++;
2387 virtio_net_rsc_cache_buf(chain, nc, buf, size);
2388 return size;
2389 }
2390
2391 /* Drain a connection's cached data; this avoids out-of-order segments */
2392 static size_t virtio_net_rsc_drain_flow(VirtioNetRscChain *chain,
2393 NetClientState *nc,
2394 const uint8_t *buf, size_t size,
2395 uint16_t ip_start, uint16_t ip_size,
2396 uint16_t tcp_port)
2397 {
2398 VirtioNetRscSeg *seg, *nseg;
2399 uint32_t ppair1, ppair2;
2400
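    /* The TCP source and destination ports are adjacent in the header, so
     * both are compared with a single 32-bit load at offset 'tcp_port'. */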
2401 ppair1 = *(uint32_t *)(buf + tcp_port);
2402 QTAILQ_FOREACH_SAFE(seg, &chain->buffers, next, nseg) {
2403 ppair2 = *(uint32_t *)(seg->buf + tcp_port);
2404 if (memcmp(buf + ip_start, seg->buf + ip_start, ip_size)
2405 || (ppair1 != ppair2)) {
2406 continue;
2407 }
2408 if (virtio_net_rsc_drain_seg(chain, seg) == 0) {
2409 chain->stat.drain_failed++;
2410 }
2411
2412 break;
2413 }
2414
2415 return virtio_net_do_receive(nc, buf, size);
2416 }
2417
2418 static int32_t virtio_net_rsc_sanity_check4(VirtioNetRscChain *chain,
2419 struct ip_header *ip,
2420 const uint8_t *buf, size_t size)
2421 {
2422 uint16_t ip_len;
2423
2424 /* Not an ipv4 packet */
2425 if (((ip->ip_ver_len & 0xF0) >> 4) != IP_HEADER_VERSION_4) {
2426 chain->stat.ip_option++;
2427 return RSC_BYPASS;
2428 }
2429
2430 /* Don't handle packets with ip option */
2431 if ((ip->ip_ver_len & 0xF) != VIRTIO_NET_IP4_HEADER_LENGTH) {
2432 chain->stat.ip_option++;
2433 return RSC_BYPASS;
2434 }
2435
2436 if (ip->ip_p != IPPROTO_TCP) {
2437 chain->stat.bypass_not_tcp++;
2438 return RSC_BYPASS;
2439 }
2440
2441 /* Don't handle packets with ip fragment */
2442 if (!(htons(ip->ip_off) & IP_DF)) {
2443 chain->stat.ip_frag++;
2444 return RSC_BYPASS;
2445 }
2446
2447 /* Don't handle packets with ecn flag */
2448 if (IPTOS_ECN(ip->ip_tos)) {
2449 chain->stat.ip_ecn++;
2450 return RSC_BYPASS;
2451 }
2452
2453 ip_len = htons(ip->ip_len);
2454 if (ip_len < (sizeof(struct ip_header) + sizeof(struct tcp_header))
2455 || ip_len > (size - chain->n->guest_hdr_len -
2456 sizeof(struct eth_header))) {
2457 chain->stat.ip_hacked++;
2458 return RSC_BYPASS;
2459 }
2460
2461 return RSC_CANDIDATE;
2462 }
2463
2464 static size_t virtio_net_rsc_receive4(VirtioNetRscChain *chain,
2465 NetClientState *nc,
2466 const uint8_t *buf, size_t size)
2467 {
2468 int32_t ret;
2469 uint16_t hdr_len;
2470 VirtioNetRscUnit unit;
2471
2472 hdr_len = ((VirtIONet *)(chain->n))->guest_hdr_len;
2473
2474 if (size < (hdr_len + sizeof(struct eth_header) + sizeof(struct ip_header)
2475 + sizeof(struct tcp_header))) {
2476 chain->stat.bypass_not_tcp++;
2477 return virtio_net_do_receive(nc, buf, size);
2478 }
2479
2480 virtio_net_rsc_extract_unit4(chain, buf, &unit);
2481 if (virtio_net_rsc_sanity_check4(chain, unit.ip, buf, size)
2482 != RSC_CANDIDATE) {
2483 return virtio_net_do_receive(nc, buf, size);
2484 }
2485
2486 ret = virtio_net_rsc_tcp_ctrl_check(chain, unit.tcp);
2487 if (ret == RSC_BYPASS) {
2488 return virtio_net_do_receive(nc, buf, size);
2489 } else if (ret == RSC_FINAL) {
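        /* 12 is the offset of the source address inside the IPv4 header;
         * saddr + daddr (VIRTIO_NET_IP4_ADDR_SIZE bytes) plus the TCP port
         * pair identify the flow that has to be drained. */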
2490 return virtio_net_rsc_drain_flow(chain, nc, buf, size,
2491 ((hdr_len + sizeof(struct eth_header)) + 12),
2492 VIRTIO_NET_IP4_ADDR_SIZE,
2493 hdr_len + sizeof(struct eth_header) + sizeof(struct ip_header));
2494 }
2495
2496 return virtio_net_rsc_do_coalesce(chain, nc, buf, size, &unit);
2497 }
2498
2499 static int32_t virtio_net_rsc_sanity_check6(VirtioNetRscChain *chain,
2500 struct ip6_header *ip6,
2501 const uint8_t *buf, size_t size)
2502 {
2503 uint16_t ip_len;
2504
2505 if (((ip6->ip6_ctlun.ip6_un1.ip6_un1_flow & 0xF0) >> 4)
2506 != IP_HEADER_VERSION_6) {
2507 return RSC_BYPASS;
2508 }
2509
2510     /* Both options and the protocol are checked by this single test */
2511 if (ip6->ip6_ctlun.ip6_un1.ip6_un1_nxt != IPPROTO_TCP) {
2512 chain->stat.bypass_not_tcp++;
2513 return RSC_BYPASS;
2514 }
2515
2516 ip_len = htons(ip6->ip6_ctlun.ip6_un1.ip6_un1_plen);
2517 if (ip_len < sizeof(struct tcp_header) ||
2518 ip_len > (size - chain->n->guest_hdr_len - sizeof(struct eth_header)
2519 - sizeof(struct ip6_header))) {
2520 chain->stat.ip_hacked++;
2521 return RSC_BYPASS;
2522 }
2523
2524 /* Don't handle packets with ecn flag */
2525 if (IP6_ECN(ip6->ip6_ctlun.ip6_un3.ip6_un3_ecn)) {
2526 chain->stat.ip_ecn++;
2527 return RSC_BYPASS;
2528 }
2529
2530 return RSC_CANDIDATE;
2531 }
2532
2533 static size_t virtio_net_rsc_receive6(void *opq, NetClientState *nc,
2534 const uint8_t *buf, size_t size)
2535 {
2536 int32_t ret;
2537 uint16_t hdr_len;
2538 VirtioNetRscChain *chain;
2539 VirtioNetRscUnit unit;
2540
2541 chain = opq;
2542 hdr_len = ((VirtIONet *)(chain->n))->guest_hdr_len;
2543
2544 if (size < (hdr_len + sizeof(struct eth_header) + sizeof(struct ip6_header)
2545 + sizeof(tcp_header))) {
2546 return virtio_net_do_receive(nc, buf, size);
2547 }
2548
2549 virtio_net_rsc_extract_unit6(chain, buf, &unit);
2550 if (RSC_CANDIDATE != virtio_net_rsc_sanity_check6(chain,
2551 unit.ip, buf, size)) {
2552 return virtio_net_do_receive(nc, buf, size);
2553 }
2554
2555 ret = virtio_net_rsc_tcp_ctrl_check(chain, unit.tcp);
2556 if (ret == RSC_BYPASS) {
2557 return virtio_net_do_receive(nc, buf, size);
2558 } else if (ret == RSC_FINAL) {
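        /* 8 is the offset of the source address inside the IPv6 header;
         * src + dst (VIRTIO_NET_IP6_ADDR_SIZE bytes) plus the TCP port pair
         * identify the flow that has to be drained. */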
2559 return virtio_net_rsc_drain_flow(chain, nc, buf, size,
2560 ((hdr_len + sizeof(struct eth_header)) + 8),
2561 VIRTIO_NET_IP6_ADDR_SIZE,
2562 hdr_len + sizeof(struct eth_header)
2563 + sizeof(struct ip6_header));
2564 }
2565
2566 return virtio_net_rsc_do_coalesce(chain, nc, buf, size, &unit);
2567 }
2568
2569 static VirtioNetRscChain *virtio_net_rsc_lookup_chain(VirtIONet *n,
2570 NetClientState *nc,
2571 uint16_t proto)
2572 {
2573 VirtioNetRscChain *chain;
2574
2575 if ((proto != (uint16_t)ETH_P_IP) && (proto != (uint16_t)ETH_P_IPV6)) {
2576 return NULL;
2577 }
2578
2579 QTAILQ_FOREACH(chain, &n->rsc_chains, next) {
2580 if (chain->proto == proto) {
2581 return chain;
2582 }
2583 }
2584
2585 chain = g_malloc(sizeof(*chain));
2586 chain->n = n;
2587 chain->proto = proto;
2588 if (proto == (uint16_t)ETH_P_IP) {
2589 chain->max_payload = VIRTIO_NET_MAX_IP4_PAYLOAD;
2590 chain->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
2591 } else {
2592 chain->max_payload = VIRTIO_NET_MAX_IP6_PAYLOAD;
2593 chain->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
2594 }
2595 chain->drain_timer = timer_new_ns(QEMU_CLOCK_HOST,
2596 virtio_net_rsc_purge, chain);
2597 memset(&chain->stat, 0, sizeof(chain->stat));
2598
2599 QTAILQ_INIT(&chain->buffers);
2600 QTAILQ_INSERT_TAIL(&n->rsc_chains, chain, next);
2601
2602 return chain;
2603 }
2604
2605 static ssize_t virtio_net_rsc_receive(NetClientState *nc,
2606 const uint8_t *buf,
2607 size_t size)
2608 {
2609 uint16_t proto;
2610 VirtioNetRscChain *chain;
2611 struct eth_header *eth;
2612 VirtIONet *n;
2613
2614 n = qemu_get_nic_opaque(nc);
2615 if (size < (n->host_hdr_len + sizeof(struct eth_header))) {
2616 return virtio_net_do_receive(nc, buf, size);
2617 }
2618
2619 eth = (struct eth_header *)(buf + n->guest_hdr_len);
2620 proto = htons(eth->h_proto);
2621
2622 chain = virtio_net_rsc_lookup_chain(n, nc, proto);
2623 if (chain) {
2624 chain->stat.received++;
2625 if (proto == (uint16_t)ETH_P_IP && n->rsc4_enabled) {
2626 return virtio_net_rsc_receive4(chain, nc, buf, size);
2627 } else if (proto == (uint16_t)ETH_P_IPV6 && n->rsc6_enabled) {
2628 return virtio_net_rsc_receive6(chain, nc, buf, size);
2629 }
2630 }
2631 return virtio_net_do_receive(nc, buf, size);
2632 }
2633
2634 static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf,
2635 size_t size)
2636 {
2637 VirtIONet *n = qemu_get_nic_opaque(nc);
2638 if ((n->rsc4_enabled || n->rsc6_enabled)) {
2639 return virtio_net_rsc_receive(nc, buf, size);
2640 } else {
2641 return virtio_net_do_receive(nc, buf, size);
2642 }
2643 }
2644
2645 static int32_t virtio_net_flush_tx(VirtIONetQueue *q);
2646
2647 static void virtio_net_tx_complete(NetClientState *nc, ssize_t len)
2648 {
2649 VirtIONet *n = qemu_get_nic_opaque(nc);
2650 VirtIONetQueue *q = virtio_net_get_subqueue(nc);
2651 VirtIODevice *vdev = VIRTIO_DEVICE(n);
2652 int ret;
2653
2654 virtqueue_push(q->tx_vq, q->async_tx.elem, 0);
2655 virtio_notify(vdev, q->tx_vq);
2656
2657 g_free(q->async_tx.elem);
2658 q->async_tx.elem = NULL;
2659
2660 virtio_queue_set_notification(q->tx_vq, 1);
2661 ret = virtio_net_flush_tx(q);
2662 if (ret >= n->tx_burst) {
2663 /*
2664 * the flush has been stopped by tx_burst
2665 * we will not receive notification for the
2666          * remaining part, so re-schedule
2667 */
2668 virtio_queue_set_notification(q->tx_vq, 0);
2669 if (q->tx_bh) {
2670 qemu_bh_schedule(q->tx_bh);
2671 } else {
2672 timer_mod(q->tx_timer,
2673 qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
2674 }
2675 q->tx_waiting = 1;
2676 }
2677 }
2678
2679 /* TX */
2680 static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
2681 {
2682 VirtIONet *n = q->n;
2683 VirtIODevice *vdev = VIRTIO_DEVICE(n);
2684 VirtQueueElement *elem;
2685 int32_t num_packets = 0;
2686 int queue_index = vq2q(virtio_get_queue_index(q->tx_vq));
2687 if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
2688 return num_packets;
2689 }
2690
2691 if (q->async_tx.elem) {
2692 virtio_queue_set_notification(q->tx_vq, 0);
2693 return num_packets;
2694 }
2695
2696 for (;;) {
2697 ssize_t ret;
2698 unsigned int out_num;
2699 struct iovec sg[VIRTQUEUE_MAX_SIZE], sg2[VIRTQUEUE_MAX_SIZE + 1], *out_sg;
2700 struct virtio_net_hdr vhdr;
2701
2702 elem = virtqueue_pop(q->tx_vq, sizeof(VirtQueueElement));
2703 if (!elem) {
2704 break;
2705 }
2706
2707 out_num = elem->out_num;
2708 out_sg = elem->out_sg;
2709 if (out_num < 1) {
2710 virtio_error(vdev, "virtio-net header not in first element");
2711 goto detach;
2712 }
2713
2714 if (n->needs_vnet_hdr_swap) {
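            /*
             * Guest and host disagree on the vnet header endianness: pull the
             * header out, byte-swap it and rebuild the scatter list in sg2
             * with the swapped copy prepended.
             */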
2715 if (iov_to_buf(out_sg, out_num, 0, &vhdr, sizeof(vhdr)) <
2716 sizeof(vhdr)) {
2717 virtio_error(vdev, "virtio-net header incorrect");
2718 goto detach;
2719 }
2720 virtio_net_hdr_swap(vdev, &vhdr);
2721 sg2[0].iov_base = &vhdr;
2722 sg2[0].iov_len = sizeof(vhdr);
2723 out_num = iov_copy(&sg2[1], ARRAY_SIZE(sg2) - 1, out_sg, out_num,
2724 sizeof(vhdr), -1);
2725 if (out_num == VIRTQUEUE_MAX_SIZE) {
2726 goto drop;
2727 }
2728 out_num += 1;
2729 out_sg = sg2;
2730 }
2731 /*
2732 * If host wants to see the guest header as is, we can
2733 * pass it on unchanged. Otherwise, copy just the parts
2734 * that host is interested in.
2735 */
2736 assert(n->host_hdr_len <= n->guest_hdr_len);
2737 if (n->host_hdr_len != n->guest_hdr_len) {
2738 unsigned sg_num = iov_copy(sg, ARRAY_SIZE(sg),
2739 out_sg, out_num,
2740 0, n->host_hdr_len);
2741 sg_num += iov_copy(sg + sg_num, ARRAY_SIZE(sg) - sg_num,
2742 out_sg, out_num,
2743 n->guest_hdr_len, -1);
2744 out_num = sg_num;
2745 out_sg = sg;
2746
2747 if (out_num < 1) {
2748 virtio_error(vdev, "virtio-net nothing to send");
2749 goto detach;
2750 }
2751 }
2752
2753 ret = qemu_sendv_packet_async(qemu_get_subqueue(n->nic, queue_index),
2754 out_sg, out_num, virtio_net_tx_complete);
2755 if (ret == 0) {
2756 virtio_queue_set_notification(q->tx_vq, 0);
2757 q->async_tx.elem = elem;
2758 return -EBUSY;
2759 }
2760
2761 drop:
2762 virtqueue_push(q->tx_vq, elem, 0);
2763 virtio_notify(vdev, q->tx_vq);
2764 g_free(elem);
2765
2766 if (++num_packets >= n->tx_burst) {
2767 break;
2768 }
2769 }
2770 return num_packets;
2771
2772 detach:
2773 virtqueue_detach_element(q->tx_vq, elem, 0);
2774 g_free(elem);
2775 return -EINVAL;
2776 }
2777
2778 static void virtio_net_tx_timer(void *opaque);
2779
2780 static void virtio_net_handle_tx_timer(VirtIODevice *vdev, VirtQueue *vq)
2781 {
2782 VirtIONet *n = VIRTIO_NET(vdev);
2783 VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];
2784
2785 if (unlikely((n->status & VIRTIO_NET_S_LINK_UP) == 0)) {
2786 virtio_net_drop_tx_queue_data(vdev, vq);
2787 return;
2788 }
2789
2790 /* This happens when device was stopped but VCPU wasn't. */
2791 if (!vdev->vm_running) {
2792 q->tx_waiting = 1;
2793 return;
2794 }
2795
2796 if (q->tx_waiting) {
2797 /* We already have queued packets, immediately flush */
2798 timer_del(q->tx_timer);
2799 virtio_net_tx_timer(q);
2800 } else {
2801 /* re-arm timer to flush it (and more) on next tick */
2802 timer_mod(q->tx_timer,
2803 qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
2804 q->tx_waiting = 1;
2805 virtio_queue_set_notification(vq, 0);
2806 }
2807 }
2808
2809 static void virtio_net_handle_tx_bh(VirtIODevice *vdev, VirtQueue *vq)
2810 {
2811 VirtIONet *n = VIRTIO_NET(vdev);
2812 VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];
2813
2814 if (unlikely(n->vhost_started)) {
2815 return;
2816 }
2817
2818 if (unlikely((n->status & VIRTIO_NET_S_LINK_UP) == 0)) {
2819 virtio_net_drop_tx_queue_data(vdev, vq);
2820 return;
2821 }
2822
2823 if (unlikely(q->tx_waiting)) {
2824 return;
2825 }
2826 q->tx_waiting = 1;
2827 /* This happens when device was stopped but VCPU wasn't. */
2828 if (!vdev->vm_running) {
2829 return;
2830 }
2831 virtio_queue_set_notification(vq, 0);
2832 qemu_bh_schedule(q->tx_bh);
2833 }
2834
2835 static void virtio_net_tx_timer(void *opaque)
2836 {
2837 VirtIONetQueue *q = opaque;
2838 VirtIONet *n = q->n;
2839 VirtIODevice *vdev = VIRTIO_DEVICE(n);
2840 int ret;
2841
2842     /* This happens when the device was stopped but the timer wasn't. */
2843 if (!vdev->vm_running) {
2844 /* Make sure tx waiting is set, so we'll run when restarted. */
2845 assert(q->tx_waiting);
2846 return;
2847 }
2848
2849 q->tx_waiting = 0;
2850
2851     /* Just in case the driver is not ready any more */
2852 if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
2853 return;
2854 }
2855
2856 ret = virtio_net_flush_tx(q);
2857 if (ret == -EBUSY || ret == -EINVAL) {
2858 return;
2859 }
2860 /*
2861 * If we flush a full burst of packets, assume there are
2862 * more coming and immediately rearm
2863 */
2864 if (ret >= n->tx_burst) {
2865 q->tx_waiting = 1;
2866 timer_mod(q->tx_timer,
2867 qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
2868 return;
2869 }
2870 /*
2871 * If less than a full burst, re-enable notification and flush
2872 * anything that may have come in while we weren't looking. If
2873 * we find something, assume the guest is still active and rearm
2874 */
2875 virtio_queue_set_notification(q->tx_vq, 1);
2876 ret = virtio_net_flush_tx(q);
2877 if (ret > 0) {
2878 virtio_queue_set_notification(q->tx_vq, 0);
2879 q->tx_waiting = 1;
2880 timer_mod(q->tx_timer,
2881 qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
2882 }
2883 }
2884
2885 static void virtio_net_tx_bh(void *opaque)
2886 {
2887 VirtIONetQueue *q = opaque;
2888 VirtIONet *n = q->n;
2889 VirtIODevice *vdev = VIRTIO_DEVICE(n);
2890 int32_t ret;
2891
2892 /* This happens when device was stopped but BH wasn't. */
2893 if (!vdev->vm_running) {
2894 /* Make sure tx waiting is set, so we'll run when restarted. */
2895 assert(q->tx_waiting);
2896 return;
2897 }
2898
2899 q->tx_waiting = 0;
2900
2901     /* Just in case the driver is not ready any more */
2902 if (unlikely(!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))) {
2903 return;
2904 }
2905
2906 ret = virtio_net_flush_tx(q);
2907 if (ret == -EBUSY || ret == -EINVAL) {
2908 return; /* Notification re-enable handled by tx_complete or device
2909 * broken */
2910 }
2911
2912 /* If we flush a full burst of packets, assume there are
2913 * more coming and immediately reschedule */
2914 if (ret >= n->tx_burst) {
2915 qemu_bh_schedule(q->tx_bh);
2916 q->tx_waiting = 1;
2917 return;
2918 }
2919
2920 /* If less than a full burst, re-enable notification and flush
2921 * anything that may have come in while we weren't looking. If
2922 * we find something, assume the guest is still active and reschedule */
2923 virtio_queue_set_notification(q->tx_vq, 1);
2924 ret = virtio_net_flush_tx(q);
2925 if (ret == -EINVAL) {
2926 return;
2927 } else if (ret > 0) {
2928 virtio_queue_set_notification(q->tx_vq, 0);
2929 qemu_bh_schedule(q->tx_bh);
2930 q->tx_waiting = 1;
2931 }
2932 }
2933
2934 static void virtio_net_add_queue(VirtIONet *n, int index)
2935 {
2936 VirtIODevice *vdev = VIRTIO_DEVICE(n);
2937
2938 n->vqs[index].rx_vq = virtio_add_queue(vdev, n->net_conf.rx_queue_size,
2939 virtio_net_handle_rx);
2940
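    /* The "tx" property selects the transmit mode: "timer" batches
     * transmissions on a timer, anything else uses a bottom half that runs
     * as soon as the guest kicks the queue. */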
2941 if (n->net_conf.tx && !strcmp(n->net_conf.tx, "timer")) {
2942 n->vqs[index].tx_vq =
2943 virtio_add_queue(vdev, n->net_conf.tx_queue_size,
2944 virtio_net_handle_tx_timer);
2945 n->vqs[index].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
2946 virtio_net_tx_timer,
2947 &n->vqs[index]);
2948 } else {
2949 n->vqs[index].tx_vq =
2950 virtio_add_queue(vdev, n->net_conf.tx_queue_size,
2951 virtio_net_handle_tx_bh);
2952 n->vqs[index].tx_bh = qemu_bh_new_guarded(virtio_net_tx_bh, &n->vqs[index],
2953 &DEVICE(vdev)->mem_reentrancy_guard);
2954 }
2955
2956 n->vqs[index].tx_waiting = 0;
2957 n->vqs[index].n = n;
2958 }
2959
2960 static void virtio_net_del_queue(VirtIONet *n, int index)
2961 {
2962 VirtIODevice *vdev = VIRTIO_DEVICE(n);
2963 VirtIONetQueue *q = &n->vqs[index];
2964 NetClientState *nc = qemu_get_subqueue(n->nic, index);
2965
2966 qemu_purge_queued_packets(nc);
2967
2968 virtio_del_queue(vdev, index * 2);
2969 if (q->tx_timer) {
2970 timer_free(q->tx_timer);
2971 q->tx_timer = NULL;
2972 } else {
2973 qemu_bh_delete(q->tx_bh);
2974 q->tx_bh = NULL;
2975 }
2976 q->tx_waiting = 0;
2977 virtio_del_queue(vdev, index * 2 + 1);
2978 }
2979
2980 static void virtio_net_change_num_queue_pairs(VirtIONet *n, int new_max_queue_pairs)
2981 {
2982 VirtIODevice *vdev = VIRTIO_DEVICE(n);
2983 int old_num_queues = virtio_get_num_queues(vdev);
2984 int new_num_queues = new_max_queue_pairs * 2 + 1;
2985 int i;
2986
2987 assert(old_num_queues >= 3);
2988 assert(old_num_queues % 2 == 1);
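    /* Queues are laid out as rx[0], tx[0], rx[1], tx[1], ..., ctrl: a device
     * with N queue pairs exposes 2 * N + 1 virtqueues, hence the loops below
     * step by 2 over the data queues. */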
2989
2990 if (old_num_queues == new_num_queues) {
2991 return;
2992 }
2993
2994 /*
2995 * We always need to remove and add ctrl vq if
2996 * old_num_queues != new_num_queues. Remove ctrl_vq first,
2997 * and then we only enter one of the following two loops.
2998 */
2999 virtio_del_queue(vdev, old_num_queues - 1);
3000
3001 for (i = new_num_queues - 1; i < old_num_queues - 1; i += 2) {
3002 /* new_num_queues < old_num_queues */
3003 virtio_net_del_queue(n, i / 2);
3004 }
3005
3006 for (i = old_num_queues - 1; i < new_num_queues - 1; i += 2) {
3007 /* new_num_queues > old_num_queues */
3008 virtio_net_add_queue(n, i / 2);
3009 }
3010
3011 /* add ctrl_vq last */
3012 n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
3013 }
3014
3015 static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue)
3016 {
3017 int max = multiqueue ? n->max_queue_pairs : 1;
3018
3019 n->multiqueue = multiqueue;
3020 virtio_net_change_num_queue_pairs(n, max);
3021
3022 virtio_net_set_queue_pairs(n);
3023 }
3024
3025 static int virtio_net_post_load_device(void *opaque, int version_id)
3026 {
3027 VirtIONet *n = opaque;
3028 VirtIODevice *vdev = VIRTIO_DEVICE(n);
3029 int i, link_down;
3030
3031 trace_virtio_net_post_load_device();
3032 virtio_net_set_mrg_rx_bufs(n, n->mergeable_rx_bufs,
3033 virtio_vdev_has_feature(vdev,
3034 VIRTIO_F_VERSION_1),
3035 virtio_vdev_has_feature(vdev,
3036 VIRTIO_NET_F_HASH_REPORT));
3037
3038 /* MAC_TABLE_ENTRIES may be different from the saved image */
3039 if (n->mac_table.in_use > MAC_TABLE_ENTRIES) {
3040 n->mac_table.in_use = 0;
3041 }
3042
3043 if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
3044 n->curr_guest_offloads = virtio_net_supported_guest_offloads(n);
3045 }
3046
3047 /*
3048 * curr_guest_offloads will be later overwritten by the
3049 * virtio_set_features_nocheck call done from the virtio_load.
3050 * Here we make sure it is preserved and restored accordingly
3051 * in the virtio_net_post_load_virtio callback.
3052 */
3053 n->saved_guest_offloads = n->curr_guest_offloads;
3054
3055 virtio_net_set_queue_pairs(n);
3056
3057 /* Find the first multicast entry in the saved MAC filter */
3058 for (i = 0; i < n->mac_table.in_use; i++) {
3059 if (n->mac_table.macs[i * ETH_ALEN] & 1) {
3060 break;
3061 }
3062 }
3063 n->mac_table.first_multi = i;
3064
3065 /* nc.link_down can't be migrated, so infer link_down according
3066 * to link status bit in n->status */
3067 link_down = (n->status & VIRTIO_NET_S_LINK_UP) == 0;
3068 for (i = 0; i < n->max_queue_pairs; i++) {
3069 qemu_get_subqueue(n->nic, i)->link_down = link_down;
3070 }
3071
3072 if (virtio_vdev_has_feature(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE) &&
3073 virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) {
3074 qemu_announce_timer_reset(&n->announce_timer, migrate_announce_params(),
3075 QEMU_CLOCK_VIRTUAL,
3076 virtio_net_announce_timer, n);
3077 if (n->announce_timer.round) {
3078 timer_mod(n->announce_timer.tm,
3079 qemu_clock_get_ms(n->announce_timer.type));
3080 } else {
3081 qemu_announce_timer_del(&n->announce_timer, false);
3082 }
3083 }
3084
3085 virtio_net_commit_rss_config(n);
3086 return 0;
3087 }
3088
3089 static int virtio_net_post_load_virtio(VirtIODevice *vdev)
3090 {
3091 VirtIONet *n = VIRTIO_NET(vdev);
3092 /*
3093 * The actual needed state is now in saved_guest_offloads,
3094 * see virtio_net_post_load_device for detail.
3095 * Restore it back and apply the desired offloads.
3096 */
3097 n->curr_guest_offloads = n->saved_guest_offloads;
3098 if (peer_has_vnet_hdr(n)) {
3099 virtio_net_apply_guest_offloads(n);
3100 }
3101
3102 return 0;
3103 }
3104
3105 /* tx_waiting field of a VirtIONetQueue */
3106 static const VMStateDescription vmstate_virtio_net_queue_tx_waiting = {
3107 .name = "virtio-net-queue-tx_waiting",
3108 .fields = (const VMStateField[]) {
3109 VMSTATE_UINT32(tx_waiting, VirtIONetQueue),
3110 VMSTATE_END_OF_LIST()
3111 },
3112 };
3113
3114 static bool max_queue_pairs_gt_1(void *opaque, int version_id)
3115 {
3116 return VIRTIO_NET(opaque)->max_queue_pairs > 1;
3117 }
3118
3119 static bool has_ctrl_guest_offloads(void *opaque, int version_id)
3120 {
3121 return virtio_vdev_has_feature(VIRTIO_DEVICE(opaque),
3122 VIRTIO_NET_F_CTRL_GUEST_OFFLOADS);
3123 }
3124
3125 static bool mac_table_fits(void *opaque, int version_id)
3126 {
3127 return VIRTIO_NET(opaque)->mac_table.in_use <= MAC_TABLE_ENTRIES;
3128 }
3129
3130 static bool mac_table_doesnt_fit(void *opaque, int version_id)
3131 {
3132 return !mac_table_fits(opaque, version_id);
3133 }
3134
3135 /* This temporary type is shared by all the WITH_TMP methods
3136 * although only some fields are used by each.
3137 */
3138 struct VirtIONetMigTmp {
3139 VirtIONet *parent;
3140 VirtIONetQueue *vqs_1;
3141 uint16_t curr_queue_pairs_1;
3142 uint8_t has_ufo;
3143 uint32_t has_vnet_hdr;
3144 };
3145
3146 /* The 2nd and subsequent tx_waiting flags are loaded later than
3147 * the 1st entry in the queue_pairs and only if there's more than one
3148 * entry. We use the tmp mechanism to calculate a temporary
3149 * pointer and count and also validate the count.
3150 */
3151
3152 static int virtio_net_tx_waiting_pre_save(void *opaque)
3153 {
3154 struct VirtIONetMigTmp *tmp = opaque;
3155
3156 tmp->vqs_1 = tmp->parent->vqs + 1;
3157 tmp->curr_queue_pairs_1 = tmp->parent->curr_queue_pairs - 1;
3158 if (tmp->parent->curr_queue_pairs == 0) {
3159 tmp->curr_queue_pairs_1 = 0;
3160 }
3161
3162 return 0;
3163 }
3164
3165 static int virtio_net_tx_waiting_pre_load(void *opaque)
3166 {
3167 struct VirtIONetMigTmp *tmp = opaque;
3168
3169 /* Reuse the pointer setup from save */
3170 virtio_net_tx_waiting_pre_save(opaque);
3171
3172 if (tmp->parent->curr_queue_pairs > tmp->parent->max_queue_pairs) {
3173 error_report("virtio-net: curr_queue_pairs %x > max_queue_pairs %x",
3174 tmp->parent->curr_queue_pairs, tmp->parent->max_queue_pairs);
3175
3176 return -EINVAL;
3177 }
3178
3179 return 0; /* all good */
3180 }
3181
3182 static const VMStateDescription vmstate_virtio_net_tx_waiting = {
3183 .name = "virtio-net-tx_waiting",
3184 .pre_load = virtio_net_tx_waiting_pre_load,
3185 .pre_save = virtio_net_tx_waiting_pre_save,
3186 .fields = (const VMStateField[]) {
3187 VMSTATE_STRUCT_VARRAY_POINTER_UINT16(vqs_1, struct VirtIONetMigTmp,
3188 curr_queue_pairs_1,
3189 vmstate_virtio_net_queue_tx_waiting,
3190 struct VirtIONetQueue),
3191 VMSTATE_END_OF_LIST()
3192 },
3193 };
3194
3195 /* the 'has_ufo' flag is just tested; if the incoming stream has the
3196 * flag set we need to check that we have it
3197 */
3198 static int virtio_net_ufo_post_load(void *opaque, int version_id)
3199 {
3200 struct VirtIONetMigTmp *tmp = opaque;
3201
3202 if (tmp->has_ufo && !peer_has_ufo(tmp->parent)) {
3203 error_report("virtio-net: saved image requires TUN_F_UFO support");
3204 return -EINVAL;
3205 }
3206
3207 return 0;
3208 }
3209
3210 static int virtio_net_ufo_pre_save(void *opaque)
3211 {
3212 struct VirtIONetMigTmp *tmp = opaque;
3213
3214 tmp->has_ufo = tmp->parent->has_ufo;
3215
3216 return 0;
3217 }
3218
3219 static const VMStateDescription vmstate_virtio_net_has_ufo = {
3220 .name = "virtio-net-ufo",
3221 .post_load = virtio_net_ufo_post_load,
3222 .pre_save = virtio_net_ufo_pre_save,
3223 .fields = (const VMStateField[]) {
3224 VMSTATE_UINT8(has_ufo, struct VirtIONetMigTmp),
3225 VMSTATE_END_OF_LIST()
3226 },
3227 };
3228
3229 /* the 'has_vnet_hdr' flag is just tested; if the incoming stream has the
3230 * flag set we need to check that we have it
3231 */
3232 static int virtio_net_vnet_post_load(void *opaque, int version_id)
3233 {
3234 struct VirtIONetMigTmp *tmp = opaque;
3235
3236 if (tmp->has_vnet_hdr && !peer_has_vnet_hdr(tmp->parent)) {
3237 error_report("virtio-net: saved image requires vnet_hdr=on");
3238 return -EINVAL;
3239 }
3240
3241 return 0;
3242 }
3243
3244 static int virtio_net_vnet_pre_save(void *opaque)
3245 {
3246 struct VirtIONetMigTmp *tmp = opaque;
3247
3248 tmp->has_vnet_hdr = tmp->parent->has_vnet_hdr;
3249
3250 return 0;
3251 }
3252
3253 static const VMStateDescription vmstate_virtio_net_has_vnet = {
3254 .name = "virtio-net-vnet",
3255 .post_load = virtio_net_vnet_post_load,
3256 .pre_save = virtio_net_vnet_pre_save,
3257 .fields = (const VMStateField[]) {
3258 VMSTATE_UINT32(has_vnet_hdr, struct VirtIONetMigTmp),
3259 VMSTATE_END_OF_LIST()
3260 },
3261 };
3262
3263 static bool virtio_net_rss_needed(void *opaque)
3264 {
3265 return VIRTIO_NET(opaque)->rss_data.enabled;
3266 }
3267
3268 static const VMStateDescription vmstate_virtio_net_rss = {
3269 .name = "virtio-net-device/rss",
3270 .version_id = 1,
3271 .minimum_version_id = 1,
3272 .needed = virtio_net_rss_needed,
3273 .fields = (const VMStateField[]) {
3274 VMSTATE_BOOL(rss_data.enabled, VirtIONet),
3275 VMSTATE_BOOL(rss_data.redirect, VirtIONet),
3276 VMSTATE_BOOL(rss_data.populate_hash, VirtIONet),
3277 VMSTATE_UINT32(rss_data.hash_types, VirtIONet),
3278 VMSTATE_UINT16(rss_data.indirections_len, VirtIONet),
3279 VMSTATE_UINT16(rss_data.default_queue, VirtIONet),
3280 VMSTATE_UINT8_ARRAY(rss_data.key, VirtIONet,
3281 VIRTIO_NET_RSS_MAX_KEY_SIZE),
3282 VMSTATE_VARRAY_UINT16_ALLOC(rss_data.indirections_table, VirtIONet,
3283 rss_data.indirections_len, 0,
3284 vmstate_info_uint16, uint16_t),
3285 VMSTATE_END_OF_LIST()
3286 },
3287 };
3288
3289 static const VMStateDescription vmstate_virtio_net_device = {
3290 .name = "virtio-net-device",
3291 .version_id = VIRTIO_NET_VM_VERSION,
3292 .minimum_version_id = VIRTIO_NET_VM_VERSION,
3293 .post_load = virtio_net_post_load_device,
3294 .fields = (const VMStateField[]) {
3295 VMSTATE_UINT8_ARRAY(mac, VirtIONet, ETH_ALEN),
3296 VMSTATE_STRUCT_POINTER(vqs, VirtIONet,
3297 vmstate_virtio_net_queue_tx_waiting,
3298 VirtIONetQueue),
3299 VMSTATE_UINT32(mergeable_rx_bufs, VirtIONet),
3300 VMSTATE_UINT16(status, VirtIONet),
3301 VMSTATE_UINT8(promisc, VirtIONet),
3302 VMSTATE_UINT8(allmulti, VirtIONet),
3303 VMSTATE_UINT32(mac_table.in_use, VirtIONet),
3304
3305 /* Guarded pair: If it fits we load it, else we throw it away
3306      * - can happen if the source has a larger MAC table; post-load
3307 * sets flags in this case.
3308 */
3309 VMSTATE_VBUFFER_MULTIPLY(mac_table.macs, VirtIONet,
3310 0, mac_table_fits, mac_table.in_use,
3311 ETH_ALEN),
3312 VMSTATE_UNUSED_VARRAY_UINT32(VirtIONet, mac_table_doesnt_fit, 0,
3313 mac_table.in_use, ETH_ALEN),
3314
3315 /* Note: This is an array of uint32's that's always been saved as a
3316 * buffer; hold onto your endiannesses; it's actually used as a bitmap
3317 * but based on the uint.
3318 */
3319 VMSTATE_BUFFER_POINTER_UNSAFE(vlans, VirtIONet, 0, MAX_VLAN >> 3),
3320 VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp,
3321 vmstate_virtio_net_has_vnet),
3322 VMSTATE_UINT8(mac_table.multi_overflow, VirtIONet),
3323 VMSTATE_UINT8(mac_table.uni_overflow, VirtIONet),
3324 VMSTATE_UINT8(alluni, VirtIONet),
3325 VMSTATE_UINT8(nomulti, VirtIONet),
3326 VMSTATE_UINT8(nouni, VirtIONet),
3327 VMSTATE_UINT8(nobcast, VirtIONet),
3328 VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp,
3329 vmstate_virtio_net_has_ufo),
3330 VMSTATE_SINGLE_TEST(max_queue_pairs, VirtIONet, max_queue_pairs_gt_1, 0,
3331 vmstate_info_uint16_equal, uint16_t),
3332 VMSTATE_UINT16_TEST(curr_queue_pairs, VirtIONet, max_queue_pairs_gt_1),
3333 VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp,
3334 vmstate_virtio_net_tx_waiting),
3335 VMSTATE_UINT64_TEST(curr_guest_offloads, VirtIONet,
3336 has_ctrl_guest_offloads),
3337 VMSTATE_END_OF_LIST()
3338 },
3339 .subsections = (const VMStateDescription * const []) {
3340 &vmstate_virtio_net_rss,
3341 NULL
3342 }
3343 };
3344
3345 static NetClientInfo net_virtio_info = {
3346 .type = NET_CLIENT_DRIVER_NIC,
3347 .size = sizeof(NICState),
3348 .can_receive = virtio_net_can_receive,
3349 .receive = virtio_net_receive,
3350 .link_status_changed = virtio_net_set_link_status,
3351 .query_rx_filter = virtio_net_query_rxfilter,
3352 .announce = virtio_net_announce,
3353 };
3354
3355 static bool virtio_net_guest_notifier_pending(VirtIODevice *vdev, int idx)
3356 {
3357 VirtIONet *n = VIRTIO_NET(vdev);
3358 NetClientState *nc;
3359 assert(n->vhost_started);
3360 if (!n->multiqueue && idx == 2) {
3361 /* Must guard against invalid features and bogus queue index
3362 * from being set by malicious guest, or penetrated through
3363 * buggy migration stream.
3364 */
3365 if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) {
3366 qemu_log_mask(LOG_GUEST_ERROR,
3367 "%s: bogus vq index ignored\n", __func__);
3368 return false;
3369 }
3370 nc = qemu_get_subqueue(n->nic, n->max_queue_pairs);
3371 } else {
3372 nc = qemu_get_subqueue(n->nic, vq2q(idx));
3373 }
3374     /*
3375      * Check for the config interrupt: VIRTIO_CONFIG_IRQ_IDX (-1) is used
3376      * as the config interrupt's index.  If the backend does not support
3377      * it, the function returns false.
3378      */
3379
3380 if (idx == VIRTIO_CONFIG_IRQ_IDX) {
3381 return vhost_net_config_pending(get_vhost_net(nc->peer));
3382 }
3383 return vhost_net_virtqueue_pending(get_vhost_net(nc->peer), idx);
3384 }
3385
3386 static void virtio_net_guest_notifier_mask(VirtIODevice *vdev, int idx,
3387 bool mask)
3388 {
3389 VirtIONet *n = VIRTIO_NET(vdev);
3390 NetClientState *nc;
3391 assert(n->vhost_started);
3392 if (!n->multiqueue && idx == 2) {
3393 /* Must guard against invalid features and bogus queue index
3394 * from being set by malicious guest, or penetrated through
3395 * buggy migration stream.
3396 */
3397 if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) {
3398 qemu_log_mask(LOG_GUEST_ERROR,
3399 "%s: bogus vq index ignored\n", __func__);
3400 return;
3401 }
3402 nc = qemu_get_subqueue(n->nic, n->max_queue_pairs);
3403 } else {
3404 nc = qemu_get_subqueue(n->nic, vq2q(idx));
3405 }
3406     /*
3407      * Check for the config interrupt: VIRTIO_CONFIG_IRQ_IDX (-1) is used
3408      * as the config interrupt's index.  If the backend does not support
3409      * it, the function simply returns.
3410      */
3411
3412 if (idx == VIRTIO_CONFIG_IRQ_IDX) {
3413 vhost_net_config_mask(get_vhost_net(nc->peer), vdev, mask);
3414 return;
3415 }
3416 vhost_net_virtqueue_mask(get_vhost_net(nc->peer), vdev, idx, mask);
3417 }
3418
3419 static void virtio_net_set_config_size(VirtIONet *n, uint64_t host_features)
3420 {
3421 virtio_add_feature(&host_features, VIRTIO_NET_F_MAC);
3422
3423 n->config_size = virtio_get_config_size(&cfg_size_params, host_features);
3424 }
3425
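/*
 * Minimal usage sketch (hypothetical caller): a transport embedding a
 * VirtIONet, e.g. virtio-net-pci, is expected to call this before realize:
 *
 *     virtio_net_set_netclient_name(&dev->vdev, qdev->id,
 *                                   object_get_typename(OBJECT(qdev)));
 */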
3426 void virtio_net_set_netclient_name(VirtIONet *n, const char *name,
3427 const char *type)
3428 {
3429 /*
3430      * The name can be NULL; in that case the netclient name will be type.x.
3431 */
3432 assert(type != NULL);
3433
3434 g_free(n->netclient_name);
3435 g_free(n->netclient_type);
3436 n->netclient_name = g_strdup(name);
3437 n->netclient_type = g_strdup(type);
3438 }
3439
3440 static bool failover_unplug_primary(VirtIONet *n, DeviceState *dev)
3441 {
3442 HotplugHandler *hotplug_ctrl;
3443 PCIDevice *pci_dev;
3444 Error *err = NULL;
3445
3446 hotplug_ctrl = qdev_get_hotplug_handler(dev);
3447 if (hotplug_ctrl) {
3448 pci_dev = PCI_DEVICE(dev);
3449 pci_dev->partially_hotplugged = true;
3450 hotplug_handler_unplug_request(hotplug_ctrl, dev, &err);
3451 if (err) {
3452 error_report_err(err);
3453 return false;
3454 }
3455 } else {
3456 return false;
3457 }
3458 return true;
3459 }
3460
3461 static bool failover_replug_primary(VirtIONet *n, DeviceState *dev,
3462 Error **errp)
3463 {
3464 Error *err = NULL;
3465 HotplugHandler *hotplug_ctrl;
3466 PCIDevice *pdev = PCI_DEVICE(dev);
3467 BusState *primary_bus;
3468
3469 if (!pdev->partially_hotplugged) {
3470 return true;
3471 }
3472 primary_bus = dev->parent_bus;
3473 if (!primary_bus) {
3474 error_setg(errp, "virtio_net: couldn't find primary bus");
3475 return false;
3476 }
3477 qdev_set_parent_bus(dev, primary_bus, &error_abort);
3478 qatomic_set(&n->failover_primary_hidden, false);
3479 hotplug_ctrl = qdev_get_hotplug_handler(dev);
3480 if (hotplug_ctrl) {
3481 hotplug_handler_pre_plug(hotplug_ctrl, dev, &err);
3482 if (err) {
3483 goto out;
3484 }
3485 hotplug_handler_plug(hotplug_ctrl, dev, &err);
3486 }
3487 pdev->partially_hotplugged = false;
3488
3489 out:
3490 error_propagate(errp, err);
3491 return !err;
3492 }
3493
3494 static void virtio_net_handle_migration_primary(VirtIONet *n, MigrationEvent *e)
3495 {
3496 bool should_be_hidden;
3497 Error *err = NULL;
3498 DeviceState *dev = failover_find_primary_device(n);
3499
3500 if (!dev) {
3501 return;
3502 }
3503
3504 should_be_hidden = qatomic_read(&n->failover_primary_hidden);
3505
3506 if (e->type == MIG_EVENT_PRECOPY_SETUP && !should_be_hidden) {
3507 if (failover_unplug_primary(n, dev)) {
3508 vmstate_unregister(VMSTATE_IF(dev), qdev_get_vmsd(dev), dev);
3509 qapi_event_send_unplug_primary(dev->id);
3510 qatomic_set(&n->failover_primary_hidden, true);
3511 } else {
3512 warn_report("couldn't unplug primary device");
3513 }
3514 } else if (e->type == MIG_EVENT_PRECOPY_FAILED) {
3515 /* We already unplugged the device let's plug it back */
3516 if (!failover_replug_primary(n, dev, &err)) {
3517 if (err) {
3518 error_report_err(err);
3519 }
3520 }
3521 }
3522 }
3523
3524 static int virtio_net_migration_state_notifier(NotifierWithReturn *notifier,
3525 MigrationEvent *e, Error **errp)
3526 {
3527 VirtIONet *n = container_of(notifier, VirtIONet, migration_state);
3528 virtio_net_handle_migration_primary(n, e);
3529 return 0;
3530 }
3531
3532 static bool failover_hide_primary_device(DeviceListener *listener,
3533 const QDict *device_opts,
3534 bool from_json,
3535 Error **errp)
3536 {
3537 VirtIONet *n = container_of(listener, VirtIONet, primary_listener);
3538 const char *standby_id;
3539
3540 if (!device_opts) {
3541 return false;
3542 }
3543
3544 if (!qdict_haskey(device_opts, "failover_pair_id")) {
3545 return false;
3546 }
3547
3548 if (!qdict_haskey(device_opts, "id")) {
3549 error_setg(errp, "Device with failover_pair_id needs to have id");
3550 return false;
3551 }
3552
3553 standby_id = qdict_get_str(device_opts, "failover_pair_id");
3554 if (g_strcmp0(standby_id, n->netclient_name) != 0) {
3555 return false;
3556 }
3557
3558 /*
3559 * The hide helper can be called several times for a given device.
3560  * Check that there is only one primary per virtio-net device, but
3561  * don't duplicate the qdict when the helper is called again for the
3562  * same device.
3563 */
3564 if (n->primary_opts) {
3565 const char *old, *new;
3566 /* devices with failover_pair_id always have an id */
3567 old = qdict_get_str(n->primary_opts, "id");
3568 new = qdict_get_str(device_opts, "id");
3569 if (strcmp(old, new) != 0) {
3570 error_setg(errp, "Cannot attach more than one primary device to "
3571 "'%s': '%s' and '%s'", n->netclient_name, old, new);
3572 return false;
3573 }
3574 } else {
3575 n->primary_opts = qdict_clone_shallow(device_opts);
3576 n->primary_opts_from_json = from_json;
3577 }
3578
3579 /* failover_primary_hidden is set during feature negotiation */
3580 return qatomic_read(&n->failover_primary_hidden);
3581 }
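
/*
 * Illustrative command line (not taken from this file): a failover pair
 * is typically a virtio-net standby device plus a passthrough primary
 * that names it via failover_pair_id, e.g.:
 *
 *   -device virtio-net-pci,netdev=hostnet0,id=net0,failover=on
 *   -device vfio-pci,host=0000:5e:00.1,id=hostdev0,failover_pair_id=net0
 *
 * With such a setup the listener above keeps "hostdev0" hidden until the
 * guest acknowledges VIRTIO_NET_F_STANDBY.
 */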
3582
3583 static void virtio_net_device_realize(DeviceState *dev, Error **errp)
3584 {
3585 VirtIODevice *vdev = VIRTIO_DEVICE(dev);
3586 VirtIONet *n = VIRTIO_NET(dev);
3587 NetClientState *nc;
3588 int i;
3589
3590 if (n->net_conf.mtu) {
3591 n->host_features |= (1ULL << VIRTIO_NET_F_MTU);
3592 }
3593
3594 if (n->net_conf.duplex_str) {
3595 if (strncmp(n->net_conf.duplex_str, "half", 5) == 0) {
3596 n->net_conf.duplex = DUPLEX_HALF;
3597 } else if (strncmp(n->net_conf.duplex_str, "full", 5) == 0) {
3598 n->net_conf.duplex = DUPLEX_FULL;
3599 } else {
3600 error_setg(errp, "'duplex' must be 'half' or 'full'");
3601 return;
3602 }
3603 n->host_features |= (1ULL << VIRTIO_NET_F_SPEED_DUPLEX);
3604 } else {
3605 n->net_conf.duplex = DUPLEX_UNKNOWN;
3606 }
3607
3608 if (n->net_conf.speed < SPEED_UNKNOWN) {
3609 error_setg(errp, "'speed' must be between 0 and INT_MAX");
3610 return;
3611 }
3612 if (n->net_conf.speed >= 0) {
3613 n->host_features |= (1ULL << VIRTIO_NET_F_SPEED_DUPLEX);
3614 }
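
/*
 * Example (illustrative only): advertising a fixed link speed and duplex
 * to the guest via the properties handled above, e.g.
 *   -device virtio-net-pci,netdev=nd0,speed=10000,duplex=full
 * enables VIRTIO_NET_F_SPEED_DUPLEX, with speed given in Mbit/s.
 */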
3615
3616 if (n->failover) {
3617 n->primary_listener.hide_device = failover_hide_primary_device;
3618 qatomic_set(&n->failover_primary_hidden, true);
3619 device_listener_register(&n->primary_listener);
3620 migration_add_notifier(&n->migration_state,
3621 virtio_net_migration_state_notifier);
3622 n->host_features |= (1ULL << VIRTIO_NET_F_STANDBY);
3623 }
3624
3625 virtio_net_set_config_size(n, n->host_features);
3626 virtio_init(vdev, VIRTIO_ID_NET, n->config_size);
3627
3628 /*
3629 * We set a lower limit on RX queue size to what it always was.
3630 * Guests that want a smaller ring can always resize it without
3631 * help from us (using virtio 1 and up).
3632 */
3633 if (n->net_conf.rx_queue_size < VIRTIO_NET_RX_QUEUE_MIN_SIZE ||
3634 n->net_conf.rx_queue_size > VIRTQUEUE_MAX_SIZE ||
3635 !is_power_of_2(n->net_conf.rx_queue_size)) {
3636 error_setg(errp, "Invalid rx_queue_size (= %" PRIu16 "), "
3637 "must be a power of 2 between %d and %d.",
3638 n->net_conf.rx_queue_size, VIRTIO_NET_RX_QUEUE_MIN_SIZE,
3639 VIRTQUEUE_MAX_SIZE);
3640 virtio_cleanup(vdev);
3641 return;
3642 }
3643
3644 if (n->net_conf.tx_queue_size < VIRTIO_NET_TX_QUEUE_MIN_SIZE ||
3645 n->net_conf.tx_queue_size > virtio_net_max_tx_queue_size(n) ||
3646 !is_power_of_2(n->net_conf.tx_queue_size)) {
3647 error_setg(errp, "Invalid tx_queue_size (= %" PRIu16 "), "
3648 "must be a power of 2 between %d and %d",
3649 n->net_conf.tx_queue_size, VIRTIO_NET_TX_QUEUE_MIN_SIZE,
3650 virtio_net_max_tx_queue_size(n));
3651 virtio_cleanup(vdev);
3652 return;
3653 }
3654
3655 n->max_ncs = MAX(n->nic_conf.peers.queues, 1);
3656
3657 /*
3658  * Figure out the number of datapath queue pairs, since the backend
3659  * could provide the control queue via peers as well.
3660 */
3661 if (n->nic_conf.peers.queues) {
3662 for (i = 0; i < n->max_ncs; i++) {
3663 if (n->nic_conf.peers.ncs[i]->is_datapath) {
3664 ++n->max_queue_pairs;
3665 }
3666 }
3667 }
3668 n->max_queue_pairs = MAX(n->max_queue_pairs, 1);
3669
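/*
 * Each queue pair needs an RX and a TX virtqueue, and the device adds
 * one control vq on top, hence the 2 * pairs + 1 bound checked below
 * (with the usual VIRTIO_QUEUE_MAX of 1024 this caps us at 511 pairs).
 */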
3670 if (n->max_queue_pairs * 2 + 1 > VIRTIO_QUEUE_MAX) {
3671 error_setg(errp, "Invalid number of queue pairs (= %" PRIu32 "), "
3672 "must be a positive integer less than %d.",
3673 n->max_queue_pairs, (VIRTIO_QUEUE_MAX - 1) / 2);
3674 virtio_cleanup(vdev);
3675 return;
3676 }
3677 n->vqs = g_new0(VirtIONetQueue, n->max_queue_pairs);
3678 n->curr_queue_pairs = 1;
3679 n->tx_timeout = n->net_conf.txtimer;
3680
3681 if (n->net_conf.tx && strcmp(n->net_conf.tx, "timer")
3682 && strcmp(n->net_conf.tx, "bh")) {
3683 warn_report("virtio-net: "
3684 "Unknown option tx=%s, valid options: \"timer\" \"bh\"",
3685 n->net_conf.tx);
3686 error_printf("Defaulting to \"bh\"");
3687 }
3688
3689 n->net_conf.tx_queue_size = MIN(virtio_net_max_tx_queue_size(n),
3690 n->net_conf.tx_queue_size);
3691
3692 virtio_net_add_queue(n, 0);
3693
3694 n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
3695 qemu_macaddr_default_if_unset(&n->nic_conf.macaddr);
3696 memcpy(&n->mac[0], &n->nic_conf.macaddr, sizeof(n->mac));
3697 n->status = VIRTIO_NET_S_LINK_UP;
3698 qemu_announce_timer_reset(&n->announce_timer, migrate_announce_params(),
3699 QEMU_CLOCK_VIRTUAL,
3700 virtio_net_announce_timer, n);
3701 n->announce_timer.round = 0;
3702
3703 if (n->netclient_type) {
3704 /*
3705  * This happens when virtio_net_set_netclient_name() has been called.
3706 */
3707 n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf,
3708 n->netclient_type, n->netclient_name,
3709 &dev->mem_reentrancy_guard, n);
3710 } else {
3711 n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf,
3712 object_get_typename(OBJECT(dev)), dev->id,
3713 &dev->mem_reentrancy_guard, n);
3714 }
3715
3716 for (i = 0; i < n->max_queue_pairs; i++) {
3717 n->nic->ncs[i].do_not_pad = true;
3718 }
3719
3720 peer_test_vnet_hdr(n);
3721 if (peer_has_vnet_hdr(n)) {
3722 n->host_hdr_len = sizeof(struct virtio_net_hdr);
3723 } else {
3724 n->host_hdr_len = 0;
3725 }
3726
3727 qemu_format_nic_info_str(qemu_get_queue(n->nic), n->nic_conf.macaddr.a);
3728
3729 n->vqs[0].tx_waiting = 0;
3730 n->tx_burst = n->net_conf.txburst;
3731 virtio_net_set_mrg_rx_bufs(n, 0, 0, 0);
3732 n->promisc = 1; /* for compatibility */
3733
3734 n->mac_table.macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN);
3735
3736 n->vlans = g_malloc0(MAX_VLAN >> 3);
3737
3738 nc = qemu_get_queue(n->nic);
3739 nc->rxfilter_notify_enabled = 1;
3740
3741 if (nc->peer && nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_VDPA) {
3742 struct virtio_net_config netcfg = {};
3743 memcpy(&netcfg.mac, &n->nic_conf.macaddr, ETH_ALEN);
3744 vhost_net_set_config(get_vhost_net(nc->peer),
3745 (uint8_t *)&netcfg, 0, ETH_ALEN, VHOST_SET_CONFIG_TYPE_FRONTEND);
3746 }
3747 QTAILQ_INIT(&n->rsc_chains);
3748 n->qdev = dev;
3749
3750 net_rx_pkt_init(&n->rx_pkt);
3751
3752 if (virtio_has_feature(n->host_features, VIRTIO_NET_F_RSS)) {
3753 virtio_net_load_ebpf(n);
3754 }
3755 }
3756
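/*
 * Undo virtio_net_device_realize(): stop the backend, drop failover
 * state, free the filter tables and per-queue state, and tear down the
 * virtqueues (including the control vq).
 */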
3757 static void virtio_net_device_unrealize(DeviceState *dev)
3758 {
3759 VirtIODevice *vdev = VIRTIO_DEVICE(dev);
3760 VirtIONet *n = VIRTIO_NET(dev);
3761 int i, max_queue_pairs;
3762
3763 if (virtio_has_feature(n->host_features, VIRTIO_NET_F_RSS)) {
3764 virtio_net_unload_ebpf(n);
3765 }
3766
3767 /* This will stop vhost backend if appropriate. */
3768 virtio_net_set_status(vdev, 0);
3769
3770 g_free(n->netclient_name);
3771 n->netclient_name = NULL;
3772 g_free(n->netclient_type);
3773 n->netclient_type = NULL;
3774
3775 g_free(n->mac_table.macs);
3776 g_free(n->vlans);
3777
3778 if (n->failover) {
3779 qobject_unref(n->primary_opts);
3780 device_listener_unregister(&n->primary_listener);
3781 migration_remove_notifier(&n->migration_state);
3782 } else {
3783 assert(n->primary_opts == NULL);
3784 }
3785
3786 max_queue_pairs = n->multiqueue ? n->max_queue_pairs : 1;
3787 for (i = 0; i < max_queue_pairs; i++) {
3788 virtio_net_del_queue(n, i);
3789 }
3790 /* also delete the control vq */
3791 virtio_del_queue(vdev, max_queue_pairs * 2);
3792 qemu_announce_timer_del(&n->announce_timer, false);
3793 g_free(n->vqs);
3794 qemu_del_nic(n->nic);
3795 virtio_net_rsc_cleanup(n);
3796 g_free(n->rss_data.indirections_table);
3797 net_rx_pkt_uninit(n->rx_pkt);
3798 virtio_cleanup(vdev);
3799 }
3800
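/*
 * VirtioDeviceClass::reset hook: restore the receive-filter, VLAN, RSS
 * and announce state to their post-realize defaults and flush or purge
 * any queued TX packets.
 */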
3801 static void virtio_net_reset(VirtIODevice *vdev)
3802 {
3803 VirtIONet *n = VIRTIO_NET(vdev);
3804 int i;
3805
3806 /* Reset back to compatibility mode */
3807 n->promisc = 1;
3808 n->allmulti = 0;
3809 n->alluni = 0;
3810 n->nomulti = 0;
3811 n->nouni = 0;
3812 n->nobcast = 0;
3813 /* multiqueue is disabled by default */
3814 n->curr_queue_pairs = 1;
3815 timer_del(n->announce_timer.tm);
3816 n->announce_timer.round = 0;
3817 n->status &= ~VIRTIO_NET_S_ANNOUNCE;
3818
3819 /* Flush any MAC and VLAN filter table state */
3820 n->mac_table.in_use = 0;
3821 n->mac_table.first_multi = 0;
3822 n->mac_table.multi_overflow = 0;
3823 n->mac_table.uni_overflow = 0;
3824 memset(n->mac_table.macs, 0, MAC_TABLE_ENTRIES * ETH_ALEN);
3825 memcpy(&n->mac[0], &n->nic->conf->macaddr, sizeof(n->mac));
3826 qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
3827 memset(n->vlans, 0, MAX_VLAN >> 3);
3828
3829 /* Flush any async TX */
3830 for (i = 0; i < n->max_queue_pairs; i++) {
3831 flush_or_purge_queued_packets(qemu_get_subqueue(n->nic, i));
3832 }
3833
3834 virtio_net_disable_rss(n);
3835 }
3836
3837 static void virtio_net_instance_init(Object *obj)
3838 {
3839 VirtIONet *n = VIRTIO_NET(obj);
3840
3841 /*
3842 * The default config_size is sizeof(struct virtio_net_config).
3843 * Can be overridden with virtio_net_set_config_size.
3844 */
3845 n->config_size = sizeof(struct virtio_net_config);
3846 device_add_bootindex_property(obj, &n->nic_conf.bootindex,
3847 "bootindex", "/ethernet-phy@0",
3848 DEVICE(n));
3849
3850 ebpf_rss_init(&n->ebpf_rss);
3851 }
3852
3853 static int virtio_net_pre_save(void *opaque)
3854 {
3855 VirtIONet *n = opaque;
3856
3857 /* At this point, the backend must be stopped, otherwise
3858  * it might keep writing to memory. */
3859 assert(!n->vhost_started);
3860
3861 return 0;
3862 }
3863
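/*
 * Failover support for migration: dev_unplug_pending() below is wired up
 * as the vmstate .dev_unplug_pending hook and forwards to
 * primary_unplug_pending(), so migration can hold off while the failover
 * primary still has a hot-unplug pending in the guest.
 */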
3864 static bool primary_unplug_pending(void *opaque)
3865 {
3866 DeviceState *dev = opaque;
3867 DeviceState *primary;
3868 VirtIODevice *vdev = VIRTIO_DEVICE(dev);
3869 VirtIONet *n = VIRTIO_NET(vdev);
3870
3871 if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_STANDBY)) {
3872 return false;
3873 }
3874 primary = failover_find_primary_device(n);
3875 return primary ? primary->pending_deleted_event : false;
3876 }
3877
3878 static bool dev_unplug_pending(void *opaque)
3879 {
3880 DeviceState *dev = opaque;
3881 VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
3882
3883 return vdc->primary_unplug_pending(dev);
3884 }
3885
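/*
 * VirtioDeviceClass::get_vhost hook: return the vhost_dev of the vhost
 * backend peered with queue 0.
 */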
3886 static struct vhost_dev *virtio_net_get_vhost(VirtIODevice *vdev)
3887 {
3888 VirtIONet *n = VIRTIO_NET(vdev);
3889 NetClientState *nc = qemu_get_queue(n->nic);
3890 struct vhost_net *net = get_vhost_net(nc->peer);
3891 return &net->dev;
3892 }
3893
3894 static const VMStateDescription vmstate_virtio_net = {
3895 .name = "virtio-net",
3896 .minimum_version_id = VIRTIO_NET_VM_VERSION,
3897 .version_id = VIRTIO_NET_VM_VERSION,
3898 .fields = (const VMStateField[]) {
3899 VMSTATE_VIRTIO_DEVICE,
3900 VMSTATE_END_OF_LIST()
3901 },
3902 .pre_save = virtio_net_pre_save,
3903 .dev_unplug_pending = dev_unplug_pending,
3904 };
3905
3906 static Property virtio_net_properties[] = {
3907 DEFINE_PROP_BIT64("csum", VirtIONet, host_features,
3908 VIRTIO_NET_F_CSUM, true),
3909 DEFINE_PROP_BIT64("guest_csum", VirtIONet, host_features,
3910 VIRTIO_NET_F_GUEST_CSUM, true),
3911 DEFINE_PROP_BIT64("gso", VirtIONet, host_features, VIRTIO_NET_F_GSO, true),
3912 DEFINE_PROP_BIT64("guest_tso4", VirtIONet, host_features,
3913 VIRTIO_NET_F_GUEST_TSO4, true),
3914 DEFINE_PROP_BIT64("guest_tso6", VirtIONet, host_features,
3915 VIRTIO_NET_F_GUEST_TSO6, true),
3916 DEFINE_PROP_BIT64("guest_ecn", VirtIONet, host_features,
3917 VIRTIO_NET_F_GUEST_ECN, true),
3918 DEFINE_PROP_BIT64("guest_ufo", VirtIONet, host_features,
3919 VIRTIO_NET_F_GUEST_UFO, true),
3920 DEFINE_PROP_BIT64("guest_announce", VirtIONet, host_features,
3921 VIRTIO_NET_F_GUEST_ANNOUNCE, true),
3922 DEFINE_PROP_BIT64("host_tso4", VirtIONet, host_features,
3923 VIRTIO_NET_F_HOST_TSO4, true),
3924 DEFINE_PROP_BIT64("host_tso6", VirtIONet, host_features,
3925 VIRTIO_NET_F_HOST_TSO6, true),
3926 DEFINE_PROP_BIT64("host_ecn", VirtIONet, host_features,
3927 VIRTIO_NET_F_HOST_ECN, true),
3928 DEFINE_PROP_BIT64("host_ufo", VirtIONet, host_features,
3929 VIRTIO_NET_F_HOST_UFO, true),
3930 DEFINE_PROP_BIT64("mrg_rxbuf", VirtIONet, host_features,
3931 VIRTIO_NET_F_MRG_RXBUF, true),
3932 DEFINE_PROP_BIT64("status", VirtIONet, host_features,
3933 VIRTIO_NET_F_STATUS, true),
3934 DEFINE_PROP_BIT64("ctrl_vq", VirtIONet, host_features,
3935 VIRTIO_NET_F_CTRL_VQ, true),
3936 DEFINE_PROP_BIT64("ctrl_rx", VirtIONet, host_features,
3937 VIRTIO_NET_F_CTRL_RX, true),
3938 DEFINE_PROP_BIT64("ctrl_vlan", VirtIONet, host_features,
3939 VIRTIO_NET_F_CTRL_VLAN, true),
3940 DEFINE_PROP_BIT64("ctrl_rx_extra", VirtIONet, host_features,
3941 VIRTIO_NET_F_CTRL_RX_EXTRA, true),
3942 DEFINE_PROP_BIT64("ctrl_mac_addr", VirtIONet, host_features,
3943 VIRTIO_NET_F_CTRL_MAC_ADDR, true),
3944 DEFINE_PROP_BIT64("ctrl_guest_offloads", VirtIONet, host_features,
3945 VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, true),
3946 DEFINE_PROP_BIT64("mq", VirtIONet, host_features, VIRTIO_NET_F_MQ, false),
3947 DEFINE_PROP_BIT64("rss", VirtIONet, host_features,
3948 VIRTIO_NET_F_RSS, false),
3949 DEFINE_PROP_BIT64("hash", VirtIONet, host_features,
3950 VIRTIO_NET_F_HASH_REPORT, false),
3951 DEFINE_PROP_ARRAY("ebpf-rss-fds", VirtIONet, nr_ebpf_rss_fds,
3952 ebpf_rss_fds, qdev_prop_string, char*),
3953 DEFINE_PROP_BIT64("guest_rsc_ext", VirtIONet, host_features,
3954 VIRTIO_NET_F_RSC_EXT, false),
3955 DEFINE_PROP_UINT32("rsc_interval", VirtIONet, rsc_timeout,
3956 VIRTIO_NET_RSC_DEFAULT_INTERVAL),
3957 DEFINE_NIC_PROPERTIES(VirtIONet, nic_conf),
3958 DEFINE_PROP_UINT32("x-txtimer", VirtIONet, net_conf.txtimer,
3959 TX_TIMER_INTERVAL),
3960 DEFINE_PROP_INT32("x-txburst", VirtIONet, net_conf.txburst, TX_BURST),
3961 DEFINE_PROP_STRING("tx", VirtIONet, net_conf.tx),
3962 DEFINE_PROP_UINT16("rx_queue_size", VirtIONet, net_conf.rx_queue_size,
3963 VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE),
3964 DEFINE_PROP_UINT16("tx_queue_size", VirtIONet, net_conf.tx_queue_size,
3965 VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE),
3966 DEFINE_PROP_UINT16("host_mtu", VirtIONet, net_conf.mtu, 0),
3967 DEFINE_PROP_BOOL("x-mtu-bypass-backend", VirtIONet, mtu_bypass_backend,
3968 true),
3969 DEFINE_PROP_INT32("speed", VirtIONet, net_conf.speed, SPEED_UNKNOWN),
3970 DEFINE_PROP_STRING("duplex", VirtIONet, net_conf.duplex_str),
3971 DEFINE_PROP_BOOL("failover", VirtIONet, failover, false),
3972 DEFINE_PROP_BIT64("guest_uso4", VirtIONet, host_features,
3973 VIRTIO_NET_F_GUEST_USO4, true),
3974 DEFINE_PROP_BIT64("guest_uso6", VirtIONet, host_features,
3975 VIRTIO_NET_F_GUEST_USO6, true),
3976 DEFINE_PROP_BIT64("host_uso", VirtIONet, host_features,
3977 VIRTIO_NET_F_HOST_USO, true),
3978 DEFINE_PROP_END_OF_LIST(),
3979 };
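
/*
 * Illustrative use of some of the properties above (not from this file):
 *   -device virtio-net-pci,netdev=nd0,mq=on,rss=on,rx_queue_size=1024
 * turns on multiqueue and RSS and grows the RX ring to 1024 entries
 * (rx_queue_size must be a power of 2, see virtio_net_device_realize()).
 */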
3980
3981 static void virtio_net_class_init(ObjectClass *klass, void *data)
3982 {
3983 DeviceClass *dc = DEVICE_CLASS(klass);
3984 VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
3985
3986 device_class_set_props(dc, virtio_net_properties);
3987 dc->vmsd = &vmstate_virtio_net;
3988 set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
3989 vdc->realize = virtio_net_device_realize;
3990 vdc->unrealize = virtio_net_device_unrealize;
3991 vdc->get_config = virtio_net_get_config;
3992 vdc->set_config = virtio_net_set_config;
3993 vdc->get_features = virtio_net_get_features;
3994 vdc->set_features = virtio_net_set_features;
3995 vdc->bad_features = virtio_net_bad_features;
3996 vdc->reset = virtio_net_reset;
3997 vdc->queue_reset = virtio_net_queue_reset;
3998 vdc->queue_enable = virtio_net_queue_enable;
3999 vdc->set_status = virtio_net_set_status;
4000 vdc->guest_notifier_mask = virtio_net_guest_notifier_mask;
4001 vdc->guest_notifier_pending = virtio_net_guest_notifier_pending;
4002 vdc->legacy_features |= (0x1 << VIRTIO_NET_F_GSO);
4003 vdc->post_load = virtio_net_post_load_virtio;
4004 vdc->vmsd = &vmstate_virtio_net_device;
4005 vdc->primary_unplug_pending = primary_unplug_pending;
4006 vdc->get_vhost = virtio_net_get_vhost;
4007 vdc->toggle_device_iotlb = vhost_toggle_device_iotlb;
4008 }
4009
4010 static const TypeInfo virtio_net_info = {
4011 .name = TYPE_VIRTIO_NET,
4012 .parent = TYPE_VIRTIO_DEVICE,
4013 .instance_size = sizeof(VirtIONet),
4014 .instance_init = virtio_net_instance_init,
4015 .class_init = virtio_net_class_init,
4016 };
4017
4018 static void virtio_register_types(void)
4019 {
4020 type_register_static(&virtio_net_info);
4021 }
4022
4023 type_init(virtio_register_types)
4024