/*
 * vhost-vdpa.c
 *
 * Copyright(c) 2017-2018 Intel Corporation.
 * Copyright(c) 2020 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "clients.h"
#include "hw/virtio/virtio-net.h"
#include "net/vhost_net.h"
#include "net/vhost-vdpa.h"
#include "hw/virtio/vhost-vdpa.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "qemu/memalign.h"
#include "qemu/option.h"
#include "qapi/error.h"
#include <linux/vhost.h>
#include <sys/ioctl.h>
#include <err.h>
#include "standard-headers/linux/virtio_net.h"
#include "monitor/monitor.h"
#include "migration/migration.h"
#include "migration/misc.h"
#include "hw/virtio/vhost.h"

/* TODO: add multiqueue support here */
typedef struct VhostVDPAState {
    NetClientState nc;
    struct vhost_vdpa vhost_vdpa;
    Notifier migration_state;
    VHostNetState *vhost_net;

    /* Control commands shadow buffers */
    void *cvq_cmd_out_buffer;
    virtio_net_ctrl_ack *status;

    /* The device always has SVQ enabled */
    bool always_svq;

    /* The device can isolate CVQ in its own ASID */
    bool cvq_isolated;

    bool started;
} VhostVDPAState;

/*
 * The array is sorted alphabetically in ascending order,
 * with the exception of VHOST_INVALID_FEATURE_BIT,
 * which should always be the last entry.
 */
const int vdpa_feature_bits[] = {
    VIRTIO_F_ANY_LAYOUT,
    VIRTIO_F_IOMMU_PLATFORM,
    VIRTIO_F_NOTIFY_ON_EMPTY,
    VIRTIO_F_RING_PACKED,
    VIRTIO_F_RING_RESET,
    VIRTIO_F_VERSION_1,
    VIRTIO_NET_F_CSUM,
    VIRTIO_NET_F_CTRL_GUEST_OFFLOADS,
    VIRTIO_NET_F_CTRL_MAC_ADDR,
    VIRTIO_NET_F_CTRL_RX,
    VIRTIO_NET_F_CTRL_RX_EXTRA,
    VIRTIO_NET_F_CTRL_VLAN,
    VIRTIO_NET_F_CTRL_VQ,
    VIRTIO_NET_F_GSO,
    VIRTIO_NET_F_GUEST_CSUM,
    VIRTIO_NET_F_GUEST_ECN,
    VIRTIO_NET_F_GUEST_TSO4,
    VIRTIO_NET_F_GUEST_TSO6,
    VIRTIO_NET_F_GUEST_UFO,
    VIRTIO_NET_F_GUEST_USO4,
    VIRTIO_NET_F_GUEST_USO6,
    VIRTIO_NET_F_HASH_REPORT,
    VIRTIO_NET_F_HOST_ECN,
    VIRTIO_NET_F_HOST_TSO4,
    VIRTIO_NET_F_HOST_TSO6,
    VIRTIO_NET_F_HOST_UFO,
    VIRTIO_NET_F_HOST_USO,
    VIRTIO_NET_F_MQ,
    VIRTIO_NET_F_MRG_RXBUF,
    VIRTIO_NET_F_MTU,
    VIRTIO_NET_F_RSS,
    VIRTIO_NET_F_STATUS,
    VIRTIO_RING_F_EVENT_IDX,
    VIRTIO_RING_F_INDIRECT_DESC,

    /* VHOST_INVALID_FEATURE_BIT should always be the last entry */
    VHOST_INVALID_FEATURE_BIT
};

/** Supported device-specific feature bits with SVQ */
static const uint64_t vdpa_svq_device_features =
    BIT_ULL(VIRTIO_NET_F_CSUM) |
    BIT_ULL(VIRTIO_NET_F_GUEST_CSUM) |
    BIT_ULL(VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) |
    BIT_ULL(VIRTIO_NET_F_MTU) |
    BIT_ULL(VIRTIO_NET_F_MAC) |
    BIT_ULL(VIRTIO_NET_F_GUEST_TSO4) |
    BIT_ULL(VIRTIO_NET_F_GUEST_TSO6) |
    BIT_ULL(VIRTIO_NET_F_GUEST_ECN) |
    BIT_ULL(VIRTIO_NET_F_GUEST_UFO) |
    BIT_ULL(VIRTIO_NET_F_HOST_TSO4) |
    BIT_ULL(VIRTIO_NET_F_HOST_TSO6) |
    BIT_ULL(VIRTIO_NET_F_HOST_ECN) |
    BIT_ULL(VIRTIO_NET_F_HOST_UFO) |
    BIT_ULL(VIRTIO_NET_F_MRG_RXBUF) |
    BIT_ULL(VIRTIO_NET_F_STATUS) |
    BIT_ULL(VIRTIO_NET_F_CTRL_VQ) |
    BIT_ULL(VIRTIO_NET_F_CTRL_RX) |
    BIT_ULL(VIRTIO_NET_F_CTRL_RX_EXTRA) |
    BIT_ULL(VIRTIO_NET_F_MQ) |
    BIT_ULL(VIRTIO_F_ANY_LAYOUT) |
    BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR) |
    /* VHOST_F_LOG_ALL is exposed by SVQ */
    BIT_ULL(VHOST_F_LOG_ALL) |
    BIT_ULL(VIRTIO_NET_F_RSC_EXT) |
    BIT_ULL(VIRTIO_NET_F_STANDBY) |
    BIT_ULL(VIRTIO_NET_F_SPEED_DUPLEX);

#define VHOST_VDPA_NET_CVQ_ASID 1

VHostNetState *vhost_vdpa_get_vhost_net(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    return s->vhost_net;
}

static size_t vhost_vdpa_net_cvq_cmd_len(void)
{
    /*
     * MAC_TABLE_SET is the ctrl command that produces the longest out buffer.
     * The in buffer is always 1 byte, so it always fits here.
     */
    return sizeof(struct virtio_net_ctrl_hdr) +
           2 * sizeof(struct virtio_net_ctrl_mac) +
           MAC_TABLE_ENTRIES * ETH_ALEN;
}

static size_t vhost_vdpa_net_cvq_cmd_page_len(void)
{
    return ROUND_UP(vhost_vdpa_net_cvq_cmd_len(), qemu_real_host_page_size());
}
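
/*
 * Worked example (illustrative only): with the usual definitions,
 * sizeof(struct virtio_net_ctrl_hdr) == 2, sizeof(struct
 * virtio_net_ctrl_mac) == 4 (only the entries field; the MAC array is a
 * flexible array member), MAC_TABLE_ENTRIES == 64 and ETH_ALEN == 6, so
 * vhost_vdpa_net_cvq_cmd_len() == 2 + 2 * 4 + 64 * 6 == 394 bytes.
 * Assuming a 4 KiB host page size, vhost_vdpa_net_cvq_cmd_page_len()
 * rounds that up to 4096, so each shadow buffer occupies one page.
 */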

static bool vhost_vdpa_net_valid_svq_features(uint64_t features, Error **errp)
{
    uint64_t invalid_dev_features =
        features & ~vdpa_svq_device_features &
        /* Transport features are all accepted at this point */
        ~MAKE_64BIT_MASK(VIRTIO_TRANSPORT_F_START,
                         VIRTIO_TRANSPORT_F_END - VIRTIO_TRANSPORT_F_START);

    if (invalid_dev_features) {
        error_setg(errp, "vdpa svq does not work with features 0x%" PRIx64,
                   invalid_dev_features);
        return false;
    }

    return vhost_svq_valid_features(features, errp);
}
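
/*
 * Example (illustrative only): if the device offers VIRTIO_F_VERSION_1,
 * VIRTIO_NET_F_CTRL_VQ and VIRTIO_NET_F_RSS, the transport bit
 * (VIRTIO_F_VERSION_1) is masked out and VIRTIO_NET_F_CTRL_VQ is part of
 * vdpa_svq_device_features, but VIRTIO_NET_F_RSS is not, so
 * invalid_dev_features ends up with the RSS bit set and validation fails.
 */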

static int vhost_vdpa_net_check_device_id(struct vhost_net *net)
{
    uint32_t device_id;
    int ret;
    struct vhost_dev *hdev;

    hdev = (struct vhost_dev *)&net->dev;
    ret = hdev->vhost_ops->vhost_get_device_id(hdev, &device_id);
    if (ret < 0) {
        /* Do not check device_id if the query itself failed */
        return ret;
    }
    if (device_id != VIRTIO_ID_NET) {
        return -ENOTSUP;
    }
    return 0;
}

static int vhost_vdpa_add(NetClientState *ncs, void *be,
                          int queue_pair_index, int nvqs)
{
    VhostNetOptions options;
    struct vhost_net *net = NULL;
    VhostVDPAState *s;
    int ret;

    options.backend_type = VHOST_BACKEND_TYPE_VDPA;
    assert(ncs->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    s = DO_UPCAST(VhostVDPAState, nc, ncs);
    options.net_backend = ncs;
    options.opaque      = be;
    options.busyloop_timeout = 0;
    options.nvqs = nvqs;

    net = vhost_net_init(&options);
    if (!net) {
        error_report("failed to init vhost_net for queue");
        goto err_init;
    }
    s->vhost_net = net;
    ret = vhost_vdpa_net_check_device_id(net);
    if (ret) {
        goto err_check;
    }
    return 0;
err_check:
    vhost_net_cleanup(net);
    g_free(net);
err_init:
    return -1;
}

static void vhost_vdpa_cleanup(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);

    /*
     * If a peer NIC is attached, do not clean up anything.
     * Cleanup will happen as a part of qemu_cleanup() -> net_cleanup()
     * when the guest is shutting down.
     */
    if (nc->peer && nc->peer->info->type == NET_CLIENT_DRIVER_NIC) {
        return;
    }
    munmap(s->cvq_cmd_out_buffer, vhost_vdpa_net_cvq_cmd_page_len());
    munmap(s->status, vhost_vdpa_net_cvq_cmd_page_len());
    if (s->vhost_net) {
        vhost_net_cleanup(s->vhost_net);
        g_free(s->vhost_net);
        s->vhost_net = NULL;
    }
    if (s->vhost_vdpa.device_fd >= 0) {
        qemu_close(s->vhost_vdpa.device_fd);
        s->vhost_vdpa.device_fd = -1;
    }
}

static bool vhost_vdpa_has_vnet_hdr(NetClientState *nc)
{
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    return true;
}

static bool vhost_vdpa_has_ufo(NetClientState *nc)
{
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    uint64_t features = 0;
    features |= (1ULL << VIRTIO_NET_F_HOST_UFO);
    features = vhost_net_get_features(s->vhost_net, features);
    return !!(features & (1ULL << VIRTIO_NET_F_HOST_UFO));
}

static bool vhost_vdpa_check_peer_type(NetClientState *nc, ObjectClass *oc,
                                       Error **errp)
{
    const char *driver = object_class_get_name(oc);

    if (!g_str_has_prefix(driver, "virtio-net-")) {
        error_setg(errp, "vhost-vdpa requires frontend driver virtio-net-*");
        return false;
    }

    return true;
}

/** Dummy receive in case qemu falls back to userland tap networking */
static ssize_t vhost_vdpa_receive(NetClientState *nc, const uint8_t *buf,
                                  size_t size)
{
    return size;
}

/** From any vdpa net client, get the netclient of the first queue pair */
static VhostVDPAState *vhost_vdpa_net_first_nc_vdpa(VhostVDPAState *s)
{
    NICState *nic = qemu_get_nic(s->nc.peer);
    NetClientState *nc0 = qemu_get_peer(nic->ncs, 0);

    return DO_UPCAST(VhostVDPAState, nc, nc0);
}

static void vhost_vdpa_net_log_global_enable(VhostVDPAState *s, bool enable)
{
    struct vhost_vdpa *v = &s->vhost_vdpa;
    VirtIONet *n;
    VirtIODevice *vdev;
    int data_queue_pairs, cvq, r;

    /* We are only called on the first data vq and only if x-svq is not set */
    if (s->vhost_vdpa.shadow_vqs_enabled == enable) {
        return;
    }

    vdev = v->dev->vdev;
    n = VIRTIO_NET(vdev);
    if (!n->vhost_started) {
        return;
    }

    data_queue_pairs = n->multiqueue ? n->max_queue_pairs : 1;
    cvq = virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) ?
                                  n->max_ncs - n->max_queue_pairs : 0;
    /*
     * TODO: vhost_net_stop does suspend, get_base and reset. We can be smarter
     * in the future and resume the device if read-only operations between
     * suspend and reset go wrong.
     */
    vhost_net_stop(vdev, n->nic->ncs, data_queue_pairs, cvq);

    /*
     * vhost_net_start checks whether migration is in setup or active state
     * to decide whether to configure SVQ.
     */
    r = vhost_net_start(vdev, n->nic->ncs, data_queue_pairs, cvq);
    if (unlikely(r < 0)) {
        error_report("unable to start vhost net: %s(%d)", g_strerror(-r), -r);
    }
}

323 
324 static void vdpa_net_migration_state_notifier(Notifier *notifier, void *data)
325 {
326     MigrationState *migration = data;
327     VhostVDPAState *s = container_of(notifier, VhostVDPAState,
328                                      migration_state);
329 
330     if (migration_in_setup(migration)) {
331         vhost_vdpa_net_log_global_enable(s, true);
332     } else if (migration_has_failed(migration)) {
333         vhost_vdpa_net_log_global_enable(s, false);
334     }
335 }
336 
337 static void vhost_vdpa_net_data_start_first(VhostVDPAState *s)
338 {
339     struct vhost_vdpa *v = &s->vhost_vdpa;
340 
341     add_migration_state_change_notifier(&s->migration_state);
342     if (v->shadow_vqs_enabled) {
343         v->iova_tree = vhost_iova_tree_new(v->iova_range.first,
344                                            v->iova_range.last);
345     }
346 }
347 
static int vhost_vdpa_net_data_start(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_vdpa *v = &s->vhost_vdpa;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->always_svq ||
        migration_is_setup_or_active(migrate_get_current()->state)) {
        v->shadow_vqs_enabled = true;
        v->shadow_data = true;
    } else {
        v->shadow_vqs_enabled = false;
        v->shadow_data = false;
    }

    if (v->index == 0) {
        vhost_vdpa_net_data_start_first(s);
        return 0;
    }

    if (v->shadow_vqs_enabled) {
        VhostVDPAState *s0 = vhost_vdpa_net_first_nc_vdpa(s);
        v->iova_tree = s0->vhost_vdpa.iova_tree;
    }

    return 0;
}

static void vhost_vdpa_net_client_stop(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_dev *dev;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->vhost_vdpa.index == 0) {
        remove_migration_state_change_notifier(&s->migration_state);
    }

    dev = s->vhost_vdpa.dev;
    if (dev->vq_index + dev->nvqs == dev->vq_index_end) {
        g_clear_pointer(&s->vhost_vdpa.iova_tree, vhost_iova_tree_delete);
    }
}

static NetClientInfo net_vhost_vdpa_info = {
    .type = NET_CLIENT_DRIVER_VHOST_VDPA,
    .size = sizeof(VhostVDPAState),
    .receive = vhost_vdpa_receive,
    .start = vhost_vdpa_net_data_start,
    .stop = vhost_vdpa_net_client_stop,
    .cleanup = vhost_vdpa_cleanup,
    .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
    .has_ufo = vhost_vdpa_has_ufo,
    .check_peer_type = vhost_vdpa_check_peer_type,
};

static int64_t vhost_vdpa_get_vring_group(int device_fd, unsigned vq_index,
                                          Error **errp)
{
    struct vhost_vring_state state = {
        .index = vq_index,
    };
    int r = ioctl(device_fd, VHOST_VDPA_GET_VRING_GROUP, &state);

    if (unlikely(r < 0)) {
        r = -errno;
        error_setg_errno(errp, errno, "Cannot get VQ %u group", vq_index);
        return r;
    }

    return state.num;
}

static int vhost_vdpa_set_address_space_id(struct vhost_vdpa *v,
                                           unsigned vq_group,
                                           unsigned asid_num)
{
    struct vhost_vring_state asid = {
        .index = vq_group,
        .num = asid_num,
    };
    int r;

    r = ioctl(v->device_fd, VHOST_VDPA_SET_GROUP_ASID, &asid);
    if (unlikely(r < 0)) {
        error_report("Can't set vq group %u asid %u, errno=%d (%s)",
                     asid.index, asid.num, errno, g_strerror(errno));
    }
    return r;
}

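/*
 * Illustrative sketch (not called anywhere): how the two helpers above
 * work together. To isolate a control virtqueue, first query which group
 * the vq belongs to, then move that whole group into a separate address
 * space. The fd and vq index below are hypothetical.
 *
 *     Error *err = NULL;
 *     int64_t group = vhost_vdpa_get_vring_group(device_fd, cvq_index, &err);
 *     if (group >= 0) {
 *         vhost_vdpa_set_address_space_id(v, group, VHOST_VDPA_NET_CVQ_ASID);
 *     }
 */
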
static void vhost_vdpa_cvq_unmap_buf(struct vhost_vdpa *v, void *addr)
{
    VhostIOVATree *tree = v->iova_tree;
    DMAMap needle = {
        /*
         * No need to specify size or to look for more translations since
         * this contiguous chunk was allocated by us.
         */
        .translated_addr = (hwaddr)(uintptr_t)addr,
    };
    const DMAMap *map = vhost_iova_tree_find_iova(tree, &needle);
    int r;

    if (unlikely(!map)) {
        error_report("Cannot locate expected map");
        return;
    }

    r = vhost_vdpa_dma_unmap(v, v->address_space_id, map->iova, map->size + 1);
    if (unlikely(r != 0)) {
        error_report("Device cannot unmap: %s(%d)", g_strerror(r), r);
    }

    vhost_iova_tree_remove(tree, *map);
}

/** Map CVQ buffer. */
static int vhost_vdpa_cvq_map_buf(struct vhost_vdpa *v, void *buf, size_t size,
                                  bool write)
{
    DMAMap map = {};
    int r;

    map.translated_addr = (hwaddr)(uintptr_t)buf;
    map.size = size - 1;
    map.perm = write ? IOMMU_RW : IOMMU_RO;
    r = vhost_iova_tree_map_alloc(v->iova_tree, &map);
    if (unlikely(r != IOVA_OK)) {
        error_report("Cannot map injected element");
        return r;
    }

    r = vhost_vdpa_dma_map(v, v->address_space_id, map.iova,
                           vhost_vdpa_net_cvq_cmd_page_len(), buf, !write);
    if (unlikely(r < 0)) {
        goto dma_map_err;
    }

    return 0;

dma_map_err:
    vhost_iova_tree_remove(v->iova_tree, map);
    return r;
}

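/*
 * Illustrative sketch (assumptions noted): the CVQ shadow buffers are
 * mapped once at start and unmapped at stop, always in matched pairs.
 * A hypothetical caller would look like:
 *
 *     r = vhost_vdpa_cvq_map_buf(v, s->cvq_cmd_out_buffer,
 *                                vhost_vdpa_net_cvq_cmd_page_len(), false);
 *     if (r == 0) {
 *         ...use the buffer...
 *         vhost_vdpa_cvq_unmap_buf(v, s->cvq_cmd_out_buffer);
 *     }
 */
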
static int vhost_vdpa_net_cvq_start(NetClientState *nc)
{
    VhostVDPAState *s, *s0;
    struct vhost_vdpa *v;
    int64_t cvq_group;
    int r;
    Error *err = NULL;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    s = DO_UPCAST(VhostVDPAState, nc, nc);
    v = &s->vhost_vdpa;

    s0 = vhost_vdpa_net_first_nc_vdpa(s);
    v->shadow_data = s0->vhost_vdpa.shadow_vqs_enabled;
    v->shadow_vqs_enabled = s->always_svq;
    s->vhost_vdpa.address_space_id = VHOST_VDPA_GUEST_PA_ASID;

    if (s->vhost_vdpa.shadow_data) {
        /* SVQ is already configured for all virtqueues */
        goto out;
    }

    /*
     * If we return early in these cases, SVQ will not be enabled, and
     * migration will be blocked as long as the vhost-vdpa backend does not
     * offer _F_LOG.
     */
    if (!vhost_vdpa_net_valid_svq_features(v->dev->features, NULL)) {
        return 0;
    }

    if (!s->cvq_isolated) {
        return 0;
    }

    cvq_group = vhost_vdpa_get_vring_group(v->device_fd,
                                           v->dev->vq_index_end - 1,
                                           &err);
    if (unlikely(cvq_group < 0)) {
        error_report_err(err);
        return cvq_group;
    }

    r = vhost_vdpa_set_address_space_id(v, cvq_group, VHOST_VDPA_NET_CVQ_ASID);
    if (unlikely(r < 0)) {
        return r;
    }

    v->shadow_vqs_enabled = true;
    s->vhost_vdpa.address_space_id = VHOST_VDPA_NET_CVQ_ASID;

out:
    if (!s->vhost_vdpa.shadow_vqs_enabled) {
        return 0;
    }

    if (s0->vhost_vdpa.iova_tree) {
        /*
         * SVQ is already configured for all virtqueues.  Reuse the IOVA tree
         * for simplicity, whether CVQ shares ASID with the guest or not,
         * because:
         * - The memory listener needs access to guest's memory addresses
         *   allocated in the IOVA tree.
         * - There should be plenty of IOVA address space for both ASIDs not
         *   to worry about collisions between them.  Guest's translations
         *   are still validated with virtio virtqueue_pop so there is no
         *   risk for the guest to access memory that it shouldn't.
         *
         * Allocating an IOVA tree per ASID is doable but it complicates the
         * code and it is not worth it for the moment.
         */
        v->iova_tree = s0->vhost_vdpa.iova_tree;
    } else {
        v->iova_tree = vhost_iova_tree_new(v->iova_range.first,
                                           v->iova_range.last);
    }

    r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer,
                               vhost_vdpa_net_cvq_cmd_page_len(), false);
    if (unlikely(r < 0)) {
        return r;
    }

    r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->status,
                               vhost_vdpa_net_cvq_cmd_page_len(), true);
    if (unlikely(r < 0)) {
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
    }

    return r;
}

static void vhost_vdpa_net_cvq_stop(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->vhost_vdpa.shadow_vqs_enabled) {
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->status);
    }

    vhost_vdpa_net_client_stop(nc);
}

static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s, size_t out_len,
                                      size_t in_len)
{
    /* Buffers for the device */
    const struct iovec out = {
        .iov_base = s->cvq_cmd_out_buffer,
        .iov_len = out_len,
    };
    const struct iovec in = {
        .iov_base = s->status,
        .iov_len = sizeof(virtio_net_ctrl_ack),
    };
    VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);
    int r;

    r = vhost_svq_add(svq, &out, 1, &in, 1, NULL);
    if (unlikely(r != 0)) {
        if (unlikely(r == -ENOSPC)) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: No space on device queue\n",
                          __func__);
        }
        return r;
    }

    /*
     * We can poll here since we've had the BQL from the time we sent the
     * descriptor.  Also, we need to take the answer before SVQ polls it by
     * itself, when the BQL is released.
     */
    return vhost_svq_poll(svq);
}

static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s, uint8_t class,
                                       uint8_t cmd, const struct iovec *data_sg,
                                       size_t data_num)
{
    const struct virtio_net_ctrl_hdr ctrl = {
        .class = class,
        .cmd = cmd,
    };
    size_t data_size = iov_size(data_sg, data_num);

    assert(data_size < vhost_vdpa_net_cvq_cmd_page_len() - sizeof(ctrl));

    /* pack the CVQ command header */
    memcpy(s->cvq_cmd_out_buffer, &ctrl, sizeof(ctrl));

    /* pack the CVQ command-specific data */
    iov_to_buf(data_sg, data_num, 0,
               s->cvq_cmd_out_buffer + sizeof(ctrl), data_size);

    return vhost_vdpa_net_cvq_add(s, data_size + sizeof(ctrl),
                                  sizeof(virtio_net_ctrl_ack));
}

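/*
 * Resulting layout of the shadow buffers (illustrative): the 2-byte
 * control header is followed immediately by the flattened
 * command-specific data, while the in buffer is the single
 * virtio_net_ctrl_ack byte:
 *
 *     s->cvq_cmd_out_buffer:  [class][cmd][data_sg bytes ...]
 *     s->status:              [ack]
 */
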
static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n)
{
    if (virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
        const struct iovec data = {
            .iov_base = (void *)n->mac,
            .iov_len = sizeof(n->mac),
        };
        ssize_t dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_MAC,
                                                  VIRTIO_NET_CTRL_MAC_ADDR_SET,
                                                  &data, 1);
        if (unlikely(dev_written < 0)) {
            return dev_written;
        }
        if (*s->status != VIRTIO_NET_OK) {
            return -EIO;
        }
    }

    /*
     * According to the VirtIO standard, "The device MUST have an
     * empty MAC filtering table on reset.".
     *
     * Therefore, there is no need to send this CVQ command if the
     * driver also sets an empty MAC filter table, which aligns with
     * the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX) ||
        n->mac_table.in_use == 0) {
        return 0;
    }

    uint32_t uni_entries = n->mac_table.first_multi,
             uni_macs_size = uni_entries * ETH_ALEN,
             mul_entries = n->mac_table.in_use - uni_entries,
             mul_macs_size = mul_entries * ETH_ALEN;
    struct virtio_net_ctrl_mac uni = {
        .entries = cpu_to_le32(uni_entries),
    };
    struct virtio_net_ctrl_mac mul = {
        .entries = cpu_to_le32(mul_entries),
    };
    const struct iovec data[] = {
        {
            .iov_base = &uni,
            .iov_len = sizeof(uni),
        }, {
            .iov_base = n->mac_table.macs,
            .iov_len = uni_macs_size,
        }, {
            .iov_base = &mul,
            .iov_len = sizeof(mul),
        }, {
            .iov_base = &n->mac_table.macs[uni_macs_size],
            .iov_len = mul_macs_size,
        },
    };
    ssize_t dev_written = vhost_vdpa_net_load_cmd(s,
                                VIRTIO_NET_CTRL_MAC,
                                VIRTIO_NET_CTRL_MAC_TABLE_SET,
                                data, ARRAY_SIZE(data));
    if (unlikely(dev_written < 0)) {
        return dev_written;
    }
    if (*s->status != VIRTIO_NET_OK) {
        return -EIO;
    }

    return 0;
}

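/*
 * Wire format of the MAC_TABLE_SET payload built above (illustrative):
 * the four iovecs concatenate into
 *
 *     [uni.entries (4 bytes)][uni_entries * 6 bytes of unicast MACs]
 *     [mul.entries (4 bytes)][mul_entries * 6 bytes of multicast MACs]
 *
 * For example, with 3 unicast and 2 multicast entries the payload is
 * 4 + 18 + 4 + 12 = 38 bytes, plus the 2-byte control header packed by
 * vhost_vdpa_net_load_cmd().
 */
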
static int vhost_vdpa_net_load_mq(VhostVDPAState *s,
                                  const VirtIONet *n)
{
    struct virtio_net_ctrl_mq mq;
    ssize_t dev_written;

    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_MQ)) {
        return 0;
    }

    mq.virtqueue_pairs = cpu_to_le16(n->curr_queue_pairs);
    const struct iovec data = {
        .iov_base = &mq,
        .iov_len = sizeof(mq),
    };
    dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_MQ,
                                          VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET,
                                          &data, 1);
    if (unlikely(dev_written < 0)) {
        return dev_written;
    }
    if (*s->status != VIRTIO_NET_OK) {
        return -EIO;
    }

    return 0;
}

static int vhost_vdpa_net_load_offloads(VhostVDPAState *s,
                                        const VirtIONet *n)
{
    uint64_t offloads;
    ssize_t dev_written;

    if (!virtio_vdev_has_feature(&n->parent_obj,
                                 VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
        return 0;
    }

    if (n->curr_guest_offloads == virtio_net_supported_guest_offloads(n)) {
        /*
         * According to the VirtIO standard, "Upon feature negotiation
         * corresponding offload gets enabled to preserve
         * backward compatibility.".
         *
         * Therefore, there is no need to send this CVQ command if the
         * driver also enables all supported offloads, which aligns with
         * the device's defaults.
         *
         * Note that the device's defaults can mismatch the driver's
         * configuration only at live migration.
         */
        return 0;
    }

    offloads = cpu_to_le64(n->curr_guest_offloads);
    const struct iovec data = {
        .iov_base = &offloads,
        .iov_len = sizeof(offloads),
    };
    dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
                                          VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET,
                                          &data, 1);
    if (unlikely(dev_written < 0)) {
        return dev_written;
    }
    if (*s->status != VIRTIO_NET_OK) {
        return -EIO;
    }

    return 0;
}

static int vhost_vdpa_net_load_rx_mode(VhostVDPAState *s,
                                       uint8_t cmd,
                                       uint8_t on)
{
    const struct iovec data = {
        .iov_base = &on,
        .iov_len = sizeof(on),
    };
    return vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_RX,
                                   cmd, &data, 1);
}

static int vhost_vdpa_net_load_rx(VhostVDPAState *s,
                                  const VirtIONet *n)
{
    ssize_t dev_written;

    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX)) {
        return 0;
    }

    /*
     * According to virtio_net_reset(), the device turns promiscuous mode
     * on by default.
     *
     * Additionally, according to the VirtIO standard, "Since there are
     * no guarantees, it can use a hash filter or silently switch to
     * allmulti or promiscuous mode if it is given too many addresses.".
     * QEMU marks `n->mac_table.uni_overflow` if the guest sets too many
     * non-multicast MAC addresses, indicating that promiscuous mode
     * should stay enabled.
     *
     * Therefore, QEMU should only send this CVQ command if
     * `n->mac_table.uni_overflow` is not marked and `n->promisc` is off,
     * which turns promiscuous mode off, differing from the device's default.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (!n->mac_table.uni_overflow && !n->promisc) {
        dev_written = vhost_vdpa_net_load_rx_mode(s,
                                            VIRTIO_NET_CTRL_RX_PROMISC, 0);
        if (unlikely(dev_written < 0)) {
            return dev_written;
        }
        if (*s->status != VIRTIO_NET_OK) {
            return -EIO;
        }
    }

    /*
     * According to virtio_net_reset(), the device turns all-multicast mode
     * off by default.
     *
     * According to the VirtIO standard, "Since there are no guarantees,
     * it can use a hash filter or silently switch to allmulti or
     * promiscuous mode if it is given too many addresses.". QEMU marks
     * `n->mac_table.multi_overflow` if the guest sets too many
     * multicast MAC addresses.
     *
     * Therefore, QEMU should only send this CVQ command if
     * `n->mac_table.multi_overflow` is marked or `n->allmulti` is on,
     * which turns all-multicast mode on, differing from the device's
     * default.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->mac_table.multi_overflow || n->allmulti) {
        dev_written = vhost_vdpa_net_load_rx_mode(s,
                                            VIRTIO_NET_CTRL_RX_ALLMULTI, 1);
        if (unlikely(dev_written < 0)) {
            return dev_written;
        }
        if (*s->status != VIRTIO_NET_OK) {
            return -EIO;
        }
    }

    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX_EXTRA)) {
        return 0;
    }

    /*
     * According to virtio_net_reset(), the device turns all-unicast mode
     * off by default.
     *
     * Therefore, QEMU should only send this CVQ command if the driver
     * turns all-unicast mode on, differing from the device's default.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->alluni) {
        dev_written = vhost_vdpa_net_load_rx_mode(s,
                                            VIRTIO_NET_CTRL_RX_ALLUNI, 1);
        if (unlikely(dev_written < 0)) {
            return dev_written;
        }
        if (*s->status != VIRTIO_NET_OK) {
            return -EIO;
        }
    }

    /*
     * According to virtio_net_reset(), the device turns non-multicast mode
     * off by default.
     *
     * Therefore, QEMU should only send this CVQ command if the driver
     * turns non-multicast mode on, differing from the device's default.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->nomulti) {
        dev_written = vhost_vdpa_net_load_rx_mode(s,
                                            VIRTIO_NET_CTRL_RX_NOMULTI, 1);
        if (unlikely(dev_written < 0)) {
            return dev_written;
        }
        if (*s->status != VIRTIO_NET_OK) {
            return -EIO;
        }
    }

    /*
     * According to virtio_net_reset(), the device turns non-unicast mode
     * off by default.
     *
     * Therefore, QEMU should only send this CVQ command if the driver
     * turns non-unicast mode on, differing from the device's default.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->nouni) {
        dev_written = vhost_vdpa_net_load_rx_mode(s,
                                            VIRTIO_NET_CTRL_RX_NOUNI, 1);
        if (unlikely(dev_written < 0)) {
            return dev_written;
        }
        if (*s->status != VIRTIO_NET_OK) {
            return -EIO;
        }
    }

    /*
     * According to virtio_net_reset(), the device turns non-broadcast mode
     * off by default.
     *
     * Therefore, QEMU should only send this CVQ command if the driver
     * turns non-broadcast mode on, differing from the device's default.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->nobcast) {
        dev_written = vhost_vdpa_net_load_rx_mode(s,
                                            VIRTIO_NET_CTRL_RX_NOBCAST, 1);
        if (unlikely(dev_written < 0)) {
            return dev_written;
        }
        if (*s->status != VIRTIO_NET_OK) {
            return -EIO;
        }
    }

    return 0;
}

static int vhost_vdpa_net_load(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_vdpa *v = &s->vhost_vdpa;
    const VirtIONet *n;
    int r;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (!v->shadow_vqs_enabled) {
        return 0;
    }

    n = VIRTIO_NET(v->dev->vdev);
    r = vhost_vdpa_net_load_mac(s, n);
    if (unlikely(r < 0)) {
        return r;
    }
    r = vhost_vdpa_net_load_mq(s, n);
    if (unlikely(r)) {
        return r;
    }
    r = vhost_vdpa_net_load_offloads(s, n);
    if (unlikely(r)) {
        return r;
    }
    r = vhost_vdpa_net_load_rx(s, n);
    if (unlikely(r)) {
        return r;
    }

    return 0;
}

static NetClientInfo net_vhost_vdpa_cvq_info = {
    .type = NET_CLIENT_DRIVER_VHOST_VDPA,
    .size = sizeof(VhostVDPAState),
    .receive = vhost_vdpa_receive,
    .start = vhost_vdpa_net_cvq_start,
    .load = vhost_vdpa_net_load,
    .stop = vhost_vdpa_net_cvq_stop,
    .cleanup = vhost_vdpa_cleanup,
    .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
    .has_ufo = vhost_vdpa_has_ufo,
    .check_peer_type = vhost_vdpa_check_peer_type,
};

/*
 * Forward the excessive VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command to the
 * vdpa device.
 *
 * Considering that QEMU cannot send the entire filter table to the
 * vdpa device, it should send the VIRTIO_NET_CTRL_RX_PROMISC CVQ
 * command to enable promiscuous mode to receive all packets,
 * according to the VirtIO standard, "Since there are no guarantees,
 * it can use a hash filter or silently switch to allmulti or
 * promiscuous mode if it is given too many addresses.".
 *
 * Since QEMU ignores MAC addresses beyond `MAC_TABLE_ENTRIES` and
 * marks `n->mac_table.x_overflow` accordingly, providing the device
 * model with (`MAC_TABLE_ENTRIES` + 1) or more non-multicast MAC
 * addresses has the same effect. The same applies to multicast MAC
 * addresses.
 *
 * Therefore, QEMU can provide the device model with a fake
 * VIRTIO_NET_CTRL_MAC_TABLE_SET command with (`MAC_TABLE_ENTRIES` + 1)
 * non-multicast MAC addresses and (`MAC_TABLE_ENTRIES` + 1) multicast
 * MAC addresses. This ensures that the device model marks
 * `n->mac_table.uni_overflow` and `n->mac_table.multi_overflow`,
 * allowing all packets to be received, which aligns with the
 * state of the vdpa device.
 */
static int vhost_vdpa_net_excessive_mac_filter_cvq_add(VhostVDPAState *s,
                                                       VirtQueueElement *elem,
                                                       struct iovec *out)
{
    struct virtio_net_ctrl_mac mac_data, *mac_ptr;
    struct virtio_net_ctrl_hdr *hdr_ptr;
    uint32_t cursor;
    ssize_t r;

    /* parse the non-multicast MAC address entries from the CVQ command */
    cursor = sizeof(*hdr_ptr);
    r = iov_to_buf(elem->out_sg, elem->out_num, cursor,
                   &mac_data, sizeof(mac_data));
    if (unlikely(r != sizeof(mac_data))) {
        /*
         * If the CVQ command is invalid, we should simulate the vdpa device
         * rejecting the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
         */
        *s->status = VIRTIO_NET_ERR;
        return sizeof(*s->status);
    }
    cursor += sizeof(mac_data) + le32_to_cpu(mac_data.entries) * ETH_ALEN;

    /* parse the multicast MAC address entries from the CVQ command */
    r = iov_to_buf(elem->out_sg, elem->out_num, cursor,
                   &mac_data, sizeof(mac_data));
    if (r != sizeof(mac_data)) {
        /*
         * If the CVQ command is invalid, we should simulate the vdpa device
         * rejecting the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
         */
        *s->status = VIRTIO_NET_ERR;
        return sizeof(*s->status);
    }
    cursor += sizeof(mac_data) + le32_to_cpu(mac_data.entries) * ETH_ALEN;

    /* validate the CVQ command */
    if (iov_size(elem->out_sg, elem->out_num) != cursor) {
        /*
         * If the CVQ command is invalid, we should simulate the vdpa device
         * rejecting the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
         */
        *s->status = VIRTIO_NET_ERR;
        return sizeof(*s->status);
    }

    /*
     * According to the VirtIO standard, "Since there are no guarantees,
     * it can use a hash filter or silently switch to allmulti or
     * promiscuous mode if it is given too many addresses.".
     *
     * Therefore, considering that QEMU is unable to send the entire
     * filter table to the vdpa device, it should send the
     * VIRTIO_NET_CTRL_RX_PROMISC CVQ command to enable promiscuous mode
     */
    r = vhost_vdpa_net_load_rx_mode(s, VIRTIO_NET_CTRL_RX_PROMISC, 1);
    if (unlikely(r < 0)) {
        return r;
    }
    if (*s->status != VIRTIO_NET_OK) {
        return sizeof(*s->status);
    }

    /*
     * QEMU should also send a fake VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ
     * command to the device model, including (`MAC_TABLE_ENTRIES` + 1)
     * non-multicast MAC addresses and (`MAC_TABLE_ENTRIES` + 1)
     * multicast MAC addresses.
     *
     * By doing so, the device model can mark `n->mac_table.uni_overflow`
     * and `n->mac_table.multi_overflow`, enabling all packets to be
     * received, which aligns with the state of the vdpa device.
     */
    cursor = 0;
    uint32_t fake_uni_entries = MAC_TABLE_ENTRIES + 1,
             fake_mul_entries = MAC_TABLE_ENTRIES + 1,
             fake_cvq_size = sizeof(struct virtio_net_ctrl_hdr) +
                             sizeof(mac_data) + fake_uni_entries * ETH_ALEN +
                             sizeof(mac_data) + fake_mul_entries * ETH_ALEN;

    assert(fake_cvq_size < vhost_vdpa_net_cvq_cmd_page_len());
    out->iov_len = fake_cvq_size;

    /* pack the header for the fake CVQ command */
    hdr_ptr = out->iov_base + cursor;
    hdr_ptr->class = VIRTIO_NET_CTRL_MAC;
    hdr_ptr->cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;
    cursor += sizeof(*hdr_ptr);

    /*
     * Pack the non-multicast MAC addresses part of the fake CVQ command.
     *
     * According to virtio_net_handle_mac(), QEMU doesn't verify the MAC
     * addresses provided in the CVQ command. Therefore, only the entries
     * field needs to be prepared in the CVQ command.
     */
    mac_ptr = out->iov_base + cursor;
    mac_ptr->entries = cpu_to_le32(fake_uni_entries);
    cursor += sizeof(*mac_ptr) + fake_uni_entries * ETH_ALEN;

    /*
     * Pack the multicast MAC addresses part of the fake CVQ command.
     *
     * According to virtio_net_handle_mac(), QEMU doesn't verify the MAC
     * addresses provided in the CVQ command. Therefore, only the entries
     * field needs to be prepared in the CVQ command.
     */
    mac_ptr = out->iov_base + cursor;
    mac_ptr->entries = cpu_to_le32(fake_mul_entries);

    /*
     * Simulate QEMU polling a vdpa device's used buffer
     * for the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
     */
    return sizeof(*s->status);
}

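/*
 * Size of the fake command built above (illustrative arithmetic): with
 * MAC_TABLE_ENTRIES == 64 and ETH_ALEN == 6, fake_cvq_size is
 * 2 (header) + 4 + 65 * 6 (unicast part) + 4 + 65 * 6 (multicast part)
 * = 790 bytes, comfortably below the one-page shadow buffer asserted
 * against vhost_vdpa_net_cvq_cmd_page_len().
 */
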
/**
 * Validate and copy control virtqueue commands.
 *
 * Following QEMU guidelines, we offer a copy of the buffers to the device to
 * prevent TOCTOU bugs.
 */
static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
                                            VirtQueueElement *elem,
                                            void *opaque)
{
    VhostVDPAState *s = opaque;
    size_t in_len;
    const struct virtio_net_ctrl_hdr *ctrl;
    virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
    /* Out buffer sent to both the vdpa device and the device model */
    struct iovec out = {
        .iov_base = s->cvq_cmd_out_buffer,
    };
    /* in buffer used for the device model */
    const struct iovec in = {
        .iov_base = &status,
        .iov_len = sizeof(status),
    };
    ssize_t dev_written = -EINVAL;

    out.iov_len = iov_to_buf(elem->out_sg, elem->out_num, 0,
                             s->cvq_cmd_out_buffer,
                             vhost_vdpa_net_cvq_cmd_page_len());

    ctrl = s->cvq_cmd_out_buffer;
    if (ctrl->class == VIRTIO_NET_CTRL_ANNOUNCE) {
        /*
         * Guest announce capability is emulated by qemu, so don't forward to
         * the device.
         */
        dev_written = sizeof(status);
        *s->status = VIRTIO_NET_OK;
    } else if (unlikely(ctrl->class == VIRTIO_NET_CTRL_MAC &&
                        ctrl->cmd == VIRTIO_NET_CTRL_MAC_TABLE_SET &&
                        iov_size(elem->out_sg, elem->out_num) > out.iov_len)) {
        /*
         * Due to the size limitation of the out buffer sent to the vdpa device,
         * which is determined by vhost_vdpa_net_cvq_cmd_page_len(), excessive
         * MAC addresses set by the driver for the filter table can cause
         * truncation of the CVQ command in QEMU. As a result, the vdpa device
         * rejects the flawed CVQ command.
         *
         * Therefore, QEMU must handle this situation instead of sending
         * the CVQ command directly.
         */
        dev_written = vhost_vdpa_net_excessive_mac_filter_cvq_add(s, elem,
                                                                  &out);
        if (unlikely(dev_written < 0)) {
            goto out;
        }
    } else {
        dev_written = vhost_vdpa_net_cvq_add(s, out.iov_len, sizeof(status));
        if (unlikely(dev_written < 0)) {
            goto out;
        }
    }

    if (unlikely(dev_written < sizeof(status))) {
        error_report("Insufficient written data (%zd)", dev_written);
        goto out;
    }

    if (*s->status != VIRTIO_NET_OK) {
        goto out;
    }

    status = VIRTIO_NET_ERR;
    virtio_net_handle_ctrl_iov(svq->vdev, &in, 1, &out, 1);
    if (status != VIRTIO_NET_OK) {
        error_report("Bad CVQ processing in model");
    }

out:
    in_len = iov_from_buf(elem->in_sg, elem->in_num, 0, &status,
                          sizeof(status));
    if (unlikely(in_len < sizeof(status))) {
        error_report("Bad device CVQ written length");
    }
    vhost_svq_push_elem(svq, elem, MIN(in_len, sizeof(status)));
    /*
     * `elem` belongs to vhost_vdpa_net_handle_ctrl_avail() only when
     * the function successfully forwards the CVQ command, indicated
     * by a non-negative value of `dev_written`. Otherwise, it still
     * belongs to SVQ.
     * This function should only free `elem` when it owns it.
     */
    if (dev_written >= 0) {
        g_free(elem);
    }
    return dev_written < 0 ? dev_written : 0;
}

static const VhostShadowVirtqueueOps vhost_vdpa_net_svq_ops = {
    .avail_handler = vhost_vdpa_net_handle_ctrl_avail,
};

/**
 * Probe if CVQ is isolated
 *
 * @device_fd         The vdpa device fd
 * @features          Features offered by the device.
 * @cvq_index         The control vq pair index
 *
 * Returns <0 in case of failure, 0 if false and 1 if true.
 */
static int vhost_vdpa_probe_cvq_isolation(int device_fd, uint64_t features,
                                          int cvq_index, Error **errp)
{
    uint64_t backend_features;
    int64_t cvq_group;
    uint8_t status = VIRTIO_CONFIG_S_ACKNOWLEDGE |
                     VIRTIO_CONFIG_S_DRIVER |
                     VIRTIO_CONFIG_S_FEATURES_OK;
    int r;

    ERRP_GUARD();

    r = ioctl(device_fd, VHOST_GET_BACKEND_FEATURES, &backend_features);
    if (unlikely(r < 0)) {
        error_setg_errno(errp, errno, "Cannot get vdpa backend_features");
        return r;
    }

    if (!(backend_features & BIT_ULL(VHOST_BACKEND_F_IOTLB_ASID))) {
        return 0;
    }

    r = ioctl(device_fd, VHOST_SET_FEATURES, &features);
    if (unlikely(r)) {
        error_setg_errno(errp, errno, "Cannot set features");
        goto out;
    }

    r = ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
    if (unlikely(r)) {
        error_setg_errno(errp, errno, "Cannot set device status");
        goto out;
    }

    cvq_group = vhost_vdpa_get_vring_group(device_fd, cvq_index, errp);
    if (unlikely(cvq_group < 0)) {
        if (cvq_group != -ENOTSUP) {
            r = cvq_group;
            goto out;
        }

        /*
         * The kernel reports VHOST_BACKEND_F_IOTLB_ASID if the vdpa frontend
         * supports ASID even if the parent driver does not.  The CVQ cannot
         * be isolated in this case.
         */
        error_free(*errp);
        *errp = NULL;
        r = 0;
        goto out;
    }

    for (int i = 0; i < cvq_index; ++i) {
        int64_t group = vhost_vdpa_get_vring_group(device_fd, i, errp);
        if (unlikely(group < 0)) {
            r = group;
            goto out;
        }

        if (group == (int64_t)cvq_group) {
            r = 0;
            goto out;
        }
    }

    r = 1;

out:
    status = 0;
    ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
    return r;
}

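/*
 * Illustrative reading of the tri-state return value (mirrors how
 * net_vhost_vdpa_init() below consumes it):
 *
 *     < 0: probing failed; the caller aborts client creation.
 *       0: the CVQ shares a group with some data vq; no ASID split is
 *          possible and s->cvq_isolated stays false.
 *       1: the CVQ sits in its own group, so vhost_vdpa_net_cvq_start()
 *          may move it to VHOST_VDPA_NET_CVQ_ASID.
 */
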
static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
                                       const char *device,
                                       const char *name,
                                       int vdpa_device_fd,
                                       int queue_pair_index,
                                       int nvqs,
                                       bool is_datapath,
                                       bool svq,
                                       struct vhost_vdpa_iova_range iova_range,
                                       uint64_t features,
                                       Error **errp)
{
    NetClientState *nc = NULL;
    VhostVDPAState *s;
    int ret = 0;
    int cvq_isolated;

    assert(name);

    if (is_datapath) {
        nc = qemu_new_net_client(&net_vhost_vdpa_info, peer, device,
                                 name);
    } else {
        cvq_isolated = vhost_vdpa_probe_cvq_isolation(vdpa_device_fd, features,
                                                      queue_pair_index * 2,
                                                      errp);
        if (unlikely(cvq_isolated < 0)) {
            return NULL;
        }

        nc = qemu_new_net_control_client(&net_vhost_vdpa_cvq_info, peer,
                                         device, name);
    }
    qemu_set_info_str(nc, TYPE_VHOST_VDPA);
    s = DO_UPCAST(VhostVDPAState, nc, nc);

    s->vhost_vdpa.device_fd = vdpa_device_fd;
    s->vhost_vdpa.index = queue_pair_index;
    s->always_svq = svq;
    s->migration_state.notify = vdpa_net_migration_state_notifier;
    s->vhost_vdpa.shadow_vqs_enabled = svq;
    s->vhost_vdpa.iova_range = iova_range;
    s->vhost_vdpa.shadow_data = svq;
    if (queue_pair_index == 0) {
        vhost_vdpa_net_valid_svq_features(features,
                                          &s->vhost_vdpa.migration_blocker);
    } else if (!is_datapath) {
        s->cvq_cmd_out_buffer = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(),
                                     PROT_READ | PROT_WRITE,
                                     MAP_SHARED | MAP_ANONYMOUS, -1, 0);
        s->status = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(),
                         PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS,
                         -1, 0);

        s->vhost_vdpa.shadow_vq_ops = &vhost_vdpa_net_svq_ops;
        s->vhost_vdpa.shadow_vq_ops_opaque = s;
        s->cvq_isolated = cvq_isolated;

        /*
         * TODO: We cannot migrate devices with CVQ and no x-svq enabled as
         * there is no way to set the device state (MAC, MQ, etc) before
         * starting the datapath.
         *
         * Migration blocker ownership now belongs to s->vhost_vdpa.
         */
        if (!svq) {
            error_setg(&s->vhost_vdpa.migration_blocker,
                       "net vdpa cannot migrate with CVQ feature");
        }
    }
    ret = vhost_vdpa_add(nc, (void *)&s->vhost_vdpa, queue_pair_index, nvqs);
    if (ret) {
        qemu_del_net_client(nc);
        return NULL;
    }
    return nc;
}

static int vhost_vdpa_get_features(int fd, uint64_t *features, Error **errp)
{
    int ret = ioctl(fd, VHOST_GET_FEATURES, features);
    if (unlikely(ret < 0)) {
        error_setg_errno(errp, errno,
                         "Failed to query features from vhost-vDPA device");
    }
    return ret;
}

static int vhost_vdpa_get_max_queue_pairs(int fd, uint64_t features,
                                          int *has_cvq, Error **errp)
{
    unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
    g_autofree struct vhost_vdpa_config *config = NULL;
    __virtio16 *max_queue_pairs;
    int ret;

    if (features & (1 << VIRTIO_NET_F_CTRL_VQ)) {
        *has_cvq = 1;
    } else {
        *has_cvq = 0;
    }

    if (features & (1 << VIRTIO_NET_F_MQ)) {
        config = g_malloc0(config_size + sizeof(*max_queue_pairs));
        config->off = offsetof(struct virtio_net_config, max_virtqueue_pairs);
        config->len = sizeof(*max_queue_pairs);

        ret = ioctl(fd, VHOST_VDPA_GET_CONFIG, config);
        if (ret < 0) {
            ret = -errno;
            error_setg_errno(errp, errno,
                             "Failed to get config from vhost-vDPA device");
            return ret;
        }

        max_queue_pairs = (__virtio16 *)&config->buf;

        return lduw_le_p(max_queue_pairs);
    }

    return 1;
}

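/*
 * Illustrative layout of the VHOST_VDPA_GET_CONFIG request built above:
 * struct vhost_vdpa_config is a { off, len } header followed by a flexible
 * buf[]. Here off = offsetof(struct virtio_net_config, max_virtqueue_pairs)
 * and len = 2, so the ioctl copies the device's little-endian 16-bit
 * max_virtqueue_pairs field into buf, which lduw_le_p() then converts to
 * host endianness.
 */
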
int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
                        NetClientState *peer, Error **errp)
{
    const NetdevVhostVDPAOptions *opts;
    uint64_t features;
    int vdpa_device_fd;
    g_autofree NetClientState **ncs = NULL;
    struct vhost_vdpa_iova_range iova_range;
    NetClientState *nc;
    int queue_pairs, r, i = 0, has_cvq = 0;

    assert(netdev->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    opts = &netdev->u.vhost_vdpa;
    if (!opts->vhostdev && !opts->vhostfd) {
        error_setg(errp,
                   "vhost-vdpa: neither vhostdev= nor vhostfd= was specified");
        return -1;
    }

    if (opts->vhostdev && opts->vhostfd) {
        error_setg(errp,
                   "vhost-vdpa: vhostdev= and vhostfd= are mutually exclusive");
        return -1;
    }

    if (opts->vhostdev) {
        vdpa_device_fd = qemu_open(opts->vhostdev, O_RDWR, errp);
        if (vdpa_device_fd == -1) {
            return -errno;
        }
    } else {
        /* has_vhostfd */
        vdpa_device_fd = monitor_fd_param(monitor_cur(), opts->vhostfd, errp);
        if (vdpa_device_fd == -1) {
            error_prepend(errp, "vhost-vdpa: unable to parse vhostfd: ");
            return -1;
        }
    }

    r = vhost_vdpa_get_features(vdpa_device_fd, &features, errp);
    if (unlikely(r < 0)) {
        goto err;
    }

    queue_pairs = vhost_vdpa_get_max_queue_pairs(vdpa_device_fd, features,
                                                 &has_cvq, errp);
    if (queue_pairs < 0) {
        qemu_close(vdpa_device_fd);
        return queue_pairs;
    }

    r = vhost_vdpa_get_iova_range(vdpa_device_fd, &iova_range);
    if (unlikely(r < 0)) {
        error_setg(errp, "vhost-vdpa: get iova range failed: %s",
                   strerror(-r));
        goto err;
    }

    if (opts->x_svq && !vhost_vdpa_net_valid_svq_features(features, errp)) {
        goto err;
    }

    ncs = g_malloc0(sizeof(*ncs) * queue_pairs);

    for (i = 0; i < queue_pairs; i++) {
        ncs[i] = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
                                     vdpa_device_fd, i, 2, true, opts->x_svq,
                                     iova_range, features, errp);
        if (!ncs[i]) {
            goto err;
        }
    }

    if (has_cvq) {
        nc = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
                                 vdpa_device_fd, i, 1, false,
                                 opts->x_svq, iova_range, features, errp);
        if (!nc) {
            goto err;
        }
    }

    return 0;

err:
    if (i) {
        for (i--; i >= 0; i--) {
            qemu_del_net_client(ncs[i]);
        }
    }

    qemu_close(vdpa_device_fd);

    return -1;
}
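
/*
 * Example invocation (illustrative): a vhost-vdpa netdev is typically
 * created from the QEMU command line, e.g.
 *
 *     -netdev vhost-vdpa,vhostdev=/dev/vhost-vdpa-0,id=vdpa0,x-svq=on
 *     -device virtio-net-pci,netdev=vdpa0
 *
 * which reaches net_init_vhost_vdpa() with opts->vhostdev set and
 * opts->x_svq enabling shadow virtqueues for all queues.
 */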