/*
 * vhost-vdpa.c
 *
 * Copyright(c) 2017-2018 Intel Corporation.
 * Copyright(c) 2020 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "clients.h"
#include "hw/virtio/virtio-net.h"
#include "net/vhost_net.h"
#include "net/vhost-vdpa.h"
#include "hw/virtio/vhost-vdpa.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "qemu/memalign.h"
#include "qemu/option.h"
#include "qapi/error.h"
#include <linux/vhost.h>
#include <sys/ioctl.h>
#include <err.h>
#include "standard-headers/linux/virtio_net.h"
#include "monitor/monitor.h"
#include "hw/virtio/vhost.h"

/* TODO: add multiqueue support here */
typedef struct VhostVDPAState {
    NetClientState nc;
    struct vhost_vdpa vhost_vdpa;
    VHostNetState *vhost_net;

    /* Control commands shadow buffers */
    void *cvq_cmd_out_buffer;
    virtio_net_ctrl_ack *status;

    /* The device always has SVQ enabled */
    bool always_svq;
    bool started;
} VhostVDPAState;

const int vdpa_feature_bits[] = {
    VIRTIO_F_NOTIFY_ON_EMPTY,
    VIRTIO_RING_F_INDIRECT_DESC,
    VIRTIO_RING_F_EVENT_IDX,
    VIRTIO_F_ANY_LAYOUT,
    VIRTIO_F_VERSION_1,
    VIRTIO_NET_F_CSUM,
    VIRTIO_NET_F_GUEST_CSUM,
    VIRTIO_NET_F_GSO,
    VIRTIO_NET_F_GUEST_TSO4,
    VIRTIO_NET_F_GUEST_TSO6,
    VIRTIO_NET_F_GUEST_ECN,
    VIRTIO_NET_F_GUEST_UFO,
    VIRTIO_NET_F_HOST_TSO4,
    VIRTIO_NET_F_HOST_TSO6,
    VIRTIO_NET_F_HOST_ECN,
    VIRTIO_NET_F_HOST_UFO,
    VIRTIO_NET_F_MRG_RXBUF,
    VIRTIO_NET_F_MTU,
    VIRTIO_NET_F_CTRL_RX,
    VIRTIO_NET_F_CTRL_RX_EXTRA,
    VIRTIO_NET_F_CTRL_VLAN,
    VIRTIO_NET_F_CTRL_MAC_ADDR,
    VIRTIO_NET_F_RSS,
    VIRTIO_NET_F_MQ,
    VIRTIO_NET_F_CTRL_VQ,
    VIRTIO_F_IOMMU_PLATFORM,
    VIRTIO_F_RING_PACKED,
    VIRTIO_F_RING_RESET,
    VIRTIO_NET_F_HASH_REPORT,
    VIRTIO_NET_F_STATUS,
    VHOST_INVALID_FEATURE_BIT
};

/** Supported device-specific feature bits with SVQ */
static const uint64_t vdpa_svq_device_features =
    BIT_ULL(VIRTIO_NET_F_CSUM) |
    BIT_ULL(VIRTIO_NET_F_GUEST_CSUM) |
    BIT_ULL(VIRTIO_NET_F_MTU) |
    BIT_ULL(VIRTIO_NET_F_MAC) |
    BIT_ULL(VIRTIO_NET_F_GUEST_TSO4) |
    BIT_ULL(VIRTIO_NET_F_GUEST_TSO6) |
    BIT_ULL(VIRTIO_NET_F_GUEST_ECN) |
    BIT_ULL(VIRTIO_NET_F_GUEST_UFO) |
    BIT_ULL(VIRTIO_NET_F_HOST_TSO4) |
    BIT_ULL(VIRTIO_NET_F_HOST_TSO6) |
    BIT_ULL(VIRTIO_NET_F_HOST_ECN) |
    BIT_ULL(VIRTIO_NET_F_HOST_UFO) |
    BIT_ULL(VIRTIO_NET_F_MRG_RXBUF) |
    BIT_ULL(VIRTIO_NET_F_STATUS) |
    BIT_ULL(VIRTIO_NET_F_CTRL_VQ) |
    BIT_ULL(VIRTIO_NET_F_MQ) |
    BIT_ULL(VIRTIO_F_ANY_LAYOUT) |
    BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR) |
    BIT_ULL(VIRTIO_NET_F_RSC_EXT) |
    BIT_ULL(VIRTIO_NET_F_STANDBY);

#define VHOST_VDPA_NET_CVQ_ASID 1

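/** Return the vhost_net backing a vhost-vdpa net client */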
VHostNetState *vhost_vdpa_get_vhost_net(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    return s->vhost_net;
}

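/**
 * Check that SVQ can handle the device features.
 *
 * Transport features are accepted unconditionally here; device-specific ones
 * must all be in vdpa_svq_device_features and pass the generic SVQ check.
 */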
static bool vhost_vdpa_net_valid_svq_features(uint64_t features, Error **errp)
{
    uint64_t invalid_dev_features =
        features & ~vdpa_svq_device_features &
        /* Transport features are all accepted at this point */
        ~MAKE_64BIT_MASK(VIRTIO_TRANSPORT_F_START,
                         VIRTIO_TRANSPORT_F_END - VIRTIO_TRANSPORT_F_START);

    if (invalid_dev_features) {
        error_setg(errp, "vdpa svq does not work with features 0x%" PRIx64,
                   invalid_dev_features);
        return false;
    }

    return vhost_svq_valid_features(features, errp);
}

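/** Check that the vdpa device backing this vhost_net is a network device */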
static int vhost_vdpa_net_check_device_id(struct vhost_net *net)
{
    uint32_t device_id;
    int ret;
    struct vhost_dev *hdev;

    hdev = (struct vhost_dev *)&net->dev;
    ret = hdev->vhost_ops->vhost_get_device_id(hdev, &device_id);
    if (ret) {
        /* Don't look at device_id if the query itself failed */
        return ret;
    }
    if (device_id != VIRTIO_ID_NET) {
        return -ENOTSUP;
    }
    return 0;
}

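/*
 * Create a vhost_net instance over the vdpa backend for nvqs virtqueues and
 * attach it to the net client. Returns 0 on success, -1 on failure.
 */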
static int vhost_vdpa_add(NetClientState *ncs, void *be,
                          int queue_pair_index, int nvqs)
{
    VhostNetOptions options;
    struct vhost_net *net = NULL;
    VhostVDPAState *s;
    int ret;

    options.backend_type = VHOST_BACKEND_TYPE_VDPA;
    assert(ncs->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    s = DO_UPCAST(VhostVDPAState, nc, ncs);
    options.net_backend = ncs;
    options.opaque      = be;
    options.busyloop_timeout = 0;
    options.nvqs = nvqs;

    net = vhost_net_init(&options);
    if (!net) {
        error_report("failed to init vhost_net for queue");
        goto err_init;
    }
    s->vhost_net = net;
    ret = vhost_vdpa_net_check_device_id(net);
    if (ret) {
        goto err_check;
    }
    return 0;
err_check:
    vhost_net_cleanup(net);
    g_free(net);
    /* Don't leave a dangling pointer for vhost_vdpa_cleanup() */
    s->vhost_net = NULL;
err_init:
    return -1;
}

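/*
 * Release the client resources: CVQ shadow buffers, the IOVA tree (only from
 * the client owning the last virtqueues), the vhost_net instance and the
 * device file descriptor.
 */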
static void vhost_vdpa_cleanup(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_dev *dev = s->vhost_net ? &s->vhost_net->dev : NULL;

    qemu_vfree(s->cvq_cmd_out_buffer);
    qemu_vfree(s->status);
    if (dev && dev->vq_index + dev->nvqs == dev->vq_index_end) {
        g_clear_pointer(&s->vhost_vdpa.iova_tree, vhost_iova_tree_delete);
    }
    if (s->vhost_net) {
        vhost_net_cleanup(s->vhost_net);
        g_free(s->vhost_net);
        s->vhost_net = NULL;
    }
    if (s->vhost_vdpa.device_fd >= 0) {
        qemu_close(s->vhost_vdpa.device_fd);
        s->vhost_vdpa.device_fd = -1;
    }
}

static bool vhost_vdpa_has_vnet_hdr(NetClientState *nc)
{
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    return true;
}

static bool vhost_vdpa_has_ufo(NetClientState *nc)
{
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    uint64_t features = 0;
    features |= (1ULL << VIRTIO_NET_F_HOST_UFO);
    features = vhost_net_get_features(s->vhost_net, features);
    return !!(features & (1ULL << VIRTIO_NET_F_HOST_UFO));
}

static bool vhost_vdpa_check_peer_type(NetClientState *nc, ObjectClass *oc,
                                       Error **errp)
{
    const char *driver = object_class_get_name(oc);

    if (!g_str_has_prefix(driver, "virtio-net-")) {
        error_setg(errp, "vhost-vdpa requires frontend driver virtio-net-*");
        return false;
    }

    return true;
}

/** Dummy receive in case QEMU falls back to userland tap networking */
static ssize_t vhost_vdpa_receive(NetClientState *nc, const uint8_t *buf,
                                  size_t size)
{
    return size;
}

static NetClientInfo net_vhost_vdpa_info = {
    .type = NET_CLIENT_DRIVER_VHOST_VDPA,
    .size = sizeof(VhostVDPAState),
    .receive = vhost_vdpa_receive,
    .cleanup = vhost_vdpa_cleanup,
    .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
    .has_ufo = vhost_vdpa_has_ufo,
    .check_peer_type = vhost_vdpa_check_peer_type,
};

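/** Return the virtqueue group of a vring, or a negative value on failure */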
static int64_t vhost_vdpa_get_vring_group(int device_fd, unsigned vq_index)
{
    struct vhost_vring_state state = {
        .index = vq_index,
    };
    int r = ioctl(device_fd, VHOST_VDPA_GET_VRING_GROUP, &state);

    if (unlikely(r < 0)) {
        error_report("Cannot get VQ %u group: %s", vq_index,
                     g_strerror(errno));
        return r;
    }

    return state.num;
}

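/** Bind a virtqueue group to an address space id (ASID) */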
static int vhost_vdpa_set_address_space_id(struct vhost_vdpa *v,
                                           unsigned vq_group,
                                           unsigned asid_num)
{
    struct vhost_vring_state asid = {
        .index = vq_group,
        .num = asid_num,
    };
    int r;

    r = ioctl(v->device_fd, VHOST_VDPA_SET_GROUP_ASID, &asid);
    if (unlikely(r < 0)) {
        error_report("Can't set vq group %u asid %u, errno=%d (%s)",
                     asid.index, asid.num, errno, g_strerror(errno));
    }
    return r;
}

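/** Unmap a CVQ shadow buffer from the device address space */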
static void vhost_vdpa_cvq_unmap_buf(struct vhost_vdpa *v, void *addr)
{
    VhostIOVATree *tree = v->iova_tree;
    DMAMap needle = {
        /*
         * No need to specify size or to look for more translations since
         * this contiguous chunk was allocated by us.
         */
        .translated_addr = (hwaddr)(uintptr_t)addr,
    };
    const DMAMap *map = vhost_iova_tree_find_iova(tree, &needle);
    int r;

    if (unlikely(!map)) {
        error_report("Cannot locate expected map");
        return;
    }

    r = vhost_vdpa_dma_unmap(v, v->address_space_id, map->iova, map->size + 1);
    if (unlikely(r != 0)) {
        error_report("Device cannot unmap: %s(%d)", g_strerror(r), r);
    }

    vhost_iova_tree_remove(tree, *map);
}

static size_t vhost_vdpa_net_cvq_cmd_len(void)
{
    /*
     * MAC_TABLE_SET is the ctrl command that produces the longest out buffer.
     * The in buffer is always 1 byte, so it fits here too.
     */
    return sizeof(struct virtio_net_ctrl_hdr) +
           2 * sizeof(struct virtio_net_ctrl_mac) +
           MAC_TABLE_ENTRIES * ETH_ALEN;
}

static size_t vhost_vdpa_net_cvq_cmd_page_len(void)
{
    return ROUND_UP(vhost_vdpa_net_cvq_cmd_len(), qemu_real_host_page_size());
}

/** Map CVQ buffer. */
static int vhost_vdpa_cvq_map_buf(struct vhost_vdpa *v, void *buf, size_t size,
                                  bool write)
{
    DMAMap map = {};
    int r;

    map.translated_addr = (hwaddr)(uintptr_t)buf;
    map.size = size - 1;
    map.perm = write ? IOMMU_RW : IOMMU_RO;
    r = vhost_iova_tree_map_alloc(v->iova_tree, &map);
    if (unlikely(r != IOVA_OK)) {
        error_report("Cannot map injected element");
        return r;
    }

    /* Map the same length that was reserved in the IOVA tree */
    r = vhost_vdpa_dma_map(v, v->address_space_id, map.iova, size, buf,
                           !write);
    if (unlikely(r < 0)) {
        goto dma_map_err;
    }

    return 0;

dma_map_err:
    vhost_iova_tree_remove(v->iova_tree, map);
    return r;
}

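/*
 * Start the CVQ net client. If the backend supports per-group ASIDs and the
 * CVQ is in a group of its own, move that group to a separate address space
 * and shadow only the CVQ, so QEMU can intercept control commands. Otherwise
 * run in passthrough mode, unless x-svq already shadows every virtqueue.
 */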
static int vhost_vdpa_net_cvq_start(NetClientState *nc)
{
    VhostVDPAState *s;
    struct vhost_vdpa *v;
    uint64_t backend_features;
    int64_t cvq_group;
    int cvq_index, r;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    s = DO_UPCAST(VhostVDPAState, nc, nc);
    v = &s->vhost_vdpa;

    v->shadow_data = s->always_svq;
    v->shadow_vqs_enabled = s->always_svq;
    s->vhost_vdpa.address_space_id = VHOST_VDPA_GUEST_PA_ASID;

    if (s->always_svq) {
        /* SVQ is already configured for all virtqueues */
        goto out;
    }

    /*
     * If we return early here, SVQ will not be enabled and migration will
     * stay blocked, since vhost-vdpa backends do not offer _F_LOG.
     *
     * We call VHOST_GET_BACKEND_FEATURES directly because the backend
     * features are not available in v->dev yet.
     */
    r = ioctl(v->device_fd, VHOST_GET_BACKEND_FEATURES, &backend_features);
    if (unlikely(r < 0)) {
        error_report("Cannot get vdpa backend_features: %s(%d)",
            g_strerror(errno), errno);
        return -1;
    }
    if (!(backend_features & BIT_ULL(VHOST_BACKEND_F_IOTLB_ASID)) ||
        !vhost_vdpa_net_valid_svq_features(v->dev->features, NULL)) {
        return 0;
    }

    /*
     * Check that every data virtqueue is in a different vq group than the
     * last vq (the CVQ), whose group is stored in cvq_group.
     */
    cvq_index = v->dev->vq_index_end - 1;
    cvq_group = vhost_vdpa_get_vring_group(v->device_fd, cvq_index);
    if (unlikely(cvq_group < 0)) {
        return cvq_group;
    }
    for (int i = 0; i < cvq_index; ++i) {
        int64_t group = vhost_vdpa_get_vring_group(v->device_fd, i);

        if (unlikely(group < 0)) {
            return group;
        }

        if (group == cvq_group) {
            return 0;
        }
    }

    r = vhost_vdpa_set_address_space_id(v, cvq_group, VHOST_VDPA_NET_CVQ_ASID);
    if (unlikely(r < 0)) {
        return r;
    }

    v->iova_tree = vhost_iova_tree_new(v->iova_range.first,
                                       v->iova_range.last);
    v->shadow_vqs_enabled = true;
    s->vhost_vdpa.address_space_id = VHOST_VDPA_NET_CVQ_ASID;

out:
    if (!s->vhost_vdpa.shadow_vqs_enabled) {
        return 0;
    }

    r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer,
                               vhost_vdpa_net_cvq_cmd_page_len(), false);
    if (unlikely(r < 0)) {
        return r;
    }

    r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->status,
                               vhost_vdpa_net_cvq_cmd_page_len(), true);
    if (unlikely(r < 0)) {
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
    }

    return r;
}

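/*
 * Stop the CVQ net client: unmap the shadow buffers and, if only the CVQ was
 * shadowed, delete the IOVA tree.
 */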
static void vhost_vdpa_net_cvq_stop(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->vhost_vdpa.shadow_vqs_enabled) {
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->status);
        if (!s->always_svq) {
            /*
             * If only the CVQ is shadowed we can delete this safely.
             * If all the VQs are shadowed this will be needed by the time the
             * device is started again to register SVQ vrings and similar.
             */
            g_clear_pointer(&s->vhost_vdpa.iova_tree, vhost_iova_tree_delete);
        }
    }
}

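/*
 * Add a control command to the shadow CVQ and poll until the device uses it.
 * Returns the number of bytes the device wrote to the in buffer, or a
 * negative value on error.
 */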
static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s, size_t out_len,
                                      size_t in_len)
{
    /* Buffers for the device */
    const struct iovec out = {
        .iov_base = s->cvq_cmd_out_buffer,
        .iov_len = out_len,
    };
    const struct iovec in = {
        .iov_base = s->status,
        .iov_len = sizeof(virtio_net_ctrl_ack),
    };
    VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);
    int r;

    r = vhost_svq_add(svq, &out, 1, &in, 1, NULL);
    if (unlikely(r != 0)) {
        if (unlikely(r == -ENOSPC)) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: No space on device queue\n",
                          __func__);
        }
        return r;
    }

    /*
     * We can poll here since we've held the BQL from the time we sent the
     * descriptor. Also, we need to take the answer before SVQ pulls it by
     * itself, when the BQL is released.
     */
    return vhost_svq_poll(svq);
}

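/*
 * Compose a control command (header plus payload) in the shadow out buffer
 * and send it to the device.
 */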
static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s, uint8_t class,
                                       uint8_t cmd, const void *data,
                                       size_t data_size)
{
    const struct virtio_net_ctrl_hdr ctrl = {
        .class = class,
        .cmd = cmd,
    };

    assert(data_size < vhost_vdpa_net_cvq_cmd_page_len() - sizeof(ctrl));

    memcpy(s->cvq_cmd_out_buffer, &ctrl, sizeof(ctrl));
    memcpy(s->cvq_cmd_out_buffer + sizeof(ctrl), data, data_size);

    return vhost_vdpa_net_cvq_add(s, sizeof(ctrl) + data_size,
                                  sizeof(virtio_net_ctrl_ack));
}

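/*
 * Reload the MAC address into the device, if the guest negotiated
 * VIRTIO_NET_F_CTRL_MAC_ADDR.
 */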
static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n)
{
    uint64_t features = n->parent_obj.guest_features;
    if (features & BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR)) {
        ssize_t dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_MAC,
                                                  VIRTIO_NET_CTRL_MAC_ADDR_SET,
                                                  n->mac, sizeof(n->mac));
        if (unlikely(dev_written < 0)) {
            return dev_written;
        }

        return *s->status != VIRTIO_NET_OK;
    }

    return 0;
}

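/*
 * Reload the number of active queue pairs into the device, if the guest
 * negotiated VIRTIO_NET_F_MQ.
 */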
static int vhost_vdpa_net_load_mq(VhostVDPAState *s,
                                  const VirtIONet *n)
{
    struct virtio_net_ctrl_mq mq;
    uint64_t features = n->parent_obj.guest_features;
    ssize_t dev_written;

    if (!(features & BIT_ULL(VIRTIO_NET_F_MQ))) {
        return 0;
    }

    mq.virtqueue_pairs = cpu_to_le16(n->curr_queue_pairs);
    dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_MQ,
                                          VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &mq,
                                          sizeof(mq));
    if (unlikely(dev_written < 0)) {
        return dev_written;
    }

    return *s->status != VIRTIO_NET_OK;
}

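/*
 * Replay the guest virtio-net state (MAC, MQ) through the shadow CVQ when
 * the device is started with SVQ enabled, e.g. at the migration destination.
 */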
static int vhost_vdpa_net_load(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_vdpa *v = &s->vhost_vdpa;
    const VirtIONet *n;
    int r;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (!v->shadow_vqs_enabled) {
        return 0;
    }

    n = VIRTIO_NET(v->dev->vdev);
    r = vhost_vdpa_net_load_mac(s, n);
    if (unlikely(r < 0)) {
        return r;
    }
    r = vhost_vdpa_net_load_mq(s, n);
    if (unlikely(r)) {
        return r;
    }

    return 0;
}

static NetClientInfo net_vhost_vdpa_cvq_info = {
    .type = NET_CLIENT_DRIVER_VHOST_VDPA,
    .size = sizeof(VhostVDPAState),
    .receive = vhost_vdpa_receive,
    .start = vhost_vdpa_net_cvq_start,
    .load = vhost_vdpa_net_load,
    .stop = vhost_vdpa_net_cvq_stop,
    .cleanup = vhost_vdpa_cleanup,
    .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
    .has_ufo = vhost_vdpa_has_ufo,
    .check_peer_type = vhost_vdpa_check_peer_type,
};

/**
 * Validate and copy control virtqueue commands.
 *
 * Following QEMU guidelines, we offer a copy of the buffers to the device to
 * prevent TOCTOU bugs.
 */
static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
                                            VirtQueueElement *elem,
                                            void *opaque)
{
    VhostVDPAState *s = opaque;
    size_t in_len;
    virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
    /* Out buffer sent to both the vdpa device and the device model */
    struct iovec out = {
        .iov_base = s->cvq_cmd_out_buffer,
    };
    /* In buffer used for the device model */
    const struct iovec in = {
        .iov_base = &status,
        .iov_len = sizeof(status),
    };
    ssize_t dev_written = -EINVAL;

    out.iov_len = iov_to_buf(elem->out_sg, elem->out_num, 0,
                             s->cvq_cmd_out_buffer,
                             vhost_vdpa_net_cvq_cmd_len());
    if (*(uint8_t *)s->cvq_cmd_out_buffer == VIRTIO_NET_CTRL_ANNOUNCE) {
        /*
         * Guest announce capability is emulated by QEMU, so don't forward to
         * the device.
         */
        dev_written = sizeof(status);
        *s->status = VIRTIO_NET_OK;
    } else {
        dev_written = vhost_vdpa_net_cvq_add(s, out.iov_len, sizeof(status));
        if (unlikely(dev_written < 0)) {
            goto out;
        }
    }

    if (unlikely(dev_written < sizeof(status))) {
        error_report("Insufficient written data (%zd)", dev_written);
        goto out;
    }

    if (*s->status != VIRTIO_NET_OK) {
        /* Return the error status to the guest instead of leaking elem */
        goto out;
    }

    status = VIRTIO_NET_ERR;
    virtio_net_handle_ctrl_iov(svq->vdev, &in, 1, &out, 1);
    if (status != VIRTIO_NET_OK) {
        error_report("Bad CVQ processing in model");
    }

out:
    in_len = iov_from_buf(elem->in_sg, elem->in_num, 0, &status,
                          sizeof(status));
    if (unlikely(in_len < sizeof(status))) {
        error_report("Bad device CVQ written length");
    }
    vhost_svq_push_elem(svq, elem, MIN(in_len, sizeof(status)));
    g_free(elem);
    return dev_written < 0 ? dev_written : 0;
}

static const VhostShadowVirtqueueOps vhost_vdpa_net_svq_ops = {
    .avail_handler = vhost_vdpa_net_handle_ctrl_avail,
};

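/*
 * Create a vhost-vdpa net client for one queue pair range. The control
 * client (is_datapath == false) also allocates the CVQ shadow buffers and
 * registers the SVQ ops that intercept control commands.
 */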
static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
                                       const char *device,
                                       const char *name,
                                       int vdpa_device_fd,
                                       int queue_pair_index,
                                       int nvqs,
                                       bool is_datapath,
                                       bool svq,
                                       struct vhost_vdpa_iova_range iova_range,
                                       VhostIOVATree *iova_tree)
{
    NetClientState *nc = NULL;
    VhostVDPAState *s;
    int ret = 0;
    assert(name);
    if (is_datapath) {
        nc = qemu_new_net_client(&net_vhost_vdpa_info, peer, device,
                                 name);
    } else {
        nc = qemu_new_net_control_client(&net_vhost_vdpa_cvq_info, peer,
                                         device, name);
    }
    qemu_set_info_str(nc, TYPE_VHOST_VDPA);
    s = DO_UPCAST(VhostVDPAState, nc, nc);

    s->vhost_vdpa.device_fd = vdpa_device_fd;
    s->vhost_vdpa.index = queue_pair_index;
    s->always_svq = svq;
    s->vhost_vdpa.shadow_vqs_enabled = svq;
    s->vhost_vdpa.iova_range = iova_range;
    s->vhost_vdpa.shadow_data = svq;
    s->vhost_vdpa.iova_tree = iova_tree;
    if (!is_datapath) {
        s->cvq_cmd_out_buffer = qemu_memalign(qemu_real_host_page_size(),
                                            vhost_vdpa_net_cvq_cmd_page_len());
        memset(s->cvq_cmd_out_buffer, 0, vhost_vdpa_net_cvq_cmd_page_len());
        s->status = qemu_memalign(qemu_real_host_page_size(),
                                  vhost_vdpa_net_cvq_cmd_page_len());
        memset(s->status, 0, vhost_vdpa_net_cvq_cmd_page_len());

        s->vhost_vdpa.shadow_vq_ops = &vhost_vdpa_net_svq_ops;
        s->vhost_vdpa.shadow_vq_ops_opaque = s;
    }
    ret = vhost_vdpa_add(nc, (void *)&s->vhost_vdpa, queue_pair_index, nvqs);
    if (ret) {
        qemu_del_net_client(nc);
        return NULL;
    }
    return nc;
}

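/** Query the device features with VHOST_GET_FEATURES */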
static int vhost_vdpa_get_features(int fd, uint64_t *features, Error **errp)
{
    int ret = ioctl(fd, VHOST_GET_FEATURES, features);
    if (unlikely(ret < 0)) {
        error_setg_errno(errp, errno,
                         "Failed to query features from vhost-vDPA device");
    }
    return ret;
}

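/*
 * Return the maximum number of queue pairs supported by the device: the
 * value from its config space if _F_MQ is offered, 1 otherwise. Also report
 * whether the device has a control virtqueue.
 */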
static int vhost_vdpa_get_max_queue_pairs(int fd, uint64_t features,
                                          int *has_cvq, Error **errp)
{
    unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
    g_autofree struct vhost_vdpa_config *config = NULL;
    __virtio16 *max_queue_pairs;
    int ret;

    if (features & (1 << VIRTIO_NET_F_CTRL_VQ)) {
        *has_cvq = 1;
    } else {
        *has_cvq = 0;
    }

    if (features & (1 << VIRTIO_NET_F_MQ)) {
        config = g_malloc0(config_size + sizeof(*max_queue_pairs));
        config->off = offsetof(struct virtio_net_config, max_virtqueue_pairs);
        config->len = sizeof(*max_queue_pairs);

        ret = ioctl(fd, VHOST_VDPA_GET_CONFIG, config);
        if (ret) {
            error_setg(errp, "Failed to get config from vhost-vDPA device");
            /* ioctl() returns -1 and sets errno on failure */
            return -errno;
        }

        max_queue_pairs = (__virtio16 *)&config->buf;

        return lduw_le_p(max_queue_pairs);
    }

    return 1;
}

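/*
 * Entry point of -netdev vhost-vdpa: open the vdpa device (or reuse the
 * given fd), probe its features and create one net client per data queue
 * pair plus, if present, one control client.
 */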
int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
                        NetClientState *peer, Error **errp)
{
    const NetdevVhostVDPAOptions *opts;
    uint64_t features;
    int vdpa_device_fd;
    g_autofree NetClientState **ncs = NULL;
    g_autoptr(VhostIOVATree) iova_tree = NULL;
    struct vhost_vdpa_iova_range iova_range;
    NetClientState *nc;
    int queue_pairs, r, i = 0, has_cvq = 0;

    assert(netdev->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    opts = &netdev->u.vhost_vdpa;
    if (!opts->vhostdev && !opts->vhostfd) {
        error_setg(errp,
                   "vhost-vdpa: neither vhostdev= nor vhostfd= was specified");
        return -1;
    }

    if (opts->vhostdev && opts->vhostfd) {
        error_setg(errp,
                   "vhost-vdpa: vhostdev= and vhostfd= are mutually exclusive");
        return -1;
    }

    if (opts->vhostdev) {
        vdpa_device_fd = qemu_open(opts->vhostdev, O_RDWR, errp);
        if (vdpa_device_fd == -1) {
            return -errno;
        }
    } else {
        /* has_vhostfd */
        vdpa_device_fd = monitor_fd_param(monitor_cur(), opts->vhostfd, errp);
        if (vdpa_device_fd == -1) {
            error_prepend(errp, "vhost-vdpa: unable to parse vhostfd: ");
            return -1;
        }
    }

    r = vhost_vdpa_get_features(vdpa_device_fd, &features, errp);
    if (unlikely(r < 0)) {
        goto err;
    }

    queue_pairs = vhost_vdpa_get_max_queue_pairs(vdpa_device_fd, features,
                                                 &has_cvq, errp);
    if (queue_pairs < 0) {
        qemu_close(vdpa_device_fd);
        return queue_pairs;
    }

    r = vhost_vdpa_get_iova_range(vdpa_device_fd, &iova_range);
    if (unlikely(r < 0)) {
        error_setg(errp, "vhost-vdpa: get iova range failed: %s",
                   strerror(-r));
        goto err;
    }

    if (opts->x_svq) {
        if (!vhost_vdpa_net_valid_svq_features(features, errp)) {
            goto err_svq;
        }

        iova_tree = vhost_iova_tree_new(iova_range.first, iova_range.last);
    }

    ncs = g_malloc0(sizeof(*ncs) * queue_pairs);

    for (i = 0; i < queue_pairs; i++) {
        ncs[i] = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
                                     vdpa_device_fd, i, 2, true, opts->x_svq,
                                     iova_range, iova_tree);
        if (!ncs[i]) {
            goto err;
        }
    }

    if (has_cvq) {
        nc = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
                                 vdpa_device_fd, i, 1, false,
                                 opts->x_svq, iova_range, iova_tree);
        if (!nc) {
            goto err;
        }
    }

    /* iova_tree ownership belongs to the last NetClientState */
    g_steal_pointer(&iova_tree);
    return 0;

err:
    if (i) {
        for (i--; i >= 0; i--) {
            qemu_del_net_client(ncs[i]);
        }
    }

err_svq:
    qemu_close(vdpa_device_fd);

    return -1;
}
857