xref: /qemu/hw/virtio/vhost-vdpa.c (revision 1ea5208f)
1 /*
2  * vhost-vdpa
3  *
4  *  Copyright(c) 2017-2018 Intel Corporation.
5  *  Copyright(c) 2020 Red Hat, Inc.
6  *
7  * This work is licensed under the terms of the GNU GPL, version 2 or later.
8  * See the COPYING file in the top-level directory.
9  *
10  */
11 
12 #include "qemu/osdep.h"
13 #include <linux/vhost.h>
14 #include <linux/vfio.h>
15 #include <sys/eventfd.h>
16 #include <sys/ioctl.h>
17 #include "hw/virtio/vhost.h"
18 #include "hw/virtio/vhost-backend.h"
19 #include "hw/virtio/virtio-net.h"
20 #include "hw/virtio/vhost-vdpa.h"
21 #include "exec/address-spaces.h"
22 #include "qemu/main-loop.h"
23 #include "cpu.h"
24 #include "trace.h"
25 #include "qemu-common.h"
26 
/*
 * Return one past the end of the section. Be careful with uint64_t
 * conversions!
 */
31 static Int128 vhost_vdpa_section_end(const MemoryRegionSection *section)
32 {
33     Int128 llend = int128_make64(section->offset_within_address_space);
34     llend = int128_add(llend, section->size);
35     llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));
36 
37     return llend;
38 }
39 
40 static bool vhost_vdpa_listener_skipped_section(MemoryRegionSection *section,
41                                                 uint64_t iova_min,
42                                                 uint64_t iova_max)
43 {
44     Int128 llend;
45 
46     if ((!memory_region_is_ram(section->mr) &&
47          !memory_region_is_iommu(section->mr)) ||
48         memory_region_is_protected(section->mr) ||
49         /* vhost-vDPA doesn't allow MMIO to be mapped  */
50         memory_region_is_ram_device(section->mr)) {
51         return true;
52     }
53 
54     if (section->offset_within_address_space < iova_min) {
55         error_report("RAM section out of device range (min=0x%" PRIx64
56                      ", addr=0x%" HWADDR_PRIx ")",
57                      iova_min, section->offset_within_address_space);
58         return true;
59     }
60 
61     llend = vhost_vdpa_section_end(section);
62     if (int128_gt(llend, int128_make64(iova_max))) {
63         error_report("RAM section out of device range (max=0x%" PRIx64
64                      ", end addr=0x%" PRIx64 ")",
65                      iova_max, int128_get64(llend));
66         return true;
67     }
68 
69     return false;
70 }
71 
72 static int vhost_vdpa_dma_map(struct vhost_vdpa *v, hwaddr iova, hwaddr size,
73                               void *vaddr, bool readonly)
74 {
75     struct vhost_msg_v2 msg = {};
76     int fd = v->device_fd;
77     int ret = 0;
78 
79     msg.type = v->msg_type;
80     msg.iotlb.iova = iova;
81     msg.iotlb.size = size;
82     msg.iotlb.uaddr = (uint64_t)(uintptr_t)vaddr;
83     msg.iotlb.perm = readonly ? VHOST_ACCESS_RO : VHOST_ACCESS_RW;
84     msg.iotlb.type = VHOST_IOTLB_UPDATE;
85 
86    trace_vhost_vdpa_dma_map(v, fd, msg.type, msg.iotlb.iova, msg.iotlb.size,
87                             msg.iotlb.uaddr, msg.iotlb.perm, msg.iotlb.type);
88 
89     if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
90         error_report("failed to write, fd=%d, errno=%d (%s)",
91             fd, errno, strerror(errno));
92         return -EIO ;
93     }
94 
95     return ret;
96 }
97 
98 static int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, hwaddr iova,
99                                 hwaddr size)
100 {
101     struct vhost_msg_v2 msg = {};
102     int fd = v->device_fd;
103     int ret = 0;
104 
105     msg.type = v->msg_type;
106     msg.iotlb.iova = iova;
107     msg.iotlb.size = size;
108     msg.iotlb.type = VHOST_IOTLB_INVALIDATE;
109 
110     trace_vhost_vdpa_dma_unmap(v, fd, msg.type, msg.iotlb.iova,
111                                msg.iotlb.size, msg.iotlb.type);
112 
113     if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
114         error_report("failed to write, fd=%d, errno=%d (%s)",
115             fd, errno, strerror(errno));
116         return -EIO ;
117     }
118 
119     return ret;
120 }
121 
122 static void vhost_vdpa_listener_begin_batch(struct vhost_vdpa *v)
123 {
124     int fd = v->device_fd;
125     struct vhost_msg_v2 msg = {
126         .type = v->msg_type,
127         .iotlb.type = VHOST_IOTLB_BATCH_BEGIN,
128     };
129 
130     if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
131         error_report("failed to write, fd=%d, errno=%d (%s)",
132                      fd, errno, strerror(errno));
133     }
134 }
135 
136 static void vhost_vdpa_iotlb_batch_begin_once(struct vhost_vdpa *v)
137 {
138     if (v->dev->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH) &&
139         !v->iotlb_batch_begin_sent) {
140         vhost_vdpa_listener_begin_batch(v);
141     }
142 
143     v->iotlb_batch_begin_sent = true;
144 }
145 
146 static void vhost_vdpa_listener_commit(MemoryListener *listener)
147 {
148     struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
149     struct vhost_dev *dev = v->dev;
150     struct vhost_msg_v2 msg = {};
151     int fd = v->device_fd;
152 
153     if (!(dev->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH))) {
154         return;
155     }
156 
157     if (!v->iotlb_batch_begin_sent) {
158         return;
159     }
160 
161     msg.type = v->msg_type;
162     msg.iotlb.type = VHOST_IOTLB_BATCH_END;
163 
164     if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
165         error_report("failed to write, fd=%d, errno=%d (%s)",
166                      fd, errno, strerror(errno));
167     }
168 
169     v->iotlb_batch_begin_sent = false;
170 }
171 
/*
 * MemoryListener region_add hook: map a newly visible RAM section into the
 * device IOTLB (page-aligned, within the device's IOVA range).
 */
static void vhost_vdpa_listener_region_add(MemoryListener *listener,
                                           MemoryRegionSection *section)
{
    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
    hwaddr iova;
    Int128 llend, llsize;
    void *vaddr;
    int ret;

    /* Ignore sections that cannot be mapped (non-RAM, protected, MMIO,
     * or outside the device IOVA window). */
    if (vhost_vdpa_listener_skipped_section(section, v->iova_range.first,
                                            v->iova_range.last)) {
        return;
    }

    /* GPA and HVA must share the same sub-page offset for a 1:1 mapping. */
    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    llend = vhost_vdpa_section_end(section);
    /* Nothing left once both ends are page-aligned. */
    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }

    /* Keep the region alive while mapped; released in region_del. */
    memory_region_ref(section->mr);

    /* Here we assume that memory_region_is_ram(section->mr)==true */

    vaddr = memory_region_get_ram_ptr(section->mr) +
            section->offset_within_region +
            (iova - section->offset_within_address_space);

    trace_vhost_vdpa_listener_region_add(v, iova, int128_get64(llend),
                                         vaddr, section->readonly);

    llsize = int128_sub(llend, int128_make64(iova));

    vhost_vdpa_iotlb_batch_begin_once(v);
    ret = vhost_vdpa_dma_map(v, iova, int128_get64(llsize),
                             vaddr, section->readonly);
    if (ret) {
        error_report("vhost vdpa map fail!");
        goto fail;
    }

    return;

fail:
    /*
     * On the initfn path, store the first error in the container so we
     * can gracefully fail.  Runtime, there's not much we can do other
     * than throw a hardware error.
     */
    error_report("vhost-vdpa: DMA mapping failed, unable to continue");
    return;

}
231 
/*
 * MemoryListener region_del hook: unmap a disappearing RAM section from the
 * device IOTLB. Must mirror the filtering/alignment done in region_add.
 */
static void vhost_vdpa_listener_region_del(MemoryListener *listener,
                                           MemoryRegionSection *section)
{
    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
    hwaddr iova;
    Int128 llend, llsize;
    int ret;

    if (vhost_vdpa_listener_skipped_section(section, v->iova_range.first,
                                            v->iova_range.last)) {
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    llend = vhost_vdpa_section_end(section);

    trace_vhost_vdpa_listener_region_del(v, iova, int128_get64(llend));

    /* Nothing was mapped for this section (see region_add). */
    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }

    llsize = int128_sub(llend, int128_make64(iova));

    vhost_vdpa_iotlb_batch_begin_once(v);
    ret = vhost_vdpa_dma_unmap(v, iova, int128_get64(llsize));
    if (ret) {
        error_report("vhost_vdpa dma unmap error!");
    }

    /* Pairs with memory_region_ref() in region_add. */
    memory_region_unref(section->mr);
}
/*
 * The IOTLB API is used by vhost-vdpa, which requires incremental updating
 * of the mapping. So we cannot use the generic vhost memory listener, which
 * depends on the addnop().
 */
static const MemoryListener vhost_vdpa_memory_listener = {
    .name = "vhost-vdpa",
    /* commit flushes a pending IOTLB batch (sends BATCH_END) */
    .commit = vhost_vdpa_listener_commit,
    .region_add = vhost_vdpa_listener_region_add,
    .region_del = vhost_vdpa_listener_region_del,
};
281 
282 static int vhost_vdpa_call(struct vhost_dev *dev, unsigned long int request,
283                              void *arg)
284 {
285     struct vhost_vdpa *v = dev->opaque;
286     int fd = v->device_fd;
287     int ret;
288 
289     assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
290 
291     ret = ioctl(fd, request, arg);
292     return ret < 0 ? -errno : ret;
293 }
294 
295 static int vhost_vdpa_add_status(struct vhost_dev *dev, uint8_t status)
296 {
297     uint8_t s;
298     int ret;
299 
300     trace_vhost_vdpa_add_status(dev, status);
301     ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &s);
302     if (ret < 0) {
303         return ret;
304     }
305 
306     s |= status;
307 
308     ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &s);
309     if (ret < 0) {
310         return ret;
311     }
312 
313     ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &s);
314     if (ret < 0) {
315         return ret;
316     }
317 
318     if (!(s & status)) {
319         return -EIO;
320     }
321 
322     return 0;
323 }
324 
325 static void vhost_vdpa_get_iova_range(struct vhost_vdpa *v)
326 {
327     int ret = vhost_vdpa_call(v->dev, VHOST_VDPA_GET_IOVA_RANGE,
328                               &v->iova_range);
329     if (ret != 0) {
330         v->iova_range.first = 0;
331         v->iova_range.last = UINT64_MAX;
332     }
333 
334     trace_vhost_vdpa_get_iova_range(v->dev, v->iova_range.first,
335                                     v->iova_range.last);
336 }
337 
/*
 * True when this vhost_dev is NOT the first virtqueue group (index != 0)
 * of the device. Device-wide ("one time") requests are skipped when this
 * returns true, so they are issued exactly once, by group 0.
 */
static bool vhost_vdpa_one_time_request(struct vhost_dev *dev)
{
    struct vhost_vdpa *v = dev->opaque;

    return v->index != 0;
}
344 
345 static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque, Error **errp)
346 {
347     struct vhost_vdpa *v;
348     assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
349     trace_vhost_vdpa_init(dev, opaque);
350     int ret;
351 
352     /*
353      * Similar to VFIO, we end up pinning all guest memory and have to
354      * disable discarding of RAM.
355      */
356     ret = ram_block_discard_disable(true);
357     if (ret) {
358         error_report("Cannot set discarding of RAM broken");
359         return ret;
360     }
361 
362     v = opaque;
363     v->dev = dev;
364     dev->opaque =  opaque ;
365     v->listener = vhost_vdpa_memory_listener;
366     v->msg_type = VHOST_IOTLB_MSG_V2;
367 
368     vhost_vdpa_get_iova_range(v);
369 
370     if (vhost_vdpa_one_time_request(dev)) {
371         return 0;
372     }
373 
374     vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
375                                VIRTIO_CONFIG_S_DRIVER);
376 
377     return 0;
378 }
379 
380 static void vhost_vdpa_host_notifier_uninit(struct vhost_dev *dev,
381                                             int queue_index)
382 {
383     size_t page_size = qemu_real_host_page_size;
384     struct vhost_vdpa *v = dev->opaque;
385     VirtIODevice *vdev = dev->vdev;
386     VhostVDPAHostNotifier *n;
387 
388     n = &v->notifier[queue_index];
389 
390     if (n->addr) {
391         virtio_queue_set_host_notifier_mr(vdev, queue_index, &n->mr, false);
392         object_unparent(OBJECT(&n->mr));
393         munmap(n->addr, page_size);
394         n->addr = NULL;
395     }
396 }
397 
/*
 * Map one page from the vhost-vdpa fd at offset queue_index * page_size
 * (presumably the per-queue notify/doorbell area -- confirm against the
 * kernel's vhost-vdpa mmap implementation) and expose it to the guest as
 * a host-notifier MemoryRegion, so queue kicks can bypass QEMU.
 * Returns 0 on success, -1 on failure.
 */
static int vhost_vdpa_host_notifier_init(struct vhost_dev *dev, int queue_index)
{
    size_t page_size = qemu_real_host_page_size;
    struct vhost_vdpa *v = dev->opaque;
    VirtIODevice *vdev = dev->vdev;
    VhostVDPAHostNotifier *n;
    int fd = v->device_fd;
    void *addr;
    char *name;

    /* Drop any previous mapping for this queue before re-creating it. */
    vhost_vdpa_host_notifier_uninit(dev, queue_index);

    n = &v->notifier[queue_index];

    addr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, fd,
                queue_index * page_size);
    if (addr == MAP_FAILED) {
        goto err;
    }

    name = g_strdup_printf("vhost-vdpa/host-notifier@%p mmaps[%d]",
                           v, queue_index);
    memory_region_init_ram_device_ptr(&n->mr, OBJECT(vdev), name,
                                      page_size, addr);
    g_free(name);

    if (virtio_queue_set_host_notifier_mr(vdev, queue_index, &n->mr, true)) {
        /* Undo the MR and mapping; n->addr stays NULL. */
        object_unparent(OBJECT(&n->mr));
        munmap(addr, page_size);
        goto err;
    }
    /* Only record the address once the notifier is fully installed. */
    n->addr = addr;

    return 0;

err:
    return -1;
}
436 
437 static void vhost_vdpa_host_notifiers_uninit(struct vhost_dev *dev, int n)
438 {
439     int i;
440 
441     for (i = dev->vq_index; i < dev->vq_index + n; i++) {
442         vhost_vdpa_host_notifier_uninit(dev, i);
443     }
444 }
445 
446 static void vhost_vdpa_host_notifiers_init(struct vhost_dev *dev)
447 {
448     int i;
449 
450     for (i = dev->vq_index; i < dev->vq_index + dev->nvqs; i++) {
451         if (vhost_vdpa_host_notifier_init(dev, i)) {
452             goto err;
453         }
454     }
455 
456     return;
457 
458 err:
459     vhost_vdpa_host_notifiers_uninit(dev, i - dev->vq_index);
460     return;
461 }
462 
463 static int vhost_vdpa_cleanup(struct vhost_dev *dev)
464 {
465     struct vhost_vdpa *v;
466     assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
467     v = dev->opaque;
468     trace_vhost_vdpa_cleanup(dev, v);
469     vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
470     memory_listener_unregister(&v->listener);
471 
472     dev->opaque = NULL;
473     ram_block_discard_disable(false);
474 
475     return 0;
476 }
477 
/* vDPA imposes no memslot limit of its own; advertise INT_MAX. */
static int vhost_vdpa_memslots_limit(struct vhost_dev *dev)
{
    trace_vhost_vdpa_memslots_limit(dev, INT_MAX);
    return INT_MAX;
}
483 
484 static int vhost_vdpa_set_mem_table(struct vhost_dev *dev,
485                                     struct vhost_memory *mem)
486 {
487     if (vhost_vdpa_one_time_request(dev)) {
488         return 0;
489     }
490 
491     trace_vhost_vdpa_set_mem_table(dev, mem->nregions, mem->padding);
492     if (trace_event_get_state_backends(TRACE_VHOST_VDPA_SET_MEM_TABLE) &&
493         trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_REGIONS)) {
494         int i;
495         for (i = 0; i < mem->nregions; i++) {
496             trace_vhost_vdpa_dump_regions(dev, i,
497                                           mem->regions[i].guest_phys_addr,
498                                           mem->regions[i].memory_size,
499                                           mem->regions[i].userspace_addr,
500                                           mem->regions[i].flags_padding);
501         }
502     }
503     if (mem->padding) {
504         return -EINVAL;
505     }
506 
507     return 0;
508 }
509 
510 static int vhost_vdpa_set_features(struct vhost_dev *dev,
511                                    uint64_t features)
512 {
513     int ret;
514 
515     if (vhost_vdpa_one_time_request(dev)) {
516         return 0;
517     }
518 
519     trace_vhost_vdpa_set_features(dev, features);
520     ret = vhost_vdpa_call(dev, VHOST_SET_FEATURES, &features);
521     if (ret) {
522         return ret;
523     }
524 
525     return vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_FEATURES_OK);
526 }
527 
528 static int vhost_vdpa_set_backend_cap(struct vhost_dev *dev)
529 {
530     uint64_t features;
531     uint64_t f = 0x1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2 |
532         0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH;
533     int r;
534 
535     if (vhost_vdpa_call(dev, VHOST_GET_BACKEND_FEATURES, &features)) {
536         return -EFAULT;
537     }
538 
539     features &= f;
540 
541     if (vhost_vdpa_one_time_request(dev)) {
542         r = vhost_vdpa_call(dev, VHOST_SET_BACKEND_FEATURES, &features);
543         if (r) {
544             return -EFAULT;
545         }
546     }
547 
548     dev->backend_cap = features;
549 
550     return 0;
551 }
552 
553 static int vhost_vdpa_get_device_id(struct vhost_dev *dev,
554                                     uint32_t *device_id)
555 {
556     int ret;
557     ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_DEVICE_ID, device_id);
558     trace_vhost_vdpa_get_device_id(dev, *device_id);
559     return ret;
560 }
561 
562 static int vhost_vdpa_reset_device(struct vhost_dev *dev)
563 {
564     int ret;
565     uint8_t status = 0;
566 
567     ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &status);
568     trace_vhost_vdpa_reset_device(dev, status);
569     return ret;
570 }
571 
/*
 * vhost-vdpa addresses virtqueues by their absolute index across the whole
 * device, so the index is returned unchanged (after a range sanity check).
 */
static int vhost_vdpa_get_vq_index(struct vhost_dev *dev, int idx)
{
    assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);

    trace_vhost_vdpa_get_vq_index(dev, idx, idx);
    return idx;
}
579 
/*
 * Enable (num = 1) every virtqueue of this vhost_dev. Errors from the
 * ioctl are not propagated; the function always returns 0.
 */
static int vhost_vdpa_set_vring_ready(struct vhost_dev *dev)
{
    int i;
    trace_vhost_vdpa_set_vring_ready(dev);
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_vring_state state = {
            .index = dev->vq_index + i,
            .num = 1,
        };
        vhost_vdpa_call(dev, VHOST_VDPA_SET_VRING_ENABLE, &state);
    }
    return 0;
}
593 
/*
 * Trace a hex dump of a config-space buffer, one trace event per 16-byte
 * chunk.
 */
static void vhost_vdpa_dump_config(struct vhost_dev *dev, const uint8_t *config,
                                   uint32_t config_len)
{
    int b, len;
    char line[QEMU_HEXDUMP_LINE_LEN];

    for (b = 0; b < config_len; b += 16) {
        /*
         * len is the total remaining bytes, not 16; qemu_hexdump_line is
         * presumably expected to clamp output to a single line -- TODO
         * confirm against util/hexdump.c.
         */
        len = config_len - b;
        qemu_hexdump_line(line, b, config, len, false);
        trace_vhost_vdpa_dump_config(dev, line);
    }
}
606 
607 static int vhost_vdpa_set_config(struct vhost_dev *dev, const uint8_t *data,
608                                    uint32_t offset, uint32_t size,
609                                    uint32_t flags)
610 {
611     struct vhost_vdpa_config *config;
612     int ret;
613     unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
614 
615     trace_vhost_vdpa_set_config(dev, offset, size, flags);
616     config = g_malloc(size + config_size);
617     config->off = offset;
618     config->len = size;
619     memcpy(config->buf, data, size);
620     if (trace_event_get_state_backends(TRACE_VHOST_VDPA_SET_CONFIG) &&
621         trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_CONFIG)) {
622         vhost_vdpa_dump_config(dev, data, size);
623     }
624     ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_CONFIG, config);
625     g_free(config);
626     return ret;
627 }
628 
629 static int vhost_vdpa_get_config(struct vhost_dev *dev, uint8_t *config,
630                                    uint32_t config_len, Error **errp)
631 {
632     struct vhost_vdpa_config *v_config;
633     unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
634     int ret;
635 
636     trace_vhost_vdpa_get_config(dev, config, config_len);
637     v_config = g_malloc(config_len + config_size);
638     v_config->len = config_len;
639     v_config->off = 0;
640     ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_CONFIG, v_config);
641     memcpy(config, v_config->buf, config_len);
642     g_free(v_config);
643     if (trace_event_get_state_backends(TRACE_VHOST_VDPA_GET_CONFIG) &&
644         trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_CONFIG)) {
645         vhost_vdpa_dump_config(dev, config, config_len);
646     }
647     return ret;
648  }
649 
/*
 * Start/stop hook. Per-group work (notifiers, vring enable) runs for every
 * vhost_dev; device-wide work (memory listener, DRIVER_OK / reset) runs
 * only when this group is the last one (vq_index + nvqs == vq_index_end).
 */
static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
{
    struct vhost_vdpa *v = dev->opaque;
    trace_vhost_vdpa_dev_start(dev, started);

    if (started) {
        vhost_vdpa_host_notifiers_init(dev);
        vhost_vdpa_set_vring_ready(dev);
    } else {
        vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
    }

    /* Device-wide steps below only run for the last virtqueue group. */
    if (dev->vq_index + dev->nvqs != dev->vq_index_end) {
        return 0;
    }

    if (started) {
        /* Map guest memory first, then announce DRIVER_OK. */
        memory_listener_register(&v->listener, &address_space_memory);
        return vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
    } else {
        /* Reset, restore ACKNOWLEDGE|DRIVER, then drop the mappings. */
        vhost_vdpa_reset_device(dev);
        vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
                                   VIRTIO_CONFIG_S_DRIVER);
        memory_listener_unregister(&v->listener);

        return 0;
    }
}
678 
/* Set the dirty-log base address (device-wide; first vq group only). */
static int vhost_vdpa_set_log_base(struct vhost_dev *dev, uint64_t base,
                                     struct vhost_log *log)
{
    if (vhost_vdpa_one_time_request(dev)) {
        return 0;
    }

    trace_vhost_vdpa_set_log_base(dev, base, log->size, log->refcnt, log->fd,
                                  log->log);
    return vhost_vdpa_call(dev, VHOST_SET_LOG_BASE, &base);
}
690 
/* Forward the vring addresses (filled by vhost_vdpa_vq_get_addr) to the
 * device. */
static int vhost_vdpa_set_vring_addr(struct vhost_dev *dev,
                                       struct vhost_vring_addr *addr)
{
    trace_vhost_vdpa_set_vring_addr(dev, addr->index, addr->flags,
                                    addr->desc_user_addr, addr->used_user_addr,
                                    addr->avail_user_addr,
                                    addr->log_guest_addr);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_ADDR, addr);
}
700 
/* Set the size (number of descriptors) of one virtqueue. */
static int vhost_vdpa_set_vring_num(struct vhost_dev *dev,
                                      struct vhost_vring_state *ring)
{
    trace_vhost_vdpa_set_vring_num(dev, ring->index, ring->num);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_NUM, ring);
}
707 
/* Set the next available index (base) of one virtqueue. */
static int vhost_vdpa_set_vring_base(struct vhost_dev *dev,
                                       struct vhost_vring_state *ring)
{
    trace_vhost_vdpa_set_vring_base(dev, ring->index, ring->num);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_BASE, ring);
}
714 
/* Read back the current base of one virtqueue; traced after the ioctl. */
static int vhost_vdpa_get_vring_base(struct vhost_dev *dev,
                                       struct vhost_vring_state *ring)
{
    int ret;

    ret = vhost_vdpa_call(dev, VHOST_GET_VRING_BASE, ring);
    trace_vhost_vdpa_get_vring_base(dev, ring->index, ring->num);
    return ret;
}
724 
/* Hand the kick eventfd for one virtqueue to the device. */
static int vhost_vdpa_set_vring_kick(struct vhost_dev *dev,
                                       struct vhost_vring_file *file)
{
    trace_vhost_vdpa_set_vring_kick(dev, file->index, file->fd);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_KICK, file);
}
731 
/* Hand the call (interrupt) eventfd for one virtqueue to the device. */
static int vhost_vdpa_set_vring_call(struct vhost_dev *dev,
                                       struct vhost_vring_file *file)
{
    trace_vhost_vdpa_set_vring_call(dev, file->index, file->fd);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_CALL, file);
}
738 
/* Query the device's virtio feature bits; *features is traced after the
 * ioctl regardless of its result. */
static int vhost_vdpa_get_features(struct vhost_dev *dev,
                                     uint64_t *features)
{
    int ret;

    ret = vhost_vdpa_call(dev, VHOST_GET_FEATURES, features);
    trace_vhost_vdpa_get_features(dev, *features);
    return ret;
}
748 
/* Claim device ownership (device-wide; first vq group only). */
static int vhost_vdpa_set_owner(struct vhost_dev *dev)
{
    if (vhost_vdpa_one_time_request(dev)) {
        return 0;
    }

    trace_vhost_vdpa_set_owner(dev);
    return vhost_vdpa_call(dev, VHOST_SET_OWNER, NULL);
}
758 
/*
 * Fill the vring address structure from the vq's *_phys fields, i.e. the
 * guest physical addresses are placed in the *_user_addr slots (the device
 * translates them itself, unlike vhost-kernel which expects HVAs).
 */
static int vhost_vdpa_vq_get_addr(struct vhost_dev *dev,
                    struct vhost_vring_addr *addr, struct vhost_virtqueue *vq)
{
    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
    addr->desc_user_addr = (uint64_t)(unsigned long)vq->desc_phys;
    addr->avail_user_addr = (uint64_t)(unsigned long)vq->avail_phys;
    addr->used_user_addr = (uint64_t)(unsigned long)vq->used_phys;
    trace_vhost_vdpa_vq_get_addr(dev, vq, addr->desc_user_addr,
                                 addr->avail_user_addr, addr->used_user_addr);
    return 0;
}
770 
/* Unconditionally report true — vhost-vdpa always wants the vhost core's
 * IOMMU handling path (NOTE(review): confirm against vhost core usage). */
static bool  vhost_vdpa_force_iommu(struct vhost_dev *dev)
{
    return true;
}
775 
/* vhost backend ops table for VHOST_BACKEND_TYPE_VDPA. */
const VhostOps vdpa_ops = {
        .backend_type = VHOST_BACKEND_TYPE_VDPA,
        /* lifecycle */
        .vhost_backend_init = vhost_vdpa_init,
        .vhost_backend_cleanup = vhost_vdpa_cleanup,
        .vhost_set_log_base = vhost_vdpa_set_log_base,
        /* virtqueue setup */
        .vhost_set_vring_addr = vhost_vdpa_set_vring_addr,
        .vhost_set_vring_num = vhost_vdpa_set_vring_num,
        .vhost_set_vring_base = vhost_vdpa_set_vring_base,
        .vhost_get_vring_base = vhost_vdpa_get_vring_base,
        .vhost_set_vring_kick = vhost_vdpa_set_vring_kick,
        .vhost_set_vring_call = vhost_vdpa_set_vring_call,
        /* features and device control */
        .vhost_get_features = vhost_vdpa_get_features,
        .vhost_set_backend_cap = vhost_vdpa_set_backend_cap,
        .vhost_set_owner = vhost_vdpa_set_owner,
        .vhost_set_vring_endian = NULL,
        .vhost_backend_memslots_limit = vhost_vdpa_memslots_limit,
        .vhost_set_mem_table = vhost_vdpa_set_mem_table,
        .vhost_set_features = vhost_vdpa_set_features,
        .vhost_reset_device = vhost_vdpa_reset_device,
        .vhost_get_vq_index = vhost_vdpa_get_vq_index,
        .vhost_get_config  = vhost_vdpa_get_config,
        .vhost_set_config = vhost_vdpa_set_config,
        /* unsupported by this backend */
        .vhost_requires_shm_log = NULL,
        .vhost_migration_done = NULL,
        .vhost_backend_can_merge = NULL,
        .vhost_net_set_mtu = NULL,
        .vhost_set_iotlb_callback = NULL,
        .vhost_send_device_iotlb_msg = NULL,
        .vhost_dev_start = vhost_vdpa_dev_start,
        .vhost_get_device_id = vhost_vdpa_get_device_id,
        .vhost_vq_get_addr = vhost_vdpa_vq_get_addr,
        .vhost_force_iommu = vhost_vdpa_force_iommu,
};
809