xref: /qemu/hw/virtio/vhost-vdpa.c (revision 4f7a0a4c)
/*
 * vhost-vdpa
 *
 *  Copyright(c) 2017-2018 Intel Corporation.
 *  Copyright(c) 2020 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include <linux/vhost.h>
#include <linux/vfio.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include "hw/virtio/vhost.h"
#include "hw/virtio/vhost-backend.h"
#include "hw/virtio/virtio-net.h"
#include "hw/virtio/vhost-vdpa.h"
#include "exec/address-spaces.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "trace.h"
#include "qemu-common.h"

static bool vhost_vdpa_listener_skipped_section(MemoryRegionSection *section)
{
    return (!memory_region_is_ram(section->mr) &&
            !memory_region_is_iommu(section->mr)) ||
           /* vhost-vDPA doesn't allow MMIO to be mapped */
           memory_region_is_ram_device(section->mr) ||
           /*
            * Sizing an enabled 64-bit BAR can cause spurious mappings to
            * addresses in the upper part of the 64-bit address space.  These
            * are never accessed by the CPU and beyond the address width of
            * some IOMMU hardware.  TODO: VDPA should tell us the IOMMU width.
            */
           section->offset_within_address_space & (1ULL << 63);
}

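/*
 * Send a VHOST_IOTLB_UPDATE message over the device fd to map
 * [iova, iova + size) to the host virtual address vaddr.
 */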
static int vhost_vdpa_dma_map(struct vhost_vdpa *v, hwaddr iova, hwaddr size,
                              void *vaddr, bool readonly)
{
    struct vhost_msg_v2 msg = {};
    int fd = v->device_fd;
    int ret = 0;

    msg.type = v->msg_type;
    msg.iotlb.iova = iova;
    msg.iotlb.size = size;
    msg.iotlb.uaddr = (uint64_t)(uintptr_t)vaddr;
    msg.iotlb.perm = readonly ? VHOST_ACCESS_RO : VHOST_ACCESS_RW;
    msg.iotlb.type = VHOST_IOTLB_UPDATE;

    trace_vhost_vdpa_dma_map(v, fd, msg.type, msg.iotlb.iova, msg.iotlb.size,
                             msg.iotlb.uaddr, msg.iotlb.perm, msg.iotlb.type);

    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
            fd, errno, strerror(errno));
        return -EIO;
    }

    return ret;
}

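/*
 * Send a VHOST_IOTLB_INVALIDATE message over the device fd to drop the
 * mapping for [iova, iova + size).
 */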
static int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, hwaddr iova,
                                hwaddr size)
{
    struct vhost_msg_v2 msg = {};
    int fd = v->device_fd;
    int ret = 0;

    msg.type = v->msg_type;
    msg.iotlb.iova = iova;
    msg.iotlb.size = size;
    msg.iotlb.type = VHOST_IOTLB_INVALIDATE;

    trace_vhost_vdpa_dma_unmap(v, fd, msg.type, msg.iotlb.iova,
                               msg.iotlb.size, msg.iotlb.type);

    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
            fd, errno, strerror(errno));
        return -EIO;
    }

    return ret;
}

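/* Tell the backend that a batch of IOTLB updates follows (BATCH_BEGIN). */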
static void vhost_vdpa_listener_begin_batch(struct vhost_vdpa *v)
{
    int fd = v->device_fd;
    struct vhost_msg_v2 msg = {
        .type = v->msg_type,
        .iotlb.type = VHOST_IOTLB_BATCH_BEGIN,
    };

    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
                     fd, errno, strerror(errno));
    }
}

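/*
 * Open the IOTLB batch at most once per listener transaction, and only when
 * the backend advertises VHOST_BACKEND_F_IOTLB_BATCH.
 */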
static void vhost_vdpa_iotlb_batch_begin_once(struct vhost_vdpa *v)
{
    if (v->dev->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH) &&
        !v->iotlb_batch_begin_sent) {
        vhost_vdpa_listener_begin_batch(v);
    }

    v->iotlb_batch_begin_sent = true;
}

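/*
 * Listener commit hook: close the IOTLB batch opened by
 * vhost_vdpa_iotlb_batch_begin_once() by sending VHOST_IOTLB_BATCH_END.
 */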
static void vhost_vdpa_listener_commit(MemoryListener *listener)
{
    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
    struct vhost_dev *dev = v->dev;
    struct vhost_msg_v2 msg = {};
    int fd = v->device_fd;

    if (!(dev->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH))) {
        return;
    }

    if (!v->iotlb_batch_begin_sent) {
        return;
    }

    msg.type = v->msg_type;
    msg.iotlb.type = VHOST_IOTLB_BATCH_END;

    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
                     fd, errno, strerror(errno));
    }

    v->iotlb_batch_begin_sent = false;
}

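/*
 * Listener region_add hook: turn a newly added RAM section into a
 * page-aligned IOVA -> host virtual address mapping and install it in the
 * device IOTLB.
 */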
static void vhost_vdpa_listener_region_add(MemoryListener *listener,
                                           MemoryRegionSection *section)
{
    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
    hwaddr iova;
    Int128 llend, llsize;
    void *vaddr;
    int ret;

    if (vhost_vdpa_listener_skipped_section(section)) {
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    llend = int128_make64(section->offset_within_address_space);
    llend = int128_add(llend, section->size);
    llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));

    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }

    memory_region_ref(section->mr);

    /* Here we assume that memory_region_is_ram(section->mr) == true */

    vaddr = memory_region_get_ram_ptr(section->mr) +
            section->offset_within_region +
            (iova - section->offset_within_address_space);

    trace_vhost_vdpa_listener_region_add(v, iova, int128_get64(llend),
                                         vaddr, section->readonly);

    llsize = int128_sub(llend, int128_make64(iova));

    vhost_vdpa_iotlb_batch_begin_once(v);
    ret = vhost_vdpa_dma_map(v, iova, int128_get64(llsize),
                             vaddr, section->readonly);
    if (ret) {
        error_report("vhost vdpa map fail!");
        goto fail;
    }

    return;

fail:
    /*
     * On the initfn path, store the first error so we can gracefully fail.
     * At runtime, there's not much we can do other than throw a hardware
     * error.
     */
    error_report("vhost-vdpa: DMA mapping failed, unable to continue");
    return;
}

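/*
 * Listener region_del hook: drop the IOTLB mapping that region_add installed
 * for this section and release the memory region reference.
 */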
static void vhost_vdpa_listener_region_del(MemoryListener *listener,
                                           MemoryRegionSection *section)
{
    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
    hwaddr iova;
    Int128 llend, llsize;
    int ret;

    if (vhost_vdpa_listener_skipped_section(section)) {
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    llend = int128_make64(section->offset_within_address_space);
    llend = int128_add(llend, section->size);
    llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));

    trace_vhost_vdpa_listener_region_del(v, iova, int128_get64(llend));

    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }

    llsize = int128_sub(llend, int128_make64(iova));

    vhost_vdpa_iotlb_batch_begin_once(v);
    ret = vhost_vdpa_dma_unmap(v, iova, int128_get64(llsize));
    if (ret) {
        error_report("vhost_vdpa dma unmap error!");
    }

    memory_region_unref(section->mr);
}

/*
 * The IOTLB API is used by vhost-vdpa, which requires incremental updating
 * of the mapping, so we cannot use the generic vhost memory listener, which
 * depends on addnop().
 */
static const MemoryListener vhost_vdpa_memory_listener = {
    .name = "vhost-vdpa",
    .commit = vhost_vdpa_listener_commit,
    .region_add = vhost_vdpa_listener_region_add,
    .region_del = vhost_vdpa_listener_region_del,
};

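/*
 * Issue a vhost-vdpa ioctl on the device fd; return -errno on failure so
 * that callers see a negative error code instead of -1.
 */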
static int vhost_vdpa_call(struct vhost_dev *dev, unsigned long int request,
                             void *arg)
{
    struct vhost_vdpa *v = dev->opaque;
    int fd = v->device_fd;
    int ret;

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);

    ret = ioctl(fd, request, arg);
    return ret < 0 ? -errno : ret;
}

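/*
 * Read the current device status and OR in the given bits
 * (read-modify-write of the virtio device status byte).
 */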
static void vhost_vdpa_add_status(struct vhost_dev *dev, uint8_t status)
{
    uint8_t s;

    trace_vhost_vdpa_add_status(dev, status);
    if (vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &s)) {
        return;
    }

    s |= status;

    vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &s);
}

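/*
 * Backend init: link the vhost_vdpa state to the vhost_dev, select the v2
 * IOTLB message format and move the device to ACKNOWLEDGE | DRIVER.
 */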
static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque, Error **errp)
{
    struct vhost_vdpa *v;
    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
    trace_vhost_vdpa_init(dev, opaque);

    v = opaque;
    v->dev = dev;
    dev->opaque = opaque;
    v->listener = vhost_vdpa_memory_listener;
    v->msg_type = VHOST_IOTLB_MSG_V2;

    vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
                               VIRTIO_CONFIG_S_DRIVER);

    return 0;
}

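/* Unmap and tear down the host notifier region for one virtqueue, if set. */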
static void vhost_vdpa_host_notifier_uninit(struct vhost_dev *dev,
                                            int queue_index)
{
    size_t page_size = qemu_real_host_page_size;
    struct vhost_vdpa *v = dev->opaque;
    VirtIODevice *vdev = dev->vdev;
    VhostVDPAHostNotifier *n;

    n = &v->notifier[queue_index];

    if (n->addr) {
        virtio_queue_set_host_notifier_mr(vdev, queue_index, &n->mr, false);
        object_unparent(OBJECT(&n->mr));
        munmap(n->addr, page_size);
        n->addr = NULL;
    }
}

static void vhost_vdpa_host_notifiers_uninit(struct vhost_dev *dev, int n)
{
    int i;

    for (i = 0; i < n; i++) {
        vhost_vdpa_host_notifier_uninit(dev, i);
    }
}

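/*
 * mmap the per-virtqueue doorbell page from the vhost-vdpa device fd and
 * expose it to the guest as a ram-device memory region, so queue kicks can
 * reach the device without trapping into QEMU.
 */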
static int vhost_vdpa_host_notifier_init(struct vhost_dev *dev, int queue_index)
{
    size_t page_size = qemu_real_host_page_size;
    struct vhost_vdpa *v = dev->opaque;
    VirtIODevice *vdev = dev->vdev;
    VhostVDPAHostNotifier *n;
    int fd = v->device_fd;
    void *addr;
    char *name;

    vhost_vdpa_host_notifier_uninit(dev, queue_index);

    n = &v->notifier[queue_index];

    addr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, fd,
                queue_index * page_size);
    if (addr == MAP_FAILED) {
        goto err;
    }

    name = g_strdup_printf("vhost-vdpa/host-notifier@%p mmaps[%d]",
                           v, queue_index);
    memory_region_init_ram_device_ptr(&n->mr, OBJECT(vdev), name,
                                      page_size, addr);
    g_free(name);

    if (virtio_queue_set_host_notifier_mr(vdev, queue_index, &n->mr, true)) {
        munmap(addr, page_size);
        goto err;
    }
    n->addr = addr;

    return 0;

err:
    return -1;
}

static void vhost_vdpa_host_notifiers_init(struct vhost_dev *dev)
{
    int i;

    for (i = dev->vq_index; i < dev->vq_index + dev->nvqs; i++) {
        if (vhost_vdpa_host_notifier_init(dev, i)) {
            goto err;
        }
    }

    return;

err:
    vhost_vdpa_host_notifiers_uninit(dev, i);
    return;
}

static int vhost_vdpa_cleanup(struct vhost_dev *dev)
{
    struct vhost_vdpa *v;
    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
    v = dev->opaque;
    trace_vhost_vdpa_cleanup(dev, v);
    vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
    memory_listener_unregister(&v->listener);

    dev->opaque = NULL;
    return 0;
}

static int vhost_vdpa_memslots_limit(struct vhost_dev *dev)
{
    trace_vhost_vdpa_memslots_limit(dev, INT_MAX);
    return INT_MAX;
}

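/*
 * Memory mappings are handled through the IOTLB listener above, so the
 * memory table call only traces and validates the payload here.
 */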
static int vhost_vdpa_set_mem_table(struct vhost_dev *dev,
                                    struct vhost_memory *mem)
{
    trace_vhost_vdpa_set_mem_table(dev, mem->nregions, mem->padding);
    if (trace_event_get_state_backends(TRACE_VHOST_VDPA_SET_MEM_TABLE) &&
        trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_REGIONS)) {
        int i;
        for (i = 0; i < mem->nregions; i++) {
            trace_vhost_vdpa_dump_regions(dev, i,
                                          mem->regions[i].guest_phys_addr,
                                          mem->regions[i].memory_size,
                                          mem->regions[i].userspace_addr,
                                          mem->regions[i].flags_padding);
        }
    }
    if (mem->padding) {
        return -1;
    }

    return 0;
}

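/*
 * Negotiate features with the device, then verify that FEATURES_OK was
 * accepted by reading the status back.
 */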
static int vhost_vdpa_set_features(struct vhost_dev *dev,
                                   uint64_t features)
{
    uint8_t status = 0;
    int ret;

    trace_vhost_vdpa_set_features(dev, features);
    ret = vhost_vdpa_call(dev, VHOST_SET_FEATURES, &features);
    if (ret) {
        return ret;
    }
    vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_FEATURES_OK);
    vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &status);

    return !(status & VIRTIO_CONFIG_S_FEATURES_OK);
}

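/*
 * Restrict backend feature negotiation to the IOTLB capabilities QEMU knows
 * about (message v2 and batching) and remember the result in
 * dev->backend_cap.
 */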
static int vhost_vdpa_set_backend_cap(struct vhost_dev *dev)
{
    uint64_t features;
    uint64_t f = 0x1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2 |
        0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH;
    int r;

    if (vhost_vdpa_call(dev, VHOST_GET_BACKEND_FEATURES, &features)) {
        return -EFAULT;
    }

    features &= f;
    r = vhost_vdpa_call(dev, VHOST_SET_BACKEND_FEATURES, &features);
    if (r) {
        return -EFAULT;
    }

    dev->backend_cap = features;

    return 0;
}

static int vhost_vdpa_get_device_id(struct vhost_dev *dev,
                                    uint32_t *device_id)
{
    int ret;
    ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_DEVICE_ID, device_id);
    trace_vhost_vdpa_get_device_id(dev, *device_id);
    return ret;
}

static int vhost_vdpa_reset_device(struct vhost_dev *dev)
{
    int ret;
    uint8_t status = 0;

    ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &status);
    trace_vhost_vdpa_reset_device(dev, status);
    return ret;
}

static int vhost_vdpa_get_vq_index(struct vhost_dev *dev, int idx)
{
    assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);

    trace_vhost_vdpa_get_vq_index(dev, idx, idx - dev->vq_index);
    return idx - dev->vq_index;
}

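/* Mark every virtqueue owned by this device as enabled (ready). */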
static int vhost_vdpa_set_vring_ready(struct vhost_dev *dev)
{
    int i;
    trace_vhost_vdpa_set_vring_ready(dev);
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_vring_state state = {
            .index = dev->vq_index + i,
            .num = 1,
        };
        vhost_vdpa_call(dev, VHOST_VDPA_SET_VRING_ENABLE, &state);
    }
    return 0;
}

static void vhost_vdpa_dump_config(struct vhost_dev *dev, const uint8_t *config,
                                   uint32_t config_len)
{
    int b, len;
    char line[QEMU_HEXDUMP_LINE_LEN];

    for (b = 0; b < config_len; b += 16) {
        len = config_len - b;
        qemu_hexdump_line(line, b, config, len, false);
        trace_vhost_vdpa_dump_config(dev, line);
    }
}

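/*
 * Write a slice of the device config space through VHOST_VDPA_SET_CONFIG,
 * optionally dumping the bytes when the trace events are enabled.
 */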
static int vhost_vdpa_set_config(struct vhost_dev *dev, const uint8_t *data,
                                   uint32_t offset, uint32_t size,
                                   uint32_t flags)
{
    struct vhost_vdpa_config *config;
    int ret;
    unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);

    trace_vhost_vdpa_set_config(dev, offset, size, flags);
    config = g_malloc(size + config_size);
    config->off = offset;
    config->len = size;
    memcpy(config->buf, data, size);
    if (trace_event_get_state_backends(TRACE_VHOST_VDPA_SET_CONFIG) &&
        trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_CONFIG)) {
        vhost_vdpa_dump_config(dev, data, size);
    }
    ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_CONFIG, config);
    g_free(config);
    return ret;
}

static int vhost_vdpa_get_config(struct vhost_dev *dev, uint8_t *config,
                                   uint32_t config_len, Error **errp)
{
    struct vhost_vdpa_config *v_config;
    unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
    int ret;

    trace_vhost_vdpa_get_config(dev, config, config_len);
    v_config = g_malloc(config_len + config_size);
    v_config->len = config_len;
    v_config->off = 0;
    ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_CONFIG, v_config);
    memcpy(config, v_config->buf, config_len);
    g_free(v_config);
    if (trace_event_get_state_backends(TRACE_VHOST_VDPA_GET_CONFIG) &&
        trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_CONFIG)) {
        vhost_vdpa_dump_config(dev, config, config_len);
    }
    return ret;
}

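/*
 * Start/stop the device: on start, register the memory listener, map the
 * host notifiers, enable the rings and set DRIVER_OK; on stop, reset the
 * device and undo all of the above.
 */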
static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
{
    struct vhost_vdpa *v = dev->opaque;
    trace_vhost_vdpa_dev_start(dev, started);
    if (started) {
        uint8_t status = 0;
        memory_listener_register(&v->listener, &address_space_memory);
        vhost_vdpa_host_notifiers_init(dev);
        vhost_vdpa_set_vring_ready(dev);
        vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
        vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &status);

        return !(status & VIRTIO_CONFIG_S_DRIVER_OK);
    } else {
        vhost_vdpa_reset_device(dev);
        vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
                                   VIRTIO_CONFIG_S_DRIVER);
        vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
        memory_listener_unregister(&v->listener);

        return 0;
    }
}

static int vhost_vdpa_set_log_base(struct vhost_dev *dev, uint64_t base,
                                     struct vhost_log *log)
{
    trace_vhost_vdpa_set_log_base(dev, base, log->size, log->refcnt, log->fd,
                                  log->log);
    return vhost_vdpa_call(dev, VHOST_SET_LOG_BASE, &base);
}

static int vhost_vdpa_set_vring_addr(struct vhost_dev *dev,
                                       struct vhost_vring_addr *addr)
{
    trace_vhost_vdpa_set_vring_addr(dev, addr->index, addr->flags,
                                    addr->desc_user_addr, addr->used_user_addr,
                                    addr->avail_user_addr,
                                    addr->log_guest_addr);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_ADDR, addr);
}

static int vhost_vdpa_set_vring_num(struct vhost_dev *dev,
                                      struct vhost_vring_state *ring)
{
    trace_vhost_vdpa_set_vring_num(dev, ring->index, ring->num);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_NUM, ring);
}

static int vhost_vdpa_set_vring_base(struct vhost_dev *dev,
                                       struct vhost_vring_state *ring)
{
    trace_vhost_vdpa_set_vring_base(dev, ring->index, ring->num);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_BASE, ring);
}

static int vhost_vdpa_get_vring_base(struct vhost_dev *dev,
                                       struct vhost_vring_state *ring)
{
    int ret;

    ret = vhost_vdpa_call(dev, VHOST_GET_VRING_BASE, ring);
    trace_vhost_vdpa_get_vring_base(dev, ring->index, ring->num);
    return ret;
}

static int vhost_vdpa_set_vring_kick(struct vhost_dev *dev,
                                       struct vhost_vring_file *file)
{
    trace_vhost_vdpa_set_vring_kick(dev, file->index, file->fd);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_KICK, file);
}

static int vhost_vdpa_set_vring_call(struct vhost_dev *dev,
                                       struct vhost_vring_file *file)
{
    trace_vhost_vdpa_set_vring_call(dev, file->index, file->fd);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_CALL, file);
}

static int vhost_vdpa_get_features(struct vhost_dev *dev,
                                     uint64_t *features)
{
    int ret;

    ret = vhost_vdpa_call(dev, VHOST_GET_FEATURES, features);
    trace_vhost_vdpa_get_features(dev, *features);
    return ret;
}

static int vhost_vdpa_set_owner(struct vhost_dev *dev)
{
    trace_vhost_vdpa_set_owner(dev);
    return vhost_vdpa_call(dev, VHOST_SET_OWNER, NULL);
}

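/*
 * vDPA addresses the rings by guest physical address, so hand the GPAs
 * (desc/avail/used_phys) to the backend instead of user virtual addresses.
 */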
static int vhost_vdpa_vq_get_addr(struct vhost_dev *dev,
                    struct vhost_vring_addr *addr, struct vhost_virtqueue *vq)
{
    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
    addr->desc_user_addr = (uint64_t)(unsigned long)vq->desc_phys;
    addr->avail_user_addr = (uint64_t)(unsigned long)vq->avail_phys;
    addr->used_user_addr = (uint64_t)(unsigned long)vq->used_phys;
    trace_vhost_vdpa_vq_get_addr(dev, vq, addr->desc_user_addr,
                                 addr->avail_user_addr, addr->used_user_addr);
    return 0;
}

static bool vhost_vdpa_force_iommu(struct vhost_dev *dev)
{
    return true;
}

const VhostOps vdpa_ops = {
        .backend_type = VHOST_BACKEND_TYPE_VDPA,
        .vhost_backend_init = vhost_vdpa_init,
        .vhost_backend_cleanup = vhost_vdpa_cleanup,
        .vhost_set_log_base = vhost_vdpa_set_log_base,
        .vhost_set_vring_addr = vhost_vdpa_set_vring_addr,
        .vhost_set_vring_num = vhost_vdpa_set_vring_num,
        .vhost_set_vring_base = vhost_vdpa_set_vring_base,
        .vhost_get_vring_base = vhost_vdpa_get_vring_base,
        .vhost_set_vring_kick = vhost_vdpa_set_vring_kick,
        .vhost_set_vring_call = vhost_vdpa_set_vring_call,
        .vhost_get_features = vhost_vdpa_get_features,
        .vhost_set_backend_cap = vhost_vdpa_set_backend_cap,
        .vhost_set_owner = vhost_vdpa_set_owner,
        .vhost_set_vring_endian = NULL,
        .vhost_backend_memslots_limit = vhost_vdpa_memslots_limit,
        .vhost_set_mem_table = vhost_vdpa_set_mem_table,
        .vhost_set_features = vhost_vdpa_set_features,
        .vhost_reset_device = vhost_vdpa_reset_device,
        .vhost_get_vq_index = vhost_vdpa_get_vq_index,
        .vhost_get_config = vhost_vdpa_get_config,
        .vhost_set_config = vhost_vdpa_set_config,
        .vhost_requires_shm_log = NULL,
        .vhost_migration_done = NULL,
        .vhost_backend_can_merge = NULL,
        .vhost_net_set_mtu = NULL,
        .vhost_set_iotlb_callback = NULL,
        .vhost_send_device_iotlb_msg = NULL,
        .vhost_dev_start = vhost_vdpa_dev_start,
        .vhost_get_device_id = vhost_vdpa_get_device_id,
        .vhost_vq_get_addr = vhost_vdpa_vq_get_addr,
        .vhost_force_iommu = vhost_vdpa_force_iommu,
};
704