/*
 * vhost-vdpa.h
 *
 * Copyright(c) 2017-2018 Intel Corporation.
 * Copyright(c) 2020 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#ifndef HW_VIRTIO_VHOST_VDPA_H
#define HW_VIRTIO_VHOST_VDPA_H

#include <gmodule.h>

#include "hw/virtio/vhost-iova-tree.h"
#include "hw/virtio/vhost-shadow-virtqueue.h"
#include "hw/virtio/virtio.h"
#include "standard-headers/linux/vhost_types.h"

/*
 * ASID dedicated to mapping the guest's addresses.  If SVQ is disabled, it
 * maps GPA to QEMU's IOVA.  If SVQ is enabled, the SVQ vrings are also
 * mapped here.
 */
#define VHOST_VDPA_GUEST_PA_ASID 0

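/*
 * Host notifier of one virtqueue: the region mapped from the vdpa device and
 * the host address it is mapped at (NULL when not mapped).
 */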
typedef struct VhostVDPAHostNotifier {
    MemoryRegion mr;
    void *addr;
} VhostVDPAHostNotifier;

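/* Progress of a switch into or out of Shadow VirtQueue (SVQ) mode */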
typedef enum SVQTransitionState {
    SVQ_TSTATE_DISABLING = -1,
    SVQ_TSTATE_DONE,
    SVQ_TSTATE_ENABLING
} SVQTransitionState;

/* Info shared by all vhost_vdpa device models */
typedef struct vhost_vdpa_shared {
    int device_fd;
    MemoryListener listener;
    struct vhost_vdpa_iova_range iova_range;
    QLIST_HEAD(, vdpa_iommu) iommu_list;

    /* IOVA mapping used by the Shadow Virtqueue */
    VhostIOVATree *iova_tree;

    /* Copy of backend features */
    uint64_t backend_cap;

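    /* A VHOST_IOTLB_BATCH_BEGIN has been sent and the batch is not ended yet */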
    bool iotlb_batch_begin_sent;

    /* vdpa must send shadow addresses as IOTLB keys for data queues, not GPA */
    bool shadow_data;

    /* Whether an SVQ switch is in progress or has already completed */
    SVQTransitionState svq_switching;
} VhostVDPAShared;

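/* Per vhost_dev vhost-vdpa state; state common to the whole device lives in
 * VhostVDPAShared */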
typedef struct vhost_vdpa {
    int index;
    uint32_t address_space_id;
    uint64_t acked_features;
    bool shadow_vqs_enabled;
    /* Device suspended successfully */
    bool suspended;
    VhostVDPAShared *shared;
    GPtrArray *shadow_vqs;
    const VhostShadowVirtqueueOps *shadow_vq_ops;
    void *shadow_vq_ops_opaque;
    struct vhost_dev *dev;
    Error *migration_blocker;
    VhostVDPAHostNotifier notifier[VIRTIO_QUEUE_MAX];
    IOMMUNotifier n;
} VhostVDPA;

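/*
 * Query the usable IOVA range of the vhost-vdpa device open at @fd
 * (VHOST_VDPA_GET_IOVA_RANGE).  Returns 0 on success, a negative errno value
 * on failure.
 */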
int vhost_vdpa_get_iova_range(int fd, struct vhost_vdpa_iova_range *iova_range);

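/*
 * Mark virtqueue @idx of @v as ready so the device can start processing it.
 * Returns 0 on success, a negative errno value on failure.
 */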
int vhost_vdpa_set_vring_ready(struct vhost_vdpa *v, unsigned idx);

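/*
 * Add an IOTLB mapping: translate [@iova, @iova + @size) in address space
 * @asid to the host buffer at @vaddr, optionally read-only.  Returns 0 on
 * success, a negative errno value on failure.
 */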
int vhost_vdpa_dma_map(VhostVDPAShared *s, uint32_t asid, hwaddr iova,
                       hwaddr size, void *vaddr, bool readonly);

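/*
 * Remove the IOTLB mapping(s) covering [@iova, @iova + @size) in address
 * space @asid.  Returns 0 on success, a negative errno value on failure.
 */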
int vhost_vdpa_dma_unmap(VhostVDPAShared *s, uint32_t asid, hwaddr iova,
                         hwaddr size);

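/*
 * State kept for each guest IOMMU memory region found by the memory
 * listener, so that updates to its mappings can be propagated to the device.
 */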
typedef struct vdpa_iommu {
    VhostVDPAShared *dev_shared;
    IOMMUMemoryRegion *iommu_mr;
    hwaddr iommu_offset;
    IOMMUNotifier n;
    QLIST_ENTRY(vdpa_iommu) iommu_next;
} VDPAIOMMUState;

#endif