/* qemu/include/hw/virtio/vhost.h (revision a976a99a) */
#ifndef VHOST_H
#define VHOST_H

#include "hw/virtio/vhost-backend.h"
#include "hw/virtio/virtio.h"
#include "exec/memory.h"

/* Generic structures common for any vhost based device. */

struct vhost_inflight {
    int fd;
    void *addr;
    uint64_t size;
    uint64_t offset;
    uint16_t queue_size;
};

struct vhost_virtqueue {
    int kick;
    int call;
    void *desc;
    void *avail;
    void *used;
    int num;
    unsigned long long desc_phys;
    unsigned desc_size;
    unsigned long long avail_phys;
    unsigned avail_size;
    unsigned long long used_phys;
    unsigned used_size;
    EventNotifier masked_notifier;
    EventNotifier error_notifier;
    struct vhost_dev *dev;
};

typedef unsigned long vhost_log_chunk_t;
#define VHOST_LOG_PAGE 0x1000
#define VHOST_LOG_BITS (8 * sizeof(vhost_log_chunk_t))
#define VHOST_LOG_CHUNK (VHOST_LOG_PAGE * VHOST_LOG_BITS)
#define VHOST_INVALID_FEATURE_BIT   (0xff)

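/*
 * Dirty-log sizing sketch (illustrative only): each vhost_log_chunk_t
 * tracks VHOST_LOG_BITS pages of VHOST_LOG_PAGE bytes, so one chunk
 * covers VHOST_LOG_CHUNK bytes of guest memory (4 KiB * 64 = 256 KiB
 * on a 64-bit host). A migration dirty log for a region ending at
 * `log_end` therefore needs roughly:
 *
 *     uint64_t chunks = (log_end + VHOST_LOG_CHUNK - 1) / VHOST_LOG_CHUNK;
 *     size_t   bytes  = chunks * sizeof(vhost_log_chunk_t);
 *
 * `log_end` is a hypothetical name for the highest guest physical
 * address that needs dirty tracking; the real computation lives in
 * hw/virtio/vhost.c.
 */
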
struct vhost_log {
    unsigned long long size;
    int refcnt;
    int fd;
    vhost_log_chunk_t *log;
};

struct vhost_dev;
struct vhost_iommu {
    struct vhost_dev *hdev;
    MemoryRegion *mr;
    hwaddr iommu_offset;
    IOMMUNotifier n;
    QLIST_ENTRY(vhost_iommu) iommu_next;
};

typedef struct VhostDevConfigOps {
    /* Vhost device config space changed callback */
    int (*vhost_dev_config_notifier)(struct vhost_dev *dev);
} VhostDevConfigOps;

struct vhost_memory;

/**
 * struct vhost_dev - common vhost_dev structure
 * @vhost_ops: backend specific ops
 * @config_ops: ops for config changes (see @vhost_dev_set_config_notifier)
 */
struct vhost_dev {
    VirtIODevice *vdev;
    MemoryListener memory_listener;
    MemoryListener iommu_listener;
    struct vhost_memory *mem;
    int n_mem_sections;
    MemoryRegionSection *mem_sections;
    int n_tmp_sections;
    MemoryRegionSection *tmp_sections;
    struct vhost_virtqueue *vqs;
    unsigned int nvqs;
    /* the first virtqueue which would be used by this vhost dev */
    int vq_index;
    /* one past the last vq index for the virtio device (not vhost) */
    int vq_index_end;
    /* if non-zero, minimum required value for max_queues */
    int num_queues;
    uint64_t features;
    uint64_t acked_features;
    uint64_t backend_features;
    uint64_t protocol_features;
    uint64_t max_queues;
    uint64_t backend_cap;
    bool started;
    bool log_enabled;
    uint64_t log_size;
    Error *migration_blocker;
    const VhostOps *vhost_ops;
    void *opaque;
    struct vhost_log *log;
    QLIST_ENTRY(vhost_dev) entry;
    QLIST_HEAD(, vhost_iommu) iommu_list;
    IOMMUNotifier n;
    const VhostDevConfigOps *config_ops;
};

extern const VhostOps kernel_ops;
extern const VhostOps user_ops;
extern const VhostOps vdpa_ops;

struct vhost_net {
    struct vhost_dev dev;
    struct vhost_virtqueue vqs[2];
    int backend;
    NetClientState *nc;
};

/**
 * vhost_dev_init() - initialise the vhost interface
 * @hdev: the common vhost_dev structure
 * @opaque: opaque ptr passed to backend (vhost/vhost-user/vdpa)
 * @backend_type: type of backend
 * @busyloop_timeout: timeout for polling virtqueue
 * @errp: error handle
 *
 * The initialisation of the vhost device will trigger the
 * initialisation of the backend and potentially capability
 * negotiation of the backend interface. Configuration of the VirtIO
 * device itself won't happen until the interface is started.
 *
 * Return: 0 on success, non-zero on error, with @errp set.
 */
int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
                   VhostBackendType backend_type,
                   uint32_t busyloop_timeout, Error **errp);
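
/*
 * Illustrative only: a minimal initialisation sketch for a device
 * embedding a vhost_dev. The device struct `s`, its `vqs` array and
 * the backend file descriptor `vhostfd` are hypothetical; how @opaque
 * is interpreted depends on @backend_type (for the kernel backend it
 * is the vhost fd, for vhost-user a state pointer).
 *
 *     Error *local_err = NULL;
 *     int r;
 *
 *     s->dev.nvqs = ARRAY_SIZE(s->vqs);
 *     s->dev.vqs = s->vqs;
 *     s->dev.vq_index = 0;
 *
 *     r = vhost_dev_init(&s->dev, (void *)(uintptr_t)vhostfd,
 *                        VHOST_BACKEND_TYPE_KERNEL, 0, &local_err);
 *     if (r < 0) {
 *         error_propagate(errp, local_err);
 *         return;
 *     }
 */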

/**
 * vhost_dev_cleanup() - tear down and clean up the vhost interface
 * @hdev: the common vhost_dev structure
 */
void vhost_dev_cleanup(struct vhost_dev *hdev);

/**
 * vhost_dev_enable_notifiers() - enable event notifiers
 * @hdev: common vhost_dev structure
 * @vdev: the VirtIODevice structure
 *
 * Enable notifications directly to the vhost device rather than having
 * them handled by QEMU itself. Notifications should be enabled before
 * the vhost device is started via @vhost_dev_start.
 *
 * Return: 0 on success, < 0 on error.
 */
int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev);

/**
 * vhost_dev_disable_notifiers() - disable event notifications
 * @hdev: common vhost_dev structure
 * @vdev: the VirtIODevice structure
 *
 * Disable direct notifications to the vhost device.
 */
void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev);

/**
 * vhost_dev_start() - start the vhost device
 * @hdev: common vhost_dev structure
 * @vdev: the VirtIODevice structure
 *
 * Starts the vhost device. From this point VirtIO feature negotiation
 * can take place and the device can begin processing VirtIO
 * transactions.
 *
 * Return: 0 on success, < 0 on error.
 */
int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev);
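
/*
 * Illustrative start sequence (names like `s` and `vdev` are
 * hypothetical): notifiers are switched over to the vhost device
 * before it is started, and switched back if starting fails.
 *
 *     int r = vhost_dev_enable_notifiers(&s->dev, vdev);
 *     if (r < 0) {
 *         return r;
 *     }
 *     s->dev.acked_features = vdev->guest_features;
 *     r = vhost_dev_start(&s->dev, vdev);
 *     if (r < 0) {
 *         vhost_dev_disable_notifiers(&s->dev, vdev);
 *         return r;
 *     }
 */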

/**
 * vhost_dev_stop() - stop the vhost device
 * @hdev: common vhost_dev structure
 * @vdev: the VirtIODevice structure
 *
 * Stop the vhost device. After the device is stopped the notifiers
 * can be disabled (@vhost_dev_disable_notifiers) and the device can
 * be torn down (@vhost_dev_cleanup).
 */
void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev);
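
/*
 * Illustrative stop/teardown sequence mirroring the documentation
 * above (the device pointer `s` is hypothetical):
 *
 *     vhost_dev_stop(&s->dev, vdev);
 *     vhost_dev_disable_notifiers(&s->dev, vdev);
 *     vhost_dev_cleanup(&s->dev);
 */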

/**
 * DOC: vhost device configuration handling
 *
 * The VirtIO device configuration space is used for rarely changing
 * or initialisation time parameters. The configuration can be updated
 * by either the guest driver or the device itself. If the device can
 * change the configuration over time, the vhost handler should
 * register a @VhostDevConfigOps structure with
 * @vhost_dev_set_config_notifier so the guest can be notified. Some
 * devices register a handler anyway and will signal an error if an
 * unexpected config change happens.
 */

/**
 * vhost_dev_get_config() - fetch device configuration
 * @hdev: common vhost_dev structure
 * @config: pointer to device appropriate config structure
 * @config_len: size of device appropriate config structure
 * @errp: error handle
 *
 * Return: 0 on success, < 0 on error, with @errp set.
 */
int vhost_dev_get_config(struct vhost_dev *hdev, uint8_t *config,
                         uint32_t config_len, Error **errp);

/**
 * vhost_dev_set_config() - set device configuration
 * @dev: common vhost_dev structure
 * @data: pointer to data to set
 * @offset: offset into configuration space
 * @size: number of bytes to write
 * @flags: @VhostSetConfigType flags
 *
 * By use of @offset/@size a subset of the configuration space can be
 * written to. The @flags are used to indicate whether it is a normal
 * transaction or one related to migration.
 *
 * Return: 0 on success, non-zero on error
 */
int vhost_dev_set_config(struct vhost_dev *dev, const uint8_t *data,
                         uint32_t offset, uint32_t size, uint32_t flags);
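
/*
 * Illustrative only: reading the whole config space and then updating
 * a few bytes at an offset. The names `s`, `cfg`, `new_capacity` and
 * the use of the VHOST_SET_CONFIG_TYPE_MASTER flag from
 * vhost-backend.h are assumptions made for this sketch.
 *
 *     struct virtio_blk_config cfg;
 *     Error *local_err = NULL;
 *
 *     if (vhost_dev_get_config(&s->dev, (uint8_t *)&cfg, sizeof(cfg),
 *                              &local_err) < 0) {
 *         error_report_err(local_err);
 *         return;
 *     }
 *
 *     cfg.capacity = cpu_to_le64(new_capacity);
 *     vhost_dev_set_config(&s->dev, (uint8_t *)&cfg.capacity,
 *                          offsetof(struct virtio_blk_config, capacity),
 *                          sizeof(cfg.capacity),
 *                          VHOST_SET_CONFIG_TYPE_MASTER);
 */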

/**
 * vhost_dev_set_config_notifier() - register VhostDevConfigOps
 * @dev: common vhost_dev structure
 * @ops: notifier ops
 *
 * If the device is expected to change its configuration a notifier
 * can be set up to handle the case.
 */
void vhost_dev_set_config_notifier(struct vhost_dev *dev,
                                   const VhostDevConfigOps *ops);
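
/*
 * Illustrative registration of a config-change handler, as described
 * in the DOC block above. The handler name and the device pointer `s`
 * are assumptions for this sketch.
 *
 *     static int my_dev_config_notifier(struct vhost_dev *dev)
 *     {
 *         // Tell the guest that the device configuration changed.
 *         virtio_notify_config(dev->vdev);
 *         return 0;
 *     }
 *
 *     static const VhostDevConfigOps my_dev_config_ops = {
 *         .vhost_dev_config_notifier = my_dev_config_notifier,
 *     };
 *
 *     vhost_dev_set_config_notifier(&s->dev, &my_dev_config_ops);
 */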

/**
 * vhost_virtqueue_pending() - test and clear masked event pending status
 * @hdev: common vhost_dev structure
 * @n: the virtqueue index
 *
 * Should be called after unmask to avoid losing events.
 */
bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n);

/**
 * vhost_virtqueue_mask() - mask/unmask events from this virtqueue
 * @hdev: common vhost_dev structure
 * @vdev: the VirtIODevice structure
 * @n: the virtqueue index
 * @mask: true to mask, false to unmask
 */
void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
                          bool mask);
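
/*
 * Illustrative mask/unmask pattern (`hdev`, `vdev` and `idx` are
 * hypothetical): events are masked while guest interrupt routing is
 * being reconfigured, and any event that arrived while masked is
 * checked for after unmasking so it is not lost.
 *
 *     vhost_virtqueue_mask(hdev, vdev, idx, true);
 *     // ... reconfigure the interrupt routing for virtqueue idx ...
 *     vhost_virtqueue_mask(hdev, vdev, idx, false);
 *     if (vhost_virtqueue_pending(hdev, idx)) {
 *         // an event arrived while masked: re-inject the interrupt here
 *     }
 */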

/**
 * vhost_get_features() - return a sanitised set of feature bits
 * @hdev: common vhost_dev structure
 * @feature_bits: array of feature bits terminated by
 *                VHOST_INVALID_FEATURE_BIT
 * @features: original feature set
 *
 * This returns a set of feature bits that is an intersection of what
 * is supported by the vhost backend (hdev->features), the supported
 * feature_bits and the requested feature set.
 */
uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
                            uint64_t features);

/**
 * vhost_ack_features() - set vhost acked_features
 * @hdev: common vhost_dev structure
 * @feature_bits: array of feature bits terminated by
 *                VHOST_INVALID_FEATURE_BIT
 * @features: requested feature set
 *
 * This sets the internal hdev->acked_features to the intersection of
 * the backend's advertised features and the supported feature_bits.
 */
void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits,
                        uint64_t features);
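
/*
 * Illustrative feature negotiation for a hypothetical device: the
 * table lists the feature bits the device model cares about and is
 * terminated with VHOST_INVALID_FEATURE_BIT. The specific VIRTIO_*
 * bits shown are only examples.
 *
 *     static const int my_feature_bits[] = {
 *         VIRTIO_F_VERSION_1,
 *         VIRTIO_RING_F_INDIRECT_DESC,
 *         VIRTIO_RING_F_EVENT_IDX,
 *         VHOST_INVALID_FEATURE_BIT
 *     };
 *
 *     // At feature-negotiation time, trim the offered set:
 *     features = vhost_get_features(&s->dev, my_feature_bits, features);
 *
 *     // Once the guest has acked its features, record them:
 *     vhost_ack_features(&s->dev, my_feature_bits, vdev->guest_features);
 */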

bool vhost_has_free_slot(void);

int vhost_net_set_backend(struct vhost_dev *hdev,
                          struct vhost_vring_file *file);

int vhost_device_iotlb_miss(struct vhost_dev *dev, uint64_t iova, int write);

void vhost_dev_reset_inflight(struct vhost_inflight *inflight);
void vhost_dev_free_inflight(struct vhost_inflight *inflight);
void vhost_dev_save_inflight(struct vhost_inflight *inflight, QEMUFile *f);
int vhost_dev_load_inflight(struct vhost_inflight *inflight, QEMUFile *f);
int vhost_dev_prepare_inflight(struct vhost_dev *hdev, VirtIODevice *vdev);
int vhost_dev_set_inflight(struct vhost_dev *dev,
                           struct vhost_inflight *inflight);
int vhost_dev_get_inflight(struct vhost_dev *dev, uint16_t queue_size,
                           struct vhost_inflight *inflight);
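
/*
 * Illustrative inflight-tracking setup at device start, loosely
 * following the pattern used by vhost-user block-style devices; the
 * embedding struct `s` with an `inflight` member and the `queue_size`
 * variable are assumptions for this sketch.
 *
 *     int r = vhost_dev_prepare_inflight(&s->dev, vdev);
 *     if (r < 0) {
 *         return r;
 *     }
 *     if (!s->inflight->addr) {
 *         r = vhost_dev_get_inflight(&s->dev, queue_size, s->inflight);
 *         if (r < 0) {
 *             return r;
 *         }
 *     }
 *     r = vhost_dev_set_inflight(&s->dev, s->inflight);
 *     if (r < 0) {
 *         return r;
 *     }
 */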

#endif