1 /*
2  * Vhost User library
3  *
4  * Copyright (c) 2016 Red Hat, Inc.
5  *
6  * Authors:
7  *  Victor Kaplansky <victork@redhat.com>
8  *  Marc-André Lureau <mlureau@redhat.com>
9  *
10  * This work is licensed under the terms of the GNU GPL, version 2 or
11  * later.  See the COPYING file in the top-level directory.
12  */
13 
14 #ifndef LIBVHOST_USER_H
15 #define LIBVHOST_USER_H
16 
17 #include <stdint.h>
18 #include <stdbool.h>
19 #include <stddef.h>
20 #include <poll.h>
21 #include <linux/vhost.h>
22 #include <pthread.h>
23 #include "standard-headers/linux/virtio_ring.h"
24 
/* Based on qemu/hw/virtio/vhost-user.c */
/* Virtio feature bit: device supports the GET/SET_PROTOCOL_FEATURES messages. */
#define VHOST_USER_F_PROTOCOL_FEATURES 30
/* Granularity, in bytes, of one page in the dirty-log bitmap. */
#define VHOST_LOG_PAGE 4096

/* Maximum number of descriptors in a single virtqueue. */
#define VIRTQUEUE_MAX_SIZE 1024

/* Number of memory regions a single VhostUserMemory message can describe. */
#define VHOST_MEMORY_BASELINE_NREGIONS 8

/*
 * Set a reasonable maximum number of ram slots, which will be supported by
 * any architecture.
 */
#define VHOST_USER_MAX_RAM_SLOTS 32

/* Size of the fixed message header: everything up to the payload union. */
#define VHOST_USER_HDR_SIZE offsetof(VhostUserMsg, payload.u64)
40 
/*
 * Origin of a config-space change carried by VHOST_USER_SET_CONFIG
 * (regular frontend write vs. state restore during live migration).
 */
typedef enum VhostSetConfigType {
    VHOST_SET_CONFIG_TYPE_FRONTEND = 0,
    VHOST_SET_CONFIG_TYPE_MIGRATION = 1,
} VhostSetConfigType;

/*
 * Maximum size of virtio device config space
 */
#define VHOST_USER_MAX_CONFIG_SIZE 256
50 
/*
 * Vhost-user protocol feature bits. Each enumerator is a bit *position*
 * in the protocol-features mask exchanged via
 * VHOST_USER_GET/SET_PROTOCOL_FEATURES. Note that bit 13 is not defined
 * here, and bit 16 is reserved (see below).
 */
enum VhostUserProtocolFeature {
    VHOST_USER_PROTOCOL_F_MQ = 0,
    VHOST_USER_PROTOCOL_F_LOG_SHMFD = 1,
    VHOST_USER_PROTOCOL_F_RARP = 2,
    VHOST_USER_PROTOCOL_F_REPLY_ACK = 3,
    VHOST_USER_PROTOCOL_F_NET_MTU = 4,
    VHOST_USER_PROTOCOL_F_BACKEND_REQ = 5,
    VHOST_USER_PROTOCOL_F_CROSS_ENDIAN = 6,
    VHOST_USER_PROTOCOL_F_CRYPTO_SESSION = 7,
    VHOST_USER_PROTOCOL_F_PAGEFAULT = 8,
    VHOST_USER_PROTOCOL_F_CONFIG = 9,
    VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD = 10,
    VHOST_USER_PROTOCOL_F_HOST_NOTIFIER = 11,
    VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD = 12,
    VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS = 14,
    VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS = 15,
    /* Feature 16 is reserved for VHOST_USER_PROTOCOL_F_STATUS. */
    VHOST_USER_PROTOCOL_F_SHARED_OBJECT = 17,
    VHOST_USER_PROTOCOL_F_MAX
};

/* Mask with every protocol-feature bit below F_MAX set. */
#define VHOST_USER_PROTOCOL_FEATURE_MASK ((1 << VHOST_USER_PROTOCOL_F_MAX) - 1)
73 
/*
 * Requests sent by the vhost-user frontend to the backend over the main
 * socket. Values follow the vhost-user protocol specification; some
 * values (34, 39, 40) are deliberately not defined in this header.
 */
typedef enum VhostUserRequest {
    VHOST_USER_NONE = 0,
    VHOST_USER_GET_FEATURES = 1,
    VHOST_USER_SET_FEATURES = 2,
    VHOST_USER_SET_OWNER = 3,
    VHOST_USER_RESET_OWNER = 4,
    VHOST_USER_SET_MEM_TABLE = 5,
    VHOST_USER_SET_LOG_BASE = 6,
    VHOST_USER_SET_LOG_FD = 7,
    VHOST_USER_SET_VRING_NUM = 8,
    VHOST_USER_SET_VRING_ADDR = 9,
    VHOST_USER_SET_VRING_BASE = 10,
    VHOST_USER_GET_VRING_BASE = 11,
    VHOST_USER_SET_VRING_KICK = 12,
    VHOST_USER_SET_VRING_CALL = 13,
    VHOST_USER_SET_VRING_ERR = 14,
    VHOST_USER_GET_PROTOCOL_FEATURES = 15,
    VHOST_USER_SET_PROTOCOL_FEATURES = 16,
    VHOST_USER_GET_QUEUE_NUM = 17,
    VHOST_USER_SET_VRING_ENABLE = 18,
    VHOST_USER_SEND_RARP = 19,
    VHOST_USER_NET_SET_MTU = 20,
    VHOST_USER_SET_BACKEND_REQ_FD = 21,
    VHOST_USER_IOTLB_MSG = 22,
    VHOST_USER_SET_VRING_ENDIAN = 23,
    VHOST_USER_GET_CONFIG = 24,
    VHOST_USER_SET_CONFIG = 25,
    VHOST_USER_CREATE_CRYPTO_SESSION = 26,
    VHOST_USER_CLOSE_CRYPTO_SESSION = 27,
    VHOST_USER_POSTCOPY_ADVISE  = 28,
    VHOST_USER_POSTCOPY_LISTEN  = 29,
    VHOST_USER_POSTCOPY_END     = 30,
    VHOST_USER_GET_INFLIGHT_FD = 31,
    VHOST_USER_SET_INFLIGHT_FD = 32,
    VHOST_USER_GPU_SET_SOCKET = 33,
    VHOST_USER_VRING_KICK = 35,
    VHOST_USER_GET_MAX_MEM_SLOTS = 36,
    VHOST_USER_ADD_MEM_REG = 37,
    VHOST_USER_REM_MEM_REG = 38,
    VHOST_USER_GET_SHARED_OBJECT = 41,
    VHOST_USER_MAX
} VhostUserRequest;
116 
/*
 * Requests sent by the backend to the frontend — presumably over the
 * channel established with VHOST_USER_SET_BACKEND_REQ_FD; confirm
 * against libvhost-user.c.
 */
typedef enum VhostUserBackendRequest {
    VHOST_USER_BACKEND_NONE = 0,
    VHOST_USER_BACKEND_IOTLB_MSG = 1,
    VHOST_USER_BACKEND_CONFIG_CHANGE_MSG = 2,
    VHOST_USER_BACKEND_VRING_HOST_NOTIFIER_MSG = 3,
    VHOST_USER_BACKEND_VRING_CALL = 4,
    VHOST_USER_BACKEND_VRING_ERR = 5,
    VHOST_USER_BACKEND_SHARED_OBJECT_ADD = 6,
    VHOST_USER_BACKEND_SHARED_OBJECT_REMOVE = 7,
    VHOST_USER_BACKEND_SHARED_OBJECT_LOOKUP = 8,
    VHOST_USER_BACKEND_MAX
}  VhostUserBackendRequest;
129 
/* On-the-wire description of one guest memory region. */
typedef struct VhostUserMemoryRegion {
    uint64_t guest_phys_addr; /* guest physical address of the region start */
    uint64_t memory_size;     /* region size in bytes */
    uint64_t userspace_addr;  /* address of the region in the frontend's address space */
    uint64_t mmap_offset;     /* offset to apply when mmap'ing the region */
} VhostUserMemoryRegion;

/* Wire size of a single memory-region descriptor. */
#define VHOST_USER_MEM_REG_SIZE (sizeof(VhostUserMemoryRegion))
138 
/* Payload of VHOST_USER_SET_MEM_TABLE: the full guest memory table. */
typedef struct VhostUserMemory {
    uint32_t nregions; /* number of valid entries in regions[] */
    uint32_t padding;  /* keeps regions[] 64-bit aligned on the wire */
    VhostUserMemoryRegion regions[VHOST_MEMORY_BASELINE_NREGIONS];
} VhostUserMemory;

/* Payload of VHOST_USER_ADD_MEM_REG / VHOST_USER_REM_MEM_REG. */
typedef struct VhostUserMemRegMsg {
    uint64_t padding; /* reserved; keeps region 64-bit aligned on the wire */
    VhostUserMemoryRegion region;
} VhostUserMemRegMsg;
149 
/* Payload of VHOST_USER_SET_LOG_BASE: describes the dirty-log mapping. */
typedef struct VhostUserLog {
    uint64_t mmap_size;   /* size of the log mapping in bytes */
    uint64_t mmap_offset; /* offset of the log within the mmap'able fd */
} VhostUserLog;

/* Payload of VHOST_USER_GET_CONFIG / VHOST_USER_SET_CONFIG. */
typedef struct VhostUserConfig {
    uint32_t offset; /* offset within the device config space */
    uint32_t size;   /* number of valid bytes in region[] */
    uint32_t flags;
    uint8_t region[VHOST_USER_MAX_CONFIG_SIZE];
} VhostUserConfig;

/*
 * Dummy object, used only to give the sizeof() expressions below an
 * lvalue to operate on; it is never referenced at run time (hence the
 * "unused" attribute).
 */
static VhostUserConfig c __attribute__ ((unused));
/* Size of the VhostUserConfig fields that precede the region[] payload. */
#define VHOST_USER_CONFIG_HDR_SIZE (sizeof(c.offset) \
                                   + sizeof(c.size) \
                                   + sizeof(c.flags))
166 
/*
 * Describes a host-notifier memory area for a vring — presumably the
 * payload of VHOST_USER_BACKEND_VRING_HOST_NOTIFIER_MSG; confirm in
 * libvhost-user.c.
 */
typedef struct VhostUserVringArea {
    uint64_t u64;    /* vring index plus flag bits (see VHOST_USER_VRING_*_MASK) */
    uint64_t size;   /* area size in bytes */
    uint64_t offset; /* offset of the area within the accompanying fd */
} VhostUserVringArea;

/* Payload of VHOST_USER_GET/SET_INFLIGHT_FD: the inflight-tracking mapping. */
typedef struct VhostUserInflight {
    uint64_t mmap_size;   /* size of the inflight mapping in bytes */
    uint64_t mmap_offset; /* offset of the region within the fd */
    uint16_t num_queues;  /* number of queues tracked by the region */
    uint16_t queue_size;  /* number of descriptors per queue */
} VhostUserInflight;

/* Length of a virtio shared-object UUID, in bytes. */
#define UUID_LEN 16

/* Identifies a virtio shared object (e.g. a dma-buf) by UUID. */
typedef struct VhostUserShared {
    unsigned char uuid[UUID_LEN];
} VhostUserShared;
185 
/*
 * On x86 Windows, gcc's native struct layout differs from the MSVC one;
 * force the gcc layout there so the packed message has the same wire
 * layout on every platform.
 */
#if defined(_WIN32) && (defined(__x86_64__) || defined(__i386__))
# define VU_PACKED __attribute__((gcc_struct, packed))
#else
# define VU_PACKED __attribute__((packed))
#endif

/* A single vhost-user message, as exchanged over the socket. */
typedef struct VhostUserMsg {
    int request; /* a VhostUserRequest (or VhostUserBackendRequest) value */

/* flags, bits 0-1: protocol version */
#define VHOST_USER_VERSION_MASK     (0x3)
/* flags: this message is a reply to a request */
#define VHOST_USER_REPLY_MASK       (0x1 << 2)
/* flags: the sender expects an acknowledgement reply */
#define VHOST_USER_NEED_REPLY_MASK  (0x1 << 3)
    uint32_t flags;
    uint32_t size; /* the following payload size */

    union {
/* payload.u64 of vring messages: low byte carries the vring index... */
#define VHOST_USER_VRING_IDX_MASK   (0xff)
/* ...and this bit signals that no file descriptor accompanies the message */
#define VHOST_USER_VRING_NOFD_MASK  (0x1 << 8)
        uint64_t u64;
        struct vhost_vring_state state;
        struct vhost_vring_addr addr;
        VhostUserMemory memory;
        VhostUserMemRegMsg memreg;
        VhostUserLog log;
        VhostUserConfig config;
        VhostUserVringArea area;
        VhostUserInflight inflight;
        VhostUserShared object;
    } payload;

    /*
     * File descriptors received as the socket message's auxiliary data
     * (not part of the wire payload).
     */
    int fds[VHOST_MEMORY_BASELINE_NREGIONS];
    int fd_num; /* number of valid entries in fds[] */
    /* Out-of-line payload buffer — ownership handled in libvhost-user.c. */
    uint8_t *data;
} VU_PACKED VhostUserMsg;
220 
/* A guest memory region mmap'ed into this backend's address space. */
typedef struct VuDevRegion {
    /* Guest Physical address. */
    uint64_t gpa;
    /* Memory region size. */
    uint64_t size;
    /* QEMU virtual address (userspace). */
    uint64_t qva;
    /* Starting offset in our mmaped space. */
    uint64_t mmap_offset;
    /* Start address of mmaped space. */
    uint64_t mmap_addr;
} VuDevRegion;
233 
/* Forward declaration; the full definition appears later in this header. */
typedef struct VuDev VuDev;

/* Return a 64-bit feature bitmask for the device. */
typedef uint64_t (*vu_get_features_cb) (VuDev *dev);
/* Apply a 64-bit feature bitmask to the device. */
typedef void (*vu_set_features_cb) (VuDev *dev, uint64_t features);
/*
 * Device-specific hook called for each received message; a non-zero
 * return skips libvhost-user's default processing (see VuDevIface).
 * *do_reply presumably tells the caller whether a reply was prepared —
 * confirm against libvhost-user.c.
 */
typedef int (*vu_process_msg_cb) (VuDev *dev, VhostUserMsg *vmsg,
                                  int *do_reply);
/* Read one vhost-user message from @sock into @vmsg; true on success. */
typedef bool (*vu_read_msg_cb) (VuDev *dev, int sock, VhostUserMsg *vmsg);
/* Notification that queue @qidx has been started or stopped. */
typedef void (*vu_queue_set_started_cb) (VuDev *dev, int qidx, bool started);
/* Whether queue @qidx is processed strictly in order (see VuDevIface). */
typedef bool (*vu_queue_is_processed_in_order_cb) (VuDev *dev, int qidx);
/* Copy up to @len bytes of device config space into @config. */
typedef int (*vu_get_config_cb) (VuDev *dev, uint8_t *config, uint32_t len);
/* Write @size bytes of @data at @offset into the device config space. */
typedef int (*vu_set_config_cb) (VuDev *dev, const uint8_t *data,
                                 uint32_t offset, uint32_t size,
                                 uint32_t flags);
/* Export the shared object identified by @uuid (16 bytes, see UUID_LEN). */
typedef int (*vu_get_shared_object_cb) (VuDev *dev, const unsigned char *uuid);
248 
/* Device callbacks supplied by the library user at vu_init() time. */
typedef struct VuDevIface {
    /* called by VHOST_USER_GET_FEATURES to get the features bitmask */
    vu_get_features_cb get_features;
    /* enable vhost implementation features */
    vu_set_features_cb set_features;
    /*
     * get the protocol feature bitmask from the underlying vhost
     * implementation
     */
    vu_get_features_cb get_protocol_features;
    /* enable protocol features in the underlying vhost implementation. */
    vu_set_features_cb set_protocol_features;
    /*
     * process_msg is called for each vhost-user message received;
     * libvhost-user's own processing is skipped if the return value != 0.
     */
    vu_process_msg_cb process_msg;
    /* tells when queues can be processed */
    vu_queue_set_started_cb queue_set_started;
    /*
     * If the queue is processed in order, in which case it will be
     * resumed to vring.used->idx. This can help to support resuming
     * on unmanaged exit/crash.
     */
    vu_queue_is_processed_in_order_cb queue_is_processed_in_order;
    /* get the config space of the device */
    vu_get_config_cb get_config;
    /* set the config space of the device */
    vu_set_config_cb set_config;
    /* get virtio shared object from the underlying vhost implementation. */
    vu_get_shared_object_cb get_shared_object;
} VuDevIface;
277 
/* Per-queue handler set via vu_set_queue_handler(); @qidx is the queue index. */
typedef void (*vu_queue_handler_cb) (VuDev *dev, int qidx);

/* Mapped guest vring (split virtqueue layout from virtio_ring.h). */
typedef struct VuRing {
    unsigned int num;          /* number of descriptors in the ring */
    struct vring_desc *desc;   /* descriptor table */
    struct vring_avail *avail; /* available ring */
    struct vring_used *used;   /* used ring */
    uint64_t log_guest_addr;   /* guest address used for dirty logging */
    uint32_t flags;
} VuRing;
288 
/*
 * Per-descriptor inflight state for a split virtqueue; an array of these
 * lives at the tail of VuVirtqInflight, so the layout must stay stable.
 */
typedef struct VuDescStateSplit {
    /*
     * Indicate whether this descriptor is inflight or not.
     * Only available for head-descriptor.
     */
    uint8_t inflight;

    /* Padding */
    uint8_t padding[5];

    /*
     * Maintain a list for the last batch of used descriptors.
     * Only available when batching is used for submitting.
     */
    uint16_t next;

    /*
     * Used to preserve the order of fetching available descriptors.
     * Only available for head-descriptor.
     */
    uint64_t counter;
} VuDescStateSplit;
305 
/*
 * Header of the per-queue inflight-tracking region shared with the
 * frontend (VHOST_USER_GET/SET_INFLIGHT_FD); layout is part of the
 * protocol and must not change.
 */
typedef struct VuVirtqInflight {
    /* The feature flags of this region. Now it's initialized to 0. */
    uint64_t features;

    /*
     * The version of this region. It's 1 currently.
     * Zero value indicates a vm reset happened.
     */
    uint16_t version;

    /*
     * The size of VuDescStateSplit array. It's equal to the virtqueue size.
     * Backend could get it from queue size field of VhostUserInflight.
     */
    uint16_t desc_num;

    /* The head of list that track the last batch of used descriptors. */
    uint16_t last_batch_head;

    /* Storing the idx value of used ring */
    uint16_t used_idx;

    /* Used to track the state of each descriptor in descriptor table */
    VuDescStateSplit desc[];
} VuVirtqInflight;

/* Snapshot of one inflight descriptor, used when resubmitting after reconnect. */
typedef struct VuVirtqInflightDesc {
    uint16_t index;   /* head descriptor index */
    uint64_t counter; /* fetch-order counter (see VuDescStateSplit.counter) */
} VuVirtqInflightDesc;
334 
/* Run-time state of one virtqueue. */
typedef struct VuVirtq {
    VuRing vring;

    /* Inflight-descriptor tracking region shared with the frontend. */
    VuVirtqInflight *inflight;

    /* Descriptors to resubmit after an unmanaged exit/crash. */
    VuVirtqInflightDesc *resubmit_list;

    /* Number of entries in resubmit_list. */
    uint16_t resubmit_num;

    /* Running fetch-order counter (see VuDescStateSplit.counter). */
    uint64_t counter;

    /* Next head to pop */
    uint16_t last_avail_idx;

    /* Last avail_idx read from VQ. */
    uint16_t shadow_avail_idx;

    uint16_t used_idx;

    /* Last used index value we have signalled on */
    uint16_t signalled_used;

    /* Whether signalled_used is valid */
    bool signalled_used_valid;

    /* Notification enabled? */
    bool notification;

    /* Number of elements popped but not yet returned to the used ring. */
    unsigned int inuse;

    /* Handler invoked to process the queue (see vu_set_queue_handler). */
    vu_queue_handler_cb handler;

    int call_fd; /* fd used to notify the frontend (see vu_queue_notify) */
    int kick_fd; /* fd the frontend kicks to signal new buffers */
    int err_fd;  /* error notification fd */
    unsigned int enable;
    bool started;

    /* Guest addresses of our ring */
    struct vhost_vring_addr vra;
} VuVirtq;
376 
/*
 * Conditions a watched file descriptor can wake up on; values alias the
 * corresponding poll(2) event bits so they can be passed straight through.
 * (The tag spelling "Condtion" is a historical misspelling kept for
 * source compatibility.)
 */
enum VuWatchCondtion {
    VU_WATCH_IN = POLLIN,
    VU_WATCH_OUT = POLLOUT,
    VU_WATCH_PRI = POLLPRI,
    VU_WATCH_ERR = POLLERR,
    VU_WATCH_HUP = POLLHUP,
};

/* Report an unrecoverable error; @err is a human-readable message. */
typedef void (*vu_panic_cb) (VuDev *dev, const char *err);
/* Invoked when a watched fd meets @condition (VuWatchCondtion bits). */
typedef void (*vu_watch_cb) (VuDev *dev, int condition, void *data);
/* Add @fd to (or update it in) the application's watch set. */
typedef void (*vu_set_watch_cb) (VuDev *dev, int fd, int condition,
                                 vu_watch_cb cb, void *data);
/* Remove @fd from the application's watch set. */
typedef void (*vu_remove_watch_cb) (VuDev *dev, int fd);
390 
/* Local mapping of the inflight-tracking shared memory. */
typedef struct VuDevInflightInfo {
    int fd;        /* file descriptor backing the mapping */
    void *addr;    /* mapped address */
    uint64_t size; /* mapping size in bytes */
} VuDevInflightInfo;
396 
/* State of one vhost-user backend device instance. */
struct VuDev {
    /* Socket connected to the vhost-user frontend. */
    int sock;
    /* Number of valid entries in regions[]. */
    uint32_t nregions;
    /* Guest memory regions announced by the frontend. */
    VuDevRegion regions[VHOST_USER_MAX_RAM_SLOTS];
    /* Array of max_queues virtqueues. */
    VuVirtq *vq;
    VuDevInflightInfo inflight_info;
    int log_call_fd;
    /* Must be held while using backend_fd */
    pthread_mutex_t backend_mutex;
    int backend_fd;
    /* Dirty-log mapping (VHOST_USER_SET_LOG_BASE): size and address. */
    uint64_t log_size;
    uint8_t *log_table;
    /* Negotiated virtio and vhost-user protocol feature bitmasks. */
    uint64_t features;
    uint64_t protocol_features;
    /* Set once an unrecoverable error has been encountered. */
    bool broken;
    uint16_t max_queues;

    /*
     * @read_msg: custom method to read vhost-user message
     *
     * Read data from vhost_user socket fd and fill up
     * the passed VhostUserMsg *vmsg struct.
     *
     * If reading fails, it should close the received set of file
     * descriptors as socket message's auxiliary data.
     *
     * For the details, please refer to vu_message_read in libvhost-user.c
     * which will be used by default if no custom method is provided when
     * calling vu_init
     *
     * Returns: true if vhost-user message successfully received,
     *          otherwise return false.
     *
     */
    vu_read_msg_cb read_msg;

    /*
     * @set_watch: add or update the given fd to the watch set,
     * call cb when condition is met.
     */
    vu_set_watch_cb set_watch;

    /* @remove_watch: remove the given fd from the watch set */
    vu_remove_watch_cb remove_watch;

    /*
     * @panic: encountered an unrecoverable error, you may try to re-initialize
     */
    vu_panic_cb panic;
    /* Device callbacks supplied by the library user. */
    const VuDevIface *iface;

    /* Postcopy data */
    int postcopy_ufd; /* presumably a userfaultfd — confirm in libvhost-user.c */
    bool postcopy_listening;
};
452 
/*
 * An element popped from a virtqueue (see vu_queue_pop), with its
 * scatter-gather segments split into "in" and "out" lists.
 */
typedef struct VuVirtqElement {
    unsigned int index;   /* head descriptor index of the chain */
    unsigned int out_num; /* number of entries in out_sg */
    unsigned int in_num;  /* number of entries in in_sg */
    struct iovec *in_sg;  /* segments the backend writes into */
    struct iovec *out_sg; /* segments the backend reads from */
} VuVirtqElement;
460 
461 /**
462  * vu_init:
463  * @dev: a VuDev context
464  * @max_queues: maximum number of virtqueues
465  * @socket: the socket connected to vhost-user frontend
466  * @panic: a panic callback
467  * @set_watch: a set_watch callback
468  * @remove_watch: a remove_watch callback
469  * @iface: a VuDevIface structure with vhost-user device callbacks
470  *
471  * Initializes a VuDev vhost-user context.
472  *
473  * Returns: true on success, false on failure.
474  **/
475 bool vu_init(VuDev *dev,
476              uint16_t max_queues,
477              int socket,
478              vu_panic_cb panic,
479              vu_read_msg_cb read_msg,
480              vu_set_watch_cb set_watch,
481              vu_remove_watch_cb remove_watch,
482              const VuDevIface *iface);
483 
484 
485 /**
486  * vu_deinit:
487  * @dev: a VuDev context
488  *
489  * Cleans up the VuDev context
490  */
491 void vu_deinit(VuDev *dev);
492 
493 
494 /**
495  * vu_request_to_string: return string for vhost message request
496  * @req: VhostUserMsg request
497  *
498  * Returns a const string, do not free.
499  */
500 const char *vu_request_to_string(unsigned int req);
501 
502 /**
503  * vu_dispatch:
504  * @dev: a VuDev context
505  *
506  * Process one vhost-user message.
507  *
508  * Returns: TRUE on success, FALSE on failure.
509  */
510 bool vu_dispatch(VuDev *dev);
511 
512 /**
513  * vu_gpa_to_va:
514  * @dev: a VuDev context
515  * @plen: guest memory size
516  * @guest_addr: guest address
517  *
518  * Translate a guest address to a pointer. Returns NULL on failure.
519  */
520 void *vu_gpa_to_va(VuDev *dev, uint64_t *plen, uint64_t guest_addr);
521 
522 /**
523  * vu_get_queue:
524  * @dev: a VuDev context
525  * @qidx: queue index
526  *
527  * Returns the queue number @qidx.
528  */
529 VuVirtq *vu_get_queue(VuDev *dev, int qidx);
530 
531 /**
532  * vu_set_queue_handler:
533  * @dev: a VuDev context
534  * @vq: a VuVirtq queue
535  * @handler: the queue handler callback
536  *
537  * Set the queue handler. This function may be called several times
538  * for the same queue. If called with NULL @handler, the handler is
539  * removed.
540  */
541 void vu_set_queue_handler(VuDev *dev, VuVirtq *vq,
542                           vu_queue_handler_cb handler);
543 
544 /**
545  * vu_set_queue_host_notifier:
546  * @dev: a VuDev context
547  * @vq: a VuVirtq queue
548  * @fd: a file descriptor
549  * @size: host page size
550  * @offset: notifier offset in @fd file
551  *
552  * Set queue's host notifier. This function may be called several
553  * times for the same queue. If called with -1 @fd, the notifier
554  * is removed.
555  */
556 bool vu_set_queue_host_notifier(VuDev *dev, VuVirtq *vq, int fd,
557                                 int size, int offset);
558 
559 /**
560  * vu_lookup_shared_object:
561  * @dev: a VuDev context
562  * @uuid: UUID of the shared object
563  * @dmabuf_fd: output dma-buf file descriptor
564  *
565  * Lookup for a virtio shared object (i.e., dma-buf fd) associated with the
566  * received UUID. Result, if found, is stored in the dmabuf_fd argument.
567  *
568  * Returns: whether the virtio object was found.
569  */
570 bool vu_lookup_shared_object(VuDev *dev, unsigned char uuid[UUID_LEN],
571                              int *dmabuf_fd);
572 
573 /**
574  * vu_add_shared_object:
575  * @dev: a VuDev context
576  * @uuid: UUID of the shared object
577  *
578  * Registers this back-end as the exporter for the object associated with
579  * the received UUID.
580  *
581  * Returns: TRUE on success, FALSE on failure.
582  */
583 bool vu_add_shared_object(VuDev *dev, unsigned char uuid[UUID_LEN]);
584 
585 /**
586  * vu_rm_shared_object:
587  * @dev: a VuDev context
588  * @uuid: UUID of the shared object
589  *
590  * Removes a shared object entry (i.e., back-end entry) associated with the
591  * received UUID key from the hash table.
592  *
593  * Returns: TRUE on success, FALSE on failure.
594  */
595 bool vu_rm_shared_object(VuDev *dev, unsigned char uuid[UUID_LEN]);
596 
597 /**
598  * vu_queue_set_notification:
599  * @dev: a VuDev context
600  * @vq: a VuVirtq queue
601  * @enable: state
602  *
603  * Set whether the queue notifies (via event index or interrupt)
604  */
605 void vu_queue_set_notification(VuDev *dev, VuVirtq *vq, int enable);
606 
607 /**
608  * vu_queue_enabled:
609  * @dev: a VuDev context
610  * @vq: a VuVirtq queue
611  *
612  * Returns: whether the queue is enabled.
613  */
614 bool vu_queue_enabled(VuDev *dev, VuVirtq *vq);
615 
616 /**
617  * vu_queue_started:
618  * @dev: a VuDev context
619  * @vq: a VuVirtq queue
620  *
621  * Returns: whether the queue is started.
622  */
623 bool vu_queue_started(const VuDev *dev, const VuVirtq *vq);
624 
625 /**
626  * vu_queue_empty:
627  * @dev: a VuDev context
628  * @vq: a VuVirtq queue
629  *
630  * Returns: true if the queue is empty or not ready.
631  */
632 bool vu_queue_empty(VuDev *dev, VuVirtq *vq);
633 
634 /**
635  * vu_queue_notify:
636  * @dev: a VuDev context
637  * @vq: a VuVirtq queue
638  *
639  * Request to notify the queue via callfd (skipped if unnecessary)
640  */
641 void vu_queue_notify(VuDev *dev, VuVirtq *vq);
642 
/**
 * vu_config_change_msg:
 * @dev: a VuDev context
 *
 * Notify the frontend that the device configuration changed — presumably
 * by sending VHOST_USER_BACKEND_CONFIG_CHANGE_MSG; confirm against the
 * implementation in libvhost-user.c.
 */
void vu_config_change_msg(VuDev *dev);
644 
645 /**
646  * vu_queue_notify_sync:
647  * @dev: a VuDev context
648  * @vq: a VuVirtq queue
649  *
650  * Request to notify the queue via callfd (skipped if unnecessary)
651  * or sync message if possible.
652  */
653 void vu_queue_notify_sync(VuDev *dev, VuVirtq *vq);
654 
655 /**
656  * vu_queue_pop:
657  * @dev: a VuDev context
658  * @vq: a VuVirtq queue
659  * @sz: the size of struct to return (must be >= VuVirtqElement)
660  *
661  * Returns: a VuVirtqElement filled from the queue or NULL. The
662  * returned element must be free()-d by the caller.
663  */
664 void *vu_queue_pop(VuDev *dev, VuVirtq *vq, size_t sz);
665 
666 
667 /**
668  * vu_queue_unpop:
669  * @dev: a VuDev context
670  * @vq: a VuVirtq queue
671  * @elem: The #VuVirtqElement
672  * @len: number of bytes written
673  *
674  * Pretend the most recent element wasn't popped from the virtqueue.  The next
675  * call to vu_queue_pop() will refetch the element.
676  */
677 void vu_queue_unpop(VuDev *dev, VuVirtq *vq, VuVirtqElement *elem,
678                     size_t len);
679 
680 /**
681  * vu_queue_rewind:
682  * @dev: a VuDev context
683  * @vq: a VuVirtq queue
684  * @num: number of elements to push back
685  *
686  * Pretend that elements weren't popped from the virtqueue.  The next
687  * virtqueue_pop() will refetch the oldest element.
688  *
689  * Returns: true on success, false if @num is greater than the number of in use
690  * elements.
691  */
692 bool vu_queue_rewind(VuDev *dev, VuVirtq *vq, unsigned int num);
693 
694 /**
695  * vu_queue_fill:
696  * @dev: a VuDev context
697  * @vq: a VuVirtq queue
698  * @elem: a VuVirtqElement
699  * @len: length in bytes to write
700  * @idx: optional offset for the used ring index (0 in general)
701  *
702  * Fill the used ring with @elem element.
703  */
704 void vu_queue_fill(VuDev *dev, VuVirtq *vq,
705                    const VuVirtqElement *elem,
706                    unsigned int len, unsigned int idx);
707 
708 /**
709  * vu_queue_push:
710  * @dev: a VuDev context
711  * @vq: a VuVirtq queue
712  * @elem: a VuVirtqElement
713  * @len: length in bytes to write
714  *
715  * Helper that combines vu_queue_fill() with a vu_queue_flush().
716  */
717 void vu_queue_push(VuDev *dev, VuVirtq *vq,
718                    const VuVirtqElement *elem, unsigned int len);
719 
720 /**
721  * vu_queue_flush:
722  * @dev: a VuDev context
723  * @vq: a VuVirtq queue
724  * @num: number of elements to flush
725  *
726  * Mark the last number of elements as done (used.idx is updated by
727  * num elements).
728 */
729 void vu_queue_flush(VuDev *dev, VuVirtq *vq, unsigned int num);
730 
731 /**
732  * vu_queue_get_avail_bytes:
733  * @dev: a VuDev context
734  * @vq: a VuVirtq queue
735  * @in_bytes: in bytes
736  * @out_bytes: out bytes
737  * @max_in_bytes: stop counting after max_in_bytes
738  * @max_out_bytes: stop counting after max_out_bytes
739  *
740  * Count the number of available bytes, up to max_in_bytes/max_out_bytes.
741  */
742 void vu_queue_get_avail_bytes(VuDev *vdev, VuVirtq *vq, unsigned int *in_bytes,
743                               unsigned int *out_bytes,
744                               unsigned max_in_bytes, unsigned max_out_bytes);
745 
746 /**
747  * vu_queue_avail_bytes:
748  * @dev: a VuDev context
749  * @vq: a VuVirtq queue
750  * @in_bytes: expected in bytes
751  * @out_bytes: expected out bytes
752  *
753  * Returns: true if in_bytes <= in_total && out_bytes <= out_total
754  */
755 bool vu_queue_avail_bytes(VuDev *dev, VuVirtq *vq, unsigned int in_bytes,
756                           unsigned int out_bytes);
757 
758 #endif /* LIBVHOST_USER_H */
759