1 /*
2  * Vhost User library
3  *
4  * Copyright (c) 2016 Red Hat, Inc.
5  *
6  * Authors:
7  *  Victor Kaplansky <victork@redhat.com>
8  *  Marc-André Lureau <mlureau@redhat.com>
9  *
10  * This work is licensed under the terms of the GNU GPL, version 2 or
11  * later.  See the COPYING file in the top-level directory.
12  */
13 
14 #ifndef LIBVHOST_USER_H
15 #define LIBVHOST_USER_H
16 
17 #include <stdint.h>
18 #include <stdbool.h>
19 #include <stddef.h>
20 #include <poll.h>
21 #include <linux/vhost.h>
22 #include <pthread.h>
23 #include "standard-headers/linux/virtio_ring.h"
24 
25 /* Based on qemu/hw/virtio/vhost-user.c */
26 #define VHOST_USER_F_PROTOCOL_FEATURES 30
27 #define VHOST_LOG_PAGE 4096
28 
29 #define VIRTQUEUE_MAX_SIZE 1024
30 
31 #define VHOST_MEMORY_BASELINE_NREGIONS 8
32 
33 /*
 * vhost in the kernel usually supports 509 mem slots. 509 used to be the
 * KVM limit: KVM supported 512 slots, but 3 were reserved for internal
 * purposes. This limit is sufficient to support many DIMMs and virtio-mem
 * in "dynamic-memslots" mode.
38  */
39 #define VHOST_USER_MAX_RAM_SLOTS 509
40 
41 #define VHOST_USER_HDR_SIZE offsetof(VhostUserMsg, payload.u64)
42 
43 typedef enum VhostSetConfigType {
44     VHOST_SET_CONFIG_TYPE_FRONTEND = 0,
45     VHOST_SET_CONFIG_TYPE_MIGRATION = 1,
46 } VhostSetConfigType;
47 
48 /*
49  * Maximum size of virtio device config space
50  */
51 #define VHOST_USER_MAX_CONFIG_SIZE 256
52 
53 enum VhostUserProtocolFeature {
54     VHOST_USER_PROTOCOL_F_MQ = 0,
55     VHOST_USER_PROTOCOL_F_LOG_SHMFD = 1,
56     VHOST_USER_PROTOCOL_F_RARP = 2,
57     VHOST_USER_PROTOCOL_F_REPLY_ACK = 3,
58     VHOST_USER_PROTOCOL_F_NET_MTU = 4,
59     VHOST_USER_PROTOCOL_F_BACKEND_REQ = 5,
60     VHOST_USER_PROTOCOL_F_CROSS_ENDIAN = 6,
61     VHOST_USER_PROTOCOL_F_CRYPTO_SESSION = 7,
62     VHOST_USER_PROTOCOL_F_PAGEFAULT = 8,
63     VHOST_USER_PROTOCOL_F_CONFIG = 9,
64     VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD = 10,
65     VHOST_USER_PROTOCOL_F_HOST_NOTIFIER = 11,
66     VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD = 12,
67     VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS = 14,
68     VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS = 15,
69     /* Feature 16 is reserved for VHOST_USER_PROTOCOL_F_STATUS. */
70     /* Feature 17 reserved for VHOST_USER_PROTOCOL_F_XEN_MMAP. */
71     VHOST_USER_PROTOCOL_F_SHARED_OBJECT = 18,
72     VHOST_USER_PROTOCOL_F_MAX
73 };
74 
75 #define VHOST_USER_PROTOCOL_FEATURE_MASK ((1 << VHOST_USER_PROTOCOL_F_MAX) - 1)
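
/*
 * Protocol feature bits are used as shift counts into 64-bit masks. For
 * example (illustrative sketch; "dev" is a VuDev, defined below), a back-end
 * can test whether a protocol feature was negotiated like this:
 *
 *     bool has_mq = dev->protocol_features &
 *                   (1ULL << VHOST_USER_PROTOCOL_F_MQ);
 */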
76 
77 typedef enum VhostUserRequest {
78     VHOST_USER_NONE = 0,
79     VHOST_USER_GET_FEATURES = 1,
80     VHOST_USER_SET_FEATURES = 2,
81     VHOST_USER_SET_OWNER = 3,
82     VHOST_USER_RESET_OWNER = 4,
83     VHOST_USER_SET_MEM_TABLE = 5,
84     VHOST_USER_SET_LOG_BASE = 6,
85     VHOST_USER_SET_LOG_FD = 7,
86     VHOST_USER_SET_VRING_NUM = 8,
87     VHOST_USER_SET_VRING_ADDR = 9,
88     VHOST_USER_SET_VRING_BASE = 10,
89     VHOST_USER_GET_VRING_BASE = 11,
90     VHOST_USER_SET_VRING_KICK = 12,
91     VHOST_USER_SET_VRING_CALL = 13,
92     VHOST_USER_SET_VRING_ERR = 14,
93     VHOST_USER_GET_PROTOCOL_FEATURES = 15,
94     VHOST_USER_SET_PROTOCOL_FEATURES = 16,
95     VHOST_USER_GET_QUEUE_NUM = 17,
96     VHOST_USER_SET_VRING_ENABLE = 18,
97     VHOST_USER_SEND_RARP = 19,
98     VHOST_USER_NET_SET_MTU = 20,
99     VHOST_USER_SET_BACKEND_REQ_FD = 21,
100     VHOST_USER_IOTLB_MSG = 22,
101     VHOST_USER_SET_VRING_ENDIAN = 23,
102     VHOST_USER_GET_CONFIG = 24,
103     VHOST_USER_SET_CONFIG = 25,
104     VHOST_USER_CREATE_CRYPTO_SESSION = 26,
105     VHOST_USER_CLOSE_CRYPTO_SESSION = 27,
106     VHOST_USER_POSTCOPY_ADVISE  = 28,
107     VHOST_USER_POSTCOPY_LISTEN  = 29,
108     VHOST_USER_POSTCOPY_END     = 30,
109     VHOST_USER_GET_INFLIGHT_FD = 31,
110     VHOST_USER_SET_INFLIGHT_FD = 32,
111     VHOST_USER_GPU_SET_SOCKET = 33,
112     VHOST_USER_VRING_KICK = 35,
113     VHOST_USER_GET_MAX_MEM_SLOTS = 36,
114     VHOST_USER_ADD_MEM_REG = 37,
115     VHOST_USER_REM_MEM_REG = 38,
116     VHOST_USER_GET_SHARED_OBJECT = 41,
117     VHOST_USER_MAX
118 } VhostUserRequest;
119 
120 typedef enum VhostUserBackendRequest {
121     VHOST_USER_BACKEND_NONE = 0,
122     VHOST_USER_BACKEND_IOTLB_MSG = 1,
123     VHOST_USER_BACKEND_CONFIG_CHANGE_MSG = 2,
124     VHOST_USER_BACKEND_VRING_HOST_NOTIFIER_MSG = 3,
125     VHOST_USER_BACKEND_VRING_CALL = 4,
126     VHOST_USER_BACKEND_VRING_ERR = 5,
127     VHOST_USER_BACKEND_SHARED_OBJECT_ADD = 6,
128     VHOST_USER_BACKEND_SHARED_OBJECT_REMOVE = 7,
129     VHOST_USER_BACKEND_SHARED_OBJECT_LOOKUP = 8,
130     VHOST_USER_BACKEND_MAX
131 }  VhostUserBackendRequest;
132 
133 typedef struct VhostUserMemoryRegion {
134     uint64_t guest_phys_addr;
135     uint64_t memory_size;
136     uint64_t userspace_addr;
137     uint64_t mmap_offset;
138 } VhostUserMemoryRegion;
139 
140 #define VHOST_USER_MEM_REG_SIZE (sizeof(VhostUserMemoryRegion))
141 
142 typedef struct VhostUserMemory {
143     uint32_t nregions;
144     uint32_t padding;
145     VhostUserMemoryRegion regions[VHOST_MEMORY_BASELINE_NREGIONS];
146 } VhostUserMemory;
147 
148 typedef struct VhostUserMemRegMsg {
149     uint64_t padding;
150     VhostUserMemoryRegion region;
151 } VhostUserMemRegMsg;
152 
153 typedef struct VhostUserLog {
154     uint64_t mmap_size;
155     uint64_t mmap_offset;
156 } VhostUserLog;
157 
158 typedef struct VhostUserConfig {
159     uint32_t offset;
160     uint32_t size;
161     uint32_t flags;
162     uint8_t region[VHOST_USER_MAX_CONFIG_SIZE];
163 } VhostUserConfig;
164 
165 static VhostUserConfig c __attribute__ ((unused));
166 #define VHOST_USER_CONFIG_HDR_SIZE (sizeof(c.offset) \
167                                    + sizeof(c.size) \
168                                    + sizeof(c.flags))
169 
170 typedef struct VhostUserVringArea {
171     uint64_t u64;
172     uint64_t size;
173     uint64_t offset;
174 } VhostUserVringArea;
175 
176 typedef struct VhostUserInflight {
177     uint64_t mmap_size;
178     uint64_t mmap_offset;
179     uint16_t num_queues;
180     uint16_t queue_size;
181 } VhostUserInflight;
182 
183 #define UUID_LEN 16
184 
185 typedef struct VhostUserShared {
186     unsigned char uuid[UUID_LEN];
187 } VhostUserShared;
188 
189 #if defined(_WIN32) && (defined(__x86_64__) || defined(__i386__))
190 # define VU_PACKED __attribute__((gcc_struct, packed))
191 #else
192 # define VU_PACKED __attribute__((packed))
193 #endif
194 
195 typedef struct VhostUserMsg {
196     int request;
197 
198 #define VHOST_USER_VERSION_MASK     (0x3)
199 #define VHOST_USER_REPLY_MASK       (0x1 << 2)
200 #define VHOST_USER_NEED_REPLY_MASK  (0x1 << 3)
201     uint32_t flags;
    uint32_t size; /* size of the following payload */
203 
204     union {
205 #define VHOST_USER_VRING_IDX_MASK   (0xff)
206 #define VHOST_USER_VRING_NOFD_MASK  (0x1 << 8)
207         uint64_t u64;
208         struct vhost_vring_state state;
209         struct vhost_vring_addr addr;
210         VhostUserMemory memory;
211         VhostUserMemRegMsg memreg;
212         VhostUserLog log;
213         VhostUserConfig config;
214         VhostUserVringArea area;
215         VhostUserInflight inflight;
216         VhostUserShared object;
217     } payload;
218 
219     int fds[VHOST_MEMORY_BASELINE_NREGIONS];
220     int fd_num;
221     uint8_t *data;
222 } VU_PACKED VhostUserMsg;
223 
224 typedef struct VuDevRegion {
225     /* Guest Physical address. */
226     uint64_t gpa;
227     /* Memory region size. */
228     uint64_t size;
229     /* QEMU virtual address (userspace). */
230     uint64_t qva;
231     /* Starting offset in our mmaped space. */
232     uint64_t mmap_offset;
233     /* Start address of mmaped space. */
234     uint64_t mmap_addr;
235 } VuDevRegion;
236 
237 typedef struct VuDev VuDev;
238 
239 typedef uint64_t (*vu_get_features_cb) (VuDev *dev);
240 typedef void (*vu_set_features_cb) (VuDev *dev, uint64_t features);
241 typedef int (*vu_process_msg_cb) (VuDev *dev, VhostUserMsg *vmsg,
242                                   int *do_reply);
243 typedef bool (*vu_read_msg_cb) (VuDev *dev, int sock, VhostUserMsg *vmsg);
244 typedef void (*vu_queue_set_started_cb) (VuDev *dev, int qidx, bool started);
245 typedef bool (*vu_queue_is_processed_in_order_cb) (VuDev *dev, int qidx);
246 typedef int (*vu_get_config_cb) (VuDev *dev, uint8_t *config, uint32_t len);
247 typedef int (*vu_set_config_cb) (VuDev *dev, const uint8_t *data,
248                                  uint32_t offset, uint32_t size,
249                                  uint32_t flags);
250 typedef int (*vu_get_shared_object_cb) (VuDev *dev, const unsigned char *uuid);
251 
252 typedef struct VuDevIface {
253     /* called by VHOST_USER_GET_FEATURES to get the features bitmask */
254     vu_get_features_cb get_features;
255     /* enable vhost implementation features */
256     vu_set_features_cb set_features;
257     /* get the protocol feature bitmask from the underlying vhost
258      * implementation */
259     vu_get_features_cb get_protocol_features;
260     /* enable protocol features in the underlying vhost implementation. */
261     vu_set_features_cb set_protocol_features;
    /*
     * process_msg is called for each vhost-user message received;
     * libvhost-user's own processing is skipped if the return value is != 0.
     */
264     vu_process_msg_cb process_msg;
265     /* tells when queues can be processed */
266     vu_queue_set_started_cb queue_set_started;
267     /*
268      * If the queue is processed in order, in which case it will be
269      * resumed to vring.used->idx. This can help to support resuming
270      * on unmanaged exit/crash.
271      */
272     vu_queue_is_processed_in_order_cb queue_is_processed_in_order;
273     /* get the config space of the device */
274     vu_get_config_cb get_config;
275     /* set the config space of the device */
276     vu_set_config_cb set_config;
277     /* get virtio shared object from the underlying vhost implementation. */
278     vu_get_shared_object_cb get_shared_object;
279 } VuDevIface;
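
/*
 * Illustrative sketch (not part of the API): a device back-end typically
 * defines its callbacks and collects them in a VuDevIface; the callback
 * names used here are hypothetical:
 *
 *     static const VuDevIface my_iface = {
 *         .get_features = my_get_features,
 *         .set_features = my_set_features,
 *         .queue_set_started = my_queue_set_started,
 *     };
 */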
280 
281 typedef void (*vu_queue_handler_cb) (VuDev *dev, int qidx);
282 
283 typedef struct VuRing {
284     unsigned int num;
285     struct vring_desc *desc;
286     struct vring_avail *avail;
287     struct vring_used *used;
288     uint64_t log_guest_addr;
289     uint32_t flags;
290 } VuRing;
291 
292 typedef struct VuDescStateSplit {
    /* Indicates whether this descriptor is inflight.
     * Only valid for head descriptors. */
295     uint8_t inflight;
296 
297     /* Padding */
298     uint8_t padding[5];
299 
    /* Maintains a list of the last batch of used descriptors.
     * Only valid when batching is used for submission. */
302     uint16_t next;
303 
    /* Preserves the order in which available descriptors were fetched.
     * Only valid for head descriptors. */
306     uint64_t counter;
307 } VuDescStateSplit;
308 
309 typedef struct VuVirtqInflight {
    /* The feature flags of this region. Currently initialized to 0. */
311     uint64_t features;
312 
    /* The version of this region. It is currently 1.
     * A zero value indicates that a VM reset happened. */
315     uint16_t version;
316 
317     /*
318      * The size of VuDescStateSplit array. It's equal to the virtqueue size.
     * The backend can get it from the queue_size field of VhostUserInflight.
320      */
321     uint16_t desc_num;
322 
    /* The head of the list that tracks the last batch of used descriptors. */
324     uint16_t last_batch_head;
325 
    /* Stores the idx value of the used ring. */
327     uint16_t used_idx;
328 
329     /* Used to track the state of each descriptor in descriptor table */
330     VuDescStateSplit desc[];
331 } VuVirtqInflight;
332 
333 typedef struct VuVirtqInflightDesc {
334     uint16_t index;
335     uint64_t counter;
336 } VuVirtqInflightDesc;
337 
338 typedef struct VuVirtq {
339     VuRing vring;
340 
341     VuVirtqInflight *inflight;
342 
343     VuVirtqInflightDesc *resubmit_list;
344 
345     uint16_t resubmit_num;
346 
347     uint64_t counter;
348 
349     /* Next head to pop */
350     uint16_t last_avail_idx;
351 
352     /* Last avail_idx read from VQ. */
353     uint16_t shadow_avail_idx;
354 
355     uint16_t used_idx;
356 
357     /* Last used index value we have signalled on */
358     uint16_t signalled_used;
359 
    /* Whether signalled_used is valid */
361     bool signalled_used_valid;
362 
363     /* Notification enabled? */
364     bool notification;
365 
366     unsigned int inuse;
367 
368     vu_queue_handler_cb handler;
369 
370     int call_fd;
371     int kick_fd;
372     int err_fd;
373     unsigned int enable;
374     bool started;
375 
376     /* Guest addresses of our ring */
377     struct vhost_vring_addr vra;
378 } VuVirtq;
379 
enum VuWatchCondition {
381     VU_WATCH_IN = POLLIN,
382     VU_WATCH_OUT = POLLOUT,
383     VU_WATCH_PRI = POLLPRI,
384     VU_WATCH_ERR = POLLERR,
385     VU_WATCH_HUP = POLLHUP,
386 };
387 
388 typedef void (*vu_panic_cb) (VuDev *dev, const char *err);
389 typedef void (*vu_watch_cb) (VuDev *dev, int condition, void *data);
390 typedef void (*vu_set_watch_cb) (VuDev *dev, int fd, int condition,
391                                  vu_watch_cb cb, void *data);
392 typedef void (*vu_remove_watch_cb) (VuDev *dev, int fd);
393 
394 typedef struct VuDevInflightInfo {
395     int fd;
396     void *addr;
397     uint64_t size;
398 } VuDevInflightInfo;
399 
400 struct VuDev {
401     int sock;
402     uint32_t nregions;
403     VuDevRegion *regions;
404     VuVirtq *vq;
405     VuDevInflightInfo inflight_info;
406     int log_call_fd;
407     /* Must be held while using backend_fd */
408     pthread_mutex_t backend_mutex;
409     int backend_fd;
410     uint64_t log_size;
411     uint8_t *log_table;
412     uint64_t features;
413     uint64_t protocol_features;
414     bool broken;
415     uint16_t max_queues;
416 
417     /*
     * @read_msg: custom method to read a vhost-user message
     *
     * Reads data from the vhost-user socket fd and fills in the passed
     * VhostUserMsg *vmsg struct.
     *
     * If reading fails, it should close any file descriptors received as
     * the socket message's ancillary data.
     *
     * For details, refer to vu_message_read in libvhost-user.c, which is
     * used by default if no custom method is provided when calling
     * vu_init().
     *
     * Returns: true if a vhost-user message was successfully received,
     *          false otherwise.
433      */
434     vu_read_msg_cb read_msg;
435 
436     /*
437      * @set_watch: add or update the given fd to the watch set,
438      * call cb when condition is met.
439      */
440     vu_set_watch_cb set_watch;
441 
442     /* @remove_watch: remove the given fd from the watch set */
443     vu_remove_watch_cb remove_watch;
444 
445     /*
446      * @panic: encountered an unrecoverable error, you may try to re-initialize
447      */
448     vu_panic_cb panic;
449     const VuDevIface *iface;
450 
451     /* Postcopy data */
452     int postcopy_ufd;
453     bool postcopy_listening;
454 };
455 
456 typedef struct VuVirtqElement {
457     unsigned int index;
458     unsigned int out_num;
459     unsigned int in_num;
460     struct iovec *in_sg;
461     struct iovec *out_sg;
462 } VuVirtqElement;
463 
464 /**
465  * vu_init:
466  * @dev: a VuDev context
467  * @max_queues: maximum number of virtqueues
468  * @socket: the socket connected to vhost-user frontend
 * @panic: a panic callback
 * @read_msg: a custom message-reading callback, or NULL to use the default
470  * @set_watch: a set_watch callback
471  * @remove_watch: a remove_watch callback
472  * @iface: a VuDevIface structure with vhost-user device callbacks
473  *
474  * Initializes a VuDev vhost-user context.
475  *
476  * Returns: true on success, false on failure.
477  **/
478 bool vu_init(VuDev *dev,
479              uint16_t max_queues,
480              int socket,
481              vu_panic_cb panic,
482              vu_read_msg_cb read_msg,
483              vu_set_watch_cb set_watch,
484              vu_remove_watch_cb remove_watch,
485              const VuDevIface *iface);
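
/*
 * Illustrative sketch, assuming a connected socket "sock", the hypothetical
 * callbacks my_panic(), my_set_watch() and my_remove_watch() (usually thin
 * wrappers around the application's event loop) and a VuDevIface "my_iface"
 * as sketched above; passing NULL for @read_msg selects the default reader:
 *
 *     VuDev dev;
 *
 *     if (!vu_init(&dev, 2, sock, my_panic, NULL,
 *                  my_set_watch, my_remove_watch, &my_iface)) {
 *         fprintf(stderr, "vu_init failed\n");
 *     }
 */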
486 
487 
488 /**
489  * vu_deinit:
490  * @dev: a VuDev context
491  *
492  * Cleans up the VuDev context
493  */
494 void vu_deinit(VuDev *dev);
495 
496 
497 /**
498  * vu_request_to_string: return string for vhost message request
499  * @req: VhostUserMsg request
500  *
 * Returns: a const string; do not free it.
502  */
503 const char *vu_request_to_string(unsigned int req);
504 
505 /**
506  * vu_dispatch:
507  * @dev: a VuDev context
508  *
509  * Process one vhost-user message.
510  *
 * Returns: true on success, false on failure.
512  */
513 bool vu_dispatch(VuDev *dev);
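
/*
 * Illustrative sketch: vu_dispatch() is normally called from the watch
 * callback registered for the vhost-user socket (names are hypothetical);
 * a false return usually means the connection is broken and the device
 * should be torn down or re-initialized:
 *
 *     static void my_sock_watch(VuDev *dev, int condition, void *data)
 *     {
 *         if (!vu_dispatch(dev)) {
 *             my_teardown(dev);
 *         }
 *     }
 */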
514 
515 /**
516  * vu_gpa_to_va:
517  * @dev: a VuDev context
 * @plen: in: number of bytes to access; out: number of bytes that can be
 *        accessed contiguously from the returned pointer
519  * @guest_addr: guest address
520  *
521  * Translate a guest address to a pointer. Returns NULL on failure.
522  */
523 void *vu_gpa_to_va(VuDev *dev, uint64_t *plen, uint64_t guest_addr);
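
/*
 * Illustrative sketch: translating a guest buffer and checking that the
 * whole requested range is contiguous in the mapping ("gpa" and "expected"
 * are assumed inputs):
 *
 *     uint64_t len = expected;
 *     void *ptr = vu_gpa_to_va(dev, &len, gpa);
 *
 *     if (!ptr || len != expected) {
 *         return -1;
 *     }
 */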
524 
525 /**
526  * vu_get_queue:
527  * @dev: a VuDev context
528  * @qidx: queue index
529  *
 * Returns: the queue at index @qidx.
531  */
532 VuVirtq *vu_get_queue(VuDev *dev, int qidx);
533 
534 /**
535  * vu_set_queue_handler:
536  * @dev: a VuDev context
537  * @vq: a VuVirtq queue
538  * @handler: the queue handler callback
539  *
540  * Set the queue handler. This function may be called several times
541  * for the same queue. If called with NULL @handler, the handler is
542  * removed.
543  */
544 void vu_set_queue_handler(VuDev *dev, VuVirtq *vq,
545                           vu_queue_handler_cb handler);
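
/*
 * Illustrative sketch: handlers are typically installed and removed from
 * the queue_set_started callback of VuDevIface, using a hypothetical
 * my_queue_handler():
 *
 *     static void my_queue_set_started(VuDev *dev, int qidx, bool started)
 *     {
 *         VuVirtq *vq = vu_get_queue(dev, qidx);
 *
 *         vu_set_queue_handler(dev, vq, started ? my_queue_handler : NULL);
 *     }
 */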
546 
547 /**
548  * vu_set_queue_host_notifier:
549  * @dev: a VuDev context
550  * @vq: a VuVirtq queue
551  * @fd: a file descriptor
552  * @size: host page size
553  * @offset: notifier offset in @fd file
554  *
555  * Set queue's host notifier. This function may be called several
556  * times for the same queue. If called with -1 @fd, the notifier
 * is removed.
 *
 * Returns: true on success, false on failure.
 */
559 bool vu_set_queue_host_notifier(VuDev *dev, VuVirtq *vq, int fd,
560                                 int size, int offset);
561 
562 /**
563  * vu_lookup_shared_object:
564  * @dev: a VuDev context
565  * @uuid: UUID of the shared object
566  * @dmabuf_fd: output dma-buf file descriptor
567  *
 * Look up a virtio shared object (i.e., a dma-buf fd) associated with the
 * received UUID. The result, if found, is stored in the @dmabuf_fd argument.
570  *
571  * Returns: whether the virtio object was found.
572  */
573 bool vu_lookup_shared_object(VuDev *dev, unsigned char uuid[UUID_LEN],
574                              int *dmabuf_fd);
575 
576 /**
577  * vu_add_shared_object:
578  * @dev: a VuDev context
579  * @uuid: UUID of the shared object
580  *
581  * Registers this back-end as the exporter for the object associated with
582  * the received UUID.
583  *
 * Returns: true on success, false on failure.
585  */
586 bool vu_add_shared_object(VuDev *dev, unsigned char uuid[UUID_LEN]);
587 
588 /**
589  * vu_rm_shared_object:
590  * @dev: a VuDev context
591  * @uuid: UUID of the shared object
592  *
593  * Removes a shared object entry (i.e., back-end entry) associated with the
594  * received UUID key from the hash table.
595  *
 * Returns: true on success, false on failure.
597  */
598 bool vu_rm_shared_object(VuDev *dev, unsigned char uuid[UUID_LEN]);
599 
600 /**
601  * vu_queue_set_notification:
602  * @dev: a VuDev context
603  * @vq: a VuVirtq queue
 * @enable: whether to enable notifications
 *
 * Set whether the device wants to be notified of new available buffers
 * (via the event index or the used ring flags).
607  */
608 void vu_queue_set_notification(VuDev *dev, VuVirtq *vq, int enable);
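
/*
 * Illustrative sketch of the usual notification-suppression pattern:
 * disable notifications while draining the queue, re-enable them, then
 * re-check for buffers that may have arrived in between (my_process() is
 * hypothetical; see vu_queue_pop() and vu_queue_empty() below):
 *
 *     VuVirtqElement *elem;
 *
 *     do {
 *         vu_queue_set_notification(dev, vq, 0);
 *         while ((elem = vu_queue_pop(dev, vq, sizeof(*elem)))) {
 *             my_process(dev, vq, elem);
 *             free(elem);
 *         }
 *         vu_queue_set_notification(dev, vq, 1);
 *     } while (!vu_queue_empty(dev, vq));
 */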
609 
610 /**
611  * vu_queue_enabled:
612  * @dev: a VuDev context
613  * @vq: a VuVirtq queue
614  *
615  * Returns: whether the queue is enabled.
616  */
617 bool vu_queue_enabled(VuDev *dev, VuVirtq *vq);
618 
619 /**
620  * vu_queue_started:
621  * @dev: a VuDev context
622  * @vq: a VuVirtq queue
623  *
624  * Returns: whether the queue is started.
625  */
626 bool vu_queue_started(const VuDev *dev, const VuVirtq *vq);
627 
628 /**
629  * vu_queue_empty:
630  * @dev: a VuDev context
631  * @vq: a VuVirtq queue
632  *
633  * Returns: true if the queue is empty or not ready.
634  */
635 bool vu_queue_empty(VuDev *dev, VuVirtq *vq);
636 
637 /**
638  * vu_queue_notify:
639  * @dev: a VuDev context
640  * @vq: a VuVirtq queue
641  *
642  * Request to notify the queue via callfd (skipped if unnecessary)
643  */
644 void vu_queue_notify(VuDev *dev, VuVirtq *vq);
645 
/**
 * vu_config_change_msg:
 * @dev: a VuDev context
 *
 * Notify the front-end that the device configuration space has changed
 * (sends a VHOST_USER_BACKEND_CONFIG_CHANGE_MSG).
 */
void vu_config_change_msg(VuDev *dev);
647 
648 /**
649  * vu_queue_notify_sync:
650  * @dev: a VuDev context
651  * @vq: a VuVirtq queue
652  *
653  * Request to notify the queue via callfd (skipped if unnecessary)
654  * or sync message if possible.
655  */
656 void vu_queue_notify_sync(VuDev *dev, VuVirtq *vq);
657 
658 /**
659  * vu_queue_pop:
660  * @dev: a VuDev context
661  * @vq: a VuVirtq queue
 * @sz: the size of the struct to return (must be >= sizeof(VuVirtqElement))
663  *
664  * Returns: a VuVirtqElement filled from the queue or NULL. The
665  * returned element must be free()-d by the caller.
666  */
667 void *vu_queue_pop(VuDev *dev, VuVirtq *vq, size_t sz);
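
/*
 * Illustrative sketch of a request/response style queue handler: pop an
 * element, read the driver-provided data from out_sg, write the response
 * into in_sg, then push the element back with the number of response bytes
 * written (my_handle_request() is hypothetical):
 *
 *     static void my_queue_handler(VuDev *dev, int qidx)
 *     {
 *         VuVirtq *vq = vu_get_queue(dev, qidx);
 *         VuVirtqElement *elem;
 *
 *         while ((elem = vu_queue_pop(dev, vq, sizeof(*elem)))) {
 *             unsigned int written;
 *
 *             written = my_handle_request(elem->out_sg, elem->out_num,
 *                                         elem->in_sg, elem->in_num);
 *             vu_queue_push(dev, vq, elem, written);
 *             free(elem);
 *         }
 *         vu_queue_notify(dev, vq);
 *     }
 */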
668 
669 
670 /**
671  * vu_queue_unpop:
672  * @dev: a VuDev context
673  * @vq: a VuVirtq queue
674  * @elem: The #VuVirtqElement
675  * @len: number of bytes written
676  *
677  * Pretend the most recent element wasn't popped from the virtqueue.  The next
678  * call to vu_queue_pop() will refetch the element.
679  */
680 void vu_queue_unpop(VuDev *dev, VuVirtq *vq, VuVirtqElement *elem,
681                     size_t len);
682 
683 /**
684  * vu_queue_rewind:
685  * @dev: a VuDev context
686  * @vq: a VuVirtq queue
687  * @num: number of elements to push back
688  *
689  * Pretend that elements weren't popped from the virtqueue.  The next
 * vu_queue_pop() will refetch the oldest element.
691  *
692  * Returns: true on success, false if @num is greater than the number of in use
693  * elements.
694  */
695 bool vu_queue_rewind(VuDev *dev, VuVirtq *vq, unsigned int num);
696 
697 /**
698  * vu_queue_fill:
699  * @dev: a VuDev context
700  * @vq: a VuVirtq queue
701  * @elem: a VuVirtqElement
702  * @len: length in bytes to write
703  * @idx: optional offset for the used ring index (0 in general)
704  *
705  * Fill the used ring with @elem element.
706  */
707 void vu_queue_fill(VuDev *dev, VuVirtq *vq,
708                    const VuVirtqElement *elem,
709                    unsigned int len, unsigned int idx);
710 
711 /**
712  * vu_queue_push:
713  * @dev: a VuDev context
714  * @vq: a VuVirtq queue
715  * @elem: a VuVirtqElement
716  * @len: length in bytes to write
717  *
718  * Helper that combines vu_queue_fill() with a vu_queue_flush().
719  */
720 void vu_queue_push(VuDev *dev, VuVirtq *vq,
721                    const VuVirtqElement *elem, unsigned int len);
722 
723 /**
724  * vu_queue_flush:
725  * @dev: a VuDev context
726  * @vq: a VuVirtq queue
727  * @num: number of elements to flush
728  *
 * Mark the last @num elements as used (used.idx is advanced by @num).
 */
732 void vu_queue_flush(VuDev *dev, VuVirtq *vq, unsigned int num);
733 
734 /**
735  * vu_queue_get_avail_bytes:
736  * @dev: a VuDev context
737  * @vq: a VuVirtq queue
 * @in_bytes: returns the number of bytes available in device-writable
 *            ("in") buffers
 * @out_bytes: returns the number of bytes available in device-readable
 *             ("out") buffers
 * @max_in_bytes: stop counting after @max_in_bytes
 * @max_out_bytes: stop counting after @max_out_bytes
742  *
743  * Count the number of available bytes, up to max_in_bytes/max_out_bytes.
744  */
void vu_queue_get_avail_bytes(VuDev *dev, VuVirtq *vq, unsigned int *in_bytes,
746                               unsigned int *out_bytes,
747                               unsigned max_in_bytes, unsigned max_out_bytes);
748 
749 /**
750  * vu_queue_avail_bytes:
751  * @dev: a VuDev context
752  * @vq: a VuVirtq queue
 * @in_bytes: number of device-writable ("in") bytes needed
 * @out_bytes: number of device-readable ("out") bytes needed
 *
 * Returns: true if the queue has at least @in_bytes of device-writable
 * space and @out_bytes of device-readable data available.
757  */
758 bool vu_queue_avail_bytes(VuDev *dev, VuVirtq *vq, unsigned int in_bytes,
759                           unsigned int out_bytes);
760 
761 #endif /* LIBVHOST_USER_H */
762