xref: /qemu/hw/virtio/vhost-user.c (revision 75ac231c)
/*
 * vhost-user
 *
 * Copyright (c) 2013 Virtual Open Systems Sarl.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/virtio/vhost.h"
#include "hw/virtio/vhost-user.h"
#include "hw/virtio/vhost-backend.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-net.h"
#include "chardev/char-fe.h"
#include "io/channel-socket.h"
#include "sysemu/kvm.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/sockets.h"
#include "sysemu/cryptodev.h"
#include "migration/migration.h"
#include "migration/postcopy-ram.h"
#include "trace.h"
#include "exec/ramblock.h"

#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/un.h>

#include "standard-headers/linux/vhost_types.h"

#ifdef CONFIG_LINUX
#include <linux/userfaultfd.h>
#endif

#define VHOST_MEMORY_BASELINE_NREGIONS    8
#define VHOST_USER_F_PROTOCOL_FEATURES 30
#define VHOST_USER_SLAVE_MAX_FDS     8

/*
 * Set the maximum number of RAM slots supported to
 * the maximum number supported by the target
 * hardware platform.
 */
#if defined(TARGET_I386) || defined(TARGET_X86_64) || \
    defined(TARGET_ARM) || defined(TARGET_AARCH64)
#include "hw/acpi/acpi.h"
#define VHOST_USER_MAX_RAM_SLOTS ACPI_MAX_RAM_SLOTS

#elif defined(TARGET_PPC) || defined(TARGET_PPC64)
#include "hw/ppc/spapr.h"
#define VHOST_USER_MAX_RAM_SLOTS SPAPR_MAX_RAM_SLOTS

#else
#define VHOST_USER_MAX_RAM_SLOTS 512
#endif

/*
 * Maximum size of virtio device config space
 */
#define VHOST_USER_MAX_CONFIG_SIZE 256

enum VhostUserProtocolFeature {
    VHOST_USER_PROTOCOL_F_MQ = 0,
    VHOST_USER_PROTOCOL_F_LOG_SHMFD = 1,
    VHOST_USER_PROTOCOL_F_RARP = 2,
    VHOST_USER_PROTOCOL_F_REPLY_ACK = 3,
    VHOST_USER_PROTOCOL_F_NET_MTU = 4,
    VHOST_USER_PROTOCOL_F_SLAVE_REQ = 5,
    VHOST_USER_PROTOCOL_F_CROSS_ENDIAN = 6,
    VHOST_USER_PROTOCOL_F_CRYPTO_SESSION = 7,
    VHOST_USER_PROTOCOL_F_PAGEFAULT = 8,
    VHOST_USER_PROTOCOL_F_CONFIG = 9,
    VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD = 10,
    VHOST_USER_PROTOCOL_F_HOST_NOTIFIER = 11,
    VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD = 12,
    VHOST_USER_PROTOCOL_F_RESET_DEVICE = 13,
    /* Feature 14 reserved for VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS. */
    VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS = 15,
    VHOST_USER_PROTOCOL_F_MAX
};

#define VHOST_USER_PROTOCOL_FEATURE_MASK ((1 << VHOST_USER_PROTOCOL_F_MAX) - 1)

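/*
 * Only bits below VHOST_USER_PROTOCOL_F_MAX are valid: the set of
 * protocol features the backend reports via
 * VHOST_USER_GET_PROTOCOL_FEATURES is ANDed with this mask before QEMU
 * acknowledges it with VHOST_USER_SET_PROTOCOL_FEATURES, so only
 * mutually understood features are ever negotiated.
 */
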
typedef enum VhostUserRequest {
    VHOST_USER_NONE = 0,
    VHOST_USER_GET_FEATURES = 1,
    VHOST_USER_SET_FEATURES = 2,
    VHOST_USER_SET_OWNER = 3,
    VHOST_USER_RESET_OWNER = 4,
    VHOST_USER_SET_MEM_TABLE = 5,
    VHOST_USER_SET_LOG_BASE = 6,
    VHOST_USER_SET_LOG_FD = 7,
    VHOST_USER_SET_VRING_NUM = 8,
    VHOST_USER_SET_VRING_ADDR = 9,
    VHOST_USER_SET_VRING_BASE = 10,
    VHOST_USER_GET_VRING_BASE = 11,
    VHOST_USER_SET_VRING_KICK = 12,
    VHOST_USER_SET_VRING_CALL = 13,
    VHOST_USER_SET_VRING_ERR = 14,
    VHOST_USER_GET_PROTOCOL_FEATURES = 15,
    VHOST_USER_SET_PROTOCOL_FEATURES = 16,
    VHOST_USER_GET_QUEUE_NUM = 17,
    VHOST_USER_SET_VRING_ENABLE = 18,
    VHOST_USER_SEND_RARP = 19,
    VHOST_USER_NET_SET_MTU = 20,
    VHOST_USER_SET_SLAVE_REQ_FD = 21,
    VHOST_USER_IOTLB_MSG = 22,
    VHOST_USER_SET_VRING_ENDIAN = 23,
    VHOST_USER_GET_CONFIG = 24,
    VHOST_USER_SET_CONFIG = 25,
    VHOST_USER_CREATE_CRYPTO_SESSION = 26,
    VHOST_USER_CLOSE_CRYPTO_SESSION = 27,
    VHOST_USER_POSTCOPY_ADVISE  = 28,
    VHOST_USER_POSTCOPY_LISTEN  = 29,
    VHOST_USER_POSTCOPY_END     = 30,
    VHOST_USER_GET_INFLIGHT_FD = 31,
    VHOST_USER_SET_INFLIGHT_FD = 32,
    VHOST_USER_GPU_SET_SOCKET = 33,
    VHOST_USER_RESET_DEVICE = 34,
    /* Message number 35 reserved for VHOST_USER_VRING_KICK. */
    VHOST_USER_GET_MAX_MEM_SLOTS = 36,
    VHOST_USER_ADD_MEM_REG = 37,
    VHOST_USER_REM_MEM_REG = 38,
    VHOST_USER_MAX
} VhostUserRequest;

typedef enum VhostUserSlaveRequest {
    VHOST_USER_SLAVE_NONE = 0,
    VHOST_USER_SLAVE_IOTLB_MSG = 1,
    VHOST_USER_SLAVE_CONFIG_CHANGE_MSG = 2,
    VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG = 3,
    VHOST_USER_SLAVE_MAX
}  VhostUserSlaveRequest;

typedef struct VhostUserMemoryRegion {
    uint64_t guest_phys_addr;
    uint64_t memory_size;
    uint64_t userspace_addr;
    uint64_t mmap_offset;
} VhostUserMemoryRegion;

typedef struct VhostUserMemory {
    uint32_t nregions;
    uint32_t padding;
    VhostUserMemoryRegion regions[VHOST_MEMORY_BASELINE_NREGIONS];
} VhostUserMemory;

typedef struct VhostUserMemRegMsg {
    uint64_t padding;
    VhostUserMemoryRegion region;
} VhostUserMemRegMsg;

typedef struct VhostUserLog {
    uint64_t mmap_size;
    uint64_t mmap_offset;
} VhostUserLog;

typedef struct VhostUserConfig {
    uint32_t offset;
    uint32_t size;
    uint32_t flags;
    uint8_t region[VHOST_USER_MAX_CONFIG_SIZE];
} VhostUserConfig;

#define VHOST_CRYPTO_SYM_HMAC_MAX_KEY_LEN    512
#define VHOST_CRYPTO_SYM_CIPHER_MAX_KEY_LEN  64

typedef struct VhostUserCryptoSession {
    /* session id for success, -1 on errors */
    int64_t session_id;
    CryptoDevBackendSymSessionInfo session_setup_data;
    uint8_t key[VHOST_CRYPTO_SYM_CIPHER_MAX_KEY_LEN];
    uint8_t auth_key[VHOST_CRYPTO_SYM_HMAC_MAX_KEY_LEN];
} VhostUserCryptoSession;

static VhostUserConfig c __attribute__ ((unused));
#define VHOST_USER_CONFIG_HDR_SIZE (sizeof(c.offset) \
                                   + sizeof(c.size) \
                                   + sizeof(c.flags))

typedef struct VhostUserVringArea {
    uint64_t u64;
    uint64_t size;
    uint64_t offset;
} VhostUserVringArea;

typedef struct VhostUserInflight {
    uint64_t mmap_size;
    uint64_t mmap_offset;
    uint16_t num_queues;
    uint16_t queue_size;
} VhostUserInflight;

typedef struct {
    VhostUserRequest request;

#define VHOST_USER_VERSION_MASK     (0x3)
#define VHOST_USER_REPLY_MASK       (0x1 << 2)
#define VHOST_USER_NEED_REPLY_MASK  (0x1 << 3)
    uint32_t flags;
    uint32_t size; /* the following payload size */
} QEMU_PACKED VhostUserHeader;

typedef union {
#define VHOST_USER_VRING_IDX_MASK   (0xff)
#define VHOST_USER_VRING_NOFD_MASK  (0x1 << 8)
        uint64_t u64;
        struct vhost_vring_state state;
        struct vhost_vring_addr addr;
        VhostUserMemory memory;
        VhostUserMemRegMsg mem_reg;
        VhostUserLog log;
        struct vhost_iotlb_msg iotlb;
        VhostUserConfig config;
        VhostUserCryptoSession session;
        VhostUserVringArea area;
        VhostUserInflight inflight;
} VhostUserPayload;

typedef struct VhostUserMsg {
    VhostUserHeader hdr;
    VhostUserPayload payload;
} QEMU_PACKED VhostUserMsg;

static VhostUserMsg m __attribute__ ((unused));
#define VHOST_USER_HDR_SIZE (sizeof(VhostUserHeader))

#define VHOST_USER_PAYLOAD_SIZE (sizeof(VhostUserPayload))

/* The version of the protocol we support */
#define VHOST_USER_VERSION    (0x1)

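/*
 * On the wire every message is the 12-byte packed header followed by
 * hdr.size bytes of payload:
 *
 *   byte:  0        4        8        12
 *          +--------+--------+--------+----------------------
 *          | request| flags  | size   | payload (size bytes)
 *          +--------+--------+--------+----------------------
 *
 * Bits 0-1 of flags carry the protocol version, bit 2 marks a reply
 * and bit 3 asks the peer for an explicit REPLY_ACK (see the masks
 * defined in VhostUserHeader above).
 */
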
struct vhost_user {
    struct vhost_dev *dev;
    /* Shared between vhost devs of the same virtio device */
    VhostUserState *user;
    QIOChannel *slave_ioc;
    GSource *slave_src;
    NotifierWithReturn postcopy_notifier;
    struct PostCopyFD  postcopy_fd;
    uint64_t           postcopy_client_bases[VHOST_USER_MAX_RAM_SLOTS];
    /* Length of the region_rb and region_rb_offset arrays */
    size_t             region_rb_len;
    /* RAMBlock associated with a given region */
    RAMBlock         **region_rb;
    /*
     * The offset from the start of the RAMBlock to the start of the
     * vhost region.
     */
    ram_addr_t        *region_rb_offset;

    /* True once we've entered postcopy_listen */
    bool               postcopy_listen;

    /* Our current regions */
    int num_shadow_regions;
    struct vhost_memory_region shadow_regions[VHOST_USER_MAX_RAM_SLOTS];
};

struct scrub_regions {
    struct vhost_memory_region *region;
    int reg_idx;
    int fd_idx;
};

static bool ioeventfd_enabled(void)
{
    return !kvm_enabled() || kvm_eventfds_enabled();
}

static int vhost_user_read_header(struct vhost_dev *dev, VhostUserMsg *msg)
{
    struct vhost_user *u = dev->opaque;
    CharBackend *chr = u->user->chr;
    uint8_t *p = (uint8_t *) msg;
    int r, size = VHOST_USER_HDR_SIZE;

    r = qemu_chr_fe_read_all(chr, p, size);
    if (r != size) {
        int saved_errno = errno;
        error_report("Failed to read msg header. Read %d instead of %d."
                     " Original request %d.", r, size, msg->hdr.request);
        return r < 0 ? -saved_errno : -EIO;
    }

    /* validate received flags */
    if (msg->hdr.flags != (VHOST_USER_REPLY_MASK | VHOST_USER_VERSION)) {
        error_report("Failed to read msg header."
                " Flags 0x%x instead of 0x%x.", msg->hdr.flags,
                VHOST_USER_REPLY_MASK | VHOST_USER_VERSION);
        return -EPROTO;
    }

    trace_vhost_user_read(msg->hdr.request, msg->hdr.flags);

    return 0;
}

struct vhost_user_read_cb_data {
    struct vhost_dev *dev;
    VhostUserMsg *msg;
    GMainLoop *loop;
    int ret;
};

static gboolean vhost_user_read_cb(void *do_not_use, GIOCondition condition,
                                   gpointer opaque)
{
    struct vhost_user_read_cb_data *data = opaque;
    struct vhost_dev *dev = data->dev;
    VhostUserMsg *msg = data->msg;
    struct vhost_user *u = dev->opaque;
    CharBackend *chr = u->user->chr;
    uint8_t *p = (uint8_t *) msg;
    int r, size;

    r = vhost_user_read_header(dev, msg);
    if (r < 0) {
        data->ret = r;
        goto end;
    }

    /* validate message size is sane */
    if (msg->hdr.size > VHOST_USER_PAYLOAD_SIZE) {
        error_report("Failed to read msg header."
                " Size %d exceeds the maximum %zu.", msg->hdr.size,
                VHOST_USER_PAYLOAD_SIZE);
        data->ret = -EPROTO;
        goto end;
    }

    if (msg->hdr.size) {
        p += VHOST_USER_HDR_SIZE;
        size = msg->hdr.size;
        r = qemu_chr_fe_read_all(chr, p, size);
        if (r != size) {
            int saved_errno = errno;
            error_report("Failed to read msg payload."
                         " Read %d instead of %d.", r, msg->hdr.size);
            data->ret = r < 0 ? -saved_errno : -EIO;
            goto end;
        }
    }

end:
    g_main_loop_quit(data->loop);
    return G_SOURCE_REMOVE;
}

static gboolean slave_read(QIOChannel *ioc, GIOCondition condition,
                           gpointer opaque);

/*
 * This updates the read handler to use a new event loop context.
 * Event sources are removed from the previous context: this ensures
 * that events detected in the previous context are purged. They will
 * be re-detected and processed in the new context.
 */
static void slave_update_read_handler(struct vhost_dev *dev,
                                      GMainContext *ctxt)
{
    struct vhost_user *u = dev->opaque;

    if (!u->slave_ioc) {
        return;
    }

    if (u->slave_src) {
        g_source_destroy(u->slave_src);
        g_source_unref(u->slave_src);
    }

    u->slave_src = qio_channel_add_watch_source(u->slave_ioc,
                                                G_IO_IN | G_IO_HUP,
                                                slave_read, dev, NULL,
                                                ctxt);
}

static int vhost_user_read(struct vhost_dev *dev, VhostUserMsg *msg)
{
    struct vhost_user *u = dev->opaque;
    CharBackend *chr = u->user->chr;
    GMainContext *prev_ctxt = chr->chr->gcontext;
    GMainContext *ctxt = g_main_context_new();
    GMainLoop *loop = g_main_loop_new(ctxt, FALSE);
    struct vhost_user_read_cb_data data = {
        .dev = dev,
        .loop = loop,
        .msg = msg,
        .ret = 0
    };

    /*
     * We want to be able to monitor the slave channel fd while waiting
     * for chr I/O. This requires an event loop, but we can't nest the
     * one to which chr is currently attached: its fd handlers might not
     * be prepared for re-entrancy. So we create a new one and switch chr
     * to use it.
     */
    slave_update_read_handler(dev, ctxt);
    qemu_chr_be_update_read_handlers(chr->chr, ctxt);
    qemu_chr_fe_add_watch(chr, G_IO_IN | G_IO_HUP, vhost_user_read_cb, &data);

    g_main_loop_run(loop);

    /*
     * Restore the previous event loop context. This also destroys/recreates
     * event sources: this guarantees that all pending events in the original
     * context that have been processed by the nested loop are purged.
     */
    qemu_chr_be_update_read_handlers(chr->chr, prev_ctxt);
    slave_update_read_handler(dev, NULL);

    g_main_loop_unref(loop);
    g_main_context_unref(ctxt);

    return data.ret;
}

static int process_message_reply(struct vhost_dev *dev,
                                 const VhostUserMsg *msg)
{
    int ret;
    VhostUserMsg msg_reply;

    if ((msg->hdr.flags & VHOST_USER_NEED_REPLY_MASK) == 0) {
        return 0;
    }

    ret = vhost_user_read(dev, &msg_reply);
    if (ret < 0) {
        return ret;
    }

    if (msg_reply.hdr.request != msg->hdr.request) {
        error_report("Received unexpected msg type. "
                     "Expected %d received %d",
                     msg->hdr.request, msg_reply.hdr.request);
        return -EPROTO;
    }

    return msg_reply.payload.u64 ? -EIO : 0;
}

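/*
 * With VHOST_USER_PROTOCOL_F_REPLY_ACK negotiated, a request that
 * carries VHOST_USER_NEED_REPLY_MASK is acknowledged with a
 * payload.u64 of zero on success and non-zero on failure;
 * process_message_reply() above maps that onto 0 or -EIO.
 */
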
static bool vhost_user_one_time_request(VhostUserRequest request)
{
    switch (request) {
    case VHOST_USER_SET_OWNER:
    case VHOST_USER_RESET_OWNER:
    case VHOST_USER_SET_MEM_TABLE:
    case VHOST_USER_GET_QUEUE_NUM:
    case VHOST_USER_NET_SET_MTU:
        return true;
    default:
        return false;
    }
}

/* most non-init callers ignore the error */
static int vhost_user_write(struct vhost_dev *dev, VhostUserMsg *msg,
                            int *fds, int fd_num)
{
    struct vhost_user *u = dev->opaque;
    CharBackend *chr = u->user->chr;
    int ret, size = VHOST_USER_HDR_SIZE + msg->hdr.size;

    /*
     * For non-vring-specific requests, like VHOST_USER_SET_MEM_TABLE,
     * we only need to send it once, the first time; any later such
     * request is simply ignored.
     */
    if (vhost_user_one_time_request(msg->hdr.request) && dev->vq_index != 0) {
        msg->hdr.flags &= ~VHOST_USER_NEED_REPLY_MASK;
        return 0;
    }

    if (qemu_chr_fe_set_msgfds(chr, fds, fd_num) < 0) {
        error_report("Failed to set msg fds.");
        return -EINVAL;
    }

    ret = qemu_chr_fe_write_all(chr, (const uint8_t *) msg, size);
    if (ret != size) {
        int saved_errno = errno;
        error_report("Failed to write msg."
                     " Wrote %d instead of %d.", ret, size);
        return ret < 0 ? -saved_errno : -EIO;
    }

    trace_vhost_user_write(msg->hdr.request, msg->hdr.flags);

    return 0;
}

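/*
 * A typical caller builds a VhostUserMsg on the stack and, for requests
 * that return data, pairs vhost_user_write() with vhost_user_read(),
 * which overwrites the message in place. A minimal sketch, mirroring
 * vhost_user_get_u64() further below:
 *
 *   VhostUserMsg msg = {
 *       .hdr.request = VHOST_USER_GET_FEATURES,
 *       .hdr.flags = VHOST_USER_VERSION,
 *   };
 *   if (vhost_user_write(dev, &msg, NULL, 0) < 0 ||
 *       vhost_user_read(dev, &msg) < 0) {
 *       ... propagate the error ...
 *   }
 *   features = msg.payload.u64;
 */
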
int vhost_user_gpu_set_socket(struct vhost_dev *dev, int fd)
{
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_GPU_SET_SOCKET,
        .hdr.flags = VHOST_USER_VERSION,
    };

    return vhost_user_write(dev, &msg, &fd, 1);
}

static int vhost_user_set_log_base(struct vhost_dev *dev, uint64_t base,
                                   struct vhost_log *log)
{
    int fds[VHOST_USER_MAX_RAM_SLOTS];
    size_t fd_num = 0;
    bool shmfd = virtio_has_feature(dev->protocol_features,
                                    VHOST_USER_PROTOCOL_F_LOG_SHMFD);
    int ret;
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_SET_LOG_BASE,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.log.mmap_size = log->size * sizeof(*(log->log)),
        .payload.log.mmap_offset = 0,
        .hdr.size = sizeof(msg.payload.log),
    };

    if (shmfd && log->fd != -1) {
        fds[fd_num++] = log->fd;
    }

    ret = vhost_user_write(dev, &msg, fds, fd_num);
    if (ret < 0) {
        return ret;
    }

    if (shmfd) {
        msg.hdr.size = 0;
        ret = vhost_user_read(dev, &msg);
        if (ret < 0) {
            return ret;
        }

        if (msg.hdr.request != VHOST_USER_SET_LOG_BASE) {
            error_report("Received unexpected msg type. "
                         "Expected %d received %d",
                         VHOST_USER_SET_LOG_BASE, msg.hdr.request);
            return -EPROTO;
        }
    }

    return 0;
}

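/*
 * When VHOST_USER_PROTOCOL_F_LOG_SHMFD is negotiated, the dirty-log
 * area is handed to the backend as a file descriptor in the ancillary
 * data, and the reply above acts as a barrier: once it arrives, the
 * backend has switched over to the new log, so the caller can safely
 * release the old one.
 */
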
static MemoryRegion *vhost_user_get_mr_data(uint64_t addr, ram_addr_t *offset,
                                            int *fd)
{
    MemoryRegion *mr;

    assert((uintptr_t)addr == addr);
    mr = memory_region_from_host((void *)(uintptr_t)addr, offset);
    *fd = memory_region_get_fd(mr);

    return mr;
}

static void vhost_user_fill_msg_region(VhostUserMemoryRegion *dst,
                                       struct vhost_memory_region *src,
                                       uint64_t mmap_offset)
{
    assert(src != NULL && dst != NULL);
    dst->userspace_addr = src->userspace_addr;
    dst->memory_size = src->memory_size;
    dst->guest_phys_addr = src->guest_phys_addr;
    dst->mmap_offset = mmap_offset;
}

static int vhost_user_fill_set_mem_table_msg(struct vhost_user *u,
                                             struct vhost_dev *dev,
                                             VhostUserMsg *msg,
                                             int *fds, size_t *fd_num,
                                             bool track_ramblocks)
{
    int i, fd;
    ram_addr_t offset;
    MemoryRegion *mr;
    struct vhost_memory_region *reg;
    VhostUserMemoryRegion region_buffer;

    msg->hdr.request = VHOST_USER_SET_MEM_TABLE;

    for (i = 0; i < dev->mem->nregions; ++i) {
        reg = dev->mem->regions + i;

        mr = vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd);
        if (fd > 0) {
            if (track_ramblocks) {
                assert(*fd_num < VHOST_MEMORY_BASELINE_NREGIONS);
                trace_vhost_user_set_mem_table_withfd(*fd_num, mr->name,
                                                      reg->memory_size,
                                                      reg->guest_phys_addr,
                                                      reg->userspace_addr,
                                                      offset);
                u->region_rb_offset[i] = offset;
                u->region_rb[i] = mr->ram_block;
            } else if (*fd_num == VHOST_MEMORY_BASELINE_NREGIONS) {
                error_report("Failed preparing vhost-user memory table msg");
                return -ENOBUFS;
            }
            vhost_user_fill_msg_region(&region_buffer, reg, offset);
            msg->payload.memory.regions[*fd_num] = region_buffer;
            fds[(*fd_num)++] = fd;
        } else if (track_ramblocks) {
            u->region_rb_offset[i] = 0;
            u->region_rb[i] = NULL;
        }
    }

    msg->payload.memory.nregions = *fd_num;

    if (!*fd_num) {
        error_report("Failed initializing vhost-user memory map, "
                     "consider using -object memory-backend-file share=on");
        return -EINVAL;
    }

    msg->hdr.size = sizeof(msg->payload.memory.nregions);
    msg->hdr.size += sizeof(msg->payload.memory.padding);
    msg->hdr.size += *fd_num * sizeof(VhostUserMemoryRegion);

    return 0;
}

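/*
 * Note that only regions backed by a file descriptor make it into the
 * message: the backend maps guest memory by mmap()ing each fd it
 * receives, so fd-less regions (e.g. anonymous RAM) cannot be shared,
 * which is why the error path above suggests a shareable memory
 * backend.
 */
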
static inline bool reg_equal(struct vhost_memory_region *shadow_reg,
                             struct vhost_memory_region *vdev_reg)
{
    return shadow_reg->guest_phys_addr == vdev_reg->guest_phys_addr &&
        shadow_reg->userspace_addr == vdev_reg->userspace_addr &&
        shadow_reg->memory_size == vdev_reg->memory_size;
}

static void scrub_shadow_regions(struct vhost_dev *dev,
                                 struct scrub_regions *add_reg,
                                 int *nr_add_reg,
                                 struct scrub_regions *rem_reg,
                                 int *nr_rem_reg, uint64_t *shadow_pcb,
                                 bool track_ramblocks)
{
    struct vhost_user *u = dev->opaque;
    bool found[VHOST_USER_MAX_RAM_SLOTS] = {};
    struct vhost_memory_region *reg, *shadow_reg;
    int i, j, fd, add_idx = 0, rm_idx = 0, fd_num = 0;
    ram_addr_t offset;
    MemoryRegion *mr;
    bool matching;

    /*
     * Find memory regions present in our shadow state which are not in
     * the device's current memory state.
     *
     * Mark regions in both the shadow and device state as "found".
     */
    for (i = 0; i < u->num_shadow_regions; i++) {
        shadow_reg = &u->shadow_regions[i];
        matching = false;

        for (j = 0; j < dev->mem->nregions; j++) {
            reg = &dev->mem->regions[j];

            mr = vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd);

            if (reg_equal(shadow_reg, reg)) {
                matching = true;
                found[j] = true;
                if (track_ramblocks) {
                    /*
                     * Reset postcopy client bases, region_rb, and
                     * region_rb_offset in case regions are removed.
                     */
                    if (fd > 0) {
                        u->region_rb_offset[j] = offset;
                        u->region_rb[j] = mr->ram_block;
                        shadow_pcb[j] = u->postcopy_client_bases[i];
                    } else {
                        u->region_rb_offset[j] = 0;
                        u->region_rb[j] = NULL;
                    }
                }
                break;
            }
        }

        /*
         * If the region was not found in the current device memory state,
         * create an entry for it in the removed list.
         */
        if (!matching) {
            rem_reg[rm_idx].region = shadow_reg;
            rem_reg[rm_idx++].reg_idx = i;
        }
    }

    /*
     * For regions not marked "found", create entries in the added list.
     *
     * Note their indexes in the device memory state and the indexes of their
     * file descriptors.
     */
    for (i = 0; i < dev->mem->nregions; i++) {
        reg = &dev->mem->regions[i];
        vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd);
        if (fd > 0) {
            ++fd_num;
        }

        /*
         * If the region was in both the shadow and device state, we don't
         * need to send a VHOST_USER_ADD_MEM_REG message for it.
         */
        if (found[i]) {
            continue;
        }

        add_reg[add_idx].region = reg;
        add_reg[add_idx].reg_idx = i;
        add_reg[add_idx++].fd_idx = fd_num;
    }
    *nr_rem_reg = rm_idx;
    *nr_add_reg = add_idx;

    return;
}

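/*
 * A small worked example of the scrub: with shadow state {A, B, C} and
 * new device state {A, C, D}, regions A and C are marked "found", B
 * lands on the remove list and D on the add list, so only one
 * VHOST_USER_REM_MEM_REG and one VHOST_USER_ADD_MEM_REG message are
 * needed instead of retransmitting the whole table.
 */
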
static int send_remove_regions(struct vhost_dev *dev,
                               struct scrub_regions *remove_reg,
                               int nr_rem_reg, VhostUserMsg *msg,
                               bool reply_supported)
{
    struct vhost_user *u = dev->opaque;
    struct vhost_memory_region *shadow_reg;
    int i, fd, shadow_reg_idx, ret;
    ram_addr_t offset;
    VhostUserMemoryRegion region_buffer;

    /*
     * The regions in remove_reg appear in the same order they do in the
     * shadow table. Therefore we can minimize memory copies by iterating
     * through remove_reg backwards.
     */
    for (i = nr_rem_reg - 1; i >= 0; i--) {
        shadow_reg = remove_reg[i].region;
        shadow_reg_idx = remove_reg[i].reg_idx;

        vhost_user_get_mr_data(shadow_reg->userspace_addr, &offset, &fd);

        if (fd > 0) {
            msg->hdr.request = VHOST_USER_REM_MEM_REG;
            vhost_user_fill_msg_region(&region_buffer, shadow_reg, 0);
            msg->payload.mem_reg.region = region_buffer;

            ret = vhost_user_write(dev, msg, NULL, 0);
            if (ret < 0) {
                return ret;
            }

            if (reply_supported) {
                ret = process_message_reply(dev, msg);
                if (ret) {
                    return ret;
                }
            }
        }

        /*
         * At this point we know the backend has unmapped the region. It is now
         * safe to remove it from the shadow table.
         */
        memmove(&u->shadow_regions[shadow_reg_idx],
                &u->shadow_regions[shadow_reg_idx + 1],
                sizeof(struct vhost_memory_region) *
                (u->num_shadow_regions - shadow_reg_idx - 1));
        u->num_shadow_regions--;
    }

    return 0;
}

static int send_add_regions(struct vhost_dev *dev,
                            struct scrub_regions *add_reg, int nr_add_reg,
                            VhostUserMsg *msg, uint64_t *shadow_pcb,
                            bool reply_supported, bool track_ramblocks)
{
    struct vhost_user *u = dev->opaque;
    int i, fd, ret, reg_idx, reg_fd_idx;
    struct vhost_memory_region *reg;
    MemoryRegion *mr;
    ram_addr_t offset;
    VhostUserMsg msg_reply;
    VhostUserMemoryRegion region_buffer;

    for (i = 0; i < nr_add_reg; i++) {
        reg = add_reg[i].region;
        reg_idx = add_reg[i].reg_idx;
        reg_fd_idx = add_reg[i].fd_idx;

        mr = vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd);

        if (fd > 0) {
            if (track_ramblocks) {
                trace_vhost_user_set_mem_table_withfd(reg_fd_idx, mr->name,
                                                      reg->memory_size,
                                                      reg->guest_phys_addr,
                                                      reg->userspace_addr,
                                                      offset);
                u->region_rb_offset[reg_idx] = offset;
                u->region_rb[reg_idx] = mr->ram_block;
            }
            msg->hdr.request = VHOST_USER_ADD_MEM_REG;
            vhost_user_fill_msg_region(&region_buffer, reg, offset);
            msg->payload.mem_reg.region = region_buffer;

            ret = vhost_user_write(dev, msg, &fd, 1);
            if (ret < 0) {
                return ret;
            }

            if (track_ramblocks) {
                uint64_t reply_gpa;

                ret = vhost_user_read(dev, &msg_reply);
                if (ret < 0) {
                    return ret;
                }

                reply_gpa = msg_reply.payload.mem_reg.region.guest_phys_addr;

                if (msg_reply.hdr.request != VHOST_USER_ADD_MEM_REG) {
                    error_report("%s: Received unexpected msg type. "
                                 "Expected %d received %d", __func__,
                                 VHOST_USER_ADD_MEM_REG,
                                 msg_reply.hdr.request);
                    return -EPROTO;
                }

                /*
                 * We're using the same structure, just reusing one of the
                 * fields, so it should be the same size.
                 */
                if (msg_reply.hdr.size != msg->hdr.size) {
                    error_report("%s: Unexpected size for postcopy reply "
                                 "%d vs %d", __func__, msg_reply.hdr.size,
                                 msg->hdr.size);
                    return -EPROTO;
                }

                /* Get the postcopy client base from the backend's reply. */
                if (reply_gpa == dev->mem->regions[reg_idx].guest_phys_addr) {
                    shadow_pcb[reg_idx] =
                        msg_reply.payload.mem_reg.region.userspace_addr;
                    trace_vhost_user_set_mem_table_postcopy(
                        msg_reply.payload.mem_reg.region.userspace_addr,
                        msg->payload.mem_reg.region.userspace_addr,
                        reg_fd_idx, reg_idx);
                } else {
                    error_report("%s: invalid postcopy reply for region. "
                                 "Got guest physical address %" PRIX64 ", expected "
                                 "%" PRIX64, __func__, reply_gpa,
                                 dev->mem->regions[reg_idx].guest_phys_addr);
                    return -EPROTO;
                }
            } else if (reply_supported) {
                ret = process_message_reply(dev, msg);
                if (ret) {
                    return ret;
                }
            }
        } else if (track_ramblocks) {
            u->region_rb_offset[reg_idx] = 0;
            u->region_rb[reg_idx] = NULL;
        }

        /*
         * At this point, we know the backend has mapped in the new
         * region, if the region has a valid file descriptor.
         *
         * The region should now be added to the shadow table.
         */
        u->shadow_regions[u->num_shadow_regions].guest_phys_addr =
            reg->guest_phys_addr;
        u->shadow_regions[u->num_shadow_regions].userspace_addr =
            reg->userspace_addr;
        u->shadow_regions[u->num_shadow_regions].memory_size =
            reg->memory_size;
        u->num_shadow_regions++;
    }

    return 0;
}

static int vhost_user_add_remove_regions(struct vhost_dev *dev,
                                         VhostUserMsg *msg,
                                         bool reply_supported,
                                         bool track_ramblocks)
{
    struct vhost_user *u = dev->opaque;
    struct scrub_regions add_reg[VHOST_USER_MAX_RAM_SLOTS];
    struct scrub_regions rem_reg[VHOST_USER_MAX_RAM_SLOTS];
    uint64_t shadow_pcb[VHOST_USER_MAX_RAM_SLOTS] = {};
    int nr_add_reg, nr_rem_reg;
    int ret;

    msg->hdr.size = sizeof(msg->payload.mem_reg);

    /* Find the regions which need to be removed or added. */
    scrub_shadow_regions(dev, add_reg, &nr_add_reg, rem_reg, &nr_rem_reg,
                         shadow_pcb, track_ramblocks);

    if (nr_rem_reg) {
        ret = send_remove_regions(dev, rem_reg, nr_rem_reg, msg,
                                  reply_supported);
        if (ret < 0) {
            goto err;
        }
    }

    if (nr_add_reg) {
        ret = send_add_regions(dev, add_reg, nr_add_reg, msg, shadow_pcb,
                               reply_supported, track_ramblocks);
        if (ret < 0) {
            goto err;
        }
    }

    if (track_ramblocks) {
        memcpy(u->postcopy_client_bases, shadow_pcb,
               sizeof(uint64_t) * VHOST_USER_MAX_RAM_SLOTS);
        /*
         * Now we've registered this with the postcopy code, we ack to the
         * client, because we are now in a position to deal with any faults
         * it generates.
         */
        /* TODO: Use this for failure cases as well with a bad value. */
        msg->hdr.size = sizeof(msg->payload.u64);
        msg->payload.u64 = 0; /* OK */

        ret = vhost_user_write(dev, msg, NULL, 0);
        if (ret < 0) {
            return ret;
        }
    }

    return 0;

err:
    if (track_ramblocks) {
        memcpy(u->postcopy_client_bases, shadow_pcb,
               sizeof(uint64_t) * VHOST_USER_MAX_RAM_SLOTS);
    }

    return ret;
}

static int vhost_user_set_mem_table_postcopy(struct vhost_dev *dev,
                                             struct vhost_memory *mem,
                                             bool reply_supported,
                                             bool config_mem_slots)
{
    struct vhost_user *u = dev->opaque;
    int fds[VHOST_MEMORY_BASELINE_NREGIONS];
    size_t fd_num = 0;
    VhostUserMsg msg_reply;
    int region_i, msg_i;
    int ret;

    VhostUserMsg msg = {
        .hdr.flags = VHOST_USER_VERSION,
    };

    if (u->region_rb_len < dev->mem->nregions) {
        u->region_rb = g_renew(RAMBlock*, u->region_rb, dev->mem->nregions);
        u->region_rb_offset = g_renew(ram_addr_t, u->region_rb_offset,
                                      dev->mem->nregions);
        memset(&(u->region_rb[u->region_rb_len]), '\0',
               sizeof(RAMBlock *) * (dev->mem->nregions - u->region_rb_len));
        memset(&(u->region_rb_offset[u->region_rb_len]), '\0',
               sizeof(ram_addr_t) * (dev->mem->nregions - u->region_rb_len));
        u->region_rb_len = dev->mem->nregions;
    }

    if (config_mem_slots) {
        ret = vhost_user_add_remove_regions(dev, &msg, reply_supported, true);
        if (ret < 0) {
            return ret;
        }
    } else {
        ret = vhost_user_fill_set_mem_table_msg(u, dev, &msg, fds, &fd_num,
                                                true);
        if (ret < 0) {
            return ret;
        }

        ret = vhost_user_write(dev, &msg, fds, fd_num);
        if (ret < 0) {
            return ret;
        }

        ret = vhost_user_read(dev, &msg_reply);
        if (ret < 0) {
            return ret;
        }

        if (msg_reply.hdr.request != VHOST_USER_SET_MEM_TABLE) {
            error_report("%s: Received unexpected msg type. "
                         "Expected %d received %d", __func__,
                         VHOST_USER_SET_MEM_TABLE, msg_reply.hdr.request);
            return -EPROTO;
        }

        /*
         * We're using the same structure, just reusing one of the
         * fields, so it should be the same size.
         */
        if (msg_reply.hdr.size != msg.hdr.size) {
            error_report("%s: Unexpected size for postcopy reply "
                         "%d vs %d", __func__, msg_reply.hdr.size,
                         msg.hdr.size);
            return -EPROTO;
        }

        memset(u->postcopy_client_bases, 0,
               sizeof(uint64_t) * VHOST_USER_MAX_RAM_SLOTS);

        /*
         * They're in the same order as the regions that were sent,
         * but some of the regions were skipped (above) if they
         * didn't have fds.
         */
        for (msg_i = 0, region_i = 0;
             region_i < dev->mem->nregions;
             region_i++) {
            if (msg_i < fd_num &&
                msg_reply.payload.memory.regions[msg_i].guest_phys_addr ==
                dev->mem->regions[region_i].guest_phys_addr) {
                u->postcopy_client_bases[region_i] =
                    msg_reply.payload.memory.regions[msg_i].userspace_addr;
                trace_vhost_user_set_mem_table_postcopy(
                    msg_reply.payload.memory.regions[msg_i].userspace_addr,
                    msg.payload.memory.regions[msg_i].userspace_addr,
                    msg_i, region_i);
                msg_i++;
            }
        }
        if (msg_i != fd_num) {
            error_report("%s: postcopy reply not fully consumed "
                         "%d vs %zd",
                         __func__, msg_i, fd_num);
            return -EIO;
        }

        /*
         * Now we've registered this with the postcopy code, we ack to the
         * client, because we are now in a position to deal with any faults
         * it generates.
         */
        /* TODO: Use this for failure cases as well with a bad value. */
        msg.hdr.size = sizeof(msg.payload.u64);
        msg.payload.u64 = 0; /* OK */
        ret = vhost_user_write(dev, &msg, NULL, 0);
        if (ret < 0) {
            return ret;
        }
    }

    return 0;
}

static int vhost_user_set_mem_table(struct vhost_dev *dev,
                                    struct vhost_memory *mem)
{
    struct vhost_user *u = dev->opaque;
    int fds[VHOST_MEMORY_BASELINE_NREGIONS];
    size_t fd_num = 0;
    bool do_postcopy = u->postcopy_listen && u->postcopy_fd.handler;
    bool reply_supported = virtio_has_feature(dev->protocol_features,
                                              VHOST_USER_PROTOCOL_F_REPLY_ACK);
    bool config_mem_slots =
        virtio_has_feature(dev->protocol_features,
                           VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS);
    int ret;

    if (do_postcopy) {
        /*
         * Postcopy has enough differences that it's best done in its
         * own version.
         */
        return vhost_user_set_mem_table_postcopy(dev, mem, reply_supported,
                                                 config_mem_slots);
    }

    VhostUserMsg msg = {
        .hdr.flags = VHOST_USER_VERSION,
    };

    if (reply_supported) {
        msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
    }

    if (config_mem_slots) {
        ret = vhost_user_add_remove_regions(dev, &msg, reply_supported, false);
        if (ret < 0) {
            return ret;
        }
    } else {
        ret = vhost_user_fill_set_mem_table_msg(u, dev, &msg, fds, &fd_num,
                                                false);
        if (ret < 0) {
            return ret;
        }

        ret = vhost_user_write(dev, &msg, fds, fd_num);
        if (ret < 0) {
            return ret;
        }

        if (reply_supported) {
            return process_message_reply(dev, &msg);
        }
    }

    return 0;
}

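/*
 * To summarize the paths above: postcopy migration gets its own
 * variant, backends that negotiated
 * VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS receive incremental
 * ADD/REM_MEM_REG updates, and everything else gets the whole table in
 * one VHOST_USER_SET_MEM_TABLE message.
 */
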
static int vhost_user_set_vring_endian(struct vhost_dev *dev,
                                       struct vhost_vring_state *ring)
{
    bool cross_endian = virtio_has_feature(dev->protocol_features,
                                           VHOST_USER_PROTOCOL_F_CROSS_ENDIAN);
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_SET_VRING_ENDIAN,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.state = *ring,
        .hdr.size = sizeof(msg.payload.state),
    };

    if (!cross_endian) {
        error_report("vhost-user trying to send unhandled ioctl");
        return -ENOTSUP;
    }

    return vhost_user_write(dev, &msg, NULL, 0);
}

static int vhost_set_vring(struct vhost_dev *dev,
                           unsigned long int request,
                           struct vhost_vring_state *ring)
{
    VhostUserMsg msg = {
        .hdr.request = request,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.state = *ring,
        .hdr.size = sizeof(msg.payload.state),
    };

    return vhost_user_write(dev, &msg, NULL, 0);
}

static int vhost_user_set_vring_num(struct vhost_dev *dev,
                                    struct vhost_vring_state *ring)
{
    return vhost_set_vring(dev, VHOST_USER_SET_VRING_NUM, ring);
}

static void vhost_user_host_notifier_free(VhostUserHostNotifier *n)
{
    assert(n && n->unmap_addr);
    munmap(n->unmap_addr, qemu_real_host_page_size());
    n->unmap_addr = NULL;
}

/*
 * Clean-up function for the notifier; it will finally free the
 * structure under RCU.
 */
static void vhost_user_host_notifier_remove(VhostUserHostNotifier *n,
                                            VirtIODevice *vdev)
{
    if (n->addr) {
        if (vdev) {
            virtio_queue_set_host_notifier_mr(vdev, n->idx, &n->mr, false);
        }
        assert(!n->unmap_addr);
        n->unmap_addr = n->addr;
        n->addr = NULL;
        call_rcu(n, vhost_user_host_notifier_free, rcu);
    }
}

static int vhost_user_set_vring_base(struct vhost_dev *dev,
                                     struct vhost_vring_state *ring)
{
    return vhost_set_vring(dev, VHOST_USER_SET_VRING_BASE, ring);
}

static int vhost_user_set_vring_enable(struct vhost_dev *dev, int enable)
{
    int i;

    if (!virtio_has_feature(dev->features, VHOST_USER_F_PROTOCOL_FEATURES)) {
        return -EINVAL;
    }

    for (i = 0; i < dev->nvqs; ++i) {
        int ret;
        struct vhost_vring_state state = {
            .index = dev->vq_index + i,
            .num   = enable,
        };

        ret = vhost_set_vring(dev, VHOST_USER_SET_VRING_ENABLE, &state);
        if (ret < 0) {
            /*
             * Restoring the previous state is likely infeasible, as is
             * proceeding regardless of the error, so just bail out and
             * hope for device-level recovery.
             */
            return ret;
        }
    }

    return 0;
}

static VhostUserHostNotifier *fetch_notifier(VhostUserState *u,
                                             int idx)
{
    if (idx >= u->notifiers->len) {
        return NULL;
    }
    return g_ptr_array_index(u->notifiers, idx);
}

static int vhost_user_get_vring_base(struct vhost_dev *dev,
                                     struct vhost_vring_state *ring)
{
    int ret;
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_GET_VRING_BASE,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.state = *ring,
        .hdr.size = sizeof(msg.payload.state),
    };
    struct vhost_user *u = dev->opaque;

    VhostUserHostNotifier *n = fetch_notifier(u->user, ring->index);
    if (n) {
        vhost_user_host_notifier_remove(n, dev->vdev);
    }

    ret = vhost_user_write(dev, &msg, NULL, 0);
    if (ret < 0) {
        return ret;
    }

    ret = vhost_user_read(dev, &msg);
    if (ret < 0) {
        return ret;
    }

    if (msg.hdr.request != VHOST_USER_GET_VRING_BASE) {
        error_report("Received unexpected msg type. Expected %d received %d",
                     VHOST_USER_GET_VRING_BASE, msg.hdr.request);
        return -EPROTO;
    }

    if (msg.hdr.size != sizeof(msg.payload.state)) {
        error_report("Received bad msg size.");
        return -EPROTO;
    }

    *ring = msg.payload.state;

    return 0;
}

static int vhost_set_vring_file(struct vhost_dev *dev,
                                VhostUserRequest request,
                                struct vhost_vring_file *file)
{
    int fds[VHOST_USER_MAX_RAM_SLOTS];
    size_t fd_num = 0;
    VhostUserMsg msg = {
        .hdr.request = request,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.u64 = file->index & VHOST_USER_VRING_IDX_MASK,
        .hdr.size = sizeof(msg.payload.u64),
    };

    if (ioeventfd_enabled() && file->fd > 0) {
        fds[fd_num++] = file->fd;
    } else {
        msg.payload.u64 |= VHOST_USER_VRING_NOFD_MASK;
    }

    return vhost_user_write(dev, &msg, fds, fd_num);
}

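/*
 * VHOST_USER_VRING_NOFD_MASK tells the backend that no eventfd
 * accompanies the request, either because ioeventfds are unavailable
 * or because the caller passed an invalid fd; per the vhost-user
 * specification the backend must then manage without that notification
 * channel (for the kick this means polling the ring).
 */
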
static int vhost_user_set_vring_kick(struct vhost_dev *dev,
                                     struct vhost_vring_file *file)
{
    return vhost_set_vring_file(dev, VHOST_USER_SET_VRING_KICK, file);
}

static int vhost_user_set_vring_call(struct vhost_dev *dev,
                                     struct vhost_vring_file *file)
{
    return vhost_set_vring_file(dev, VHOST_USER_SET_VRING_CALL, file);
}

static int vhost_user_set_vring_err(struct vhost_dev *dev,
                                    struct vhost_vring_file *file)
{
    return vhost_set_vring_file(dev, VHOST_USER_SET_VRING_ERR, file);
}

static int vhost_user_get_u64(struct vhost_dev *dev, int request, uint64_t *u64)
{
    int ret;
    VhostUserMsg msg = {
        .hdr.request = request,
        .hdr.flags = VHOST_USER_VERSION,
    };

    if (vhost_user_one_time_request(request) && dev->vq_index != 0) {
        return 0;
    }

    ret = vhost_user_write(dev, &msg, NULL, 0);
    if (ret < 0) {
        return ret;
    }

    ret = vhost_user_read(dev, &msg);
    if (ret < 0) {
        return ret;
    }

    if (msg.hdr.request != request) {
        error_report("Received unexpected msg type. Expected %d received %d",
                     request, msg.hdr.request);
        return -EPROTO;
    }

    if (msg.hdr.size != sizeof(msg.payload.u64)) {
        error_report("Received bad msg size.");
        return -EPROTO;
    }

    *u64 = msg.payload.u64;

    return 0;
}

static int vhost_user_get_features(struct vhost_dev *dev, uint64_t *features)
{
    if (vhost_user_get_u64(dev, VHOST_USER_GET_FEATURES, features) < 0) {
        return -EPROTO;
    }

    return 0;
}

static int enforce_reply(struct vhost_dev *dev,
                         const VhostUserMsg *msg)
{
    uint64_t dummy;

    if (msg->hdr.flags & VHOST_USER_NEED_REPLY_MASK) {
        return process_message_reply(dev, msg);
    }

    /*
     * We need to wait for a reply but the backend does not
     * support replies for the command we just sent.
     * Send VHOST_USER_GET_FEATURES which makes all backends
     * send a reply.
     */
    return vhost_user_get_features(dev, &dummy);
}

static int vhost_user_set_vring_addr(struct vhost_dev *dev,
                                     struct vhost_vring_addr *addr)
{
    int ret;
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_SET_VRING_ADDR,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.addr = *addr,
        .hdr.size = sizeof(msg.payload.addr),
    };

    bool reply_supported = virtio_has_feature(dev->protocol_features,
                                              VHOST_USER_PROTOCOL_F_REPLY_ACK);

    /*
     * wait for a reply if logging is enabled to make sure
     * backend is actually logging changes
     */
    bool wait_for_reply = addr->flags & (1 << VHOST_VRING_F_LOG);

    if (reply_supported && wait_for_reply) {
        msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
    }

    ret = vhost_user_write(dev, &msg, NULL, 0);
    if (ret < 0) {
        return ret;
    }

    if (wait_for_reply) {
        return enforce_reply(dev, &msg);
    }

    return 0;
}

static int vhost_user_set_u64(struct vhost_dev *dev, int request, uint64_t u64,
                              bool wait_for_reply)
{
    VhostUserMsg msg = {
        .hdr.request = request,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.u64 = u64,
        .hdr.size = sizeof(msg.payload.u64),
    };
    int ret;

    if (wait_for_reply) {
        bool reply_supported = virtio_has_feature(dev->protocol_features,
                                          VHOST_USER_PROTOCOL_F_REPLY_ACK);
        if (reply_supported) {
            msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
        }
    }

    ret = vhost_user_write(dev, &msg, NULL, 0);
    if (ret < 0) {
        return ret;
    }

    if (wait_for_reply) {
        return enforce_reply(dev, &msg);
    }

    return 0;
}

static int vhost_user_set_features(struct vhost_dev *dev,
                                   uint64_t features)
{
    /*
     * wait for a reply if logging is enabled to make sure
     * backend is actually logging changes
     */
    bool log_enabled = features & (0x1ULL << VHOST_F_LOG_ALL);

    /*
     * We need to include any extra backend-only feature bits that
     * might be needed by our device. Currently this includes the
     * VHOST_USER_F_PROTOCOL_FEATURES bit for enabling protocol
     * features.
     */
    return vhost_user_set_u64(dev, VHOST_USER_SET_FEATURES,
                              features | dev->backend_features,
                              log_enabled);
}

static int vhost_user_set_protocol_features(struct vhost_dev *dev,
                                            uint64_t features)
{
    return vhost_user_set_u64(dev, VHOST_USER_SET_PROTOCOL_FEATURES, features,
                              false);
}

static int vhost_user_set_owner(struct vhost_dev *dev)
{
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_SET_OWNER,
        .hdr.flags = VHOST_USER_VERSION,
    };

    return vhost_user_write(dev, &msg, NULL, 0);
}

static int vhost_user_get_max_memslots(struct vhost_dev *dev,
                                       uint64_t *max_memslots)
{
    uint64_t backend_max_memslots;
    int err;

    err = vhost_user_get_u64(dev, VHOST_USER_GET_MAX_MEM_SLOTS,
                             &backend_max_memslots);
    if (err < 0) {
        return err;
    }

    *max_memslots = backend_max_memslots;

    return 0;
}

static int vhost_user_reset_device(struct vhost_dev *dev)
{
    VhostUserMsg msg = {
        .hdr.flags = VHOST_USER_VERSION,
    };

    msg.hdr.request = virtio_has_feature(dev->protocol_features,
                                         VHOST_USER_PROTOCOL_F_RESET_DEVICE)
        ? VHOST_USER_RESET_DEVICE
        : VHOST_USER_RESET_OWNER;

    return vhost_user_write(dev, &msg, NULL, 0);
}

static int vhost_user_slave_handle_config_change(struct vhost_dev *dev)
{
    if (!dev->config_ops || !dev->config_ops->vhost_dev_config_notifier) {
        return -ENOSYS;
    }

    return dev->config_ops->vhost_dev_config_notifier(dev);
}

/*
 * Fetch or create the notifier for a given idx. Newly created
 * notifiers are added to the pointer array that tracks them.
 */
static VhostUserHostNotifier *fetch_or_create_notifier(VhostUserState *u,
                                                       int idx)
{
    VhostUserHostNotifier *n = NULL;
    if (idx >= u->notifiers->len) {
        g_ptr_array_set_size(u->notifiers, idx + 1);
    }

    n = g_ptr_array_index(u->notifiers, idx);
    if (!n) {
        n = g_new0(VhostUserHostNotifier, 1);
        n->idx = idx;
        g_ptr_array_insert(u->notifiers, idx, n);
        trace_vhost_user_create_notifier(idx, n);
    }

    return n;
}

static int vhost_user_slave_handle_vring_host_notifier(struct vhost_dev *dev,
                                                       VhostUserVringArea *area,
                                                       int fd)
{
    int queue_idx = area->u64 & VHOST_USER_VRING_IDX_MASK;
    size_t page_size = qemu_real_host_page_size();
    struct vhost_user *u = dev->opaque;
    VhostUserState *user = u->user;
    VirtIODevice *vdev = dev->vdev;
    VhostUserHostNotifier *n;
    void *addr;
    char *name;

    if (!virtio_has_feature(dev->protocol_features,
                            VHOST_USER_PROTOCOL_F_HOST_NOTIFIER) ||
        vdev == NULL || queue_idx >= virtio_get_num_queues(vdev)) {
        return -EINVAL;
    }

    /*
     * Fetch notifier and invalidate any old data before setting up
     * new mapped address.
     */
    n = fetch_or_create_notifier(user, queue_idx);
    vhost_user_host_notifier_remove(n, vdev);

    if (area->u64 & VHOST_USER_VRING_NOFD_MASK) {
        return 0;
    }

    /* Sanity check. */
    if (area->size != page_size) {
        return -EINVAL;
    }

    addr = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                fd, area->offset);
    if (addr == MAP_FAILED) {
        return -EFAULT;
    }

    name = g_strdup_printf("vhost-user/host-notifier@%p mmaps[%d]",
                           user, queue_idx);
    if (!n->mr.ram) { /* Don't init again after suspend. */
        memory_region_init_ram_device_ptr(&n->mr, OBJECT(vdev), name,
                                          page_size, addr);
    } else {
        n->mr.ram_block->host = addr;
    }
    g_free(name);

    if (virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, true)) {
        object_unparent(OBJECT(&n->mr));
        munmap(addr, page_size);
        return -ENXIO;
    }

    n->addr = addr;

    return 0;
}

1617 static void close_slave_channel(struct vhost_user *u)
1618 {
1619     g_source_destroy(u->slave_src);
1620     g_source_unref(u->slave_src);
1621     u->slave_src = NULL;
1622     object_unref(OBJECT(u->slave_ioc));
1623     u->slave_ioc = NULL;
1624 }
1625 
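     /*
      * GSource callback for the slave (backend-to-frontend) channel. Each
      * message is a VhostUserHeader followed by hdr.size bytes of payload,
      * optionally with file descriptors attached as ancillary data. If the
      * sender set NEED_REPLY, we write back the same header with a u64
      * payload of 0 on success or 1 on failure.
      */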
1626 static gboolean slave_read(QIOChannel *ioc, GIOCondition condition,
1627                            gpointer opaque)
1628 {
1629     struct vhost_dev *dev = opaque;
1630     struct vhost_user *u = dev->opaque;
1631     VhostUserHeader hdr = { 0, };
1632     VhostUserPayload payload = { 0, };
1633     Error *local_err = NULL;
1634     gboolean rc = G_SOURCE_CONTINUE;
1635     int ret = 0;
1636     struct iovec iov;
1637     g_autofree int *fd = NULL;
1638     size_t fdsize = 0;
1639     int i;
1640 
1641     /* Read header */
1642     iov.iov_base = &hdr;
1643     iov.iov_len = VHOST_USER_HDR_SIZE;
1644 
1645     if (qio_channel_readv_full_all(ioc, &iov, 1, &fd, &fdsize, &local_err)) {
1646         error_report_err(local_err);
1647         goto err;
1648     }
1649 
1650     if (hdr.size > VHOST_USER_PAYLOAD_SIZE) {
1651         error_report("Failed to read msg header."
1652                 " Size %u exceeds the maximum %zu.", hdr.size,
1653                 VHOST_USER_PAYLOAD_SIZE);
1654         goto err;
1655     }
1656 
1657     /* Read payload */
1658     if (qio_channel_read_all(ioc, (char *) &payload, hdr.size, &local_err)) {
1659         error_report_err(local_err);
1660         goto err;
1661     }
1662 
1663     switch (hdr.request) {
1664     case VHOST_USER_SLAVE_IOTLB_MSG:
1665         ret = vhost_backend_handle_iotlb_msg(dev, &payload.iotlb);
1666         break;
1667     case VHOST_USER_SLAVE_CONFIG_CHANGE_MSG:
1668         ret = vhost_user_slave_handle_config_change(dev);
1669         break;
1670     case VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG:
1671         ret = vhost_user_slave_handle_vring_host_notifier(dev, &payload.area,
1672                                                           fd ? fd[0] : -1);
1673         break;
1674     default:
1675         error_report("Received unexpected msg type: %d.", hdr.request);
1676         ret = -EINVAL;
1677     }
1678 
1679     /*
1680      * REPLY_ACK feature handling. Other reply types have to be managed
1681      * directly in their request handlers.
1682      */
1683     if (hdr.flags & VHOST_USER_NEED_REPLY_MASK) {
1684         struct iovec iovec[2];
1685 
1686 
1687         hdr.flags &= ~VHOST_USER_NEED_REPLY_MASK;
1688         hdr.flags |= VHOST_USER_REPLY_MASK;
1689 
1690         payload.u64 = !!ret;
1691         hdr.size = sizeof(payload.u64);
1692 
1693         iovec[0].iov_base = &hdr;
1694         iovec[0].iov_len = VHOST_USER_HDR_SIZE;
1695         iovec[1].iov_base = &payload;
1696         iovec[1].iov_len = hdr.size;
1697 
1698         if (qio_channel_writev_all(ioc, iovec, ARRAY_SIZE(iovec), &local_err)) {
1699             error_report_err(local_err);
1700             goto err;
1701         }
1702     }
1703 
1704     goto fdcleanup;
1705 
1706 err:
1707     close_slave_channel(u);
1708     rc = G_SOURCE_REMOVE;
1709 
1710 fdcleanup:
1711     if (fd) {
1712         for (i = 0; i < fdsize; i++) {
1713             close(fd[i]);
1714         }
1715     }
1716     return rc;
1717 }
1718 
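     /*
      * Set up the slave channel: create a socketpair, hand one end to the
      * backend via VHOST_USER_SET_SLAVE_REQ_FD, and keep the other end as
      * a QIOChannel whose incoming messages are dispatched to slave_read()
      * above.
      */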
1719 static int vhost_setup_slave_channel(struct vhost_dev *dev)
1720 {
1721     VhostUserMsg msg = {
1722         .hdr.request = VHOST_USER_SET_SLAVE_REQ_FD,
1723         .hdr.flags = VHOST_USER_VERSION,
1724     };
1725     struct vhost_user *u = dev->opaque;
1726     int sv[2], ret = 0;
1727     bool reply_supported = virtio_has_feature(dev->protocol_features,
1728                                               VHOST_USER_PROTOCOL_F_REPLY_ACK);
1729     Error *local_err = NULL;
1730     QIOChannel *ioc;
1731 
1732     if (!virtio_has_feature(dev->protocol_features,
1733                             VHOST_USER_PROTOCOL_F_SLAVE_REQ)) {
1734         return 0;
1735     }
1736 
1737     if (qemu_socketpair(PF_UNIX, SOCK_STREAM, 0, sv) == -1) {
1738         int saved_errno = errno;
1739         error_report("socketpair() failed");
1740         return -saved_errno;
1741     }
1742 
1743     ioc = QIO_CHANNEL(qio_channel_socket_new_fd(sv[0], &local_err));
1744     if (!ioc) {
1745         error_report_err(local_err);
1746         return -ECONNREFUSED;
1747     }
1748     u->slave_ioc = ioc;
1749     slave_update_read_handler(dev, NULL);
1750 
1751     if (reply_supported) {
1752         msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
1753     }
1754 
1755     ret = vhost_user_write(dev, &msg, &sv[1], 1);
1756     if (ret) {
1757         goto out;
1758     }
1759 
1760     if (reply_supported) {
1761         ret = process_message_reply(dev, &msg);
1762     }
1763 
1764 out:
1765     close(sv[1]);
1766     if (ret) {
1767         close_slave_channel(u);
1768     }
1769 
1770     return ret;
1771 }
1772 
1773 #ifdef CONFIG_LINUX
1774 /*
1775  * Called back from the postcopy fault thread when a fault is received on our
1776  * ufd.
1777  * TODO: This is Linux specific
1778  */
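     /*
      * Illustrative example (made-up addresses): if region i has
      * postcopy_client_bases[i] == 0x7f0000000000, memory_size == 0x40000000
      * and region_rb_offset[i] == 0x2000, then a fault at 0x7f0000003000
      * yields region_offset == 0x3000 and rb_offset == 0x5000, the offset
      * that postcopy_request_shared_page() is asked to populate.
      */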
1779 static int vhost_user_postcopy_fault_handler(struct PostCopyFD *pcfd,
1780                                              void *ufd)
1781 {
1782     struct vhost_dev *dev = pcfd->data;
1783     struct vhost_user *u = dev->opaque;
1784     struct uffd_msg *msg = ufd;
1785     uint64_t faultaddr = msg->arg.pagefault.address;
1786     RAMBlock *rb = NULL;
1787     uint64_t rb_offset;
1788     int i;
1789 
1790     trace_vhost_user_postcopy_fault_handler(pcfd->idstr, faultaddr,
1791                                             dev->mem->nregions);
1792     for (i = 0; i < MIN(dev->mem->nregions, u->region_rb_len); i++) {
1793         trace_vhost_user_postcopy_fault_handler_loop(i,
1794                 u->postcopy_client_bases[i], dev->mem->regions[i].memory_size);
1795         if (faultaddr >= u->postcopy_client_bases[i]) {
1796             /* Offset of the fault address in the vhost region */
1797             uint64_t region_offset = faultaddr - u->postcopy_client_bases[i];
1798             if (region_offset < dev->mem->regions[i].memory_size) {
1799                 rb_offset = region_offset + u->region_rb_offset[i];
1800                 trace_vhost_user_postcopy_fault_handler_found(i,
1801                         region_offset, rb_offset);
1802                 rb = u->region_rb[i];
1803                 return postcopy_request_shared_page(pcfd, rb, faultaddr,
1804                                                     rb_offset);
1805             }
1806         }
1807     }
1808     error_report("%s: Failed to find region for fault %" PRIx64,
1809                  __func__, faultaddr);
1810     return -1;
1811 }
1812 
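     /*
      * Inverse of the fault handler's translation: given a RAMBlock and an
      * offset within it, find the vhost region backed by that block,
      * compute the matching address in the client's (backend's) address
      * space, and wake whatever is blocked on that page.
      */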
1813 static int vhost_user_postcopy_waker(struct PostCopyFD *pcfd, RAMBlock *rb,
1814                                      uint64_t offset)
1815 {
1816     struct vhost_dev *dev = pcfd->data;
1817     struct vhost_user *u = dev->opaque;
1818     int i;
1819 
1820     trace_vhost_user_postcopy_waker(qemu_ram_get_idstr(rb), offset);
1821 
1822     if (!u) {
1823         return 0;
1824     }
1825     /* Translate the offset into an address in the client's address space */
1826     for (i = 0; i < MIN(dev->mem->nregions, u->region_rb_len); i++) {
1827         if (u->region_rb[i] == rb &&
1828             offset >= u->region_rb_offset[i] &&
1829             offset < (u->region_rb_offset[i] +
1830                       dev->mem->regions[i].memory_size)) {
1831             uint64_t client_addr = (offset - u->region_rb_offset[i]) +
1832                                    u->postcopy_client_bases[i];
1833             trace_vhost_user_postcopy_waker_found(client_addr);
1834             return postcopy_wake_shared(pcfd, client_addr, rb);
1835         }
1836     }
1837 
1838     trace_vhost_user_postcopy_waker_nomatch(qemu_ram_get_idstr(rb), offset);
1839     return 0;
1840 }
1841 #endif
1842 
1843 /*
1844  * Called at the start of an inbound postcopy on reception of the
1845  * 'advise' command.
1846  */
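     /*
      * The advise handshake, roughly: we send VHOST_USER_POSTCOPY_ADVISE,
      * the backend opens a userfaultfd and echoes the message back with
      * that ufd attached as an ancillary fd, and we register the ufd with
      * the postcopy fault thread so backend page faults can be serviced.
      */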
1847 static int vhost_user_postcopy_advise(struct vhost_dev *dev, Error **errp)
1848 {
1849 #ifdef CONFIG_LINUX
1850     struct vhost_user *u = dev->opaque;
1851     CharBackend *chr = u->user->chr;
1852     int ufd;
1853     int ret;
1854     VhostUserMsg msg = {
1855         .hdr.request = VHOST_USER_POSTCOPY_ADVISE,
1856         .hdr.flags = VHOST_USER_VERSION,
1857     };
1858 
1859     ret = vhost_user_write(dev, &msg, NULL, 0);
1860     if (ret < 0) {
1861         error_setg(errp, "Failed to send postcopy_advise to vhost");
1862         return ret;
1863     }
1864 
1865     ret = vhost_user_read(dev, &msg);
1866     if (ret < 0) {
1867         error_setg(errp, "Failed to get postcopy_advise reply from vhost");
1868         return ret;
1869     }
1870 
1871     if (msg.hdr.request != VHOST_USER_POSTCOPY_ADVISE) {
1872         error_setg(errp, "Unexpected msg type. Expected %d, received %d",
1873                      VHOST_USER_POSTCOPY_ADVISE, msg.hdr.request);
1874         return -EPROTO;
1875     }
1876 
1877     if (msg.hdr.size) {
1878         error_setg(errp, "Received bad msg size.");
1879         return -EPROTO;
1880     }
1881     ufd = qemu_chr_fe_get_msgfd(chr);
1882     if (ufd < 0) {
1883         error_setg(errp, "%s: Failed to get ufd", __func__);
1884         return -EIO;
1885     }
1886     qemu_socket_set_nonblock(ufd);
1887 
1888     /* register ufd with userfault thread */
1889     u->postcopy_fd.fd = ufd;
1890     u->postcopy_fd.data = dev;
1891     u->postcopy_fd.handler = vhost_user_postcopy_fault_handler;
1892     u->postcopy_fd.waker = vhost_user_postcopy_waker;
1893     u->postcopy_fd.idstr = "vhost-user"; /* Need to find unique name */
1894     postcopy_register_shared_ufd(&u->postcopy_fd);
1895     return 0;
1896 #else
1897     error_setg(errp, "Postcopy not supported on non-Linux systems");
1898     return -ENOSYS;
1899 #endif
1900 }
1901 
1902 /*
1903  * Called at the switch to postcopy on reception of the 'listen' command.
1904  */
1905 static int vhost_user_postcopy_listen(struct vhost_dev *dev, Error **errp)
1906 {
1907     struct vhost_user *u = dev->opaque;
1908     int ret;
1909     VhostUserMsg msg = {
1910         .hdr.request = VHOST_USER_POSTCOPY_LISTEN,
1911         .hdr.flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK,
1912     };
1913     u->postcopy_listen = true;
1914 
1915     trace_vhost_user_postcopy_listen();
1916 
1917     ret = vhost_user_write(dev, &msg, NULL, 0);
1918     if (ret < 0) {
1919         error_setg(errp, "Failed to send postcopy_listen to vhost");
1920         return ret;
1921     }
1922 
1923     ret = process_message_reply(dev, &msg);
1924     if (ret) {
1925         error_setg(errp, "Failed to receive reply to postcopy_listen");
1926         return ret;
1927     }
1928 
1929     return 0;
1930 }
1931 
1932 /*
1933  * Called at the end of postcopy.
1934  */
1935 static int vhost_user_postcopy_end(struct vhost_dev *dev, Error **errp)
1936 {
1937     VhostUserMsg msg = {
1938         .hdr.request = VHOST_USER_POSTCOPY_END,
1939         .hdr.flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK,
1940     };
1941     int ret;
1942     struct vhost_user *u = dev->opaque;
1943 
1944     trace_vhost_user_postcopy_end_entry();
1945 
1946     ret = vhost_user_write(dev, &msg, NULL, 0);
1947     if (ret < 0) {
1948         error_setg(errp, "Failed to send postcopy_end to vhost");
1949         return ret;
1950     }
1951 
1952     ret = process_message_reply(dev, &msg);
1953     if (ret) {
1954         error_setg(errp, "Failed to receive reply to postcopy_end");
1955         return ret;
1956     }
1957     postcopy_unregister_shared_ufd(&u->postcopy_fd);
1958     close(u->postcopy_fd.fd);
1959     u->postcopy_fd.handler = NULL;
1960 
1961     trace_vhost_user_postcopy_end_exit();
1962 
1963     return 0;
1964 }
1965 
1966 static int vhost_user_postcopy_notifier(NotifierWithReturn *notifier,
1967                                         void *opaque)
1968 {
1969     struct PostcopyNotifyData *pnd = opaque;
1970     struct vhost_user *u = container_of(notifier, struct vhost_user,
1971                                          postcopy_notifier);
1972     struct vhost_dev *dev = u->dev;
1973 
1974     switch (pnd->reason) {
1975     case POSTCOPY_NOTIFY_PROBE:
1976         if (!virtio_has_feature(dev->protocol_features,
1977                                 VHOST_USER_PROTOCOL_F_PAGEFAULT)) {
1978             /* TODO: Get the device name into this error somehow */
1979             error_setg(pnd->errp,
1980                        "vhost-user backend not capable of postcopy");
1981             return -ENOENT;
1982         }
1983         break;
1984 
1985     case POSTCOPY_NOTIFY_INBOUND_ADVISE:
1986         return vhost_user_postcopy_advise(dev, pnd->errp);
1987 
1988     case POSTCOPY_NOTIFY_INBOUND_LISTEN:
1989         return vhost_user_postcopy_listen(dev, pnd->errp);
1990 
1991     case POSTCOPY_NOTIFY_INBOUND_END:
1992         return vhost_user_postcopy_end(dev, pnd->errp);
1993 
1994     default:
1995         /* We ignore notifications we don't recognize */
1996         break;
1997     }
1998 
1999     return 0;
2000 }
2001 
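     /*
      * Backend initialization, in order: negotiate feature bits; if
      * VHOST_USER_F_PROTOCOL_FEATURES is offered, negotiate protocol
      * features and query the queue and memory-slot limits; validate those
      * limits; set up the slave channel (only when vq_index is 0, so a
      * single channel serves the device); and register for postcopy
      * notifications.
      */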
2002 static int vhost_user_backend_init(struct vhost_dev *dev, void *opaque,
2003                                    Error **errp)
2004 {
2005     uint64_t features, ram_slots;
2006     struct vhost_user *u;
2007     VhostUserState *vus = (VhostUserState *) opaque;
2008     int err;
2009 
2010     assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);
2011 
2012     u = g_new0(struct vhost_user, 1);
2013     u->user = vus;
2014     u->dev = dev;
2015     dev->opaque = u;
2016 
2017     err = vhost_user_get_features(dev, &features);
2018     if (err < 0) {
2019         error_setg_errno(errp, -err, "vhost_backend_init failed");
2020         return err;
2021     }
2022 
2023     if (virtio_has_feature(features, VHOST_USER_F_PROTOCOL_FEATURES)) {
2024         bool supports_f_config = vus->supports_config ||
2025             (dev->config_ops && dev->config_ops->vhost_dev_config_notifier);
2026         uint64_t protocol_features;
2027 
2028         dev->backend_features |= 1ULL << VHOST_USER_F_PROTOCOL_FEATURES;
2029 
2030         err = vhost_user_get_u64(dev, VHOST_USER_GET_PROTOCOL_FEATURES,
2031                                  &protocol_features);
2032         if (err < 0) {
2033             error_setg_errno(errp, EPROTO, "vhost_backend_init failed");
2034             return -EPROTO;
2035         }
2036 
2037         /*
2038          * We will use all the protocol features we support, although
2039          * we suppress F_CONFIG if we know QEMU's internal code cannot
2040          * support it.
2041          */
2042         protocol_features &= VHOST_USER_PROTOCOL_FEATURE_MASK;
2043 
2044         if (supports_f_config) {
2045             if (!virtio_has_feature(protocol_features,
2046                                     VHOST_USER_PROTOCOL_F_CONFIG)) {
2047                 error_setg(errp, "vhost-user device expects "
2048                            "VHOST_USER_PROTOCOL_F_CONFIG but the vhost-user backend does "
2049                            "not support it.");
2050                 return -EPROTO;
2051             }
2052         } else {
2053             if (virtio_has_feature(protocol_features,
2054                                    VHOST_USER_PROTOCOL_F_CONFIG)) {
2055                 warn_reportf_err(*errp, "vhost-user backend supports "
2056                                  "VHOST_USER_PROTOCOL_F_CONFIG but QEMU does not.");
2057                 protocol_features &= ~(1ULL << VHOST_USER_PROTOCOL_F_CONFIG);
2058             }
2059         }
2060 
2061         /* final set of protocol features */
2062         dev->protocol_features = protocol_features;
2063         err = vhost_user_set_protocol_features(dev, dev->protocol_features);
2064         if (err < 0) {
2065             error_setg_errno(errp, EPROTO, "vhost_backend_init failed");
2066             return -EPROTO;
2067         }
2068 
2069         /* Query the max queues we support if the backend supports multiqueue. */
2070         if (dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_MQ)) {
2071             err = vhost_user_get_u64(dev, VHOST_USER_GET_QUEUE_NUM,
2072                                      &dev->max_queues);
2073             if (err < 0) {
2074                 error_setg_errno(errp, EPROTO, "vhost_backend_init failed");
2075                 return -EPROTO;
2076             }
2077         } else {
2078             dev->max_queues = 1;
2079         }
2080 
2081         if (dev->num_queues && dev->max_queues < dev->num_queues) {
2082             error_setg(errp, "The maximum number of queues supported by the "
2083                        "backend is %" PRIu64, dev->max_queues);
2084             return -EINVAL;
2085         }
2086 
2087         if (virtio_has_feature(features, VIRTIO_F_IOMMU_PLATFORM) &&
2088                 !(virtio_has_feature(dev->protocol_features,
2089                     VHOST_USER_PROTOCOL_F_SLAVE_REQ) &&
2090                  virtio_has_feature(dev->protocol_features,
2091                     VHOST_USER_PROTOCOL_F_REPLY_ACK))) {
2092             error_setg(errp, "IOMMU support requires reply-ack and "
2093                        "slave-req protocol features.");
2094             return -EINVAL;
2095         }
2096 
2097         /* get max memory regions if backend supports configurable RAM slots */
2098         if (!virtio_has_feature(dev->protocol_features,
2099                                 VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS)) {
2100             u->user->memory_slots = VHOST_MEMORY_BASELINE_NREGIONS;
2101         } else {
2102             err = vhost_user_get_max_memslots(dev, &ram_slots);
2103             if (err < 0) {
2104                 error_setg_errno(errp, EPROTO, "vhost_backend_init failed");
2105                 return -EPROTO;
2106             }
2107 
2108             if (ram_slots < u->user->memory_slots) {
2109                 error_setg(errp, "The backend specified a max ram slots limit "
2110                            "of %" PRIu64", when the prior validated limit was "
2111                            "%d. This limit should never decrease.", ram_slots,
2112                            u->user->memory_slots);
2113                 return -EINVAL;
2114             }
2115 
2116             u->user->memory_slots = MIN(ram_slots, VHOST_USER_MAX_RAM_SLOTS);
2117         }
2118     }
2119 
2120     if (dev->migration_blocker == NULL &&
2121         !virtio_has_feature(dev->protocol_features,
2122                             VHOST_USER_PROTOCOL_F_LOG_SHMFD)) {
2123         error_setg(&dev->migration_blocker,
2124                    "Migration disabled: vhost-user backend lacks "
2125                    "VHOST_USER_PROTOCOL_F_LOG_SHMFD feature.");
2126     }
2127 
2128     if (dev->vq_index == 0) {
2129         err = vhost_setup_slave_channel(dev);
2130         if (err < 0) {
2131             error_setg_errno(errp, EPROTO, "vhost_backend_init failed");
2132             return -EPROTO;
2133         }
2134     }
2135 
2136     u->postcopy_notifier.notify = vhost_user_postcopy_notifier;
2137     postcopy_add_notifier(&u->postcopy_notifier);
2138 
2139     return 0;
2140 }
2141 
2142 static int vhost_user_backend_cleanup(struct vhost_dev *dev)
2143 {
2144     struct vhost_user *u;
2145 
2146     assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);
2147 
2148     u = dev->opaque;
2149     if (u->postcopy_notifier.notify) {
2150         postcopy_remove_notifier(&u->postcopy_notifier);
2151         u->postcopy_notifier.notify = NULL;
2152     }
2153     u->postcopy_listen = false;
2154     if (u->postcopy_fd.handler) {
2155         postcopy_unregister_shared_ufd(&u->postcopy_fd);
2156         close(u->postcopy_fd.fd);
2157         u->postcopy_fd.handler = NULL;
2158     }
2159     if (u->slave_ioc) {
2160         close_slave_channel(u);
2161     }
2162     g_free(u->region_rb);
2163     u->region_rb = NULL;
2164     g_free(u->region_rb_offset);
2165     u->region_rb_offset = NULL;
2166     u->region_rb_len = 0;
2167     g_free(u);
2168     dev->opaque = NULL;
2169 
2170     return 0;
2171 }
2172 
2173 static int vhost_user_get_vq_index(struct vhost_dev *dev, int idx)
2174 {
2175     assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);
2176 
2177     return idx;
2178 }
2179 
2180 static int vhost_user_memslots_limit(struct vhost_dev *dev)
2181 {
2182     struct vhost_user *u = dev->opaque;
2183 
2184     return u->user->memory_slots;
2185 }
2186 
2187 static bool vhost_user_requires_shm_log(struct vhost_dev *dev)
2188 {
2189     assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);
2190 
2191     return virtio_has_feature(dev->protocol_features,
2192                               VHOST_USER_PROTOCOL_F_LOG_SHMFD);
2193 }
2194 
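     /*
      * Post-migration announce: if the guest can announce itself there is
      * nothing to do; otherwise ask a RARP-capable backend to broadcast a
      * RARP for mac_addr so that switches relearn where the guest's MAC
      * now lives.
      */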
2195 static int vhost_user_migration_done(struct vhost_dev *dev, char* mac_addr)
2196 {
2197     VhostUserMsg msg = { };
2198 
2199     assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);
2200 
2201     /* If the guest supports GUEST_ANNOUNCE, do nothing. */
2202     if (virtio_has_feature(dev->acked_features, VIRTIO_NET_F_GUEST_ANNOUNCE)) {
2203         return 0;
2204     }
2205 
2206     /* If the backend supports VHOST_USER_PROTOCOL_F_RARP, ask it to send the RARP. */
2207     if (virtio_has_feature(dev->protocol_features,
2208                            VHOST_USER_PROTOCOL_F_RARP)) {
2209         msg.hdr.request = VHOST_USER_SEND_RARP;
2210         msg.hdr.flags = VHOST_USER_VERSION;
2211         memcpy((char *)&msg.payload.u64, mac_addr, 6);
2212         msg.hdr.size = sizeof(msg.payload.u64);
2213 
2214         return vhost_user_write(dev, &msg, NULL, 0);
2215     }
2216     return -ENOTSUP;
2217 }
2218 
2219 static bool vhost_user_can_merge(struct vhost_dev *dev,
2220                                  uint64_t start1, uint64_t size1,
2221                                  uint64_t start2, uint64_t size2)
2222 {
2223     ram_addr_t offset;
2224     int mfd, rfd;
2225 
2226     (void)vhost_user_get_mr_data(start1, &offset, &mfd);
2227     (void)vhost_user_get_mr_data(start2, &offset, &rfd);
2228 
2229     return mfd == rfd;
2230 }
2231 
2232 static int vhost_user_net_set_mtu(struct vhost_dev *dev, uint16_t mtu)
2233 {
2234     VhostUserMsg msg;
2235     bool reply_supported = virtio_has_feature(dev->protocol_features,
2236                                               VHOST_USER_PROTOCOL_F_REPLY_ACK);
2237     int ret;
2238 
2239     if (!(dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_NET_MTU))) {
2240         return 0;
2241     }
2242 
2243     msg.hdr.request = VHOST_USER_NET_SET_MTU;
2244     msg.payload.u64 = mtu;
2245     msg.hdr.size = sizeof(msg.payload.u64);
2246     msg.hdr.flags = VHOST_USER_VERSION;
2247     if (reply_supported) {
2248         msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
2249     }
2250 
2251     ret = vhost_user_write(dev, &msg, NULL, 0);
2252     if (ret < 0) {
2253         return ret;
2254     }
2255 
2256     /* If reply_ack is supported, the slave has to ack that the specified MTU is valid. */
2257     if (reply_supported) {
2258         return process_message_reply(dev, &msg);
2259     }
2260 
2261     return 0;
2262 }
2263 
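     /*
      * Forward an IOTLB update or invalidation to the backend. These
      * messages always set NEED_REPLY, so the call is synchronous and only
      * returns once the backend has acked the translation.
      */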
2264 static int vhost_user_send_device_iotlb_msg(struct vhost_dev *dev,
2265                                             struct vhost_iotlb_msg *imsg)
2266 {
2267     int ret;
2268     VhostUserMsg msg = {
2269         .hdr.request = VHOST_USER_IOTLB_MSG,
2270         .hdr.size = sizeof(msg.payload.iotlb),
2271         .hdr.flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK,
2272         .payload.iotlb = *imsg,
2273     };
2274 
2275     ret = vhost_user_write(dev, &msg, NULL, 0);
2276     if (ret < 0) {
2277         return ret;
2278     }
2279 
2280     return process_message_reply(dev, &msg);
2281 }
2282 
2283 
2284 static void vhost_user_set_iotlb_callback(struct vhost_dev *dev, int enabled)
2285 {
2286     /* No-op as the receive channel is not dedicated to IOTLB messages. */
2287 }
2288 
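     /*
      * Config space access, sketched: both directions use a payload of
      * { u32 offset; u32 size; u32 flags; u8 region[VHOST_USER_MAX_CONFIG_SIZE]; }.
      * For a read we send offset/size and the backend echoes the request
      * back with the bytes filled into region[]; a reply whose hdr.size is
      * not the config header plus config_len is rejected below.
      */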
2289 static int vhost_user_get_config(struct vhost_dev *dev, uint8_t *config,
2290                                  uint32_t config_len, Error **errp)
2291 {
2292     int ret;
2293     VhostUserMsg msg = {
2294         .hdr.request = VHOST_USER_GET_CONFIG,
2295         .hdr.flags = VHOST_USER_VERSION,
2296         .hdr.size = VHOST_USER_CONFIG_HDR_SIZE + config_len,
2297     };
2298 
2299     if (!virtio_has_feature(dev->protocol_features,
2300                 VHOST_USER_PROTOCOL_F_CONFIG)) {
2301         error_setg(errp, "VHOST_USER_PROTOCOL_F_CONFIG not supported");
2302         return -EINVAL;
2303     }
2304 
2305     assert(config_len <= VHOST_USER_MAX_CONFIG_SIZE);
2306 
2307     msg.payload.config.offset = 0;
2308     msg.payload.config.size = config_len;
2309     ret = vhost_user_write(dev, &msg, NULL, 0);
2310     if (ret < 0) {
2311         error_setg_errno(errp, -ret, "vhost_get_config failed");
2312         return ret;
2313     }
2314 
2315     ret = vhost_user_read(dev, &msg);
2316     if (ret < 0) {
2317         error_setg_errno(errp, -ret, "vhost_get_config failed");
2318         return ret;
2319     }
2320 
2321     if (msg.hdr.request != VHOST_USER_GET_CONFIG) {
2322         error_setg(errp,
2323                    "Received unexpected msg type. Expected %d, received %d",
2324                    VHOST_USER_GET_CONFIG, msg.hdr.request);
2325         return -EPROTO;
2326     }
2327 
2328     if (msg.hdr.size != VHOST_USER_CONFIG_HDR_SIZE + config_len) {
2329         error_setg(errp, "Received bad msg size.");
2330         return -EPROTO;
2331     }
2332 
2333     memcpy(config, msg.payload.config.region, config_len);
2334 
2335     return 0;
2336 }
2337 
2338 static int vhost_user_set_config(struct vhost_dev *dev, const uint8_t *data,
2339                                  uint32_t offset, uint32_t size, uint32_t flags)
2340 {
2341     int ret;
2342     uint8_t *p;
2343     bool reply_supported = virtio_has_feature(dev->protocol_features,
2344                                               VHOST_USER_PROTOCOL_F_REPLY_ACK);
2345 
2346     VhostUserMsg msg = {
2347         .hdr.request = VHOST_USER_SET_CONFIG,
2348         .hdr.flags = VHOST_USER_VERSION,
2349         .hdr.size = VHOST_USER_CONFIG_HDR_SIZE + size,
2350     };
2351 
2352     if (!virtio_has_feature(dev->protocol_features,
2353                 VHOST_USER_PROTOCOL_F_CONFIG)) {
2354         return -ENOTSUP;
2355     }
2356 
2357     if (reply_supported) {
2358         msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
2359     }
2360 
2361     if (size > VHOST_USER_MAX_CONFIG_SIZE) {
2362         return -EINVAL;
2363     }
2364 
2365     msg.payload.config.offset = offset;
2366     msg.payload.config.size = size;
2367     msg.payload.config.flags = flags;
2368     p = msg.payload.config.region;
2369     memcpy(p, data, size);
2370 
2371     ret = vhost_user_write(dev, &msg, NULL, 0);
2372     if (ret < 0) {
2373         return ret;
2374     }
2375 
2376     if (reply_supported) {
2377         return process_message_reply(dev, &msg);
2378     }
2379 
2380     return 0;
2381 }
2382 
2383 static int vhost_user_crypto_create_session(struct vhost_dev *dev,
2384                                             void *session_info,
2385                                             uint64_t *session_id)
2386 {
2387     int ret;
2388     bool crypto_session = virtio_has_feature(dev->protocol_features,
2389                                        VHOST_USER_PROTOCOL_F_CRYPTO_SESSION);
2390     CryptoDevBackendSymSessionInfo *sess_info = session_info;
2391     VhostUserMsg msg = {
2392         .hdr.request = VHOST_USER_CREATE_CRYPTO_SESSION,
2393         .hdr.flags = VHOST_USER_VERSION,
2394         .hdr.size = sizeof(msg.payload.session),
2395     };
2396 
2397     assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);
2398 
2399     if (!crypto_session) {
2400         error_report("vhost-user trying to send unhandled ioctl");
2401         return -ENOTSUP;
2402     }
2403 
2404     memcpy(&msg.payload.session.session_setup_data, sess_info,
2405            sizeof(CryptoDevBackendSymSessionInfo));
2406     if (sess_info->key_len) {
2407         memcpy(&msg.payload.session.key, sess_info->cipher_key,
2408                sess_info->key_len);
2409     }
2410     if (sess_info->auth_key_len > 0) {
2411         memcpy(&msg.payload.session.auth_key, sess_info->auth_key,
2412                sess_info->auth_key_len);
2413     }
2414     ret = vhost_user_write(dev, &msg, NULL, 0);
2415     if (ret < 0) {
2416         error_report("vhost_user_write() returned %d, create session failed",
2417                      ret);
2418         return ret;
2419     }
2420 
2421     ret = vhost_user_read(dev, &msg);
2422     if (ret < 0) {
2423         error_report("vhost_user_read() returned %d, create session failed",
2424                      ret);
2425         return ret;
2426     }
2427 
2428     if (msg.hdr.request != VHOST_USER_CREATE_CRYPTO_SESSION) {
2429         error_report("Received unexpected msg type. Expected %d, received %d",
2430                      VHOST_USER_CREATE_CRYPTO_SESSION, msg.hdr.request);
2431         return -EPROTO;
2432     }
2433 
2434     if (msg.hdr.size != sizeof(msg.payload.session)) {
2435         error_report("Received bad msg size.");
2436         return -EPROTO;
2437     }
2438 
2439     if (msg.payload.session.session_id < 0) {
2440         error_report("Bad session id: %" PRId64 "",
2441                               msg.payload.session.session_id);
2442         return -EINVAL;
2443     }
2444     *session_id = msg.payload.session.session_id;
2445 
2446     return 0;
2447 }
2448 
2449 static int
2450 vhost_user_crypto_close_session(struct vhost_dev *dev, uint64_t session_id)
2451 {
2452     int ret;
2453     bool crypto_session = virtio_has_feature(dev->protocol_features,
2454                                        VHOST_USER_PROTOCOL_F_CRYPTO_SESSION);
2455     VhostUserMsg msg = {
2456         .hdr.request = VHOST_USER_CLOSE_CRYPTO_SESSION,
2457         .hdr.flags = VHOST_USER_VERSION,
2458         .hdr.size = sizeof(msg.payload.u64),
2459     };
2460     msg.payload.u64 = session_id;
2461 
2462     if (!crypto_session) {
2463         error_report("vhost-user trying to send unhandled ioctl");
2464         return -ENOTSUP;
2465     }
2466 
2467     ret = vhost_user_write(dev, &msg, NULL, 0);
2468     if (ret < 0) {
2469         error_report("vhost_user_write() returned %d, close session failed",
2470                      ret);
2471         return ret;
2472     }
2473 
2474     return 0;
2475 }
2476 
2477 static bool vhost_user_mem_section_filter(struct vhost_dev *dev,
2478                                           MemoryRegionSection *section)
2479 {
2480     bool result;
2481 
2482     result = memory_region_get_fd(section->mr) >= 0;
2483 
2484     return result;
2485 }
2486 
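     /*
      * Inflight I/O tracking: the backend allocates a shared-memory region
      * recording in-flight descriptors, and GET_INFLIGHT_FD returns its fd
      * plus an mmap size and offset. We map it here and, after a backend
      * restart or migration, hand the same region back via SET_INFLIGHT_FD
      * so pending requests can be resubmitted.
      */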
2487 static int vhost_user_get_inflight_fd(struct vhost_dev *dev,
2488                                       uint16_t queue_size,
2489                                       struct vhost_inflight *inflight)
2490 {
2491     void *addr;
2492     int fd;
2493     int ret;
2494     struct vhost_user *u = dev->opaque;
2495     CharBackend *chr = u->user->chr;
2496     VhostUserMsg msg = {
2497         .hdr.request = VHOST_USER_GET_INFLIGHT_FD,
2498         .hdr.flags = VHOST_USER_VERSION,
2499         .payload.inflight.num_queues = dev->nvqs,
2500         .payload.inflight.queue_size = queue_size,
2501         .hdr.size = sizeof(msg.payload.inflight),
2502     };
2503 
2504     if (!virtio_has_feature(dev->protocol_features,
2505                             VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
2506         return 0;
2507     }
2508 
2509     ret = vhost_user_write(dev, &msg, NULL, 0);
2510     if (ret < 0) {
2511         return ret;
2512     }
2513 
2514     ret = vhost_user_read(dev, &msg);
2515     if (ret < 0) {
2516         return ret;
2517     }
2518 
2519     if (msg.hdr.request != VHOST_USER_GET_INFLIGHT_FD) {
2520         error_report("Received unexpected msg type. "
2521                      "Expected %d, received %d",
2522                      VHOST_USER_GET_INFLIGHT_FD, msg.hdr.request);
2523         return -EPROTO;
2524     }
2525 
2526     if (msg.hdr.size != sizeof(msg.payload.inflight)) {
2527         error_report("Received bad msg size.");
2528         return -EPROTO;
2529     }
2530 
2531     if (!msg.payload.inflight.mmap_size) {
2532         return 0;
2533     }
2534 
2535     fd = qemu_chr_fe_get_msgfd(chr);
2536     if (fd < 0) {
2537         error_report("Failed to get mem fd");
2538         return -EIO;
2539     }
2540 
2541     addr = mmap(NULL, msg.payload.inflight.mmap_size, PROT_READ | PROT_WRITE,
2542                 MAP_SHARED, fd, msg.payload.inflight.mmap_offset);
2543 
2544     if (addr == MAP_FAILED) {
2545         error_report("Failed to mmap mem fd");
2546         close(fd);
2547         return -EFAULT;
2548     }
2549 
2550     inflight->addr = addr;
2551     inflight->fd = fd;
2552     inflight->size = msg.payload.inflight.mmap_size;
2553     inflight->offset = msg.payload.inflight.mmap_offset;
2554     inflight->queue_size = queue_size;
2555 
2556     return 0;
2557 }
2558 
2559 static int vhost_user_set_inflight_fd(struct vhost_dev *dev,
2560                                       struct vhost_inflight *inflight)
2561 {
2562     VhostUserMsg msg = {
2563         .hdr.request = VHOST_USER_SET_INFLIGHT_FD,
2564         .hdr.flags = VHOST_USER_VERSION,
2565         .payload.inflight.mmap_size = inflight->size,
2566         .payload.inflight.mmap_offset = inflight->offset,
2567         .payload.inflight.num_queues = dev->nvqs,
2568         .payload.inflight.queue_size = inflight->queue_size,
2569         .hdr.size = sizeof(msg.payload.inflight),
2570     };
2571 
2572     if (!virtio_has_feature(dev->protocol_features,
2573                             VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
2574         return 0;
2575     }
2576 
2577     return vhost_user_write(dev, &msg, &inflight->fd, 1);
2578 }
2579 
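     /*
      * GDestroyNotify for entries of VhostUserState.notifiers; it runs for
      * each remaining notifier when the pointer array is freed in
      * vhost_user_cleanup().
      */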
2580 static void vhost_user_state_destroy(gpointer data)
2581 {
2582     VhostUserHostNotifier *n = (VhostUserHostNotifier *) data;
2583     if (n) {
2584         vhost_user_host_notifier_remove(n, NULL);
2585         object_unparent(OBJECT(&n->mr));
2586         /*
2587          * We can't free until vhost_user_host_notifier_remove has
2588          * done its thing, so schedule the free with RCU.
2589          */
2590         g_free_rcu(n, rcu);
2591     }
2592 }
2593 
2594 bool vhost_user_init(VhostUserState *user, CharBackend *chr, Error **errp)
2595 {
2596     if (user->chr) {
2597         error_setg(errp, "Cannot initialize vhost-user state");
2598         return false;
2599     }
2600     user->chr = chr;
2601     user->memory_slots = 0;
2602     user->notifiers = g_ptr_array_new_full(VIRTIO_QUEUE_MAX / 4,
2603                                            &vhost_user_state_destroy);
2604     return true;
2605 }
2606 
2607 void vhost_user_cleanup(VhostUserState *user)
2608 {
2609     if (!user->chr) {
2610         return;
2611     }
2612     memory_region_transaction_begin();
2613     user->notifiers = (GPtrArray *) g_ptr_array_free(user->notifiers, true);
2614     memory_region_transaction_commit();
2615     user->chr = NULL;
2616 }
2617 
2618 const VhostOps user_ops = {
2619         .backend_type = VHOST_BACKEND_TYPE_USER,
2620         .vhost_backend_init = vhost_user_backend_init,
2621         .vhost_backend_cleanup = vhost_user_backend_cleanup,
2622         .vhost_backend_memslots_limit = vhost_user_memslots_limit,
2623         .vhost_set_log_base = vhost_user_set_log_base,
2624         .vhost_set_mem_table = vhost_user_set_mem_table,
2625         .vhost_set_vring_addr = vhost_user_set_vring_addr,
2626         .vhost_set_vring_endian = vhost_user_set_vring_endian,
2627         .vhost_set_vring_num = vhost_user_set_vring_num,
2628         .vhost_set_vring_base = vhost_user_set_vring_base,
2629         .vhost_get_vring_base = vhost_user_get_vring_base,
2630         .vhost_set_vring_kick = vhost_user_set_vring_kick,
2631         .vhost_set_vring_call = vhost_user_set_vring_call,
2632         .vhost_set_vring_err = vhost_user_set_vring_err,
2633         .vhost_set_features = vhost_user_set_features,
2634         .vhost_get_features = vhost_user_get_features,
2635         .vhost_set_owner = vhost_user_set_owner,
2636         .vhost_reset_device = vhost_user_reset_device,
2637         .vhost_get_vq_index = vhost_user_get_vq_index,
2638         .vhost_set_vring_enable = vhost_user_set_vring_enable,
2639         .vhost_requires_shm_log = vhost_user_requires_shm_log,
2640         .vhost_migration_done = vhost_user_migration_done,
2641         .vhost_backend_can_merge = vhost_user_can_merge,
2642         .vhost_net_set_mtu = vhost_user_net_set_mtu,
2643         .vhost_set_iotlb_callback = vhost_user_set_iotlb_callback,
2644         .vhost_send_device_iotlb_msg = vhost_user_send_device_iotlb_msg,
2645         .vhost_get_config = vhost_user_get_config,
2646         .vhost_set_config = vhost_user_set_config,
2647         .vhost_crypto_create_session = vhost_user_crypto_create_session,
2648         .vhost_crypto_close_session = vhost_user_crypto_close_session,
2649         .vhost_backend_mem_section_filter = vhost_user_mem_section_filter,
2650         .vhost_get_inflight_fd = vhost_user_get_inflight_fd,
2651         .vhost_set_inflight_fd = vhost_user_set_inflight_fd,
2652 };
2653