xref: /qemu/tests/qtest/vhost-user-test.c (revision 370ed600)
1 /*
2  * QTest testcase for the vhost-user
3  *
4  * Copyright (c) 2014 Virtual Open Systems Sarl.
5  *
6  * This work is licensed under the terms of the GNU GPL, version 2 or later.
7  * See the COPYING file in the top-level directory.
8  *
9  */
10 
11 #include "qemu/osdep.h"
12 
13 #include "libqtest-single.h"
14 #include "qapi/error.h"
15 #include "qapi/qmp/qdict.h"
16 #include "qemu/config-file.h"
17 #include "qemu/option.h"
18 #include "qemu/range.h"
19 #include "qemu/sockets.h"
20 #include "chardev/char-fe.h"
21 #include "qemu/memfd.h"
22 #include "qemu/module.h"
23 #include "sysemu/sysemu.h"
24 #include "libqos/libqos.h"
25 #include "libqos/pci-pc.h"
26 #include "libqos/virtio-pci.h"
27 
28 #include "libqos/malloc-pc.h"
29 #include "libqos/qgraph_internal.h"
30 #include "hw/virtio/virtio-net.h"
31 
32 #include "standard-headers/linux/vhost_types.h"
33 #include "standard-headers/linux/virtio_ids.h"
34 #include "standard-headers/linux/virtio_net.h"
35 #include "standard-headers/linux/virtio_gpio.h"
36 
37 #ifdef CONFIG_LINUX
38 #include <sys/vfs.h>
39 #endif
40 
41 
/*
 * Command-line fragments appended to the qemu invocation.
 * QEMU_CMD_MEM backs guest RAM with a shared file (hugetlbfs or tmpfs);
 * QEMU_CMD_MEMFD uses a memfd-backed memory object instead.
 * NOTE(review): QEMU_CMD_MEMFD leaves a trailing "," after size=%dM —
 * qemu's option parser tolerates it, but confirm it is intentional.
 */
#define QEMU_CMD_MEM    " -m %d -object memory-backend-file,id=mem,size=%dM," \
                        "mem-path=%s,share=on -numa node,memdev=mem"
#define QEMU_CMD_MEMFD  " -m %d -object memory-backend-memfd,id=mem,size=%dM," \
                        " -numa node,memdev=mem"
#define QEMU_CMD_CHR    " -chardev socket,id=%s,path=%s%s"
#define QEMU_CMD_NETDEV " -netdev vhost-user,id=hs0,chardev=%s,vhostforce=on"

#define HUGETLBFS_MAGIC       0x958458f6
50 
51 /*********** FROM hw/virtio/vhost-user.c *************************************/
52 
53 #define VHOST_MEMORY_MAX_NREGIONS    8
54 #define VHOST_MAX_VIRTQUEUES    0x100
55 
56 #define VHOST_USER_F_PROTOCOL_FEATURES 30
57 #define VIRTIO_F_VERSION_1 32
58 
59 #define VHOST_USER_PROTOCOL_F_MQ 0
60 #define VHOST_USER_PROTOCOL_F_LOG_SHMFD 1
61 #define VHOST_USER_PROTOCOL_F_CROSS_ENDIAN   6
62 #define VHOST_USER_PROTOCOL_F_CONFIG 9
63 
64 #define VHOST_LOG_PAGE 0x1000
65 
/*
 * vhost-user protocol request codes — only the subset this stub
 * backend handles is mirrored here; values match the spec.
 */
typedef enum VhostUserRequest {
    VHOST_USER_NONE = 0,
    VHOST_USER_GET_FEATURES = 1,
    VHOST_USER_SET_FEATURES = 2,
    VHOST_USER_SET_OWNER = 3,
    VHOST_USER_RESET_OWNER = 4,
    VHOST_USER_SET_MEM_TABLE = 5,
    VHOST_USER_SET_LOG_BASE = 6,
    VHOST_USER_SET_LOG_FD = 7,
    VHOST_USER_SET_VRING_NUM = 8,
    VHOST_USER_SET_VRING_ADDR = 9,
    VHOST_USER_SET_VRING_BASE = 10,
    VHOST_USER_GET_VRING_BASE = 11,
    VHOST_USER_SET_VRING_KICK = 12,
    VHOST_USER_SET_VRING_CALL = 13,
    VHOST_USER_SET_VRING_ERR = 14,
    VHOST_USER_GET_PROTOCOL_FEATURES = 15,
    VHOST_USER_SET_PROTOCOL_FEATURES = 16,
    VHOST_USER_GET_QUEUE_NUM = 17,
    VHOST_USER_SET_VRING_ENABLE = 18,
    VHOST_USER_GET_CONFIG = 24,
    VHOST_USER_SET_CONFIG = 25,
    VHOST_USER_MAX
} VhostUserRequest;
90 
/* One guest RAM region announced via VHOST_USER_SET_MEM_TABLE. */
typedef struct VhostUserMemoryRegion {
    uint64_t guest_phys_addr;   /* GPA where the region starts */
    uint64_t memory_size;       /* region length in bytes */
    uint64_t userspace_addr;    /* qemu's own mapping address */
    uint64_t mmap_offset;       /* offset into the fd passed alongside */
} VhostUserMemoryRegion;

/* Payload of VHOST_USER_SET_MEM_TABLE: region count plus the regions. */
typedef struct VhostUserMemory {
    uint32_t nregions;
    uint32_t padding;
    VhostUserMemoryRegion regions[VHOST_MEMORY_MAX_NREGIONS];
} VhostUserMemory;
103 
/* Payload of VHOST_USER_SET_LOG_BASE: dirty-log shm size and offset. */
typedef struct VhostUserLog {
    uint64_t mmap_size;
    uint64_t mmap_offset;
} VhostUserLog;

/*
 * On-the-wire vhost-user message: fixed header (request/flags/size)
 * followed by `size` payload bytes. Packed to match qemu's layout.
 */
typedef struct VhostUserMsg {
    VhostUserRequest request;

#define VHOST_USER_VERSION_MASK     (0x3)
#define VHOST_USER_REPLY_MASK       (0x1<<2)
    uint32_t flags;
    uint32_t size; /* the following payload size */
    union {
#define VHOST_USER_VRING_IDX_MASK   (0xff)
#define VHOST_USER_VRING_NOFD_MASK  (0x1<<8)
        uint64_t u64;
        struct vhost_vring_state state;
        struct vhost_vring_addr addr;
        VhostUserMemory memory;
        VhostUserLog log;
    } payload;
} QEMU_PACKED VhostUserMsg;
126 
/* Dummy instance used only so sizeof() can size header and payload. */
static VhostUserMsg m __attribute__ ((unused));
#define VHOST_USER_HDR_SIZE (sizeof(m.request) \
                            + sizeof(m.flags) \
                            + sizeof(m.size))

#define VHOST_USER_PAYLOAD_SIZE (sizeof(m) - VHOST_USER_HDR_SIZE)

/* The version of the protocol we support */
#define VHOST_USER_VERSION    (0x1)
136 /*****************************************************************************/
137 
/*
 * State machine of the flags-mismatch test, advanced by chr_read()
 * and chr_event(): DISCONNECT -> BAD -> END -> OK on the expected
 * disconnect.
 */
enum {
    TEST_FLAGS_OK,
    TEST_FLAGS_DISCONNECT,
    TEST_FLAGS_BAD,
    TEST_FLAGS_END,
};

/* Device flavours a TestServer can emulate. */
enum {
    VHOST_USER_NET,
    VHOST_USER_GPIO,
};
149 
/* State for one emulated vhost-user backend instance. */
typedef struct TestServer {
    gchar *socket_path;     /* vhost-user unix socket path */
    gchar *mig_path;        /* migration unix socket path */
    gchar *chr_name;        /* chardev id used on the qemu command line */
    gchar *tmpfs;           /* private temp dir holding the sockets */
    CharBackend chr;
    int fds_num;            /* number of region fds received */
    int fds[VHOST_MEMORY_MAX_NREGIONS];
    VhostUserMemory memory; /* last SET_MEM_TABLE payload */
    GMainContext *context;  /* context served by `thread` */
    GMainLoop *loop;
    GThread *thread;
    GMutex data_mutex;      /* with data_cond, guards the fields above/below */
    GCond data_cond;
    int log_fd;             /* dirty-log shm fd, -1 when absent */
    uint64_t rings;         /* bitmask of rings whose base has been set */
    bool test_fail;         /* drop the first connection when set */
    int test_flags;         /* TEST_FLAGS_* state machine */
    int queues;             /* advertised queue pairs */
    struct vhost_user_ops *vu_ops;
} TestServer;
171 
/* Per-device-type callbacks used by the generic chr_read() dispatcher. */
struct vhost_user_ops {
    /* Device types. */
    int type;
    /* Append device-specific qemu command-line options. */
    void (*append_opts)(TestServer *s, GString *cmd_line,
            const char *chr_opts);

    /* VHOST-USER commands. */
    uint64_t (*get_features)(TestServer *s);
    void (*set_features)(TestServer *s, CharBackend *chr,
                         VhostUserMsg *msg);
    void (*get_protocol_features)(TestServer *s,
                                  CharBackend *chr, VhostUserMsg *msg);
};
185 
static const char *init_hugepagefs(void);
static TestServer *test_server_new(const gchar *name,
        struct vhost_user_ops *ops);
static void test_server_free(TestServer *server);
static void test_server_listen(TestServer *server);

/* How append_mem_opts() should back guest memory. */
enum test_memfd {
    TEST_MEMFD_AUTO,    /* memfd if the host supports sealing, else file */
    TEST_MEMFD_YES,
    TEST_MEMFD_NO,
};
197 
198 static void append_vhost_net_opts(TestServer *s, GString *cmd_line,
199                              const char *chr_opts)
200 {
201     g_string_append_printf(cmd_line, QEMU_CMD_CHR QEMU_CMD_NETDEV,
202                            s->chr_name, s->socket_path,
203                            chr_opts, s->chr_name);
204 }
205 
206 /*
207  * For GPIO there are no other magic devices we need to add (like
208  * block or netdev) so all we need to worry about is the vhost-user
209  * chardev socket.
210  */
211 static void append_vhost_gpio_opts(TestServer *s, GString *cmd_line,
212                              const char *chr_opts)
213 {
214     g_string_append_printf(cmd_line, QEMU_CMD_CHR,
215                            s->chr_name, s->socket_path,
216                            chr_opts);
217 }
218 
219 static void append_mem_opts(TestServer *server, GString *cmd_line,
220                             int size, enum test_memfd memfd)
221 {
222     if (memfd == TEST_MEMFD_AUTO) {
223         memfd = qemu_memfd_check(MFD_ALLOW_SEALING) ? TEST_MEMFD_YES
224                                                     : TEST_MEMFD_NO;
225     }
226 
227     if (memfd == TEST_MEMFD_YES) {
228         g_string_append_printf(cmd_line, QEMU_CMD_MEMFD, size, size);
229     } else {
230         const char *root = init_hugepagefs() ? : server->tmpfs;
231 
232         g_string_append_printf(cmd_line, QEMU_CMD_MEM, size, size, root);
233     }
234 }
235 
/*
 * Block until the backend has received a memory table (fds_num != 0),
 * then check whether any region is mapped at guest physical address 0.
 * Returns false (and marks the test skipped) when no region starts at
 * GPA 0; aborts via g_assert on a 5 second timeout.
 */
static bool wait_for_fds(TestServer *s)
{
    gint64 end_time;
    bool got_region;
    int i;

    g_mutex_lock(&s->data_mutex);

    /* give qemu up to 5 seconds to send SET_MEM_TABLE */
    end_time = g_get_monotonic_time() + 5 * G_TIME_SPAN_SECOND;
    while (!s->fds_num) {
        if (!g_cond_wait_until(&s->data_cond, &s->data_mutex, end_time)) {
            /* timeout has passed */
            g_assert(s->fds_num);
            break;
        }
    }

    /* check for sanity */
    g_assert_cmpint(s->fds_num, >, 0);
    g_assert_cmpint(s->fds_num, ==, s->memory.nregions);

    g_mutex_unlock(&s->data_mutex);

    /*
     * NOTE(review): s->memory is scanned after dropping the lock; safe
     * only if no concurrent SET_MEM_TABLE can arrive here — confirm.
     */
    got_region = false;
    for (i = 0; i < s->memory.nregions; ++i) {
        VhostUserMemoryRegion *reg = &s->memory.regions[i];
        if (reg->guest_phys_addr == 0) {
            got_region = true;
            break;
        }
    }
    if (!got_region) {
        g_test_skip("No memory at address 0x0");
    }
    return got_region;
}
272 
273 static void read_guest_mem_server(QTestState *qts, TestServer *s)
274 {
275     uint8_t *guest_mem;
276     int i, j;
277     size_t size;
278 
279     g_mutex_lock(&s->data_mutex);
280 
281     /* iterate all regions */
282     for (i = 0; i < s->fds_num; i++) {
283 
284         /* We'll check only the region statring at 0x0*/
285         if (s->memory.regions[i].guest_phys_addr != 0x0) {
286             continue;
287         }
288 
289         g_assert_cmpint(s->memory.regions[i].memory_size, >, 1024);
290 
291         size = s->memory.regions[i].memory_size +
292             s->memory.regions[i].mmap_offset;
293 
294         guest_mem = mmap(0, size, PROT_READ | PROT_WRITE,
295                          MAP_SHARED, s->fds[i], 0);
296 
297         g_assert(guest_mem != MAP_FAILED);
298         guest_mem += (s->memory.regions[i].mmap_offset / sizeof(*guest_mem));
299 
300         for (j = 0; j < 1024; j++) {
301             uint32_t a = qtest_readb(qts, s->memory.regions[i].guest_phys_addr + j);
302             uint32_t b = guest_mem[j];
303 
304             g_assert_cmpint(a, ==, b);
305         }
306 
307         munmap(guest_mem, s->memory.regions[i].memory_size);
308     }
309 
310     g_mutex_unlock(&s->data_mutex);
311 }
312 
313 static void *thread_function(void *data)
314 {
315     GMainLoop *loop = data;
316     g_main_loop_run(loop);
317     return NULL;
318 }
319 
/* Chardev callback: always consume exactly one message header. */
static int chr_can_read(void *opaque)
{
    return VHOST_USER_HDR_SIZE;
}
324 
/*
 * Chardev read handler: parse one vhost-user message from qemu and
 * act as a minimal stub backend. Replies are written for requests the
 * protocol requires an answer to; state changes (mem table, rings,
 * log fd) are published under data_mutex and signalled via data_cond.
 */
static void chr_read(void *opaque, const uint8_t *buf, int size)
{
    g_autoptr(GError) err = NULL;
    TestServer *s = opaque;
    CharBackend *chr = &s->chr;
    VhostUserMsg msg;
    uint8_t *p = (uint8_t *) &msg;
    int fd = -1;

    /* connect-fail test: drop the very first connection attempt */
    if (s->test_fail) {
        qemu_chr_fe_disconnect(chr);
        /* now switch to non-failure */
        s->test_fail = false;
    }

    if (size != VHOST_USER_HDR_SIZE) {
        qos_printf("%s: Wrong message size received %d\n", __func__, size);
        return;
    }

    g_mutex_lock(&s->data_mutex);
    memcpy(p, buf, VHOST_USER_HDR_SIZE);

    /* pull in the payload announced by the header, if any */
    if (msg.size) {
        p += VHOST_USER_HDR_SIZE;
        size = qemu_chr_fe_read_all(chr, p, msg.size);
        if (size != msg.size) {
            qos_printf("%s: Wrong message size received %d != %d\n",
                       __func__, size, msg.size);
            goto out;
        }
    }

    switch (msg.request) {
    case VHOST_USER_GET_FEATURES:
        /* Mandatory for tests to define get_features */
        g_assert(s->vu_ops->get_features);

        /* send back features to qemu */
        msg.flags |= VHOST_USER_REPLY_MASK;
        msg.size = sizeof(m.payload.u64);

        /* flags-mismatch test: reply with no features to force an error */
        if (s->test_flags >= TEST_FLAGS_BAD) {
            msg.payload.u64 = 0;
            s->test_flags = TEST_FLAGS_END;
        } else {
            msg.payload.u64 = s->vu_ops->get_features(s);
        }

        qemu_chr_fe_write_all(chr, (uint8_t *) &msg,
                              VHOST_USER_HDR_SIZE + msg.size);
        break;

    case VHOST_USER_SET_FEATURES:
        if (s->vu_ops->set_features) {
            s->vu_ops->set_features(s, chr, &msg);
        }
        break;

    case VHOST_USER_SET_OWNER:
        /*
         * We don't need to do anything here, the remote is just
         * letting us know it is in charge. Just log it.
         */
        qos_printf("set_owner: start of session\n");
        break;

    case VHOST_USER_GET_PROTOCOL_FEATURES:
        if (s->vu_ops->get_protocol_features) {
            s->vu_ops->get_protocol_features(s, chr, &msg);
        }
        break;

    case VHOST_USER_GET_CONFIG:
        /*
         * Treat GET_CONFIG as a NOP and just reply and let the guest
         * consider we have updated its memory. Tests currently don't
         * require working configs.
         */
        msg.flags |= VHOST_USER_REPLY_MASK;
        p = (uint8_t *) &msg;
        qemu_chr_fe_write_all(chr, p, VHOST_USER_HDR_SIZE + msg.size);
        break;

    case VHOST_USER_SET_PROTOCOL_FEATURES:
        /*
         * We did set VHOST_USER_F_PROTOCOL_FEATURES so its valid for
         * the remote end to send this. There is no handshake reply so
         * just log the details for debugging.
         */
        qos_printf("set_protocol_features: 0x%"PRIx64 "\n", msg.payload.u64);
        break;

        /*
         * A real vhost-user backend would actually set the size and
         * address of the vrings but we can simply report them.
         */
    case VHOST_USER_SET_VRING_NUM:
        qos_printf("set_vring_num: %d/%d\n",
                   msg.payload.state.index, msg.payload.state.num);
        break;
    case VHOST_USER_SET_VRING_ADDR:
        qos_printf("set_vring_addr: 0x%"PRIx64"/0x%"PRIx64"/0x%"PRIx64"\n",
                   msg.payload.addr.avail_user_addr,
                   msg.payload.addr.desc_user_addr,
                   msg.payload.addr.used_user_addr);
        break;

    case VHOST_USER_GET_VRING_BASE:
        /* send back vring base to qemu */
        msg.flags |= VHOST_USER_REPLY_MASK;
        msg.size = sizeof(m.payload.state);
        msg.payload.state.num = 0;
        p = (uint8_t *) &msg;
        qemu_chr_fe_write_all(chr, p, VHOST_USER_HDR_SIZE + msg.size);

        /* qemu fetching the base stops this ring: clear its bit */
        assert(msg.payload.state.index < s->queues * 2);
        s->rings &= ~(0x1ULL << msg.payload.state.index);
        g_cond_broadcast(&s->data_cond);
        break;

    case VHOST_USER_SET_MEM_TABLE:
        /* received the mem table */
        memcpy(&s->memory, &msg.payload.memory, sizeof(msg.payload.memory));
        s->fds_num = qemu_chr_fe_get_msgfds(chr, s->fds,
                                            G_N_ELEMENTS(s->fds));

        /* signal the test that it can continue */
        g_cond_broadcast(&s->data_cond);
        break;

    case VHOST_USER_SET_VRING_KICK:
    case VHOST_USER_SET_VRING_CALL:
        /* consume the fd */
        qemu_chr_fe_get_msgfds(chr, &fd, 1);
        /*
         * This is a non-blocking eventfd.
         * The receive function forces it to be blocking,
         * so revert it back to non-blocking.
         */
        g_unix_set_fd_nonblocking(fd, true, &err);
        g_assert_no_error(err);
        break;

    case VHOST_USER_SET_LOG_BASE:
        if (s->log_fd != -1) {
            close(s->log_fd);
            s->log_fd = -1;
        }
        qemu_chr_fe_get_msgfds(chr, &s->log_fd, 1);
        msg.flags |= VHOST_USER_REPLY_MASK;
        msg.size = 0;
        p = (uint8_t *) &msg;
        qemu_chr_fe_write_all(chr, p, VHOST_USER_HDR_SIZE);

        g_cond_broadcast(&s->data_cond);
        break;

    case VHOST_USER_SET_VRING_BASE:
        /* mark this ring as started */
        assert(msg.payload.state.index < s->queues * 2);
        s->rings |= 0x1ULL << msg.payload.state.index;
        g_cond_broadcast(&s->data_cond);
        break;

    case VHOST_USER_GET_QUEUE_NUM:
        msg.flags |= VHOST_USER_REPLY_MASK;
        msg.size = sizeof(m.payload.u64);
        msg.payload.u64 = s->queues;
        p = (uint8_t *) &msg;
        qemu_chr_fe_write_all(chr, p, VHOST_USER_HDR_SIZE + msg.size);
        break;

    case VHOST_USER_SET_VRING_ENABLE:
        /*
         * Another case we ignore as we don't need to respond. With a
         * fully functioning vhost-user we would enable/disable the
         * vring monitoring.
         */
        qos_printf("set_vring(%d)=%s\n", msg.payload.state.index,
                   msg.payload.state.num ? "enabled" : "disabled");
        break;

    default:
        qos_printf("vhost-user: un-handled message: %d\n", msg.request);
        break;
    }

out:
    g_mutex_unlock(&s->data_mutex);
}
515 
/*
 * Resolve the hugetlbfs directory to use for file-backed guest RAM.
 * Reads QTEST_HUGETLBFS_PATH; returns NULL (and possibly fails the
 * test) when unset, inaccessible, or not actually a hugetlbfs mount.
 * The successful result is cached for subsequent calls.
 */
static const char *init_hugepagefs(void)
{
#ifdef CONFIG_LINUX
    static const char *hugepagefs;
    const char *path = getenv("QTEST_HUGETLBFS_PATH");
    struct statfs fs;
    int ret;

    if (hugepagefs) {
        return hugepagefs;
    }
    if (!path) {
        return NULL;
    }

    if (access(path, R_OK | W_OK | X_OK)) {
        qos_printf("access on path (%s): %s", path, strerror(errno));
        g_test_fail();
        return NULL;
    }

    /* retry statfs if interrupted by a signal */
    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        qos_printf("statfs on path (%s): %s", path, strerror(errno));
        g_test_fail();
        return NULL;
    }

    if (fs.f_type != HUGETLBFS_MAGIC) {
        qos_printf("Warning: path not on HugeTLBFS: %s", path);
        g_test_fail();
        return NULL;
    }

    hugepagefs = path;
    return hugepagefs;
#else
    return NULL;
#endif
}
559 
/*
 * Allocate a TestServer: a private GMainContext/GMainLoop served by a
 * helper thread, plus a fresh temp dir holding the vhost-user socket
 * and migration socket paths. Caller frees with test_server_free().
 */
static TestServer *test_server_new(const gchar *name,
        struct vhost_user_ops *ops)
{
    TestServer *server = g_new0(TestServer, 1);
    g_autofree const char *tmpfs = NULL;
    GError *err = NULL;

    server->context = g_main_context_new();
    server->loop = g_main_loop_new(server->context, FALSE);

    /* run the main loop thread so the chardev may operate */
    server->thread = g_thread_new(NULL, thread_function, server->loop);

    tmpfs = g_dir_make_tmp("vhost-test-XXXXXX", &err);
    if (!tmpfs) {
        g_test_message("Can't create temporary directory in %s: %s",
                       g_get_tmp_dir(), err->message);
        g_error_free(err);
    }
    g_assert(tmpfs);

    server->tmpfs = g_strdup(tmpfs);
    server->socket_path = g_strdup_printf("%s/%s.sock", tmpfs, name);
    server->mig_path = g_strdup_printf("%s/%s.mig", tmpfs, name);
    server->chr_name = g_strdup_printf("chr-%s", name);

    g_mutex_init(&server->data_mutex);
    g_cond_init(&server->data_cond);

    /* no dirty-log fd yet; single queue pair by default */
    server->log_fd = -1;
    server->queues = 1;
    server->vu_ops = ops;

    return server;
}
595 
596 static void chr_event(void *opaque, QEMUChrEvent event)
597 {
598     TestServer *s = opaque;
599 
600     if (s->test_flags == TEST_FLAGS_END &&
601         event == CHR_EVENT_CLOSED) {
602         s->test_flags = TEST_FLAGS_OK;
603     }
604 }
605 
606 static void test_server_create_chr(TestServer *server, const gchar *opt)
607 {
608     g_autofree gchar *chr_path = g_strdup_printf("unix:%s%s",
609                                                  server->socket_path, opt);
610     Chardev *chr;
611 
612     chr = qemu_chr_new(server->chr_name, chr_path, server->context);
613     g_assert(chr);
614 
615     qemu_chr_fe_init(&server->chr, chr, &error_abort);
616     qemu_chr_fe_set_handlers(&server->chr, chr_can_read, chr_read,
617                              chr_event, NULL, server, server->context, true);
618 }
619 
/* Listening chardev; wait=off so qemu may connect any time later. */
static void test_server_listen(TestServer *server)
{
    test_server_create_chr(server, ",server=on,wait=off");
}
624 
/*
 * Tear down a TestServer: stop the helper thread, drain pending
 * sources, remove the temp dir and sockets, close received fds, and
 * release all glib resources. Order is deliberate — the loop must be
 * stopped before chardev/file teardown.
 */
static void test_server_free(TestServer *server)
{
    int i, ret;

    /* finish the helper thread and dispatch pending sources */
    g_main_loop_quit(server->loop);
    g_thread_join(server->thread);
    while (g_main_context_pending(NULL)) {
        g_main_context_iteration(NULL, TRUE);
    }

    unlink(server->socket_path);
    g_free(server->socket_path);

    unlink(server->mig_path);
    g_free(server->mig_path);

    /* rmdir fails (non-fatally) if anything is left in the dir */
    ret = rmdir(server->tmpfs);
    if (ret != 0) {
        g_test_message("unable to rmdir: path (%s): %s",
                       server->tmpfs, strerror(errno));
    }
    g_free(server->tmpfs);

    qemu_chr_fe_deinit(&server->chr, true);

    /* close the memory-region fds received via SET_MEM_TABLE */
    for (i = 0; i < server->fds_num; i++) {
        close(server->fds[i]);
    }

    if (server->log_fd != -1) {
        close(server->log_fd);
    }

    g_free(server->chr_name);

    g_main_loop_unref(server->loop);
    g_main_context_unref(server->context);
    g_cond_clear(&server->data_cond);
    g_mutex_clear(&server->data_mutex);
    g_free(server);
}
667 
668 static void wait_for_log_fd(TestServer *s)
669 {
670     gint64 end_time;
671 
672     g_mutex_lock(&s->data_mutex);
673     end_time = g_get_monotonic_time() + 5 * G_TIME_SPAN_SECOND;
674     while (s->log_fd == -1) {
675         if (!g_cond_wait_until(&s->data_cond, &s->data_mutex, end_time)) {
676             /* timeout has passed */
677             g_assert(s->log_fd != -1);
678             break;
679         }
680     }
681 
682     g_mutex_unlock(&s->data_mutex);
683 }
684 
685 static void write_guest_mem(TestServer *s, uint32_t seed)
686 {
687     uint32_t *guest_mem;
688     int i, j;
689     size_t size;
690 
691     /* iterate all regions */
692     for (i = 0; i < s->fds_num; i++) {
693 
694         /* We'll write only the region statring at 0x0 */
695         if (s->memory.regions[i].guest_phys_addr != 0x0) {
696             continue;
697         }
698 
699         g_assert_cmpint(s->memory.regions[i].memory_size, >, 1024);
700 
701         size = s->memory.regions[i].memory_size +
702             s->memory.regions[i].mmap_offset;
703 
704         guest_mem = mmap(0, size, PROT_READ | PROT_WRITE,
705                          MAP_SHARED, s->fds[i], 0);
706 
707         g_assert(guest_mem != MAP_FAILED);
708         guest_mem += (s->memory.regions[i].mmap_offset / sizeof(*guest_mem));
709 
710         for (j = 0; j < 256; j++) {
711             guest_mem[j] = seed + j;
712         }
713 
714         munmap(guest_mem, s->memory.regions[i].memory_size);
715         break;
716     }
717 }
718 
719 static guint64 get_log_size(TestServer *s)
720 {
721     guint64 log_size = 0;
722     int i;
723 
724     for (i = 0; i < s->memory.nregions; ++i) {
725         VhostUserMemoryRegion *reg = &s->memory.regions[i];
726         guint64 last = range_get_last(reg->guest_phys_addr,
727                                        reg->memory_size);
728         log_size = MAX(log_size, last / (8 * VHOST_LOG_PAGE) + 1);
729     }
730 
731     return log_size;
732 }
733 
/*
 * GSource wrapper used during migration to assert that the source and
 * destination backends never have rings running simultaneously.
 */
typedef struct TestMigrateSource {
    GSource source;
    TestServer *src;
    TestServer *dest;
} TestMigrateSource;
739 
740 static gboolean
741 test_migrate_source_check(GSource *source)
742 {
743     TestMigrateSource *t = (TestMigrateSource *)source;
744     gboolean overlap = t->src->rings && t->dest->rings;
745 
746     g_assert(!overlap);
747 
748     return FALSE;
749 }
750 
/* Only .check is needed: the source asserts and never dispatches. */
GSourceFuncs test_migrate_source_funcs = {
    .check = test_migrate_source_check,
};
754 
/*
 * g_test_queue_destroy() hook: the cached qos command line embeds this
 * server's socket paths, so invalidate it before freeing the server.
 */
static void vhost_user_test_cleanup(void *s)
{
    TestServer *server = s;

    qos_invalidate_command_line();
    test_server_free(server);
}
762 
763 static void *vhost_user_test_setup(GString *cmd_line, void *arg)
764 {
765     TestServer *server = test_server_new("vhost-user-test", arg);
766     test_server_listen(server);
767 
768     append_mem_opts(server, cmd_line, 256, TEST_MEMFD_AUTO);
769     server->vu_ops->append_opts(server, cmd_line, "");
770 
771     g_test_queue_destroy(vhost_user_test_cleanup, server);
772 
773     return server;
774 }
775 
776 static void *vhost_user_test_setup_memfd(GString *cmd_line, void *arg)
777 {
778     TestServer *server = test_server_new("vhost-user-test", arg);
779     test_server_listen(server);
780 
781     append_mem_opts(server, cmd_line, 256, TEST_MEMFD_YES);
782     server->vu_ops->append_opts(server, cmd_line, "");
783 
784     g_test_queue_destroy(vhost_user_test_cleanup, server);
785 
786     return server;
787 }
788 
789 static void test_read_guest_mem(void *obj, void *arg, QGuestAllocator *alloc)
790 {
791     TestServer *server = arg;
792 
793     if (!wait_for_fds(server)) {
794         return;
795     }
796 
797     read_guest_mem_server(global_qtest, server);
798 }
799 
/*
 * Migrate the guest to a second qemu ("dest") over a unix socket while
 * exercising the vhost-user dirty log: throttle migration, dirty guest
 * memory through the backend mapping and flag it in the log, then let
 * migration finish and verify dest's memory against the backend view.
 */
static void test_migrate(void *obj, void *arg, QGuestAllocator *alloc)
{
    TestServer *s = arg;
    TestServer *dest;
    GString *dest_cmdline;
    char *uri;
    QTestState *to;
    GSource *source;
    QDict *rsp;
    guint8 *log;
    guint64 size;

    if (!wait_for_fds(s)) {
        return;
    }

    /* destination backend mirrors the source's ops and command line */
    dest = test_server_new("dest", s->vu_ops);
    dest_cmdline = g_string_new(qos_get_current_command_line());
    uri = g_strdup_printf("%s%s", "unix:", dest->mig_path);

    /* dirty log must cover the 256M of guest RAM: one bit per page */
    size = get_log_size(s);
    g_assert_cmpint(size, ==, (256 * 1024 * 1024) / (VHOST_LOG_PAGE * 8));

    test_server_listen(dest);
    g_string_append_printf(dest_cmdline, " -incoming %s", uri);
    append_mem_opts(dest, dest_cmdline, 256, TEST_MEMFD_AUTO);
    dest->vu_ops->append_opts(dest, dest_cmdline, "");
    to = qtest_init(dest_cmdline->str);

    /* This would be where you call qos_allocate_objects(to, NULL), if you want
     * to talk to the QVirtioNet object on the destination.
     */

    /* watchdog asserting src and dest rings never run concurrently */
    source = g_source_new(&test_migrate_source_funcs,
                          sizeof(TestMigrateSource));
    ((TestMigrateSource *)source)->src = s;
    ((TestMigrateSource *)source)->dest = dest;
    g_source_attach(source, s->context);

    /* slow down migration to have time to fiddle with log */
    /* TODO: qtest could learn to break on some places */
    rsp = qmp("{ 'execute': 'migrate-set-parameters',"
              "'arguments': { 'max-bandwidth': 10 } }");
    g_assert(qdict_haskey(rsp, "return"));
    qobject_unref(rsp);

    rsp = qmp("{ 'execute': 'migrate', 'arguments': { 'uri': %s } }", uri);
    g_assert(qdict_haskey(rsp, "return"));
    qobject_unref(rsp);

    wait_for_log_fd(s);

    log = mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, s->log_fd, 0);
    g_assert(log != MAP_FAILED);

    /* modify first page */
    write_guest_mem(s, 0x42);
    log[0] = 1;
    munmap(log, size);

    /* speed things up */
    rsp = qmp("{ 'execute': 'migrate-set-parameters',"
              "'arguments': { 'max-bandwidth': 0 } }");
    g_assert(qdict_haskey(rsp, "return"));
    qobject_unref(rsp);

    qmp_eventwait("STOP");
    qtest_qmp_eventwait(to, "RESUME");

    g_assert(wait_for_fds(dest));
    read_guest_mem_server(to, dest);

    g_source_destroy(source);
    g_source_unref(source);

    qtest_quit(to);
    test_server_free(dest);
    g_free(uri);
    g_string_free(dest_cmdline, true);
}
880 
881 static void wait_for_rings_started(TestServer *s, size_t count)
882 {
883     gint64 end_time;
884 
885     g_mutex_lock(&s->data_mutex);
886     end_time = g_get_monotonic_time() + 5 * G_TIME_SPAN_SECOND;
887     while (ctpop64(s->rings) != count) {
888         if (!g_cond_wait_until(&s->data_cond, &s->data_mutex, end_time)) {
889             /* timeout has passed */
890             g_assert_cmpint(ctpop64(s->rings), ==, count);
891             break;
892         }
893     }
894 
895     g_mutex_unlock(&s->data_mutex);
896 }
897 
/* Client-side chardev; reconnect=1 retries the connection each second. */
static inline void test_server_connect(TestServer *server)
{
    test_server_create_chr(server, ",reconnect=1");
}
902 
903 static gboolean
904 reconnect_cb(gpointer user_data)
905 {
906     TestServer *s = user_data;
907 
908     qemu_chr_fe_disconnect(&s->chr);
909 
910     return FALSE;
911 }
912 
/*
 * Helper thread used when qemu is the vhost-user server: connect to
 * its socket after a grace period.
 */
static gpointer
connect_thread(gpointer data)
{
    TestServer *s = data;

    /* wait for qemu to start before first try, to avoid extra warnings */
    g_usleep(G_USEC_PER_SEC);
    test_server_connect(s);

    return NULL;
}
924 
925 static void *vhost_user_test_setup_reconnect(GString *cmd_line, void *arg)
926 {
927     TestServer *s = test_server_new("reconnect", arg);
928 
929     g_thread_new("connect", connect_thread, s);
930     append_mem_opts(s, cmd_line, 256, TEST_MEMFD_AUTO);
931     s->vu_ops->append_opts(s, cmd_line, ",server=on");
932 
933     g_test_queue_destroy(vhost_user_test_cleanup, s);
934 
935     return s;
936 }
937 
938 static void test_reconnect(void *obj, void *arg, QGuestAllocator *alloc)
939 {
940     TestServer *s = arg;
941     GSource *src;
942 
943     if (!wait_for_fds(s)) {
944         return;
945     }
946 
947     wait_for_rings_started(s, 2);
948 
949     /* reconnect */
950     s->fds_num = 0;
951     s->rings = 0;
952     src = g_idle_source_new();
953     g_source_set_callback(src, reconnect_cb, s, NULL);
954     g_source_attach(src, s->context);
955     g_source_unref(src);
956     g_assert(wait_for_fds(s));
957     wait_for_rings_started(s, 2);
958 }
959 
960 static void *vhost_user_test_setup_connect_fail(GString *cmd_line, void *arg)
961 {
962     TestServer *s = test_server_new("connect-fail", arg);
963 
964     s->test_fail = true;
965 
966     g_thread_new("connect", connect_thread, s);
967     append_mem_opts(s, cmd_line, 256, TEST_MEMFD_AUTO);
968     s->vu_ops->append_opts(s, cmd_line, ",server=on");
969 
970     g_test_queue_destroy(vhost_user_test_cleanup, s);
971 
972     return s;
973 }
974 
975 static void *vhost_user_test_setup_flags_mismatch(GString *cmd_line, void *arg)
976 {
977     TestServer *s = test_server_new("flags-mismatch", arg);
978 
979     s->test_flags = TEST_FLAGS_DISCONNECT;
980 
981     g_thread_new("connect", connect_thread, s);
982     append_mem_opts(s, cmd_line, 256, TEST_MEMFD_AUTO);
983     s->vu_ops->append_opts(s, cmd_line, ",server=on");
984 
985     g_test_queue_destroy(vhost_user_test_cleanup, s);
986 
987     return s;
988 }
989 
990 static void test_vhost_user_started(void *obj, void *arg, QGuestAllocator *alloc)
991 {
992     TestServer *s = arg;
993 
994     if (!wait_for_fds(s)) {
995         return;
996     }
997     wait_for_rings_started(s, 2);
998 }
999 
1000 static void *vhost_user_test_setup_multiqueue(GString *cmd_line, void *arg)
1001 {
1002     TestServer *s = vhost_user_test_setup(cmd_line, arg);
1003 
1004     s->queues = 2;
1005     g_string_append_printf(cmd_line,
1006                            " -set netdev.hs0.queues=%d"
1007                            " -global virtio-net-pci.vectors=%d",
1008                            s->queues, s->queues * 2 + 2);
1009 
1010     return s;
1011 }
1012 
/* With N queue pairs, all 2*N rings must start. */
static void test_multiqueue(void *obj, void *arg, QGuestAllocator *alloc)
{
    TestServer *s = arg;

    wait_for_rings_started(s, s->queues * 2);
}
1019 
1020 
1021 static uint64_t vu_net_get_features(TestServer *s)
1022 {
1023     uint64_t features = 0x1ULL << VHOST_F_LOG_ALL |
1024         0x1ULL << VHOST_USER_F_PROTOCOL_FEATURES;
1025 
1026     if (s->queues > 1) {
1027         features |= 0x1ULL << VIRTIO_NET_F_MQ;
1028     }
1029 
1030     return features;
1031 }
1032 
/*
 * VHOST_USER_SET_FEATURES handler for the net stub.
 *
 * QEMU must always negotiate VHOST_USER_F_PROTOCOL_FEATURES with this
 * backend, so assert the bit is present.  When a test requested
 * TEST_FLAGS_DISCONNECT, drop the connection at this point in the
 * negotiation; the flag is flipped to TEST_FLAGS_BAD so the disconnect
 * fires only once per test run.
 */
static void vu_net_set_features(TestServer *s, CharBackend *chr,
                                VhostUserMsg *msg)
{
    g_assert(msg->payload.u64 & (0x1ULL << VHOST_USER_F_PROTOCOL_FEATURES));
    if (s->test_flags == TEST_FLAGS_DISCONNECT) {
        qemu_chr_fe_disconnect(chr);
        s->test_flags = TEST_FLAGS_BAD;
    }
}
1042 
1043 static void vu_net_get_protocol_features(TestServer *s, CharBackend *chr,
1044         VhostUserMsg *msg)
1045 {
1046     /* send back features to qemu */
1047     msg->flags |= VHOST_USER_REPLY_MASK;
1048     msg->size = sizeof(m.payload.u64);
1049     msg->payload.u64 = 1 << VHOST_USER_PROTOCOL_F_LOG_SHMFD;
1050     msg->payload.u64 |= 1 << VHOST_USER_PROTOCOL_F_CROSS_ENDIAN;
1051     if (s->queues > 1) {
1052         msg->payload.u64 |= 1 << VHOST_USER_PROTOCOL_F_MQ;
1053     }
1054     qemu_chr_fe_write_all(chr, (uint8_t *)msg, VHOST_USER_HDR_SIZE + msg->size);
1055 }
1056 
/*
 * Each vhost-user device type exercised by these tests has its own ops
 * structure describing how to build its command line (append_opts) and
 * how the stub server answers feature-negotiation messages.
 */
static struct vhost_user_ops g_vu_net_ops = {
    .type = VHOST_USER_NET,

    /* builds the -chardev/-netdev options (see append_vhost_net_opts) */
    .append_opts = append_vhost_net_opts,

    .get_features = vu_net_get_features,
    .set_features = vu_net_set_features,
    .get_protocol_features = vu_net_get_protocol_features,
};
1067 
1068 static void register_vhost_user_test(void)
1069 {
1070     QOSGraphTestOptions opts = {
1071         .before = vhost_user_test_setup,
1072         .subprocess = true,
1073         .arg = &g_vu_net_ops,
1074     };
1075 
1076     qemu_add_opts(&qemu_chardev_opts);
1077 
1078     qos_add_test("vhost-user/read-guest-mem/memfile",
1079                  "virtio-net",
1080                  test_read_guest_mem, &opts);
1081 
1082     if (qemu_memfd_check(MFD_ALLOW_SEALING)) {
1083         opts.before = vhost_user_test_setup_memfd;
1084         qos_add_test("vhost-user/read-guest-mem/memfd",
1085                      "virtio-net",
1086                      test_read_guest_mem, &opts);
1087     }
1088 
1089     qos_add_test("vhost-user/migrate",
1090                  "virtio-net",
1091                  test_migrate, &opts);
1092 
1093     opts.before = vhost_user_test_setup_reconnect;
1094     qos_add_test("vhost-user/reconnect", "virtio-net",
1095                  test_reconnect, &opts);
1096 
1097     opts.before = vhost_user_test_setup_connect_fail;
1098     qos_add_test("vhost-user/connect-fail", "virtio-net",
1099                  test_vhost_user_started, &opts);
1100 
1101     opts.before = vhost_user_test_setup_flags_mismatch;
1102     qos_add_test("vhost-user/flags-mismatch", "virtio-net",
1103                  test_vhost_user_started, &opts);
1104 
1105     opts.before = vhost_user_test_setup_multiqueue;
1106     opts.edge.extra_device_opts = "mq=on";
1107     qos_add_test("vhost-user/multiqueue",
1108                  "virtio-net",
1109                  test_multiqueue, &opts);
1110 }
1111 libqos_init(register_vhost_user_test);
1112 
1113 static uint64_t vu_gpio_get_features(TestServer *s)
1114 {
1115     return 0x1ULL << VIRTIO_F_VERSION_1 |
1116         0x1ULL << VIRTIO_GPIO_F_IRQ |
1117         0x1ULL << VHOST_USER_F_PROTOCOL_FEATURES;
1118 }
1119 
1120 /*
1121  * This stub can't handle all the message types but we should reply
1122  * that we support VHOST_USER_PROTOCOL_F_CONFIG as gpio would use it
1123  * talking to a read vhost-user daemon.
1124  */
1125 static void vu_gpio_get_protocol_features(TestServer *s, CharBackend *chr,
1126                                           VhostUserMsg *msg)
1127 {
1128     /* send back features to qemu */
1129     msg->flags |= VHOST_USER_REPLY_MASK;
1130     msg->size = sizeof(m.payload.u64);
1131     msg->payload.u64 = 1ULL << VHOST_USER_PROTOCOL_F_CONFIG;
1132 
1133     qemu_chr_fe_write_all(chr, (uint8_t *)msg, VHOST_USER_HDR_SIZE + msg->size);
1134 }
1135 
/* Ops structure for the vhost-user-gpio stub device. */
static struct vhost_user_ops g_vu_gpio_ops = {
    .type = VHOST_USER_GPIO,

    .append_opts = append_vhost_gpio_opts,

    .get_features = vu_gpio_get_features,
    /* SET_FEATURES handling is identical to net, so reuse its handler */
    .set_features = vu_net_set_features,
    .get_protocol_features = vu_gpio_get_protocol_features,
};
1145 
1146 static void register_vhost_gpio_test(void)
1147 {
1148     QOSGraphTestOptions opts = {
1149         .before = vhost_user_test_setup,
1150         .subprocess = true,
1151         .arg = &g_vu_gpio_ops,
1152     };
1153 
1154     qemu_add_opts(&qemu_chardev_opts);
1155 
1156     qos_add_test("read-guest-mem/memfile",
1157                  "vhost-user-gpio", test_read_guest_mem, &opts);
1158 }
1159 libqos_init(register_vhost_gpio_test);
1160