1 /*
2 * QTest testcase for the vhost-user
3 *
4 * Copyright (c) 2014 Virtual Open Systems Sarl.
5 *
6 * This work is licensed under the terms of the GNU GPL, version 2 or later.
7 * See the COPYING file in the top-level directory.
8 *
9 */
10
11 #include "qemu/osdep.h"
12
13 #include "libqtest-single.h"
14 #include "qapi/error.h"
15 #include "qapi/qmp/qdict.h"
16 #include "qemu/config-file.h"
17 #include "qemu/option.h"
18 #include "qemu/range.h"
19 #include "qemu/sockets.h"
20 #include "chardev/char-fe.h"
21 #include "qemu/memfd.h"
22 #include "qemu/module.h"
23 #include "sysemu/sysemu.h"
24 #include "libqos/libqos.h"
25 #include "libqos/pci-pc.h"
26 #include "libqos/virtio-pci.h"
27
28 #include "libqos/malloc-pc.h"
29 #include "libqos/qgraph_internal.h"
30 #include "hw/virtio/virtio-net.h"
31
32 #include "standard-headers/linux/vhost_types.h"
33 #include "standard-headers/linux/virtio_ids.h"
34 #include "standard-headers/linux/virtio_net.h"
35 #include "standard-headers/linux/virtio_gpio.h"
36 #include "standard-headers/linux/virtio_scmi.h"
37
38 #ifdef CONFIG_LINUX
39 #include <sys/vfs.h>
40 #endif
41
42
43 #define QEMU_CMD_MEM " -m %d -object memory-backend-file,id=mem,size=%dM," \
44 "mem-path=%s,share=on -numa node,memdev=mem"
45 #define QEMU_CMD_MEMFD " -m %d -object memory-backend-memfd,id=mem,size=%dM," \
46 " -numa node,memdev=mem"
47 #define QEMU_CMD_CHR " -chardev socket,id=%s,path=%s%s"
48 #define QEMU_CMD_NETDEV " -netdev vhost-user,id=hs0,chardev=%s,vhostforce=on"
49
50 #define HUGETLBFS_MAGIC 0x958458f6
51
52 /*********** FROM hw/virtio/vhost-user.c *************************************/
53
54 #define VHOST_MEMORY_MAX_NREGIONS 8
55 #define VHOST_MAX_VIRTQUEUES 0x100
56
57 #define VHOST_USER_F_PROTOCOL_FEATURES 30
58 #define VIRTIO_F_VERSION_1 32
59
60 #define VHOST_USER_PROTOCOL_F_MQ 0
61 #define VHOST_USER_PROTOCOL_F_LOG_SHMFD 1
62 #define VHOST_USER_PROTOCOL_F_CROSS_ENDIAN 6
63 #define VHOST_USER_PROTOCOL_F_CONFIG 9
64
65 #define VHOST_LOG_PAGE 0x1000
66
67 typedef enum VhostUserRequest {
68 VHOST_USER_NONE = 0,
69 VHOST_USER_GET_FEATURES = 1,
70 VHOST_USER_SET_FEATURES = 2,
71 VHOST_USER_SET_OWNER = 3,
72 VHOST_USER_RESET_OWNER = 4,
73 VHOST_USER_SET_MEM_TABLE = 5,
74 VHOST_USER_SET_LOG_BASE = 6,
75 VHOST_USER_SET_LOG_FD = 7,
76 VHOST_USER_SET_VRING_NUM = 8,
77 VHOST_USER_SET_VRING_ADDR = 9,
78 VHOST_USER_SET_VRING_BASE = 10,
79 VHOST_USER_GET_VRING_BASE = 11,
80 VHOST_USER_SET_VRING_KICK = 12,
81 VHOST_USER_SET_VRING_CALL = 13,
82 VHOST_USER_SET_VRING_ERR = 14,
83 VHOST_USER_GET_PROTOCOL_FEATURES = 15,
84 VHOST_USER_SET_PROTOCOL_FEATURES = 16,
85 VHOST_USER_GET_QUEUE_NUM = 17,
86 VHOST_USER_SET_VRING_ENABLE = 18,
87 VHOST_USER_GET_CONFIG = 24,
88 VHOST_USER_SET_CONFIG = 25,
89 VHOST_USER_MAX
90 } VhostUserRequest;
91
92 typedef struct VhostUserMemoryRegion {
93 uint64_t guest_phys_addr;
94 uint64_t memory_size;
95 uint64_t userspace_addr;
96 uint64_t mmap_offset;
97 } VhostUserMemoryRegion;
98
99 typedef struct VhostUserMemory {
100 uint32_t nregions;
101 uint32_t padding;
102 VhostUserMemoryRegion regions[VHOST_MEMORY_MAX_NREGIONS];
103 } VhostUserMemory;
104
105 typedef struct VhostUserLog {
106 uint64_t mmap_size;
107 uint64_t mmap_offset;
108 } VhostUserLog;
109
110 typedef struct VhostUserMsg {
111 VhostUserRequest request;
112
113 #define VHOST_USER_VERSION_MASK (0x3)
114 #define VHOST_USER_REPLY_MASK (0x1<<2)
115 uint32_t flags;
116 uint32_t size; /* the following payload size */
117 union {
118 #define VHOST_USER_VRING_IDX_MASK (0xff)
119 #define VHOST_USER_VRING_NOFD_MASK (0x1<<8)
120 uint64_t u64;
121 struct vhost_vring_state state;
122 struct vhost_vring_addr addr;
123 VhostUserMemory memory;
124 VhostUserLog log;
125 } payload;
126 } QEMU_PACKED VhostUserMsg;
127
128 static VhostUserMsg m __attribute__ ((unused));
129 #define VHOST_USER_HDR_SIZE (sizeof(m.request) \
130 + sizeof(m.flags) \
131 + sizeof(m.size))
132
133 #define VHOST_USER_PAYLOAD_SIZE (sizeof(m) - VHOST_USER_HDR_SIZE)
134
135 /* The version of the protocol we support */
136 #define VHOST_USER_VERSION (0x1)
137 /*****************************************************************************/
138
139 enum {
140 TEST_FLAGS_OK,
141 TEST_FLAGS_DISCONNECT,
142 TEST_FLAGS_BAD,
143 TEST_FLAGS_END,
144 };
145
146 enum {
147 VHOST_USER_NET,
148 VHOST_USER_GPIO,
149 VHOST_USER_SCMI,
150 };
151
152 typedef struct TestServer {
153 gchar *socket_path;
154 gchar *mig_path;
155 gchar *chr_name;
156 gchar *tmpfs;
157 CharBackend chr;
158 int fds_num;
159 int fds[VHOST_MEMORY_MAX_NREGIONS];
160 VhostUserMemory memory;
161 GMainContext *context;
162 GMainLoop *loop;
163 GThread *thread;
164 GMutex data_mutex;
165 GCond data_cond;
166 int log_fd;
167 uint64_t rings;
168 bool test_fail;
169 int test_flags;
170 int queues;
171 struct vhost_user_ops *vu_ops;
172 } TestServer;
173
174 struct vhost_user_ops {
175 /* Device types. */
176 int type;
177 void (*append_opts)(TestServer *s, GString *cmd_line,
178 const char *chr_opts);
179
180 /* VHOST-USER commands. */
181 uint64_t (*get_features)(TestServer *s);
182 void (*set_features)(TestServer *s, CharBackend *chr,
183 VhostUserMsg *msg);
184 void (*get_protocol_features)(TestServer *s,
185 CharBackend *chr, VhostUserMsg *msg);
186 };
187
188 static const char *init_hugepagefs(void);
189 static TestServer *test_server_new(const gchar *name,
190 struct vhost_user_ops *ops);
191 static void test_server_free(TestServer *server);
192 static void test_server_listen(TestServer *server);
193
194 enum test_memfd {
195 TEST_MEMFD_AUTO,
196 TEST_MEMFD_YES,
197 TEST_MEMFD_NO,
198 };
199
/* Append the chardev and the vhost-user netdev arguments to @cmd_line. */
static void append_vhost_net_opts(TestServer *s, GString *cmd_line,
                                  const char *chr_opts)
{
    const char *chr = s->chr_name;

    g_string_append_printf(cmd_line, QEMU_CMD_CHR, chr, s->socket_path,
                           chr_opts);
    g_string_append_printf(cmd_line, QEMU_CMD_NETDEV, chr);
}
207
208 /*
209 * For GPIO there are no other magic devices we need to add (like
210 * block or netdev) so all we need to worry about is the vhost-user
211 * chardev socket.
212 */
static void append_vhost_gpio_opts(TestServer *s, GString *cmd_line,
                                   const char *chr_opts)
{
    /* GPIO needs nothing beyond the vhost-user chardev socket itself. */
    g_string_append_printf(cmd_line, QEMU_CMD_CHR, s->chr_name,
                           s->socket_path, chr_opts);
}
220
/*
 * Append the guest-memory arguments: a memfd backend when requested (or
 * auto-detected), otherwise a shared file backend on hugetlbfs/tmpfs.
 */
static void append_mem_opts(TestServer *server, GString *cmd_line,
                            int size, enum test_memfd memfd)
{
    bool use_memfd = (memfd == TEST_MEMFD_YES);

    if (memfd == TEST_MEMFD_AUTO) {
        use_memfd = qemu_memfd_check(MFD_ALLOW_SEALING);
    }

    if (use_memfd) {
        g_string_append_printf(cmd_line, QEMU_CMD_MEMFD, size, size);
    } else {
        const char *root = init_hugepagefs();

        if (!root) {
            root = server->tmpfs;
        }
        g_string_append_printf(cmd_line, QEMU_CMD_MEM, size, size, root);
    }
}
237
wait_for_fds(TestServer * s)238 static bool wait_for_fds(TestServer *s)
239 {
240 gint64 end_time;
241 bool got_region;
242 int i;
243
244 g_mutex_lock(&s->data_mutex);
245
246 end_time = g_get_monotonic_time() + 5 * G_TIME_SPAN_SECOND;
247 while (!s->fds_num) {
248 if (!g_cond_wait_until(&s->data_cond, &s->data_mutex, end_time)) {
249 /* timeout has passed */
250 g_assert(s->fds_num);
251 break;
252 }
253 }
254
255 /* check for sanity */
256 g_assert_cmpint(s->fds_num, >, 0);
257 g_assert_cmpint(s->fds_num, ==, s->memory.nregions);
258
259 g_mutex_unlock(&s->data_mutex);
260
261 got_region = false;
262 for (i = 0; i < s->memory.nregions; ++i) {
263 VhostUserMemoryRegion *reg = &s->memory.regions[i];
264 if (reg->guest_phys_addr == 0) {
265 got_region = true;
266 break;
267 }
268 }
269 if (!got_region) {
270 g_test_skip("No memory at address 0x0");
271 }
272 return got_region;
273 }
274
/*
 * Cross-check guest memory: for the region mapped at GPA 0x0, every one
 * of the first 1024 bytes read through qtest must equal the byte visible
 * through the fd the backend received in VHOST_USER_SET_MEM_TABLE.
 */
static void read_guest_mem_server(QTestState *qts, TestServer *s)
{
    uint8_t *map, *guest_mem;
    int i, j;
    size_t size;

    g_mutex_lock(&s->data_mutex);

    /* iterate all regions */
    for (i = 0; i < s->fds_num; i++) {

        /* We'll check only the region starting at 0x0 */
        if (s->memory.regions[i].guest_phys_addr != 0x0) {
            continue;
        }

        g_assert_cmpint(s->memory.regions[i].memory_size, >, 1024);

        /* the usable data begins mmap_offset bytes into the mapping */
        size = s->memory.regions[i].memory_size +
            s->memory.regions[i].mmap_offset;

        map = mmap(0, size, PROT_READ | PROT_WRITE,
                   MAP_SHARED, s->fds[i], 0);
        g_assert(map != MAP_FAILED);

        guest_mem = map + s->memory.regions[i].mmap_offset;

        for (j = 0; j < 1024; j++) {
            uint32_t a = qtest_readb(qts,
                                     s->memory.regions[i].guest_phys_addr + j);
            uint32_t b = guest_mem[j];

            g_assert_cmpint(a, ==, b);
        }

        /*
         * Unmap from the original mmap() base with the full mapped
         * length: passing the offset-adjusted pointer and only
         * memory_size would hand munmap() a potentially non-page-aligned
         * address and leak the trailing pages.
         */
        munmap(map, size);
    }

    g_mutex_unlock(&s->data_mutex);
}
314
thread_function(void * data)315 static void *thread_function(void *data)
316 {
317 GMainLoop *loop = data;
318 g_main_loop_run(loop);
319 return NULL;
320 }
321
chr_can_read(void * opaque)322 static int chr_can_read(void *opaque)
323 {
324 return VHOST_USER_HDR_SIZE;
325 }
326
/*
 * Main vhost-user message dispatcher for the stub backend.
 *
 * Called by the chardev layer with exactly VHOST_USER_HDR_SIZE bytes
 * (see chr_can_read); any payload is pulled synchronously with
 * qemu_chr_fe_read_all(). Replies are written back on the same chardev.
 * All state mutation happens under data_mutex; waiters on data_cond are
 * woken whenever fds, vring state or the log fd change.
 */
static void chr_read(void *opaque, const uint8_t *buf, int size)
{
    g_autoptr(GError) err = NULL;
    TestServer *s = opaque;
    CharBackend *chr = &s->chr;
    VhostUserMsg msg;
    uint8_t *p = (uint8_t *) &msg;
    int fd = -1;

    /* connect-fail tests: drop the very first connection, then behave */
    if (s->test_fail) {
        qemu_chr_fe_disconnect(chr);
        /* now switch to non-failure */
        s->test_fail = false;
    }

    if (size != VHOST_USER_HDR_SIZE) {
        qos_printf("%s: Wrong message size received %d\n", __func__, size);
        return;
    }

    g_mutex_lock(&s->data_mutex);
    memcpy(p, buf, VHOST_USER_HDR_SIZE);

    /* read the payload announced by the header, if any */
    if (msg.size) {
        p += VHOST_USER_HDR_SIZE;
        size = qemu_chr_fe_read_all(chr, p, msg.size);
        if (size != msg.size) {
            qos_printf("%s: Wrong message size received %d != %d\n",
                       __func__, size, msg.size);
            goto out;
        }
    }

    switch (msg.request) {
    case VHOST_USER_GET_FEATURES:
        /* Mandatory for tests to define get_features */
        g_assert(s->vu_ops->get_features);

        /* send back features to qemu */
        msg.flags |= VHOST_USER_REPLY_MASK;
        msg.size = sizeof(m.payload.u64);

        /* flags-mismatch tests: reply with no features once, then end */
        if (s->test_flags >= TEST_FLAGS_BAD) {
            msg.payload.u64 = 0;
            s->test_flags = TEST_FLAGS_END;
        } else {
            msg.payload.u64 = s->vu_ops->get_features(s);
        }

        qemu_chr_fe_write_all(chr, (uint8_t *) &msg,
                              VHOST_USER_HDR_SIZE + msg.size);
        break;

    case VHOST_USER_SET_FEATURES:
        if (s->vu_ops->set_features) {
            s->vu_ops->set_features(s, chr, &msg);
        }
        break;

    case VHOST_USER_SET_OWNER:
        /*
         * We don't need to do anything here, the remote is just
         * letting us know it is in charge. Just log it.
         */
        qos_printf("set_owner: start of session\n");
        break;

    case VHOST_USER_GET_PROTOCOL_FEATURES:
        if (s->vu_ops->get_protocol_features) {
            s->vu_ops->get_protocol_features(s, chr, &msg);
        }
        break;

    case VHOST_USER_GET_CONFIG:
        /*
         * Treat GET_CONFIG as a NOP and just reply and let the guest
         * consider we have updated its memory. Tests currently don't
         * require working configs.
         */
        msg.flags |= VHOST_USER_REPLY_MASK;
        p = (uint8_t *) &msg;
        qemu_chr_fe_write_all(chr, p, VHOST_USER_HDR_SIZE + msg.size);
        break;

    case VHOST_USER_SET_PROTOCOL_FEATURES:
        /*
         * We did set VHOST_USER_F_PROTOCOL_FEATURES so it's valid for
         * the remote end to send this. There is no handshake reply so
         * just log the details for debugging.
         */
        qos_printf("set_protocol_features: 0x%"PRIx64 "\n", msg.payload.u64);
        break;

    /*
     * A real vhost-user backend would actually set the size and
     * address of the vrings but we can simply report them.
     */
    case VHOST_USER_SET_VRING_NUM:
        qos_printf("set_vring_num: %d/%d\n",
                   msg.payload.state.index, msg.payload.state.num);
        break;
    case VHOST_USER_SET_VRING_ADDR:
        qos_printf("set_vring_addr: 0x%"PRIx64"/0x%"PRIx64"/0x%"PRIx64"\n",
                   msg.payload.addr.avail_user_addr,
                   msg.payload.addr.desc_user_addr,
                   msg.payload.addr.used_user_addr);
        break;

    case VHOST_USER_GET_VRING_BASE:
        /* send back vring base to qemu */
        msg.flags |= VHOST_USER_REPLY_MASK;
        msg.size = sizeof(m.payload.state);
        msg.payload.state.num = 0;
        p = (uint8_t *) &msg;
        qemu_chr_fe_write_all(chr, p, VHOST_USER_HDR_SIZE + msg.size);

        /* clear the ring's "started" bit and wake any waiter */
        assert(msg.payload.state.index < s->queues * 2);
        s->rings &= ~(0x1ULL << msg.payload.state.index);
        g_cond_broadcast(&s->data_cond);
        break;

    case VHOST_USER_SET_MEM_TABLE:
        /* received the mem table */
        memcpy(&s->memory, &msg.payload.memory, sizeof(msg.payload.memory));
        s->fds_num = qemu_chr_fe_get_msgfds(chr, s->fds,
                                            G_N_ELEMENTS(s->fds));

        /* signal the test that it can continue */
        g_cond_broadcast(&s->data_cond);
        break;

    case VHOST_USER_SET_VRING_KICK:
    case VHOST_USER_SET_VRING_CALL:
        /* consume the fd */
        qemu_chr_fe_get_msgfds(chr, &fd, 1);
        /*
         * This is a non-blocking eventfd.
         * The receive function forces it to be blocking,
         * so revert it back to non-blocking.
         */
        g_unix_set_fd_nonblocking(fd, true, &err);
        g_assert_no_error(err);
        break;

    case VHOST_USER_SET_LOG_BASE:
        /* replace any previously received dirty-log fd */
        if (s->log_fd != -1) {
            close(s->log_fd);
            s->log_fd = -1;
        }
        qemu_chr_fe_get_msgfds(chr, &s->log_fd, 1);
        msg.flags |= VHOST_USER_REPLY_MASK;
        msg.size = 0;
        p = (uint8_t *) &msg;
        qemu_chr_fe_write_all(chr, p, VHOST_USER_HDR_SIZE);

        g_cond_broadcast(&s->data_cond);
        break;

    case VHOST_USER_SET_VRING_BASE:
        /* mark this ring as started; wait_for_rings_started() polls this */
        assert(msg.payload.state.index < s->queues * 2);
        s->rings |= 0x1ULL << msg.payload.state.index;
        g_cond_broadcast(&s->data_cond);
        break;

    case VHOST_USER_GET_QUEUE_NUM:
        msg.flags |= VHOST_USER_REPLY_MASK;
        msg.size = sizeof(m.payload.u64);
        msg.payload.u64 = s->queues;
        p = (uint8_t *) &msg;
        qemu_chr_fe_write_all(chr, p, VHOST_USER_HDR_SIZE + msg.size);
        break;

    case VHOST_USER_SET_VRING_ENABLE:
        /*
         * Another case we ignore as we don't need to respond. With a
         * fully functioning vhost-user we would enable/disable the
         * vring monitoring.
         */
        qos_printf("set_vring(%d)=%s\n", msg.payload.state.index,
                   msg.payload.state.num ? "enabled" : "disabled");
        break;

    default:
        qos_printf("vhost-user: un-handled message: %d\n", msg.request);
        break;
    }

out:
    g_mutex_unlock(&s->data_mutex);
}
517
init_hugepagefs(void)518 static const char *init_hugepagefs(void)
519 {
520 #ifdef CONFIG_LINUX
521 static const char *hugepagefs;
522 const char *path = getenv("QTEST_HUGETLBFS_PATH");
523 struct statfs fs;
524 int ret;
525
526 if (hugepagefs) {
527 return hugepagefs;
528 }
529 if (!path) {
530 return NULL;
531 }
532
533 if (access(path, R_OK | W_OK | X_OK)) {
534 qos_printf("access on path (%s): %s", path, strerror(errno));
535 g_test_fail();
536 return NULL;
537 }
538
539 do {
540 ret = statfs(path, &fs);
541 } while (ret != 0 && errno == EINTR);
542
543 if (ret != 0) {
544 qos_printf("statfs on path (%s): %s", path, strerror(errno));
545 g_test_fail();
546 return NULL;
547 }
548
549 if (fs.f_type != HUGETLBFS_MAGIC) {
550 qos_printf("Warning: path not on HugeTLBFS: %s", path);
551 g_test_fail();
552 return NULL;
553 }
554
555 hugepagefs = path;
556 return hugepagefs;
557 #else
558 return NULL;
559 #endif
560 }
561
/*
 * Allocate and initialise a TestServer: a private GMainContext/loop run
 * by a helper thread (so the chardev can operate), a fresh temporary
 * directory holding the vhost-user socket and migration stream paths,
 * and the per-device ops table. Release with test_server_free().
 */
static TestServer *test_server_new(const gchar *name,
                                   struct vhost_user_ops *ops)
{
    TestServer *server = g_new0(TestServer, 1);
    g_autofree const char *tmpfs = NULL;
    GError *err = NULL;

    server->context = g_main_context_new();
    server->loop = g_main_loop_new(server->context, FALSE);

    /* run the main loop thread so the chardev may operate */
    server->thread = g_thread_new(NULL, thread_function, server->loop);

    tmpfs = g_dir_make_tmp("vhost-test-XXXXXX", &err);
    if (!tmpfs) {
        g_test_message("Can't create temporary directory in %s: %s",
                       g_get_tmp_dir(), err->message);
        g_error_free(err);
    }
    g_assert(tmpfs);

    /* tmpfs is auto-freed; the server keeps its own copies of the paths */
    server->tmpfs = g_strdup(tmpfs);
    server->socket_path = g_strdup_printf("%s/%s.sock", tmpfs, name);
    server->mig_path = g_strdup_printf("%s/%s.mig", tmpfs, name);
    server->chr_name = g_strdup_printf("chr-%s", name);

    g_mutex_init(&server->data_mutex);
    g_cond_init(&server->data_cond);

    server->log_fd = -1;    /* no dirty-log fd until SET_LOG_BASE arrives */
    server->queues = 1;     /* single queue pair unless a test overrides */
    server->vu_ops = ops;

    return server;
}
597
/*
 * Chardev event hook: a disconnect that happens while we are in
 * TEST_FLAGS_END is the expected outcome of the flags-mismatch test,
 * so record success.
 */
static void chr_event(void *opaque, QEMUChrEvent event)
{
    TestServer *s = opaque;

    if (event == CHR_EVENT_CLOSED && s->test_flags == TEST_FLAGS_END) {
        s->test_flags = TEST_FLAGS_OK;
    }
}
607
/* Create the backend's unix-socket chardev and hook up our handlers. */
static void test_server_create_chr(TestServer *server, const gchar *opt)
{
    Chardev *chr;
    g_autofree gchar *chr_path = NULL;

    chr_path = g_strdup_printf("unix:%s%s", server->socket_path, opt);
    chr = qemu_chr_new(server->chr_name, chr_path, server->context);
    g_assert(chr);

    qemu_chr_fe_init(&server->chr, chr, &error_abort);
    qemu_chr_fe_set_handlers(&server->chr, chr_can_read, chr_read,
                             chr_event, NULL, server, server->context, true);
}
621
/* Listen on the socket without blocking for a client to connect. */
static void test_server_listen(TestServer *server)
{
    test_server_create_chr(server, ",server=on,wait=off");
}
626
/*
 * Tear down a TestServer created by test_server_new(): stop the helper
 * thread, drain pending sources, remove the temporary files, deinit the
 * chardev, close received fds and free all allocations.
 */
static void test_server_free(TestServer *server)
{
    int i, ret;

    /* finish the helper thread and dispatch pending sources */
    g_main_loop_quit(server->loop);
    g_thread_join(server->thread);
    while (g_main_context_pending(NULL)) {
        g_main_context_iteration(NULL, TRUE);
    }

    unlink(server->socket_path);
    g_free(server->socket_path);

    unlink(server->mig_path);
    g_free(server->mig_path);

    /* directory must be empty by now; report (don't abort) if not */
    ret = rmdir(server->tmpfs);
    if (ret != 0) {
        g_test_message("unable to rmdir: path (%s): %s",
                       server->tmpfs, strerror(errno));
    }
    g_free(server->tmpfs);

    qemu_chr_fe_deinit(&server->chr, true);

    /* close the memory-region fds received via SET_MEM_TABLE */
    for (i = 0; i < server->fds_num; i++) {
        close(server->fds[i]);
    }

    if (server->log_fd != -1) {
        close(server->log_fd);
    }

    g_free(server->chr_name);

    g_main_loop_unref(server->loop);
    g_main_context_unref(server->context);
    g_cond_clear(&server->data_cond);
    g_mutex_clear(&server->data_mutex);
    g_free(server);
}
669
/* Block (up to 5s) until SET_LOG_BASE has delivered a dirty-log fd. */
static void wait_for_log_fd(TestServer *s)
{
    gint64 deadline;

    g_mutex_lock(&s->data_mutex);
    deadline = g_get_monotonic_time() + 5 * G_TIME_SPAN_SECOND;
    while (s->log_fd == -1 &&
           g_cond_wait_until(&s->data_cond, &s->data_mutex, deadline)) {
        /* spurious wakeup or state change: loop re-checks log_fd */
    }
    /* fail hard if the timeout expired without an fd */
    g_assert(s->log_fd != -1);

    g_mutex_unlock(&s->data_mutex);
}
686
/*
 * Dirty the start of the guest memory region mapped at GPA 0x0 by
 * writing 256 seeded words through the fd received in SET_MEM_TABLE.
 * Used by the migration test to create pages the dirty log must track.
 */
static void write_guest_mem(TestServer *s, uint32_t seed)
{
    uint32_t *guest_mem;
    uint8_t *map;
    int i, j;
    size_t size;

    /* iterate all regions */
    for (i = 0; i < s->fds_num; i++) {

        /* We'll write only the region starting at 0x0 */
        if (s->memory.regions[i].guest_phys_addr != 0x0) {
            continue;
        }

        g_assert_cmpint(s->memory.regions[i].memory_size, >, 1024);

        /* the usable data begins mmap_offset bytes into the mapping */
        size = s->memory.regions[i].memory_size +
            s->memory.regions[i].mmap_offset;

        map = mmap(0, size, PROT_READ | PROT_WRITE,
                   MAP_SHARED, s->fds[i], 0);
        g_assert(map != MAP_FAILED);

        guest_mem = (uint32_t *)(map + s->memory.regions[i].mmap_offset);

        for (j = 0; j < 256; j++) {
            guest_mem[j] = seed + j;
        }

        /*
         * Unmap from the original mmap() base with the full mapped
         * length; unmapping the offset pointer with only memory_size
         * would use a potentially unaligned address and leak pages.
         */
        munmap(map, size);
        break;
    }
}
720
get_log_size(TestServer * s)721 static guint64 get_log_size(TestServer *s)
722 {
723 guint64 log_size = 0;
724 int i;
725
726 for (i = 0; i < s->memory.nregions; ++i) {
727 VhostUserMemoryRegion *reg = &s->memory.regions[i];
728 guint64 last = range_get_last(reg->guest_phys_addr,
729 reg->memory_size);
730 log_size = MAX(log_size, last / (8 * VHOST_LOG_PAGE) + 1);
731 }
732
733 return log_size;
734 }
735
/*
 * GSource subclass pairing the migration source and destination
 * servers so their vring state can be cross-checked on every
 * main-loop iteration while the migration is in flight.
 */
typedef struct TestMigrateSource {
    GSource source;     /* must be first: cast back from GSource * */
    TestServer *src;
    TestServer *dest;
} TestMigrateSource;
741
742 static gboolean
test_migrate_source_check(GSource * source)743 test_migrate_source_check(GSource *source)
744 {
745 TestMigrateSource *t = (TestMigrateSource *)source;
746 gboolean overlap = t->src->rings && t->dest->rings;
747
748 g_assert(!overlap);
749
750 return FALSE;
751 }
752
/* Only the check hook is needed; prepare/dispatch/finalize stay NULL. */
GSourceFuncs test_migrate_source_funcs = {
    .check = test_migrate_source_check,
};
756
vhost_user_test_cleanup(void * s)757 static void vhost_user_test_cleanup(void *s)
758 {
759 TestServer *server = s;
760
761 qos_invalidate_command_line();
762 test_server_free(server);
763 }
764
/* Default qos setup: listening server, auto memory backend, device opts. */
static void *vhost_user_test_setup(GString *cmd_line, void *arg)
{
    TestServer *server = test_server_new("vhost-user-test", arg);

    test_server_listen(server);
    append_mem_opts(server, cmd_line, 256, TEST_MEMFD_AUTO);
    server->vu_ops->append_opts(server, cmd_line, "");

    g_test_queue_destroy(vhost_user_test_cleanup, server);

    return server;
}
777
/* Same as vhost_user_test_setup() but forcing the memfd memory backend. */
static void *vhost_user_test_setup_memfd(GString *cmd_line, void *arg)
{
    TestServer *server = test_server_new("vhost-user-test", arg);

    test_server_listen(server);
    append_mem_opts(server, cmd_line, 256, TEST_MEMFD_YES);
    server->vu_ops->append_opts(server, cmd_line, "");

    g_test_queue_destroy(vhost_user_test_cleanup, server);

    return server;
}
790
/* Test body: verify guest memory is visible through the shared fds. */
static void test_read_guest_mem(void *obj, void *arg, QGuestAllocator *alloc)
{
    TestServer *server = arg;

    if (wait_for_fds(server)) {
        read_guest_mem_server(global_qtest, server);
    }
}
801
/*
 * Migration test: spin up a destination VM with an identical command
 * line plus "-incoming", throttle migration so we can dirty memory
 * mid-flight, verify the dirty-log bitmap fd works, then confirm the
 * destination sees the written data after the switch-over.
 */
static void test_migrate(void *obj, void *arg, QGuestAllocator *alloc)
{
    TestServer *s = arg;
    TestServer *dest;
    GString *dest_cmdline;
    char *uri;
    QTestState *to;
    GSource *source;
    QDict *rsp;
    guint8 *log;
    guint64 size;

    if (!wait_for_fds(s)) {
        return;
    }

    dest = test_server_new("dest", s->vu_ops);
    dest_cmdline = g_string_new(qos_get_current_command_line());
    uri = g_strdup_printf("%s%s", "unix:", dest->mig_path);

    /* 256M of guest RAM, one log bit per VHOST_LOG_PAGE page */
    size = get_log_size(s);
    g_assert_cmpint(size, ==, (256 * 1024 * 1024) / (VHOST_LOG_PAGE * 8));

    test_server_listen(dest);
    g_string_append_printf(dest_cmdline, " -incoming %s", uri);
    append_mem_opts(dest, dest_cmdline, 256, TEST_MEMFD_AUTO);
    dest->vu_ops->append_opts(dest, dest_cmdline, "");
    to = qtest_init(dest_cmdline->str);

    /* This would be where you call qos_allocate_objects(to, NULL), if you want
     * to talk to the QVirtioNet object on the destination.
     */

    /* cross-check that rings are never live on both ends at once */
    source = g_source_new(&test_migrate_source_funcs,
                          sizeof(TestMigrateSource));
    ((TestMigrateSource *)source)->src = s;
    ((TestMigrateSource *)source)->dest = dest;
    g_source_attach(source, s->context);

    /* slow down migration to have time to fiddle with log */
    /* TODO: qtest could learn to break on some places */
    rsp = qmp("{ 'execute': 'migrate-set-parameters',"
              "'arguments': { 'max-bandwidth': 10 } }");
    g_assert(qdict_haskey(rsp, "return"));
    qobject_unref(rsp);

    rsp = qmp("{ 'execute': 'migrate', 'arguments': { 'uri': %s } }", uri);
    g_assert(qdict_haskey(rsp, "return"));
    qobject_unref(rsp);

    /* SET_LOG_BASE must arrive once migration starts */
    wait_for_log_fd(s);

    log = mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, s->log_fd, 0);
    g_assert(log != MAP_FAILED);

    /* modify first page */
    write_guest_mem(s, 0x42);
    log[0] = 1;
    munmap(log, size);

    /* speed things up */
    rsp = qmp("{ 'execute': 'migrate-set-parameters',"
              "'arguments': { 'max-bandwidth': 0 } }");
    g_assert(qdict_haskey(rsp, "return"));
    qobject_unref(rsp);

    qmp_eventwait("STOP");
    qtest_qmp_eventwait(to, "RESUME");

    /* the dirtied data must now be visible on the destination */
    g_assert(wait_for_fds(dest));
    read_guest_mem_server(to, dest);

    g_source_destroy(source);
    g_source_unref(source);

    qtest_quit(to);
    test_server_free(dest);
    g_free(uri);
    g_string_free(dest_cmdline, true);
}
882
/* Block (up to 5s) until exactly @count vrings have been started. */
static void wait_for_rings_started(TestServer *s, size_t count)
{
    gint64 deadline;

    g_mutex_lock(&s->data_mutex);
    deadline = g_get_monotonic_time() + 5 * G_TIME_SPAN_SECOND;
    while (ctpop64(s->rings) != count &&
           g_cond_wait_until(&s->data_cond, &s->data_mutex, deadline)) {
        /* re-check the popcount after every wakeup */
    }
    /* fail hard if the timeout expired before all rings started */
    g_assert_cmpint(ctpop64(s->rings), ==, count);

    g_mutex_unlock(&s->data_mutex);
}
899
/* Client-mode chardev that keeps retrying the connection every second. */
static inline void test_server_connect(TestServer *server)
{
    test_server_create_chr(server, ",reconnect=1");
}
904
905 static gboolean
reconnect_cb(gpointer user_data)906 reconnect_cb(gpointer user_data)
907 {
908 TestServer *s = user_data;
909
910 qemu_chr_fe_disconnect(&s->chr);
911
912 return FALSE;
913 }
914
915 static gpointer
connect_thread(gpointer data)916 connect_thread(gpointer data)
917 {
918 TestServer *s = data;
919
920 /* wait for qemu to start before first try, to avoid extra warnings */
921 g_usleep(G_USEC_PER_SEC);
922 test_server_connect(s);
923
924 return NULL;
925 }
926
/* Setup for the reconnect test: QEMU is the server, we are the client. */
static void *vhost_user_test_setup_reconnect(GString *cmd_line, void *arg)
{
    TestServer *s = test_server_new("reconnect", arg);

    g_thread_new("connect", connect_thread, s);
    append_mem_opts(s, cmd_line, 256, TEST_MEMFD_AUTO);
    s->vu_ops->append_opts(s, cmd_line, ",server=on");

    g_test_queue_destroy(vhost_user_test_cleanup, s);

    return s;
}
939
/*
 * Reconnect test: once the session is fully up, drop the connection from
 * an idle source and check the whole handshake happens again.
 */
static void test_reconnect(void *obj, void *arg, QGuestAllocator *alloc)
{
    TestServer *s = arg;
    GSource *idle;

    if (!wait_for_fds(s)) {
        return;
    }
    wait_for_rings_started(s, 2);

    /* forget current session state, then trigger the disconnect */
    s->fds_num = 0;
    s->rings = 0;
    idle = g_idle_source_new();
    g_source_set_callback(idle, reconnect_cb, s, NULL);
    g_source_attach(idle, s->context);
    g_source_unref(idle);

    /* the full handshake must complete a second time */
    g_assert(wait_for_fds(s));
    wait_for_rings_started(s, 2);
}
961
/* Setup where the backend deliberately drops the very first connection. */
static void *vhost_user_test_setup_connect_fail(GString *cmd_line, void *arg)
{
    TestServer *s = test_server_new("connect-fail", arg);

    s->test_fail = true;    /* chr_read() disconnects once, then behaves */

    g_thread_new("connect", connect_thread, s);
    append_mem_opts(s, cmd_line, 256, TEST_MEMFD_AUTO);
    s->vu_ops->append_opts(s, cmd_line, ",server=on");

    g_test_queue_destroy(vhost_user_test_cleanup, s);

    return s;
}
976
/* Setup where the backend disconnects during the feature negotiation. */
static void *vhost_user_test_setup_flags_mismatch(GString *cmd_line, void *arg)
{
    TestServer *s = test_server_new("flags-mismatch", arg);

    s->test_flags = TEST_FLAGS_DISCONNECT;

    g_thread_new("connect", connect_thread, s);
    append_mem_opts(s, cmd_line, 256, TEST_MEMFD_AUTO);
    s->vu_ops->append_opts(s, cmd_line, ",server=on");

    g_test_queue_destroy(vhost_user_test_cleanup, s);

    return s;
}
991
/* Common check: the device came up despite the injected hiccup. */
static void test_vhost_user_started(void *obj, void *arg, QGuestAllocator *alloc)
{
    TestServer *s = arg;

    if (wait_for_fds(s)) {
        wait_for_rings_started(s, 2);
    }
}
1001
/* Multiqueue setup: 2 queue pairs, enough MSI-X vectors to serve them. */
static void *vhost_user_test_setup_multiqueue(GString *cmd_line, void *arg)
{
    TestServer *s = vhost_user_test_setup(cmd_line, arg);

    s->queues = 2;
    g_string_append_printf(cmd_line,
                           " -set netdev.hs0.queues=%d"
                           " -global virtio-net-pci.vectors=%d",
                           s->queues, s->queues * 2 + 2);

    return s;
}
1014
/* Multiqueue test body: every queue pair must start both of its rings. */
static void test_multiqueue(void *obj, void *arg, QGuestAllocator *alloc)
{
    TestServer *s = arg;

    wait_for_rings_started(s, s->queues * 2);
}
1021
1022
vu_net_get_features(TestServer * s)1023 static uint64_t vu_net_get_features(TestServer *s)
1024 {
1025 uint64_t features = 0x1ULL << VHOST_F_LOG_ALL |
1026 0x1ULL << VHOST_USER_F_PROTOCOL_FEATURES;
1027
1028 if (s->queues > 1) {
1029 features |= 0x1ULL << VIRTIO_NET_F_MQ;
1030 }
1031
1032 return features;
1033 }
1034
/*
 * SET_FEATURES handler: QEMU must always ack protocol features; in the
 * flags-mismatch test we additionally drop the link at this point.
 */
static void vu_net_set_features(TestServer *s, CharBackend *chr,
                                VhostUserMsg *msg)
{
    uint64_t proto = 0x1ULL << VHOST_USER_F_PROTOCOL_FEATURES;

    g_assert(msg->payload.u64 & proto);
    if (s->test_flags == TEST_FLAGS_DISCONNECT) {
        qemu_chr_fe_disconnect(chr);
        s->test_flags = TEST_FLAGS_BAD;
    }
}
1044
/* Reply with the protocol features the net stub supports. */
static void vu_net_get_protocol_features(TestServer *s, CharBackend *chr,
                                         VhostUserMsg *msg)
{
    uint64_t feats = 1 << VHOST_USER_PROTOCOL_F_LOG_SHMFD;

    feats |= 1 << VHOST_USER_PROTOCOL_F_CROSS_ENDIAN;
    if (s->queues > 1) {
        feats |= 1 << VHOST_USER_PROTOCOL_F_MQ;
    }

    /* send back features to qemu */
    msg->flags |= VHOST_USER_REPLY_MASK;
    msg->size = sizeof(m.payload.u64);
    msg->payload.u64 = feats;
    qemu_chr_fe_write_all(chr, (uint8_t *)msg, VHOST_USER_HDR_SIZE + msg->size);
}
1058
/* Each VHOST-USER device should have its ops structure defined. */
static struct vhost_user_ops g_vu_net_ops = {
    .type = VHOST_USER_NET,

    .append_opts = append_vhost_net_opts,

    /* feature negotiation handlers used by chr_read() */
    .get_features = vu_net_get_features,
    .set_features = vu_net_set_features,
    .get_protocol_features = vu_net_get_protocol_features,
};
1069
register_vhost_user_test(void)1070 static void register_vhost_user_test(void)
1071 {
1072 QOSGraphTestOptions opts = {
1073 .before = vhost_user_test_setup,
1074 .subprocess = true,
1075 .arg = &g_vu_net_ops,
1076 };
1077
1078 qemu_add_opts(&qemu_chardev_opts);
1079
1080 qos_add_test("vhost-user/read-guest-mem/memfile",
1081 "virtio-net",
1082 test_read_guest_mem, &opts);
1083
1084 if (qemu_memfd_check(MFD_ALLOW_SEALING)) {
1085 opts.before = vhost_user_test_setup_memfd;
1086 qos_add_test("vhost-user/read-guest-mem/memfd",
1087 "virtio-net",
1088 test_read_guest_mem, &opts);
1089 }
1090
1091 qos_add_test("vhost-user/migrate",
1092 "virtio-net",
1093 test_migrate, &opts);
1094
1095 opts.before = vhost_user_test_setup_reconnect;
1096 qos_add_test("vhost-user/reconnect", "virtio-net",
1097 test_reconnect, &opts);
1098
1099 opts.before = vhost_user_test_setup_connect_fail;
1100 qos_add_test("vhost-user/connect-fail", "virtio-net",
1101 test_vhost_user_started, &opts);
1102
1103 opts.before = vhost_user_test_setup_flags_mismatch;
1104 qos_add_test("vhost-user/flags-mismatch", "virtio-net",
1105 test_vhost_user_started, &opts);
1106
1107 opts.before = vhost_user_test_setup_multiqueue;
1108 opts.edge.extra_device_opts = "mq=on";
1109 qos_add_test("vhost-user/multiqueue",
1110 "virtio-net",
1111 test_multiqueue, &opts);
1112 }
1113 libqos_init(register_vhost_user_test);
1114
vu_gpio_get_features(TestServer * s)1115 static uint64_t vu_gpio_get_features(TestServer *s)
1116 {
1117 return 0x1ULL << VIRTIO_F_VERSION_1 |
1118 0x1ULL << VIRTIO_GPIO_F_IRQ |
1119 0x1ULL << VHOST_USER_F_PROTOCOL_FEATURES;
1120 }
1121
1122 /*
1123 * This stub can't handle all the message types but we should reply
1124 * that we support VHOST_USER_PROTOCOL_F_CONFIG as gpio would use it
1125 * talking to a read vhost-user daemon.
1126 */
static void vu_gpio_get_protocol_features(TestServer *s, CharBackend *chr,
                                          VhostUserMsg *msg)
{
    /* advertise only F_CONFIG and reply in place */
    msg->flags |= VHOST_USER_REPLY_MASK;
    msg->size = sizeof(m.payload.u64);
    msg->payload.u64 = 1ULL << VHOST_USER_PROTOCOL_F_CONFIG;

    qemu_chr_fe_write_all(chr, (uint8_t *)msg, VHOST_USER_HDR_SIZE + msg->size);
}
1137
/* Ops table for the vhost-user-gpio stub; reuses the net SET_FEATURES. */
static struct vhost_user_ops g_vu_gpio_ops = {
    .type = VHOST_USER_GPIO,

    .append_opts = append_vhost_gpio_opts,

    .get_features = vu_gpio_get_features,
    .set_features = vu_net_set_features,
    .get_protocol_features = vu_gpio_get_protocol_features,
};
1147
register_vhost_gpio_test(void)1148 static void register_vhost_gpio_test(void)
1149 {
1150 QOSGraphTestOptions opts = {
1151 .before = vhost_user_test_setup,
1152 .subprocess = true,
1153 .arg = &g_vu_gpio_ops,
1154 };
1155
1156 qemu_add_opts(&qemu_chardev_opts);
1157
1158 qos_add_test("read-guest-mem/memfile",
1159 "vhost-user-gpio", test_read_guest_mem, &opts);
1160 }
1161 libqos_init(register_vhost_gpio_test);
1162
vu_scmi_get_features(TestServer * s)1163 static uint64_t vu_scmi_get_features(TestServer *s)
1164 {
1165 return 0x1ULL << VIRTIO_F_VERSION_1 |
1166 0x1ULL << VIRTIO_SCMI_F_P2A_CHANNELS |
1167 0x1ULL << VHOST_USER_F_PROTOCOL_FEATURES;
1168 }
1169
/* Reply advertising only the MQ protocol feature for the SCMI stub. */
static void vu_scmi_get_protocol_features(TestServer *s, CharBackend *chr,
                                          VhostUserMsg *msg)
{
    msg->flags |= VHOST_USER_REPLY_MASK;
    msg->size = sizeof(m.payload.u64);
    msg->payload.u64 = 1ULL << VHOST_USER_PROTOCOL_F_MQ;

    qemu_chr_fe_write_all(chr, (uint8_t *)msg, VHOST_USER_HDR_SIZE + msg->size);
}
1179
/* Ops table for the vhost-user-scmi stub; shares GPIO opts and net
 * SET_FEATURES handling. */
static struct vhost_user_ops g_vu_scmi_ops = {
    .type = VHOST_USER_SCMI,

    .append_opts = append_vhost_gpio_opts,

    .get_features = vu_scmi_get_features,
    .set_features = vu_net_set_features,
    .get_protocol_features = vu_scmi_get_protocol_features,
};
1189
register_vhost_scmi_test(void)1190 static void register_vhost_scmi_test(void)
1191 {
1192 QOSGraphTestOptions opts = {
1193 .before = vhost_user_test_setup,
1194 .subprocess = true,
1195 .arg = &g_vu_scmi_ops,
1196 };
1197
1198 qemu_add_opts(&qemu_chardev_opts);
1199
1200 qos_add_test("scmi/read-guest-mem/memfile",
1201 "vhost-user-scmi", test_read_guest_mem, &opts);
1202 }
1203 libqos_init(register_vhost_scmi_test);
1204