1 /*
2 * Vhost User library
3 *
4 * Copyright IBM, Corp. 2007
5 * Copyright (c) 2016 Red Hat, Inc.
6 *
7 * Authors:
8 * Anthony Liguori <aliguori@us.ibm.com>
9 * Marc-André Lureau <mlureau@redhat.com>
10 * Victor Kaplansky <victork@redhat.com>
11 *
12 * This work is licensed under the terms of the GNU GPL, version 2 or
13 * later. See the COPYING file in the top-level directory.
14 */
15
16 #ifndef _GNU_SOURCE
17 #define _GNU_SOURCE
18 #endif
19
20 /* this code avoids GLib dependency */
21 #include <stdlib.h>
22 #include <stdio.h>
23 #include <unistd.h>
24 #include <stdarg.h>
25 #include <errno.h>
26 #include <string.h>
27 #include <assert.h>
28 #include <inttypes.h>
29 #include <sys/types.h>
30 #include <sys/socket.h>
31 #include <sys/eventfd.h>
32 #include <sys/mman.h>
33 #include <endian.h>
34
35 /* Necessary to provide VIRTIO_F_VERSION_1 on systems
36 * with older Linux headers. Must appear before
37 * <linux/vhost.h> below.
38 */
39 #include "standard-headers/linux/virtio_config.h"
40
41 #if defined(__linux__)
42 #include <sys/syscall.h>
43 #include <fcntl.h>
44 #include <sys/ioctl.h>
45 #include <linux/vhost.h>
46 #include <sys/vfs.h>
47 #include <linux/magic.h>
48
49 #ifdef __NR_userfaultfd
50 #include <linux/userfaultfd.h>
51 #endif
52
53 #endif
54
55 #include "include/atomic.h"
56
57 #include "libvhost-user.h"
58
59 /* usually provided by GLib */
60 #if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ > 4)
61 #if !defined(__clang__) && (__GNUC__ == 4 && __GNUC_MINOR__ == 4)
62 #define G_GNUC_PRINTF(format_idx, arg_idx) \
63 __attribute__((__format__(gnu_printf, format_idx, arg_idx)))
64 #else
65 #define G_GNUC_PRINTF(format_idx, arg_idx) \
66 __attribute__((__format__(__printf__, format_idx, arg_idx)))
67 #endif
68 #else /* !__GNUC__ */
69 #define G_GNUC_PRINTF(format_idx, arg_idx)
70 #endif /* !__GNUC__ */
71 #ifndef MIN
72 #define MIN(x, y) ({ \
73 __typeof__(x) _min1 = (x); \
74 __typeof__(y) _min2 = (y); \
75 (void) (&_min1 == &_min2); \
76 _min1 < _min2 ? _min1 : _min2; })
77 #endif
78
79 /* Round number down to multiple */
80 #define ALIGN_DOWN(n, m) ((n) / (m) * (m))
81
82 /* Round number up to multiple */
83 #define ALIGN_UP(n, m) ALIGN_DOWN((n) + (m) - 1, (m))
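/* e.g. ALIGN_DOWN(5, 4) == 4 and ALIGN_UP(5, 4) == 8 */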
84
85 #ifndef unlikely
86 #define unlikely(x) __builtin_expect(!!(x), 0)
87 #endif
88
89 /* Align each region to cache line size in inflight buffer */
90 #define INFLIGHT_ALIGNMENT 64
91
92 /* The version of inflight buffer */
93 #define INFLIGHT_VERSION 1
94
95 /* The version of the protocol we support */
96 #define VHOST_USER_VERSION 1
97 #define LIBVHOST_USER_DEBUG 0
98
99 #define DPRINT(...) \
100 do { \
101 if (LIBVHOST_USER_DEBUG) { \
102 fprintf(stderr, __VA_ARGS__); \
103 } \
104 } while (0)
105
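/* Test whether bit 'fbit' is set in the 64-bit feature mask 'features'. */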
106 static inline
107 bool has_feature(uint64_t features, unsigned int fbit)
108 {
109 assert(fbit < 64);
110 return !!(features & (1ULL << fbit));
111 }
112
113 static inline
114 bool vu_has_feature(VuDev *dev,
115 unsigned int fbit)
116 {
117 return has_feature(dev->features, fbit);
118 }
119
120 static inline bool vu_has_protocol_feature(VuDev *dev, unsigned int fbit)
121 {
122 return has_feature(dev->protocol_features, fbit);
123 }
124
125 const char *
126 vu_request_to_string(unsigned int req)
127 {
128 #define REQ(req) [req] = #req
129 static const char *vu_request_str[] = {
130 REQ(VHOST_USER_NONE),
131 REQ(VHOST_USER_GET_FEATURES),
132 REQ(VHOST_USER_SET_FEATURES),
133 REQ(VHOST_USER_SET_OWNER),
134 REQ(VHOST_USER_RESET_OWNER),
135 REQ(VHOST_USER_SET_MEM_TABLE),
136 REQ(VHOST_USER_SET_LOG_BASE),
137 REQ(VHOST_USER_SET_LOG_FD),
138 REQ(VHOST_USER_SET_VRING_NUM),
139 REQ(VHOST_USER_SET_VRING_ADDR),
140 REQ(VHOST_USER_SET_VRING_BASE),
141 REQ(VHOST_USER_GET_VRING_BASE),
142 REQ(VHOST_USER_SET_VRING_KICK),
143 REQ(VHOST_USER_SET_VRING_CALL),
144 REQ(VHOST_USER_SET_VRING_ERR),
145 REQ(VHOST_USER_GET_PROTOCOL_FEATURES),
146 REQ(VHOST_USER_SET_PROTOCOL_FEATURES),
147 REQ(VHOST_USER_GET_QUEUE_NUM),
148 REQ(VHOST_USER_SET_VRING_ENABLE),
149 REQ(VHOST_USER_SEND_RARP),
150 REQ(VHOST_USER_NET_SET_MTU),
151 REQ(VHOST_USER_SET_BACKEND_REQ_FD),
152 REQ(VHOST_USER_IOTLB_MSG),
153 REQ(VHOST_USER_SET_VRING_ENDIAN),
154 REQ(VHOST_USER_GET_CONFIG),
155 REQ(VHOST_USER_SET_CONFIG),
156 REQ(VHOST_USER_POSTCOPY_ADVISE),
157 REQ(VHOST_USER_POSTCOPY_LISTEN),
158 REQ(VHOST_USER_POSTCOPY_END),
159 REQ(VHOST_USER_GET_INFLIGHT_FD),
160 REQ(VHOST_USER_SET_INFLIGHT_FD),
161 REQ(VHOST_USER_GPU_SET_SOCKET),
162 REQ(VHOST_USER_VRING_KICK),
163 REQ(VHOST_USER_GET_MAX_MEM_SLOTS),
164 REQ(VHOST_USER_ADD_MEM_REG),
165 REQ(VHOST_USER_REM_MEM_REG),
166 REQ(VHOST_USER_GET_SHARED_OBJECT),
167 REQ(VHOST_USER_MAX),
168 };
169 #undef REQ
170
171 if (req < VHOST_USER_MAX) {
172 return vu_request_str[req];
173 } else {
174 return "unknown";
175 }
176 }
177
178 static void G_GNUC_PRINTF(2, 3)
179 vu_panic(VuDev *dev, const char *msg, ...)
180 {
181 char *buf = NULL;
182 va_list ap;
183
184 va_start(ap, msg);
185 if (vasprintf(&buf, msg, ap) < 0) {
186 buf = NULL;
187 }
188 va_end(ap);
189
190 dev->broken = true;
191 dev->panic(dev, buf);
192 free(buf);
193
194 /*
195 * FIXME:
196 * find a way to call virtio_error, or perhaps close the connection?
197 */
198 }
199
200 /* Search for a memory region that covers this guest physical address. */
201 static VuDevRegion *
202 vu_gpa_to_mem_region(VuDev *dev, uint64_t guest_addr)
203 {
204 int low = 0;
205 int high = dev->nregions - 1;
206
207 /*
208 * Memory regions cannot overlap in guest physical address space. Each
209 * GPA belongs to exactly one memory region, so there can only be one
210 * match.
211 *
212 * We store our memory regions ordered by GPA and can simply perform a
213 * binary search.
214 */
215 while (low <= high) {
216 unsigned int mid = low + (high - low) / 2;
217 VuDevRegion *cur = &dev->regions[mid];
218
219 if (guest_addr >= cur->gpa && guest_addr < cur->gpa + cur->size) {
220 return cur;
221 }
222 if (guest_addr >= cur->gpa + cur->size) {
223 low = mid + 1;
224 }
225 if (guest_addr < cur->gpa) {
226 high = mid - 1;
227 }
228 }
229 return NULL;
230 }
231
232 /* Translate guest physical address to our virtual address. */
233 void *
234 vu_gpa_to_va(VuDev *dev, uint64_t *plen, uint64_t guest_addr)
235 {
236 VuDevRegion *r;
237
238 if (*plen == 0) {
239 return NULL;
240 }
241
242 r = vu_gpa_to_mem_region(dev, guest_addr);
243 if (!r) {
244 return NULL;
245 }
246
247 if ((guest_addr + *plen) > (r->gpa + r->size)) {
248 *plen = r->gpa + r->size - guest_addr;
249 }
250 return (void *)(uintptr_t)guest_addr - r->gpa + r->mmap_addr +
251 r->mmap_offset;
252 }
253
254 /* Translate qemu virtual address to our virtual address. */
255 static void *
256 qva_to_va(VuDev *dev, uint64_t qemu_addr)
257 {
258 unsigned int i;
259
260 /* Find matching memory region. */
261 for (i = 0; i < dev->nregions; i++) {
262 VuDevRegion *r = &dev->regions[i];
263
264 if ((qemu_addr >= r->qva) && (qemu_addr < (r->qva + r->size))) {
265 return (void *)(uintptr_t)
266 qemu_addr - r->qva + r->mmap_addr + r->mmap_offset;
267 }
268 }
269
270 return NULL;
271 }
272
273 static void
274 vu_remove_all_mem_regs(VuDev *dev)
275 {
276 unsigned int i;
277
278 for (i = 0; i < dev->nregions; i++) {
279 VuDevRegion *r = &dev->regions[i];
280
281 munmap((void *)(uintptr_t)r->mmap_addr, r->size + r->mmap_offset);
282 }
283 dev->nregions = 0;
284 }
285
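/*
 * Translate the ring addresses received via SET_VRING_ADDR into our own
 * virtual addresses. Returns true if any of the three rings could not be
 * translated.
 */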
286 static bool
287 map_ring(VuDev *dev, VuVirtq *vq)
288 {
289 vq->vring.desc = qva_to_va(dev, vq->vra.desc_user_addr);
290 vq->vring.used = qva_to_va(dev, vq->vra.used_user_addr);
291 vq->vring.avail = qva_to_va(dev, vq->vra.avail_user_addr);
292
293 DPRINT("Setting virtq addresses:\n");
294 DPRINT(" vring_desc at %p\n", vq->vring.desc);
295 DPRINT(" vring_used at %p\n", vq->vring.used);
296 DPRINT(" vring_avail at %p\n", vq->vring.avail);
297
298 return !(vq->vring.desc && vq->vring.used && vq->vring.avail);
299 }
300
301 static bool
302 vu_is_vq_usable(VuDev *dev, VuVirtq *vq)
303 {
304 if (unlikely(dev->broken)) {
305 return false;
306 }
307
308 if (likely(vq->vring.avail)) {
309 return true;
310 }
311
312 /*
313 * In corner cases, we might temporarily remove a memory region that
314 * mapped a ring. When removing a memory region we make sure to
315 * unmap any rings that would be impacted. Let's try to remap if we
316 * already succeeded mapping this ring once.
317 */
318 if (!vq->vra.desc_user_addr || !vq->vra.used_user_addr ||
319 !vq->vra.avail_user_addr) {
320 return false;
321 }
322 if (map_ring(dev, vq)) {
323 vu_panic(dev, "remapping queue on access");
324 return false;
325 }
326 return true;
327 }
328
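/*
 * Forget the mappings of any queue whose descriptor, avail and used rings
 * all lie within the region being removed; they are remapped on demand by
 * vu_is_vq_usable().
 */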
329 static void
330 unmap_rings(VuDev *dev, VuDevRegion *r)
331 {
332 int i;
333
334 for (i = 0; i < dev->max_queues; i++) {
335 VuVirtq *vq = &dev->vq[i];
336 const uintptr_t desc = (uintptr_t)vq->vring.desc;
337 const uintptr_t used = (uintptr_t)vq->vring.used;
338 const uintptr_t avail = (uintptr_t)vq->vring.avail;
339
340 if (desc < r->mmap_addr || desc >= r->mmap_addr + r->size) {
341 continue;
342 }
343 if (used < r->mmap_addr || used >= r->mmap_addr + r->size) {
344 continue;
345 }
346 if (avail < r->mmap_addr || avail >= r->mmap_addr + r->size) {
347 continue;
348 }
349
350 DPRINT("Unmapping rings of queue %d\n", i);
351 vq->vring.desc = NULL;
352 vq->vring.used = NULL;
353 vq->vring.avail = NULL;
354 }
355 }
356
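/* Return the huge page size if fd is backed by hugetlbfs, otherwise 0. */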
357 static size_t
358 get_fd_hugepagesize(int fd)
359 {
360 #if defined(__linux__)
361 struct statfs fs;
362 int ret;
363
364 do {
365 ret = fstatfs(fd, &fs);
366 } while (ret != 0 && errno == EINTR);
367
368 if (!ret && (unsigned int)fs.f_type == HUGETLBFS_MAGIC) {
369 return fs.f_bsize;
370 }
371 #endif
372 return 0;
373 }
374
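/*
 * mmap() the region described by msg_region from fd and insert it into
 * dev->regions, keeping the array sorted by guest physical address.
 */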
375 static void
376 _vu_add_mem_reg(VuDev *dev, VhostUserMemoryRegion *msg_region, int fd)
377 {
378 const uint64_t start_gpa = msg_region->guest_phys_addr;
379 const uint64_t end_gpa = start_gpa + msg_region->memory_size;
380 int prot = PROT_READ | PROT_WRITE;
381 uint64_t mmap_offset, fd_offset;
382 size_t hugepagesize;
383 VuDevRegion *r;
384 void *mmap_addr;
385 int low = 0;
386 int high = dev->nregions - 1;
387 unsigned int idx;
388
389 DPRINT("Adding region %d\n", dev->nregions);
390 DPRINT(" guest_phys_addr: 0x%016"PRIx64"\n",
391 msg_region->guest_phys_addr);
392 DPRINT(" memory_size: 0x%016"PRIx64"\n",
393 msg_region->memory_size);
394 DPRINT(" userspace_addr: 0x%016"PRIx64"\n",
395 msg_region->userspace_addr);
396 DPRINT(" old mmap_offset: 0x%016"PRIx64"\n",
397 msg_region->mmap_offset);
398
399 if (dev->postcopy_listening) {
400 /*
401 * In postcopy we're using PROT_NONE here to catch anyone
402 * accessing it before we userfault
403 */
404 prot = PROT_NONE;
405 }
406
407 /*
408 * We will add memory regions into the array sorted by GPA. Perform a
409 * binary search to locate the insertion point: it will be at the low
410 * index.
411 */
412 while (low <= high) {
413 unsigned int mid = low + (high - low) / 2;
414 VuDevRegion *cur = &dev->regions[mid];
415
416 /* Overlap of GPA addresses. */
417 if (start_gpa < cur->gpa + cur->size && cur->gpa < end_gpa) {
418 vu_panic(dev, "regions with overlapping guest physical addresses");
419 return;
420 }
421 if (start_gpa >= cur->gpa + cur->size) {
422 low = mid + 1;
423 }
424 if (start_gpa < cur->gpa) {
425 high = mid - 1;
426 }
427 }
428 idx = low;
429
430 /*
431 * Convert most of msg_region->mmap_offset to fd_offset. In almost all
432 * cases, this will leave us with mmap_offset == 0, mmap()'ing only
433 * what we really need. Only if a memory region would partially cover
434 * hugetlb pages would we get mmap_offset != 0, which usually no longer
435 * happens (i.e., with modern QEMU).
436 *
437 * Note that mmap() with hugetlb would fail if the offset into the file
438 * is not aligned to the huge page size.
439 */
440 hugepagesize = get_fd_hugepagesize(fd);
441 if (hugepagesize) {
442 fd_offset = ALIGN_DOWN(msg_region->mmap_offset, hugepagesize);
443 mmap_offset = msg_region->mmap_offset - fd_offset;
444 } else {
445 fd_offset = msg_region->mmap_offset;
446 mmap_offset = 0;
447 }
448
449 DPRINT(" fd_offset: 0x%016"PRIx64"\n",
450 fd_offset);
451 DPRINT(" new mmap_offset: 0x%016"PRIx64"\n",
452 mmap_offset);
453
454 mmap_addr = mmap(0, msg_region->memory_size + mmap_offset,
455 prot, MAP_SHARED | MAP_NORESERVE, fd, fd_offset);
456 if (mmap_addr == MAP_FAILED) {
457 vu_panic(dev, "region mmap error: %s", strerror(errno));
458 return;
459 }
460 DPRINT(" mmap_addr: 0x%016"PRIx64"\n",
461 (uint64_t)(uintptr_t)mmap_addr);
462
463 #if defined(__linux__)
464 /* Don't include all guest memory in a coredump. */
465 madvise(mmap_addr, msg_region->memory_size + mmap_offset,
466 MADV_DONTDUMP);
467 #endif
468
469 /* Shift all affected entries by 1 to open a hole at idx. */
470 r = &dev->regions[idx];
471 memmove(r + 1, r, sizeof(VuDevRegion) * (dev->nregions - idx));
472 r->gpa = msg_region->guest_phys_addr;
473 r->size = msg_region->memory_size;
474 r->qva = msg_region->userspace_addr;
475 r->mmap_addr = (uint64_t)(uintptr_t)mmap_addr;
476 r->mmap_offset = mmap_offset;
477 dev->nregions++;
478
479 if (dev->postcopy_listening) {
480 /*
481 * Return the address to QEMU so that it can translate the ufd
482 * fault addresses back.
483 */
484 msg_region->userspace_addr = r->mmap_addr + r->mmap_offset;
485 }
486 }
487
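/* Close all file descriptors that arrived attached to a message. */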
488 static void
489 vmsg_close_fds(VhostUserMsg *vmsg)
490 {
491 int i;
492
493 for (i = 0; i < vmsg->fd_num; i++) {
494 close(vmsg->fds[i]);
495 }
496 }
497
498 /* Set reply payload.u64 and clear request flags and fd_num */
499 static void vmsg_set_reply_u64(VhostUserMsg *vmsg, uint64_t val)
500 {
501 vmsg->flags = 0; /* defaults will be set by vu_send_reply() */
502 vmsg->size = sizeof(vmsg->payload.u64);
503 vmsg->payload.u64 = val;
504 vmsg->fd_num = 0;
505 }
506
507 /* A test to see if we have userfault available */
508 static bool
509 have_userfault(void)
510 {
511 #if defined(__linux__) && defined(__NR_userfaultfd) &&\
512 defined(UFFD_FEATURE_MISSING_SHMEM) &&\
513 defined(UFFD_FEATURE_MISSING_HUGETLBFS)
514 /* Now test the kernel we're running on really has the features */
515 int ufd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
516 struct uffdio_api api_struct;
517 if (ufd < 0) {
518 return false;
519 }
520
521 api_struct.api = UFFD_API;
522 api_struct.features = UFFD_FEATURE_MISSING_SHMEM |
523 UFFD_FEATURE_MISSING_HUGETLBFS;
524 if (ioctl(ufd, UFFDIO_API, &api_struct)) {
525 close(ufd);
526 return false;
527 }
528 close(ufd);
529 return true;
530
531 #else
532 return false;
533 #endif
534 }
535
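/*
 * Read one vhost-user message from conn_fd: first the fixed-size header
 * along with any attached file descriptors, then the variable-sized
 * payload. Returns false on error.
 */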
536 static bool
537 vu_message_read_default(VuDev *dev, int conn_fd, VhostUserMsg *vmsg)
538 {
539 char control[CMSG_SPACE(VHOST_MEMORY_BASELINE_NREGIONS * sizeof(int))] = {};
540 struct iovec iov = {
541 .iov_base = (char *)vmsg,
542 .iov_len = VHOST_USER_HDR_SIZE,
543 };
544 struct msghdr msg = {
545 .msg_iov = &iov,
546 .msg_iovlen = 1,
547 .msg_control = control,
548 .msg_controllen = sizeof(control),
549 };
550 size_t fd_size;
551 struct cmsghdr *cmsg;
552 int rc;
553
554 do {
555 rc = recvmsg(conn_fd, &msg, 0);
556 } while (rc < 0 && (errno == EINTR || errno == EAGAIN));
557
558 if (rc < 0) {
559 vu_panic(dev, "Error while recvmsg: %s", strerror(errno));
560 return false;
561 }
562
563 vmsg->fd_num = 0;
564 for (cmsg = CMSG_FIRSTHDR(&msg);
565 cmsg != NULL;
566 cmsg = CMSG_NXTHDR(&msg, cmsg))
567 {
568 if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
569 fd_size = cmsg->cmsg_len - CMSG_LEN(0);
570 vmsg->fd_num = fd_size / sizeof(int);
571 assert(fd_size <= VHOST_MEMORY_BASELINE_NREGIONS * sizeof(int));
572 memcpy(vmsg->fds, CMSG_DATA(cmsg), fd_size);
573 break;
574 }
575 }
576
577 if (vmsg->size > sizeof(vmsg->payload)) {
578 vu_panic(dev,
579 "Error: too big message request: %d, size: vmsg->size: %u, "
580 "while sizeof(vmsg->payload) = %zu\n",
581 vmsg->request, vmsg->size, sizeof(vmsg->payload));
582 goto fail;
583 }
584
585 if (vmsg->size) {
586 do {
587 rc = read(conn_fd, &vmsg->payload, vmsg->size);
588 } while (rc < 0 && (errno == EINTR || errno == EAGAIN));
589
590 if (rc <= 0) {
591 vu_panic(dev, "Error while reading: %s", strerror(errno));
592 goto fail;
593 }
594
595 assert((uint32_t)rc == vmsg->size);
596 }
597
598 return true;
599
600 fail:
601 vmsg_close_fds(vmsg);
602
603 return false;
604 }
605
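/*
 * Send a vhost-user message on conn_fd: the header (with any fds attached
 * as SCM_RIGHTS ancillary data) via sendmsg(), then the payload via a
 * separate write().
 */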
606 static bool
607 vu_message_write(VuDev *dev, int conn_fd, VhostUserMsg *vmsg)
608 {
609 int rc;
610 uint8_t *p = (uint8_t *)vmsg;
611 char control[CMSG_SPACE(VHOST_MEMORY_BASELINE_NREGIONS * sizeof(int))] = {};
612 struct iovec iov = {
613 .iov_base = (char *)vmsg,
614 .iov_len = VHOST_USER_HDR_SIZE,
615 };
616 struct msghdr msg = {
617 .msg_iov = &iov,
618 .msg_iovlen = 1,
619 .msg_control = control,
620 };
621 struct cmsghdr *cmsg;
622
623 memset(control, 0, sizeof(control));
624 assert(vmsg->fd_num <= VHOST_MEMORY_BASELINE_NREGIONS);
625 if (vmsg->fd_num > 0) {
626 size_t fdsize = vmsg->fd_num * sizeof(int);
627 msg.msg_controllen = CMSG_SPACE(fdsize);
628 cmsg = CMSG_FIRSTHDR(&msg);
629 cmsg->cmsg_len = CMSG_LEN(fdsize);
630 cmsg->cmsg_level = SOL_SOCKET;
631 cmsg->cmsg_type = SCM_RIGHTS;
632 memcpy(CMSG_DATA(cmsg), vmsg->fds, fdsize);
633 } else {
634 msg.msg_controllen = 0;
635 }
636
637 do {
638 rc = sendmsg(conn_fd, &msg, 0);
639 } while (rc < 0 && (errno == EINTR || errno == EAGAIN));
640
641 if (vmsg->size) {
642 do {
643 if (vmsg->data) {
644 rc = write(conn_fd, vmsg->data, vmsg->size);
645 } else {
646 rc = write(conn_fd, p + VHOST_USER_HDR_SIZE, vmsg->size);
647 }
648 } while (rc < 0 && (errno == EINTR || errno == EAGAIN));
649 }
650
651 if (rc <= 0) {
652 vu_panic(dev, "Error while writing: %s", strerror(errno));
653 return false;
654 }
655
656 return true;
657 }
658
659 static bool
660 vu_send_reply(VuDev *dev, int conn_fd, VhostUserMsg *vmsg)
661 {
662 /* Set the version in the flags when sending the reply */
663 vmsg->flags &= ~VHOST_USER_VERSION_MASK;
664 vmsg->flags |= VHOST_USER_VERSION;
665 vmsg->flags |= VHOST_USER_REPLY_MASK;
666
667 return vu_message_write(dev, conn_fd, vmsg);
668 }
669
670 /*
671 * Processes a reply on the backend channel.
672 * Entered with backend_mutex held and releases it before exit.
673 * Returns true on success.
674 */
675 static bool
676 vu_process_message_reply(VuDev *dev, const VhostUserMsg *vmsg)
677 {
678 VhostUserMsg msg_reply;
679 bool result = false;
680
681 if ((vmsg->flags & VHOST_USER_NEED_REPLY_MASK) == 0) {
682 result = true;
683 goto out;
684 }
685
686 if (!vu_message_read_default(dev, dev->backend_fd, &msg_reply)) {
687 goto out;
688 }
689
690 if (msg_reply.request != vmsg->request) {
691 DPRINT("Received unexpected msg type. Expected %d received %d",
692 vmsg->request, msg_reply.request);
693 goto out;
694 }
695
696 result = msg_reply.payload.u64 == 0;
697
698 out:
699 pthread_mutex_unlock(&dev->backend_mutex);
700 return result;
701 }
702
703 /* Kick the log_call_fd if required. */
704 static void
705 vu_log_kick(VuDev *dev)
706 {
707 if (dev->log_call_fd != -1) {
708 DPRINT("Kicking the QEMU's log...\n");
709 if (eventfd_write(dev->log_call_fd, 1) < 0) {
710 vu_panic(dev, "Error writing eventfd: %s", strerror(errno));
711 }
712 }
713 }
714
715 static void
716 vu_log_page(uint8_t *log_table, uint64_t page)
717 {
718 DPRINT("Logged dirty guest page: %"PRId64"\n", page);
719 qatomic_or(&log_table[page / 8], 1 << (page % 8));
720 }
721
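/*
 * Mark all dirty-log pages covering [address, address + length) and kick
 * the log eventfd if one was provided.
 */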
722 static void
723 vu_log_write(VuDev *dev, uint64_t address, uint64_t length)
724 {
725 uint64_t page;
726
727 if (!(dev->features & (1ULL << VHOST_F_LOG_ALL)) ||
728 !dev->log_table || !length) {
729 return;
730 }
731
732 assert(dev->log_size > ((address + length - 1) / VHOST_LOG_PAGE / 8));
733
734 page = address / VHOST_LOG_PAGE;
735 while (page * VHOST_LOG_PAGE < address + length) {
736 vu_log_page(dev->log_table, page);
737 page += 1;
738 }
739
740 vu_log_kick(dev);
741 }
742
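/* I/O watch callback: drain the kick eventfd and run the queue's handler. */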
743 static void
744 vu_kick_cb(VuDev *dev, int condition, void *data)
745 {
746 int index = (intptr_t)data;
747 VuVirtq *vq = &dev->vq[index];
748 int sock = vq->kick_fd;
749 eventfd_t kick_data;
750 ssize_t rc;
751
752 rc = eventfd_read(sock, &kick_data);
753 if (rc == -1) {
754 vu_panic(dev, "kick eventfd_read(): %s", strerror(errno));
755 dev->remove_watch(dev, dev->vq[index].kick_fd);
756 } else {
757 DPRINT("Got kick_data: %016"PRIx64" handler:%p idx:%d\n",
758 kick_data, vq->handler, index);
759 if (vq->handler) {
760 vq->handler(dev, index);
761 }
762 }
763 }
764
765 static bool
766 vu_get_features_exec(VuDev *dev, VhostUserMsg *vmsg)
767 {
768 vmsg->payload.u64 =
769 /*
770 * The following VIRTIO feature bits are supported by our virtqueue
771 * implementation:
772 */
773 1ULL << VIRTIO_F_NOTIFY_ON_EMPTY |
774 1ULL << VIRTIO_RING_F_INDIRECT_DESC |
775 1ULL << VIRTIO_RING_F_EVENT_IDX |
776 1ULL << VIRTIO_F_VERSION_1 |
777
778 /* vhost-user feature bits */
779 1ULL << VHOST_F_LOG_ALL |
780 1ULL << VHOST_USER_F_PROTOCOL_FEATURES;
781
782 if (dev->iface->get_features) {
783 vmsg->payload.u64 |= dev->iface->get_features(dev);
784 }
785
786 vmsg->size = sizeof(vmsg->payload.u64);
787 vmsg->fd_num = 0;
788
789 DPRINT("Sending back to guest u64: 0x%016"PRIx64"\n", vmsg->payload.u64);
790
791 return true;
792 }
793
794 static void
795 vu_set_enable_all_rings(VuDev *dev, bool enabled)
796 {
797 uint16_t i;
798
799 for (i = 0; i < dev->max_queues; i++) {
800 dev->vq[i].enable = enabled;
801 }
802 }
803
804 static bool
805 vu_set_features_exec(VuDev *dev, VhostUserMsg *vmsg)
806 {
807 DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);
808
809 dev->features = vmsg->payload.u64;
810 if (!vu_has_feature(dev, VIRTIO_F_VERSION_1)) {
811 /*
812 * We only support devices conforming to VIRTIO 1.0 or
813 * later
814 */
815 vu_panic(dev, "virtio legacy devices aren't supported by libvhost-user");
816 return false;
817 }
818
819 if (!vu_has_feature(dev, VHOST_USER_F_PROTOCOL_FEATURES)) {
820 vu_set_enable_all_rings(dev, true);
821 }
822
823 if (dev->iface->set_features) {
824 dev->iface->set_features(dev, dev->features);
825 }
826
827 return false;
828 }
829
830 static bool
831 vu_set_owner_exec(VuDev *dev, VhostUserMsg *vmsg)
832 {
833 return false;
834 }
835
836 static void
837 vu_close_log(VuDev *dev)
838 {
839 if (dev->log_table) {
840 if (munmap(dev->log_table, dev->log_size) != 0) {
841 perror("close log munmap() error");
842 }
843
844 dev->log_table = NULL;
845 }
846 if (dev->log_call_fd != -1) {
847 close(dev->log_call_fd);
848 dev->log_call_fd = -1;
849 }
850 }
851
852 static bool
853 vu_reset_device_exec(VuDev *dev, VhostUserMsg *vmsg)
854 {
855 vu_set_enable_all_rings(dev, false);
856
857 return false;
858 }
859
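/*
 * Register every mapped memory region with the postcopy userfaultfd so
 * that accesses to not-yet-migrated pages generate missing-page faults.
 */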
860 static bool
861 generate_faults(VuDev *dev) {
862 unsigned int i;
863 for (i = 0; i < dev->nregions; i++) {
864 #ifdef UFFDIO_REGISTER
865 VuDevRegion *dev_region = &dev->regions[i];
866 int ret;
867 struct uffdio_register reg_struct;
868
869 /*
870 * We should already have an open userfaultfd (ufd). Register each
871 * memory range with it.
872 * Discard any mapping we have here; note I can't use MADV_REMOVE
873 * or fallocate to make the hole since I don't want to lose
874 * data that's already arrived in the shared process.
875 * TODO: How to handle hugepages
876 */
877 ret = madvise((void *)(uintptr_t)dev_region->mmap_addr,
878 dev_region->size + dev_region->mmap_offset,
879 MADV_DONTNEED);
880 if (ret) {
881 fprintf(stderr,
882 "%s: Failed to madvise(DONTNEED) region %d: %s\n",
883 __func__, i, strerror(errno));
884 }
885 /*
886 * Turn off transparent hugepages so we don't lose wakeups
887 * in neighbouring pages.
888 * TODO: Turn this back on later.
889 */
890 ret = madvise((void *)(uintptr_t)dev_region->mmap_addr,
891 dev_region->size + dev_region->mmap_offset,
892 MADV_NOHUGEPAGE);
893 if (ret) {
894 /*
895 * Note: This can happen legally on kernels that are configured
896 * without madvise'able hugepages
897 */
898 fprintf(stderr,
899 "%s: Failed to madvise(NOHUGEPAGE) region %d: %s\n",
900 __func__, i, strerror(errno));
901 }
902
903 reg_struct.range.start = (uintptr_t)dev_region->mmap_addr;
904 reg_struct.range.len = dev_region->size + dev_region->mmap_offset;
905 reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;
906
907 if (ioctl(dev->postcopy_ufd, UFFDIO_REGISTER, &reg_struct)) {
908 vu_panic(dev, "%s: Failed to userfault region %d "
909 "@%" PRIx64 " + size:%" PRIx64 " offset: %" PRIx64
910 ": (ufd=%d)%s\n",
911 __func__, i,
912 dev_region->mmap_addr,
913 dev_region->size, dev_region->mmap_offset,
914 dev->postcopy_ufd, strerror(errno));
915 return false;
916 }
917 if (!(reg_struct.ioctls & (1ULL << _UFFDIO_COPY))) {
918 vu_panic(dev, "%s Region (%d) doesn't support COPY",
919 __func__, i);
920 return false;
921 }
922 DPRINT("%s: region %d: Registered userfault for %"
923 PRIx64 " + %" PRIx64 "\n", __func__, i,
924 (uint64_t)reg_struct.range.start,
925 (uint64_t)reg_struct.range.len);
926 /* Now it's registered we can let the client at it */
927 if (mprotect((void *)(uintptr_t)dev_region->mmap_addr,
928 dev_region->size + dev_region->mmap_offset,
929 PROT_READ | PROT_WRITE)) {
930 vu_panic(dev, "failed to mprotect region %d for postcopy (%s)",
931 i, strerror(errno));
932 return false;
933 }
934 /* TODO: Stash 'zero' support flags somewhere */
935 #endif
936 }
937
938 return true;
939 }
940
941 static bool
942 vu_add_mem_reg(VuDev *dev, VhostUserMsg *vmsg) {
943 VhostUserMemoryRegion m = vmsg->payload.memreg.region, *msg_region = &m;
944
945 if (vmsg->fd_num != 1) {
946 vmsg_close_fds(vmsg);
947 vu_panic(dev, "VHOST_USER_ADD_MEM_REG received %d fds - only 1 fd "
948 "should be sent for this message type", vmsg->fd_num);
949 return false;
950 }
951
952 if (vmsg->size < VHOST_USER_MEM_REG_SIZE) {
953 close(vmsg->fds[0]);
954 vu_panic(dev, "VHOST_USER_ADD_MEM_REG requires a message size of at "
955 "least %zu bytes and only %d bytes were received",
956 VHOST_USER_MEM_REG_SIZE, vmsg->size);
957 return false;
958 }
959
960 if (dev->nregions == VHOST_USER_MAX_RAM_SLOTS) {
961 close(vmsg->fds[0]);
962 vu_panic(dev, "failing attempt to hot add memory via "
963 "VHOST_USER_ADD_MEM_REG message because the backend has "
964 "no free ram slots available");
965 return false;
966 }
967
968 /*
969 * If we are in postcopy mode and we receive a u64 payload with a 0 value
970 * we know all the postcopy client bases have been received, and we
971 * should start generating faults.
972 */
973 if (dev->postcopy_listening &&
974 vmsg->size == sizeof(vmsg->payload.u64) &&
975 vmsg->payload.u64 == 0) {
976 (void)generate_faults(dev);
977 return false;
978 }
979
980 _vu_add_mem_reg(dev, msg_region, vmsg->fds[0]);
981 close(vmsg->fds[0]);
982
983 if (dev->postcopy_listening) {
984 /* Send the message back to qemu with the addresses filled in. */
985 vmsg->fd_num = 0;
986 DPRINT("Successfully added new region in postcopy\n");
987 return true;
988 }
989 DPRINT("Successfully added new region\n");
990 return false;
991 }
992
993 static inline bool reg_equal(VuDevRegion *vudev_reg,
994 VhostUserMemoryRegion *msg_reg)
995 {
996 if (vudev_reg->gpa == msg_reg->guest_phys_addr &&
997 vudev_reg->qva == msg_reg->userspace_addr &&
998 vudev_reg->size == msg_reg->memory_size) {
999 return true;
1000 }
1001
1002 return false;
1003 }
1004
1005 static bool
1006 vu_rem_mem_reg(VuDev *dev, VhostUserMsg *vmsg) {
1007 VhostUserMemoryRegion m = vmsg->payload.memreg.region, *msg_region = &m;
1008 unsigned int idx;
1009 VuDevRegion *r;
1010
1011 if (vmsg->fd_num > 1) {
1012 vmsg_close_fds(vmsg);
1013 vu_panic(dev, "VHOST_USER_REM_MEM_REG received %d fds - at most 1 fd "
1014 "should be sent for this message type", vmsg->fd_num);
1015 return false;
1016 }
1017
1018 if (vmsg->size < VHOST_USER_MEM_REG_SIZE) {
1019 vmsg_close_fds(vmsg);
1020 vu_panic(dev, "VHOST_USER_REM_MEM_REG requires a message size of at "
1021 "least %zu bytes and only %d bytes were received",
1022 VHOST_USER_MEM_REG_SIZE, vmsg->size);
1023 return false;
1024 }
1025
1026 DPRINT("Removing region:\n");
1027 DPRINT(" guest_phys_addr: 0x%016"PRIx64"\n",
1028 msg_region->guest_phys_addr);
1029 DPRINT(" memory_size: 0x%016"PRIx64"\n",
1030 msg_region->memory_size);
1031 DPRINT(" userspace_addr 0x%016"PRIx64"\n",
1032 msg_region->userspace_addr);
1033 DPRINT(" mmap_offset 0x%016"PRIx64"\n",
1034 msg_region->mmap_offset);
1035
1036 r = vu_gpa_to_mem_region(dev, msg_region->guest_phys_addr);
1037 if (!r || !reg_equal(r, msg_region)) {
1038 vmsg_close_fds(vmsg);
1039 vu_panic(dev, "Specified region not found\n");
1040 return false;
1041 }
1042
1043 /*
1044 * There might be valid cases where we temporarily remove memory regions
1045 * to re-add them later, or remove memory regions and stop using the
1046 * rings before we set the ring addresses and restart the device.
1047 *
1048 * Unmap all affected rings, remapping them on demand later. This should
1049 * be a corner case.
1050 */
1051 unmap_rings(dev, r);
1052
1053 munmap((void *)(uintptr_t)r->mmap_addr, r->size + r->mmap_offset);
1054
1055 idx = r - dev->regions;
1056 assert(idx < dev->nregions);
1057 /* Shift all affected entries by 1 to close the hole. */
1058 memmove(r, r + 1, sizeof(VuDevRegion) * (dev->nregions - idx - 1));
1059 DPRINT("Successfully removed a region\n");
1060 dev->nregions--;
1061
1062 vmsg_close_fds(vmsg);
1063
1064 return false;
1065 }
1066
1067 static bool
1068 vu_get_shared_object(VuDev *dev, VhostUserMsg *vmsg)
1069 {
1070 int fd_num = 0;
1071 int dmabuf_fd = -1;
1072 if (dev->iface->get_shared_object) {
1073 dmabuf_fd = dev->iface->get_shared_object(
1074 dev, &vmsg->payload.object.uuid[0]);
1075 }
1076 if (dmabuf_fd != -1) {
1077 DPRINT("dmabuf_fd found for requested UUID\n");
1078 vmsg->fds[fd_num++] = dmabuf_fd;
1079 }
1080 vmsg->fd_num = fd_num;
1081
1082 return true;
1083 }
1084
1085 static bool
1086 vu_set_mem_table_exec(VuDev *dev, VhostUserMsg *vmsg)
1087 {
1088 VhostUserMemory m = vmsg->payload.memory, *memory = &m;
1089 unsigned int i;
1090
1091 vu_remove_all_mem_regs(dev);
1092
1093 DPRINT("Nregions: %u\n", memory->nregions);
1094 for (i = 0; i < memory->nregions; i++) {
1095 _vu_add_mem_reg(dev, &memory->regions[i], vmsg->fds[i]);
1096 close(vmsg->fds[i]);
1097 }
1098
1099 if (dev->postcopy_listening) {
1100 /* Send the message back to qemu with the addresses filled in */
1101 vmsg->fd_num = 0;
1102 if (!vu_send_reply(dev, dev->sock, vmsg)) {
1103 vu_panic(dev, "failed to respond to set-mem-table for postcopy");
1104 return false;
1105 }
1106
1107 /*
1108 * Wait for QEMU to confirm that it's registered the handler for the
1109 * faults.
1110 */
1111 if (!dev->read_msg(dev, dev->sock, vmsg) ||
1112 vmsg->size != sizeof(vmsg->payload.u64) ||
1113 vmsg->payload.u64 != 0) {
1114 vu_panic(dev, "failed to receive valid ack for postcopy set-mem-table");
1115 return false;
1116 }
1117
1118 /* OK, now we can go and register the memory and generate faults */
1119 (void)generate_faults(dev);
1120 return false;
1121 }
1122
1123 for (i = 0; i < dev->max_queues; i++) {
1124 if (dev->vq[i].vring.desc) {
1125 if (map_ring(dev, &dev->vq[i])) {
1126 vu_panic(dev, "remapping queue %d during setmemtable", i);
1127 }
1128 }
1129 }
1130
1131 return false;
1132 }
1133
1134 static bool
1135 vu_set_log_base_exec(VuDev *dev, VhostUserMsg *vmsg)
1136 {
1137 int fd;
1138 uint64_t log_mmap_size, log_mmap_offset;
1139 void *rc;
1140
1141 if (vmsg->fd_num != 1 ||
1142 vmsg->size != sizeof(vmsg->payload.log)) {
1143 vu_panic(dev, "Invalid log_base message");
1144 return true;
1145 }
1146
1147 fd = vmsg->fds[0];
1148 log_mmap_offset = vmsg->payload.log.mmap_offset;
1149 log_mmap_size = vmsg->payload.log.mmap_size;
1150 DPRINT("Log mmap_offset: %"PRId64"\n", log_mmap_offset);
1151 DPRINT("Log mmap_size: %"PRId64"\n", log_mmap_size);
1152
1153 rc = mmap(0, log_mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd,
1154 log_mmap_offset);
1155 close(fd);
1156 if (rc == MAP_FAILED) {
1157 perror("log mmap error");
1158 }
1159
1160 if (dev->log_table) {
1161 munmap(dev->log_table, dev->log_size);
1162 }
1163 dev->log_table = rc;
1164 dev->log_size = log_mmap_size;
1165
1166 vmsg->size = sizeof(vmsg->payload.u64);
1167 vmsg->fd_num = 0;
1168
1169 return true;
1170 }
1171
1172 static bool
1173 vu_set_log_fd_exec(VuDev *dev, VhostUserMsg *vmsg)
1174 {
1175 if (vmsg->fd_num != 1) {
1176 vu_panic(dev, "Invalid log_fd message");
1177 return false;
1178 }
1179
1180 if (dev->log_call_fd != -1) {
1181 close(dev->log_call_fd);
1182 }
1183 dev->log_call_fd = vmsg->fds[0];
1184 DPRINT("Got log_call_fd: %d\n", vmsg->fds[0]);
1185
1186 return false;
1187 }
1188
1189 static bool
1190 vu_set_vring_num_exec(VuDev *dev, VhostUserMsg *vmsg)
1191 {
1192 unsigned int index = vmsg->payload.state.index;
1193 unsigned int num = vmsg->payload.state.num;
1194
1195 DPRINT("State.index: %u\n", index);
1196 DPRINT("State.num: %u\n", num);
1197 dev->vq[index].vring.num = num;
1198
1199 return false;
1200 }
1201
1202 static bool
1203 vu_set_vring_addr_exec(VuDev *dev, VhostUserMsg *vmsg)
1204 {
1205 struct vhost_vring_addr addr = vmsg->payload.addr, *vra = &addr;
1206 unsigned int index = vra->index;
1207 VuVirtq *vq = &dev->vq[index];
1208
1209 DPRINT("vhost_vring_addr:\n");
1210 DPRINT(" index: %d\n", vra->index);
1211 DPRINT(" flags: %d\n", vra->flags);
1212 DPRINT(" desc_user_addr: 0x%016" PRIx64 "\n", (uint64_t)vra->desc_user_addr);
1213 DPRINT(" used_user_addr: 0x%016" PRIx64 "\n", (uint64_t)vra->used_user_addr);
1214 DPRINT(" avail_user_addr: 0x%016" PRIx64 "\n", (uint64_t)vra->avail_user_addr);
1215 DPRINT(" log_guest_addr: 0x%016" PRIx64 "\n", (uint64_t)vra->log_guest_addr);
1216
1217 vq->vra = *vra;
1218 vq->vring.flags = vra->flags;
1219 vq->vring.log_guest_addr = vra->log_guest_addr;
1220
1221
1222 if (map_ring(dev, vq)) {
1223 vu_panic(dev, "Invalid vring_addr message");
1224 return false;
1225 }
1226
1227 vq->used_idx = le16toh(vq->vring.used->idx);
1228
1229 if (vq->last_avail_idx != vq->used_idx) {
1230 bool resume = dev->iface->queue_is_processed_in_order &&
1231 dev->iface->queue_is_processed_in_order(dev, index);
1232
1233 DPRINT("Last avail index != used index: %u != %u%s\n",
1234 vq->last_avail_idx, vq->used_idx,
1235 resume ? ", resuming" : "");
1236
1237 if (resume) {
1238 vq->shadow_avail_idx = vq->last_avail_idx = vq->used_idx;
1239 }
1240 }
1241
1242 return false;
1243 }
1244
1245 static bool
1246 vu_set_vring_base_exec(VuDev *dev, VhostUserMsg *vmsg)
1247 {
1248 unsigned int index = vmsg->payload.state.index;
1249 unsigned int num = vmsg->payload.state.num;
1250
1251 DPRINT("State.index: %u\n", index);
1252 DPRINT("State.num: %u\n", num);
1253 dev->vq[index].shadow_avail_idx = dev->vq[index].last_avail_idx = num;
1254
1255 return false;
1256 }
1257
1258 static bool
1259 vu_get_vring_base_exec(VuDev *dev, VhostUserMsg *vmsg)
1260 {
1261 unsigned int index = vmsg->payload.state.index;
1262
1263 DPRINT("State.index: %u\n", index);
1264 vmsg->payload.state.num = dev->vq[index].last_avail_idx;
1265 vmsg->size = sizeof(vmsg->payload.state);
1266
1267 dev->vq[index].started = false;
1268 if (dev->iface->queue_set_started) {
1269 dev->iface->queue_set_started(dev, index, false);
1270 }
1271
1272 if (dev->vq[index].call_fd != -1) {
1273 close(dev->vq[index].call_fd);
1274 dev->vq[index].call_fd = -1;
1275 }
1276 if (dev->vq[index].kick_fd != -1) {
1277 dev->remove_watch(dev, dev->vq[index].kick_fd);
1278 close(dev->vq[index].kick_fd);
1279 dev->vq[index].kick_fd = -1;
1280 }
1281
1282 return true;
1283 }
1284
1285 static bool
1286 vu_check_queue_msg_file(VuDev *dev, VhostUserMsg *vmsg)
1287 {
1288 int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
1289 bool nofd = vmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK;
1290
1291 if (index >= dev->max_queues) {
1292 vmsg_close_fds(vmsg);
1293 vu_panic(dev, "Invalid queue index: %u", index);
1294 return false;
1295 }
1296
1297 if (nofd) {
1298 vmsg_close_fds(vmsg);
1299 return true;
1300 }
1301
1302 if (vmsg->fd_num != 1) {
1303 vmsg_close_fds(vmsg);
1304 vu_panic(dev, "Invalid fds in request: %d", vmsg->request);
1305 return false;
1306 }
1307
1308 return true;
1309 }
1310
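/*
 * qsort() comparator: order in-flight descriptors by submission counter,
 * tolerating counter wrap-around.
 */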
1311 static int
1312 inflight_desc_compare(const void *a, const void *b)
1313 {
1314 VuVirtqInflightDesc *desc0 = (VuVirtqInflightDesc *)a,
1315 *desc1 = (VuVirtqInflightDesc *)b;
1316
1317 if (desc1->counter > desc0->counter &&
1318 (desc1->counter - desc0->counter) < VIRTQUEUE_MAX_SIZE * 2) {
1319 return 1;
1320 }
1321
1322 return -1;
1323 }
1324
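/*
 * Restore a queue's in-flight state after (re)connection: count descriptors
 * still marked in flight, collect them for resubmission in submission
 * order, and kick the queue so processing resumes.
 */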
1325 static int
1326 vu_check_queue_inflights(VuDev *dev, VuVirtq *vq)
1327 {
1328 int i = 0;
1329
1330 if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
1331 return 0;
1332 }
1333
1334 if (unlikely(!vq->inflight)) {
1335 return -1;
1336 }
1337
1338 if (unlikely(!vq->inflight->version)) {
1339 /* initialize the buffer */
1340 vq->inflight->version = INFLIGHT_VERSION;
1341 return 0;
1342 }
1343
1344 vq->used_idx = le16toh(vq->vring.used->idx);
1345 vq->resubmit_num = 0;
1346 vq->resubmit_list = NULL;
1347 vq->counter = 0;
1348
1349 if (unlikely(vq->inflight->used_idx != vq->used_idx)) {
1350 vq->inflight->desc[vq->inflight->last_batch_head].inflight = 0;
1351
1352 barrier();
1353
1354 vq->inflight->used_idx = vq->used_idx;
1355 }
1356
1357 for (i = 0; i < vq->inflight->desc_num; i++) {
1358 if (vq->inflight->desc[i].inflight == 1) {
1359 vq->inuse++;
1360 }
1361 }
1362
1363 vq->shadow_avail_idx = vq->last_avail_idx = vq->inuse + vq->used_idx;
1364
1365 if (vq->inuse) {
1366 vq->resubmit_list = calloc(vq->inuse, sizeof(VuVirtqInflightDesc));
1367 if (!vq->resubmit_list) {
1368 return -1;
1369 }
1370
1371 for (i = 0; i < vq->inflight->desc_num; i++) {
1372 if (vq->inflight->desc[i].inflight) {
1373 vq->resubmit_list[vq->resubmit_num].index = i;
1374 vq->resubmit_list[vq->resubmit_num].counter =
1375 vq->inflight->desc[i].counter;
1376 vq->resubmit_num++;
1377 }
1378 }
1379
1380 if (vq->resubmit_num > 1) {
1381 qsort(vq->resubmit_list, vq->resubmit_num,
1382 sizeof(VuVirtqInflightDesc), inflight_desc_compare);
1383 }
1384 vq->counter = vq->resubmit_list[0].counter + 1;
1385 }
1386
1387 /* in case of I/O hang after reconnecting */
1388 if (eventfd_write(vq->kick_fd, 1)) {
1389 return -1;
1390 }
1391
1392 return 0;
1393 }
1394
1395 static bool
1396 vu_set_vring_kick_exec(VuDev *dev, VhostUserMsg *vmsg)
1397 {
1398 int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
1399 bool nofd = vmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK;
1400
1401 DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);
1402
1403 if (!vu_check_queue_msg_file(dev, vmsg)) {
1404 return false;
1405 }
1406
1407 if (dev->vq[index].kick_fd != -1) {
1408 dev->remove_watch(dev, dev->vq[index].kick_fd);
1409 close(dev->vq[index].kick_fd);
1410 dev->vq[index].kick_fd = -1;
1411 }
1412
1413 dev->vq[index].kick_fd = nofd ? -1 : vmsg->fds[0];
1414 DPRINT("Got kick_fd: %d for vq: %d\n", dev->vq[index].kick_fd, index);
1415
1416 dev->vq[index].started = true;
1417 if (dev->iface->queue_set_started) {
1418 dev->iface->queue_set_started(dev, index, true);
1419 }
1420
1421 if (dev->vq[index].kick_fd != -1 && dev->vq[index].handler) {
1422 dev->set_watch(dev, dev->vq[index].kick_fd, VU_WATCH_IN,
1423 vu_kick_cb, (void *)(long)index);
1424
1425 DPRINT("Waiting for kicks on fd: %d for vq: %d\n",
1426 dev->vq[index].kick_fd, index);
1427 }
1428
1429 if (vu_check_queue_inflights(dev, &dev->vq[index])) {
1430 vu_panic(dev, "Failed to check inflights for vq: %d\n", index);
1431 }
1432
1433 return false;
1434 }
1435
1436 void vu_set_queue_handler(VuDev *dev, VuVirtq *vq,
1437 vu_queue_handler_cb handler)
1438 {
1439 int qidx = vq - dev->vq;
1440
1441 vq->handler = handler;
1442 if (vq->kick_fd >= 0) {
1443 if (handler) {
1444 dev->set_watch(dev, vq->kick_fd, VU_WATCH_IN,
1445 vu_kick_cb, (void *)(long)qidx);
1446 } else {
1447 dev->remove_watch(dev, vq->kick_fd);
1448 }
1449 }
1450 }
1451
1452 bool vu_set_queue_host_notifier(VuDev *dev, VuVirtq *vq, int fd,
1453 int size, int offset)
1454 {
1455 int qidx = vq - dev->vq;
1456 int fd_num = 0;
1457 VhostUserMsg vmsg = {
1458 .request = VHOST_USER_BACKEND_VRING_HOST_NOTIFIER_MSG,
1459 .flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK,
1460 .size = sizeof(vmsg.payload.area),
1461 .payload.area = {
1462 .u64 = qidx & VHOST_USER_VRING_IDX_MASK,
1463 .size = size,
1464 .offset = offset,
1465 },
1466 };
1467
1468 if (fd == -1) {
1469 vmsg.payload.area.u64 |= VHOST_USER_VRING_NOFD_MASK;
1470 } else {
1471 vmsg.fds[fd_num++] = fd;
1472 }
1473
1474 vmsg.fd_num = fd_num;
1475
1476 if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD)) {
1477 return false;
1478 }
1479
1480 pthread_mutex_lock(&dev->backend_mutex);
1481 if (!vu_message_write(dev, dev->backend_fd, &vmsg)) {
1482 pthread_mutex_unlock(&dev->backend_mutex);
1483 return false;
1484 }
1485
1486 /* Also unlocks the backend_mutex */
1487 return vu_process_message_reply(dev, &vmsg);
1488 }
1489
1490 bool
1491 vu_lookup_shared_object(VuDev *dev, unsigned char uuid[UUID_LEN],
1492 int *dmabuf_fd)
1493 {
1494 bool result = false;
1495 VhostUserMsg msg_reply;
1496 VhostUserMsg msg = {
1497 .request = VHOST_USER_BACKEND_SHARED_OBJECT_LOOKUP,
1498 .size = sizeof(msg.payload.object),
1499 .flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK,
1500 };
1501
1502 memcpy(msg.payload.object.uuid, uuid, sizeof(uuid[0]) * UUID_LEN);
1503
1504 if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_SHARED_OBJECT)) {
1505 return false;
1506 }
1507
1508 pthread_mutex_lock(&dev->backend_mutex);
1509 if (!vu_message_write(dev, dev->backend_fd, &msg)) {
1510 goto out;
1511 }
1512
1513 if (!vu_message_read_default(dev, dev->backend_fd, &msg_reply)) {
1514 goto out;
1515 }
1516
1517 if (msg_reply.request != msg.request) {
1518 DPRINT("Received unexpected msg type. Expected %d, received %d",
1519 msg.request, msg_reply.request);
1520 goto out;
1521 }
1522
1523 if (msg_reply.fd_num != 1) {
1524 DPRINT("Received unexpected number of fds. Expected 1, received %d",
1525 msg_reply.fd_num);
1526 goto out;
1527 }
1528
1529 *dmabuf_fd = msg_reply.fds[0];
1530 result = *dmabuf_fd > 0 && msg_reply.payload.u64 == 0;
1531 out:
1532 pthread_mutex_unlock(&dev->backend_mutex);
1533
1534 return result;
1535 }
1536
1537 static bool
1538 vu_send_message(VuDev *dev, VhostUserMsg *vmsg)
1539 {
1540 bool result = false;
1541 pthread_mutex_lock(&dev->backend_mutex);
1542 if (!vu_message_write(dev, dev->backend_fd, vmsg)) {
1543 goto out;
1544 }
1545
1546 result = true;
1547 out:
1548 pthread_mutex_unlock(&dev->backend_mutex);
1549
1550 return result;
1551 }
1552
1553 bool
1554 vu_add_shared_object(VuDev *dev, unsigned char uuid[UUID_LEN])
1555 {
1556 VhostUserMsg msg = {
1557 .request = VHOST_USER_BACKEND_SHARED_OBJECT_ADD,
1558 .size = sizeof(msg.payload.object),
1559 .flags = VHOST_USER_VERSION,
1560 };
1561
1562 memcpy(msg.payload.object.uuid, uuid, sizeof(uuid[0]) * UUID_LEN);
1563
1564 if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_SHARED_OBJECT)) {
1565 return false;
1566 }
1567
1568 return vu_send_message(dev, &msg);
1569 }
1570
1571 bool
1572 vu_rm_shared_object(VuDev *dev, unsigned char uuid[UUID_LEN])
1573 {
1574 VhostUserMsg msg = {
1575 .request = VHOST_USER_BACKEND_SHARED_OBJECT_REMOVE,
1576 .size = sizeof(msg.payload.object),
1577 .flags = VHOST_USER_VERSION,
1578 };
1579
1580 memcpy(msg.payload.object.uuid, uuid, sizeof(uuid[0]) * UUID_LEN);
1581
1582 if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_SHARED_OBJECT)) {
1583 return false;
1584 }
1585
1586 return vu_send_message(dev, &msg);
1587 }
1588
1589 static bool
1590 vu_set_vring_call_exec(VuDev *dev, VhostUserMsg *vmsg)
1591 {
1592 int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
1593 bool nofd = vmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK;
1594
1595 DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);
1596
1597 if (!vu_check_queue_msg_file(dev, vmsg)) {
1598 return false;
1599 }
1600
1601 if (dev->vq[index].call_fd != -1) {
1602 close(dev->vq[index].call_fd);
1603 dev->vq[index].call_fd = -1;
1604 }
1605
1606 dev->vq[index].call_fd = nofd ? -1 : vmsg->fds[0];
1607
1608 /* in case of I/O hang after reconnecting */
1609 if (dev->vq[index].call_fd != -1 && eventfd_write(vmsg->fds[0], 1)) {
1610 return -1;
1611 }
1612
1613 DPRINT("Got call_fd: %d for vq: %d\n", dev->vq[index].call_fd, index);
1614
1615 return false;
1616 }
1617
1618 static bool
1619 vu_set_vring_err_exec(VuDev *dev, VhostUserMsg *vmsg)
1620 {
1621 int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
1622 bool nofd = vmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK;
1623
1624 DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);
1625
1626 if (!vu_check_queue_msg_file(dev, vmsg)) {
1627 return false;
1628 }
1629
1630 if (dev->vq[index].err_fd != -1) {
1631 close(dev->vq[index].err_fd);
1632 dev->vq[index].err_fd = -1;
1633 }
1634
1635 dev->vq[index].err_fd = nofd ? -1 : vmsg->fds[0];
1636
1637 return false;
1638 }
1639
1640 static bool
1641 vu_get_protocol_features_exec(VuDev *dev, VhostUserMsg *vmsg)
1642 {
1643 /*
1644 * Note that we support, but intentionally do not set,
1645 * VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS. This means that
1646 * a device implementation can return it in its callback
1647 * (get_protocol_features) if it wants to use this for
1648 * simulation, but it is otherwise not desirable (if even
1649 * implemented by the frontend.)
1650 */
1651 uint64_t features = 1ULL << VHOST_USER_PROTOCOL_F_MQ |
1652 1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD |
1653 1ULL << VHOST_USER_PROTOCOL_F_BACKEND_REQ |
1654 1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER |
1655 1ULL << VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD |
1656 1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK |
1657 1ULL << VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS;
1658
1659 if (have_userfault()) {
1660 features |= 1ULL << VHOST_USER_PROTOCOL_F_PAGEFAULT;
1661 }
1662
1663 if (dev->iface->get_config && dev->iface->set_config) {
1664 features |= 1ULL << VHOST_USER_PROTOCOL_F_CONFIG;
1665 }
1666
1667 if (dev->iface->get_protocol_features) {
1668 features |= dev->iface->get_protocol_features(dev);
1669 }
1670
1671 vmsg_set_reply_u64(vmsg, features);
1672 return true;
1673 }
1674
1675 static bool
1676 vu_set_protocol_features_exec(VuDev *dev, VhostUserMsg *vmsg)
1677 {
1678 uint64_t features = vmsg->payload.u64;
1679
1680 DPRINT("u64: 0x%016"PRIx64"\n", features);
1681
1682 dev->protocol_features = vmsg->payload.u64;
1683
1684 if (vu_has_protocol_feature(dev,
1685 VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS) &&
1686 (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_BACKEND_REQ) ||
1687 !vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_REPLY_ACK))) {
1688 /*
1689 * The use case for using messages for kick/call is simulation, to make
1690 * the kick and call synchronous. To actually get that behaviour, both
1691 * of the other features are required.
1692 * Theoretically, one could use only kick messages, or do them without
1693 * having F_REPLY_ACK, but too many (possibly pending) messages on the
1694 * socket will eventually cause the frontend to hang. To avoid this in
1695 * scenarios where it is not desired, enforce that the settings actually
1696 * enable the simulation case.
1697 */
1698 vu_panic(dev,
1699 "F_IN_BAND_NOTIFICATIONS requires F_BACKEND_REQ && F_REPLY_ACK");
1700 return false;
1701 }
1702
1703 if (dev->iface->set_protocol_features) {
1704 dev->iface->set_protocol_features(dev, features);
1705 }
1706
1707 return false;
1708 }
1709
1710 static bool
1711 vu_get_queue_num_exec(VuDev *dev, VhostUserMsg *vmsg)
1712 {
1713 vmsg_set_reply_u64(vmsg, dev->max_queues);
1714 return true;
1715 }
1716
1717 static bool
1718 vu_set_vring_enable_exec(VuDev *dev, VhostUserMsg *vmsg)
1719 {
1720 unsigned int index = vmsg->payload.state.index;
1721 unsigned int enable = vmsg->payload.state.num;
1722
1723 DPRINT("State.index: %u\n", index);
1724 DPRINT("State.enable: %u\n", enable);
1725
1726 if (index >= dev->max_queues) {
1727 vu_panic(dev, "Invalid vring_enable index: %u", index);
1728 return false;
1729 }
1730
1731 dev->vq[index].enable = enable;
1732 return false;
1733 }
1734
1735 static bool
1736 vu_set_backend_req_fd(VuDev *dev, VhostUserMsg *vmsg)
1737 {
1738 if (vmsg->fd_num != 1) {
1739 vu_panic(dev, "Invalid backend_req_fd message (%d fd's)", vmsg->fd_num);
1740 return false;
1741 }
1742
1743 if (dev->backend_fd != -1) {
1744 close(dev->backend_fd);
1745 }
1746 dev->backend_fd = vmsg->fds[0];
1747 DPRINT("Got backend_fd: %d\n", vmsg->fds[0]);
1748
1749 return false;
1750 }
1751
1752 static bool
1753 vu_get_config(VuDev *dev, VhostUserMsg *vmsg)
1754 {
1755 int ret = -1;
1756
1757 if (dev->iface->get_config) {
1758 ret = dev->iface->get_config(dev, vmsg->payload.config.region,
1759 vmsg->payload.config.size);
1760 }
1761
1762 if (ret) {
1763 /* resize to zero to indicate an error to frontend */
1764 vmsg->size = 0;
1765 }
1766
1767 return true;
1768 }
1769
1770 static bool
1771 vu_set_config(VuDev *dev, VhostUserMsg *vmsg)
1772 {
1773 int ret = -1;
1774
1775 if (dev->iface->set_config) {
1776 ret = dev->iface->set_config(dev, vmsg->payload.config.region,
1777 vmsg->payload.config.offset,
1778 vmsg->payload.config.size,
1779 vmsg->payload.config.flags);
1780 if (ret) {
1781 vu_panic(dev, "Set virtio configuration space failed");
1782 }
1783 }
1784
1785 return false;
1786 }
1787
1788 static bool
1789 vu_set_postcopy_advise(VuDev *dev, VhostUserMsg *vmsg)
1790 {
1791 #ifdef UFFDIO_API
1792 struct uffdio_api api_struct;
1793
1794 dev->postcopy_ufd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
1795 vmsg->size = 0;
1796 #else
1797 dev->postcopy_ufd = -1;
1798 #endif
1799
1800 if (dev->postcopy_ufd == -1) {
1801 vu_panic(dev, "Userfaultfd not available: %s", strerror(errno));
1802 goto out;
1803 }
1804
1805 #ifdef UFFDIO_API
1806 api_struct.api = UFFD_API;
1807 api_struct.features = 0;
1808 if (ioctl(dev->postcopy_ufd, UFFDIO_API, &api_struct)) {
1809 vu_panic(dev, "Failed UFFDIO_API: %s", strerror(errno));
1810 close(dev->postcopy_ufd);
1811 dev->postcopy_ufd = -1;
1812 goto out;
1813 }
1814 /* TODO: Stash feature flags somewhere */
1815 #endif
1816
1817 out:
1818 /* Return a ufd to the QEMU */
1819 vmsg->fd_num = 1;
1820 vmsg->fds[0] = dev->postcopy_ufd;
1821 return true; /* = send a reply */
1822 }
1823
1824 static bool
1825 vu_set_postcopy_listen(VuDev *dev, VhostUserMsg *vmsg)
1826 {
1827 if (dev->nregions) {
1828 vu_panic(dev, "Regions already registered at postcopy-listen");
1829 vmsg_set_reply_u64(vmsg, -1);
1830 return true;
1831 }
1832 dev->postcopy_listening = true;
1833
1834 vmsg_set_reply_u64(vmsg, 0);
1835 return true;
1836 }
1837
1838 static bool
1839 vu_set_postcopy_end(VuDev *dev, VhostUserMsg *vmsg)
1840 {
1841 DPRINT("%s: Entry\n", __func__);
1842 dev->postcopy_listening = false;
1843 if (dev->postcopy_ufd > 0) {
1844 close(dev->postcopy_ufd);
1845 dev->postcopy_ufd = -1;
1846 DPRINT("%s: Done close\n", __func__);
1847 }
1848
1849 vmsg_set_reply_u64(vmsg, 0);
1850 DPRINT("%s: exit\n", __func__);
1851 return true;
1852 }
1853
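/*
 * Size in bytes of the per-queue in-flight tracking area, rounded up to
 * INFLIGHT_ALIGNMENT.
 */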
1854 static inline uint64_t
1855 vu_inflight_queue_size(uint16_t queue_size)
1856 {
1857 return ALIGN_UP(sizeof(VuDescStateSplit) * queue_size +
1858 sizeof(uint16_t), INFLIGHT_ALIGNMENT);
1859 }
1860
1861 #ifdef MFD_ALLOW_SEALING
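/*
 * Allocate a sealed memfd of the given size and mmap it shared; returns
 * the mapping and stores the file descriptor in *fd, or NULL on failure.
 */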
1862 static void *
1863 memfd_alloc(const char *name, size_t size, unsigned int flags, int *fd)
1864 {
1865 void *ptr;
1866 int ret;
1867
1868 *fd = memfd_create(name, MFD_ALLOW_SEALING);
1869 if (*fd < 0) {
1870 return NULL;
1871 }
1872
1873 ret = ftruncate(*fd, size);
1874 if (ret < 0) {
1875 close(*fd);
1876 return NULL;
1877 }
1878
1879 ret = fcntl(*fd, F_ADD_SEALS, flags);
1880 if (ret < 0) {
1881 close(*fd);
1882 return NULL;
1883 }
1884
1885 ptr = mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, *fd, 0);
1886 if (ptr == MAP_FAILED) {
1887 close(*fd);
1888 return NULL;
1889 }
1890
1891 return ptr;
1892 }
1893 #endif
1894
1895 static bool
1896 vu_get_inflight_fd(VuDev *dev, VhostUserMsg *vmsg)
1897 {
1898 int fd = -1;
1899 void *addr = NULL;
1900 uint64_t mmap_size;
1901 uint16_t num_queues, queue_size;
1902
1903 if (vmsg->size != sizeof(vmsg->payload.inflight)) {
1904 vu_panic(dev, "Invalid get_inflight_fd message:%d", vmsg->size);
1905 vmsg->payload.inflight.mmap_size = 0;
1906 return true;
1907 }
1908
1909 num_queues = vmsg->payload.inflight.num_queues;
1910 queue_size = vmsg->payload.inflight.queue_size;
1911
1912 DPRINT("set_inflight_fd num_queues: %"PRId16"\n", num_queues);
1913 DPRINT("set_inflight_fd queue_size: %"PRId16"\n", queue_size);
1914
1915 mmap_size = vu_inflight_queue_size(queue_size) * num_queues;
1916
1917 #ifdef MFD_ALLOW_SEALING
1918 addr = memfd_alloc("vhost-inflight", mmap_size,
1919 F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL,
1920 &fd);
1921 #else
1922 vu_panic(dev, "Not implemented: memfd support is missing");
1923 #endif
1924
1925 if (!addr) {
1926 vu_panic(dev, "Failed to alloc vhost inflight area");
1927 vmsg->payload.inflight.mmap_size = 0;
1928 return true;
1929 }
1930
1931 memset(addr, 0, mmap_size);
1932
1933 dev->inflight_info.addr = addr;
1934 dev->inflight_info.size = vmsg->payload.inflight.mmap_size = mmap_size;
1935 dev->inflight_info.fd = vmsg->fds[0] = fd;
1936 vmsg->fd_num = 1;
1937 vmsg->payload.inflight.mmap_offset = 0;
1938
1939 DPRINT("send inflight mmap_size: %"PRId64"\n",
1940 vmsg->payload.inflight.mmap_size);
1941 DPRINT("send inflight mmap offset: %"PRId64"\n",
1942 vmsg->payload.inflight.mmap_offset);
1943
1944 return true;
1945 }
1946
1947 static bool
1948 vu_set_inflight_fd(VuDev *dev, VhostUserMsg *vmsg)
1949 {
1950 int fd, i;
1951 uint64_t mmap_size, mmap_offset;
1952 uint16_t num_queues, queue_size;
1953 void *rc;
1954
1955 if (vmsg->fd_num != 1 ||
1956 vmsg->size != sizeof(vmsg->payload.inflight)) {
1957 vu_panic(dev, "Invalid set_inflight_fd message size:%d fds:%d",
1958 vmsg->size, vmsg->fd_num);
1959 return false;
1960 }
1961
1962 fd = vmsg->fds[0];
1963 mmap_size = vmsg->payload.inflight.mmap_size;
1964 mmap_offset = vmsg->payload.inflight.mmap_offset;
1965 num_queues = vmsg->payload.inflight.num_queues;
1966 queue_size = vmsg->payload.inflight.queue_size;
1967
1968 DPRINT("set_inflight_fd mmap_size: %"PRId64"\n", mmap_size);
1969 DPRINT("set_inflight_fd mmap_offset: %"PRId64"\n", mmap_offset);
1970 DPRINT("set_inflight_fd num_queues: %"PRId16"\n", num_queues);
1971 DPRINT("set_inflight_fd queue_size: %"PRId16"\n", queue_size);
1972
1973 rc = mmap(0, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
1974 fd, mmap_offset);
1975
1976 if (rc == MAP_FAILED) {
1977 vu_panic(dev, "set_inflight_fd mmap error: %s", strerror(errno));
1978 return false;
1979 }
1980
1981 if (dev->inflight_info.fd) {
1982 close(dev->inflight_info.fd);
1983 }
1984
1985 if (dev->inflight_info.addr) {
1986 munmap(dev->inflight_info.addr, dev->inflight_info.size);
1987 }
1988
1989 dev->inflight_info.fd = fd;
1990 dev->inflight_info.addr = rc;
1991 dev->inflight_info.size = mmap_size;
1992
1993 for (i = 0; i < num_queues; i++) {
1994 dev->vq[i].inflight = (VuVirtqInflight *)rc;
1995 dev->vq[i].inflight->desc_num = queue_size;
1996 rc = (void *)((char *)rc + vu_inflight_queue_size(queue_size));
1997 }
1998
1999 return false;
2000 }
2001
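/*
 * VHOST_USER_VRING_KICK: in-band kick used instead of the kick eventfd
 * when VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS is negotiated.  Marks
 * the ring started and invokes its handler directly.
 */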
2002 static bool
2003 vu_handle_vring_kick(VuDev *dev, VhostUserMsg *vmsg)
2004 {
2005 unsigned int index = vmsg->payload.state.index;
2006
2007 if (index >= dev->max_queues) {
2008 vu_panic(dev, "Invalid queue index: %u", index);
2009 return false;
2010 }
2011
2012 DPRINT("Got kick message: handler:%p idx:%u\n",
2013 dev->vq[index].handler, index);
2014
2015 if (!dev->vq[index].started) {
2016 dev->vq[index].started = true;
2017
2018 if (dev->iface->queue_set_started) {
2019 dev->iface->queue_set_started(dev, index, true);
2020 }
2021 }
2022
2023 if (dev->vq[index].handler) {
2024 dev->vq[index].handler(dev, index);
2025 }
2026
2027 return false;
2028 }
2029
2030 static bool vu_handle_get_max_memslots(VuDev *dev, VhostUserMsg *vmsg)
2031 {
2032 vmsg_set_reply_u64(vmsg, VHOST_USER_MAX_RAM_SLOTS);
2033
2034 DPRINT("u64: 0x%016"PRIx64"\n", (uint64_t) VHOST_USER_MAX_RAM_SLOTS);
2035
2036 return true;
2037 }
2038
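/*
 * Dispatch a single vhost-user request.  The device's process_msg
 * callback, if any, gets a chance to handle the message first.  The
 * return value indicates whether a reply carried in vmsg should be sent
 * back to the front-end.
 */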
2039 static bool
2040 vu_process_message(VuDev *dev, VhostUserMsg *vmsg)
2041 {
2042 int do_reply = 0;
2043
2044 /* Print out generic part of the request. */
2045 DPRINT("================ Vhost user message ================\n");
2046 DPRINT("Request: %s (%d)\n", vu_request_to_string(vmsg->request),
2047 vmsg->request);
2048 DPRINT("Flags: 0x%x\n", vmsg->flags);
2049 DPRINT("Size: %u\n", vmsg->size);
2050
2051 if (vmsg->fd_num) {
2052 int i;
2053 DPRINT("Fds:");
2054 for (i = 0; i < vmsg->fd_num; i++) {
2055 DPRINT(" %d", vmsg->fds[i]);
2056 }
2057 DPRINT("\n");
2058 }
2059
2060 if (dev->iface->process_msg &&
2061 dev->iface->process_msg(dev, vmsg, &do_reply)) {
2062 return do_reply;
2063 }
2064
2065 switch (vmsg->request) {
2066 case VHOST_USER_GET_FEATURES:
2067 return vu_get_features_exec(dev, vmsg);
2068 case VHOST_USER_SET_FEATURES:
2069 return vu_set_features_exec(dev, vmsg);
2070 case VHOST_USER_GET_PROTOCOL_FEATURES:
2071 return vu_get_protocol_features_exec(dev, vmsg);
2072 case VHOST_USER_SET_PROTOCOL_FEATURES:
2073 return vu_set_protocol_features_exec(dev, vmsg);
2074 case VHOST_USER_SET_OWNER:
2075 return vu_set_owner_exec(dev, vmsg);
2076 case VHOST_USER_RESET_OWNER:
2077 return vu_reset_device_exec(dev, vmsg);
2078 case VHOST_USER_SET_MEM_TABLE:
2079 return vu_set_mem_table_exec(dev, vmsg);
2080 case VHOST_USER_SET_LOG_BASE:
2081 return vu_set_log_base_exec(dev, vmsg);
2082 case VHOST_USER_SET_LOG_FD:
2083 return vu_set_log_fd_exec(dev, vmsg);
2084 case VHOST_USER_SET_VRING_NUM:
2085 return vu_set_vring_num_exec(dev, vmsg);
2086 case VHOST_USER_SET_VRING_ADDR:
2087 return vu_set_vring_addr_exec(dev, vmsg);
2088 case VHOST_USER_SET_VRING_BASE:
2089 return vu_set_vring_base_exec(dev, vmsg);
2090 case VHOST_USER_GET_VRING_BASE:
2091 return vu_get_vring_base_exec(dev, vmsg);
2092 case VHOST_USER_SET_VRING_KICK:
2093 return vu_set_vring_kick_exec(dev, vmsg);
2094 case VHOST_USER_SET_VRING_CALL:
2095 return vu_set_vring_call_exec(dev, vmsg);
2096 case VHOST_USER_SET_VRING_ERR:
2097 return vu_set_vring_err_exec(dev, vmsg);
2098 case VHOST_USER_GET_QUEUE_NUM:
2099 return vu_get_queue_num_exec(dev, vmsg);
2100 case VHOST_USER_SET_VRING_ENABLE:
2101 return vu_set_vring_enable_exec(dev, vmsg);
2102 case VHOST_USER_SET_BACKEND_REQ_FD:
2103 return vu_set_backend_req_fd(dev, vmsg);
2104 case VHOST_USER_GET_CONFIG:
2105 return vu_get_config(dev, vmsg);
2106 case VHOST_USER_SET_CONFIG:
2107 return vu_set_config(dev, vmsg);
2108 case VHOST_USER_NONE:
2109 /* if you need processing before exit, override iface->process_msg */
2110 exit(0);
2111 case VHOST_USER_POSTCOPY_ADVISE:
2112 return vu_set_postcopy_advise(dev, vmsg);
2113 case VHOST_USER_POSTCOPY_LISTEN:
2114 return vu_set_postcopy_listen(dev, vmsg);
2115 case VHOST_USER_POSTCOPY_END:
2116 return vu_set_postcopy_end(dev, vmsg);
2117 case VHOST_USER_GET_INFLIGHT_FD:
2118 return vu_get_inflight_fd(dev, vmsg);
2119 case VHOST_USER_SET_INFLIGHT_FD:
2120 return vu_set_inflight_fd(dev, vmsg);
2121 case VHOST_USER_VRING_KICK:
2122 return vu_handle_vring_kick(dev, vmsg);
2123 case VHOST_USER_GET_MAX_MEM_SLOTS:
2124 return vu_handle_get_max_memslots(dev, vmsg);
2125 case VHOST_USER_ADD_MEM_REG:
2126 return vu_add_mem_reg(dev, vmsg);
2127 case VHOST_USER_REM_MEM_REG:
2128 return vu_rem_mem_reg(dev, vmsg);
2129 case VHOST_USER_GET_SHARED_OBJECT:
2130 return vu_get_shared_object(dev, vmsg);
2131 default:
2132 vmsg_close_fds(vmsg);
2133 vu_panic(dev, "Unhandled request: %d", vmsg->request);
2134 }
2135
2136 return false;
2137 }
2138
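/*
 * Read one message from the front-end socket, process it and, when the
 * handler or the VHOST_USER_NEED_REPLY flag asks for it, send the reply.
 * Returns false if reading or replying fails; callers typically invoke
 * this from their socket watch whenever the connection becomes readable.
 */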
2139 bool
2140 vu_dispatch(VuDev *dev)
2141 {
2142 VhostUserMsg vmsg = { 0, };
2143 int reply_requested;
2144 bool need_reply, success = false;
2145
2146 if (!dev->read_msg(dev, dev->sock, &vmsg)) {
2147 goto end;
2148 }
2149
2150 need_reply = vmsg.flags & VHOST_USER_NEED_REPLY_MASK;
2151
2152 reply_requested = vu_process_message(dev, &vmsg);
2153 if (!reply_requested && need_reply) {
2154 vmsg_set_reply_u64(&vmsg, 0);
2155 reply_requested = 1;
2156 }
2157
2158 if (!reply_requested) {
2159 success = true;
2160 goto end;
2161 }
2162
2163 if (!vu_send_reply(dev, dev->sock, &vmsg)) {
2164 goto end;
2165 }
2166
2167 success = true;
2168
2169 end:
2170 free(vmsg.data);
2171 return success;
2172 }
2173
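/*
 * Tear down a device: unmap all memory regions, close the per-queue
 * kick/call/error descriptors, release the inflight buffer, the dirty
 * log, the back-end channel and the socket, and free the queue and
 * region arrays.
 */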
2174 void
2175 vu_deinit(VuDev *dev)
2176 {
2177 unsigned int i;
2178
2179 vu_remove_all_mem_regs(dev);
2180
2181 for (i = 0; i < dev->max_queues; i++) {
2182 VuVirtq *vq = &dev->vq[i];
2183
2184 if (vq->call_fd != -1) {
2185 close(vq->call_fd);
2186 vq->call_fd = -1;
2187 }
2188
2189 if (vq->kick_fd != -1) {
2190 dev->remove_watch(dev, vq->kick_fd);
2191 close(vq->kick_fd);
2192 vq->kick_fd = -1;
2193 }
2194
2195 if (vq->err_fd != -1) {
2196 close(vq->err_fd);
2197 vq->err_fd = -1;
2198 }
2199
2200 if (vq->resubmit_list) {
2201 free(vq->resubmit_list);
2202 vq->resubmit_list = NULL;
2203 }
2204
2205 vq->inflight = NULL;
2206 }
2207
2208 if (dev->inflight_info.addr) {
2209 munmap(dev->inflight_info.addr, dev->inflight_info.size);
2210 dev->inflight_info.addr = NULL;
2211 }
2212
2213 if (dev->inflight_info.fd > 0) {
2214 close(dev->inflight_info.fd);
2215 dev->inflight_info.fd = -1;
2216 }
2217
2218 vu_close_log(dev);
2219 if (dev->backend_fd != -1) {
2220 close(dev->backend_fd);
2221 dev->backend_fd = -1;
2222 }
2223 pthread_mutex_destroy(&dev->backend_mutex);
2224
2225 if (dev->sock != -1) {
2226 close(dev->sock);
2227 }
2228
2229 free(dev->vq);
2230 dev->vq = NULL;
2231 free(dev->regions);
2232 dev->regions = NULL;
2233 }
2234
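/*
 * Initialise a VuDev for an already connected vhost-user socket.
 * read_msg may be NULL to use the default blocking reader; panic,
 * set_watch, remove_watch and iface are mandatory.  Returns false if
 * allocating the region or virtqueue arrays fails.
 *
 * A minimal usage sketch (illustrative only; conn_fd is assumed to be a
 * connected socket, and my_panic, my_set_watch, my_remove_watch and
 * my_iface are hypothetical caller-provided callbacks):
 *
 *     VuDev dev;
 *
 *     if (!vu_init(&dev, 2, conn_fd, my_panic, NULL,
 *                  my_set_watch, my_remove_watch, &my_iface)) {
 *         return -1;
 *     }
 *     while (vu_dispatch(&dev)) {
 *         continue;
 *     }
 *     vu_deinit(&dev);
 */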
2235 bool
2236 vu_init(VuDev *dev,
2237 uint16_t max_queues,
2238 int socket,
2239 vu_panic_cb panic,
2240 vu_read_msg_cb read_msg,
2241 vu_set_watch_cb set_watch,
2242 vu_remove_watch_cb remove_watch,
2243 const VuDevIface *iface)
2244 {
2245 uint16_t i;
2246
2247 assert(max_queues > 0);
2248 assert(socket >= 0);
2249 assert(set_watch);
2250 assert(remove_watch);
2251 assert(iface);
2252 assert(panic);
2253
2254 memset(dev, 0, sizeof(*dev));
2255
2256 dev->sock = socket;
2257 dev->panic = panic;
2258 dev->read_msg = read_msg ? read_msg : vu_message_read_default;
2259 dev->set_watch = set_watch;
2260 dev->remove_watch = remove_watch;
2261 dev->iface = iface;
2262 dev->log_call_fd = -1;
2263 pthread_mutex_init(&dev->backend_mutex, NULL);
2264 dev->backend_fd = -1;
2265 dev->max_queues = max_queues;
2266
2267 dev->regions = malloc(VHOST_USER_MAX_RAM_SLOTS * sizeof(dev->regions[0]));
2268 if (!dev->regions) {
2269 DPRINT("%s: failed to malloc mem regions\n", __func__);
2270 return false;
2271 }
2272
2273 dev->vq = malloc(max_queues * sizeof(dev->vq[0]));
2274 if (!dev->vq) {
2275 DPRINT("%s: failed to malloc virtqueues\n", __func__);
2276 free(dev->regions);
2277 dev->regions = NULL;
2278 return false;
2279 }
2280
2281 for (i = 0; i < max_queues; i++) {
2282 dev->vq[i] = (VuVirtq) {
2283 .call_fd = -1, .kick_fd = -1, .err_fd = -1,
2284 .notification = true,
2285 };
2286 }
2287
2288 return true;
2289 }
2290
2291 VuVirtq *
2292 vu_get_queue(VuDev *dev, int qidx)
2293 {
2294 assert(qidx < dev->max_queues);
2295 return &dev->vq[qidx];
2296 }
2297
2298 bool
2299 vu_queue_enabled(VuDev *dev, VuVirtq *vq)
2300 {
2301 return vq->enable;
2302 }
2303
2304 bool
2305 vu_queue_started(const VuDev *dev, const VuVirtq *vq)
2306 {
2307 return vq->started;
2308 }
2309
2310 static inline uint16_t
2311 vring_avail_flags(VuVirtq *vq)
2312 {
2313 return le16toh(vq->vring.avail->flags);
2314 }
2315
2316 static inline uint16_t
2317 vring_avail_idx(VuVirtq *vq)
2318 {
2319 vq->shadow_avail_idx = le16toh(vq->vring.avail->idx);
2320
2321 return vq->shadow_avail_idx;
2322 }
2323
2324 static inline uint16_t
2325 vring_avail_ring(VuVirtq *vq, int i)
2326 {
2327 return le16toh(vq->vring.avail->ring[i]);
2328 }
2329
2330 static inline uint16_t
2331 vring_get_used_event(VuVirtq *vq)
2332 {
2333 return vring_avail_ring(vq, vq->vring.num);
2334 }
2335
2336 static int
2337 virtqueue_num_heads(VuDev *dev, VuVirtq *vq, unsigned int idx)
2338 {
2339 uint16_t num_heads = vring_avail_idx(vq) - idx;
2340
2341 /* Check it isn't doing very strange things with descriptor numbers. */
2342 if (num_heads > vq->vring.num) {
2343 vu_panic(dev, "Guest moved used index from %u to %u",
2344 idx, vq->shadow_avail_idx);
2345 return -1;
2346 }
2347 if (num_heads) {
2348 /* On success, callers read a descriptor at vq->last_avail_idx.
2349 * Make sure descriptor read does not bypass avail index read. */
2350 smp_rmb();
2351 }
2352
2353 return num_heads;
2354 }
2355
2356 static bool
2357 virtqueue_get_head(VuDev *dev, VuVirtq *vq,
2358 unsigned int idx, unsigned int *head)
2359 {
2360 /* Grab the next descriptor number they're advertising, and increment
2361 * the index we've seen. */
2362 *head = vring_avail_ring(vq, idx % vq->vring.num);
2363
2364 /* If their number is silly, that's a fatal mistake. */
2365 if (*head >= vq->vring.num) {
2366 vu_panic(dev, "Guest says index %u is available", *head);
2367 return false;
2368 }
2369
2370 return true;
2371 }
2372
2373 static int
2374 virtqueue_read_indirect_desc(VuDev *dev, struct vring_desc *desc,
2375 uint64_t addr, size_t len)
2376 {
2377 struct vring_desc *ori_desc;
2378 uint64_t read_len;
2379
2380 if (len > (VIRTQUEUE_MAX_SIZE * sizeof(struct vring_desc))) {
2381 return -1;
2382 }
2383
2384 if (len == 0) {
2385 return -1;
2386 }
2387
2388 while (len) {
2389 read_len = len;
2390 ori_desc = vu_gpa_to_va(dev, &read_len, addr);
2391 if (!ori_desc) {
2392 return -1;
2393 }
2394
2395 memcpy(desc, ori_desc, read_len);
2396 len -= read_len;
2397 addr += read_len;
2398 desc += read_len;
2399 }
2400
2401 return 0;
2402 }
2403
2404 enum {
2405 VIRTQUEUE_READ_DESC_ERROR = -1,
2406 VIRTQUEUE_READ_DESC_DONE = 0, /* end of chain */
2407 VIRTQUEUE_READ_DESC_MORE = 1, /* more buffers in chain */
2408 };
2409
2410 static int
2411 virtqueue_read_next_desc(VuDev *dev, struct vring_desc *desc,
2412 int i, unsigned int max, unsigned int *next)
2413 {
2414 /* If this descriptor says it doesn't chain, we're done. */
2415 if (!(le16toh(desc[i].flags) & VRING_DESC_F_NEXT)) {
2416 return VIRTQUEUE_READ_DESC_DONE;
2417 }
2418
2419 /* Check they're not leading us off end of descriptors. */
2420 *next = le16toh(desc[i].next);
2421 /* Make sure compiler knows to grab that: we don't want it changing! */
2422 smp_wmb();
2423
2424 if (*next >= max) {
2425 vu_panic(dev, "Desc next is %u", *next);
2426 return VIRTQUEUE_READ_DESC_ERROR;
2427 }
2428
2429 return VIRTQUEUE_READ_DESC_MORE;
2430 }
2431
2432 void
2433 vu_queue_get_avail_bytes(VuDev *dev, VuVirtq *vq, unsigned int *in_bytes,
2434 unsigned int *out_bytes,
2435 unsigned max_in_bytes, unsigned max_out_bytes)
2436 {
2437 unsigned int idx;
2438 unsigned int total_bufs, in_total, out_total;
2439 int rc;
2440
2441 idx = vq->last_avail_idx;
2442
2443 total_bufs = in_total = out_total = 0;
2444 if (!vu_is_vq_usable(dev, vq)) {
2445 goto done;
2446 }
2447
2448 while ((rc = virtqueue_num_heads(dev, vq, idx)) > 0) {
2449 unsigned int max, desc_len, num_bufs, indirect = 0;
2450 uint64_t desc_addr, read_len;
2451 struct vring_desc *desc;
2452 struct vring_desc desc_buf[VIRTQUEUE_MAX_SIZE];
2453 unsigned int i;
2454
2455 max = vq->vring.num;
2456 num_bufs = total_bufs;
2457 if (!virtqueue_get_head(dev, vq, idx++, &i)) {
2458 goto err;
2459 }
2460 desc = vq->vring.desc;
2461
2462 if (le16toh(desc[i].flags) & VRING_DESC_F_INDIRECT) {
2463 if (le32toh(desc[i].len) % sizeof(struct vring_desc)) {
2464 vu_panic(dev, "Invalid size for indirect buffer table");
2465 goto err;
2466 }
2467
2468 /* If we've got too many, that implies a descriptor loop. */
2469 if (num_bufs >= max) {
2470 vu_panic(dev, "Looped descriptor");
2471 goto err;
2472 }
2473
2474 /* loop over the indirect descriptor table */
2475 indirect = 1;
2476 desc_addr = le64toh(desc[i].addr);
2477 desc_len = le32toh(desc[i].len);
2478 max = desc_len / sizeof(struct vring_desc);
2479 read_len = desc_len;
2480 desc = vu_gpa_to_va(dev, &read_len, desc_addr);
2481 if (unlikely(desc && read_len != desc_len)) {
2482 /* Failed to use zero copy */
2483 desc = NULL;
2484 if (!virtqueue_read_indirect_desc(dev, desc_buf,
2485 desc_addr,
2486 desc_len)) {
2487 desc = desc_buf;
2488 }
2489 }
2490 if (!desc) {
2491 vu_panic(dev, "Invalid indirect buffer table");
2492 goto err;
2493 }
2494 num_bufs = i = 0;
2495 }
2496
2497 do {
2498 /* If we've got too many, that implies a descriptor loop. */
2499 if (++num_bufs > max) {
2500 vu_panic(dev, "Looped descriptor");
2501 goto err;
2502 }
2503
2504 if (le16toh(desc[i].flags) & VRING_DESC_F_WRITE) {
2505 in_total += le32toh(desc[i].len);
2506 } else {
2507 out_total += le32toh(desc[i].len);
2508 }
2509 if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
2510 goto done;
2511 }
2512 rc = virtqueue_read_next_desc(dev, desc, i, max, &i);
2513 } while (rc == VIRTQUEUE_READ_DESC_MORE);
2514
2515 if (rc == VIRTQUEUE_READ_DESC_ERROR) {
2516 goto err;
2517 }
2518
2519 if (!indirect) {
2520 total_bufs = num_bufs;
2521 } else {
2522 total_bufs++;
2523 }
2524 }
2525 if (rc < 0) {
2526 goto err;
2527 }
2528 done:
2529 if (in_bytes) {
2530 *in_bytes = in_total;
2531 }
2532 if (out_bytes) {
2533 *out_bytes = out_total;
2534 }
2535 return;
2536
2537 err:
2538 in_total = out_total = 0;
2539 goto done;
2540 }
2541
2542 bool
2543 vu_queue_avail_bytes(VuDev *dev, VuVirtq *vq, unsigned int in_bytes,
2544 unsigned int out_bytes)
2545 {
2546 unsigned int in_total, out_total;
2547
2548 vu_queue_get_avail_bytes(dev, vq, &in_total, &out_total,
2549 in_bytes, out_bytes);
2550
2551 return in_bytes <= in_total && out_bytes <= out_total;
2552 }
2553
2554 /* Fetch avail_idx from VQ memory only when we really need to know if
2555 * guest has added some buffers. */
2556 bool
2557 vu_queue_empty(VuDev *dev, VuVirtq *vq)
2558 {
2559 if (!vu_is_vq_usable(dev, vq)) {
2560 return true;
2561 }
2562
2563 if (vq->shadow_avail_idx != vq->last_avail_idx) {
2564 return false;
2565 }
2566
2567 return vring_avail_idx(vq) == vq->last_avail_idx;
2568 }
2569
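/*
 * Decide whether the front-end actually wants an interrupt for this
 * ring: always when VIRTIO_F_NOTIFY_ON_EMPTY applies and the ring is
 * empty, otherwise according to VRING_AVAIL_F_NO_INTERRUPT or, with
 * VIRTIO_RING_F_EVENT_IDX, according to the used_event index.
 */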
2570 static bool
2571 vring_notify(VuDev *dev, VuVirtq *vq)
2572 {
2573 uint16_t old, new;
2574 bool v;
2575
2576 /* We need to expose used array entries before checking used event. */
2577 smp_mb();
2578
2579 /* Always notify when the queue is empty, if VIRTIO_F_NOTIFY_ON_EMPTY was acknowledged */
2580 if (vu_has_feature(dev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
2581 !vq->inuse && vu_queue_empty(dev, vq)) {
2582 return true;
2583 }
2584
2585 if (!vu_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
2586 return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT);
2587 }
2588
2589 v = vq->signalled_used_valid;
2590 vq->signalled_used_valid = true;
2591 old = vq->signalled_used;
2592 new = vq->signalled_used = vq->used_idx;
2593 return !v || vring_need_event(vring_get_used_event(vq), new, old);
2594 }
2595
2596 static void _vu_queue_notify(VuDev *dev, VuVirtq *vq, bool sync)
2597 {
2598 if (!vu_is_vq_usable(dev, vq)) {
2599 return;
2600 }
2601
2602 if (!vring_notify(dev, vq)) {
2603 DPRINT("skipped notify...\n");
2604 return;
2605 }
2606
2607 if (vq->call_fd < 0 &&
2608 vu_has_protocol_feature(dev,
2609 VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS) &&
2610 vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_BACKEND_REQ)) {
2611 VhostUserMsg vmsg = {
2612 .request = VHOST_USER_BACKEND_VRING_CALL,
2613 .flags = VHOST_USER_VERSION,
2614 .size = sizeof(vmsg.payload.state),
2615 .payload.state = {
2616 .index = vq - dev->vq,
2617 },
2618 };
2619 bool ack = sync &&
2620 vu_has_protocol_feature(dev,
2621 VHOST_USER_PROTOCOL_F_REPLY_ACK);
2622
2623 if (ack) {
2624 vmsg.flags |= VHOST_USER_NEED_REPLY_MASK;
2625 }
2626
2627 vu_message_write(dev, dev->backend_fd, &vmsg);
2628 if (ack) {
2629 vu_message_read_default(dev, dev->backend_fd, &vmsg);
2630 }
2631 return;
2632 }
2633
2634 if (eventfd_write(vq->call_fd, 1) < 0) {
2635 vu_panic(dev, "Error writing eventfd: %s", strerror(errno));
2636 }
2637 }
2638
2639 void vu_queue_notify(VuDev *dev, VuVirtq *vq)
2640 {
2641 _vu_queue_notify(dev, vq, false);
2642 }
2643
2644 void vu_queue_notify_sync(VuDev *dev, VuVirtq *vq)
2645 {
2646 _vu_queue_notify(dev, vq, true);
2647 }
2648
2649 void vu_config_change_msg(VuDev *dev)
2650 {
2651 VhostUserMsg vmsg = {
2652 .request = VHOST_USER_BACKEND_CONFIG_CHANGE_MSG,
2653 .flags = VHOST_USER_VERSION,
2654 };
2655
2656 vu_message_write(dev, dev->backend_fd, &vmsg);
2657 }
2658
2659 static inline void
2660 vring_used_flags_set_bit(VuVirtq *vq, int mask)
2661 {
2662 uint16_t *flags;
2663
2664 flags = (uint16_t *)((char*)vq->vring.used +
2665 offsetof(struct vring_used, flags));
2666 *flags = htole16(le16toh(*flags) | mask);
2667 }
2668
2669 static inline void
2670 vring_used_flags_unset_bit(VuVirtq *vq, int mask)
2671 {
2672 uint16_t *flags;
2673
2674 flags = (uint16_t *)((char*)vq->vring.used +
2675 offsetof(struct vring_used, flags));
2676 *flags = htole16(le16toh(*flags) & ~mask);
2677 }
2678
2679 static inline void
2680 vring_set_avail_event(VuVirtq *vq, uint16_t val)
2681 {
2682 uint16_t val_le = htole16(val);
2683
2684 if (!vq->notification) {
2685 return;
2686 }
2687
2688 memcpy(&vq->vring.used->ring[vq->vring.num], &val_le, sizeof(uint16_t));
2689 }
2690
2691 void
2692 vu_queue_set_notification(VuDev *dev, VuVirtq *vq, int enable)
2693 {
2694 vq->notification = enable;
2695 if (vu_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
2696 vring_set_avail_event(vq, vring_avail_idx(vq));
2697 } else if (enable) {
2698 vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
2699 } else {
2700 vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
2701 }
2702 if (enable) {
2703 /* Expose avail event/used flags before caller checks the avail idx. */
2704 smp_mb();
2705 }
2706 }
2707
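/*
 * Translate one descriptor's guest-physical buffer into iovecs, splitting
 * it wherever it crosses a memory-region boundary.  Appends to iov[] at
 * *p_num_sg and fails (via vu_panic) on zero-length buffers, unmappable
 * addresses or iovec overflow.
 */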
2708 static bool
2709 virtqueue_map_desc(VuDev *dev,
2710 unsigned int *p_num_sg, struct iovec *iov,
2711 unsigned int max_num_sg, bool is_write,
2712 uint64_t pa, size_t sz)
2713 {
2714 unsigned num_sg = *p_num_sg;
2715
2716 assert(num_sg <= max_num_sg);
2717
2718 if (!sz) {
2719 vu_panic(dev, "virtio: zero sized buffers are not allowed");
2720 return false;
2721 }
2722
2723 while (sz) {
2724 uint64_t len = sz;
2725
2726 if (num_sg == max_num_sg) {
2727 vu_panic(dev, "virtio: too many descriptors in indirect table");
2728 return false;
2729 }
2730
2731 iov[num_sg].iov_base = vu_gpa_to_va(dev, &len, pa);
2732 if (iov[num_sg].iov_base == NULL) {
2733 vu_panic(dev, "virtio: invalid address for buffers");
2734 return false;
2735 }
2736 iov[num_sg].iov_len = len;
2737 num_sg++;
2738 sz -= len;
2739 pa += len;
2740 }
2741
2742 *p_num_sg = num_sg;
2743 return true;
2744 }
2745
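/*
 * Allocate a VuVirtqElement with room for the caller's sz-byte header
 * followed by the in_sg[] and out_sg[] arrays; the sg pointers are fixed
 * up to point into the same allocation.
 */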
2746 static void *
2747 virtqueue_alloc_element(size_t sz,
2748 unsigned out_num, unsigned in_num)
2749 {
2750 VuVirtqElement *elem;
2751 size_t in_sg_ofs = ALIGN_UP(sz, __alignof__(elem->in_sg[0]));
2752 size_t out_sg_ofs = in_sg_ofs + in_num * sizeof(elem->in_sg[0]);
2753 size_t out_sg_end = out_sg_ofs + out_num * sizeof(elem->out_sg[0]);
2754
2755 assert(sz >= sizeof(VuVirtqElement));
2756 elem = malloc(out_sg_end);
2757 if (!elem) {
2758 DPRINT("%s: failed to malloc virtqueue element\n", __func__);
2759 return NULL;
2760 }
2761 elem->out_num = out_num;
2762 elem->in_num = in_num;
2763 elem->in_sg = (void *)elem + in_sg_ofs;
2764 elem->out_sg = (void *)elem + out_sg_ofs;
2765 return elem;
2766 }
2767
2768 static void *
2769 vu_queue_map_desc(VuDev *dev, VuVirtq *vq, unsigned int idx, size_t sz)
2770 {
2771 struct vring_desc *desc = vq->vring.desc;
2772 uint64_t desc_addr, read_len;
2773 unsigned int desc_len;
2774 unsigned int max = vq->vring.num;
2775 unsigned int i = idx;
2776 VuVirtqElement *elem;
2777 unsigned int out_num = 0, in_num = 0;
2778 struct iovec iov[VIRTQUEUE_MAX_SIZE];
2779 struct vring_desc desc_buf[VIRTQUEUE_MAX_SIZE];
2780 int rc;
2781
2782 if (le16toh(desc[i].flags) & VRING_DESC_F_INDIRECT) {
2783 if (le32toh(desc[i].len) % sizeof(struct vring_desc)) {
2784 vu_panic(dev, "Invalid size for indirect buffer table");
2785 return NULL;
2786 }
2787
2788 /* loop over the indirect descriptor table */
2789 desc_addr = le64toh(desc[i].addr);
2790 desc_len = le32toh(desc[i].len);
2791 max = desc_len / sizeof(struct vring_desc);
2792 read_len = desc_len;
2793 desc = vu_gpa_to_va(dev, &read_len, desc_addr);
2794 if (unlikely(desc && read_len != desc_len)) {
2795 /* Failed to use zero copy */
2796 desc = NULL;
2797 if (!virtqueue_read_indirect_desc(dev, desc_buf,
2798 desc_addr,
2799 desc_len)) {
2800 desc = desc_buf;
2801 }
2802 }
2803 if (!desc) {
2804 vu_panic(dev, "Invalid indirect buffer table");
2805 return NULL;
2806 }
2807 i = 0;
2808 }
2809
2810 /* Collect all the descriptors */
2811 do {
2812 if (le16toh(desc[i].flags) & VRING_DESC_F_WRITE) {
2813 if (!virtqueue_map_desc(dev, &in_num, iov + out_num,
2814 VIRTQUEUE_MAX_SIZE - out_num, true,
2815 le64toh(desc[i].addr),
2816 le32toh(desc[i].len))) {
2817 return NULL;
2818 }
2819 } else {
2820 if (in_num) {
2821 vu_panic(dev, "Incorrect order for descriptors");
2822 return NULL;
2823 }
2824 if (!virtqueue_map_desc(dev, &out_num, iov,
2825 VIRTQUEUE_MAX_SIZE, false,
2826 le64toh(desc[i].addr),
2827 le32toh(desc[i].len))) {
2828 return NULL;
2829 }
2830 }
2831
2832 /* If we've got too many, that implies a descriptor loop. */
2833 if ((in_num + out_num) > max) {
2834 vu_panic(dev, "Looped descriptor");
2835 return NULL;
2836 }
2837 rc = virtqueue_read_next_desc(dev, desc, i, max, &i);
2838 } while (rc == VIRTQUEUE_READ_DESC_MORE);
2839
2840 if (rc == VIRTQUEUE_READ_DESC_ERROR) {
2841 vu_panic(dev, "read descriptor error");
2842 return NULL;
2843 }
2844
2845 /* Now copy what we have collected and mapped */
2846 elem = virtqueue_alloc_element(sz, out_num, in_num);
2847 if (!elem) {
2848 return NULL;
2849 }
2850 elem->index = idx;
2851 for (i = 0; i < out_num; i++) {
2852 elem->out_sg[i] = iov[i];
2853 }
2854 for (i = 0; i < in_num; i++) {
2855 elem->in_sg[i] = iov[out_num + i];
2856 }
2857
2858 return elem;
2859 }
2860
2861 static int
2862 vu_queue_inflight_get(VuDev *dev, VuVirtq *vq, int desc_idx)
2863 {
2864 if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
2865 return 0;
2866 }
2867
2868 if (unlikely(!vq->inflight)) {
2869 return -1;
2870 }
2871
2872 vq->inflight->desc[desc_idx].counter = vq->counter++;
2873 vq->inflight->desc[desc_idx].inflight = 1;
2874
2875 return 0;
2876 }
2877
2878 static int
2879 vu_queue_inflight_pre_put(VuDev *dev, VuVirtq *vq, int desc_idx)
2880 {
2881 if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
2882 return 0;
2883 }
2884
2885 if (unlikely(!vq->inflight)) {
2886 return -1;
2887 }
2888
2889 vq->inflight->last_batch_head = desc_idx;
2890
2891 return 0;
2892 }
2893
2894 static int
2895 vu_queue_inflight_post_put(VuDev *dev, VuVirtq *vq, int desc_idx)
2896 {
2897 if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
2898 return 0;
2899 }
2900
2901 if (unlikely(!vq->inflight)) {
2902 return -1;
2903 }
2904
2905 barrier();
2906
2907 vq->inflight->desc[desc_idx].inflight = 0;
2908
2909 barrier();
2910
2911 vq->inflight->used_idx = vq->used_idx;
2912
2913 return 0;
2914 }
2915
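/*
 * Pop the next available element, or NULL if the ring is empty or the
 * device is broken.  After a reconnect, elements recorded in the
 * inflight resubmit list are replayed first.  The returned element is
 * heap allocated (sz bytes of header plus the sg arrays) and must be
 * freed by the caller with free().
 */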
2916 void *
2917 vu_queue_pop(VuDev *dev, VuVirtq *vq, size_t sz)
2918 {
2919 int i;
2920 unsigned int head;
2921 VuVirtqElement *elem;
2922
2923 if (!vu_is_vq_usable(dev, vq)) {
2924 return NULL;
2925 }
2926
2927 if (unlikely(vq->resubmit_list && vq->resubmit_num > 0)) {
2928 i = (--vq->resubmit_num);
2929 elem = vu_queue_map_desc(dev, vq, vq->resubmit_list[i].index, sz);
2930
2931 if (!vq->resubmit_num) {
2932 free(vq->resubmit_list);
2933 vq->resubmit_list = NULL;
2934 }
2935
2936 return elem;
2937 }
2938
2939 if (vu_queue_empty(dev, vq)) {
2940 return NULL;
2941 }
2942 /*
2943 * Needed after vu_queue_empty(), see comment in
2944 * virtqueue_num_heads().
2945 */
2946 smp_rmb();
2947
2948 if (vq->inuse >= vq->vring.num) {
2949 vu_panic(dev, "Virtqueue size exceeded");
2950 return NULL;
2951 }
2952
2953 if (!virtqueue_get_head(dev, vq, vq->last_avail_idx++, &head)) {
2954 return NULL;
2955 }
2956
2957 if (vu_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
2958 vring_set_avail_event(vq, vq->last_avail_idx);
2959 }
2960
2961 elem = vu_queue_map_desc(dev, vq, head, sz);
2962
2963 if (!elem) {
2964 return NULL;
2965 }
2966
2967 vq->inuse++;
2968
2969 vu_queue_inflight_get(dev, vq, head);
2970
2971 return elem;
2972 }
2973
2974 static void
2975 vu_queue_detach_element(VuDev *dev, VuVirtq *vq, VuVirtqElement *elem,
2976 size_t len)
2977 {
2978 vq->inuse--;
2979 /* unmap, when DMA support is added */
2980 }
2981
2982 void
2983 vu_queue_unpop(VuDev *dev, VuVirtq *vq, VuVirtqElement *elem,
2984 size_t len)
2985 {
2986 vq->last_avail_idx--;
2987 vu_queue_detach_element(dev, vq, elem, len);
2988 }
2989
2990 bool
2991 vu_queue_rewind(VuDev *dev, VuVirtq *vq, unsigned int num)
2992 {
2993 if (num > vq->inuse) {
2994 return false;
2995 }
2996 vq->last_avail_idx -= num;
2997 vq->inuse -= num;
2998 return true;
2999 }
3000
3001 static inline
3002 void vring_used_write(VuDev *dev, VuVirtq *vq,
3003 struct vring_used_elem *uelem, int i)
3004 {
3005 struct vring_used *used = vq->vring.used;
3006
3007 used->ring[i] = *uelem;
3008 vu_log_write(dev, vq->vring.log_guest_addr +
3009 offsetof(struct vring_used, ring[i]),
3010 sizeof(used->ring[i]));
3011 }
3012
3013
3014 static void
3015 vu_log_queue_fill(VuDev *dev, VuVirtq *vq,
3016 const VuVirtqElement *elem,
3017 unsigned int len)
3018 {
3019 struct vring_desc *desc = vq->vring.desc;
3020 unsigned int i, max, min, desc_len;
3021 uint64_t desc_addr, read_len;
3022 struct vring_desc desc_buf[VIRTQUEUE_MAX_SIZE];
3023 unsigned num_bufs = 0;
3024
3025 max = vq->vring.num;
3026 i = elem->index;
3027
3028 if (le16toh(desc[i].flags) & VRING_DESC_F_INDIRECT) {
3029 if (le32toh(desc[i].len) % sizeof(struct vring_desc)) {
3030 vu_panic(dev, "Invalid size for indirect buffer table");
3031 return;
3032 }
3033
3034 /* loop over the indirect descriptor table */
3035 desc_addr = le64toh(desc[i].addr);
3036 desc_len = le32toh(desc[i].len);
3037 max = desc_len / sizeof(struct vring_desc);
3038 read_len = desc_len;
3039 desc = vu_gpa_to_va(dev, &read_len, desc_addr);
3040 if (unlikely(desc && read_len != desc_len)) {
3041 /* Failed to use zero copy */
3042 desc = NULL;
3043 if (!virtqueue_read_indirect_desc(dev, desc_buf,
3044 desc_addr,
3045 desc_len)) {
3046 desc = desc_buf;
3047 }
3048 }
3049 if (!desc) {
3050 vu_panic(dev, "Invalid indirect buffer table");
3051 return;
3052 }
3053 i = 0;
3054 }
3055
3056 do {
3057 if (++num_bufs > max) {
3058 vu_panic(dev, "Looped descriptor");
3059 return;
3060 }
3061
3062 if (le16toh(desc[i].flags) & VRING_DESC_F_WRITE) {
3063 min = MIN(le32toh(desc[i].len), len);
3064 vu_log_write(dev, le64toh(desc[i].addr), min);
3065 len -= min;
3066 }
3067
3068 } while (len > 0 &&
3069 (virtqueue_read_next_desc(dev, desc, i, max, &i)
3070 == VIRTQUEUE_READ_DESC_MORE));
3071 }
3072
3073 void
3074 vu_queue_fill(VuDev *dev, VuVirtq *vq,
3075 const VuVirtqElement *elem,
3076 unsigned int len, unsigned int idx)
3077 {
3078 struct vring_used_elem uelem;
3079
3080 if (!vu_is_vq_usable(dev, vq)) {
3081 return;
3082 }
3083
3084 vu_log_queue_fill(dev, vq, elem, len);
3085
3086 idx = (idx + vq->used_idx) % vq->vring.num;
3087
3088 uelem.id = htole32(elem->index);
3089 uelem.len = htole32(len);
3090 vring_used_write(dev, vq, &uelem, idx);
3091 }
3092
3093 static inline
3094 void vring_used_idx_set(VuDev *dev, VuVirtq *vq, uint16_t val)
3095 {
3096 vq->vring.used->idx = htole16(val);
3097 vu_log_write(dev,
3098 vq->vring.log_guest_addr + offsetof(struct vring_used, idx),
3099 sizeof(vq->vring.used->idx));
3100
3101 vq->used_idx = val;
3102 }
3103
3104 void
3105 vu_queue_flush(VuDev *dev, VuVirtq *vq, unsigned int count)
3106 {
3107 uint16_t old, new;
3108
3109 if (!vu_is_vq_usable(dev, vq)) {
3110 return;
3111 }
3112
3113 /* Make sure buffer is written before we update index. */
3114 smp_wmb();
3115
3116 old = vq->used_idx;
3117 new = old + count;
3118 vring_used_idx_set(dev, vq, new);
3119 vq->inuse -= count;
3120 if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old))) {
3121 vq->signalled_used_valid = false;
3122 }
3123 }
3124
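/*
 * Convenience wrapper: fill one used entry for elem, flush it to the
 * used ring, and update the inflight bookkeeping around the flush so a
 * restarted back-end will not resubmit a request that was already
 * completed.
 */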
3125 void
3126 vu_queue_push(VuDev *dev, VuVirtq *vq,
3127 const VuVirtqElement *elem, unsigned int len)
3128 {
3129 vu_queue_fill(dev, vq, elem, len, 0);
3130 vu_queue_inflight_pre_put(dev, vq, elem->index);
3131 vu_queue_flush(dev, vq, 1);
3132 vu_queue_inflight_post_put(dev, vq, elem->index);
3133 }
3134