1 /* 2 * Vhost User library 3 * 4 * Copyright IBM, Corp. 2007 5 * Copyright (c) 2016 Red Hat, Inc. 6 * 7 * Authors: 8 * Anthony Liguori <aliguori@us.ibm.com> 9 * Marc-André Lureau <mlureau@redhat.com> 10 * Victor Kaplansky <victork@redhat.com> 11 * 12 * This work is licensed under the terms of the GNU GPL, version 2 or 13 * later. See the COPYING file in the top-level directory. 14 */ 15 16 #ifndef _GNU_SOURCE 17 #define _GNU_SOURCE 18 #endif 19 20 /* this code avoids GLib dependency */ 21 #include <stdlib.h> 22 #include <stdio.h> 23 #include <unistd.h> 24 #include <stdarg.h> 25 #include <errno.h> 26 #include <string.h> 27 #include <assert.h> 28 #include <inttypes.h> 29 #include <sys/types.h> 30 #include <sys/socket.h> 31 #include <sys/eventfd.h> 32 #include <sys/mman.h> 33 #include <endian.h> 34 35 /* Necessary to provide VIRTIO_F_VERSION_1 on system 36 * with older linux headers. Must appear before 37 * <linux/vhost.h> below. 38 */ 39 #include "standard-headers/linux/virtio_config.h" 40 41 #if defined(__linux__) 42 #include <sys/syscall.h> 43 #include <fcntl.h> 44 #include <sys/ioctl.h> 45 #include <linux/vhost.h> 46 47 #ifdef __NR_userfaultfd 48 #include <linux/userfaultfd.h> 49 #endif 50 51 #endif 52 53 #include "include/atomic.h" 54 55 #include "libvhost-user.h" 56 57 /* usually provided by GLib */ 58 #if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ > 4) 59 #if !defined(__clang__) && (__GNUC__ == 4 && __GNUC_MINOR__ == 4) 60 #define G_GNUC_PRINTF(format_idx, arg_idx) \ 61 __attribute__((__format__(gnu_printf, format_idx, arg_idx))) 62 #else 63 #define G_GNUC_PRINTF(format_idx, arg_idx) \ 64 __attribute__((__format__(__printf__, format_idx, arg_idx))) 65 #endif 66 #else /* !__GNUC__ */ 67 #define G_GNUC_PRINTF(format_idx, arg_idx) 68 #endif /* !__GNUC__ */ 69 #ifndef MIN 70 #define MIN(x, y) ({ \ 71 __typeof__(x) _min1 = (x); \ 72 __typeof__(y) _min2 = (y); \ 73 (void) (&_min1 == &_min2); \ 74 _min1 < _min2 ? _min1 : _min2; }) 75 #endif 76 77 /* Round number down to multiple */ 78 #define ALIGN_DOWN(n, m) ((n) / (m) * (m)) 79 80 /* Round number up to multiple */ 81 #define ALIGN_UP(n, m) ALIGN_DOWN((n) + (m) - 1, (m)) 82 83 #ifndef unlikely 84 #define unlikely(x) __builtin_expect(!!(x), 0) 85 #endif 86 87 /* Align each region to cache line size in inflight buffer */ 88 #define INFLIGHT_ALIGNMENT 64 89 90 /* The version of inflight buffer */ 91 #define INFLIGHT_VERSION 1 92 93 /* The version of the protocol we support */ 94 #define VHOST_USER_VERSION 1 95 #define LIBVHOST_USER_DEBUG 0 96 97 #define DPRINT(...) \ 98 do { \ 99 if (LIBVHOST_USER_DEBUG) { \ 100 fprintf(stderr, __VA_ARGS__); \ 101 } \ 102 } while (0) 103 104 static inline 105 bool has_feature(uint64_t features, unsigned int fbit) 106 { 107 assert(fbit < 64); 108 return !!(features & (1ULL << fbit)); 109 } 110 111 static inline 112 bool vu_has_feature(VuDev *dev, 113 unsigned int fbit) 114 { 115 return has_feature(dev->features, fbit); 116 } 117 118 static inline bool vu_has_protocol_feature(VuDev *dev, unsigned int fbit) 119 { 120 return has_feature(dev->protocol_features, fbit); 121 } 122 123 const char * 124 vu_request_to_string(unsigned int req) 125 { 126 #define REQ(req) [req] = #req 127 static const char *vu_request_str[] = { 128 REQ(VHOST_USER_NONE), 129 REQ(VHOST_USER_GET_FEATURES), 130 REQ(VHOST_USER_SET_FEATURES), 131 REQ(VHOST_USER_SET_OWNER), 132 REQ(VHOST_USER_RESET_OWNER), 133 REQ(VHOST_USER_SET_MEM_TABLE), 134 REQ(VHOST_USER_SET_LOG_BASE), 135 REQ(VHOST_USER_SET_LOG_FD), 136 REQ(VHOST_USER_SET_VRING_NUM), 137 REQ(VHOST_USER_SET_VRING_ADDR), 138 REQ(VHOST_USER_SET_VRING_BASE), 139 REQ(VHOST_USER_GET_VRING_BASE), 140 REQ(VHOST_USER_SET_VRING_KICK), 141 REQ(VHOST_USER_SET_VRING_CALL), 142 REQ(VHOST_USER_SET_VRING_ERR), 143 REQ(VHOST_USER_GET_PROTOCOL_FEATURES), 144 REQ(VHOST_USER_SET_PROTOCOL_FEATURES), 145 REQ(VHOST_USER_GET_QUEUE_NUM), 146 REQ(VHOST_USER_SET_VRING_ENABLE), 147 REQ(VHOST_USER_SEND_RARP), 148 REQ(VHOST_USER_NET_SET_MTU), 149 REQ(VHOST_USER_SET_BACKEND_REQ_FD), 150 REQ(VHOST_USER_IOTLB_MSG), 151 REQ(VHOST_USER_SET_VRING_ENDIAN), 152 REQ(VHOST_USER_GET_CONFIG), 153 REQ(VHOST_USER_SET_CONFIG), 154 REQ(VHOST_USER_POSTCOPY_ADVISE), 155 REQ(VHOST_USER_POSTCOPY_LISTEN), 156 REQ(VHOST_USER_POSTCOPY_END), 157 REQ(VHOST_USER_GET_INFLIGHT_FD), 158 REQ(VHOST_USER_SET_INFLIGHT_FD), 159 REQ(VHOST_USER_GPU_SET_SOCKET), 160 REQ(VHOST_USER_VRING_KICK), 161 REQ(VHOST_USER_GET_MAX_MEM_SLOTS), 162 REQ(VHOST_USER_ADD_MEM_REG), 163 REQ(VHOST_USER_REM_MEM_REG), 164 REQ(VHOST_USER_GET_SHARED_OBJECT), 165 REQ(VHOST_USER_MAX), 166 }; 167 #undef REQ 168 169 if (req < VHOST_USER_MAX) { 170 return vu_request_str[req]; 171 } else { 172 return "unknown"; 173 } 174 } 175 176 static void G_GNUC_PRINTF(2, 3) 177 vu_panic(VuDev *dev, const char *msg, ...) 178 { 179 char *buf = NULL; 180 va_list ap; 181 182 va_start(ap, msg); 183 if (vasprintf(&buf, msg, ap) < 0) { 184 buf = NULL; 185 } 186 va_end(ap); 187 188 dev->broken = true; 189 dev->panic(dev, buf); 190 free(buf); 191 192 /* 193 * FIXME: 194 * find a way to call virtio_error, or perhaps close the connection? 195 */ 196 } 197 198 /* Translate guest physical address to our virtual address. */ 199 void * 200 vu_gpa_to_va(VuDev *dev, uint64_t *plen, uint64_t guest_addr) 201 { 202 unsigned int i; 203 204 if (*plen == 0) { 205 return NULL; 206 } 207 208 /* Find matching memory region. */ 209 for (i = 0; i < dev->nregions; i++) { 210 VuDevRegion *r = &dev->regions[i]; 211 212 if ((guest_addr >= r->gpa) && (guest_addr < (r->gpa + r->size))) { 213 if ((guest_addr + *plen) > (r->gpa + r->size)) { 214 *plen = r->gpa + r->size - guest_addr; 215 } 216 return (void *)(uintptr_t) 217 guest_addr - r->gpa + r->mmap_addr + r->mmap_offset; 218 } 219 } 220 221 return NULL; 222 } 223 224 /* Translate qemu virtual address to our virtual address. */ 225 static void * 226 qva_to_va(VuDev *dev, uint64_t qemu_addr) 227 { 228 unsigned int i; 229 230 /* Find matching memory region. */ 231 for (i = 0; i < dev->nregions; i++) { 232 VuDevRegion *r = &dev->regions[i]; 233 234 if ((qemu_addr >= r->qva) && (qemu_addr < (r->qva + r->size))) { 235 return (void *)(uintptr_t) 236 qemu_addr - r->qva + r->mmap_addr + r->mmap_offset; 237 } 238 } 239 240 return NULL; 241 } 242 243 static void 244 vu_remove_all_mem_regs(VuDev *dev) 245 { 246 unsigned int i; 247 248 for (i = 0; i < dev->nregions; i++) { 249 VuDevRegion *r = &dev->regions[i]; 250 void *ma = (void *)(uintptr_t)r->mmap_addr; 251 252 if (ma) { 253 munmap(ma, r->size + r->mmap_offset); 254 } 255 } 256 dev->nregions = 0; 257 } 258 259 static void 260 vmsg_close_fds(VhostUserMsg *vmsg) 261 { 262 int i; 263 264 for (i = 0; i < vmsg->fd_num; i++) { 265 close(vmsg->fds[i]); 266 } 267 } 268 269 /* Set reply payload.u64 and clear request flags and fd_num */ 270 static void vmsg_set_reply_u64(VhostUserMsg *vmsg, uint64_t val) 271 { 272 vmsg->flags = 0; /* defaults will be set by vu_send_reply() */ 273 vmsg->size = sizeof(vmsg->payload.u64); 274 vmsg->payload.u64 = val; 275 vmsg->fd_num = 0; 276 } 277 278 /* A test to see if we have userfault available */ 279 static bool 280 have_userfault(void) 281 { 282 #if defined(__linux__) && defined(__NR_userfaultfd) &&\ 283 defined(UFFD_FEATURE_MISSING_SHMEM) &&\ 284 defined(UFFD_FEATURE_MISSING_HUGETLBFS) 285 /* Now test the kernel we're running on really has the features */ 286 int ufd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK); 287 struct uffdio_api api_struct; 288 if (ufd < 0) { 289 return false; 290 } 291 292 api_struct.api = UFFD_API; 293 api_struct.features = UFFD_FEATURE_MISSING_SHMEM | 294 UFFD_FEATURE_MISSING_HUGETLBFS; 295 if (ioctl(ufd, UFFDIO_API, &api_struct)) { 296 close(ufd); 297 return false; 298 } 299 close(ufd); 300 return true; 301 302 #else 303 return false; 304 #endif 305 } 306 307 static bool 308 vu_message_read_default(VuDev *dev, int conn_fd, VhostUserMsg *vmsg) 309 { 310 char control[CMSG_SPACE(VHOST_MEMORY_BASELINE_NREGIONS * sizeof(int))] = {}; 311 struct iovec iov = { 312 .iov_base = (char *)vmsg, 313 .iov_len = VHOST_USER_HDR_SIZE, 314 }; 315 struct msghdr msg = { 316 .msg_iov = &iov, 317 .msg_iovlen = 1, 318 .msg_control = control, 319 .msg_controllen = sizeof(control), 320 }; 321 size_t fd_size; 322 struct cmsghdr *cmsg; 323 int rc; 324 325 do { 326 rc = recvmsg(conn_fd, &msg, 0); 327 } while (rc < 0 && (errno == EINTR || errno == EAGAIN)); 328 329 if (rc < 0) { 330 vu_panic(dev, "Error while recvmsg: %s", strerror(errno)); 331 return false; 332 } 333 334 vmsg->fd_num = 0; 335 for (cmsg = CMSG_FIRSTHDR(&msg); 336 cmsg != NULL; 337 cmsg = CMSG_NXTHDR(&msg, cmsg)) 338 { 339 if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) { 340 fd_size = cmsg->cmsg_len - CMSG_LEN(0); 341 vmsg->fd_num = fd_size / sizeof(int); 342 assert(fd_size < VHOST_MEMORY_BASELINE_NREGIONS); 343 memcpy(vmsg->fds, CMSG_DATA(cmsg), fd_size); 344 break; 345 } 346 } 347 348 if (vmsg->size > sizeof(vmsg->payload)) { 349 vu_panic(dev, 350 "Error: too big message request: %d, size: vmsg->size: %u, " 351 "while sizeof(vmsg->payload) = %zu\n", 352 vmsg->request, vmsg->size, sizeof(vmsg->payload)); 353 goto fail; 354 } 355 356 if (vmsg->size) { 357 do { 358 rc = read(conn_fd, &vmsg->payload, vmsg->size); 359 } while (rc < 0 && (errno == EINTR || errno == EAGAIN)); 360 361 if (rc <= 0) { 362 vu_panic(dev, "Error while reading: %s", strerror(errno)); 363 goto fail; 364 } 365 366 assert((uint32_t)rc == vmsg->size); 367 } 368 369 return true; 370 371 fail: 372 vmsg_close_fds(vmsg); 373 374 return false; 375 } 376 377 static bool 378 vu_message_write(VuDev *dev, int conn_fd, VhostUserMsg *vmsg) 379 { 380 int rc; 381 uint8_t *p = (uint8_t *)vmsg; 382 char control[CMSG_SPACE(VHOST_MEMORY_BASELINE_NREGIONS * sizeof(int))] = {}; 383 struct iovec iov = { 384 .iov_base = (char *)vmsg, 385 .iov_len = VHOST_USER_HDR_SIZE, 386 }; 387 struct msghdr msg = { 388 .msg_iov = &iov, 389 .msg_iovlen = 1, 390 .msg_control = control, 391 }; 392 struct cmsghdr *cmsg; 393 394 memset(control, 0, sizeof(control)); 395 assert(vmsg->fd_num <= VHOST_MEMORY_BASELINE_NREGIONS); 396 if (vmsg->fd_num > 0) { 397 size_t fdsize = vmsg->fd_num * sizeof(int); 398 msg.msg_controllen = CMSG_SPACE(fdsize); 399 cmsg = CMSG_FIRSTHDR(&msg); 400 cmsg->cmsg_len = CMSG_LEN(fdsize); 401 cmsg->cmsg_level = SOL_SOCKET; 402 cmsg->cmsg_type = SCM_RIGHTS; 403 memcpy(CMSG_DATA(cmsg), vmsg->fds, fdsize); 404 } else { 405 msg.msg_controllen = 0; 406 } 407 408 do { 409 rc = sendmsg(conn_fd, &msg, 0); 410 } while (rc < 0 && (errno == EINTR || errno == EAGAIN)); 411 412 if (vmsg->size) { 413 do { 414 if (vmsg->data) { 415 rc = write(conn_fd, vmsg->data, vmsg->size); 416 } else { 417 rc = write(conn_fd, p + VHOST_USER_HDR_SIZE, vmsg->size); 418 } 419 } while (rc < 0 && (errno == EINTR || errno == EAGAIN)); 420 } 421 422 if (rc <= 0) { 423 vu_panic(dev, "Error while writing: %s", strerror(errno)); 424 return false; 425 } 426 427 return true; 428 } 429 430 static bool 431 vu_send_reply(VuDev *dev, int conn_fd, VhostUserMsg *vmsg) 432 { 433 /* Set the version in the flags when sending the reply */ 434 vmsg->flags &= ~VHOST_USER_VERSION_MASK; 435 vmsg->flags |= VHOST_USER_VERSION; 436 vmsg->flags |= VHOST_USER_REPLY_MASK; 437 438 return vu_message_write(dev, conn_fd, vmsg); 439 } 440 441 /* 442 * Processes a reply on the backend channel. 443 * Entered with backend_mutex held and releases it before exit. 444 * Returns true on success. 445 */ 446 static bool 447 vu_process_message_reply(VuDev *dev, const VhostUserMsg *vmsg) 448 { 449 VhostUserMsg msg_reply; 450 bool result = false; 451 452 if ((vmsg->flags & VHOST_USER_NEED_REPLY_MASK) == 0) { 453 result = true; 454 goto out; 455 } 456 457 if (!vu_message_read_default(dev, dev->backend_fd, &msg_reply)) { 458 goto out; 459 } 460 461 if (msg_reply.request != vmsg->request) { 462 DPRINT("Received unexpected msg type. Expected %d received %d", 463 vmsg->request, msg_reply.request); 464 goto out; 465 } 466 467 result = msg_reply.payload.u64 == 0; 468 469 out: 470 pthread_mutex_unlock(&dev->backend_mutex); 471 return result; 472 } 473 474 /* Kick the log_call_fd if required. */ 475 static void 476 vu_log_kick(VuDev *dev) 477 { 478 if (dev->log_call_fd != -1) { 479 DPRINT("Kicking the QEMU's log...\n"); 480 if (eventfd_write(dev->log_call_fd, 1) < 0) { 481 vu_panic(dev, "Error writing eventfd: %s", strerror(errno)); 482 } 483 } 484 } 485 486 static void 487 vu_log_page(uint8_t *log_table, uint64_t page) 488 { 489 DPRINT("Logged dirty guest page: %"PRId64"\n", page); 490 qatomic_or(&log_table[page / 8], 1 << (page % 8)); 491 } 492 493 static void 494 vu_log_write(VuDev *dev, uint64_t address, uint64_t length) 495 { 496 uint64_t page; 497 498 if (!(dev->features & (1ULL << VHOST_F_LOG_ALL)) || 499 !dev->log_table || !length) { 500 return; 501 } 502 503 assert(dev->log_size > ((address + length - 1) / VHOST_LOG_PAGE / 8)); 504 505 page = address / VHOST_LOG_PAGE; 506 while (page * VHOST_LOG_PAGE < address + length) { 507 vu_log_page(dev->log_table, page); 508 page += 1; 509 } 510 511 vu_log_kick(dev); 512 } 513 514 static void 515 vu_kick_cb(VuDev *dev, int condition, void *data) 516 { 517 int index = (intptr_t)data; 518 VuVirtq *vq = &dev->vq[index]; 519 int sock = vq->kick_fd; 520 eventfd_t kick_data; 521 ssize_t rc; 522 523 rc = eventfd_read(sock, &kick_data); 524 if (rc == -1) { 525 vu_panic(dev, "kick eventfd_read(): %s", strerror(errno)); 526 dev->remove_watch(dev, dev->vq[index].kick_fd); 527 } else { 528 DPRINT("Got kick_data: %016"PRIx64" handler:%p idx:%d\n", 529 kick_data, vq->handler, index); 530 if (vq->handler) { 531 vq->handler(dev, index); 532 } 533 } 534 } 535 536 static bool 537 vu_get_features_exec(VuDev *dev, VhostUserMsg *vmsg) 538 { 539 vmsg->payload.u64 = 540 /* 541 * The following VIRTIO feature bits are supported by our virtqueue 542 * implementation: 543 */ 544 1ULL << VIRTIO_F_NOTIFY_ON_EMPTY | 545 1ULL << VIRTIO_RING_F_INDIRECT_DESC | 546 1ULL << VIRTIO_RING_F_EVENT_IDX | 547 1ULL << VIRTIO_F_VERSION_1 | 548 549 /* vhost-user feature bits */ 550 1ULL << VHOST_F_LOG_ALL | 551 1ULL << VHOST_USER_F_PROTOCOL_FEATURES; 552 553 if (dev->iface->get_features) { 554 vmsg->payload.u64 |= dev->iface->get_features(dev); 555 } 556 557 vmsg->size = sizeof(vmsg->payload.u64); 558 vmsg->fd_num = 0; 559 560 DPRINT("Sending back to guest u64: 0x%016"PRIx64"\n", vmsg->payload.u64); 561 562 return true; 563 } 564 565 static void 566 vu_set_enable_all_rings(VuDev *dev, bool enabled) 567 { 568 uint16_t i; 569 570 for (i = 0; i < dev->max_queues; i++) { 571 dev->vq[i].enable = enabled; 572 } 573 } 574 575 static bool 576 vu_set_features_exec(VuDev *dev, VhostUserMsg *vmsg) 577 { 578 DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64); 579 580 dev->features = vmsg->payload.u64; 581 if (!vu_has_feature(dev, VIRTIO_F_VERSION_1)) { 582 /* 583 * We only support devices conforming to VIRTIO 1.0 or 584 * later 585 */ 586 vu_panic(dev, "virtio legacy devices aren't supported by libvhost-user"); 587 return false; 588 } 589 590 if (!(dev->features & VHOST_USER_F_PROTOCOL_FEATURES)) { 591 vu_set_enable_all_rings(dev, true); 592 } 593 594 if (dev->iface->set_features) { 595 dev->iface->set_features(dev, dev->features); 596 } 597 598 return false; 599 } 600 601 static bool 602 vu_set_owner_exec(VuDev *dev, VhostUserMsg *vmsg) 603 { 604 return false; 605 } 606 607 static void 608 vu_close_log(VuDev *dev) 609 { 610 if (dev->log_table) { 611 if (munmap(dev->log_table, dev->log_size) != 0) { 612 perror("close log munmap() error"); 613 } 614 615 dev->log_table = NULL; 616 } 617 if (dev->log_call_fd != -1) { 618 close(dev->log_call_fd); 619 dev->log_call_fd = -1; 620 } 621 } 622 623 static bool 624 vu_reset_device_exec(VuDev *dev, VhostUserMsg *vmsg) 625 { 626 vu_set_enable_all_rings(dev, false); 627 628 return false; 629 } 630 631 static bool 632 map_ring(VuDev *dev, VuVirtq *vq) 633 { 634 vq->vring.desc = qva_to_va(dev, vq->vra.desc_user_addr); 635 vq->vring.used = qva_to_va(dev, vq->vra.used_user_addr); 636 vq->vring.avail = qva_to_va(dev, vq->vra.avail_user_addr); 637 638 DPRINT("Setting virtq addresses:\n"); 639 DPRINT(" vring_desc at %p\n", vq->vring.desc); 640 DPRINT(" vring_used at %p\n", vq->vring.used); 641 DPRINT(" vring_avail at %p\n", vq->vring.avail); 642 643 return !(vq->vring.desc && vq->vring.used && vq->vring.avail); 644 } 645 646 static bool 647 generate_faults(VuDev *dev) { 648 unsigned int i; 649 for (i = 0; i < dev->nregions; i++) { 650 #ifdef UFFDIO_REGISTER 651 VuDevRegion *dev_region = &dev->regions[i]; 652 int ret; 653 struct uffdio_register reg_struct; 654 655 /* 656 * We should already have an open ufd. Mark each memory 657 * range as ufd. 658 * Discard any mapping we have here; note I can't use MADV_REMOVE 659 * or fallocate to make the hole since I don't want to lose 660 * data that's already arrived in the shared process. 661 * TODO: How to do hugepage 662 */ 663 ret = madvise((void *)(uintptr_t)dev_region->mmap_addr, 664 dev_region->size + dev_region->mmap_offset, 665 MADV_DONTNEED); 666 if (ret) { 667 fprintf(stderr, 668 "%s: Failed to madvise(DONTNEED) region %d: %s\n", 669 __func__, i, strerror(errno)); 670 } 671 /* 672 * Turn off transparent hugepages so we dont get lose wakeups 673 * in neighbouring pages. 674 * TODO: Turn this backon later. 675 */ 676 ret = madvise((void *)(uintptr_t)dev_region->mmap_addr, 677 dev_region->size + dev_region->mmap_offset, 678 MADV_NOHUGEPAGE); 679 if (ret) { 680 /* 681 * Note: This can happen legally on kernels that are configured 682 * without madvise'able hugepages 683 */ 684 fprintf(stderr, 685 "%s: Failed to madvise(NOHUGEPAGE) region %d: %s\n", 686 __func__, i, strerror(errno)); 687 } 688 689 reg_struct.range.start = (uintptr_t)dev_region->mmap_addr; 690 reg_struct.range.len = dev_region->size + dev_region->mmap_offset; 691 reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING; 692 693 if (ioctl(dev->postcopy_ufd, UFFDIO_REGISTER, ®_struct)) { 694 vu_panic(dev, "%s: Failed to userfault region %d " 695 "@%" PRIx64 " + size:%" PRIx64 " offset: %" PRIx64 696 ": (ufd=%d)%s\n", 697 __func__, i, 698 dev_region->mmap_addr, 699 dev_region->size, dev_region->mmap_offset, 700 dev->postcopy_ufd, strerror(errno)); 701 return false; 702 } 703 if (!(reg_struct.ioctls & (1ULL << _UFFDIO_COPY))) { 704 vu_panic(dev, "%s Region (%d) doesn't support COPY", 705 __func__, i); 706 return false; 707 } 708 DPRINT("%s: region %d: Registered userfault for %" 709 PRIx64 " + %" PRIx64 "\n", __func__, i, 710 (uint64_t)reg_struct.range.start, 711 (uint64_t)reg_struct.range.len); 712 /* Now it's registered we can let the client at it */ 713 if (mprotect((void *)(uintptr_t)dev_region->mmap_addr, 714 dev_region->size + dev_region->mmap_offset, 715 PROT_READ | PROT_WRITE)) { 716 vu_panic(dev, "failed to mprotect region %d for postcopy (%s)", 717 i, strerror(errno)); 718 return false; 719 } 720 /* TODO: Stash 'zero' support flags somewhere */ 721 #endif 722 } 723 724 return true; 725 } 726 727 static bool 728 vu_add_mem_reg(VuDev *dev, VhostUserMsg *vmsg) { 729 int i; 730 bool track_ramblocks = dev->postcopy_listening; 731 VhostUserMemoryRegion m = vmsg->payload.memreg.region, *msg_region = &m; 732 VuDevRegion *dev_region = &dev->regions[dev->nregions]; 733 void *mmap_addr; 734 735 if (vmsg->fd_num != 1) { 736 vmsg_close_fds(vmsg); 737 vu_panic(dev, "VHOST_USER_ADD_MEM_REG received %d fds - only 1 fd " 738 "should be sent for this message type", vmsg->fd_num); 739 return false; 740 } 741 742 if (vmsg->size < VHOST_USER_MEM_REG_SIZE) { 743 close(vmsg->fds[0]); 744 vu_panic(dev, "VHOST_USER_ADD_MEM_REG requires a message size of at " 745 "least %zu bytes and only %d bytes were received", 746 VHOST_USER_MEM_REG_SIZE, vmsg->size); 747 return false; 748 } 749 750 if (dev->nregions == VHOST_USER_MAX_RAM_SLOTS) { 751 close(vmsg->fds[0]); 752 vu_panic(dev, "failing attempt to hot add memory via " 753 "VHOST_USER_ADD_MEM_REG message because the backend has " 754 "no free ram slots available"); 755 return false; 756 } 757 758 /* 759 * If we are in postcopy mode and we receive a u64 payload with a 0 value 760 * we know all the postcopy client bases have been received, and we 761 * should start generating faults. 762 */ 763 if (track_ramblocks && 764 vmsg->size == sizeof(vmsg->payload.u64) && 765 vmsg->payload.u64 == 0) { 766 (void)generate_faults(dev); 767 return false; 768 } 769 770 DPRINT("Adding region: %u\n", dev->nregions); 771 DPRINT(" guest_phys_addr: 0x%016"PRIx64"\n", 772 msg_region->guest_phys_addr); 773 DPRINT(" memory_size: 0x%016"PRIx64"\n", 774 msg_region->memory_size); 775 DPRINT(" userspace_addr 0x%016"PRIx64"\n", 776 msg_region->userspace_addr); 777 DPRINT(" mmap_offset 0x%016"PRIx64"\n", 778 msg_region->mmap_offset); 779 780 dev_region->gpa = msg_region->guest_phys_addr; 781 dev_region->size = msg_region->memory_size; 782 dev_region->qva = msg_region->userspace_addr; 783 dev_region->mmap_offset = msg_region->mmap_offset; 784 785 /* 786 * We don't use offset argument of mmap() since the 787 * mapped address has to be page aligned, and we use huge 788 * pages. 789 */ 790 if (track_ramblocks) { 791 /* 792 * In postcopy we're using PROT_NONE here to catch anyone 793 * accessing it before we userfault. 794 */ 795 mmap_addr = mmap(0, dev_region->size + dev_region->mmap_offset, 796 PROT_NONE, MAP_SHARED | MAP_NORESERVE, 797 vmsg->fds[0], 0); 798 } else { 799 mmap_addr = mmap(0, dev_region->size + dev_region->mmap_offset, 800 PROT_READ | PROT_WRITE, MAP_SHARED | MAP_NORESERVE, 801 vmsg->fds[0], 0); 802 } 803 804 if (mmap_addr == MAP_FAILED) { 805 vu_panic(dev, "region mmap error: %s", strerror(errno)); 806 } else { 807 dev_region->mmap_addr = (uint64_t)(uintptr_t)mmap_addr; 808 DPRINT(" mmap_addr: 0x%016"PRIx64"\n", 809 dev_region->mmap_addr); 810 } 811 812 close(vmsg->fds[0]); 813 814 if (track_ramblocks) { 815 /* 816 * Return the address to QEMU so that it can translate the ufd 817 * fault addresses back. 818 */ 819 msg_region->userspace_addr = (uintptr_t)(mmap_addr + 820 dev_region->mmap_offset); 821 822 /* Send the message back to qemu with the addresses filled in. */ 823 vmsg->fd_num = 0; 824 DPRINT("Successfully added new region in postcopy\n"); 825 dev->nregions++; 826 return true; 827 } else { 828 for (i = 0; i < dev->max_queues; i++) { 829 if (dev->vq[i].vring.desc) { 830 if (map_ring(dev, &dev->vq[i])) { 831 vu_panic(dev, "remapping queue %d for new memory region", 832 i); 833 } 834 } 835 } 836 837 DPRINT("Successfully added new region\n"); 838 dev->nregions++; 839 return false; 840 } 841 } 842 843 static inline bool reg_equal(VuDevRegion *vudev_reg, 844 VhostUserMemoryRegion *msg_reg) 845 { 846 if (vudev_reg->gpa == msg_reg->guest_phys_addr && 847 vudev_reg->qva == msg_reg->userspace_addr && 848 vudev_reg->size == msg_reg->memory_size) { 849 return true; 850 } 851 852 return false; 853 } 854 855 static bool 856 vu_rem_mem_reg(VuDev *dev, VhostUserMsg *vmsg) { 857 VhostUserMemoryRegion m = vmsg->payload.memreg.region, *msg_region = &m; 858 unsigned int i; 859 bool found = false; 860 861 if (vmsg->fd_num > 1) { 862 vmsg_close_fds(vmsg); 863 vu_panic(dev, "VHOST_USER_REM_MEM_REG received %d fds - at most 1 fd " 864 "should be sent for this message type", vmsg->fd_num); 865 return false; 866 } 867 868 if (vmsg->size < VHOST_USER_MEM_REG_SIZE) { 869 vmsg_close_fds(vmsg); 870 vu_panic(dev, "VHOST_USER_REM_MEM_REG requires a message size of at " 871 "least %zu bytes and only %d bytes were received", 872 VHOST_USER_MEM_REG_SIZE, vmsg->size); 873 return false; 874 } 875 876 DPRINT("Removing region:\n"); 877 DPRINT(" guest_phys_addr: 0x%016"PRIx64"\n", 878 msg_region->guest_phys_addr); 879 DPRINT(" memory_size: 0x%016"PRIx64"\n", 880 msg_region->memory_size); 881 DPRINT(" userspace_addr 0x%016"PRIx64"\n", 882 msg_region->userspace_addr); 883 DPRINT(" mmap_offset 0x%016"PRIx64"\n", 884 msg_region->mmap_offset); 885 886 for (i = 0; i < dev->nregions; i++) { 887 if (reg_equal(&dev->regions[i], msg_region)) { 888 VuDevRegion *r = &dev->regions[i]; 889 void *ma = (void *) (uintptr_t) r->mmap_addr; 890 891 if (ma) { 892 munmap(ma, r->size + r->mmap_offset); 893 } 894 895 /* 896 * Shift all affected entries by 1 to close the hole at index i and 897 * zero out the last entry. 898 */ 899 memmove(dev->regions + i, dev->regions + i + 1, 900 sizeof(VuDevRegion) * (dev->nregions - i - 1)); 901 memset(dev->regions + dev->nregions - 1, 0, sizeof(VuDevRegion)); 902 DPRINT("Successfully removed a region\n"); 903 dev->nregions--; 904 i--; 905 906 found = true; 907 908 /* Continue the search for eventual duplicates. */ 909 } 910 } 911 912 if (!found) { 913 vu_panic(dev, "Specified region not found\n"); 914 } 915 916 vmsg_close_fds(vmsg); 917 918 return false; 919 } 920 921 static bool 922 vu_get_shared_object(VuDev *dev, VhostUserMsg *vmsg) 923 { 924 int fd_num = 0; 925 int dmabuf_fd = -1; 926 if (dev->iface->get_shared_object) { 927 dmabuf_fd = dev->iface->get_shared_object( 928 dev, &vmsg->payload.object.uuid[0]); 929 } 930 if (dmabuf_fd != -1) { 931 DPRINT("dmabuf_fd found for requested UUID\n"); 932 vmsg->fds[fd_num++] = dmabuf_fd; 933 } 934 vmsg->fd_num = fd_num; 935 936 return true; 937 } 938 939 static bool 940 vu_set_mem_table_exec(VuDev *dev, VhostUserMsg *vmsg) 941 { 942 VhostUserMemory m = vmsg->payload.memory, *memory = &m; 943 int prot = PROT_READ | PROT_WRITE; 944 unsigned int i; 945 946 if (dev->postcopy_listening) { 947 /* 948 * In postcopy we're using PROT_NONE here to catch anyone 949 * accessing it before we userfault 950 */ 951 prot = PROT_NONE; 952 } 953 954 vu_remove_all_mem_regs(dev); 955 dev->nregions = memory->nregions; 956 957 DPRINT("Nregions: %u\n", memory->nregions); 958 for (i = 0; i < dev->nregions; i++) { 959 void *mmap_addr; 960 VhostUserMemoryRegion *msg_region = &memory->regions[i]; 961 VuDevRegion *dev_region = &dev->regions[i]; 962 963 DPRINT("Region %d\n", i); 964 DPRINT(" guest_phys_addr: 0x%016"PRIx64"\n", 965 msg_region->guest_phys_addr); 966 DPRINT(" memory_size: 0x%016"PRIx64"\n", 967 msg_region->memory_size); 968 DPRINT(" userspace_addr 0x%016"PRIx64"\n", 969 msg_region->userspace_addr); 970 DPRINT(" mmap_offset 0x%016"PRIx64"\n", 971 msg_region->mmap_offset); 972 973 dev_region->gpa = msg_region->guest_phys_addr; 974 dev_region->size = msg_region->memory_size; 975 dev_region->qva = msg_region->userspace_addr; 976 dev_region->mmap_offset = msg_region->mmap_offset; 977 978 /* We don't use offset argument of mmap() since the 979 * mapped address has to be page aligned, and we use huge 980 * pages. */ 981 mmap_addr = mmap(0, dev_region->size + dev_region->mmap_offset, 982 prot, MAP_SHARED | MAP_NORESERVE, vmsg->fds[i], 0); 983 984 if (mmap_addr == MAP_FAILED) { 985 vu_panic(dev, "region mmap error: %s", strerror(errno)); 986 } else { 987 dev_region->mmap_addr = (uint64_t)(uintptr_t)mmap_addr; 988 DPRINT(" mmap_addr: 0x%016"PRIx64"\n", 989 dev_region->mmap_addr); 990 } 991 992 if (dev->postcopy_listening) { 993 /* 994 * Return the address to QEMU so that it can translate the ufd 995 * fault addresses back. 996 */ 997 msg_region->userspace_addr = (uintptr_t)(mmap_addr + 998 dev_region->mmap_offset); 999 } 1000 close(vmsg->fds[i]); 1001 } 1002 1003 if (dev->postcopy_listening) { 1004 /* Send the message back to qemu with the addresses filled in */ 1005 vmsg->fd_num = 0; 1006 if (!vu_send_reply(dev, dev->sock, vmsg)) { 1007 vu_panic(dev, "failed to respond to set-mem-table for postcopy"); 1008 return false; 1009 } 1010 1011 /* 1012 * Wait for QEMU to confirm that it's registered the handler for the 1013 * faults. 1014 */ 1015 if (!dev->read_msg(dev, dev->sock, vmsg) || 1016 vmsg->size != sizeof(vmsg->payload.u64) || 1017 vmsg->payload.u64 != 0) { 1018 vu_panic(dev, "failed to receive valid ack for postcopy set-mem-table"); 1019 return false; 1020 } 1021 1022 /* OK, now we can go and register the memory and generate faults */ 1023 (void)generate_faults(dev); 1024 return false; 1025 } 1026 1027 for (i = 0; i < dev->max_queues; i++) { 1028 if (dev->vq[i].vring.desc) { 1029 if (map_ring(dev, &dev->vq[i])) { 1030 vu_panic(dev, "remapping queue %d during setmemtable", i); 1031 } 1032 } 1033 } 1034 1035 return false; 1036 } 1037 1038 static bool 1039 vu_set_log_base_exec(VuDev *dev, VhostUserMsg *vmsg) 1040 { 1041 int fd; 1042 uint64_t log_mmap_size, log_mmap_offset; 1043 void *rc; 1044 1045 if (vmsg->fd_num != 1 || 1046 vmsg->size != sizeof(vmsg->payload.log)) { 1047 vu_panic(dev, "Invalid log_base message"); 1048 return true; 1049 } 1050 1051 fd = vmsg->fds[0]; 1052 log_mmap_offset = vmsg->payload.log.mmap_offset; 1053 log_mmap_size = vmsg->payload.log.mmap_size; 1054 DPRINT("Log mmap_offset: %"PRId64"\n", log_mmap_offset); 1055 DPRINT("Log mmap_size: %"PRId64"\n", log_mmap_size); 1056 1057 rc = mmap(0, log_mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 1058 log_mmap_offset); 1059 close(fd); 1060 if (rc == MAP_FAILED) { 1061 perror("log mmap error"); 1062 } 1063 1064 if (dev->log_table) { 1065 munmap(dev->log_table, dev->log_size); 1066 } 1067 dev->log_table = rc; 1068 dev->log_size = log_mmap_size; 1069 1070 vmsg->size = sizeof(vmsg->payload.u64); 1071 vmsg->fd_num = 0; 1072 1073 return true; 1074 } 1075 1076 static bool 1077 vu_set_log_fd_exec(VuDev *dev, VhostUserMsg *vmsg) 1078 { 1079 if (vmsg->fd_num != 1) { 1080 vu_panic(dev, "Invalid log_fd message"); 1081 return false; 1082 } 1083 1084 if (dev->log_call_fd != -1) { 1085 close(dev->log_call_fd); 1086 } 1087 dev->log_call_fd = vmsg->fds[0]; 1088 DPRINT("Got log_call_fd: %d\n", vmsg->fds[0]); 1089 1090 return false; 1091 } 1092 1093 static bool 1094 vu_set_vring_num_exec(VuDev *dev, VhostUserMsg *vmsg) 1095 { 1096 unsigned int index = vmsg->payload.state.index; 1097 unsigned int num = vmsg->payload.state.num; 1098 1099 DPRINT("State.index: %u\n", index); 1100 DPRINT("State.num: %u\n", num); 1101 dev->vq[index].vring.num = num; 1102 1103 return false; 1104 } 1105 1106 static bool 1107 vu_set_vring_addr_exec(VuDev *dev, VhostUserMsg *vmsg) 1108 { 1109 struct vhost_vring_addr addr = vmsg->payload.addr, *vra = &addr; 1110 unsigned int index = vra->index; 1111 VuVirtq *vq = &dev->vq[index]; 1112 1113 DPRINT("vhost_vring_addr:\n"); 1114 DPRINT(" index: %d\n", vra->index); 1115 DPRINT(" flags: %d\n", vra->flags); 1116 DPRINT(" desc_user_addr: 0x%016" PRIx64 "\n", (uint64_t)vra->desc_user_addr); 1117 DPRINT(" used_user_addr: 0x%016" PRIx64 "\n", (uint64_t)vra->used_user_addr); 1118 DPRINT(" avail_user_addr: 0x%016" PRIx64 "\n", (uint64_t)vra->avail_user_addr); 1119 DPRINT(" log_guest_addr: 0x%016" PRIx64 "\n", (uint64_t)vra->log_guest_addr); 1120 1121 vq->vra = *vra; 1122 vq->vring.flags = vra->flags; 1123 vq->vring.log_guest_addr = vra->log_guest_addr; 1124 1125 1126 if (map_ring(dev, vq)) { 1127 vu_panic(dev, "Invalid vring_addr message"); 1128 return false; 1129 } 1130 1131 vq->used_idx = le16toh(vq->vring.used->idx); 1132 1133 if (vq->last_avail_idx != vq->used_idx) { 1134 bool resume = dev->iface->queue_is_processed_in_order && 1135 dev->iface->queue_is_processed_in_order(dev, index); 1136 1137 DPRINT("Last avail index != used index: %u != %u%s\n", 1138 vq->last_avail_idx, vq->used_idx, 1139 resume ? ", resuming" : ""); 1140 1141 if (resume) { 1142 vq->shadow_avail_idx = vq->last_avail_idx = vq->used_idx; 1143 } 1144 } 1145 1146 return false; 1147 } 1148 1149 static bool 1150 vu_set_vring_base_exec(VuDev *dev, VhostUserMsg *vmsg) 1151 { 1152 unsigned int index = vmsg->payload.state.index; 1153 unsigned int num = vmsg->payload.state.num; 1154 1155 DPRINT("State.index: %u\n", index); 1156 DPRINT("State.num: %u\n", num); 1157 dev->vq[index].shadow_avail_idx = dev->vq[index].last_avail_idx = num; 1158 1159 return false; 1160 } 1161 1162 static bool 1163 vu_get_vring_base_exec(VuDev *dev, VhostUserMsg *vmsg) 1164 { 1165 unsigned int index = vmsg->payload.state.index; 1166 1167 DPRINT("State.index: %u\n", index); 1168 vmsg->payload.state.num = dev->vq[index].last_avail_idx; 1169 vmsg->size = sizeof(vmsg->payload.state); 1170 1171 dev->vq[index].started = false; 1172 if (dev->iface->queue_set_started) { 1173 dev->iface->queue_set_started(dev, index, false); 1174 } 1175 1176 if (dev->vq[index].call_fd != -1) { 1177 close(dev->vq[index].call_fd); 1178 dev->vq[index].call_fd = -1; 1179 } 1180 if (dev->vq[index].kick_fd != -1) { 1181 dev->remove_watch(dev, dev->vq[index].kick_fd); 1182 close(dev->vq[index].kick_fd); 1183 dev->vq[index].kick_fd = -1; 1184 } 1185 1186 return true; 1187 } 1188 1189 static bool 1190 vu_check_queue_msg_file(VuDev *dev, VhostUserMsg *vmsg) 1191 { 1192 int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK; 1193 bool nofd = vmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK; 1194 1195 if (index >= dev->max_queues) { 1196 vmsg_close_fds(vmsg); 1197 vu_panic(dev, "Invalid queue index: %u", index); 1198 return false; 1199 } 1200 1201 if (nofd) { 1202 vmsg_close_fds(vmsg); 1203 return true; 1204 } 1205 1206 if (vmsg->fd_num != 1) { 1207 vmsg_close_fds(vmsg); 1208 vu_panic(dev, "Invalid fds in request: %d", vmsg->request); 1209 return false; 1210 } 1211 1212 return true; 1213 } 1214 1215 static int 1216 inflight_desc_compare(const void *a, const void *b) 1217 { 1218 VuVirtqInflightDesc *desc0 = (VuVirtqInflightDesc *)a, 1219 *desc1 = (VuVirtqInflightDesc *)b; 1220 1221 if (desc1->counter > desc0->counter && 1222 (desc1->counter - desc0->counter) < VIRTQUEUE_MAX_SIZE * 2) { 1223 return 1; 1224 } 1225 1226 return -1; 1227 } 1228 1229 static int 1230 vu_check_queue_inflights(VuDev *dev, VuVirtq *vq) 1231 { 1232 int i = 0; 1233 1234 if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) { 1235 return 0; 1236 } 1237 1238 if (unlikely(!vq->inflight)) { 1239 return -1; 1240 } 1241 1242 if (unlikely(!vq->inflight->version)) { 1243 /* initialize the buffer */ 1244 vq->inflight->version = INFLIGHT_VERSION; 1245 return 0; 1246 } 1247 1248 vq->used_idx = le16toh(vq->vring.used->idx); 1249 vq->resubmit_num = 0; 1250 vq->resubmit_list = NULL; 1251 vq->counter = 0; 1252 1253 if (unlikely(vq->inflight->used_idx != vq->used_idx)) { 1254 vq->inflight->desc[vq->inflight->last_batch_head].inflight = 0; 1255 1256 barrier(); 1257 1258 vq->inflight->used_idx = vq->used_idx; 1259 } 1260 1261 for (i = 0; i < vq->inflight->desc_num; i++) { 1262 if (vq->inflight->desc[i].inflight == 1) { 1263 vq->inuse++; 1264 } 1265 } 1266 1267 vq->shadow_avail_idx = vq->last_avail_idx = vq->inuse + vq->used_idx; 1268 1269 if (vq->inuse) { 1270 vq->resubmit_list = calloc(vq->inuse, sizeof(VuVirtqInflightDesc)); 1271 if (!vq->resubmit_list) { 1272 return -1; 1273 } 1274 1275 for (i = 0; i < vq->inflight->desc_num; i++) { 1276 if (vq->inflight->desc[i].inflight) { 1277 vq->resubmit_list[vq->resubmit_num].index = i; 1278 vq->resubmit_list[vq->resubmit_num].counter = 1279 vq->inflight->desc[i].counter; 1280 vq->resubmit_num++; 1281 } 1282 } 1283 1284 if (vq->resubmit_num > 1) { 1285 qsort(vq->resubmit_list, vq->resubmit_num, 1286 sizeof(VuVirtqInflightDesc), inflight_desc_compare); 1287 } 1288 vq->counter = vq->resubmit_list[0].counter + 1; 1289 } 1290 1291 /* in case of I/O hang after reconnecting */ 1292 if (eventfd_write(vq->kick_fd, 1)) { 1293 return -1; 1294 } 1295 1296 return 0; 1297 } 1298 1299 static bool 1300 vu_set_vring_kick_exec(VuDev *dev, VhostUserMsg *vmsg) 1301 { 1302 int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK; 1303 bool nofd = vmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK; 1304 1305 DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64); 1306 1307 if (!vu_check_queue_msg_file(dev, vmsg)) { 1308 return false; 1309 } 1310 1311 if (dev->vq[index].kick_fd != -1) { 1312 dev->remove_watch(dev, dev->vq[index].kick_fd); 1313 close(dev->vq[index].kick_fd); 1314 dev->vq[index].kick_fd = -1; 1315 } 1316 1317 dev->vq[index].kick_fd = nofd ? -1 : vmsg->fds[0]; 1318 DPRINT("Got kick_fd: %d for vq: %d\n", dev->vq[index].kick_fd, index); 1319 1320 dev->vq[index].started = true; 1321 if (dev->iface->queue_set_started) { 1322 dev->iface->queue_set_started(dev, index, true); 1323 } 1324 1325 if (dev->vq[index].kick_fd != -1 && dev->vq[index].handler) { 1326 dev->set_watch(dev, dev->vq[index].kick_fd, VU_WATCH_IN, 1327 vu_kick_cb, (void *)(long)index); 1328 1329 DPRINT("Waiting for kicks on fd: %d for vq: %d\n", 1330 dev->vq[index].kick_fd, index); 1331 } 1332 1333 if (vu_check_queue_inflights(dev, &dev->vq[index])) { 1334 vu_panic(dev, "Failed to check inflights for vq: %d\n", index); 1335 } 1336 1337 return false; 1338 } 1339 1340 void vu_set_queue_handler(VuDev *dev, VuVirtq *vq, 1341 vu_queue_handler_cb handler) 1342 { 1343 int qidx = vq - dev->vq; 1344 1345 vq->handler = handler; 1346 if (vq->kick_fd >= 0) { 1347 if (handler) { 1348 dev->set_watch(dev, vq->kick_fd, VU_WATCH_IN, 1349 vu_kick_cb, (void *)(long)qidx); 1350 } else { 1351 dev->remove_watch(dev, vq->kick_fd); 1352 } 1353 } 1354 } 1355 1356 bool vu_set_queue_host_notifier(VuDev *dev, VuVirtq *vq, int fd, 1357 int size, int offset) 1358 { 1359 int qidx = vq - dev->vq; 1360 int fd_num = 0; 1361 VhostUserMsg vmsg = { 1362 .request = VHOST_USER_BACKEND_VRING_HOST_NOTIFIER_MSG, 1363 .flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK, 1364 .size = sizeof(vmsg.payload.area), 1365 .payload.area = { 1366 .u64 = qidx & VHOST_USER_VRING_IDX_MASK, 1367 .size = size, 1368 .offset = offset, 1369 }, 1370 }; 1371 1372 if (fd == -1) { 1373 vmsg.payload.area.u64 |= VHOST_USER_VRING_NOFD_MASK; 1374 } else { 1375 vmsg.fds[fd_num++] = fd; 1376 } 1377 1378 vmsg.fd_num = fd_num; 1379 1380 if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD)) { 1381 return false; 1382 } 1383 1384 pthread_mutex_lock(&dev->backend_mutex); 1385 if (!vu_message_write(dev, dev->backend_fd, &vmsg)) { 1386 pthread_mutex_unlock(&dev->backend_mutex); 1387 return false; 1388 } 1389 1390 /* Also unlocks the backend_mutex */ 1391 return vu_process_message_reply(dev, &vmsg); 1392 } 1393 1394 bool 1395 vu_lookup_shared_object(VuDev *dev, unsigned char uuid[UUID_LEN], 1396 int *dmabuf_fd) 1397 { 1398 bool result = false; 1399 VhostUserMsg msg_reply; 1400 VhostUserMsg msg = { 1401 .request = VHOST_USER_BACKEND_SHARED_OBJECT_LOOKUP, 1402 .size = sizeof(msg.payload.object), 1403 .flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK, 1404 }; 1405 1406 memcpy(msg.payload.object.uuid, uuid, sizeof(uuid[0]) * UUID_LEN); 1407 1408 if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_SHARED_OBJECT)) { 1409 return false; 1410 } 1411 1412 pthread_mutex_lock(&dev->backend_mutex); 1413 if (!vu_message_write(dev, dev->backend_fd, &msg)) { 1414 goto out; 1415 } 1416 1417 if (!vu_message_read_default(dev, dev->backend_fd, &msg_reply)) { 1418 goto out; 1419 } 1420 1421 if (msg_reply.request != msg.request) { 1422 DPRINT("Received unexpected msg type. Expected %d, received %d", 1423 msg.request, msg_reply.request); 1424 goto out; 1425 } 1426 1427 if (msg_reply.fd_num != 1) { 1428 DPRINT("Received unexpected number of fds. Expected 1, received %d", 1429 msg_reply.fd_num); 1430 goto out; 1431 } 1432 1433 *dmabuf_fd = msg_reply.fds[0]; 1434 result = *dmabuf_fd > 0 && msg_reply.payload.u64 == 0; 1435 out: 1436 pthread_mutex_unlock(&dev->backend_mutex); 1437 1438 return result; 1439 } 1440 1441 static bool 1442 vu_send_message(VuDev *dev, VhostUserMsg *vmsg) 1443 { 1444 bool result = false; 1445 pthread_mutex_lock(&dev->backend_mutex); 1446 if (!vu_message_write(dev, dev->backend_fd, vmsg)) { 1447 goto out; 1448 } 1449 1450 result = true; 1451 out: 1452 pthread_mutex_unlock(&dev->backend_mutex); 1453 1454 return result; 1455 } 1456 1457 bool 1458 vu_add_shared_object(VuDev *dev, unsigned char uuid[UUID_LEN]) 1459 { 1460 VhostUserMsg msg = { 1461 .request = VHOST_USER_BACKEND_SHARED_OBJECT_ADD, 1462 .size = sizeof(msg.payload.object), 1463 .flags = VHOST_USER_VERSION, 1464 }; 1465 1466 memcpy(msg.payload.object.uuid, uuid, sizeof(uuid[0]) * UUID_LEN); 1467 1468 if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_SHARED_OBJECT)) { 1469 return false; 1470 } 1471 1472 return vu_send_message(dev, &msg); 1473 } 1474 1475 bool 1476 vu_rm_shared_object(VuDev *dev, unsigned char uuid[UUID_LEN]) 1477 { 1478 VhostUserMsg msg = { 1479 .request = VHOST_USER_BACKEND_SHARED_OBJECT_REMOVE, 1480 .size = sizeof(msg.payload.object), 1481 .flags = VHOST_USER_VERSION, 1482 }; 1483 1484 memcpy(msg.payload.object.uuid, uuid, sizeof(uuid[0]) * UUID_LEN); 1485 1486 if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_SHARED_OBJECT)) { 1487 return false; 1488 } 1489 1490 return vu_send_message(dev, &msg); 1491 } 1492 1493 static bool 1494 vu_set_vring_call_exec(VuDev *dev, VhostUserMsg *vmsg) 1495 { 1496 int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK; 1497 bool nofd = vmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK; 1498 1499 DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64); 1500 1501 if (!vu_check_queue_msg_file(dev, vmsg)) { 1502 return false; 1503 } 1504 1505 if (dev->vq[index].call_fd != -1) { 1506 close(dev->vq[index].call_fd); 1507 dev->vq[index].call_fd = -1; 1508 } 1509 1510 dev->vq[index].call_fd = nofd ? -1 : vmsg->fds[0]; 1511 1512 /* in case of I/O hang after reconnecting */ 1513 if (dev->vq[index].call_fd != -1 && eventfd_write(vmsg->fds[0], 1)) { 1514 return -1; 1515 } 1516 1517 DPRINT("Got call_fd: %d for vq: %d\n", dev->vq[index].call_fd, index); 1518 1519 return false; 1520 } 1521 1522 static bool 1523 vu_set_vring_err_exec(VuDev *dev, VhostUserMsg *vmsg) 1524 { 1525 int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK; 1526 bool nofd = vmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK; 1527 1528 DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64); 1529 1530 if (!vu_check_queue_msg_file(dev, vmsg)) { 1531 return false; 1532 } 1533 1534 if (dev->vq[index].err_fd != -1) { 1535 close(dev->vq[index].err_fd); 1536 dev->vq[index].err_fd = -1; 1537 } 1538 1539 dev->vq[index].err_fd = nofd ? -1 : vmsg->fds[0]; 1540 1541 return false; 1542 } 1543 1544 static bool 1545 vu_get_protocol_features_exec(VuDev *dev, VhostUserMsg *vmsg) 1546 { 1547 /* 1548 * Note that we support, but intentionally do not set, 1549 * VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS. This means that 1550 * a device implementation can return it in its callback 1551 * (get_protocol_features) if it wants to use this for 1552 * simulation, but it is otherwise not desirable (if even 1553 * implemented by the frontend.) 1554 */ 1555 uint64_t features = 1ULL << VHOST_USER_PROTOCOL_F_MQ | 1556 1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD | 1557 1ULL << VHOST_USER_PROTOCOL_F_BACKEND_REQ | 1558 1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER | 1559 1ULL << VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD | 1560 1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK | 1561 1ULL << VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS; 1562 1563 if (have_userfault()) { 1564 features |= 1ULL << VHOST_USER_PROTOCOL_F_PAGEFAULT; 1565 } 1566 1567 if (dev->iface->get_config && dev->iface->set_config) { 1568 features |= 1ULL << VHOST_USER_PROTOCOL_F_CONFIG; 1569 } 1570 1571 if (dev->iface->get_protocol_features) { 1572 features |= dev->iface->get_protocol_features(dev); 1573 } 1574 1575 vmsg_set_reply_u64(vmsg, features); 1576 return true; 1577 } 1578 1579 static bool 1580 vu_set_protocol_features_exec(VuDev *dev, VhostUserMsg *vmsg) 1581 { 1582 uint64_t features = vmsg->payload.u64; 1583 1584 DPRINT("u64: 0x%016"PRIx64"\n", features); 1585 1586 dev->protocol_features = vmsg->payload.u64; 1587 1588 if (vu_has_protocol_feature(dev, 1589 VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS) && 1590 (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_BACKEND_REQ) || 1591 !vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_REPLY_ACK))) { 1592 /* 1593 * The use case for using messages for kick/call is simulation, to make 1594 * the kick and call synchronous. To actually get that behaviour, both 1595 * of the other features are required. 1596 * Theoretically, one could use only kick messages, or do them without 1597 * having F_REPLY_ACK, but too many (possibly pending) messages on the 1598 * socket will eventually cause the frontend to hang, to avoid this in 1599 * scenarios where not desired enforce that the settings are in a way 1600 * that actually enables the simulation case. 1601 */ 1602 vu_panic(dev, 1603 "F_IN_BAND_NOTIFICATIONS requires F_BACKEND_REQ && F_REPLY_ACK"); 1604 return false; 1605 } 1606 1607 if (dev->iface->set_protocol_features) { 1608 dev->iface->set_protocol_features(dev, features); 1609 } 1610 1611 return false; 1612 } 1613 1614 static bool 1615 vu_get_queue_num_exec(VuDev *dev, VhostUserMsg *vmsg) 1616 { 1617 vmsg_set_reply_u64(vmsg, dev->max_queues); 1618 return true; 1619 } 1620 1621 static bool 1622 vu_set_vring_enable_exec(VuDev *dev, VhostUserMsg *vmsg) 1623 { 1624 unsigned int index = vmsg->payload.state.index; 1625 unsigned int enable = vmsg->payload.state.num; 1626 1627 DPRINT("State.index: %u\n", index); 1628 DPRINT("State.enable: %u\n", enable); 1629 1630 if (index >= dev->max_queues) { 1631 vu_panic(dev, "Invalid vring_enable index: %u", index); 1632 return false; 1633 } 1634 1635 dev->vq[index].enable = enable; 1636 return false; 1637 } 1638 1639 static bool 1640 vu_set_backend_req_fd(VuDev *dev, VhostUserMsg *vmsg) 1641 { 1642 if (vmsg->fd_num != 1) { 1643 vu_panic(dev, "Invalid backend_req_fd message (%d fd's)", vmsg->fd_num); 1644 return false; 1645 } 1646 1647 if (dev->backend_fd != -1) { 1648 close(dev->backend_fd); 1649 } 1650 dev->backend_fd = vmsg->fds[0]; 1651 DPRINT("Got backend_fd: %d\n", vmsg->fds[0]); 1652 1653 return false; 1654 } 1655 1656 static bool 1657 vu_get_config(VuDev *dev, VhostUserMsg *vmsg) 1658 { 1659 int ret = -1; 1660 1661 if (dev->iface->get_config) { 1662 ret = dev->iface->get_config(dev, vmsg->payload.config.region, 1663 vmsg->payload.config.size); 1664 } 1665 1666 if (ret) { 1667 /* resize to zero to indicate an error to frontend */ 1668 vmsg->size = 0; 1669 } 1670 1671 return true; 1672 } 1673 1674 static bool 1675 vu_set_config(VuDev *dev, VhostUserMsg *vmsg) 1676 { 1677 int ret = -1; 1678 1679 if (dev->iface->set_config) { 1680 ret = dev->iface->set_config(dev, vmsg->payload.config.region, 1681 vmsg->payload.config.offset, 1682 vmsg->payload.config.size, 1683 vmsg->payload.config.flags); 1684 if (ret) { 1685 vu_panic(dev, "Set virtio configuration space failed"); 1686 } 1687 } 1688 1689 return false; 1690 } 1691 1692 static bool 1693 vu_set_postcopy_advise(VuDev *dev, VhostUserMsg *vmsg) 1694 { 1695 #ifdef UFFDIO_API 1696 struct uffdio_api api_struct; 1697 1698 dev->postcopy_ufd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK); 1699 vmsg->size = 0; 1700 #else 1701 dev->postcopy_ufd = -1; 1702 #endif 1703 1704 if (dev->postcopy_ufd == -1) { 1705 vu_panic(dev, "Userfaultfd not available: %s", strerror(errno)); 1706 goto out; 1707 } 1708 1709 #ifdef UFFDIO_API 1710 api_struct.api = UFFD_API; 1711 api_struct.features = 0; 1712 if (ioctl(dev->postcopy_ufd, UFFDIO_API, &api_struct)) { 1713 vu_panic(dev, "Failed UFFDIO_API: %s", strerror(errno)); 1714 close(dev->postcopy_ufd); 1715 dev->postcopy_ufd = -1; 1716 goto out; 1717 } 1718 /* TODO: Stash feature flags somewhere */ 1719 #endif 1720 1721 out: 1722 /* Return a ufd to the QEMU */ 1723 vmsg->fd_num = 1; 1724 vmsg->fds[0] = dev->postcopy_ufd; 1725 return true; /* = send a reply */ 1726 } 1727 1728 static bool 1729 vu_set_postcopy_listen(VuDev *dev, VhostUserMsg *vmsg) 1730 { 1731 if (dev->nregions) { 1732 vu_panic(dev, "Regions already registered at postcopy-listen"); 1733 vmsg_set_reply_u64(vmsg, -1); 1734 return true; 1735 } 1736 dev->postcopy_listening = true; 1737 1738 vmsg_set_reply_u64(vmsg, 0); 1739 return true; 1740 } 1741 1742 static bool 1743 vu_set_postcopy_end(VuDev *dev, VhostUserMsg *vmsg) 1744 { 1745 DPRINT("%s: Entry\n", __func__); 1746 dev->postcopy_listening = false; 1747 if (dev->postcopy_ufd > 0) { 1748 close(dev->postcopy_ufd); 1749 dev->postcopy_ufd = -1; 1750 DPRINT("%s: Done close\n", __func__); 1751 } 1752 1753 vmsg_set_reply_u64(vmsg, 0); 1754 DPRINT("%s: exit\n", __func__); 1755 return true; 1756 } 1757 1758 static inline uint64_t 1759 vu_inflight_queue_size(uint16_t queue_size) 1760 { 1761 return ALIGN_UP(sizeof(VuDescStateSplit) * queue_size + 1762 sizeof(uint16_t), INFLIGHT_ALIGNMENT); 1763 } 1764 1765 #ifdef MFD_ALLOW_SEALING 1766 static void * 1767 memfd_alloc(const char *name, size_t size, unsigned int flags, int *fd) 1768 { 1769 void *ptr; 1770 int ret; 1771 1772 *fd = memfd_create(name, MFD_ALLOW_SEALING); 1773 if (*fd < 0) { 1774 return NULL; 1775 } 1776 1777 ret = ftruncate(*fd, size); 1778 if (ret < 0) { 1779 close(*fd); 1780 return NULL; 1781 } 1782 1783 ret = fcntl(*fd, F_ADD_SEALS, flags); 1784 if (ret < 0) { 1785 close(*fd); 1786 return NULL; 1787 } 1788 1789 ptr = mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, *fd, 0); 1790 if (ptr == MAP_FAILED) { 1791 close(*fd); 1792 return NULL; 1793 } 1794 1795 return ptr; 1796 } 1797 #endif 1798 1799 static bool 1800 vu_get_inflight_fd(VuDev *dev, VhostUserMsg *vmsg) 1801 { 1802 int fd = -1; 1803 void *addr = NULL; 1804 uint64_t mmap_size; 1805 uint16_t num_queues, queue_size; 1806 1807 if (vmsg->size != sizeof(vmsg->payload.inflight)) { 1808 vu_panic(dev, "Invalid get_inflight_fd message:%d", vmsg->size); 1809 vmsg->payload.inflight.mmap_size = 0; 1810 return true; 1811 } 1812 1813 num_queues = vmsg->payload.inflight.num_queues; 1814 queue_size = vmsg->payload.inflight.queue_size; 1815 1816 DPRINT("set_inflight_fd num_queues: %"PRId16"\n", num_queues); 1817 DPRINT("set_inflight_fd queue_size: %"PRId16"\n", queue_size); 1818 1819 mmap_size = vu_inflight_queue_size(queue_size) * num_queues; 1820 1821 #ifdef MFD_ALLOW_SEALING 1822 addr = memfd_alloc("vhost-inflight", mmap_size, 1823 F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL, 1824 &fd); 1825 #else 1826 vu_panic(dev, "Not implemented: memfd support is missing"); 1827 #endif 1828 1829 if (!addr) { 1830 vu_panic(dev, "Failed to alloc vhost inflight area"); 1831 vmsg->payload.inflight.mmap_size = 0; 1832 return true; 1833 } 1834 1835 memset(addr, 0, mmap_size); 1836 1837 dev->inflight_info.addr = addr; 1838 dev->inflight_info.size = vmsg->payload.inflight.mmap_size = mmap_size; 1839 dev->inflight_info.fd = vmsg->fds[0] = fd; 1840 vmsg->fd_num = 1; 1841 vmsg->payload.inflight.mmap_offset = 0; 1842 1843 DPRINT("send inflight mmap_size: %"PRId64"\n", 1844 vmsg->payload.inflight.mmap_size); 1845 DPRINT("send inflight mmap offset: %"PRId64"\n", 1846 vmsg->payload.inflight.mmap_offset); 1847 1848 return true; 1849 } 1850 1851 static bool 1852 vu_set_inflight_fd(VuDev *dev, VhostUserMsg *vmsg) 1853 { 1854 int fd, i; 1855 uint64_t mmap_size, mmap_offset; 1856 uint16_t num_queues, queue_size; 1857 void *rc; 1858 1859 if (vmsg->fd_num != 1 || 1860 vmsg->size != sizeof(vmsg->payload.inflight)) { 1861 vu_panic(dev, "Invalid set_inflight_fd message size:%d fds:%d", 1862 vmsg->size, vmsg->fd_num); 1863 return false; 1864 } 1865 1866 fd = vmsg->fds[0]; 1867 mmap_size = vmsg->payload.inflight.mmap_size; 1868 mmap_offset = vmsg->payload.inflight.mmap_offset; 1869 num_queues = vmsg->payload.inflight.num_queues; 1870 queue_size = vmsg->payload.inflight.queue_size; 1871 1872 DPRINT("set_inflight_fd mmap_size: %"PRId64"\n", mmap_size); 1873 DPRINT("set_inflight_fd mmap_offset: %"PRId64"\n", mmap_offset); 1874 DPRINT("set_inflight_fd num_queues: %"PRId16"\n", num_queues); 1875 DPRINT("set_inflight_fd queue_size: %"PRId16"\n", queue_size); 1876 1877 rc = mmap(0, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED, 1878 fd, mmap_offset); 1879 1880 if (rc == MAP_FAILED) { 1881 vu_panic(dev, "set_inflight_fd mmap error: %s", strerror(errno)); 1882 return false; 1883 } 1884 1885 if (dev->inflight_info.fd) { 1886 close(dev->inflight_info.fd); 1887 } 1888 1889 if (dev->inflight_info.addr) { 1890 munmap(dev->inflight_info.addr, dev->inflight_info.size); 1891 } 1892 1893 dev->inflight_info.fd = fd; 1894 dev->inflight_info.addr = rc; 1895 dev->inflight_info.size = mmap_size; 1896 1897 for (i = 0; i < num_queues; i++) { 1898 dev->vq[i].inflight = (VuVirtqInflight *)rc; 1899 dev->vq[i].inflight->desc_num = queue_size; 1900 rc = (void *)((char *)rc + vu_inflight_queue_size(queue_size)); 1901 } 1902 1903 return false; 1904 } 1905 1906 static bool 1907 vu_handle_vring_kick(VuDev *dev, VhostUserMsg *vmsg) 1908 { 1909 unsigned int index = vmsg->payload.state.index; 1910 1911 if (index >= dev->max_queues) { 1912 vu_panic(dev, "Invalid queue index: %u", index); 1913 return false; 1914 } 1915 1916 DPRINT("Got kick message: handler:%p idx:%u\n", 1917 dev->vq[index].handler, index); 1918 1919 if (!dev->vq[index].started) { 1920 dev->vq[index].started = true; 1921 1922 if (dev->iface->queue_set_started) { 1923 dev->iface->queue_set_started(dev, index, true); 1924 } 1925 } 1926 1927 if (dev->vq[index].handler) { 1928 dev->vq[index].handler(dev, index); 1929 } 1930 1931 return false; 1932 } 1933 1934 static bool vu_handle_get_max_memslots(VuDev *dev, VhostUserMsg *vmsg) 1935 { 1936 vmsg_set_reply_u64(vmsg, VHOST_USER_MAX_RAM_SLOTS); 1937 1938 DPRINT("u64: 0x%016"PRIx64"\n", (uint64_t) VHOST_USER_MAX_RAM_SLOTS); 1939 1940 return true; 1941 } 1942 1943 static bool 1944 vu_process_message(VuDev *dev, VhostUserMsg *vmsg) 1945 { 1946 int do_reply = 0; 1947 1948 /* Print out generic part of the request. */ 1949 DPRINT("================ Vhost user message ================\n"); 1950 DPRINT("Request: %s (%d)\n", vu_request_to_string(vmsg->request), 1951 vmsg->request); 1952 DPRINT("Flags: 0x%x\n", vmsg->flags); 1953 DPRINT("Size: %u\n", vmsg->size); 1954 1955 if (vmsg->fd_num) { 1956 int i; 1957 DPRINT("Fds:"); 1958 for (i = 0; i < vmsg->fd_num; i++) { 1959 DPRINT(" %d", vmsg->fds[i]); 1960 } 1961 DPRINT("\n"); 1962 } 1963 1964 if (dev->iface->process_msg && 1965 dev->iface->process_msg(dev, vmsg, &do_reply)) { 1966 return do_reply; 1967 } 1968 1969 switch (vmsg->request) { 1970 case VHOST_USER_GET_FEATURES: 1971 return vu_get_features_exec(dev, vmsg); 1972 case VHOST_USER_SET_FEATURES: 1973 return vu_set_features_exec(dev, vmsg); 1974 case VHOST_USER_GET_PROTOCOL_FEATURES: 1975 return vu_get_protocol_features_exec(dev, vmsg); 1976 case VHOST_USER_SET_PROTOCOL_FEATURES: 1977 return vu_set_protocol_features_exec(dev, vmsg); 1978 case VHOST_USER_SET_OWNER: 1979 return vu_set_owner_exec(dev, vmsg); 1980 case VHOST_USER_RESET_OWNER: 1981 return vu_reset_device_exec(dev, vmsg); 1982 case VHOST_USER_SET_MEM_TABLE: 1983 return vu_set_mem_table_exec(dev, vmsg); 1984 case VHOST_USER_SET_LOG_BASE: 1985 return vu_set_log_base_exec(dev, vmsg); 1986 case VHOST_USER_SET_LOG_FD: 1987 return vu_set_log_fd_exec(dev, vmsg); 1988 case VHOST_USER_SET_VRING_NUM: 1989 return vu_set_vring_num_exec(dev, vmsg); 1990 case VHOST_USER_SET_VRING_ADDR: 1991 return vu_set_vring_addr_exec(dev, vmsg); 1992 case VHOST_USER_SET_VRING_BASE: 1993 return vu_set_vring_base_exec(dev, vmsg); 1994 case VHOST_USER_GET_VRING_BASE: 1995 return vu_get_vring_base_exec(dev, vmsg); 1996 case VHOST_USER_SET_VRING_KICK: 1997 return vu_set_vring_kick_exec(dev, vmsg); 1998 case VHOST_USER_SET_VRING_CALL: 1999 return vu_set_vring_call_exec(dev, vmsg); 2000 case VHOST_USER_SET_VRING_ERR: 2001 return vu_set_vring_err_exec(dev, vmsg); 2002 case VHOST_USER_GET_QUEUE_NUM: 2003 return vu_get_queue_num_exec(dev, vmsg); 2004 case VHOST_USER_SET_VRING_ENABLE: 2005 return vu_set_vring_enable_exec(dev, vmsg); 2006 case VHOST_USER_SET_BACKEND_REQ_FD: 2007 return vu_set_backend_req_fd(dev, vmsg); 2008 case VHOST_USER_GET_CONFIG: 2009 return vu_get_config(dev, vmsg); 2010 case VHOST_USER_SET_CONFIG: 2011 return vu_set_config(dev, vmsg); 2012 case VHOST_USER_NONE: 2013 /* if you need processing before exit, override iface->process_msg */ 2014 exit(0); 2015 case VHOST_USER_POSTCOPY_ADVISE: 2016 return vu_set_postcopy_advise(dev, vmsg); 2017 case VHOST_USER_POSTCOPY_LISTEN: 2018 return vu_set_postcopy_listen(dev, vmsg); 2019 case VHOST_USER_POSTCOPY_END: 2020 return vu_set_postcopy_end(dev, vmsg); 2021 case VHOST_USER_GET_INFLIGHT_FD: 2022 return vu_get_inflight_fd(dev, vmsg); 2023 case VHOST_USER_SET_INFLIGHT_FD: 2024 return vu_set_inflight_fd(dev, vmsg); 2025 case VHOST_USER_VRING_KICK: 2026 return vu_handle_vring_kick(dev, vmsg); 2027 case VHOST_USER_GET_MAX_MEM_SLOTS: 2028 return vu_handle_get_max_memslots(dev, vmsg); 2029 case VHOST_USER_ADD_MEM_REG: 2030 return vu_add_mem_reg(dev, vmsg); 2031 case VHOST_USER_REM_MEM_REG: 2032 return vu_rem_mem_reg(dev, vmsg); 2033 case VHOST_USER_GET_SHARED_OBJECT: 2034 return vu_get_shared_object(dev, vmsg); 2035 default: 2036 vmsg_close_fds(vmsg); 2037 vu_panic(dev, "Unhandled request: %d", vmsg->request); 2038 } 2039 2040 return false; 2041 } 2042 2043 bool 2044 vu_dispatch(VuDev *dev) 2045 { 2046 VhostUserMsg vmsg = { 0, }; 2047 int reply_requested; 2048 bool need_reply, success = false; 2049 2050 if (!dev->read_msg(dev, dev->sock, &vmsg)) { 2051 goto end; 2052 } 2053 2054 need_reply = vmsg.flags & VHOST_USER_NEED_REPLY_MASK; 2055 2056 reply_requested = vu_process_message(dev, &vmsg); 2057 if (!reply_requested && need_reply) { 2058 vmsg_set_reply_u64(&vmsg, 0); 2059 reply_requested = 1; 2060 } 2061 2062 if (!reply_requested) { 2063 success = true; 2064 goto end; 2065 } 2066 2067 if (!vu_send_reply(dev, dev->sock, &vmsg)) { 2068 goto end; 2069 } 2070 2071 success = true; 2072 2073 end: 2074 free(vmsg.data); 2075 return success; 2076 } 2077 2078 void 2079 vu_deinit(VuDev *dev) 2080 { 2081 unsigned int i; 2082 2083 vu_remove_all_mem_regs(dev); 2084 2085 for (i = 0; i < dev->max_queues; i++) { 2086 VuVirtq *vq = &dev->vq[i]; 2087 2088 if (vq->call_fd != -1) { 2089 close(vq->call_fd); 2090 vq->call_fd = -1; 2091 } 2092 2093 if (vq->kick_fd != -1) { 2094 dev->remove_watch(dev, vq->kick_fd); 2095 close(vq->kick_fd); 2096 vq->kick_fd = -1; 2097 } 2098 2099 if (vq->err_fd != -1) { 2100 close(vq->err_fd); 2101 vq->err_fd = -1; 2102 } 2103 2104 if (vq->resubmit_list) { 2105 free(vq->resubmit_list); 2106 vq->resubmit_list = NULL; 2107 } 2108 2109 vq->inflight = NULL; 2110 } 2111 2112 if (dev->inflight_info.addr) { 2113 munmap(dev->inflight_info.addr, dev->inflight_info.size); 2114 dev->inflight_info.addr = NULL; 2115 } 2116 2117 if (dev->inflight_info.fd > 0) { 2118 close(dev->inflight_info.fd); 2119 dev->inflight_info.fd = -1; 2120 } 2121 2122 vu_close_log(dev); 2123 if (dev->backend_fd != -1) { 2124 close(dev->backend_fd); 2125 dev->backend_fd = -1; 2126 } 2127 pthread_mutex_destroy(&dev->backend_mutex); 2128 2129 if (dev->sock != -1) { 2130 close(dev->sock); 2131 } 2132 2133 free(dev->vq); 2134 dev->vq = NULL; 2135 free(dev->regions); 2136 dev->regions = NULL; 2137 } 2138 2139 bool 2140 vu_init(VuDev *dev, 2141 uint16_t max_queues, 2142 int socket, 2143 vu_panic_cb panic, 2144 vu_read_msg_cb read_msg, 2145 vu_set_watch_cb set_watch, 2146 vu_remove_watch_cb remove_watch, 2147 const VuDevIface *iface) 2148 { 2149 uint16_t i; 2150 2151 assert(max_queues > 0); 2152 assert(socket >= 0); 2153 assert(set_watch); 2154 assert(remove_watch); 2155 assert(iface); 2156 assert(panic); 2157 2158 memset(dev, 0, sizeof(*dev)); 2159 2160 dev->sock = socket; 2161 dev->panic = panic; 2162 dev->read_msg = read_msg ? read_msg : vu_message_read_default; 2163 dev->set_watch = set_watch; 2164 dev->remove_watch = remove_watch; 2165 dev->iface = iface; 2166 dev->log_call_fd = -1; 2167 pthread_mutex_init(&dev->backend_mutex, NULL); 2168 dev->backend_fd = -1; 2169 dev->max_queues = max_queues; 2170 2171 dev->regions = malloc(VHOST_USER_MAX_RAM_SLOTS * sizeof(dev->regions[0])); 2172 if (!dev->regions) { 2173 DPRINT("%s: failed to malloc mem regions\n", __func__); 2174 return false; 2175 } 2176 memset(dev->regions, 0, VHOST_USER_MAX_RAM_SLOTS * sizeof(dev->regions[0])); 2177 2178 dev->vq = malloc(max_queues * sizeof(dev->vq[0])); 2179 if (!dev->vq) { 2180 DPRINT("%s: failed to malloc virtqueues\n", __func__); 2181 free(dev->regions); 2182 dev->regions = NULL; 2183 return false; 2184 } 2185 2186 for (i = 0; i < max_queues; i++) { 2187 dev->vq[i] = (VuVirtq) { 2188 .call_fd = -1, .kick_fd = -1, .err_fd = -1, 2189 .notification = true, 2190 }; 2191 } 2192 2193 return true; 2194 } 2195 2196 VuVirtq * 2197 vu_get_queue(VuDev *dev, int qidx) 2198 { 2199 assert(qidx < dev->max_queues); 2200 return &dev->vq[qidx]; 2201 } 2202 2203 bool 2204 vu_queue_enabled(VuDev *dev, VuVirtq *vq) 2205 { 2206 return vq->enable; 2207 } 2208 2209 bool 2210 vu_queue_started(const VuDev *dev, const VuVirtq *vq) 2211 { 2212 return vq->started; 2213 } 2214 2215 static inline uint16_t 2216 vring_avail_flags(VuVirtq *vq) 2217 { 2218 return le16toh(vq->vring.avail->flags); 2219 } 2220 2221 static inline uint16_t 2222 vring_avail_idx(VuVirtq *vq) 2223 { 2224 vq->shadow_avail_idx = le16toh(vq->vring.avail->idx); 2225 2226 return vq->shadow_avail_idx; 2227 } 2228 2229 static inline uint16_t 2230 vring_avail_ring(VuVirtq *vq, int i) 2231 { 2232 return le16toh(vq->vring.avail->ring[i]); 2233 } 2234 2235 static inline uint16_t 2236 vring_get_used_event(VuVirtq *vq) 2237 { 2238 return vring_avail_ring(vq, vq->vring.num); 2239 } 2240 2241 static int 2242 virtqueue_num_heads(VuDev *dev, VuVirtq *vq, unsigned int idx) 2243 { 2244 uint16_t num_heads = vring_avail_idx(vq) - idx; 2245 2246 /* Check it isn't doing very strange things with descriptor numbers. */ 2247 if (num_heads > vq->vring.num) { 2248 vu_panic(dev, "Guest moved used index from %u to %u", 2249 idx, vq->shadow_avail_idx); 2250 return -1; 2251 } 2252 if (num_heads) { 2253 /* On success, callers read a descriptor at vq->last_avail_idx. 2254 * Make sure descriptor read does not bypass avail index read. */ 2255 smp_rmb(); 2256 } 2257 2258 return num_heads; 2259 } 2260 2261 static bool 2262 virtqueue_get_head(VuDev *dev, VuVirtq *vq, 2263 unsigned int idx, unsigned int *head) 2264 { 2265 /* Grab the next descriptor number they're advertising, and increment 2266 * the index we've seen. */ 2267 *head = vring_avail_ring(vq, idx % vq->vring.num); 2268 2269 /* If their number is silly, that's a fatal mistake. */ 2270 if (*head >= vq->vring.num) { 2271 vu_panic(dev, "Guest says index %u is available", *head); 2272 return false; 2273 } 2274 2275 return true; 2276 } 2277 2278 static int 2279 virtqueue_read_indirect_desc(VuDev *dev, struct vring_desc *desc, 2280 uint64_t addr, size_t len) 2281 { 2282 struct vring_desc *ori_desc; 2283 uint64_t read_len; 2284 2285 if (len > (VIRTQUEUE_MAX_SIZE * sizeof(struct vring_desc))) { 2286 return -1; 2287 } 2288 2289 if (len == 0) { 2290 return -1; 2291 } 2292 2293 while (len) { 2294 read_len = len; 2295 ori_desc = vu_gpa_to_va(dev, &read_len, addr); 2296 if (!ori_desc) { 2297 return -1; 2298 } 2299 2300 memcpy(desc, ori_desc, read_len); 2301 len -= read_len; 2302 addr += read_len; 2303 desc += read_len; 2304 } 2305 2306 return 0; 2307 } 2308 2309 enum { 2310 VIRTQUEUE_READ_DESC_ERROR = -1, 2311 VIRTQUEUE_READ_DESC_DONE = 0, /* end of chain */ 2312 VIRTQUEUE_READ_DESC_MORE = 1, /* more buffers in chain */ 2313 }; 2314 2315 static int 2316 virtqueue_read_next_desc(VuDev *dev, struct vring_desc *desc, 2317 int i, unsigned int max, unsigned int *next) 2318 { 2319 /* If this descriptor says it doesn't chain, we're done. */ 2320 if (!(le16toh(desc[i].flags) & VRING_DESC_F_NEXT)) { 2321 return VIRTQUEUE_READ_DESC_DONE; 2322 } 2323 2324 /* Check they're not leading us off end of descriptors. */ 2325 *next = le16toh(desc[i].next); 2326 /* Make sure compiler knows to grab that: we don't want it changing! */ 2327 smp_wmb(); 2328 2329 if (*next >= max) { 2330 vu_panic(dev, "Desc next is %u", *next); 2331 return VIRTQUEUE_READ_DESC_ERROR; 2332 } 2333 2334 return VIRTQUEUE_READ_DESC_MORE; 2335 } 2336 2337 void 2338 vu_queue_get_avail_bytes(VuDev *dev, VuVirtq *vq, unsigned int *in_bytes, 2339 unsigned int *out_bytes, 2340 unsigned max_in_bytes, unsigned max_out_bytes) 2341 { 2342 unsigned int idx; 2343 unsigned int total_bufs, in_total, out_total; 2344 int rc; 2345 2346 idx = vq->last_avail_idx; 2347 2348 total_bufs = in_total = out_total = 0; 2349 if (unlikely(dev->broken) || 2350 unlikely(!vq->vring.avail)) { 2351 goto done; 2352 } 2353 2354 while ((rc = virtqueue_num_heads(dev, vq, idx)) > 0) { 2355 unsigned int max, desc_len, num_bufs, indirect = 0; 2356 uint64_t desc_addr, read_len; 2357 struct vring_desc *desc; 2358 struct vring_desc desc_buf[VIRTQUEUE_MAX_SIZE]; 2359 unsigned int i; 2360 2361 max = vq->vring.num; 2362 num_bufs = total_bufs; 2363 if (!virtqueue_get_head(dev, vq, idx++, &i)) { 2364 goto err; 2365 } 2366 desc = vq->vring.desc; 2367 2368 if (le16toh(desc[i].flags) & VRING_DESC_F_INDIRECT) { 2369 if (le32toh(desc[i].len) % sizeof(struct vring_desc)) { 2370 vu_panic(dev, "Invalid size for indirect buffer table"); 2371 goto err; 2372 } 2373 2374 /* If we've got too many, that implies a descriptor loop. */ 2375 if (num_bufs >= max) { 2376 vu_panic(dev, "Looped descriptor"); 2377 goto err; 2378 } 2379 2380 /* loop over the indirect descriptor table */ 2381 indirect = 1; 2382 desc_addr = le64toh(desc[i].addr); 2383 desc_len = le32toh(desc[i].len); 2384 max = desc_len / sizeof(struct vring_desc); 2385 read_len = desc_len; 2386 desc = vu_gpa_to_va(dev, &read_len, desc_addr); 2387 if (unlikely(desc && read_len != desc_len)) { 2388 /* Failed to use zero copy */ 2389 desc = NULL; 2390 if (!virtqueue_read_indirect_desc(dev, desc_buf, 2391 desc_addr, 2392 desc_len)) { 2393 desc = desc_buf; 2394 } 2395 } 2396 if (!desc) { 2397 vu_panic(dev, "Invalid indirect buffer table"); 2398 goto err; 2399 } 2400 num_bufs = i = 0; 2401 } 2402 2403 do { 2404 /* If we've got too many, that implies a descriptor loop. */ 2405 if (++num_bufs > max) { 2406 vu_panic(dev, "Looped descriptor"); 2407 goto err; 2408 } 2409 2410 if (le16toh(desc[i].flags) & VRING_DESC_F_WRITE) { 2411 in_total += le32toh(desc[i].len); 2412 } else { 2413 out_total += le32toh(desc[i].len); 2414 } 2415 if (in_total >= max_in_bytes && out_total >= max_out_bytes) { 2416 goto done; 2417 } 2418 rc = virtqueue_read_next_desc(dev, desc, i, max, &i); 2419 } while (rc == VIRTQUEUE_READ_DESC_MORE); 2420 2421 if (rc == VIRTQUEUE_READ_DESC_ERROR) { 2422 goto err; 2423 } 2424 2425 if (!indirect) { 2426 total_bufs = num_bufs; 2427 } else { 2428 total_bufs++; 2429 } 2430 } 2431 if (rc < 0) { 2432 goto err; 2433 } 2434 done: 2435 if (in_bytes) { 2436 *in_bytes = in_total; 2437 } 2438 if (out_bytes) { 2439 *out_bytes = out_total; 2440 } 2441 return; 2442 2443 err: 2444 in_total = out_total = 0; 2445 goto done; 2446 } 2447 2448 bool 2449 vu_queue_avail_bytes(VuDev *dev, VuVirtq *vq, unsigned int in_bytes, 2450 unsigned int out_bytes) 2451 { 2452 unsigned int in_total, out_total; 2453 2454 vu_queue_get_avail_bytes(dev, vq, &in_total, &out_total, 2455 in_bytes, out_bytes); 2456 2457 return in_bytes <= in_total && out_bytes <= out_total; 2458 } 2459 2460 /* Fetch avail_idx from VQ memory only when we really need to know if 2461 * guest has added some buffers. */ 2462 bool 2463 vu_queue_empty(VuDev *dev, VuVirtq *vq) 2464 { 2465 if (unlikely(dev->broken) || 2466 unlikely(!vq->vring.avail)) { 2467 return true; 2468 } 2469 2470 if (vq->shadow_avail_idx != vq->last_avail_idx) { 2471 return false; 2472 } 2473 2474 return vring_avail_idx(vq) == vq->last_avail_idx; 2475 } 2476 2477 static bool 2478 vring_notify(VuDev *dev, VuVirtq *vq) 2479 { 2480 uint16_t old, new; 2481 bool v; 2482 2483 /* We need to expose used array entries before checking used event. */ 2484 smp_mb(); 2485 2486 /* Always notify when queue is empty (when feature acknowledge) */ 2487 if (vu_has_feature(dev, VIRTIO_F_NOTIFY_ON_EMPTY) && 2488 !vq->inuse && vu_queue_empty(dev, vq)) { 2489 return true; 2490 } 2491 2492 if (!vu_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) { 2493 return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT); 2494 } 2495 2496 v = vq->signalled_used_valid; 2497 vq->signalled_used_valid = true; 2498 old = vq->signalled_used; 2499 new = vq->signalled_used = vq->used_idx; 2500 return !v || vring_need_event(vring_get_used_event(vq), new, old); 2501 } 2502 2503 static void _vu_queue_notify(VuDev *dev, VuVirtq *vq, bool sync) 2504 { 2505 if (unlikely(dev->broken) || 2506 unlikely(!vq->vring.avail)) { 2507 return; 2508 } 2509 2510 if (!vring_notify(dev, vq)) { 2511 DPRINT("skipped notify...\n"); 2512 return; 2513 } 2514 2515 if (vq->call_fd < 0 && 2516 vu_has_protocol_feature(dev, 2517 VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS) && 2518 vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_BACKEND_REQ)) { 2519 VhostUserMsg vmsg = { 2520 .request = VHOST_USER_BACKEND_VRING_CALL, 2521 .flags = VHOST_USER_VERSION, 2522 .size = sizeof(vmsg.payload.state), 2523 .payload.state = { 2524 .index = vq - dev->vq, 2525 }, 2526 }; 2527 bool ack = sync && 2528 vu_has_protocol_feature(dev, 2529 VHOST_USER_PROTOCOL_F_REPLY_ACK); 2530 2531 if (ack) { 2532 vmsg.flags |= VHOST_USER_NEED_REPLY_MASK; 2533 } 2534 2535 vu_message_write(dev, dev->backend_fd, &vmsg); 2536 if (ack) { 2537 vu_message_read_default(dev, dev->backend_fd, &vmsg); 2538 } 2539 return; 2540 } 2541 2542 if (eventfd_write(vq->call_fd, 1) < 0) { 2543 vu_panic(dev, "Error writing eventfd: %s", strerror(errno)); 2544 } 2545 } 2546 2547 void vu_queue_notify(VuDev *dev, VuVirtq *vq) 2548 { 2549 _vu_queue_notify(dev, vq, false); 2550 } 2551 2552 void vu_queue_notify_sync(VuDev *dev, VuVirtq *vq) 2553 { 2554 _vu_queue_notify(dev, vq, true); 2555 } 2556 2557 void vu_config_change_msg(VuDev *dev) 2558 { 2559 VhostUserMsg vmsg = { 2560 .request = VHOST_USER_BACKEND_CONFIG_CHANGE_MSG, 2561 .flags = VHOST_USER_VERSION, 2562 }; 2563 2564 vu_message_write(dev, dev->backend_fd, &vmsg); 2565 } 2566 2567 static inline void 2568 vring_used_flags_set_bit(VuVirtq *vq, int mask) 2569 { 2570 uint16_t *flags; 2571 2572 flags = (uint16_t *)((char*)vq->vring.used + 2573 offsetof(struct vring_used, flags)); 2574 *flags = htole16(le16toh(*flags) | mask); 2575 } 2576 2577 static inline void 2578 vring_used_flags_unset_bit(VuVirtq *vq, int mask) 2579 { 2580 uint16_t *flags; 2581 2582 flags = (uint16_t *)((char*)vq->vring.used + 2583 offsetof(struct vring_used, flags)); 2584 *flags = htole16(le16toh(*flags) & ~mask); 2585 } 2586 2587 static inline void 2588 vring_set_avail_event(VuVirtq *vq, uint16_t val) 2589 { 2590 uint16_t val_le = htole16(val); 2591 2592 if (!vq->notification) { 2593 return; 2594 } 2595 2596 memcpy(&vq->vring.used->ring[vq->vring.num], &val_le, sizeof(uint16_t)); 2597 } 2598 2599 void 2600 vu_queue_set_notification(VuDev *dev, VuVirtq *vq, int enable) 2601 { 2602 vq->notification = enable; 2603 if (vu_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) { 2604 vring_set_avail_event(vq, vring_avail_idx(vq)); 2605 } else if (enable) { 2606 vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY); 2607 } else { 2608 vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY); 2609 } 2610 if (enable) { 2611 /* Expose avail event/used flags before caller checks the avail idx. */ 2612 smp_mb(); 2613 } 2614 } 2615 2616 static bool 2617 virtqueue_map_desc(VuDev *dev, 2618 unsigned int *p_num_sg, struct iovec *iov, 2619 unsigned int max_num_sg, bool is_write, 2620 uint64_t pa, size_t sz) 2621 { 2622 unsigned num_sg = *p_num_sg; 2623 2624 assert(num_sg <= max_num_sg); 2625 2626 if (!sz) { 2627 vu_panic(dev, "virtio: zero sized buffers are not allowed"); 2628 return false; 2629 } 2630 2631 while (sz) { 2632 uint64_t len = sz; 2633 2634 if (num_sg == max_num_sg) { 2635 vu_panic(dev, "virtio: too many descriptors in indirect table"); 2636 return false; 2637 } 2638 2639 iov[num_sg].iov_base = vu_gpa_to_va(dev, &len, pa); 2640 if (iov[num_sg].iov_base == NULL) { 2641 vu_panic(dev, "virtio: invalid address for buffers"); 2642 return false; 2643 } 2644 iov[num_sg].iov_len = len; 2645 num_sg++; 2646 sz -= len; 2647 pa += len; 2648 } 2649 2650 *p_num_sg = num_sg; 2651 return true; 2652 } 2653 2654 static void * 2655 virtqueue_alloc_element(size_t sz, 2656 unsigned out_num, unsigned in_num) 2657 { 2658 VuVirtqElement *elem; 2659 size_t in_sg_ofs = ALIGN_UP(sz, __alignof__(elem->in_sg[0])); 2660 size_t out_sg_ofs = in_sg_ofs + in_num * sizeof(elem->in_sg[0]); 2661 size_t out_sg_end = out_sg_ofs + out_num * sizeof(elem->out_sg[0]); 2662 2663 assert(sz >= sizeof(VuVirtqElement)); 2664 elem = malloc(out_sg_end); 2665 if (!elem) { 2666 DPRINT("%s: failed to malloc virtqueue element\n", __func__); 2667 return NULL; 2668 } 2669 elem->out_num = out_num; 2670 elem->in_num = in_num; 2671 elem->in_sg = (void *)elem + in_sg_ofs; 2672 elem->out_sg = (void *)elem + out_sg_ofs; 2673 return elem; 2674 } 2675 2676 static void * 2677 vu_queue_map_desc(VuDev *dev, VuVirtq *vq, unsigned int idx, size_t sz) 2678 { 2679 struct vring_desc *desc = vq->vring.desc; 2680 uint64_t desc_addr, read_len; 2681 unsigned int desc_len; 2682 unsigned int max = vq->vring.num; 2683 unsigned int i = idx; 2684 VuVirtqElement *elem; 2685 unsigned int out_num = 0, in_num = 0; 2686 struct iovec iov[VIRTQUEUE_MAX_SIZE]; 2687 struct vring_desc desc_buf[VIRTQUEUE_MAX_SIZE]; 2688 int rc; 2689 2690 if (le16toh(desc[i].flags) & VRING_DESC_F_INDIRECT) { 2691 if (le32toh(desc[i].len) % sizeof(struct vring_desc)) { 2692 vu_panic(dev, "Invalid size for indirect buffer table"); 2693 return NULL; 2694 } 2695 2696 /* loop over the indirect descriptor table */ 2697 desc_addr = le64toh(desc[i].addr); 2698 desc_len = le32toh(desc[i].len); 2699 max = desc_len / sizeof(struct vring_desc); 2700 read_len = desc_len; 2701 desc = vu_gpa_to_va(dev, &read_len, desc_addr); 2702 if (unlikely(desc && read_len != desc_len)) { 2703 /* Failed to use zero copy */ 2704 desc = NULL; 2705 if (!virtqueue_read_indirect_desc(dev, desc_buf, 2706 desc_addr, 2707 desc_len)) { 2708 desc = desc_buf; 2709 } 2710 } 2711 if (!desc) { 2712 vu_panic(dev, "Invalid indirect buffer table"); 2713 return NULL; 2714 } 2715 i = 0; 2716 } 2717 2718 /* Collect all the descriptors */ 2719 do { 2720 if (le16toh(desc[i].flags) & VRING_DESC_F_WRITE) { 2721 if (!virtqueue_map_desc(dev, &in_num, iov + out_num, 2722 VIRTQUEUE_MAX_SIZE - out_num, true, 2723 le64toh(desc[i].addr), 2724 le32toh(desc[i].len))) { 2725 return NULL; 2726 } 2727 } else { 2728 if (in_num) { 2729 vu_panic(dev, "Incorrect order for descriptors"); 2730 return NULL; 2731 } 2732 if (!virtqueue_map_desc(dev, &out_num, iov, 2733 VIRTQUEUE_MAX_SIZE, false, 2734 le64toh(desc[i].addr), 2735 le32toh(desc[i].len))) { 2736 return NULL; 2737 } 2738 } 2739 2740 /* If we've got too many, that implies a descriptor loop. */ 2741 if ((in_num + out_num) > max) { 2742 vu_panic(dev, "Looped descriptor"); 2743 return NULL; 2744 } 2745 rc = virtqueue_read_next_desc(dev, desc, i, max, &i); 2746 } while (rc == VIRTQUEUE_READ_DESC_MORE); 2747 2748 if (rc == VIRTQUEUE_READ_DESC_ERROR) { 2749 vu_panic(dev, "read descriptor error"); 2750 return NULL; 2751 } 2752 2753 /* Now copy what we have collected and mapped */ 2754 elem = virtqueue_alloc_element(sz, out_num, in_num); 2755 if (!elem) { 2756 return NULL; 2757 } 2758 elem->index = idx; 2759 for (i = 0; i < out_num; i++) { 2760 elem->out_sg[i] = iov[i]; 2761 } 2762 for (i = 0; i < in_num; i++) { 2763 elem->in_sg[i] = iov[out_num + i]; 2764 } 2765 2766 return elem; 2767 } 2768 2769 static int 2770 vu_queue_inflight_get(VuDev *dev, VuVirtq *vq, int desc_idx) 2771 { 2772 if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) { 2773 return 0; 2774 } 2775 2776 if (unlikely(!vq->inflight)) { 2777 return -1; 2778 } 2779 2780 vq->inflight->desc[desc_idx].counter = vq->counter++; 2781 vq->inflight->desc[desc_idx].inflight = 1; 2782 2783 return 0; 2784 } 2785 2786 static int 2787 vu_queue_inflight_pre_put(VuDev *dev, VuVirtq *vq, int desc_idx) 2788 { 2789 if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) { 2790 return 0; 2791 } 2792 2793 if (unlikely(!vq->inflight)) { 2794 return -1; 2795 } 2796 2797 vq->inflight->last_batch_head = desc_idx; 2798 2799 return 0; 2800 } 2801 2802 static int 2803 vu_queue_inflight_post_put(VuDev *dev, VuVirtq *vq, int desc_idx) 2804 { 2805 if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) { 2806 return 0; 2807 } 2808 2809 if (unlikely(!vq->inflight)) { 2810 return -1; 2811 } 2812 2813 barrier(); 2814 2815 vq->inflight->desc[desc_idx].inflight = 0; 2816 2817 barrier(); 2818 2819 vq->inflight->used_idx = vq->used_idx; 2820 2821 return 0; 2822 } 2823 2824 void * 2825 vu_queue_pop(VuDev *dev, VuVirtq *vq, size_t sz) 2826 { 2827 int i; 2828 unsigned int head; 2829 VuVirtqElement *elem; 2830 2831 if (unlikely(dev->broken) || 2832 unlikely(!vq->vring.avail)) { 2833 return NULL; 2834 } 2835 2836 if (unlikely(vq->resubmit_list && vq->resubmit_num > 0)) { 2837 i = (--vq->resubmit_num); 2838 elem = vu_queue_map_desc(dev, vq, vq->resubmit_list[i].index, sz); 2839 2840 if (!vq->resubmit_num) { 2841 free(vq->resubmit_list); 2842 vq->resubmit_list = NULL; 2843 } 2844 2845 return elem; 2846 } 2847 2848 if (vu_queue_empty(dev, vq)) { 2849 return NULL; 2850 } 2851 /* 2852 * Needed after virtio_queue_empty(), see comment in 2853 * virtqueue_num_heads(). 2854 */ 2855 smp_rmb(); 2856 2857 if (vq->inuse >= vq->vring.num) { 2858 vu_panic(dev, "Virtqueue size exceeded"); 2859 return NULL; 2860 } 2861 2862 if (!virtqueue_get_head(dev, vq, vq->last_avail_idx++, &head)) { 2863 return NULL; 2864 } 2865 2866 if (vu_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) { 2867 vring_set_avail_event(vq, vq->last_avail_idx); 2868 } 2869 2870 elem = vu_queue_map_desc(dev, vq, head, sz); 2871 2872 if (!elem) { 2873 return NULL; 2874 } 2875 2876 vq->inuse++; 2877 2878 vu_queue_inflight_get(dev, vq, head); 2879 2880 return elem; 2881 } 2882 2883 static void 2884 vu_queue_detach_element(VuDev *dev, VuVirtq *vq, VuVirtqElement *elem, 2885 size_t len) 2886 { 2887 vq->inuse--; 2888 /* unmap, when DMA support is added */ 2889 } 2890 2891 void 2892 vu_queue_unpop(VuDev *dev, VuVirtq *vq, VuVirtqElement *elem, 2893 size_t len) 2894 { 2895 vq->last_avail_idx--; 2896 vu_queue_detach_element(dev, vq, elem, len); 2897 } 2898 2899 bool 2900 vu_queue_rewind(VuDev *dev, VuVirtq *vq, unsigned int num) 2901 { 2902 if (num > vq->inuse) { 2903 return false; 2904 } 2905 vq->last_avail_idx -= num; 2906 vq->inuse -= num; 2907 return true; 2908 } 2909 2910 static inline 2911 void vring_used_write(VuDev *dev, VuVirtq *vq, 2912 struct vring_used_elem *uelem, int i) 2913 { 2914 struct vring_used *used = vq->vring.used; 2915 2916 used->ring[i] = *uelem; 2917 vu_log_write(dev, vq->vring.log_guest_addr + 2918 offsetof(struct vring_used, ring[i]), 2919 sizeof(used->ring[i])); 2920 } 2921 2922 2923 static void 2924 vu_log_queue_fill(VuDev *dev, VuVirtq *vq, 2925 const VuVirtqElement *elem, 2926 unsigned int len) 2927 { 2928 struct vring_desc *desc = vq->vring.desc; 2929 unsigned int i, max, min, desc_len; 2930 uint64_t desc_addr, read_len; 2931 struct vring_desc desc_buf[VIRTQUEUE_MAX_SIZE]; 2932 unsigned num_bufs = 0; 2933 2934 max = vq->vring.num; 2935 i = elem->index; 2936 2937 if (le16toh(desc[i].flags) & VRING_DESC_F_INDIRECT) { 2938 if (le32toh(desc[i].len) % sizeof(struct vring_desc)) { 2939 vu_panic(dev, "Invalid size for indirect buffer table"); 2940 return; 2941 } 2942 2943 /* loop over the indirect descriptor table */ 2944 desc_addr = le64toh(desc[i].addr); 2945 desc_len = le32toh(desc[i].len); 2946 max = desc_len / sizeof(struct vring_desc); 2947 read_len = desc_len; 2948 desc = vu_gpa_to_va(dev, &read_len, desc_addr); 2949 if (unlikely(desc && read_len != desc_len)) { 2950 /* Failed to use zero copy */ 2951 desc = NULL; 2952 if (!virtqueue_read_indirect_desc(dev, desc_buf, 2953 desc_addr, 2954 desc_len)) { 2955 desc = desc_buf; 2956 } 2957 } 2958 if (!desc) { 2959 vu_panic(dev, "Invalid indirect buffer table"); 2960 return; 2961 } 2962 i = 0; 2963 } 2964 2965 do { 2966 if (++num_bufs > max) { 2967 vu_panic(dev, "Looped descriptor"); 2968 return; 2969 } 2970 2971 if (le16toh(desc[i].flags) & VRING_DESC_F_WRITE) { 2972 min = MIN(le32toh(desc[i].len), len); 2973 vu_log_write(dev, le64toh(desc[i].addr), min); 2974 len -= min; 2975 } 2976 2977 } while (len > 0 && 2978 (virtqueue_read_next_desc(dev, desc, i, max, &i) 2979 == VIRTQUEUE_READ_DESC_MORE)); 2980 } 2981 2982 void 2983 vu_queue_fill(VuDev *dev, VuVirtq *vq, 2984 const VuVirtqElement *elem, 2985 unsigned int len, unsigned int idx) 2986 { 2987 struct vring_used_elem uelem; 2988 2989 if (unlikely(dev->broken) || 2990 unlikely(!vq->vring.avail)) { 2991 return; 2992 } 2993 2994 vu_log_queue_fill(dev, vq, elem, len); 2995 2996 idx = (idx + vq->used_idx) % vq->vring.num; 2997 2998 uelem.id = htole32(elem->index); 2999 uelem.len = htole32(len); 3000 vring_used_write(dev, vq, &uelem, idx); 3001 } 3002 3003 static inline 3004 void vring_used_idx_set(VuDev *dev, VuVirtq *vq, uint16_t val) 3005 { 3006 vq->vring.used->idx = htole16(val); 3007 vu_log_write(dev, 3008 vq->vring.log_guest_addr + offsetof(struct vring_used, idx), 3009 sizeof(vq->vring.used->idx)); 3010 3011 vq->used_idx = val; 3012 } 3013 3014 void 3015 vu_queue_flush(VuDev *dev, VuVirtq *vq, unsigned int count) 3016 { 3017 uint16_t old, new; 3018 3019 if (unlikely(dev->broken) || 3020 unlikely(!vq->vring.avail)) { 3021 return; 3022 } 3023 3024 /* Make sure buffer is written before we update index. */ 3025 smp_wmb(); 3026 3027 old = vq->used_idx; 3028 new = old + count; 3029 vring_used_idx_set(dev, vq, new); 3030 vq->inuse -= count; 3031 if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old))) { 3032 vq->signalled_used_valid = false; 3033 } 3034 } 3035 3036 void 3037 vu_queue_push(VuDev *dev, VuVirtq *vq, 3038 const VuVirtqElement *elem, unsigned int len) 3039 { 3040 vu_queue_fill(dev, vq, elem, len, 0); 3041 vu_queue_inflight_pre_put(dev, vq, elem->index); 3042 vu_queue_flush(dev, vq, 1); 3043 vu_queue_inflight_post_put(dev, vq, elem->index); 3044 } 3045