1 /* 2 * Vhost User library 3 * 4 * Copyright IBM, Corp. 2007 5 * Copyright (c) 2016 Red Hat, Inc. 6 * 7 * Authors: 8 * Anthony Liguori <aliguori@us.ibm.com> 9 * Marc-André Lureau <mlureau@redhat.com> 10 * Victor Kaplansky <victork@redhat.com> 11 * 12 * This work is licensed under the terms of the GNU GPL, version 2 or 13 * later. See the COPYING file in the top-level directory. 14 */ 15 16 #ifndef _GNU_SOURCE 17 #define _GNU_SOURCE 18 #endif 19 20 /* this code avoids GLib dependency */ 21 #include <stdlib.h> 22 #include <stdio.h> 23 #include <unistd.h> 24 #include <stdarg.h> 25 #include <errno.h> 26 #include <string.h> 27 #include <assert.h> 28 #include <inttypes.h> 29 #include <sys/types.h> 30 #include <sys/socket.h> 31 #include <sys/eventfd.h> 32 #include <sys/mman.h> 33 #include <endian.h> 34 35 /* Necessary to provide VIRTIO_F_VERSION_1 on system 36 * with older linux headers. Must appear before 37 * <linux/vhost.h> below. 38 */ 39 #include "standard-headers/linux/virtio_config.h" 40 41 #if defined(__linux__) 42 #include <sys/syscall.h> 43 #include <fcntl.h> 44 #include <sys/ioctl.h> 45 #include <linux/vhost.h> 46 #include <sys/vfs.h> 47 #include <linux/magic.h> 48 49 #ifdef __NR_userfaultfd 50 #include <linux/userfaultfd.h> 51 #endif 52 53 #endif 54 55 #include "include/atomic.h" 56 57 #include "libvhost-user.h" 58 59 /* usually provided by GLib */ 60 #if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ > 4) 61 #if !defined(__clang__) && (__GNUC__ == 4 && __GNUC_MINOR__ == 4) 62 #define G_GNUC_PRINTF(format_idx, arg_idx) \ 63 __attribute__((__format__(gnu_printf, format_idx, arg_idx))) 64 #else 65 #define G_GNUC_PRINTF(format_idx, arg_idx) \ 66 __attribute__((__format__(__printf__, format_idx, arg_idx))) 67 #endif 68 #else /* !__GNUC__ */ 69 #define G_GNUC_PRINTF(format_idx, arg_idx) 70 #endif /* !__GNUC__ */ 71 #ifndef MIN 72 #define MIN(x, y) ({ \ 73 __typeof__(x) _min1 = (x); \ 74 __typeof__(y) _min2 = (y); \ 75 (void) (&_min1 == &_min2); \ 76 _min1 < _min2 ? _min1 : _min2; }) 77 #endif 78 79 /* Round number down to multiple */ 80 #define ALIGN_DOWN(n, m) ((n) / (m) * (m)) 81 82 /* Round number up to multiple */ 83 #define ALIGN_UP(n, m) ALIGN_DOWN((n) + (m) - 1, (m)) 84 85 #ifndef unlikely 86 #define unlikely(x) __builtin_expect(!!(x), 0) 87 #endif 88 89 /* Align each region to cache line size in inflight buffer */ 90 #define INFLIGHT_ALIGNMENT 64 91 92 /* The version of inflight buffer */ 93 #define INFLIGHT_VERSION 1 94 95 /* The version of the protocol we support */ 96 #define VHOST_USER_VERSION 1 97 #define LIBVHOST_USER_DEBUG 0 98 99 #define DPRINT(...) 
\ 100 do { \ 101 if (LIBVHOST_USER_DEBUG) { \ 102 fprintf(stderr, __VA_ARGS__); \ 103 } \ 104 } while (0) 105 106 static inline 107 bool has_feature(uint64_t features, unsigned int fbit) 108 { 109 assert(fbit < 64); 110 return !!(features & (1ULL << fbit)); 111 } 112 113 static inline 114 bool vu_has_feature(VuDev *dev, 115 unsigned int fbit) 116 { 117 return has_feature(dev->features, fbit); 118 } 119 120 static inline bool vu_has_protocol_feature(VuDev *dev, unsigned int fbit) 121 { 122 return has_feature(dev->protocol_features, fbit); 123 } 124 125 const char * 126 vu_request_to_string(unsigned int req) 127 { 128 #define REQ(req) [req] = #req 129 static const char *vu_request_str[] = { 130 REQ(VHOST_USER_NONE), 131 REQ(VHOST_USER_GET_FEATURES), 132 REQ(VHOST_USER_SET_FEATURES), 133 REQ(VHOST_USER_SET_OWNER), 134 REQ(VHOST_USER_RESET_OWNER), 135 REQ(VHOST_USER_SET_MEM_TABLE), 136 REQ(VHOST_USER_SET_LOG_BASE), 137 REQ(VHOST_USER_SET_LOG_FD), 138 REQ(VHOST_USER_SET_VRING_NUM), 139 REQ(VHOST_USER_SET_VRING_ADDR), 140 REQ(VHOST_USER_SET_VRING_BASE), 141 REQ(VHOST_USER_GET_VRING_BASE), 142 REQ(VHOST_USER_SET_VRING_KICK), 143 REQ(VHOST_USER_SET_VRING_CALL), 144 REQ(VHOST_USER_SET_VRING_ERR), 145 REQ(VHOST_USER_GET_PROTOCOL_FEATURES), 146 REQ(VHOST_USER_SET_PROTOCOL_FEATURES), 147 REQ(VHOST_USER_GET_QUEUE_NUM), 148 REQ(VHOST_USER_SET_VRING_ENABLE), 149 REQ(VHOST_USER_SEND_RARP), 150 REQ(VHOST_USER_NET_SET_MTU), 151 REQ(VHOST_USER_SET_BACKEND_REQ_FD), 152 REQ(VHOST_USER_IOTLB_MSG), 153 REQ(VHOST_USER_SET_VRING_ENDIAN), 154 REQ(VHOST_USER_GET_CONFIG), 155 REQ(VHOST_USER_SET_CONFIG), 156 REQ(VHOST_USER_POSTCOPY_ADVISE), 157 REQ(VHOST_USER_POSTCOPY_LISTEN), 158 REQ(VHOST_USER_POSTCOPY_END), 159 REQ(VHOST_USER_GET_INFLIGHT_FD), 160 REQ(VHOST_USER_SET_INFLIGHT_FD), 161 REQ(VHOST_USER_GPU_SET_SOCKET), 162 REQ(VHOST_USER_VRING_KICK), 163 REQ(VHOST_USER_GET_MAX_MEM_SLOTS), 164 REQ(VHOST_USER_ADD_MEM_REG), 165 REQ(VHOST_USER_REM_MEM_REG), 166 REQ(VHOST_USER_GET_SHARED_OBJECT), 167 REQ(VHOST_USER_MAX), 168 }; 169 #undef REQ 170 171 if (req < VHOST_USER_MAX) { 172 return vu_request_str[req]; 173 } else { 174 return "unknown"; 175 } 176 } 177 178 static void G_GNUC_PRINTF(2, 3) 179 vu_panic(VuDev *dev, const char *msg, ...) 180 { 181 char *buf = NULL; 182 va_list ap; 183 184 va_start(ap, msg); 185 if (vasprintf(&buf, msg, ap) < 0) { 186 buf = NULL; 187 } 188 va_end(ap); 189 190 dev->broken = true; 191 dev->panic(dev, buf); 192 free(buf); 193 194 /* 195 * FIXME: 196 * find a way to call virtio_error, or perhaps close the connection? 197 */ 198 } 199 200 /* Search for a memory region that covers this guest physical address. */ 201 static VuDevRegion * 202 vu_gpa_to_mem_region(VuDev *dev, uint64_t guest_addr) 203 { 204 int low = 0; 205 int high = dev->nregions - 1; 206 207 /* 208 * Memory regions cannot overlap in guest physical address space. Each 209 * GPA belongs to exactly one memory region, so there can only be one 210 * match. 211 * 212 * We store our memory regions ordered by GPA and can simply perform a 213 * binary search. 214 */ 215 while (low <= high) { 216 unsigned int mid = low + (high - low) / 2; 217 VuDevRegion *cur = &dev->regions[mid]; 218 219 if (guest_addr >= cur->gpa && guest_addr < cur->gpa + cur->size) { 220 return cur; 221 } 222 if (guest_addr >= cur->gpa + cur->size) { 223 low = mid + 1; 224 } 225 if (guest_addr < cur->gpa) { 226 high = mid - 1; 227 } 228 } 229 return NULL; 230 } 231 232 /* Translate guest physical address to our virtual address. 
*/ 233 void * 234 vu_gpa_to_va(VuDev *dev, uint64_t *plen, uint64_t guest_addr) 235 { 236 VuDevRegion *r; 237 238 if (*plen == 0) { 239 return NULL; 240 } 241 242 r = vu_gpa_to_mem_region(dev, guest_addr); 243 if (!r) { 244 return NULL; 245 } 246 247 if ((guest_addr + *plen) > (r->gpa + r->size)) { 248 *plen = r->gpa + r->size - guest_addr; 249 } 250 return (void *)(uintptr_t)guest_addr - r->gpa + r->mmap_addr + 251 r->mmap_offset; 252 } 253 254 /* Translate qemu virtual address to our virtual address. */ 255 static void * 256 qva_to_va(VuDev *dev, uint64_t qemu_addr) 257 { 258 unsigned int i; 259 260 /* Find matching memory region. */ 261 for (i = 0; i < dev->nregions; i++) { 262 VuDevRegion *r = &dev->regions[i]; 263 264 if ((qemu_addr >= r->qva) && (qemu_addr < (r->qva + r->size))) { 265 return (void *)(uintptr_t) 266 qemu_addr - r->qva + r->mmap_addr + r->mmap_offset; 267 } 268 } 269 270 return NULL; 271 } 272 273 static void 274 vu_remove_all_mem_regs(VuDev *dev) 275 { 276 unsigned int i; 277 278 for (i = 0; i < dev->nregions; i++) { 279 VuDevRegion *r = &dev->regions[i]; 280 281 munmap((void *)(uintptr_t)r->mmap_addr, r->size + r->mmap_offset); 282 } 283 dev->nregions = 0; 284 } 285 286 static size_t 287 get_fd_hugepagesize(int fd) 288 { 289 #if defined(__linux__) 290 struct statfs fs; 291 int ret; 292 293 do { 294 ret = fstatfs(fd, &fs); 295 } while (ret != 0 && errno == EINTR); 296 297 if (!ret && (unsigned int)fs.f_type == HUGETLBFS_MAGIC) { 298 return fs.f_bsize; 299 } 300 #endif 301 return 0; 302 } 303 304 static void 305 _vu_add_mem_reg(VuDev *dev, VhostUserMemoryRegion *msg_region, int fd) 306 { 307 const uint64_t start_gpa = msg_region->guest_phys_addr; 308 const uint64_t end_gpa = start_gpa + msg_region->memory_size; 309 int prot = PROT_READ | PROT_WRITE; 310 uint64_t mmap_offset, fd_offset; 311 size_t hugepagesize; 312 VuDevRegion *r; 313 void *mmap_addr; 314 int low = 0; 315 int high = dev->nregions - 1; 316 unsigned int idx; 317 318 DPRINT("Adding region %d\n", dev->nregions); 319 DPRINT(" guest_phys_addr: 0x%016"PRIx64"\n", 320 msg_region->guest_phys_addr); 321 DPRINT(" memory_size: 0x%016"PRIx64"\n", 322 msg_region->memory_size); 323 DPRINT(" userspace_addr: 0x%016"PRIx64"\n", 324 msg_region->userspace_addr); 325 DPRINT(" old mmap_offset: 0x%016"PRIx64"\n", 326 msg_region->mmap_offset); 327 328 if (dev->postcopy_listening) { 329 /* 330 * In postcopy we're using PROT_NONE here to catch anyone 331 * accessing it before we userfault 332 */ 333 prot = PROT_NONE; 334 } 335 336 /* 337 * We will add memory regions into the array sorted by GPA. Perform a 338 * binary search to locate the insertion point: it will be at the low 339 * index. 340 */ 341 while (low <= high) { 342 unsigned int mid = low + (high - low) / 2; 343 VuDevRegion *cur = &dev->regions[mid]; 344 345 /* Overlap of GPA addresses. */ 346 if (start_gpa < cur->gpa + cur->size && cur->gpa < end_gpa) { 347 vu_panic(dev, "regions with overlapping guest physical addresses"); 348 return; 349 } 350 if (start_gpa >= cur->gpa + cur->size) { 351 low = mid + 1; 352 } 353 if (start_gpa < cur->gpa) { 354 high = mid - 1; 355 } 356 } 357 idx = low; 358 359 /* 360 * Convert most of msg_region->mmap_offset to fd_offset. In almost all 361 * cases, this will leave us with mmap_offset == 0, mmap()'ing only 362 * what we really need. Only if a memory region would partially cover 363 * hugetlb pages, we'd get mmap_offset != 0, which usually doesn't happen 364 * anymore (i.e., modern QEMU). 
365 * 366 * Note that mmap() with hugetlb would fail if the offset into the file 367 * is not aligned to the huge page size. 368 */ 369 hugepagesize = get_fd_hugepagesize(fd); 370 if (hugepagesize) { 371 fd_offset = ALIGN_DOWN(msg_region->mmap_offset, hugepagesize); 372 mmap_offset = msg_region->mmap_offset - fd_offset; 373 } else { 374 fd_offset = msg_region->mmap_offset; 375 mmap_offset = 0; 376 } 377 378 DPRINT(" fd_offset: 0x%016"PRIx64"\n", 379 fd_offset); 380 DPRINT(" new mmap_offset: 0x%016"PRIx64"\n", 381 mmap_offset); 382 383 mmap_addr = mmap(0, msg_region->memory_size + mmap_offset, 384 prot, MAP_SHARED | MAP_NORESERVE, fd, fd_offset); 385 if (mmap_addr == MAP_FAILED) { 386 vu_panic(dev, "region mmap error: %s", strerror(errno)); 387 return; 388 } 389 DPRINT(" mmap_addr: 0x%016"PRIx64"\n", 390 (uint64_t)(uintptr_t)mmap_addr); 391 392 /* Shift all affected entries by 1 to open a hole at idx. */ 393 r = &dev->regions[idx]; 394 memmove(r + 1, r, sizeof(VuDevRegion) * (dev->nregions - idx)); 395 r->gpa = msg_region->guest_phys_addr; 396 r->size = msg_region->memory_size; 397 r->qva = msg_region->userspace_addr; 398 r->mmap_addr = (uint64_t)(uintptr_t)mmap_addr; 399 r->mmap_offset = mmap_offset; 400 dev->nregions++; 401 402 if (dev->postcopy_listening) { 403 /* 404 * Return the address to QEMU so that it can translate the ufd 405 * fault addresses back. 406 */ 407 msg_region->userspace_addr = r->mmap_addr + r->mmap_offset; 408 } 409 } 410 411 static void 412 vmsg_close_fds(VhostUserMsg *vmsg) 413 { 414 int i; 415 416 for (i = 0; i < vmsg->fd_num; i++) { 417 close(vmsg->fds[i]); 418 } 419 } 420 421 /* Set reply payload.u64 and clear request flags and fd_num */ 422 static void vmsg_set_reply_u64(VhostUserMsg *vmsg, uint64_t val) 423 { 424 vmsg->flags = 0; /* defaults will be set by vu_send_reply() */ 425 vmsg->size = sizeof(vmsg->payload.u64); 426 vmsg->payload.u64 = val; 427 vmsg->fd_num = 0; 428 } 429 430 /* A test to see if we have userfault available */ 431 static bool 432 have_userfault(void) 433 { 434 #if defined(__linux__) && defined(__NR_userfaultfd) &&\ 435 defined(UFFD_FEATURE_MISSING_SHMEM) &&\ 436 defined(UFFD_FEATURE_MISSING_HUGETLBFS) 437 /* Now test the kernel we're running on really has the features */ 438 int ufd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK); 439 struct uffdio_api api_struct; 440 if (ufd < 0) { 441 return false; 442 } 443 444 api_struct.api = UFFD_API; 445 api_struct.features = UFFD_FEATURE_MISSING_SHMEM | 446 UFFD_FEATURE_MISSING_HUGETLBFS; 447 if (ioctl(ufd, UFFDIO_API, &api_struct)) { 448 close(ufd); 449 return false; 450 } 451 close(ufd); 452 return true; 453 454 #else 455 return false; 456 #endif 457 } 458 459 static bool 460 vu_message_read_default(VuDev *dev, int conn_fd, VhostUserMsg *vmsg) 461 { 462 char control[CMSG_SPACE(VHOST_MEMORY_BASELINE_NREGIONS * sizeof(int))] = {}; 463 struct iovec iov = { 464 .iov_base = (char *)vmsg, 465 .iov_len = VHOST_USER_HDR_SIZE, 466 }; 467 struct msghdr msg = { 468 .msg_iov = &iov, 469 .msg_iovlen = 1, 470 .msg_control = control, 471 .msg_controllen = sizeof(control), 472 }; 473 size_t fd_size; 474 struct cmsghdr *cmsg; 475 int rc; 476 477 do { 478 rc = recvmsg(conn_fd, &msg, 0); 479 } while (rc < 0 && (errno == EINTR || errno == EAGAIN)); 480 481 if (rc < 0) { 482 vu_panic(dev, "Error while recvmsg: %s", strerror(errno)); 483 return false; 484 } 485 486 vmsg->fd_num = 0; 487 for (cmsg = CMSG_FIRSTHDR(&msg); 488 cmsg != NULL; 489 cmsg = CMSG_NXTHDR(&msg, cmsg)) 490 { 491 if (cmsg->cmsg_level == 
SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) { 492 fd_size = cmsg->cmsg_len - CMSG_LEN(0); 493 vmsg->fd_num = fd_size / sizeof(int); 494 assert(fd_size < VHOST_MEMORY_BASELINE_NREGIONS); 495 memcpy(vmsg->fds, CMSG_DATA(cmsg), fd_size); 496 break; 497 } 498 } 499 500 if (vmsg->size > sizeof(vmsg->payload)) { 501 vu_panic(dev, 502 "Error: too big message request: %d, size: vmsg->size: %u, " 503 "while sizeof(vmsg->payload) = %zu\n", 504 vmsg->request, vmsg->size, sizeof(vmsg->payload)); 505 goto fail; 506 } 507 508 if (vmsg->size) { 509 do { 510 rc = read(conn_fd, &vmsg->payload, vmsg->size); 511 } while (rc < 0 && (errno == EINTR || errno == EAGAIN)); 512 513 if (rc <= 0) { 514 vu_panic(dev, "Error while reading: %s", strerror(errno)); 515 goto fail; 516 } 517 518 assert((uint32_t)rc == vmsg->size); 519 } 520 521 return true; 522 523 fail: 524 vmsg_close_fds(vmsg); 525 526 return false; 527 } 528 529 static bool 530 vu_message_write(VuDev *dev, int conn_fd, VhostUserMsg *vmsg) 531 { 532 int rc; 533 uint8_t *p = (uint8_t *)vmsg; 534 char control[CMSG_SPACE(VHOST_MEMORY_BASELINE_NREGIONS * sizeof(int))] = {}; 535 struct iovec iov = { 536 .iov_base = (char *)vmsg, 537 .iov_len = VHOST_USER_HDR_SIZE, 538 }; 539 struct msghdr msg = { 540 .msg_iov = &iov, 541 .msg_iovlen = 1, 542 .msg_control = control, 543 }; 544 struct cmsghdr *cmsg; 545 546 memset(control, 0, sizeof(control)); 547 assert(vmsg->fd_num <= VHOST_MEMORY_BASELINE_NREGIONS); 548 if (vmsg->fd_num > 0) { 549 size_t fdsize = vmsg->fd_num * sizeof(int); 550 msg.msg_controllen = CMSG_SPACE(fdsize); 551 cmsg = CMSG_FIRSTHDR(&msg); 552 cmsg->cmsg_len = CMSG_LEN(fdsize); 553 cmsg->cmsg_level = SOL_SOCKET; 554 cmsg->cmsg_type = SCM_RIGHTS; 555 memcpy(CMSG_DATA(cmsg), vmsg->fds, fdsize); 556 } else { 557 msg.msg_controllen = 0; 558 } 559 560 do { 561 rc = sendmsg(conn_fd, &msg, 0); 562 } while (rc < 0 && (errno == EINTR || errno == EAGAIN)); 563 564 if (vmsg->size) { 565 do { 566 if (vmsg->data) { 567 rc = write(conn_fd, vmsg->data, vmsg->size); 568 } else { 569 rc = write(conn_fd, p + VHOST_USER_HDR_SIZE, vmsg->size); 570 } 571 } while (rc < 0 && (errno == EINTR || errno == EAGAIN)); 572 } 573 574 if (rc <= 0) { 575 vu_panic(dev, "Error while writing: %s", strerror(errno)); 576 return false; 577 } 578 579 return true; 580 } 581 582 static bool 583 vu_send_reply(VuDev *dev, int conn_fd, VhostUserMsg *vmsg) 584 { 585 /* Set the version in the flags when sending the reply */ 586 vmsg->flags &= ~VHOST_USER_VERSION_MASK; 587 vmsg->flags |= VHOST_USER_VERSION; 588 vmsg->flags |= VHOST_USER_REPLY_MASK; 589 590 return vu_message_write(dev, conn_fd, vmsg); 591 } 592 593 /* 594 * Processes a reply on the backend channel. 595 * Entered with backend_mutex held and releases it before exit. 596 * Returns true on success. 597 */ 598 static bool 599 vu_process_message_reply(VuDev *dev, const VhostUserMsg *vmsg) 600 { 601 VhostUserMsg msg_reply; 602 bool result = false; 603 604 if ((vmsg->flags & VHOST_USER_NEED_REPLY_MASK) == 0) { 605 result = true; 606 goto out; 607 } 608 609 if (!vu_message_read_default(dev, dev->backend_fd, &msg_reply)) { 610 goto out; 611 } 612 613 if (msg_reply.request != vmsg->request) { 614 DPRINT("Received unexpected msg type. Expected %d received %d", 615 vmsg->request, msg_reply.request); 616 goto out; 617 } 618 619 result = msg_reply.payload.u64 == 0; 620 621 out: 622 pthread_mutex_unlock(&dev->backend_mutex); 623 return result; 624 } 625 626 /* Kick the log_call_fd if required. 
*/ 627 static void 628 vu_log_kick(VuDev *dev) 629 { 630 if (dev->log_call_fd != -1) { 631 DPRINT("Kicking the QEMU's log...\n"); 632 if (eventfd_write(dev->log_call_fd, 1) < 0) { 633 vu_panic(dev, "Error writing eventfd: %s", strerror(errno)); 634 } 635 } 636 } 637 638 static void 639 vu_log_page(uint8_t *log_table, uint64_t page) 640 { 641 DPRINT("Logged dirty guest page: %"PRId64"\n", page); 642 qatomic_or(&log_table[page / 8], 1 << (page % 8)); 643 } 644 645 static void 646 vu_log_write(VuDev *dev, uint64_t address, uint64_t length) 647 { 648 uint64_t page; 649 650 if (!(dev->features & (1ULL << VHOST_F_LOG_ALL)) || 651 !dev->log_table || !length) { 652 return; 653 } 654 655 assert(dev->log_size > ((address + length - 1) / VHOST_LOG_PAGE / 8)); 656 657 page = address / VHOST_LOG_PAGE; 658 while (page * VHOST_LOG_PAGE < address + length) { 659 vu_log_page(dev->log_table, page); 660 page += 1; 661 } 662 663 vu_log_kick(dev); 664 } 665 666 static void 667 vu_kick_cb(VuDev *dev, int condition, void *data) 668 { 669 int index = (intptr_t)data; 670 VuVirtq *vq = &dev->vq[index]; 671 int sock = vq->kick_fd; 672 eventfd_t kick_data; 673 ssize_t rc; 674 675 rc = eventfd_read(sock, &kick_data); 676 if (rc == -1) { 677 vu_panic(dev, "kick eventfd_read(): %s", strerror(errno)); 678 dev->remove_watch(dev, dev->vq[index].kick_fd); 679 } else { 680 DPRINT("Got kick_data: %016"PRIx64" handler:%p idx:%d\n", 681 kick_data, vq->handler, index); 682 if (vq->handler) { 683 vq->handler(dev, index); 684 } 685 } 686 } 687 688 static bool 689 vu_get_features_exec(VuDev *dev, VhostUserMsg *vmsg) 690 { 691 vmsg->payload.u64 = 692 /* 693 * The following VIRTIO feature bits are supported by our virtqueue 694 * implementation: 695 */ 696 1ULL << VIRTIO_F_NOTIFY_ON_EMPTY | 697 1ULL << VIRTIO_RING_F_INDIRECT_DESC | 698 1ULL << VIRTIO_RING_F_EVENT_IDX | 699 1ULL << VIRTIO_F_VERSION_1 | 700 701 /* vhost-user feature bits */ 702 1ULL << VHOST_F_LOG_ALL | 703 1ULL << VHOST_USER_F_PROTOCOL_FEATURES; 704 705 if (dev->iface->get_features) { 706 vmsg->payload.u64 |= dev->iface->get_features(dev); 707 } 708 709 vmsg->size = sizeof(vmsg->payload.u64); 710 vmsg->fd_num = 0; 711 712 DPRINT("Sending back to guest u64: 0x%016"PRIx64"\n", vmsg->payload.u64); 713 714 return true; 715 } 716 717 static void 718 vu_set_enable_all_rings(VuDev *dev, bool enabled) 719 { 720 uint16_t i; 721 722 for (i = 0; i < dev->max_queues; i++) { 723 dev->vq[i].enable = enabled; 724 } 725 } 726 727 static bool 728 vu_set_features_exec(VuDev *dev, VhostUserMsg *vmsg) 729 { 730 DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64); 731 732 dev->features = vmsg->payload.u64; 733 if (!vu_has_feature(dev, VIRTIO_F_VERSION_1)) { 734 /* 735 * We only support devices conforming to VIRTIO 1.0 or 736 * later 737 */ 738 vu_panic(dev, "virtio legacy devices aren't supported by libvhost-user"); 739 return false; 740 } 741 742 if (!(dev->features & VHOST_USER_F_PROTOCOL_FEATURES)) { 743 vu_set_enable_all_rings(dev, true); 744 } 745 746 if (dev->iface->set_features) { 747 dev->iface->set_features(dev, dev->features); 748 } 749 750 return false; 751 } 752 753 static bool 754 vu_set_owner_exec(VuDev *dev, VhostUserMsg *vmsg) 755 { 756 return false; 757 } 758 759 static void 760 vu_close_log(VuDev *dev) 761 { 762 if (dev->log_table) { 763 if (munmap(dev->log_table, dev->log_size) != 0) { 764 perror("close log munmap() error"); 765 } 766 767 dev->log_table = NULL; 768 } 769 if (dev->log_call_fd != -1) { 770 close(dev->log_call_fd); 771 dev->log_call_fd = -1; 772 } 773 } 774 
775 static bool 776 vu_reset_device_exec(VuDev *dev, VhostUserMsg *vmsg) 777 { 778 vu_set_enable_all_rings(dev, false); 779 780 return false; 781 } 782 783 static bool 784 map_ring(VuDev *dev, VuVirtq *vq) 785 { 786 vq->vring.desc = qva_to_va(dev, vq->vra.desc_user_addr); 787 vq->vring.used = qva_to_va(dev, vq->vra.used_user_addr); 788 vq->vring.avail = qva_to_va(dev, vq->vra.avail_user_addr); 789 790 DPRINT("Setting virtq addresses:\n"); 791 DPRINT(" vring_desc at %p\n", vq->vring.desc); 792 DPRINT(" vring_used at %p\n", vq->vring.used); 793 DPRINT(" vring_avail at %p\n", vq->vring.avail); 794 795 return !(vq->vring.desc && vq->vring.used && vq->vring.avail); 796 } 797 798 static bool 799 generate_faults(VuDev *dev) { 800 unsigned int i; 801 for (i = 0; i < dev->nregions; i++) { 802 #ifdef UFFDIO_REGISTER 803 VuDevRegion *dev_region = &dev->regions[i]; 804 int ret; 805 struct uffdio_register reg_struct; 806 807 /* 808 * We should already have an open ufd. Mark each memory 809 * range as ufd. 810 * Discard any mapping we have here; note I can't use MADV_REMOVE 811 * or fallocate to make the hole since I don't want to lose 812 * data that's already arrived in the shared process. 813 * TODO: How to do hugepage 814 */ 815 ret = madvise((void *)(uintptr_t)dev_region->mmap_addr, 816 dev_region->size + dev_region->mmap_offset, 817 MADV_DONTNEED); 818 if (ret) { 819 fprintf(stderr, 820 "%s: Failed to madvise(DONTNEED) region %d: %s\n", 821 __func__, i, strerror(errno)); 822 } 823 /* 824 * Turn off transparent hugepages so we dont get lose wakeups 825 * in neighbouring pages. 826 * TODO: Turn this backon later. 827 */ 828 ret = madvise((void *)(uintptr_t)dev_region->mmap_addr, 829 dev_region->size + dev_region->mmap_offset, 830 MADV_NOHUGEPAGE); 831 if (ret) { 832 /* 833 * Note: This can happen legally on kernels that are configured 834 * without madvise'able hugepages 835 */ 836 fprintf(stderr, 837 "%s: Failed to madvise(NOHUGEPAGE) region %d: %s\n", 838 __func__, i, strerror(errno)); 839 } 840 841 reg_struct.range.start = (uintptr_t)dev_region->mmap_addr; 842 reg_struct.range.len = dev_region->size + dev_region->mmap_offset; 843 reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING; 844 845 if (ioctl(dev->postcopy_ufd, UFFDIO_REGISTER, ®_struct)) { 846 vu_panic(dev, "%s: Failed to userfault region %d " 847 "@%" PRIx64 " + size:%" PRIx64 " offset: %" PRIx64 848 ": (ufd=%d)%s\n", 849 __func__, i, 850 dev_region->mmap_addr, 851 dev_region->size, dev_region->mmap_offset, 852 dev->postcopy_ufd, strerror(errno)); 853 return false; 854 } 855 if (!(reg_struct.ioctls & (1ULL << _UFFDIO_COPY))) { 856 vu_panic(dev, "%s Region (%d) doesn't support COPY", 857 __func__, i); 858 return false; 859 } 860 DPRINT("%s: region %d: Registered userfault for %" 861 PRIx64 " + %" PRIx64 "\n", __func__, i, 862 (uint64_t)reg_struct.range.start, 863 (uint64_t)reg_struct.range.len); 864 /* Now it's registered we can let the client at it */ 865 if (mprotect((void *)(uintptr_t)dev_region->mmap_addr, 866 dev_region->size + dev_region->mmap_offset, 867 PROT_READ | PROT_WRITE)) { 868 vu_panic(dev, "failed to mprotect region %d for postcopy (%s)", 869 i, strerror(errno)); 870 return false; 871 } 872 /* TODO: Stash 'zero' support flags somewhere */ 873 #endif 874 } 875 876 return true; 877 } 878 879 static bool 880 vu_add_mem_reg(VuDev *dev, VhostUserMsg *vmsg) { 881 int i; 882 VhostUserMemoryRegion m = vmsg->payload.memreg.region, *msg_region = &m; 883 884 if (vmsg->fd_num != 1) { 885 vmsg_close_fds(vmsg); 886 vu_panic(dev, 
"VHOST_USER_ADD_MEM_REG received %d fds - only 1 fd " 887 "should be sent for this message type", vmsg->fd_num); 888 return false; 889 } 890 891 if (vmsg->size < VHOST_USER_MEM_REG_SIZE) { 892 close(vmsg->fds[0]); 893 vu_panic(dev, "VHOST_USER_ADD_MEM_REG requires a message size of at " 894 "least %zu bytes and only %d bytes were received", 895 VHOST_USER_MEM_REG_SIZE, vmsg->size); 896 return false; 897 } 898 899 if (dev->nregions == VHOST_USER_MAX_RAM_SLOTS) { 900 close(vmsg->fds[0]); 901 vu_panic(dev, "failing attempt to hot add memory via " 902 "VHOST_USER_ADD_MEM_REG message because the backend has " 903 "no free ram slots available"); 904 return false; 905 } 906 907 /* 908 * If we are in postcopy mode and we receive a u64 payload with a 0 value 909 * we know all the postcopy client bases have been received, and we 910 * should start generating faults. 911 */ 912 if (dev->postcopy_listening && 913 vmsg->size == sizeof(vmsg->payload.u64) && 914 vmsg->payload.u64 == 0) { 915 (void)generate_faults(dev); 916 return false; 917 } 918 919 _vu_add_mem_reg(dev, msg_region, vmsg->fds[0]); 920 close(vmsg->fds[0]); 921 922 if (dev->postcopy_listening) { 923 /* Send the message back to qemu with the addresses filled in. */ 924 vmsg->fd_num = 0; 925 DPRINT("Successfully added new region in postcopy\n"); 926 return true; 927 } else { 928 for (i = 0; i < dev->max_queues; i++) { 929 if (dev->vq[i].vring.desc) { 930 if (map_ring(dev, &dev->vq[i])) { 931 vu_panic(dev, "remapping queue %d for new memory region", 932 i); 933 } 934 } 935 } 936 937 DPRINT("Successfully added new region\n"); 938 return false; 939 } 940 } 941 942 static inline bool reg_equal(VuDevRegion *vudev_reg, 943 VhostUserMemoryRegion *msg_reg) 944 { 945 if (vudev_reg->gpa == msg_reg->guest_phys_addr && 946 vudev_reg->qva == msg_reg->userspace_addr && 947 vudev_reg->size == msg_reg->memory_size) { 948 return true; 949 } 950 951 return false; 952 } 953 954 static bool 955 vu_rem_mem_reg(VuDev *dev, VhostUserMsg *vmsg) { 956 VhostUserMemoryRegion m = vmsg->payload.memreg.region, *msg_region = &m; 957 unsigned int idx; 958 VuDevRegion *r; 959 960 if (vmsg->fd_num > 1) { 961 vmsg_close_fds(vmsg); 962 vu_panic(dev, "VHOST_USER_REM_MEM_REG received %d fds - at most 1 fd " 963 "should be sent for this message type", vmsg->fd_num); 964 return false; 965 } 966 967 if (vmsg->size < VHOST_USER_MEM_REG_SIZE) { 968 vmsg_close_fds(vmsg); 969 vu_panic(dev, "VHOST_USER_REM_MEM_REG requires a message size of at " 970 "least %zu bytes and only %d bytes were received", 971 VHOST_USER_MEM_REG_SIZE, vmsg->size); 972 return false; 973 } 974 975 DPRINT("Removing region:\n"); 976 DPRINT(" guest_phys_addr: 0x%016"PRIx64"\n", 977 msg_region->guest_phys_addr); 978 DPRINT(" memory_size: 0x%016"PRIx64"\n", 979 msg_region->memory_size); 980 DPRINT(" userspace_addr 0x%016"PRIx64"\n", 981 msg_region->userspace_addr); 982 DPRINT(" mmap_offset 0x%016"PRIx64"\n", 983 msg_region->mmap_offset); 984 985 r = vu_gpa_to_mem_region(dev, msg_region->guest_phys_addr); 986 if (!r || !reg_equal(r, msg_region)) { 987 vmsg_close_fds(vmsg); 988 vu_panic(dev, "Specified region not found\n"); 989 return false; 990 } 991 992 munmap((void *)(uintptr_t)r->mmap_addr, r->size + r->mmap_offset); 993 994 idx = r - dev->regions; 995 assert(idx < dev->nregions); 996 /* Shift all affected entries by 1 to close the hole. 
*/ 997 memmove(r, r + 1, sizeof(VuDevRegion) * (dev->nregions - idx - 1)); 998 DPRINT("Successfully removed a region\n"); 999 dev->nregions--; 1000 1001 vmsg_close_fds(vmsg); 1002 1003 return false; 1004 } 1005 1006 static bool 1007 vu_get_shared_object(VuDev *dev, VhostUserMsg *vmsg) 1008 { 1009 int fd_num = 0; 1010 int dmabuf_fd = -1; 1011 if (dev->iface->get_shared_object) { 1012 dmabuf_fd = dev->iface->get_shared_object( 1013 dev, &vmsg->payload.object.uuid[0]); 1014 } 1015 if (dmabuf_fd != -1) { 1016 DPRINT("dmabuf_fd found for requested UUID\n"); 1017 vmsg->fds[fd_num++] = dmabuf_fd; 1018 } 1019 vmsg->fd_num = fd_num; 1020 1021 return true; 1022 } 1023 1024 static bool 1025 vu_set_mem_table_exec(VuDev *dev, VhostUserMsg *vmsg) 1026 { 1027 VhostUserMemory m = vmsg->payload.memory, *memory = &m; 1028 unsigned int i; 1029 1030 vu_remove_all_mem_regs(dev); 1031 1032 DPRINT("Nregions: %u\n", memory->nregions); 1033 for (i = 0; i < memory->nregions; i++) { 1034 _vu_add_mem_reg(dev, &memory->regions[i], vmsg->fds[i]); 1035 close(vmsg->fds[i]); 1036 } 1037 1038 if (dev->postcopy_listening) { 1039 /* Send the message back to qemu with the addresses filled in */ 1040 vmsg->fd_num = 0; 1041 if (!vu_send_reply(dev, dev->sock, vmsg)) { 1042 vu_panic(dev, "failed to respond to set-mem-table for postcopy"); 1043 return false; 1044 } 1045 1046 /* 1047 * Wait for QEMU to confirm that it's registered the handler for the 1048 * faults. 1049 */ 1050 if (!dev->read_msg(dev, dev->sock, vmsg) || 1051 vmsg->size != sizeof(vmsg->payload.u64) || 1052 vmsg->payload.u64 != 0) { 1053 vu_panic(dev, "failed to receive valid ack for postcopy set-mem-table"); 1054 return false; 1055 } 1056 1057 /* OK, now we can go and register the memory and generate faults */ 1058 (void)generate_faults(dev); 1059 return false; 1060 } 1061 1062 for (i = 0; i < dev->max_queues; i++) { 1063 if (dev->vq[i].vring.desc) { 1064 if (map_ring(dev, &dev->vq[i])) { 1065 vu_panic(dev, "remapping queue %d during setmemtable", i); 1066 } 1067 } 1068 } 1069 1070 return false; 1071 } 1072 1073 static bool 1074 vu_set_log_base_exec(VuDev *dev, VhostUserMsg *vmsg) 1075 { 1076 int fd; 1077 uint64_t log_mmap_size, log_mmap_offset; 1078 void *rc; 1079 1080 if (vmsg->fd_num != 1 || 1081 vmsg->size != sizeof(vmsg->payload.log)) { 1082 vu_panic(dev, "Invalid log_base message"); 1083 return true; 1084 } 1085 1086 fd = vmsg->fds[0]; 1087 log_mmap_offset = vmsg->payload.log.mmap_offset; 1088 log_mmap_size = vmsg->payload.log.mmap_size; 1089 DPRINT("Log mmap_offset: %"PRId64"\n", log_mmap_offset); 1090 DPRINT("Log mmap_size: %"PRId64"\n", log_mmap_size); 1091 1092 rc = mmap(0, log_mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 1093 log_mmap_offset); 1094 close(fd); 1095 if (rc == MAP_FAILED) { 1096 perror("log mmap error"); 1097 } 1098 1099 if (dev->log_table) { 1100 munmap(dev->log_table, dev->log_size); 1101 } 1102 dev->log_table = rc; 1103 dev->log_size = log_mmap_size; 1104 1105 vmsg->size = sizeof(vmsg->payload.u64); 1106 vmsg->fd_num = 0; 1107 1108 return true; 1109 } 1110 1111 static bool 1112 vu_set_log_fd_exec(VuDev *dev, VhostUserMsg *vmsg) 1113 { 1114 if (vmsg->fd_num != 1) { 1115 vu_panic(dev, "Invalid log_fd message"); 1116 return false; 1117 } 1118 1119 if (dev->log_call_fd != -1) { 1120 close(dev->log_call_fd); 1121 } 1122 dev->log_call_fd = vmsg->fds[0]; 1123 DPRINT("Got log_call_fd: %d\n", vmsg->fds[0]); 1124 1125 return false; 1126 } 1127 1128 static bool 1129 vu_set_vring_num_exec(VuDev *dev, VhostUserMsg *vmsg) 1130 { 1131 unsigned 
int index = vmsg->payload.state.index; 1132 unsigned int num = vmsg->payload.state.num; 1133 1134 DPRINT("State.index: %u\n", index); 1135 DPRINT("State.num: %u\n", num); 1136 dev->vq[index].vring.num = num; 1137 1138 return false; 1139 } 1140 1141 static bool 1142 vu_set_vring_addr_exec(VuDev *dev, VhostUserMsg *vmsg) 1143 { 1144 struct vhost_vring_addr addr = vmsg->payload.addr, *vra = &addr; 1145 unsigned int index = vra->index; 1146 VuVirtq *vq = &dev->vq[index]; 1147 1148 DPRINT("vhost_vring_addr:\n"); 1149 DPRINT(" index: %d\n", vra->index); 1150 DPRINT(" flags: %d\n", vra->flags); 1151 DPRINT(" desc_user_addr: 0x%016" PRIx64 "\n", (uint64_t)vra->desc_user_addr); 1152 DPRINT(" used_user_addr: 0x%016" PRIx64 "\n", (uint64_t)vra->used_user_addr); 1153 DPRINT(" avail_user_addr: 0x%016" PRIx64 "\n", (uint64_t)vra->avail_user_addr); 1154 DPRINT(" log_guest_addr: 0x%016" PRIx64 "\n", (uint64_t)vra->log_guest_addr); 1155 1156 vq->vra = *vra; 1157 vq->vring.flags = vra->flags; 1158 vq->vring.log_guest_addr = vra->log_guest_addr; 1159 1160 1161 if (map_ring(dev, vq)) { 1162 vu_panic(dev, "Invalid vring_addr message"); 1163 return false; 1164 } 1165 1166 vq->used_idx = le16toh(vq->vring.used->idx); 1167 1168 if (vq->last_avail_idx != vq->used_idx) { 1169 bool resume = dev->iface->queue_is_processed_in_order && 1170 dev->iface->queue_is_processed_in_order(dev, index); 1171 1172 DPRINT("Last avail index != used index: %u != %u%s\n", 1173 vq->last_avail_idx, vq->used_idx, 1174 resume ? ", resuming" : ""); 1175 1176 if (resume) { 1177 vq->shadow_avail_idx = vq->last_avail_idx = vq->used_idx; 1178 } 1179 } 1180 1181 return false; 1182 } 1183 1184 static bool 1185 vu_set_vring_base_exec(VuDev *dev, VhostUserMsg *vmsg) 1186 { 1187 unsigned int index = vmsg->payload.state.index; 1188 unsigned int num = vmsg->payload.state.num; 1189 1190 DPRINT("State.index: %u\n", index); 1191 DPRINT("State.num: %u\n", num); 1192 dev->vq[index].shadow_avail_idx = dev->vq[index].last_avail_idx = num; 1193 1194 return false; 1195 } 1196 1197 static bool 1198 vu_get_vring_base_exec(VuDev *dev, VhostUserMsg *vmsg) 1199 { 1200 unsigned int index = vmsg->payload.state.index; 1201 1202 DPRINT("State.index: %u\n", index); 1203 vmsg->payload.state.num = dev->vq[index].last_avail_idx; 1204 vmsg->size = sizeof(vmsg->payload.state); 1205 1206 dev->vq[index].started = false; 1207 if (dev->iface->queue_set_started) { 1208 dev->iface->queue_set_started(dev, index, false); 1209 } 1210 1211 if (dev->vq[index].call_fd != -1) { 1212 close(dev->vq[index].call_fd); 1213 dev->vq[index].call_fd = -1; 1214 } 1215 if (dev->vq[index].kick_fd != -1) { 1216 dev->remove_watch(dev, dev->vq[index].kick_fd); 1217 close(dev->vq[index].kick_fd); 1218 dev->vq[index].kick_fd = -1; 1219 } 1220 1221 return true; 1222 } 1223 1224 static bool 1225 vu_check_queue_msg_file(VuDev *dev, VhostUserMsg *vmsg) 1226 { 1227 int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK; 1228 bool nofd = vmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK; 1229 1230 if (index >= dev->max_queues) { 1231 vmsg_close_fds(vmsg); 1232 vu_panic(dev, "Invalid queue index: %u", index); 1233 return false; 1234 } 1235 1236 if (nofd) { 1237 vmsg_close_fds(vmsg); 1238 return true; 1239 } 1240 1241 if (vmsg->fd_num != 1) { 1242 vmsg_close_fds(vmsg); 1243 vu_panic(dev, "Invalid fds in request: %d", vmsg->request); 1244 return false; 1245 } 1246 1247 return true; 1248 } 1249 1250 static int 1251 inflight_desc_compare(const void *a, const void *b) 1252 { 1253 VuVirtqInflightDesc *desc0 = 
(VuVirtqInflightDesc *)a, 1254 *desc1 = (VuVirtqInflightDesc *)b; 1255 1256 if (desc1->counter > desc0->counter && 1257 (desc1->counter - desc0->counter) < VIRTQUEUE_MAX_SIZE * 2) { 1258 return 1; 1259 } 1260 1261 return -1; 1262 } 1263 1264 static int 1265 vu_check_queue_inflights(VuDev *dev, VuVirtq *vq) 1266 { 1267 int i = 0; 1268 1269 if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) { 1270 return 0; 1271 } 1272 1273 if (unlikely(!vq->inflight)) { 1274 return -1; 1275 } 1276 1277 if (unlikely(!vq->inflight->version)) { 1278 /* initialize the buffer */ 1279 vq->inflight->version = INFLIGHT_VERSION; 1280 return 0; 1281 } 1282 1283 vq->used_idx = le16toh(vq->vring.used->idx); 1284 vq->resubmit_num = 0; 1285 vq->resubmit_list = NULL; 1286 vq->counter = 0; 1287 1288 if (unlikely(vq->inflight->used_idx != vq->used_idx)) { 1289 vq->inflight->desc[vq->inflight->last_batch_head].inflight = 0; 1290 1291 barrier(); 1292 1293 vq->inflight->used_idx = vq->used_idx; 1294 } 1295 1296 for (i = 0; i < vq->inflight->desc_num; i++) { 1297 if (vq->inflight->desc[i].inflight == 1) { 1298 vq->inuse++; 1299 } 1300 } 1301 1302 vq->shadow_avail_idx = vq->last_avail_idx = vq->inuse + vq->used_idx; 1303 1304 if (vq->inuse) { 1305 vq->resubmit_list = calloc(vq->inuse, sizeof(VuVirtqInflightDesc)); 1306 if (!vq->resubmit_list) { 1307 return -1; 1308 } 1309 1310 for (i = 0; i < vq->inflight->desc_num; i++) { 1311 if (vq->inflight->desc[i].inflight) { 1312 vq->resubmit_list[vq->resubmit_num].index = i; 1313 vq->resubmit_list[vq->resubmit_num].counter = 1314 vq->inflight->desc[i].counter; 1315 vq->resubmit_num++; 1316 } 1317 } 1318 1319 if (vq->resubmit_num > 1) { 1320 qsort(vq->resubmit_list, vq->resubmit_num, 1321 sizeof(VuVirtqInflightDesc), inflight_desc_compare); 1322 } 1323 vq->counter = vq->resubmit_list[0].counter + 1; 1324 } 1325 1326 /* in case of I/O hang after reconnecting */ 1327 if (eventfd_write(vq->kick_fd, 1)) { 1328 return -1; 1329 } 1330 1331 return 0; 1332 } 1333 1334 static bool 1335 vu_set_vring_kick_exec(VuDev *dev, VhostUserMsg *vmsg) 1336 { 1337 int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK; 1338 bool nofd = vmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK; 1339 1340 DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64); 1341 1342 if (!vu_check_queue_msg_file(dev, vmsg)) { 1343 return false; 1344 } 1345 1346 if (dev->vq[index].kick_fd != -1) { 1347 dev->remove_watch(dev, dev->vq[index].kick_fd); 1348 close(dev->vq[index].kick_fd); 1349 dev->vq[index].kick_fd = -1; 1350 } 1351 1352 dev->vq[index].kick_fd = nofd ? 
-1 : vmsg->fds[0]; 1353 DPRINT("Got kick_fd: %d for vq: %d\n", dev->vq[index].kick_fd, index); 1354 1355 dev->vq[index].started = true; 1356 if (dev->iface->queue_set_started) { 1357 dev->iface->queue_set_started(dev, index, true); 1358 } 1359 1360 if (dev->vq[index].kick_fd != -1 && dev->vq[index].handler) { 1361 dev->set_watch(dev, dev->vq[index].kick_fd, VU_WATCH_IN, 1362 vu_kick_cb, (void *)(long)index); 1363 1364 DPRINT("Waiting for kicks on fd: %d for vq: %d\n", 1365 dev->vq[index].kick_fd, index); 1366 } 1367 1368 if (vu_check_queue_inflights(dev, &dev->vq[index])) { 1369 vu_panic(dev, "Failed to check inflights for vq: %d\n", index); 1370 } 1371 1372 return false; 1373 } 1374 1375 void vu_set_queue_handler(VuDev *dev, VuVirtq *vq, 1376 vu_queue_handler_cb handler) 1377 { 1378 int qidx = vq - dev->vq; 1379 1380 vq->handler = handler; 1381 if (vq->kick_fd >= 0) { 1382 if (handler) { 1383 dev->set_watch(dev, vq->kick_fd, VU_WATCH_IN, 1384 vu_kick_cb, (void *)(long)qidx); 1385 } else { 1386 dev->remove_watch(dev, vq->kick_fd); 1387 } 1388 } 1389 } 1390 1391 bool vu_set_queue_host_notifier(VuDev *dev, VuVirtq *vq, int fd, 1392 int size, int offset) 1393 { 1394 int qidx = vq - dev->vq; 1395 int fd_num = 0; 1396 VhostUserMsg vmsg = { 1397 .request = VHOST_USER_BACKEND_VRING_HOST_NOTIFIER_MSG, 1398 .flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK, 1399 .size = sizeof(vmsg.payload.area), 1400 .payload.area = { 1401 .u64 = qidx & VHOST_USER_VRING_IDX_MASK, 1402 .size = size, 1403 .offset = offset, 1404 }, 1405 }; 1406 1407 if (fd == -1) { 1408 vmsg.payload.area.u64 |= VHOST_USER_VRING_NOFD_MASK; 1409 } else { 1410 vmsg.fds[fd_num++] = fd; 1411 } 1412 1413 vmsg.fd_num = fd_num; 1414 1415 if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD)) { 1416 return false; 1417 } 1418 1419 pthread_mutex_lock(&dev->backend_mutex); 1420 if (!vu_message_write(dev, dev->backend_fd, &vmsg)) { 1421 pthread_mutex_unlock(&dev->backend_mutex); 1422 return false; 1423 } 1424 1425 /* Also unlocks the backend_mutex */ 1426 return vu_process_message_reply(dev, &vmsg); 1427 } 1428 1429 bool 1430 vu_lookup_shared_object(VuDev *dev, unsigned char uuid[UUID_LEN], 1431 int *dmabuf_fd) 1432 { 1433 bool result = false; 1434 VhostUserMsg msg_reply; 1435 VhostUserMsg msg = { 1436 .request = VHOST_USER_BACKEND_SHARED_OBJECT_LOOKUP, 1437 .size = sizeof(msg.payload.object), 1438 .flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK, 1439 }; 1440 1441 memcpy(msg.payload.object.uuid, uuid, sizeof(uuid[0]) * UUID_LEN); 1442 1443 if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_SHARED_OBJECT)) { 1444 return false; 1445 } 1446 1447 pthread_mutex_lock(&dev->backend_mutex); 1448 if (!vu_message_write(dev, dev->backend_fd, &msg)) { 1449 goto out; 1450 } 1451 1452 if (!vu_message_read_default(dev, dev->backend_fd, &msg_reply)) { 1453 goto out; 1454 } 1455 1456 if (msg_reply.request != msg.request) { 1457 DPRINT("Received unexpected msg type. Expected %d, received %d", 1458 msg.request, msg_reply.request); 1459 goto out; 1460 } 1461 1462 if (msg_reply.fd_num != 1) { 1463 DPRINT("Received unexpected number of fds. 
Expected 1, received %d", 1464 msg_reply.fd_num); 1465 goto out; 1466 } 1467 1468 *dmabuf_fd = msg_reply.fds[0]; 1469 result = *dmabuf_fd > 0 && msg_reply.payload.u64 == 0; 1470 out: 1471 pthread_mutex_unlock(&dev->backend_mutex); 1472 1473 return result; 1474 } 1475 1476 static bool 1477 vu_send_message(VuDev *dev, VhostUserMsg *vmsg) 1478 { 1479 bool result = false; 1480 pthread_mutex_lock(&dev->backend_mutex); 1481 if (!vu_message_write(dev, dev->backend_fd, vmsg)) { 1482 goto out; 1483 } 1484 1485 result = true; 1486 out: 1487 pthread_mutex_unlock(&dev->backend_mutex); 1488 1489 return result; 1490 } 1491 1492 bool 1493 vu_add_shared_object(VuDev *dev, unsigned char uuid[UUID_LEN]) 1494 { 1495 VhostUserMsg msg = { 1496 .request = VHOST_USER_BACKEND_SHARED_OBJECT_ADD, 1497 .size = sizeof(msg.payload.object), 1498 .flags = VHOST_USER_VERSION, 1499 }; 1500 1501 memcpy(msg.payload.object.uuid, uuid, sizeof(uuid[0]) * UUID_LEN); 1502 1503 if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_SHARED_OBJECT)) { 1504 return false; 1505 } 1506 1507 return vu_send_message(dev, &msg); 1508 } 1509 1510 bool 1511 vu_rm_shared_object(VuDev *dev, unsigned char uuid[UUID_LEN]) 1512 { 1513 VhostUserMsg msg = { 1514 .request = VHOST_USER_BACKEND_SHARED_OBJECT_REMOVE, 1515 .size = sizeof(msg.payload.object), 1516 .flags = VHOST_USER_VERSION, 1517 }; 1518 1519 memcpy(msg.payload.object.uuid, uuid, sizeof(uuid[0]) * UUID_LEN); 1520 1521 if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_SHARED_OBJECT)) { 1522 return false; 1523 } 1524 1525 return vu_send_message(dev, &msg); 1526 } 1527 1528 static bool 1529 vu_set_vring_call_exec(VuDev *dev, VhostUserMsg *vmsg) 1530 { 1531 int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK; 1532 bool nofd = vmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK; 1533 1534 DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64); 1535 1536 if (!vu_check_queue_msg_file(dev, vmsg)) { 1537 return false; 1538 } 1539 1540 if (dev->vq[index].call_fd != -1) { 1541 close(dev->vq[index].call_fd); 1542 dev->vq[index].call_fd = -1; 1543 } 1544 1545 dev->vq[index].call_fd = nofd ? -1 : vmsg->fds[0]; 1546 1547 /* in case of I/O hang after reconnecting */ 1548 if (dev->vq[index].call_fd != -1 && eventfd_write(vmsg->fds[0], 1)) { 1549 return -1; 1550 } 1551 1552 DPRINT("Got call_fd: %d for vq: %d\n", dev->vq[index].call_fd, index); 1553 1554 return false; 1555 } 1556 1557 static bool 1558 vu_set_vring_err_exec(VuDev *dev, VhostUserMsg *vmsg) 1559 { 1560 int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK; 1561 bool nofd = vmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK; 1562 1563 DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64); 1564 1565 if (!vu_check_queue_msg_file(dev, vmsg)) { 1566 return false; 1567 } 1568 1569 if (dev->vq[index].err_fd != -1) { 1570 close(dev->vq[index].err_fd); 1571 dev->vq[index].err_fd = -1; 1572 } 1573 1574 dev->vq[index].err_fd = nofd ? -1 : vmsg->fds[0]; 1575 1576 return false; 1577 } 1578 1579 static bool 1580 vu_get_protocol_features_exec(VuDev *dev, VhostUserMsg *vmsg) 1581 { 1582 /* 1583 * Note that we support, but intentionally do not set, 1584 * VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS. This means that 1585 * a device implementation can return it in its callback 1586 * (get_protocol_features) if it wants to use this for 1587 * simulation, but it is otherwise not desirable (if even 1588 * implemented by the frontend.) 
1589 */ 1590 uint64_t features = 1ULL << VHOST_USER_PROTOCOL_F_MQ | 1591 1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD | 1592 1ULL << VHOST_USER_PROTOCOL_F_BACKEND_REQ | 1593 1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER | 1594 1ULL << VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD | 1595 1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK | 1596 1ULL << VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS; 1597 1598 if (have_userfault()) { 1599 features |= 1ULL << VHOST_USER_PROTOCOL_F_PAGEFAULT; 1600 } 1601 1602 if (dev->iface->get_config && dev->iface->set_config) { 1603 features |= 1ULL << VHOST_USER_PROTOCOL_F_CONFIG; 1604 } 1605 1606 if (dev->iface->get_protocol_features) { 1607 features |= dev->iface->get_protocol_features(dev); 1608 } 1609 1610 vmsg_set_reply_u64(vmsg, features); 1611 return true; 1612 } 1613 1614 static bool 1615 vu_set_protocol_features_exec(VuDev *dev, VhostUserMsg *vmsg) 1616 { 1617 uint64_t features = vmsg->payload.u64; 1618 1619 DPRINT("u64: 0x%016"PRIx64"\n", features); 1620 1621 dev->protocol_features = vmsg->payload.u64; 1622 1623 if (vu_has_protocol_feature(dev, 1624 VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS) && 1625 (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_BACKEND_REQ) || 1626 !vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_REPLY_ACK))) { 1627 /* 1628 * The use case for using messages for kick/call is simulation, to make 1629 * the kick and call synchronous. To actually get that behaviour, both 1630 * of the other features are required. 1631 * Theoretically, one could use only kick messages, or do them without 1632 * having F_REPLY_ACK, but too many (possibly pending) messages on the 1633 * socket will eventually cause the frontend to hang, to avoid this in 1634 * scenarios where not desired enforce that the settings are in a way 1635 * that actually enables the simulation case. 
1636 */ 1637 vu_panic(dev, 1638 "F_IN_BAND_NOTIFICATIONS requires F_BACKEND_REQ && F_REPLY_ACK"); 1639 return false; 1640 } 1641 1642 if (dev->iface->set_protocol_features) { 1643 dev->iface->set_protocol_features(dev, features); 1644 } 1645 1646 return false; 1647 } 1648 1649 static bool 1650 vu_get_queue_num_exec(VuDev *dev, VhostUserMsg *vmsg) 1651 { 1652 vmsg_set_reply_u64(vmsg, dev->max_queues); 1653 return true; 1654 } 1655 1656 static bool 1657 vu_set_vring_enable_exec(VuDev *dev, VhostUserMsg *vmsg) 1658 { 1659 unsigned int index = vmsg->payload.state.index; 1660 unsigned int enable = vmsg->payload.state.num; 1661 1662 DPRINT("State.index: %u\n", index); 1663 DPRINT("State.enable: %u\n", enable); 1664 1665 if (index >= dev->max_queues) { 1666 vu_panic(dev, "Invalid vring_enable index: %u", index); 1667 return false; 1668 } 1669 1670 dev->vq[index].enable = enable; 1671 return false; 1672 } 1673 1674 static bool 1675 vu_set_backend_req_fd(VuDev *dev, VhostUserMsg *vmsg) 1676 { 1677 if (vmsg->fd_num != 1) { 1678 vu_panic(dev, "Invalid backend_req_fd message (%d fd's)", vmsg->fd_num); 1679 return false; 1680 } 1681 1682 if (dev->backend_fd != -1) { 1683 close(dev->backend_fd); 1684 } 1685 dev->backend_fd = vmsg->fds[0]; 1686 DPRINT("Got backend_fd: %d\n", vmsg->fds[0]); 1687 1688 return false; 1689 } 1690 1691 static bool 1692 vu_get_config(VuDev *dev, VhostUserMsg *vmsg) 1693 { 1694 int ret = -1; 1695 1696 if (dev->iface->get_config) { 1697 ret = dev->iface->get_config(dev, vmsg->payload.config.region, 1698 vmsg->payload.config.size); 1699 } 1700 1701 if (ret) { 1702 /* resize to zero to indicate an error to frontend */ 1703 vmsg->size = 0; 1704 } 1705 1706 return true; 1707 } 1708 1709 static bool 1710 vu_set_config(VuDev *dev, VhostUserMsg *vmsg) 1711 { 1712 int ret = -1; 1713 1714 if (dev->iface->set_config) { 1715 ret = dev->iface->set_config(dev, vmsg->payload.config.region, 1716 vmsg->payload.config.offset, 1717 vmsg->payload.config.size, 1718 vmsg->payload.config.flags); 1719 if (ret) { 1720 vu_panic(dev, "Set virtio configuration space failed"); 1721 } 1722 } 1723 1724 return false; 1725 } 1726 1727 static bool 1728 vu_set_postcopy_advise(VuDev *dev, VhostUserMsg *vmsg) 1729 { 1730 #ifdef UFFDIO_API 1731 struct uffdio_api api_struct; 1732 1733 dev->postcopy_ufd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK); 1734 vmsg->size = 0; 1735 #else 1736 dev->postcopy_ufd = -1; 1737 #endif 1738 1739 if (dev->postcopy_ufd == -1) { 1740 vu_panic(dev, "Userfaultfd not available: %s", strerror(errno)); 1741 goto out; 1742 } 1743 1744 #ifdef UFFDIO_API 1745 api_struct.api = UFFD_API; 1746 api_struct.features = 0; 1747 if (ioctl(dev->postcopy_ufd, UFFDIO_API, &api_struct)) { 1748 vu_panic(dev, "Failed UFFDIO_API: %s", strerror(errno)); 1749 close(dev->postcopy_ufd); 1750 dev->postcopy_ufd = -1; 1751 goto out; 1752 } 1753 /* TODO: Stash feature flags somewhere */ 1754 #endif 1755 1756 out: 1757 /* Return a ufd to the QEMU */ 1758 vmsg->fd_num = 1; 1759 vmsg->fds[0] = dev->postcopy_ufd; 1760 return true; /* = send a reply */ 1761 } 1762 1763 static bool 1764 vu_set_postcopy_listen(VuDev *dev, VhostUserMsg *vmsg) 1765 { 1766 if (dev->nregions) { 1767 vu_panic(dev, "Regions already registered at postcopy-listen"); 1768 vmsg_set_reply_u64(vmsg, -1); 1769 return true; 1770 } 1771 dev->postcopy_listening = true; 1772 1773 vmsg_set_reply_u64(vmsg, 0); 1774 return true; 1775 } 1776 1777 static bool 1778 vu_set_postcopy_end(VuDev *dev, VhostUserMsg *vmsg) 1779 { 1780 DPRINT("%s: Entry\n", 
__func__); 1781 dev->postcopy_listening = false; 1782 if (dev->postcopy_ufd > 0) { 1783 close(dev->postcopy_ufd); 1784 dev->postcopy_ufd = -1; 1785 DPRINT("%s: Done close\n", __func__); 1786 } 1787 1788 vmsg_set_reply_u64(vmsg, 0); 1789 DPRINT("%s: exit\n", __func__); 1790 return true; 1791 } 1792 1793 static inline uint64_t 1794 vu_inflight_queue_size(uint16_t queue_size) 1795 { 1796 return ALIGN_UP(sizeof(VuDescStateSplit) * queue_size + 1797 sizeof(uint16_t), INFLIGHT_ALIGNMENT); 1798 } 1799 1800 #ifdef MFD_ALLOW_SEALING 1801 static void * 1802 memfd_alloc(const char *name, size_t size, unsigned int flags, int *fd) 1803 { 1804 void *ptr; 1805 int ret; 1806 1807 *fd = memfd_create(name, MFD_ALLOW_SEALING); 1808 if (*fd < 0) { 1809 return NULL; 1810 } 1811 1812 ret = ftruncate(*fd, size); 1813 if (ret < 0) { 1814 close(*fd); 1815 return NULL; 1816 } 1817 1818 ret = fcntl(*fd, F_ADD_SEALS, flags); 1819 if (ret < 0) { 1820 close(*fd); 1821 return NULL; 1822 } 1823 1824 ptr = mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, *fd, 0); 1825 if (ptr == MAP_FAILED) { 1826 close(*fd); 1827 return NULL; 1828 } 1829 1830 return ptr; 1831 } 1832 #endif 1833 1834 static bool 1835 vu_get_inflight_fd(VuDev *dev, VhostUserMsg *vmsg) 1836 { 1837 int fd = -1; 1838 void *addr = NULL; 1839 uint64_t mmap_size; 1840 uint16_t num_queues, queue_size; 1841 1842 if (vmsg->size != sizeof(vmsg->payload.inflight)) { 1843 vu_panic(dev, "Invalid get_inflight_fd message:%d", vmsg->size); 1844 vmsg->payload.inflight.mmap_size = 0; 1845 return true; 1846 } 1847 1848 num_queues = vmsg->payload.inflight.num_queues; 1849 queue_size = vmsg->payload.inflight.queue_size; 1850 1851 DPRINT("set_inflight_fd num_queues: %"PRId16"\n", num_queues); 1852 DPRINT("set_inflight_fd queue_size: %"PRId16"\n", queue_size); 1853 1854 mmap_size = vu_inflight_queue_size(queue_size) * num_queues; 1855 1856 #ifdef MFD_ALLOW_SEALING 1857 addr = memfd_alloc("vhost-inflight", mmap_size, 1858 F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL, 1859 &fd); 1860 #else 1861 vu_panic(dev, "Not implemented: memfd support is missing"); 1862 #endif 1863 1864 if (!addr) { 1865 vu_panic(dev, "Failed to alloc vhost inflight area"); 1866 vmsg->payload.inflight.mmap_size = 0; 1867 return true; 1868 } 1869 1870 memset(addr, 0, mmap_size); 1871 1872 dev->inflight_info.addr = addr; 1873 dev->inflight_info.size = vmsg->payload.inflight.mmap_size = mmap_size; 1874 dev->inflight_info.fd = vmsg->fds[0] = fd; 1875 vmsg->fd_num = 1; 1876 vmsg->payload.inflight.mmap_offset = 0; 1877 1878 DPRINT("send inflight mmap_size: %"PRId64"\n", 1879 vmsg->payload.inflight.mmap_size); 1880 DPRINT("send inflight mmap offset: %"PRId64"\n", 1881 vmsg->payload.inflight.mmap_offset); 1882 1883 return true; 1884 } 1885 1886 static bool 1887 vu_set_inflight_fd(VuDev *dev, VhostUserMsg *vmsg) 1888 { 1889 int fd, i; 1890 uint64_t mmap_size, mmap_offset; 1891 uint16_t num_queues, queue_size; 1892 void *rc; 1893 1894 if (vmsg->fd_num != 1 || 1895 vmsg->size != sizeof(vmsg->payload.inflight)) { 1896 vu_panic(dev, "Invalid set_inflight_fd message size:%d fds:%d", 1897 vmsg->size, vmsg->fd_num); 1898 return false; 1899 } 1900 1901 fd = vmsg->fds[0]; 1902 mmap_size = vmsg->payload.inflight.mmap_size; 1903 mmap_offset = vmsg->payload.inflight.mmap_offset; 1904 num_queues = vmsg->payload.inflight.num_queues; 1905 queue_size = vmsg->payload.inflight.queue_size; 1906 1907 DPRINT("set_inflight_fd mmap_size: %"PRId64"\n", mmap_size); 1908 DPRINT("set_inflight_fd mmap_offset: %"PRId64"\n", mmap_offset); 1909 
DPRINT("set_inflight_fd num_queues: %"PRId16"\n", num_queues); 1910 DPRINT("set_inflight_fd queue_size: %"PRId16"\n", queue_size); 1911 1912 rc = mmap(0, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED, 1913 fd, mmap_offset); 1914 1915 if (rc == MAP_FAILED) { 1916 vu_panic(dev, "set_inflight_fd mmap error: %s", strerror(errno)); 1917 return false; 1918 } 1919 1920 if (dev->inflight_info.fd) { 1921 close(dev->inflight_info.fd); 1922 } 1923 1924 if (dev->inflight_info.addr) { 1925 munmap(dev->inflight_info.addr, dev->inflight_info.size); 1926 } 1927 1928 dev->inflight_info.fd = fd; 1929 dev->inflight_info.addr = rc; 1930 dev->inflight_info.size = mmap_size; 1931 1932 for (i = 0; i < num_queues; i++) { 1933 dev->vq[i].inflight = (VuVirtqInflight *)rc; 1934 dev->vq[i].inflight->desc_num = queue_size; 1935 rc = (void *)((char *)rc + vu_inflight_queue_size(queue_size)); 1936 } 1937 1938 return false; 1939 } 1940 1941 static bool 1942 vu_handle_vring_kick(VuDev *dev, VhostUserMsg *vmsg) 1943 { 1944 unsigned int index = vmsg->payload.state.index; 1945 1946 if (index >= dev->max_queues) { 1947 vu_panic(dev, "Invalid queue index: %u", index); 1948 return false; 1949 } 1950 1951 DPRINT("Got kick message: handler:%p idx:%u\n", 1952 dev->vq[index].handler, index); 1953 1954 if (!dev->vq[index].started) { 1955 dev->vq[index].started = true; 1956 1957 if (dev->iface->queue_set_started) { 1958 dev->iface->queue_set_started(dev, index, true); 1959 } 1960 } 1961 1962 if (dev->vq[index].handler) { 1963 dev->vq[index].handler(dev, index); 1964 } 1965 1966 return false; 1967 } 1968 1969 static bool vu_handle_get_max_memslots(VuDev *dev, VhostUserMsg *vmsg) 1970 { 1971 vmsg_set_reply_u64(vmsg, VHOST_USER_MAX_RAM_SLOTS); 1972 1973 DPRINT("u64: 0x%016"PRIx64"\n", (uint64_t) VHOST_USER_MAX_RAM_SLOTS); 1974 1975 return true; 1976 } 1977 1978 static bool 1979 vu_process_message(VuDev *dev, VhostUserMsg *vmsg) 1980 { 1981 int do_reply = 0; 1982 1983 /* Print out generic part of the request. 
*/ 1984 DPRINT("================ Vhost user message ================\n"); 1985 DPRINT("Request: %s (%d)\n", vu_request_to_string(vmsg->request), 1986 vmsg->request); 1987 DPRINT("Flags: 0x%x\n", vmsg->flags); 1988 DPRINT("Size: %u\n", vmsg->size); 1989 1990 if (vmsg->fd_num) { 1991 int i; 1992 DPRINT("Fds:"); 1993 for (i = 0; i < vmsg->fd_num; i++) { 1994 DPRINT(" %d", vmsg->fds[i]); 1995 } 1996 DPRINT("\n"); 1997 } 1998 1999 if (dev->iface->process_msg && 2000 dev->iface->process_msg(dev, vmsg, &do_reply)) { 2001 return do_reply; 2002 } 2003 2004 switch (vmsg->request) { 2005 case VHOST_USER_GET_FEATURES: 2006 return vu_get_features_exec(dev, vmsg); 2007 case VHOST_USER_SET_FEATURES: 2008 return vu_set_features_exec(dev, vmsg); 2009 case VHOST_USER_GET_PROTOCOL_FEATURES: 2010 return vu_get_protocol_features_exec(dev, vmsg); 2011 case VHOST_USER_SET_PROTOCOL_FEATURES: 2012 return vu_set_protocol_features_exec(dev, vmsg); 2013 case VHOST_USER_SET_OWNER: 2014 return vu_set_owner_exec(dev, vmsg); 2015 case VHOST_USER_RESET_OWNER: 2016 return vu_reset_device_exec(dev, vmsg); 2017 case VHOST_USER_SET_MEM_TABLE: 2018 return vu_set_mem_table_exec(dev, vmsg); 2019 case VHOST_USER_SET_LOG_BASE: 2020 return vu_set_log_base_exec(dev, vmsg); 2021 case VHOST_USER_SET_LOG_FD: 2022 return vu_set_log_fd_exec(dev, vmsg); 2023 case VHOST_USER_SET_VRING_NUM: 2024 return vu_set_vring_num_exec(dev, vmsg); 2025 case VHOST_USER_SET_VRING_ADDR: 2026 return vu_set_vring_addr_exec(dev, vmsg); 2027 case VHOST_USER_SET_VRING_BASE: 2028 return vu_set_vring_base_exec(dev, vmsg); 2029 case VHOST_USER_GET_VRING_BASE: 2030 return vu_get_vring_base_exec(dev, vmsg); 2031 case VHOST_USER_SET_VRING_KICK: 2032 return vu_set_vring_kick_exec(dev, vmsg); 2033 case VHOST_USER_SET_VRING_CALL: 2034 return vu_set_vring_call_exec(dev, vmsg); 2035 case VHOST_USER_SET_VRING_ERR: 2036 return vu_set_vring_err_exec(dev, vmsg); 2037 case VHOST_USER_GET_QUEUE_NUM: 2038 return vu_get_queue_num_exec(dev, vmsg); 2039 case VHOST_USER_SET_VRING_ENABLE: 2040 return vu_set_vring_enable_exec(dev, vmsg); 2041 case VHOST_USER_SET_BACKEND_REQ_FD: 2042 return vu_set_backend_req_fd(dev, vmsg); 2043 case VHOST_USER_GET_CONFIG: 2044 return vu_get_config(dev, vmsg); 2045 case VHOST_USER_SET_CONFIG: 2046 return vu_set_config(dev, vmsg); 2047 case VHOST_USER_NONE: 2048 /* if you need processing before exit, override iface->process_msg */ 2049 exit(0); 2050 case VHOST_USER_POSTCOPY_ADVISE: 2051 return vu_set_postcopy_advise(dev, vmsg); 2052 case VHOST_USER_POSTCOPY_LISTEN: 2053 return vu_set_postcopy_listen(dev, vmsg); 2054 case VHOST_USER_POSTCOPY_END: 2055 return vu_set_postcopy_end(dev, vmsg); 2056 case VHOST_USER_GET_INFLIGHT_FD: 2057 return vu_get_inflight_fd(dev, vmsg); 2058 case VHOST_USER_SET_INFLIGHT_FD: 2059 return vu_set_inflight_fd(dev, vmsg); 2060 case VHOST_USER_VRING_KICK: 2061 return vu_handle_vring_kick(dev, vmsg); 2062 case VHOST_USER_GET_MAX_MEM_SLOTS: 2063 return vu_handle_get_max_memslots(dev, vmsg); 2064 case VHOST_USER_ADD_MEM_REG: 2065 return vu_add_mem_reg(dev, vmsg); 2066 case VHOST_USER_REM_MEM_REG: 2067 return vu_rem_mem_reg(dev, vmsg); 2068 case VHOST_USER_GET_SHARED_OBJECT: 2069 return vu_get_shared_object(dev, vmsg); 2070 default: 2071 vmsg_close_fds(vmsg); 2072 vu_panic(dev, "Unhandled request: %d", vmsg->request); 2073 } 2074 2075 return false; 2076 } 2077 2078 bool 2079 vu_dispatch(VuDev *dev) 2080 { 2081 VhostUserMsg vmsg = { 0, }; 2082 int reply_requested; 2083 bool need_reply, success = false; 2084 2085 if 
(!dev->read_msg(dev, dev->sock, &vmsg)) { 2086 goto end; 2087 } 2088 2089 need_reply = vmsg.flags & VHOST_USER_NEED_REPLY_MASK; 2090 2091 reply_requested = vu_process_message(dev, &vmsg); 2092 if (!reply_requested && need_reply) { 2093 vmsg_set_reply_u64(&vmsg, 0); 2094 reply_requested = 1; 2095 } 2096 2097 if (!reply_requested) { 2098 success = true; 2099 goto end; 2100 } 2101 2102 if (!vu_send_reply(dev, dev->sock, &vmsg)) { 2103 goto end; 2104 } 2105 2106 success = true; 2107 2108 end: 2109 free(vmsg.data); 2110 return success; 2111 } 2112 2113 void 2114 vu_deinit(VuDev *dev) 2115 { 2116 unsigned int i; 2117 2118 vu_remove_all_mem_regs(dev); 2119 2120 for (i = 0; i < dev->max_queues; i++) { 2121 VuVirtq *vq = &dev->vq[i]; 2122 2123 if (vq->call_fd != -1) { 2124 close(vq->call_fd); 2125 vq->call_fd = -1; 2126 } 2127 2128 if (vq->kick_fd != -1) { 2129 dev->remove_watch(dev, vq->kick_fd); 2130 close(vq->kick_fd); 2131 vq->kick_fd = -1; 2132 } 2133 2134 if (vq->err_fd != -1) { 2135 close(vq->err_fd); 2136 vq->err_fd = -1; 2137 } 2138 2139 if (vq->resubmit_list) { 2140 free(vq->resubmit_list); 2141 vq->resubmit_list = NULL; 2142 } 2143 2144 vq->inflight = NULL; 2145 } 2146 2147 if (dev->inflight_info.addr) { 2148 munmap(dev->inflight_info.addr, dev->inflight_info.size); 2149 dev->inflight_info.addr = NULL; 2150 } 2151 2152 if (dev->inflight_info.fd > 0) { 2153 close(dev->inflight_info.fd); 2154 dev->inflight_info.fd = -1; 2155 } 2156 2157 vu_close_log(dev); 2158 if (dev->backend_fd != -1) { 2159 close(dev->backend_fd); 2160 dev->backend_fd = -1; 2161 } 2162 pthread_mutex_destroy(&dev->backend_mutex); 2163 2164 if (dev->sock != -1) { 2165 close(dev->sock); 2166 } 2167 2168 free(dev->vq); 2169 dev->vq = NULL; 2170 free(dev->regions); 2171 dev->regions = NULL; 2172 } 2173 2174 bool 2175 vu_init(VuDev *dev, 2176 uint16_t max_queues, 2177 int socket, 2178 vu_panic_cb panic, 2179 vu_read_msg_cb read_msg, 2180 vu_set_watch_cb set_watch, 2181 vu_remove_watch_cb remove_watch, 2182 const VuDevIface *iface) 2183 { 2184 uint16_t i; 2185 2186 assert(max_queues > 0); 2187 assert(socket >= 0); 2188 assert(set_watch); 2189 assert(remove_watch); 2190 assert(iface); 2191 assert(panic); 2192 2193 memset(dev, 0, sizeof(*dev)); 2194 2195 dev->sock = socket; 2196 dev->panic = panic; 2197 dev->read_msg = read_msg ? 
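
/*
 * Illustrative sketch (not part of the library): a minimal backend loop
 * built on vu_init()/vu_dispatch()/vu_deinit().  The "watch_cb",
 * "set_watch_cb", "remove_watch_cb" and "panic_cb" names below are
 * hypothetical glue that a real backend (e.g. one driven by GLib or
 * libevent) supplies through the vu_set_watch_cb/vu_remove_watch_cb hooks.
 *
 *     static void watch_cb(VuDev *vudev, int condition, void *data)
 *     {
 *         if (!vu_dispatch(vudev)) {
 *             // reading or processing the message failed: tear down
 *             vu_deinit(vudev);
 *         }
 *     }
 *
 *     // after accept()ing the vhost-user socket:
 *     vu_init(&dev, num_queues, conn_fd, panic_cb,
 *             NULL,                    // use vu_message_read_default
 *             set_watch_cb, remove_watch_cb, &iface);
 *     // set_watch_cb() is expected to poll conn_fd and invoke watch_cb()
 *     // whenever a vhost-user message becomes readable.
 */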
void
vu_deinit(VuDev *dev)
{
    unsigned int i;

    vu_remove_all_mem_regs(dev);

    for (i = 0; i < dev->max_queues; i++) {
        VuVirtq *vq = &dev->vq[i];

        if (vq->call_fd != -1) {
            close(vq->call_fd);
            vq->call_fd = -1;
        }

        if (vq->kick_fd != -1) {
            dev->remove_watch(dev, vq->kick_fd);
            close(vq->kick_fd);
            vq->kick_fd = -1;
        }

        if (vq->err_fd != -1) {
            close(vq->err_fd);
            vq->err_fd = -1;
        }

        if (vq->resubmit_list) {
            free(vq->resubmit_list);
            vq->resubmit_list = NULL;
        }

        vq->inflight = NULL;
    }

    if (dev->inflight_info.addr) {
        munmap(dev->inflight_info.addr, dev->inflight_info.size);
        dev->inflight_info.addr = NULL;
    }

    if (dev->inflight_info.fd > 0) {
        close(dev->inflight_info.fd);
        dev->inflight_info.fd = -1;
    }

    vu_close_log(dev);
    if (dev->backend_fd != -1) {
        close(dev->backend_fd);
        dev->backend_fd = -1;
    }
    pthread_mutex_destroy(&dev->backend_mutex);

    if (dev->sock != -1) {
        close(dev->sock);
    }

    free(dev->vq);
    dev->vq = NULL;
    free(dev->regions);
    dev->regions = NULL;
}

bool
vu_init(VuDev *dev,
        uint16_t max_queues,
        int socket,
        vu_panic_cb panic,
        vu_read_msg_cb read_msg,
        vu_set_watch_cb set_watch,
        vu_remove_watch_cb remove_watch,
        const VuDevIface *iface)
{
    uint16_t i;

    assert(max_queues > 0);
    assert(socket >= 0);
    assert(set_watch);
    assert(remove_watch);
    assert(iface);
    assert(panic);

    memset(dev, 0, sizeof(*dev));

    dev->sock = socket;
    dev->panic = panic;
    dev->read_msg = read_msg ? read_msg : vu_message_read_default;
    dev->set_watch = set_watch;
    dev->remove_watch = remove_watch;
    dev->iface = iface;
    dev->log_call_fd = -1;
    pthread_mutex_init(&dev->backend_mutex, NULL);
    dev->backend_fd = -1;
    dev->max_queues = max_queues;

    dev->regions = malloc(VHOST_USER_MAX_RAM_SLOTS * sizeof(dev->regions[0]));
    if (!dev->regions) {
        DPRINT("%s: failed to malloc mem regions\n", __func__);
        return false;
    }

    dev->vq = malloc(max_queues * sizeof(dev->vq[0]));
    if (!dev->vq) {
        DPRINT("%s: failed to malloc virtqueues\n", __func__);
        free(dev->regions);
        dev->regions = NULL;
        return false;
    }

    for (i = 0; i < max_queues; i++) {
        dev->vq[i] = (VuVirtq) {
            .call_fd = -1, .kick_fd = -1, .err_fd = -1,
            .notification = true,
        };
    }

    return true;
}

VuVirtq *
vu_get_queue(VuDev *dev, int qidx)
{
    assert(qidx < dev->max_queues);
    return &dev->vq[qidx];
}

bool
vu_queue_enabled(VuDev *dev, VuVirtq *vq)
{
    return vq->enable;
}

bool
vu_queue_started(const VuDev *dev, const VuVirtq *vq)
{
    return vq->started;
}

static inline uint16_t
vring_avail_flags(VuVirtq *vq)
{
    return le16toh(vq->vring.avail->flags);
}

static inline uint16_t
vring_avail_idx(VuVirtq *vq)
{
    vq->shadow_avail_idx = le16toh(vq->vring.avail->idx);

    return vq->shadow_avail_idx;
}

static inline uint16_t
vring_avail_ring(VuVirtq *vq, int i)
{
    return le16toh(vq->vring.avail->ring[i]);
}

static inline uint16_t
vring_get_used_event(VuVirtq *vq)
{
    return vring_avail_ring(vq, vq->vring.num);
}

static int
virtqueue_num_heads(VuDev *dev, VuVirtq *vq, unsigned int idx)
{
    uint16_t num_heads = vring_avail_idx(vq) - idx;

    /* Check it isn't doing very strange things with descriptor numbers. */
    if (num_heads > vq->vring.num) {
        vu_panic(dev, "Guest moved used index from %u to %u",
                 idx, vq->shadow_avail_idx);
        return -1;
    }
    if (num_heads) {
        /* On success, callers read a descriptor at vq->last_avail_idx.
         * Make sure descriptor read does not bypass avail index read. */
        smp_rmb();
    }

    return num_heads;
}

static bool
virtqueue_get_head(VuDev *dev, VuVirtq *vq,
                   unsigned int idx, unsigned int *head)
{
    /* Grab the next descriptor number they're advertising, and increment
     * the index we've seen. */
    *head = vring_avail_ring(vq, idx % vq->vring.num);

    /* If their number is silly, that's a fatal mistake. */
    if (*head >= vq->vring.num) {
        vu_panic(dev, "Guest says index %u is available", *head);
        return false;
    }

    return true;
}

static int
virtqueue_read_indirect_desc(VuDev *dev, struct vring_desc *desc,
                             uint64_t addr, size_t len)
{
    struct vring_desc *ori_desc;
    uint64_t read_len;

    if (len > (VIRTQUEUE_MAX_SIZE * sizeof(struct vring_desc))) {
        return -1;
    }

    if (len == 0) {
        return -1;
    }

    while (len) {
        read_len = len;
        ori_desc = vu_gpa_to_va(dev, &read_len, addr);
        if (!ori_desc) {
            return -1;
        }

        memcpy(desc, ori_desc, read_len);
        len -= read_len;
        addr += read_len;
        desc += read_len;
    }

    return 0;
}

enum {
    VIRTQUEUE_READ_DESC_ERROR = -1,
    VIRTQUEUE_READ_DESC_DONE = 0,   /* end of chain */
    VIRTQUEUE_READ_DESC_MORE = 1,   /* more buffers in chain */
};

static int
virtqueue_read_next_desc(VuDev *dev, struct vring_desc *desc,
                         int i, unsigned int max, unsigned int *next)
{
    /* If this descriptor says it doesn't chain, we're done. */
    if (!(le16toh(desc[i].flags) & VRING_DESC_F_NEXT)) {
        return VIRTQUEUE_READ_DESC_DONE;
    }

    /* Check they're not leading us off end of descriptors. */
    *next = le16toh(desc[i].next);
    /* Make sure compiler knows to grab that: we don't want it changing! */
    smp_wmb();

    if (*next >= max) {
        vu_panic(dev, "Desc next is %u", *next);
        return VIRTQUEUE_READ_DESC_ERROR;
    }

    return VIRTQUEUE_READ_DESC_MORE;
}

void
vu_queue_get_avail_bytes(VuDev *dev, VuVirtq *vq, unsigned int *in_bytes,
                         unsigned int *out_bytes,
                         unsigned max_in_bytes, unsigned max_out_bytes)
{
    unsigned int idx;
    unsigned int total_bufs, in_total, out_total;
    int rc;

    idx = vq->last_avail_idx;

    total_bufs = in_total = out_total = 0;
    if (unlikely(dev->broken) ||
        unlikely(!vq->vring.avail)) {
        goto done;
    }

    while ((rc = virtqueue_num_heads(dev, vq, idx)) > 0) {
        unsigned int max, desc_len, num_bufs, indirect = 0;
        uint64_t desc_addr, read_len;
        struct vring_desc *desc;
        struct vring_desc desc_buf[VIRTQUEUE_MAX_SIZE];
        unsigned int i;

        max = vq->vring.num;
        num_bufs = total_bufs;
        if (!virtqueue_get_head(dev, vq, idx++, &i)) {
            goto err;
        }
        desc = vq->vring.desc;

        if (le16toh(desc[i].flags) & VRING_DESC_F_INDIRECT) {
            if (le32toh(desc[i].len) % sizeof(struct vring_desc)) {
                vu_panic(dev, "Invalid size for indirect buffer table");
                goto err;
            }

            /* If we've got too many, that implies a descriptor loop. */
            if (num_bufs >= max) {
                vu_panic(dev, "Looped descriptor");
                goto err;
            }

            /* loop over the indirect descriptor table */
            indirect = 1;
            desc_addr = le64toh(desc[i].addr);
            desc_len = le32toh(desc[i].len);
            max = desc_len / sizeof(struct vring_desc);
            read_len = desc_len;
            desc = vu_gpa_to_va(dev, &read_len, desc_addr);
            if (unlikely(desc && read_len != desc_len)) {
                /* Failed to use zero copy */
                desc = NULL;
                if (!virtqueue_read_indirect_desc(dev, desc_buf,
                                                  desc_addr,
                                                  desc_len)) {
                    desc = desc_buf;
                }
            }
            if (!desc) {
                vu_panic(dev, "Invalid indirect buffer table");
                goto err;
            }
            num_bufs = i = 0;
        }

        do {
            /* If we've got too many, that implies a descriptor loop. */
            if (++num_bufs > max) {
                vu_panic(dev, "Looped descriptor");
                goto err;
            }

            if (le16toh(desc[i].flags) & VRING_DESC_F_WRITE) {
                in_total += le32toh(desc[i].len);
            } else {
                out_total += le32toh(desc[i].len);
            }
            if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
                goto done;
            }
            rc = virtqueue_read_next_desc(dev, desc, i, max, &i);
        } while (rc == VIRTQUEUE_READ_DESC_MORE);

        if (rc == VIRTQUEUE_READ_DESC_ERROR) {
            goto err;
        }

        if (!indirect) {
            total_bufs = num_bufs;
        } else {
            total_bufs++;
        }
    }
    if (rc < 0) {
        goto err;
    }
done:
    if (in_bytes) {
        *in_bytes = in_total;
    }
    if (out_bytes) {
        *out_bytes = out_total;
    }
    return;

err:
    in_total = out_total = 0;
    goto done;
}

bool
vu_queue_avail_bytes(VuDev *dev, VuVirtq *vq, unsigned int in_bytes,
                     unsigned int out_bytes)
{
    unsigned int in_total, out_total;

    vu_queue_get_avail_bytes(dev, vq, &in_total, &out_total,
                             in_bytes, out_bytes);

    return in_bytes <= in_total && out_bytes <= out_total;
}

/* Fetch avail_idx from VQ memory only when we really need to know if
 * guest has added some buffers. */
bool
vu_queue_empty(VuDev *dev, VuVirtq *vq)
{
    if (unlikely(dev->broken) ||
        unlikely(!vq->vring.avail)) {
        return true;
    }

    if (vq->shadow_avail_idx != vq->last_avail_idx) {
        return false;
    }

    return vring_avail_idx(vq) == vq->last_avail_idx;
}
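
/*
 * vring_notify() decides whether the driver actually needs a used-buffer
 * notification.  Without VIRTIO_RING_F_EVENT_IDX the driver can only
 * suppress interrupts coarsely via VRING_AVAIL_F_NO_INTERRUPT.  With event
 * idx negotiated, the driver publishes a "used_event" index in the slot
 * after the avail ring (read by vring_get_used_event()), and the device
 * notifies only when the used index has just moved past that value:
 * vring_need_event(event, new, old) is the wrap-safe check
 * (uint16_t)(new - event - 1) < (uint16_t)(new - old).
 */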
static bool
vring_notify(VuDev *dev, VuVirtq *vq)
{
    uint16_t old, new;
    bool v;

    /* We need to expose used array entries before checking used event. */
    smp_mb();

    /* Always notify when queue is empty (when feature acknowledge) */
    if (vu_has_feature(dev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
        !vq->inuse && vu_queue_empty(dev, vq)) {
        return true;
    }

    if (!vu_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
        return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT);
    }

    v = vq->signalled_used_valid;
    vq->signalled_used_valid = true;
    old = vq->signalled_used;
    new = vq->signalled_used = vq->used_idx;
    return !v || vring_need_event(vring_get_used_event(vq), new, old);
}

static void _vu_queue_notify(VuDev *dev, VuVirtq *vq, bool sync)
{
    if (unlikely(dev->broken) ||
        unlikely(!vq->vring.avail)) {
        return;
    }

    if (!vring_notify(dev, vq)) {
        DPRINT("skipped notify...\n");
        return;
    }

    if (vq->call_fd < 0 &&
        vu_has_protocol_feature(dev,
                                VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS) &&
        vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_BACKEND_REQ)) {
        VhostUserMsg vmsg = {
            .request = VHOST_USER_BACKEND_VRING_CALL,
            .flags = VHOST_USER_VERSION,
            .size = sizeof(vmsg.payload.state),
            .payload.state = {
                .index = vq - dev->vq,
            },
        };
        bool ack = sync &&
                   vu_has_protocol_feature(dev,
                                           VHOST_USER_PROTOCOL_F_REPLY_ACK);

        if (ack) {
            vmsg.flags |= VHOST_USER_NEED_REPLY_MASK;
        }

        vu_message_write(dev, dev->backend_fd, &vmsg);
        if (ack) {
            vu_message_read_default(dev, dev->backend_fd, &vmsg);
        }
        return;
    }

    if (eventfd_write(vq->call_fd, 1) < 0) {
        vu_panic(dev, "Error writing eventfd: %s", strerror(errno));
    }
}

void vu_queue_notify(VuDev *dev, VuVirtq *vq)
{
    _vu_queue_notify(dev, vq, false);
}

void vu_queue_notify_sync(VuDev *dev, VuVirtq *vq)
{
    _vu_queue_notify(dev, vq, true);
}

void vu_config_change_msg(VuDev *dev)
{
    VhostUserMsg vmsg = {
        .request = VHOST_USER_BACKEND_CONFIG_CHANGE_MSG,
        .flags = VHOST_USER_VERSION,
    };

    vu_message_write(dev, dev->backend_fd, &vmsg);
}

static inline void
vring_used_flags_set_bit(VuVirtq *vq, int mask)
{
    uint16_t *flags;

    flags = (uint16_t *)((char*)vq->vring.used +
                         offsetof(struct vring_used, flags));
    *flags = htole16(le16toh(*flags) | mask);
}

static inline void
vring_used_flags_unset_bit(VuVirtq *vq, int mask)
{
    uint16_t *flags;

    flags = (uint16_t *)((char*)vq->vring.used +
                         offsetof(struct vring_used, flags));
    *flags = htole16(le16toh(*flags) & ~mask);
}

static inline void
vring_set_avail_event(VuVirtq *vq, uint16_t val)
{
    uint16_t val_le = htole16(val);

    if (!vq->notification) {
        return;
    }

    memcpy(&vq->vring.used->ring[vq->vring.num], &val_le, sizeof(uint16_t));
}

void
vu_queue_set_notification(VuDev *dev, VuVirtq *vq, int enable)
{
    vq->notification = enable;
    if (vu_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
        vring_set_avail_event(vq, vring_avail_idx(vq));
    } else if (enable) {
        vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
    } else {
        vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
    }
    if (enable) {
        /* Expose avail event/used flags before caller checks the avail idx. */
        smp_mb();
    }
}

static bool
virtqueue_map_desc(VuDev *dev,
                   unsigned int *p_num_sg, struct iovec *iov,
                   unsigned int max_num_sg, bool is_write,
                   uint64_t pa, size_t sz)
{
    unsigned num_sg = *p_num_sg;

    assert(num_sg <= max_num_sg);

    if (!sz) {
        vu_panic(dev, "virtio: zero sized buffers are not allowed");
        return false;
    }

    while (sz) {
        uint64_t len = sz;

        if (num_sg == max_num_sg) {
            vu_panic(dev, "virtio: too many descriptors in indirect table");
            return false;
        }

        iov[num_sg].iov_base = vu_gpa_to_va(dev, &len, pa);
        if (iov[num_sg].iov_base == NULL) {
            vu_panic(dev, "virtio: invalid address for buffers");
            return false;
        }
        iov[num_sg].iov_len = len;
        num_sg++;
        sz -= len;
        pa += len;
    }

    *p_num_sg = num_sg;
    return true;
}

static void *
virtqueue_alloc_element(size_t sz,
                        unsigned out_num, unsigned in_num)
{
    VuVirtqElement *elem;
    size_t in_sg_ofs = ALIGN_UP(sz, __alignof__(elem->in_sg[0]));
    size_t out_sg_ofs = in_sg_ofs + in_num * sizeof(elem->in_sg[0]);
    size_t out_sg_end = out_sg_ofs + out_num * sizeof(elem->out_sg[0]);

    assert(sz >= sizeof(VuVirtqElement));
    elem = malloc(out_sg_end);
    if (!elem) {
        DPRINT("%s: failed to malloc virtqueue element\n", __func__);
        return NULL;
    }
    elem->out_num = out_num;
    elem->in_num = in_num;
    elem->in_sg = (void *)elem + in_sg_ofs;
    elem->out_sg = (void *)elem + out_sg_ofs;
    return elem;
}
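
/*
 * A sketch of how a backend typically consumes the layout produced by
 * virtqueue_alloc_element(): vu_queue_pop() takes a "sz" argument so the
 * caller can over-allocate and keep per-request state in front of the
 * scatter/gather arrays.  The struct and field names here are hypothetical,
 * not part of libvhost-user:
 *
 *     typedef struct MyDevReq {
 *         VuVirtqElement elem;   // must be the first member
 *         uint32_t status;       // backend-private bookkeeping
 *     } MyDevReq;
 *
 *     MyDevReq *req = vu_queue_pop(dev, vq, sizeof(MyDevReq));
 *     // req->elem.out_sg/out_num and req->elem.in_sg/in_num point into
 *     // guest memory; free(req) releases the element and both sg arrays,
 *     // which live in the same allocation.
 */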
static void *
vu_queue_map_desc(VuDev *dev, VuVirtq *vq, unsigned int idx, size_t sz)
{
    struct vring_desc *desc = vq->vring.desc;
    uint64_t desc_addr, read_len;
    unsigned int desc_len;
    unsigned int max = vq->vring.num;
    unsigned int i = idx;
    VuVirtqElement *elem;
    unsigned int out_num = 0, in_num = 0;
    struct iovec iov[VIRTQUEUE_MAX_SIZE];
    struct vring_desc desc_buf[VIRTQUEUE_MAX_SIZE];
    int rc;

    if (le16toh(desc[i].flags) & VRING_DESC_F_INDIRECT) {
        if (le32toh(desc[i].len) % sizeof(struct vring_desc)) {
            vu_panic(dev, "Invalid size for indirect buffer table");
            return NULL;
        }

        /* loop over the indirect descriptor table */
        desc_addr = le64toh(desc[i].addr);
        desc_len = le32toh(desc[i].len);
        max = desc_len / sizeof(struct vring_desc);
        read_len = desc_len;
        desc = vu_gpa_to_va(dev, &read_len, desc_addr);
        if (unlikely(desc && read_len != desc_len)) {
            /* Failed to use zero copy */
            desc = NULL;
            if (!virtqueue_read_indirect_desc(dev, desc_buf,
                                              desc_addr,
                                              desc_len)) {
                desc = desc_buf;
            }
        }
        if (!desc) {
            vu_panic(dev, "Invalid indirect buffer table");
            return NULL;
        }
        i = 0;
    }

    /* Collect all the descriptors */
    do {
        if (le16toh(desc[i].flags) & VRING_DESC_F_WRITE) {
            if (!virtqueue_map_desc(dev, &in_num, iov + out_num,
                                    VIRTQUEUE_MAX_SIZE - out_num, true,
                                    le64toh(desc[i].addr),
                                    le32toh(desc[i].len))) {
                return NULL;
            }
        } else {
            if (in_num) {
                vu_panic(dev, "Incorrect order for descriptors");
                return NULL;
            }
            if (!virtqueue_map_desc(dev, &out_num, iov,
                                    VIRTQUEUE_MAX_SIZE, false,
                                    le64toh(desc[i].addr),
                                    le32toh(desc[i].len))) {
                return NULL;
            }
        }

        /* If we've got too many, that implies a descriptor loop. */
        if ((in_num + out_num) > max) {
            vu_panic(dev, "Looped descriptor");
            return NULL;
        }
        rc = virtqueue_read_next_desc(dev, desc, i, max, &i);
    } while (rc == VIRTQUEUE_READ_DESC_MORE);

    if (rc == VIRTQUEUE_READ_DESC_ERROR) {
        vu_panic(dev, "read descriptor error");
        return NULL;
    }

    /* Now copy what we have collected and mapped */
    elem = virtqueue_alloc_element(sz, out_num, in_num);
    if (!elem) {
        return NULL;
    }
    elem->index = idx;
    for (i = 0; i < out_num; i++) {
        elem->out_sg[i] = iov[i];
    }
    for (i = 0; i < in_num; i++) {
        elem->in_sg[i] = iov[out_num + i];
    }

    return elem;
}

static int
vu_queue_inflight_get(VuDev *dev, VuVirtq *vq, int desc_idx)
{
    if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
        return 0;
    }

    if (unlikely(!vq->inflight)) {
        return -1;
    }

    vq->inflight->desc[desc_idx].counter = vq->counter++;
    vq->inflight->desc[desc_idx].inflight = 1;

    return 0;
}

static int
vu_queue_inflight_pre_put(VuDev *dev, VuVirtq *vq, int desc_idx)
{
    if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
        return 0;
    }

    if (unlikely(!vq->inflight)) {
        return -1;
    }

    vq->inflight->last_batch_head = desc_idx;

    return 0;
}

static int
vu_queue_inflight_post_put(VuDev *dev, VuVirtq *vq, int desc_idx)
{
    if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
        return 0;
    }

    if (unlikely(!vq->inflight)) {
        return -1;
    }

    barrier();

    vq->inflight->desc[desc_idx].inflight = 0;

    barrier();

    vq->inflight->used_idx = vq->used_idx;

    return 0;
}

void *
vu_queue_pop(VuDev *dev, VuVirtq *vq, size_t sz)
{
    int i;
    unsigned int head;
    VuVirtqElement *elem;

    if (unlikely(dev->broken) ||
        unlikely(!vq->vring.avail)) {
        return NULL;
    }

    if (unlikely(vq->resubmit_list && vq->resubmit_num > 0)) {
        i = (--vq->resubmit_num);
        elem = vu_queue_map_desc(dev, vq, vq->resubmit_list[i].index, sz);

        if (!vq->resubmit_num) {
            free(vq->resubmit_list);
            vq->resubmit_list = NULL;
        }

        return elem;
    }

    if (vu_queue_empty(dev, vq)) {
        return NULL;
    }
    /*
     * Needed after virtio_queue_empty(), see comment in
     * virtqueue_num_heads().
     */
    smp_rmb();

    if (vq->inuse >= vq->vring.num) {
        vu_panic(dev, "Virtqueue size exceeded");
        return NULL;
    }

    if (!virtqueue_get_head(dev, vq, vq->last_avail_idx++, &head)) {
        return NULL;
    }

    if (vu_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
        vring_set_avail_event(vq, vq->last_avail_idx);
    }

    elem = vu_queue_map_desc(dev, vq, head, sz);

    if (!elem) {
        return NULL;
    }

    vq->inuse++;

    vu_queue_inflight_get(dev, vq, head);

    return elem;
}

static void
vu_queue_detach_element(VuDev *dev, VuVirtq *vq, VuVirtqElement *elem,
                        size_t len)
{
    vq->inuse--;
    /* unmap, when DMA support is added */
}

void
vu_queue_unpop(VuDev *dev, VuVirtq *vq, VuVirtqElement *elem,
               size_t len)
{
    vq->last_avail_idx--;
    vu_queue_detach_element(dev, vq, elem, len);
}

bool
vu_queue_rewind(VuDev *dev, VuVirtq *vq, unsigned int num)
{
    if (num > vq->inuse) {
        return false;
    }
    vq->last_avail_idx -= num;
    vq->inuse -= num;
    return true;
}

static inline
void vring_used_write(VuDev *dev, VuVirtq *vq,
                      struct vring_used_elem *uelem, int i)
{
    struct vring_used *used = vq->vring.used;

    used->ring[i] = *uelem;
    vu_log_write(dev, vq->vring.log_guest_addr +
                 offsetof(struct vring_used, ring[i]),
                 sizeof(used->ring[i]));
}

static void
vu_log_queue_fill(VuDev *dev, VuVirtq *vq,
                  const VuVirtqElement *elem,
                  unsigned int len)
{
    struct vring_desc *desc = vq->vring.desc;
    unsigned int i, max, min, desc_len;
    uint64_t desc_addr, read_len;
    struct vring_desc desc_buf[VIRTQUEUE_MAX_SIZE];
    unsigned num_bufs = 0;

    max = vq->vring.num;
    i = elem->index;

    if (le16toh(desc[i].flags) & VRING_DESC_F_INDIRECT) {
        if (le32toh(desc[i].len) % sizeof(struct vring_desc)) {
            vu_panic(dev, "Invalid size for indirect buffer table");
            return;
        }

        /* loop over the indirect descriptor table */
        desc_addr = le64toh(desc[i].addr);
        desc_len = le32toh(desc[i].len);
        max = desc_len / sizeof(struct vring_desc);
        read_len = desc_len;
        desc = vu_gpa_to_va(dev, &read_len, desc_addr);
        if (unlikely(desc && read_len != desc_len)) {
            /* Failed to use zero copy */
            desc = NULL;
            if (!virtqueue_read_indirect_desc(dev, desc_buf,
                                              desc_addr,
                                              desc_len)) {
                desc = desc_buf;
            }
        }
        if (!desc) {
            vu_panic(dev, "Invalid indirect buffer table");
            return;
        }
        i = 0;
    }

    do {
        if (++num_bufs > max) {
            vu_panic(dev, "Looped descriptor");
            return;
        }

        if (le16toh(desc[i].flags) & VRING_DESC_F_WRITE) {
            min = MIN(le32toh(desc[i].len), len);
            vu_log_write(dev, le64toh(desc[i].addr), min);
            len -= min;
        }

    } while (len > 0 &&
             (virtqueue_read_next_desc(dev, desc, i, max, &i)
              == VIRTQUEUE_READ_DESC_MORE));
}

void
vu_queue_fill(VuDev *dev, VuVirtq *vq,
              const VuVirtqElement *elem,
              unsigned int len, unsigned int idx)
{
    struct vring_used_elem uelem;

    if (unlikely(dev->broken) ||
        unlikely(!vq->vring.avail)) {
        return;
    }

    vu_log_queue_fill(dev, vq, elem, len);

    idx = (idx + vq->used_idx) % vq->vring.num;

    uelem.id = htole32(elem->index);
    uelem.len = htole32(len);
    vring_used_write(dev, vq, &uelem, idx);
}

static inline
void vring_used_idx_set(VuDev *dev, VuVirtq *vq, uint16_t val)
{
    vq->vring.used->idx = htole16(val);
    vu_log_write(dev,
                 vq->vring.log_guest_addr + offsetof(struct vring_used, idx),
                 sizeof(vq->vring.used->idx));

    vq->used_idx = val;
}

void
vu_queue_flush(VuDev *dev, VuVirtq *vq, unsigned int count)
{
    uint16_t old, new;

    if (unlikely(dev->broken) ||
        unlikely(!vq->vring.avail)) {
        return;
    }

    /* Make sure buffer is written before we update index. */
    smp_wmb();

    old = vq->used_idx;
    new = old + count;
    vring_used_idx_set(dev, vq, new);
    vq->inuse -= count;
    if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old))) {
        vq->signalled_used_valid = false;
    }
}

void
vu_queue_push(VuDev *dev, VuVirtq *vq,
              const VuVirtqElement *elem, unsigned int len)
{
    vu_queue_fill(dev, vq, elem, len, 0);
    vu_queue_inflight_pre_put(dev, vq, elem->index);
    vu_queue_flush(dev, vq, 1);
    vu_queue_inflight_post_put(dev, vq, elem->index);
}
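
/*
 * Illustrative sketch (not part of the library): the usual shape of a
 * per-queue kick handler registered through vu_set_queue_handler().
 * "process_request" is a hypothetical backend function; everything else is
 * the public API defined above.
 *
 *     static void my_queue_handler(VuDev *dev, int qidx)
 *     {
 *         VuVirtq *vq = vu_get_queue(dev, qidx);
 *         VuVirtqElement *elem;
 *
 *         while ((elem = vu_queue_pop(dev, vq, sizeof(*elem)))) {
 *             // read the request from elem->out_sg and write any reply
 *             // into elem->in_sg; in_len is the number of bytes written
 *             // into the device-writable buffers
 *             size_t in_len = process_request(elem);
 *
 *             // publish the used element and signal the driver
 *             vu_queue_push(dev, vq, elem, in_len);
 *             vu_queue_notify(dev, vq);
 *             free(elem);
 *         }
 *     }
 */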