/*
 * Vhost User library
 *
 * Copyright IBM, Corp. 2007
 * Copyright (c) 2016 Red Hat, Inc.
 *
 * Authors:
 *  Anthony Liguori <aliguori@us.ibm.com>
 *  Marc-André Lureau <mlureau@redhat.com>
 *  Victor Kaplansky <victork@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later. See the COPYING file in the top-level directory.
 */

#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif

/* this code avoids GLib dependency */
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <stdarg.h>
#include <errno.h>
#include <string.h>
#include <assert.h>
#include <inttypes.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/eventfd.h>
#include <sys/mman.h>
#include <endian.h>

/*
 * Necessary to provide VIRTIO_F_VERSION_1 on systems
 * with older linux headers. Must appear before
 * <linux/vhost.h> below.
 */
#include "standard-headers/linux/virtio_config.h"

#if defined(__linux__)
#include <sys/syscall.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>

#ifdef __NR_userfaultfd
#include <linux/userfaultfd.h>
#endif

#endif

#include "include/atomic.h"

#include "libvhost-user.h"

/* usually provided by GLib */
#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ > 4)
#if !defined(__clang__) && ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 4))
#define G_GNUC_PRINTF(format_idx, arg_idx) \
    __attribute__((__format__(gnu_printf, format_idx, arg_idx)))
#else
#define G_GNUC_PRINTF(format_idx, arg_idx) \
    __attribute__((__format__(__printf__, format_idx, arg_idx)))
#endif
#else /* !__GNUC__ */
#define G_GNUC_PRINTF(format_idx, arg_idx)
#endif /* !__GNUC__ */
#ifndef MIN
#define MIN(x, y) ({                            \
    __typeof__(x) _min1 = (x);                  \
    __typeof__(y) _min2 = (y);                  \
    (void) (&_min1 == &_min2);                  \
    _min1 < _min2 ? _min1 : _min2; })
#endif

/* Round number down to multiple */
#define ALIGN_DOWN(n, m) ((n) / (m) * (m))

/* Round number up to multiple */
#define ALIGN_UP(n, m) ALIGN_DOWN((n) + (m) - 1, (m))
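
/*
 * Both macros use plain integer division, so they work for any positive
 * multiple m, not just powers of two. For example (illustrative):
 * ALIGN_DOWN(5, 4) == 4, ALIGN_UP(5, 4) == 8, and ALIGN_UP(8, 4) == 8.
 */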

#ifndef unlikely
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif

/* Align each region to cache line size in inflight buffer */
#define INFLIGHT_ALIGNMENT 64

/* The version of inflight buffer */
#define INFLIGHT_VERSION 1

/* The version of the protocol we support */
#define VHOST_USER_VERSION 1
#define LIBVHOST_USER_DEBUG 0

#define DPRINT(...)                             \
    do {                                        \
        if (LIBVHOST_USER_DEBUG) {              \
            fprintf(stderr, __VA_ARGS__);       \
        }                                       \
    } while (0)

static inline
bool has_feature(uint64_t features, unsigned int fbit)
{
    assert(fbit < 64);
    return !!(features & (1ULL << fbit));
}

static inline
bool vu_has_feature(VuDev *dev,
                    unsigned int fbit)
{
    return has_feature(dev->features, fbit);
}

static inline bool vu_has_protocol_feature(VuDev *dev, unsigned int fbit)
{
    return has_feature(dev->protocol_features, fbit);
}

const char *
vu_request_to_string(unsigned int req)
{
#define REQ(req) [req] = #req
    static const char *vu_request_str[] = {
        REQ(VHOST_USER_NONE),
        REQ(VHOST_USER_GET_FEATURES),
        REQ(VHOST_USER_SET_FEATURES),
        REQ(VHOST_USER_SET_OWNER),
        REQ(VHOST_USER_RESET_OWNER),
        REQ(VHOST_USER_SET_MEM_TABLE),
        REQ(VHOST_USER_SET_LOG_BASE),
        REQ(VHOST_USER_SET_LOG_FD),
        REQ(VHOST_USER_SET_VRING_NUM),
        REQ(VHOST_USER_SET_VRING_ADDR),
        REQ(VHOST_USER_SET_VRING_BASE),
        REQ(VHOST_USER_GET_VRING_BASE),
        REQ(VHOST_USER_SET_VRING_KICK),
        REQ(VHOST_USER_SET_VRING_CALL),
        REQ(VHOST_USER_SET_VRING_ERR),
        REQ(VHOST_USER_GET_PROTOCOL_FEATURES),
        REQ(VHOST_USER_SET_PROTOCOL_FEATURES),
        REQ(VHOST_USER_GET_QUEUE_NUM),
        REQ(VHOST_USER_SET_VRING_ENABLE),
        REQ(VHOST_USER_SEND_RARP),
        REQ(VHOST_USER_NET_SET_MTU),
        REQ(VHOST_USER_SET_BACKEND_REQ_FD),
        REQ(VHOST_USER_IOTLB_MSG),
        REQ(VHOST_USER_SET_VRING_ENDIAN),
        REQ(VHOST_USER_GET_CONFIG),
        REQ(VHOST_USER_SET_CONFIG),
        REQ(VHOST_USER_POSTCOPY_ADVISE),
        REQ(VHOST_USER_POSTCOPY_LISTEN),
        REQ(VHOST_USER_POSTCOPY_END),
        REQ(VHOST_USER_GET_INFLIGHT_FD),
        REQ(VHOST_USER_SET_INFLIGHT_FD),
        REQ(VHOST_USER_GPU_SET_SOCKET),
        REQ(VHOST_USER_VRING_KICK),
        REQ(VHOST_USER_GET_MAX_MEM_SLOTS),
        REQ(VHOST_USER_ADD_MEM_REG),
        REQ(VHOST_USER_REM_MEM_REG),
        REQ(VHOST_USER_GET_SHARED_OBJECT),
        REQ(VHOST_USER_MAX),
    };
#undef REQ

    if (req < VHOST_USER_MAX) {
        return vu_request_str[req];
    } else {
        return "unknown";
    }
}

static void G_GNUC_PRINTF(2, 3)
vu_panic(VuDev *dev, const char *msg, ...)
{
    char *buf = NULL;
    va_list ap;

    va_start(ap, msg);
    if (vasprintf(&buf, msg, ap) < 0) {
        buf = NULL;
    }
    va_end(ap);

    dev->broken = true;
    dev->panic(dev, buf);
    free(buf);

    /*
     * FIXME:
     * find a way to call virtio_error, or perhaps close the connection?
     */
}

/* Translate guest physical address to our virtual address. */
void *
vu_gpa_to_va(VuDev *dev, uint64_t *plen, uint64_t guest_addr)
{
    unsigned int i;

    if (*plen == 0) {
        return NULL;
    }

    /* Find matching memory region. */
    for (i = 0; i < dev->nregions; i++) {
        VuDevRegion *r = &dev->regions[i];

        if ((guest_addr >= r->gpa) && (guest_addr < (r->gpa + r->size))) {
            if ((guest_addr + *plen) > (r->gpa + r->size)) {
                *plen = r->gpa + r->size - guest_addr;
            }
            return (void *)(uintptr_t)
                (guest_addr - r->gpa + r->mmap_addr + r->mmap_offset);
        }
    }

    return NULL;
}
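
/*
 * Usage sketch (illustrative, not part of the library): a translation may
 * cover less than the requested length when the buffer crosses a region
 * boundary, so callers are expected to loop:
 *
 *     uint64_t len = sz;
 *     void *va = vu_gpa_to_va(dev, &len, gpa);
 *     // va covers only the first 'len' bytes; advance gpa and sz by
 *     // 'len' and call again for the remainder.
 */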

/* Translate qemu virtual address to our virtual address. */
static void *
qva_to_va(VuDev *dev, uint64_t qemu_addr)
{
    unsigned int i;

    /* Find matching memory region. */
    for (i = 0; i < dev->nregions; i++) {
        VuDevRegion *r = &dev->regions[i];

        if ((qemu_addr >= r->qva) && (qemu_addr < (r->qva + r->size))) {
            return (void *)(uintptr_t)
                (qemu_addr - r->qva + r->mmap_addr + r->mmap_offset);
        }
    }

    return NULL;
}

static void
vu_remove_all_mem_regs(VuDev *dev)
{
    unsigned int i;

    for (i = 0; i < dev->nregions; i++) {
        VuDevRegion *r = &dev->regions[i];

        munmap((void *)(uintptr_t)r->mmap_addr, r->size + r->mmap_offset);
    }
    dev->nregions = 0;
}

static void
_vu_add_mem_reg(VuDev *dev, VhostUserMemoryRegion *msg_region, int fd)
{
    int prot = PROT_READ | PROT_WRITE;
    VuDevRegion *r;
    void *mmap_addr;

    DPRINT("Adding region %d\n", dev->nregions);
    DPRINT("    guest_phys_addr: 0x%016"PRIx64"\n",
           msg_region->guest_phys_addr);
    DPRINT("    memory_size:     0x%016"PRIx64"\n",
           msg_region->memory_size);
    DPRINT("    userspace_addr:  0x%016"PRIx64"\n",
           msg_region->userspace_addr);
    DPRINT("    mmap_offset:     0x%016"PRIx64"\n",
           msg_region->mmap_offset);

    if (dev->postcopy_listening) {
        /*
         * In postcopy we're using PROT_NONE here to catch anyone
         * accessing it before we userfault
         */
        prot = PROT_NONE;
    }

    /*
     * We don't use the offset argument of mmap() since the mapped address
     * has to be page aligned, and we use huge pages.
     */
    mmap_addr = mmap(0, msg_region->memory_size + msg_region->mmap_offset,
                     prot, MAP_SHARED | MAP_NORESERVE, fd, 0);
    if (mmap_addr == MAP_FAILED) {
        vu_panic(dev, "region mmap error: %s", strerror(errno));
        return;
    }
    DPRINT("    mmap_addr:       0x%016"PRIx64"\n",
           (uint64_t)(uintptr_t)mmap_addr);

    r = &dev->regions[dev->nregions];
    r->gpa = msg_region->guest_phys_addr;
    r->size = msg_region->memory_size;
    r->qva = msg_region->userspace_addr;
    r->mmap_addr = (uint64_t)(uintptr_t)mmap_addr;
    r->mmap_offset = msg_region->mmap_offset;
    dev->nregions++;

    if (dev->postcopy_listening) {
        /*
         * Return the address to QEMU so that it can translate the ufd
         * fault addresses back.
         */
        msg_region->userspace_addr = r->mmap_addr + r->mmap_offset;
    }
}
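
/*
 * A region translates addresses with plain offset arithmetic; a sketch of
 * the invariant maintained above (illustrative):
 *
 *     va = r->mmap_addr + r->mmap_offset + (gpa - r->gpa)
 *
 * and likewise with r->qva for QEMU virtual addresses.
 */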

static void
vmsg_close_fds(VhostUserMsg *vmsg)
{
    int i;

    for (i = 0; i < vmsg->fd_num; i++) {
        close(vmsg->fds[i]);
    }
}

/* Set reply payload.u64 and clear request flags and fd_num */
static void vmsg_set_reply_u64(VhostUserMsg *vmsg, uint64_t val)
{
    vmsg->flags = 0; /* defaults will be set by vu_send_reply() */
    vmsg->size = sizeof(vmsg->payload.u64);
    vmsg->payload.u64 = val;
    vmsg->fd_num = 0;
}

/* A test to see if we have userfault available */
static bool
have_userfault(void)
{
#if defined(__linux__) && defined(__NR_userfaultfd) &&\
        defined(UFFD_FEATURE_MISSING_SHMEM) &&\
        defined(UFFD_FEATURE_MISSING_HUGETLBFS)
    /* Now test the kernel we're running on really has the features */
    int ufd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
    struct uffdio_api api_struct;
    if (ufd < 0) {
        return false;
    }

    api_struct.api = UFFD_API;
    api_struct.features = UFFD_FEATURE_MISSING_SHMEM |
                          UFFD_FEATURE_MISSING_HUGETLBFS;
    if (ioctl(ufd, UFFDIO_API, &api_struct)) {
        close(ufd);
        return false;
    }
    close(ufd);
    return true;

#else
    return false;
#endif
}

static bool
vu_message_read_default(VuDev *dev, int conn_fd, VhostUserMsg *vmsg)
{
    char control[CMSG_SPACE(VHOST_MEMORY_BASELINE_NREGIONS * sizeof(int))] = {};
    struct iovec iov = {
        .iov_base = (char *)vmsg,
        .iov_len = VHOST_USER_HDR_SIZE,
    };
    struct msghdr msg = {
        .msg_iov = &iov,
        .msg_iovlen = 1,
        .msg_control = control,
        .msg_controllen = sizeof(control),
    };
    size_t fd_size;
    struct cmsghdr *cmsg;
    int rc;

    do {
        rc = recvmsg(conn_fd, &msg, 0);
    } while (rc < 0 && (errno == EINTR || errno == EAGAIN));

    if (rc < 0) {
        vu_panic(dev, "Error while recvmsg: %s", strerror(errno));
        return false;
    }

    vmsg->fd_num = 0;
    for (cmsg = CMSG_FIRSTHDR(&msg);
         cmsg != NULL;
         cmsg = CMSG_NXTHDR(&msg, cmsg))
    {
        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            fd_size = cmsg->cmsg_len - CMSG_LEN(0);
            vmsg->fd_num = fd_size / sizeof(int);
            /* fd_size is a byte count, so compare against the fds[] capacity */
            assert(fd_size <= VHOST_MEMORY_BASELINE_NREGIONS * sizeof(int));
            memcpy(vmsg->fds, CMSG_DATA(cmsg), fd_size);
            break;
        }
    }

    if (vmsg->size > sizeof(vmsg->payload)) {
        vu_panic(dev,
                 "Error: too big message request: %d, size: vmsg->size: %u, "
                 "while sizeof(vmsg->payload) = %zu\n",
                 vmsg->request, vmsg->size, sizeof(vmsg->payload));
        goto fail;
    }

    if (vmsg->size) {
        do {
            rc = read(conn_fd, &vmsg->payload, vmsg->size);
        } while (rc < 0 && (errno == EINTR || errno == EAGAIN));

        if (rc <= 0) {
            vu_panic(dev, "Error while reading: %s", strerror(errno));
            goto fail;
        }

        assert((uint32_t)rc == vmsg->size);
    }

    return true;

fail:
    vmsg_close_fds(vmsg);

    return false;
}
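
/*
 * On the wire, every vhost-user message starts with a VHOST_USER_HDR_SIZE
 * header (three 32-bit fields: request, flags, size) followed by 'size'
 * bytes of payload; any file descriptors arrive out-of-band as SCM_RIGHTS
 * ancillary data attached to the header recvmsg() above.
 */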

static bool
vu_message_write(VuDev *dev, int conn_fd, VhostUserMsg *vmsg)
{
    int rc;
    uint8_t *p = (uint8_t *)vmsg;
    char control[CMSG_SPACE(VHOST_MEMORY_BASELINE_NREGIONS * sizeof(int))] = {};
    struct iovec iov = {
        .iov_base = (char *)vmsg,
        .iov_len = VHOST_USER_HDR_SIZE,
    };
    struct msghdr msg = {
        .msg_iov = &iov,
        .msg_iovlen = 1,
        .msg_control = control,
    };
    struct cmsghdr *cmsg;

    memset(control, 0, sizeof(control));
    assert(vmsg->fd_num <= VHOST_MEMORY_BASELINE_NREGIONS);
    if (vmsg->fd_num > 0) {
        size_t fdsize = vmsg->fd_num * sizeof(int);
        msg.msg_controllen = CMSG_SPACE(fdsize);
        cmsg = CMSG_FIRSTHDR(&msg);
        cmsg->cmsg_len = CMSG_LEN(fdsize);
        cmsg->cmsg_level = SOL_SOCKET;
        cmsg->cmsg_type = SCM_RIGHTS;
        memcpy(CMSG_DATA(cmsg), vmsg->fds, fdsize);
    } else {
        msg.msg_controllen = 0;
    }

    do {
        rc = sendmsg(conn_fd, &msg, 0);
    } while (rc < 0 && (errno == EINTR || errno == EAGAIN));

    if (vmsg->size) {
        do {
            if (vmsg->data) {
                rc = write(conn_fd, vmsg->data, vmsg->size);
            } else {
                rc = write(conn_fd, p + VHOST_USER_HDR_SIZE, vmsg->size);
            }
        } while (rc < 0 && (errno == EINTR || errno == EAGAIN));
    }

    if (rc <= 0) {
        vu_panic(dev, "Error while writing: %s", strerror(errno));
        return false;
    }

    return true;
}

static bool
vu_send_reply(VuDev *dev, int conn_fd, VhostUserMsg *vmsg)
{
    /* Set the version in the flags when sending the reply */
    vmsg->flags &= ~VHOST_USER_VERSION_MASK;
    vmsg->flags |= VHOST_USER_VERSION;
    vmsg->flags |= VHOST_USER_REPLY_MASK;

    return vu_message_write(dev, conn_fd, vmsg);
}

/*
 * Processes a reply on the backend channel.
 * Entered with backend_mutex held and releases it before exit.
 * Returns true on success.
 */
static bool
vu_process_message_reply(VuDev *dev, const VhostUserMsg *vmsg)
{
    VhostUserMsg msg_reply;
    bool result = false;

    if ((vmsg->flags & VHOST_USER_NEED_REPLY_MASK) == 0) {
        result = true;
        goto out;
    }

    if (!vu_message_read_default(dev, dev->backend_fd, &msg_reply)) {
        goto out;
    }

    if (msg_reply.request != vmsg->request) {
        DPRINT("Received unexpected msg type. Expected %d received %d",
               vmsg->request, msg_reply.request);
        goto out;
    }

    result = msg_reply.payload.u64 == 0;

out:
    pthread_mutex_unlock(&dev->backend_mutex);
    return result;
}
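
/*
 * Backend-channel calls in this file follow a common pattern (sketch):
 *
 *     pthread_mutex_lock(&dev->backend_mutex);
 *     if (!vu_message_write(dev, dev->backend_fd, &vmsg)) { ... unlock ... }
 *     return vu_process_message_reply(dev, &vmsg);  // unlocks the mutex
 *
 * Holding the mutex across the write and the reply keeps request/reply
 * pairs from interleaving when several threads use the channel.
 */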

/* Kick the log_call_fd if required. */
static void
vu_log_kick(VuDev *dev)
{
    if (dev->log_call_fd != -1) {
        DPRINT("Kicking the QEMU's log...\n");
        if (eventfd_write(dev->log_call_fd, 1) < 0) {
            vu_panic(dev, "Error writing eventfd: %s", strerror(errno));
        }
    }
}

static void
vu_log_page(uint8_t *log_table, uint64_t page)
{
    DPRINT("Logged dirty guest page: %"PRId64"\n", page);
    qatomic_or(&log_table[page / 8], 1 << (page % 8));
}

static void
vu_log_write(VuDev *dev, uint64_t address, uint64_t length)
{
    uint64_t page;

    if (!(dev->features & (1ULL << VHOST_F_LOG_ALL)) ||
        !dev->log_table || !length) {
        return;
    }

    assert(dev->log_size > ((address + length - 1) / VHOST_LOG_PAGE / 8));

    page = address / VHOST_LOG_PAGE;
    while (page * VHOST_LOG_PAGE < address + length) {
        vu_log_page(dev->log_table, page);
        page += 1;
    }

    vu_log_kick(dev);
}
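
/*
 * Worked example (illustrative): with VHOST_LOG_PAGE == 0x1000, writing one
 * byte at address 0x1800 dirties page 1, i.e. bit (1 % 8) of byte (1 / 8)
 * in the log table; a two-byte write spanning 0x1FFF..0x2000 dirties pages
 * 1 and 2.
 */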

static void
vu_kick_cb(VuDev *dev, int condition, void *data)
{
    int index = (intptr_t)data;
    VuVirtq *vq = &dev->vq[index];
    int sock = vq->kick_fd;
    eventfd_t kick_data;
    ssize_t rc;

    rc = eventfd_read(sock, &kick_data);
    if (rc == -1) {
        vu_panic(dev, "kick eventfd_read(): %s", strerror(errno));
        dev->remove_watch(dev, dev->vq[index].kick_fd);
    } else {
        DPRINT("Got kick_data: %016"PRIx64" handler:%p idx:%d\n",
               kick_data, vq->handler, index);
        if (vq->handler) {
            vq->handler(dev, index);
        }
    }
}

static bool
vu_get_features_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    vmsg->payload.u64 =
        /*
         * The following VIRTIO feature bits are supported by our virtqueue
         * implementation:
         */
        1ULL << VIRTIO_F_NOTIFY_ON_EMPTY |
        1ULL << VIRTIO_RING_F_INDIRECT_DESC |
        1ULL << VIRTIO_RING_F_EVENT_IDX |
        1ULL << VIRTIO_F_VERSION_1 |

        /* vhost-user feature bits */
        1ULL << VHOST_F_LOG_ALL |
        1ULL << VHOST_USER_F_PROTOCOL_FEATURES;

    if (dev->iface->get_features) {
        vmsg->payload.u64 |= dev->iface->get_features(dev);
    }

    vmsg->size = sizeof(vmsg->payload.u64);
    vmsg->fd_num = 0;

    DPRINT("Sending back to guest u64: 0x%016"PRIx64"\n", vmsg->payload.u64);

    return true;
}

static void
vu_set_enable_all_rings(VuDev *dev, bool enabled)
{
    uint16_t i;

    for (i = 0; i < dev->max_queues; i++) {
        dev->vq[i].enable = enabled;
    }
}

static bool
vu_set_features_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);

    dev->features = vmsg->payload.u64;
    if (!vu_has_feature(dev, VIRTIO_F_VERSION_1)) {
        /*
         * We only support devices conforming to VIRTIO 1.0 or
         * later
         */
        vu_panic(dev, "virtio legacy devices aren't supported by libvhost-user");
        return false;
    }

    /* Without VHOST_USER_F_PROTOCOL_FEATURES, rings start out enabled. */
    if (!vu_has_feature(dev, VHOST_USER_F_PROTOCOL_FEATURES)) {
        vu_set_enable_all_rings(dev, true);
    }

    if (dev->iface->set_features) {
        dev->iface->set_features(dev, dev->features);
    }

    return false;
}

static bool
vu_set_owner_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    return false;
}

static void
vu_close_log(VuDev *dev)
{
    if (dev->log_table) {
        if (munmap(dev->log_table, dev->log_size) != 0) {
            perror("close log munmap() error");
        }

        dev->log_table = NULL;
    }
    if (dev->log_call_fd != -1) {
        close(dev->log_call_fd);
        dev->log_call_fd = -1;
    }
}

static bool
vu_reset_device_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    vu_set_enable_all_rings(dev, false);

    return false;
}

/* Returns true on failure, i.e. if any ring address failed to translate. */
static bool
map_ring(VuDev *dev, VuVirtq *vq)
{
    vq->vring.desc = qva_to_va(dev, vq->vra.desc_user_addr);
    vq->vring.used = qva_to_va(dev, vq->vra.used_user_addr);
    vq->vring.avail = qva_to_va(dev, vq->vra.avail_user_addr);

    DPRINT("Setting virtq addresses:\n");
    DPRINT("    vring_desc  at %p\n", vq->vring.desc);
    DPRINT("    vring_used  at %p\n", vq->vring.used);
    DPRINT("    vring_avail at %p\n", vq->vring.avail);

    return !(vq->vring.desc && vq->vring.used && vq->vring.avail);
}

static bool
generate_faults(VuDev *dev) {
    unsigned int i;
    for (i = 0; i < dev->nregions; i++) {
#ifdef UFFDIO_REGISTER
        VuDevRegion *dev_region = &dev->regions[i];
        int ret;
        struct uffdio_register reg_struct;

        /*
         * We should already have an open ufd. Register each memory
         * range with it.
         * Discard any mapping we have here; note I can't use MADV_REMOVE
         * or fallocate to make the hole since I don't want to lose
         * data that's already arrived in the shared process.
         * TODO: How to do hugepage
         */
        ret = madvise((void *)(uintptr_t)dev_region->mmap_addr,
                      dev_region->size + dev_region->mmap_offset,
                      MADV_DONTNEED);
        if (ret) {
            fprintf(stderr,
                    "%s: Failed to madvise(DONTNEED) region %d: %s\n",
                    __func__, i, strerror(errno));
        }
        /*
         * Turn off transparent hugepages so we don't lose wakeups
         * in neighbouring pages.
         * TODO: Turn this back on later.
         */
        ret = madvise((void *)(uintptr_t)dev_region->mmap_addr,
                      dev_region->size + dev_region->mmap_offset,
                      MADV_NOHUGEPAGE);
        if (ret) {
            /*
             * Note: This can happen legally on kernels that are configured
             * without madvise'able hugepages
             */
            fprintf(stderr,
                    "%s: Failed to madvise(NOHUGEPAGE) region %d: %s\n",
                    __func__, i, strerror(errno));
        }

        reg_struct.range.start = (uintptr_t)dev_region->mmap_addr;
        reg_struct.range.len = dev_region->size + dev_region->mmap_offset;
        reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;

        if (ioctl(dev->postcopy_ufd, UFFDIO_REGISTER, &reg_struct)) {
            vu_panic(dev, "%s: Failed to userfault region %d "
                          "@%" PRIx64 " + size:%" PRIx64 " offset: %" PRIx64
                          ": (ufd=%d)%s\n",
                     __func__, i,
                     dev_region->mmap_addr,
                     dev_region->size, dev_region->mmap_offset,
                     dev->postcopy_ufd, strerror(errno));
            return false;
        }
        if (!(reg_struct.ioctls & (1ULL << _UFFDIO_COPY))) {
            vu_panic(dev, "%s Region (%d) doesn't support COPY",
                     __func__, i);
            return false;
        }
        DPRINT("%s: region %d: Registered userfault for %"
               PRIx64 " + %" PRIx64 "\n", __func__, i,
               (uint64_t)reg_struct.range.start,
               (uint64_t)reg_struct.range.len);
        /* Now it's registered we can let the client at it */
        if (mprotect((void *)(uintptr_t)dev_region->mmap_addr,
                     dev_region->size + dev_region->mmap_offset,
                     PROT_READ | PROT_WRITE)) {
            vu_panic(dev, "failed to mprotect region %d for postcopy (%s)",
                     i, strerror(errno));
            return false;
        }
        /* TODO: Stash 'zero' support flags somewhere */
#endif
    }

    return true;
}

static bool
vu_add_mem_reg(VuDev *dev, VhostUserMsg *vmsg) {
    int i;
    VhostUserMemoryRegion m = vmsg->payload.memreg.region, *msg_region = &m;

    if (vmsg->fd_num != 1) {
        vmsg_close_fds(vmsg);
        vu_panic(dev, "VHOST_USER_ADD_MEM_REG received %d fds - only 1 fd "
                      "should be sent for this message type", vmsg->fd_num);
        return false;
    }

    if (vmsg->size < VHOST_USER_MEM_REG_SIZE) {
        close(vmsg->fds[0]);
        vu_panic(dev, "VHOST_USER_ADD_MEM_REG requires a message size of at "
                      "least %zu bytes and only %d bytes were received",
                 VHOST_USER_MEM_REG_SIZE, vmsg->size);
        return false;
    }

    if (dev->nregions == VHOST_USER_MAX_RAM_SLOTS) {
        close(vmsg->fds[0]);
        vu_panic(dev, "failing attempt to hot add memory via "
                      "VHOST_USER_ADD_MEM_REG message because the backend has "
                      "no free ram slots available");
        return false;
    }

    /*
     * If we are in postcopy mode and we receive a u64 payload with a 0 value
     * we know all the postcopy client bases have been received, and we
     * should start generating faults.
     */
    if (dev->postcopy_listening &&
        vmsg->size == sizeof(vmsg->payload.u64) &&
        vmsg->payload.u64 == 0) {
        (void)generate_faults(dev);
        return false;
    }

    _vu_add_mem_reg(dev, msg_region, vmsg->fds[0]);
    close(vmsg->fds[0]);

    if (dev->postcopy_listening) {
        /* Send the message back to qemu with the addresses filled in. */
        vmsg->fd_num = 0;
        DPRINT("Successfully added new region in postcopy\n");
        return true;
    } else {
        for (i = 0; i < dev->max_queues; i++) {
            if (dev->vq[i].vring.desc) {
                if (map_ring(dev, &dev->vq[i])) {
                    vu_panic(dev, "remapping queue %d for new memory region",
                             i);
                }
            }
        }

        DPRINT("Successfully added new region\n");
        return false;
    }
}

static inline bool reg_equal(VuDevRegion *vudev_reg,
                             VhostUserMemoryRegion *msg_reg)
{
    if (vudev_reg->gpa == msg_reg->guest_phys_addr &&
        vudev_reg->qva == msg_reg->userspace_addr &&
        vudev_reg->size == msg_reg->memory_size) {
        return true;
    }

    return false;
}

static bool
vu_rem_mem_reg(VuDev *dev, VhostUserMsg *vmsg) {
    VhostUserMemoryRegion m = vmsg->payload.memreg.region, *msg_region = &m;
    unsigned int i;
    bool found = false;

    if (vmsg->fd_num > 1) {
        vmsg_close_fds(vmsg);
        vu_panic(dev, "VHOST_USER_REM_MEM_REG received %d fds - at most 1 fd "
                      "should be sent for this message type", vmsg->fd_num);
        return false;
    }

    if (vmsg->size < VHOST_USER_MEM_REG_SIZE) {
        vmsg_close_fds(vmsg);
        vu_panic(dev, "VHOST_USER_REM_MEM_REG requires a message size of at "
                      "least %zu bytes and only %d bytes were received",
                 VHOST_USER_MEM_REG_SIZE, vmsg->size);
        return false;
    }

    DPRINT("Removing region:\n");
    DPRINT("    guest_phys_addr: 0x%016"PRIx64"\n",
           msg_region->guest_phys_addr);
    DPRINT("    memory_size:     0x%016"PRIx64"\n",
           msg_region->memory_size);
    DPRINT("    userspace_addr   0x%016"PRIx64"\n",
           msg_region->userspace_addr);
    DPRINT("    mmap_offset      0x%016"PRIx64"\n",
           msg_region->mmap_offset);

    for (i = 0; i < dev->nregions; i++) {
        if (reg_equal(&dev->regions[i], msg_region)) {
            VuDevRegion *r = &dev->regions[i];

            munmap((void *)(uintptr_t)r->mmap_addr, r->size + r->mmap_offset);

            /* Shift all affected entries by 1 to close the hole at index i. */
            memmove(dev->regions + i, dev->regions + i + 1,
                    sizeof(VuDevRegion) * (dev->nregions - i - 1));
            DPRINT("Successfully removed a region\n");
            dev->nregions--;
            i--;

            found = true;
            break;
        }
    }

    if (!found) {
        vu_panic(dev, "Specified region not found\n");
    }

    vmsg_close_fds(vmsg);

    return false;
}

static bool
vu_get_shared_object(VuDev *dev, VhostUserMsg *vmsg)
{
    int fd_num = 0;
    int dmabuf_fd = -1;
    if (dev->iface->get_shared_object) {
        dmabuf_fd = dev->iface->get_shared_object(
            dev, &vmsg->payload.object.uuid[0]);
    }
    if (dmabuf_fd != -1) {
        DPRINT("dmabuf_fd found for requested UUID\n");
        vmsg->fds[fd_num++] = dmabuf_fd;
    }
    vmsg->fd_num = fd_num;

    return true;
}

static bool
vu_set_mem_table_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    VhostUserMemory m = vmsg->payload.memory, *memory = &m;
    unsigned int i;

    vu_remove_all_mem_regs(dev);

    DPRINT("Nregions: %u\n", memory->nregions);
    for (i = 0; i < memory->nregions; i++) {
        _vu_add_mem_reg(dev, &memory->regions[i], vmsg->fds[i]);
        close(vmsg->fds[i]);
    }

    if (dev->postcopy_listening) {
        /* Send the message back to qemu with the addresses filled in */
        vmsg->fd_num = 0;
        if (!vu_send_reply(dev, dev->sock, vmsg)) {
            vu_panic(dev, "failed to respond to set-mem-table for postcopy");
            return false;
        }

        /*
         * Wait for QEMU to confirm that it's registered the handler for the
         * faults.
         */
        if (!dev->read_msg(dev, dev->sock, vmsg) ||
            vmsg->size != sizeof(vmsg->payload.u64) ||
            vmsg->payload.u64 != 0) {
            vu_panic(dev, "failed to receive valid ack for postcopy set-mem-table");
            return false;
        }

        /* OK, now we can go and register the memory and generate faults */
        (void)generate_faults(dev);
        return false;
    }

    for (i = 0; i < dev->max_queues; i++) {
        if (dev->vq[i].vring.desc) {
            if (map_ring(dev, &dev->vq[i])) {
                vu_panic(dev, "remapping queue %d during setmemtable", i);
            }
        }
    }

    return false;
}

static bool
vu_set_log_base_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    int fd;
    uint64_t log_mmap_size, log_mmap_offset;
    void *rc;

    if (vmsg->fd_num != 1 ||
        vmsg->size != sizeof(vmsg->payload.log)) {
        vu_panic(dev, "Invalid log_base message");
        return true;
    }

    fd = vmsg->fds[0];
    log_mmap_offset = vmsg->payload.log.mmap_offset;
    log_mmap_size = vmsg->payload.log.mmap_size;
    DPRINT("Log mmap_offset: %"PRId64"\n", log_mmap_offset);
    DPRINT("Log mmap_size:   %"PRId64"\n", log_mmap_size);

    rc = mmap(0, log_mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd,
              log_mmap_offset);
    close(fd);
    if (rc == MAP_FAILED) {
        perror("log mmap error");
        /* Don't install MAP_FAILED; vu_log_write() checks for NULL. */
        rc = NULL;
    }

    if (dev->log_table) {
        munmap(dev->log_table, dev->log_size);
    }
    dev->log_table = rc;
    dev->log_size = log_mmap_size;

    vmsg->size = sizeof(vmsg->payload.u64);
    vmsg->fd_num = 0;

    return true;
}

static bool
vu_set_log_fd_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    if (vmsg->fd_num != 1) {
        vu_panic(dev, "Invalid log_fd message");
        return false;
    }

    if (dev->log_call_fd != -1) {
        close(dev->log_call_fd);
    }
    dev->log_call_fd = vmsg->fds[0];
    DPRINT("Got log_call_fd: %d\n", vmsg->fds[0]);

    return false;
}

static bool
vu_set_vring_num_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    unsigned int index = vmsg->payload.state.index;
    unsigned int num = vmsg->payload.state.num;

    DPRINT("State.index: %u\n", index);
    DPRINT("State.num:   %u\n", num);
    dev->vq[index].vring.num = num;

    return false;
}

static bool
vu_set_vring_addr_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    struct vhost_vring_addr addr = vmsg->payload.addr, *vra = &addr;
    unsigned int index = vra->index;
    VuVirtq *vq = &dev->vq[index];

    DPRINT("vhost_vring_addr:\n");
    DPRINT("    index:  %d\n", vra->index);
    DPRINT("    flags:  %d\n", vra->flags);
    DPRINT("    desc_user_addr:   0x%016" PRIx64 "\n", (uint64_t)vra->desc_user_addr);
    DPRINT("    used_user_addr:   0x%016" PRIx64 "\n", (uint64_t)vra->used_user_addr);
    DPRINT("    avail_user_addr:  0x%016" PRIx64 "\n", (uint64_t)vra->avail_user_addr);
    DPRINT("    log_guest_addr:   0x%016" PRIx64 "\n", (uint64_t)vra->log_guest_addr);

    vq->vra = *vra;
    vq->vring.flags = vra->flags;
    vq->vring.log_guest_addr = vra->log_guest_addr;

    if (map_ring(dev, vq)) {
        vu_panic(dev, "Invalid vring_addr message");
        return false;
    }

    vq->used_idx = le16toh(vq->vring.used->idx);

    if (vq->last_avail_idx != vq->used_idx) {
        bool resume = dev->iface->queue_is_processed_in_order &&
            dev->iface->queue_is_processed_in_order(dev, index);

        DPRINT("Last avail index != used index: %u != %u%s\n",
               vq->last_avail_idx, vq->used_idx,
               resume ? ", resuming" : "");

        if (resume) {
            vq->shadow_avail_idx = vq->last_avail_idx = vq->used_idx;
        }
    }

    return false;
}

static bool
vu_set_vring_base_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    unsigned int index = vmsg->payload.state.index;
    unsigned int num = vmsg->payload.state.num;

    DPRINT("State.index: %u\n", index);
    DPRINT("State.num:   %u\n", num);
    dev->vq[index].shadow_avail_idx = dev->vq[index].last_avail_idx = num;

    return false;
}

static bool
vu_get_vring_base_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    unsigned int index = vmsg->payload.state.index;

    DPRINT("State.index: %u\n", index);
    vmsg->payload.state.num = dev->vq[index].last_avail_idx;
    vmsg->size = sizeof(vmsg->payload.state);

    dev->vq[index].started = false;
    if (dev->iface->queue_set_started) {
        dev->iface->queue_set_started(dev, index, false);
    }

    if (dev->vq[index].call_fd != -1) {
        close(dev->vq[index].call_fd);
        dev->vq[index].call_fd = -1;
    }
    if (dev->vq[index].kick_fd != -1) {
        dev->remove_watch(dev, dev->vq[index].kick_fd);
        close(dev->vq[index].kick_fd);
        dev->vq[index].kick_fd = -1;
    }

    return true;
}

static bool
vu_check_queue_msg_file(VuDev *dev, VhostUserMsg *vmsg)
{
    int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
    bool nofd = vmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK;

    if (index >= dev->max_queues) {
        vmsg_close_fds(vmsg);
        vu_panic(dev, "Invalid queue index: %u", index);
        return false;
    }

    if (nofd) {
        vmsg_close_fds(vmsg);
        return true;
    }

    if (vmsg->fd_num != 1) {
        vmsg_close_fds(vmsg);
        vu_panic(dev, "Invalid fds in request: %d", vmsg->request);
        return false;
    }

    return true;
}

static int
inflight_desc_compare(const void *a, const void *b)
{
    VuVirtqInflightDesc *desc0 = (VuVirtqInflightDesc *)a,
                        *desc1 = (VuVirtqInflightDesc *)b;

    if (desc1->counter > desc0->counter &&
        (desc1->counter - desc0->counter) < VIRTQUEUE_MAX_SIZE * 2) {
        return 1;
    }

    return -1;
}

static int
vu_check_queue_inflights(VuDev *dev, VuVirtq *vq)
{
    int i = 0;

    if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
        return 0;
    }

    if (unlikely(!vq->inflight)) {
        return -1;
    }

    if (unlikely(!vq->inflight->version)) {
        /* initialize the buffer */
        vq->inflight->version = INFLIGHT_VERSION;
        return 0;
    }

    vq->used_idx = le16toh(vq->vring.used->idx);
    vq->resubmit_num = 0;
    vq->resubmit_list = NULL;
    vq->counter = 0;

    if (unlikely(vq->inflight->used_idx != vq->used_idx)) {
        vq->inflight->desc[vq->inflight->last_batch_head].inflight = 0;

        barrier();

        vq->inflight->used_idx = vq->used_idx;
    }

    for (i = 0; i < vq->inflight->desc_num; i++) {
        if (vq->inflight->desc[i].inflight == 1) {
            vq->inuse++;
        }
    }

    vq->shadow_avail_idx = vq->last_avail_idx = vq->inuse + vq->used_idx;

    if (vq->inuse) {
        vq->resubmit_list = calloc(vq->inuse, sizeof(VuVirtqInflightDesc));
        if (!vq->resubmit_list) {
            return -1;
        }

        for (i = 0; i < vq->inflight->desc_num; i++) {
            if (vq->inflight->desc[i].inflight) {
                vq->resubmit_list[vq->resubmit_num].index = i;
                vq->resubmit_list[vq->resubmit_num].counter =
                    vq->inflight->desc[i].counter;
                vq->resubmit_num++;
            }
        }

        if (vq->resubmit_num > 1) {
            qsort(vq->resubmit_list, vq->resubmit_num,
                  sizeof(VuVirtqInflightDesc), inflight_desc_compare);
        }
        vq->counter = vq->resubmit_list[0].counter + 1;
    }

    /* in case of I/O hang after reconnecting */
    if (eventfd_write(vq->kick_fd, 1)) {
        return -1;
    }

    return 0;
}
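
/*
 * Illustrative example: if descriptors 1 and 3 were still inflight at
 * reconnect time with submission counters 7 and 9, the code above rebuilds
 * resubmit_list as [{index 3, counter 9}, {index 1, counter 7}] (the
 * comparator sorts higher counters first, tolerating counter wraparound
 * within a 2 * VIRTQUEUE_MAX_SIZE window) and resumes vq->counter at 10.
 */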

static bool
vu_set_vring_kick_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
    bool nofd = vmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK;

    DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);

    if (!vu_check_queue_msg_file(dev, vmsg)) {
        return false;
    }

    if (dev->vq[index].kick_fd != -1) {
        dev->remove_watch(dev, dev->vq[index].kick_fd);
        close(dev->vq[index].kick_fd);
        dev->vq[index].kick_fd = -1;
    }

    dev->vq[index].kick_fd = nofd ? -1 : vmsg->fds[0];
    DPRINT("Got kick_fd: %d for vq: %d\n", dev->vq[index].kick_fd, index);

    dev->vq[index].started = true;
    if (dev->iface->queue_set_started) {
        dev->iface->queue_set_started(dev, index, true);
    }

    if (dev->vq[index].kick_fd != -1 && dev->vq[index].handler) {
        dev->set_watch(dev, dev->vq[index].kick_fd, VU_WATCH_IN,
                       vu_kick_cb, (void *)(long)index);

        DPRINT("Waiting for kicks on fd: %d for vq: %d\n",
               dev->vq[index].kick_fd, index);
    }

    if (vu_check_queue_inflights(dev, &dev->vq[index])) {
        vu_panic(dev, "Failed to check inflights for vq: %d\n", index);
    }

    return false;
}

void vu_set_queue_handler(VuDev *dev, VuVirtq *vq,
                          vu_queue_handler_cb handler)
{
    int qidx = vq - dev->vq;

    vq->handler = handler;
    if (vq->kick_fd >= 0) {
        if (handler) {
            dev->set_watch(dev, vq->kick_fd, VU_WATCH_IN,
                           vu_kick_cb, (void *)(long)qidx);
        } else {
            dev->remove_watch(dev, vq->kick_fd);
        }
    }
}

bool vu_set_queue_host_notifier(VuDev *dev, VuVirtq *vq, int fd,
                                int size, int offset)
{
    int qidx = vq - dev->vq;
    int fd_num = 0;
    VhostUserMsg vmsg = {
        .request = VHOST_USER_BACKEND_VRING_HOST_NOTIFIER_MSG,
        .flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK,
        .size = sizeof(vmsg.payload.area),
        .payload.area = {
            .u64 = qidx & VHOST_USER_VRING_IDX_MASK,
            .size = size,
            .offset = offset,
        },
    };

    if (fd == -1) {
        vmsg.payload.area.u64 |= VHOST_USER_VRING_NOFD_MASK;
    } else {
        vmsg.fds[fd_num++] = fd;
    }

    vmsg.fd_num = fd_num;

    if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD)) {
        return false;
    }

    pthread_mutex_lock(&dev->backend_mutex);
    if (!vu_message_write(dev, dev->backend_fd, &vmsg)) {
        pthread_mutex_unlock(&dev->backend_mutex);
        return false;
    }

    /* Also unlocks the backend_mutex */
    return vu_process_message_reply(dev, &vmsg);
}
Expected 1, received %d", 1370 msg_reply.fd_num); 1371 goto out; 1372 } 1373 1374 *dmabuf_fd = msg_reply.fds[0]; 1375 result = *dmabuf_fd > 0 && msg_reply.payload.u64 == 0; 1376 out: 1377 pthread_mutex_unlock(&dev->backend_mutex); 1378 1379 return result; 1380 } 1381 1382 static bool 1383 vu_send_message(VuDev *dev, VhostUserMsg *vmsg) 1384 { 1385 bool result = false; 1386 pthread_mutex_lock(&dev->backend_mutex); 1387 if (!vu_message_write(dev, dev->backend_fd, vmsg)) { 1388 goto out; 1389 } 1390 1391 result = true; 1392 out: 1393 pthread_mutex_unlock(&dev->backend_mutex); 1394 1395 return result; 1396 } 1397 1398 bool 1399 vu_add_shared_object(VuDev *dev, unsigned char uuid[UUID_LEN]) 1400 { 1401 VhostUserMsg msg = { 1402 .request = VHOST_USER_BACKEND_SHARED_OBJECT_ADD, 1403 .size = sizeof(msg.payload.object), 1404 .flags = VHOST_USER_VERSION, 1405 }; 1406 1407 memcpy(msg.payload.object.uuid, uuid, sizeof(uuid[0]) * UUID_LEN); 1408 1409 if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_SHARED_OBJECT)) { 1410 return false; 1411 } 1412 1413 return vu_send_message(dev, &msg); 1414 } 1415 1416 bool 1417 vu_rm_shared_object(VuDev *dev, unsigned char uuid[UUID_LEN]) 1418 { 1419 VhostUserMsg msg = { 1420 .request = VHOST_USER_BACKEND_SHARED_OBJECT_REMOVE, 1421 .size = sizeof(msg.payload.object), 1422 .flags = VHOST_USER_VERSION, 1423 }; 1424 1425 memcpy(msg.payload.object.uuid, uuid, sizeof(uuid[0]) * UUID_LEN); 1426 1427 if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_SHARED_OBJECT)) { 1428 return false; 1429 } 1430 1431 return vu_send_message(dev, &msg); 1432 } 1433 1434 static bool 1435 vu_set_vring_call_exec(VuDev *dev, VhostUserMsg *vmsg) 1436 { 1437 int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK; 1438 bool nofd = vmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK; 1439 1440 DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64); 1441 1442 if (!vu_check_queue_msg_file(dev, vmsg)) { 1443 return false; 1444 } 1445 1446 if (dev->vq[index].call_fd != -1) { 1447 close(dev->vq[index].call_fd); 1448 dev->vq[index].call_fd = -1; 1449 } 1450 1451 dev->vq[index].call_fd = nofd ? -1 : vmsg->fds[0]; 1452 1453 /* in case of I/O hang after reconnecting */ 1454 if (dev->vq[index].call_fd != -1 && eventfd_write(vmsg->fds[0], 1)) { 1455 return -1; 1456 } 1457 1458 DPRINT("Got call_fd: %d for vq: %d\n", dev->vq[index].call_fd, index); 1459 1460 return false; 1461 } 1462 1463 static bool 1464 vu_set_vring_err_exec(VuDev *dev, VhostUserMsg *vmsg) 1465 { 1466 int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK; 1467 bool nofd = vmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK; 1468 1469 DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64); 1470 1471 if (!vu_check_queue_msg_file(dev, vmsg)) { 1472 return false; 1473 } 1474 1475 if (dev->vq[index].err_fd != -1) { 1476 close(dev->vq[index].err_fd); 1477 dev->vq[index].err_fd = -1; 1478 } 1479 1480 dev->vq[index].err_fd = nofd ? -1 : vmsg->fds[0]; 1481 1482 return false; 1483 } 1484 1485 static bool 1486 vu_get_protocol_features_exec(VuDev *dev, VhostUserMsg *vmsg) 1487 { 1488 /* 1489 * Note that we support, but intentionally do not set, 1490 * VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS. This means that 1491 * a device implementation can return it in its callback 1492 * (get_protocol_features) if it wants to use this for 1493 * simulation, but it is otherwise not desirable (if even 1494 * implemented by the frontend.) 

static bool
vu_set_vring_call_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
    bool nofd = vmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK;

    DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);

    if (!vu_check_queue_msg_file(dev, vmsg)) {
        return false;
    }

    if (dev->vq[index].call_fd != -1) {
        close(dev->vq[index].call_fd);
        dev->vq[index].call_fd = -1;
    }

    dev->vq[index].call_fd = nofd ? -1 : vmsg->fds[0];

    /* in case of I/O hang after reconnecting */
    if (dev->vq[index].call_fd != -1 && eventfd_write(vmsg->fds[0], 1)) {
        return false;
    }

    DPRINT("Got call_fd: %d for vq: %d\n", dev->vq[index].call_fd, index);

    return false;
}

static bool
vu_set_vring_err_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
    bool nofd = vmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK;

    DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);

    if (!vu_check_queue_msg_file(dev, vmsg)) {
        return false;
    }

    if (dev->vq[index].err_fd != -1) {
        close(dev->vq[index].err_fd);
        dev->vq[index].err_fd = -1;
    }

    dev->vq[index].err_fd = nofd ? -1 : vmsg->fds[0];

    return false;
}

static bool
vu_get_protocol_features_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    /*
     * Note that we support, but intentionally do not set,
     * VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS. This means that
     * a device implementation can return it in its callback
     * (get_protocol_features) if it wants to use this for
     * simulation, but it is otherwise not desirable (if even
     * implemented by the frontend).
     */
    uint64_t features = 1ULL << VHOST_USER_PROTOCOL_F_MQ |
                        1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD |
                        1ULL << VHOST_USER_PROTOCOL_F_BACKEND_REQ |
                        1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER |
                        1ULL << VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD |
                        1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK |
                        1ULL << VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS;

    if (have_userfault()) {
        features |= 1ULL << VHOST_USER_PROTOCOL_F_PAGEFAULT;
    }

    if (dev->iface->get_config && dev->iface->set_config) {
        features |= 1ULL << VHOST_USER_PROTOCOL_F_CONFIG;
    }

    if (dev->iface->get_protocol_features) {
        features |= dev->iface->get_protocol_features(dev);
    }

    vmsg_set_reply_u64(vmsg, features);
    return true;
}

static bool
vu_set_protocol_features_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    uint64_t features = vmsg->payload.u64;

    DPRINT("u64: 0x%016"PRIx64"\n", features);

    dev->protocol_features = vmsg->payload.u64;

    if (vu_has_protocol_feature(dev,
                                VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS) &&
        (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_BACKEND_REQ) ||
         !vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_REPLY_ACK))) {
        /*
         * The use case for using messages for kick/call is simulation, to
         * make the kick and call synchronous. To actually get that
         * behaviour, both of the other features are required.
         * Theoretically, one could use only kick messages, or do them
         * without having F_REPLY_ACK, but too many (possibly pending)
         * messages on the socket will eventually cause the frontend to
         * hang. To avoid this in scenarios where it is not desired, enforce
         * a feature combination that actually enables the simulation case.
         */
        vu_panic(dev,
                 "F_IN_BAND_NOTIFICATIONS requires F_BACKEND_REQ && F_REPLY_ACK");
        return false;
    }

    if (dev->iface->set_protocol_features) {
        dev->iface->set_protocol_features(dev, features);
    }

    return false;
}

static bool
vu_get_queue_num_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    vmsg_set_reply_u64(vmsg, dev->max_queues);
    return true;
}

static bool
vu_set_vring_enable_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    unsigned int index = vmsg->payload.state.index;
    unsigned int enable = vmsg->payload.state.num;

    DPRINT("State.index:  %u\n", index);
    DPRINT("State.enable: %u\n", enable);

    if (index >= dev->max_queues) {
        vu_panic(dev, "Invalid vring_enable index: %u", index);
        return false;
    }

    dev->vq[index].enable = enable;
    return false;
}

static bool
vu_set_backend_req_fd(VuDev *dev, VhostUserMsg *vmsg)
{
    if (vmsg->fd_num != 1) {
        vu_panic(dev, "Invalid backend_req_fd message (%d fds)", vmsg->fd_num);
        return false;
    }

    if (dev->backend_fd != -1) {
        close(dev->backend_fd);
    }
    dev->backend_fd = vmsg->fds[0];
    DPRINT("Got backend_fd: %d\n", vmsg->fds[0]);

    return false;
}

static bool
vu_get_config(VuDev *dev, VhostUserMsg *vmsg)
{
    int ret = -1;

    if (dev->iface->get_config) {
        ret = dev->iface->get_config(dev, vmsg->payload.config.region,
                                     vmsg->payload.config.size);
    }

    if (ret) {
        /* resize to zero to indicate an error to frontend */
        vmsg->size = 0;
    }

    return true;
}

static bool
vu_set_config(VuDev *dev, VhostUserMsg *vmsg)
{
    int ret = -1;

    if (dev->iface->set_config) {
        ret = dev->iface->set_config(dev, vmsg->payload.config.region,
                                     vmsg->payload.config.offset,
                                     vmsg->payload.config.size,
                                     vmsg->payload.config.flags);
        if (ret) {
            vu_panic(dev, "Set virtio configuration space failed");
        }
    }

    return false;
}

static bool
vu_set_postcopy_advise(VuDev *dev, VhostUserMsg *vmsg)
{
#ifdef UFFDIO_API
    struct uffdio_api api_struct;

    dev->postcopy_ufd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
    vmsg->size = 0;
#else
    dev->postcopy_ufd = -1;
#endif

    if (dev->postcopy_ufd == -1) {
        vu_panic(dev, "Userfaultfd not available: %s", strerror(errno));
        goto out;
    }

#ifdef UFFDIO_API
    api_struct.api = UFFD_API;
    api_struct.features = 0;
    if (ioctl(dev->postcopy_ufd, UFFDIO_API, &api_struct)) {
        vu_panic(dev, "Failed UFFDIO_API: %s", strerror(errno));
        close(dev->postcopy_ufd);
        dev->postcopy_ufd = -1;
        goto out;
    }
    /* TODO: Stash feature flags somewhere */
#endif

out:
    /* Return a ufd to QEMU */
    vmsg->fd_num = 1;
    vmsg->fds[0] = dev->postcopy_ufd;
    return true; /* = send a reply */
}

static bool
vu_set_postcopy_listen(VuDev *dev, VhostUserMsg *vmsg)
{
    if (dev->nregions) {
        vu_panic(dev, "Regions already registered at postcopy-listen");
        vmsg_set_reply_u64(vmsg, -1);
        return true;
    }
    dev->postcopy_listening = true;

    vmsg_set_reply_u64(vmsg, 0);
    return true;
}

static bool
vu_set_postcopy_end(VuDev *dev, VhostUserMsg *vmsg)
{
    DPRINT("%s: Entry\n", __func__);
    dev->postcopy_listening = false;
    if (dev->postcopy_ufd > 0) {
        close(dev->postcopy_ufd);
        dev->postcopy_ufd = -1;
        DPRINT("%s: Done close\n", __func__);
    }

    vmsg_set_reply_u64(vmsg, 0);
    DPRINT("%s: exit\n", __func__);
    return true;
}

static inline uint64_t
vu_inflight_queue_size(uint16_t queue_size)
{
    return ALIGN_UP(sizeof(VuDescStateSplit) * queue_size +
                    sizeof(uint16_t), INFLIGHT_ALIGNMENT);
}
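
/*
 * Worked example (illustrative, assuming the 16-byte VuDescStateSplit
 * layout from libvhost-user.h): for queue_size == 256, the per-queue
 * tracking area is 16 * 256 + 2 = 4098 bytes, rounded up to the 64-byte
 * INFLIGHT_ALIGNMENT, i.e. 4160 bytes.
 */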
DPRINT("set_inflight_fd num_queues: %"PRId16"\n", num_queues); 1816 DPRINT("set_inflight_fd queue_size: %"PRId16"\n", queue_size); 1817 1818 rc = mmap(0, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED, 1819 fd, mmap_offset); 1820 1821 if (rc == MAP_FAILED) { 1822 vu_panic(dev, "set_inflight_fd mmap error: %s", strerror(errno)); 1823 return false; 1824 } 1825 1826 if (dev->inflight_info.fd) { 1827 close(dev->inflight_info.fd); 1828 } 1829 1830 if (dev->inflight_info.addr) { 1831 munmap(dev->inflight_info.addr, dev->inflight_info.size); 1832 } 1833 1834 dev->inflight_info.fd = fd; 1835 dev->inflight_info.addr = rc; 1836 dev->inflight_info.size = mmap_size; 1837 1838 for (i = 0; i < num_queues; i++) { 1839 dev->vq[i].inflight = (VuVirtqInflight *)rc; 1840 dev->vq[i].inflight->desc_num = queue_size; 1841 rc = (void *)((char *)rc + vu_inflight_queue_size(queue_size)); 1842 } 1843 1844 return false; 1845 } 1846 1847 static bool 1848 vu_handle_vring_kick(VuDev *dev, VhostUserMsg *vmsg) 1849 { 1850 unsigned int index = vmsg->payload.state.index; 1851 1852 if (index >= dev->max_queues) { 1853 vu_panic(dev, "Invalid queue index: %u", index); 1854 return false; 1855 } 1856 1857 DPRINT("Got kick message: handler:%p idx:%u\n", 1858 dev->vq[index].handler, index); 1859 1860 if (!dev->vq[index].started) { 1861 dev->vq[index].started = true; 1862 1863 if (dev->iface->queue_set_started) { 1864 dev->iface->queue_set_started(dev, index, true); 1865 } 1866 } 1867 1868 if (dev->vq[index].handler) { 1869 dev->vq[index].handler(dev, index); 1870 } 1871 1872 return false; 1873 } 1874 1875 static bool vu_handle_get_max_memslots(VuDev *dev, VhostUserMsg *vmsg) 1876 { 1877 vmsg_set_reply_u64(vmsg, VHOST_USER_MAX_RAM_SLOTS); 1878 1879 DPRINT("u64: 0x%016"PRIx64"\n", (uint64_t) VHOST_USER_MAX_RAM_SLOTS); 1880 1881 return true; 1882 } 1883 1884 static bool 1885 vu_process_message(VuDev *dev, VhostUserMsg *vmsg) 1886 { 1887 int do_reply = 0; 1888 1889 /* Print out generic part of the request. 
*/ 1890 DPRINT("================ Vhost user message ================\n"); 1891 DPRINT("Request: %s (%d)\n", vu_request_to_string(vmsg->request), 1892 vmsg->request); 1893 DPRINT("Flags: 0x%x\n", vmsg->flags); 1894 DPRINT("Size: %u\n", vmsg->size); 1895 1896 if (vmsg->fd_num) { 1897 int i; 1898 DPRINT("Fds:"); 1899 for (i = 0; i < vmsg->fd_num; i++) { 1900 DPRINT(" %d", vmsg->fds[i]); 1901 } 1902 DPRINT("\n"); 1903 } 1904 1905 if (dev->iface->process_msg && 1906 dev->iface->process_msg(dev, vmsg, &do_reply)) { 1907 return do_reply; 1908 } 1909 1910 switch (vmsg->request) { 1911 case VHOST_USER_GET_FEATURES: 1912 return vu_get_features_exec(dev, vmsg); 1913 case VHOST_USER_SET_FEATURES: 1914 return vu_set_features_exec(dev, vmsg); 1915 case VHOST_USER_GET_PROTOCOL_FEATURES: 1916 return vu_get_protocol_features_exec(dev, vmsg); 1917 case VHOST_USER_SET_PROTOCOL_FEATURES: 1918 return vu_set_protocol_features_exec(dev, vmsg); 1919 case VHOST_USER_SET_OWNER: 1920 return vu_set_owner_exec(dev, vmsg); 1921 case VHOST_USER_RESET_OWNER: 1922 return vu_reset_device_exec(dev, vmsg); 1923 case VHOST_USER_SET_MEM_TABLE: 1924 return vu_set_mem_table_exec(dev, vmsg); 1925 case VHOST_USER_SET_LOG_BASE: 1926 return vu_set_log_base_exec(dev, vmsg); 1927 case VHOST_USER_SET_LOG_FD: 1928 return vu_set_log_fd_exec(dev, vmsg); 1929 case VHOST_USER_SET_VRING_NUM: 1930 return vu_set_vring_num_exec(dev, vmsg); 1931 case VHOST_USER_SET_VRING_ADDR: 1932 return vu_set_vring_addr_exec(dev, vmsg); 1933 case VHOST_USER_SET_VRING_BASE: 1934 return vu_set_vring_base_exec(dev, vmsg); 1935 case VHOST_USER_GET_VRING_BASE: 1936 return vu_get_vring_base_exec(dev, vmsg); 1937 case VHOST_USER_SET_VRING_KICK: 1938 return vu_set_vring_kick_exec(dev, vmsg); 1939 case VHOST_USER_SET_VRING_CALL: 1940 return vu_set_vring_call_exec(dev, vmsg); 1941 case VHOST_USER_SET_VRING_ERR: 1942 return vu_set_vring_err_exec(dev, vmsg); 1943 case VHOST_USER_GET_QUEUE_NUM: 1944 return vu_get_queue_num_exec(dev, vmsg); 1945 case VHOST_USER_SET_VRING_ENABLE: 1946 return vu_set_vring_enable_exec(dev, vmsg); 1947 case VHOST_USER_SET_BACKEND_REQ_FD: 1948 return vu_set_backend_req_fd(dev, vmsg); 1949 case VHOST_USER_GET_CONFIG: 1950 return vu_get_config(dev, vmsg); 1951 case VHOST_USER_SET_CONFIG: 1952 return vu_set_config(dev, vmsg); 1953 case VHOST_USER_NONE: 1954 /* if you need processing before exit, override iface->process_msg */ 1955 exit(0); 1956 case VHOST_USER_POSTCOPY_ADVISE: 1957 return vu_set_postcopy_advise(dev, vmsg); 1958 case VHOST_USER_POSTCOPY_LISTEN: 1959 return vu_set_postcopy_listen(dev, vmsg); 1960 case VHOST_USER_POSTCOPY_END: 1961 return vu_set_postcopy_end(dev, vmsg); 1962 case VHOST_USER_GET_INFLIGHT_FD: 1963 return vu_get_inflight_fd(dev, vmsg); 1964 case VHOST_USER_SET_INFLIGHT_FD: 1965 return vu_set_inflight_fd(dev, vmsg); 1966 case VHOST_USER_VRING_KICK: 1967 return vu_handle_vring_kick(dev, vmsg); 1968 case VHOST_USER_GET_MAX_MEM_SLOTS: 1969 return vu_handle_get_max_memslots(dev, vmsg); 1970 case VHOST_USER_ADD_MEM_REG: 1971 return vu_add_mem_reg(dev, vmsg); 1972 case VHOST_USER_REM_MEM_REG: 1973 return vu_rem_mem_reg(dev, vmsg); 1974 case VHOST_USER_GET_SHARED_OBJECT: 1975 return vu_get_shared_object(dev, vmsg); 1976 default: 1977 vmsg_close_fds(vmsg); 1978 vu_panic(dev, "Unhandled request: %d", vmsg->request); 1979 } 1980 1981 return false; 1982 } 1983 1984 bool 1985 vu_dispatch(VuDev *dev) 1986 { 1987 VhostUserMsg vmsg = { 0, }; 1988 int reply_requested; 1989 bool need_reply, success = false; 1990 1991 if 

bool
vu_dispatch(VuDev *dev)
{
    VhostUserMsg vmsg = { 0, };
    int reply_requested;
    bool need_reply, success = false;

    if (!dev->read_msg(dev, dev->sock, &vmsg)) {
        goto end;
    }

    need_reply = vmsg.flags & VHOST_USER_NEED_REPLY_MASK;

    reply_requested = vu_process_message(dev, &vmsg);
    if (!reply_requested && need_reply) {
        vmsg_set_reply_u64(&vmsg, 0);
        reply_requested = 1;
    }

    if (!reply_requested) {
        success = true;
        goto end;
    }

    if (!vu_send_reply(dev, dev->sock, &vmsg)) {
        goto end;
    }

    success = true;

end:
    free(vmsg.data);
    return success;
}

void
vu_deinit(VuDev *dev)
{
    unsigned int i;

    vu_remove_all_mem_regs(dev);

    for (i = 0; i < dev->max_queues; i++) {
        VuVirtq *vq = &dev->vq[i];

        if (vq->call_fd != -1) {
            close(vq->call_fd);
            vq->call_fd = -1;
        }

        if (vq->kick_fd != -1) {
            dev->remove_watch(dev, vq->kick_fd);
            close(vq->kick_fd);
            vq->kick_fd = -1;
        }

        if (vq->err_fd != -1) {
            close(vq->err_fd);
            vq->err_fd = -1;
        }

        if (vq->resubmit_list) {
            free(vq->resubmit_list);
            vq->resubmit_list = NULL;
        }

        vq->inflight = NULL;
    }

    if (dev->inflight_info.addr) {
        munmap(dev->inflight_info.addr, dev->inflight_info.size);
        dev->inflight_info.addr = NULL;
    }

    if (dev->inflight_info.fd > 0) {
        close(dev->inflight_info.fd);
        dev->inflight_info.fd = -1;
    }

    vu_close_log(dev);
    if (dev->backend_fd != -1) {
        close(dev->backend_fd);
        dev->backend_fd = -1;
    }
    pthread_mutex_destroy(&dev->backend_mutex);

    if (dev->sock != -1) {
        close(dev->sock);
    }

    free(dev->vq);
    dev->vq = NULL;
    free(dev->regions);
    dev->regions = NULL;
}

bool
vu_init(VuDev *dev,
        uint16_t max_queues,
        int socket,
        vu_panic_cb panic,
        vu_read_msg_cb read_msg,
        vu_set_watch_cb set_watch,
        vu_remove_watch_cb remove_watch,
        const VuDevIface *iface)
{
    uint16_t i;

    assert(max_queues > 0);
    assert(socket >= 0);
    assert(set_watch);
    assert(remove_watch);
    assert(iface);
    assert(panic);

    memset(dev, 0, sizeof(*dev));

    dev->sock = socket;
    dev->panic = panic;
    dev->read_msg = read_msg ? read_msg : vu_message_read_default;
    dev->set_watch = set_watch;
    dev->remove_watch = remove_watch;
    dev->iface = iface;
    dev->log_call_fd = -1;
    pthread_mutex_init(&dev->backend_mutex, NULL);
    dev->backend_fd = -1;
    dev->max_queues = max_queues;

    dev->regions = malloc(VHOST_USER_MAX_RAM_SLOTS * sizeof(dev->regions[0]));
    if (!dev->regions) {
        DPRINT("%s: failed to malloc mem regions\n", __func__);
        return false;
    }

    dev->vq = malloc(max_queues * sizeof(dev->vq[0]));
    if (!dev->vq) {
        DPRINT("%s: failed to malloc virtqueues\n", __func__);
        free(dev->regions);
        dev->regions = NULL;
        return false;
    }

    for (i = 0; i < max_queues; i++) {
        dev->vq[i] = (VuVirtq) {
            .call_fd = -1, .kick_fd = -1, .err_fd = -1,
            .notification = true,
        };
    }

    return true;
}

VuVirtq *
vu_get_queue(VuDev *dev, int qidx)
{
    assert(qidx < dev->max_queues);
    return &dev->vq[qidx];
}

bool
vu_queue_enabled(VuDev *dev, VuVirtq *vq)
{
    return vq->enable;
}

bool
vu_queue_started(const VuDev *dev, const VuVirtq *vq)
{
    return vq->started;
}

static inline uint16_t
vring_avail_flags(VuVirtq *vq)
{
    return le16toh(vq->vring.avail->flags);
}

static inline uint16_t
vring_avail_idx(VuVirtq *vq)
{
    vq->shadow_avail_idx = le16toh(vq->vring.avail->idx);

    return vq->shadow_avail_idx;
}

static inline uint16_t
vring_avail_ring(VuVirtq *vq, int i)
{
    return le16toh(vq->vring.avail->ring[i]);
}

static inline uint16_t
vring_get_used_event(VuVirtq *vq)
{
    return vring_avail_ring(vq, vq->vring.num);
}

static int
virtqueue_num_heads(VuDev *dev, VuVirtq *vq, unsigned int idx)
{
    uint16_t num_heads = vring_avail_idx(vq) - idx;

    /* Check it isn't doing very strange things with descriptor numbers. */
    if (num_heads > vq->vring.num) {
        vu_panic(dev, "Guest moved used index from %u to %u",
                 idx, vq->shadow_avail_idx);
        return -1;
    }
    if (num_heads) {
        /* On success, callers read a descriptor at vq->last_avail_idx.
         * Make sure descriptor read does not bypass avail index read. */
        smp_rmb();
    }

    return num_heads;
}
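
/*
 * Note on the index arithmetic above: avail->idx is a free-running
 * 16-bit counter, so the subtraction is performed modulo 65536 and
 * stays correct across wrap-around. For example, with last_avail_idx ==
 * 0xfffe and avail->idx == 0x0001, (uint16_t)(0x0001 - 0xfffe) == 3
 * heads are pending, even though the raw values compare "backwards".
 */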

static bool
virtqueue_get_head(VuDev *dev, VuVirtq *vq,
                   unsigned int idx, unsigned int *head)
{
    /* Grab the next descriptor number they're advertising, and increment
     * the index we've seen. */
    *head = vring_avail_ring(vq, idx % vq->vring.num);

    /* If their number is silly, that's a fatal mistake. */
    if (*head >= vq->vring.num) {
        vu_panic(dev, "Guest says index %u is available", *head);
        return false;
    }

    return true;
}

static int
virtqueue_read_indirect_desc(VuDev *dev, struct vring_desc *desc,
                             uint64_t addr, size_t len)
{
    struct vring_desc *ori_desc;
    uint64_t read_len;

    if (len > (VIRTQUEUE_MAX_SIZE * sizeof(struct vring_desc))) {
        return -1;
    }

    if (len == 0) {
        return -1;
    }

    while (len) {
        read_len = len;
        ori_desc = vu_gpa_to_va(dev, &read_len, addr);
        if (!ori_desc) {
            return -1;
        }

        memcpy(desc, ori_desc, read_len);
        len -= read_len;
        addr += read_len;
        /* desc is a struct vring_desc pointer: advance it by elements,
         * not bytes, when the table crosses a region boundary. */
        desc += read_len / sizeof(struct vring_desc);
    }

    return 0;
}

enum {
    VIRTQUEUE_READ_DESC_ERROR = -1,
    VIRTQUEUE_READ_DESC_DONE = 0,   /* end of chain */
    VIRTQUEUE_READ_DESC_MORE = 1,   /* more buffers in chain */
};

static int
virtqueue_read_next_desc(VuDev *dev, struct vring_desc *desc,
                         int i, unsigned int max, unsigned int *next)
{
    /* If this descriptor says it doesn't chain, we're done. */
    if (!(le16toh(desc[i].flags) & VRING_DESC_F_NEXT)) {
        return VIRTQUEUE_READ_DESC_DONE;
    }

    /* Check they're not leading us off end of descriptors. */
    *next = le16toh(desc[i].next);
    /* Make sure compiler knows to grab that: we don't want it changing! */
    smp_wmb();

    if (*next >= max) {
        vu_panic(dev, "Desc next is %u", *next);
        return VIRTQUEUE_READ_DESC_ERROR;
    }

    return VIRTQUEUE_READ_DESC_MORE;
}
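
/*
 * The canonical way to walk a descriptor chain with the helper above,
 * as a sketch (desc/max/i are assumed to be set up as in
 * vu_queue_get_avail_bytes() below):
 *
 *     int rc;
 *     do {
 *         // ... inspect desc[i].addr / desc[i].len / desc[i].flags ...
 *         rc = virtqueue_read_next_desc(dev, desc, i, max, &i);
 *     } while (rc == VIRTQUEUE_READ_DESC_MORE);
 *     if (rc == VIRTQUEUE_READ_DESC_ERROR) {
 *         // the device has already been marked broken by vu_panic()
 *     }
 */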

void
vu_queue_get_avail_bytes(VuDev *dev, VuVirtq *vq, unsigned int *in_bytes,
                         unsigned int *out_bytes,
                         unsigned max_in_bytes, unsigned max_out_bytes)
{
    unsigned int idx;
    unsigned int total_bufs, in_total, out_total;
    int rc;

    idx = vq->last_avail_idx;

    total_bufs = in_total = out_total = 0;
    if (unlikely(dev->broken) ||
        unlikely(!vq->vring.avail)) {
        goto done;
    }

    while ((rc = virtqueue_num_heads(dev, vq, idx)) > 0) {
        unsigned int max, desc_len, num_bufs, indirect = 0;
        uint64_t desc_addr, read_len;
        struct vring_desc *desc;
        struct vring_desc desc_buf[VIRTQUEUE_MAX_SIZE];
        unsigned int i;

        max = vq->vring.num;
        num_bufs = total_bufs;
        if (!virtqueue_get_head(dev, vq, idx++, &i)) {
            goto err;
        }
        desc = vq->vring.desc;

        if (le16toh(desc[i].flags) & VRING_DESC_F_INDIRECT) {
            if (le32toh(desc[i].len) % sizeof(struct vring_desc)) {
                vu_panic(dev, "Invalid size for indirect buffer table");
                goto err;
            }

            /* If we've got too many, that implies a descriptor loop. */
            if (num_bufs >= max) {
                vu_panic(dev, "Looped descriptor");
                goto err;
            }

            /* loop over the indirect descriptor table */
            indirect = 1;
            desc_addr = le64toh(desc[i].addr);
            desc_len = le32toh(desc[i].len);
            max = desc_len / sizeof(struct vring_desc);
            read_len = desc_len;
            desc = vu_gpa_to_va(dev, &read_len, desc_addr);
            if (unlikely(desc && read_len != desc_len)) {
                /* Failed to use zero copy */
                desc = NULL;
                if (!virtqueue_read_indirect_desc(dev, desc_buf,
                                                  desc_addr,
                                                  desc_len)) {
                    desc = desc_buf;
                }
            }
            if (!desc) {
                vu_panic(dev, "Invalid indirect buffer table");
                goto err;
            }
            num_bufs = i = 0;
        }

        do {
            /* If we've got too many, that implies a descriptor loop. */
            if (++num_bufs > max) {
                vu_panic(dev, "Looped descriptor");
                goto err;
            }

            if (le16toh(desc[i].flags) & VRING_DESC_F_WRITE) {
                in_total += le32toh(desc[i].len);
            } else {
                out_total += le32toh(desc[i].len);
            }
            if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
                goto done;
            }
            rc = virtqueue_read_next_desc(dev, desc, i, max, &i);
        } while (rc == VIRTQUEUE_READ_DESC_MORE);

        if (rc == VIRTQUEUE_READ_DESC_ERROR) {
            goto err;
        }

        if (!indirect) {
            total_bufs = num_bufs;
        } else {
            total_bufs++;
        }
    }
    if (rc < 0) {
        goto err;
    }
done:
    if (in_bytes) {
        *in_bytes = in_total;
    }
    if (out_bytes) {
        *out_bytes = out_total;
    }
    return;

err:
    in_total = out_total = 0;
    goto done;
}

bool
vu_queue_avail_bytes(VuDev *dev, VuVirtq *vq, unsigned int in_bytes,
                     unsigned int out_bytes)
{
    unsigned int in_total, out_total;

    vu_queue_get_avail_bytes(dev, vq, &in_total, &out_total,
                             in_bytes, out_bytes);

    return in_bytes <= in_total && out_bytes <= out_total;
}

/* Fetch avail_idx from VQ memory only when we really need to know if
 * guest has added some buffers. */
bool
vu_queue_empty(VuDev *dev, VuVirtq *vq)
{
    if (unlikely(dev->broken) ||
        unlikely(!vq->vring.avail)) {
        return true;
    }

    if (vq->shadow_avail_idx != vq->last_avail_idx) {
        return false;
    }

    return vring_avail_idx(vq) == vq->last_avail_idx;
}
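
/*
 * For example, a backend that needs at least a 12-byte device-writable
 * header plus a 512-byte device-readable payload can gate its pop on:
 *
 *     if (vu_queue_avail_bytes(dev, vq, 12, 512)) {
 *         // enough buffer space: safe to vu_queue_pop() and process
 *     }
 *
 * The 12/512 sizes are illustrative only; "in" counts device-writable
 * bytes and "out" device-readable bytes, following the virtio
 * convention used throughout this file.
 */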

static bool
vring_notify(VuDev *dev, VuVirtq *vq)
{
    uint16_t old, new;
    bool v;

    /* We need to expose used array entries before checking used event. */
    smp_mb();

    /* Always notify when queue is empty (when feature acknowledge) */
    if (vu_has_feature(dev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
        !vq->inuse && vu_queue_empty(dev, vq)) {
        return true;
    }

    if (!vu_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
        return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT);
    }

    v = vq->signalled_used_valid;
    vq->signalled_used_valid = true;
    old = vq->signalled_used;
    new = vq->signalled_used = vq->used_idx;
    return !v || vring_need_event(vring_get_used_event(vq), new, old);
}

static void _vu_queue_notify(VuDev *dev, VuVirtq *vq, bool sync)
{
    if (unlikely(dev->broken) ||
        unlikely(!vq->vring.avail)) {
        return;
    }

    if (!vring_notify(dev, vq)) {
        DPRINT("skipped notify...\n");
        return;
    }

    if (vq->call_fd < 0 &&
        vu_has_protocol_feature(dev,
                                VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS) &&
        vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_BACKEND_REQ)) {
        VhostUserMsg vmsg = {
            .request = VHOST_USER_BACKEND_VRING_CALL,
            .flags = VHOST_USER_VERSION,
            .size = sizeof(vmsg.payload.state),
            .payload.state = {
                .index = vq - dev->vq,
            },
        };
        bool ack = sync &&
                   vu_has_protocol_feature(dev,
                                           VHOST_USER_PROTOCOL_F_REPLY_ACK);

        if (ack) {
            vmsg.flags |= VHOST_USER_NEED_REPLY_MASK;
        }

        vu_message_write(dev, dev->backend_fd, &vmsg);
        if (ack) {
            vu_message_read_default(dev, dev->backend_fd, &vmsg);
        }
        return;
    }

    if (eventfd_write(vq->call_fd, 1) < 0) {
        vu_panic(dev, "Error writing eventfd: %s", strerror(errno));
    }
}

void vu_queue_notify(VuDev *dev, VuVirtq *vq)
{
    _vu_queue_notify(dev, vq, false);
}

void vu_queue_notify_sync(VuDev *dev, VuVirtq *vq)
{
    _vu_queue_notify(dev, vq, true);
}

void vu_config_change_msg(VuDev *dev)
{
    VhostUserMsg vmsg = {
        .request = VHOST_USER_BACKEND_CONFIG_CHANGE_MSG,
        .flags = VHOST_USER_VERSION,
    };

    vu_message_write(dev, dev->backend_fd, &vmsg);
}

static inline void
vring_used_flags_set_bit(VuVirtq *vq, int mask)
{
    uint16_t *flags;

    flags = (uint16_t *)((char *)vq->vring.used +
                         offsetof(struct vring_used, flags));
    *flags = htole16(le16toh(*flags) | mask);
}

static inline void
vring_used_flags_unset_bit(VuVirtq *vq, int mask)
{
    uint16_t *flags;

    flags = (uint16_t *)((char *)vq->vring.used +
                         offsetof(struct vring_used, flags));
    *flags = htole16(le16toh(*flags) & ~mask);
}

static inline void
vring_set_avail_event(VuVirtq *vq, uint16_t val)
{
    uint16_t val_le = htole16(val);

    if (!vq->notification) {
        return;
    }

    memcpy(&vq->vring.used->ring[vq->vring.num], &val_le, sizeof(uint16_t));
}

void
vu_queue_set_notification(VuDev *dev, VuVirtq *vq, int enable)
{
    vq->notification = enable;
    if (vu_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
        vring_set_avail_event(vq, vring_avail_idx(vq));
    } else if (enable) {
        vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
    } else {
        vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
    }
    if (enable) {
        /* Expose avail event/used flags before caller checks the avail idx. */
        smp_mb();
    }
}
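
/*
 * vu_queue_set_notification() supports the usual "suppress kicks while
 * polling" pattern. A minimal sketch of a kick handler draining a queue
 * (process_elem() is a hypothetical per-element handler); the final
 * emptiness re-check after re-enabling is what the smp_mb() above makes
 * sound:
 *
 *     for (;;) {
 *         VuVirtqElement *elem;
 *
 *         vu_queue_set_notification(dev, vq, 0);
 *         while ((elem = vu_queue_pop(dev, vq, sizeof(*elem)))) {
 *             process_elem(dev, vq, elem);
 *             free(elem);
 *         }
 *         vu_queue_set_notification(dev, vq, 1);
 *         if (vu_queue_empty(dev, vq)) {
 *             break;
 *         }
 *     }
 */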

static bool
virtqueue_map_desc(VuDev *dev,
                   unsigned int *p_num_sg, struct iovec *iov,
                   unsigned int max_num_sg, bool is_write,
                   uint64_t pa, size_t sz)
{
    unsigned num_sg = *p_num_sg;

    assert(num_sg <= max_num_sg);

    if (!sz) {
        vu_panic(dev, "virtio: zero sized buffers are not allowed");
        return false;
    }

    while (sz) {
        uint64_t len = sz;

        if (num_sg == max_num_sg) {
            vu_panic(dev, "virtio: too many descriptors in indirect table");
            return false;
        }

        iov[num_sg].iov_base = vu_gpa_to_va(dev, &len, pa);
        if (iov[num_sg].iov_base == NULL) {
            vu_panic(dev, "virtio: invalid address for buffers");
            return false;
        }
        iov[num_sg].iov_len = len;
        num_sg++;
        sz -= len;
        pa += len;
    }

    *p_num_sg = num_sg;
    return true;
}

static void *
virtqueue_alloc_element(size_t sz,
                        unsigned out_num, unsigned in_num)
{
    VuVirtqElement *elem;
    size_t in_sg_ofs = ALIGN_UP(sz, __alignof__(elem->in_sg[0]));
    size_t out_sg_ofs = in_sg_ofs + in_num * sizeof(elem->in_sg[0]);
    size_t out_sg_end = out_sg_ofs + out_num * sizeof(elem->out_sg[0]);

    assert(sz >= sizeof(VuVirtqElement));
    elem = malloc(out_sg_end);
    if (!elem) {
        DPRINT("%s: failed to malloc virtqueue element\n", __func__);
        return NULL;
    }
    elem->out_num = out_num;
    elem->in_num = in_num;
    elem->in_sg = (void *)elem + in_sg_ofs;
    elem->out_sg = (void *)elem + out_sg_ofs;
    return elem;
}

static void *
vu_queue_map_desc(VuDev *dev, VuVirtq *vq, unsigned int idx, size_t sz)
{
    struct vring_desc *desc = vq->vring.desc;
    uint64_t desc_addr, read_len;
    unsigned int desc_len;
    unsigned int max = vq->vring.num;
    unsigned int i = idx;
    VuVirtqElement *elem;
    unsigned int out_num = 0, in_num = 0;
    struct iovec iov[VIRTQUEUE_MAX_SIZE];
    struct vring_desc desc_buf[VIRTQUEUE_MAX_SIZE];
    int rc;

    if (le16toh(desc[i].flags) & VRING_DESC_F_INDIRECT) {
        if (le32toh(desc[i].len) % sizeof(struct vring_desc)) {
            vu_panic(dev, "Invalid size for indirect buffer table");
            return NULL;
        }

        /* loop over the indirect descriptor table */
        desc_addr = le64toh(desc[i].addr);
        desc_len = le32toh(desc[i].len);
        max = desc_len / sizeof(struct vring_desc);
        read_len = desc_len;
        desc = vu_gpa_to_va(dev, &read_len, desc_addr);
        if (unlikely(desc && read_len != desc_len)) {
            /* Failed to use zero copy */
            desc = NULL;
            if (!virtqueue_read_indirect_desc(dev, desc_buf,
                                              desc_addr,
                                              desc_len)) {
                desc = desc_buf;
            }
        }
        if (!desc) {
            vu_panic(dev, "Invalid indirect buffer table");
            return NULL;
        }
        i = 0;
    }

    /* Collect all the descriptors */
    do {
        if (le16toh(desc[i].flags) & VRING_DESC_F_WRITE) {
            if (!virtqueue_map_desc(dev, &in_num, iov + out_num,
                                    VIRTQUEUE_MAX_SIZE - out_num, true,
                                    le64toh(desc[i].addr),
                                    le32toh(desc[i].len))) {
                return NULL;
            }
        } else {
            if (in_num) {
                vu_panic(dev, "Incorrect order for descriptors");
                return NULL;
            }
            if (!virtqueue_map_desc(dev, &out_num, iov,
                                    VIRTQUEUE_MAX_SIZE, false,
                                    le64toh(desc[i].addr),
                                    le32toh(desc[i].len))) {
                return NULL;
            }
        }

        /* If we've got too many, that implies a descriptor loop. */
        if ((in_num + out_num) > max) {
            vu_panic(dev, "Looped descriptor");
            return NULL;
        }
        rc = virtqueue_read_next_desc(dev, desc, i, max, &i);
    } while (rc == VIRTQUEUE_READ_DESC_MORE);

    if (rc == VIRTQUEUE_READ_DESC_ERROR) {
        vu_panic(dev, "read descriptor error");
        return NULL;
    }

    /* Now copy what we have collected and mapped */
    elem = virtqueue_alloc_element(sz, out_num, in_num);
    if (!elem) {
        return NULL;
    }
    elem->index = idx;
    for (i = 0; i < out_num; i++) {
        elem->out_sg[i] = iov[i];
    }
    for (i = 0; i < in_num; i++) {
        elem->in_sg[i] = iov[out_num + i];
    }

    return elem;
}
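
/*
 * The sz argument to virtqueue_alloc_element() / vu_queue_map_desc()
 * (and to vu_queue_pop() below) lets a backend embed VuVirtqElement at
 * the head of its own per-request struct; the sg arrays are then laid
 * out after the caller's fields. A sketch (ExampleReq is hypothetical):
 *
 *     typedef struct ExampleReq {
 *         VuVirtqElement elem;   // must be the first member
 *         int status;            // backend-private fields follow
 *     } ExampleReq;
 *
 *     ExampleReq *req = vu_queue_pop(dev, vq, sizeof(ExampleReq));
 */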

static int
vu_queue_inflight_get(VuDev *dev, VuVirtq *vq, int desc_idx)
{
    if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
        return 0;
    }

    if (unlikely(!vq->inflight)) {
        return -1;
    }

    vq->inflight->desc[desc_idx].counter = vq->counter++;
    vq->inflight->desc[desc_idx].inflight = 1;

    return 0;
}

static int
vu_queue_inflight_pre_put(VuDev *dev, VuVirtq *vq, int desc_idx)
{
    if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
        return 0;
    }

    if (unlikely(!vq->inflight)) {
        return -1;
    }

    vq->inflight->last_batch_head = desc_idx;

    return 0;
}

static int
vu_queue_inflight_post_put(VuDev *dev, VuVirtq *vq, int desc_idx)
{
    if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
        return 0;
    }

    if (unlikely(!vq->inflight)) {
        return -1;
    }

    barrier();

    vq->inflight->desc[desc_idx].inflight = 0;

    barrier();

    vq->inflight->used_idx = vq->used_idx;

    return 0;
}
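
/*
 * Together these helpers keep the shared inflight region consistent for
 * crash recovery: vu_queue_inflight_get() marks a descriptor in flight
 * when it is popped, vu_queue_inflight_pre_put() records the head that
 * is about to be completed, and vu_queue_inflight_post_put() clears the
 * in-flight mark and mirrors used_idx once the used ring has been
 * updated. The barrier() calls order those stores at the compiler
 * level, so wherever a crash lands, the reconnect path that rebuilds
 * vq->resubmit_list can tell which requests must be replayed.
 */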

void *
vu_queue_pop(VuDev *dev, VuVirtq *vq, size_t sz)
{
    int i;
    unsigned int head;
    VuVirtqElement *elem;

    if (unlikely(dev->broken) ||
        unlikely(!vq->vring.avail)) {
        return NULL;
    }

    if (unlikely(vq->resubmit_list && vq->resubmit_num > 0)) {
        i = (--vq->resubmit_num);
        elem = vu_queue_map_desc(dev, vq, vq->resubmit_list[i].index, sz);

        if (!vq->resubmit_num) {
            free(vq->resubmit_list);
            vq->resubmit_list = NULL;
        }

        return elem;
    }

    if (vu_queue_empty(dev, vq)) {
        return NULL;
    }
    /*
     * Needed after vu_queue_empty(), see comment in
     * virtqueue_num_heads().
     */
    smp_rmb();

    if (vq->inuse >= vq->vring.num) {
        vu_panic(dev, "Virtqueue size exceeded");
        return NULL;
    }

    if (!virtqueue_get_head(dev, vq, vq->last_avail_idx++, &head)) {
        return NULL;
    }

    if (vu_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
        vring_set_avail_event(vq, vq->last_avail_idx);
    }

    elem = vu_queue_map_desc(dev, vq, head, sz);

    if (!elem) {
        return NULL;
    }

    vq->inuse++;

    vu_queue_inflight_get(dev, vq, head);

    return elem;
}

static void
vu_queue_detach_element(VuDev *dev, VuVirtq *vq, VuVirtqElement *elem,
                        size_t len)
{
    vq->inuse--;
    /* unmap, when DMA support is added */
}

void
vu_queue_unpop(VuDev *dev, VuVirtq *vq, VuVirtqElement *elem,
               size_t len)
{
    vq->last_avail_idx--;
    vu_queue_detach_element(dev, vq, elem, len);
}

bool
vu_queue_rewind(VuDev *dev, VuVirtq *vq, unsigned int num)
{
    if (num > vq->inuse) {
        return false;
    }
    vq->last_avail_idx -= num;
    vq->inuse -= num;
    return true;
}
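
/*
 * vu_queue_unpop()/vu_queue_rewind() let a backend hand elements back
 * to the queue when it cannot process them yet, e.g. on a transient
 * resource shortage (sketch; try_process() is hypothetical, and the
 * malloc'd element is still owned and freed by the caller):
 *
 *     VuVirtqElement *elem = vu_queue_pop(dev, vq, sizeof(*elem));
 *     if (elem && !try_process(elem)) {
 *         vu_queue_unpop(dev, vq, elem, 0);  // re-popped on next kick
 *         free(elem);
 *     }
 */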

static inline
void vring_used_write(VuDev *dev, VuVirtq *vq,
                      struct vring_used_elem *uelem, int i)
{
    struct vring_used *used = vq->vring.used;

    used->ring[i] = *uelem;
    vu_log_write(dev, vq->vring.log_guest_addr +
                 offsetof(struct vring_used, ring[i]),
                 sizeof(used->ring[i]));
}

static void
vu_log_queue_fill(VuDev *dev, VuVirtq *vq,
                  const VuVirtqElement *elem,
                  unsigned int len)
{
    struct vring_desc *desc = vq->vring.desc;
    unsigned int i, max, min, desc_len;
    uint64_t desc_addr, read_len;
    struct vring_desc desc_buf[VIRTQUEUE_MAX_SIZE];
    unsigned num_bufs = 0;

    max = vq->vring.num;
    i = elem->index;

    if (le16toh(desc[i].flags) & VRING_DESC_F_INDIRECT) {
        if (le32toh(desc[i].len) % sizeof(struct vring_desc)) {
            vu_panic(dev, "Invalid size for indirect buffer table");
            return;
        }

        /* loop over the indirect descriptor table */
        desc_addr = le64toh(desc[i].addr);
        desc_len = le32toh(desc[i].len);
        max = desc_len / sizeof(struct vring_desc);
        read_len = desc_len;
        desc = vu_gpa_to_va(dev, &read_len, desc_addr);
        if (unlikely(desc && read_len != desc_len)) {
            /* Failed to use zero copy */
            desc = NULL;
            if (!virtqueue_read_indirect_desc(dev, desc_buf,
                                              desc_addr,
                                              desc_len)) {
                desc = desc_buf;
            }
        }
        if (!desc) {
            vu_panic(dev, "Invalid indirect buffer table");
            return;
        }
        i = 0;
    }

    do {
        if (++num_bufs > max) {
            vu_panic(dev, "Looped descriptor");
            return;
        }

        if (le16toh(desc[i].flags) & VRING_DESC_F_WRITE) {
            min = MIN(le32toh(desc[i].len), len);
            vu_log_write(dev, le64toh(desc[i].addr), min);
            len -= min;
        }

    } while (len > 0 &&
             (virtqueue_read_next_desc(dev, desc, i, max, &i)
              == VIRTQUEUE_READ_DESC_MORE));
}

void
vu_queue_fill(VuDev *dev, VuVirtq *vq,
              const VuVirtqElement *elem,
              unsigned int len, unsigned int idx)
{
    struct vring_used_elem uelem;

    if (unlikely(dev->broken) ||
        unlikely(!vq->vring.avail)) {
        return;
    }

    vu_log_queue_fill(dev, vq, elem, len);

    idx = (idx + vq->used_idx) % vq->vring.num;

    uelem.id = htole32(elem->index);
    uelem.len = htole32(len);
    vring_used_write(dev, vq, &uelem, idx);
}

static inline
void vring_used_idx_set(VuDev *dev, VuVirtq *vq, uint16_t val)
{
    vq->vring.used->idx = htole16(val);
    vu_log_write(dev,
                 vq->vring.log_guest_addr + offsetof(struct vring_used, idx),
                 sizeof(vq->vring.used->idx));

    vq->used_idx = val;
}

void
vu_queue_flush(VuDev *dev, VuVirtq *vq, unsigned int count)
{
    uint16_t old, new;

    if (unlikely(dev->broken) ||
        unlikely(!vq->vring.avail)) {
        return;
    }

    /* Make sure buffer is written before we update index. */
    smp_wmb();

    old = vq->used_idx;
    new = old + count;
    vring_used_idx_set(dev, vq, new);
    vq->inuse -= count;
    if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old))) {
        vq->signalled_used_valid = false;
    }
}

void
vu_queue_push(VuDev *dev, VuVirtq *vq,
              const VuVirtqElement *elem, unsigned int len)
{
    vu_queue_fill(dev, vq, elem, len, 0);
    vu_queue_inflight_pre_put(dev, vq, elem->index);
    vu_queue_flush(dev, vq, 1);
    vu_queue_inflight_post_put(dev, vq, elem->index);
}
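
/*
 * vu_queue_push() completes one element at a time. A backend completing
 * a batch can instead call vu_queue_fill() once per element and publish
 * them all with a single vu_queue_flush(), as a sketch (elems[]/lens[]
 * are hypothetical arrays of popped elements and their written
 * lengths):
 *
 *     for (i = 0; i < n; i++) {
 *         vu_queue_fill(dev, vq, elems[i], lens[i], i);
 *     }
 *     vu_queue_flush(dev, vq, n);
 *     vu_queue_notify(dev, vq);
 *
 * Note that this bypasses the inflight pre/post bookkeeping that
 * vu_queue_push() performs, so it is only appropriate when
 * VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD has not been negotiated.
 */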