/*
 * Vhost User library
 *
 * Copyright IBM, Corp. 2007
 * Copyright (c) 2016 Red Hat, Inc.
 *
 * Authors:
 *  Anthony Liguori <aliguori@us.ibm.com>
 *  Marc-André Lureau <mlureau@redhat.com>
 *  Victor Kaplansky <victork@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later. See the COPYING file in the top-level directory.
 */

#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif

/* this code avoids GLib dependency */
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <stdarg.h>
#include <errno.h>
#include <string.h>
#include <assert.h>
#include <inttypes.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/eventfd.h>
#include <sys/mman.h>
#include <endian.h>

/*
 * Necessary to provide VIRTIO_F_VERSION_1 on systems
 * with older Linux headers. Must appear before
 * <linux/vhost.h> below.
 */
#include "standard-headers/linux/virtio_config.h"

#if defined(__linux__)
#include <sys/syscall.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>

#ifdef __NR_userfaultfd
#include <linux/userfaultfd.h>
#endif

#endif

#include "include/atomic.h"

#include "libvhost-user.h"

/* usually provided by GLib */
#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ > 4)
#if !defined(__clang__) && (__GNUC__ == 4 && __GNUC_MINOR__ == 4)
#define G_GNUC_PRINTF(format_idx, arg_idx) \
    __attribute__((__format__(gnu_printf, format_idx, arg_idx)))
#else
#define G_GNUC_PRINTF(format_idx, arg_idx) \
    __attribute__((__format__(__printf__, format_idx, arg_idx)))
#endif
#else /* !__GNUC__ */
#define G_GNUC_PRINTF(format_idx, arg_idx)
#endif /* !__GNUC__ */
#ifndef MIN
#define MIN(x, y) ({                            \
            __typeof__(x) _min1 = (x);          \
            __typeof__(y) _min2 = (y);          \
            (void) (&_min1 == &_min2);          \
            _min1 < _min2 ? _min1 : _min2; })
#endif

/* Round number down to multiple */
#define ALIGN_DOWN(n, m) ((n) / (m) * (m))

/* Round number up to multiple */
#define ALIGN_UP(n, m) ALIGN_DOWN((n) + (m) - 1, (m))

#ifndef unlikely
#define unlikely(x)   __builtin_expect(!!(x), 0)
#endif
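/*
 * Worked example for the alignment helpers above: with m = 4096,
 * ALIGN_DOWN(5000, 4096) == 4096 and ALIGN_UP(5000, 4096) == 8192.
 * Both assume m is non-zero; ALIGN_UP additionally assumes that
 * n + m - 1 does not overflow.
 */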
/* Align each region to cache line size in inflight buffer */
#define INFLIGHT_ALIGNMENT 64

/* The version of inflight buffer */
#define INFLIGHT_VERSION 1

/* The version of the protocol we support */
#define VHOST_USER_VERSION 1
#define LIBVHOST_USER_DEBUG 0

#define DPRINT(...)                             \
    do {                                        \
        if (LIBVHOST_USER_DEBUG) {              \
            fprintf(stderr, __VA_ARGS__);       \
        }                                       \
    } while (0)

static inline
bool has_feature(uint64_t features, unsigned int fbit)
{
    assert(fbit < 64);
    return !!(features & (1ULL << fbit));
}

static inline
bool vu_has_feature(VuDev *dev,
                    unsigned int fbit)
{
    return has_feature(dev->features, fbit);
}

static inline bool vu_has_protocol_feature(VuDev *dev, unsigned int fbit)
{
    return has_feature(dev->protocol_features, fbit);
}

const char *
vu_request_to_string(unsigned int req)
{
#define REQ(req) [req] = #req
    static const char *vu_request_str[] = {
        REQ(VHOST_USER_NONE),
        REQ(VHOST_USER_GET_FEATURES),
        REQ(VHOST_USER_SET_FEATURES),
        REQ(VHOST_USER_SET_OWNER),
        REQ(VHOST_USER_RESET_OWNER),
        REQ(VHOST_USER_SET_MEM_TABLE),
        REQ(VHOST_USER_SET_LOG_BASE),
        REQ(VHOST_USER_SET_LOG_FD),
        REQ(VHOST_USER_SET_VRING_NUM),
        REQ(VHOST_USER_SET_VRING_ADDR),
        REQ(VHOST_USER_SET_VRING_BASE),
        REQ(VHOST_USER_GET_VRING_BASE),
        REQ(VHOST_USER_SET_VRING_KICK),
        REQ(VHOST_USER_SET_VRING_CALL),
        REQ(VHOST_USER_SET_VRING_ERR),
        REQ(VHOST_USER_GET_PROTOCOL_FEATURES),
        REQ(VHOST_USER_SET_PROTOCOL_FEATURES),
        REQ(VHOST_USER_GET_QUEUE_NUM),
        REQ(VHOST_USER_SET_VRING_ENABLE),
        REQ(VHOST_USER_SEND_RARP),
        REQ(VHOST_USER_NET_SET_MTU),
        REQ(VHOST_USER_SET_BACKEND_REQ_FD),
        REQ(VHOST_USER_IOTLB_MSG),
        REQ(VHOST_USER_SET_VRING_ENDIAN),
        REQ(VHOST_USER_GET_CONFIG),
        REQ(VHOST_USER_SET_CONFIG),
        REQ(VHOST_USER_POSTCOPY_ADVISE),
        REQ(VHOST_USER_POSTCOPY_LISTEN),
        REQ(VHOST_USER_POSTCOPY_END),
        REQ(VHOST_USER_GET_INFLIGHT_FD),
        REQ(VHOST_USER_SET_INFLIGHT_FD),
        REQ(VHOST_USER_GPU_SET_SOCKET),
        REQ(VHOST_USER_VRING_KICK),
        REQ(VHOST_USER_GET_MAX_MEM_SLOTS),
        REQ(VHOST_USER_ADD_MEM_REG),
        REQ(VHOST_USER_REM_MEM_REG),
        REQ(VHOST_USER_GET_SHARED_OBJECT),
        REQ(VHOST_USER_MAX),
    };
#undef REQ

    if (req < VHOST_USER_MAX) {
        return vu_request_str[req];
    } else {
        return "unknown";
    }
}

static void G_GNUC_PRINTF(2, 3)
vu_panic(VuDev *dev, const char *msg, ...)
{
    char *buf = NULL;
    va_list ap;

    va_start(ap, msg);
    if (vasprintf(&buf, msg, ap) < 0) {
        buf = NULL;
    }
    va_end(ap);

    dev->broken = true;
    dev->panic(dev, buf);
    free(buf);

    /*
     * FIXME:
     * find a way to call virtio_error, or perhaps close the connection?
     */
}

/* Translate guest physical address to our virtual address.  */
void *
vu_gpa_to_va(VuDev *dev, uint64_t *plen, uint64_t guest_addr)
{
    unsigned int i;

    if (*plen == 0) {
        return NULL;
    }

    /* Find matching memory region. */
    for (i = 0; i < dev->nregions; i++) {
        VuDevRegion *r = &dev->regions[i];

        if ((guest_addr >= r->gpa) && (guest_addr < (r->gpa + r->size))) {
            if ((guest_addr + *plen) > (r->gpa + r->size)) {
                *plen = r->gpa + r->size - guest_addr;
            }
            return (void *)(uintptr_t)
                guest_addr - r->gpa + r->mmap_addr + r->mmap_offset;
        }
    }

    return NULL;
}
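/*
 * Illustrative use of vu_gpa_to_va() (a sketch, not part of the library;
 * variable names are hypothetical): callers pass the number of bytes they
 * want in *plen and must check how much of the range was actually
 * contiguous within one region:
 *
 *     uint64_t len = size;
 *     void *va = vu_gpa_to_va(dev, &len, gpa);
 *     if (!va || len < size) {
 *         // range unmapped, or split across region boundaries
 *     }
 */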
/* Translate qemu virtual address to our virtual address.  */
static void *
qva_to_va(VuDev *dev, uint64_t qemu_addr)
{
    unsigned int i;

    /* Find matching memory region. */
    for (i = 0; i < dev->nregions; i++) {
        VuDevRegion *r = &dev->regions[i];

        if ((qemu_addr >= r->qva) && (qemu_addr < (r->qva + r->size))) {
            return (void *)(uintptr_t)
                qemu_addr - r->qva + r->mmap_addr + r->mmap_offset;
        }
    }

    return NULL;
}

static void
vu_remove_all_mem_regs(VuDev *dev)
{
    unsigned int i;

    for (i = 0; i < dev->nregions; i++) {
        VuDevRegion *r = &dev->regions[i];

        munmap((void *)(uintptr_t)r->mmap_addr, r->size + r->mmap_offset);
    }
    dev->nregions = 0;
}

static void
_vu_add_mem_reg(VuDev *dev, VhostUserMemoryRegion *msg_region, int fd)
{
    int prot = PROT_READ | PROT_WRITE;
    VuDevRegion *r;
    void *mmap_addr;

    DPRINT("Adding region %d\n", dev->nregions);
    DPRINT("    guest_phys_addr: 0x%016"PRIx64"\n",
           msg_region->guest_phys_addr);
    DPRINT("    memory_size:     0x%016"PRIx64"\n",
           msg_region->memory_size);
    DPRINT("    userspace_addr:  0x%016"PRIx64"\n",
           msg_region->userspace_addr);
    DPRINT("    mmap_offset:     0x%016"PRIx64"\n",
           msg_region->mmap_offset);

    if (dev->postcopy_listening) {
        /*
         * In postcopy we're using PROT_NONE here to catch anyone
         * accessing it before we userfault
         */
        prot = PROT_NONE;
    }

    /*
     * We don't use the offset argument of mmap() since the mapped address
     * has to be page aligned, and we use huge pages.
     */
    mmap_addr = mmap(0, msg_region->memory_size + msg_region->mmap_offset,
                     prot, MAP_SHARED | MAP_NORESERVE, fd, 0);
    if (mmap_addr == MAP_FAILED) {
        vu_panic(dev, "region mmap error: %s", strerror(errno));
        return;
    }
    DPRINT("    mmap_addr:       0x%016"PRIx64"\n",
           (uint64_t)(uintptr_t)mmap_addr);

    r = &dev->regions[dev->nregions];
    r->gpa = msg_region->guest_phys_addr;
    r->size = msg_region->memory_size;
    r->qva = msg_region->userspace_addr;
    r->mmap_addr = (uint64_t)(uintptr_t)mmap_addr;
    r->mmap_offset = msg_region->mmap_offset;
    dev->nregions++;

    if (dev->postcopy_listening) {
        /*
         * Return the address to QEMU so that it can translate the ufd
         * fault addresses back.
         */
        msg_region->userspace_addr = r->mmap_addr + r->mmap_offset;
    }
}
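/*
 * Note on the layout produced by _vu_add_mem_reg(): the file is mapped
 * from offset 0, so guest-visible memory starts mmap_offset bytes into
 * the mapping. A guest physical address therefore translates as
 *
 *     va = gpa - r->gpa + r->mmap_addr + r->mmap_offset
 *
 * which is exactly the computation vu_gpa_to_va() performs above.
 */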
static void
vmsg_close_fds(VhostUserMsg *vmsg)
{
    int i;

    for (i = 0; i < vmsg->fd_num; i++) {
        close(vmsg->fds[i]);
    }
}

/* Set reply payload.u64 and clear request flags and fd_num */
static void vmsg_set_reply_u64(VhostUserMsg *vmsg, uint64_t val)
{
    vmsg->flags = 0; /* defaults will be set by vu_send_reply() */
    vmsg->size = sizeof(vmsg->payload.u64);
    vmsg->payload.u64 = val;
    vmsg->fd_num = 0;
}

/* A test to see if we have userfault available */
static bool
have_userfault(void)
{
#if defined(__linux__) && defined(__NR_userfaultfd) &&\
        defined(UFFD_FEATURE_MISSING_SHMEM) &&\
        defined(UFFD_FEATURE_MISSING_HUGETLBFS)
    /* Now test the kernel we're running on really has the features */
    int ufd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
    struct uffdio_api api_struct;
    if (ufd < 0) {
        return false;
    }

    api_struct.api = UFFD_API;
    api_struct.features = UFFD_FEATURE_MISSING_SHMEM |
                          UFFD_FEATURE_MISSING_HUGETLBFS;
    if (ioctl(ufd, UFFDIO_API, &api_struct)) {
        close(ufd);
        return false;
    }
    close(ufd);
    return true;

#else
    return false;
#endif
}

static bool
vu_message_read_default(VuDev *dev, int conn_fd, VhostUserMsg *vmsg)
{
    char control[CMSG_SPACE(VHOST_MEMORY_BASELINE_NREGIONS * sizeof(int))] = {};
    struct iovec iov = {
        .iov_base = (char *)vmsg,
        .iov_len = VHOST_USER_HDR_SIZE,
    };
    struct msghdr msg = {
        .msg_iov = &iov,
        .msg_iovlen = 1,
        .msg_control = control,
        .msg_controllen = sizeof(control),
    };
    size_t fd_size;
    struct cmsghdr *cmsg;
    int rc;

    do {
        rc = recvmsg(conn_fd, &msg, 0);
    } while (rc < 0 && (errno == EINTR || errno == EAGAIN));

    if (rc < 0) {
        vu_panic(dev, "Error while recvmsg: %s", strerror(errno));
        return false;
    }

    vmsg->fd_num = 0;
    for (cmsg = CMSG_FIRSTHDR(&msg);
         cmsg != NULL;
         cmsg = CMSG_NXTHDR(&msg, cmsg))
    {
        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            fd_size = cmsg->cmsg_len - CMSG_LEN(0);
            vmsg->fd_num = fd_size / sizeof(int);
            assert(fd_size <= VHOST_MEMORY_BASELINE_NREGIONS * sizeof(int));
            memcpy(vmsg->fds, CMSG_DATA(cmsg), fd_size);
            break;
        }
    }

    if (vmsg->size > sizeof(vmsg->payload)) {
        vu_panic(dev,
                 "Error: too big message request: %d, size: vmsg->size: %u, "
                 "while sizeof(vmsg->payload) = %zu\n",
                 vmsg->request, vmsg->size, sizeof(vmsg->payload));
        goto fail;
    }

    if (vmsg->size) {
        do {
            rc = read(conn_fd, &vmsg->payload, vmsg->size);
        } while (rc < 0 && (errno == EINTR || errno == EAGAIN));

        if (rc <= 0) {
            vu_panic(dev, "Error while reading: %s", strerror(errno));
            goto fail;
        }

        assert((uint32_t)rc == vmsg->size);
    }

    return true;

fail:
    vmsg_close_fds(vmsg);

    return false;
}
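/*
 * The fd-count computation in vu_message_read_default() above follows the
 * usual SCM_RIGHTS convention: the ancillary payload is an array of ints,
 * so with cmsg_len = CMSG_LEN(2 * sizeof(int)), for example, fd_size is
 * 2 * sizeof(int) and fd_num is 2.
 */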
static bool
vu_message_write(VuDev *dev, int conn_fd, VhostUserMsg *vmsg)
{
    int rc;
    uint8_t *p = (uint8_t *)vmsg;
    char control[CMSG_SPACE(VHOST_MEMORY_BASELINE_NREGIONS * sizeof(int))] = {};
    struct iovec iov = {
        .iov_base = (char *)vmsg,
        .iov_len = VHOST_USER_HDR_SIZE,
    };
    struct msghdr msg = {
        .msg_iov = &iov,
        .msg_iovlen = 1,
        .msg_control = control,
    };
    struct cmsghdr *cmsg;

    memset(control, 0, sizeof(control));
    assert(vmsg->fd_num <= VHOST_MEMORY_BASELINE_NREGIONS);
    if (vmsg->fd_num > 0) {
        size_t fdsize = vmsg->fd_num * sizeof(int);
        msg.msg_controllen = CMSG_SPACE(fdsize);
        cmsg = CMSG_FIRSTHDR(&msg);
        cmsg->cmsg_len = CMSG_LEN(fdsize);
        cmsg->cmsg_level = SOL_SOCKET;
        cmsg->cmsg_type = SCM_RIGHTS;
        memcpy(CMSG_DATA(cmsg), vmsg->fds, fdsize);
    } else {
        msg.msg_controllen = 0;
    }

    do {
        rc = sendmsg(conn_fd, &msg, 0);
    } while (rc < 0 && (errno == EINTR || errno == EAGAIN));

    if (vmsg->size) {
        do {
            if (vmsg->data) {
                rc = write(conn_fd, vmsg->data, vmsg->size);
            } else {
                rc = write(conn_fd, p + VHOST_USER_HDR_SIZE, vmsg->size);
            }
        } while (rc < 0 && (errno == EINTR || errno == EAGAIN));
    }

    if (rc <= 0) {
        vu_panic(dev, "Error while writing: %s", strerror(errno));
        return false;
    }

    return true;
}

static bool
vu_send_reply(VuDev *dev, int conn_fd, VhostUserMsg *vmsg)
{
    /* Set the version in the flags when sending the reply */
    vmsg->flags &= ~VHOST_USER_VERSION_MASK;
    vmsg->flags |= VHOST_USER_VERSION;
    vmsg->flags |= VHOST_USER_REPLY_MASK;

    return vu_message_write(dev, conn_fd, vmsg);
}

/*
 * Processes a reply on the backend channel.
 * Entered with backend_mutex held and releases it before exit.
 * Returns true on success.
 */
static bool
vu_process_message_reply(VuDev *dev, const VhostUserMsg *vmsg)
{
    VhostUserMsg msg_reply;
    bool result = false;

    if ((vmsg->flags & VHOST_USER_NEED_REPLY_MASK) == 0) {
        result = true;
        goto out;
    }

    if (!vu_message_read_default(dev, dev->backend_fd, &msg_reply)) {
        goto out;
    }

    if (msg_reply.request != vmsg->request) {
        DPRINT("Received unexpected msg type. Expected %d received %d",
               vmsg->request, msg_reply.request);
        goto out;
    }

    result = msg_reply.payload.u64 == 0;

out:
    pthread_mutex_unlock(&dev->backend_mutex);
    return result;
}
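/*
 * The dirty log shared with QEMU is a bitmap with one bit per
 * VHOST_LOG_PAGE bytes of guest memory. For instance, assuming
 * VHOST_LOG_PAGE is 0x1000, an update at guest address 0x5800 dirties
 * page 5, i.e. bit 5 of byte 0 of the log (page / 8, 1 << (page % 8)),
 * as implemented by vu_log_page() below.
 */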
/* Kick the log_call_fd if required. */
static void
vu_log_kick(VuDev *dev)
{
    if (dev->log_call_fd != -1) {
        DPRINT("Kicking the QEMU's log...\n");
        if (eventfd_write(dev->log_call_fd, 1) < 0) {
            vu_panic(dev, "Error writing eventfd: %s", strerror(errno));
        }
    }
}

static void
vu_log_page(uint8_t *log_table, uint64_t page)
{
    DPRINT("Logged dirty guest page: %"PRId64"\n", page);
    qatomic_or(&log_table[page / 8], 1 << (page % 8));
}

static void
vu_log_write(VuDev *dev, uint64_t address, uint64_t length)
{
    uint64_t page;

    if (!(dev->features & (1ULL << VHOST_F_LOG_ALL)) ||
        !dev->log_table || !length) {
        return;
    }

    assert(dev->log_size > ((address + length - 1) / VHOST_LOG_PAGE / 8));

    page = address / VHOST_LOG_PAGE;
    while (page * VHOST_LOG_PAGE < address + length) {
        vu_log_page(dev->log_table, page);
        page += 1;
    }

    vu_log_kick(dev);
}

static void
vu_kick_cb(VuDev *dev, int condition, void *data)
{
    int index = (intptr_t)data;
    VuVirtq *vq = &dev->vq[index];
    int sock = vq->kick_fd;
    eventfd_t kick_data;
    ssize_t rc;

    rc = eventfd_read(sock, &kick_data);
    if (rc == -1) {
        vu_panic(dev, "kick eventfd_read(): %s", strerror(errno));
        dev->remove_watch(dev, dev->vq[index].kick_fd);
    } else {
        DPRINT("Got kick_data: %016"PRIx64" handler:%p idx:%d\n",
               kick_data, vq->handler, index);
        if (vq->handler) {
            vq->handler(dev, index);
        }
    }
}

static bool
vu_get_features_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    vmsg->payload.u64 =
        /*
         * The following VIRTIO feature bits are supported by our virtqueue
         * implementation:
         */
        1ULL << VIRTIO_F_NOTIFY_ON_EMPTY |
        1ULL << VIRTIO_RING_F_INDIRECT_DESC |
        1ULL << VIRTIO_RING_F_EVENT_IDX |
        1ULL << VIRTIO_F_VERSION_1 |

        /* vhost-user feature bits */
        1ULL << VHOST_F_LOG_ALL |
        1ULL << VHOST_USER_F_PROTOCOL_FEATURES;

    if (dev->iface->get_features) {
        vmsg->payload.u64 |= dev->iface->get_features(dev);
    }

    vmsg->size = sizeof(vmsg->payload.u64);
    vmsg->fd_num = 0;

    DPRINT("Sending back to guest u64: 0x%016"PRIx64"\n", vmsg->payload.u64);

    return true;
}

static void
vu_set_enable_all_rings(VuDev *dev, bool enabled)
{
    uint16_t i;

    for (i = 0; i < dev->max_queues; i++) {
        dev->vq[i].enable = enabled;
    }
}

static bool
vu_set_features_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);

    dev->features = vmsg->payload.u64;
    if (!vu_has_feature(dev, VIRTIO_F_VERSION_1)) {
        /*
         * We only support devices conforming to VIRTIO 1.0 or
         * later
         */
        vu_panic(dev, "virtio legacy devices aren't supported by libvhost-user");
        return false;
    }

    if (!vu_has_feature(dev, VHOST_USER_F_PROTOCOL_FEATURES)) {
        vu_set_enable_all_rings(dev, true);
    }

    if (dev->iface->set_features) {
        dev->iface->set_features(dev, dev->features);
    }

    return false;
}

static bool
vu_set_owner_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    return false;
}

static void
vu_close_log(VuDev *dev)
{
    if (dev->log_table) {
        if (munmap(dev->log_table, dev->log_size) != 0) {
            perror("close log munmap() error");
        }

        dev->log_table = NULL;
    }
    if (dev->log_call_fd != -1) {
        close(dev->log_call_fd);
        dev->log_call_fd = -1;
    }
}
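/*
 * Postcopy overview (context for generate_faults() below): while
 * postcopy_listening is set, regions are initially mapped PROT_NONE by
 * _vu_add_mem_reg(). generate_faults() then discards any prematurely
 * populated pages, registers each range with the userfaultfd obtained
 * via VHOST_USER_POSTCOPY_ADVISE, and only afterwards re-enables
 * PROT_READ | PROT_WRITE, so the first access to every missing page is
 * reported to QEMU.
 */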
static bool
vu_reset_device_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    vu_set_enable_all_rings(dev, false);

    return false;
}

static bool
map_ring(VuDev *dev, VuVirtq *vq)
{
    vq->vring.desc = qva_to_va(dev, vq->vra.desc_user_addr);
    vq->vring.used = qva_to_va(dev, vq->vra.used_user_addr);
    vq->vring.avail = qva_to_va(dev, vq->vra.avail_user_addr);

    DPRINT("Setting virtq addresses:\n");
    DPRINT("    vring_desc  at %p\n", vq->vring.desc);
    DPRINT("    vring_used  at %p\n", vq->vring.used);
    DPRINT("    vring_avail at %p\n", vq->vring.avail);

    return !(vq->vring.desc && vq->vring.used && vq->vring.avail);
}

static bool
generate_faults(VuDev *dev)
{
    unsigned int i;

    for (i = 0; i < dev->nregions; i++) {
#ifdef UFFDIO_REGISTER
        VuDevRegion *dev_region = &dev->regions[i];
        int ret;
        struct uffdio_register reg_struct;

        /*
         * We should already have an open ufd. Mark each memory
         * range for userfault handling.
         * Discard any mapping we have here; note I can't use MADV_REMOVE
         * or fallocate to make the hole since I don't want to lose
         * data that's already arrived in the shared process.
         * TODO: How to do this for hugepages?
         */
        ret = madvise((void *)(uintptr_t)dev_region->mmap_addr,
                      dev_region->size + dev_region->mmap_offset,
                      MADV_DONTNEED);
        if (ret) {
            fprintf(stderr,
                    "%s: Failed to madvise(DONTNEED) region %d: %s\n",
                    __func__, i, strerror(errno));
        }
        /*
         * Turn off transparent hugepages so we don't lose wakeups
         * in neighbouring pages.
         * TODO: Turn this back on later.
         */
        ret = madvise((void *)(uintptr_t)dev_region->mmap_addr,
                      dev_region->size + dev_region->mmap_offset,
                      MADV_NOHUGEPAGE);
        if (ret) {
            /*
             * Note: This can happen legally on kernels that are configured
             * without madvise'able hugepages
             */
            fprintf(stderr,
                    "%s: Failed to madvise(NOHUGEPAGE) region %d: %s\n",
                    __func__, i, strerror(errno));
        }

        reg_struct.range.start = (uintptr_t)dev_region->mmap_addr;
        reg_struct.range.len = dev_region->size + dev_region->mmap_offset;
        reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;

        if (ioctl(dev->postcopy_ufd, UFFDIO_REGISTER, &reg_struct)) {
            vu_panic(dev, "%s: Failed to userfault region %d "
                          "@%" PRIx64 " + size:%" PRIx64 " offset: %" PRIx64
                          ": (ufd=%d)%s\n",
                     __func__, i,
                     dev_region->mmap_addr,
                     dev_region->size, dev_region->mmap_offset,
                     dev->postcopy_ufd, strerror(errno));
            return false;
        }
        if (!(reg_struct.ioctls & (1ULL << _UFFDIO_COPY))) {
            vu_panic(dev, "%s Region (%d) doesn't support COPY",
                     __func__, i);
            return false;
        }
        DPRINT("%s: region %d: Registered userfault for %"
               PRIx64 " + %" PRIx64 "\n", __func__, i,
               (uint64_t)reg_struct.range.start,
               (uint64_t)reg_struct.range.len);
        /* Now it's registered we can let the client at it */
        if (mprotect((void *)(uintptr_t)dev_region->mmap_addr,
                     dev_region->size + dev_region->mmap_offset,
                     PROT_READ | PROT_WRITE)) {
            vu_panic(dev, "failed to mprotect region %d for postcopy (%s)",
                     i, strerror(errno));
            return false;
        }
        /* TODO: Stash 'zero' support flags somewhere */
#endif
    }

    return true;
}

static bool
vu_add_mem_reg(VuDev *dev, VhostUserMsg *vmsg)
{
    int i;
    VhostUserMemoryRegion m = vmsg->payload.memreg.region, *msg_region = &m;

    if (vmsg->fd_num != 1) {
        vmsg_close_fds(vmsg);
        vu_panic(dev, "VHOST_USER_ADD_MEM_REG received %d fds - only 1 fd "
                      "should be sent for this message type", vmsg->fd_num);
        return false;
    }

    if (vmsg->size < VHOST_USER_MEM_REG_SIZE) {
        close(vmsg->fds[0]);
        vu_panic(dev, "VHOST_USER_ADD_MEM_REG requires a message size of at "
                      "least %zu bytes and only %d bytes were received",
                 VHOST_USER_MEM_REG_SIZE, vmsg->size);
        return false;
    }

    if (dev->nregions == VHOST_USER_MAX_RAM_SLOTS) {
        close(vmsg->fds[0]);
        vu_panic(dev, "failing attempt to hot add memory via "
                      "VHOST_USER_ADD_MEM_REG message because the backend has "
                      "no free ram slots available");
        return false;
    }

    /*
     * If we are in postcopy mode and we receive a u64 payload with a 0 value
     * we know all the postcopy client bases have been received, and we
     * should start generating faults.
     */
    if (dev->postcopy_listening &&
        vmsg->size == sizeof(vmsg->payload.u64) &&
        vmsg->payload.u64 == 0) {
        (void)generate_faults(dev);
        return false;
    }

    _vu_add_mem_reg(dev, msg_region, vmsg->fds[0]);
    close(vmsg->fds[0]);

    if (dev->postcopy_listening) {
        /* Send the message back to qemu with the addresses filled in. */
        vmsg->fd_num = 0;
        DPRINT("Successfully added new region in postcopy\n");
        return true;
    } else {
        for (i = 0; i < dev->max_queues; i++) {
            if (dev->vq[i].vring.desc) {
                if (map_ring(dev, &dev->vq[i])) {
                    vu_panic(dev, "remapping queue %d for new memory region",
                             i);
                }
            }
        }

        DPRINT("Successfully added new region\n");
        return false;
    }
}

static inline bool reg_equal(VuDevRegion *vudev_reg,
                             VhostUserMemoryRegion *msg_reg)
{
    if (vudev_reg->gpa == msg_reg->guest_phys_addr &&
        vudev_reg->qva == msg_reg->userspace_addr &&
        vudev_reg->size == msg_reg->memory_size) {
        return true;
    }

    return false;
}

static bool
vu_rem_mem_reg(VuDev *dev, VhostUserMsg *vmsg)
{
    VhostUserMemoryRegion m = vmsg->payload.memreg.region, *msg_region = &m;
    unsigned int i;
    bool found = false;

    if (vmsg->fd_num > 1) {
        vmsg_close_fds(vmsg);
        vu_panic(dev, "VHOST_USER_REM_MEM_REG received %d fds - at most 1 fd "
                      "should be sent for this message type", vmsg->fd_num);
        return false;
    }

    if (vmsg->size < VHOST_USER_MEM_REG_SIZE) {
        vmsg_close_fds(vmsg);
        vu_panic(dev, "VHOST_USER_REM_MEM_REG requires a message size of at "
                      "least %zu bytes and only %d bytes were received",
                 VHOST_USER_MEM_REG_SIZE, vmsg->size);
        return false;
    }

    DPRINT("Removing region:\n");
    DPRINT("    guest_phys_addr: 0x%016"PRIx64"\n",
           msg_region->guest_phys_addr);
    DPRINT("    memory_size:     0x%016"PRIx64"\n",
           msg_region->memory_size);
    DPRINT("    userspace_addr   0x%016"PRIx64"\n",
           msg_region->userspace_addr);
    DPRINT("    mmap_offset      0x%016"PRIx64"\n",
           msg_region->mmap_offset);

    for (i = 0; i < dev->nregions; i++) {
        if (reg_equal(&dev->regions[i], msg_region)) {
            VuDevRegion *r = &dev->regions[i];

            munmap((void *)(uintptr_t)r->mmap_addr, r->size + r->mmap_offset);

            /*
             * Shift all affected entries by 1 to close the hole at index i
             * and zero out the last entry.
             */
            memmove(dev->regions + i, dev->regions + i + 1,
                    sizeof(VuDevRegion) * (dev->nregions - i - 1));
            memset(dev->regions + dev->nregions - 1, 0, sizeof(VuDevRegion));
            DPRINT("Successfully removed a region\n");
            dev->nregions--;
            i--;

            found = true;

            /* Continue the search for eventual duplicates. */
        }
    }

    if (!found) {
        vu_panic(dev, "Specified region not found\n");
    }

    vmsg_close_fds(vmsg);

    return false;
}

static bool
vu_get_shared_object(VuDev *dev, VhostUserMsg *vmsg)
{
    int fd_num = 0;
    int dmabuf_fd = -1;

    if (dev->iface->get_shared_object) {
        dmabuf_fd = dev->iface->get_shared_object(
            dev, &vmsg->payload.object.uuid[0]);
    }
    if (dmabuf_fd != -1) {
        DPRINT("dmabuf_fd found for requested UUID\n");
        vmsg->fds[fd_num++] = dmabuf_fd;
    }
    vmsg->fd_num = fd_num;

    return true;
}

static bool
vu_set_mem_table_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    VhostUserMemory m = vmsg->payload.memory, *memory = &m;
    unsigned int i;

    vu_remove_all_mem_regs(dev);

    DPRINT("Nregions: %u\n", memory->nregions);
    for (i = 0; i < memory->nregions; i++) {
        _vu_add_mem_reg(dev, &memory->regions[i], vmsg->fds[i]);
        close(vmsg->fds[i]);
    }

    if (dev->postcopy_listening) {
        /* Send the message back to qemu with the addresses filled in */
        vmsg->fd_num = 0;
        if (!vu_send_reply(dev, dev->sock, vmsg)) {
            vu_panic(dev, "failed to respond to set-mem-table for postcopy");
            return false;
        }

        /*
         * Wait for QEMU to confirm that it's registered the handler for the
         * faults.
         */
        if (!dev->read_msg(dev, dev->sock, vmsg) ||
            vmsg->size != sizeof(vmsg->payload.u64) ||
            vmsg->payload.u64 != 0) {
            vu_panic(dev, "failed to receive valid ack for postcopy set-mem-table");
            return false;
        }

        /* OK, now we can go and register the memory and generate faults */
        (void)generate_faults(dev);
        return false;
    }

    for (i = 0; i < dev->max_queues; i++) {
        if (dev->vq[i].vring.desc) {
            if (map_ring(dev, &dev->vq[i])) {
                vu_panic(dev, "remapping queue %d during setmemtable", i);
            }
        }
    }

    return false;
}

static bool
vu_set_log_base_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    int fd;
    uint64_t log_mmap_size, log_mmap_offset;
    void *rc;

    if (vmsg->fd_num != 1 ||
        vmsg->size != sizeof(vmsg->payload.log)) {
        vu_panic(dev, "Invalid log_base message");
        return true;
    }

    fd = vmsg->fds[0];
    log_mmap_offset = vmsg->payload.log.mmap_offset;
    log_mmap_size = vmsg->payload.log.mmap_size;
    DPRINT("Log mmap_offset: %"PRId64"\n", log_mmap_offset);
    DPRINT("Log mmap_size:   %"PRId64"\n", log_mmap_size);

    rc = mmap(0, log_mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd,
              log_mmap_offset);
    close(fd);
    if (rc == MAP_FAILED) {
        perror("log mmap error");
    }

    if (dev->log_table) {
        munmap(dev->log_table, dev->log_size);
    }
    dev->log_table = rc;
    dev->log_size = log_mmap_size;

    vmsg->size = sizeof(vmsg->payload.u64);
    vmsg->fd_num = 0;

    return true;
}

static bool
vu_set_log_fd_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    if (vmsg->fd_num != 1) {
        vu_panic(dev, "Invalid log_fd message");
        return false;
    }

    if (dev->log_call_fd != -1) {
        close(dev->log_call_fd);
    }
    dev->log_call_fd = vmsg->fds[0];
    DPRINT("Got log_call_fd: %d\n", vmsg->fds[0]);

    return false;
}
static bool
vu_set_vring_num_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    unsigned int index = vmsg->payload.state.index;
    unsigned int num = vmsg->payload.state.num;

    DPRINT("State.index: %u\n", index);
    DPRINT("State.num:   %u\n", num);
    dev->vq[index].vring.num = num;

    return false;
}

static bool
vu_set_vring_addr_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    struct vhost_vring_addr addr = vmsg->payload.addr, *vra = &addr;
    unsigned int index = vra->index;
    VuVirtq *vq = &dev->vq[index];

    DPRINT("vhost_vring_addr:\n");
    DPRINT("    index:  %d\n", vra->index);
    DPRINT("    flags:  %d\n", vra->flags);
    DPRINT("    desc_user_addr:   0x%016" PRIx64 "\n",
           (uint64_t)vra->desc_user_addr);
    DPRINT("    used_user_addr:   0x%016" PRIx64 "\n",
           (uint64_t)vra->used_user_addr);
    DPRINT("    avail_user_addr:  0x%016" PRIx64 "\n",
           (uint64_t)vra->avail_user_addr);
    DPRINT("    log_guest_addr:   0x%016" PRIx64 "\n",
           (uint64_t)vra->log_guest_addr);

    vq->vra = *vra;
    vq->vring.flags = vra->flags;
    vq->vring.log_guest_addr = vra->log_guest_addr;

    if (map_ring(dev, vq)) {
        vu_panic(dev, "Invalid vring_addr message");
        return false;
    }

    vq->used_idx = le16toh(vq->vring.used->idx);

    if (vq->last_avail_idx != vq->used_idx) {
        bool resume = dev->iface->queue_is_processed_in_order &&
            dev->iface->queue_is_processed_in_order(dev, index);

        DPRINT("Last avail index != used index: %u != %u%s\n",
               vq->last_avail_idx, vq->used_idx,
               resume ? ", resuming" : "");

        if (resume) {
            vq->shadow_avail_idx = vq->last_avail_idx = vq->used_idx;
        }
    }

    return false;
}

static bool
vu_set_vring_base_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    unsigned int index = vmsg->payload.state.index;
    unsigned int num = vmsg->payload.state.num;

    DPRINT("State.index: %u\n", index);
    DPRINT("State.num:   %u\n", num);
    dev->vq[index].shadow_avail_idx = dev->vq[index].last_avail_idx = num;

    return false;
}

static bool
vu_get_vring_base_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    unsigned int index = vmsg->payload.state.index;

    DPRINT("State.index: %u\n", index);
    vmsg->payload.state.num = dev->vq[index].last_avail_idx;
    vmsg->size = sizeof(vmsg->payload.state);

    dev->vq[index].started = false;
    if (dev->iface->queue_set_started) {
        dev->iface->queue_set_started(dev, index, false);
    }

    if (dev->vq[index].call_fd != -1) {
        close(dev->vq[index].call_fd);
        dev->vq[index].call_fd = -1;
    }
    if (dev->vq[index].kick_fd != -1) {
        dev->remove_watch(dev, dev->vq[index].kick_fd);
        close(dev->vq[index].kick_fd);
        dev->vq[index].kick_fd = -1;
    }

    return true;
}

static bool
vu_check_queue_msg_file(VuDev *dev, VhostUserMsg *vmsg)
{
    int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
    bool nofd = vmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK;

    if (index >= dev->max_queues) {
        vmsg_close_fds(vmsg);
        vu_panic(dev, "Invalid queue index: %u", index);
        return false;
    }

    if (nofd) {
        vmsg_close_fds(vmsg);
        return true;
    }

    if (vmsg->fd_num != 1) {
        vmsg_close_fds(vmsg);
        vu_panic(dev, "Invalid fds in request: %d", vmsg->request);
        return false;
    }

    return true;
}
static int
inflight_desc_compare(const void *a, const void *b)
{
    VuVirtqInflightDesc *desc0 = (VuVirtqInflightDesc *)a,
                        *desc1 = (VuVirtqInflightDesc *)b;

    if (desc1->counter > desc0->counter &&
        (desc1->counter - desc0->counter) < VIRTQUEUE_MAX_SIZE * 2) {
        return 1;
    }

    return -1;
}

static int
vu_check_queue_inflights(VuDev *dev, VuVirtq *vq)
{
    int i = 0;

    if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
        return 0;
    }

    if (unlikely(!vq->inflight)) {
        return -1;
    }

    if (unlikely(!vq->inflight->version)) {
        /* initialize the buffer */
        vq->inflight->version = INFLIGHT_VERSION;
        return 0;
    }

    vq->used_idx = le16toh(vq->vring.used->idx);
    vq->resubmit_num = 0;
    vq->resubmit_list = NULL;
    vq->counter = 0;

    if (unlikely(vq->inflight->used_idx != vq->used_idx)) {
        vq->inflight->desc[vq->inflight->last_batch_head].inflight = 0;

        barrier();

        vq->inflight->used_idx = vq->used_idx;
    }

    for (i = 0; i < vq->inflight->desc_num; i++) {
        if (vq->inflight->desc[i].inflight == 1) {
            vq->inuse++;
        }
    }

    vq->shadow_avail_idx = vq->last_avail_idx = vq->inuse + vq->used_idx;

    if (vq->inuse) {
        vq->resubmit_list = calloc(vq->inuse, sizeof(VuVirtqInflightDesc));
        if (!vq->resubmit_list) {
            return -1;
        }

        for (i = 0; i < vq->inflight->desc_num; i++) {
            if (vq->inflight->desc[i].inflight) {
                vq->resubmit_list[vq->resubmit_num].index = i;
                vq->resubmit_list[vq->resubmit_num].counter =
                    vq->inflight->desc[i].counter;
                vq->resubmit_num++;
            }
        }

        if (vq->resubmit_num > 1) {
            qsort(vq->resubmit_list, vq->resubmit_num,
                  sizeof(VuVirtqInflightDesc), inflight_desc_compare);
        }
        vq->counter = vq->resubmit_list[0].counter + 1;
    }

    /* in case of I/O hang after reconnecting */
    if (eventfd_write(vq->kick_fd, 1)) {
        return -1;
    }

    return 0;
}
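/*
 * Worked example for the resubmit bookkeeping above: counters are
 * assigned in submission order, so after a reconnect with pending
 * entries {index 1, counter 9} and {index 3, counter 10},
 * inflight_desc_compare() sorts the newest entry (counter 10) first
 * and vq->counter resumes from 11. The VIRTQUEUE_MAX_SIZE * 2 window
 * keeps the ordering stable if the counters ever wrap.
 */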
static bool
vu_set_vring_kick_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
    bool nofd = vmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK;

    DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);

    if (!vu_check_queue_msg_file(dev, vmsg)) {
        return false;
    }

    if (dev->vq[index].kick_fd != -1) {
        dev->remove_watch(dev, dev->vq[index].kick_fd);
        close(dev->vq[index].kick_fd);
        dev->vq[index].kick_fd = -1;
    }

    dev->vq[index].kick_fd = nofd ? -1 : vmsg->fds[0];
    DPRINT("Got kick_fd: %d for vq: %d\n", dev->vq[index].kick_fd, index);

    dev->vq[index].started = true;
    if (dev->iface->queue_set_started) {
        dev->iface->queue_set_started(dev, index, true);
    }

    if (dev->vq[index].kick_fd != -1 && dev->vq[index].handler) {
        dev->set_watch(dev, dev->vq[index].kick_fd, VU_WATCH_IN,
                       vu_kick_cb, (void *)(long)index);

        DPRINT("Waiting for kicks on fd: %d for vq: %d\n",
               dev->vq[index].kick_fd, index);
    }

    if (vu_check_queue_inflights(dev, &dev->vq[index])) {
        vu_panic(dev, "Failed to check inflights for vq: %d\n", index);
    }

    return false;
}

void vu_set_queue_handler(VuDev *dev, VuVirtq *vq,
                          vu_queue_handler_cb handler)
{
    int qidx = vq - dev->vq;

    vq->handler = handler;
    if (vq->kick_fd >= 0) {
        if (handler) {
            dev->set_watch(dev, vq->kick_fd, VU_WATCH_IN,
                           vu_kick_cb, (void *)(long)qidx);
        } else {
            dev->remove_watch(dev, vq->kick_fd);
        }
    }
}

bool vu_set_queue_host_notifier(VuDev *dev, VuVirtq *vq, int fd,
                                int size, int offset)
{
    int qidx = vq - dev->vq;
    int fd_num = 0;
    VhostUserMsg vmsg = {
        .request = VHOST_USER_BACKEND_VRING_HOST_NOTIFIER_MSG,
        .flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK,
        .size = sizeof(vmsg.payload.area),
        .payload.area = {
            .u64 = qidx & VHOST_USER_VRING_IDX_MASK,
            .size = size,
            .offset = offset,
        },
    };

    if (fd == -1) {
        vmsg.payload.area.u64 |= VHOST_USER_VRING_NOFD_MASK;
    } else {
        vmsg.fds[fd_num++] = fd;
    }

    vmsg.fd_num = fd_num;

    if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD)) {
        return false;
    }

    pthread_mutex_lock(&dev->backend_mutex);
    if (!vu_message_write(dev, dev->backend_fd, &vmsg)) {
        pthread_mutex_unlock(&dev->backend_mutex);
        return false;
    }

    /* Also unlocks the backend_mutex */
    return vu_process_message_reply(dev, &vmsg);
}
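/*
 * Usage sketch for vu_set_queue_host_notifier() (values hypothetical):
 * a backend that wants guest kicks delivered through an MMIO doorbell
 * rather than an eventfd can pass a file descriptor plus the size and
 * offset of the doorbell area within it, e.g.
 *
 *     vu_set_queue_host_notifier(dev, vq, notifier_fd, 0x1000, 0);
 *
 * Passing fd == -1 sets VHOST_USER_VRING_NOFD_MASK, which asks the
 * frontend to remove a previously installed notifier. The call requires
 * VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD to have been negotiated.
 */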
bool
vu_lookup_shared_object(VuDev *dev, unsigned char uuid[UUID_LEN],
                        int *dmabuf_fd)
{
    bool result = false;
    VhostUserMsg msg_reply;
    VhostUserMsg msg = {
        .request = VHOST_USER_BACKEND_SHARED_OBJECT_LOOKUP,
        .size = sizeof(msg.payload.object),
        .flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK,
    };

    memcpy(msg.payload.object.uuid, uuid, sizeof(uuid[0]) * UUID_LEN);

    if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_SHARED_OBJECT)) {
        return false;
    }

    pthread_mutex_lock(&dev->backend_mutex);
    if (!vu_message_write(dev, dev->backend_fd, &msg)) {
        goto out;
    }

    if (!vu_message_read_default(dev, dev->backend_fd, &msg_reply)) {
        goto out;
    }

    if (msg_reply.request != msg.request) {
        DPRINT("Received unexpected msg type. Expected %d, received %d",
               msg.request, msg_reply.request);
        goto out;
    }

    if (msg_reply.fd_num != 1) {
        DPRINT("Received unexpected number of fds. Expected 1, received %d",
               msg_reply.fd_num);
        goto out;
    }

    *dmabuf_fd = msg_reply.fds[0];
    result = *dmabuf_fd > 0 && msg_reply.payload.u64 == 0;
out:
    pthread_mutex_unlock(&dev->backend_mutex);

    return result;
}

static bool
vu_send_message(VuDev *dev, VhostUserMsg *vmsg)
{
    bool result = false;

    pthread_mutex_lock(&dev->backend_mutex);
    if (!vu_message_write(dev, dev->backend_fd, vmsg)) {
        goto out;
    }

    result = true;
out:
    pthread_mutex_unlock(&dev->backend_mutex);

    return result;
}

bool
vu_add_shared_object(VuDev *dev, unsigned char uuid[UUID_LEN])
{
    VhostUserMsg msg = {
        .request = VHOST_USER_BACKEND_SHARED_OBJECT_ADD,
        .size = sizeof(msg.payload.object),
        .flags = VHOST_USER_VERSION,
    };

    memcpy(msg.payload.object.uuid, uuid, sizeof(uuid[0]) * UUID_LEN);

    if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_SHARED_OBJECT)) {
        return false;
    }

    return vu_send_message(dev, &msg);
}

bool
vu_rm_shared_object(VuDev *dev, unsigned char uuid[UUID_LEN])
{
    VhostUserMsg msg = {
        .request = VHOST_USER_BACKEND_SHARED_OBJECT_REMOVE,
        .size = sizeof(msg.payload.object),
        .flags = VHOST_USER_VERSION,
    };

    memcpy(msg.payload.object.uuid, uuid, sizeof(uuid[0]) * UUID_LEN);

    if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_SHARED_OBJECT)) {
        return false;
    }

    return vu_send_message(dev, &msg);
}

static bool
vu_set_vring_call_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
    bool nofd = vmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK;

    DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);

    if (!vu_check_queue_msg_file(dev, vmsg)) {
        return false;
    }

    if (dev->vq[index].call_fd != -1) {
        close(dev->vq[index].call_fd);
        dev->vq[index].call_fd = -1;
    }

    dev->vq[index].call_fd = nofd ? -1 : vmsg->fds[0];

    /* in case of I/O hang after reconnecting */
    if (dev->vq[index].call_fd != -1 && eventfd_write(vmsg->fds[0], 1)) {
        return false;
    }

    DPRINT("Got call_fd: %d for vq: %d\n", dev->vq[index].call_fd, index);

    return false;
}

static bool
vu_set_vring_err_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
    bool nofd = vmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK;

    DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);

    if (!vu_check_queue_msg_file(dev, vmsg)) {
        return false;
    }

    if (dev->vq[index].err_fd != -1) {
        close(dev->vq[index].err_fd);
        dev->vq[index].err_fd = -1;
    }

    dev->vq[index].err_fd = nofd ? -1 : vmsg->fds[0];

    return false;
}
static bool
vu_get_protocol_features_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    /*
     * Note that we support, but intentionally do not set,
     * VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS. This means that
     * a device implementation can return it in its callback
     * (get_protocol_features) if it wants to use this for
     * simulation, but it is otherwise not desirable (if even
     * implemented by the frontend.)
     */
    uint64_t features = 1ULL << VHOST_USER_PROTOCOL_F_MQ |
                        1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD |
                        1ULL << VHOST_USER_PROTOCOL_F_BACKEND_REQ |
                        1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER |
                        1ULL << VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD |
                        1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK |
                        1ULL << VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS;

    if (have_userfault()) {
        features |= 1ULL << VHOST_USER_PROTOCOL_F_PAGEFAULT;
    }

    if (dev->iface->get_config && dev->iface->set_config) {
        features |= 1ULL << VHOST_USER_PROTOCOL_F_CONFIG;
    }

    if (dev->iface->get_protocol_features) {
        features |= dev->iface->get_protocol_features(dev);
    }

    vmsg_set_reply_u64(vmsg, features);
    return true;
}
1547 */ 1548 vu_panic(dev, 1549 "F_IN_BAND_NOTIFICATIONS requires F_BACKEND_REQ && F_REPLY_ACK"); 1550 return false; 1551 } 1552 1553 if (dev->iface->set_protocol_features) { 1554 dev->iface->set_protocol_features(dev, features); 1555 } 1556 1557 return false; 1558 } 1559 1560 static bool 1561 vu_get_queue_num_exec(VuDev *dev, VhostUserMsg *vmsg) 1562 { 1563 vmsg_set_reply_u64(vmsg, dev->max_queues); 1564 return true; 1565 } 1566 1567 static bool 1568 vu_set_vring_enable_exec(VuDev *dev, VhostUserMsg *vmsg) 1569 { 1570 unsigned int index = vmsg->payload.state.index; 1571 unsigned int enable = vmsg->payload.state.num; 1572 1573 DPRINT("State.index: %u\n", index); 1574 DPRINT("State.enable: %u\n", enable); 1575 1576 if (index >= dev->max_queues) { 1577 vu_panic(dev, "Invalid vring_enable index: %u", index); 1578 return false; 1579 } 1580 1581 dev->vq[index].enable = enable; 1582 return false; 1583 } 1584 1585 static bool 1586 vu_set_backend_req_fd(VuDev *dev, VhostUserMsg *vmsg) 1587 { 1588 if (vmsg->fd_num != 1) { 1589 vu_panic(dev, "Invalid backend_req_fd message (%d fd's)", vmsg->fd_num); 1590 return false; 1591 } 1592 1593 if (dev->backend_fd != -1) { 1594 close(dev->backend_fd); 1595 } 1596 dev->backend_fd = vmsg->fds[0]; 1597 DPRINT("Got backend_fd: %d\n", vmsg->fds[0]); 1598 1599 return false; 1600 } 1601 1602 static bool 1603 vu_get_config(VuDev *dev, VhostUserMsg *vmsg) 1604 { 1605 int ret = -1; 1606 1607 if (dev->iface->get_config) { 1608 ret = dev->iface->get_config(dev, vmsg->payload.config.region, 1609 vmsg->payload.config.size); 1610 } 1611 1612 if (ret) { 1613 /* resize to zero to indicate an error to frontend */ 1614 vmsg->size = 0; 1615 } 1616 1617 return true; 1618 } 1619 1620 static bool 1621 vu_set_config(VuDev *dev, VhostUserMsg *vmsg) 1622 { 1623 int ret = -1; 1624 1625 if (dev->iface->set_config) { 1626 ret = dev->iface->set_config(dev, vmsg->payload.config.region, 1627 vmsg->payload.config.offset, 1628 vmsg->payload.config.size, 1629 vmsg->payload.config.flags); 1630 if (ret) { 1631 vu_panic(dev, "Set virtio configuration space failed"); 1632 } 1633 } 1634 1635 return false; 1636 } 1637 1638 static bool 1639 vu_set_postcopy_advise(VuDev *dev, VhostUserMsg *vmsg) 1640 { 1641 #ifdef UFFDIO_API 1642 struct uffdio_api api_struct; 1643 1644 dev->postcopy_ufd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK); 1645 vmsg->size = 0; 1646 #else 1647 dev->postcopy_ufd = -1; 1648 #endif 1649 1650 if (dev->postcopy_ufd == -1) { 1651 vu_panic(dev, "Userfaultfd not available: %s", strerror(errno)); 1652 goto out; 1653 } 1654 1655 #ifdef UFFDIO_API 1656 api_struct.api = UFFD_API; 1657 api_struct.features = 0; 1658 if (ioctl(dev->postcopy_ufd, UFFDIO_API, &api_struct)) { 1659 vu_panic(dev, "Failed UFFDIO_API: %s", strerror(errno)); 1660 close(dev->postcopy_ufd); 1661 dev->postcopy_ufd = -1; 1662 goto out; 1663 } 1664 /* TODO: Stash feature flags somewhere */ 1665 #endif 1666 1667 out: 1668 /* Return a ufd to the QEMU */ 1669 vmsg->fd_num = 1; 1670 vmsg->fds[0] = dev->postcopy_ufd; 1671 return true; /* = send a reply */ 1672 } 1673 1674 static bool 1675 vu_set_postcopy_listen(VuDev *dev, VhostUserMsg *vmsg) 1676 { 1677 if (dev->nregions) { 1678 vu_panic(dev, "Regions already registered at postcopy-listen"); 1679 vmsg_set_reply_u64(vmsg, -1); 1680 return true; 1681 } 1682 dev->postcopy_listening = true; 1683 1684 vmsg_set_reply_u64(vmsg, 0); 1685 return true; 1686 } 1687 1688 static bool 1689 vu_set_postcopy_end(VuDev *dev, VhostUserMsg *vmsg) 1690 { 1691 DPRINT("%s: Entry\n", 
static inline uint64_t
vu_inflight_queue_size(uint16_t queue_size)
{
    return ALIGN_UP(sizeof(VuDescStateSplit) * queue_size +
           sizeof(uint16_t), INFLIGHT_ALIGNMENT);
}

#ifdef MFD_ALLOW_SEALING
static void *
memfd_alloc(const char *name, size_t size, unsigned int flags, int *fd)
{
    void *ptr;
    int ret;

    *fd = memfd_create(name, MFD_ALLOW_SEALING);
    if (*fd < 0) {
        return NULL;
    }

    ret = ftruncate(*fd, size);
    if (ret < 0) {
        close(*fd);
        return NULL;
    }

    ret = fcntl(*fd, F_ADD_SEALS, flags);
    if (ret < 0) {
        close(*fd);
        return NULL;
    }

    ptr = mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, *fd, 0);
    if (ptr == MAP_FAILED) {
        close(*fd);
        return NULL;
    }

    return ptr;
}
#endif

static bool
vu_get_inflight_fd(VuDev *dev, VhostUserMsg *vmsg)
{
    int fd = -1;
    void *addr = NULL;
    uint64_t mmap_size;
    uint16_t num_queues, queue_size;

    if (vmsg->size != sizeof(vmsg->payload.inflight)) {
        vu_panic(dev, "Invalid get_inflight_fd message:%d", vmsg->size);
        vmsg->payload.inflight.mmap_size = 0;
        return true;
    }

    num_queues = vmsg->payload.inflight.num_queues;
    queue_size = vmsg->payload.inflight.queue_size;

    DPRINT("get_inflight_fd num_queues: %"PRId16"\n", num_queues);
    DPRINT("get_inflight_fd queue_size: %"PRId16"\n", queue_size);

    mmap_size = vu_inflight_queue_size(queue_size) * num_queues;

#ifdef MFD_ALLOW_SEALING
    addr = memfd_alloc("vhost-inflight", mmap_size,
                       F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL,
                       &fd);
#else
    vu_panic(dev, "Not implemented: memfd support is missing");
#endif

    if (!addr) {
        vu_panic(dev, "Failed to alloc vhost inflight area");
        vmsg->payload.inflight.mmap_size = 0;
        return true;
    }

    memset(addr, 0, mmap_size);

    dev->inflight_info.addr = addr;
    dev->inflight_info.size = vmsg->payload.inflight.mmap_size = mmap_size;
    dev->inflight_info.fd = vmsg->fds[0] = fd;
    vmsg->fd_num = 1;
    vmsg->payload.inflight.mmap_offset = 0;

    DPRINT("send inflight mmap_size: %"PRId64"\n",
           vmsg->payload.inflight.mmap_size);
    DPRINT("send inflight mmap offset: %"PRId64"\n",
           vmsg->payload.inflight.mmap_offset);

    return true;
}
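/*
 * Sizing example for the inflight area (assuming the 16-byte
 * VuDescStateSplit layout from libvhost-user.h): for queue_size = 256,
 * vu_inflight_queue_size() yields ALIGN_UP(16 * 256 + 2, 64) = 4160
 * bytes per queue, and vu_get_inflight_fd() above allocates that much
 * for each of num_queues queues in a single sealed memfd.
 */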
static bool
vu_set_inflight_fd(VuDev *dev, VhostUserMsg *vmsg)
{
    int fd, i;
    uint64_t mmap_size, mmap_offset;
    uint16_t num_queues, queue_size;
    void *rc;

    if (vmsg->fd_num != 1 ||
        vmsg->size != sizeof(vmsg->payload.inflight)) {
        vu_panic(dev, "Invalid set_inflight_fd message size:%d fds:%d",
                 vmsg->size, vmsg->fd_num);
        return false;
    }

    fd = vmsg->fds[0];
    mmap_size = vmsg->payload.inflight.mmap_size;
    mmap_offset = vmsg->payload.inflight.mmap_offset;
    num_queues = vmsg->payload.inflight.num_queues;
    queue_size = vmsg->payload.inflight.queue_size;

    DPRINT("set_inflight_fd mmap_size: %"PRId64"\n", mmap_size);
    DPRINT("set_inflight_fd mmap_offset: %"PRId64"\n", mmap_offset);
    DPRINT("set_inflight_fd num_queues: %"PRId16"\n", num_queues);
    DPRINT("set_inflight_fd queue_size: %"PRId16"\n", queue_size);

    rc = mmap(0, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
              fd, mmap_offset);

    if (rc == MAP_FAILED) {
        vu_panic(dev, "set_inflight_fd mmap error: %s", strerror(errno));
        return false;
    }

    if (dev->inflight_info.fd) {
        close(dev->inflight_info.fd);
    }

    if (dev->inflight_info.addr) {
        munmap(dev->inflight_info.addr, dev->inflight_info.size);
    }

    dev->inflight_info.fd = fd;
    dev->inflight_info.addr = rc;
    dev->inflight_info.size = mmap_size;

    for (i = 0; i < num_queues; i++) {
        dev->vq[i].inflight = (VuVirtqInflight *)rc;
        dev->vq[i].inflight->desc_num = queue_size;
        rc = (void *)((char *)rc + vu_inflight_queue_size(queue_size));
    }

    return false;
}

static bool
vu_handle_vring_kick(VuDev *dev, VhostUserMsg *vmsg)
{
    unsigned int index = vmsg->payload.state.index;

    if (index >= dev->max_queues) {
        vu_panic(dev, "Invalid queue index: %u", index);
        return false;
    }

    DPRINT("Got kick message: handler:%p idx:%u\n",
           dev->vq[index].handler, index);

    if (!dev->vq[index].started) {
        dev->vq[index].started = true;

        if (dev->iface->queue_set_started) {
            dev->iface->queue_set_started(dev, index, true);
        }
    }

    if (dev->vq[index].handler) {
        dev->vq[index].handler(dev, index);
    }

    return false;
}

static bool vu_handle_get_max_memslots(VuDev *dev, VhostUserMsg *vmsg)
{
    vmsg_set_reply_u64(vmsg, VHOST_USER_MAX_RAM_SLOTS);

    DPRINT("u64: 0x%016"PRIx64"\n", (uint64_t) VHOST_USER_MAX_RAM_SLOTS);

    return true;
}
static bool
vu_process_message(VuDev *dev, VhostUserMsg *vmsg)
{
    int do_reply = 0;

    /* Print out generic part of the request. */
    DPRINT("================ Vhost user message ================\n");
    DPRINT("Request: %s (%d)\n", vu_request_to_string(vmsg->request),
           vmsg->request);
    DPRINT("Flags:   0x%x\n", vmsg->flags);
    DPRINT("Size:    %u\n", vmsg->size);

    if (vmsg->fd_num) {
        int i;
        DPRINT("Fds:");
        for (i = 0; i < vmsg->fd_num; i++) {
            DPRINT(" %d", vmsg->fds[i]);
        }
        DPRINT("\n");
    }

    if (dev->iface->process_msg &&
        dev->iface->process_msg(dev, vmsg, &do_reply)) {
        return do_reply;
    }

    switch (vmsg->request) {
    case VHOST_USER_GET_FEATURES:
        return vu_get_features_exec(dev, vmsg);
    case VHOST_USER_SET_FEATURES:
        return vu_set_features_exec(dev, vmsg);
    case VHOST_USER_GET_PROTOCOL_FEATURES:
        return vu_get_protocol_features_exec(dev, vmsg);
    case VHOST_USER_SET_PROTOCOL_FEATURES:
        return vu_set_protocol_features_exec(dev, vmsg);
    case VHOST_USER_SET_OWNER:
        return vu_set_owner_exec(dev, vmsg);
    case VHOST_USER_RESET_OWNER:
        return vu_reset_device_exec(dev, vmsg);
    case VHOST_USER_SET_MEM_TABLE:
        return vu_set_mem_table_exec(dev, vmsg);
    case VHOST_USER_SET_LOG_BASE:
        return vu_set_log_base_exec(dev, vmsg);
    case VHOST_USER_SET_LOG_FD:
        return vu_set_log_fd_exec(dev, vmsg);
    case VHOST_USER_SET_VRING_NUM:
        return vu_set_vring_num_exec(dev, vmsg);
    case VHOST_USER_SET_VRING_ADDR:
        return vu_set_vring_addr_exec(dev, vmsg);
    case VHOST_USER_SET_VRING_BASE:
        return vu_set_vring_base_exec(dev, vmsg);
    case VHOST_USER_GET_VRING_BASE:
        return vu_get_vring_base_exec(dev, vmsg);
    case VHOST_USER_SET_VRING_KICK:
        return vu_set_vring_kick_exec(dev, vmsg);
    case VHOST_USER_SET_VRING_CALL:
        return vu_set_vring_call_exec(dev, vmsg);
    case VHOST_USER_SET_VRING_ERR:
        return vu_set_vring_err_exec(dev, vmsg);
    case VHOST_USER_GET_QUEUE_NUM:
        return vu_get_queue_num_exec(dev, vmsg);
    case VHOST_USER_SET_VRING_ENABLE:
        return vu_set_vring_enable_exec(dev, vmsg);
    case VHOST_USER_SET_BACKEND_REQ_FD:
        return vu_set_backend_req_fd(dev, vmsg);
    case VHOST_USER_GET_CONFIG:
        return vu_get_config(dev, vmsg);
    case VHOST_USER_SET_CONFIG:
        return vu_set_config(dev, vmsg);
    case VHOST_USER_NONE:
        /* if you need processing before exit, override iface->process_msg */
        exit(0);
    case VHOST_USER_POSTCOPY_ADVISE:
        return vu_set_postcopy_advise(dev, vmsg);
    case VHOST_USER_POSTCOPY_LISTEN:
        return vu_set_postcopy_listen(dev, vmsg);
    case VHOST_USER_POSTCOPY_END:
        return vu_set_postcopy_end(dev, vmsg);
    case VHOST_USER_GET_INFLIGHT_FD:
        return vu_get_inflight_fd(dev, vmsg);
    case VHOST_USER_SET_INFLIGHT_FD:
        return vu_set_inflight_fd(dev, vmsg);
    case VHOST_USER_VRING_KICK:
        return vu_handle_vring_kick(dev, vmsg);
    case VHOST_USER_GET_MAX_MEM_SLOTS:
        return vu_handle_get_max_memslots(dev, vmsg);
    case VHOST_USER_ADD_MEM_REG:
        return vu_add_mem_reg(dev, vmsg);
    case VHOST_USER_REM_MEM_REG:
        return vu_rem_mem_reg(dev, vmsg);
    case VHOST_USER_GET_SHARED_OBJECT:
        return vu_get_shared_object(dev, vmsg);
    default:
        vmsg_close_fds(vmsg);
        vu_panic(dev, "Unhandled request: %d", vmsg->request);
    }

    return false;
}
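/*
 * Reply convention: each vu_*_exec()/handler above returns true when
 * vmsg has been turned into a reply that must be sent back on the
 * frontend socket, and false otherwise. vu_dispatch() below also
 * synthesizes a zero reply when VHOST_USER_NEED_REPLY_MASK was set and
 * the handler produced none.
 */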
bool
vu_dispatch(VuDev *dev)
{
    VhostUserMsg vmsg = { 0, };
    int reply_requested;
    bool need_reply, success = false;

    if (!dev->read_msg(dev, dev->sock, &vmsg)) {
        goto end;
    }

    need_reply = vmsg.flags & VHOST_USER_NEED_REPLY_MASK;

    reply_requested = vu_process_message(dev, &vmsg);
    if (!reply_requested && need_reply) {
        vmsg_set_reply_u64(&vmsg, 0);
        reply_requested = 1;
    }

    if (!reply_requested) {
        success = true;
        goto end;
    }

    if (!vu_send_reply(dev, dev->sock, &vmsg)) {
        goto end;
    }

    success = true;

end:
    free(vmsg.data);
    return success;
}

void
vu_deinit(VuDev *dev)
{
    unsigned int i;

    vu_remove_all_mem_regs(dev);

    for (i = 0; i < dev->max_queues; i++) {
        VuVirtq *vq = &dev->vq[i];

        if (vq->call_fd != -1) {
            close(vq->call_fd);
            vq->call_fd = -1;
        }

        if (vq->kick_fd != -1) {
            dev->remove_watch(dev, vq->kick_fd);
            close(vq->kick_fd);
            vq->kick_fd = -1;
        }

        if (vq->err_fd != -1) {
            close(vq->err_fd);
            vq->err_fd = -1;
        }

        if (vq->resubmit_list) {
            free(vq->resubmit_list);
            vq->resubmit_list = NULL;
        }

        vq->inflight = NULL;
    }

    if (dev->inflight_info.addr) {
        munmap(dev->inflight_info.addr, dev->inflight_info.size);
        dev->inflight_info.addr = NULL;
    }

    if (dev->inflight_info.fd > 0) {
        close(dev->inflight_info.fd);
        dev->inflight_info.fd = -1;
    }

    vu_close_log(dev);
    if (dev->backend_fd != -1) {
        close(dev->backend_fd);
        dev->backend_fd = -1;
    }
    pthread_mutex_destroy(&dev->backend_mutex);

    if (dev->sock != -1) {
        close(dev->sock);
    }

    free(dev->vq);
    dev->vq = NULL;
    free(dev->regions);
    dev->regions = NULL;
}
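/*
 * Minimal initialization sketch for vu_init() below (all names are
 * hypothetical and error handling is elided); the watch callbacks are
 * expected to hook the fd into the application's own event loop:
 *
 *     static void panic_cb(VuDev *dev, const char *err) { ... }
 *     static void set_watch_cb(VuDev *dev, int fd, int condition,
 *                              vu_watch_cb cb, void *data) { ... }
 *     static void remove_watch_cb(VuDev *dev, int fd) { ... }
 *
 *     vu_init(dev, 1, conn_fd, panic_cb, NULL,
 *             set_watch_cb, remove_watch_cb, &device_iface);
 *
 * Passing NULL for read_msg selects vu_message_read_default(); the
 * application then calls vu_dispatch(dev) whenever conn_fd is readable.
 */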
bool
vu_init(VuDev *dev,
        uint16_t max_queues,
        int socket,
        vu_panic_cb panic,
        vu_read_msg_cb read_msg,
        vu_set_watch_cb set_watch,
        vu_remove_watch_cb remove_watch,
        const VuDevIface *iface)
{
    uint16_t i;

    assert(max_queues > 0);
    assert(socket >= 0);
    assert(set_watch);
    assert(remove_watch);
    assert(iface);
    assert(panic);

    memset(dev, 0, sizeof(*dev));

    dev->sock = socket;
    dev->panic = panic;
    dev->read_msg = read_msg ? read_msg : vu_message_read_default;
    dev->set_watch = set_watch;
    dev->remove_watch = remove_watch;
    dev->iface = iface;
    dev->log_call_fd = -1;
    pthread_mutex_init(&dev->backend_mutex, NULL);
    dev->backend_fd = -1;
    dev->max_queues = max_queues;

    dev->regions = malloc(VHOST_USER_MAX_RAM_SLOTS * sizeof(dev->regions[0]));
    if (!dev->regions) {
        DPRINT("%s: failed to malloc mem regions\n", __func__);
        return false;
    }
    memset(dev->regions, 0, VHOST_USER_MAX_RAM_SLOTS * sizeof(dev->regions[0]));

    dev->vq = malloc(max_queues * sizeof(dev->vq[0]));
    if (!dev->vq) {
        DPRINT("%s: failed to malloc virtqueues\n", __func__);
        free(dev->regions);
        dev->regions = NULL;
        return false;
    }

    for (i = 0; i < max_queues; i++) {
        dev->vq[i] = (VuVirtq) {
            .call_fd = -1, .kick_fd = -1, .err_fd = -1,
            .notification = true,
        };
    }

    return true;
}

VuVirtq *
vu_get_queue(VuDev *dev, int qidx)
{
    assert(qidx < dev->max_queues);
    return &dev->vq[qidx];
}

bool
vu_queue_enabled(VuDev *dev, VuVirtq *vq)
{
    return vq->enable;
}

bool
vu_queue_started(const VuDev *dev, const VuVirtq *vq)
{
    return vq->started;
}

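/*
 * Example (editor's sketch, not compiled): typical initialization for a
 * two-queue device.  panic_cb, set_watch_cb, remove_watch_cb and
 * my_iface are hypothetical application callbacks; sock is an accepted
 * vhost-user socket.  Passing NULL for read_msg selects
 * vu_message_read_default, as implemented above.
 */
#if 0
static VuDev dev;

if (!vu_init(&dev, 2, sock, panic_cb, NULL /* read_msg */,
             set_watch_cb, remove_watch_cb, &my_iface)) {
    fprintf(stderr, "vu_init failed\n");
    exit(EXIT_FAILURE);
}

VuVirtq *rxq = vu_get_queue(&dev, 0);
VuVirtq *txq = vu_get_queue(&dev, 1);
#endif
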
static inline uint16_t
vring_avail_flags(VuVirtq *vq)
{
    return le16toh(vq->vring.avail->flags);
}

static inline uint16_t
vring_avail_idx(VuVirtq *vq)
{
    vq->shadow_avail_idx = le16toh(vq->vring.avail->idx);

    return vq->shadow_avail_idx;
}

static inline uint16_t
vring_avail_ring(VuVirtq *vq, int i)
{
    return le16toh(vq->vring.avail->ring[i]);
}

static inline uint16_t
vring_get_used_event(VuVirtq *vq)
{
    return vring_avail_ring(vq, vq->vring.num);
}

static int
virtqueue_num_heads(VuDev *dev, VuVirtq *vq, unsigned int idx)
{
    uint16_t num_heads = vring_avail_idx(vq) - idx;

    /* Check it isn't doing very strange things with descriptor numbers. */
    if (num_heads > vq->vring.num) {
        vu_panic(dev, "Guest moved avail index from %u to %u",
                 idx, vq->shadow_avail_idx);
        return -1;
    }
    if (num_heads) {
        /* On success, callers read a descriptor at vq->last_avail_idx.
         * Make sure descriptor read does not bypass avail index read. */
        smp_rmb();
    }

    return num_heads;
}

static bool
virtqueue_get_head(VuDev *dev, VuVirtq *vq,
                   unsigned int idx, unsigned int *head)
{
    /* Grab the next descriptor number they're advertising, and increment
     * the index we've seen. */
    *head = vring_avail_ring(vq, idx % vq->vring.num);

    /* If their number is silly, that's a fatal mistake. */
    if (*head >= vq->vring.num) {
        vu_panic(dev, "Guest says index %u is available", *head);
        return false;
    }

    return true;
}

static int
virtqueue_read_indirect_desc(VuDev *dev, struct vring_desc *desc,
                             uint64_t addr, size_t len)
{
    struct vring_desc *ori_desc;
    uint64_t read_len;

    if (len > (VIRTQUEUE_MAX_SIZE * sizeof(struct vring_desc))) {
        return -1;
    }

    if (len == 0) {
        return -1;
    }

    while (len) {
        read_len = len;
        ori_desc = vu_gpa_to_va(dev, &read_len, addr);
        if (!ori_desc) {
            return -1;
        }

        memcpy(desc, ori_desc, read_len);
        len -= read_len;
        addr += read_len;
        /* read_len is a byte count: advance the destination in bytes,
         * not in descriptor-sized strides. */
        desc = (struct vring_desc *)((char *)desc + read_len);
    }

    return 0;
}

enum {
    VIRTQUEUE_READ_DESC_ERROR = -1,
    VIRTQUEUE_READ_DESC_DONE = 0,   /* end of chain */
    VIRTQUEUE_READ_DESC_MORE = 1,   /* more buffers in chain */
};

static int
virtqueue_read_next_desc(VuDev *dev, struct vring_desc *desc,
                         int i, unsigned int max, unsigned int *next)
{
    /* If this descriptor says it doesn't chain, we're done. */
    if (!(le16toh(desc[i].flags) & VRING_DESC_F_NEXT)) {
        return VIRTQUEUE_READ_DESC_DONE;
    }

    /* Check they're not leading us off end of descriptors. */
    *next = le16toh(desc[i].next);
    /* Make sure compiler knows to grab that: we don't want it changing! */
    smp_wmb();

    if (*next >= max) {
        vu_panic(dev, "Desc next is %u", *next);
        return VIRTQUEUE_READ_DESC_ERROR;
    }

    return VIRTQUEUE_READ_DESC_MORE;
}

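/*
 * Editor's sketch (not compiled): the canonical chain walk built on the
 * three VIRTQUEUE_READ_DESC_* results, as used by
 * vu_queue_get_avail_bytes() and vu_queue_map_desc() below.  desc, head
 * and vq are assumed to be set up as in those callers.
 */
#if 0
unsigned int i = head;
unsigned int max = vq->vring.num;
int rc;

do {
    /* ... consume desc[i].addr / desc[i].len / desc[i].flags ... */
    rc = virtqueue_read_next_desc(dev, desc, i, max, &i);
} while (rc == VIRTQUEUE_READ_DESC_MORE);

if (rc == VIRTQUEUE_READ_DESC_ERROR) {
    /* vu_panic() has already been called; abandon the chain */
}
#endif
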
void
vu_queue_get_avail_bytes(VuDev *dev, VuVirtq *vq, unsigned int *in_bytes,
                         unsigned int *out_bytes,
                         unsigned max_in_bytes, unsigned max_out_bytes)
{
    unsigned int idx;
    unsigned int total_bufs, in_total, out_total;
    int rc;

    idx = vq->last_avail_idx;

    total_bufs = in_total = out_total = 0;
    if (unlikely(dev->broken) ||
        unlikely(!vq->vring.avail)) {
        goto done;
    }

    while ((rc = virtqueue_num_heads(dev, vq, idx)) > 0) {
        unsigned int max, desc_len, num_bufs, indirect = 0;
        uint64_t desc_addr, read_len;
        struct vring_desc *desc;
        struct vring_desc desc_buf[VIRTQUEUE_MAX_SIZE];
        unsigned int i;

        max = vq->vring.num;
        num_bufs = total_bufs;
        if (!virtqueue_get_head(dev, vq, idx++, &i)) {
            goto err;
        }
        desc = vq->vring.desc;

        if (le16toh(desc[i].flags) & VRING_DESC_F_INDIRECT) {
            if (le32toh(desc[i].len) % sizeof(struct vring_desc)) {
                vu_panic(dev, "Invalid size for indirect buffer table");
                goto err;
            }

            /* If we've got too many, that implies a descriptor loop. */
            if (num_bufs >= max) {
                vu_panic(dev, "Looped descriptor");
                goto err;
            }

            /* loop over the indirect descriptor table */
            indirect = 1;
            desc_addr = le64toh(desc[i].addr);
            desc_len = le32toh(desc[i].len);
            max = desc_len / sizeof(struct vring_desc);
            read_len = desc_len;
            desc = vu_gpa_to_va(dev, &read_len, desc_addr);
            if (unlikely(desc && read_len != desc_len)) {
                /* Failed to use zero copy */
                desc = NULL;
                if (!virtqueue_read_indirect_desc(dev, desc_buf,
                                                  desc_addr,
                                                  desc_len)) {
                    desc = desc_buf;
                }
            }
            if (!desc) {
                vu_panic(dev, "Invalid indirect buffer table");
                goto err;
            }
            num_bufs = i = 0;
        }

        do {
            /* If we've got too many, that implies a descriptor loop. */
            if (++num_bufs > max) {
                vu_panic(dev, "Looped descriptor");
                goto err;
            }

            if (le16toh(desc[i].flags) & VRING_DESC_F_WRITE) {
                in_total += le32toh(desc[i].len);
            } else {
                out_total += le32toh(desc[i].len);
            }
            if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
                goto done;
            }
            rc = virtqueue_read_next_desc(dev, desc, i, max, &i);
        } while (rc == VIRTQUEUE_READ_DESC_MORE);

        if (rc == VIRTQUEUE_READ_DESC_ERROR) {
            goto err;
        }

        if (!indirect) {
            total_bufs = num_bufs;
        } else {
            total_bufs++;
        }
    }
    if (rc < 0) {
        goto err;
    }
done:
    if (in_bytes) {
        *in_bytes = in_total;
    }
    if (out_bytes) {
        *out_bytes = out_total;
    }
    return;

err:
    in_total = out_total = 0;
    goto done;
}

bool
vu_queue_avail_bytes(VuDev *dev, VuVirtq *vq, unsigned int in_bytes,
                     unsigned int out_bytes)
{
    unsigned int in_total, out_total;

    vu_queue_get_avail_bytes(dev, vq, &in_total, &out_total,
                             in_bytes, out_bytes);

    return in_bytes <= in_total && out_bytes <= out_total;
}

/* Fetch avail_idx from VQ memory only when we really need to know if
 * guest has added some buffers. */
bool
vu_queue_empty(VuDev *dev, VuVirtq *vq)
{
    if (unlikely(dev->broken) ||
        unlikely(!vq->vring.avail)) {
        return true;
    }

    if (vq->shadow_avail_idx != vq->last_avail_idx) {
        return false;
    }

    return vring_avail_idx(vq) == vq->last_avail_idx;
}

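/*
 * Example (editor's sketch, not compiled): gating on buffer space before
 * popping.  A backend that must place a 12-byte header plus a 4096-byte
 * payload into device-writable (in) buffers can check up front; the
 * sizes here are illustrative only.
 */
#if 0
if (!vu_queue_empty(dev, vq) &&
    vu_queue_avail_bytes(dev, vq, 12 + 4096 /* in */, 0 /* out */)) {
    /* enough guest buffer space: safe to pop and fill the element */
}
#endif
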
static bool
vring_notify(VuDev *dev, VuVirtq *vq)
{
    uint16_t old, new;
    bool v;

    /* We need to expose used array entries before checking used event. */
    smp_mb();

    /* Always notify when queue is empty (when feature acknowledge) */
    if (vu_has_feature(dev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
        !vq->inuse && vu_queue_empty(dev, vq)) {
        return true;
    }

    if (!vu_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
        return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT);
    }

    v = vq->signalled_used_valid;
    vq->signalled_used_valid = true;
    old = vq->signalled_used;
    new = vq->signalled_used = vq->used_idx;
    return !v || vring_need_event(vring_get_used_event(vq), new, old);
}

static void _vu_queue_notify(VuDev *dev, VuVirtq *vq, bool sync)
{
    if (unlikely(dev->broken) ||
        unlikely(!vq->vring.avail)) {
        return;
    }

    if (!vring_notify(dev, vq)) {
        DPRINT("skipped notify...\n");
        return;
    }

    if (vq->call_fd < 0 &&
        vu_has_protocol_feature(dev,
                                VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS) &&
        vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_BACKEND_REQ)) {
        VhostUserMsg vmsg = {
            .request = VHOST_USER_BACKEND_VRING_CALL,
            .flags = VHOST_USER_VERSION,
            .size = sizeof(vmsg.payload.state),
            .payload.state = {
                .index = vq - dev->vq,
            },
        };
        bool ack = sync &&
                   vu_has_protocol_feature(dev,
                                           VHOST_USER_PROTOCOL_F_REPLY_ACK);

        if (ack) {
            vmsg.flags |= VHOST_USER_NEED_REPLY_MASK;
        }

        vu_message_write(dev, dev->backend_fd, &vmsg);
        if (ack) {
            vu_message_read_default(dev, dev->backend_fd, &vmsg);
        }
        return;
    }

    if (eventfd_write(vq->call_fd, 1) < 0) {
        vu_panic(dev, "Error writing eventfd: %s", strerror(errno));
    }
}

void vu_queue_notify(VuDev *dev, VuVirtq *vq)
{
    _vu_queue_notify(dev, vq, false);
}

void vu_queue_notify_sync(VuDev *dev, VuVirtq *vq)
{
    _vu_queue_notify(dev, vq, true);
}

void vu_config_change_msg(VuDev *dev)
{
    VhostUserMsg vmsg = {
        .request = VHOST_USER_BACKEND_CONFIG_CHANGE_MSG,
        .flags = VHOST_USER_VERSION,
    };

    vu_message_write(dev, dev->backend_fd, &vmsg);
}

static inline void
vring_used_flags_set_bit(VuVirtq *vq, int mask)
{
    uint16_t *flags;

    flags = (uint16_t *)((char *)vq->vring.used +
                         offsetof(struct vring_used, flags));
    *flags = htole16(le16toh(*flags) | mask);
}

static inline void
vring_used_flags_unset_bit(VuVirtq *vq, int mask)
{
    uint16_t *flags;

    flags = (uint16_t *)((char *)vq->vring.used +
                         offsetof(struct vring_used, flags));
    *flags = htole16(le16toh(*flags) & ~mask);
}

static inline void
vring_set_avail_event(VuVirtq *vq, uint16_t val)
{
    uint16_t val_le = htole16(val);

    if (!vq->notification) {
        return;
    }

    memcpy(&vq->vring.used->ring[vq->vring.num], &val_le, sizeof(uint16_t));
}

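/*
 * Worked example (editor's note) for the event-idx test in
 * vring_notify() above: vring_need_event(event, new, old) evaluates
 *
 *     (uint16_t)(new - event - 1) < (uint16_t)(new - old)
 *
 * i.e. "did used_idx step past the guest's used_event since we last
 * signalled?".  With old = 5, new = 8 and used_event = 6:
 * (8 - 6 - 1) = 1 < (8 - 5) = 3, so a notification is sent; with
 * used_event = 9 it is suppressed.  The free-running uint16_t
 * arithmetic keeps the comparison correct across index wrap-around.
 */
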
void
vu_queue_set_notification(VuDev *dev, VuVirtq *vq, int enable)
{
    vq->notification = enable;
    if (vu_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
        vring_set_avail_event(vq, vring_avail_idx(vq));
    } else if (enable) {
        vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
    } else {
        vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
    }
    if (enable) {
        /* Expose avail event/used flags before caller checks the avail idx. */
        smp_mb();
    }
}

static bool
virtqueue_map_desc(VuDev *dev,
                   unsigned int *p_num_sg, struct iovec *iov,
                   unsigned int max_num_sg, bool is_write,
                   uint64_t pa, size_t sz)
{
    unsigned num_sg = *p_num_sg;

    assert(num_sg <= max_num_sg);

    if (!sz) {
        vu_panic(dev, "virtio: zero sized buffers are not allowed");
        return false;
    }

    while (sz) {
        uint64_t len = sz;

        if (num_sg == max_num_sg) {
            vu_panic(dev, "virtio: too many descriptors in indirect table");
            return false;
        }

        iov[num_sg].iov_base = vu_gpa_to_va(dev, &len, pa);
        if (iov[num_sg].iov_base == NULL) {
            vu_panic(dev, "virtio: invalid address for buffers");
            return false;
        }
        iov[num_sg].iov_len = len;
        num_sg++;
        sz -= len;
        pa += len;
    }

    *p_num_sg = num_sg;
    return true;
}

static void *
virtqueue_alloc_element(size_t sz,
                        unsigned out_num, unsigned in_num)
{
    VuVirtqElement *elem;
    size_t in_sg_ofs = ALIGN_UP(sz, __alignof__(elem->in_sg[0]));
    size_t out_sg_ofs = in_sg_ofs + in_num * sizeof(elem->in_sg[0]);
    size_t out_sg_end = out_sg_ofs + out_num * sizeof(elem->out_sg[0]);

    assert(sz >= sizeof(VuVirtqElement));
    elem = malloc(out_sg_end);
    if (!elem) {
        DPRINT("%s: failed to malloc virtqueue element\n", __func__);
        return NULL;
    }
    elem->out_num = out_num;
    elem->in_num = in_num;
    elem->in_sg = (void *)elem + in_sg_ofs;
    elem->out_sg = (void *)elem + out_sg_ofs;
    return elem;
}

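/*
 * Memory layout produced by virtqueue_alloc_element() (editor's note):
 * a single allocation holding the caller's element struct followed by
 * both scatter-gather arrays, so one free() releases everything.
 *
 *   +-------------------------+ <- elem (sz >= sizeof(VuVirtqElement))
 *   | caller's struct         |
 *   +-------------------------+ <- elem->in_sg, at ALIGN_UP(sz, ...)
 *   | in_num  * struct iovec  |
 *   +-------------------------+ <- elem->out_sg
 *   | out_num * struct iovec  |
 *   +-------------------------+
 */
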
static void *
vu_queue_map_desc(VuDev *dev, VuVirtq *vq, unsigned int idx, size_t sz)
{
    struct vring_desc *desc = vq->vring.desc;
    uint64_t desc_addr, read_len;
    unsigned int desc_len;
    unsigned int max = vq->vring.num;
    unsigned int i = idx;
    VuVirtqElement *elem;
    unsigned int out_num = 0, in_num = 0;
    struct iovec iov[VIRTQUEUE_MAX_SIZE];
    struct vring_desc desc_buf[VIRTQUEUE_MAX_SIZE];
    int rc;

    if (le16toh(desc[i].flags) & VRING_DESC_F_INDIRECT) {
        if (le32toh(desc[i].len) % sizeof(struct vring_desc)) {
            vu_panic(dev, "Invalid size for indirect buffer table");
            return NULL;
        }

        /* loop over the indirect descriptor table */
        desc_addr = le64toh(desc[i].addr);
        desc_len = le32toh(desc[i].len);
        max = desc_len / sizeof(struct vring_desc);
        read_len = desc_len;
        desc = vu_gpa_to_va(dev, &read_len, desc_addr);
        if (unlikely(desc && read_len != desc_len)) {
            /* Failed to use zero copy */
            desc = NULL;
            if (!virtqueue_read_indirect_desc(dev, desc_buf,
                                              desc_addr,
                                              desc_len)) {
                desc = desc_buf;
            }
        }
        if (!desc) {
            vu_panic(dev, "Invalid indirect buffer table");
            return NULL;
        }
        i = 0;
    }

    /* Collect all the descriptors */
    do {
        if (le16toh(desc[i].flags) & VRING_DESC_F_WRITE) {
            if (!virtqueue_map_desc(dev, &in_num, iov + out_num,
                                    VIRTQUEUE_MAX_SIZE - out_num, true,
                                    le64toh(desc[i].addr),
                                    le32toh(desc[i].len))) {
                return NULL;
            }
        } else {
            if (in_num) {
                vu_panic(dev, "Incorrect order for descriptors");
                return NULL;
            }
            if (!virtqueue_map_desc(dev, &out_num, iov,
                                    VIRTQUEUE_MAX_SIZE, false,
                                    le64toh(desc[i].addr),
                                    le32toh(desc[i].len))) {
                return NULL;
            }
        }

        /* If we've got too many, that implies a descriptor loop. */
        if ((in_num + out_num) > max) {
            vu_panic(dev, "Looped descriptor");
            return NULL;
        }
        rc = virtqueue_read_next_desc(dev, desc, i, max, &i);
    } while (rc == VIRTQUEUE_READ_DESC_MORE);

    if (rc == VIRTQUEUE_READ_DESC_ERROR) {
        vu_panic(dev, "read descriptor error");
        return NULL;
    }

    /* Now copy what we have collected and mapped */
    elem = virtqueue_alloc_element(sz, out_num, in_num);
    if (!elem) {
        return NULL;
    }
    elem->index = idx;
    for (i = 0; i < out_num; i++) {
        elem->out_sg[i] = iov[i];
    }
    for (i = 0; i < in_num; i++) {
        elem->in_sg[i] = iov[out_num + i];
    }

    return elem;
}

static int
vu_queue_inflight_get(VuDev *dev, VuVirtq *vq, int desc_idx)
{
    if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
        return 0;
    }

    if (unlikely(!vq->inflight)) {
        return -1;
    }

    vq->inflight->desc[desc_idx].counter = vq->counter++;
    vq->inflight->desc[desc_idx].inflight = 1;

    return 0;
}

static int
vu_queue_inflight_pre_put(VuDev *dev, VuVirtq *vq, int desc_idx)
{
    if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
        return 0;
    }

    if (unlikely(!vq->inflight)) {
        return -1;
    }

    vq->inflight->last_batch_head = desc_idx;

    return 0;
}

static int
vu_queue_inflight_post_put(VuDev *dev, VuVirtq *vq, int desc_idx)
{
    if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
        return 0;
    }

    if (unlikely(!vq->inflight)) {
        return -1;
    }

    barrier();

    vq->inflight->desc[desc_idx].inflight = 0;

    barrier();

    vq->inflight->used_idx = vq->used_idx;

    return 0;
}

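/*
 * Editor's note on the inflight helpers above: when
 * VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD is negotiated, the shared buffer
 * tracks every descriptor between pop and push so that a backend which
 * crashes and reconnects can resubmit unfinished requests.  The ordering
 * is: vu_queue_inflight_get() when the head is popped, then on
 * completion vu_queue_inflight_pre_put() (record last_batch_head), the
 * used-ring update, and vu_queue_inflight_post_put() (clear the inflight
 * flag and publish used_idx), with barrier()s keeping those stores from
 * being reordered.
 */
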
void *
vu_queue_pop(VuDev *dev, VuVirtq *vq, size_t sz)
{
    int i;
    unsigned int head;
    VuVirtqElement *elem;

    if (unlikely(dev->broken) ||
        unlikely(!vq->vring.avail)) {
        return NULL;
    }

    if (unlikely(vq->resubmit_list && vq->resubmit_num > 0)) {
        i = (--vq->resubmit_num);
        elem = vu_queue_map_desc(dev, vq, vq->resubmit_list[i].index, sz);

        if (!vq->resubmit_num) {
            free(vq->resubmit_list);
            vq->resubmit_list = NULL;
        }

        return elem;
    }

    if (vu_queue_empty(dev, vq)) {
        return NULL;
    }
    /*
     * Needed after vu_queue_empty(), see comment in
     * virtqueue_num_heads().
     */
    smp_rmb();

    if (vq->inuse >= vq->vring.num) {
        vu_panic(dev, "Virtqueue size exceeded");
        return NULL;
    }

    if (!virtqueue_get_head(dev, vq, vq->last_avail_idx++, &head)) {
        return NULL;
    }

    if (vu_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
        vring_set_avail_event(vq, vq->last_avail_idx);
    }

    elem = vu_queue_map_desc(dev, vq, head, sz);

    if (!elem) {
        return NULL;
    }

    vq->inuse++;

    vu_queue_inflight_get(dev, vq, head);

    return elem;
}

static void
vu_queue_detach_element(VuDev *dev, VuVirtq *vq, VuVirtqElement *elem,
                        size_t len)
{
    vq->inuse--;
    /* unmap, when DMA support is added */
}

void
vu_queue_unpop(VuDev *dev, VuVirtq *vq, VuVirtqElement *elem,
               size_t len)
{
    vq->last_avail_idx--;
    vu_queue_detach_element(dev, vq, elem, len);
}

bool
vu_queue_rewind(VuDev *dev, VuVirtq *vq, unsigned int num)
{
    if (num > vq->inuse) {
        return false;
    }
    vq->last_avail_idx -= num;
    vq->inuse -= num;
    return true;
}

static inline
void vring_used_write(VuDev *dev, VuVirtq *vq,
                      struct vring_used_elem *uelem, int i)
{
    struct vring_used *used = vq->vring.used;

    used->ring[i] = *uelem;
    vu_log_write(dev, vq->vring.log_guest_addr +
                 offsetof(struct vring_used, ring[i]),
                 sizeof(used->ring[i]));
}

static void
vu_log_queue_fill(VuDev *dev, VuVirtq *vq,
                  const VuVirtqElement *elem,
                  unsigned int len)
{
    struct vring_desc *desc = vq->vring.desc;
    unsigned int i, max, min, desc_len;
    uint64_t desc_addr, read_len;
    struct vring_desc desc_buf[VIRTQUEUE_MAX_SIZE];
    unsigned num_bufs = 0;

    max = vq->vring.num;
    i = elem->index;

    if (le16toh(desc[i].flags) & VRING_DESC_F_INDIRECT) {
        if (le32toh(desc[i].len) % sizeof(struct vring_desc)) {
            vu_panic(dev, "Invalid size for indirect buffer table");
            return;
        }

        /* loop over the indirect descriptor table */
        desc_addr = le64toh(desc[i].addr);
        desc_len = le32toh(desc[i].len);
        max = desc_len / sizeof(struct vring_desc);
        read_len = desc_len;
        desc = vu_gpa_to_va(dev, &read_len, desc_addr);
        if (unlikely(desc && read_len != desc_len)) {
            /* Failed to use zero copy */
            desc = NULL;
            if (!virtqueue_read_indirect_desc(dev, desc_buf,
                                              desc_addr,
                                              desc_len)) {
                desc = desc_buf;
            }
        }
        if (!desc) {
            vu_panic(dev, "Invalid indirect buffer table");
            return;
        }
        i = 0;
    }

    do {
        if (++num_bufs > max) {
            vu_panic(dev, "Looped descriptor");
            return;
        }

        if (le16toh(desc[i].flags) & VRING_DESC_F_WRITE) {
            min = MIN(le32toh(desc[i].len), len);
            vu_log_write(dev, le64toh(desc[i].addr), min);
            len -= min;
        }

    } while (len > 0 &&
             (virtqueue_read_next_desc(dev, desc, i, max, &i)
              == VIRTQUEUE_READ_DESC_MORE));
}

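/*
 * Example (editor's sketch, not compiled): vu_queue_pop() allocates sz
 * bytes via virtqueue_alloc_element(), so a backend can embed
 * VuVirtqElement at the start of its own request struct.  MyReq and
 * bytes_written are hypothetical.
 */
#if 0
typedef struct MyReq {
    VuVirtqElement elem;    /* must be the first member */
    int status;
} MyReq;

MyReq *req = vu_queue_pop(dev, vq, sizeof(MyReq));
if (req) {
    /* read the request from req->elem.out_sg[],
     * write the response into req->elem.in_sg[] ... */
    vu_queue_push(dev, vq, &req->elem, bytes_written);
    vu_queue_notify(dev, vq);
    free(req);
}
#endif
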
void
vu_queue_fill(VuDev *dev, VuVirtq *vq,
              const VuVirtqElement *elem,
              unsigned int len, unsigned int idx)
{
    struct vring_used_elem uelem;

    if (unlikely(dev->broken) ||
        unlikely(!vq->vring.avail)) {
        return;
    }

    vu_log_queue_fill(dev, vq, elem, len);

    idx = (idx + vq->used_idx) % vq->vring.num;

    uelem.id = htole32(elem->index);
    uelem.len = htole32(len);
    vring_used_write(dev, vq, &uelem, idx);
}

static inline
void vring_used_idx_set(VuDev *dev, VuVirtq *vq, uint16_t val)
{
    vq->vring.used->idx = htole16(val);
    vu_log_write(dev,
                 vq->vring.log_guest_addr + offsetof(struct vring_used, idx),
                 sizeof(vq->vring.used->idx));

    vq->used_idx = val;
}

void
vu_queue_flush(VuDev *dev, VuVirtq *vq, unsigned int count)
{
    uint16_t old, new;

    if (unlikely(dev->broken) ||
        unlikely(!vq->vring.avail)) {
        return;
    }

    /* Make sure buffer is written before we update index. */
    smp_wmb();

    old = vq->used_idx;
    new = old + count;
    vring_used_idx_set(dev, vq, new);
    vq->inuse -= count;
    if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old))) {
        vq->signalled_used_valid = false;
    }
}

void
vu_queue_push(VuDev *dev, VuVirtq *vq,
              const VuVirtqElement *elem, unsigned int len)
{
    vu_queue_fill(dev, vq, elem, len, 0);
    vu_queue_inflight_pre_put(dev, vq, elem->index);
    vu_queue_flush(dev, vq, 1);
    vu_queue_inflight_post_put(dev, vq, elem->index);
}

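/*
 * Example (editor's sketch, not compiled): batching completions.
 * vu_queue_fill() with an increasing idx offset stages used-ring entries
 * without publishing them; a single vu_queue_flush() then moves
 * used->idx past all of them at once.  elems, lens and n are
 * hypothetical.
 */
#if 0
for (i = 0; i < n; i++) {
    vu_queue_fill(dev, vq, elems[i], lens[i], i);
}
vu_queue_flush(dev, vq, n);
vu_queue_notify(dev, vq);
#endif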