/*
 * Vhost User library
 *
 * Copyright IBM, Corp. 2007
 * Copyright (c) 2016 Red Hat, Inc.
 *
 * Authors:
 *  Anthony Liguori <aliguori@us.ibm.com>
 *  Marc-André Lureau <mlureau@redhat.com>
 *  Victor Kaplansky <victork@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later.  See the COPYING file in the top-level directory.
 */

/* this code avoids GLib dependency */
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <stdarg.h>
#include <errno.h>
#include <string.h>
#include <assert.h>
#include <inttypes.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/eventfd.h>
#include <sys/mman.h>
#include "qemu/compiler.h"

#if defined(__linux__)
#include <sys/syscall.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>

#ifdef __NR_userfaultfd
#include <linux/userfaultfd.h>
#endif

#endif

#include "qemu/atomic.h"
#include "qemu/osdep.h"
#include "qemu/memfd.h"

#include "libvhost-user.h"

/* usually provided by GLib */
#ifndef MIN
#define MIN(x, y) ({                            \
            typeof(x) _min1 = (x);              \
            typeof(y) _min2 = (y);              \
            (void) (&_min1 == &_min2);          \
            _min1 < _min2 ? _min1 : _min2; })
#endif

/* Round number down to multiple */
#define ALIGN_DOWN(n, m) ((n) / (m) * (m))

/* Round number up to multiple */
#define ALIGN_UP(n, m) ALIGN_DOWN((n) + (m) - 1, (m))
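
/*
 * Example: ALIGN_DOWN(13, 8) == 8 and ALIGN_UP(13, 8) == 16; a value that
 * is already a multiple of m is returned unchanged by both.
 */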

/* Align each region to cache line size in inflight buffer */
#define INFLIGHT_ALIGNMENT 64

/* The version of inflight buffer */
#define INFLIGHT_VERSION 1

#define VHOST_USER_HDR_SIZE offsetof(VhostUserMsg, payload.u64)

/* The version of the protocol we support */
#define VHOST_USER_VERSION 1
#define LIBVHOST_USER_DEBUG 0

#define DPRINT(...)                             \
    do {                                        \
        if (LIBVHOST_USER_DEBUG) {              \
            fprintf(stderr, __VA_ARGS__);       \
        }                                       \
    } while (0)

static inline
bool has_feature(uint64_t features, unsigned int fbit)
{
    assert(fbit < 64);
    return !!(features & (1ULL << fbit));
}

static inline
bool vu_has_feature(VuDev *dev,
                    unsigned int fbit)
{
    return has_feature(dev->features, fbit);
}

static inline bool vu_has_protocol_feature(VuDev *dev, unsigned int fbit)
{
    return has_feature(dev->protocol_features, fbit);
}

static const char *
vu_request_to_string(unsigned int req)
{
#define REQ(req) [req] = #req
    static const char *vu_request_str[] = {
        REQ(VHOST_USER_NONE),
        REQ(VHOST_USER_GET_FEATURES),
        REQ(VHOST_USER_SET_FEATURES),
        REQ(VHOST_USER_SET_OWNER),
        REQ(VHOST_USER_RESET_OWNER),
        REQ(VHOST_USER_SET_MEM_TABLE),
        REQ(VHOST_USER_SET_LOG_BASE),
        REQ(VHOST_USER_SET_LOG_FD),
        REQ(VHOST_USER_SET_VRING_NUM),
        REQ(VHOST_USER_SET_VRING_ADDR),
        REQ(VHOST_USER_SET_VRING_BASE),
        REQ(VHOST_USER_GET_VRING_BASE),
        REQ(VHOST_USER_SET_VRING_KICK),
        REQ(VHOST_USER_SET_VRING_CALL),
        REQ(VHOST_USER_SET_VRING_ERR),
        REQ(VHOST_USER_GET_PROTOCOL_FEATURES),
        REQ(VHOST_USER_SET_PROTOCOL_FEATURES),
        REQ(VHOST_USER_GET_QUEUE_NUM),
        REQ(VHOST_USER_SET_VRING_ENABLE),
        REQ(VHOST_USER_SEND_RARP),
        REQ(VHOST_USER_NET_SET_MTU),
        REQ(VHOST_USER_SET_SLAVE_REQ_FD),
        REQ(VHOST_USER_IOTLB_MSG),
        REQ(VHOST_USER_SET_VRING_ENDIAN),
        REQ(VHOST_USER_GET_CONFIG),
        REQ(VHOST_USER_SET_CONFIG),
        REQ(VHOST_USER_POSTCOPY_ADVISE),
        REQ(VHOST_USER_POSTCOPY_LISTEN),
        REQ(VHOST_USER_POSTCOPY_END),
        REQ(VHOST_USER_GET_INFLIGHT_FD),
        REQ(VHOST_USER_SET_INFLIGHT_FD),
        REQ(VHOST_USER_GPU_SET_SOCKET),
        REQ(VHOST_USER_VRING_KICK),
        REQ(VHOST_USER_MAX),
    };
#undef REQ

    if (req < VHOST_USER_MAX) {
        return vu_request_str[req];
    } else {
        return "unknown";
    }
}

static void
vu_panic(VuDev *dev, const char *msg, ...)
{
    char *buf = NULL;
    va_list ap;

    va_start(ap, msg);
    if (vasprintf(&buf, msg, ap) < 0) {
        buf = NULL;
    }
    va_end(ap);

    dev->broken = true;
    dev->panic(dev, buf);
    free(buf);

    /*
     * FIXME:
     * find a way to call virtio_error, or perhaps close the connection?
     */
}

/* Translate guest physical address to our virtual address.  */
void *
vu_gpa_to_va(VuDev *dev, uint64_t *plen, uint64_t guest_addr)
{
    int i;

    if (*plen == 0) {
        return NULL;
    }

    /* Find matching memory region.  */
    for (i = 0; i < dev->nregions; i++) {
        VuDevRegion *r = &dev->regions[i];

        if ((guest_addr >= r->gpa) && (guest_addr < (r->gpa + r->size))) {
            if ((guest_addr + *plen) > (r->gpa + r->size)) {
                *plen = r->gpa + r->size - guest_addr;
            }
            return (void *)(uintptr_t)
                guest_addr - r->gpa + r->mmap_addr + r->mmap_offset;
        }
    }

    return NULL;
}
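
/*
 * Example: given a region with gpa = 0x100000, size = 0x200000,
 * mmap_addr = M and mmap_offset = 0, vu_gpa_to_va(dev, &len, 0x100800)
 * returns M + 0x800, with *len clamped so the caller can never read or
 * write past the end of that region.
 */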

/* Translate qemu virtual address to our virtual address.  */
static void *
qva_to_va(VuDev *dev, uint64_t qemu_addr)
{
    int i;

    /* Find matching memory region.  */
    for (i = 0; i < dev->nregions; i++) {
        VuDevRegion *r = &dev->regions[i];

        if ((qemu_addr >= r->qva) && (qemu_addr < (r->qva + r->size))) {
            return (void *)(uintptr_t)
                qemu_addr - r->qva + r->mmap_addr + r->mmap_offset;
        }
    }

    return NULL;
}

static void
vmsg_close_fds(VhostUserMsg *vmsg)
{
    int i;

    for (i = 0; i < vmsg->fd_num; i++) {
        close(vmsg->fds[i]);
    }
}

/* Set reply payload.u64 and clear request flags and fd_num */
static void vmsg_set_reply_u64(VhostUserMsg *vmsg, uint64_t val)
{
    vmsg->flags = 0; /* defaults will be set by vu_send_reply() */
    vmsg->size = sizeof(vmsg->payload.u64);
    vmsg->payload.u64 = val;
    vmsg->fd_num = 0;
}

/* A test to see if we have userfault available */
static bool
have_userfault(void)
{
#if defined(__linux__) && defined(__NR_userfaultfd) &&\
        defined(UFFD_FEATURE_MISSING_SHMEM) &&\
        defined(UFFD_FEATURE_MISSING_HUGETLBFS)
    /* Now test the kernel we're running on really has the features */
    int ufd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
    struct uffdio_api api_struct;
    if (ufd < 0) {
        return false;
    }

    api_struct.api = UFFD_API;
    api_struct.features = UFFD_FEATURE_MISSING_SHMEM |
                          UFFD_FEATURE_MISSING_HUGETLBFS;
    if (ioctl(ufd, UFFDIO_API, &api_struct)) {
        close(ufd);
        return false;
    }
    close(ufd);
    return true;

#else
    return false;
#endif
}

static bool
vu_message_read(VuDev *dev, int conn_fd, VhostUserMsg *vmsg)
{
    char control[CMSG_SPACE(VHOST_MEMORY_MAX_NREGIONS * sizeof(int))] = { };
    struct iovec iov = {
        .iov_base = (char *)vmsg,
        .iov_len = VHOST_USER_HDR_SIZE,
    };
    struct msghdr msg = {
        .msg_iov = &iov,
        .msg_iovlen = 1,
        .msg_control = control,
        .msg_controllen = sizeof(control),
    };
    size_t fd_size;
    struct cmsghdr *cmsg;
    int rc;

    do {
        rc = recvmsg(conn_fd, &msg, 0);
    } while (rc < 0 && (errno == EINTR || errno == EAGAIN));

    if (rc < 0) {
        vu_panic(dev, "Error while recvmsg: %s", strerror(errno));
        return false;
    }

    vmsg->fd_num = 0;
    for (cmsg = CMSG_FIRSTHDR(&msg);
         cmsg != NULL;
         cmsg = CMSG_NXTHDR(&msg, cmsg))
    {
        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            fd_size = cmsg->cmsg_len - CMSG_LEN(0);
            vmsg->fd_num = fd_size / sizeof(int);
            memcpy(vmsg->fds, CMSG_DATA(cmsg), fd_size);
            break;
        }
    }

    if (vmsg->size > sizeof(vmsg->payload)) {
        vu_panic(dev,
                 "Error: too large message: request %d, vmsg->size: %u, "
                 "while sizeof(vmsg->payload) = %zu\n",
                 vmsg->request, vmsg->size, sizeof(vmsg->payload));
        goto fail;
    }

    if (vmsg->size) {
        do {
            rc = read(conn_fd, &vmsg->payload, vmsg->size);
        } while (rc < 0 && (errno == EINTR || errno == EAGAIN));

        if (rc <= 0) {
            vu_panic(dev, "Error while reading: %s", strerror(errno));
            goto fail;
        }

        assert(rc == vmsg->size);
    }

    return true;

fail:
    vmsg_close_fds(vmsg);

    return false;
}
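
/*
 * On the wire, each vhost-user message is a fixed-size header
 * (VHOST_USER_HDR_SIZE bytes: request, flags, size) followed by vmsg->size
 * bytes of payload; any file descriptors travel alongside the header as
 * SCM_RIGHTS ancillary data, which is why vu_message_read() collects them
 * from the control buffer before reading the payload.
 */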

static bool
vu_message_write(VuDev *dev, int conn_fd, VhostUserMsg *vmsg)
{
    int rc;
    uint8_t *p = (uint8_t *)vmsg;
    char control[CMSG_SPACE(VHOST_MEMORY_MAX_NREGIONS * sizeof(int))] = { };
    struct iovec iov = {
        .iov_base = (char *)vmsg,
        .iov_len = VHOST_USER_HDR_SIZE,
    };
    struct msghdr msg = {
        .msg_iov = &iov,
        .msg_iovlen = 1,
        .msg_control = control,
    };
    struct cmsghdr *cmsg;

    memset(control, 0, sizeof(control));
    assert(vmsg->fd_num <= VHOST_MEMORY_MAX_NREGIONS);
    if (vmsg->fd_num > 0) {
        size_t fdsize = vmsg->fd_num * sizeof(int);
        msg.msg_controllen = CMSG_SPACE(fdsize);
        cmsg = CMSG_FIRSTHDR(&msg);
        cmsg->cmsg_len = CMSG_LEN(fdsize);
        cmsg->cmsg_level = SOL_SOCKET;
        cmsg->cmsg_type = SCM_RIGHTS;
        memcpy(CMSG_DATA(cmsg), vmsg->fds, fdsize);
    } else {
        msg.msg_controllen = 0;
    }

    do {
        rc = sendmsg(conn_fd, &msg, 0);
    } while (rc < 0 && (errno == EINTR || errno == EAGAIN));

    if (vmsg->size) {
        do {
            if (vmsg->data) {
                rc = write(conn_fd, vmsg->data, vmsg->size);
            } else {
                rc = write(conn_fd, p + VHOST_USER_HDR_SIZE, vmsg->size);
            }
        } while (rc < 0 && (errno == EINTR || errno == EAGAIN));
    }

    if (rc <= 0) {
        vu_panic(dev, "Error while writing: %s", strerror(errno));
        return false;
    }

    return true;
}

static bool
vu_send_reply(VuDev *dev, int conn_fd, VhostUserMsg *vmsg)
{
    /* Set the version in the flags when sending the reply */
    vmsg->flags &= ~VHOST_USER_VERSION_MASK;
    vmsg->flags |= VHOST_USER_VERSION;
    vmsg->flags |= VHOST_USER_REPLY_MASK;

    return vu_message_write(dev, conn_fd, vmsg);
}

/*
 * Processes a reply on the slave channel.
 * Entered with slave_mutex held and releases it before exit.
 * Returns true on success.
 */
static bool
vu_process_message_reply(VuDev *dev, const VhostUserMsg *vmsg)
{
    VhostUserMsg msg_reply;
    bool result = false;

    if ((vmsg->flags & VHOST_USER_NEED_REPLY_MASK) == 0) {
        result = true;
        goto out;
    }

    if (!vu_message_read(dev, dev->slave_fd, &msg_reply)) {
        goto out;
    }

    if (msg_reply.request != vmsg->request) {
        DPRINT("Received unexpected msg type. Expected %d received %d",
               vmsg->request, msg_reply.request);
        goto out;
    }

    result = msg_reply.payload.u64 == 0;

out:
    pthread_mutex_unlock(&dev->slave_mutex);
    return result;
}

/* Kick the log_call_fd if required. */
static void
vu_log_kick(VuDev *dev)
{
    if (dev->log_call_fd != -1) {
        DPRINT("Kicking QEMU's log...\n");
        if (eventfd_write(dev->log_call_fd, 1) < 0) {
            vu_panic(dev, "Error writing eventfd: %s", strerror(errno));
        }
    }
}

static void
vu_log_page(uint8_t *log_table, uint64_t page)
{
    DPRINT("Logged dirty guest page: %"PRId64"\n", page);
    atomic_or(&log_table[page / 8], 1 << (page % 8));
}

static void
vu_log_write(VuDev *dev, uint64_t address, uint64_t length)
{
    uint64_t page;

    if (!(dev->features & (1ULL << VHOST_F_LOG_ALL)) ||
        !dev->log_table || !length) {
        return;
    }

    assert(dev->log_size > ((address + length - 1) / VHOST_LOG_PAGE / 8));

    page = address / VHOST_LOG_PAGE;
    while (page * VHOST_LOG_PAGE < address + length) {
        vu_log_page(dev->log_table, page);
        page += 1;
    }

    vu_log_kick(dev);
}
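
/*
 * The dirty log is a bitmap with one bit per VHOST_LOG_PAGE bytes of guest
 * memory: byte page / 8, bit page % 8.  vu_log_write() sets the bit for
 * every page the write touches and then signals log_call_fd so QEMU can
 * rescan the bitmap (used during live migration).
 */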

static void
vu_kick_cb(VuDev *dev, int condition, void *data)
{
    int index = (intptr_t)data;
    VuVirtq *vq = &dev->vq[index];
    int sock = vq->kick_fd;
    eventfd_t kick_data;
    ssize_t rc;

    rc = eventfd_read(sock, &kick_data);
    if (rc == -1) {
        vu_panic(dev, "kick eventfd_read(): %s", strerror(errno));
        dev->remove_watch(dev, dev->vq[index].kick_fd);
    } else {
        DPRINT("Got kick_data: %016"PRIx64" handler:%p idx:%d\n",
               kick_data, vq->handler, index);
        if (vq->handler) {
            vq->handler(dev, index);
        }
    }
}

static bool
vu_get_features_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    vmsg->payload.u64 =
        1ULL << VHOST_F_LOG_ALL |
        1ULL << VHOST_USER_F_PROTOCOL_FEATURES;

    if (dev->iface->get_features) {
        vmsg->payload.u64 |= dev->iface->get_features(dev);
    }

    vmsg->size = sizeof(vmsg->payload.u64);
    vmsg->fd_num = 0;

    DPRINT("Sending back to guest u64: 0x%016"PRIx64"\n", vmsg->payload.u64);

    return true;
}

static void
vu_set_enable_all_rings(VuDev *dev, bool enabled)
{
    uint16_t i;

    for (i = 0; i < dev->max_queues; i++) {
        dev->vq[i].enable = enabled;
    }
}

static bool
vu_set_features_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);

    dev->features = vmsg->payload.u64;

    /*
     * Rings start enabled when VHOST_USER_F_PROTOCOL_FEATURES was not
     * negotiated.  Note the feature is a bit *number*, so it must be
     * tested with vu_has_feature(), not a raw bitwise AND.
     */
    if (!vu_has_feature(dev, VHOST_USER_F_PROTOCOL_FEATURES)) {
        vu_set_enable_all_rings(dev, true);
    }

    if (dev->iface->set_features) {
        dev->iface->set_features(dev, dev->features);
    }

    return false;
}

static bool
vu_set_owner_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    return false;
}

static void
vu_close_log(VuDev *dev)
{
    if (dev->log_table) {
        if (munmap(dev->log_table, dev->log_size) != 0) {
            perror("close log munmap() error");
        }

        dev->log_table = NULL;
    }
    if (dev->log_call_fd != -1) {
        close(dev->log_call_fd);
        dev->log_call_fd = -1;
    }
}

static bool
vu_reset_device_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    vu_set_enable_all_rings(dev, false);

    return false;
}

/* Returns true on failure, i.e. when some ring address failed to translate. */
static bool
map_ring(VuDev *dev, VuVirtq *vq)
{
    vq->vring.desc = qva_to_va(dev, vq->vra.desc_user_addr);
    vq->vring.used = qva_to_va(dev, vq->vra.used_user_addr);
    vq->vring.avail = qva_to_va(dev, vq->vra.avail_user_addr);

    DPRINT("Setting virtq addresses:\n");
    DPRINT("    vring_desc  at %p\n", vq->vring.desc);
    DPRINT("    vring_used  at %p\n", vq->vring.used);
    DPRINT("    vring_avail at %p\n", vq->vring.avail);

    return !(vq->vring.desc && vq->vring.used && vq->vring.avail);
}

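/*
 * Postcopy SET_MEM_TABLE handshake, as implemented below: map every region
 * PROT_NONE, reply to QEMU with the resulting addresses, wait for QEMU to
 * acknowledge that it has registered the fault handlers, then register each
 * range with userfaultfd and finally mprotect() it back to read/write so
 * guest accesses start generating faults.
 */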
static bool
vu_set_mem_table_exec_postcopy(VuDev *dev, VhostUserMsg *vmsg)
{
    int i;
    VhostUserMemory m = vmsg->payload.memory, *memory = &m;
    dev->nregions = memory->nregions;

    DPRINT("Nregions: %d\n", memory->nregions);
    for (i = 0; i < dev->nregions; i++) {
        void *mmap_addr;
        VhostUserMemoryRegion *msg_region = &memory->regions[i];
        VuDevRegion *dev_region = &dev->regions[i];

        DPRINT("Region %d\n", i);
        DPRINT("    guest_phys_addr: 0x%016"PRIx64"\n",
               msg_region->guest_phys_addr);
        DPRINT("    memory_size:     0x%016"PRIx64"\n",
               msg_region->memory_size);
        DPRINT("    userspace_addr   0x%016"PRIx64"\n",
               msg_region->userspace_addr);
        DPRINT("    mmap_offset      0x%016"PRIx64"\n",
               msg_region->mmap_offset);

        dev_region->gpa = msg_region->guest_phys_addr;
        dev_region->size = msg_region->memory_size;
        dev_region->qva = msg_region->userspace_addr;
        dev_region->mmap_offset = msg_region->mmap_offset;

        /* We don't use offset argument of mmap() since the
         * mapped address has to be page aligned, and we use huge
         * pages.
         * In postcopy we're using PROT_NONE here to catch anyone
         * accessing it before we userfault.
         */
        mmap_addr = mmap(0, dev_region->size + dev_region->mmap_offset,
                         PROT_NONE, MAP_SHARED,
                         vmsg->fds[i], 0);

        if (mmap_addr == MAP_FAILED) {
            vu_panic(dev, "region mmap error: %s", strerror(errno));
        } else {
            dev_region->mmap_addr = (uint64_t)(uintptr_t)mmap_addr;
            DPRINT("    mmap_addr:       0x%016"PRIx64"\n",
                   dev_region->mmap_addr);
        }

        /* Return the address to QEMU so that it can translate the ufd
         * fault addresses back.
         */
        msg_region->userspace_addr = (uintptr_t)(mmap_addr +
                                                 dev_region->mmap_offset);
        close(vmsg->fds[i]);
    }

    /* Send the message back to qemu with the addresses filled in */
    vmsg->fd_num = 0;
    if (!vu_send_reply(dev, dev->sock, vmsg)) {
        vu_panic(dev, "failed to respond to set-mem-table for postcopy");
        return false;
    }

    /* Wait for QEMU to confirm that it's registered the handler for the
     * faults.
     */
    if (!vu_message_read(dev, dev->sock, vmsg) ||
        vmsg->size != sizeof(vmsg->payload.u64) ||
        vmsg->payload.u64 != 0) {
        vu_panic(dev, "failed to receive valid ack for postcopy set-mem-table");
        return false;
    }

    /* OK, now we can go and register the memory and generate faults */
    for (i = 0; i < dev->nregions; i++) {
        VuDevRegion *dev_region = &dev->regions[i];
        int ret;
#ifdef UFFDIO_REGISTER
        /* We should already have an open ufd. Mark each memory
         * range as ufd.
         * Discard any mapping we have here; note I can't use MADV_REMOVE
         * or fallocate to make the hole since I don't want to lose
         * data that's already arrived in the shared process.
         * TODO: How to do this for hugepages?
         */
        ret = madvise((void *)(uintptr_t)dev_region->mmap_addr,
                      dev_region->size + dev_region->mmap_offset,
                      MADV_DONTNEED);
        if (ret) {
            fprintf(stderr,
                    "%s: Failed to madvise(DONTNEED) region %d: %s\n",
                    __func__, i, strerror(errno));
        }
        /* Turn off transparent hugepages so we don't lose wakeups
         * in neighbouring pages.
         * TODO: Turn this back on later.
         */
        ret = madvise((void *)(uintptr_t)dev_region->mmap_addr,
                      dev_region->size + dev_region->mmap_offset,
                      MADV_NOHUGEPAGE);
        if (ret) {
            /* Note: This can happen legally on kernels that are configured
             * without madvise'able hugepages
             */
            fprintf(stderr,
                    "%s: Failed to madvise(NOHUGEPAGE) region %d: %s\n",
                    __func__, i, strerror(errno));
        }
        struct uffdio_register reg_struct;
        reg_struct.range.start = (uintptr_t)dev_region->mmap_addr;
        reg_struct.range.len = dev_region->size + dev_region->mmap_offset;
        reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;

        if (ioctl(dev->postcopy_ufd, UFFDIO_REGISTER, &reg_struct)) {
            vu_panic(dev, "%s: Failed to userfault region %d "
                          "@%p + size:%zx offset: %zx: (ufd=%d)%s\n",
                     __func__, i,
                     dev_region->mmap_addr,
                     dev_region->size, dev_region->mmap_offset,
                     dev->postcopy_ufd, strerror(errno));
            return false;
        }
        if (!(reg_struct.ioctls & ((__u64)1 << _UFFDIO_COPY))) {
            vu_panic(dev, "%s Region (%d) doesn't support COPY",
                     __func__, i);
            return false;
        }
        DPRINT("%s: region %d: Registered userfault for %"
               PRIx64 " + %" PRIx64 "\n", __func__, i,
               (uint64_t)reg_struct.range.start,
               (uint64_t)reg_struct.range.len);
        /* Now it's registered we can let the client at it */
        if (mprotect((void *)(uintptr_t)dev_region->mmap_addr,
                     dev_region->size + dev_region->mmap_offset,
                     PROT_READ | PROT_WRITE)) {
            vu_panic(dev, "failed to mprotect region %d for postcopy (%s)",
                     i, strerror(errno));
            return false;
        }
        /* TODO: Stash 'zero' support flags somewhere */
#endif
    }

    return false;
}

static bool
vu_set_mem_table_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    int i;
    VhostUserMemory m = vmsg->payload.memory, *memory = &m;

    for (i = 0; i < dev->nregions; i++) {
        VuDevRegion *r = &dev->regions[i];
        void *m = (void *) (uintptr_t) r->mmap_addr;

        if (m) {
            munmap(m, r->size + r->mmap_offset);
        }
    }
    dev->nregions = memory->nregions;

    if (dev->postcopy_listening) {
        return vu_set_mem_table_exec_postcopy(dev, vmsg);
    }

    DPRINT("Nregions: %d\n", memory->nregions);
    for (i = 0; i < dev->nregions; i++) {
        void *mmap_addr;
        VhostUserMemoryRegion *msg_region = &memory->regions[i];
        VuDevRegion *dev_region = &dev->regions[i];

        DPRINT("Region %d\n", i);
        DPRINT("    guest_phys_addr: 0x%016"PRIx64"\n",
               msg_region->guest_phys_addr);
        DPRINT("    memory_size:     0x%016"PRIx64"\n",
               msg_region->memory_size);
        DPRINT("    userspace_addr   0x%016"PRIx64"\n",
               msg_region->userspace_addr);
        DPRINT("    mmap_offset      0x%016"PRIx64"\n",
               msg_region->mmap_offset);

        dev_region->gpa = msg_region->guest_phys_addr;
        dev_region->size = msg_region->memory_size;
        dev_region->qva = msg_region->userspace_addr;
        dev_region->mmap_offset = msg_region->mmap_offset;

        /* We don't use offset argument of mmap() since the
         * mapped address has to be page aligned, and we use huge
         * pages.  */
        mmap_addr = mmap(0, dev_region->size + dev_region->mmap_offset,
                         PROT_READ | PROT_WRITE, MAP_SHARED,
                         vmsg->fds[i], 0);

        if (mmap_addr == MAP_FAILED) {
            vu_panic(dev, "region mmap error: %s", strerror(errno));
        } else {
            dev_region->mmap_addr = (uint64_t)(uintptr_t)mmap_addr;
            DPRINT("    mmap_addr:       0x%016"PRIx64"\n",
                   dev_region->mmap_addr);
        }

        close(vmsg->fds[i]);
    }

    for (i = 0; i < dev->max_queues; i++) {
        if (dev->vq[i].vring.desc) {
            if (map_ring(dev, &dev->vq[i])) {
                vu_panic(dev, "remapping queue %d during setmemtable", i);
            }
        }
    }

    return false;
}

static bool
vu_set_log_base_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    int fd;
    uint64_t log_mmap_size, log_mmap_offset;
    void *rc;

    if (vmsg->fd_num != 1 ||
        vmsg->size != sizeof(vmsg->payload.log)) {
        vu_panic(dev, "Invalid log_base message");
        return true;
    }

    fd = vmsg->fds[0];
    log_mmap_offset = vmsg->payload.log.mmap_offset;
    log_mmap_size = vmsg->payload.log.mmap_size;
    DPRINT("Log mmap_offset: %"PRId64"\n", log_mmap_offset);
    DPRINT("Log mmap_size:   %"PRId64"\n", log_mmap_size);

    rc = mmap(0, log_mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd,
              log_mmap_offset);
    close(fd);
    if (rc == MAP_FAILED) {
        perror("log mmap error");
    }

    if (dev->log_table) {
        munmap(dev->log_table, dev->log_size);
    }
    dev->log_table = rc;
    dev->log_size = log_mmap_size;

    vmsg->size = sizeof(vmsg->payload.u64);
    vmsg->fd_num = 0;

    return true;
}

static bool
vu_set_log_fd_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    if (vmsg->fd_num != 1) {
        vu_panic(dev, "Invalid log_fd message");
        return false;
    }

    if (dev->log_call_fd != -1) {
        close(dev->log_call_fd);
    }
    dev->log_call_fd = vmsg->fds[0];
    DPRINT("Got log_call_fd: %d\n", vmsg->fds[0]);

    return false;
}

static bool
vu_set_vring_num_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    unsigned int index = vmsg->payload.state.index;
    unsigned int num = vmsg->payload.state.num;

    DPRINT("State.index: %d\n", index);
    DPRINT("State.num:   %d\n", num);
    dev->vq[index].vring.num = num;

    return false;
}

static bool
vu_set_vring_addr_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    struct vhost_vring_addr addr = vmsg->payload.addr, *vra = &addr;
    unsigned int index = vra->index;
    VuVirtq *vq = &dev->vq[index];

    DPRINT("vhost_vring_addr:\n");
    DPRINT("    index:  %d\n", vra->index);
    DPRINT("    flags:  %d\n", vra->flags);
    DPRINT("    desc_user_addr:   0x%016" PRIx64 "\n", vra->desc_user_addr);
    DPRINT("    used_user_addr:   0x%016" PRIx64 "\n", vra->used_user_addr);
    DPRINT("    avail_user_addr:  0x%016" PRIx64 "\n", vra->avail_user_addr);
    DPRINT("    log_guest_addr:   0x%016" PRIx64 "\n", vra->log_guest_addr);

    vq->vra = *vra;
    vq->vring.flags = vra->flags;
    vq->vring.log_guest_addr = vra->log_guest_addr;

    if (map_ring(dev, vq)) {
        vu_panic(dev, "Invalid vring_addr message");
        return false;
    }

    vq->used_idx = vq->vring.used->idx;

    if (vq->last_avail_idx != vq->used_idx) {
        bool resume = dev->iface->queue_is_processed_in_order &&
            dev->iface->queue_is_processed_in_order(dev, index);

        DPRINT("Last avail index != used index: %u != %u%s\n",
               vq->last_avail_idx, vq->used_idx,
               resume ? ", resuming" : "");

        if (resume) {
            vq->shadow_avail_idx = vq->last_avail_idx = vq->used_idx;
        }
    }

    return false;
}

static bool
vu_set_vring_base_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    unsigned int index = vmsg->payload.state.index;
    unsigned int num = vmsg->payload.state.num;

    DPRINT("State.index: %d\n", index);
    DPRINT("State.num:   %d\n", num);
    dev->vq[index].shadow_avail_idx = dev->vq[index].last_avail_idx = num;

    return false;
}

static bool
vu_get_vring_base_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    unsigned int index = vmsg->payload.state.index;

    DPRINT("State.index: %d\n", index);
    vmsg->payload.state.num = dev->vq[index].last_avail_idx;
    vmsg->size = sizeof(vmsg->payload.state);

    dev->vq[index].started = false;
    if (dev->iface->queue_set_started) {
        dev->iface->queue_set_started(dev, index, false);
    }

    if (dev->vq[index].call_fd != -1) {
        close(dev->vq[index].call_fd);
        dev->vq[index].call_fd = -1;
    }
    if (dev->vq[index].kick_fd != -1) {
        dev->remove_watch(dev, dev->vq[index].kick_fd);
        close(dev->vq[index].kick_fd);
        dev->vq[index].kick_fd = -1;
    }

    return true;
}

static bool
vu_check_queue_msg_file(VuDev *dev, VhostUserMsg *vmsg)
{
    int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
    bool nofd = vmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK;

    if (index >= dev->max_queues) {
        vmsg_close_fds(vmsg);
        vu_panic(dev, "Invalid queue index: %u", index);
        return false;
    }

    if (nofd) {
        vmsg_close_fds(vmsg);
        return true;
    }

    if (vmsg->fd_num != 1) {
        vmsg_close_fds(vmsg);
        vu_panic(dev, "Invalid fds in request: %d", vmsg->request);
        return false;
    }

    return true;
}

static int
inflight_desc_compare(const void *a, const void *b)
{
    VuVirtqInflightDesc *desc0 = (VuVirtqInflightDesc *)a,
                        *desc1 = (VuVirtqInflightDesc *)b;

    if (desc1->counter > desc0->counter &&
        (desc1->counter - desc0->counter) < VIRTQUEUE_MAX_SIZE * 2) {
        return 1;
    }

    return -1;
}

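/*
 * After a reconnect, replay requests that were submitted but never
 * completed: scan the shared inflight buffer, collect descriptors still
 * marked inflight into resubmit_list (sorted oldest-first by submission
 * counter, with inflight_desc_compare tolerating counter wraparound), and
 * kick ourselves once so processing restarts.
 */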
static int
vu_check_queue_inflights(VuDev *dev, VuVirtq *vq)
{
    int i = 0;

    if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
        return 0;
    }

    if (unlikely(!vq->inflight)) {
        return -1;
    }

    if (unlikely(!vq->inflight->version)) {
        /* initialize the buffer */
        vq->inflight->version = INFLIGHT_VERSION;
        return 0;
    }

    vq->used_idx = vq->vring.used->idx;
    vq->resubmit_num = 0;
    vq->resubmit_list = NULL;
    vq->counter = 0;

    if (unlikely(vq->inflight->used_idx != vq->used_idx)) {
        vq->inflight->desc[vq->inflight->last_batch_head].inflight = 0;

        barrier();

        vq->inflight->used_idx = vq->used_idx;
    }

    for (i = 0; i < vq->inflight->desc_num; i++) {
        if (vq->inflight->desc[i].inflight == 1) {
            vq->inuse++;
        }
    }

    vq->shadow_avail_idx = vq->last_avail_idx = vq->inuse + vq->used_idx;

    if (vq->inuse) {
        vq->resubmit_list = calloc(vq->inuse, sizeof(VuVirtqInflightDesc));
        if (!vq->resubmit_list) {
            return -1;
        }

        for (i = 0; i < vq->inflight->desc_num; i++) {
            if (vq->inflight->desc[i].inflight) {
                vq->resubmit_list[vq->resubmit_num].index = i;
                vq->resubmit_list[vq->resubmit_num].counter =
                    vq->inflight->desc[i].counter;
                vq->resubmit_num++;
            }
        }

        if (vq->resubmit_num > 1) {
            qsort(vq->resubmit_list, vq->resubmit_num,
                  sizeof(VuVirtqInflightDesc), inflight_desc_compare);
        }
        vq->counter = vq->resubmit_list[0].counter + 1;
    }

    /* in case of I/O hang after reconnecting */
    if (eventfd_write(vq->kick_fd, 1)) {
        return -1;
    }

    return 0;
}

static bool
vu_set_vring_kick_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
    bool nofd = vmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK;

    DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);

    if (!vu_check_queue_msg_file(dev, vmsg)) {
        return false;
    }

    if (dev->vq[index].kick_fd != -1) {
        dev->remove_watch(dev, dev->vq[index].kick_fd);
        close(dev->vq[index].kick_fd);
        dev->vq[index].kick_fd = -1;
    }

    dev->vq[index].kick_fd = nofd ? -1 : vmsg->fds[0];
    DPRINT("Got kick_fd: %d for vq: %d\n", dev->vq[index].kick_fd, index);

    dev->vq[index].started = true;
    if (dev->iface->queue_set_started) {
        dev->iface->queue_set_started(dev, index, true);
    }

    if (dev->vq[index].kick_fd != -1 && dev->vq[index].handler) {
        dev->set_watch(dev, dev->vq[index].kick_fd, VU_WATCH_IN,
                       vu_kick_cb, (void *)(long)index);

        DPRINT("Waiting for kicks on fd: %d for vq: %d\n",
               dev->vq[index].kick_fd, index);
    }

    if (vu_check_queue_inflights(dev, &dev->vq[index])) {
        vu_panic(dev, "Failed to check inflights for vq: %d\n", index);
    }

    return false;
}

void vu_set_queue_handler(VuDev *dev, VuVirtq *vq,
                          vu_queue_handler_cb handler)
{
    int qidx = vq - dev->vq;

    vq->handler = handler;
    if (vq->kick_fd >= 0) {
        if (handler) {
            dev->set_watch(dev, vq->kick_fd, VU_WATCH_IN,
                           vu_kick_cb, (void *)(long)qidx);
        } else {
            dev->remove_watch(dev, vq->kick_fd);
        }
    }
}

bool vu_set_queue_host_notifier(VuDev *dev, VuVirtq *vq, int fd,
                                int size, int offset)
{
    int qidx = vq - dev->vq;
    int fd_num = 0;
    VhostUserMsg vmsg = {
        .request = VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG,
        .flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK,
        .size = sizeof(vmsg.payload.area),
        .payload.area = {
            .u64 = qidx & VHOST_USER_VRING_IDX_MASK,
            .size = size,
            .offset = offset,
        },
    };

    if (fd == -1) {
        vmsg.payload.area.u64 |= VHOST_USER_VRING_NOFD_MASK;
    } else {
        vmsg.fds[fd_num++] = fd;
    }

    vmsg.fd_num = fd_num;

    if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD)) {
        return false;
    }

    pthread_mutex_lock(&dev->slave_mutex);
    if (!vu_message_write(dev, dev->slave_fd, &vmsg)) {
        pthread_mutex_unlock(&dev->slave_mutex);
        return false;
    }

    /* Also unlocks the slave_mutex */
    return vu_process_message_reply(dev, &vmsg);
}
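
/*
 * Within this file, vu_set_queue_host_notifier() is the only place the
 * library initiates a request of its own: it sends
 * VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG (plus the notifier fd) over the
 * slave channel with NEED_REPLY set, and vu_process_message_reply() then
 * blocks for the master's ack while slave_mutex keeps concurrent
 * slave-channel requests from interleaving.
 */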

static bool
vu_set_vring_call_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
    bool nofd = vmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK;

    DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);

    if (!vu_check_queue_msg_file(dev, vmsg)) {
        return false;
    }

    if (dev->vq[index].call_fd != -1) {
        close(dev->vq[index].call_fd);
        dev->vq[index].call_fd = -1;
    }

    dev->vq[index].call_fd = nofd ? -1 : vmsg->fds[0];

    /* in case of I/O hang after reconnecting */
    if (dev->vq[index].call_fd != -1 && eventfd_write(vmsg->fds[0], 1)) {
        return false;
    }

    DPRINT("Got call_fd: %d for vq: %d\n", dev->vq[index].call_fd, index);

    return false;
}

static bool
vu_set_vring_err_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
    bool nofd = vmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK;

    DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);

    if (!vu_check_queue_msg_file(dev, vmsg)) {
        return false;
    }

    if (dev->vq[index].err_fd != -1) {
        close(dev->vq[index].err_fd);
        dev->vq[index].err_fd = -1;
    }

    dev->vq[index].err_fd = nofd ? -1 : vmsg->fds[0];

    return false;
}

static bool
vu_get_protocol_features_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    /*
     * Note that we support, but intentionally do not set,
     * VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS. This means that
     * a device implementation can return it in its callback
     * (get_protocol_features) if it wants to use this for
     * simulation, but it is otherwise not desirable (if even
     * implemented by the master.)
     */
    uint64_t features = 1ULL << VHOST_USER_PROTOCOL_F_MQ |
                        1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD |
                        1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ |
                        1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER |
                        1ULL << VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD |
                        1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK;

    if (have_userfault()) {
        features |= 1ULL << VHOST_USER_PROTOCOL_F_PAGEFAULT;
    }

    if (dev->iface->get_config && dev->iface->set_config) {
        features |= 1ULL << VHOST_USER_PROTOCOL_F_CONFIG;
    }

    if (dev->iface->get_protocol_features) {
        features |= dev->iface->get_protocol_features(dev);
    }

    vmsg_set_reply_u64(vmsg, features);
    return true;
}

static bool
vu_set_protocol_features_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    uint64_t features = vmsg->payload.u64;

    DPRINT("u64: 0x%016"PRIx64"\n", features);

    dev->protocol_features = vmsg->payload.u64;

    if (vu_has_protocol_feature(dev,
                                VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS) &&
        (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_SLAVE_REQ) ||
         !vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_REPLY_ACK))) {
        /*
         * The use case for using messages for kick/call is simulation, to
         * make the kick and call synchronous. To actually get that
         * behaviour, both of the other features are required.
         * Theoretically, one could use only kick messages, or do them
         * without having F_REPLY_ACK, but too many (possibly pending)
         * messages on the socket will eventually cause the master to hang.
         * To avoid this in scenarios where it is not desired, enforce a
         * combination of settings that actually enables the simulation
         * case.
         */
        vu_panic(dev,
                 "F_IN_BAND_NOTIFICATIONS requires F_SLAVE_REQ && F_REPLY_ACK");
        return false;
    }

    if (dev->iface->set_protocol_features) {
        dev->iface->set_protocol_features(dev, features);
    }

    return false;
}

static bool
vu_get_queue_num_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    vmsg_set_reply_u64(vmsg, dev->max_queues);
    return true;
}

static bool
vu_set_vring_enable_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    unsigned int index = vmsg->payload.state.index;
    unsigned int enable = vmsg->payload.state.num;

    DPRINT("State.index:  %d\n", index);
    DPRINT("State.enable: %d\n", enable);

    if (index >= dev->max_queues) {
        vu_panic(dev, "Invalid vring_enable index: %u", index);
        return false;
    }

    dev->vq[index].enable = enable;
    return false;
}

static bool
vu_set_slave_req_fd(VuDev *dev, VhostUserMsg *vmsg)
{
    if (vmsg->fd_num != 1) {
        vu_panic(dev, "Invalid slave_req_fd message (%d fds)", vmsg->fd_num);
        return false;
    }

    if (dev->slave_fd != -1) {
        close(dev->slave_fd);
    }
    dev->slave_fd = vmsg->fds[0];
    DPRINT("Got slave_fd: %d\n", vmsg->fds[0]);

    return false;
}

static bool
vu_get_config(VuDev *dev, VhostUserMsg *vmsg)
{
    int ret = -1;

    if (dev->iface->get_config) {
        ret = dev->iface->get_config(dev, vmsg->payload.config.region,
                                     vmsg->payload.config.size);
    }

    if (ret) {
        /* resize to zero to indicate an error to master */
        vmsg->size = 0;
    }

    return true;
}

static bool
vu_set_config(VuDev *dev, VhostUserMsg *vmsg)
{
    int ret = -1;

    if (dev->iface->set_config) {
        ret = dev->iface->set_config(dev, vmsg->payload.config.region,
                                     vmsg->payload.config.offset,
                                     vmsg->payload.config.size,
                                     vmsg->payload.config.flags);
        if (ret) {
            vu_panic(dev, "Set virtio configuration space failed");
        }
    }

    return false;
}

static bool
vu_set_postcopy_advise(VuDev *dev, VhostUserMsg *vmsg)
{
    dev->postcopy_ufd = -1;
#ifdef UFFDIO_API
    struct uffdio_api api_struct;

    dev->postcopy_ufd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
    vmsg->size = 0;
#endif

    if (dev->postcopy_ufd == -1) {
        vu_panic(dev, "Userfaultfd not available: %s", strerror(errno));
        goto out;
    }

#ifdef UFFDIO_API
    api_struct.api = UFFD_API;
    api_struct.features = 0;
    if (ioctl(dev->postcopy_ufd, UFFDIO_API, &api_struct)) {
        vu_panic(dev, "Failed UFFDIO_API: %s", strerror(errno));
        close(dev->postcopy_ufd);
        dev->postcopy_ufd = -1;
        goto out;
    }
    /* TODO: Stash feature flags somewhere */
#endif

out:
    /* Return a ufd to the QEMU */
    vmsg->fd_num = 1;
    vmsg->fds[0] = dev->postcopy_ufd;
    return true; /* = send a reply */
}

static bool
vu_set_postcopy_listen(VuDev *dev, VhostUserMsg *vmsg)
{
    if (dev->nregions) {
        vu_panic(dev, "Regions already registered at postcopy-listen");
        vmsg_set_reply_u64(vmsg, -1);
        return true;
    }
    dev->postcopy_listening = true;

    vmsg_set_reply_u64(vmsg, 0);
    return true;
}

static bool
vu_set_postcopy_end(VuDev *dev, VhostUserMsg *vmsg)
{
    DPRINT("%s: Entry\n", __func__);
    dev->postcopy_listening = false;
    if (dev->postcopy_ufd > 0) {
        close(dev->postcopy_ufd);
        dev->postcopy_ufd = -1;
        DPRINT("%s: Done close\n", __func__);
    }

    vmsg_set_reply_u64(vmsg, 0);
    DPRINT("%s: exit\n", __func__);
    return true;
}

static inline uint64_t
vu_inflight_queue_size(uint16_t queue_size)
{
    return ALIGN_UP(sizeof(VuDescStateSplit) * queue_size +
           sizeof(uint16_t), INFLIGHT_ALIGNMENT);
}
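
/*
 * Per-queue inflight region size: sizeof(VuDescStateSplit) * queue_size +
 * sizeof(uint16_t), rounded up to the 64-byte INFLIGHT_ALIGNMENT.  For
 * example, assuming sizeof(VuDescStateSplit) == 16 and queue_size == 256,
 * that is ALIGN_UP(4098, 64) == 4160 bytes.
 */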

static bool
vu_get_inflight_fd(VuDev *dev, VhostUserMsg *vmsg)
{
    int fd;
    void *addr;
    uint64_t mmap_size;
    uint16_t num_queues, queue_size;

    if (vmsg->size != sizeof(vmsg->payload.inflight)) {
        vu_panic(dev, "Invalid get_inflight_fd message:%d", vmsg->size);
        vmsg->payload.inflight.mmap_size = 0;
        return true;
    }

    num_queues = vmsg->payload.inflight.num_queues;
    queue_size = vmsg->payload.inflight.queue_size;

    DPRINT("get_inflight_fd num_queues: %"PRId16"\n", num_queues);
    DPRINT("get_inflight_fd queue_size: %"PRId16"\n", queue_size);

    mmap_size = vu_inflight_queue_size(queue_size) * num_queues;

    addr = qemu_memfd_alloc("vhost-inflight", mmap_size,
                            F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL,
                            &fd, NULL);

    if (!addr) {
        vu_panic(dev, "Failed to alloc vhost inflight area");
        vmsg->payload.inflight.mmap_size = 0;
        return true;
    }

    memset(addr, 0, mmap_size);

    dev->inflight_info.addr = addr;
    dev->inflight_info.size = vmsg->payload.inflight.mmap_size = mmap_size;
    dev->inflight_info.fd = vmsg->fds[0] = fd;
    vmsg->fd_num = 1;
    vmsg->payload.inflight.mmap_offset = 0;

    DPRINT("send inflight mmap_size: %"PRId64"\n",
           vmsg->payload.inflight.mmap_size);
    DPRINT("send inflight mmap offset: %"PRId64"\n",
           vmsg->payload.inflight.mmap_offset);

    return true;
}

static bool
vu_set_inflight_fd(VuDev *dev, VhostUserMsg *vmsg)
{
    int fd, i;
    uint64_t mmap_size, mmap_offset;
    uint16_t num_queues, queue_size;
    void *rc;

    if (vmsg->fd_num != 1 ||
        vmsg->size != sizeof(vmsg->payload.inflight)) {
        vu_panic(dev, "Invalid set_inflight_fd message size:%d fds:%d",
                 vmsg->size, vmsg->fd_num);
        return false;
    }

    fd = vmsg->fds[0];
    mmap_size = vmsg->payload.inflight.mmap_size;
    mmap_offset = vmsg->payload.inflight.mmap_offset;
    num_queues = vmsg->payload.inflight.num_queues;
    queue_size = vmsg->payload.inflight.queue_size;

    DPRINT("set_inflight_fd mmap_size: %"PRId64"\n", mmap_size);
    DPRINT("set_inflight_fd mmap_offset: %"PRId64"\n", mmap_offset);
    DPRINT("set_inflight_fd num_queues: %"PRId16"\n", num_queues);
    DPRINT("set_inflight_fd queue_size: %"PRId16"\n", queue_size);

    rc = mmap(0, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
              fd, mmap_offset);

    if (rc == MAP_FAILED) {
        vu_panic(dev, "set_inflight_fd mmap error: %s", strerror(errno));
        return false;
    }

    if (dev->inflight_info.fd) {
        close(dev->inflight_info.fd);
    }

    if (dev->inflight_info.addr) {
        munmap(dev->inflight_info.addr, dev->inflight_info.size);
    }

    dev->inflight_info.fd = fd;
    dev->inflight_info.addr = rc;
    dev->inflight_info.size = mmap_size;

    for (i = 0; i < num_queues; i++) {
        dev->vq[i].inflight = (VuVirtqInflight *)rc;
        dev->vq[i].inflight->desc_num = queue_size;
        rc = (void *)((char *)rc + vu_inflight_queue_size(queue_size));
    }

    return false;
}

static bool
vu_handle_vring_kick(VuDev *dev, VhostUserMsg *vmsg)
{
    unsigned int index = vmsg->payload.state.index;

    if (index >= dev->max_queues) {
        vu_panic(dev, "Invalid queue index: %u", index);
        return false;
    }

    DPRINT("Got kick message: handler:%p idx:%d\n",
           dev->vq[index].handler, index);

    if (!dev->vq[index].started) {
        dev->vq[index].started = true;

        if (dev->iface->queue_set_started) {
            dev->iface->queue_set_started(dev, index, true);
        }
    }

    if (dev->vq[index].handler) {
        dev->vq[index].handler(dev, index);
    }

    return false;
}

static bool
vu_process_message(VuDev *dev, VhostUserMsg *vmsg)
{
    int do_reply = 0;

    /* Print out generic part of the request. */
    DPRINT("================ Vhost user message ================\n");
    DPRINT("Request: %s (%d)\n", vu_request_to_string(vmsg->request),
           vmsg->request);
    DPRINT("Flags:   0x%x\n", vmsg->flags);
    DPRINT("Size:    %d\n", vmsg->size);

    if (vmsg->fd_num) {
        int i;
        DPRINT("Fds:");
        for (i = 0; i < vmsg->fd_num; i++) {
            DPRINT(" %d", vmsg->fds[i]);
        }
        DPRINT("\n");
    }

    if (dev->iface->process_msg &&
        dev->iface->process_msg(dev, vmsg, &do_reply)) {
        return do_reply;
    }

    switch (vmsg->request) {
    case VHOST_USER_GET_FEATURES:
        return vu_get_features_exec(dev, vmsg);
    case VHOST_USER_SET_FEATURES:
        return vu_set_features_exec(dev, vmsg);
    case VHOST_USER_GET_PROTOCOL_FEATURES:
        return vu_get_protocol_features_exec(dev, vmsg);
    case VHOST_USER_SET_PROTOCOL_FEATURES:
        return vu_set_protocol_features_exec(dev, vmsg);
    case VHOST_USER_SET_OWNER:
        return vu_set_owner_exec(dev, vmsg);
    case VHOST_USER_RESET_OWNER:
        return vu_reset_device_exec(dev, vmsg);
    case VHOST_USER_SET_MEM_TABLE:
        return vu_set_mem_table_exec(dev, vmsg);
    case VHOST_USER_SET_LOG_BASE:
        return vu_set_log_base_exec(dev, vmsg);
    case VHOST_USER_SET_LOG_FD:
        return vu_set_log_fd_exec(dev, vmsg);
    case VHOST_USER_SET_VRING_NUM:
        return vu_set_vring_num_exec(dev, vmsg);
    case VHOST_USER_SET_VRING_ADDR:
        return vu_set_vring_addr_exec(dev, vmsg);
    case VHOST_USER_SET_VRING_BASE:
        return vu_set_vring_base_exec(dev, vmsg);
    case VHOST_USER_GET_VRING_BASE:
        return vu_get_vring_base_exec(dev, vmsg);
    case VHOST_USER_SET_VRING_KICK:
        return vu_set_vring_kick_exec(dev, vmsg);
    case VHOST_USER_SET_VRING_CALL:
        return vu_set_vring_call_exec(dev, vmsg);
    case VHOST_USER_SET_VRING_ERR:
        return vu_set_vring_err_exec(dev, vmsg);
    case VHOST_USER_GET_QUEUE_NUM:
        return vu_get_queue_num_exec(dev, vmsg);
    case VHOST_USER_SET_VRING_ENABLE:
        return vu_set_vring_enable_exec(dev, vmsg);
    case VHOST_USER_SET_SLAVE_REQ_FD:
        return vu_set_slave_req_fd(dev, vmsg);
    case VHOST_USER_GET_CONFIG:
        return vu_get_config(dev, vmsg);
    case VHOST_USER_SET_CONFIG:
        return vu_set_config(dev, vmsg);
    case VHOST_USER_NONE:
        /* if you need processing before exit, override iface->process_msg */
        exit(0);
    case VHOST_USER_POSTCOPY_ADVISE:
        return vu_set_postcopy_advise(dev, vmsg);
    case VHOST_USER_POSTCOPY_LISTEN:
        return vu_set_postcopy_listen(dev, vmsg);
    case VHOST_USER_POSTCOPY_END:
        return vu_set_postcopy_end(dev, vmsg);
    case VHOST_USER_GET_INFLIGHT_FD:
        return vu_get_inflight_fd(dev, vmsg);
    case VHOST_USER_SET_INFLIGHT_FD:
        return vu_set_inflight_fd(dev, vmsg);
    case VHOST_USER_VRING_KICK:
        return vu_handle_vring_kick(dev, vmsg);
    default:
        vmsg_close_fds(vmsg);
        vu_panic(dev, "Unhandled request: %d", vmsg->request);
    }

    return false;
}

bool
vu_dispatch(VuDev *dev)
{
    VhostUserMsg vmsg = { 0, };
    int reply_requested;
    bool need_reply, success = false;

    if (!vu_message_read(dev, dev->sock, &vmsg)) {
        goto end;
    }

    need_reply = vmsg.flags & VHOST_USER_NEED_REPLY_MASK;

    reply_requested = vu_process_message(dev, &vmsg);
    if (!reply_requested && need_reply) {
        vmsg_set_reply_u64(&vmsg, 0);
        reply_requested = 1;
    }

    if (!reply_requested) {
        success = true;
        goto end;
    }

    if (!vu_send_reply(dev, dev->sock, &vmsg)) {
        goto end;
    }

    success = true;

end:
    free(vmsg.data);
    return success;
}
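
/*
 * Each vu_dispatch() call handles exactly one message: read it, let the
 * per-request handler decide whether a reply is needed (handlers return
 * true to request one, and NEED_REPLY forces a zero-status ack even when
 * they don't), send the reply, and report false on any failure so the
 * caller can tear down the connection.
 */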

void
vu_deinit(VuDev *dev)
{
    int i;

    for (i = 0; i < dev->nregions; i++) {
        VuDevRegion *r = &dev->regions[i];
        void *m = (void *) (uintptr_t) r->mmap_addr;
        if (m != MAP_FAILED) {
            munmap(m, r->size + r->mmap_offset);
        }
    }
    dev->nregions = 0;

    for (i = 0; i < dev->max_queues; i++) {
        VuVirtq *vq = &dev->vq[i];

        if (vq->call_fd != -1) {
            close(vq->call_fd);
            vq->call_fd = -1;
        }

        if (vq->kick_fd != -1) {
            close(vq->kick_fd);
            vq->kick_fd = -1;
        }

        if (vq->err_fd != -1) {
            close(vq->err_fd);
            vq->err_fd = -1;
        }

        if (vq->resubmit_list) {
            free(vq->resubmit_list);
            vq->resubmit_list = NULL;
        }

        vq->inflight = NULL;
    }

    if (dev->inflight_info.addr) {
        munmap(dev->inflight_info.addr, dev->inflight_info.size);
        dev->inflight_info.addr = NULL;
    }

    if (dev->inflight_info.fd > 0) {
        close(dev->inflight_info.fd);
        dev->inflight_info.fd = -1;
    }

    vu_close_log(dev);
    if (dev->slave_fd != -1) {
        close(dev->slave_fd);
        dev->slave_fd = -1;
    }
    pthread_mutex_destroy(&dev->slave_mutex);

    if (dev->sock != -1) {
        close(dev->sock);
    }

    free(dev->vq);
    dev->vq = NULL;
}

bool
vu_init(VuDev *dev,
        uint16_t max_queues,
        int socket,
        vu_panic_cb panic,
        vu_set_watch_cb set_watch,
        vu_remove_watch_cb remove_watch,
        const VuDevIface *iface)
{
    uint16_t i;

    assert(max_queues > 0);
    assert(socket >= 0);
    assert(set_watch);
    assert(remove_watch);
    assert(iface);
    assert(panic);

    memset(dev, 0, sizeof(*dev));

    dev->sock = socket;
    dev->panic = panic;
    dev->set_watch = set_watch;
    dev->remove_watch = remove_watch;
    dev->iface = iface;
    dev->log_call_fd = -1;
    pthread_mutex_init(&dev->slave_mutex, NULL);
    dev->slave_fd = -1;
    dev->max_queues = max_queues;

    dev->vq = malloc(max_queues * sizeof(dev->vq[0]));
    if (!dev->vq) {
        DPRINT("%s: failed to malloc virtqueues\n", __func__);
        return false;
    }

    for (i = 0; i < max_queues; i++) {
        dev->vq[i] = (VuVirtq) {
            .call_fd = -1, .kick_fd = -1, .err_fd = -1,
            .notification = true,
        };
    }

    return true;
}
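
/*
 * A minimal usage sketch (not part of the library): the names my_panic,
 * my_set_watch, my_remove_watch, my_iface and listen_sock are hypothetical
 * and stand in for an application's event loop and VuDevIface callbacks.
 *
 *     VuDev dev;
 *     int conn_fd = accept(listen_sock, NULL, NULL);
 *
 *     if (!vu_init(&dev, 1, conn_fd, my_panic,
 *                  my_set_watch, my_remove_watch, &my_iface)) {
 *         return;  /* allocation failure */
 *     }
 *     while (vu_dispatch(&dev)) {
 *         /* one vhost-user message processed per iteration */
 *     }
 *     vu_deinit(&dev);
 */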

VuVirtq *
vu_get_queue(VuDev *dev, int qidx)
{
    assert(qidx < dev->max_queues);
    return &dev->vq[qidx];
}

bool
vu_queue_enabled(VuDev *dev, VuVirtq *vq)
{
    return vq->enable;
}

bool
vu_queue_started(const VuDev *dev, const VuVirtq *vq)
{
    return vq->started;
}

static inline uint16_t
vring_avail_flags(VuVirtq *vq)
{
    return vq->vring.avail->flags;
}

static inline uint16_t
vring_avail_idx(VuVirtq *vq)
{
    vq->shadow_avail_idx = vq->vring.avail->idx;

    return vq->shadow_avail_idx;
}

static inline uint16_t
vring_avail_ring(VuVirtq *vq, int i)
{
    return vq->vring.avail->ring[i];
}

static inline uint16_t
vring_get_used_event(VuVirtq *vq)
{
    return vring_avail_ring(vq, vq->vring.num);
}

static int
virtqueue_num_heads(VuDev *dev, VuVirtq *vq, unsigned int idx)
{
    uint16_t num_heads = vring_avail_idx(vq) - idx;

    /* Check it isn't doing very strange things with descriptor numbers. */
    if (num_heads > vq->vring.num) {
        vu_panic(dev, "Guest moved used index from %u to %u",
                 idx, vq->shadow_avail_idx);
        return -1;
    }
    if (num_heads) {
        /* On success, callers read a descriptor at vq->last_avail_idx.
         * Make sure descriptor read does not bypass avail index read. */
        smp_rmb();
    }

    return num_heads;
}

static bool
virtqueue_get_head(VuDev *dev, VuVirtq *vq,
                   unsigned int idx, unsigned int *head)
{
    /* Grab the next descriptor number they're advertising, and increment
     * the index we've seen. */
    *head = vring_avail_ring(vq, idx % vq->vring.num);

    /* If their number is silly, that's a fatal mistake. */
    if (*head >= vq->vring.num) {
        vu_panic(dev, "Guest says index %u is available", *head);
        return false;
    }

    return true;
}

static int
virtqueue_read_indirect_desc(VuDev *dev, struct vring_desc *desc,
                             uint64_t addr, size_t len)
{
    struct vring_desc *ori_desc;
    uint64_t read_len;

    if (len > (VIRTQUEUE_MAX_SIZE * sizeof(struct vring_desc))) {
        return -1;
    }

    if (len == 0) {
        return -1;
    }

    while (len) {
        read_len = len;
        ori_desc = vu_gpa_to_va(dev, &read_len, addr);
        if (!ori_desc) {
            return -1;
        }

        memcpy(desc, ori_desc, read_len);
        len -= read_len;
        addr += read_len;
        /* read_len counts bytes, so advance the output pointer in bytes */
        desc = (struct vring_desc *)((char *)desc + read_len);
    }

    return 0;
}

enum {
    VIRTQUEUE_READ_DESC_ERROR = -1,
    VIRTQUEUE_READ_DESC_DONE = 0,   /* end of chain */
    VIRTQUEUE_READ_DESC_MORE = 1,   /* more buffers in chain */
};

static int
virtqueue_read_next_desc(VuDev *dev, struct vring_desc *desc,
                         int i, unsigned int max, unsigned int *next)
{
    /* If this descriptor says it doesn't chain, we're done. */
    if (!(desc[i].flags & VRING_DESC_F_NEXT)) {
        return VIRTQUEUE_READ_DESC_DONE;
    }

    /* Check they're not leading us off end of descriptors. */
    *next = desc[i].next;
    /* Make sure compiler knows to grab that: we don't want it changing! */
    smp_wmb();

    if (*next >= max) {
        vu_panic(dev, "Desc next is %u", *next);
        return VIRTQUEUE_READ_DESC_ERROR;
    }

    return VIRTQUEUE_READ_DESC_MORE;
}

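/*
 * Walk every available descriptor chain (following VRING_DESC_F_NEXT and
 * dereferencing VRING_DESC_F_INDIRECT tables) and total up how many bytes
 * the device may write (in_bytes) and read (out_bytes), stopping early
 * once both caller-supplied limits are met.
 */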
void
vu_queue_get_avail_bytes(VuDev *dev, VuVirtq *vq, unsigned int *in_bytes,
                         unsigned int *out_bytes,
                         unsigned max_in_bytes, unsigned max_out_bytes)
{
    unsigned int idx;
    unsigned int total_bufs, in_total, out_total;
    int rc;

    idx = vq->last_avail_idx;

    total_bufs = in_total = out_total = 0;
    if (unlikely(dev->broken) ||
        unlikely(!vq->vring.avail)) {
        goto done;
    }

    while ((rc = virtqueue_num_heads(dev, vq, idx)) > 0) {
        unsigned int max, desc_len, num_bufs, indirect = 0;
        uint64_t desc_addr, read_len;
        struct vring_desc *desc;
        struct vring_desc desc_buf[VIRTQUEUE_MAX_SIZE];
        unsigned int i;

        max = vq->vring.num;
        num_bufs = total_bufs;
        if (!virtqueue_get_head(dev, vq, idx++, &i)) {
            goto err;
        }
        desc = vq->vring.desc;

        if (desc[i].flags & VRING_DESC_F_INDIRECT) {
            if (desc[i].len % sizeof(struct vring_desc)) {
                vu_panic(dev, "Invalid size for indirect buffer table");
                goto err;
            }

            /* If we've got too many, that implies a descriptor loop. */
            if (num_bufs >= max) {
                vu_panic(dev, "Looped descriptor");
                goto err;
            }

            /* loop over the indirect descriptor table */
            indirect = 1;
            desc_addr = desc[i].addr;
            desc_len = desc[i].len;
            max = desc_len / sizeof(struct vring_desc);
            read_len = desc_len;
            desc = vu_gpa_to_va(dev, &read_len, desc_addr);
            if (unlikely(desc && read_len != desc_len)) {
                /* Failed to use zero copy */
                desc = NULL;
                if (!virtqueue_read_indirect_desc(dev, desc_buf,
                                                  desc_addr,
                                                  desc_len)) {
                    desc = desc_buf;
                }
            }
            if (!desc) {
                vu_panic(dev, "Invalid indirect buffer table");
                goto err;
            }
            num_bufs = i = 0;
        }

        do {
            /* If we've got too many, that implies a descriptor loop. */
            if (++num_bufs > max) {
                vu_panic(dev, "Looped descriptor");
                goto err;
            }

            if (desc[i].flags & VRING_DESC_F_WRITE) {
                in_total += desc[i].len;
            } else {
                out_total += desc[i].len;
            }
            if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
                goto done;
            }
            rc = virtqueue_read_next_desc(dev, desc, i, max, &i);
        } while (rc == VIRTQUEUE_READ_DESC_MORE);

        if (rc == VIRTQUEUE_READ_DESC_ERROR) {
            goto err;
        }

        if (!indirect) {
            total_bufs = num_bufs;
        } else {
            total_bufs++;
        }
    }
    if (rc < 0) {
        goto err;
    }
done:
    if (in_bytes) {
        *in_bytes = in_total;
    }
    if (out_bytes) {
        *out_bytes = out_total;
    }
    return;

err:
    in_total = out_total = 0;
    goto done;
}

bool
vu_queue_avail_bytes(VuDev *dev, VuVirtq *vq, unsigned int in_bytes,
                     unsigned int out_bytes)
{
    unsigned int in_total, out_total;

    vu_queue_get_avail_bytes(dev, vq, &in_total, &out_total,
                             in_bytes, out_bytes);

    return in_bytes <= in_total && out_bytes <= out_total;
}
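/*
 * Illustrative sketch (not part of the library): a backend that needs a
 * 4-byte device-readable request header and at least 512 device-writable
 * bytes for the reply can gate its processing like this (the sizes are
 * hypothetical):
 *
 *     if (vu_queue_avail_bytes(dev, vq, 512, 4)) {
 *         // the queue currently exposes >= 512 writable ("in") and
 *         // >= 4 readable ("out") bytes, summed over available chains
 *     }
 */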

/* Fetch avail_idx from VQ memory only when we really need to know if
 * guest has added some buffers. */
bool
vu_queue_empty(VuDev *dev, VuVirtq *vq)
{
    if (unlikely(dev->broken) ||
        unlikely(!vq->vring.avail)) {
        return true;
    }

    if (vq->shadow_avail_idx != vq->last_avail_idx) {
        return false;
    }

    return vring_avail_idx(vq) == vq->last_avail_idx;
}

static bool
vring_notify(VuDev *dev, VuVirtq *vq)
{
    uint16_t old, new;
    bool v;

    /* We need to expose used array entries before checking used event. */
    smp_mb();

    /* Always notify when queue is empty (if that feature was negotiated) */
    if (vu_has_feature(dev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
        !vq->inuse && vu_queue_empty(dev, vq)) {
        return true;
    }

    if (!vu_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
        return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT);
    }

    v = vq->signalled_used_valid;
    vq->signalled_used_valid = true;
    old = vq->signalled_used;
    new = vq->signalled_used = vq->used_idx;
    return !v || vring_need_event(vring_get_used_event(vq), new, old);
}
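/*
 * Worked example of the event-idx test above. vring_need_event(ev, new, old)
 * from the virtio ring headers expands to
 *
 *     (uint16_t)(new - ev - 1) < (uint16_t)(new - old)
 *
 * i.e. "did used_idx move past the driver's used_event point since the
 * last notification?". With old = 10, new = 13 and ev = 11:
 * (uint16_t)(13 - 11 - 1) = 1 < (uint16_t)(13 - 10) = 3, so we notify.
 * With ev = 14: (uint16_t)(13 - 14 - 1) = 65534, which is not < 3, so the
 * notification is suppressed. The unsigned 16-bit arithmetic keeps the
 * test correct across index wraparound.
 */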

static void _vu_queue_notify(VuDev *dev, VuVirtq *vq, bool sync)
{
    if (unlikely(dev->broken) ||
        unlikely(!vq->vring.avail)) {
        return;
    }

    if (!vring_notify(dev, vq)) {
        DPRINT("skipped notify...\n");
        return;
    }

    if (vq->call_fd < 0 &&
        vu_has_protocol_feature(dev,
                                VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS) &&
        vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_SLAVE_REQ)) {
        VhostUserMsg vmsg = {
            .request = VHOST_USER_SLAVE_VRING_CALL,
            .flags = VHOST_USER_VERSION,
            .size = sizeof(vmsg.payload.state),
            .payload.state = {
                .index = vq - dev->vq,
            },
        };
        bool ack = sync &&
                   vu_has_protocol_feature(dev,
                                           VHOST_USER_PROTOCOL_F_REPLY_ACK);

        if (ack) {
            vmsg.flags |= VHOST_USER_NEED_REPLY_MASK;
        }

        vu_message_write(dev, dev->slave_fd, &vmsg);
        if (ack) {
            vu_message_read(dev, dev->slave_fd, &vmsg);
        }
        return;
    }

    if (eventfd_write(vq->call_fd, 1) < 0) {
        vu_panic(dev, "Error writing eventfd: %s", strerror(errno));
    }
}

void vu_queue_notify(VuDev *dev, VuVirtq *vq)
{
    _vu_queue_notify(dev, vq, false);
}

void vu_queue_notify_sync(VuDev *dev, VuVirtq *vq)
{
    _vu_queue_notify(dev, vq, true);
}

static inline void
vring_used_flags_set_bit(VuVirtq *vq, int mask)
{
    uint16_t *flags;

    flags = (uint16_t *)((char *)vq->vring.used +
                         offsetof(struct vring_used, flags));
    *flags |= mask;
}

static inline void
vring_used_flags_unset_bit(VuVirtq *vq, int mask)
{
    uint16_t *flags;

    flags = (uint16_t *)((char *)vq->vring.used +
                         offsetof(struct vring_used, flags));
    *flags &= ~mask;
}

static inline void
vring_set_avail_event(VuVirtq *vq, uint16_t val)
{
    if (!vq->notification) {
        return;
    }

    *((uint16_t *) &vq->vring.used->ring[vq->vring.num]) = val;
}

void
vu_queue_set_notification(VuDev *dev, VuVirtq *vq, int enable)
{
    vq->notification = enable;
    if (vu_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
        vring_set_avail_event(vq, vring_avail_idx(vq));
    } else if (enable) {
        vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
    } else {
        vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
    }
    if (enable) {
        /* Expose avail event/used flags before caller checks the avail idx. */
        smp_mb();
    }
}
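/*
 * Illustrative sketch (not part of the library): switching to polling while
 * draining the queue, with the re-check that catches buffers racing with
 * the notification disable:
 *
 *     vu_queue_set_notification(dev, vq, 0);
 *     for (;;) {
 *         VuVirtqElement *elem = vu_queue_pop(dev, vq, sizeof(*elem));
 *         if (elem) {
 *             // ... process, push and free elem ...
 *             continue;
 *         }
 *         vu_queue_set_notification(dev, vq, 1);
 *         if (vu_queue_empty(dev, vq)) {
 *             break;              // really empty; wait for the next kick
 *         }
 *         vu_queue_set_notification(dev, vq, 0);   // raced; keep polling
 *     }
 */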

static void
virtqueue_map_desc(VuDev *dev,
                   unsigned int *p_num_sg, struct iovec *iov,
                   unsigned int max_num_sg, bool is_write,
                   uint64_t pa, size_t sz)
{
    unsigned num_sg = *p_num_sg;

    assert(num_sg <= max_num_sg);

    if (!sz) {
        vu_panic(dev, "virtio: zero sized buffers are not allowed");
        return;
    }

    while (sz) {
        uint64_t len = sz;

        if (num_sg == max_num_sg) {
            vu_panic(dev, "virtio: too many descriptors in indirect table");
            return;
        }

        iov[num_sg].iov_base = vu_gpa_to_va(dev, &len, pa);
        if (iov[num_sg].iov_base == NULL) {
            vu_panic(dev, "virtio: invalid address for buffers");
            return;
        }
        iov[num_sg].iov_len = len;
        num_sg++;
        sz -= len;
        pa += len;
    }

    *p_num_sg = num_sg;
}

static void *
virtqueue_alloc_element(size_t sz,
                        unsigned out_num, unsigned in_num)
{
    VuVirtqElement *elem;
    size_t in_sg_ofs = ALIGN_UP(sz, __alignof__(elem->in_sg[0]));
    size_t out_sg_ofs = in_sg_ofs + in_num * sizeof(elem->in_sg[0]);
    size_t out_sg_end = out_sg_ofs + out_num * sizeof(elem->out_sg[0]);

    assert(sz >= sizeof(VuVirtqElement));
    elem = malloc(out_sg_end);
    if (!elem) {
        /* Don't dereference a failed allocation; let the caller bail out. */
        return NULL;
    }
    elem->out_num = out_num;
    elem->in_num = in_num;
    elem->in_sg = (void *)elem + in_sg_ofs;
    elem->out_sg = (void *)elem + out_sg_ofs;
    return elem;
}
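/*
 * The element is allocated as a single contiguous block (sz covers the
 * caller's wrapper struct, which must begin with a VuVirtqElement):
 *
 *     | sz bytes (element + caller data) | pad | in_sg[] | out_sg[] |
 *
 * so one free() of the returned pointer releases the element and both
 * scatter-gather arrays.
 */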

static void *
vu_queue_map_desc(VuDev *dev, VuVirtq *vq, unsigned int idx, size_t sz)
{
    struct vring_desc *desc = vq->vring.desc;
    uint64_t desc_addr, read_len;
    unsigned int desc_len;
    unsigned int max = vq->vring.num;
    unsigned int i = idx;
    VuVirtqElement *elem;
    unsigned int out_num = 0, in_num = 0;
    struct iovec iov[VIRTQUEUE_MAX_SIZE];
    struct vring_desc desc_buf[VIRTQUEUE_MAX_SIZE];
    int rc;

    if (desc[i].flags & VRING_DESC_F_INDIRECT) {
        if (desc[i].len % sizeof(struct vring_desc)) {
            vu_panic(dev, "Invalid size for indirect buffer table");
            /* Don't keep parsing a table we know is malformed. */
            return NULL;
        }

        /* loop over the indirect descriptor table */
        desc_addr = desc[i].addr;
        desc_len = desc[i].len;
        max = desc_len / sizeof(struct vring_desc);
        read_len = desc_len;
        desc = vu_gpa_to_va(dev, &read_len, desc_addr);
        if (unlikely(desc && read_len != desc_len)) {
            /* Failed to use zero copy */
            desc = NULL;
            if (!virtqueue_read_indirect_desc(dev, desc_buf,
                                              desc_addr,
                                              desc_len)) {
                desc = desc_buf;
            }
        }
        if (!desc) {
            vu_panic(dev, "Invalid indirect buffer table");
            return NULL;
        }
        i = 0;
    }

    /* Collect all the descriptors */
    do {
        if (desc[i].flags & VRING_DESC_F_WRITE) {
            virtqueue_map_desc(dev, &in_num, iov + out_num,
                               VIRTQUEUE_MAX_SIZE - out_num, true,
                               desc[i].addr, desc[i].len);
        } else {
            if (in_num) {
                vu_panic(dev, "Incorrect order for descriptors");
                return NULL;
            }
            virtqueue_map_desc(dev, &out_num, iov,
                               VIRTQUEUE_MAX_SIZE, false,
                               desc[i].addr, desc[i].len);
        }

        /* If we've got too many, that implies a descriptor loop. */
        if ((in_num + out_num) > max) {
            vu_panic(dev, "Looped descriptor");
            /* Returning here also stops us spinning on a looped chain. */
            return NULL;
        }
        rc = virtqueue_read_next_desc(dev, desc, i, max, &i);
    } while (rc == VIRTQUEUE_READ_DESC_MORE);

    if (rc == VIRTQUEUE_READ_DESC_ERROR) {
        vu_panic(dev, "read descriptor error");
        return NULL;
    }

    /* Now copy what we have collected and mapped */
    elem = virtqueue_alloc_element(sz, out_num, in_num);
    if (!elem) {
        return NULL;
    }
    elem->index = idx;
    for (i = 0; i < out_num; i++) {
        elem->out_sg[i] = iov[i];
    }
    for (i = 0; i < in_num; i++) {
        elem->in_sg[i] = iov[out_num + i];
    }

    return elem;
}

static int
vu_queue_inflight_get(VuDev *dev, VuVirtq *vq, int desc_idx)
{
    if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
        return 0;
    }

    if (unlikely(!vq->inflight)) {
        return -1;
    }

    vq->inflight->desc[desc_idx].counter = vq->counter++;
    vq->inflight->desc[desc_idx].inflight = 1;

    return 0;
}

static int
vu_queue_inflight_pre_put(VuDev *dev, VuVirtq *vq, int desc_idx)
{
    if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
        return 0;
    }

    if (unlikely(!vq->inflight)) {
        return -1;
    }

    vq->inflight->last_batch_head = desc_idx;

    return 0;
}

static int
vu_queue_inflight_post_put(VuDev *dev, VuVirtq *vq, int desc_idx)
{
    if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
        return 0;
    }

    if (unlikely(!vq->inflight)) {
        return -1;
    }

    barrier();

    vq->inflight->desc[desc_idx].inflight = 0;

    barrier();

    vq->inflight->used_idx = vq->used_idx;

    return 0;
}
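/*
 * Note on ordering (descriptive, matching the code above): the compiler
 * barrier()s keep the inflight-buffer stores in program order for a
 * concurrent reader in the same address space. A request is marked
 * inflight before it is processed (vu_queue_inflight_get), the head of the
 * batch is recorded before the used ring is flushed
 * (vu_queue_inflight_pre_put), and the entry is cleared only after
 * used_idx has been published (vu_queue_inflight_post_put). After a crash,
 * the reconnecting side can therefore tell which descriptors were popped
 * but never completed, and resubmit exactly those.
 */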

void *
vu_queue_pop(VuDev *dev, VuVirtq *vq, size_t sz)
{
    int i;
    unsigned int head;
    VuVirtqElement *elem;

    if (unlikely(dev->broken) ||
        unlikely(!vq->vring.avail)) {
        return NULL;
    }

    if (unlikely(vq->resubmit_list && vq->resubmit_num > 0)) {
        i = (--vq->resubmit_num);
        elem = vu_queue_map_desc(dev, vq, vq->resubmit_list[i].index, sz);

        if (!vq->resubmit_num) {
            free(vq->resubmit_list);
            vq->resubmit_list = NULL;
        }

        return elem;
    }

    if (vu_queue_empty(dev, vq)) {
        return NULL;
    }
    /*
     * Needed after vu_queue_empty(), see comment in
     * virtqueue_num_heads().
     */
    smp_rmb();

    if (vq->inuse >= vq->vring.num) {
        vu_panic(dev, "Virtqueue size exceeded");
        return NULL;
    }

    if (!virtqueue_get_head(dev, vq, vq->last_avail_idx++, &head)) {
        return NULL;
    }

    if (vu_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
        vring_set_avail_event(vq, vq->last_avail_idx);
    }

    elem = vu_queue_map_desc(dev, vq, head, sz);

    if (!elem) {
        return NULL;
    }

    vq->inuse++;

    vu_queue_inflight_get(dev, vq, head);

    return elem;
}
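/*
 * Illustrative sketch (not part of the library): the canonical
 * pop/process/push loop of a backend. sz is normally the size of a
 * caller-defined struct whose first member is a VuVirtqElement; plain
 * sizeof(*elem) works when no extra per-request state is needed.
 * reply_len is a hypothetical name for the number of bytes written.
 *
 *     VuVirtqElement *elem;
 *     while ((elem = vu_queue_pop(dev, vq, sizeof(*elem)))) {
 *         // read the request from elem->out_sg[0..out_num),
 *         // write the reply into elem->in_sg[0..in_num)
 *         vu_queue_push(dev, vq, elem, reply_len);
 *         free(elem);
 *     }
 *     vu_queue_notify(dev, vq);
 */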

static void
vu_queue_detach_element(VuDev *dev, VuVirtq *vq, VuVirtqElement *elem,
                        size_t len)
{
    vq->inuse--;
    /* unmap, when DMA support is added */
}

void
vu_queue_unpop(VuDev *dev, VuVirtq *vq, VuVirtqElement *elem,
               size_t len)
{
    vq->last_avail_idx--;
    vu_queue_detach_element(dev, vq, elem, len);
}

bool
vu_queue_rewind(VuDev *dev, VuVirtq *vq, unsigned int num)
{
    if (num > vq->inuse) {
        return false;
    }
    vq->last_avail_idx -= num;
    vq->inuse -= num;
    return true;
}
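/*
 * Note: vu_queue_rewind() is for backends that popped elements they could
 * not process after all (e.g. a transient resource shortage) and want the
 * same descriptors returned by subsequent vu_queue_pop() calls; the
 * VuVirtqElement allocations themselves must still be free()d by the
 * caller.
 */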

static inline
void vring_used_write(VuDev *dev, VuVirtq *vq,
                      struct vring_used_elem *uelem, int i)
{
    struct vring_used *used = vq->vring.used;

    used->ring[i] = *uelem;
    vu_log_write(dev, vq->vring.log_guest_addr +
                 offsetof(struct vring_used, ring[i]),
                 sizeof(used->ring[i]));
}

static void
vu_log_queue_fill(VuDev *dev, VuVirtq *vq,
                  const VuVirtqElement *elem,
                  unsigned int len)
{
    struct vring_desc *desc = vq->vring.desc;
    unsigned int i, max, min, desc_len;
    uint64_t desc_addr, read_len;
    struct vring_desc desc_buf[VIRTQUEUE_MAX_SIZE];
    unsigned num_bufs = 0;

    max = vq->vring.num;
    i = elem->index;

    if (desc[i].flags & VRING_DESC_F_INDIRECT) {
        if (desc[i].len % sizeof(struct vring_desc)) {
            vu_panic(dev, "Invalid size for indirect buffer table");
            /* Don't keep parsing a table we know is malformed. */
            return;
        }

        /* loop over the indirect descriptor table */
        desc_addr = desc[i].addr;
        desc_len = desc[i].len;
        max = desc_len / sizeof(struct vring_desc);
        read_len = desc_len;
        desc = vu_gpa_to_va(dev, &read_len, desc_addr);
        if (unlikely(desc && read_len != desc_len)) {
            /* Failed to use zero copy */
            desc = NULL;
            if (!virtqueue_read_indirect_desc(dev, desc_buf,
                                              desc_addr,
                                              desc_len)) {
                desc = desc_buf;
            }
        }
        if (!desc) {
            vu_panic(dev, "Invalid indirect buffer table");
            return;
        }
        i = 0;
    }

    do {
        if (++num_bufs > max) {
            vu_panic(dev, "Looped descriptor");
            return;
        }

        if (desc[i].flags & VRING_DESC_F_WRITE) {
            min = MIN(desc[i].len, len);
            vu_log_write(dev, desc[i].addr, min);
            len -= min;
        }

    } while (len > 0 &&
             (virtqueue_read_next_desc(dev, desc, i, max, &i)
              == VIRTQUEUE_READ_DESC_MORE));
}

void
vu_queue_fill(VuDev *dev, VuVirtq *vq,
              const VuVirtqElement *elem,
              unsigned int len, unsigned int idx)
{
    struct vring_used_elem uelem;

    if (unlikely(dev->broken) ||
        unlikely(!vq->vring.avail)) {
        return;
    }

    vu_log_queue_fill(dev, vq, elem, len);

    idx = (idx + vq->used_idx) % vq->vring.num;

    uelem.id = elem->index;
    uelem.len = len;
    vring_used_write(dev, vq, &uelem, idx);
}

static inline
void vring_used_idx_set(VuDev *dev, VuVirtq *vq, uint16_t val)
{
    vq->vring.used->idx = val;
    vu_log_write(dev,
                 vq->vring.log_guest_addr + offsetof(struct vring_used, idx),
                 sizeof(vq->vring.used->idx));

    vq->used_idx = val;
}

void
vu_queue_flush(VuDev *dev, VuVirtq *vq, unsigned int count)
{
    uint16_t old, new;

    if (unlikely(dev->broken) ||
        unlikely(!vq->vring.avail)) {
        return;
    }

    /* Make sure buffer is written before we update index. */
    smp_wmb();

    old = vq->used_idx;
    new = old + count;
    vring_used_idx_set(dev, vq, new);
    vq->inuse -= count;
    if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old))) {
        vq->signalled_used_valid = false;
    }
}
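/*
 * Illustrative sketch (not part of the library): completing a batch with a
 * single used-index update and one notification instead of one
 * vu_queue_push() per element. Note the relative idx argument to
 * vu_queue_fill(); elems, lens and n are hypothetical.
 *
 *     for (unsigned int j = 0; j < n; j++) {
 *         vu_queue_fill(dev, vq, elems[j], lens[j], j);
 *     }
 *     vu_queue_flush(dev, vq, n);
 *     vu_queue_notify(dev, vq);
 */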

void
vu_queue_push(VuDev *dev, VuVirtq *vq,
              const VuVirtqElement *elem, unsigned int len)
{
    vu_queue_fill(dev, vq, elem, len, 0);
    vu_queue_inflight_pre_put(dev, vq, elem->index);
    vu_queue_flush(dev, vq, 1);
    vu_queue_inflight_post_put(dev, vq, elem->index);
}