xref: /qemu/util/aio-posix.c (revision b25f23e7)
/*
 * QEMU aio implementation
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "block/block.h"
#include "qemu/rcu_queue.h"
#include "qemu/sockets.h"
#include "qemu/cutils.h"
#include "trace.h"
#ifdef CONFIG_EPOLL_CREATE1
#include <sys/epoll.h>
#endif

struct AioHandler
{
    GPollFD pfd;
    IOHandler *io_read;
    IOHandler *io_write;
    AioPollFn *io_poll;
    IOHandler *io_poll_begin;
    IOHandler *io_poll_end;
    int deleted;
    void *opaque;
    bool is_external;
    QLIST_ENTRY(AioHandler) node;
};

#ifdef CONFIG_EPOLL_CREATE1

/* The fd count threshold at which we switch to epoll */
#define EPOLL_ENABLE_THRESHOLD 64

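/* Give up on epoll for the lifetime of this AioContext: mark it
 * unavailable and close the epoll fd if it was in use.  Callers fall
 * back to ppoll(2).
 */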
static void aio_epoll_disable(AioContext *ctx)
{
    ctx->epoll_available = false;
    if (!ctx->epoll_enabled) {
        return;
    }
    ctx->epoll_enabled = false;
    close(ctx->epollfd);
}

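/* Translate GLib poll flags into the equivalent epoll event bits */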
static inline int epoll_events_from_pfd(int pfd_events)
{
    return (pfd_events & G_IO_IN ? EPOLLIN : 0) |
           (pfd_events & G_IO_OUT ? EPOLLOUT : 0) |
           (pfd_events & G_IO_HUP ? EPOLLHUP : 0) |
           (pfd_events & G_IO_ERR ? EPOLLERR : 0);
}

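/* Register every active handler with the epoll instance.  Returns true
 * and marks epoll enabled on success; returns false if epoll_ctl()
 * rejects any fd, in which case the caller disables epoll.
 */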
static bool aio_epoll_try_enable(AioContext *ctx)
{
    AioHandler *node;
    struct epoll_event event;

    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
        int r;
        if (node->deleted || !node->pfd.events) {
            continue;
        }
        event.events = epoll_events_from_pfd(node->pfd.events);
        event.data.ptr = node;
        r = epoll_ctl(ctx->epollfd, EPOLL_CTL_ADD, node->pfd.fd, &event);
        if (r) {
            return false;
        }
    }
    ctx->epoll_enabled = true;
    return true;
}

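/* Keep the epoll interest set in sync with a handler that has just been
 * added, modified or removed.  Any epoll_ctl() failure disables epoll
 * entirely rather than risk a stale interest set.
 */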
static void aio_epoll_update(AioContext *ctx, AioHandler *node, bool is_new)
{
    struct epoll_event event;
    int r;
    int ctl;

    if (!ctx->epoll_enabled) {
        return;
    }
    if (!node->pfd.events) {
        ctl = EPOLL_CTL_DEL;
    } else {
        event.data.ptr = node;
        event.events = epoll_events_from_pfd(node->pfd.events);
        ctl = is_new ? EPOLL_CTL_ADD : EPOLL_CTL_MOD;
    }

    r = epoll_ctl(ctx->epollfd, ctl, node->pfd.fd, &event);
    if (r) {
        aio_epoll_disable(ctx);
    }
}

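/* Wait for events via the epoll instance.  For a finite timeout,
 * qemu_poll_ns() supplies the nanosecond-precision wait on the epoll fd
 * itself; epoll_wait() then collects the pending events and maps them
 * back onto GPollFD revents bits.
 */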
static int aio_epoll(AioContext *ctx, GPollFD *pfds,
                     unsigned npfd, int64_t timeout)
{
    AioHandler *node;
    int i, ret = 0;
    struct epoll_event events[128];

    assert(npfd == 1);
    assert(pfds[0].fd == ctx->epollfd);
    if (timeout > 0) {
        ret = qemu_poll_ns(pfds, npfd, timeout);
    }
    if (timeout <= 0 || ret > 0) {
        ret = epoll_wait(ctx->epollfd, events,
                         sizeof(events) / sizeof(events[0]),
                         timeout);
        if (ret <= 0) {
            goto out;
        }
        for (i = 0; i < ret; i++) {
            int ev = events[i].events;
            node = events[i].data.ptr;
            node->pfd.revents = (ev & EPOLLIN ? G_IO_IN : 0) |
                (ev & EPOLLOUT ? G_IO_OUT : 0) |
                (ev & EPOLLHUP ? G_IO_HUP : 0) |
                (ev & EPOLLERR ? G_IO_ERR : 0);
        }
    }
out:
    return ret;
}

static bool aio_epoll_enabled(AioContext *ctx)
{
    /* Fall back to ppoll when external clients are disabled. */
    return !aio_external_disabled(ctx) && ctx->epoll_enabled;
}

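/* Decide whether this iteration should use epoll.  Once the pollfd
 * count crosses EPOLL_ENABLE_THRESHOLD we try to switch over, since
 * epoll scales better than ppoll(2) for large fd sets.
 */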
static bool aio_epoll_check_poll(AioContext *ctx, GPollFD *pfds,
                                 unsigned npfd, int64_t timeout)
{
    if (!ctx->epoll_available) {
        return false;
    }
    if (aio_epoll_enabled(ctx)) {
        return true;
    }
    if (npfd >= EPOLL_ENABLE_THRESHOLD) {
        if (aio_epoll_try_enable(ctx)) {
            return true;
        } else {
            aio_epoll_disable(ctx);
        }
    }
    return false;
}

#else

static void aio_epoll_update(AioContext *ctx, AioHandler *node, bool is_new)
{
}

static int aio_epoll(AioContext *ctx, GPollFD *pfds,
                     unsigned npfd, int64_t timeout)
{
    assert(false);
}

static bool aio_epoll_enabled(AioContext *ctx)
{
    return false;
}

static bool aio_epoll_check_poll(AioContext *ctx, GPollFD *pfds,
                                 unsigned npfd, int64_t timeout)
{
    return false;
}

#endif

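/* Return the handler registered for @fd, skipping nodes that are
 * already marked as deleted, or NULL if there is none.
 */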
static AioHandler *find_aio_handler(AioContext *ctx, int fd)
{
    AioHandler *node;

    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        if (node->pfd.fd == fd && !node->deleted) {
            return node;
        }
    }

    return NULL;
}

void aio_set_fd_handler(AioContext *ctx,
                        int fd,
                        bool is_external,
                        IOHandler *io_read,
                        IOHandler *io_write,
                        AioPollFn *io_poll,
                        void *opaque)
{
    AioHandler *node;
    bool is_new = false;
    bool deleted = false;

    qemu_lockcnt_lock(&ctx->list_lock);

    node = find_aio_handler(ctx, fd);

    /* Are we deleting the fd handler? */
    if (!io_read && !io_write && !io_poll) {
        if (node == NULL) {
            qemu_lockcnt_unlock(&ctx->list_lock);
            return;
        }

        g_source_remove_poll(&ctx->source, &node->pfd);

        /* If the lock is held, just mark the node as deleted */
        if (qemu_lockcnt_count(&ctx->list_lock)) {
            node->deleted = 1;
            node->pfd.revents = 0;
        } else {
            /* Otherwise, delete it for real.  We can't just mark it as
             * deleted because deleted nodes are only cleaned up while
             * no one is walking the handlers list.
             */
            QLIST_REMOVE(node, node);
            deleted = true;
        }

        if (!node->io_poll) {
            ctx->poll_disable_cnt--;
        }
    } else {
        if (node == NULL) {
            /* Alloc and insert if it's not already there */
            node = g_new0(AioHandler, 1);
            node->pfd.fd = fd;
            QLIST_INSERT_HEAD_RCU(&ctx->aio_handlers, node, node);

            g_source_add_poll(&ctx->source, &node->pfd);
            is_new = true;

            ctx->poll_disable_cnt += !io_poll;
        } else {
            ctx->poll_disable_cnt += !io_poll - !node->io_poll;
        }

        /* Update handler with latest information */
        node->io_read = io_read;
        node->io_write = io_write;
        node->io_poll = io_poll;
        node->opaque = opaque;
        node->is_external = is_external;

        node->pfd.events = (io_read ? G_IO_IN | G_IO_HUP | G_IO_ERR : 0);
        node->pfd.events |= (io_write ? G_IO_OUT | G_IO_ERR : 0);
    }

    aio_epoll_update(ctx, node, is_new);
    qemu_lockcnt_unlock(&ctx->list_lock);
    aio_notify(ctx);

    if (deleted) {
        g_free(node);
    }
}
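
/*
 * Usage sketch with hypothetical names (not part of this file): a
 * device that wants read notifications plus a busy-poll hook could
 * register its callbacks and opaque state as
 *
 *     aio_set_fd_handler(ctx, fd, true, my_read_cb, NULL,
 *                        my_poll_cb, my_state);
 *
 * where "true" marks the handler as external.  Passing NULL for all
 * three callbacks removes the handler again:
 *
 *     aio_set_fd_handler(ctx, fd, true, NULL, NULL, NULL, NULL);
 */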

void aio_set_fd_poll(AioContext *ctx, int fd,
                     IOHandler *io_poll_begin,
                     IOHandler *io_poll_end)
{
    AioHandler *node = find_aio_handler(ctx, fd);

    if (!node) {
        return;
    }

    node->io_poll_begin = io_poll_begin;
    node->io_poll_end = io_poll_end;
}

void aio_set_event_notifier(AioContext *ctx,
                            EventNotifier *notifier,
                            bool is_external,
                            EventNotifierHandler *io_read,
                            AioPollFn *io_poll)
{
    aio_set_fd_handler(ctx, event_notifier_get_fd(notifier), is_external,
                       (IOHandler *)io_read, NULL, io_poll, notifier);
}

void aio_set_event_notifier_poll(AioContext *ctx,
                                 EventNotifier *notifier,
                                 EventNotifierHandler *io_poll_begin,
                                 EventNotifierHandler *io_poll_end)
{
    aio_set_fd_poll(ctx, event_notifier_get_fd(notifier),
                    (IOHandler *)io_poll_begin,
                    (IOHandler *)io_poll_end);
}

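/* Transition every handler into or out of busy-poll mode by invoking
 * its io_poll_begin/io_poll_end callback, so that e.g. a device can
 * suppress fd notifications while it is being polled directly.
 */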
static void poll_set_started(AioContext *ctx, bool started)
{
    AioHandler *node;

    if (started == ctx->poll_started) {
        return;
    }

    ctx->poll_started = started;

    qemu_lockcnt_inc(&ctx->list_lock);
    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
        IOHandler *fn;

        if (node->deleted) {
            continue;
        }

        if (started) {
            fn = node->io_poll_begin;
        } else {
            fn = node->io_poll_end;
        }

        if (fn) {
            fn(node->opaque);
        }
    }
    qemu_lockcnt_dec(&ctx->list_lock);
}


bool aio_prepare(AioContext *ctx)
{
    /* Poll mode cannot be used with glib's event loop, disable it. */
    poll_set_started(ctx, false);

    return false;
}

bool aio_pending(AioContext *ctx)
{
    AioHandler *node;
    bool result = false;

    /*
     * We have to walk very carefully in case aio_set_fd_handler is
     * called while we're walking.
     */
    qemu_lockcnt_inc(&ctx->list_lock);

    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
        int revents;

        revents = node->pfd.revents & node->pfd.events;
        if (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR) && node->io_read &&
            aio_node_check(ctx, node->is_external)) {
            result = true;
            break;
        }
        if (revents & (G_IO_OUT | G_IO_ERR) && node->io_write &&
            aio_node_check(ctx, node->is_external)) {
            result = true;
            break;
        }
    }
    qemu_lockcnt_dec(&ctx->list_lock);

    return result;
}

static bool aio_dispatch_handlers(AioContext *ctx)
{
    AioHandler *node, *tmp;
    bool progress = false;

    QLIST_FOREACH_SAFE_RCU(node, &ctx->aio_handlers, node, tmp) {
        int revents;

        revents = node->pfd.revents & node->pfd.events;
        node->pfd.revents = 0;

        if (!node->deleted &&
            (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR)) &&
            aio_node_check(ctx, node->is_external) &&
            node->io_read) {
            node->io_read(node->opaque);

            /* aio_notify() does not count as progress */
            if (node->opaque != &ctx->notifier) {
                progress = true;
            }
        }
        if (!node->deleted &&
            (revents & (G_IO_OUT | G_IO_ERR)) &&
            aio_node_check(ctx, node->is_external) &&
            node->io_write) {
            node->io_write(node->opaque);
            progress = true;
        }

        if (node->deleted) {
            if (qemu_lockcnt_dec_if_lock(&ctx->list_lock)) {
                QLIST_REMOVE(node, node);
                g_free(node);
                qemu_lockcnt_inc_and_unlock(&ctx->list_lock);
            }
        }
    }

    return progress;
}

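/* Dispatch pending bottom halves, fd handlers and timers.  This runs
 * from the glib event loop's dispatch phase and, unlike aio_poll(),
 * never blocks.
 */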
void aio_dispatch(AioContext *ctx)
{
    qemu_lockcnt_inc(&ctx->list_lock);
    aio_bh_poll(ctx);
    aio_dispatch_handlers(ctx);
    qemu_lockcnt_dec(&ctx->list_lock);

    timerlistgroup_run_timers(&ctx->tlg);
}

/* These thread-local variables are used only in a small part of aio_poll
 * around the call to the poll() system call.  In particular they are not
 * used while aio_poll is performing callbacks, which makes it much easier
 * to think about reentrancy!
 *
 * Stack-allocated arrays would be perfect but they have size limitations;
 * heap allocation is expensive enough that we want to reuse arrays across
 * calls to aio_poll().  And because poll() has to be called without holding
 * any lock, the arrays cannot be stored in AioContext.  Thread-local data
 * has none of the disadvantages of these three options.
 */
static __thread GPollFD *pollfds;
static __thread AioHandler **nodes;
static __thread unsigned npfd, nalloc;
static __thread Notifier pollfds_cleanup_notifier;

static void pollfds_cleanup(Notifier *n, void *unused)
{
    g_assert(npfd == 0);
    g_free(pollfds);
    g_free(nodes);
    nalloc = 0;
}

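/* Append a handler to the thread-local pollfds/nodes arrays, growing
 * both geometrically as needed.  npfd is reset after every poll, so the
 * allocations are reused across aio_poll() calls.
 */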
static void add_pollfd(AioHandler *node)
{
    if (npfd == nalloc) {
        if (nalloc == 0) {
            pollfds_cleanup_notifier.notify = pollfds_cleanup;
            qemu_thread_atexit_add(&pollfds_cleanup_notifier);
            nalloc = 8;
        } else {
            g_assert(nalloc <= INT_MAX);
            nalloc *= 2;
        }
        pollfds = g_renew(GPollFD, pollfds, nalloc);
        nodes = g_renew(AioHandler *, nodes, nalloc);
    }
    nodes[npfd] = node;
    pollfds[npfd] = (GPollFD) {
        .fd = node->pfd.fd,
        .events = node->pfd.events,
    };
    npfd++;
}

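/* Invoke every handler's io_poll() callback once.  Returns true if any
 * of them reported progress, i.e. polling found work that the callback
 * handled itself without waiting for a readable fd.
 */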
static bool run_poll_handlers_once(AioContext *ctx)
{
    bool progress = false;
    AioHandler *node;

    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
        if (!node->deleted && node->io_poll &&
            aio_node_check(ctx, node->is_external) &&
            node->io_poll(node->opaque)) {
            progress = true;
        }

        /* Caller handles freeing deleted nodes.  Don't do it here. */
    }

    return progress;
}

/* run_poll_handlers:
 * @ctx: the AioContext
 * @max_ns: maximum time to poll for, in nanoseconds
 *
 * Polls for a given time.
 *
 * Note that ctx->notify_me must be non-zero so this function can detect
 * aio_notify().
 *
 * Note that the caller must have incremented ctx->list_lock.
 *
 * Returns: true if progress was made, false otherwise
 */
static bool run_poll_handlers(AioContext *ctx, int64_t max_ns)
{
    bool progress;
    int64_t end_time;

    assert(ctx->notify_me);
    assert(qemu_lockcnt_count(&ctx->list_lock) > 0);
    assert(ctx->poll_disable_cnt == 0);

    trace_run_poll_handlers_begin(ctx, max_ns);

    end_time = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + max_ns;

    do {
        progress = run_poll_handlers_once(ctx);
    } while (!progress && qemu_clock_get_ns(QEMU_CLOCK_REALTIME) < end_time);

    trace_run_poll_handlers_end(ctx, progress);

    return progress;
}

/* try_poll_mode:
 * @ctx: the AioContext
 * @blocking: busy polling is only attempted when blocking is true
 *
 * ctx->notify_me must be non-zero so this function can detect aio_notify().
 *
 * Note that the caller must have incremented ctx->list_lock.
 *
 * Returns: true if progress was made, false otherwise
 */
static bool try_poll_mode(AioContext *ctx, bool blocking)
{
    if (blocking && ctx->poll_max_ns && ctx->poll_disable_cnt == 0) {
        /* See qemu_soonest_timeout() uint64_t hack */
        int64_t max_ns = MIN((uint64_t)aio_compute_timeout(ctx),
                             (uint64_t)ctx->poll_ns);

        if (max_ns) {
            poll_set_started(ctx, true);

            if (run_poll_handlers(ctx, max_ns)) {
                return true;
            }
        }
    }

    poll_set_started(ctx, false);

    /* Even if we don't run busy polling, try polling once in case it can make
     * progress and the caller will be able to avoid ppoll(2)/epoll_wait(2).
     */
    return run_poll_handlers_once(ctx);
}

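/* Run one iteration of the event loop: optionally busy-poll, block in
 * ppoll(2)/epoll_wait(2) if @blocking is true and no progress was made,
 * then dispatch bottom halves, fd handlers and timers.  Returns true if
 * progress was made.
 */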
bool aio_poll(AioContext *ctx, bool blocking)
{
    AioHandler *node;
    int i;
    int ret = 0;
    bool progress;
    int64_t timeout;
    int64_t start = 0;

    /* aio_notify can avoid the expensive event_notifier_set if
     * everything (file descriptors, bottom halves, timers) will
     * be re-evaluated before the next blocking poll().  This is
     * already true when aio_poll is called with blocking == false;
     * if blocking == true, it is only true after poll() returns,
     * so disable the optimization now.
     */
    if (blocking) {
        atomic_add(&ctx->notify_me, 2);
    }

    qemu_lockcnt_inc(&ctx->list_lock);

    if (ctx->poll_max_ns) {
        start = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    }

    progress = try_poll_mode(ctx, blocking);
    if (!progress) {
        assert(npfd == 0);

        /* fill pollfds */

        if (!aio_epoll_enabled(ctx)) {
            QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
                if (!node->deleted && node->pfd.events
                    && aio_node_check(ctx, node->is_external)) {
                    add_pollfd(node);
                }
            }
        }

        timeout = blocking ? aio_compute_timeout(ctx) : 0;

        /* wait until next event */
        if (aio_epoll_check_poll(ctx, pollfds, npfd, timeout)) {
            AioHandler epoll_handler;

            epoll_handler.pfd.fd = ctx->epollfd;
            epoll_handler.pfd.events = G_IO_IN | G_IO_OUT | G_IO_HUP | G_IO_ERR;
            npfd = 0;
            add_pollfd(&epoll_handler);
            ret = aio_epoll(ctx, pollfds, npfd, timeout);
        } else {
            ret = qemu_poll_ns(pollfds, npfd, timeout);
        }
    }

    if (blocking) {
        atomic_sub(&ctx->notify_me, 2);
    }

    /* Adjust polling time */
    if (ctx->poll_max_ns) {
        int64_t block_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - start;

        if (block_ns <= ctx->poll_ns) {
            /* This is the sweet spot, no adjustment needed */
        } else if (block_ns > ctx->poll_max_ns) {
            /* We'd have to poll for too long, poll less */
            int64_t old = ctx->poll_ns;

            if (ctx->poll_shrink) {
                ctx->poll_ns /= ctx->poll_shrink;
            } else {
                ctx->poll_ns = 0;
            }

            trace_poll_shrink(ctx, old, ctx->poll_ns);
        } else if (ctx->poll_ns < ctx->poll_max_ns &&
                   block_ns < ctx->poll_max_ns) {
            /* There is room to grow, poll longer */
            int64_t old = ctx->poll_ns;
            int64_t grow = ctx->poll_grow;

            if (grow == 0) {
                grow = 2;
            }

            if (ctx->poll_ns) {
                ctx->poll_ns *= grow;
            } else {
                ctx->poll_ns = 4000; /* start polling at 4 microseconds */
            }

            if (ctx->poll_ns > ctx->poll_max_ns) {
                ctx->poll_ns = ctx->poll_max_ns;
            }

            trace_poll_grow(ctx, old, ctx->poll_ns);
        }
    }

    aio_notify_accept(ctx);

    /* if we have any readable fds, dispatch event */
    if (ret > 0) {
        for (i = 0; i < npfd; i++) {
            nodes[i]->pfd.revents = pollfds[i].revents;
        }
    }

    npfd = 0;

    progress |= aio_bh_poll(ctx);

    if (ret > 0) {
        progress |= aio_dispatch_handlers(ctx);
    }

    qemu_lockcnt_dec(&ctx->list_lock);

    progress |= timerlistgroup_run_timers(&ctx->tlg);

    return progress;
}

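/* One-time initialization when the AioContext is created.  If creating
 * the epoll instance fails we lose only the epoll fast path, not the
 * context itself.
 */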
void aio_context_setup(AioContext *ctx)
{
    /* TODO remove this in final patch submission */
    if (getenv("QEMU_AIO_POLL_MAX_NS")) {
        fprintf(stderr, "The QEMU_AIO_POLL_MAX_NS environment variable has "
                "been replaced with -object iothread,poll-max-ns=NUM\n");
        exit(1);
    }

#ifdef CONFIG_EPOLL_CREATE1
    assert(!ctx->epollfd);
    ctx->epollfd = epoll_create1(EPOLL_CLOEXEC);
    if (ctx->epollfd == -1) {
        fprintf(stderr, "Failed to create epoll instance: %s\n",
                strerror(errno));
        ctx->epoll_available = false;
    } else {
        ctx->epoll_available = true;
    }
#endif
}

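/* Tune the self-adjusting busy-poll parameters (see the "Adjust polling
 * time" logic in aio_poll()): @max_ns caps the poll window, @grow and
 * @shrink are multiplicative factors.  A @grow of 0 defaults to 2; a
 * @shrink of 0 makes any shrink drop poll_ns straight to zero.
 */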
void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
                                 int64_t grow, int64_t shrink, Error **errp)
{
    /* No thread synchronization here, it doesn't matter if an incorrect value
     * is used once.
     */
    ctx->poll_max_ns = max_ns;
    ctx->poll_ns = 0;
    ctx->poll_grow = grow;
    ctx->poll_shrink = shrink;

    aio_notify(ctx);
}
729