xref: /qemu/util/aio-win32.c (revision 897a06c6)
1c2b38b27SPaolo Bonzini /*
2c2b38b27SPaolo Bonzini  * QEMU aio implementation
3c2b38b27SPaolo Bonzini  *
4c2b38b27SPaolo Bonzini  * Copyright IBM Corp., 2008
5c2b38b27SPaolo Bonzini  * Copyright Red Hat Inc., 2012
6c2b38b27SPaolo Bonzini  *
7c2b38b27SPaolo Bonzini  * Authors:
8c2b38b27SPaolo Bonzini  *  Anthony Liguori   <aliguori@us.ibm.com>
9c2b38b27SPaolo Bonzini  *  Paolo Bonzini     <pbonzini@redhat.com>
10c2b38b27SPaolo Bonzini  *
11c2b38b27SPaolo Bonzini  * This work is licensed under the terms of the GNU GPL, version 2.  See
12c2b38b27SPaolo Bonzini  * the COPYING file in the top-level directory.
13c2b38b27SPaolo Bonzini  *
14c2b38b27SPaolo Bonzini  * Contributions after 2012-01-13 are licensed under the terms of the
15c2b38b27SPaolo Bonzini  * GNU GPL, version 2 or (at your option) any later version.
16c2b38b27SPaolo Bonzini  */
17c2b38b27SPaolo Bonzini 
18c2b38b27SPaolo Bonzini #include "qemu/osdep.h"
19c2b38b27SPaolo Bonzini #include "block/block.h"
20eada6d92SVolker Rümelin #include "qemu/main-loop.h"
21c2b38b27SPaolo Bonzini #include "qemu/queue.h"
22c2b38b27SPaolo Bonzini #include "qemu/sockets.h"
23c2b38b27SPaolo Bonzini #include "qapi/error.h"
24c2b38b27SPaolo Bonzini #include "qemu/rcu_queue.h"
25e2a3a219SMarc-André Lureau #include "qemu/error-report.h"
26c2b38b27SPaolo Bonzini 
/*
 * One registered event source (socket fd or EventNotifier) in an
 * AioContext's handler list.  Nodes are walked under RCU-style rules
 * guarded by ctx->list_lock; see aio_remove_fd_handler() for the
 * deferred-deletion protocol.
 */
struct AioHandler {
    EventNotifier *e;                /* notifier this node waits on (or &ctx->notifier for sockets) */
    IOHandler *io_read;              /* socket read callback, NULL if unused */
    IOHandler *io_write;             /* socket write callback, NULL if unused */
    EventNotifierHandler *io_notify; /* EventNotifier callback, NULL for socket nodes */
    GPollFD pfd;                     /* fd/handle plus events/revents for glib polling */
    int deleted;                     /* set while aio_poll is in progress; node reaped later */
    void *opaque;                    /* argument passed to io_read/io_write */
    QLIST_ENTRY(AioHandler) node;    /* linkage in ctx->aio_handlers */
};
37c2b38b27SPaolo Bonzini 
38fef16601SRemy Noel static void aio_remove_fd_handler(AioContext *ctx, AioHandler *node)
39c2b38b27SPaolo Bonzini {
40da0652c0SYonggang Luo     /*
41da0652c0SYonggang Luo      * If the GSource is in the process of being destroyed then
42da0652c0SYonggang Luo      * g_source_remove_poll() causes an assertion failure.  Skip
43da0652c0SYonggang Luo      * removal in that case, because glib cleans up its state during
44da0652c0SYonggang Luo      * destruction anyway.
45da0652c0SYonggang Luo      */
46da0652c0SYonggang Luo     if (!g_source_is_destroyed(&ctx->source)) {
47da0652c0SYonggang Luo         g_source_remove_poll(&ctx->source, &node->pfd);
48da0652c0SYonggang Luo     }
49da0652c0SYonggang Luo 
50c2b38b27SPaolo Bonzini     /* If aio_poll is in progress, just mark the node as deleted */
51c2b38b27SPaolo Bonzini     if (qemu_lockcnt_count(&ctx->list_lock)) {
52c2b38b27SPaolo Bonzini         node->deleted = 1;
53c2b38b27SPaolo Bonzini         node->pfd.revents = 0;
54c2b38b27SPaolo Bonzini     } else {
55c2b38b27SPaolo Bonzini         /* Otherwise, delete it for real.  We can't just mark it as
56c2b38b27SPaolo Bonzini          * deleted because deleted nodes are only cleaned up after
57c2b38b27SPaolo Bonzini          * releasing the list_lock.
58c2b38b27SPaolo Bonzini          */
59c2b38b27SPaolo Bonzini         QLIST_REMOVE(node, node);
60c2b38b27SPaolo Bonzini         g_free(node);
61c2b38b27SPaolo Bonzini     }
62c2b38b27SPaolo Bonzini }
63fef16601SRemy Noel 
64fef16601SRemy Noel void aio_set_fd_handler(AioContext *ctx,
65fef16601SRemy Noel                         int fd,
66fef16601SRemy Noel                         IOHandler *io_read,
67fef16601SRemy Noel                         IOHandler *io_write,
68fef16601SRemy Noel                         AioPollFn *io_poll,
69826cc324SStefan Hajnoczi                         IOHandler *io_poll_ready,
70fef16601SRemy Noel                         void *opaque)
71fef16601SRemy Noel {
72fef16601SRemy Noel     AioHandler *old_node;
73fef16601SRemy Noel     AioHandler *node = NULL;
74abe34282SMarc-André Lureau     SOCKET s;
75fef16601SRemy Noel 
76e2a3a219SMarc-André Lureau     if (!fd_is_socket(fd)) {
77e2a3a219SMarc-André Lureau         error_report("fd=%d is not a socket, AIO implementation is missing", fd);
78e2a3a219SMarc-André Lureau         return;
79e2a3a219SMarc-André Lureau     }
80e2a3a219SMarc-André Lureau 
81abe34282SMarc-André Lureau     s = _get_osfhandle(fd);
82abe34282SMarc-André Lureau 
83fef16601SRemy Noel     qemu_lockcnt_lock(&ctx->list_lock);
84fef16601SRemy Noel     QLIST_FOREACH(old_node, &ctx->aio_handlers, node) {
85abe34282SMarc-André Lureau         if (old_node->pfd.fd == s && !old_node->deleted) {
86fef16601SRemy Noel             break;
87fef16601SRemy Noel         }
88fef16601SRemy Noel     }
89fef16601SRemy Noel 
90fef16601SRemy Noel     if (io_read || io_write) {
91c2b38b27SPaolo Bonzini         HANDLE event;
9255d41b16SAlistair Francis         long bitmask = 0;
93c2b38b27SPaolo Bonzini 
94c2b38b27SPaolo Bonzini         /* Alloc and insert if it's not already there */
95c2b38b27SPaolo Bonzini         node = g_new0(AioHandler, 1);
96abe34282SMarc-André Lureau         node->pfd.fd = s;
97c2b38b27SPaolo Bonzini 
98c2b38b27SPaolo Bonzini         node->pfd.events = 0;
99c2b38b27SPaolo Bonzini         if (node->io_read) {
100c2b38b27SPaolo Bonzini             node->pfd.events |= G_IO_IN;
101c2b38b27SPaolo Bonzini         }
102c2b38b27SPaolo Bonzini         if (node->io_write) {
103c2b38b27SPaolo Bonzini             node->pfd.events |= G_IO_OUT;
104c2b38b27SPaolo Bonzini         }
105c2b38b27SPaolo Bonzini 
106c2b38b27SPaolo Bonzini         node->e = &ctx->notifier;
107c2b38b27SPaolo Bonzini 
108c2b38b27SPaolo Bonzini         /* Update handler with latest information */
109c2b38b27SPaolo Bonzini         node->opaque = opaque;
110c2b38b27SPaolo Bonzini         node->io_read = io_read;
111c2b38b27SPaolo Bonzini         node->io_write = io_write;
112c2b38b27SPaolo Bonzini 
11355d41b16SAlistair Francis         if (io_read) {
11455d41b16SAlistair Francis             bitmask |= FD_READ | FD_ACCEPT | FD_CLOSE;
11555d41b16SAlistair Francis         }
11655d41b16SAlistair Francis 
11755d41b16SAlistair Francis         if (io_write) {
11855d41b16SAlistair Francis             bitmask |= FD_WRITE | FD_CONNECT;
11955d41b16SAlistair Francis         }
12055d41b16SAlistair Francis 
121fef16601SRemy Noel         QLIST_INSERT_HEAD_RCU(&ctx->aio_handlers, node, node);
122c2b38b27SPaolo Bonzini         event = event_notifier_get_handle(&ctx->notifier);
123abe34282SMarc-André Lureau         qemu_socket_select(fd, event, bitmask, NULL);
124c2b38b27SPaolo Bonzini     }
125fef16601SRemy Noel     if (old_node) {
126fef16601SRemy Noel         aio_remove_fd_handler(ctx, old_node);
127fef16601SRemy Noel     }
128c2b38b27SPaolo Bonzini 
129c2b38b27SPaolo Bonzini     qemu_lockcnt_unlock(&ctx->list_lock);
130c2b38b27SPaolo Bonzini     aio_notify(ctx);
131c2b38b27SPaolo Bonzini }
132c2b38b27SPaolo Bonzini 
133c2b38b27SPaolo Bonzini void aio_set_event_notifier(AioContext *ctx,
134c2b38b27SPaolo Bonzini                             EventNotifier *e,
135c2b38b27SPaolo Bonzini                             EventNotifierHandler *io_notify,
136826cc324SStefan Hajnoczi                             AioPollFn *io_poll,
137826cc324SStefan Hajnoczi                             EventNotifierHandler *io_poll_ready)
138c2b38b27SPaolo Bonzini {
139c2b38b27SPaolo Bonzini     AioHandler *node;
140c2b38b27SPaolo Bonzini 
141c2b38b27SPaolo Bonzini     qemu_lockcnt_lock(&ctx->list_lock);
142c2b38b27SPaolo Bonzini     QLIST_FOREACH(node, &ctx->aio_handlers, node) {
143c2b38b27SPaolo Bonzini         if (node->e == e && !node->deleted) {
144c2b38b27SPaolo Bonzini             break;
145c2b38b27SPaolo Bonzini         }
146c2b38b27SPaolo Bonzini     }
147c2b38b27SPaolo Bonzini 
148c2b38b27SPaolo Bonzini     /* Are we deleting the fd handler? */
149c2b38b27SPaolo Bonzini     if (!io_notify) {
150c2b38b27SPaolo Bonzini         if (node) {
151fef16601SRemy Noel             aio_remove_fd_handler(ctx, node);
152c2b38b27SPaolo Bonzini         }
153c2b38b27SPaolo Bonzini     } else {
154c2b38b27SPaolo Bonzini         if (node == NULL) {
155c2b38b27SPaolo Bonzini             /* Alloc and insert if it's not already there */
156c2b38b27SPaolo Bonzini             node = g_new0(AioHandler, 1);
157c2b38b27SPaolo Bonzini             node->e = e;
158c2b38b27SPaolo Bonzini             node->pfd.fd = (uintptr_t)event_notifier_get_handle(e);
159c2b38b27SPaolo Bonzini             node->pfd.events = G_IO_IN;
160c2b38b27SPaolo Bonzini             QLIST_INSERT_HEAD_RCU(&ctx->aio_handlers, node, node);
161c2b38b27SPaolo Bonzini 
162c2b38b27SPaolo Bonzini             g_source_add_poll(&ctx->source, &node->pfd);
163c2b38b27SPaolo Bonzini         }
164c2b38b27SPaolo Bonzini         /* Update handler with latest information */
165c2b38b27SPaolo Bonzini         node->io_notify = io_notify;
166c2b38b27SPaolo Bonzini     }
167c2b38b27SPaolo Bonzini 
168c2b38b27SPaolo Bonzini     qemu_lockcnt_unlock(&ctx->list_lock);
169c2b38b27SPaolo Bonzini     aio_notify(ctx);
170c2b38b27SPaolo Bonzini }
171c2b38b27SPaolo Bonzini 
/*
 * Polling mode is not implemented on Windows (see
 * aio_context_set_poll_params), so there is nothing to do here.
 */
void aio_set_event_notifier_poll(AioContext *ctx,
                                 EventNotifier *notifier,
                                 EventNotifierHandler *io_poll_begin,
                                 EventNotifierHandler *io_poll_end)
{
    /* Not implemented */
}
179c2b38b27SPaolo Bonzini 
180c2b38b27SPaolo Bonzini bool aio_prepare(AioContext *ctx)
181c2b38b27SPaolo Bonzini {
182c2b38b27SPaolo Bonzini     static struct timeval tv0;
183c2b38b27SPaolo Bonzini     AioHandler *node;
184c2b38b27SPaolo Bonzini     bool have_select_revents = false;
185c2b38b27SPaolo Bonzini     fd_set rfds, wfds;
186c2b38b27SPaolo Bonzini 
187c2b38b27SPaolo Bonzini     /*
188c2b38b27SPaolo Bonzini      * We have to walk very carefully in case aio_set_fd_handler is
189c2b38b27SPaolo Bonzini      * called while we're walking.
190c2b38b27SPaolo Bonzini      */
191c2b38b27SPaolo Bonzini     qemu_lockcnt_inc(&ctx->list_lock);
192c2b38b27SPaolo Bonzini 
193c2b38b27SPaolo Bonzini     /* fill fd sets */
194c2b38b27SPaolo Bonzini     FD_ZERO(&rfds);
195c2b38b27SPaolo Bonzini     FD_ZERO(&wfds);
196c2b38b27SPaolo Bonzini     QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
197c2b38b27SPaolo Bonzini         if (node->io_read) {
198c2b38b27SPaolo Bonzini             FD_SET ((SOCKET)node->pfd.fd, &rfds);
199c2b38b27SPaolo Bonzini         }
200c2b38b27SPaolo Bonzini         if (node->io_write) {
201c2b38b27SPaolo Bonzini             FD_SET ((SOCKET)node->pfd.fd, &wfds);
202c2b38b27SPaolo Bonzini         }
203c2b38b27SPaolo Bonzini     }
204c2b38b27SPaolo Bonzini 
205c2b38b27SPaolo Bonzini     if (select(0, &rfds, &wfds, NULL, &tv0) > 0) {
206c2b38b27SPaolo Bonzini         QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
207c2b38b27SPaolo Bonzini             node->pfd.revents = 0;
208c2b38b27SPaolo Bonzini             if (FD_ISSET(node->pfd.fd, &rfds)) {
209c2b38b27SPaolo Bonzini                 node->pfd.revents |= G_IO_IN;
210c2b38b27SPaolo Bonzini                 have_select_revents = true;
211c2b38b27SPaolo Bonzini             }
212c2b38b27SPaolo Bonzini 
213c2b38b27SPaolo Bonzini             if (FD_ISSET(node->pfd.fd, &wfds)) {
214c2b38b27SPaolo Bonzini                 node->pfd.revents |= G_IO_OUT;
215c2b38b27SPaolo Bonzini                 have_select_revents = true;
216c2b38b27SPaolo Bonzini             }
217c2b38b27SPaolo Bonzini         }
218c2b38b27SPaolo Bonzini     }
219c2b38b27SPaolo Bonzini 
220c2b38b27SPaolo Bonzini     qemu_lockcnt_dec(&ctx->list_lock);
221c2b38b27SPaolo Bonzini     return have_select_revents;
222c2b38b27SPaolo Bonzini }
223c2b38b27SPaolo Bonzini 
224c2b38b27SPaolo Bonzini bool aio_pending(AioContext *ctx)
225c2b38b27SPaolo Bonzini {
226c2b38b27SPaolo Bonzini     AioHandler *node;
227c2b38b27SPaolo Bonzini     bool result = false;
228c2b38b27SPaolo Bonzini 
229c2b38b27SPaolo Bonzini     /*
230c2b38b27SPaolo Bonzini      * We have to walk very carefully in case aio_set_fd_handler is
231c2b38b27SPaolo Bonzini      * called while we're walking.
232c2b38b27SPaolo Bonzini      */
233c2b38b27SPaolo Bonzini     qemu_lockcnt_inc(&ctx->list_lock);
234c2b38b27SPaolo Bonzini     QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
235c2b38b27SPaolo Bonzini         if (node->pfd.revents && node->io_notify) {
236c2b38b27SPaolo Bonzini             result = true;
237c2b38b27SPaolo Bonzini             break;
238c2b38b27SPaolo Bonzini         }
239c2b38b27SPaolo Bonzini 
240c2b38b27SPaolo Bonzini         if ((node->pfd.revents & G_IO_IN) && node->io_read) {
241c2b38b27SPaolo Bonzini             result = true;
242c2b38b27SPaolo Bonzini             break;
243c2b38b27SPaolo Bonzini         }
244c2b38b27SPaolo Bonzini         if ((node->pfd.revents & G_IO_OUT) && node->io_write) {
245c2b38b27SPaolo Bonzini             result = true;
246c2b38b27SPaolo Bonzini             break;
247c2b38b27SPaolo Bonzini         }
248c2b38b27SPaolo Bonzini     }
249c2b38b27SPaolo Bonzini 
250c2b38b27SPaolo Bonzini     qemu_lockcnt_dec(&ctx->list_lock);
251c2b38b27SPaolo Bonzini     return result;
252c2b38b27SPaolo Bonzini }
253c2b38b27SPaolo Bonzini 
/*
 * Run the callbacks of every handler that is ready: notifier handlers
 * whose revents are set or whose event handle equals @event, and socket
 * read/write handlers per their revents.  Also reaps nodes marked
 * deleted when the list_lock can be taken.  Caller holds a list_lock
 * reference (qemu_lockcnt_inc).  Returns true if any real progress was
 * made (i.e. anything other than the context's own notifier fired).
 */
static bool aio_dispatch_handlers(AioContext *ctx, HANDLE event)
{
    AioHandler *node;
    bool progress = false;
    AioHandler *tmp;

    /*
     * We have to walk very carefully in case aio_set_fd_handler is
     * called while we're walking.
     */
    QLIST_FOREACH_SAFE_RCU(node, &ctx->aio_handlers, node, tmp) {
        /* Snapshot revents before callbacks run and clear the field. */
        int revents = node->pfd.revents;

        if (!node->deleted &&
            (revents || event_notifier_get_handle(node->e) == event) &&
            node->io_notify) {
            node->pfd.revents = 0;
            node->io_notify(node->e);

            /* aio_notify() does not count as progress */
            if (node->e != &ctx->notifier) {
                progress = true;
            }
        }

        if (!node->deleted &&
            (node->io_read || node->io_write)) {
            node->pfd.revents = 0;
            if ((revents & G_IO_IN) && node->io_read) {
                node->io_read(node->opaque);
                progress = true;
            }
            if ((revents & G_IO_OUT) && node->io_write) {
                node->io_write(node->opaque);
                progress = true;
            }

            /* if the next select() will return an event, we have progressed */
            if (event == event_notifier_get_handle(&ctx->notifier)) {
                WSANETWORKEVENTS ev;
                WSAEnumNetworkEvents(node->pfd.fd, event, &ev);
                if (ev.lNetworkEvents) {
                    progress = true;
                }
            }
        }

        if (node->deleted) {
            /*
             * Only free the node if we can momentarily take the list
             * lock, i.e. no other walker holds a reference; otherwise
             * leave it for a later pass.
             */
            if (qemu_lockcnt_dec_if_lock(&ctx->list_lock)) {
                QLIST_REMOVE(node, node);
                g_free(node);
                qemu_lockcnt_inc_and_unlock(&ctx->list_lock);
            }
        }
    }

    return progress;
}
312c2b38b27SPaolo Bonzini 
/*
 * GSource dispatch entry point: run bottom halves, ready handlers
 * (with no specific signalled event), and expired timers.
 */
void aio_dispatch(AioContext *ctx)
{
    qemu_lockcnt_inc(&ctx->list_lock);
    aio_bh_poll(ctx);
    /* No WaitForMultipleObjects result here, so pass an invalid handle. */
    aio_dispatch_handlers(ctx, INVALID_HANDLE_VALUE);
    qemu_lockcnt_dec(&ctx->list_lock);
    timerlistgroup_run_timers(&ctx->tlg);
}
321c2b38b27SPaolo Bonzini 
/*
 * Wait for and dispatch events on @ctx.  If @blocking, sleeps until at
 * least one event (handler, bottom half, or timer) is ready; otherwise
 * only handles what is already pending.  Returns true if progress was
 * made.  Must be called from the AioContext's home thread.
 */
bool aio_poll(AioContext *ctx, bool blocking)
{
    AioHandler *node;
    HANDLE events[MAXIMUM_WAIT_OBJECTS];
    bool progress, have_select_revents, first;
    unsigned count;
    int timeout;

    /*
     * There cannot be two concurrent aio_poll calls for the same AioContext (or
     * an aio_poll concurrent with a GSource prepare/check/dispatch callback).
     * We rely on this below to avoid slow locked accesses to ctx->notify_me.
     *
     * aio_poll() may only be called in the AioContext's thread. iohandler_ctx
     * is special in that it runs in the main thread, but that thread's context
     * is qemu_aio_context.
     */
    assert(in_aio_context_home_thread(ctx == iohandler_get_aio_context() ?
                                      qemu_get_aio_context() : ctx));
    progress = false;

    /* aio_notify can avoid the expensive event_notifier_set if
     * everything (file descriptors, bottom halves, timers) will
     * be re-evaluated before the next blocking poll().  This is
     * already true when aio_poll is called with blocking == false;
     * if blocking == true, it is only true after poll() returns,
     * so disable the optimization now.
     */
    if (blocking) {
        qatomic_set(&ctx->notify_me, qatomic_read(&ctx->notify_me) + 2);
        /*
         * Write ctx->notify_me before computing the timeout
         * (reading bottom half flags, etc.).  Pairs with
         * smp_mb in aio_notify().
         */
        smp_mb();
    }

    qemu_lockcnt_inc(&ctx->list_lock);
    /* Zero-timeout select() fills pfd.revents for ready sockets. */
    have_select_revents = aio_prepare(ctx);

    /* Collect the event handles of all live notifier handlers. */
    count = 0;
    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
        if (!node->deleted && node->io_notify) {
            assert(count < MAXIMUM_WAIT_OBJECTS);
            events[count++] = event_notifier_get_handle(node->e);
        }
    }

    first = true;

    /* ctx->notifier is always registered.  */
    assert(count > 0);

    /* Multiple iterations, all of them non-blocking except the first,
     * may be necessary to process all pending events.  After the first
     * WaitForMultipleObjects call ctx->notify_me will be decremented.
     */
    do {
        HANDLE event;
        int ret;

        /* Only the first iteration may block, and only if nothing is
         * already pending from select().
         */
        timeout = blocking && !have_select_revents
            ? qemu_timeout_ns_to_ms(aio_compute_timeout(ctx)) : 0;
        ret = WaitForMultipleObjects(count, events, FALSE, timeout);
        if (blocking) {
            assert(first);
            qatomic_store_release(&ctx->notify_me,
                                  qatomic_read(&ctx->notify_me) - 2);
            aio_notify_accept(ctx);
        }

        if (first) {
            progress |= aio_bh_poll(ctx);
            first = false;
        }

        /* if we have any signaled events, dispatch event */
        event = NULL;
        if ((DWORD) (ret - WAIT_OBJECT_0) < count) {
            /* Remove the signalled handle so it isn't waited on again. */
            event = events[ret - WAIT_OBJECT_0];
            events[ret - WAIT_OBJECT_0] = events[--count];
        } else if (!have_select_revents) {
            break;
        }

        have_select_revents = false;
        blocking = false;

        progress |= aio_dispatch_handlers(ctx, event);
    } while (count > 0);

    qemu_lockcnt_dec(&ctx->list_lock);

    progress |= timerlistgroup_run_timers(&ctx->tlg);
    return progress;
}
420c2b38b27SPaolo Bonzini 
/* No per-context platform setup is needed on Windows. */
void aio_context_setup(AioContext *ctx)
{
}
424c2b38b27SPaolo Bonzini 
/* Nothing platform-specific to tear down on Windows. */
void aio_context_destroy(AioContext *ctx)
{
}
428cd0a6d2bSJie Wang 
/*
 * No-op: this implementation keeps GSource polling state up to date
 * at all times, so nothing changes when the context is attached to a
 * GLib main loop.
 */
void aio_context_use_g_source(AioContext *ctx)
{
}
432ba607ca8SStefan Hajnoczi 
433c2b38b27SPaolo Bonzini void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
434c2b38b27SPaolo Bonzini                                  int64_t grow, int64_t shrink, Error **errp)
435c2b38b27SPaolo Bonzini {
43690c558beSPeter Xu     if (max_ns) {
437c2b38b27SPaolo Bonzini         error_setg(errp, "AioContext polling is not implemented on Windows");
438c2b38b27SPaolo Bonzini     }
43990c558beSPeter Xu }
4401793ad02SStefano Garzarella 
/* Batching parameters are not used by the Windows implementation. */
void aio_context_set_aio_params(AioContext *ctx, int64_t max_batch)
{
}
444