xref: /qemu/util/aio-win32.c (revision b37548fc)
1c2b38b27SPaolo Bonzini /*
2c2b38b27SPaolo Bonzini  * QEMU aio implementation
3c2b38b27SPaolo Bonzini  *
4c2b38b27SPaolo Bonzini  * Copyright IBM Corp., 2008
5c2b38b27SPaolo Bonzini  * Copyright Red Hat Inc., 2012
6c2b38b27SPaolo Bonzini  *
7c2b38b27SPaolo Bonzini  * Authors:
8c2b38b27SPaolo Bonzini  *  Anthony Liguori   <aliguori@us.ibm.com>
9c2b38b27SPaolo Bonzini  *  Paolo Bonzini     <pbonzini@redhat.com>
10c2b38b27SPaolo Bonzini  *
11c2b38b27SPaolo Bonzini  * This work is licensed under the terms of the GNU GPL, version 2.  See
12c2b38b27SPaolo Bonzini  * the COPYING file in the top-level directory.
13c2b38b27SPaolo Bonzini  *
14c2b38b27SPaolo Bonzini  * Contributions after 2012-01-13 are licensed under the terms of the
15c2b38b27SPaolo Bonzini  * GNU GPL, version 2 or (at your option) any later version.
16c2b38b27SPaolo Bonzini  */
17c2b38b27SPaolo Bonzini 
18c2b38b27SPaolo Bonzini #include "qemu/osdep.h"
19c2b38b27SPaolo Bonzini #include "qemu-common.h"
20c2b38b27SPaolo Bonzini #include "block/block.h"
21c2b38b27SPaolo Bonzini #include "qemu/queue.h"
22c2b38b27SPaolo Bonzini #include "qemu/sockets.h"
23c2b38b27SPaolo Bonzini #include "qapi/error.h"
24c2b38b27SPaolo Bonzini #include "qemu/rcu_queue.h"
25c2b38b27SPaolo Bonzini 
/*
 * Per-handler state for the Windows AIO implementation.  A node either
 * tracks a socket (io_read/io_write set via aio_set_fd_handler) or an
 * EventNotifier (io_notify set via aio_set_event_notifier).
 */
struct AioHandler {
    EventNotifier *e;                /* backing notifier; &ctx->notifier for socket nodes */
    IOHandler *io_read;              /* socket read callback, run on G_IO_IN revents */
    IOHandler *io_write;             /* socket write callback, run on G_IO_OUT revents */
    EventNotifierHandler *io_notify; /* notifier callback, run when e's handle is signalled */
    GPollFD pfd;                     /* fd + events/revents; also registered with the GSource */
    int deleted;                     /* deferred-deletion flag, set while aio_poll walks the list */
    void *opaque;                    /* user pointer passed to io_read/io_write */
    bool is_external;                /* filtered by aio_node_check() in aio_poll */
    QLIST_ENTRY(AioHandler) node;    /* link in ctx->aio_handlers */
};
37c2b38b27SPaolo Bonzini 
/*
 * Register, update or remove the read/write callbacks for socket @fd on
 * @ctx.  Passing both @io_read and @io_write as NULL removes the handler.
 * @io_poll is accepted for interface compatibility but ignored: polling
 * mode is not implemented on Windows (see aio_context_set_poll_params).
 */
void aio_set_fd_handler(AioContext *ctx,
                        int fd,
                        bool is_external,
                        IOHandler *io_read,
                        IOHandler *io_write,
                        AioPollFn *io_poll,
                        void *opaque)
{
    /* fd is a SOCKET in our case */
    AioHandler *node;

    qemu_lockcnt_lock(&ctx->list_lock);
    /* Look for an existing, live node for this fd */
    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        if (node->pfd.fd == fd && !node->deleted) {
            break;
        }
    }

    /* Are we deleting the fd handler? */
    if (!io_read && !io_write) {
        if (node) {
            /* If aio_poll is in progress, just mark the node as deleted */
            if (qemu_lockcnt_count(&ctx->list_lock)) {
                node->deleted = 1;
                node->pfd.revents = 0;
            } else {
                /* Otherwise, delete it for real.  We can't just mark it as
                 * deleted because deleted nodes are only cleaned up after
                 * releasing the list_lock.
                 */
                QLIST_REMOVE(node, node);
                g_free(node);
            }
        }
    } else {
        HANDLE event;
        long bitmask = 0;

        if (node == NULL) {
            /* Alloc and insert if it's not already there */
            node = g_new0(AioHandler, 1);
            node->pfd.fd = fd;
            QLIST_INSERT_HEAD_RCU(&ctx->aio_handlers, node, node);
        }

        /* NOTE(review): pfd.events is derived from the handlers installed
         * *before* this call — node->io_read/io_write are only overwritten
         * below, and for a freshly g_new0'd node both are still NULL here.
         * Confirm whether pfd.events is consumed anywhere on the socket
         * path (dispatch uses pfd.revents and the WSAEventSelect bitmask).
         */
        node->pfd.events = 0;
        if (node->io_read) {
            node->pfd.events |= G_IO_IN;
        }
        if (node->io_write) {
            node->pfd.events |= G_IO_OUT;
        }

        /* Socket readiness is signalled through the context's notifier */
        node->e = &ctx->notifier;

        /* Update handler with latest information */
        node->opaque = opaque;
        node->io_read = io_read;
        node->io_write = io_write;
        node->is_external = is_external;

        /* Build the WSAEventSelect network-event mask from the callbacks */
        if (io_read) {
            bitmask |= FD_READ | FD_ACCEPT | FD_CLOSE;
        }

        if (io_write) {
            bitmask |= FD_WRITE | FD_CONNECT;
        }

        /* Associate the socket with the context's event object */
        event = event_notifier_get_handle(&ctx->notifier);
        WSAEventSelect(node->pfd.fd, event, bitmask);
    }

    qemu_lockcnt_unlock(&ctx->list_lock);
    /* Kick the event loop so the updated handler set is re-evaluated */
    aio_notify(ctx);
}
114c2b38b27SPaolo Bonzini 
/*
 * Polling mode is not implemented on Windows, so there is nothing to
 * configure for poll-begin/poll-end callbacks.  Intentionally a no-op.
 */
void aio_set_fd_poll(AioContext *ctx, int fd,
                     IOHandler *io_poll_begin,
                     IOHandler *io_poll_end)
{
    /* Not implemented */
}
121c2b38b27SPaolo Bonzini 
/*
 * Install, update or remove @io_notify as the callback for EventNotifier
 * @e on @ctx.  Passing io_notify == NULL removes the handler.  @io_poll
 * is accepted for interface compatibility but ignored: polling mode is
 * not implemented on Windows.
 */
void aio_set_event_notifier(AioContext *ctx,
                            EventNotifier *e,
                            bool is_external,
                            EventNotifierHandler *io_notify,
                            AioPollFn *io_poll)
{
    AioHandler *node;

    qemu_lockcnt_lock(&ctx->list_lock);
    /* Look for an existing, live node for this notifier */
    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        if (node->e == e && !node->deleted) {
            break;
        }
    }

    /* Are we deleting the fd handler? */
    if (!io_notify) {
        if (node) {
            /* Stop the GSource from polling the notifier's handle */
            g_source_remove_poll(&ctx->source, &node->pfd);

            /* aio_poll is in progress, just mark the node as deleted */
            if (qemu_lockcnt_count(&ctx->list_lock)) {
                node->deleted = 1;
                node->pfd.revents = 0;
            } else {
                /* Otherwise, delete it for real.  We can't just mark it as
                 * deleted because deleted nodes are only cleaned up after
                 * releasing the list_lock.
                 */
                QLIST_REMOVE(node, node);
                g_free(node);
            }
        }
    } else {
        if (node == NULL) {
            /* Alloc and insert if it's not already there */
            node = g_new0(AioHandler, 1);
            node->e = e;
            /* Poll the notifier's Win32 HANDLE for readability */
            node->pfd.fd = (uintptr_t)event_notifier_get_handle(e);
            node->pfd.events = G_IO_IN;
            node->is_external = is_external;
            QLIST_INSERT_HEAD_RCU(&ctx->aio_handlers, node, node);

            g_source_add_poll(&ctx->source, &node->pfd);
        }
        /* Update handler with latest information */
        node->io_notify = io_notify;
    }

    qemu_lockcnt_unlock(&ctx->list_lock);
    /* Kick the event loop so the change is picked up */
    aio_notify(ctx);
}
174c2b38b27SPaolo Bonzini 
/*
 * Polling mode is not implemented on Windows, so poll-begin/poll-end
 * hooks for event notifiers have nothing to attach to.  Intentionally a
 * no-op.
 */
void aio_set_event_notifier_poll(AioContext *ctx,
                                 EventNotifier *notifier,
                                 EventNotifierHandler *io_poll_begin,
                                 EventNotifierHandler *io_poll_end)
{
    /* Not implemented */
}
182c2b38b27SPaolo Bonzini 
/*
 * First phase of a poll: probe every registered socket with a
 * zero-timeout select() and record readiness in each handler's
 * pfd.revents.  Returns true if any socket had pending events; the
 * recorded revents are consumed later by aio_pending() and
 * aio_dispatch_handlers().
 */
bool aio_prepare(AioContext *ctx)
{
    /* static storage => zero-initialized, so select() never blocks */
    static struct timeval tv0;
    AioHandler *node;
    bool have_select_revents = false;
    fd_set rfds, wfds;

    /*
     * We have to walk very carefully in case aio_set_fd_handler is
     * called while we're walking.
     */
    qemu_lockcnt_inc(&ctx->list_lock);

    /* fill fd sets */
    FD_ZERO(&rfds);
    FD_ZERO(&wfds);
    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
        if (node->io_read) {
            FD_SET ((SOCKET)node->pfd.fd, &rfds);
        }
        if (node->io_write) {
            FD_SET ((SOCKET)node->pfd.fd, &wfds);
        }
    }

    /* On Winsock the first argument to select() is ignored */
    if (select(0, &rfds, &wfds, NULL, &tv0) > 0) {
        QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
            node->pfd.revents = 0;
            if (FD_ISSET(node->pfd.fd, &rfds)) {
                node->pfd.revents |= G_IO_IN;
                have_select_revents = true;
            }

            if (FD_ISSET(node->pfd.fd, &wfds)) {
                node->pfd.revents |= G_IO_OUT;
                have_select_revents = true;
            }
        }
    }

    qemu_lockcnt_dec(&ctx->list_lock);
    return have_select_revents;
}
226c2b38b27SPaolo Bonzini 
227c2b38b27SPaolo Bonzini bool aio_pending(AioContext *ctx)
228c2b38b27SPaolo Bonzini {
229c2b38b27SPaolo Bonzini     AioHandler *node;
230c2b38b27SPaolo Bonzini     bool result = false;
231c2b38b27SPaolo Bonzini 
232c2b38b27SPaolo Bonzini     /*
233c2b38b27SPaolo Bonzini      * We have to walk very carefully in case aio_set_fd_handler is
234c2b38b27SPaolo Bonzini      * called while we're walking.
235c2b38b27SPaolo Bonzini      */
236c2b38b27SPaolo Bonzini     qemu_lockcnt_inc(&ctx->list_lock);
237c2b38b27SPaolo Bonzini     QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
238c2b38b27SPaolo Bonzini         if (node->pfd.revents && node->io_notify) {
239c2b38b27SPaolo Bonzini             result = true;
240c2b38b27SPaolo Bonzini             break;
241c2b38b27SPaolo Bonzini         }
242c2b38b27SPaolo Bonzini 
243c2b38b27SPaolo Bonzini         if ((node->pfd.revents & G_IO_IN) && node->io_read) {
244c2b38b27SPaolo Bonzini             result = true;
245c2b38b27SPaolo Bonzini             break;
246c2b38b27SPaolo Bonzini         }
247c2b38b27SPaolo Bonzini         if ((node->pfd.revents & G_IO_OUT) && node->io_write) {
248c2b38b27SPaolo Bonzini             result = true;
249c2b38b27SPaolo Bonzini             break;
250c2b38b27SPaolo Bonzini         }
251c2b38b27SPaolo Bonzini     }
252c2b38b27SPaolo Bonzini 
253c2b38b27SPaolo Bonzini     qemu_lockcnt_dec(&ctx->list_lock);
254c2b38b27SPaolo Bonzini     return result;
255c2b38b27SPaolo Bonzini }
256c2b38b27SPaolo Bonzini 
/*
 * Invoke the callbacks of every ready handler.  @event is the HANDLE that
 * WaitForMultipleObjects reported as signalled, or INVALID_HANDLE_VALUE
 * when dispatching only revents recorded by aio_prepare() (see
 * aio_dispatch).  The caller must hold a list_lock count
 * (qemu_lockcnt_inc) so that concurrent aio_set_fd_handler /
 * aio_set_event_notifier calls mark nodes deleted instead of freeing
 * them under us.  Returns true if any real progress was made.
 */
static bool aio_dispatch_handlers(AioContext *ctx, HANDLE event)
{
    AioHandler *node;
    bool progress = false;
    AioHandler *tmp;

    /*
     * We have to walk very carefully in case aio_set_fd_handler is
     * called while we're walking.
     */
    QLIST_FOREACH_SAFE_RCU(node, &ctx->aio_handlers, node, tmp) {
        /* Snapshot revents; both branches below clear pfd.revents */
        int revents = node->pfd.revents;

        /* Notifier nodes fire on recorded revents or a handle match */
        if (!node->deleted &&
            (revents || event_notifier_get_handle(node->e) == event) &&
            node->io_notify) {
            node->pfd.revents = 0;
            node->io_notify(node->e);

            /* aio_notify() does not count as progress */
            if (node->e != &ctx->notifier) {
                progress = true;
            }
        }

        /* Socket nodes dispatch read/write based on select() results */
        if (!node->deleted &&
            (node->io_read || node->io_write)) {
            node->pfd.revents = 0;
            if ((revents & G_IO_IN) && node->io_read) {
                node->io_read(node->opaque);
                progress = true;
            }
            if ((revents & G_IO_OUT) && node->io_write) {
                node->io_write(node->opaque);
                progress = true;
            }

            /* if the next select() will return an event, we have progressed */
            if (event == event_notifier_get_handle(&ctx->notifier)) {
                WSANETWORKEVENTS ev;
                WSAEnumNetworkEvents(node->pfd.fd, event, &ev);
                if (ev.lNetworkEvents) {
                    progress = true;
                }
            }
        }

        /* Reap deferred-deleted nodes, but only if we can take the list
         * lock exclusively (i.e. no other walker holds a count).
         */
        if (node->deleted) {
            if (qemu_lockcnt_dec_if_lock(&ctx->list_lock)) {
                QLIST_REMOVE(node, node);
                g_free(node);
                qemu_lockcnt_inc_and_unlock(&ctx->list_lock);
            }
        }
    }

    return progress;
}
315c2b38b27SPaolo Bonzini 
/*
 * Dispatch pending bottom halves, handlers with recorded revents, and
 * expired timers, without blocking.  INVALID_HANDLE_VALUE means no wait
 * handle fired, so only revents recorded by a prior aio_prepare() (plus
 * handlers matched by that sentinel, i.e. none) are acted upon.
 */
void aio_dispatch(AioContext *ctx)
{
    qemu_lockcnt_inc(&ctx->list_lock);
    aio_bh_poll(ctx);
    aio_dispatch_handlers(ctx, INVALID_HANDLE_VALUE);
    qemu_lockcnt_dec(&ctx->list_lock);
    /* Timers run outside the list_lock reference */
    timerlistgroup_run_timers(&ctx->tlg);
}
324c2b38b27SPaolo Bonzini 
/*
 * Run one iteration of the event loop for @ctx.  When @blocking is true,
 * wait (up to the timer deadline) for at least one event source to become
 * ready; otherwise handle only what is already pending.  Returns true if
 * progress was made (a callback, bottom half or timer ran).
 */
bool aio_poll(AioContext *ctx, bool blocking)
{
    AioHandler *node;
    HANDLE events[MAXIMUM_WAIT_OBJECTS + 1];
    bool progress, have_select_revents, first;
    int count;
    int timeout;

    progress = false;

    /* aio_notify can avoid the expensive event_notifier_set if
     * everything (file descriptors, bottom halves, timers) will
     * be re-evaluated before the next blocking poll().  This is
     * already true when aio_poll is called with blocking == false;
     * if blocking == true, it is only true after poll() returns,
     * so disable the optimization now.
     */
    if (blocking) {
        atomic_add(&ctx->notify_me, 2);
    }

    qemu_lockcnt_inc(&ctx->list_lock);
    /* Record socket readiness into pfd.revents via a zero-timeout select */
    have_select_revents = aio_prepare(ctx);

    /* fill fd sets */
    count = 0;
    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
        /* Collect the wait handles of live, enabled notifier nodes */
        if (!node->deleted && node->io_notify
            && aio_node_check(ctx, node->is_external)) {
            events[count++] = event_notifier_get_handle(node->e);
        }
    }

    first = true;

    /* ctx->notifier is always registered.  */
    assert(count > 0);

    /* Multiple iterations, all of them non-blocking except the first,
     * may be necessary to process all pending events.  After the first
     * WaitForMultipleObjects call ctx->notify_me will be decremented.
     */
    do {
        HANDLE event;
        int ret;

        /* Only the first iteration may block, and only when select()
         * found nothing already pending.
         */
        timeout = blocking && !have_select_revents
            ? qemu_timeout_ns_to_ms(aio_compute_timeout(ctx)) : 0;
        ret = WaitForMultipleObjects(count, events, FALSE, timeout);
        if (blocking) {
            assert(first);
            assert(in_aio_context_home_thread(ctx));
            atomic_sub(&ctx->notify_me, 2);
            aio_notify_accept(ctx);
        }

        if (first) {
            /* Bottom halves run once per aio_poll invocation */
            progress |= aio_bh_poll(ctx);
            first = false;
        }

        /* if we have any signaled events, dispatch event */
        event = NULL;
        if ((DWORD) (ret - WAIT_OBJECT_0) < count) {
            /* Compact the wait set by swapping in the last handle, so
             * each handle is waited on (and dispatched) at most once.
             */
            event = events[ret - WAIT_OBJECT_0];
            events[ret - WAIT_OBJECT_0] = events[--count];
        } else if (!have_select_revents) {
            /* Timed out with no select() results left: nothing to do */
            break;
        }

        have_select_revents = false;
        blocking = false;

        progress |= aio_dispatch_handlers(ctx, event);
    } while (count > 0);

    qemu_lockcnt_dec(&ctx->list_lock);

    progress |= timerlistgroup_run_timers(&ctx->tlg);
    return progress;
}
406c2b38b27SPaolo Bonzini 
/* No platform-specific AioContext initialization is needed on Windows. */
void aio_context_setup(AioContext *ctx)
{
}
410c2b38b27SPaolo Bonzini 
/* No platform-specific AioContext teardown is needed on Windows. */
void aio_context_destroy(AioContext *ctx)
{
}
414cd0a6d2bSJie Wang 
415c2b38b27SPaolo Bonzini void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
416c2b38b27SPaolo Bonzini                                  int64_t grow, int64_t shrink, Error **errp)
417c2b38b27SPaolo Bonzini {
41890c558beSPeter Xu     if (max_ns) {
419c2b38b27SPaolo Bonzini         error_setg(errp, "AioContext polling is not implemented on Windows");
420c2b38b27SPaolo Bonzini     }
42190c558beSPeter Xu }
422