/*
 * QEMU aio implementation
 *
 * Copyright IBM Corp., 2008
 * Copyright Red Hat Inc., 2012
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *  Paolo Bonzini     <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "block/block.h"
#include "qemu/queue.h"
#include "qemu/sockets.h"
#include "qapi/error.h"
#include "qemu/rcu_queue.h"

struct AioHandler {
    EventNotifier *e;
    IOHandler *io_read;
    IOHandler *io_write;
    EventNotifierHandler *io_notify;
    GPollFD pfd;
    int deleted;
    void *opaque;
    bool is_external;
    QLIST_ENTRY(AioHandler) node;
};

static void aio_remove_fd_handler(AioContext *ctx, AioHandler *node)
{
    /*
     * If the GSource is in the process of being destroyed then
     * g_source_remove_poll() causes an assertion failure.  Skip
     * removal in that case, because glib cleans up its state during
     * destruction anyway.
     */
    if (!g_source_is_destroyed(&ctx->source)) {
        g_source_remove_poll(&ctx->source, &node->pfd);
    }

    /* If aio_poll is in progress, just mark the node as deleted */
    if (qemu_lockcnt_count(&ctx->list_lock)) {
        node->deleted = 1;
        node->pfd.revents = 0;
    } else {
        /* Otherwise, delete it for real.  We can't just mark it as
         * deleted because deleted nodes are only cleaned up after
         * releasing the list_lock.
         */
        QLIST_REMOVE(node, node);
        g_free(node);
    }
}

void aio_set_fd_handler(AioContext *ctx,
                        int fd,
                        bool is_external,
                        IOHandler *io_read,
                        IOHandler *io_write,
                        AioPollFn *io_poll,
                        void *opaque)
{
    /* fd is a SOCKET in our case */
    AioHandler *old_node;
    AioHandler *node = NULL;

    qemu_lockcnt_lock(&ctx->list_lock);
    QLIST_FOREACH(old_node, &ctx->aio_handlers, node) {
        if (old_node->pfd.fd == fd && !old_node->deleted) {
            break;
        }
    }

    if (io_read || io_write) {
        HANDLE event;
        long bitmask = 0;

        /* Alloc and insert if it's not already there */
        node = g_new0(AioHandler, 1);
        node->pfd.fd = fd;

        node->pfd.events = 0;
        if (node->io_read) {
            node->pfd.events |= G_IO_IN;
        }
        if (node->io_write) {
            node->pfd.events |= G_IO_OUT;
        }

        node->e = &ctx->notifier;

        /* Update handler with latest information */
        node->opaque = opaque;
        node->io_read = io_read;
        node->io_write = io_write;
        node->is_external = is_external;

        if (io_read) {
            bitmask |= FD_READ | FD_ACCEPT | FD_CLOSE;
        }

        if (io_write) {
            bitmask |= FD_WRITE | FD_CONNECT;
        }

        QLIST_INSERT_HEAD_RCU(&ctx->aio_handlers, node, node);
        event = event_notifier_get_handle(&ctx->notifier);
        WSAEventSelect(node->pfd.fd, event, bitmask);
    }
    if (old_node) {
        aio_remove_fd_handler(ctx, old_node);
    }

    qemu_lockcnt_unlock(&ctx->list_lock);
    aio_notify(ctx);
}

void aio_set_fd_poll(AioContext *ctx, int fd,
                     IOHandler *io_poll_begin,
                     IOHandler *io_poll_end)
{
    /* Not implemented */
}

void aio_set_event_notifier(AioContext *ctx,
                            EventNotifier *e,
                            bool is_external,
                            EventNotifierHandler *io_notify,
                            AioPollFn *io_poll)
{
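    /*
     * Register or update the handler for an EventNotifier.  The notifier's
     * HANDLE is exposed as a GPollFD (pfd.fd, polled for G_IO_IN) and added
     * to the AioContext's GSource with g_source_add_poll(), so both aio_poll()
     * and a glib main loop can wait on it.  Passing io_notify == NULL removes
     * the handler again.
     */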
    AioHandler *node;

    qemu_lockcnt_lock(&ctx->list_lock);
    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        if (node->e == e && !node->deleted) {
            break;
        }
    }

    /* Are we deleting the fd handler? */
    if (!io_notify) {
        if (node) {
            aio_remove_fd_handler(ctx, node);
        }
    } else {
        if (node == NULL) {
            /* Alloc and insert if it's not already there */
            node = g_new0(AioHandler, 1);
            node->e = e;
            node->pfd.fd = (uintptr_t)event_notifier_get_handle(e);
            node->pfd.events = G_IO_IN;
            node->is_external = is_external;
            QLIST_INSERT_HEAD_RCU(&ctx->aio_handlers, node, node);

            g_source_add_poll(&ctx->source, &node->pfd);
        }
        /* Update handler with latest information */
        node->io_notify = io_notify;
    }

    qemu_lockcnt_unlock(&ctx->list_lock);
    aio_notify(ctx);
}

void aio_set_event_notifier_poll(AioContext *ctx,
                                 EventNotifier *notifier,
                                 EventNotifierHandler *io_poll_begin,
                                 EventNotifierHandler *io_poll_end)
{
    /* Not implemented */
}

bool aio_prepare(AioContext *ctx)
{
    static struct timeval tv0;
    AioHandler *node;
    bool have_select_revents = false;
    fd_set rfds, wfds;

    /*
     * We have to walk very carefully in case aio_set_fd_handler is
     * called while we're walking.
     */
    qemu_lockcnt_inc(&ctx->list_lock);

    /* fill fd sets */
    FD_ZERO(&rfds);
    FD_ZERO(&wfds);
    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
        if (node->io_read) {
            FD_SET ((SOCKET)node->pfd.fd, &rfds);
        }
        if (node->io_write) {
            FD_SET ((SOCKET)node->pfd.fd, &wfds);
        }
    }

    if (select(0, &rfds, &wfds, NULL, &tv0) > 0) {
        QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
            node->pfd.revents = 0;
            if (FD_ISSET(node->pfd.fd, &rfds)) {
                node->pfd.revents |= G_IO_IN;
                have_select_revents = true;
            }

            if (FD_ISSET(node->pfd.fd, &wfds)) {
                node->pfd.revents |= G_IO_OUT;
                have_select_revents = true;
            }
        }
    }

    qemu_lockcnt_dec(&ctx->list_lock);
    return have_select_revents;
}

bool aio_pending(AioContext *ctx)
{
    AioHandler *node;
    bool result = false;

    /*
     * We have to walk very carefully in case aio_set_fd_handler is
     * called while we're walking.
     */
    qemu_lockcnt_inc(&ctx->list_lock);
    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
        if (node->pfd.revents && node->io_notify) {
            result = true;
            break;
        }

        if ((node->pfd.revents & G_IO_IN) && node->io_read) {
            result = true;
            break;
        }
        if ((node->pfd.revents & G_IO_OUT) && node->io_write) {
            result = true;
            break;
        }
    }

    qemu_lockcnt_dec(&ctx->list_lock);
    return result;
}

static bool aio_dispatch_handlers(AioContext *ctx, HANDLE event)
{
    AioHandler *node;
    bool progress = false;
    AioHandler *tmp;

    /*
     * We have to walk very carefully in case aio_set_fd_handler is
     * called while we're walking.
     */
    QLIST_FOREACH_SAFE_RCU(node, &ctx->aio_handlers, node, tmp) {
        int revents = node->pfd.revents;

        if (!node->deleted &&
            (revents || event_notifier_get_handle(node->e) == event) &&
            node->io_notify) {
            node->pfd.revents = 0;
            node->io_notify(node->e);

            /* aio_notify() does not count as progress */
            if (node->e != &ctx->notifier) {
                progress = true;
            }
        }

        if (!node->deleted &&
            (node->io_read || node->io_write)) {
            node->pfd.revents = 0;
            if ((revents & G_IO_IN) && node->io_read) {
                node->io_read(node->opaque);
                progress = true;
            }
            if ((revents & G_IO_OUT) && node->io_write) {
                node->io_write(node->opaque);
                progress = true;
            }

            /* if the next select() will return an event, we have progressed */
            if (event == event_notifier_get_handle(&ctx->notifier)) {
                WSANETWORKEVENTS ev;
                WSAEnumNetworkEvents(node->pfd.fd, event, &ev);
                if (ev.lNetworkEvents) {
                    progress = true;
                }
            }
        }

        if (node->deleted) {
            if (qemu_lockcnt_dec_if_lock(&ctx->list_lock)) {
                QLIST_REMOVE(node, node);
                g_free(node);
                qemu_lockcnt_inc_and_unlock(&ctx->list_lock);
            }
        }
    }

    return progress;
}

void aio_dispatch(AioContext *ctx)
{
    qemu_lockcnt_inc(&ctx->list_lock);
    aio_bh_poll(ctx);
    aio_dispatch_handlers(ctx, INVALID_HANDLE_VALUE);
    qemu_lockcnt_dec(&ctx->list_lock);
    timerlistgroup_run_timers(&ctx->tlg);
}

bool aio_poll(AioContext *ctx, bool blocking)
{
    AioHandler *node;
    HANDLE events[MAXIMUM_WAIT_OBJECTS + 1];
    bool progress, have_select_revents, first;
    int count;
    int timeout;

    /*
     * There cannot be two concurrent aio_poll calls for the same AioContext (or
     * an aio_poll concurrent with a GSource prepare/check/dispatch callback).
     * We rely on this below to avoid slow locked accesses to ctx->notify_me.
     */
    assert(in_aio_context_home_thread(ctx));
    progress = false;

    /* aio_notify can avoid the expensive event_notifier_set if
     * everything (file descriptors, bottom halves, timers) will
     * be re-evaluated before the next blocking poll().  This is
     * already true when aio_poll is called with blocking == false;
     * if blocking == true, it is only true after poll() returns,
     * so disable the optimization now.
     */
    if (blocking) {
        atomic_set(&ctx->notify_me, atomic_read(&ctx->notify_me) + 2);
        /*
         * Write ctx->notify_me before computing the timeout
         * (reading bottom half flags, etc.).  Pairs with
         * smp_mb in aio_notify().
         */
        smp_mb();
    }

    qemu_lockcnt_inc(&ctx->list_lock);
    have_select_revents = aio_prepare(ctx);

    /* fill fd sets */
    count = 0;
    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
        if (!node->deleted && node->io_notify
            && aio_node_check(ctx, node->is_external)) {
            events[count++] = event_notifier_get_handle(node->e);
        }
    }

    first = true;

    /* ctx->notifier is always registered.  */
    assert(count > 0);

    /* Multiple iterations, all of them non-blocking except the first,
     * may be necessary to process all pending events.  After the first
     * WaitForMultipleObjects call ctx->notify_me will be decremented.
     */
    do {
        HANDLE event;
        int ret;

        timeout = blocking && !have_select_revents
            ? qemu_timeout_ns_to_ms(aio_compute_timeout(ctx)) : 0;
        ret = WaitForMultipleObjects(count, events, FALSE, timeout);
        if (blocking) {
            assert(first);
            atomic_store_release(&ctx->notify_me, atomic_read(&ctx->notify_me) - 2);
            aio_notify_accept(ctx);
        }

        if (first) {
            progress |= aio_bh_poll(ctx);
            first = false;
        }

        /* if we have any signaled events, dispatch event */
        event = NULL;
        if ((DWORD) (ret - WAIT_OBJECT_0) < count) {
            event = events[ret - WAIT_OBJECT_0];
            events[ret - WAIT_OBJECT_0] = events[--count];
        } else if (!have_select_revents) {
            break;
        }

        have_select_revents = false;
        blocking = false;

        progress |= aio_dispatch_handlers(ctx, event);
    } while (count > 0);

    qemu_lockcnt_dec(&ctx->list_lock);

    progress |= timerlistgroup_run_timers(&ctx->tlg);
    return progress;
}

void aio_context_setup(AioContext *ctx)
{
}

void aio_context_destroy(AioContext *ctx)
{
}

void aio_context_use_g_source(AioContext *ctx)
{
}

void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
                                 int64_t grow, int64_t shrink, Error **errp)
{
    if (max_ns) {
        error_setg(errp, "AioContext polling is not implemented on Windows");
    }
}