/*
 * QEMU aio implementation
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef QEMU_AIO_H
#define QEMU_AIO_H

#include "qemu-common.h"
#include "qemu/queue.h"
#include "qemu/event_notifier.h"
#include "qemu/thread.h"
#include "qemu/timer.h"

typedef struct BlockAIOCB BlockAIOCB;
typedef void BlockCompletionFunc(void *opaque, int ret);

typedef struct AIOCBInfo {
    void (*cancel_async)(BlockAIOCB *acb);
    AioContext *(*get_aio_context)(BlockAIOCB *acb);
    size_t aiocb_size;
} AIOCBInfo;

struct BlockAIOCB {
    const AIOCBInfo *aiocb_info;
    BlockDriverState *bs;
    BlockCompletionFunc *cb;
    void *opaque;
    int refcnt;
};

void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
                   BlockCompletionFunc *cb, void *opaque);
void qemu_aio_unref(void *p);
void qemu_aio_ref(void *p);

typedef struct AioHandler AioHandler;
typedef void QEMUBHFunc(void *opaque);
typedef bool AioPollFn(void *opaque);
typedef void IOHandler(void *opaque);

struct ThreadPool;
struct LinuxAioState;

struct AioContext {
    GSource source;

    /* Protects all fields from multi-threaded access */
    QemuRecMutex lock;

    /* The list of registered AIO handlers */
    QLIST_HEAD(, AioHandler) aio_handlers;

    /* This is a simple lock used to protect the aio_handlers list.
     * Specifically, it's used to ensure that no callbacks are removed while
     * we're walking and dispatching callbacks.
     */
    int walking_handlers;

    /* Used to avoid unnecessary event_notifier_set calls in aio_notify;
     * accessed with atomic primitives.  If this field is 0, everything
     * (file descriptors, bottom halves, timers) will be re-evaluated
     * before the next blocking poll(), thus the event_notifier_set call
     * can be skipped.  If it is non-zero, you may need to wake up a
     * concurrent aio_poll or the glib main event loop, making
     * event_notifier_set necessary.
     *
     * Bit 0 is reserved for GSource usage of the AioContext, and is 1
     * between a call to aio_ctx_prepare and the next call to aio_ctx_check.
     * Bits 1-31 simply count the number of active calls to aio_poll
     * that are in the prepare or poll phase.
     *
     * The GSource and aio_poll must use a different mechanism because
     * there is no certainty that a call to GSource's prepare callback
     * (via g_main_context_prepare) is indeed followed by check and
     * dispatch.  It's not clear whether this would be a bug, but let's
     * play safe and allow it---it will just cause extra calls to
     * event_notifier_set until the next call to dispatch.
     *
     * Instead, the aio_poll calls include both the prepare and the
     * dispatch phase, hence a simple counter is enough for them.
     */
    uint32_t notify_me;

    /* Lock protecting concurrent addition and deletion of bottom halves */
    QemuMutex bh_lock;

    /* Anchor of the list of Bottom Halves belonging to the context */
    struct QEMUBH *first_bh;

    /* A simple lock used to protect the first_bh list, and ensure that
     * no callbacks are removed while we're walking and dispatching callbacks.
     */
    int walking_bh;

    /* Used by aio_notify.
     *
     * "notified" is used to avoid expensive event_notifier_test_and_clear
     * calls.  When it is clear, the EventNotifier is clear, or one thread
     * is going to clear "notified" before processing more events.  False
     * positives are possible, i.e. "notified" could be set even though the
     * EventNotifier is clear.
     *
     * Note that event_notifier_set *cannot* be optimized the same way.  For
     * more information on the problem that would result, see "#ifdef BUG2"
     * in the docs/aio_notify_accept.promela formal model.
     */
    bool notified;
    EventNotifier notifier;

    /* Thread pool for performing work and receiving completion callbacks */
    struct ThreadPool *thread_pool;

#ifdef CONFIG_LINUX_AIO
    /* State for native Linux AIO.  Uses aio_context_acquire/release for
     * locking.
     */
    struct LinuxAioState *linux_aio;
#endif

    /* TimerLists for calling timers - one per clock type */
    QEMUTimerListGroup tlg;

    int external_disable_cnt;

    /* Number of AioHandlers without .io_poll() */
    int poll_disable_cnt;

    /* Polling mode parameters */
    int64_t poll_ns;        /* current polling time in nanoseconds */
    int64_t poll_max_ns;    /* maximum polling time in nanoseconds */
    int64_t poll_grow;      /* polling time growth factor */
    int64_t poll_shrink;    /* polling time shrink factor */

    /* Are we in polling mode or monitoring file descriptors? */
    bool poll_started;

    /* epoll(7) state used when built with CONFIG_EPOLL */
    int epollfd;
    bool epoll_enabled;
    bool epoll_available;
};
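/* Illustrative sketch (not part of this header's API): the typical lifecycle
 * of an AioContext as seen by a user of this file.  The loop structure and
 * the "done" flag are hypothetical; all functions used are declared below.
 *
 *     AioContext *ctx = aio_context_new(&error_abort);
 *
 *     while (!done) {
 *         // Blocks until at least one event is ready.  Per the
 *         // aio_context_acquire() documentation below, aio_poll() takes
 *         // care of acquiring and releasing the context by itself.
 *         aio_poll(ctx, true);
 *     }
 *
 *     aio_context_unref(ctx);   // drop the reference from aio_context_new()
 */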

/**
 * aio_context_new: Allocate a new AioContext.
 *
 * AioContexts provide a mini event loop that can be waited on synchronously.
 * They also provide bottom halves, a service to execute a piece of code
 * as soon as possible.
 */
AioContext *aio_context_new(Error **errp);

/**
 * aio_context_ref:
 * @ctx: The AioContext to operate on.
 *
 * Add a reference to an AioContext.
 */
void aio_context_ref(AioContext *ctx);

/**
 * aio_context_unref:
 * @ctx: The AioContext to operate on.
 *
 * Drop a reference to an AioContext.
 */
void aio_context_unref(AioContext *ctx);

/* Take ownership of the AioContext.  If the AioContext will be shared between
 * threads, and a thread does not want to be interrupted, it will have to
 * take ownership around calls to aio_poll().  Otherwise, aio_poll()
 * automatically takes care of calling aio_context_acquire and
 * aio_context_release.
 *
 * Access to timers and BHs from a thread that has not acquired AioContext
 * is possible.  Access to callbacks for now must be done while the AioContext
 * is owned by the thread (FIXME).
 */
void aio_context_acquire(AioContext *ctx);

/* Relinquish ownership of the AioContext. */
void aio_context_release(AioContext *ctx);

/**
 * aio_bh_schedule_oneshot: Allocate a new bottom half structure that will run
 * only once and as soon as possible.
 */
void aio_bh_schedule_oneshot(AioContext *ctx, QEMUBHFunc *cb, void *opaque);

/**
 * aio_bh_new: Allocate a new bottom half structure.
 *
 * Bottom halves are lightweight callbacks whose invocation is guaranteed
 * to be wait-free, thread-safe and signal-safe.  The #QEMUBH structure
 * is opaque and must be allocated prior to its use.
 */
QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque);
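/* Illustrative sketch (not part of this header): deferring one piece of work
 * so that it runs in @ctx's event loop as soon as possible.  MyJob and
 * process_job() are hypothetical names.
 *
 *     typedef struct MyJob { int id; } MyJob;
 *
 *     static void my_job_bh(void *opaque)
 *     {
 *         MyJob *job = opaque;
 *         process_job(job);     // runs once; the one-shot BH frees itself
 *         g_free(job);
 *     }
 *
 *     MyJob *job = g_new0(MyJob, 1);
 *     job->id = 42;
 *     aio_bh_schedule_oneshot(ctx, my_job_bh, job);
 */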

/**
 * aio_notify: Force processing of pending events.
 *
 * Similar to signaling a condition variable, aio_notify forces
 * aio_poll to exit, so that the next call will re-examine pending events.
 * The caller of aio_notify will usually call aio_poll again very soon,
 * or go through another iteration of the GLib main loop.  Hence, aio_notify
 * also has the side effect of recalculating the sets of file descriptors
 * that the main loop waits for.
 *
 * Calling aio_notify is rarely necessary, because for example scheduling
 * a bottom half calls it already.
 */
void aio_notify(AioContext *ctx);

/**
 * aio_notify_accept: Acknowledge receiving an aio_notify.
 *
 * aio_notify() uses an EventNotifier in order to wake up a sleeping
 * aio_poll() or g_main_context_iteration().  Calls to aio_notify() are
 * usually rare, but the AioContext has to clear the EventNotifier on
 * every aio_poll() or g_main_context_iteration() in order to avoid
 * busy waiting.  This event_notifier_test_and_clear() cannot be done
 * using the usual aio_context_set_event_notifier(), because it must
 * be done before processing all events (file descriptors, bottom halves,
 * timers).
 *
 * aio_notify_accept() is an optimized event_notifier_test_and_clear()
 * that is specific to an AioContext's notifier; it is used internally
 * to clear the EventNotifier only if aio_notify() had been called.
 */
void aio_notify_accept(AioContext *ctx);

/**
 * aio_bh_call: Executes callback function of the specified BH.
 */
void aio_bh_call(QEMUBH *bh);

/**
 * aio_bh_poll: Poll bottom halves for an AioContext.
 *
 * This is an internal function used by the QEMU main loop.
 * Note that aio_bh_poll must not be called concurrently
 * for the same AioContext.
 */
int aio_bh_poll(AioContext *ctx);

/**
 * qemu_bh_schedule: Schedule a bottom half.
 *
 * Scheduling a bottom half interrupts the main loop and causes the
 * execution of the callback that was passed to qemu_bh_new.
 *
 * Bottom halves that are scheduled from a bottom half handler are instantly
 * invoked.  This can create an infinite loop if a bottom half handler
 * schedules itself.
 *
 * @bh: The bottom half to be scheduled.
 */
void qemu_bh_schedule(QEMUBH *bh);

/**
 * qemu_bh_cancel: Cancel execution of a bottom half.
 *
 * Canceling execution of a bottom half undoes the effect of calls to
 * qemu_bh_schedule without freeing its resources yet.  While cancellation
 * itself is also wait-free and thread-safe, it can of course race with the
 * loop that executes bottom halves unless you are holding the iothread
 * mutex.  This makes it mostly useless if you are not holding the mutex.
 *
 * @bh: The bottom half to be canceled.
 */
void qemu_bh_cancel(QEMUBH *bh);

/**
 * qemu_bh_delete: Cancel execution of a bottom half and free its resources.
 *
 * Deleting a bottom half frees the memory that was allocated for it by
 * qemu_bh_new.  It also implies canceling the bottom half if it was
 * scheduled.
 * Deletion is asynchronous: the bottom half is only freed by the event
 * loop once it is no longer in use.
 *
 * @bh: The bottom half to be deleted.
 */
void qemu_bh_delete(QEMUBH *bh);
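/* Illustrative sketch (not part of this header): a long-lived, reusable
 * bottom half, in contrast to the one-shot variant above.  MyDevice and its
 * work queue are hypothetical.
 *
 *     typedef struct MyDevice {
 *         QEMUBH *bh;
 *         ...
 *     } MyDevice;
 *
 *     static void my_device_bh(void *opaque)
 *     {
 *         MyDevice *d = opaque;
 *         ... drain the work queued on d ...
 *     }
 *
 *     // setup: allocate once, next to the state it services
 *     d->bh = aio_bh_new(ctx, my_device_bh, d);
 *
 *     // producers: qemu_bh_schedule() is thread-safe, so any thread may
 *     // kick the bottom half after queuing work
 *     qemu_bh_schedule(d->bh);
 *
 *     // teardown: also cancels a pending invocation; the memory itself is
 *     // freed later, from the event loop (see qemu_bh_delete above)
 *     qemu_bh_delete(d->bh);
 */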

/* Return whether there are any pending callbacks from the GSource
 * attached to the AioContext, before g_poll is invoked.
 *
 * This is used internally in the implementation of the GSource.
 */
bool aio_prepare(AioContext *ctx);

/* Return whether there are any pending callbacks from the GSource
 * attached to the AioContext, after g_poll is invoked.
 *
 * This is used internally in the implementation of the GSource.
 */
bool aio_pending(AioContext *ctx);

/* Dispatch any pending callbacks from the GSource attached to the AioContext.
 *
 * This is used internally in the implementation of the GSource.
 *
 * @dispatch_fds: true to process fds, false to skip them
 *                (can be used as an optimization by callers that know there
 *                are no fds ready)
 */
bool aio_dispatch(AioContext *ctx, bool dispatch_fds);

/* Make progress in completing pending AIO work.  This can issue new AIO
 * requests as a result of executing I/O completion or bottom half callbacks.
 *
 * Return whether any progress was made by executing AIO or bottom half
 * handlers.  If @blocking == true, this should always be true except
 * if someone called aio_notify.
 *
 * If there are no pending bottom halves, but there are pending AIO
 * operations, it may not be possible to make any progress without
 * blocking.  If @blocking is true, this function will wait until one
 * or more AIO events have completed, to ensure something has moved
 * before returning.
 */
bool aio_poll(AioContext *ctx, bool blocking);

/* Register a file descriptor and associated callbacks.  Behaves very similarly
 * to qemu_set_fd_handler.  Unlike qemu_set_fd_handler, these callbacks will
 * be invoked when using aio_poll().
 *
 * Code that invokes AIO completion functions should rely on this function
 * instead of qemu_set_fd_handler[2].
 */
void aio_set_fd_handler(AioContext *ctx,
                        int fd,
                        bool is_external,
                        IOHandler *io_read,
                        IOHandler *io_write,
                        AioPollFn *io_poll,
                        void *opaque);

/* Set polling begin/end callbacks for a file descriptor that has already been
 * registered with aio_set_fd_handler.  Do nothing if the file descriptor is
 * not registered.
 */
void aio_set_fd_poll(AioContext *ctx, int fd,
                     IOHandler *io_poll_begin,
                     IOHandler *io_poll_end);

/* Register an event notifier and associated callbacks.  Behaves very similarly
 * to event_notifier_set_handler.  Unlike event_notifier_set_handler, these
 * callbacks will be invoked when using aio_poll().
 *
 * Code that invokes AIO completion functions should rely on this function
 * instead of event_notifier_set_handler.
 */
void aio_set_event_notifier(AioContext *ctx,
                            EventNotifier *notifier,
                            bool is_external,
                            EventNotifierHandler *io_read,
                            AioPollFn *io_poll);

/* Set polling begin/end callbacks for an event notifier that has already been
 * registered with aio_set_event_notifier.  Do nothing if the event notifier is
 * not registered.
 */
void aio_set_event_notifier_poll(AioContext *ctx,
                                 EventNotifier *notifier,
                                 EventNotifierHandler *io_poll_begin,
                                 EventNotifierHandler *io_poll_end);
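/* Illustrative sketch (not part of this header): watching a socket for
 * readability inside an AioContext.  MyState and my_read_cb are hypothetical;
 * NULL leaves the write and poll callbacks unregistered.
 *
 *     static void my_read_cb(void *opaque)
 *     {
 *         MyState *s = opaque;
 *         ... read from s->fd until it would block ...
 *     }
 *
 *     // register; is_external=true means the handler counts as an external
 *     // client and can be paused with aio_disable_external() (see below)
 *     aio_set_fd_handler(ctx, s->fd, true, my_read_cb, NULL, NULL, s);
 *
 *     // unregister by passing NULL callbacks for the same descriptor
 *     aio_set_fd_handler(ctx, s->fd, true, NULL, NULL, NULL, NULL);
 */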

/* Return a GSource that lets the main loop poll the file descriptors attached
 * to this AioContext.
 */
GSource *aio_get_g_source(AioContext *ctx);

/* Return the ThreadPool bound to this AioContext */
struct ThreadPool *aio_get_thread_pool(AioContext *ctx);

/* Return the LinuxAioState bound to this AioContext */
struct LinuxAioState *aio_get_linux_aio(AioContext *ctx);

/**
 * aio_timer_new:
 * @ctx: the aio context
 * @type: the clock type
 * @scale: the scale
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Allocate a new timer attached to the context @ctx.
 * The function is responsible for memory allocation.
 *
 * The preferred interface is aio_timer_init.  Use that
 * unless you really need dynamic memory allocation.
 *
 * Returns: a pointer to the new timer
 */
static inline QEMUTimer *aio_timer_new(AioContext *ctx, QEMUClockType type,
                                       int scale,
                                       QEMUTimerCB *cb, void *opaque)
{
    return timer_new_tl(ctx->tlg.tl[type], scale, cb, opaque);
}

/**
 * aio_timer_init:
 * @ctx: the aio context
 * @ts: the timer
 * @type: the clock type
 * @scale: the scale
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Initialise a new timer attached to the context @ctx.
 * The caller is responsible for memory allocation.
 */
static inline void aio_timer_init(AioContext *ctx,
                                  QEMUTimer *ts, QEMUClockType type,
                                  int scale,
                                  QEMUTimerCB *cb, void *opaque)
{
    timer_init_tl(ts, ctx->tlg.tl[type], scale, cb, opaque);
}

/**
 * aio_compute_timeout:
 * @ctx: the aio context
 *
 * Compute the timeout that a blocking aio_poll should use.
 */
int64_t aio_compute_timeout(AioContext *ctx);
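/* Illustrative sketch (not part of this header): a caller-allocated timer
 * that fires on @ctx's event loop 100ms from now, using aio_timer_init() as
 * recommended above.  MyTimeout and my_timeout_cb are hypothetical;
 * timer_mod(), timer_del() and qemu_clock_get_ms() come from qemu/timer.h.
 *
 *     typedef struct MyTimeout {
 *         QEMUTimer timer;
 *     } MyTimeout;
 *
 *     static void my_timeout_cb(void *opaque)
 *     {
 *         MyTimeout *t = opaque;
 *         ... handle expiry ...
 *     }
 *
 *     aio_timer_init(ctx, &t->timer, QEMU_CLOCK_REALTIME, SCALE_MS,
 *                    my_timeout_cb, t);
 *     timer_mod(&t->timer, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 100);
 *     ...
 *     timer_del(&t->timer);   // stop it before freeing the containing struct
 */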

/**
 * aio_disable_external:
 * @ctx: the aio context
 *
 * Disable the further processing of external clients.
 */
static inline void aio_disable_external(AioContext *ctx)
{
    atomic_inc(&ctx->external_disable_cnt);
}

/**
 * aio_enable_external:
 * @ctx: the aio context
 *
 * Enable the processing of external clients.
 */
static inline void aio_enable_external(AioContext *ctx)
{
    assert(ctx->external_disable_cnt > 0);
    atomic_dec(&ctx->external_disable_cnt);
}

/**
 * aio_external_disabled:
 * @ctx: the aio context
 *
 * Return true if the external clients are disabled.
 */
static inline bool aio_external_disabled(AioContext *ctx)
{
    return atomic_read(&ctx->external_disable_cnt);
}

/**
 * aio_node_check:
 * @ctx: the aio context
 * @is_external: Whether or not the checked node is an external event source.
 *
 * Check if the node's is_external flag is okay to be polled by the ctx at this
 * moment.  True means green light.
 */
static inline bool aio_node_check(AioContext *ctx, bool is_external)
{
    return !is_external || !atomic_read(&ctx->external_disable_cnt);
}

/**
 * Return the AioContext whose event loop runs in the current thread.
 *
 * If called from an IOThread this will be the IOThread's AioContext.  If
 * called from another thread it will be the main loop AioContext.
 */
AioContext *qemu_get_current_aio_context(void);

/**
 * aio_context_in_iothread:
 * @ctx: the aio context
 *
 * Return whether we are running in the I/O thread that manages @ctx.
 */
static inline bool aio_context_in_iothread(AioContext *ctx)
{
    return ctx == qemu_get_current_aio_context();
}

/**
 * aio_context_setup:
 * @ctx: the aio context
 *
 * Initialize the aio context.
 */
void aio_context_setup(AioContext *ctx);

/**
 * aio_context_set_poll_params:
 * @ctx: the aio context
 * @max_ns: how long to busy poll for, in nanoseconds
 * @grow: polling time growth factor
 * @shrink: polling time shrink factor
 *
 * Poll mode can be disabled by setting poll_max_ns to 0.
 */
void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
                                 int64_t grow, int64_t shrink,
                                 Error **errp);

#endif