/*
 * QEMU aio implementation
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef QEMU_AIO_H
#define QEMU_AIO_H

#ifdef CONFIG_LINUX_IO_URING
#include <liburing.h>
#endif
#include "qemu/queue.h"
#include "qemu/event_notifier.h"
#include "qemu/thread.h"
#include "qemu/timer.h"

typedef struct BlockAIOCB BlockAIOCB;
typedef void BlockCompletionFunc(void *opaque, int ret);

typedef struct AIOCBInfo {
    void (*cancel_async)(BlockAIOCB *acb);
    AioContext *(*get_aio_context)(BlockAIOCB *acb);
    size_t aiocb_size;
} AIOCBInfo;

struct BlockAIOCB {
    const AIOCBInfo *aiocb_info;
    BlockDriverState *bs;
    BlockCompletionFunc *cb;
    void *opaque;
    int refcnt;
};

void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
                   BlockCompletionFunc *cb, void *opaque);
void qemu_aio_unref(void *p);
void qemu_aio_ref(void *p);
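
/*
 * A sketch of the usual allocation pattern in a driver; MyAIOCB and
 * my_aiocb_info are hypothetical names, not part of this API:
 *
 *     typedef struct MyAIOCB {
 *         BlockAIOCB common;
 *         int my_state;
 *     } MyAIOCB;
 *
 *     static const AIOCBInfo my_aiocb_info = {
 *         .aiocb_size = sizeof(MyAIOCB),
 *     };
 *
 *     MyAIOCB *acb = qemu_aio_get(&my_aiocb_info, bs, cb, opaque);
 *
 * The embedded BlockAIOCB is returned to the caller and released with
 * qemu_aio_unref() once the request completes.
 */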

typedef struct AioHandler AioHandler;
typedef QLIST_HEAD(, AioHandler) AioHandlerList;
typedef void QEMUBHFunc(void *opaque);
typedef bool AioPollFn(void *opaque);
typedef void IOHandler(void *opaque);

struct Coroutine;
struct ThreadPool;
struct LinuxAioState;
struct LuringState;

/* Is polling disabled? */
bool aio_poll_disabled(AioContext *ctx);

/* Callbacks for file descriptor monitoring implementations */
typedef struct {
    /*
     * update:
     * @ctx: the AioContext
     * @old_node: the existing handler or NULL if this file descriptor is being
     *            monitored for the first time
     * @new_node: the new handler or NULL if this file descriptor is being
     *            removed
     *
     * Add/remove/modify a monitored file descriptor.
     *
     * Called with ctx->list_lock acquired.
     */
    void (*update)(AioContext *ctx, AioHandler *old_node, AioHandler *new_node);

    /*
     * wait:
     * @ctx: the AioContext
     * @ready_list: list for handlers that become ready
     * @timeout: maximum duration to wait, in nanoseconds
     *
     * Wait for file descriptors to become ready and place them on ready_list.
     *
     * Called with ctx->list_lock incremented but not locked.
     *
     * Returns: number of ready file descriptors.
     */
    int (*wait)(AioContext *ctx, AioHandlerList *ready_list, int64_t timeout);

    /*
     * need_wait:
     * @ctx: the AioContext
     *
     * Tell aio_poll() when to stop userspace polling early because ->wait()
     * has fds ready.
     *
     * File descriptor monitoring implementations that cannot poll fd readiness
     * from userspace should use aio_poll_disabled() here.  This ensures that
     * file descriptors are not starved by handlers that frequently make
     * progress via userspace polling.
     *
     * Returns: true if ->wait() should be called, false otherwise.
     */
    bool (*need_wait)(AioContext *ctx);
} FDMonOps;
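
/*
 * A minimal sketch of how a monitoring implementation fills this in; the
 * fdmon_poll_update() and fdmon_poll_wait() names are assumptions for
 * illustration:
 *
 *     static const FDMonOps fdmon_poll_ops = {
 *         .update = fdmon_poll_update,
 *         .wait = fdmon_poll_wait,
 *         .need_wait = aio_poll_disabled,
 *     };
 *
 * A poll(2)-based implementation cannot check fd readiness from userspace,
 * hence aio_poll_disabled for .need_wait.
 */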

/*
 * Each aio_bh_poll() call carves off a slice of the BH list, so that newly
 * scheduled BHs are not processed until the next aio_bh_poll() call.  All
 * active aio_bh_poll() calls chain their slices together in a list, so that
 * nested aio_bh_poll() calls process all scheduled bottom halves.
 */
typedef QSLIST_HEAD(, QEMUBH) BHList;
typedef struct BHListSlice BHListSlice;
struct BHListSlice {
    BHList bh_list;
    QSIMPLEQ_ENTRY(BHListSlice) next;
};

typedef QSLIST_HEAD(, AioHandler) AioHandlerSList;

struct AioContext {
    GSource source;

    /* Used by AioContext users to protect from multi-threaded access.  */
    QemuRecMutex lock;

    /* The list of registered AIO handlers.  Protected by ctx->list_lock. */
    AioHandlerList aio_handlers;

    /* The list of AIO handlers to be deleted.  Protected by ctx->list_lock. */
    AioHandlerList deleted_aio_handlers;

    /* Used to avoid unnecessary event_notifier_set calls in aio_notify;
     * accessed with atomic primitives.  If this field is 0, everything
     * (file descriptors, bottom halves, timers) will be re-evaluated
     * before the next blocking poll(), thus the event_notifier_set call
     * can be skipped.  If it is non-zero, you may need to wake up a
     * concurrent aio_poll or the glib main event loop, making
     * event_notifier_set necessary.
     *
     * Bit 0 is reserved for GSource usage of the AioContext, and is 1
     * between a call to aio_ctx_prepare and the next call to aio_ctx_check.
     * Bits 1-31 simply count the number of active calls to aio_poll
     * that are in the prepare or poll phase.
     *
     * The GSource and aio_poll must use a different mechanism because
     * there is no certainty that a call to GSource's prepare callback
     * (via g_main_context_prepare) is indeed followed by check and
     * dispatch.  It's not clear whether this would be a bug, but let's
     * play safe and allow it---it will just cause extra calls to
     * event_notifier_set until the next call to dispatch.
     *
     * Instead, the aio_poll calls include both the prepare and the
     * dispatch phase, hence a simple counter is enough for them.
     */
    uint32_t notify_me;

    /* A lock to protect between QEMUBH and AioHandler adders and deleters,
     * and to ensure that no callbacks are removed while we're walking and
     * dispatching them.
     */
    QemuLockCnt list_lock;

    /* Bottom Halves pending aio_bh_poll() processing */
    BHList bh_list;

    /* Chained BH list slices for each nested aio_bh_poll() call */
    QSIMPLEQ_HEAD(, BHListSlice) bh_slice_list;

    /* Used by aio_notify.
     *
     * "notified" is used to avoid expensive event_notifier_test_and_clear
     * calls.  When it is clear, the EventNotifier is clear, or one thread
     * is going to clear "notified" before processing more events.  False
     * positives are possible, i.e. "notified" could be set even though the
     * EventNotifier is clear.
     *
     * Note that event_notifier_set *cannot* be optimized the same way.  For
     * more information on the problem that would result, see "#ifdef BUG2"
     * in the docs/aio_notify_accept.promela formal model.
     */
    bool notified;
    EventNotifier notifier;

    QSLIST_HEAD(, Coroutine) scheduled_coroutines;
    QEMUBH *co_schedule_bh;

    /* Thread pool for performing work and receiving completion callbacks.
     * Has its own locking.
     */
    struct ThreadPool *thread_pool;

#ifdef CONFIG_LINUX_AIO
    /*
     * State for native Linux AIO.  Uses aio_context_acquire/release for
     * locking.
     */
    struct LinuxAioState *linux_aio;
#endif
#ifdef CONFIG_LINUX_IO_URING
    /*
     * State for Linux io_uring.  Uses aio_context_acquire/release for
     * locking.
     */
    struct LuringState *linux_io_uring;

    /* State for file descriptor monitoring using Linux io_uring */
    struct io_uring fdmon_io_uring;
    AioHandlerSList submit_list;
#endif

    /* TimerLists for calling timers - one per clock type.  Has its own
     * locking.
     */
    QEMUTimerListGroup tlg;

    int external_disable_cnt;

    /* Number of AioHandlers without .io_poll() */
    int poll_disable_cnt;

    /* Polling mode parameters */
    int64_t poll_ns;        /* current polling time in nanoseconds */
    int64_t poll_max_ns;    /* maximum polling time in nanoseconds */
    int64_t poll_grow;      /* polling time growth factor */
    int64_t poll_shrink;    /* polling time shrink factor */

    /*
     * List of handlers participating in userspace polling.  Protected by
     * ctx->list_lock.  Iterated and modified mostly by the event loop thread
     * from aio_poll() with ctx->list_lock incremented.  aio_set_fd_handler()
     * only touches the list to delete nodes if ctx->list_lock's count is zero.
     */
    AioHandlerList poll_aio_handlers;

    /* Are we in polling mode or monitoring file descriptors? */
    bool poll_started;

    /* epoll(7) state used when built with CONFIG_EPOLL */
    int epollfd;

    const FDMonOps *fdmon_ops;
};

/**
 * aio_context_new: Allocate a new AioContext.
 *
 * An AioContext provides a mini event loop that can be waited on
 * synchronously.  It also provides bottom halves, a service to execute
 * a piece of code as soon as possible.
 */
AioContext *aio_context_new(Error **errp);
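
/*
 * For example (a sketch; &error_fatal makes allocation failure fatal):
 *
 *     AioContext *ctx = aio_context_new(&error_fatal);
 *     ...
 *     aio_context_unref(ctx);
 */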

/**
 * aio_context_ref:
 * @ctx: The AioContext to operate on.
 *
 * Add a reference to an AioContext.
 */
void aio_context_ref(AioContext *ctx);

/**
 * aio_context_unref:
 * @ctx: The AioContext to operate on.
 *
 * Drop a reference to an AioContext.
 */
void aio_context_unref(AioContext *ctx);

/* Take ownership of the AioContext.  If the AioContext will be shared between
 * threads, and a thread does not want to be interrupted, it will have to
 * take ownership around calls to aio_poll().  Otherwise, aio_poll()
 * automatically takes care of calling aio_context_acquire and
 * aio_context_release.
 *
 * Note that this is separate from bdrv_drained_begin/bdrv_drained_end.  A
 * thread still has to call those to avoid being interrupted by the guest.
 *
 * Bottom halves, timers and callbacks can be created or removed without
 * acquiring the AioContext.
 */
void aio_context_acquire(AioContext *ctx);

/* Relinquish ownership of the AioContext. */
void aio_context_release(AioContext *ctx);

/**
 * aio_bh_schedule_oneshot: Allocate a new bottom half structure that will run
 * only once and as soon as possible.
 */
void aio_bh_schedule_oneshot(AioContext *ctx, QEMUBHFunc *cb, void *opaque);

/**
 * aio_bh_new: Allocate a new bottom half structure.
 *
 * Bottom halves are lightweight callbacks whose invocation is guaranteed
 * to be wait-free, thread-safe and signal-safe.  The #QEMUBH structure
 * is opaque and must be allocated prior to its use.
 */
QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque);
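
/*
 * Typical lifecycle, sketched with a hypothetical QEMUBHFunc my_bh_cb:
 *
 *     QEMUBH *bh = aio_bh_new(ctx, my_bh_cb, opaque);
 *
 *     qemu_bh_schedule(bh);
 *     ...
 *     qemu_bh_delete(bh);
 *
 * qemu_bh_schedule() arranges for my_bh_cb() to run from ctx's event loop;
 * qemu_bh_delete() cancels the bottom half if still pending and releases it.
 */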

/**
 * aio_notify: Force processing of pending events.
 *
 * Similar to signaling a condition variable, aio_notify forces
 * aio_poll to exit, so that the next call will re-examine pending events.
 * The caller of aio_notify will usually call aio_poll again very soon,
 * or go through another iteration of the GLib main loop.  Hence, aio_notify
 * also has the side effect of recalculating the sets of file descriptors
 * that the main loop waits for.
 *
 * Calling aio_notify is rarely necessary, because for example scheduling
 * a bottom half calls it already.
 */
void aio_notify(AioContext *ctx);

/**
 * aio_notify_accept: Acknowledge receiving an aio_notify.
 *
 * aio_notify() uses an EventNotifier in order to wake up a sleeping
 * aio_poll() or g_main_context_iteration().  Calls to aio_notify() are
 * usually rare, but the AioContext has to clear the EventNotifier on
 * every aio_poll() or g_main_context_iteration() in order to avoid
 * busy waiting.  This event_notifier_test_and_clear() cannot be done
 * using the usual aio_context_set_event_notifier(), because it must
 * be done before processing all events (file descriptors, bottom halves,
 * timers).
 *
 * aio_notify_accept() is an optimized event_notifier_test_and_clear()
 * that is specific to an AioContext's notifier; it is used internally
 * to clear the EventNotifier only if aio_notify() had been called.
 */
void aio_notify_accept(AioContext *ctx);

/**
 * aio_bh_call: Executes the callback function of the specified BH.
 */
void aio_bh_call(QEMUBH *bh);

/**
 * aio_bh_poll: Poll bottom halves for an AioContext.
 *
 * This is an internal function used by the QEMU main loop.
 * Note that aio_bh_poll() must not be invoked concurrently.
 */
int aio_bh_poll(AioContext *ctx);

/**
 * qemu_bh_schedule: Schedule a bottom half.
 *
 * Scheduling a bottom half interrupts the main loop and causes the
 * execution of the callback that was passed to qemu_bh_new.
 *
 * Bottom halves that are scheduled from a bottom half handler are instantly
 * invoked.  This can create an infinite loop if a bottom half handler
 * schedules itself.
 *
 * @bh: The bottom half to be scheduled.
 */
void qemu_bh_schedule(QEMUBH *bh);

/**
 * qemu_bh_cancel: Cancel execution of a bottom half.
 *
 * Canceling execution of a bottom half undoes the effect of calls to
 * qemu_bh_schedule without freeing its resources yet.  While cancellation
 * itself is also wait-free and thread-safe, it can of course race with the
 * loop that executes bottom halves unless you are holding the iothread
 * mutex.  This makes it mostly useless if you are not holding the mutex.
 *
 * @bh: The bottom half to be canceled.
 */
void qemu_bh_cancel(QEMUBH *bh);

/**
 * qemu_bh_delete: Cancel execution of a bottom half and free its resources.
 *
 * Deleting a bottom half frees the memory that was allocated for it by
 * qemu_bh_new.  It also implies canceling the bottom half if it was
 * scheduled.
 * This function is asynchronous: the bottom half is only freed once the
 * event loop has finished with it.
 *
 * @bh: The bottom half to be deleted.
 */
void qemu_bh_delete(QEMUBH *bh);

/* Return whether there are any pending callbacks from the GSource
 * attached to the AioContext, before g_poll is invoked.
 *
 * This is used internally in the implementation of the GSource.
 */
bool aio_prepare(AioContext *ctx);

/* Return whether there are any pending callbacks from the GSource
 * attached to the AioContext, after g_poll is invoked.
 *
 * This is used internally in the implementation of the GSource.
 */
bool aio_pending(AioContext *ctx);

/* Dispatch any pending callbacks from the GSource attached to the AioContext.
 *
 * This is used internally in the implementation of the GSource.
 */
void aio_dispatch(AioContext *ctx);

/* Make progress in completing pending AIO work.  This can issue new AIO
 * requests as a result of executing I/O completion or bottom half callbacks.
 *
 * Return whether any progress was made by executing AIO or bottom half
 * handlers.  If @blocking == true, this should always be true except
 * if someone called aio_notify.
 *
 * If there are no pending bottom halves, but there are pending AIO
 * operations, it may not be possible to make any progress without
 * blocking.  If @blocking is true, this function will wait until one
 * or more AIO events have completed, to ensure something has moved
 * before returning.
 */
bool aio_poll(AioContext *ctx, bool blocking);
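
/*
 * A common pattern, sketched here, is to wait synchronously for a condition
 * set by a completion callback ("done" is a hypothetical flag):
 *
 *     while (!done) {
 *         aio_poll(ctx, true);
 *     }
 */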

/* Register a file descriptor and associated callbacks.  Behaves very similarly
 * to qemu_set_fd_handler.  Unlike qemu_set_fd_handler, these callbacks will
 * be invoked when using aio_poll().
 *
 * Code that invokes AIO completion functions should rely on this function
 * instead of qemu_set_fd_handler[2].
 */
void aio_set_fd_handler(AioContext *ctx,
                        int fd,
                        bool is_external,
                        IOHandler *io_read,
                        IOHandler *io_write,
                        AioPollFn *io_poll,
                        void *opaque);
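
/*
 * For example, to monitor a socket for readability (my_read_handler and
 * sockfd are hypothetical; pass NULL for events you do not care about):
 *
 *     aio_set_fd_handler(ctx, sockfd, false, my_read_handler, NULL, NULL,
 *                        opaque);
 *
 * Passing NULL for all callbacks unregisters the file descriptor:
 *
 *     aio_set_fd_handler(ctx, sockfd, false, NULL, NULL, NULL, NULL);
 */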

/* Set polling begin/end callbacks for a file descriptor that has already been
 * registered with aio_set_fd_handler.  Do nothing if the file descriptor is
 * not registered.
 */
void aio_set_fd_poll(AioContext *ctx, int fd,
                     IOHandler *io_poll_begin,
                     IOHandler *io_poll_end);

/* Register an event notifier and associated callbacks.  Behaves very similarly
 * to event_notifier_set_handler.  Unlike event_notifier_set_handler, these
 * callbacks will be invoked when using aio_poll().
 *
 * Code that invokes AIO completion functions should rely on this function
 * instead of event_notifier_set_handler.
 */
void aio_set_event_notifier(AioContext *ctx,
                            EventNotifier *notifier,
                            bool is_external,
                            EventNotifierHandler *io_read,
                            AioPollFn *io_poll);
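
/*
 * Sketch of wiring up an EventNotifier (my_notifier_read is hypothetical):
 *
 *     static void my_notifier_read(EventNotifier *e)
 *     {
 *         event_notifier_test_and_clear(e);
 *         ...process the event...
 *     }
 *
 *     event_notifier_init(&notifier, 0);
 *     aio_set_event_notifier(ctx, &notifier, false, my_notifier_read, NULL);
 */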

/* Set polling begin/end callbacks for an event notifier that has already been
 * registered with aio_set_event_notifier.  Do nothing if the event notifier is
 * not registered.
 */
void aio_set_event_notifier_poll(AioContext *ctx,
                                 EventNotifier *notifier,
                                 EventNotifierHandler *io_poll_begin,
                                 EventNotifierHandler *io_poll_end);

/* Return a GSource that lets the main loop poll the file descriptors attached
 * to this AioContext.
 */
GSource *aio_get_g_source(AioContext *ctx);
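
/*
 * For example, to drive an AioContext from the default GLib main loop:
 *
 *     GSource *source = aio_get_g_source(ctx);
 *
 *     g_source_attach(source, NULL);
 *     g_source_unref(source);
 */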

/* Return the ThreadPool bound to this AioContext */
struct ThreadPool *aio_get_thread_pool(AioContext *ctx);

/* Setup the LinuxAioState bound to this AioContext */
struct LinuxAioState *aio_setup_linux_aio(AioContext *ctx, Error **errp);

/* Return the LinuxAioState bound to this AioContext */
struct LinuxAioState *aio_get_linux_aio(AioContext *ctx);

/* Setup the LuringState bound to this AioContext */
struct LuringState *aio_setup_linux_io_uring(AioContext *ctx, Error **errp);

/* Return the LuringState bound to this AioContext */
struct LuringState *aio_get_linux_io_uring(AioContext *ctx);

/**
 * aio_timer_new_with_attrs:
 * @ctx: the aio context
 * @type: the clock type
 * @scale: the scale
 * @attributes: 0, or one to multiple OR'ed QEMU_TIMER_ATTR_<id> values
 *              to assign
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Allocate a new timer (with attributes) attached to the context @ctx.
 * The function is responsible for memory allocation.
 *
 * The preferred interface is aio_timer_init or aio_timer_init_with_attrs.
 * Use that unless you really need dynamic memory allocation.
 *
 * Returns: a pointer to the new timer
 */
static inline QEMUTimer *aio_timer_new_with_attrs(AioContext *ctx,
                                                  QEMUClockType type,
                                                  int scale, int attributes,
                                                  QEMUTimerCB *cb, void *opaque)
{
    return timer_new_full(&ctx->tlg, type, scale, attributes, cb, opaque);
}

/**
 * aio_timer_new:
 * @ctx: the aio context
 * @type: the clock type
 * @scale: the scale
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Allocate a new timer attached to the context @ctx.
 * See aio_timer_new_with_attrs for details.
 *
 * Returns: a pointer to the new timer
 */
static inline QEMUTimer *aio_timer_new(AioContext *ctx, QEMUClockType type,
                                       int scale,
                                       QEMUTimerCB *cb, void *opaque)
{
    return timer_new_full(&ctx->tlg, type, scale, 0, cb, opaque);
}
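
/*
 * Sketch: arm a one-shot timer 10ms from now (my_timer_cb is hypothetical;
 * free the timer later with timer_free()):
 *
 *     QEMUTimer *t = aio_timer_new(ctx, QEMU_CLOCK_REALTIME, SCALE_MS,
 *                                  my_timer_cb, opaque);
 *
 *     timer_mod(t, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 10);
 */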

/**
 * aio_timer_init_with_attrs:
 * @ctx: the aio context
 * @ts: the timer
 * @type: the clock type
 * @scale: the scale
 * @attributes: 0, or one to multiple OR'ed QEMU_TIMER_ATTR_<id> values
 *              to assign
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Initialise a new timer (with attributes) attached to the context @ctx.
 * The caller is responsible for memory allocation.
 */
static inline void aio_timer_init_with_attrs(AioContext *ctx,
                                             QEMUTimer *ts, QEMUClockType type,
                                             int scale, int attributes,
                                             QEMUTimerCB *cb, void *opaque)
{
    timer_init_full(ts, &ctx->tlg, type, scale, attributes, cb, opaque);
}

/**
 * aio_timer_init:
 * @ctx: the aio context
 * @ts: the timer
 * @type: the clock type
 * @scale: the scale
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Initialise a new timer attached to the context @ctx.
 * See aio_timer_init_with_attrs for details.
 */
static inline void aio_timer_init(AioContext *ctx,
                                  QEMUTimer *ts, QEMUClockType type,
                                  int scale,
                                  QEMUTimerCB *cb, void *opaque)
{
    timer_init_full(ts, &ctx->tlg, type, scale, 0, cb, opaque);
}

/**
 * aio_compute_timeout:
 * @ctx: the aio context
 *
 * Compute the timeout that a blocking aio_poll should use.
 */
int64_t aio_compute_timeout(AioContext *ctx);

/**
 * aio_disable_external:
 * @ctx: the aio context
 *
 * Disable the further processing of external clients.
 */
static inline void aio_disable_external(AioContext *ctx)
{
    atomic_inc(&ctx->external_disable_cnt);
}

/**
 * aio_enable_external:
 * @ctx: the aio context
 *
 * Enable the processing of external clients.
 */
static inline void aio_enable_external(AioContext *ctx)
{
    int old;

    old = atomic_fetch_dec(&ctx->external_disable_cnt);
    assert(old > 0);
    if (old == 1) {
        /* Kick event loop so it re-arms file descriptors */
        aio_notify(ctx);
    }
}
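
/*
 * Disable/enable calls nest and must be balanced.  A typical pattern
 * brackets a region during which external (guest-initiated) events must
 * not be processed:
 *
 *     aio_disable_external(ctx);
 *     ...do work that must not race with external handlers...
 *     aio_enable_external(ctx);
 */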

/**
 * aio_external_disabled:
 * @ctx: the aio context
 *
 * Return true if the external clients are disabled.
 */
static inline bool aio_external_disabled(AioContext *ctx)
{
    return atomic_read(&ctx->external_disable_cnt);
}

/**
 * aio_node_check:
 * @ctx: the aio context
 * @is_external: Whether or not the checked node is an external event source.
 *
 * Check whether a node with the given is_external flag may be polled by @ctx
 * at this moment.  True means green light.
 */
static inline bool aio_node_check(AioContext *ctx, bool is_external)
{
    return !is_external || !atomic_read(&ctx->external_disable_cnt);
}

/**
 * aio_co_schedule:
 * @ctx: the aio context
 * @co: the coroutine
 *
 * Start a coroutine on a remote AioContext.
 *
 * The coroutine must not be entered by anyone else while aio_co_schedule()
 * is active.  In addition the coroutine must have yielded unless ctx
 * is the context in which the coroutine is running (i.e. the value of
 * qemu_get_current_aio_context() from the coroutine itself).
 */
void aio_co_schedule(AioContext *ctx, struct Coroutine *co);
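
/*
 * Sketch: create a coroutine and hand it to another context's event loop
 * (my_co_fn is a hypothetical coroutine entry point):
 *
 *     Coroutine *co = qemu_coroutine_create(my_co_fn, opaque);
 *
 *     aio_co_schedule(ctx, co);
 */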

/**
 * aio_co_wake:
 * @co: the coroutine
 *
 * Restart a coroutine on the AioContext where it was running last, thus
 * preventing coroutines from jumping from one context to another when they
 * go to sleep.
 *
 * aio_co_wake may be executed either in coroutine or non-coroutine
 * context.  The coroutine must not be entered by anyone else while
 * aio_co_wake() is active.
 */
void aio_co_wake(struct Coroutine *co);

/**
 * aio_co_enter:
 * @ctx: the context to run the coroutine
 * @co: the coroutine to run
 *
 * Enter a coroutine in the specified AioContext.
 */
void aio_co_enter(AioContext *ctx, struct Coroutine *co);

/**
 * Return the AioContext whose event loop runs in the current thread.
 *
 * If called from an IOThread this will be the IOThread's AioContext.  If
 * called from another thread it will be the main loop AioContext.
 */
AioContext *qemu_get_current_aio_context(void);

/**
 * in_aio_context_home_thread:
 * @ctx: the aio context
 *
 * Return whether we are running in the thread that normally runs @ctx.  Note
 * that acquiring/releasing ctx does not affect the outcome, each AioContext
 * still only has one home thread that is responsible for running it.
 */
static inline bool in_aio_context_home_thread(AioContext *ctx)
{
    return ctx == qemu_get_current_aio_context();
}

/**
 * aio_context_setup:
 * @ctx: the aio context
 *
 * Initialize the aio context.
 */
void aio_context_setup(AioContext *ctx);

/**
 * aio_context_destroy:
 * @ctx: the aio context
 *
 * Destroy the aio context.
 */
void aio_context_destroy(AioContext *ctx);

/**
 * aio_context_set_poll_params:
 * @ctx: the aio context
 * @max_ns: how long to busy poll for, in nanoseconds
 * @grow: polling time growth factor
 * @shrink: polling time shrink factor
 *
 * Poll mode can be disabled by setting poll_max_ns to 0.
 */
void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
                                 int64_t grow, int64_t shrink,
                                 Error **errp);
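
/*
 * For example, a sketch enabling adaptive polling up to 32 microseconds
 * (the values are illustrative, not recommendations):
 *
 *     Error *local_err = NULL;
 *
 *     aio_context_set_poll_params(ctx, 32 * 1000, 2, 2, &local_err);
 *     if (local_err) {
 *         error_report_err(local_err);
 *     }
 */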

#endif