xref: /qemu/block/io_uring.c (revision 433fcea4)
/*
 * Linux io_uring support.
 *
 * Copyright (C) 2009 IBM, Corp.
 * Copyright (C) 2009 Red Hat, Inc.
 * Copyright (C) 2019 Aarushi Mehta
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include <liburing.h>
#include "block/aio.h"
#include "qemu/queue.h"
#include "block/block.h"
#include "block/raw-aio.h"
#include "qemu/coroutine.h"
#include "qemu/defer-call.h"
#include "qapi/error.h"
#include "sysemu/block-backend.h"
#include "trace.h"

/* Only used for assertions. */
#include "qemu/coroutine_int.h"

/* io_uring ring size */
#define MAX_ENTRIES 128

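/*
 * Per-request state. A LuringAIOCB is not heap-allocated: luring_co_submit()
 * places it on its coroutine stack and yields until the completion path
 * fills in ->ret and wakes the coroutine.
 */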
typedef struct LuringAIOCB {
    Coroutine *co;
    struct io_uring_sqe sqeq;
    ssize_t ret;
    QEMUIOVector *qiov;
    bool is_read;
    QSIMPLEQ_ENTRY(LuringAIOCB) next;

    /*
     * Buffered reads may require resubmission, see
     * luring_resubmit_short_read().
     */
    int total_read;
    QEMUIOVector resubmit_qiov;
} LuringAIOCB;

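/*
 * Submission bookkeeping: in_queue counts requests waiting in submit_queue,
 * in_flight counts requests submitted to the kernel but not yet reaped, and
 * blocked is set by ioq_submit() when requests remain queued after a refused
 * submission, telling callers to hold off until completions make room.
 */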
typedef struct LuringQueue {
    unsigned int in_queue;
    unsigned int in_flight;
    bool blocked;
    QSIMPLEQ_HEAD(, LuringAIOCB) submit_queue;
} LuringQueue;

typedef struct LuringState {
    AioContext *aio_context;

    struct io_uring ring;

    /* No locking required, only accessed from AioContext home thread */
    LuringQueue io_q;

    QEMUBH *completion_bh;
} LuringState;

/**
 * luring_resubmit:
 *
 * Resubmit a request by appending it to submit_queue.  The caller must ensure
 * that ioq_submit() is called later so that submit_queue requests are started.
 */
static void luring_resubmit(LuringState *s, LuringAIOCB *luringcb)
{
    QSIMPLEQ_INSERT_TAIL(&s->io_q.submit_queue, luringcb, next);
    s->io_q.in_queue++;
}

/**
 * luring_resubmit_short_read:
 *
 * Short reads are rare but may occur. The remaining read request needs to be
 * resubmitted.
 */
static void luring_resubmit_short_read(LuringState *s, LuringAIOCB *luringcb,
                                       int nread)
{
    QEMUIOVector *resubmit_qiov;
    size_t remaining;

    trace_luring_resubmit_short_read(s, luringcb, nread);

    /* Update read position */
    luringcb->total_read += nread;
    remaining = luringcb->qiov->size - luringcb->total_read;

    /* Shorten qiov */
    resubmit_qiov = &luringcb->resubmit_qiov;
    if (resubmit_qiov->iov == NULL) {
        qemu_iovec_init(resubmit_qiov, luringcb->qiov->niov);
    } else {
        qemu_iovec_reset(resubmit_qiov);
    }
    qemu_iovec_concat(resubmit_qiov, luringcb->qiov, luringcb->total_read,
                      remaining);

    /* Update sqe */
    luringcb->sqeq.off += nread;
    luringcb->sqeq.addr = (__u64)(uintptr_t)luringcb->resubmit_qiov.iov;
    luringcb->sqeq.len = luringcb->resubmit_qiov.niov;

    luring_resubmit(s, luringcb);
}
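
/*
 * Illustrative example (numbers are hypothetical): a 64 KiB readv that
 * completes with nread = 16384 leaves remaining = 49152. total_read becomes
 * 16384, resubmit_qiov is rebuilt to cover the final 49152 bytes of the
 * original qiov, the sqe's offset advances by 16384, and the request is
 * re-queued. If the resubmission is also short, this path runs again with
 * total_read accumulating.
 */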

/**
 * luring_process_completions:
 * @s: AIO state
 *
 * Fetches completed I/O requests, consumes cqes, and invokes their callbacks.
 * The function is somewhat tricky because it supports nested event loops, for
 * example when a request callback invokes aio_poll().
 *
 * The function schedules the completion BH so that it can be called again
 * from a nested event loop. When there are no events left to complete, the
 * BH is cancelled.
 */
static void luring_process_completions(LuringState *s)
{
    struct io_uring_cqe *cqes;
    int total_bytes;
    /*
     * Request completion callbacks can run the nested event loop.
     * Schedule ourselves so the nested event loop will "see" remaining
     * completed requests and process them.  Without this, completion
     * callbacks that wait for other requests using a nested event loop
     * would hang forever.
     *
     * This workaround is needed because io_uring uses poll_wait, which
     * is woken up when new events are added to the uring, thus polling on
     * the same uring fd will block unless more events are received.
     *
     * Other leaf block drivers (drivers that access the data themselves)
     * are networking based, so they poll sockets for data and run the
     * correct coroutine.
     */
    qemu_bh_schedule(s->completion_bh);

    while (io_uring_peek_cqe(&s->ring, &cqes) == 0) {
        LuringAIOCB *luringcb;
        int ret;

        if (!cqes) {
            break;
        }

        luringcb = io_uring_cqe_get_data(cqes);
        ret = cqes->res;
        io_uring_cqe_seen(&s->ring, cqes);
        cqes = NULL;

        /* Change counters one-by-one because we can be nested. */
        s->io_q.in_flight--;
        trace_luring_process_completion(s, luringcb, ret);

        /* total_read is non-zero only for resubmitted read requests */
        total_bytes = ret + luringcb->total_read;

        if (ret < 0) {
            /*
             * Only writev/readv/fsync requests on regular files or host block
             * devices are submitted. Therefore -EAGAIN is not expected but it's
             * known to happen sometimes with Linux SCSI. Submit again and hope
             * the request completes successfully.
             *
             * For more information, see:
             * https://lore.kernel.org/io-uring/20210727165811.284510-3-axboe@kernel.dk/T/#u
             *
             * If the code is changed to submit other types of requests in the
             * future, then this workaround may need to be extended to deal with
             * genuine -EAGAIN results that should not be resubmitted
             * immediately.
             */
            if (ret == -EINTR || ret == -EAGAIN) {
                luring_resubmit(s, luringcb);
                continue;
            }
        } else if (!luringcb->qiov) {
            goto end;
        } else if (total_bytes == luringcb->qiov->size) {
            ret = 0;
        } else {
            /* Short read/write; only read/write requests reach this branch */
            if (luringcb->is_read) {
                if (ret > 0) {
                    luring_resubmit_short_read(s, luringcb, ret);
                    continue;
                } else {
                    /* Pad with zeroes */
                    qemu_iovec_memset(luringcb->qiov, total_bytes, 0,
                                      luringcb->qiov->size - total_bytes);
                    ret = 0;
                }
            } else {
                ret = -ENOSPC;
            }
        }
end:
        luringcb->ret = ret;
        qemu_iovec_destroy(&luringcb->resubmit_qiov);

        /*
         * If the coroutine is already entered it must be in ioq_submit()
         * and will notice luringcb->ret has been filled in when it
         * eventually runs later. Coroutines cannot be entered recursively
         * so avoid doing that!
         */
        assert(luringcb->co->ctx == s->aio_context);
        if (!qemu_coroutine_entered(luringcb->co)) {
            aio_co_wake(luringcb->co);
        }
    }
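    /*
     * All cqes currently available have been consumed, so the BH scheduled
     * at the top of this function is no longer needed.
     */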
    qemu_bh_cancel(s->completion_bh);
}

static int ioq_submit(LuringState *s)
{
    int ret = 0;
    LuringAIOCB *luringcb, *luringcb_next;

    while (s->io_q.in_queue > 0) {
        /*
         * Try to fetch sqes from the ring for requests waiting in
         * the overflow queue
         */
        QSIMPLEQ_FOREACH_SAFE(luringcb, &s->io_q.submit_queue, next,
                              luringcb_next) {
            struct io_uring_sqe *sqes = io_uring_get_sqe(&s->ring);
            if (!sqes) {
                break;
            }
            /* Prep sqe for submission */
            *sqes = luringcb->sqeq;
            QSIMPLEQ_REMOVE_HEAD(&s->io_q.submit_queue, next);
        }
        ret = io_uring_submit(&s->ring);
        trace_luring_io_uring_submit(s, ret);
        /* Prevent infinite loop if submission is refused */
        if (ret <= 0) {
            if (ret == -EAGAIN || ret == -EINTR) {
                continue;
            }
            break;
        }
        s->io_q.in_flight += ret;
        s->io_q.in_queue  -= ret;
    }
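    /*
     * If requests remain queued, submission stalled and callers must wait
     * for completions before submitting more.
     */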
    s->io_q.blocked = (s->io_q.in_queue > 0);

    if (s->io_q.in_flight) {
        /*
         * We can try to complete something just right away if there are
         * still requests in-flight.
         */
        luring_process_completions(s);
    }
    return ret;
}

static void luring_process_completions_and_submit(LuringState *s)
{
    luring_process_completions(s);

    if (s->io_q.in_queue > 0) {
        ioq_submit(s);
    }
}

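/*
 * Two entry points into the completion path: qemu_luring_completion_bh is
 * the BH scheduled by luring_process_completions() for nested event loops,
 * while qemu_luring_completion_cb runs when the ring fd becomes readable.
 */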
static void qemu_luring_completion_bh(void *opaque)
{
    LuringState *s = opaque;
    luring_process_completions_and_submit(s);
}

static void qemu_luring_completion_cb(void *opaque)
{
    LuringState *s = opaque;
    luring_process_completions_and_submit(s);
}

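/*
 * AioContext polling mode: qemu_luring_poll_cb checks for ready cqes without
 * entering the kernel, and qemu_luring_poll_ready consumes them, allowing
 * busy polling to bypass the ring fd handler.
 */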
static bool qemu_luring_poll_cb(void *opaque)
{
    LuringState *s = opaque;

    return io_uring_cq_ready(&s->ring);
}

static void qemu_luring_poll_ready(void *opaque)
{
    LuringState *s = opaque;

    luring_process_completions_and_submit(s);
}

static void ioq_init(LuringQueue *io_q)
{
    QSIMPLEQ_INIT(&io_q->submit_queue);
    io_q->in_queue = 0;
    io_q->in_flight = 0;
    io_q->blocked = false;
}

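/*
 * Callback registered with defer_call() by luring_do_submit(); it runs once
 * the outermost defer_call_end() is reached, or immediately when no
 * defer_call_begin()/defer_call_end() section is active, so sqe submissions
 * can be batched across requests.
 */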
static void luring_deferred_fn(void *opaque)
{
    LuringState *s = opaque;
    trace_luring_unplug_fn(s, s->io_q.blocked, s->io_q.in_queue,
                           s->io_q.in_flight);
    if (!s->io_q.blocked && s->io_q.in_queue > 0) {
        ioq_submit(s);
    }
}

/**
 * luring_do_submit:
 * @fd: file descriptor for I/O
 * @luringcb: AIO control block
 * @s: AIO state
 * @offset: offset for request
 * @type: type of request
 *
 * Preps the sqe embedded in @luringcb, appends it to the pending queue, and
 * either submits the queue immediately (once it has grown large enough) or
 * defers submission via defer_call().
 */
static int luring_do_submit(int fd, LuringAIOCB *luringcb, LuringState *s,
                            uint64_t offset, int type)
{
    int ret;
    struct io_uring_sqe *sqes = &luringcb->sqeq;

    switch (type) {
    case QEMU_AIO_WRITE:
    case QEMU_AIO_ZONE_APPEND:
        io_uring_prep_writev(sqes, fd, luringcb->qiov->iov,
                             luringcb->qiov->niov, offset);
        break;
    case QEMU_AIO_READ:
        io_uring_prep_readv(sqes, fd, luringcb->qiov->iov,
                            luringcb->qiov->niov, offset);
        break;
    case QEMU_AIO_FLUSH:
        io_uring_prep_fsync(sqes, fd, IORING_FSYNC_DATASYNC);
        break;
    default:
        fprintf(stderr, "%s: invalid AIO request type, aborting 0x%x.\n",
                        __func__, type);
        abort();
    }
    io_uring_sqe_set_data(sqes, luringcb);

    QSIMPLEQ_INSERT_TAIL(&s->io_q.submit_queue, luringcb, next);
    s->io_q.in_queue++;
    trace_luring_do_submit(s, s->io_q.blocked, s->io_q.in_queue,
                           s->io_q.in_flight);
    if (!s->io_q.blocked) {
        if (s->io_q.in_flight + s->io_q.in_queue >= MAX_ENTRIES) {
            ret = ioq_submit(s);
            trace_luring_do_submit_done(s, ret);
            return ret;
        }

        defer_call(luring_deferred_fn, s);
    }
    return 0;
}

int coroutine_fn luring_co_submit(BlockDriverState *bs, int fd, uint64_t offset,
                                  QEMUIOVector *qiov, int type)
{
    int ret;
    AioContext *ctx = qemu_get_current_aio_context();
    LuringState *s = aio_get_linux_io_uring(ctx);
    LuringAIOCB luringcb = {
        .co         = qemu_coroutine_self(),
        .ret        = -EINPROGRESS,
        .qiov       = qiov,
        .is_read    = (type == QEMU_AIO_READ),
    };
    trace_luring_co_submit(bs, s, &luringcb, fd, offset, qiov ? qiov->size : 0,
                           type);
    ret = luring_do_submit(fd, &luringcb, s, offset, type);

    if (ret < 0) {
        return ret;
    }

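    /*
     * ioq_submit() may already have reaped this request's completion, in
     * which case luringcb.ret is set and there is nothing to wait for.
     */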
    if (luringcb.ret == -EINPROGRESS) {
        qemu_coroutine_yield();
    }
    return luringcb.ret;
}
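
/*
 * Usage sketch (illustrative, not part of the original file): a block driver
 * running in coroutine context might issue a read along these lines, assuming
 * "fd", "offset", and "qiov" describe the request:
 *
 *     ret = luring_co_submit(bs, fd, offset, qiov, QEMU_AIO_READ);
 *     if (ret < 0) {
 *         ... handle error; ret is a negative errno value ...
 *     }
 */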

void luring_detach_aio_context(LuringState *s, AioContext *old_context)
{
    aio_set_fd_handler(old_context, s->ring.ring_fd,
                       NULL, NULL, NULL, NULL, s);
    qemu_bh_delete(s->completion_bh);
    s->aio_context = NULL;
}

void luring_attach_aio_context(LuringState *s, AioContext *new_context)
{
    s->aio_context = new_context;
    s->completion_bh = aio_bh_new(new_context, qemu_luring_completion_bh, s);
    aio_set_fd_handler(s->aio_context, s->ring.ring_fd,
                       qemu_luring_completion_cb, NULL,
                       qemu_luring_poll_cb, qemu_luring_poll_ready, s);
}

LuringState *luring_init(Error **errp)
{
    int rc;
    LuringState *s = g_new0(LuringState, 1);
    struct io_uring *ring = &s->ring;

    trace_luring_init_state(s, sizeof(*s));

    rc = io_uring_queue_init(MAX_ENTRIES, ring, 0);
    if (rc < 0) {
        /* io_uring_queue_init() returns a negative errno value on failure */
        error_setg_errno(errp, -rc, "failed to init linux io_uring ring");
        g_free(s);
        return NULL;
    }

    ioq_init(&s->io_q);
    return s;
}

void luring_cleanup(LuringState *s)
{
    io_uring_queue_exit(&s->ring);
    trace_luring_cleanup_state(s);
    g_free(s);
}
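
/*
 * Lifecycle sketch (illustrative): callers are expected to pair these entry
 * points roughly as follows, assuming "ctx" is the AioContext that will run
 * the ring's completions:
 *
 *     LuringState *s = luring_init(errp);
 *     if (s) {
 *         luring_attach_aio_context(s, ctx);
 *         ... submit I/O with luring_co_submit() ...
 *         luring_detach_aio_context(s, ctx);
 *         luring_cleanup(s);
 *     }
 */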