xref: /qemu/block/io.c (revision 7a4e543d)
1 /*
2  * Block layer I/O functions
3  *
4  * Copyright (c) 2003 Fabrice Bellard
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to deal
8  * in the Software without restriction, including without limitation the rights
9  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10  * copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22  * THE SOFTWARE.
23  */
24 
25 #include "qemu/osdep.h"
26 #include "trace.h"
27 #include "sysemu/block-backend.h"
28 #include "block/blockjob.h"
29 #include "block/block_int.h"
30 #include "block/throttle-groups.h"
31 #include "qemu/error-report.h"
32 
33 #define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */
34 
35 static BlockAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
36         int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
37         BlockCompletionFunc *cb, void *opaque);
38 static BlockAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
39         int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
40         BlockCompletionFunc *cb, void *opaque);
41 static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
42                                          int64_t sector_num, int nb_sectors,
43                                          QEMUIOVector *iov);
44 static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
45                                          int64_t sector_num, int nb_sectors,
46                                          QEMUIOVector *iov);
47 static int coroutine_fn bdrv_co_do_preadv(BlockDriverState *bs,
48     int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
49     BdrvRequestFlags flags);
50 static int coroutine_fn bdrv_co_do_pwritev(BlockDriverState *bs,
51     int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
52     BdrvRequestFlags flags);
53 static BlockAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
54                                          int64_t sector_num,
55                                          QEMUIOVector *qiov,
56                                          int nb_sectors,
57                                          BdrvRequestFlags flags,
58                                          BlockCompletionFunc *cb,
59                                          void *opaque,
60                                          bool is_write);
61 static void coroutine_fn bdrv_co_do_rw(void *opaque);
62 static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
63     int64_t sector_num, int nb_sectors, BdrvRequestFlags flags);
64 
65 /* throttling disk I/O limits */
66 void bdrv_set_io_limits(BlockDriverState *bs,
67                         ThrottleConfig *cfg)
68 {
69     int i;
70 
71     throttle_group_config(bs, cfg);
72 
73     for (i = 0; i < 2; i++) {
74         qemu_co_enter_next(&bs->throttled_reqs[i]);
75     }
76 }
77 
78 /* this function drains all the throttled I/Os */
79 static bool bdrv_start_throttled_reqs(BlockDriverState *bs)
80 {
81     bool drained = false;
82     bool enabled = bs->io_limits_enabled;
83     int i;
84 
85     bs->io_limits_enabled = false;
86 
87     for (i = 0; i < 2; i++) {
88         while (qemu_co_enter_next(&bs->throttled_reqs[i])) {
89             drained = true;
90         }
91     }
92 
93     bs->io_limits_enabled = enabled;
94 
95     return drained;
96 }
97 
98 void bdrv_io_limits_disable(BlockDriverState *bs)
99 {
100     bs->io_limits_enabled = false;
101     bdrv_start_throttled_reqs(bs);
102     throttle_group_unregister_bs(bs);
103 }
104 
105 /* should be called before bdrv_set_io_limits if a limit is set */
106 void bdrv_io_limits_enable(BlockDriverState *bs, const char *group)
107 {
108     assert(!bs->io_limits_enabled);
109     throttle_group_register_bs(bs, group);
110     bs->io_limits_enabled = true;
111 }
112 
113 void bdrv_io_limits_update_group(BlockDriverState *bs, const char *group)
114 {
115     /* this bs is not part of any group */
116     if (!bs->throttle_state) {
117         return;
118     }
119 
120     /* this bs is already part of the group we want */
121     if (!g_strcmp0(throttle_group_get_name(bs), group)) {
122         return;
123     }
124 
125     /* need to change the group this bs belongs to */
126     bdrv_io_limits_disable(bs);
127     bdrv_io_limits_enable(bs, group);
128 }
129 
130 void bdrv_setup_io_funcs(BlockDriver *bdrv)
131 {
132     /* Block drivers without coroutine functions need emulation */
133     if (!bdrv->bdrv_co_readv) {
134         bdrv->bdrv_co_readv = bdrv_co_readv_em;
135         bdrv->bdrv_co_writev = bdrv_co_writev_em;
136 
137         /* bdrv_co_readv_em()/bdrv_co_writev_em() work in terms of aio, so if
138          * the block driver lacks aio we need to emulate that too.
139          */
140         if (!bdrv->bdrv_aio_readv) {
141             /* add AIO emulation layer */
142             bdrv->bdrv_aio_readv = bdrv_aio_readv_em;
143             bdrv->bdrv_aio_writev = bdrv_aio_writev_em;
144         }
145     }
146 }
147 
148 void bdrv_refresh_limits(BlockDriverState *bs, Error **errp)
149 {
150     BlockDriver *drv = bs->drv;
151     Error *local_err = NULL;
152 
153     memset(&bs->bl, 0, sizeof(bs->bl));
154 
155     if (!drv) {
156         return;
157     }
158 
159     /* Take some limits from the children as a default */
160     if (bs->file) {
161         bdrv_refresh_limits(bs->file->bs, &local_err);
162         if (local_err) {
163             error_propagate(errp, local_err);
164             return;
165         }
166         bs->bl.opt_transfer_length = bs->file->bs->bl.opt_transfer_length;
167         bs->bl.max_transfer_length = bs->file->bs->bl.max_transfer_length;
168         bs->bl.min_mem_alignment = bs->file->bs->bl.min_mem_alignment;
169         bs->bl.opt_mem_alignment = bs->file->bs->bl.opt_mem_alignment;
170         bs->bl.max_iov = bs->file->bs->bl.max_iov;
171     } else {
172         bs->bl.min_mem_alignment = 512;
173         bs->bl.opt_mem_alignment = getpagesize();
174 
175         /* Safe default since most protocols use readv()/writev()/etc */
176         bs->bl.max_iov = IOV_MAX;
177     }
178 
179     if (bs->backing) {
180         bdrv_refresh_limits(bs->backing->bs, &local_err);
181         if (local_err) {
182             error_propagate(errp, local_err);
183             return;
184         }
185         bs->bl.opt_transfer_length =
186             MAX(bs->bl.opt_transfer_length,
187                 bs->backing->bs->bl.opt_transfer_length);
188         bs->bl.max_transfer_length =
189             MIN_NON_ZERO(bs->bl.max_transfer_length,
190                          bs->backing->bs->bl.max_transfer_length);
191         bs->bl.opt_mem_alignment =
192             MAX(bs->bl.opt_mem_alignment,
193                 bs->backing->bs->bl.opt_mem_alignment);
194         bs->bl.min_mem_alignment =
195             MAX(bs->bl.min_mem_alignment,
196                 bs->backing->bs->bl.min_mem_alignment);
197         bs->bl.max_iov =
198             MIN(bs->bl.max_iov,
199                 bs->backing->bs->bl.max_iov);
200     }
201 
202     /* Then let the driver override it */
203     if (drv->bdrv_refresh_limits) {
204         drv->bdrv_refresh_limits(bs, errp);
205     }
206 }
207 
208 /**
209  * The copy-on-read flag is actually a reference count so multiple users may
210  * use the feature without worrying about clobbering its previous state.
211  * Copy-on-read stays enabled until every user has disabled it again.
212  */
213 void bdrv_enable_copy_on_read(BlockDriverState *bs)
214 {
215     bs->copy_on_read++;
216 }
217 
218 void bdrv_disable_copy_on_read(BlockDriverState *bs)
219 {
220     assert(bs->copy_on_read > 0);
221     bs->copy_on_read--;
222 }
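
/*
 * A minimal usage sketch (hypothetical caller): pair every enable with a
 * disable so the reference count drops back to zero when the last user is
 * done:
 *
 *     bdrv_enable_copy_on_read(bs);
 *     ... submit reads that should populate the image via copy-on-read ...
 *     bdrv_disable_copy_on_read(bs);
 */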
223 
224 /* Check if any requests are in-flight (including throttled requests) */
225 bool bdrv_requests_pending(BlockDriverState *bs)
226 {
227     BdrvChild *child;
228 
229     if (!QLIST_EMPTY(&bs->tracked_requests)) {
230         return true;
231     }
232     if (!qemu_co_queue_empty(&bs->throttled_reqs[0])) {
233         return true;
234     }
235     if (!qemu_co_queue_empty(&bs->throttled_reqs[1])) {
236         return true;
237     }
238 
239     QLIST_FOREACH(child, &bs->children, next) {
240         if (bdrv_requests_pending(child->bs)) {
241             return true;
242         }
243     }
244 
245     return false;
246 }
247 
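/*
 * Recursively invoke the driver's .bdrv_drain callback, if implemented, on bs
 * and on all of its children so drivers can drain their internal queues.
 */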
248 static void bdrv_drain_recurse(BlockDriverState *bs)
249 {
250     BdrvChild *child;
251 
252     if (bs->drv && bs->drv->bdrv_drain) {
253         bs->drv->bdrv_drain(bs);
254     }
255     QLIST_FOREACH(child, &bs->children, next) {
256         bdrv_drain_recurse(child->bs);
257     }
258 }
259 
260 /*
261  * Wait for pending requests to complete on a single BlockDriverState subtree,
262  * and suspend the block driver's internal I/O until the next request arrives.
263  *
264  * Note that unlike bdrv_drain_all(), the caller must hold the
265  * BlockDriverState's AioContext.
266  *
267  * Only this BlockDriverState's AioContext is run, so in-flight requests must
268  * not depend on events in other AioContexts.  If they do, use
269  * bdrv_drain_all() instead.
270  */
271 void bdrv_drain(BlockDriverState *bs)
272 {
273     bool busy = true;
274 
275     bdrv_drain_recurse(bs);
276     while (busy) {
277         /* Keep iterating */
278         bdrv_flush_io_queue(bs);
279         busy = bdrv_requests_pending(bs);
280         busy |= aio_poll(bdrv_get_aio_context(bs), busy);
281     }
282 }
283 
284 /*
285  * Wait for pending requests to complete across all BlockDriverStates
286  *
287  * This function does not flush data to disk, use bdrv_flush_all() for that
288  * after calling this function.
289  */
290 void bdrv_drain_all(void)
291 {
292     /* Always run first iteration so any pending completion BHs run */
293     bool busy = true;
294     BlockDriverState *bs = NULL;
295     GSList *aio_ctxs = NULL, *ctx;
296 
297     while ((bs = bdrv_next(bs))) {
298         AioContext *aio_context = bdrv_get_aio_context(bs);
299 
300         aio_context_acquire(aio_context);
301         if (bs->job) {
302             block_job_pause(bs->job);
303         }
304         bdrv_drain_recurse(bs);
305         aio_context_release(aio_context);
306 
307         if (!g_slist_find(aio_ctxs, aio_context)) {
308             aio_ctxs = g_slist_prepend(aio_ctxs, aio_context);
309         }
310     }
311 
312     /* Note that completion of an asynchronous I/O operation can trigger any
313      * number of other I/O operations on other devices---for example a
314      * coroutine can submit an I/O request to another device in response to
315      * request completion.  Therefore we must keep looping until there is no
316      * more activity rather than simply draining each device independently.
317      */
318     while (busy) {
319         busy = false;
320 
321         for (ctx = aio_ctxs; ctx != NULL; ctx = ctx->next) {
322             AioContext *aio_context = ctx->data;
323             bs = NULL;
324 
325             aio_context_acquire(aio_context);
326             while ((bs = bdrv_next(bs))) {
327                 if (aio_context == bdrv_get_aio_context(bs)) {
328                     bdrv_flush_io_queue(bs);
329                     if (bdrv_requests_pending(bs)) {
330                         busy = true;
331                         aio_poll(aio_context, busy);
332                     }
333                 }
334             }
335             busy |= aio_poll(aio_context, false);
336             aio_context_release(aio_context);
337         }
338     }
339 
340     bs = NULL;
341     while ((bs = bdrv_next(bs))) {
342         AioContext *aio_context = bdrv_get_aio_context(bs);
343 
344         aio_context_acquire(aio_context);
345         if (bs->job) {
346             block_job_resume(bs->job);
347         }
348         aio_context_release(aio_context);
349     }
350     g_slist_free(aio_ctxs);
351 }
352 
353 /**
354  * Remove an active request from the tracked requests list
355  *
356  * This function should be called when a tracked request is completing.
357  */
358 static void tracked_request_end(BdrvTrackedRequest *req)
359 {
360     if (req->serialising) {
361         req->bs->serialising_in_flight--;
362     }
363 
364     QLIST_REMOVE(req, list);
365     qemu_co_queue_restart_all(&req->wait_queue);
366 }
367 
368 /**
369  * Add an active request to the tracked requests list
370  */
371 static void tracked_request_begin(BdrvTrackedRequest *req,
372                                   BlockDriverState *bs,
373                                   int64_t offset,
374                                   unsigned int bytes,
375                                   enum BdrvTrackedRequestType type)
376 {
377     *req = (BdrvTrackedRequest){
378         .bs = bs,
379         .offset         = offset,
380         .bytes          = bytes,
381         .type           = type,
382         .co             = qemu_coroutine_self(),
383         .serialising    = false,
384         .overlap_offset = offset,
385         .overlap_bytes  = bytes,
386     };
387 
388     qemu_co_queue_init(&req->wait_queue);
389 
390     QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
391 }
392 
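/*
 * Mark a tracked request as serialising and widen its overlap range to the
 * given alignment, so that overlapping requests will wait for it.
 */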
393 static void mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
394 {
395     int64_t overlap_offset = req->offset & ~(align - 1);
396     unsigned int overlap_bytes = ROUND_UP(req->offset + req->bytes, align)
397                                - overlap_offset;
398 
399     if (!req->serialising) {
400         req->bs->serialising_in_flight++;
401         req->serialising = true;
402     }
403 
404     req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
405     req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
406 }
407 
408 /**
409  * Round a region to cluster boundaries
410  */
411 void bdrv_round_to_clusters(BlockDriverState *bs,
412                             int64_t sector_num, int nb_sectors,
413                             int64_t *cluster_sector_num,
414                             int *cluster_nb_sectors)
415 {
416     BlockDriverInfo bdi;
417 
418     if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
419         *cluster_sector_num = sector_num;
420         *cluster_nb_sectors = nb_sectors;
421     } else {
422         int64_t c = bdi.cluster_size / BDRV_SECTOR_SIZE;
423         *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c);
424         *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num +
425                                             nb_sectors, c);
426     }
427 }
428 
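/*
 * Return the cluster size in bytes, falling back to the request alignment
 * when the driver does not report a cluster size.
 */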
429 static int bdrv_get_cluster_size(BlockDriverState *bs)
430 {
431     BlockDriverInfo bdi;
432     int ret;
433 
434     ret = bdrv_get_info(bs, &bdi);
435     if (ret < 0 || bdi.cluster_size == 0) {
436         return bs->request_alignment;
437     } else {
438         return bdi.cluster_size;
439     }
440 }
441 
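/*
 * Return true if the byte range [offset, offset + bytes) intersects the
 * overlap range of the given tracked request.
 */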
442 static bool tracked_request_overlaps(BdrvTrackedRequest *req,
443                                      int64_t offset, unsigned int bytes)
444 {
445     /*        aaaa   bbbb */
446     if (offset >= req->overlap_offset + req->overlap_bytes) {
447         return false;
448     }
449     /* bbbb   aaaa        */
450     if (req->overlap_offset >= offset + bytes) {
451         return false;
452     }
453     return true;
454 }
455 
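/*
 * Wait until no in-flight request that must be serialised against 'self'
 * overlaps with it.  Returns true if the coroutine had to wait at least once.
 */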
456 static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
457 {
458     BlockDriverState *bs = self->bs;
459     BdrvTrackedRequest *req;
460     bool retry;
461     bool waited = false;
462 
463     if (!bs->serialising_in_flight) {
464         return false;
465     }
466 
467     do {
468         retry = false;
469         QLIST_FOREACH(req, &bs->tracked_requests, list) {
470             if (req == self || (!req->serialising && !self->serialising)) {
471                 continue;
472             }
473             if (tracked_request_overlaps(req, self->overlap_offset,
474                                          self->overlap_bytes))
475             {
476                 /* Hitting this means there was a reentrant request, for
477                  * example, a block driver issuing nested requests.  This must
478                  * never happen since it means deadlock.
479                  */
480                 assert(qemu_coroutine_self() != req->co);
481 
482                 /* If the request is already (indirectly) waiting for us, or
483                  * will wait for us as soon as it wakes up, then just go on
484                  * (instead of producing a deadlock in the former case). */
485                 if (!req->waiting_for) {
486                     self->waiting_for = req;
487                     qemu_co_queue_wait(&req->wait_queue);
488                     self->waiting_for = NULL;
489                     retry = true;
490                     waited = true;
491                     break;
492                 }
493             }
494         }
495     } while (retry);
496 
497     return waited;
498 }
499 
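/*
 * Basic sanity checks for a byte-based request: reject oversized requests,
 * negative offsets, and requests against a device with no medium inserted.
 */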
500 static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
501                                    size_t size)
502 {
503     if (size > BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS) {
504         return -EIO;
505     }
506 
507     if (!bdrv_is_inserted(bs)) {
508         return -ENOMEDIUM;
509     }
510 
511     if (offset < 0) {
512         return -EIO;
513     }
514 
515     return 0;
516 }
517 
518 static int bdrv_check_request(BlockDriverState *bs, int64_t sector_num,
519                               int nb_sectors)
520 {
521     if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
522         return -EIO;
523     }
524 
525     return bdrv_check_byte_request(bs, sector_num * BDRV_SECTOR_SIZE,
526                                    nb_sectors * BDRV_SECTOR_SIZE);
527 }
528 
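/* Argument/result bundle for the synchronous read/write coroutine wrappers
 * below (see bdrv_rw_co_entry() and bdrv_prwv_co()). */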
529 typedef struct RwCo {
530     BlockDriverState *bs;
531     int64_t offset;
532     QEMUIOVector *qiov;
533     bool is_write;
534     int ret;
535     BdrvRequestFlags flags;
536 } RwCo;
537 
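/* Coroutine entry point used by bdrv_prwv_co() */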
538 static void coroutine_fn bdrv_rw_co_entry(void *opaque)
539 {
540     RwCo *rwco = opaque;
541 
542     if (!rwco->is_write) {
543         rwco->ret = bdrv_co_do_preadv(rwco->bs, rwco->offset,
544                                       rwco->qiov->size, rwco->qiov,
545                                       rwco->flags);
546     } else {
547         rwco->ret = bdrv_co_do_pwritev(rwco->bs, rwco->offset,
548                                        rwco->qiov->size, rwco->qiov,
549                                        rwco->flags);
550     }
551 }
552 
553 /*
554  * Process a vectored synchronous request using coroutines
555  */
556 static int bdrv_prwv_co(BlockDriverState *bs, int64_t offset,
557                         QEMUIOVector *qiov, bool is_write,
558                         BdrvRequestFlags flags)
559 {
560     Coroutine *co;
561     RwCo rwco = {
562         .bs = bs,
563         .offset = offset,
564         .qiov = qiov,
565         .is_write = is_write,
566         .ret = NOT_DONE,
567         .flags = flags,
568     };
569 
570     /**
571      * In a synchronous call context, while the vCPU is blocked, the
572      * throttling timers cannot fire, so I/O throttling has to be disabled
573      * here if it has been enabled.
574      */
575     if (bs->io_limits_enabled) {
576         fprintf(stderr, "Disabling I/O throttling on '%s' due "
577                         "to synchronous I/O.\n", bdrv_get_device_name(bs));
578         bdrv_io_limits_disable(bs);
579     }
580 
581     if (qemu_in_coroutine()) {
582         /* Fast-path if already in coroutine context */
583         bdrv_rw_co_entry(&rwco);
584     } else {
585         AioContext *aio_context = bdrv_get_aio_context(bs);
586 
587         co = qemu_coroutine_create(bdrv_rw_co_entry);
588         qemu_coroutine_enter(co, &rwco);
589         while (rwco.ret == NOT_DONE) {
590             aio_poll(aio_context, true);
591         }
592     }
593     return rwco.ret;
594 }
595 
596 /*
597  * Process a synchronous request using coroutines
598  */
599 static int bdrv_rw_co(BlockDriverState *bs, int64_t sector_num, uint8_t *buf,
600                       int nb_sectors, bool is_write, BdrvRequestFlags flags)
601 {
602     QEMUIOVector qiov;
603     struct iovec iov = {
604         .iov_base = (void *)buf,
605         .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
606     };
607 
608     if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
609         return -EINVAL;
610     }
611 
612     qemu_iovec_init_external(&qiov, &iov, 1);
613     return bdrv_prwv_co(bs, sector_num << BDRV_SECTOR_BITS,
614                         &qiov, is_write, flags);
615 }
616 
617 /* Return < 0 on error. See bdrv_write() for the return codes. */
618 int bdrv_read(BlockDriverState *bs, int64_t sector_num,
619               uint8_t *buf, int nb_sectors)
620 {
621     return bdrv_rw_co(bs, sector_num, buf, nb_sectors, false, 0);
622 }
623 
624 /* Just like bdrv_read(), but with I/O throttling temporarily disabled */
625 int bdrv_read_unthrottled(BlockDriverState *bs, int64_t sector_num,
626                           uint8_t *buf, int nb_sectors)
627 {
628     bool enabled;
629     int ret;
630 
631     enabled = bs->io_limits_enabled;
632     bs->io_limits_enabled = false;
633     ret = bdrv_read(bs, sector_num, buf, nb_sectors);
634     bs->io_limits_enabled = enabled;
635     return ret;
636 }
637 
638 /* Return < 0 on error. Important errors are:
639   -EIO         generic I/O error (may happen for all errors)
640   -ENOMEDIUM   No media inserted.
641   -EINVAL      Invalid sector number or nb_sectors
642   -EACCES      Trying to write to a read-only device
643 */
644 int bdrv_write(BlockDriverState *bs, int64_t sector_num,
645                const uint8_t *buf, int nb_sectors)
646 {
647     return bdrv_rw_co(bs, sector_num, (uint8_t *)buf, nb_sectors, true, 0);
648 }
649 
650 int bdrv_write_zeroes(BlockDriverState *bs, int64_t sector_num,
651                       int nb_sectors, BdrvRequestFlags flags)
652 {
653     return bdrv_rw_co(bs, sector_num, NULL, nb_sectors, true,
654                       BDRV_REQ_ZERO_WRITE | flags);
655 }
656 
657 /*
658  * Completely zero out a block device with the help of bdrv_write_zeroes.
659  * The operation is sped up by checking the block status and only writing
660  * zeroes to sectors that do not already read back as zeroes. Optional
661  * flags are passed through to bdrv_write_zeroes (e.g. BDRV_REQ_MAY_UNMAP).
662  *
663  * Returns < 0 on error, 0 on success. For error codes see bdrv_write().
664  */
665 int bdrv_make_zero(BlockDriverState *bs, BdrvRequestFlags flags)
666 {
667     int64_t target_sectors, ret, nb_sectors, sector_num = 0;
668     BlockDriverState *file;
669     int n;
670 
671     target_sectors = bdrv_nb_sectors(bs);
672     if (target_sectors < 0) {
673         return target_sectors;
674     }
675 
676     for (;;) {
677         nb_sectors = MIN(target_sectors - sector_num, BDRV_REQUEST_MAX_SECTORS);
678         if (nb_sectors <= 0) {
679             return 0;
680         }
681         ret = bdrv_get_block_status(bs, sector_num, nb_sectors, &n, &file);
682         if (ret < 0) {
683             error_report("error getting block status at sector %" PRId64 ": %s",
684                          sector_num, strerror(-ret));
685             return ret;
686         }
687         if (ret & BDRV_BLOCK_ZERO) {
688             sector_num += n;
689             continue;
690         }
691         ret = bdrv_write_zeroes(bs, sector_num, n, flags);
692         if (ret < 0) {
693             error_report("error writing zeroes at sector %" PRId64 ": %s",
694                          sector_num, strerror(-ret));
695             return ret;
696         }
697         sector_num += n;
698     }
699 }
700 
701 int bdrv_pread(BlockDriverState *bs, int64_t offset, void *buf, int bytes)
702 {
703     QEMUIOVector qiov;
704     struct iovec iov = {
705         .iov_base = (void *)buf,
706         .iov_len = bytes,
707     };
708     int ret;
709 
710     if (bytes < 0) {
711         return -EINVAL;
712     }
713 
714     qemu_iovec_init_external(&qiov, &iov, 1);
715     ret = bdrv_prwv_co(bs, offset, &qiov, false, 0);
716     if (ret < 0) {
717         return ret;
718     }
719 
720     return bytes;
721 }
722 
723 int bdrv_pwritev(BlockDriverState *bs, int64_t offset, QEMUIOVector *qiov)
724 {
725     int ret;
726 
727     ret = bdrv_prwv_co(bs, offset, qiov, true, 0);
728     if (ret < 0) {
729         return ret;
730     }
731 
732     return qiov->size;
733 }
734 
735 int bdrv_pwrite(BlockDriverState *bs, int64_t offset,
736                 const void *buf, int bytes)
737 {
738     QEMUIOVector qiov;
739     struct iovec iov = {
740         .iov_base   = (void *) buf,
741         .iov_len    = bytes,
742     };
743 
744     if (bytes < 0) {
745         return -EINVAL;
746     }
747 
748     qemu_iovec_init_external(&qiov, &iov, 1);
749     return bdrv_pwritev(bs, offset, &qiov);
750 }
751 
752 /*
753  * Writes to the file and ensures that no writes are reordered across this
754  * request (acts as a barrier)
755  *
756  * Returns 0 on success, -errno in error cases.
757  */
758 int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset,
759     const void *buf, int count)
760 {
761     int ret;
762 
763     ret = bdrv_pwrite(bs, offset, buf, count);
764     if (ret < 0) {
765         return ret;
766     }
767 
768     /* No flush needed for cache modes that already do it */
769     if (bs->enable_write_cache) {
770         bdrv_flush(bs);
771     }
772 
773     return 0;
774 }
775 
776 static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs,
777         int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
778 {
779     /* Perform I/O through a temporary buffer so that users who scribble over
780      * their read buffer while the operation is in progress do not end up
781      * modifying the image file.  This is critical for zero-copy guest I/O
782      * where anything might happen inside guest memory.
783      */
784     void *bounce_buffer;
785 
786     BlockDriver *drv = bs->drv;
787     struct iovec iov;
788     QEMUIOVector bounce_qiov;
789     int64_t cluster_sector_num;
790     int cluster_nb_sectors;
791     size_t skip_bytes;
792     int ret;
793 
794     /* Cover the entire cluster so that no additional backing file I/O is
795      * required when allocating the cluster in the image file.
796      */
797     bdrv_round_to_clusters(bs, sector_num, nb_sectors,
798                            &cluster_sector_num, &cluster_nb_sectors);
799 
800     trace_bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors,
801                                    cluster_sector_num, cluster_nb_sectors);
802 
803     iov.iov_len = cluster_nb_sectors * BDRV_SECTOR_SIZE;
804     iov.iov_base = bounce_buffer = qemu_try_blockalign(bs, iov.iov_len);
805     if (bounce_buffer == NULL) {
806         ret = -ENOMEM;
807         goto err;
808     }
809 
810     qemu_iovec_init_external(&bounce_qiov, &iov, 1);
811 
812     ret = drv->bdrv_co_readv(bs, cluster_sector_num, cluster_nb_sectors,
813                              &bounce_qiov);
814     if (ret < 0) {
815         goto err;
816     }
817 
818     if (drv->bdrv_co_write_zeroes &&
819         buffer_is_zero(bounce_buffer, iov.iov_len)) {
820         ret = bdrv_co_do_write_zeroes(bs, cluster_sector_num,
821                                       cluster_nb_sectors, 0);
822     } else {
823         /* This does not change the data on the disk, so it is not necessary
824          * to flush even in cache=writethrough mode.
825          */
826         ret = drv->bdrv_co_writev(bs, cluster_sector_num, cluster_nb_sectors,
827                                   &bounce_qiov);
828     }
829 
830     if (ret < 0) {
831         /* It might be okay to ignore write errors for guest requests.  If this
832          * is a deliberate copy-on-read then we don't want to ignore the error.
833          * Simply report it in all cases.
834          */
835         goto err;
836     }
837 
838     skip_bytes = (sector_num - cluster_sector_num) * BDRV_SECTOR_SIZE;
839     qemu_iovec_from_buf(qiov, 0, bounce_buffer + skip_bytes,
840                         nb_sectors * BDRV_SECTOR_SIZE);
841 
842 err:
843     qemu_vfree(bounce_buffer);
844     return ret;
845 }
846 
847 /*
848  * Forwards an already correctly aligned request to the BlockDriver. This
849  * handles copy on read and zeroing after EOF; any other features must be
850  * implemented by the caller.
851  */
852 static int coroutine_fn bdrv_aligned_preadv(BlockDriverState *bs,
853     BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
854     int64_t align, QEMUIOVector *qiov, int flags)
855 {
856     BlockDriver *drv = bs->drv;
857     int ret;
858 
859     int64_t sector_num = offset >> BDRV_SECTOR_BITS;
860     unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS;
861 
862     assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
863     assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
864     assert(!qiov || bytes == qiov->size);
865 
866     /* Handle Copy on Read and associated serialisation */
867     if (flags & BDRV_REQ_COPY_ON_READ) {
868         /* If we touch the same cluster it counts as an overlap.  This
869          * guarantees that allocating writes will be serialized and not race
870          * with each other for the same cluster.  For example, in copy-on-read
871          * it ensures that the CoR read and write operations are atomic and
872          * guest writes cannot interleave between them. */
873         mark_request_serialising(req, bdrv_get_cluster_size(bs));
874     }
875 
876     if (!(flags & BDRV_REQ_NO_SERIALISING)) {
877         wait_serialising_requests(req);
878     }
879 
880     if (flags & BDRV_REQ_COPY_ON_READ) {
881         int pnum;
882 
883         ret = bdrv_is_allocated(bs, sector_num, nb_sectors, &pnum);
884         if (ret < 0) {
885             goto out;
886         }
887 
888         if (!ret || pnum != nb_sectors) {
889             ret = bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors, qiov);
890             goto out;
891         }
892     }
893 
894     /* Forward the request to the BlockDriver */
895     if (!bs->zero_beyond_eof) {
896         ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
897     } else {
898         /* Read zeros after EOF */
899         int64_t total_sectors, max_nb_sectors;
900 
901         total_sectors = bdrv_nb_sectors(bs);
902         if (total_sectors < 0) {
903             ret = total_sectors;
904             goto out;
905         }
906 
907         max_nb_sectors = ROUND_UP(MAX(0, total_sectors - sector_num),
908                                   align >> BDRV_SECTOR_BITS);
909         if (nb_sectors < max_nb_sectors) {
910             ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
911         } else if (max_nb_sectors > 0) {
912             QEMUIOVector local_qiov;
913 
914             qemu_iovec_init(&local_qiov, qiov->niov);
915             qemu_iovec_concat(&local_qiov, qiov, 0,
916                               max_nb_sectors * BDRV_SECTOR_SIZE);
917 
918             ret = drv->bdrv_co_readv(bs, sector_num, max_nb_sectors,
919                                      &local_qiov);
920 
921             qemu_iovec_destroy(&local_qiov);
922         } else {
923             ret = 0;
924         }
925 
926         /* Reading beyond end of file is supposed to produce zeroes */
927         if (ret == 0 && total_sectors < sector_num + nb_sectors) {
928             uint64_t offset = MAX(0, total_sectors - sector_num);
929             uint64_t bytes = (sector_num + nb_sectors - offset) *
930                               BDRV_SECTOR_SIZE;
931             qemu_iovec_memset(qiov, offset * BDRV_SECTOR_SIZE, 0, bytes);
932         }
933     }
934 
935 out:
936     return ret;
937 }
938 
939 /*
940  * Handle a read request in coroutine context
941  */
942 static int coroutine_fn bdrv_co_do_preadv(BlockDriverState *bs,
943     int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
944     BdrvRequestFlags flags)
945 {
946     BlockDriver *drv = bs->drv;
947     BdrvTrackedRequest req;
948 
949     /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */
950     uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
951     uint8_t *head_buf = NULL;
952     uint8_t *tail_buf = NULL;
953     QEMUIOVector local_qiov;
954     bool use_local_qiov = false;
955     int ret;
956 
957     if (!drv) {
958         return -ENOMEDIUM;
959     }
960 
961     ret = bdrv_check_byte_request(bs, offset, bytes);
962     if (ret < 0) {
963         return ret;
964     }
965 
966     /* Don't do copy-on-read if the data is read ahead of a write operation */
967     if (bs->copy_on_read && !(flags & BDRV_REQ_NO_SERIALISING)) {
968         flags |= BDRV_REQ_COPY_ON_READ;
969     }
970 
971     /* throttling disk I/O */
972     if (bs->io_limits_enabled) {
973         throttle_group_co_io_limits_intercept(bs, bytes, false);
974     }
975 
976     /* Align read if necessary by padding qiov */
977     if (offset & (align - 1)) {
978         head_buf = qemu_blockalign(bs, align);
979         qemu_iovec_init(&local_qiov, qiov->niov + 2);
980         qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
981         qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
982         use_local_qiov = true;
983 
984         bytes += offset & (align - 1);
985         offset = offset & ~(align - 1);
986     }
987 
988     if ((offset + bytes) & (align - 1)) {
989         if (!use_local_qiov) {
990             qemu_iovec_init(&local_qiov, qiov->niov + 1);
991             qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
992             use_local_qiov = true;
993         }
994         tail_buf = qemu_blockalign(bs, align);
995         qemu_iovec_add(&local_qiov, tail_buf,
996                        align - ((offset + bytes) & (align - 1)));
997 
998         bytes = ROUND_UP(bytes, align);
999     }
1000 
1001     tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ);
1002     ret = bdrv_aligned_preadv(bs, &req, offset, bytes, align,
1003                               use_local_qiov ? &local_qiov : qiov,
1004                               flags);
1005     tracked_request_end(&req);
1006 
1007     if (use_local_qiov) {
1008         qemu_iovec_destroy(&local_qiov);
1009         qemu_vfree(head_buf);
1010         qemu_vfree(tail_buf);
1011     }
1012 
1013     return ret;
1014 }
1015 
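/* Sector-based wrapper around bdrv_co_do_preadv() */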
1016 static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
1017     int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
1018     BdrvRequestFlags flags)
1019 {
1020     if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
1021         return -EINVAL;
1022     }
1023 
1024     return bdrv_co_do_preadv(bs, sector_num << BDRV_SECTOR_BITS,
1025                              nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
1026 }
1027 
1028 int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num,
1029     int nb_sectors, QEMUIOVector *qiov)
1030 {
1031     trace_bdrv_co_readv(bs, sector_num, nb_sectors);
1032 
1033     return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov, 0);
1034 }
1035 
1036 int coroutine_fn bdrv_co_readv_no_serialising(BlockDriverState *bs,
1037     int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
1038 {
1039     trace_bdrv_co_readv_no_serialising(bs, sector_num, nb_sectors);
1040 
1041     return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov,
1042                             BDRV_REQ_NO_SERIALISING);
1043 }
1044 
1045 int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
1046     int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
1047 {
1048     trace_bdrv_co_copy_on_readv(bs, sector_num, nb_sectors);
1049 
1050     return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov,
1051                             BDRV_REQ_COPY_ON_READ);
1052 }
1053 
1054 #define MAX_WRITE_ZEROES_BOUNCE_BUFFER 32768
1055 
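/*
 * Write zeroes to a sector range, preferring the driver's efficient
 * .bdrv_co_write_zeroes callback and falling back to writing out a bounce
 * buffer filled with zeroes when the driver does not support it.
 */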
1056 static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
1057     int64_t sector_num, int nb_sectors, BdrvRequestFlags flags)
1058 {
1059     BlockDriver *drv = bs->drv;
1060     QEMUIOVector qiov;
1061     struct iovec iov = {0};
1062     int ret = 0;
1063 
1064     int max_write_zeroes = MIN_NON_ZERO(bs->bl.max_write_zeroes,
1065                                         BDRV_REQUEST_MAX_SECTORS);
1066 
1067     while (nb_sectors > 0 && !ret) {
1068         int num = nb_sectors;
1069 
1070         /* Align request.  Block drivers can expect the "bulk" of the request
1071          * to be aligned.
1072          */
1073         if (bs->bl.write_zeroes_alignment
1074             && num > bs->bl.write_zeroes_alignment) {
1075             if (sector_num % bs->bl.write_zeroes_alignment != 0) {
1076                 /* Make a small request up to the first aligned sector.  */
1077                 num = bs->bl.write_zeroes_alignment;
1078                 num -= sector_num % bs->bl.write_zeroes_alignment;
1079             } else if ((sector_num + num) % bs->bl.write_zeroes_alignment != 0) {
1080                 /* Shorten the request to the last aligned sector.  num cannot
1081                  * underflow because num > bs->bl.write_zeroes_alignment.
1082                  */
1083                 num -= (sector_num + num) % bs->bl.write_zeroes_alignment;
1084             }
1085         }
1086 
1087         /* limit request size */
1088         if (num > max_write_zeroes) {
1089             num = max_write_zeroes;
1090         }
1091 
1092         ret = -ENOTSUP;
1093         /* First try the efficient write zeroes operation */
1094         if (drv->bdrv_co_write_zeroes) {
1095             ret = drv->bdrv_co_write_zeroes(bs, sector_num, num, flags);
1096         }
1097 
1098         if (ret == -ENOTSUP) {
1099             /* Fall back to bounce buffer if write zeroes is unsupported */
1100             int max_xfer_len = MIN_NON_ZERO(bs->bl.max_transfer_length,
1101                                             MAX_WRITE_ZEROES_BOUNCE_BUFFER);
1102             num = MIN(num, max_xfer_len);
1103             iov.iov_len = num * BDRV_SECTOR_SIZE;
1104             if (iov.iov_base == NULL) {
1105                 iov.iov_base = qemu_try_blockalign(bs, num * BDRV_SECTOR_SIZE);
1106                 if (iov.iov_base == NULL) {
1107                     ret = -ENOMEM;
1108                     goto fail;
1109                 }
1110                 memset(iov.iov_base, 0, num * BDRV_SECTOR_SIZE);
1111             }
1112             qemu_iovec_init_external(&qiov, &iov, 1);
1113 
1114             ret = drv->bdrv_co_writev(bs, sector_num, num, &qiov);
1115 
1116             /* Keep the bounce buffer around if it is big enough for all
1117              * future requests.
1118              */
1119             if (num < max_xfer_len) {
1120                 qemu_vfree(iov.iov_base);
1121                 iov.iov_base = NULL;
1122             }
1123         }
1124 
1125         sector_num += num;
1126         nb_sectors -= num;
1127     }
1128 
1129 fail:
1130     qemu_vfree(iov.iov_base);
1131     return ret;
1132 }
1133 
1134 /*
1135  * Forwards an already correctly aligned write request to the BlockDriver.
1136  */
1137 static int coroutine_fn bdrv_aligned_pwritev(BlockDriverState *bs,
1138     BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
1139     QEMUIOVector *qiov, int flags)
1140 {
1141     BlockDriver *drv = bs->drv;
1142     bool waited;
1143     int ret;
1144 
1145     int64_t sector_num = offset >> BDRV_SECTOR_BITS;
1146     unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS;
1147 
1148     assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
1149     assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
1150     assert(!qiov || bytes == qiov->size);
1151 
1152     waited = wait_serialising_requests(req);
1153     assert(!waited || !req->serialising);
1154     assert(req->overlap_offset <= offset);
1155     assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);
1156 
1157     ret = notifier_with_return_list_notify(&bs->before_write_notifiers, req);
1158 
1159     if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF &&
1160         !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_write_zeroes &&
1161         qemu_iovec_is_zero(qiov)) {
1162         flags |= BDRV_REQ_ZERO_WRITE;
1163         if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) {
1164             flags |= BDRV_REQ_MAY_UNMAP;
1165         }
1166     }
1167 
1168     if (ret < 0) {
1169         /* Do nothing; the write notifier decided to fail this request */
1170     } else if (flags & BDRV_REQ_ZERO_WRITE) {
1171         bdrv_debug_event(bs, BLKDBG_PWRITEV_ZERO);
1172         ret = bdrv_co_do_write_zeroes(bs, sector_num, nb_sectors, flags);
1173     } else {
1174         bdrv_debug_event(bs, BLKDBG_PWRITEV);
1175         ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov);
1176     }
1177     bdrv_debug_event(bs, BLKDBG_PWRITEV_DONE);
1178 
1179     if (ret == 0 && !bs->enable_write_cache) {
1180         ret = bdrv_co_flush(bs);
1181     }
1182 
1183     bdrv_set_dirty(bs, sector_num, nb_sectors);
1184 
1185     if (bs->wr_highest_offset < offset + bytes) {
1186         bs->wr_highest_offset = offset + bytes;
1187     }
1188 
1189     if (ret >= 0) {
1190         bs->total_sectors = MAX(bs->total_sectors, sector_num + nb_sectors);
1191     }
1192 
1193     return ret;
1194 }
1195 
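/*
 * Handle a possibly unaligned zero-write request: read-modify-write the
 * unaligned head and tail and write zeroes to the aligned middle part.
 */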
1196 static int coroutine_fn bdrv_co_do_zero_pwritev(BlockDriverState *bs,
1197                                                 int64_t offset,
1198                                                 unsigned int bytes,
1199                                                 BdrvRequestFlags flags,
1200                                                 BdrvTrackedRequest *req)
1201 {
1202     uint8_t *buf = NULL;
1203     QEMUIOVector local_qiov;
1204     struct iovec iov;
1205     uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
1206     unsigned int head_padding_bytes, tail_padding_bytes;
1207     int ret = 0;
1208 
1209     head_padding_bytes = offset & (align - 1);
1210     tail_padding_bytes = align - ((offset + bytes) & (align - 1));
1211 
1212 
1213     assert(flags & BDRV_REQ_ZERO_WRITE);
1214     if (head_padding_bytes || tail_padding_bytes) {
1215         buf = qemu_blockalign(bs, align);
1216         iov = (struct iovec) {
1217             .iov_base   = buf,
1218             .iov_len    = align,
1219         };
1220         qemu_iovec_init_external(&local_qiov, &iov, 1);
1221     }
1222     if (head_padding_bytes) {
1223         uint64_t zero_bytes = MIN(bytes, align - head_padding_bytes);
1224 
1225         /* RMW the unaligned part before head. */
1226         mark_request_serialising(req, align);
1227         wait_serialising_requests(req);
1228         bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
1229         ret = bdrv_aligned_preadv(bs, req, offset & ~(align - 1), align,
1230                                   align, &local_qiov, 0);
1231         if (ret < 0) {
1232             goto fail;
1233         }
1234         bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);
1235 
1236         memset(buf + head_padding_bytes, 0, zero_bytes);
1237         ret = bdrv_aligned_pwritev(bs, req, offset & ~(align - 1), align,
1238                                    &local_qiov,
1239                                    flags & ~BDRV_REQ_ZERO_WRITE);
1240         if (ret < 0) {
1241             goto fail;
1242         }
1243         offset += zero_bytes;
1244         bytes -= zero_bytes;
1245     }
1246 
1247     assert(!bytes || (offset & (align - 1)) == 0);
1248     if (bytes >= align) {
1249         /* Write the aligned part in the middle. */
1250         uint64_t aligned_bytes = bytes & ~(align - 1);
1251         ret = bdrv_aligned_pwritev(bs, req, offset, aligned_bytes,
1252                                    NULL, flags);
1253         if (ret < 0) {
1254             goto fail;
1255         }
1256         bytes -= aligned_bytes;
1257         offset += aligned_bytes;
1258     }
1259 
1260     assert(!bytes || (offset & (align - 1)) == 0);
1261     if (bytes) {
1262         assert(align == tail_padding_bytes + bytes);
1263         /* RMW the unaligned part after tail. */
1264         mark_request_serialising(req, align);
1265         wait_serialising_requests(req);
1266         bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
1267         ret = bdrv_aligned_preadv(bs, req, offset, align,
1268                                   align, &local_qiov, 0);
1269         if (ret < 0) {
1270             goto fail;
1271         }
1272         bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
1273 
1274         memset(buf, 0, bytes);
1275         ret = bdrv_aligned_pwritev(bs, req, offset, align,
1276                                    &local_qiov, flags & ~BDRV_REQ_ZERO_WRITE);
1277     }
1278 fail:
1279     qemu_vfree(buf);
1280     return ret;
1281 
1282 }
1283 
1284 /*
1285  * Handle a write request in coroutine context
1286  */
1287 static int coroutine_fn bdrv_co_do_pwritev(BlockDriverState *bs,
1288     int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
1289     BdrvRequestFlags flags)
1290 {
1291     BdrvTrackedRequest req;
1292     /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */
1293     uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
1294     uint8_t *head_buf = NULL;
1295     uint8_t *tail_buf = NULL;
1296     QEMUIOVector local_qiov;
1297     bool use_local_qiov = false;
1298     int ret;
1299 
1300     if (!bs->drv) {
1301         return -ENOMEDIUM;
1302     }
1303     if (bs->read_only) {
1304         return -EPERM;
1305     }
1306     assert(!(bs->open_flags & BDRV_O_INACTIVE));
1307 
1308     ret = bdrv_check_byte_request(bs, offset, bytes);
1309     if (ret < 0) {
1310         return ret;
1311     }
1312 
1313     /* throttling disk I/O */
1314     if (bs->io_limits_enabled) {
1315         throttle_group_co_io_limits_intercept(bs, bytes, true);
1316     }
1317 
1318     /*
1319      * Align write if necessary by performing a read-modify-write cycle.
1320      * Pad qiov with the read parts and be sure to have a tracked request not
1321      * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle.
1322      */
1323     tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_WRITE);
1324 
1325     if (!qiov) {
1326         ret = bdrv_co_do_zero_pwritev(bs, offset, bytes, flags, &req);
1327         goto out;
1328     }
1329 
1330     if (offset & (align - 1)) {
1331         QEMUIOVector head_qiov;
1332         struct iovec head_iov;
1333 
1334         mark_request_serialising(&req, align);
1335         wait_serialising_requests(&req);
1336 
1337         head_buf = qemu_blockalign(bs, align);
1338         head_iov = (struct iovec) {
1339             .iov_base   = head_buf,
1340             .iov_len    = align,
1341         };
1342         qemu_iovec_init_external(&head_qiov, &head_iov, 1);
1343 
1344         bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
1345         ret = bdrv_aligned_preadv(bs, &req, offset & ~(align - 1), align,
1346                                   align, &head_qiov, 0);
1347         if (ret < 0) {
1348             goto fail;
1349         }
1350         bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);
1351 
1352         qemu_iovec_init(&local_qiov, qiov->niov + 2);
1353         qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
1354         qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
1355         use_local_qiov = true;
1356 
1357         bytes += offset & (align - 1);
1358         offset = offset & ~(align - 1);
1359     }
1360 
1361     if ((offset + bytes) & (align - 1)) {
1362         QEMUIOVector tail_qiov;
1363         struct iovec tail_iov;
1364         size_t tail_bytes;
1365         bool waited;
1366 
1367         mark_request_serialising(&req, align);
1368         waited = wait_serialising_requests(&req);
1369         assert(!waited || !use_local_qiov);
1370 
1371         tail_buf = qemu_blockalign(bs, align);
1372         tail_iov = (struct iovec) {
1373             .iov_base   = tail_buf,
1374             .iov_len    = align,
1375         };
1376         qemu_iovec_init_external(&tail_qiov, &tail_iov, 1);
1377 
1378         bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
1379         ret = bdrv_aligned_preadv(bs, &req, (offset + bytes) & ~(align - 1), align,
1380                                   align, &tail_qiov, 0);
1381         if (ret < 0) {
1382             goto fail;
1383         }
1384         bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
1385 
1386         if (!use_local_qiov) {
1387             qemu_iovec_init(&local_qiov, qiov->niov + 1);
1388             qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
1389             use_local_qiov = true;
1390         }
1391 
1392         tail_bytes = (offset + bytes) & (align - 1);
1393         qemu_iovec_add(&local_qiov, tail_buf + tail_bytes, align - tail_bytes);
1394 
1395         bytes = ROUND_UP(bytes, align);
1396     }
1397 
1398     ret = bdrv_aligned_pwritev(bs, &req, offset, bytes,
1399                                use_local_qiov ? &local_qiov : qiov,
1400                                flags);
1401 
1402 fail:
1403 
1404     if (use_local_qiov) {
1405         qemu_iovec_destroy(&local_qiov);
1406     }
1407     qemu_vfree(head_buf);
1408     qemu_vfree(tail_buf);
1409 out:
1410     tracked_request_end(&req);
1411     return ret;
1412 }
1413 
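/* Sector-based wrapper around bdrv_co_do_pwritev() */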
1414 static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
1415     int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
1416     BdrvRequestFlags flags)
1417 {
1418     if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
1419         return -EINVAL;
1420     }
1421 
1422     return bdrv_co_do_pwritev(bs, sector_num << BDRV_SECTOR_BITS,
1423                               nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
1424 }
1425 
1426 int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num,
1427     int nb_sectors, QEMUIOVector *qiov)
1428 {
1429     trace_bdrv_co_writev(bs, sector_num, nb_sectors);
1430 
1431     return bdrv_co_do_writev(bs, sector_num, nb_sectors, qiov, 0);
1432 }
1433 
1434 int coroutine_fn bdrv_co_write_zeroes(BlockDriverState *bs,
1435                                       int64_t sector_num, int nb_sectors,
1436                                       BdrvRequestFlags flags)
1437 {
1438     trace_bdrv_co_write_zeroes(bs, sector_num, nb_sectors, flags);
1439 
1440     if (!(bs->open_flags & BDRV_O_UNMAP)) {
1441         flags &= ~BDRV_REQ_MAY_UNMAP;
1442     }
1443 
1444     return bdrv_co_do_writev(bs, sector_num, nb_sectors, NULL,
1445                              BDRV_REQ_ZERO_WRITE | flags);
1446 }
1447 
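/* Flush all BlockDriverStates; returns the first error encountered, if any */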
1448 int bdrv_flush_all(void)
1449 {
1450     BlockDriverState *bs = NULL;
1451     int result = 0;
1452 
1453     while ((bs = bdrv_next(bs))) {
1454         AioContext *aio_context = bdrv_get_aio_context(bs);
1455         int ret;
1456 
1457         aio_context_acquire(aio_context);
1458         ret = bdrv_flush(bs);
1459         if (ret < 0 && !result) {
1460             result = ret;
1461         }
1462         aio_context_release(aio_context);
1463     }
1464 
1465     return result;
1466 }
1467 
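/* Argument/result bundle for the bdrv_get_block_status_above() coroutine
 * wrapper (see bdrv_get_block_status_above_co_entry()). */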
1468 typedef struct BdrvCoGetBlockStatusData {
1469     BlockDriverState *bs;
1470     BlockDriverState *base;
1471     BlockDriverState **file;
1472     int64_t sector_num;
1473     int nb_sectors;
1474     int *pnum;
1475     int64_t ret;
1476     bool done;
1477 } BdrvCoGetBlockStatusData;
1478 
1479 /*
1480  * Returns the allocation status of the specified sectors.
1481  * Drivers not implementing the functionality are assumed to not support
1482  * backing files, hence all their sectors are reported as allocated.
1483  *
1484  * If 'sector_num' is beyond the end of the disk image the return value is 0
1485  * and 'pnum' is set to 0.
1486  *
1487  * 'pnum' is set to the number of sectors (including and immediately following
1488  * the specified sector) that are known to be in the same
1489  * allocated/unallocated state.
1490  *
1491  * 'nb_sectors' is the max value 'pnum' should be set to.  If nb_sectors goes
1492  * beyond the end of the disk image it will be clamped.
1493  *
1494  * If the returned value is positive and BDRV_BLOCK_OFFSET_VALID is set, 'file'
1495  * points to the BDS in which the sector range is allocated.
1496  */
1497 static int64_t coroutine_fn bdrv_co_get_block_status(BlockDriverState *bs,
1498                                                      int64_t sector_num,
1499                                                      int nb_sectors, int *pnum,
1500                                                      BlockDriverState **file)
1501 {
1502     int64_t total_sectors;
1503     int64_t n;
1504     int64_t ret, ret2;
1505 
1506     total_sectors = bdrv_nb_sectors(bs);
1507     if (total_sectors < 0) {
1508         return total_sectors;
1509     }
1510 
1511     if (sector_num >= total_sectors) {
1512         *pnum = 0;
1513         return 0;
1514     }
1515 
1516     n = total_sectors - sector_num;
1517     if (n < nb_sectors) {
1518         nb_sectors = n;
1519     }
1520 
1521     if (!bs->drv->bdrv_co_get_block_status) {
1522         *pnum = nb_sectors;
1523         ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED;
1524         if (bs->drv->protocol_name) {
1525             ret |= BDRV_BLOCK_OFFSET_VALID | (sector_num * BDRV_SECTOR_SIZE);
1526         }
1527         return ret;
1528     }
1529 
1530     *file = NULL;
1531     ret = bs->drv->bdrv_co_get_block_status(bs, sector_num, nb_sectors, pnum,
1532                                             file);
1533     if (ret < 0) {
1534         *pnum = 0;
1535         return ret;
1536     }
1537 
1538     if (ret & BDRV_BLOCK_RAW) {
1539         assert(ret & BDRV_BLOCK_OFFSET_VALID);
1540         return bdrv_get_block_status(bs->file->bs, ret >> BDRV_SECTOR_BITS,
1541                                      *pnum, pnum, file);
1542     }
1543 
1544     if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
1545         ret |= BDRV_BLOCK_ALLOCATED;
1546     } else {
1547         if (bdrv_unallocated_blocks_are_zero(bs)) {
1548             ret |= BDRV_BLOCK_ZERO;
1549         } else if (bs->backing) {
1550             BlockDriverState *bs2 = bs->backing->bs;
1551             int64_t nb_sectors2 = bdrv_nb_sectors(bs2);
1552             if (nb_sectors2 >= 0 && sector_num >= nb_sectors2) {
1553                 ret |= BDRV_BLOCK_ZERO;
1554             }
1555         }
1556     }
1557 
1558     if (*file && *file != bs &&
1559         (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
1560         (ret & BDRV_BLOCK_OFFSET_VALID)) {
1561         BlockDriverState *file2;
1562         int file_pnum;
1563 
1564         ret2 = bdrv_co_get_block_status(*file, ret >> BDRV_SECTOR_BITS,
1565                                         *pnum, &file_pnum, &file2);
1566         if (ret2 >= 0) {
1567             /* Ignore errors.  This is just providing extra information; it
1568              * is useful but not necessary.
1569              */
1570             if (!file_pnum) {
1571                 /* !file_pnum indicates an offset at or beyond the EOF; it is
1572                  * perfectly valid for the format block driver to point to such
1573                  * offsets, so catch it and mark everything as zero */
1574                 ret |= BDRV_BLOCK_ZERO;
1575             } else {
1576                 /* Limit request to the range reported by the protocol driver */
1577                 *pnum = file_pnum;
1578                 ret |= (ret2 & BDRV_BLOCK_ZERO);
1579             }
1580         }
1581     }
1582 
1583     return ret;
1584 }
1585 
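/*
 * Like bdrv_co_get_block_status(), but walks the backing chain from bs down
 * to (but not including) base until a layer with allocated data is found.
 */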
1586 static int64_t coroutine_fn bdrv_co_get_block_status_above(BlockDriverState *bs,
1587         BlockDriverState *base,
1588         int64_t sector_num,
1589         int nb_sectors,
1590         int *pnum,
1591         BlockDriverState **file)
1592 {
1593     BlockDriverState *p;
1594     int64_t ret = 0;
1595 
1596     assert(bs != base);
1597     for (p = bs; p != base; p = backing_bs(p)) {
1598         ret = bdrv_co_get_block_status(p, sector_num, nb_sectors, pnum, file);
1599         if (ret < 0 || ret & BDRV_BLOCK_ALLOCATED) {
1600             break;
1601         }
1602         /* [sector_num, sector_num + *pnum) is unallocated on this layer,
1603          * which may cover only the first part of [sector_num, nb_sectors).  */
1604         nb_sectors = MIN(nb_sectors, *pnum);
1605     }
1606     return ret;
1607 }
1608 
1609 /* Coroutine wrapper for bdrv_get_block_status_above() */
1610 static void coroutine_fn bdrv_get_block_status_above_co_entry(void *opaque)
1611 {
1612     BdrvCoGetBlockStatusData *data = opaque;
1613 
1614     data->ret = bdrv_co_get_block_status_above(data->bs, data->base,
1615                                                data->sector_num,
1616                                                data->nb_sectors,
1617                                                data->pnum,
1618                                                data->file);
1619     data->done = true;
1620 }
1621 
1622 /*
1623  * Synchronous wrapper around bdrv_co_get_block_status_above().
1624  *
1625  * See bdrv_co_get_block_status_above() for details.
1626  */
1627 int64_t bdrv_get_block_status_above(BlockDriverState *bs,
1628                                     BlockDriverState *base,
1629                                     int64_t sector_num,
1630                                     int nb_sectors, int *pnum,
1631                                     BlockDriverState **file)
1632 {
1633     Coroutine *co;
1634     BdrvCoGetBlockStatusData data = {
1635         .bs = bs,
1636         .base = base,
1637         .file = file,
1638         .sector_num = sector_num,
1639         .nb_sectors = nb_sectors,
1640         .pnum = pnum,
1641         .done = false,
1642     };
1643 
1644     if (qemu_in_coroutine()) {
1645         /* Fast-path if already in coroutine context */
1646         bdrv_get_block_status_above_co_entry(&data);
1647     } else {
1648         AioContext *aio_context = bdrv_get_aio_context(bs);
1649 
1650         co = qemu_coroutine_create(bdrv_get_block_status_above_co_entry);
1651         qemu_coroutine_enter(co, &data);
1652         while (!data.done) {
1653             aio_poll(aio_context, true);
1654         }
1655     }
1656     return data.ret;
1657 }
1658 
1659 int64_t bdrv_get_block_status(BlockDriverState *bs,
1660                               int64_t sector_num,
1661                               int nb_sectors, int *pnum,
1662                               BlockDriverState **file)
1663 {
1664     return bdrv_get_block_status_above(bs, backing_bs(bs),
1665                                        sector_num, nb_sectors, pnum, file);
1666 }
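/*
 * Illustrative sketch (not part of the original source): one way a caller
 * might query and interpret the status bits returned above.  The function
 * name dump_first_extent() and the 16-sector probe length are assumptions
 * invented for this example.
 *
 *     static void dump_first_extent(BlockDriverState *bs)
 *     {
 *         BlockDriverState *file;
 *         int pnum;
 *         int64_t ret = bdrv_get_block_status(bs, 0, 16, &pnum, &file);
 *
 *         if (ret < 0) {
 *             return;                         // query failed
 *         }
 *         if (ret & BDRV_BLOCK_ZERO) {
 *             // the first pnum sectors read as zeroes
 *         }
 *         if ((ret & BDRV_BLOCK_OFFSET_VALID) && file) {
 *             // data starts at sector (ret >> BDRV_SECTOR_BITS) in 'file'
 *         }
 *     }
 */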
1667 
1668 int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num,
1669                                    int nb_sectors, int *pnum)
1670 {
1671     BlockDriverState *file;
1672     int64_t ret = bdrv_get_block_status(bs, sector_num, nb_sectors, pnum,
1673                                         &file);
1674     if (ret < 0) {
1675         return ret;
1676     }
1677     return !!(ret & BDRV_BLOCK_ALLOCATED);
1678 }
1679 
1680 /*
1681  * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
1682  *
1683  * Return true if the given sector is allocated in any image between
1684  * BASE (excluded) and TOP (included).  BASE can be NULL to check if the
1685  * given sector is allocated in any image of the chain; return false otherwise.
1686  *
1687  * 'pnum' is set to the number of sectors (including and immediately following
1688  *  the specified sector) that are known to be in the same
1689  *  allocated/unallocated state.
1690  *
1691  */
1692 int bdrv_is_allocated_above(BlockDriverState *top,
1693                             BlockDriverState *base,
1694                             int64_t sector_num,
1695                             int nb_sectors, int *pnum)
1696 {
1697     BlockDriverState *intermediate;
1698     int ret, n = nb_sectors;
1699 
1700     intermediate = top;
1701     while (intermediate && intermediate != base) {
1702         int pnum_inter;
1703         ret = bdrv_is_allocated(intermediate, sector_num, nb_sectors,
1704                                 &pnum_inter);
1705         if (ret < 0) {
1706             return ret;
1707         } else if (ret) {
1708             *pnum = pnum_inter;
1709             return 1;
1710         }
1711 
1712         /*
1713          * [sector_num, nb_sectors] is unallocated on this image, but a
1714          * deeper image in the backing chain might still have
1715          *
1716          * [sector_num+x, nb_sectors] allocated.
1717          */
1718         if (n > pnum_inter &&
1719             (intermediate == top ||
1720              sector_num + pnum_inter < intermediate->total_sectors)) {
1721             n = pnum_inter;
1722         }
1723 
1724         intermediate = backing_bs(intermediate);
1725     }
1726 
1727     *pnum = n;
1728     return 0;
1729 }
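/*
 * Illustrative sketch (not part of the original source): how a copy loop
 * might use bdrv_is_allocated_above() to visit only ranges that are
 * allocated somewhere above 'base'.  The 256-sector chunk size and the
 * 'top'/'base' variables are assumptions for the example.
 *
 *     int64_t sector = 0;
 *     int64_t end = bdrv_nb_sectors(top);
 *
 *     while (sector < end) {
 *         int n;
 *         int ret = bdrv_is_allocated_above(top, base, sector, 256, &n);
 *         if (ret < 0) {
 *             break;                          // error
 *         } else if (ret) {
 *             // [sector, sector + n) is allocated above 'base': copy it
 *         }
 *         sector += n;
 *     }
 */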
1730 
1731 int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num,
1732                           const uint8_t *buf, int nb_sectors)
1733 {
1734     BlockDriver *drv = bs->drv;
1735     int ret;
1736 
1737     if (!drv) {
1738         return -ENOMEDIUM;
1739     }
1740     if (!drv->bdrv_write_compressed) {
1741         return -ENOTSUP;
1742     }
1743     ret = bdrv_check_request(bs, sector_num, nb_sectors);
1744     if (ret < 0) {
1745         return ret;
1746     }
1747 
1748     assert(QLIST_EMPTY(&bs->dirty_bitmaps));
1749 
1750     return drv->bdrv_write_compressed(bs, sector_num, buf, nb_sectors);
1751 }
1752 
1753 int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
1754                       int64_t pos, int size)
1755 {
1756     QEMUIOVector qiov;
1757     struct iovec iov = {
1758         .iov_base   = (void *) buf,
1759         .iov_len    = size,
1760     };
1761 
1762     qemu_iovec_init_external(&qiov, &iov, 1);
1763     return bdrv_writev_vmstate(bs, &qiov, pos);
1764 }
1765 
1766 int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
1767 {
1768     BlockDriver *drv = bs->drv;
1769 
1770     if (!drv) {
1771         return -ENOMEDIUM;
1772     } else if (drv->bdrv_save_vmstate) {
1773         return drv->bdrv_save_vmstate(bs, qiov, pos);
1774     } else if (bs->file) {
1775         return bdrv_writev_vmstate(bs->file->bs, qiov, pos);
1776     }
1777 
1778     return -ENOTSUP;
1779 }
1780 
1781 int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
1782                       int64_t pos, int size)
1783 {
1784     BlockDriver *drv = bs->drv;
1785     if (!drv)
1786         return -ENOMEDIUM;
1787     if (drv->bdrv_load_vmstate)
1788         return drv->bdrv_load_vmstate(bs, buf, pos, size);
1789     if (bs->file)
1790         return bdrv_load_vmstate(bs->file->bs, buf, pos, size);
1791     return -ENOTSUP;
1792 }
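/*
 * Illustrative sketch (not part of the original source): the vmstate helpers
 * above are normally driven by the savevm/migration code; a write followed
 * by a read-back might look roughly like this.  'bs', 'pos' and the buffer
 * size are assumptions for the example.
 *
 *     uint8_t buf[512] = { 0 };
 *     int ret = bdrv_save_vmstate(bs, buf, pos, sizeof(buf));
 *     if (ret >= 0) {
 *         ret = bdrv_load_vmstate(bs, buf, pos, sizeof(buf));
 *     }
 */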
1793 
1794 /**************************************************************/
1795 /* async I/Os */
1796 
1797 BlockAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num,
1798                            QEMUIOVector *qiov, int nb_sectors,
1799                            BlockCompletionFunc *cb, void *opaque)
1800 {
1801     trace_bdrv_aio_readv(bs, sector_num, nb_sectors, opaque);
1802 
1803     return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0,
1804                                  cb, opaque, false);
1805 }
1806 
1807 BlockAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num,
1808                             QEMUIOVector *qiov, int nb_sectors,
1809                             BlockCompletionFunc *cb, void *opaque)
1810 {
1811     trace_bdrv_aio_writev(bs, sector_num, nb_sectors, opaque);
1812 
1813     return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0,
1814                                  cb, opaque, true);
1815 }
1816 
1817 BlockAIOCB *bdrv_aio_write_zeroes(BlockDriverState *bs,
1818         int64_t sector_num, int nb_sectors, BdrvRequestFlags flags,
1819         BlockCompletionFunc *cb, void *opaque)
1820 {
1821     trace_bdrv_aio_write_zeroes(bs, sector_num, nb_sectors, flags, opaque);
1822 
1823     return bdrv_co_aio_rw_vector(bs, sector_num, NULL, nb_sectors,
1824                                  BDRV_REQ_ZERO_WRITE | flags,
1825                                  cb, opaque, true);
1826 }
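/*
 * Illustrative sketch (not part of the original source): the usual pattern
 * for the AIO entry points above is to submit the request and receive the
 * result in a completion callback.  read_done() and my_opaque are
 * assumptions invented for the example.
 *
 *     static void read_done(void *opaque, int ret)
 *     {
 *         // ret is 0 on success or a negative errno value
 *     }
 *
 *     BlockAIOCB *acb = bdrv_aio_readv(bs, sector_num, &qiov, nb_sectors,
 *                                      read_done, my_opaque);
 *     // if necessary, bdrv_aio_cancel(acb) cancels or waits for the request
 */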
1827 
1828 
1829 typedef struct MultiwriteCB {
1830     int error;
1831     int num_requests;
1832     int num_callbacks;
1833     struct {
1834         BlockCompletionFunc *cb;
1835         void *opaque;
1836         QEMUIOVector *free_qiov;
1837     } callbacks[];
1838 } MultiwriteCB;
1839 
1840 static void multiwrite_user_cb(MultiwriteCB *mcb)
1841 {
1842     int i;
1843 
1844     for (i = 0; i < mcb->num_callbacks; i++) {
1845         mcb->callbacks[i].cb(mcb->callbacks[i].opaque, mcb->error);
1846         if (mcb->callbacks[i].free_qiov) {
1847             qemu_iovec_destroy(mcb->callbacks[i].free_qiov);
1848         }
1849         g_free(mcb->callbacks[i].free_qiov);
1850     }
1851 }
1852 
1853 static void multiwrite_cb(void *opaque, int ret)
1854 {
1855     MultiwriteCB *mcb = opaque;
1856 
1857     trace_multiwrite_cb(mcb, ret);
1858 
1859     if (ret < 0 && !mcb->error) {
1860         mcb->error = ret;
1861     }
1862 
1863     mcb->num_requests--;
1864     if (mcb->num_requests == 0) {
1865         multiwrite_user_cb(mcb);
1866         g_free(mcb);
1867     }
1868 }
1869 
1870 static int multiwrite_req_compare(const void *a, const void *b)
1871 {
1872     const BlockRequest *req1 = a, *req2 = b;
1873 
1874     /*
1875      * Note that we can't simply subtract req2->sector from req1->sector
1876      * here as that could overflow the return value.
1877      */
1878     if (req1->sector > req2->sector) {
1879         return 1;
1880     } else if (req1->sector < req2->sector) {
1881         return -1;
1882     } else {
1883         return 0;
1884     }
1885 }
1886 
1887 /*
1888  * Takes a bunch of requests and tries to merge them. Returns the number of
1889  * requests that remain after merging.
1890  */
1891 static int multiwrite_merge(BlockDriverState *bs, BlockRequest *reqs,
1892     int num_reqs, MultiwriteCB *mcb)
1893 {
1894     int i, outidx;
1895 
1896     // Sort requests by start sector
1897     qsort(reqs, num_reqs, sizeof(*reqs), &multiwrite_req_compare);
1898 
1899     // Check if adjacent requests are exactly sequential or overlapping.
1900     // If so, combine them into a single request.
1901     outidx = 0;
1902     for (i = 1; i < num_reqs; i++) {
1903         int merge = 0;
1904         int64_t oldreq_last = reqs[outidx].sector + reqs[outidx].nb_sectors;
1905 
1906         // Handle exactly sequential writes and overlapping writes.
1907         if (reqs[i].sector <= oldreq_last) {
1908             merge = 1;
1909         }
1910 
1911         if (reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1 >
1912             bs->bl.max_iov) {
1913             merge = 0;
1914         }
1915 
1916         if (bs->bl.max_transfer_length && reqs[outidx].nb_sectors +
1917             reqs[i].nb_sectors > bs->bl.max_transfer_length) {
1918             merge = 0;
1919         }
1920 
1921         if (merge) {
1922             size_t size;
1923             QEMUIOVector *qiov = g_malloc0(sizeof(*qiov));
1924             qemu_iovec_init(qiov,
1925                 reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1);
1926 
1927             // Add the first request to the merged one. If the requests are
1928             // overlapping, drop the last sectors of the first request.
1929             size = (reqs[i].sector - reqs[outidx].sector) << 9;
1930             qemu_iovec_concat(qiov, reqs[outidx].qiov, 0, size);
1931 
1932             // We shouldn't need to add any zeros between the two requests
1933             assert(reqs[i].sector <= oldreq_last);
1934 
1935             // Add the second request
1936             qemu_iovec_concat(qiov, reqs[i].qiov, 0, reqs[i].qiov->size);
1937 
1938             // Add tail of first request, if necessary
1939             if (qiov->size < reqs[outidx].qiov->size) {
1940                 qemu_iovec_concat(qiov, reqs[outidx].qiov, qiov->size,
1941                                   reqs[outidx].qiov->size - qiov->size);
1942             }
1943 
1944             reqs[outidx].nb_sectors = qiov->size >> 9;
1945             reqs[outidx].qiov = qiov;
1946 
1947             mcb->callbacks[i].free_qiov = reqs[outidx].qiov;
1948         } else {
1949             outidx++;
1950             reqs[outidx].sector     = reqs[i].sector;
1951             reqs[outidx].nb_sectors = reqs[i].nb_sectors;
1952             reqs[outidx].qiov       = reqs[i].qiov;
1953         }
1954     }
1955 
1956     if (bs->blk) {
1957         block_acct_merge_done(blk_get_stats(bs->blk), BLOCK_ACCT_WRITE,
1958                               num_reqs - outidx - 1);
1959     }
1960 
1961     return outidx + 1;
1962 }
1963 
1964 /*
1965  * Submit multiple AIO write requests at once.
1966  *
1967  * On success, the function returns 0 and all requests in the reqs array have
1968  * been submitted. On error, this function returns -1 and any given request
1969  * may or may not have been submitted. In particular, the callback will be
1970  * called for some of the requests but not for others. The caller must check
1971  * the error field of each BlockRequest to know which callbacks to wait for
1972  * (if error != 0, no callback will be called for that request).
1973  *
1974  * The implementation may modify the contents of the reqs array, e.g. to merge
1975  * requests. However, the fields opaque and error are left unmodified as they
1976  * are used to signal failure for a single request to the caller.
1977  */
1978 int bdrv_aio_multiwrite(BlockDriverState *bs, BlockRequest *reqs, int num_reqs)
1979 {
1980     MultiwriteCB *mcb;
1981     int i;
1982 
1983     /* don't submit writes if we don't have a medium */
1984     if (bs->drv == NULL) {
1985         for (i = 0; i < num_reqs; i++) {
1986             reqs[i].error = -ENOMEDIUM;
1987         }
1988         return -1;
1989     }
1990 
1991     if (num_reqs == 0) {
1992         return 0;
1993     }
1994 
1995     // Create MultiwriteCB structure
1996     mcb = g_malloc0(sizeof(*mcb) + num_reqs * sizeof(*mcb->callbacks));
1997     mcb->num_requests = 0;
1998     mcb->num_callbacks = num_reqs;
1999 
2000     for (i = 0; i < num_reqs; i++) {
2001         mcb->callbacks[i].cb = reqs[i].cb;
2002         mcb->callbacks[i].opaque = reqs[i].opaque;
2003     }
2004 
2005     // Check for mergeable requests
2006     num_reqs = multiwrite_merge(bs, reqs, num_reqs, mcb);
2007 
2008     trace_bdrv_aio_multiwrite(mcb, mcb->num_callbacks, num_reqs);
2009 
2010     /* Run the aio requests. */
2011     mcb->num_requests = num_reqs;
2012     for (i = 0; i < num_reqs; i++) {
2013         bdrv_co_aio_rw_vector(bs, reqs[i].sector, reqs[i].qiov,
2014                               reqs[i].nb_sectors, reqs[i].flags,
2015                               multiwrite_cb, mcb,
2016                               true);
2017     }
2018 
2019     return 0;
2020 }
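/*
 * Illustrative sketch (not part of the original source): how a device model
 * might batch two exactly sequential writes, which multiwrite_merge() above
 * would then combine.  qiov1/qiov2, write_done() and 'dev' are assumptions
 * for the example; only fields consumed by this file are shown.
 *
 *     BlockRequest reqs[2] = {
 *         { .sector = 0, .nb_sectors = 8, .qiov = &qiov1,
 *           .cb = write_done, .opaque = dev },
 *         { .sector = 8, .nb_sectors = 8, .qiov = &qiov2,
 *           .cb = write_done, .opaque = dev },
 *     };
 *
 *     if (bdrv_aio_multiwrite(bs, reqs, 2) < 0) {
 *         // callbacks only run for entries whose error field is still 0
 *     }
 */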
2021 
2022 void bdrv_aio_cancel(BlockAIOCB *acb)
2023 {
2024     qemu_aio_ref(acb);
2025     bdrv_aio_cancel_async(acb);
2026     while (acb->refcnt > 1) {
2027         if (acb->aiocb_info->get_aio_context) {
2028             aio_poll(acb->aiocb_info->get_aio_context(acb), true);
2029         } else if (acb->bs) {
2030             aio_poll(bdrv_get_aio_context(acb->bs), true);
2031         } else {
2032             abort();
2033         }
2034     }
2035     qemu_aio_unref(acb);
2036 }
2037 
2038 /* Async version of aio cancel. If the acb implements cancel_async, the caller
2039  * is not blocked; otherwise we do nothing and let the request complete
2040  * normally. In either case the completion callback must be called. */
2041 void bdrv_aio_cancel_async(BlockAIOCB *acb)
2042 {
2043     if (acb->aiocb_info->cancel_async) {
2044         acb->aiocb_info->cancel_async(acb);
2045     }
2046 }
2047 
2048 /**************************************************************/
2049 /* async block device emulation */
2050 
2051 typedef struct BlockAIOCBSync {
2052     BlockAIOCB common;
2053     QEMUBH *bh;
2054     int ret;
2055     /* vector translation state */
2056     QEMUIOVector *qiov;
2057     uint8_t *bounce;
2058     int is_write;
2059 } BlockAIOCBSync;
2060 
2061 static const AIOCBInfo bdrv_em_aiocb_info = {
2062     .aiocb_size         = sizeof(BlockAIOCBSync),
2063 };
2064 
2065 static void bdrv_aio_bh_cb(void *opaque)
2066 {
2067     BlockAIOCBSync *acb = opaque;
2068 
2069     if (!acb->is_write && acb->ret >= 0) {
2070         qemu_iovec_from_buf(acb->qiov, 0, acb->bounce, acb->qiov->size);
2071     }
2072     qemu_vfree(acb->bounce);
2073     acb->common.cb(acb->common.opaque, acb->ret);
2074     qemu_bh_delete(acb->bh);
2075     acb->bh = NULL;
2076     qemu_aio_unref(acb);
2077 }
2078 
2079 static BlockAIOCB *bdrv_aio_rw_vector(BlockDriverState *bs,
2080                                       int64_t sector_num,
2081                                       QEMUIOVector *qiov,
2082                                       int nb_sectors,
2083                                       BlockCompletionFunc *cb,
2084                                       void *opaque,
2085                                       int is_write)
2087 {
2088     BlockAIOCBSync *acb;
2089 
2090     acb = qemu_aio_get(&bdrv_em_aiocb_info, bs, cb, opaque);
2091     acb->is_write = is_write;
2092     acb->qiov = qiov;
2093     acb->bounce = qemu_try_blockalign(bs, qiov->size);
2094     acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_aio_bh_cb, acb);
2095 
2096     if (acb->bounce == NULL) {
2097         acb->ret = -ENOMEM;
2098     } else if (is_write) {
2099         qemu_iovec_to_buf(acb->qiov, 0, acb->bounce, qiov->size);
2100         acb->ret = bs->drv->bdrv_write(bs, sector_num, acb->bounce, nb_sectors);
2101     } else {
2102         acb->ret = bs->drv->bdrv_read(bs, sector_num, acb->bounce, nb_sectors);
2103     }
2104 
2105     qemu_bh_schedule(acb->bh);
2106 
2107     return &acb->common;
2108 }
2109 
2110 static BlockAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
2111         int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
2112         BlockCompletionFunc *cb, void *opaque)
2113 {
2114     return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
2115 }
2116 
2117 static BlockAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
2118         int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
2119         BlockCompletionFunc *cb, void *opaque)
2120 {
2121     return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 1);
2122 }
2123 
2124 
2125 typedef struct BlockAIOCBCoroutine {
2126     BlockAIOCB common;
2127     BlockRequest req;
2128     bool is_write;
2129     bool need_bh;
2130     bool *done;
2131     QEMUBH* bh;
2132 } BlockAIOCBCoroutine;
2133 
2134 static const AIOCBInfo bdrv_em_co_aiocb_info = {
2135     .aiocb_size         = sizeof(BlockAIOCBCoroutine),
2136 };
2137 
2138 static void bdrv_co_complete(BlockAIOCBCoroutine *acb)
2139 {
2140     if (!acb->need_bh) {
2141         acb->common.cb(acb->common.opaque, acb->req.error);
2142         qemu_aio_unref(acb);
2143     }
2144 }
2145 
2146 static void bdrv_co_em_bh(void *opaque)
2147 {
2148     BlockAIOCBCoroutine *acb = opaque;
2149 
2150     assert(!acb->need_bh);
2151     qemu_bh_delete(acb->bh);
2152     bdrv_co_complete(acb);
2153 }
2154 
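/* The need_bh flag appears to exist so that the completion callback never
 * runs before the bdrv_aio_*() call that created the ACB has returned.
 * While need_bh is set, bdrv_co_complete() does nothing; once the submitter
 * clears it below, a request that has already finished is completed from a
 * bottom half rather than from the submitter's own stack frame. */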
2155 static void bdrv_co_maybe_schedule_bh(BlockAIOCBCoroutine *acb)
2156 {
2157     acb->need_bh = false;
2158     if (acb->req.error != -EINPROGRESS) {
2159         BlockDriverState *bs = acb->common.bs;
2160 
2161         acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb);
2162         qemu_bh_schedule(acb->bh);
2163     }
2164 }
2165 
2166 /* Invoke bdrv_co_do_readv/bdrv_co_do_writev */
2167 static void coroutine_fn bdrv_co_do_rw(void *opaque)
2168 {
2169     BlockAIOCBCoroutine *acb = opaque;
2170     BlockDriverState *bs = acb->common.bs;
2171 
2172     if (!acb->is_write) {
2173         acb->req.error = bdrv_co_do_readv(bs, acb->req.sector,
2174             acb->req.nb_sectors, acb->req.qiov, acb->req.flags);
2175     } else {
2176         acb->req.error = bdrv_co_do_writev(bs, acb->req.sector,
2177             acb->req.nb_sectors, acb->req.qiov, acb->req.flags);
2178     }
2179 
2180     bdrv_co_complete(acb);
2181 }
2182 
2183 static BlockAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
2184                                          int64_t sector_num,
2185                                          QEMUIOVector *qiov,
2186                                          int nb_sectors,
2187                                          BdrvRequestFlags flags,
2188                                          BlockCompletionFunc *cb,
2189                                          void *opaque,
2190                                          bool is_write)
2191 {
2192     Coroutine *co;
2193     BlockAIOCBCoroutine *acb;
2194 
2195     acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
2196     acb->need_bh = true;
2197     acb->req.error = -EINPROGRESS;
2198     acb->req.sector = sector_num;
2199     acb->req.nb_sectors = nb_sectors;
2200     acb->req.qiov = qiov;
2201     acb->req.flags = flags;
2202     acb->is_write = is_write;
2203 
2204     co = qemu_coroutine_create(bdrv_co_do_rw);
2205     qemu_coroutine_enter(co, acb);
2206 
2207     bdrv_co_maybe_schedule_bh(acb);
2208     return &acb->common;
2209 }
2210 
2211 static void coroutine_fn bdrv_aio_flush_co_entry(void *opaque)
2212 {
2213     BlockAIOCBCoroutine *acb = opaque;
2214     BlockDriverState *bs = acb->common.bs;
2215 
2216     acb->req.error = bdrv_co_flush(bs);
2217     bdrv_co_complete(acb);
2218 }
2219 
2220 BlockAIOCB *bdrv_aio_flush(BlockDriverState *bs,
2221         BlockCompletionFunc *cb, void *opaque)
2222 {
2223     trace_bdrv_aio_flush(bs, opaque);
2224 
2225     Coroutine *co;
2226     BlockAIOCBCoroutine *acb;
2227 
2228     acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
2229     acb->need_bh = true;
2230     acb->req.error = -EINPROGRESS;
2231 
2232     co = qemu_coroutine_create(bdrv_aio_flush_co_entry);
2233     qemu_coroutine_enter(co, acb);
2234 
2235     bdrv_co_maybe_schedule_bh(acb);
2236     return &acb->common;
2237 }
2238 
2239 static void coroutine_fn bdrv_aio_discard_co_entry(void *opaque)
2240 {
2241     BlockAIOCBCoroutine *acb = opaque;
2242     BlockDriverState *bs = acb->common.bs;
2243 
2244     acb->req.error = bdrv_co_discard(bs, acb->req.sector, acb->req.nb_sectors);
2245     bdrv_co_complete(acb);
2246 }
2247 
2248 BlockAIOCB *bdrv_aio_discard(BlockDriverState *bs,
2249         int64_t sector_num, int nb_sectors,
2250         BlockCompletionFunc *cb, void *opaque)
2251 {
2252     Coroutine *co;
2253     BlockAIOCBCoroutine *acb;
2254 
2255     trace_bdrv_aio_discard(bs, sector_num, nb_sectors, opaque);
2256 
2257     acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
2258     acb->need_bh = true;
2259     acb->req.error = -EINPROGRESS;
2260     acb->req.sector = sector_num;
2261     acb->req.nb_sectors = nb_sectors;
2262     co = qemu_coroutine_create(bdrv_aio_discard_co_entry);
2263     qemu_coroutine_enter(co, acb);
2264 
2265     bdrv_co_maybe_schedule_bh(acb);
2266     return &acb->common;
2267 }
2268 
2269 void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
2270                    BlockCompletionFunc *cb, void *opaque)
2271 {
2272     BlockAIOCB *acb;
2273 
2274     acb = g_malloc(aiocb_info->aiocb_size);
2275     acb->aiocb_info = aiocb_info;
2276     acb->bs = bs;
2277     acb->cb = cb;
2278     acb->opaque = opaque;
2279     acb->refcnt = 1;
2280     return acb;
2281 }
2282 
2283 void qemu_aio_ref(void *p)
2284 {
2285     BlockAIOCB *acb = p;
2286     acb->refcnt++;
2287 }
2288 
2289 void qemu_aio_unref(void *p)
2290 {
2291     BlockAIOCB *acb = p;
2292     assert(acb->refcnt > 0);
2293     if (--acb->refcnt == 0) {
2294         g_free(acb);
2295     }
2296 }
2297 
2298 /**************************************************************/
2299 /* Coroutine block device emulation */
2300 
2301 typedef struct CoroutineIOCompletion {
2302     Coroutine *coroutine;
2303     int ret;
2304 } CoroutineIOCompletion;
2305 
2306 static void bdrv_co_io_em_complete(void *opaque, int ret)
2307 {
2308     CoroutineIOCompletion *co = opaque;
2309 
2310     co->ret = ret;
2311     qemu_coroutine_enter(co->coroutine, NULL);
2312 }
2313 
2314 static int coroutine_fn bdrv_co_io_em(BlockDriverState *bs, int64_t sector_num,
2315                                       int nb_sectors, QEMUIOVector *iov,
2316                                       bool is_write)
2317 {
2318     CoroutineIOCompletion co = {
2319         .coroutine = qemu_coroutine_self(),
2320     };
2321     BlockAIOCB *acb;
2322 
2323     if (is_write) {
2324         acb = bs->drv->bdrv_aio_writev(bs, sector_num, iov, nb_sectors,
2325                                        bdrv_co_io_em_complete, &co);
2326     } else {
2327         acb = bs->drv->bdrv_aio_readv(bs, sector_num, iov, nb_sectors,
2328                                       bdrv_co_io_em_complete, &co);
2329     }
2330 
2331     trace_bdrv_co_io_em(bs, sector_num, nb_sectors, is_write, acb);
2332     if (!acb) {
2333         return -EIO;
2334     }
2335     qemu_coroutine_yield();
2336 
2337     return co.ret;
2338 }
2339 
2340 static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
2341                                          int64_t sector_num, int nb_sectors,
2342                                          QEMUIOVector *iov)
2343 {
2344     return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, false);
2345 }
2346 
2347 static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
2348                                          int64_t sector_num, int nb_sectors,
2349                                          QEMUIOVector *iov)
2350 {
2351     return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, true);
2352 }
2353 
2354 static void coroutine_fn bdrv_flush_co_entry(void *opaque)
2355 {
2356     RwCo *rwco = opaque;
2357 
2358     rwco->ret = bdrv_co_flush(rwco->bs);
2359 }
2360 
2361 int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
2362 {
2363     int ret;
2364     BdrvTrackedRequest req;
2365 
2366     if (!bs || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs) ||
2367         bdrv_is_sg(bs)) {
2368         return 0;
2369     }
2370 
2371     tracked_request_begin(&req, bs, 0, 0, BDRV_TRACKED_FLUSH);
2372     /* Write back cached data to the OS even with cache=unsafe */
2373     BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_OS);
2374     if (bs->drv->bdrv_co_flush_to_os) {
2375         ret = bs->drv->bdrv_co_flush_to_os(bs);
2376         if (ret < 0) {
2377             goto out;
2378         }
2379     }
2380 
2381     /* But don't actually force it to the disk with cache=unsafe */
2382     if (bs->open_flags & BDRV_O_NO_FLUSH) {
2383         goto flush_parent;
2384     }
2385 
2386     BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_DISK);
2387     if (bs->drv->bdrv_co_flush_to_disk) {
2388         ret = bs->drv->bdrv_co_flush_to_disk(bs);
2389     } else if (bs->drv->bdrv_aio_flush) {
2390         BlockAIOCB *acb;
2391         CoroutineIOCompletion co = {
2392             .coroutine = qemu_coroutine_self(),
2393         };
2394 
2395         acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
2396         if (acb == NULL) {
2397             ret = -EIO;
2398         } else {
2399             qemu_coroutine_yield();
2400             ret = co.ret;
2401         }
2402     } else {
2403         /*
2404          * Some block drivers always operate in either writethrough or unsafe
2405          * mode and therefore don't support bdrv_flush. Usually qemu doesn't
2406          * know how the server works (because the behaviour is hardcoded or
2407          * depends on server-side configuration), so we can't ensure that
2408          * everything is safe on disk. Returning an error doesn't work because
2409          * that would break guests even if the server operates in writethrough
2410          * mode.
2411          *
2412          * Let's hope the user knows what they're doing.
2413          */
2414         ret = 0;
2415     }
2416     if (ret < 0) {
2417         goto out;
2418     }
2419 
2420     /* Now flush the underlying protocol.  It will also have BDRV_O_NO_FLUSH
2421      * in the case of cache=unsafe, so there are no useless flushes.
2422      */
2423 flush_parent:
2424     ret = bs->file ? bdrv_co_flush(bs->file->bs) : 0;
2425 out:
2426     tracked_request_end(&req);
2427     return ret;
2428 }
2429 
2430 int bdrv_flush(BlockDriverState *bs)
2431 {
2432     Coroutine *co;
2433     RwCo rwco = {
2434         .bs = bs,
2435         .ret = NOT_DONE,
2436     };
2437 
2438     if (qemu_in_coroutine()) {
2439         /* Fast-path if already in coroutine context */
2440         bdrv_flush_co_entry(&rwco);
2441     } else {
2442         AioContext *aio_context = bdrv_get_aio_context(bs);
2443 
2444         co = qemu_coroutine_create(bdrv_flush_co_entry);
2445         qemu_coroutine_enter(co, &rwco);
2446         while (rwco.ret == NOT_DONE) {
2447             aio_poll(aio_context, true);
2448         }
2449     }
2450 
2451     return rwco.ret;
2452 }
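/*
 * Illustrative sketch (not part of the original source): synchronous callers
 * simply invoke bdrv_flush() once their writes have completed; 'bs' is an
 * assumed, already-open BlockDriverState.
 *
 *     int ret = bdrv_flush(bs);
 *     if (ret < 0) {
 *         // the data is not guaranteed to be stable on disk
 *     }
 */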
2453 
2454 typedef struct DiscardCo {
2455     BlockDriverState *bs;
2456     int64_t sector_num;
2457     int nb_sectors;
2458     int ret;
2459 } DiscardCo;
2460 static void coroutine_fn bdrv_discard_co_entry(void *opaque)
2461 {
2462     DiscardCo *rwco = opaque;
2463 
2464     rwco->ret = bdrv_co_discard(rwco->bs, rwco->sector_num, rwco->nb_sectors);
2465 }
2466 
2467 int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num,
2468                                  int nb_sectors)
2469 {
2470     BdrvTrackedRequest req;
2471     int max_discard, ret;
2472 
2473     if (!bs->drv) {
2474         return -ENOMEDIUM;
2475     }
2476 
2477     ret = bdrv_check_request(bs, sector_num, nb_sectors);
2478     if (ret < 0) {
2479         return ret;
2480     } else if (bs->read_only) {
2481         return -EPERM;
2482     }
2483     assert(!(bs->open_flags & BDRV_O_INACTIVE));
2484 
2485     /* Do nothing if disabled.  */
2486     if (!(bs->open_flags & BDRV_O_UNMAP)) {
2487         return 0;
2488     }
2489 
2490     if (!bs->drv->bdrv_co_discard && !bs->drv->bdrv_aio_discard) {
2491         return 0;
2492     }
2493 
2494     tracked_request_begin(&req, bs, sector_num, nb_sectors,
2495                           BDRV_TRACKED_DISCARD);
2496     bdrv_set_dirty(bs, sector_num, nb_sectors);
2497 
2498     max_discard = MIN_NON_ZERO(bs->bl.max_discard, BDRV_REQUEST_MAX_SECTORS);
2499     while (nb_sectors > 0) {
2500         int ret;
2501         int num = nb_sectors;
2502 
2503         /* align request */
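        /* Worked example (illustration, not from the original source): with
         * discard_alignment == 8, sector_num == 5 and nb_sectors == 20, the
         * block below first clamps num to 8 and then subtracts 5 % 8, so the
         * first chunk covers 3 sectors and the next iteration starts at the
         * aligned sector 8. */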
2504         if (bs->bl.discard_alignment &&
2505             num >= bs->bl.discard_alignment &&
2506             sector_num % bs->bl.discard_alignment) {
2507             if (num > bs->bl.discard_alignment) {
2508                 num = bs->bl.discard_alignment;
2509             }
2510             num -= sector_num % bs->bl.discard_alignment;
2511         }
2512 
2513         /* limit request size */
2514         if (num > max_discard) {
2515             num = max_discard;
2516         }
2517 
2518         if (bs->drv->bdrv_co_discard) {
2519             ret = bs->drv->bdrv_co_discard(bs, sector_num, num);
2520         } else {
2521             BlockAIOCB *acb;
2522             CoroutineIOCompletion co = {
2523                 .coroutine = qemu_coroutine_self(),
2524             };
2525 
2526             acb = bs->drv->bdrv_aio_discard(bs, sector_num, num,
2527                                             bdrv_co_io_em_complete, &co);
2528             if (acb == NULL) {
2529                 ret = -EIO;
2530                 goto out;
2531             } else {
2532                 qemu_coroutine_yield();
2533                 ret = co.ret;
2534             }
2535         }
2536         if (ret && ret != -ENOTSUP) {
2537             goto out;
2538         }
2539 
2540         sector_num += num;
2541         nb_sectors -= num;
2542     }
2543     ret = 0;
2544 out:
2545     tracked_request_end(&req);
2546     return ret;
2547 }
2548 
2549 int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors)
2550 {
2551     Coroutine *co;
2552     DiscardCo rwco = {
2553         .bs = bs,
2554         .sector_num = sector_num,
2555         .nb_sectors = nb_sectors,
2556         .ret = NOT_DONE,
2557     };
2558 
2559     if (qemu_in_coroutine()) {
2560         /* Fast-path if already in coroutine context */
2561         bdrv_discard_co_entry(&rwco);
2562     } else {
2563         AioContext *aio_context = bdrv_get_aio_context(bs);
2564 
2565         co = qemu_coroutine_create(bdrv_discard_co_entry);
2566         qemu_coroutine_enter(co, &rwco);
2567         while (rwco.ret == NOT_DONE) {
2568             aio_poll(aio_context, true);
2569         }
2570     }
2571 
2572     return rwco.ret;
2573 }
2574 
2575 typedef struct {
2576     CoroutineIOCompletion *co;
2577     QEMUBH *bh;
2578 } BdrvIoctlCompletionData;
2579 
2580 static void bdrv_ioctl_bh_cb(void *opaque)
2581 {
2582     BdrvIoctlCompletionData *data = opaque;
2583 
2584     bdrv_co_io_em_complete(data->co, -ENOTSUP);
2585     qemu_bh_delete(data->bh);
2586 }
2587 
2588 static int bdrv_co_do_ioctl(BlockDriverState *bs, int req, void *buf)
2589 {
2590     BlockDriver *drv = bs->drv;
2591     BdrvTrackedRequest tracked_req;
2592     CoroutineIOCompletion co = {
2593         .coroutine = qemu_coroutine_self(),
2594     };
2595     BlockAIOCB *acb;
2596 
2597     tracked_request_begin(&tracked_req, bs, 0, 0, BDRV_TRACKED_IOCTL);
2598     if (!drv || !drv->bdrv_aio_ioctl) {
2599         co.ret = -ENOTSUP;
2600         goto out;
2601     }
2602 
2603     acb = drv->bdrv_aio_ioctl(bs, req, buf, bdrv_co_io_em_complete, &co);
2604     if (!acb) {
2605         BdrvIoctlCompletionData *data = g_new(BdrvIoctlCompletionData, 1);
2606         data->bh = aio_bh_new(bdrv_get_aio_context(bs),
2607                                 bdrv_ioctl_bh_cb, data);
2608         data->co = &co;
2609         qemu_bh_schedule(data->bh);
2610     }
2611     qemu_coroutine_yield();
2612 out:
2613     tracked_request_end(&tracked_req);
2614     return co.ret;
2615 }
2616 
2617 typedef struct {
2618     BlockDriverState *bs;
2619     int req;
2620     void *buf;
2621     int ret;
2622 } BdrvIoctlCoData;
2623 
2624 static void coroutine_fn bdrv_co_ioctl_entry(void *opaque)
2625 {
2626     BdrvIoctlCoData *data = opaque;
2627     data->ret = bdrv_co_do_ioctl(data->bs, data->req, data->buf);
2628 }
2629 
2630 /* needed for generic scsi interface */
2631 int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf)
2632 {
2633     BdrvIoctlCoData data = {
2634         .bs = bs,
2635         .req = req,
2636         .buf = buf,
2637         .ret = -EINPROGRESS,
2638     };
2639 
2640     if (qemu_in_coroutine()) {
2641         /* Fast-path if already in coroutine context */
2642         bdrv_co_ioctl_entry(&data);
2643     } else {
2644         Coroutine *co = qemu_coroutine_create(bdrv_co_ioctl_entry);
2645 
2646         qemu_coroutine_enter(co, &data);
2647         while (data.ret == -EINPROGRESS) {
2648             aio_poll(bdrv_get_aio_context(bs), true);
2649         }
2650     }
2651     return data.ret;
2652 }
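/*
 * Illustrative sketch (not part of the original source): the generic SCSI
 * pass-through code is the intended user of bdrv_ioctl().  Roughly, with a
 * request structure prepared by the caller (here 'hdr' and the SG_IO request
 * number come from the host's <scsi/sg.h>, not from this file):
 *
 *     int ret = bdrv_ioctl(bs, SG_IO, &hdr);
 *     if (ret < 0) {
 *         // -ENOTSUP if the driver provides no bdrv_aio_ioctl hook
 *     }
 */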
2653 
2654 static void coroutine_fn bdrv_co_aio_ioctl_entry(void *opaque)
2655 {
2656     BlockAIOCBCoroutine *acb = opaque;
2657     acb->req.error = bdrv_co_do_ioctl(acb->common.bs,
2658                                       acb->req.req, acb->req.buf);
2659     bdrv_co_complete(acb);
2660 }
2661 
2662 BlockAIOCB *bdrv_aio_ioctl(BlockDriverState *bs,
2663         unsigned long int req, void *buf,
2664         BlockCompletionFunc *cb, void *opaque)
2665 {
2666     BlockAIOCBCoroutine *acb = qemu_aio_get(&bdrv_em_co_aiocb_info,
2667                                             bs, cb, opaque);
2668     Coroutine *co;
2669 
2670     acb->need_bh = true;
2671     acb->req.error = -EINPROGRESS;
2672     acb->req.req = req;
2673     acb->req.buf = buf;
2674     co = qemu_coroutine_create(bdrv_co_aio_ioctl_entry);
2675     qemu_coroutine_enter(co, acb);
2676 
2677     bdrv_co_maybe_schedule_bh(acb);
2678     return &acb->common;
2679 }
2680 
2681 void *qemu_blockalign(BlockDriverState *bs, size_t size)
2682 {
2683     return qemu_memalign(bdrv_opt_mem_align(bs), size);
2684 }
2685 
2686 void *qemu_blockalign0(BlockDriverState *bs, size_t size)
2687 {
2688     return memset(qemu_blockalign(bs, size), 0, size);
2689 }
2690 
2691 void *qemu_try_blockalign(BlockDriverState *bs, size_t size)
2692 {
2693     size_t align = bdrv_opt_mem_align(bs);
2694 
2695     /* Ensure that NULL is never returned on success */
2696     assert(align > 0);
2697     if (size == 0) {
2698         size = align;
2699     }
2700 
2701     return qemu_try_memalign(align, size);
2702 }
2703 
2704 void *qemu_try_blockalign0(BlockDriverState *bs, size_t size)
2705 {
2706     void *mem = qemu_try_blockalign(bs, size);
2707 
2708     if (mem) {
2709         memset(mem, 0, size);
2710     }
2711 
2712     return mem;
2713 }
2714 
2715 /*
2716  * Check if all memory in this vector is aligned to bdrv_min_mem_align(bs).
2717  */
2718 bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
2719 {
2720     int i;
2721     size_t alignment = bdrv_min_mem_align(bs);
2722 
2723     for (i = 0; i < qiov->niov; i++) {
2724         if ((uintptr_t) qiov->iov[i].iov_base % alignment) {
2725             return false;
2726         }
2727         if (qiov->iov[i].iov_len % alignment) {
2728             return false;
2729         }
2730     }
2731 
2732     return true;
2733 }
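/*
 * Illustrative sketch (not part of the original source): building a single-
 * element QEMUIOVector on top of a block-aligned bounce buffer, which is the
 * usual way to end up with a vector that satisfies the check above.  'len'
 * is an assumed request size that is a multiple of the device's alignment.
 *
 *     void *buf = qemu_try_blockalign(bs, len);
 *     if (buf) {
 *         struct iovec iov = { .iov_base = buf, .iov_len = len };
 *         QEMUIOVector qiov;
 *
 *         qemu_iovec_init_external(&qiov, &iov, 1);
 *         // qiov can now be passed to bdrv_qiov_is_aligned(bs, &qiov)
 *         qemu_vfree(buf);
 *     }
 */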
2734 
2735 void bdrv_add_before_write_notifier(BlockDriverState *bs,
2736                                     NotifierWithReturn *notifier)
2737 {
2738     notifier_with_return_list_add(&bs->before_write_notifiers, notifier);
2739 }
2740 
2741 void bdrv_io_plug(BlockDriverState *bs)
2742 {
2743     BlockDriver *drv = bs->drv;
2744     if (drv && drv->bdrv_io_plug) {
2745         drv->bdrv_io_plug(bs);
2746     } else if (bs->file) {
2747         bdrv_io_plug(bs->file->bs);
2748     }
2749 }
2750 
2751 void bdrv_io_unplug(BlockDriverState *bs)
2752 {
2753     BlockDriver *drv = bs->drv;
2754     if (drv && drv->bdrv_io_unplug) {
2755         drv->bdrv_io_unplug(bs);
2756     } else if (bs->file) {
2757         bdrv_io_unplug(bs->file->bs);
2758     }
2759 }
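/*
 * Illustrative sketch (not part of the original source): plug/unplug bracket
 * a burst of submissions so that drivers which support it can batch them.
 * The three requests, qiovs and the callback are assumptions for the example.
 *
 *     bdrv_io_plug(bs);
 *     bdrv_aio_readv(bs, 0,  &qiov0, 8, cb, opaque);
 *     bdrv_aio_readv(bs, 8,  &qiov1, 8, cb, opaque);
 *     bdrv_aio_readv(bs, 16, &qiov2, 8, cb, opaque);
 *     bdrv_io_unplug(bs);
 */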
2760 
2761 void bdrv_flush_io_queue(BlockDriverState *bs)
2762 {
2763     BlockDriver *drv = bs->drv;
2764     if (drv && drv->bdrv_flush_io_queue) {
2765         drv->bdrv_flush_io_queue(bs);
2766     } else if (bs->file) {
2767         bdrv_flush_io_queue(bs->file->bs);
2768     }
2769     bdrv_start_throttled_reqs(bs);
2770 }
2771 
2772 void bdrv_drained_begin(BlockDriverState *bs)
2773 {
2774     if (!bs->quiesce_counter++) {
2775         aio_disable_external(bdrv_get_aio_context(bs));
2776     }
2777     bdrv_drain(bs);
2778 }
2779 
2780 void bdrv_drained_end(BlockDriverState *bs)
2781 {
2782     assert(bs->quiesce_counter > 0);
2783     if (--bs->quiesce_counter > 0) {
2784         return;
2785     }
2786     aio_enable_external(bdrv_get_aio_context(bs));
2787 }
2788
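/*
 * Illustrative sketch (not part of the original source): a drained section,
 * used by callers that need the device quiesced while they reconfigure it;
 * do_something_while_quiesced() is a placeholder.
 *
 *     bdrv_drained_begin(bs);
 *     do_something_while_quiesced(bs);   // no new external requests arrive
 *     bdrv_drained_end(bs);
 */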