xref: /qemu/block/mirror.c (revision e6f24193)
1893f7ebaSPaolo Bonzini /*
2893f7ebaSPaolo Bonzini  * Image mirroring
3893f7ebaSPaolo Bonzini  *
4893f7ebaSPaolo Bonzini  * Copyright Red Hat, Inc. 2012
5893f7ebaSPaolo Bonzini  *
6893f7ebaSPaolo Bonzini  * Authors:
7893f7ebaSPaolo Bonzini  *  Paolo Bonzini  <pbonzini@redhat.com>
8893f7ebaSPaolo Bonzini  *
9893f7ebaSPaolo Bonzini  * This work is licensed under the terms of the GNU LGPL, version 2 or later.
10893f7ebaSPaolo Bonzini  * See the COPYING.LIB file in the top-level directory.
11893f7ebaSPaolo Bonzini  *
12893f7ebaSPaolo Bonzini  */
13893f7ebaSPaolo Bonzini 
1480c71a24SPeter Maydell #include "qemu/osdep.h"
15fd4a6493SKevin Wolf #include "qemu/cutils.h"
16893f7ebaSPaolo Bonzini #include "trace.h"
17c87621eaSJohn Snow #include "block/blockjob_int.h"
18737e150eSPaolo Bonzini #include "block/block_int.h"
19373340b2SMax Reitz #include "sysemu/block-backend.h"
20da34e65cSMarkus Armbruster #include "qapi/error.h"
21cc7a8ea7SMarkus Armbruster #include "qapi/qmp/qerror.h"
22893f7ebaSPaolo Bonzini #include "qemu/ratelimit.h"
23b812f671SPaolo Bonzini #include "qemu/bitmap.h"
24893f7ebaSPaolo Bonzini 
25893f7ebaSPaolo Bonzini #define SLICE_TIME    100000000ULL /* ns */
26402a4741SPaolo Bonzini #define MAX_IN_FLIGHT 16
27b436982fSEric Blake #define MAX_IO_BYTES (1 << 20) /* 1 MiB */
28b436982fSEric Blake #define DEFAULT_MIRROR_BUF_SIZE (MAX_IN_FLIGHT * MAX_IO_BYTES)
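/* With these defaults the mirroring buffer (when buf_size is not overridden
 * by the caller) is 16 * 1 MiB = 16 MiB; mirror_free_init() below splits it
 * into granularity-sized chunks. */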
29402a4741SPaolo Bonzini 
30402a4741SPaolo Bonzini /* The mirroring buffer is a list of granularity-sized chunks.
31402a4741SPaolo Bonzini  * Free chunks are organized in a list.
32402a4741SPaolo Bonzini  */
33402a4741SPaolo Bonzini typedef struct MirrorBuffer {
34402a4741SPaolo Bonzini     QSIMPLEQ_ENTRY(MirrorBuffer) next;
35402a4741SPaolo Bonzini } MirrorBuffer;
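/* While a chunk sits on the free list its first bytes double as the QSIMPLEQ
 * link above; once handed to an I/O request the whole granularity-sized chunk
 * carries data (see mirror_free_init() and mirror_iteration_done()). */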
36893f7ebaSPaolo Bonzini 
37893f7ebaSPaolo Bonzini typedef struct MirrorBlockJob {
38893f7ebaSPaolo Bonzini     BlockJob common;
39893f7ebaSPaolo Bonzini     RateLimit limit;
40e253f4b8SKevin Wolf     BlockBackend *target;
414ef85a9cSKevin Wolf     BlockDriverState *mirror_top_bs;
424ef85a9cSKevin Wolf     BlockDriverState *source;
435bc361b8SFam Zheng     BlockDriverState *base;
444ef85a9cSKevin Wolf 
4509158f00SBenoît Canet     /* The name of the graph node to replace */
4609158f00SBenoît Canet     char *replaces;
4709158f00SBenoît Canet     /* The BDS to replace */
4809158f00SBenoît Canet     BlockDriverState *to_replace;
4909158f00SBenoît Canet     /* Used to block operations on the drive-mirror-replace target */
5009158f00SBenoît Canet     Error *replace_blocker;
5103544a6eSFam Zheng     bool is_none_mode;
52274fcceeSMax Reitz     BlockMirrorBackingMode backing_mode;
53b952b558SPaolo Bonzini     BlockdevOnError on_source_error, on_target_error;
54d63ffd87SPaolo Bonzini     bool synced;
55d63ffd87SPaolo Bonzini     bool should_complete;
56eee13dfeSPaolo Bonzini     int64_t granularity;
57b812f671SPaolo Bonzini     size_t buf_size;
58b21c7652SMax Reitz     int64_t bdev_length;
59b812f671SPaolo Bonzini     unsigned long *cow_bitmap;
60e4654d2dSFam Zheng     BdrvDirtyBitmap *dirty_bitmap;
61dc162c8eSFam Zheng     BdrvDirtyBitmapIter *dbi;
62893f7ebaSPaolo Bonzini     uint8_t *buf;
63402a4741SPaolo Bonzini     QSIMPLEQ_HEAD(, MirrorBuffer) buf_free;
64402a4741SPaolo Bonzini     int buf_free_count;
65bd48bde8SPaolo Bonzini 
6649efb1f5SDenis V. Lunev     uint64_t last_pause_ns;
67402a4741SPaolo Bonzini     unsigned long *in_flight_bitmap;
68bd48bde8SPaolo Bonzini     int in_flight;
69b436982fSEric Blake     int64_t bytes_in_flight;
70bd48bde8SPaolo Bonzini     int ret;
710fc9f8eaSFam Zheng     bool unmap;
72e424aff5SKevin Wolf     bool waiting_for_io;
73b436982fSEric Blake     int target_cluster_size;
74e5b43573SFam Zheng     int max_iov;
7590ab48ebSAnton Nefedov     bool initial_zeroing_ongoing;
76893f7ebaSPaolo Bonzini } MirrorBlockJob;
77893f7ebaSPaolo Bonzini 
78bd48bde8SPaolo Bonzini typedef struct MirrorOp {
79bd48bde8SPaolo Bonzini     MirrorBlockJob *s;
80bd48bde8SPaolo Bonzini     QEMUIOVector qiov;
81b436982fSEric Blake     int64_t offset;
82b436982fSEric Blake     uint64_t bytes;
83bd48bde8SPaolo Bonzini } MirrorOp;
84bd48bde8SPaolo Bonzini 
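/* Map an I/O error to the action configured by the user: read failures follow
 * on_source_error, write/flush failures follow on_target_error.  Either way
 * the job is no longer considered synced. */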
85b952b558SPaolo Bonzini static BlockErrorAction mirror_error_action(MirrorBlockJob *s, bool read,
86b952b558SPaolo Bonzini                                             int error)
87b952b558SPaolo Bonzini {
88b952b558SPaolo Bonzini     s->synced = false;
89b952b558SPaolo Bonzini     if (read) {
9081e254dcSKevin Wolf         return block_job_error_action(&s->common, s->on_source_error,
9181e254dcSKevin Wolf                                       true, error);
92b952b558SPaolo Bonzini     } else {
9381e254dcSKevin Wolf         return block_job_error_action(&s->common, s->on_target_error,
9481e254dcSKevin Wolf                                       false, error);
95b952b558SPaolo Bonzini     }
96b952b558SPaolo Bonzini }
97b952b558SPaolo Bonzini 
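/* Common completion path for all mirror operations: give the buffer chunks
 * back to buf_free, clear the chunks' in_flight_bitmap bits, account progress
 * on success, and re-enter the job coroutine if it is waiting in
 * mirror_wait_for_io(). */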
98bd48bde8SPaolo Bonzini static void mirror_iteration_done(MirrorOp *op, int ret)
99bd48bde8SPaolo Bonzini {
100bd48bde8SPaolo Bonzini     MirrorBlockJob *s = op->s;
101402a4741SPaolo Bonzini     struct iovec *iov;
102bd48bde8SPaolo Bonzini     int64_t chunk_num;
103b436982fSEric Blake     int i, nb_chunks;
104bd48bde8SPaolo Bonzini 
105b436982fSEric Blake     trace_mirror_iteration_done(s, op->offset, op->bytes, ret);
106bd48bde8SPaolo Bonzini 
107bd48bde8SPaolo Bonzini     s->in_flight--;
108b436982fSEric Blake     s->bytes_in_flight -= op->bytes;
109402a4741SPaolo Bonzini     iov = op->qiov.iov;
110402a4741SPaolo Bonzini     for (i = 0; i < op->qiov.niov; i++) {
111402a4741SPaolo Bonzini         MirrorBuffer *buf = (MirrorBuffer *) iov[i].iov_base;
112402a4741SPaolo Bonzini         QSIMPLEQ_INSERT_TAIL(&s->buf_free, buf, next);
113402a4741SPaolo Bonzini         s->buf_free_count++;
114402a4741SPaolo Bonzini     }
115402a4741SPaolo Bonzini 
116b436982fSEric Blake     chunk_num = op->offset / s->granularity;
117b436982fSEric Blake     nb_chunks = DIV_ROUND_UP(op->bytes, s->granularity);
118402a4741SPaolo Bonzini     bitmap_clear(s->in_flight_bitmap, chunk_num, nb_chunks);
119b21c7652SMax Reitz     if (ret >= 0) {
120b21c7652SMax Reitz         if (s->cow_bitmap) {
121bd48bde8SPaolo Bonzini             bitmap_set(s->cow_bitmap, chunk_num, nb_chunks);
122bd48bde8SPaolo Bonzini         }
12390ab48ebSAnton Nefedov         if (!s->initial_zeroing_ongoing) {
124b436982fSEric Blake             s->common.offset += op->bytes;
125b21c7652SMax Reitz         }
12690ab48ebSAnton Nefedov     }
1276df3bf8eSZhang Min     qemu_iovec_destroy(&op->qiov);
128c84b3192SPaolo Bonzini     g_free(op);
1297b770c72SStefan Hajnoczi 
130e424aff5SKevin Wolf     if (s->waiting_for_io) {
1310b8b8753SPaolo Bonzini         qemu_coroutine_enter(s->common.co);
132bd48bde8SPaolo Bonzini     }
1337b770c72SStefan Hajnoczi }
134bd48bde8SPaolo Bonzini 
135bd48bde8SPaolo Bonzini static void mirror_write_complete(void *opaque, int ret)
136bd48bde8SPaolo Bonzini {
137bd48bde8SPaolo Bonzini     MirrorOp *op = opaque;
138bd48bde8SPaolo Bonzini     MirrorBlockJob *s = op->s;
139b9e413ddSPaolo Bonzini 
140b9e413ddSPaolo Bonzini     aio_context_acquire(blk_get_aio_context(s->common.blk));
141bd48bde8SPaolo Bonzini     if (ret < 0) {
142bd48bde8SPaolo Bonzini         BlockErrorAction action;
143bd48bde8SPaolo Bonzini 
144b436982fSEric Blake         bdrv_set_dirty_bitmap(s->dirty_bitmap, op->offset >> BDRV_SECTOR_BITS,
145b436982fSEric Blake                               op->bytes >> BDRV_SECTOR_BITS);
146bd48bde8SPaolo Bonzini         action = mirror_error_action(s, false, -ret);
147a589569fSWenchao Xia         if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
148bd48bde8SPaolo Bonzini             s->ret = ret;
149bd48bde8SPaolo Bonzini         }
150bd48bde8SPaolo Bonzini     }
151bd48bde8SPaolo Bonzini     mirror_iteration_done(op, ret);
152b9e413ddSPaolo Bonzini     aio_context_release(blk_get_aio_context(s->common.blk));
153bd48bde8SPaolo Bonzini }
154bd48bde8SPaolo Bonzini 
155bd48bde8SPaolo Bonzini static void mirror_read_complete(void *opaque, int ret)
156bd48bde8SPaolo Bonzini {
157bd48bde8SPaolo Bonzini     MirrorOp *op = opaque;
158bd48bde8SPaolo Bonzini     MirrorBlockJob *s = op->s;
159b9e413ddSPaolo Bonzini 
160b9e413ddSPaolo Bonzini     aio_context_acquire(blk_get_aio_context(s->common.blk));
161bd48bde8SPaolo Bonzini     if (ret < 0) {
162bd48bde8SPaolo Bonzini         BlockErrorAction action;
163bd48bde8SPaolo Bonzini 
164b436982fSEric Blake         bdrv_set_dirty_bitmap(s->dirty_bitmap, op->offset >> BDRV_SECTOR_BITS,
165b436982fSEric Blake                               op->bytes >> BDRV_SECTOR_BITS);
166bd48bde8SPaolo Bonzini         action = mirror_error_action(s, true, -ret);
167a589569fSWenchao Xia         if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
168bd48bde8SPaolo Bonzini             s->ret = ret;
169bd48bde8SPaolo Bonzini         }
170bd48bde8SPaolo Bonzini 
171bd48bde8SPaolo Bonzini         mirror_iteration_done(op, ret);
172b9e413ddSPaolo Bonzini     } else {
173b436982fSEric Blake         blk_aio_pwritev(s->target, op->offset, &op->qiov,
17473698c30SEric Blake                         0, mirror_write_complete, op);
175bd48bde8SPaolo Bonzini     }
176b9e413ddSPaolo Bonzini     aio_context_release(blk_get_aio_context(s->common.blk));
177b9e413ddSPaolo Bonzini }
178bd48bde8SPaolo Bonzini 
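/* Clamp a request so that it does not extend past the end of the device. */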
1794150ae60SFam Zheng static inline void mirror_clip_sectors(MirrorBlockJob *s,
1804150ae60SFam Zheng                                        int64_t sector_num,
1814150ae60SFam Zheng                                        int *nb_sectors)
1824150ae60SFam Zheng {
1834150ae60SFam Zheng     *nb_sectors = MIN(*nb_sectors,
1844150ae60SFam Zheng                       s->bdev_length / BDRV_SECTOR_SIZE - sector_num);
1854150ae60SFam Zheng }
1864150ae60SFam Zheng 
187e5b43573SFam Zheng /* Round sector_num and/or nb_sectors to target cluster if COW is needed, and
188e5b43573SFam Zheng  * return the offset of the adjusted tail sector against original. */
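/* Purely illustrative example (hypothetical sizes): with a 64 KiB granularity
 * (128 sectors per chunk) and a 256 KiB target cluster, a request for sectors
 * [128, 256) whose cluster has not been copied yet is widened to [0, 512);
 * *sector_num becomes 0, *nb_sectors becomes 512, and the return value is 256
 * because the tail moved forward by that many sectors. */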
189e5b43573SFam Zheng static int mirror_cow_align(MirrorBlockJob *s,
190e5b43573SFam Zheng                             int64_t *sector_num,
191e5b43573SFam Zheng                             int *nb_sectors)
192893f7ebaSPaolo Bonzini {
193e5b43573SFam Zheng     bool need_cow;
194e5b43573SFam Zheng     int ret = 0;
195e5b43573SFam Zheng     int chunk_sectors = s->granularity >> BDRV_SECTOR_BITS;
196e5b43573SFam Zheng     int64_t align_sector_num = *sector_num;
197e5b43573SFam Zheng     int align_nb_sectors = *nb_sectors;
198e5b43573SFam Zheng     int max_sectors = chunk_sectors * s->max_iov;
199893f7ebaSPaolo Bonzini 
200e5b43573SFam Zheng     need_cow = !test_bit(*sector_num / chunk_sectors, s->cow_bitmap);
201e5b43573SFam Zheng     need_cow |= !test_bit((*sector_num + *nb_sectors - 1) / chunk_sectors,
202e5b43573SFam Zheng                           s->cow_bitmap);
203e5b43573SFam Zheng     if (need_cow) {
204244483e6SKevin Wolf         bdrv_round_sectors_to_clusters(blk_bs(s->target), *sector_num,
205244483e6SKevin Wolf                                        *nb_sectors, &align_sector_num,
206244483e6SKevin Wolf                                        &align_nb_sectors);
2078f0720ecSPaolo Bonzini     }
2088f0720ecSPaolo Bonzini 
209e5b43573SFam Zheng     if (align_nb_sectors > max_sectors) {
210e5b43573SFam Zheng         align_nb_sectors = max_sectors;
211e5b43573SFam Zheng         if (need_cow) {
212e5b43573SFam Zheng             align_nb_sectors = QEMU_ALIGN_DOWN(align_nb_sectors,
213b436982fSEric Blake                                                s->target_cluster_size >>
214b436982fSEric Blake                                                BDRV_SECTOR_BITS);
215e5b43573SFam Zheng         }
216e5b43573SFam Zheng     }
2174150ae60SFam Zheng     /* Clipping may result in align_nb_sectors unaligned to chunk boundary, but
2184150ae60SFam Zheng      * that doesn't matter because it's already the end of source image. */
2194150ae60SFam Zheng     mirror_clip_sectors(s, align_sector_num, &align_nb_sectors);
220402a4741SPaolo Bonzini 
221e5b43573SFam Zheng     ret = align_sector_num + align_nb_sectors - (*sector_num + *nb_sectors);
222e5b43573SFam Zheng     *sector_num = align_sector_num;
223e5b43573SFam Zheng     *nb_sectors = align_nb_sectors;
224e5b43573SFam Zheng     assert(ret >= 0);
225e5b43573SFam Zheng     return ret;
226e5b43573SFam Zheng }
227e5b43573SFam Zheng 
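/* Yield the job coroutine until one in-flight operation completes;
 * mirror_iteration_done() re-enters it.  Only one waiter at a time is
 * allowed, hence the assertion below. */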
22821cd917fSFam Zheng static inline void mirror_wait_for_io(MirrorBlockJob *s)
22921cd917fSFam Zheng {
23021cd917fSFam Zheng     assert(!s->waiting_for_io);
23121cd917fSFam Zheng     s->waiting_for_io = true;
23221cd917fSFam Zheng     qemu_coroutine_yield();
23321cd917fSFam Zheng     s->waiting_for_io = false;
23421cd917fSFam Zheng }
23521cd917fSFam Zheng 
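/* Helper for mirror_iteration(): clamp the request to buf_size and to max_iov
 * chunks, widen it with mirror_cow_align() when the target needs COW, back it
 * with chunks taken from buf_free (waiting for in-flight I/O if the list runs
 * dry), and submit it with blk_aio_preadv(); the completion callback chains
 * into the target write (see mirror_read_complete()). */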
236e5b43573SFam Zheng /* Submit async read while handling COW.
23717612955SJohn Snow  * Returns: The number of sectors copied after and including sector_num,
23817612955SJohn Snow  *          excluding any sectors copied prior to sector_num due to alignment.
23917612955SJohn Snow  *          This will be nb_sectors if no alignment is necessary, or
240e5b43573SFam Zheng  *          (new_end - sector_num) if tail is rounded up or down due to
241e5b43573SFam Zheng  *          alignment or buffer limit.
242402a4741SPaolo Bonzini  */
243e5b43573SFam Zheng static int mirror_do_read(MirrorBlockJob *s, int64_t sector_num,
244e5b43573SFam Zheng                           int nb_sectors)
245e5b43573SFam Zheng {
246e253f4b8SKevin Wolf     BlockBackend *source = s->common.blk;
247e5b43573SFam Zheng     int sectors_per_chunk, nb_chunks;
24817612955SJohn Snow     int ret;
249e5b43573SFam Zheng     MirrorOp *op;
250e4808881SJohn Snow     int max_sectors;
251402a4741SPaolo Bonzini 
252e5b43573SFam Zheng     sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
253e4808881SJohn Snow     max_sectors = sectors_per_chunk * s->max_iov;
254e5b43573SFam Zheng 
255e5b43573SFam Zheng     /* We can only handle as much as buf_size at a time. */
256e5b43573SFam Zheng     nb_sectors = MIN(s->buf_size >> BDRV_SECTOR_BITS, nb_sectors);
257e4808881SJohn Snow     nb_sectors = MIN(max_sectors, nb_sectors);
258e5b43573SFam Zheng     assert(nb_sectors);
25917612955SJohn Snow     ret = nb_sectors;
260e5b43573SFam Zheng 
261e5b43573SFam Zheng     if (s->cow_bitmap) {
262e5b43573SFam Zheng         ret += mirror_cow_align(s, &sector_num, &nb_sectors);
263e5b43573SFam Zheng     }
264e5b43573SFam Zheng     assert(nb_sectors << BDRV_SECTOR_BITS <= s->buf_size);
265e5b43573SFam Zheng     /* The sector range must be aligned to the granularity because:
266e5b43573SFam Zheng      * 1) Caller passes in aligned values;
267e5b43573SFam Zheng      * 2) mirror_cow_align is used only when target cluster is larger. */
268e5b43573SFam Zheng     assert(!(sector_num % sectors_per_chunk));
2694150ae60SFam Zheng     nb_chunks = DIV_ROUND_UP(nb_sectors, sectors_per_chunk);
270e5b43573SFam Zheng 
271e5b43573SFam Zheng     while (s->buf_free_count < nb_chunks) {
2725cb1a49eSEric Blake         trace_mirror_yield_in_flight(s, sector_num * BDRV_SECTOR_SIZE,
2735cb1a49eSEric Blake                                      s->in_flight);
27421cd917fSFam Zheng         mirror_wait_for_io(s);
275b812f671SPaolo Bonzini     }
276b812f671SPaolo Bonzini 
277bd48bde8SPaolo Bonzini     /* Allocate a MirrorOp that is used as an AIO callback.  */
278c84b3192SPaolo Bonzini     op = g_new(MirrorOp, 1);
279bd48bde8SPaolo Bonzini     op->s = s;
280b436982fSEric Blake     op->offset = sector_num * BDRV_SECTOR_SIZE;
281b436982fSEric Blake     op->bytes = nb_sectors * BDRV_SECTOR_SIZE;
282402a4741SPaolo Bonzini 
283402a4741SPaolo Bonzini     /* Now make a QEMUIOVector taking enough granularity-sized chunks
284402a4741SPaolo Bonzini      * from s->buf_free.
285402a4741SPaolo Bonzini      */
286402a4741SPaolo Bonzini     qemu_iovec_init(&op->qiov, nb_chunks);
287402a4741SPaolo Bonzini     while (nb_chunks-- > 0) {
288402a4741SPaolo Bonzini         MirrorBuffer *buf = QSIMPLEQ_FIRST(&s->buf_free);
289e5b43573SFam Zheng         size_t remaining = nb_sectors * BDRV_SECTOR_SIZE - op->qiov.size;
2905a0f6fd5SKevin Wolf 
291402a4741SPaolo Bonzini         QSIMPLEQ_REMOVE_HEAD(&s->buf_free, next);
292402a4741SPaolo Bonzini         s->buf_free_count--;
2935a0f6fd5SKevin Wolf         qemu_iovec_add(&op->qiov, buf, MIN(s->granularity, remaining));
294402a4741SPaolo Bonzini     }
295402a4741SPaolo Bonzini 
296893f7ebaSPaolo Bonzini     /* Copy the dirty cluster.  */
297bd48bde8SPaolo Bonzini     s->in_flight++;
298b436982fSEric Blake     s->bytes_in_flight += nb_sectors * BDRV_SECTOR_SIZE;
2995cb1a49eSEric Blake     trace_mirror_one_iteration(s, sector_num * BDRV_SECTOR_SIZE,
3005cb1a49eSEric Blake                                nb_sectors * BDRV_SECTOR_SIZE);
301dcfb3bebSFam Zheng 
30273698c30SEric Blake     blk_aio_preadv(source, sector_num * BDRV_SECTOR_SIZE, &op->qiov, 0,
303bd48bde8SPaolo Bonzini                    mirror_read_complete, op);
304e5b43573SFam Zheng     return ret;
305e5b43573SFam Zheng }
306e5b43573SFam Zheng 
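/* Submit an asynchronous write-zeroes or discard to the target.  The op's
 * qiov stays zeroed, and both variants complete through
 * mirror_write_complete(), so failures are treated as target errors. */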
307e5b43573SFam Zheng static void mirror_do_zero_or_discard(MirrorBlockJob *s,
308*e6f24193SEric Blake                                       int64_t offset,
309*e6f24193SEric Blake                                       uint64_t bytes,
310e5b43573SFam Zheng                                       bool is_discard)
311e5b43573SFam Zheng {
312e5b43573SFam Zheng     MirrorOp *op;
313e5b43573SFam Zheng 
314e5b43573SFam Zheng     /* Allocate a MirrorOp that is used as an AIO callback. The qiov is zeroed
315e5b43573SFam Zheng      * so the freeing in mirror_iteration_done is a nop. */
316e5b43573SFam Zheng     op = g_new0(MirrorOp, 1);
317e5b43573SFam Zheng     op->s = s;
318*e6f24193SEric Blake     op->offset = offset;
319*e6f24193SEric Blake     op->bytes = bytes;
320e5b43573SFam Zheng 
321e5b43573SFam Zheng     s->in_flight++;
322*e6f24193SEric Blake     s->bytes_in_flight += bytes;
323e5b43573SFam Zheng     if (is_discard) {
324*e6f24193SEric Blake         blk_aio_pdiscard(s->target, offset,
325b436982fSEric Blake                          op->bytes, mirror_write_complete, op);
326e5b43573SFam Zheng     } else {
327*e6f24193SEric Blake         blk_aio_pwrite_zeroes(s->target, offset,
328b436982fSEric Blake                               op->bytes, s->unmap ? BDRV_REQ_MAY_UNMAP : 0,
329dcfb3bebSFam Zheng                               mirror_write_complete, op);
330e5b43573SFam Zheng     }
331e5b43573SFam Zheng }
332e5b43573SFam Zheng 
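/* One pass of the copy loop: pick the next dirty chunk from the bitmap
 * iterator (wrapping around at the end of the device), extend the range over
 * consecutive dirty chunks up to buf_size, then walk the range and, based on
 * bdrv_get_block_status_above(), either copy the data, write zeroes or
 * discard on the target.  Returns the delay in nanoseconds that the rate
 * limiter asks the caller to sleep. */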
333e5b43573SFam Zheng static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
334e5b43573SFam Zheng {
3354ef85a9cSKevin Wolf     BlockDriverState *source = s->source;
3369c83625bSMax Reitz     int64_t sector_num, first_chunk;
337e5b43573SFam Zheng     uint64_t delay_ns = 0;
338e5b43573SFam Zheng     /* At least the first dirty chunk is mirrored in one iteration. */
339e5b43573SFam Zheng     int nb_chunks = 1;
340e5b43573SFam Zheng     int64_t end = s->bdev_length / BDRV_SECTOR_SIZE;
341e5b43573SFam Zheng     int sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
3424b5004d9SDenis V. Lunev     bool write_zeroes_ok = bdrv_can_write_zeroes_with_unmap(blk_bs(s->target));
343b436982fSEric Blake     int max_io_bytes = MAX(s->buf_size / MAX_IN_FLIGHT, MAX_IO_BYTES);
344e5b43573SFam Zheng 
345b64bd51eSPaolo Bonzini     bdrv_dirty_bitmap_lock(s->dirty_bitmap);
346dc162c8eSFam Zheng     sector_num = bdrv_dirty_iter_next(s->dbi);
347e5b43573SFam Zheng     if (sector_num < 0) {
348dc162c8eSFam Zheng         bdrv_set_dirty_iter(s->dbi, 0);
349dc162c8eSFam Zheng         sector_num = bdrv_dirty_iter_next(s->dbi);
3505cb1a49eSEric Blake         trace_mirror_restart_iter(s, bdrv_get_dirty_count(s->dirty_bitmap) *
3515cb1a49eSEric Blake                                   BDRV_SECTOR_SIZE);
352e5b43573SFam Zheng         assert(sector_num >= 0);
353e5b43573SFam Zheng     }
354b64bd51eSPaolo Bonzini     bdrv_dirty_bitmap_unlock(s->dirty_bitmap);
355e5b43573SFam Zheng 
3569c83625bSMax Reitz     first_chunk = sector_num / sectors_per_chunk;
3579c83625bSMax Reitz     while (test_bit(first_chunk, s->in_flight_bitmap)) {
3585cb1a49eSEric Blake         trace_mirror_yield_in_flight(s, sector_num * BDRV_SECTOR_SIZE,
3595cb1a49eSEric Blake                                      s->in_flight);
3609c83625bSMax Reitz         mirror_wait_for_io(s);
3619c83625bSMax Reitz     }
3629c83625bSMax Reitz 
363565ac01fSStefan Hajnoczi     block_job_pause_point(&s->common);
364565ac01fSStefan Hajnoczi 
365e5b43573SFam Zheng     /* Find the number of consecutive dirty chunks following the first dirty
366e5b43573SFam Zheng      * one, and wait for in-flight requests in them. */
367b64bd51eSPaolo Bonzini     bdrv_dirty_bitmap_lock(s->dirty_bitmap);
368e5b43573SFam Zheng     while (nb_chunks * sectors_per_chunk < (s->buf_size >> BDRV_SECTOR_BITS)) {
369dc162c8eSFam Zheng         int64_t next_dirty;
370e5b43573SFam Zheng         int64_t next_sector = sector_num + nb_chunks * sectors_per_chunk;
371e5b43573SFam Zheng         int64_t next_chunk = next_sector / sectors_per_chunk;
372e5b43573SFam Zheng         if (next_sector >= end ||
373b64bd51eSPaolo Bonzini             !bdrv_get_dirty_locked(source, s->dirty_bitmap, next_sector)) {
374e5b43573SFam Zheng             break;
375e5b43573SFam Zheng         }
376e5b43573SFam Zheng         if (test_bit(next_chunk, s->in_flight_bitmap)) {
377e5b43573SFam Zheng             break;
378e5b43573SFam Zheng         }
3799c83625bSMax Reitz 
380dc162c8eSFam Zheng         next_dirty = bdrv_dirty_iter_next(s->dbi);
381dc162c8eSFam Zheng         if (next_dirty > next_sector || next_dirty < 0) {
382f27a2742SMax Reitz             /* The bitmap iterator's cache is stale, refresh it */
383dc162c8eSFam Zheng             bdrv_set_dirty_iter(s->dbi, next_sector);
384dc162c8eSFam Zheng             next_dirty = bdrv_dirty_iter_next(s->dbi);
385f27a2742SMax Reitz         }
386dc162c8eSFam Zheng         assert(next_dirty == next_sector);
387e5b43573SFam Zheng         nb_chunks++;
388e5b43573SFam Zheng     }
389e5b43573SFam Zheng 
390e5b43573SFam Zheng     /* Clear dirty bits before querying the block status, because
391e5b43573SFam Zheng      * calling bdrv_get_block_status_above could yield - if some blocks are
392e5b43573SFam Zheng      * marked dirty in this window, we need to know.
393e5b43573SFam Zheng      */
394b64bd51eSPaolo Bonzini     bdrv_reset_dirty_bitmap_locked(s->dirty_bitmap, sector_num,
395e5b43573SFam Zheng                                   nb_chunks * sectors_per_chunk);
396b64bd51eSPaolo Bonzini     bdrv_dirty_bitmap_unlock(s->dirty_bitmap);
397b64bd51eSPaolo Bonzini 
398e5b43573SFam Zheng     bitmap_set(s->in_flight_bitmap, sector_num / sectors_per_chunk, nb_chunks);
399e5b43573SFam Zheng     while (nb_chunks > 0 && sector_num < end) {
40039c11580SJohn Snow         int64_t ret;
401f3e4ce4aSEric Blake         int io_sectors;
402f3e4ce4aSEric Blake         int64_t io_bytes_acct;
403e5b43573SFam Zheng         BlockDriverState *file;
404e5b43573SFam Zheng         enum MirrorMethod {
405e5b43573SFam Zheng             MIRROR_METHOD_COPY,
406e5b43573SFam Zheng             MIRROR_METHOD_ZERO,
407e5b43573SFam Zheng             MIRROR_METHOD_DISCARD
408e5b43573SFam Zheng         } mirror_method = MIRROR_METHOD_COPY;
409e5b43573SFam Zheng 
410e5b43573SFam Zheng         assert(!(sector_num % sectors_per_chunk));
411e5b43573SFam Zheng         ret = bdrv_get_block_status_above(source, NULL, sector_num,
412e5b43573SFam Zheng                                           nb_chunks * sectors_per_chunk,
413e5b43573SFam Zheng                                           &io_sectors, &file);
414e5b43573SFam Zheng         if (ret < 0) {
415b436982fSEric Blake             io_sectors = MIN(nb_chunks * sectors_per_chunk,
416b436982fSEric Blake                              max_io_bytes >> BDRV_SECTOR_BITS);
4170965a41eSVladimir Sementsov-Ogievskiy         } else if (ret & BDRV_BLOCK_DATA) {
418b436982fSEric Blake             io_sectors = MIN(io_sectors, max_io_bytes >> BDRV_SECTOR_BITS);
419e5b43573SFam Zheng         }
420e5b43573SFam Zheng 
421e5b43573SFam Zheng         io_sectors -= io_sectors % sectors_per_chunk;
422e5b43573SFam Zheng         if (io_sectors < sectors_per_chunk) {
423e5b43573SFam Zheng             io_sectors = sectors_per_chunk;
424e5b43573SFam Zheng         } else if (ret >= 0 && !(ret & BDRV_BLOCK_DATA)) {
425e5b43573SFam Zheng             int64_t target_sector_num;
426e5b43573SFam Zheng             int target_nb_sectors;
427244483e6SKevin Wolf             bdrv_round_sectors_to_clusters(blk_bs(s->target), sector_num,
428244483e6SKevin Wolf                                            io_sectors,  &target_sector_num,
429244483e6SKevin Wolf                                            &target_nb_sectors);
430e5b43573SFam Zheng             if (target_sector_num == sector_num &&
431e5b43573SFam Zheng                 target_nb_sectors == io_sectors) {
432e5b43573SFam Zheng                 mirror_method = ret & BDRV_BLOCK_ZERO ?
433e5b43573SFam Zheng                                     MIRROR_METHOD_ZERO :
434e5b43573SFam Zheng                                     MIRROR_METHOD_DISCARD;
435e5b43573SFam Zheng             }
436e5b43573SFam Zheng         }
437e5b43573SFam Zheng 
438cf56a3c6SDenis V. Lunev         while (s->in_flight >= MAX_IN_FLIGHT) {
4395cb1a49eSEric Blake             trace_mirror_yield_in_flight(s, sector_num * BDRV_SECTOR_SIZE,
4405cb1a49eSEric Blake                                          s->in_flight);
441cf56a3c6SDenis V. Lunev             mirror_wait_for_io(s);
442cf56a3c6SDenis V. Lunev         }
443cf56a3c6SDenis V. Lunev 
444dbaa7b57SVladimir Sementsov-Ogievskiy         if (s->ret < 0) {
445dbaa7b57SVladimir Sementsov-Ogievskiy             return 0;
446dbaa7b57SVladimir Sementsov-Ogievskiy         }
447dbaa7b57SVladimir Sementsov-Ogievskiy 
4484150ae60SFam Zheng         mirror_clip_sectors(s, sector_num, &io_sectors);
449e5b43573SFam Zheng         switch (mirror_method) {
450e5b43573SFam Zheng         case MIRROR_METHOD_COPY:
451e5b43573SFam Zheng             io_sectors = mirror_do_read(s, sector_num, io_sectors);
452f3e4ce4aSEric Blake             io_bytes_acct = io_sectors * BDRV_SECTOR_SIZE;
453e5b43573SFam Zheng             break;
454e5b43573SFam Zheng         case MIRROR_METHOD_ZERO:
455e5b43573SFam Zheng         case MIRROR_METHOD_DISCARD:
456*e6f24193SEric Blake             mirror_do_zero_or_discard(s, sector_num * BDRV_SECTOR_SIZE,
457*e6f24193SEric Blake                                       io_sectors * BDRV_SECTOR_SIZE,
4584b5004d9SDenis V. Lunev                                       mirror_method == MIRROR_METHOD_DISCARD);
4594b5004d9SDenis V. Lunev             if (write_zeroes_ok) {
460f3e4ce4aSEric Blake                 io_bytes_acct = 0;
4614b5004d9SDenis V. Lunev             } else {
462f3e4ce4aSEric Blake                 io_bytes_acct = io_sectors * BDRV_SECTOR_SIZE;
4634b5004d9SDenis V. Lunev             }
464e5b43573SFam Zheng             break;
465e5b43573SFam Zheng         default:
466e5b43573SFam Zheng             abort();
467e5b43573SFam Zheng         }
468e5b43573SFam Zheng         assert(io_sectors);
469e5b43573SFam Zheng         sector_num += io_sectors;
4704150ae60SFam Zheng         nb_chunks -= DIV_ROUND_UP(io_sectors, sectors_per_chunk);
471f14a39ccSSascha Silbe         if (s->common.speed) {
472f3e4ce4aSEric Blake             delay_ns = ratelimit_calculate_delay(&s->limit, io_bytes_acct);
473f14a39ccSSascha Silbe         }
474dcfb3bebSFam Zheng     }
475cc8c9d6cSPaolo Bonzini     return delay_ns;
476893f7ebaSPaolo Bonzini }
477b952b558SPaolo Bonzini 
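/* Carve s->buf into granularity-sized chunks and thread them all onto the
 * buf_free list. */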
478402a4741SPaolo Bonzini static void mirror_free_init(MirrorBlockJob *s)
479402a4741SPaolo Bonzini {
480402a4741SPaolo Bonzini     int granularity = s->granularity;
481402a4741SPaolo Bonzini     size_t buf_size = s->buf_size;
482402a4741SPaolo Bonzini     uint8_t *buf = s->buf;
483402a4741SPaolo Bonzini 
484402a4741SPaolo Bonzini     assert(s->buf_free_count == 0);
485402a4741SPaolo Bonzini     QSIMPLEQ_INIT(&s->buf_free);
486402a4741SPaolo Bonzini     while (buf_size != 0) {
487402a4741SPaolo Bonzini         MirrorBuffer *cur = (MirrorBuffer *)buf;
488402a4741SPaolo Bonzini         QSIMPLEQ_INSERT_TAIL(&s->buf_free, cur, next);
489402a4741SPaolo Bonzini         s->buf_free_count++;
490402a4741SPaolo Bonzini         buf_size -= granularity;
491402a4741SPaolo Bonzini         buf += granularity;
492402a4741SPaolo Bonzini     }
493402a4741SPaolo Bonzini }
494402a4741SPaolo Bonzini 
495bae8196dSPaolo Bonzini /* This is also used for the .pause callback. There is no matching
496bae8196dSPaolo Bonzini  * mirror_resume() because mirror_run() will begin iterating again
497bae8196dSPaolo Bonzini  * when the job is resumed.
498bae8196dSPaolo Bonzini  */
499bae8196dSPaolo Bonzini static void mirror_wait_for_all_io(MirrorBlockJob *s)
500bd48bde8SPaolo Bonzini {
501bd48bde8SPaolo Bonzini     while (s->in_flight > 0) {
50221cd917fSFam Zheng         mirror_wait_for_io(s);
503bd48bde8SPaolo Bonzini     }
504893f7ebaSPaolo Bonzini }
505893f7ebaSPaolo Bonzini 
5065a7e7a0bSStefan Hajnoczi typedef struct {
5075a7e7a0bSStefan Hajnoczi     int ret;
5085a7e7a0bSStefan Hajnoczi } MirrorExitData;
5095a7e7a0bSStefan Hajnoczi 
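/* Runs in the main loop after mirror_run() finishes: drop the write/resize
 * permissions that are no longer needed, optionally rewire the target's
 * backing chain, replace the source (or the explicitly requested node) with
 * the target on successful completion, remove the mirror_top filter node and
 * finally complete the block job. */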
5105a7e7a0bSStefan Hajnoczi static void mirror_exit(BlockJob *job, void *opaque)
5115a7e7a0bSStefan Hajnoczi {
5125a7e7a0bSStefan Hajnoczi     MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
5135a7e7a0bSStefan Hajnoczi     MirrorExitData *data = opaque;
5145a7e7a0bSStefan Hajnoczi     AioContext *replace_aio_context = NULL;
5154ef85a9cSKevin Wolf     BlockDriverState *src = s->source;
516e253f4b8SKevin Wolf     BlockDriverState *target_bs = blk_bs(s->target);
5174ef85a9cSKevin Wolf     BlockDriverState *mirror_top_bs = s->mirror_top_bs;
51812fa4af6SKevin Wolf     Error *local_err = NULL;
5193f09bfbcSKevin Wolf 
5202119882cSPaolo Bonzini     bdrv_release_dirty_bitmap(src, s->dirty_bitmap);
5212119882cSPaolo Bonzini 
5223f09bfbcSKevin Wolf     /* Make sure that the source BDS doesn't go away until we have called
5233f09bfbcSKevin Wolf      * block_job_completed(). */
5243f09bfbcSKevin Wolf     bdrv_ref(src);
5254ef85a9cSKevin Wolf     bdrv_ref(mirror_top_bs);
5267d9fcb39SKevin Wolf     bdrv_ref(target_bs);
5277d9fcb39SKevin Wolf 
5287d9fcb39SKevin Wolf     /* Remove target parent that still uses BLK_PERM_WRITE/RESIZE before
5297d9fcb39SKevin Wolf      * inserting target_bs at s->to_replace, where we might not be able to get
53063c8ef28SKevin Wolf      * these permissions.
53163c8ef28SKevin Wolf      *
53263c8ef28SKevin Wolf      * Note that blk_unref() alone doesn't necessarily drop permissions because
53363c8ef28SKevin Wolf      * we might be running nested inside mirror_drain(), which takes an extra
53463c8ef28SKevin Wolf      * reference, so use an explicit blk_set_perm() first. */
53563c8ef28SKevin Wolf     blk_set_perm(s->target, 0, BLK_PERM_ALL, &error_abort);
5367d9fcb39SKevin Wolf     blk_unref(s->target);
5377d9fcb39SKevin Wolf     s->target = NULL;
5384ef85a9cSKevin Wolf 
5394ef85a9cSKevin Wolf     /* We don't access the source any more. Dropping any WRITE/RESIZE is
5404ef85a9cSKevin Wolf      * required before it could become a backing file of target_bs. */
5414ef85a9cSKevin Wolf     bdrv_child_try_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL,
5424ef85a9cSKevin Wolf                             &error_abort);
5434ef85a9cSKevin Wolf     if (s->backing_mode == MIRROR_SOURCE_BACKING_CHAIN) {
5444ef85a9cSKevin Wolf         BlockDriverState *backing = s->is_none_mode ? src : s->base;
5454ef85a9cSKevin Wolf         if (backing_bs(target_bs) != backing) {
54612fa4af6SKevin Wolf             bdrv_set_backing_hd(target_bs, backing, &local_err);
54712fa4af6SKevin Wolf             if (local_err) {
54812fa4af6SKevin Wolf                 error_report_err(local_err);
54912fa4af6SKevin Wolf                 data->ret = -EPERM;
55012fa4af6SKevin Wolf             }
5514ef85a9cSKevin Wolf         }
5524ef85a9cSKevin Wolf     }
5535a7e7a0bSStefan Hajnoczi 
5545a7e7a0bSStefan Hajnoczi     if (s->to_replace) {
5555a7e7a0bSStefan Hajnoczi         replace_aio_context = bdrv_get_aio_context(s->to_replace);
5565a7e7a0bSStefan Hajnoczi         aio_context_acquire(replace_aio_context);
5575a7e7a0bSStefan Hajnoczi     }
5585a7e7a0bSStefan Hajnoczi 
5595a7e7a0bSStefan Hajnoczi     if (s->should_complete && data->ret == 0) {
560e253f4b8SKevin Wolf         BlockDriverState *to_replace = src;
5615a7e7a0bSStefan Hajnoczi         if (s->to_replace) {
5625a7e7a0bSStefan Hajnoczi             to_replace = s->to_replace;
5635a7e7a0bSStefan Hajnoczi         }
56440365552SKevin Wolf 
565e253f4b8SKevin Wolf         if (bdrv_get_flags(target_bs) != bdrv_get_flags(to_replace)) {
566e253f4b8SKevin Wolf             bdrv_reopen(target_bs, bdrv_get_flags(to_replace), NULL);
5675a7e7a0bSStefan Hajnoczi         }
568b8804815SKevin Wolf 
569b8804815SKevin Wolf         /* The mirror job has no requests in flight any more, but we need to
570b8804815SKevin Wolf          * drain potential other users of the BDS before changing the graph. */
571e253f4b8SKevin Wolf         bdrv_drained_begin(target_bs);
5725fe31c25SKevin Wolf         bdrv_replace_node(to_replace, target_bs, &local_err);
573e253f4b8SKevin Wolf         bdrv_drained_end(target_bs);
5745fe31c25SKevin Wolf         if (local_err) {
5755fe31c25SKevin Wolf             error_report_err(local_err);
5765fe31c25SKevin Wolf             data->ret = -EPERM;
5775fe31c25SKevin Wolf         }
5785a7e7a0bSStefan Hajnoczi     }
5795a7e7a0bSStefan Hajnoczi     if (s->to_replace) {
5805a7e7a0bSStefan Hajnoczi         bdrv_op_unblock_all(s->to_replace, s->replace_blocker);
5815a7e7a0bSStefan Hajnoczi         error_free(s->replace_blocker);
5825a7e7a0bSStefan Hajnoczi         bdrv_unref(s->to_replace);
5835a7e7a0bSStefan Hajnoczi     }
5845a7e7a0bSStefan Hajnoczi     if (replace_aio_context) {
5855a7e7a0bSStefan Hajnoczi         aio_context_release(replace_aio_context);
5865a7e7a0bSStefan Hajnoczi     }
5875a7e7a0bSStefan Hajnoczi     g_free(s->replaces);
5887d9fcb39SKevin Wolf     bdrv_unref(target_bs);
5894ef85a9cSKevin Wolf 
5904ef85a9cSKevin Wolf     /* Remove the mirror filter driver from the graph. Before this, get rid of
5914ef85a9cSKevin Wolf      * the blockers on the intermediate nodes so that the resulting state is
5920bf74767SKevin Wolf      * valid. Also give up permissions on mirror_top_bs->backing, which might
5930bf74767SKevin Wolf      * block the removal. */
5944ef85a9cSKevin Wolf     block_job_remove_all_bdrv(job);
595c1cef672SFam Zheng     bdrv_child_try_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL,
596c1cef672SFam Zheng                             &error_abort);
5975fe31c25SKevin Wolf     bdrv_replace_node(mirror_top_bs, backing_bs(mirror_top_bs), &error_abort);
5984ef85a9cSKevin Wolf 
5994ef85a9cSKevin Wolf     /* We just changed the BDS the job BB refers to (with either or both of the
6005fe31c25SKevin Wolf      * bdrv_replace_node() calls), so switch the BB back so the cleanup does
6015fe31c25SKevin Wolf      * the right thing. We don't need any permissions any more now. */
6024ef85a9cSKevin Wolf     blk_remove_bs(job->blk);
6034ef85a9cSKevin Wolf     blk_set_perm(job->blk, 0, BLK_PERM_ALL, &error_abort);
6044ef85a9cSKevin Wolf     blk_insert_bs(job->blk, mirror_top_bs, &error_abort);
6054ef85a9cSKevin Wolf 
6065a7e7a0bSStefan Hajnoczi     block_job_completed(&s->common, data->ret);
6074ef85a9cSKevin Wolf 
6085a7e7a0bSStefan Hajnoczi     g_free(data);
609176c3699SFam Zheng     bdrv_drained_end(src);
6104ef85a9cSKevin Wolf     bdrv_unref(mirror_top_bs);
6113f09bfbcSKevin Wolf     bdrv_unref(src);
6125a7e7a0bSStefan Hajnoczi }
6135a7e7a0bSStefan Hajnoczi 
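/* Once per SLICE_TIME, sleep for 0 ns so that the coroutine yields and other
 * users of the AioContext can make progress; otherwise just honour a pending
 * pause request. */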
61449efb1f5SDenis V. Lunev static void mirror_throttle(MirrorBlockJob *s)
61549efb1f5SDenis V. Lunev {
61649efb1f5SDenis V. Lunev     int64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
61749efb1f5SDenis V. Lunev 
61849efb1f5SDenis V. Lunev     if (now - s->last_pause_ns > SLICE_TIME) {
61949efb1f5SDenis V. Lunev         s->last_pause_ns = now;
62049efb1f5SDenis V. Lunev         block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, 0);
62149efb1f5SDenis V. Lunev     } else {
62249efb1f5SDenis V. Lunev         block_job_pause_point(&s->common);
62349efb1f5SDenis V. Lunev     }
62449efb1f5SDenis V. Lunev }
62549efb1f5SDenis V. Lunev 
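/* Prepare the dirty bitmap for the initial sync.  If the target has no
 * backing file and cannot be assumed to read as zeroes, the whole device is
 * either marked dirty (when efficient zeroing is unavailable; the scan below
 * is then skipped) or pre-zeroed with write-zeroes requests; afterwards the
 * source's allocation status above 'base' is scanned and every allocated
 * extent is marked dirty. */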
626c0b363adSDenis V. Lunev static int coroutine_fn mirror_dirty_init(MirrorBlockJob *s)
627c0b363adSDenis V. Lunev {
628c0b363adSDenis V. Lunev     int64_t sector_num, end;
629c0b363adSDenis V. Lunev     BlockDriverState *base = s->base;
6304ef85a9cSKevin Wolf     BlockDriverState *bs = s->source;
631c0b363adSDenis V. Lunev     BlockDriverState *target_bs = blk_bs(s->target);
632c0b363adSDenis V. Lunev     int ret, n;
633c0b363adSDenis V. Lunev 
634c0b363adSDenis V. Lunev     end = s->bdev_length / BDRV_SECTOR_SIZE;
635c0b363adSDenis V. Lunev 
636b7d5062cSDenis V. Lunev     if (base == NULL && !bdrv_has_zero_init(target_bs)) {
637c7c2769cSDenis V. Lunev         if (!bdrv_can_write_zeroes_with_unmap(target_bs)) {
638b7d5062cSDenis V. Lunev             bdrv_set_dirty_bitmap(s->dirty_bitmap, 0, end);
639b7d5062cSDenis V. Lunev             return 0;
640b7d5062cSDenis V. Lunev         }
641b7d5062cSDenis V. Lunev 
64290ab48ebSAnton Nefedov         s->initial_zeroing_ongoing = true;
643c7c2769cSDenis V. Lunev         for (sector_num = 0; sector_num < end; ) {
644c7c2769cSDenis V. Lunev             int nb_sectors = MIN(end - sector_num,
645c7c2769cSDenis V. Lunev                 QEMU_ALIGN_DOWN(INT_MAX, s->granularity) >> BDRV_SECTOR_BITS);
646c7c2769cSDenis V. Lunev 
647c7c2769cSDenis V. Lunev             mirror_throttle(s);
648c7c2769cSDenis V. Lunev 
649c7c2769cSDenis V. Lunev             if (block_job_is_cancelled(&s->common)) {
65090ab48ebSAnton Nefedov                 s->initial_zeroing_ongoing = false;
651c7c2769cSDenis V. Lunev                 return 0;
652c7c2769cSDenis V. Lunev             }
653c7c2769cSDenis V. Lunev 
654c7c2769cSDenis V. Lunev             if (s->in_flight >= MAX_IN_FLIGHT) {
65567adf4b3SEric Blake                 trace_mirror_yield(s, UINT64_MAX, s->buf_free_count,
65667adf4b3SEric Blake                                    s->in_flight);
657c7c2769cSDenis V. Lunev                 mirror_wait_for_io(s);
658c7c2769cSDenis V. Lunev                 continue;
659c7c2769cSDenis V. Lunev             }
660c7c2769cSDenis V. Lunev 
661*e6f24193SEric Blake             mirror_do_zero_or_discard(s, sector_num * BDRV_SECTOR_SIZE,
662*e6f24193SEric Blake                                       nb_sectors * BDRV_SECTOR_SIZE, false);
663c7c2769cSDenis V. Lunev             sector_num += nb_sectors;
664c7c2769cSDenis V. Lunev         }
665c7c2769cSDenis V. Lunev 
666bae8196dSPaolo Bonzini         mirror_wait_for_all_io(s);
66790ab48ebSAnton Nefedov         s->initial_zeroing_ongoing = false;
668c7c2769cSDenis V. Lunev     }
669c7c2769cSDenis V. Lunev 
670c0b363adSDenis V. Lunev     /* First part, loop on the sectors and initialize the dirty bitmap.  */
671c0b363adSDenis V. Lunev     for (sector_num = 0; sector_num < end; ) {
672c0b363adSDenis V. Lunev         /* Just to make sure we are not exceeding int limit. */
673c0b363adSDenis V. Lunev         int nb_sectors = MIN(INT_MAX >> BDRV_SECTOR_BITS,
674c0b363adSDenis V. Lunev                              end - sector_num);
675c0b363adSDenis V. Lunev 
676c0b363adSDenis V. Lunev         mirror_throttle(s);
677c0b363adSDenis V. Lunev 
678c0b363adSDenis V. Lunev         if (block_job_is_cancelled(&s->common)) {
679c0b363adSDenis V. Lunev             return 0;
680c0b363adSDenis V. Lunev         }
681c0b363adSDenis V. Lunev 
682c0b363adSDenis V. Lunev         ret = bdrv_is_allocated_above(bs, base, sector_num, nb_sectors, &n);
683c0b363adSDenis V. Lunev         if (ret < 0) {
684c0b363adSDenis V. Lunev             return ret;
685c0b363adSDenis V. Lunev         }
686c0b363adSDenis V. Lunev 
687c0b363adSDenis V. Lunev         assert(n > 0);
688b7d5062cSDenis V. Lunev         if (ret == 1) {
689c0b363adSDenis V. Lunev             bdrv_set_dirty_bitmap(s->dirty_bitmap, sector_num, n);
690c0b363adSDenis V. Lunev         }
691c0b363adSDenis V. Lunev         sector_num += n;
692c0b363adSDenis V. Lunev     }
693c0b363adSDenis V. Lunev     return 0;
694c0b363adSDenis V. Lunev }
695c0b363adSDenis V. Lunev 
696bdffb31dSPaolo Bonzini /* Called when going out of the streaming phase to flush the bulk of the
697bdffb31dSPaolo Bonzini  * data to the medium, or just before completing.
698bdffb31dSPaolo Bonzini  */
699bdffb31dSPaolo Bonzini static int mirror_flush(MirrorBlockJob *s)
700bdffb31dSPaolo Bonzini {
701bdffb31dSPaolo Bonzini     int ret = blk_flush(s->target);
702bdffb31dSPaolo Bonzini     if (ret < 0) {
703bdffb31dSPaolo Bonzini         if (mirror_error_action(s, false, -ret) == BLOCK_ERROR_ACTION_REPORT) {
704bdffb31dSPaolo Bonzini             s->ret = ret;
705bdffb31dSPaolo Bonzini         }
706bdffb31dSPaolo Bonzini     }
707bdffb31dSPaolo Bonzini     return ret;
708bdffb31dSPaolo Bonzini }
709bdffb31dSPaolo Bonzini 
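/* Main job coroutine.  After checking the image size and seeding the dirty
 * bitmap (via mirror_dirty_init() unless sync=none) it loops: update the
 * progress counters, call mirror_iteration() while there is dirty data,
 * report BLOCK_JOB_READY once source and target are in sync, and on
 * completion drain the source and re-check that no new writes arrived before
 * switching over.  Cleanup is deferred to mirror_exit() in the main loop. */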
710893f7ebaSPaolo Bonzini static void coroutine_fn mirror_run(void *opaque)
711893f7ebaSPaolo Bonzini {
712893f7ebaSPaolo Bonzini     MirrorBlockJob *s = opaque;
7135a7e7a0bSStefan Hajnoczi     MirrorExitData *data;
7144ef85a9cSKevin Wolf     BlockDriverState *bs = s->source;
715e253f4b8SKevin Wolf     BlockDriverState *target_bs = blk_bs(s->target);
7169a0cec66SPaolo Bonzini     bool need_drain = true;
717c0b363adSDenis V. Lunev     int64_t length;
718b812f671SPaolo Bonzini     BlockDriverInfo bdi;
7191d33936eSJeff Cody     char backing_filename[2]; /* we only need 2 characters because we are only
7201d33936eSJeff Cody                                  checking for a NULL string */
721893f7ebaSPaolo Bonzini     int ret = 0;
722893f7ebaSPaolo Bonzini 
723893f7ebaSPaolo Bonzini     if (block_job_is_cancelled(&s->common)) {
724893f7ebaSPaolo Bonzini         goto immediate_exit;
725893f7ebaSPaolo Bonzini     }
726893f7ebaSPaolo Bonzini 
727b21c7652SMax Reitz     s->bdev_length = bdrv_getlength(bs);
728b21c7652SMax Reitz     if (s->bdev_length < 0) {
729b21c7652SMax Reitz         ret = s->bdev_length;
730373df5b1SFam Zheng         goto immediate_exit;
731becc347eSKevin Wolf     }
732becc347eSKevin Wolf 
733becc347eSKevin Wolf     /* Active commit must resize the base image if its size differs from the
734becc347eSKevin Wolf      * active layer. */
735becc347eSKevin Wolf     if (s->base == blk_bs(s->target)) {
736becc347eSKevin Wolf         int64_t base_length;
737becc347eSKevin Wolf 
738becc347eSKevin Wolf         base_length = blk_getlength(s->target);
739becc347eSKevin Wolf         if (base_length < 0) {
740becc347eSKevin Wolf             ret = base_length;
741becc347eSKevin Wolf             goto immediate_exit;
742becc347eSKevin Wolf         }
743becc347eSKevin Wolf 
744becc347eSKevin Wolf         if (s->bdev_length > base_length) {
745ed3d2ec9SMax Reitz             ret = blk_truncate(s->target, s->bdev_length, NULL);
746becc347eSKevin Wolf             if (ret < 0) {
747becc347eSKevin Wolf                 goto immediate_exit;
748becc347eSKevin Wolf             }
749becc347eSKevin Wolf         }
750becc347eSKevin Wolf     }
751becc347eSKevin Wolf 
752becc347eSKevin Wolf     if (s->bdev_length == 0) {
7539e48b025SFam Zheng         /* Report BLOCK_JOB_READY and wait for complete. */
7549e48b025SFam Zheng         block_job_event_ready(&s->common);
7559e48b025SFam Zheng         s->synced = true;
7569e48b025SFam Zheng         while (!block_job_is_cancelled(&s->common) && !s->should_complete) {
7579e48b025SFam Zheng             block_job_yield(&s->common);
7589e48b025SFam Zheng         }
7599e48b025SFam Zheng         s->common.cancelled = false;
7609e48b025SFam Zheng         goto immediate_exit;
761893f7ebaSPaolo Bonzini     }
762893f7ebaSPaolo Bonzini 
763b21c7652SMax Reitz     length = DIV_ROUND_UP(s->bdev_length, s->granularity);
764402a4741SPaolo Bonzini     s->in_flight_bitmap = bitmap_new(length);
765402a4741SPaolo Bonzini 
766b812f671SPaolo Bonzini     /* If we have no backing file yet in the destination, we cannot let
767b812f671SPaolo Bonzini      * the destination do COW.  Instead, we copy sectors around the
768b812f671SPaolo Bonzini      * dirty data if needed.  We need a bitmap to do that.
769b812f671SPaolo Bonzini      */
770e253f4b8SKevin Wolf     bdrv_get_backing_filename(target_bs, backing_filename,
771b812f671SPaolo Bonzini                               sizeof(backing_filename));
772e253f4b8SKevin Wolf     if (!bdrv_get_info(target_bs, &bdi) && bdi.cluster_size) {
773b436982fSEric Blake         s->target_cluster_size = bdi.cluster_size;
774b436982fSEric Blake     } else {
775b436982fSEric Blake         s->target_cluster_size = BDRV_SECTOR_SIZE;
776c3cc95bdSFam Zheng     }
777b436982fSEric Blake     if (backing_filename[0] && !target_bs->backing &&
778b436982fSEric Blake         s->granularity < s->target_cluster_size) {
779b436982fSEric Blake         s->buf_size = MAX(s->buf_size, s->target_cluster_size);
780b812f671SPaolo Bonzini         s->cow_bitmap = bitmap_new(length);
781b812f671SPaolo Bonzini     }
782e253f4b8SKevin Wolf     s->max_iov = MIN(bs->bl.max_iov, target_bs->bl.max_iov);
783b812f671SPaolo Bonzini 
7847504edf4SKevin Wolf     s->buf = qemu_try_blockalign(bs, s->buf_size);
7857504edf4SKevin Wolf     if (s->buf == NULL) {
7867504edf4SKevin Wolf         ret = -ENOMEM;
7877504edf4SKevin Wolf         goto immediate_exit;
7887504edf4SKevin Wolf     }
7897504edf4SKevin Wolf 
790402a4741SPaolo Bonzini     mirror_free_init(s);
791893f7ebaSPaolo Bonzini 
79249efb1f5SDenis V. Lunev     s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
79303544a6eSFam Zheng     if (!s->is_none_mode) {
794c0b363adSDenis V. Lunev         ret = mirror_dirty_init(s);
795c0b363adSDenis V. Lunev         if (ret < 0 || block_job_is_cancelled(&s->common)) {
7964c0cbd6fSFam Zheng             goto immediate_exit;
7974c0cbd6fSFam Zheng         }
798893f7ebaSPaolo Bonzini     }
799893f7ebaSPaolo Bonzini 
800dc162c8eSFam Zheng     assert(!s->dbi);
801dc162c8eSFam Zheng     s->dbi = bdrv_dirty_iter_new(s->dirty_bitmap, 0);
802893f7ebaSPaolo Bonzini     for (;;) {
803cc8c9d6cSPaolo Bonzini         uint64_t delay_ns = 0;
80449efb1f5SDenis V. Lunev         int64_t cnt, delta;
805893f7ebaSPaolo Bonzini         bool should_complete;
806893f7ebaSPaolo Bonzini 
807bd48bde8SPaolo Bonzini         if (s->ret < 0) {
808bd48bde8SPaolo Bonzini             ret = s->ret;
809893f7ebaSPaolo Bonzini             goto immediate_exit;
810893f7ebaSPaolo Bonzini         }
811bd48bde8SPaolo Bonzini 
812565ac01fSStefan Hajnoczi         block_job_pause_point(&s->common);
813565ac01fSStefan Hajnoczi 
81420dca810SJohn Snow         cnt = bdrv_get_dirty_count(s->dirty_bitmap);
815b21c7652SMax Reitz         /* s->common.offset contains the number of bytes already processed so
816b21c7652SMax Reitz          * far, cnt is the number of dirty sectors remaining and
817b436982fSEric Blake          * s->bytes_in_flight is the number of bytes currently being
818b21c7652SMax Reitz          * processed; together those are the current total operation length */
819b436982fSEric Blake         s->common.len = s->common.offset + s->bytes_in_flight +
820b436982fSEric Blake             cnt * BDRV_SECTOR_SIZE;
821bd48bde8SPaolo Bonzini 
822bd48bde8SPaolo Bonzini         /* Note that even when no rate limit is applied we need to yield
823a7282330SFam Zheng          * periodically with no pending I/O so that bdrv_drain_all() returns.
824bd48bde8SPaolo Bonzini          * We do so every SLICE_TIME nanoseconds, or when there is an error,
825bd48bde8SPaolo Bonzini          * or when the source is clean, whichever comes first.
826bd48bde8SPaolo Bonzini          */
82749efb1f5SDenis V. Lunev         delta = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - s->last_pause_ns;
82849efb1f5SDenis V. Lunev         if (delta < SLICE_TIME &&
829bd48bde8SPaolo Bonzini             s->common.iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
830cf56a3c6SDenis V. Lunev             if (s->in_flight >= MAX_IN_FLIGHT || s->buf_free_count == 0 ||
831402a4741SPaolo Bonzini                 (cnt == 0 && s->in_flight > 0)) {
8325cb1a49eSEric Blake                 trace_mirror_yield(s, cnt * BDRV_SECTOR_SIZE,
8335cb1a49eSEric Blake                                    s->buf_free_count, s->in_flight);
83421cd917fSFam Zheng                 mirror_wait_for_io(s);
835bd48bde8SPaolo Bonzini                 continue;
836bd48bde8SPaolo Bonzini             } else if (cnt != 0) {
837cc8c9d6cSPaolo Bonzini                 delay_ns = mirror_iteration(s);
838893f7ebaSPaolo Bonzini             }
839cc8c9d6cSPaolo Bonzini         }
840893f7ebaSPaolo Bonzini 
841893f7ebaSPaolo Bonzini         should_complete = false;
842bd48bde8SPaolo Bonzini         if (s->in_flight == 0 && cnt == 0) {
843893f7ebaSPaolo Bonzini             trace_mirror_before_flush(s);
844bdffb31dSPaolo Bonzini             if (!s->synced) {
845bdffb31dSPaolo Bonzini                 if (mirror_flush(s) < 0) {
846bdffb31dSPaolo Bonzini                     /* Go check s->ret.  */
847bdffb31dSPaolo Bonzini                     continue;
848893f7ebaSPaolo Bonzini                 }
849893f7ebaSPaolo Bonzini                 /* We're out of the streaming phase.  From now on, if the job
850893f7ebaSPaolo Bonzini                  * is cancelled we will actually complete all pending I/O and
851893f7ebaSPaolo Bonzini                  * report completion.  This way, block-job-cancel will leave
852893f7ebaSPaolo Bonzini                  * the target in a consistent state.
853893f7ebaSPaolo Bonzini                  */
854bcada37bSWenchao Xia                 block_job_event_ready(&s->common);
855d63ffd87SPaolo Bonzini                 s->synced = true;
856d63ffd87SPaolo Bonzini             }
857d63ffd87SPaolo Bonzini 
858d63ffd87SPaolo Bonzini             should_complete = s->should_complete ||
859d63ffd87SPaolo Bonzini                 block_job_is_cancelled(&s->common);
86020dca810SJohn Snow             cnt = bdrv_get_dirty_count(s->dirty_bitmap);
861893f7ebaSPaolo Bonzini         }
862893f7ebaSPaolo Bonzini 
863893f7ebaSPaolo Bonzini         if (cnt == 0 && should_complete) {
864893f7ebaSPaolo Bonzini             /* The dirty bitmap is not updated while operations are pending.
865893f7ebaSPaolo Bonzini              * If we're about to exit, wait for pending operations before
866893f7ebaSPaolo Bonzini              * calling bdrv_get_dirty_count(bs), or we may exit while the
867893f7ebaSPaolo Bonzini              * source has dirty data to copy!
868893f7ebaSPaolo Bonzini              *
869893f7ebaSPaolo Bonzini              * Note that I/O can be submitted by the guest while
8709a0cec66SPaolo Bonzini              * the mirror job runs, so pause it now.  Before deciding
8719a0cec66SPaolo Bonzini              * whether to switch to target check one last time if I/O has
8729a0cec66SPaolo Bonzini              * come in the meanwhile, and if not flush the data to disk.
873893f7ebaSPaolo Bonzini              */
8745cb1a49eSEric Blake             trace_mirror_before_drain(s, cnt * BDRV_SECTOR_SIZE);
8759a0cec66SPaolo Bonzini 
8769a0cec66SPaolo Bonzini             bdrv_drained_begin(bs);
87720dca810SJohn Snow             cnt = bdrv_get_dirty_count(s->dirty_bitmap);
878bdffb31dSPaolo Bonzini             if (cnt > 0 || mirror_flush(s) < 0) {
8799a0cec66SPaolo Bonzini                 bdrv_drained_end(bs);
8809a0cec66SPaolo Bonzini                 continue;
8819a0cec66SPaolo Bonzini             }
8829a0cec66SPaolo Bonzini 
8839a0cec66SPaolo Bonzini             /* The two disks are in sync.  Exit and report successful
8849a0cec66SPaolo Bonzini              * completion.
8859a0cec66SPaolo Bonzini              */
8869a0cec66SPaolo Bonzini             assert(QLIST_EMPTY(&bs->tracked_requests));
8879a0cec66SPaolo Bonzini             s->common.cancelled = false;
8889a0cec66SPaolo Bonzini             need_drain = false;
8899a0cec66SPaolo Bonzini             break;
890893f7ebaSPaolo Bonzini         }
891893f7ebaSPaolo Bonzini 
892893f7ebaSPaolo Bonzini         ret = 0;
8935cb1a49eSEric Blake         trace_mirror_before_sleep(s, cnt * BDRV_SECTOR_SIZE,
8945cb1a49eSEric Blake                                   s->synced, delay_ns);
895d63ffd87SPaolo Bonzini         if (!s->synced) {
8967483d1e5SAlex Bligh             block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
897893f7ebaSPaolo Bonzini             if (block_job_is_cancelled(&s->common)) {
898893f7ebaSPaolo Bonzini                 break;
899893f7ebaSPaolo Bonzini             }
900893f7ebaSPaolo Bonzini         } else if (!should_complete) {
901bd48bde8SPaolo Bonzini             delay_ns = (s->in_flight == 0 && cnt == 0 ? SLICE_TIME : 0);
9027483d1e5SAlex Bligh             block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
903893f7ebaSPaolo Bonzini         }
90449efb1f5SDenis V. Lunev         s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
905893f7ebaSPaolo Bonzini     }
906893f7ebaSPaolo Bonzini 
907893f7ebaSPaolo Bonzini immediate_exit:
908bd48bde8SPaolo Bonzini     if (s->in_flight > 0) {
909bd48bde8SPaolo Bonzini         /* We get here only if something went wrong.  Either the job failed,
910bd48bde8SPaolo Bonzini          * or it was cancelled prematurely so that we do not guarantee that
911bd48bde8SPaolo Bonzini          * the target is a copy of the source.
912bd48bde8SPaolo Bonzini          */
913bd48bde8SPaolo Bonzini         assert(ret < 0 || (!s->synced && block_job_is_cancelled(&s->common)));
9149a0cec66SPaolo Bonzini         assert(need_drain);
915bae8196dSPaolo Bonzini         mirror_wait_for_all_io(s);
916bd48bde8SPaolo Bonzini     }
917bd48bde8SPaolo Bonzini 
918bd48bde8SPaolo Bonzini     assert(s->in_flight == 0);
9197191bf31SMarkus Armbruster     qemu_vfree(s->buf);
920b812f671SPaolo Bonzini     g_free(s->cow_bitmap);
921402a4741SPaolo Bonzini     g_free(s->in_flight_bitmap);
922dc162c8eSFam Zheng     bdrv_dirty_iter_free(s->dbi);
9235a7e7a0bSStefan Hajnoczi 
9245a7e7a0bSStefan Hajnoczi     data = g_malloc(sizeof(*data));
9255a7e7a0bSStefan Hajnoczi     data->ret = ret;
9269a0cec66SPaolo Bonzini 
9279a0cec66SPaolo Bonzini     if (need_drain) {
928e253f4b8SKevin Wolf         bdrv_drained_begin(bs);
9299a0cec66SPaolo Bonzini     }
9305a7e7a0bSStefan Hajnoczi     block_job_defer_to_main_loop(&s->common, mirror_exit, data);
931893f7ebaSPaolo Bonzini }
932893f7ebaSPaolo Bonzini 
933893f7ebaSPaolo Bonzini static void mirror_set_speed(BlockJob *job, int64_t speed, Error **errp)
934893f7ebaSPaolo Bonzini {
935893f7ebaSPaolo Bonzini     MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
936893f7ebaSPaolo Bonzini 
937893f7ebaSPaolo Bonzini     if (speed < 0) {
938c6bd8c70SMarkus Armbruster         error_setg(errp, QERR_INVALID_PARAMETER, "speed");
939893f7ebaSPaolo Bonzini         return;
940893f7ebaSPaolo Bonzini     }
941f3e4ce4aSEric Blake     ratelimit_set_speed(&s->limit, speed, SLICE_TIME);
942893f7ebaSPaolo Bonzini }
943893f7ebaSPaolo Bonzini 
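/* Implements block-job-complete: refuse if the job is not yet synced, open
 * the target's backing file if the backing mode asks for it, resolve and
 * block the node named by 'replaces', then wake the job coroutine so it can
 * switch over. */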
944d63ffd87SPaolo Bonzini static void mirror_complete(BlockJob *job, Error **errp)
945d63ffd87SPaolo Bonzini {
946d63ffd87SPaolo Bonzini     MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
9474ef85a9cSKevin Wolf     BlockDriverState *target;
948d63ffd87SPaolo Bonzini 
949274fcceeSMax Reitz     target = blk_bs(s->target);
950274fcceeSMax Reitz 
951d63ffd87SPaolo Bonzini     if (!s->synced) {
9529df229c3SAlberto Garcia         error_setg(errp, "The active block job '%s' cannot be completed",
9539df229c3SAlberto Garcia                    job->id);
954d63ffd87SPaolo Bonzini         return;
955d63ffd87SPaolo Bonzini     }
956d63ffd87SPaolo Bonzini 
957274fcceeSMax Reitz     if (s->backing_mode == MIRROR_OPEN_BACKING_CHAIN) {
958274fcceeSMax Reitz         int ret;
959274fcceeSMax Reitz 
960274fcceeSMax Reitz         assert(!target->backing);
961274fcceeSMax Reitz         ret = bdrv_open_backing_file(target, NULL, "backing", errp);
962274fcceeSMax Reitz         if (ret < 0) {
963274fcceeSMax Reitz             return;
964274fcceeSMax Reitz         }
965274fcceeSMax Reitz     }
966274fcceeSMax Reitz 
96715d67298SChanglong Xie     /* block all operations on to_replace bs */
96809158f00SBenoît Canet     if (s->replaces) {
9695a7e7a0bSStefan Hajnoczi         AioContext *replace_aio_context;
9705a7e7a0bSStefan Hajnoczi 
971e12f3784SWen Congyang         s->to_replace = bdrv_find_node(s->replaces);
97209158f00SBenoît Canet         if (!s->to_replace) {
973e12f3784SWen Congyang             error_setg(errp, "Node name '%s' not found", s->replaces);
97409158f00SBenoît Canet             return;
97509158f00SBenoît Canet         }
97609158f00SBenoît Canet 
9775a7e7a0bSStefan Hajnoczi         replace_aio_context = bdrv_get_aio_context(s->to_replace);
9785a7e7a0bSStefan Hajnoczi         aio_context_acquire(replace_aio_context);
9795a7e7a0bSStefan Hajnoczi 
9804ef85a9cSKevin Wolf         /* TODO Translate this into permission system. Current definition of
9814ef85a9cSKevin Wolf          * GRAPH_MOD would require to request it for the parents; they might
9824ef85a9cSKevin Wolf          * not even be BlockDriverStates, however, so a BdrvChild can't address
9834ef85a9cSKevin Wolf          * them. May need redefinition of GRAPH_MOD. */
98409158f00SBenoît Canet         error_setg(&s->replace_blocker,
98509158f00SBenoît Canet                    "block device is in use by block-job-complete");
98609158f00SBenoît Canet         bdrv_op_block_all(s->to_replace, s->replace_blocker);
98709158f00SBenoît Canet         bdrv_ref(s->to_replace);
9885a7e7a0bSStefan Hajnoczi 
9895a7e7a0bSStefan Hajnoczi         aio_context_release(replace_aio_context);
99009158f00SBenoît Canet     }
99109158f00SBenoît Canet 
992d63ffd87SPaolo Bonzini     s->should_complete = true;
993751ebd76SFam Zheng     block_job_enter(&s->common);
994d63ffd87SPaolo Bonzini }
995d63ffd87SPaolo Bonzini 
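/* Pause callback: quiesce the job by waiting for all in-flight mirror I/O
 * before the job is suspended. */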
996bae8196dSPaolo Bonzini static void mirror_pause(BlockJob *job)
997565ac01fSStefan Hajnoczi {
998565ac01fSStefan Hajnoczi     MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
999565ac01fSStefan Hajnoczi 
1000bae8196dSPaolo Bonzini     mirror_wait_for_all_io(s);
1001565ac01fSStefan Hajnoczi }
1002565ac01fSStefan Hajnoczi 
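/* When the job is moved to a new AioContext, move the target BlockBackend
 * along with it so both ends keep running in the same context. */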
1003565ac01fSStefan Hajnoczi static void mirror_attached_aio_context(BlockJob *job, AioContext *new_context)
1004565ac01fSStefan Hajnoczi {
1005565ac01fSStefan Hajnoczi     MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
1006565ac01fSStefan Hajnoczi 
1007565ac01fSStefan Hajnoczi     blk_set_aio_context(s->target, new_context);
1008565ac01fSStefan Hajnoczi }
1009565ac01fSStefan Hajnoczi 
1010bae8196dSPaolo Bonzini static void mirror_drain(BlockJob *job)
1011bae8196dSPaolo Bonzini {
1012bae8196dSPaolo Bonzini     MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
1013bae8196dSPaolo Bonzini 
1014bae8196dSPaolo Bonzini     /* Need to keep a reference in case blk_drain triggers execution
1015bae8196dSPaolo Bonzini      * of mirror_complete...
1016bae8196dSPaolo Bonzini      */
1017bae8196dSPaolo Bonzini     if (s->target) {
1018bae8196dSPaolo Bonzini         BlockBackend *target = s->target;
1019bae8196dSPaolo Bonzini         blk_ref(target);
1020bae8196dSPaolo Bonzini         blk_drain(target);
1021bae8196dSPaolo Bonzini         blk_unref(target);
1022bae8196dSPaolo Bonzini     }
1023bae8196dSPaolo Bonzini }
1024bae8196dSPaolo Bonzini 
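/* The plain mirror job and the mirror-based active commit share the same
 * callback implementations; the two driver tables below differ only in the
 * job type they report. */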
10253fc4b10aSFam Zheng static const BlockJobDriver mirror_job_driver = {
1026893f7ebaSPaolo Bonzini     .instance_size          = sizeof(MirrorBlockJob),
102779e14bf7SFam Zheng     .job_type               = BLOCK_JOB_TYPE_MIRROR,
1028893f7ebaSPaolo Bonzini     .set_speed              = mirror_set_speed,
1029a7815a76SJohn Snow     .start                  = mirror_run,
1030d63ffd87SPaolo Bonzini     .complete               = mirror_complete,
1031565ac01fSStefan Hajnoczi     .pause                  = mirror_pause,
1032565ac01fSStefan Hajnoczi     .attached_aio_context   = mirror_attached_aio_context,
1033bae8196dSPaolo Bonzini     .drain                  = mirror_drain,
1034893f7ebaSPaolo Bonzini };
1035893f7ebaSPaolo Bonzini 
103603544a6eSFam Zheng static const BlockJobDriver commit_active_job_driver = {
103703544a6eSFam Zheng     .instance_size          = sizeof(MirrorBlockJob),
103803544a6eSFam Zheng     .job_type               = BLOCK_JOB_TYPE_COMMIT,
103903544a6eSFam Zheng     .set_speed              = mirror_set_speed,
1040a7815a76SJohn Snow     .start                  = mirror_run,
104103544a6eSFam Zheng     .complete               = mirror_complete,
1042565ac01fSStefan Hajnoczi     .pause                  = mirror_pause,
1043565ac01fSStefan Hajnoczi     .attached_aio_context   = mirror_attached_aio_context,
1044bae8196dSPaolo Bonzini     .drain                  = mirror_drain,
104503544a6eSFam Zheng };
104603544a6eSFam Zheng 
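/*
 * The bdrv_mirror_top_* callbacks below implement the "mirror_top" filter
 * driver declared further down.  Each request is forwarded unmodified to
 * bs->backing, i.e. the original source node the filter was inserted above,
 * so guest writes still reach the source where the job's dirty bitmap can
 * track them.
 */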
10474ef85a9cSKevin Wolf static int coroutine_fn bdrv_mirror_top_preadv(BlockDriverState *bs,
10484ef85a9cSKevin Wolf     uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags)
10494ef85a9cSKevin Wolf {
10504ef85a9cSKevin Wolf     return bdrv_co_preadv(bs->backing, offset, bytes, qiov, flags);
10514ef85a9cSKevin Wolf }
10524ef85a9cSKevin Wolf 
10534ef85a9cSKevin Wolf static int coroutine_fn bdrv_mirror_top_pwritev(BlockDriverState *bs,
10544ef85a9cSKevin Wolf     uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags)
10554ef85a9cSKevin Wolf {
10564ef85a9cSKevin Wolf     return bdrv_co_pwritev(bs->backing, offset, bytes, qiov, flags);
10574ef85a9cSKevin Wolf }
10584ef85a9cSKevin Wolf 
10594ef85a9cSKevin Wolf static int coroutine_fn bdrv_mirror_top_flush(BlockDriverState *bs)
10604ef85a9cSKevin Wolf {
10614ef85a9cSKevin Wolf     return bdrv_co_flush(bs->backing->bs);
10624ef85a9cSKevin Wolf }
10634ef85a9cSKevin Wolf 
10644ef85a9cSKevin Wolf static int64_t coroutine_fn bdrv_mirror_top_get_block_status(
10654ef85a9cSKevin Wolf     BlockDriverState *bs, int64_t sector_num, int nb_sectors, int *pnum,
10664ef85a9cSKevin Wolf     BlockDriverState **file)
10674ef85a9cSKevin Wolf {
10684ef85a9cSKevin Wolf     *pnum = nb_sectors;
10694ef85a9cSKevin Wolf     *file = bs->backing->bs;
1070d5254033SEric Blake     return BDRV_BLOCK_RAW | BDRV_BLOCK_OFFSET_VALID |
10714ef85a9cSKevin Wolf            (sector_num << BDRV_SECTOR_BITS);
10724ef85a9cSKevin Wolf }
10734ef85a9cSKevin Wolf 
10744ef85a9cSKevin Wolf static int coroutine_fn bdrv_mirror_top_pwrite_zeroes(BlockDriverState *bs,
1075f5a5ca79SManos Pitsidianakis     int64_t offset, int bytes, BdrvRequestFlags flags)
10764ef85a9cSKevin Wolf {
1077f5a5ca79SManos Pitsidianakis     return bdrv_co_pwrite_zeroes(bs->backing, offset, bytes, flags);
10784ef85a9cSKevin Wolf }
10794ef85a9cSKevin Wolf 
10804ef85a9cSKevin Wolf static int coroutine_fn bdrv_mirror_top_pdiscard(BlockDriverState *bs,
1081f5a5ca79SManos Pitsidianakis     int64_t offset, int bytes)
10824ef85a9cSKevin Wolf {
1083f5a5ca79SManos Pitsidianakis     return bdrv_co_pdiscard(bs->backing->bs, offset, bytes);
10844ef85a9cSKevin Wolf }
10854ef85a9cSKevin Wolf 
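/* Report the backing file's filename as the filter's own, so inserting the
 * mirror_top node does not change the image's visible filename. */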
1086fd4a6493SKevin Wolf static void bdrv_mirror_top_refresh_filename(BlockDriverState *bs, QDict *opts)
1087fd4a6493SKevin Wolf {
1088fd4a6493SKevin Wolf     bdrv_refresh_filename(bs->backing->bs);
1089fd4a6493SKevin Wolf     pstrcpy(bs->exact_filename, sizeof(bs->exact_filename),
1090fd4a6493SKevin Wolf             bs->backing->bs->filename);
1091fd4a6493SKevin Wolf }
1092fd4a6493SKevin Wolf 
10934ef85a9cSKevin Wolf static void bdrv_mirror_top_close(BlockDriverState *bs)
10944ef85a9cSKevin Wolf {
10954ef85a9cSKevin Wolf }
10964ef85a9cSKevin Wolf 
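/*
 * Permission callback for the filter's backing child: only the WRITE
 * permission requested by the filter's parents is propagated to the child,
 * and all permissions are shared, so the filter itself never restricts what
 * other users may do with the source node.
 */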
10974ef85a9cSKevin Wolf static void bdrv_mirror_top_child_perm(BlockDriverState *bs, BdrvChild *c,
10984ef85a9cSKevin Wolf                                        const BdrvChildRole *role,
10994ef85a9cSKevin Wolf                                        uint64_t perm, uint64_t shared,
11004ef85a9cSKevin Wolf                                        uint64_t *nperm, uint64_t *nshared)
11014ef85a9cSKevin Wolf {
11024ef85a9cSKevin Wolf     /* Must be able to forward guest writes to the real image */
11034ef85a9cSKevin Wolf     *nperm = 0;
11044ef85a9cSKevin Wolf     if (perm & BLK_PERM_WRITE) {
11054ef85a9cSKevin Wolf         *nperm |= BLK_PERM_WRITE;
11064ef85a9cSKevin Wolf     }
11074ef85a9cSKevin Wolf 
11084ef85a9cSKevin Wolf     *nshared = BLK_PERM_ALL;
11094ef85a9cSKevin Wolf }
11104ef85a9cSKevin Wolf 
11114ef85a9cSKevin Wolf /* Dummy node that provides consistent reads to its users without requiring
11124ef85a9cSKevin Wolf  * them from its backing file, and that allows writes on the backing file chain. */
11134ef85a9cSKevin Wolf static BlockDriver bdrv_mirror_top = {
11144ef85a9cSKevin Wolf     .format_name                = "mirror_top",
11154ef85a9cSKevin Wolf     .bdrv_co_preadv             = bdrv_mirror_top_preadv,
11164ef85a9cSKevin Wolf     .bdrv_co_pwritev            = bdrv_mirror_top_pwritev,
11174ef85a9cSKevin Wolf     .bdrv_co_pwrite_zeroes      = bdrv_mirror_top_pwrite_zeroes,
11184ef85a9cSKevin Wolf     .bdrv_co_pdiscard           = bdrv_mirror_top_pdiscard,
11194ef85a9cSKevin Wolf     .bdrv_co_flush              = bdrv_mirror_top_flush,
11204ef85a9cSKevin Wolf     .bdrv_co_get_block_status   = bdrv_mirror_top_get_block_status,
1121fd4a6493SKevin Wolf     .bdrv_refresh_filename      = bdrv_mirror_top_refresh_filename,
11224ef85a9cSKevin Wolf     .bdrv_close                 = bdrv_mirror_top_close,
11234ef85a9cSKevin Wolf     .bdrv_child_perm            = bdrv_mirror_top_child_perm,
11244ef85a9cSKevin Wolf };
11254ef85a9cSKevin Wolf 
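/*
 * Common setup shared by all mirror-style jobs (drive-mirror, blockdev-mirror
 * and active commit): insert the mirror_top filter above @bs, create the
 * block job with the appropriate permissions on source and target, allocate
 * the dirty bitmap and start the job coroutine.  On failure the filter node
 * is removed again and all acquired references are dropped.
 */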
112671aa9867SAlberto Garcia static void mirror_start_job(const char *job_id, BlockDriverState *bs,
112747970dfbSJohn Snow                              int creation_flags, BlockDriverState *target,
112847970dfbSJohn Snow                              const char *replaces, int64_t speed,
112947970dfbSJohn Snow                              uint32_t granularity, int64_t buf_size,
1130274fcceeSMax Reitz                              BlockMirrorBackingMode backing_mode,
113103544a6eSFam Zheng                              BlockdevOnError on_source_error,
1132b952b558SPaolo Bonzini                              BlockdevOnError on_target_error,
11330fc9f8eaSFam Zheng                              bool unmap,
1134097310b5SMarkus Armbruster                              BlockCompletionFunc *cb,
113551ccfa2dSFam Zheng                              void *opaque,
113603544a6eSFam Zheng                              const BlockJobDriver *driver,
1137b49f7eadSWen Congyang                              bool is_none_mode, BlockDriverState *base,
113851ccfa2dSFam Zheng                              bool auto_complete, const char *filter_node_name,
113951ccfa2dSFam Zheng                              Error **errp)
1140893f7ebaSPaolo Bonzini {
1141893f7ebaSPaolo Bonzini     MirrorBlockJob *s;
11424ef85a9cSKevin Wolf     BlockDriverState *mirror_top_bs;
11434ef85a9cSKevin Wolf     bool target_graph_mod;
11444ef85a9cSKevin Wolf     bool target_is_backing;
1145b2c2832cSKevin Wolf     Error *local_err = NULL;
1146d7086422SKevin Wolf     int ret;
1147893f7ebaSPaolo Bonzini 
1148eee13dfeSPaolo Bonzini     if (granularity == 0) {
1149341ebc2fSJohn Snow         granularity = bdrv_get_default_bitmap_granularity(target);
1150eee13dfeSPaolo Bonzini     }
1151eee13dfeSPaolo Bonzini 
1152eee13dfeSPaolo Bonzini     assert((granularity & (granularity - 1)) == 0); /* power of two */
1153b436982fSEric Blake     /* Granularity must be large enough for sector-based dirty bitmap */
1154b436982fSEric Blake     assert(granularity >= BDRV_SECTOR_SIZE);
1155eee13dfeSPaolo Bonzini 
115648ac0a4dSWen Congyang     if (buf_size < 0) {
115748ac0a4dSWen Congyang         error_setg(errp, "Invalid parameter 'buf-size'");
115848ac0a4dSWen Congyang         return;
115948ac0a4dSWen Congyang     }
116048ac0a4dSWen Congyang 
116148ac0a4dSWen Congyang     if (buf_size == 0) {
116248ac0a4dSWen Congyang         buf_size = DEFAULT_MIRROR_BUF_SIZE;
116348ac0a4dSWen Congyang     }
11645bc361b8SFam Zheng 
11654ef85a9cSKevin Wolf     /* In the case of active commit, add a dummy driver that provides consistent
11664ef85a9cSKevin Wolf      * reads on the top without requiring them from the intermediate nodes, and
11674ef85a9cSKevin Wolf      * make the backing chain writable. */
11686cdbceb1SKevin Wolf     mirror_top_bs = bdrv_new_open_driver(&bdrv_mirror_top, filter_node_name,
11696cdbceb1SKevin Wolf                                          BDRV_O_RDWR, errp);
11704ef85a9cSKevin Wolf     if (mirror_top_bs == NULL) {
1171893f7ebaSPaolo Bonzini         return;
1172893f7ebaSPaolo Bonzini     }
11734ef85a9cSKevin Wolf     mirror_top_bs->total_sectors = bs->total_sectors;
117419dd29e8SFam Zheng     bdrv_set_aio_context(mirror_top_bs, bdrv_get_aio_context(bs));
1175893f7ebaSPaolo Bonzini 
11764ef85a9cSKevin Wolf     /* bdrv_append takes ownership of the mirror_top_bs reference, so we need to
11777a25fcd0SMax Reitz      * keep it alive until block_job_create() succeeds even if bs has no parent. */
11784ef85a9cSKevin Wolf     bdrv_ref(mirror_top_bs);
11794ef85a9cSKevin Wolf     bdrv_drained_begin(bs);
1180b2c2832cSKevin Wolf     bdrv_append(mirror_top_bs, bs, &local_err);
11814ef85a9cSKevin Wolf     bdrv_drained_end(bs);
11824ef85a9cSKevin Wolf 
1183b2c2832cSKevin Wolf     if (local_err) {
1184b2c2832cSKevin Wolf         bdrv_unref(mirror_top_bs);
1185b2c2832cSKevin Wolf         error_propagate(errp, local_err);
1186b2c2832cSKevin Wolf         return;
1187b2c2832cSKevin Wolf     }
1188b2c2832cSKevin Wolf 
11894ef85a9cSKevin Wolf     /* Make sure that the source is not resized while the job is running */
11904ef85a9cSKevin Wolf     s = block_job_create(job_id, driver, mirror_top_bs,
11914ef85a9cSKevin Wolf                          BLK_PERM_CONSISTENT_READ,
11924ef85a9cSKevin Wolf                          BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED |
11934ef85a9cSKevin Wolf                          BLK_PERM_WRITE | BLK_PERM_GRAPH_MOD, speed,
11944ef85a9cSKevin Wolf                          creation_flags, cb, opaque, errp);
11954ef85a9cSKevin Wolf     if (!s) {
11964ef85a9cSKevin Wolf         goto fail;
11974ef85a9cSKevin Wolf     }
11987a25fcd0SMax Reitz     /* The block job now has a reference to this node */
11997a25fcd0SMax Reitz     bdrv_unref(mirror_top_bs);
12007a25fcd0SMax Reitz 
12014ef85a9cSKevin Wolf     s->source = bs;
12024ef85a9cSKevin Wolf     s->mirror_top_bs = mirror_top_bs;
12034ef85a9cSKevin Wolf 
12044ef85a9cSKevin Wolf     /* No resize for the target either; while the mirror is still running, a
12054ef85a9cSKevin Wolf      * consistent read isn't necessarily possible. We could possibly allow
12064ef85a9cSKevin Wolf      * writes and graph modifications, though it would likely defeat the
12074ef85a9cSKevin Wolf      * purpose of a mirror, so leave them blocked for now.
12084ef85a9cSKevin Wolf      *
12094ef85a9cSKevin Wolf      * In the case of active commit, things look a bit different, though,
12104ef85a9cSKevin Wolf      * because the target is an already populated backing file in active use.
12114ef85a9cSKevin Wolf      * We can allow anything except resize there. */
12124ef85a9cSKevin Wolf     target_is_backing = bdrv_chain_contains(bs, target);
12134ef85a9cSKevin Wolf     target_graph_mod = (backing_mode != MIRROR_LEAVE_BACKING_CHAIN);
12144ef85a9cSKevin Wolf     s->target = blk_new(BLK_PERM_WRITE | BLK_PERM_RESIZE |
12154ef85a9cSKevin Wolf                         (target_graph_mod ? BLK_PERM_GRAPH_MOD : 0),
12164ef85a9cSKevin Wolf                         BLK_PERM_WRITE_UNCHANGED |
12174ef85a9cSKevin Wolf                         (target_is_backing ? BLK_PERM_CONSISTENT_READ |
12184ef85a9cSKevin Wolf                                              BLK_PERM_WRITE |
12194ef85a9cSKevin Wolf                                              BLK_PERM_GRAPH_MOD : 0));
1220d7086422SKevin Wolf     ret = blk_insert_bs(s->target, target, errp);
1221d7086422SKevin Wolf     if (ret < 0) {
12224ef85a9cSKevin Wolf         goto fail;
1223d7086422SKevin Wolf     }
1224e253f4b8SKevin Wolf 
122509158f00SBenoît Canet     s->replaces = g_strdup(replaces);
1226b952b558SPaolo Bonzini     s->on_source_error = on_source_error;
1227b952b558SPaolo Bonzini     s->on_target_error = on_target_error;
122803544a6eSFam Zheng     s->is_none_mode = is_none_mode;
1229274fcceeSMax Reitz     s->backing_mode = backing_mode;
12305bc361b8SFam Zheng     s->base = base;
1231eee13dfeSPaolo Bonzini     s->granularity = granularity;
123248ac0a4dSWen Congyang     s->buf_size = ROUND_UP(buf_size, granularity);
12330fc9f8eaSFam Zheng     s->unmap = unmap;
1234b49f7eadSWen Congyang     if (auto_complete) {
1235b49f7eadSWen Congyang         s->should_complete = true;
1236b49f7eadSWen Congyang     }
1237b812f671SPaolo Bonzini 
12380db6e54aSFam Zheng     s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity, NULL, errp);
1239b8afb520SFam Zheng     if (!s->dirty_bitmap) {
124088f9d1b3SKevin Wolf         goto fail;
1241b8afb520SFam Zheng     }
124210f3cd15SAlberto Garcia 
12434ef85a9cSKevin Wolf     /* Required permissions are already taken with blk_new() */
124476d554e2SKevin Wolf     block_job_add_bdrv(&s->common, "target", target, 0, BLK_PERM_ALL,
124576d554e2SKevin Wolf                        &error_abort);
124676d554e2SKevin Wolf 
1247f3ede4b0SAlberto Garcia     /* In commit_active_start() all intermediate nodes disappear, so
1248f3ede4b0SAlberto Garcia      * any jobs in them must be blocked */
12494ef85a9cSKevin Wolf     if (target_is_backing) {
1250f3ede4b0SAlberto Garcia         BlockDriverState *iter;
1251f3ede4b0SAlberto Garcia         for (iter = backing_bs(bs); iter != target; iter = backing_bs(iter)) {
12524ef85a9cSKevin Wolf             /* XXX BLK_PERM_WRITE needs to be allowed so we don't block
12534ef85a9cSKevin Wolf              * ourselves at s->base (if writes are blocked for a node, they are
12544ef85a9cSKevin Wolf              * also blocked for its backing file). The other option would be a
12554ef85a9cSKevin Wolf              * second filter driver above s->base (== target). */
12564ef85a9cSKevin Wolf             ret = block_job_add_bdrv(&s->common, "intermediate node", iter, 0,
12574ef85a9cSKevin Wolf                                      BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE,
12584ef85a9cSKevin Wolf                                      errp);
12594ef85a9cSKevin Wolf             if (ret < 0) {
12604ef85a9cSKevin Wolf                 goto fail;
12614ef85a9cSKevin Wolf             }
1262f3ede4b0SAlberto Garcia         }
1263f3ede4b0SAlberto Garcia     }
126410f3cd15SAlberto Garcia 
12655ccac6f1SJohn Snow     trace_mirror_start(bs, s, opaque);
12665ccac6f1SJohn Snow     block_job_start(&s->common);
12674ef85a9cSKevin Wolf     return;
12684ef85a9cSKevin Wolf 
12694ef85a9cSKevin Wolf fail:
12704ef85a9cSKevin Wolf     if (s) {
12717a25fcd0SMax Reitz         /* Make sure this BDS does not go away until we have completed the graph
12727a25fcd0SMax Reitz          * changes below */
12737a25fcd0SMax Reitz         bdrv_ref(mirror_top_bs);
12747a25fcd0SMax Reitz 
12754ef85a9cSKevin Wolf         g_free(s->replaces);
12764ef85a9cSKevin Wolf         blk_unref(s->target);
127705b0d8e3SPaolo Bonzini         block_job_early_fail(&s->common);
12784ef85a9cSKevin Wolf     }
12794ef85a9cSKevin Wolf 
1280c1cef672SFam Zheng     bdrv_child_try_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL,
1281c1cef672SFam Zheng                             &error_abort);
12825fe31c25SKevin Wolf     bdrv_replace_node(mirror_top_bs, backing_bs(mirror_top_bs), &error_abort);
12837a25fcd0SMax Reitz 
12847a25fcd0SMax Reitz     bdrv_unref(mirror_top_bs);
1285893f7ebaSPaolo Bonzini }
128603544a6eSFam Zheng 
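/*
 * Public entry point for the mirror job proper (as opposed to active commit);
 * typically reached from the QMP drive-mirror and blockdev-mirror handlers
 * (the callers are not visible in this file).  Sync mode 'incremental' is
 * rejected here, and mode 'top' uses the backing file of @bs as the base.
 */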
128771aa9867SAlberto Garcia void mirror_start(const char *job_id, BlockDriverState *bs,
128871aa9867SAlberto Garcia                   BlockDriverState *target, const char *replaces,
12895fba6c0eSJohn Snow                   int64_t speed, uint32_t granularity, int64_t buf_size,
1290274fcceeSMax Reitz                   MirrorSyncMode mode, BlockMirrorBackingMode backing_mode,
1291274fcceeSMax Reitz                   BlockdevOnError on_source_error,
129203544a6eSFam Zheng                   BlockdevOnError on_target_error,
12936cdbceb1SKevin Wolf                   bool unmap, const char *filter_node_name, Error **errp)
129403544a6eSFam Zheng {
129503544a6eSFam Zheng     bool is_none_mode;
129603544a6eSFam Zheng     BlockDriverState *base;
129703544a6eSFam Zheng 
12984b80ab2bSJohn Snow     if (mode == MIRROR_SYNC_MODE_INCREMENTAL) {
12994b80ab2bSJohn Snow         error_setg(errp, "Sync mode 'incremental' not supported");
1300d58d8453SJohn Snow         return;
1301d58d8453SJohn Snow     }
130203544a6eSFam Zheng     is_none_mode = mode == MIRROR_SYNC_MODE_NONE;
1303760e0063SKevin Wolf     base = mode == MIRROR_SYNC_MODE_TOP ? backing_bs(bs) : NULL;
130447970dfbSJohn Snow     mirror_start_job(job_id, bs, BLOCK_JOB_DEFAULT, target, replaces,
1305274fcceeSMax Reitz                      speed, granularity, buf_size, backing_mode,
130651ccfa2dSFam Zheng                      on_source_error, on_target_error, unmap, NULL, NULL,
13076cdbceb1SKevin Wolf                      &mirror_job_driver, is_none_mode, base, false,
130851ccfa2dSFam Zheng                      filter_node_name, errp);
130903544a6eSFam Zheng }
131003544a6eSFam Zheng 
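/*
 * Entry point for active commit: committing the top image into a backing
 * file that is still in use.  The base is reopened with the top's flags
 * (writable) for the duration of the job; if starting the job fails, the
 * original flags are restored before returning.
 */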
1311fd62c609SAlberto Garcia void commit_active_start(const char *job_id, BlockDriverState *bs,
131247970dfbSJohn Snow                          BlockDriverState *base, int creation_flags,
131347970dfbSJohn Snow                          int64_t speed, BlockdevOnError on_error,
13140db832f4SKevin Wolf                          const char *filter_node_name,
131578bbd910SFam Zheng                          BlockCompletionFunc *cb, void *opaque,
131678bbd910SFam Zheng                          bool auto_complete, Error **errp)
131703544a6eSFam Zheng {
13184da83585SJeff Cody     int orig_base_flags;
1319cc67f4d1SJeff Cody     Error *local_err = NULL;
13204da83585SJeff Cody 
13214da83585SJeff Cody     orig_base_flags = bdrv_get_flags(base);
13224da83585SJeff Cody 
132320a63d2cSFam Zheng     if (bdrv_reopen(base, bs->open_flags, errp)) {
132420a63d2cSFam Zheng         return;
132520a63d2cSFam Zheng     }
13264da83585SJeff Cody 
132747970dfbSJohn Snow     mirror_start_job(job_id, bs, creation_flags, base, NULL, speed, 0, 0,
132871aa9867SAlberto Garcia                      MIRROR_LEAVE_BACKING_CHAIN,
132951ccfa2dSFam Zheng                      on_error, on_error, true, cb, opaque,
13306cdbceb1SKevin Wolf                      &commit_active_job_driver, false, base, auto_complete,
133151ccfa2dSFam Zheng                      filter_node_name, &local_err);
13320fb6395cSMarkus Armbruster     if (local_err) {
1333cc67f4d1SJeff Cody         error_propagate(errp, local_err);
13344da83585SJeff Cody         goto error_restore_flags;
13354da83585SJeff Cody     }
13364da83585SJeff Cody 
13374da83585SJeff Cody     return;
13384da83585SJeff Cody 
13394da83585SJeff Cody error_restore_flags:
13404da83585SJeff Cody     /* ignore error and errp for bdrv_reopen, because we want to propagate
13414da83585SJeff Cody      * the original error */
13424da83585SJeff Cody     bdrv_reopen(base, orig_base_flags, NULL);
13434da83585SJeff Cody     return;
134403544a6eSFam Zheng }
1345