/* xref: /qemu/block/mirror.c (revision 3a691c50) */

/*
 * Image mirroring
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Paolo Bonzini  <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "trace.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qemu/ratelimit.h"
#include "qemu/bitmap.h"

#define SLICE_TIME    100000000ULL /* ns */
#define MAX_IN_FLIGHT 16
#define MAX_IO_BYTES (1 << 20) /* 1 MiB */
#define DEFAULT_MIRROR_BUF_SIZE (MAX_IN_FLIGHT * MAX_IO_BYTES)

/* The mirroring buffer is a list of granularity-sized chunks.
 * Free chunks are organized in a list.
 */
typedef struct MirrorBuffer {
    QSIMPLEQ_ENTRY(MirrorBuffer) next;
} MirrorBuffer;

typedef struct MirrorBlockJob {
    BlockJob common;
    RateLimit limit;
    BlockBackend *target;
    BlockDriverState *mirror_top_bs;
    BlockDriverState *source;
    BlockDriverState *base;

    /* The name of the graph node to replace */
    char *replaces;
    /* The BDS to replace */
    BlockDriverState *to_replace;
    /* Used to block operations on the drive-mirror-replace target */
    Error *replace_blocker;
    bool is_none_mode;
    BlockMirrorBackingMode backing_mode;
    BlockdevOnError on_source_error, on_target_error;
    bool synced;
    bool should_complete;
    int64_t granularity;
    size_t buf_size;
    int64_t bdev_length;
    unsigned long *cow_bitmap;
    BdrvDirtyBitmap *dirty_bitmap;
    BdrvDirtyBitmapIter *dbi;
    uint8_t *buf;
    QSIMPLEQ_HEAD(, MirrorBuffer) buf_free;
    int buf_free_count;

    uint64_t last_pause_ns;
    unsigned long *in_flight_bitmap;
    int in_flight;
    int64_t bytes_in_flight;
    int ret;
    bool unmap;
    bool waiting_for_io;
    int target_cluster_size;
    int max_iov;
    bool initial_zeroing_ongoing;
} MirrorBlockJob;

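/* A single in-flight read, write, zero or discard operation; allocated per
 * request and freed again in mirror_iteration_done(). */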
typedef struct MirrorOp {
    MirrorBlockJob *s;
    QEMUIOVector qiov;
    int64_t offset;
    uint64_t bytes;
} MirrorOp;

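/* Map an I/O error to the action configured for source (read) or target
 * (write) errors, and mark the job as no longer synced. */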
static BlockErrorAction mirror_error_action(MirrorBlockJob *s, bool read,
                                            int error)
{
    s->synced = false;
    if (read) {
        return block_job_error_action(&s->common, s->on_source_error,
                                      true, error);
    } else {
        return block_job_error_action(&s->common, s->on_target_error,
                                      false, error);
    }
}

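/* Complete one mirror operation: return its buffer chunks to the free list,
 * clear its range in the in-flight bitmap, mark the chunks as copied in the
 * COW bitmap on success, account for progress, and wake up the job coroutine
 * if it is waiting for I/O to complete. */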
static void mirror_iteration_done(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;
    struct iovec *iov;
    int64_t chunk_num;
    int i, nb_chunks;

    trace_mirror_iteration_done(s, op->offset, op->bytes, ret);

    s->in_flight--;
    s->bytes_in_flight -= op->bytes;
    iov = op->qiov.iov;
    for (i = 0; i < op->qiov.niov; i++) {
        MirrorBuffer *buf = (MirrorBuffer *) iov[i].iov_base;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, buf, next);
        s->buf_free_count++;
    }

    chunk_num = op->offset / s->granularity;
    nb_chunks = DIV_ROUND_UP(op->bytes, s->granularity);
    bitmap_clear(s->in_flight_bitmap, chunk_num, nb_chunks);
    if (ret >= 0) {
        if (s->cow_bitmap) {
            bitmap_set(s->cow_bitmap, chunk_num, nb_chunks);
        }
        if (!s->initial_zeroing_ongoing) {
            s->common.offset += op->bytes;
        }
    }
    qemu_iovec_destroy(&op->qiov);
    g_free(op);

    if (s->waiting_for_io) {
        qemu_coroutine_enter(s->common.co);
    }
}

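/* AIO callback for the write to the target.  On failure, re-dirty the
 * affected range so it is retried, and apply the configured on-target-error
 * action. */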
static void mirror_write_complete(void *opaque, int ret)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;

    aio_context_acquire(blk_get_aio_context(s->common.blk));
    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->offset >> BDRV_SECTOR_BITS,
                              op->bytes >> BDRV_SECTOR_BITS);
        action = mirror_error_action(s, false, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }
    }
    mirror_iteration_done(op, ret);
    aio_context_release(blk_get_aio_context(s->common.blk));
}

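/* AIO callback for the read from the source.  On success, chain into the
 * write to the target; on failure, re-dirty the range and apply the
 * configured on-source-error action. */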
static void mirror_read_complete(void *opaque, int ret)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;

    aio_context_acquire(blk_get_aio_context(s->common.blk));
    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->offset >> BDRV_SECTOR_BITS,
                              op->bytes >> BDRV_SECTOR_BITS);
        action = mirror_error_action(s, true, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }

        mirror_iteration_done(op, ret);
    } else {
        blk_aio_pwritev(s->target, op->offset, &op->qiov,
                        0, mirror_write_complete, op);
    }
    aio_context_release(blk_get_aio_context(s->common.blk));
}

/* Clip bytes relative to offset to not exceed end-of-file */
static inline int64_t mirror_clip_bytes(MirrorBlockJob *s,
                                        int64_t offset,
                                        int64_t bytes)
{
    return MIN(bytes, s->bdev_length - offset);
}

/* Round offset and/or bytes to target cluster if COW is needed, and
 * return the offset of the adjusted tail against original. */
static int mirror_cow_align(MirrorBlockJob *s, int64_t *offset,
                            uint64_t *bytes)
{
    bool need_cow;
    int ret = 0;
    int64_t align_offset = *offset;
    unsigned int align_bytes = *bytes;
    int max_bytes = s->granularity * s->max_iov;

    assert(*bytes < INT_MAX);
    need_cow = !test_bit(*offset / s->granularity, s->cow_bitmap);
    need_cow |= !test_bit((*offset + *bytes - 1) / s->granularity,
                          s->cow_bitmap);
    if (need_cow) {
        bdrv_round_to_clusters(blk_bs(s->target), *offset, *bytes,
                               &align_offset, &align_bytes);
    }

    if (align_bytes > max_bytes) {
        align_bytes = max_bytes;
        if (need_cow) {
            align_bytes = QEMU_ALIGN_DOWN(align_bytes, s->target_cluster_size);
        }
    }
    /* Clipping may result in align_bytes unaligned to chunk boundary, but
     * that doesn't matter because it's already the end of the source image. */
    align_bytes = mirror_clip_bytes(s, align_offset, align_bytes);

    ret = align_offset + align_bytes - (*offset + *bytes);
    *offset = align_offset;
    *bytes = align_bytes;
    assert(ret >= 0);
    return ret;
}

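/* Yield until mirror_iteration_done() re-enters the job coroutine, i.e.
 * until at least one in-flight operation completes. */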
static inline void mirror_wait_for_io(MirrorBlockJob *s)
{
    assert(!s->waiting_for_io);
    s->waiting_for_io = true;
    qemu_coroutine_yield();
    s->waiting_for_io = false;
}

/* Submit async read while handling COW.
 * Returns: The number of bytes copied after and including offset,
 *          excluding any bytes copied prior to offset due to alignment.
 *          This will be @bytes if no alignment is necessary, or
 *          (new_end - offset) if tail is rounded up or down due to
 *          alignment or buffer limit.
 */
static uint64_t mirror_do_read(MirrorBlockJob *s, int64_t offset,
                               uint64_t bytes)
{
    BlockBackend *source = s->common.blk;
    int nb_chunks;
    uint64_t ret;
    MirrorOp *op;
    uint64_t max_bytes;

    max_bytes = s->granularity * s->max_iov;

    /* We can only handle as much as buf_size at a time. */
    bytes = MIN(s->buf_size, MIN(max_bytes, bytes));
    assert(bytes);
    assert(bytes < BDRV_REQUEST_MAX_BYTES);
    ret = bytes;

    if (s->cow_bitmap) {
        ret += mirror_cow_align(s, &offset, &bytes);
    }
    assert(bytes <= s->buf_size);
    /* The offset is granularity-aligned because:
     * 1) Caller passes in aligned values;
     * 2) mirror_cow_align is used only when target cluster is larger. */
    assert(QEMU_IS_ALIGNED(offset, s->granularity));
    /* The range is sector-aligned, since bdrv_getlength() rounds up. */
    assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));
    nb_chunks = DIV_ROUND_UP(bytes, s->granularity);

    while (s->buf_free_count < nb_chunks) {
        trace_mirror_yield_in_flight(s, offset, s->in_flight);
        mirror_wait_for_io(s);
    }

    /* Allocate a MirrorOp that is used as an AIO callback.  */
    op = g_new(MirrorOp, 1);
    op->s = s;
    op->offset = offset;
    op->bytes = bytes;

    /* Now make a QEMUIOVector taking enough granularity-sized chunks
     * from s->buf_free.
     */
    qemu_iovec_init(&op->qiov, nb_chunks);
    while (nb_chunks-- > 0) {
        MirrorBuffer *buf = QSIMPLEQ_FIRST(&s->buf_free);
        size_t remaining = bytes - op->qiov.size;

        QSIMPLEQ_REMOVE_HEAD(&s->buf_free, next);
        s->buf_free_count--;
        qemu_iovec_add(&op->qiov, buf, MIN(s->granularity, remaining));
    }

    /* Copy the dirty cluster.  */
    s->in_flight++;
    s->bytes_in_flight += bytes;
    trace_mirror_one_iteration(s, offset, bytes);

    blk_aio_preadv(source, offset, &op->qiov, 0, mirror_read_complete, op);
    return ret;
}

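/* Submit an async write-zeroes or discard to the target for the given range;
 * completion is accounted through mirror_write_complete() like a copy. */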
static void mirror_do_zero_or_discard(MirrorBlockJob *s,
                                      int64_t offset,
                                      uint64_t bytes,
                                      bool is_discard)
{
    MirrorOp *op;

    /* Allocate a MirrorOp that is used as an AIO callback. The qiov is zeroed
     * so the freeing in mirror_iteration_done is a nop. */
    op = g_new0(MirrorOp, 1);
    op->s = s;
    op->offset = offset;
    op->bytes = bytes;

    s->in_flight++;
    s->bytes_in_flight += bytes;
    if (is_discard) {
        blk_aio_pdiscard(s->target, offset,
                         op->bytes, mirror_write_complete, op);
    } else {
        blk_aio_pwrite_zeroes(s->target, offset,
                              op->bytes, s->unmap ? BDRV_REQ_MAY_UNMAP : 0,
                              mirror_write_complete, op);
    }
}

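/* Perform one iteration of the copy loop: pick the next run of dirty chunks,
 * choose between copy, write-zeroes and discard based on the block status,
 * and submit the corresponding async operations.  Returns the delay (in ns)
 * requested by the rate limiter, if any. */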
static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
{
    BlockDriverState *source = s->source;
    int64_t offset, first_chunk;
    uint64_t delay_ns = 0;
    /* At least the first dirty chunk is mirrored in one iteration. */
    int nb_chunks = 1;
    int sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
    bool write_zeroes_ok = bdrv_can_write_zeroes_with_unmap(blk_bs(s->target));
    int max_io_bytes = MAX(s->buf_size / MAX_IN_FLIGHT, MAX_IO_BYTES);

    bdrv_dirty_bitmap_lock(s->dirty_bitmap);
    offset = bdrv_dirty_iter_next(s->dbi) * BDRV_SECTOR_SIZE;
    if (offset < 0) {
        bdrv_set_dirty_iter(s->dbi, 0);
        offset = bdrv_dirty_iter_next(s->dbi) * BDRV_SECTOR_SIZE;
        trace_mirror_restart_iter(s, bdrv_get_dirty_count(s->dirty_bitmap) *
                                  BDRV_SECTOR_SIZE);
        assert(offset >= 0);
    }
    bdrv_dirty_bitmap_unlock(s->dirty_bitmap);

    first_chunk = offset / s->granularity;
    while (test_bit(first_chunk, s->in_flight_bitmap)) {
        trace_mirror_yield_in_flight(s, offset, s->in_flight);
        mirror_wait_for_io(s);
    }

    block_job_pause_point(&s->common);

    /* Find the number of consecutive dirty chunks following the first dirty
     * one, and wait for in-flight requests in them. */
    bdrv_dirty_bitmap_lock(s->dirty_bitmap);
    while (nb_chunks * s->granularity < s->buf_size) {
        int64_t next_dirty;
        int64_t next_offset = offset + nb_chunks * s->granularity;
        int64_t next_chunk = next_offset / s->granularity;
        if (next_offset >= s->bdev_length ||
            !bdrv_get_dirty_locked(source, s->dirty_bitmap,
                                   next_offset >> BDRV_SECTOR_BITS)) {
            break;
        }
        if (test_bit(next_chunk, s->in_flight_bitmap)) {
            break;
        }

        next_dirty = bdrv_dirty_iter_next(s->dbi) * BDRV_SECTOR_SIZE;
        if (next_dirty > next_offset || next_dirty < 0) {
            /* The bitmap iterator's cache is stale, refresh it */
            bdrv_set_dirty_iter(s->dbi, next_offset >> BDRV_SECTOR_BITS);
            next_dirty = bdrv_dirty_iter_next(s->dbi) * BDRV_SECTOR_SIZE;
        }
        assert(next_dirty == next_offset);
        nb_chunks++;
    }

    /* Clear dirty bits before querying the block status, because
     * calling bdrv_get_block_status_above could yield - if some blocks are
     * marked dirty in this window, we need to know.
     */
    bdrv_reset_dirty_bitmap_locked(s->dirty_bitmap, offset >> BDRV_SECTOR_BITS,
                                   nb_chunks * sectors_per_chunk);
    bdrv_dirty_bitmap_unlock(s->dirty_bitmap);

    bitmap_set(s->in_flight_bitmap, offset / s->granularity, nb_chunks);
    while (nb_chunks > 0 && offset < s->bdev_length) {
        int64_t ret;
        int io_sectors;
        unsigned int io_bytes;
        int64_t io_bytes_acct;
        BlockDriverState *file;
        enum MirrorMethod {
            MIRROR_METHOD_COPY,
            MIRROR_METHOD_ZERO,
            MIRROR_METHOD_DISCARD
        } mirror_method = MIRROR_METHOD_COPY;

        assert(!(offset % s->granularity));
        ret = bdrv_get_block_status_above(source, NULL,
                                          offset >> BDRV_SECTOR_BITS,
                                          nb_chunks * sectors_per_chunk,
                                          &io_sectors, &file);
        io_bytes = io_sectors * BDRV_SECTOR_SIZE;
        if (ret < 0) {
            io_bytes = MIN(nb_chunks * s->granularity, max_io_bytes);
        } else if (ret & BDRV_BLOCK_DATA) {
            io_bytes = MIN(io_bytes, max_io_bytes);
        }

        io_bytes -= io_bytes % s->granularity;
        if (io_bytes < s->granularity) {
            io_bytes = s->granularity;
        } else if (ret >= 0 && !(ret & BDRV_BLOCK_DATA)) {
            int64_t target_offset;
            unsigned int target_bytes;
            bdrv_round_to_clusters(blk_bs(s->target), offset, io_bytes,
                                   &target_offset, &target_bytes);
            if (target_offset == offset &&
                target_bytes == io_bytes) {
                mirror_method = ret & BDRV_BLOCK_ZERO ?
                                    MIRROR_METHOD_ZERO :
                                    MIRROR_METHOD_DISCARD;
            }
        }

        while (s->in_flight >= MAX_IN_FLIGHT) {
            trace_mirror_yield_in_flight(s, offset, s->in_flight);
            mirror_wait_for_io(s);
        }

        if (s->ret < 0) {
            return 0;
        }

        io_bytes = mirror_clip_bytes(s, offset, io_bytes);
        switch (mirror_method) {
        case MIRROR_METHOD_COPY:
            io_bytes = io_bytes_acct = mirror_do_read(s, offset, io_bytes);
            break;
        case MIRROR_METHOD_ZERO:
        case MIRROR_METHOD_DISCARD:
            mirror_do_zero_or_discard(s, offset, io_bytes,
                                      mirror_method == MIRROR_METHOD_DISCARD);
            if (write_zeroes_ok) {
                io_bytes_acct = 0;
            } else {
                io_bytes_acct = io_bytes;
            }
            break;
        default:
            abort();
        }
        assert(io_bytes);
        offset += io_bytes;
        nb_chunks -= DIV_ROUND_UP(io_bytes, s->granularity);
        if (s->common.speed) {
            delay_ns = ratelimit_calculate_delay(&s->limit, io_bytes_acct);
        }
    }
    return delay_ns;
}

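/* Slice s->buf into granularity-sized chunks and put them all on the
 * free list. */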
static void mirror_free_init(MirrorBlockJob *s)
{
    int granularity = s->granularity;
    size_t buf_size = s->buf_size;
    uint8_t *buf = s->buf;

    assert(s->buf_free_count == 0);
    QSIMPLEQ_INIT(&s->buf_free);
    while (buf_size != 0) {
        MirrorBuffer *cur = (MirrorBuffer *)buf;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, cur, next);
        s->buf_free_count++;
        buf_size -= granularity;
        buf += granularity;
    }
}

/* This is also used for the .pause callback. There is no matching
 * mirror_resume() because mirror_run() will begin iterating again
 * when the job is resumed.
 */
static void mirror_wait_for_all_io(MirrorBlockJob *s)
{
    while (s->in_flight > 0) {
        mirror_wait_for_io(s);
    }
}

typedef struct {
    int ret;
} MirrorExitData;

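/* Runs in the main loop after mirror_run() finishes: fix up the target's
 * backing chain, optionally replace the source (or s->to_replace) with the
 * target in the graph, remove the mirror_top filter node, and complete the
 * job with the deferred return value. */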
static void mirror_exit(BlockJob *job, void *opaque)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
    MirrorExitData *data = opaque;
    AioContext *replace_aio_context = NULL;
    BlockDriverState *src = s->source;
    BlockDriverState *target_bs = blk_bs(s->target);
    BlockDriverState *mirror_top_bs = s->mirror_top_bs;
    Error *local_err = NULL;

    bdrv_release_dirty_bitmap(src, s->dirty_bitmap);

    /* Make sure that the source BDS doesn't go away before we have called
     * block_job_completed(). */
    bdrv_ref(src);
    bdrv_ref(mirror_top_bs);
    bdrv_ref(target_bs);

    /* Remove target parent that still uses BLK_PERM_WRITE/RESIZE before
     * inserting target_bs at s->to_replace, where we might not be able to get
     * these permissions.
     *
     * Note that blk_unref() alone doesn't necessarily drop permissions because
     * we might be running nested inside mirror_drain(), which takes an extra
     * reference, so use an explicit blk_set_perm() first. */
    blk_set_perm(s->target, 0, BLK_PERM_ALL, &error_abort);
    blk_unref(s->target);
    s->target = NULL;

    /* We don't access the source any more. Dropping any WRITE/RESIZE is
     * required before it could become a backing file of target_bs. */
    bdrv_child_try_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL,
                            &error_abort);
    if (s->backing_mode == MIRROR_SOURCE_BACKING_CHAIN) {
        BlockDriverState *backing = s->is_none_mode ? src : s->base;
        if (backing_bs(target_bs) != backing) {
            bdrv_set_backing_hd(target_bs, backing, &local_err);
            if (local_err) {
                error_report_err(local_err);
                data->ret = -EPERM;
            }
        }
    }

    if (s->to_replace) {
        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);
    }

    if (s->should_complete && data->ret == 0) {
        BlockDriverState *to_replace = src;
        if (s->to_replace) {
            to_replace = s->to_replace;
        }

        if (bdrv_get_flags(target_bs) != bdrv_get_flags(to_replace)) {
            bdrv_reopen(target_bs, bdrv_get_flags(to_replace), NULL);
        }

        /* The mirror job has no requests in flight any more, but we need to
         * drain potential other users of the BDS before changing the graph. */
        bdrv_drained_begin(target_bs);
        bdrv_replace_node(to_replace, target_bs, &local_err);
        bdrv_drained_end(target_bs);
        if (local_err) {
            error_report_err(local_err);
            data->ret = -EPERM;
        }
    }
    if (s->to_replace) {
        bdrv_op_unblock_all(s->to_replace, s->replace_blocker);
        error_free(s->replace_blocker);
        bdrv_unref(s->to_replace);
    }
    if (replace_aio_context) {
        aio_context_release(replace_aio_context);
    }
    g_free(s->replaces);
    bdrv_unref(target_bs);

    /* Remove the mirror filter driver from the graph. Before this, get rid of
     * the blockers on the intermediate nodes so that the resulting state is
     * valid. Also give up permissions on mirror_top_bs->backing, which might
     * block the removal. */
    block_job_remove_all_bdrv(job);
    bdrv_child_try_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL,
                            &error_abort);
    bdrv_replace_node(mirror_top_bs, backing_bs(mirror_top_bs), &error_abort);

    /* We just changed the BDS the job BB refers to (with either or both of the
     * bdrv_replace_node() calls), so switch the BB back so the cleanup does
     * the right thing. We don't need any permissions any more now. */
    blk_remove_bs(job->blk);
    blk_set_perm(job->blk, 0, BLK_PERM_ALL, &error_abort);
    blk_insert_bs(job->blk, mirror_top_bs, &error_abort);

    block_job_completed(&s->common, data->ret);

    g_free(data);
    bdrv_drained_end(src);
    bdrv_unref(mirror_top_bs);
    bdrv_unref(src);
}

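/* Yield back to the main loop once per SLICE_TIME so that cancellation and
 * bdrv_drain_all() make progress; otherwise just honour a pending pause
 * request. */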
static void mirror_throttle(MirrorBlockJob *s)
{
    int64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

    if (now - s->last_pause_ns > SLICE_TIME) {
        s->last_pause_ns = now;
        block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, 0);
    } else {
        block_job_pause_point(&s->common);
    }
}

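/* Prepare the dirty bitmap for a full sync.  If the target has no backing
 * file and no guaranteed zero initialization, either mark the whole device
 * dirty (when efficient zeroing is unavailable) or zero it out first; then
 * mark every area allocated above @base in the source as dirty. */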
static int coroutine_fn mirror_dirty_init(MirrorBlockJob *s)
{
    int64_t sector_num, end;
    BlockDriverState *base = s->base;
    BlockDriverState *bs = s->source;
    BlockDriverState *target_bs = blk_bs(s->target);
    int ret, n;
    int64_t count;

    end = s->bdev_length / BDRV_SECTOR_SIZE;

    if (base == NULL && !bdrv_has_zero_init(target_bs)) {
        if (!bdrv_can_write_zeroes_with_unmap(target_bs)) {
            bdrv_set_dirty_bitmap(s->dirty_bitmap, 0, end);
            return 0;
        }

        s->initial_zeroing_ongoing = true;
        for (sector_num = 0; sector_num < end; ) {
            int nb_sectors = MIN(end - sector_num,
                QEMU_ALIGN_DOWN(INT_MAX, s->granularity) >> BDRV_SECTOR_BITS);

            mirror_throttle(s);

            if (block_job_is_cancelled(&s->common)) {
                s->initial_zeroing_ongoing = false;
                return 0;
            }

            if (s->in_flight >= MAX_IN_FLIGHT) {
                trace_mirror_yield(s, UINT64_MAX, s->buf_free_count,
                                   s->in_flight);
                mirror_wait_for_io(s);
                continue;
            }

            mirror_do_zero_or_discard(s, sector_num * BDRV_SECTOR_SIZE,
                                      nb_sectors * BDRV_SECTOR_SIZE, false);
            sector_num += nb_sectors;
        }

        mirror_wait_for_all_io(s);
        s->initial_zeroing_ongoing = false;
    }

    /* First part, loop on the sectors and initialize the dirty bitmap.  */
    for (sector_num = 0; sector_num < end; ) {
        /* Just to make sure we are not exceeding int limit. */
        int nb_sectors = MIN(INT_MAX >> BDRV_SECTOR_BITS,
                             end - sector_num);

        mirror_throttle(s);

        if (block_job_is_cancelled(&s->common)) {
            return 0;
        }

        ret = bdrv_is_allocated_above(bs, base, sector_num * BDRV_SECTOR_SIZE,
                                      nb_sectors * BDRV_SECTOR_SIZE, &count);
        if (ret < 0) {
            return ret;
        }

        /* TODO: Relax this once bdrv_is_allocated_above and dirty
         * bitmaps no longer require sector alignment. */
        assert(QEMU_IS_ALIGNED(count, BDRV_SECTOR_SIZE));
        n = count >> BDRV_SECTOR_BITS;
        assert(n > 0);
        if (ret == 1) {
            bdrv_set_dirty_bitmap(s->dirty_bitmap, sector_num, n);
        }
        sector_num += n;
    }
    return 0;
}

/* Called when going out of the streaming phase to flush the bulk of the
 * data to the medium, or just before completing.
 */
static int mirror_flush(MirrorBlockJob *s)
{
    int ret = blk_flush(s->target);
    if (ret < 0) {
        if (mirror_error_action(s, false, -ret) == BLOCK_ERROR_ACTION_REPORT) {
            s->ret = ret;
        }
    }
    return ret;
}

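/* The main coroutine of the mirror job: performs the initial bulk copy,
 * emits BLOCK_JOB_READY once source and target converge, and then keeps
 * mirroring new writes until the job is completed or cancelled.  Cleanup
 * is deferred to mirror_exit() in the main loop. */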
static void coroutine_fn mirror_run(void *opaque)
{
    MirrorBlockJob *s = opaque;
    MirrorExitData *data;
    BlockDriverState *bs = s->source;
    BlockDriverState *target_bs = blk_bs(s->target);
    bool need_drain = true;
    int64_t length;
    BlockDriverInfo bdi;
    char backing_filename[2]; /* we only need 2 characters because we are only
                                 checking for an empty string */
    int ret = 0;

    if (block_job_is_cancelled(&s->common)) {
        goto immediate_exit;
    }

    s->bdev_length = bdrv_getlength(bs);
    if (s->bdev_length < 0) {
        ret = s->bdev_length;
        goto immediate_exit;
    }

    /* Active commit must resize the base image if its size differs from the
     * active layer. */
    if (s->base == blk_bs(s->target)) {
        int64_t base_length;

        base_length = blk_getlength(s->target);
        if (base_length < 0) {
            ret = base_length;
            goto immediate_exit;
        }

        if (s->bdev_length > base_length) {
            ret = blk_truncate(s->target, s->bdev_length, PREALLOC_MODE_OFF,
                               NULL);
            if (ret < 0) {
                goto immediate_exit;
            }
        }
    }

    if (s->bdev_length == 0) {
        /* Report BLOCK_JOB_READY and wait for complete. */
        block_job_event_ready(&s->common);
        s->synced = true;
        while (!block_job_is_cancelled(&s->common) && !s->should_complete) {
            block_job_yield(&s->common);
        }
        s->common.cancelled = false;
        goto immediate_exit;
    }

    length = DIV_ROUND_UP(s->bdev_length, s->granularity);
    s->in_flight_bitmap = bitmap_new(length);

    /* If we have no backing file yet in the destination, we cannot let
     * the destination do COW.  Instead, we copy sectors around the
     * dirty data if needed.  We need a bitmap to do that.
     */
    bdrv_get_backing_filename(target_bs, backing_filename,
                              sizeof(backing_filename));
    if (!bdrv_get_info(target_bs, &bdi) && bdi.cluster_size) {
        s->target_cluster_size = bdi.cluster_size;
    } else {
        s->target_cluster_size = BDRV_SECTOR_SIZE;
    }
    if (backing_filename[0] && !target_bs->backing &&
        s->granularity < s->target_cluster_size) {
        s->buf_size = MAX(s->buf_size, s->target_cluster_size);
        s->cow_bitmap = bitmap_new(length);
    }
    s->max_iov = MIN(bs->bl.max_iov, target_bs->bl.max_iov);

    s->buf = qemu_try_blockalign(bs, s->buf_size);
    if (s->buf == NULL) {
        ret = -ENOMEM;
        goto immediate_exit;
    }

    mirror_free_init(s);

    s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    if (!s->is_none_mode) {
        ret = mirror_dirty_init(s);
        if (ret < 0 || block_job_is_cancelled(&s->common)) {
            goto immediate_exit;
        }
    }

    assert(!s->dbi);
    s->dbi = bdrv_dirty_iter_new(s->dirty_bitmap, 0);
    for (;;) {
        uint64_t delay_ns = 0;
        int64_t cnt, delta;
        bool should_complete;

        if (s->ret < 0) {
            ret = s->ret;
            goto immediate_exit;
        }

        block_job_pause_point(&s->common);

        cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        /* s->common.offset contains the number of bytes already processed so
         * far, cnt is the number of dirty sectors remaining and
         * s->bytes_in_flight is the number of bytes currently being
         * processed; together those are the current total operation length */
        s->common.len = s->common.offset + s->bytes_in_flight +
            cnt * BDRV_SECTOR_SIZE;

        /* Note that even when no rate limit is applied we need to yield
         * periodically with no pending I/O so that bdrv_drain_all() returns.
         * We do so every SLICE_TIME nanoseconds, or when there is an error,
         * or when the source is clean, whichever comes first.
         */
        delta = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - s->last_pause_ns;
        if (delta < SLICE_TIME &&
            s->common.iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
            if (s->in_flight >= MAX_IN_FLIGHT || s->buf_free_count == 0 ||
                (cnt == 0 && s->in_flight > 0)) {
                trace_mirror_yield(s, cnt * BDRV_SECTOR_SIZE,
                                   s->buf_free_count, s->in_flight);
                mirror_wait_for_io(s);
                continue;
            } else if (cnt != 0) {
                delay_ns = mirror_iteration(s);
            }
        }

        should_complete = false;
        if (s->in_flight == 0 && cnt == 0) {
            trace_mirror_before_flush(s);
            if (!s->synced) {
                if (mirror_flush(s) < 0) {
                    /* Go check s->ret.  */
                    continue;
                }
                /* We're out of the streaming phase.  From now on, if the job
                 * is cancelled we will actually complete all pending I/O and
                 * report completion.  This way, block-job-cancel will leave
                 * the target in a consistent state.
                 */
                block_job_event_ready(&s->common);
                s->synced = true;
            }

            should_complete = s->should_complete ||
                block_job_is_cancelled(&s->common);
            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        }

        if (cnt == 0 && should_complete) {
            /* The dirty bitmap is not updated while operations are pending.
             * If we're about to exit, wait for pending operations before
             * calling bdrv_get_dirty_count(bs), or we may exit while the
             * source has dirty data to copy!
             *
             * Note that I/O can be submitted by the guest while
             * mirror_populate runs, so pause it now.  Before deciding
             * whether to switch to target check one last time if I/O has
             * come in the meanwhile, and if not flush the data to disk.
             */
            trace_mirror_before_drain(s, cnt * BDRV_SECTOR_SIZE);

            bdrv_drained_begin(bs);
            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
            if (cnt > 0 || mirror_flush(s) < 0) {
                bdrv_drained_end(bs);
                continue;
            }

            /* The two disks are in sync.  Exit and report successful
             * completion.
             */
            assert(QLIST_EMPTY(&bs->tracked_requests));
            s->common.cancelled = false;
            need_drain = false;
            break;
        }

        ret = 0;
        trace_mirror_before_sleep(s, cnt * BDRV_SECTOR_SIZE,
                                  s->synced, delay_ns);
        if (!s->synced) {
            block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
            if (block_job_is_cancelled(&s->common)) {
                break;
            }
        } else if (!should_complete) {
            delay_ns = (s->in_flight == 0 && cnt == 0 ? SLICE_TIME : 0);
            block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
        }
        s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    }

immediate_exit:
    if (s->in_flight > 0) {
        /* We get here only if something went wrong.  Either the job failed,
         * or it was cancelled prematurely so that we do not guarantee that
         * the target is a copy of the source.
         */
        assert(ret < 0 || (!s->synced && block_job_is_cancelled(&s->common)));
        assert(need_drain);
        mirror_wait_for_all_io(s);
    }

    assert(s->in_flight == 0);
    qemu_vfree(s->buf);
    g_free(s->cow_bitmap);
    g_free(s->in_flight_bitmap);
    bdrv_dirty_iter_free(s->dbi);

    data = g_malloc(sizeof(*data));
    data->ret = ret;

    if (need_drain) {
        bdrv_drained_begin(bs);
    }
    block_job_defer_to_main_loop(&s->common, mirror_exit, data);
}

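/* Update the job's rate limit; a negative speed is rejected as an invalid
 * parameter. */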
static void mirror_set_speed(BlockJob *job, int64_t speed, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    if (speed < 0) {
        error_setg(errp, QERR_INVALID_PARAMETER, "speed");
        return;
    }
    ratelimit_set_speed(&s->limit, speed, SLICE_TIME);
}

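/* Implementation of block-job-complete: optionally open the target's backing
 * chain, block operations on the node to be replaced, and tell mirror_run()
 * to finish at the next point where source and target are in sync. */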
942d63ffd87SPaolo Bonzini static void mirror_complete(BlockJob *job, Error **errp)
943d63ffd87SPaolo Bonzini {
944d63ffd87SPaolo Bonzini     MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
9454ef85a9cSKevin Wolf     BlockDriverState *target;
946d63ffd87SPaolo Bonzini 
947274fcceeSMax Reitz     target = blk_bs(s->target);
948274fcceeSMax Reitz 
949d63ffd87SPaolo Bonzini     if (!s->synced) {
9509df229c3SAlberto Garcia         error_setg(errp, "The active block job '%s' cannot be completed",
9519df229c3SAlberto Garcia                    job->id);
952d63ffd87SPaolo Bonzini         return;
953d63ffd87SPaolo Bonzini     }
954d63ffd87SPaolo Bonzini 
955274fcceeSMax Reitz     if (s->backing_mode == MIRROR_OPEN_BACKING_CHAIN) {
956274fcceeSMax Reitz         int ret;
957274fcceeSMax Reitz 
958274fcceeSMax Reitz         assert(!target->backing);
959274fcceeSMax Reitz         ret = bdrv_open_backing_file(target, NULL, "backing", errp);
960274fcceeSMax Reitz         if (ret < 0) {
961274fcceeSMax Reitz             return;
962274fcceeSMax Reitz         }
963274fcceeSMax Reitz     }
964274fcceeSMax Reitz 
96515d67298SChanglong Xie     /* block all operations on to_replace bs */
96609158f00SBenoît Canet     if (s->replaces) {
9675a7e7a0bSStefan Hajnoczi         AioContext *replace_aio_context;
9685a7e7a0bSStefan Hajnoczi 
969e12f3784SWen Congyang         s->to_replace = bdrv_find_node(s->replaces);
97009158f00SBenoît Canet         if (!s->to_replace) {
971e12f3784SWen Congyang             error_setg(errp, "Node name '%s' not found", s->replaces);
97209158f00SBenoît Canet             return;
97309158f00SBenoît Canet         }
97409158f00SBenoît Canet 
9755a7e7a0bSStefan Hajnoczi         replace_aio_context = bdrv_get_aio_context(s->to_replace);
9765a7e7a0bSStefan Hajnoczi         aio_context_acquire(replace_aio_context);
9775a7e7a0bSStefan Hajnoczi 
9784ef85a9cSKevin Wolf         /* TODO Translate this into the permission system. The current
9794ef85a9cSKevin Wolf          * definition of GRAPH_MOD would require requesting it for the parents;
9804ef85a9cSKevin Wolf          * they might not even be BlockDriverStates, however, so a BdrvChild
9814ef85a9cSKevin Wolf          * can't address them. GRAPH_MOD may need to be redefined. */
98209158f00SBenoît Canet         error_setg(&s->replace_blocker,
98309158f00SBenoît Canet                    "block device is in use by block-job-complete");
98409158f00SBenoît Canet         bdrv_op_block_all(s->to_replace, s->replace_blocker);
98509158f00SBenoît Canet         bdrv_ref(s->to_replace);
9865a7e7a0bSStefan Hajnoczi 
9875a7e7a0bSStefan Hajnoczi         aio_context_release(replace_aio_context);
98809158f00SBenoît Canet     }
98909158f00SBenoît Canet 
990d63ffd87SPaolo Bonzini     s->should_complete = true;
991751ebd76SFam Zheng     block_job_enter(&s->common);
992d63ffd87SPaolo Bonzini }
993d63ffd87SPaolo Bonzini 
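/* Pause callback: wait until all in-flight mirror operations have completed
 * so that the job is quiescent before its coroutine is suspended. */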
994bae8196dSPaolo Bonzini static void mirror_pause(BlockJob *job)
995565ac01fSStefan Hajnoczi {
996565ac01fSStefan Hajnoczi     MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
997565ac01fSStefan Hajnoczi 
998bae8196dSPaolo Bonzini     mirror_wait_for_all_io(s);
999565ac01fSStefan Hajnoczi }
1000565ac01fSStefan Hajnoczi 
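/* Keep the target BlockBackend in the same AioContext as the job whenever
 * the source is moved to a new context. */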
1001565ac01fSStefan Hajnoczi static void mirror_attached_aio_context(BlockJob *job, AioContext *new_context)
1002565ac01fSStefan Hajnoczi {
1003565ac01fSStefan Hajnoczi     MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
1004565ac01fSStefan Hajnoczi 
1005565ac01fSStefan Hajnoczi     blk_set_aio_context(s->target, new_context);
1006565ac01fSStefan Hajnoczi }
1007565ac01fSStefan Hajnoczi 
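/* Drain callback: make sure any requests issued to the target have
 * completed. */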
1008bae8196dSPaolo Bonzini static void mirror_drain(BlockJob *job)
1009bae8196dSPaolo Bonzini {
1010bae8196dSPaolo Bonzini     MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
1011bae8196dSPaolo Bonzini 
1012bae8196dSPaolo Bonzini     /* Need to keep a reference in case blk_drain triggers execution
1013bae8196dSPaolo Bonzini      * of mirror_complete...
1014bae8196dSPaolo Bonzini      */
1015bae8196dSPaolo Bonzini     if (s->target) {
1016bae8196dSPaolo Bonzini         BlockBackend *target = s->target;
1017bae8196dSPaolo Bonzini         blk_ref(target);
1018bae8196dSPaolo Bonzini         blk_drain(target);
1019bae8196dSPaolo Bonzini         blk_unref(target);
1020bae8196dSPaolo Bonzini     }
1021bae8196dSPaolo Bonzini }
1022bae8196dSPaolo Bonzini 
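/* Mirror and active commit share the same implementation; the two driver
 * tables below differ only in the job type they advertise. */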
10233fc4b10aSFam Zheng static const BlockJobDriver mirror_job_driver = {
1024893f7ebaSPaolo Bonzini     .instance_size          = sizeof(MirrorBlockJob),
102579e14bf7SFam Zheng     .job_type               = BLOCK_JOB_TYPE_MIRROR,
1026893f7ebaSPaolo Bonzini     .set_speed              = mirror_set_speed,
1027a7815a76SJohn Snow     .start                  = mirror_run,
1028d63ffd87SPaolo Bonzini     .complete               = mirror_complete,
1029565ac01fSStefan Hajnoczi     .pause                  = mirror_pause,
1030565ac01fSStefan Hajnoczi     .attached_aio_context   = mirror_attached_aio_context,
1031bae8196dSPaolo Bonzini     .drain                  = mirror_drain,
1032893f7ebaSPaolo Bonzini };
1033893f7ebaSPaolo Bonzini 
103403544a6eSFam Zheng static const BlockJobDriver commit_active_job_driver = {
103503544a6eSFam Zheng     .instance_size          = sizeof(MirrorBlockJob),
103603544a6eSFam Zheng     .job_type               = BLOCK_JOB_TYPE_COMMIT,
103703544a6eSFam Zheng     .set_speed              = mirror_set_speed,
1038a7815a76SJohn Snow     .start                  = mirror_run,
103903544a6eSFam Zheng     .complete               = mirror_complete,
1040565ac01fSStefan Hajnoczi     .pause                  = mirror_pause,
1041565ac01fSStefan Hajnoczi     .attached_aio_context   = mirror_attached_aio_context,
1042bae8196dSPaolo Bonzini     .drain                  = mirror_drain,
104303544a6eSFam Zheng };
104403544a6eSFam Zheng 
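/* The mirror_top filter performs no I/O of its own: each of the callbacks
 * below simply forwards the request to the backing (source) node. */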
10454ef85a9cSKevin Wolf static int coroutine_fn bdrv_mirror_top_preadv(BlockDriverState *bs,
10464ef85a9cSKevin Wolf     uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags)
10474ef85a9cSKevin Wolf {
10484ef85a9cSKevin Wolf     return bdrv_co_preadv(bs->backing, offset, bytes, qiov, flags);
10494ef85a9cSKevin Wolf }
10504ef85a9cSKevin Wolf 
10514ef85a9cSKevin Wolf static int coroutine_fn bdrv_mirror_top_pwritev(BlockDriverState *bs,
10524ef85a9cSKevin Wolf     uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags)
10534ef85a9cSKevin Wolf {
10544ef85a9cSKevin Wolf     return bdrv_co_pwritev(bs->backing, offset, bytes, qiov, flags);
10554ef85a9cSKevin Wolf }
10564ef85a9cSKevin Wolf 
10574ef85a9cSKevin Wolf static int coroutine_fn bdrv_mirror_top_flush(BlockDriverState *bs)
10584ef85a9cSKevin Wolf {
10594ef85a9cSKevin Wolf     return bdrv_co_flush(bs->backing->bs);
10604ef85a9cSKevin Wolf }
10614ef85a9cSKevin Wolf 
10624ef85a9cSKevin Wolf static int64_t coroutine_fn bdrv_mirror_top_get_block_status(
10634ef85a9cSKevin Wolf     BlockDriverState *bs, int64_t sector_num, int nb_sectors, int *pnum,
10644ef85a9cSKevin Wolf     BlockDriverState **file)
10654ef85a9cSKevin Wolf {
10664ef85a9cSKevin Wolf     *pnum = nb_sectors;
10674ef85a9cSKevin Wolf     *file = bs->backing->bs;
1068d5254033SEric Blake     return BDRV_BLOCK_RAW | BDRV_BLOCK_OFFSET_VALID |
10694ef85a9cSKevin Wolf            (sector_num << BDRV_SECTOR_BITS);
10704ef85a9cSKevin Wolf }
10714ef85a9cSKevin Wolf 
10724ef85a9cSKevin Wolf static int coroutine_fn bdrv_mirror_top_pwrite_zeroes(BlockDriverState *bs,
1073f5a5ca79SManos Pitsidianakis     int64_t offset, int bytes, BdrvRequestFlags flags)
10744ef85a9cSKevin Wolf {
1075f5a5ca79SManos Pitsidianakis     return bdrv_co_pwrite_zeroes(bs->backing, offset, bytes, flags);
10764ef85a9cSKevin Wolf }
10774ef85a9cSKevin Wolf 
10784ef85a9cSKevin Wolf static int coroutine_fn bdrv_mirror_top_pdiscard(BlockDriverState *bs,
1079f5a5ca79SManos Pitsidianakis     int64_t offset, int bytes)
10804ef85a9cSKevin Wolf {
1081f5a5ca79SManos Pitsidianakis     return bdrv_co_pdiscard(bs->backing->bs, offset, bytes);
10824ef85a9cSKevin Wolf }
10834ef85a9cSKevin Wolf 
1084fd4a6493SKevin Wolf static void bdrv_mirror_top_refresh_filename(BlockDriverState *bs, QDict *opts)
1085fd4a6493SKevin Wolf {
1086fd4a6493SKevin Wolf     bdrv_refresh_filename(bs->backing->bs);
1087fd4a6493SKevin Wolf     pstrcpy(bs->exact_filename, sizeof(bs->exact_filename),
1088fd4a6493SKevin Wolf             bs->backing->bs->filename);
1089fd4a6493SKevin Wolf }
1090fd4a6493SKevin Wolf 
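/* The filter node holds no resources of its own, so there is nothing to do
 * on close. */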
10914ef85a9cSKevin Wolf static void bdrv_mirror_top_close(BlockDriverState *bs)
10924ef85a9cSKevin Wolf {
10934ef85a9cSKevin Wolf }
10944ef85a9cSKevin Wolf 
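/* Only a guest's WRITE permission needs to be forwarded to the real image;
 * all permissions can be shared with other users of the node. */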
10954ef85a9cSKevin Wolf static void bdrv_mirror_top_child_perm(BlockDriverState *bs, BdrvChild *c,
10964ef85a9cSKevin Wolf                                        const BdrvChildRole *role,
10974ef85a9cSKevin Wolf                                        uint64_t perm, uint64_t shared,
10984ef85a9cSKevin Wolf                                        uint64_t *nperm, uint64_t *nshared)
10994ef85a9cSKevin Wolf {
11004ef85a9cSKevin Wolf     /* Must be able to forward guest writes to the real image */
11014ef85a9cSKevin Wolf     *nperm = 0;
11024ef85a9cSKevin Wolf     if (perm & BLK_PERM_WRITE) {
11034ef85a9cSKevin Wolf         *nperm |= BLK_PERM_WRITE;
11044ef85a9cSKevin Wolf     }
11054ef85a9cSKevin Wolf 
11064ef85a9cSKevin Wolf     *nshared = BLK_PERM_ALL;
11074ef85a9cSKevin Wolf }
11084ef85a9cSKevin Wolf 
11094ef85a9cSKevin Wolf /* Dummy node that provides consistent reads to its users without requiring
11104ef85a9cSKevin Wolf  * them from its backing file, and that allows writes on the backing file chain. */
11114ef85a9cSKevin Wolf static BlockDriver bdrv_mirror_top = {
11124ef85a9cSKevin Wolf     .format_name                = "mirror_top",
11134ef85a9cSKevin Wolf     .bdrv_co_preadv             = bdrv_mirror_top_preadv,
11144ef85a9cSKevin Wolf     .bdrv_co_pwritev            = bdrv_mirror_top_pwritev,
11154ef85a9cSKevin Wolf     .bdrv_co_pwrite_zeroes      = bdrv_mirror_top_pwrite_zeroes,
11164ef85a9cSKevin Wolf     .bdrv_co_pdiscard           = bdrv_mirror_top_pdiscard,
11174ef85a9cSKevin Wolf     .bdrv_co_flush              = bdrv_mirror_top_flush,
11184ef85a9cSKevin Wolf     .bdrv_co_get_block_status   = bdrv_mirror_top_get_block_status,
1119fd4a6493SKevin Wolf     .bdrv_refresh_filename      = bdrv_mirror_top_refresh_filename,
11204ef85a9cSKevin Wolf     .bdrv_close                 = bdrv_mirror_top_close,
11214ef85a9cSKevin Wolf     .bdrv_child_perm            = bdrv_mirror_top_child_perm,
11224ef85a9cSKevin Wolf };
11234ef85a9cSKevin Wolf 
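/* Common setup for mirror and active commit: insert the mirror_top filter
 * above bs, create the block job on the filter node, attach a BlockBackend
 * to the target with the appropriate permissions, create the dirty bitmap,
 * and finally start the job coroutine. */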
112471aa9867SAlberto Garcia static void mirror_start_job(const char *job_id, BlockDriverState *bs,
112547970dfbSJohn Snow                              int creation_flags, BlockDriverState *target,
112647970dfbSJohn Snow                              const char *replaces, int64_t speed,
112747970dfbSJohn Snow                              uint32_t granularity, int64_t buf_size,
1128274fcceeSMax Reitz                              BlockMirrorBackingMode backing_mode,
112903544a6eSFam Zheng                              BlockdevOnError on_source_error,
1130b952b558SPaolo Bonzini                              BlockdevOnError on_target_error,
11310fc9f8eaSFam Zheng                              bool unmap,
1132097310b5SMarkus Armbruster                              BlockCompletionFunc *cb,
113351ccfa2dSFam Zheng                              void *opaque,
113403544a6eSFam Zheng                              const BlockJobDriver *driver,
1135b49f7eadSWen Congyang                              bool is_none_mode, BlockDriverState *base,
113651ccfa2dSFam Zheng                              bool auto_complete, const char *filter_node_name,
113751ccfa2dSFam Zheng                              Error **errp)
1138893f7ebaSPaolo Bonzini {
1139893f7ebaSPaolo Bonzini     MirrorBlockJob *s;
11404ef85a9cSKevin Wolf     BlockDriverState *mirror_top_bs;
11414ef85a9cSKevin Wolf     bool target_graph_mod;
11424ef85a9cSKevin Wolf     bool target_is_backing;
1143b2c2832cSKevin Wolf     Error *local_err = NULL;
1144d7086422SKevin Wolf     int ret;
1145893f7ebaSPaolo Bonzini 
1146eee13dfeSPaolo Bonzini     if (granularity == 0) {
1147341ebc2fSJohn Snow         granularity = bdrv_get_default_bitmap_granularity(target);
1148eee13dfeSPaolo Bonzini     }
1149eee13dfeSPaolo Bonzini 
1150eee13dfeSPaolo Bonzini     assert((granularity & (granularity - 1)) == 0);
1151b436982fSEric Blake     /* Granularity must be large enough for sector-based dirty bitmap */
1152b436982fSEric Blake     assert(granularity >= BDRV_SECTOR_SIZE);
1153eee13dfeSPaolo Bonzini 
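    /* A negative buffer size is rejected; zero selects the default of
     * MAX_IN_FLIGHT * MAX_IO_BYTES. */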
115448ac0a4dSWen Congyang     if (buf_size < 0) {
115548ac0a4dSWen Congyang         error_setg(errp, "Invalid parameter 'buf-size'");
115648ac0a4dSWen Congyang         return;
115748ac0a4dSWen Congyang     }
115848ac0a4dSWen Congyang 
115948ac0a4dSWen Congyang     if (buf_size == 0) {
116048ac0a4dSWen Congyang         buf_size = DEFAULT_MIRROR_BUF_SIZE;
116148ac0a4dSWen Congyang     }
11625bc361b8SFam Zheng 
11634ef85a9cSKevin Wolf     /* In the case of active commit, add a dummy driver to provide consistent
11644ef85a9cSKevin Wolf      * reads on the top while not requiring them from the intermediate nodes,
11654ef85a9cSKevin Wolf      * and make the backing chain writable. */
11666cdbceb1SKevin Wolf     mirror_top_bs = bdrv_new_open_driver(&bdrv_mirror_top, filter_node_name,
11676cdbceb1SKevin Wolf                                          BDRV_O_RDWR, errp);
11684ef85a9cSKevin Wolf     if (mirror_top_bs == NULL) {
1169893f7ebaSPaolo Bonzini         return;
1170893f7ebaSPaolo Bonzini     }
11714ef85a9cSKevin Wolf     mirror_top_bs->total_sectors = bs->total_sectors;
117219dd29e8SFam Zheng     bdrv_set_aio_context(mirror_top_bs, bdrv_get_aio_context(bs));
1173893f7ebaSPaolo Bonzini 
11744ef85a9cSKevin Wolf     /* bdrv_append takes ownership of the mirror_top_bs reference; keep it
11757a25fcd0SMax Reitz      * alive until block_job_create() succeeds, even if bs has no parent. */
11764ef85a9cSKevin Wolf     bdrv_ref(mirror_top_bs);
11774ef85a9cSKevin Wolf     bdrv_drained_begin(bs);
1178b2c2832cSKevin Wolf     bdrv_append(mirror_top_bs, bs, &local_err);
11794ef85a9cSKevin Wolf     bdrv_drained_end(bs);
11804ef85a9cSKevin Wolf 
1181b2c2832cSKevin Wolf     if (local_err) {
1182b2c2832cSKevin Wolf         bdrv_unref(mirror_top_bs);
1183b2c2832cSKevin Wolf         error_propagate(errp, local_err);
1184b2c2832cSKevin Wolf         return;
1185b2c2832cSKevin Wolf     }
1186b2c2832cSKevin Wolf 
11874ef85a9cSKevin Wolf     /* Make sure that the source is not resized while the job is running */
11884ef85a9cSKevin Wolf     s = block_job_create(job_id, driver, mirror_top_bs,
11894ef85a9cSKevin Wolf                          BLK_PERM_CONSISTENT_READ,
11904ef85a9cSKevin Wolf                          BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED |
11914ef85a9cSKevin Wolf                          BLK_PERM_WRITE | BLK_PERM_GRAPH_MOD, speed,
11924ef85a9cSKevin Wolf                          creation_flags, cb, opaque, errp);
11934ef85a9cSKevin Wolf     if (!s) {
11944ef85a9cSKevin Wolf         goto fail;
11954ef85a9cSKevin Wolf     }
11967a25fcd0SMax Reitz     /* The block job now has a reference to this node */
11977a25fcd0SMax Reitz     bdrv_unref(mirror_top_bs);
11987a25fcd0SMax Reitz 
11994ef85a9cSKevin Wolf     s->source = bs;
12004ef85a9cSKevin Wolf     s->mirror_top_bs = mirror_top_bs;
12014ef85a9cSKevin Wolf 
12024ef85a9cSKevin Wolf     /* No resize for the target either; while the mirror is still running, a
12034ef85a9cSKevin Wolf      * consistent read isn't necessarily possible. We could possibly allow
12044ef85a9cSKevin Wolf      * writes and graph modifications, though it would likely defeat the
12054ef85a9cSKevin Wolf      * purpose of a mirror, so leave them blocked for now.
12064ef85a9cSKevin Wolf      *
12074ef85a9cSKevin Wolf      * In the case of active commit, things look a bit different, though,
12084ef85a9cSKevin Wolf      * because the target is an already populated backing file in active use.
12094ef85a9cSKevin Wolf      * We can allow anything except resize there. */
12104ef85a9cSKevin Wolf     target_is_backing = bdrv_chain_contains(bs, target);
12114ef85a9cSKevin Wolf     target_graph_mod = (backing_mode != MIRROR_LEAVE_BACKING_CHAIN);
12124ef85a9cSKevin Wolf     s->target = blk_new(BLK_PERM_WRITE | BLK_PERM_RESIZE |
12134ef85a9cSKevin Wolf                         (target_graph_mod ? BLK_PERM_GRAPH_MOD : 0),
12144ef85a9cSKevin Wolf                         BLK_PERM_WRITE_UNCHANGED |
12154ef85a9cSKevin Wolf                         (target_is_backing ? BLK_PERM_CONSISTENT_READ |
12164ef85a9cSKevin Wolf                                              BLK_PERM_WRITE |
12174ef85a9cSKevin Wolf                                              BLK_PERM_GRAPH_MOD : 0));
1218d7086422SKevin Wolf     ret = blk_insert_bs(s->target, target, errp);
1219d7086422SKevin Wolf     if (ret < 0) {
12204ef85a9cSKevin Wolf         goto fail;
1221d7086422SKevin Wolf     }
1222e253f4b8SKevin Wolf 
122309158f00SBenoît Canet     s->replaces = g_strdup(replaces);
1224b952b558SPaolo Bonzini     s->on_source_error = on_source_error;
1225b952b558SPaolo Bonzini     s->on_target_error = on_target_error;
122603544a6eSFam Zheng     s->is_none_mode = is_none_mode;
1227274fcceeSMax Reitz     s->backing_mode = backing_mode;
12285bc361b8SFam Zheng     s->base = base;
1229eee13dfeSPaolo Bonzini     s->granularity = granularity;
123048ac0a4dSWen Congyang     s->buf_size = ROUND_UP(buf_size, granularity);
12310fc9f8eaSFam Zheng     s->unmap = unmap;
1232b49f7eadSWen Congyang     if (auto_complete) {
1233b49f7eadSWen Congyang         s->should_complete = true;
1234b49f7eadSWen Congyang     }
1235b812f671SPaolo Bonzini 
12360db6e54aSFam Zheng     s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity, NULL, errp);
1237b8afb520SFam Zheng     if (!s->dirty_bitmap) {
123888f9d1b3SKevin Wolf         goto fail;
1239b8afb520SFam Zheng     }
124010f3cd15SAlberto Garcia 
12414ef85a9cSKevin Wolf     /* Required permissions are already taken with blk_new() */
124276d554e2SKevin Wolf     block_job_add_bdrv(&s->common, "target", target, 0, BLK_PERM_ALL,
124376d554e2SKevin Wolf                        &error_abort);
124476d554e2SKevin Wolf 
1245f3ede4b0SAlberto Garcia     /* In commit_active_start() all intermediate nodes disappear, so
1246f3ede4b0SAlberto Garcia      * any jobs in them must be blocked */
12474ef85a9cSKevin Wolf     if (target_is_backing) {
1248f3ede4b0SAlberto Garcia         BlockDriverState *iter;
1249f3ede4b0SAlberto Garcia         for (iter = backing_bs(bs); iter != target; iter = backing_bs(iter)) {
12504ef85a9cSKevin Wolf             /* XXX BLK_PERM_WRITE needs to be allowed so we don't block
12514ef85a9cSKevin Wolf              * ourselves at s->base (if writes are blocked for a node, they are
12524ef85a9cSKevin Wolf              * also blocked for its backing file). The other option would be a
12534ef85a9cSKevin Wolf              * second filter driver above s->base (== target). */
12544ef85a9cSKevin Wolf             ret = block_job_add_bdrv(&s->common, "intermediate node", iter, 0,
12554ef85a9cSKevin Wolf                                      BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE,
12564ef85a9cSKevin Wolf                                      errp);
12574ef85a9cSKevin Wolf             if (ret < 0) {
12584ef85a9cSKevin Wolf                 goto fail;
12594ef85a9cSKevin Wolf             }
1260f3ede4b0SAlberto Garcia         }
1261f3ede4b0SAlberto Garcia     }
126210f3cd15SAlberto Garcia 
12635ccac6f1SJohn Snow     trace_mirror_start(bs, s, opaque);
12645ccac6f1SJohn Snow     block_job_start(&s->common);
12654ef85a9cSKevin Wolf     return;
12664ef85a9cSKevin Wolf 
12674ef85a9cSKevin Wolf fail:
12684ef85a9cSKevin Wolf     if (s) {
12697a25fcd0SMax Reitz         /* Make sure this BDS does not go away until we have completed the graph
12707a25fcd0SMax Reitz          * changes below */
12717a25fcd0SMax Reitz         bdrv_ref(mirror_top_bs);
12727a25fcd0SMax Reitz 
12734ef85a9cSKevin Wolf         g_free(s->replaces);
12744ef85a9cSKevin Wolf         blk_unref(s->target);
127505b0d8e3SPaolo Bonzini         block_job_early_fail(&s->common);
12764ef85a9cSKevin Wolf     }
12774ef85a9cSKevin Wolf 
1278c1cef672SFam Zheng     bdrv_child_try_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL,
1279c1cef672SFam Zheng                             &error_abort);
12805fe31c25SKevin Wolf     bdrv_replace_node(mirror_top_bs, backing_bs(mirror_top_bs), &error_abort);
12817a25fcd0SMax Reitz 
12827a25fcd0SMax Reitz     bdrv_unref(mirror_top_bs);
1283893f7ebaSPaolo Bonzini }
128403544a6eSFam Zheng 
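/* Entry point for the mirror job proper: map the sync mode onto
 * is_none_mode/base (sync=top copies only data above the backing file) and
 * delegate to mirror_start_job(). */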
128571aa9867SAlberto Garcia void mirror_start(const char *job_id, BlockDriverState *bs,
128671aa9867SAlberto Garcia                   BlockDriverState *target, const char *replaces,
12875fba6c0eSJohn Snow                   int64_t speed, uint32_t granularity, int64_t buf_size,
1288274fcceeSMax Reitz                   MirrorSyncMode mode, BlockMirrorBackingMode backing_mode,
1289274fcceeSMax Reitz                   BlockdevOnError on_source_error,
129003544a6eSFam Zheng                   BlockdevOnError on_target_error,
12916cdbceb1SKevin Wolf                   bool unmap, const char *filter_node_name, Error **errp)
129203544a6eSFam Zheng {
129303544a6eSFam Zheng     bool is_none_mode;
129403544a6eSFam Zheng     BlockDriverState *base;
129503544a6eSFam Zheng 
12964b80ab2bSJohn Snow     if (mode == MIRROR_SYNC_MODE_INCREMENTAL) {
12974b80ab2bSJohn Snow         error_setg(errp, "Sync mode 'incremental' not supported");
1298d58d8453SJohn Snow         return;
1299d58d8453SJohn Snow     }
130003544a6eSFam Zheng     is_none_mode = mode == MIRROR_SYNC_MODE_NONE;
1301760e0063SKevin Wolf     base = mode == MIRROR_SYNC_MODE_TOP ? backing_bs(bs) : NULL;
130247970dfbSJohn Snow     mirror_start_job(job_id, bs, BLOCK_JOB_DEFAULT, target, replaces,
1303274fcceeSMax Reitz                      speed, granularity, buf_size, backing_mode,
130451ccfa2dSFam Zheng                      on_source_error, on_target_error, unmap, NULL, NULL,
13056cdbceb1SKevin Wolf                      &mirror_job_driver, is_none_mode, base, false,
130651ccfa2dSFam Zheng                      filter_node_name, errp);
130703544a6eSFam Zheng }
130803544a6eSFam Zheng 
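/* Active commit: reopen the base image read-write and run the mirror
 * machinery with base as the target, restoring the original flags of base
 * if starting the job fails. */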
1309fd62c609SAlberto Garcia void commit_active_start(const char *job_id, BlockDriverState *bs,
131047970dfbSJohn Snow                          BlockDriverState *base, int creation_flags,
131147970dfbSJohn Snow                          int64_t speed, BlockdevOnError on_error,
13120db832f4SKevin Wolf                          const char *filter_node_name,
131378bbd910SFam Zheng                          BlockCompletionFunc *cb, void *opaque,
131478bbd910SFam Zheng                          bool auto_complete, Error **errp)
131503544a6eSFam Zheng {
13164da83585SJeff Cody     int orig_base_flags;
1317cc67f4d1SJeff Cody     Error *local_err = NULL;
13184da83585SJeff Cody 
13194da83585SJeff Cody     orig_base_flags = bdrv_get_flags(base);
13204da83585SJeff Cody 
132120a63d2cSFam Zheng     if (bdrv_reopen(base, bs->open_flags, errp)) {
132220a63d2cSFam Zheng         return;
132320a63d2cSFam Zheng     }
13244da83585SJeff Cody 
132547970dfbSJohn Snow     mirror_start_job(job_id, bs, creation_flags, base, NULL, speed, 0, 0,
132671aa9867SAlberto Garcia                      MIRROR_LEAVE_BACKING_CHAIN,
132751ccfa2dSFam Zheng                      on_error, on_error, true, cb, opaque,
13286cdbceb1SKevin Wolf                      &commit_active_job_driver, false, base, auto_complete,
132951ccfa2dSFam Zheng                      filter_node_name, &local_err);
13300fb6395cSMarkus Armbruster     if (local_err) {
1331cc67f4d1SJeff Cody         error_propagate(errp, local_err);
13324da83585SJeff Cody         goto error_restore_flags;
13334da83585SJeff Cody     }
13344da83585SJeff Cody 
13354da83585SJeff Cody     return;
13364da83585SJeff Cody 
13374da83585SJeff Cody error_restore_flags:
13384da83585SJeff Cody     /* Ignore the error and errp from bdrv_reopen, because we want to
13394da83585SJeff Cody      * propagate the original error */
13404da83585SJeff Cody     bdrv_reopen(base, orig_base_flags, NULL);
13414da83585SJeff Cody     return;
134203544a6eSFam Zheng }
1343