xref: /qemu/block/mirror.c (revision 05b0d8e3)
/*
 * Image mirroring
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Paolo Bonzini  <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "trace.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qemu/ratelimit.h"
#include "qemu/bitmap.h"

#define SLICE_TIME    100000000ULL /* ns */
#define MAX_IN_FLIGHT 16
#define MAX_IO_SECTORS ((1 << 20) >> BDRV_SECTOR_BITS) /* 1 MiB */
#define DEFAULT_MIRROR_BUF_SIZE \
    (MAX_IN_FLIGHT * MAX_IO_SECTORS * BDRV_SECTOR_SIZE)
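/* With 512-byte sectors (BDRV_SECTOR_BITS == 9), MAX_IO_SECTORS above is
 * (1 << 20) >> 9 = 2048 sectors, i.e. 1 MiB per request, so
 * DEFAULT_MIRROR_BUF_SIZE covers 16 in-flight requests * 1 MiB = 16 MiB. */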

/* The mirroring buffer is a list of granularity-sized chunks.
 * Free chunks are organized in a list.
 */
typedef struct MirrorBuffer {
    QSIMPLEQ_ENTRY(MirrorBuffer) next;
} MirrorBuffer;

typedef struct MirrorBlockJob {
    BlockJob common;
    RateLimit limit;
    BlockBackend *target;
    BlockDriverState *mirror_top_bs;
    BlockDriverState *source;
    BlockDriverState *base;

    /* The name of the graph node to replace */
    char *replaces;
    /* The BDS to replace */
    BlockDriverState *to_replace;
    /* Used to block operations on the drive-mirror-replace target */
    Error *replace_blocker;
    bool is_none_mode;
    BlockMirrorBackingMode backing_mode;
    BlockdevOnError on_source_error, on_target_error;
    bool synced;
    bool should_complete;
    int64_t granularity;
    size_t buf_size;
    int64_t bdev_length;
    unsigned long *cow_bitmap;
    BdrvDirtyBitmap *dirty_bitmap;
    BdrvDirtyBitmapIter *dbi;
    uint8_t *buf;
    QSIMPLEQ_HEAD(, MirrorBuffer) buf_free;
    int buf_free_count;

    uint64_t last_pause_ns;
    unsigned long *in_flight_bitmap;
    int in_flight;
    int64_t sectors_in_flight;
    int ret;
    bool unmap;
    bool waiting_for_io;
    int target_cluster_sectors;
    int max_iov;
    bool initial_zeroing_ongoing;
} MirrorBlockJob;

typedef struct MirrorOp {
    MirrorBlockJob *s;
    QEMUIOVector qiov;
    int64_t sector_num;
    int nb_sectors;
} MirrorOp;

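/* Map an I/O error to the action configured for the source (read) or the
 * target (write) side of the job, and mark the job as no longer synced. */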
static BlockErrorAction mirror_error_action(MirrorBlockJob *s, bool read,
                                            int error)
{
    s->synced = false;
    if (read) {
        return block_job_error_action(&s->common, s->on_source_error,
                                      true, error);
    } else {
        return block_job_error_action(&s->common, s->on_target_error,
                                      false, error);
    }
}

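/* Common completion path for reads and writes: return the buffer chunks to
 * the free list, clear the chunks' bits in the in-flight bitmap, update the
 * progress accounting, and wake up the job coroutine if it is waiting for an
 * in-flight operation to finish. */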
static void mirror_iteration_done(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;
    struct iovec *iov;
    int64_t chunk_num;
    int i, nb_chunks, sectors_per_chunk;

    trace_mirror_iteration_done(s, op->sector_num, op->nb_sectors, ret);

    s->in_flight--;
    s->sectors_in_flight -= op->nb_sectors;
    iov = op->qiov.iov;
    for (i = 0; i < op->qiov.niov; i++) {
        MirrorBuffer *buf = (MirrorBuffer *) iov[i].iov_base;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, buf, next);
        s->buf_free_count++;
    }

    sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
    chunk_num = op->sector_num / sectors_per_chunk;
    nb_chunks = DIV_ROUND_UP(op->nb_sectors, sectors_per_chunk);
    bitmap_clear(s->in_flight_bitmap, chunk_num, nb_chunks);
    if (ret >= 0) {
        if (s->cow_bitmap) {
            bitmap_set(s->cow_bitmap, chunk_num, nb_chunks);
        }
        if (!s->initial_zeroing_ongoing) {
            s->common.offset += (uint64_t)op->nb_sectors * BDRV_SECTOR_SIZE;
        }
    }
    qemu_iovec_destroy(&op->qiov);
    g_free(op);

    if (s->waiting_for_io) {
        qemu_coroutine_enter(s->common.co);
    }
}

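/* AIO callback for the write to the target.  On failure, re-dirty the
 * affected sectors so they are retried, and apply the configured target
 * error action. */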
static void mirror_write_complete(void *opaque, int ret)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;

    aio_context_acquire(blk_get_aio_context(s->common.blk));
    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->sector_num, op->nb_sectors);
        action = mirror_error_action(s, false, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }
    }
    mirror_iteration_done(op, ret);
    aio_context_release(blk_get_aio_context(s->common.blk));
}

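/* AIO callback for the read from the source.  On success, chain the
 * corresponding write to the target; on failure, re-dirty the sectors and
 * apply the configured source error action. */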
static void mirror_read_complete(void *opaque, int ret)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;

    aio_context_acquire(blk_get_aio_context(s->common.blk));
    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->sector_num, op->nb_sectors);
        action = mirror_error_action(s, true, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }

        mirror_iteration_done(op, ret);
    } else {
        blk_aio_pwritev(s->target, op->sector_num * BDRV_SECTOR_SIZE, &op->qiov,
                        0, mirror_write_complete, op);
    }
    aio_context_release(blk_get_aio_context(s->common.blk));
}

static inline void mirror_clip_sectors(MirrorBlockJob *s,
                                       int64_t sector_num,
                                       int *nb_sectors)
{
    *nb_sectors = MIN(*nb_sectors,
                      s->bdev_length / BDRV_SECTOR_SIZE - sector_num);
}

/* Round sector_num and/or nb_sectors to target cluster if COW is needed, and
 * return the offset of the adjusted tail sector against the original. */
static int mirror_cow_align(MirrorBlockJob *s,
                            int64_t *sector_num,
                            int *nb_sectors)
{
    bool need_cow;
    int ret = 0;
    int chunk_sectors = s->granularity >> BDRV_SECTOR_BITS;
    int64_t align_sector_num = *sector_num;
    int align_nb_sectors = *nb_sectors;
    int max_sectors = chunk_sectors * s->max_iov;

    need_cow = !test_bit(*sector_num / chunk_sectors, s->cow_bitmap);
    need_cow |= !test_bit((*sector_num + *nb_sectors - 1) / chunk_sectors,
                          s->cow_bitmap);
    if (need_cow) {
        bdrv_round_sectors_to_clusters(blk_bs(s->target), *sector_num,
                                       *nb_sectors, &align_sector_num,
                                       &align_nb_sectors);
    }

    if (align_nb_sectors > max_sectors) {
        align_nb_sectors = max_sectors;
        if (need_cow) {
            align_nb_sectors = QEMU_ALIGN_DOWN(align_nb_sectors,
                                               s->target_cluster_sectors);
        }
    }
    /* Clipping may result in align_nb_sectors unaligned to chunk boundary, but
     * that doesn't matter because it's already the end of the source image. */
    mirror_clip_sectors(s, align_sector_num, &align_nb_sectors);

    ret = align_sector_num + align_nb_sectors - (*sector_num + *nb_sectors);
    *sector_num = align_sector_num;
    *nb_sectors = align_nb_sectors;
    assert(ret >= 0);
    return ret;
}

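/* Yield the job coroutine until one of the in-flight operations completes
 * and re-enters it from mirror_iteration_done(). */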
static inline void mirror_wait_for_io(MirrorBlockJob *s)
{
    assert(!s->waiting_for_io);
    s->waiting_for_io = true;
    qemu_coroutine_yield();
    s->waiting_for_io = false;
}

/* Submit async read while handling COW.
 * Returns: The number of sectors copied after and including sector_num,
 *          excluding any sectors copied prior to sector_num due to alignment.
 *          This will be nb_sectors if no alignment is necessary, or
 *          (new_end - sector_num) if the tail is rounded up or down due to
 *          alignment or buffer limit.
 */
static int mirror_do_read(MirrorBlockJob *s, int64_t sector_num,
                          int nb_sectors)
{
    BlockBackend *source = s->common.blk;
    int sectors_per_chunk, nb_chunks;
    int ret;
    MirrorOp *op;
    int max_sectors;

    sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
    max_sectors = sectors_per_chunk * s->max_iov;

    /* We can only handle as much as buf_size at a time. */
    nb_sectors = MIN(s->buf_size >> BDRV_SECTOR_BITS, nb_sectors);
    nb_sectors = MIN(max_sectors, nb_sectors);
    assert(nb_sectors);
    ret = nb_sectors;

    if (s->cow_bitmap) {
        ret += mirror_cow_align(s, &sector_num, &nb_sectors);
    }
    assert(nb_sectors << BDRV_SECTOR_BITS <= s->buf_size);
    /* The sector range must meet granularity because:
     * 1) Caller passes in aligned values;
     * 2) mirror_cow_align is used only when target cluster is larger. */
    assert(!(sector_num % sectors_per_chunk));
    nb_chunks = DIV_ROUND_UP(nb_sectors, sectors_per_chunk);

    while (s->buf_free_count < nb_chunks) {
        trace_mirror_yield_in_flight(s, sector_num, s->in_flight);
        mirror_wait_for_io(s);
    }

    /* Allocate a MirrorOp that is used as an AIO callback.  */
    op = g_new(MirrorOp, 1);
    op->s = s;
    op->sector_num = sector_num;
    op->nb_sectors = nb_sectors;

    /* Now make a QEMUIOVector taking enough granularity-sized chunks
     * from s->buf_free.
     */
    qemu_iovec_init(&op->qiov, nb_chunks);
    while (nb_chunks-- > 0) {
        MirrorBuffer *buf = QSIMPLEQ_FIRST(&s->buf_free);
        size_t remaining = nb_sectors * BDRV_SECTOR_SIZE - op->qiov.size;

        QSIMPLEQ_REMOVE_HEAD(&s->buf_free, next);
        s->buf_free_count--;
        qemu_iovec_add(&op->qiov, buf, MIN(s->granularity, remaining));
    }

    /* Copy the dirty cluster.  */
    s->in_flight++;
    s->sectors_in_flight += nb_sectors;
    trace_mirror_one_iteration(s, sector_num, nb_sectors);

    blk_aio_preadv(source, sector_num * BDRV_SECTOR_SIZE, &op->qiov, 0,
                   mirror_read_complete, op);
    return ret;
}

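/* Submit an async write-zeroes or discard to the target for a range that is
 * known to be zero or unallocated in the source, so no data needs copying. */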
static void mirror_do_zero_or_discard(MirrorBlockJob *s,
                                      int64_t sector_num,
                                      int nb_sectors,
                                      bool is_discard)
{
    MirrorOp *op;

    /* Allocate a MirrorOp that is used as an AIO callback. The qiov is zeroed
     * so the freeing in mirror_iteration_done is a nop. */
    op = g_new0(MirrorOp, 1);
    op->s = s;
    op->sector_num = sector_num;
    op->nb_sectors = nb_sectors;

    s->in_flight++;
    s->sectors_in_flight += nb_sectors;
    if (is_discard) {
        blk_aio_pdiscard(s->target, sector_num << BDRV_SECTOR_BITS,
                         op->nb_sectors << BDRV_SECTOR_BITS,
                         mirror_write_complete, op);
    } else {
        blk_aio_pwrite_zeroes(s->target, sector_num * BDRV_SECTOR_SIZE,
                              op->nb_sectors * BDRV_SECTOR_SIZE,
                              s->unmap ? BDRV_REQ_MAY_UNMAP : 0,
                              mirror_write_complete, op);
    }
}

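/* One pass of the copy loop: pick the next dirty range from the bitmap
 * iterator, extend it over consecutive dirty chunks up to the buffer size,
 * then copy, zero or discard it depending on the block status of the source.
 * Returns the delay (in ns) the caller should sleep for rate limiting. */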
static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
{
    BlockDriverState *source = s->source;
    int64_t sector_num, first_chunk;
    uint64_t delay_ns = 0;
    /* At least the first dirty chunk is mirrored in one iteration. */
    int nb_chunks = 1;
    int64_t end = s->bdev_length / BDRV_SECTOR_SIZE;
    int sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
    bool write_zeroes_ok = bdrv_can_write_zeroes_with_unmap(blk_bs(s->target));
    int max_io_sectors = MAX((s->buf_size >> BDRV_SECTOR_BITS) / MAX_IN_FLIGHT,
                             MAX_IO_SECTORS);

    sector_num = bdrv_dirty_iter_next(s->dbi);
    if (sector_num < 0) {
        bdrv_set_dirty_iter(s->dbi, 0);
        sector_num = bdrv_dirty_iter_next(s->dbi);
        trace_mirror_restart_iter(s, bdrv_get_dirty_count(s->dirty_bitmap));
        assert(sector_num >= 0);
    }

    first_chunk = sector_num / sectors_per_chunk;
    while (test_bit(first_chunk, s->in_flight_bitmap)) {
        trace_mirror_yield_in_flight(s, sector_num, s->in_flight);
        mirror_wait_for_io(s);
    }

    block_job_pause_point(&s->common);

    /* Find the number of consecutive dirty chunks following the first dirty
     * one, and wait for in-flight requests in them. */
    while (nb_chunks * sectors_per_chunk < (s->buf_size >> BDRV_SECTOR_BITS)) {
        int64_t next_dirty;
        int64_t next_sector = sector_num + nb_chunks * sectors_per_chunk;
        int64_t next_chunk = next_sector / sectors_per_chunk;
        if (next_sector >= end ||
            !bdrv_get_dirty(source, s->dirty_bitmap, next_sector)) {
            break;
        }
        if (test_bit(next_chunk, s->in_flight_bitmap)) {
            break;
        }

        next_dirty = bdrv_dirty_iter_next(s->dbi);
        if (next_dirty > next_sector || next_dirty < 0) {
            /* The bitmap iterator's cache is stale, refresh it */
            bdrv_set_dirty_iter(s->dbi, next_sector);
            next_dirty = bdrv_dirty_iter_next(s->dbi);
        }
        assert(next_dirty == next_sector);
        nb_chunks++;
    }

    /* Clear dirty bits before querying the block status, because
     * calling bdrv_get_block_status_above could yield - if some blocks are
     * marked dirty in this window, we need to know.
     */
    bdrv_reset_dirty_bitmap(s->dirty_bitmap, sector_num,
                            nb_chunks * sectors_per_chunk);
    bitmap_set(s->in_flight_bitmap, sector_num / sectors_per_chunk, nb_chunks);
    while (nb_chunks > 0 && sector_num < end) {
        int64_t ret;
        int io_sectors, io_sectors_acct;
        BlockDriverState *file;
        enum MirrorMethod {
            MIRROR_METHOD_COPY,
            MIRROR_METHOD_ZERO,
            MIRROR_METHOD_DISCARD
        } mirror_method = MIRROR_METHOD_COPY;

        assert(!(sector_num % sectors_per_chunk));
        ret = bdrv_get_block_status_above(source, NULL, sector_num,
                                          nb_chunks * sectors_per_chunk,
                                          &io_sectors, &file);
        if (ret < 0) {
            io_sectors = MIN(nb_chunks * sectors_per_chunk, max_io_sectors);
        } else if (ret & BDRV_BLOCK_DATA) {
            io_sectors = MIN(io_sectors, max_io_sectors);
        }

        io_sectors -= io_sectors % sectors_per_chunk;
        if (io_sectors < sectors_per_chunk) {
            io_sectors = sectors_per_chunk;
        } else if (ret >= 0 && !(ret & BDRV_BLOCK_DATA)) {
            int64_t target_sector_num;
            int target_nb_sectors;
            bdrv_round_sectors_to_clusters(blk_bs(s->target), sector_num,
                                           io_sectors, &target_sector_num,
                                           &target_nb_sectors);
            if (target_sector_num == sector_num &&
                target_nb_sectors == io_sectors) {
                mirror_method = ret & BDRV_BLOCK_ZERO ?
                                    MIRROR_METHOD_ZERO :
                                    MIRROR_METHOD_DISCARD;
            }
        }

        while (s->in_flight >= MAX_IN_FLIGHT) {
            trace_mirror_yield_in_flight(s, sector_num, s->in_flight);
            mirror_wait_for_io(s);
        }

        if (s->ret < 0) {
            return 0;
        }

        mirror_clip_sectors(s, sector_num, &io_sectors);
        switch (mirror_method) {
        case MIRROR_METHOD_COPY:
            io_sectors = mirror_do_read(s, sector_num, io_sectors);
            io_sectors_acct = io_sectors;
            break;
        case MIRROR_METHOD_ZERO:
        case MIRROR_METHOD_DISCARD:
            mirror_do_zero_or_discard(s, sector_num, io_sectors,
                                      mirror_method == MIRROR_METHOD_DISCARD);
            if (write_zeroes_ok) {
                io_sectors_acct = 0;
            } else {
                io_sectors_acct = io_sectors;
            }
            break;
        default:
            abort();
        }
        assert(io_sectors);
        sector_num += io_sectors;
        nb_chunks -= DIV_ROUND_UP(io_sectors, sectors_per_chunk);
        if (s->common.speed) {
            delay_ns = ratelimit_calculate_delay(&s->limit, io_sectors_acct);
        }
    }
    return delay_ns;
}

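/* Carve the contiguous s->buf into granularity-sized chunks and thread them
 * onto the buf_free list, from which mirror_do_read() builds its iovecs. */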
static void mirror_free_init(MirrorBlockJob *s)
{
    int granularity = s->granularity;
    size_t buf_size = s->buf_size;
    uint8_t *buf = s->buf;

    assert(s->buf_free_count == 0);
    QSIMPLEQ_INIT(&s->buf_free);
    while (buf_size != 0) {
        MirrorBuffer *cur = (MirrorBuffer *)buf;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, cur, next);
        s->buf_free_count++;
        buf_size -= granularity;
        buf += granularity;
    }
}

/* This is also used for the .pause callback. There is no matching
 * mirror_resume() because mirror_run() will begin iterating again
 * when the job is resumed.
 */
static void mirror_wait_for_all_io(MirrorBlockJob *s)
{
    while (s->in_flight > 0) {
        mirror_wait_for_io(s);
    }
}

typedef struct {
    int ret;
} MirrorExitData;

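/* Completion callback, deferred to the main loop: rewire the block graph
 * (set the target's backing file, replace the source or the to_replace node
 * with the target, remove the mirror_top filter) and finish the job. */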
static void mirror_exit(BlockJob *job, void *opaque)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
    MirrorExitData *data = opaque;
    AioContext *replace_aio_context = NULL;
    BlockDriverState *src = s->source;
    BlockDriverState *target_bs = blk_bs(s->target);
    BlockDriverState *mirror_top_bs = s->mirror_top_bs;
    Error *local_err = NULL;

    /* Make sure that the source BDS doesn't go away before we called
     * block_job_completed(). */
    bdrv_ref(src);
    bdrv_ref(mirror_top_bs);
    bdrv_ref(target_bs);

    /* Remove target parent that still uses BLK_PERM_WRITE/RESIZE before
     * inserting target_bs at s->to_replace, where we might not be able to get
     * these permissions. */
    blk_unref(s->target);
    s->target = NULL;

    /* We don't access the source any more. Dropping any WRITE/RESIZE is
     * required before it could become a backing file of target_bs. */
    bdrv_child_try_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL,
                            &error_abort);
    if (s->backing_mode == MIRROR_SOURCE_BACKING_CHAIN) {
        BlockDriverState *backing = s->is_none_mode ? src : s->base;
        if (backing_bs(target_bs) != backing) {
            bdrv_set_backing_hd(target_bs, backing, &local_err);
            if (local_err) {
                error_report_err(local_err);
                data->ret = -EPERM;
            }
        }
    }

    if (s->to_replace) {
        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);
    }

    if (s->should_complete && data->ret == 0) {
        BlockDriverState *to_replace = src;
        if (s->to_replace) {
            to_replace = s->to_replace;
        }

        if (bdrv_get_flags(target_bs) != bdrv_get_flags(to_replace)) {
            bdrv_reopen(target_bs, bdrv_get_flags(to_replace), NULL);
        }

        /* The mirror job has no requests in flight any more, but we need to
         * drain potential other users of the BDS before changing the graph. */
        bdrv_drained_begin(target_bs);
        bdrv_replace_node(to_replace, target_bs, &local_err);
        bdrv_drained_end(target_bs);
        if (local_err) {
            error_report_err(local_err);
            data->ret = -EPERM;
        }
    }
    if (s->to_replace) {
        bdrv_op_unblock_all(s->to_replace, s->replace_blocker);
        error_free(s->replace_blocker);
        bdrv_unref(s->to_replace);
    }
    if (replace_aio_context) {
        aio_context_release(replace_aio_context);
    }
    g_free(s->replaces);
    bdrv_unref(target_bs);

    /* Remove the mirror filter driver from the graph. Before this, get rid of
     * the blockers on the intermediate nodes so that the resulting state is
     * valid. Also give up permissions on mirror_top_bs->backing, which might
     * block the removal. */
    block_job_remove_all_bdrv(job);
    bdrv_child_try_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL,
                            &error_abort);
    bdrv_replace_node(mirror_top_bs, backing_bs(mirror_top_bs), &error_abort);

    /* We just changed the BDS the job BB refers to (with either or both of the
     * bdrv_replace_node() calls), so switch the BB back so the cleanup does
     * the right thing. We don't need any permissions any more now. */
    blk_remove_bs(job->blk);
    blk_set_perm(job->blk, 0, BLK_PERM_ALL, &error_abort);
    blk_insert_bs(job->blk, mirror_top_bs, &error_abort);

    block_job_completed(&s->common, data->ret);

    g_free(data);
    bdrv_drained_end(src);
    bdrv_unref(mirror_top_bs);
    bdrv_unref(src);
}

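/* Yield at least once per SLICE_TIME so that a pending pause or cancel can
 * take effect even while the job is busy. */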
static void mirror_throttle(MirrorBlockJob *s)
{
    int64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

    if (now - s->last_pause_ns > SLICE_TIME) {
        s->last_pause_ns = now;
        block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, 0);
    } else {
        block_job_pause_point(&s->common);
    }
}

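/* Populate the dirty bitmap for a full sync: zero out the target first if it
 * cannot guarantee zero initialization, then mark as dirty every range that
 * is allocated above the base image. */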
static int coroutine_fn mirror_dirty_init(MirrorBlockJob *s)
{
    int64_t sector_num, end;
    BlockDriverState *base = s->base;
    BlockDriverState *bs = s->source;
    BlockDriverState *target_bs = blk_bs(s->target);
    int ret, n;

    end = s->bdev_length / BDRV_SECTOR_SIZE;

    if (base == NULL && !bdrv_has_zero_init(target_bs)) {
        if (!bdrv_can_write_zeroes_with_unmap(target_bs)) {
            bdrv_set_dirty_bitmap(s->dirty_bitmap, 0, end);
            return 0;
        }

        s->initial_zeroing_ongoing = true;
        for (sector_num = 0; sector_num < end; ) {
            int nb_sectors = MIN(end - sector_num,
                QEMU_ALIGN_DOWN(INT_MAX, s->granularity) >> BDRV_SECTOR_BITS);

            mirror_throttle(s);

            if (block_job_is_cancelled(&s->common)) {
                s->initial_zeroing_ongoing = false;
                return 0;
            }

            if (s->in_flight >= MAX_IN_FLIGHT) {
                trace_mirror_yield(s, UINT64_MAX, s->buf_free_count,
                                   s->in_flight);
                mirror_wait_for_io(s);
                continue;
            }

            mirror_do_zero_or_discard(s, sector_num, nb_sectors, false);
            sector_num += nb_sectors;
        }

        mirror_wait_for_all_io(s);
        s->initial_zeroing_ongoing = false;
    }

    /* First part, loop on the sectors and initialize the dirty bitmap.  */
    for (sector_num = 0; sector_num < end; ) {
        /* Just to make sure we are not exceeding int limit. */
        int nb_sectors = MIN(INT_MAX >> BDRV_SECTOR_BITS,
                             end - sector_num);

        mirror_throttle(s);

        if (block_job_is_cancelled(&s->common)) {
            return 0;
        }

        ret = bdrv_is_allocated_above(bs, base, sector_num, nb_sectors, &n);
        if (ret < 0) {
            return ret;
        }

        assert(n > 0);
        if (ret == 1) {
            bdrv_set_dirty_bitmap(s->dirty_bitmap, sector_num, n);
        }
        sector_num += n;
    }
    return 0;
}

/* Called when going out of the streaming phase to flush the bulk of the
 * data to the medium, or just before completing.
 */
static int mirror_flush(MirrorBlockJob *s)
{
    int ret = blk_flush(s->target);
    if (ret < 0) {
        if (mirror_error_action(s, false, -ret) == BLOCK_ERROR_ACTION_REPORT) {
            s->ret = ret;
        }
    }
    return ret;
}

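/* The job coroutine: size the target, build the initial dirty bitmap, then
 * loop calling mirror_iteration() until source and target converge.  Once
 * they do, emit BLOCK_JOB_READY and keep mirroring new writes until the job
 * is completed or cancelled. */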
static void coroutine_fn mirror_run(void *opaque)
{
    MirrorBlockJob *s = opaque;
    MirrorExitData *data;
    BlockDriverState *bs = s->source;
    BlockDriverState *target_bs = blk_bs(s->target);
    bool need_drain = true;
    int64_t length;
    BlockDriverInfo bdi;
    char backing_filename[2]; /* we only need 2 characters because we are only
                                 checking for a NULL string */
    int ret = 0;
    int target_cluster_size = BDRV_SECTOR_SIZE;

    if (block_job_is_cancelled(&s->common)) {
        goto immediate_exit;
    }

    s->bdev_length = bdrv_getlength(bs);
    if (s->bdev_length < 0) {
        ret = s->bdev_length;
        goto immediate_exit;
    }

    /* Active commit must resize the base image if its size differs from the
     * active layer. */
    if (s->base == blk_bs(s->target)) {
        int64_t base_length;

        base_length = blk_getlength(s->target);
        if (base_length < 0) {
            ret = base_length;
            goto immediate_exit;
        }

        if (s->bdev_length > base_length) {
            ret = blk_truncate(s->target, s->bdev_length, NULL);
            if (ret < 0) {
                goto immediate_exit;
            }
        }
    }

    if (s->bdev_length == 0) {
        /* Report BLOCK_JOB_READY and wait for complete. */
        block_job_event_ready(&s->common);
        s->synced = true;
        while (!block_job_is_cancelled(&s->common) && !s->should_complete) {
            block_job_yield(&s->common);
        }
        s->common.cancelled = false;
        goto immediate_exit;
    }

    length = DIV_ROUND_UP(s->bdev_length, s->granularity);
    s->in_flight_bitmap = bitmap_new(length);

    /* If we have no backing file yet in the destination, we cannot let
     * the destination do COW.  Instead, we copy sectors around the
     * dirty data if needed.  We need a bitmap to do that.
     */
    bdrv_get_backing_filename(target_bs, backing_filename,
                              sizeof(backing_filename));
    if (!bdrv_get_info(target_bs, &bdi) && bdi.cluster_size) {
        target_cluster_size = bdi.cluster_size;
    }
    if (backing_filename[0] && !target_bs->backing
        && s->granularity < target_cluster_size) {
        s->buf_size = MAX(s->buf_size, target_cluster_size);
        s->cow_bitmap = bitmap_new(length);
    }
    s->target_cluster_sectors = target_cluster_size >> BDRV_SECTOR_BITS;
    s->max_iov = MIN(bs->bl.max_iov, target_bs->bl.max_iov);

    s->buf = qemu_try_blockalign(bs, s->buf_size);
    if (s->buf == NULL) {
        ret = -ENOMEM;
        goto immediate_exit;
    }

    mirror_free_init(s);

    s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    if (!s->is_none_mode) {
        ret = mirror_dirty_init(s);
        if (ret < 0 || block_job_is_cancelled(&s->common)) {
            goto immediate_exit;
        }
    }

    assert(!s->dbi);
    s->dbi = bdrv_dirty_iter_new(s->dirty_bitmap, 0);
    for (;;) {
        uint64_t delay_ns = 0;
        int64_t cnt, delta;
        bool should_complete;

        if (s->ret < 0) {
            ret = s->ret;
            goto immediate_exit;
        }

        block_job_pause_point(&s->common);

        cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        /* s->common.offset contains the number of bytes already processed so
         * far, cnt is the number of dirty sectors remaining and
         * s->sectors_in_flight is the number of sectors currently being
         * processed; together those are the current total operation length */
        s->common.len = s->common.offset +
                        (cnt + s->sectors_in_flight) * BDRV_SECTOR_SIZE;

        /* Note that even when no rate limit is applied we need to yield
         * periodically with no pending I/O so that bdrv_drain_all() returns.
         * We do so every SLICE_TIME nanoseconds, or when there is an error,
         * or when the source is clean, whichever comes first.
         */
        delta = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - s->last_pause_ns;
        if (delta < SLICE_TIME &&
            s->common.iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
            if (s->in_flight >= MAX_IN_FLIGHT || s->buf_free_count == 0 ||
                (cnt == 0 && s->in_flight > 0)) {
                trace_mirror_yield(s, cnt, s->buf_free_count, s->in_flight);
                mirror_wait_for_io(s);
                continue;
            } else if (cnt != 0) {
                delay_ns = mirror_iteration(s);
            }
        }

        should_complete = false;
        if (s->in_flight == 0 && cnt == 0) {
            trace_mirror_before_flush(s);
            if (!s->synced) {
                if (mirror_flush(s) < 0) {
                    /* Go check s->ret.  */
                    continue;
                }
                /* We're out of the streaming phase.  From now on, if the job
                 * is cancelled we will actually complete all pending I/O and
                 * report completion.  This way, block-job-cancel will leave
                 * the target in a consistent state.
                 */
                block_job_event_ready(&s->common);
                s->synced = true;
            }

            should_complete = s->should_complete ||
                block_job_is_cancelled(&s->common);
            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        }

        if (cnt == 0 && should_complete) {
            /* The dirty bitmap is not updated while operations are pending.
             * If we're about to exit, wait for pending operations before
             * calling bdrv_get_dirty_count(bs), or we may exit while the
             * source has dirty data to copy!
             *
             * Note that I/O can be submitted by the guest while
             * mirror_populate runs, so pause it now.  Before deciding
             * whether to switch to target check one last time if I/O has
             * come in the meanwhile, and if not flush the data to disk.
             */
            trace_mirror_before_drain(s, cnt);

            bdrv_drained_begin(bs);
            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
            if (cnt > 0 || mirror_flush(s) < 0) {
                bdrv_drained_end(bs);
                continue;
            }

            /* The two disks are in sync.  Exit and report successful
             * completion.
             */
            assert(QLIST_EMPTY(&bs->tracked_requests));
            s->common.cancelled = false;
            need_drain = false;
            break;
        }

        ret = 0;
        trace_mirror_before_sleep(s, cnt, s->synced, delay_ns);
        if (!s->synced) {
            block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
            if (block_job_is_cancelled(&s->common)) {
                break;
            }
        } else if (!should_complete) {
            delay_ns = (s->in_flight == 0 && cnt == 0 ? SLICE_TIME : 0);
            block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
        }
        s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    }

immediate_exit:
    if (s->in_flight > 0) {
        /* We get here only if something went wrong.  Either the job failed,
         * or it was cancelled prematurely so that we do not guarantee that
         * the target is a copy of the source.
         */
        assert(ret < 0 || (!s->synced && block_job_is_cancelled(&s->common)));
        assert(need_drain);
        mirror_wait_for_all_io(s);
    }

    assert(s->in_flight == 0);
    qemu_vfree(s->buf);
    g_free(s->cow_bitmap);
    g_free(s->in_flight_bitmap);
    bdrv_dirty_iter_free(s->dbi);
    bdrv_release_dirty_bitmap(bs, s->dirty_bitmap);

    data = g_malloc(sizeof(*data));
    data->ret = ret;

    if (need_drain) {
        bdrv_drained_begin(bs);
    }
    block_job_defer_to_main_loop(&s->common, mirror_exit, data);
}

static void mirror_set_speed(BlockJob *job, int64_t speed, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    if (speed < 0) {
        error_setg(errp, QERR_INVALID_PARAMETER, "speed");
        return;
    }
    ratelimit_set_speed(&s->limit, speed / BDRV_SECTOR_SIZE, SLICE_TIME);
}

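/* block-job-complete handler: optionally open the target's backing chain,
 * block operations on the to_replace node, and wake the job so it can finish
 * the final synchronization pass. */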
924d63ffd87SPaolo Bonzini static void mirror_complete(BlockJob *job, Error **errp)
925d63ffd87SPaolo Bonzini {
926d63ffd87SPaolo Bonzini     MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
9274ef85a9cSKevin Wolf     BlockDriverState *target;
928d63ffd87SPaolo Bonzini 
929274fcceeSMax Reitz     target = blk_bs(s->target);
930274fcceeSMax Reitz 
931d63ffd87SPaolo Bonzini     if (!s->synced) {
9329df229c3SAlberto Garcia         error_setg(errp, "The active block job '%s' cannot be completed",
9339df229c3SAlberto Garcia                    job->id);
934d63ffd87SPaolo Bonzini         return;
935d63ffd87SPaolo Bonzini     }
936d63ffd87SPaolo Bonzini 
937274fcceeSMax Reitz     if (s->backing_mode == MIRROR_OPEN_BACKING_CHAIN) {
938274fcceeSMax Reitz         int ret;
939274fcceeSMax Reitz 
940274fcceeSMax Reitz         assert(!target->backing);
941274fcceeSMax Reitz         ret = bdrv_open_backing_file(target, NULL, "backing", errp);
942274fcceeSMax Reitz         if (ret < 0) {
943274fcceeSMax Reitz             return;
944274fcceeSMax Reitz         }
945274fcceeSMax Reitz     }
946274fcceeSMax Reitz 
94715d67298SChanglong Xie     /* block all operations on to_replace bs */
94809158f00SBenoît Canet     if (s->replaces) {
9495a7e7a0bSStefan Hajnoczi         AioContext *replace_aio_context;
9505a7e7a0bSStefan Hajnoczi 
951e12f3784SWen Congyang         s->to_replace = bdrv_find_node(s->replaces);
95209158f00SBenoît Canet         if (!s->to_replace) {
953e12f3784SWen Congyang             error_setg(errp, "Node name '%s' not found", s->replaces);
95409158f00SBenoît Canet             return;
95509158f00SBenoît Canet         }
95609158f00SBenoît Canet 
9575a7e7a0bSStefan Hajnoczi         replace_aio_context = bdrv_get_aio_context(s->to_replace);
9585a7e7a0bSStefan Hajnoczi         aio_context_acquire(replace_aio_context);
9595a7e7a0bSStefan Hajnoczi 
9604ef85a9cSKevin Wolf         /* TODO Translate this into permission system. Current definition of
9614ef85a9cSKevin Wolf          * GRAPH_MOD would require to request it for the parents; they might
9624ef85a9cSKevin Wolf          * not even be BlockDriverStates, however, so a BdrvChild can't address
9634ef85a9cSKevin Wolf          * them. May need redefinition of GRAPH_MOD. */
96409158f00SBenoît Canet         error_setg(&s->replace_blocker,
96509158f00SBenoît Canet                    "block device is in use by block-job-complete");
96609158f00SBenoît Canet         bdrv_op_block_all(s->to_replace, s->replace_blocker);
96709158f00SBenoît Canet         bdrv_ref(s->to_replace);
9685a7e7a0bSStefan Hajnoczi 
9695a7e7a0bSStefan Hajnoczi         aio_context_release(replace_aio_context);
97009158f00SBenoît Canet     }
97109158f00SBenoît Canet 
972d63ffd87SPaolo Bonzini     s->should_complete = true;
973751ebd76SFam Zheng     block_job_enter(&s->common);
974d63ffd87SPaolo Bonzini }
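
/* Illustrative sketch (not part of the original source): mirror_complete() is
 * reached through the .complete job driver callback when management issues
 * QMP block-job-complete ('job0' is a hypothetical job id):
 *
 *     { "execute": "block-job-complete", "arguments": { "device": "job0" } }
 *
 * It only succeeds once the job is synced (signalled to management by the
 * BLOCK_JOB_READY event); setting should_complete and kicking the job
 * coroutine with block_job_enter() lets mirror_run() finish the pivot. */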
975d63ffd87SPaolo Bonzini 
976bae8196dSPaolo Bonzini static void mirror_pause(BlockJob *job)
977565ac01fSStefan Hajnoczi {
978565ac01fSStefan Hajnoczi     MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
979565ac01fSStefan Hajnoczi 
980bae8196dSPaolo Bonzini     mirror_wait_for_all_io(s);
981565ac01fSStefan Hajnoczi }
982565ac01fSStefan Hajnoczi 
983565ac01fSStefan Hajnoczi static void mirror_attached_aio_context(BlockJob *job, AioContext *new_context)
984565ac01fSStefan Hajnoczi {
985565ac01fSStefan Hajnoczi     MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
986565ac01fSStefan Hajnoczi 
987565ac01fSStefan Hajnoczi     blk_set_aio_context(s->target, new_context);
988565ac01fSStefan Hajnoczi }
989565ac01fSStefan Hajnoczi 
990bae8196dSPaolo Bonzini static void mirror_drain(BlockJob *job)
991bae8196dSPaolo Bonzini {
992bae8196dSPaolo Bonzini     MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
993bae8196dSPaolo Bonzini 
994bae8196dSPaolo Bonzini     /* Need to keep a reference in case blk_drain triggers execution
995bae8196dSPaolo Bonzini      * of mirror_complete...
996bae8196dSPaolo Bonzini      */
997bae8196dSPaolo Bonzini     if (s->target) {
998bae8196dSPaolo Bonzini         BlockBackend *target = s->target;
999bae8196dSPaolo Bonzini         blk_ref(target);
1000bae8196dSPaolo Bonzini         blk_drain(target);
1001bae8196dSPaolo Bonzini         blk_unref(target);
1002bae8196dSPaolo Bonzini     }
1003bae8196dSPaolo Bonzini }
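
/* Illustrative sketch (not part of the original source) of the hazard the
 * reference above guards against:
 *
 *     blk_drain(s->target);   // may run a deferred completion that calls
 *                             // blk_unref(s->target) and frees it
 *
 * Without the surrounding blk_ref()/blk_unref() pair, the local 'target'
 * pointer could be left dangling before blk_drain() returns. */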
1004bae8196dSPaolo Bonzini 
10053fc4b10aSFam Zheng static const BlockJobDriver mirror_job_driver = {
1006893f7ebaSPaolo Bonzini     .instance_size          = sizeof(MirrorBlockJob),
100779e14bf7SFam Zheng     .job_type               = BLOCK_JOB_TYPE_MIRROR,
1008893f7ebaSPaolo Bonzini     .set_speed              = mirror_set_speed,
1009a7815a76SJohn Snow     .start                  = mirror_run,
1010d63ffd87SPaolo Bonzini     .complete               = mirror_complete,
1011565ac01fSStefan Hajnoczi     .pause                  = mirror_pause,
1012565ac01fSStefan Hajnoczi     .attached_aio_context   = mirror_attached_aio_context,
1013bae8196dSPaolo Bonzini     .drain                  = mirror_drain,
1014893f7ebaSPaolo Bonzini };
1015893f7ebaSPaolo Bonzini 
101603544a6eSFam Zheng static const BlockJobDriver commit_active_job_driver = {
101703544a6eSFam Zheng     .instance_size          = sizeof(MirrorBlockJob),
101803544a6eSFam Zheng     .job_type               = BLOCK_JOB_TYPE_COMMIT,
101903544a6eSFam Zheng     .set_speed              = mirror_set_speed,
1020a7815a76SJohn Snow     .start                  = mirror_run,
102103544a6eSFam Zheng     .complete               = mirror_complete,
1022565ac01fSStefan Hajnoczi     .pause                  = mirror_pause,
1023565ac01fSStefan Hajnoczi     .attached_aio_context   = mirror_attached_aio_context,
1024bae8196dSPaolo Bonzini     .drain                  = mirror_drain,
102503544a6eSFam Zheng };
102603544a6eSFam Zheng 
10274ef85a9cSKevin Wolf static int coroutine_fn bdrv_mirror_top_preadv(BlockDriverState *bs,
10284ef85a9cSKevin Wolf     uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags)
10294ef85a9cSKevin Wolf {
10304ef85a9cSKevin Wolf     return bdrv_co_preadv(bs->backing, offset, bytes, qiov, flags);
10314ef85a9cSKevin Wolf }
10324ef85a9cSKevin Wolf 
10334ef85a9cSKevin Wolf static int coroutine_fn bdrv_mirror_top_pwritev(BlockDriverState *bs,
10344ef85a9cSKevin Wolf     uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags)
10354ef85a9cSKevin Wolf {
10364ef85a9cSKevin Wolf     return bdrv_co_pwritev(bs->backing, offset, bytes, qiov, flags);
10374ef85a9cSKevin Wolf }
10384ef85a9cSKevin Wolf 
10394ef85a9cSKevin Wolf static int coroutine_fn bdrv_mirror_top_flush(BlockDriverState *bs)
10404ef85a9cSKevin Wolf {
10414ef85a9cSKevin Wolf     return bdrv_co_flush(bs->backing->bs);
10424ef85a9cSKevin Wolf }
10434ef85a9cSKevin Wolf 
10444ef85a9cSKevin Wolf static int64_t coroutine_fn bdrv_mirror_top_get_block_status(
10454ef85a9cSKevin Wolf     BlockDriverState *bs, int64_t sector_num, int nb_sectors, int *pnum,
10464ef85a9cSKevin Wolf     BlockDriverState **file)
10474ef85a9cSKevin Wolf {
10484ef85a9cSKevin Wolf     *pnum = nb_sectors;
10494ef85a9cSKevin Wolf     *file = bs->backing->bs;
10504ef85a9cSKevin Wolf     return BDRV_BLOCK_RAW | BDRV_BLOCK_OFFSET_VALID | BDRV_BLOCK_DATA |
10514ef85a9cSKevin Wolf            (sector_num << BDRV_SECTOR_BITS);
10524ef85a9cSKevin Wolf }
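
/* Illustrative sketch (not part of the original source): for a query of 8
 * sectors starting at sector 100, the filter reports the range as raw data
 * stored in its backing node:
 *
 *     *pnum == 8;                        // whole range in one extent
 *     *file == bs->backing->bs;          // the data lives below the filter
 *     ret & BDRV_BLOCK_DATA;             // allocated, non-zero data
 *     ret & BDRV_BLOCK_OFFSET_VALID;     // the offset bits are meaningful
 *     (ret & BDRV_BLOCK_OFFSET_MASK) == 100 * BDRV_SECTOR_SIZE;
 *
 * so block-status queries pass through the filter unchanged. */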
10534ef85a9cSKevin Wolf 
10544ef85a9cSKevin Wolf static int coroutine_fn bdrv_mirror_top_pwrite_zeroes(BlockDriverState *bs,
10554ef85a9cSKevin Wolf     int64_t offset, int count, BdrvRequestFlags flags)
10564ef85a9cSKevin Wolf {
10574ef85a9cSKevin Wolf     return bdrv_co_pwrite_zeroes(bs->backing, offset, count, flags);
10584ef85a9cSKevin Wolf }
10594ef85a9cSKevin Wolf 
10604ef85a9cSKevin Wolf static int coroutine_fn bdrv_mirror_top_pdiscard(BlockDriverState *bs,
10614ef85a9cSKevin Wolf     int64_t offset, int count)
10624ef85a9cSKevin Wolf {
10634ef85a9cSKevin Wolf     return bdrv_co_pdiscard(bs->backing->bs, offset, count);
10644ef85a9cSKevin Wolf }
10654ef85a9cSKevin Wolf 
1066fd4a6493SKevin Wolf static void bdrv_mirror_top_refresh_filename(BlockDriverState *bs, QDict *opts)
1067fd4a6493SKevin Wolf {
1068fd4a6493SKevin Wolf     bdrv_refresh_filename(bs->backing->bs);
1069fd4a6493SKevin Wolf     pstrcpy(bs->exact_filename, sizeof(bs->exact_filename),
1070fd4a6493SKevin Wolf             bs->backing->bs->filename);
1071fd4a6493SKevin Wolf }
1072fd4a6493SKevin Wolf 
10734ef85a9cSKevin Wolf static void bdrv_mirror_top_close(BlockDriverState *bs)
10744ef85a9cSKevin Wolf {
10754ef85a9cSKevin Wolf }
10764ef85a9cSKevin Wolf 
10774ef85a9cSKevin Wolf static void bdrv_mirror_top_child_perm(BlockDriverState *bs, BdrvChild *c,
10784ef85a9cSKevin Wolf                                        const BdrvChildRole *role,
10794ef85a9cSKevin Wolf                                        uint64_t perm, uint64_t shared,
10804ef85a9cSKevin Wolf                                        uint64_t *nperm, uint64_t *nshared)
10814ef85a9cSKevin Wolf {
10824ef85a9cSKevin Wolf     /* Must be able to forward guest writes to the real image */
10834ef85a9cSKevin Wolf     *nperm = 0;
10844ef85a9cSKevin Wolf     if (perm & BLK_PERM_WRITE) {
10854ef85a9cSKevin Wolf         *nperm |= BLK_PERM_WRITE;
10864ef85a9cSKevin Wolf     }
10874ef85a9cSKevin Wolf 
10884ef85a9cSKevin Wolf     *nshared = BLK_PERM_ALL;
10894ef85a9cSKevin Wolf }
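
/* Illustrative sketch (not part of the original source): if a parent attaches
 * to the filter with perm == (BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE), the
 * filter requests from its backing child only
 *
 *     *nperm   == BLK_PERM_WRITE;    // guest writes must be forwardable
 *     *nshared == BLK_PERM_ALL;      // tolerate anything happening below
 *
 * Notably, CONSISTENT_READ is not propagated: the filter satisfies it by
 * itself even when, during active commit, the nodes below cannot. */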
10904ef85a9cSKevin Wolf 
10914ef85a9cSKevin Wolf /* Dummy node that provides consistent reads to its users without requiring
10924ef85a9cSKevin Wolf  * them from its backing file, and that allows writes on the backing chain. */
10934ef85a9cSKevin Wolf static BlockDriver bdrv_mirror_top = {
10944ef85a9cSKevin Wolf     .format_name                = "mirror_top",
10954ef85a9cSKevin Wolf     .bdrv_co_preadv             = bdrv_mirror_top_preadv,
10964ef85a9cSKevin Wolf     .bdrv_co_pwritev            = bdrv_mirror_top_pwritev,
10974ef85a9cSKevin Wolf     .bdrv_co_pwrite_zeroes      = bdrv_mirror_top_pwrite_zeroes,
10984ef85a9cSKevin Wolf     .bdrv_co_pdiscard           = bdrv_mirror_top_pdiscard,
10994ef85a9cSKevin Wolf     .bdrv_co_flush              = bdrv_mirror_top_flush,
11004ef85a9cSKevin Wolf     .bdrv_co_get_block_status   = bdrv_mirror_top_get_block_status,
1101fd4a6493SKevin Wolf     .bdrv_refresh_filename      = bdrv_mirror_top_refresh_filename,
11024ef85a9cSKevin Wolf     .bdrv_close                 = bdrv_mirror_top_close,
11034ef85a9cSKevin Wolf     .bdrv_child_perm            = bdrv_mirror_top_child_perm,
11044ef85a9cSKevin Wolf };
11054ef85a9cSKevin Wolf 
110671aa9867SAlberto Garcia static void mirror_start_job(const char *job_id, BlockDriverState *bs,
110747970dfbSJohn Snow                              int creation_flags, BlockDriverState *target,
110847970dfbSJohn Snow                              const char *replaces, int64_t speed,
110947970dfbSJohn Snow                              uint32_t granularity, int64_t buf_size,
1110274fcceeSMax Reitz                              BlockMirrorBackingMode backing_mode,
111103544a6eSFam Zheng                              BlockdevOnError on_source_error,
1112b952b558SPaolo Bonzini                              BlockdevOnError on_target_error,
11130fc9f8eaSFam Zheng                              bool unmap,
1114097310b5SMarkus Armbruster                              BlockCompletionFunc *cb,
111551ccfa2dSFam Zheng                              void *opaque,
111603544a6eSFam Zheng                              const BlockJobDriver *driver,
1117b49f7eadSWen Congyang                              bool is_none_mode, BlockDriverState *base,
111851ccfa2dSFam Zheng                              bool auto_complete, const char *filter_node_name,
111951ccfa2dSFam Zheng                              Error **errp)
1120893f7ebaSPaolo Bonzini {
1121893f7ebaSPaolo Bonzini     MirrorBlockJob *s;
11224ef85a9cSKevin Wolf     BlockDriverState *mirror_top_bs;
11234ef85a9cSKevin Wolf     bool target_graph_mod;
11244ef85a9cSKevin Wolf     bool target_is_backing;
1125b2c2832cSKevin Wolf     Error *local_err = NULL;
1126d7086422SKevin Wolf     int ret;
1127893f7ebaSPaolo Bonzini 
1128eee13dfeSPaolo Bonzini     if (granularity == 0) {
1129341ebc2fSJohn Snow         granularity = bdrv_get_default_bitmap_granularity(target);
1130eee13dfeSPaolo Bonzini     }
1131eee13dfeSPaolo Bonzini 
1132eee13dfeSPaolo Bonzini     assert((granularity & (granularity - 1)) == 0); /* power of two */
1133eee13dfeSPaolo Bonzini 
113448ac0a4dSWen Congyang     if (buf_size < 0) {
113548ac0a4dSWen Congyang         error_setg(errp, "Invalid parameter 'buf-size'");
113648ac0a4dSWen Congyang         return;
113748ac0a4dSWen Congyang     }
113848ac0a4dSWen Congyang 
113948ac0a4dSWen Congyang     if (buf_size == 0) {
114048ac0a4dSWen Congyang         buf_size = DEFAULT_MIRROR_BUF_SIZE;
114148ac0a4dSWen Congyang     }
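
    /* Illustrative arithmetic (not part of the original source): with the
     * macros at the top of this file, the default works out to
     *
     *     DEFAULT_MIRROR_BUF_SIZE == MAX_IN_FLIGHT * MAX_IO_SECTORS
     *                                * BDRV_SECTOR_SIZE
     *                             == 16 * 2048 * 512 == 16 MiB
     *
     * i.e. enough buffer space for 16 in-flight 1 MiB copy operations. */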
11425bc361b8SFam Zheng 
11434ef85a9cSKevin Wolf     /* In the case of active commit, add a dummy driver that provides
11444ef85a9cSKevin Wolf      * consistent reads at the top while they are disabled in the intermediate
11454ef85a9cSKevin Wolf      * nodes, and make the backing chain writable. */
11466cdbceb1SKevin Wolf     mirror_top_bs = bdrv_new_open_driver(&bdrv_mirror_top, filter_node_name,
11476cdbceb1SKevin Wolf                                          BDRV_O_RDWR, errp);
11484ef85a9cSKevin Wolf     if (mirror_top_bs == NULL) {
1149893f7ebaSPaolo Bonzini         return;
1150893f7ebaSPaolo Bonzini     }
11514ef85a9cSKevin Wolf     mirror_top_bs->total_sectors = bs->total_sectors;
115219dd29e8SFam Zheng     bdrv_set_aio_context(mirror_top_bs, bdrv_get_aio_context(bs));
1153893f7ebaSPaolo Bonzini 
11544ef85a9cSKevin Wolf     /* bdrv_append() takes ownership of the mirror_top_bs reference; keep it
11557a25fcd0SMax Reitz      * alive until block_job_create() succeeds, even if bs has no parent. */
11564ef85a9cSKevin Wolf     bdrv_ref(mirror_top_bs);
11574ef85a9cSKevin Wolf     bdrv_drained_begin(bs);
1158b2c2832cSKevin Wolf     bdrv_append(mirror_top_bs, bs, &local_err);
11594ef85a9cSKevin Wolf     bdrv_drained_end(bs);
11604ef85a9cSKevin Wolf 
1161b2c2832cSKevin Wolf     if (local_err) {
1162b2c2832cSKevin Wolf         bdrv_unref(mirror_top_bs);
1163b2c2832cSKevin Wolf         error_propagate(errp, local_err);
1164b2c2832cSKevin Wolf         return;
1165b2c2832cSKevin Wolf     }
1166b2c2832cSKevin Wolf 
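    /* Illustrative sketch (not part of the original source): after the
     * bdrv_append() above, the graph looks like
     *
     *     former parents of bs (guest device, ...)
     *                      |
     *                mirror_top_bs        <- bdrv_mirror_top filter
     *                      | backing
     *                     bs              <- source
     *
     * so every guest write is routed through the filter node. */
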
11674ef85a9cSKevin Wolf     /* Make sure that the source is not resized while the job is running */
11684ef85a9cSKevin Wolf     s = block_job_create(job_id, driver, mirror_top_bs,
11694ef85a9cSKevin Wolf                          BLK_PERM_CONSISTENT_READ,
11704ef85a9cSKevin Wolf                          BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED |
11714ef85a9cSKevin Wolf                          BLK_PERM_WRITE | BLK_PERM_GRAPH_MOD, speed,
11724ef85a9cSKevin Wolf                          creation_flags, cb, opaque, errp);
11734ef85a9cSKevin Wolf     if (!s) {
11744ef85a9cSKevin Wolf         goto fail;
11754ef85a9cSKevin Wolf     }
11767a25fcd0SMax Reitz     /* The block job now has a reference to this node */
11777a25fcd0SMax Reitz     bdrv_unref(mirror_top_bs);
11787a25fcd0SMax Reitz 
11794ef85a9cSKevin Wolf     s->source = bs;
11804ef85a9cSKevin Wolf     s->mirror_top_bs = mirror_top_bs;
11814ef85a9cSKevin Wolf 
11824ef85a9cSKevin Wolf     /* No resize for the target either; while the mirror is still running, a
11834ef85a9cSKevin Wolf      * consistent read isn't necessarily possible. We could possibly allow
11844ef85a9cSKevin Wolf      * writes and graph modifications, though it would likely defeat the
11854ef85a9cSKevin Wolf      * purpose of a mirror, so leave them blocked for now.
11864ef85a9cSKevin Wolf      *
11874ef85a9cSKevin Wolf      * In the case of active commit, things look a bit different, though,
11884ef85a9cSKevin Wolf      * because the target is an already populated backing file in active use.
11894ef85a9cSKevin Wolf      * We can allow anything except resize there. */
11904ef85a9cSKevin Wolf     target_is_backing = bdrv_chain_contains(bs, target);
11914ef85a9cSKevin Wolf     target_graph_mod = (backing_mode != MIRROR_LEAVE_BACKING_CHAIN);
11924ef85a9cSKevin Wolf     s->target = blk_new(BLK_PERM_WRITE | BLK_PERM_RESIZE |
11934ef85a9cSKevin Wolf                         (target_graph_mod ? BLK_PERM_GRAPH_MOD : 0),
11944ef85a9cSKevin Wolf                         BLK_PERM_WRITE_UNCHANGED |
11954ef85a9cSKevin Wolf                         (target_is_backing ? BLK_PERM_CONSISTENT_READ |
11964ef85a9cSKevin Wolf                                              BLK_PERM_WRITE |
11974ef85a9cSKevin Wolf                                              BLK_PERM_GRAPH_MOD : 0));
1198d7086422SKevin Wolf     ret = blk_insert_bs(s->target, target, errp);
1199d7086422SKevin Wolf     if (ret < 0) {
12004ef85a9cSKevin Wolf         goto fail;
1201d7086422SKevin Wolf     }
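
    /* Illustrative sketch (not part of the original source) of the resulting
     * permission masks: for a plain mirror to a fresh target (not in the
     * source chain, backing mode not MIRROR_LEAVE_BACKING_CHAIN) this is
     *
     *     perm   == BLK_PERM_WRITE | BLK_PERM_RESIZE | BLK_PERM_GRAPH_MOD;
     *     shared == BLK_PERM_WRITE_UNCHANGED;
     *
     * while active commit (target_is_backing) additionally shares
     * CONSISTENT_READ, WRITE and GRAPH_MOD, because the backing file remains
     * in active use. */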
1202e253f4b8SKevin Wolf 
120309158f00SBenoît Canet     s->replaces = g_strdup(replaces);
1204b952b558SPaolo Bonzini     s->on_source_error = on_source_error;
1205b952b558SPaolo Bonzini     s->on_target_error = on_target_error;
120603544a6eSFam Zheng     s->is_none_mode = is_none_mode;
1207274fcceeSMax Reitz     s->backing_mode = backing_mode;
12085bc361b8SFam Zheng     s->base = base;
1209eee13dfeSPaolo Bonzini     s->granularity = granularity;
121048ac0a4dSWen Congyang     s->buf_size = ROUND_UP(buf_size, granularity);
12110fc9f8eaSFam Zheng     s->unmap = unmap;
1212b49f7eadSWen Congyang     if (auto_complete) {
1213b49f7eadSWen Congyang         s->should_complete = true;
1214b49f7eadSWen Congyang     }
1215b812f671SPaolo Bonzini 
12160db6e54aSFam Zheng     s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity, NULL, errp);
1217b8afb520SFam Zheng     if (!s->dirty_bitmap) {
121888f9d1b3SKevin Wolf         goto fail;
1219b8afb520SFam Zheng     }
122010f3cd15SAlberto Garcia 
12214ef85a9cSKevin Wolf     /* Required permissions are already taken with blk_new() */
122276d554e2SKevin Wolf     block_job_add_bdrv(&s->common, "target", target, 0, BLK_PERM_ALL,
122376d554e2SKevin Wolf                        &error_abort);
122476d554e2SKevin Wolf 
1225f3ede4b0SAlberto Garcia     /* In commit_active_start() all intermediate nodes disappear, so
1226f3ede4b0SAlberto Garcia      * any jobs in them must be blocked */
12274ef85a9cSKevin Wolf     if (target_is_backing) {
1228f3ede4b0SAlberto Garcia         BlockDriverState *iter;
1229f3ede4b0SAlberto Garcia         for (iter = backing_bs(bs); iter != target; iter = backing_bs(iter)) {
12304ef85a9cSKevin Wolf             /* XXX BLK_PERM_WRITE needs to be allowed so we don't block
12314ef85a9cSKevin Wolf              * ourselves at s->base (if writes are blocked for a node, they are
12324ef85a9cSKevin Wolf              * also blocked for its backing file). The other option would be a
12334ef85a9cSKevin Wolf              * second filter driver above s->base (== target). */
12344ef85a9cSKevin Wolf             ret = block_job_add_bdrv(&s->common, "intermediate node", iter, 0,
12354ef85a9cSKevin Wolf                                      BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE,
12364ef85a9cSKevin Wolf                                      errp);
12374ef85a9cSKevin Wolf             if (ret < 0) {
12384ef85a9cSKevin Wolf                 goto fail;
12394ef85a9cSKevin Wolf             }
1240f3ede4b0SAlberto Garcia         }
1241f3ede4b0SAlberto Garcia     }
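
    /* Illustrative sketch (not part of the original source): for an active
     * commit of the chain
     *
     *     bs (active layer) -> inter1 -> inter2 -> base == target
     *
     * the loop above registers inter1 and inter2 with the job, so that no
     * other job can attach to nodes that are about to disappear. */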
124210f3cd15SAlberto Garcia 
12435ccac6f1SJohn Snow     trace_mirror_start(bs, s, opaque);
12445ccac6f1SJohn Snow     block_job_start(&s->common);
12454ef85a9cSKevin Wolf     return;
12464ef85a9cSKevin Wolf 
12474ef85a9cSKevin Wolf fail:
12484ef85a9cSKevin Wolf     if (s) {
12497a25fcd0SMax Reitz         /* Make sure this BDS does not go away until we have completed the graph
12507a25fcd0SMax Reitz          * changes below */
12517a25fcd0SMax Reitz         bdrv_ref(mirror_top_bs);
12527a25fcd0SMax Reitz 
12534ef85a9cSKevin Wolf         g_free(s->replaces);
12544ef85a9cSKevin Wolf         blk_unref(s->target);
1255*05b0d8e3SPaolo Bonzini         block_job_early_fail(&s->common);
12564ef85a9cSKevin Wolf     }
12574ef85a9cSKevin Wolf 
1258c1cef672SFam Zheng     bdrv_child_try_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL,
1259c1cef672SFam Zheng                             &error_abort);
12605fe31c25SKevin Wolf     bdrv_replace_node(mirror_top_bs, backing_bs(mirror_top_bs), &error_abort);
12617a25fcd0SMax Reitz 
12627a25fcd0SMax Reitz     bdrv_unref(mirror_top_bs);
1263893f7ebaSPaolo Bonzini }
126403544a6eSFam Zheng 
126571aa9867SAlberto Garcia void mirror_start(const char *job_id, BlockDriverState *bs,
126671aa9867SAlberto Garcia                   BlockDriverState *target, const char *replaces,
12675fba6c0eSJohn Snow                   int64_t speed, uint32_t granularity, int64_t buf_size,
1268274fcceeSMax Reitz                   MirrorSyncMode mode, BlockMirrorBackingMode backing_mode,
1269274fcceeSMax Reitz                   BlockdevOnError on_source_error,
127003544a6eSFam Zheng                   BlockdevOnError on_target_error,
12716cdbceb1SKevin Wolf                   bool unmap, const char *filter_node_name, Error **errp)
127203544a6eSFam Zheng {
127303544a6eSFam Zheng     bool is_none_mode;
127403544a6eSFam Zheng     BlockDriverState *base;
127503544a6eSFam Zheng 
12764b80ab2bSJohn Snow     if (mode == MIRROR_SYNC_MODE_INCREMENTAL) {
12774b80ab2bSJohn Snow         error_setg(errp, "Sync mode 'incremental' not supported");
1278d58d8453SJohn Snow         return;
1279d58d8453SJohn Snow     }
128003544a6eSFam Zheng     is_none_mode = mode == MIRROR_SYNC_MODE_NONE;
1281760e0063SKevin Wolf     base = mode == MIRROR_SYNC_MODE_TOP ? backing_bs(bs) : NULL;
128247970dfbSJohn Snow     mirror_start_job(job_id, bs, BLOCK_JOB_DEFAULT, target, replaces,
1283274fcceeSMax Reitz                      speed, granularity, buf_size, backing_mode,
128451ccfa2dSFam Zheng                      on_source_error, on_target_error, unmap, NULL, NULL,
12856cdbceb1SKevin Wolf                      &mirror_job_driver, is_none_mode, base, false,
128651ccfa2dSFam Zheng                      filter_node_name, errp);
128703544a6eSFam Zheng }
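
/* Illustrative sketch (not part of the original source): mirror_start()
 * implements the QMP drive-mirror command; a full mirror of a hypothetical
 * drive 'drive0' to a new image might look like
 *
 *     { "execute": "drive-mirror",
 *       "arguments": { "device": "drive0",
 *                      "target": "/tmp/mirror.qcow2",
 *                      "format": "qcow2",
 *                      "sync": "full" } }
 *
 * Sync mode 'incremental' is rejected above; drive-backup is the command
 * that supports it. */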
128803544a6eSFam Zheng 
1289fd62c609SAlberto Garcia void commit_active_start(const char *job_id, BlockDriverState *bs,
129047970dfbSJohn Snow                          BlockDriverState *base, int creation_flags,
129147970dfbSJohn Snow                          int64_t speed, BlockdevOnError on_error,
12920db832f4SKevin Wolf                          const char *filter_node_name,
129378bbd910SFam Zheng                          BlockCompletionFunc *cb, void *opaque,
129478bbd910SFam Zheng                          bool auto_complete, Error **errp)
129503544a6eSFam Zheng {
12964da83585SJeff Cody     int orig_base_flags;
1297cc67f4d1SJeff Cody     Error *local_err = NULL;
12984da83585SJeff Cody 
12994da83585SJeff Cody     orig_base_flags = bdrv_get_flags(base);
13004da83585SJeff Cody 
130120a63d2cSFam Zheng     if (bdrv_reopen(base, bs->open_flags, errp)) {
130220a63d2cSFam Zheng         return;
130320a63d2cSFam Zheng     }
13044da83585SJeff Cody 
130547970dfbSJohn Snow     mirror_start_job(job_id, bs, creation_flags, base, NULL, speed, 0, 0,
130671aa9867SAlberto Garcia                      MIRROR_LEAVE_BACKING_CHAIN,
130751ccfa2dSFam Zheng                      on_error, on_error, true, cb, opaque,
13086cdbceb1SKevin Wolf                      &commit_active_job_driver, false, base, auto_complete,
130951ccfa2dSFam Zheng                      filter_node_name, &local_err);
13100fb6395cSMarkus Armbruster     if (local_err) {
1311cc67f4d1SJeff Cody         error_propagate(errp, local_err);
13124da83585SJeff Cody         goto error_restore_flags;
13134da83585SJeff Cody     }
13144da83585SJeff Cody 
13154da83585SJeff Cody     return;
13164da83585SJeff Cody 
13174da83585SJeff Cody error_restore_flags:
13184da83585SJeff Cody     /* Ignore the return value and errp of bdrv_reopen(), because we want to
13194da83585SJeff Cody      * propagate the original error */
13204da83585SJeff Cody     bdrv_reopen(base, orig_base_flags, NULL);
13214da83585SJeff Cody     return;
132203544a6eSFam Zheng }
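
/* Illustrative sketch (not part of the original source): commit_active_start()
 * backs QMP block-commit when the top of the commit is the active layer,
 * e.g. (hypothetical device id)
 *
 *     { "execute": "block-commit", "arguments": { "device": "drive0" } }
 *
 * The base is reopened read-write first; on failure its original flags are
 * restored via the error_restore_flags path above. */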
1323