xref: /qemu/block/mirror.c (revision 7a4e543d)
1 /*
2  * Image mirroring
3  *
4  * Copyright Red Hat, Inc. 2012
5  *
6  * Authors:
7  *  Paolo Bonzini  <pbonzini@redhat.com>
8  *
9  * This work is licensed under the terms of the GNU LGPL, version 2 or later.
10  * See the COPYING.LIB file in the top-level directory.
11  *
12  */
13 
14 #include "qemu/osdep.h"
15 #include "trace.h"
16 #include "block/blockjob.h"
17 #include "block/block_int.h"
18 #include "sysemu/block-backend.h"
19 #include "qapi/qmp/qerror.h"
20 #include "qemu/ratelimit.h"
21 #include "qemu/bitmap.h"
22 #include "qemu/error-report.h"
23 
24 #define SLICE_TIME    100000000ULL /* ns (100 ms) */
25 #define MAX_IN_FLIGHT 16
26 #define DEFAULT_MIRROR_BUF_SIZE   (10 << 20)
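/* 10 << 20 bytes = 10 MiB; with a typical 64 KiB granularity that is
 * 160 granularity-sized chunks on the free list (see mirror_free_init()). */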
27 
28 /* The mirroring buffer is carved into granularity-sized chunks.
29  * Free chunks are kept on a list threaded through the chunks themselves.
30  */
31 typedef struct MirrorBuffer {
32     QSIMPLEQ_ENTRY(MirrorBuffer) next;
33 } MirrorBuffer;
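/* Each free chunk's leading bytes double as the QSIMPLEQ link, so the free
 * list costs no memory beyond s->buf itself: mirror_free_init() carves
 * s->buf into these chunks and mirror_iteration_done() returns them. */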
34 
35 typedef struct MirrorBlockJob {
36     BlockJob common;
37     RateLimit limit;
38     BlockDriverState *target;
39     BlockDriverState *base;
40     /* The name of the graph node to replace */
41     char *replaces;
42     /* The BDS to replace */
43     BlockDriverState *to_replace;
44     /* Used to block operations on the drive-mirror-replace target */
45     Error *replace_blocker;
46     bool is_none_mode;
47     BlockdevOnError on_source_error, on_target_error;
48     bool synced;
49     bool should_complete;
50     int64_t sector_num;
51     int64_t granularity;
52     size_t buf_size;
53     int64_t bdev_length;
54     unsigned long *cow_bitmap;
55     BdrvDirtyBitmap *dirty_bitmap;
56     HBitmapIter hbi;
57     uint8_t *buf;
58     QSIMPLEQ_HEAD(, MirrorBuffer) buf_free;
59     int buf_free_count;
60 
61     unsigned long *in_flight_bitmap;
62     int in_flight;
63     int sectors_in_flight;
64     int ret;
65     bool unmap;
66     bool waiting_for_io;
67 } MirrorBlockJob;
68 
69 typedef struct MirrorOp {
70     MirrorBlockJob *s;
71     QEMUIOVector qiov;
72     int64_t sector_num;
73     int nb_sectors;
74 } MirrorOp;
75 
76 static BlockErrorAction mirror_error_action(MirrorBlockJob *s, bool read,
77                                             int error)
78 {
79     s->synced = false;
80     if (read) {
81         return block_job_error_action(&s->common, s->common.bs,
82                                       s->on_source_error, true, error);
83     } else {
84         return block_job_error_action(&s->common, s->target,
85                                       s->on_target_error, false, error);
86     }
87 }
88 
89 static void mirror_iteration_done(MirrorOp *op, int ret)
90 {
91     MirrorBlockJob *s = op->s;
92     struct iovec *iov;
93     int64_t chunk_num;
94     int i, nb_chunks, sectors_per_chunk;
95 
96     trace_mirror_iteration_done(s, op->sector_num, op->nb_sectors, ret);
97 
98     s->in_flight--;
99     s->sectors_in_flight -= op->nb_sectors;
100     iov = op->qiov.iov;
101     for (i = 0; i < op->qiov.niov; i++) {
102         MirrorBuffer *buf = (MirrorBuffer *) iov[i].iov_base;
103         QSIMPLEQ_INSERT_TAIL(&s->buf_free, buf, next);
104         s->buf_free_count++;
105     }
106 
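    /* s->granularity is in bytes; with BDRV_SECTOR_BITS == 9 a 64 KiB
     * granularity gives 65536 >> 9 == 128 sectors per chunk. */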
107     sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
108     chunk_num = op->sector_num / sectors_per_chunk;
109     nb_chunks = op->nb_sectors / sectors_per_chunk;
110     bitmap_clear(s->in_flight_bitmap, chunk_num, nb_chunks);
111     if (ret >= 0) {
112         if (s->cow_bitmap) {
113             bitmap_set(s->cow_bitmap, chunk_num, nb_chunks);
114         }
115         s->common.offset += (uint64_t)op->nb_sectors * BDRV_SECTOR_SIZE;
116     }
117 
118     qemu_iovec_destroy(&op->qiov);
119     g_free(op);
120 
121     if (s->waiting_for_io) {
122         qemu_coroutine_enter(s->common.co, NULL);
123     }
124 }
125 
126 static void mirror_write_complete(void *opaque, int ret)
127 {
128     MirrorOp *op = opaque;
129     MirrorBlockJob *s = op->s;
130     if (ret < 0) {
131         BlockErrorAction action;
132 
133         bdrv_set_dirty_bitmap(s->dirty_bitmap, op->sector_num, op->nb_sectors);
134         action = mirror_error_action(s, false, -ret);
135         if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
136             s->ret = ret;
137         }
138     }
139     mirror_iteration_done(op, ret);
140 }
141 
142 static void mirror_read_complete(void *opaque, int ret)
143 {
144     MirrorOp *op = opaque;
145     MirrorBlockJob *s = op->s;
146     if (ret < 0) {
147         BlockErrorAction action;
148 
149         bdrv_set_dirty_bitmap(s->dirty_bitmap, op->sector_num, op->nb_sectors);
150         action = mirror_error_action(s, true, -ret);
151         if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
152             s->ret = ret;
153         }
154 
155         mirror_iteration_done(op, ret);
156         return;
157     }
158     bdrv_aio_writev(s->target, op->sector_num, &op->qiov, op->nb_sectors,
159                     mirror_write_complete, op);
160 }
161 
162 static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
163 {
164     BlockDriverState *source = s->common.bs;
165     int nb_sectors, sectors_per_chunk, nb_chunks, max_iov;
166     int64_t end, sector_num, next_chunk, next_sector, hbitmap_next_sector;
167     uint64_t delay_ns = 0;
168     MirrorOp *op;
169     int pnum;
170     int64_t ret;
171     BlockDriverState *file;
172 
173     max_iov = MIN(source->bl.max_iov, s->target->bl.max_iov);
174 
175     s->sector_num = hbitmap_iter_next(&s->hbi);
176     if (s->sector_num < 0) {
177         bdrv_dirty_iter_init(s->dirty_bitmap, &s->hbi);
178         s->sector_num = hbitmap_iter_next(&s->hbi);
179         trace_mirror_restart_iter(s, bdrv_get_dirty_count(s->dirty_bitmap));
180         assert(s->sector_num >= 0);
181     }
182 
183     hbitmap_next_sector = s->sector_num;
184     sector_num = s->sector_num;
185     sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
186     end = s->bdev_length / BDRV_SECTOR_SIZE;
187 
188     /* Extend the QEMUIOVector to include all adjacent blocks that will
189      * be copied in this operation.
190      *
191      * We have to do this if we have no backing file yet in the destination,
192      * and the cluster size exceeds the granularity.  Then we do COW ourselves.
193      * The first time a cluster is copied, copy it entirely.  Note that,
194      * because both the granularity and the cluster size are powers of two,
195      * the number of sectors to copy cannot exceed one cluster.
196      *
197      * We also want to extend the QEMUIOVector to include more adjacent
198      * dirty blocks if possible, to limit the number of I/O operations and
199      * run efficiently even with a small granularity.
200      */
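    /* For example, with a 64 KiB granularity and a 2 MiB target cluster size,
     * the first copy that touches a cluster is rounded out by
     * bdrv_round_to_clusters() below to cover the whole 2 MiB cluster. */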
201     nb_chunks = 0;
202     nb_sectors = 0;
203     next_sector = sector_num;
204     next_chunk = sector_num / sectors_per_chunk;
205 
206     /* Wait for I/O to this cluster (from a previous iteration) to be done.  */
207     while (test_bit(next_chunk, s->in_flight_bitmap)) {
208         trace_mirror_yield_in_flight(s, sector_num, s->in_flight);
209         s->waiting_for_io = true;
210         qemu_coroutine_yield();
211         s->waiting_for_io = false;
212     }
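    /* mirror_iteration_done() re-enters this coroutine whenever it completes a
     * request while s->waiting_for_io is set, so the yield above resumes as
     * soon as some in-flight operation finishes. */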
213 
214     do {
215         int added_sectors, added_chunks;
216 
217         if (!bdrv_get_dirty(source, s->dirty_bitmap, next_sector) ||
218             test_bit(next_chunk, s->in_flight_bitmap)) {
219             assert(nb_sectors > 0);
220             break;
221         }
222 
223         added_sectors = sectors_per_chunk;
224         if (s->cow_bitmap && !test_bit(next_chunk, s->cow_bitmap)) {
225             bdrv_round_to_clusters(s->target,
226                                    next_sector, added_sectors,
227                                    &next_sector, &added_sectors);
228 
229             /* On the first iteration, the rounding may make us copy
230              * sectors before the first dirty one.
231              */
232             if (next_sector < sector_num) {
233                 assert(nb_sectors == 0);
234                 sector_num = next_sector;
235                 next_chunk = next_sector / sectors_per_chunk;
236             }
237         }
238 
239         added_sectors = MIN(added_sectors, end - (sector_num + nb_sectors));
240         added_chunks = (added_sectors + sectors_per_chunk - 1) / sectors_per_chunk;
241 
242         /* When doing COW, it may happen that there is not enough space for
243          * a full cluster.  Wait if that is the case.
244          */
245         while (nb_chunks == 0 && s->buf_free_count < added_chunks) {
246             trace_mirror_yield_buf_busy(s, nb_chunks, s->in_flight);
247             s->waiting_for_io = true;
248             qemu_coroutine_yield();
249             s->waiting_for_io = false;
250         }
251         if (s->buf_free_count < nb_chunks + added_chunks) {
252             trace_mirror_break_buf_busy(s, nb_chunks, s->in_flight);
253             break;
254         }
255         if (max_iov < nb_chunks + added_chunks) {
256             trace_mirror_break_iov_max(s, nb_chunks, added_chunks);
257             break;
258         }
259 
260         /* We have enough free space to copy these sectors.  */
261         bitmap_set(s->in_flight_bitmap, next_chunk, added_chunks);
262 
263         nb_sectors += added_sectors;
264         nb_chunks += added_chunks;
265         next_sector += added_sectors;
266         next_chunk += added_chunks;
267         if (!s->synced && s->common.speed) {
268             delay_ns = ratelimit_calculate_delay(&s->limit, added_sectors);
269         }
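        /* A non-zero delay from the rate limiter terminates the merging loop
         * below; mirror_run() then sleeps for the returned delay_ns. */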
270     } while (delay_ns == 0 && next_sector < end);
271 
272     /* Allocate a MirrorOp that is used as an AIO callback.  */
273     op = g_new(MirrorOp, 1);
274     op->s = s;
275     op->sector_num = sector_num;
276     op->nb_sectors = nb_sectors;
277 
278     /* Now make a QEMUIOVector taking enough granularity-sized chunks
279      * from s->buf_free.
280      */
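    /* The last chunk may be shorter than the granularity if nb_sectors was
     * clamped at the end of the device, hence the MIN() against the bytes
     * still remaining. */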
281     qemu_iovec_init(&op->qiov, nb_chunks);
282     next_sector = sector_num;
283     while (nb_chunks-- > 0) {
284         MirrorBuffer *buf = QSIMPLEQ_FIRST(&s->buf_free);
285         size_t remaining = (nb_sectors * BDRV_SECTOR_SIZE) - op->qiov.size;
286 
287         QSIMPLEQ_REMOVE_HEAD(&s->buf_free, next);
288         s->buf_free_count--;
289         qemu_iovec_add(&op->qiov, buf, MIN(s->granularity, remaining));
290 
291         /* Advance the HBitmapIter in parallel, so that we do not examine
292          * the same sector twice.
293          */
294         if (next_sector > hbitmap_next_sector
295             && bdrv_get_dirty(source, s->dirty_bitmap, next_sector)) {
296             hbitmap_next_sector = hbitmap_iter_next(&s->hbi);
297         }
298 
299         next_sector += sectors_per_chunk;
300     }
301 
302     bdrv_reset_dirty_bitmap(s->dirty_bitmap, sector_num, nb_sectors);
303 
304     /* Copy the dirty cluster.  */
305     s->in_flight++;
306     s->sectors_in_flight += nb_sectors;
307     trace_mirror_one_iteration(s, sector_num, nb_sectors);
308 
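    /* Decide how to mirror this extent: real data is read from the source and
     * then written to the target, known-zero extents are written as zeroes
     * (unmapping if allowed), and unallocated extents are discarded so the
     * target stays as sparse as the source. */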
309     ret = bdrv_get_block_status_above(source, NULL, sector_num,
310                                       nb_sectors, &pnum, &file);
311     if (ret < 0 || pnum < nb_sectors ||
312             (ret & BDRV_BLOCK_DATA && !(ret & BDRV_BLOCK_ZERO))) {
313         bdrv_aio_readv(source, sector_num, &op->qiov, nb_sectors,
314                        mirror_read_complete, op);
315     } else if (ret & BDRV_BLOCK_ZERO) {
316         bdrv_aio_write_zeroes(s->target, sector_num, op->nb_sectors,
317                               s->unmap ? BDRV_REQ_MAY_UNMAP : 0,
318                               mirror_write_complete, op);
319     } else {
320         assert(!(ret & BDRV_BLOCK_DATA));
321         bdrv_aio_discard(s->target, sector_num, op->nb_sectors,
322                          mirror_write_complete, op);
323     }
324     return delay_ns;
325 }
326 
327 static void mirror_free_init(MirrorBlockJob *s)
328 {
329     int granularity = s->granularity;
330     size_t buf_size = s->buf_size;
331     uint8_t *buf = s->buf;
332 
333     assert(s->buf_free_count == 0);
334     QSIMPLEQ_INIT(&s->buf_free);
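    /* buf_size is a multiple of the granularity: mirror_start_job() rounds it
     * up, and mirror_run() only ever enlarges it to the (power-of-two) target
     * cluster size.  The loop therefore carves s->buf into exactly
     * buf_size / granularity free chunks. */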
335     while (buf_size != 0) {
336         MirrorBuffer *cur = (MirrorBuffer *)buf;
337         QSIMPLEQ_INSERT_TAIL(&s->buf_free, cur, next);
338         s->buf_free_count++;
339         buf_size -= granularity;
340         buf += granularity;
341     }
342 }
343 
344 static void mirror_drain(MirrorBlockJob *s)
345 {
346     while (s->in_flight > 0) {
347         s->waiting_for_io = true;
348         qemu_coroutine_yield();
349         s->waiting_for_io = false;
350     }
351 }
352 
353 typedef struct {
354     int ret;
355 } MirrorExitData;
356 
357 static void mirror_exit(BlockJob *job, void *opaque)
358 {
359     MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
360     MirrorExitData *data = opaque;
361     AioContext *replace_aio_context = NULL;
362     BlockDriverState *src = s->common.bs;
363 
364     /* Make sure that the source BDS doesn't go away until we have called
365      * block_job_completed(). */
366     bdrv_ref(src);
367 
368     if (s->to_replace) {
369         replace_aio_context = bdrv_get_aio_context(s->to_replace);
370         aio_context_acquire(replace_aio_context);
371     }
372 
373     if (s->should_complete && data->ret == 0) {
374         BlockDriverState *to_replace = s->common.bs;
375         if (s->to_replace) {
376             to_replace = s->to_replace;
377         }
378 
379         /* This was checked in mirror_start_job(), but meanwhile one of the
380          * nodes could have been newly attached to a BlockBackend. */
381         if (to_replace->blk && s->target->blk) {
382             error_report("block job: Can't create node with two BlockBackends");
383             data->ret = -EINVAL;
384             goto out;
385         }
386 
387         if (bdrv_get_flags(s->target) != bdrv_get_flags(to_replace)) {
388             bdrv_reopen(s->target, bdrv_get_flags(to_replace), NULL);
389         }
390         bdrv_replace_in_backing_chain(to_replace, s->target);
391     }
392 
393 out:
394     if (s->to_replace) {
395         bdrv_op_unblock_all(s->to_replace, s->replace_blocker);
396         error_free(s->replace_blocker);
397         bdrv_unref(s->to_replace);
398     }
399     if (replace_aio_context) {
400         aio_context_release(replace_aio_context);
401     }
402     g_free(s->replaces);
403     bdrv_op_unblock_all(s->target, s->common.blocker);
404     bdrv_unref(s->target);
405     block_job_completed(&s->common, data->ret);
406     g_free(data);
407     bdrv_drained_end(src);
408     bdrv_unref(src);
409 }
410 
411 static void coroutine_fn mirror_run(void *opaque)
412 {
413     MirrorBlockJob *s = opaque;
414     MirrorExitData *data;
415     BlockDriverState *bs = s->common.bs;
416     int64_t sector_num, end, length;
417     uint64_t last_pause_ns;
418     BlockDriverInfo bdi;
419     char backing_filename[2]; /* we only need 2 characters because we are only
420                                  checking for an empty string */
421     int ret = 0;
422     int n;
423 
424     if (block_job_is_cancelled(&s->common)) {
425         goto immediate_exit;
426     }
427 
428     s->bdev_length = bdrv_getlength(bs);
429     if (s->bdev_length < 0) {
430         ret = s->bdev_length;
431         goto immediate_exit;
432     } else if (s->bdev_length == 0) {
433         /* Report BLOCK_JOB_READY and wait for completion. */
434         block_job_event_ready(&s->common);
435         s->synced = true;
436         while (!block_job_is_cancelled(&s->common) && !s->should_complete) {
437             block_job_yield(&s->common);
438         }
439         s->common.cancelled = false;
440         goto immediate_exit;
441     }
442 
443     length = DIV_ROUND_UP(s->bdev_length, s->granularity);
444     s->in_flight_bitmap = bitmap_new(length);
445 
446     /* If we have no backing file yet in the destination, we cannot let
447      * the destination do COW.  Instead, we copy sectors around the
448      * dirty data if needed.  We need a bitmap to do that.
449      */
450     bdrv_get_backing_filename(s->target, backing_filename,
451                               sizeof(backing_filename));
452     if (backing_filename[0] && !s->target->backing) {
453         ret = bdrv_get_info(s->target, &bdi);
454         if (ret < 0) {
455             goto immediate_exit;
456         }
457         if (s->granularity < bdi.cluster_size) {
458             s->buf_size = MAX(s->buf_size, bdi.cluster_size);
459             s->cow_bitmap = bitmap_new(length);
460         }
461     }
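    /* Doing COW means copying whole target clusters, so the buffer must hold
     * at least one cluster; hence the MAX() against bdi.cluster_size above. */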
462 
463     end = s->bdev_length / BDRV_SECTOR_SIZE;
464     s->buf = qemu_try_blockalign(bs, s->buf_size);
465     if (s->buf == NULL) {
466         ret = -ENOMEM;
467         goto immediate_exit;
468     }
469 
470     mirror_free_init(s);
471 
472     last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
473     if (!s->is_none_mode) {
474         /* First part, loop on the sectors and initialize the dirty bitmap.  */
475         BlockDriverState *base = s->base;
476         bool mark_all_dirty = s->base == NULL && !bdrv_has_zero_init(s->target);
477 
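        /* Pre-populate the dirty bitmap: every region allocated above @base
         * must be copied, and if the target cannot guarantee zero-initialised
         * blocks even unallocated regions have to be written out. */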
478         for (sector_num = 0; sector_num < end; ) {
479             /* Just to make sure we are not exceeding the int limit. */
480             int nb_sectors = MIN(INT_MAX >> BDRV_SECTOR_BITS,
481                                  end - sector_num);
482             int64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
483 
484             if (now - last_pause_ns > SLICE_TIME) {
485                 last_pause_ns = now;
486                 block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, 0);
487             }
488 
489             if (block_job_is_cancelled(&s->common)) {
490                 goto immediate_exit;
491             }
492 
493             ret = bdrv_is_allocated_above(bs, base, sector_num, nb_sectors, &n);
494 
495             if (ret < 0) {
496                 goto immediate_exit;
497             }
498 
499             assert(n > 0);
500             if (ret == 1 || mark_all_dirty) {
501                 bdrv_set_dirty_bitmap(s->dirty_bitmap, sector_num, n);
502             }
503             sector_num += n;
504         }
505     }
506 
507     bdrv_dirty_iter_init(s->dirty_bitmap, &s->hbi);
508     for (;;) {
509         uint64_t delay_ns = 0;
510         int64_t cnt;
511         bool should_complete;
512 
513         if (s->ret < 0) {
514             ret = s->ret;
515             goto immediate_exit;
516         }
517 
518         cnt = bdrv_get_dirty_count(s->dirty_bitmap);
519         /* s->common.offset contains the number of bytes already processed so
520          * far, cnt is the number of dirty sectors remaining and
521          * s->sectors_in_flight is the number of sectors currently being
522          * processed; together those are the current total operation length */
523         s->common.len = s->common.offset +
524                         (cnt + s->sectors_in_flight) * BDRV_SECTOR_SIZE;
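        /* The total is recomputed on every iteration because guest writes
         * keep adding new dirty sectors while the job runs. */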
525 
526         /* Note that even when no rate limit is applied we need to yield
527          * periodically with no pending I/O so that bdrv_drain_all() returns.
528          * We do so every SLICE_TIME nanoseconds, or when there is an error,
529          * or when the source is clean, whichever comes first.
530          */
531         if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - last_pause_ns < SLICE_TIME &&
532             s->common.iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
533             if (s->in_flight == MAX_IN_FLIGHT || s->buf_free_count == 0 ||
534                 (cnt == 0 && s->in_flight > 0)) {
535                 trace_mirror_yield(s, s->in_flight, s->buf_free_count, cnt);
536                 s->waiting_for_io = true;
537                 qemu_coroutine_yield();
538                 s->waiting_for_io = false;
539                 continue;
540             } else if (cnt != 0) {
541                 delay_ns = mirror_iteration(s);
542             }
543         }
544 
545         should_complete = false;
546         if (s->in_flight == 0 && cnt == 0) {
547             trace_mirror_before_flush(s);
548             ret = bdrv_flush(s->target);
549             if (ret < 0) {
550                 if (mirror_error_action(s, false, -ret) ==
551                     BLOCK_ERROR_ACTION_REPORT) {
552                     goto immediate_exit;
553                 }
554             } else {
555                 /* We're out of the initial copying phase.  From now on, if the job
556                  * is cancelled we will actually complete all pending I/O and
557                  * report completion.  This way, block-job-cancel will leave
558                  * the target in a consistent state.
559                  */
560                 if (!s->synced) {
561                     block_job_event_ready(&s->common);
562                     s->synced = true;
563                 }
564 
565                 should_complete = s->should_complete ||
566                     block_job_is_cancelled(&s->common);
567                 cnt = bdrv_get_dirty_count(s->dirty_bitmap);
568             }
569         }
570 
571         if (cnt == 0 && should_complete) {
572             /* The dirty bitmap is not updated while operations are pending.
573              * If we're about to exit, wait for pending operations before
574              * calling bdrv_get_dirty_count(), or we may exit while the
575              * source has dirty data to copy!
576              *
577              * Note that the guest can still submit I/O while this code
578              * runs, which is why the dirty count is re-read after the drain.
579              */
580             trace_mirror_before_drain(s, cnt);
581             bdrv_drain(bs);
582             cnt = bdrv_get_dirty_count(s->dirty_bitmap);
583         }
584 
585         ret = 0;
586         trace_mirror_before_sleep(s, cnt, s->synced, delay_ns);
587         if (!s->synced) {
588             block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
589             if (block_job_is_cancelled(&s->common)) {
590                 break;
591             }
592         } else if (!should_complete) {
593             delay_ns = (s->in_flight == 0 && cnt == 0 ? SLICE_TIME : 0);
594             block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
595         } else if (cnt == 0) {
596             /* The two disks are in sync.  Exit and report successful
597              * completion.
598              */
599             assert(QLIST_EMPTY(&bs->tracked_requests));
600             s->common.cancelled = false;
601             break;
602         }
603         last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
604     }
605 
606 immediate_exit:
607     if (s->in_flight > 0) {
608         /* We get here only if something went wrong.  Either the job failed,
609          * or it was cancelled prematurely so that we do not guarantee that
610          * the target is a copy of the source.
611          */
612         assert(ret < 0 || (!s->synced && block_job_is_cancelled(&s->common)));
613         mirror_drain(s);
614     }
615 
616     assert(s->in_flight == 0);
617     qemu_vfree(s->buf);
618     g_free(s->cow_bitmap);
619     g_free(s->in_flight_bitmap);
620     bdrv_release_dirty_bitmap(bs, s->dirty_bitmap);
621     if (s->target->blk) {
622         blk_iostatus_disable(s->target->blk);
623     }
624 
625     data = g_malloc(sizeof(*data));
626     data->ret = ret;
627     /* Before we switch to target in mirror_exit, make sure data doesn't
628      * change. */
629     bdrv_drained_begin(s->common.bs);
630     block_job_defer_to_main_loop(&s->common, mirror_exit, data);
631 }
632 
633 static void mirror_set_speed(BlockJob *job, int64_t speed, Error **errp)
634 {
635     MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
636 
637     if (speed < 0) {
638         error_setg(errp, QERR_INVALID_PARAMETER, "speed");
639         return;
640     }
641     ratelimit_set_speed(&s->limit, speed / BDRV_SECTOR_SIZE, SLICE_TIME);
642 }
643 
644 static void mirror_iostatus_reset(BlockJob *job)
645 {
646     MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
647 
648     if (s->target->blk) {
649         blk_iostatus_reset(s->target->blk);
650     }
651 }
652 
653 static void mirror_complete(BlockJob *job, Error **errp)
654 {
655     MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
656     Error *local_err = NULL;
657     int ret;
658 
659     ret = bdrv_open_backing_file(s->target, NULL, "backing", &local_err);
660     if (ret < 0) {
661         error_propagate(errp, local_err);
662         return;
663     }
664     if (!s->synced) {
665         error_setg(errp, QERR_BLOCK_JOB_NOT_READY, job->id);
666         return;
667     }
668 
669     /* check the target bs is not blocked and block all operations on it */
670     if (s->replaces) {
671         AioContext *replace_aio_context;
672 
673         s->to_replace = bdrv_find_node(s->replaces);
674         if (!s->to_replace) {
675             error_setg(errp, "Node name '%s' not found", s->replaces);
676             return;
677         }
678 
679         replace_aio_context = bdrv_get_aio_context(s->to_replace);
680         aio_context_acquire(replace_aio_context);
681 
682         error_setg(&s->replace_blocker,
683                    "block device is in use by block-job-complete");
684         bdrv_op_block_all(s->to_replace, s->replace_blocker);
685         bdrv_ref(s->to_replace);
686 
687         aio_context_release(replace_aio_context);
688     }
689 
690     s->should_complete = true;
691     block_job_enter(&s->common);
692 }
693 
694 static const BlockJobDriver mirror_job_driver = {
695     .instance_size = sizeof(MirrorBlockJob),
696     .job_type      = BLOCK_JOB_TYPE_MIRROR,
697     .set_speed     = mirror_set_speed,
698     .iostatus_reset = mirror_iostatus_reset,
699     .complete      = mirror_complete,
700 };
701 
702 static const BlockJobDriver commit_active_job_driver = {
703     .instance_size = sizeof(MirrorBlockJob),
704     .job_type      = BLOCK_JOB_TYPE_COMMIT,
705     .set_speed     = mirror_set_speed,
706     .iostatus_reset
707                    = mirror_iostatus_reset,
708     .complete      = mirror_complete,
709 };
710 
711 static void mirror_start_job(BlockDriverState *bs, BlockDriverState *target,
712                              const char *replaces,
713                              int64_t speed, uint32_t granularity,
714                              int64_t buf_size,
715                              BlockdevOnError on_source_error,
716                              BlockdevOnError on_target_error,
717                              bool unmap,
718                              BlockCompletionFunc *cb,
719                              void *opaque, Error **errp,
720                              const BlockJobDriver *driver,
721                              bool is_none_mode, BlockDriverState *base)
722 {
723     MirrorBlockJob *s;
724     BlockDriverState *replaced_bs;
725 
726     if (granularity == 0) {
727         granularity = bdrv_get_default_bitmap_granularity(target);
728     }
729 
730     assert((granularity & (granularity - 1)) == 0);
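    /* (x & (x - 1)) == 0 holds exactly for powers of two; granularity is
     * non-zero here because a zero value was replaced by the default above. */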
731 
732     if ((on_source_error == BLOCKDEV_ON_ERROR_STOP ||
733          on_source_error == BLOCKDEV_ON_ERROR_ENOSPC) &&
734         (!bs->blk || !blk_iostatus_is_enabled(bs->blk))) {
735         error_setg(errp, QERR_INVALID_PARAMETER, "on-source-error");
736         return;
737     }
738 
739     if (buf_size < 0) {
740         error_setg(errp, "Invalid parameter 'buf-size'");
741         return;
742     }
743 
744     if (buf_size == 0) {
745         buf_size = DEFAULT_MIRROR_BUF_SIZE;
746     }
747 
748     /* We can't support this case as long as the block layer can't handle
749      * multiple BlockBackends per BlockDriverState. */
750     if (replaces) {
751         replaced_bs = bdrv_lookup_bs(replaces, replaces, errp);
752         if (replaced_bs == NULL) {
753             return;
754         }
755     } else {
756         replaced_bs = bs;
757     }
758     if (replaced_bs->blk && target->blk) {
759         error_setg(errp, "Can't create node with two BlockBackends");
760         return;
761     }
762 
763     s = block_job_create(driver, bs, speed, cb, opaque, errp);
764     if (!s) {
765         return;
766     }
767 
768     s->replaces = g_strdup(replaces);
769     s->on_source_error = on_source_error;
770     s->on_target_error = on_target_error;
771     s->target = target;
772     s->is_none_mode = is_none_mode;
773     s->base = base;
774     s->granularity = granularity;
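    /* Round the buffer size up to a whole number of granularity-sized chunks
     * so that mirror_free_init() can carve it evenly. */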
775     s->buf_size = ROUND_UP(buf_size, granularity);
776     s->unmap = unmap;
777 
778     s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity, NULL, errp);
779     if (!s->dirty_bitmap) {
780         g_free(s->replaces);
781         block_job_unref(&s->common);
782         return;
783     }
784 
785     bdrv_op_block_all(s->target, s->common.blocker);
786 
787     bdrv_set_enable_write_cache(s->target, true);
788     if (s->target->blk) {
789         blk_set_on_error(s->target->blk, on_target_error, on_target_error);
790         blk_iostatus_enable(s->target->blk);
791     }
792     s->common.co = qemu_coroutine_create(mirror_run);
793     trace_mirror_start(bs, s, s->common.co, opaque);
794     qemu_coroutine_enter(s->common.co, s);
795 }
796 
797 void mirror_start(BlockDriverState *bs, BlockDriverState *target,
798                   const char *replaces,
799                   int64_t speed, uint32_t granularity, int64_t buf_size,
800                   MirrorSyncMode mode, BlockdevOnError on_source_error,
801                   BlockdevOnError on_target_error,
802                   bool unmap,
803                   BlockCompletionFunc *cb,
804                   void *opaque, Error **errp)
805 {
806     bool is_none_mode;
807     BlockDriverState *base;
808 
809     if (mode == MIRROR_SYNC_MODE_INCREMENTAL) {
810         error_setg(errp, "Sync mode 'incremental' not supported");
811         return;
812     }
813     is_none_mode = mode == MIRROR_SYNC_MODE_NONE;
814     base = mode == MIRROR_SYNC_MODE_TOP ? backing_bs(bs) : NULL;
815     mirror_start_job(bs, target, replaces,
816                      speed, granularity, buf_size,
817                      on_source_error, on_target_error, unmap, cb, opaque, errp,
818                      &mirror_job_driver, is_none_mode, base);
819 }
820 
821 void commit_active_start(BlockDriverState *bs, BlockDriverState *base,
822                          int64_t speed,
823                          BlockdevOnError on_error,
824                          BlockCompletionFunc *cb,
825                          void *opaque, Error **errp)
826 {
827     int64_t length, base_length;
828     int orig_base_flags;
829     int ret;
830     Error *local_err = NULL;
831 
832     orig_base_flags = bdrv_get_flags(base);
833 
834     if (bdrv_reopen(base, bs->open_flags, errp)) {
835         return;
836     }
837 
838     length = bdrv_getlength(bs);
839     if (length < 0) {
840         error_setg_errno(errp, -length,
841                          "Unable to determine length of %s", bs->filename);
842         goto error_restore_flags;
843     }
844 
845     base_length = bdrv_getlength(base);
846     if (base_length < 0) {
847         error_setg_errno(errp, -base_length,
848                          "Unable to determine length of %s", base->filename);
849         goto error_restore_flags;
850     }
851 
852     if (length > base_length) {
853         ret = bdrv_truncate(base, length);
854         if (ret < 0) {
855             error_setg_errno(errp, -ret,
856                             "Top image %s is larger than base image %s, and "
857                              "resize of base image failed",
858                              bs->filename, base->filename);
859             goto error_restore_flags;
860         }
861     }
862 
863     bdrv_ref(base);
864     mirror_start_job(bs, base, NULL, speed, 0, 0,
865                      on_error, on_error, false, cb, opaque, &local_err,
866                      &commit_active_job_driver, false, base);
867     if (local_err) {
868         error_propagate(errp, local_err);
869         goto error_restore_flags;
870     }
871 
872     return;
873 
874 error_restore_flags:
875     /* ignore error and errp for bdrv_reopen, because we want to propagate
876      * the original error */
877     bdrv_reopen(base, orig_base_flags, NULL);
878     return;
879 }
880