xref: /qemu/block/mirror.c (revision bf6e6a37)
/*
 * Image mirroring
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Paolo Bonzini  <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/coroutine.h"
#include "qemu/range.h"
#include "trace.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qemu/ratelimit.h"
#include "qemu/bitmap.h"

#define MAX_IN_FLIGHT 16
#define MAX_IO_BYTES (1 << 20) /* 1 MiB */
#define DEFAULT_MIRROR_BUF_SIZE (MAX_IN_FLIGHT * MAX_IO_BYTES)

/* The mirroring buffer is a list of granularity-sized chunks.
 * Free chunks are organized in a list.
 */
typedef struct MirrorBuffer {
    QSIMPLEQ_ENTRY(MirrorBuffer) next;
} MirrorBuffer;

typedef struct MirrorOp MirrorOp;

typedef struct MirrorBlockJob {
    BlockJob common;
    BlockBackend *target;
    BlockDriverState *mirror_top_bs;
    BlockDriverState *base;

    /* The name of the graph node to replace */
    char *replaces;
    /* The BDS to replace */
    BlockDriverState *to_replace;
    /* Used to block operations on the drive-mirror-replace target */
    Error *replace_blocker;
    bool is_none_mode;
    BlockMirrorBackingMode backing_mode;
    MirrorCopyMode copy_mode;
    BlockdevOnError on_source_error, on_target_error;
    bool synced;
    /* Set when the target is synced (dirty bitmap is clean, nothing
     * in flight) and the job is running in active mode */
    bool actively_synced;
    bool should_complete;
    int64_t granularity;
    size_t buf_size;
    int64_t bdev_length;
    unsigned long *cow_bitmap;
    BdrvDirtyBitmap *dirty_bitmap;
    BdrvDirtyBitmapIter *dbi;
    uint8_t *buf;
    QSIMPLEQ_HEAD(, MirrorBuffer) buf_free;
    int buf_free_count;

    uint64_t last_pause_ns;
    unsigned long *in_flight_bitmap;
    int in_flight;
    int64_t bytes_in_flight;
    QTAILQ_HEAD(MirrorOpList, MirrorOp) ops_in_flight;
    int ret;
    bool unmap;
    int target_cluster_size;
    int max_iov;
    bool initial_zeroing_ongoing;
    int in_active_write_counter;
} MirrorBlockJob;

typedef struct MirrorBDSOpaque {
    MirrorBlockJob *job;
} MirrorBDSOpaque;

struct MirrorOp {
    MirrorBlockJob *s;
    QEMUIOVector qiov;
    int64_t offset;
    uint64_t bytes;

    /* The pointee is set by mirror_co_read(), mirror_co_zero(), and
     * mirror_co_discard() before yielding for the first time */
    int64_t *bytes_handled;

    bool is_pseudo_op;
    bool is_active_write;
    CoQueue waiting_requests;

    QTAILQ_ENTRY(MirrorOp) next;
};

typedef enum MirrorMethod {
    MIRROR_METHOD_COPY,
    MIRROR_METHOD_ZERO,
    MIRROR_METHOD_DISCARD,
} MirrorMethod;

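/* Reset the synced state and map an I/O error to a BlockErrorAction
 * according to the on_source_error or on_target_error policy. */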
static BlockErrorAction mirror_error_action(MirrorBlockJob *s, bool read,
                                            int error)
{
    s->synced = false;
    s->actively_synced = false;
    if (read) {
        return block_job_error_action(&s->common, s->on_source_error,
                                      true, error);
    } else {
        return block_job_error_action(&s->common, s->on_target_error,
                                      false, error);
    }
}

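/* Wait until no in-flight operation (other than @self) overlaps the
 * given byte range any more.  Pass @self == NULL when waiting on
 * behalf of a new background operation. */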
static void coroutine_fn mirror_wait_on_conflicts(MirrorOp *self,
                                                  MirrorBlockJob *s,
                                                  uint64_t offset,
                                                  uint64_t bytes)
{
    uint64_t self_start_chunk = offset / s->granularity;
    uint64_t self_end_chunk = DIV_ROUND_UP(offset + bytes, s->granularity);
    uint64_t self_nb_chunks = self_end_chunk - self_start_chunk;

    while (find_next_bit(s->in_flight_bitmap, self_end_chunk,
                         self_start_chunk) < self_end_chunk &&
           s->ret >= 0)
    {
        MirrorOp *op;

        QTAILQ_FOREACH(op, &s->ops_in_flight, next) {
            uint64_t op_start_chunk = op->offset / s->granularity;
            uint64_t op_nb_chunks = DIV_ROUND_UP(op->offset + op->bytes,
                                                 s->granularity) -
                                    op_start_chunk;

            if (op == self) {
                continue;
            }

            if (ranges_overlap(self_start_chunk, self_nb_chunks,
                               op_start_chunk, op_nb_chunks))
            {
                qemu_co_queue_wait(&op->waiting_requests, NULL);
                break;
            }
        }
    }
}

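/* Tear down a finished operation: return its buffer chunks to the free
 * list, clear its bits in the in-flight bitmap, update job progress on
 * success and wake up any requests waiting on it. */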
static void coroutine_fn mirror_iteration_done(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;
    struct iovec *iov;
    int64_t chunk_num;
    int i, nb_chunks;

    trace_mirror_iteration_done(s, op->offset, op->bytes, ret);

    s->in_flight--;
    s->bytes_in_flight -= op->bytes;
    iov = op->qiov.iov;
    for (i = 0; i < op->qiov.niov; i++) {
        MirrorBuffer *buf = (MirrorBuffer *) iov[i].iov_base;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, buf, next);
        s->buf_free_count++;
    }

    chunk_num = op->offset / s->granularity;
    nb_chunks = DIV_ROUND_UP(op->bytes, s->granularity);

    bitmap_clear(s->in_flight_bitmap, chunk_num, nb_chunks);
    QTAILQ_REMOVE(&s->ops_in_flight, op, next);
    if (ret >= 0) {
        if (s->cow_bitmap) {
            bitmap_set(s->cow_bitmap, chunk_num, nb_chunks);
        }
        if (!s->initial_zeroing_ongoing) {
            job_progress_update(&s->common.job, op->bytes);
        }
    }
    qemu_iovec_destroy(&op->qiov);

    qemu_co_queue_restart_all(&op->waiting_requests);
    g_free(op);
}

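/* Completion path for writes (and zero/discard operations) to the
 * target: on error, re-dirty the affected range and apply the target
 * error policy before finishing the operation. */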
static void coroutine_fn mirror_write_complete(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;

    aio_context_acquire(blk_get_aio_context(s->common.blk));
    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->offset, op->bytes);
        action = mirror_error_action(s, false, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }
    }
    mirror_iteration_done(op, ret);
    aio_context_release(blk_get_aio_context(s->common.blk));
}

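/* Completion path for reads from the source: on success, submit the
 * matching write to the target; on error, re-dirty the range and apply
 * the source error policy. */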
static void coroutine_fn mirror_read_complete(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;

    aio_context_acquire(blk_get_aio_context(s->common.blk));
    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->offset, op->bytes);
        action = mirror_error_action(s, true, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }

        mirror_iteration_done(op, ret);
    } else {
        ret = blk_co_pwritev(s->target, op->offset,
                             op->qiov.size, &op->qiov, 0);
        mirror_write_complete(op, ret);
    }
    aio_context_release(blk_get_aio_context(s->common.blk));
}

/* Clip bytes relative to offset to not exceed end-of-file */
static inline int64_t mirror_clip_bytes(MirrorBlockJob *s,
                                        int64_t offset,
                                        int64_t bytes)
{
    return MIN(bytes, s->bdev_length - offset);
}

/* Round offset and/or bytes to target cluster if COW is needed, and
 * return the offset of the adjusted tail against original. */
static int mirror_cow_align(MirrorBlockJob *s, int64_t *offset,
                            uint64_t *bytes)
{
    bool need_cow;
    int ret = 0;
    int64_t align_offset = *offset;
    int64_t align_bytes = *bytes;
    int max_bytes = s->granularity * s->max_iov;

    need_cow = !test_bit(*offset / s->granularity, s->cow_bitmap);
    need_cow |= !test_bit((*offset + *bytes - 1) / s->granularity,
                          s->cow_bitmap);
    if (need_cow) {
        bdrv_round_to_clusters(blk_bs(s->target), *offset, *bytes,
                               &align_offset, &align_bytes);
    }

    if (align_bytes > max_bytes) {
        align_bytes = max_bytes;
        if (need_cow) {
            align_bytes = QEMU_ALIGN_DOWN(align_bytes, s->target_cluster_size);
        }
    }
    /* Clipping may result in align_bytes unaligned to chunk boundary, but
     * that doesn't matter because it's already the end of source image. */
    align_bytes = mirror_clip_bytes(s, align_offset, align_bytes);

    ret = align_offset + align_bytes - (*offset + *bytes);
    *offset = align_offset;
    *bytes = align_bytes;
    assert(ret >= 0);
    return ret;
}

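/* Wait for some in-flight operation of the given kind (active or
 * background) to finish; at least one such operation must exist. */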
static inline void mirror_wait_for_any_operation(MirrorBlockJob *s, bool active)
{
    MirrorOp *op;

    QTAILQ_FOREACH(op, &s->ops_in_flight, next) {
        /* Do not wait on pseudo ops, because it may in turn wait on
         * some other operation to start, which may in fact be the
         * caller of this function.  Since there is only one pseudo op
         * at any given time, we will always find some real operation
         * to wait on. */
        if (!op->is_pseudo_op && op->is_active_write == active) {
            qemu_co_queue_wait(&op->waiting_requests, NULL);
            return;
        }
    }
    abort();
}

static inline void mirror_wait_for_free_in_flight_slot(MirrorBlockJob *s)
{
    /* Only non-active operations use up in-flight slots */
    mirror_wait_for_any_operation(s, false);
}

/* Perform a mirror copy operation.
 *
 * *op->bytes_handled is set to the number of bytes copied after and
 * including offset, excluding any bytes copied prior to offset due
 * to alignment.  This will be op->bytes if no alignment is necessary,
 * or (new_end - op->offset) if the tail is rounded up or down due to
 * alignment or buffer limit.
 */
static void coroutine_fn mirror_co_read(void *opaque)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;
    int nb_chunks;
    uint64_t ret;
    uint64_t max_bytes;

    max_bytes = s->granularity * s->max_iov;

    /* We can only handle as much as buf_size at a time. */
    op->bytes = MIN(s->buf_size, MIN(max_bytes, op->bytes));
    assert(op->bytes);
    assert(op->bytes < BDRV_REQUEST_MAX_BYTES);
    *op->bytes_handled = op->bytes;

    if (s->cow_bitmap) {
        *op->bytes_handled += mirror_cow_align(s, &op->offset, &op->bytes);
    }
    /* Cannot exceed BDRV_REQUEST_MAX_BYTES + INT_MAX */
    assert(*op->bytes_handled <= UINT_MAX);
    assert(op->bytes <= s->buf_size);
    /* The offset is granularity-aligned because:
     * 1) Caller passes in aligned values;
     * 2) mirror_cow_align is used only when target cluster is larger. */
    assert(QEMU_IS_ALIGNED(op->offset, s->granularity));
    /* The range is sector-aligned, since bdrv_getlength() rounds up. */
    assert(QEMU_IS_ALIGNED(op->bytes, BDRV_SECTOR_SIZE));
    nb_chunks = DIV_ROUND_UP(op->bytes, s->granularity);

    while (s->buf_free_count < nb_chunks) {
        trace_mirror_yield_in_flight(s, op->offset, s->in_flight);
        mirror_wait_for_free_in_flight_slot(s);
    }

    /* Now make a QEMUIOVector taking enough granularity-sized chunks
     * from s->buf_free.
     */
    qemu_iovec_init(&op->qiov, nb_chunks);
    while (nb_chunks-- > 0) {
        MirrorBuffer *buf = QSIMPLEQ_FIRST(&s->buf_free);
        size_t remaining = op->bytes - op->qiov.size;

        QSIMPLEQ_REMOVE_HEAD(&s->buf_free, next);
        s->buf_free_count--;
        qemu_iovec_add(&op->qiov, buf, MIN(s->granularity, remaining));
    }

    /* Copy the dirty cluster.  */
    s->in_flight++;
    s->bytes_in_flight += op->bytes;
    trace_mirror_one_iteration(s, op->offset, op->bytes);

    ret = bdrv_co_preadv(s->mirror_top_bs->backing, op->offset, op->bytes,
                         &op->qiov, 0);
    mirror_read_complete(op, ret);
}

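/* Write zeroes to the target for the given range, allowing the target
 * to unmap it if the job was created with unmap enabled. */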
static void coroutine_fn mirror_co_zero(void *opaque)
{
    MirrorOp *op = opaque;
    int ret;

    op->s->in_flight++;
    op->s->bytes_in_flight += op->bytes;
    *op->bytes_handled = op->bytes;

    ret = blk_co_pwrite_zeroes(op->s->target, op->offset, op->bytes,
                               op->s->unmap ? BDRV_REQ_MAY_UNMAP : 0);
    mirror_write_complete(op, ret);
}

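/* Discard the given range on the target. */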
static void coroutine_fn mirror_co_discard(void *opaque)
{
    MirrorOp *op = opaque;
    int ret;

    op->s->in_flight++;
    op->s->bytes_in_flight += op->bytes;
    *op->bytes_handled = op->bytes;

    ret = blk_co_pdiscard(op->s->target, op->offset, op->bytes);
    mirror_write_complete(op, ret);
}

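/* Spawn a coroutine performing a copy, zero or discard operation and
 * register it as in flight.  Returns the number of bytes the new
 * operation handles from @offset onwards, as reported through
 * bytes_handled. */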
static unsigned mirror_perform(MirrorBlockJob *s, int64_t offset,
                               unsigned bytes, MirrorMethod mirror_method)
{
    MirrorOp *op;
    Coroutine *co;
    int64_t bytes_handled = -1;

    op = g_new(MirrorOp, 1);
    *op = (MirrorOp){
        .s              = s,
        .offset         = offset,
        .bytes          = bytes,
        .bytes_handled  = &bytes_handled,
    };
    qemu_co_queue_init(&op->waiting_requests);

    switch (mirror_method) {
    case MIRROR_METHOD_COPY:
        co = qemu_coroutine_create(mirror_co_read, op);
        break;
    case MIRROR_METHOD_ZERO:
        co = qemu_coroutine_create(mirror_co_zero, op);
        break;
    case MIRROR_METHOD_DISCARD:
        co = qemu_coroutine_create(mirror_co_discard, op);
        break;
    default:
        abort();
    }

    QTAILQ_INSERT_TAIL(&s->ops_in_flight, op, next);
    qemu_coroutine_enter(co);
    /* At this point, ownership of op has been moved to the coroutine
     * and the object may already be freed */

    /* Assert that this value has been set */
    assert(bytes_handled >= 0);

    /* Same assertion as in mirror_co_read() (and for mirror_co_zero()
     * and mirror_co_discard(), bytes_handled == op->bytes, which
     * is the @bytes parameter given to this function) */
    assert(bytes_handled <= UINT_MAX);
    return bytes_handled;
}

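/* One iteration of the background copy loop: find the next run of
 * dirty chunks, choose between copy, zero and discard based on the
 * block status, and submit the corresponding operations.  Returns the
 * rate-limiting delay in nanoseconds. */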
static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
{
    BlockDriverState *source = s->mirror_top_bs->backing->bs;
    MirrorOp *pseudo_op;
    int64_t offset;
    uint64_t delay_ns = 0, ret = 0;
    /* At least the first dirty chunk is mirrored in one iteration. */
    int nb_chunks = 1;
    bool write_zeroes_ok = bdrv_can_write_zeroes_with_unmap(blk_bs(s->target));
    int max_io_bytes = MAX(s->buf_size / MAX_IN_FLIGHT, MAX_IO_BYTES);

    bdrv_dirty_bitmap_lock(s->dirty_bitmap);
    offset = bdrv_dirty_iter_next(s->dbi);
    if (offset < 0) {
        bdrv_set_dirty_iter(s->dbi, 0);
        offset = bdrv_dirty_iter_next(s->dbi);
        trace_mirror_restart_iter(s, bdrv_get_dirty_count(s->dirty_bitmap));
        assert(offset >= 0);
    }
    bdrv_dirty_bitmap_unlock(s->dirty_bitmap);

    mirror_wait_on_conflicts(NULL, s, offset, 1);

    job_pause_point(&s->common.job);

    /* Find the number of consecutive dirty chunks following the first dirty
     * one, and wait for in flight requests in them. */
    bdrv_dirty_bitmap_lock(s->dirty_bitmap);
    while (nb_chunks * s->granularity < s->buf_size) {
        int64_t next_dirty;
        int64_t next_offset = offset + nb_chunks * s->granularity;
        int64_t next_chunk = next_offset / s->granularity;
        if (next_offset >= s->bdev_length ||
            !bdrv_get_dirty_locked(source, s->dirty_bitmap, next_offset)) {
            break;
        }
        if (test_bit(next_chunk, s->in_flight_bitmap)) {
            break;
        }

        next_dirty = bdrv_dirty_iter_next(s->dbi);
        if (next_dirty > next_offset || next_dirty < 0) {
            /* The bitmap iterator's cache is stale, refresh it */
            bdrv_set_dirty_iter(s->dbi, next_offset);
            next_dirty = bdrv_dirty_iter_next(s->dbi);
        }
        assert(next_dirty == next_offset);
        nb_chunks++;
    }

    /* Clear dirty bits before querying the block status, because
     * calling bdrv_block_status_above could yield - if some blocks are
     * marked dirty in this window, we need to know.
     */
    bdrv_reset_dirty_bitmap_locked(s->dirty_bitmap, offset,
                                   nb_chunks * s->granularity);
    bdrv_dirty_bitmap_unlock(s->dirty_bitmap);

    /* Before claiming an area in the in-flight bitmap, we have to
     * create a MirrorOp for it so that conflicting requests can wait
     * for it.  mirror_perform() will create the real MirrorOps later,
     * for now we just create a pseudo operation that will wake up all
     * conflicting requests once all real operations have been
     * launched. */
    pseudo_op = g_new(MirrorOp, 1);
    *pseudo_op = (MirrorOp){
        .offset         = offset,
        .bytes          = nb_chunks * s->granularity,
        .is_pseudo_op   = true,
    };
    qemu_co_queue_init(&pseudo_op->waiting_requests);
    QTAILQ_INSERT_TAIL(&s->ops_in_flight, pseudo_op, next);

    bitmap_set(s->in_flight_bitmap, offset / s->granularity, nb_chunks);
    while (nb_chunks > 0 && offset < s->bdev_length) {
        int ret;
        int64_t io_bytes;
        int64_t io_bytes_acct;
        MirrorMethod mirror_method = MIRROR_METHOD_COPY;

        assert(!(offset % s->granularity));
        ret = bdrv_block_status_above(source, NULL, offset,
                                      nb_chunks * s->granularity,
                                      &io_bytes, NULL, NULL);
        if (ret < 0) {
            io_bytes = MIN(nb_chunks * s->granularity, max_io_bytes);
        } else if (ret & BDRV_BLOCK_DATA) {
            io_bytes = MIN(io_bytes, max_io_bytes);
        }

        io_bytes -= io_bytes % s->granularity;
        if (io_bytes < s->granularity) {
            io_bytes = s->granularity;
        } else if (ret >= 0 && !(ret & BDRV_BLOCK_DATA)) {
            int64_t target_offset;
            int64_t target_bytes;
            bdrv_round_to_clusters(blk_bs(s->target), offset, io_bytes,
                                   &target_offset, &target_bytes);
            if (target_offset == offset &&
                target_bytes == io_bytes) {
                mirror_method = ret & BDRV_BLOCK_ZERO ?
                                    MIRROR_METHOD_ZERO :
                                    MIRROR_METHOD_DISCARD;
            }
        }

        while (s->in_flight >= MAX_IN_FLIGHT) {
            trace_mirror_yield_in_flight(s, offset, s->in_flight);
            mirror_wait_for_free_in_flight_slot(s);
        }

        if (s->ret < 0) {
            ret = 0;
            goto fail;
        }

        io_bytes = mirror_clip_bytes(s, offset, io_bytes);
        io_bytes = mirror_perform(s, offset, io_bytes, mirror_method);
        if (mirror_method != MIRROR_METHOD_COPY && write_zeroes_ok) {
            io_bytes_acct = 0;
        } else {
            io_bytes_acct = io_bytes;
        }
        assert(io_bytes);
        offset += io_bytes;
        nb_chunks -= DIV_ROUND_UP(io_bytes, s->granularity);
        delay_ns = block_job_ratelimit_get_delay(&s->common, io_bytes_acct);
    }

    ret = delay_ns;
fail:
    QTAILQ_REMOVE(&s->ops_in_flight, pseudo_op, next);
    qemu_co_queue_restart_all(&pseudo_op->waiting_requests);
    g_free(pseudo_op);

    return ret;
}

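/* Split the mirroring buffer into granularity-sized chunks and put
 * them all on the free list. */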
static void mirror_free_init(MirrorBlockJob *s)
{
    int granularity = s->granularity;
    size_t buf_size = s->buf_size;
    uint8_t *buf = s->buf;

    assert(s->buf_free_count == 0);
    QSIMPLEQ_INIT(&s->buf_free);
    while (buf_size != 0) {
        MirrorBuffer *cur = (MirrorBuffer *)buf;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, cur, next);
        s->buf_free_count++;
        buf_size -= granularity;
        buf += granularity;
    }
}

/* This is also used for the .pause callback. There is no matching
 * mirror_resume() because mirror_run() will begin iterating again
 * when the job is resumed.
 */
static void mirror_wait_for_all_io(MirrorBlockJob *s)
{
    while (s->in_flight > 0) {
        mirror_wait_for_free_in_flight_slot(s);
    }
}

typedef struct {
    int ret;
} MirrorExitData;

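/* Deferred to the main loop once mirror_run() has finished: drops the
 * job's permissions, switches the graph over to the target if
 * requested, removes the mirror_top filter node and completes the
 * job. */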
static void mirror_exit(Job *job, void *opaque)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
    BlockJob *bjob = &s->common;
    MirrorExitData *data = opaque;
    MirrorBDSOpaque *bs_opaque = s->mirror_top_bs->opaque;
    AioContext *replace_aio_context = NULL;
    BlockDriverState *src = s->mirror_top_bs->backing->bs;
    BlockDriverState *target_bs = blk_bs(s->target);
    BlockDriverState *mirror_top_bs = s->mirror_top_bs;
    Error *local_err = NULL;

    bdrv_release_dirty_bitmap(src, s->dirty_bitmap);

    /* Make sure that the source BDS doesn't go away before we have called
     * job_completed(). */
    bdrv_ref(src);
    bdrv_ref(mirror_top_bs);
    bdrv_ref(target_bs);

    /* Remove target parent that still uses BLK_PERM_WRITE/RESIZE before
     * inserting target_bs at s->to_replace, where we might not be able to get
     * these permissions.
     *
     * Note that blk_unref() alone doesn't necessarily drop permissions because
     * we might be running nested inside mirror_drain(), which takes an extra
     * reference, so use an explicit blk_set_perm() first. */
    blk_set_perm(s->target, 0, BLK_PERM_ALL, &error_abort);
    blk_unref(s->target);
    s->target = NULL;

    /* We don't access the source any more. Dropping any WRITE/RESIZE is
     * required before it could become a backing file of target_bs. */
    bdrv_child_try_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL,
                            &error_abort);
    if (s->backing_mode == MIRROR_SOURCE_BACKING_CHAIN) {
        BlockDriverState *backing = s->is_none_mode ? src : s->base;
        if (backing_bs(target_bs) != backing) {
            bdrv_set_backing_hd(target_bs, backing, &local_err);
            if (local_err) {
                error_report_err(local_err);
                data->ret = -EPERM;
            }
        }
    }

    if (s->to_replace) {
        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);
    }

    if (s->should_complete && data->ret == 0) {
        BlockDriverState *to_replace = src;
        if (s->to_replace) {
            to_replace = s->to_replace;
        }

        if (bdrv_get_flags(target_bs) != bdrv_get_flags(to_replace)) {
            bdrv_reopen(target_bs, bdrv_get_flags(to_replace), NULL);
        }

        /* The mirror job has no requests in flight any more, but we need to
         * drain potential other users of the BDS before changing the graph. */
        bdrv_drained_begin(target_bs);
        bdrv_replace_node(to_replace, target_bs, &local_err);
        bdrv_drained_end(target_bs);
        if (local_err) {
            error_report_err(local_err);
            data->ret = -EPERM;
        }
    }
    if (s->to_replace) {
        bdrv_op_unblock_all(s->to_replace, s->replace_blocker);
        error_free(s->replace_blocker);
        bdrv_unref(s->to_replace);
    }
    if (replace_aio_context) {
        aio_context_release(replace_aio_context);
    }
    g_free(s->replaces);
    bdrv_unref(target_bs);

    /* Remove the mirror filter driver from the graph. Before this, get rid of
     * the blockers on the intermediate nodes so that the resulting state is
     * valid. Also give up permissions on mirror_top_bs->backing, which might
     * block the removal. */
    block_job_remove_all_bdrv(bjob);
    bdrv_child_try_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL,
                            &error_abort);
    bdrv_replace_node(mirror_top_bs, backing_bs(mirror_top_bs), &error_abort);

    /* We just changed the BDS the job BB refers to (with either or both of the
     * bdrv_replace_node() calls), so switch the BB back so the cleanup does
     * the right thing. We don't need any permissions any more now. */
    blk_remove_bs(bjob->blk);
    blk_set_perm(bjob->blk, 0, BLK_PERM_ALL, &error_abort);
    blk_insert_bs(bjob->blk, mirror_top_bs, &error_abort);

    bs_opaque->job = NULL;
    job_completed(job, data->ret, NULL);

    g_free(data);
    bdrv_drained_end(src);
    bdrv_unref(mirror_top_bs);
    bdrv_unref(src);
}

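/* Yield or pause periodically so the job remains responsive while
 * looping without issuing I/O. */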
static void mirror_throttle(MirrorBlockJob *s)
{
    int64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

    if (now - s->last_pause_ns > BLOCK_JOB_SLICE_TIME) {
        s->last_pause_ns = now;
        job_sleep_ns(&s->common.job, 0);
    } else {
        job_pause_point(&s->common.job);
    }
}

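/* Prepare a full sync: zero out the target first if it has no zero
 * initialization, then mark all areas of the source that are allocated
 * above the base (if any) as dirty. */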
static int coroutine_fn mirror_dirty_init(MirrorBlockJob *s)
{
    int64_t offset;
    BlockDriverState *base = s->base;
    BlockDriverState *bs = s->mirror_top_bs->backing->bs;
    BlockDriverState *target_bs = blk_bs(s->target);
    int ret;
    int64_t count;

    if (base == NULL && !bdrv_has_zero_init(target_bs)) {
        if (!bdrv_can_write_zeroes_with_unmap(target_bs)) {
            bdrv_set_dirty_bitmap(s->dirty_bitmap, 0, s->bdev_length);
            return 0;
        }

        s->initial_zeroing_ongoing = true;
        for (offset = 0; offset < s->bdev_length; ) {
            int bytes = MIN(s->bdev_length - offset,
                            QEMU_ALIGN_DOWN(INT_MAX, s->granularity));

            mirror_throttle(s);

            if (job_is_cancelled(&s->common.job)) {
                s->initial_zeroing_ongoing = false;
                return 0;
            }

            if (s->in_flight >= MAX_IN_FLIGHT) {
                trace_mirror_yield(s, UINT64_MAX, s->buf_free_count,
                                   s->in_flight);
                mirror_wait_for_free_in_flight_slot(s);
                continue;
            }

            mirror_perform(s, offset, bytes, MIRROR_METHOD_ZERO);
            offset += bytes;
        }

        mirror_wait_for_all_io(s);
        s->initial_zeroing_ongoing = false;
    }

    /* Loop over the image and initialize the dirty bitmap.  */
    for (offset = 0; offset < s->bdev_length; ) {
        /* Just to make sure we are not exceeding int limit. */
        int bytes = MIN(s->bdev_length - offset,
                        QEMU_ALIGN_DOWN(INT_MAX, s->granularity));

        mirror_throttle(s);

        if (job_is_cancelled(&s->common.job)) {
            return 0;
        }

        ret = bdrv_is_allocated_above(bs, base, offset, bytes, &count);
        if (ret < 0) {
            return ret;
        }

        assert(count);
        if (ret == 1) {
            bdrv_set_dirty_bitmap(s->dirty_bitmap, offset, count);
        }
        offset += count;
    }
    return 0;
}

/* Called when going out of the streaming phase to flush the bulk of the
 * data to the medium, or just before completing.
 */
static int mirror_flush(MirrorBlockJob *s)
{
    int ret = blk_flush(s->target);
    if (ret < 0) {
        if (mirror_error_action(s, false, -ret) == BLOCK_ERROR_ACTION_REPORT) {
            s->ret = ret;
        }
    }
    return ret;
}

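/* Main coroutine of the job: performs the initial sync, then keeps
 * copying dirty areas to the target until cancellation or completion
 * is requested, and finally defers cleanup to mirror_exit() in the
 * main loop. */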
static void coroutine_fn mirror_run(void *opaque)
{
    MirrorBlockJob *s = opaque;
    MirrorExitData *data;
    BlockDriverState *bs = s->mirror_top_bs->backing->bs;
    BlockDriverState *target_bs = blk_bs(s->target);
    bool need_drain = true;
    int64_t length;
    BlockDriverInfo bdi;
    char backing_filename[2]; /* we only need 2 characters because we are only
                                 checking for an empty string */
    int ret = 0;

    if (job_is_cancelled(&s->common.job)) {
        goto immediate_exit;
    }

    s->bdev_length = bdrv_getlength(bs);
    if (s->bdev_length < 0) {
        ret = s->bdev_length;
        goto immediate_exit;
    }

    /* Active commit must resize the base image if its size differs from the
     * active layer. */
    if (s->base == blk_bs(s->target)) {
        int64_t base_length;

        base_length = blk_getlength(s->target);
        if (base_length < 0) {
            ret = base_length;
            goto immediate_exit;
        }

        if (s->bdev_length > base_length) {
            ret = blk_truncate(s->target, s->bdev_length, PREALLOC_MODE_OFF,
                               NULL);
            if (ret < 0) {
                goto immediate_exit;
            }
        }
    }

    if (s->bdev_length == 0) {
        /* Transition to the READY state and wait for completion. */
        job_transition_to_ready(&s->common.job);
        s->synced = true;
        s->actively_synced = true;
        while (!job_is_cancelled(&s->common.job) && !s->should_complete) {
            job_yield(&s->common.job);
        }
        s->common.job.cancelled = false;
        goto immediate_exit;
    }

    length = DIV_ROUND_UP(s->bdev_length, s->granularity);
    s->in_flight_bitmap = bitmap_new(length);

    /* If we have no backing file yet in the destination, we cannot let
     * the destination do COW.  Instead, we copy sectors around the
     * dirty data if needed.  We need a bitmap to do that.
     */
    bdrv_get_backing_filename(target_bs, backing_filename,
                              sizeof(backing_filename));
    if (!bdrv_get_info(target_bs, &bdi) && bdi.cluster_size) {
        s->target_cluster_size = bdi.cluster_size;
    } else {
        s->target_cluster_size = BDRV_SECTOR_SIZE;
    }
    if (backing_filename[0] && !target_bs->backing &&
        s->granularity < s->target_cluster_size) {
        s->buf_size = MAX(s->buf_size, s->target_cluster_size);
        s->cow_bitmap = bitmap_new(length);
    }
    s->max_iov = MIN(bs->bl.max_iov, target_bs->bl.max_iov);

    s->buf = qemu_try_blockalign(bs, s->buf_size);
    if (s->buf == NULL) {
        ret = -ENOMEM;
        goto immediate_exit;
    }

    mirror_free_init(s);

    s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    if (!s->is_none_mode) {
        ret = mirror_dirty_init(s);
        if (ret < 0 || job_is_cancelled(&s->common.job)) {
            goto immediate_exit;
        }
    }

    assert(!s->dbi);
    s->dbi = bdrv_dirty_iter_new(s->dirty_bitmap);
    for (;;) {
        uint64_t delay_ns = 0;
        int64_t cnt, delta;
        bool should_complete;

        /* Do not start passive operations while there are active
         * writes in progress */
        while (s->in_active_write_counter) {
            mirror_wait_for_any_operation(s, true);
        }

        if (s->ret < 0) {
            ret = s->ret;
            goto immediate_exit;
        }

        job_pause_point(&s->common.job);

        cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        /* cnt is the number of dirty bytes remaining and s->bytes_in_flight is
         * the number of bytes currently being processed; together those are
         * the current remaining operation length */
        job_progress_set_remaining(&s->common.job, s->bytes_in_flight + cnt);

        /* Note that even when no rate limit is applied we need to yield
         * periodically with no pending I/O so that bdrv_drain_all() returns.
         * We do so every BLOCK_JOB_SLICE_TIME nanoseconds, or when there is
         * an error, or when the source is clean, whichever comes first. */
        delta = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - s->last_pause_ns;
        if (delta < BLOCK_JOB_SLICE_TIME &&
            s->common.iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
            if (s->in_flight >= MAX_IN_FLIGHT || s->buf_free_count == 0 ||
                (cnt == 0 && s->in_flight > 0)) {
                trace_mirror_yield(s, cnt, s->buf_free_count, s->in_flight);
                mirror_wait_for_free_in_flight_slot(s);
                continue;
            } else if (cnt != 0) {
                delay_ns = mirror_iteration(s);
            }
        }

        should_complete = false;
        if (s->in_flight == 0 && cnt == 0) {
            trace_mirror_before_flush(s);
            if (!s->synced) {
                if (mirror_flush(s) < 0) {
                    /* Go check s->ret.  */
                    continue;
                }
                /* We're out of the streaming phase.  From now on, if the job
                 * is cancelled we will actually complete all pending I/O and
                 * report completion.  This way, block-job-cancel will leave
                 * the target in a consistent state.
                 */
                job_transition_to_ready(&s->common.job);
                s->synced = true;
                if (s->copy_mode != MIRROR_COPY_MODE_BACKGROUND) {
                    s->actively_synced = true;
                }
            }

            should_complete = s->should_complete ||
                job_is_cancelled(&s->common.job);
            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        }

        if (cnt == 0 && should_complete) {
            /* The dirty bitmap is not updated while operations are pending.
             * If we're about to exit, wait for pending operations before
             * calling bdrv_get_dirty_count(bs), or we may exit while the
             * source has dirty data to copy!
             *
             * Note that I/O can be submitted by the guest while
             * mirror_populate runs, so pause it now.  Before deciding
             * whether to switch to target check one last time if I/O has
             * come in the meanwhile, and if not flush the data to disk.
             */
            trace_mirror_before_drain(s, cnt);

            bdrv_drained_begin(bs);
            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
            if (cnt > 0 || mirror_flush(s) < 0) {
                bdrv_drained_end(bs);
                continue;
            }

            /* The two disks are in sync.  Exit and report successful
             * completion.
             */
            assert(QLIST_EMPTY(&bs->tracked_requests));
            s->common.job.cancelled = false;
            need_drain = false;
            break;
        }

        ret = 0;

        if (s->synced && !should_complete) {
            delay_ns = (s->in_flight == 0 &&
                        cnt == 0 ? BLOCK_JOB_SLICE_TIME : 0);
        }
        trace_mirror_before_sleep(s, cnt, s->synced, delay_ns);
        job_sleep_ns(&s->common.job, delay_ns);
        if (job_is_cancelled(&s->common.job) &&
            (!s->synced || s->common.job.force_cancel))
        {
            break;
        }
        s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    }

immediate_exit:
    if (s->in_flight > 0) {
        /* We get here only if something went wrong.  Either the job failed,
         * or it was cancelled prematurely so that we do not guarantee that
         * the target is a copy of the source.
         */
        assert(ret < 0 || ((s->common.job.force_cancel || !s->synced) &&
               job_is_cancelled(&s->common.job)));
        assert(need_drain);
        mirror_wait_for_all_io(s);
    }

    assert(s->in_flight == 0);
    qemu_vfree(s->buf);
    g_free(s->cow_bitmap);
    g_free(s->in_flight_bitmap);
    bdrv_dirty_iter_free(s->dbi);

    data = g_malloc(sizeof(*data));
    data->ret = ret;

    if (need_drain) {
        bdrv_drained_begin(bs);
    }
    job_defer_to_main_loop(&s->common.job, mirror_exit, data);
}

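/* Implementation of block-job-complete: once the job is synced, set up
 * the backing file and the node to be replaced, then ask the job to
 * finish. */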
static void mirror_complete(Job *job, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
    BlockDriverState *target;

    target = blk_bs(s->target);

    if (!s->synced) {
        error_setg(errp, "The active block job '%s' cannot be completed",
                   job->id);
        return;
    }

    if (s->backing_mode == MIRROR_OPEN_BACKING_CHAIN) {
        int ret;

        assert(!target->backing);
        ret = bdrv_open_backing_file(target, NULL, "backing", errp);
        if (ret < 0) {
            return;
        }
    }

    /* block all operations on to_replace bs */
    if (s->replaces) {
        AioContext *replace_aio_context;

        s->to_replace = bdrv_find_node(s->replaces);
        if (!s->to_replace) {
            error_setg(errp, "Node name '%s' not found", s->replaces);
            return;
        }

        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);

        /* TODO Translate this into permission system. Current definition of
         * GRAPH_MOD would require to request it for the parents; they might
         * not even be BlockDriverStates, however, so a BdrvChild can't address
         * them. May need redefinition of GRAPH_MOD. */
        error_setg(&s->replace_blocker,
                   "block device is in use by block-job-complete");
        bdrv_op_block_all(s->to_replace, s->replace_blocker);
        bdrv_ref(s->to_replace);

        aio_context_release(replace_aio_context);
    }

    s->should_complete = true;
    job_enter(job);
}

static void mirror_pause(Job *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);

    mirror_wait_for_all_io(s);
}

static bool mirror_drained_poll(BlockJob *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
    return !!s->in_flight;
}

static void mirror_attached_aio_context(BlockJob *job, AioContext *new_context)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    blk_set_aio_context(s->target, new_context);
}

static void mirror_drain(BlockJob *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    /* Need to keep a reference in case blk_drain triggers execution
     * of mirror_complete...
     */
    if (s->target) {
        BlockBackend *target = s->target;
        blk_ref(target);
        blk_drain(target);
        blk_unref(target);
    }
}

static const BlockJobDriver mirror_job_driver = {
    .job_driver = {
        .instance_size          = sizeof(MirrorBlockJob),
        .job_type               = JOB_TYPE_MIRROR,
        .free                   = block_job_free,
        .user_resume            = block_job_user_resume,
        .drain                  = block_job_drain,
        .start                  = mirror_run,
        .pause                  = mirror_pause,
        .complete               = mirror_complete,
    },
    .drained_poll           = mirror_drained_poll,
    .attached_aio_context   = mirror_attached_aio_context,
    .drain                  = mirror_drain,
};

static const BlockJobDriver commit_active_job_driver = {
    .job_driver = {
        .instance_size          = sizeof(MirrorBlockJob),
        .job_type               = JOB_TYPE_COMMIT,
        .free                   = block_job_free,
        .user_resume            = block_job_user_resume,
        .drain                  = block_job_drain,
        .start                  = mirror_run,
        .pause                  = mirror_pause,
        .complete               = mirror_complete,
    },
    .drained_poll           = mirror_drained_poll,
    .attached_aio_context   = mirror_attached_aio_context,
    .drain                  = mirror_drain,
};

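/* Synchronously replicate a guest write (or zero/discard) to the
 * target, restricted to the parts of the range that are still dirty,
 * clearing the dirty bits and updating progress as it goes. */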
static void do_sync_target_write(MirrorBlockJob *job, MirrorMethod method,
                                 uint64_t offset, uint64_t bytes,
                                 QEMUIOVector *qiov, int flags)
{
    BdrvDirtyBitmapIter *iter;
    QEMUIOVector target_qiov;
    uint64_t dirty_offset;
    int dirty_bytes;

    if (qiov) {
        qemu_iovec_init(&target_qiov, qiov->niov);
    }

    iter = bdrv_dirty_iter_new(job->dirty_bitmap);
    bdrv_set_dirty_iter(iter, offset);

    while (true) {
        bool valid_area;
        int ret;

        bdrv_dirty_bitmap_lock(job->dirty_bitmap);
        valid_area = bdrv_dirty_iter_next_area(iter, offset + bytes,
                                               &dirty_offset, &dirty_bytes);
        if (!valid_area) {
            bdrv_dirty_bitmap_unlock(job->dirty_bitmap);
            break;
        }

        bdrv_reset_dirty_bitmap_locked(job->dirty_bitmap,
                                       dirty_offset, dirty_bytes);
        bdrv_dirty_bitmap_unlock(job->dirty_bitmap);

        job_progress_increase_remaining(&job->common.job, dirty_bytes);

        assert(dirty_offset - offset <= SIZE_MAX);
        if (qiov) {
            qemu_iovec_reset(&target_qiov);
            qemu_iovec_concat(&target_qiov, qiov,
                              dirty_offset - offset, dirty_bytes);
        }

        switch (method) {
        case MIRROR_METHOD_COPY:
            ret = blk_co_pwritev(job->target, dirty_offset, dirty_bytes,
                                 qiov ? &target_qiov : NULL, flags);
            break;

        case MIRROR_METHOD_ZERO:
            assert(!qiov);
            ret = blk_co_pwrite_zeroes(job->target, dirty_offset, dirty_bytes,
                                       flags);
            break;

        case MIRROR_METHOD_DISCARD:
            assert(!qiov);
            ret = blk_co_pdiscard(job->target, dirty_offset, dirty_bytes);
            break;

        default:
            abort();
        }

        if (ret >= 0) {
            job_progress_update(&job->common.job, dirty_bytes);
        } else {
            BlockErrorAction action;

            bdrv_set_dirty_bitmap(job->dirty_bitmap, dirty_offset, dirty_bytes);
            job->actively_synced = false;

            action = mirror_error_action(job, false, -ret);
            if (action == BLOCK_ERROR_ACTION_REPORT) {
                if (!job->ret) {
                    job->ret = ret;
                }
                break;
            }
        }
    }

    bdrv_dirty_iter_free(iter);
    if (qiov) {
        qemu_iovec_destroy(&target_qiov);
    }
}

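/* Register an active write: wait for conflicting in-flight operations
 * and mark the affected chunks as in flight.  The returned op must be
 * released with active_write_settle(). */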
static MirrorOp *coroutine_fn active_write_prepare(MirrorBlockJob *s,
                                                   uint64_t offset,
                                                   uint64_t bytes)
{
    MirrorOp *op;
    uint64_t start_chunk = offset / s->granularity;
    uint64_t end_chunk = DIV_ROUND_UP(offset + bytes, s->granularity);

    op = g_new(MirrorOp, 1);
    *op = (MirrorOp){
        .s                  = s,
        .offset             = offset,
        .bytes              = bytes,
        .is_active_write    = true,
    };
    qemu_co_queue_init(&op->waiting_requests);
    QTAILQ_INSERT_TAIL(&s->ops_in_flight, op, next);

    s->in_active_write_counter++;

    mirror_wait_on_conflicts(op, s, offset, bytes);

    bitmap_set(s->in_flight_bitmap, start_chunk, end_chunk - start_chunk);

    return op;
}

static void coroutine_fn active_write_settle(MirrorOp *op)
{
    uint64_t start_chunk = op->offset / op->s->granularity;
    uint64_t end_chunk = DIV_ROUND_UP(op->offset + op->bytes,
                                      op->s->granularity);

    if (!--op->s->in_active_write_counter && op->s->actively_synced) {
        BdrvChild *source = op->s->mirror_top_bs->backing;

        if (QLIST_FIRST(&source->bs->parents) == source &&
            QLIST_NEXT(source, next_parent) == NULL)
        {
            /* Assert that we are back in sync once all active write
             * operations are settled.
             * Note that we can only assert this if the mirror node
             * is the source node's only parent. */
            assert(!bdrv_get_dirty_count(op->s->dirty_bitmap));
        }
    }
    bitmap_clear(op->s->in_flight_bitmap, start_chunk, end_chunk - start_chunk);
    QTAILQ_REMOVE(&op->s->ops_in_flight, op, next);
    qemu_co_queue_restart_all(&op->waiting_requests);
    g_free(op);
}

static int coroutine_fn bdrv_mirror_top_preadv(BlockDriverState *bs,
    uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags)
{
    return bdrv_co_preadv(bs->backing, offset, bytes, qiov, flags);
}

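/* Forward a write, zero or discard request to the source and, in
 * write-blocking copy mode, replicate it to the target before
 * returning. */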
static int coroutine_fn bdrv_mirror_top_do_write(BlockDriverState *bs,
    MirrorMethod method, uint64_t offset, uint64_t bytes, QEMUIOVector *qiov,
    int flags)
{
    MirrorOp *op = NULL;
    MirrorBDSOpaque *s = bs->opaque;
    int ret = 0;
    bool copy_to_target;

    copy_to_target = s->job->ret >= 0 &&
                     s->job->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING;

    if (copy_to_target) {
        op = active_write_prepare(s->job, offset, bytes);
    }

    switch (method) {
    case MIRROR_METHOD_COPY:
        ret = bdrv_co_pwritev(bs->backing, offset, bytes, qiov, flags);
        break;

    case MIRROR_METHOD_ZERO:
        ret = bdrv_co_pwrite_zeroes(bs->backing, offset, bytes, flags);
        break;

    case MIRROR_METHOD_DISCARD:
        ret = bdrv_co_pdiscard(bs->backing->bs, offset, bytes);
        break;

    default:
        abort();
    }

    if (ret < 0) {
        goto out;
    }

    if (copy_to_target) {
        do_sync_target_write(s->job, method, offset, bytes, qiov, flags);
    }

out:
    if (copy_to_target) {
        active_write_settle(op);
    }
    return ret;
}

static int coroutine_fn bdrv_mirror_top_pwritev(BlockDriverState *bs,
    uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags)
{
    MirrorBDSOpaque *s = bs->opaque;
    QEMUIOVector bounce_qiov;
    void *bounce_buf;
    int ret = 0;
    bool copy_to_target;

    copy_to_target = s->job->ret >= 0 &&
                     s->job->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING;

    if (copy_to_target) {
        /* The guest might concurrently modify the data to write; but
         * the data on source and destination must match, so we have
         * to use a bounce buffer if we are going to write to the
         * target now. */
        bounce_buf = qemu_blockalign(bs, bytes);
        iov_to_buf_full(qiov->iov, qiov->niov, 0, bounce_buf, bytes);

        qemu_iovec_init(&bounce_qiov, 1);
        qemu_iovec_add(&bounce_qiov, bounce_buf, bytes);
        qiov = &bounce_qiov;
    }

    ret = bdrv_mirror_top_do_write(bs, MIRROR_METHOD_COPY, offset, bytes, qiov,
                                   flags);

    if (copy_to_target) {
        qemu_iovec_destroy(&bounce_qiov);
        qemu_vfree(bounce_buf);
    }

    return ret;
}

static int coroutine_fn bdrv_mirror_top_flush(BlockDriverState *bs)
{
    if (bs->backing == NULL) {
        /* we can be here after failed bdrv_append in mirror_start_job */
        return 0;
    }
    return bdrv_co_flush(bs->backing->bs);
}

static int coroutine_fn bdrv_mirror_top_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int bytes, BdrvRequestFlags flags)
{
    return bdrv_mirror_top_do_write(bs, MIRROR_METHOD_ZERO, offset, bytes, NULL,
                                    flags);
}

static int coroutine_fn bdrv_mirror_top_pdiscard(BlockDriverState *bs,
    int64_t offset, int bytes)
{
    return bdrv_mirror_top_do_write(bs, MIRROR_METHOD_DISCARD, offset, bytes,
                                    NULL, 0);
}

static void bdrv_mirror_top_refresh_filename(BlockDriverState *bs, QDict *opts)
{
    if (bs->backing == NULL) {
        /* we can be here after failed bdrv_attach_child in
         * bdrv_set_backing_hd */
        return;
    }
    bdrv_refresh_filename(bs->backing->bs);
    pstrcpy(bs->exact_filename, sizeof(bs->exact_filename),
            bs->backing->bs->filename);
}

static void bdrv_mirror_top_close(BlockDriverState *bs)
{
}

static void bdrv_mirror_top_child_perm(BlockDriverState *bs, BdrvChild *c,
                                       const BdrvChildRole *role,
                                       BlockReopenQueue *reopen_queue,
                                       uint64_t perm, uint64_t shared,
                                       uint64_t *nperm, uint64_t *nshared)
{
    /* Must be able to forward guest writes to the real image */
    *nperm = 0;
    if (perm & BLK_PERM_WRITE) {
        *nperm |= BLK_PERM_WRITE;
    }

    *nshared = BLK_PERM_ALL;
}

/* Dummy node that provides consistent read to its users without requiring it
 * from its backing file and that allows writes on the backing file chain. */
static BlockDriver bdrv_mirror_top = {
    .format_name                = "mirror_top",
    .bdrv_co_preadv             = bdrv_mirror_top_preadv,
    .bdrv_co_pwritev            = bdrv_mirror_top_pwritev,
    .bdrv_co_pwrite_zeroes      = bdrv_mirror_top_pwrite_zeroes,
    .bdrv_co_pdiscard           = bdrv_mirror_top_pdiscard,
    .bdrv_co_flush              = bdrv_mirror_top_flush,
    .bdrv_co_block_status       = bdrv_co_block_status_from_backing,
    .bdrv_refresh_filename      = bdrv_mirror_top_refresh_filename,
    .bdrv_close                 = bdrv_mirror_top_close,
    .bdrv_child_perm            = bdrv_mirror_top_child_perm,
};

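/* Common setup for mirror and active commit jobs: insert the
 * mirror_top filter node above @bs and create the block job with the
 * appropriate permissions on the source and target nodes. */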
1463 static void mirror_start_job(const char *job_id, BlockDriverState *bs,
1464                              int creation_flags, BlockDriverState *target,
1465                              const char *replaces, int64_t speed,
1466                              uint32_t granularity, int64_t buf_size,
1467                              BlockMirrorBackingMode backing_mode,
1468                              BlockdevOnError on_source_error,
1469                              BlockdevOnError on_target_error,
1470                              bool unmap,
1471                              BlockCompletionFunc *cb,
1472                              void *opaque,
1473                              const BlockJobDriver *driver,
1474                              bool is_none_mode, BlockDriverState *base,
1475                              bool auto_complete, const char *filter_node_name,
1476                              bool is_mirror, MirrorCopyMode copy_mode,
1477                              Error **errp)
1478 {
1479     MirrorBlockJob *s;
1480     MirrorBDSOpaque *bs_opaque;
1481     BlockDriverState *mirror_top_bs;
1482     bool target_graph_mod;
1483     bool target_is_backing;
1484     Error *local_err = NULL;
1485     int ret;
1486 
1487     if (granularity == 0) {
1488         granularity = bdrv_get_default_bitmap_granularity(target);
1489     }
1490 
1491     assert(is_power_of_2(granularity));
1492 
1493     if (buf_size < 0) {
1494         error_setg(errp, "Invalid parameter 'buf-size'");
1495         return;
1496     }
1497 
1498     if (buf_size == 0) {
1499         buf_size = DEFAULT_MIRROR_BUF_SIZE;
1500     }
1501 
1502     /* In the case of active commit, add dummy driver to provide consistent
1503      * reads on the top, while disabling it in the intermediate nodes, and make
1504      * the backing chain writable. */
1505     mirror_top_bs = bdrv_new_open_driver(&bdrv_mirror_top, filter_node_name,
1506                                          BDRV_O_RDWR, errp);
1507     if (mirror_top_bs == NULL) {
1508         return;
1509     }
1510     if (!filter_node_name) {
1511         mirror_top_bs->implicit = true;
1512     }
1513     mirror_top_bs->total_sectors = bs->total_sectors;
1514     mirror_top_bs->supported_write_flags = BDRV_REQ_WRITE_UNCHANGED;
1515     mirror_top_bs->supported_zero_flags = BDRV_REQ_WRITE_UNCHANGED;
1516     bs_opaque = g_new0(MirrorBDSOpaque, 1);
1517     mirror_top_bs->opaque = bs_opaque;
1518     bdrv_set_aio_context(mirror_top_bs, bdrv_get_aio_context(bs));
1519 
1520     /* bdrv_append takes ownership of the mirror_top_bs reference, need to keep
1521      * it alive until block_job_create() succeeds even if bs has no parent. */
1522     bdrv_ref(mirror_top_bs);
1523     bdrv_drained_begin(bs);
1524     bdrv_append(mirror_top_bs, bs, &local_err);
1525     bdrv_drained_end(bs);
1526 
1527     if (local_err) {
1528         bdrv_unref(mirror_top_bs);
1529         error_propagate(errp, local_err);
1530         return;
1531     }
1532 
1533     /* Make sure that the source is not resized while the job is running */
1534     s = block_job_create(job_id, driver, NULL, mirror_top_bs,
1535                          BLK_PERM_CONSISTENT_READ,
1536                          BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED |
1537                          BLK_PERM_WRITE | BLK_PERM_GRAPH_MOD, speed,
1538                          creation_flags, cb, opaque, errp);
1539     if (!s) {
1540         goto fail;
1541     }
1542     bs_opaque->job = s;
1543 
1544     /* The block job now has a reference to this node */
1545     bdrv_unref(mirror_top_bs);
1546 
1547     s->mirror_top_bs = mirror_top_bs;
1548 
1549     /* No resize for the target either; while the mirror is still running, a
1550      * consistent read isn't necessarily possible. We could possibly allow
1551      * writes and graph modifications, though it would likely defeat the
1552      * purpose of a mirror, so leave them blocked for now.
1553      *
1554      * In the case of active commit, things look a bit different, though,
1555      * because the target is an already populated backing file in active use.
1556      * We can allow anything except resize there. */
1557     target_is_backing = bdrv_chain_contains(bs, target);
1558     target_graph_mod = (backing_mode != MIRROR_LEAVE_BACKING_CHAIN);
1559     s->target = blk_new(BLK_PERM_WRITE | BLK_PERM_RESIZE |
1560                         (target_graph_mod ? BLK_PERM_GRAPH_MOD : 0),
1561                         BLK_PERM_WRITE_UNCHANGED |
1562                         (target_is_backing ? BLK_PERM_CONSISTENT_READ |
1563                                              BLK_PERM_WRITE |
1564                                              BLK_PERM_GRAPH_MOD : 0));
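    /* Spelled out for clarity (added comment): for a plain mirror the target
     * BB takes BLK_PERM_WRITE | BLK_PERM_RESIZE (plus BLK_PERM_GRAPH_MOD
     * unless backing_mode is MIRROR_LEAVE_BACKING_CHAIN) and shares only
     * BLK_PERM_WRITE_UNCHANGED; for active commit, CONSISTENT_READ, WRITE and
     * GRAPH_MOD additionally stay shared because the target is a backing file
     * in active use. */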
1565     ret = blk_insert_bs(s->target, target, errp);
1566     if (ret < 0) {
1567         goto fail;
1568     }
1569     if (is_mirror) {
1570         /* XXX: The mirror target could be an NBD server of the target QEMU
1571          * in the case of non-shared block migration. To allow migration
1572          * completion, we have to allow "inactivate" of the target BB. When
1573          * that happens, we know the job is drained and the vcpus are stopped,
1574          * so no write operation will be performed. The block layer already
1575          * has assertions to ensure that. */
1576         blk_set_force_allow_inactivate(s->target);
1577     }
1578 
1579     s->replaces = g_strdup(replaces);
1580     s->on_source_error = on_source_error;
1581     s->on_target_error = on_target_error;
1582     s->is_none_mode = is_none_mode;
1583     s->backing_mode = backing_mode;
1584     s->copy_mode = copy_mode;
1585     s->base = base;
1586     s->granularity = granularity;
1587     s->buf_size = ROUND_UP(buf_size, granularity);
1588     s->unmap = unmap;
1589     if (auto_complete) {
1590         s->should_complete = true;
1591     }
1592 
1593     s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity, NULL, errp);
1594     if (!s->dirty_bitmap) {
1595         goto fail;
1596     }
1597 
1598     /* Required permissions are already taken with blk_new() */
1599     block_job_add_bdrv(&s->common, "target", target, 0, BLK_PERM_ALL,
1600                        &error_abort);
1601 
1602     /* In commit_active_start() all intermediate nodes disappear, so
1603      * any jobs in them must be blocked */
1604     if (target_is_backing) {
1605         BlockDriverState *iter;
1606         for (iter = backing_bs(bs); iter != target; iter = backing_bs(iter)) {
1607             /* XXX BLK_PERM_WRITE needs to be allowed so we don't block
1608              * ourselves at s->base (if writes are blocked for a node, they are
1609              * also blocked for its backing file). The other option would be a
1610              * second filter driver above s->base (== target). */
1611             ret = block_job_add_bdrv(&s->common, "intermediate node", iter, 0,
1612                                      BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE,
1613                                      errp);
1614             if (ret < 0) {
1615                 goto fail;
1616             }
1617         }
1618     }
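
    /* Illustration (added comment): for an active commit over the chain
     * top <- mid <- base, with base as the target, the loop above adds only
     * 'mid' as an intermediate node, leaving WRITE_UNCHANGED and WRITE shared
     * on it and blocking everything else. */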
1619 
1620     QTAILQ_INIT(&s->ops_in_flight);
1621 
1622     trace_mirror_start(bs, s, opaque);
1623     job_start(&s->common.job);
1624     return;
1625 
1626 fail:
1627     if (s) {
1628         /* Make sure this BDS does not go away until we have completed the graph
1629          * changes below */
1630         bdrv_ref(mirror_top_bs);
1631 
1632         g_free(s->replaces);
1633         blk_unref(s->target);
1634         bs_opaque->job = NULL;
1635         job_early_fail(&s->common.job);
1636     }
1637 
1638     bdrv_child_try_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL,
1639                             &error_abort);
1640     bdrv_replace_node(mirror_top_bs, backing_bs(mirror_top_bs), &error_abort);
1641 
1642     bdrv_unref(mirror_top_bs);
1643 }
1644 
1645 void mirror_start(const char *job_id, BlockDriverState *bs,
1646                   BlockDriverState *target, const char *replaces,
1647                   int64_t speed, uint32_t granularity, int64_t buf_size,
1648                   MirrorSyncMode mode, BlockMirrorBackingMode backing_mode,
1649                   BlockdevOnError on_source_error,
1650                   BlockdevOnError on_target_error,
1651                   bool unmap, const char *filter_node_name,
1652                   MirrorCopyMode copy_mode, Error **errp)
1653 {
1654     bool is_none_mode;
1655     BlockDriverState *base;
1656 
1657     if (mode == MIRROR_SYNC_MODE_INCREMENTAL) {
1658         error_setg(errp, "Sync mode 'incremental' not supported");
1659         return;
1660     }
1661     is_none_mode = mode == MIRROR_SYNC_MODE_NONE;
1662     base = mode == MIRROR_SYNC_MODE_TOP ? backing_bs(bs) : NULL;
1663     mirror_start_job(job_id, bs, JOB_DEFAULT, target, replaces,
1664                      speed, granularity, buf_size, backing_mode,
1665                      on_source_error, on_target_error, unmap, NULL, NULL,
1666                      &mirror_job_driver, is_none_mode, base, false,
1667                      filter_node_name, true, copy_mode, errp);
1668 }
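
/*
 * A hypothetical caller sketch (added for illustration; 'bs', 'target' and
 * 'err' are assumed to exist): a full mirror of bs to target with default
 * tuning, where granularity and buf_size of 0 select the defaults computed
 * in mirror_start_job():
 *
 *     Error *err = NULL;
 *
 *     mirror_start("mirror0", bs, target, NULL, 0, 0, 0,
 *                  MIRROR_SYNC_MODE_FULL, MIRROR_OPEN_BACKING_CHAIN,
 *                  BLOCKDEV_ON_ERROR_REPORT, BLOCKDEV_ON_ERROR_REPORT,
 *                  false, NULL, MIRROR_COPY_MODE_BACKGROUND, &err);
 */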
1669 
1670 void commit_active_start(const char *job_id, BlockDriverState *bs,
1671                          BlockDriverState *base, int creation_flags,
1672                          int64_t speed, BlockdevOnError on_error,
1673                          const char *filter_node_name,
1674                          BlockCompletionFunc *cb, void *opaque,
1675                          bool auto_complete, Error **errp)
1676 {
1677     int orig_base_flags;
1678     Error *local_err = NULL;
1679 
1680     orig_base_flags = bdrv_get_flags(base);
1681 
1682     if (bdrv_reopen(base, bs->open_flags, errp)) {
1683         return;
1684     }
1685 
1686     mirror_start_job(job_id, bs, creation_flags, base, NULL, speed, 0, 0,
1687                      MIRROR_LEAVE_BACKING_CHAIN,
1688                      on_error, on_error, true, cb, opaque,
1689                      &commit_active_job_driver, false, base, auto_complete,
1690                      filter_node_name, false, MIRROR_COPY_MODE_BACKGROUND,
1691                      &local_err);
1692     if (local_err) {
1693         error_propagate(errp, local_err);
1694         goto error_restore_flags;
1695     }
1696 
1697     return;
1698 
1699 error_restore_flags:
1700     /* Ignore the error and errp of bdrv_reopen(); we want to propagate
1701      * the original error */
1702     bdrv_reopen(base, orig_base_flags, NULL);
1703     return;
1704 }
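
/*
 * A hypothetical caller sketch (added for illustration; 'bs' and 'base' are
 * assumed to be nodes of the same backing chain): committing the active
 * layer bs into base, auto-completing once the job converges:
 *
 *     Error *err = NULL;
 *
 *     commit_active_start("commit0", bs, base, JOB_DEFAULT, 0,
 *                         BLOCKDEV_ON_ERROR_REPORT, NULL,
 *                         NULL, NULL, true, &err);
 */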
1705