/*
 * Image mirroring
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Paolo Bonzini  <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "trace.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qemu/ratelimit.h"
#include "qemu/bitmap.h"

#define MAX_IN_FLIGHT 16
#define MAX_IO_BYTES (1 << 20) /* 1 MiB */
#define DEFAULT_MIRROR_BUF_SIZE (MAX_IN_FLIGHT * MAX_IO_BYTES)

/* The mirroring buffer is a list of granularity-sized chunks.
 * Free chunks are organized in a list.
 */
typedef struct MirrorBuffer {
    QSIMPLEQ_ENTRY(MirrorBuffer) next;
} MirrorBuffer;

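/* All copying I/O is carved into granularity-sized chunks drawn from one
 * preallocated buffer. As a worked example (example values, not mandated by
 * this file): with the default 16 MiB buffer and a 64 KiB granularity,
 * mirror_free_init() below slices the buffer into 256 free chunks that are
 * recycled through s->buf_free as requests complete. */
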
typedef struct MirrorBlockJob {
    BlockJob common;
    BlockBackend *target;
    BlockDriverState *mirror_top_bs;
    BlockDriverState *source;
    BlockDriverState *base;

    /* The name of the graph node to replace */
    char *replaces;
    /* The BDS to replace */
    BlockDriverState *to_replace;
    /* Used to block operations on the drive-mirror-replace target */
    Error *replace_blocker;
    bool is_none_mode;
    BlockMirrorBackingMode backing_mode;
    BlockdevOnError on_source_error, on_target_error;
    bool synced;
    bool should_complete;
    int64_t granularity;
    size_t buf_size;
    int64_t bdev_length;
    unsigned long *cow_bitmap;
    BdrvDirtyBitmap *dirty_bitmap;
    BdrvDirtyBitmapIter *dbi;
    uint8_t *buf;
    QSIMPLEQ_HEAD(, MirrorBuffer) buf_free;
    int buf_free_count;

    uint64_t last_pause_ns;
    unsigned long *in_flight_bitmap;
    int in_flight;
    int64_t bytes_in_flight;
    int ret;
    bool unmap;
    bool waiting_for_io;
    int target_cluster_size;
    int max_iov;
    bool initial_zeroing_ongoing;
} MirrorBlockJob;

typedef struct MirrorOp {
    MirrorBlockJob *s;
    QEMUIOVector qiov;
    int64_t offset;
    uint64_t bytes;
} MirrorOp;

static BlockErrorAction mirror_error_action(MirrorBlockJob *s, bool read,
                                            int error)
{
    s->synced = false;
    if (read) {
        return block_job_error_action(&s->common, s->on_source_error,
                                      true, error);
    } else {
        return block_job_error_action(&s->common, s->on_target_error,
                                      false, error);
    }
}

static void mirror_iteration_done(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;
    struct iovec *iov;
    int64_t chunk_num;
    int i, nb_chunks;

    trace_mirror_iteration_done(s, op->offset, op->bytes, ret);

    s->in_flight--;
    s->bytes_in_flight -= op->bytes;
    iov = op->qiov.iov;
    for (i = 0; i < op->qiov.niov; i++) {
        MirrorBuffer *buf = (MirrorBuffer *) iov[i].iov_base;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, buf, next);
        s->buf_free_count++;
    }

    chunk_num = op->offset / s->granularity;
    nb_chunks = DIV_ROUND_UP(op->bytes, s->granularity);
    bitmap_clear(s->in_flight_bitmap, chunk_num, nb_chunks);
    if (ret >= 0) {
        if (s->cow_bitmap) {
            bitmap_set(s->cow_bitmap, chunk_num, nb_chunks);
        }
        if (!s->initial_zeroing_ongoing) {
            block_job_progress_update(&s->common, op->bytes);
        }
    }
    qemu_iovec_destroy(&op->qiov);
    g_free(op);

    if (s->waiting_for_io) {
        qemu_coroutine_enter(s->common.job.co);
    }
}

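/* Completion chain: mirror_do_read() issues the read with
 * mirror_read_complete() as its callback; on success that callback submits
 * the matching write with mirror_write_complete(). Both callbacks re-mark
 * the range dirty on error, so it will be retried, and finally funnel into
 * mirror_iteration_done() to recycle the buffer chunks. */
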
static void mirror_write_complete(void *opaque, int ret)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;

    aio_context_acquire(blk_get_aio_context(s->common.blk));
    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->offset, op->bytes);
        action = mirror_error_action(s, false, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }
    }
    mirror_iteration_done(op, ret);
    aio_context_release(blk_get_aio_context(s->common.blk));
}

static void mirror_read_complete(void *opaque, int ret)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;

    aio_context_acquire(blk_get_aio_context(s->common.blk));
    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->offset, op->bytes);
        action = mirror_error_action(s, true, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }

        mirror_iteration_done(op, ret);
    } else {
        blk_aio_pwritev(s->target, op->offset, &op->qiov,
                        0, mirror_write_complete, op);
    }
    aio_context_release(blk_get_aio_context(s->common.blk));
}

/* Clip bytes relative to offset to not exceed end-of-file */
static inline int64_t mirror_clip_bytes(MirrorBlockJob *s,
                                        int64_t offset,
                                        int64_t bytes)
{
    return MIN(bytes, s->bdev_length - offset);
}

/* Round offset and/or bytes to target cluster if COW is needed, and
 * return the offset of the adjusted tail against original. */
static int mirror_cow_align(MirrorBlockJob *s, int64_t *offset,
                            uint64_t *bytes)
{
    bool need_cow;
    int ret = 0;
    int64_t align_offset = *offset;
    int64_t align_bytes = *bytes;
    int max_bytes = s->granularity * s->max_iov;

    need_cow = !test_bit(*offset / s->granularity, s->cow_bitmap);
    need_cow |= !test_bit((*offset + *bytes - 1) / s->granularity,
                          s->cow_bitmap);
    if (need_cow) {
        bdrv_round_to_clusters(blk_bs(s->target), *offset, *bytes,
                               &align_offset, &align_bytes);
    }

    if (align_bytes > max_bytes) {
        align_bytes = max_bytes;
        if (need_cow) {
            align_bytes = QEMU_ALIGN_DOWN(align_bytes, s->target_cluster_size);
        }
    }
    /* Clipping may result in align_bytes unaligned to chunk boundary, but
     * that doesn't matter because it's already the end of source image. */
    align_bytes = mirror_clip_bytes(s, align_offset, align_bytes);

    ret = align_offset + align_bytes - (*offset + *bytes);
    *offset = align_offset;
    *bytes = align_bytes;
    assert(ret >= 0);
    return ret;
}

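/* Worked example (values assumed for illustration only): with a 64 KiB
 * granularity, a 128 KiB target_cluster_size and a request for
 * [64 KiB, 96 KiB) whose surrounding chunks still need COW,
 * bdrv_round_to_clusters() widens the request to the whole cluster
 * [0, 128 KiB). The tail moves from 96 KiB to 128 KiB, so
 * mirror_cow_align() returns +32 KiB and the caller accounts for the extra
 * bytes copied past the original end. */
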
static inline void mirror_wait_for_io(MirrorBlockJob *s)
{
    assert(!s->waiting_for_io);
    s->waiting_for_io = true;
    qemu_coroutine_yield();
    s->waiting_for_io = false;
}

/* Submit async read while handling COW.
 * Returns: The number of bytes copied after and including offset,
 *          excluding any bytes copied prior to offset due to alignment.
 *          This will be @bytes if no alignment is necessary, or
 *          (new_end - offset) if tail is rounded up or down due to
 *          alignment or buffer limit.
 */
static uint64_t mirror_do_read(MirrorBlockJob *s, int64_t offset,
                               uint64_t bytes)
{
    BlockBackend *source = s->common.blk;
    int nb_chunks;
    uint64_t ret;
    MirrorOp *op;
    uint64_t max_bytes;

    max_bytes = s->granularity * s->max_iov;

    /* We can only handle as much as buf_size at a time. */
    bytes = MIN(s->buf_size, MIN(max_bytes, bytes));
    assert(bytes);
    assert(bytes < BDRV_REQUEST_MAX_BYTES);
    ret = bytes;

    if (s->cow_bitmap) {
        ret += mirror_cow_align(s, &offset, &bytes);
    }
    assert(bytes <= s->buf_size);
    /* The offset is granularity-aligned because:
     * 1) Caller passes in aligned values;
     * 2) mirror_cow_align is used only when target cluster is larger. */
    assert(QEMU_IS_ALIGNED(offset, s->granularity));
    /* The range is sector-aligned, since bdrv_getlength() rounds up. */
    assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));
    nb_chunks = DIV_ROUND_UP(bytes, s->granularity);

    while (s->buf_free_count < nb_chunks) {
        trace_mirror_yield_in_flight(s, offset, s->in_flight);
        mirror_wait_for_io(s);
    }

    /* Allocate a MirrorOp that is used as an AIO callback. */
    op = g_new(MirrorOp, 1);
    op->s = s;
    op->offset = offset;
    op->bytes = bytes;

    /* Now make a QEMUIOVector taking enough granularity-sized chunks
     * from s->buf_free.
     */
    qemu_iovec_init(&op->qiov, nb_chunks);
    while (nb_chunks-- > 0) {
        MirrorBuffer *buf = QSIMPLEQ_FIRST(&s->buf_free);
        size_t remaining = bytes - op->qiov.size;

        QSIMPLEQ_REMOVE_HEAD(&s->buf_free, next);
        s->buf_free_count--;
        qemu_iovec_add(&op->qiov, buf, MIN(s->granularity, remaining));
    }

    /* Copy the dirty cluster.  */
    s->in_flight++;
    s->bytes_in_flight += bytes;
    trace_mirror_one_iteration(s, offset, bytes);

    blk_aio_preadv(source, offset, &op->qiov, 0, mirror_read_complete, op);
    return ret;
}

static void mirror_do_zero_or_discard(MirrorBlockJob *s,
                                      int64_t offset,
                                      uint64_t bytes,
                                      bool is_discard)
{
    MirrorOp *op;

    /* Allocate a MirrorOp that is used as an AIO callback. The qiov is zeroed
     * so the freeing in mirror_iteration_done is a nop. */
    op = g_new0(MirrorOp, 1);
    op->s = s;
    op->offset = offset;
    op->bytes = bytes;

    s->in_flight++;
    s->bytes_in_flight += bytes;
    if (is_discard) {
        blk_aio_pdiscard(s->target, offset,
                         op->bytes, mirror_write_complete, op);
    } else {
        blk_aio_pwrite_zeroes(s->target, offset,
                              op->bytes, s->unmap ? BDRV_REQ_MAY_UNMAP : 0,
                              mirror_write_complete, op);
    }
}

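/* One pass of the copy loop: pick the first dirty chunk from the bitmap
 * iterator, extend it with consecutive dirty chunks up to buf_size, then
 * copy, zero or discard each extent according to its block status. Returns
 * the delay (in ns) the caller should sleep for rate limiting. */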
static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
{
    BlockDriverState *source = s->source;
    int64_t offset, first_chunk;
    uint64_t delay_ns = 0;
    /* At least the first dirty chunk is mirrored in one iteration. */
    int nb_chunks = 1;
    bool write_zeroes_ok = bdrv_can_write_zeroes_with_unmap(blk_bs(s->target));
    int max_io_bytes = MAX(s->buf_size / MAX_IN_FLIGHT, MAX_IO_BYTES);

    bdrv_dirty_bitmap_lock(s->dirty_bitmap);
    offset = bdrv_dirty_iter_next(s->dbi);
    if (offset < 0) {
        bdrv_set_dirty_iter(s->dbi, 0);
        offset = bdrv_dirty_iter_next(s->dbi);
        trace_mirror_restart_iter(s, bdrv_get_dirty_count(s->dirty_bitmap));
        assert(offset >= 0);
    }
    bdrv_dirty_bitmap_unlock(s->dirty_bitmap);

    first_chunk = offset / s->granularity;
    while (test_bit(first_chunk, s->in_flight_bitmap)) {
        trace_mirror_yield_in_flight(s, offset, s->in_flight);
        mirror_wait_for_io(s);
    }

    job_pause_point(&s->common.job);

    /* Find the number of consecutive dirty chunks following the first dirty
     * one, and wait for in flight requests in them. */
    bdrv_dirty_bitmap_lock(s->dirty_bitmap);
    while (nb_chunks * s->granularity < s->buf_size) {
        int64_t next_dirty;
        int64_t next_offset = offset + nb_chunks * s->granularity;
        int64_t next_chunk = next_offset / s->granularity;
        if (next_offset >= s->bdev_length ||
            !bdrv_get_dirty_locked(source, s->dirty_bitmap, next_offset)) {
            break;
        }
        if (test_bit(next_chunk, s->in_flight_bitmap)) {
            break;
        }

        next_dirty = bdrv_dirty_iter_next(s->dbi);
        if (next_dirty > next_offset || next_dirty < 0) {
            /* The bitmap iterator's cache is stale, refresh it */
            bdrv_set_dirty_iter(s->dbi, next_offset);
            next_dirty = bdrv_dirty_iter_next(s->dbi);
        }
        assert(next_dirty == next_offset);
        nb_chunks++;
    }

    /* Clear dirty bits before querying the block status, because
     * calling bdrv_block_status_above could yield - if some blocks are
     * marked dirty in this window, we need to know.
     */
    bdrv_reset_dirty_bitmap_locked(s->dirty_bitmap, offset,
                                   nb_chunks * s->granularity);
    bdrv_dirty_bitmap_unlock(s->dirty_bitmap);

    bitmap_set(s->in_flight_bitmap, offset / s->granularity, nb_chunks);
    while (nb_chunks > 0 && offset < s->bdev_length) {
        int ret;
        int64_t io_bytes;
        int64_t io_bytes_acct;
        enum MirrorMethod {
            MIRROR_METHOD_COPY,
            MIRROR_METHOD_ZERO,
            MIRROR_METHOD_DISCARD
        } mirror_method = MIRROR_METHOD_COPY;

        assert(!(offset % s->granularity));
        ret = bdrv_block_status_above(source, NULL, offset,
                                      nb_chunks * s->granularity,
                                      &io_bytes, NULL, NULL);
        if (ret < 0) {
            io_bytes = MIN(nb_chunks * s->granularity, max_io_bytes);
        } else if (ret & BDRV_BLOCK_DATA) {
            io_bytes = MIN(io_bytes, max_io_bytes);
        }

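        /* Prefer zeroing/discarding over copying only when the zeroed or
         * unallocated range covers whole target clusters; a partially
         * covered cluster has to be copied so the rest of the cluster on
         * the target keeps valid data. */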
        io_bytes -= io_bytes % s->granularity;
        if (io_bytes < s->granularity) {
            io_bytes = s->granularity;
        } else if (ret >= 0 && !(ret & BDRV_BLOCK_DATA)) {
            int64_t target_offset;
            int64_t target_bytes;
            bdrv_round_to_clusters(blk_bs(s->target), offset, io_bytes,
                                   &target_offset, &target_bytes);
            if (target_offset == offset &&
                target_bytes == io_bytes) {
                mirror_method = ret & BDRV_BLOCK_ZERO ?
                                    MIRROR_METHOD_ZERO :
                                    MIRROR_METHOD_DISCARD;
            }
        }

        while (s->in_flight >= MAX_IN_FLIGHT) {
            trace_mirror_yield_in_flight(s, offset, s->in_flight);
            mirror_wait_for_io(s);
        }

        if (s->ret < 0) {
            return 0;
        }

        io_bytes = mirror_clip_bytes(s, offset, io_bytes);
        switch (mirror_method) {
        case MIRROR_METHOD_COPY:
            io_bytes = io_bytes_acct = mirror_do_read(s, offset, io_bytes);
            break;
        case MIRROR_METHOD_ZERO:
        case MIRROR_METHOD_DISCARD:
            mirror_do_zero_or_discard(s, offset, io_bytes,
                                      mirror_method == MIRROR_METHOD_DISCARD);
            if (write_zeroes_ok) {
                io_bytes_acct = 0;
            } else {
                io_bytes_acct = io_bytes;
            }
            break;
        default:
            abort();
        }
        assert(io_bytes);
        offset += io_bytes;
        nb_chunks -= DIV_ROUND_UP(io_bytes, s->granularity);
        delay_ns = block_job_ratelimit_get_delay(&s->common, io_bytes_acct);
    }
    return delay_ns;
}

static void mirror_free_init(MirrorBlockJob *s)
{
    int granularity = s->granularity;
    size_t buf_size = s->buf_size;
    uint8_t *buf = s->buf;

    assert(s->buf_free_count == 0);
    QSIMPLEQ_INIT(&s->buf_free);
    while (buf_size != 0) {
        MirrorBuffer *cur = (MirrorBuffer *)buf;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, cur, next);
        s->buf_free_count++;
        buf_size -= granularity;
        buf += granularity;
    }
}

/* This is also used for the .pause callback. There is no matching
 * mirror_resume() because mirror_run() will begin iterating again
 * when the job is resumed.
 */
static void mirror_wait_for_all_io(MirrorBlockJob *s)
{
    while (s->in_flight > 0) {
        mirror_wait_for_io(s);
    }
}

typedef struct {
    int ret;
} MirrorExitData;

static void mirror_exit(Job *job, void *opaque)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
    BlockJob *bjob = &s->common;
    MirrorExitData *data = opaque;
    AioContext *replace_aio_context = NULL;
    BlockDriverState *src = s->source;
    BlockDriverState *target_bs = blk_bs(s->target);
    BlockDriverState *mirror_top_bs = s->mirror_top_bs;
    Error *local_err = NULL;

    bdrv_release_dirty_bitmap(src, s->dirty_bitmap);

    /* Make sure that the source BDS doesn't go away before we called
     * block_job_completed(). */
    bdrv_ref(src);
    bdrv_ref(mirror_top_bs);
    bdrv_ref(target_bs);

    /* Remove target parent that still uses BLK_PERM_WRITE/RESIZE before
     * inserting target_bs at s->to_replace, where we might not be able to get
     * these permissions.
     *
     * Note that blk_unref() alone doesn't necessarily drop permissions because
     * we might be running nested inside mirror_drain(), which takes an extra
     * reference, so use an explicit blk_set_perm() first. */
    blk_set_perm(s->target, 0, BLK_PERM_ALL, &error_abort);
    blk_unref(s->target);
    s->target = NULL;

    /* We don't access the source any more. Dropping any WRITE/RESIZE is
     * required before it could become a backing file of target_bs. */
    bdrv_child_try_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL,
                            &error_abort);
    if (s->backing_mode == MIRROR_SOURCE_BACKING_CHAIN) {
        BlockDriverState *backing = s->is_none_mode ? src : s->base;
        if (backing_bs(target_bs) != backing) {
            bdrv_set_backing_hd(target_bs, backing, &local_err);
            if (local_err) {
                error_report_err(local_err);
                data->ret = -EPERM;
            }
        }
    }

    if (s->to_replace) {
        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);
    }

    if (s->should_complete && data->ret == 0) {
        BlockDriverState *to_replace = src;
        if (s->to_replace) {
            to_replace = s->to_replace;
        }

        if (bdrv_get_flags(target_bs) != bdrv_get_flags(to_replace)) {
            bdrv_reopen(target_bs, bdrv_get_flags(to_replace), NULL);
        }

        /* The mirror job has no requests in flight any more, but we need to
         * drain potential other users of the BDS before changing the graph. */
        bdrv_drained_begin(target_bs);
        bdrv_replace_node(to_replace, target_bs, &local_err);
        bdrv_drained_end(target_bs);
        if (local_err) {
            error_report_err(local_err);
            data->ret = -EPERM;
        }
    }
    if (s->to_replace) {
        bdrv_op_unblock_all(s->to_replace, s->replace_blocker);
        error_free(s->replace_blocker);
        bdrv_unref(s->to_replace);
    }
    if (replace_aio_context) {
        aio_context_release(replace_aio_context);
    }
    g_free(s->replaces);
    bdrv_unref(target_bs);

    /* Remove the mirror filter driver from the graph. Before this, get rid of
     * the blockers on the intermediate nodes so that the resulting state is
     * valid. Also give up permissions on mirror_top_bs->backing, which might
     * block the removal. */
    block_job_remove_all_bdrv(bjob);
    bdrv_child_try_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL,
                            &error_abort);
    bdrv_replace_node(mirror_top_bs, backing_bs(mirror_top_bs), &error_abort);

    /* We just changed the BDS the job BB refers to (with either or both of the
     * bdrv_replace_node() calls), so switch the BB back so the cleanup does
     * the right thing. We don't need any permissions any more now. */
    blk_remove_bs(bjob->blk);
    blk_set_perm(bjob->blk, 0, BLK_PERM_ALL, &error_abort);
    blk_insert_bs(bjob->blk, mirror_top_bs, &error_abort);

    block_job_completed(&s->common, data->ret);

    g_free(data);
    bdrv_drained_end(src);
    bdrv_unref(mirror_top_bs);
    bdrv_unref(src);
}

static void mirror_throttle(MirrorBlockJob *s)
{
    int64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

    if (now - s->last_pause_ns > BLOCK_JOB_SLICE_TIME) {
        s->last_pause_ns = now;
        job_sleep_ns(&s->common.job, 0);
    } else {
        job_pause_point(&s->common.job);
    }
}

static int coroutine_fn mirror_dirty_init(MirrorBlockJob *s)
{
    int64_t offset;
    BlockDriverState *base = s->base;
    BlockDriverState *bs = s->source;
    BlockDriverState *target_bs = blk_bs(s->target);
    int ret;
    int64_t count;

    if (base == NULL && !bdrv_has_zero_init(target_bs)) {
        if (!bdrv_can_write_zeroes_with_unmap(target_bs)) {
            bdrv_set_dirty_bitmap(s->dirty_bitmap, 0, s->bdev_length);
            return 0;
        }

        s->initial_zeroing_ongoing = true;
        for (offset = 0; offset < s->bdev_length; ) {
            int bytes = MIN(s->bdev_length - offset,
                            QEMU_ALIGN_DOWN(INT_MAX, s->granularity));

            mirror_throttle(s);

            if (job_is_cancelled(&s->common.job)) {
                s->initial_zeroing_ongoing = false;
                return 0;
            }

            if (s->in_flight >= MAX_IN_FLIGHT) {
                trace_mirror_yield(s, UINT64_MAX, s->buf_free_count,
                                   s->in_flight);
                mirror_wait_for_io(s);
                continue;
            }

            mirror_do_zero_or_discard(s, offset, bytes, false);
            offset += bytes;
        }

        mirror_wait_for_all_io(s);
        s->initial_zeroing_ongoing = false;
    }

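    /* Only ranges that are allocated in the chain above @base (or anywhere,
     * when base is NULL) need to enter the dirty bitmap: unallocated ranges
     * either read as zeroes on a zero-initialized target, or are provided by
     * the backing chain, so they can be skipped. */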
    /* First part, loop on the sectors and initialize the dirty bitmap.  */
    for (offset = 0; offset < s->bdev_length; ) {
        /* Just to make sure we are not exceeding int limit. */
        int bytes = MIN(s->bdev_length - offset,
                        QEMU_ALIGN_DOWN(INT_MAX, s->granularity));

        mirror_throttle(s);

        if (job_is_cancelled(&s->common.job)) {
            return 0;
        }

        ret = bdrv_is_allocated_above(bs, base, offset, bytes, &count);
        if (ret < 0) {
            return ret;
        }

        assert(count);
        if (ret == 1) {
            bdrv_set_dirty_bitmap(s->dirty_bitmap, offset, count);
        }
        offset += count;
    }
    return 0;
}

/* Called when going out of the streaming phase to flush the bulk of the
 * data to the medium, or just before completing.
 */
static int mirror_flush(MirrorBlockJob *s)
{
    int ret = blk_flush(s->target);
    if (ret < 0) {
        if (mirror_error_action(s, false, -ret) == BLOCK_ERROR_ACTION_REPORT) {
            s->ret = ret;
        }
    }
    return ret;
}

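/* The main job coroutine. It runs in two phases: a bulk (streaming) phase
 * that keeps copying until the dirty bitmap drains to zero and
 * BLOCK_JOB_READY is emitted (s->synced), and a second phase that keeps the
 * target in sync with new guest writes until the job is completed or
 * cancelled. */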
static void coroutine_fn mirror_run(void *opaque)
{
    MirrorBlockJob *s = opaque;
    MirrorExitData *data;
    BlockDriverState *bs = s->source;
    BlockDriverState *target_bs = blk_bs(s->target);
    bool need_drain = true;
    int64_t length;
    BlockDriverInfo bdi;
    char backing_filename[2]; /* we only need 2 characters because we are only
                                 checking for a NULL string */
    int ret = 0;

    if (job_is_cancelled(&s->common.job)) {
        goto immediate_exit;
    }

    s->bdev_length = bdrv_getlength(bs);
    if (s->bdev_length < 0) {
        ret = s->bdev_length;
        goto immediate_exit;
    }

    /* Active commit must resize the base image if its size differs from the
     * active layer. */
    if (s->base == blk_bs(s->target)) {
        int64_t base_length;

        base_length = blk_getlength(s->target);
        if (base_length < 0) {
            ret = base_length;
            goto immediate_exit;
        }

        if (s->bdev_length > base_length) {
            ret = blk_truncate(s->target, s->bdev_length, PREALLOC_MODE_OFF,
                               NULL);
            if (ret < 0) {
                goto immediate_exit;
            }
        }
    }

    if (s->bdev_length == 0) {
        /* Report BLOCK_JOB_READY and wait for complete. */
        block_job_event_ready(&s->common);
        s->synced = true;
        while (!job_is_cancelled(&s->common.job) && !s->should_complete) {
            block_job_yield(&s->common);
        }
        s->common.job.cancelled = false;
        goto immediate_exit;
    }

    length = DIV_ROUND_UP(s->bdev_length, s->granularity);
    s->in_flight_bitmap = bitmap_new(length);

    /* If we have no backing file yet in the destination, we cannot let
     * the destination do COW.  Instead, we copy sectors around the
     * dirty data if needed.  We need a bitmap to do that.
     */
    bdrv_get_backing_filename(target_bs, backing_filename,
                              sizeof(backing_filename));
    if (!bdrv_get_info(target_bs, &bdi) && bdi.cluster_size) {
        s->target_cluster_size = bdi.cluster_size;
    } else {
        s->target_cluster_size = BDRV_SECTOR_SIZE;
    }
    if (backing_filename[0] && !target_bs->backing &&
        s->granularity < s->target_cluster_size) {
        s->buf_size = MAX(s->buf_size, s->target_cluster_size);
        s->cow_bitmap = bitmap_new(length);
    }
    s->max_iov = MIN(bs->bl.max_iov, target_bs->bl.max_iov);

    s->buf = qemu_try_blockalign(bs, s->buf_size);
    if (s->buf == NULL) {
        ret = -ENOMEM;
        goto immediate_exit;
    }

    mirror_free_init(s);

    s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    if (!s->is_none_mode) {
        ret = mirror_dirty_init(s);
        if (ret < 0 || job_is_cancelled(&s->common.job)) {
            goto immediate_exit;
        }
    }

    assert(!s->dbi);
    s->dbi = bdrv_dirty_iter_new(s->dirty_bitmap);
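    /* Main copy loop: each pass either starts more I/O via mirror_iteration()
     * or waits for some in-flight I/O to complete, and checks whether the
     * job can transition to ready and, later, to completion. */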
    for (;;) {
        uint64_t delay_ns = 0;
        int64_t cnt, delta;
        bool should_complete;

        if (s->ret < 0) {
            ret = s->ret;
            goto immediate_exit;
        }

        job_pause_point(&s->common.job);

        cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        /* cnt is the number of dirty bytes remaining and s->bytes_in_flight is
         * the number of bytes currently being processed; together those are
         * the current remaining operation length */
        block_job_progress_set_remaining(&s->common, s->bytes_in_flight + cnt);

        /* Note that even when no rate limit is applied we need to yield
         * periodically with no pending I/O so that bdrv_drain_all() returns.
         * We do so every BLOCK_JOB_SLICE_TIME nanoseconds, or when there is
         * an error, or when the source is clean, whichever comes first. */
        delta = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - s->last_pause_ns;
        if (delta < BLOCK_JOB_SLICE_TIME &&
            s->common.iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
            if (s->in_flight >= MAX_IN_FLIGHT || s->buf_free_count == 0 ||
                (cnt == 0 && s->in_flight > 0)) {
                trace_mirror_yield(s, cnt, s->buf_free_count, s->in_flight);
                mirror_wait_for_io(s);
                continue;
            } else if (cnt != 0) {
                delay_ns = mirror_iteration(s);
            }
        }

        should_complete = false;
        if (s->in_flight == 0 && cnt == 0) {
            trace_mirror_before_flush(s);
            if (!s->synced) {
                if (mirror_flush(s) < 0) {
                    /* Go check s->ret. */
                    continue;
                }
                /* We're out of the streaming phase.  From now on, if the job
                 * is cancelled we will actually complete all pending I/O and
                 * report completion.  This way, block-job-cancel will leave
                 * the target in a consistent state.
                 */
                block_job_event_ready(&s->common);
                s->synced = true;
            }

            should_complete = s->should_complete ||
                job_is_cancelled(&s->common.job);
            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        }

        if (cnt == 0 && should_complete) {
            /* The dirty bitmap is not updated while operations are pending.
             * If we're about to exit, wait for pending operations before
             * calling bdrv_get_dirty_count(bs), or we may exit while the
             * source has dirty data to copy!
             *
             * Note that I/O can be submitted by the guest while
             * mirror_populate runs, so pause it now.  Before deciding
             * whether to switch to target check one last time if I/O has
             * come in the meanwhile, and if not flush the data to disk.
             */
            trace_mirror_before_drain(s, cnt);

            bdrv_drained_begin(bs);
            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
            if (cnt > 0 || mirror_flush(s) < 0) {
                bdrv_drained_end(bs);
                continue;
            }

            /* The two disks are in sync.  Exit and report successful
             * completion.
             */
            assert(QLIST_EMPTY(&bs->tracked_requests));
            s->common.job.cancelled = false;
            need_drain = false;
            break;
        }

        ret = 0;

        if (s->synced && !should_complete) {
            delay_ns = (s->in_flight == 0 &&
                        cnt == 0 ? BLOCK_JOB_SLICE_TIME : 0);
        }
        trace_mirror_before_sleep(s, cnt, s->synced, delay_ns);
        job_sleep_ns(&s->common.job, delay_ns);
        if (job_is_cancelled(&s->common.job) &&
            (!s->synced || s->common.job.force_cancel))
        {
            break;
        }
        s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    }

immediate_exit:
    if (s->in_flight > 0) {
        /* We get here only if something went wrong.  Either the job failed,
         * or it was cancelled prematurely so that we do not guarantee that
         * the target is a copy of the source.
         */
        assert(ret < 0 || ((s->common.job.force_cancel || !s->synced) &&
               job_is_cancelled(&s->common.job)));
        assert(need_drain);
        mirror_wait_for_all_io(s);
    }

    assert(s->in_flight == 0);
    qemu_vfree(s->buf);
    g_free(s->cow_bitmap);
    g_free(s->in_flight_bitmap);
    bdrv_dirty_iter_free(s->dbi);

    data = g_malloc(sizeof(*data));
    data->ret = ret;

    if (need_drain) {
        bdrv_drained_begin(bs);
    }
    job_defer_to_main_loop(&s->common.job, mirror_exit, data);
}

static void mirror_complete(BlockJob *job, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
    BlockDriverState *target;

    target = blk_bs(s->target);

    if (!s->synced) {
        error_setg(errp, "The active block job '%s' cannot be completed",
                   job->job.id);
        return;
    }

    if (s->backing_mode == MIRROR_OPEN_BACKING_CHAIN) {
        int ret;

        assert(!target->backing);
        ret = bdrv_open_backing_file(target, NULL, "backing", errp);
        if (ret < 0) {
            return;
        }
    }

    /* block all operations on to_replace bs */
    if (s->replaces) {
        AioContext *replace_aio_context;

        s->to_replace = bdrv_find_node(s->replaces);
        if (!s->to_replace) {
            error_setg(errp, "Node name '%s' not found", s->replaces);
            return;
        }

        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);

        /* TODO Translate this into permission system. Current definition of
         * GRAPH_MOD would require to request it for the parents; they might
         * not even be BlockDriverStates, however, so a BdrvChild can't address
         * them. May need redefinition of GRAPH_MOD. */
        error_setg(&s->replace_blocker,
                   "block device is in use by block-job-complete");
        bdrv_op_block_all(s->to_replace, s->replace_blocker);
        bdrv_ref(s->to_replace);

        aio_context_release(replace_aio_context);
    }

    s->should_complete = true;
    block_job_enter(&s->common);
}

static void mirror_pause(Job *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);

    mirror_wait_for_all_io(s);
}

static void mirror_attached_aio_context(BlockJob *job, AioContext *new_context)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    blk_set_aio_context(s->target, new_context);
}

static void mirror_drain(BlockJob *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    /* Need to keep a reference in case blk_drain triggers execution
     * of mirror_complete...
     */
    if (s->target) {
        BlockBackend *target = s->target;
        blk_ref(target);
        blk_drain(target);
        blk_unref(target);
    }
}

static const BlockJobDriver mirror_job_driver = {
    .job_driver = {
        .instance_size          = sizeof(MirrorBlockJob),
        .job_type               = JOB_TYPE_MIRROR,
        .free                   = block_job_free,
        .user_resume            = block_job_user_resume,
        .drain                  = block_job_drain,
        .start                  = mirror_run,
        .pause                  = mirror_pause,
    },
    .complete               = mirror_complete,
    .attached_aio_context   = mirror_attached_aio_context,
    .drain                  = mirror_drain,
};

static const BlockJobDriver commit_active_job_driver = {
    .job_driver = {
        .instance_size          = sizeof(MirrorBlockJob),
        .job_type               = JOB_TYPE_COMMIT,
        .free                   = block_job_free,
        .user_resume            = block_job_user_resume,
        .drain                  = block_job_drain,
        .start                  = mirror_run,
        .pause                  = mirror_pause,
    },
    .complete               = mirror_complete,
    .attached_aio_context   = mirror_attached_aio_context,
    .drain                  = mirror_drain,
};

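/* The callbacks below implement the "mirror_top" filter node that the job
 * inserts above the source. Each one simply forwards the request to the
 * filtered child (bs->backing), so guest I/O keeps flowing unchanged while
 * the job owns the node. */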
static int coroutine_fn bdrv_mirror_top_preadv(BlockDriverState *bs,
    uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags)
{
    return bdrv_co_preadv(bs->backing, offset, bytes, qiov, flags);
}

static int coroutine_fn bdrv_mirror_top_pwritev(BlockDriverState *bs,
    uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags)
{
    return bdrv_co_pwritev(bs->backing, offset, bytes, qiov, flags);
}

static int coroutine_fn bdrv_mirror_top_flush(BlockDriverState *bs)
{
    if (bs->backing == NULL) {
        /* we can be here after failed bdrv_append in mirror_start_job */
        return 0;
    }
    return bdrv_co_flush(bs->backing->bs);
}

static int coroutine_fn bdrv_mirror_top_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int bytes, BdrvRequestFlags flags)
{
    return bdrv_co_pwrite_zeroes(bs->backing, offset, bytes, flags);
}

static int coroutine_fn bdrv_mirror_top_pdiscard(BlockDriverState *bs,
    int64_t offset, int bytes)
{
    return bdrv_co_pdiscard(bs->backing->bs, offset, bytes);
}

static void bdrv_mirror_top_refresh_filename(BlockDriverState *bs, QDict *opts)
{
    if (bs->backing == NULL) {
        /* we can be here after failed bdrv_attach_child in
         * bdrv_set_backing_hd */
        return;
    }
    bdrv_refresh_filename(bs->backing->bs);
    pstrcpy(bs->exact_filename, sizeof(bs->exact_filename),
            bs->backing->bs->filename);
}

static void bdrv_mirror_top_close(BlockDriverState *bs)
{
}

static void bdrv_mirror_top_child_perm(BlockDriverState *bs, BdrvChild *c,
                                       const BdrvChildRole *role,
                                       BlockReopenQueue *reopen_queue,
                                       uint64_t perm, uint64_t shared,
                                       uint64_t *nperm, uint64_t *nshared)
{
    /* Must be able to forward guest writes to the real image */
    *nperm = 0;
    if (perm & BLK_PERM_WRITE) {
        *nperm |= BLK_PERM_WRITE;
    }

    *nshared = BLK_PERM_ALL;
}

/* Dummy node that provides consistent read to its users without requiring it
 * from its backing file and that allows writes on the backing file chain. */
static BlockDriver bdrv_mirror_top = {
    .format_name                = "mirror_top",
    .bdrv_co_preadv             = bdrv_mirror_top_preadv,
    .bdrv_co_pwritev            = bdrv_mirror_top_pwritev,
    .bdrv_co_pwrite_zeroes      = bdrv_mirror_top_pwrite_zeroes,
    .bdrv_co_pdiscard           = bdrv_mirror_top_pdiscard,
    .bdrv_co_flush              = bdrv_mirror_top_flush,
    .bdrv_co_block_status       = bdrv_co_block_status_from_backing,
    .bdrv_refresh_filename      = bdrv_mirror_top_refresh_filename,
    .bdrv_close                 = bdrv_mirror_top_close,
    .bdrv_child_perm            = bdrv_mirror_top_child_perm,
};

static void mirror_start_job(const char *job_id, BlockDriverState *bs,
                             int creation_flags, BlockDriverState *target,
                             const char *replaces, int64_t speed,
                             uint32_t granularity, int64_t buf_size,
                             BlockMirrorBackingMode backing_mode,
                             BlockdevOnError on_source_error,
                             BlockdevOnError on_target_error,
                             bool unmap,
                             BlockCompletionFunc *cb,
                             void *opaque,
                             const BlockJobDriver *driver,
                             bool is_none_mode, BlockDriverState *base,
                             bool auto_complete, const char *filter_node_name,
                             bool is_mirror,
                             Error **errp)
{
    MirrorBlockJob *s;
    BlockDriverState *mirror_top_bs;
    bool target_graph_mod;
    bool target_is_backing;
    Error *local_err = NULL;
    int ret;

    if (granularity == 0) {
        granularity = bdrv_get_default_bitmap_granularity(target);
    }

    assert(is_power_of_2(granularity));

    if (buf_size < 0) {
        error_setg(errp, "Invalid parameter 'buf-size'");
        return;
    }

    if (buf_size == 0) {
        buf_size = DEFAULT_MIRROR_BUF_SIZE;
    }

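    /* Rough sketch of the graph this function builds (the exact edges are
     * established by the code below): the mirror_top filter is appended on
     * top of @bs, the job's BlockBackend points at mirror_top, and @target
     * is attached through a separate BlockBackend (s->target):
     *
     *   guest device --> mirror_top --> bs (source)
     *   job BB -------------^
     *   s->target BB --> target
     */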
    /* In the case of active commit, add dummy driver to provide consistent
     * reads on the top, while disabling it in the intermediate nodes, and make
     * the backing chain writable. */
    mirror_top_bs = bdrv_new_open_driver(&bdrv_mirror_top, filter_node_name,
                                         BDRV_O_RDWR, errp);
    if (mirror_top_bs == NULL) {
        return;
    }
    if (!filter_node_name) {
        mirror_top_bs->implicit = true;
    }
    mirror_top_bs->total_sectors = bs->total_sectors;
    mirror_top_bs->supported_write_flags = BDRV_REQ_WRITE_UNCHANGED;
    mirror_top_bs->supported_zero_flags = BDRV_REQ_WRITE_UNCHANGED;
    bdrv_set_aio_context(mirror_top_bs, bdrv_get_aio_context(bs));

    /* bdrv_append takes ownership of the mirror_top_bs reference, need to keep
     * it alive until block_job_create() succeeds even if bs has no parent. */
    bdrv_ref(mirror_top_bs);
    bdrv_drained_begin(bs);
    bdrv_append(mirror_top_bs, bs, &local_err);
    bdrv_drained_end(bs);

    if (local_err) {
        bdrv_unref(mirror_top_bs);
        error_propagate(errp, local_err);
        return;
    }

    /* Make sure that the source is not resized while the job is running */
    s = block_job_create(job_id, driver, NULL, mirror_top_bs,
                         BLK_PERM_CONSISTENT_READ,
                         BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED |
                         BLK_PERM_WRITE | BLK_PERM_GRAPH_MOD, speed,
                         creation_flags, cb, opaque, errp);
    if (!s) {
        goto fail;
    }
    /* The block job now has a reference to this node */
    bdrv_unref(mirror_top_bs);

    s->source = bs;
    s->mirror_top_bs = mirror_top_bs;
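
    /*
     * Editorial sketch of the graph after the bdrv_append() above, assuming
     * a simple backing chain (illustrative only):
     *
     *     guest device and other parents
     *                   |
     *             mirror_top_bs      <-- the job's main node
     *                   | (backing)
     *                  bs            <-- original source
     *                   | (backing)
     *                  ...
     *
     * All former parents of bs were re-parented onto mirror_top_bs, so every
     * guest request now passes through the filter functions defined above.
     */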
    /* No resize for the target either; while the mirror is still running, a
     * consistent read isn't necessarily possible. We could possibly allow
     * writes and graph modifications, though it would likely defeat the
     * purpose of a mirror, so leave them blocked for now.
     *
     * In the case of active commit, things look a bit different, though,
     * because the target is an already populated backing file in active use.
     * We can allow anything except resize there. */
    target_is_backing = bdrv_chain_contains(bs, target);
    target_graph_mod = (backing_mode != MIRROR_LEAVE_BACKING_CHAIN);
    s->target = blk_new(BLK_PERM_WRITE | BLK_PERM_RESIZE |
                        (target_graph_mod ? BLK_PERM_GRAPH_MOD : 0),
                        BLK_PERM_WRITE_UNCHANGED |
                        (target_is_backing ? BLK_PERM_CONSISTENT_READ |
                                             BLK_PERM_WRITE |
                                             BLK_PERM_GRAPH_MOD : 0));
    ret = blk_insert_bs(s->target, target, errp);
    if (ret < 0) {
        goto fail;
    }
    if (is_mirror) {
        /* XXX: Mirror target could be a NBD server of target QEMU in the case
         * of non-shared block migration. To allow migration completion, we
         * have to allow "inactivate" of the target BB. When that happens, we
         * know the job is drained, and the vcpus are stopped, so no write
         * operation will be performed. Block layer already has assertions to
         * ensure that. */
        blk_set_force_allow_inactivate(s->target);
    }

    s->replaces = g_strdup(replaces);
    s->on_source_error = on_source_error;
    s->on_target_error = on_target_error;
    s->is_none_mode = is_none_mode;
    s->backing_mode = backing_mode;
    s->base = base;
    s->granularity = granularity;
    s->buf_size = ROUND_UP(buf_size, granularity);
    s->unmap = unmap;
    if (auto_complete) {
        s->should_complete = true;
    }

    s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity, NULL, errp);
    if (!s->dirty_bitmap) {
        goto fail;
    }

    /* Required permissions are already taken with blk_new() */
    block_job_add_bdrv(&s->common, "target", target, 0, BLK_PERM_ALL,
                       &error_abort);
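
    /*
     * Worked example (illustrative): for a plain drive-mirror to a fresh
     * target with backing_mode != MIRROR_LEAVE_BACKING_CHAIN, the blk_new()
     * call above amounts to
     *
     *     blk_new(BLK_PERM_WRITE | BLK_PERM_RESIZE | BLK_PERM_GRAPH_MOD,
     *             BLK_PERM_WRITE_UNCHANGED);
     *
     * For active commit (target_is_backing == true) the shared mask also
     * includes CONSISTENT_READ, WRITE and GRAPH_MOD, since the target is a
     * backing file that is still in active use by the guest.
     */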
    /* In commit_active_start() all intermediate nodes disappear, so
     * any jobs in them must be blocked */
    if (target_is_backing) {
        BlockDriverState *iter;
        for (iter = backing_bs(bs); iter != target; iter = backing_bs(iter)) {
            /* XXX BLK_PERM_WRITE needs to be allowed so we don't block
             * ourselves at s->base (if writes are blocked for a node, they are
             * also blocked for its backing file). The other options would be a
             * second filter driver above s->base (== target). */
            ret = block_job_add_bdrv(&s->common, "intermediate node", iter, 0,
                                     BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE,
                                     errp);
            if (ret < 0) {
                goto fail;
            }
        }
    }

    trace_mirror_start(bs, s, opaque);
    job_start(&s->common.job);
    return;

fail:
    if (s) {
        /* Make sure this BDS does not go away until we have completed the
         * graph changes below */
        bdrv_ref(mirror_top_bs);

        g_free(s->replaces);
        blk_unref(s->target);
        job_early_fail(&s->common.job);
    }

    bdrv_child_try_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL,
                            &error_abort);
    bdrv_replace_node(mirror_top_bs, backing_bs(mirror_top_bs), &error_abort);

    bdrv_unref(mirror_top_bs);
}
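
/*
 * The two public entry points below both funnel into mirror_start_job().
 * A hypothetical drive-mirror caller (sketch only; the real call sites live
 * in blockdev.c and are more involved) might look like:
 *
 *     mirror_start("job0", bs, target_bs,
 *                  NULL,                    // replaces: no node replacement
 *                  0, 0, 0,                 // speed/granularity/buf_size:
 *                                           // use the defaults
 *                  MIRROR_SYNC_MODE_FULL, MIRROR_OPEN_BACKING_CHAIN,
 *                  BLOCKDEV_ON_ERROR_REPORT, BLOCKDEV_ON_ERROR_REPORT,
 *                  true,                    // unmap
 *                  NULL,                    // filter_node_name: autogenerate
 *                  &local_err);
 */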
void mirror_start(const char *job_id, BlockDriverState *bs,
                  BlockDriverState *target, const char *replaces,
                  int64_t speed, uint32_t granularity, int64_t buf_size,
                  MirrorSyncMode mode, BlockMirrorBackingMode backing_mode,
                  BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  bool unmap, const char *filter_node_name, Error **errp)
{
    bool is_none_mode;
    BlockDriverState *base;

    if (mode == MIRROR_SYNC_MODE_INCREMENTAL) {
        error_setg(errp, "Sync mode 'incremental' not supported");
        return;
    }
    is_none_mode = mode == MIRROR_SYNC_MODE_NONE;
    base = mode == MIRROR_SYNC_MODE_TOP ? backing_bs(bs) : NULL;
    mirror_start_job(job_id, bs, JOB_DEFAULT, target, replaces,
                     speed, granularity, buf_size, backing_mode,
                     on_source_error, on_target_error, unmap, NULL, NULL,
                     &mirror_job_driver, is_none_mode, base, false,
                     filter_node_name, true, errp);
}

void commit_active_start(const char *job_id, BlockDriverState *bs,
                         BlockDriverState *base, int creation_flags,
                         int64_t speed, BlockdevOnError on_error,
                         const char *filter_node_name,
                         BlockCompletionFunc *cb, void *opaque,
                         bool auto_complete, Error **errp)
{
    int orig_base_flags;
    Error *local_err = NULL;

    orig_base_flags = bdrv_get_flags(base);

    if (bdrv_reopen(base, bs->open_flags, errp)) {
        return;
    }

    mirror_start_job(job_id, bs, creation_flags, base, NULL, speed, 0, 0,
                     MIRROR_LEAVE_BACKING_CHAIN,
                     on_error, on_error, true, cb, opaque,
                     &commit_active_job_driver, false, base, auto_complete,
                     filter_node_name, false, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto error_restore_flags;
    }

    return;

error_restore_flags:
    /* ignore error and errp for bdrv_reopen, because we want to propagate
     * the original error */
    bdrv_reopen(base, orig_base_flags, NULL);
    return;
}
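
/*
 * Hypothetical usage sketch for active commit (illustrative; see the QMP
 * block-commit handler for the real call site): committing the active layer
 * into its backing image, with manual completion:
 *
 *     commit_active_start("commit0", top_bs, base_bs, JOB_DEFAULT,
 *                         0,                        // speed: unlimited
 *                         BLOCKDEV_ON_ERROR_REPORT,
 *                         NULL,                     // default filter name
 *                         NULL, NULL,               // no cb/opaque
 *                         false,                    // no auto-complete
 *                         &local_err);
 *
 * Note that on any failure after the bdrv_reopen(), the function restores
 * base's original open flags before returning, so the error path leaves the
 * graph as it found it.
 */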