/*
 * Image mirroring
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Paolo Bonzini  <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "trace.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qemu/ratelimit.h"
#include "qemu/bitmap.h"

#define SLICE_TIME    100000000ULL /* ns */
#define MAX_IN_FLIGHT 16
#define MAX_IO_SECTORS ((1 << 20) >> BDRV_SECTOR_BITS) /* 1 MiB */
#define DEFAULT_MIRROR_BUF_SIZE \
    (MAX_IN_FLIGHT * MAX_IO_SECTORS * BDRV_SECTOR_SIZE)

/* The mirroring buffer is a list of granularity-sized chunks.
 * Free chunks are organized in a list.
 */
typedef struct MirrorBuffer {
    QSIMPLEQ_ENTRY(MirrorBuffer) next;
} MirrorBuffer;

typedef struct MirrorBlockJob {
    BlockJob common;
    RateLimit limit;
    BlockBackend *target;
    BlockDriverState *mirror_top_bs;
    BlockDriverState *source;
    BlockDriverState *base;

    /* The name of the graph node to replace */
    char *replaces;
    /* The BDS to replace */
    BlockDriverState *to_replace;
    /* Used to block operations on the drive-mirror-replace target */
    Error *replace_blocker;
    bool is_none_mode;
    BlockMirrorBackingMode backing_mode;
    BlockdevOnError on_source_error, on_target_error;
    bool synced;
    bool should_complete;
    int64_t granularity;
    size_t buf_size;
    int64_t bdev_length;
    unsigned long *cow_bitmap;
    BdrvDirtyBitmap *dirty_bitmap;
    BdrvDirtyBitmapIter *dbi;
    uint8_t *buf;
    QSIMPLEQ_HEAD(, MirrorBuffer) buf_free;
    int buf_free_count;

    uint64_t last_pause_ns;
    unsigned long *in_flight_bitmap;
    int in_flight;
    int64_t sectors_in_flight;
    int ret;
    bool unmap;
    bool waiting_for_io;
    int target_cluster_sectors;
    int max_iov;
    bool initial_zeroing_ongoing;
} MirrorBlockJob;

typedef struct MirrorOp {
    MirrorBlockJob *s;
    QEMUIOVector qiov;
    int64_t sector_num;
    int nb_sectors;
} MirrorOp;

static BlockErrorAction mirror_error_action(MirrorBlockJob *s, bool read,
                                            int error)
{
    s->synced = false;
    if (read) {
        return block_job_error_action(&s->common, s->on_source_error,
                                      true, error);
    } else {
        return block_job_error_action(&s->common, s->on_target_error,
                                      false, error);
    }
}

static void mirror_iteration_done(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;
    struct iovec *iov;
    int64_t chunk_num;
    int i, nb_chunks, sectors_per_chunk;

    trace_mirror_iteration_done(s, op->sector_num, op->nb_sectors, ret);

    s->in_flight--;
    s->sectors_in_flight -= op->nb_sectors;
    iov = op->qiov.iov;
    for (i = 0; i < op->qiov.niov; i++) {
        MirrorBuffer *buf = (MirrorBuffer *) iov[i].iov_base;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, buf, next);
        s->buf_free_count++;
    }

    sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
    chunk_num = op->sector_num / sectors_per_chunk;
    nb_chunks = DIV_ROUND_UP(op->nb_sectors, sectors_per_chunk);
    bitmap_clear(s->in_flight_bitmap, chunk_num, nb_chunks);
    if (ret >= 0) {
        if (s->cow_bitmap) {
            bitmap_set(s->cow_bitmap, chunk_num, nb_chunks);
        }
        if (!s->initial_zeroing_ongoing) {
            s->common.offset += (uint64_t)op->nb_sectors * BDRV_SECTOR_SIZE;
        }
    }
    qemu_iovec_destroy(&op->qiov);
    g_free(op);

    if (s->waiting_for_io) {
        qemu_coroutine_enter(s->common.co);
    }
}

static void mirror_write_complete(void *opaque, int ret)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;

    aio_context_acquire(blk_get_aio_context(s->common.blk));
    if (ret < 0) {
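        /* A failed write leaves this range inconsistent on the target:
         * re-mark it dirty below so that a later iteration retries it. */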
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->sector_num, op->nb_sectors);
        action = mirror_error_action(s, false, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }
    }
    mirror_iteration_done(op, ret);
    aio_context_release(blk_get_aio_context(s->common.blk));
}

static void mirror_read_complete(void *opaque, int ret)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;

    aio_context_acquire(blk_get_aio_context(s->common.blk));
    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->sector_num, op->nb_sectors);
        action = mirror_error_action(s, true, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }

        mirror_iteration_done(op, ret);
    } else {
        blk_aio_pwritev(s->target, op->sector_num * BDRV_SECTOR_SIZE, &op->qiov,
                        0, mirror_write_complete, op);
    }
    aio_context_release(blk_get_aio_context(s->common.blk));
}

static inline void mirror_clip_sectors(MirrorBlockJob *s,
                                       int64_t sector_num,
                                       int *nb_sectors)
{
    *nb_sectors = MIN(*nb_sectors,
                      s->bdev_length / BDRV_SECTOR_SIZE - sector_num);
}

/* Round sector_num and/or nb_sectors to target cluster if COW is needed, and
 * return the offset of the adjusted tail sector against original. */
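/* For example: with 512-byte sectors and a 64 KiB target cluster (i.e. 128
 * sectors), a request for sectors [100, 120) whose first or last chunk still
 * needs COW is widened to [0, 128), and 8 is returned because the tail moved
 * from 120 to 128. */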
static int mirror_cow_align(MirrorBlockJob *s,
                            int64_t *sector_num,
                            int *nb_sectors)
{
    bool need_cow;
    int ret = 0;
    int chunk_sectors = s->granularity >> BDRV_SECTOR_BITS;
    int64_t align_sector_num = *sector_num;
    int align_nb_sectors = *nb_sectors;
    int max_sectors = chunk_sectors * s->max_iov;

    need_cow = !test_bit(*sector_num / chunk_sectors, s->cow_bitmap);
    need_cow |= !test_bit((*sector_num + *nb_sectors - 1) / chunk_sectors,
                          s->cow_bitmap);
    if (need_cow) {
        bdrv_round_sectors_to_clusters(blk_bs(s->target), *sector_num,
                                       *nb_sectors, &align_sector_num,
                                       &align_nb_sectors);
    }

    if (align_nb_sectors > max_sectors) {
        align_nb_sectors = max_sectors;
        if (need_cow) {
            align_nb_sectors = QEMU_ALIGN_DOWN(align_nb_sectors,
                                               s->target_cluster_sectors);
        }
    }
    /* Clipping may result in align_nb_sectors unaligned to chunk boundary, but
     * that doesn't matter because it's already the end of source image. */
    mirror_clip_sectors(s, align_sector_num, &align_nb_sectors);

    ret = align_sector_num + align_nb_sectors - (*sector_num + *nb_sectors);
    *sector_num = align_sector_num;
    *nb_sectors = align_nb_sectors;
    assert(ret >= 0);
    return ret;
}

static inline void mirror_wait_for_io(MirrorBlockJob *s)
{
    assert(!s->waiting_for_io);
    s->waiting_for_io = true;
    qemu_coroutine_yield();
    s->waiting_for_io = false;
}

/* Submit async read while handling COW.
 * Returns: The number of sectors copied after and including sector_num,
 *          excluding any sectors copied prior to sector_num due to alignment.
 *          This will be nb_sectors if no alignment is necessary, or
 *          (new_end - sector_num) if tail is rounded up or down due to
 *          alignment or buffer limit.
 */
static int mirror_do_read(MirrorBlockJob *s, int64_t sector_num,
                          int nb_sectors)
{
    BlockBackend *source = s->common.blk;
    int sectors_per_chunk, nb_chunks;
    int ret;
    MirrorOp *op;
    int max_sectors;

    sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
    max_sectors = sectors_per_chunk * s->max_iov;

    /* We can only handle as much as buf_size at a time. */
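    /* (s->buf_size is the caller's buf-size rounded up to the granularity;
     * when the caller passes 0 it defaults to DEFAULT_MIRROR_BUF_SIZE,
     * i.e. 16 MiB with the constants above.) */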
    nb_sectors = MIN(s->buf_size >> BDRV_SECTOR_BITS, nb_sectors);
    nb_sectors = MIN(max_sectors, nb_sectors);
    assert(nb_sectors);
    ret = nb_sectors;

    if (s->cow_bitmap) {
        ret += mirror_cow_align(s, &sector_num, &nb_sectors);
    }
    assert(nb_sectors << BDRV_SECTOR_BITS <= s->buf_size);
    /* The sector range must meet granularity because:
     * 1) Caller passes in aligned values;
     * 2) mirror_cow_align is used only when target cluster is larger. */
    assert(!(sector_num % sectors_per_chunk));
    nb_chunks = DIV_ROUND_UP(nb_sectors, sectors_per_chunk);

    while (s->buf_free_count < nb_chunks) {
        trace_mirror_yield_in_flight(s, sector_num, s->in_flight);
        mirror_wait_for_io(s);
    }

    /* Allocate a MirrorOp that is used as an AIO callback.  */
    op = g_new(MirrorOp, 1);
    op->s = s;
    op->sector_num = sector_num;
    op->nb_sectors = nb_sectors;

    /* Now make a QEMUIOVector taking enough granularity-sized chunks
     * from s->buf_free.
     */
    qemu_iovec_init(&op->qiov, nb_chunks);
    while (nb_chunks-- > 0) {
        MirrorBuffer *buf = QSIMPLEQ_FIRST(&s->buf_free);
        size_t remaining = nb_sectors * BDRV_SECTOR_SIZE - op->qiov.size;

        QSIMPLEQ_REMOVE_HEAD(&s->buf_free, next);
        s->buf_free_count--;
        qemu_iovec_add(&op->qiov, buf, MIN(s->granularity, remaining));
    }

    /* Copy the dirty cluster.  */
    s->in_flight++;
    s->sectors_in_flight += nb_sectors;
    trace_mirror_one_iteration(s, sector_num, nb_sectors);

    blk_aio_preadv(source, sector_num * BDRV_SECTOR_SIZE, &op->qiov, 0,
                   mirror_read_complete, op);
    return ret;
}

static void mirror_do_zero_or_discard(MirrorBlockJob *s,
                                      int64_t sector_num,
                                      int nb_sectors,
                                      bool is_discard)
{
    MirrorOp *op;

    /* Allocate a MirrorOp that is used as an AIO callback. The qiov is zeroed
     * so the freeing in mirror_iteration_done is a nop. */
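    /* (g_new0 leaves op->qiov.niov at 0, so mirror_iteration_done has no
     * buffers to return to s->buf_free for this op.) */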
    op = g_new0(MirrorOp, 1);
    op->s = s;
    op->sector_num = sector_num;
    op->nb_sectors = nb_sectors;

    s->in_flight++;
    s->sectors_in_flight += nb_sectors;
    if (is_discard) {
        blk_aio_pdiscard(s->target, sector_num << BDRV_SECTOR_BITS,
                         op->nb_sectors << BDRV_SECTOR_BITS,
                         mirror_write_complete, op);
    } else {
        blk_aio_pwrite_zeroes(s->target, sector_num * BDRV_SECTOR_SIZE,
                              op->nb_sectors * BDRV_SECTOR_SIZE,
                              s->unmap ? BDRV_REQ_MAY_UNMAP : 0,
                              mirror_write_complete, op);
    }
}

static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
{
    BlockDriverState *source = s->source;
    int64_t sector_num, first_chunk;
    uint64_t delay_ns = 0;
    /* At least the first dirty chunk is mirrored in one iteration. */
    int nb_chunks = 1;
    int64_t end = s->bdev_length / BDRV_SECTOR_SIZE;
    int sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
    bool write_zeroes_ok = bdrv_can_write_zeroes_with_unmap(blk_bs(s->target));
    int max_io_sectors = MAX((s->buf_size >> BDRV_SECTOR_BITS) / MAX_IN_FLIGHT,
                             MAX_IO_SECTORS);

    sector_num = bdrv_dirty_iter_next(s->dbi);
    if (sector_num < 0) {
        bdrv_set_dirty_iter(s->dbi, 0);
        sector_num = bdrv_dirty_iter_next(s->dbi);
        trace_mirror_restart_iter(s, bdrv_get_dirty_count(s->dirty_bitmap));
        assert(sector_num >= 0);
    }

    first_chunk = sector_num / sectors_per_chunk;
    while (test_bit(first_chunk, s->in_flight_bitmap)) {
        trace_mirror_yield_in_flight(s, sector_num, s->in_flight);
        mirror_wait_for_io(s);
    }

    block_job_pause_point(&s->common);

    /* Find the number of consecutive dirty chunks following the first dirty
     * one, and wait for in flight requests in them. */
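    /* (Each pass of this loop extends the range by one chunk; it stops at the
     * end of the device, at the first clean chunk, at the first chunk with
     * I/O in flight, or once buf_size worth of chunks has been collected.) */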
    while (nb_chunks * sectors_per_chunk < (s->buf_size >> BDRV_SECTOR_BITS)) {
        int64_t next_dirty;
        int64_t next_sector = sector_num + nb_chunks * sectors_per_chunk;
        int64_t next_chunk = next_sector / sectors_per_chunk;
        if (next_sector >= end ||
            !bdrv_get_dirty(source, s->dirty_bitmap, next_sector)) {
            break;
        }
        if (test_bit(next_chunk, s->in_flight_bitmap)) {
            break;
        }

        next_dirty = bdrv_dirty_iter_next(s->dbi);
        if (next_dirty > next_sector || next_dirty < 0) {
            /* The bitmap iterator's cache is stale, refresh it */
            bdrv_set_dirty_iter(s->dbi, next_sector);
            next_dirty = bdrv_dirty_iter_next(s->dbi);
        }
        assert(next_dirty == next_sector);
        nb_chunks++;
    }

    /* Clear dirty bits before querying the block status, because
     * calling bdrv_get_block_status_above could yield - if some blocks are
     * marked dirty in this window, we need to know.
     */
    bdrv_reset_dirty_bitmap(s->dirty_bitmap, sector_num,
                            nb_chunks * sectors_per_chunk);
    bitmap_set(s->in_flight_bitmap, sector_num / sectors_per_chunk, nb_chunks);
    while (nb_chunks > 0 && sector_num < end) {
        int64_t ret;
        int io_sectors, io_sectors_acct;
        BlockDriverState *file;
        enum MirrorMethod {
            MIRROR_METHOD_COPY,
            MIRROR_METHOD_ZERO,
            MIRROR_METHOD_DISCARD
        } mirror_method = MIRROR_METHOD_COPY;

        assert(!(sector_num % sectors_per_chunk));
        ret = bdrv_get_block_status_above(source, NULL, sector_num,
                                          nb_chunks * sectors_per_chunk,
                                          &io_sectors, &file);
        if (ret < 0) {
            io_sectors = MIN(nb_chunks * sectors_per_chunk, max_io_sectors);
        } else if (ret & BDRV_BLOCK_DATA) {
            io_sectors = MIN(io_sectors, max_io_sectors);
        }

        io_sectors -= io_sectors % sectors_per_chunk;
        if (io_sectors < sectors_per_chunk) {
            io_sectors = sectors_per_chunk;
        } else if (ret >= 0 && !(ret & BDRV_BLOCK_DATA)) {
            int64_t target_sector_num;
            int target_nb_sectors;
            bdrv_round_sectors_to_clusters(blk_bs(s->target), sector_num,
                                           io_sectors, &target_sector_num,
                                           &target_nb_sectors);
            if (target_sector_num == sector_num &&
                target_nb_sectors == io_sectors) {
                mirror_method = ret & BDRV_BLOCK_ZERO ?
                                    MIRROR_METHOD_ZERO :
                                    MIRROR_METHOD_DISCARD;
            }
        }

        while (s->in_flight >= MAX_IN_FLIGHT) {
            trace_mirror_yield_in_flight(s, sector_num, s->in_flight);
            mirror_wait_for_io(s);
        }

        if (s->ret < 0) {
            return 0;
        }

        mirror_clip_sectors(s, sector_num, &io_sectors);
        switch (mirror_method) {
        case MIRROR_METHOD_COPY:
            io_sectors = mirror_do_read(s, sector_num, io_sectors);
            io_sectors_acct = io_sectors;
            break;
        case MIRROR_METHOD_ZERO:
        case MIRROR_METHOD_DISCARD:
            mirror_do_zero_or_discard(s, sector_num, io_sectors,
                                      mirror_method == MIRROR_METHOD_DISCARD);
            if (write_zeroes_ok) {
                io_sectors_acct = 0;
            } else {
                io_sectors_acct = io_sectors;
            }
            break;
        default:
            abort();
        }
        assert(io_sectors);
        sector_num += io_sectors;
        nb_chunks -= DIV_ROUND_UP(io_sectors, sectors_per_chunk);
        if (s->common.speed) {
            delay_ns = ratelimit_calculate_delay(&s->limit, io_sectors_acct);
        }
    }
    return delay_ns;
}

static void mirror_free_init(MirrorBlockJob *s)
{
    int granularity = s->granularity;
    size_t buf_size = s->buf_size;
    uint8_t *buf = s->buf;

    assert(s->buf_free_count == 0);
    QSIMPLEQ_INIT(&s->buf_free);
    while (buf_size != 0) {
        MirrorBuffer *cur = (MirrorBuffer *)buf;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, cur, next);
        s->buf_free_count++;
        buf_size -= granularity;
        buf += granularity;
    }
}

/* This is also used for the .pause callback. There is no matching
 * mirror_resume() because mirror_run() will begin iterating again
 * when the job is resumed.
 */
static void mirror_wait_for_all_io(MirrorBlockJob *s)
{
    while (s->in_flight > 0) {
        mirror_wait_for_io(s);
    }
}

typedef struct {
    int ret;
} MirrorExitData;

static void mirror_exit(BlockJob *job, void *opaque)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
    MirrorExitData *data = opaque;
    AioContext *replace_aio_context = NULL;
    BlockDriverState *src = s->source;
    BlockDriverState *target_bs = blk_bs(s->target);
    BlockDriverState *mirror_top_bs = s->mirror_top_bs;
    Error *local_err = NULL;

    /* Make sure that the source BDS doesn't go away before we called
     * block_job_completed(). */
    bdrv_ref(src);
    bdrv_ref(mirror_top_bs);
    bdrv_ref(target_bs);

    /* Remove target parent that still uses BLK_PERM_WRITE/RESIZE before
     * inserting target_bs at s->to_replace, where we might not be able to get
     * these permissions. */
    blk_unref(s->target);
    s->target = NULL;

    /* We don't access the source any more. Dropping any WRITE/RESIZE is
     * required before it could become a backing file of target_bs. */
    bdrv_child_try_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL,
                            &error_abort);
    if (s->backing_mode == MIRROR_SOURCE_BACKING_CHAIN) {
        BlockDriverState *backing = s->is_none_mode ? src : s->base;
        if (backing_bs(target_bs) != backing) {
            bdrv_set_backing_hd(target_bs, backing, &local_err);
            if (local_err) {
                error_report_err(local_err);
                data->ret = -EPERM;
            }
        }
    }

    if (s->to_replace) {
        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);
    }

    if (s->should_complete && data->ret == 0) {
        BlockDriverState *to_replace = src;
        if (s->to_replace) {
            to_replace = s->to_replace;
        }

        if (bdrv_get_flags(target_bs) != bdrv_get_flags(to_replace)) {
            bdrv_reopen(target_bs, bdrv_get_flags(to_replace), NULL);
        }

        /* The mirror job has no requests in flight any more, but we need to
         * drain potential other users of the BDS before changing the graph. */
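        /* (bdrv_replace_in_backing_chain() below makes every parent of
         * to_replace point at target_bs instead.) */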
        bdrv_drained_begin(target_bs);
        bdrv_replace_in_backing_chain(to_replace, target_bs);
        bdrv_drained_end(target_bs);
    }
    if (s->to_replace) {
        bdrv_op_unblock_all(s->to_replace, s->replace_blocker);
        error_free(s->replace_blocker);
        bdrv_unref(s->to_replace);
    }
    if (replace_aio_context) {
        aio_context_release(replace_aio_context);
    }
    g_free(s->replaces);
    bdrv_unref(target_bs);

    /* Remove the mirror filter driver from the graph. Before this, get rid of
     * the blockers on the intermediate nodes so that the resulting state is
     * valid. Also give up permissions on mirror_top_bs->backing, which might
     * block the removal. */
    block_job_remove_all_bdrv(job);
    bdrv_child_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL);
    bdrv_replace_in_backing_chain(mirror_top_bs, backing_bs(mirror_top_bs));

    /* We just changed the BDS the job BB refers to (with either or both of the
     * bdrv_replace_in_backing_chain() calls), so switch the BB back so the
     * cleanup does the right thing. We don't need any permissions any more
     * now. */
    blk_remove_bs(job->blk);
    blk_set_perm(job->blk, 0, BLK_PERM_ALL, &error_abort);
    blk_insert_bs(job->blk, mirror_top_bs, &error_abort);

    block_job_completed(&s->common, data->ret);

    g_free(data);
    bdrv_drained_end(src);
    bdrv_unref(mirror_top_bs);
    bdrv_unref(src);
}

static void mirror_throttle(MirrorBlockJob *s)
{
    int64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

    if (now - s->last_pause_ns > SLICE_TIME) {
        s->last_pause_ns = now;
        block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, 0);
    } else {
        block_job_pause_point(&s->common);
    }
}

static int coroutine_fn mirror_dirty_init(MirrorBlockJob *s)
{
    int64_t sector_num, end;
    BlockDriverState *base = s->base;
    BlockDriverState *bs = s->source;
    BlockDriverState *target_bs = blk_bs(s->target);
    int ret, n;

    end = s->bdev_length / BDRV_SECTOR_SIZE;

    if (base == NULL && !bdrv_has_zero_init(target_bs)) {
        if (!bdrv_can_write_zeroes_with_unmap(target_bs)) {
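            /* Zeroing the target would cost real writes here; just mark the
             * whole device dirty and let the copy loop write everything. */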
            bdrv_set_dirty_bitmap(s->dirty_bitmap, 0, end);
            return 0;
        }

        s->initial_zeroing_ongoing = true;
        for (sector_num = 0; sector_num < end; ) {
            int nb_sectors = MIN(end - sector_num,
                QEMU_ALIGN_DOWN(INT_MAX, s->granularity) >> BDRV_SECTOR_BITS);

            mirror_throttle(s);

            if (block_job_is_cancelled(&s->common)) {
                s->initial_zeroing_ongoing = false;
                return 0;
            }

            if (s->in_flight >= MAX_IN_FLIGHT) {
                trace_mirror_yield(s, s->in_flight, s->buf_free_count, -1);
                mirror_wait_for_io(s);
                continue;
            }

            mirror_do_zero_or_discard(s, sector_num, nb_sectors, false);
            sector_num += nb_sectors;
        }

        mirror_wait_for_all_io(s);
        s->initial_zeroing_ongoing = false;
    }

    /* First part, loop on the sectors and initialize the dirty bitmap.  */
    for (sector_num = 0; sector_num < end; ) {
        /* Just to make sure we are not exceeding int limit. */
        int nb_sectors = MIN(INT_MAX >> BDRV_SECTOR_BITS,
                             end - sector_num);

        mirror_throttle(s);

        if (block_job_is_cancelled(&s->common)) {
            return 0;
        }

        ret = bdrv_is_allocated_above(bs, base, sector_num, nb_sectors, &n);
        if (ret < 0) {
            return ret;
        }

        assert(n > 0);
        if (ret == 1) {
            bdrv_set_dirty_bitmap(s->dirty_bitmap, sector_num, n);
        }
        sector_num += n;
    }
    return 0;
}

/* Called when going out of the streaming phase to flush the bulk of the
 * data to the medium, or just before completing.
 */
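/* (A flush failure goes through mirror_error_action() like a target write
 * error, so the configured on-target-error policy applies.) */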
static int mirror_flush(MirrorBlockJob *s)
{
    int ret = blk_flush(s->target);
    if (ret < 0) {
        if (mirror_error_action(s, false, -ret) == BLOCK_ERROR_ACTION_REPORT) {
            s->ret = ret;
        }
    }
    return ret;
}

static void coroutine_fn mirror_run(void *opaque)
{
    MirrorBlockJob *s = opaque;
    MirrorExitData *data;
    BlockDriverState *bs = s->source;
    BlockDriverState *target_bs = blk_bs(s->target);
    bool need_drain = true;
    int64_t length;
    BlockDriverInfo bdi;
    char backing_filename[2]; /* we only need 2 characters because we are only
                                 checking for a NULL string */
    int ret = 0;
    int target_cluster_size = BDRV_SECTOR_SIZE;

    if (block_job_is_cancelled(&s->common)) {
        goto immediate_exit;
    }

    s->bdev_length = bdrv_getlength(bs);
    if (s->bdev_length < 0) {
        ret = s->bdev_length;
        goto immediate_exit;
    }

    /* Active commit must resize the base image if its size differs from the
     * active layer. */
    if (s->base == blk_bs(s->target)) {
        int64_t base_length;

        base_length = blk_getlength(s->target);
        if (base_length < 0) {
            ret = base_length;
            goto immediate_exit;
        }

        if (s->bdev_length > base_length) {
            ret = blk_truncate(s->target, s->bdev_length);
            if (ret < 0) {
                goto immediate_exit;
            }
        }
    }

    if (s->bdev_length == 0) {
        /* Report BLOCK_JOB_READY and wait for complete. */
        block_job_event_ready(&s->common);
        s->synced = true;
        while (!block_job_is_cancelled(&s->common) && !s->should_complete) {
            block_job_yield(&s->common);
        }
        s->common.cancelled = false;
        goto immediate_exit;
    }

    length = DIV_ROUND_UP(s->bdev_length, s->granularity);
    s->in_flight_bitmap = bitmap_new(length);

    /* If we have no backing file yet in the destination, we cannot let
     * the destination do COW.  Instead, we copy sectors around the
     * dirty data if needed.  We need a bitmap to do that.
     */
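    /* (cow_bitmap records which chunks have already been copied to the
     * target; mirror_cow_align() widens writes that touch not-yet-copied
     * chunks to whole target clusters.) */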
    bdrv_get_backing_filename(target_bs, backing_filename,
                              sizeof(backing_filename));
    if (!bdrv_get_info(target_bs, &bdi) && bdi.cluster_size) {
        target_cluster_size = bdi.cluster_size;
    }
    if (backing_filename[0] && !target_bs->backing
        && s->granularity < target_cluster_size) {
        s->buf_size = MAX(s->buf_size, target_cluster_size);
        s->cow_bitmap = bitmap_new(length);
    }
    s->target_cluster_sectors = target_cluster_size >> BDRV_SECTOR_BITS;
    s->max_iov = MIN(bs->bl.max_iov, target_bs->bl.max_iov);

    s->buf = qemu_try_blockalign(bs, s->buf_size);
    if (s->buf == NULL) {
        ret = -ENOMEM;
        goto immediate_exit;
    }

    mirror_free_init(s);

    s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    if (!s->is_none_mode) {
        ret = mirror_dirty_init(s);
        if (ret < 0 || block_job_is_cancelled(&s->common)) {
            goto immediate_exit;
        }
    }

    assert(!s->dbi);
    s->dbi = bdrv_dirty_iter_new(s->dirty_bitmap, 0);
    for (;;) {
        uint64_t delay_ns = 0;
        int64_t cnt, delta;
        bool should_complete;

        if (s->ret < 0) {
            ret = s->ret;
            goto immediate_exit;
        }

        block_job_pause_point(&s->common);

        cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        /* s->common.offset contains the number of bytes already processed so
         * far, cnt is the number of dirty sectors remaining and
         * s->sectors_in_flight is the number of sectors currently being
         * processed; together those are the current total operation length */
        s->common.len = s->common.offset +
                        (cnt + s->sectors_in_flight) * BDRV_SECTOR_SIZE;

        /* Note that even when no rate limit is applied we need to yield
         * periodically with no pending I/O so that bdrv_drain_all() returns.
         * We do so every SLICE_TIME nanoseconds, or when there is an error,
         * or when the source is clean, whichever comes first.
         */
        delta = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - s->last_pause_ns;
        if (delta < SLICE_TIME &&
            s->common.iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
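            /* Either wait for a slot (too much in flight, or no free chunk
             * buffers) or start mirroring another range of dirty chunks. */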
            if (s->in_flight >= MAX_IN_FLIGHT || s->buf_free_count == 0 ||
                (cnt == 0 && s->in_flight > 0)) {
                trace_mirror_yield(s, s->in_flight, s->buf_free_count, cnt);
                mirror_wait_for_io(s);
                continue;
            } else if (cnt != 0) {
                delay_ns = mirror_iteration(s);
            }
        }

        should_complete = false;
        if (s->in_flight == 0 && cnt == 0) {
            trace_mirror_before_flush(s);
            if (!s->synced) {
                if (mirror_flush(s) < 0) {
                    /* Go check s->ret.  */
                    continue;
                }
                /* We're out of the streaming phase.  From now on, if the job
                 * is cancelled we will actually complete all pending I/O and
                 * report completion.  This way, block-job-cancel will leave
                 * the target in a consistent state.
                 */
                block_job_event_ready(&s->common);
                s->synced = true;
            }

            should_complete = s->should_complete ||
                block_job_is_cancelled(&s->common);
            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        }

        if (cnt == 0 && should_complete) {
            /* The dirty bitmap is not updated while operations are pending.
             * If we're about to exit, wait for pending operations before
             * calling bdrv_get_dirty_count(bs), or we may exit while the
             * source has dirty data to copy!
             *
             * Note that I/O can be submitted by the guest while
             * mirror_populate runs, so pause it now.  Before deciding
             * whether to switch to target check one last time if I/O has
             * come in the meanwhile, and if not flush the data to disk.
             */
            trace_mirror_before_drain(s, cnt);

            bdrv_drained_begin(bs);
            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
            if (cnt > 0 || mirror_flush(s) < 0) {
                bdrv_drained_end(bs);
                continue;
            }

            /* The two disks are in sync.  Exit and report successful
             * completion.
             */
            assert(QLIST_EMPTY(&bs->tracked_requests));
            s->common.cancelled = false;
            need_drain = false;
            break;
        }

        ret = 0;
        trace_mirror_before_sleep(s, cnt, s->synced, delay_ns);
        if (!s->synced) {
            block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
            if (block_job_is_cancelled(&s->common)) {
                break;
            }
        } else if (!should_complete) {
            delay_ns = (s->in_flight == 0 && cnt == 0 ? SLICE_TIME : 0);
            block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
        }
        s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    }

immediate_exit:
    if (s->in_flight > 0) {
        /* We get here only if something went wrong.  Either the job failed,
         * or it was cancelled prematurely so that we do not guarantee that
         * the target is a copy of the source.
         */
        assert(ret < 0 || (!s->synced && block_job_is_cancelled(&s->common)));
        assert(need_drain);
        mirror_wait_for_all_io(s);
    }

    assert(s->in_flight == 0);
    qemu_vfree(s->buf);
    g_free(s->cow_bitmap);
    g_free(s->in_flight_bitmap);
    bdrv_dirty_iter_free(s->dbi);
    bdrv_release_dirty_bitmap(bs, s->dirty_bitmap);

    data = g_malloc(sizeof(*data));
    data->ret = ret;

    if (need_drain) {
        bdrv_drained_begin(bs);
    }
    block_job_defer_to_main_loop(&s->common, mirror_exit, data);
}

static void mirror_set_speed(BlockJob *job, int64_t speed, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    if (speed < 0) {
        error_setg(errp, QERR_INVALID_PARAMETER, "speed");
        return;
    }
    ratelimit_set_speed(&s->limit, speed / BDRV_SECTOR_SIZE, SLICE_TIME);
}

static void mirror_complete(BlockJob *job, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
    BlockDriverState *target;

    target = blk_bs(s->target);

    if (!s->synced) {
        error_setg(errp, "The active block job '%s' cannot be completed",
                   job->id);
        return;
    }

    if (s->backing_mode == MIRROR_OPEN_BACKING_CHAIN) {
        int ret;

        assert(!target->backing);
        ret = bdrv_open_backing_file(target, NULL, "backing", errp);
        if (ret < 0) {
            return;
        }
    }

    /* block all operations on to_replace bs */
    if (s->replaces) {
        AioContext *replace_aio_context;

        s->to_replace = bdrv_find_node(s->replaces);
        if (!s->to_replace) {
            error_setg(errp, "Node name '%s' not found", s->replaces);
            return;
        }

        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);

        /* TODO Translate this into permission system. Current definition of
         * GRAPH_MOD would require to request it for the parents; they might
         * not even be BlockDriverStates, however, so a BdrvChild can't address
         * them. May need redefinition of GRAPH_MOD. */
        error_setg(&s->replace_blocker,
                   "block device is in use by block-job-complete");
        bdrv_op_block_all(s->to_replace, s->replace_blocker);
        bdrv_ref(s->to_replace);

        aio_context_release(replace_aio_context);
    }

    s->should_complete = true;
    block_job_enter(&s->common);
}

static void mirror_pause(BlockJob *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    mirror_wait_for_all_io(s);
}

static void mirror_attached_aio_context(BlockJob *job, AioContext *new_context)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    blk_set_aio_context(s->target, new_context);
}

static void mirror_drain(BlockJob *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    /* Need to keep a reference in case blk_drain triggers execution
     * of mirror_complete...
     */
    if (s->target) {
        BlockBackend *target = s->target;
        blk_ref(target);
        blk_drain(target);
        blk_unref(target);
    }
}

static const BlockJobDriver mirror_job_driver = {
    .instance_size          = sizeof(MirrorBlockJob),
    .job_type               = BLOCK_JOB_TYPE_MIRROR,
    .set_speed              = mirror_set_speed,
    .start                  = mirror_run,
    .complete               = mirror_complete,
    .pause                  = mirror_pause,
    .attached_aio_context   = mirror_attached_aio_context,
    .drain                  = mirror_drain,
};

static const BlockJobDriver commit_active_job_driver = {
    .instance_size          = sizeof(MirrorBlockJob),
    .job_type               = BLOCK_JOB_TYPE_COMMIT,
    .set_speed              = mirror_set_speed,
    .start                  = mirror_run,
    .complete               = mirror_complete,
    .pause                  = mirror_pause,
    .attached_aio_context   = mirror_attached_aio_context,
    .drain                  = mirror_drain,
};

static int coroutine_fn bdrv_mirror_top_preadv(BlockDriverState *bs,
    uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags)
{
    return bdrv_co_preadv(bs->backing, offset, bytes, qiov, flags);
}

static int coroutine_fn bdrv_mirror_top_pwritev(BlockDriverState *bs,
    uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags)
{
    return bdrv_co_pwritev(bs->backing, offset, bytes, qiov, flags);
}

static int coroutine_fn bdrv_mirror_top_flush(BlockDriverState *bs)
{
    return bdrv_co_flush(bs->backing->bs);
}

static int64_t coroutine_fn bdrv_mirror_top_get_block_status(
    BlockDriverState *bs, int64_t sector_num, int nb_sectors, int *pnum,
    BlockDriverState **file)
{
    *pnum = nb_sectors;
    *file = bs->backing->bs;
    return BDRV_BLOCK_RAW | BDRV_BLOCK_OFFSET_VALID | BDRV_BLOCK_DATA |
           (sector_num << BDRV_SECTOR_BITS);
}

static int coroutine_fn bdrv_mirror_top_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int count, BdrvRequestFlags flags)
{
    return bdrv_co_pwrite_zeroes(bs->backing, offset, count, flags);
}

static int coroutine_fn bdrv_mirror_top_pdiscard(BlockDriverState *bs,
    int64_t offset, int count)
{
    return bdrv_co_pdiscard(bs->backing->bs, offset, count);
}

static void bdrv_mirror_top_close(BlockDriverState *bs)
{
}

static void bdrv_mirror_top_child_perm(BlockDriverState *bs, BdrvChild *c,
                                       const BdrvChildRole *role,
                                       uint64_t perm, uint64_t shared,
                                       uint64_t *nperm, uint64_t *nshared)
{
    /* Must be able to forward guest writes to the real image */
    *nperm = 0;
    if (perm & BLK_PERM_WRITE) {
        *nperm |= BLK_PERM_WRITE;
    }

    *nshared = BLK_PERM_ALL;
}

/* Dummy node that provides consistent read to its users without requiring it
 * from its backing file and that allows writes on the backing file chain. */
static BlockDriver bdrv_mirror_top = {
    .format_name                = "mirror_top",
    .bdrv_co_preadv             = bdrv_mirror_top_preadv,
    .bdrv_co_pwritev            = bdrv_mirror_top_pwritev,
    .bdrv_co_pwrite_zeroes      = bdrv_mirror_top_pwrite_zeroes,
    .bdrv_co_pdiscard           = bdrv_mirror_top_pdiscard,
    .bdrv_co_flush              = bdrv_mirror_top_flush,
    .bdrv_co_get_block_status   = bdrv_mirror_top_get_block_status,
    .bdrv_close                 = bdrv_mirror_top_close,
    .bdrv_child_perm            = bdrv_mirror_top_child_perm,
};

static void mirror_start_job(const char *job_id, BlockDriverState *bs,
                             int creation_flags, BlockDriverState *target,
                             const char *replaces, int64_t speed,
                             uint32_t granularity, int64_t buf_size,
                             BlockMirrorBackingMode backing_mode,
                             BlockdevOnError on_source_error,
                             BlockdevOnError on_target_error,
                             bool unmap,
                             BlockCompletionFunc *cb,
                             void *opaque, Error **errp,
                             const BlockJobDriver *driver,
                             bool is_none_mode, BlockDriverState *base,
                             bool auto_complete, const char *filter_node_name)
{
    MirrorBlockJob *s;
    BlockDriverState *mirror_top_bs;
    bool target_graph_mod;
    bool target_is_backing;
    Error *local_err = NULL;
    int ret;

    if (granularity == 0) {
        granularity = bdrv_get_default_bitmap_granularity(target);
    }

    assert((granularity & (granularity - 1)) == 0);

    if (buf_size < 0) {
        error_setg(errp, "Invalid parameter 'buf-size'");
        return;
    }

    if (buf_size == 0) {

static void mirror_start_job(const char *job_id, BlockDriverState *bs,
                             int creation_flags, BlockDriverState *target,
                             const char *replaces, int64_t speed,
                             uint32_t granularity, int64_t buf_size,
                             BlockMirrorBackingMode backing_mode,
                             BlockdevOnError on_source_error,
                             BlockdevOnError on_target_error,
                             bool unmap,
                             BlockCompletionFunc *cb,
                             void *opaque, Error **errp,
                             const BlockJobDriver *driver,
                             bool is_none_mode, BlockDriverState *base,
                             bool auto_complete, const char *filter_node_name)
{
    MirrorBlockJob *s;
    BlockDriverState *mirror_top_bs;
    bool target_graph_mod;
    bool target_is_backing;
    Error *local_err = NULL;
    int ret;

    if (granularity == 0) {
        granularity = bdrv_get_default_bitmap_granularity(target);
    }

    assert((granularity & (granularity - 1)) == 0);

    if (buf_size < 0) {
        error_setg(errp, "Invalid parameter 'buf-size'");
        return;
    }

    if (buf_size == 0) {
        buf_size = DEFAULT_MIRROR_BUF_SIZE;
    }
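
    /* Worked example for the defaults above (caller-supplied values here are
     * hypothetical): with granularity == 0 the default bitmap granularity
     * (64 KiB) is used, which passes the power-of-two assertion; with
     * buf_size == 0 the buffer becomes DEFAULT_MIRROR_BUF_SIZE, i.e.
     * MAX_IN_FLIGHT (16) * 1 MiB = 16 MiB. */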

    /* In the case of active commit, add dummy driver to provide consistent
     * reads on the top, while disabling it in the intermediate nodes, and make
     * the backing chain writable. */
    mirror_top_bs = bdrv_new_open_driver(&bdrv_mirror_top, filter_node_name,
                                         BDRV_O_RDWR, errp);
    if (mirror_top_bs == NULL) {
        return;
    }
    mirror_top_bs->total_sectors = bs->total_sectors;

    /* bdrv_append takes ownership of the mirror_top_bs reference, need to keep
     * it alive until block_job_create() even if bs has no parent. */
    bdrv_ref(mirror_top_bs);
    bdrv_drained_begin(bs);
    bdrv_append(mirror_top_bs, bs, &local_err);
    bdrv_drained_end(bs);

    if (local_err) {
        bdrv_unref(mirror_top_bs);
        error_propagate(errp, local_err);
        return;
    }

    /* Make sure that the source is not resized while the job is running */
    s = block_job_create(job_id, driver, mirror_top_bs,
                         BLK_PERM_CONSISTENT_READ,
                         BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED |
                         BLK_PERM_WRITE | BLK_PERM_GRAPH_MOD, speed,
                         creation_flags, cb, opaque, errp);
    bdrv_unref(mirror_top_bs);
    if (!s) {
        goto fail;
    }
    s->source = bs;
    s->mirror_top_bs = mirror_top_bs;

    /* No resize for the target either; while the mirror is still running, a
     * consistent read isn't necessarily possible. We could possibly allow
     * writes and graph modifications, though it would likely defeat the
     * purpose of a mirror, so leave them blocked for now.
     *
     * In the case of active commit, things look a bit different, though,
     * because the target is an already populated backing file in active use.
     * We can allow anything except resize there. */
    target_is_backing = bdrv_chain_contains(bs, target);
    target_graph_mod = (backing_mode != MIRROR_LEAVE_BACKING_CHAIN);
    s->target = blk_new(BLK_PERM_WRITE | BLK_PERM_RESIZE |
                        (target_graph_mod ? BLK_PERM_GRAPH_MOD : 0),
                        BLK_PERM_WRITE_UNCHANGED |
                        (target_is_backing ? BLK_PERM_CONSISTENT_READ |
                                             BLK_PERM_WRITE |
                                             BLK_PERM_GRAPH_MOD : 0));
    ret = blk_insert_bs(s->target, target, errp);
    if (ret < 0) {
        goto fail;
    }

    s->replaces = g_strdup(replaces);
    s->on_source_error = on_source_error;
    s->on_target_error = on_target_error;
    s->is_none_mode = is_none_mode;
    s->backing_mode = backing_mode;
    s->base = base;
    s->granularity = granularity;
    s->buf_size = ROUND_UP(buf_size, granularity);
    s->unmap = unmap;
    if (auto_complete) {
        s->should_complete = true;
    }
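
    /* The ROUND_UP above keeps the buffer an integral number of
     * granularity-sized chunks: e.g. a hypothetical buf_size of 1000000 bytes
     * with the default 64 KiB granularity becomes
     * ROUND_UP(1000000, 65536) = 1048576 bytes, i.e. exactly 16 chunks. */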

    s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity, NULL, errp);
    if (!s->dirty_bitmap) {
        g_free(s->replaces);
        blk_unref(s->target);
        block_job_unref(&s->common);
        return;
    }

    /* Required permissions are already taken with blk_new() */
    block_job_add_bdrv(&s->common, "target", target, 0, BLK_PERM_ALL,
                       &error_abort);

    /* In commit_active_start() all intermediate nodes disappear, so
     * any jobs in them must be blocked */
    if (target_is_backing) {
        BlockDriverState *iter;
        for (iter = backing_bs(bs); iter != target; iter = backing_bs(iter)) {
            /* XXX BLK_PERM_WRITE needs to be allowed so we don't block
             * ourselves at s->base (if writes are blocked for a node, they are
             * also blocked for its backing file). The other option would be a
             * second filter driver above s->base (== target). */
            ret = block_job_add_bdrv(&s->common, "intermediate node", iter, 0,
                                     BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE,
                                     errp);
            if (ret < 0) {
                goto fail;
            }
        }
    }

    trace_mirror_start(bs, s, opaque);
    block_job_start(&s->common);
    return;

fail:
    if (s) {
        g_free(s->replaces);
        blk_unref(s->target);
        block_job_unref(&s->common);
    }

    bdrv_child_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL);
    bdrv_replace_in_backing_chain(mirror_top_bs, backing_bs(mirror_top_bs));
}
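
/* The two public entry points below drive mirror_start_job(). mirror_start()
 * backs the QMP commands drive-mirror and blockdev-mirror; for example, a
 * hypothetical monitor input like
 *
 *   { "execute": "drive-mirror",
 *     "arguments": { "device": "drive0", "target": "/tmp/mirror.qcow2",
 *                    "format": "qcow2", "sync": "full" } }
 *
 * arrives here with mode == MIRROR_SYNC_MODE_FULL. */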

void mirror_start(const char *job_id, BlockDriverState *bs,
                  BlockDriverState *target, const char *replaces,
                  int64_t speed, uint32_t granularity, int64_t buf_size,
                  MirrorSyncMode mode, BlockMirrorBackingMode backing_mode,
                  BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  bool unmap, const char *filter_node_name, Error **errp)
{
    bool is_none_mode;
    BlockDriverState *base;

    if (mode == MIRROR_SYNC_MODE_INCREMENTAL) {
        error_setg(errp, "Sync mode 'incremental' not supported");
        return;
    }
    is_none_mode = mode == MIRROR_SYNC_MODE_NONE;
    base = mode == MIRROR_SYNC_MODE_TOP ? backing_bs(bs) : NULL;
    mirror_start_job(job_id, bs, BLOCK_JOB_DEFAULT, target, replaces,
                     speed, granularity, buf_size, backing_mode,
                     on_source_error, on_target_error, unmap, NULL, NULL, errp,
                     &mirror_job_driver, is_none_mode, base, false,
                     filter_node_name);
}

void commit_active_start(const char *job_id, BlockDriverState *bs,
                         BlockDriverState *base, int creation_flags,
                         int64_t speed, BlockdevOnError on_error,
                         const char *filter_node_name,
                         BlockCompletionFunc *cb, void *opaque, Error **errp,
                         bool auto_complete)
{
    int orig_base_flags;
    Error *local_err = NULL;

    orig_base_flags = bdrv_get_flags(base);

    if (bdrv_reopen(base, bs->open_flags, errp)) {
        return;
    }

    mirror_start_job(job_id, bs, creation_flags, base, NULL, speed, 0, 0,
                     MIRROR_LEAVE_BACKING_CHAIN,
                     on_error, on_error, true, cb, opaque, &local_err,
                     &commit_active_job_driver, false, base, auto_complete,
                     filter_node_name);
    if (local_err) {
        error_propagate(errp, local_err);
        goto error_restore_flags;
    }

    return;

error_restore_flags:
    /* ignore error and errp for bdrv_reopen, because we want to propagate
     * the original error */
    bdrv_reopen(base, orig_base_flags, NULL);
    return;
}
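
/* commit_active_start() reuses the mirror machinery with the target being the
 * just-reopened (read-write) base image; note how orig_base_flags is restored
 * if job creation fails. A hypothetical command that takes this path is
 * block-commit on the active layer:
 *
 *   { "execute": "block-commit",
 *     "arguments": { "device": "drive0" } }
 */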