/*
 * Image mirroring
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Paolo Bonzini  <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "trace.h"
#include "block/blockjob.h"
#include "block/block_int.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qemu/ratelimit.h"
#include "qemu/bitmap.h"

#define SLICE_TIME    100000000ULL /* ns */
#define MAX_IN_FLIGHT 16
#define DEFAULT_MIRROR_BUF_SIZE   (10 << 20)

/* The mirroring buffer is a list of granularity-sized chunks.
 * Free chunks are organized in a list.
 */
typedef struct MirrorBuffer {
    QSIMPLEQ_ENTRY(MirrorBuffer) next;
} MirrorBuffer;

typedef struct MirrorBlockJob {
    BlockJob common;
    RateLimit limit;
    BlockBackend *target;
    BlockDriverState *base;
    /* The name of the graph node to replace */
    char *replaces;
    /* The BDS to replace */
    BlockDriverState *to_replace;
    /* Used to block operations on the drive-mirror-replace target */
    Error *replace_blocker;
    bool is_none_mode;
    BlockdevOnError on_source_error, on_target_error;
    bool synced;
    bool should_complete;
    int64_t granularity;
    size_t buf_size;
    int64_t bdev_length;
    unsigned long *cow_bitmap;
    BdrvDirtyBitmap *dirty_bitmap;
    HBitmapIter hbi;
    uint8_t *buf;
    QSIMPLEQ_HEAD(, MirrorBuffer) buf_free;
    int buf_free_count;

    unsigned long *in_flight_bitmap;
    int in_flight;
    int sectors_in_flight;
    int ret;
    bool unmap;
    bool waiting_for_io;
    int target_cluster_sectors;
    int max_iov;
} MirrorBlockJob;

typedef struct MirrorOp {
    MirrorBlockJob *s;
    QEMUIOVector qiov;
    int64_t sector_num;
    int nb_sectors;
} MirrorOp;
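/* Illustrative sizing, assuming the usual defaults (64 KiB bitmap
 * granularity and the 10 MiB DEFAULT_MIRROR_BUF_SIZE): s->buf is carved
 * into 160 granularity-sized chunks, and mirror_free_init() threads a
 * MirrorBuffer through the head of each free chunk. */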
static BlockErrorAction mirror_error_action(MirrorBlockJob *s, bool read,
                                            int error)
{
    s->synced = false;
    if (read) {
        return block_job_error_action(&s->common, s->on_source_error,
                                      true, error);
    } else {
        return block_job_error_action(&s->common, s->on_target_error,
                                      false, error);
    }
}

static void mirror_iteration_done(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;
    struct iovec *iov;
    int64_t chunk_num;
    int i, nb_chunks, sectors_per_chunk;

    trace_mirror_iteration_done(s, op->sector_num, op->nb_sectors, ret);

    s->in_flight--;
    s->sectors_in_flight -= op->nb_sectors;
    iov = op->qiov.iov;
    for (i = 0; i < op->qiov.niov; i++) {
        MirrorBuffer *buf = (MirrorBuffer *) iov[i].iov_base;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, buf, next);
        s->buf_free_count++;
    }

    sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
    chunk_num = op->sector_num / sectors_per_chunk;
    nb_chunks = DIV_ROUND_UP(op->nb_sectors, sectors_per_chunk);
    bitmap_clear(s->in_flight_bitmap, chunk_num, nb_chunks);
    if (ret >= 0) {
        if (s->cow_bitmap) {
            bitmap_set(s->cow_bitmap, chunk_num, nb_chunks);
        }
        s->common.offset += (uint64_t)op->nb_sectors * BDRV_SECTOR_SIZE;
    }

    qemu_iovec_destroy(&op->qiov);
    g_free(op);

    if (s->waiting_for_io) {
        qemu_coroutine_enter(s->common.co, NULL);
    }
}

static void mirror_write_complete(void *opaque, int ret)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;
    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->sector_num, op->nb_sectors);
        action = mirror_error_action(s, false, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }
    }
    mirror_iteration_done(op, ret);
}

static void mirror_read_complete(void *opaque, int ret)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;
    if (ret < 0) {
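        /* mirror_iteration() cleared these dirty bits before issuing the
         * read; set them again below so the failed range is retried on a
         * later pass. */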
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->sector_num, op->nb_sectors);
        action = mirror_error_action(s, true, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }

        mirror_iteration_done(op, ret);
        return;
    }
    blk_aio_pwritev(s->target, op->sector_num * BDRV_SECTOR_SIZE, &op->qiov,
                    0, mirror_write_complete, op);
}

static inline void mirror_clip_sectors(MirrorBlockJob *s,
                                       int64_t sector_num,
                                       int *nb_sectors)
{
    *nb_sectors = MIN(*nb_sectors,
                      s->bdev_length / BDRV_SECTOR_SIZE - sector_num);
}

/* Round sector_num and/or nb_sectors to target cluster if COW is needed, and
 * return the offset of the adjusted tail sector against original. */
static int mirror_cow_align(MirrorBlockJob *s,
                            int64_t *sector_num,
                            int *nb_sectors)
{
    bool need_cow;
    int ret = 0;
    int chunk_sectors = s->granularity >> BDRV_SECTOR_BITS;
    int64_t align_sector_num = *sector_num;
    int align_nb_sectors = *nb_sectors;
    int max_sectors = chunk_sectors * s->max_iov;

    need_cow = !test_bit(*sector_num / chunk_sectors, s->cow_bitmap);
    need_cow |= !test_bit((*sector_num + *nb_sectors - 1) / chunk_sectors,
                          s->cow_bitmap);
    if (need_cow) {
        bdrv_round_sectors_to_clusters(blk_bs(s->target), *sector_num,
                                       *nb_sectors, &align_sector_num,
                                       &align_nb_sectors);
    }

    if (align_nb_sectors > max_sectors) {
        align_nb_sectors = max_sectors;
        if (need_cow) {
            align_nb_sectors = QEMU_ALIGN_DOWN(align_nb_sectors,
                                               s->target_cluster_sectors);
        }
    }
    /* Clipping may result in align_nb_sectors unaligned to chunk boundary, but
     * that doesn't matter because it's already the end of source image. */
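    /* Worked example (illustrative, assuming 64 KiB granularity, i.e.
     * 128-sector chunks, and a 2 MiB target cluster, i.e. 4096 sectors):
     * a request for sectors [384, 512) whose chunks still need COW has
     * been widened above to the containing cluster [0, 4096); the
     * clipping below only shrinks it if it runs past end of image. */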
    mirror_clip_sectors(s, align_sector_num, &align_nb_sectors);

    ret = align_sector_num + align_nb_sectors - (*sector_num + *nb_sectors);
    *sector_num = align_sector_num;
    *nb_sectors = align_nb_sectors;
    assert(ret >= 0);
    return ret;
}

static inline void mirror_wait_for_io(MirrorBlockJob *s)
{
    assert(!s->waiting_for_io);
    s->waiting_for_io = true;
    qemu_coroutine_yield();
    s->waiting_for_io = false;
}

/* Submit async read while handling COW.
 * Returns: nb_sectors if no alignment is necessary, or
 *          (new_end - sector_num) if tail is rounded up or down due to
 *          alignment or buffer limit.
 */
static int mirror_do_read(MirrorBlockJob *s, int64_t sector_num,
                          int nb_sectors)
{
    BlockBackend *source = s->common.blk;
    int sectors_per_chunk, nb_chunks;
    int ret = nb_sectors;
    MirrorOp *op;

    sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;

    /* We can only handle as much as buf_size at a time. */
    nb_sectors = MIN(s->buf_size >> BDRV_SECTOR_BITS, nb_sectors);
    assert(nb_sectors);

    if (s->cow_bitmap) {
        ret += mirror_cow_align(s, &sector_num, &nb_sectors);
    }
    assert(nb_sectors << BDRV_SECTOR_BITS <= s->buf_size);
    /* The sector range must meet granularity because:
     * 1) Caller passes in aligned values;
     * 2) mirror_cow_align is used only when target cluster is larger. */
    assert(!(sector_num % sectors_per_chunk));
    nb_chunks = DIV_ROUND_UP(nb_sectors, sectors_per_chunk);

    while (s->buf_free_count < nb_chunks) {
        trace_mirror_yield_in_flight(s, sector_num, s->in_flight);
        mirror_wait_for_io(s);
    }

    /* Allocate a MirrorOp that is used as an AIO callback.  */
    op = g_new(MirrorOp, 1);
    op->s = s;
    op->sector_num = sector_num;
    op->nb_sectors = nb_sectors;

    /* Now make a QEMUIOVector taking enough granularity-sized chunks
     * from s->buf_free.
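     * The last chunk may be shorter than s->granularity if nb_sectors was
     * clipped at the end of the image, hence the MIN against 'remaining'
     * below.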
     */
    qemu_iovec_init(&op->qiov, nb_chunks);
    while (nb_chunks-- > 0) {
        MirrorBuffer *buf = QSIMPLEQ_FIRST(&s->buf_free);
        size_t remaining = nb_sectors * BDRV_SECTOR_SIZE - op->qiov.size;

        QSIMPLEQ_REMOVE_HEAD(&s->buf_free, next);
        s->buf_free_count--;
        qemu_iovec_add(&op->qiov, buf, MIN(s->granularity, remaining));
    }

    /* Copy the dirty cluster.  */
    s->in_flight++;
    s->sectors_in_flight += nb_sectors;
    trace_mirror_one_iteration(s, sector_num, nb_sectors);

    blk_aio_preadv(source, sector_num * BDRV_SECTOR_SIZE, &op->qiov, 0,
                   mirror_read_complete, op);
    return ret;
}

static void mirror_do_zero_or_discard(MirrorBlockJob *s,
                                      int64_t sector_num,
                                      int nb_sectors,
                                      bool is_discard)
{
    MirrorOp *op;

    /* Allocate a MirrorOp that is used as an AIO callback. The qiov is zeroed
     * so the freeing in mirror_iteration_done is nop. */
    op = g_new0(MirrorOp, 1);
    op->s = s;
    op->sector_num = sector_num;
    op->nb_sectors = nb_sectors;

    s->in_flight++;
    s->sectors_in_flight += nb_sectors;
    if (is_discard) {
        blk_aio_discard(s->target, sector_num, op->nb_sectors,
                        mirror_write_complete, op);
    } else {
        blk_aio_pwrite_zeroes(s->target, sector_num * BDRV_SECTOR_SIZE,
                              op->nb_sectors * BDRV_SECTOR_SIZE,
                              s->unmap ? BDRV_REQ_MAY_UNMAP : 0,
                              mirror_write_complete, op);
    }
}

static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
{
    BlockDriverState *source = blk_bs(s->common.blk);
    int64_t sector_num, first_chunk;
    uint64_t delay_ns = 0;
    /* At least the first dirty chunk is mirrored in one iteration. */
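    /* Consecutive dirty chunks found below are coalesced into the same
     * iteration, up to s->buf_size worth. */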
    int nb_chunks = 1;
    int64_t end = s->bdev_length / BDRV_SECTOR_SIZE;
    int sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;

    sector_num = hbitmap_iter_next(&s->hbi);
    if (sector_num < 0) {
        bdrv_dirty_iter_init(s->dirty_bitmap, &s->hbi);
        sector_num = hbitmap_iter_next(&s->hbi);
        trace_mirror_restart_iter(s, bdrv_get_dirty_count(s->dirty_bitmap));
        assert(sector_num >= 0);
    }

    first_chunk = sector_num / sectors_per_chunk;
    while (test_bit(first_chunk, s->in_flight_bitmap)) {
        trace_mirror_yield_in_flight(s, first_chunk, s->in_flight);
        mirror_wait_for_io(s);
    }

    /* Find the number of consecutive dirty chunks following the first dirty
     * one, and wait for in flight requests in them. */
    while (nb_chunks * sectors_per_chunk < (s->buf_size >> BDRV_SECTOR_BITS)) {
        int64_t hbitmap_next;
        int64_t next_sector = sector_num + nb_chunks * sectors_per_chunk;
        int64_t next_chunk = next_sector / sectors_per_chunk;
        if (next_sector >= end ||
            !bdrv_get_dirty(source, s->dirty_bitmap, next_sector)) {
            break;
        }
        if (test_bit(next_chunk, s->in_flight_bitmap)) {
            break;
        }

        hbitmap_next = hbitmap_iter_next(&s->hbi);
        if (hbitmap_next > next_sector || hbitmap_next < 0) {
            /* The bitmap iterator's cache is stale, refresh it */
            bdrv_set_dirty_iter(&s->hbi, next_sector);
            hbitmap_next = hbitmap_iter_next(&s->hbi);
        }
        assert(hbitmap_next == next_sector);
        nb_chunks++;
    }

    /* Clear dirty bits before querying the block status, because
     * calling bdrv_get_block_status_above could yield - if some blocks are
     * marked dirty in this window, we need to know.
     */
    bdrv_reset_dirty_bitmap(s->dirty_bitmap, sector_num,
                            nb_chunks * sectors_per_chunk);
    bitmap_set(s->in_flight_bitmap, sector_num / sectors_per_chunk, nb_chunks);
    while (nb_chunks > 0 && sector_num < end) {
        int ret;
        int io_sectors;
        BlockDriverState *file;
        enum MirrorMethod {
            MIRROR_METHOD_COPY,
            MIRROR_METHOD_ZERO,
            MIRROR_METHOD_DISCARD
        } mirror_method = MIRROR_METHOD_COPY;

        assert(!(sector_num % sectors_per_chunk));
        ret = bdrv_get_block_status_above(source, NULL, sector_num,
                                          nb_chunks * sectors_per_chunk,
                                          &io_sectors, &file);
        if (ret < 0) {
            io_sectors = nb_chunks * sectors_per_chunk;
        }

        io_sectors -= io_sectors % sectors_per_chunk;
        if (io_sectors < sectors_per_chunk) {
            io_sectors = sectors_per_chunk;
        } else if (ret >= 0 && !(ret & BDRV_BLOCK_DATA)) {
            int64_t target_sector_num;
            int target_nb_sectors;
            bdrv_round_sectors_to_clusters(blk_bs(s->target), sector_num,
                                           io_sectors, &target_sector_num,
                                           &target_nb_sectors);
            if (target_sector_num == sector_num &&
                target_nb_sectors == io_sectors) {
                mirror_method = ret & BDRV_BLOCK_ZERO ?
                                    MIRROR_METHOD_ZERO :
                                    MIRROR_METHOD_DISCARD;
            }
        }

        mirror_clip_sectors(s, sector_num, &io_sectors);
        switch (mirror_method) {
        case MIRROR_METHOD_COPY:
            io_sectors = mirror_do_read(s, sector_num, io_sectors);
            break;
        case MIRROR_METHOD_ZERO:
            mirror_do_zero_or_discard(s, sector_num, io_sectors, false);
            break;
        case MIRROR_METHOD_DISCARD:
            mirror_do_zero_or_discard(s, sector_num, io_sectors, true);
            break;
        default:
            abort();
        }
        assert(io_sectors);
        sector_num += io_sectors;
        nb_chunks -= DIV_ROUND_UP(io_sectors, sectors_per_chunk);
        delay_ns += ratelimit_calculate_delay(&s->limit, io_sectors);
    }
    return delay_ns;
}

static void mirror_free_init(MirrorBlockJob *s)
{
    int granularity = s->granularity;
    size_t buf_size = s->buf_size;
    uint8_t *buf = s->buf;

    assert(s->buf_free_count == 0);
    QSIMPLEQ_INIT(&s->buf_free);
    while (buf_size != 0) {
        MirrorBuffer *cur = (MirrorBuffer *)buf;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, cur, next);
        s->buf_free_count++;
        buf_size -= granularity;
        buf += granularity;
    }
}

static void mirror_drain(MirrorBlockJob *s)
{
    while (s->in_flight > 0) {
        mirror_wait_for_io(s);
    }
}

typedef struct {
    int ret;
} MirrorExitData;

static void mirror_exit(BlockJob *job, void *opaque)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
    MirrorExitData *data = opaque;
    AioContext *replace_aio_context = NULL;
    BlockDriverState *src = blk_bs(s->common.blk);
    BlockDriverState *target_bs = blk_bs(s->target);

    /* Make sure that the source BDS doesn't go away before we called
     * block_job_completed(). */
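    /* The matching bdrv_unref() is at the end of this function, after
     * block_job_completed() has been called. */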
    bdrv_ref(src);

    if (s->to_replace) {
        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);
    }

    if (s->should_complete && data->ret == 0) {
        BlockDriverState *to_replace = src;
        if (s->to_replace) {
            to_replace = s->to_replace;
        }

        if (bdrv_get_flags(target_bs) != bdrv_get_flags(to_replace)) {
            bdrv_reopen(target_bs, bdrv_get_flags(to_replace), NULL);
        }

        /* The mirror job has no requests in flight any more, but we need to
         * drain potential other users of the BDS before changing the graph. */
        bdrv_drained_begin(target_bs);
        bdrv_replace_in_backing_chain(to_replace, target_bs);
        bdrv_drained_end(target_bs);

        /* We just changed the BDS the job BB refers to */
        blk_remove_bs(job->blk);
        blk_insert_bs(job->blk, src);
    }
    if (s->to_replace) {
        bdrv_op_unblock_all(s->to_replace, s->replace_blocker);
        error_free(s->replace_blocker);
        bdrv_unref(s->to_replace);
    }
    if (replace_aio_context) {
        aio_context_release(replace_aio_context);
    }
    g_free(s->replaces);
    bdrv_op_unblock_all(target_bs, s->common.blocker);
    blk_unref(s->target);
    block_job_completed(&s->common, data->ret);
    g_free(data);
    bdrv_drained_end(src);
    if (qemu_get_aio_context() == bdrv_get_aio_context(src)) {
        aio_enable_external(iohandler_get_aio_context());
    }
    bdrv_unref(src);
}

static void coroutine_fn mirror_run(void *opaque)
{
    MirrorBlockJob *s = opaque;
    MirrorExitData *data;
    BlockDriverState *bs = blk_bs(s->common.blk);
    BlockDriverState *target_bs = blk_bs(s->target);
    int64_t sector_num, end, length;
    uint64_t last_pause_ns;
    BlockDriverInfo bdi;
    char backing_filename[2]; /* we only need 2 characters because we are only
                                 checking for a NULL string */
    int ret = 0;
    int n;
    int target_cluster_size = BDRV_SECTOR_SIZE;

    if (block_job_is_cancelled(&s->common)) {
        goto immediate_exit;
    }

    s->bdev_length = bdrv_getlength(bs);
    if (s->bdev_length < 0) {
        ret = s->bdev_length;
        goto immediate_exit;
    } else if (s->bdev_length == 0) {
        /* Report BLOCK_JOB_READY and wait for complete. */
        block_job_event_ready(&s->common);
        s->synced = true;
        while (!block_job_is_cancelled(&s->common) && !s->should_complete) {
            block_job_yield(&s->common);
        }
        s->common.cancelled = false;
        goto immediate_exit;
    }

    length = DIV_ROUND_UP(s->bdev_length, s->granularity);
    s->in_flight_bitmap = bitmap_new(length);

    /* If we have no backing file yet in the destination, we cannot let
     * the destination do COW.  Instead, we copy sectors around the
     * dirty data if needed.  We need a bitmap to do that.
     */
    bdrv_get_backing_filename(target_bs, backing_filename,
                              sizeof(backing_filename));
    if (!bdrv_get_info(target_bs, &bdi) && bdi.cluster_size) {
        target_cluster_size = bdi.cluster_size;
    }
    if (backing_filename[0] && !target_bs->backing
        && s->granularity < target_cluster_size) {
        s->buf_size = MAX(s->buf_size, target_cluster_size);
        s->cow_bitmap = bitmap_new(length);
    }
    s->target_cluster_sectors = target_cluster_size >> BDRV_SECTOR_BITS;
    s->max_iov = MIN(bs->bl.max_iov, target_bs->bl.max_iov);

    end = s->bdev_length / BDRV_SECTOR_SIZE;
    s->buf = qemu_try_blockalign(bs, s->buf_size);
    if (s->buf == NULL) {
        ret = -ENOMEM;
        goto immediate_exit;
    }

    mirror_free_init(s);

    last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    if (!s->is_none_mode) {
        /* First part, loop on the sectors and initialize the dirty bitmap.  */
        BlockDriverState *base = s->base;
        bool mark_all_dirty = s->base == NULL && !bdrv_has_zero_init(target_bs);

        for (sector_num = 0; sector_num < end; ) {
            /* Just to make sure we are not exceeding int limit. */
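            /* (bdrv_is_allocated_above() below takes an int nb_sectors.) */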
            int nb_sectors = MIN(INT_MAX >> BDRV_SECTOR_BITS,
                                 end - sector_num);
            int64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

            if (now - last_pause_ns > SLICE_TIME) {
                last_pause_ns = now;
                block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, 0);
            }

            if (block_job_is_cancelled(&s->common)) {
                goto immediate_exit;
            }

            ret = bdrv_is_allocated_above(bs, base, sector_num, nb_sectors, &n);

            if (ret < 0) {
                goto immediate_exit;
            }

            assert(n > 0);
            if (ret == 1 || mark_all_dirty) {
                bdrv_set_dirty_bitmap(s->dirty_bitmap, sector_num, n);
            }
            sector_num += n;
        }
    }

    bdrv_dirty_iter_init(s->dirty_bitmap, &s->hbi);
    for (;;) {
        uint64_t delay_ns = 0;
        int64_t cnt;
        bool should_complete;

        if (s->ret < 0) {
            ret = s->ret;
            goto immediate_exit;
        }

        cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        /* s->common.offset contains the number of bytes already processed so
         * far, cnt is the number of dirty sectors remaining and
         * s->sectors_in_flight is the number of sectors currently being
         * processed; together those are the current total operation length */
        s->common.len = s->common.offset +
                        (cnt + s->sectors_in_flight) * BDRV_SECTOR_SIZE;

        /* Note that even when no rate limit is applied we need to yield
         * periodically with no pending I/O so that bdrv_drain_all() returns.
         * We do so every SLICE_TIME nanoseconds, or when there is an error,
         * or when the source is clean, whichever comes first.
         */
        if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - last_pause_ns < SLICE_TIME &&
            s->common.iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
            if (s->in_flight == MAX_IN_FLIGHT || s->buf_free_count == 0 ||
                (cnt == 0 && s->in_flight > 0)) {
                trace_mirror_yield(s, s->in_flight, s->buf_free_count, cnt);
                mirror_wait_for_io(s);
                continue;
            } else if (cnt != 0) {
                delay_ns = mirror_iteration(s);
            }
        }

        should_complete = false;
        if (s->in_flight == 0 && cnt == 0) {
            trace_mirror_before_flush(s);
            ret = blk_flush(s->target);
            if (ret < 0) {
                if (mirror_error_action(s, false, -ret) ==
                    BLOCK_ERROR_ACTION_REPORT) {
                    goto immediate_exit;
                }
            } else {
                /* We're out of the streaming phase.  From now on, if the job
                 * is cancelled we will actually complete all pending I/O and
                 * report completion.  This way, block-job-cancel will leave
                 * the target in a consistent state.
                 */
                if (!s->synced) {
                    block_job_event_ready(&s->common);
                    s->synced = true;
                }

                should_complete = s->should_complete ||
                    block_job_is_cancelled(&s->common);
                cnt = bdrv_get_dirty_count(s->dirty_bitmap);
            }
        }

        if (cnt == 0 && should_complete) {
            /* The dirty bitmap is not updated while operations are pending.
             * If we're about to exit, wait for pending operations before
             * calling bdrv_get_dirty_count(bs), or we may exit while the
             * source has dirty data to copy!
             *
             * Note that I/O can be submitted by the guest while
             * mirror_populate runs.
             */
            trace_mirror_before_drain(s, cnt);
            bdrv_co_drain(bs);
            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        }

        ret = 0;
        trace_mirror_before_sleep(s, cnt, s->synced, delay_ns);
        if (!s->synced) {
            block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
            if (block_job_is_cancelled(&s->common)) {
                break;
            }
        } else if (!should_complete) {
            delay_ns = (s->in_flight == 0 && cnt == 0 ? SLICE_TIME : 0);
            block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
        } else if (cnt == 0) {
            /* The two disks are in sync.  Exit and report successful
             * completion.
             */
            assert(QLIST_EMPTY(&bs->tracked_requests));
            s->common.cancelled = false;
            break;
        }
        last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    }

immediate_exit:
    if (s->in_flight > 0) {
        /* We get here only if something went wrong.  Either the job failed,
         * or it was cancelled prematurely so that we do not guarantee that
         * the target is a copy of the source.
         */
        assert(ret < 0 || (!s->synced && block_job_is_cancelled(&s->common)));
        mirror_drain(s);
    }

    assert(s->in_flight == 0);
    qemu_vfree(s->buf);
    g_free(s->cow_bitmap);
    g_free(s->in_flight_bitmap);
    bdrv_release_dirty_bitmap(bs, s->dirty_bitmap);

    data = g_malloc(sizeof(*data));
    data->ret = ret;
    /* Before we switch to target in mirror_exit, make sure data doesn't
     * change. */
    bdrv_drained_begin(bs);
    if (qemu_get_aio_context() == bdrv_get_aio_context(bs)) {
        /* FIXME: virtio host notifiers run on iohandler_ctx, therefore the
         * above bdrv_drained_end isn't enough to quiesce it. This is ugly, we
         * need a block layer API change to achieve this.
         */
        aio_disable_external(iohandler_get_aio_context());
    }
    block_job_defer_to_main_loop(&s->common, mirror_exit, data);
}

static void mirror_set_speed(BlockJob *job, int64_t speed, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    if (speed < 0) {
        error_setg(errp, QERR_INVALID_PARAMETER, "speed");
        return;
    }
    ratelimit_set_speed(&s->limit, speed / BDRV_SECTOR_SIZE, SLICE_TIME);
}

static void mirror_complete(BlockJob *job, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
    Error *local_err = NULL;
    int ret;

    ret = bdrv_open_backing_file(blk_bs(s->target), NULL, "backing",
                                 &local_err);
    if (ret < 0) {
        error_propagate(errp, local_err);
        return;
    }
    if (!s->synced) {
        error_setg(errp, QERR_BLOCK_JOB_NOT_READY, job->id);
        return;
    }

    /* check the target bs is not blocked and block all operations on it */
    if (s->replaces) {
        AioContext *replace_aio_context;

        s->to_replace = bdrv_find_node(s->replaces);
        if (!s->to_replace) {
            error_setg(errp, "Node name '%s' not found", s->replaces);
            return;
        }

        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);

        error_setg(&s->replace_blocker,
                   "block device is in use by block-job-complete");
        bdrv_op_block_all(s->to_replace, s->replace_blocker);
        bdrv_ref(s->to_replace);

        aio_context_release(replace_aio_context);
    }

    s->should_complete = true;
    block_job_enter(&s->common);
}

static const BlockJobDriver mirror_job_driver = {
    .instance_size = sizeof(MirrorBlockJob),
    .job_type      = BLOCK_JOB_TYPE_MIRROR,
    .set_speed     = mirror_set_speed,
    .complete      = mirror_complete,
};

static const BlockJobDriver commit_active_job_driver = {
    .instance_size = sizeof(MirrorBlockJob),
    .job_type      = BLOCK_JOB_TYPE_COMMIT,
    .set_speed     = mirror_set_speed,
    .complete      = mirror_complete,
};

static void mirror_start_job(BlockDriverState *bs, BlockDriverState *target,
                             const char *replaces,
                             int64_t speed, uint32_t granularity,
                             int64_t buf_size,
                             BlockdevOnError on_source_error,
                             BlockdevOnError on_target_error,
                             bool unmap,
                             BlockCompletionFunc *cb,
                             void *opaque, Error **errp,
                             const BlockJobDriver *driver,
                             bool is_none_mode, BlockDriverState *base)
{
    MirrorBlockJob *s;

    if (granularity == 0) {
        granularity = bdrv_get_default_bitmap_granularity(target);
    }

    assert ((granularity & (granularity - 1)) == 0);

    if (buf_size < 0) {
        error_setg(errp, "Invalid parameter 'buf-size'");
        return;
    }

    if (buf_size == 0) {
        buf_size = DEFAULT_MIRROR_BUF_SIZE;
    }

    s = block_job_create(driver, bs, speed, cb, opaque, errp);
    if (!s) {
        return;
    }

    s->target = blk_new();
    blk_insert_bs(s->target, target);

    s->replaces = g_strdup(replaces);
    s->on_source_error = on_source_error;
    s->on_target_error = on_target_error;
    s->is_none_mode = is_none_mode;
    s->base = base;
    s->granularity = granularity;
    s->buf_size = ROUND_UP(buf_size, granularity);
    s->unmap = unmap;

    s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity, NULL, errp);
    if (!s->dirty_bitmap) {
        g_free(s->replaces);
        blk_unref(s->target);
        block_job_unref(&s->common);
        return;
    }

    bdrv_op_block_all(target, s->common.blocker);

    s->common.co = qemu_coroutine_create(mirror_run);
    trace_mirror_start(bs, s, s->common.co, opaque);
    qemu_coroutine_enter(s->common.co, s);
}

void mirror_start(BlockDriverState *bs, BlockDriverState *target,
                  const char *replaces,
                  int64_t speed, uint32_t granularity, int64_t buf_size,
                  MirrorSyncMode mode, BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  bool unmap,
                  BlockCompletionFunc *cb,
                  void *opaque, Error **errp)
{
    bool is_none_mode;
    BlockDriverState *base;

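    /* Rough mapping of the sync modes (for orientation): 'top' copies only
     * sectors allocated above the backing file, 'full' copies the whole
     * device, 'none' starts from an empty dirty bitmap; 'incremental' is
     * rejected below. */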
    if (mode == MIRROR_SYNC_MODE_INCREMENTAL) {
        error_setg(errp, "Sync mode 'incremental' not supported");
        return;
    }
    is_none_mode = mode == MIRROR_SYNC_MODE_NONE;
    base = mode == MIRROR_SYNC_MODE_TOP ? backing_bs(bs) : NULL;
    mirror_start_job(bs, target, replaces,
                     speed, granularity, buf_size,
                     on_source_error, on_target_error, unmap, cb, opaque, errp,
                     &mirror_job_driver, is_none_mode, base);
}

void commit_active_start(BlockDriverState *bs, BlockDriverState *base,
                         int64_t speed,
                         BlockdevOnError on_error,
                         BlockCompletionFunc *cb,
                         void *opaque, Error **errp)
{
    int64_t length, base_length;
    int orig_base_flags;
    int ret;
    Error *local_err = NULL;

    orig_base_flags = bdrv_get_flags(base);

    if (bdrv_reopen(base, bs->open_flags, errp)) {
        return;
    }

    length = bdrv_getlength(bs);
    if (length < 0) {
        error_setg_errno(errp, -length,
                         "Unable to determine length of %s", bs->filename);
        goto error_restore_flags;
    }

    base_length = bdrv_getlength(base);
    if (base_length < 0) {
        error_setg_errno(errp, -base_length,
                         "Unable to determine length of %s", base->filename);
        goto error_restore_flags;
    }

    if (length > base_length) {
        ret = bdrv_truncate(base, length);
        if (ret < 0) {
            error_setg_errno(errp, -ret,
                             "Top image %s is larger than base image %s, and "
                             "resize of base image failed",
                             bs->filename, base->filename);
            goto error_restore_flags;
        }
    }

    mirror_start_job(bs, base, NULL, speed, 0, 0,
                     on_error, on_error, false, cb, opaque, &local_err,
                     &commit_active_job_driver, false, base);
    if (local_err) {
        error_propagate(errp, local_err);
        goto error_restore_flags;
    }

    return;

error_restore_flags:
    /* ignore error and errp for bdrv_reopen, because we want to propagate
     * the original error */
    bdrv_reopen(base, orig_base_flags, NULL);
    return;
}
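
/* Minimal usage sketch (hypothetical caller, for illustration only;
 * my_completion_cb and my_opaque are placeholders):
 *
 *     Error *err = NULL;
 *     mirror_start(bs, target_bs, NULL, 0, 0, 0, MIRROR_SYNC_MODE_FULL,
 *                  BLOCKDEV_ON_ERROR_REPORT, BLOCKDEV_ON_ERROR_REPORT,
 *                  false, my_completion_cb, my_opaque, &err);
 *
 * Passing 0 for speed, granularity and buf_size selects the defaults:
 * no rate limit, bdrv_get_default_bitmap_granularity(), and
 * DEFAULT_MIRROR_BUF_SIZE, respectively.
 */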