1 /* 2 * Image mirroring 3 * 4 * Copyright Red Hat, Inc. 2012 5 * 6 * Authors: 7 * Paolo Bonzini <pbonzini@redhat.com> 8 * 9 * This work is licensed under the terms of the GNU LGPL, version 2 or later. 10 * See the COPYING.LIB file in the top-level directory. 11 * 12 */ 13 14 #include "qemu/osdep.h" 15 #include "trace.h" 16 #include "block/blockjob_int.h" 17 #include "block/block_int.h" 18 #include "sysemu/block-backend.h" 19 #include "qapi/error.h" 20 #include "qapi/qmp/qerror.h" 21 #include "qemu/ratelimit.h" 22 #include "qemu/bitmap.h" 23 24 #define SLICE_TIME 100000000ULL /* ns */ 25 #define MAX_IN_FLIGHT 16 26 #define MAX_IO_SECTORS ((1 << 20) >> BDRV_SECTOR_BITS) /* 1 Mb */ 27 #define DEFAULT_MIRROR_BUF_SIZE \ 28 (MAX_IN_FLIGHT * MAX_IO_SECTORS * BDRV_SECTOR_SIZE) 29 30 /* The mirroring buffer is a list of granularity-sized chunks. 31 * Free chunks are organized in a list. 32 */ 33 typedef struct MirrorBuffer { 34 QSIMPLEQ_ENTRY(MirrorBuffer) next; 35 } MirrorBuffer; 36 37 typedef struct MirrorBlockJob { 38 BlockJob common; 39 RateLimit limit; 40 BlockBackend *target; 41 BlockDriverState *mirror_top_bs; 42 BlockDriverState *source; 43 BlockDriverState *base; 44 45 /* The name of the graph node to replace */ 46 char *replaces; 47 /* The BDS to replace */ 48 BlockDriverState *to_replace; 49 /* Used to block operations on the drive-mirror-replace target */ 50 Error *replace_blocker; 51 bool is_none_mode; 52 BlockMirrorBackingMode backing_mode; 53 BlockdevOnError on_source_error, on_target_error; 54 bool synced; 55 bool should_complete; 56 int64_t granularity; 57 size_t buf_size; 58 int64_t bdev_length; 59 unsigned long *cow_bitmap; 60 BdrvDirtyBitmap *dirty_bitmap; 61 BdrvDirtyBitmapIter *dbi; 62 uint8_t *buf; 63 QSIMPLEQ_HEAD(, MirrorBuffer) buf_free; 64 int buf_free_count; 65 66 uint64_t last_pause_ns; 67 unsigned long *in_flight_bitmap; 68 int in_flight; 69 int64_t sectors_in_flight; 70 int ret; 71 bool unmap; 72 bool waiting_for_io; 73 int target_cluster_sectors; 74 int max_iov; 75 bool initial_zeroing_ongoing; 76 } MirrorBlockJob; 77 78 typedef struct MirrorOp { 79 MirrorBlockJob *s; 80 QEMUIOVector qiov; 81 int64_t sector_num; 82 int nb_sectors; 83 } MirrorOp; 84 85 static BlockErrorAction mirror_error_action(MirrorBlockJob *s, bool read, 86 int error) 87 { 88 s->synced = false; 89 if (read) { 90 return block_job_error_action(&s->common, s->on_source_error, 91 true, error); 92 } else { 93 return block_job_error_action(&s->common, s->on_target_error, 94 false, error); 95 } 96 } 97 98 static void mirror_iteration_done(MirrorOp *op, int ret) 99 { 100 MirrorBlockJob *s = op->s; 101 struct iovec *iov; 102 int64_t chunk_num; 103 int i, nb_chunks, sectors_per_chunk; 104 105 trace_mirror_iteration_done(s, op->sector_num, op->nb_sectors, ret); 106 107 s->in_flight--; 108 s->sectors_in_flight -= op->nb_sectors; 109 iov = op->qiov.iov; 110 for (i = 0; i < op->qiov.niov; i++) { 111 MirrorBuffer *buf = (MirrorBuffer *) iov[i].iov_base; 112 QSIMPLEQ_INSERT_TAIL(&s->buf_free, buf, next); 113 s->buf_free_count++; 114 } 115 116 sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS; 117 chunk_num = op->sector_num / sectors_per_chunk; 118 nb_chunks = DIV_ROUND_UP(op->nb_sectors, sectors_per_chunk); 119 bitmap_clear(s->in_flight_bitmap, chunk_num, nb_chunks); 120 if (ret >= 0) { 121 if (s->cow_bitmap) { 122 bitmap_set(s->cow_bitmap, chunk_num, nb_chunks); 123 } 124 if (!s->initial_zeroing_ongoing) { 125 s->common.offset += (uint64_t)op->nb_sectors * BDRV_SECTOR_SIZE; 126 } 127 } 128 qemu_iovec_destroy(&op->qiov); 129 g_free(op); 130 131 if (s->waiting_for_io) { 132 qemu_coroutine_enter(s->common.co); 133 } 134 } 135 136 static void mirror_write_complete(void *opaque, int ret) 137 { 138 MirrorOp *op = opaque; 139 MirrorBlockJob *s = op->s; 140 141 aio_context_acquire(blk_get_aio_context(s->common.blk)); 142 if (ret < 0) { 143 BlockErrorAction action; 144 145 bdrv_set_dirty_bitmap(s->dirty_bitmap, op->sector_num, op->nb_sectors); 146 action = mirror_error_action(s, false, -ret); 147 if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) { 148 s->ret = ret; 149 } 150 } 151 mirror_iteration_done(op, ret); 152 aio_context_release(blk_get_aio_context(s->common.blk)); 153 } 154 155 static void mirror_read_complete(void *opaque, int ret) 156 { 157 MirrorOp *op = opaque; 158 MirrorBlockJob *s = op->s; 159 160 aio_context_acquire(blk_get_aio_context(s->common.blk)); 161 if (ret < 0) { 162 BlockErrorAction action; 163 164 bdrv_set_dirty_bitmap(s->dirty_bitmap, op->sector_num, op->nb_sectors); 165 action = mirror_error_action(s, true, -ret); 166 if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) { 167 s->ret = ret; 168 } 169 170 mirror_iteration_done(op, ret); 171 } else { 172 blk_aio_pwritev(s->target, op->sector_num * BDRV_SECTOR_SIZE, &op->qiov, 173 0, mirror_write_complete, op); 174 } 175 aio_context_release(blk_get_aio_context(s->common.blk)); 176 } 177 178 static inline void mirror_clip_sectors(MirrorBlockJob *s, 179 int64_t sector_num, 180 int *nb_sectors) 181 { 182 *nb_sectors = MIN(*nb_sectors, 183 s->bdev_length / BDRV_SECTOR_SIZE - sector_num); 184 } 185 186 /* Round sector_num and/or nb_sectors to target cluster if COW is needed, and 187 * return the offset of the adjusted tail sector against original. */ 188 static int mirror_cow_align(MirrorBlockJob *s, 189 int64_t *sector_num, 190 int *nb_sectors) 191 { 192 bool need_cow; 193 int ret = 0; 194 int chunk_sectors = s->granularity >> BDRV_SECTOR_BITS; 195 int64_t align_sector_num = *sector_num; 196 int align_nb_sectors = *nb_sectors; 197 int max_sectors = chunk_sectors * s->max_iov; 198 199 need_cow = !test_bit(*sector_num / chunk_sectors, s->cow_bitmap); 200 need_cow |= !test_bit((*sector_num + *nb_sectors - 1) / chunk_sectors, 201 s->cow_bitmap); 202 if (need_cow) { 203 bdrv_round_sectors_to_clusters(blk_bs(s->target), *sector_num, 204 *nb_sectors, &align_sector_num, 205 &align_nb_sectors); 206 } 207 208 if (align_nb_sectors > max_sectors) { 209 align_nb_sectors = max_sectors; 210 if (need_cow) { 211 align_nb_sectors = QEMU_ALIGN_DOWN(align_nb_sectors, 212 s->target_cluster_sectors); 213 } 214 } 215 /* Clipping may result in align_nb_sectors unaligned to chunk boundary, but 216 * that doesn't matter because it's already the end of source image. */ 217 mirror_clip_sectors(s, align_sector_num, &align_nb_sectors); 218 219 ret = align_sector_num + align_nb_sectors - (*sector_num + *nb_sectors); 220 *sector_num = align_sector_num; 221 *nb_sectors = align_nb_sectors; 222 assert(ret >= 0); 223 return ret; 224 } 225 226 static inline void mirror_wait_for_io(MirrorBlockJob *s) 227 { 228 assert(!s->waiting_for_io); 229 s->waiting_for_io = true; 230 qemu_coroutine_yield(); 231 s->waiting_for_io = false; 232 } 233 234 /* Submit async read while handling COW. 235 * Returns: The number of sectors copied after and including sector_num, 236 * excluding any sectors copied prior to sector_num due to alignment. 237 * This will be nb_sectors if no alignment is necessary, or 238 * (new_end - sector_num) if tail is rounded up or down due to 239 * alignment or buffer limit. 240 */ 241 static int mirror_do_read(MirrorBlockJob *s, int64_t sector_num, 242 int nb_sectors) 243 { 244 BlockBackend *source = s->common.blk; 245 int sectors_per_chunk, nb_chunks; 246 int ret; 247 MirrorOp *op; 248 int max_sectors; 249 250 sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS; 251 max_sectors = sectors_per_chunk * s->max_iov; 252 253 /* We can only handle as much as buf_size at a time. */ 254 nb_sectors = MIN(s->buf_size >> BDRV_SECTOR_BITS, nb_sectors); 255 nb_sectors = MIN(max_sectors, nb_sectors); 256 assert(nb_sectors); 257 ret = nb_sectors; 258 259 if (s->cow_bitmap) { 260 ret += mirror_cow_align(s, §or_num, &nb_sectors); 261 } 262 assert(nb_sectors << BDRV_SECTOR_BITS <= s->buf_size); 263 /* The sector range must meet granularity because: 264 * 1) Caller passes in aligned values; 265 * 2) mirror_cow_align is used only when target cluster is larger. */ 266 assert(!(sector_num % sectors_per_chunk)); 267 nb_chunks = DIV_ROUND_UP(nb_sectors, sectors_per_chunk); 268 269 while (s->buf_free_count < nb_chunks) { 270 trace_mirror_yield_in_flight(s, sector_num, s->in_flight); 271 mirror_wait_for_io(s); 272 } 273 274 /* Allocate a MirrorOp that is used as an AIO callback. */ 275 op = g_new(MirrorOp, 1); 276 op->s = s; 277 op->sector_num = sector_num; 278 op->nb_sectors = nb_sectors; 279 280 /* Now make a QEMUIOVector taking enough granularity-sized chunks 281 * from s->buf_free. 282 */ 283 qemu_iovec_init(&op->qiov, nb_chunks); 284 while (nb_chunks-- > 0) { 285 MirrorBuffer *buf = QSIMPLEQ_FIRST(&s->buf_free); 286 size_t remaining = nb_sectors * BDRV_SECTOR_SIZE - op->qiov.size; 287 288 QSIMPLEQ_REMOVE_HEAD(&s->buf_free, next); 289 s->buf_free_count--; 290 qemu_iovec_add(&op->qiov, buf, MIN(s->granularity, remaining)); 291 } 292 293 /* Copy the dirty cluster. */ 294 s->in_flight++; 295 s->sectors_in_flight += nb_sectors; 296 trace_mirror_one_iteration(s, sector_num, nb_sectors); 297 298 blk_aio_preadv(source, sector_num * BDRV_SECTOR_SIZE, &op->qiov, 0, 299 mirror_read_complete, op); 300 return ret; 301 } 302 303 static void mirror_do_zero_or_discard(MirrorBlockJob *s, 304 int64_t sector_num, 305 int nb_sectors, 306 bool is_discard) 307 { 308 MirrorOp *op; 309 310 /* Allocate a MirrorOp that is used as an AIO callback. The qiov is zeroed 311 * so the freeing in mirror_iteration_done is nop. */ 312 op = g_new0(MirrorOp, 1); 313 op->s = s; 314 op->sector_num = sector_num; 315 op->nb_sectors = nb_sectors; 316 317 s->in_flight++; 318 s->sectors_in_flight += nb_sectors; 319 if (is_discard) { 320 blk_aio_pdiscard(s->target, sector_num << BDRV_SECTOR_BITS, 321 op->nb_sectors << BDRV_SECTOR_BITS, 322 mirror_write_complete, op); 323 } else { 324 blk_aio_pwrite_zeroes(s->target, sector_num * BDRV_SECTOR_SIZE, 325 op->nb_sectors * BDRV_SECTOR_SIZE, 326 s->unmap ? BDRV_REQ_MAY_UNMAP : 0, 327 mirror_write_complete, op); 328 } 329 } 330 331 static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s) 332 { 333 BlockDriverState *source = s->source; 334 int64_t sector_num, first_chunk; 335 uint64_t delay_ns = 0; 336 /* At least the first dirty chunk is mirrored in one iteration. */ 337 int nb_chunks = 1; 338 int64_t end = s->bdev_length / BDRV_SECTOR_SIZE; 339 int sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS; 340 bool write_zeroes_ok = bdrv_can_write_zeroes_with_unmap(blk_bs(s->target)); 341 int max_io_sectors = MAX((s->buf_size >> BDRV_SECTOR_BITS) / MAX_IN_FLIGHT, 342 MAX_IO_SECTORS); 343 344 sector_num = bdrv_dirty_iter_next(s->dbi); 345 if (sector_num < 0) { 346 bdrv_set_dirty_iter(s->dbi, 0); 347 sector_num = bdrv_dirty_iter_next(s->dbi); 348 trace_mirror_restart_iter(s, bdrv_get_dirty_count(s->dirty_bitmap)); 349 assert(sector_num >= 0); 350 } 351 352 first_chunk = sector_num / sectors_per_chunk; 353 while (test_bit(first_chunk, s->in_flight_bitmap)) { 354 trace_mirror_yield_in_flight(s, sector_num, s->in_flight); 355 mirror_wait_for_io(s); 356 } 357 358 block_job_pause_point(&s->common); 359 360 /* Find the number of consective dirty chunks following the first dirty 361 * one, and wait for in flight requests in them. */ 362 while (nb_chunks * sectors_per_chunk < (s->buf_size >> BDRV_SECTOR_BITS)) { 363 int64_t next_dirty; 364 int64_t next_sector = sector_num + nb_chunks * sectors_per_chunk; 365 int64_t next_chunk = next_sector / sectors_per_chunk; 366 if (next_sector >= end || 367 !bdrv_get_dirty(source, s->dirty_bitmap, next_sector)) { 368 break; 369 } 370 if (test_bit(next_chunk, s->in_flight_bitmap)) { 371 break; 372 } 373 374 next_dirty = bdrv_dirty_iter_next(s->dbi); 375 if (next_dirty > next_sector || next_dirty < 0) { 376 /* The bitmap iterator's cache is stale, refresh it */ 377 bdrv_set_dirty_iter(s->dbi, next_sector); 378 next_dirty = bdrv_dirty_iter_next(s->dbi); 379 } 380 assert(next_dirty == next_sector); 381 nb_chunks++; 382 } 383 384 /* Clear dirty bits before querying the block status, because 385 * calling bdrv_get_block_status_above could yield - if some blocks are 386 * marked dirty in this window, we need to know. 387 */ 388 bdrv_reset_dirty_bitmap(s->dirty_bitmap, sector_num, 389 nb_chunks * sectors_per_chunk); 390 bitmap_set(s->in_flight_bitmap, sector_num / sectors_per_chunk, nb_chunks); 391 while (nb_chunks > 0 && sector_num < end) { 392 int64_t ret; 393 int io_sectors, io_sectors_acct; 394 BlockDriverState *file; 395 enum MirrorMethod { 396 MIRROR_METHOD_COPY, 397 MIRROR_METHOD_ZERO, 398 MIRROR_METHOD_DISCARD 399 } mirror_method = MIRROR_METHOD_COPY; 400 401 assert(!(sector_num % sectors_per_chunk)); 402 ret = bdrv_get_block_status_above(source, NULL, sector_num, 403 nb_chunks * sectors_per_chunk, 404 &io_sectors, &file); 405 if (ret < 0) { 406 io_sectors = MIN(nb_chunks * sectors_per_chunk, max_io_sectors); 407 } else if (ret & BDRV_BLOCK_DATA) { 408 io_sectors = MIN(io_sectors, max_io_sectors); 409 } 410 411 io_sectors -= io_sectors % sectors_per_chunk; 412 if (io_sectors < sectors_per_chunk) { 413 io_sectors = sectors_per_chunk; 414 } else if (ret >= 0 && !(ret & BDRV_BLOCK_DATA)) { 415 int64_t target_sector_num; 416 int target_nb_sectors; 417 bdrv_round_sectors_to_clusters(blk_bs(s->target), sector_num, 418 io_sectors, &target_sector_num, 419 &target_nb_sectors); 420 if (target_sector_num == sector_num && 421 target_nb_sectors == io_sectors) { 422 mirror_method = ret & BDRV_BLOCK_ZERO ? 423 MIRROR_METHOD_ZERO : 424 MIRROR_METHOD_DISCARD; 425 } 426 } 427 428 while (s->in_flight >= MAX_IN_FLIGHT) { 429 trace_mirror_yield_in_flight(s, sector_num, s->in_flight); 430 mirror_wait_for_io(s); 431 } 432 433 if (s->ret < 0) { 434 return 0; 435 } 436 437 mirror_clip_sectors(s, sector_num, &io_sectors); 438 switch (mirror_method) { 439 case MIRROR_METHOD_COPY: 440 io_sectors = mirror_do_read(s, sector_num, io_sectors); 441 io_sectors_acct = io_sectors; 442 break; 443 case MIRROR_METHOD_ZERO: 444 case MIRROR_METHOD_DISCARD: 445 mirror_do_zero_or_discard(s, sector_num, io_sectors, 446 mirror_method == MIRROR_METHOD_DISCARD); 447 if (write_zeroes_ok) { 448 io_sectors_acct = 0; 449 } else { 450 io_sectors_acct = io_sectors; 451 } 452 break; 453 default: 454 abort(); 455 } 456 assert(io_sectors); 457 sector_num += io_sectors; 458 nb_chunks -= DIV_ROUND_UP(io_sectors, sectors_per_chunk); 459 if (s->common.speed) { 460 delay_ns = ratelimit_calculate_delay(&s->limit, io_sectors_acct); 461 } 462 } 463 return delay_ns; 464 } 465 466 static void mirror_free_init(MirrorBlockJob *s) 467 { 468 int granularity = s->granularity; 469 size_t buf_size = s->buf_size; 470 uint8_t *buf = s->buf; 471 472 assert(s->buf_free_count == 0); 473 QSIMPLEQ_INIT(&s->buf_free); 474 while (buf_size != 0) { 475 MirrorBuffer *cur = (MirrorBuffer *)buf; 476 QSIMPLEQ_INSERT_TAIL(&s->buf_free, cur, next); 477 s->buf_free_count++; 478 buf_size -= granularity; 479 buf += granularity; 480 } 481 } 482 483 /* This is also used for the .pause callback. There is no matching 484 * mirror_resume() because mirror_run() will begin iterating again 485 * when the job is resumed. 486 */ 487 static void mirror_wait_for_all_io(MirrorBlockJob *s) 488 { 489 while (s->in_flight > 0) { 490 mirror_wait_for_io(s); 491 } 492 } 493 494 typedef struct { 495 int ret; 496 } MirrorExitData; 497 498 static void mirror_exit(BlockJob *job, void *opaque) 499 { 500 MirrorBlockJob *s = container_of(job, MirrorBlockJob, common); 501 MirrorExitData *data = opaque; 502 AioContext *replace_aio_context = NULL; 503 BlockDriverState *src = s->source; 504 BlockDriverState *target_bs = blk_bs(s->target); 505 BlockDriverState *mirror_top_bs = s->mirror_top_bs; 506 Error *local_err = NULL; 507 508 /* Make sure that the source BDS doesn't go away before we called 509 * block_job_completed(). */ 510 bdrv_ref(src); 511 bdrv_ref(mirror_top_bs); 512 513 /* We don't access the source any more. Dropping any WRITE/RESIZE is 514 * required before it could become a backing file of target_bs. */ 515 bdrv_child_try_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL, 516 &error_abort); 517 if (s->backing_mode == MIRROR_SOURCE_BACKING_CHAIN) { 518 BlockDriverState *backing = s->is_none_mode ? src : s->base; 519 if (backing_bs(target_bs) != backing) { 520 bdrv_set_backing_hd(target_bs, backing, &local_err); 521 if (local_err) { 522 error_report_err(local_err); 523 data->ret = -EPERM; 524 } 525 } 526 } 527 528 if (s->to_replace) { 529 replace_aio_context = bdrv_get_aio_context(s->to_replace); 530 aio_context_acquire(replace_aio_context); 531 } 532 533 if (s->should_complete && data->ret == 0) { 534 BlockDriverState *to_replace = src; 535 if (s->to_replace) { 536 to_replace = s->to_replace; 537 } 538 539 if (bdrv_get_flags(target_bs) != bdrv_get_flags(to_replace)) { 540 bdrv_reopen(target_bs, bdrv_get_flags(to_replace), NULL); 541 } 542 543 /* The mirror job has no requests in flight any more, but we need to 544 * drain potential other users of the BDS before changing the graph. */ 545 bdrv_drained_begin(target_bs); 546 bdrv_replace_in_backing_chain(to_replace, target_bs); 547 bdrv_drained_end(target_bs); 548 } 549 if (s->to_replace) { 550 bdrv_op_unblock_all(s->to_replace, s->replace_blocker); 551 error_free(s->replace_blocker); 552 bdrv_unref(s->to_replace); 553 } 554 if (replace_aio_context) { 555 aio_context_release(replace_aio_context); 556 } 557 g_free(s->replaces); 558 blk_unref(s->target); 559 s->target = NULL; 560 561 /* Remove the mirror filter driver from the graph. Before this, get rid of 562 * the blockers on the intermediate nodes so that the resulting state is 563 * valid. */ 564 block_job_remove_all_bdrv(job); 565 bdrv_replace_in_backing_chain(mirror_top_bs, backing_bs(mirror_top_bs)); 566 567 /* We just changed the BDS the job BB refers to (with either or both of the 568 * bdrv_replace_in_backing_chain() calls), so switch the BB back so the 569 * cleanup does the right thing. We don't need any permissions any more 570 * now. */ 571 blk_remove_bs(job->blk); 572 blk_set_perm(job->blk, 0, BLK_PERM_ALL, &error_abort); 573 blk_insert_bs(job->blk, mirror_top_bs, &error_abort); 574 575 block_job_completed(&s->common, data->ret); 576 577 g_free(data); 578 bdrv_drained_end(src); 579 bdrv_unref(mirror_top_bs); 580 bdrv_unref(src); 581 } 582 583 static void mirror_throttle(MirrorBlockJob *s) 584 { 585 int64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME); 586 587 if (now - s->last_pause_ns > SLICE_TIME) { 588 s->last_pause_ns = now; 589 block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, 0); 590 } else { 591 block_job_pause_point(&s->common); 592 } 593 } 594 595 static int coroutine_fn mirror_dirty_init(MirrorBlockJob *s) 596 { 597 int64_t sector_num, end; 598 BlockDriverState *base = s->base; 599 BlockDriverState *bs = s->source; 600 BlockDriverState *target_bs = blk_bs(s->target); 601 int ret, n; 602 603 end = s->bdev_length / BDRV_SECTOR_SIZE; 604 605 if (base == NULL && !bdrv_has_zero_init(target_bs)) { 606 if (!bdrv_can_write_zeroes_with_unmap(target_bs)) { 607 bdrv_set_dirty_bitmap(s->dirty_bitmap, 0, end); 608 return 0; 609 } 610 611 s->initial_zeroing_ongoing = true; 612 for (sector_num = 0; sector_num < end; ) { 613 int nb_sectors = MIN(end - sector_num, 614 QEMU_ALIGN_DOWN(INT_MAX, s->granularity) >> BDRV_SECTOR_BITS); 615 616 mirror_throttle(s); 617 618 if (block_job_is_cancelled(&s->common)) { 619 s->initial_zeroing_ongoing = false; 620 return 0; 621 } 622 623 if (s->in_flight >= MAX_IN_FLIGHT) { 624 trace_mirror_yield(s, s->in_flight, s->buf_free_count, -1); 625 mirror_wait_for_io(s); 626 continue; 627 } 628 629 mirror_do_zero_or_discard(s, sector_num, nb_sectors, false); 630 sector_num += nb_sectors; 631 } 632 633 mirror_wait_for_all_io(s); 634 s->initial_zeroing_ongoing = false; 635 } 636 637 /* First part, loop on the sectors and initialize the dirty bitmap. */ 638 for (sector_num = 0; sector_num < end; ) { 639 /* Just to make sure we are not exceeding int limit. */ 640 int nb_sectors = MIN(INT_MAX >> BDRV_SECTOR_BITS, 641 end - sector_num); 642 643 mirror_throttle(s); 644 645 if (block_job_is_cancelled(&s->common)) { 646 return 0; 647 } 648 649 ret = bdrv_is_allocated_above(bs, base, sector_num, nb_sectors, &n); 650 if (ret < 0) { 651 return ret; 652 } 653 654 assert(n > 0); 655 if (ret == 1) { 656 bdrv_set_dirty_bitmap(s->dirty_bitmap, sector_num, n); 657 } 658 sector_num += n; 659 } 660 return 0; 661 } 662 663 /* Called when going out of the streaming phase to flush the bulk of the 664 * data to the medium, or just before completing. 665 */ 666 static int mirror_flush(MirrorBlockJob *s) 667 { 668 int ret = blk_flush(s->target); 669 if (ret < 0) { 670 if (mirror_error_action(s, false, -ret) == BLOCK_ERROR_ACTION_REPORT) { 671 s->ret = ret; 672 } 673 } 674 return ret; 675 } 676 677 static void coroutine_fn mirror_run(void *opaque) 678 { 679 MirrorBlockJob *s = opaque; 680 MirrorExitData *data; 681 BlockDriverState *bs = s->source; 682 BlockDriverState *target_bs = blk_bs(s->target); 683 bool need_drain = true; 684 int64_t length; 685 BlockDriverInfo bdi; 686 char backing_filename[2]; /* we only need 2 characters because we are only 687 checking for a NULL string */ 688 int ret = 0; 689 int target_cluster_size = BDRV_SECTOR_SIZE; 690 691 if (block_job_is_cancelled(&s->common)) { 692 goto immediate_exit; 693 } 694 695 s->bdev_length = bdrv_getlength(bs); 696 if (s->bdev_length < 0) { 697 ret = s->bdev_length; 698 goto immediate_exit; 699 } 700 701 /* Active commit must resize the base image if its size differs from the 702 * active layer. */ 703 if (s->base == blk_bs(s->target)) { 704 int64_t base_length; 705 706 base_length = blk_getlength(s->target); 707 if (base_length < 0) { 708 ret = base_length; 709 goto immediate_exit; 710 } 711 712 if (s->bdev_length > base_length) { 713 ret = blk_truncate(s->target, s->bdev_length); 714 if (ret < 0) { 715 goto immediate_exit; 716 } 717 } 718 } 719 720 if (s->bdev_length == 0) { 721 /* Report BLOCK_JOB_READY and wait for complete. */ 722 block_job_event_ready(&s->common); 723 s->synced = true; 724 while (!block_job_is_cancelled(&s->common) && !s->should_complete) { 725 block_job_yield(&s->common); 726 } 727 s->common.cancelled = false; 728 goto immediate_exit; 729 } 730 731 length = DIV_ROUND_UP(s->bdev_length, s->granularity); 732 s->in_flight_bitmap = bitmap_new(length); 733 734 /* If we have no backing file yet in the destination, we cannot let 735 * the destination do COW. Instead, we copy sectors around the 736 * dirty data if needed. We need a bitmap to do that. 737 */ 738 bdrv_get_backing_filename(target_bs, backing_filename, 739 sizeof(backing_filename)); 740 if (!bdrv_get_info(target_bs, &bdi) && bdi.cluster_size) { 741 target_cluster_size = bdi.cluster_size; 742 } 743 if (backing_filename[0] && !target_bs->backing 744 && s->granularity < target_cluster_size) { 745 s->buf_size = MAX(s->buf_size, target_cluster_size); 746 s->cow_bitmap = bitmap_new(length); 747 } 748 s->target_cluster_sectors = target_cluster_size >> BDRV_SECTOR_BITS; 749 s->max_iov = MIN(bs->bl.max_iov, target_bs->bl.max_iov); 750 751 s->buf = qemu_try_blockalign(bs, s->buf_size); 752 if (s->buf == NULL) { 753 ret = -ENOMEM; 754 goto immediate_exit; 755 } 756 757 mirror_free_init(s); 758 759 s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME); 760 if (!s->is_none_mode) { 761 ret = mirror_dirty_init(s); 762 if (ret < 0 || block_job_is_cancelled(&s->common)) { 763 goto immediate_exit; 764 } 765 } 766 767 assert(!s->dbi); 768 s->dbi = bdrv_dirty_iter_new(s->dirty_bitmap, 0); 769 for (;;) { 770 uint64_t delay_ns = 0; 771 int64_t cnt, delta; 772 bool should_complete; 773 774 if (s->ret < 0) { 775 ret = s->ret; 776 goto immediate_exit; 777 } 778 779 block_job_pause_point(&s->common); 780 781 cnt = bdrv_get_dirty_count(s->dirty_bitmap); 782 /* s->common.offset contains the number of bytes already processed so 783 * far, cnt is the number of dirty sectors remaining and 784 * s->sectors_in_flight is the number of sectors currently being 785 * processed; together those are the current total operation length */ 786 s->common.len = s->common.offset + 787 (cnt + s->sectors_in_flight) * BDRV_SECTOR_SIZE; 788 789 /* Note that even when no rate limit is applied we need to yield 790 * periodically with no pending I/O so that bdrv_drain_all() returns. 791 * We do so every SLICE_TIME nanoseconds, or when there is an error, 792 * or when the source is clean, whichever comes first. 793 */ 794 delta = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - s->last_pause_ns; 795 if (delta < SLICE_TIME && 796 s->common.iostatus == BLOCK_DEVICE_IO_STATUS_OK) { 797 if (s->in_flight >= MAX_IN_FLIGHT || s->buf_free_count == 0 || 798 (cnt == 0 && s->in_flight > 0)) { 799 trace_mirror_yield(s, s->in_flight, s->buf_free_count, cnt); 800 mirror_wait_for_io(s); 801 continue; 802 } else if (cnt != 0) { 803 delay_ns = mirror_iteration(s); 804 } 805 } 806 807 should_complete = false; 808 if (s->in_flight == 0 && cnt == 0) { 809 trace_mirror_before_flush(s); 810 if (!s->synced) { 811 if (mirror_flush(s) < 0) { 812 /* Go check s->ret. */ 813 continue; 814 } 815 /* We're out of the streaming phase. From now on, if the job 816 * is cancelled we will actually complete all pending I/O and 817 * report completion. This way, block-job-cancel will leave 818 * the target in a consistent state. 819 */ 820 block_job_event_ready(&s->common); 821 s->synced = true; 822 } 823 824 should_complete = s->should_complete || 825 block_job_is_cancelled(&s->common); 826 cnt = bdrv_get_dirty_count(s->dirty_bitmap); 827 } 828 829 if (cnt == 0 && should_complete) { 830 /* The dirty bitmap is not updated while operations are pending. 831 * If we're about to exit, wait for pending operations before 832 * calling bdrv_get_dirty_count(bs), or we may exit while the 833 * source has dirty data to copy! 834 * 835 * Note that I/O can be submitted by the guest while 836 * mirror_populate runs, so pause it now. Before deciding 837 * whether to switch to target check one last time if I/O has 838 * come in the meanwhile, and if not flush the data to disk. 839 */ 840 trace_mirror_before_drain(s, cnt); 841 842 bdrv_drained_begin(bs); 843 cnt = bdrv_get_dirty_count(s->dirty_bitmap); 844 if (cnt > 0 || mirror_flush(s) < 0) { 845 bdrv_drained_end(bs); 846 continue; 847 } 848 849 /* The two disks are in sync. Exit and report successful 850 * completion. 851 */ 852 assert(QLIST_EMPTY(&bs->tracked_requests)); 853 s->common.cancelled = false; 854 need_drain = false; 855 break; 856 } 857 858 ret = 0; 859 trace_mirror_before_sleep(s, cnt, s->synced, delay_ns); 860 if (!s->synced) { 861 block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns); 862 if (block_job_is_cancelled(&s->common)) { 863 break; 864 } 865 } else if (!should_complete) { 866 delay_ns = (s->in_flight == 0 && cnt == 0 ? SLICE_TIME : 0); 867 block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns); 868 } 869 s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME); 870 } 871 872 immediate_exit: 873 if (s->in_flight > 0) { 874 /* We get here only if something went wrong. Either the job failed, 875 * or it was cancelled prematurely so that we do not guarantee that 876 * the target is a copy of the source. 877 */ 878 assert(ret < 0 || (!s->synced && block_job_is_cancelled(&s->common))); 879 assert(need_drain); 880 mirror_wait_for_all_io(s); 881 } 882 883 assert(s->in_flight == 0); 884 qemu_vfree(s->buf); 885 g_free(s->cow_bitmap); 886 g_free(s->in_flight_bitmap); 887 bdrv_dirty_iter_free(s->dbi); 888 bdrv_release_dirty_bitmap(bs, s->dirty_bitmap); 889 890 data = g_malloc(sizeof(*data)); 891 data->ret = ret; 892 893 if (need_drain) { 894 bdrv_drained_begin(bs); 895 } 896 block_job_defer_to_main_loop(&s->common, mirror_exit, data); 897 } 898 899 static void mirror_set_speed(BlockJob *job, int64_t speed, Error **errp) 900 { 901 MirrorBlockJob *s = container_of(job, MirrorBlockJob, common); 902 903 if (speed < 0) { 904 error_setg(errp, QERR_INVALID_PARAMETER, "speed"); 905 return; 906 } 907 ratelimit_set_speed(&s->limit, speed / BDRV_SECTOR_SIZE, SLICE_TIME); 908 } 909 910 static void mirror_complete(BlockJob *job, Error **errp) 911 { 912 MirrorBlockJob *s = container_of(job, MirrorBlockJob, common); 913 BlockDriverState *target; 914 915 target = blk_bs(s->target); 916 917 if (!s->synced) { 918 error_setg(errp, "The active block job '%s' cannot be completed", 919 job->id); 920 return; 921 } 922 923 if (s->backing_mode == MIRROR_OPEN_BACKING_CHAIN) { 924 int ret; 925 926 assert(!target->backing); 927 ret = bdrv_open_backing_file(target, NULL, "backing", errp); 928 if (ret < 0) { 929 return; 930 } 931 } 932 933 /* block all operations on to_replace bs */ 934 if (s->replaces) { 935 AioContext *replace_aio_context; 936 937 s->to_replace = bdrv_find_node(s->replaces); 938 if (!s->to_replace) { 939 error_setg(errp, "Node name '%s' not found", s->replaces); 940 return; 941 } 942 943 replace_aio_context = bdrv_get_aio_context(s->to_replace); 944 aio_context_acquire(replace_aio_context); 945 946 /* TODO Translate this into permission system. Current definition of 947 * GRAPH_MOD would require to request it for the parents; they might 948 * not even be BlockDriverStates, however, so a BdrvChild can't address 949 * them. May need redefinition of GRAPH_MOD. */ 950 error_setg(&s->replace_blocker, 951 "block device is in use by block-job-complete"); 952 bdrv_op_block_all(s->to_replace, s->replace_blocker); 953 bdrv_ref(s->to_replace); 954 955 aio_context_release(replace_aio_context); 956 } 957 958 s->should_complete = true; 959 block_job_enter(&s->common); 960 } 961 962 static void mirror_pause(BlockJob *job) 963 { 964 MirrorBlockJob *s = container_of(job, MirrorBlockJob, common); 965 966 mirror_wait_for_all_io(s); 967 } 968 969 static void mirror_attached_aio_context(BlockJob *job, AioContext *new_context) 970 { 971 MirrorBlockJob *s = container_of(job, MirrorBlockJob, common); 972 973 blk_set_aio_context(s->target, new_context); 974 } 975 976 static void mirror_drain(BlockJob *job) 977 { 978 MirrorBlockJob *s = container_of(job, MirrorBlockJob, common); 979 980 /* Need to keep a reference in case blk_drain triggers execution 981 * of mirror_complete... 982 */ 983 if (s->target) { 984 BlockBackend *target = s->target; 985 blk_ref(target); 986 blk_drain(target); 987 blk_unref(target); 988 } 989 } 990 991 static const BlockJobDriver mirror_job_driver = { 992 .instance_size = sizeof(MirrorBlockJob), 993 .job_type = BLOCK_JOB_TYPE_MIRROR, 994 .set_speed = mirror_set_speed, 995 .start = mirror_run, 996 .complete = mirror_complete, 997 .pause = mirror_pause, 998 .attached_aio_context = mirror_attached_aio_context, 999 .drain = mirror_drain, 1000 }; 1001 1002 static const BlockJobDriver commit_active_job_driver = { 1003 .instance_size = sizeof(MirrorBlockJob), 1004 .job_type = BLOCK_JOB_TYPE_COMMIT, 1005 .set_speed = mirror_set_speed, 1006 .start = mirror_run, 1007 .complete = mirror_complete, 1008 .pause = mirror_pause, 1009 .attached_aio_context = mirror_attached_aio_context, 1010 .drain = mirror_drain, 1011 }; 1012 1013 static int coroutine_fn bdrv_mirror_top_preadv(BlockDriverState *bs, 1014 uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags) 1015 { 1016 return bdrv_co_preadv(bs->backing, offset, bytes, qiov, flags); 1017 } 1018 1019 static int coroutine_fn bdrv_mirror_top_pwritev(BlockDriverState *bs, 1020 uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags) 1021 { 1022 return bdrv_co_pwritev(bs->backing, offset, bytes, qiov, flags); 1023 } 1024 1025 static int coroutine_fn bdrv_mirror_top_flush(BlockDriverState *bs) 1026 { 1027 return bdrv_co_flush(bs->backing->bs); 1028 } 1029 1030 static int64_t coroutine_fn bdrv_mirror_top_get_block_status( 1031 BlockDriverState *bs, int64_t sector_num, int nb_sectors, int *pnum, 1032 BlockDriverState **file) 1033 { 1034 *pnum = nb_sectors; 1035 *file = bs->backing->bs; 1036 return BDRV_BLOCK_RAW | BDRV_BLOCK_OFFSET_VALID | BDRV_BLOCK_DATA | 1037 (sector_num << BDRV_SECTOR_BITS); 1038 } 1039 1040 static int coroutine_fn bdrv_mirror_top_pwrite_zeroes(BlockDriverState *bs, 1041 int64_t offset, int count, BdrvRequestFlags flags) 1042 { 1043 return bdrv_co_pwrite_zeroes(bs->backing, offset, count, flags); 1044 } 1045 1046 static int coroutine_fn bdrv_mirror_top_pdiscard(BlockDriverState *bs, 1047 int64_t offset, int count) 1048 { 1049 return bdrv_co_pdiscard(bs->backing->bs, offset, count); 1050 } 1051 1052 static void bdrv_mirror_top_close(BlockDriverState *bs) 1053 { 1054 } 1055 1056 static void bdrv_mirror_top_child_perm(BlockDriverState *bs, BdrvChild *c, 1057 const BdrvChildRole *role, 1058 uint64_t perm, uint64_t shared, 1059 uint64_t *nperm, uint64_t *nshared) 1060 { 1061 /* Must be able to forward guest writes to the real image */ 1062 *nperm = 0; 1063 if (perm & BLK_PERM_WRITE) { 1064 *nperm |= BLK_PERM_WRITE; 1065 } 1066 1067 *nshared = BLK_PERM_ALL; 1068 } 1069 1070 /* Dummy node that provides consistent read to its users without requiring it 1071 * from its backing file and that allows writes on the backing file chain. */ 1072 static BlockDriver bdrv_mirror_top = { 1073 .format_name = "mirror_top", 1074 .bdrv_co_preadv = bdrv_mirror_top_preadv, 1075 .bdrv_co_pwritev = bdrv_mirror_top_pwritev, 1076 .bdrv_co_pwrite_zeroes = bdrv_mirror_top_pwrite_zeroes, 1077 .bdrv_co_pdiscard = bdrv_mirror_top_pdiscard, 1078 .bdrv_co_flush = bdrv_mirror_top_flush, 1079 .bdrv_co_get_block_status = bdrv_mirror_top_get_block_status, 1080 .bdrv_close = bdrv_mirror_top_close, 1081 .bdrv_child_perm = bdrv_mirror_top_child_perm, 1082 }; 1083 1084 static void mirror_start_job(const char *job_id, BlockDriverState *bs, 1085 int creation_flags, BlockDriverState *target, 1086 const char *replaces, int64_t speed, 1087 uint32_t granularity, int64_t buf_size, 1088 BlockMirrorBackingMode backing_mode, 1089 BlockdevOnError on_source_error, 1090 BlockdevOnError on_target_error, 1091 bool unmap, 1092 BlockCompletionFunc *cb, 1093 void *opaque, Error **errp, 1094 const BlockJobDriver *driver, 1095 bool is_none_mode, BlockDriverState *base, 1096 bool auto_complete, const char *filter_node_name) 1097 { 1098 MirrorBlockJob *s; 1099 BlockDriverState *mirror_top_bs; 1100 bool target_graph_mod; 1101 bool target_is_backing; 1102 Error *local_err = NULL; 1103 int ret; 1104 1105 if (granularity == 0) { 1106 granularity = bdrv_get_default_bitmap_granularity(target); 1107 } 1108 1109 assert ((granularity & (granularity - 1)) == 0); 1110 1111 if (buf_size < 0) { 1112 error_setg(errp, "Invalid parameter 'buf-size'"); 1113 return; 1114 } 1115 1116 if (buf_size == 0) { 1117 buf_size = DEFAULT_MIRROR_BUF_SIZE; 1118 } 1119 1120 /* In the case of active commit, add dummy driver to provide consistent 1121 * reads on the top, while disabling it in the intermediate nodes, and make 1122 * the backing chain writable. */ 1123 mirror_top_bs = bdrv_new_open_driver(&bdrv_mirror_top, filter_node_name, 1124 BDRV_O_RDWR, errp); 1125 if (mirror_top_bs == NULL) { 1126 return; 1127 } 1128 mirror_top_bs->total_sectors = bs->total_sectors; 1129 1130 /* bdrv_append takes ownership of the mirror_top_bs reference, need to keep 1131 * it alive until block_job_create() even if bs has no parent. */ 1132 bdrv_ref(mirror_top_bs); 1133 bdrv_drained_begin(bs); 1134 bdrv_append(mirror_top_bs, bs, &local_err); 1135 bdrv_drained_end(bs); 1136 1137 if (local_err) { 1138 bdrv_unref(mirror_top_bs); 1139 error_propagate(errp, local_err); 1140 return; 1141 } 1142 1143 /* Make sure that the source is not resized while the job is running */ 1144 s = block_job_create(job_id, driver, mirror_top_bs, 1145 BLK_PERM_CONSISTENT_READ, 1146 BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED | 1147 BLK_PERM_WRITE | BLK_PERM_GRAPH_MOD, speed, 1148 creation_flags, cb, opaque, errp); 1149 bdrv_unref(mirror_top_bs); 1150 if (!s) { 1151 goto fail; 1152 } 1153 s->source = bs; 1154 s->mirror_top_bs = mirror_top_bs; 1155 1156 /* No resize for the target either; while the mirror is still running, a 1157 * consistent read isn't necessarily possible. We could possibly allow 1158 * writes and graph modifications, though it would likely defeat the 1159 * purpose of a mirror, so leave them blocked for now. 1160 * 1161 * In the case of active commit, things look a bit different, though, 1162 * because the target is an already populated backing file in active use. 1163 * We can allow anything except resize there.*/ 1164 target_is_backing = bdrv_chain_contains(bs, target); 1165 target_graph_mod = (backing_mode != MIRROR_LEAVE_BACKING_CHAIN); 1166 s->target = blk_new(BLK_PERM_WRITE | BLK_PERM_RESIZE | 1167 (target_graph_mod ? BLK_PERM_GRAPH_MOD : 0), 1168 BLK_PERM_WRITE_UNCHANGED | 1169 (target_is_backing ? BLK_PERM_CONSISTENT_READ | 1170 BLK_PERM_WRITE | 1171 BLK_PERM_GRAPH_MOD : 0)); 1172 ret = blk_insert_bs(s->target, target, errp); 1173 if (ret < 0) { 1174 goto fail; 1175 } 1176 1177 s->replaces = g_strdup(replaces); 1178 s->on_source_error = on_source_error; 1179 s->on_target_error = on_target_error; 1180 s->is_none_mode = is_none_mode; 1181 s->backing_mode = backing_mode; 1182 s->base = base; 1183 s->granularity = granularity; 1184 s->buf_size = ROUND_UP(buf_size, granularity); 1185 s->unmap = unmap; 1186 if (auto_complete) { 1187 s->should_complete = true; 1188 } 1189 1190 s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity, NULL, errp); 1191 if (!s->dirty_bitmap) { 1192 g_free(s->replaces); 1193 blk_unref(s->target); 1194 block_job_unref(&s->common); 1195 return; 1196 } 1197 1198 /* Required permissions are already taken with blk_new() */ 1199 block_job_add_bdrv(&s->common, "target", target, 0, BLK_PERM_ALL, 1200 &error_abort); 1201 1202 /* In commit_active_start() all intermediate nodes disappear, so 1203 * any jobs in them must be blocked */ 1204 if (target_is_backing) { 1205 BlockDriverState *iter; 1206 for (iter = backing_bs(bs); iter != target; iter = backing_bs(iter)) { 1207 /* XXX BLK_PERM_WRITE needs to be allowed so we don't block 1208 * ourselves at s->base (if writes are blocked for a node, they are 1209 * also blocked for its backing file). The other options would be a 1210 * second filter driver above s->base (== target). */ 1211 ret = block_job_add_bdrv(&s->common, "intermediate node", iter, 0, 1212 BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE, 1213 errp); 1214 if (ret < 0) { 1215 goto fail; 1216 } 1217 } 1218 } 1219 1220 trace_mirror_start(bs, s, opaque); 1221 block_job_start(&s->common); 1222 return; 1223 1224 fail: 1225 if (s) { 1226 g_free(s->replaces); 1227 blk_unref(s->target); 1228 block_job_unref(&s->common); 1229 } 1230 1231 bdrv_replace_in_backing_chain(mirror_top_bs, backing_bs(mirror_top_bs)); 1232 } 1233 1234 void mirror_start(const char *job_id, BlockDriverState *bs, 1235 BlockDriverState *target, const char *replaces, 1236 int64_t speed, uint32_t granularity, int64_t buf_size, 1237 MirrorSyncMode mode, BlockMirrorBackingMode backing_mode, 1238 BlockdevOnError on_source_error, 1239 BlockdevOnError on_target_error, 1240 bool unmap, const char *filter_node_name, Error **errp) 1241 { 1242 bool is_none_mode; 1243 BlockDriverState *base; 1244 1245 if (mode == MIRROR_SYNC_MODE_INCREMENTAL) { 1246 error_setg(errp, "Sync mode 'incremental' not supported"); 1247 return; 1248 } 1249 is_none_mode = mode == MIRROR_SYNC_MODE_NONE; 1250 base = mode == MIRROR_SYNC_MODE_TOP ? backing_bs(bs) : NULL; 1251 mirror_start_job(job_id, bs, BLOCK_JOB_DEFAULT, target, replaces, 1252 speed, granularity, buf_size, backing_mode, 1253 on_source_error, on_target_error, unmap, NULL, NULL, errp, 1254 &mirror_job_driver, is_none_mode, base, false, 1255 filter_node_name); 1256 } 1257 1258 void commit_active_start(const char *job_id, BlockDriverState *bs, 1259 BlockDriverState *base, int creation_flags, 1260 int64_t speed, BlockdevOnError on_error, 1261 const char *filter_node_name, 1262 BlockCompletionFunc *cb, void *opaque, Error **errp, 1263 bool auto_complete) 1264 { 1265 int orig_base_flags; 1266 Error *local_err = NULL; 1267 1268 orig_base_flags = bdrv_get_flags(base); 1269 1270 if (bdrv_reopen(base, bs->open_flags, errp)) { 1271 return; 1272 } 1273 1274 mirror_start_job(job_id, bs, creation_flags, base, NULL, speed, 0, 0, 1275 MIRROR_LEAVE_BACKING_CHAIN, 1276 on_error, on_error, true, cb, opaque, &local_err, 1277 &commit_active_job_driver, false, base, auto_complete, 1278 filter_node_name); 1279 if (local_err) { 1280 error_propagate(errp, local_err); 1281 goto error_restore_flags; 1282 } 1283 1284 return; 1285 1286 error_restore_flags: 1287 /* ignore error and errp for bdrv_reopen, because we want to propagate 1288 * the original error */ 1289 bdrv_reopen(base, orig_base_flags, NULL); 1290 return; 1291 } 1292