/*
 * QEMU live block migration
 *
 * Copyright IBM, Corp. 2009
 *
 * Authors:
 *  Liran Schour   <lirans@il.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/cutils.h"
#include "qemu/queue.h"
#include "block.h"
#include "block/dirty-bitmap.h"
#include "migration/misc.h"
#include "migration.h"
#include "migration-stats.h"
#include "migration/register.h"
#include "qemu-file.h"
#include "migration/vmstate.h"
#include "sysemu/block-backend.h"
#include "trace.h"
#include "options.h"

#define BLK_MIG_BLOCK_SIZE           (1ULL << 20)
#define BDRV_SECTORS_PER_DIRTY_CHUNK (BLK_MIG_BLOCK_SIZE >> BDRV_SECTOR_BITS)

#define BLK_MIG_FLAG_DEVICE_BLOCK       0x01
#define BLK_MIG_FLAG_EOS                0x02
#define BLK_MIG_FLAG_PROGRESS           0x04
#define BLK_MIG_FLAG_ZERO_BLOCK         0x08

#define MAX_IS_ALLOCATED_SEARCH (65536 * BDRV_SECTOR_SIZE)

#define MAX_IO_BUFFERS 512
#define MAX_PARALLEL_IO 16

typedef struct BlkMigDevState {
    /* Written during setup phase.  Can be read without a lock.  */
    BlockBackend *blk;
    char *blk_name;
    int shared_base;
    int64_t total_sectors;
    QSIMPLEQ_ENTRY(BlkMigDevState) entry;
    Error *blocker;

    /* Only used by migration thread.  Does not need a lock.  */
    int bulk_completed;
    int64_t cur_sector;
    int64_t cur_dirty;

    /* Data in the aio_bitmap is protected by block migration lock.
     * Allocation and free happen during setup and cleanup respectively.
     */
    unsigned long *aio_bitmap;

    /* Protected by block migration lock.  */
    int64_t completed_sectors;

    /* During migration this is protected by bdrv_dirty_bitmap_lock().
     * Allocation and free happen during setup and cleanup respectively.
     */
    BdrvDirtyBitmap *dirty_bitmap;
} BlkMigDevState;

typedef struct BlkMigBlock {
    /* Only used by migration thread.  */
    uint8_t *buf;
    BlkMigDevState *bmds;
    int64_t sector;
    int nr_sectors;
    QEMUIOVector qiov;
    BlockAIOCB *aiocb;

    /* Protected by block migration lock.  */
    int ret;
    QSIMPLEQ_ENTRY(BlkMigBlock) entry;
} BlkMigBlock;

typedef struct BlkMigState {
    QSIMPLEQ_HEAD(, BlkMigDevState) bmds_list;
    int64_t total_sector_sum;
    bool zero_blocks;

    /* Protected by lock.  */
    QSIMPLEQ_HEAD(, BlkMigBlock) blk_list;
    int submitted;
    int read_done;

    /* Only used by migration thread.  Does not need a lock.  */
    int transferred;
    int prev_progress;
    int bulk_completed;

    /* Lock must be taken _inside_ the iothread lock.  */
    QemuMutex lock;
} BlkMigState;

static BlkMigState block_mig_state;

static void blk_mig_lock(void)
{
    qemu_mutex_lock(&block_mig_state.lock);
}

static void blk_mig_unlock(void)
{
    qemu_mutex_unlock(&block_mig_state.lock);
}
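/*
 * Explanatory note (added commentary, not part of the original code):
 * blk_send() below encodes each chunk header as a single big-endian
 * 64-bit word.  Since sector numbers are shifted left by
 * BDRV_SECTOR_BITS (9), the low 9 bits are free to carry the
 * BLK_MIG_FLAG_* values, and block_load() recovers both halves like so:
 *
 *     uint64_t addr  = qemu_get_be64(f);
 *     int      flags = addr & (BDRV_SECTOR_SIZE - 1);  /- low 9 bits -/
 *     addr >>= BDRV_SECTOR_BITS;                       /- sector number -/
 *
 * For example, sector 2048 with BLK_MIG_FLAG_DEVICE_BLOCK set is sent
 * as (2048 << 9) | 0x01 == 0x100001.
 */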
/* Must run outside of the iothread lock during the bulk phase,
 * or the VM will stall.
 */

static void blk_send(QEMUFile *f, BlkMigBlock *blk)
{
    int len;
    uint64_t flags = BLK_MIG_FLAG_DEVICE_BLOCK;

    if (block_mig_state.zero_blocks &&
        buffer_is_zero(blk->buf, BLK_MIG_BLOCK_SIZE)) {
        flags |= BLK_MIG_FLAG_ZERO_BLOCK;
    }

    /* sector number and flags */
    qemu_put_be64(f, (blk->sector << BDRV_SECTOR_BITS)
                     | flags);

    /* device name */
    len = strlen(blk->bmds->blk_name);
    qemu_put_byte(f, len);
    qemu_put_buffer(f, (uint8_t *) blk->bmds->blk_name, len);

    /* If a block is zero we need to flush here, since network bandwidth
     * is now a lot higher than storage device bandwidth.  Thus, queueing
     * zero blocks would slow down the migration.
     */
    if (flags & BLK_MIG_FLAG_ZERO_BLOCK) {
        qemu_fflush(f);
        return;
    }

    qemu_put_buffer(f, blk->buf, BLK_MIG_BLOCK_SIZE);
}

int blk_mig_active(void)
{
    return !QSIMPLEQ_EMPTY(&block_mig_state.bmds_list);
}

int blk_mig_bulk_active(void)
{
    return blk_mig_active() && !block_mig_state.bulk_completed;
}

uint64_t blk_mig_bytes_transferred(void)
{
    BlkMigDevState *bmds;
    uint64_t sum = 0;

    blk_mig_lock();
    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        sum += bmds->completed_sectors;
    }
    blk_mig_unlock();
    return sum << BDRV_SECTOR_BITS;
}

uint64_t blk_mig_bytes_remaining(void)
{
    return blk_mig_bytes_total() - blk_mig_bytes_transferred();
}

uint64_t blk_mig_bytes_total(void)
{
    BlkMigDevState *bmds;
    uint64_t sum = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        sum += bmds->total_sectors;
    }
    return sum << BDRV_SECTOR_BITS;
}

/* Called with migration lock held.  */

static int bmds_aio_inflight(BlkMigDevState *bmds, int64_t sector)
{
    int64_t chunk = sector / (int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK;

    if (sector < bmds->total_sectors) {
        return !!(bmds->aio_bitmap[chunk / (sizeof(unsigned long) * 8)] &
            (1UL << (chunk % (sizeof(unsigned long) * 8))));
    } else {
        return 0;
    }
}

/* Called with migration lock held.  */

static void bmds_set_aio_inflight(BlkMigDevState *bmds, int64_t sector_num,
                                  int nb_sectors, int set)
{
    int64_t start, end;
    unsigned long val, idx, bit;

    start = sector_num / BDRV_SECTORS_PER_DIRTY_CHUNK;
    end = (sector_num + nb_sectors - 1) / BDRV_SECTORS_PER_DIRTY_CHUNK;

    for (; start <= end; start++) {
        idx = start / (sizeof(unsigned long) * 8);
        bit = start % (sizeof(unsigned long) * 8);
        val = bmds->aio_bitmap[idx];
        if (set) {
            val |= 1UL << bit;
        } else {
            val &= ~(1UL << bit);
        }
        bmds->aio_bitmap[idx] = val;
    }
}

static void alloc_aio_bitmap(BlkMigDevState *bmds)
{
    int64_t bitmap_size;

    bitmap_size = bmds->total_sectors + BDRV_SECTORS_PER_DIRTY_CHUNK * 8 - 1;
    bitmap_size /= BDRV_SECTORS_PER_DIRTY_CHUNK * 8;

    bmds->aio_bitmap = g_malloc0(bitmap_size);
}
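/*
 * Explanatory note (added commentary, not part of the original code):
 * the aio_bitmap above keeps one bit per BDRV_SECTORS_PER_DIRTY_CHUNK
 * (2048-sector, i.e. 1 MiB) chunk, rounded up to whole bytes by
 * alloc_aio_bitmap().  A worked example: a 10 GiB disk has 20971520
 * 512-byte sectors, hence 10240 chunks, hence a 1280-byte bitmap.
 * The callback below clears a chunk's bit when its read completes,
 * which is why it must take the block migration lock.
 */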
/* Never hold migration lock when yielding to the main loop!  */

static void blk_mig_read_cb(void *opaque, int ret)
{
    BlkMigBlock *blk = opaque;

    blk_mig_lock();
    blk->ret = ret;

    QSIMPLEQ_INSERT_TAIL(&block_mig_state.blk_list, blk, entry);
    bmds_set_aio_inflight(blk->bmds, blk->sector, blk->nr_sectors, 0);

    block_mig_state.submitted--;
    block_mig_state.read_done++;
    assert(block_mig_state.submitted >= 0);
    blk_mig_unlock();
}

/* Called with no lock taken.  */

static int mig_save_device_bulk(QEMUFile *f, BlkMigDevState *bmds)
{
    int64_t total_sectors = bmds->total_sectors;
    int64_t cur_sector = bmds->cur_sector;
    BlockBackend *bb = bmds->blk;
    BlkMigBlock *blk;
    int nr_sectors;
    int64_t count;

    if (bmds->shared_base) {
        qemu_mutex_lock_iothread();
        /* Skip unallocated sectors; intentionally treats failure or
         * partial sector as an allocated sector */
        while (cur_sector < total_sectors &&
               !bdrv_is_allocated(blk_bs(bb), cur_sector * BDRV_SECTOR_SIZE,
                                  MAX_IS_ALLOCATED_SEARCH, &count)) {
            if (count < BDRV_SECTOR_SIZE) {
                break;
            }
            cur_sector += count >> BDRV_SECTOR_BITS;
        }
        qemu_mutex_unlock_iothread();
    }

    if (cur_sector >= total_sectors) {
        bmds->cur_sector = bmds->completed_sectors = total_sectors;
        return 1;
    }

    bmds->completed_sectors = cur_sector;

    cur_sector &= ~((int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK - 1);

    /* we are going to transfer a full block even if it is not allocated */
    nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;

    if (total_sectors - cur_sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
        nr_sectors = total_sectors - cur_sector;
    }

    blk = g_new(BlkMigBlock, 1);
    blk->buf = g_malloc(BLK_MIG_BLOCK_SIZE);
    blk->bmds = bmds;
    blk->sector = cur_sector;
    blk->nr_sectors = nr_sectors;

    qemu_iovec_init_buf(&blk->qiov, blk->buf, nr_sectors * BDRV_SECTOR_SIZE);

    blk_mig_lock();
    block_mig_state.submitted++;
    blk_mig_unlock();

    /*
     * The migration thread does not have an AioContext.  Lock the BQL so that
     * I/O runs in the main loop AioContext (see
     * qemu_get_current_aio_context()).
     */
    qemu_mutex_lock_iothread();
    bdrv_reset_dirty_bitmap(bmds->dirty_bitmap, cur_sector * BDRV_SECTOR_SIZE,
                            nr_sectors * BDRV_SECTOR_SIZE);
    blk->aiocb = blk_aio_preadv(bb, cur_sector * BDRV_SECTOR_SIZE, &blk->qiov,
                                0, blk_mig_read_cb, blk);
    qemu_mutex_unlock_iothread();

    bmds->cur_sector = cur_sector + nr_sectors;
    return (bmds->cur_sector >= total_sectors);
}

/* Called with iothread lock taken.  */

static int set_dirty_tracking(void)
{
    BlkMigDevState *bmds;
    int ret;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        bmds->dirty_bitmap = bdrv_create_dirty_bitmap(blk_bs(bmds->blk),
                                                      BLK_MIG_BLOCK_SIZE,
                                                      NULL, NULL);
        if (!bmds->dirty_bitmap) {
            ret = -errno;
            goto fail;
        }
    }
    return 0;

fail:
    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        if (bmds->dirty_bitmap) {
            bdrv_release_dirty_bitmap(bmds->dirty_bitmap);
        }
    }
    return ret;
}
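/*
 * Explanatory note (added commentary, not part of the original code):
 * the dirty bitmaps created above use BLK_MIG_BLOCK_SIZE (1 MiB)
 * granularity, so any guest write dirties a whole migration chunk.
 * That matches the unit in which mig_save_device_dirty() re-reads and
 * resends data: e.g. a 4 KiB guest write causes the full 1 MiB chunk
 * containing it to be queued again for transfer.
 */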
/* Called with iothread lock taken.  */

static void unset_dirty_tracking(void)
{
    BlkMigDevState *bmds;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        if (bmds->dirty_bitmap) {
            bdrv_release_dirty_bitmap(bmds->dirty_bitmap);
        }
    }
}

static int init_blk_migration(QEMUFile *f)
{
    BlockDriverState *bs;
    BlkMigDevState *bmds;
    int64_t sectors;
    BdrvNextIterator it;
    int i, num_bs = 0;
    struct {
        BlkMigDevState *bmds;
        BlockDriverState *bs;
    } *bmds_bs;
    Error *local_err = NULL;
    int ret;

    GRAPH_RDLOCK_GUARD_MAINLOOP();

    block_mig_state.submitted = 0;
    block_mig_state.read_done = 0;
    block_mig_state.transferred = 0;
    block_mig_state.total_sector_sum = 0;
    block_mig_state.prev_progress = -1;
    block_mig_state.bulk_completed = 0;
    block_mig_state.zero_blocks = migrate_zero_blocks();

    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
        num_bs++;
    }
    bmds_bs = g_malloc0(num_bs * sizeof(*bmds_bs));

    for (i = 0, bs = bdrv_first(&it); bs; bs = bdrv_next(&it), i++) {
        if (bdrv_is_read_only(bs)) {
            continue;
        }

        sectors = bdrv_nb_sectors(bs);
        if (sectors <= 0) {
            ret = sectors;
            bdrv_next_cleanup(&it);
            goto out;
        }

        bmds = g_new0(BlkMigDevState, 1);
        bmds->blk = blk_new(qemu_get_aio_context(),
                            BLK_PERM_CONSISTENT_READ, BLK_PERM_ALL);
        bmds->blk_name = g_strdup(bdrv_get_device_name(bs));
        bmds->bulk_completed = 0;
        bmds->total_sectors = sectors;
        bmds->completed_sectors = 0;
        bmds->shared_base = migrate_block_incremental();

        assert(i < num_bs);
        bmds_bs[i].bmds = bmds;
        bmds_bs[i].bs = bs;

        block_mig_state.total_sector_sum += sectors;

        if (bmds->shared_base) {
            trace_migration_block_init_shared(bdrv_get_device_name(bs));
        } else {
            trace_migration_block_init_full(bdrv_get_device_name(bs));
        }

        QSIMPLEQ_INSERT_TAIL(&block_mig_state.bmds_list, bmds, entry);
    }

    /* Can only insert new BDSes now because doing so while iterating block
     * devices may end up in a deadlock (iterating the new BDSes, too).  */
    for (i = 0; i < num_bs; i++) {
        bmds = bmds_bs[i].bmds;
        bs = bmds_bs[i].bs;

        if (bmds) {
            ret = blk_insert_bs(bmds->blk, bs, &local_err);
            if (ret < 0) {
                error_report_err(local_err);
                goto out;
            }

            alloc_aio_bitmap(bmds);
            error_setg(&bmds->blocker, "block device is in use by migration");
            bdrv_op_block_all(bs, bmds->blocker);
        }
    }

    ret = 0;
out:
    g_free(bmds_bs);
    return ret;
}
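/*
 * Explanatory note (added commentary, not part of the original code):
 * progress reporting below reuses the chunk-header encoding, with the
 * "sector" field carrying a percentage instead: 42% completion is sent
 * as (42 << BDRV_SECTOR_BITS) | BLK_MIG_FLAG_PROGRESS == 0x5404, and
 * block_load() on the destination simply prints the decoded value.
 */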
/* Called with no lock taken.  */

static int blk_mig_save_bulked_block(QEMUFile *f)
{
    int64_t completed_sector_sum = 0;
    BlkMigDevState *bmds;
    int progress;
    int ret = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        if (bmds->bulk_completed == 0) {
            if (mig_save_device_bulk(f, bmds) == 1) {
                /* completed bulk section for this device */
                bmds->bulk_completed = 1;
            }
            completed_sector_sum += bmds->completed_sectors;
            ret = 1;
            break;
        } else {
            completed_sector_sum += bmds->completed_sectors;
        }
    }

    if (block_mig_state.total_sector_sum != 0) {
        progress = completed_sector_sum * 100 /
                   block_mig_state.total_sector_sum;
    } else {
        progress = 100;
    }
    if (progress != block_mig_state.prev_progress) {
        block_mig_state.prev_progress = progress;
        qemu_put_be64(f, (progress << BDRV_SECTOR_BITS)
                         | BLK_MIG_FLAG_PROGRESS);
        trace_migration_block_progression(progress);
    }

    return ret;
}

static void blk_mig_reset_dirty_cursor(void)
{
    BlkMigDevState *bmds;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        bmds->cur_dirty = 0;
    }
}

/* Called with iothread lock taken.  */

static int mig_save_device_dirty(QEMUFile *f, BlkMigDevState *bmds,
                                 int is_async)
{
    BlkMigBlock *blk;
    int64_t total_sectors = bmds->total_sectors;
    int64_t sector;
    int nr_sectors;
    int ret = -EIO;

    for (sector = bmds->cur_dirty; sector < bmds->total_sectors;) {
        blk_mig_lock();
        if (bmds_aio_inflight(bmds, sector)) {
            blk_mig_unlock();
            blk_drain(bmds->blk);
        } else {
            blk_mig_unlock();
        }
        bdrv_dirty_bitmap_lock(bmds->dirty_bitmap);
        if (bdrv_dirty_bitmap_get_locked(bmds->dirty_bitmap,
                                         sector * BDRV_SECTOR_SIZE)) {
            if (total_sectors - sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
                nr_sectors = total_sectors - sector;
            } else {
                nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
            }
            bdrv_reset_dirty_bitmap_locked(bmds->dirty_bitmap,
                                           sector * BDRV_SECTOR_SIZE,
                                           nr_sectors * BDRV_SECTOR_SIZE);
            bdrv_dirty_bitmap_unlock(bmds->dirty_bitmap);

            blk = g_new(BlkMigBlock, 1);
            blk->buf = g_malloc(BLK_MIG_BLOCK_SIZE);
            blk->bmds = bmds;
            blk->sector = sector;
            blk->nr_sectors = nr_sectors;

            if (is_async) {
                qemu_iovec_init_buf(&blk->qiov, blk->buf,
                                    nr_sectors * BDRV_SECTOR_SIZE);

                blk->aiocb = blk_aio_preadv(bmds->blk,
                                            sector * BDRV_SECTOR_SIZE,
                                            &blk->qiov, 0, blk_mig_read_cb,
                                            blk);

                blk_mig_lock();
                block_mig_state.submitted++;
                bmds_set_aio_inflight(bmds, sector, nr_sectors, 1);
                blk_mig_unlock();
            } else {
                ret = blk_pread(bmds->blk, sector * BDRV_SECTOR_SIZE,
                                nr_sectors * BDRV_SECTOR_SIZE, blk->buf, 0);
                if (ret < 0) {
                    goto error;
                }
                blk_send(f, blk);

                g_free(blk->buf);
                g_free(blk);
            }

            sector += nr_sectors;
            bmds->cur_dirty = sector;
            break;
        }

        bdrv_dirty_bitmap_unlock(bmds->dirty_bitmap);
        sector += BDRV_SECTORS_PER_DIRTY_CHUNK;
        bmds->cur_dirty = sector;
    }

    return (bmds->cur_dirty >= bmds->total_sectors);

error:
    trace_migration_block_save_device_dirty(sector);
    g_free(blk->buf);
    g_free(blk);
    return ret;
}
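/*
 * Explanatory note (added commentary, not part of the original code):
 * mig_save_device_dirty() above sends at most one dirty chunk per call
 * and remembers its position in bmds->cur_dirty, so the caller below
 * can interleave chunks from several devices and re-check the rate
 * limit between chunks.  If an asynchronous read is still in flight
 * for the chunk, the device is drained first so that the outstanding
 * read completes before the chunk is read again.
 */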
/* Called with iothread lock taken.
 *
 * return value:
 * 0: too much data for max_downtime
 * 1: little enough data for max_downtime
 */
static int blk_mig_save_dirty_block(QEMUFile *f, int is_async)
{
    BlkMigDevState *bmds;
    int ret = 1;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        ret = mig_save_device_dirty(f, bmds, is_async);
        if (ret <= 0) {
            break;
        }
    }

    return ret;
}

/* Called with no locks taken.  */

static int flush_blks(QEMUFile *f)
{
    BlkMigBlock *blk;
    int ret = 0;

    trace_migration_block_flush_blks("Enter", block_mig_state.submitted,
                                     block_mig_state.read_done,
                                     block_mig_state.transferred);

    blk_mig_lock();
    while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
        if (migration_rate_exceeded(f)) {
            break;
        }
        if (blk->ret < 0) {
            ret = blk->ret;
            break;
        }

        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
        blk_mig_unlock();
        blk_send(f, blk);
        blk_mig_lock();

        g_free(blk->buf);
        g_free(blk);

        block_mig_state.read_done--;
        block_mig_state.transferred++;
        assert(block_mig_state.read_done >= 0);
    }
    blk_mig_unlock();

    trace_migration_block_flush_blks("Exit", block_mig_state.submitted,
                                     block_mig_state.read_done,
                                     block_mig_state.transferred);
    return ret;
}

/* Called with iothread lock taken.  */

static int64_t get_remaining_dirty(void)
{
    BlkMigDevState *bmds;
    int64_t dirty = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        bdrv_dirty_bitmap_lock(bmds->dirty_bitmap);
        dirty += bdrv_get_dirty_count(bmds->dirty_bitmap);
        bdrv_dirty_bitmap_unlock(bmds->dirty_bitmap);
    }

    return dirty;
}

/* Called with iothread lock taken.  */
static void block_migration_cleanup_bmds(void)
{
    BlkMigDevState *bmds;
    BlockDriverState *bs;

    unset_dirty_tracking();

    while ((bmds = QSIMPLEQ_FIRST(&block_mig_state.bmds_list)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.bmds_list, entry);

        bs = blk_bs(bmds->blk);
        if (bs) {
            bdrv_op_unblock_all(bs, bmds->blocker);
        }
        error_free(bmds->blocker);
        blk_unref(bmds->blk);
        g_free(bmds->blk_name);
        g_free(bmds->aio_bitmap);
        g_free(bmds);
    }
}
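/*
 * Explanatory note (added commentary, not part of the original code):
 * teardown happens in two steps.  block_migration_cleanup_bmds() above
 * releases the per-device state (dirty bitmaps, op blockers, the
 * BlockBackends themselves), while block_migration_cleanup() below
 * first drains all outstanding AIO so that no blk_mig_read_cb() can
 * still fire, and then frees any completed blocks left queued on
 * blk_list that were never flushed to the stream.
 */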
/* Called with iothread lock taken.  */
static void block_migration_cleanup(void *opaque)
{
    BlkMigBlock *blk;

    bdrv_drain_all();

    block_migration_cleanup_bmds();

    blk_mig_lock();
    while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
        g_free(blk->buf);
        g_free(blk);
    }
    blk_mig_unlock();
}

static int block_save_setup(QEMUFile *f, void *opaque)
{
    int ret;

    trace_migration_block_save("setup", block_mig_state.submitted,
                               block_mig_state.transferred);

    warn_report("block migration is deprecated;"
                " use blockdev-mirror with NBD instead");

    ret = init_blk_migration(f);
    if (ret < 0) {
        return ret;
    }

    /* start tracking dirty blocks */
    ret = set_dirty_tracking();
    if (ret) {
        return ret;
    }

    ret = flush_blks(f);
    blk_mig_reset_dirty_cursor();
    qemu_put_be64(f, BLK_MIG_FLAG_EOS);

    return ret;
}

static int block_save_iterate(QEMUFile *f, void *opaque)
{
    int ret;
    uint64_t last_bytes = qemu_file_transferred(f);

    trace_migration_block_save("iterate", block_mig_state.submitted,
                               block_mig_state.transferred);

    ret = flush_blks(f);
    if (ret) {
        return ret;
    }

    blk_mig_reset_dirty_cursor();

    /* control the rate of transfer */
    blk_mig_lock();
    while (block_mig_state.read_done * BLK_MIG_BLOCK_SIZE <
           migration_rate_get() &&
           block_mig_state.submitted < MAX_PARALLEL_IO &&
           (block_mig_state.submitted + block_mig_state.read_done) <
           MAX_IO_BUFFERS) {
        blk_mig_unlock();
        if (block_mig_state.bulk_completed == 0) {
            /* first finish the bulk phase */
            if (blk_mig_save_bulked_block(f) == 0) {
                /* finished saving bulk on all devices */
                block_mig_state.bulk_completed = 1;
            }
            ret = 0;
        } else {
            /* Always called with iothread lock taken for
             * simplicity, block_save_complete also calls it.
             */
            qemu_mutex_lock_iothread();
            ret = blk_mig_save_dirty_block(f, 1);
            qemu_mutex_unlock_iothread();
        }
        if (ret < 0) {
            return ret;
        }
        blk_mig_lock();
        if (ret != 0) {
            /* no more dirty blocks */
            break;
        }
    }
    blk_mig_unlock();

    ret = flush_blks(f);
    if (ret) {
        return ret;
    }

    qemu_put_be64(f, BLK_MIG_FLAG_EOS);
    uint64_t delta_bytes = qemu_file_transferred(f) - last_bytes;
    return (delta_bytes > 0);
}

/* Called with iothread lock taken.  */

static int block_save_complete(QEMUFile *f, void *opaque)
{
    int ret;

    trace_migration_block_save("complete", block_mig_state.submitted,
                               block_mig_state.transferred);

    ret = flush_blks(f);
    if (ret) {
        return ret;
    }

    blk_mig_reset_dirty_cursor();

    /* We know for sure that the bulk phase is completed and
     * all async reads have completed.
     */
    blk_mig_lock();
    assert(block_mig_state.submitted == 0);
    blk_mig_unlock();

    do {
        ret = blk_mig_save_dirty_block(f, 0);
        if (ret < 0) {
            return ret;
        }
    } while (ret == 0);

    /* report completion */
    qemu_put_be64(f, (100 << BDRV_SECTOR_BITS) | BLK_MIG_FLAG_PROGRESS);

    trace_migration_block_save_complete();

    qemu_put_be64(f, BLK_MIG_FLAG_EOS);

    /* Make sure that our BlockBackends are gone, so that the block driver
     * nodes can be inactivated.  */
    block_migration_cleanup_bmds();

    return 0;
}
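/*
 * Explanatory note (added commentary, not part of the original code):
 * the pending estimate below counts bytes still marked dirty in the
 * bitmaps, plus one full BLK_MIG_BLOCK_SIZE buffer for every request
 * that is submitted or read but not yet flushed.  For example, 8 MiB
 * of dirty sectors with 3 reads in flight and 5 blocks awaiting flush
 * is reported as 8 MiB + (3 + 5) * 1 MiB = 16 MiB.  Reporting a
 * non-zero value during the bulk phase keeps the migration core from
 * declaring convergence before the first pass has finished.
 */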
static void block_state_pending(void *opaque, uint64_t *must_precopy,
                                uint64_t *can_postcopy)
{
    /* Estimate pending number of bytes to send */
    uint64_t pending;

    qemu_mutex_lock_iothread();
    pending = get_remaining_dirty();
    qemu_mutex_unlock_iothread();

    blk_mig_lock();
    pending += block_mig_state.submitted * BLK_MIG_BLOCK_SIZE +
               block_mig_state.read_done * BLK_MIG_BLOCK_SIZE;
    blk_mig_unlock();

    /* Report at least one block pending during bulk phase */
    if (!pending && !block_mig_state.bulk_completed) {
        pending = BLK_MIG_BLOCK_SIZE;
    }

    trace_migration_block_state_pending(pending);
    /* We don't do postcopy */
    *must_precopy += pending;
}

static int block_load(QEMUFile *f, void *opaque, int version_id)
{
    static int banner_printed;
    int len, flags;
    char device_name[256];
    int64_t addr;
    BlockBackend *blk, *blk_prev = NULL;
    Error *local_err = NULL;
    uint8_t *buf;
    int64_t total_sectors = 0;
    int nr_sectors;
    int ret;
    BlockDriverInfo bdi;
    int cluster_size = BLK_MIG_BLOCK_SIZE;

    do {
        addr = qemu_get_be64(f);

        flags = addr & (BDRV_SECTOR_SIZE - 1);
        addr >>= BDRV_SECTOR_BITS;

        if (flags & BLK_MIG_FLAG_DEVICE_BLOCK) {
            /* get device name */
            len = qemu_get_byte(f);
            qemu_get_buffer(f, (uint8_t *)device_name, len);
            device_name[len] = '\0';

            blk = blk_by_name(device_name);
            if (!blk) {
                fprintf(stderr, "Error: unknown block device %s\n",
                        device_name);
                return -EINVAL;
            }

            if (blk != blk_prev) {
                blk_prev = blk;
                total_sectors = blk_nb_sectors(blk);
                if (total_sectors <= 0) {
                    error_report("Error getting length of block device %s",
                                 device_name);
                    return -EINVAL;
                }

                blk_activate(blk, &local_err);
                if (local_err) {
                    error_report_err(local_err);
                    return -EINVAL;
                }

                ret = bdrv_get_info(blk_bs(blk), &bdi);
                if (ret == 0 && bdi.cluster_size > 0 &&
                    bdi.cluster_size <= BLK_MIG_BLOCK_SIZE &&
                    BLK_MIG_BLOCK_SIZE % bdi.cluster_size == 0) {
                    cluster_size = bdi.cluster_size;
                } else {
                    cluster_size = BLK_MIG_BLOCK_SIZE;
                }
            }

            if (total_sectors - addr < BDRV_SECTORS_PER_DIRTY_CHUNK) {
                nr_sectors = total_sectors - addr;
            } else {
                nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
            }

            if (flags & BLK_MIG_FLAG_ZERO_BLOCK) {
                ret = blk_pwrite_zeroes(blk, addr * BDRV_SECTOR_SIZE,
                                        nr_sectors * BDRV_SECTOR_SIZE,
                                        BDRV_REQ_MAY_UNMAP);
            } else {
                int i;
                int64_t cur_addr;
                uint8_t *cur_buf;

                buf = g_malloc(BLK_MIG_BLOCK_SIZE);
                qemu_get_buffer(f, buf, BLK_MIG_BLOCK_SIZE);
                for (i = 0; i < BLK_MIG_BLOCK_SIZE / cluster_size; i++) {
                    cur_addr = addr * BDRV_SECTOR_SIZE + i * cluster_size;
                    cur_buf = buf + i * cluster_size;

                    if ((!block_mig_state.zero_blocks ||
                        cluster_size < BLK_MIG_BLOCK_SIZE) &&
                        buffer_is_zero(cur_buf, cluster_size)) {
                        ret = blk_pwrite_zeroes(blk, cur_addr,
                                                cluster_size,
                                                BDRV_REQ_MAY_UNMAP);
                    } else {
                        ret = blk_pwrite(blk, cur_addr, cluster_size, cur_buf,
                                         0);
                    }
                    if (ret < 0) {
                        break;
                    }
                }
                g_free(buf);
            }

            if (ret < 0) {
                return ret;
            }
        } else if (flags & BLK_MIG_FLAG_PROGRESS) {
            if (!banner_printed) {
                printf("Receiving block device images\n");
                banner_printed = 1;
            }
            printf("Completed %d %%%c", (int)addr,
                   (addr == 100) ? '\n' : '\r');
            fflush(stdout);
        } else if (!(flags & BLK_MIG_FLAG_EOS)) {
            fprintf(stderr, "Unknown block migration flags: 0x%x\n", flags);
            return -EINVAL;
        }
        ret = qemu_file_get_error(f);
        if (ret != 0) {
            return ret;
        }
    } while (!(flags & BLK_MIG_FLAG_EOS));

    return 0;
}
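/*
 * Explanatory note (added commentary, not part of the original code):
 * when the destination image reports a cluster size that divides
 * BLK_MIG_BLOCK_SIZE (e.g. qcow2 with 64 KiB clusters), block_load()
 * above splits each received 1 MiB chunk into 16 cluster-sized writes
 * and turns all-zero pieces into blk_pwrite_zeroes() calls, so sparse
 * images stay sparse even when the source did not flag the whole chunk
 * with BLK_MIG_FLAG_ZERO_BLOCK.
 */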
static bool block_is_active(void *opaque)
{
    return migrate_block();
}

static SaveVMHandlers savevm_block_handlers = {
    .save_setup = block_save_setup,
    .save_live_iterate = block_save_iterate,
    .save_live_complete_precopy = block_save_complete,
    .state_pending_exact = block_state_pending,
    .state_pending_estimate = block_state_pending,
    .load_state = block_load,
    .save_cleanup = block_migration_cleanup,
    .is_active = block_is_active,
};

void blk_mig_init(void)
{
    QSIMPLEQ_INIT(&block_mig_state.bmds_list);
    QSIMPLEQ_INIT(&block_mig_state.blk_list);
    qemu_mutex_init(&block_mig_state.lock);

    register_savevm_live("block", 0, 1, &savevm_block_handlers,
                         &block_mig_state);
}
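/*
 * Usage note (added commentary, not part of the original code): this
 * savevm section is registered as "block", instance 0, version 1, and
 * only participates when block_is_active() sees the (deprecated) block
 * migration capability enabled, e.g. via HMP:
 *
 *     (qemu) migrate_set_capability block on
 *     (qemu) migrate tcp:dest:4444
 *
 * The exact monitor spelling above is an assumption; as the warning in
 * block_save_setup() says, blockdev-mirror with NBD is the recommended
 * replacement.
 */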