/*
 * QEMU live block migration
 *
 * Copyright IBM, Corp. 2009
 *
 * Authors:
 *  Liran Schour <lirans@il.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu-common.h"
#include "block/block.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "hw/hw.h"
#include "qemu/queue.h"
#include "qemu/timer.h"
#include "migration/block.h"
#include "migration/migration.h"
#include "sysemu/blockdev.h"
#include "sysemu/block-backend.h"
#include <assert.h>

#define BLOCK_SIZE                      (1 << 20)
#define BDRV_SECTORS_PER_DIRTY_CHUNK    (BLOCK_SIZE >> BDRV_SECTOR_BITS)

#define BLK_MIG_FLAG_DEVICE_BLOCK       0x01
#define BLK_MIG_FLAG_EOS                0x02
#define BLK_MIG_FLAG_PROGRESS           0x04
#define BLK_MIG_FLAG_ZERO_BLOCK         0x08

#define MAX_IS_ALLOCATED_SEARCH 65536

//#define DEBUG_BLK_MIGRATION

#ifdef DEBUG_BLK_MIGRATION
#define DPRINTF(fmt, ...) \
    do { printf("blk_migration: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

typedef struct BlkMigDevState {
    /* Written during setup phase. Can be read without a lock. */
    BlockDriverState *bs;
    int shared_base;
    int64_t total_sectors;
    QSIMPLEQ_ENTRY(BlkMigDevState) entry;

    /* Only used by migration thread. Does not need a lock. */
    int bulk_completed;
    int64_t cur_sector;
    int64_t cur_dirty;

    /* Protected by block migration lock. */
    unsigned long *aio_bitmap;
    int64_t completed_sectors;
    BdrvDirtyBitmap *dirty_bitmap;
    Error *blocker;
} BlkMigDevState;

typedef struct BlkMigBlock {
    /* Only used by migration thread. */
    uint8_t *buf;
    BlkMigDevState *bmds;
    int64_t sector;
    int nr_sectors;
    struct iovec iov;
    QEMUIOVector qiov;
    BlockAIOCB *aiocb;

    /* Protected by block migration lock. */
    int ret;
    QSIMPLEQ_ENTRY(BlkMigBlock) entry;
} BlkMigBlock;

typedef struct BlkMigState {
    /* Written during setup phase. Can be read without a lock. */
    int blk_enable;
    int shared_base;
    QSIMPLEQ_HEAD(bmds_list, BlkMigDevState) bmds_list;
    int64_t total_sector_sum;
    bool zero_blocks;

    /* Protected by lock. */
    QSIMPLEQ_HEAD(blk_list, BlkMigBlock) blk_list;
    int submitted;
    int read_done;

    /* Only used by migration thread. Does not need a lock. */
    int transferred;
    int prev_progress;
    int bulk_completed;

    /* Lock must be taken _inside_ the iothread lock. */
    QemuMutex lock;
} BlkMigState;

static BlkMigState block_mig_state;

static void blk_mig_lock(void)
{
    qemu_mutex_lock(&block_mig_state.lock);
}

static void blk_mig_unlock(void)
{
    qemu_mutex_unlock(&block_mig_state.lock);
}

/* Must run outside of the iothread lock during the bulk phase,
 * or the VM will stall.
 */
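
/* Layout of one device-block record on the wire, as written below by
 * blk_send() and parsed again in block_load() (integers are big-endian):
 *
 *   8 bytes     (sector << BDRV_SECTOR_BITS) | flags
 *   1 byte      length of the device name
 *   n bytes     device name (not NUL-terminated)
 *   BLOCK_SIZE  bytes of payload, omitted when BLK_MIG_FLAG_ZERO_BLOCK
 *               is set
 */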

static void blk_send(QEMUFile *f, BlkMigBlock *blk)
{
    int len;
    uint64_t flags = BLK_MIG_FLAG_DEVICE_BLOCK;

    if (block_mig_state.zero_blocks &&
        buffer_is_zero(blk->buf, BLOCK_SIZE)) {
        flags |= BLK_MIG_FLAG_ZERO_BLOCK;
    }

    /* sector number and flags */
    qemu_put_be64(f, (blk->sector << BDRV_SECTOR_BITS)
                     | flags);

    /* device name */
    len = strlen(bdrv_get_device_name(blk->bmds->bs));
    qemu_put_byte(f, len);
    qemu_put_buffer(f, (uint8_t *)bdrv_get_device_name(blk->bmds->bs), len);

    /* If a block is zero we need to flush here since the network
     * bandwidth is now a lot higher than the storage device bandwidth.
     * Thus, if we queue zero blocks we slow down the migration. */
    if (flags & BLK_MIG_FLAG_ZERO_BLOCK) {
        qemu_fflush(f);
        return;
    }

    qemu_put_buffer(f, blk->buf, BLOCK_SIZE);
}

int blk_mig_active(void)
{
    return !QSIMPLEQ_EMPTY(&block_mig_state.bmds_list);
}

uint64_t blk_mig_bytes_transferred(void)
{
    BlkMigDevState *bmds;
    uint64_t sum = 0;

    blk_mig_lock();
    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        sum += bmds->completed_sectors;
    }
    blk_mig_unlock();
    return sum << BDRV_SECTOR_BITS;
}

uint64_t blk_mig_bytes_remaining(void)
{
    return blk_mig_bytes_total() - blk_mig_bytes_transferred();
}

uint64_t blk_mig_bytes_total(void)
{
    BlkMigDevState *bmds;
    uint64_t sum = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        sum += bmds->total_sectors;
    }
    return sum << BDRV_SECTOR_BITS;
}


/* Called with migration lock held. */

static int bmds_aio_inflight(BlkMigDevState *bmds, int64_t sector)
{
    int64_t chunk = sector / (int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK;

    if (sector < bdrv_nb_sectors(bmds->bs)) {
        return !!(bmds->aio_bitmap[chunk / (sizeof(unsigned long) * 8)] &
            (1UL << (chunk % (sizeof(unsigned long) * 8))));
    } else {
        return 0;
    }
}

/* Called with migration lock held. */

static void bmds_set_aio_inflight(BlkMigDevState *bmds, int64_t sector_num,
                                  int nb_sectors, int set)
{
    int64_t start, end;
    unsigned long val, idx, bit;

    start = sector_num / BDRV_SECTORS_PER_DIRTY_CHUNK;
    end = (sector_num + nb_sectors - 1) / BDRV_SECTORS_PER_DIRTY_CHUNK;

    for (; start <= end; start++) {
        idx = start / (sizeof(unsigned long) * 8);
        bit = start % (sizeof(unsigned long) * 8);
        val = bmds->aio_bitmap[idx];
        if (set) {
            val |= 1UL << bit;
        } else {
            val &= ~(1UL << bit);
        }
        bmds->aio_bitmap[idx] = val;
    }
}

static void alloc_aio_bitmap(BlkMigDevState *bmds)
{
    BlockDriverState *bs = bmds->bs;
    int64_t bitmap_size;

    bitmap_size = bdrv_nb_sectors(bs) + BDRV_SECTORS_PER_DIRTY_CHUNK * 8 - 1;
    bitmap_size /= BDRV_SECTORS_PER_DIRTY_CHUNK * 8;

    bmds->aio_bitmap = g_malloc0(bitmap_size);
}

/* Never hold migration lock when yielding to the main loop! */
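
/* Completion callback for the bdrv_aio_readv() requests issued by the bulk
 * and dirty phases: under the migration lock it records the result, moves
 * the filled block onto blk_list (from where flush_blks() sends it), clears
 * the in-flight bits and updates the submitted/read_done counters.
 */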

static void blk_mig_read_cb(void *opaque, int ret)
{
    BlkMigBlock *blk = opaque;

    blk_mig_lock();
    blk->ret = ret;

    QSIMPLEQ_INSERT_TAIL(&block_mig_state.blk_list, blk, entry);
    bmds_set_aio_inflight(blk->bmds, blk->sector, blk->nr_sectors, 0);

    block_mig_state.submitted--;
    block_mig_state.read_done++;
    assert(block_mig_state.submitted >= 0);
    blk_mig_unlock();
}

/* Called with no lock taken. */

static int mig_save_device_bulk(QEMUFile *f, BlkMigDevState *bmds)
{
    int64_t total_sectors = bmds->total_sectors;
    int64_t cur_sector = bmds->cur_sector;
    BlockDriverState *bs = bmds->bs;
    BlkMigBlock *blk;
    int nr_sectors;

    if (bmds->shared_base) {
        qemu_mutex_lock_iothread();
        while (cur_sector < total_sectors &&
               !bdrv_is_allocated(bs, cur_sector, MAX_IS_ALLOCATED_SEARCH,
                                  &nr_sectors)) {
            cur_sector += nr_sectors;
        }
        qemu_mutex_unlock_iothread();
    }

    if (cur_sector >= total_sectors) {
        bmds->cur_sector = bmds->completed_sectors = total_sectors;
        return 1;
    }

    bmds->completed_sectors = cur_sector;

    cur_sector &= ~((int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK - 1);

    /* we are going to transfer a full block even if it is not allocated */
    nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;

    if (total_sectors - cur_sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
        nr_sectors = total_sectors - cur_sector;
    }

    blk = g_new(BlkMigBlock, 1);
    blk->buf = g_malloc(BLOCK_SIZE);
    blk->bmds = bmds;
    blk->sector = cur_sector;
    blk->nr_sectors = nr_sectors;

    blk->iov.iov_base = blk->buf;
    blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;
    qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);

    blk_mig_lock();
    block_mig_state.submitted++;
    blk_mig_unlock();

    qemu_mutex_lock_iothread();
    blk->aiocb = bdrv_aio_readv(bs, cur_sector, &blk->qiov,
                                nr_sectors, blk_mig_read_cb, blk);

    bdrv_reset_dirty_bitmap(bmds->dirty_bitmap, cur_sector, nr_sectors);
    qemu_mutex_unlock_iothread();

    bmds->cur_sector = cur_sector + nr_sectors;
    return (bmds->cur_sector >= total_sectors);
}

/* Called with iothread lock taken. */
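
/* Install a dirty bitmap with a granularity of one chunk (BLOCK_SIZE,
 * i.e. 1 MiB) on every migrated device, so that guest writes made while
 * migration is in progress are picked up by the dirty phase.
 */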

static int set_dirty_tracking(void)
{
    BlkMigDevState *bmds;
    int ret;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        bmds->dirty_bitmap = bdrv_create_dirty_bitmap(bmds->bs, BLOCK_SIZE,
                                                      NULL, NULL);
        if (!bmds->dirty_bitmap) {
            ret = -errno;
            goto fail;
        }
    }
    return 0;

fail:
    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        if (bmds->dirty_bitmap) {
            bdrv_release_dirty_bitmap(bmds->bs, bmds->dirty_bitmap);
        }
    }
    return ret;
}

static void unset_dirty_tracking(void)
{
    BlkMigDevState *bmds;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        bdrv_release_dirty_bitmap(bmds->bs, bmds->dirty_bitmap);
    }
}

static void init_blk_migration(QEMUFile *f)
{
    BlockDriverState *bs;
    BlkMigDevState *bmds;
    int64_t sectors;

    block_mig_state.submitted = 0;
    block_mig_state.read_done = 0;
    block_mig_state.transferred = 0;
    block_mig_state.total_sector_sum = 0;
    block_mig_state.prev_progress = -1;
    block_mig_state.bulk_completed = 0;
    block_mig_state.zero_blocks = migrate_zero_blocks();

    for (bs = bdrv_next(NULL); bs; bs = bdrv_next(bs)) {
        if (bdrv_is_read_only(bs)) {
            continue;
        }

        sectors = bdrv_nb_sectors(bs);
        if (sectors <= 0) {
            return;
        }

        bmds = g_new0(BlkMigDevState, 1);
        bmds->bs = bs;
        bmds->bulk_completed = 0;
        bmds->total_sectors = sectors;
        bmds->completed_sectors = 0;
        bmds->shared_base = block_mig_state.shared_base;
        alloc_aio_bitmap(bmds);
        error_setg(&bmds->blocker, "block device is in use by migration");
        bdrv_op_block_all(bs, bmds->blocker);
        bdrv_ref(bs);

        block_mig_state.total_sector_sum += sectors;

        if (bmds->shared_base) {
            DPRINTF("Start migration for %s with shared base image\n",
                    bdrv_get_device_name(bs));
        } else {
            DPRINTF("Start full migration for %s\n", bdrv_get_device_name(bs));
        }

        QSIMPLEQ_INSERT_TAIL(&block_mig_state.bmds_list, bmds, entry);
    }
}

/* Called with no lock taken. */

static int blk_mig_save_bulked_block(QEMUFile *f)
{
    int64_t completed_sector_sum = 0;
    BlkMigDevState *bmds;
    int progress;
    int ret = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        if (bmds->bulk_completed == 0) {
            if (mig_save_device_bulk(f, bmds) == 1) {
                /* completed bulk section for this device */
                bmds->bulk_completed = 1;
            }
            completed_sector_sum += bmds->completed_sectors;
            ret = 1;
            break;
        } else {
            completed_sector_sum += bmds->completed_sectors;
        }
    }

    if (block_mig_state.total_sector_sum != 0) {
        progress = completed_sector_sum * 100 /
                   block_mig_state.total_sector_sum;
    } else {
        progress = 100;
    }
    if (progress != block_mig_state.prev_progress) {
        block_mig_state.prev_progress = progress;
        qemu_put_be64(f, (progress << BDRV_SECTOR_BITS)
                         | BLK_MIG_FLAG_PROGRESS);
        DPRINTF("Completed %d %%\r", progress);
    }

    return ret;
}

static void blk_mig_reset_dirty_cursor(void)
{
    BlkMigDevState *bmds;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        bmds->cur_dirty = 0;
    }
}

/* Called with iothread lock taken. */
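
/* Send at most one dirty chunk of this device per call. With is_async set,
 * the chunk is read with bdrv_aio_readv() and queued for flush_blks();
 * otherwise it is read synchronously and sent immediately. Returns 1 once
 * the dirty cursor has passed the end of the device.
 */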

static int mig_save_device_dirty(QEMUFile *f, BlkMigDevState *bmds,
                                 int is_async)
{
    BlkMigBlock *blk;
    int64_t total_sectors = bmds->total_sectors;
    int64_t sector;
    int nr_sectors;
    int ret = -EIO;

    for (sector = bmds->cur_dirty; sector < bmds->total_sectors;) {
        blk_mig_lock();
        if (bmds_aio_inflight(bmds, sector)) {
            blk_mig_unlock();
            bdrv_drain_all();
        } else {
            blk_mig_unlock();
        }
        if (bdrv_get_dirty(bmds->bs, bmds->dirty_bitmap, sector)) {

            if (total_sectors - sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
                nr_sectors = total_sectors - sector;
            } else {
                nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
            }
            blk = g_new(BlkMigBlock, 1);
            blk->buf = g_malloc(BLOCK_SIZE);
            blk->bmds = bmds;
            blk->sector = sector;
            blk->nr_sectors = nr_sectors;

            if (is_async) {
                blk->iov.iov_base = blk->buf;
                blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;
                qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);

                blk->aiocb = bdrv_aio_readv(bmds->bs, sector, &blk->qiov,
                                            nr_sectors, blk_mig_read_cb, blk);

                blk_mig_lock();
                block_mig_state.submitted++;
                bmds_set_aio_inflight(bmds, sector, nr_sectors, 1);
                blk_mig_unlock();
            } else {
                ret = bdrv_read(bmds->bs, sector, blk->buf, nr_sectors);
                if (ret < 0) {
                    goto error;
                }
                blk_send(f, blk);

                g_free(blk->buf);
                g_free(blk);
            }

            bdrv_reset_dirty_bitmap(bmds->dirty_bitmap, sector, nr_sectors);
            break;
        }
        sector += BDRV_SECTORS_PER_DIRTY_CHUNK;
        bmds->cur_dirty = sector;
    }

    return (bmds->cur_dirty >= bmds->total_sectors);

error:
    DPRINTF("Error reading sector %" PRId64 "\n", sector);
    g_free(blk->buf);
    g_free(blk);
    return ret;
}

/* Called with iothread lock taken.
 *
 * return value:
 * 0: too much data for max_downtime
 * 1: little enough data for max_downtime
 */
static int blk_mig_save_dirty_block(QEMUFile *f, int is_async)
{
    BlkMigDevState *bmds;
    int ret = 1;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        ret = mig_save_device_dirty(f, bmds, is_async);
        if (ret <= 0) {
            break;
        }
    }

    return ret;
}

/* Called with no locks taken. */

static int flush_blks(QEMUFile *f)
{
    BlkMigBlock *blk;
    int ret = 0;

    DPRINTF("%s Enter submitted %d read_done %d transferred %d\n",
            __func__, block_mig_state.submitted, block_mig_state.read_done,
            block_mig_state.transferred);

    blk_mig_lock();
    while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
        if (qemu_file_rate_limit(f)) {
            break;
        }
        if (blk->ret < 0) {
            ret = blk->ret;
            break;
        }

        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
        blk_mig_unlock();
        blk_send(f, blk);
        blk_mig_lock();

        g_free(blk->buf);
        g_free(blk);

        block_mig_state.read_done--;
        block_mig_state.transferred++;
        assert(block_mig_state.read_done >= 0);
    }
    blk_mig_unlock();

    DPRINTF("%s Exit submitted %d read_done %d transferred %d\n", __func__,
            block_mig_state.submitted, block_mig_state.read_done,
            block_mig_state.transferred);
    return ret;
}

/* Called with iothread lock taken. */
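
/* Total amount of dirty data still to be sent, converted to bytes; used by
 * block_save_pending() to estimate the remaining work.
 */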

static int64_t get_remaining_dirty(void)
{
    BlkMigDevState *bmds;
    int64_t dirty = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        dirty += bdrv_get_dirty_count(bmds->dirty_bitmap);
    }

    return dirty << BDRV_SECTOR_BITS;
}

/* Called with iothread lock taken. */

static void blk_mig_cleanup(void)
{
    BlkMigDevState *bmds;
    BlkMigBlock *blk;

    bdrv_drain_all();

    unset_dirty_tracking();

    blk_mig_lock();
    while ((bmds = QSIMPLEQ_FIRST(&block_mig_state.bmds_list)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.bmds_list, entry);
        bdrv_op_unblock_all(bmds->bs, bmds->blocker);
        error_free(bmds->blocker);
        bdrv_unref(bmds->bs);
        g_free(bmds->aio_bitmap);
        g_free(bmds);
    }

    while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
        g_free(blk->buf);
        g_free(blk);
    }
    blk_mig_unlock();
}

static void block_migration_cancel(void *opaque)
{
    blk_mig_cleanup();
}

static int block_save_setup(QEMUFile *f, void *opaque)
{
    int ret;

    DPRINTF("Enter save live setup submitted %d transferred %d\n",
            block_mig_state.submitted, block_mig_state.transferred);

    qemu_mutex_lock_iothread();
    init_blk_migration(f);

    /* start tracking dirty blocks */
    ret = set_dirty_tracking();

    if (ret) {
        qemu_mutex_unlock_iothread();
        return ret;
    }

    qemu_mutex_unlock_iothread();

    ret = flush_blks(f);
    blk_mig_reset_dirty_cursor();
    qemu_put_be64(f, BLK_MIG_FLAG_EOS);

    return ret;
}

static int block_save_iterate(QEMUFile *f, void *opaque)
{
    int ret;
    int64_t last_ftell = qemu_ftell(f);
    int64_t delta_ftell;

    DPRINTF("Enter save live iterate submitted %d transferred %d\n",
            block_mig_state.submitted, block_mig_state.transferred);

    ret = flush_blks(f);
    if (ret) {
        return ret;
    }

    blk_mig_reset_dirty_cursor();

    /* control the rate of transfer */
    blk_mig_lock();
    while ((block_mig_state.submitted +
            block_mig_state.read_done) * BLOCK_SIZE <
           qemu_file_get_rate_limit(f)) {
        blk_mig_unlock();
        if (block_mig_state.bulk_completed == 0) {
            /* first finish the bulk phase */
            if (blk_mig_save_bulked_block(f) == 0) {
                /* finished saving bulk on all devices */
                block_mig_state.bulk_completed = 1;
            }
            ret = 0;
        } else {
            /* Always called with iothread lock taken for
             * simplicity, block_save_complete also calls it.
             */
            qemu_mutex_lock_iothread();
            ret = blk_mig_save_dirty_block(f, 1);
            qemu_mutex_unlock_iothread();
        }
        if (ret < 0) {
            return ret;
        }
        blk_mig_lock();
        if (ret != 0) {
            /* no more dirty blocks */
            break;
        }
    }
    blk_mig_unlock();

    ret = flush_blks(f);
    if (ret) {
        return ret;
    }

    qemu_put_be64(f, BLK_MIG_FLAG_EOS);
    delta_ftell = qemu_ftell(f) - last_ftell;
    if (delta_ftell > 0) {
        return 1;
    } else if (delta_ftell < 0) {
        return -1;
    } else {
        return 0;
    }
}

/* Called with iothread lock taken. */
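
/* Completion stage: flush whatever is still queued, assert that no reads
 * remain in flight, then drain the remaining dirty chunks synchronously
 * before writing the final progress and EOS markers.
 */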

static int block_save_complete(QEMUFile *f, void *opaque)
{
    int ret;

    DPRINTF("Enter save live complete submitted %d transferred %d\n",
            block_mig_state.submitted, block_mig_state.transferred);

    ret = flush_blks(f);
    if (ret) {
        return ret;
    }

    blk_mig_reset_dirty_cursor();

    /* We know for sure that the bulk save is complete and that all
     * asynchronous reads have finished. */
    blk_mig_lock();
    assert(block_mig_state.submitted == 0);
    blk_mig_unlock();

    do {
        ret = blk_mig_save_dirty_block(f, 0);
        if (ret < 0) {
            return ret;
        }
    } while (ret == 0);

    /* report completion */
    qemu_put_be64(f, (100 << BDRV_SECTOR_BITS) | BLK_MIG_FLAG_PROGRESS);

    DPRINTF("Block migration completed\n");

    qemu_put_be64(f, BLK_MIG_FLAG_EOS);

    blk_mig_cleanup();
    return 0;
}

static uint64_t block_save_pending(QEMUFile *f, void *opaque,
                                   uint64_t max_size)
{
    /* Estimate pending number of bytes to send */
    uint64_t pending;

    qemu_mutex_lock_iothread();
    blk_mig_lock();
    pending = get_remaining_dirty() +
              block_mig_state.submitted * BLOCK_SIZE +
              block_mig_state.read_done * BLOCK_SIZE;

    /* Report at least one block pending during bulk phase */
    if (pending <= max_size && !block_mig_state.bulk_completed) {
        pending = max_size + BLOCK_SIZE;
    }
    blk_mig_unlock();
    qemu_mutex_unlock_iothread();

    DPRINTF("Enter save live pending %" PRIu64 "\n", pending);
    return pending;
}

static int block_load(QEMUFile *f, void *opaque, int version_id)
{
    static int banner_printed;
    int len, flags;
    char device_name[256];
    int64_t addr;
    BlockDriverState *bs, *bs_prev = NULL;
    BlockBackend *blk;
    uint8_t *buf;
    int64_t total_sectors = 0;
    int nr_sectors;
    int ret;

    do {
        addr = qemu_get_be64(f);

        flags = addr & ~BDRV_SECTOR_MASK;
        addr >>= BDRV_SECTOR_BITS;

        if (flags & BLK_MIG_FLAG_DEVICE_BLOCK) {
            /* get device name */
            len = qemu_get_byte(f);
            qemu_get_buffer(f, (uint8_t *)device_name, len);
            device_name[len] = '\0';

            blk = blk_by_name(device_name);
            if (!blk) {
                fprintf(stderr, "Error unknown block device %s\n",
                        device_name);
                return -EINVAL;
            }
            bs = blk_bs(blk);

            if (bs != bs_prev) {
                bs_prev = bs;
                total_sectors = bdrv_nb_sectors(bs);
                if (total_sectors <= 0) {
                    error_report("Error getting length of block device %s",
                                 device_name);
                    return -EINVAL;
                }
            }

            if (total_sectors - addr < BDRV_SECTORS_PER_DIRTY_CHUNK) {
                nr_sectors = total_sectors - addr;
            } else {
                nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
            }

            if (flags & BLK_MIG_FLAG_ZERO_BLOCK) {
                ret = bdrv_write_zeroes(bs, addr, nr_sectors,
                                        BDRV_REQ_MAY_UNMAP);
            } else {
                buf = g_malloc(BLOCK_SIZE);
                qemu_get_buffer(f, buf, BLOCK_SIZE);
                ret = bdrv_write(bs, addr, buf, nr_sectors);
                g_free(buf);
            }

            if (ret < 0) {
                return ret;
            }
        } else if (flags & BLK_MIG_FLAG_PROGRESS) {
            if (!banner_printed) {
                printf("Receiving block device images\n");
                banner_printed = 1;
            }
            printf("Completed %d %%%c", (int)addr,
                   (addr == 100) ? '\n' : '\r');
            fflush(stdout);
        } else if (!(flags & BLK_MIG_FLAG_EOS)) {
            fprintf(stderr, "Unknown block migration flags: %#x\n", flags);
            return -EINVAL;
        }
        ret = qemu_file_get_error(f);
        if (ret != 0) {
            return ret;
        }
    } while (!(flags & BLK_MIG_FLAG_EOS));

    return 0;
}
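
/* Pick up the MigrationParams; params->blk and params->shared should
 * correspond to the -b and -i options of the migrate command. A shared
 * base implies that block migration itself is enabled.
 */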
static void block_set_params(const MigrationParams *params, void *opaque)
{
    block_mig_state.blk_enable = params->blk;
    block_mig_state.shared_base = params->shared;

    /* shared base means that blk_enable = 1 */
    block_mig_state.blk_enable |= params->shared;
}

static bool block_is_active(void *opaque)
{
    return block_mig_state.blk_enable == 1;
}

static SaveVMHandlers savevm_block_handlers = {
    .set_params = block_set_params,
    .save_live_setup = block_save_setup,
    .save_live_iterate = block_save_iterate,
    .save_live_complete = block_save_complete,
    .save_live_pending = block_save_pending,
    .load_state = block_load,
    .cancel = block_migration_cancel,
    .is_active = block_is_active,
};

void blk_mig_init(void)
{
    QSIMPLEQ_INIT(&block_mig_state.bmds_list);
    QSIMPLEQ_INIT(&block_mig_state.blk_list);
    qemu_mutex_init(&block_mig_state.lock);

    register_savevm_live(NULL, "block", 0, 1, &savevm_block_handlers,
                         &block_mig_state);
}