/*
 * QEMU live block migration
 *
 * Copyright IBM, Corp. 2009
 *
 * Authors:
 *  Liran Schour   <lirans@il.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/cutils.h"
#include "qemu/queue.h"
#include "block.h"
#include "block/dirty-bitmap.h"
#include "migration/misc.h"
#include "migration.h"
#include "migration-stats.h"
#include "migration/register.h"
#include "qemu-file.h"
#include "migration/vmstate.h"
#include "sysemu/block-backend.h"
#include "trace.h"
#include "options.h"

#define BLK_MIG_BLOCK_SIZE           (1ULL << 20)
#define BDRV_SECTORS_PER_DIRTY_CHUNK (BLK_MIG_BLOCK_SIZE >> BDRV_SECTOR_BITS)

#define BLK_MIG_FLAG_DEVICE_BLOCK       0x01
#define BLK_MIG_FLAG_EOS                0x02
#define BLK_MIG_FLAG_PROGRESS           0x04
#define BLK_MIG_FLAG_ZERO_BLOCK         0x08

#define MAX_IS_ALLOCATED_SEARCH (65536 * BDRV_SECTOR_SIZE)

#define MAX_IO_BUFFERS 512
#define MAX_PARALLEL_IO 16

typedef struct BlkMigDevState {
    /* Written during setup phase.  Can be read without a lock.  */
    BlockBackend *blk;
    char *blk_name;
    int shared_base;
    int64_t total_sectors;
    QSIMPLEQ_ENTRY(BlkMigDevState) entry;
    Error *blocker;

    /* Only used by migration thread.  Does not need a lock.  */
    int bulk_completed;
    int64_t cur_sector;
    int64_t cur_dirty;

    /* Data in the aio_bitmap is protected by block migration lock.
     * Allocation and free happen during setup and cleanup respectively.
     */
    unsigned long *aio_bitmap;

    /* Protected by block migration lock.  */
    int64_t completed_sectors;

    /* During migration this is protected by iothread lock / AioContext.
     * Allocation and free happen during setup and cleanup respectively.
     */
    BdrvDirtyBitmap *dirty_bitmap;
} BlkMigDevState;

typedef struct BlkMigBlock {
    /* Only used by migration thread.  */
    uint8_t *buf;
    BlkMigDevState *bmds;
    int64_t sector;
    int nr_sectors;
    QEMUIOVector qiov;
    BlockAIOCB *aiocb;

    /* Protected by block migration lock.  */
    int ret;
    QSIMPLEQ_ENTRY(BlkMigBlock) entry;
} BlkMigBlock;

typedef struct BlkMigState {
    QSIMPLEQ_HEAD(, BlkMigDevState) bmds_list;
    int64_t total_sector_sum;
    bool zero_blocks;

    /* Protected by lock.  */
    QSIMPLEQ_HEAD(, BlkMigBlock) blk_list;
    int submitted;
    int read_done;

    /* Only used by migration thread.  Does not need a lock.  */
    int transferred;
    int prev_progress;
    int bulk_completed;

    /* Lock must be taken _inside_ the iothread lock and any AioContexts.  */
    QemuMutex lock;
} BlkMigState;

static BlkMigState block_mig_state;

static void blk_mig_lock(void)
{
    qemu_mutex_lock(&block_mig_state.lock);
}

static void blk_mig_unlock(void)
{
    qemu_mutex_unlock(&block_mig_state.lock);
}
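/*
 * On-the-wire chunk layout, as produced by blk_send() and consumed by
 * block_load() below (summary added for reference):
 *
 *   be64:  (sector_num << BDRV_SECTOR_BITS) | flags
 *          (all BLK_MIG_FLAG_* values fit below bit BDRV_SECTOR_BITS)
 *   then, for BLK_MIG_FLAG_DEVICE_BLOCK chunks:
 *   u8:    length of the device name
 *   bytes: device name (not NUL-terminated)
 *   bytes: BLK_MIG_BLOCK_SIZE of payload, omitted when
 *          BLK_MIG_FLAG_ZERO_BLOCK is set
 *
 *   BLK_MIG_FLAG_PROGRESS chunks carry the completion percentage in the
 *   sector_num field; BLK_MIG_FLAG_EOS chunks consist of the flag word only.
 */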
/* Must run outside of the iothread lock during the bulk phase,
 * or the VM will stall.
 */

static void blk_send(QEMUFile *f, BlkMigBlock *blk)
{
    int len;
    uint64_t flags = BLK_MIG_FLAG_DEVICE_BLOCK;

    if (block_mig_state.zero_blocks &&
        buffer_is_zero(blk->buf, BLK_MIG_BLOCK_SIZE)) {
        flags |= BLK_MIG_FLAG_ZERO_BLOCK;
    }

    /* sector number and flags */
    qemu_put_be64(f, (blk->sector << BDRV_SECTOR_BITS)
                     | flags);

    /* device name */
    len = strlen(blk->bmds->blk_name);
    qemu_put_byte(f, len);
    qemu_put_buffer(f, (uint8_t *) blk->bmds->blk_name, len);

    /* If a block is zero we need to flush here since the network
     * bandwidth is now a lot higher than the storage device bandwidth.
     * Thus, if we queue zero blocks we slow down the migration.
     */
    if (flags & BLK_MIG_FLAG_ZERO_BLOCK) {
        qemu_fflush(f);
        return;
    }

    qemu_put_buffer(f, blk->buf, BLK_MIG_BLOCK_SIZE);
}

int blk_mig_active(void)
{
    return !QSIMPLEQ_EMPTY(&block_mig_state.bmds_list);
}

int blk_mig_bulk_active(void)
{
    return blk_mig_active() && !block_mig_state.bulk_completed;
}

uint64_t blk_mig_bytes_transferred(void)
{
    BlkMigDevState *bmds;
    uint64_t sum = 0;

    blk_mig_lock();
    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        sum += bmds->completed_sectors;
    }
    blk_mig_unlock();
    return sum << BDRV_SECTOR_BITS;
}

uint64_t blk_mig_bytes_remaining(void)
{
    return blk_mig_bytes_total() - blk_mig_bytes_transferred();
}

uint64_t blk_mig_bytes_total(void)
{
    BlkMigDevState *bmds;
    uint64_t sum = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        sum += bmds->total_sectors;
    }
    return sum << BDRV_SECTOR_BITS;
}


/* Called with migration lock held.  */

static int bmds_aio_inflight(BlkMigDevState *bmds, int64_t sector)
{
    int64_t chunk = sector / (int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK;

    if (sector < bmds->total_sectors) {
        return !!(bmds->aio_bitmap[chunk / (sizeof(unsigned long) * 8)] &
            (1UL << (chunk % (sizeof(unsigned long) * 8))));
    } else {
        return 0;
    }
}

/* Called with migration lock held.  */

static void bmds_set_aio_inflight(BlkMigDevState *bmds, int64_t sector_num,
                                  int nb_sectors, int set)
{
    int64_t start, end;
    unsigned long val, idx, bit;

    start = sector_num / BDRV_SECTORS_PER_DIRTY_CHUNK;
    end = (sector_num + nb_sectors - 1) / BDRV_SECTORS_PER_DIRTY_CHUNK;

    for (; start <= end; start++) {
        idx = start / (sizeof(unsigned long) * 8);
        bit = start % (sizeof(unsigned long) * 8);
        val = bmds->aio_bitmap[idx];
        if (set) {
            val |= 1UL << bit;
        } else {
            val &= ~(1UL << bit);
        }
        bmds->aio_bitmap[idx] = val;
    }
}

static void alloc_aio_bitmap(BlkMigDevState *bmds)
{
    int64_t bitmap_size;

    bitmap_size = bmds->total_sectors + BDRV_SECTORS_PER_DIRTY_CHUNK * 8 - 1;
    bitmap_size /= BDRV_SECTORS_PER_DIRTY_CHUNK * 8;

    bmds->aio_bitmap = g_malloc0(bitmap_size);
}
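/*
 * Worked example for the aio_bitmap sizing above (added for clarity):
 * with BLK_MIG_BLOCK_SIZE = 1 MiB and 512-byte sectors,
 * BDRV_SECTORS_PER_DIRTY_CHUNK is 2048, so one bit covers 2048 sectors
 * and one byte covers 16384 sectors.  A 10 GiB disk (20971520 sectors)
 * therefore needs (20971520 + 16383) / 16384 = 1280 bytes of bitmap.
 */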
/* Never hold migration lock when yielding to the main loop!  */

static void blk_mig_read_cb(void *opaque, int ret)
{
    BlkMigBlock *blk = opaque;

    blk_mig_lock();
    blk->ret = ret;

    QSIMPLEQ_INSERT_TAIL(&block_mig_state.blk_list, blk, entry);
    bmds_set_aio_inflight(blk->bmds, blk->sector, blk->nr_sectors, 0);

    block_mig_state.submitted--;
    block_mig_state.read_done++;
    assert(block_mig_state.submitted >= 0);
    blk_mig_unlock();
}

/* Called with no lock taken.  */

static int mig_save_device_bulk(QEMUFile *f, BlkMigDevState *bmds)
{
    int64_t total_sectors = bmds->total_sectors;
    int64_t cur_sector = bmds->cur_sector;
    BlockBackend *bb = bmds->blk;
    BlkMigBlock *blk;
    int nr_sectors;
    int64_t count;

    if (bmds->shared_base) {
        qemu_mutex_lock_iothread();
        aio_context_acquire(blk_get_aio_context(bb));
        /* Skip unallocated sectors; intentionally treats failure or
         * partial sector as an allocated sector */
        while (cur_sector < total_sectors &&
               !bdrv_is_allocated(blk_bs(bb), cur_sector * BDRV_SECTOR_SIZE,
                                  MAX_IS_ALLOCATED_SEARCH, &count)) {
            if (count < BDRV_SECTOR_SIZE) {
                break;
            }
            cur_sector += count >> BDRV_SECTOR_BITS;
        }
        aio_context_release(blk_get_aio_context(bb));
        qemu_mutex_unlock_iothread();
    }

    if (cur_sector >= total_sectors) {
        bmds->cur_sector = bmds->completed_sectors = total_sectors;
        return 1;
    }

    bmds->completed_sectors = cur_sector;

    cur_sector &= ~((int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK - 1);

    /* we are going to transfer a full block even if it is not allocated */
    nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;

    if (total_sectors - cur_sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
        nr_sectors = total_sectors - cur_sector;
    }

    blk = g_new(BlkMigBlock, 1);
    blk->buf = g_malloc(BLK_MIG_BLOCK_SIZE);
    blk->bmds = bmds;
    blk->sector = cur_sector;
    blk->nr_sectors = nr_sectors;

    qemu_iovec_init_buf(&blk->qiov, blk->buf, nr_sectors * BDRV_SECTOR_SIZE);

    blk_mig_lock();
    block_mig_state.submitted++;
    blk_mig_unlock();

    /* We do not know if bs is under the main thread (and thus does
     * not acquire the AioContext when doing AIO) or rather under
     * dataplane.  Thus acquire both the iothread mutex and the
     * AioContext.
     *
     * This is ugly and will disappear when we make bdrv_* thread-safe,
     * without the need to acquire the AioContext.
     */
    qemu_mutex_lock_iothread();
    aio_context_acquire(blk_get_aio_context(bmds->blk));
    bdrv_reset_dirty_bitmap(bmds->dirty_bitmap, cur_sector * BDRV_SECTOR_SIZE,
                            nr_sectors * BDRV_SECTOR_SIZE);
    blk->aiocb = blk_aio_preadv(bb, cur_sector * BDRV_SECTOR_SIZE, &blk->qiov,
                                0, blk_mig_read_cb, blk);
    aio_context_release(blk_get_aio_context(bmds->blk));
    qemu_mutex_unlock_iothread();

    bmds->cur_sector = cur_sector + nr_sectors;
    return (bmds->cur_sector >= total_sectors);
}
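/*
 * Note on the chunk alignment above (added for clarity): the
 * "cur_sector &= ~(BDRV_SECTORS_PER_DIRTY_CHUNK - 1)" step rounds the
 * position down to a 1 MiB boundary; e.g. with 2048-sector chunks a
 * cur_sector of 5000 becomes 4096.  Every bulk read therefore starts on
 * a chunk boundary and matches the granularity of the dirty bitmap.
 */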
/* Called with iothread lock taken.  */

static int set_dirty_tracking(void)
{
    BlkMigDevState *bmds;
    int ret;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        bmds->dirty_bitmap = bdrv_create_dirty_bitmap(blk_bs(bmds->blk),
                                                      BLK_MIG_BLOCK_SIZE,
                                                      NULL, NULL);
        if (!bmds->dirty_bitmap) {
            ret = -errno;
            goto fail;
        }
    }
    return 0;

fail:
    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        if (bmds->dirty_bitmap) {
            bdrv_release_dirty_bitmap(bmds->dirty_bitmap);
        }
    }
    return ret;
}

/* Called with iothread lock taken.  */

static void unset_dirty_tracking(void)
{
    BlkMigDevState *bmds;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        bdrv_release_dirty_bitmap(bmds->dirty_bitmap);
    }
}

static int init_blk_migration(QEMUFile *f)
{
    BlockDriverState *bs;
    BlkMigDevState *bmds;
    int64_t sectors;
    BdrvNextIterator it;
    int i, num_bs = 0;
    struct {
        BlkMigDevState *bmds;
        BlockDriverState *bs;
    } *bmds_bs;
    Error *local_err = NULL;
    int ret;

    block_mig_state.submitted = 0;
    block_mig_state.read_done = 0;
    block_mig_state.transferred = 0;
    block_mig_state.total_sector_sum = 0;
    block_mig_state.prev_progress = -1;
    block_mig_state.bulk_completed = 0;
    block_mig_state.zero_blocks = migrate_zero_blocks();

    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
        num_bs++;
    }
    bmds_bs = g_malloc0(num_bs * sizeof(*bmds_bs));

    for (i = 0, bs = bdrv_first(&it); bs; bs = bdrv_next(&it), i++) {
        if (bdrv_is_read_only(bs)) {
            continue;
        }

        sectors = bdrv_nb_sectors(bs);
        if (sectors <= 0) {
            ret = sectors;
            bdrv_next_cleanup(&it);
            goto out;
        }

        bmds = g_new0(BlkMigDevState, 1);
        bmds->blk = blk_new(qemu_get_aio_context(),
                            BLK_PERM_CONSISTENT_READ, BLK_PERM_ALL);
        bmds->blk_name = g_strdup(bdrv_get_device_name(bs));
        bmds->bulk_completed = 0;
        bmds->total_sectors = sectors;
        bmds->completed_sectors = 0;
        bmds->shared_base = migrate_block_incremental();

        assert(i < num_bs);
        bmds_bs[i].bmds = bmds;
        bmds_bs[i].bs = bs;

        block_mig_state.total_sector_sum += sectors;

        if (bmds->shared_base) {
            trace_migration_block_init_shared(bdrv_get_device_name(bs));
        } else {
            trace_migration_block_init_full(bdrv_get_device_name(bs));
        }

        QSIMPLEQ_INSERT_TAIL(&block_mig_state.bmds_list, bmds, entry);
    }

    /* Can only insert new BDSes now because doing so while iterating block
     * devices may end up in a deadlock (iterating the new BDSes, too). */
    for (i = 0; i < num_bs; i++) {
        BlkMigDevState *bmds = bmds_bs[i].bmds;
        BlockDriverState *bs = bmds_bs[i].bs;

        if (bmds) {
            ret = blk_insert_bs(bmds->blk, bs, &local_err);
            if (ret < 0) {
                error_report_err(local_err);
                goto out;
            }

            alloc_aio_bitmap(bmds);
            error_setg(&bmds->blocker, "block device is in use by migration");
            bdrv_op_block_all(bs, bmds->blocker);
        }
    }

    ret = 0;
out:
    g_free(bmds_bs);
    return ret;
}
/* Called with no lock taken.  */

static int blk_mig_save_bulked_block(QEMUFile *f)
{
    int64_t completed_sector_sum = 0;
    BlkMigDevState *bmds;
    int progress;
    int ret = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        if (bmds->bulk_completed == 0) {
            if (mig_save_device_bulk(f, bmds) == 1) {
                /* completed bulk section for this device */
                bmds->bulk_completed = 1;
            }
            completed_sector_sum += bmds->completed_sectors;
            ret = 1;
            break;
        } else {
            completed_sector_sum += bmds->completed_sectors;
        }
    }

    if (block_mig_state.total_sector_sum != 0) {
        progress = completed_sector_sum * 100 /
                   block_mig_state.total_sector_sum;
    } else {
        progress = 100;
    }
    if (progress != block_mig_state.prev_progress) {
        block_mig_state.prev_progress = progress;
        qemu_put_be64(f, (progress << BDRV_SECTOR_BITS)
                         | BLK_MIG_FLAG_PROGRESS);
        trace_migration_block_progression(progress);
    }

    return ret;
}

static void blk_mig_reset_dirty_cursor(void)
{
    BlkMigDevState *bmds;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        bmds->cur_dirty = 0;
    }
}

/* Called with iothread lock and AioContext taken.  */

static int mig_save_device_dirty(QEMUFile *f, BlkMigDevState *bmds,
                                 int is_async)
{
    BlkMigBlock *blk;
    int64_t total_sectors = bmds->total_sectors;
    int64_t sector;
    int nr_sectors;
    int ret = -EIO;

    for (sector = bmds->cur_dirty; sector < bmds->total_sectors;) {
        blk_mig_lock();
        if (bmds_aio_inflight(bmds, sector)) {
            blk_mig_unlock();
            blk_drain(bmds->blk);
        } else {
            blk_mig_unlock();
        }
        bdrv_dirty_bitmap_lock(bmds->dirty_bitmap);
        if (bdrv_dirty_bitmap_get_locked(bmds->dirty_bitmap,
                                         sector * BDRV_SECTOR_SIZE)) {
            if (total_sectors - sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
                nr_sectors = total_sectors - sector;
            } else {
                nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
            }
            bdrv_reset_dirty_bitmap_locked(bmds->dirty_bitmap,
                                           sector * BDRV_SECTOR_SIZE,
                                           nr_sectors * BDRV_SECTOR_SIZE);
            bdrv_dirty_bitmap_unlock(bmds->dirty_bitmap);

            blk = g_new(BlkMigBlock, 1);
            blk->buf = g_malloc(BLK_MIG_BLOCK_SIZE);
            blk->bmds = bmds;
            blk->sector = sector;
            blk->nr_sectors = nr_sectors;

            if (is_async) {
                qemu_iovec_init_buf(&blk->qiov, blk->buf,
                                    nr_sectors * BDRV_SECTOR_SIZE);

                blk->aiocb = blk_aio_preadv(bmds->blk,
                                            sector * BDRV_SECTOR_SIZE,
                                            &blk->qiov, 0, blk_mig_read_cb,
                                            blk);

                blk_mig_lock();
                block_mig_state.submitted++;
                bmds_set_aio_inflight(bmds, sector, nr_sectors, 1);
                blk_mig_unlock();
            } else {
                ret = blk_pread(bmds->blk, sector * BDRV_SECTOR_SIZE,
                                nr_sectors * BDRV_SECTOR_SIZE, blk->buf, 0);
                if (ret < 0) {
                    goto error;
                }
                blk_send(f, blk);

                g_free(blk->buf);
                g_free(blk);
            }

            sector += nr_sectors;
            bmds->cur_dirty = sector;
            break;
        }

        bdrv_dirty_bitmap_unlock(bmds->dirty_bitmap);
        sector += BDRV_SECTORS_PER_DIRTY_CHUNK;
        bmds->cur_dirty = sector;
    }

    return (bmds->cur_dirty >= bmds->total_sectors);

error:
    trace_migration_block_save_device_dirty(sector);
    g_free(blk->buf);
    g_free(blk);
    return ret;
}
/* Called with iothread lock taken.
 *
 * return value:
 * 0: too much data for max_downtime
 * 1: little enough data for max_downtime
 */
static int blk_mig_save_dirty_block(QEMUFile *f, int is_async)
{
    BlkMigDevState *bmds;
    int ret = 1;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        aio_context_acquire(blk_get_aio_context(bmds->blk));
        ret = mig_save_device_dirty(f, bmds, is_async);
        aio_context_release(blk_get_aio_context(bmds->blk));
        if (ret <= 0) {
            break;
        }
    }

    return ret;
}

/* Called with no locks taken.  */

static int flush_blks(QEMUFile *f)
{
    BlkMigBlock *blk;
    int ret = 0;

    trace_migration_block_flush_blks("Enter", block_mig_state.submitted,
                                     block_mig_state.read_done,
                                     block_mig_state.transferred);

    blk_mig_lock();
    while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
        if (migration_rate_exceeded(f)) {
            break;
        }
        if (blk->ret < 0) {
            ret = blk->ret;
            break;
        }

        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
        blk_mig_unlock();
        blk_send(f, blk);
        blk_mig_lock();

        g_free(blk->buf);
        g_free(blk);

        block_mig_state.read_done--;
        block_mig_state.transferred++;
        assert(block_mig_state.read_done >= 0);
    }
    blk_mig_unlock();

    trace_migration_block_flush_blks("Exit", block_mig_state.submitted,
                                     block_mig_state.read_done,
                                     block_mig_state.transferred);
    return ret;
}

/* Called with iothread lock taken.  */

static int64_t get_remaining_dirty(void)
{
    BlkMigDevState *bmds;
    int64_t dirty = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        aio_context_acquire(blk_get_aio_context(bmds->blk));
        dirty += bdrv_get_dirty_count(bmds->dirty_bitmap);
        aio_context_release(blk_get_aio_context(bmds->blk));
    }

    return dirty;
}


/* Called with iothread lock taken.  */
static void block_migration_cleanup_bmds(void)
{
    BlkMigDevState *bmds;
    AioContext *ctx;

    unset_dirty_tracking();

    while ((bmds = QSIMPLEQ_FIRST(&block_mig_state.bmds_list)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.bmds_list, entry);
        bdrv_op_unblock_all(blk_bs(bmds->blk), bmds->blocker);
        error_free(bmds->blocker);

        /* Save ctx, because bmds->blk can disappear during blk_unref.  */
        ctx = blk_get_aio_context(bmds->blk);
        aio_context_acquire(ctx);
        blk_unref(bmds->blk);
        aio_context_release(ctx);

        g_free(bmds->blk_name);
        g_free(bmds->aio_bitmap);
        g_free(bmds);
    }
}
/* Called with iothread lock taken.  */
static void block_migration_cleanup(void *opaque)
{
    BlkMigBlock *blk;

    bdrv_drain_all();

    block_migration_cleanup_bmds();

    blk_mig_lock();
    while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
        g_free(blk->buf);
        g_free(blk);
    }
    blk_mig_unlock();
}

static int block_save_setup(QEMUFile *f, void *opaque)
{
    int ret;

    trace_migration_block_save("setup", block_mig_state.submitted,
                               block_mig_state.transferred);

    qemu_mutex_lock_iothread();
    ret = init_blk_migration(f);
    if (ret < 0) {
        qemu_mutex_unlock_iothread();
        return ret;
    }

    /* start tracking dirty blocks */
    ret = set_dirty_tracking();

    qemu_mutex_unlock_iothread();

    if (ret) {
        return ret;
    }

    ret = flush_blks(f);
    blk_mig_reset_dirty_cursor();
    qemu_put_be64(f, BLK_MIG_FLAG_EOS);

    return ret;
}

static int block_save_iterate(QEMUFile *f, void *opaque)
{
    int ret;
    uint64_t last_bytes = qemu_file_transferred(f);

    trace_migration_block_save("iterate", block_mig_state.submitted,
                               block_mig_state.transferred);

    ret = flush_blks(f);
    if (ret) {
        return ret;
    }

    blk_mig_reset_dirty_cursor();

    /* control the rate of transfer */
    blk_mig_lock();
    while (block_mig_state.read_done * BLK_MIG_BLOCK_SIZE <
           migration_rate_get() &&
           block_mig_state.submitted < MAX_PARALLEL_IO &&
           (block_mig_state.submitted + block_mig_state.read_done) <
           MAX_IO_BUFFERS) {
        blk_mig_unlock();
        if (block_mig_state.bulk_completed == 0) {
            /* first finish the bulk phase */
            if (blk_mig_save_bulked_block(f) == 0) {
                /* finished saving bulk on all devices */
                block_mig_state.bulk_completed = 1;
            }
            ret = 0;
        } else {
            /* Always called with iothread lock taken for
             * simplicity, block_save_complete also calls it.
             */
            qemu_mutex_lock_iothread();
            ret = blk_mig_save_dirty_block(f, 1);
            qemu_mutex_unlock_iothread();
        }
        if (ret < 0) {
            return ret;
        }
        blk_mig_lock();
        if (ret != 0) {
            /* no more dirty blocks */
            break;
        }
    }
    blk_mig_unlock();

    ret = flush_blks(f);
    if (ret) {
        return ret;
    }

    qemu_put_be64(f, BLK_MIG_FLAG_EOS);
    uint64_t delta_bytes = qemu_file_transferred(f) - last_bytes;
    return (delta_bytes > 0);
}

/* Called with iothread lock taken.  */

static int block_save_complete(QEMUFile *f, void *opaque)
{
    int ret;

    trace_migration_block_save("complete", block_mig_state.submitted,
                               block_mig_state.transferred);

    ret = flush_blks(f);
    if (ret) {
        return ret;
    }

    blk_mig_reset_dirty_cursor();

    /* We know for sure that the bulk phase is completed and
     * all async reads have completed. */
    blk_mig_lock();
    assert(block_mig_state.submitted == 0);
    blk_mig_unlock();

    do {
        ret = blk_mig_save_dirty_block(f, 0);
        if (ret < 0) {
            return ret;
        }
    } while (ret == 0);

    /* report completion */
    qemu_put_be64(f, (100 << BDRV_SECTOR_BITS) | BLK_MIG_FLAG_PROGRESS);

    trace_migration_block_save_complete();

    qemu_put_be64(f, BLK_MIG_FLAG_EOS);

    /* Make sure that our BlockBackends are gone, so that the block driver
     * nodes can be inactivated.  */
    block_migration_cleanup_bmds();

    return 0;
}

static void block_state_pending(void *opaque, uint64_t *must_precopy,
                                uint64_t *can_postcopy)
{
    /* Estimate pending number of bytes to send */
    uint64_t pending;

    qemu_mutex_lock_iothread();
    pending = get_remaining_dirty();
    qemu_mutex_unlock_iothread();

    blk_mig_lock();
    pending += block_mig_state.submitted * BLK_MIG_BLOCK_SIZE +
               block_mig_state.read_done * BLK_MIG_BLOCK_SIZE;
    blk_mig_unlock();

    /* Report at least one block pending during bulk phase */
    if (!pending && !block_mig_state.bulk_completed) {
        pending = BLK_MIG_BLOCK_SIZE;
    }

    trace_migration_block_state_pending(pending);
    /* We don't do postcopy */
    *must_precopy += pending;
}

static int block_load(QEMUFile *f, void *opaque, int version_id)
{
    static int banner_printed;
    int len, flags;
    char device_name[256];
    int64_t addr;
    BlockBackend *blk, *blk_prev = NULL;
    Error *local_err = NULL;
    uint8_t *buf;
    int64_t total_sectors = 0;
    int nr_sectors;
    int ret;
    BlockDriverInfo bdi;
    int cluster_size = BLK_MIG_BLOCK_SIZE;

    do {
        addr = qemu_get_be64(f);

        flags = addr & (BDRV_SECTOR_SIZE - 1);
        addr >>= BDRV_SECTOR_BITS;

        if (flags & BLK_MIG_FLAG_DEVICE_BLOCK) {
            /* get device name */
            len = qemu_get_byte(f);
            qemu_get_buffer(f, (uint8_t *)device_name, len);
            device_name[len] = '\0';

            blk = blk_by_name(device_name);
            if (!blk) {
                fprintf(stderr, "Error unknown block device %s\n",
                        device_name);
                return -EINVAL;
            }

            if (blk != blk_prev) {
                blk_prev = blk;
                total_sectors = blk_nb_sectors(blk);
                if (total_sectors <= 0) {
                    error_report("Error getting length of block device %s",
                                 device_name);
                    return -EINVAL;
                }

                blk_activate(blk, &local_err);
                if (local_err) {
                    error_report_err(local_err);
                    return -EINVAL;
                }

                ret = bdrv_get_info(blk_bs(blk), &bdi);
                if (ret == 0 && bdi.cluster_size > 0 &&
                    bdi.cluster_size <= BLK_MIG_BLOCK_SIZE &&
                    BLK_MIG_BLOCK_SIZE % bdi.cluster_size == 0) {
                    cluster_size = bdi.cluster_size;
                } else {
                    cluster_size = BLK_MIG_BLOCK_SIZE;
                }
            }

            if (total_sectors - addr < BDRV_SECTORS_PER_DIRTY_CHUNK) {
                nr_sectors = total_sectors - addr;
            } else {
                nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
            }

            if (flags & BLK_MIG_FLAG_ZERO_BLOCK) {
                ret = blk_pwrite_zeroes(blk, addr * BDRV_SECTOR_SIZE,
                                        nr_sectors * BDRV_SECTOR_SIZE,
                                        BDRV_REQ_MAY_UNMAP);
            } else {
                int i;
                int64_t cur_addr;
                uint8_t *cur_buf;

                buf = g_malloc(BLK_MIG_BLOCK_SIZE);
                qemu_get_buffer(f, buf, BLK_MIG_BLOCK_SIZE);
                for (i = 0; i < BLK_MIG_BLOCK_SIZE / cluster_size; i++) {
                    cur_addr = addr * BDRV_SECTOR_SIZE + i * cluster_size;
                    cur_buf = buf + i * cluster_size;

                    if ((!block_mig_state.zero_blocks ||
                        cluster_size < BLK_MIG_BLOCK_SIZE) &&
                        buffer_is_zero(cur_buf, cluster_size)) {
                        ret = blk_pwrite_zeroes(blk, cur_addr,
                                                cluster_size,
                                                BDRV_REQ_MAY_UNMAP);
                    } else {
                        ret = blk_pwrite(blk, cur_addr, cluster_size, cur_buf,
                                         0);
                    }
                    if (ret < 0) {
                        break;
                    }
                }
                g_free(buf);
            }

            if (ret < 0) {
                return ret;
            }
        } else if (flags & BLK_MIG_FLAG_PROGRESS) {
            if (!banner_printed) {
                printf("Receiving block device images\n");
                banner_printed = 1;
            }
            printf("Completed %d %%%c", (int)addr,
                   (addr == 100) ? '\n' : '\r');
            fflush(stdout);
        } else if (!(flags & BLK_MIG_FLAG_EOS)) {
            fprintf(stderr, "Unknown block migration flags: 0x%x\n", flags);
            return -EINVAL;
        }
        ret = qemu_file_get_error(f);
        if (ret != 0) {
            return ret;
        }
    } while (!(flags & BLK_MIG_FLAG_EOS));

    return 0;
}

static bool block_is_active(void *opaque)
{
    return migrate_block();
}

static SaveVMHandlers savevm_block_handlers = {
    .save_setup = block_save_setup,
    .save_live_iterate = block_save_iterate,
    .save_live_complete_precopy = block_save_complete,
    .state_pending_exact = block_state_pending,
    .state_pending_estimate = block_state_pending,
    .load_state = block_load,
    .save_cleanup = block_migration_cleanup,
    .is_active = block_is_active,
};

void blk_mig_init(void)
{
    QSIMPLEQ_INIT(&block_mig_state.bmds_list);
    QSIMPLEQ_INIT(&block_mig_state.blk_list);
    qemu_mutex_init(&block_mig_state.lock);

    register_savevm_live("block", 0, 1, &savevm_block_handlers,
                         &block_mig_state);
}