1 /* 2 * QEMU System Emulator 3 * 4 * Copyright (c) 2003-2008 Fabrice Bellard 5 * Copyright (c) 2011-2015 Red Hat Inc 6 * 7 * Authors: 8 * Juan Quintela <quintela@redhat.com> 9 * 10 * Permission is hereby granted, free of charge, to any person obtaining a copy 11 * of this software and associated documentation files (the "Software"), to deal 12 * in the Software without restriction, including without limitation the rights 13 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 14 * copies of the Software, and to permit persons to whom the Software is 15 * furnished to do so, subject to the following conditions: 16 * 17 * The above copyright notice and this permission notice shall be included in 18 * all copies or substantial portions of the Software. 19 * 20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 23 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 24 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 25 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 26 * THE SOFTWARE. 27 */ 28 #include "qemu/osdep.h" 29 #include <zlib.h> 30 #include "qapi-event.h" 31 #include "qemu/bitops.h" 32 #include "qemu/bitmap.h" 33 #include "qemu/timer.h" 34 #include "qemu/main-loop.h" 35 #include "migration/migration.h" 36 #include "migration/postcopy-ram.h" 37 #include "exec/address-spaces.h" 38 #include "migration/page_cache.h" 39 #include "qemu/error-report.h" 40 #include "trace.h" 41 #include "exec/ram_addr.h" 42 #include "qemu/rcu_queue.h" 43 44 #ifdef DEBUG_MIGRATION_RAM 45 #define DPRINTF(fmt, ...) \ 46 do { fprintf(stdout, "migration_ram: " fmt, ## __VA_ARGS__); } while (0) 47 #else 48 #define DPRINTF(fmt, ...) \ 49 do { } while (0) 50 #endif 51 52 static int dirty_rate_high_cnt; 53 54 static uint64_t bitmap_sync_count; 55 56 /***********************************************************/ 57 /* ram save/restore */ 58 59 #define RAM_SAVE_FLAG_FULL 0x01 /* Obsolete, not used anymore */ 60 #define RAM_SAVE_FLAG_COMPRESS 0x02 61 #define RAM_SAVE_FLAG_MEM_SIZE 0x04 62 #define RAM_SAVE_FLAG_PAGE 0x08 63 #define RAM_SAVE_FLAG_EOS 0x10 64 #define RAM_SAVE_FLAG_CONTINUE 0x20 65 #define RAM_SAVE_FLAG_XBZRLE 0x40 66 /* 0x80 is reserved in migration.h start with 0x100 next */ 67 #define RAM_SAVE_FLAG_COMPRESS_PAGE 0x100 68 69 static const uint8_t ZERO_TARGET_PAGE[TARGET_PAGE_SIZE]; 70 71 static inline bool is_zero_range(uint8_t *p, uint64_t size) 72 { 73 return buffer_find_nonzero_offset(p, size) == size; 74 } 75 76 /* struct contains XBZRLE cache and a static page 77 used by the compression */ 78 static struct { 79 /* buffer used for XBZRLE encoding */ 80 uint8_t *encoded_buf; 81 /* buffer for storing page content */ 82 uint8_t *current_buf; 83 /* Cache for XBZRLE, Protected by lock. */ 84 PageCache *cache; 85 QemuMutex lock; 86 } XBZRLE; 87 88 /* buffer used for XBZRLE decoding */ 89 static uint8_t *xbzrle_decoded_buf; 90 91 static void XBZRLE_cache_lock(void) 92 { 93 if (migrate_use_xbzrle()) 94 qemu_mutex_lock(&XBZRLE.lock); 95 } 96 97 static void XBZRLE_cache_unlock(void) 98 { 99 if (migrate_use_xbzrle()) 100 qemu_mutex_unlock(&XBZRLE.lock); 101 } 102 103 /* 104 * called from qmp_migrate_set_cache_size in main thread, possibly while 105 * a migration is in progress. 106 * A running migration maybe using the cache and might finish during this 107 * call, hence changes to the cache are protected by XBZRLE.lock(). 108 */ 109 int64_t xbzrle_cache_resize(int64_t new_size) 110 { 111 PageCache *new_cache; 112 int64_t ret; 113 114 if (new_size < TARGET_PAGE_SIZE) { 115 return -1; 116 } 117 118 XBZRLE_cache_lock(); 119 120 if (XBZRLE.cache != NULL) { 121 if (pow2floor(new_size) == migrate_xbzrle_cache_size()) { 122 goto out_new_size; 123 } 124 new_cache = cache_init(new_size / TARGET_PAGE_SIZE, 125 TARGET_PAGE_SIZE); 126 if (!new_cache) { 127 error_report("Error creating cache"); 128 ret = -1; 129 goto out; 130 } 131 132 cache_fini(XBZRLE.cache); 133 XBZRLE.cache = new_cache; 134 } 135 136 out_new_size: 137 ret = pow2floor(new_size); 138 out: 139 XBZRLE_cache_unlock(); 140 return ret; 141 } 142 143 /* accounting for migration statistics */ 144 typedef struct AccountingInfo { 145 uint64_t dup_pages; 146 uint64_t skipped_pages; 147 uint64_t norm_pages; 148 uint64_t iterations; 149 uint64_t xbzrle_bytes; 150 uint64_t xbzrle_pages; 151 uint64_t xbzrle_cache_miss; 152 double xbzrle_cache_miss_rate; 153 uint64_t xbzrle_overflows; 154 } AccountingInfo; 155 156 static AccountingInfo acct_info; 157 158 static void acct_clear(void) 159 { 160 memset(&acct_info, 0, sizeof(acct_info)); 161 } 162 163 uint64_t dup_mig_bytes_transferred(void) 164 { 165 return acct_info.dup_pages * TARGET_PAGE_SIZE; 166 } 167 168 uint64_t dup_mig_pages_transferred(void) 169 { 170 return acct_info.dup_pages; 171 } 172 173 uint64_t skipped_mig_bytes_transferred(void) 174 { 175 return acct_info.skipped_pages * TARGET_PAGE_SIZE; 176 } 177 178 uint64_t skipped_mig_pages_transferred(void) 179 { 180 return acct_info.skipped_pages; 181 } 182 183 uint64_t norm_mig_bytes_transferred(void) 184 { 185 return acct_info.norm_pages * TARGET_PAGE_SIZE; 186 } 187 188 uint64_t norm_mig_pages_transferred(void) 189 { 190 return acct_info.norm_pages; 191 } 192 193 uint64_t xbzrle_mig_bytes_transferred(void) 194 { 195 return acct_info.xbzrle_bytes; 196 } 197 198 uint64_t xbzrle_mig_pages_transferred(void) 199 { 200 return acct_info.xbzrle_pages; 201 } 202 203 uint64_t xbzrle_mig_pages_cache_miss(void) 204 { 205 return acct_info.xbzrle_cache_miss; 206 } 207 208 double xbzrle_mig_cache_miss_rate(void) 209 { 210 return acct_info.xbzrle_cache_miss_rate; 211 } 212 213 uint64_t xbzrle_mig_pages_overflow(void) 214 { 215 return acct_info.xbzrle_overflows; 216 } 217 218 /* This is the last block that we have visited serching for dirty pages 219 */ 220 static RAMBlock *last_seen_block; 221 /* This is the last block from where we have sent data */ 222 static RAMBlock *last_sent_block; 223 static ram_addr_t last_offset; 224 static QemuMutex migration_bitmap_mutex; 225 static uint64_t migration_dirty_pages; 226 static uint32_t last_version; 227 static bool ram_bulk_stage; 228 229 /* used by the search for pages to send */ 230 struct PageSearchStatus { 231 /* Current block being searched */ 232 RAMBlock *block; 233 /* Current offset to search from */ 234 ram_addr_t offset; 235 /* Set once we wrap around */ 236 bool complete_round; 237 }; 238 typedef struct PageSearchStatus PageSearchStatus; 239 240 static struct BitmapRcu { 241 struct rcu_head rcu; 242 /* Main migration bitmap */ 243 unsigned long *bmap; 244 /* bitmap of pages that haven't been sent even once 245 * only maintained and used in postcopy at the moment 246 * where it's used to send the dirtymap at the start 247 * of the postcopy phase 248 */ 249 unsigned long *unsentmap; 250 } *migration_bitmap_rcu; 251 252 struct CompressParam { 253 bool start; 254 bool done; 255 QEMUFile *file; 256 QemuMutex mutex; 257 QemuCond cond; 258 RAMBlock *block; 259 ram_addr_t offset; 260 }; 261 typedef struct CompressParam CompressParam; 262 263 struct DecompressParam { 264 bool start; 265 QemuMutex mutex; 266 QemuCond cond; 267 void *des; 268 uint8_t *compbuf; 269 int len; 270 }; 271 typedef struct DecompressParam DecompressParam; 272 273 static CompressParam *comp_param; 274 static QemuThread *compress_threads; 275 /* comp_done_cond is used to wake up the migration thread when 276 * one of the compression threads has finished the compression. 277 * comp_done_lock is used to co-work with comp_done_cond. 278 */ 279 static QemuMutex *comp_done_lock; 280 static QemuCond *comp_done_cond; 281 /* The empty QEMUFileOps will be used by file in CompressParam */ 282 static const QEMUFileOps empty_ops = { }; 283 284 static bool compression_switch; 285 static bool quit_comp_thread; 286 static bool quit_decomp_thread; 287 static DecompressParam *decomp_param; 288 static QemuThread *decompress_threads; 289 290 static int do_compress_ram_page(CompressParam *param); 291 292 static void *do_data_compress(void *opaque) 293 { 294 CompressParam *param = opaque; 295 296 while (!quit_comp_thread) { 297 qemu_mutex_lock(¶m->mutex); 298 /* Re-check the quit_comp_thread in case of 299 * terminate_compression_threads is called just before 300 * qemu_mutex_lock(¶m->mutex) and after 301 * while(!quit_comp_thread), re-check it here can make 302 * sure the compression thread terminate as expected. 303 */ 304 while (!param->start && !quit_comp_thread) { 305 qemu_cond_wait(¶m->cond, ¶m->mutex); 306 } 307 if (!quit_comp_thread) { 308 do_compress_ram_page(param); 309 } 310 param->start = false; 311 qemu_mutex_unlock(¶m->mutex); 312 313 qemu_mutex_lock(comp_done_lock); 314 param->done = true; 315 qemu_cond_signal(comp_done_cond); 316 qemu_mutex_unlock(comp_done_lock); 317 } 318 319 return NULL; 320 } 321 322 static inline void terminate_compression_threads(void) 323 { 324 int idx, thread_count; 325 326 thread_count = migrate_compress_threads(); 327 quit_comp_thread = true; 328 for (idx = 0; idx < thread_count; idx++) { 329 qemu_mutex_lock(&comp_param[idx].mutex); 330 qemu_cond_signal(&comp_param[idx].cond); 331 qemu_mutex_unlock(&comp_param[idx].mutex); 332 } 333 } 334 335 void migrate_compress_threads_join(void) 336 { 337 int i, thread_count; 338 339 if (!migrate_use_compression()) { 340 return; 341 } 342 terminate_compression_threads(); 343 thread_count = migrate_compress_threads(); 344 for (i = 0; i < thread_count; i++) { 345 qemu_thread_join(compress_threads + i); 346 qemu_fclose(comp_param[i].file); 347 qemu_mutex_destroy(&comp_param[i].mutex); 348 qemu_cond_destroy(&comp_param[i].cond); 349 } 350 qemu_mutex_destroy(comp_done_lock); 351 qemu_cond_destroy(comp_done_cond); 352 g_free(compress_threads); 353 g_free(comp_param); 354 g_free(comp_done_cond); 355 g_free(comp_done_lock); 356 compress_threads = NULL; 357 comp_param = NULL; 358 comp_done_cond = NULL; 359 comp_done_lock = NULL; 360 } 361 362 void migrate_compress_threads_create(void) 363 { 364 int i, thread_count; 365 366 if (!migrate_use_compression()) { 367 return; 368 } 369 quit_comp_thread = false; 370 compression_switch = true; 371 thread_count = migrate_compress_threads(); 372 compress_threads = g_new0(QemuThread, thread_count); 373 comp_param = g_new0(CompressParam, thread_count); 374 comp_done_cond = g_new0(QemuCond, 1); 375 comp_done_lock = g_new0(QemuMutex, 1); 376 qemu_cond_init(comp_done_cond); 377 qemu_mutex_init(comp_done_lock); 378 for (i = 0; i < thread_count; i++) { 379 /* com_param[i].file is just used as a dummy buffer to save data, set 380 * it's ops to empty. 381 */ 382 comp_param[i].file = qemu_fopen_ops(NULL, &empty_ops); 383 comp_param[i].done = true; 384 qemu_mutex_init(&comp_param[i].mutex); 385 qemu_cond_init(&comp_param[i].cond); 386 qemu_thread_create(compress_threads + i, "compress", 387 do_data_compress, comp_param + i, 388 QEMU_THREAD_JOINABLE); 389 } 390 } 391 392 /** 393 * save_page_header: Write page header to wire 394 * 395 * If this is the 1st block, it also writes the block identification 396 * 397 * Returns: Number of bytes written 398 * 399 * @f: QEMUFile where to send the data 400 * @block: block that contains the page we want to send 401 * @offset: offset inside the block for the page 402 * in the lower bits, it contains flags 403 */ 404 static size_t save_page_header(QEMUFile *f, RAMBlock *block, ram_addr_t offset) 405 { 406 size_t size, len; 407 408 qemu_put_be64(f, offset); 409 size = 8; 410 411 if (!(offset & RAM_SAVE_FLAG_CONTINUE)) { 412 len = strlen(block->idstr); 413 qemu_put_byte(f, len); 414 qemu_put_buffer(f, (uint8_t *)block->idstr, len); 415 size += 1 + len; 416 } 417 return size; 418 } 419 420 /* Reduce amount of guest cpu execution to hopefully slow down memory writes. 421 * If guest dirty memory rate is reduced below the rate at which we can 422 * transfer pages to the destination then we should be able to complete 423 * migration. Some workloads dirty memory way too fast and will not effectively 424 * converge, even with auto-converge. 425 */ 426 static void mig_throttle_guest_down(void) 427 { 428 MigrationState *s = migrate_get_current(); 429 uint64_t pct_initial = 430 s->parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INITIAL]; 431 uint64_t pct_icrement = 432 s->parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INCREMENT]; 433 434 /* We have not started throttling yet. Let's start it. */ 435 if (!cpu_throttle_active()) { 436 cpu_throttle_set(pct_initial); 437 } else { 438 /* Throttling already on, just increase the rate */ 439 cpu_throttle_set(cpu_throttle_get_percentage() + pct_icrement); 440 } 441 } 442 443 /* Update the xbzrle cache to reflect a page that's been sent as all 0. 444 * The important thing is that a stale (not-yet-0'd) page be replaced 445 * by the new data. 446 * As a bonus, if the page wasn't in the cache it gets added so that 447 * when a small write is made into the 0'd page it gets XBZRLE sent 448 */ 449 static void xbzrle_cache_zero_page(ram_addr_t current_addr) 450 { 451 if (ram_bulk_stage || !migrate_use_xbzrle()) { 452 return; 453 } 454 455 /* We don't care if this fails to allocate a new cache page 456 * as long as it updated an old one */ 457 cache_insert(XBZRLE.cache, current_addr, ZERO_TARGET_PAGE, 458 bitmap_sync_count); 459 } 460 461 #define ENCODING_FLAG_XBZRLE 0x1 462 463 /** 464 * save_xbzrle_page: compress and send current page 465 * 466 * Returns: 1 means that we wrote the page 467 * 0 means that page is identical to the one already sent 468 * -1 means that xbzrle would be longer than normal 469 * 470 * @f: QEMUFile where to send the data 471 * @current_data: 472 * @current_addr: 473 * @block: block that contains the page we want to send 474 * @offset: offset inside the block for the page 475 * @last_stage: if we are at the completion stage 476 * @bytes_transferred: increase it with the number of transferred bytes 477 */ 478 static int save_xbzrle_page(QEMUFile *f, uint8_t **current_data, 479 ram_addr_t current_addr, RAMBlock *block, 480 ram_addr_t offset, bool last_stage, 481 uint64_t *bytes_transferred) 482 { 483 int encoded_len = 0, bytes_xbzrle; 484 uint8_t *prev_cached_page; 485 486 if (!cache_is_cached(XBZRLE.cache, current_addr, bitmap_sync_count)) { 487 acct_info.xbzrle_cache_miss++; 488 if (!last_stage) { 489 if (cache_insert(XBZRLE.cache, current_addr, *current_data, 490 bitmap_sync_count) == -1) { 491 return -1; 492 } else { 493 /* update *current_data when the page has been 494 inserted into cache */ 495 *current_data = get_cached_data(XBZRLE.cache, current_addr); 496 } 497 } 498 return -1; 499 } 500 501 prev_cached_page = get_cached_data(XBZRLE.cache, current_addr); 502 503 /* save current buffer into memory */ 504 memcpy(XBZRLE.current_buf, *current_data, TARGET_PAGE_SIZE); 505 506 /* XBZRLE encoding (if there is no overflow) */ 507 encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf, 508 TARGET_PAGE_SIZE, XBZRLE.encoded_buf, 509 TARGET_PAGE_SIZE); 510 if (encoded_len == 0) { 511 DPRINTF("Skipping unmodified page\n"); 512 return 0; 513 } else if (encoded_len == -1) { 514 DPRINTF("Overflow\n"); 515 acct_info.xbzrle_overflows++; 516 /* update data in the cache */ 517 if (!last_stage) { 518 memcpy(prev_cached_page, *current_data, TARGET_PAGE_SIZE); 519 *current_data = prev_cached_page; 520 } 521 return -1; 522 } 523 524 /* we need to update the data in the cache, in order to get the same data */ 525 if (!last_stage) { 526 memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE); 527 } 528 529 /* Send XBZRLE based compressed page */ 530 bytes_xbzrle = save_page_header(f, block, offset | RAM_SAVE_FLAG_XBZRLE); 531 qemu_put_byte(f, ENCODING_FLAG_XBZRLE); 532 qemu_put_be16(f, encoded_len); 533 qemu_put_buffer(f, XBZRLE.encoded_buf, encoded_len); 534 bytes_xbzrle += encoded_len + 1 + 2; 535 acct_info.xbzrle_pages++; 536 acct_info.xbzrle_bytes += bytes_xbzrle; 537 *bytes_transferred += bytes_xbzrle; 538 539 return 1; 540 } 541 542 /* Called with rcu_read_lock() to protect migration_bitmap 543 * rb: The RAMBlock to search for dirty pages in 544 * start: Start address (typically so we can continue from previous page) 545 * ram_addr_abs: Pointer into which to store the address of the dirty page 546 * within the global ram_addr space 547 * 548 * Returns: byte offset within memory region of the start of a dirty page 549 */ 550 static inline 551 ram_addr_t migration_bitmap_find_dirty(RAMBlock *rb, 552 ram_addr_t start, 553 ram_addr_t *ram_addr_abs) 554 { 555 unsigned long base = rb->offset >> TARGET_PAGE_BITS; 556 unsigned long nr = base + (start >> TARGET_PAGE_BITS); 557 uint64_t rb_size = rb->used_length; 558 unsigned long size = base + (rb_size >> TARGET_PAGE_BITS); 559 unsigned long *bitmap; 560 561 unsigned long next; 562 563 bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap; 564 if (ram_bulk_stage && nr > base) { 565 next = nr + 1; 566 } else { 567 next = find_next_bit(bitmap, size, nr); 568 } 569 570 *ram_addr_abs = next << TARGET_PAGE_BITS; 571 return (next - base) << TARGET_PAGE_BITS; 572 } 573 574 static inline bool migration_bitmap_clear_dirty(ram_addr_t addr) 575 { 576 bool ret; 577 int nr = addr >> TARGET_PAGE_BITS; 578 unsigned long *bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap; 579 580 ret = test_and_clear_bit(nr, bitmap); 581 582 if (ret) { 583 migration_dirty_pages--; 584 } 585 return ret; 586 } 587 588 static void migration_bitmap_sync_range(ram_addr_t start, ram_addr_t length) 589 { 590 unsigned long *bitmap; 591 bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap; 592 migration_dirty_pages += 593 cpu_physical_memory_sync_dirty_bitmap(bitmap, start, length); 594 } 595 596 /* Fix me: there are too many global variables used in migration process. */ 597 static int64_t start_time; 598 static int64_t bytes_xfer_prev; 599 static int64_t num_dirty_pages_period; 600 static uint64_t xbzrle_cache_miss_prev; 601 static uint64_t iterations_prev; 602 603 static void migration_bitmap_sync_init(void) 604 { 605 start_time = 0; 606 bytes_xfer_prev = 0; 607 num_dirty_pages_period = 0; 608 xbzrle_cache_miss_prev = 0; 609 iterations_prev = 0; 610 } 611 612 /* Called with iothread lock held, to protect ram_list.dirty_memory[] */ 613 static void migration_bitmap_sync(void) 614 { 615 RAMBlock *block; 616 uint64_t num_dirty_pages_init = migration_dirty_pages; 617 MigrationState *s = migrate_get_current(); 618 int64_t end_time; 619 int64_t bytes_xfer_now; 620 621 bitmap_sync_count++; 622 623 if (!bytes_xfer_prev) { 624 bytes_xfer_prev = ram_bytes_transferred(); 625 } 626 627 if (!start_time) { 628 start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); 629 } 630 631 trace_migration_bitmap_sync_start(); 632 address_space_sync_dirty_bitmap(&address_space_memory); 633 634 qemu_mutex_lock(&migration_bitmap_mutex); 635 rcu_read_lock(); 636 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) { 637 migration_bitmap_sync_range(block->offset, block->used_length); 638 } 639 rcu_read_unlock(); 640 qemu_mutex_unlock(&migration_bitmap_mutex); 641 642 trace_migration_bitmap_sync_end(migration_dirty_pages 643 - num_dirty_pages_init); 644 num_dirty_pages_period += migration_dirty_pages - num_dirty_pages_init; 645 end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); 646 647 /* more than 1 second = 1000 millisecons */ 648 if (end_time > start_time + 1000) { 649 if (migrate_auto_converge()) { 650 /* The following detection logic can be refined later. For now: 651 Check to see if the dirtied bytes is 50% more than the approx. 652 amount of bytes that just got transferred since the last time we 653 were in this routine. If that happens twice, start or increase 654 throttling */ 655 bytes_xfer_now = ram_bytes_transferred(); 656 657 if (s->dirty_pages_rate && 658 (num_dirty_pages_period * TARGET_PAGE_SIZE > 659 (bytes_xfer_now - bytes_xfer_prev)/2) && 660 (dirty_rate_high_cnt++ >= 2)) { 661 trace_migration_throttle(); 662 dirty_rate_high_cnt = 0; 663 mig_throttle_guest_down(); 664 } 665 bytes_xfer_prev = bytes_xfer_now; 666 } 667 668 if (migrate_use_xbzrle()) { 669 if (iterations_prev != acct_info.iterations) { 670 acct_info.xbzrle_cache_miss_rate = 671 (double)(acct_info.xbzrle_cache_miss - 672 xbzrle_cache_miss_prev) / 673 (acct_info.iterations - iterations_prev); 674 } 675 iterations_prev = acct_info.iterations; 676 xbzrle_cache_miss_prev = acct_info.xbzrle_cache_miss; 677 } 678 s->dirty_pages_rate = num_dirty_pages_period * 1000 679 / (end_time - start_time); 680 s->dirty_bytes_rate = s->dirty_pages_rate * TARGET_PAGE_SIZE; 681 start_time = end_time; 682 num_dirty_pages_period = 0; 683 } 684 s->dirty_sync_count = bitmap_sync_count; 685 if (migrate_use_events()) { 686 qapi_event_send_migration_pass(bitmap_sync_count, NULL); 687 } 688 } 689 690 /** 691 * save_zero_page: Send the zero page to the stream 692 * 693 * Returns: Number of pages written. 694 * 695 * @f: QEMUFile where to send the data 696 * @block: block that contains the page we want to send 697 * @offset: offset inside the block for the page 698 * @p: pointer to the page 699 * @bytes_transferred: increase it with the number of transferred bytes 700 */ 701 static int save_zero_page(QEMUFile *f, RAMBlock *block, ram_addr_t offset, 702 uint8_t *p, uint64_t *bytes_transferred) 703 { 704 int pages = -1; 705 706 if (is_zero_range(p, TARGET_PAGE_SIZE)) { 707 acct_info.dup_pages++; 708 *bytes_transferred += save_page_header(f, block, 709 offset | RAM_SAVE_FLAG_COMPRESS); 710 qemu_put_byte(f, 0); 711 *bytes_transferred += 1; 712 pages = 1; 713 } 714 715 return pages; 716 } 717 718 /** 719 * ram_save_page: Send the given page to the stream 720 * 721 * Returns: Number of pages written. 722 * < 0 - error 723 * >=0 - Number of pages written - this might legally be 0 724 * if xbzrle noticed the page was the same. 725 * 726 * @f: QEMUFile where to send the data 727 * @block: block that contains the page we want to send 728 * @offset: offset inside the block for the page 729 * @last_stage: if we are at the completion stage 730 * @bytes_transferred: increase it with the number of transferred bytes 731 */ 732 static int ram_save_page(QEMUFile *f, PageSearchStatus *pss, 733 bool last_stage, uint64_t *bytes_transferred) 734 { 735 int pages = -1; 736 uint64_t bytes_xmit; 737 ram_addr_t current_addr; 738 uint8_t *p; 739 int ret; 740 bool send_async = true; 741 RAMBlock *block = pss->block; 742 ram_addr_t offset = pss->offset; 743 744 p = block->host + offset; 745 746 /* In doubt sent page as normal */ 747 bytes_xmit = 0; 748 ret = ram_control_save_page(f, block->offset, 749 offset, TARGET_PAGE_SIZE, &bytes_xmit); 750 if (bytes_xmit) { 751 *bytes_transferred += bytes_xmit; 752 pages = 1; 753 } 754 755 XBZRLE_cache_lock(); 756 757 current_addr = block->offset + offset; 758 759 if (block == last_sent_block) { 760 offset |= RAM_SAVE_FLAG_CONTINUE; 761 } 762 if (ret != RAM_SAVE_CONTROL_NOT_SUPP) { 763 if (ret != RAM_SAVE_CONTROL_DELAYED) { 764 if (bytes_xmit > 0) { 765 acct_info.norm_pages++; 766 } else if (bytes_xmit == 0) { 767 acct_info.dup_pages++; 768 } 769 } 770 } else { 771 pages = save_zero_page(f, block, offset, p, bytes_transferred); 772 if (pages > 0) { 773 /* Must let xbzrle know, otherwise a previous (now 0'd) cached 774 * page would be stale 775 */ 776 xbzrle_cache_zero_page(current_addr); 777 } else if (!ram_bulk_stage && migrate_use_xbzrle()) { 778 pages = save_xbzrle_page(f, &p, current_addr, block, 779 offset, last_stage, bytes_transferred); 780 if (!last_stage) { 781 /* Can't send this cached data async, since the cache page 782 * might get updated before it gets to the wire 783 */ 784 send_async = false; 785 } 786 } 787 } 788 789 /* XBZRLE overflow or normal page */ 790 if (pages == -1) { 791 *bytes_transferred += save_page_header(f, block, 792 offset | RAM_SAVE_FLAG_PAGE); 793 if (send_async) { 794 qemu_put_buffer_async(f, p, TARGET_PAGE_SIZE); 795 } else { 796 qemu_put_buffer(f, p, TARGET_PAGE_SIZE); 797 } 798 *bytes_transferred += TARGET_PAGE_SIZE; 799 pages = 1; 800 acct_info.norm_pages++; 801 } 802 803 XBZRLE_cache_unlock(); 804 805 return pages; 806 } 807 808 static int do_compress_ram_page(CompressParam *param) 809 { 810 int bytes_sent, blen; 811 uint8_t *p; 812 RAMBlock *block = param->block; 813 ram_addr_t offset = param->offset; 814 815 p = block->host + (offset & TARGET_PAGE_MASK); 816 817 bytes_sent = save_page_header(param->file, block, offset | 818 RAM_SAVE_FLAG_COMPRESS_PAGE); 819 blen = qemu_put_compression_data(param->file, p, TARGET_PAGE_SIZE, 820 migrate_compress_level()); 821 bytes_sent += blen; 822 823 return bytes_sent; 824 } 825 826 static inline void start_compression(CompressParam *param) 827 { 828 param->done = false; 829 qemu_mutex_lock(¶m->mutex); 830 param->start = true; 831 qemu_cond_signal(¶m->cond); 832 qemu_mutex_unlock(¶m->mutex); 833 } 834 835 static inline void start_decompression(DecompressParam *param) 836 { 837 qemu_mutex_lock(¶m->mutex); 838 param->start = true; 839 qemu_cond_signal(¶m->cond); 840 qemu_mutex_unlock(¶m->mutex); 841 } 842 843 static uint64_t bytes_transferred; 844 845 static void flush_compressed_data(QEMUFile *f) 846 { 847 int idx, len, thread_count; 848 849 if (!migrate_use_compression()) { 850 return; 851 } 852 thread_count = migrate_compress_threads(); 853 for (idx = 0; idx < thread_count; idx++) { 854 if (!comp_param[idx].done) { 855 qemu_mutex_lock(comp_done_lock); 856 while (!comp_param[idx].done && !quit_comp_thread) { 857 qemu_cond_wait(comp_done_cond, comp_done_lock); 858 } 859 qemu_mutex_unlock(comp_done_lock); 860 } 861 if (!quit_comp_thread) { 862 len = qemu_put_qemu_file(f, comp_param[idx].file); 863 bytes_transferred += len; 864 } 865 } 866 } 867 868 static inline void set_compress_params(CompressParam *param, RAMBlock *block, 869 ram_addr_t offset) 870 { 871 param->block = block; 872 param->offset = offset; 873 } 874 875 static int compress_page_with_multi_thread(QEMUFile *f, RAMBlock *block, 876 ram_addr_t offset, 877 uint64_t *bytes_transferred) 878 { 879 int idx, thread_count, bytes_xmit = -1, pages = -1; 880 881 thread_count = migrate_compress_threads(); 882 qemu_mutex_lock(comp_done_lock); 883 while (true) { 884 for (idx = 0; idx < thread_count; idx++) { 885 if (comp_param[idx].done) { 886 bytes_xmit = qemu_put_qemu_file(f, comp_param[idx].file); 887 set_compress_params(&comp_param[idx], block, offset); 888 start_compression(&comp_param[idx]); 889 pages = 1; 890 acct_info.norm_pages++; 891 *bytes_transferred += bytes_xmit; 892 break; 893 } 894 } 895 if (pages > 0) { 896 break; 897 } else { 898 qemu_cond_wait(comp_done_cond, comp_done_lock); 899 } 900 } 901 qemu_mutex_unlock(comp_done_lock); 902 903 return pages; 904 } 905 906 /** 907 * ram_save_compressed_page: compress the given page and send it to the stream 908 * 909 * Returns: Number of pages written. 910 * 911 * @f: QEMUFile where to send the data 912 * @block: block that contains the page we want to send 913 * @offset: offset inside the block for the page 914 * @last_stage: if we are at the completion stage 915 * @bytes_transferred: increase it with the number of transferred bytes 916 */ 917 static int ram_save_compressed_page(QEMUFile *f, PageSearchStatus *pss, 918 bool last_stage, 919 uint64_t *bytes_transferred) 920 { 921 int pages = -1; 922 uint64_t bytes_xmit; 923 uint8_t *p; 924 int ret; 925 RAMBlock *block = pss->block; 926 ram_addr_t offset = pss->offset; 927 928 p = block->host + offset; 929 930 bytes_xmit = 0; 931 ret = ram_control_save_page(f, block->offset, 932 offset, TARGET_PAGE_SIZE, &bytes_xmit); 933 if (bytes_xmit) { 934 *bytes_transferred += bytes_xmit; 935 pages = 1; 936 } 937 if (block == last_sent_block) { 938 offset |= RAM_SAVE_FLAG_CONTINUE; 939 } 940 if (ret != RAM_SAVE_CONTROL_NOT_SUPP) { 941 if (ret != RAM_SAVE_CONTROL_DELAYED) { 942 if (bytes_xmit > 0) { 943 acct_info.norm_pages++; 944 } else if (bytes_xmit == 0) { 945 acct_info.dup_pages++; 946 } 947 } 948 } else { 949 /* When starting the process of a new block, the first page of 950 * the block should be sent out before other pages in the same 951 * block, and all the pages in last block should have been sent 952 * out, keeping this order is important, because the 'cont' flag 953 * is used to avoid resending the block name. 954 */ 955 if (block != last_sent_block) { 956 flush_compressed_data(f); 957 pages = save_zero_page(f, block, offset, p, bytes_transferred); 958 if (pages == -1) { 959 set_compress_params(&comp_param[0], block, offset); 960 /* Use the qemu thread to compress the data to make sure the 961 * first page is sent out before other pages 962 */ 963 bytes_xmit = do_compress_ram_page(&comp_param[0]); 964 acct_info.norm_pages++; 965 qemu_put_qemu_file(f, comp_param[0].file); 966 *bytes_transferred += bytes_xmit; 967 pages = 1; 968 } 969 } else { 970 pages = save_zero_page(f, block, offset, p, bytes_transferred); 971 if (pages == -1) { 972 pages = compress_page_with_multi_thread(f, block, offset, 973 bytes_transferred); 974 } 975 } 976 } 977 978 return pages; 979 } 980 981 /* 982 * Find the next dirty page and update any state associated with 983 * the search process. 984 * 985 * Returns: True if a page is found 986 * 987 * @f: Current migration stream. 988 * @pss: Data about the state of the current dirty page scan. 989 * @*again: Set to false if the search has scanned the whole of RAM 990 * *ram_addr_abs: Pointer into which to store the address of the dirty page 991 * within the global ram_addr space 992 */ 993 static bool find_dirty_block(QEMUFile *f, PageSearchStatus *pss, 994 bool *again, ram_addr_t *ram_addr_abs) 995 { 996 pss->offset = migration_bitmap_find_dirty(pss->block, pss->offset, 997 ram_addr_abs); 998 if (pss->complete_round && pss->block == last_seen_block && 999 pss->offset >= last_offset) { 1000 /* 1001 * We've been once around the RAM and haven't found anything. 1002 * Give up. 1003 */ 1004 *again = false; 1005 return false; 1006 } 1007 if (pss->offset >= pss->block->used_length) { 1008 /* Didn't find anything in this RAM Block */ 1009 pss->offset = 0; 1010 pss->block = QLIST_NEXT_RCU(pss->block, next); 1011 if (!pss->block) { 1012 /* Hit the end of the list */ 1013 pss->block = QLIST_FIRST_RCU(&ram_list.blocks); 1014 /* Flag that we've looped */ 1015 pss->complete_round = true; 1016 ram_bulk_stage = false; 1017 if (migrate_use_xbzrle()) { 1018 /* If xbzrle is on, stop using the data compression at this 1019 * point. In theory, xbzrle can do better than compression. 1020 */ 1021 flush_compressed_data(f); 1022 compression_switch = false; 1023 } 1024 } 1025 /* Didn't find anything this time, but try again on the new block */ 1026 *again = true; 1027 return false; 1028 } else { 1029 /* Can go around again, but... */ 1030 *again = true; 1031 /* We've found something so probably don't need to */ 1032 return true; 1033 } 1034 } 1035 1036 /* 1037 * Helper for 'get_queued_page' - gets a page off the queue 1038 * ms: MigrationState in 1039 * *offset: Used to return the offset within the RAMBlock 1040 * ram_addr_abs: global offset in the dirty/sent bitmaps 1041 * 1042 * Returns: block (or NULL if none available) 1043 */ 1044 static RAMBlock *unqueue_page(MigrationState *ms, ram_addr_t *offset, 1045 ram_addr_t *ram_addr_abs) 1046 { 1047 RAMBlock *block = NULL; 1048 1049 qemu_mutex_lock(&ms->src_page_req_mutex); 1050 if (!QSIMPLEQ_EMPTY(&ms->src_page_requests)) { 1051 struct MigrationSrcPageRequest *entry = 1052 QSIMPLEQ_FIRST(&ms->src_page_requests); 1053 block = entry->rb; 1054 *offset = entry->offset; 1055 *ram_addr_abs = (entry->offset + entry->rb->offset) & 1056 TARGET_PAGE_MASK; 1057 1058 if (entry->len > TARGET_PAGE_SIZE) { 1059 entry->len -= TARGET_PAGE_SIZE; 1060 entry->offset += TARGET_PAGE_SIZE; 1061 } else { 1062 memory_region_unref(block->mr); 1063 QSIMPLEQ_REMOVE_HEAD(&ms->src_page_requests, next_req); 1064 g_free(entry); 1065 } 1066 } 1067 qemu_mutex_unlock(&ms->src_page_req_mutex); 1068 1069 return block; 1070 } 1071 1072 /* 1073 * Unqueue a page from the queue fed by postcopy page requests; skips pages 1074 * that are already sent (!dirty) 1075 * 1076 * ms: MigrationState in 1077 * pss: PageSearchStatus structure updated with found block/offset 1078 * ram_addr_abs: global offset in the dirty/sent bitmaps 1079 * 1080 * Returns: true if a queued page is found 1081 */ 1082 static bool get_queued_page(MigrationState *ms, PageSearchStatus *pss, 1083 ram_addr_t *ram_addr_abs) 1084 { 1085 RAMBlock *block; 1086 ram_addr_t offset; 1087 bool dirty; 1088 1089 do { 1090 block = unqueue_page(ms, &offset, ram_addr_abs); 1091 /* 1092 * We're sending this page, and since it's postcopy nothing else 1093 * will dirty it, and we must make sure it doesn't get sent again 1094 * even if this queue request was received after the background 1095 * search already sent it. 1096 */ 1097 if (block) { 1098 unsigned long *bitmap; 1099 bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap; 1100 dirty = test_bit(*ram_addr_abs >> TARGET_PAGE_BITS, bitmap); 1101 if (!dirty) { 1102 trace_get_queued_page_not_dirty( 1103 block->idstr, (uint64_t)offset, 1104 (uint64_t)*ram_addr_abs, 1105 test_bit(*ram_addr_abs >> TARGET_PAGE_BITS, 1106 atomic_rcu_read(&migration_bitmap_rcu)->unsentmap)); 1107 } else { 1108 trace_get_queued_page(block->idstr, 1109 (uint64_t)offset, 1110 (uint64_t)*ram_addr_abs); 1111 } 1112 } 1113 1114 } while (block && !dirty); 1115 1116 if (block) { 1117 /* 1118 * As soon as we start servicing pages out of order, then we have 1119 * to kill the bulk stage, since the bulk stage assumes 1120 * in (migration_bitmap_find_and_reset_dirty) that every page is 1121 * dirty, that's no longer true. 1122 */ 1123 ram_bulk_stage = false; 1124 1125 /* 1126 * We want the background search to continue from the queued page 1127 * since the guest is likely to want other pages near to the page 1128 * it just requested. 1129 */ 1130 pss->block = block; 1131 pss->offset = offset; 1132 } 1133 1134 return !!block; 1135 } 1136 1137 /** 1138 * flush_page_queue: Flush any remaining pages in the ram request queue 1139 * it should be empty at the end anyway, but in error cases there may be 1140 * some left. 1141 * 1142 * ms: MigrationState 1143 */ 1144 void flush_page_queue(MigrationState *ms) 1145 { 1146 struct MigrationSrcPageRequest *mspr, *next_mspr; 1147 /* This queue generally should be empty - but in the case of a failed 1148 * migration might have some droppings in. 1149 */ 1150 rcu_read_lock(); 1151 QSIMPLEQ_FOREACH_SAFE(mspr, &ms->src_page_requests, next_req, next_mspr) { 1152 memory_region_unref(mspr->rb->mr); 1153 QSIMPLEQ_REMOVE_HEAD(&ms->src_page_requests, next_req); 1154 g_free(mspr); 1155 } 1156 rcu_read_unlock(); 1157 } 1158 1159 /** 1160 * Queue the pages for transmission, e.g. a request from postcopy destination 1161 * ms: MigrationStatus in which the queue is held 1162 * rbname: The RAMBlock the request is for - may be NULL (to mean reuse last) 1163 * start: Offset from the start of the RAMBlock 1164 * len: Length (in bytes) to send 1165 * Return: 0 on success 1166 */ 1167 int ram_save_queue_pages(MigrationState *ms, const char *rbname, 1168 ram_addr_t start, ram_addr_t len) 1169 { 1170 RAMBlock *ramblock; 1171 1172 rcu_read_lock(); 1173 if (!rbname) { 1174 /* Reuse last RAMBlock */ 1175 ramblock = ms->last_req_rb; 1176 1177 if (!ramblock) { 1178 /* 1179 * Shouldn't happen, we can't reuse the last RAMBlock if 1180 * it's the 1st request. 1181 */ 1182 error_report("ram_save_queue_pages no previous block"); 1183 goto err; 1184 } 1185 } else { 1186 ramblock = qemu_ram_block_by_name(rbname); 1187 1188 if (!ramblock) { 1189 /* We shouldn't be asked for a non-existent RAMBlock */ 1190 error_report("ram_save_queue_pages no block '%s'", rbname); 1191 goto err; 1192 } 1193 ms->last_req_rb = ramblock; 1194 } 1195 trace_ram_save_queue_pages(ramblock->idstr, start, len); 1196 if (start+len > ramblock->used_length) { 1197 error_report("%s request overrun start=" RAM_ADDR_FMT " len=" 1198 RAM_ADDR_FMT " blocklen=" RAM_ADDR_FMT, 1199 __func__, start, len, ramblock->used_length); 1200 goto err; 1201 } 1202 1203 struct MigrationSrcPageRequest *new_entry = 1204 g_malloc0(sizeof(struct MigrationSrcPageRequest)); 1205 new_entry->rb = ramblock; 1206 new_entry->offset = start; 1207 new_entry->len = len; 1208 1209 memory_region_ref(ramblock->mr); 1210 qemu_mutex_lock(&ms->src_page_req_mutex); 1211 QSIMPLEQ_INSERT_TAIL(&ms->src_page_requests, new_entry, next_req); 1212 qemu_mutex_unlock(&ms->src_page_req_mutex); 1213 rcu_read_unlock(); 1214 1215 return 0; 1216 1217 err: 1218 rcu_read_unlock(); 1219 return -1; 1220 } 1221 1222 /** 1223 * ram_save_target_page: Save one target page 1224 * 1225 * 1226 * @f: QEMUFile where to send the data 1227 * @block: pointer to block that contains the page we want to send 1228 * @offset: offset inside the block for the page; 1229 * @last_stage: if we are at the completion stage 1230 * @bytes_transferred: increase it with the number of transferred bytes 1231 * @dirty_ram_abs: Address of the start of the dirty page in ram_addr_t space 1232 * 1233 * Returns: Number of pages written. 1234 */ 1235 static int ram_save_target_page(MigrationState *ms, QEMUFile *f, 1236 PageSearchStatus *pss, 1237 bool last_stage, 1238 uint64_t *bytes_transferred, 1239 ram_addr_t dirty_ram_abs) 1240 { 1241 int res = 0; 1242 1243 /* Check the pages is dirty and if it is send it */ 1244 if (migration_bitmap_clear_dirty(dirty_ram_abs)) { 1245 unsigned long *unsentmap; 1246 if (compression_switch && migrate_use_compression()) { 1247 res = ram_save_compressed_page(f, pss, 1248 last_stage, 1249 bytes_transferred); 1250 } else { 1251 res = ram_save_page(f, pss, last_stage, 1252 bytes_transferred); 1253 } 1254 1255 if (res < 0) { 1256 return res; 1257 } 1258 unsentmap = atomic_rcu_read(&migration_bitmap_rcu)->unsentmap; 1259 if (unsentmap) { 1260 clear_bit(dirty_ram_abs >> TARGET_PAGE_BITS, unsentmap); 1261 } 1262 /* Only update last_sent_block if a block was actually sent; xbzrle 1263 * might have decided the page was identical so didn't bother writing 1264 * to the stream. 1265 */ 1266 if (res > 0) { 1267 last_sent_block = pss->block; 1268 } 1269 } 1270 1271 return res; 1272 } 1273 1274 /** 1275 * ram_save_host_page: Starting at *offset send pages upto the end 1276 * of the current host page. It's valid for the initial 1277 * offset to point into the middle of a host page 1278 * in which case the remainder of the hostpage is sent. 1279 * Only dirty target pages are sent. 1280 * 1281 * Returns: Number of pages written. 1282 * 1283 * @f: QEMUFile where to send the data 1284 * @block: pointer to block that contains the page we want to send 1285 * @offset: offset inside the block for the page; updated to last target page 1286 * sent 1287 * @last_stage: if we are at the completion stage 1288 * @bytes_transferred: increase it with the number of transferred bytes 1289 * @dirty_ram_abs: Address of the start of the dirty page in ram_addr_t space 1290 */ 1291 static int ram_save_host_page(MigrationState *ms, QEMUFile *f, 1292 PageSearchStatus *pss, 1293 bool last_stage, 1294 uint64_t *bytes_transferred, 1295 ram_addr_t dirty_ram_abs) 1296 { 1297 int tmppages, pages = 0; 1298 do { 1299 tmppages = ram_save_target_page(ms, f, pss, last_stage, 1300 bytes_transferred, dirty_ram_abs); 1301 if (tmppages < 0) { 1302 return tmppages; 1303 } 1304 1305 pages += tmppages; 1306 pss->offset += TARGET_PAGE_SIZE; 1307 dirty_ram_abs += TARGET_PAGE_SIZE; 1308 } while (pss->offset & (qemu_host_page_size - 1)); 1309 1310 /* The offset we leave with is the last one we looked at */ 1311 pss->offset -= TARGET_PAGE_SIZE; 1312 return pages; 1313 } 1314 1315 /** 1316 * ram_find_and_save_block: Finds a dirty page and sends it to f 1317 * 1318 * Called within an RCU critical section. 1319 * 1320 * Returns: The number of pages written 1321 * 0 means no dirty pages 1322 * 1323 * @f: QEMUFile where to send the data 1324 * @last_stage: if we are at the completion stage 1325 * @bytes_transferred: increase it with the number of transferred bytes 1326 * 1327 * On systems where host-page-size > target-page-size it will send all the 1328 * pages in a host page that are dirty. 1329 */ 1330 1331 static int ram_find_and_save_block(QEMUFile *f, bool last_stage, 1332 uint64_t *bytes_transferred) 1333 { 1334 PageSearchStatus pss; 1335 MigrationState *ms = migrate_get_current(); 1336 int pages = 0; 1337 bool again, found; 1338 ram_addr_t dirty_ram_abs; /* Address of the start of the dirty page in 1339 ram_addr_t space */ 1340 1341 pss.block = last_seen_block; 1342 pss.offset = last_offset; 1343 pss.complete_round = false; 1344 1345 if (!pss.block) { 1346 pss.block = QLIST_FIRST_RCU(&ram_list.blocks); 1347 } 1348 1349 do { 1350 again = true; 1351 found = get_queued_page(ms, &pss, &dirty_ram_abs); 1352 1353 if (!found) { 1354 /* priority queue empty, so just search for something dirty */ 1355 found = find_dirty_block(f, &pss, &again, &dirty_ram_abs); 1356 } 1357 1358 if (found) { 1359 pages = ram_save_host_page(ms, f, &pss, 1360 last_stage, bytes_transferred, 1361 dirty_ram_abs); 1362 } 1363 } while (!pages && again); 1364 1365 last_seen_block = pss.block; 1366 last_offset = pss.offset; 1367 1368 return pages; 1369 } 1370 1371 void acct_update_position(QEMUFile *f, size_t size, bool zero) 1372 { 1373 uint64_t pages = size / TARGET_PAGE_SIZE; 1374 if (zero) { 1375 acct_info.dup_pages += pages; 1376 } else { 1377 acct_info.norm_pages += pages; 1378 bytes_transferred += size; 1379 qemu_update_position(f, size); 1380 } 1381 } 1382 1383 static ram_addr_t ram_save_remaining(void) 1384 { 1385 return migration_dirty_pages; 1386 } 1387 1388 uint64_t ram_bytes_remaining(void) 1389 { 1390 return ram_save_remaining() * TARGET_PAGE_SIZE; 1391 } 1392 1393 uint64_t ram_bytes_transferred(void) 1394 { 1395 return bytes_transferred; 1396 } 1397 1398 uint64_t ram_bytes_total(void) 1399 { 1400 RAMBlock *block; 1401 uint64_t total = 0; 1402 1403 rcu_read_lock(); 1404 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) 1405 total += block->used_length; 1406 rcu_read_unlock(); 1407 return total; 1408 } 1409 1410 void free_xbzrle_decoded_buf(void) 1411 { 1412 g_free(xbzrle_decoded_buf); 1413 xbzrle_decoded_buf = NULL; 1414 } 1415 1416 static void migration_bitmap_free(struct BitmapRcu *bmap) 1417 { 1418 g_free(bmap->bmap); 1419 g_free(bmap->unsentmap); 1420 g_free(bmap); 1421 } 1422 1423 static void ram_migration_cleanup(void *opaque) 1424 { 1425 /* caller have hold iothread lock or is in a bh, so there is 1426 * no writing race against this migration_bitmap 1427 */ 1428 struct BitmapRcu *bitmap = migration_bitmap_rcu; 1429 atomic_rcu_set(&migration_bitmap_rcu, NULL); 1430 if (bitmap) { 1431 memory_global_dirty_log_stop(); 1432 call_rcu(bitmap, migration_bitmap_free, rcu); 1433 } 1434 1435 XBZRLE_cache_lock(); 1436 if (XBZRLE.cache) { 1437 cache_fini(XBZRLE.cache); 1438 g_free(XBZRLE.encoded_buf); 1439 g_free(XBZRLE.current_buf); 1440 XBZRLE.cache = NULL; 1441 XBZRLE.encoded_buf = NULL; 1442 XBZRLE.current_buf = NULL; 1443 } 1444 XBZRLE_cache_unlock(); 1445 } 1446 1447 static void reset_ram_globals(void) 1448 { 1449 last_seen_block = NULL; 1450 last_sent_block = NULL; 1451 last_offset = 0; 1452 last_version = ram_list.version; 1453 ram_bulk_stage = true; 1454 } 1455 1456 #define MAX_WAIT 50 /* ms, half buffered_file limit */ 1457 1458 void migration_bitmap_extend(ram_addr_t old, ram_addr_t new) 1459 { 1460 /* called in qemu main thread, so there is 1461 * no writing race against this migration_bitmap 1462 */ 1463 if (migration_bitmap_rcu) { 1464 struct BitmapRcu *old_bitmap = migration_bitmap_rcu, *bitmap; 1465 bitmap = g_new(struct BitmapRcu, 1); 1466 bitmap->bmap = bitmap_new(new); 1467 1468 /* prevent migration_bitmap content from being set bit 1469 * by migration_bitmap_sync_range() at the same time. 1470 * it is safe to migration if migration_bitmap is cleared bit 1471 * at the same time. 1472 */ 1473 qemu_mutex_lock(&migration_bitmap_mutex); 1474 bitmap_copy(bitmap->bmap, old_bitmap->bmap, old); 1475 bitmap_set(bitmap->bmap, old, new - old); 1476 1477 /* We don't have a way to safely extend the sentmap 1478 * with RCU; so mark it as missing, entry to postcopy 1479 * will fail. 1480 */ 1481 bitmap->unsentmap = NULL; 1482 1483 atomic_rcu_set(&migration_bitmap_rcu, bitmap); 1484 qemu_mutex_unlock(&migration_bitmap_mutex); 1485 migration_dirty_pages += new - old; 1486 call_rcu(old_bitmap, migration_bitmap_free, rcu); 1487 } 1488 } 1489 1490 /* 1491 * 'expected' is the value you expect the bitmap mostly to be full 1492 * of; it won't bother printing lines that are all this value. 1493 * If 'todump' is null the migration bitmap is dumped. 1494 */ 1495 void ram_debug_dump_bitmap(unsigned long *todump, bool expected) 1496 { 1497 int64_t ram_pages = last_ram_offset() >> TARGET_PAGE_BITS; 1498 1499 int64_t cur; 1500 int64_t linelen = 128; 1501 char linebuf[129]; 1502 1503 if (!todump) { 1504 todump = atomic_rcu_read(&migration_bitmap_rcu)->bmap; 1505 } 1506 1507 for (cur = 0; cur < ram_pages; cur += linelen) { 1508 int64_t curb; 1509 bool found = false; 1510 /* 1511 * Last line; catch the case where the line length 1512 * is longer than remaining ram 1513 */ 1514 if (cur + linelen > ram_pages) { 1515 linelen = ram_pages - cur; 1516 } 1517 for (curb = 0; curb < linelen; curb++) { 1518 bool thisbit = test_bit(cur + curb, todump); 1519 linebuf[curb] = thisbit ? '1' : '.'; 1520 found = found || (thisbit != expected); 1521 } 1522 if (found) { 1523 linebuf[curb] = '\0'; 1524 fprintf(stderr, "0x%08" PRIx64 " : %s\n", cur, linebuf); 1525 } 1526 } 1527 } 1528 1529 /* **** functions for postcopy ***** */ 1530 1531 /* 1532 * Callback from postcopy_each_ram_send_discard for each RAMBlock 1533 * Note: At this point the 'unsentmap' is the processed bitmap combined 1534 * with the dirtymap; so a '1' means it's either dirty or unsent. 1535 * start,length: Indexes into the bitmap for the first bit 1536 * representing the named block and length in target-pages 1537 */ 1538 static int postcopy_send_discard_bm_ram(MigrationState *ms, 1539 PostcopyDiscardState *pds, 1540 unsigned long start, 1541 unsigned long length) 1542 { 1543 unsigned long end = start + length; /* one after the end */ 1544 unsigned long current; 1545 unsigned long *unsentmap; 1546 1547 unsentmap = atomic_rcu_read(&migration_bitmap_rcu)->unsentmap; 1548 for (current = start; current < end; ) { 1549 unsigned long one = find_next_bit(unsentmap, end, current); 1550 1551 if (one <= end) { 1552 unsigned long zero = find_next_zero_bit(unsentmap, end, one + 1); 1553 unsigned long discard_length; 1554 1555 if (zero >= end) { 1556 discard_length = end - one; 1557 } else { 1558 discard_length = zero - one; 1559 } 1560 postcopy_discard_send_range(ms, pds, one, discard_length); 1561 current = one + discard_length; 1562 } else { 1563 current = one; 1564 } 1565 } 1566 1567 return 0; 1568 } 1569 1570 /* 1571 * Utility for the outgoing postcopy code. 1572 * Calls postcopy_send_discard_bm_ram for each RAMBlock 1573 * passing it bitmap indexes and name. 1574 * Returns: 0 on success 1575 * (qemu_ram_foreach_block ends up passing unscaled lengths 1576 * which would mean postcopy code would have to deal with target page) 1577 */ 1578 static int postcopy_each_ram_send_discard(MigrationState *ms) 1579 { 1580 struct RAMBlock *block; 1581 int ret; 1582 1583 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) { 1584 unsigned long first = block->offset >> TARGET_PAGE_BITS; 1585 PostcopyDiscardState *pds = postcopy_discard_send_init(ms, 1586 first, 1587 block->idstr); 1588 1589 /* 1590 * Postcopy sends chunks of bitmap over the wire, but it 1591 * just needs indexes at this point, avoids it having 1592 * target page specific code. 1593 */ 1594 ret = postcopy_send_discard_bm_ram(ms, pds, first, 1595 block->used_length >> TARGET_PAGE_BITS); 1596 postcopy_discard_send_finish(ms, pds); 1597 if (ret) { 1598 return ret; 1599 } 1600 } 1601 1602 return 0; 1603 } 1604 1605 /* 1606 * Helper for postcopy_chunk_hostpages; it's called twice to cleanup 1607 * the two bitmaps, that are similar, but one is inverted. 1608 * 1609 * We search for runs of target-pages that don't start or end on a 1610 * host page boundary; 1611 * unsent_pass=true: Cleans up partially unsent host pages by searching 1612 * the unsentmap 1613 * unsent_pass=false: Cleans up partially dirty host pages by searching 1614 * the main migration bitmap 1615 * 1616 */ 1617 static void postcopy_chunk_hostpages_pass(MigrationState *ms, bool unsent_pass, 1618 RAMBlock *block, 1619 PostcopyDiscardState *pds) 1620 { 1621 unsigned long *bitmap; 1622 unsigned long *unsentmap; 1623 unsigned int host_ratio = qemu_host_page_size / TARGET_PAGE_SIZE; 1624 unsigned long first = block->offset >> TARGET_PAGE_BITS; 1625 unsigned long len = block->used_length >> TARGET_PAGE_BITS; 1626 unsigned long last = first + (len - 1); 1627 unsigned long run_start; 1628 1629 bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap; 1630 unsentmap = atomic_rcu_read(&migration_bitmap_rcu)->unsentmap; 1631 1632 if (unsent_pass) { 1633 /* Find a sent page */ 1634 run_start = find_next_zero_bit(unsentmap, last + 1, first); 1635 } else { 1636 /* Find a dirty page */ 1637 run_start = find_next_bit(bitmap, last + 1, first); 1638 } 1639 1640 while (run_start <= last) { 1641 bool do_fixup = false; 1642 unsigned long fixup_start_addr; 1643 unsigned long host_offset; 1644 1645 /* 1646 * If the start of this run of pages is in the middle of a host 1647 * page, then we need to fixup this host page. 1648 */ 1649 host_offset = run_start % host_ratio; 1650 if (host_offset) { 1651 do_fixup = true; 1652 run_start -= host_offset; 1653 fixup_start_addr = run_start; 1654 /* For the next pass */ 1655 run_start = run_start + host_ratio; 1656 } else { 1657 /* Find the end of this run */ 1658 unsigned long run_end; 1659 if (unsent_pass) { 1660 run_end = find_next_bit(unsentmap, last + 1, run_start + 1); 1661 } else { 1662 run_end = find_next_zero_bit(bitmap, last + 1, run_start + 1); 1663 } 1664 /* 1665 * If the end isn't at the start of a host page, then the 1666 * run doesn't finish at the end of a host page 1667 * and we need to discard. 1668 */ 1669 host_offset = run_end % host_ratio; 1670 if (host_offset) { 1671 do_fixup = true; 1672 fixup_start_addr = run_end - host_offset; 1673 /* 1674 * This host page has gone, the next loop iteration starts 1675 * from after the fixup 1676 */ 1677 run_start = fixup_start_addr + host_ratio; 1678 } else { 1679 /* 1680 * No discards on this iteration, next loop starts from 1681 * next sent/dirty page 1682 */ 1683 run_start = run_end + 1; 1684 } 1685 } 1686 1687 if (do_fixup) { 1688 unsigned long page; 1689 1690 /* Tell the destination to discard this page */ 1691 if (unsent_pass || !test_bit(fixup_start_addr, unsentmap)) { 1692 /* For the unsent_pass we: 1693 * discard partially sent pages 1694 * For the !unsent_pass (dirty) we: 1695 * discard partially dirty pages that were sent 1696 * (any partially sent pages were already discarded 1697 * by the previous unsent_pass) 1698 */ 1699 postcopy_discard_send_range(ms, pds, fixup_start_addr, 1700 host_ratio); 1701 } 1702 1703 /* Clean up the bitmap */ 1704 for (page = fixup_start_addr; 1705 page < fixup_start_addr + host_ratio; page++) { 1706 /* All pages in this host page are now not sent */ 1707 set_bit(page, unsentmap); 1708 1709 /* 1710 * Remark them as dirty, updating the count for any pages 1711 * that weren't previously dirty. 1712 */ 1713 migration_dirty_pages += !test_and_set_bit(page, bitmap); 1714 } 1715 } 1716 1717 if (unsent_pass) { 1718 /* Find the next sent page for the next iteration */ 1719 run_start = find_next_zero_bit(unsentmap, last + 1, 1720 run_start); 1721 } else { 1722 /* Find the next dirty page for the next iteration */ 1723 run_start = find_next_bit(bitmap, last + 1, run_start); 1724 } 1725 } 1726 } 1727 1728 /* 1729 * Utility for the outgoing postcopy code. 1730 * 1731 * Discard any partially sent host-page size chunks, mark any partially 1732 * dirty host-page size chunks as all dirty. 1733 * 1734 * Returns: 0 on success 1735 */ 1736 static int postcopy_chunk_hostpages(MigrationState *ms) 1737 { 1738 struct RAMBlock *block; 1739 1740 if (qemu_host_page_size == TARGET_PAGE_SIZE) { 1741 /* Easy case - TPS==HPS - nothing to be done */ 1742 return 0; 1743 } 1744 1745 /* Easiest way to make sure we don't resume in the middle of a host-page */ 1746 last_seen_block = NULL; 1747 last_sent_block = NULL; 1748 last_offset = 0; 1749 1750 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) { 1751 unsigned long first = block->offset >> TARGET_PAGE_BITS; 1752 1753 PostcopyDiscardState *pds = 1754 postcopy_discard_send_init(ms, first, block->idstr); 1755 1756 /* First pass: Discard all partially sent host pages */ 1757 postcopy_chunk_hostpages_pass(ms, true, block, pds); 1758 /* 1759 * Second pass: Ensure that all partially dirty host pages are made 1760 * fully dirty. 1761 */ 1762 postcopy_chunk_hostpages_pass(ms, false, block, pds); 1763 1764 postcopy_discard_send_finish(ms, pds); 1765 } /* ram_list loop */ 1766 1767 return 0; 1768 } 1769 1770 /* 1771 * Transmit the set of pages to be discarded after precopy to the target 1772 * these are pages that: 1773 * a) Have been previously transmitted but are now dirty again 1774 * b) Pages that have never been transmitted, this ensures that 1775 * any pages on the destination that have been mapped by background 1776 * tasks get discarded (transparent huge pages is the specific concern) 1777 * Hopefully this is pretty sparse 1778 */ 1779 int ram_postcopy_send_discard_bitmap(MigrationState *ms) 1780 { 1781 int ret; 1782 unsigned long *bitmap, *unsentmap; 1783 1784 rcu_read_lock(); 1785 1786 /* This should be our last sync, the src is now paused */ 1787 migration_bitmap_sync(); 1788 1789 unsentmap = atomic_rcu_read(&migration_bitmap_rcu)->unsentmap; 1790 if (!unsentmap) { 1791 /* We don't have a safe way to resize the sentmap, so 1792 * if the bitmap was resized it will be NULL at this 1793 * point. 1794 */ 1795 error_report("migration ram resized during precopy phase"); 1796 rcu_read_unlock(); 1797 return -EINVAL; 1798 } 1799 1800 /* Deal with TPS != HPS */ 1801 ret = postcopy_chunk_hostpages(ms); 1802 if (ret) { 1803 rcu_read_unlock(); 1804 return ret; 1805 } 1806 1807 /* 1808 * Update the unsentmap to be unsentmap = unsentmap | dirty 1809 */ 1810 bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap; 1811 bitmap_or(unsentmap, unsentmap, bitmap, 1812 last_ram_offset() >> TARGET_PAGE_BITS); 1813 1814 1815 trace_ram_postcopy_send_discard_bitmap(); 1816 #ifdef DEBUG_POSTCOPY 1817 ram_debug_dump_bitmap(unsentmap, true); 1818 #endif 1819 1820 ret = postcopy_each_ram_send_discard(ms); 1821 rcu_read_unlock(); 1822 1823 return ret; 1824 } 1825 1826 /* 1827 * At the start of the postcopy phase of migration, any now-dirty 1828 * precopied pages are discarded. 1829 * 1830 * start, length describe a byte address range within the RAMBlock 1831 * 1832 * Returns 0 on success. 1833 */ 1834 int ram_discard_range(MigrationIncomingState *mis, 1835 const char *block_name, 1836 uint64_t start, size_t length) 1837 { 1838 int ret = -1; 1839 1840 rcu_read_lock(); 1841 RAMBlock *rb = qemu_ram_block_by_name(block_name); 1842 1843 if (!rb) { 1844 error_report("ram_discard_range: Failed to find block '%s'", 1845 block_name); 1846 goto err; 1847 } 1848 1849 uint8_t *host_startaddr = rb->host + start; 1850 1851 if ((uintptr_t)host_startaddr & (qemu_host_page_size - 1)) { 1852 error_report("ram_discard_range: Unaligned start address: %p", 1853 host_startaddr); 1854 goto err; 1855 } 1856 1857 if ((start + length) <= rb->used_length) { 1858 uint8_t *host_endaddr = host_startaddr + length; 1859 if ((uintptr_t)host_endaddr & (qemu_host_page_size - 1)) { 1860 error_report("ram_discard_range: Unaligned end address: %p", 1861 host_endaddr); 1862 goto err; 1863 } 1864 ret = postcopy_ram_discard_range(mis, host_startaddr, length); 1865 } else { 1866 error_report("ram_discard_range: Overrun block '%s' (%" PRIu64 1867 "/%zx/" RAM_ADDR_FMT")", 1868 block_name, start, length, rb->used_length); 1869 } 1870 1871 err: 1872 rcu_read_unlock(); 1873 1874 return ret; 1875 } 1876 1877 1878 /* Each of ram_save_setup, ram_save_iterate and ram_save_complete has 1879 * long-running RCU critical section. When rcu-reclaims in the code 1880 * start to become numerous it will be necessary to reduce the 1881 * granularity of these critical sections. 1882 */ 1883 1884 static int ram_save_setup(QEMUFile *f, void *opaque) 1885 { 1886 RAMBlock *block; 1887 int64_t ram_bitmap_pages; /* Size of bitmap in pages, including gaps */ 1888 1889 dirty_rate_high_cnt = 0; 1890 bitmap_sync_count = 0; 1891 migration_bitmap_sync_init(); 1892 qemu_mutex_init(&migration_bitmap_mutex); 1893 1894 if (migrate_use_xbzrle()) { 1895 XBZRLE_cache_lock(); 1896 XBZRLE.cache = cache_init(migrate_xbzrle_cache_size() / 1897 TARGET_PAGE_SIZE, 1898 TARGET_PAGE_SIZE); 1899 if (!XBZRLE.cache) { 1900 XBZRLE_cache_unlock(); 1901 error_report("Error creating cache"); 1902 return -1; 1903 } 1904 XBZRLE_cache_unlock(); 1905 1906 /* We prefer not to abort if there is no memory */ 1907 XBZRLE.encoded_buf = g_try_malloc0(TARGET_PAGE_SIZE); 1908 if (!XBZRLE.encoded_buf) { 1909 error_report("Error allocating encoded_buf"); 1910 return -1; 1911 } 1912 1913 XBZRLE.current_buf = g_try_malloc(TARGET_PAGE_SIZE); 1914 if (!XBZRLE.current_buf) { 1915 error_report("Error allocating current_buf"); 1916 g_free(XBZRLE.encoded_buf); 1917 XBZRLE.encoded_buf = NULL; 1918 return -1; 1919 } 1920 1921 acct_clear(); 1922 } 1923 1924 /* iothread lock needed for ram_list.dirty_memory[] */ 1925 qemu_mutex_lock_iothread(); 1926 qemu_mutex_lock_ramlist(); 1927 rcu_read_lock(); 1928 bytes_transferred = 0; 1929 reset_ram_globals(); 1930 1931 ram_bitmap_pages = last_ram_offset() >> TARGET_PAGE_BITS; 1932 migration_bitmap_rcu = g_new0(struct BitmapRcu, 1); 1933 migration_bitmap_rcu->bmap = bitmap_new(ram_bitmap_pages); 1934 bitmap_set(migration_bitmap_rcu->bmap, 0, ram_bitmap_pages); 1935 1936 if (migrate_postcopy_ram()) { 1937 migration_bitmap_rcu->unsentmap = bitmap_new(ram_bitmap_pages); 1938 bitmap_set(migration_bitmap_rcu->unsentmap, 0, ram_bitmap_pages); 1939 } 1940 1941 /* 1942 * Count the total number of pages used by ram blocks not including any 1943 * gaps due to alignment or unplugs. 1944 */ 1945 migration_dirty_pages = ram_bytes_total() >> TARGET_PAGE_BITS; 1946 1947 memory_global_dirty_log_start(); 1948 migration_bitmap_sync(); 1949 qemu_mutex_unlock_ramlist(); 1950 qemu_mutex_unlock_iothread(); 1951 1952 qemu_put_be64(f, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE); 1953 1954 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) { 1955 qemu_put_byte(f, strlen(block->idstr)); 1956 qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr)); 1957 qemu_put_be64(f, block->used_length); 1958 } 1959 1960 rcu_read_unlock(); 1961 1962 ram_control_before_iterate(f, RAM_CONTROL_SETUP); 1963 ram_control_after_iterate(f, RAM_CONTROL_SETUP); 1964 1965 qemu_put_be64(f, RAM_SAVE_FLAG_EOS); 1966 1967 return 0; 1968 } 1969 1970 static int ram_save_iterate(QEMUFile *f, void *opaque) 1971 { 1972 int ret; 1973 int i; 1974 int64_t t0; 1975 int pages_sent = 0; 1976 1977 rcu_read_lock(); 1978 if (ram_list.version != last_version) { 1979 reset_ram_globals(); 1980 } 1981 1982 /* Read version before ram_list.blocks */ 1983 smp_rmb(); 1984 1985 ram_control_before_iterate(f, RAM_CONTROL_ROUND); 1986 1987 t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME); 1988 i = 0; 1989 while ((ret = qemu_file_rate_limit(f)) == 0) { 1990 int pages; 1991 1992 pages = ram_find_and_save_block(f, false, &bytes_transferred); 1993 /* no more pages to sent */ 1994 if (pages == 0) { 1995 break; 1996 } 1997 pages_sent += pages; 1998 acct_info.iterations++; 1999 2000 /* we want to check in the 1st loop, just in case it was the 1st time 2001 and we had to sync the dirty bitmap. 2002 qemu_get_clock_ns() is a bit expensive, so we only check each some 2003 iterations 2004 */ 2005 if ((i & 63) == 0) { 2006 uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) / 1000000; 2007 if (t1 > MAX_WAIT) { 2008 DPRINTF("big wait: %" PRIu64 " milliseconds, %d iterations\n", 2009 t1, i); 2010 break; 2011 } 2012 } 2013 i++; 2014 } 2015 flush_compressed_data(f); 2016 rcu_read_unlock(); 2017 2018 /* 2019 * Must occur before EOS (or any QEMUFile operation) 2020 * because of RDMA protocol. 2021 */ 2022 ram_control_after_iterate(f, RAM_CONTROL_ROUND); 2023 2024 qemu_put_be64(f, RAM_SAVE_FLAG_EOS); 2025 bytes_transferred += 8; 2026 2027 ret = qemu_file_get_error(f); 2028 if (ret < 0) { 2029 return ret; 2030 } 2031 2032 return pages_sent; 2033 } 2034 2035 /* Called with iothread lock */ 2036 static int ram_save_complete(QEMUFile *f, void *opaque) 2037 { 2038 rcu_read_lock(); 2039 2040 if (!migration_in_postcopy(migrate_get_current())) { 2041 migration_bitmap_sync(); 2042 } 2043 2044 ram_control_before_iterate(f, RAM_CONTROL_FINISH); 2045 2046 /* try transferring iterative blocks of memory */ 2047 2048 /* flush all remaining blocks regardless of rate limiting */ 2049 while (true) { 2050 int pages; 2051 2052 pages = ram_find_and_save_block(f, true, &bytes_transferred); 2053 /* no more blocks to sent */ 2054 if (pages == 0) { 2055 break; 2056 } 2057 } 2058 2059 flush_compressed_data(f); 2060 ram_control_after_iterate(f, RAM_CONTROL_FINISH); 2061 2062 rcu_read_unlock(); 2063 2064 qemu_put_be64(f, RAM_SAVE_FLAG_EOS); 2065 2066 return 0; 2067 } 2068 2069 static void ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size, 2070 uint64_t *non_postcopiable_pending, 2071 uint64_t *postcopiable_pending) 2072 { 2073 uint64_t remaining_size; 2074 2075 remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE; 2076 2077 if (!migration_in_postcopy(migrate_get_current()) && 2078 remaining_size < max_size) { 2079 qemu_mutex_lock_iothread(); 2080 rcu_read_lock(); 2081 migration_bitmap_sync(); 2082 rcu_read_unlock(); 2083 qemu_mutex_unlock_iothread(); 2084 remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE; 2085 } 2086 2087 /* We can do postcopy, and all the data is postcopiable */ 2088 *postcopiable_pending += remaining_size; 2089 } 2090 2091 static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host) 2092 { 2093 unsigned int xh_len; 2094 int xh_flags; 2095 uint8_t *loaded_data; 2096 2097 if (!xbzrle_decoded_buf) { 2098 xbzrle_decoded_buf = g_malloc(TARGET_PAGE_SIZE); 2099 } 2100 loaded_data = xbzrle_decoded_buf; 2101 2102 /* extract RLE header */ 2103 xh_flags = qemu_get_byte(f); 2104 xh_len = qemu_get_be16(f); 2105 2106 if (xh_flags != ENCODING_FLAG_XBZRLE) { 2107 error_report("Failed to load XBZRLE page - wrong compression!"); 2108 return -1; 2109 } 2110 2111 if (xh_len > TARGET_PAGE_SIZE) { 2112 error_report("Failed to load XBZRLE page - len overflow!"); 2113 return -1; 2114 } 2115 /* load data and decode */ 2116 qemu_get_buffer_in_place(f, &loaded_data, xh_len); 2117 2118 /* decode RLE */ 2119 if (xbzrle_decode_buffer(loaded_data, xh_len, host, 2120 TARGET_PAGE_SIZE) == -1) { 2121 error_report("Failed to load XBZRLE page - decode error!"); 2122 return -1; 2123 } 2124 2125 return 0; 2126 } 2127 2128 /* Must be called from within a rcu critical section. 2129 * Returns a pointer from within the RCU-protected ram_list. 2130 */ 2131 /* 2132 * Read a RAMBlock ID from the stream f. 2133 * 2134 * f: Stream to read from 2135 * flags: Page flags (mostly to see if it's a continuation of previous block) 2136 */ 2137 static inline RAMBlock *ram_block_from_stream(QEMUFile *f, 2138 int flags) 2139 { 2140 static RAMBlock *block = NULL; 2141 char id[256]; 2142 uint8_t len; 2143 2144 if (flags & RAM_SAVE_FLAG_CONTINUE) { 2145 if (!block) { 2146 error_report("Ack, bad migration stream!"); 2147 return NULL; 2148 } 2149 return block; 2150 } 2151 2152 len = qemu_get_byte(f); 2153 qemu_get_buffer(f, (uint8_t *)id, len); 2154 id[len] = 0; 2155 2156 block = qemu_ram_block_by_name(id); 2157 if (!block) { 2158 error_report("Can't find block %s", id); 2159 return NULL; 2160 } 2161 2162 return block; 2163 } 2164 2165 static inline void *host_from_ram_block_offset(RAMBlock *block, 2166 ram_addr_t offset) 2167 { 2168 if (!offset_in_ramblock(block, offset)) { 2169 return NULL; 2170 } 2171 2172 return block->host + offset; 2173 } 2174 2175 /* 2176 * If a page (or a whole RDMA chunk) has been 2177 * determined to be zero, then zap it. 2178 */ 2179 void ram_handle_compressed(void *host, uint8_t ch, uint64_t size) 2180 { 2181 if (ch != 0 || !is_zero_range(host, size)) { 2182 memset(host, ch, size); 2183 } 2184 } 2185 2186 static void *do_data_decompress(void *opaque) 2187 { 2188 DecompressParam *param = opaque; 2189 unsigned long pagesize; 2190 2191 while (!quit_decomp_thread) { 2192 qemu_mutex_lock(¶m->mutex); 2193 while (!param->start && !quit_decomp_thread) { 2194 qemu_cond_wait(¶m->cond, ¶m->mutex); 2195 pagesize = TARGET_PAGE_SIZE; 2196 if (!quit_decomp_thread) { 2197 /* uncompress() will return failed in some case, especially 2198 * when the page is dirted when doing the compression, it's 2199 * not a problem because the dirty page will be retransferred 2200 * and uncompress() won't break the data in other pages. 2201 */ 2202 uncompress((Bytef *)param->des, &pagesize, 2203 (const Bytef *)param->compbuf, param->len); 2204 } 2205 param->start = false; 2206 } 2207 qemu_mutex_unlock(¶m->mutex); 2208 } 2209 2210 return NULL; 2211 } 2212 2213 void migrate_decompress_threads_create(void) 2214 { 2215 int i, thread_count; 2216 2217 thread_count = migrate_decompress_threads(); 2218 decompress_threads = g_new0(QemuThread, thread_count); 2219 decomp_param = g_new0(DecompressParam, thread_count); 2220 quit_decomp_thread = false; 2221 for (i = 0; i < thread_count; i++) { 2222 qemu_mutex_init(&decomp_param[i].mutex); 2223 qemu_cond_init(&decomp_param[i].cond); 2224 decomp_param[i].compbuf = g_malloc0(compressBound(TARGET_PAGE_SIZE)); 2225 qemu_thread_create(decompress_threads + i, "decompress", 2226 do_data_decompress, decomp_param + i, 2227 QEMU_THREAD_JOINABLE); 2228 } 2229 } 2230 2231 void migrate_decompress_threads_join(void) 2232 { 2233 int i, thread_count; 2234 2235 quit_decomp_thread = true; 2236 thread_count = migrate_decompress_threads(); 2237 for (i = 0; i < thread_count; i++) { 2238 qemu_mutex_lock(&decomp_param[i].mutex); 2239 qemu_cond_signal(&decomp_param[i].cond); 2240 qemu_mutex_unlock(&decomp_param[i].mutex); 2241 } 2242 for (i = 0; i < thread_count; i++) { 2243 qemu_thread_join(decompress_threads + i); 2244 qemu_mutex_destroy(&decomp_param[i].mutex); 2245 qemu_cond_destroy(&decomp_param[i].cond); 2246 g_free(decomp_param[i].compbuf); 2247 } 2248 g_free(decompress_threads); 2249 g_free(decomp_param); 2250 decompress_threads = NULL; 2251 decomp_param = NULL; 2252 } 2253 2254 static void decompress_data_with_multi_threads(QEMUFile *f, 2255 void *host, int len) 2256 { 2257 int idx, thread_count; 2258 2259 thread_count = migrate_decompress_threads(); 2260 while (true) { 2261 for (idx = 0; idx < thread_count; idx++) { 2262 if (!decomp_param[idx].start) { 2263 qemu_get_buffer(f, decomp_param[idx].compbuf, len); 2264 decomp_param[idx].des = host; 2265 decomp_param[idx].len = len; 2266 start_decompression(&decomp_param[idx]); 2267 break; 2268 } 2269 } 2270 if (idx < thread_count) { 2271 break; 2272 } 2273 } 2274 } 2275 2276 /* 2277 * Allocate data structures etc needed by incoming migration with postcopy-ram 2278 * postcopy-ram's similarly names postcopy_ram_incoming_init does the work 2279 */ 2280 int ram_postcopy_incoming_init(MigrationIncomingState *mis) 2281 { 2282 size_t ram_pages = last_ram_offset() >> TARGET_PAGE_BITS; 2283 2284 return postcopy_ram_incoming_init(mis, ram_pages); 2285 } 2286 2287 /* 2288 * Called in postcopy mode by ram_load(). 2289 * rcu_read_lock is taken prior to this being called. 2290 */ 2291 static int ram_load_postcopy(QEMUFile *f) 2292 { 2293 int flags = 0, ret = 0; 2294 bool place_needed = false; 2295 bool matching_page_sizes = qemu_host_page_size == TARGET_PAGE_SIZE; 2296 MigrationIncomingState *mis = migration_incoming_get_current(); 2297 /* Temporary page that is later 'placed' */ 2298 void *postcopy_host_page = postcopy_get_tmp_page(mis); 2299 void *last_host = NULL; 2300 bool all_zero = false; 2301 2302 while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) { 2303 ram_addr_t addr; 2304 void *host = NULL; 2305 void *page_buffer = NULL; 2306 void *place_source = NULL; 2307 uint8_t ch; 2308 2309 addr = qemu_get_be64(f); 2310 flags = addr & ~TARGET_PAGE_MASK; 2311 addr &= TARGET_PAGE_MASK; 2312 2313 trace_ram_load_postcopy_loop((uint64_t)addr, flags); 2314 place_needed = false; 2315 if (flags & (RAM_SAVE_FLAG_COMPRESS | RAM_SAVE_FLAG_PAGE)) { 2316 RAMBlock *block = ram_block_from_stream(f, flags); 2317 2318 host = host_from_ram_block_offset(block, addr); 2319 if (!host) { 2320 error_report("Illegal RAM offset " RAM_ADDR_FMT, addr); 2321 ret = -EINVAL; 2322 break; 2323 } 2324 page_buffer = host; 2325 /* 2326 * Postcopy requires that we place whole host pages atomically. 2327 * To make it atomic, the data is read into a temporary page 2328 * that's moved into place later. 2329 * The migration protocol uses, possibly smaller, target-pages 2330 * however the source ensures it always sends all the components 2331 * of a host page in order. 2332 */ 2333 page_buffer = postcopy_host_page + 2334 ((uintptr_t)host & ~qemu_host_page_mask); 2335 /* If all TP are zero then we can optimise the place */ 2336 if (!((uintptr_t)host & ~qemu_host_page_mask)) { 2337 all_zero = true; 2338 } else { 2339 /* not the 1st TP within the HP */ 2340 if (host != (last_host + TARGET_PAGE_SIZE)) { 2341 error_report("Non-sequential target page %p/%p", 2342 host, last_host); 2343 ret = -EINVAL; 2344 break; 2345 } 2346 } 2347 2348 2349 /* 2350 * If it's the last part of a host page then we place the host 2351 * page 2352 */ 2353 place_needed = (((uintptr_t)host + TARGET_PAGE_SIZE) & 2354 ~qemu_host_page_mask) == 0; 2355 place_source = postcopy_host_page; 2356 } 2357 last_host = host; 2358 2359 switch (flags & ~RAM_SAVE_FLAG_CONTINUE) { 2360 case RAM_SAVE_FLAG_COMPRESS: 2361 ch = qemu_get_byte(f); 2362 memset(page_buffer, ch, TARGET_PAGE_SIZE); 2363 if (ch) { 2364 all_zero = false; 2365 } 2366 break; 2367 2368 case RAM_SAVE_FLAG_PAGE: 2369 all_zero = false; 2370 if (!place_needed || !matching_page_sizes) { 2371 qemu_get_buffer(f, page_buffer, TARGET_PAGE_SIZE); 2372 } else { 2373 /* Avoids the qemu_file copy during postcopy, which is 2374 * going to do a copy later; can only do it when we 2375 * do this read in one go (matching page sizes) 2376 */ 2377 qemu_get_buffer_in_place(f, (uint8_t **)&place_source, 2378 TARGET_PAGE_SIZE); 2379 } 2380 break; 2381 case RAM_SAVE_FLAG_EOS: 2382 /* normal exit */ 2383 break; 2384 default: 2385 error_report("Unknown combination of migration flags: %#x" 2386 " (postcopy mode)", flags); 2387 ret = -EINVAL; 2388 } 2389 2390 if (place_needed) { 2391 /* This gets called at the last target page in the host page */ 2392 if (all_zero) { 2393 ret = postcopy_place_page_zero(mis, 2394 host + TARGET_PAGE_SIZE - 2395 qemu_host_page_size); 2396 } else { 2397 ret = postcopy_place_page(mis, host + TARGET_PAGE_SIZE - 2398 qemu_host_page_size, 2399 place_source); 2400 } 2401 } 2402 if (!ret) { 2403 ret = qemu_file_get_error(f); 2404 } 2405 } 2406 2407 return ret; 2408 } 2409 2410 static int ram_load(QEMUFile *f, void *opaque, int version_id) 2411 { 2412 int flags = 0, ret = 0; 2413 static uint64_t seq_iter; 2414 int len = 0; 2415 /* 2416 * If system is running in postcopy mode, page inserts to host memory must 2417 * be atomic 2418 */ 2419 bool postcopy_running = postcopy_state_get() >= POSTCOPY_INCOMING_LISTENING; 2420 2421 seq_iter++; 2422 2423 if (version_id != 4) { 2424 ret = -EINVAL; 2425 } 2426 2427 /* This RCU critical section can be very long running. 2428 * When RCU reclaims in the code start to become numerous, 2429 * it will be necessary to reduce the granularity of this 2430 * critical section. 2431 */ 2432 rcu_read_lock(); 2433 2434 if (postcopy_running) { 2435 ret = ram_load_postcopy(f); 2436 } 2437 2438 while (!postcopy_running && !ret && !(flags & RAM_SAVE_FLAG_EOS)) { 2439 ram_addr_t addr, total_ram_bytes; 2440 void *host = NULL; 2441 uint8_t ch; 2442 2443 addr = qemu_get_be64(f); 2444 flags = addr & ~TARGET_PAGE_MASK; 2445 addr &= TARGET_PAGE_MASK; 2446 2447 if (flags & (RAM_SAVE_FLAG_COMPRESS | RAM_SAVE_FLAG_PAGE | 2448 RAM_SAVE_FLAG_COMPRESS_PAGE | RAM_SAVE_FLAG_XBZRLE)) { 2449 RAMBlock *block = ram_block_from_stream(f, flags); 2450 2451 host = host_from_ram_block_offset(block, addr); 2452 if (!host) { 2453 error_report("Illegal RAM offset " RAM_ADDR_FMT, addr); 2454 ret = -EINVAL; 2455 break; 2456 } 2457 } 2458 2459 switch (flags & ~RAM_SAVE_FLAG_CONTINUE) { 2460 case RAM_SAVE_FLAG_MEM_SIZE: 2461 /* Synchronize RAM block list */ 2462 total_ram_bytes = addr; 2463 while (!ret && total_ram_bytes) { 2464 RAMBlock *block; 2465 char id[256]; 2466 ram_addr_t length; 2467 2468 len = qemu_get_byte(f); 2469 qemu_get_buffer(f, (uint8_t *)id, len); 2470 id[len] = 0; 2471 length = qemu_get_be64(f); 2472 2473 block = qemu_ram_block_by_name(id); 2474 if (block) { 2475 if (length != block->used_length) { 2476 Error *local_err = NULL; 2477 2478 ret = qemu_ram_resize(block->offset, length, 2479 &local_err); 2480 if (local_err) { 2481 error_report_err(local_err); 2482 } 2483 } 2484 ram_control_load_hook(f, RAM_CONTROL_BLOCK_REG, 2485 block->idstr); 2486 } else { 2487 error_report("Unknown ramblock \"%s\", cannot " 2488 "accept migration", id); 2489 ret = -EINVAL; 2490 } 2491 2492 total_ram_bytes -= length; 2493 } 2494 break; 2495 2496 case RAM_SAVE_FLAG_COMPRESS: 2497 ch = qemu_get_byte(f); 2498 ram_handle_compressed(host, ch, TARGET_PAGE_SIZE); 2499 break; 2500 2501 case RAM_SAVE_FLAG_PAGE: 2502 qemu_get_buffer(f, host, TARGET_PAGE_SIZE); 2503 break; 2504 2505 case RAM_SAVE_FLAG_COMPRESS_PAGE: 2506 len = qemu_get_be32(f); 2507 if (len < 0 || len > compressBound(TARGET_PAGE_SIZE)) { 2508 error_report("Invalid compressed data length: %d", len); 2509 ret = -EINVAL; 2510 break; 2511 } 2512 decompress_data_with_multi_threads(f, host, len); 2513 break; 2514 2515 case RAM_SAVE_FLAG_XBZRLE: 2516 if (load_xbzrle(f, addr, host) < 0) { 2517 error_report("Failed to decompress XBZRLE page at " 2518 RAM_ADDR_FMT, addr); 2519 ret = -EINVAL; 2520 break; 2521 } 2522 break; 2523 case RAM_SAVE_FLAG_EOS: 2524 /* normal exit */ 2525 break; 2526 default: 2527 if (flags & RAM_SAVE_FLAG_HOOK) { 2528 ram_control_load_hook(f, RAM_CONTROL_HOOK, NULL); 2529 } else { 2530 error_report("Unknown combination of migration flags: %#x", 2531 flags); 2532 ret = -EINVAL; 2533 } 2534 } 2535 if (!ret) { 2536 ret = qemu_file_get_error(f); 2537 } 2538 } 2539 2540 rcu_read_unlock(); 2541 DPRINTF("Completed load of VM with exit code %d seq iteration " 2542 "%" PRIu64 "\n", ret, seq_iter); 2543 return ret; 2544 } 2545 2546 static SaveVMHandlers savevm_ram_handlers = { 2547 .save_live_setup = ram_save_setup, 2548 .save_live_iterate = ram_save_iterate, 2549 .save_live_complete_postcopy = ram_save_complete, 2550 .save_live_complete_precopy = ram_save_complete, 2551 .save_live_pending = ram_save_pending, 2552 .load_state = ram_load, 2553 .cleanup = ram_migration_cleanup, 2554 }; 2555 2556 void ram_mig_init(void) 2557 { 2558 qemu_mutex_init(&XBZRLE.lock); 2559 register_savevm_live(NULL, "ram", 0, 4, &savevm_ram_handlers, NULL); 2560 } 2561