/*
 * QEMU live migration
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "migration/blocker.h"
#include "exec.h"
#include "fd.h"
#include "socket.h"
#include "sysemu/runstate.h"
#include "sysemu/sysemu.h"
#include "sysemu/cpu-throttle.h"
#include "rdma.h"
#include "ram.h"
#include "migration/global_state.h"
#include "migration/misc.h"
#include "migration.h"
#include "savevm.h"
#include "qemu-file-channel.h"
#include "qemu-file.h"
#include "migration/vmstate.h"
#include "block/block.h"
#include "qapi/error.h"
#include "qapi/clone-visitor.h"
#include "qapi/qapi-visit-migration.h"
#include "qapi/qapi-visit-sockets.h"
#include "qapi/qapi-commands-migration.h"
#include "qapi/qapi-events-migration.h"
#include "qapi/qmp/qerror.h"
#include "qapi/qmp/qnull.h"
#include "qemu/rcu.h"
#include "block.h"
#include "postcopy-ram.h"
#include "qemu/thread.h"
#include "trace.h"
#include "exec/target_page.h"
#include "io/channel-buffer.h"
#include "migration/colo.h"
#include "hw/boards.h"
#include "hw/qdev-properties.h"
#include "hw/qdev-properties-system.h"
#include "monitor/monitor.h"
#include "net/announce.h"
#include "qemu/queue.h"
#include "multifd.h"
#include "qemu/yank.h"
#include "sysemu/cpus.h"
#include "yank_functions.h"

#define MAX_THROTTLE  (128 << 20)      /* Migration transfer speed throttling */

/* Amount of time to allocate to each "chunk" of bandwidth-throttled
 * data. */
#define BUFFER_DELAY     100
#define XFER_LIMIT_RATIO (1000 / BUFFER_DELAY)
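/*
 * Illustrative arithmetic (an explanatory aside, not upstream documentation):
 * the outgoing QEMUFile's rate limit is set to max_bandwidth / XFER_LIMIT_RATIO
 * in migrate_params_apply(), i.e. the number of bytes allowed per
 * BUFFER_DELAY (100 ms) chunk.  With BUFFER_DELAY = 100 there are
 * 1000 / 100 = 10 chunks per second, so the default MAX_THROTTLE of
 * 128 MiB/s allows 12.8 MiB per chunk.
 */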
/* Time in milliseconds we are allowed to stop the source,
 * for sending the last part */
#define DEFAULT_MIGRATE_SET_DOWNTIME 300

/* Maximum migrate downtime set to 2000 seconds */
#define MAX_MIGRATE_DOWNTIME_SECONDS 2000
#define MAX_MIGRATE_DOWNTIME (MAX_MIGRATE_DOWNTIME_SECONDS * 1000)

/* Default compression thread count */
#define DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT 8
/* Default decompression thread count, usually decompression is at
 * least 4 times as fast as compression.*/
#define DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT 2
/* 0: means nocompress, 1: best speed, ... 9: best compress ratio */
#define DEFAULT_MIGRATE_COMPRESS_LEVEL 1
/* Define default autoconverge cpu throttle migration parameters */
#define DEFAULT_MIGRATE_THROTTLE_TRIGGER_THRESHOLD 50
#define DEFAULT_MIGRATE_CPU_THROTTLE_INITIAL 20
#define DEFAULT_MIGRATE_CPU_THROTTLE_INCREMENT 10
#define DEFAULT_MIGRATE_MAX_CPU_THROTTLE 99

/* Migration XBZRLE default cache size */
#define DEFAULT_MIGRATE_XBZRLE_CACHE_SIZE (64 * 1024 * 1024)

/* The delay time (in ms) between two COLO checkpoints */
#define DEFAULT_MIGRATE_X_CHECKPOINT_DELAY (200 * 100)
#define DEFAULT_MIGRATE_MULTIFD_CHANNELS 2
#define DEFAULT_MIGRATE_MULTIFD_COMPRESSION MULTIFD_COMPRESSION_NONE
/* 0: means nocompress, 1: best speed, ... 9: best compress ratio */
#define DEFAULT_MIGRATE_MULTIFD_ZLIB_LEVEL 1
/* 0: means nocompress, 1: best speed, ... 20: best compress ratio */
#define DEFAULT_MIGRATE_MULTIFD_ZSTD_LEVEL 1

/* Background transfer rate for postcopy, 0 means unlimited, note
 * that page requests can still exceed this limit.
 */
#define DEFAULT_MIGRATE_MAX_POSTCOPY_BANDWIDTH 0

/*
 * Parameters for self_announce_delay giving a stream of RARP/ARP
 * packets after migration.
 */
#define DEFAULT_MIGRATE_ANNOUNCE_INITIAL  50
#define DEFAULT_MIGRATE_ANNOUNCE_MAX     550
#define DEFAULT_MIGRATE_ANNOUNCE_ROUNDS    5
#define DEFAULT_MIGRATE_ANNOUNCE_STEP    100

static NotifierList migration_state_notifiers =
    NOTIFIER_LIST_INITIALIZER(migration_state_notifiers);

/* Messages sent on the return path from destination to source */
enum mig_rp_message_type {
    MIG_RP_MSG_INVALID = 0,  /* Must be 0 */
    MIG_RP_MSG_SHUT,         /* sibling will not send any more RP messages */
    MIG_RP_MSG_PONG,         /* Response to a PING; data (seq: be32) */

    MIG_RP_MSG_REQ_PAGES_ID, /* data (start: be64, len: be32, id: string) */
    MIG_RP_MSG_REQ_PAGES,    /* data (start: be64, len: be32) */
    MIG_RP_MSG_RECV_BITMAP,  /* send recved_bitmap back to source */
    MIG_RP_MSG_RESUME_ACK,   /* tell source that we are ready to resume */

    MIG_RP_MSG_MAX
};

/* Migration capabilities set */
struct MigrateCapsSet {
    int size;                       /* Capability set size */
    MigrationCapability caps[];     /* Variadic array of capabilities */
};
typedef struct MigrateCapsSet MigrateCapsSet;

/* Define and initialize MigrateCapsSet */
#define INITIALIZE_MIGRATE_CAPS_SET(_name, ...)   \
    MigrateCapsSet _name = {                      \
        .size = sizeof((int []) { __VA_ARGS__ }) / sizeof(int), \
        .caps = { __VA_ARGS__ }                   \
    }
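/*
 * For illustration, a hypothetical two-capability set
 *
 *     INITIALIZE_MIGRATE_CAPS_SET(example_caps,
 *                                 MIGRATION_CAPABILITY_XBZRLE,
 *                                 MIGRATION_CAPABILITY_MULTIFD);
 *
 * expands to
 *
 *     MigrateCapsSet example_caps = {
 *         .size = sizeof((int []) { ...the two values... }) / sizeof(int),
 *         .caps = { MIGRATION_CAPABILITY_XBZRLE,
 *                   MIGRATION_CAPABILITY_MULTIFD }
 *     };
 *
 * where .size evaluates to 2, i.e. the compound-literal division simply
 * counts the variadic arguments.
 */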
/* Background-snapshot compatibility check list */
static const
INITIALIZE_MIGRATE_CAPS_SET(check_caps_background_snapshot,
    MIGRATION_CAPABILITY_POSTCOPY_RAM,
    MIGRATION_CAPABILITY_DIRTY_BITMAPS,
    MIGRATION_CAPABILITY_POSTCOPY_BLOCKTIME,
    MIGRATION_CAPABILITY_LATE_BLOCK_ACTIVATE,
    MIGRATION_CAPABILITY_RETURN_PATH,
    MIGRATION_CAPABILITY_MULTIFD,
    MIGRATION_CAPABILITY_PAUSE_BEFORE_SWITCHOVER,
    MIGRATION_CAPABILITY_AUTO_CONVERGE,
    MIGRATION_CAPABILITY_RELEASE_RAM,
    MIGRATION_CAPABILITY_RDMA_PIN_ALL,
    MIGRATION_CAPABILITY_COMPRESS,
    MIGRATION_CAPABILITY_XBZRLE,
    MIGRATION_CAPABILITY_X_COLO,
    MIGRATION_CAPABILITY_VALIDATE_UUID);

/* When we add fault tolerance, we could have several
   migrations at once.  For now we don't need to add
   dynamic creation of migration */

static MigrationState *current_migration;
static MigrationIncomingState *current_incoming;

static GSList *migration_blockers;

static bool migration_object_check(MigrationState *ms, Error **errp);
static int migration_maybe_pause(MigrationState *s,
                                 int *current_active_state,
                                 int new_state);
static void migrate_fd_cancel(MigrationState *s);

static gint page_request_addr_cmp(gconstpointer ap, gconstpointer bp)
{
    uintptr_t a = (uintptr_t) ap, b = (uintptr_t) bp;

    return (a > b) - (a < b);
}

void migration_object_init(void)
{
    /* This can only be called once. */
    assert(!current_migration);
    current_migration = MIGRATION_OBJ(object_new(TYPE_MIGRATION));

    /*
     * Init the migrate incoming object as well no matter whether
     * we'll use it or not.
     */
    assert(!current_incoming);
    current_incoming = g_new0(MigrationIncomingState, 1);
    current_incoming->state = MIGRATION_STATUS_NONE;
    current_incoming->postcopy_remote_fds =
        g_array_new(FALSE, TRUE, sizeof(struct PostCopyFD));
    qemu_mutex_init(&current_incoming->rp_mutex);
    qemu_event_init(&current_incoming->main_thread_load_event, false);
    qemu_sem_init(&current_incoming->postcopy_pause_sem_dst, 0);
    qemu_sem_init(&current_incoming->postcopy_pause_sem_fault, 0);
    qemu_mutex_init(&current_incoming->page_request_mutex);
    current_incoming->page_requested = g_tree_new(page_request_addr_cmp);

    migration_object_check(current_migration, &error_fatal);

    blk_mig_init();
    ram_mig_init();
    dirty_bitmap_mig_init();
}

void migration_cancel(void)
{
    migrate_fd_cancel(current_migration);
}

void migration_shutdown(void)
{
    /*
     * Cancel the current migration - that will (eventually)
     * stop the migration using this structure
     */
    migration_cancel();
    object_unref(OBJECT(current_migration));

    /*
     * Cancel outgoing migration of dirty bitmaps.  It should
     * at least unref used block nodes.
     */
    dirty_bitmap_mig_cancel_outgoing();

    /*
     * Cancel incoming migration of dirty bitmaps.  Dirty bitmaps
     * are non-critical data, and their loss is never considered
     * serious.
     */
    dirty_bitmap_mig_cancel_incoming();
}

/* For outgoing */
MigrationState *migrate_get_current(void)
{
    /* This can only be called after the object is created. */
    assert(current_migration);
    return current_migration;
}

MigrationIncomingState *migration_incoming_get_current(void)
{
    assert(current_incoming);
    return current_incoming;
}

void migration_incoming_state_destroy(void)
{
    struct MigrationIncomingState *mis = migration_incoming_get_current();

    if (mis->to_src_file) {
        /* Tell source that we are done */
        migrate_send_rp_shut(mis, qemu_file_get_error(mis->from_src_file) != 0);
        qemu_fclose(mis->to_src_file);
        mis->to_src_file = NULL;
    }

    if (mis->from_src_file) {
        migration_ioc_unregister_yank_from_file(mis->from_src_file);
        qemu_fclose(mis->from_src_file);
        mis->from_src_file = NULL;
    }
    if (mis->postcopy_remote_fds) {
        g_array_free(mis->postcopy_remote_fds, TRUE);
        mis->postcopy_remote_fds = NULL;
    }
    if (mis->transport_cleanup) {
        mis->transport_cleanup(mis->transport_data);
    }

    qemu_event_reset(&mis->main_thread_load_event);

    if (mis->page_requested) {
        g_tree_destroy(mis->page_requested);
        mis->page_requested = NULL;
    }

    if (mis->socket_address_list) {
        qapi_free_SocketAddressList(mis->socket_address_list);
        mis->socket_address_list = NULL;
    }

    yank_unregister_instance(MIGRATION_YANK_INSTANCE);
}

static void migrate_generate_event(int new_state)
{
    if (migrate_use_events()) {
        qapi_event_send_migration(new_state);
    }
}

static bool migrate_late_block_activate(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[
        MIGRATION_CAPABILITY_LATE_BLOCK_ACTIVATE];
}
/*
 * Send a message on the return channel back to the source
 * of the migration.
 */
static int migrate_send_rp_message(MigrationIncomingState *mis,
                                   enum mig_rp_message_type message_type,
                                   uint16_t len, void *data)
{
    int ret = 0;

    trace_migrate_send_rp_message((int)message_type, len);
    QEMU_LOCK_GUARD(&mis->rp_mutex);

    /*
     * It's possible that the file handle got lost due to network
     * failures.
     */
    if (!mis->to_src_file) {
        ret = -EIO;
        return ret;
    }

    qemu_put_be16(mis->to_src_file, (unsigned int)message_type);
    qemu_put_be16(mis->to_src_file, len);
    qemu_put_buffer(mis->to_src_file, data, len);
    qemu_fflush(mis->to_src_file);

    /* It's possible that qemu file got error during sending */
    ret = qemu_file_get_error(mis->to_src_file);

    return ret;
}

/* Request one page from the source VM at the given start address.
 *   rb: the RAMBlock to request the page in
 *   start: address offset within the RB
 *   len: length in bytes required - must be a multiple of pagesize
 */
int migrate_send_rp_message_req_pages(MigrationIncomingState *mis,
                                      RAMBlock *rb, ram_addr_t start)
{
    uint8_t bufc[12 + 1 + 255]; /* start (8), len (4), rbname up to 256 */
    size_t msglen = 12; /* start + len */
    size_t len = qemu_ram_pagesize(rb);
    enum mig_rp_message_type msg_type;
    const char *rbname;
    int rbname_len;

    *(uint64_t *)bufc = cpu_to_be64((uint64_t)start);
    *(uint32_t *)(bufc + 8) = cpu_to_be32((uint32_t)len);

    /*
     * We maintain the last ramblock that we requested a page for.  Note that
     * we don't need locking because this function will only be called within
     * the postcopy ram fault thread.
     */
    if (rb != mis->last_rb) {
        mis->last_rb = rb;

        rbname = qemu_ram_get_idstr(rb);
        rbname_len = strlen(rbname);

        assert(rbname_len < 256);

        bufc[msglen++] = rbname_len;
        memcpy(bufc + msglen, rbname, rbname_len);
        msglen += rbname_len;
        msg_type = MIG_RP_MSG_REQ_PAGES_ID;
    } else {
        msg_type = MIG_RP_MSG_REQ_PAGES;
    }

    return migrate_send_rp_message(mis, msg_type, msglen, bufc);
}
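/*
 * Worked example of the return-path framing above (illustrative values):
 * every message is "be16 type, be16 len" followed by len payload bytes, so
 * a MIG_RP_MSG_REQ_PAGES_ID request for the 4 KiB page at offset 0x2000 of
 * a block named "pc.ram" goes on the wire as
 *
 *     be16 type   = 3        (MIG_RP_MSG_REQ_PAGES_ID)
 *     be16 len    = 19       (8 + 4 + 1 + 6)
 *     be64 start  = 0x2000
 *     be32 len    = 0x1000
 *     u8   idlen  = 6
 *     6 bytes       "pc.ram"
 *
 * Subsequent requests for the same block omit the name and use
 * MIG_RP_MSG_REQ_PAGES (type 4) with a 12-byte payload.
 */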
int migrate_send_rp_req_pages(MigrationIncomingState *mis,
                              RAMBlock *rb, ram_addr_t start, uint64_t haddr)
{
    void *aligned = (void *)(uintptr_t)(haddr & (-qemu_ram_pagesize(rb)));
    bool received = false;

    WITH_QEMU_LOCK_GUARD(&mis->page_request_mutex) {
        received = ramblock_recv_bitmap_test_byte_offset(rb, start);
        if (!received && !g_tree_lookup(mis->page_requested, aligned)) {
            /*
             * The page has not been received, and it's not yet in the page
             * request list.  Queue it.  Set the value of element to 1, so that
             * things like g_tree_lookup() will return TRUE (1) when found.
             */
            g_tree_insert(mis->page_requested, aligned, (gpointer)1);
            mis->page_requested_count++;
            trace_postcopy_page_req_add(aligned, mis->page_requested_count);
        }
    }

    /*
     * If the page is there, skip sending the message.  We don't even need the
     * lock because as long as the page arrived, it'll be there forever.
     */
    if (received) {
        return 0;
    }

    return migrate_send_rp_message_req_pages(mis, rb, start);
}

static bool migration_colo_enabled;
bool migration_incoming_colo_enabled(void)
{
    return migration_colo_enabled;
}

void migration_incoming_disable_colo(void)
{
    ram_block_discard_disable(false);
    migration_colo_enabled = false;
}

int migration_incoming_enable_colo(void)
{
    if (ram_block_discard_disable(true)) {
        error_report("COLO: cannot disable RAM discard");
        return -EBUSY;
    }
    migration_colo_enabled = true;
    return 0;
}

void migrate_add_address(SocketAddress *address)
{
    MigrationIncomingState *mis = migration_incoming_get_current();

    QAPI_LIST_PREPEND(mis->socket_address_list,
                      QAPI_CLONE(SocketAddress, address));
}

static void qemu_start_incoming_migration(const char *uri, Error **errp)
{
    const char *p = NULL;

    qapi_event_send_migration(MIGRATION_STATUS_SETUP);
    if (strstart(uri, "tcp:", &p) ||
        strstart(uri, "unix:", NULL) ||
        strstart(uri, "vsock:", NULL)) {
        socket_start_incoming_migration(p ? p : uri, errp);
#ifdef CONFIG_RDMA
    } else if (strstart(uri, "rdma:", &p)) {
        rdma_start_incoming_migration(p, errp);
#endif
    } else if (strstart(uri, "exec:", &p)) {
        exec_start_incoming_migration(p, errp);
    } else if (strstart(uri, "fd:", &p)) {
        fd_start_incoming_migration(p, errp);
    } else {
        error_setg(errp, "unknown migration protocol: %s", uri);
    }
}
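/*
 * Example URIs accepted by the dispatcher above (illustrative):
 *
 *     tcp:0.0.0.0:4444          listen on a TCP socket
 *     unix:/tmp/migrate.sock    UNIX domain socket
 *     vsock:3:4444              vsock CID and port
 *     rdma:192.168.0.1:4444     RDMA (only with CONFIG_RDMA)
 *     exec:cat > /tmp/vm.mig    pipe the stream through a command
 *     fd:42                     an already-open file descriptor
 */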
static void process_incoming_migration_bh(void *opaque)
{
    Error *local_err = NULL;
    MigrationIncomingState *mis = opaque;

    /* If capability late_block_activate is set:
     * Only fire up the block code now if we're going to restart the
     * VM, else 'cont' will do it.
     * This causes file locking to happen; so we don't want it to happen
     * unless we really are starting the VM.
     */
    if (!migrate_late_block_activate() ||
         (autostart && (!global_state_received() ||
            global_state_get_runstate() == RUN_STATE_RUNNING))) {
        /* Make sure all file formats flush their mutable metadata.
         * If we get an error here, just don't restart the VM yet. */
        bdrv_invalidate_cache_all(&local_err);
        if (local_err) {
            error_report_err(local_err);
            local_err = NULL;
            autostart = false;
        }
    }

    /*
     * This must happen after all error conditions are dealt with and
     * we're sure the VM is going to be running on this host.
     */
    qemu_announce_self(&mis->announce_timer, migrate_announce_params());

    if (multifd_load_cleanup(&local_err) != 0) {
        error_report_err(local_err);
        autostart = false;
    }
    /* If global state section was not received or we are in running
       state, we need to obey autostart.  Any other state is set with
       runstate_set. */

    dirty_bitmap_mig_before_vm_start();

    if (!global_state_received() ||
        global_state_get_runstate() == RUN_STATE_RUNNING) {
        if (autostart) {
            vm_start();
        } else {
            runstate_set(RUN_STATE_PAUSED);
        }
    } else if (migration_incoming_colo_enabled()) {
        migration_incoming_disable_colo();
        vm_start();
    } else {
        runstate_set(global_state_get_runstate());
    }
    /*
     * This must happen after any state changes since as soon as an external
     * observer sees this event they might start to prod at the VM assuming
     * it's ready to use.
     */
    migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
                      MIGRATION_STATUS_COMPLETED);
    qemu_bh_delete(mis->bh);
    migration_incoming_state_destroy();
}

static void process_incoming_migration_co(void *opaque)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    PostcopyState ps;
    int ret;
    Error *local_err = NULL;

    assert(mis->from_src_file);
    mis->migration_incoming_co = qemu_coroutine_self();
    mis->largest_page_size = qemu_ram_pagesize_largest();
    postcopy_state_set(POSTCOPY_INCOMING_NONE);
    migrate_set_state(&mis->state, MIGRATION_STATUS_NONE,
                      MIGRATION_STATUS_ACTIVE);
    ret = qemu_loadvm_state(mis->from_src_file);

    ps = postcopy_state_get();
    trace_process_incoming_migration_co_end(ret, ps);
    if (ps != POSTCOPY_INCOMING_NONE) {
        if (ps == POSTCOPY_INCOMING_ADVISE) {
            /*
             * Where a migration had postcopy enabled (and thus went to advise)
             * but managed to complete within the precopy period, we can use
             * the normal exit.
             */
            postcopy_ram_incoming_cleanup(mis);
        } else if (ret >= 0) {
            /*
             * Postcopy was started, cleanup should happen at the end of the
             * postcopy thread.
             */
            trace_process_incoming_migration_co_postcopy_end_main();
            return;
        }
        /* Else if something went wrong then just fall out of the normal exit */
    }

    /* we get COLO info, and know if we are in COLO mode */
    if (!ret && migration_incoming_colo_enabled()) {
        /* Make sure all file formats flush their mutable metadata */
        bdrv_invalidate_cache_all(&local_err);
        if (local_err) {
            error_report_err(local_err);
            goto fail;
        }

        qemu_thread_create(&mis->colo_incoming_thread, "COLO incoming",
             colo_process_incoming_thread, mis, QEMU_THREAD_JOINABLE);
        mis->have_colo_incoming_thread = true;
        qemu_coroutine_yield();

        /* Wait checkpoint incoming thread exit before free resource */
        qemu_thread_join(&mis->colo_incoming_thread);
        /* We hold the global iothread lock, so it is safe here */
        colo_release_ram_cache();
    }

    if (ret < 0) {
        error_report("load of migration failed: %s", strerror(-ret));
        goto fail;
    }
    mis->bh = qemu_bh_new(process_incoming_migration_bh, mis);
    qemu_bh_schedule(mis->bh);
    mis->migration_incoming_co = NULL;
    return;
fail:
    local_err = NULL;
    migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
                      MIGRATION_STATUS_FAILED);
    qemu_fclose(mis->from_src_file);
    if (multifd_load_cleanup(&local_err) != 0) {
        error_report_err(local_err);
    }
    exit(EXIT_FAILURE);
}

/**
 * @migration_incoming_setup: Setup incoming migration
 *
 * Returns 0 for no error.  Note that it never actually returns an error:
 * on failure to set up multifd the process exits.
 *
 * @f: file for main migration channel
 * @errp: where to put errors
 */
static int migration_incoming_setup(QEMUFile *f, Error **errp)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    Error *local_err = NULL;

    if (multifd_load_setup(&local_err) != 0) {
        /* We haven't been able to create multifd threads;
           nothing better to do */
        error_report_err(local_err);
        exit(EXIT_FAILURE);
    }

    if (!mis->from_src_file) {
        mis->from_src_file = f;
    }
    qemu_file_set_blocking(f, false);
    return 0;
}

void migration_incoming_process(void)
{
    Coroutine *co = qemu_coroutine_create(process_incoming_migration_co, NULL);
    qemu_coroutine_enter(co);
}
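/*
 * Rough sketch of the incoming control flow around the functions above
 * (a summary for orientation, not upstream documentation):
 *
 *   migration_ioc_process_incoming() / migration_fd_process_incoming()
 *     -> postcopy_try_recover()       resume a paused postcopy, if any
 *     -> migration_incoming_setup()   multifd threads + from_src_file
 *     -> migration_incoming_process()
 *          -> process_incoming_migration_co()   coroutine: qemu_loadvm_state()
 *               -> process_incoming_migration_bh()  bottom half: restart VM
 */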
/* Returns true if recovered from a paused migration, otherwise false */
static bool postcopy_try_recover(QEMUFile *f)
{
    MigrationIncomingState *mis = migration_incoming_get_current();

    if (mis->state == MIGRATION_STATUS_POSTCOPY_PAUSED) {
        /* Resumed from a paused postcopy migration */

        mis->from_src_file = f;
        /* Postcopy has standalone thread to do vm load */
        qemu_file_set_blocking(f, true);

        /* Re-configure the return path */
        mis->to_src_file = qemu_file_get_return_path(f);

        migrate_set_state(&mis->state, MIGRATION_STATUS_POSTCOPY_PAUSED,
                          MIGRATION_STATUS_POSTCOPY_RECOVER);

        /*
         * Here, we only wake up the main loading thread (while the
         * fault thread will still be waiting), so that we can receive
         * commands from the source now, and answer them if needed.  The
         * fault thread will be woken up later, once we are sure that the
         * source is ready to reply to page requests.
         */
        qemu_sem_post(&mis->postcopy_pause_sem_dst);
        return true;
    }

    return false;
}

void migration_fd_process_incoming(QEMUFile *f, Error **errp)
{
    Error *local_err = NULL;

    if (postcopy_try_recover(f)) {
        return;
    }

    if (migration_incoming_setup(f, &local_err)) {
        error_propagate(errp, local_err);
        return;
    }
    migration_incoming_process();
}

void migration_ioc_process_incoming(QIOChannel *ioc, Error **errp)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    Error *local_err = NULL;
    bool start_migration;

    if (!mis->from_src_file) {
        /* The first connection (multifd may have multiple) */
        QEMUFile *f = qemu_fopen_channel_input(ioc);

        /* If it's a recovery, we're done */
        if (postcopy_try_recover(f)) {
            return;
        }

        if (migration_incoming_setup(f, &local_err)) {
            error_propagate(errp, local_err);
            return;
        }

        /*
         * Common migration only needs one channel, so we can start
         * right now.  Multifd needs more than one channel, we wait.
         */
        start_migration = !migrate_use_multifd();
    } else {
        /* Multiple connections */
        assert(migrate_use_multifd());
        start_migration = multifd_recv_new_channel(ioc, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    }

    if (start_migration) {
        migration_incoming_process();
    }
}

/**
 * @migration_has_all_channels: We have received all channels that we need
 *
 * Returns true when we have got connections to all the channels that
 * we need for migration.
 */
bool migration_has_all_channels(void)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    bool all_channels;

    all_channels = multifd_recv_all_channels_created();

    return all_channels && mis->from_src_file != NULL;
}
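/*
 * For illustration: without multifd, the main channel (from_src_file) is the
 * only one needed, so this returns true as soon as it is connected.  With
 * multifd-channels=N the expectation (as counted by
 * multifd_recv_all_channels_created()) is the main channel plus N multifd
 * channels before migration can proceed.
 */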
/*
 * Send a 'SHUT' message on the return channel with the given value
 * to indicate that we've finished with the RP.  Non-0 value indicates
 * error.
 */
void migrate_send_rp_shut(MigrationIncomingState *mis,
                          uint32_t value)
{
    uint32_t buf;

    buf = cpu_to_be32(value);
    migrate_send_rp_message(mis, MIG_RP_MSG_SHUT, sizeof(buf), &buf);
}

/*
 * Send a 'PONG' message on the return channel with the given value
 * (normally in response to a 'PING')
 */
void migrate_send_rp_pong(MigrationIncomingState *mis,
                          uint32_t value)
{
    uint32_t buf;

    buf = cpu_to_be32(value);
    migrate_send_rp_message(mis, MIG_RP_MSG_PONG, sizeof(buf), &buf);
}

void migrate_send_rp_recv_bitmap(MigrationIncomingState *mis,
                                 char *block_name)
{
    char buf[512];
    int len;
    int64_t res;

    /*
     * First, we send the header part.  It contains only the len of
     * idstr, and the idstr itself.
     */
    len = strlen(block_name);
    buf[0] = len;
    memcpy(buf + 1, block_name, len);

    if (mis->state != MIGRATION_STATUS_POSTCOPY_RECOVER) {
        error_report("%s: MSG_RP_RECV_BITMAP only used for recovery",
                     __func__);
        return;
    }

    migrate_send_rp_message(mis, MIG_RP_MSG_RECV_BITMAP, len + 1, buf);

    /*
     * Next, we dump the received bitmap to the stream.
     *
     * TODO: currently we are safe since we are the only one that is
     * using the to_src_file handle (fault thread is still paused),
     * and it's OK even without taking the mutex.  However the best way
     * is to take the lock before sending the message header, and
     * release the lock after sending the bitmap.
     */
    qemu_mutex_lock(&mis->rp_mutex);
    res = ramblock_recv_bitmap_send(mis->to_src_file, block_name);
    qemu_mutex_unlock(&mis->rp_mutex);

    trace_migrate_send_rp_recv_bitmap(block_name, res);
}

void migrate_send_rp_resume_ack(MigrationIncomingState *mis, uint32_t value)
{
    uint32_t buf;

    buf = cpu_to_be32(value);
    migrate_send_rp_message(mis, MIG_RP_MSG_RESUME_ACK, sizeof(buf), &buf);
}

MigrationCapabilityStatusList *qmp_query_migrate_capabilities(Error **errp)
{
    MigrationCapabilityStatusList *head = NULL, **tail = &head;
    MigrationCapabilityStatus *caps;
    MigrationState *s = migrate_get_current();
    int i;

    for (i = 0; i < MIGRATION_CAPABILITY__MAX; i++) {
#ifndef CONFIG_LIVE_BLOCK_MIGRATION
        if (i == MIGRATION_CAPABILITY_BLOCK) {
            continue;
        }
#endif
        caps = g_malloc0(sizeof(*caps));
        caps->capability = i;
        caps->state = s->enabled_capabilities[i];
        QAPI_LIST_APPEND(tail, caps);
    }

    return head;
}
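/*
 * Abridged example of the QMP exchange served by the function above
 * (illustrative values, not a captured trace):
 *
 *   -> { "execute": "query-migrate-capabilities" }
 *   <- { "return": [ { "capability": "xbzrle", "state": false },
 *                    { "capability": "postcopy-ram", "state": true },
 *                    ... ] }
 */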
MigrationParameters *qmp_query_migrate_parameters(Error **errp)
{
    MigrationParameters *params;
    MigrationState *s = migrate_get_current();

    /* TODO use QAPI_CLONE() instead of duplicating it inline */
    params = g_malloc0(sizeof(*params));
    params->has_compress_level = true;
    params->compress_level = s->parameters.compress_level;
    params->has_compress_threads = true;
    params->compress_threads = s->parameters.compress_threads;
    params->has_compress_wait_thread = true;
    params->compress_wait_thread = s->parameters.compress_wait_thread;
    params->has_decompress_threads = true;
    params->decompress_threads = s->parameters.decompress_threads;
    params->has_throttle_trigger_threshold = true;
    params->throttle_trigger_threshold = s->parameters.throttle_trigger_threshold;
    params->has_cpu_throttle_initial = true;
    params->cpu_throttle_initial = s->parameters.cpu_throttle_initial;
    params->has_cpu_throttle_increment = true;
    params->cpu_throttle_increment = s->parameters.cpu_throttle_increment;
    params->has_cpu_throttle_tailslow = true;
    params->cpu_throttle_tailslow = s->parameters.cpu_throttle_tailslow;
    params->has_tls_creds = true;
    params->tls_creds = g_strdup(s->parameters.tls_creds);
    params->has_tls_hostname = true;
    params->tls_hostname = g_strdup(s->parameters.tls_hostname);
    params->has_tls_authz = true;
    params->tls_authz = g_strdup(s->parameters.tls_authz ?
                                 s->parameters.tls_authz : "");
    params->has_max_bandwidth = true;
    params->max_bandwidth = s->parameters.max_bandwidth;
    params->has_downtime_limit = true;
    params->downtime_limit = s->parameters.downtime_limit;
    params->has_x_checkpoint_delay = true;
    params->x_checkpoint_delay = s->parameters.x_checkpoint_delay;
    params->has_block_incremental = true;
    params->block_incremental = s->parameters.block_incremental;
    params->has_multifd_channels = true;
    params->multifd_channels = s->parameters.multifd_channels;
    params->has_multifd_compression = true;
    params->multifd_compression = s->parameters.multifd_compression;
    params->has_multifd_zlib_level = true;
    params->multifd_zlib_level = s->parameters.multifd_zlib_level;
    params->has_multifd_zstd_level = true;
    params->multifd_zstd_level = s->parameters.multifd_zstd_level;
    params->has_xbzrle_cache_size = true;
    params->xbzrle_cache_size = s->parameters.xbzrle_cache_size;
    params->has_max_postcopy_bandwidth = true;
    params->max_postcopy_bandwidth = s->parameters.max_postcopy_bandwidth;
    params->has_max_cpu_throttle = true;
    params->max_cpu_throttle = s->parameters.max_cpu_throttle;
    params->has_announce_initial = true;
    params->announce_initial = s->parameters.announce_initial;
    params->has_announce_max = true;
    params->announce_max = s->parameters.announce_max;
    params->has_announce_rounds = true;
    params->announce_rounds = s->parameters.announce_rounds;
    params->has_announce_step = true;
    params->announce_step = s->parameters.announce_step;

    if (s->parameters.has_block_bitmap_mapping) {
        params->has_block_bitmap_mapping = true;
        params->block_bitmap_mapping =
            QAPI_CLONE(BitmapMigrationNodeAliasList,
                       s->parameters.block_bitmap_mapping);
    }

    return params;
}

AnnounceParameters *migrate_announce_params(void)
{
    static AnnounceParameters ap;

    MigrationState *s = migrate_get_current();

    ap.initial = s->parameters.announce_initial;
    ap.max = s->parameters.announce_max;
    ap.rounds = s->parameters.announce_rounds;
    ap.step = s->parameters.announce_step;

    return &ap;
}
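/*
 * Worked example with the default announce parameters (initial=50, max=550,
 * rounds=5, step=100): assuming the capped linear backoff implemented in
 * net/announce.c, the delay before each round starts at 'initial' ms and
 * grows by 'step' ms per round, capped at 'max', so the five announcement
 * rounds fire after roughly 50, 150, 250, 350 and 450 ms.  (A sketch of the
 * expected schedule, not a measured trace.)
 */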
/*
 * Return true if we're already in the middle of a migration
 * (i.e. any of the active or setup states)
 */
bool migration_is_setup_or_active(int state)
{
    switch (state) {
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_PAUSED:
    case MIGRATION_STATUS_POSTCOPY_RECOVER:
    case MIGRATION_STATUS_SETUP:
    case MIGRATION_STATUS_PRE_SWITCHOVER:
    case MIGRATION_STATUS_DEVICE:
    case MIGRATION_STATUS_WAIT_UNPLUG:
    case MIGRATION_STATUS_COLO:
        return true;

    default:
        return false;

    }
}

bool migration_is_running(int state)
{
    switch (state) {
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_PAUSED:
    case MIGRATION_STATUS_POSTCOPY_RECOVER:
    case MIGRATION_STATUS_SETUP:
    case MIGRATION_STATUS_PRE_SWITCHOVER:
    case MIGRATION_STATUS_DEVICE:
    case MIGRATION_STATUS_WAIT_UNPLUG:
    case MIGRATION_STATUS_CANCELLING:
        return true;

    default:
        return false;

    }
}

static void populate_time_info(MigrationInfo *info, MigrationState *s)
{
    info->has_status = true;
    info->has_setup_time = true;
    info->setup_time = s->setup_time;
    if (s->state == MIGRATION_STATUS_COMPLETED) {
        info->has_total_time = true;
        info->total_time = s->total_time;
        info->has_downtime = true;
        info->downtime = s->downtime;
    } else {
        info->has_total_time = true;
        info->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) -
                           s->start_time;
        info->has_expected_downtime = true;
        info->expected_downtime = s->expected_downtime;
    }
}
static void populate_ram_info(MigrationInfo *info, MigrationState *s)
{
    info->has_ram = true;
    info->ram = g_malloc0(sizeof(*info->ram));
    info->ram->transferred = ram_counters.transferred;
    info->ram->total = ram_bytes_total();
    info->ram->duplicate = ram_counters.duplicate;
    /* legacy value.  It is not used anymore */
    info->ram->skipped = 0;
    info->ram->normal = ram_counters.normal;
    info->ram->normal_bytes = ram_counters.normal *
        qemu_target_page_size();
    info->ram->mbps = s->mbps;
    info->ram->dirty_sync_count = ram_counters.dirty_sync_count;
    info->ram->postcopy_requests = ram_counters.postcopy_requests;
    info->ram->page_size = qemu_target_page_size();
    info->ram->multifd_bytes = ram_counters.multifd_bytes;
    info->ram->pages_per_second = s->pages_per_second;

    if (migrate_use_xbzrle()) {
        info->has_xbzrle_cache = true;
        info->xbzrle_cache = g_malloc0(sizeof(*info->xbzrle_cache));
        info->xbzrle_cache->cache_size = migrate_xbzrle_cache_size();
        info->xbzrle_cache->bytes = xbzrle_counters.bytes;
        info->xbzrle_cache->pages = xbzrle_counters.pages;
        info->xbzrle_cache->cache_miss = xbzrle_counters.cache_miss;
        info->xbzrle_cache->cache_miss_rate = xbzrle_counters.cache_miss_rate;
        info->xbzrle_cache->encoding_rate = xbzrle_counters.encoding_rate;
        info->xbzrle_cache->overflow = xbzrle_counters.overflow;
    }

    if (migrate_use_compression()) {
        info->has_compression = true;
        info->compression = g_malloc0(sizeof(*info->compression));
        info->compression->pages = compression_counters.pages;
        info->compression->busy = compression_counters.busy;
        info->compression->busy_rate = compression_counters.busy_rate;
        info->compression->compressed_size =
            compression_counters.compressed_size;
        info->compression->compression_rate =
            compression_counters.compression_rate;
    }

    if (cpu_throttle_active()) {
        info->has_cpu_throttle_percentage = true;
        info->cpu_throttle_percentage = cpu_throttle_get_percentage();
    }

    if (s->state != MIGRATION_STATUS_COMPLETED) {
        info->ram->remaining = ram_bytes_remaining();
        info->ram->dirty_pages_rate = ram_counters.dirty_pages_rate;
    }
}

static void populate_disk_info(MigrationInfo *info)
{
    if (blk_mig_active()) {
        info->has_disk = true;
        info->disk = g_malloc0(sizeof(*info->disk));
        info->disk->transferred = blk_mig_bytes_transferred();
        info->disk->remaining = blk_mig_bytes_remaining();
        info->disk->total = blk_mig_bytes_total();
    }
}
static void fill_source_migration_info(MigrationInfo *info)
{
    MigrationState *s = migrate_get_current();
    GSList *cur_blocker = migration_blockers;

    info->blocked_reasons = NULL;

    /*
     * There are two types of reasons a migration might be blocked;
     * a) devices marked in VMState as non-migratable, and
     * b) Explicit migration blockers
     * We need to add both of them here.
     */
    qemu_savevm_non_migratable_list(&info->blocked_reasons);

    while (cur_blocker) {
        QAPI_LIST_PREPEND(info->blocked_reasons,
                          g_strdup(error_get_pretty(cur_blocker->data)));
        cur_blocker = g_slist_next(cur_blocker);
    }
    info->has_blocked_reasons = info->blocked_reasons != NULL;

    switch (s->state) {
    case MIGRATION_STATUS_NONE:
        /* no migration has happened ever */
        /* do not overwrite destination migration status */
        return;
    case MIGRATION_STATUS_SETUP:
        info->has_status = true;
        info->has_total_time = false;
        break;
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_CANCELLING:
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
    case MIGRATION_STATUS_PRE_SWITCHOVER:
    case MIGRATION_STATUS_DEVICE:
    case MIGRATION_STATUS_POSTCOPY_PAUSED:
    case MIGRATION_STATUS_POSTCOPY_RECOVER:
        /* TODO add some postcopy stats */
        populate_time_info(info, s);
        populate_ram_info(info, s);
        populate_disk_info(info);
        populate_vfio_info(info);
        break;
    case MIGRATION_STATUS_COLO:
        info->has_status = true;
        /* TODO: display COLO specific information (checkpoint info etc.) */
        break;
    case MIGRATION_STATUS_COMPLETED:
        populate_time_info(info, s);
        populate_ram_info(info, s);
        populate_vfio_info(info);
        break;
    case MIGRATION_STATUS_FAILED:
        info->has_status = true;
        if (s->error) {
            info->has_error_desc = true;
            info->error_desc = g_strdup(error_get_pretty(s->error));
        }
        break;
    case MIGRATION_STATUS_CANCELLED:
        info->has_status = true;
        break;
    case MIGRATION_STATUS_WAIT_UNPLUG:
        info->has_status = true;
        break;
    }
    info->status = s->state;
}

typedef enum WriteTrackingSupport {
    WT_SUPPORT_UNKNOWN = 0,
    WT_SUPPORT_ABSENT,
    WT_SUPPORT_AVAILABLE,
    WT_SUPPORT_COMPATIBLE
} WriteTrackingSupport;

static
WriteTrackingSupport migrate_query_write_tracking(void)
{
    /* Check if kernel supports required UFFD features */
    if (!ram_write_tracking_available()) {
        return WT_SUPPORT_ABSENT;
    }
    /*
     * Check if current memory configuration is
     * compatible with required UFFD features.
     */
    if (!ram_write_tracking_compatible()) {
        return WT_SUPPORT_AVAILABLE;
    }

    return WT_SUPPORT_COMPATIBLE;
}
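/*
 * Note on the ordering trick used with the enum above (explanatory aside):
 * WriteTrackingSupport values are ordered by increasing capability, so
 * migrate_caps_check() below can use plain comparisons:
 *
 *     wt_support < WT_SUPPORT_AVAILABLE    => no kernel UFFD support at all
 *     wt_support < WT_SUPPORT_COMPATIBLE   => supported by the kernel, but
 *                                             the current memory configuration
 *                                             is incompatible
 */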
/**
 * @migration_caps_check - check capability validity
 *
 * @cap_list: old capability list, array of bool
 * @params: new capabilities to be applied soon
 * @errp: set *errp if the check failed, with reason
 *
 * Returns true if check passed, otherwise false.
 */
static bool migrate_caps_check(bool *cap_list,
                               MigrationCapabilityStatusList *params,
                               Error **errp)
{
    MigrationCapabilityStatusList *cap;
    bool old_postcopy_cap;
    MigrationIncomingState *mis = migration_incoming_get_current();

    old_postcopy_cap = cap_list[MIGRATION_CAPABILITY_POSTCOPY_RAM];

    for (cap = params; cap; cap = cap->next) {
        cap_list[cap->value->capability] = cap->value->state;
    }

#ifndef CONFIG_LIVE_BLOCK_MIGRATION
    if (cap_list[MIGRATION_CAPABILITY_BLOCK]) {
        error_setg(errp, "QEMU compiled without old-style (blk/-b, inc/-i) "
                   "block migration");
        error_append_hint(errp, "Use drive_mirror+NBD instead.\n");
        return false;
    }
#endif

#ifndef CONFIG_REPLICATION
    if (cap_list[MIGRATION_CAPABILITY_X_COLO]) {
        error_setg(errp, "QEMU compiled without replication module"
                   " can't enable COLO");
        error_append_hint(errp, "Please enable replication before COLO.\n");
        return false;
    }
#endif

    if (cap_list[MIGRATION_CAPABILITY_POSTCOPY_RAM]) {
        /* This check is reasonably expensive, so only when it's being
         * set the first time, also it's only the destination that needs
         * special support.
         */
        if (!old_postcopy_cap && runstate_check(RUN_STATE_INMIGRATE) &&
            !postcopy_ram_supported_by_host(mis)) {
            /* postcopy_ram_supported_by_host will have emitted a more
             * detailed message
             */
            error_setg(errp, "Postcopy is not supported");
            return false;
        }

        if (cap_list[MIGRATION_CAPABILITY_X_IGNORE_SHARED]) {
            error_setg(errp, "Postcopy is not compatible with ignore-shared");
            return false;
        }
    }

    if (cap_list[MIGRATION_CAPABILITY_BACKGROUND_SNAPSHOT]) {
        WriteTrackingSupport wt_support;
        int idx;
        /*
         * Check if 'background-snapshot' capability is supported by
         * host kernel and compatible with guest memory configuration.
         */
        wt_support = migrate_query_write_tracking();
        if (wt_support < WT_SUPPORT_AVAILABLE) {
            error_setg(errp, "Background-snapshot is not supported by host kernel");
            return false;
        }
        if (wt_support < WT_SUPPORT_COMPATIBLE) {
            error_setg(errp, "Background-snapshot is not compatible "
                       "with guest memory configuration");
            return false;
        }

        /*
         * Check if there are any migration capabilities
         * incompatible with 'background-snapshot'.
         */
        for (idx = 0; idx < check_caps_background_snapshot.size; idx++) {
            int incomp_cap = check_caps_background_snapshot.caps[idx];
            if (cap_list[incomp_cap]) {
                error_setg(errp,
                           "Background-snapshot is not compatible with %s",
                           MigrationCapability_str(incomp_cap));
                return false;
            }
        }
    }

    return true;
}

static void fill_destination_migration_info(MigrationInfo *info)
{
    MigrationIncomingState *mis = migration_incoming_get_current();

    if (mis->socket_address_list) {
        info->has_socket_address = true;
        info->socket_address =
            QAPI_CLONE(SocketAddressList, mis->socket_address_list);
    }

    switch (mis->state) {
    case MIGRATION_STATUS_NONE:
        return;
    case MIGRATION_STATUS_SETUP:
    case MIGRATION_STATUS_CANCELLING:
    case MIGRATION_STATUS_CANCELLED:
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_PAUSED:
    case MIGRATION_STATUS_POSTCOPY_RECOVER:
    case MIGRATION_STATUS_FAILED:
    case MIGRATION_STATUS_COLO:
        info->has_status = true;
        break;
    case MIGRATION_STATUS_COMPLETED:
        info->has_status = true;
        fill_destination_postcopy_migration_info(info);
        break;
    }
    info->status = mis->state;
}

MigrationInfo *qmp_query_migrate(Error **errp)
{
    MigrationInfo *info = g_malloc0(sizeof(*info));

    fill_destination_migration_info(info);
    fill_source_migration_info(info);

    return info;
}

void qmp_migrate_set_capabilities(MigrationCapabilityStatusList *params,
                                  Error **errp)
{
    MigrationState *s = migrate_get_current();
    MigrationCapabilityStatusList *cap;
    bool cap_list[MIGRATION_CAPABILITY__MAX];

    if (migration_is_running(s->state)) {
        error_setg(errp, QERR_MIGRATION_ACTIVE);
        return;
    }

    memcpy(cap_list, s->enabled_capabilities, sizeof(cap_list));
    if (!migrate_caps_check(cap_list, params, errp)) {
        return;
    }

    for (cap = params; cap; cap = cap->next) {
        s->enabled_capabilities[cap->value->capability] = cap->value->state;
    }
}

/*
 * Check whether the parameters are valid.  Error will be put into errp
 * (if provided).  Return true if valid, otherwise false.
 */
static bool migrate_params_check(MigrationParameters *params, Error **errp)
{
    if (params->has_compress_level &&
        (params->compress_level > 9)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "compress_level",
                   "a value between 0 and 9");
        return false;
    }

    if (params->has_compress_threads && (params->compress_threads < 1)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "compress_threads",
                   "a value between 1 and 255");
        return false;
    }

    if (params->has_decompress_threads && (params->decompress_threads < 1)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "decompress_threads",
                   "a value between 1 and 255");
        return false;
    }

    if (params->has_throttle_trigger_threshold &&
        (params->throttle_trigger_threshold < 1 ||
         params->throttle_trigger_threshold > 100)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "throttle_trigger_threshold",
                   "an integer in the range of 1 to 100");
        return false;
    }

    if (params->has_cpu_throttle_initial &&
        (params->cpu_throttle_initial < 1 ||
         params->cpu_throttle_initial > 99)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "cpu_throttle_initial",
                   "an integer in the range of 1 to 99");
        return false;
    }

    if (params->has_cpu_throttle_increment &&
        (params->cpu_throttle_increment < 1 ||
         params->cpu_throttle_increment > 99)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "cpu_throttle_increment",
                   "an integer in the range of 1 to 99");
        return false;
    }

    if (params->has_max_bandwidth && (params->max_bandwidth > SIZE_MAX)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "max_bandwidth",
                   "an integer in the range of 0 to "stringify(SIZE_MAX)
                   " bytes/second");
        return false;
    }

    if (params->has_downtime_limit &&
        (params->downtime_limit > MAX_MIGRATE_DOWNTIME)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "downtime_limit",
                   "an integer in the range of 0 to "
                    stringify(MAX_MIGRATE_DOWNTIME)" ms");
        return false;
    }

    /* x_checkpoint_delay is now always positive */

    if (params->has_multifd_channels && (params->multifd_channels < 1)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "multifd_channels",
                   "a value between 1 and 255");
        return false;
    }

    if (params->has_multifd_zlib_level &&
        (params->multifd_zlib_level > 9)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "multifd_zlib_level",
                   "a value between 0 and 9");
        return false;
    }

    if (params->has_multifd_zstd_level &&
        (params->multifd_zstd_level > 20)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "multifd_zstd_level",
                   "a value between 0 and 20");
        return false;
    }

    if (params->has_xbzrle_cache_size &&
        (params->xbzrle_cache_size < qemu_target_page_size() ||
         !is_power_of_2(params->xbzrle_cache_size))) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "xbzrle_cache_size",
                   "a power of two no less than the target page size");
        return false;
    }

    if (params->has_max_cpu_throttle &&
        (params->max_cpu_throttle < params->cpu_throttle_initial ||
         params->max_cpu_throttle > 99)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "max_cpu_throttle",
                   "an integer in the range of cpu_throttle_initial to 99");
        return false;
    }
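    /*
     * Illustrative examples for the checks above (not an exhaustive list):
     * with a 4 KiB target page size, xbzrle_cache_size = 4 MiB passes (a
     * power of two >= the page size) while 5 MiB or 2 KiB would be rejected;
     * max_cpu_throttle = 50 combined with cpu_throttle_initial = 60 also
     * fails, since the maximum may not be below the initial value.
     */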
    if (params->has_announce_initial &&
        params->announce_initial > 100000) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "announce_initial",
                   "a value between 0 and 100000");
        return false;
    }
    if (params->has_announce_max &&
        params->announce_max > 100000) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "announce_max",
                   "a value between 0 and 100000");
        return false;
    }
    if (params->has_announce_rounds &&
        params->announce_rounds > 1000) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "announce_rounds",
                   "a value between 0 and 1000");
        return false;
    }
    if (params->has_announce_step &&
        (params->announce_step < 1 ||
         params->announce_step > 10000)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "announce_step",
                   "a value between 1 and 10000");
        return false;
    }

    if (params->has_block_bitmap_mapping &&
        !check_dirty_bitmap_mig_alias_map(params->block_bitmap_mapping, errp)) {
        error_prepend(errp, "Invalid mapping given for block-bitmap-mapping: ");
        return false;
    }

    return true;
}

static void migrate_params_test_apply(MigrateSetParameters *params,
                                      MigrationParameters *dest)
{
    *dest = migrate_get_current()->parameters;

    /* TODO use QAPI_CLONE() instead of duplicating it inline */

    if (params->has_compress_level) {
        dest->compress_level = params->compress_level;
    }

    if (params->has_compress_threads) {
        dest->compress_threads = params->compress_threads;
    }

    if (params->has_compress_wait_thread) {
        dest->compress_wait_thread = params->compress_wait_thread;
    }

    if (params->has_decompress_threads) {
        dest->decompress_threads = params->decompress_threads;
    }

    if (params->has_throttle_trigger_threshold) {
        dest->throttle_trigger_threshold = params->throttle_trigger_threshold;
    }

    if (params->has_cpu_throttle_initial) {
        dest->cpu_throttle_initial = params->cpu_throttle_initial;
    }

    if (params->has_cpu_throttle_increment) {
        dest->cpu_throttle_increment = params->cpu_throttle_increment;
    }

    if (params->has_cpu_throttle_tailslow) {
        dest->cpu_throttle_tailslow = params->cpu_throttle_tailslow;
    }

    if (params->has_tls_creds) {
        assert(params->tls_creds->type == QTYPE_QSTRING);
        dest->tls_creds = params->tls_creds->u.s;
    }

    if (params->has_tls_hostname) {
        assert(params->tls_hostname->type == QTYPE_QSTRING);
        dest->tls_hostname = params->tls_hostname->u.s;
    }

    if (params->has_max_bandwidth) {
        dest->max_bandwidth = params->max_bandwidth;
    }

    if (params->has_downtime_limit) {
        dest->downtime_limit = params->downtime_limit;
    }

    if (params->has_x_checkpoint_delay) {
        dest->x_checkpoint_delay = params->x_checkpoint_delay;
    }

    if (params->has_block_incremental) {
        dest->block_incremental = params->block_incremental;
    }
    if (params->has_multifd_channels) {
        dest->multifd_channels = params->multifd_channels;
    }
    if (params->has_multifd_compression) {
        dest->multifd_compression = params->multifd_compression;
    }
    if (params->has_xbzrle_cache_size) {
        dest->xbzrle_cache_size = params->xbzrle_cache_size;
    }
    if (params->has_max_postcopy_bandwidth) {
        dest->max_postcopy_bandwidth = params->max_postcopy_bandwidth;
    }
    if (params->has_max_cpu_throttle) {
        dest->max_cpu_throttle = params->max_cpu_throttle;
    }
    if (params->has_announce_initial) {
        dest->announce_initial = params->announce_initial;
    }
    if (params->has_announce_max) {
        dest->announce_max = params->announce_max;
    }
    if (params->has_announce_rounds) {
        dest->announce_rounds = params->announce_rounds;
    }
    if (params->has_announce_step) {
        dest->announce_step = params->announce_step;
    }

    if (params->has_block_bitmap_mapping) {
        dest->has_block_bitmap_mapping = true;
        dest->block_bitmap_mapping = params->block_bitmap_mapping;
    }
}

static void migrate_params_apply(MigrateSetParameters *params, Error **errp)
{
    MigrationState *s = migrate_get_current();

    /* TODO use QAPI_CLONE() instead of duplicating it inline */

    if (params->has_compress_level) {
        s->parameters.compress_level = params->compress_level;
    }

    if (params->has_compress_threads) {
        s->parameters.compress_threads = params->compress_threads;
    }

    if (params->has_compress_wait_thread) {
        s->parameters.compress_wait_thread = params->compress_wait_thread;
    }

    if (params->has_decompress_threads) {
        s->parameters.decompress_threads = params->decompress_threads;
    }

    if (params->has_throttle_trigger_threshold) {
        s->parameters.throttle_trigger_threshold = params->throttle_trigger_threshold;
    }

    if (params->has_cpu_throttle_initial) {
        s->parameters.cpu_throttle_initial = params->cpu_throttle_initial;
    }

    if (params->has_cpu_throttle_increment) {
        s->parameters.cpu_throttle_increment = params->cpu_throttle_increment;
    }

    if (params->has_cpu_throttle_tailslow) {
        s->parameters.cpu_throttle_tailslow = params->cpu_throttle_tailslow;
    }

    if (params->has_tls_creds) {
        g_free(s->parameters.tls_creds);
        assert(params->tls_creds->type == QTYPE_QSTRING);
        s->parameters.tls_creds = g_strdup(params->tls_creds->u.s);
    }

    if (params->has_tls_hostname) {
        g_free(s->parameters.tls_hostname);
        assert(params->tls_hostname->type == QTYPE_QSTRING);
        s->parameters.tls_hostname = g_strdup(params->tls_hostname->u.s);
    }

    if (params->has_tls_authz) {
        g_free(s->parameters.tls_authz);
        assert(params->tls_authz->type == QTYPE_QSTRING);
        s->parameters.tls_authz = g_strdup(params->tls_authz->u.s);
    }

    if (params->has_max_bandwidth) {
        s->parameters.max_bandwidth = params->max_bandwidth;
        if (s->to_dst_file && !migration_in_postcopy()) {
            qemu_file_set_rate_limit(s->to_dst_file,
                                s->parameters.max_bandwidth / XFER_LIMIT_RATIO);
        }
    }

    if (params->has_downtime_limit) {
        s->parameters.downtime_limit = params->downtime_limit;
    }

    if (params->has_x_checkpoint_delay) {
        s->parameters.x_checkpoint_delay = params->x_checkpoint_delay;
        if (migration_in_colo_state()) {
            colo_checkpoint_notify(s);
        }
    }

    if (params->has_block_incremental) {
        s->parameters.block_incremental = params->block_incremental;
    }
    if (params->has_multifd_channels) {
        s->parameters.multifd_channels = params->multifd_channels;
    }
    if (params->has_multifd_compression) {
        s->parameters.multifd_compression = params->multifd_compression;
    }
    if (params->has_xbzrle_cache_size) {
        s->parameters.xbzrle_cache_size = params->xbzrle_cache_size;
        xbzrle_cache_resize(params->xbzrle_cache_size, errp);
    }
    if (params->has_max_postcopy_bandwidth) {
        s->parameters.max_postcopy_bandwidth = params->max_postcopy_bandwidth;
        if (s->to_dst_file && migration_in_postcopy()) {
            qemu_file_set_rate_limit(s->to_dst_file,
                    s->parameters.max_postcopy_bandwidth / XFER_LIMIT_RATIO);
        }
    }
    if (params->has_max_cpu_throttle) {
        s->parameters.max_cpu_throttle = params->max_cpu_throttle;
    }
    if (params->has_announce_initial) {
        s->parameters.announce_initial = params->announce_initial;
    }
    if (params->has_announce_max) {
        s->parameters.announce_max = params->announce_max;
    }
    if (params->has_announce_rounds) {
        s->parameters.announce_rounds = params->announce_rounds;
    }
    if (params->has_announce_step) {
        s->parameters.announce_step = params->announce_step;
    }

    if (params->has_block_bitmap_mapping) {
        qapi_free_BitmapMigrationNodeAliasList(
            s->parameters.block_bitmap_mapping);

        s->parameters.has_block_bitmap_mapping = true;
        s->parameters.block_bitmap_mapping =
            QAPI_CLONE(BitmapMigrationNodeAliasList,
                       params->block_bitmap_mapping);
    }
}

void qmp_migrate_set_parameters(MigrateSetParameters *params, Error **errp)
{
    MigrationParameters tmp;

    /* TODO Rewrite "" to null instead */
    if (params->has_tls_creds
        && params->tls_creds->type == QTYPE_QNULL) {
        qobject_unref(params->tls_creds->u.n);
        params->tls_creds->type = QTYPE_QSTRING;
        params->tls_creds->u.s = strdup("");
    }
    /* TODO Rewrite "" to null instead */
    if (params->has_tls_hostname
        && params->tls_hostname->type == QTYPE_QNULL) {
        qobject_unref(params->tls_hostname->u.n);
        params->tls_hostname->type = QTYPE_QSTRING;
        params->tls_hostname->u.s = strdup("");
    }

    migrate_params_test_apply(params, &tmp);

    if (!migrate_params_check(&tmp, errp)) {
        /* Invalid parameter */
        return;
    }

    migrate_params_apply(params, errp);
}


void qmp_migrate_start_postcopy(Error **errp)
{
    MigrationState *s = migrate_get_current();

    if (!migrate_postcopy()) {
        error_setg(errp, "Enable postcopy with migrate_set_capability before"
                   " the start of migration");
        return;
    }

    if (s->state == MIGRATION_STATUS_NONE) {
        error_setg(errp, "Postcopy must be started after migration has been"
                   " started");
        return;
    }
    /*
     * we don't error if migration has finished since that would be racy
     * with issuing this command.
     */
    qatomic_set(&s->start_postcopy, true);
}

/* shared migration helpers */

void migrate_set_state(int *state, int old_state, int new_state)
{
    assert(new_state < MIGRATION_STATUS__MAX);
    if (qatomic_cmpxchg(state, old_state, new_state) == old_state) {
        trace_migrate_set_state(MigrationStatus_str(new_state));
        migrate_generate_event(new_state);
    }
}

static MigrationCapabilityStatus *migrate_cap_add(MigrationCapability index,
                                                  bool state)
{
    MigrationCapabilityStatus *cap;

    cap = g_new0(MigrationCapabilityStatus, 1);
    cap->capability = index;
    cap->state = state;

    return cap;
}

void migrate_set_block_enabled(bool value, Error **errp)
{
    MigrationCapabilityStatusList *cap = NULL;

    QAPI_LIST_PREPEND(cap, migrate_cap_add(MIGRATION_CAPABILITY_BLOCK, value));
    qmp_migrate_set_capabilities(cap, errp);
    qapi_free_MigrationCapabilityStatusList(cap);
}

static void migrate_set_block_incremental(MigrationState *s, bool value)
{
    s->parameters.block_incremental = value;
}

static void block_cleanup_parameters(MigrationState *s)
{
    if (s->must_remove_block_options) {
        /* setting to false can never fail */
        migrate_set_block_enabled(false, &error_abort);
        migrate_set_block_incremental(s, false);
        s->must_remove_block_options = false;
    }
}

static void migrate_fd_cleanup(MigrationState *s)
{
    qemu_bh_delete(s->cleanup_bh);
    s->cleanup_bh = NULL;

    qemu_savevm_state_cleanup();

    if (s->to_dst_file) {
        QEMUFile *tmp;

        trace_migrate_fd_cleanup();
        qemu_mutex_unlock_iothread();
        if (s->migration_thread_running) {
            qemu_thread_join(&s->thread);
            s->migration_thread_running = false;
        }
        qemu_mutex_lock_iothread();

        multifd_save_cleanup();
        qemu_mutex_lock(&s->qemu_file_lock);
        tmp = s->to_dst_file;
        s->to_dst_file = NULL;
        qemu_mutex_unlock(&s->qemu_file_lock);
        /*
         * Close the file handle without the lock to make sure the
         * critical section won't block for long.
         */
        migration_ioc_unregister_yank_from_file(tmp);
        qemu_fclose(tmp);
    }

    assert(!migration_is_active(s));

    if (s->state == MIGRATION_STATUS_CANCELLING) {
        migrate_set_state(&s->state, MIGRATION_STATUS_CANCELLING,
                          MIGRATION_STATUS_CANCELLED);
    }

    if (s->error) {
        /* It is used on info migrate.  We can't free it */
We can't free it */ 1824 error_report_err(error_copy(s->error)); 1825 } 1826 notifier_list_notify(&migration_state_notifiers, s); 1827 block_cleanup_parameters(s); 1828 yank_unregister_instance(MIGRATION_YANK_INSTANCE); 1829 } 1830 1831 static void migrate_fd_cleanup_schedule(MigrationState *s) 1832 { 1833 /* 1834 * Ref the state for bh, because it may be called when 1835 * there're already no other refs 1836 */ 1837 object_ref(OBJECT(s)); 1838 qemu_bh_schedule(s->cleanup_bh); 1839 } 1840 1841 static void migrate_fd_cleanup_bh(void *opaque) 1842 { 1843 MigrationState *s = opaque; 1844 migrate_fd_cleanup(s); 1845 object_unref(OBJECT(s)); 1846 } 1847 1848 void migrate_set_error(MigrationState *s, const Error *error) 1849 { 1850 QEMU_LOCK_GUARD(&s->error_mutex); 1851 if (!s->error) { 1852 s->error = error_copy(error); 1853 } 1854 } 1855 1856 static void migrate_error_free(MigrationState *s) 1857 { 1858 QEMU_LOCK_GUARD(&s->error_mutex); 1859 if (s->error) { 1860 error_free(s->error); 1861 s->error = NULL; 1862 } 1863 } 1864 1865 void migrate_fd_error(MigrationState *s, const Error *error) 1866 { 1867 trace_migrate_fd_error(error_get_pretty(error)); 1868 assert(s->to_dst_file == NULL); 1869 migrate_set_state(&s->state, MIGRATION_STATUS_SETUP, 1870 MIGRATION_STATUS_FAILED); 1871 migrate_set_error(s, error); 1872 } 1873 1874 static void migrate_fd_cancel(MigrationState *s) 1875 { 1876 int old_state ; 1877 QEMUFile *f = migrate_get_current()->to_dst_file; 1878 trace_migrate_fd_cancel(); 1879 1880 WITH_QEMU_LOCK_GUARD(&s->qemu_file_lock) { 1881 if (s->rp_state.from_dst_file) { 1882 /* shutdown the rp socket, so causing the rp thread to shutdown */ 1883 qemu_file_shutdown(s->rp_state.from_dst_file); 1884 } 1885 } 1886 1887 do { 1888 old_state = s->state; 1889 if (!migration_is_running(old_state)) { 1890 break; 1891 } 1892 /* If the migration is paused, kick it out of the pause */ 1893 if (old_state == MIGRATION_STATUS_PRE_SWITCHOVER) { 1894 qemu_sem_post(&s->pause_sem); 1895 } 1896 migrate_set_state(&s->state, old_state, MIGRATION_STATUS_CANCELLING); 1897 } while (s->state != MIGRATION_STATUS_CANCELLING); 1898 1899 /* 1900 * If we're unlucky the migration code might be stuck somewhere in a 1901 * send/write while the network has failed and is waiting to timeout; 1902 * if we've got shutdown(2) available then we can force it to quit. 1903 * The outgoing qemu file gets closed in migrate_fd_cleanup that is 1904 * called in a bh, so there is no race against this cancel. 
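 * (qemu_file_shutdown() below is what breaks a send that is stuck on a
 * dead connection; the actual close still happens later in
 * migrate_fd_cleanup().)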
1905 */ 1906 if (s->state == MIGRATION_STATUS_CANCELLING && f) { 1907 qemu_file_shutdown(f); 1908 } 1909 if (s->state == MIGRATION_STATUS_CANCELLING && s->block_inactive) { 1910 Error *local_err = NULL; 1911 1912 bdrv_invalidate_cache_all(&local_err); 1913 if (local_err) { 1914 error_report_err(local_err); 1915 } else { 1916 s->block_inactive = false; 1917 } 1918 } 1919 } 1920 1921 void add_migration_state_change_notifier(Notifier *notify) 1922 { 1923 notifier_list_add(&migration_state_notifiers, notify); 1924 } 1925 1926 void remove_migration_state_change_notifier(Notifier *notify) 1927 { 1928 notifier_remove(notify); 1929 } 1930 1931 bool migration_in_setup(MigrationState *s) 1932 { 1933 return s->state == MIGRATION_STATUS_SETUP; 1934 } 1935 1936 bool migration_has_finished(MigrationState *s) 1937 { 1938 return s->state == MIGRATION_STATUS_COMPLETED; 1939 } 1940 1941 bool migration_has_failed(MigrationState *s) 1942 { 1943 return (s->state == MIGRATION_STATUS_CANCELLED || 1944 s->state == MIGRATION_STATUS_FAILED); 1945 } 1946 1947 bool migration_in_postcopy(void) 1948 { 1949 MigrationState *s = migrate_get_current(); 1950 1951 switch (s->state) { 1952 case MIGRATION_STATUS_POSTCOPY_ACTIVE: 1953 case MIGRATION_STATUS_POSTCOPY_PAUSED: 1954 case MIGRATION_STATUS_POSTCOPY_RECOVER: 1955 return true; 1956 default: 1957 return false; 1958 } 1959 } 1960 1961 bool migration_in_postcopy_after_devices(MigrationState *s) 1962 { 1963 return migration_in_postcopy() && s->postcopy_after_devices; 1964 } 1965 1966 bool migration_in_incoming_postcopy(void) 1967 { 1968 PostcopyState ps = postcopy_state_get(); 1969 1970 return ps >= POSTCOPY_INCOMING_DISCARD && ps < POSTCOPY_INCOMING_END; 1971 } 1972 1973 bool migration_in_bg_snapshot(void) 1974 { 1975 MigrationState *s = migrate_get_current(); 1976 1977 return migrate_background_snapshot() && 1978 migration_is_setup_or_active(s->state); 1979 } 1980 1981 bool migration_is_idle(void) 1982 { 1983 MigrationState *s = current_migration; 1984 1985 if (!s) { 1986 return true; 1987 } 1988 1989 switch (s->state) { 1990 case MIGRATION_STATUS_NONE: 1991 case MIGRATION_STATUS_CANCELLED: 1992 case MIGRATION_STATUS_COMPLETED: 1993 case MIGRATION_STATUS_FAILED: 1994 return true; 1995 case MIGRATION_STATUS_SETUP: 1996 case MIGRATION_STATUS_CANCELLING: 1997 case MIGRATION_STATUS_ACTIVE: 1998 case MIGRATION_STATUS_POSTCOPY_ACTIVE: 1999 case MIGRATION_STATUS_COLO: 2000 case MIGRATION_STATUS_PRE_SWITCHOVER: 2001 case MIGRATION_STATUS_DEVICE: 2002 case MIGRATION_STATUS_WAIT_UNPLUG: 2003 return false; 2004 case MIGRATION_STATUS__MAX: 2005 g_assert_not_reached(); 2006 } 2007 2008 return false; 2009 } 2010 2011 bool migration_is_active(MigrationState *s) 2012 { 2013 return (s->state == MIGRATION_STATUS_ACTIVE || 2014 s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE); 2015 } 2016 2017 void migrate_init(MigrationState *s) 2018 { 2019 /* 2020 * Reinitialise all migration state, except 2021 * parameters/capabilities that the user set, and 2022 * locks. 
2023 */ 2024 s->cleanup_bh = 0; 2025 s->vm_start_bh = 0; 2026 s->to_dst_file = NULL; 2027 s->state = MIGRATION_STATUS_NONE; 2028 s->rp_state.from_dst_file = NULL; 2029 s->rp_state.error = false; 2030 s->mbps = 0.0; 2031 s->pages_per_second = 0.0; 2032 s->downtime = 0; 2033 s->expected_downtime = 0; 2034 s->setup_time = 0; 2035 s->start_postcopy = false; 2036 s->postcopy_after_devices = false; 2037 s->migration_thread_running = false; 2038 error_free(s->error); 2039 s->error = NULL; 2040 s->hostname = NULL; 2041 2042 migrate_set_state(&s->state, MIGRATION_STATUS_NONE, MIGRATION_STATUS_SETUP); 2043 2044 s->start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); 2045 s->total_time = 0; 2046 s->vm_was_running = false; 2047 s->iteration_initial_bytes = 0; 2048 s->threshold_size = 0; 2049 } 2050 2051 int migrate_add_blocker(Error *reason, Error **errp) 2052 { 2053 if (only_migratable) { 2054 error_propagate_prepend(errp, error_copy(reason), 2055 "disallowing migration blocker " 2056 "(--only-migratable) for: "); 2057 return -EACCES; 2058 } 2059 2060 if (migration_is_idle()) { 2061 migration_blockers = g_slist_prepend(migration_blockers, reason); 2062 return 0; 2063 } 2064 2065 error_propagate_prepend(errp, error_copy(reason), 2066 "disallowing migration blocker " 2067 "(migration in progress) for: "); 2068 return -EBUSY; 2069 } 2070 2071 void migrate_del_blocker(Error *reason) 2072 { 2073 migration_blockers = g_slist_remove(migration_blockers, reason); 2074 } 2075 2076 void qmp_migrate_incoming(const char *uri, Error **errp) 2077 { 2078 Error *local_err = NULL; 2079 static bool once = true; 2080 2081 if (!once) { 2082 error_setg(errp, "The incoming migration has already been started"); 2083 return; 2084 } 2085 if (!runstate_check(RUN_STATE_INMIGRATE)) { 2086 error_setg(errp, "'-incoming' was not specified on the command line"); 2087 return; 2088 } 2089 2090 if (!yank_register_instance(MIGRATION_YANK_INSTANCE, errp)) { 2091 return; 2092 } 2093 2094 qemu_start_incoming_migration(uri, &local_err); 2095 2096 if (local_err) { 2097 yank_unregister_instance(MIGRATION_YANK_INSTANCE); 2098 error_propagate(errp, local_err); 2099 return; 2100 } 2101 2102 once = false; 2103 } 2104 2105 void qmp_migrate_recover(const char *uri, Error **errp) 2106 { 2107 MigrationIncomingState *mis = migration_incoming_get_current(); 2108 2109 /* 2110 * Don't even bother to use ERRP_GUARD() as it _must_ always be set by 2111 * callers (no one should ignore a recover failure); if there is, it's a 2112 * programming error. 2113 */ 2114 assert(errp); 2115 2116 if (mis->state != MIGRATION_STATUS_POSTCOPY_PAUSED) { 2117 error_setg(errp, "Migrate recover can only be run " 2118 "when postcopy is paused."); 2119 return; 2120 } 2121 2122 if (qatomic_cmpxchg(&mis->postcopy_recover_triggered, 2123 false, true) == true) { 2124 error_setg(errp, "Migrate recovery is triggered already"); 2125 return; 2126 } 2127 2128 /* 2129 * Note that this call will never start a real migration; it will 2130 * only re-setup the migration stream and poke existing migration 2131 * to continue using that newly established channel. 
2132 */ 2133 qemu_start_incoming_migration(uri, errp); 2134 2135 /* Safe to dereference with the assert above */ 2136 if (*errp) { 2137 /* Reset the flag so user could still retry */ 2138 qatomic_set(&mis->postcopy_recover_triggered, false); 2139 } 2140 } 2141 2142 void qmp_migrate_pause(Error **errp) 2143 { 2144 MigrationState *ms = migrate_get_current(); 2145 MigrationIncomingState *mis = migration_incoming_get_current(); 2146 int ret; 2147 2148 if (ms->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) { 2149 /* Source side, during postcopy */ 2150 qemu_mutex_lock(&ms->qemu_file_lock); 2151 ret = qemu_file_shutdown(ms->to_dst_file); 2152 qemu_mutex_unlock(&ms->qemu_file_lock); 2153 if (ret) { 2154 error_setg(errp, "Failed to pause source migration"); 2155 } 2156 return; 2157 } 2158 2159 if (mis->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) { 2160 ret = qemu_file_shutdown(mis->from_src_file); 2161 if (ret) { 2162 error_setg(errp, "Failed to pause destination migration"); 2163 } 2164 return; 2165 } 2166 2167 error_setg(errp, "migrate-pause is currently only supported " 2168 "during postcopy-active state"); 2169 } 2170 2171 bool migration_is_blocked(Error **errp) 2172 { 2173 if (qemu_savevm_state_blocked(errp)) { 2174 return true; 2175 } 2176 2177 if (migration_blockers) { 2178 error_propagate(errp, error_copy(migration_blockers->data)); 2179 return true; 2180 } 2181 2182 return false; 2183 } 2184 2185 /* Returns true if continue to migrate, or false if error detected */ 2186 static bool migrate_prepare(MigrationState *s, bool blk, bool blk_inc, 2187 bool resume, Error **errp) 2188 { 2189 Error *local_err = NULL; 2190 2191 if (resume) { 2192 if (s->state != MIGRATION_STATUS_POSTCOPY_PAUSED) { 2193 error_setg(errp, "Cannot resume if there is no " 2194 "paused migration"); 2195 return false; 2196 } 2197 2198 /* 2199 * Postcopy recovery won't work well with release-ram 2200 * capability since release-ram will drop the page buffer as 2201 * long as the page is put into the send buffer. So if there 2202 * is a network failure happened, any page buffers that have 2203 * not yet reached the destination VM but have already been 2204 * sent from the source VM will be lost forever. Let's refuse 2205 * the client from resuming such a postcopy migration. 2206 * Luckily release-ram was designed to only be used when src 2207 * and destination VMs are on the same host, so it should be 2208 * fine. 
2209 */ 2210 if (migrate_release_ram()) { 2211 error_setg(errp, "Postcopy recovery cannot work " 2212 "when release-ram capability is set"); 2213 return false; 2214 } 2215 2216 /* This is a resume, skip init status */ 2217 return true; 2218 } 2219 2220 if (migration_is_running(s->state)) { 2221 error_setg(errp, QERR_MIGRATION_ACTIVE); 2222 return false; 2223 } 2224 2225 if (runstate_check(RUN_STATE_INMIGRATE)) { 2226 error_setg(errp, "Guest is waiting for an incoming migration"); 2227 return false; 2228 } 2229 2230 if (runstate_check(RUN_STATE_POSTMIGRATE)) { 2231 error_setg(errp, "Can't migrate the vm that was paused due to " 2232 "previous migration"); 2233 return false; 2234 } 2235 2236 if (migration_is_blocked(errp)) { 2237 return false; 2238 } 2239 2240 if (blk || blk_inc) { 2241 if (migrate_colo_enabled()) { 2242 error_setg(errp, "No disk migration is required in COLO mode"); 2243 return false; 2244 } 2245 if (migrate_use_block() || migrate_use_block_incremental()) { 2246 error_setg(errp, "Command options are incompatible with " 2247 "current migration capabilities"); 2248 return false; 2249 } 2250 migrate_set_block_enabled(true, &local_err); 2251 if (local_err) { 2252 error_propagate(errp, local_err); 2253 return false; 2254 } 2255 s->must_remove_block_options = true; 2256 } 2257 2258 if (blk_inc) { 2259 migrate_set_block_incremental(s, true); 2260 } 2261 2262 migrate_init(s); 2263 /* 2264 * set ram_counters memory to zero for a 2265 * new migration 2266 */ 2267 memset(&ram_counters, 0, sizeof(ram_counters)); 2268 2269 return true; 2270 } 2271 2272 void qmp_migrate(const char *uri, bool has_blk, bool blk, 2273 bool has_inc, bool inc, bool has_detach, bool detach, 2274 bool has_resume, bool resume, Error **errp) 2275 { 2276 Error *local_err = NULL; 2277 MigrationState *s = migrate_get_current(); 2278 const char *p = NULL; 2279 2280 if (!migrate_prepare(s, has_blk && blk, has_inc && inc, 2281 has_resume && resume, errp)) { 2282 /* Error detected, put into errp */ 2283 return; 2284 } 2285 2286 if (!(has_resume && resume)) { 2287 if (!yank_register_instance(MIGRATION_YANK_INSTANCE, errp)) { 2288 return; 2289 } 2290 } 2291 2292 if (strstart(uri, "tcp:", &p) || 2293 strstart(uri, "unix:", NULL) || 2294 strstart(uri, "vsock:", NULL)) { 2295 socket_start_outgoing_migration(s, p ? 
p : uri, &local_err); 2296 #ifdef CONFIG_RDMA 2297 } else if (strstart(uri, "rdma:", &p)) { 2298 rdma_start_outgoing_migration(s, p, &local_err); 2299 #endif 2300 } else if (strstart(uri, "exec:", &p)) { 2301 exec_start_outgoing_migration(s, p, &local_err); 2302 } else if (strstart(uri, "fd:", &p)) { 2303 fd_start_outgoing_migration(s, p, &local_err); 2304 } else { 2305 if (!(has_resume && resume)) { 2306 yank_unregister_instance(MIGRATION_YANK_INSTANCE); 2307 } 2308 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "uri", 2309 "a valid migration protocol"); 2310 migrate_set_state(&s->state, MIGRATION_STATUS_SETUP, 2311 MIGRATION_STATUS_FAILED); 2312 block_cleanup_parameters(s); 2313 return; 2314 } 2315 2316 if (local_err) { 2317 if (!(has_resume && resume)) { 2318 yank_unregister_instance(MIGRATION_YANK_INSTANCE); 2319 } 2320 migrate_fd_error(s, local_err); 2321 error_propagate(errp, local_err); 2322 return; 2323 } 2324 } 2325 2326 void qmp_migrate_cancel(Error **errp) 2327 { 2328 migration_cancel(); 2329 } 2330 2331 void qmp_migrate_continue(MigrationStatus state, Error **errp) 2332 { 2333 MigrationState *s = migrate_get_current(); 2334 if (s->state != state) { 2335 error_setg(errp, "Migration not in expected state: %s", 2336 MigrationStatus_str(s->state)); 2337 return; 2338 } 2339 qemu_sem_post(&s->pause_sem); 2340 } 2341 2342 bool migrate_release_ram(void) 2343 { 2344 MigrationState *s; 2345 2346 s = migrate_get_current(); 2347 2348 return s->enabled_capabilities[MIGRATION_CAPABILITY_RELEASE_RAM]; 2349 } 2350 2351 bool migrate_postcopy_ram(void) 2352 { 2353 MigrationState *s; 2354 2355 s = migrate_get_current(); 2356 2357 return s->enabled_capabilities[MIGRATION_CAPABILITY_POSTCOPY_RAM]; 2358 } 2359 2360 bool migrate_postcopy(void) 2361 { 2362 return migrate_postcopy_ram() || migrate_dirty_bitmaps(); 2363 } 2364 2365 bool migrate_auto_converge(void) 2366 { 2367 MigrationState *s; 2368 2369 s = migrate_get_current(); 2370 2371 return s->enabled_capabilities[MIGRATION_CAPABILITY_AUTO_CONVERGE]; 2372 } 2373 2374 bool migrate_zero_blocks(void) 2375 { 2376 MigrationState *s; 2377 2378 s = migrate_get_current(); 2379 2380 return s->enabled_capabilities[MIGRATION_CAPABILITY_ZERO_BLOCKS]; 2381 } 2382 2383 bool migrate_postcopy_blocktime(void) 2384 { 2385 MigrationState *s; 2386 2387 s = migrate_get_current(); 2388 2389 return s->enabled_capabilities[MIGRATION_CAPABILITY_POSTCOPY_BLOCKTIME]; 2390 } 2391 2392 bool migrate_use_compression(void) 2393 { 2394 MigrationState *s; 2395 2396 s = migrate_get_current(); 2397 2398 return s->enabled_capabilities[MIGRATION_CAPABILITY_COMPRESS]; 2399 } 2400 2401 int migrate_compress_level(void) 2402 { 2403 MigrationState *s; 2404 2405 s = migrate_get_current(); 2406 2407 return s->parameters.compress_level; 2408 } 2409 2410 int migrate_compress_threads(void) 2411 { 2412 MigrationState *s; 2413 2414 s = migrate_get_current(); 2415 2416 return s->parameters.compress_threads; 2417 } 2418 2419 int migrate_compress_wait_thread(void) 2420 { 2421 MigrationState *s; 2422 2423 s = migrate_get_current(); 2424 2425 return s->parameters.compress_wait_thread; 2426 } 2427 2428 int migrate_decompress_threads(void) 2429 { 2430 MigrationState *s; 2431 2432 s = migrate_get_current(); 2433 2434 return s->parameters.decompress_threads; 2435 } 2436 2437 bool migrate_dirty_bitmaps(void) 2438 { 2439 MigrationState *s; 2440 2441 s = migrate_get_current(); 2442 2443 return s->enabled_capabilities[MIGRATION_CAPABILITY_DIRTY_BITMAPS]; 2444 } 2445 2446 bool migrate_ignore_shared(void) 2447 
{ 2448 MigrationState *s; 2449 2450 s = migrate_get_current(); 2451 2452 return s->enabled_capabilities[MIGRATION_CAPABILITY_X_IGNORE_SHARED]; 2453 } 2454 2455 bool migrate_validate_uuid(void) 2456 { 2457 MigrationState *s; 2458 2459 s = migrate_get_current(); 2460 2461 return s->enabled_capabilities[MIGRATION_CAPABILITY_VALIDATE_UUID]; 2462 } 2463 2464 bool migrate_use_events(void) 2465 { 2466 MigrationState *s; 2467 2468 s = migrate_get_current(); 2469 2470 return s->enabled_capabilities[MIGRATION_CAPABILITY_EVENTS]; 2471 } 2472 2473 bool migrate_use_multifd(void) 2474 { 2475 MigrationState *s; 2476 2477 s = migrate_get_current(); 2478 2479 return s->enabled_capabilities[MIGRATION_CAPABILITY_MULTIFD]; 2480 } 2481 2482 bool migrate_pause_before_switchover(void) 2483 { 2484 MigrationState *s; 2485 2486 s = migrate_get_current(); 2487 2488 return s->enabled_capabilities[ 2489 MIGRATION_CAPABILITY_PAUSE_BEFORE_SWITCHOVER]; 2490 } 2491 2492 int migrate_multifd_channels(void) 2493 { 2494 MigrationState *s; 2495 2496 s = migrate_get_current(); 2497 2498 return s->parameters.multifd_channels; 2499 } 2500 2501 MultiFDCompression migrate_multifd_compression(void) 2502 { 2503 MigrationState *s; 2504 2505 s = migrate_get_current(); 2506 2507 return s->parameters.multifd_compression; 2508 } 2509 2510 int migrate_multifd_zlib_level(void) 2511 { 2512 MigrationState *s; 2513 2514 s = migrate_get_current(); 2515 2516 return s->parameters.multifd_zlib_level; 2517 } 2518 2519 int migrate_multifd_zstd_level(void) 2520 { 2521 MigrationState *s; 2522 2523 s = migrate_get_current(); 2524 2525 return s->parameters.multifd_zstd_level; 2526 } 2527 2528 int migrate_use_xbzrle(void) 2529 { 2530 MigrationState *s; 2531 2532 s = migrate_get_current(); 2533 2534 return s->enabled_capabilities[MIGRATION_CAPABILITY_XBZRLE]; 2535 } 2536 2537 uint64_t migrate_xbzrle_cache_size(void) 2538 { 2539 MigrationState *s; 2540 2541 s = migrate_get_current(); 2542 2543 return s->parameters.xbzrle_cache_size; 2544 } 2545 2546 static int64_t migrate_max_postcopy_bandwidth(void) 2547 { 2548 MigrationState *s; 2549 2550 s = migrate_get_current(); 2551 2552 return s->parameters.max_postcopy_bandwidth; 2553 } 2554 2555 bool migrate_use_block(void) 2556 { 2557 MigrationState *s; 2558 2559 s = migrate_get_current(); 2560 2561 return s->enabled_capabilities[MIGRATION_CAPABILITY_BLOCK]; 2562 } 2563 2564 bool migrate_use_return_path(void) 2565 { 2566 MigrationState *s; 2567 2568 s = migrate_get_current(); 2569 2570 return s->enabled_capabilities[MIGRATION_CAPABILITY_RETURN_PATH]; 2571 } 2572 2573 bool migrate_use_block_incremental(void) 2574 { 2575 MigrationState *s; 2576 2577 s = migrate_get_current(); 2578 2579 return s->parameters.block_incremental; 2580 } 2581 2582 bool migrate_background_snapshot(void) 2583 { 2584 MigrationState *s; 2585 2586 s = migrate_get_current(); 2587 2588 return s->enabled_capabilities[MIGRATION_CAPABILITY_BACKGROUND_SNAPSHOT]; 2589 } 2590 2591 /* migration thread support */ 2592 /* 2593 * Something bad happened to the RP stream, mark an error 2594 * The caller shall print or trace something to indicate why 2595 */ 2596 static void mark_source_rp_bad(MigrationState *s) 2597 { 2598 s->rp_state.error = true; 2599 } 2600 2601 static struct rp_cmd_args { 2602 ssize_t len; /* -1 = variable */ 2603 const char *name; 2604 } rp_cmd_args[] = { 2605 [MIG_RP_MSG_INVALID] = { .len = -1, .name = "INVALID" }, 2606 [MIG_RP_MSG_SHUT] = { .len = 4, .name = "SHUT" }, 2607 [MIG_RP_MSG_PONG] = { .len = 4, .name = "PONG" }, 2608 
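    /*
     * Note: the fixed .len values are validated against the header that
     * source_return_path_thread() reads off the wire (be16 type, be16
     * length, then the payload).  For example, the REQ_PAGES entry below
     * carries a 12-byte big-endian payload, parsed further down as:
     *
     *   start = ldq_be_p(buf);       bytes 0..7
     *   len   = ldl_be_p(buf + 8);   bytes 8..11
     */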
[MIG_RP_MSG_REQ_PAGES]    = { .len = 12, .name = "REQ_PAGES" },
    [MIG_RP_MSG_REQ_PAGES_ID] = { .len = -1, .name = "REQ_PAGES_ID" },
    [MIG_RP_MSG_RECV_BITMAP]  = { .len = -1, .name = "RECV_BITMAP" },
    [MIG_RP_MSG_RESUME_ACK]   = { .len =  4, .name = "RESUME_ACK" },
    [MIG_RP_MSG_MAX]          = { .len = -1, .name = "MAX" },
};

/*
 * Process a request for pages received on the return path.  We're
 * allowed to send more than requested (e.g. to round to our page size)
 * and we don't need to send pages that have already been sent.
 */
static void migrate_handle_rp_req_pages(MigrationState *ms, const char *rbname,
                                        ram_addr_t start, size_t len)
{
    long our_host_ps = qemu_real_host_page_size;

    trace_migrate_handle_rp_req_pages(rbname, start, len);

    /*
     * Since we currently insist on matching page sizes, just sanity check
     * we're being asked for whole host pages.
     */
    if (start & (our_host_ps - 1) ||
        (len & (our_host_ps - 1))) {
        error_report("%s: Misaligned page request, start: " RAM_ADDR_FMT
                     " len: %zd", __func__, start, len);
        mark_source_rp_bad(ms);
        return;
    }

    if (ram_save_queue_pages(rbname, start, len)) {
        mark_source_rp_bad(ms);
    }
}

/* Return true to retry, false to quit */
static bool postcopy_pause_return_path_thread(MigrationState *s)
{
    trace_postcopy_pause_return_path();

    qemu_sem_wait(&s->postcopy_pause_rp_sem);

    trace_postcopy_pause_return_path_continued();

    return true;
}

static int migrate_handle_rp_recv_bitmap(MigrationState *s, char *block_name)
{
    RAMBlock *block = qemu_ram_block_by_name(block_name);

    if (!block) {
        error_report("%s: invalid block name '%s'", __func__, block_name);
        return -EINVAL;
    }

    /* Fetch the received bitmap and refresh the dirty bitmap */
    return ram_dirty_bitmap_reload(s, block);
}

static int migrate_handle_rp_resume_ack(MigrationState *s, uint32_t value)
{
    trace_source_return_path_thread_resume_ack(value);

    if (value != MIGRATION_RESUME_ACK_VALUE) {
        error_report("%s: illegal resume_ack value %"PRIu32,
                     __func__, value);
        return -1;
    }

    /* Now both sides are active.
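 * This resume-ack is the final step of the postcopy recovery
 * handshake: postcopy_resume_handshake() is blocked on
 * rp_state.rp_sem until the state transition and sem post below.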
*/
    migrate_set_state(&s->state, MIGRATION_STATUS_POSTCOPY_RECOVER,
                      MIGRATION_STATUS_POSTCOPY_ACTIVE);

    /* Notify the send thread that it's time to continue sending pages */
    qemu_sem_post(&s->rp_state.rp_sem);

    return 0;
}

/* Release ms->rp_state.from_dst_file in a safe way */
static void migration_release_from_dst_file(MigrationState *ms)
{
    QEMUFile *file;

    WITH_QEMU_LOCK_GUARD(&ms->qemu_file_lock) {
        /*
         * Reset the from_dst_file pointer first before releasing it, as we
         * can't block within the lock section
         */
        file = ms->rp_state.from_dst_file;
        ms->rp_state.from_dst_file = NULL;
    }

    qemu_fclose(file);
}

/*
 * Handles messages sent on the return path towards the source VM
 */
static void *source_return_path_thread(void *opaque)
{
    MigrationState *ms = opaque;
    QEMUFile *rp = ms->rp_state.from_dst_file;
    uint16_t header_len, header_type;
    uint8_t buf[512];
    uint32_t tmp32, sibling_error;
    ram_addr_t start = 0; /* =0 to silence warning */
    size_t len = 0, expected_len;
    int res;

    trace_source_return_path_thread_entry();
    rcu_register_thread();

retry:
    while (!ms->rp_state.error && !qemu_file_get_error(rp) &&
           migration_is_setup_or_active(ms->state)) {
        trace_source_return_path_thread_loop_top();
        header_type = qemu_get_be16(rp);
        header_len = qemu_get_be16(rp);

        if (qemu_file_get_error(rp)) {
            mark_source_rp_bad(ms);
            goto out;
        }

        if (header_type >= MIG_RP_MSG_MAX ||
            header_type == MIG_RP_MSG_INVALID) {
            error_report("RP: Received invalid message 0x%04x length 0x%04x",
                         header_type, header_len);
            mark_source_rp_bad(ms);
            goto out;
        }

        if ((rp_cmd_args[header_type].len != -1 &&
             header_len != rp_cmd_args[header_type].len) ||
            header_len > sizeof(buf)) {
            error_report("RP: Received '%s' message (0x%04x) with "
                         "incorrect length %d expecting %zu",
                         rp_cmd_args[header_type].name, header_type,
                         header_len, (size_t)rp_cmd_args[header_type].len);
            mark_source_rp_bad(ms);
            goto out;
        }

        /* We know we've got a valid header by this point */
        res = qemu_get_buffer(rp, buf, header_len);
        if (res != header_len) {
            error_report("RP: Failed reading data for message 0x%04x"
                         " read %d expected %d",
                         header_type, res, header_len);
            mark_source_rp_bad(ms);
            goto out;
        }

        /* OK, we have the message and the data */
        switch (header_type) {
        case MIG_RP_MSG_SHUT:
            sibling_error = ldl_be_p(buf);
            trace_source_return_path_thread_shut(sibling_error);
            if (sibling_error) {
                error_report("RP: Sibling indicated error %d", sibling_error);
                mark_source_rp_bad(ms);
            }
            /*
             * We'll let the main thread deal with closing the RP;
             * we could do a shutdown(2) on it, but we're the only user
             * anyway, so there's nothing gained.
2778 */ 2779 goto out; 2780 2781 case MIG_RP_MSG_PONG: 2782 tmp32 = ldl_be_p(buf); 2783 trace_source_return_path_thread_pong(tmp32); 2784 break; 2785 2786 case MIG_RP_MSG_REQ_PAGES: 2787 start = ldq_be_p(buf); 2788 len = ldl_be_p(buf + 8); 2789 migrate_handle_rp_req_pages(ms, NULL, start, len); 2790 break; 2791 2792 case MIG_RP_MSG_REQ_PAGES_ID: 2793 expected_len = 12 + 1; /* header + termination */ 2794 2795 if (header_len >= expected_len) { 2796 start = ldq_be_p(buf); 2797 len = ldl_be_p(buf + 8); 2798 /* Now we expect an idstr */ 2799 tmp32 = buf[12]; /* Length of the following idstr */ 2800 buf[13 + tmp32] = '\0'; 2801 expected_len += tmp32; 2802 } 2803 if (header_len != expected_len) { 2804 error_report("RP: Req_Page_id with length %d expecting %zd", 2805 header_len, expected_len); 2806 mark_source_rp_bad(ms); 2807 goto out; 2808 } 2809 migrate_handle_rp_req_pages(ms, (char *)&buf[13], start, len); 2810 break; 2811 2812 case MIG_RP_MSG_RECV_BITMAP: 2813 if (header_len < 1) { 2814 error_report("%s: missing block name", __func__); 2815 mark_source_rp_bad(ms); 2816 goto out; 2817 } 2818 /* Format: len (1B) + idstr (<255B). This ends the idstr. */ 2819 buf[buf[0] + 1] = '\0'; 2820 if (migrate_handle_rp_recv_bitmap(ms, (char *)(buf + 1))) { 2821 mark_source_rp_bad(ms); 2822 goto out; 2823 } 2824 break; 2825 2826 case MIG_RP_MSG_RESUME_ACK: 2827 tmp32 = ldl_be_p(buf); 2828 if (migrate_handle_rp_resume_ack(ms, tmp32)) { 2829 mark_source_rp_bad(ms); 2830 goto out; 2831 } 2832 break; 2833 2834 default: 2835 break; 2836 } 2837 } 2838 2839 out: 2840 res = qemu_file_get_error(rp); 2841 if (res) { 2842 if (res == -EIO && migration_in_postcopy()) { 2843 /* 2844 * Maybe there is something we can do: it looks like a 2845 * network down issue, and we pause for a recovery. 2846 */ 2847 migration_release_from_dst_file(ms); 2848 rp = NULL; 2849 if (postcopy_pause_return_path_thread(ms)) { 2850 /* 2851 * Reload rp, reset the rest. Referencing it is safe since 2852 * it's reset only by us above, or when migration completes 2853 */ 2854 rp = ms->rp_state.from_dst_file; 2855 ms->rp_state.error = false; 2856 goto retry; 2857 } 2858 } 2859 2860 trace_source_return_path_thread_bad_end(); 2861 mark_source_rp_bad(ms); 2862 } 2863 2864 trace_source_return_path_thread_end(); 2865 migration_release_from_dst_file(ms); 2866 rcu_unregister_thread(); 2867 return NULL; 2868 } 2869 2870 static int open_return_path_on_source(MigrationState *ms, 2871 bool create_thread) 2872 { 2873 ms->rp_state.from_dst_file = qemu_file_get_return_path(ms->to_dst_file); 2874 if (!ms->rp_state.from_dst_file) { 2875 return -1; 2876 } 2877 2878 trace_open_return_path_on_source(); 2879 2880 if (!create_thread) { 2881 /* We're done */ 2882 return 0; 2883 } 2884 2885 qemu_thread_create(&ms->rp_state.rp_thread, "return path", 2886 source_return_path_thread, ms, QEMU_THREAD_JOINABLE); 2887 ms->rp_state.rp_thread_created = true; 2888 2889 trace_open_return_path_on_source_continue(); 2890 2891 return 0; 2892 } 2893 2894 /* Returns 0 if the RP was ok, otherwise there was an error on the RP */ 2895 static int await_return_path_close_on_source(MigrationState *ms) 2896 { 2897 /* 2898 * If this is a normal exit then the destination will send a SHUT and the 2899 * rp_thread will exit, however if there's an error we need to cause 2900 * it to exit. 2901 */ 2902 if (qemu_file_get_error(ms->to_dst_file) && ms->rp_state.from_dst_file) { 2903 /* 2904 * shutdown(2), if we have it, will cause it to unblock if it's stuck 2905 * waiting for the destination. 
2906 */ 2907 qemu_file_shutdown(ms->rp_state.from_dst_file); 2908 mark_source_rp_bad(ms); 2909 } 2910 trace_await_return_path_close_on_source_joining(); 2911 qemu_thread_join(&ms->rp_state.rp_thread); 2912 ms->rp_state.rp_thread_created = false; 2913 trace_await_return_path_close_on_source_close(); 2914 return ms->rp_state.error; 2915 } 2916 2917 /* 2918 * Switch from normal iteration to postcopy 2919 * Returns non-0 on error 2920 */ 2921 static int postcopy_start(MigrationState *ms) 2922 { 2923 int ret; 2924 QIOChannelBuffer *bioc; 2925 QEMUFile *fb; 2926 int64_t time_at_stop = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); 2927 int64_t bandwidth = migrate_max_postcopy_bandwidth(); 2928 bool restart_block = false; 2929 int cur_state = MIGRATION_STATUS_ACTIVE; 2930 if (!migrate_pause_before_switchover()) { 2931 migrate_set_state(&ms->state, MIGRATION_STATUS_ACTIVE, 2932 MIGRATION_STATUS_POSTCOPY_ACTIVE); 2933 } 2934 2935 trace_postcopy_start(); 2936 qemu_mutex_lock_iothread(); 2937 trace_postcopy_start_set_run(); 2938 2939 qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER, NULL); 2940 global_state_store(); 2941 ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE); 2942 if (ret < 0) { 2943 goto fail; 2944 } 2945 2946 ret = migration_maybe_pause(ms, &cur_state, 2947 MIGRATION_STATUS_POSTCOPY_ACTIVE); 2948 if (ret < 0) { 2949 goto fail; 2950 } 2951 2952 ret = bdrv_inactivate_all(); 2953 if (ret < 0) { 2954 goto fail; 2955 } 2956 restart_block = true; 2957 2958 /* 2959 * Cause any non-postcopiable, but iterative devices to 2960 * send out their final data. 2961 */ 2962 qemu_savevm_state_complete_precopy(ms->to_dst_file, true, false); 2963 2964 /* 2965 * in Finish migrate and with the io-lock held everything should 2966 * be quiet, but we've potentially still got dirty pages and we 2967 * need to tell the destination to throw any pages it's already received 2968 * that are dirty 2969 */ 2970 if (migrate_postcopy_ram()) { 2971 if (ram_postcopy_send_discard_bitmap(ms)) { 2972 error_report("postcopy send discard bitmap failed"); 2973 goto fail; 2974 } 2975 } 2976 2977 /* 2978 * send rest of state - note things that are doing postcopy 2979 * will notice we're in POSTCOPY_ACTIVE and not actually 2980 * wrap their state up here 2981 */ 2982 /* 0 max-postcopy-bandwidth means unlimited */ 2983 if (!bandwidth) { 2984 qemu_file_set_rate_limit(ms->to_dst_file, INT64_MAX); 2985 } else { 2986 qemu_file_set_rate_limit(ms->to_dst_file, bandwidth / XFER_LIMIT_RATIO); 2987 } 2988 if (migrate_postcopy_ram()) { 2989 /* Ping just for debugging, helps line traces up */ 2990 qemu_savevm_send_ping(ms->to_dst_file, 2); 2991 } 2992 2993 /* 2994 * While loading the device state we may trigger page transfer 2995 * requests and the fd must be free to process those, and thus 2996 * the destination must read the whole device state off the fd before 2997 * it starts processing it. Unfortunately the ad-hoc migration format 2998 * doesn't allow the destination to know the size to read without fully 2999 * parsing it through each devices load-state code (especially the open 3000 * coded devices that use get/put). 3001 * So we wrap the device state up in a package with a length at the start; 3002 * to do this we use a qemu_buf to hold the whole of the device state. 
3003 */ 3004 bioc = qio_channel_buffer_new(4096); 3005 qio_channel_set_name(QIO_CHANNEL(bioc), "migration-postcopy-buffer"); 3006 fb = qemu_fopen_channel_output(QIO_CHANNEL(bioc)); 3007 object_unref(OBJECT(bioc)); 3008 3009 /* 3010 * Make sure the receiver can get incoming pages before we send the rest 3011 * of the state 3012 */ 3013 qemu_savevm_send_postcopy_listen(fb); 3014 3015 qemu_savevm_state_complete_precopy(fb, false, false); 3016 if (migrate_postcopy_ram()) { 3017 qemu_savevm_send_ping(fb, 3); 3018 } 3019 3020 qemu_savevm_send_postcopy_run(fb); 3021 3022 /* <><> end of stuff going into the package */ 3023 3024 /* Last point of recovery; as soon as we send the package the destination 3025 * can open devices and potentially start running. 3026 * Lets just check again we've not got any errors. 3027 */ 3028 ret = qemu_file_get_error(ms->to_dst_file); 3029 if (ret) { 3030 error_report("postcopy_start: Migration stream errored (pre package)"); 3031 goto fail_closefb; 3032 } 3033 3034 restart_block = false; 3035 3036 /* Now send that blob */ 3037 if (qemu_savevm_send_packaged(ms->to_dst_file, bioc->data, bioc->usage)) { 3038 goto fail_closefb; 3039 } 3040 qemu_fclose(fb); 3041 3042 /* Send a notify to give a chance for anything that needs to happen 3043 * at the transition to postcopy and after the device state; in particular 3044 * spice needs to trigger a transition now 3045 */ 3046 ms->postcopy_after_devices = true; 3047 notifier_list_notify(&migration_state_notifiers, ms); 3048 3049 ms->downtime = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - time_at_stop; 3050 3051 qemu_mutex_unlock_iothread(); 3052 3053 if (migrate_postcopy_ram()) { 3054 /* 3055 * Although this ping is just for debug, it could potentially be 3056 * used for getting a better measurement of downtime at the source. 3057 */ 3058 qemu_savevm_send_ping(ms->to_dst_file, 4); 3059 } 3060 3061 if (migrate_release_ram()) { 3062 ram_postcopy_migrated_memory_release(ms); 3063 } 3064 3065 ret = qemu_file_get_error(ms->to_dst_file); 3066 if (ret) { 3067 error_report("postcopy_start: Migration stream errored"); 3068 migrate_set_state(&ms->state, MIGRATION_STATUS_POSTCOPY_ACTIVE, 3069 MIGRATION_STATUS_FAILED); 3070 } 3071 3072 return ret; 3073 3074 fail_closefb: 3075 qemu_fclose(fb); 3076 fail: 3077 migrate_set_state(&ms->state, MIGRATION_STATUS_POSTCOPY_ACTIVE, 3078 MIGRATION_STATUS_FAILED); 3079 if (restart_block) { 3080 /* A failure happened early enough that we know the destination hasn't 3081 * accessed block devices, so we're safe to recover. 3082 */ 3083 Error *local_err = NULL; 3084 3085 bdrv_invalidate_cache_all(&local_err); 3086 if (local_err) { 3087 error_report_err(local_err); 3088 } 3089 } 3090 qemu_mutex_unlock_iothread(); 3091 return -1; 3092 } 3093 3094 /** 3095 * migration_maybe_pause: Pause if required to by 3096 * migrate_pause_before_switchover called with the iothread locked 3097 * Returns: 0 on success 3098 */ 3099 static int migration_maybe_pause(MigrationState *s, 3100 int *current_active_state, 3101 int new_state) 3102 { 3103 if (!migrate_pause_before_switchover()) { 3104 return 0; 3105 } 3106 3107 /* Since leaving this state is not atomic with posting the semaphore 3108 * it's possible that someone could have issued multiple migrate_continue 3109 * and the semaphore is incorrectly positive at this point; 3110 * the docs say it's undefined to reinit a semaphore that's already 3111 * init'd, so use timedwait to eat up any existing posts. 
3112 */ 3113 while (qemu_sem_timedwait(&s->pause_sem, 1) == 0) { 3114 /* This block intentionally left blank */ 3115 } 3116 3117 /* 3118 * If the migration is cancelled when it is in the completion phase, 3119 * the migration state is set to MIGRATION_STATUS_CANCELLING. 3120 * So we don't need to wait a semaphore, otherwise we would always 3121 * wait for the 'pause_sem' semaphore. 3122 */ 3123 if (s->state != MIGRATION_STATUS_CANCELLING) { 3124 qemu_mutex_unlock_iothread(); 3125 migrate_set_state(&s->state, *current_active_state, 3126 MIGRATION_STATUS_PRE_SWITCHOVER); 3127 qemu_sem_wait(&s->pause_sem); 3128 migrate_set_state(&s->state, MIGRATION_STATUS_PRE_SWITCHOVER, 3129 new_state); 3130 *current_active_state = new_state; 3131 qemu_mutex_lock_iothread(); 3132 } 3133 3134 return s->state == new_state ? 0 : -EINVAL; 3135 } 3136 3137 /** 3138 * migration_completion: Used by migration_thread when there's not much left. 3139 * The caller 'breaks' the loop when this returns. 3140 * 3141 * @s: Current migration state 3142 */ 3143 static void migration_completion(MigrationState *s) 3144 { 3145 int ret; 3146 int current_active_state = s->state; 3147 3148 if (s->state == MIGRATION_STATUS_ACTIVE) { 3149 qemu_mutex_lock_iothread(); 3150 s->downtime_start = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); 3151 qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER, NULL); 3152 s->vm_was_running = runstate_is_running(); 3153 ret = global_state_store(); 3154 3155 if (!ret) { 3156 bool inactivate = !migrate_colo_enabled(); 3157 ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE); 3158 trace_migration_completion_vm_stop(ret); 3159 if (ret >= 0) { 3160 ret = migration_maybe_pause(s, ¤t_active_state, 3161 MIGRATION_STATUS_DEVICE); 3162 } 3163 if (ret >= 0) { 3164 qemu_file_set_rate_limit(s->to_dst_file, INT64_MAX); 3165 ret = qemu_savevm_state_complete_precopy(s->to_dst_file, false, 3166 inactivate); 3167 } 3168 if (inactivate && ret >= 0) { 3169 s->block_inactive = true; 3170 } 3171 } 3172 qemu_mutex_unlock_iothread(); 3173 3174 if (ret < 0) { 3175 goto fail; 3176 } 3177 } else if (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) { 3178 trace_migration_completion_postcopy_end(); 3179 3180 qemu_savevm_state_complete_postcopy(s->to_dst_file); 3181 trace_migration_completion_postcopy_end_after_complete(); 3182 } else if (s->state == MIGRATION_STATUS_CANCELLING) { 3183 goto fail; 3184 } 3185 3186 /* 3187 * If rp was opened we must clean up the thread before 3188 * cleaning everything else up (since if there are no failures 3189 * it will wait for the destination to send it's status in 3190 * a SHUT command). 3191 */ 3192 if (s->rp_state.rp_thread_created) { 3193 int rp_error; 3194 trace_migration_return_path_end_before(); 3195 rp_error = await_return_path_close_on_source(s); 3196 trace_migration_return_path_end_after(rp_error); 3197 if (rp_error) { 3198 goto fail_invalidate; 3199 } 3200 } 3201 3202 if (qemu_file_get_error(s->to_dst_file)) { 3203 trace_migration_completion_file_err(); 3204 goto fail_invalidate; 3205 } 3206 3207 if (!migrate_colo_enabled()) { 3208 migrate_set_state(&s->state, current_active_state, 3209 MIGRATION_STATUS_COMPLETED); 3210 } 3211 3212 return; 3213 3214 fail_invalidate: 3215 /* If not doing postcopy, vm_start() will be called: let's regain 3216 * control on images. 
3217 */ 3218 if (s->state == MIGRATION_STATUS_ACTIVE || 3219 s->state == MIGRATION_STATUS_DEVICE) { 3220 Error *local_err = NULL; 3221 3222 qemu_mutex_lock_iothread(); 3223 bdrv_invalidate_cache_all(&local_err); 3224 if (local_err) { 3225 error_report_err(local_err); 3226 } else { 3227 s->block_inactive = false; 3228 } 3229 qemu_mutex_unlock_iothread(); 3230 } 3231 3232 fail: 3233 migrate_set_state(&s->state, current_active_state, 3234 MIGRATION_STATUS_FAILED); 3235 } 3236 3237 /** 3238 * bg_migration_completion: Used by bg_migration_thread when after all the 3239 * RAM has been saved. The caller 'breaks' the loop when this returns. 3240 * 3241 * @s: Current migration state 3242 */ 3243 static void bg_migration_completion(MigrationState *s) 3244 { 3245 int current_active_state = s->state; 3246 3247 /* 3248 * Stop tracking RAM writes - un-protect memory, un-register UFFD 3249 * memory ranges, flush kernel wait queues and wake up threads 3250 * waiting for write fault to be resolved. 3251 */ 3252 ram_write_tracking_stop(); 3253 3254 if (s->state == MIGRATION_STATUS_ACTIVE) { 3255 /* 3256 * By this moment we have RAM content saved into the migration stream. 3257 * The next step is to flush the non-RAM content (device state) 3258 * right after the ram content. The device state has been stored into 3259 * the temporary buffer before RAM saving started. 3260 */ 3261 qemu_put_buffer(s->to_dst_file, s->bioc->data, s->bioc->usage); 3262 qemu_fflush(s->to_dst_file); 3263 } else if (s->state == MIGRATION_STATUS_CANCELLING) { 3264 goto fail; 3265 } 3266 3267 if (qemu_file_get_error(s->to_dst_file)) { 3268 trace_migration_completion_file_err(); 3269 goto fail; 3270 } 3271 3272 migrate_set_state(&s->state, current_active_state, 3273 MIGRATION_STATUS_COMPLETED); 3274 return; 3275 3276 fail: 3277 migrate_set_state(&s->state, current_active_state, 3278 MIGRATION_STATUS_FAILED); 3279 } 3280 3281 bool migrate_colo_enabled(void) 3282 { 3283 MigrationState *s = migrate_get_current(); 3284 return s->enabled_capabilities[MIGRATION_CAPABILITY_X_COLO]; 3285 } 3286 3287 typedef enum MigThrError { 3288 /* No error detected */ 3289 MIG_THR_ERR_NONE = 0, 3290 /* Detected error, but resumed successfully */ 3291 MIG_THR_ERR_RECOVERED = 1, 3292 /* Detected fatal error, need to exit */ 3293 MIG_THR_ERR_FATAL = 2, 3294 } MigThrError; 3295 3296 static int postcopy_resume_handshake(MigrationState *s) 3297 { 3298 qemu_savevm_send_postcopy_resume(s->to_dst_file); 3299 3300 while (s->state == MIGRATION_STATUS_POSTCOPY_RECOVER) { 3301 qemu_sem_wait(&s->rp_state.rp_sem); 3302 } 3303 3304 if (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) { 3305 return 0; 3306 } 3307 3308 return -1; 3309 } 3310 3311 /* Return zero if success, or <0 for error */ 3312 static int postcopy_do_resume(MigrationState *s) 3313 { 3314 int ret; 3315 3316 /* 3317 * Call all the resume_prepare() hooks, so that modules can be 3318 * ready for the migration resume. 3319 */ 3320 ret = qemu_savevm_state_resume_prepare(s); 3321 if (ret) { 3322 error_report("%s: resume_prepare() failure detected: %d", 3323 __func__, ret); 3324 return ret; 3325 } 3326 3327 /* 3328 * Last handshake with destination on the resume (destination will 3329 * switch to postcopy-active afterwards) 3330 */ 3331 ret = postcopy_resume_handshake(s); 3332 if (ret) { 3333 error_report("%s: handshake failed: %d", __func__, ret); 3334 return ret; 3335 } 3336 3337 return 0; 3338 } 3339 3340 /* 3341 * We don't return until we are in a safe state to continue current 3342 * postcopy migration. 
Returns MIG_THR_ERR_RECOVERED if recovered, or
 * MIG_THR_ERR_FATAL if an unrecoverable failure happened.
 */
static MigThrError postcopy_pause(MigrationState *s)
{
    assert(s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE);

    while (true) {
        QEMUFile *file;

        /*
         * Current channel is possibly broken. Release it. Note that this is
         * guaranteed even without lock because to_dst_file should only be
         * modified by the migration thread. That also guarantees that the
         * unregister of yank is safe too without the lock. It should be safe
         * even to be within the qemu_file_lock, but we didn't do that to avoid
         * taking more mutex (yank_lock) within qemu_file_lock. TL;DR: we make
         * the qemu_file_lock critical section as small as possible.
         */
        assert(s->to_dst_file);
        migration_ioc_unregister_yank_from_file(s->to_dst_file);
        qemu_mutex_lock(&s->qemu_file_lock);
        file = s->to_dst_file;
        s->to_dst_file = NULL;
        qemu_mutex_unlock(&s->qemu_file_lock);

        qemu_file_shutdown(file);
        qemu_fclose(file);

        migrate_set_state(&s->state, s->state,
                          MIGRATION_STATUS_POSTCOPY_PAUSED);

        error_report("Detected IO failure for postcopy. "
                     "Migration paused.");

        /*
         * We wait until things are fixed up. Then someone will set the
         * status back for us.
         */
        while (s->state == MIGRATION_STATUS_POSTCOPY_PAUSED) {
            qemu_sem_wait(&s->postcopy_pause_sem);
        }

        if (s->state == MIGRATION_STATUS_POSTCOPY_RECOVER) {
            /* Woken up by a recover procedure. Give it a shot */

            /*
             * Firstly, let's wake up the return path now, with a new
             * return path channel.
             */
            qemu_sem_post(&s->postcopy_pause_rp_sem);

            /* Do the resume logic */
            if (postcopy_do_resume(s) == 0) {
                /* Let's continue! */
                trace_postcopy_pause_continued();
                return MIG_THR_ERR_RECOVERED;
            } else {
                /*
                 * Something went wrong during the recovery, let's
                 * pause again. Pause is always better than throwing
                 * data away.
                 */
                continue;
            }
        } else {
            /* This is not right... Time to quit. */
            return MIG_THR_ERR_FATAL;
        }
    }
}

static MigThrError migration_detect_error(MigrationState *s)
{
    int ret;
    int state = s->state;
    Error *local_error = NULL;

    if (state == MIGRATION_STATUS_CANCELLING ||
        state == MIGRATION_STATUS_CANCELLED) {
        /* End the migration, but don't set the state to failed */
        return MIG_THR_ERR_FATAL;
    }

    /* Try to detect any file errors */
    ret = qemu_file_get_error_obj(s->to_dst_file, &local_error);
    if (!ret) {
        /* Everything is fine */
        assert(!local_error);
        return MIG_THR_ERR_NONE;
    }

    if (local_error) {
        migrate_set_error(s, local_error);
        error_free(local_error);
    }

    if (state == MIGRATION_STATUS_POSTCOPY_ACTIVE && ret == -EIO) {
        /*
         * For postcopy, we allow the network to be down for a
         * while. After that, it can be continued by a
         * recovery phase.
         */
        return postcopy_pause(s);
    } else {
        /*
         * For precopy (or postcopy with an error outside IO), we fail
         * immediately.
         */
        migrate_set_state(&s->state, state, MIGRATION_STATUS_FAILED);
        trace_migration_thread_file_err();

        /* Time to stop the migration, now.
*/
        return MIG_THR_ERR_FATAL;
    }
}

/* How many bytes have we transferred since the beginning of the migration */
static uint64_t migration_total_bytes(MigrationState *s)
{
    return qemu_ftell(s->to_dst_file) + ram_counters.multifd_bytes;
}

static void migration_calculate_complete(MigrationState *s)
{
    uint64_t bytes = migration_total_bytes(s);
    int64_t end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    int64_t transfer_time;

    s->total_time = end_time - s->start_time;
    if (!s->downtime) {
        /*
         * It's still not set, so this is a precopy migration.  For
         * postcopy, downtime is calculated during postcopy_start().
         */
        s->downtime = end_time - s->downtime_start;
    }

    transfer_time = s->total_time - s->setup_time;
    if (transfer_time) {
        s->mbps = ((double) bytes * 8.0) / transfer_time / 1000;
    }
}

static void update_iteration_initial_status(MigrationState *s)
{
    /*
     * Update these three fields at the same time to avoid a mismatch
     * between them leading to a wrong speed calculation.
     */
    s->iteration_start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    s->iteration_initial_bytes = migration_total_bytes(s);
    s->iteration_initial_pages = ram_get_total_transferred_pages();
}

static void migration_update_counters(MigrationState *s,
                                      int64_t current_time)
{
    uint64_t transferred, transferred_pages, time_spent;
    uint64_t current_bytes; /* bytes transferred since the beginning */
    double bandwidth;

    if (current_time < s->iteration_start_time + BUFFER_DELAY) {
        return;
    }

    current_bytes = migration_total_bytes(s);
    transferred = current_bytes - s->iteration_initial_bytes;
    time_spent = current_time - s->iteration_start_time;
    bandwidth = (double)transferred / time_spent;
    s->threshold_size = bandwidth * s->parameters.downtime_limit;

    s->mbps = (((double) transferred * 8.0) /
               ((double) time_spent / 1000.0)) / 1000.0 / 1000.0;

    transferred_pages = ram_get_total_transferred_pages() -
                        s->iteration_initial_pages;
    s->pages_per_second = (double) transferred_pages /
                          (((double) time_spent / 1000.0));

    /*
     * If we haven't sent anything, we don't want to
     * recalculate. 10000 is a small enough number for our purposes
     */
    if (ram_counters.dirty_pages_rate && transferred > 10000) {
        s->expected_downtime = ram_counters.remaining / bandwidth;
    }

    qemu_file_reset_rate_limit(s->to_dst_file);

    update_iteration_initial_status(s);

    trace_migrate_transferred(transferred, time_spent,
                              bandwidth, s->threshold_size);
}

/* Migration thread iteration status */
typedef enum {
    MIG_ITERATE_RESUME, /* Resume current iteration */
    MIG_ITERATE_SKIP,   /* Skip current iteration */
    MIG_ITERATE_BREAK,  /* Break the loop */
} MigIterateState;

/*
 * Return MIG_ITERATE_RESUME to continue straight to the next iteration,
 * MIG_ITERATE_SKIP to skip it, or MIG_ITERATE_BREAK to end the loop.
3548 */ 3549 static MigIterateState migration_iteration_run(MigrationState *s) 3550 { 3551 uint64_t pending_size, pend_pre, pend_compat, pend_post; 3552 bool in_postcopy = s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE; 3553 3554 qemu_savevm_state_pending(s->to_dst_file, s->threshold_size, &pend_pre, 3555 &pend_compat, &pend_post); 3556 pending_size = pend_pre + pend_compat + pend_post; 3557 3558 trace_migrate_pending(pending_size, s->threshold_size, 3559 pend_pre, pend_compat, pend_post); 3560 3561 if (pending_size && pending_size >= s->threshold_size) { 3562 /* Still a significant amount to transfer */ 3563 if (!in_postcopy && pend_pre <= s->threshold_size && 3564 qatomic_read(&s->start_postcopy)) { 3565 if (postcopy_start(s)) { 3566 error_report("%s: postcopy failed to start", __func__); 3567 } 3568 return MIG_ITERATE_SKIP; 3569 } 3570 /* Just another iteration step */ 3571 qemu_savevm_state_iterate(s->to_dst_file, in_postcopy); 3572 } else { 3573 trace_migration_thread_low_pending(pending_size); 3574 migration_completion(s); 3575 return MIG_ITERATE_BREAK; 3576 } 3577 3578 return MIG_ITERATE_RESUME; 3579 } 3580 3581 static void migration_iteration_finish(MigrationState *s) 3582 { 3583 /* If we enabled cpu throttling for auto-converge, turn it off. */ 3584 cpu_throttle_stop(); 3585 3586 qemu_mutex_lock_iothread(); 3587 switch (s->state) { 3588 case MIGRATION_STATUS_COMPLETED: 3589 migration_calculate_complete(s); 3590 runstate_set(RUN_STATE_POSTMIGRATE); 3591 break; 3592 3593 case MIGRATION_STATUS_ACTIVE: 3594 /* 3595 * We should really assert here, but since it's during 3596 * migration, let's try to reduce the usage of assertions. 3597 */ 3598 if (!migrate_colo_enabled()) { 3599 error_report("%s: critical error: calling COLO code without " 3600 "COLO enabled", __func__); 3601 } 3602 migrate_start_colo_process(s); 3603 /* 3604 * Fixme: we will run VM in COLO no matter its old running state. 3605 * After exited COLO, we will keep running. 3606 */ 3607 s->vm_was_running = true; 3608 /* Fallthrough */ 3609 case MIGRATION_STATUS_FAILED: 3610 case MIGRATION_STATUS_CANCELLED: 3611 case MIGRATION_STATUS_CANCELLING: 3612 if (s->vm_was_running) { 3613 vm_start(); 3614 } else { 3615 if (runstate_check(RUN_STATE_FINISH_MIGRATE)) { 3616 runstate_set(RUN_STATE_POSTMIGRATE); 3617 } 3618 } 3619 break; 3620 3621 default: 3622 /* Should not reach here, but if so, forgive the VM. */ 3623 error_report("%s: Unknown ending state %d", __func__, s->state); 3624 break; 3625 } 3626 migrate_fd_cleanup_schedule(s); 3627 qemu_mutex_unlock_iothread(); 3628 } 3629 3630 static void bg_migration_iteration_finish(MigrationState *s) 3631 { 3632 qemu_mutex_lock_iothread(); 3633 switch (s->state) { 3634 case MIGRATION_STATUS_COMPLETED: 3635 migration_calculate_complete(s); 3636 break; 3637 3638 case MIGRATION_STATUS_ACTIVE: 3639 case MIGRATION_STATUS_FAILED: 3640 case MIGRATION_STATUS_CANCELLED: 3641 case MIGRATION_STATUS_CANCELLING: 3642 break; 3643 3644 default: 3645 /* Should not reach here, but if so, forgive the VM. */ 3646 error_report("%s: Unknown ending state %d", __func__, s->state); 3647 break; 3648 } 3649 3650 migrate_fd_cleanup_schedule(s); 3651 qemu_mutex_unlock_iothread(); 3652 } 3653 3654 /* 3655 * Return true if continue to the next iteration directly, false 3656 * otherwise. 
3657 */ 3658 static MigIterateState bg_migration_iteration_run(MigrationState *s) 3659 { 3660 int res; 3661 3662 res = qemu_savevm_state_iterate(s->to_dst_file, false); 3663 if (res > 0) { 3664 bg_migration_completion(s); 3665 return MIG_ITERATE_BREAK; 3666 } 3667 3668 return MIG_ITERATE_RESUME; 3669 } 3670 3671 void migration_make_urgent_request(void) 3672 { 3673 qemu_sem_post(&migrate_get_current()->rate_limit_sem); 3674 } 3675 3676 void migration_consume_urgent_request(void) 3677 { 3678 qemu_sem_wait(&migrate_get_current()->rate_limit_sem); 3679 } 3680 3681 /* Returns true if the rate limiting was broken by an urgent request */ 3682 bool migration_rate_limit(void) 3683 { 3684 int64_t now = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); 3685 MigrationState *s = migrate_get_current(); 3686 3687 bool urgent = false; 3688 migration_update_counters(s, now); 3689 if (qemu_file_rate_limit(s->to_dst_file)) { 3690 3691 if (qemu_file_get_error(s->to_dst_file)) { 3692 return false; 3693 } 3694 /* 3695 * Wait for a delay to do rate limiting OR 3696 * something urgent to post the semaphore. 3697 */ 3698 int ms = s->iteration_start_time + BUFFER_DELAY - now; 3699 trace_migration_rate_limit_pre(ms); 3700 if (qemu_sem_timedwait(&s->rate_limit_sem, ms) == 0) { 3701 /* 3702 * We were woken by one or more urgent things but 3703 * the timedwait will have consumed one of them. 3704 * The service routine for the urgent wake will dec 3705 * the semaphore itself for each item it consumes, 3706 * so add this one we just eat back. 3707 */ 3708 qemu_sem_post(&s->rate_limit_sem); 3709 urgent = true; 3710 } 3711 trace_migration_rate_limit_post(urgent); 3712 } 3713 return urgent; 3714 } 3715 3716 /* 3717 * if failover devices are present, wait they are completely 3718 * unplugged 3719 */ 3720 3721 static void qemu_savevm_wait_unplug(MigrationState *s, int old_state, 3722 int new_state) 3723 { 3724 if (qemu_savevm_state_guest_unplug_pending()) { 3725 migrate_set_state(&s->state, old_state, MIGRATION_STATUS_WAIT_UNPLUG); 3726 3727 while (s->state == MIGRATION_STATUS_WAIT_UNPLUG && 3728 qemu_savevm_state_guest_unplug_pending()) { 3729 qemu_sem_timedwait(&s->wait_unplug_sem, 250); 3730 } 3731 if (s->state != MIGRATION_STATUS_WAIT_UNPLUG) { 3732 int timeout = 120; /* 30 seconds */ 3733 /* 3734 * migration has been canceled 3735 * but as we have started an unplug we must wait the end 3736 * to be able to plug back the card 3737 */ 3738 while (timeout-- && qemu_savevm_state_guest_unplug_pending()) { 3739 qemu_sem_timedwait(&s->wait_unplug_sem, 250); 3740 } 3741 if (qemu_savevm_state_guest_unplug_pending()) { 3742 warn_report("migration: partially unplugged device on " 3743 "failure"); 3744 } 3745 } 3746 3747 migrate_set_state(&s->state, MIGRATION_STATUS_WAIT_UNPLUG, new_state); 3748 } else { 3749 migrate_set_state(&s->state, old_state, new_state); 3750 } 3751 } 3752 3753 /* 3754 * Master migration thread on the source VM. 3755 * It drives the migration and pumps the data down the outgoing channel. 3756 */ 3757 static void *migration_thread(void *opaque) 3758 { 3759 MigrationState *s = opaque; 3760 int64_t setup_start = qemu_clock_get_ms(QEMU_CLOCK_HOST); 3761 MigThrError thr_error; 3762 bool urgent = false; 3763 3764 rcu_register_thread(); 3765 3766 object_ref(OBJECT(s)); 3767 update_iteration_initial_status(s); 3768 3769 qemu_savevm_state_header(s->to_dst_file); 3770 3771 /* 3772 * If we opened the return path, we need to make sure dst has it 3773 * opened as well. 
3774 */ 3775 if (s->rp_state.rp_thread_created) { 3776 /* Now tell the dest that it should open its end so it can reply */ 3777 qemu_savevm_send_open_return_path(s->to_dst_file); 3778 3779 /* And do a ping that will make stuff easier to debug */ 3780 qemu_savevm_send_ping(s->to_dst_file, 1); 3781 } 3782 3783 if (migrate_postcopy()) { 3784 /* 3785 * Tell the destination that we *might* want to do postcopy later; 3786 * if the other end can't do postcopy it should fail now, nice and 3787 * early. 3788 */ 3789 qemu_savevm_send_postcopy_advise(s->to_dst_file); 3790 } 3791 3792 if (migrate_colo_enabled()) { 3793 /* Notify migration destination that we enable COLO */ 3794 qemu_savevm_send_colo_enable(s->to_dst_file); 3795 } 3796 3797 qemu_savevm_state_setup(s->to_dst_file); 3798 3799 qemu_savevm_wait_unplug(s, MIGRATION_STATUS_SETUP, 3800 MIGRATION_STATUS_ACTIVE); 3801 3802 s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start; 3803 3804 trace_migration_thread_setup_complete(); 3805 3806 while (migration_is_active(s)) { 3807 if (urgent || !qemu_file_rate_limit(s->to_dst_file)) { 3808 MigIterateState iter_state = migration_iteration_run(s); 3809 if (iter_state == MIG_ITERATE_SKIP) { 3810 continue; 3811 } else if (iter_state == MIG_ITERATE_BREAK) { 3812 break; 3813 } 3814 } 3815 3816 /* 3817 * Try to detect any kind of failures, and see whether we 3818 * should stop the migration now. 3819 */ 3820 thr_error = migration_detect_error(s); 3821 if (thr_error == MIG_THR_ERR_FATAL) { 3822 /* Stop migration */ 3823 break; 3824 } else if (thr_error == MIG_THR_ERR_RECOVERED) { 3825 /* 3826 * Just recovered from a e.g. network failure, reset all 3827 * the local variables. This is important to avoid 3828 * breaking transferred_bytes and bandwidth calculation 3829 */ 3830 update_iteration_initial_status(s); 3831 } 3832 3833 urgent = migration_rate_limit(); 3834 } 3835 3836 trace_migration_thread_after_loop(); 3837 migration_iteration_finish(s); 3838 object_unref(OBJECT(s)); 3839 rcu_unregister_thread(); 3840 return NULL; 3841 } 3842 3843 static void bg_migration_vm_start_bh(void *opaque) 3844 { 3845 MigrationState *s = opaque; 3846 3847 qemu_bh_delete(s->vm_start_bh); 3848 s->vm_start_bh = NULL; 3849 3850 vm_start(); 3851 s->downtime = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - s->downtime_start; 3852 } 3853 3854 /** 3855 * Background snapshot thread, based on live migration code. 3856 * This is an alternative implementation of live migration mechanism 3857 * introduced specifically to support background snapshots. 3858 * 3859 * It takes advantage of userfault_fd write protection mechanism introduced 3860 * in v5.7 kernel. Compared to existing dirty page logging migration much 3861 * lesser stream traffic is produced resulting in smaller snapshot images, 3862 * simply cause of no page duplicates can get into the stream. 3863 * 3864 * Another key point is that generated vmstate stream reflects machine state 3865 * 'frozen' at the beginning of snapshot creation compared to dirty page logging 3866 * mechanism, which effectively results in that saved snapshot is the state of VM 3867 * at the end of the process. 
static void *bg_migration_thread(void *opaque)
{
    MigrationState *s = opaque;
    int64_t setup_start;
    MigThrError thr_error;
    QEMUFile *fb;
    bool early_fail = true;

    rcu_register_thread();
    object_ref(OBJECT(s));

    qemu_file_set_rate_limit(s->to_dst_file, INT64_MAX);

    setup_start = qemu_clock_get_ms(QEMU_CLOCK_HOST);
    /*
     * We want to save the vmstate for the moment when the migration was
     * initiated, but we also want to save the RAM content while the VM
     * is running.  The RAM content should appear first in the vmstate.
     * So, we first stash the non-RAM part of the vmstate in a temporary
     * buffer, then write the RAM part of the vmstate to the migration
     * stream with the vCPUs running and, finally, write the stashed
     * non-RAM part of the vmstate from the buffer to the migration
     * stream.
     */
    s->bioc = qio_channel_buffer_new(512 * 1024);
    qio_channel_set_name(QIO_CHANNEL(s->bioc), "vmstate-buffer");
    fb = qemu_fopen_channel_output(QIO_CHANNEL(s->bioc));
    object_unref(OBJECT(s->bioc));

    update_iteration_initial_status(s);

    /*
     * Prepare for tracking memory writes with UFFD-WP - populate
     * RAM pages before protecting.
     */
#ifdef __linux__
    ram_write_tracking_prepare();
#endif

    qemu_savevm_state_header(s->to_dst_file);
    qemu_savevm_state_setup(s->to_dst_file);

    qemu_savevm_wait_unplug(s, MIGRATION_STATUS_SETUP,
                            MIGRATION_STATUS_ACTIVE);

    s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start;

    trace_migration_thread_setup_complete();
    s->downtime_start = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);

    qemu_mutex_lock_iothread();

    /*
     * If the VM is currently suspended, then, to make a valid runstate
     * transition in vm_stop_force_state(), we need to wake it up first.
     */
    qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER, NULL);
    s->vm_was_running = runstate_is_running();

    if (global_state_store()) {
        goto fail;
    }
    /* Forcibly stop the VM before saving the state of vCPUs and devices */
    if (vm_stop_force_state(RUN_STATE_PAUSED)) {
        goto fail;
    }
    /*
     * Put the vCPUs in sync with the shadow context structures, then
     * save their state to the channel-buffer along with the devices.
     */
    cpu_synchronize_all_states();
    if (qemu_savevm_state_complete_precopy_non_iterable(fb, false, false)) {
        goto fail;
    }
    /*
     * Since we are going to get the non-iterable state data directly
     * from s->bioc->data, an explicit flush is needed here.
     */
    qemu_fflush(fb);
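    /*
     * Illustrative note (sketch, for orientation only): the bytes just
     * flushed into the channel-buffer are replayed into the outgoing
     * stream later, after the RAM pages, by bg_migration_completion(),
     * with the equivalent of:
     *
     *     qemu_put_buffer(s->to_dst_file, s->bioc->data, s->bioc->usage);
     *
     * which is why the qemu_fflush(fb) above matters - the data must
     * actually be in s->bioc before it can be replayed.
     */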
    /* Now initialize the UFFD context and start tracking RAM writes */
    if (ram_write_tracking_start()) {
        goto fail;
    }
    early_fail = false;

    /*
     * Start the VM from the BH handler to avoid a write-fault lock-up
     * here: UFFD-WP protection for the whole of RAM is already enabled,
     * so calling the VM state change notifiers from vm_start() would
     * initiate writes to the virtio VQ memory, which lies in the
     * write-protected region.
     */
    s->vm_start_bh = qemu_bh_new(bg_migration_vm_start_bh, s);
    qemu_bh_schedule(s->vm_start_bh);

    qemu_mutex_unlock_iothread();

    while (migration_is_active(s)) {
        MigIterateState iter_state = bg_migration_iteration_run(s);
        if (iter_state == MIG_ITERATE_SKIP) {
            continue;
        } else if (iter_state == MIG_ITERATE_BREAK) {
            break;
        }

        /*
         * Try to detect any kind of failures, and see whether we
         * should stop the migration now.
         */
        thr_error = migration_detect_error(s);
        if (thr_error == MIG_THR_ERR_FATAL) {
            /* Stop migration */
            break;
        }

        migration_update_counters(s, qemu_clock_get_ms(QEMU_CLOCK_REALTIME));
    }

    trace_migration_thread_after_loop();

fail:
    if (early_fail) {
        migrate_set_state(&s->state, MIGRATION_STATUS_ACTIVE,
                          MIGRATION_STATUS_FAILED);
        qemu_mutex_unlock_iothread();
    }

    bg_migration_iteration_finish(s);

    qemu_fclose(fb);
    object_unref(OBJECT(s));
    rcu_unregister_thread();

    return NULL;
}

void migrate_fd_connect(MigrationState *s, Error *error_in)
{
    Error *local_err = NULL;
    int64_t rate_limit;
    bool resume = s->state == MIGRATION_STATUS_POSTCOPY_PAUSED;

    /*
     * If there's a previous error, free it and prepare for another one.
     * Meanwhile, if migration completes successfully, there won't be an
     * error dumped when calling migrate_fd_cleanup().
     */
    migrate_error_free(s);

    s->expected_downtime = s->parameters.downtime_limit;
    if (resume) {
        assert(s->cleanup_bh);
    } else {
        assert(!s->cleanup_bh);
        s->cleanup_bh = qemu_bh_new(migrate_fd_cleanup_bh, s);
    }
    if (error_in) {
        migrate_fd_error(s, error_in);
        if (resume) {
            /*
             * Don't do cleanup for resume if the channel is invalid, but
             * only dump the error.  We wait for another channel connect
             * from the user.  The error_report still gives the HMP user
             * a hint on what failed.  It's normally done in
             * migrate_fd_cleanup(), but call it here explicitly.
             */
            error_report_err(error_copy(s->error));
        } else {
            migrate_fd_cleanup(s);
        }
        return;
    }

    if (resume) {
        /* This is a resumed migration */
        rate_limit = s->parameters.max_postcopy_bandwidth /
            XFER_LIMIT_RATIO;
    } else {
        /* This is a fresh migration */
        rate_limit = s->parameters.max_bandwidth / XFER_LIMIT_RATIO;

        /* Notify before starting the migration thread */
        notifier_list_notify(&migration_state_notifiers, s);
    }

    qemu_file_set_rate_limit(s->to_dst_file, rate_limit);
    qemu_file_set_blocking(s->to_dst_file, true);
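    /*
     * Worked example (numbers only, using the defaults defined at the
     * top of this file): with max_bandwidth = MAX_THROTTLE = 128 MiB/s
     * and XFER_LIMIT_RATIO = 1000 / BUFFER_DELAY = 10, the limit handed
     * to qemu_file_set_rate_limit() is 128 MiB / 10, i.e. roughly
     * 13.4 MB of transfer budget per 100 ms BUFFER_DELAY chunk, which
     * is what qemu_file_rate_limit() checks against.
     */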
    /*
     * Open the return path.  For postcopy, it is used exclusively.  For
     * precopy, QEMU uses the return path only if the user specified the
     * "return-path" capability.
     */
    if (migrate_postcopy_ram() || migrate_use_return_path()) {
        if (open_return_path_on_source(s, !resume)) {
            error_report("Unable to open return-path for postcopy");
            migrate_set_state(&s->state, s->state, MIGRATION_STATUS_FAILED);
            migrate_fd_cleanup(s);
            return;
        }
    }

    if (resume) {
        /* Wake up the main migration thread to do the recovery */
        migrate_set_state(&s->state, MIGRATION_STATUS_POSTCOPY_PAUSED,
                          MIGRATION_STATUS_POSTCOPY_RECOVER);
        qemu_sem_post(&s->postcopy_pause_sem);
        return;
    }

    if (multifd_save_setup(&local_err) != 0) {
        error_report_err(local_err);
        migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
                          MIGRATION_STATUS_FAILED);
        migrate_fd_cleanup(s);
        return;
    }

    if (migrate_background_snapshot()) {
        qemu_thread_create(&s->thread, "bg_snapshot",
                           bg_migration_thread, s, QEMU_THREAD_JOINABLE);
    } else {
        qemu_thread_create(&s->thread, "live_migration",
                           migration_thread, s, QEMU_THREAD_JOINABLE);
    }
    s->migration_thread_running = true;
}

void migration_global_dump(Monitor *mon)
{
    MigrationState *ms = migrate_get_current();

    monitor_printf(mon, "globals:\n");
    monitor_printf(mon, "store-global-state: %s\n",
                   ms->store_global_state ? "on" : "off");
    monitor_printf(mon, "only-migratable: %s\n",
                   only_migratable ? "on" : "off");
    monitor_printf(mon, "send-configuration: %s\n",
                   ms->send_configuration ? "on" : "off");
    monitor_printf(mon, "send-section-footer: %s\n",
                   ms->send_section_footer ? "on" : "off");
    monitor_printf(mon, "decompress-error-check: %s\n",
                   ms->decompress_error_check ? "on" : "off");
    monitor_printf(mon, "clear-bitmap-shift: %u\n",
                   ms->clear_bitmap_shift);
}

#define DEFINE_PROP_MIG_CAP(name, x) \
    DEFINE_PROP_BOOL(name, MigrationState, enabled_capabilities[x], false)
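/*
 * For illustration, a manual expansion of the helper above (not
 * generated code):
 *
 *     DEFINE_PROP_MIG_CAP("x-xbzrle", MIGRATION_CAPABILITY_XBZRLE)
 *
 * becomes
 *
 *     DEFINE_PROP_BOOL("x-xbzrle", MigrationState,
 *                      enabled_capabilities[MIGRATION_CAPABILITY_XBZRLE],
 *                      false)
 *
 * i.e. each migration capability is exposed as a bool qdev property
 * that defaults to off.
 */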
static Property migration_properties[] = {
    DEFINE_PROP_BOOL("store-global-state", MigrationState,
                     store_global_state, true),
    DEFINE_PROP_BOOL("send-configuration", MigrationState,
                     send_configuration, true),
    DEFINE_PROP_BOOL("send-section-footer", MigrationState,
                     send_section_footer, true),
    DEFINE_PROP_BOOL("decompress-error-check", MigrationState,
                     decompress_error_check, true),
    DEFINE_PROP_UINT8("x-clear-bitmap-shift", MigrationState,
                      clear_bitmap_shift, CLEAR_BITMAP_SHIFT_DEFAULT),

    /* Migration parameters */
    DEFINE_PROP_UINT8("x-compress-level", MigrationState,
                      parameters.compress_level,
                      DEFAULT_MIGRATE_COMPRESS_LEVEL),
    DEFINE_PROP_UINT8("x-compress-threads", MigrationState,
                      parameters.compress_threads,
                      DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT),
    DEFINE_PROP_BOOL("x-compress-wait-thread", MigrationState,
                     parameters.compress_wait_thread, true),
    DEFINE_PROP_UINT8("x-decompress-threads", MigrationState,
                      parameters.decompress_threads,
                      DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT),
    DEFINE_PROP_UINT8("x-throttle-trigger-threshold", MigrationState,
                      parameters.throttle_trigger_threshold,
                      DEFAULT_MIGRATE_THROTTLE_TRIGGER_THRESHOLD),
    DEFINE_PROP_UINT8("x-cpu-throttle-initial", MigrationState,
                      parameters.cpu_throttle_initial,
                      DEFAULT_MIGRATE_CPU_THROTTLE_INITIAL),
    DEFINE_PROP_UINT8("x-cpu-throttle-increment", MigrationState,
                      parameters.cpu_throttle_increment,
                      DEFAULT_MIGRATE_CPU_THROTTLE_INCREMENT),
    DEFINE_PROP_BOOL("x-cpu-throttle-tailslow", MigrationState,
                     parameters.cpu_throttle_tailslow, false),
    DEFINE_PROP_SIZE("x-max-bandwidth", MigrationState,
                     parameters.max_bandwidth, MAX_THROTTLE),
    DEFINE_PROP_UINT64("x-downtime-limit", MigrationState,
                       parameters.downtime_limit,
                       DEFAULT_MIGRATE_SET_DOWNTIME),
    DEFINE_PROP_UINT32("x-checkpoint-delay", MigrationState,
                       parameters.x_checkpoint_delay,
                       DEFAULT_MIGRATE_X_CHECKPOINT_DELAY),
    DEFINE_PROP_UINT8("multifd-channels", MigrationState,
                      parameters.multifd_channels,
                      DEFAULT_MIGRATE_MULTIFD_CHANNELS),
    DEFINE_PROP_MULTIFD_COMPRESSION("multifd-compression", MigrationState,
                      parameters.multifd_compression,
                      DEFAULT_MIGRATE_MULTIFD_COMPRESSION),
    DEFINE_PROP_UINT8("multifd-zlib-level", MigrationState,
                      parameters.multifd_zlib_level,
                      DEFAULT_MIGRATE_MULTIFD_ZLIB_LEVEL),
    DEFINE_PROP_UINT8("multifd-zstd-level", MigrationState,
                      parameters.multifd_zstd_level,
                      DEFAULT_MIGRATE_MULTIFD_ZSTD_LEVEL),
    DEFINE_PROP_SIZE("xbzrle-cache-size", MigrationState,
                     parameters.xbzrle_cache_size,
                     DEFAULT_MIGRATE_XBZRLE_CACHE_SIZE),
    DEFINE_PROP_SIZE("max-postcopy-bandwidth", MigrationState,
                     parameters.max_postcopy_bandwidth,
                     DEFAULT_MIGRATE_MAX_POSTCOPY_BANDWIDTH),
    DEFINE_PROP_UINT8("max-cpu-throttle", MigrationState,
                      parameters.max_cpu_throttle,
                      DEFAULT_MIGRATE_MAX_CPU_THROTTLE),
    DEFINE_PROP_SIZE("announce-initial", MigrationState,
                     parameters.announce_initial,
                     DEFAULT_MIGRATE_ANNOUNCE_INITIAL),
    DEFINE_PROP_SIZE("announce-max", MigrationState,
                     parameters.announce_max,
                     DEFAULT_MIGRATE_ANNOUNCE_MAX),
    DEFINE_PROP_SIZE("announce-rounds", MigrationState,
                     parameters.announce_rounds,
                     DEFAULT_MIGRATE_ANNOUNCE_ROUNDS),
    DEFINE_PROP_SIZE("announce-step", MigrationState,
                     parameters.announce_step,
                     DEFAULT_MIGRATE_ANNOUNCE_STEP),

    /* Migration capabilities */
    DEFINE_PROP_MIG_CAP("x-xbzrle", MIGRATION_CAPABILITY_XBZRLE),
    DEFINE_PROP_MIG_CAP("x-rdma-pin-all", MIGRATION_CAPABILITY_RDMA_PIN_ALL),
    DEFINE_PROP_MIG_CAP("x-auto-converge", MIGRATION_CAPABILITY_AUTO_CONVERGE),
    DEFINE_PROP_MIG_CAP("x-zero-blocks", MIGRATION_CAPABILITY_ZERO_BLOCKS),
    DEFINE_PROP_MIG_CAP("x-compress", MIGRATION_CAPABILITY_COMPRESS),
    DEFINE_PROP_MIG_CAP("x-events", MIGRATION_CAPABILITY_EVENTS),
    DEFINE_PROP_MIG_CAP("x-postcopy-ram", MIGRATION_CAPABILITY_POSTCOPY_RAM),
    DEFINE_PROP_MIG_CAP("x-colo", MIGRATION_CAPABILITY_X_COLO),
    DEFINE_PROP_MIG_CAP("x-release-ram", MIGRATION_CAPABILITY_RELEASE_RAM),
    DEFINE_PROP_MIG_CAP("x-block", MIGRATION_CAPABILITY_BLOCK),
    DEFINE_PROP_MIG_CAP("x-return-path", MIGRATION_CAPABILITY_RETURN_PATH),
    DEFINE_PROP_MIG_CAP("x-multifd", MIGRATION_CAPABILITY_MULTIFD),
    DEFINE_PROP_MIG_CAP("x-background-snapshot",
                        MIGRATION_CAPABILITY_BACKGROUND_SNAPSHOT),

    DEFINE_PROP_END_OF_LIST(),
};

static void migration_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->user_creatable = false;
    device_class_set_props(dc, migration_properties);
}
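/*
 * Usage sketch (assuming the standard -global handling for device-class
 * properties; shown for illustration, not as a recommendation): because
 * the properties above are registered on a TYPE_DEVICE subclass, they
 * can be tuned from the command line, e.g.:
 *
 *     qemu-system-x86_64 -global migration.send-configuration=off
 *
 * This is precisely why TYPE_MIGRATION still derives from TYPE_DEVICE;
 * see the note on the TypeInfo below.
 */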
static void migration_instance_finalize(Object *obj)
{
    MigrationState *ms = MIGRATION_OBJ(obj);
    MigrationParameters *params = &ms->parameters;

    qemu_mutex_destroy(&ms->error_mutex);
    qemu_mutex_destroy(&ms->qemu_file_lock);
    g_free(params->tls_hostname);
    g_free(params->tls_creds);
    qemu_sem_destroy(&ms->wait_unplug_sem);
    qemu_sem_destroy(&ms->rate_limit_sem);
    qemu_sem_destroy(&ms->pause_sem);
    qemu_sem_destroy(&ms->postcopy_pause_sem);
    qemu_sem_destroy(&ms->postcopy_pause_rp_sem);
    qemu_sem_destroy(&ms->rp_state.rp_sem);
    error_free(ms->error);
}

static void migration_instance_init(Object *obj)
{
    MigrationState *ms = MIGRATION_OBJ(obj);
    MigrationParameters *params = &ms->parameters;

    ms->state = MIGRATION_STATUS_NONE;
    ms->mbps = -1;
    ms->pages_per_second = -1;
    qemu_sem_init(&ms->pause_sem, 0);
    qemu_mutex_init(&ms->error_mutex);

    params->tls_hostname = g_strdup("");
    params->tls_creds = g_strdup("");

    /* Set the has_* fields up only for the parameter checks */
    params->has_compress_level = true;
    params->has_compress_threads = true;
    params->has_decompress_threads = true;
    params->has_throttle_trigger_threshold = true;
    params->has_cpu_throttle_initial = true;
    params->has_cpu_throttle_increment = true;
    params->has_cpu_throttle_tailslow = true;
    params->has_max_bandwidth = true;
    params->has_downtime_limit = true;
    params->has_x_checkpoint_delay = true;
    params->has_block_incremental = true;
    params->has_multifd_channels = true;
    params->has_multifd_compression = true;
    params->has_multifd_zlib_level = true;
    params->has_multifd_zstd_level = true;
    params->has_xbzrle_cache_size = true;
    params->has_max_postcopy_bandwidth = true;
    params->has_max_cpu_throttle = true;
    params->has_announce_initial = true;
    params->has_announce_max = true;
    params->has_announce_rounds = true;
    params->has_announce_step = true;

    qemu_sem_init(&ms->postcopy_pause_sem, 0);
    qemu_sem_init(&ms->postcopy_pause_rp_sem, 0);
    qemu_sem_init(&ms->rp_state.rp_sem, 0);
    qemu_sem_init(&ms->rate_limit_sem, 0);
    qemu_sem_init(&ms->wait_unplug_sem, 0);
    qemu_mutex_init(&ms->qemu_file_lock);
}

/*
 * Return true if the check passes, false otherwise.  On failure, an
 * error is set in errp if provided.
 */
static bool migration_object_check(MigrationState *ms, Error **errp)
{
    MigrationCapabilityStatusList *head = NULL;
    /* Assuming all capabilities are off */
    bool cap_list[MIGRATION_CAPABILITY__MAX] = { 0 };
    bool ret;
    int i;

    if (!migrate_params_check(&ms->parameters, errp)) {
        return false;
    }

    for (i = 0; i < MIGRATION_CAPABILITY__MAX; i++) {
        if (ms->enabled_capabilities[i]) {
            QAPI_LIST_PREPEND(head, migrate_cap_add(i, true));
        }
    }

    ret = migrate_caps_check(cap_list, head, errp);

    /* It works with head == NULL */
    qapi_free_MigrationCapabilityStatusList(head);

    return ret;
}
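/*
 * A minimal caller sketch (hypothetical, illustration only, not part of
 * the build; example_validate_migration_object is an invented name):
 * how the check above is meant to be used once the migration object has
 * been created and its properties applied.
 */
#if 0
static void example_validate_migration_object(void)
{
    Error *err = NULL;

    if (!migration_object_check(migrate_get_current(), &err)) {
        error_report_err(err);
        exit(EXIT_FAILURE);
    }
}
#endif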
static const TypeInfo migration_type = {
    .name = TYPE_MIGRATION,
    /*
     * NOTE: TYPE_MIGRATION is not really a device, as the object is
     * not created using qdev_new(), it is not attached to the qdev
     * device tree, and it is never realized.
     *
     * TODO: Make this TYPE_OBJECT once QOM provides something like
     * TYPE_DEVICE's "-global" properties.
     */
    .parent = TYPE_DEVICE,
    .class_init = migration_class_init,
    .class_size = sizeof(MigrationClass),
    .instance_size = sizeof(MigrationState),
    .instance_init = migration_instance_init,
    .instance_finalize = migration_instance_finalize,
};

static void register_migration_types(void)
{
    type_register_static(&migration_type);
}

type_init(register_migration_types);
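/*
 * Note on the surrounding wiring (sketch, for orientation only): the
 * type registered here is instantiated once, early at startup, with
 * the equivalent of
 *
 *     current_migration = MIGRATION_OBJ(object_new(TYPE_MIGRATION));
 *
 * after which migrate_get_current() hands out that singleton to the
 * rest of this file.
 */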