/*
 * QEMU live migration
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "migration/blocker.h"
#include "exec.h"
#include "fd.h"
#include "file.h"
#include "socket.h"
#include "sysemu/runstate.h"
#include "sysemu/sysemu.h"
#include "sysemu/cpu-throttle.h"
#include "rdma.h"
#include "ram.h"
#include "ram-compress.h"
#include "migration/global_state.h"
#include "migration/misc.h"
#include "migration.h"
#include "migration-stats.h"
#include "savevm.h"
#include "qemu-file.h"
#include "channel.h"
#include "migration/vmstate.h"
#include "block/block.h"
#include "qapi/error.h"
#include "qapi/clone-visitor.h"
#include "qapi/qapi-visit-migration.h"
#include "qapi/qapi-visit-sockets.h"
#include "qapi/qapi-commands-migration.h"
#include "qapi/qapi-events-migration.h"
#include "qapi/qmp/qerror.h"
#include "qapi/qmp/qnull.h"
#include "qemu/rcu.h"
#include "block.h"
#include "postcopy-ram.h"
#include "qemu/thread.h"
#include "trace.h"
#include "exec/target_page.h"
#include "io/channel-buffer.h"
#include "io/channel-tls.h"
#include "migration/colo.h"
#include "hw/boards.h"
#include "monitor/monitor.h"
#include "net/announce.h"
#include "qemu/queue.h"
#include "multifd.h"
#include "threadinfo.h"
#include "qemu/yank.h"
#include "sysemu/cpus.h"
#include "yank_functions.h"
#include "sysemu/qtest.h"
#include "options.h"
#include "sysemu/dirtylimit.h"
#include "qemu/sockets.h"
#include "sysemu/kvm.h"

static NotifierWithReturnList migration_state_notifiers =
    NOTIFIER_WITH_RETURN_LIST_INITIALIZER(migration_state_notifiers);

/* Messages sent on the return path from destination to source */
enum mig_rp_message_type {
    MIG_RP_MSG_INVALID = 0,  /* Must be 0 */
    MIG_RP_MSG_SHUT,         /* sibling will not send any more RP messages */
    MIG_RP_MSG_PONG,         /* Response to a PING; data (seq: be32 ) */

    MIG_RP_MSG_REQ_PAGES_ID, /* data (start: be64, len: be32, id: string) */
    MIG_RP_MSG_REQ_PAGES,    /* data (start: be64, len: be32) */
    MIG_RP_MSG_RECV_BITMAP,  /* send recved_bitmap back to source */
    MIG_RP_MSG_RESUME_ACK,   /* tell source that we are ready to resume */
    MIG_RP_MSG_SWITCHOVER_ACK, /* Tell source it's OK to do switchover */

    MIG_RP_MSG_MAX
};
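/*
 * A sketch of the on-the-wire framing of these messages, as implied by
 * migrate_send_rp_message() below (this is not a separate protocol spec):
 *
 *   uint16_t type;       big endian, one of mig_rp_message_type
 *   uint16_t len;        big endian, payload length in bytes
 *   uint8_t  data[len];  message-specific payload
 *
 * The payload length expected for each message type is recorded in the
 * rp_cmd_args[] table further down in this file.
 */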
/* When we add fault tolerance, we could have several
   migrations at once.  For now we don't need to add
   dynamic creation of migration */

static MigrationState *current_migration;
static MigrationIncomingState *current_incoming;

static GSList *migration_blockers[MIG_MODE__MAX];

static bool migration_object_check(MigrationState *ms, Error **errp);
static int migration_maybe_pause(MigrationState *s,
                                 int *current_active_state,
                                 int new_state);
static void migrate_fd_cancel(MigrationState *s);
static bool close_return_path_on_source(MigrationState *s);

static void migration_downtime_start(MigrationState *s)
{
    trace_vmstate_downtime_checkpoint("src-downtime-start");
    s->downtime_start = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
}

static void migration_downtime_end(MigrationState *s)
{
    int64_t now = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);

    /*
     * If downtime is already set, postcopy must have set it earlier, in
     * which case it already holds the real downtime.
     */
    if (!s->downtime) {
        s->downtime = now - s->downtime_start;
    }

    trace_vmstate_downtime_checkpoint("src-downtime-end");
}

static bool migration_needs_multiple_sockets(void)
{
    return migrate_multifd() || migrate_postcopy_preempt();
}

static bool transport_supports_multi_channels(MigrationAddress *addr)
{
    if (addr->transport == MIGRATION_ADDRESS_TYPE_SOCKET) {
        SocketAddress *saddr = &addr->u.socket;

        return saddr->type == SOCKET_ADDRESS_TYPE_INET ||
               saddr->type == SOCKET_ADDRESS_TYPE_UNIX ||
               saddr->type == SOCKET_ADDRESS_TYPE_VSOCK;
    }

    return false;
}

static bool
migration_channels_and_transport_compatible(MigrationAddress *addr,
                                            Error **errp)
{
    if (migration_needs_multiple_sockets() &&
        !transport_supports_multi_channels(addr)) {
        error_setg(errp, "Migration requires multi-channel URIs (e.g. tcp)");
        return false;
    }

    return true;
}

static gint page_request_addr_cmp(gconstpointer ap, gconstpointer bp)
{
    uintptr_t a = (uintptr_t) ap, b = (uintptr_t) bp;

    return (a > b) - (a < b);
}

int migration_stop_vm(RunState state)
{
    int ret = vm_stop_force_state(state);

    trace_vmstate_downtime_checkpoint("src-vm-stopped");

    return ret;
}
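/*
 * Expected to run exactly once during startup: it creates both the global
 * outgoing MigrationState object and the incoming MigrationIncomingState
 * singleton (see the asserts below).
 */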
void migration_object_init(void)
{
    /* This can only be called once. */
    assert(!current_migration);
    current_migration = MIGRATION_OBJ(object_new(TYPE_MIGRATION));

    /*
     * Initialize the incoming migration object as well, no matter
     * whether we'll use it or not.
     */
    assert(!current_incoming);
    current_incoming = g_new0(MigrationIncomingState, 1);
    current_incoming->state = MIGRATION_STATUS_NONE;
    current_incoming->postcopy_remote_fds =
        g_array_new(FALSE, TRUE, sizeof(struct PostCopyFD));
    qemu_mutex_init(&current_incoming->rp_mutex);
    qemu_mutex_init(&current_incoming->postcopy_prio_thread_mutex);
    qemu_event_init(&current_incoming->main_thread_load_event, false);
    qemu_sem_init(&current_incoming->postcopy_pause_sem_dst, 0);
    qemu_sem_init(&current_incoming->postcopy_pause_sem_fault, 0);
    qemu_sem_init(&current_incoming->postcopy_pause_sem_fast_load, 0);
    qemu_sem_init(&current_incoming->postcopy_qemufile_dst_done, 0);

    qemu_mutex_init(&current_incoming->page_request_mutex);
    qemu_cond_init(&current_incoming->page_request_cond);
    current_incoming->page_requested = g_tree_new(page_request_addr_cmp);

    migration_object_check(current_migration, &error_fatal);

    blk_mig_init();
    ram_mig_init();
    dirty_bitmap_mig_init();
}

typedef struct {
    QEMUBH *bh;
    QEMUBHFunc *cb;
    void *opaque;
} MigrationBH;

static void migration_bh_dispatch_bh(void *opaque)
{
    MigrationState *s = migrate_get_current();
    MigrationBH *migbh = opaque;

    /* cleanup this BH */
    qemu_bh_delete(migbh->bh);
    migbh->bh = NULL;

    /* dispatch the other one */
    migbh->cb(migbh->opaque);
    object_unref(OBJECT(s));

    g_free(migbh);
}

void migration_bh_schedule(QEMUBHFunc *cb, void *opaque)
{
    MigrationState *s = migrate_get_current();
    MigrationBH *migbh = g_new0(MigrationBH, 1);
    QEMUBH *bh = qemu_bh_new(migration_bh_dispatch_bh, migbh);

    /* Store these to dispatch when the BH runs */
    migbh->bh = bh;
    migbh->cb = cb;
    migbh->opaque = opaque;

    /*
     * Ref the state for the bh, because it may be called when
     * there are already no other refs
     */
    object_ref(OBJECT(s));
    qemu_bh_schedule(bh);
}

void migration_cancel(const Error *error)
{
    if (error) {
        migrate_set_error(current_migration, error);
    }
    if (migrate_dirty_limit()) {
        qmp_cancel_vcpu_dirty_limit(false, -1, NULL);
    }
    migrate_fd_cancel(current_migration);
}

void migration_shutdown(void)
{
    /*
     * When the QEMU main thread exits, the COLO thread may be waiting
     * on a semaphore.  So, we should wake up the COLO thread before
     * migration shutdown.
     */
    colo_shutdown();
    /*
     * Cancel the current migration - that will (eventually)
     * stop the migration using this structure
     */
    migration_cancel(NULL);
    object_unref(OBJECT(current_migration));

    /*
     * Cancel outgoing migration of dirty bitmaps.  It should
     * at least unref used block nodes.
     */
    dirty_bitmap_mig_cancel_outgoing();

    /*
     * Cancel incoming migration of dirty bitmaps.  Dirty bitmaps
     * are non-critical data, and their loss is never considered
     * something serious.
     */
    dirty_bitmap_mig_cancel_incoming();
}
/* For outgoing */
MigrationState *migrate_get_current(void)
{
    /* This can only be called after the object has been created. */
    assert(current_migration);
    return current_migration;
}

MigrationIncomingState *migration_incoming_get_current(void)
{
    assert(current_incoming);
    return current_incoming;
}

void migration_incoming_transport_cleanup(MigrationIncomingState *mis)
{
    if (mis->socket_address_list) {
        qapi_free_SocketAddressList(mis->socket_address_list);
        mis->socket_address_list = NULL;
    }

    if (mis->transport_cleanup) {
        mis->transport_cleanup(mis->transport_data);
        mis->transport_data = mis->transport_cleanup = NULL;
    }
}

void migration_incoming_state_destroy(void)
{
    struct MigrationIncomingState *mis = migration_incoming_get_current();

    multifd_recv_cleanup();
    compress_threads_load_cleanup();

    if (mis->to_src_file) {
        /* Tell source that we are done */
        migrate_send_rp_shut(mis, qemu_file_get_error(mis->from_src_file) != 0);
        qemu_fclose(mis->to_src_file);
        mis->to_src_file = NULL;
    }

    if (mis->from_src_file) {
        migration_ioc_unregister_yank_from_file(mis->from_src_file);
        qemu_fclose(mis->from_src_file);
        mis->from_src_file = NULL;
    }
    if (mis->postcopy_remote_fds) {
        g_array_free(mis->postcopy_remote_fds, TRUE);
        mis->postcopy_remote_fds = NULL;
    }

    migration_incoming_transport_cleanup(mis);
    qemu_event_reset(&mis->main_thread_load_event);

    if (mis->page_requested) {
        g_tree_destroy(mis->page_requested);
        mis->page_requested = NULL;
    }

    if (mis->postcopy_qemufile_dst) {
        migration_ioc_unregister_yank_from_file(mis->postcopy_qemufile_dst);
        qemu_fclose(mis->postcopy_qemufile_dst);
        mis->postcopy_qemufile_dst = NULL;
    }

    yank_unregister_instance(MIGRATION_YANK_INSTANCE);
}

static void migrate_generate_event(int new_state)
{
    if (migrate_events()) {
        qapi_event_send_migration(new_state);
    }
}

/*
 * Send a message on the return channel back to the source
 * of the migration.
 */
static int migrate_send_rp_message(MigrationIncomingState *mis,
                                   enum mig_rp_message_type message_type,
                                   uint16_t len, void *data)
{
    int ret = 0;

    trace_migrate_send_rp_message((int)message_type, len);
    QEMU_LOCK_GUARD(&mis->rp_mutex);

    /*
     * It's possible that the file handle got lost due to network
     * failures.
     */
    if (!mis->to_src_file) {
        ret = -EIO;
        return ret;
    }

    qemu_put_be16(mis->to_src_file, (unsigned int)message_type);
    qemu_put_be16(mis->to_src_file, len);
    qemu_put_buffer(mis->to_src_file, data, len);
    return qemu_fflush(mis->to_src_file);
}
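/*
 * Payload layouts used by the page request messages below, a sketch
 * derived from the bufc[] packing in migrate_send_rp_message_req_pages():
 *
 *   MIG_RP_MSG_REQ_PAGES:    be64 start, be32 len
 *   MIG_RP_MSG_REQ_PAGES_ID: be64 start, be32 len,
 *                            u8 idstr_len, char idstr[idstr_len]
 *
 * The _ID variant is only sent when the requested RAMBlock differs from
 * the previously requested one (see the mis->last_rb check below).
 */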
/* Request one page from the source VM at the given start address.
 *   rb: the RAMBlock to request the page in
 *   Start: Address offset within the RB
 *   Len: Length in bytes required - must be a multiple of pagesize
 */
int migrate_send_rp_message_req_pages(MigrationIncomingState *mis,
                                      RAMBlock *rb, ram_addr_t start)
{
    uint8_t bufc[12 + 1 + 255]; /* start (8), len (4), rbname up to 256 */
    size_t msglen = 12; /* start + len */
    size_t len = qemu_ram_pagesize(rb);
    enum mig_rp_message_type msg_type;
    const char *rbname;
    int rbname_len;

    *(uint64_t *)bufc = cpu_to_be64((uint64_t)start);
    *(uint32_t *)(bufc + 8) = cpu_to_be32((uint32_t)len);

    /*
     * We maintain the last ramblock that we requested a page for.  Note
     * that we don't need locking because this function will only be
     * called within the postcopy ram fault thread.
     */
    if (rb != mis->last_rb) {
        mis->last_rb = rb;

        rbname = qemu_ram_get_idstr(rb);
        rbname_len = strlen(rbname);

        assert(rbname_len < 256);

        bufc[msglen++] = rbname_len;
        memcpy(bufc + msglen, rbname, rbname_len);
        msglen += rbname_len;
        msg_type = MIG_RP_MSG_REQ_PAGES_ID;
    } else {
        msg_type = MIG_RP_MSG_REQ_PAGES;
    }

    return migrate_send_rp_message(mis, msg_type, msglen, bufc);
}

int migrate_send_rp_req_pages(MigrationIncomingState *mis,
                              RAMBlock *rb, ram_addr_t start, uint64_t haddr)
{
    void *aligned = (void *)(uintptr_t)ROUND_DOWN(haddr, qemu_ram_pagesize(rb));
    bool received = false;

    WITH_QEMU_LOCK_GUARD(&mis->page_request_mutex) {
        received = ramblock_recv_bitmap_test_byte_offset(rb, start);
        if (!received && !g_tree_lookup(mis->page_requested, aligned)) {
            /*
             * The page has not been received, and it's not yet in the page
             * request list.  Queue it.  Set the value of element to 1, so
             * that things like g_tree_lookup() will return TRUE (1) when
             * found.
             */
            g_tree_insert(mis->page_requested, aligned, (gpointer)1);
            qatomic_inc(&mis->page_requested_count);
            trace_postcopy_page_req_add(aligned, mis->page_requested_count);
        }
    }

    /*
     * If the page is there, skip sending the message.  We don't even need
     * the lock because as long as the page arrived, it'll be there forever.
     */
    if (received) {
        return 0;
    }

    return migrate_send_rp_message_req_pages(mis, rb, start);
}

static bool migration_colo_enabled;
bool migration_incoming_colo_enabled(void)
{
    return migration_colo_enabled;
}

void migration_incoming_disable_colo(void)
{
    ram_block_discard_disable(false);
    migration_colo_enabled = false;
}

int migration_incoming_enable_colo(void)
{
#ifndef CONFIG_REPLICATION
    error_report("ENABLE_COLO command came in the migration stream, but the "
                 "COLO module is not built in");
    return -ENOTSUP;
#endif

    if (!migrate_colo()) {
        error_report("ENABLE_COLO command came in the migration stream, but "
                     "the c-colo capability is not set");
        return -EINVAL;
    }

    if (ram_block_discard_disable(true)) {
        error_report("COLO: cannot disable RAM discard");
        return -EBUSY;
    }
    migration_colo_enabled = true;
    return 0;
}

void migrate_add_address(SocketAddress *address)
{
    MigrationIncomingState *mis = migration_incoming_get_current();

    QAPI_LIST_PREPEND(mis->socket_address_list,
                      QAPI_CLONE(SocketAddress, address));
}
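/*
 * Illustrative examples of URIs accepted by migrate_uri_parse() below
 * (the parser branches are the authoritative syntax; the host, port and
 * path values here are placeholders):
 *
 *   tcp:192.168.0.10:4444
 *   unix:/some/path.sock
 *   vsock:3:4444
 *   fd:42
 *   exec:gzip -c > /tmp/vm-state.gz
 *   rdma:192.168.0.10:4444
 *   file:/tmp/vm-state.bin
 */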
bool migrate_uri_parse(const char *uri, MigrationChannel **channel,
                       Error **errp)
{
    g_autoptr(MigrationChannel) val = g_new0(MigrationChannel, 1);
    g_autoptr(MigrationAddress) addr = g_new0(MigrationAddress, 1);
    InetSocketAddress *isock = &addr->u.rdma;
    strList **tail = &addr->u.exec.args;

    if (strstart(uri, "exec:", NULL)) {
        addr->transport = MIGRATION_ADDRESS_TYPE_EXEC;
#ifdef WIN32
        QAPI_LIST_APPEND(tail, g_strdup(exec_get_cmd_path()));
        QAPI_LIST_APPEND(tail, g_strdup("/c"));
#else
        QAPI_LIST_APPEND(tail, g_strdup("/bin/sh"));
        QAPI_LIST_APPEND(tail, g_strdup("-c"));
#endif
        QAPI_LIST_APPEND(tail, g_strdup(uri + strlen("exec:")));
    } else if (strstart(uri, "rdma:", NULL)) {
        if (inet_parse(isock, uri + strlen("rdma:"), errp)) {
            qapi_free_InetSocketAddress(isock);
            return false;
        }
        addr->transport = MIGRATION_ADDRESS_TYPE_RDMA;
    } else if (strstart(uri, "tcp:", NULL) ||
               strstart(uri, "unix:", NULL) ||
               strstart(uri, "vsock:", NULL) ||
               strstart(uri, "fd:", NULL)) {
        addr->transport = MIGRATION_ADDRESS_TYPE_SOCKET;
        SocketAddress *saddr = socket_parse(uri, errp);
        if (!saddr) {
            return false;
        }
        addr->u.socket.type = saddr->type;
        addr->u.socket.u = saddr->u;
        /* Don't free the objects inside; their ownership moved to "addr" */
        g_free(saddr);
    } else if (strstart(uri, "file:", NULL)) {
        addr->transport = MIGRATION_ADDRESS_TYPE_FILE;
        addr->u.file.filename = g_strdup(uri + strlen("file:"));
        if (file_parse_offset(addr->u.file.filename, &addr->u.file.offset,
                              errp)) {
            return false;
        }
    } else {
        error_setg(errp, "unknown migration protocol: %s", uri);
        return false;
    }

    val->channel_type = MIGRATION_CHANNEL_TYPE_MAIN;
    val->addr = g_steal_pointer(&addr);
    *channel = g_steal_pointer(&val);
    return true;
}
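/*
 * Illustrative ways an incoming migration typically reaches
 * qemu_start_incoming_migration() (assuming the usual command-line and
 * QMP plumbing; the addresses are placeholders):
 *
 *   qemu-system-x86_64 ... -incoming tcp:0:4444
 *   qemu-system-x86_64 ... -incoming defer
 *     followed by QMP: { "execute": "migrate-incoming",
 *                        "arguments": { "uri": "tcp:0:4444" } }
 */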
static void qemu_start_incoming_migration(const char *uri, bool has_channels,
                                          MigrationChannelList *channels,
                                          Error **errp)
{
    g_autoptr(MigrationChannel) channel = NULL;
    MigrationAddress *addr = NULL;
    MigrationIncomingState *mis = migration_incoming_get_current();

    /*
     * Do preliminary checks on the uri and channels arguments.
     */
    if (!uri == !channels) {
        error_setg(errp, "need either 'uri' or 'channels' argument");
        return;
    }

    if (channels) {
        /* Verify that the migration channel list has only one entry */
        if (channels->next) {
            error_setg(errp, "Channel list has more than one entry");
            return;
        }
        addr = channels->value->addr;
    }

    if (uri) {
        /* caller uses the old URI syntax */
        if (!migrate_uri_parse(uri, &channel, errp)) {
            return;
        }
        addr = channel->addr;
    }

    /* transport mechanism not suitable for migration? */
    if (!migration_channels_and_transport_compatible(addr, errp)) {
        return;
    }

    migrate_set_state(&mis->state, MIGRATION_STATUS_NONE,
                      MIGRATION_STATUS_SETUP);

    if (addr->transport == MIGRATION_ADDRESS_TYPE_SOCKET) {
        SocketAddress *saddr = &addr->u.socket;
        if (saddr->type == SOCKET_ADDRESS_TYPE_INET ||
            saddr->type == SOCKET_ADDRESS_TYPE_UNIX ||
            saddr->type == SOCKET_ADDRESS_TYPE_VSOCK) {
            socket_start_incoming_migration(saddr, errp);
        } else if (saddr->type == SOCKET_ADDRESS_TYPE_FD) {
            fd_start_incoming_migration(saddr->u.fd.str, errp);
        }
#ifdef CONFIG_RDMA
    } else if (addr->transport == MIGRATION_ADDRESS_TYPE_RDMA) {
        if (migrate_compress()) {
            error_setg(errp, "RDMA and compression can't be used together");
            return;
        }
        if (migrate_xbzrle()) {
            error_setg(errp, "RDMA and XBZRLE can't be used together");
            return;
        }
        if (migrate_multifd()) {
            error_setg(errp, "RDMA and multifd can't be used together");
            return;
        }
        rdma_start_incoming_migration(&addr->u.rdma, errp);
#endif
    } else if (addr->transport == MIGRATION_ADDRESS_TYPE_EXEC) {
        exec_start_incoming_migration(addr->u.exec.args, errp);
    } else if (addr->transport == MIGRATION_ADDRESS_TYPE_FILE) {
        file_start_incoming_migration(&addr->u.file, errp);
    } else {
        error_setg(errp, "unknown migration protocol: %s", uri);
    }
}
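/*
 * Note: this bottom half runs on the main thread; it is scheduled via
 * migration_bh_schedule() from process_incoming_migration_co() below,
 * after qemu_loadvm_state() has finished loading the incoming stream.
 */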
static void process_incoming_migration_bh(void *opaque)
{
    Error *local_err = NULL;
    MigrationIncomingState *mis = opaque;

    trace_vmstate_downtime_checkpoint("dst-precopy-bh-enter");

    /* If capability late_block_activate is set:
     * Only fire up the block code now if we're going to restart the
     * VM, else 'cont' will do it.
     * This causes file locking to happen; so we don't want it to happen
     * unless we really are starting the VM.
     */
    if (!migrate_late_block_activate() ||
         (autostart && (!global_state_received() ||
            runstate_is_live(global_state_get_runstate())))) {
        /* Make sure all file formats throw away their mutable metadata.
         * If we get an error here, just don't restart the VM yet. */
        bdrv_activate_all(&local_err);
        if (local_err) {
            error_report_err(local_err);
            local_err = NULL;
            autostart = false;
        }
    }

    /*
     * This must happen after all error conditions are dealt with and
     * we're sure the VM is going to be running on this host.
     */
    qemu_announce_self(&mis->announce_timer, migrate_announce_params());

    trace_vmstate_downtime_checkpoint("dst-precopy-bh-announced");

    multifd_recv_shutdown();

    dirty_bitmap_mig_before_vm_start();

    if (!global_state_received() ||
        runstate_is_live(global_state_get_runstate())) {
        if (autostart) {
            vm_start();
        } else {
            runstate_set(RUN_STATE_PAUSED);
        }
    } else if (migration_incoming_colo_enabled()) {
        migration_incoming_disable_colo();
        vm_start();
    } else {
        runstate_set(global_state_get_runstate());
    }
    trace_vmstate_downtime_checkpoint("dst-precopy-bh-vm-started");
    /*
     * This must happen after any state changes since as soon as an external
     * observer sees this event they might start to prod at the VM assuming
     * it's ready to use.
     */
    migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
                      MIGRATION_STATUS_COMPLETED);
    migration_incoming_state_destroy();
}

static void coroutine_fn
process_incoming_migration_co(void *opaque)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    PostcopyState ps;
    int ret;

    assert(mis->from_src_file);

    if (compress_threads_load_setup(mis->from_src_file)) {
        error_report("Failed to setup decompress threads");
        goto fail;
    }

    mis->largest_page_size = qemu_ram_pagesize_largest();
    postcopy_state_set(POSTCOPY_INCOMING_NONE);
    migrate_set_state(&mis->state, MIGRATION_STATUS_SETUP,
                      MIGRATION_STATUS_ACTIVE);

    mis->loadvm_co = qemu_coroutine_self();
    ret = qemu_loadvm_state(mis->from_src_file);
    mis->loadvm_co = NULL;

    trace_vmstate_downtime_checkpoint("dst-precopy-loadvm-completed");

    ps = postcopy_state_get();
    trace_process_incoming_migration_co_end(ret, ps);
    if (ps != POSTCOPY_INCOMING_NONE) {
        if (ps == POSTCOPY_INCOMING_ADVISE) {
            /*
             * Where a migration had postcopy enabled (and thus went to advise)
             * but managed to complete within the precopy period, we can use
             * the normal exit.
             */
            postcopy_ram_incoming_cleanup(mis);
        } else if (ret >= 0) {
            /*
             * Postcopy was started, cleanup should happen at the end of the
             * postcopy thread.
             */
            trace_process_incoming_migration_co_postcopy_end_main();
            return;
        }
        /* Else if something went wrong then just fall out of the normal exit */
    }

    if (ret < 0) {
        MigrationState *s = migrate_get_current();

        if (migrate_has_error(s)) {
            WITH_QEMU_LOCK_GUARD(&s->error_mutex) {
                error_report_err(s->error);
            }
        }
        error_report("load of migration failed: %s", strerror(-ret));
        goto fail;
    }

    if (colo_incoming_co() < 0) {
        goto fail;
    }

    migration_bh_schedule(process_incoming_migration_bh, mis);
    return;
fail:
    migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
                      MIGRATION_STATUS_FAILED);
    qemu_fclose(mis->from_src_file);

    multifd_recv_cleanup();
    compress_threads_load_cleanup();

    exit(EXIT_FAILURE);
}

/**
 * migration_incoming_setup: Setup incoming migration
 * @f: file for main migration channel
 */
static void migration_incoming_setup(QEMUFile *f)
{
    MigrationIncomingState *mis = migration_incoming_get_current();

    if (!mis->from_src_file) {
        mis->from_src_file = f;
    }
    qemu_file_set_blocking(f, false);
}

void migration_incoming_process(void)
{
    Coroutine *co = qemu_coroutine_create(process_incoming_migration_co, NULL);
    qemu_coroutine_enter(co);
}
/* Returns true if recovered from a paused migration, otherwise false */
static bool postcopy_try_recover(void)
{
    MigrationIncomingState *mis = migration_incoming_get_current();

    if (mis->state == MIGRATION_STATUS_POSTCOPY_PAUSED) {
        /* Resumed from a paused postcopy migration */

        /* This should be set already in migration_incoming_setup() */
        assert(mis->from_src_file);
        /* Postcopy has standalone thread to do vm load */
        qemu_file_set_blocking(mis->from_src_file, true);

        /* Re-configure the return path */
        mis->to_src_file = qemu_file_get_return_path(mis->from_src_file);

        migrate_set_state(&mis->state, MIGRATION_STATUS_POSTCOPY_PAUSED,
                          MIGRATION_STATUS_POSTCOPY_RECOVER);

        /*
         * Here, we only wake up the main loading thread (while the other
         * threads will still be waiting), so that we can receive commands
         * from the source now, and answer them if needed.  The other
         * threads will be woken up afterwards, once we are sure that the
         * source is ready to reply to page requests.
         */
        qemu_sem_post(&mis->postcopy_pause_sem_dst);
        return true;
    }

    return false;
}

void migration_fd_process_incoming(QEMUFile *f)
{
    migration_incoming_setup(f);
    if (postcopy_try_recover()) {
        return;
    }
    migration_incoming_process();
}

/*
 * Returns true when we want to start a new incoming migration process,
 * false otherwise.
 */
static bool migration_should_start_incoming(bool main_channel)
{
    /* Multifd doesn't start unless all channels are established */
    if (migrate_multifd()) {
        return migration_has_all_channels();
    }

    /* Preempt channel only starts when the main channel is created */
    if (migrate_postcopy_preempt()) {
        return main_channel;
    }

    /*
     * For all the remaining migration types, we should only reach here
     * when it's the main channel that's being created, and we should
     * always proceed with this channel.
     */
    assert(main_channel);
    return true;
}

void migration_ioc_process_incoming(QIOChannel *ioc, Error **errp)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    Error *local_err = NULL;
    QEMUFile *f;
    bool default_channel = true;
    uint32_t channel_magic = 0;
    int ret = 0;

    if (migrate_multifd() && !migrate_postcopy_ram() &&
        qio_channel_has_feature(ioc, QIO_CHANNEL_FEATURE_READ_MSG_PEEK)) {
        /*
         * With multiple channels, it is possible that we receive channels
         * out of order on the destination side, causing incorrect mapping
         * of source channels on the destination side.  Check the channel
         * MAGIC to decide the type of channel.  Please note this is best
         * effort: the postcopy preempt channel does not send any magic
         * number, so avoid it for postcopy live migration.  Also, TLS live
         * migration already does a TLS handshake while initializing the
         * main channel, so this issue is not possible with TLS.
         */
        ret = migration_channel_read_peek(ioc, (void *)&channel_magic,
                                          sizeof(channel_magic), errp);

        if (ret != 0) {
            return;
        }

        default_channel = (channel_magic == cpu_to_be32(QEMU_VM_FILE_MAGIC));
    } else {
        default_channel = !mis->from_src_file;
    }

    if (multifd_recv_setup(errp) != 0) {
        return;
    }

    if (default_channel) {
        f = qemu_file_new_input(ioc);
        migration_incoming_setup(f);
    } else {
        /* Multiple connections */
        assert(migration_needs_multiple_sockets());
        if (migrate_multifd()) {
            multifd_recv_new_channel(ioc, &local_err);
        } else {
            assert(migrate_postcopy_preempt());
            f = qemu_file_new_input(ioc);
            postcopy_preempt_new_channel(mis, f);
        }
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    }

    if (migration_should_start_incoming(default_channel)) {
        /* If it's a recovery, we're done */
        if (postcopy_try_recover()) {
            return;
        }
        migration_incoming_process();
    }
}
/**
 * @migration_has_all_channels: We have received all channels that we need
 *
 * Returns true when we have got connections to all the channels that
 * we need for migration.
 */
bool migration_has_all_channels(void)
{
    MigrationIncomingState *mis = migration_incoming_get_current();

    if (!mis->from_src_file) {
        return false;
    }

    if (migrate_multifd()) {
        return multifd_recv_all_channels_created();
    }

    if (migrate_postcopy_preempt()) {
        return mis->postcopy_qemufile_dst != NULL;
    }

    return true;
}

int migrate_send_rp_switchover_ack(MigrationIncomingState *mis)
{
    return migrate_send_rp_message(mis, MIG_RP_MSG_SWITCHOVER_ACK, 0, NULL);
}

/*
 * Send a 'SHUT' message on the return channel with the given value
 * to indicate that we've finished with the RP.  Non-0 value indicates
 * error.
 */
void migrate_send_rp_shut(MigrationIncomingState *mis,
                          uint32_t value)
{
    uint32_t buf;

    buf = cpu_to_be32(value);
    migrate_send_rp_message(mis, MIG_RP_MSG_SHUT, sizeof(buf), &buf);
}

/*
 * Send a 'PONG' message on the return channel with the given value
 * (normally in response to a 'PING')
 */
void migrate_send_rp_pong(MigrationIncomingState *mis,
                          uint32_t value)
{
    uint32_t buf;

    buf = cpu_to_be32(value);
    migrate_send_rp_message(mis, MIG_RP_MSG_PONG, sizeof(buf), &buf);
}

void migrate_send_rp_recv_bitmap(MigrationIncomingState *mis,
                                 char *block_name)
{
    char buf[512];
    int len;
    int64_t res;

    /*
     * First, we send the header part.  It contains only the len of
     * idstr, and the idstr itself.
     */
    len = strlen(block_name);
    buf[0] = len;
    memcpy(buf + 1, block_name, len);

    if (mis->state != MIGRATION_STATUS_POSTCOPY_RECOVER) {
        error_report("%s: MSG_RP_RECV_BITMAP only used for recovery",
                     __func__);
        return;
    }

    migrate_send_rp_message(mis, MIG_RP_MSG_RECV_BITMAP, len + 1, buf);

    /*
     * Next, we dump the received bitmap to the stream.
     *
     * TODO: currently we are safe since we are the only one that is
     * using the to_src_file handle (fault thread is still paused),
     * and it's ok even not taking the mutex.  However the best way is
     * to take the lock before sending the message header, and release
     * the lock after sending the bitmap.
     */
    qemu_mutex_lock(&mis->rp_mutex);
    res = ramblock_recv_bitmap_send(mis->to_src_file, block_name);
    qemu_mutex_unlock(&mis->rp_mutex);

    trace_migrate_send_rp_recv_bitmap(block_name, res);
}

void migrate_send_rp_resume_ack(MigrationIncomingState *mis, uint32_t value)
{
    uint32_t buf;

    buf = cpu_to_be32(value);
    migrate_send_rp_message(mis, MIG_RP_MSG_RESUME_ACK, sizeof(buf), &buf);
}
/*
 * Return true if we're already in the middle of a migration
 * (i.e. any of the active or setup states)
 */
bool migration_is_setup_or_active(int state)
{
    switch (state) {
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_PAUSED:
    case MIGRATION_STATUS_POSTCOPY_RECOVER:
    case MIGRATION_STATUS_SETUP:
    case MIGRATION_STATUS_PRE_SWITCHOVER:
    case MIGRATION_STATUS_DEVICE:
    case MIGRATION_STATUS_WAIT_UNPLUG:
    case MIGRATION_STATUS_COLO:
        return true;

    default:
        return false;

    }
}

bool migration_is_running(int state)
{
    switch (state) {
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_PAUSED:
    case MIGRATION_STATUS_POSTCOPY_RECOVER:
    case MIGRATION_STATUS_SETUP:
    case MIGRATION_STATUS_PRE_SWITCHOVER:
    case MIGRATION_STATUS_DEVICE:
    case MIGRATION_STATUS_WAIT_UNPLUG:
    case MIGRATION_STATUS_CANCELLING:
        return true;

    default:
        return false;

    }
}

static bool migrate_show_downtime(MigrationState *s)
{
    return (s->state == MIGRATION_STATUS_COMPLETED) || migration_in_postcopy();
}

static void populate_time_info(MigrationInfo *info, MigrationState *s)
{
    info->has_status = true;
    info->has_setup_time = true;
    info->setup_time = s->setup_time;

    if (s->state == MIGRATION_STATUS_COMPLETED) {
        info->has_total_time = true;
        info->total_time = s->total_time;
    } else {
        info->has_total_time = true;
        info->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) -
                           s->start_time;
    }

    if (migrate_show_downtime(s)) {
        info->has_downtime = true;
        info->downtime = s->downtime;
    } else {
        info->has_expected_downtime = true;
        info->expected_downtime = s->expected_downtime;
    }
}
static void populate_ram_info(MigrationInfo *info, MigrationState *s)
{
    size_t page_size = qemu_target_page_size();

    info->ram = g_malloc0(sizeof(*info->ram));
    info->ram->transferred = migration_transferred_bytes();
    info->ram->total = ram_bytes_total();
    info->ram->duplicate = stat64_get(&mig_stats.zero_pages);
    /* legacy value.  It is not used anymore */
    info->ram->skipped = 0;
    info->ram->normal = stat64_get(&mig_stats.normal_pages);
    info->ram->normal_bytes = info->ram->normal * page_size;
    info->ram->mbps = s->mbps;
    info->ram->dirty_sync_count =
        stat64_get(&mig_stats.dirty_sync_count);
    info->ram->dirty_sync_missed_zero_copy =
        stat64_get(&mig_stats.dirty_sync_missed_zero_copy);
    info->ram->postcopy_requests =
        stat64_get(&mig_stats.postcopy_requests);
    info->ram->page_size = page_size;
    info->ram->multifd_bytes = stat64_get(&mig_stats.multifd_bytes);
    info->ram->pages_per_second = s->pages_per_second;
    info->ram->precopy_bytes = stat64_get(&mig_stats.precopy_bytes);
    info->ram->downtime_bytes = stat64_get(&mig_stats.downtime_bytes);
    info->ram->postcopy_bytes = stat64_get(&mig_stats.postcopy_bytes);

    if (migrate_xbzrle()) {
        info->xbzrle_cache = g_malloc0(sizeof(*info->xbzrle_cache));
        info->xbzrle_cache->cache_size = migrate_xbzrle_cache_size();
        info->xbzrle_cache->bytes = xbzrle_counters.bytes;
        info->xbzrle_cache->pages = xbzrle_counters.pages;
        info->xbzrle_cache->cache_miss = xbzrle_counters.cache_miss;
        info->xbzrle_cache->cache_miss_rate = xbzrle_counters.cache_miss_rate;
        info->xbzrle_cache->encoding_rate = xbzrle_counters.encoding_rate;
        info->xbzrle_cache->overflow = xbzrle_counters.overflow;
    }

    populate_compress(info);

    if (cpu_throttle_active()) {
        info->has_cpu_throttle_percentage = true;
        info->cpu_throttle_percentage = cpu_throttle_get_percentage();
    }

    if (s->state != MIGRATION_STATUS_COMPLETED) {
        info->ram->remaining = ram_bytes_remaining();
        info->ram->dirty_pages_rate =
            stat64_get(&mig_stats.dirty_pages_rate);
    }

    if (migrate_dirty_limit() && dirtylimit_in_service()) {
        info->has_dirty_limit_throttle_time_per_round = true;
        info->dirty_limit_throttle_time_per_round =
            dirtylimit_throttle_time_per_round();

        info->has_dirty_limit_ring_full_time = true;
        info->dirty_limit_ring_full_time = dirtylimit_ring_full_time();
    }
}

static void populate_disk_info(MigrationInfo *info)
{
    if (blk_mig_active()) {
        info->disk = g_malloc0(sizeof(*info->disk));
        info->disk->transferred = blk_mig_bytes_transferred();
        info->disk->remaining = blk_mig_bytes_remaining();
        info->disk->total = blk_mig_bytes_total();
    }
}
static void fill_source_migration_info(MigrationInfo *info)
{
    MigrationState *s = migrate_get_current();
    int state = qatomic_read(&s->state);
    GSList *cur_blocker = migration_blockers[migrate_mode()];

    info->blocked_reasons = NULL;

    /*
     * There are two types of reasons a migration might be blocked:
     * a) devices marked in VMState as non-migratable, and
     * b) explicit migration blockers.
     * We need to add both of them here.
     */
    qemu_savevm_non_migratable_list(&info->blocked_reasons);

    while (cur_blocker) {
        QAPI_LIST_PREPEND(info->blocked_reasons,
                          g_strdup(error_get_pretty(cur_blocker->data)));
        cur_blocker = g_slist_next(cur_blocker);
    }
    info->has_blocked_reasons = info->blocked_reasons != NULL;

    switch (state) {
    case MIGRATION_STATUS_NONE:
        /* no migration has happened ever */
        /* do not overwrite destination migration status */
        return;
    case MIGRATION_STATUS_SETUP:
        info->has_status = true;
        info->has_total_time = false;
        break;
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_CANCELLING:
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
    case MIGRATION_STATUS_PRE_SWITCHOVER:
    case MIGRATION_STATUS_DEVICE:
    case MIGRATION_STATUS_POSTCOPY_PAUSED:
    case MIGRATION_STATUS_POSTCOPY_RECOVER:
        /* TODO add some postcopy stats */
        populate_time_info(info, s);
        populate_ram_info(info, s);
        populate_disk_info(info);
        migration_populate_vfio_info(info);
        break;
    case MIGRATION_STATUS_COLO:
        info->has_status = true;
        /* TODO: display COLO specific information (checkpoint info etc.) */
        break;
    case MIGRATION_STATUS_COMPLETED:
        populate_time_info(info, s);
        populate_ram_info(info, s);
        migration_populate_vfio_info(info);
        break;
    case MIGRATION_STATUS_FAILED:
        info->has_status = true;
        break;
    case MIGRATION_STATUS_CANCELLED:
        info->has_status = true;
        break;
    case MIGRATION_STATUS_WAIT_UNPLUG:
        info->has_status = true;
        break;
    }
    info->status = state;

    QEMU_LOCK_GUARD(&s->error_mutex);
    if (s->error) {
        info->error_desc = g_strdup(error_get_pretty(s->error));
    }
}

static void fill_destination_migration_info(MigrationInfo *info)
{
    MigrationIncomingState *mis = migration_incoming_get_current();

    if (mis->socket_address_list) {
        info->has_socket_address = true;
        info->socket_address =
            QAPI_CLONE(SocketAddressList, mis->socket_address_list);
    }

    switch (mis->state) {
    case MIGRATION_STATUS_NONE:
        return;
    case MIGRATION_STATUS_SETUP:
    case MIGRATION_STATUS_CANCELLING:
    case MIGRATION_STATUS_CANCELLED:
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_PAUSED:
    case MIGRATION_STATUS_POSTCOPY_RECOVER:
    case MIGRATION_STATUS_FAILED:
    case MIGRATION_STATUS_COLO:
        info->has_status = true;
        break;
    case MIGRATION_STATUS_COMPLETED:
        info->has_status = true;
        fill_destination_postcopy_migration_info(info);
        break;
    }
    info->status = mis->state;
}

MigrationInfo *qmp_query_migrate(Error **errp)
{
    MigrationInfo *info = g_malloc0(sizeof(*info));

    fill_destination_migration_info(info);
    fill_source_migration_info(info);

    return info;
}
void qmp_migrate_start_postcopy(Error **errp)
{
    MigrationState *s = migrate_get_current();

    if (!migrate_postcopy()) {
        error_setg(errp, "Enable postcopy with migrate_set_capability before"
                         " the start of migration");
        return;
    }

    if (s->state == MIGRATION_STATUS_NONE) {
        error_setg(errp, "Postcopy must be started after migration has been"
                         " started");
        return;
    }
    /*
     * we don't error if migration has finished since that would be racy
     * with issuing this command.
     */
    qatomic_set(&s->start_postcopy, true);
}

/* shared migration helpers */

void migrate_set_state(int *state, int old_state, int new_state)
{
    assert(new_state < MIGRATION_STATUS__MAX);
    if (qatomic_cmpxchg(state, old_state, new_state) == old_state) {
        trace_migrate_set_state(MigrationStatus_str(new_state));
        migrate_generate_event(new_state);
    }
}
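/*
 * As the bql_unlock()/bql_lock() pair below suggests, this cleanup is
 * expected to run on the main thread with the BQL held (typically via
 * migrate_fd_cleanup_bh()); the BQL is dropped around qemu_thread_join()
 * so that the migration thread can exit without deadlocking.
 */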
static void migrate_fd_cleanup(MigrationState *s)
{
    MigrationEventType type;

    g_free(s->hostname);
    s->hostname = NULL;
    json_writer_free(s->vmdesc);
    s->vmdesc = NULL;

    qemu_savevm_state_cleanup();

    if (s->to_dst_file) {
        QEMUFile *tmp;

        trace_migrate_fd_cleanup();
        bql_unlock();
        if (s->migration_thread_running) {
            qemu_thread_join(&s->thread);
            s->migration_thread_running = false;
        }
        bql_lock();

        multifd_send_shutdown();
        qemu_mutex_lock(&s->qemu_file_lock);
        tmp = s->to_dst_file;
        s->to_dst_file = NULL;
        qemu_mutex_unlock(&s->qemu_file_lock);
        /*
         * Close the file handle without the lock to make sure the
         * critical section won't block for long.
         */
        migration_ioc_unregister_yank_from_file(tmp);
        qemu_fclose(tmp);
    }

    /*
     * We already cleaned up to_dst_file, so errors from the return
     * path might be due to that, ignore them.
     */
    close_return_path_on_source(s);

    assert(!migration_is_active(s));

    if (s->state == MIGRATION_STATUS_CANCELLING) {
        migrate_set_state(&s->state, MIGRATION_STATUS_CANCELLING,
                          MIGRATION_STATUS_CANCELLED);
    }

    if (s->error) {
        /* It is used on info migrate.  We can't free it */
        error_report_err(error_copy(s->error));
    }
    type = migration_has_failed(s) ? MIG_EVENT_PRECOPY_FAILED :
                                     MIG_EVENT_PRECOPY_DONE;
    migration_call_notifiers(s, type);
    block_cleanup_parameters();
    yank_unregister_instance(MIGRATION_YANK_INSTANCE);
}

static void migrate_fd_cleanup_bh(void *opaque)
{
    migrate_fd_cleanup(opaque);
}

void migrate_set_error(MigrationState *s, const Error *error)
{
    QEMU_LOCK_GUARD(&s->error_mutex);
    if (!s->error) {
        s->error = error_copy(error);
    }
}

bool migrate_has_error(MigrationState *s)
{
    /* The lock is not helpful here, but still follow the rule */
    QEMU_LOCK_GUARD(&s->error_mutex);
    return qatomic_read(&s->error);
}

static void migrate_error_free(MigrationState *s)
{
    QEMU_LOCK_GUARD(&s->error_mutex);
    if (s->error) {
        error_free(s->error);
        s->error = NULL;
    }
}

static void migrate_fd_error(MigrationState *s, const Error *error)
{
    trace_migrate_fd_error(error_get_pretty(error));
    assert(s->to_dst_file == NULL);
    migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
                      MIGRATION_STATUS_FAILED);
    migrate_set_error(s, error);
}
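/*
 * A sketch of the cancellation flow, per the code below and in
 * migrate_fd_cleanup() above: migrate_fd_cancel() moves any running
 * state to CANCELLING and unblocks anything stuck on I/O via
 * qemu_file_shutdown(); the later cleanup path then completes the
 * CANCELLING -> CANCELLED transition.
 */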
static void migrate_fd_cancel(MigrationState *s)
{
    int old_state;

    trace_migrate_fd_cancel();

    WITH_QEMU_LOCK_GUARD(&s->qemu_file_lock) {
        if (s->rp_state.from_dst_file) {
            /* shut down the rp socket, causing the rp thread to exit */
            qemu_file_shutdown(s->rp_state.from_dst_file);
        }
    }

    do {
        old_state = s->state;
        if (!migration_is_running(old_state)) {
            break;
        }
        /* If the migration is paused, kick it out of the pause */
        if (old_state == MIGRATION_STATUS_PRE_SWITCHOVER) {
            qemu_sem_post(&s->pause_sem);
        }
        migrate_set_state(&s->state, old_state, MIGRATION_STATUS_CANCELLING);
    } while (s->state != MIGRATION_STATUS_CANCELLING);

    /*
     * If we're unlucky the migration code might be stuck somewhere in a
     * send/write while the network has failed and is waiting to timeout;
     * if we've got shutdown(2) available then we can force it to quit.
     */
    if (s->state == MIGRATION_STATUS_CANCELLING) {
        WITH_QEMU_LOCK_GUARD(&s->qemu_file_lock) {
            if (s->to_dst_file) {
                qemu_file_shutdown(s->to_dst_file);
            }
        }
    }
    if (s->state == MIGRATION_STATUS_CANCELLING && s->block_inactive) {
        Error *local_err = NULL;

        bdrv_activate_all(&local_err);
        if (local_err) {
            error_report_err(local_err);
        } else {
            s->block_inactive = false;
        }
    }
}

void migration_add_notifier(NotifierWithReturn *notify,
                            NotifierWithReturnFunc func)
{
    notify->notify = func;
    notifier_with_return_list_add(&migration_state_notifiers, notify);
}

void migration_remove_notifier(NotifierWithReturn *notify)
{
    if (notify->notify) {
        notifier_with_return_remove(notify);
        notify->notify = NULL;
    }
}

void migration_call_notifiers(MigrationState *s, MigrationEventType type)
{
    MigrationEvent e;

    e.type = type;
    notifier_with_return_list_notify(&migration_state_notifiers, &e, 0);
}

bool migration_in_setup(MigrationState *s)
{
    return s->state == MIGRATION_STATUS_SETUP;
}

bool migration_has_finished(MigrationState *s)
{
    return s->state == MIGRATION_STATUS_COMPLETED;
}

bool migration_has_failed(MigrationState *s)
{
    return (s->state == MIGRATION_STATUS_CANCELLED ||
            s->state == MIGRATION_STATUS_FAILED);
}

bool migration_in_postcopy(void)
{
    MigrationState *s = migrate_get_current();

    switch (s->state) {
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_PAUSED:
    case MIGRATION_STATUS_POSTCOPY_RECOVER:
        return true;
    default:
        return false;
    }
}

bool migration_postcopy_is_alive(int state)
{
    switch (state) {
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_RECOVER:
        return true;
    default:
        return false;
    }
}

bool migration_in_postcopy_after_devices(MigrationState *s)
{
    return migration_in_postcopy() && s->postcopy_after_devices;
}

bool migration_in_incoming_postcopy(void)
{
    PostcopyState ps = postcopy_state_get();

    return ps >= POSTCOPY_INCOMING_DISCARD && ps < POSTCOPY_INCOMING_END;
}

bool migration_incoming_postcopy_advised(void)
{
    PostcopyState ps = postcopy_state_get();

    return ps >= POSTCOPY_INCOMING_ADVISE && ps < POSTCOPY_INCOMING_END;
}

bool migration_in_bg_snapshot(void)
{
    MigrationState *s = migrate_get_current();

    return migrate_background_snapshot() &&
           migration_is_setup_or_active(s->state);
}

bool migration_is_idle(void)
{
    MigrationState *s = current_migration;

    if (!s) {
        return true;
    }

    switch (s->state) {
    case MIGRATION_STATUS_NONE:
    case MIGRATION_STATUS_CANCELLED:
    case MIGRATION_STATUS_COMPLETED:
    case MIGRATION_STATUS_FAILED:
        return true;
    case MIGRATION_STATUS_SETUP:
    case MIGRATION_STATUS_CANCELLING:
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
    case MIGRATION_STATUS_COLO:
    case MIGRATION_STATUS_PRE_SWITCHOVER:
    case MIGRATION_STATUS_DEVICE:
    case MIGRATION_STATUS_WAIT_UNPLUG:
        return false;
    case MIGRATION_STATUS__MAX:
        g_assert_not_reached();
    }

    return false;
}
bool migration_is_active(MigrationState *s)
{
    return (s->state == MIGRATION_STATUS_ACTIVE ||
            s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE);
}

int migrate_init(MigrationState *s, Error **errp)
{
    int ret;

    ret = qemu_savevm_state_prepare(errp);
    if (ret) {
        return ret;
    }

    /*
     * Reinitialise all migration state, except
     * parameters/capabilities that the user set, and
     * locks.
     */
    s->to_dst_file = NULL;
    s->state = MIGRATION_STATUS_NONE;
    s->rp_state.from_dst_file = NULL;
    s->mbps = 0.0;
    s->pages_per_second = 0.0;
    s->downtime = 0;
    s->expected_downtime = 0;
    s->setup_time = 0;
    s->start_postcopy = false;
    s->postcopy_after_devices = false;
    s->migration_thread_running = false;
    error_free(s->error);
    s->error = NULL;
    s->vmdesc = NULL;

    migrate_set_state(&s->state, MIGRATION_STATUS_NONE, MIGRATION_STATUS_SETUP);

    s->start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    s->total_time = 0;
    s->vm_old_state = -1;
    s->iteration_initial_bytes = 0;
    s->threshold_size = 0;
    s->switchover_acked = false;
    s->rdma_migration = false;
    /*
     * set mig_stats memory to zero for a new migration
     */
    memset(&mig_stats, 0, sizeof(mig_stats));
    migration_reset_vfio_bytes_transferred();

    return 0;
}

static bool is_busy(Error **reasonp, Error **errp)
{
    ERRP_GUARD();

    /* Snapshots are similar to migrations, so check RUN_STATE_SAVE_VM too. */
    if (runstate_check(RUN_STATE_SAVE_VM) || !migration_is_idle()) {
        error_propagate_prepend(errp, *reasonp,
                                "disallowing migration blocker "
                                "(migration/snapshot in progress) for: ");
        *reasonp = NULL;
        return true;
    }
    return false;
}

static bool is_only_migratable(Error **reasonp, Error **errp, int modes)
{
    ERRP_GUARD();

    if (only_migratable && (modes & BIT(MIG_MODE_NORMAL))) {
        error_propagate_prepend(errp, *reasonp,
                                "disallowing migration blocker "
                                "(--only-migratable) for: ");
        *reasonp = NULL;
        return true;
    }
    return false;
}

static int get_modes(MigMode mode, va_list ap)
{
    int modes = 0;

    while (mode != -1 && mode != MIG_MODE_ALL) {
        assert(mode >= MIG_MODE_NORMAL && mode < MIG_MODE__MAX);
        modes |= BIT(mode);
        mode = va_arg(ap, MigMode);
    }
    if (mode == MIG_MODE_ALL) {
        modes = BIT(MIG_MODE__MAX) - 1;
    }
    return modes;
}

static int add_blockers(Error **reasonp, Error **errp, int modes)
{
    for (MigMode mode = 0; mode < MIG_MODE__MAX; mode++) {
        if (modes & BIT(mode)) {
            migration_blockers[mode] = g_slist_prepend(migration_blockers[mode],
                                                       *reasonp);
        }
    }
    return 0;
}

int migrate_add_blocker(Error **reasonp, Error **errp)
{
    return migrate_add_blocker_modes(reasonp, errp, MIG_MODE_ALL);
}

int migrate_add_blocker_normal(Error **reasonp, Error **errp)
{
    return migrate_add_blocker_modes(reasonp, errp, MIG_MODE_NORMAL, -1);
}
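/*
 * The variadic mode list must be terminated by -1 (or cut short with
 * MIG_MODE_ALL), as parsed by get_modes() above.  For example,
 * migrate_add_blocker_normal() above passes (MIG_MODE_NORMAL, -1) to
 * block only normal-mode migration.
 */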
int migrate_add_blocker_modes(Error **reasonp, Error **errp, MigMode mode, ...)
{
    int modes;
    va_list ap;

    va_start(ap, mode);
    modes = get_modes(mode, ap);
    va_end(ap);

    if (is_only_migratable(reasonp, errp, modes)) {
        return -EACCES;
    } else if (is_busy(reasonp, errp)) {
        return -EBUSY;
    }
    return add_blockers(reasonp, errp, modes);
}

int migrate_add_blocker_internal(Error **reasonp, Error **errp)
{
    int modes = BIT(MIG_MODE__MAX) - 1;

    if (is_busy(reasonp, errp)) {
        return -EBUSY;
    }
    return add_blockers(reasonp, errp, modes);
}

void migrate_del_blocker(Error **reasonp)
{
    if (*reasonp) {
        for (MigMode mode = 0; mode < MIG_MODE__MAX; mode++) {
            migration_blockers[mode] = g_slist_remove(migration_blockers[mode],
                                                      *reasonp);
        }
        error_free(*reasonp);
        *reasonp = NULL;
    }
}

void qmp_migrate_incoming(const char *uri, bool has_channels,
                          MigrationChannelList *channels, Error **errp)
{
    Error *local_err = NULL;
    static bool once = true;

    if (!once) {
        error_setg(errp, "The incoming migration has already been started");
        return;
    }
    if (!runstate_check(RUN_STATE_INMIGRATE)) {
        error_setg(errp, "'-incoming' was not specified on the command line");
        return;
    }

    if (!yank_register_instance(MIGRATION_YANK_INSTANCE, errp)) {
        return;
    }

    qemu_start_incoming_migration(uri, has_channels, channels, &local_err);

    if (local_err) {
        yank_unregister_instance(MIGRATION_YANK_INSTANCE);
        error_propagate(errp, local_err);
        return;
    }

    once = false;
}

void qmp_migrate_recover(const char *uri, Error **errp)
{
    MigrationIncomingState *mis = migration_incoming_get_current();

    /*
     * Don't even bother to use ERRP_GUARD() as it _must_ always be set by
     * callers (no one should ignore a recover failure); if one does, it's
     * a programming error.
     */
    assert(errp);

    if (mis->state != MIGRATION_STATUS_POSTCOPY_PAUSED) {
        error_setg(errp, "Migrate recover can only be run "
                         "when postcopy is paused.");
        return;
    }

    /* If there's an existing transport, release it */
    migration_incoming_transport_cleanup(mis);

    /*
     * Note that this call will never start a real migration; it will
     * only re-setup the migration stream and poke existing migration
     * to continue using that newly established channel.
     */
    qemu_start_incoming_migration(uri, false, NULL, errp);
}
void qmp_migrate_pause(Error **errp)
{
    MigrationState *ms = migrate_get_current();
    MigrationIncomingState *mis = migration_incoming_get_current();
    int ret = 0;

    if (migration_postcopy_is_alive(ms->state)) {
        /* Source side, during postcopy */
        Error *error = NULL;

        /* Tell the core migration that we're pausing */
        error_setg(&error, "Postcopy migration is paused by the user");
        migrate_set_error(ms, error);
        error_free(error);

        qemu_mutex_lock(&ms->qemu_file_lock);
        if (ms->to_dst_file) {
            ret = qemu_file_shutdown(ms->to_dst_file);
        }
        qemu_mutex_unlock(&ms->qemu_file_lock);
        if (ret) {
            error_setg(errp, "Failed to pause source migration");
        }

        /*
         * Kick the migration thread out of any waiting windows (on behalf
         * of the rp thread).
         */
        migration_rp_kick(ms);

        return;
    }

    if (migration_postcopy_is_alive(mis->state)) {
        ret = qemu_file_shutdown(mis->from_src_file);
        if (ret) {
            error_setg(errp, "Failed to pause destination migration");
        }
        return;
    }

    error_setg(errp, "migrate-pause is currently only supported "
                     "during postcopy-active or postcopy-recover state");
}

bool migration_is_blocked(Error **errp)
{
    GSList *blockers = migration_blockers[migrate_mode()];

    if (qemu_savevm_state_blocked(errp)) {
        return true;
    }

    if (blockers) {
        error_propagate(errp, error_copy(blockers->data));
        return true;
    }

    return false;
}
/* Returns true if continue to migrate, or false if error detected */
static bool migrate_prepare(MigrationState *s, bool blk, bool blk_inc,
                            bool resume, Error **errp)
{
    if (blk_inc) {
        warn_report("parameter 'inc' is deprecated;"
                    " use blockdev-mirror with NBD instead");
    }

    if (blk) {
        warn_report("parameter 'blk' is deprecated;"
                    " use blockdev-mirror with NBD instead");
    }

    if (resume) {
        if (s->state != MIGRATION_STATUS_POSTCOPY_PAUSED) {
            error_setg(errp, "Cannot resume if there is no "
                             "paused migration");
            return false;
        }

        /*
         * Postcopy recovery won't work well with the release-ram
         * capability, since release-ram drops the page buffer as soon
         * as the page is put into the send buffer.  So if a network
         * failure happens, any page buffers that have not yet reached
         * the destination VM but have already been sent from the
         * source VM will be lost forever.  Let's refuse to let the
         * client resume such a postcopy migration.  Luckily release-ram
         * was designed to only be used when the source and destination
         * VMs are on the same host, so it should be fine.
         */
        if (migrate_release_ram()) {
            error_setg(errp, "Postcopy recovery cannot work "
                             "when release-ram capability is set");
            return false;
        }

        /* This is a resume, skip init status */
        return true;
    }

    if (migration_is_running(s->state)) {
        error_setg(errp, QERR_MIGRATION_ACTIVE);
        return false;
    }

    if (runstate_check(RUN_STATE_INMIGRATE)) {
        error_setg(errp, "Guest is waiting for an incoming migration");
        return false;
    }

    if (runstate_check(RUN_STATE_POSTMIGRATE)) {
        error_setg(errp, "Can't migrate the vm that was paused due to "
                         "previous migration");
        return false;
    }

    if (kvm_hwpoisoned_mem()) {
        error_setg(errp, "Can't migrate this vm with hardware poisoned memory, "
                         "please reboot the vm and try again");
        return false;
    }

    if (migration_is_blocked(errp)) {
        return false;
    }

    if (blk || blk_inc) {
        if (migrate_colo()) {
            error_setg(errp, "No disk migration is required in COLO mode");
            return false;
        }
        if (migrate_block() || migrate_block_incremental()) {
            error_setg(errp, "Command options are incompatible with "
                             "current migration capabilities");
            return false;
        }
        if (!migrate_cap_set(MIGRATION_CAPABILITY_BLOCK, true, errp)) {
            return false;
        }
        s->must_remove_block_options = true;
    }

    if (blk_inc) {
        migrate_set_block_incremental(true);
    }

    if (migrate_init(s, errp)) {
        return false;
    }

    return true;
}
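/*
 * QMP handler for the 'migrate' command.  An illustrative invocation
 * using the legacy URI syntax parsed above (the destination address is
 * a placeholder):
 *
 *   { "execute": "migrate",
 *     "arguments": { "uri": "tcp:dst-host:4444" } }
 */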

void qmp_migrate(const char *uri, bool has_channels,
                 MigrationChannelList *channels, bool has_blk, bool blk,
                 bool has_inc, bool inc, bool has_detach, bool detach,
                 bool has_resume, bool resume, Error **errp)
{
    bool resume_requested;
    Error *local_err = NULL;
    MigrationState *s = migrate_get_current();
    g_autoptr(MigrationChannel) channel = NULL;
    MigrationAddress *addr = NULL;

    /*
     * Do preliminary checks on the uri and channel arguments
     */
    if (!uri == !channels) {
        error_setg(errp, "need either 'uri' or 'channels' argument");
        return;
    }

    if (channels) {
        /* Verify that the channel list has only one item */
        if (channels->next) {
            error_setg(errp, "Channel list has more than one entry");
            return;
        }
        addr = channels->value->addr;
    }

    if (uri) {
        /* caller uses the old URI syntax */
        if (!migrate_uri_parse(uri, &channel, errp)) {
            return;
        }
        addr = channel->addr;
    }

    /* transport mechanism not suitable for migration? */
    if (!migration_channels_and_transport_compatible(addr, errp)) {
        return;
    }

    resume_requested = has_resume && resume;
    if (!migrate_prepare(s, has_blk && blk, has_inc && inc,
                         resume_requested, errp)) {
        /* Error detected, put into errp */
        return;
    }

    if (!resume_requested) {
        if (!yank_register_instance(MIGRATION_YANK_INSTANCE, errp)) {
            return;
        }
    }

    if (addr->transport == MIGRATION_ADDRESS_TYPE_SOCKET) {
        SocketAddress *saddr = &addr->u.socket;
        if (saddr->type == SOCKET_ADDRESS_TYPE_INET ||
            saddr->type == SOCKET_ADDRESS_TYPE_UNIX ||
            saddr->type == SOCKET_ADDRESS_TYPE_VSOCK) {
            socket_start_outgoing_migration(s, saddr, &local_err);
        } else if (saddr->type == SOCKET_ADDRESS_TYPE_FD) {
            fd_start_outgoing_migration(s, saddr->u.fd.str, &local_err);
        }
#ifdef CONFIG_RDMA
    } else if (addr->transport == MIGRATION_ADDRESS_TYPE_RDMA) {
        rdma_start_outgoing_migration(s, &addr->u.rdma, &local_err);
#endif
    } else if (addr->transport == MIGRATION_ADDRESS_TYPE_EXEC) {
        exec_start_outgoing_migration(s, addr->u.exec.args, &local_err);
    } else if (addr->transport == MIGRATION_ADDRESS_TYPE_FILE) {
        file_start_outgoing_migration(s, &addr->u.file, &local_err);
    } else {
        error_setg(&local_err, QERR_INVALID_PARAMETER_VALUE, "uri",
                   "a valid migration protocol");
        migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
                          MIGRATION_STATUS_FAILED);
        block_cleanup_parameters();
    }

    if (local_err) {
        if (!resume_requested) {
            yank_unregister_instance(MIGRATION_YANK_INSTANCE);
        }
        migrate_fd_error(s, local_err);
        error_propagate(errp, local_err);
        return;
    }
}

void qmp_migrate_cancel(Error **errp)
{
    migration_cancel(NULL);
}

void qmp_migrate_continue(MigrationStatus state, Error **errp)
{
    MigrationState *s = migrate_get_current();

    if (s->state != state) {
        error_setg(errp, "Migration not in expected state: %s",
                   MigrationStatus_str(s->state));
        return;
    }
    qemu_sem_post(&s->pause_sem);
}

int migration_rp_wait(MigrationState *s)
{
    /* If migration has failure already, ignore the wait */
    if (migrate_has_error(s)) {
        return -1;
    }

    qemu_sem_wait(&s->rp_state.rp_sem);

    /* After wait, double check that there's no failure */
    if (migrate_has_error(s)) {
        return -1;
    }

    return 0;
}

void migration_rp_kick(MigrationState *s)
{
    qemu_sem_post(&s->rp_state.rp_sem);
}

static struct rp_cmd_args {
    ssize_t     len; /* -1 = variable */
    const char *name;
} rp_cmd_args[] = {
    [MIG_RP_MSG_INVALID]        = { .len = -1, .name = "INVALID" },
    [MIG_RP_MSG_SHUT]           = { .len =  4, .name = "SHUT" },
    [MIG_RP_MSG_PONG]           = { .len =  4, .name = "PONG" },
    [MIG_RP_MSG_REQ_PAGES]      = { .len = 12, .name = "REQ_PAGES" },
    [MIG_RP_MSG_REQ_PAGES_ID]   = { .len = -1, .name = "REQ_PAGES_ID" },
    [MIG_RP_MSG_RECV_BITMAP]    = { .len = -1, .name = "RECV_BITMAP" },
    [MIG_RP_MSG_RESUME_ACK]     = { .len =  4, .name = "RESUME_ACK" },
    [MIG_RP_MSG_SWITCHOVER_ACK] = { .len =  0, .name = "SWITCHOVER_ACK" },
    [MIG_RP_MSG_MAX]            = { .len = -1, .name = "MAX" },
};
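
/*
 * Each return-path message is framed as two big-endian 16-bit words,
 * the type and the payload length, followed by 'len' payload bytes as
 * listed above.  As an illustration of the wire format that
 * source_return_path_thread() below parses, the destination
 * conceptually builds a PONG like this (a simplified sketch; the real
 * sender is migrate_send_rp_message() earlier in this file):
 *
 *     uint32_t seq_be = cpu_to_be32(seq);
 *
 *     qemu_put_be16(f, MIG_RP_MSG_PONG);        <- type word
 *     qemu_put_be16(f, sizeof(seq_be));         <- length word (4)
 *     qemu_put_buffer(f, (uint8_t *)&seq_be, sizeof(seq_be));
 */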

/*
 * Process a request for pages received on the return path.  We're
 * allowed to send more than requested (e.g. to round to our page size)
 * and we don't need to send pages that have already been sent.
 */
static void
migrate_handle_rp_req_pages(MigrationState *ms, const char *rbname,
                            ram_addr_t start, size_t len, Error **errp)
{
    long our_host_ps = qemu_real_host_page_size();

    trace_migrate_handle_rp_req_pages(rbname, start, len);

    /*
     * Since we currently insist on matching page sizes, just sanity check
     * we're being asked for whole host pages.
     */
    if (!QEMU_IS_ALIGNED(start, our_host_ps) ||
        !QEMU_IS_ALIGNED(len, our_host_ps)) {
        error_setg(errp, "MIG_RP_MSG_REQ_PAGES: Misaligned page request, start:"
                   RAM_ADDR_FMT " len: %zu", start, len);
        return;
    }

    ram_save_queue_pages(rbname, start, len, errp);
}

static bool migrate_handle_rp_recv_bitmap(MigrationState *s, char *block_name,
                                          Error **errp)
{
    RAMBlock *block = qemu_ram_block_by_name(block_name);

    if (!block) {
        error_setg(errp, "MIG_RP_MSG_RECV_BITMAP has invalid block name '%s'",
                   block_name);
        return false;
    }

    /* Fetch the received bitmap and refresh the dirty bitmap */
    return ram_dirty_bitmap_reload(s, block, errp);
}

static bool migrate_handle_rp_resume_ack(MigrationState *s,
                                         uint32_t value, Error **errp)
{
    trace_source_return_path_thread_resume_ack(value);

    if (value != MIGRATION_RESUME_ACK_VALUE) {
        error_setg(errp, "illegal resume_ack value %"PRIu32, value);
        return false;
    }

    /* Now both sides are active. */
    migrate_set_state(&s->state, MIGRATION_STATUS_POSTCOPY_RECOVER,
                      MIGRATION_STATUS_POSTCOPY_ACTIVE);

    /* Notify the send thread that it's time to continue sending pages */
    migration_rp_kick(s);

    return true;
}
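
/*
 * To summarize the postcopy recovery handshake that ends in
 * migrate_handle_rp_resume_ack() above (a simplified view; the
 * authoritative sequence is spread across this file and savevm.c):
 *
 *   source                                 destination
 *   ------                                 -----------
 *   postcopy_do_resume()
 *     MIG_CMD_POSTCOPY_RESUME  -------->
 *                              <--------   MIG_RP_MSG_RESUME_ACK (value 1)
 *   migrate_handle_rp_resume_ack()
 *     POSTCOPY_RECOVER -> POSTCOPY_ACTIVE
 *     kick rp_sem so the migration thread resumes sending pages
 */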

/*
 * Release ms->rp_state.from_dst_file (and postcopy_qemufile_src if
 * it exists) in a safe way.
 */
static void migration_release_dst_files(MigrationState *ms)
{
    QEMUFile *file;

    WITH_QEMU_LOCK_GUARD(&ms->qemu_file_lock) {
        /*
         * Reset the from_dst_file pointer first before releasing it, as we
         * can't block within the lock section
         */
        file = ms->rp_state.from_dst_file;
        ms->rp_state.from_dst_file = NULL;
    }

    /*
     * Do the same for the postcopy fast path socket too if there is
     * one.  No locking needed because this qemufile should only be
     * managed by the return path thread.
     */
    if (ms->postcopy_qemufile_src) {
        migration_ioc_unregister_yank_from_file(ms->postcopy_qemufile_src);
        qemu_file_shutdown(ms->postcopy_qemufile_src);
        qemu_fclose(ms->postcopy_qemufile_src);
        ms->postcopy_qemufile_src = NULL;
    }

    qemu_fclose(file);
}

/*
 * Handles messages sent on the return path towards the source VM
 */
static void *source_return_path_thread(void *opaque)
{
    MigrationState *ms = opaque;
    QEMUFile *rp = ms->rp_state.from_dst_file;
    uint16_t header_len, header_type;
    uint8_t buf[512];
    uint32_t tmp32, sibling_error;
    ram_addr_t start = 0; /* =0 to silence warning */
    size_t len = 0, expected_len;
    Error *err = NULL;
    int res;

    trace_source_return_path_thread_entry();
    rcu_register_thread();

    while (migration_is_setup_or_active(ms->state)) {
        trace_source_return_path_thread_loop_top();

        header_type = qemu_get_be16(rp);
        header_len = qemu_get_be16(rp);

        if (qemu_file_get_error(rp)) {
            qemu_file_get_error_obj(rp, &err);
            goto out;
        }

        if (header_type >= MIG_RP_MSG_MAX ||
            header_type == MIG_RP_MSG_INVALID) {
            error_setg(&err, "Received invalid message 0x%04x length 0x%04x",
                       header_type, header_len);
            goto out;
        }

        if ((rp_cmd_args[header_type].len != -1 &&
             header_len != rp_cmd_args[header_type].len) ||
            header_len > sizeof(buf)) {
            error_setg(&err, "Received '%s' message (0x%04x) with "
                       "incorrect length %d expecting %zu",
                       rp_cmd_args[header_type].name, header_type, header_len,
                       (size_t)rp_cmd_args[header_type].len);
            goto out;
        }

        /* We know we've got a valid header by this point */
        res = qemu_get_buffer(rp, buf, header_len);
        if (res != header_len) {
            error_setg(&err, "Failed reading data for message 0x%04x"
                       " read %d expected %d",
                       header_type, res, header_len);
            goto out;
        }

        /* OK, we have the message and the data */
        switch (header_type) {
        case MIG_RP_MSG_SHUT:
            sibling_error = ldl_be_p(buf);
            trace_source_return_path_thread_shut(sibling_error);
            if (sibling_error) {
                error_setg(&err, "Sibling indicated error %d", sibling_error);
            }
            /*
             * We'll let the main thread deal with closing the RP.
             * We could do a shutdown(2) on it, but we're the only
             * user anyway, so there's nothing gained.
             */
            goto out;

        case MIG_RP_MSG_PONG:
            tmp32 = ldl_be_p(buf);
            trace_source_return_path_thread_pong(tmp32);
            qemu_sem_post(&ms->rp_state.rp_pong_acks);
            break;
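
        /*
         * Payload layouts handled below, for reference (offsets in
         * bytes, all integers big-endian):
         *
         *   REQ_PAGES:     [0..7] start (be64), [8..11] len (be32)
         *   REQ_PAGES_ID:  [0..7] start, [8..11] len,
         *                  [12] idstr length, [13..] idstr (not NUL
         *                  terminated on the wire; terminated here)
         *   RECV_BITMAP:   [0] idstr length, [1..] idstr
         */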
        case MIG_RP_MSG_REQ_PAGES:
            start = ldq_be_p(buf);
            len = ldl_be_p(buf + 8);
            migrate_handle_rp_req_pages(ms, NULL, start, len, &err);
            if (err) {
                goto out;
            }
            break;

        case MIG_RP_MSG_REQ_PAGES_ID:
            expected_len = 12 + 1; /* header + termination */

            if (header_len >= expected_len) {
                start = ldq_be_p(buf);
                len = ldl_be_p(buf + 8);
                /* Now we expect an idstr */
                tmp32 = buf[12]; /* Length of the following idstr */
                buf[13 + tmp32] = '\0';
                expected_len += tmp32;
            }
            if (header_len != expected_len) {
                error_setg(&err, "Req_Page_id with length %d expecting %zu",
                           header_len, expected_len);
                goto out;
            }
            migrate_handle_rp_req_pages(ms, (char *)&buf[13], start, len,
                                        &err);
            if (err) {
                goto out;
            }
            break;

        case MIG_RP_MSG_RECV_BITMAP:
            if (header_len < 1) {
                error_setg(&err, "MIG_RP_MSG_RECV_BITMAP missing block name");
                goto out;
            }
            /* Format: len (1B) + idstr (<255B). This ends the idstr. */
            buf[buf[0] + 1] = '\0';
            if (!migrate_handle_rp_recv_bitmap(ms, (char *)(buf + 1), &err)) {
                goto out;
            }
            break;

        case MIG_RP_MSG_RESUME_ACK:
            tmp32 = ldl_be_p(buf);
            if (!migrate_handle_rp_resume_ack(ms, tmp32, &err)) {
                goto out;
            }
            break;

        case MIG_RP_MSG_SWITCHOVER_ACK:
            ms->switchover_acked = true;
            trace_source_return_path_thread_switchover_acked();
            break;

        default:
            break;
        }
    }

out:
    if (err) {
        migrate_set_error(ms, err);
        error_free(err);
        trace_source_return_path_thread_bad_end();
    }

    if (ms->state == MIGRATION_STATUS_POSTCOPY_RECOVER) {
        /*
         * This will be extremely unlikely: that we got yet another network
         * issue during the recovery of the 1st network failure.  During this
         * period the main migration thread can be waiting on rp_sem for
         * this thread to sync with the other side.
         *
         * When this happens, explicitly kick the migration thread out of
         * RECOVER stage and back to PAUSED, so the admin can try
         * everything again.
         */
        migration_rp_kick(ms);
    }

    trace_source_return_path_thread_end();
    rcu_unregister_thread();

    return NULL;
}

static int open_return_path_on_source(MigrationState *ms)
{
    ms->rp_state.from_dst_file = qemu_file_get_return_path(ms->to_dst_file);
    if (!ms->rp_state.from_dst_file) {
        return -1;
    }

    trace_open_return_path_on_source();

    qemu_thread_create(&ms->rp_state.rp_thread, "return path",
                       source_return_path_thread, ms, QEMU_THREAD_JOINABLE);
    ms->rp_state.rp_thread_created = true;

    trace_open_return_path_on_source_continue();

    return 0;
}

/* Return true if error detected, or false otherwise */
static bool close_return_path_on_source(MigrationState *ms)
{
    if (!ms->rp_state.rp_thread_created) {
        return false;
    }

    trace_migration_return_path_end_before();

    /*
     * If this is a normal exit then the destination will send a SHUT
     * and the rp_thread will exit, however if there's an error we
     * need to cause it to exit.  shutdown(2), if we have it, will
     * cause it to unblock if it's stuck waiting for the destination.
     */
    WITH_QEMU_LOCK_GUARD(&ms->qemu_file_lock) {
        if (ms->to_dst_file && ms->rp_state.from_dst_file &&
            qemu_file_get_error(ms->to_dst_file)) {
            qemu_file_shutdown(ms->rp_state.from_dst_file);
        }
    }

    qemu_thread_join(&ms->rp_state.rp_thread);
    ms->rp_state.rp_thread_created = false;
    migration_release_dst_files(ms);
    trace_migration_return_path_end_after();

    /* Return path will persist the error in MigrationState when quit */
    return migrate_has_error(ms);
}

static inline void
migration_wait_main_channel(MigrationState *ms)
{
    /* Wait until one PONG message received */
    qemu_sem_wait(&ms->rp_state.rp_pong_acks);
}

/*
 * Switch from normal iteration to postcopy
 * Returns non-0 on error
 */
static int postcopy_start(MigrationState *ms, Error **errp)
{
    int ret;
    QIOChannelBuffer *bioc;
    QEMUFile *fb;
    uint64_t bandwidth = migrate_max_postcopy_bandwidth();
    bool restart_block = false;
    int cur_state = MIGRATION_STATUS_ACTIVE;

    if (migrate_postcopy_preempt()) {
        migration_wait_main_channel(ms);
        if (postcopy_preempt_establish_channel(ms)) {
            migrate_set_state(&ms->state, ms->state, MIGRATION_STATUS_FAILED);
            return -1;
        }
    }

    if (!migrate_pause_before_switchover()) {
        migrate_set_state(&ms->state, MIGRATION_STATUS_ACTIVE,
                          MIGRATION_STATUS_POSTCOPY_ACTIVE);
    }

    trace_postcopy_start();
    bql_lock();
    trace_postcopy_start_set_run();

    migration_downtime_start(ms);

    global_state_store();
    ret = migration_stop_vm(RUN_STATE_FINISH_MIGRATE);
    if (ret < 0) {
        goto fail;
    }

    ret = migration_maybe_pause(ms, &cur_state,
                                MIGRATION_STATUS_POSTCOPY_ACTIVE);
    if (ret < 0) {
        goto fail;
    }

    ret = bdrv_inactivate_all();
    if (ret < 0) {
        goto fail;
    }
    restart_block = true;

    /*
     * Cause any non-postcopiable, but iterative devices to
     * send out their final data.
     */
    qemu_savevm_state_complete_precopy(ms->to_dst_file, true, false);

    /*
     * In 'finish migrate' and with the BQL held everything should
     * be quiet, but we've potentially still got dirty pages and we
     * need to tell the destination to throw away any pages it's
     * already received that are dirty.
     */
    if (migrate_postcopy_ram()) {
        ram_postcopy_send_discard_bitmap(ms);
    }

    /*
     * Send the rest of the state - note things that are doing postcopy
     * will notice we're in POSTCOPY_ACTIVE and not actually
     * wrap their state up here.
     */
    migration_rate_set(bandwidth);
    if (migrate_postcopy_ram()) {
        /* Ping just for debugging, helps line traces up */
        qemu_savevm_send_ping(ms->to_dst_file, 2);
    }

    /*
     * While loading the device state we may trigger page transfer
     * requests and the fd must be free to process those, and thus
     * the destination must read the whole device state off the fd before
     * it starts processing it.  Unfortunately the ad-hoc migration format
     * doesn't allow the destination to know the size to read without fully
     * parsing it through each device's load-state code (especially the open
     * coded devices that use get/put).
     * So we wrap the device state up in a package with a length at the start;
     * to do this we use a QIOChannelBuffer to hold the whole of the
     * device state.
     */
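    /*
     * For reference, qemu_savevm_send_packaged() frames that buffer as
     * a QEMU_VM_COMMAND (MIG_CMD_PACKAGED) carrying the be32 blob size,
     * immediately followed by the blob bytes, roughly:
     *
     *     [ QEMU_VM_COMMAND | MIG_CMD_PACKAGED | be32 size | blob... ]
     *
     * so the destination can read the whole package off the wire before
     * parsing any of the device state inside it (see savevm.c for the
     * authoritative encoding).
     */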
    bioc = qio_channel_buffer_new(4096);
    qio_channel_set_name(QIO_CHANNEL(bioc), "migration-postcopy-buffer");
    fb = qemu_file_new_output(QIO_CHANNEL(bioc));
    object_unref(OBJECT(bioc));

    /*
     * Make sure the receiver can get incoming pages before we send the rest
     * of the state
     */
    qemu_savevm_send_postcopy_listen(fb);

    qemu_savevm_state_complete_precopy(fb, false, false);
    if (migrate_postcopy_ram()) {
        qemu_savevm_send_ping(fb, 3);
    }

    qemu_savevm_send_postcopy_run(fb);

    /* <><> end of stuff going into the package */

    /*
     * Last point of recovery; as soon as we send the package the destination
     * can open devices and potentially start running.
     * Let's just check again that we've not got any errors.
     */
    ret = qemu_file_get_error(ms->to_dst_file);
    if (ret) {
        error_setg(errp, "postcopy_start: Migration stream errored (pre package)");
        goto fail_closefb;
    }

    restart_block = false;

    /* Now send that blob */
    if (qemu_savevm_send_packaged(ms->to_dst_file, bioc->data, bioc->usage)) {
        goto fail_closefb;
    }
    qemu_fclose(fb);

    /*
     * Send a notify to give a chance for anything that needs to happen
     * at the transition to postcopy and after the device state; in particular
     * spice needs to trigger a transition now.
     */
    ms->postcopy_after_devices = true;
    migration_call_notifiers(ms, MIG_EVENT_PRECOPY_DONE);

    migration_downtime_end(ms);

    bql_unlock();

    if (migrate_postcopy_ram()) {
        /*
         * Although this ping is just for debug, it could potentially be
         * used for getting a better measurement of downtime at the source.
         */
        qemu_savevm_send_ping(ms->to_dst_file, 4);
    }

    if (migrate_release_ram()) {
        ram_postcopy_migrated_memory_release(ms);
    }

    ret = qemu_file_get_error(ms->to_dst_file);
    if (ret) {
        error_setg(errp, "postcopy_start: Migration stream errored");
        migrate_set_state(&ms->state, MIGRATION_STATUS_POSTCOPY_ACTIVE,
                          MIGRATION_STATUS_FAILED);
    }

    trace_postcopy_preempt_enabled(migrate_postcopy_preempt());

    return ret;

fail_closefb:
    qemu_fclose(fb);
fail:
    migrate_set_state(&ms->state, MIGRATION_STATUS_POSTCOPY_ACTIVE,
                      MIGRATION_STATUS_FAILED);
    if (restart_block) {
        /*
         * A failure happened early enough that we know the destination hasn't
         * accessed block devices, so we're safe to recover.
         */
        Error *local_err = NULL;

        bdrv_activate_all(&local_err);
        if (local_err) {
            error_report_err(local_err);
        }
    }
    bql_unlock();
    return -1;
}

/**
 * migration_maybe_pause: Pause if required to by
 * migrate_pause_before_switchover; called with the BQL held.
 * Returns: 0 on success
 */
static int migration_maybe_pause(MigrationState *s,
                                 int *current_active_state,
                                 int new_state)
{
    if (!migrate_pause_before_switchover()) {
        return 0;
    }

    /*
     * Since leaving this state is not atomic with posting the semaphore
     * it's possible that someone could have issued multiple migrate_continue
     * and the semaphore is incorrectly positive at this point;
     * the docs say it's undefined to reinit a semaphore that's already
     * init'd, so use timedwait to eat up any existing posts.
     */
    while (qemu_sem_timedwait(&s->pause_sem, 1) == 0) {
        /* This block intentionally left blank */
    }

    /*
     * If the migration is cancelled when it is in the completion phase,
     * the migration state is set to MIGRATION_STATUS_CANCELLING.
     * So we don't need to wait on the semaphore; otherwise we would
     * always wait for the 'pause_sem' semaphore.
     */
    if (s->state != MIGRATION_STATUS_CANCELLING) {
        bql_unlock();
        migrate_set_state(&s->state, *current_active_state,
                          MIGRATION_STATUS_PRE_SWITCHOVER);
        qemu_sem_wait(&s->pause_sem);
        migrate_set_state(&s->state, MIGRATION_STATUS_PRE_SWITCHOVER,
                          new_state);
        *current_active_state = new_state;
        bql_lock();
    }

    return s->state == new_state ? 0 : -EINVAL;
}
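
/*
 * With the 'pause-before-switchover' capability enabled, the wait on
 * pause_sem above is released by the management layer, e.g. (QMP):
 *
 *   { "execute": "migrate-continue",
 *     "arguments": { "state": "pre-switchover" } }
 *
 * which lands in qmp_migrate_continue() and posts pause_sem once the
 * current state matches the one the caller expects.
 */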

static int migration_completion_precopy(MigrationState *s,
                                        int *current_active_state)
{
    int ret;

    bql_lock();
    migration_downtime_start(s);

    s->vm_old_state = runstate_get();
    global_state_store();

    ret = migration_stop_vm(RUN_STATE_FINISH_MIGRATE);
    trace_migration_completion_vm_stop(ret);
    if (ret < 0) {
        goto out_unlock;
    }

    ret = migration_maybe_pause(s, current_active_state,
                                MIGRATION_STATUS_DEVICE);
    if (ret < 0) {
        goto out_unlock;
    }

    /*
     * Inactivate disks except in COLO, and track that we have done so in order
     * to remember to reactivate them if migration fails or is cancelled.
     */
    s->block_inactive = !migrate_colo();
    migration_rate_set(RATE_LIMIT_DISABLED);
    ret = qemu_savevm_state_complete_precopy(s->to_dst_file, false,
                                             s->block_inactive);
out_unlock:
    bql_unlock();
    return ret;
}

static void migration_completion_postcopy(MigrationState *s)
{
    trace_migration_completion_postcopy_end();

    bql_lock();
    qemu_savevm_state_complete_postcopy(s->to_dst_file);
    bql_unlock();

    /*
     * Shutdown the postcopy fast path thread.  This is only needed when dest
     * QEMU binary is old (7.1/7.2).  QEMU 8.0+ doesn't need this.
     */
    if (migrate_postcopy_preempt() && s->preempt_pre_7_2) {
        postcopy_preempt_shutdown_file(s);
    }

    trace_migration_completion_postcopy_end_after_complete();
}

static void migration_completion_failed(MigrationState *s,
                                        int current_active_state)
{
    if (s->block_inactive && (s->state == MIGRATION_STATUS_ACTIVE ||
                              s->state == MIGRATION_STATUS_DEVICE)) {
        /*
         * If not doing postcopy, vm_start() will be called: let's
         * regain control on images.
         */
        Error *local_err = NULL;

        bql_lock();
        bdrv_activate_all(&local_err);
        if (local_err) {
            error_report_err(local_err);
        } else {
            s->block_inactive = false;
        }
        bql_unlock();
    }

    migrate_set_state(&s->state, current_active_state,
                      MIGRATION_STATUS_FAILED);
}

/**
 * migration_completion: Used by migration_thread when there's not much left.
 * The caller 'breaks' the loop when this returns.
 *
 * @s: Current migration state
 */
static void migration_completion(MigrationState *s)
{
    int ret = 0;
    int current_active_state = s->state;

    if (s->state == MIGRATION_STATUS_ACTIVE) {
        ret = migration_completion_precopy(s, &current_active_state);
    } else if (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
        migration_completion_postcopy(s);
    } else {
        ret = -1;
    }

    if (ret < 0) {
        goto fail;
    }

    if (close_return_path_on_source(s)) {
        goto fail;
    }

    if (qemu_file_get_error(s->to_dst_file)) {
        trace_migration_completion_file_err();
        goto fail;
    }

    if (migrate_colo() && s->state == MIGRATION_STATUS_ACTIVE) {
        /* COLO does not support postcopy */
        migrate_set_state(&s->state, MIGRATION_STATUS_ACTIVE,
                          MIGRATION_STATUS_COLO);
    } else {
        migrate_set_state(&s->state, current_active_state,
                          MIGRATION_STATUS_COMPLETED);
    }

    return;

fail:
    migration_completion_failed(s, current_active_state);
}

/**
 * bg_migration_completion: Used by bg_migration_thread after all the
 * RAM has been saved.  The caller 'breaks' the loop when this returns.
 *
 * @s: Current migration state
 */
static void bg_migration_completion(MigrationState *s)
{
    int current_active_state = s->state;

    if (s->state == MIGRATION_STATUS_ACTIVE) {
        /*
         * By this moment we have RAM content saved into the migration stream.
         * The next step is to flush the non-RAM content (device state)
         * right after the ram content.  The device state has been stored into
         * the temporary buffer before RAM saving started.
         */
        qemu_put_buffer(s->to_dst_file, s->bioc->data, s->bioc->usage);
        qemu_fflush(s->to_dst_file);
    } else if (s->state == MIGRATION_STATUS_CANCELLING) {
        goto fail;
    }

    if (qemu_file_get_error(s->to_dst_file)) {
        trace_migration_completion_file_err();
        goto fail;
    }

    migrate_set_state(&s->state, current_active_state,
                      MIGRATION_STATUS_COMPLETED);
    return;

fail:
    migrate_set_state(&s->state, current_active_state,
                      MIGRATION_STATUS_FAILED);
}

typedef enum MigThrError {
    /* No error detected */
    MIG_THR_ERR_NONE = 0,
    /* Detected error, but resumed successfully */
    MIG_THR_ERR_RECOVERED = 1,
    /* Detected fatal error, need to exit */
    MIG_THR_ERR_FATAL = 2,
} MigThrError;

static int postcopy_resume_handshake(MigrationState *s)
{
    qemu_savevm_send_postcopy_resume(s->to_dst_file);

    while (s->state == MIGRATION_STATUS_POSTCOPY_RECOVER) {
        if (migration_rp_wait(s)) {
            return -1;
        }
    }

    if (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
        return 0;
    }

    return -1;
}

/* Return zero on success, or <0 on error */
static int postcopy_do_resume(MigrationState *s)
{
    int ret;

    /*
     * Call all the resume_prepare() hooks, so that modules can be
     * ready for the migration resume.
     */
    ret = qemu_savevm_state_resume_prepare(s);
    if (ret) {
        error_report("%s: resume_prepare() failure detected: %d",
                     __func__, ret);
        return ret;
    }

    /*
     * If preempt is enabled, re-establish the preempt channel.  Note that
     * we do it after resume prepare to make sure the main channel will be
     * created before the preempt channel.  E.g. with a weak network, the
     * dest QEMU may get messed up with the preempt and main channels on
     * the order of connection setup.  This guarantees the correct order.
     */
    ret = postcopy_preempt_establish_channel(s);
    if (ret) {
        error_report("%s: postcopy_preempt_establish_channel(): %d",
                     __func__, ret);
        return ret;
    }

    /*
     * Last handshake with destination on the resume (destination will
     * switch to postcopy-active afterwards)
     */
    ret = postcopy_resume_handshake(s);
    if (ret) {
        error_report("%s: handshake failed: %d", __func__, ret);
        return ret;
    }

    return 0;
}

/*
 * We don't return until we are in a safe state to continue current
 * postcopy migration.  Returns MIG_THR_ERR_RECOVERED if recovered, or
 * MIG_THR_ERR_FATAL if an unrecoverable failure happened.
 */
static MigThrError postcopy_pause(MigrationState *s)
{
    assert(s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE);

    while (true) {
        QEMUFile *file;

        /*
         * The current channel is possibly broken.  Release it.  Note
         * that this is guaranteed even without the lock because
         * to_dst_file should only be modified by the migration thread.
         * That also guarantees that the unregister of yank is safe too
         * without the lock.  It should be safe even to be within the
         * qemu_file_lock, but we didn't do that to avoid taking more
         * mutexes (yank_lock) within qemu_file_lock.  TL;DR: we make
         * the qemu_file_lock critical section as small as possible.
         */
        assert(s->to_dst_file);
        migration_ioc_unregister_yank_from_file(s->to_dst_file);
        qemu_mutex_lock(&s->qemu_file_lock);
        file = s->to_dst_file;
        s->to_dst_file = NULL;
        qemu_mutex_unlock(&s->qemu_file_lock);

        qemu_file_shutdown(file);
        qemu_fclose(file);

        /*
         * We're already pausing, so ignore any errors on the return
         * path and just wait for the thread to finish.  It will be
         * re-created when we resume.
         */
        close_return_path_on_source(s);

        migrate_set_state(&s->state, s->state,
                          MIGRATION_STATUS_POSTCOPY_PAUSED);

        error_report("Detected IO failure for postcopy. "
                     "Migration paused.");

        /*
         * We wait until things are fixed up.  Then someone will set
         * the status back for us.
         */
        while (s->state == MIGRATION_STATUS_POSTCOPY_PAUSED) {
            qemu_sem_wait(&s->postcopy_pause_sem);
        }

        if (s->state == MIGRATION_STATUS_POSTCOPY_RECOVER) {
            /* Woken up by a recover procedure. Give it a shot */

            /* Do the resume logic */
            if (postcopy_do_resume(s) == 0) {
                /* Let's continue! */
                trace_postcopy_pause_continued();
                return MIG_THR_ERR_RECOVERED;
            } else {
                /*
                 * Something went wrong during the recovery, let's
                 * pause again.  Pause is always better than throwing
                 * data away.
                 */
                continue;
            }
        } else {
            /* This is not right... Time to quit. */
            return MIG_THR_ERR_FATAL;
        }
    }
}

static MigThrError migration_detect_error(MigrationState *s)
{
    int ret;
    int state = s->state;
    Error *local_error = NULL;

    if (state == MIGRATION_STATUS_CANCELLING ||
        state == MIGRATION_STATUS_CANCELLED) {
        /* End the migration, but don't set the state to failed */
        return MIG_THR_ERR_FATAL;
    }

    /*
     * Try to detect any file errors.  Note that postcopy_qemufile_src will
     * be NULL when postcopy preempt is not enabled.
     */
    ret = qemu_file_get_error_obj_any(s->to_dst_file,
                                      s->postcopy_qemufile_src,
                                      &local_error);
    if (!ret) {
        /* Everything is fine */
        assert(!local_error);
        return MIG_THR_ERR_NONE;
    }

    if (local_error) {
        migrate_set_error(s, local_error);
        error_free(local_error);
    }

    if (state == MIGRATION_STATUS_POSTCOPY_ACTIVE && ret) {
        /*
         * For postcopy, we allow the network to be down for a
         * while.  After that, it can be continued by a
         * recovery phase.
         */
        return postcopy_pause(s);
    } else {
        /*
         * For precopy (or postcopy with an error outside IO), we fail
         * immediately.
         */
        migrate_set_state(&s->state, state, MIGRATION_STATUS_FAILED);
        trace_migration_thread_file_err();

        /* Time to stop the migration, now. */
        return MIG_THR_ERR_FATAL;
    }
}

static void migration_calculate_complete(MigrationState *s)
{
    uint64_t bytes = migration_transferred_bytes();
    int64_t end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    int64_t transfer_time;

    migration_downtime_end(s);
    s->total_time = end_time - s->start_time;
    transfer_time = s->total_time - s->setup_time;
    if (transfer_time) {
        s->mbps = ((double) bytes * 8.0) / transfer_time / 1000;
    }
}

static void update_iteration_initial_status(MigrationState *s)
{
    /*
     * Update these three fields at the same time to avoid mismatched
     * info leading to wrong speed calculations.
     */
    s->iteration_start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    s->iteration_initial_bytes = migration_transferred_bytes();
    s->iteration_initial_pages = ram_get_total_transferred_pages();
}

static void migration_update_counters(MigrationState *s,
                                      int64_t current_time)
{
    uint64_t transferred, transferred_pages, time_spent;
    uint64_t current_bytes; /* bytes transferred since the beginning */
    uint64_t switchover_bw;
    /* Expected bandwidth when switching over to destination QEMU */
    double expected_bw_per_ms;
    double bandwidth;

    if (current_time < s->iteration_start_time + BUFFER_DELAY) {
        return;
    }

    switchover_bw = migrate_avail_switchover_bandwidth();
    current_bytes = migration_transferred_bytes();
    transferred = current_bytes - s->iteration_initial_bytes;
    time_spent = current_time - s->iteration_start_time;
    bandwidth = (double)transferred / time_spent;

    if (switchover_bw) {
        /*
         * If the user specified a switchover bandwidth, let's trust the
         * user, since it can be more accurate than what we estimated.
         */
        expected_bw_per_ms = switchover_bw / 1000;
    } else {
        /* If the user doesn't specify bandwidth, we use the estimated */
        expected_bw_per_ms = bandwidth;
    }

    s->threshold_size = expected_bw_per_ms * migrate_downtime_limit();

    s->mbps = (((double) transferred * 8.0) /
               ((double) time_spent / 1000.0)) / 1000.0 / 1000.0;

    transferred_pages = ram_get_total_transferred_pages() -
                        s->iteration_initial_pages;
    s->pages_per_second = (double) transferred_pages /
                          (((double) time_spent / 1000.0));

    /*
     * If we haven't sent anything, we don't want to
     * recalculate.  10000 is a small enough number for our purposes.
     */
    if (stat64_get(&mig_stats.dirty_pages_rate) &&
        transferred > 10000) {
        s->expected_downtime =
            stat64_get(&mig_stats.dirty_bytes_last_sync) / expected_bw_per_ms;
    }

    migration_rate_reset();

    update_iteration_initial_status(s);

    trace_migrate_transferred(transferred, time_spent,
                              /* Both in unit bytes/ms */
                              bandwidth, switchover_bw / 1000,
                              s->threshold_size);
}
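
/*
 * A worked example of the arithmetic above, with made-up numbers: if
 * the last iteration pushed 2 GiB in 10 s, the estimate is
 * 2147483648 / 10000 ~= 215000 bytes/ms.  With the default downtime
 * limit of 300 ms (the default at the time of writing; see options.c),
 * threshold_size becomes 215000 * 300 ~= 64 MB:
 * migration_iteration_run() below will only attempt switchover once
 * the remaining precopy data is believed to fit in what we can push
 * within the allowed downtime.
 */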

static bool migration_can_switchover(MigrationState *s)
{
    if (!migrate_switchover_ack()) {
        return true;
    }

    /* No reason to wait for switchover ACK if VM is stopped */
    if (!runstate_is_running()) {
        return true;
    }

    return s->switchover_acked;
}

/* Migration thread iteration status */
typedef enum {
    MIG_ITERATE_RESUME, /* Resume current iteration */
    MIG_ITERATE_SKIP,   /* Skip current iteration */
    MIG_ITERATE_BREAK,  /* Break the loop */
} MigIterateState;

/*
 * Run one iteration of the migration loop and decide whether to
 * resume, skip, or break out of it.
 */
static MigIterateState migration_iteration_run(MigrationState *s)
{
    uint64_t must_precopy, can_postcopy;
    Error *local_err = NULL;
    bool in_postcopy = s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE;
    bool can_switchover = migration_can_switchover(s);

    qemu_savevm_state_pending_estimate(&must_precopy, &can_postcopy);
    uint64_t pending_size = must_precopy + can_postcopy;

    trace_migrate_pending_estimate(pending_size, must_precopy, can_postcopy);

    if (must_precopy <= s->threshold_size) {
        qemu_savevm_state_pending_exact(&must_precopy, &can_postcopy);
        pending_size = must_precopy + can_postcopy;
        trace_migrate_pending_exact(pending_size, must_precopy, can_postcopy);
    }

    if ((!pending_size || pending_size < s->threshold_size) && can_switchover) {
        trace_migration_thread_low_pending(pending_size);
        migration_completion(s);
        return MIG_ITERATE_BREAK;
    }

    /* Still a significant amount to transfer */
    if (!in_postcopy && must_precopy <= s->threshold_size && can_switchover &&
        qatomic_read(&s->start_postcopy)) {
        if (postcopy_start(s, &local_err)) {
            migrate_set_error(s, local_err);
            error_report_err(local_err);
        }
        return MIG_ITERATE_SKIP;
    }

    /* Just another iteration step */
    qemu_savevm_state_iterate(s->to_dst_file, in_postcopy);
    return MIG_ITERATE_RESUME;
}

static void migration_iteration_finish(MigrationState *s)
{
    /* If we enabled cpu throttling for auto-converge, turn it off. */
    cpu_throttle_stop();

    bql_lock();
    switch (s->state) {
    case MIGRATION_STATUS_COMPLETED:
        migration_calculate_complete(s);
        runstate_set(RUN_STATE_POSTMIGRATE);
        break;
    case MIGRATION_STATUS_COLO:
        assert(migrate_colo());
        migrate_start_colo_process(s);
        s->vm_old_state = RUN_STATE_RUNNING;
        /* Fallthrough */
    case MIGRATION_STATUS_FAILED:
    case MIGRATION_STATUS_CANCELLED:
    case MIGRATION_STATUS_CANCELLING:
        if (runstate_is_live(s->vm_old_state)) {
            if (!runstate_check(RUN_STATE_SHUTDOWN)) {
                vm_start();
            }
        } else {
            if (runstate_check(RUN_STATE_FINISH_MIGRATE)) {
                runstate_set(s->vm_old_state);
            }
        }
        break;

    default:
        /* Should not reach here, but if so, forgive the VM. */
        error_report("%s: Unknown ending state %d", __func__, s->state);
        break;
    }

    migration_bh_schedule(migrate_fd_cleanup_bh, s);
    bql_unlock();
}

static void bg_migration_iteration_finish(MigrationState *s)
{
    /*
     * Stop tracking RAM writes - un-protect memory, un-register UFFD
     * memory ranges, flush kernel wait queues and wake up threads
     * waiting for write fault to be resolved.
     */
    ram_write_tracking_stop();

    bql_lock();
    switch (s->state) {
    case MIGRATION_STATUS_COMPLETED:
        migration_calculate_complete(s);
        break;

    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_FAILED:
    case MIGRATION_STATUS_CANCELLED:
    case MIGRATION_STATUS_CANCELLING:
        break;

    default:
        /* Should not reach here, but if so, forgive the VM. */
        error_report("%s: Unknown ending state %d", __func__, s->state);
        break;
    }

    migration_bh_schedule(migrate_fd_cleanup_bh, s);
    bql_unlock();
}

/*
 * Run one iteration of the background snapshot loop and decide whether
 * to resume or break out of it.
 */
static MigIterateState bg_migration_iteration_run(MigrationState *s)
{
    int res;

    res = qemu_savevm_state_iterate(s->to_dst_file, false);
    if (res > 0) {
        bg_migration_completion(s);
        return MIG_ITERATE_BREAK;
    }

    return MIG_ITERATE_RESUME;
}

void migration_make_urgent_request(void)
{
    qemu_sem_post(&migrate_get_current()->rate_limit_sem);
}

void migration_consume_urgent_request(void)
{
    qemu_sem_wait(&migrate_get_current()->rate_limit_sem);
}
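
/*
 * The make/consume pair above revolves around rate_limit_sem: a
 * producer (e.g. a return-path page request being queued in ram.c)
 * posts the semaphore to punch through the BUFFER_DELAY sleep in
 * migration_rate_limit() below, and the service routine consumes one
 * post per item it handles.  A hypothetical caller, for illustration
 * only ('queue_urgent_page' is made up):
 *
 *     queue_urgent_page(...);              stash the work item
 *     migration_make_urgent_request();     wake the migration thread
 *
 * and on the servicing side, one migration_consume_urgent_request()
 * per dequeued item keeps the semaphore balanced.
 */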

/* Returns true if the rate limiting was broken by an urgent request */
bool migration_rate_limit(void)
{
    int64_t now = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    MigrationState *s = migrate_get_current();

    bool urgent = false;
    migration_update_counters(s, now);
    if (migration_rate_exceeded(s->to_dst_file)) {

        if (qemu_file_get_error(s->to_dst_file)) {
            return false;
        }
        /*
         * Wait for a delay to do rate limiting OR
         * something urgent to post the semaphore.
         */
        int ms = s->iteration_start_time + BUFFER_DELAY - now;
        trace_migration_rate_limit_pre(ms);
        if (qemu_sem_timedwait(&s->rate_limit_sem, ms) == 0) {
            /*
             * We were woken by one or more urgent things but
             * the timedwait will have consumed one of them.
             * The service routine for the urgent wake will dec
             * the semaphore itself for each item it consumes,
             * so add back the one we just ate.
             */
            qemu_sem_post(&s->rate_limit_sem);
            urgent = true;
        }
        trace_migration_rate_limit_post(urgent);
    }
    return urgent;
}

/*
 * If failover devices are present, wait until they are completely
 * unplugged.
 */
static void qemu_savevm_wait_unplug(MigrationState *s, int old_state,
                                    int new_state)
{
    if (qemu_savevm_state_guest_unplug_pending()) {
        migrate_set_state(&s->state, old_state, MIGRATION_STATUS_WAIT_UNPLUG);

        while (s->state == MIGRATION_STATUS_WAIT_UNPLUG &&
               qemu_savevm_state_guest_unplug_pending()) {
            qemu_sem_timedwait(&s->wait_unplug_sem, 250);
        }
        if (s->state != MIGRATION_STATUS_WAIT_UNPLUG) {
            int timeout = 120; /* 30 seconds */
            /*
             * Migration has been cancelled, but as we have started an
             * unplug we must wait for it to finish in order to be able
             * to plug the card back in.
             */
            while (timeout-- && qemu_savevm_state_guest_unplug_pending()) {
                qemu_sem_timedwait(&s->wait_unplug_sem, 250);
            }
            if (qemu_savevm_state_guest_unplug_pending() &&
                !qtest_enabled()) {
                warn_report("migration: partially unplugged device on "
                            "failure");
            }
        }

        migrate_set_state(&s->state, MIGRATION_STATUS_WAIT_UNPLUG, new_state);
    } else {
        migrate_set_state(&s->state, old_state, new_state);
    }
}

/*
 * Master migration thread on the source VM.
 * It drives the migration and pumps the data down the outgoing channel.
 */
static void *migration_thread(void *opaque)
{
    MigrationState *s = opaque;
    MigrationThread *thread = NULL;
    int64_t setup_start = qemu_clock_get_ms(QEMU_CLOCK_HOST);
    MigThrError thr_error;
    bool urgent = false;

    thread = migration_threads_add("live_migration", qemu_get_thread_id());

    rcu_register_thread();

    object_ref(OBJECT(s));
    update_iteration_initial_status(s);

    if (!multifd_send_setup()) {
        goto out;
    }

    bql_lock();
    qemu_savevm_state_header(s->to_dst_file);
    bql_unlock();

    /*
     * If we opened the return path, we need to make sure dst has it
     * opened as well.
     */
    if (s->rp_state.rp_thread_created) {
        /* Now tell the dest that it should open its end so it can reply */
        qemu_savevm_send_open_return_path(s->to_dst_file);

        /* And do a ping that will make stuff easier to debug */
        qemu_savevm_send_ping(s->to_dst_file, 1);
    }

    if (migrate_postcopy()) {
        /*
         * Tell the destination that we *might* want to do postcopy later;
         * if the other end can't do postcopy it should fail now, nice and
         * early.
         */
        qemu_savevm_send_postcopy_advise(s->to_dst_file);
    }

    if (migrate_colo()) {
        /* Notify migration destination that we enable COLO */
        qemu_savevm_send_colo_enable(s->to_dst_file);
    }

    bql_lock();
    qemu_savevm_state_setup(s->to_dst_file);
    bql_unlock();

    qemu_savevm_wait_unplug(s, MIGRATION_STATUS_SETUP,
                            MIGRATION_STATUS_ACTIVE);

    s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start;

    trace_migration_thread_setup_complete();

    while (migration_is_active(s)) {
        if (urgent || !migration_rate_exceeded(s->to_dst_file)) {
            MigIterateState iter_state = migration_iteration_run(s);
            if (iter_state == MIG_ITERATE_SKIP) {
                continue;
            } else if (iter_state == MIG_ITERATE_BREAK) {
                break;
            }
        }

        /*
         * Try to detect any kind of failures, and see whether we
         * should stop the migration now.
         */
        thr_error = migration_detect_error(s);
        if (thr_error == MIG_THR_ERR_FATAL) {
            /* Stop migration */
            break;
        } else if (thr_error == MIG_THR_ERR_RECOVERED) {
            /*
             * Just recovered from e.g. a network failure; reset all
             * the local variables.  This is important to avoid
             * breaking the transferred_bytes and bandwidth calculations.
             */
            update_iteration_initial_status(s);
        }

        urgent = migration_rate_limit();
    }

out:
    trace_migration_thread_after_loop();
    migration_iteration_finish(s);
    object_unref(OBJECT(s));
    rcu_unregister_thread();
    migration_threads_remove(thread);
    return NULL;
}

static void bg_migration_vm_start_bh(void *opaque)
{
    MigrationState *s = opaque;

    vm_resume(s->vm_old_state);
    migration_downtime_end(s);
}
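
/*
 * For context, the background snapshot path below is selected by
 * enabling the 'background-snapshot' capability before issuing a
 * normal migrate command, e.g. (QMP, with a placeholder file path):
 *
 *   { "execute": "migrate-set-capabilities",
 *     "arguments": { "capabilities": [
 *         { "capability": "background-snapshot", "state": true } ] } }
 *   { "execute": "migrate",
 *     "arguments": { "uri": "file:/path/to/snapshot" } }
 *
 * migrate_fd_connect() then spawns bg_migration_thread() instead of
 * migration_thread().
 */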

/**
 * Background snapshot thread, based on the live migration code.
 * This is an alternative implementation of the live migration mechanism
 * introduced specifically to support background snapshots.
 *
 * It takes advantage of the userfaultfd write protection mechanism
 * introduced in the v5.7 kernel.  Compared to the existing dirty page
 * logging migration, much less stream traffic is produced, resulting in
 * smaller snapshot images, simply because no page duplicates can get
 * into the stream.
 *
 * Another key point is that the generated vmstate stream reflects the
 * machine state 'frozen' at the beginning of snapshot creation, whereas
 * the dirty page logging mechanism effectively results in a saved
 * snapshot that reflects the state of the VM at the end of the process.
 */
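/*
 * The write tracking itself (ram_write_tracking_prepare()/start() used
 * below) is built on userfaultfd: roughly, RAM ranges are registered
 * with UFFDIO_REGISTER in UFFDIO_REGISTER_MODE_WP mode and then
 * write-protected with UFFDIO_WRITEPROTECT, so the first guest write
 * to a page after the snapshot point faults into QEMU, which saves the
 * pristine copy before un-protecting the page.  This is a simplified
 * description; see migration/ram.c for the actual mechanics.
 */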
static void *bg_migration_thread(void *opaque)
{
    MigrationState *s = opaque;
    int64_t setup_start;
    MigThrError thr_error;
    QEMUFile *fb;
    bool early_fail = true;

    rcu_register_thread();
    object_ref(OBJECT(s));

    migration_rate_set(RATE_LIMIT_DISABLED);

    setup_start = qemu_clock_get_ms(QEMU_CLOCK_HOST);
    /*
     * We want to save the vmstate for the moment when migration has been
     * initiated, but we also want to save RAM content while the VM is
     * running.  The RAM content should appear first in the vmstate.
     * So, we first stash the non-RAM part of the vmstate to a temporary
     * buffer, then write the RAM part of the vmstate to the migration
     * stream with vCPUs running and, finally, write the stashed non-RAM
     * part of the vmstate from the buffer to the migration stream.
     */
    s->bioc = qio_channel_buffer_new(512 * 1024);
    qio_channel_set_name(QIO_CHANNEL(s->bioc), "vmstate-buffer");
    fb = qemu_file_new_output(QIO_CHANNEL(s->bioc));
    object_unref(OBJECT(s->bioc));

    update_iteration_initial_status(s);

    /*
     * Prepare for tracking memory writes with UFFD-WP - populate
     * RAM pages before protecting.
     */
#ifdef __linux__
    ram_write_tracking_prepare();
#endif

    bql_lock();
    qemu_savevm_state_header(s->to_dst_file);
    qemu_savevm_state_setup(s->to_dst_file);
    bql_unlock();

    qemu_savevm_wait_unplug(s, MIGRATION_STATUS_SETUP,
                            MIGRATION_STATUS_ACTIVE);

    s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start;

    trace_migration_thread_setup_complete();
    migration_downtime_start(s);

    bql_lock();

    s->vm_old_state = runstate_get();

    global_state_store();
    /* Forcibly stop VM before saving state of vCPUs and devices */
    if (migration_stop_vm(RUN_STATE_PAUSED)) {
        goto fail;
    }
    /*
     * Put vCPUs in sync with shadow context structures, then
     * save their state to channel-buffer along with devices.
     */
    cpu_synchronize_all_states();
    if (qemu_savevm_state_complete_precopy_non_iterable(fb, false, false)) {
        goto fail;
    }
    /*
     * Since we are going to get non-iterable state data directly
     * from s->bioc->data, explicit flush is needed here.
     */
    qemu_fflush(fb);

    /* Now initialize UFFD context and start tracking RAM writes */
    if (ram_write_tracking_start()) {
        goto fail;
    }
    early_fail = false;

    /*
     * Start VM from BH handler to avoid write-fault lock here.
     * UFFD-WP protection for the whole RAM is already enabled so
     * calling VM state change notifiers from vm_start() would initiate
     * writes to virtio VQs memory which is in write-protected region.
     */
    migration_bh_schedule(bg_migration_vm_start_bh, s);
    bql_unlock();

    while (migration_is_active(s)) {
        MigIterateState iter_state = bg_migration_iteration_run(s);
        if (iter_state == MIG_ITERATE_SKIP) {
            continue;
        } else if (iter_state == MIG_ITERATE_BREAK) {
            break;
        }

        /*
         * Try to detect any kind of failures, and see whether we
         * should stop the migration now.
         */
        thr_error = migration_detect_error(s);
        if (thr_error == MIG_THR_ERR_FATAL) {
            /* Stop migration */
            break;
        }

        migration_update_counters(s, qemu_clock_get_ms(QEMU_CLOCK_REALTIME));
    }

    trace_migration_thread_after_loop();

fail:
    if (early_fail) {
        migrate_set_state(&s->state, MIGRATION_STATUS_ACTIVE,
                          MIGRATION_STATUS_FAILED);
        bql_unlock();
    }

    bg_migration_iteration_finish(s);

    qemu_fclose(fb);
    object_unref(OBJECT(s));
    rcu_unregister_thread();

    return NULL;
}

void migrate_fd_connect(MigrationState *s, Error *error_in)
{
    Error *local_err = NULL;
    uint64_t rate_limit;
    bool resume = s->state == MIGRATION_STATUS_POSTCOPY_PAUSED;

    /*
     * If there's a previous error, free it and prepare for another one.
     * Meanwhile if migration completes successfully, there won't be an
     * error dumped when calling migrate_fd_cleanup().
     */
    migrate_error_free(s);

    s->expected_downtime = migrate_downtime_limit();
    if (error_in) {
        migrate_fd_error(s, error_in);
        if (resume) {
            /*
             * Don't do cleanup for resume if channel is invalid, but only dump
             * the error.  We wait for another channel connect from the user.
             * The error_report still gives HMP user a hint on what failed.
             * It's normally done in migrate_fd_cleanup(), but call it here
             * explicitly.
             */
            error_report_err(error_copy(s->error));
        } else {
            migrate_fd_cleanup(s);
        }
        return;
    }

    if (resume) {
        /* This is a resumed migration */
        rate_limit = migrate_max_postcopy_bandwidth();
    } else {
        /* This is a fresh new migration */
        rate_limit = migrate_max_bandwidth();

        /* Notify before starting migration thread */
        migration_call_notifiers(s, MIG_EVENT_PRECOPY_SETUP);
    }

    migration_rate_set(rate_limit);
    qemu_file_set_blocking(s->to_dst_file, true);

    /*
     * Open the return path.  For postcopy, it is used exclusively.  For
     * precopy, QEMU uses the return path only if the user specified the
     * "return-path" capability.
     */
    if (migrate_postcopy_ram() || migrate_return_path()) {
        if (open_return_path_on_source(s)) {
            error_setg(&local_err, "Unable to open return-path for postcopy");
            migrate_set_state(&s->state, s->state, MIGRATION_STATUS_FAILED);
            migrate_set_error(s, local_err);
            error_report_err(local_err);
            migrate_fd_cleanup(s);
            return;
        }
    }

    /*
     * This needs to be done before resuming a postcopy.  Note: for newer
     * QEMUs we will delay the channel creation until postcopy_start(), to
     * avoid disorder of channel creations.
     */
    if (migrate_postcopy_preempt() && s->preempt_pre_7_2) {
        postcopy_preempt_setup(s);
    }

    if (resume) {
        /* Wakeup the main migration thread to do the recovery */
        migrate_set_state(&s->state, MIGRATION_STATUS_POSTCOPY_PAUSED,
                          MIGRATION_STATUS_POSTCOPY_RECOVER);
        qemu_sem_post(&s->postcopy_pause_sem);
        return;
    }

    if (migrate_background_snapshot()) {
        qemu_thread_create(&s->thread, "bg_snapshot",
                           bg_migration_thread, s, QEMU_THREAD_JOINABLE);
    } else {
        qemu_thread_create(&s->thread, "live_migration",
                           migration_thread, s, QEMU_THREAD_JOINABLE);
    }
    s->migration_thread_running = true;
}

static void migration_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->user_creatable = false;
    device_class_set_props(dc, migration_properties);
}

static void migration_instance_finalize(Object *obj)
{
    MigrationState *ms = MIGRATION_OBJ(obj);

    qemu_mutex_destroy(&ms->error_mutex);
    qemu_mutex_destroy(&ms->qemu_file_lock);
    qemu_sem_destroy(&ms->wait_unplug_sem);
    qemu_sem_destroy(&ms->rate_limit_sem);
    qemu_sem_destroy(&ms->pause_sem);
    qemu_sem_destroy(&ms->postcopy_pause_sem);
    qemu_sem_destroy(&ms->rp_state.rp_sem);
    qemu_sem_destroy(&ms->rp_state.rp_pong_acks);
    qemu_sem_destroy(&ms->postcopy_qemufile_src_sem);
    error_free(ms->error);
}

static void migration_instance_init(Object *obj)
{
    MigrationState *ms = MIGRATION_OBJ(obj);

    ms->state = MIGRATION_STATUS_NONE;
    ms->mbps = -1;
    ms->pages_per_second = -1;
    qemu_sem_init(&ms->pause_sem, 0);
    qemu_mutex_init(&ms->error_mutex);

    migrate_params_init(&ms->parameters);

    qemu_sem_init(&ms->postcopy_pause_sem, 0);
    qemu_sem_init(&ms->rp_state.rp_sem, 0);
    qemu_sem_init(&ms->rp_state.rp_pong_acks, 0);
    qemu_sem_init(&ms->rate_limit_sem, 0);
    qemu_sem_init(&ms->wait_unplug_sem, 0);
    qemu_sem_init(&ms->postcopy_qemufile_src_sem, 0);
    qemu_mutex_init(&ms->qemu_file_lock);
}

/*
 * Return true if the check passes, false otherwise.  The error will be
 * put inside errp if provided.
 */
static bool migration_object_check(MigrationState *ms, Error **errp)
{
    /* Assuming all off */
    bool old_caps[MIGRATION_CAPABILITY__MAX] = { 0 };

    if (!migrate_params_check(&ms->parameters, errp)) {
        return false;
    }

    return migrate_caps_check(old_caps, ms->capabilities, errp);
}

static const TypeInfo migration_type = {
    .name = TYPE_MIGRATION,
    /*
     * NOTE: TYPE_MIGRATION is not really a device, as the object is
     * not created using qdev_new(), it is not attached to the qdev
     * device tree, and it is never realized.
     *
     * TODO: Make this TYPE_OBJECT once QOM provides something like
     * TYPE_DEVICE's "-global" properties.
     */
    .parent = TYPE_DEVICE,
    .class_init = migration_class_init,
    .class_size = sizeof(MigrationClass),
    .instance_size = sizeof(MigrationState),
    .instance_init = migration_instance_init,
    .instance_finalize = migration_instance_finalize,
};

static void register_migration_types(void)
{
    type_register_static(&migration_type);
}

type_init(register_migration_types);