xref: /qemu/migration/migration.c (revision 4d7dd4ed)
1 /*
2  * QEMU live migration
3  *
4  * Copyright IBM, Corp. 2008
5  *
6  * Authors:
7  *  Anthony Liguori   <aliguori@us.ibm.com>
8  *
9  * This work is licensed under the terms of the GNU GPL, version 2.  See
10  * the COPYING file in the top-level directory.
11  *
12  * Contributions after 2012-01-13 are licensed under the terms of the
13  * GNU GPL, version 2 or (at your option) any later version.
14  */
15 
16 #include "qemu/osdep.h"
17 #include "qemu/cutils.h"
18 #include "qemu/error-report.h"
19 #include "qemu/main-loop.h"
20 #include "migration/blocker.h"
21 #include "exec.h"
22 #include "fd.h"
23 #include "file.h"
24 #include "socket.h"
25 #include "sysemu/runstate.h"
26 #include "sysemu/sysemu.h"
27 #include "sysemu/cpu-throttle.h"
28 #include "rdma.h"
29 #include "ram.h"
30 #include "ram-compress.h"
31 #include "migration/global_state.h"
32 #include "migration/misc.h"
33 #include "migration.h"
34 #include "migration-stats.h"
35 #include "savevm.h"
36 #include "qemu-file.h"
37 #include "channel.h"
38 #include "migration/vmstate.h"
39 #include "block/block.h"
40 #include "qapi/error.h"
41 #include "qapi/clone-visitor.h"
42 #include "qapi/qapi-visit-migration.h"
43 #include "qapi/qapi-visit-sockets.h"
44 #include "qapi/qapi-commands-migration.h"
45 #include "qapi/qapi-events-migration.h"
46 #include "qapi/qmp/qerror.h"
47 #include "qapi/qmp/qnull.h"
48 #include "qemu/rcu.h"
49 #include "block.h"
50 #include "postcopy-ram.h"
51 #include "qemu/thread.h"
52 #include "trace.h"
53 #include "exec/target_page.h"
54 #include "io/channel-buffer.h"
55 #include "io/channel-tls.h"
56 #include "migration/colo.h"
57 #include "hw/boards.h"
58 #include "monitor/monitor.h"
59 #include "net/announce.h"
60 #include "qemu/queue.h"
61 #include "multifd.h"
62 #include "threadinfo.h"
63 #include "qemu/yank.h"
64 #include "sysemu/cpus.h"
65 #include "yank_functions.h"
66 #include "sysemu/qtest.h"
67 #include "options.h"
68 #include "sysemu/dirtylimit.h"
69 
70 static NotifierList migration_state_notifiers =
71     NOTIFIER_LIST_INITIALIZER(migration_state_notifiers);
72 
73 /* Messages sent on the return path from destination to source */
74 enum mig_rp_message_type {
75     MIG_RP_MSG_INVALID = 0,  /* Must be 0 */
76     MIG_RP_MSG_SHUT,         /* sibling will not send any more RP messages */
77     MIG_RP_MSG_PONG,         /* Response to a PING; data (seq: be32) */
78 
79     MIG_RP_MSG_REQ_PAGES_ID, /* data (start: be64, len: be32, id: string) */
80     MIG_RP_MSG_REQ_PAGES,    /* data (start: be64, len: be32) */
81     MIG_RP_MSG_RECV_BITMAP,  /* send recved_bitmap back to source */
82     MIG_RP_MSG_RESUME_ACK,   /* tell source that we are ready to resume */
83     MIG_RP_MSG_SWITCHOVER_ACK, /* Tell source it's OK to do switchover */
84 
85     MIG_RP_MSG_MAX
86 };
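
/*
 * For orientation (a summary of migrate_send_rp_message() below, not a
 * separate spec): every return-path message is framed on the wire as
 *
 *   be16 type;          // one of mig_rp_message_type
 *   be16 len;           // payload length in bytes
 *   uint8_t data[len];  // message-specific payload, e.g. a be32
 *                       // sequence number for MIG_RP_MSG_PONG
 */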
87 
88 /* When we add fault tolerance, we could have several
89    migrations at once.  For now we don't need dynamic
90    creation of migration objects. */
91 
92 static MigrationState *current_migration;
93 static MigrationIncomingState *current_incoming;
94 
95 static GSList *migration_blockers;
96 
97 static bool migration_object_check(MigrationState *ms, Error **errp);
98 static int migration_maybe_pause(MigrationState *s,
99                                  int *current_active_state,
100                                  int new_state);
101 static void migrate_fd_cancel(MigrationState *s);
102 static int close_return_path_on_source(MigrationState *s);
103 
104 static bool migration_needs_multiple_sockets(void)
105 {
106     return migrate_multifd() || migrate_postcopy_preempt();
107 }
108 
109 static bool uri_supports_multi_channels(const char *uri)
110 {
111     return strstart(uri, "tcp:", NULL) || strstart(uri, "unix:", NULL) ||
112            strstart(uri, "vsock:", NULL);
113 }
114 
115 static bool
116 migration_channels_and_uri_compatible(const char *uri, Error **errp)
117 {
118     if (migration_needs_multiple_sockets() &&
119         !uri_supports_multi_channels(uri)) {
120         error_setg(errp, "Migration requires multi-channel URIs (e.g. tcp)");
121         return false;
122     }
123 
124     return true;
125 }
126 
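/*
 * Comparator for the page_requested GTree created in
 * migration_object_init(); the (a > b) - (a < b) idiom yields -1, 0 or 1
 * without the truncation/overflow risk of returning a plain subtraction.
 */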
127 static gint page_request_addr_cmp(gconstpointer ap, gconstpointer bp)
128 {
129     uintptr_t a = (uintptr_t) ap, b = (uintptr_t) bp;
130 
131     return (a > b) - (a < b);
132 }
133 
134 void migration_object_init(void)
135 {
136     /* This can only be called once. */
137     assert(!current_migration);
138     current_migration = MIGRATION_OBJ(object_new(TYPE_MIGRATION));
139 
140     /*
141      * Init the incoming migration object as well, no matter whether
142      * we'll use it or not.
143      */
144     assert(!current_incoming);
145     current_incoming = g_new0(MigrationIncomingState, 1);
146     current_incoming->state = MIGRATION_STATUS_NONE;
147     current_incoming->postcopy_remote_fds =
148         g_array_new(FALSE, TRUE, sizeof(struct PostCopyFD));
149     qemu_mutex_init(&current_incoming->rp_mutex);
150     qemu_mutex_init(&current_incoming->postcopy_prio_thread_mutex);
151     qemu_event_init(&current_incoming->main_thread_load_event, false);
152     qemu_sem_init(&current_incoming->postcopy_pause_sem_dst, 0);
153     qemu_sem_init(&current_incoming->postcopy_pause_sem_fault, 0);
154     qemu_sem_init(&current_incoming->postcopy_pause_sem_fast_load, 0);
155     qemu_sem_init(&current_incoming->postcopy_qemufile_dst_done, 0);
156 
157     qemu_mutex_init(&current_incoming->page_request_mutex);
158     qemu_cond_init(&current_incoming->page_request_cond);
159     current_incoming->page_requested = g_tree_new(page_request_addr_cmp);
160 
161     migration_object_check(current_migration, &error_fatal);
162 
163     blk_mig_init();
164     ram_mig_init();
165     dirty_bitmap_mig_init();
166 }
167 
168 void migration_cancel(const Error *error)
169 {
170     if (error) {
171         migrate_set_error(current_migration, error);
172     }
173     if (migrate_dirty_limit()) {
174         qmp_cancel_vcpu_dirty_limit(false, -1, NULL);
175     }
176     migrate_fd_cancel(current_migration);
177 }
178 
179 void migration_shutdown(void)
180 {
181     /*
182      * When the QEMU main thread exits, the COLO thread
183      * may be waiting on a semaphore. So, we should wake up
184      * the COLO thread before migration shutdown.
185      */
186     colo_shutdown();
187     /*
188      * Cancel the current migration - that will (eventually)
189      * stop the migration using this structure
190      */
191     migration_cancel(NULL);
192     object_unref(OBJECT(current_migration));
193 
194     /*
195      * Cancel outgoing migration of dirty bitmaps. It should
196      * at least unref used block nodes.
197      */
198     dirty_bitmap_mig_cancel_outgoing();
199 
200     /*
201      * Cancel incoming migration of dirty bitmaps. Dirty bitmaps
202      * are non-critical data, and their loss is never considered
203      * serious.
204      */
205     dirty_bitmap_mig_cancel_incoming();
206 }
207 
208 /* For outgoing */
209 MigrationState *migrate_get_current(void)
210 {
211     /* This can only be called after the object is created. */
212     assert(current_migration);
213     return current_migration;
214 }
215 
216 MigrationIncomingState *migration_incoming_get_current(void)
217 {
218     assert(current_incoming);
219     return current_incoming;
220 }
221 
222 void migration_incoming_transport_cleanup(MigrationIncomingState *mis)
223 {
224     if (mis->socket_address_list) {
225         qapi_free_SocketAddressList(mis->socket_address_list);
226         mis->socket_address_list = NULL;
227     }
228 
229     if (mis->transport_cleanup) {
230         mis->transport_cleanup(mis->transport_data);
231         mis->transport_data = mis->transport_cleanup = NULL;
232     }
233 }
234 
235 void migration_incoming_state_destroy(void)
236 {
237     struct MigrationIncomingState *mis = migration_incoming_get_current();
238 
239     multifd_load_cleanup();
240     compress_threads_load_cleanup();
241 
242     if (mis->to_src_file) {
243         /* Tell source that we are done */
244         migrate_send_rp_shut(mis, qemu_file_get_error(mis->from_src_file) != 0);
245         qemu_fclose(mis->to_src_file);
246         mis->to_src_file = NULL;
247     }
248 
249     if (mis->from_src_file) {
250         migration_ioc_unregister_yank_from_file(mis->from_src_file);
251         qemu_fclose(mis->from_src_file);
252         mis->from_src_file = NULL;
253     }
254     if (mis->postcopy_remote_fds) {
255         g_array_free(mis->postcopy_remote_fds, TRUE);
256         mis->postcopy_remote_fds = NULL;
257     }
258 
259     migration_incoming_transport_cleanup(mis);
260     qemu_event_reset(&mis->main_thread_load_event);
261 
262     if (mis->page_requested) {
263         g_tree_destroy(mis->page_requested);
264         mis->page_requested = NULL;
265     }
266 
267     if (mis->postcopy_qemufile_dst) {
268         migration_ioc_unregister_yank_from_file(mis->postcopy_qemufile_dst);
269         qemu_fclose(mis->postcopy_qemufile_dst);
270         mis->postcopy_qemufile_dst = NULL;
271     }
272 
273     yank_unregister_instance(MIGRATION_YANK_INSTANCE);
274 }
275 
276 static void migrate_generate_event(int new_state)
277 {
278     if (migrate_events()) {
279         qapi_event_send_migration(new_state);
280     }
281 }
282 
283 /*
284  * Send a message on the return channel back to the source
285  * of the migration.
286  */
287 static int migrate_send_rp_message(MigrationIncomingState *mis,
288                                    enum mig_rp_message_type message_type,
289                                    uint16_t len, void *data)
290 {
291     int ret = 0;
292 
293     trace_migrate_send_rp_message((int)message_type, len);
294     QEMU_LOCK_GUARD(&mis->rp_mutex);
295 
296     /*
297      * It's possible that the file handle got lost due to network
298      * failures.
299      */
300     if (!mis->to_src_file) {
301         ret = -EIO;
302         return ret;
303     }
304 
305     qemu_put_be16(mis->to_src_file, (unsigned int)message_type);
306     qemu_put_be16(mis->to_src_file, len);
307     qemu_put_buffer(mis->to_src_file, data, len);
308     return qemu_fflush(mis->to_src_file);
309 }
310 
311 /* Request one page from the source VM at the given start address.
312  *   rb: the RAMBlock to request the page in
313  *   start: address offset within the RB
314  * The requested length is one host page (qemu_ram_pagesize(rb)).
315  */
316 int migrate_send_rp_message_req_pages(MigrationIncomingState *mis,
317                                       RAMBlock *rb, ram_addr_t start)
318 {
319     uint8_t bufc[12 + 1 + 255]; /* start (8), len (4), rbname len (1) + rbname (up to 255) */
320     size_t msglen = 12; /* start + len */
321     size_t len = qemu_ram_pagesize(rb);
322     enum mig_rp_message_type msg_type;
323     const char *rbname;
324     int rbname_len;
325 
326     *(uint64_t *)bufc = cpu_to_be64((uint64_t)start);
327     *(uint32_t *)(bufc + 8) = cpu_to_be32((uint32_t)len);
328 
329     /*
330      * We maintain the last ramblock for which we requested a page.  Note that
331      * we don't need locking because this function will only be called within the
332      * postcopy ram fault thread.
333      */
334     if (rb != mis->last_rb) {
335         mis->last_rb = rb;
336 
337         rbname = qemu_ram_get_idstr(rb);
338         rbname_len = strlen(rbname);
339 
340         assert(rbname_len < 256);
341 
342         bufc[msglen++] = rbname_len;
343         memcpy(bufc + msglen, rbname, rbname_len);
344         msglen += rbname_len;
345         msg_type = MIG_RP_MSG_REQ_PAGES_ID;
346     } else {
347         msg_type = MIG_RP_MSG_REQ_PAGES;
348     }
349 
350     return migrate_send_rp_message(mis, msg_type, msglen, bufc);
351 }
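
/*
 * Illustrative payload layout for MIG_RP_MSG_REQ_PAGES_ID, using a
 * hypothetical ramblock named "pc.ram" (6 characters):
 *
 *   offset  0: be64 start       (8 bytes)
 *   offset  8: be32 len         (4 bytes, one host page)
 *   offset 12: u8   rbname_len  (= 6)
 *   offset 13: "pc.ram"         (rbname_len bytes, not NUL-terminated)
 *
 * MIG_RP_MSG_REQ_PAGES carries only the first 12 bytes and refers to the
 * same ramblock as the previous request.
 */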
352 
353 int migrate_send_rp_req_pages(MigrationIncomingState *mis,
354                               RAMBlock *rb, ram_addr_t start, uint64_t haddr)
355 {
356     void *aligned = (void *)(uintptr_t)ROUND_DOWN(haddr, qemu_ram_pagesize(rb));
357     bool received = false;
358 
359     WITH_QEMU_LOCK_GUARD(&mis->page_request_mutex) {
360         received = ramblock_recv_bitmap_test_byte_offset(rb, start);
361         if (!received && !g_tree_lookup(mis->page_requested, aligned)) {
362             /*
363              * The page has not been received, and it's not yet in the page
364              * request list.  Queue it.  Set the value of the element to 1, so
365              * that things like g_tree_lookup() will return TRUE (1) when found.
366              */
367             g_tree_insert(mis->page_requested, aligned, (gpointer)1);
368             qatomic_inc(&mis->page_requested_count);
369             trace_postcopy_page_req_add(aligned, mis->page_requested_count);
370         }
371     }
372 
373     /*
374      * If the page is there, skip sending the message.  We don't even need the
375      * lock because once the page has arrived, it'll be there forever.
376      */
377     if (received) {
378         return 0;
379     }
380 
381     return migrate_send_rp_message_req_pages(mis, rb, start);
382 }
383 
384 static bool migration_colo_enabled;
385 bool migration_incoming_colo_enabled(void)
386 {
387     return migration_colo_enabled;
388 }
389 
390 void migration_incoming_disable_colo(void)
391 {
392     ram_block_discard_disable(false);
393     migration_colo_enabled = false;
394 }
395 
396 int migration_incoming_enable_colo(void)
397 {
398 #ifndef CONFIG_REPLICATION
399     error_report("ENABLE_COLO command come in migration stream, but COLO "
400                  "module is not built in");
401     return -ENOTSUP;
402 #endif
403 
404     if (!migrate_colo()) {
405         error_report("ENABLE_COLO command come in migration stream, but c-colo "
406                      "capability is not set");
407         return -EINVAL;
408     }
409 
410     if (ram_block_discard_disable(true)) {
411         error_report("COLO: cannot disable RAM discard");
412         return -EBUSY;
413     }
414     migration_colo_enabled = true;
415     return 0;
416 }
417 
418 void migrate_add_address(SocketAddress *address)
419 {
420     MigrationIncomingState *mis = migration_incoming_get_current();
421 
422     QAPI_LIST_PREPEND(mis->socket_address_list,
423                       QAPI_CLONE(SocketAddress, address));
424 }
425 
426 static void qemu_start_incoming_migration(const char *uri, Error **errp)
427 {
428     const char *p = NULL;
429     MigrationIncomingState *mis = migration_incoming_get_current();
430 
431     /* Is the URI suitable for migration? If not, error out. */
432     if (!migration_channels_and_uri_compatible(uri, errp)) {
433         return;
434     }
435 
436     migrate_set_state(&mis->state, MIGRATION_STATUS_NONE,
437                       MIGRATION_STATUS_SETUP);
438 
439     if (strstart(uri, "tcp:", &p) ||
440         strstart(uri, "unix:", NULL) ||
441         strstart(uri, "vsock:", NULL)) {
442         socket_start_incoming_migration(p ? p : uri, errp);
443 #ifdef CONFIG_RDMA
444     } else if (strstart(uri, "rdma:", &p)) {
445         if (migrate_compress()) {
446             error_setg(errp, "RDMA and compression can't be used together");
447             return;
448         }
449         if (migrate_xbzrle()) {
450             error_setg(errp, "RDMA and XBZRLE can't be used together");
451             return;
452         }
453         if (migrate_multifd()) {
454             error_setg(errp, "RDMA and multifd can't be used together");
455             return;
456         }
457         rdma_start_incoming_migration(p, errp);
458 #endif
459     } else if (strstart(uri, "exec:", &p)) {
460         exec_start_incoming_migration(p, errp);
461     } else if (strstart(uri, "fd:", &p)) {
462         fd_start_incoming_migration(p, errp);
463     } else if (strstart(uri, "file:", &p)) {
464         file_start_incoming_migration(p, errp);
465     } else {
466         error_setg(errp, "unknown migration protocol: %s", uri);
467     }
468 }
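
/*
 * Illustrative '-incoming' URIs for the dispatch above (the addresses
 * and paths are examples only):
 *
 *   tcp:0.0.0.0:4444          listen on a TCP address/port
 *   unix:/tmp/migrate.sock    listen on a UNIX socket
 *   vsock:3:4444              listen on a vsock CID/port
 *   rdma:192.168.1.1:4444     RDMA (only with CONFIG_RDMA)
 *   exec:cat /tmp/state.bin   read the stream from a command's stdout
 *   fd:name-or-number         read from an already-opened fd
 *   file:/tmp/state.bin       read the stream from a file
 */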
469 
470 static void process_incoming_migration_bh(void *opaque)
471 {
472     Error *local_err = NULL;
473     MigrationIncomingState *mis = opaque;
474 
475     /* If the late_block_activate capability is set:
476      * Only fire up the block code now if we're going to restart the
477      * VM, else 'cont' will do it.
478      * This causes file locking to happen, so we don't want it
479      * unless we really are starting the VM.
480      */
481     if (!migrate_late_block_activate() ||
482          (autostart && (!global_state_received() ||
483             global_state_get_runstate() == RUN_STATE_RUNNING))) {
484         /* Make sure all file formats throw away their mutable metadata.
485          * If we get an error here, just don't restart the VM yet. */
486         bdrv_activate_all(&local_err);
487         if (local_err) {
488             error_report_err(local_err);
489             local_err = NULL;
490             autostart = false;
491         }
492     }
493 
494     /*
495      * This must happen after all error conditions are dealt with and
496      * we're sure the VM is going to be running on this host.
497      */
498     qemu_announce_self(&mis->announce_timer, migrate_announce_params());
499 
500     multifd_load_shutdown();
501 
502     dirty_bitmap_mig_before_vm_start();
503 
504     if (!global_state_received() ||
505         global_state_get_runstate() == RUN_STATE_RUNNING) {
506         if (autostart) {
507             vm_start();
508         } else {
509             runstate_set(RUN_STATE_PAUSED);
510         }
511     } else if (migration_incoming_colo_enabled()) {
512         migration_incoming_disable_colo();
513         vm_start();
514     } else {
515         runstate_set(global_state_get_runstate());
516     }
517     /*
518      * This must happen after any state changes since as soon as an external
519      * observer sees this event they might start to prod at the VM assuming
520      * it's ready to use.
521      */
522     migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
523                       MIGRATION_STATUS_COMPLETED);
524     qemu_bh_delete(mis->bh);
525     migration_incoming_state_destroy();
526 }
527 
528 static void coroutine_fn
529 process_incoming_migration_co(void *opaque)
530 {
531     MigrationIncomingState *mis = migration_incoming_get_current();
532     PostcopyState ps;
533     int ret;
534 
535     assert(mis->from_src_file);
536 
537     if (compress_threads_load_setup(mis->from_src_file)) {
538         error_report("Failed to setup decompress threads");
539         goto fail;
540     }
541 
542     mis->largest_page_size = qemu_ram_pagesize_largest();
543     postcopy_state_set(POSTCOPY_INCOMING_NONE);
544     migrate_set_state(&mis->state, MIGRATION_STATUS_SETUP,
545                       MIGRATION_STATUS_ACTIVE);
546 
547     mis->loadvm_co = qemu_coroutine_self();
548     ret = qemu_loadvm_state(mis->from_src_file);
549     mis->loadvm_co = NULL;
550 
551     ps = postcopy_state_get();
552     trace_process_incoming_migration_co_end(ret, ps);
553     if (ps != POSTCOPY_INCOMING_NONE) {
554         if (ps == POSTCOPY_INCOMING_ADVISE) {
555             /*
556              * Where a migration had postcopy enabled (and thus went to advise)
557              * but managed to complete within the precopy period, we can use
558              * the normal exit.
559              */
560             postcopy_ram_incoming_cleanup(mis);
561         } else if (ret >= 0) {
562             /*
563              * Postcopy was started, cleanup should happen at the end of the
564              * postcopy thread.
565              */
566             trace_process_incoming_migration_co_postcopy_end_main();
567             return;
568         }
569         /* Otherwise something went wrong; fall through to the normal exit path */
570     }
571 
572     if (ret < 0) {
573         error_report("load of migration failed: %s", strerror(-ret));
574         goto fail;
575     }
576 
577     if (colo_incoming_co() < 0) {
578         goto fail;
579     }
580 
581     mis->bh = qemu_bh_new(process_incoming_migration_bh, mis);
582     qemu_bh_schedule(mis->bh);
583     return;
584 fail:
585     migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
586                       MIGRATION_STATUS_FAILED);
587     qemu_fclose(mis->from_src_file);
588 
589     multifd_load_cleanup();
590     compress_threads_load_cleanup();
591 
592     exit(EXIT_FAILURE);
593 }
594 
595 /**
596  * migration_incoming_setup: Setup incoming migration
597  * @f: file for main migration channel
598  * @errp: where to put errors
599  *
600  * Returns: %true on success, %false on error.
601  */
602 static bool migration_incoming_setup(QEMUFile *f, Error **errp)
603 {
604     MigrationIncomingState *mis = migration_incoming_get_current();
605 
606     if (!mis->from_src_file) {
607         mis->from_src_file = f;
608     }
609     qemu_file_set_blocking(f, false);
610     return true;
611 }
612 
613 void migration_incoming_process(void)
614 {
615     Coroutine *co = qemu_coroutine_create(process_incoming_migration_co, NULL);
616     qemu_coroutine_enter(co);
617 }
618 
619 /* Returns true if recovered from a paused migration, otherwise false */
620 static bool postcopy_try_recover(void)
621 {
622     MigrationIncomingState *mis = migration_incoming_get_current();
623 
624     if (mis->state == MIGRATION_STATUS_POSTCOPY_PAUSED) {
625         /* Resumed from a paused postcopy migration */
626 
627         /* This should be set already in migration_incoming_setup() */
628         assert(mis->from_src_file);
629         /* Postcopy has a standalone thread to do the vm load */
630         qemu_file_set_blocking(mis->from_src_file, true);
631 
632         /* Re-configure the return path */
633         mis->to_src_file = qemu_file_get_return_path(mis->from_src_file);
634 
635         migrate_set_state(&mis->state, MIGRATION_STATUS_POSTCOPY_PAUSED,
636                           MIGRATION_STATUS_POSTCOPY_RECOVER);
637 
638         /*
639          * Here, we only wake up the main loading thread (while the
640          * other threads will still be waiting), so that we can receive
641          * commands from the source now, and answer them if needed. The
642          * other threads won't be woken up until we are sure that the
643          * source is ready to reply to page requests.
644          */
645         qemu_sem_post(&mis->postcopy_pause_sem_dst);
646         return true;
647     }
648 
649     return false;
650 }
651 
652 void migration_fd_process_incoming(QEMUFile *f, Error **errp)
653 {
654     if (!migration_incoming_setup(f, errp)) {
655         return;
656     }
657     if (postcopy_try_recover()) {
658         return;
659     }
660     migration_incoming_process();
661 }
662 
663 /*
664  * Returns true when we want to start a new incoming migration process,
665  * false otherwise.
666  */
667 static bool migration_should_start_incoming(bool main_channel)
668 {
669     /* Multifd doesn't start unless all channels are established */
670     if (migrate_multifd()) {
671         return migration_has_all_channels();
672     }
673 
674     /* Preempt channel only starts when the main channel is created */
675     if (migrate_postcopy_preempt()) {
676         return main_channel;
677     }
678 
679     /*
680      * For all other types of migration, we should only reach here when
681      * it's the main channel that's being created, and we should always
682      * proceed with this channel.
683      */
684     assert(main_channel);
685     return true;
686 }
687 
688 void migration_ioc_process_incoming(QIOChannel *ioc, Error **errp)
689 {
690     MigrationIncomingState *mis = migration_incoming_get_current();
691     Error *local_err = NULL;
692     QEMUFile *f;
693     bool default_channel = true;
694     uint32_t channel_magic = 0;
695     int ret = 0;
696 
697     if (migrate_multifd() && !migrate_postcopy_ram() &&
698         qio_channel_has_feature(ioc, QIO_CHANNEL_FEATURE_READ_MSG_PEEK)) {
699         /*
700          * With multiple channels, it is possible that we receive channels
701          * out of order on the destination side, causing an incorrect
702          * mapping of source channels on the destination side. Check the
703          * channel MAGIC to decide the type of channel. Note this is best
704          * effort: the postcopy preempt channel does not send any magic
705          * number, so avoid it for postcopy live migration. Also, TLS live
706          * migration already does a TLS handshake while initializing the
707          * main channel, so with TLS this issue is not possible.
708          */
709         ret = migration_channel_read_peek(ioc, (void *)&channel_magic,
710                                           sizeof(channel_magic), &local_err);
711 
712         if (ret != 0) {
713             error_propagate(errp, local_err);
714             return;
715         }
716 
717         default_channel = (channel_magic == cpu_to_be32(QEMU_VM_FILE_MAGIC));
718     } else {
719         default_channel = !mis->from_src_file;
720     }
721 
722     if (multifd_load_setup(errp) != 0) {
723         error_setg(errp, "Failed to setup multifd channels");
724         return;
725     }
726 
727     if (default_channel) {
728         f = qemu_file_new_input(ioc);
729 
730         if (!migration_incoming_setup(f, errp)) {
731             return;
732         }
733     } else {
734         /* Multiple connections */
735         assert(migration_needs_multiple_sockets());
736         if (migrate_multifd()) {
737             multifd_recv_new_channel(ioc, &local_err);
738         } else {
739             assert(migrate_postcopy_preempt());
740             f = qemu_file_new_input(ioc);
741             postcopy_preempt_new_channel(mis, f);
742         }
743         if (local_err) {
744             error_propagate(errp, local_err);
745             return;
746         }
747     }
748 
749     if (migration_should_start_incoming(default_channel)) {
750         /* If it's a recovery, we're done */
751         if (postcopy_try_recover()) {
752             return;
753         }
754         migration_incoming_process();
755     }
756 }
757 
758 /**
759  * @migration_has_all_channels: We have received all channels that we need
760  *
761  * Returns true when we have got connections to all the channels that
762  * we need for migration.
763  */
764 bool migration_has_all_channels(void)
765 {
766     MigrationIncomingState *mis = migration_incoming_get_current();
767 
768     if (!mis->from_src_file) {
769         return false;
770     }
771 
772     if (migrate_multifd()) {
773         return multifd_recv_all_channels_created();
774     }
775 
776     if (migrate_postcopy_preempt()) {
777         return mis->postcopy_qemufile_dst != NULL;
778     }
779 
780     return true;
781 }
782 
783 int migrate_send_rp_switchover_ack(MigrationIncomingState *mis)
784 {
785     return migrate_send_rp_message(mis, MIG_RP_MSG_SWITCHOVER_ACK, 0, NULL);
786 }
787 
788 /*
789  * Send a 'SHUT' message on the return channel with the given value
790  * to indicate that we've finished with the RP.  Non-0 value indicates
791  * error.
792  */
793 void migrate_send_rp_shut(MigrationIncomingState *mis,
794                           uint32_t value)
795 {
796     uint32_t buf;
797 
798     buf = cpu_to_be32(value);
799     migrate_send_rp_message(mis, MIG_RP_MSG_SHUT, sizeof(buf), &buf);
800 }
801 
802 /*
803  * Send a 'PONG' message on the return channel with the given value
804  * (normally in response to a 'PING')
805  */
806 void migrate_send_rp_pong(MigrationIncomingState *mis,
807                           uint32_t value)
808 {
809     uint32_t buf;
810 
811     buf = cpu_to_be32(value);
812     migrate_send_rp_message(mis, MIG_RP_MSG_PONG, sizeof(buf), &buf);
813 }
814 
815 void migrate_send_rp_recv_bitmap(MigrationIncomingState *mis,
816                                  char *block_name)
817 {
818     char buf[512];
819     int len;
820     int64_t res;
821 
822     /*
823      * First, we send the header part. It contains only the len of
824      * idstr, and the idstr itself.
825      */
826     len = strlen(block_name);
827     buf[0] = len;
828     memcpy(buf + 1, block_name, len);
829 
830     if (mis->state != MIGRATION_STATUS_POSTCOPY_RECOVER) {
831         error_report("%s: MSG_RP_RECV_BITMAP only used for recovery",
832                      __func__);
833         return;
834     }
835 
836     migrate_send_rp_message(mis, MIG_RP_MSG_RECV_BITMAP, len + 1, buf);
837 
838     /*
839      * Next, we dump the received bitmap to the stream.
840      *
841      * TODO: currently we are safe since we are the only one that is
842      * using the to_src_file handle (the fault thread is still paused),
843      * and it's OK even without taking the mutex. However, the best way is
844      * to take the lock before sending the message header, and release
845      * the lock after sending the bitmap.
846      */
847     qemu_mutex_lock(&mis->rp_mutex);
848     res = ramblock_recv_bitmap_send(mis->to_src_file, block_name);
849     qemu_mutex_unlock(&mis->rp_mutex);
850 
851     trace_migrate_send_rp_recv_bitmap(block_name, res);
852 }
853 
854 void migrate_send_rp_resume_ack(MigrationIncomingState *mis, uint32_t value)
855 {
856     uint32_t buf;
857 
858     buf = cpu_to_be32(value);
859     migrate_send_rp_message(mis, MIG_RP_MSG_RESUME_ACK, sizeof(buf), &buf);
860 }
861 
862 /*
863  * Return true if we're already in the middle of a migration
864  * (i.e. any of the active or setup states)
865  */
866 bool migration_is_setup_or_active(int state)
867 {
868     switch (state) {
869     case MIGRATION_STATUS_ACTIVE:
870     case MIGRATION_STATUS_POSTCOPY_ACTIVE:
871     case MIGRATION_STATUS_POSTCOPY_PAUSED:
872     case MIGRATION_STATUS_POSTCOPY_RECOVER:
873     case MIGRATION_STATUS_SETUP:
874     case MIGRATION_STATUS_PRE_SWITCHOVER:
875     case MIGRATION_STATUS_DEVICE:
876     case MIGRATION_STATUS_WAIT_UNPLUG:
877     case MIGRATION_STATUS_COLO:
878         return true;
879 
880     default:
881         return false;
882 
883     }
884 }
885 
886 bool migration_is_running(int state)
887 {
888     switch (state) {
889     case MIGRATION_STATUS_ACTIVE:
890     case MIGRATION_STATUS_POSTCOPY_ACTIVE:
891     case MIGRATION_STATUS_POSTCOPY_PAUSED:
892     case MIGRATION_STATUS_POSTCOPY_RECOVER:
893     case MIGRATION_STATUS_SETUP:
894     case MIGRATION_STATUS_PRE_SWITCHOVER:
895     case MIGRATION_STATUS_DEVICE:
896     case MIGRATION_STATUS_WAIT_UNPLUG:
897     case MIGRATION_STATUS_CANCELLING:
898         return true;
899 
900     default:
901         return false;
902 
903     }
904 }
905 
906 static bool migrate_show_downtime(MigrationState *s)
907 {
908     return (s->state == MIGRATION_STATUS_COMPLETED) || migration_in_postcopy();
909 }
910 
911 static void populate_time_info(MigrationInfo *info, MigrationState *s)
912 {
913     info->has_status = true;
914     info->has_setup_time = true;
915     info->setup_time = s->setup_time;
916 
917     if (s->state == MIGRATION_STATUS_COMPLETED) {
918         info->has_total_time = true;
919         info->total_time = s->total_time;
920     } else {
921         info->has_total_time = true;
922         info->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) -
923                            s->start_time;
924     }
925 
926     if (migrate_show_downtime(s)) {
927         info->has_downtime = true;
928         info->downtime = s->downtime;
929     } else {
930         info->has_expected_downtime = true;
931         info->expected_downtime = s->expected_downtime;
932     }
933 }
934 
935 static void populate_ram_info(MigrationInfo *info, MigrationState *s)
936 {
937     size_t page_size = qemu_target_page_size();
938 
939     info->ram = g_malloc0(sizeof(*info->ram));
940     info->ram->transferred = migration_transferred_bytes();
941     info->ram->total = ram_bytes_total();
942     info->ram->duplicate = stat64_get(&mig_stats.zero_pages);
943     /* legacy value.  It is not used anymore */
944     info->ram->skipped = 0;
945     info->ram->normal = stat64_get(&mig_stats.normal_pages);
946     info->ram->normal_bytes = info->ram->normal * page_size;
947     info->ram->mbps = s->mbps;
948     info->ram->dirty_sync_count =
949         stat64_get(&mig_stats.dirty_sync_count);
950     info->ram->dirty_sync_missed_zero_copy =
951         stat64_get(&mig_stats.dirty_sync_missed_zero_copy);
952     info->ram->postcopy_requests =
953         stat64_get(&mig_stats.postcopy_requests);
954     info->ram->page_size = page_size;
955     info->ram->multifd_bytes = stat64_get(&mig_stats.multifd_bytes);
956     info->ram->pages_per_second = s->pages_per_second;
957     info->ram->precopy_bytes = stat64_get(&mig_stats.precopy_bytes);
958     info->ram->downtime_bytes = stat64_get(&mig_stats.downtime_bytes);
959     info->ram->postcopy_bytes = stat64_get(&mig_stats.postcopy_bytes);
960 
961     if (migrate_xbzrle()) {
962         info->xbzrle_cache = g_malloc0(sizeof(*info->xbzrle_cache));
963         info->xbzrle_cache->cache_size = migrate_xbzrle_cache_size();
964         info->xbzrle_cache->bytes = xbzrle_counters.bytes;
965         info->xbzrle_cache->pages = xbzrle_counters.pages;
966         info->xbzrle_cache->cache_miss = xbzrle_counters.cache_miss;
967         info->xbzrle_cache->cache_miss_rate = xbzrle_counters.cache_miss_rate;
968         info->xbzrle_cache->encoding_rate = xbzrle_counters.encoding_rate;
969         info->xbzrle_cache->overflow = xbzrle_counters.overflow;
970     }
971 
972     populate_compress(info);
973 
974     if (cpu_throttle_active()) {
975         info->has_cpu_throttle_percentage = true;
976         info->cpu_throttle_percentage = cpu_throttle_get_percentage();
977     }
978 
979     if (s->state != MIGRATION_STATUS_COMPLETED) {
980         info->ram->remaining = ram_bytes_remaining();
981         info->ram->dirty_pages_rate =
982            stat64_get(&mig_stats.dirty_pages_rate);
983     }
984 
985     if (migrate_dirty_limit() && dirtylimit_in_service()) {
986         info->has_dirty_limit_throttle_time_per_round = true;
987         info->dirty_limit_throttle_time_per_round =
988                             dirtylimit_throttle_time_per_round();
989 
990         info->has_dirty_limit_ring_full_time = true;
991         info->dirty_limit_ring_full_time = dirtylimit_ring_full_time();
992     }
993 }
994 
995 static void populate_disk_info(MigrationInfo *info)
996 {
997     if (blk_mig_active()) {
998         info->disk = g_malloc0(sizeof(*info->disk));
999         info->disk->transferred = blk_mig_bytes_transferred();
1000         info->disk->remaining = blk_mig_bytes_remaining();
1001         info->disk->total = blk_mig_bytes_total();
1002     }
1003 }
1004 
1005 static void fill_source_migration_info(MigrationInfo *info)
1006 {
1007     MigrationState *s = migrate_get_current();
1008     int state = qatomic_read(&s->state);
1009     GSList *cur_blocker = migration_blockers;
1010 
1011     info->blocked_reasons = NULL;
1012 
1013     /*
1014      * There are two types of reasons a migration might be blocked:
1015      * a) devices marked in VMState as non-migratable, and
1016      * b) explicit migration blockers.
1017      * We need to add both of them here.
1018      */
1019     qemu_savevm_non_migratable_list(&info->blocked_reasons);
1020 
1021     while (cur_blocker) {
1022         QAPI_LIST_PREPEND(info->blocked_reasons,
1023                           g_strdup(error_get_pretty(cur_blocker->data)));
1024         cur_blocker = g_slist_next(cur_blocker);
1025     }
1026     info->has_blocked_reasons = info->blocked_reasons != NULL;
1027 
1028     switch (state) {
1029     case MIGRATION_STATUS_NONE:
1030         /* no migration has happened ever */
1031         /* do not overwrite destination migration status */
1032         return;
1033     case MIGRATION_STATUS_SETUP:
1034         info->has_status = true;
1035         info->has_total_time = false;
1036         break;
1037     case MIGRATION_STATUS_ACTIVE:
1038     case MIGRATION_STATUS_CANCELLING:
1039     case MIGRATION_STATUS_POSTCOPY_ACTIVE:
1040     case MIGRATION_STATUS_PRE_SWITCHOVER:
1041     case MIGRATION_STATUS_DEVICE:
1042     case MIGRATION_STATUS_POSTCOPY_PAUSED:
1043     case MIGRATION_STATUS_POSTCOPY_RECOVER:
1044         /* TODO add some postcopy stats */
1045         populate_time_info(info, s);
1046         populate_ram_info(info, s);
1047         populate_disk_info(info);
1048         migration_populate_vfio_info(info);
1049         break;
1050     case MIGRATION_STATUS_COLO:
1051         info->has_status = true;
1052         /* TODO: display COLO specific information (checkpoint info etc.) */
1053         break;
1054     case MIGRATION_STATUS_COMPLETED:
1055         populate_time_info(info, s);
1056         populate_ram_info(info, s);
1057         migration_populate_vfio_info(info);
1058         break;
1059     case MIGRATION_STATUS_FAILED:
1060         info->has_status = true;
1061         break;
1062     case MIGRATION_STATUS_CANCELLED:
1063         info->has_status = true;
1064         break;
1065     case MIGRATION_STATUS_WAIT_UNPLUG:
1066         info->has_status = true;
1067         break;
1068     }
1069     info->status = state;
1070 
1071     QEMU_LOCK_GUARD(&s->error_mutex);
1072     if (s->error) {
1073         info->error_desc = g_strdup(error_get_pretty(s->error));
1074     }
1075 }
1076 
1077 static void fill_destination_migration_info(MigrationInfo *info)
1078 {
1079     MigrationIncomingState *mis = migration_incoming_get_current();
1080 
1081     if (mis->socket_address_list) {
1082         info->has_socket_address = true;
1083         info->socket_address =
1084             QAPI_CLONE(SocketAddressList, mis->socket_address_list);
1085     }
1086 
1087     switch (mis->state) {
1088     case MIGRATION_STATUS_NONE:
1089         return;
1090     case MIGRATION_STATUS_SETUP:
1091     case MIGRATION_STATUS_CANCELLING:
1092     case MIGRATION_STATUS_CANCELLED:
1093     case MIGRATION_STATUS_ACTIVE:
1094     case MIGRATION_STATUS_POSTCOPY_ACTIVE:
1095     case MIGRATION_STATUS_POSTCOPY_PAUSED:
1096     case MIGRATION_STATUS_POSTCOPY_RECOVER:
1097     case MIGRATION_STATUS_FAILED:
1098     case MIGRATION_STATUS_COLO:
1099         info->has_status = true;
1100         break;
1101     case MIGRATION_STATUS_COMPLETED:
1102         info->has_status = true;
1103         fill_destination_postcopy_migration_info(info);
1104         break;
1105     }
1106     info->status = mis->state;
1107 }
1108 
1109 MigrationInfo *qmp_query_migrate(Error **errp)
1110 {
1111     MigrationInfo *info = g_malloc0(sizeof(*info));
1112 
1113     fill_destination_migration_info(info);
1114     fill_source_migration_info(info);
1115 
1116     return info;
1117 }
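
/*
 * QMP usage sketch (output abridged):
 *
 *   -> { "execute": "query-migrate" }
 *   <- { "return": { "status": "active", "ram": { ... }, ... } }
 */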
1118 
1119 void qmp_migrate_start_postcopy(Error **errp)
1120 {
1121     MigrationState *s = migrate_get_current();
1122 
1123     if (!migrate_postcopy()) {
1124         error_setg(errp, "Enable postcopy with migrate_set_capability before"
1125                          " the start of migration");
1126         return;
1127     }
1128 
1129     if (s->state == MIGRATION_STATUS_NONE) {
1130         error_setg(errp, "Postcopy must be started after migration has been"
1131                          " started");
1132         return;
1133     }
1134     /*
1135      * We don't error if migration has finished, since that would be racy
1136      * with issuing this command.
1137      */
1138     qatomic_set(&s->start_postcopy, true);
1139 }
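
/*
 * QMP usage sketch: the postcopy-ram capability must be enabled on both
 * sides before the migration starts (the destination URI is illustrative):
 *
 *   { "execute": "migrate-set-capabilities", "arguments": { "capabilities":
 *       [ { "capability": "postcopy-ram", "state": true } ] } }
 *   { "execute": "migrate", "arguments": { "uri": "tcp:dst-host:4444" } }
 *   { "execute": "migrate-start-postcopy" }
 */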
1140 
1141 /* shared migration helpers */
1142 
1143 void migrate_set_state(int *state, int old_state, int new_state)
1144 {
1145     assert(new_state < MIGRATION_STATUS__MAX);
1146     if (qatomic_cmpxchg(state, old_state, new_state) == old_state) {
1147         trace_migrate_set_state(MigrationStatus_str(new_state));
1148         migrate_generate_event(new_state);
1149     }
1150 }
1151 
1152 static void migrate_fd_cleanup(MigrationState *s)
1153 {
1154     qemu_bh_delete(s->cleanup_bh);
1155     s->cleanup_bh = NULL;
1156 
1157     g_free(s->hostname);
1158     s->hostname = NULL;
1159     json_writer_free(s->vmdesc);
1160     s->vmdesc = NULL;
1161 
1162     qemu_savevm_state_cleanup();
1163 
1164     if (s->to_dst_file) {
1165         QEMUFile *tmp;
1166 
1167         trace_migrate_fd_cleanup();
1168         qemu_mutex_unlock_iothread();
1169         if (s->migration_thread_running) {
1170             qemu_thread_join(&s->thread);
1171             s->migration_thread_running = false;
1172         }
1173         qemu_mutex_lock_iothread();
1174 
1175         multifd_save_cleanup();
1176         qemu_mutex_lock(&s->qemu_file_lock);
1177         tmp = s->to_dst_file;
1178         s->to_dst_file = NULL;
1179         qemu_mutex_unlock(&s->qemu_file_lock);
1180         /*
1181          * Close the file handle without the lock to make sure the
1182          * critical section won't block for long.
1183          */
1184         migration_ioc_unregister_yank_from_file(tmp);
1185         qemu_fclose(tmp);
1186     }
1187 
1188     /*
1189      * We already cleaned up to_dst_file, so errors from the return
1190      * path might be due to that; ignore them.
1191      */
1192     close_return_path_on_source(s);
1193 
1194     assert(!migration_is_active(s));
1195 
1196     if (s->state == MIGRATION_STATUS_CANCELLING) {
1197         migrate_set_state(&s->state, MIGRATION_STATUS_CANCELLING,
1198                           MIGRATION_STATUS_CANCELLED);
1199     }
1200 
1201     if (s->error) {
1202         /* It is used on info migrate.  We can't free it */
1203         error_report_err(error_copy(s->error));
1204     }
1205     migration_call_notifiers(s);
1206     block_cleanup_parameters();
1207     yank_unregister_instance(MIGRATION_YANK_INSTANCE);
1208 }
1209 
1210 static void migrate_fd_cleanup_schedule(MigrationState *s)
1211 {
1212     /*
1213      * Ref the state for the bh, because it may be called when
1214      * there are already no other refs.
1215      */
1216     object_ref(OBJECT(s));
1217     qemu_bh_schedule(s->cleanup_bh);
1218 }
1219 
1220 static void migrate_fd_cleanup_bh(void *opaque)
1221 {
1222     MigrationState *s = opaque;
1223     migrate_fd_cleanup(s);
1224     object_unref(OBJECT(s));
1225 }
1226 
1227 void migrate_set_error(MigrationState *s, const Error *error)
1228 {
1229     QEMU_LOCK_GUARD(&s->error_mutex);
1230     if (!s->error) {
1231         s->error = error_copy(error);
1232     }
1233 }
1234 
1235 bool migrate_has_error(MigrationState *s)
1236 {
1237     /* The lock is not helpful here, but still follow the rule */
1238     QEMU_LOCK_GUARD(&s->error_mutex);
1239     return qatomic_read(&s->error);
1240 }
1241 
1242 static void migrate_error_free(MigrationState *s)
1243 {
1244     QEMU_LOCK_GUARD(&s->error_mutex);
1245     if (s->error) {
1246         error_free(s->error);
1247         s->error = NULL;
1248     }
1249 }
1250 
1251 static void migrate_fd_error(MigrationState *s, const Error *error)
1252 {
1253     trace_migrate_fd_error(error_get_pretty(error));
1254     assert(s->to_dst_file == NULL);
1255     migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
1256                       MIGRATION_STATUS_FAILED);
1257     migrate_set_error(s, error);
1258 }
1259 
1260 static void migrate_fd_cancel(MigrationState *s)
1261 {
1262     int old_state;
1263 
1264     trace_migrate_fd_cancel();
1265 
1266     WITH_QEMU_LOCK_GUARD(&s->qemu_file_lock) {
1267         if (s->rp_state.from_dst_file) {
1268             /* shutdown the rp socket, causing the rp thread to shut down */
1269             qemu_file_shutdown(s->rp_state.from_dst_file);
1270         }
1271     }
1272 
1273     do {
1274         old_state = s->state;
1275         if (!migration_is_running(old_state)) {
1276             break;
1277         }
1278         /* If the migration is paused, kick it out of the pause */
1279         if (old_state == MIGRATION_STATUS_PRE_SWITCHOVER) {
1280             qemu_sem_post(&s->pause_sem);
1281         }
1282         migrate_set_state(&s->state, old_state, MIGRATION_STATUS_CANCELLING);
1283     } while (s->state != MIGRATION_STATUS_CANCELLING);
1284 
1285     /*
1286      * If we're unlucky the migration code might be stuck somewhere in a
1287      * send/write while the network has failed and is waiting to timeout;
1288      * if we've got shutdown(2) available then we can force it to quit.
1289      */
1290     if (s->state == MIGRATION_STATUS_CANCELLING) {
1291         WITH_QEMU_LOCK_GUARD(&s->qemu_file_lock) {
1292             if (s->to_dst_file) {
1293                 qemu_file_shutdown(s->to_dst_file);
1294             }
1295         }
1296     }
1297     if (s->state == MIGRATION_STATUS_CANCELLING && s->block_inactive) {
1298         Error *local_err = NULL;
1299 
1300         bdrv_activate_all(&local_err);
1301         if (local_err) {
1302             error_report_err(local_err);
1303         } else {
1304             s->block_inactive = false;
1305         }
1306     }
1307 }
1308 
1309 void migration_add_notifier(Notifier *notify,
1310                             void (*func)(Notifier *notifier, void *data))
1311 {
1312     notify->notify = func;
1313     notifier_list_add(&migration_state_notifiers, notify);
1314 }
1315 
1316 void migration_remove_notifier(Notifier *notify)
1317 {
1318     if (notify->notify) {
1319         notifier_remove(notify);
1320         notify->notify = NULL;
1321     }
1322 }
1323 
1324 void migration_call_notifiers(MigrationState *s)
1325 {
1326     notifier_list_notify(&migration_state_notifiers, s);
1327 }
1328 
1329 bool migration_in_setup(MigrationState *s)
1330 {
1331     return s->state == MIGRATION_STATUS_SETUP;
1332 }
1333 
1334 bool migration_has_finished(MigrationState *s)
1335 {
1336     return s->state == MIGRATION_STATUS_COMPLETED;
1337 }
1338 
1339 bool migration_has_failed(MigrationState *s)
1340 {
1341     return (s->state == MIGRATION_STATUS_CANCELLED ||
1342             s->state == MIGRATION_STATUS_FAILED);
1343 }
1344 
1345 bool migration_in_postcopy(void)
1346 {
1347     MigrationState *s = migrate_get_current();
1348 
1349     switch (s->state) {
1350     case MIGRATION_STATUS_POSTCOPY_ACTIVE:
1351     case MIGRATION_STATUS_POSTCOPY_PAUSED:
1352     case MIGRATION_STATUS_POSTCOPY_RECOVER:
1353         return true;
1354     default:
1355         return false;
1356     }
1357 }
1358 
1359 bool migration_in_postcopy_after_devices(MigrationState *s)
1360 {
1361     return migration_in_postcopy() && s->postcopy_after_devices;
1362 }
1363 
1364 bool migration_in_incoming_postcopy(void)
1365 {
1366     PostcopyState ps = postcopy_state_get();
1367 
1368     return ps >= POSTCOPY_INCOMING_DISCARD && ps < POSTCOPY_INCOMING_END;
1369 }
1370 
1371 bool migration_incoming_postcopy_advised(void)
1372 {
1373     PostcopyState ps = postcopy_state_get();
1374 
1375     return ps >= POSTCOPY_INCOMING_ADVISE && ps < POSTCOPY_INCOMING_END;
1376 }
1377 
1378 bool migration_in_bg_snapshot(void)
1379 {
1380     MigrationState *s = migrate_get_current();
1381 
1382     return migrate_background_snapshot() &&
1383             migration_is_setup_or_active(s->state);
1384 }
1385 
1386 bool migration_is_idle(void)
1387 {
1388     MigrationState *s = current_migration;
1389 
1390     if (!s) {
1391         return true;
1392     }
1393 
1394     switch (s->state) {
1395     case MIGRATION_STATUS_NONE:
1396     case MIGRATION_STATUS_CANCELLED:
1397     case MIGRATION_STATUS_COMPLETED:
1398     case MIGRATION_STATUS_FAILED:
1399         return true;
1400     case MIGRATION_STATUS_SETUP:
1401     case MIGRATION_STATUS_CANCELLING:
1402     case MIGRATION_STATUS_ACTIVE:
1403     case MIGRATION_STATUS_POSTCOPY_ACTIVE:
1404     case MIGRATION_STATUS_COLO:
1405     case MIGRATION_STATUS_PRE_SWITCHOVER:
1406     case MIGRATION_STATUS_DEVICE:
1407     case MIGRATION_STATUS_WAIT_UNPLUG:
1408         return false;
1409     case MIGRATION_STATUS__MAX:
1410         g_assert_not_reached();
1411     }
1412 
1413     return false;
1414 }
1415 
1416 bool migration_is_active(MigrationState *s)
1417 {
1418     return (s->state == MIGRATION_STATUS_ACTIVE ||
1419             s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE);
1420 }
1421 
1422 int migrate_init(MigrationState *s, Error **errp)
1423 {
1424     int ret;
1425 
1426     ret = qemu_savevm_state_prepare(errp);
1427     if (ret) {
1428         return ret;
1429     }
1430 
1431     /*
1432      * Reinitialise all migration state, except
1433      * parameters/capabilities that the user set, and
1434      * locks.
1435      */
1436     s->cleanup_bh = 0;
1437     s->vm_start_bh = 0;
1438     s->to_dst_file = NULL;
1439     s->state = MIGRATION_STATUS_NONE;
1440     s->rp_state.from_dst_file = NULL;
1441     s->rp_state.error = false;
1442     s->mbps = 0.0;
1443     s->pages_per_second = 0.0;
1444     s->downtime = 0;
1445     s->expected_downtime = 0;
1446     s->setup_time = 0;
1447     s->start_postcopy = false;
1448     s->postcopy_after_devices = false;
1449     s->migration_thread_running = false;
1450     error_free(s->error);
1451     s->error = NULL;
1452     s->hostname = NULL;
1453     s->vmdesc = NULL;
1454 
1455     migrate_set_state(&s->state, MIGRATION_STATUS_NONE, MIGRATION_STATUS_SETUP);
1456 
1457     s->start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
1458     s->total_time = 0;
1459     s->vm_old_state = -1;
1460     s->iteration_initial_bytes = 0;
1461     s->threshold_size = 0;
1462     s->switchover_acked = false;
1463     s->rdma_migration = false;
1464     /*
1465      * set mig_stats memory to zero for a new migration
1466      */
1467     memset(&mig_stats, 0, sizeof(mig_stats));
1468     migration_reset_vfio_bytes_transferred();
1469 
1470     return 0;
1471 }
1472 
1473 int migrate_add_blocker_internal(Error **reasonp, Error **errp)
1474 {
1475     /* Snapshots are similar to migrations, so check RUN_STATE_SAVE_VM too. */
1476     if (runstate_check(RUN_STATE_SAVE_VM) || !migration_is_idle()) {
1477         error_propagate_prepend(errp, *reasonp,
1478                                 "disallowing migration blocker "
1479                                 "(migration/snapshot in progress) for: ");
1480         *reasonp = NULL;
1481         return -EBUSY;
1482     }
1483 
1484     migration_blockers = g_slist_prepend(migration_blockers, *reasonp);
1485     return 0;
1486 }
1487 
1488 int migrate_add_blocker(Error **reasonp, Error **errp)
1489 {
1490     if (only_migratable) {
1491         error_propagate_prepend(errp, *reasonp,
1492                                 "disallowing migration blocker "
1493                                 "(--only-migratable) for: ");
1494         *reasonp = NULL;
1495         return -EACCES;
1496     }
1497 
1498     return migrate_add_blocker_internal(reasonp, errp);
1499 }
1500 
1501 void migrate_del_blocker(Error **reasonp)
1502 {
1503     if (*reasonp) {
1504         migration_blockers = g_slist_remove(migration_blockers, *reasonp);
1505         error_free(*reasonp);
1506         *reasonp = NULL;
1507     }
1508 }
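
/*
 * Usage sketch for the blocker API above (the device name and error
 * text are hypothetical):
 *
 *   Error *blocker = NULL;
 *
 *   error_setg(&blocker, "Device 'foo' does not support migration");
 *   if (migrate_add_blocker(&blocker, errp) < 0) {
 *       return;   // the blocker Error was consumed and *errp set
 *   }
 *   ...
 *   migrate_del_blocker(&blocker);   // frees the Error, NULLs the pointer
 */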
1509 
1510 void qmp_migrate_incoming(const char *uri, Error **errp)
1511 {
1512     Error *local_err = NULL;
1513     static bool once = true;
1514 
1515     if (!once) {
1516         error_setg(errp, "The incoming migration has already been started");
1517         return;
1518     }
1519     if (!runstate_check(RUN_STATE_INMIGRATE)) {
1520         error_setg(errp, "'-incoming' was not specified on the command line");
1521         return;
1522     }
1523 
1524     if (!yank_register_instance(MIGRATION_YANK_INSTANCE, errp)) {
1525         return;
1526     }
1527 
1528     qemu_start_incoming_migration(uri, &local_err);
1529 
1530     if (local_err) {
1531         yank_unregister_instance(MIGRATION_YANK_INSTANCE);
1532         error_propagate(errp, local_err);
1533         return;
1534     }
1535 
1536     once = false;
1537 }
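
/*
 * QMP usage sketch, for a VM started with '-incoming defer' (the
 * address is illustrative):
 *
 *   { "execute": "migrate-incoming",
 *     "arguments": { "uri": "tcp:0.0.0.0:4444" } }
 */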
1538 
1539 void qmp_migrate_recover(const char *uri, Error **errp)
1540 {
1541     MigrationIncomingState *mis = migration_incoming_get_current();
1542 
1543     /*
1544      * Don't even bother to use ERRP_GUARD() as it _must_ always be set by
1545      * callers (no one should ignore a recover failure); if one does, it's a
1546      * programming error.
1547      */
1548     assert(errp);
1549 
1550     if (mis->state != MIGRATION_STATUS_POSTCOPY_PAUSED) {
1551         error_setg(errp, "Migrate recover can only be run "
1552                    "when postcopy is paused.");
1553         return;
1554     }
1555 
1556     /* If there's an existing transport, release it */
1557     migration_incoming_transport_cleanup(mis);
1558 
1559     /*
1560      * Note that this call will never start a real migration; it will
1561      * only re-setup the migration stream and poke the existing migration
1562      * to continue using that newly established channel.
1563      */
1564     qemu_start_incoming_migration(uri, errp);
1565 }
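
/*
 * QMP usage sketch, run on the destination while postcopy is paused
 * (the address is illustrative):
 *
 *   { "execute": "migrate-recover",
 *     "arguments": { "uri": "tcp:0.0.0.0:5555" } }
 */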
1566 
1567 void qmp_migrate_pause(Error **errp)
1568 {
1569     MigrationState *ms = migrate_get_current();
1570     MigrationIncomingState *mis = migration_incoming_get_current();
1571     int ret = 0;
1572 
1573     if (ms->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
1574         /* Source side, during postcopy */
1575         qemu_mutex_lock(&ms->qemu_file_lock);
1576         if (ms->to_dst_file) {
1577             ret = qemu_file_shutdown(ms->to_dst_file);
1578         }
1579         qemu_mutex_unlock(&ms->qemu_file_lock);
1580         if (ret) {
1581             error_setg(errp, "Failed to pause source migration");
1582         }
1583         return;
1584     }
1585 
1586     if (mis->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
1587         ret = qemu_file_shutdown(mis->from_src_file);
1588         if (ret) {
1589             error_setg(errp, "Failed to pause destination migration");
1590         }
1591         return;
1592     }
1593 
1594     error_setg(errp, "migrate-pause is currently only supported "
1595                "during postcopy-active state");
1596 }
1597 
1598 bool migration_is_blocked(Error **errp)
1599 {
1600     if (qemu_savevm_state_blocked(errp)) {
1601         return true;
1602     }
1603 
1604     if (migration_blockers) {
1605         error_propagate(errp, error_copy(migration_blockers->data));
1606         return true;
1607     }
1608 
1609     return false;
1610 }
1611 
1612 /* Returns true if we should continue to migrate, or false if an error was detected */
1613 static bool migrate_prepare(MigrationState *s, bool blk, bool blk_inc,
1614                             bool resume, Error **errp)
1615 {
1616     Error *local_err = NULL;
1617 
1618     if (blk_inc) {
1619         warn_report("parameter 'inc' is deprecated;"
1620                     " use blockdev-mirror with NBD instead");
1621     }
1622 
1623     if (blk) {
1624         warn_report("parameter 'blk' is deprecated;"
1625                     " use blockdev-mirror with NBD instead");
1626     }
1627 
1628     if (resume) {
1629         if (s->state != MIGRATION_STATUS_POSTCOPY_PAUSED) {
1630             error_setg(errp, "Cannot resume if there is no "
1631                        "paused migration");
1632             return false;
1633         }
1634 
1635         /*
1636          * Postcopy recovery won't work well with the release-ram
1637          * capability, since release-ram drops the page buffer as
1638          * soon as the page is put into the send buffer.  So if a
1639          * network failure happens, any page buffers that have
1640          * not yet reached the destination VM but have already been
1641          * sent from the source VM will be lost forever.  Let's refuse
1642          * to let the client resume such a postcopy migration.
1643          * Luckily release-ram was designed to only be used when the
1644          * source and destination VMs are on the same host, so it
1645          * should be fine.
1646          */
1647         if (migrate_release_ram()) {
1648             error_setg(errp, "Postcopy recovery cannot work "
1649                        "when release-ram capability is set");
1650             return false;
1651         }
1652 
1653         /* This is a resume, skip init status */
1654         return true;
1655     }
1656 
1657     if (migration_is_running(s->state)) {
1658         error_setg(errp, QERR_MIGRATION_ACTIVE);
1659         return false;
1660     }
1661 
1662     if (runstate_check(RUN_STATE_INMIGRATE)) {
1663         error_setg(errp, "Guest is waiting for an incoming migration");
1664         return false;
1665     }
1666 
1667     if (runstate_check(RUN_STATE_POSTMIGRATE)) {
1668         error_setg(errp, "Can't migrate the vm that was paused due to "
1669                    "previous migration");
1670         return false;
1671     }
1672 
1673     if (migration_is_blocked(errp)) {
1674         return false;
1675     }
1676 
1677     if (blk || blk_inc) {
1678         if (migrate_colo()) {
1679             error_setg(errp, "No disk migration is required in COLO mode");
1680             return false;
1681         }
1682         if (migrate_block() || migrate_block_incremental()) {
1683             error_setg(errp, "Command options are incompatible with "
1684                        "current migration capabilities");
1685             return false;
1686         }
1687         if (!migrate_cap_set(MIGRATION_CAPABILITY_BLOCK, true, &local_err)) {
1688             error_propagate(errp, local_err);
1689             return false;
1690         }
1691         s->must_remove_block_options = true;
1692     }
1693 
1694     if (blk_inc) {
1695         migrate_set_block_incremental(true);
1696     }
1697 
1698     if (migrate_init(s, errp)) {
1699         return false;
1700     }
1701 
1702     return true;
1703 }
1704 
1705 void qmp_migrate(const char *uri, bool has_blk, bool blk,
1706                  bool has_inc, bool inc, bool has_detach, bool detach,
1707                  bool has_resume, bool resume, Error **errp)
1708 {
1709     bool resume_requested;
1710     Error *local_err = NULL;
1711     MigrationState *s = migrate_get_current();
1712     const char *p = NULL;
1713 
1714     /* URI is not suitable for migration? */
1715     if (!migration_channels_and_uri_compatible(uri, errp)) {
1716         return;
1717     }
1718 
1719     resume_requested = has_resume && resume;
1720     if (!migrate_prepare(s, has_blk && blk, has_inc && inc,
1721                          resume_requested, errp)) {
1722         /* Error detected, put into errp */
1723         return;
1724     }
1725 
1726     if (!resume_requested) {
1727         if (!yank_register_instance(MIGRATION_YANK_INSTANCE, errp)) {
1728             return;
1729         }
1730     }
1731 
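     /*
      * Dispatch on the URI scheme.  For "tcp:" the prefix is stripped
      * here; "unix:" and "vsock:" URIs are passed through whole and the
      * socket code parses the scheme itself.
      */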
1732     if (strstart(uri, "tcp:", &p) ||
1733         strstart(uri, "unix:", NULL) ||
1734         strstart(uri, "vsock:", NULL)) {
1735         socket_start_outgoing_migration(s, p ? p : uri, &local_err);
1736 #ifdef CONFIG_RDMA
1737     } else if (strstart(uri, "rdma:", &p)) {
1738         rdma_start_outgoing_migration(s, p, &local_err);
1739 #endif
1740     } else if (strstart(uri, "exec:", &p)) {
1741         exec_start_outgoing_migration(s, p, &local_err);
1742     } else if (strstart(uri, "fd:", &p)) {
1743         fd_start_outgoing_migration(s, p, &local_err);
1744     } else if (strstart(uri, "file:", &p)) {
1745         file_start_outgoing_migration(s, p, &local_err);
1746     } else {
1747         error_setg(&local_err, QERR_INVALID_PARAMETER_VALUE, "uri",
1748                    "a valid migration protocol");
1749         migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
1750                           MIGRATION_STATUS_FAILED);
1751         block_cleanup_parameters();
1752     }
1753 
1754     if (local_err) {
1755         if (!resume_requested) {
1756             yank_unregister_instance(MIGRATION_YANK_INSTANCE);
1757         }
1758         migrate_fd_error(s, local_err);
1759         error_propagate(errp, local_err);
1760         return;
1761     }
1762 }
1763 
1764 void qmp_migrate_cancel(Error **errp)
1765 {
1766     migration_cancel(NULL);
1767 }
1768 
1769 void qmp_migrate_continue(MigrationStatus state, Error **errp)
1770 {
1771     MigrationState *s = migrate_get_current();
1772     if (s->state != state) {
1773         error_setg(errp, "Migration not in expected state: %s",
1774                    MigrationStatus_str(s->state));
1775         return;
1776     }
1777     qemu_sem_post(&s->pause_sem);
1778 }
1779 
1780 /* migration thread support */
1781 /*
1782  * Something bad happened to the RP stream; mark an error.
1783  * The caller shall print or trace something to indicate why.
1784  */
1785 static void mark_source_rp_bad(MigrationState *s)
1786 {
1787     s->rp_state.error = true;
1788 }
1789 
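     /* Block the calling thread until the return path thread kicks us */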
1790 void migration_rp_wait(MigrationState *s)
1791 {
1792     qemu_sem_wait(&s->rp_state.rp_sem);
1793 }
1794 
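     /* Wake up a thread blocked in migration_rp_wait() */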
1795 void migration_rp_kick(MigrationState *s)
1796 {
1797     qemu_sem_post(&s->rp_state.rp_sem);
1798 }
1799 
1800 static struct rp_cmd_args {
1801     ssize_t     len; /* -1 = variable */
1802     const char *name;
1803 } rp_cmd_args[] = {
1804     [MIG_RP_MSG_INVALID]        = { .len = -1, .name = "INVALID" },
1805     [MIG_RP_MSG_SHUT]           = { .len =  4, .name = "SHUT" },
1806     [MIG_RP_MSG_PONG]           = { .len =  4, .name = "PONG" },
1807     [MIG_RP_MSG_REQ_PAGES]      = { .len = 12, .name = "REQ_PAGES" },
1808     [MIG_RP_MSG_REQ_PAGES_ID]   = { .len = -1, .name = "REQ_PAGES_ID" },
1809     [MIG_RP_MSG_RECV_BITMAP]    = { .len = -1, .name = "RECV_BITMAP" },
1810     [MIG_RP_MSG_RESUME_ACK]     = { .len =  4, .name = "RESUME_ACK" },
1811     [MIG_RP_MSG_SWITCHOVER_ACK] = { .len =  0, .name = "SWITCHOVER_ACK" },
1812     [MIG_RP_MSG_MAX]            = { .len = -1, .name = "MAX" },
1813 };
1814 
1815 /*
1816  * Process a request for pages received on the return path,
1817  * We're allowed to send more than requested (e.g. to round to our page size)
1818  * and we don't need to send pages that have already been sent.
1819  */
1820 static void migrate_handle_rp_req_pages(MigrationState *ms, const char *rbname,
1821                                         ram_addr_t start, size_t len)
1822 {
1823     long our_host_ps = qemu_real_host_page_size();
1824 
1825     trace_migrate_handle_rp_req_pages(rbname, start, len);
1826 
1827     /*
1828      * Since we currently insist on matching page sizes, just sanity check
1829      * we're being asked for whole host pages.
1830      */
1831     if (!QEMU_IS_ALIGNED(start, our_host_ps) ||
1832         !QEMU_IS_ALIGNED(len, our_host_ps)) {
1833         error_report("%s: Misaligned page request, start: " RAM_ADDR_FMT
1834                      " len: %zd", __func__, start, len);
1835         mark_source_rp_bad(ms);
1836         return;
1837     }
1838 
1839     if (ram_save_queue_pages(rbname, start, len)) {
1840         mark_source_rp_bad(ms);
1841     }
1842 }
1843 
1844 static int migrate_handle_rp_recv_bitmap(MigrationState *s, char *block_name)
1845 {
1846     RAMBlock *block = qemu_ram_block_by_name(block_name);
1847 
1848     if (!block) {
1849         error_report("%s: invalid block name '%s'", __func__, block_name);
1850         return -EINVAL;
1851     }
1852 
1853     /* Fetch the received bitmap and refresh the dirty bitmap */
1854     return ram_dirty_bitmap_reload(s, block);
1855 }
1856 
1857 static int migrate_handle_rp_resume_ack(MigrationState *s, uint32_t value)
1858 {
1859     trace_source_return_path_thread_resume_ack(value);
1860 
1861     if (value != MIGRATION_RESUME_ACK_VALUE) {
1862         error_report("%s: illegal resume_ack value %"PRIu32,
1863                      __func__, value);
1864         return -1;
1865     }
1866 
1867     /* Now both sides are active. */
1868     migrate_set_state(&s->state, MIGRATION_STATUS_POSTCOPY_RECOVER,
1869                       MIGRATION_STATUS_POSTCOPY_ACTIVE);
1870 
1871     /* Notify the send thread that it's time to continue sending pages */
1872     migration_rp_kick(s);
1873 
1874     return 0;
1875 }
1876 
1877 /*
1878  * Release ms->rp_state.from_dst_file (and postcopy_qemufile_src if
1879  * existed) in a safe way.
1880  */
1881 static void migration_release_dst_files(MigrationState *ms)
1882 {
1883     QEMUFile *file;
1884 
1885     WITH_QEMU_LOCK_GUARD(&ms->qemu_file_lock) {
1886         /*
1887          * Reset the from_dst_file pointer first before releasing it, as
1888          * we can't block within the lock section
1889          */
1890         file = ms->rp_state.from_dst_file;
1891         ms->rp_state.from_dst_file = NULL;
1892     }
1893 
1894     /*
1895      * Do the same for the postcopy fast path socket, if there is one.
1896      * No locking is needed because this qemufile is only managed by
1897      * the return path thread.
1898      */
1899     if (ms->postcopy_qemufile_src) {
1900         migration_ioc_unregister_yank_from_file(ms->postcopy_qemufile_src);
1901         qemu_file_shutdown(ms->postcopy_qemufile_src);
1902         qemu_fclose(ms->postcopy_qemufile_src);
1903         ms->postcopy_qemufile_src = NULL;
1904     }
1905 
1906     qemu_fclose(file);
1907 }
1908 
1909 /*
1910  * Handles messages sent on the return path towards the source VM
1911  */
1913 static void *source_return_path_thread(void *opaque)
1914 {
1915     MigrationState *ms = opaque;
1916     QEMUFile *rp = ms->rp_state.from_dst_file;
1917     uint16_t header_len, header_type;
1918     uint8_t buf[512];
1919     uint32_t tmp32, sibling_error;
1920     ram_addr_t start = 0; /* =0 to silence warning */
1921     size_t  len = 0, expected_len;
1922     int res;
1923 
1924     trace_source_return_path_thread_entry();
1925     rcu_register_thread();
1926 
1927     while (!ms->rp_state.error && !qemu_file_get_error(rp) &&
1928            migration_is_setup_or_active(ms->state)) {
1929         trace_source_return_path_thread_loop_top();
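             /* Each message starts with a header: type (be16) + payload length (be16) */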
1930         header_type = qemu_get_be16(rp);
1931         header_len = qemu_get_be16(rp);
1932 
1933         if (qemu_file_get_error(rp)) {
1934             mark_source_rp_bad(ms);
1935             goto out;
1936         }
1937 
1938         if (header_type >= MIG_RP_MSG_MAX ||
1939             header_type == MIG_RP_MSG_INVALID) {
1940             error_report("RP: Received invalid message 0x%04x length 0x%04x",
1941                          header_type, header_len);
1942             mark_source_rp_bad(ms);
1943             goto out;
1944         }
1945 
1946         if ((rp_cmd_args[header_type].len != -1 &&
1947             header_len != rp_cmd_args[header_type].len) ||
1948             header_len > sizeof(buf)) {
1949             error_report("RP: Received '%s' message (0x%04x) with"
1950                          " incorrect length %d expecting %zu",
1951                          rp_cmd_args[header_type].name, header_type, header_len,
1952                          (size_t)rp_cmd_args[header_type].len);
1953             mark_source_rp_bad(ms);
1954             goto out;
1955         }
1956 
1957         /* We know we've got a valid header by this point */
1958         res = qemu_get_buffer(rp, buf, header_len);
1959         if (res != header_len) {
1960             error_report("RP: Failed reading data for message 0x%04x"
1961                          " read %d expected %d",
1962                          header_type, res, header_len);
1963             mark_source_rp_bad(ms);
1964             goto out;
1965         }
1966 
1967         /* OK, we have the message and the data */
1968         switch (header_type) {
1969         case MIG_RP_MSG_SHUT:
1970             sibling_error = ldl_be_p(buf);
1971             trace_source_return_path_thread_shut(sibling_error);
1972             if (sibling_error) {
1973                 error_report("RP: Sibling indicated error %d", sibling_error);
1974                 mark_source_rp_bad(ms);
1975             }
1976             /*
1977              * We'll let the main thread deal with closing the RP;
1978              * we could do a shutdown(2) on it, but we're the only user
1979              * anyway, so there's nothing gained.
1980              */
1981             goto out;
1982 
1983         case MIG_RP_MSG_PONG:
1984             tmp32 = ldl_be_p(buf);
1985             trace_source_return_path_thread_pong(tmp32);
1986             qemu_sem_post(&ms->rp_state.rp_pong_acks);
1987             break;
1988 
1989         case MIG_RP_MSG_REQ_PAGES:
1990             start = ldq_be_p(buf);
1991             len = ldl_be_p(buf + 8);
1992             migrate_handle_rp_req_pages(ms, NULL, start, len);
1993             break;
1994 
1995         case MIG_RP_MSG_REQ_PAGES_ID:
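             /*
              * Payload: start (be64) + len (be32) + idstr-len (1 byte) +
              * idstr (idstr-len bytes, not NUL-terminated on the wire).
              */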
1996             expected_len = 12 + 1; /* header + termination */
1997 
1998             if (header_len >= expected_len) {
1999                 start = ldq_be_p(buf);
2000                 len = ldl_be_p(buf + 8);
2001                 /* Now we expect an idstr */
2002                 tmp32 = buf[12]; /* Length of the following idstr */
2003                 buf[13 + tmp32] = '\0';
2004                 expected_len += tmp32;
2005             }
2006             if (header_len != expected_len) {
2007                 error_report("RP: Req_Page_id with length %d expecting %zd",
2008                              header_len, expected_len);
2009                 mark_source_rp_bad(ms);
2010                 goto out;
2011             }
2012             migrate_handle_rp_req_pages(ms, (char *)&buf[13], start, len);
2013             break;
2014 
2015         case MIG_RP_MSG_RECV_BITMAP:
2016             if (header_len < 1) {
2017                 error_report("%s: missing block name", __func__);
2018                 mark_source_rp_bad(ms);
2019                 goto out;
2020             }
2021             /* Format: len (1B) + idstr (<255B); NUL-terminate the idstr */
2022             buf[buf[0] + 1] = '\0';
2023             if (migrate_handle_rp_recv_bitmap(ms, (char *)(buf + 1))) {
2024                 mark_source_rp_bad(ms);
2025                 goto out;
2026             }
2027             break;
2028 
2029         case MIG_RP_MSG_RESUME_ACK:
2030             tmp32 = ldl_be_p(buf);
2031             if (migrate_handle_rp_resume_ack(ms, tmp32)) {
2032                 mark_source_rp_bad(ms);
2033                 goto out;
2034             }
2035             break;
2036 
2037         case MIG_RP_MSG_SWITCHOVER_ACK:
2038             ms->switchover_acked = true;
2039             trace_source_return_path_thread_switchover_acked();
2040             break;
2041 
2042         default:
2043             break;
2044         }
2045     }
2046 
2047 out:
2048     if (qemu_file_get_error(rp)) {
2049         trace_source_return_path_thread_bad_end();
2050         mark_source_rp_bad(ms);
2051     }
2052 
2053     trace_source_return_path_thread_end();
2054     rcu_unregister_thread();
2055     return NULL;
2056 }
2057 
2058 static int open_return_path_on_source(MigrationState *ms)
2059 {
2060     ms->rp_state.from_dst_file = qemu_file_get_return_path(ms->to_dst_file);
2061     if (!ms->rp_state.from_dst_file) {
2062         return -1;
2063     }
2064 
2065     trace_open_return_path_on_source();
2066 
2067     qemu_thread_create(&ms->rp_state.rp_thread, "return path",
2068                        source_return_path_thread, ms, QEMU_THREAD_JOINABLE);
2069     ms->rp_state.rp_thread_created = true;
2070 
2071     trace_open_return_path_on_source_continue();
2072 
2073     return 0;
2074 }
2075 
2076 static int close_return_path_on_source(MigrationState *ms)
2077 {
2078     int ret;
2079 
2080     if (!ms->rp_state.rp_thread_created) {
2081         return 0;
2082     }
2083 
2084     trace_migration_return_path_end_before();
2085 
2086     /*
2087      * If this is a normal exit then the destination will send a SHUT
2088      * and the rp_thread will exit; however, if there's an error we
2089      * need to cause it to exit. shutdown(2), if we have it, will
2090      * cause it to unblock if it's stuck waiting for the destination.
2091      */
2092     WITH_QEMU_LOCK_GUARD(&ms->qemu_file_lock) {
2093         if (ms->to_dst_file && ms->rp_state.from_dst_file &&
2094             qemu_file_get_error(ms->to_dst_file)) {
2095             qemu_file_shutdown(ms->rp_state.from_dst_file);
2096         }
2097     }
2098 
2099     trace_await_return_path_close_on_source_joining();
2100     qemu_thread_join(&ms->rp_state.rp_thread);
2101     ms->rp_state.rp_thread_created = false;
2102     trace_await_return_path_close_on_source_close();
2103 
2104     ret = ms->rp_state.error;
2105     ms->rp_state.error = false;
2106 
2107     migration_release_dst_files(ms);
2108 
2109     trace_migration_return_path_end_after(ret);
2110     return ret;
2111 }
2112 
2113 static inline void
2114 migration_wait_main_channel(MigrationState *ms)
2115 {
2116     /* Wait until one PONG message received */
2117     qemu_sem_wait(&ms->rp_state.rp_pong_acks);
2118 }
2119 
2120 /*
2121  * Switch from normal iteration to postcopy
2122  * Returns non-0 on error
2123  */
2124 static int postcopy_start(MigrationState *ms, Error **errp)
2125 {
2126     int ret;
2127     QIOChannelBuffer *bioc;
2128     QEMUFile *fb;
2129     int64_t time_at_stop = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
2130     uint64_t bandwidth = migrate_max_postcopy_bandwidth();
2131     bool restart_block = false;
2132     int cur_state = MIGRATION_STATUS_ACTIVE;
2133 
2134     if (migrate_postcopy_preempt()) {
2135         migration_wait_main_channel(ms);
2136         if (postcopy_preempt_establish_channel(ms)) {
2137             migrate_set_state(&ms->state, ms->state, MIGRATION_STATUS_FAILED);
2138             return -1;
2139         }
2140     }
2141 
2142     if (!migrate_pause_before_switchover()) {
2143         migrate_set_state(&ms->state, MIGRATION_STATUS_ACTIVE,
2144                           MIGRATION_STATUS_POSTCOPY_ACTIVE);
2145     }
2146 
2147     trace_postcopy_start();
2148     qemu_mutex_lock_iothread();
2149     trace_postcopy_start_set_run();
2150 
2151     qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER, NULL);
2152     global_state_store();
2153     ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
2154     if (ret < 0) {
2155         goto fail;
2156     }
2157 
2158     ret = migration_maybe_pause(ms, &cur_state,
2159                                 MIGRATION_STATUS_POSTCOPY_ACTIVE);
2160     if (ret < 0) {
2161         goto fail;
2162     }
2163 
2164     ret = bdrv_inactivate_all();
2165     if (ret < 0) {
2166         goto fail;
2167     }
2168     restart_block = true;
2169 
2170     /*
2171      * Cause any non-postcopiable, but iterative devices to
2172      * send out their final data.
2173      */
2174     qemu_savevm_state_complete_precopy(ms->to_dst_file, true, false);
2175 
2176     /*
2177      * In the "finish migrate" state, with the io-lock held, everything
2178      * should be quiet, but we've potentially still got dirty pages and
2179      * we need to tell the destination to throw away any pages it's
2180      * already received that are dirty
2181      */
2182     if (migrate_postcopy_ram()) {
2183         ram_postcopy_send_discard_bitmap(ms);
2184     }
2185 
2186     /*
2187      * Send the rest of the state: note that devices doing postcopy
2188      * will notice we're in POSTCOPY_ACTIVE and not actually wrap
2189      * their state up here
2190      */
2191     migration_rate_set(bandwidth);
2192     if (migrate_postcopy_ram()) {
2193         /* Ping just for debugging, helps line traces up */
2194         qemu_savevm_send_ping(ms->to_dst_file, 2);
2195     }
2196 
2197     /*
2198      * While loading the device state we may trigger page transfer
2199      * requests and the fd must be free to process those, and thus
2200      * the destination must read the whole device state off the fd before
2201      * it starts processing it.  Unfortunately the ad-hoc migration format
2202      * doesn't allow the destination to know the size to read without fully
2203      * parsing it through each device's load-state code (especially the open
2204      * coded devices that use get/put).
2205      * So we wrap the device state up in a package with a length at the start;
2206      * to do this we use a qemu_buf to hold the whole of the device state.
2207      */
2208     bioc = qio_channel_buffer_new(4096);
2209     qio_channel_set_name(QIO_CHANNEL(bioc), "migration-postcopy-buffer");
2210     fb = qemu_file_new_output(QIO_CHANNEL(bioc));
2211     object_unref(OBJECT(bioc));
2212 
2213     /*
2214      * Make sure the receiver can get incoming pages before we send the rest
2215      * of the state
2216      */
2217     qemu_savevm_send_postcopy_listen(fb);
2218 
2219     qemu_savevm_state_complete_precopy(fb, false, false);
2220     if (migrate_postcopy_ram()) {
2221         qemu_savevm_send_ping(fb, 3);
2222     }
2223 
2224     qemu_savevm_send_postcopy_run(fb);
2225 
2226     /* <><> end of stuff going into the package */
2227 
2228     /* Last point of recovery; as soon as we send the package the destination
2229      * can open devices and potentially start running.
2230      * Let's just check again that we've not got any errors.
2231      */
2232     ret = qemu_file_get_error(ms->to_dst_file);
2233     if (ret) {
2234         error_setg(errp, "postcopy_start: Migration stream errored (pre package)");
2235         goto fail_closefb;
2236     }
2237 
2238     restart_block = false;
2239 
2240     /* Now send that blob */
2241     if (qemu_savevm_send_packaged(ms->to_dst_file, bioc->data, bioc->usage)) {
2242         goto fail_closefb;
2243     }
2244     qemu_fclose(fb);
2245 
2246     /* Send a notification to give a chance for anything that needs to happen
2247      * at the transition to postcopy and after the device state; in particular
2248      * spice needs to trigger a transition now
2249      */
2250     ms->postcopy_after_devices = true;
2251     migration_call_notifiers(ms);
2252 
2253     ms->downtime = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - time_at_stop;
2254 
2255     qemu_mutex_unlock_iothread();
2256 
2257     if (migrate_postcopy_ram()) {
2258         /*
2259          * Although this ping is just for debug, it could potentially be
2260          * used for getting a better measurement of downtime at the source.
2261          */
2262         qemu_savevm_send_ping(ms->to_dst_file, 4);
2263     }
2264 
2265     if (migrate_release_ram()) {
2266         ram_postcopy_migrated_memory_release(ms);
2267     }
2268 
2269     ret = qemu_file_get_error(ms->to_dst_file);
2270     if (ret) {
2271         error_setg(errp, "postcopy_start: Migration stream errored");
2272         migrate_set_state(&ms->state, MIGRATION_STATUS_POSTCOPY_ACTIVE,
2273                           MIGRATION_STATUS_FAILED);
2274     }
2275 
2276     trace_postcopy_preempt_enabled(migrate_postcopy_preempt());
2277 
2278     return ret;
2279 
2280 fail_closefb:
2281     qemu_fclose(fb);
2282 fail:
2283     migrate_set_state(&ms->state, MIGRATION_STATUS_POSTCOPY_ACTIVE,
2284                       MIGRATION_STATUS_FAILED);
2285     if (restart_block) {
2286         /* A failure happened early enough that we know the destination hasn't
2287          * accessed block devices, so we're safe to recover.
2288          */
2289         Error *local_err = NULL;
2290 
2291         bdrv_activate_all(&local_err);
2292         if (local_err) {
2293             error_report_err(local_err);
2294         }
2295     }
2296     qemu_mutex_unlock_iothread();
2297     return -1;
2298 }
2299 
2300 /**
2301  * migration_maybe_pause: Pause if required by
2302  * migrate_pause_before_switchover().  Called with the iothread locked.
2303  * Returns: 0 on success
2304  */
2305 static int migration_maybe_pause(MigrationState *s,
2306                                  int *current_active_state,
2307                                  int new_state)
2308 {
2309     if (!migrate_pause_before_switchover()) {
2310         return 0;
2311     }
2312 
2313     /* Since leaving this state is not atomic with posting the semaphore
2314      * it's possible that someone could have issued multiple migrate_continue
2315      * and the semaphore is incorrectly positive at this point;
2316      * the docs say it's undefined to reinit a semaphore that's already
2317      * init'd, so use timedwait to eat up any existing posts.
2318      */
2319     while (qemu_sem_timedwait(&s->pause_sem, 1) == 0) {
2320         /* This block intentionally left blank */
2321     }
2322 
2323     /*
2324      * If the migration is cancelled when it is in the completion phase,
2325      * the migration state is set to MIGRATION_STATUS_CANCELLING.
2326      * So we don't need to wait on the semaphore; otherwise we would
2327      * block forever on the 'pause_sem' semaphore.
2328      */
2329     if (s->state != MIGRATION_STATUS_CANCELLING) {
2330         qemu_mutex_unlock_iothread();
2331         migrate_set_state(&s->state, *current_active_state,
2332                           MIGRATION_STATUS_PRE_SWITCHOVER);
2333         qemu_sem_wait(&s->pause_sem);
2334         migrate_set_state(&s->state, MIGRATION_STATUS_PRE_SWITCHOVER,
2335                           new_state);
2336         *current_active_state = new_state;
2337         qemu_mutex_lock_iothread();
2338     }
2339 
2340     return s->state == new_state ? 0 : -EINVAL;
2341 }
2342 
2343 static int migration_completion_precopy(MigrationState *s,
2344                                         int *current_active_state)
2345 {
2346     int ret;
2347 
2348     qemu_mutex_lock_iothread();
2349     s->downtime_start = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
2350     qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER, NULL);
2351 
2352     s->vm_old_state = runstate_get();
2353     global_state_store();
2354 
2355     ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
2356     trace_migration_completion_vm_stop(ret);
2357     if (ret < 0) {
2358         goto out_unlock;
2359     }
2360 
2361     ret = migration_maybe_pause(s, current_active_state,
2362                                 MIGRATION_STATUS_DEVICE);
2363     if (ret < 0) {
2364         goto out_unlock;
2365     }
2366 
2367     /*
2368      * Inactivate disks except in COLO, and track that we have done so in order
2369      * to remember to reactivate them if migration fails or is cancelled.
2370      */
2371     s->block_inactive = !migrate_colo();
2372     migration_rate_set(RATE_LIMIT_DISABLED);
2373     ret = qemu_savevm_state_complete_precopy(s->to_dst_file, false,
2374                                              s->block_inactive);
2375 out_unlock:
2376     qemu_mutex_unlock_iothread();
2377     return ret;
2378 }
2379 
2380 static void migration_completion_postcopy(MigrationState *s)
2381 {
2382     trace_migration_completion_postcopy_end();
2383 
2384     qemu_mutex_lock_iothread();
2385     qemu_savevm_state_complete_postcopy(s->to_dst_file);
2386     qemu_mutex_unlock_iothread();
2387 
2388     /*
2389      * Shut down the postcopy fast path thread.  This is only needed when
2390      * the dest QEMU binary is old (7.1/7.2).  QEMU 8.0+ doesn't need this.
2391      */
2392     if (migrate_postcopy_preempt() && s->preempt_pre_7_2) {
2393         postcopy_preempt_shutdown_file(s);
2394     }
2395 
2396     trace_migration_completion_postcopy_end_after_complete();
2397 }
2398 
2399 static void migration_completion_failed(MigrationState *s,
2400                                         int current_active_state)
2401 {
2402     if (s->block_inactive && (s->state == MIGRATION_STATUS_ACTIVE ||
2403                               s->state == MIGRATION_STATUS_DEVICE)) {
2404         /*
2405          * If not doing postcopy, vm_start() will be called: let's
2406          * regain control on images.
2407          */
2408         Error *local_err = NULL;
2409 
2410         qemu_mutex_lock_iothread();
2411         bdrv_activate_all(&local_err);
2412         if (local_err) {
2413             error_report_err(local_err);
2414         } else {
2415             s->block_inactive = false;
2416         }
2417         qemu_mutex_unlock_iothread();
2418     }
2419 
2420     migrate_set_state(&s->state, current_active_state,
2421                       MIGRATION_STATUS_FAILED);
2422 }
2423 
2424 /**
2425  * migration_completion: Used by migration_thread when there's not much left.
2426  *   The caller 'breaks' the loop when this returns.
2427  *
2428  * @s: Current migration state
2429  */
2430 static void migration_completion(MigrationState *s)
2431 {
2432     int ret = 0;
2433     int current_active_state = s->state;
2434 
2435     if (s->state == MIGRATION_STATUS_ACTIVE) {
2436         ret = migration_completion_precopy(s, &current_active_state);
2437     } else if (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
2438         migration_completion_postcopy(s);
2439     } else {
2440         ret = -1;
2441     }
2442 
2443     if (ret < 0) {
2444         goto fail;
2445     }
2446 
2447     if (close_return_path_on_source(s)) {
2448         goto fail;
2449     }
2450 
2451     if (qemu_file_get_error(s->to_dst_file)) {
2452         trace_migration_completion_file_err();
2453         goto fail;
2454     }
2455 
2456     if (migrate_colo() && s->state == MIGRATION_STATUS_ACTIVE) {
2457         /* COLO does not support postcopy */
2458         migrate_set_state(&s->state, MIGRATION_STATUS_ACTIVE,
2459                           MIGRATION_STATUS_COLO);
2460     } else {
2461         migrate_set_state(&s->state, current_active_state,
2462                           MIGRATION_STATUS_COMPLETED);
2463     }
2464 
2465     return;
2466 
2467 fail:
2468     migration_completion_failed(s, current_active_state);
2469 }
2470 
2471 /**
2472  * bg_migration_completion: Used by bg_migration_thread when after all the
2473  *   RAM has been saved. The caller 'breaks' the loop when this returns.
2474  *
2475  * @s: Current migration state
2476  */
2477 static void bg_migration_completion(MigrationState *s)
2478 {
2479     int current_active_state = s->state;
2480 
2481     if (s->state == MIGRATION_STATUS_ACTIVE) {
2482         /*
2483          * By this moment we have RAM content saved into the migration stream.
2484          * The next step is to flush the non-RAM content (device state)
2485          * right after the ram content. The device state has been stored into
2486          * the temporary buffer before RAM saving started.
2487          */
2488         qemu_put_buffer(s->to_dst_file, s->bioc->data, s->bioc->usage);
2489         qemu_fflush(s->to_dst_file);
2490     } else if (s->state == MIGRATION_STATUS_CANCELLING) {
2491         goto fail;
2492     }
2493 
2494     if (qemu_file_get_error(s->to_dst_file)) {
2495         trace_migration_completion_file_err();
2496         goto fail;
2497     }
2498 
2499     migrate_set_state(&s->state, current_active_state,
2500                       MIGRATION_STATUS_COMPLETED);
2501     return;
2502 
2503 fail:
2504     migrate_set_state(&s->state, current_active_state,
2505                       MIGRATION_STATUS_FAILED);
2506 }
2507 
2508 typedef enum MigThrError {
2509     /* No error detected */
2510     MIG_THR_ERR_NONE = 0,
2511     /* Detected error, but resumed successfully */
2512     MIG_THR_ERR_RECOVERED = 1,
2513     /* Detected fatal error, need to exit */
2514     MIG_THR_ERR_FATAL = 2,
2515 } MigThrError;
2516 
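     /*
      * Ask the destination to resume, then wait until the return path
      * thread flips our state back to POSTCOPY_ACTIVE (on RESUME_ACK).
      */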
2517 static int postcopy_resume_handshake(MigrationState *s)
2518 {
2519     qemu_savevm_send_postcopy_resume(s->to_dst_file);
2520 
2521     while (s->state == MIGRATION_STATUS_POSTCOPY_RECOVER) {
2522         migration_rp_wait(s);
2523     }
2524 
2525     if (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
2526         return 0;
2527     }
2528 
2529     return -1;
2530 }
2531 
2532 /* Return zero if success, or <0 for error */
2533 static int postcopy_do_resume(MigrationState *s)
2534 {
2535     int ret;
2536 
2537     /*
2538      * Call all the resume_prepare() hooks, so that modules can be
2539      * ready for the migration resume.
2540      */
2541     ret = qemu_savevm_state_resume_prepare(s);
2542     if (ret) {
2543         error_report("%s: resume_prepare() failure detected: %d",
2544                      __func__, ret);
2545         return ret;
2546     }
2547 
2548     /*
2549      * If preempt is enabled, re-establish the preempt channel.  Note that
2550      * we do it after resume prepare to make sure the main channel will be
2551      * created before the preempt channel.  E.g. with a weak network, the
2552      * dest QEMU may get confused about the order in which the preempt and
2553      * main channel connections arrive.  This guarantees the correct order.
2554      */
2555     ret = postcopy_preempt_establish_channel(s);
2556     if (ret) {
2557         error_report("%s: postcopy_preempt_establish_channel(): %d",
2558                      __func__, ret);
2559         return ret;
2560     }
2561 
2562     /*
2563      * Last handshake with destination on the resume (destination will
2564      * switch to postcopy-active afterwards)
2565      */
2566     ret = postcopy_resume_handshake(s);
2567     if (ret) {
2568         error_report("%s: handshake failed: %d", __func__, ret);
2569         return ret;
2570     }
2571 
2572     return 0;
2573 }
2574 
2575 /*
2576  * We don't return until we are in a safe state to continue the current
2577  * postcopy migration.  Returns MIG_THR_ERR_RECOVERED if recovered, or
2578  * MIG_THR_ERR_FATAL if an unrecoverable failure happened.
2579  */
2580 static MigThrError postcopy_pause(MigrationState *s)
2581 {
2582     assert(s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE);
2583 
2584     while (true) {
2585         QEMUFile *file;
2586 
2587         /*
2588          * Current channel is possibly broken. Release it.  Note that this is
2589          * guaranteed even without lock because to_dst_file should only be
2590          * modified by the migration thread.  That also guarantees that the
2591          * unregister of yank is safe too without the lock.  It should be safe
2592          * even to be within the qemu_file_lock, but we didn't do that to avoid
2593          * taking more mutex (yank_lock) within qemu_file_lock.  TL;DR: we make
2594          * the qemu_file_lock critical section as small as possible.
2595          */
2596         assert(s->to_dst_file);
2597         migration_ioc_unregister_yank_from_file(s->to_dst_file);
2598         qemu_mutex_lock(&s->qemu_file_lock);
2599         file = s->to_dst_file;
2600         s->to_dst_file = NULL;
2601         qemu_mutex_unlock(&s->qemu_file_lock);
2602 
2603         qemu_file_shutdown(file);
2604         qemu_fclose(file);
2605 
2606         /*
2607          * We're already pausing, so ignore any errors on the return
2608          * path and just wait for the thread to finish. It will be
2609          * re-created when we resume.
2610          */
2611         close_return_path_on_source(s);
2612 
2613         migrate_set_state(&s->state, s->state,
2614                           MIGRATION_STATUS_POSTCOPY_PAUSED);
2615 
2616         error_report("Detected IO failure for postcopy. "
2617                      "Migration paused.");
2618 
2619         /*
2620          * We wait here until things are fixed up. Then someone will set
2621          * the status back for us.
2622          */
2623         while (s->state == MIGRATION_STATUS_POSTCOPY_PAUSED) {
2624             qemu_sem_wait(&s->postcopy_pause_sem);
2625         }
2626 
2627         if (s->state == MIGRATION_STATUS_POSTCOPY_RECOVER) {
2628             /* Woken up by a recover procedure. Give it a shot */
2629 
2630             /* Do the resume logic */
2631             if (postcopy_do_resume(s) == 0) {
2632                 /* Let's continue! */
2633                 trace_postcopy_pause_continued();
2634                 return MIG_THR_ERR_RECOVERED;
2635             } else {
2636                 /*
2637                  * Something went wrong during the recovery; let's
2638                  * pause again. Pause is always better than throwing
2639                  * data away.
2640                  */
2641                 continue;
2642             }
2643         } else {
2644             /* This is not right... Time to quit. */
2645             return MIG_THR_ERR_FATAL;
2646         }
2647     }
2648 }
2649 
2650 static MigThrError migration_detect_error(MigrationState *s)
2651 {
2652     int ret;
2653     int state = s->state;
2654     Error *local_error = NULL;
2655 
2656     if (state == MIGRATION_STATUS_CANCELLING ||
2657         state == MIGRATION_STATUS_CANCELLED) {
2658         /* End the migration, but don't set the state to failed */
2659         return MIG_THR_ERR_FATAL;
2660     }
2661 
2662     /*
2663      * Try to detect any file errors.  Note that postcopy_qemufile_src will
2664      * be NULL when postcopy preempt is not enabled.
2665      */
2666     ret = qemu_file_get_error_obj_any(s->to_dst_file,
2667                                       s->postcopy_qemufile_src,
2668                                       &local_error);
2669     if (!ret) {
2670         /* Everything is fine */
2671         assert(!local_error);
2672         return MIG_THR_ERR_NONE;
2673     }
2674 
2675     if (local_error) {
2676         migrate_set_error(s, local_error);
2677         error_free(local_error);
2678     }
2679 
2680     if (state == MIGRATION_STATUS_POSTCOPY_ACTIVE && ret) {
2681         /*
2682          * For postcopy, we allow the network to be down for a
2683          * while. After that, it can be continued by a
2684          * recovery phase.
2685          */
2686         return postcopy_pause(s);
2687     } else {
2688         /*
2689          * For precopy (or postcopy with an error outside IO), we fail
2690          * immediately.
2691          */
2692         migrate_set_state(&s->state, state, MIGRATION_STATUS_FAILED);
2693         trace_migration_thread_file_err();
2694 
2695         /* Time to stop the migration, now. */
2696         return MIG_THR_ERR_FATAL;
2697     }
2698 }
2699 
2700 static void migration_calculate_complete(MigrationState *s)
2701 {
2702     uint64_t bytes = migration_transferred_bytes();
2703     int64_t end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
2704     int64_t transfer_time;
2705 
2706     s->total_time = end_time - s->start_time;
2707     if (!s->downtime) {
2708         /*
2709          * It's still not set, so this is a precopy migration.  For
2710          * postcopy, downtime is calculated during postcopy_start().
2711          */
2712         s->downtime = end_time - s->downtime_start;
2713     }
2714 
2715     transfer_time = s->total_time - s->setup_time;
2716     if (transfer_time) {
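             /* bytes * 8 / ms == bits/ms == kbit/s; divide by 1000 for Mbit/s */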
2717         s->mbps = ((double) bytes * 8.0) / transfer_time / 1000;
2718     }
2719 }
2720 
2721 static void update_iteration_initial_status(MigrationState *s)
2722 {
2723     /*
2724      * Update these three fields at the same time to avoid mismatched info
2725      * leading to wrong speed calculations.
2726      */
2727     s->iteration_start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
2728     s->iteration_initial_bytes = migration_transferred_bytes();
2729     s->iteration_initial_pages = ram_get_total_transferred_pages();
2730 }
2731 
2732 static void migration_update_counters(MigrationState *s,
2733                                       int64_t current_time)
2734 {
2735     uint64_t transferred, transferred_pages, time_spent;
2736     uint64_t current_bytes; /* bytes transferred since the beginning */
2737     uint64_t switchover_bw;
2738     /* Expected bandwidth when switching over to destination QEMU */
2739     double expected_bw_per_ms;
2740     double bandwidth;
2741 
2742     if (current_time < s->iteration_start_time + BUFFER_DELAY) {
2743         return;
2744     }
2745 
2746     switchover_bw = migrate_avail_switchover_bandwidth();
2747     current_bytes = migration_transferred_bytes();
2748     transferred = current_bytes - s->iteration_initial_bytes;
2749     time_spent = current_time - s->iteration_start_time;
2750     bandwidth = (double)transferred / time_spent;
2751 
2752     if (switchover_bw) {
2753         /*
2754          * If the user specified a switchover bandwidth, let's trust the
2755          * user, since it can be more accurate than what we estimated.
2756          */
2757         expected_bw_per_ms = switchover_bw / 1000;
2758     } else {
2759         /* If the user doesn't specify a bandwidth, use the estimate */
2760         expected_bw_per_ms = bandwidth;
2761     }
2762 
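     /*
      * threshold_size is how much data we could still send within the
      * downtime limit at the expected bandwidth; once the pending data
      * drops below it, switchover can complete in time.
      */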
2763     s->threshold_size = expected_bw_per_ms * migrate_downtime_limit();
2764 
2765     s->mbps = (((double) transferred * 8.0) /
2766                ((double) time_spent / 1000.0)) / 1000.0 / 1000.0;
2767 
2768     transferred_pages = ram_get_total_transferred_pages() -
2769                             s->iteration_initial_pages;
2770     s->pages_per_second = (double) transferred_pages /
2771                              (((double) time_spent / 1000.0));
2772 
2773     /*
2774      * If we haven't sent anything, we don't want to
2775      * recalculate. 10000 is a small enough number for our purposes.
2776      */
2777     if (stat64_get(&mig_stats.dirty_pages_rate) &&
2778         transferred > 10000) {
2779         s->expected_downtime =
2780             stat64_get(&mig_stats.dirty_bytes_last_sync) / expected_bw_per_ms;
2781     }
2782 
2783     migration_rate_reset();
2784 
2785     update_iteration_initial_status(s);
2786 
2787     trace_migrate_transferred(transferred, time_spent,
2788                               /* Both in unit bytes/ms */
2789                               bandwidth, switchover_bw / 1000,
2790                               s->threshold_size);
2791 }
2792 
2793 static bool migration_can_switchover(MigrationState *s)
2794 {
2795     if (!migrate_switchover_ack()) {
2796         return true;
2797     }
2798 
2799     /* No reason to wait for switchover ACK if VM is stopped */
2800     if (!runstate_is_running()) {
2801         return true;
2802     }
2803 
2804     return s->switchover_acked;
2805 }
2806 
2807 /* Migration thread iteration status */
2808 typedef enum {
2809     MIG_ITERATE_RESUME,         /* Resume current iteration */
2810     MIG_ITERATE_SKIP,           /* Skip current iteration */
2811     MIG_ITERATE_BREAK,          /* Break the loop */
2812 } MigIterateState;
2813 
2814 /*
2815  * Run one migration iteration and decide whether to resume, skip,
2816  * or break out of the main loop.
2817  */
2818 static MigIterateState migration_iteration_run(MigrationState *s)
2819 {
2820     uint64_t must_precopy, can_postcopy;
2821     Error *local_err = NULL;
2822     bool in_postcopy = s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE;
2823     bool can_switchover = migration_can_switchover(s);
2824 
2825     qemu_savevm_state_pending_estimate(&must_precopy, &can_postcopy);
2826     uint64_t pending_size = must_precopy + can_postcopy;
2827 
2828     trace_migrate_pending_estimate(pending_size, must_precopy, can_postcopy);
2829 
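     /*
      * Only query the exact (more expensive) pending size when the cheap
      * estimate suggests we may already be under the threshold.
      */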
2830     if (must_precopy <= s->threshold_size) {
2831         qemu_savevm_state_pending_exact(&must_precopy, &can_postcopy);
2832         pending_size = must_precopy + can_postcopy;
2833         trace_migrate_pending_exact(pending_size, must_precopy, can_postcopy);
2834     }
2835 
2836     if ((!pending_size || pending_size < s->threshold_size) && can_switchover) {
2837         trace_migration_thread_low_pending(pending_size);
2838         migration_completion(s);
2839         return MIG_ITERATE_BREAK;
2840     }
2841 
2842     /* Still a significant amount to transfer */
2843     if (!in_postcopy && must_precopy <= s->threshold_size && can_switchover &&
2844         qatomic_read(&s->start_postcopy)) {
2845         if (postcopy_start(s, &local_err)) {
2846             migrate_set_error(s, local_err);
2847             error_report_err(local_err);
2848         }
2849         return MIG_ITERATE_SKIP;
2850     }
2851 
2852     /* Just another iteration step */
2853     qemu_savevm_state_iterate(s->to_dst_file, in_postcopy);
2854     return MIG_ITERATE_RESUME;
2855 }
2856 
2857 static void migration_iteration_finish(MigrationState *s)
2858 {
2859     /* If we enabled cpu throttling for auto-converge, turn it off. */
2860     cpu_throttle_stop();
2861 
2862     qemu_mutex_lock_iothread();
2863     switch (s->state) {
2864     case MIGRATION_STATUS_COMPLETED:
2865         migration_calculate_complete(s);
2866         runstate_set(RUN_STATE_POSTMIGRATE);
2867         break;
2868     case MIGRATION_STATUS_COLO:
2869         assert(migrate_colo());
2870         migrate_start_colo_process(s);
2871         s->vm_old_state = RUN_STATE_RUNNING;
2872         /* Fallthrough */
2873     case MIGRATION_STATUS_FAILED:
2874     case MIGRATION_STATUS_CANCELLED:
2875     case MIGRATION_STATUS_CANCELLING:
2876         if (s->vm_old_state == RUN_STATE_RUNNING) {
2877             if (!runstate_check(RUN_STATE_SHUTDOWN)) {
2878                 vm_start();
2879             }
2880         } else {
2881             if (runstate_check(RUN_STATE_FINISH_MIGRATE)) {
2882                 runstate_set(s->vm_old_state);
2883             }
2884         }
2885         break;
2886 
2887     default:
2888         /* Should not reach here, but if so, forgive the VM. */
2889         error_report("%s: Unknown ending state %d", __func__, s->state);
2890         break;
2891     }
2892     migrate_fd_cleanup_schedule(s);
2893     qemu_mutex_unlock_iothread();
2894 }
2895 
2896 static void bg_migration_iteration_finish(MigrationState *s)
2897 {
2898     /*
2899      * Stop tracking RAM writes - un-protect memory, un-register UFFD
2900      * memory ranges, flush kernel wait queues and wake up threads
2901      * waiting for write fault to be resolved.
2902      */
2903     ram_write_tracking_stop();
2904 
2905     qemu_mutex_lock_iothread();
2906     switch (s->state) {
2907     case MIGRATION_STATUS_COMPLETED:
2908         migration_calculate_complete(s);
2909         break;
2910 
2911     case MIGRATION_STATUS_ACTIVE:
2912     case MIGRATION_STATUS_FAILED:
2913     case MIGRATION_STATUS_CANCELLED:
2914     case MIGRATION_STATUS_CANCELLING:
2915         break;
2916 
2917     default:
2918         /* Should not reach here, but if so, forgive the VM. */
2919         error_report("%s: Unknown ending state %d", __func__, s->state);
2920         break;
2921     }
2922 
2923     migrate_fd_cleanup_schedule(s);
2924     qemu_mutex_unlock_iothread();
2925 }
2926 
2927 /*
2928  * Run one background-snapshot iteration and decide whether to resume
2929  * or break out of the main loop.
2930  */
2931 static MigIterateState bg_migration_iteration_run(MigrationState *s)
2932 {
2933     int res;
2934 
2935     res = qemu_savevm_state_iterate(s->to_dst_file, false);
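     /* A positive return means all iterable state has been sent; finish up */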
2936     if (res > 0) {
2937         bg_migration_completion(s);
2938         return MIG_ITERATE_BREAK;
2939     }
2940 
2941     return MIG_ITERATE_RESUME;
2942 }
2943 
2944 void migration_make_urgent_request(void)
2945 {
2946     qemu_sem_post(&migrate_get_current()->rate_limit_sem);
2947 }
2948 
2949 void migration_consume_urgent_request(void)
2950 {
2951     qemu_sem_wait(&migrate_get_current()->rate_limit_sem);
2952 }
2953 
2954 /* Returns true if the rate limiting was broken by an urgent request */
2955 bool migration_rate_limit(void)
2956 {
2957     int64_t now = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
2958     MigrationState *s = migrate_get_current();
2959 
2960     bool urgent = false;
2961     migration_update_counters(s, now);
2962     if (migration_rate_exceeded(s->to_dst_file)) {
2964         if (qemu_file_get_error(s->to_dst_file)) {
2965             return false;
2966         }
2967         /*
2968          * Wait for a delay to do rate limiting OR
2969          * something urgent to post the semaphore.
2970          */
2971         int ms = s->iteration_start_time + BUFFER_DELAY - now;
2972         trace_migration_rate_limit_pre(ms);
2973         if (qemu_sem_timedwait(&s->rate_limit_sem, ms) == 0) {
2974             /*
2975              * We were woken by one or more urgent things but
2976              * the timedwait will have consumed one of them.
2977              * The service routine for the urgent wake will dec
2978              * the semaphore itself for each item it consumes,
2979              * so add this one we just eat back.
2980              */
2981             qemu_sem_post(&s->rate_limit_sem);
2982             urgent = true;
2983         }
2984         trace_migration_rate_limit_post(urgent);
2985     }
2986     return urgent;
2987 }
2988 
2989 /*
2990  * If failover devices are present, wait until they are completely
2991  * unplugged
2992  */
2993 
2994 static void qemu_savevm_wait_unplug(MigrationState *s, int old_state,
2995                                     int new_state)
2996 {
2997     if (qemu_savevm_state_guest_unplug_pending()) {
2998         migrate_set_state(&s->state, old_state, MIGRATION_STATUS_WAIT_UNPLUG);
2999 
3000         while (s->state == MIGRATION_STATUS_WAIT_UNPLUG &&
3001                qemu_savevm_state_guest_unplug_pending()) {
3002             qemu_sem_timedwait(&s->wait_unplug_sem, 250);
3003         }
3004         if (s->state != MIGRATION_STATUS_WAIT_UNPLUG) {
3005             int timeout = 120; /* 30 seconds */
3006             /*
3007              * Migration has been cancelled, but as we have started an
3008              * unplug we must wait for it to finish to be able to plug
3009              * the card back in.
3010              */
3011             while (timeout-- && qemu_savevm_state_guest_unplug_pending()) {
3012                 qemu_sem_timedwait(&s->wait_unplug_sem, 250);
3013             }
3014             if (qemu_savevm_state_guest_unplug_pending() &&
3015                 !qtest_enabled()) {
3016                 warn_report("migration: partially unplugged device on "
3017                             "failure");
3018             }
3019         }
3020 
3021         migrate_set_state(&s->state, MIGRATION_STATUS_WAIT_UNPLUG, new_state);
3022     } else {
3023         migrate_set_state(&s->state, old_state, new_state);
3024     }
3025 }
3026 
3027 /*
3028  * Master migration thread on the source VM.
3029  * It drives the migration and pumps the data down the outgoing channel.
3030  */
3031 static void *migration_thread(void *opaque)
3032 {
3033     MigrationState *s = opaque;
3034     MigrationThread *thread = NULL;
3035     int64_t setup_start = qemu_clock_get_ms(QEMU_CLOCK_HOST);
3036     MigThrError thr_error;
3037     bool urgent = false;
3038 
3039     thread = migration_threads_add("live_migration", qemu_get_thread_id());
3040 
3041     rcu_register_thread();
3042 
3043     object_ref(OBJECT(s));
3044     update_iteration_initial_status(s);
3045 
3046     qemu_mutex_lock_iothread();
3047     qemu_savevm_state_header(s->to_dst_file);
3048     qemu_mutex_unlock_iothread();
3049 
3050     /*
3051      * If we opened the return path, we need to make sure dst has it
3052      * opened as well.
3053      */
3054     if (s->rp_state.rp_thread_created) {
3055         /* Now tell the dest that it should open its end so it can reply */
3056         qemu_savevm_send_open_return_path(s->to_dst_file);
3057 
3058         /* And do a ping that will make stuff easier to debug */
3059         qemu_savevm_send_ping(s->to_dst_file, 1);
3060     }
3061 
3062     if (migrate_postcopy()) {
3063         /*
3064          * Tell the destination that we *might* want to do postcopy later;
3065          * if the other end can't do postcopy it should fail now, nice and
3066          * early.
3067          */
3068         qemu_savevm_send_postcopy_advise(s->to_dst_file);
3069     }
3070 
3071     if (migrate_colo()) {
3072         /* Notify migration destination that we enable COLO */
3073         qemu_savevm_send_colo_enable(s->to_dst_file);
3074     }
3075 
3076     qemu_mutex_lock_iothread();
3077     qemu_savevm_state_setup(s->to_dst_file);
3078     qemu_mutex_unlock_iothread();
3079 
3080     qemu_savevm_wait_unplug(s, MIGRATION_STATUS_SETUP,
3081                                MIGRATION_STATUS_ACTIVE);
3082 
3083     s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start;
3084 
3085     trace_migration_thread_setup_complete();
3086 
3087     while (migration_is_active(s)) {
3088         if (urgent || !migration_rate_exceeded(s->to_dst_file)) {
3089             MigIterateState iter_state = migration_iteration_run(s);
3090             if (iter_state == MIG_ITERATE_SKIP) {
3091                 continue;
3092             } else if (iter_state == MIG_ITERATE_BREAK) {
3093                 break;
3094             }
3095         }
3096 
3097         /*
3098          * Try to detect any kind of failures, and see whether we
3099          * should stop the migration now.
3100          */
3101         thr_error = migration_detect_error(s);
3102         if (thr_error == MIG_THR_ERR_FATAL) {
3103             /* Stop migration */
3104             break;
3105         } else if (thr_error == MIG_THR_ERR_RECOVERED) {
3106             /*
3107              * Just recovered from, e.g., a network failure; reset all
3108              * the local variables. This is important to avoid
3109              * breaking the transferred_bytes and bandwidth calculations.
3110              */
3111             update_iteration_initial_status(s);
3112         }
3113 
3114         urgent = migration_rate_limit();
3115     }
3116 
3117     trace_migration_thread_after_loop();
3118     migration_iteration_finish(s);
3119     object_unref(OBJECT(s));
3120     rcu_unregister_thread();
3121     migration_threads_remove(thread);
3122     return NULL;
3123 }
3124 
3125 static void bg_migration_vm_start_bh(void *opaque)
3126 {
3127     MigrationState *s = opaque;
3128 
3129     qemu_bh_delete(s->vm_start_bh);
3130     s->vm_start_bh = NULL;
3131 
3132     vm_start();
3133     s->downtime = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - s->downtime_start;
3134 }
3135 
3136 /**
3137  * Background snapshot thread, based on live migration code.
3138  * This is an alternative implementation of the live migration mechanism,
3139  * introduced specifically to support background snapshots.
3140  *
3141  * It takes advantage of the userfaultfd write protection mechanism
3142  * introduced in the v5.7 kernel. Compared to the existing dirty page
3143  * logging migration, much less stream traffic is produced, resulting in
3144  * smaller snapshot images, simply because no duplicate pages can get
3145  * into the stream.
3146  *
3147  * Another key point is that the generated vmstate stream reflects the
3148  * machine state 'frozen' at the beginning of snapshot creation, whereas
3149  * with dirty page logging it is effectively the state at the end.
3150  */
3151 static void *bg_migration_thread(void *opaque)
3152 {
3153     MigrationState *s = opaque;
3154     int64_t setup_start;
3155     MigThrError thr_error;
3156     QEMUFile *fb;
3157     bool early_fail = true;
3158 
3159     rcu_register_thread();
3160     object_ref(OBJECT(s));
3161 
3162     migration_rate_set(RATE_LIMIT_DISABLED);
3163 
3164     setup_start = qemu_clock_get_ms(QEMU_CLOCK_HOST);
3165     /*
3166      * We want to save vmstate for the moment when migration has been
3167      * initiated but also we want to save RAM content while VM is running.
3168      * The RAM content should appear first in the vmstate. So, we first
3169      * stash the non-RAM part of the vmstate to the temporary buffer,
3170      * then write RAM part of the vmstate to the migration stream
3171      * with vCPUs running and, finally, write stashed non-RAM part of
3172      * the vmstate from the buffer to the migration stream.
3173      */
3174     s->bioc = qio_channel_buffer_new(512 * 1024);
3175     qio_channel_set_name(QIO_CHANNEL(s->bioc), "vmstate-buffer");
3176     fb = qemu_file_new_output(QIO_CHANNEL(s->bioc));
3177     object_unref(OBJECT(s->bioc));
3178 
3179     update_iteration_initial_status(s);
3180 
3181     /*
3182      * Prepare for tracking memory writes with UFFD-WP - populate
3183      * RAM pages before protecting.
3184      */
3185 #ifdef __linux__
3186     ram_write_tracking_prepare();
3187 #endif
3188 
3189     qemu_mutex_lock_iothread();
3190     qemu_savevm_state_header(s->to_dst_file);
3191     qemu_savevm_state_setup(s->to_dst_file);
3192     qemu_mutex_unlock_iothread();
3193 
3194     qemu_savevm_wait_unplug(s, MIGRATION_STATUS_SETUP,
3195                                MIGRATION_STATUS_ACTIVE);
3196 
3197     s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start;
3198 
3199     trace_migration_thread_setup_complete();
3200     s->downtime_start = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
3201 
3202     qemu_mutex_lock_iothread();
3203 
3204     /*
3205      * If the VM is currently in a suspended state, then, to make a valid
3206      * runstate transition in vm_stop_force_state(), we need to wake it up.
3207      */
3208     qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER, NULL);
3209     s->vm_old_state = runstate_get();
3210 
3211     global_state_store();
3212     /* Forcibly stop VM before saving state of vCPUs and devices */
3213     if (vm_stop_force_state(RUN_STATE_PAUSED)) {
3214         goto fail;
3215     }
3216     /*
3217      * Put vCPUs in sync with shadow context structures, then
3218      * save their state to channel-buffer along with devices.
3219      */
3220     cpu_synchronize_all_states();
3221     if (qemu_savevm_state_complete_precopy_non_iterable(fb, false, false)) {
3222         goto fail;
3223     }
3224     /*
3225      * Since we are going to get non-iterable state data directly
3226      * from s->bioc->data, an explicit flush is needed here.
3227      */
3228     qemu_fflush(fb);
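
    /*
     * For reference, a sketch of how the buffered bytes later reach the
     * outgoing stream once RAM has been sent (the append itself happens in
     * the completion path, not here):
     *
     *     qemu_put_buffer(s->to_dst_file, s->bioc->data, s->bioc->usage);
     */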
3229 
3230     /* Now initialize UFFD context and start tracking RAM writes */
3231     if (ram_write_tracking_start()) {
3232         goto fail;
3233     }
3234     early_fail = false;
3235 
3236     /*
3237      * Start the VM from a BH handler to avoid blocking on a write fault
3238      * here. UFFD-WP protection for the whole of RAM is already enabled,
3239      * so calling the VM state change notifiers from vm_start() would
3240      * write to virtio VQ memory, which lies in the write-protected region.
3241      */
3242     s->vm_start_bh = qemu_bh_new(bg_migration_vm_start_bh, s);
3243     qemu_bh_schedule(s->vm_start_bh);
3244 
3245     qemu_mutex_unlock_iothread();
3246 
3247     while (migration_is_active(s)) {
3248         MigIterateState iter_state = bg_migration_iteration_run(s);
3249         if (iter_state == MIG_ITERATE_SKIP) {
3250             continue;
3251         } else if (iter_state == MIG_ITERATE_BREAK) {
3252             break;
3253         }
3254 
3255         /*
3256          * Try to detect any kind of failure, and decide whether we
3257          * should stop the migration now.
3258          */
3259         thr_error = migration_detect_error(s);
3260         if (thr_error == MIG_THR_ERR_FATAL) {
3261             /* Stop migration */
3262             break;
3263         }
3264 
3265         migration_update_counters(s, qemu_clock_get_ms(QEMU_CLOCK_REALTIME));
3266     }
3267 
3268     trace_migration_thread_after_loop();
3269 
3270 fail:
3271     if (early_fail) {
3272         migrate_set_state(&s->state, MIGRATION_STATUS_ACTIVE,
3273                 MIGRATION_STATUS_FAILED);
3274         qemu_mutex_unlock_iothread();
3275     }
3276 
3277     bg_migration_iteration_finish(s);
3278 
3279     qemu_fclose(fb);
3280     object_unref(OBJECT(s));
3281     rcu_unregister_thread();
3282 
3283     return NULL;
3284 }
3285 
3286 void migrate_fd_connect(MigrationState *s, Error *error_in)
3287 {
3288     Error *local_err = NULL;
3289     uint64_t rate_limit;
3290     bool resume = s->state == MIGRATION_STATUS_POSTCOPY_PAUSED;
3291 
3292     /*
3293      * If there's a previous error, free it and prepare for another one.
3294      * Meanwhile, if migration completes successfully, no stale error will
3295      * be dumped when migrate_fd_cleanup() is called.
3296      */
3297     migrate_error_free(s);
3298 
3299     s->expected_downtime = migrate_downtime_limit();
3300     if (resume) {
3301         assert(s->cleanup_bh);
3302     } else {
3303         assert(!s->cleanup_bh);
3304         s->cleanup_bh = qemu_bh_new(migrate_fd_cleanup_bh, s);
3305     }
3306     if (error_in) {
3307         migrate_fd_error(s, error_in);
3308         if (resume) {
3309             /*
3310              * Don't clean up on resume if the channel is invalid; only
3311              * dump the error and wait for another channel connect from the
3312              * user. The error_report still gives the HMP user a hint on
3313              * what failed. It's normally done in migrate_fd_cleanup(), but
3314              * call it here explicitly.
3315              */
3316             error_report_err(error_copy(s->error));
3317         } else {
3318             migrate_fd_cleanup(s);
3319         }
3320         return;
3321     }
3322 
3323     if (resume) {
3324         /* This is a resumed migration */
3325         rate_limit = migrate_max_postcopy_bandwidth();
3326     } else {
3327         /* This is a brand new migration */
3328         rate_limit = migrate_max_bandwidth();
3329 
3330         /* Notify before starting migration thread */
3331         migration_call_notifiers(s);
3332     }
3333 
3334     migration_rate_set(rate_limit);
3335     qemu_file_set_blocking(s->to_dst_file, true);
3336 
3337     /*
3338      * Open the return path. For postcopy, it is always used. For precopy,
3339      * QEMU only uses the return path if the user enabled the "return-path"
3340      * capability.
3341      */
3342     if (migrate_postcopy_ram() || migrate_return_path()) {
3343         if (open_return_path_on_source(s)) {
3344             error_setg(&local_err, "Unable to open return-path for postcopy");
3345             migrate_set_state(&s->state, s->state, MIGRATION_STATUS_FAILED);
3346             migrate_set_error(s, local_err);
3347             error_report_err(local_err);
3348             migrate_fd_cleanup(s);
3349             return;
3350         }
3351     }
3352 
3353     /*
3354      * This needs to be done before resuming a postcopy.  Note: for newer
3355      * QEMUs we delay the channel creation until postcopy_start(), to
3356      * avoid out-of-order channel creation.
3357      */
3358     if (migrate_postcopy_preempt() && s->preempt_pre_7_2) {
3359         postcopy_preempt_setup(s);
3360     }
3361 
3362     if (resume) {
3363         /* Wakeup the main migration thread to do the recovery */
3364         migrate_set_state(&s->state, MIGRATION_STATUS_POSTCOPY_PAUSED,
3365                           MIGRATION_STATUS_POSTCOPY_RECOVER);
3366         qemu_sem_post(&s->postcopy_pause_sem);
3367         return;
3368     }
3369 
3370     if (multifd_save_setup(&local_err) != 0) {
3371         migrate_set_error(s, local_err);
3372         error_report_err(local_err);
3373         migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
3374                           MIGRATION_STATUS_FAILED);
3375         migrate_fd_cleanup(s);
3376         return;
3377     }
3378 
3379     if (migrate_background_snapshot()) {
3380         qemu_thread_create(&s->thread, "bg_snapshot",
3381                 bg_migration_thread, s, QEMU_THREAD_JOINABLE);
3382     } else {
3383         qemu_thread_create(&s->thread, "live_migration",
3384                 migration_thread, s, QEMU_THREAD_JOINABLE);
3385     }
3386     s->migration_thread_running = true;
3387 }
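
/*
 * For context, a sketch of how this function is typically reached (the exact
 * path depends on the transport): a backend such as the socket code creates
 * a QIOChannel and passes it to migration_channel_connect(), which wires up
 * s->to_dst_file and then hands over:
 *
 *     migration_channel_connect(s, ioc, hostname, error);
 *         -> migrate_fd_connect(s, error);
 */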
3388 
3389 static void migration_class_init(ObjectClass *klass, void *data)
3390 {
3391     DeviceClass *dc = DEVICE_CLASS(klass);
3392 
3393     dc->user_creatable = false;
3394     device_class_set_props(dc, migration_properties);
3395 }
3396 
3397 static void migration_instance_finalize(Object *obj)
3398 {
3399     MigrationState *ms = MIGRATION_OBJ(obj);
3400 
3401     qemu_mutex_destroy(&ms->error_mutex);
3402     qemu_mutex_destroy(&ms->qemu_file_lock);
3403     qemu_sem_destroy(&ms->wait_unplug_sem);
3404     qemu_sem_destroy(&ms->rate_limit_sem);
3405     qemu_sem_destroy(&ms->pause_sem);
3406     qemu_sem_destroy(&ms->postcopy_pause_sem);
3407     qemu_sem_destroy(&ms->rp_state.rp_sem);
3408     qemu_sem_destroy(&ms->rp_state.rp_pong_acks);
3409     qemu_sem_destroy(&ms->postcopy_qemufile_src_sem);
3410     error_free(ms->error);
3411 }
3412 
3413 static void migration_instance_init(Object *obj)
3414 {
3415     MigrationState *ms = MIGRATION_OBJ(obj);
3416 
3417     ms->state = MIGRATION_STATUS_NONE;
3418     ms->mbps = -1;
3419     ms->pages_per_second = -1;
3420     qemu_sem_init(&ms->pause_sem, 0);
3421     qemu_mutex_init(&ms->error_mutex);
3422 
3423     migrate_params_init(&ms->parameters);
3424 
3425     qemu_sem_init(&ms->postcopy_pause_sem, 0);
3426     qemu_sem_init(&ms->rp_state.rp_sem, 0);
3427     qemu_sem_init(&ms->rp_state.rp_pong_acks, 0);
3428     qemu_sem_init(&ms->rate_limit_sem, 0);
3429     qemu_sem_init(&ms->wait_unplug_sem, 0);
3430     qemu_sem_init(&ms->postcopy_qemufile_src_sem, 0);
3431     qemu_mutex_init(&ms->qemu_file_lock);
3432 }
3433 
3434 /*
3435  * Return true if the check passes, false otherwise. On failure the error
3436  * is stored in errp, if provided.
3437  */
3438 static bool migration_object_check(MigrationState *ms, Error **errp)
3439 {
3440     /* Assuming all off */
3441     bool old_caps[MIGRATION_CAPABILITY__MAX] = { 0 };
3442 
3443     if (!migrate_params_check(&ms->parameters, errp)) {
3444         return false;
3445     }
3446 
3447     return migrate_caps_check(old_caps, ms->capabilities, errp);
3448 }
3449 
3450 static const TypeInfo migration_type = {
3451     .name = TYPE_MIGRATION,
3452     /*
3453      * NOTE: TYPE_MIGRATION is not really a device, as the object is
3454      * not created using qdev_new(), it is not attached to the qdev
3455      * device tree, and it is never realized.
3456      *
3457      * TODO: Make this TYPE_OBJECT once QOM provides something like
3458      * TYPE_DEVICE's "-global" properties.
3459      */
3460     .parent = TYPE_DEVICE,
3461     .class_init = migration_class_init,
3462     .class_size = sizeof(MigrationClass),
3463     .instance_size = sizeof(MigrationState),
3464     .instance_init = migration_instance_init,
3465     .instance_finalize = migration_instance_finalize,
3466 };
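
/*
 * Because the type is parented to TYPE_DEVICE, its properties can be tuned
 * with "-global" on the command line, e.g. (illustrative):
 *
 *     qemu-system-x86_64 -global migration.send-configuration=off ...
 */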
3467 
3468 static void register_migration_types(void)
3469 {
3470     type_register_static(&migration_type);
3471 }
3472 
3473 type_init(register_migration_types);
3474