/*
 * QEMU live migration
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef QEMU_MIGRATION_H
#define QEMU_MIGRATION_H

#include "exec/cpu-common.h"
#include "hw/qdev-core.h"
#include "qapi/qapi-types-migration.h"
#include "qemu/thread.h"
#include "qemu/coroutine_int.h"
#include "io/channel.h"
#include "io/channel-buffer.h"
#include "net/announce.h"
#include "qom/object.h"
#include "postcopy-ram.h"

struct PostcopyBlocktimeContext;

#define  MIGRATION_RESUME_ACK_VALUE  (1)

/*
 * 1<<6=64 pages -> 256K chunk when page size is 4K.  This gives us
 * the benefit that all chunks are aligned to 64 pages, so the clear
 * bitmaps are always aligned to an unsigned long.
 */
#define CLEAR_BITMAP_SHIFT_MIN             6
/*
 * 1<<18=256K pages -> 1G chunk when page size is 4K.  This is the
 * default value used when none is specified.
 */
#define CLEAR_BITMAP_SHIFT_DEFAULT        18
/*
 * 1<<31=2G pages -> 8T chunk when page size is 4K.  This should be
 * big enough that we will not overflow in practice.
 */
#define CLEAR_BITMAP_SHIFT_MAX            31
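
/*
 * A minimal sketch of the chunk-size arithmetic above, assuming 4K
 * target pages.  clear_bitmap_chunk_bytes() is a hypothetical helper,
 * shown for illustration only and excluded from the build.
 */
#if 0   /* illustration only */
static inline uint64_t clear_bitmap_chunk_bytes(uint8_t shift)
{
    /* One chunk covers (1 << shift) pages of TARGET_PAGE_SIZE bytes each */
    return (uint64_t)TARGET_PAGE_SIZE << shift;
}
/* shift 6 -> 64 pages -> 256K; shift 18 -> 256K pages -> 1G */
#endif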

/* This is an abstraction of a "temp huge page" for postcopy's purpose */
typedef struct {
    /*
     * This points to a temporary huge page as a buffer for UFFDIO_COPY.  It's
     * mmap()ed and needs to be unmapped during cleanup.
     */
    void *tmp_huge_page;
    /*
     * This points to the host page we're going to install for this temp page.
     * It tells us, after we've received the whole page, where we should put it.
     */
    void *host_addr;
    /* Number of small pages copied so far (each of TARGET_PAGE_SIZE bytes) */
    unsigned int target_pages;
    /* Whether this page contains all zeros */
    bool all_zero;
} PostcopyTmpPage;
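
/*
 * For example (assuming a 2M huge page and 4K target pages; the actual
 * sizes are host/target dependent): the temp page is complete and ready
 * for UFFDIO_COPY once target_pages reaches 2M / 4K = 512.
 */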

/* State for the incoming migration */
struct MigrationIncomingState {
    QEMUFile *from_src_file;
    /* Previously received RAM's RAMBlock pointer */
    RAMBlock *last_recv_block[RAM_CHANNEL_MAX];
    /* A hook to allow cleanup at the end of incoming migration */
    void *transport_data;
    void (*transport_cleanup)(void *data);
    /*
     * Used to synchronize thread creation.  Note that we can't create
     * threads in parallel while holding this sem.
     */
    QemuSemaphore  thread_sync_sem;
    /*
     * Reset at the start of the main state load; set once the main thread
     * finishes loading state.
     */
    QemuEvent main_thread_load_event;

    /* For network announces */
    AnnounceTimer  announce_timer;

    size_t         largest_page_size;
    bool           have_fault_thread;
    QemuThread     fault_thread;
    /* Set this when we want the fault thread to quit */
    bool           fault_thread_quit;

    bool           have_listen_thread;
    QemuThread     listen_thread;

    /* For the kernel to send us notifications */
    int       userfault_fd;
    /* To notify the fault_thread to wake, e.g., when it needs to quit */
    int       userfault_event_fd;
    QEMUFile *to_src_file;
    QemuMutex rp_mutex;    /* We send replies from multiple threads */
    /* RAMBlock of last request sent to source */
    RAMBlock *last_rb;
    /*
     * Number of postcopy channels, including the default precopy channel;
     * vanilla postcopy will only contain one channel, which carries both
     * the precopy and postcopy streams.
     *
     * This is calculated when the source requests to enable postcopy but
     * before it starts.  Its value can depend on e.g. whether postcopy
     * preemption is enabled.
     */
    unsigned int postcopy_channels;
    /* QEMUFile for postcopy only; it'll be handled by a separate thread */
    QEMUFile *postcopy_qemufile_dst;
    /* Postcopy priority thread is used to receive postcopy requested pages */
    QemuThread postcopy_prio_thread;
    bool postcopy_prio_thread_created;
    /*
     * Used to sync between the ram load main thread and the fast ram load
     * thread.  It protects postcopy_qemufile_dst, which is the postcopy
     * fast channel.
     *
     * The fast ram load thread will hold it for most of its lifetime,
     * because it needs to continuously read data from the channel; it
     * only releases this mutex when postcopy is interrupted, so that
     * the ram load main thread can take the mutex over and properly
     * release the broken channel.
     */
    QemuMutex postcopy_prio_thread_mutex;
    /*
     * An array of temp host huge pages to be used, one for each postcopy
     * channel.
     */
    PostcopyTmpPage *postcopy_tmp_pages;
    /* This is shared for all postcopy channels */
    void     *postcopy_tmp_zero_page;
    /* PostCopyFD's for external userfaultfds & handlers of shared memory */
    GArray   *postcopy_remote_fds;

    QEMUBH *bh;

    int state;

    bool have_colo_incoming_thread;
    QemuThread colo_incoming_thread;
    /* The coroutine we should enter (back) after failover */
    Coroutine *migration_incoming_co;
    QemuSemaphore colo_incoming_sem;

    /*
     * PostcopyBlocktimeContext keeps information for postcopy
     * live migration, used to calculate vCPU block time
     */
    struct PostcopyBlocktimeContext *blocktime_ctx;

    /* notify PAUSED postcopy incoming migrations to try to continue */
    QemuSemaphore postcopy_pause_sem_dst;
    QemuSemaphore postcopy_pause_sem_fault;
    /*
     * This semaphore is used to allow the fast ram load thread (only when
     * postcopy preempt is enabled) to fall asleep when a network
     * interruption is detected.  When the recovery is done, the main load
     * thread will kick the fast ram load thread using this semaphore.
     */
    QemuSemaphore postcopy_pause_sem_fast_load;

    /* List of listening socket addresses */
    SocketAddressList *socket_address_list;

    /* A tree of pages that we requested from the source VM */
    GTree *page_requested;
    /* For debugging purposes only, but would be nice to keep */
    int page_requested_count;
    /*
     * The mutex helps to maintain the requested pages that we sent to the
     * source, IOW, to guarantee coherence between the page_requested tree
     * and the per-ramblock receivedmap.  Note!  This does not guarantee
     * consistency of the real page copy procedures (using UFFDIO_[ZERO]COPY).
     * E.g., even if one bit in receivedmap is cleared, UFFDIO_COPY could have
     * happened for that page already.  This is intended so that the mutex
     * won't be serialized and blocked by slow operations like the UFFDIO_*
     * ioctls.  However, this should be enough to make sure the
     * page_requested tree always contains valid information.  (A sketch of
     * the intended locking appears after this struct.)
     */
    QemuMutex page_request_mutex;
};
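
/*
 * A minimal sketch of the page_request_mutex discipline described above:
 * keep the page_requested tree and the receivedmap coherent under the
 * mutex, and do the slow UFFDIO_* work outside it.  Illustration only;
 * mark_request_received() is a hypothetical helper, not part of this header.
 */
#if 0   /* illustration only */
static void mark_request_received(MigrationIncomingState *mis,
                                  RAMBlock *rb, void *host_addr)
{
    qemu_mutex_lock(&mis->page_request_mutex);
    /* Keep the tree and the per-ramblock receivedmap coherent */
    if (g_tree_remove(mis->page_requested, host_addr)) {
        mis->page_requested_count--;
    }
    ramblock_recv_bitmap_set(rb, host_addr);
    qemu_mutex_unlock(&mis->page_request_mutex);
    /* The UFFDIO_COPY itself happens outside the mutex */
}
#endif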

MigrationIncomingState *migration_incoming_get_current(void);
void migration_incoming_state_destroy(void);
void migration_incoming_transport_cleanup(MigrationIncomingState *mis);
/*
 * Functions to work with blocktime context
 */
void fill_destination_postcopy_migration_info(MigrationInfo *info);

#define TYPE_MIGRATION "migration"

typedef struct MigrationClass MigrationClass;
DECLARE_OBJ_CHECKERS(MigrationState, MigrationClass,
                     MIGRATION_OBJ, TYPE_MIGRATION)

struct MigrationClass {
    /*< private >*/
    DeviceClass parent_class;
};
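
/*
 * DECLARE_OBJ_CHECKERS above generates the usual QOM cast/checker macros
 * for this type.  A minimal usage sketch (illustration only; the lookup
 * path here is hypothetical):
 */
#if 0   /* illustration only */
Object *obj = object_resolve_path_type("", TYPE_MIGRATION, NULL);
MigrationState *ms = MIGRATION_OBJ(obj);           /* checked downcast */
MigrationClass *mc = MIGRATION_OBJ_GET_CLASS(obj); /* class accessor */
#endif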

struct MigrationState {
    /*< private >*/
    DeviceState parent_obj;

    /*< public >*/
    QemuThread thread;
    QEMUBH *vm_start_bh;
    QEMUBH *cleanup_bh;
    /* Protected by qemu_file_lock */
    QEMUFile *to_dst_file;
    /* Postcopy specific transfer channel */
    QEMUFile *postcopy_qemufile_src;
    /*
     * It is posted when the preempt channel is established.  Note: this is
     * used for both the start and the recovery of a postcopy migration.
     * We'll post to this sem every time a new preempt channel is created in
     * the main thread, and we keep post() and wait() in pairs.
     */
    QemuSemaphore postcopy_qemufile_src_sem;
    QIOChannelBuffer *bioc;
    /*
     * Protects the to_dst_file/from_dst_file pointers.  We need to make sure
     * we won't yield or hang during the critical section, since this lock
     * will be used in the OOB command handler.  (A usage sketch appears
     * after this struct.)
     */
    QemuMutex qemu_file_lock;

    /*
     * Used to allow urgent requests to override rate limiting.
     */
    QemuSemaphore rate_limit_sem;

    /* pages already sent at the beginning of current iteration */
    uint64_t iteration_initial_pages;

    /* pages transferred per second */
    double pages_per_second;

    /* bytes already sent at the beginning of current iteration */
    uint64_t iteration_initial_bytes;
    /* time at the start of current iteration */
    int64_t iteration_start_time;
    /*
     * The final stage happens when the remaining data is smaller than
     * this threshold; it's calculated from the requested downtime and
     * measured bandwidth
     */
    int64_t threshold_size;

    /* params from 'migrate-set-parameters' */
    MigrationParameters parameters;

    int state;

    /* State related to return path */
    struct {
        /* Protected by qemu_file_lock */
        QEMUFile     *from_dst_file;
        QemuThread    rp_thread;
        bool          error;
        /*
         * We could also check whether rp_thread is non-zero, but there's no
         * "official" way to do that, so this bool makes it slightly more
         * elegant.  Checking from_dst_file for this is racy, because
         * from_dst_file will be cleared by the rp_thread!
         */
        bool          rp_thread_created;
        QemuSemaphore rp_sem;
    } rp_state;

    double mbps;
    /* Timestamp when recent migration starts (ms) */
    int64_t start_time;
    /* Total time used by latest migration (ms) */
    int64_t total_time;
    /* Timestamp when VM is down (ms) to migrate the last stuff */
    int64_t downtime_start;
    int64_t downtime;
    int64_t expected_downtime;
    bool enabled_capabilities[MIGRATION_CAPABILITY__MAX];
    int64_t setup_time;
    /*
     * Whether the guest was running when we entered the completion stage.
     * If migration is interrupted for any reason, we need to continue
     * running the guest on the source.
     */
    bool vm_was_running;

    /* Flag set once the migration has been asked to enter postcopy */
    bool start_postcopy;
    /* Flag set after postcopy has sent the device state */
    bool postcopy_after_devices;

    /* Flag set once the migration thread is running (and needs joining) */
    bool migration_thread_running;

    /* Flag set once the migration thread called bdrv_inactivate_all */
    bool block_inactive;

    /* Migration is waiting for guest to unplug device */
    QemuSemaphore wait_unplug_sem;

    /* Migration is paused due to pause-before-switchover */
    QemuSemaphore pause_sem;

    /* The semaphore is used to notify the COLO thread that failover is finished */
    QemuSemaphore colo_exit_sem;

    /* The event is used to notify the COLO thread to do a checkpoint */
    QemuEvent colo_checkpoint_event;
    int64_t colo_checkpoint_time;
    QEMUTimer *colo_delay_timer;

    /*
     * The first error that has occurred.  We use the mutex to be able to
     * return the first error message.
     */
    Error *error;
    /* mutex to protect the error field above */
    QemuMutex error_mutex;

    /* Do we have to clean up -b/-i from old migrate parameters */
    /* This feature is deprecated and will be removed */
    bool must_remove_block_options;

    /*
     * Global switch on whether we need to store the global state
     * during migration.
     */
    bool store_global_state;

    /* Whether we send QEMU_VM_CONFIGURATION during migration */
    bool send_configuration;
    /* Whether we send section footer during migration */
    bool send_section_footer;

    /* Needed by postcopy-pause state */
    QemuSemaphore postcopy_pause_sem;
    QemuSemaphore postcopy_pause_rp_sem;
    /*
     * Whether we abort the migration if decompression errors are
     * detected at the destination. It is left at false for qemu
     * older than 3.0, since only newer qemu sends streams that
     * do not trigger spurious decompression errors.
     */
    bool decompress_error_check;

    /*
     * This decides the size of guest memory chunk that will be used
     * to track dirty bitmap clearing.  The size of memory chunk will
     * be GUEST_PAGE_SIZE << N.  Say, N=0 means we will clear the dirty
     * bitmap for each page to send (1<<0=1); N=10 means we will clear
     * the dirty bitmap only once per 1<<10=1K contiguous guest pages
     * (which is a 4M chunk with 4K pages).
     */
    uint8_t clear_bitmap_shift;

    /*
     * This saves the hostname when outgoing migration starts
     */
    char *hostname;
};
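
/*
 * A minimal sketch of the qemu_file_lock discipline referenced above:
 * take the lock only to read or swap the file pointers, never around
 * anything that can yield or block.  Illustration only;
 * migrate_steal_to_dst_file() is a hypothetical helper.
 */
#if 0   /* illustration only */
static QEMUFile *migrate_steal_to_dst_file(MigrationState *s)
{
    QEMUFile *f;

    WITH_QEMU_LOCK_GUARD(&s->qemu_file_lock) {
        /* Only pointer manipulation inside the critical section */
        f = s->to_dst_file;
        s->to_dst_file = NULL;
    }
    /* Potentially slow teardown (e.g. qemu_fclose()) happens outside */
    return f;
}
#endif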

void migrate_set_state(int *state, int old_state, int new_state);

void migration_fd_process_incoming(QEMUFile *f, Error **errp);
void migration_ioc_process_incoming(QIOChannel *ioc, Error **errp);
void migration_incoming_process(void);

bool  migration_has_all_channels(void);

uint64_t migrate_max_downtime(void);

void migrate_set_error(MigrationState *s, const Error *error);
void migrate_fd_error(MigrationState *s, const Error *error);

void migrate_fd_connect(MigrationState *s, Error *error_in);

bool migration_is_setup_or_active(int state);
bool migration_is_running(int state);

void migrate_init(MigrationState *s);
bool migration_is_blocked(Error **errp);
/* True if outgoing migration has entered postcopy phase */
bool migration_in_postcopy(void);
MigrationState *migrate_get_current(void);

bool migrate_postcopy(void);

bool migrate_release_ram(void);
bool migrate_postcopy_ram(void);
bool migrate_zero_blocks(void);
bool migrate_dirty_bitmaps(void);
bool migrate_ignore_shared(void);
bool migrate_validate_uuid(void);

bool migrate_auto_converge(void);
bool migrate_use_multifd(void);
bool migrate_pause_before_switchover(void);
int migrate_multifd_channels(void);
MultiFDCompression migrate_multifd_compression(void);
int migrate_multifd_zlib_level(void);
int migrate_multifd_zstd_level(void);

#ifdef CONFIG_LINUX
bool migrate_use_zero_copy_send(void);
#else
#define migrate_use_zero_copy_send() (false)
#endif
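
/*
 * The #else stub above lets callers test migrate_use_zero_copy_send()
 * unconditionally: on non-Linux builds it evaluates to false at compile
 * time, so call sites need no CONFIG_LINUX guards of their own.
 */
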
int migrate_use_tls(void);
int migrate_use_xbzrle(void);
uint64_t migrate_xbzrle_cache_size(void);
bool migrate_colo_enabled(void);

bool migrate_use_block(void);
bool migrate_use_block_incremental(void);
int migrate_max_cpu_throttle(void);
bool migrate_use_return_path(void);

uint64_t ram_get_total_transferred_pages(void);

bool migrate_use_compression(void);
int migrate_compress_level(void);
int migrate_compress_threads(void);
int migrate_compress_wait_thread(void);
int migrate_decompress_threads(void);
bool migrate_use_events(void);
bool migrate_postcopy_blocktime(void);
bool migrate_background_snapshot(void);
bool migrate_postcopy_preempt(void);

/* Sending on the return path - generic and then for each message type */
void migrate_send_rp_shut(MigrationIncomingState *mis,
                          uint32_t value);
void migrate_send_rp_pong(MigrationIncomingState *mis,
                          uint32_t value);
int migrate_send_rp_req_pages(MigrationIncomingState *mis, RAMBlock *rb,
                              ram_addr_t start, uint64_t haddr);
int migrate_send_rp_message_req_pages(MigrationIncomingState *mis,
                                      RAMBlock *rb, ram_addr_t start);
void migrate_send_rp_recv_bitmap(MigrationIncomingState *mis,
                                 char *block_name);
void migrate_send_rp_resume_ack(MigrationIncomingState *mis, uint32_t value);

void dirty_bitmap_mig_before_vm_start(void);
void dirty_bitmap_mig_cancel_outgoing(void);
void dirty_bitmap_mig_cancel_incoming(void);
bool check_dirty_bitmap_mig_alias_map(const BitmapMigrationNodeAliasList *bbm,
                                      Error **errp);

void migrate_add_address(SocketAddress *address);

int foreach_not_ignored_block(RAMBlockIterFunc func, void *opaque);

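/*
 * qemu_ram_foreach_block is deliberately poisoned below: any use of it in
 * migration code expands to invalid tokens and fails to compile, steering
 * callers to foreach_not_ignored_block() instead.
 */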
#define qemu_ram_foreach_block \
  #warning "Use foreach_not_ignored_block in migration code"

void migration_make_urgent_request(void);
void migration_consume_urgent_request(void);
bool migration_rate_limit(void);
void migration_cancel(const Error *error);

void populate_vfio_info(MigrationInfo *info);
void postcopy_temp_page_reset(PostcopyTmpPage *tmp_page);

bool migrate_multi_channels_is_allowed(void);
void migrate_protocol_allow_multi_channels(bool allow);

#endif
477