xref: /qemu/migration/options.c (revision aef04fc7)
1 /*
2  * QEMU migration capabilities
3  *
4  * Copyright (c) 2012-2023 Red Hat Inc
5  *
6  * Authors:
7  *   Orit Wasserman <owasserm@redhat.com>
8  *   Juan Quintela <quintela@redhat.com>
9  *
10  * This work is licensed under the terms of the GNU GPL, version 2 or later.
11  * See the COPYING file in the top-level directory.
12  */
13 
14 #include "qemu/osdep.h"
15 #include "qapi/clone-visitor.h"
16 #include "qapi/error.h"
17 #include "qapi/qapi-commands-migration.h"
18 #include "qapi/qapi-visit-migration.h"
19 #include "qapi/qmp/qerror.h"
20 #include "sysemu/runstate.h"
21 #include "migration/misc.h"
22 #include "migration.h"
23 #include "ram.h"
24 #include "options.h"
25 
26 bool migrate_auto_converge(void)
27 {
28     MigrationState *s;
29 
30     s = migrate_get_current();
31 
32     return s->capabilities[MIGRATION_CAPABILITY_AUTO_CONVERGE];
33 }
34 
35 bool migrate_background_snapshot(void)
36 {
37     MigrationState *s;
38 
39     s = migrate_get_current();
40 
41     return s->capabilities[MIGRATION_CAPABILITY_BACKGROUND_SNAPSHOT];
42 }
43 
44 bool migrate_block(void)
45 {
46     MigrationState *s;
47 
48     s = migrate_get_current();
49 
50     return s->capabilities[MIGRATION_CAPABILITY_BLOCK];
51 }
52 
bool migrate_colo(void)
{
    /* True when the x-colo migration capability is enabled. */
    MigrationState *s = migrate_get_current();
    return s->capabilities[MIGRATION_CAPABILITY_X_COLO];
}
58 
59 bool migrate_compress(void)
60 {
61     MigrationState *s;
62 
63     s = migrate_get_current();
64 
65     return s->capabilities[MIGRATION_CAPABILITY_COMPRESS];
66 }
67 
68 bool migrate_dirty_bitmaps(void)
69 {
70     MigrationState *s;
71 
72     s = migrate_get_current();
73 
74     return s->capabilities[MIGRATION_CAPABILITY_DIRTY_BITMAPS];
75 }
76 
77 bool migrate_events(void)
78 {
79     MigrationState *s;
80 
81     s = migrate_get_current();
82 
83     return s->capabilities[MIGRATION_CAPABILITY_EVENTS];
84 }
85 
86 bool migrate_ignore_shared(void)
87 {
88     MigrationState *s;
89 
90     s = migrate_get_current();
91 
92     return s->capabilities[MIGRATION_CAPABILITY_X_IGNORE_SHARED];
93 }
94 
95 bool migrate_late_block_activate(void)
96 {
97     MigrationState *s;
98 
99     s = migrate_get_current();
100 
101     return s->capabilities[MIGRATION_CAPABILITY_LATE_BLOCK_ACTIVATE];
102 }
103 
104 bool migrate_multifd(void)
105 {
106     MigrationState *s;
107 
108     s = migrate_get_current();
109 
110     return s->capabilities[MIGRATION_CAPABILITY_MULTIFD];
111 }
112 
113 bool migrate_pause_before_switchover(void)
114 {
115     MigrationState *s;
116 
117     s = migrate_get_current();
118 
119     return s->capabilities[MIGRATION_CAPABILITY_PAUSE_BEFORE_SWITCHOVER];
120 }
121 
122 bool migrate_postcopy_blocktime(void)
123 {
124     MigrationState *s;
125 
126     s = migrate_get_current();
127 
128     return s->capabilities[MIGRATION_CAPABILITY_POSTCOPY_BLOCKTIME];
129 }
130 
131 bool migrate_postcopy_preempt(void)
132 {
133     MigrationState *s;
134 
135     s = migrate_get_current();
136 
137     return s->capabilities[MIGRATION_CAPABILITY_POSTCOPY_PREEMPT];
138 }
139 
140 bool migrate_postcopy_ram(void)
141 {
142     MigrationState *s;
143 
144     s = migrate_get_current();
145 
146     return s->capabilities[MIGRATION_CAPABILITY_POSTCOPY_RAM];
147 }
148 
bool migrate_rdma_pin_all(void)
{
    /* True when the rdma-pin-all migration capability is enabled. */
    MigrationState *s = migrate_get_current();

    return s->capabilities[MIGRATION_CAPABILITY_RDMA_PIN_ALL];
}
155 
156 bool migrate_release_ram(void)
157 {
158     MigrationState *s;
159 
160     s = migrate_get_current();
161 
162     return s->capabilities[MIGRATION_CAPABILITY_RELEASE_RAM];
163 }
164 
165 bool migrate_return_path(void)
166 {
167     MigrationState *s;
168 
169     s = migrate_get_current();
170 
171     return s->capabilities[MIGRATION_CAPABILITY_RETURN_PATH];
172 }
173 
174 bool migrate_validate_uuid(void)
175 {
176     MigrationState *s;
177 
178     s = migrate_get_current();
179 
180     return s->capabilities[MIGRATION_CAPABILITY_VALIDATE_UUID];
181 }
182 
183 bool migrate_xbzrle(void)
184 {
185     MigrationState *s;
186 
187     s = migrate_get_current();
188 
189     return s->capabilities[MIGRATION_CAPABILITY_XBZRLE];
190 }
191 
192 bool migrate_zero_blocks(void)
193 {
194     MigrationState *s;
195 
196     s = migrate_get_current();
197 
198     return s->capabilities[MIGRATION_CAPABILITY_ZERO_BLOCKS];
199 }
200 
201 bool migrate_zero_copy_send(void)
202 {
203     MigrationState *s;
204 
205     s = migrate_get_current();
206 
207     return s->capabilities[MIGRATION_CAPABILITY_ZERO_COPY_SEND];
208 }
209 
210 /* pseudo capabilities */
211 
212 bool migrate_postcopy(void)
213 {
214     return migrate_postcopy_ram() || migrate_dirty_bitmaps();
215 }
216 
/*
 * Host support level for UFFD-based write tracking, as reported by
 * migrate_query_write_tracking() below.
 */
typedef enum WriteTrackingSupport {
    WT_SUPPORT_UNKNOWN = 0,
    WT_SUPPORT_ABSENT,      /* kernel lacks the required UFFD features */
    WT_SUPPORT_AVAILABLE,   /* kernel OK, but memory config incompatible */
    WT_SUPPORT_COMPATIBLE   /* kernel OK and memory config compatible */
} WriteTrackingSupport;
223 
224 static
225 WriteTrackingSupport migrate_query_write_tracking(void)
226 {
227     /* Check if kernel supports required UFFD features */
228     if (!ram_write_tracking_available()) {
229         return WT_SUPPORT_ABSENT;
230     }
231     /*
232      * Check if current memory configuration is
233      * compatible with required UFFD features.
234      */
235     if (!ram_write_tracking_compatible()) {
236         return WT_SUPPORT_AVAILABLE;
237     }
238 
239     return WT_SUPPORT_COMPATIBLE;
240 }
241 
/* A fixed set of migration capabilities, used for compatibility checks. */
struct MigrateCapsSet {
    int size;                       /* Capability set size */
    MigrationCapability caps[];     /* Flexible array of capabilities */
};
typedef struct MigrateCapsSet MigrateCapsSet;
248 
/*
 * Define and initialize a MigrateCapsSet named @_name; .size is derived
 * at compile time from the number of variadic capability arguments.
 */
#define INITIALIZE_MIGRATE_CAPS_SET(_name, ...)   \
    MigrateCapsSet _name = {    \
        .size = sizeof((int []) { __VA_ARGS__ }) / sizeof(int), \
        .caps = { __VA_ARGS__ } \
    }
255 
/*
 * Capabilities that are incompatible with background-snapshot;
 * consulted by migrate_caps_check() when the capability is enabled.
 */
static const
INITIALIZE_MIGRATE_CAPS_SET(check_caps_background_snapshot,
    MIGRATION_CAPABILITY_POSTCOPY_RAM,
    MIGRATION_CAPABILITY_DIRTY_BITMAPS,
    MIGRATION_CAPABILITY_POSTCOPY_BLOCKTIME,
    MIGRATION_CAPABILITY_LATE_BLOCK_ACTIVATE,
    MIGRATION_CAPABILITY_RETURN_PATH,
    MIGRATION_CAPABILITY_MULTIFD,
    MIGRATION_CAPABILITY_PAUSE_BEFORE_SWITCHOVER,
    MIGRATION_CAPABILITY_AUTO_CONVERGE,
    MIGRATION_CAPABILITY_RELEASE_RAM,
    MIGRATION_CAPABILITY_RDMA_PIN_ALL,
    MIGRATION_CAPABILITY_COMPRESS,
    MIGRATION_CAPABILITY_XBZRLE,
    MIGRATION_CAPABILITY_X_COLO,
    MIGRATION_CAPABILITY_VALIDATE_UUID,
    MIGRATION_CAPABILITY_ZERO_COPY_SEND);
274 
/**
 * migrate_caps_check - check capability compatibility
 *
 * @old_caps: old capability list, indexed by MigrationCapability
 * @new_caps: proposed new capability list (same indexing)
 * @errp: set *errp if the check failed, with reason
 *
 * Returns true if check passed, otherwise false.
 */
bool migrate_caps_check(bool *old_caps, bool *new_caps, Error **errp)
{
    MigrationIncomingState *mis = migration_incoming_get_current();

    /* Reject capabilities whose backing code was compiled out. */
#ifndef CONFIG_LIVE_BLOCK_MIGRATION
    if (new_caps[MIGRATION_CAPABILITY_BLOCK]) {
        error_setg(errp, "QEMU compiled without old-style (blk/-b, inc/-i) "
                   "block migration");
        error_append_hint(errp, "Use drive_mirror+NBD instead.\n");
        return false;
    }
#endif

#ifndef CONFIG_REPLICATION
    if (new_caps[MIGRATION_CAPABILITY_X_COLO]) {
        error_setg(errp, "QEMU compiled without replication module"
                   " can't enable COLO");
        error_append_hint(errp, "Please enable replication before COLO.\n");
        return false;
    }
#endif

    if (new_caps[MIGRATION_CAPABILITY_POSTCOPY_RAM]) {
        /* This check is reasonably expensive, so only when it's being
         * set the first time, also it's only the destination that needs
         * special support.
         */
        if (!old_caps[MIGRATION_CAPABILITY_POSTCOPY_RAM] &&
            runstate_check(RUN_STATE_INMIGRATE) &&
            !postcopy_ram_supported_by_host(mis)) {
            /* postcopy_ram_supported_by_host will have emitted a more
             * detailed message
             */
            error_setg(errp, "Postcopy is not supported");
            return false;
        }

        if (new_caps[MIGRATION_CAPABILITY_X_IGNORE_SHARED]) {
            error_setg(errp, "Postcopy is not compatible with ignore-shared");
            return false;
        }
    }

    if (new_caps[MIGRATION_CAPABILITY_BACKGROUND_SNAPSHOT]) {
        WriteTrackingSupport wt_support;
        int idx;
        /*
         * Check if 'background-snapshot' capability is supported by
         * host kernel and compatible with guest memory configuration.
         */
        wt_support = migrate_query_write_tracking();
        if (wt_support < WT_SUPPORT_AVAILABLE) {
            error_setg(errp, "Background-snapshot is not supported by host kernel");
            return false;
        }
        if (wt_support < WT_SUPPORT_COMPATIBLE) {
            error_setg(errp, "Background-snapshot is not compatible "
                    "with guest memory configuration");
            return false;
        }

        /*
         * Check if there are any migration capabilities
         * incompatible with 'background-snapshot'.
         */
        for (idx = 0; idx < check_caps_background_snapshot.size; idx++) {
            int incomp_cap = check_caps_background_snapshot.caps[idx];
            if (new_caps[incomp_cap]) {
                error_setg(errp,
                        "Background-snapshot is not compatible with %s",
                        MigrationCapability_str(incomp_cap));
                return false;
            }
        }
    }

#ifdef CONFIG_LINUX
    /* Zero-copy requires plain (uncompressed, non-TLS) multifd channels. */
    if (new_caps[MIGRATION_CAPABILITY_ZERO_COPY_SEND] &&
        (!new_caps[MIGRATION_CAPABILITY_MULTIFD] ||
         new_caps[MIGRATION_CAPABILITY_COMPRESS] ||
         new_caps[MIGRATION_CAPABILITY_XBZRLE] ||
         migrate_multifd_compression() ||
         migrate_use_tls())) {
        error_setg(errp,
                   "Zero copy only available for non-compressed non-TLS multifd migration");
        return false;
    }
#else
    if (new_caps[MIGRATION_CAPABILITY_ZERO_COPY_SEND]) {
        error_setg(errp,
                   "Zero copy currently only available on Linux");
        return false;
    }
#endif

    if (new_caps[MIGRATION_CAPABILITY_POSTCOPY_PREEMPT]) {
        if (!new_caps[MIGRATION_CAPABILITY_POSTCOPY_RAM]) {
            error_setg(errp, "Postcopy preempt requires postcopy-ram");
            return false;
        }

        /*
         * Preempt mode requires urgent pages to be sent in separate
         * channel, OTOH compression logic will disorder all pages into
         * different compression channels, which is not compatible with the
         * preempt assumptions on channel assignments.
         */
        if (new_caps[MIGRATION_CAPABILITY_COMPRESS]) {
            error_setg(errp, "Postcopy preempt not compatible with compress");
            return false;
        }
    }

    if (new_caps[MIGRATION_CAPABILITY_MULTIFD]) {
        if (new_caps[MIGRATION_CAPABILITY_COMPRESS]) {
            error_setg(errp, "Multifd is not compatible with compress");
            return false;
        }
    }

    return true;
}
406 
407 bool migrate_cap_set(int cap, bool value, Error **errp)
408 {
409     MigrationState *s = migrate_get_current();
410     bool new_caps[MIGRATION_CAPABILITY__MAX];
411 
412     if (migration_is_running(s->state)) {
413         error_setg(errp, QERR_MIGRATION_ACTIVE);
414         return false;
415     }
416 
417     memcpy(new_caps, s->capabilities, sizeof(new_caps));
418     new_caps[cap] = value;
419 
420     if (!migrate_caps_check(s->capabilities, new_caps, errp)) {
421         return false;
422     }
423     s->capabilities[cap] = value;
424     return true;
425 }
426 
427 MigrationCapabilityStatusList *qmp_query_migrate_capabilities(Error **errp)
428 {
429     MigrationCapabilityStatusList *head = NULL, **tail = &head;
430     MigrationCapabilityStatus *caps;
431     MigrationState *s = migrate_get_current();
432     int i;
433 
434     for (i = 0; i < MIGRATION_CAPABILITY__MAX; i++) {
435 #ifndef CONFIG_LIVE_BLOCK_MIGRATION
436         if (i == MIGRATION_CAPABILITY_BLOCK) {
437             continue;
438         }
439 #endif
440         caps = g_malloc0(sizeof(*caps));
441         caps->capability = i;
442         caps->state = s->capabilities[i];
443         QAPI_LIST_APPEND(tail, caps);
444     }
445 
446     return head;
447 }
448 
449 void qmp_migrate_set_capabilities(MigrationCapabilityStatusList *params,
450                                   Error **errp)
451 {
452     MigrationState *s = migrate_get_current();
453     MigrationCapabilityStatusList *cap;
454     bool new_caps[MIGRATION_CAPABILITY__MAX];
455 
456     if (migration_is_running(s->state)) {
457         error_setg(errp, QERR_MIGRATION_ACTIVE);
458         return;
459     }
460 
461     memcpy(new_caps, s->capabilities, sizeof(new_caps));
462     for (cap = params; cap; cap = cap->next) {
463         new_caps[cap->value->capability] = cap->value->state;
464     }
465 
466     if (!migrate_caps_check(s->capabilities, new_caps, errp)) {
467         return;
468     }
469 
470     for (cap = params; cap; cap = cap->next) {
471         s->capabilities[cap->value->capability] = cap->value->state;
472     }
473 }
474 
475 /* parameters */
476 
477 bool migrate_block_incremental(void)
478 {
479     MigrationState *s;
480 
481     s = migrate_get_current();
482 
483     return s->parameters.block_incremental;
484 }
485 
486 uint32_t migrate_checkpoint_delay(void)
487 {
488     MigrationState *s;
489 
490     s = migrate_get_current();
491 
492     return s->parameters.x_checkpoint_delay;
493 }
494 
495 int migrate_compress_level(void)
496 {
497     MigrationState *s;
498 
499     s = migrate_get_current();
500 
501     return s->parameters.compress_level;
502 }
503 
504 int migrate_compress_threads(void)
505 {
506     MigrationState *s;
507 
508     s = migrate_get_current();
509 
510     return s->parameters.compress_threads;
511 }
512 
513 int migrate_compress_wait_thread(void)
514 {
515     MigrationState *s;
516 
517     s = migrate_get_current();
518 
519     return s->parameters.compress_wait_thread;
520 }
521 
522 uint8_t migrate_cpu_throttle_increment(void)
523 {
524     MigrationState *s;
525 
526     s = migrate_get_current();
527 
528     return s->parameters.cpu_throttle_increment;
529 }
530 
531 uint8_t migrate_cpu_throttle_initial(void)
532 {
533     MigrationState *s;
534 
535     s = migrate_get_current();
536 
537     return s->parameters.cpu_throttle_initial;
538 }
539 
540 bool migrate_cpu_throttle_tailslow(void)
541 {
542     MigrationState *s;
543 
544     s = migrate_get_current();
545 
546     return s->parameters.cpu_throttle_tailslow;
547 }
548 
549 int migrate_decompress_threads(void)
550 {
551     MigrationState *s;
552 
553     s = migrate_get_current();
554 
555     return s->parameters.decompress_threads;
556 }
557 
558 uint8_t migrate_max_cpu_throttle(void)
559 {
560     MigrationState *s;
561 
562     s = migrate_get_current();
563 
564     return s->parameters.max_cpu_throttle;
565 }
566 
567 uint64_t migrate_max_bandwidth(void)
568 {
569     MigrationState *s;
570 
571     s = migrate_get_current();
572 
573     return s->parameters.max_bandwidth;
574 }
575 
576 int64_t migrate_max_postcopy_bandwidth(void)
577 {
578     MigrationState *s;
579 
580     s = migrate_get_current();
581 
582     return s->parameters.max_postcopy_bandwidth;
583 }
584 
585 int migrate_multifd_channels(void)
586 {
587     MigrationState *s;
588 
589     s = migrate_get_current();
590 
591     return s->parameters.multifd_channels;
592 }
593 
594 MultiFDCompression migrate_multifd_compression(void)
595 {
596     MigrationState *s;
597 
598     s = migrate_get_current();
599 
600     assert(s->parameters.multifd_compression < MULTIFD_COMPRESSION__MAX);
601     return s->parameters.multifd_compression;
602 }
603 
604 int migrate_multifd_zlib_level(void)
605 {
606     MigrationState *s;
607 
608     s = migrate_get_current();
609 
610     return s->parameters.multifd_zlib_level;
611 }
612 
613 int migrate_multifd_zstd_level(void)
614 {
615     MigrationState *s;
616 
617     s = migrate_get_current();
618 
619     return s->parameters.multifd_zstd_level;
620 }
621 
622 uint8_t migrate_throttle_trigger_threshold(void)
623 {
624     MigrationState *s;
625 
626     s = migrate_get_current();
627 
628     return s->parameters.throttle_trigger_threshold;
629 }
630 
631 uint64_t migrate_xbzrle_cache_size(void)
632 {
633     MigrationState *s;
634 
635     s = migrate_get_current();
636 
637     return s->parameters.xbzrle_cache_size;
638 }
639 
640 /* parameters helpers */
641 
AnnounceParameters *migrate_announce_params(void)
{
    /*
     * Snapshot of the announce-* migration parameters.
     *
     * NOTE(review): returns a pointer to static storage that is
     * overwritten on every call — presumably only used from a single
     * thread; confirm before calling concurrently.
     */
    static AnnounceParameters ap;

    MigrationState *s = migrate_get_current();

    ap.initial = s->parameters.announce_initial;
    ap.max = s->parameters.announce_max;
    ap.rounds = s->parameters.announce_rounds;
    ap.step = s->parameters.announce_step;

    return &ap;
}
655 
/*
 * QMP query-migrate-parameters: build a MigrationParameters QAPI struct
 * mirroring the current migration parameters.  Ownership of the
 * heap-allocated result (including duplicated strings) is transferred
 * to the caller.
 */
MigrationParameters *qmp_query_migrate_parameters(Error **errp)
{
    MigrationParameters *params;
    MigrationState *s = migrate_get_current();

    /* TODO use QAPI_CLONE() instead of duplicating it inline */
    params = g_malloc0(sizeof(*params));
    params->has_compress_level = true;
    params->compress_level = s->parameters.compress_level;
    params->has_compress_threads = true;
    params->compress_threads = s->parameters.compress_threads;
    params->has_compress_wait_thread = true;
    params->compress_wait_thread = s->parameters.compress_wait_thread;
    params->has_decompress_threads = true;
    params->decompress_threads = s->parameters.decompress_threads;
    params->has_throttle_trigger_threshold = true;
    params->throttle_trigger_threshold = s->parameters.throttle_trigger_threshold;
    params->has_cpu_throttle_initial = true;
    params->cpu_throttle_initial = s->parameters.cpu_throttle_initial;
    params->has_cpu_throttle_increment = true;
    params->cpu_throttle_increment = s->parameters.cpu_throttle_increment;
    params->has_cpu_throttle_tailslow = true;
    params->cpu_throttle_tailslow = s->parameters.cpu_throttle_tailslow;
    params->tls_creds = g_strdup(s->parameters.tls_creds);
    params->tls_hostname = g_strdup(s->parameters.tls_hostname);
    /* tls_authz is reported as "" when unset */
    params->tls_authz = g_strdup(s->parameters.tls_authz ?
                                 s->parameters.tls_authz : "");
    params->has_max_bandwidth = true;
    params->max_bandwidth = s->parameters.max_bandwidth;
    params->has_downtime_limit = true;
    params->downtime_limit = s->parameters.downtime_limit;
    params->has_x_checkpoint_delay = true;
    params->x_checkpoint_delay = s->parameters.x_checkpoint_delay;
    params->has_block_incremental = true;
    params->block_incremental = s->parameters.block_incremental;
    params->has_multifd_channels = true;
    params->multifd_channels = s->parameters.multifd_channels;
    params->has_multifd_compression = true;
    params->multifd_compression = s->parameters.multifd_compression;
    params->has_multifd_zlib_level = true;
    params->multifd_zlib_level = s->parameters.multifd_zlib_level;
    params->has_multifd_zstd_level = true;
    params->multifd_zstd_level = s->parameters.multifd_zstd_level;
    params->has_xbzrle_cache_size = true;
    params->xbzrle_cache_size = s->parameters.xbzrle_cache_size;
    params->has_max_postcopy_bandwidth = true;
    params->max_postcopy_bandwidth = s->parameters.max_postcopy_bandwidth;
    params->has_max_cpu_throttle = true;
    params->max_cpu_throttle = s->parameters.max_cpu_throttle;
    params->has_announce_initial = true;
    params->announce_initial = s->parameters.announce_initial;
    params->has_announce_max = true;
    params->announce_max = s->parameters.announce_max;
    params->has_announce_rounds = true;
    params->announce_rounds = s->parameters.announce_rounds;
    params->has_announce_step = true;
    params->announce_step = s->parameters.announce_step;

    /* block_bitmap_mapping is optional; deep-copy it only when set. */
    if (s->parameters.has_block_bitmap_mapping) {
        params->has_block_bitmap_mapping = true;
        params->block_bitmap_mapping =
            QAPI_CLONE(BitmapMigrationNodeAliasList,
                       s->parameters.block_bitmap_mapping);
    }

    return params;
}
723