1 /* 2 * QEMU System Emulator 3 * 4 * Copyright (c) 2003-2008 Fabrice Bellard 5 * Copyright (c) 2009-2015 Red Hat Inc 6 * 7 * Authors: 8 * Juan Quintela <quintela@redhat.com> 9 * 10 * Permission is hereby granted, free of charge, to any person obtaining a copy 11 * of this software and associated documentation files (the "Software"), to deal 12 * in the Software without restriction, including without limitation the rights 13 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 14 * copies of the Software, and to permit persons to whom the Software is 15 * furnished to do so, subject to the following conditions: 16 * 17 * The above copyright notice and this permission notice shall be included in 18 * all copies or substantial portions of the Software. 19 * 20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 23 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 24 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 25 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 26 * THE SOFTWARE. 27 */ 28 29 #include "qemu/osdep.h" 30 #include "hw/boards.h" 31 #include "net/net.h" 32 #include "migration.h" 33 #include "migration/snapshot.h" 34 #include "migration-stats.h" 35 #include "migration/vmstate.h" 36 #include "migration/misc.h" 37 #include "migration/register.h" 38 #include "migration/global_state.h" 39 #include "migration/channel-block.h" 40 #include "ram.h" 41 #include "qemu-file.h" 42 #include "savevm.h" 43 #include "postcopy-ram.h" 44 #include "qapi/error.h" 45 #include "qapi/qapi-commands-migration.h" 46 #include "qapi/clone-visitor.h" 47 #include "qapi/qapi-builtin-visit.h" 48 #include "qapi/qmp/qerror.h" 49 #include "qemu/error-report.h" 50 #include "sysemu/cpus.h" 51 #include "exec/memory.h" 52 #include "exec/target_page.h" 53 #include "trace.h" 54 #include "qemu/iov.h" 55 #include "qemu/job.h" 56 #include "qemu/main-loop.h" 57 #include "block/snapshot.h" 58 #include "qemu/cutils.h" 59 #include "io/channel-buffer.h" 60 #include "io/channel-file.h" 61 #include "sysemu/replay.h" 62 #include "sysemu/runstate.h" 63 #include "sysemu/sysemu.h" 64 #include "sysemu/xen.h" 65 #include "migration/colo.h" 66 #include "qemu/bitmap.h" 67 #include "net/announce.h" 68 #include "qemu/yank.h" 69 #include "yank_functions.h" 70 #include "sysemu/qtest.h" 71 #include "options.h" 72 73 const unsigned int postcopy_ram_discard_version; 74 75 /* Subcommands for QEMU_VM_COMMAND */ 76 enum qemu_vm_cmd { 77 MIG_CMD_INVALID = 0, /* Must be 0 */ 78 MIG_CMD_OPEN_RETURN_PATH, /* Tell the dest to open the Return path */ 79 MIG_CMD_PING, /* Request a PONG on the RP */ 80 81 MIG_CMD_POSTCOPY_ADVISE, /* Prior to any page transfers, just 82 warn we might want to do PC */ 83 MIG_CMD_POSTCOPY_LISTEN, /* Start listening for incoming 84 pages as it's running. */ 85 MIG_CMD_POSTCOPY_RUN, /* Start execution */ 86 87 MIG_CMD_POSTCOPY_RAM_DISCARD, /* A list of pages to discard that 88 were previously sent during 89 precopy but are dirty. */ 90 MIG_CMD_PACKAGED, /* Send a wrapped stream within this stream */ 91 MIG_CMD_ENABLE_COLO, /* Enable COLO */ 92 MIG_CMD_POSTCOPY_RESUME, /* resume postcopy on dest */ 93 MIG_CMD_RECV_BITMAP, /* Request for recved bitmap on dst */ 94 MIG_CMD_MAX 95 }; 96 97 #define MAX_VM_CMD_PACKAGED_SIZE UINT32_MAX 98 static struct mig_cmd_args { 99 ssize_t len; /* -1 = variable */ 100 const char *name; 101 } mig_cmd_args[] = { 102 [MIG_CMD_INVALID] = { .len = -1, .name = "INVALID" }, 103 [MIG_CMD_OPEN_RETURN_PATH] = { .len = 0, .name = "OPEN_RETURN_PATH" }, 104 [MIG_CMD_PING] = { .len = sizeof(uint32_t), .name = "PING" }, 105 [MIG_CMD_POSTCOPY_ADVISE] = { .len = -1, .name = "POSTCOPY_ADVISE" }, 106 [MIG_CMD_POSTCOPY_LISTEN] = { .len = 0, .name = "POSTCOPY_LISTEN" }, 107 [MIG_CMD_POSTCOPY_RUN] = { .len = 0, .name = "POSTCOPY_RUN" }, 108 [MIG_CMD_POSTCOPY_RAM_DISCARD] = { 109 .len = -1, .name = "POSTCOPY_RAM_DISCARD" }, 110 [MIG_CMD_POSTCOPY_RESUME] = { .len = 0, .name = "POSTCOPY_RESUME" }, 111 [MIG_CMD_PACKAGED] = { .len = 4, .name = "PACKAGED" }, 112 [MIG_CMD_RECV_BITMAP] = { .len = -1, .name = "RECV_BITMAP" }, 113 [MIG_CMD_MAX] = { .len = -1, .name = "MAX" }, 114 }; 115 116 /* Note for MIG_CMD_POSTCOPY_ADVISE: 117 * The format of arguments is depending on postcopy mode: 118 * - postcopy RAM only 119 * uint64_t host page size 120 * uint64_t target page size 121 * 122 * - postcopy RAM and postcopy dirty bitmaps 123 * format is the same as for postcopy RAM only 124 * 125 * - postcopy dirty bitmaps only 126 * Nothing. Command length field is 0. 127 * 128 * Be careful: adding a new postcopy entity with some other parameters should 129 * not break format self-description ability. Good way is to introduce some 130 * generic extendable format with an exception for two old entities. 131 */ 132 133 /***********************************************************/ 134 /* savevm/loadvm support */ 135 136 static QEMUFile *qemu_fopen_bdrv(BlockDriverState *bs, int is_writable) 137 { 138 if (is_writable) { 139 return qemu_file_new_output(QIO_CHANNEL(qio_channel_block_new(bs))); 140 } else { 141 return qemu_file_new_input(QIO_CHANNEL(qio_channel_block_new(bs))); 142 } 143 } 144 145 146 /* QEMUFile timer support. 147 * Not in qemu-file.c to not add qemu-timer.c as dependency to qemu-file.c 148 */ 149 150 void timer_put(QEMUFile *f, QEMUTimer *ts) 151 { 152 uint64_t expire_time; 153 154 expire_time = timer_expire_time_ns(ts); 155 qemu_put_be64(f, expire_time); 156 } 157 158 void timer_get(QEMUFile *f, QEMUTimer *ts) 159 { 160 uint64_t expire_time; 161 162 expire_time = qemu_get_be64(f); 163 if (expire_time != -1) { 164 timer_mod_ns(ts, expire_time); 165 } else { 166 timer_del(ts); 167 } 168 } 169 170 171 /* VMState timer support. 172 * Not in vmstate.c to not add qemu-timer.c as dependency to vmstate.c 173 */ 174 175 static int get_timer(QEMUFile *f, void *pv, size_t size, 176 const VMStateField *field) 177 { 178 QEMUTimer *v = pv; 179 timer_get(f, v); 180 return 0; 181 } 182 183 static int put_timer(QEMUFile *f, void *pv, size_t size, 184 const VMStateField *field, JSONWriter *vmdesc) 185 { 186 QEMUTimer *v = pv; 187 timer_put(f, v); 188 189 return 0; 190 } 191 192 const VMStateInfo vmstate_info_timer = { 193 .name = "timer", 194 .get = get_timer, 195 .put = put_timer, 196 }; 197 198 199 typedef struct CompatEntry { 200 char idstr[256]; 201 int instance_id; 202 } CompatEntry; 203 204 typedef struct SaveStateEntry { 205 QTAILQ_ENTRY(SaveStateEntry) entry; 206 char idstr[256]; 207 uint32_t instance_id; 208 int alias_id; 209 int version_id; 210 /* version id read from the stream */ 211 int load_version_id; 212 int section_id; 213 /* section id read from the stream */ 214 int load_section_id; 215 const SaveVMHandlers *ops; 216 const VMStateDescription *vmsd; 217 void *opaque; 218 CompatEntry *compat; 219 int is_ram; 220 } SaveStateEntry; 221 222 typedef struct SaveState { 223 QTAILQ_HEAD(, SaveStateEntry) handlers; 224 SaveStateEntry *handler_pri_head[MIG_PRI_MAX + 1]; 225 int global_section_id; 226 uint32_t len; 227 const char *name; 228 uint32_t target_page_bits; 229 uint32_t caps_count; 230 MigrationCapability *capabilities; 231 QemuUUID uuid; 232 } SaveState; 233 234 static SaveState savevm_state = { 235 .handlers = QTAILQ_HEAD_INITIALIZER(savevm_state.handlers), 236 .handler_pri_head = { [MIG_PRI_DEFAULT ... MIG_PRI_MAX] = NULL }, 237 .global_section_id = 0, 238 }; 239 240 static SaveStateEntry *find_se(const char *idstr, uint32_t instance_id); 241 242 static bool should_validate_capability(int capability) 243 { 244 assert(capability >= 0 && capability < MIGRATION_CAPABILITY__MAX); 245 /* Validate only new capabilities to keep compatibility. */ 246 switch (capability) { 247 case MIGRATION_CAPABILITY_X_IGNORE_SHARED: 248 case MIGRATION_CAPABILITY_MAPPED_RAM: 249 return true; 250 default: 251 return false; 252 } 253 } 254 255 static uint32_t get_validatable_capabilities_count(void) 256 { 257 MigrationState *s = migrate_get_current(); 258 uint32_t result = 0; 259 int i; 260 for (i = 0; i < MIGRATION_CAPABILITY__MAX; i++) { 261 if (should_validate_capability(i) && s->capabilities[i]) { 262 result++; 263 } 264 } 265 return result; 266 } 267 268 static int configuration_pre_save(void *opaque) 269 { 270 SaveState *state = opaque; 271 const char *current_name = MACHINE_GET_CLASS(current_machine)->name; 272 MigrationState *s = migrate_get_current(); 273 int i, j; 274 275 state->len = strlen(current_name); 276 state->name = current_name; 277 state->target_page_bits = qemu_target_page_bits(); 278 279 state->caps_count = get_validatable_capabilities_count(); 280 state->capabilities = g_renew(MigrationCapability, state->capabilities, 281 state->caps_count); 282 for (i = j = 0; i < MIGRATION_CAPABILITY__MAX; i++) { 283 if (should_validate_capability(i) && s->capabilities[i]) { 284 state->capabilities[j++] = i; 285 } 286 } 287 state->uuid = qemu_uuid; 288 289 return 0; 290 } 291 292 static int configuration_post_save(void *opaque) 293 { 294 SaveState *state = opaque; 295 296 g_free(state->capabilities); 297 state->capabilities = NULL; 298 state->caps_count = 0; 299 return 0; 300 } 301 302 static int configuration_pre_load(void *opaque) 303 { 304 SaveState *state = opaque; 305 306 /* If there is no target-page-bits subsection it means the source 307 * predates the variable-target-page-bits support and is using the 308 * minimum possible value for this CPU. 309 */ 310 state->target_page_bits = qemu_target_page_bits_min(); 311 return 0; 312 } 313 314 static bool configuration_validate_capabilities(SaveState *state) 315 { 316 bool ret = true; 317 MigrationState *s = migrate_get_current(); 318 unsigned long *source_caps_bm; 319 int i; 320 321 source_caps_bm = bitmap_new(MIGRATION_CAPABILITY__MAX); 322 for (i = 0; i < state->caps_count; i++) { 323 MigrationCapability capability = state->capabilities[i]; 324 set_bit(capability, source_caps_bm); 325 } 326 327 for (i = 0; i < MIGRATION_CAPABILITY__MAX; i++) { 328 bool source_state, target_state; 329 if (!should_validate_capability(i)) { 330 continue; 331 } 332 source_state = test_bit(i, source_caps_bm); 333 target_state = s->capabilities[i]; 334 if (source_state != target_state) { 335 error_report("Capability %s is %s, but received capability is %s", 336 MigrationCapability_str(i), 337 target_state ? "on" : "off", 338 source_state ? "on" : "off"); 339 ret = false; 340 /* Don't break here to report all failed capabilities */ 341 } 342 } 343 344 g_free(source_caps_bm); 345 return ret; 346 } 347 348 static int configuration_post_load(void *opaque, int version_id) 349 { 350 SaveState *state = opaque; 351 const char *current_name = MACHINE_GET_CLASS(current_machine)->name; 352 int ret = 0; 353 354 if (strncmp(state->name, current_name, state->len) != 0) { 355 error_report("Machine type received is '%.*s' and local is '%s'", 356 (int) state->len, state->name, current_name); 357 ret = -EINVAL; 358 goto out; 359 } 360 361 if (state->target_page_bits != qemu_target_page_bits()) { 362 error_report("Received TARGET_PAGE_BITS is %d but local is %d", 363 state->target_page_bits, qemu_target_page_bits()); 364 ret = -EINVAL; 365 goto out; 366 } 367 368 if (!configuration_validate_capabilities(state)) { 369 ret = -EINVAL; 370 goto out; 371 } 372 373 out: 374 g_free((void *)state->name); 375 state->name = NULL; 376 state->len = 0; 377 g_free(state->capabilities); 378 state->capabilities = NULL; 379 state->caps_count = 0; 380 381 return ret; 382 } 383 384 static int get_capability(QEMUFile *f, void *pv, size_t size, 385 const VMStateField *field) 386 { 387 MigrationCapability *capability = pv; 388 char capability_str[UINT8_MAX + 1]; 389 uint8_t len; 390 int i; 391 392 len = qemu_get_byte(f); 393 qemu_get_buffer(f, (uint8_t *)capability_str, len); 394 capability_str[len] = '\0'; 395 for (i = 0; i < MIGRATION_CAPABILITY__MAX; i++) { 396 if (!strcmp(MigrationCapability_str(i), capability_str)) { 397 *capability = i; 398 return 0; 399 } 400 } 401 error_report("Received unknown capability %s", capability_str); 402 return -EINVAL; 403 } 404 405 static int put_capability(QEMUFile *f, void *pv, size_t size, 406 const VMStateField *field, JSONWriter *vmdesc) 407 { 408 MigrationCapability *capability = pv; 409 const char *capability_str = MigrationCapability_str(*capability); 410 size_t len = strlen(capability_str); 411 assert(len <= UINT8_MAX); 412 413 qemu_put_byte(f, len); 414 qemu_put_buffer(f, (uint8_t *)capability_str, len); 415 return 0; 416 } 417 418 static const VMStateInfo vmstate_info_capability = { 419 .name = "capability", 420 .get = get_capability, 421 .put = put_capability, 422 }; 423 424 /* The target-page-bits subsection is present only if the 425 * target page size is not the same as the default (ie the 426 * minimum page size for a variable-page-size guest CPU). 427 * If it is present then it contains the actual target page 428 * bits for the machine, and migration will fail if the 429 * two ends don't agree about it. 430 */ 431 static bool vmstate_target_page_bits_needed(void *opaque) 432 { 433 return qemu_target_page_bits() 434 > qemu_target_page_bits_min(); 435 } 436 437 static const VMStateDescription vmstate_target_page_bits = { 438 .name = "configuration/target-page-bits", 439 .version_id = 1, 440 .minimum_version_id = 1, 441 .needed = vmstate_target_page_bits_needed, 442 .fields = (const VMStateField[]) { 443 VMSTATE_UINT32(target_page_bits, SaveState), 444 VMSTATE_END_OF_LIST() 445 } 446 }; 447 448 static bool vmstate_capabilites_needed(void *opaque) 449 { 450 return get_validatable_capabilities_count() > 0; 451 } 452 453 static const VMStateDescription vmstate_capabilites = { 454 .name = "configuration/capabilities", 455 .version_id = 1, 456 .minimum_version_id = 1, 457 .needed = vmstate_capabilites_needed, 458 .fields = (const VMStateField[]) { 459 VMSTATE_UINT32_V(caps_count, SaveState, 1), 460 VMSTATE_VARRAY_UINT32_ALLOC(capabilities, SaveState, caps_count, 1, 461 vmstate_info_capability, 462 MigrationCapability), 463 VMSTATE_END_OF_LIST() 464 } 465 }; 466 467 static bool vmstate_uuid_needed(void *opaque) 468 { 469 return qemu_uuid_set && migrate_validate_uuid(); 470 } 471 472 static int vmstate_uuid_post_load(void *opaque, int version_id) 473 { 474 SaveState *state = opaque; 475 char uuid_src[UUID_STR_LEN]; 476 char uuid_dst[UUID_STR_LEN]; 477 478 if (!qemu_uuid_set) { 479 /* 480 * It's warning because user might not know UUID in some cases, 481 * e.g. load an old snapshot 482 */ 483 qemu_uuid_unparse(&state->uuid, uuid_src); 484 warn_report("UUID is received %s, but local uuid isn't set", 485 uuid_src); 486 return 0; 487 } 488 if (!qemu_uuid_is_equal(&state->uuid, &qemu_uuid)) { 489 qemu_uuid_unparse(&state->uuid, uuid_src); 490 qemu_uuid_unparse(&qemu_uuid, uuid_dst); 491 error_report("UUID received is %s and local is %s", uuid_src, uuid_dst); 492 return -EINVAL; 493 } 494 return 0; 495 } 496 497 static const VMStateDescription vmstate_uuid = { 498 .name = "configuration/uuid", 499 .version_id = 1, 500 .minimum_version_id = 1, 501 .needed = vmstate_uuid_needed, 502 .post_load = vmstate_uuid_post_load, 503 .fields = (const VMStateField[]) { 504 VMSTATE_UINT8_ARRAY_V(uuid.data, SaveState, sizeof(QemuUUID), 1), 505 VMSTATE_END_OF_LIST() 506 } 507 }; 508 509 static const VMStateDescription vmstate_configuration = { 510 .name = "configuration", 511 .version_id = 1, 512 .pre_load = configuration_pre_load, 513 .post_load = configuration_post_load, 514 .pre_save = configuration_pre_save, 515 .post_save = configuration_post_save, 516 .fields = (const VMStateField[]) { 517 VMSTATE_UINT32(len, SaveState), 518 VMSTATE_VBUFFER_ALLOC_UINT32(name, SaveState, 0, NULL, len), 519 VMSTATE_END_OF_LIST() 520 }, 521 .subsections = (const VMStateDescription * const []) { 522 &vmstate_target_page_bits, 523 &vmstate_capabilites, 524 &vmstate_uuid, 525 NULL 526 } 527 }; 528 529 static void dump_vmstate_vmsd(FILE *out_file, 530 const VMStateDescription *vmsd, int indent, 531 bool is_subsection); 532 533 static void dump_vmstate_vmsf(FILE *out_file, const VMStateField *field, 534 int indent) 535 { 536 fprintf(out_file, "%*s{\n", indent, ""); 537 indent += 2; 538 fprintf(out_file, "%*s\"field\": \"%s\",\n", indent, "", field->name); 539 fprintf(out_file, "%*s\"version_id\": %d,\n", indent, "", 540 field->version_id); 541 fprintf(out_file, "%*s\"field_exists\": %s,\n", indent, "", 542 field->field_exists ? "true" : "false"); 543 if (field->flags & VMS_ARRAY) { 544 fprintf(out_file, "%*s\"num\": %d,\n", indent, "", field->num); 545 } 546 fprintf(out_file, "%*s\"size\": %zu", indent, "", field->size); 547 if (field->vmsd != NULL) { 548 fprintf(out_file, ",\n"); 549 dump_vmstate_vmsd(out_file, field->vmsd, indent, false); 550 } 551 fprintf(out_file, "\n%*s}", indent - 2, ""); 552 } 553 554 static void dump_vmstate_vmss(FILE *out_file, 555 const VMStateDescription *subsection, 556 int indent) 557 { 558 if (subsection != NULL) { 559 dump_vmstate_vmsd(out_file, subsection, indent, true); 560 } 561 } 562 563 static void dump_vmstate_vmsd(FILE *out_file, 564 const VMStateDescription *vmsd, int indent, 565 bool is_subsection) 566 { 567 if (is_subsection) { 568 fprintf(out_file, "%*s{\n", indent, ""); 569 } else { 570 fprintf(out_file, "%*s\"%s\": {\n", indent, "", "Description"); 571 } 572 indent += 2; 573 fprintf(out_file, "%*s\"name\": \"%s\",\n", indent, "", vmsd->name); 574 fprintf(out_file, "%*s\"version_id\": %d,\n", indent, "", 575 vmsd->version_id); 576 fprintf(out_file, "%*s\"minimum_version_id\": %d", indent, "", 577 vmsd->minimum_version_id); 578 if (vmsd->fields != NULL) { 579 const VMStateField *field = vmsd->fields; 580 bool first; 581 582 fprintf(out_file, ",\n%*s\"Fields\": [\n", indent, ""); 583 first = true; 584 while (field->name != NULL) { 585 if (field->flags & VMS_MUST_EXIST) { 586 /* Ignore VMSTATE_VALIDATE bits; these don't get migrated */ 587 field++; 588 continue; 589 } 590 if (!first) { 591 fprintf(out_file, ",\n"); 592 } 593 dump_vmstate_vmsf(out_file, field, indent + 2); 594 field++; 595 first = false; 596 } 597 assert(field->flags == VMS_END); 598 fprintf(out_file, "\n%*s]", indent, ""); 599 } 600 if (vmsd->subsections != NULL) { 601 const VMStateDescription * const *subsection = vmsd->subsections; 602 bool first; 603 604 fprintf(out_file, ",\n%*s\"Subsections\": [\n", indent, ""); 605 first = true; 606 while (*subsection != NULL) { 607 if (!first) { 608 fprintf(out_file, ",\n"); 609 } 610 dump_vmstate_vmss(out_file, *subsection, indent + 2); 611 subsection++; 612 first = false; 613 } 614 fprintf(out_file, "\n%*s]", indent, ""); 615 } 616 fprintf(out_file, "\n%*s}", indent - 2, ""); 617 } 618 619 static void dump_machine_type(FILE *out_file) 620 { 621 MachineClass *mc; 622 623 mc = MACHINE_GET_CLASS(current_machine); 624 625 fprintf(out_file, " \"vmschkmachine\": {\n"); 626 fprintf(out_file, " \"Name\": \"%s\"\n", mc->name); 627 fprintf(out_file, " },\n"); 628 } 629 630 void dump_vmstate_json_to_file(FILE *out_file) 631 { 632 GSList *list, *elt; 633 bool first; 634 635 fprintf(out_file, "{\n"); 636 dump_machine_type(out_file); 637 638 first = true; 639 list = object_class_get_list(TYPE_DEVICE, true); 640 for (elt = list; elt; elt = elt->next) { 641 DeviceClass *dc = OBJECT_CLASS_CHECK(DeviceClass, elt->data, 642 TYPE_DEVICE); 643 const char *name; 644 int indent = 2; 645 646 if (!dc->vmsd) { 647 continue; 648 } 649 650 if (!first) { 651 fprintf(out_file, ",\n"); 652 } 653 name = object_class_get_name(OBJECT_CLASS(dc)); 654 fprintf(out_file, "%*s\"%s\": {\n", indent, "", name); 655 indent += 2; 656 fprintf(out_file, "%*s\"Name\": \"%s\",\n", indent, "", name); 657 fprintf(out_file, "%*s\"version_id\": %d,\n", indent, "", 658 dc->vmsd->version_id); 659 fprintf(out_file, "%*s\"minimum_version_id\": %d,\n", indent, "", 660 dc->vmsd->minimum_version_id); 661 662 dump_vmstate_vmsd(out_file, dc->vmsd, indent, false); 663 664 fprintf(out_file, "\n%*s}", indent - 2, ""); 665 first = false; 666 } 667 fprintf(out_file, "\n}\n"); 668 fclose(out_file); 669 g_slist_free(list); 670 } 671 672 static uint32_t calculate_new_instance_id(const char *idstr) 673 { 674 SaveStateEntry *se; 675 uint32_t instance_id = 0; 676 677 QTAILQ_FOREACH(se, &savevm_state.handlers, entry) { 678 if (strcmp(idstr, se->idstr) == 0 679 && instance_id <= se->instance_id) { 680 instance_id = se->instance_id + 1; 681 } 682 } 683 /* Make sure we never loop over without being noticed */ 684 assert(instance_id != VMSTATE_INSTANCE_ID_ANY); 685 return instance_id; 686 } 687 688 static int calculate_compat_instance_id(const char *idstr) 689 { 690 SaveStateEntry *se; 691 int instance_id = 0; 692 693 QTAILQ_FOREACH(se, &savevm_state.handlers, entry) { 694 if (!se->compat) { 695 continue; 696 } 697 698 if (strcmp(idstr, se->compat->idstr) == 0 699 && instance_id <= se->compat->instance_id) { 700 instance_id = se->compat->instance_id + 1; 701 } 702 } 703 return instance_id; 704 } 705 706 static inline MigrationPriority save_state_priority(SaveStateEntry *se) 707 { 708 if (se->vmsd) { 709 return se->vmsd->priority; 710 } 711 return MIG_PRI_DEFAULT; 712 } 713 714 static void savevm_state_handler_insert(SaveStateEntry *nse) 715 { 716 MigrationPriority priority = save_state_priority(nse); 717 SaveStateEntry *se; 718 int i; 719 720 assert(priority <= MIG_PRI_MAX); 721 722 /* 723 * This should never happen otherwise migration will probably fail 724 * silently somewhere because we can be wrongly applying one 725 * object properties upon another one. Bail out ASAP. 726 */ 727 if (find_se(nse->idstr, nse->instance_id)) { 728 error_report("%s: Detected duplicate SaveStateEntry: " 729 "id=%s, instance_id=0x%"PRIx32, __func__, 730 nse->idstr, nse->instance_id); 731 exit(EXIT_FAILURE); 732 } 733 734 for (i = priority - 1; i >= 0; i--) { 735 se = savevm_state.handler_pri_head[i]; 736 if (se != NULL) { 737 assert(save_state_priority(se) < priority); 738 break; 739 } 740 } 741 742 if (i >= 0) { 743 QTAILQ_INSERT_BEFORE(se, nse, entry); 744 } else { 745 QTAILQ_INSERT_TAIL(&savevm_state.handlers, nse, entry); 746 } 747 748 if (savevm_state.handler_pri_head[priority] == NULL) { 749 savevm_state.handler_pri_head[priority] = nse; 750 } 751 } 752 753 static void savevm_state_handler_remove(SaveStateEntry *se) 754 { 755 SaveStateEntry *next; 756 MigrationPriority priority = save_state_priority(se); 757 758 if (se == savevm_state.handler_pri_head[priority]) { 759 next = QTAILQ_NEXT(se, entry); 760 if (next != NULL && save_state_priority(next) == priority) { 761 savevm_state.handler_pri_head[priority] = next; 762 } else { 763 savevm_state.handler_pri_head[priority] = NULL; 764 } 765 } 766 QTAILQ_REMOVE(&savevm_state.handlers, se, entry); 767 } 768 769 /* TODO: Individual devices generally have very little idea about the rest 770 of the system, so instance_id should be removed/replaced. 771 Meanwhile pass -1 as instance_id if you do not already have a clearly 772 distinguishing id for all instances of your device class. */ 773 int register_savevm_live(const char *idstr, 774 uint32_t instance_id, 775 int version_id, 776 const SaveVMHandlers *ops, 777 void *opaque) 778 { 779 SaveStateEntry *se; 780 781 se = g_new0(SaveStateEntry, 1); 782 se->version_id = version_id; 783 se->section_id = savevm_state.global_section_id++; 784 se->ops = ops; 785 se->opaque = opaque; 786 se->vmsd = NULL; 787 /* if this is a live_savem then set is_ram */ 788 if (ops->save_setup != NULL) { 789 se->is_ram = 1; 790 } 791 792 pstrcat(se->idstr, sizeof(se->idstr), idstr); 793 794 if (instance_id == VMSTATE_INSTANCE_ID_ANY) { 795 se->instance_id = calculate_new_instance_id(se->idstr); 796 } else { 797 se->instance_id = instance_id; 798 } 799 assert(!se->compat || se->instance_id == 0); 800 savevm_state_handler_insert(se); 801 return 0; 802 } 803 804 void unregister_savevm(VMStateIf *obj, const char *idstr, void *opaque) 805 { 806 SaveStateEntry *se, *new_se; 807 char id[256] = ""; 808 809 if (obj) { 810 char *oid = vmstate_if_get_id(obj); 811 if (oid) { 812 pstrcpy(id, sizeof(id), oid); 813 pstrcat(id, sizeof(id), "/"); 814 g_free(oid); 815 } 816 } 817 pstrcat(id, sizeof(id), idstr); 818 819 QTAILQ_FOREACH_SAFE(se, &savevm_state.handlers, entry, new_se) { 820 if (strcmp(se->idstr, id) == 0 && se->opaque == opaque) { 821 savevm_state_handler_remove(se); 822 g_free(se->compat); 823 g_free(se); 824 } 825 } 826 } 827 828 /* 829 * Perform some basic checks on vmsd's at registration 830 * time. 831 */ 832 static void vmstate_check(const VMStateDescription *vmsd) 833 { 834 const VMStateField *field = vmsd->fields; 835 const VMStateDescription * const *subsection = vmsd->subsections; 836 837 if (field) { 838 while (field->name) { 839 if (field->flags & (VMS_STRUCT | VMS_VSTRUCT)) { 840 /* Recurse to sub structures */ 841 vmstate_check(field->vmsd); 842 } 843 /* Carry on */ 844 field++; 845 } 846 /* Check for the end of field list canary */ 847 if (field->flags != VMS_END) { 848 error_report("VMSTATE not ending with VMS_END: %s", vmsd->name); 849 g_assert_not_reached(); 850 } 851 } 852 853 while (subsection && *subsection) { 854 /* 855 * The name of a subsection should start with the name of the 856 * current object. 857 */ 858 assert(!strncmp(vmsd->name, (*subsection)->name, strlen(vmsd->name))); 859 vmstate_check(*subsection); 860 subsection++; 861 } 862 } 863 864 /* 865 * See comment in hw/intc/xics.c:icp_realize() 866 * 867 * This function can be removed when 868 * pre_2_10_vmstate_register_dummy_icp() is removed. 869 */ 870 int vmstate_replace_hack_for_ppc(VMStateIf *obj, int instance_id, 871 const VMStateDescription *vmsd, 872 void *opaque) 873 { 874 SaveStateEntry *se = find_se(vmsd->name, instance_id); 875 876 if (se) { 877 savevm_state_handler_remove(se); 878 } 879 return vmstate_register(obj, instance_id, vmsd, opaque); 880 } 881 882 int vmstate_register_with_alias_id(VMStateIf *obj, uint32_t instance_id, 883 const VMStateDescription *vmsd, 884 void *opaque, int alias_id, 885 int required_for_version, 886 Error **errp) 887 { 888 SaveStateEntry *se; 889 890 /* If this triggers, alias support can be dropped for the vmsd. */ 891 assert(alias_id == -1 || required_for_version >= vmsd->minimum_version_id); 892 893 se = g_new0(SaveStateEntry, 1); 894 se->version_id = vmsd->version_id; 895 se->section_id = savevm_state.global_section_id++; 896 se->opaque = opaque; 897 se->vmsd = vmsd; 898 se->alias_id = alias_id; 899 900 if (obj) { 901 char *id = vmstate_if_get_id(obj); 902 if (id) { 903 if (snprintf(se->idstr, sizeof(se->idstr), "%s/", id) >= 904 sizeof(se->idstr)) { 905 error_setg(errp, "Path too long for VMState (%s)", id); 906 g_free(id); 907 g_free(se); 908 909 return -1; 910 } 911 g_free(id); 912 913 se->compat = g_new0(CompatEntry, 1); 914 pstrcpy(se->compat->idstr, sizeof(se->compat->idstr), vmsd->name); 915 se->compat->instance_id = instance_id == VMSTATE_INSTANCE_ID_ANY ? 916 calculate_compat_instance_id(vmsd->name) : instance_id; 917 instance_id = VMSTATE_INSTANCE_ID_ANY; 918 } 919 } 920 pstrcat(se->idstr, sizeof(se->idstr), vmsd->name); 921 922 if (instance_id == VMSTATE_INSTANCE_ID_ANY) { 923 se->instance_id = calculate_new_instance_id(se->idstr); 924 } else { 925 se->instance_id = instance_id; 926 } 927 928 /* Perform a recursive sanity check during the test runs */ 929 if (qtest_enabled()) { 930 vmstate_check(vmsd); 931 } 932 assert(!se->compat || se->instance_id == 0); 933 savevm_state_handler_insert(se); 934 return 0; 935 } 936 937 void vmstate_unregister(VMStateIf *obj, const VMStateDescription *vmsd, 938 void *opaque) 939 { 940 SaveStateEntry *se, *new_se; 941 942 QTAILQ_FOREACH_SAFE(se, &savevm_state.handlers, entry, new_se) { 943 if (se->vmsd == vmsd && se->opaque == opaque) { 944 savevm_state_handler_remove(se); 945 g_free(se->compat); 946 g_free(se); 947 } 948 } 949 } 950 951 static int vmstate_load(QEMUFile *f, SaveStateEntry *se) 952 { 953 trace_vmstate_load(se->idstr, se->vmsd ? se->vmsd->name : "(old)"); 954 if (!se->vmsd) { /* Old style */ 955 return se->ops->load_state(f, se->opaque, se->load_version_id); 956 } 957 return vmstate_load_state(f, se->vmsd, se->opaque, se->load_version_id); 958 } 959 960 static void vmstate_save_old_style(QEMUFile *f, SaveStateEntry *se, 961 JSONWriter *vmdesc) 962 { 963 uint64_t old_offset = qemu_file_transferred(f); 964 se->ops->save_state(f, se->opaque); 965 uint64_t size = qemu_file_transferred(f) - old_offset; 966 967 if (vmdesc) { 968 json_writer_int64(vmdesc, "size", size); 969 json_writer_start_array(vmdesc, "fields"); 970 json_writer_start_object(vmdesc, NULL); 971 json_writer_str(vmdesc, "name", "data"); 972 json_writer_int64(vmdesc, "size", size); 973 json_writer_str(vmdesc, "type", "buffer"); 974 json_writer_end_object(vmdesc); 975 json_writer_end_array(vmdesc); 976 } 977 } 978 979 /* 980 * Write the header for device section (QEMU_VM_SECTION START/END/PART/FULL) 981 */ 982 static void save_section_header(QEMUFile *f, SaveStateEntry *se, 983 uint8_t section_type) 984 { 985 qemu_put_byte(f, section_type); 986 qemu_put_be32(f, se->section_id); 987 988 if (section_type == QEMU_VM_SECTION_FULL || 989 section_type == QEMU_VM_SECTION_START) { 990 /* ID string */ 991 size_t len = strlen(se->idstr); 992 qemu_put_byte(f, len); 993 qemu_put_buffer(f, (uint8_t *)se->idstr, len); 994 995 qemu_put_be32(f, se->instance_id); 996 qemu_put_be32(f, se->version_id); 997 } 998 } 999 1000 /* 1001 * Write a footer onto device sections that catches cases misformatted device 1002 * sections. 1003 */ 1004 static void save_section_footer(QEMUFile *f, SaveStateEntry *se) 1005 { 1006 if (migrate_get_current()->send_section_footer) { 1007 qemu_put_byte(f, QEMU_VM_SECTION_FOOTER); 1008 qemu_put_be32(f, se->section_id); 1009 } 1010 } 1011 1012 static int vmstate_save(QEMUFile *f, SaveStateEntry *se, JSONWriter *vmdesc, 1013 Error **errp) 1014 { 1015 int ret; 1016 1017 if ((!se->ops || !se->ops->save_state) && !se->vmsd) { 1018 return 0; 1019 } 1020 if (se->vmsd && !vmstate_section_needed(se->vmsd, se->opaque)) { 1021 trace_savevm_section_skip(se->idstr, se->section_id); 1022 return 0; 1023 } 1024 1025 trace_savevm_section_start(se->idstr, se->section_id); 1026 save_section_header(f, se, QEMU_VM_SECTION_FULL); 1027 if (vmdesc) { 1028 json_writer_start_object(vmdesc, NULL); 1029 json_writer_str(vmdesc, "name", se->idstr); 1030 json_writer_int64(vmdesc, "instance_id", se->instance_id); 1031 } 1032 1033 trace_vmstate_save(se->idstr, se->vmsd ? se->vmsd->name : "(old)"); 1034 if (!se->vmsd) { 1035 vmstate_save_old_style(f, se, vmdesc); 1036 } else { 1037 ret = vmstate_save_state_with_err(f, se->vmsd, se->opaque, vmdesc, 1038 errp); 1039 if (ret) { 1040 return ret; 1041 } 1042 } 1043 1044 trace_savevm_section_end(se->idstr, se->section_id, 0); 1045 save_section_footer(f, se); 1046 if (vmdesc) { 1047 json_writer_end_object(vmdesc); 1048 } 1049 return 0; 1050 } 1051 /** 1052 * qemu_savevm_command_send: Send a 'QEMU_VM_COMMAND' type element with the 1053 * command and associated data. 1054 * 1055 * @f: File to send command on 1056 * @command: Command type to send 1057 * @len: Length of associated data 1058 * @data: Data associated with command. 1059 */ 1060 static void qemu_savevm_command_send(QEMUFile *f, 1061 enum qemu_vm_cmd command, 1062 uint16_t len, 1063 uint8_t *data) 1064 { 1065 trace_savevm_command_send(command, len); 1066 qemu_put_byte(f, QEMU_VM_COMMAND); 1067 qemu_put_be16(f, (uint16_t)command); 1068 qemu_put_be16(f, len); 1069 qemu_put_buffer(f, data, len); 1070 qemu_fflush(f); 1071 } 1072 1073 void qemu_savevm_send_colo_enable(QEMUFile *f) 1074 { 1075 trace_savevm_send_colo_enable(); 1076 qemu_savevm_command_send(f, MIG_CMD_ENABLE_COLO, 0, NULL); 1077 } 1078 1079 void qemu_savevm_send_ping(QEMUFile *f, uint32_t value) 1080 { 1081 uint32_t buf; 1082 1083 trace_savevm_send_ping(value); 1084 buf = cpu_to_be32(value); 1085 qemu_savevm_command_send(f, MIG_CMD_PING, sizeof(value), (uint8_t *)&buf); 1086 } 1087 1088 void qemu_savevm_send_open_return_path(QEMUFile *f) 1089 { 1090 trace_savevm_send_open_return_path(); 1091 qemu_savevm_command_send(f, MIG_CMD_OPEN_RETURN_PATH, 0, NULL); 1092 } 1093 1094 /* We have a buffer of data to send; we don't want that all to be loaded 1095 * by the command itself, so the command contains just the length of the 1096 * extra buffer that we then send straight after it. 1097 * TODO: Must be a better way to organise that 1098 * 1099 * Returns: 1100 * 0 on success 1101 * -ve on error 1102 */ 1103 int qemu_savevm_send_packaged(QEMUFile *f, const uint8_t *buf, size_t len) 1104 { 1105 uint32_t tmp; 1106 MigrationState *ms = migrate_get_current(); 1107 Error *local_err = NULL; 1108 1109 if (len > MAX_VM_CMD_PACKAGED_SIZE) { 1110 error_setg(&local_err, "%s: Unreasonably large packaged state: %zu", 1111 __func__, len); 1112 migrate_set_error(ms, local_err); 1113 error_report_err(local_err); 1114 return -1; 1115 } 1116 1117 tmp = cpu_to_be32(len); 1118 1119 trace_qemu_savevm_send_packaged(); 1120 qemu_savevm_command_send(f, MIG_CMD_PACKAGED, 4, (uint8_t *)&tmp); 1121 1122 qemu_put_buffer(f, buf, len); 1123 1124 return 0; 1125 } 1126 1127 /* Send prior to any postcopy transfer */ 1128 void qemu_savevm_send_postcopy_advise(QEMUFile *f) 1129 { 1130 if (migrate_postcopy_ram()) { 1131 uint64_t tmp[2]; 1132 tmp[0] = cpu_to_be64(ram_pagesize_summary()); 1133 tmp[1] = cpu_to_be64(qemu_target_page_size()); 1134 1135 trace_qemu_savevm_send_postcopy_advise(); 1136 qemu_savevm_command_send(f, MIG_CMD_POSTCOPY_ADVISE, 1137 16, (uint8_t *)tmp); 1138 } else { 1139 qemu_savevm_command_send(f, MIG_CMD_POSTCOPY_ADVISE, 0, NULL); 1140 } 1141 } 1142 1143 /* Sent prior to starting the destination running in postcopy, discard pages 1144 * that have already been sent but redirtied on the source. 1145 * CMD_POSTCOPY_RAM_DISCARD consist of: 1146 * byte version (0) 1147 * byte Length of name field (not including 0) 1148 * n x byte RAM block name 1149 * byte 0 terminator (just for safety) 1150 * n x Byte ranges within the named RAMBlock 1151 * be64 Start of the range 1152 * be64 Length 1153 * 1154 * name: RAMBlock name that these entries are part of 1155 * len: Number of page entries 1156 * start_list: 'len' addresses 1157 * length_list: 'len' addresses 1158 * 1159 */ 1160 void qemu_savevm_send_postcopy_ram_discard(QEMUFile *f, const char *name, 1161 uint16_t len, 1162 uint64_t *start_list, 1163 uint64_t *length_list) 1164 { 1165 uint8_t *buf; 1166 uint16_t tmplen; 1167 uint16_t t; 1168 size_t name_len = strlen(name); 1169 1170 trace_qemu_savevm_send_postcopy_ram_discard(name, len); 1171 assert(name_len < 256); 1172 buf = g_malloc0(1 + 1 + name_len + 1 + (8 + 8) * len); 1173 buf[0] = postcopy_ram_discard_version; 1174 buf[1] = name_len; 1175 memcpy(buf + 2, name, name_len); 1176 tmplen = 2 + name_len; 1177 buf[tmplen++] = '\0'; 1178 1179 for (t = 0; t < len; t++) { 1180 stq_be_p(buf + tmplen, start_list[t]); 1181 tmplen += 8; 1182 stq_be_p(buf + tmplen, length_list[t]); 1183 tmplen += 8; 1184 } 1185 qemu_savevm_command_send(f, MIG_CMD_POSTCOPY_RAM_DISCARD, tmplen, buf); 1186 g_free(buf); 1187 } 1188 1189 /* Get the destination into a state where it can receive postcopy data. */ 1190 void qemu_savevm_send_postcopy_listen(QEMUFile *f) 1191 { 1192 trace_savevm_send_postcopy_listen(); 1193 qemu_savevm_command_send(f, MIG_CMD_POSTCOPY_LISTEN, 0, NULL); 1194 } 1195 1196 /* Kick the destination into running */ 1197 void qemu_savevm_send_postcopy_run(QEMUFile *f) 1198 { 1199 trace_savevm_send_postcopy_run(); 1200 qemu_savevm_command_send(f, MIG_CMD_POSTCOPY_RUN, 0, NULL); 1201 } 1202 1203 void qemu_savevm_send_postcopy_resume(QEMUFile *f) 1204 { 1205 trace_savevm_send_postcopy_resume(); 1206 qemu_savevm_command_send(f, MIG_CMD_POSTCOPY_RESUME, 0, NULL); 1207 } 1208 1209 void qemu_savevm_send_recv_bitmap(QEMUFile *f, char *block_name) 1210 { 1211 size_t len; 1212 char buf[256]; 1213 1214 trace_savevm_send_recv_bitmap(block_name); 1215 1216 buf[0] = len = strlen(block_name); 1217 memcpy(buf + 1, block_name, len); 1218 1219 qemu_savevm_command_send(f, MIG_CMD_RECV_BITMAP, len + 1, (uint8_t *)buf); 1220 } 1221 1222 bool qemu_savevm_state_blocked(Error **errp) 1223 { 1224 SaveStateEntry *se; 1225 1226 QTAILQ_FOREACH(se, &savevm_state.handlers, entry) { 1227 if (se->vmsd && se->vmsd->unmigratable) { 1228 error_setg(errp, "State blocked by non-migratable device '%s'", 1229 se->idstr); 1230 return true; 1231 } 1232 } 1233 return false; 1234 } 1235 1236 void qemu_savevm_non_migratable_list(strList **reasons) 1237 { 1238 SaveStateEntry *se; 1239 1240 QTAILQ_FOREACH(se, &savevm_state.handlers, entry) { 1241 if (se->vmsd && se->vmsd->unmigratable) { 1242 QAPI_LIST_PREPEND(*reasons, 1243 g_strdup_printf("non-migratable device: %s", 1244 se->idstr)); 1245 } 1246 } 1247 } 1248 1249 void qemu_savevm_state_header(QEMUFile *f) 1250 { 1251 MigrationState *s = migrate_get_current(); 1252 1253 s->vmdesc = json_writer_new(false); 1254 1255 trace_savevm_state_header(); 1256 qemu_put_be32(f, QEMU_VM_FILE_MAGIC); 1257 qemu_put_be32(f, QEMU_VM_FILE_VERSION); 1258 1259 if (s->send_configuration) { 1260 qemu_put_byte(f, QEMU_VM_CONFIGURATION); 1261 1262 /* 1263 * This starts the main json object and is paired with the 1264 * json_writer_end_object in 1265 * qemu_savevm_state_complete_precopy_non_iterable 1266 */ 1267 json_writer_start_object(s->vmdesc, NULL); 1268 1269 json_writer_start_object(s->vmdesc, "configuration"); 1270 vmstate_save_state(f, &vmstate_configuration, &savevm_state, s->vmdesc); 1271 json_writer_end_object(s->vmdesc); 1272 } 1273 } 1274 1275 bool qemu_savevm_state_guest_unplug_pending(void) 1276 { 1277 SaveStateEntry *se; 1278 1279 QTAILQ_FOREACH(se, &savevm_state.handlers, entry) { 1280 if (se->vmsd && se->vmsd->dev_unplug_pending && 1281 se->vmsd->dev_unplug_pending(se->opaque)) { 1282 return true; 1283 } 1284 } 1285 1286 return false; 1287 } 1288 1289 int qemu_savevm_state_prepare(Error **errp) 1290 { 1291 SaveStateEntry *se; 1292 int ret; 1293 1294 QTAILQ_FOREACH(se, &savevm_state.handlers, entry) { 1295 if (!se->ops || !se->ops->save_prepare) { 1296 continue; 1297 } 1298 if (se->ops->is_active) { 1299 if (!se->ops->is_active(se->opaque)) { 1300 continue; 1301 } 1302 } 1303 1304 ret = se->ops->save_prepare(se->opaque, errp); 1305 if (ret < 0) { 1306 return ret; 1307 } 1308 } 1309 1310 return 0; 1311 } 1312 1313 int qemu_savevm_state_setup(QEMUFile *f, Error **errp) 1314 { 1315 ERRP_GUARD(); 1316 MigrationState *ms = migrate_get_current(); 1317 SaveStateEntry *se; 1318 int ret = 0; 1319 1320 json_writer_int64(ms->vmdesc, "page_size", qemu_target_page_size()); 1321 json_writer_start_array(ms->vmdesc, "devices"); 1322 1323 trace_savevm_state_setup(); 1324 QTAILQ_FOREACH(se, &savevm_state.handlers, entry) { 1325 if (se->vmsd && se->vmsd->early_setup) { 1326 ret = vmstate_save(f, se, ms->vmdesc, errp); 1327 if (ret) { 1328 migrate_set_error(ms, *errp); 1329 qemu_file_set_error(f, ret); 1330 break; 1331 } 1332 continue; 1333 } 1334 1335 if (!se->ops || !se->ops->save_setup) { 1336 continue; 1337 } 1338 if (se->ops->is_active) { 1339 if (!se->ops->is_active(se->opaque)) { 1340 continue; 1341 } 1342 } 1343 save_section_header(f, se, QEMU_VM_SECTION_START); 1344 1345 ret = se->ops->save_setup(f, se->opaque); 1346 save_section_footer(f, se); 1347 if (ret < 0) { 1348 error_setg(errp, "failed to setup SaveStateEntry with id(name): " 1349 "%d(%s): %d", se->section_id, se->idstr, ret); 1350 qemu_file_set_error(f, ret); 1351 break; 1352 } 1353 } 1354 1355 if (ret) { 1356 return ret; 1357 } 1358 1359 /* TODO: Should we check that errp is set in case of failure ? */ 1360 return precopy_notify(PRECOPY_NOTIFY_SETUP, errp); 1361 } 1362 1363 int qemu_savevm_state_resume_prepare(MigrationState *s) 1364 { 1365 SaveStateEntry *se; 1366 int ret; 1367 1368 trace_savevm_state_resume_prepare(); 1369 1370 QTAILQ_FOREACH(se, &savevm_state.handlers, entry) { 1371 if (!se->ops || !se->ops->resume_prepare) { 1372 continue; 1373 } 1374 if (se->ops->is_active) { 1375 if (!se->ops->is_active(se->opaque)) { 1376 continue; 1377 } 1378 } 1379 ret = se->ops->resume_prepare(s, se->opaque); 1380 if (ret < 0) { 1381 return ret; 1382 } 1383 } 1384 1385 return 0; 1386 } 1387 1388 /* 1389 * this function has three return values: 1390 * negative: there was one error, and we have -errno. 1391 * 0 : We haven't finished, caller have to go again 1392 * 1 : We have finished, we can go to complete phase 1393 */ 1394 int qemu_savevm_state_iterate(QEMUFile *f, bool postcopy) 1395 { 1396 SaveStateEntry *se; 1397 bool all_finished = true; 1398 int ret; 1399 1400 trace_savevm_state_iterate(); 1401 QTAILQ_FOREACH(se, &savevm_state.handlers, entry) { 1402 if (!se->ops || !se->ops->save_live_iterate) { 1403 continue; 1404 } 1405 if (se->ops->is_active && 1406 !se->ops->is_active(se->opaque)) { 1407 continue; 1408 } 1409 if (se->ops->is_active_iterate && 1410 !se->ops->is_active_iterate(se->opaque)) { 1411 continue; 1412 } 1413 /* 1414 * In the postcopy phase, any device that doesn't know how to 1415 * do postcopy should have saved it's state in the _complete 1416 * call that's already run, it might get confused if we call 1417 * iterate afterwards. 1418 */ 1419 if (postcopy && 1420 !(se->ops->has_postcopy && se->ops->has_postcopy(se->opaque))) { 1421 continue; 1422 } 1423 if (migration_rate_exceeded(f)) { 1424 return 0; 1425 } 1426 trace_savevm_section_start(se->idstr, se->section_id); 1427 1428 save_section_header(f, se, QEMU_VM_SECTION_PART); 1429 1430 ret = se->ops->save_live_iterate(f, se->opaque); 1431 trace_savevm_section_end(se->idstr, se->section_id, ret); 1432 save_section_footer(f, se); 1433 1434 if (ret < 0) { 1435 error_report("failed to save SaveStateEntry with id(name): " 1436 "%d(%s): %d", 1437 se->section_id, se->idstr, ret); 1438 qemu_file_set_error(f, ret); 1439 return ret; 1440 } else if (!ret) { 1441 all_finished = false; 1442 } 1443 } 1444 return all_finished; 1445 } 1446 1447 static bool should_send_vmdesc(void) 1448 { 1449 MachineState *machine = MACHINE(qdev_get_machine()); 1450 bool in_postcopy = migration_in_postcopy(); 1451 return !machine->suppress_vmdesc && !in_postcopy; 1452 } 1453 1454 /* 1455 * Calls the save_live_complete_postcopy methods 1456 * causing the last few pages to be sent immediately and doing any associated 1457 * cleanup. 1458 * Note postcopy also calls qemu_savevm_state_complete_precopy to complete 1459 * all the other devices, but that happens at the point we switch to postcopy. 1460 */ 1461 void qemu_savevm_state_complete_postcopy(QEMUFile *f) 1462 { 1463 SaveStateEntry *se; 1464 int ret; 1465 1466 QTAILQ_FOREACH(se, &savevm_state.handlers, entry) { 1467 if (!se->ops || !se->ops->save_live_complete_postcopy) { 1468 continue; 1469 } 1470 if (se->ops->is_active) { 1471 if (!se->ops->is_active(se->opaque)) { 1472 continue; 1473 } 1474 } 1475 trace_savevm_section_start(se->idstr, se->section_id); 1476 /* Section type */ 1477 qemu_put_byte(f, QEMU_VM_SECTION_END); 1478 qemu_put_be32(f, se->section_id); 1479 1480 ret = se->ops->save_live_complete_postcopy(f, se->opaque); 1481 trace_savevm_section_end(se->idstr, se->section_id, ret); 1482 save_section_footer(f, se); 1483 if (ret < 0) { 1484 qemu_file_set_error(f, ret); 1485 return; 1486 } 1487 } 1488 1489 qemu_put_byte(f, QEMU_VM_EOF); 1490 qemu_fflush(f); 1491 } 1492 1493 static 1494 int qemu_savevm_state_complete_precopy_iterable(QEMUFile *f, bool in_postcopy) 1495 { 1496 int64_t start_ts_each, end_ts_each; 1497 SaveStateEntry *se; 1498 int ret; 1499 1500 QTAILQ_FOREACH(se, &savevm_state.handlers, entry) { 1501 if (!se->ops || 1502 (in_postcopy && se->ops->has_postcopy && 1503 se->ops->has_postcopy(se->opaque)) || 1504 !se->ops->save_live_complete_precopy) { 1505 continue; 1506 } 1507 1508 if (se->ops->is_active) { 1509 if (!se->ops->is_active(se->opaque)) { 1510 continue; 1511 } 1512 } 1513 1514 start_ts_each = qemu_clock_get_us(QEMU_CLOCK_REALTIME); 1515 trace_savevm_section_start(se->idstr, se->section_id); 1516 1517 save_section_header(f, se, QEMU_VM_SECTION_END); 1518 1519 ret = se->ops->save_live_complete_precopy(f, se->opaque); 1520 trace_savevm_section_end(se->idstr, se->section_id, ret); 1521 save_section_footer(f, se); 1522 if (ret < 0) { 1523 qemu_file_set_error(f, ret); 1524 return -1; 1525 } 1526 end_ts_each = qemu_clock_get_us(QEMU_CLOCK_REALTIME); 1527 trace_vmstate_downtime_save("iterable", se->idstr, se->instance_id, 1528 end_ts_each - start_ts_each); 1529 } 1530 1531 trace_vmstate_downtime_checkpoint("src-iterable-saved"); 1532 1533 return 0; 1534 } 1535 1536 int qemu_savevm_state_complete_precopy_non_iterable(QEMUFile *f, 1537 bool in_postcopy, 1538 bool inactivate_disks) 1539 { 1540 MigrationState *ms = migrate_get_current(); 1541 int64_t start_ts_each, end_ts_each; 1542 JSONWriter *vmdesc = ms->vmdesc; 1543 int vmdesc_len; 1544 SaveStateEntry *se; 1545 Error *local_err = NULL; 1546 int ret; 1547 1548 QTAILQ_FOREACH(se, &savevm_state.handlers, entry) { 1549 if (se->vmsd && se->vmsd->early_setup) { 1550 /* Already saved during qemu_savevm_state_setup(). */ 1551 continue; 1552 } 1553 1554 start_ts_each = qemu_clock_get_us(QEMU_CLOCK_REALTIME); 1555 1556 ret = vmstate_save(f, se, vmdesc, &local_err); 1557 if (ret) { 1558 migrate_set_error(ms, local_err); 1559 error_report_err(local_err); 1560 qemu_file_set_error(f, ret); 1561 return ret; 1562 } 1563 1564 end_ts_each = qemu_clock_get_us(QEMU_CLOCK_REALTIME); 1565 trace_vmstate_downtime_save("non-iterable", se->idstr, se->instance_id, 1566 end_ts_each - start_ts_each); 1567 } 1568 1569 if (inactivate_disks) { 1570 /* Inactivate before sending QEMU_VM_EOF so that the 1571 * bdrv_activate_all() on the other end won't fail. */ 1572 ret = bdrv_inactivate_all(); 1573 if (ret) { 1574 error_setg(&local_err, "%s: bdrv_inactivate_all() failed (%d)", 1575 __func__, ret); 1576 migrate_set_error(ms, local_err); 1577 error_report_err(local_err); 1578 qemu_file_set_error(f, ret); 1579 return ret; 1580 } 1581 } 1582 if (!in_postcopy) { 1583 /* Postcopy stream will still be going */ 1584 qemu_put_byte(f, QEMU_VM_EOF); 1585 } 1586 1587 json_writer_end_array(vmdesc); 1588 json_writer_end_object(vmdesc); 1589 vmdesc_len = strlen(json_writer_get(vmdesc)); 1590 1591 if (should_send_vmdesc()) { 1592 qemu_put_byte(f, QEMU_VM_VMDESCRIPTION); 1593 qemu_put_be32(f, vmdesc_len); 1594 qemu_put_buffer(f, (uint8_t *)json_writer_get(vmdesc), vmdesc_len); 1595 } 1596 1597 /* Free it now to detect any inconsistencies. */ 1598 json_writer_free(vmdesc); 1599 ms->vmdesc = NULL; 1600 1601 trace_vmstate_downtime_checkpoint("src-non-iterable-saved"); 1602 1603 return 0; 1604 } 1605 1606 int qemu_savevm_state_complete_precopy(QEMUFile *f, bool iterable_only, 1607 bool inactivate_disks) 1608 { 1609 int ret; 1610 Error *local_err = NULL; 1611 bool in_postcopy = migration_in_postcopy(); 1612 1613 if (precopy_notify(PRECOPY_NOTIFY_COMPLETE, &local_err)) { 1614 error_report_err(local_err); 1615 } 1616 1617 trace_savevm_state_complete_precopy(); 1618 1619 cpu_synchronize_all_states(); 1620 1621 if (!in_postcopy || iterable_only) { 1622 ret = qemu_savevm_state_complete_precopy_iterable(f, in_postcopy); 1623 if (ret) { 1624 return ret; 1625 } 1626 } 1627 1628 if (iterable_only) { 1629 goto flush; 1630 } 1631 1632 ret = qemu_savevm_state_complete_precopy_non_iterable(f, in_postcopy, 1633 inactivate_disks); 1634 if (ret) { 1635 return ret; 1636 } 1637 1638 flush: 1639 return qemu_fflush(f); 1640 } 1641 1642 /* Give an estimate of the amount left to be transferred, 1643 * the result is split into the amount for units that can and 1644 * for units that can't do postcopy. 1645 */ 1646 void qemu_savevm_state_pending_estimate(uint64_t *must_precopy, 1647 uint64_t *can_postcopy) 1648 { 1649 SaveStateEntry *se; 1650 1651 *must_precopy = 0; 1652 *can_postcopy = 0; 1653 1654 QTAILQ_FOREACH(se, &savevm_state.handlers, entry) { 1655 if (!se->ops || !se->ops->state_pending_estimate) { 1656 continue; 1657 } 1658 if (se->ops->is_active) { 1659 if (!se->ops->is_active(se->opaque)) { 1660 continue; 1661 } 1662 } 1663 se->ops->state_pending_estimate(se->opaque, must_precopy, can_postcopy); 1664 } 1665 } 1666 1667 void qemu_savevm_state_pending_exact(uint64_t *must_precopy, 1668 uint64_t *can_postcopy) 1669 { 1670 SaveStateEntry *se; 1671 1672 *must_precopy = 0; 1673 *can_postcopy = 0; 1674 1675 QTAILQ_FOREACH(se, &savevm_state.handlers, entry) { 1676 if (!se->ops || !se->ops->state_pending_exact) { 1677 continue; 1678 } 1679 if (se->ops->is_active) { 1680 if (!se->ops->is_active(se->opaque)) { 1681 continue; 1682 } 1683 } 1684 se->ops->state_pending_exact(se->opaque, must_precopy, can_postcopy); 1685 } 1686 } 1687 1688 void qemu_savevm_state_cleanup(void) 1689 { 1690 SaveStateEntry *se; 1691 Error *local_err = NULL; 1692 1693 if (precopy_notify(PRECOPY_NOTIFY_CLEANUP, &local_err)) { 1694 error_report_err(local_err); 1695 } 1696 1697 trace_savevm_state_cleanup(); 1698 QTAILQ_FOREACH(se, &savevm_state.handlers, entry) { 1699 if (se->ops && se->ops->save_cleanup) { 1700 se->ops->save_cleanup(se->opaque); 1701 } 1702 } 1703 } 1704 1705 static int qemu_savevm_state(QEMUFile *f, Error **errp) 1706 { 1707 int ret; 1708 MigrationState *ms = migrate_get_current(); 1709 MigrationStatus status; 1710 1711 if (migration_is_running()) { 1712 error_setg(errp, QERR_MIGRATION_ACTIVE); 1713 return -EINVAL; 1714 } 1715 1716 if (migrate_block()) { 1717 error_setg(errp, "Block migration and snapshots are incompatible"); 1718 return -EINVAL; 1719 } 1720 1721 ret = migrate_init(ms, errp); 1722 if (ret) { 1723 return ret; 1724 } 1725 ms->to_dst_file = f; 1726 1727 qemu_savevm_state_header(f); 1728 ret = qemu_savevm_state_setup(f, errp); 1729 if (ret) { 1730 goto cleanup; 1731 } 1732 1733 while (qemu_file_get_error(f) == 0) { 1734 if (qemu_savevm_state_iterate(f, false) > 0) { 1735 break; 1736 } 1737 } 1738 1739 ret = qemu_file_get_error(f); 1740 if (ret == 0) { 1741 qemu_savevm_state_complete_precopy(f, false, false); 1742 ret = qemu_file_get_error(f); 1743 } 1744 if (ret != 0) { 1745 error_setg_errno(errp, -ret, "Error while writing VM state"); 1746 } 1747 cleanup: 1748 qemu_savevm_state_cleanup(); 1749 1750 if (ret != 0) { 1751 status = MIGRATION_STATUS_FAILED; 1752 } else { 1753 status = MIGRATION_STATUS_COMPLETED; 1754 } 1755 migrate_set_state(&ms->state, MIGRATION_STATUS_SETUP, status); 1756 1757 /* f is outer parameter, it should not stay in global migration state after 1758 * this function finished */ 1759 ms->to_dst_file = NULL; 1760 1761 return ret; 1762 } 1763 1764 void qemu_savevm_live_state(QEMUFile *f) 1765 { 1766 /* save QEMU_VM_SECTION_END section */ 1767 qemu_savevm_state_complete_precopy(f, true, false); 1768 qemu_put_byte(f, QEMU_VM_EOF); 1769 } 1770 1771 int qemu_save_device_state(QEMUFile *f) 1772 { 1773 MigrationState *ms = migrate_get_current(); 1774 Error *local_err = NULL; 1775 SaveStateEntry *se; 1776 1777 if (!migration_in_colo_state()) { 1778 qemu_put_be32(f, QEMU_VM_FILE_MAGIC); 1779 qemu_put_be32(f, QEMU_VM_FILE_VERSION); 1780 } 1781 cpu_synchronize_all_states(); 1782 1783 QTAILQ_FOREACH(se, &savevm_state.handlers, entry) { 1784 int ret; 1785 1786 if (se->is_ram) { 1787 continue; 1788 } 1789 ret = vmstate_save(f, se, NULL, &local_err); 1790 if (ret) { 1791 migrate_set_error(ms, local_err); 1792 error_report_err(local_err); 1793 return ret; 1794 } 1795 } 1796 1797 qemu_put_byte(f, QEMU_VM_EOF); 1798 1799 return qemu_file_get_error(f); 1800 } 1801 1802 static SaveStateEntry *find_se(const char *idstr, uint32_t instance_id) 1803 { 1804 SaveStateEntry *se; 1805 1806 QTAILQ_FOREACH(se, &savevm_state.handlers, entry) { 1807 if (!strcmp(se->idstr, idstr) && 1808 (instance_id == se->instance_id || 1809 instance_id == se->alias_id)) 1810 return se; 1811 /* Migrating from an older version? */ 1812 if (strstr(se->idstr, idstr) && se->compat) { 1813 if (!strcmp(se->compat->idstr, idstr) && 1814 (instance_id == se->compat->instance_id || 1815 instance_id == se->alias_id)) 1816 return se; 1817 } 1818 } 1819 return NULL; 1820 } 1821 1822 enum LoadVMExitCodes { 1823 /* Allow a command to quit all layers of nested loadvm loops */ 1824 LOADVM_QUIT = 1, 1825 }; 1826 1827 /* ------ incoming postcopy messages ------ */ 1828 /* 'advise' arrives before any transfers just to tell us that a postcopy 1829 * *might* happen - it might be skipped if precopy transferred everything 1830 * quickly. 1831 */ 1832 static int loadvm_postcopy_handle_advise(MigrationIncomingState *mis, 1833 uint16_t len) 1834 { 1835 PostcopyState ps = postcopy_state_set(POSTCOPY_INCOMING_ADVISE); 1836 uint64_t remote_pagesize_summary, local_pagesize_summary, remote_tps; 1837 size_t page_size = qemu_target_page_size(); 1838 Error *local_err = NULL; 1839 1840 trace_loadvm_postcopy_handle_advise(); 1841 if (ps != POSTCOPY_INCOMING_NONE) { 1842 error_report("CMD_POSTCOPY_ADVISE in wrong postcopy state (%d)", ps); 1843 return -1; 1844 } 1845 1846 switch (len) { 1847 case 0: 1848 if (migrate_postcopy_ram()) { 1849 error_report("RAM postcopy is enabled but have 0 byte advise"); 1850 return -EINVAL; 1851 } 1852 return 0; 1853 case 8 + 8: 1854 if (!migrate_postcopy_ram()) { 1855 error_report("RAM postcopy is disabled but have 16 byte advise"); 1856 return -EINVAL; 1857 } 1858 break; 1859 default: 1860 error_report("CMD_POSTCOPY_ADVISE invalid length (%d)", len); 1861 return -EINVAL; 1862 } 1863 1864 if (!postcopy_ram_supported_by_host(mis, &local_err)) { 1865 error_report_err(local_err); 1866 postcopy_state_set(POSTCOPY_INCOMING_NONE); 1867 return -1; 1868 } 1869 1870 remote_pagesize_summary = qemu_get_be64(mis->from_src_file); 1871 local_pagesize_summary = ram_pagesize_summary(); 1872 1873 if (remote_pagesize_summary != local_pagesize_summary) { 1874 /* 1875 * This detects two potential causes of mismatch: 1876 * a) A mismatch in host page sizes 1877 * Some combinations of mismatch are probably possible but it gets 1878 * a bit more complicated. In particular we need to place whole 1879 * host pages on the dest at once, and we need to ensure that we 1880 * handle dirtying to make sure we never end up sending part of 1881 * a hostpage on it's own. 1882 * b) The use of different huge page sizes on source/destination 1883 * a more fine grain test is performed during RAM block migration 1884 * but this test here causes a nice early clear failure, and 1885 * also fails when passed to an older qemu that doesn't 1886 * do huge pages. 1887 */ 1888 error_report("Postcopy needs matching RAM page sizes (s=%" PRIx64 1889 " d=%" PRIx64 ")", 1890 remote_pagesize_summary, local_pagesize_summary); 1891 return -1; 1892 } 1893 1894 remote_tps = qemu_get_be64(mis->from_src_file); 1895 if (remote_tps != page_size) { 1896 /* 1897 * Again, some differences could be dealt with, but for now keep it 1898 * simple. 1899 */ 1900 error_report("Postcopy needs matching target page sizes (s=%d d=%zd)", 1901 (int)remote_tps, page_size); 1902 return -1; 1903 } 1904 1905 if (postcopy_notify(POSTCOPY_NOTIFY_INBOUND_ADVISE, &local_err)) { 1906 error_report_err(local_err); 1907 return -1; 1908 } 1909 1910 if (ram_postcopy_incoming_init(mis)) { 1911 return -1; 1912 } 1913 1914 return 0; 1915 } 1916 1917 /* After postcopy we will be told to throw some pages away since they're 1918 * dirty and will have to be demand fetched. Must happen before CPU is 1919 * started. 1920 * There can be 0..many of these messages, each encoding multiple pages. 1921 */ 1922 static int loadvm_postcopy_ram_handle_discard(MigrationIncomingState *mis, 1923 uint16_t len) 1924 { 1925 int tmp; 1926 char ramid[256]; 1927 PostcopyState ps = postcopy_state_get(); 1928 1929 trace_loadvm_postcopy_ram_handle_discard(); 1930 1931 switch (ps) { 1932 case POSTCOPY_INCOMING_ADVISE: 1933 /* 1st discard */ 1934 tmp = postcopy_ram_prepare_discard(mis); 1935 if (tmp) { 1936 return tmp; 1937 } 1938 break; 1939 1940 case POSTCOPY_INCOMING_DISCARD: 1941 /* Expected state */ 1942 break; 1943 1944 default: 1945 error_report("CMD_POSTCOPY_RAM_DISCARD in wrong postcopy state (%d)", 1946 ps); 1947 return -1; 1948 } 1949 /* We're expecting a 1950 * Version (0) 1951 * a RAM ID string (length byte, name, 0 term) 1952 * then at least 1 16 byte chunk 1953 */ 1954 if (len < (1 + 1 + 1 + 1 + 2 * 8)) { 1955 error_report("CMD_POSTCOPY_RAM_DISCARD invalid length (%d)", len); 1956 return -1; 1957 } 1958 1959 tmp = qemu_get_byte(mis->from_src_file); 1960 if (tmp != postcopy_ram_discard_version) { 1961 error_report("CMD_POSTCOPY_RAM_DISCARD invalid version (%d)", tmp); 1962 return -1; 1963 } 1964 1965 if (!qemu_get_counted_string(mis->from_src_file, ramid)) { 1966 error_report("CMD_POSTCOPY_RAM_DISCARD Failed to read RAMBlock ID"); 1967 return -1; 1968 } 1969 tmp = qemu_get_byte(mis->from_src_file); 1970 if (tmp != 0) { 1971 error_report("CMD_POSTCOPY_RAM_DISCARD missing nil (%d)", tmp); 1972 return -1; 1973 } 1974 1975 len -= 3 + strlen(ramid); 1976 if (len % 16) { 1977 error_report("CMD_POSTCOPY_RAM_DISCARD invalid length (%d)", len); 1978 return -1; 1979 } 1980 trace_loadvm_postcopy_ram_handle_discard_header(ramid, len); 1981 while (len) { 1982 uint64_t start_addr, block_length; 1983 start_addr = qemu_get_be64(mis->from_src_file); 1984 block_length = qemu_get_be64(mis->from_src_file); 1985 1986 len -= 16; 1987 int ret = ram_discard_range(ramid, start_addr, block_length); 1988 if (ret) { 1989 return ret; 1990 } 1991 } 1992 trace_loadvm_postcopy_ram_handle_discard_end(); 1993 1994 return 0; 1995 } 1996 1997 /* 1998 * Triggered by a postcopy_listen command; this thread takes over reading 1999 * the input stream, leaving the main thread free to carry on loading the rest 2000 * of the device state (from RAM). 2001 * (TODO:This could do with being in a postcopy file - but there again it's 2002 * just another input loop, not that postcopy specific) 2003 */ 2004 static void *postcopy_ram_listen_thread(void *opaque) 2005 { 2006 MigrationIncomingState *mis = migration_incoming_get_current(); 2007 QEMUFile *f = mis->from_src_file; 2008 int load_res; 2009 MigrationState *migr = migrate_get_current(); 2010 2011 object_ref(OBJECT(migr)); 2012 2013 migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE, 2014 MIGRATION_STATUS_POSTCOPY_ACTIVE); 2015 qemu_sem_post(&mis->thread_sync_sem); 2016 trace_postcopy_ram_listen_thread_start(); 2017 2018 rcu_register_thread(); 2019 /* 2020 * Because we're a thread and not a coroutine we can't yield 2021 * in qemu_file, and thus we must be blocking now. 2022 */ 2023 qemu_file_set_blocking(f, true); 2024 load_res = qemu_loadvm_state_main(f, mis); 2025 2026 /* 2027 * This is tricky, but, mis->from_src_file can change after it 2028 * returns, when postcopy recovery happened. In the future, we may 2029 * want a wrapper for the QEMUFile handle. 2030 */ 2031 f = mis->from_src_file; 2032 2033 /* And non-blocking again so we don't block in any cleanup */ 2034 qemu_file_set_blocking(f, false); 2035 2036 trace_postcopy_ram_listen_thread_exit(); 2037 if (load_res < 0) { 2038 qemu_file_set_error(f, load_res); 2039 dirty_bitmap_mig_cancel_incoming(); 2040 if (postcopy_state_get() == POSTCOPY_INCOMING_RUNNING && 2041 !migrate_postcopy_ram() && migrate_dirty_bitmaps()) 2042 { 2043 error_report("%s: loadvm failed during postcopy: %d. All states " 2044 "are migrated except dirty bitmaps. Some dirty " 2045 "bitmaps may be lost, and present migrated dirty " 2046 "bitmaps are correctly migrated and valid.", 2047 __func__, load_res); 2048 load_res = 0; /* prevent further exit() */ 2049 } else { 2050 error_report("%s: loadvm failed: %d", __func__, load_res); 2051 migrate_set_state(&mis->state, MIGRATION_STATUS_POSTCOPY_ACTIVE, 2052 MIGRATION_STATUS_FAILED); 2053 } 2054 } 2055 if (load_res >= 0) { 2056 /* 2057 * This looks good, but it's possible that the device loading in the 2058 * main thread hasn't finished yet, and so we might not be in 'RUN' 2059 * state yet; wait for the end of the main thread. 2060 */ 2061 qemu_event_wait(&mis->main_thread_load_event); 2062 } 2063 postcopy_ram_incoming_cleanup(mis); 2064 2065 if (load_res < 0) { 2066 /* 2067 * If something went wrong then we have a bad state so exit; 2068 * depending how far we got it might be possible at this point 2069 * to leave the guest running and fire MCEs for pages that never 2070 * arrived as a desperate recovery step. 2071 */ 2072 rcu_unregister_thread(); 2073 exit(EXIT_FAILURE); 2074 } 2075 2076 migrate_set_state(&mis->state, MIGRATION_STATUS_POSTCOPY_ACTIVE, 2077 MIGRATION_STATUS_COMPLETED); 2078 /* 2079 * If everything has worked fine, then the main thread has waited 2080 * for us to start, and we're the last use of the mis. 2081 * (If something broke then qemu will have to exit anyway since it's 2082 * got a bad migration state). 2083 */ 2084 migration_incoming_state_destroy(); 2085 qemu_loadvm_state_cleanup(); 2086 2087 rcu_unregister_thread(); 2088 mis->have_listen_thread = false; 2089 postcopy_state_set(POSTCOPY_INCOMING_END); 2090 2091 object_unref(OBJECT(migr)); 2092 2093 return NULL; 2094 } 2095 2096 /* After this message we must be able to immediately receive postcopy data */ 2097 static int loadvm_postcopy_handle_listen(MigrationIncomingState *mis) 2098 { 2099 PostcopyState ps = postcopy_state_set(POSTCOPY_INCOMING_LISTENING); 2100 Error *local_err = NULL; 2101 2102 trace_loadvm_postcopy_handle_listen("enter"); 2103 2104 if (ps != POSTCOPY_INCOMING_ADVISE && ps != POSTCOPY_INCOMING_DISCARD) { 2105 error_report("CMD_POSTCOPY_LISTEN in wrong postcopy state (%d)", ps); 2106 return -1; 2107 } 2108 if (ps == POSTCOPY_INCOMING_ADVISE) { 2109 /* 2110 * A rare case, we entered listen without having to do any discards, 2111 * so do the setup that's normally done at the time of the 1st discard. 2112 */ 2113 if (migrate_postcopy_ram()) { 2114 postcopy_ram_prepare_discard(mis); 2115 } 2116 } 2117 2118 trace_loadvm_postcopy_handle_listen("after discard"); 2119 2120 /* 2121 * Sensitise RAM - can now generate requests for blocks that don't exist 2122 * However, at this point the CPU shouldn't be running, and the IO 2123 * shouldn't be doing anything yet so don't actually expect requests 2124 */ 2125 if (migrate_postcopy_ram()) { 2126 if (postcopy_ram_incoming_setup(mis)) { 2127 postcopy_ram_incoming_cleanup(mis); 2128 return -1; 2129 } 2130 } 2131 2132 trace_loadvm_postcopy_handle_listen("after uffd"); 2133 2134 if (postcopy_notify(POSTCOPY_NOTIFY_INBOUND_LISTEN, &local_err)) { 2135 error_report_err(local_err); 2136 return -1; 2137 } 2138 2139 mis->have_listen_thread = true; 2140 postcopy_thread_create(mis, &mis->listen_thread, "postcopy/listen", 2141 postcopy_ram_listen_thread, QEMU_THREAD_DETACHED); 2142 trace_loadvm_postcopy_handle_listen("return"); 2143 2144 return 0; 2145 } 2146 2147 static void loadvm_postcopy_handle_run_bh(void *opaque) 2148 { 2149 Error *local_err = NULL; 2150 MigrationIncomingState *mis = opaque; 2151 2152 trace_vmstate_downtime_checkpoint("dst-postcopy-bh-enter"); 2153 2154 /* TODO we should move all of this lot into postcopy_ram.c or a shared code 2155 * in migration.c 2156 */ 2157 cpu_synchronize_all_post_init(); 2158 2159 trace_vmstate_downtime_checkpoint("dst-postcopy-bh-cpu-synced"); 2160 2161 qemu_announce_self(&mis->announce_timer, migrate_announce_params()); 2162 2163 trace_vmstate_downtime_checkpoint("dst-postcopy-bh-announced"); 2164 2165 /* Make sure all file formats throw away their mutable metadata. 2166 * If we get an error here, just don't restart the VM yet. */ 2167 bdrv_activate_all(&local_err); 2168 if (local_err) { 2169 error_report_err(local_err); 2170 local_err = NULL; 2171 autostart = false; 2172 } 2173 2174 trace_vmstate_downtime_checkpoint("dst-postcopy-bh-cache-invalidated"); 2175 2176 dirty_bitmap_mig_before_vm_start(); 2177 2178 if (autostart) { 2179 /* Hold onto your hats, starting the CPU */ 2180 vm_start(); 2181 } else { 2182 /* leave it paused and let management decide when to start the CPU */ 2183 runstate_set(RUN_STATE_PAUSED); 2184 } 2185 2186 trace_vmstate_downtime_checkpoint("dst-postcopy-bh-vm-started"); 2187 } 2188 2189 /* After all discards we can start running and asking for pages */ 2190 static int loadvm_postcopy_handle_run(MigrationIncomingState *mis) 2191 { 2192 PostcopyState ps = postcopy_state_get(); 2193 2194 trace_loadvm_postcopy_handle_run(); 2195 if (ps != POSTCOPY_INCOMING_LISTENING) { 2196 error_report("CMD_POSTCOPY_RUN in wrong postcopy state (%d)", ps); 2197 return -1; 2198 } 2199 2200 postcopy_state_set(POSTCOPY_INCOMING_RUNNING); 2201 migration_bh_schedule(loadvm_postcopy_handle_run_bh, mis); 2202 2203 /* We need to finish reading the stream from the package 2204 * and also stop reading anything more from the stream that loaded the 2205 * package (since it's now being read by the listener thread). 2206 * LOADVM_QUIT will quit all the layers of nested loadvm loops. 2207 */ 2208 return LOADVM_QUIT; 2209 } 2210 2211 /* We must be with page_request_mutex held */ 2212 static gboolean postcopy_sync_page_req(gpointer key, gpointer value, 2213 gpointer data) 2214 { 2215 MigrationIncomingState *mis = data; 2216 void *host_addr = (void *) key; 2217 ram_addr_t rb_offset; 2218 RAMBlock *rb; 2219 int ret; 2220 2221 rb = qemu_ram_block_from_host(host_addr, true, &rb_offset); 2222 if (!rb) { 2223 /* 2224 * This should _never_ happen. However be nice for a migrating VM to 2225 * not crash/assert. Post an error (note: intended to not use *_once 2226 * because we do want to see all the illegal addresses; and this can 2227 * never be triggered by the guest so we're safe) and move on next. 2228 */ 2229 error_report("%s: illegal host addr %p", __func__, host_addr); 2230 /* Try the next entry */ 2231 return FALSE; 2232 } 2233 2234 ret = migrate_send_rp_message_req_pages(mis, rb, rb_offset); 2235 if (ret) { 2236 /* Please refer to above comment. */ 2237 error_report("%s: send rp message failed for addr %p", 2238 __func__, host_addr); 2239 return FALSE; 2240 } 2241 2242 trace_postcopy_page_req_sync(host_addr); 2243 2244 return FALSE; 2245 } 2246 2247 static void migrate_send_rp_req_pages_pending(MigrationIncomingState *mis) 2248 { 2249 WITH_QEMU_LOCK_GUARD(&mis->page_request_mutex) { 2250 g_tree_foreach(mis->page_requested, postcopy_sync_page_req, mis); 2251 } 2252 } 2253 2254 static int loadvm_postcopy_handle_resume(MigrationIncomingState *mis) 2255 { 2256 if (mis->state != MIGRATION_STATUS_POSTCOPY_RECOVER) { 2257 error_report("%s: illegal resume received", __func__); 2258 /* Don't fail the load, only for this. */ 2259 return 0; 2260 } 2261 2262 /* 2263 * Reset the last_rb before we resend any page req to source again, since 2264 * the source should have it reset already. 2265 */ 2266 mis->last_rb = NULL; 2267 2268 /* 2269 * This means source VM is ready to resume the postcopy migration. 2270 */ 2271 migrate_set_state(&mis->state, MIGRATION_STATUS_POSTCOPY_RECOVER, 2272 MIGRATION_STATUS_POSTCOPY_ACTIVE); 2273 2274 trace_loadvm_postcopy_handle_resume(); 2275 2276 /* Tell source that "we are ready" */ 2277 migrate_send_rp_resume_ack(mis, MIGRATION_RESUME_ACK_VALUE); 2278 2279 /* 2280 * After a postcopy recovery, the source should have lost the postcopy 2281 * queue, or potentially the requested pages could have been lost during 2282 * the network down phase. Let's re-sync with the source VM by re-sending 2283 * all the pending pages that we eagerly need, so these threads won't get 2284 * blocked too long due to the recovery. 2285 * 2286 * Without this procedure, the faulted destination VM threads (waiting for 2287 * page requests right before the postcopy is interrupted) can keep hanging 2288 * until the pages are sent by the source during the background copying of 2289 * pages, or another thread faulted on the same address accidentally. 2290 */ 2291 migrate_send_rp_req_pages_pending(mis); 2292 2293 /* 2294 * It's time to switch state and release the fault thread to continue 2295 * service page faults. Note that this should be explicitly after the 2296 * above call to migrate_send_rp_req_pages_pending(). In short: 2297 * migrate_send_rp_message_req_pages() is not thread safe, yet. 2298 */ 2299 qemu_sem_post(&mis->postcopy_pause_sem_fault); 2300 2301 if (migrate_postcopy_preempt()) { 2302 /* 2303 * The preempt channel will be created in async manner, now let's 2304 * wait for it and make sure it's created. 2305 */ 2306 qemu_sem_wait(&mis->postcopy_qemufile_dst_done); 2307 assert(mis->postcopy_qemufile_dst); 2308 /* Kick the fast ram load thread too */ 2309 qemu_sem_post(&mis->postcopy_pause_sem_fast_load); 2310 } 2311 2312 return 0; 2313 } 2314 2315 /** 2316 * Immediately following this command is a blob of data containing an embedded 2317 * chunk of migration stream; read it and load it. 2318 * 2319 * @mis: Incoming state 2320 * @length: Length of packaged data to read 2321 * 2322 * Returns: Negative values on error 2323 * 2324 */ 2325 static int loadvm_handle_cmd_packaged(MigrationIncomingState *mis) 2326 { 2327 int ret; 2328 size_t length; 2329 QIOChannelBuffer *bioc; 2330 2331 length = qemu_get_be32(mis->from_src_file); 2332 trace_loadvm_handle_cmd_packaged(length); 2333 2334 if (length > MAX_VM_CMD_PACKAGED_SIZE) { 2335 error_report("Unreasonably large packaged state: %zu", length); 2336 return -1; 2337 } 2338 2339 bioc = qio_channel_buffer_new(length); 2340 qio_channel_set_name(QIO_CHANNEL(bioc), "migration-loadvm-buffer"); 2341 ret = qemu_get_buffer(mis->from_src_file, 2342 bioc->data, 2343 length); 2344 if (ret != length) { 2345 object_unref(OBJECT(bioc)); 2346 error_report("CMD_PACKAGED: Buffer receive fail ret=%d length=%zu", 2347 ret, length); 2348 return (ret < 0) ? ret : -EAGAIN; 2349 } 2350 bioc->usage += length; 2351 trace_loadvm_handle_cmd_packaged_received(ret); 2352 2353 QEMUFile *packf = qemu_file_new_input(QIO_CHANNEL(bioc)); 2354 2355 /* 2356 * Before loading the guest states, ensure that the preempt channel has 2357 * been ready to use, as some of the states (e.g. via virtio_load) might 2358 * trigger page faults that will be handled through the preempt channel. 2359 * So yield to the main thread in the case that the channel create event 2360 * hasn't been dispatched. 2361 * 2362 * TODO: if we can move migration loadvm out of main thread, then we 2363 * won't block main thread from polling the accept() fds. We can drop 2364 * this as a whole when that is done. 2365 */ 2366 do { 2367 if (!migrate_postcopy_preempt() || !qemu_in_coroutine() || 2368 mis->postcopy_qemufile_dst) { 2369 break; 2370 } 2371 2372 aio_co_schedule(qemu_get_current_aio_context(), qemu_coroutine_self()); 2373 qemu_coroutine_yield(); 2374 } while (1); 2375 2376 ret = qemu_loadvm_state_main(packf, mis); 2377 trace_loadvm_handle_cmd_packaged_main(ret); 2378 qemu_fclose(packf); 2379 object_unref(OBJECT(bioc)); 2380 2381 return ret; 2382 } 2383 2384 /* 2385 * Handle request that source requests for recved_bitmap on 2386 * destination. Payload format: 2387 * 2388 * len (1 byte) + ramblock_name (<255 bytes) 2389 */ 2390 static int loadvm_handle_recv_bitmap(MigrationIncomingState *mis, 2391 uint16_t len) 2392 { 2393 QEMUFile *file = mis->from_src_file; 2394 RAMBlock *rb; 2395 char block_name[256]; 2396 size_t cnt; 2397 2398 cnt = qemu_get_counted_string(file, block_name); 2399 if (!cnt) { 2400 error_report("%s: failed to read block name", __func__); 2401 return -EINVAL; 2402 } 2403 2404 /* Validate before using the data */ 2405 if (qemu_file_get_error(file)) { 2406 return qemu_file_get_error(file); 2407 } 2408 2409 if (len != cnt + 1) { 2410 error_report("%s: invalid payload length (%d)", __func__, len); 2411 return -EINVAL; 2412 } 2413 2414 rb = qemu_ram_block_by_name(block_name); 2415 if (!rb) { 2416 error_report("%s: block '%s' not found", __func__, block_name); 2417 return -EINVAL; 2418 } 2419 2420 migrate_send_rp_recv_bitmap(mis, block_name); 2421 2422 trace_loadvm_handle_recv_bitmap(block_name); 2423 2424 return 0; 2425 } 2426 2427 static int loadvm_process_enable_colo(MigrationIncomingState *mis) 2428 { 2429 int ret = migration_incoming_enable_colo(); 2430 2431 if (!ret) { 2432 ret = colo_init_ram_cache(); 2433 if (ret) { 2434 migration_incoming_disable_colo(); 2435 } 2436 } 2437 return ret; 2438 } 2439 2440 /* 2441 * Process an incoming 'QEMU_VM_COMMAND' 2442 * 0 just a normal return 2443 * LOADVM_QUIT All good, but exit the loop 2444 * <0 Error 2445 */ 2446 static int loadvm_process_command(QEMUFile *f) 2447 { 2448 MigrationIncomingState *mis = migration_incoming_get_current(); 2449 uint16_t cmd; 2450 uint16_t len; 2451 uint32_t tmp32; 2452 2453 cmd = qemu_get_be16(f); 2454 len = qemu_get_be16(f); 2455 2456 /* Check validity before continue processing of cmds */ 2457 if (qemu_file_get_error(f)) { 2458 return qemu_file_get_error(f); 2459 } 2460 2461 if (cmd >= MIG_CMD_MAX || cmd == MIG_CMD_INVALID) { 2462 error_report("MIG_CMD 0x%x unknown (len 0x%x)", cmd, len); 2463 return -EINVAL; 2464 } 2465 2466 trace_loadvm_process_command(mig_cmd_args[cmd].name, len); 2467 2468 if (mig_cmd_args[cmd].len != -1 && mig_cmd_args[cmd].len != len) { 2469 error_report("%s received with bad length - expecting %zu, got %d", 2470 mig_cmd_args[cmd].name, 2471 (size_t)mig_cmd_args[cmd].len, len); 2472 return -ERANGE; 2473 } 2474 2475 switch (cmd) { 2476 case MIG_CMD_OPEN_RETURN_PATH: 2477 if (mis->to_src_file) { 2478 error_report("CMD_OPEN_RETURN_PATH called when RP already open"); 2479 /* Not really a problem, so don't give up */ 2480 return 0; 2481 } 2482 mis->to_src_file = qemu_file_get_return_path(f); 2483 if (!mis->to_src_file) { 2484 error_report("CMD_OPEN_RETURN_PATH failed"); 2485 return -1; 2486 } 2487 2488 /* 2489 * Switchover ack is enabled but no device uses it, so send an ACK to 2490 * source that it's OK to switchover. Do it here, after return path has 2491 * been created. 2492 */ 2493 if (migrate_switchover_ack() && !mis->switchover_ack_pending_num) { 2494 int ret = migrate_send_rp_switchover_ack(mis); 2495 if (ret) { 2496 error_report( 2497 "Could not send switchover ack RP MSG, err %d (%s)", ret, 2498 strerror(-ret)); 2499 return ret; 2500 } 2501 } 2502 break; 2503 2504 case MIG_CMD_PING: 2505 tmp32 = qemu_get_be32(f); 2506 trace_loadvm_process_command_ping(tmp32); 2507 if (!mis->to_src_file) { 2508 error_report("CMD_PING (0x%x) received with no return path", 2509 tmp32); 2510 return -1; 2511 } 2512 migrate_send_rp_pong(mis, tmp32); 2513 break; 2514 2515 case MIG_CMD_PACKAGED: 2516 return loadvm_handle_cmd_packaged(mis); 2517 2518 case MIG_CMD_POSTCOPY_ADVISE: 2519 return loadvm_postcopy_handle_advise(mis, len); 2520 2521 case MIG_CMD_POSTCOPY_LISTEN: 2522 return loadvm_postcopy_handle_listen(mis); 2523 2524 case MIG_CMD_POSTCOPY_RUN: 2525 return loadvm_postcopy_handle_run(mis); 2526 2527 case MIG_CMD_POSTCOPY_RAM_DISCARD: 2528 return loadvm_postcopy_ram_handle_discard(mis, len); 2529 2530 case MIG_CMD_POSTCOPY_RESUME: 2531 return loadvm_postcopy_handle_resume(mis); 2532 2533 case MIG_CMD_RECV_BITMAP: 2534 return loadvm_handle_recv_bitmap(mis, len); 2535 2536 case MIG_CMD_ENABLE_COLO: 2537 return loadvm_process_enable_colo(mis); 2538 } 2539 2540 return 0; 2541 } 2542 2543 /* 2544 * Read a footer off the wire and check that it matches the expected section 2545 * 2546 * Returns: true if the footer was good 2547 * false if there is a problem (and calls error_report to say why) 2548 */ 2549 static bool check_section_footer(QEMUFile *f, SaveStateEntry *se) 2550 { 2551 int ret; 2552 uint8_t read_mark; 2553 uint32_t read_section_id; 2554 2555 if (!migrate_get_current()->send_section_footer) { 2556 /* No footer to check */ 2557 return true; 2558 } 2559 2560 read_mark = qemu_get_byte(f); 2561 2562 ret = qemu_file_get_error(f); 2563 if (ret) { 2564 error_report("%s: Read section footer failed: %d", 2565 __func__, ret); 2566 return false; 2567 } 2568 2569 if (read_mark != QEMU_VM_SECTION_FOOTER) { 2570 error_report("Missing section footer for %s", se->idstr); 2571 return false; 2572 } 2573 2574 read_section_id = qemu_get_be32(f); 2575 if (read_section_id != se->load_section_id) { 2576 error_report("Mismatched section id in footer for %s -" 2577 " read 0x%x expected 0x%x", 2578 se->idstr, read_section_id, se->load_section_id); 2579 return false; 2580 } 2581 2582 /* All good */ 2583 return true; 2584 } 2585 2586 static int 2587 qemu_loadvm_section_start_full(QEMUFile *f, MigrationIncomingState *mis, 2588 uint8_t type) 2589 { 2590 bool trace_downtime = (type == QEMU_VM_SECTION_FULL); 2591 uint32_t instance_id, version_id, section_id; 2592 int64_t start_ts, end_ts; 2593 SaveStateEntry *se; 2594 char idstr[256]; 2595 int ret; 2596 2597 /* Read section start */ 2598 section_id = qemu_get_be32(f); 2599 if (!qemu_get_counted_string(f, idstr)) { 2600 error_report("Unable to read ID string for section %u", 2601 section_id); 2602 return -EINVAL; 2603 } 2604 instance_id = qemu_get_be32(f); 2605 version_id = qemu_get_be32(f); 2606 2607 ret = qemu_file_get_error(f); 2608 if (ret) { 2609 error_report("%s: Failed to read instance/version ID: %d", 2610 __func__, ret); 2611 return ret; 2612 } 2613 2614 trace_qemu_loadvm_state_section_startfull(section_id, idstr, 2615 instance_id, version_id); 2616 /* Find savevm section */ 2617 se = find_se(idstr, instance_id); 2618 if (se == NULL) { 2619 error_report("Unknown savevm section or instance '%s' %"PRIu32". " 2620 "Make sure that your current VM setup matches your " 2621 "saved VM setup, including any hotplugged devices", 2622 idstr, instance_id); 2623 return -EINVAL; 2624 } 2625 2626 /* Validate version */ 2627 if (version_id > se->version_id) { 2628 error_report("savevm: unsupported version %d for '%s' v%d", 2629 version_id, idstr, se->version_id); 2630 return -EINVAL; 2631 } 2632 se->load_version_id = version_id; 2633 se->load_section_id = section_id; 2634 2635 /* Validate if it is a device's state */ 2636 if (xen_enabled() && se->is_ram) { 2637 error_report("loadvm: %s RAM loading not allowed on Xen", idstr); 2638 return -EINVAL; 2639 } 2640 2641 if (trace_downtime) { 2642 start_ts = qemu_clock_get_us(QEMU_CLOCK_REALTIME); 2643 } 2644 2645 ret = vmstate_load(f, se); 2646 if (ret < 0) { 2647 error_report("error while loading state for instance 0x%"PRIx32" of" 2648 " device '%s'", instance_id, idstr); 2649 return ret; 2650 } 2651 2652 if (trace_downtime) { 2653 end_ts = qemu_clock_get_us(QEMU_CLOCK_REALTIME); 2654 trace_vmstate_downtime_load("non-iterable", se->idstr, 2655 se->instance_id, end_ts - start_ts); 2656 } 2657 2658 if (!check_section_footer(f, se)) { 2659 return -EINVAL; 2660 } 2661 2662 return 0; 2663 } 2664 2665 static int 2666 qemu_loadvm_section_part_end(QEMUFile *f, MigrationIncomingState *mis, 2667 uint8_t type) 2668 { 2669 bool trace_downtime = (type == QEMU_VM_SECTION_END); 2670 int64_t start_ts, end_ts; 2671 uint32_t section_id; 2672 SaveStateEntry *se; 2673 int ret; 2674 2675 section_id = qemu_get_be32(f); 2676 2677 ret = qemu_file_get_error(f); 2678 if (ret) { 2679 error_report("%s: Failed to read section ID: %d", 2680 __func__, ret); 2681 return ret; 2682 } 2683 2684 trace_qemu_loadvm_state_section_partend(section_id); 2685 QTAILQ_FOREACH(se, &savevm_state.handlers, entry) { 2686 if (se->load_section_id == section_id) { 2687 break; 2688 } 2689 } 2690 if (se == NULL) { 2691 error_report("Unknown savevm section %d", section_id); 2692 return -EINVAL; 2693 } 2694 2695 if (trace_downtime) { 2696 start_ts = qemu_clock_get_us(QEMU_CLOCK_REALTIME); 2697 } 2698 2699 ret = vmstate_load(f, se); 2700 if (ret < 0) { 2701 error_report("error while loading state section id %d(%s)", 2702 section_id, se->idstr); 2703 return ret; 2704 } 2705 2706 if (trace_downtime) { 2707 end_ts = qemu_clock_get_us(QEMU_CLOCK_REALTIME); 2708 trace_vmstate_downtime_load("iterable", se->idstr, 2709 se->instance_id, end_ts - start_ts); 2710 } 2711 2712 if (!check_section_footer(f, se)) { 2713 return -EINVAL; 2714 } 2715 2716 return 0; 2717 } 2718 2719 static int qemu_loadvm_state_header(QEMUFile *f) 2720 { 2721 unsigned int v; 2722 int ret; 2723 2724 v = qemu_get_be32(f); 2725 if (v != QEMU_VM_FILE_MAGIC) { 2726 error_report("Not a migration stream"); 2727 return -EINVAL; 2728 } 2729 2730 v = qemu_get_be32(f); 2731 if (v == QEMU_VM_FILE_VERSION_COMPAT) { 2732 error_report("SaveVM v2 format is obsolete and don't work anymore"); 2733 return -ENOTSUP; 2734 } 2735 if (v != QEMU_VM_FILE_VERSION) { 2736 error_report("Unsupported migration stream version"); 2737 return -ENOTSUP; 2738 } 2739 2740 if (migrate_get_current()->send_configuration) { 2741 if (qemu_get_byte(f) != QEMU_VM_CONFIGURATION) { 2742 error_report("Configuration section missing"); 2743 qemu_loadvm_state_cleanup(); 2744 return -EINVAL; 2745 } 2746 ret = vmstate_load_state(f, &vmstate_configuration, &savevm_state, 0); 2747 2748 if (ret) { 2749 qemu_loadvm_state_cleanup(); 2750 return ret; 2751 } 2752 } 2753 return 0; 2754 } 2755 2756 static void qemu_loadvm_state_switchover_ack_needed(MigrationIncomingState *mis) 2757 { 2758 SaveStateEntry *se; 2759 2760 QTAILQ_FOREACH(se, &savevm_state.handlers, entry) { 2761 if (!se->ops || !se->ops->switchover_ack_needed) { 2762 continue; 2763 } 2764 2765 if (se->ops->switchover_ack_needed(se->opaque)) { 2766 mis->switchover_ack_pending_num++; 2767 } 2768 } 2769 2770 trace_loadvm_state_switchover_ack_needed(mis->switchover_ack_pending_num); 2771 } 2772 2773 static int qemu_loadvm_state_setup(QEMUFile *f) 2774 { 2775 SaveStateEntry *se; 2776 int ret; 2777 2778 trace_loadvm_state_setup(); 2779 QTAILQ_FOREACH(se, &savevm_state.handlers, entry) { 2780 if (!se->ops || !se->ops->load_setup) { 2781 continue; 2782 } 2783 if (se->ops->is_active) { 2784 if (!se->ops->is_active(se->opaque)) { 2785 continue; 2786 } 2787 } 2788 2789 ret = se->ops->load_setup(f, se->opaque); 2790 if (ret < 0) { 2791 qemu_file_set_error(f, ret); 2792 error_report("Load state of device %s failed", se->idstr); 2793 return ret; 2794 } 2795 } 2796 return 0; 2797 } 2798 2799 void qemu_loadvm_state_cleanup(void) 2800 { 2801 SaveStateEntry *se; 2802 2803 trace_loadvm_state_cleanup(); 2804 QTAILQ_FOREACH(se, &savevm_state.handlers, entry) { 2805 if (se->ops && se->ops->load_cleanup) { 2806 se->ops->load_cleanup(se->opaque); 2807 } 2808 } 2809 } 2810 2811 /* Return true if we should continue the migration, or false. */ 2812 static bool postcopy_pause_incoming(MigrationIncomingState *mis) 2813 { 2814 int i; 2815 2816 trace_postcopy_pause_incoming(); 2817 2818 assert(migrate_postcopy_ram()); 2819 2820 /* 2821 * Unregister yank with either from/to src would work, since ioc behind it 2822 * is the same 2823 */ 2824 migration_ioc_unregister_yank_from_file(mis->from_src_file); 2825 2826 assert(mis->from_src_file); 2827 qemu_file_shutdown(mis->from_src_file); 2828 qemu_fclose(mis->from_src_file); 2829 mis->from_src_file = NULL; 2830 2831 assert(mis->to_src_file); 2832 qemu_file_shutdown(mis->to_src_file); 2833 qemu_mutex_lock(&mis->rp_mutex); 2834 qemu_fclose(mis->to_src_file); 2835 mis->to_src_file = NULL; 2836 qemu_mutex_unlock(&mis->rp_mutex); 2837 2838 /* 2839 * NOTE: this must happen before reset the PostcopyTmpPages below, 2840 * otherwise it's racy to reset those fields when the fast load thread 2841 * can be accessing it in parallel. 2842 */ 2843 if (mis->postcopy_qemufile_dst) { 2844 qemu_file_shutdown(mis->postcopy_qemufile_dst); 2845 /* Take the mutex to make sure the fast ram load thread halted */ 2846 qemu_mutex_lock(&mis->postcopy_prio_thread_mutex); 2847 migration_ioc_unregister_yank_from_file(mis->postcopy_qemufile_dst); 2848 qemu_fclose(mis->postcopy_qemufile_dst); 2849 mis->postcopy_qemufile_dst = NULL; 2850 qemu_mutex_unlock(&mis->postcopy_prio_thread_mutex); 2851 } 2852 2853 /* Current state can be either ACTIVE or RECOVER */ 2854 migrate_set_state(&mis->state, mis->state, 2855 MIGRATION_STATUS_POSTCOPY_PAUSED); 2856 2857 /* Notify the fault thread for the invalidated file handle */ 2858 postcopy_fault_thread_notify(mis); 2859 2860 /* 2861 * If network is interrupted, any temp page we received will be useless 2862 * because we didn't mark them as "received" in receivedmap. After a 2863 * proper recovery later (which will sync src dirty bitmap with receivedmap 2864 * on dest) these cached small pages will be resent again. 2865 */ 2866 for (i = 0; i < mis->postcopy_channels; i++) { 2867 postcopy_temp_page_reset(&mis->postcopy_tmp_pages[i]); 2868 } 2869 2870 error_report("Detected IO failure for postcopy. " 2871 "Migration paused."); 2872 2873 while (mis->state == MIGRATION_STATUS_POSTCOPY_PAUSED) { 2874 qemu_sem_wait(&mis->postcopy_pause_sem_dst); 2875 } 2876 2877 trace_postcopy_pause_incoming_continued(); 2878 2879 return true; 2880 } 2881 2882 int qemu_loadvm_state_main(QEMUFile *f, MigrationIncomingState *mis) 2883 { 2884 uint8_t section_type; 2885 int ret = 0; 2886 2887 retry: 2888 while (true) { 2889 section_type = qemu_get_byte(f); 2890 2891 ret = qemu_file_get_error_obj_any(f, mis->postcopy_qemufile_dst, NULL); 2892 if (ret) { 2893 break; 2894 } 2895 2896 trace_qemu_loadvm_state_section(section_type); 2897 switch (section_type) { 2898 case QEMU_VM_SECTION_START: 2899 case QEMU_VM_SECTION_FULL: 2900 ret = qemu_loadvm_section_start_full(f, mis, section_type); 2901 if (ret < 0) { 2902 goto out; 2903 } 2904 break; 2905 case QEMU_VM_SECTION_PART: 2906 case QEMU_VM_SECTION_END: 2907 ret = qemu_loadvm_section_part_end(f, mis, section_type); 2908 if (ret < 0) { 2909 goto out; 2910 } 2911 break; 2912 case QEMU_VM_COMMAND: 2913 ret = loadvm_process_command(f); 2914 trace_qemu_loadvm_state_section_command(ret); 2915 if ((ret < 0) || (ret == LOADVM_QUIT)) { 2916 goto out; 2917 } 2918 break; 2919 case QEMU_VM_EOF: 2920 /* This is the end of migration */ 2921 goto out; 2922 default: 2923 error_report("Unknown savevm section type %d", section_type); 2924 ret = -EINVAL; 2925 goto out; 2926 } 2927 } 2928 2929 out: 2930 if (ret < 0) { 2931 qemu_file_set_error(f, ret); 2932 2933 /* Cancel bitmaps incoming regardless of recovery */ 2934 dirty_bitmap_mig_cancel_incoming(); 2935 2936 /* 2937 * If we are during an active postcopy, then we pause instead 2938 * of bail out to at least keep the VM's dirty data. Note 2939 * that POSTCOPY_INCOMING_LISTENING stage is still not enough, 2940 * during which we're still receiving device states and we 2941 * still haven't yet started the VM on destination. 2942 * 2943 * Only RAM postcopy supports recovery. Still, if RAM postcopy is 2944 * enabled, canceled bitmaps postcopy will not affect RAM postcopy 2945 * recovering. 2946 */ 2947 if (postcopy_state_get() == POSTCOPY_INCOMING_RUNNING && 2948 migrate_postcopy_ram() && postcopy_pause_incoming(mis)) { 2949 /* Reset f to point to the newly created channel */ 2950 f = mis->from_src_file; 2951 goto retry; 2952 } 2953 } 2954 return ret; 2955 } 2956 2957 int qemu_loadvm_state(QEMUFile *f) 2958 { 2959 MigrationIncomingState *mis = migration_incoming_get_current(); 2960 Error *local_err = NULL; 2961 int ret; 2962 2963 if (qemu_savevm_state_blocked(&local_err)) { 2964 error_report_err(local_err); 2965 return -EINVAL; 2966 } 2967 2968 ret = qemu_loadvm_state_header(f); 2969 if (ret) { 2970 return ret; 2971 } 2972 2973 if (qemu_loadvm_state_setup(f) != 0) { 2974 return -EINVAL; 2975 } 2976 2977 if (migrate_switchover_ack()) { 2978 qemu_loadvm_state_switchover_ack_needed(mis); 2979 } 2980 2981 cpu_synchronize_all_pre_loadvm(); 2982 2983 ret = qemu_loadvm_state_main(f, mis); 2984 qemu_event_set(&mis->main_thread_load_event); 2985 2986 trace_qemu_loadvm_state_post_main(ret); 2987 2988 if (mis->have_listen_thread) { 2989 /* Listen thread still going, can't clean up yet */ 2990 return ret; 2991 } 2992 2993 if (ret == 0) { 2994 ret = qemu_file_get_error(f); 2995 } 2996 2997 /* 2998 * Try to read in the VMDESC section as well, so that dumping tools that 2999 * intercept our migration stream have the chance to see it. 3000 */ 3001 3002 /* We've got to be careful; if we don't read the data and just shut the fd 3003 * then the sender can error if we close while it's still sending. 3004 * We also mustn't read data that isn't there; some transports (RDMA) 3005 * will stall waiting for that data when the source has already closed. 3006 */ 3007 if (ret == 0 && should_send_vmdesc()) { 3008 uint8_t *buf; 3009 uint32_t size; 3010 uint8_t section_type = qemu_get_byte(f); 3011 3012 if (section_type != QEMU_VM_VMDESCRIPTION) { 3013 error_report("Expected vmdescription section, but got %d", 3014 section_type); 3015 /* 3016 * It doesn't seem worth failing at this point since 3017 * we apparently have an otherwise valid VM state 3018 */ 3019 } else { 3020 buf = g_malloc(0x1000); 3021 size = qemu_get_be32(f); 3022 3023 while (size > 0) { 3024 uint32_t read_chunk = MIN(size, 0x1000); 3025 qemu_get_buffer(f, buf, read_chunk); 3026 size -= read_chunk; 3027 } 3028 g_free(buf); 3029 } 3030 } 3031 3032 qemu_loadvm_state_cleanup(); 3033 cpu_synchronize_all_post_init(); 3034 3035 return ret; 3036 } 3037 3038 int qemu_load_device_state(QEMUFile *f) 3039 { 3040 MigrationIncomingState *mis = migration_incoming_get_current(); 3041 int ret; 3042 3043 /* Load QEMU_VM_SECTION_FULL section */ 3044 ret = qemu_loadvm_state_main(f, mis); 3045 if (ret < 0) { 3046 error_report("Failed to load device state: %d", ret); 3047 return ret; 3048 } 3049 3050 cpu_synchronize_all_post_init(); 3051 return 0; 3052 } 3053 3054 int qemu_loadvm_approve_switchover(void) 3055 { 3056 MigrationIncomingState *mis = migration_incoming_get_current(); 3057 3058 if (!mis->switchover_ack_pending_num) { 3059 return -EINVAL; 3060 } 3061 3062 mis->switchover_ack_pending_num--; 3063 trace_loadvm_approve_switchover(mis->switchover_ack_pending_num); 3064 3065 if (mis->switchover_ack_pending_num) { 3066 return 0; 3067 } 3068 3069 return migrate_send_rp_switchover_ack(mis); 3070 } 3071 3072 bool save_snapshot(const char *name, bool overwrite, const char *vmstate, 3073 bool has_devices, strList *devices, Error **errp) 3074 { 3075 BlockDriverState *bs; 3076 QEMUSnapshotInfo sn1, *sn = &sn1; 3077 int ret = -1, ret2; 3078 QEMUFile *f; 3079 RunState saved_state = runstate_get(); 3080 uint64_t vm_state_size; 3081 g_autoptr(GDateTime) now = g_date_time_new_now_local(); 3082 3083 GLOBAL_STATE_CODE(); 3084 3085 if (migration_is_blocked(errp)) { 3086 return false; 3087 } 3088 3089 if (!replay_can_snapshot()) { 3090 error_setg(errp, "Record/replay does not allow making snapshot " 3091 "right now. Try once more later."); 3092 return false; 3093 } 3094 3095 if (!bdrv_all_can_snapshot(has_devices, devices, errp)) { 3096 return false; 3097 } 3098 3099 /* Delete old snapshots of the same name */ 3100 if (name) { 3101 if (overwrite) { 3102 if (bdrv_all_delete_snapshot(name, has_devices, 3103 devices, errp) < 0) { 3104 return false; 3105 } 3106 } else { 3107 ret2 = bdrv_all_has_snapshot(name, has_devices, devices, errp); 3108 if (ret2 < 0) { 3109 return false; 3110 } 3111 if (ret2 == 1) { 3112 error_setg(errp, 3113 "Snapshot '%s' already exists in one or more devices", 3114 name); 3115 return false; 3116 } 3117 } 3118 } 3119 3120 bs = bdrv_all_find_vmstate_bs(vmstate, has_devices, devices, errp); 3121 if (bs == NULL) { 3122 return false; 3123 } 3124 3125 global_state_store(); 3126 vm_stop(RUN_STATE_SAVE_VM); 3127 3128 bdrv_drain_all_begin(); 3129 3130 memset(sn, 0, sizeof(*sn)); 3131 3132 /* fill auxiliary fields */ 3133 sn->date_sec = g_date_time_to_unix(now); 3134 sn->date_nsec = g_date_time_get_microsecond(now) * 1000; 3135 sn->vm_clock_nsec = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); 3136 if (replay_mode != REPLAY_MODE_NONE) { 3137 sn->icount = replay_get_current_icount(); 3138 } else { 3139 sn->icount = -1ULL; 3140 } 3141 3142 if (name) { 3143 pstrcpy(sn->name, sizeof(sn->name), name); 3144 } else { 3145 g_autofree char *autoname = g_date_time_format(now, "vm-%Y%m%d%H%M%S"); 3146 pstrcpy(sn->name, sizeof(sn->name), autoname); 3147 } 3148 3149 /* save the VM state */ 3150 f = qemu_fopen_bdrv(bs, 1); 3151 if (!f) { 3152 error_setg(errp, "Could not open VM state file"); 3153 goto the_end; 3154 } 3155 ret = qemu_savevm_state(f, errp); 3156 vm_state_size = qemu_file_transferred(f); 3157 ret2 = qemu_fclose(f); 3158 if (ret < 0) { 3159 goto the_end; 3160 } 3161 if (ret2 < 0) { 3162 ret = ret2; 3163 goto the_end; 3164 } 3165 3166 ret = bdrv_all_create_snapshot(sn, bs, vm_state_size, 3167 has_devices, devices, errp); 3168 if (ret < 0) { 3169 bdrv_all_delete_snapshot(sn->name, has_devices, devices, NULL); 3170 goto the_end; 3171 } 3172 3173 ret = 0; 3174 3175 the_end: 3176 bdrv_drain_all_end(); 3177 3178 vm_resume(saved_state); 3179 return ret == 0; 3180 } 3181 3182 void qmp_xen_save_devices_state(const char *filename, bool has_live, bool live, 3183 Error **errp) 3184 { 3185 QEMUFile *f; 3186 QIOChannelFile *ioc; 3187 int saved_vm_running; 3188 int ret; 3189 3190 if (!has_live) { 3191 /* live default to true so old version of Xen tool stack can have a 3192 * successful live migration */ 3193 live = true; 3194 } 3195 3196 saved_vm_running = runstate_is_running(); 3197 vm_stop(RUN_STATE_SAVE_VM); 3198 global_state_store_running(); 3199 3200 ioc = qio_channel_file_new_path(filename, O_WRONLY | O_CREAT | O_TRUNC, 3201 0660, errp); 3202 if (!ioc) { 3203 goto the_end; 3204 } 3205 qio_channel_set_name(QIO_CHANNEL(ioc), "migration-xen-save-state"); 3206 f = qemu_file_new_output(QIO_CHANNEL(ioc)); 3207 object_unref(OBJECT(ioc)); 3208 ret = qemu_save_device_state(f); 3209 if (ret < 0 || qemu_fclose(f) < 0) { 3210 error_setg(errp, QERR_IO_ERROR); 3211 } else { 3212 /* libxl calls the QMP command "stop" before calling 3213 * "xen-save-devices-state" and in case of migration failure, libxl 3214 * would call "cont". 3215 * So call bdrv_inactivate_all (release locks) here to let the other 3216 * side of the migration take control of the images. 3217 */ 3218 if (live && !saved_vm_running) { 3219 ret = bdrv_inactivate_all(); 3220 if (ret) { 3221 error_setg(errp, "%s: bdrv_inactivate_all() failed (%d)", 3222 __func__, ret); 3223 } 3224 } 3225 } 3226 3227 the_end: 3228 if (saved_vm_running) { 3229 vm_start(); 3230 } 3231 } 3232 3233 void qmp_xen_load_devices_state(const char *filename, Error **errp) 3234 { 3235 QEMUFile *f; 3236 QIOChannelFile *ioc; 3237 int ret; 3238 3239 /* Guest must be paused before loading the device state; the RAM state 3240 * will already have been loaded by xc 3241 */ 3242 if (runstate_is_running()) { 3243 error_setg(errp, "Cannot update device state while vm is running"); 3244 return; 3245 } 3246 vm_stop(RUN_STATE_RESTORE_VM); 3247 3248 ioc = qio_channel_file_new_path(filename, O_RDONLY | O_BINARY, 0, errp); 3249 if (!ioc) { 3250 return; 3251 } 3252 qio_channel_set_name(QIO_CHANNEL(ioc), "migration-xen-load-state"); 3253 f = qemu_file_new_input(QIO_CHANNEL(ioc)); 3254 object_unref(OBJECT(ioc)); 3255 3256 ret = qemu_loadvm_state(f); 3257 qemu_fclose(f); 3258 if (ret < 0) { 3259 error_setg(errp, QERR_IO_ERROR); 3260 } 3261 migration_incoming_state_destroy(); 3262 } 3263 3264 bool load_snapshot(const char *name, const char *vmstate, 3265 bool has_devices, strList *devices, Error **errp) 3266 { 3267 BlockDriverState *bs_vm_state; 3268 QEMUSnapshotInfo sn; 3269 QEMUFile *f; 3270 int ret; 3271 MigrationIncomingState *mis = migration_incoming_get_current(); 3272 3273 if (!bdrv_all_can_snapshot(has_devices, devices, errp)) { 3274 return false; 3275 } 3276 ret = bdrv_all_has_snapshot(name, has_devices, devices, errp); 3277 if (ret < 0) { 3278 return false; 3279 } 3280 if (ret == 0) { 3281 error_setg(errp, "Snapshot '%s' does not exist in one or more devices", 3282 name); 3283 return false; 3284 } 3285 3286 bs_vm_state = bdrv_all_find_vmstate_bs(vmstate, has_devices, devices, errp); 3287 if (!bs_vm_state) { 3288 return false; 3289 } 3290 3291 /* Don't even try to load empty VM states */ 3292 ret = bdrv_snapshot_find(bs_vm_state, &sn, name); 3293 if (ret < 0) { 3294 return false; 3295 } else if (sn.vm_state_size == 0) { 3296 error_setg(errp, "This is a disk-only snapshot. Revert to it " 3297 " offline using qemu-img"); 3298 return false; 3299 } 3300 3301 /* 3302 * Flush the record/replay queue. Now the VM state is going 3303 * to change. Therefore we don't need to preserve its consistency 3304 */ 3305 replay_flush_events(); 3306 3307 /* Flush all IO requests so they don't interfere with the new state. */ 3308 bdrv_drain_all_begin(); 3309 3310 ret = bdrv_all_goto_snapshot(name, has_devices, devices, errp); 3311 if (ret < 0) { 3312 goto err_drain; 3313 } 3314 3315 /* restore the VM state */ 3316 f = qemu_fopen_bdrv(bs_vm_state, 0); 3317 if (!f) { 3318 error_setg(errp, "Could not open VM state file"); 3319 goto err_drain; 3320 } 3321 3322 qemu_system_reset(SHUTDOWN_CAUSE_SNAPSHOT_LOAD); 3323 mis->from_src_file = f; 3324 3325 if (!yank_register_instance(MIGRATION_YANK_INSTANCE, errp)) { 3326 ret = -EINVAL; 3327 goto err_drain; 3328 } 3329 ret = qemu_loadvm_state(f); 3330 migration_incoming_state_destroy(); 3331 3332 bdrv_drain_all_end(); 3333 3334 if (ret < 0) { 3335 error_setg(errp, "Error %d while loading VM state", ret); 3336 return false; 3337 } 3338 3339 return true; 3340 3341 err_drain: 3342 bdrv_drain_all_end(); 3343 return false; 3344 } 3345 3346 void load_snapshot_resume(RunState state) 3347 { 3348 vm_resume(state); 3349 if (state == RUN_STATE_RUNNING && runstate_get() == RUN_STATE_SUSPENDED) { 3350 qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER, &error_abort); 3351 } 3352 } 3353 3354 bool delete_snapshot(const char *name, bool has_devices, 3355 strList *devices, Error **errp) 3356 { 3357 if (!bdrv_all_can_snapshot(has_devices, devices, errp)) { 3358 return false; 3359 } 3360 3361 if (bdrv_all_delete_snapshot(name, has_devices, devices, errp) < 0) { 3362 return false; 3363 } 3364 3365 return true; 3366 } 3367 3368 void vmstate_register_ram(MemoryRegion *mr, DeviceState *dev) 3369 { 3370 qemu_ram_set_idstr(mr->ram_block, 3371 memory_region_name(mr), dev); 3372 qemu_ram_set_migratable(mr->ram_block); 3373 } 3374 3375 void vmstate_unregister_ram(MemoryRegion *mr, DeviceState *dev) 3376 { 3377 qemu_ram_unset_idstr(mr->ram_block); 3378 qemu_ram_unset_migratable(mr->ram_block); 3379 } 3380 3381 void vmstate_register_ram_global(MemoryRegion *mr) 3382 { 3383 vmstate_register_ram(mr, NULL); 3384 } 3385 3386 bool vmstate_check_only_migratable(const VMStateDescription *vmsd) 3387 { 3388 /* check needed if --only-migratable is specified */ 3389 if (!only_migratable) { 3390 return true; 3391 } 3392 3393 return !(vmsd && vmsd->unmigratable); 3394 } 3395 3396 typedef struct SnapshotJob { 3397 Job common; 3398 char *tag; 3399 char *vmstate; 3400 strList *devices; 3401 Coroutine *co; 3402 Error **errp; 3403 bool ret; 3404 } SnapshotJob; 3405 3406 static void qmp_snapshot_job_free(SnapshotJob *s) 3407 { 3408 g_free(s->tag); 3409 g_free(s->vmstate); 3410 qapi_free_strList(s->devices); 3411 } 3412 3413 3414 static void snapshot_load_job_bh(void *opaque) 3415 { 3416 Job *job = opaque; 3417 SnapshotJob *s = container_of(job, SnapshotJob, common); 3418 RunState orig_state = runstate_get(); 3419 3420 job_progress_set_remaining(&s->common, 1); 3421 3422 vm_stop(RUN_STATE_RESTORE_VM); 3423 3424 s->ret = load_snapshot(s->tag, s->vmstate, true, s->devices, s->errp); 3425 if (s->ret) { 3426 load_snapshot_resume(orig_state); 3427 } 3428 3429 job_progress_update(&s->common, 1); 3430 3431 qmp_snapshot_job_free(s); 3432 aio_co_wake(s->co); 3433 } 3434 3435 static void snapshot_save_job_bh(void *opaque) 3436 { 3437 Job *job = opaque; 3438 SnapshotJob *s = container_of(job, SnapshotJob, common); 3439 3440 job_progress_set_remaining(&s->common, 1); 3441 s->ret = save_snapshot(s->tag, false, s->vmstate, 3442 true, s->devices, s->errp); 3443 job_progress_update(&s->common, 1); 3444 3445 qmp_snapshot_job_free(s); 3446 aio_co_wake(s->co); 3447 } 3448 3449 static void snapshot_delete_job_bh(void *opaque) 3450 { 3451 Job *job = opaque; 3452 SnapshotJob *s = container_of(job, SnapshotJob, common); 3453 3454 job_progress_set_remaining(&s->common, 1); 3455 s->ret = delete_snapshot(s->tag, true, s->devices, s->errp); 3456 job_progress_update(&s->common, 1); 3457 3458 qmp_snapshot_job_free(s); 3459 aio_co_wake(s->co); 3460 } 3461 3462 static int coroutine_fn snapshot_save_job_run(Job *job, Error **errp) 3463 { 3464 SnapshotJob *s = container_of(job, SnapshotJob, common); 3465 s->errp = errp; 3466 s->co = qemu_coroutine_self(); 3467 aio_bh_schedule_oneshot(qemu_get_aio_context(), 3468 snapshot_save_job_bh, job); 3469 qemu_coroutine_yield(); 3470 return s->ret ? 0 : -1; 3471 } 3472 3473 static int coroutine_fn snapshot_load_job_run(Job *job, Error **errp) 3474 { 3475 SnapshotJob *s = container_of(job, SnapshotJob, common); 3476 s->errp = errp; 3477 s->co = qemu_coroutine_self(); 3478 aio_bh_schedule_oneshot(qemu_get_aio_context(), 3479 snapshot_load_job_bh, job); 3480 qemu_coroutine_yield(); 3481 return s->ret ? 0 : -1; 3482 } 3483 3484 static int coroutine_fn snapshot_delete_job_run(Job *job, Error **errp) 3485 { 3486 SnapshotJob *s = container_of(job, SnapshotJob, common); 3487 s->errp = errp; 3488 s->co = qemu_coroutine_self(); 3489 aio_bh_schedule_oneshot(qemu_get_aio_context(), 3490 snapshot_delete_job_bh, job); 3491 qemu_coroutine_yield(); 3492 return s->ret ? 0 : -1; 3493 } 3494 3495 3496 static const JobDriver snapshot_load_job_driver = { 3497 .instance_size = sizeof(SnapshotJob), 3498 .job_type = JOB_TYPE_SNAPSHOT_LOAD, 3499 .run = snapshot_load_job_run, 3500 }; 3501 3502 static const JobDriver snapshot_save_job_driver = { 3503 .instance_size = sizeof(SnapshotJob), 3504 .job_type = JOB_TYPE_SNAPSHOT_SAVE, 3505 .run = snapshot_save_job_run, 3506 }; 3507 3508 static const JobDriver snapshot_delete_job_driver = { 3509 .instance_size = sizeof(SnapshotJob), 3510 .job_type = JOB_TYPE_SNAPSHOT_DELETE, 3511 .run = snapshot_delete_job_run, 3512 }; 3513 3514 3515 void qmp_snapshot_save(const char *job_id, 3516 const char *tag, 3517 const char *vmstate, 3518 strList *devices, 3519 Error **errp) 3520 { 3521 SnapshotJob *s; 3522 3523 s = job_create(job_id, &snapshot_save_job_driver, NULL, 3524 qemu_get_aio_context(), JOB_MANUAL_DISMISS, 3525 NULL, NULL, errp); 3526 if (!s) { 3527 return; 3528 } 3529 3530 s->tag = g_strdup(tag); 3531 s->vmstate = g_strdup(vmstate); 3532 s->devices = QAPI_CLONE(strList, devices); 3533 3534 job_start(&s->common); 3535 } 3536 3537 void qmp_snapshot_load(const char *job_id, 3538 const char *tag, 3539 const char *vmstate, 3540 strList *devices, 3541 Error **errp) 3542 { 3543 SnapshotJob *s; 3544 3545 s = job_create(job_id, &snapshot_load_job_driver, NULL, 3546 qemu_get_aio_context(), JOB_MANUAL_DISMISS, 3547 NULL, NULL, errp); 3548 if (!s) { 3549 return; 3550 } 3551 3552 s->tag = g_strdup(tag); 3553 s->vmstate = g_strdup(vmstate); 3554 s->devices = QAPI_CLONE(strList, devices); 3555 3556 job_start(&s->common); 3557 } 3558 3559 void qmp_snapshot_delete(const char *job_id, 3560 const char *tag, 3561 strList *devices, 3562 Error **errp) 3563 { 3564 SnapshotJob *s; 3565 3566 s = job_create(job_id, &snapshot_delete_job_driver, NULL, 3567 qemu_get_aio_context(), JOB_MANUAL_DISMISS, 3568 NULL, NULL, errp); 3569 if (!s) { 3570 return; 3571 } 3572 3573 s->tag = g_strdup(tag); 3574 s->devices = QAPI_CLONE(strList, devices); 3575 3576 job_start(&s->common); 3577 } 3578