1 /* 2 * QEMU dump 3 * 4 * Copyright Fujitsu, Corp. 2011, 2012 5 * 6 * Authors: 7 * Wen Congyang <wency@cn.fujitsu.com> 8 * 9 * This work is licensed under the terms of the GNU GPL, version 2 or later. 10 * See the COPYING file in the top-level directory. 11 * 12 */ 13 14 #include "qemu/osdep.h" 15 #include "qemu/cutils.h" 16 #include "elf.h" 17 #include "exec/hwaddr.h" 18 #include "exec/target_page.h" 19 #include "monitor/monitor.h" 20 #include "sysemu/kvm.h" 21 #include "sysemu/dump.h" 22 #include "sysemu/memory_mapping.h" 23 #include "sysemu/runstate.h" 24 #include "sysemu/cpus.h" 25 #include "qapi/error.h" 26 #include "qapi/qapi-commands-dump.h" 27 #include "qapi/qapi-events-dump.h" 28 #include "qapi/qmp/qerror.h" 29 #include "qemu/error-report.h" 30 #include "qemu/main-loop.h" 31 #include "hw/misc/vmcoreinfo.h" 32 #include "migration/blocker.h" 33 34 #ifdef TARGET_X86_64 35 #include "win_dump.h" 36 #endif 37 38 #include <zlib.h> 39 #ifdef CONFIG_LZO 40 #include <lzo/lzo1x.h> 41 #endif 42 #ifdef CONFIG_SNAPPY 43 #include <snappy-c.h> 44 #endif 45 #ifndef ELF_MACHINE_UNAME 46 #define ELF_MACHINE_UNAME "Unknown" 47 #endif 48 49 #define MAX_GUEST_NOTE_SIZE (1 << 20) /* 1MB should be enough */ 50 51 static Error *dump_migration_blocker; 52 53 #define ELF_NOTE_SIZE(hdr_size, name_size, desc_size) \ 54 ((DIV_ROUND_UP((hdr_size), 4) + \ 55 DIV_ROUND_UP((name_size), 4) + \ 56 DIV_ROUND_UP((desc_size), 4)) * 4) 57 58 static inline bool dump_is_64bit(DumpState *s) 59 { 60 return s->dump_info.d_class == ELFCLASS64; 61 } 62 63 static inline bool dump_has_filter(DumpState *s) 64 { 65 return s->filter_area_length > 0; 66 } 67 68 uint16_t cpu_to_dump16(DumpState *s, uint16_t val) 69 { 70 if (s->dump_info.d_endian == ELFDATA2LSB) { 71 val = cpu_to_le16(val); 72 } else { 73 val = cpu_to_be16(val); 74 } 75 76 return val; 77 } 78 79 uint32_t cpu_to_dump32(DumpState *s, uint32_t val) 80 { 81 if (s->dump_info.d_endian == ELFDATA2LSB) { 82 val = cpu_to_le32(val); 83 } else { 84 val = cpu_to_be32(val); 85 } 86 87 return val; 88 } 89 90 uint64_t cpu_to_dump64(DumpState *s, uint64_t val) 91 { 92 if (s->dump_info.d_endian == ELFDATA2LSB) { 93 val = cpu_to_le64(val); 94 } else { 95 val = cpu_to_be64(val); 96 } 97 98 return val; 99 } 100 101 static int dump_cleanup(DumpState *s) 102 { 103 guest_phys_blocks_free(&s->guest_phys_blocks); 104 memory_mapping_list_free(&s->list); 105 close(s->fd); 106 g_free(s->guest_note); 107 g_array_unref(s->string_table_buf); 108 s->guest_note = NULL; 109 if (s->resume) { 110 if (s->detached) { 111 qemu_mutex_lock_iothread(); 112 } 113 vm_start(); 114 if (s->detached) { 115 qemu_mutex_unlock_iothread(); 116 } 117 } 118 migrate_del_blocker(dump_migration_blocker); 119 120 return 0; 121 } 122 123 static int fd_write_vmcore(const void *buf, size_t size, void *opaque) 124 { 125 DumpState *s = opaque; 126 size_t written_size; 127 128 written_size = qemu_write_full(s->fd, buf, size); 129 if (written_size != size) { 130 return -errno; 131 } 132 133 return 0; 134 } 135 136 static void prepare_elf64_header(DumpState *s, Elf64_Ehdr *elf_header) 137 { 138 /* 139 * phnum in the elf header is 16 bit, if we have more segments we 140 * set phnum to PN_XNUM and write the real number of segments to a 141 * special section. 142 */ 143 uint16_t phnum = MIN(s->phdr_num, PN_XNUM); 144 145 memset(elf_header, 0, sizeof(Elf64_Ehdr)); 146 memcpy(elf_header, ELFMAG, SELFMAG); 147 elf_header->e_ident[EI_CLASS] = ELFCLASS64; 148 elf_header->e_ident[EI_DATA] = s->dump_info.d_endian; 149 elf_header->e_ident[EI_VERSION] = EV_CURRENT; 150 elf_header->e_type = cpu_to_dump16(s, ET_CORE); 151 elf_header->e_machine = cpu_to_dump16(s, s->dump_info.d_machine); 152 elf_header->e_version = cpu_to_dump32(s, EV_CURRENT); 153 elf_header->e_ehsize = cpu_to_dump16(s, sizeof(elf_header)); 154 elf_header->e_phoff = cpu_to_dump64(s, s->phdr_offset); 155 elf_header->e_phentsize = cpu_to_dump16(s, sizeof(Elf64_Phdr)); 156 elf_header->e_phnum = cpu_to_dump16(s, phnum); 157 elf_header->e_shoff = cpu_to_dump64(s, s->shdr_offset); 158 elf_header->e_shentsize = cpu_to_dump16(s, sizeof(Elf64_Shdr)); 159 elf_header->e_shnum = cpu_to_dump16(s, s->shdr_num); 160 elf_header->e_shstrndx = cpu_to_dump16(s, s->shdr_num - 1); 161 } 162 163 static void prepare_elf32_header(DumpState *s, Elf32_Ehdr *elf_header) 164 { 165 /* 166 * phnum in the elf header is 16 bit, if we have more segments we 167 * set phnum to PN_XNUM and write the real number of segments to a 168 * special section. 169 */ 170 uint16_t phnum = MIN(s->phdr_num, PN_XNUM); 171 172 memset(elf_header, 0, sizeof(Elf32_Ehdr)); 173 memcpy(elf_header, ELFMAG, SELFMAG); 174 elf_header->e_ident[EI_CLASS] = ELFCLASS32; 175 elf_header->e_ident[EI_DATA] = s->dump_info.d_endian; 176 elf_header->e_ident[EI_VERSION] = EV_CURRENT; 177 elf_header->e_type = cpu_to_dump16(s, ET_CORE); 178 elf_header->e_machine = cpu_to_dump16(s, s->dump_info.d_machine); 179 elf_header->e_version = cpu_to_dump32(s, EV_CURRENT); 180 elf_header->e_ehsize = cpu_to_dump16(s, sizeof(elf_header)); 181 elf_header->e_phoff = cpu_to_dump32(s, s->phdr_offset); 182 elf_header->e_phentsize = cpu_to_dump16(s, sizeof(Elf32_Phdr)); 183 elf_header->e_phnum = cpu_to_dump16(s, phnum); 184 elf_header->e_shoff = cpu_to_dump32(s, s->shdr_offset); 185 elf_header->e_shentsize = cpu_to_dump16(s, sizeof(Elf32_Shdr)); 186 elf_header->e_shnum = cpu_to_dump16(s, s->shdr_num); 187 elf_header->e_shstrndx = cpu_to_dump16(s, s->shdr_num - 1); 188 } 189 190 static void write_elf_header(DumpState *s, Error **errp) 191 { 192 Elf32_Ehdr elf32_header; 193 Elf64_Ehdr elf64_header; 194 size_t header_size; 195 void *header_ptr; 196 int ret; 197 198 /* The NULL header and the shstrtab are always defined */ 199 assert(s->shdr_num >= 2); 200 if (dump_is_64bit(s)) { 201 prepare_elf64_header(s, &elf64_header); 202 header_size = sizeof(elf64_header); 203 header_ptr = &elf64_header; 204 } else { 205 prepare_elf32_header(s, &elf32_header); 206 header_size = sizeof(elf32_header); 207 header_ptr = &elf32_header; 208 } 209 210 ret = fd_write_vmcore(header_ptr, header_size, s); 211 if (ret < 0) { 212 error_setg_errno(errp, -ret, "dump: failed to write elf header"); 213 } 214 } 215 216 static void write_elf64_load(DumpState *s, MemoryMapping *memory_mapping, 217 int phdr_index, hwaddr offset, 218 hwaddr filesz, Error **errp) 219 { 220 Elf64_Phdr phdr; 221 int ret; 222 223 memset(&phdr, 0, sizeof(Elf64_Phdr)); 224 phdr.p_type = cpu_to_dump32(s, PT_LOAD); 225 phdr.p_offset = cpu_to_dump64(s, offset); 226 phdr.p_paddr = cpu_to_dump64(s, memory_mapping->phys_addr); 227 phdr.p_filesz = cpu_to_dump64(s, filesz); 228 phdr.p_memsz = cpu_to_dump64(s, memory_mapping->length); 229 phdr.p_vaddr = cpu_to_dump64(s, memory_mapping->virt_addr) ?: phdr.p_paddr; 230 231 assert(memory_mapping->length >= filesz); 232 233 ret = fd_write_vmcore(&phdr, sizeof(Elf64_Phdr), s); 234 if (ret < 0) { 235 error_setg_errno(errp, -ret, 236 "dump: failed to write program header table"); 237 } 238 } 239 240 static void write_elf32_load(DumpState *s, MemoryMapping *memory_mapping, 241 int phdr_index, hwaddr offset, 242 hwaddr filesz, Error **errp) 243 { 244 Elf32_Phdr phdr; 245 int ret; 246 247 memset(&phdr, 0, sizeof(Elf32_Phdr)); 248 phdr.p_type = cpu_to_dump32(s, PT_LOAD); 249 phdr.p_offset = cpu_to_dump32(s, offset); 250 phdr.p_paddr = cpu_to_dump32(s, memory_mapping->phys_addr); 251 phdr.p_filesz = cpu_to_dump32(s, filesz); 252 phdr.p_memsz = cpu_to_dump32(s, memory_mapping->length); 253 phdr.p_vaddr = 254 cpu_to_dump32(s, memory_mapping->virt_addr) ?: phdr.p_paddr; 255 256 assert(memory_mapping->length >= filesz); 257 258 ret = fd_write_vmcore(&phdr, sizeof(Elf32_Phdr), s); 259 if (ret < 0) { 260 error_setg_errno(errp, -ret, 261 "dump: failed to write program header table"); 262 } 263 } 264 265 static void prepare_elf64_phdr_note(DumpState *s, Elf64_Phdr *phdr) 266 { 267 memset(phdr, 0, sizeof(*phdr)); 268 phdr->p_type = cpu_to_dump32(s, PT_NOTE); 269 phdr->p_offset = cpu_to_dump64(s, s->note_offset); 270 phdr->p_paddr = 0; 271 phdr->p_filesz = cpu_to_dump64(s, s->note_size); 272 phdr->p_memsz = cpu_to_dump64(s, s->note_size); 273 phdr->p_vaddr = 0; 274 } 275 276 static inline int cpu_index(CPUState *cpu) 277 { 278 return cpu->cpu_index + 1; 279 } 280 281 static void write_guest_note(WriteCoreDumpFunction f, DumpState *s, 282 Error **errp) 283 { 284 int ret; 285 286 if (s->guest_note) { 287 ret = f(s->guest_note, s->guest_note_size, s); 288 if (ret < 0) { 289 error_setg(errp, "dump: failed to write guest note"); 290 } 291 } 292 } 293 294 static void write_elf64_notes(WriteCoreDumpFunction f, DumpState *s, 295 Error **errp) 296 { 297 CPUState *cpu; 298 int ret; 299 int id; 300 301 CPU_FOREACH(cpu) { 302 id = cpu_index(cpu); 303 ret = cpu_write_elf64_note(f, cpu, id, s); 304 if (ret < 0) { 305 error_setg(errp, "dump: failed to write elf notes"); 306 return; 307 } 308 } 309 310 CPU_FOREACH(cpu) { 311 ret = cpu_write_elf64_qemunote(f, cpu, s); 312 if (ret < 0) { 313 error_setg(errp, "dump: failed to write CPU status"); 314 return; 315 } 316 } 317 318 write_guest_note(f, s, errp); 319 } 320 321 static void prepare_elf32_phdr_note(DumpState *s, Elf32_Phdr *phdr) 322 { 323 memset(phdr, 0, sizeof(*phdr)); 324 phdr->p_type = cpu_to_dump32(s, PT_NOTE); 325 phdr->p_offset = cpu_to_dump32(s, s->note_offset); 326 phdr->p_paddr = 0; 327 phdr->p_filesz = cpu_to_dump32(s, s->note_size); 328 phdr->p_memsz = cpu_to_dump32(s, s->note_size); 329 phdr->p_vaddr = 0; 330 } 331 332 static void write_elf32_notes(WriteCoreDumpFunction f, DumpState *s, 333 Error **errp) 334 { 335 CPUState *cpu; 336 int ret; 337 int id; 338 339 CPU_FOREACH(cpu) { 340 id = cpu_index(cpu); 341 ret = cpu_write_elf32_note(f, cpu, id, s); 342 if (ret < 0) { 343 error_setg(errp, "dump: failed to write elf notes"); 344 return; 345 } 346 } 347 348 CPU_FOREACH(cpu) { 349 ret = cpu_write_elf32_qemunote(f, cpu, s); 350 if (ret < 0) { 351 error_setg(errp, "dump: failed to write CPU status"); 352 return; 353 } 354 } 355 356 write_guest_note(f, s, errp); 357 } 358 359 static void write_elf_phdr_note(DumpState *s, Error **errp) 360 { 361 Elf32_Phdr phdr32; 362 Elf64_Phdr phdr64; 363 void *phdr; 364 size_t size; 365 int ret; 366 367 if (dump_is_64bit(s)) { 368 prepare_elf64_phdr_note(s, &phdr64); 369 size = sizeof(phdr64); 370 phdr = &phdr64; 371 } else { 372 prepare_elf32_phdr_note(s, &phdr32); 373 size = sizeof(phdr32); 374 phdr = &phdr32; 375 } 376 377 ret = fd_write_vmcore(phdr, size, s); 378 if (ret < 0) { 379 error_setg_errno(errp, -ret, 380 "dump: failed to write program header table"); 381 } 382 } 383 384 static void prepare_elf_section_hdr_zero(DumpState *s) 385 { 386 if (dump_is_64bit(s)) { 387 Elf64_Shdr *shdr64 = s->elf_section_hdrs; 388 389 shdr64->sh_info = cpu_to_dump32(s, s->phdr_num); 390 } else { 391 Elf32_Shdr *shdr32 = s->elf_section_hdrs; 392 393 shdr32->sh_info = cpu_to_dump32(s, s->phdr_num); 394 } 395 } 396 397 static void prepare_elf_section_hdr_string(DumpState *s, void *buff) 398 { 399 uint64_t index = s->string_table_buf->len; 400 const char strtab[] = ".shstrtab"; 401 Elf32_Shdr shdr32 = {}; 402 Elf64_Shdr shdr64 = {}; 403 int shdr_size; 404 void *shdr; 405 406 g_array_append_vals(s->string_table_buf, strtab, sizeof(strtab)); 407 if (dump_is_64bit(s)) { 408 shdr_size = sizeof(Elf64_Shdr); 409 shdr64.sh_type = SHT_STRTAB; 410 shdr64.sh_offset = s->section_offset + s->elf_section_data_size; 411 shdr64.sh_name = index; 412 shdr64.sh_size = s->string_table_buf->len; 413 shdr = &shdr64; 414 } else { 415 shdr_size = sizeof(Elf32_Shdr); 416 shdr32.sh_type = SHT_STRTAB; 417 shdr32.sh_offset = s->section_offset + s->elf_section_data_size; 418 shdr32.sh_name = index; 419 shdr32.sh_size = s->string_table_buf->len; 420 shdr = &shdr32; 421 } 422 memcpy(buff, shdr, shdr_size); 423 } 424 425 static bool prepare_elf_section_hdrs(DumpState *s, Error **errp) 426 { 427 size_t len, sizeof_shdr; 428 void *buff_hdr; 429 430 /* 431 * Section ordering: 432 * - HDR zero 433 * - Arch section hdrs 434 * - String table hdr 435 */ 436 sizeof_shdr = dump_is_64bit(s) ? sizeof(Elf64_Shdr) : sizeof(Elf32_Shdr); 437 len = sizeof_shdr * s->shdr_num; 438 s->elf_section_hdrs = g_malloc0(len); 439 buff_hdr = s->elf_section_hdrs; 440 441 /* 442 * The first section header is ALWAYS a special initial section 443 * header. 444 * 445 * The header should be 0 with one exception being that if 446 * phdr_num is PN_XNUM then the sh_info field contains the real 447 * number of segment entries. 448 * 449 * As we zero allocate the buffer we will only need to modify 450 * sh_info for the PN_XNUM case. 451 */ 452 if (s->phdr_num >= PN_XNUM) { 453 prepare_elf_section_hdr_zero(s); 454 } 455 buff_hdr += sizeof_shdr; 456 457 /* Add architecture defined section headers */ 458 if (s->dump_info.arch_sections_write_hdr_fn 459 && s->shdr_num > 2) { 460 buff_hdr += s->dump_info.arch_sections_write_hdr_fn(s, buff_hdr); 461 462 if (s->shdr_num >= SHN_LORESERVE) { 463 error_setg_errno(errp, EINVAL, 464 "dump: too many architecture defined sections"); 465 return false; 466 } 467 } 468 469 /* 470 * String table is the last section since strings are added via 471 * arch_sections_write_hdr(). 472 */ 473 prepare_elf_section_hdr_string(s, buff_hdr); 474 return true; 475 } 476 477 static void write_elf_section_headers(DumpState *s, Error **errp) 478 { 479 size_t sizeof_shdr = dump_is_64bit(s) ? sizeof(Elf64_Shdr) : sizeof(Elf32_Shdr); 480 int ret; 481 482 if (!prepare_elf_section_hdrs(s, errp)) { 483 return; 484 } 485 486 ret = fd_write_vmcore(s->elf_section_hdrs, s->shdr_num * sizeof_shdr, s); 487 if (ret < 0) { 488 error_setg_errno(errp, -ret, "dump: failed to write section headers"); 489 } 490 491 g_free(s->elf_section_hdrs); 492 } 493 494 static void write_elf_sections(DumpState *s, Error **errp) 495 { 496 int ret; 497 498 if (s->elf_section_data_size) { 499 /* Write architecture section data */ 500 ret = fd_write_vmcore(s->elf_section_data, 501 s->elf_section_data_size, s); 502 if (ret < 0) { 503 error_setg_errno(errp, -ret, 504 "dump: failed to write architecture section data"); 505 return; 506 } 507 } 508 509 /* Write string table */ 510 ret = fd_write_vmcore(s->string_table_buf->data, 511 s->string_table_buf->len, s); 512 if (ret < 0) { 513 error_setg_errno(errp, -ret, "dump: failed to write string table data"); 514 } 515 } 516 517 static void write_data(DumpState *s, void *buf, int length, Error **errp) 518 { 519 int ret; 520 521 ret = fd_write_vmcore(buf, length, s); 522 if (ret < 0) { 523 error_setg_errno(errp, -ret, "dump: failed to save memory"); 524 } else { 525 s->written_size += length; 526 } 527 } 528 529 /* write the memory to vmcore. 1 page per I/O. */ 530 static void write_memory(DumpState *s, GuestPhysBlock *block, ram_addr_t start, 531 int64_t size, Error **errp) 532 { 533 ERRP_GUARD(); 534 int64_t i; 535 536 for (i = 0; i < size / s->dump_info.page_size; i++) { 537 write_data(s, block->host_addr + start + i * s->dump_info.page_size, 538 s->dump_info.page_size, errp); 539 if (*errp) { 540 return; 541 } 542 } 543 544 if ((size % s->dump_info.page_size) != 0) { 545 write_data(s, block->host_addr + start + i * s->dump_info.page_size, 546 size % s->dump_info.page_size, errp); 547 if (*errp) { 548 return; 549 } 550 } 551 } 552 553 /* get the memory's offset and size in the vmcore */ 554 static void get_offset_range(hwaddr phys_addr, 555 ram_addr_t mapping_length, 556 DumpState *s, 557 hwaddr *p_offset, 558 hwaddr *p_filesz) 559 { 560 GuestPhysBlock *block; 561 hwaddr offset = s->memory_offset; 562 int64_t size_in_block, start; 563 564 /* When the memory is not stored into vmcore, offset will be -1 */ 565 *p_offset = -1; 566 *p_filesz = 0; 567 568 if (dump_has_filter(s)) { 569 if (phys_addr < s->filter_area_begin || 570 phys_addr >= s->filter_area_begin + s->filter_area_length) { 571 return; 572 } 573 } 574 575 QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) { 576 if (dump_has_filter(s)) { 577 if (block->target_start >= s->filter_area_begin + s->filter_area_length || 578 block->target_end <= s->filter_area_begin) { 579 /* This block is out of the range */ 580 continue; 581 } 582 583 if (s->filter_area_begin <= block->target_start) { 584 start = block->target_start; 585 } else { 586 start = s->filter_area_begin; 587 } 588 589 size_in_block = block->target_end - start; 590 if (s->filter_area_begin + s->filter_area_length < block->target_end) { 591 size_in_block -= block->target_end - (s->filter_area_begin + s->filter_area_length); 592 } 593 } else { 594 start = block->target_start; 595 size_in_block = block->target_end - block->target_start; 596 } 597 598 if (phys_addr >= start && phys_addr < start + size_in_block) { 599 *p_offset = phys_addr - start + offset; 600 601 /* The offset range mapped from the vmcore file must not spill over 602 * the GuestPhysBlock, clamp it. The rest of the mapping will be 603 * zero-filled in memory at load time; see 604 * <http://refspecs.linuxbase.org/elf/gabi4+/ch5.pheader.html>. 605 */ 606 *p_filesz = phys_addr + mapping_length <= start + size_in_block ? 607 mapping_length : 608 size_in_block - (phys_addr - start); 609 return; 610 } 611 612 offset += size_in_block; 613 } 614 } 615 616 static void write_elf_phdr_loads(DumpState *s, Error **errp) 617 { 618 ERRP_GUARD(); 619 hwaddr offset, filesz; 620 MemoryMapping *memory_mapping; 621 uint32_t phdr_index = 1; 622 623 QTAILQ_FOREACH(memory_mapping, &s->list.head, next) { 624 get_offset_range(memory_mapping->phys_addr, 625 memory_mapping->length, 626 s, &offset, &filesz); 627 if (dump_is_64bit(s)) { 628 write_elf64_load(s, memory_mapping, phdr_index++, offset, 629 filesz, errp); 630 } else { 631 write_elf32_load(s, memory_mapping, phdr_index++, offset, 632 filesz, errp); 633 } 634 635 if (*errp) { 636 return; 637 } 638 639 if (phdr_index >= s->phdr_num) { 640 break; 641 } 642 } 643 } 644 645 static void write_elf_notes(DumpState *s, Error **errp) 646 { 647 if (dump_is_64bit(s)) { 648 write_elf64_notes(fd_write_vmcore, s, errp); 649 } else { 650 write_elf32_notes(fd_write_vmcore, s, errp); 651 } 652 } 653 654 /* write elf header, PT_NOTE and elf note to vmcore. */ 655 static void dump_begin(DumpState *s, Error **errp) 656 { 657 ERRP_GUARD(); 658 659 /* 660 * the vmcore's format is: 661 * -------------- 662 * | elf header | 663 * -------------- 664 * | sctn_hdr | 665 * -------------- 666 * | PT_NOTE | 667 * -------------- 668 * | PT_LOAD | 669 * -------------- 670 * | ...... | 671 * -------------- 672 * | PT_LOAD | 673 * -------------- 674 * | elf note | 675 * -------------- 676 * | memory | 677 * -------------- 678 * 679 * we only know where the memory is saved after we write elf note into 680 * vmcore. 681 */ 682 683 /* write elf header to vmcore */ 684 write_elf_header(s, errp); 685 if (*errp) { 686 return; 687 } 688 689 /* write section headers to vmcore */ 690 write_elf_section_headers(s, errp); 691 if (*errp) { 692 return; 693 } 694 695 /* write PT_NOTE to vmcore */ 696 write_elf_phdr_note(s, errp); 697 if (*errp) { 698 return; 699 } 700 701 /* write all PT_LOADs to vmcore */ 702 write_elf_phdr_loads(s, errp); 703 if (*errp) { 704 return; 705 } 706 707 /* write notes to vmcore */ 708 write_elf_notes(s, errp); 709 } 710 711 int64_t dump_filtered_memblock_size(GuestPhysBlock *block, 712 int64_t filter_area_start, 713 int64_t filter_area_length) 714 { 715 int64_t size, left, right; 716 717 /* No filter, return full size */ 718 if (!filter_area_length) { 719 return block->target_end - block->target_start; 720 } 721 722 /* calculate the overlapped region. */ 723 left = MAX(filter_area_start, block->target_start); 724 right = MIN(filter_area_start + filter_area_length, block->target_end); 725 size = right - left; 726 size = size > 0 ? size : 0; 727 728 return size; 729 } 730 731 int64_t dump_filtered_memblock_start(GuestPhysBlock *block, 732 int64_t filter_area_start, 733 int64_t filter_area_length) 734 { 735 if (filter_area_length) { 736 /* return -1 if the block is not within filter area */ 737 if (block->target_start >= filter_area_start + filter_area_length || 738 block->target_end <= filter_area_start) { 739 return -1; 740 } 741 742 if (filter_area_start > block->target_start) { 743 return filter_area_start - block->target_start; 744 } 745 } 746 747 return 0; 748 } 749 750 /* write all memory to vmcore */ 751 static void dump_iterate(DumpState *s, Error **errp) 752 { 753 ERRP_GUARD(); 754 GuestPhysBlock *block; 755 int64_t memblock_size, memblock_start; 756 757 QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) { 758 memblock_start = dump_filtered_memblock_start(block, s->filter_area_begin, s->filter_area_length); 759 if (memblock_start == -1) { 760 continue; 761 } 762 763 memblock_size = dump_filtered_memblock_size(block, s->filter_area_begin, s->filter_area_length); 764 765 /* Write the memory to file */ 766 write_memory(s, block, memblock_start, memblock_size, errp); 767 if (*errp) { 768 return; 769 } 770 } 771 } 772 773 static void dump_end(DumpState *s, Error **errp) 774 { 775 int rc; 776 777 if (s->elf_section_data_size) { 778 s->elf_section_data = g_malloc0(s->elf_section_data_size); 779 } 780 781 /* Adds the architecture defined section data to s->elf_section_data */ 782 if (s->dump_info.arch_sections_write_fn && 783 s->elf_section_data_size) { 784 rc = s->dump_info.arch_sections_write_fn(s, s->elf_section_data); 785 if (rc) { 786 error_setg_errno(errp, rc, 787 "dump: failed to get arch section data"); 788 g_free(s->elf_section_data); 789 return; 790 } 791 } 792 793 /* write sections to vmcore */ 794 write_elf_sections(s, errp); 795 } 796 797 static void create_vmcore(DumpState *s, Error **errp) 798 { 799 ERRP_GUARD(); 800 801 dump_begin(s, errp); 802 if (*errp) { 803 return; 804 } 805 806 /* Iterate over memory and dump it to file */ 807 dump_iterate(s, errp); 808 if (*errp) { 809 return; 810 } 811 812 /* Write the section data */ 813 dump_end(s, errp); 814 } 815 816 static int write_start_flat_header(int fd) 817 { 818 MakedumpfileHeader *mh; 819 int ret = 0; 820 821 QEMU_BUILD_BUG_ON(sizeof *mh > MAX_SIZE_MDF_HEADER); 822 mh = g_malloc0(MAX_SIZE_MDF_HEADER); 823 824 memcpy(mh->signature, MAKEDUMPFILE_SIGNATURE, 825 MIN(sizeof mh->signature, sizeof MAKEDUMPFILE_SIGNATURE)); 826 827 mh->type = cpu_to_be64(TYPE_FLAT_HEADER); 828 mh->version = cpu_to_be64(VERSION_FLAT_HEADER); 829 830 size_t written_size; 831 written_size = qemu_write_full(fd, mh, MAX_SIZE_MDF_HEADER); 832 if (written_size != MAX_SIZE_MDF_HEADER) { 833 ret = -1; 834 } 835 836 g_free(mh); 837 return ret; 838 } 839 840 static int write_end_flat_header(int fd) 841 { 842 MakedumpfileDataHeader mdh; 843 844 mdh.offset = END_FLAG_FLAT_HEADER; 845 mdh.buf_size = END_FLAG_FLAT_HEADER; 846 847 size_t written_size; 848 written_size = qemu_write_full(fd, &mdh, sizeof(mdh)); 849 if (written_size != sizeof(mdh)) { 850 return -1; 851 } 852 853 return 0; 854 } 855 856 static int write_buffer(int fd, off_t offset, const void *buf, size_t size) 857 { 858 size_t written_size; 859 MakedumpfileDataHeader mdh; 860 861 mdh.offset = cpu_to_be64(offset); 862 mdh.buf_size = cpu_to_be64(size); 863 864 written_size = qemu_write_full(fd, &mdh, sizeof(mdh)); 865 if (written_size != sizeof(mdh)) { 866 return -1; 867 } 868 869 written_size = qemu_write_full(fd, buf, size); 870 if (written_size != size) { 871 return -1; 872 } 873 874 return 0; 875 } 876 877 static int buf_write_note(const void *buf, size_t size, void *opaque) 878 { 879 DumpState *s = opaque; 880 881 /* note_buf is not enough */ 882 if (s->note_buf_offset + size > s->note_size) { 883 return -1; 884 } 885 886 memcpy(s->note_buf + s->note_buf_offset, buf, size); 887 888 s->note_buf_offset += size; 889 890 return 0; 891 } 892 893 /* 894 * This function retrieves various sizes from an elf header. 895 * 896 * @note has to be a valid ELF note. The return sizes are unmodified 897 * (not padded or rounded up to be multiple of 4). 898 */ 899 static void get_note_sizes(DumpState *s, const void *note, 900 uint64_t *note_head_size, 901 uint64_t *name_size, 902 uint64_t *desc_size) 903 { 904 uint64_t note_head_sz; 905 uint64_t name_sz; 906 uint64_t desc_sz; 907 908 if (dump_is_64bit(s)) { 909 const Elf64_Nhdr *hdr = note; 910 note_head_sz = sizeof(Elf64_Nhdr); 911 name_sz = cpu_to_dump64(s, hdr->n_namesz); 912 desc_sz = cpu_to_dump64(s, hdr->n_descsz); 913 } else { 914 const Elf32_Nhdr *hdr = note; 915 note_head_sz = sizeof(Elf32_Nhdr); 916 name_sz = cpu_to_dump32(s, hdr->n_namesz); 917 desc_sz = cpu_to_dump32(s, hdr->n_descsz); 918 } 919 920 if (note_head_size) { 921 *note_head_size = note_head_sz; 922 } 923 if (name_size) { 924 *name_size = name_sz; 925 } 926 if (desc_size) { 927 *desc_size = desc_sz; 928 } 929 } 930 931 static bool note_name_equal(DumpState *s, 932 const uint8_t *note, const char *name) 933 { 934 int len = strlen(name) + 1; 935 uint64_t head_size, name_size; 936 937 get_note_sizes(s, note, &head_size, &name_size, NULL); 938 head_size = ROUND_UP(head_size, 4); 939 940 return name_size == len && memcmp(note + head_size, name, len) == 0; 941 } 942 943 /* write common header, sub header and elf note to vmcore */ 944 static void create_header32(DumpState *s, Error **errp) 945 { 946 ERRP_GUARD(); 947 DiskDumpHeader32 *dh = NULL; 948 KdumpSubHeader32 *kh = NULL; 949 size_t size; 950 uint32_t block_size; 951 uint32_t sub_hdr_size; 952 uint32_t bitmap_blocks; 953 uint32_t status = 0; 954 uint64_t offset_note; 955 956 /* write common header, the version of kdump-compressed format is 6th */ 957 size = sizeof(DiskDumpHeader32); 958 dh = g_malloc0(size); 959 960 memcpy(dh->signature, KDUMP_SIGNATURE, SIG_LEN); 961 dh->header_version = cpu_to_dump32(s, 6); 962 block_size = s->dump_info.page_size; 963 dh->block_size = cpu_to_dump32(s, block_size); 964 sub_hdr_size = sizeof(struct KdumpSubHeader32) + s->note_size; 965 sub_hdr_size = DIV_ROUND_UP(sub_hdr_size, block_size); 966 dh->sub_hdr_size = cpu_to_dump32(s, sub_hdr_size); 967 /* dh->max_mapnr may be truncated, full 64bit is in kh.max_mapnr_64 */ 968 dh->max_mapnr = cpu_to_dump32(s, MIN(s->max_mapnr, UINT_MAX)); 969 dh->nr_cpus = cpu_to_dump32(s, s->nr_cpus); 970 bitmap_blocks = DIV_ROUND_UP(s->len_dump_bitmap, block_size) * 2; 971 dh->bitmap_blocks = cpu_to_dump32(s, bitmap_blocks); 972 strncpy(dh->utsname.machine, ELF_MACHINE_UNAME, sizeof(dh->utsname.machine)); 973 974 if (s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) { 975 status |= DUMP_DH_COMPRESSED_ZLIB; 976 } 977 #ifdef CONFIG_LZO 978 if (s->flag_compress & DUMP_DH_COMPRESSED_LZO) { 979 status |= DUMP_DH_COMPRESSED_LZO; 980 } 981 #endif 982 #ifdef CONFIG_SNAPPY 983 if (s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) { 984 status |= DUMP_DH_COMPRESSED_SNAPPY; 985 } 986 #endif 987 dh->status = cpu_to_dump32(s, status); 988 989 if (write_buffer(s->fd, 0, dh, size) < 0) { 990 error_setg(errp, "dump: failed to write disk dump header"); 991 goto out; 992 } 993 994 /* write sub header */ 995 size = sizeof(KdumpSubHeader32); 996 kh = g_malloc0(size); 997 998 /* 64bit max_mapnr_64 */ 999 kh->max_mapnr_64 = cpu_to_dump64(s, s->max_mapnr); 1000 kh->phys_base = cpu_to_dump32(s, s->dump_info.phys_base); 1001 kh->dump_level = cpu_to_dump32(s, DUMP_LEVEL); 1002 1003 offset_note = DISKDUMP_HEADER_BLOCKS * block_size + size; 1004 if (s->guest_note && 1005 note_name_equal(s, s->guest_note, "VMCOREINFO")) { 1006 uint64_t hsize, name_size, size_vmcoreinfo_desc, offset_vmcoreinfo; 1007 1008 get_note_sizes(s, s->guest_note, 1009 &hsize, &name_size, &size_vmcoreinfo_desc); 1010 offset_vmcoreinfo = offset_note + s->note_size - s->guest_note_size + 1011 (DIV_ROUND_UP(hsize, 4) + DIV_ROUND_UP(name_size, 4)) * 4; 1012 kh->offset_vmcoreinfo = cpu_to_dump64(s, offset_vmcoreinfo); 1013 kh->size_vmcoreinfo = cpu_to_dump32(s, size_vmcoreinfo_desc); 1014 } 1015 1016 kh->offset_note = cpu_to_dump64(s, offset_note); 1017 kh->note_size = cpu_to_dump32(s, s->note_size); 1018 1019 if (write_buffer(s->fd, DISKDUMP_HEADER_BLOCKS * 1020 block_size, kh, size) < 0) { 1021 error_setg(errp, "dump: failed to write kdump sub header"); 1022 goto out; 1023 } 1024 1025 /* write note */ 1026 s->note_buf = g_malloc0(s->note_size); 1027 s->note_buf_offset = 0; 1028 1029 /* use s->note_buf to store notes temporarily */ 1030 write_elf32_notes(buf_write_note, s, errp); 1031 if (*errp) { 1032 goto out; 1033 } 1034 if (write_buffer(s->fd, offset_note, s->note_buf, 1035 s->note_size) < 0) { 1036 error_setg(errp, "dump: failed to write notes"); 1037 goto out; 1038 } 1039 1040 /* get offset of dump_bitmap */ 1041 s->offset_dump_bitmap = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size) * 1042 block_size; 1043 1044 /* get offset of page */ 1045 s->offset_page = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size + bitmap_blocks) * 1046 block_size; 1047 1048 out: 1049 g_free(dh); 1050 g_free(kh); 1051 g_free(s->note_buf); 1052 } 1053 1054 /* write common header, sub header and elf note to vmcore */ 1055 static void create_header64(DumpState *s, Error **errp) 1056 { 1057 ERRP_GUARD(); 1058 DiskDumpHeader64 *dh = NULL; 1059 KdumpSubHeader64 *kh = NULL; 1060 size_t size; 1061 uint32_t block_size; 1062 uint32_t sub_hdr_size; 1063 uint32_t bitmap_blocks; 1064 uint32_t status = 0; 1065 uint64_t offset_note; 1066 1067 /* write common header, the version of kdump-compressed format is 6th */ 1068 size = sizeof(DiskDumpHeader64); 1069 dh = g_malloc0(size); 1070 1071 memcpy(dh->signature, KDUMP_SIGNATURE, SIG_LEN); 1072 dh->header_version = cpu_to_dump32(s, 6); 1073 block_size = s->dump_info.page_size; 1074 dh->block_size = cpu_to_dump32(s, block_size); 1075 sub_hdr_size = sizeof(struct KdumpSubHeader64) + s->note_size; 1076 sub_hdr_size = DIV_ROUND_UP(sub_hdr_size, block_size); 1077 dh->sub_hdr_size = cpu_to_dump32(s, sub_hdr_size); 1078 /* dh->max_mapnr may be truncated, full 64bit is in kh.max_mapnr_64 */ 1079 dh->max_mapnr = cpu_to_dump32(s, MIN(s->max_mapnr, UINT_MAX)); 1080 dh->nr_cpus = cpu_to_dump32(s, s->nr_cpus); 1081 bitmap_blocks = DIV_ROUND_UP(s->len_dump_bitmap, block_size) * 2; 1082 dh->bitmap_blocks = cpu_to_dump32(s, bitmap_blocks); 1083 strncpy(dh->utsname.machine, ELF_MACHINE_UNAME, sizeof(dh->utsname.machine)); 1084 1085 if (s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) { 1086 status |= DUMP_DH_COMPRESSED_ZLIB; 1087 } 1088 #ifdef CONFIG_LZO 1089 if (s->flag_compress & DUMP_DH_COMPRESSED_LZO) { 1090 status |= DUMP_DH_COMPRESSED_LZO; 1091 } 1092 #endif 1093 #ifdef CONFIG_SNAPPY 1094 if (s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) { 1095 status |= DUMP_DH_COMPRESSED_SNAPPY; 1096 } 1097 #endif 1098 dh->status = cpu_to_dump32(s, status); 1099 1100 if (write_buffer(s->fd, 0, dh, size) < 0) { 1101 error_setg(errp, "dump: failed to write disk dump header"); 1102 goto out; 1103 } 1104 1105 /* write sub header */ 1106 size = sizeof(KdumpSubHeader64); 1107 kh = g_malloc0(size); 1108 1109 /* 64bit max_mapnr_64 */ 1110 kh->max_mapnr_64 = cpu_to_dump64(s, s->max_mapnr); 1111 kh->phys_base = cpu_to_dump64(s, s->dump_info.phys_base); 1112 kh->dump_level = cpu_to_dump32(s, DUMP_LEVEL); 1113 1114 offset_note = DISKDUMP_HEADER_BLOCKS * block_size + size; 1115 if (s->guest_note && 1116 note_name_equal(s, s->guest_note, "VMCOREINFO")) { 1117 uint64_t hsize, name_size, size_vmcoreinfo_desc, offset_vmcoreinfo; 1118 1119 get_note_sizes(s, s->guest_note, 1120 &hsize, &name_size, &size_vmcoreinfo_desc); 1121 offset_vmcoreinfo = offset_note + s->note_size - s->guest_note_size + 1122 (DIV_ROUND_UP(hsize, 4) + DIV_ROUND_UP(name_size, 4)) * 4; 1123 kh->offset_vmcoreinfo = cpu_to_dump64(s, offset_vmcoreinfo); 1124 kh->size_vmcoreinfo = cpu_to_dump64(s, size_vmcoreinfo_desc); 1125 } 1126 1127 kh->offset_note = cpu_to_dump64(s, offset_note); 1128 kh->note_size = cpu_to_dump64(s, s->note_size); 1129 1130 if (write_buffer(s->fd, DISKDUMP_HEADER_BLOCKS * 1131 block_size, kh, size) < 0) { 1132 error_setg(errp, "dump: failed to write kdump sub header"); 1133 goto out; 1134 } 1135 1136 /* write note */ 1137 s->note_buf = g_malloc0(s->note_size); 1138 s->note_buf_offset = 0; 1139 1140 /* use s->note_buf to store notes temporarily */ 1141 write_elf64_notes(buf_write_note, s, errp); 1142 if (*errp) { 1143 goto out; 1144 } 1145 1146 if (write_buffer(s->fd, offset_note, s->note_buf, 1147 s->note_size) < 0) { 1148 error_setg(errp, "dump: failed to write notes"); 1149 goto out; 1150 } 1151 1152 /* get offset of dump_bitmap */ 1153 s->offset_dump_bitmap = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size) * 1154 block_size; 1155 1156 /* get offset of page */ 1157 s->offset_page = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size + bitmap_blocks) * 1158 block_size; 1159 1160 out: 1161 g_free(dh); 1162 g_free(kh); 1163 g_free(s->note_buf); 1164 } 1165 1166 static void write_dump_header(DumpState *s, Error **errp) 1167 { 1168 if (dump_is_64bit(s)) { 1169 create_header64(s, errp); 1170 } else { 1171 create_header32(s, errp); 1172 } 1173 } 1174 1175 static size_t dump_bitmap_get_bufsize(DumpState *s) 1176 { 1177 return s->dump_info.page_size; 1178 } 1179 1180 /* 1181 * set dump_bitmap sequencely. the bit before last_pfn is not allowed to be 1182 * rewritten, so if need to set the first bit, set last_pfn and pfn to 0. 1183 * set_dump_bitmap will always leave the recently set bit un-sync. And setting 1184 * (last bit + sizeof(buf) * 8) to 0 will do flushing the content in buf into 1185 * vmcore, ie. synchronizing un-sync bit into vmcore. 1186 */ 1187 static int set_dump_bitmap(uint64_t last_pfn, uint64_t pfn, bool value, 1188 uint8_t *buf, DumpState *s) 1189 { 1190 off_t old_offset, new_offset; 1191 off_t offset_bitmap1, offset_bitmap2; 1192 uint32_t byte, bit; 1193 size_t bitmap_bufsize = dump_bitmap_get_bufsize(s); 1194 size_t bits_per_buf = bitmap_bufsize * CHAR_BIT; 1195 1196 /* should not set the previous place */ 1197 assert(last_pfn <= pfn); 1198 1199 /* 1200 * if the bit needed to be set is not cached in buf, flush the data in buf 1201 * to vmcore firstly. 1202 * making new_offset be bigger than old_offset can also sync remained data 1203 * into vmcore. 1204 */ 1205 old_offset = bitmap_bufsize * (last_pfn / bits_per_buf); 1206 new_offset = bitmap_bufsize * (pfn / bits_per_buf); 1207 1208 while (old_offset < new_offset) { 1209 /* calculate the offset and write dump_bitmap */ 1210 offset_bitmap1 = s->offset_dump_bitmap + old_offset; 1211 if (write_buffer(s->fd, offset_bitmap1, buf, 1212 bitmap_bufsize) < 0) { 1213 return -1; 1214 } 1215 1216 /* dump level 1 is chosen, so 1st and 2nd bitmap are same */ 1217 offset_bitmap2 = s->offset_dump_bitmap + s->len_dump_bitmap + 1218 old_offset; 1219 if (write_buffer(s->fd, offset_bitmap2, buf, 1220 bitmap_bufsize) < 0) { 1221 return -1; 1222 } 1223 1224 memset(buf, 0, bitmap_bufsize); 1225 old_offset += bitmap_bufsize; 1226 } 1227 1228 /* get the exact place of the bit in the buf, and set it */ 1229 byte = (pfn % bits_per_buf) / CHAR_BIT; 1230 bit = (pfn % bits_per_buf) % CHAR_BIT; 1231 if (value) { 1232 buf[byte] |= 1u << bit; 1233 } else { 1234 buf[byte] &= ~(1u << bit); 1235 } 1236 1237 return 0; 1238 } 1239 1240 static uint64_t dump_paddr_to_pfn(DumpState *s, uint64_t addr) 1241 { 1242 int target_page_shift = ctz32(s->dump_info.page_size); 1243 1244 return (addr >> target_page_shift) - ARCH_PFN_OFFSET; 1245 } 1246 1247 static uint64_t dump_pfn_to_paddr(DumpState *s, uint64_t pfn) 1248 { 1249 int target_page_shift = ctz32(s->dump_info.page_size); 1250 1251 return (pfn + ARCH_PFN_OFFSET) << target_page_shift; 1252 } 1253 1254 /* 1255 * Return the page frame number and the page content in *bufptr. bufptr can be 1256 * NULL. If not NULL, *bufptr must contains a target page size of pre-allocated 1257 * memory. This is not necessarily the memory returned. 1258 */ 1259 static bool get_next_page(GuestPhysBlock **blockptr, uint64_t *pfnptr, 1260 uint8_t **bufptr, DumpState *s) 1261 { 1262 GuestPhysBlock *block = *blockptr; 1263 uint32_t page_size = s->dump_info.page_size; 1264 uint8_t *buf = NULL, *hbuf; 1265 hwaddr addr; 1266 1267 /* block == NULL means the start of the iteration */ 1268 if (!block) { 1269 block = QTAILQ_FIRST(&s->guest_phys_blocks.head); 1270 *blockptr = block; 1271 addr = block->target_start; 1272 *pfnptr = dump_paddr_to_pfn(s, addr); 1273 } else { 1274 *pfnptr += 1; 1275 addr = dump_pfn_to_paddr(s, *pfnptr); 1276 } 1277 assert(block != NULL); 1278 1279 while (1) { 1280 if (addr >= block->target_start && addr < block->target_end) { 1281 size_t n = MIN(block->target_end - addr, page_size - addr % page_size); 1282 hbuf = block->host_addr + (addr - block->target_start); 1283 if (!buf) { 1284 if (n == page_size) { 1285 /* this is a whole target page, go for it */ 1286 assert(addr % page_size == 0); 1287 buf = hbuf; 1288 break; 1289 } else if (bufptr) { 1290 assert(*bufptr); 1291 buf = *bufptr; 1292 memset(buf, 0, page_size); 1293 } else { 1294 return true; 1295 } 1296 } 1297 1298 memcpy(buf + addr % page_size, hbuf, n); 1299 addr += n; 1300 if (addr % page_size == 0) { 1301 /* we filled up the page */ 1302 break; 1303 } 1304 } else { 1305 /* the next page is in the next block */ 1306 *blockptr = block = QTAILQ_NEXT(block, next); 1307 if (!block) { 1308 break; 1309 } 1310 1311 addr = block->target_start; 1312 /* are we still in the same page? */ 1313 if (dump_paddr_to_pfn(s, addr) != *pfnptr) { 1314 if (buf) { 1315 /* no, but we already filled something earlier, return it */ 1316 break; 1317 } else { 1318 /* else continue from there */ 1319 *pfnptr = dump_paddr_to_pfn(s, addr); 1320 } 1321 } 1322 } 1323 } 1324 1325 if (bufptr) { 1326 *bufptr = buf; 1327 } 1328 1329 return buf != NULL; 1330 } 1331 1332 static void write_dump_bitmap(DumpState *s, Error **errp) 1333 { 1334 int ret = 0; 1335 uint64_t last_pfn, pfn; 1336 void *dump_bitmap_buf; 1337 size_t num_dumpable; 1338 GuestPhysBlock *block_iter = NULL; 1339 size_t bitmap_bufsize = dump_bitmap_get_bufsize(s); 1340 size_t bits_per_buf = bitmap_bufsize * CHAR_BIT; 1341 1342 /* dump_bitmap_buf is used to store dump_bitmap temporarily */ 1343 dump_bitmap_buf = g_malloc0(bitmap_bufsize); 1344 1345 num_dumpable = 0; 1346 last_pfn = 0; 1347 1348 /* 1349 * exam memory page by page, and set the bit in dump_bitmap corresponded 1350 * to the existing page. 1351 */ 1352 while (get_next_page(&block_iter, &pfn, NULL, s)) { 1353 ret = set_dump_bitmap(last_pfn, pfn, true, dump_bitmap_buf, s); 1354 if (ret < 0) { 1355 error_setg(errp, "dump: failed to set dump_bitmap"); 1356 goto out; 1357 } 1358 1359 last_pfn = pfn; 1360 num_dumpable++; 1361 } 1362 1363 /* 1364 * set_dump_bitmap will always leave the recently set bit un-sync. Here we 1365 * set the remaining bits from last_pfn to the end of the bitmap buffer to 1366 * 0. With those set, the un-sync bit will be synchronized into the vmcore. 1367 */ 1368 if (num_dumpable > 0) { 1369 ret = set_dump_bitmap(last_pfn, last_pfn + bits_per_buf, false, 1370 dump_bitmap_buf, s); 1371 if (ret < 0) { 1372 error_setg(errp, "dump: failed to sync dump_bitmap"); 1373 goto out; 1374 } 1375 } 1376 1377 /* number of dumpable pages that will be dumped later */ 1378 s->num_dumpable = num_dumpable; 1379 1380 out: 1381 g_free(dump_bitmap_buf); 1382 } 1383 1384 static void prepare_data_cache(DataCache *data_cache, DumpState *s, 1385 off_t offset) 1386 { 1387 data_cache->fd = s->fd; 1388 data_cache->data_size = 0; 1389 data_cache->buf_size = 4 * dump_bitmap_get_bufsize(s); 1390 data_cache->buf = g_malloc0(data_cache->buf_size); 1391 data_cache->offset = offset; 1392 } 1393 1394 static int write_cache(DataCache *dc, const void *buf, size_t size, 1395 bool flag_sync) 1396 { 1397 /* 1398 * dc->buf_size should not be less than size, otherwise dc will never be 1399 * enough 1400 */ 1401 assert(size <= dc->buf_size); 1402 1403 /* 1404 * if flag_sync is set, synchronize data in dc->buf into vmcore. 1405 * otherwise check if the space is enough for caching data in buf, if not, 1406 * write the data in dc->buf to dc->fd and reset dc->buf 1407 */ 1408 if ((!flag_sync && dc->data_size + size > dc->buf_size) || 1409 (flag_sync && dc->data_size > 0)) { 1410 if (write_buffer(dc->fd, dc->offset, dc->buf, dc->data_size) < 0) { 1411 return -1; 1412 } 1413 1414 dc->offset += dc->data_size; 1415 dc->data_size = 0; 1416 } 1417 1418 if (!flag_sync) { 1419 memcpy(dc->buf + dc->data_size, buf, size); 1420 dc->data_size += size; 1421 } 1422 1423 return 0; 1424 } 1425 1426 static void free_data_cache(DataCache *data_cache) 1427 { 1428 g_free(data_cache->buf); 1429 } 1430 1431 static size_t get_len_buf_out(size_t page_size, uint32_t flag_compress) 1432 { 1433 switch (flag_compress) { 1434 case DUMP_DH_COMPRESSED_ZLIB: 1435 return compressBound(page_size); 1436 1437 case DUMP_DH_COMPRESSED_LZO: 1438 /* 1439 * LZO will expand incompressible data by a little amount. Please check 1440 * the following URL to see the expansion calculation: 1441 * http://www.oberhumer.com/opensource/lzo/lzofaq.php 1442 */ 1443 return page_size + page_size / 16 + 64 + 3; 1444 1445 #ifdef CONFIG_SNAPPY 1446 case DUMP_DH_COMPRESSED_SNAPPY: 1447 return snappy_max_compressed_length(page_size); 1448 #endif 1449 } 1450 return 0; 1451 } 1452 1453 static void write_dump_pages(DumpState *s, Error **errp) 1454 { 1455 int ret = 0; 1456 DataCache page_desc, page_data; 1457 size_t len_buf_out, size_out; 1458 #ifdef CONFIG_LZO 1459 lzo_bytep wrkmem = NULL; 1460 #endif 1461 uint8_t *buf_out = NULL; 1462 off_t offset_desc, offset_data; 1463 PageDescriptor pd, pd_zero; 1464 uint8_t *buf; 1465 GuestPhysBlock *block_iter = NULL; 1466 uint64_t pfn_iter; 1467 g_autofree uint8_t *page = NULL; 1468 1469 /* get offset of page_desc and page_data in dump file */ 1470 offset_desc = s->offset_page; 1471 offset_data = offset_desc + sizeof(PageDescriptor) * s->num_dumpable; 1472 1473 prepare_data_cache(&page_desc, s, offset_desc); 1474 prepare_data_cache(&page_data, s, offset_data); 1475 1476 /* prepare buffer to store compressed data */ 1477 len_buf_out = get_len_buf_out(s->dump_info.page_size, s->flag_compress); 1478 assert(len_buf_out != 0); 1479 1480 #ifdef CONFIG_LZO 1481 wrkmem = g_malloc(LZO1X_1_MEM_COMPRESS); 1482 #endif 1483 1484 buf_out = g_malloc(len_buf_out); 1485 1486 /* 1487 * init zero page's page_desc and page_data, because every zero page 1488 * uses the same page_data 1489 */ 1490 pd_zero.size = cpu_to_dump32(s, s->dump_info.page_size); 1491 pd_zero.flags = cpu_to_dump32(s, 0); 1492 pd_zero.offset = cpu_to_dump64(s, offset_data); 1493 pd_zero.page_flags = cpu_to_dump64(s, 0); 1494 buf = g_malloc0(s->dump_info.page_size); 1495 ret = write_cache(&page_data, buf, s->dump_info.page_size, false); 1496 g_free(buf); 1497 if (ret < 0) { 1498 error_setg(errp, "dump: failed to write page data (zero page)"); 1499 goto out; 1500 } 1501 1502 offset_data += s->dump_info.page_size; 1503 page = g_malloc(s->dump_info.page_size); 1504 1505 /* 1506 * dump memory to vmcore page by page. zero page will all be resided in the 1507 * first page of page section 1508 */ 1509 for (buf = page; get_next_page(&block_iter, &pfn_iter, &buf, s); buf = page) { 1510 /* check zero page */ 1511 if (buffer_is_zero(buf, s->dump_info.page_size)) { 1512 ret = write_cache(&page_desc, &pd_zero, sizeof(PageDescriptor), 1513 false); 1514 if (ret < 0) { 1515 error_setg(errp, "dump: failed to write page desc"); 1516 goto out; 1517 } 1518 } else { 1519 /* 1520 * not zero page, then: 1521 * 1. compress the page 1522 * 2. write the compressed page into the cache of page_data 1523 * 3. get page desc of the compressed page and write it into the 1524 * cache of page_desc 1525 * 1526 * only one compression format will be used here, for 1527 * s->flag_compress is set. But when compression fails to work, 1528 * we fall back to save in plaintext. 1529 */ 1530 size_out = len_buf_out; 1531 if ((s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) && 1532 (compress2(buf_out, (uLongf *)&size_out, buf, 1533 s->dump_info.page_size, Z_BEST_SPEED) == Z_OK) && 1534 (size_out < s->dump_info.page_size)) { 1535 pd.flags = cpu_to_dump32(s, DUMP_DH_COMPRESSED_ZLIB); 1536 pd.size = cpu_to_dump32(s, size_out); 1537 1538 ret = write_cache(&page_data, buf_out, size_out, false); 1539 if (ret < 0) { 1540 error_setg(errp, "dump: failed to write page data"); 1541 goto out; 1542 } 1543 #ifdef CONFIG_LZO 1544 } else if ((s->flag_compress & DUMP_DH_COMPRESSED_LZO) && 1545 (lzo1x_1_compress(buf, s->dump_info.page_size, buf_out, 1546 (lzo_uint *)&size_out, wrkmem) == LZO_E_OK) && 1547 (size_out < s->dump_info.page_size)) { 1548 pd.flags = cpu_to_dump32(s, DUMP_DH_COMPRESSED_LZO); 1549 pd.size = cpu_to_dump32(s, size_out); 1550 1551 ret = write_cache(&page_data, buf_out, size_out, false); 1552 if (ret < 0) { 1553 error_setg(errp, "dump: failed to write page data"); 1554 goto out; 1555 } 1556 #endif 1557 #ifdef CONFIG_SNAPPY 1558 } else if ((s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) && 1559 (snappy_compress((char *)buf, s->dump_info.page_size, 1560 (char *)buf_out, &size_out) == SNAPPY_OK) && 1561 (size_out < s->dump_info.page_size)) { 1562 pd.flags = cpu_to_dump32(s, DUMP_DH_COMPRESSED_SNAPPY); 1563 pd.size = cpu_to_dump32(s, size_out); 1564 1565 ret = write_cache(&page_data, buf_out, size_out, false); 1566 if (ret < 0) { 1567 error_setg(errp, "dump: failed to write page data"); 1568 goto out; 1569 } 1570 #endif 1571 } else { 1572 /* 1573 * fall back to save in plaintext, size_out should be 1574 * assigned the target's page size 1575 */ 1576 pd.flags = cpu_to_dump32(s, 0); 1577 size_out = s->dump_info.page_size; 1578 pd.size = cpu_to_dump32(s, size_out); 1579 1580 ret = write_cache(&page_data, buf, 1581 s->dump_info.page_size, false); 1582 if (ret < 0) { 1583 error_setg(errp, "dump: failed to write page data"); 1584 goto out; 1585 } 1586 } 1587 1588 /* get and write page desc here */ 1589 pd.page_flags = cpu_to_dump64(s, 0); 1590 pd.offset = cpu_to_dump64(s, offset_data); 1591 offset_data += size_out; 1592 1593 ret = write_cache(&page_desc, &pd, sizeof(PageDescriptor), false); 1594 if (ret < 0) { 1595 error_setg(errp, "dump: failed to write page desc"); 1596 goto out; 1597 } 1598 } 1599 s->written_size += s->dump_info.page_size; 1600 } 1601 1602 ret = write_cache(&page_desc, NULL, 0, true); 1603 if (ret < 0) { 1604 error_setg(errp, "dump: failed to sync cache for page_desc"); 1605 goto out; 1606 } 1607 ret = write_cache(&page_data, NULL, 0, true); 1608 if (ret < 0) { 1609 error_setg(errp, "dump: failed to sync cache for page_data"); 1610 goto out; 1611 } 1612 1613 out: 1614 free_data_cache(&page_desc); 1615 free_data_cache(&page_data); 1616 1617 #ifdef CONFIG_LZO 1618 g_free(wrkmem); 1619 #endif 1620 1621 g_free(buf_out); 1622 } 1623 1624 static void create_kdump_vmcore(DumpState *s, Error **errp) 1625 { 1626 ERRP_GUARD(); 1627 int ret; 1628 1629 /* 1630 * the kdump-compressed format is: 1631 * File offset 1632 * +------------------------------------------+ 0x0 1633 * | main header (struct disk_dump_header) | 1634 * |------------------------------------------+ block 1 1635 * | sub header (struct kdump_sub_header) | 1636 * |------------------------------------------+ block 2 1637 * | 1st-dump_bitmap | 1638 * |------------------------------------------+ block 2 + X blocks 1639 * | 2nd-dump_bitmap | (aligned by block) 1640 * |------------------------------------------+ block 2 + 2 * X blocks 1641 * | page desc for pfn 0 (struct page_desc) | (aligned by block) 1642 * | page desc for pfn 1 (struct page_desc) | 1643 * | : | 1644 * |------------------------------------------| (not aligned by block) 1645 * | page data (pfn 0) | 1646 * | page data (pfn 1) | 1647 * | : | 1648 * +------------------------------------------+ 1649 */ 1650 1651 ret = write_start_flat_header(s->fd); 1652 if (ret < 0) { 1653 error_setg(errp, "dump: failed to write start flat header"); 1654 return; 1655 } 1656 1657 write_dump_header(s, errp); 1658 if (*errp) { 1659 return; 1660 } 1661 1662 write_dump_bitmap(s, errp); 1663 if (*errp) { 1664 return; 1665 } 1666 1667 write_dump_pages(s, errp); 1668 if (*errp) { 1669 return; 1670 } 1671 1672 ret = write_end_flat_header(s->fd); 1673 if (ret < 0) { 1674 error_setg(errp, "dump: failed to write end flat header"); 1675 return; 1676 } 1677 } 1678 1679 static int validate_start_block(DumpState *s) 1680 { 1681 GuestPhysBlock *block; 1682 1683 if (!dump_has_filter(s)) { 1684 return 0; 1685 } 1686 1687 QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) { 1688 /* This block is out of the range */ 1689 if (block->target_start >= s->filter_area_begin + s->filter_area_length || 1690 block->target_end <= s->filter_area_begin) { 1691 continue; 1692 } 1693 return 0; 1694 } 1695 1696 return -1; 1697 } 1698 1699 static void get_max_mapnr(DumpState *s) 1700 { 1701 GuestPhysBlock *last_block; 1702 1703 last_block = QTAILQ_LAST(&s->guest_phys_blocks.head); 1704 s->max_mapnr = dump_paddr_to_pfn(s, last_block->target_end); 1705 } 1706 1707 static DumpState dump_state_global = { .status = DUMP_STATUS_NONE }; 1708 1709 static void dump_state_prepare(DumpState *s) 1710 { 1711 /* zero the struct, setting status to active */ 1712 *s = (DumpState) { .status = DUMP_STATUS_ACTIVE }; 1713 } 1714 1715 bool qemu_system_dump_in_progress(void) 1716 { 1717 DumpState *state = &dump_state_global; 1718 return (qatomic_read(&state->status) == DUMP_STATUS_ACTIVE); 1719 } 1720 1721 /* 1722 * calculate total size of memory to be dumped (taking filter into 1723 * account.) 1724 */ 1725 static int64_t dump_calculate_size(DumpState *s) 1726 { 1727 GuestPhysBlock *block; 1728 int64_t total = 0; 1729 1730 QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) { 1731 total += dump_filtered_memblock_size(block, 1732 s->filter_area_begin, 1733 s->filter_area_length); 1734 } 1735 1736 return total; 1737 } 1738 1739 static void vmcoreinfo_update_phys_base(DumpState *s) 1740 { 1741 uint64_t size, note_head_size, name_size, phys_base; 1742 char **lines; 1743 uint8_t *vmci; 1744 size_t i; 1745 1746 if (!note_name_equal(s, s->guest_note, "VMCOREINFO")) { 1747 return; 1748 } 1749 1750 get_note_sizes(s, s->guest_note, ¬e_head_size, &name_size, &size); 1751 note_head_size = ROUND_UP(note_head_size, 4); 1752 1753 vmci = s->guest_note + note_head_size + ROUND_UP(name_size, 4); 1754 *(vmci + size) = '\0'; 1755 1756 lines = g_strsplit((char *)vmci, "\n", -1); 1757 for (i = 0; lines[i]; i++) { 1758 const char *prefix = NULL; 1759 1760 if (s->dump_info.d_machine == EM_X86_64) { 1761 prefix = "NUMBER(phys_base)="; 1762 } else if (s->dump_info.d_machine == EM_AARCH64) { 1763 prefix = "NUMBER(PHYS_OFFSET)="; 1764 } 1765 1766 if (prefix && g_str_has_prefix(lines[i], prefix)) { 1767 if (qemu_strtou64(lines[i] + strlen(prefix), NULL, 16, 1768 &phys_base) < 0) { 1769 warn_report("Failed to read %s", prefix); 1770 } else { 1771 s->dump_info.phys_base = phys_base; 1772 } 1773 break; 1774 } 1775 } 1776 1777 g_strfreev(lines); 1778 } 1779 1780 static void dump_init(DumpState *s, int fd, bool has_format, 1781 DumpGuestMemoryFormat format, bool paging, bool has_filter, 1782 int64_t begin, int64_t length, Error **errp) 1783 { 1784 ERRP_GUARD(); 1785 VMCoreInfoState *vmci = vmcoreinfo_find(); 1786 CPUState *cpu; 1787 int nr_cpus; 1788 int ret; 1789 1790 s->has_format = has_format; 1791 s->format = format; 1792 s->written_size = 0; 1793 1794 /* kdump-compressed is conflict with paging and filter */ 1795 if (has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) { 1796 assert(!paging && !has_filter); 1797 } 1798 1799 if (runstate_is_running()) { 1800 vm_stop(RUN_STATE_SAVE_VM); 1801 s->resume = true; 1802 } else { 1803 s->resume = false; 1804 } 1805 1806 /* If we use KVM, we should synchronize the registers before we get dump 1807 * info or physmap info. 1808 */ 1809 cpu_synchronize_all_states(); 1810 nr_cpus = 0; 1811 CPU_FOREACH(cpu) { 1812 nr_cpus++; 1813 } 1814 1815 s->fd = fd; 1816 if (has_filter && !length) { 1817 error_setg(errp, QERR_INVALID_PARAMETER, "length"); 1818 goto cleanup; 1819 } 1820 s->filter_area_begin = begin; 1821 s->filter_area_length = length; 1822 1823 /* First index is 0, it's the special null name */ 1824 s->string_table_buf = g_array_new(FALSE, TRUE, 1); 1825 /* 1826 * Allocate the null name, due to the clearing option set to true 1827 * it will be 0. 1828 */ 1829 g_array_set_size(s->string_table_buf, 1); 1830 1831 memory_mapping_list_init(&s->list); 1832 1833 guest_phys_blocks_init(&s->guest_phys_blocks); 1834 guest_phys_blocks_append(&s->guest_phys_blocks); 1835 s->total_size = dump_calculate_size(s); 1836 #ifdef DEBUG_DUMP_GUEST_MEMORY 1837 fprintf(stderr, "DUMP: total memory to dump: %lu\n", s->total_size); 1838 #endif 1839 1840 /* it does not make sense to dump non-existent memory */ 1841 if (!s->total_size) { 1842 error_setg(errp, "dump: no guest memory to dump"); 1843 goto cleanup; 1844 } 1845 1846 /* Is the filter filtering everything? */ 1847 if (validate_start_block(s) == -1) { 1848 error_setg(errp, QERR_INVALID_PARAMETER, "begin"); 1849 goto cleanup; 1850 } 1851 1852 /* get dump info: endian, class and architecture. 1853 * If the target architecture is not supported, cpu_get_dump_info() will 1854 * return -1. 1855 */ 1856 ret = cpu_get_dump_info(&s->dump_info, &s->guest_phys_blocks); 1857 if (ret < 0) { 1858 error_setg(errp, 1859 "dumping guest memory is not supported on this target"); 1860 goto cleanup; 1861 } 1862 1863 if (!s->dump_info.page_size) { 1864 s->dump_info.page_size = qemu_target_page_size(); 1865 } 1866 1867 s->note_size = cpu_get_note_size(s->dump_info.d_class, 1868 s->dump_info.d_machine, nr_cpus); 1869 assert(s->note_size >= 0); 1870 1871 /* 1872 * The goal of this block is to (a) update the previously guessed 1873 * phys_base, (b) copy the guest note out of the guest. 1874 * Failure to do so is not fatal for dumping. 1875 */ 1876 if (vmci) { 1877 uint64_t addr, note_head_size, name_size, desc_size; 1878 uint32_t size; 1879 uint16_t format; 1880 1881 note_head_size = dump_is_64bit(s) ? 1882 sizeof(Elf64_Nhdr) : sizeof(Elf32_Nhdr); 1883 1884 format = le16_to_cpu(vmci->vmcoreinfo.guest_format); 1885 size = le32_to_cpu(vmci->vmcoreinfo.size); 1886 addr = le64_to_cpu(vmci->vmcoreinfo.paddr); 1887 if (!vmci->has_vmcoreinfo) { 1888 warn_report("guest note is not present"); 1889 } else if (size < note_head_size || size > MAX_GUEST_NOTE_SIZE) { 1890 warn_report("guest note size is invalid: %" PRIu32, size); 1891 } else if (format != FW_CFG_VMCOREINFO_FORMAT_ELF) { 1892 warn_report("guest note format is unsupported: %" PRIu16, format); 1893 } else { 1894 s->guest_note = g_malloc(size + 1); /* +1 for adding \0 */ 1895 cpu_physical_memory_read(addr, s->guest_note, size); 1896 1897 get_note_sizes(s, s->guest_note, NULL, &name_size, &desc_size); 1898 s->guest_note_size = ELF_NOTE_SIZE(note_head_size, name_size, 1899 desc_size); 1900 if (name_size > MAX_GUEST_NOTE_SIZE || 1901 desc_size > MAX_GUEST_NOTE_SIZE || 1902 s->guest_note_size > size) { 1903 warn_report("Invalid guest note header"); 1904 g_free(s->guest_note); 1905 s->guest_note = NULL; 1906 } else { 1907 vmcoreinfo_update_phys_base(s); 1908 s->note_size += s->guest_note_size; 1909 } 1910 } 1911 } 1912 1913 /* get memory mapping */ 1914 if (paging) { 1915 qemu_get_guest_memory_mapping(&s->list, &s->guest_phys_blocks, errp); 1916 if (*errp) { 1917 goto cleanup; 1918 } 1919 } else { 1920 qemu_get_guest_simple_memory_mapping(&s->list, &s->guest_phys_blocks); 1921 } 1922 1923 s->nr_cpus = nr_cpus; 1924 1925 get_max_mapnr(s); 1926 1927 uint64_t tmp; 1928 tmp = DIV_ROUND_UP(DIV_ROUND_UP(s->max_mapnr, CHAR_BIT), 1929 s->dump_info.page_size); 1930 s->len_dump_bitmap = tmp * s->dump_info.page_size; 1931 1932 /* init for kdump-compressed format */ 1933 if (has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) { 1934 switch (format) { 1935 case DUMP_GUEST_MEMORY_FORMAT_KDUMP_ZLIB: 1936 s->flag_compress = DUMP_DH_COMPRESSED_ZLIB; 1937 break; 1938 1939 case DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO: 1940 #ifdef CONFIG_LZO 1941 if (lzo_init() != LZO_E_OK) { 1942 error_setg(errp, "failed to initialize the LZO library"); 1943 goto cleanup; 1944 } 1945 #endif 1946 s->flag_compress = DUMP_DH_COMPRESSED_LZO; 1947 break; 1948 1949 case DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY: 1950 s->flag_compress = DUMP_DH_COMPRESSED_SNAPPY; 1951 break; 1952 1953 default: 1954 s->flag_compress = 0; 1955 } 1956 1957 return; 1958 } 1959 1960 if (dump_has_filter(s)) { 1961 memory_mapping_filter(&s->list, s->filter_area_begin, s->filter_area_length); 1962 } 1963 1964 /* 1965 * The first section header is always a special one in which most 1966 * fields are 0. The section header string table is also always 1967 * set. 1968 */ 1969 s->shdr_num = 2; 1970 1971 /* 1972 * Adds the number of architecture sections to shdr_num and sets 1973 * elf_section_data_size so we know the offsets and sizes of all 1974 * parts. 1975 */ 1976 if (s->dump_info.arch_sections_add_fn) { 1977 s->dump_info.arch_sections_add_fn(s); 1978 } 1979 1980 /* 1981 * calculate shdr_num so we know the offsets and sizes of all 1982 * parts. 1983 * Calculate phdr_num 1984 * 1985 * The absolute maximum amount of phdrs is UINT32_MAX - 1 as 1986 * sh_info is 32 bit. There's special handling once we go over 1987 * UINT16_MAX - 1 but that is handled in the ehdr and section 1988 * code. 1989 */ 1990 s->phdr_num = 1; /* Reserve PT_NOTE */ 1991 if (s->list.num <= UINT32_MAX - 1) { 1992 s->phdr_num += s->list.num; 1993 } else { 1994 s->phdr_num = UINT32_MAX; 1995 } 1996 1997 /* 1998 * Now that the number of section and program headers is known we 1999 * can calculate the offsets of the headers and data. 2000 */ 2001 if (dump_is_64bit(s)) { 2002 s->shdr_offset = sizeof(Elf64_Ehdr); 2003 s->phdr_offset = s->shdr_offset + sizeof(Elf64_Shdr) * s->shdr_num; 2004 s->note_offset = s->phdr_offset + sizeof(Elf64_Phdr) * s->phdr_num; 2005 } else { 2006 s->shdr_offset = sizeof(Elf32_Ehdr); 2007 s->phdr_offset = s->shdr_offset + sizeof(Elf32_Shdr) * s->shdr_num; 2008 s->note_offset = s->phdr_offset + sizeof(Elf32_Phdr) * s->phdr_num; 2009 } 2010 s->memory_offset = s->note_offset + s->note_size; 2011 s->section_offset = s->memory_offset + s->total_size; 2012 2013 return; 2014 2015 cleanup: 2016 dump_cleanup(s); 2017 } 2018 2019 /* this operation might be time consuming. */ 2020 static void dump_process(DumpState *s, Error **errp) 2021 { 2022 ERRP_GUARD(); 2023 DumpQueryResult *result = NULL; 2024 2025 if (s->has_format && s->format == DUMP_GUEST_MEMORY_FORMAT_WIN_DMP) { 2026 #ifdef TARGET_X86_64 2027 create_win_dump(s, errp); 2028 #endif 2029 } else if (s->has_format && s->format != DUMP_GUEST_MEMORY_FORMAT_ELF) { 2030 create_kdump_vmcore(s, errp); 2031 } else { 2032 create_vmcore(s, errp); 2033 } 2034 2035 /* make sure status is written after written_size updates */ 2036 smp_wmb(); 2037 qatomic_set(&s->status, 2038 (*errp ? DUMP_STATUS_FAILED : DUMP_STATUS_COMPLETED)); 2039 2040 /* send DUMP_COMPLETED message (unconditionally) */ 2041 result = qmp_query_dump(NULL); 2042 /* should never fail */ 2043 assert(result); 2044 qapi_event_send_dump_completed(result, 2045 *errp ? error_get_pretty(*errp) : NULL); 2046 qapi_free_DumpQueryResult(result); 2047 2048 dump_cleanup(s); 2049 } 2050 2051 static void *dump_thread(void *data) 2052 { 2053 DumpState *s = (DumpState *)data; 2054 dump_process(s, NULL); 2055 return NULL; 2056 } 2057 2058 DumpQueryResult *qmp_query_dump(Error **errp) 2059 { 2060 DumpQueryResult *result = g_new(DumpQueryResult, 1); 2061 DumpState *state = &dump_state_global; 2062 result->status = qatomic_read(&state->status); 2063 /* make sure we are reading status and written_size in order */ 2064 smp_rmb(); 2065 result->completed = state->written_size; 2066 result->total = state->total_size; 2067 return result; 2068 } 2069 2070 void qmp_dump_guest_memory(bool paging, const char *file, 2071 bool has_detach, bool detach, 2072 bool has_begin, int64_t begin, bool has_length, 2073 int64_t length, bool has_format, 2074 DumpGuestMemoryFormat format, Error **errp) 2075 { 2076 ERRP_GUARD(); 2077 const char *p; 2078 int fd = -1; 2079 DumpState *s; 2080 bool detach_p = false; 2081 2082 if (runstate_check(RUN_STATE_INMIGRATE)) { 2083 error_setg(errp, "Dump not allowed during incoming migration."); 2084 return; 2085 } 2086 2087 /* if there is a dump in background, we should wait until the dump 2088 * finished */ 2089 if (qemu_system_dump_in_progress()) { 2090 error_setg(errp, "There is a dump in process, please wait."); 2091 return; 2092 } 2093 2094 /* 2095 * kdump-compressed format need the whole memory dumped, so paging or 2096 * filter is not supported here. 2097 */ 2098 if ((has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) && 2099 (paging || has_begin || has_length)) { 2100 error_setg(errp, "kdump-compressed format doesn't support paging or " 2101 "filter"); 2102 return; 2103 } 2104 if (has_begin && !has_length) { 2105 error_setg(errp, QERR_MISSING_PARAMETER, "length"); 2106 return; 2107 } 2108 if (!has_begin && has_length) { 2109 error_setg(errp, QERR_MISSING_PARAMETER, "begin"); 2110 return; 2111 } 2112 if (has_detach) { 2113 detach_p = detach; 2114 } 2115 2116 /* check whether lzo/snappy is supported */ 2117 #ifndef CONFIG_LZO 2118 if (has_format && format == DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO) { 2119 error_setg(errp, "kdump-lzo is not available now"); 2120 return; 2121 } 2122 #endif 2123 2124 #ifndef CONFIG_SNAPPY 2125 if (has_format && format == DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY) { 2126 error_setg(errp, "kdump-snappy is not available now"); 2127 return; 2128 } 2129 #endif 2130 2131 #ifndef TARGET_X86_64 2132 if (has_format && format == DUMP_GUEST_MEMORY_FORMAT_WIN_DMP) { 2133 error_setg(errp, "Windows dump is only available for x86-64"); 2134 return; 2135 } 2136 #endif 2137 2138 #if !defined(WIN32) 2139 if (strstart(file, "fd:", &p)) { 2140 fd = monitor_get_fd(monitor_cur(), p, errp); 2141 if (fd == -1) { 2142 return; 2143 } 2144 } 2145 #endif 2146 2147 if (strstart(file, "file:", &p)) { 2148 fd = qemu_open_old(p, O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, S_IRUSR); 2149 if (fd < 0) { 2150 error_setg_file_open(errp, errno, p); 2151 return; 2152 } 2153 } 2154 2155 if (fd == -1) { 2156 error_setg(errp, QERR_INVALID_PARAMETER, "protocol"); 2157 return; 2158 } 2159 2160 if (!dump_migration_blocker) { 2161 error_setg(&dump_migration_blocker, 2162 "Live migration disabled: dump-guest-memory in progress"); 2163 } 2164 2165 /* 2166 * Allows even for -only-migratable, but forbid migration during the 2167 * process of dump guest memory. 2168 */ 2169 if (migrate_add_blocker_internal(dump_migration_blocker, errp)) { 2170 /* Remember to release the fd before passing it over to dump state */ 2171 close(fd); 2172 return; 2173 } 2174 2175 s = &dump_state_global; 2176 dump_state_prepare(s); 2177 2178 dump_init(s, fd, has_format, format, paging, has_begin, 2179 begin, length, errp); 2180 if (*errp) { 2181 qatomic_set(&s->status, DUMP_STATUS_FAILED); 2182 return; 2183 } 2184 2185 if (detach_p) { 2186 /* detached dump */ 2187 s->detached = true; 2188 qemu_thread_create(&s->dump_thread, "dump_thread", dump_thread, 2189 s, QEMU_THREAD_DETACHED); 2190 } else { 2191 /* sync dump */ 2192 dump_process(s, errp); 2193 } 2194 } 2195 2196 DumpGuestMemoryCapability *qmp_query_dump_guest_memory_capability(Error **errp) 2197 { 2198 DumpGuestMemoryCapability *cap = 2199 g_new0(DumpGuestMemoryCapability, 1); 2200 DumpGuestMemoryFormatList **tail = &cap->formats; 2201 2202 /* elf is always available */ 2203 QAPI_LIST_APPEND(tail, DUMP_GUEST_MEMORY_FORMAT_ELF); 2204 2205 /* kdump-zlib is always available */ 2206 QAPI_LIST_APPEND(tail, DUMP_GUEST_MEMORY_FORMAT_KDUMP_ZLIB); 2207 2208 /* add new item if kdump-lzo is available */ 2209 #ifdef CONFIG_LZO 2210 QAPI_LIST_APPEND(tail, DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO); 2211 #endif 2212 2213 /* add new item if kdump-snappy is available */ 2214 #ifdef CONFIG_SNAPPY 2215 QAPI_LIST_APPEND(tail, DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY); 2216 #endif 2217 2218 /* Windows dump is available only if target is x86_64 */ 2219 #ifdef TARGET_X86_64 2220 QAPI_LIST_APPEND(tail, DUMP_GUEST_MEMORY_FORMAT_WIN_DMP); 2221 #endif 2222 2223 return cap; 2224 } 2225