xref: /qemu/dump/dump.c (revision abff1abf)
/*
 * QEMU dump
 *
 * Copyright Fujitsu, Corp. 2011, 2012
 *
 * Authors:
 *     Wen Congyang <wency@cn.fujitsu.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu/cutils.h"
#include "elf.h"
#include "cpu.h"
#include "exec/hwaddr.h"
#include "monitor/monitor.h"
#include "sysemu/kvm.h"
#include "sysemu/dump.h"
#include "sysemu/memory_mapping.h"
#include "sysemu/runstate.h"
#include "sysemu/cpus.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-dump.h"
#include "qapi/qapi-events-dump.h"
#include "qapi/qmp/qerror.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "hw/misc/vmcoreinfo.h"

#ifdef TARGET_X86_64
#include "win_dump.h"
#endif

#include <zlib.h>
#ifdef CONFIG_LZO
#include <lzo/lzo1x.h>
#endif
#ifdef CONFIG_SNAPPY
#include <snappy-c.h>
#endif
#ifndef ELF_MACHINE_UNAME
#define ELF_MACHINE_UNAME "Unknown"
#endif

#define MAX_GUEST_NOTE_SIZE (1 << 20) /* 1MB should be enough */

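/*
 * Total size of an ELF note: header, name and desc are each padded to
 * 4-byte alignment, as the ELF note layout requires.
 */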
#define ELF_NOTE_SIZE(hdr_size, name_size, desc_size)   \
    ((DIV_ROUND_UP((hdr_size), 4) +                     \
      DIV_ROUND_UP((name_size), 4) +                    \
      DIV_ROUND_UP((desc_size), 4)) * 4)

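/*
 * Endianness helpers: convert host values to the byte order recorded in
 * the dump's ELF header (s->dump_info.d_endian).
 */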
uint16_t cpu_to_dump16(DumpState *s, uint16_t val)
{
    if (s->dump_info.d_endian == ELFDATA2LSB) {
        val = cpu_to_le16(val);
    } else {
        val = cpu_to_be16(val);
    }

    return val;
}

uint32_t cpu_to_dump32(DumpState *s, uint32_t val)
{
    if (s->dump_info.d_endian == ELFDATA2LSB) {
        val = cpu_to_le32(val);
    } else {
        val = cpu_to_be32(val);
    }

    return val;
}

uint64_t cpu_to_dump64(DumpState *s, uint64_t val)
{
    if (s->dump_info.d_endian == ELFDATA2LSB) {
        val = cpu_to_le64(val);
    } else {
        val = cpu_to_be64(val);
    }

    return val;
}

static int dump_cleanup(DumpState *s)
{
    guest_phys_blocks_free(&s->guest_phys_blocks);
    memory_mapping_list_free(&s->list);
    close(s->fd);
    g_free(s->guest_note);
    s->guest_note = NULL;
    if (s->resume) {
        if (s->detached) {
            qemu_mutex_lock_iothread();
        }
        vm_start();
        if (s->detached) {
            qemu_mutex_unlock_iothread();
        }
    }

    return 0;
}

static int fd_write_vmcore(const void *buf, size_t size, void *opaque)
{
    DumpState *s = opaque;
    size_t written_size;

    written_size = qemu_write_full(s->fd, buf, size);
    if (written_size != size) {
        return -errno;
    }

    return 0;
}

static void write_elf64_header(DumpState *s, Error **errp)
{
    Elf64_Ehdr elf_header;
    int ret;

    memset(&elf_header, 0, sizeof(Elf64_Ehdr));
    memcpy(&elf_header, ELFMAG, SELFMAG);
    elf_header.e_ident[EI_CLASS] = ELFCLASS64;
    elf_header.e_ident[EI_DATA] = s->dump_info.d_endian;
    elf_header.e_ident[EI_VERSION] = EV_CURRENT;
    elf_header.e_type = cpu_to_dump16(s, ET_CORE);
    elf_header.e_machine = cpu_to_dump16(s, s->dump_info.d_machine);
    elf_header.e_version = cpu_to_dump32(s, EV_CURRENT);
    elf_header.e_ehsize = cpu_to_dump16(s, sizeof(elf_header));
    elf_header.e_phoff = cpu_to_dump64(s, sizeof(Elf64_Ehdr));
    elf_header.e_phentsize = cpu_to_dump16(s, sizeof(Elf64_Phdr));
    elf_header.e_phnum = cpu_to_dump16(s, s->phdr_num);
    if (s->have_section) {
        uint64_t shoff = sizeof(Elf64_Ehdr) + sizeof(Elf64_Phdr) * s->sh_info;

        elf_header.e_shoff = cpu_to_dump64(s, shoff);
        elf_header.e_shentsize = cpu_to_dump16(s, sizeof(Elf64_Shdr));
        elf_header.e_shnum = cpu_to_dump16(s, 1);
    }

    ret = fd_write_vmcore(&elf_header, sizeof(elf_header), s);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "dump: failed to write elf header");
    }
}

static void write_elf32_header(DumpState *s, Error **errp)
{
    Elf32_Ehdr elf_header;
    int ret;

    memset(&elf_header, 0, sizeof(Elf32_Ehdr));
    memcpy(&elf_header, ELFMAG, SELFMAG);
    elf_header.e_ident[EI_CLASS] = ELFCLASS32;
    elf_header.e_ident[EI_DATA] = s->dump_info.d_endian;
    elf_header.e_ident[EI_VERSION] = EV_CURRENT;
    elf_header.e_type = cpu_to_dump16(s, ET_CORE);
    elf_header.e_machine = cpu_to_dump16(s, s->dump_info.d_machine);
    elf_header.e_version = cpu_to_dump32(s, EV_CURRENT);
    elf_header.e_ehsize = cpu_to_dump16(s, sizeof(elf_header));
    elf_header.e_phoff = cpu_to_dump32(s, sizeof(Elf32_Ehdr));
    elf_header.e_phentsize = cpu_to_dump16(s, sizeof(Elf32_Phdr));
    elf_header.e_phnum = cpu_to_dump16(s, s->phdr_num);
    if (s->have_section) {
        uint32_t shoff = sizeof(Elf32_Ehdr) + sizeof(Elf32_Phdr) * s->sh_info;

        elf_header.e_shoff = cpu_to_dump32(s, shoff);
        elf_header.e_shentsize = cpu_to_dump16(s, sizeof(Elf32_Shdr));
        elf_header.e_shnum = cpu_to_dump16(s, 1);
    }

    ret = fd_write_vmcore(&elf_header, sizeof(elf_header), s);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "dump: failed to write elf header");
    }
}

static void write_elf64_load(DumpState *s, MemoryMapping *memory_mapping,
                             int phdr_index, hwaddr offset,
                             hwaddr filesz, Error **errp)
{
    Elf64_Phdr phdr;
    int ret;

    memset(&phdr, 0, sizeof(Elf64_Phdr));
    phdr.p_type = cpu_to_dump32(s, PT_LOAD);
    phdr.p_offset = cpu_to_dump64(s, offset);
    phdr.p_paddr = cpu_to_dump64(s, memory_mapping->phys_addr);
    phdr.p_filesz = cpu_to_dump64(s, filesz);
    phdr.p_memsz = cpu_to_dump64(s, memory_mapping->length);
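    /*
     * Fall back to the physical address when no virtual mapping is known
     * (virt_addr == 0); "?:" is the GNU elvis operator.
     */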
    phdr.p_vaddr = cpu_to_dump64(s, memory_mapping->virt_addr) ?: phdr.p_paddr;

    assert(memory_mapping->length >= filesz);

    ret = fd_write_vmcore(&phdr, sizeof(Elf64_Phdr), s);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "dump: failed to write program header table");
    }
}

static void write_elf32_load(DumpState *s, MemoryMapping *memory_mapping,
                             int phdr_index, hwaddr offset,
                             hwaddr filesz, Error **errp)
{
    Elf32_Phdr phdr;
    int ret;

    memset(&phdr, 0, sizeof(Elf32_Phdr));
    phdr.p_type = cpu_to_dump32(s, PT_LOAD);
    phdr.p_offset = cpu_to_dump32(s, offset);
    phdr.p_paddr = cpu_to_dump32(s, memory_mapping->phys_addr);
    phdr.p_filesz = cpu_to_dump32(s, filesz);
    phdr.p_memsz = cpu_to_dump32(s, memory_mapping->length);
    phdr.p_vaddr =
        cpu_to_dump32(s, memory_mapping->virt_addr) ?: phdr.p_paddr;

    assert(memory_mapping->length >= filesz);

    ret = fd_write_vmcore(&phdr, sizeof(Elf32_Phdr), s);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "dump: failed to write program header table");
    }
}

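/*
 * The PT_NOTE program header points at the note data, which sits right
 * before the dumped memory in the vmcore file.
 */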
static void write_elf64_note(DumpState *s, Error **errp)
{
    Elf64_Phdr phdr;
    hwaddr begin = s->memory_offset - s->note_size;
    int ret;

    memset(&phdr, 0, sizeof(Elf64_Phdr));
    phdr.p_type = cpu_to_dump32(s, PT_NOTE);
    phdr.p_offset = cpu_to_dump64(s, begin);
    phdr.p_paddr = 0;
    phdr.p_filesz = cpu_to_dump64(s, s->note_size);
    phdr.p_memsz = cpu_to_dump64(s, s->note_size);
    phdr.p_vaddr = 0;

    ret = fd_write_vmcore(&phdr, sizeof(Elf64_Phdr), s);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "dump: failed to write program header table");
    }
}

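/* CPUs are numbered from 1 in the dump's ELF notes. */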
static inline int cpu_index(CPUState *cpu)
{
    return cpu->cpu_index + 1;
}

static void write_guest_note(WriteCoreDumpFunction f, DumpState *s,
                             Error **errp)
{
    int ret;

    if (s->guest_note) {
        ret = f(s->guest_note, s->guest_note_size, s);
        if (ret < 0) {
            error_setg(errp, "dump: failed to write guest note");
        }
    }
}

static void write_elf64_notes(WriteCoreDumpFunction f, DumpState *s,
                              Error **errp)
{
    CPUState *cpu;
    int ret;
    int id;

    CPU_FOREACH(cpu) {
        id = cpu_index(cpu);
        ret = cpu_write_elf64_note(f, cpu, id, s);
        if (ret < 0) {
            error_setg(errp, "dump: failed to write elf notes");
            return;
        }
    }

    CPU_FOREACH(cpu) {
        ret = cpu_write_elf64_qemunote(f, cpu, s);
        if (ret < 0) {
            error_setg(errp, "dump: failed to write CPU status");
            return;
        }
    }

    write_guest_note(f, s, errp);
}

static void write_elf32_note(DumpState *s, Error **errp)
{
    hwaddr begin = s->memory_offset - s->note_size;
    Elf32_Phdr phdr;
    int ret;

    memset(&phdr, 0, sizeof(Elf32_Phdr));
    phdr.p_type = cpu_to_dump32(s, PT_NOTE);
    phdr.p_offset = cpu_to_dump32(s, begin);
    phdr.p_paddr = 0;
    phdr.p_filesz = cpu_to_dump32(s, s->note_size);
    phdr.p_memsz = cpu_to_dump32(s, s->note_size);
    phdr.p_vaddr = 0;

    ret = fd_write_vmcore(&phdr, sizeof(Elf32_Phdr), s);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "dump: failed to write program header table");
    }
}

static void write_elf32_notes(WriteCoreDumpFunction f, DumpState *s,
                              Error **errp)
{
    CPUState *cpu;
    int ret;
    int id;

    CPU_FOREACH(cpu) {
        id = cpu_index(cpu);
        ret = cpu_write_elf32_note(f, cpu, id, s);
        if (ret < 0) {
            error_setg(errp, "dump: failed to write elf notes");
            return;
        }
    }

    CPU_FOREACH(cpu) {
        ret = cpu_write_elf32_qemunote(f, cpu, s);
        if (ret < 0) {
            error_setg(errp, "dump: failed to write CPU status");
            return;
        }
    }

    write_guest_note(f, s, errp);
}

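/*
 * Write the single section header (type selects ELF32 when 0, ELF64
 * otherwise). Its sh_info field carries the real program header count when
 * that count overflows e_phnum, following the ELF PN_XNUM convention.
 */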
static void write_elf_section(DumpState *s, int type, Error **errp)
{
    Elf32_Shdr shdr32;
    Elf64_Shdr shdr64;
    int shdr_size;
    void *shdr;
    int ret;

    if (type == 0) {
        shdr_size = sizeof(Elf32_Shdr);
        memset(&shdr32, 0, shdr_size);
        shdr32.sh_info = cpu_to_dump32(s, s->sh_info);
        shdr = &shdr32;
    } else {
        shdr_size = sizeof(Elf64_Shdr);
        memset(&shdr64, 0, shdr_size);
        shdr64.sh_info = cpu_to_dump32(s, s->sh_info);
        shdr = &shdr64;
    }

    ret = fd_write_vmcore(shdr, shdr_size, s);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "dump: failed to write section header table");
    }
}

static void write_data(DumpState *s, void *buf, int length, Error **errp)
{
    int ret;

    ret = fd_write_vmcore(buf, length, s);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "dump: failed to save memory");
    } else {
        s->written_size += length;
    }
}

/* write the memory to vmcore. 1 page per I/O. */
static void write_memory(DumpState *s, GuestPhysBlock *block, ram_addr_t start,
                         int64_t size, Error **errp)
{
    int64_t i;
    Error *local_err = NULL;

    for (i = 0; i < size / s->dump_info.page_size; i++) {
        write_data(s, block->host_addr + start + i * s->dump_info.page_size,
                   s->dump_info.page_size, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    }

    if ((size % s->dump_info.page_size) != 0) {
        write_data(s, block->host_addr + start + i * s->dump_info.page_size,
                   size % s->dump_info.page_size, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    }
}

/* get the memory's offset and size in the vmcore */
static void get_offset_range(hwaddr phys_addr,
                             ram_addr_t mapping_length,
                             DumpState *s,
                             hwaddr *p_offset,
                             hwaddr *p_filesz)
{
    GuestPhysBlock *block;
    hwaddr offset = s->memory_offset;
    int64_t size_in_block, start;

    /* When the memory is not stored into vmcore, offset will be -1 */
    *p_offset = -1;
    *p_filesz = 0;

    if (s->has_filter) {
        if (phys_addr < s->begin || phys_addr >= s->begin + s->length) {
            return;
        }
    }

    QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) {
        if (s->has_filter) {
            if (block->target_start >= s->begin + s->length ||
                block->target_end <= s->begin) {
                /* This block is out of the range */
                continue;
            }

            if (s->begin <= block->target_start) {
                start = block->target_start;
            } else {
                start = s->begin;
            }

            size_in_block = block->target_end - start;
            if (s->begin + s->length < block->target_end) {
                size_in_block -= block->target_end - (s->begin + s->length);
            }
        } else {
            start = block->target_start;
            size_in_block = block->target_end - block->target_start;
        }

        if (phys_addr >= start && phys_addr < start + size_in_block) {
            *p_offset = phys_addr - start + offset;

            /* The offset range mapped from the vmcore file must not spill over
             * the GuestPhysBlock, clamp it. The rest of the mapping will be
             * zero-filled in memory at load time; see
             * <http://refspecs.linuxbase.org/elf/gabi4+/ch5.pheader.html>.
             */
            *p_filesz = phys_addr + mapping_length <= start + size_in_block ?
                        mapping_length :
                        size_in_block - (phys_addr - start);
            return;
        }

        offset += size_in_block;
    }
}

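/*
 * Emit one PT_LOAD program header per guest memory mapping. phdr_index
 * starts at 1 because index 0 is the PT_NOTE header written earlier.
 */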
static void write_elf_loads(DumpState *s, Error **errp)
{
    hwaddr offset, filesz;
    MemoryMapping *memory_mapping;
    uint32_t phdr_index = 1;
    uint32_t max_index;
    Error *local_err = NULL;

    if (s->have_section) {
        max_index = s->sh_info;
    } else {
        max_index = s->phdr_num;
    }

    QTAILQ_FOREACH(memory_mapping, &s->list.head, next) {
        get_offset_range(memory_mapping->phys_addr,
                         memory_mapping->length,
                         s, &offset, &filesz);
        if (s->dump_info.d_class == ELFCLASS64) {
            write_elf64_load(s, memory_mapping, phdr_index++, offset,
                             filesz, &local_err);
        } else {
            write_elf32_load(s, memory_mapping, phdr_index++, offset,
                             filesz, &local_err);
        }

        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }

        if (phdr_index >= max_index) {
            break;
        }
    }
}

/* write elf header, PT_NOTE and elf note to vmcore. */
static void dump_begin(DumpState *s, Error **errp)
{
    Error *local_err = NULL;

    /*
     * the vmcore's format is:
     *   --------------
     *   |  elf header |
     *   --------------
     *   |  PT_NOTE    |
     *   --------------
     *   |  PT_LOAD    |
     *   --------------
     *   |  ......     |
     *   --------------
     *   |  PT_LOAD    |
     *   --------------
     *   |  sec_hdr    |
     *   --------------
     *   |  elf note   |
     *   --------------
     *   |  memory     |
     *   --------------
     *
     * we only know where the memory is saved after we write elf note into
     * vmcore.
     */

    /* write elf header to vmcore */
    if (s->dump_info.d_class == ELFCLASS64) {
        write_elf64_header(s, &local_err);
    } else {
        write_elf32_header(s, &local_err);
    }
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    if (s->dump_info.d_class == ELFCLASS64) {
        /* write PT_NOTE to vmcore */
        write_elf64_note(s, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }

        /* write all PT_LOAD to vmcore */
        write_elf_loads(s, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }

        /* write section to vmcore */
        if (s->have_section) {
            write_elf_section(s, 1, &local_err);
            if (local_err) {
                error_propagate(errp, local_err);
                return;
            }
        }

        /* write notes to vmcore */
        write_elf64_notes(fd_write_vmcore, s, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    } else {
        /* write PT_NOTE to vmcore */
        write_elf32_note(s, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }

        /* write all PT_LOAD to vmcore */
        write_elf_loads(s, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }

        /* write section to vmcore */
        if (s->have_section) {
            write_elf_section(s, 0, &local_err);
            if (local_err) {
                error_propagate(errp, local_err);
                return;
            }
        }

        /* write notes to vmcore */
        write_elf32_notes(fd_write_vmcore, s, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    }
}

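/*
 * Advance to the next block to dump, honouring the address filter.
 * Returns 1 when no block is left, 0 otherwise; on success s->next_block
 * and s->start are updated.
 */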
static int get_next_block(DumpState *s, GuestPhysBlock *block)
{
    while (1) {
        block = QTAILQ_NEXT(block, next);
        if (!block) {
            /* no more blocks */
            return 1;
        }

        s->start = 0;
        s->next_block = block;
        if (s->has_filter) {
            if (block->target_start >= s->begin + s->length ||
                block->target_end <= s->begin) {
                /* This block is out of the range */
                continue;
            }

            if (s->begin > block->target_start) {
                s->start = s->begin - block->target_start;
            }
        }

        return 0;
    }
}

/* write all memory to vmcore */
static void dump_iterate(DumpState *s, Error **errp)
{
    GuestPhysBlock *block;
    int64_t size;
    Error *local_err = NULL;

    do {
        block = s->next_block;

        size = block->target_end - block->target_start;
        if (s->has_filter) {
            size -= s->start;
            if (s->begin + s->length < block->target_end) {
                size -= block->target_end - (s->begin + s->length);
            }
        }
        write_memory(s, block, s->start, size, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }

    } while (!get_next_block(s, block));
}

static void create_vmcore(DumpState *s, Error **errp)
{
    Error *local_err = NULL;

    dump_begin(s, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    dump_iterate(s, errp);
}

static int write_start_flat_header(int fd)
{
    MakedumpfileHeader *mh;
    int ret = 0;

    QEMU_BUILD_BUG_ON(sizeof *mh > MAX_SIZE_MDF_HEADER);
    mh = g_malloc0(MAX_SIZE_MDF_HEADER);

    memcpy(mh->signature, MAKEDUMPFILE_SIGNATURE,
           MIN(sizeof mh->signature, sizeof MAKEDUMPFILE_SIGNATURE));

    mh->type = cpu_to_be64(TYPE_FLAT_HEADER);
    mh->version = cpu_to_be64(VERSION_FLAT_HEADER);

    size_t written_size;
    written_size = qemu_write_full(fd, mh, MAX_SIZE_MDF_HEADER);
    if (written_size != MAX_SIZE_MDF_HEADER) {
        ret = -1;
    }

    g_free(mh);
    return ret;
}

static int write_end_flat_header(int fd)
{
    MakedumpfileDataHeader mdh;

    mdh.offset = END_FLAG_FLAT_HEADER;
    mdh.buf_size = END_FLAG_FLAT_HEADER;

    size_t written_size;
    written_size = qemu_write_full(fd, &mdh, sizeof(mdh));
    if (written_size != sizeof(mdh)) {
        return -1;
    }

    return 0;
}

static int write_buffer(int fd, off_t offset, const void *buf, size_t size)
{
    size_t written_size;
    MakedumpfileDataHeader mdh;

    mdh.offset = cpu_to_be64(offset);
    mdh.buf_size = cpu_to_be64(size);

    written_size = qemu_write_full(fd, &mdh, sizeof(mdh));
    if (written_size != sizeof(mdh)) {
        return -1;
    }

    written_size = qemu_write_full(fd, buf, size);
    if (written_size != size) {
        return -1;
    }

    return 0;
}

static int buf_write_note(const void *buf, size_t size, void *opaque)
{
    DumpState *s = opaque;

    /* not enough room left in note_buf */
    if (s->note_buf_offset + size > s->note_size) {
        return -1;
    }

    memcpy(s->note_buf + s->note_buf_offset, buf, size);

    s->note_buf_offset += size;

    return 0;
}

/*
 * This function retrieves the header, name and desc sizes from an ELF note
 * header.
 *
 * @note has to be a valid ELF note. The returned sizes are unmodified
 * (not padded or rounded up to a multiple of 4).
 */
static void get_note_sizes(DumpState *s, const void *note,
                           uint64_t *note_head_size,
                           uint64_t *name_size,
                           uint64_t *desc_size)
{
    uint64_t note_head_sz;
    uint64_t name_sz;
    uint64_t desc_sz;

    if (s->dump_info.d_class == ELFCLASS64) {
        const Elf64_Nhdr *hdr = note;
        note_head_sz = sizeof(Elf64_Nhdr);
        name_sz = tswap64(hdr->n_namesz);
        desc_sz = tswap64(hdr->n_descsz);
    } else {
        const Elf32_Nhdr *hdr = note;
        note_head_sz = sizeof(Elf32_Nhdr);
        name_sz = tswap32(hdr->n_namesz);
        desc_sz = tswap32(hdr->n_descsz);
    }

    if (note_head_size) {
        *note_head_size = note_head_sz;
    }
    if (name_size) {
        *name_size = name_sz;
    }
    if (desc_size) {
        *desc_size = desc_sz;
    }
}

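/*
 * Compare a note's name, including its trailing NUL, against the given
 * string; the name field starts right after the 4-byte aligned note header.
 */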
static bool note_name_equal(DumpState *s,
                            const uint8_t *note, const char *name)
{
    int len = strlen(name) + 1;
    uint64_t head_size, name_size;

    get_note_sizes(s, note, &head_size, &name_size, NULL);
    head_size = ROUND_UP(head_size, 4);

    return name_size == len && memcmp(note + head_size, name, len) == 0;
}

/* write common header, sub header and elf note to vmcore */
static void create_header32(DumpState *s, Error **errp)
{
    DiskDumpHeader32 *dh = NULL;
    KdumpSubHeader32 *kh = NULL;
    size_t size;
    uint32_t block_size;
    uint32_t sub_hdr_size;
    uint32_t bitmap_blocks;
    uint32_t status = 0;
    uint64_t offset_note;
    Error *local_err = NULL;

    /* write the common header; we use version 6 of the kdump-compressed format */
    size = sizeof(DiskDumpHeader32);
    dh = g_malloc0(size);

    memcpy(dh->signature, KDUMP_SIGNATURE, SIG_LEN);
    dh->header_version = cpu_to_dump32(s, 6);
    block_size = s->dump_info.page_size;
    dh->block_size = cpu_to_dump32(s, block_size);
    sub_hdr_size = sizeof(struct KdumpSubHeader32) + s->note_size;
    sub_hdr_size = DIV_ROUND_UP(sub_hdr_size, block_size);
    dh->sub_hdr_size = cpu_to_dump32(s, sub_hdr_size);
    /* dh->max_mapnr may be truncated, full 64bit is in kh.max_mapnr_64 */
    dh->max_mapnr = cpu_to_dump32(s, MIN(s->max_mapnr, UINT_MAX));
    dh->nr_cpus = cpu_to_dump32(s, s->nr_cpus);
    bitmap_blocks = DIV_ROUND_UP(s->len_dump_bitmap, block_size) * 2;
    dh->bitmap_blocks = cpu_to_dump32(s, bitmap_blocks);
    strncpy(dh->utsname.machine, ELF_MACHINE_UNAME, sizeof(dh->utsname.machine));

    if (s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) {
        status |= DUMP_DH_COMPRESSED_ZLIB;
    }
#ifdef CONFIG_LZO
    if (s->flag_compress & DUMP_DH_COMPRESSED_LZO) {
        status |= DUMP_DH_COMPRESSED_LZO;
    }
#endif
#ifdef CONFIG_SNAPPY
    if (s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) {
        status |= DUMP_DH_COMPRESSED_SNAPPY;
    }
#endif
    dh->status = cpu_to_dump32(s, status);

    if (write_buffer(s->fd, 0, dh, size) < 0) {
        error_setg(errp, "dump: failed to write disk dump header");
        goto out;
    }

    /* write sub header */
    size = sizeof(KdumpSubHeader32);
    kh = g_malloc0(size);

    /* 64bit max_mapnr_64 */
    kh->max_mapnr_64 = cpu_to_dump64(s, s->max_mapnr);
    kh->phys_base = cpu_to_dump32(s, s->dump_info.phys_base);
    kh->dump_level = cpu_to_dump32(s, DUMP_LEVEL);

    offset_note = DISKDUMP_HEADER_BLOCKS * block_size + size;
    if (s->guest_note &&
        note_name_equal(s, s->guest_note, "VMCOREINFO")) {
        uint64_t hsize, name_size, size_vmcoreinfo_desc, offset_vmcoreinfo;

        get_note_sizes(s, s->guest_note,
                       &hsize, &name_size, &size_vmcoreinfo_desc);
        offset_vmcoreinfo = offset_note + s->note_size - s->guest_note_size +
            (DIV_ROUND_UP(hsize, 4) + DIV_ROUND_UP(name_size, 4)) * 4;
        kh->offset_vmcoreinfo = cpu_to_dump64(s, offset_vmcoreinfo);
        kh->size_vmcoreinfo = cpu_to_dump32(s, size_vmcoreinfo_desc);
    }

    kh->offset_note = cpu_to_dump64(s, offset_note);
    kh->note_size = cpu_to_dump32(s, s->note_size);

    if (write_buffer(s->fd, DISKDUMP_HEADER_BLOCKS *
                     block_size, kh, size) < 0) {
        error_setg(errp, "dump: failed to write kdump sub header");
        goto out;
    }

    /* write note */
    s->note_buf = g_malloc0(s->note_size);
    s->note_buf_offset = 0;

    /* use s->note_buf to store notes temporarily */
    write_elf32_notes(buf_write_note, s, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto out;
    }
    if (write_buffer(s->fd, offset_note, s->note_buf,
                     s->note_size) < 0) {
        error_setg(errp, "dump: failed to write notes");
        goto out;
    }

    /* get offset of dump_bitmap */
    s->offset_dump_bitmap = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size) *
                             block_size;

    /* get offset of page */
    s->offset_page = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size + bitmap_blocks) *
                     block_size;

out:
    g_free(dh);
    g_free(kh);
    g_free(s->note_buf);
}

/* write common header, sub header and elf note to vmcore */
static void create_header64(DumpState *s, Error **errp)
{
    DiskDumpHeader64 *dh = NULL;
    KdumpSubHeader64 *kh = NULL;
    size_t size;
    uint32_t block_size;
    uint32_t sub_hdr_size;
    uint32_t bitmap_blocks;
    uint32_t status = 0;
    uint64_t offset_note;
    Error *local_err = NULL;

    /* write the common header; we use version 6 of the kdump-compressed format */
    size = sizeof(DiskDumpHeader64);
    dh = g_malloc0(size);

    memcpy(dh->signature, KDUMP_SIGNATURE, SIG_LEN);
    dh->header_version = cpu_to_dump32(s, 6);
    block_size = s->dump_info.page_size;
    dh->block_size = cpu_to_dump32(s, block_size);
    sub_hdr_size = sizeof(struct KdumpSubHeader64) + s->note_size;
    sub_hdr_size = DIV_ROUND_UP(sub_hdr_size, block_size);
    dh->sub_hdr_size = cpu_to_dump32(s, sub_hdr_size);
    /* dh->max_mapnr may be truncated, full 64bit is in kh.max_mapnr_64 */
    dh->max_mapnr = cpu_to_dump32(s, MIN(s->max_mapnr, UINT_MAX));
    dh->nr_cpus = cpu_to_dump32(s, s->nr_cpus);
    bitmap_blocks = DIV_ROUND_UP(s->len_dump_bitmap, block_size) * 2;
    dh->bitmap_blocks = cpu_to_dump32(s, bitmap_blocks);
    strncpy(dh->utsname.machine, ELF_MACHINE_UNAME, sizeof(dh->utsname.machine));

    if (s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) {
        status |= DUMP_DH_COMPRESSED_ZLIB;
    }
#ifdef CONFIG_LZO
    if (s->flag_compress & DUMP_DH_COMPRESSED_LZO) {
        status |= DUMP_DH_COMPRESSED_LZO;
    }
#endif
#ifdef CONFIG_SNAPPY
    if (s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) {
        status |= DUMP_DH_COMPRESSED_SNAPPY;
    }
#endif
    dh->status = cpu_to_dump32(s, status);

    if (write_buffer(s->fd, 0, dh, size) < 0) {
        error_setg(errp, "dump: failed to write disk dump header");
        goto out;
    }

    /* write sub header */
    size = sizeof(KdumpSubHeader64);
    kh = g_malloc0(size);

    /* 64bit max_mapnr_64 */
    kh->max_mapnr_64 = cpu_to_dump64(s, s->max_mapnr);
    kh->phys_base = cpu_to_dump64(s, s->dump_info.phys_base);
    kh->dump_level = cpu_to_dump32(s, DUMP_LEVEL);

    offset_note = DISKDUMP_HEADER_BLOCKS * block_size + size;
    if (s->guest_note &&
        note_name_equal(s, s->guest_note, "VMCOREINFO")) {
        uint64_t hsize, name_size, size_vmcoreinfo_desc, offset_vmcoreinfo;

        get_note_sizes(s, s->guest_note,
                       &hsize, &name_size, &size_vmcoreinfo_desc);
        offset_vmcoreinfo = offset_note + s->note_size - s->guest_note_size +
            (DIV_ROUND_UP(hsize, 4) + DIV_ROUND_UP(name_size, 4)) * 4;
        kh->offset_vmcoreinfo = cpu_to_dump64(s, offset_vmcoreinfo);
        kh->size_vmcoreinfo = cpu_to_dump64(s, size_vmcoreinfo_desc);
    }

    kh->offset_note = cpu_to_dump64(s, offset_note);
    kh->note_size = cpu_to_dump64(s, s->note_size);

    if (write_buffer(s->fd, DISKDUMP_HEADER_BLOCKS *
                     block_size, kh, size) < 0) {
        error_setg(errp, "dump: failed to write kdump sub header");
        goto out;
    }

    /* write note */
    s->note_buf = g_malloc0(s->note_size);
    s->note_buf_offset = 0;

    /* use s->note_buf to store notes temporarily */
    write_elf64_notes(buf_write_note, s, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto out;
    }

    if (write_buffer(s->fd, offset_note, s->note_buf,
                     s->note_size) < 0) {
        error_setg(errp, "dump: failed to write notes");
        goto out;
    }

    /* get offset of dump_bitmap */
    s->offset_dump_bitmap = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size) *
                             block_size;

    /* get offset of page */
    s->offset_page = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size + bitmap_blocks) *
                     block_size;

out:
    g_free(dh);
    g_free(kh);
    g_free(s->note_buf);
}

static void write_dump_header(DumpState *s, Error **errp)
{
    if (s->dump_info.d_class == ELFCLASS32) {
        create_header32(s, errp);
    } else {
        create_header64(s, errp);
    }
}

static size_t dump_bitmap_get_bufsize(DumpState *s)
{
    return s->dump_info.page_size;
}

/*
 * Set the dump_bitmap sequentially. Bits before last_pfn must not be
 * rewritten, so to set the first bit, pass last_pfn == pfn == 0.
 * set_dump_bitmap always leaves the most recently set bit un-synced; setting
 * bit (last bit + sizeof(buf) * 8) to 0 flushes the content of buf into the
 * vmcore, i.e. synchronizes the un-synced bit into the vmcore.
 */
static int set_dump_bitmap(uint64_t last_pfn, uint64_t pfn, bool value,
                           uint8_t *buf, DumpState *s)
{
    off_t old_offset, new_offset;
    off_t offset_bitmap1, offset_bitmap2;
    uint32_t byte, bit;
    size_t bitmap_bufsize = dump_bitmap_get_bufsize(s);
    size_t bits_per_buf = bitmap_bufsize * CHAR_BIT;

    /* we must never touch a bit before last_pfn again */
    assert(last_pfn <= pfn);

    /*
     * If the bit to be set is not cached in buf, first flush the data in buf
     * to the vmcore. Making new_offset bigger than old_offset also syncs any
     * remaining data into the vmcore.
     */
    old_offset = bitmap_bufsize * (last_pfn / bits_per_buf);
    new_offset = bitmap_bufsize * (pfn / bits_per_buf);

    while (old_offset < new_offset) {
        /* calculate the offset and write dump_bitmap */
        offset_bitmap1 = s->offset_dump_bitmap + old_offset;
        if (write_buffer(s->fd, offset_bitmap1, buf,
                         bitmap_bufsize) < 0) {
            return -1;
        }

        /* dump level 1 is chosen, so 1st and 2nd bitmap are same */
        offset_bitmap2 = s->offset_dump_bitmap + s->len_dump_bitmap +
                         old_offset;
        if (write_buffer(s->fd, offset_bitmap2, buf,
                         bitmap_bufsize) < 0) {
            return -1;
        }

        memset(buf, 0, bitmap_bufsize);
        old_offset += bitmap_bufsize;
    }

    /* get the exact place of the bit in the buf, and set it */
    byte = (pfn % bits_per_buf) / CHAR_BIT;
    bit = (pfn % bits_per_buf) % CHAR_BIT;
    if (value) {
        buf[byte] |= 1u << bit;
    } else {
        buf[byte] &= ~(1u << bit);
    }

    return 0;
}

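/*
 * Convert between guest physical addresses and page frame numbers, using
 * the dump page size; ARCH_PFN_OFFSET rebases the PFN for targets whose
 * RAM does not start at physical address 0.
 */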
static uint64_t dump_paddr_to_pfn(DumpState *s, uint64_t addr)
{
    int target_page_shift = ctz32(s->dump_info.page_size);

    return (addr >> target_page_shift) - ARCH_PFN_OFFSET;
}

static uint64_t dump_pfn_to_paddr(DumpState *s, uint64_t pfn)
{
    int target_page_shift = ctz32(s->dump_info.page_size);

    return (pfn + ARCH_PFN_OFFSET) << target_page_shift;
}

/*
 * Examine every page and return the page frame number and the address of
 * the page. bufptr can be NULL. Note: the blocks here are supposed to
 * reflect guest-phys blocks, so block->target_start and block->target_end
 * should be integral multiples of the target page size.
 */
static bool get_next_page(GuestPhysBlock **blockptr, uint64_t *pfnptr,
                          uint8_t **bufptr, DumpState *s)
{
    GuestPhysBlock *block = *blockptr;
    hwaddr addr, target_page_mask = ~((hwaddr)s->dump_info.page_size - 1);
    uint8_t *buf;

    /* block == NULL means the start of the iteration */
    if (!block) {
        block = QTAILQ_FIRST(&s->guest_phys_blocks.head);
        *blockptr = block;
        assert((block->target_start & ~target_page_mask) == 0);
        assert((block->target_end & ~target_page_mask) == 0);
        *pfnptr = dump_paddr_to_pfn(s, block->target_start);
        if (bufptr) {
            *bufptr = block->host_addr;
        }
        return true;
    }

    *pfnptr = *pfnptr + 1;
    addr = dump_pfn_to_paddr(s, *pfnptr);

    if ((addr >= block->target_start) &&
        (addr + s->dump_info.page_size <= block->target_end)) {
        buf = block->host_addr + (addr - block->target_start);
    } else {
        /* the next page is in the next block */
        block = QTAILQ_NEXT(block, next);
        *blockptr = block;
        if (!block) {
            return false;
        }
        assert((block->target_start & ~target_page_mask) == 0);
        assert((block->target_end & ~target_page_mask) == 0);
        *pfnptr = dump_paddr_to_pfn(s, block->target_start);
        buf = block->host_addr;
    }

    if (bufptr) {
        *bufptr = buf;
    }

    return true;
}

static void write_dump_bitmap(DumpState *s, Error **errp)
{
    int ret = 0;
    uint64_t last_pfn, pfn;
    void *dump_bitmap_buf;
    size_t num_dumpable;
    GuestPhysBlock *block_iter = NULL;
    size_t bitmap_bufsize = dump_bitmap_get_bufsize(s);
    size_t bits_per_buf = bitmap_bufsize * CHAR_BIT;

    /* dump_bitmap_buf is used to store dump_bitmap temporarily */
    dump_bitmap_buf = g_malloc0(bitmap_bufsize);

    num_dumpable = 0;
    last_pfn = 0;

    /*
     * Examine memory page by page, and set the bit in the dump_bitmap that
     * corresponds to each existing page.
     */
    while (get_next_page(&block_iter, &pfn, NULL, s)) {
        ret = set_dump_bitmap(last_pfn, pfn, true, dump_bitmap_buf, s);
        if (ret < 0) {
            error_setg(errp, "dump: failed to set dump_bitmap");
            goto out;
        }

        last_pfn = pfn;
        num_dumpable++;
    }

    /*
     * set_dump_bitmap will always leave the recently set bit un-sync. Here we
     * set the remaining bits from last_pfn to the end of the bitmap buffer to
     * 0. With those set, the un-sync bit will be synchronized into the vmcore.
     */
    if (num_dumpable > 0) {
        ret = set_dump_bitmap(last_pfn, last_pfn + bits_per_buf, false,
                              dump_bitmap_buf, s);
        if (ret < 0) {
            error_setg(errp, "dump: failed to sync dump_bitmap");
            goto out;
        }
    }

    /* number of dumpable pages that will be dumped later */
    s->num_dumpable = num_dumpable;

out:
    g_free(dump_bitmap_buf);
}

static void prepare_data_cache(DataCache *data_cache, DumpState *s,
                               off_t offset)
{
    data_cache->fd = s->fd;
    data_cache->data_size = 0;
    data_cache->buf_size = 4 * dump_bitmap_get_bufsize(s);
    data_cache->buf = g_malloc0(data_cache->buf_size);
    data_cache->offset = offset;
}

static int write_cache(DataCache *dc, const void *buf, size_t size,
                       bool flag_sync)
{
    /*
     * dc->buf_size must not be less than size, otherwise the cache could
     * never hold the data
     */
    assert(size <= dc->buf_size);

    /*
     * if flag_sync is set, synchronize data in dc->buf into vmcore.
     * otherwise check if the space is enough for caching data in buf, if not,
     * write the data in dc->buf to dc->fd and reset dc->buf
     */
    if ((!flag_sync && dc->data_size + size > dc->buf_size) ||
        (flag_sync && dc->data_size > 0)) {
        if (write_buffer(dc->fd, dc->offset, dc->buf, dc->data_size) < 0) {
            return -1;
        }

        dc->offset += dc->data_size;
        dc->data_size = 0;
    }

    if (!flag_sync) {
        memcpy(dc->buf + dc->data_size, buf, size);
        dc->data_size += size;
    }

    return 0;
}

static void free_data_cache(DataCache *data_cache)
{
    g_free(data_cache->buf);
}

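/* worst-case output buffer size for one compressed page, per algorithm */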
static size_t get_len_buf_out(size_t page_size, uint32_t flag_compress)
{
    switch (flag_compress) {
    case DUMP_DH_COMPRESSED_ZLIB:
        return compressBound(page_size);

    case DUMP_DH_COMPRESSED_LZO:
        /*
         * LZO will expand incompressible data by a little amount. Please check
         * the following URL to see the expansion calculation:
         * http://www.oberhumer.com/opensource/lzo/lzofaq.php
         */
        return page_size + page_size / 16 + 64 + 3;

#ifdef CONFIG_SNAPPY
    case DUMP_DH_COMPRESSED_SNAPPY:
        return snappy_max_compressed_length(page_size);
#endif
    }
    return 0;
}

/*
 * check if the page is all 0
 */
static inline bool is_zero_page(const uint8_t *buf, size_t page_size)
{
    return buffer_is_zero(buf, page_size);
}

static void write_dump_pages(DumpState *s, Error **errp)
{
    int ret = 0;
    DataCache page_desc, page_data;
    size_t len_buf_out, size_out;
#ifdef CONFIG_LZO
    lzo_bytep wrkmem = NULL;
#endif
    uint8_t *buf_out = NULL;
    off_t offset_desc, offset_data;
    PageDescriptor pd, pd_zero;
    uint8_t *buf;
    GuestPhysBlock *block_iter = NULL;
    uint64_t pfn_iter;

    /* get offset of page_desc and page_data in dump file */
    offset_desc = s->offset_page;
    offset_data = offset_desc + sizeof(PageDescriptor) * s->num_dumpable;

    prepare_data_cache(&page_desc, s, offset_desc);
    prepare_data_cache(&page_data, s, offset_data);

    /* prepare buffer to store compressed data */
    len_buf_out = get_len_buf_out(s->dump_info.page_size, s->flag_compress);
    assert(len_buf_out != 0);

#ifdef CONFIG_LZO
    wrkmem = g_malloc(LZO1X_1_MEM_COMPRESS);
#endif

    buf_out = g_malloc(len_buf_out);

    /*
     * init zero page's page_desc and page_data, because every zero page
     * uses the same page_data
     */
    pd_zero.size = cpu_to_dump32(s, s->dump_info.page_size);
    pd_zero.flags = cpu_to_dump32(s, 0);
    pd_zero.offset = cpu_to_dump64(s, offset_data);
    pd_zero.page_flags = cpu_to_dump64(s, 0);
    buf = g_malloc0(s->dump_info.page_size);
    ret = write_cache(&page_data, buf, s->dump_info.page_size, false);
    g_free(buf);
    if (ret < 0) {
        error_setg(errp, "dump: failed to write page data (zero page)");
        goto out;
    }

    offset_data += s->dump_info.page_size;

    /*
     * Dump memory to the vmcore page by page. All zero pages share the
     * single zero page stored at the start of the page data section.
     */
    while (get_next_page(&block_iter, &pfn_iter, &buf, s)) {
        /* check zero page */
        if (is_zero_page(buf, s->dump_info.page_size)) {
            ret = write_cache(&page_desc, &pd_zero, sizeof(PageDescriptor),
                              false);
            if (ret < 0) {
                error_setg(errp, "dump: failed to write page desc");
                goto out;
            }
        } else {
            /*
             * not a zero page, so:
             * 1. compress the page
             * 2. write the compressed page into the page_data cache
             * 3. get the page desc of the compressed page and write it into
             *    the page_desc cache
             *
             * Only one compression format is used here, since
             * s->flag_compress is set. When compression fails, we fall back
             * to storing the page in plaintext.
             */
            size_out = len_buf_out;
            if ((s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) &&
                    (compress2(buf_out, (uLongf *)&size_out, buf,
                               s->dump_info.page_size, Z_BEST_SPEED) == Z_OK) &&
                    (size_out < s->dump_info.page_size)) {
                pd.flags = cpu_to_dump32(s, DUMP_DH_COMPRESSED_ZLIB);
                pd.size  = cpu_to_dump32(s, size_out);

                ret = write_cache(&page_data, buf_out, size_out, false);
                if (ret < 0) {
                    error_setg(errp, "dump: failed to write page data");
                    goto out;
                }
#ifdef CONFIG_LZO
            } else if ((s->flag_compress & DUMP_DH_COMPRESSED_LZO) &&
                    (lzo1x_1_compress(buf, s->dump_info.page_size, buf_out,
                    (lzo_uint *)&size_out, wrkmem) == LZO_E_OK) &&
                    (size_out < s->dump_info.page_size)) {
                pd.flags = cpu_to_dump32(s, DUMP_DH_COMPRESSED_LZO);
                pd.size  = cpu_to_dump32(s, size_out);

                ret = write_cache(&page_data, buf_out, size_out, false);
                if (ret < 0) {
                    error_setg(errp, "dump: failed to write page data");
                    goto out;
                }
#endif
#ifdef CONFIG_SNAPPY
            } else if ((s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) &&
                    (snappy_compress((char *)buf, s->dump_info.page_size,
                    (char *)buf_out, &size_out) == SNAPPY_OK) &&
                    (size_out < s->dump_info.page_size)) {
                pd.flags = cpu_to_dump32(s, DUMP_DH_COMPRESSED_SNAPPY);
                pd.size  = cpu_to_dump32(s, size_out);

                ret = write_cache(&page_data, buf_out, size_out, false);
                if (ret < 0) {
                    error_setg(errp, "dump: failed to write page data");
                    goto out;
                }
#endif
            } else {
                /*
                 * fall back to storing the page in plaintext; size_out must
                 * be set to the target's page size
                 */
                pd.flags = cpu_to_dump32(s, 0);
                size_out = s->dump_info.page_size;
                pd.size = cpu_to_dump32(s, size_out);

                ret = write_cache(&page_data, buf,
                                  s->dump_info.page_size, false);
                if (ret < 0) {
                    error_setg(errp, "dump: failed to write page data");
                    goto out;
                }
            }

            /* get and write page desc here */
            pd.page_flags = cpu_to_dump64(s, 0);
            pd.offset = cpu_to_dump64(s, offset_data);
            offset_data += size_out;

            ret = write_cache(&page_desc, &pd, sizeof(PageDescriptor), false);
            if (ret < 0) {
                error_setg(errp, "dump: failed to write page desc");
                goto out;
            }
        }
        s->written_size += s->dump_info.page_size;
    }

    ret = write_cache(&page_desc, NULL, 0, true);
    if (ret < 0) {
        error_setg(errp, "dump: failed to sync cache for page_desc");
        goto out;
    }
    ret = write_cache(&page_data, NULL, 0, true);
    if (ret < 0) {
        error_setg(errp, "dump: failed to sync cache for page_data");
        goto out;
    }

out:
    free_data_cache(&page_desc);
    free_data_cache(&page_data);

#ifdef CONFIG_LZO
    g_free(wrkmem);
#endif

    g_free(buf_out);
}

static void create_kdump_vmcore(DumpState *s, Error **errp)
{
    int ret;
    Error *local_err = NULL;

    /*
     * the kdump-compressed format is:
     *                                               File offset
     *  +------------------------------------------+ 0x0
     *  |    main header (struct disk_dump_header) |
     *  |------------------------------------------+ block 1
     *  |    sub header (struct kdump_sub_header)  |
     *  |------------------------------------------+ block 2
     *  |            1st-dump_bitmap               |
     *  |------------------------------------------+ block 2 + X blocks
     *  |            2nd-dump_bitmap               | (aligned by block)
     *  |------------------------------------------+ block 2 + 2 * X blocks
     *  |  page desc for pfn 0 (struct page_desc)  | (aligned by block)
     *  |  page desc for pfn 1 (struct page_desc)  |
     *  |                    :                     |
     *  |------------------------------------------| (not aligned by block)
     *  |         page data (pfn 0)                |
     *  |         page data (pfn 1)                |
     *  |                    :                     |
     *  +------------------------------------------+
     */

    ret = write_start_flat_header(s->fd);
    if (ret < 0) {
        error_setg(errp, "dump: failed to write start flat header");
        return;
    }

    write_dump_header(s, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    write_dump_bitmap(s, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    write_dump_pages(s, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    ret = write_end_flat_header(s->fd);
    if (ret < 0) {
        error_setg(errp, "dump: failed to write end flat header");
        return;
    }
}

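/*
 * Find the first block to dump and the offset into it, honouring the
 * address filter; returns -1 when the filter matches no memory at all.
 */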
static ram_addr_t get_start_block(DumpState *s)
{
    GuestPhysBlock *block;

    if (!s->has_filter) {
        s->next_block = QTAILQ_FIRST(&s->guest_phys_blocks.head);
        return 0;
    }

    QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) {
        if (block->target_start >= s->begin + s->length ||
            block->target_end <= s->begin) {
            /* This block is out of the range */
            continue;
        }

        s->next_block = block;
        if (s->begin > block->target_start) {
            s->start = s->begin - block->target_start;
        } else {
            s->start = 0;
        }
        return s->start;
    }

    return -1;
}

static void get_max_mapnr(DumpState *s)
{
    GuestPhysBlock *last_block;

    last_block = QTAILQ_LAST(&s->guest_phys_blocks.head);
    s->max_mapnr = dump_paddr_to_pfn(s, last_block->target_end);
}

static DumpState dump_state_global = { .status = DUMP_STATUS_NONE };

static void dump_state_prepare(DumpState *s)
{
    /* zero the struct, setting status to active */
    *s = (DumpState) { .status = DUMP_STATUS_ACTIVE };
}

bool dump_in_progress(void)
{
    DumpState *state = &dump_state_global;
    return (atomic_read(&state->status) == DUMP_STATUS_ACTIVE);
}

/* calculate the total size of the memory to be dumped (taking the filter
 * into account) */
static int64_t dump_calculate_size(DumpState *s)
{
    GuestPhysBlock *block;
    int64_t size = 0, total = 0, left = 0, right = 0;

    QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) {
        if (s->has_filter) {
            /* calculate the overlapped region. */
            left = MAX(s->begin, block->target_start);
            right = MIN(s->begin + s->length, block->target_end);
            size = right - left;
            size = size > 0 ? size : 0;
        } else {
            /* count the whole region in */
            size = (block->target_end - block->target_start);
        }
        total += size;
    }

    return total;
}

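/*
 * Parse the guest's VMCOREINFO note for the architecture's phys_base
 * symbol and, when found, override the value guessed earlier.
 */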
static void vmcoreinfo_update_phys_base(DumpState *s)
{
    uint64_t size, note_head_size, name_size, phys_base;
    char **lines;
    uint8_t *vmci;
    size_t i;

    if (!note_name_equal(s, s->guest_note, "VMCOREINFO")) {
        return;
    }

    get_note_sizes(s, s->guest_note, &note_head_size, &name_size, &size);
    note_head_size = ROUND_UP(note_head_size, 4);

    vmci = s->guest_note + note_head_size + ROUND_UP(name_size, 4);
    *(vmci + size) = '\0';

    lines = g_strsplit((char *)vmci, "\n", -1);
    for (i = 0; lines[i]; i++) {
        const char *prefix = NULL;

        if (s->dump_info.d_machine == EM_X86_64) {
            prefix = "NUMBER(phys_base)=";
        } else if (s->dump_info.d_machine == EM_AARCH64) {
            prefix = "NUMBER(PHYS_OFFSET)=";
        }

        if (prefix && g_str_has_prefix(lines[i], prefix)) {
            if (qemu_strtou64(lines[i] + strlen(prefix), NULL, 16,
                              &phys_base) < 0) {
                warn_report("Failed to read %s", prefix);
            } else {
                s->dump_info.phys_base = phys_base;
            }
            break;
        }
    }

    g_strfreev(lines);
}

static void dump_init(DumpState *s, int fd, bool has_format,
                      DumpGuestMemoryFormat format, bool paging, bool has_filter,
                      int64_t begin, int64_t length, Error **errp)
{
    VMCoreInfoState *vmci = vmcoreinfo_find();
    CPUState *cpu;
    int nr_cpus;
    Error *err = NULL;
    int ret;

    s->has_format = has_format;
    s->format = format;
    s->written_size = 0;

    /* the kdump-compressed format conflicts with paging and filtering */
1658     if (has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) {
1659         assert(!paging && !has_filter);
1660     }
1661 
1662     if (runstate_is_running()) {
1663         vm_stop(RUN_STATE_SAVE_VM);
1664         s->resume = true;
1665     } else {
1666         s->resume = false;
1667     }
1668 
1669     /* If we use KVM, we should synchronize the registers before we get dump
1670      * info or physmap info.
1671      */
1672     cpu_synchronize_all_states();
1673     nr_cpus = 0;
1674     CPU_FOREACH(cpu) {
1675         nr_cpus++;
1676     }
1677 
1678     s->fd = fd;
1679     s->has_filter = has_filter;
1680     s->begin = begin;
1681     s->length = length;
1682 
1683     memory_mapping_list_init(&s->list);
1684 
1685     guest_phys_blocks_init(&s->guest_phys_blocks);
1686     guest_phys_blocks_append(&s->guest_phys_blocks);
1687     s->total_size = dump_calculate_size(s);
1688 #ifdef DEBUG_DUMP_GUEST_MEMORY
1689     fprintf(stderr, "DUMP: total memory to dump: %" PRIu64 "\n", s->total_size);
1690 #endif
1691 
1692     /* it does not make sense to dump non-existent memory */
1693     if (!s->total_size) {
1694         error_setg(errp, "dump: no guest memory to dump");
1695         goto cleanup;
1696     }
1697 
1698     s->start = get_start_block(s);
1699     if (s->start == -1) {
1700         error_setg(errp, QERR_INVALID_PARAMETER, "begin");
1701         goto cleanup;
1702     }
1703 
1704     /* get dump info: endian, class and architecture.
1705      * If the target architecture is not supported, cpu_get_dump_info() will
1706      * return -1.
1707      */
1708     ret = cpu_get_dump_info(&s->dump_info, &s->guest_phys_blocks);
1709     if (ret < 0) {
1710         error_setg(errp, QERR_UNSUPPORTED);
1711         goto cleanup;
1712     }
1713 
1714     if (!s->dump_info.page_size) {
1715         s->dump_info.page_size = TARGET_PAGE_SIZE;
1716     }
1717 
1718     s->note_size = cpu_get_note_size(s->dump_info.d_class,
1719                                      s->dump_info.d_machine, nr_cpus);
1720     if (s->note_size < 0) {
1721         error_setg(errp, QERR_UNSUPPORTED);
1722         goto cleanup;
1723     }
1724 
1725     /*
1726      * The goal of this block is to (a) update the previously guessed
1727      * phys_base, (b) copy the guest note out of the guest.
1728      * Failure to do so is not fatal for dumping.
1729      */
1730     if (vmci) {
1731         uint64_t addr, note_head_size, name_size, desc_size;
1732         uint32_t size;
1733         uint16_t format;
1734 
1735         note_head_size = s->dump_info.d_class == ELFCLASS32 ?
1736             sizeof(Elf32_Nhdr) : sizeof(Elf64_Nhdr);
1737 
1738         format = le16_to_cpu(vmci->vmcoreinfo.guest_format);
1739         size = le32_to_cpu(vmci->vmcoreinfo.size);
1740         addr = le64_to_cpu(vmci->vmcoreinfo.paddr);
1741         if (!vmci->has_vmcoreinfo) {
1742             warn_report("guest note is not present");
1743         } else if (size < note_head_size || size > MAX_GUEST_NOTE_SIZE) {
1744             warn_report("guest note size is invalid: %" PRIu32, size);
1745         } else if (format != FW_CFG_VMCOREINFO_FORMAT_ELF) {
1746             warn_report("guest note format is unsupported: %" PRIu16, format);
1747         } else {
1748             s->guest_note = g_malloc(size + 1); /* +1 for adding \0 */
1749             cpu_physical_memory_read(addr, s->guest_note, size);
1750 
1751             get_note_sizes(s, s->guest_note, NULL, &name_size, &desc_size);
1752             s->guest_note_size = ELF_NOTE_SIZE(note_head_size, name_size,
1753                                                desc_size);
1754             if (name_size > MAX_GUEST_NOTE_SIZE ||
1755                 desc_size > MAX_GUEST_NOTE_SIZE ||
1756                 s->guest_note_size > size) {
1757                 warn_report("Invalid guest note header");
1758                 g_free(s->guest_note);
1759                 s->guest_note = NULL;
1760             } else {
1761                 vmcoreinfo_update_phys_base(s);
1762                 s->note_size += s->guest_note_size;
1763             }
1764         }
1765     }
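    /*
     * Layout assumed for the guest ELF note handled above, with each
     * part padded to a 4-byte boundary as usual for ELF notes:
     *
     *   Elf{32,64}_Nhdr | name (n_namesz, padded) | desc (n_descsz, padded)
     *
     * guest_note_size is the sum of those three padded pieces, which is
     * why it must not exceed the advertised note size.
     */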
1766 
1767     /* get memory mapping */
1768     if (paging) {
1769         qemu_get_guest_memory_mapping(&s->list, &s->guest_phys_blocks, &err);
1770         if (err != NULL) {
1771             error_propagate(errp, err);
1772             goto cleanup;
1773         }
1774     } else {
1775         qemu_get_guest_simple_memory_mapping(&s->list, &s->guest_phys_blocks);
1776     }
1777 
1778     s->nr_cpus = nr_cpus;
1779 
1780     get_max_mapnr(s);
1781 
1782     uint64_t tmp;
1783     tmp = DIV_ROUND_UP(DIV_ROUND_UP(s->max_mapnr, CHAR_BIT),
1784                        s->dump_info.page_size);
1785     s->len_dump_bitmap = tmp * s->dump_info.page_size;
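    /*
     * Worked example of the bitmap sizing above (numbers illustrative):
     * with max_mapnr = 0x100000 page frames (4 GiB of guest RAM at 4 KiB
     * pages), one bit per frame needs 0x100000 / 8 = 0x20000 bytes, and
     * rounding up to whole 4 KiB pages keeps len_dump_bitmap at 0x20000,
     * i.e. 32 dump pages per bitmap.
     */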
1786 
1787     /* init for kdump-compressed format */
1788     if (has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) {
1789         switch (format) {
1790         case DUMP_GUEST_MEMORY_FORMAT_KDUMP_ZLIB:
1791             s->flag_compress = DUMP_DH_COMPRESSED_ZLIB;
1792             break;
1793 
1794         case DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO:
1795 #ifdef CONFIG_LZO
1796             if (lzo_init() != LZO_E_OK) {
1797                 error_setg(errp, "failed to initialize the LZO library");
1798                 goto cleanup;
1799             }
1800 #endif
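            /*
             * Note: without CONFIG_LZO this assignment is unreachable in
             * practice, since qmp_dump_guest_memory() rejects the
             * kdump-lzo format before dump_init() is called.
             */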
1801             s->flag_compress = DUMP_DH_COMPRESSED_LZO;
1802             break;
1803 
1804         case DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY:
1805             s->flag_compress = DUMP_DH_COMPRESSED_SNAPPY;
1806             break;
1807 
1808         default:
1809             s->flag_compress = 0;
1810         }
1811 
1812         return;
1813     }
1814 
1815     if (s->has_filter) {
1816         memory_mapping_filter(&s->list, s->begin, s->length);
1817     }
1818 
1819     /*
1820      * calculate phdr_num
1821      *
1822      * the type of ehdr->e_phnum is uint16_t, so we should avoid overflow
1823      */
1824     s->phdr_num = 1; /* PT_NOTE */
1825     if (s->list.num < UINT16_MAX - 2) {
1826         s->phdr_num += s->list.num;
1827         s->have_section = false;
1828     } else {
1829         s->have_section = true;
1830         s->phdr_num = PN_XNUM;
1831         s->sh_info = 1; /* PT_NOTE */
1832 
1833         /* the type of shdr->sh_info is uint32_t, so we should avoid overflow */
1834         if (s->list.num <= UINT32_MAX - 1) {
1835             s->sh_info += s->list.num;
1836         } else {
1837             s->sh_info = UINT32_MAX;
1838         }
1839     }
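    /*
     * ELF detail behind the branch above: when the real program header
     * count does not fit in the 16-bit e_phnum field, e_phnum is set to
     * the escape value PN_XNUM (0xffff) and the true count is stored in
     * the sh_info field of section header 0, which is why a section
     * header table becomes mandatory (have_section) in that case.
     */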
1840 
1841     if (s->dump_info.d_class == ELFCLASS64) {
1842         if (s->have_section) {
1843             s->memory_offset = sizeof(Elf64_Ehdr) +
1844                                sizeof(Elf64_Phdr) * s->sh_info +
1845                                sizeof(Elf64_Shdr) + s->note_size;
1846         } else {
1847             s->memory_offset = sizeof(Elf64_Ehdr) +
1848                                sizeof(Elf64_Phdr) * s->phdr_num + s->note_size;
1849         }
1850     } else {
1851         if (s->have_section) {
1852             s->memory_offset = sizeof(Elf32_Ehdr) +
1853                                sizeof(Elf32_Phdr) * s->sh_info +
1854                                sizeof(Elf32_Shdr) + s->note_size;
1855         } else {
1856             s->memory_offset = sizeof(Elf32_Ehdr) +
1857                                sizeof(Elf32_Phdr) * s->phdr_num + s->note_size;
1858         }
1859     }
1860 
1861     return;
1862 
1863 cleanup:
1864     dump_cleanup(s);
1865 }
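/*
 * Sketch of the vmcore layout implied by the memory_offset arithmetic
 * in dump_init() (ELFCLASS64 without the PN_XNUM section header case):
 *
 *   +----------------------------+ 0
 *   | Elf64_Ehdr                 |
 *   +----------------------------+ sizeof(Elf64_Ehdr)
 *   | Elf64_Phdr[phdr_num]       |
 *   +----------------------------+
 *   | ELF notes (note_size)      |
 *   +----------------------------+ memory_offset
 *   | guest memory contents      |
 *   +----------------------------+
 */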
1866 
1867 /* this operation might be time-consuming. */
1868 static void dump_process(DumpState *s, Error **errp)
1869 {
1870     Error *local_err = NULL;
1871     DumpQueryResult *result = NULL;
1872 
1873     if (s->has_format && s->format == DUMP_GUEST_MEMORY_FORMAT_WIN_DMP) {
1874 #ifdef TARGET_X86_64
1875         create_win_dump(s, &local_err);
1876 #endif
1877     } else if (s->has_format && s->format != DUMP_GUEST_MEMORY_FORMAT_ELF) {
1878         create_kdump_vmcore(s, &local_err);
1879     } else {
1880         create_vmcore(s, &local_err);
1881     }
1882 
1883     /* make sure status is written after written_size updates */
1884     smp_wmb();
1885     atomic_set(&s->status,
1886                (local_err ? DUMP_STATUS_FAILED : DUMP_STATUS_COMPLETED));
1887 
1888     /* send DUMP_COMPLETED message (unconditionally) */
1889     result = qmp_query_dump(NULL);
1890     /* should never fail */
1891     assert(result);
1892     qapi_event_send_dump_completed(result, !!local_err, (local_err ?
1893                                    error_get_pretty(local_err) : NULL));
1894     qapi_free_DumpQueryResult(result);
1895 
1896     error_propagate(errp, local_err);
1897     dump_cleanup(s);
1898 }
1899 
1900 static void *dump_thread(void *data)
1901 {
1902     DumpState *s = (DumpState *)data;
1903     dump_process(s, NULL);
1904     return NULL;
1905 }
1906 
1907 DumpQueryResult *qmp_query_dump(Error **errp)
1908 {
1909     DumpQueryResult *result = g_new(DumpQueryResult, 1);
1910     DumpState *state = &dump_state_global;
1911     result->status = atomic_read(&state->status);
1912     /* make sure we are reading status and written_size in order */
1913     smp_rmb();
1914     result->completed = state->written_size;
1915     result->total = state->total_size;
1916     return result;
1917 }
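/*
 * A hypothetical QMP exchange while a detached dump is running (the
 * numbers are illustrative only):
 *
 *   -> { "execute": "query-dump" }
 *   <- { "return": { "status": "active",
 *                    "completed": 1073741824,
 *                    "total": 4294967296 } }
 */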
1918 
1919 void qmp_dump_guest_memory(bool paging, const char *file,
1920                            bool has_detach, bool detach,
1921                            bool has_begin, int64_t begin, bool has_length,
1922                            int64_t length, bool has_format,
1923                            DumpGuestMemoryFormat format, Error **errp)
1924 {
1925     const char *p;
1926     int fd = -1;
1927     DumpState *s;
1928     Error *local_err = NULL;
1929     bool detach_p = false;
1930 
1931     if (runstate_check(RUN_STATE_INMIGRATE)) {
1932         error_setg(errp, "Dump not allowed during incoming migration.");
1933         return;
1934     }
1935 
1936     /* if there is a dump running in the background, we should wait until
1937      * it has finished */
1938     if (dump_in_progress()) {
1939         error_setg(errp, "There is a dump in process, please wait.");
1940         return;
1941     }
1942 
1943     /*
1944      * the kdump-compressed format needs the whole memory dumped, so paging
1945      * and filtering are not supported here.
1946      */
1947     if ((has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) &&
1948         (paging || has_begin || has_length)) {
1949         error_setg(errp, "kdump-compressed format doesn't support paging or "
1950                          "filter");
1951         return;
1952     }
1953     if (has_begin && !has_length) {
1954         error_setg(errp, QERR_MISSING_PARAMETER, "length");
1955         return;
1956     }
1957     if (!has_begin && has_length) {
1958         error_setg(errp, QERR_MISSING_PARAMETER, "begin");
1959         return;
1960     }
1961     if (has_detach) {
1962         detach_p = detach;
1963     }
1964 
1965     /* check whether LZO/Snappy support was compiled in */
1966 #ifndef CONFIG_LZO
1967     if (has_format && format == DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO) {
1968         error_setg(errp, "kdump-lzo is not available now");
1969         return;
1970     }
1971 #endif
1972 
1973 #ifndef CONFIG_SNAPPY
1974     if (has_format && format == DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY) {
1975         error_setg(errp, "kdump-snappy is not available now");
1976         return;
1977     }
1978 #endif
1979 
1980 #ifndef TARGET_X86_64
1981     if (has_format && format == DUMP_GUEST_MEMORY_FORMAT_WIN_DMP) {
1982         error_setg(errp, "Windows dump is only available for x86-64");
1983         return;
1984     }
1985 #endif
1986 
1987 #if !defined(WIN32)
1988     if (strstart(file, "fd:", &p)) {
1989         fd = monitor_get_fd(cur_mon, p, errp);
1990         if (fd == -1) {
1991             return;
1992         }
1993     }
1994 #endif
1995 
1996     if (strstart(file, "file:", &p)) {
1997         fd = qemu_open(p, O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, S_IRUSR);
1998         if (fd < 0) {
1999             error_setg_file_open(errp, errno, p);
2000             return;
2001         }
2002     }
2003 
2004     if (fd == -1) {
2005         error_setg(errp, QERR_INVALID_PARAMETER, "protocol");
2006         return;
2007     }
2008 
2009     s = &dump_state_global;
2010     dump_state_prepare(s);
2011 
2012     dump_init(s, fd, has_format, format, paging, has_begin,
2013               begin, length, &local_err);
2014     if (local_err) {
2015         error_propagate(errp, local_err);
2016         atomic_set(&s->status, DUMP_STATUS_FAILED);
2017         return;
2018     }
2019 
2020     if (detach_p) {
2021         /* detached dump */
2022         s->detached = true;
2023         qemu_thread_create(&s->dump_thread, "dump_thread", dump_thread,
2024                            s, QEMU_THREAD_DETACHED);
2025     } else {
2026         /* sync dump */
2027         dump_process(s, errp);
2028     }
2029 }
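/*
 * Illustrative QMP invocation of the command above:
 *
 *   -> { "execute": "dump-guest-memory",
 *        "arguments": { "paging": false,
 *                       "protocol": "file:/tmp/vmcore",
 *                       "detach": true,
 *                       "format": "kdump-zlib" } }
 *   <- { "return": {} }
 *
 * With "detach": true the command returns as soon as the dump thread is
 * created; completion is reported by the DUMP_COMPLETED event emitted
 * from dump_process().
 */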
2030 
2031 DumpGuestMemoryCapability *qmp_query_dump_guest_memory_capability(Error **errp)
2032 {
2033     DumpGuestMemoryFormatList *item;
2034     DumpGuestMemoryCapability *cap =
2035                                   g_malloc0(sizeof(DumpGuestMemoryCapability));
2036 
2037     /* ELF is always available */
2038     item = g_malloc0(sizeof(DumpGuestMemoryFormatList));
2039     cap->formats = item;
2040     item->value = DUMP_GUEST_MEMORY_FORMAT_ELF;
2041 
2042     /* kdump-zlib is always available */
2043     item->next = g_malloc0(sizeof(DumpGuestMemoryFormatList));
2044     item = item->next;
2045     item->value = DUMP_GUEST_MEMORY_FORMAT_KDUMP_ZLIB;
2046 
2047     /* add new item if kdump-lzo is available */
2048 #ifdef CONFIG_LZO
2049     item->next = g_malloc0(sizeof(DumpGuestMemoryFormatList));
2050     item = item->next;
2051     item->value = DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO;
2052 #endif
2053 
2054     /* add new item if kdump-snappy is available */
2055 #ifdef CONFIG_SNAPPY
2056     item->next = g_malloc0(sizeof(DumpGuestMemoryFormatList));
2057     item = item->next;
2058     item->value = DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY;
2059 #endif
2060 
2061     /* Windows dump is available only if target is x86_64 */
2062 #ifdef TARGET_X86_64
2063     item->next = g_malloc0(sizeof(DumpGuestMemoryFormatList));
2064     item = item->next;
2065     item->value = DUMP_GUEST_MEMORY_FORMAT_WIN_DMP;
2066 #endif
2067 
2068     return cap;
2069 }
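/*
 * Example reply for a build with LZO and Snappy compiled in on an
 * x86-64 target (the list varies with the configuration checks above):
 *
 *   -> { "execute": "query-dump-guest-memory-capability" }
 *   <- { "return": { "formats": [ "elf", "kdump-zlib", "kdump-lzo",
 *                                 "kdump-snappy", "win-dmp" ] } }
 */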
2070