xref: /qemu/dump/dump.c (revision ca61e750)
/*
 * QEMU dump
 *
 * Copyright Fujitsu, Corp. 2011, 2012
 *
 * Authors:
 *     Wen Congyang <wency@cn.fujitsu.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "elf.h"
#include "exec/hwaddr.h"
#include "monitor/monitor.h"
#include "sysemu/kvm.h"
#include "sysemu/dump.h"
#include "sysemu/memory_mapping.h"
#include "sysemu/runstate.h"
#include "sysemu/cpus.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-dump.h"
#include "qapi/qapi-events-dump.h"
#include "qapi/qmp/qerror.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "hw/misc/vmcoreinfo.h"
#include "migration/blocker.h"

#ifdef TARGET_X86_64
#include "win_dump.h"
#endif

#include <zlib.h>
#ifdef CONFIG_LZO
#include <lzo/lzo1x.h>
#endif
#ifdef CONFIG_SNAPPY
#include <snappy-c.h>
#endif
#ifndef ELF_MACHINE_UNAME
#define ELF_MACHINE_UNAME "Unknown"
#endif

#define MAX_GUEST_NOTE_SIZE (1 << 20) /* 1MB should be enough */

static Error *dump_migration_blocker;

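/*
 * Total size of an ELF note: the header, name and desc parts are each
 * padded to a 4-byte boundary.
 */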
#define ELF_NOTE_SIZE(hdr_size, name_size, desc_size)   \
    ((DIV_ROUND_UP((hdr_size), 4) +                     \
      DIV_ROUND_UP((name_size), 4) +                    \
      DIV_ROUND_UP((desc_size), 4)) * 4)

static inline bool dump_is_64bit(DumpState *s)
{
    return s->dump_info.d_class == ELFCLASS64;
}

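/*
 * Convert a value to the endianness of the dump target, as recorded in
 * s->dump_info.d_endian.
 */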
uint16_t cpu_to_dump16(DumpState *s, uint16_t val)
{
    if (s->dump_info.d_endian == ELFDATA2LSB) {
        val = cpu_to_le16(val);
    } else {
        val = cpu_to_be16(val);
    }

    return val;
}

uint32_t cpu_to_dump32(DumpState *s, uint32_t val)
{
    if (s->dump_info.d_endian == ELFDATA2LSB) {
        val = cpu_to_le32(val);
    } else {
        val = cpu_to_be32(val);
    }

    return val;
}

uint64_t cpu_to_dump64(DumpState *s, uint64_t val)
{
    if (s->dump_info.d_endian == ELFDATA2LSB) {
        val = cpu_to_le64(val);
    } else {
        val = cpu_to_be64(val);
    }

    return val;
}

static int dump_cleanup(DumpState *s)
{
    guest_phys_blocks_free(&s->guest_phys_blocks);
    memory_mapping_list_free(&s->list);
    close(s->fd);
    g_free(s->guest_note);
    s->guest_note = NULL;
    if (s->resume) {
        if (s->detached) {
            qemu_mutex_lock_iothread();
        }
        vm_start();
        if (s->detached) {
            qemu_mutex_unlock_iothread();
        }
    }
    migrate_del_blocker(dump_migration_blocker);

    return 0;
}

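/* WriteCoreDumpFunction for the ELF path: write straight to the dump fd */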
static int fd_write_vmcore(const void *buf, size_t size, void *opaque)
{
    DumpState *s = opaque;
    size_t written_size;

    written_size = qemu_write_full(s->fd, buf, size);
    if (written_size != size) {
        return -errno;
    }

    return 0;
}

static void write_elf64_header(DumpState *s, Error **errp)
{
    /*
     * phnum in the ELF header is 16 bit; if we have more segments, we
     * set phnum to PN_XNUM and write the real number of segments to a
     * special section.
     */
    uint16_t phnum = MIN(s->phdr_num, PN_XNUM);
    Elf64_Ehdr elf_header;
    int ret;

    memset(&elf_header, 0, sizeof(Elf64_Ehdr));
    memcpy(&elf_header, ELFMAG, SELFMAG);
    elf_header.e_ident[EI_CLASS] = ELFCLASS64;
    elf_header.e_ident[EI_DATA] = s->dump_info.d_endian;
    elf_header.e_ident[EI_VERSION] = EV_CURRENT;
    elf_header.e_type = cpu_to_dump16(s, ET_CORE);
    elf_header.e_machine = cpu_to_dump16(s, s->dump_info.d_machine);
    elf_header.e_version = cpu_to_dump32(s, EV_CURRENT);
    elf_header.e_ehsize = cpu_to_dump16(s, sizeof(elf_header));
    elf_header.e_phoff = cpu_to_dump64(s, s->phdr_offset);
    elf_header.e_phentsize = cpu_to_dump16(s, sizeof(Elf64_Phdr));
    elf_header.e_phnum = cpu_to_dump16(s, phnum);
    if (s->shdr_num) {
        elf_header.e_shoff = cpu_to_dump64(s, s->shdr_offset);
        elf_header.e_shentsize = cpu_to_dump16(s, sizeof(Elf64_Shdr));
        elf_header.e_shnum = cpu_to_dump16(s, s->shdr_num);
    }

    ret = fd_write_vmcore(&elf_header, sizeof(elf_header), s);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "dump: failed to write elf header");
    }
}

static void write_elf32_header(DumpState *s, Error **errp)
{
    /*
     * phnum in the ELF header is 16 bit; if we have more segments, we
     * set phnum to PN_XNUM and write the real number of segments to a
     * special section.
     */
    uint16_t phnum = MIN(s->phdr_num, PN_XNUM);
    Elf32_Ehdr elf_header;
    int ret;

    memset(&elf_header, 0, sizeof(Elf32_Ehdr));
    memcpy(&elf_header, ELFMAG, SELFMAG);
    elf_header.e_ident[EI_CLASS] = ELFCLASS32;
    elf_header.e_ident[EI_DATA] = s->dump_info.d_endian;
    elf_header.e_ident[EI_VERSION] = EV_CURRENT;
    elf_header.e_type = cpu_to_dump16(s, ET_CORE);
    elf_header.e_machine = cpu_to_dump16(s, s->dump_info.d_machine);
    elf_header.e_version = cpu_to_dump32(s, EV_CURRENT);
    elf_header.e_ehsize = cpu_to_dump16(s, sizeof(elf_header));
    elf_header.e_phoff = cpu_to_dump32(s, s->phdr_offset);
    elf_header.e_phentsize = cpu_to_dump16(s, sizeof(Elf32_Phdr));
    elf_header.e_phnum = cpu_to_dump16(s, phnum);
    if (s->shdr_num) {
        elf_header.e_shoff = cpu_to_dump32(s, s->shdr_offset);
        elf_header.e_shentsize = cpu_to_dump16(s, sizeof(Elf32_Shdr));
        elf_header.e_shnum = cpu_to_dump16(s, s->shdr_num);
    }

    ret = fd_write_vmcore(&elf_header, sizeof(elf_header), s);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "dump: failed to write elf header");
    }
}

static void write_elf64_load(DumpState *s, MemoryMapping *memory_mapping,
                             int phdr_index, hwaddr offset,
                             hwaddr filesz, Error **errp)
{
    Elf64_Phdr phdr;
    int ret;

    memset(&phdr, 0, sizeof(Elf64_Phdr));
    phdr.p_type = cpu_to_dump32(s, PT_LOAD);
    phdr.p_offset = cpu_to_dump64(s, offset);
    phdr.p_paddr = cpu_to_dump64(s, memory_mapping->phys_addr);
    phdr.p_filesz = cpu_to_dump64(s, filesz);
    phdr.p_memsz = cpu_to_dump64(s, memory_mapping->length);
    phdr.p_vaddr = cpu_to_dump64(s, memory_mapping->virt_addr) ?: phdr.p_paddr;

    assert(memory_mapping->length >= filesz);

    ret = fd_write_vmcore(&phdr, sizeof(Elf64_Phdr), s);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "dump: failed to write program header table");
    }
}

static void write_elf32_load(DumpState *s, MemoryMapping *memory_mapping,
                             int phdr_index, hwaddr offset,
                             hwaddr filesz, Error **errp)
{
    Elf32_Phdr phdr;
    int ret;

    memset(&phdr, 0, sizeof(Elf32_Phdr));
    phdr.p_type = cpu_to_dump32(s, PT_LOAD);
    phdr.p_offset = cpu_to_dump32(s, offset);
    phdr.p_paddr = cpu_to_dump32(s, memory_mapping->phys_addr);
    phdr.p_filesz = cpu_to_dump32(s, filesz);
    phdr.p_memsz = cpu_to_dump32(s, memory_mapping->length);
    phdr.p_vaddr =
        cpu_to_dump32(s, memory_mapping->virt_addr) ?: phdr.p_paddr;

    assert(memory_mapping->length >= filesz);

    ret = fd_write_vmcore(&phdr, sizeof(Elf32_Phdr), s);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "dump: failed to write program header table");
    }
}

static void write_elf64_phdr_note(DumpState *s, Elf64_Phdr *phdr)
{
    memset(phdr, 0, sizeof(*phdr));
    phdr->p_type = cpu_to_dump32(s, PT_NOTE);
    phdr->p_offset = cpu_to_dump64(s, s->note_offset);
    phdr->p_paddr = 0;
    phdr->p_filesz = cpu_to_dump64(s, s->note_size);
    phdr->p_memsz = cpu_to_dump64(s, s->note_size);
    phdr->p_vaddr = 0;
}

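/* CPU ids in the ELF notes are 1-based, hence the +1 */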
static inline int cpu_index(CPUState *cpu)
{
    return cpu->cpu_index + 1;
}

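/* Append the note the guest stored via the vmcoreinfo device, if any */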
static void write_guest_note(WriteCoreDumpFunction f, DumpState *s,
                             Error **errp)
{
    int ret;

    if (s->guest_note) {
        ret = f(s->guest_note, s->guest_note_size, s);
        if (ret < 0) {
            error_setg(errp, "dump: failed to write guest note");
        }
    }
}

static void write_elf64_notes(WriteCoreDumpFunction f, DumpState *s,
                              Error **errp)
{
    CPUState *cpu;
    int ret;
    int id;

    CPU_FOREACH(cpu) {
        id = cpu_index(cpu);
        ret = cpu_write_elf64_note(f, cpu, id, s);
        if (ret < 0) {
            error_setg(errp, "dump: failed to write elf notes");
            return;
        }
    }

    CPU_FOREACH(cpu) {
        ret = cpu_write_elf64_qemunote(f, cpu, s);
        if (ret < 0) {
            error_setg(errp, "dump: failed to write CPU status");
            return;
        }
    }

    write_guest_note(f, s, errp);
}

static void write_elf32_phdr_note(DumpState *s, Elf32_Phdr *phdr)
{
    memset(phdr, 0, sizeof(*phdr));
    phdr->p_type = cpu_to_dump32(s, PT_NOTE);
    phdr->p_offset = cpu_to_dump32(s, s->note_offset);
    phdr->p_paddr = 0;
    phdr->p_filesz = cpu_to_dump32(s, s->note_size);
    phdr->p_memsz = cpu_to_dump32(s, s->note_size);
    phdr->p_vaddr = 0;
}

static void write_elf32_notes(WriteCoreDumpFunction f, DumpState *s,
                              Error **errp)
{
    CPUState *cpu;
    int ret;
    int id;

    CPU_FOREACH(cpu) {
        id = cpu_index(cpu);
        ret = cpu_write_elf32_note(f, cpu, id, s);
        if (ret < 0) {
            error_setg(errp, "dump: failed to write elf notes");
            return;
        }
    }

    CPU_FOREACH(cpu) {
        ret = cpu_write_elf32_qemunote(f, cpu, s);
        if (ret < 0) {
            error_setg(errp, "dump: failed to write CPU status");
            return;
        }
    }

    write_guest_note(f, s, errp);
}

static void write_elf_phdr_note(DumpState *s, Error **errp)
{
    ERRP_GUARD();
    Elf32_Phdr phdr32;
    Elf64_Phdr phdr64;
    void *phdr;
    size_t size;
    int ret;

    if (dump_is_64bit(s)) {
        write_elf64_phdr_note(s, &phdr64);
        size = sizeof(phdr64);
        phdr = &phdr64;
    } else {
        write_elf32_phdr_note(s, &phdr32);
        size = sizeof(phdr32);
        phdr = &phdr32;
    }

    ret = fd_write_vmcore(phdr, size, s);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "dump: failed to write program header table");
    }
}

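/*
 * Write a single zeroed section header; its sh_info field carries the
 * real program header count when it does not fit into e_phnum
 * (type == 0 selects Elf32, otherwise Elf64).
 */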
static void write_elf_section(DumpState *s, int type, Error **errp)
{
    Elf32_Shdr shdr32;
    Elf64_Shdr shdr64;
    int shdr_size;
    void *shdr;
    int ret;

    if (type == 0) {
        shdr_size = sizeof(Elf32_Shdr);
        memset(&shdr32, 0, shdr_size);
        shdr32.sh_info = cpu_to_dump32(s, s->phdr_num);
        shdr = &shdr32;
    } else {
        shdr_size = sizeof(Elf64_Shdr);
        memset(&shdr64, 0, shdr_size);
        shdr64.sh_info = cpu_to_dump32(s, s->phdr_num);
        shdr = &shdr64;
    }

    ret = fd_write_vmcore(shdr, shdr_size, s);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "dump: failed to write section header table");
    }
}

static void write_data(DumpState *s, void *buf, int length, Error **errp)
{
    int ret;

    ret = fd_write_vmcore(buf, length, s);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "dump: failed to save memory");
    } else {
        s->written_size += length;
    }
}

/* write the memory to vmcore. 1 page per I/O. */
static void write_memory(DumpState *s, GuestPhysBlock *block, ram_addr_t start,
                         int64_t size, Error **errp)
{
    ERRP_GUARD();
    int64_t i;

    for (i = 0; i < size / s->dump_info.page_size; i++) {
        write_data(s, block->host_addr + start + i * s->dump_info.page_size,
                   s->dump_info.page_size, errp);
        if (*errp) {
            return;
        }
    }

    if ((size % s->dump_info.page_size) != 0) {
        write_data(s, block->host_addr + start + i * s->dump_info.page_size,
                   size % s->dump_info.page_size, errp);
        if (*errp) {
            return;
        }
    }
}

/* get the memory's offset and size in the vmcore */
static void get_offset_range(hwaddr phys_addr,
                             ram_addr_t mapping_length,
                             DumpState *s,
                             hwaddr *p_offset,
                             hwaddr *p_filesz)
{
    GuestPhysBlock *block;
    hwaddr offset = s->memory_offset;
    int64_t size_in_block, start;

    /* When the memory is not stored into vmcore, offset will be -1 */
    *p_offset = -1;
    *p_filesz = 0;

    if (s->has_filter) {
        if (phys_addr < s->begin || phys_addr >= s->begin + s->length) {
            return;
        }
    }

    QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) {
        if (s->has_filter) {
            if (block->target_start >= s->begin + s->length ||
                block->target_end <= s->begin) {
                /* This block is out of the range */
                continue;
            }

            if (s->begin <= block->target_start) {
                start = block->target_start;
            } else {
                start = s->begin;
            }

            size_in_block = block->target_end - start;
            if (s->begin + s->length < block->target_end) {
                size_in_block -= block->target_end - (s->begin + s->length);
            }
        } else {
            start = block->target_start;
            size_in_block = block->target_end - block->target_start;
        }

        if (phys_addr >= start && phys_addr < start + size_in_block) {
            *p_offset = phys_addr - start + offset;

            /* The offset range mapped from the vmcore file must not spill over
             * the GuestPhysBlock, clamp it. The rest of the mapping will be
             * zero-filled in memory at load time; see
             * <http://refspecs.linuxbase.org/elf/gabi4+/ch5.pheader.html>.
             */
            *p_filesz = phys_addr + mapping_length <= start + size_in_block ?
                        mapping_length :
                        size_in_block - (phys_addr - start);
            return;
        }

        offset += size_in_block;
    }
}

static void write_elf_loads(DumpState *s, Error **errp)
{
    ERRP_GUARD();
    hwaddr offset, filesz;
    MemoryMapping *memory_mapping;
    uint32_t phdr_index = 1;

    QTAILQ_FOREACH(memory_mapping, &s->list.head, next) {
        get_offset_range(memory_mapping->phys_addr,
                         memory_mapping->length,
                         s, &offset, &filesz);
        if (dump_is_64bit(s)) {
            write_elf64_load(s, memory_mapping, phdr_index++, offset,
                             filesz, errp);
        } else {
            write_elf32_load(s, memory_mapping, phdr_index++, offset,
                             filesz, errp);
        }

        if (*errp) {
            return;
        }

        if (phdr_index >= s->phdr_num) {
            break;
        }
    }
}

static void write_elf_notes(DumpState *s, Error **errp)
{
    if (dump_is_64bit(s)) {
        write_elf64_notes(fd_write_vmcore, s, errp);
    } else {
        write_elf32_notes(fd_write_vmcore, s, errp);
    }
}

/* write elf header, PT_NOTE and elf note to vmcore. */
static void dump_begin(DumpState *s, Error **errp)
{
    ERRP_GUARD();

    /*
     * the vmcore's format is:
     *   --------------
     *   |  elf header |
     *   --------------
     *   |  PT_NOTE    |
     *   --------------
     *   |  PT_LOAD    |
     *   --------------
     *   |  ......     |
     *   --------------
     *   |  PT_LOAD    |
     *   --------------
     *   |  sec_hdr    |
     *   --------------
     *   |  elf note   |
     *   --------------
     *   |  memory     |
     *   --------------
     *
     * we only know where the memory is saved after we write the elf
     * notes into the vmcore.
     */

    /* write elf header to vmcore */
    if (dump_is_64bit(s)) {
        write_elf64_header(s, errp);
    } else {
        write_elf32_header(s, errp);
    }
    if (*errp) {
        return;
    }

    /* write PT_NOTE to vmcore */
    write_elf_phdr_note(s, errp);
    if (*errp) {
        return;
    }

    /* write all PT_LOAD to vmcore */
    write_elf_loads(s, errp);
    if (*errp) {
        return;
    }

    /* write section to vmcore */
    if (s->shdr_num) {
        write_elf_section(s, 1, errp);
        if (*errp) {
            return;
        }
    }

    /* write notes to vmcore */
    write_elf_notes(s, errp);
}

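/*
 * Advance to the block after "block", skipping blocks outside the
 * filter range; returns 1 when no block is left, 0 otherwise.  On
 * success, s->next_block and s->start are updated for the next
 * iteration.
 */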
static int get_next_block(DumpState *s, GuestPhysBlock *block)
{
    while (1) {
        block = QTAILQ_NEXT(block, next);
        if (!block) {
            /* no more block */
            return 1;
        }

        s->start = 0;
        s->next_block = block;
        if (s->has_filter) {
            if (block->target_start >= s->begin + s->length ||
                block->target_end <= s->begin) {
                /* This block is out of the range */
                continue;
            }

            if (s->begin > block->target_start) {
                s->start = s->begin - block->target_start;
            }
        }

        return 0;
    }
}

/* write all memory to vmcore */
static void dump_iterate(DumpState *s, Error **errp)
{
    ERRP_GUARD();
    GuestPhysBlock *block;
    int64_t size;

    do {
        block = s->next_block;

        size = block->target_end - block->target_start;
        if (s->has_filter) {
            size -= s->start;
            if (s->begin + s->length < block->target_end) {
                size -= block->target_end - (s->begin + s->length);
            }
        }
        write_memory(s, block, s->start, size, errp);
        if (*errp) {
            return;
        }

    } while (!get_next_block(s, block));
}

static void create_vmcore(DumpState *s, Error **errp)
{
    ERRP_GUARD();

    dump_begin(s, errp);
    if (*errp) {
        return;
    }

    dump_iterate(s, errp);
}

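/*
 * The flat format brackets the dump between makedumpfile start/end
 * headers so that it can be produced on a non-seekable fd; a tool such
 * as makedumpfile can later rearrange it into the regular layout.
 */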
static int write_start_flat_header(int fd)
{
    MakedumpfileHeader *mh;
    int ret = 0;

    QEMU_BUILD_BUG_ON(sizeof *mh > MAX_SIZE_MDF_HEADER);
    mh = g_malloc0(MAX_SIZE_MDF_HEADER);

    memcpy(mh->signature, MAKEDUMPFILE_SIGNATURE,
           MIN(sizeof mh->signature, sizeof MAKEDUMPFILE_SIGNATURE));

    mh->type = cpu_to_be64(TYPE_FLAT_HEADER);
    mh->version = cpu_to_be64(VERSION_FLAT_HEADER);

    size_t written_size;
    written_size = qemu_write_full(fd, mh, MAX_SIZE_MDF_HEADER);
    if (written_size != MAX_SIZE_MDF_HEADER) {
        ret = -1;
    }

    g_free(mh);
    return ret;
}

static int write_end_flat_header(int fd)
{
    MakedumpfileDataHeader mdh;

    mdh.offset = END_FLAG_FLAT_HEADER;
    mdh.buf_size = END_FLAG_FLAT_HEADER;

    size_t written_size;
    written_size = qemu_write_full(fd, &mdh, sizeof(mdh));
    if (written_size != sizeof(mdh)) {
        return -1;
    }

    return 0;
}

static int write_buffer(int fd, off_t offset, const void *buf, size_t size)
{
    size_t written_size;
    MakedumpfileDataHeader mdh;

    mdh.offset = cpu_to_be64(offset);
    mdh.buf_size = cpu_to_be64(size);

    written_size = qemu_write_full(fd, &mdh, sizeof(mdh));
    if (written_size != sizeof(mdh)) {
        return -1;
    }

    written_size = qemu_write_full(fd, buf, size);
    if (written_size != size) {
        return -1;
    }

    return 0;
}

static int buf_write_note(const void *buf, size_t size, void *opaque)
{
    DumpState *s = opaque;

    /* note_buf is not large enough */
    if (s->note_buf_offset + size > s->note_size) {
        return -1;
    }

    memcpy(s->note_buf + s->note_buf_offset, buf, size);

    s->note_buf_offset += size;

    return 0;
}

/*
 * This function retrieves various sizes from an ELF note header.
 *
 * @note has to be a valid ELF note. The returned sizes are unmodified
 * (not padded or rounded up to a multiple of 4).
 */
static void get_note_sizes(DumpState *s, const void *note,
                           uint64_t *note_head_size,
                           uint64_t *name_size,
                           uint64_t *desc_size)
{
    uint64_t note_head_sz;
    uint64_t name_sz;
    uint64_t desc_sz;

    if (dump_is_64bit(s)) {
        const Elf64_Nhdr *hdr = note;
        note_head_sz = sizeof(Elf64_Nhdr);
        name_sz = tswap64(hdr->n_namesz);
        desc_sz = tswap64(hdr->n_descsz);
    } else {
        const Elf32_Nhdr *hdr = note;
        note_head_sz = sizeof(Elf32_Nhdr);
        name_sz = tswap32(hdr->n_namesz);
        desc_sz = tswap32(hdr->n_descsz);
    }

    if (note_head_size) {
        *note_head_size = note_head_sz;
    }
    if (name_size) {
        *name_size = name_sz;
    }
    if (desc_size) {
        *desc_size = desc_sz;
    }
}

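/* Compare an ELF note's name (including the trailing NUL) with @name */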
static bool note_name_equal(DumpState *s,
                            const uint8_t *note, const char *name)
{
    int len = strlen(name) + 1;
    uint64_t head_size, name_size;

    get_note_sizes(s, note, &head_size, &name_size, NULL);
    head_size = ROUND_UP(head_size, 4);

    return name_size == len && memcmp(note + head_size, name, len) == 0;
}

/* write common header, sub header and elf note to vmcore */
static void create_header32(DumpState *s, Error **errp)
{
    ERRP_GUARD();
    DiskDumpHeader32 *dh = NULL;
    KdumpSubHeader32 *kh = NULL;
    size_t size;
    uint32_t block_size;
    uint32_t sub_hdr_size;
    uint32_t bitmap_blocks;
    uint32_t status = 0;
    uint64_t offset_note;

    /* write the common header; the kdump-compressed format used is version 6 */
    size = sizeof(DiskDumpHeader32);
    dh = g_malloc0(size);

    memcpy(dh->signature, KDUMP_SIGNATURE, SIG_LEN);
    dh->header_version = cpu_to_dump32(s, 6);
    block_size = s->dump_info.page_size;
    dh->block_size = cpu_to_dump32(s, block_size);
    sub_hdr_size = sizeof(struct KdumpSubHeader32) + s->note_size;
    sub_hdr_size = DIV_ROUND_UP(sub_hdr_size, block_size);
    dh->sub_hdr_size = cpu_to_dump32(s, sub_hdr_size);
    /* dh->max_mapnr may be truncated, full 64bit is in kh.max_mapnr_64 */
    dh->max_mapnr = cpu_to_dump32(s, MIN(s->max_mapnr, UINT_MAX));
    dh->nr_cpus = cpu_to_dump32(s, s->nr_cpus);
    bitmap_blocks = DIV_ROUND_UP(s->len_dump_bitmap, block_size) * 2;
    dh->bitmap_blocks = cpu_to_dump32(s, bitmap_blocks);
    strncpy(dh->utsname.machine, ELF_MACHINE_UNAME, sizeof(dh->utsname.machine));

    if (s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) {
        status |= DUMP_DH_COMPRESSED_ZLIB;
    }
#ifdef CONFIG_LZO
    if (s->flag_compress & DUMP_DH_COMPRESSED_LZO) {
        status |= DUMP_DH_COMPRESSED_LZO;
    }
#endif
#ifdef CONFIG_SNAPPY
    if (s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) {
        status |= DUMP_DH_COMPRESSED_SNAPPY;
    }
#endif
    dh->status = cpu_to_dump32(s, status);

    if (write_buffer(s->fd, 0, dh, size) < 0) {
        error_setg(errp, "dump: failed to write disk dump header");
        goto out;
    }

    /* write sub header */
    size = sizeof(KdumpSubHeader32);
    kh = g_malloc0(size);

    /* 64bit max_mapnr_64 */
    kh->max_mapnr_64 = cpu_to_dump64(s, s->max_mapnr);
    kh->phys_base = cpu_to_dump32(s, s->dump_info.phys_base);
    kh->dump_level = cpu_to_dump32(s, DUMP_LEVEL);

    offset_note = DISKDUMP_HEADER_BLOCKS * block_size + size;
    if (s->guest_note &&
        note_name_equal(s, s->guest_note, "VMCOREINFO")) {
        uint64_t hsize, name_size, size_vmcoreinfo_desc, offset_vmcoreinfo;

        get_note_sizes(s, s->guest_note,
                       &hsize, &name_size, &size_vmcoreinfo_desc);
        offset_vmcoreinfo = offset_note + s->note_size - s->guest_note_size +
            (DIV_ROUND_UP(hsize, 4) + DIV_ROUND_UP(name_size, 4)) * 4;
        kh->offset_vmcoreinfo = cpu_to_dump64(s, offset_vmcoreinfo);
        kh->size_vmcoreinfo = cpu_to_dump32(s, size_vmcoreinfo_desc);
    }

    kh->offset_note = cpu_to_dump64(s, offset_note);
    kh->note_size = cpu_to_dump32(s, s->note_size);

    if (write_buffer(s->fd, DISKDUMP_HEADER_BLOCKS *
                     block_size, kh, size) < 0) {
        error_setg(errp, "dump: failed to write kdump sub header");
        goto out;
    }

    /* write note */
    s->note_buf = g_malloc0(s->note_size);
    s->note_buf_offset = 0;

    /* use s->note_buf to store notes temporarily */
    write_elf32_notes(buf_write_note, s, errp);
    if (*errp) {
        goto out;
    }
    if (write_buffer(s->fd, offset_note, s->note_buf,
                     s->note_size) < 0) {
        error_setg(errp, "dump: failed to write notes");
        goto out;
    }

    /* get offset of dump_bitmap */
    s->offset_dump_bitmap = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size) *
                             block_size;

    /* get offset of page */
    s->offset_page = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size + bitmap_blocks) *
                     block_size;

out:
    g_free(dh);
    g_free(kh);
    g_free(s->note_buf);
}

/* write common header, sub header and elf note to vmcore */
static void create_header64(DumpState *s, Error **errp)
{
    ERRP_GUARD();
    DiskDumpHeader64 *dh = NULL;
    KdumpSubHeader64 *kh = NULL;
    size_t size;
    uint32_t block_size;
    uint32_t sub_hdr_size;
    uint32_t bitmap_blocks;
    uint32_t status = 0;
    uint64_t offset_note;

    /* write the common header; the kdump-compressed format used is version 6 */
    size = sizeof(DiskDumpHeader64);
    dh = g_malloc0(size);

    memcpy(dh->signature, KDUMP_SIGNATURE, SIG_LEN);
    dh->header_version = cpu_to_dump32(s, 6);
    block_size = s->dump_info.page_size;
    dh->block_size = cpu_to_dump32(s, block_size);
    sub_hdr_size = sizeof(struct KdumpSubHeader64) + s->note_size;
    sub_hdr_size = DIV_ROUND_UP(sub_hdr_size, block_size);
    dh->sub_hdr_size = cpu_to_dump32(s, sub_hdr_size);
    /* dh->max_mapnr may be truncated, full 64bit is in kh.max_mapnr_64 */
    dh->max_mapnr = cpu_to_dump32(s, MIN(s->max_mapnr, UINT_MAX));
    dh->nr_cpus = cpu_to_dump32(s, s->nr_cpus);
    bitmap_blocks = DIV_ROUND_UP(s->len_dump_bitmap, block_size) * 2;
    dh->bitmap_blocks = cpu_to_dump32(s, bitmap_blocks);
    strncpy(dh->utsname.machine, ELF_MACHINE_UNAME, sizeof(dh->utsname.machine));

    if (s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) {
        status |= DUMP_DH_COMPRESSED_ZLIB;
    }
#ifdef CONFIG_LZO
    if (s->flag_compress & DUMP_DH_COMPRESSED_LZO) {
        status |= DUMP_DH_COMPRESSED_LZO;
    }
#endif
#ifdef CONFIG_SNAPPY
    if (s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) {
        status |= DUMP_DH_COMPRESSED_SNAPPY;
    }
#endif
    dh->status = cpu_to_dump32(s, status);

    if (write_buffer(s->fd, 0, dh, size) < 0) {
        error_setg(errp, "dump: failed to write disk dump header");
        goto out;
    }

    /* write sub header */
    size = sizeof(KdumpSubHeader64);
    kh = g_malloc0(size);

    /* 64bit max_mapnr_64 */
    kh->max_mapnr_64 = cpu_to_dump64(s, s->max_mapnr);
    kh->phys_base = cpu_to_dump64(s, s->dump_info.phys_base);
    kh->dump_level = cpu_to_dump32(s, DUMP_LEVEL);

    offset_note = DISKDUMP_HEADER_BLOCKS * block_size + size;
    if (s->guest_note &&
        note_name_equal(s, s->guest_note, "VMCOREINFO")) {
        uint64_t hsize, name_size, size_vmcoreinfo_desc, offset_vmcoreinfo;

        get_note_sizes(s, s->guest_note,
                       &hsize, &name_size, &size_vmcoreinfo_desc);
        offset_vmcoreinfo = offset_note + s->note_size - s->guest_note_size +
            (DIV_ROUND_UP(hsize, 4) + DIV_ROUND_UP(name_size, 4)) * 4;
        kh->offset_vmcoreinfo = cpu_to_dump64(s, offset_vmcoreinfo);
        kh->size_vmcoreinfo = cpu_to_dump64(s, size_vmcoreinfo_desc);
    }

    kh->offset_note = cpu_to_dump64(s, offset_note);
    kh->note_size = cpu_to_dump64(s, s->note_size);

    if (write_buffer(s->fd, DISKDUMP_HEADER_BLOCKS *
                     block_size, kh, size) < 0) {
        error_setg(errp, "dump: failed to write kdump sub header");
        goto out;
    }

    /* write note */
    s->note_buf = g_malloc0(s->note_size);
    s->note_buf_offset = 0;

    /* use s->note_buf to store notes temporarily */
    write_elf64_notes(buf_write_note, s, errp);
    if (*errp) {
        goto out;
    }

    if (write_buffer(s->fd, offset_note, s->note_buf,
                     s->note_size) < 0) {
        error_setg(errp, "dump: failed to write notes");
        goto out;
    }

    /* get offset of dump_bitmap */
    s->offset_dump_bitmap = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size) *
                             block_size;

    /* get offset of page */
    s->offset_page = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size + bitmap_blocks) *
                     block_size;

out:
    g_free(dh);
    g_free(kh);
    g_free(s->note_buf);
}

static void write_dump_header(DumpState *s, Error **errp)
{
    if (dump_is_64bit(s)) {
        create_header64(s, errp);
    } else {
        create_header32(s, errp);
    }
}

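/* The dump bitmap is cached and flushed in chunks of one target page */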
static size_t dump_bitmap_get_bufsize(DumpState *s)
{
    return s->dump_info.page_size;
}

/*
 * Set the dump_bitmap sequentially. Bits before last_pfn must not be
 * rewritten, so to set the first bit, pass 0 for both last_pfn and pfn.
 * set_dump_bitmap always leaves the most recently set bit
 * unsynchronized; setting bit (last bit + sizeof(buf) * 8) to 0 flushes
 * the content of buf into the vmcore, i.e. it synchronizes the
 * unsynchronized bit.
 */
static int set_dump_bitmap(uint64_t last_pfn, uint64_t pfn, bool value,
                           uint8_t *buf, DumpState *s)
{
    off_t old_offset, new_offset;
    off_t offset_bitmap1, offset_bitmap2;
    uint32_t byte, bit;
    size_t bitmap_bufsize = dump_bitmap_get_bufsize(s);
    size_t bits_per_buf = bitmap_bufsize * CHAR_BIT;

    /* bits before last_pfn must not be set again */
    assert(last_pfn <= pfn);

    /*
     * If the bit to be set is not cached in buf, flush the data in buf
     * to the vmcore first. Making new_offset bigger than old_offset also
     * syncs any remaining data into the vmcore.
     */
    old_offset = bitmap_bufsize * (last_pfn / bits_per_buf);
    new_offset = bitmap_bufsize * (pfn / bits_per_buf);

    while (old_offset < new_offset) {
        /* calculate the offset and write dump_bitmap */
        offset_bitmap1 = s->offset_dump_bitmap + old_offset;
        if (write_buffer(s->fd, offset_bitmap1, buf,
                         bitmap_bufsize) < 0) {
            return -1;
        }

        /* dump level 1 is chosen, so 1st and 2nd bitmap are same */
        offset_bitmap2 = s->offset_dump_bitmap + s->len_dump_bitmap +
                         old_offset;
        if (write_buffer(s->fd, offset_bitmap2, buf,
                         bitmap_bufsize) < 0) {
            return -1;
        }

        memset(buf, 0, bitmap_bufsize);
        old_offset += bitmap_bufsize;
    }

    /* get the exact place of the bit in the buf, and set it */
    byte = (pfn % bits_per_buf) / CHAR_BIT;
    bit = (pfn % bits_per_buf) % CHAR_BIT;
    if (value) {
        buf[byte] |= 1u << bit;
    } else {
        buf[byte] &= ~(1u << bit);
    }

    return 0;
}

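/*
 * Translate between guest physical addresses and page frame numbers;
 * ARCH_PFN_OFFSET accounts for RAM that does not start at PFN 0.
 */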
static uint64_t dump_paddr_to_pfn(DumpState *s, uint64_t addr)
{
    int target_page_shift = ctz32(s->dump_info.page_size);

    return (addr >> target_page_shift) - ARCH_PFN_OFFSET;
}

static uint64_t dump_pfn_to_paddr(DumpState *s, uint64_t pfn)
{
    int target_page_shift = ctz32(s->dump_info.page_size);

    return (pfn + ARCH_PFN_OFFSET) << target_page_shift;
}

/*
 * Examine every page and return the page frame number and the address
 * of the page. bufptr can be NULL. Note: the blocks here are supposed
 * to reflect guest-phys blocks, so block->target_start and
 * block->target_end should be integral multiples of the target page
 * size.
 */
static bool get_next_page(GuestPhysBlock **blockptr, uint64_t *pfnptr,
                          uint8_t **bufptr, DumpState *s)
{
    GuestPhysBlock *block = *blockptr;
    hwaddr addr, target_page_mask = ~((hwaddr)s->dump_info.page_size - 1);
    uint8_t *buf;

    /* block == NULL means the start of the iteration */
    if (!block) {
        block = QTAILQ_FIRST(&s->guest_phys_blocks.head);
        *blockptr = block;
        assert((block->target_start & ~target_page_mask) == 0);
        assert((block->target_end & ~target_page_mask) == 0);
        *pfnptr = dump_paddr_to_pfn(s, block->target_start);
        if (bufptr) {
            *bufptr = block->host_addr;
        }
        return true;
    }

    *pfnptr = *pfnptr + 1;
    addr = dump_pfn_to_paddr(s, *pfnptr);

    if ((addr >= block->target_start) &&
        (addr + s->dump_info.page_size <= block->target_end)) {
        buf = block->host_addr + (addr - block->target_start);
    } else {
        /* the next page is in the next block */
        block = QTAILQ_NEXT(block, next);
        *blockptr = block;
        if (!block) {
            return false;
        }
        assert((block->target_start & ~target_page_mask) == 0);
        assert((block->target_end & ~target_page_mask) == 0);
        *pfnptr = dump_paddr_to_pfn(s, block->target_start);
        buf = block->host_addr;
    }

    if (bufptr) {
        *bufptr = buf;
    }

    return true;
}

static void write_dump_bitmap(DumpState *s, Error **errp)
{
    int ret = 0;
    uint64_t last_pfn, pfn;
    void *dump_bitmap_buf;
    size_t num_dumpable;
    GuestPhysBlock *block_iter = NULL;
    size_t bitmap_bufsize = dump_bitmap_get_bufsize(s);
    size_t bits_per_buf = bitmap_bufsize * CHAR_BIT;

    /* dump_bitmap_buf is used to store dump_bitmap temporarily */
    dump_bitmap_buf = g_malloc0(bitmap_bufsize);

    num_dumpable = 0;
    last_pfn = 0;

    /*
     * Examine memory page by page, and set the bit in dump_bitmap
     * corresponding to each existing page.
     */
    while (get_next_page(&block_iter, &pfn, NULL, s)) {
        ret = set_dump_bitmap(last_pfn, pfn, true, dump_bitmap_buf, s);
        if (ret < 0) {
            error_setg(errp, "dump: failed to set dump_bitmap");
            goto out;
        }

        last_pfn = pfn;
        num_dumpable++;
    }

    /*
     * set_dump_bitmap always leaves the most recently set bit
     * unsynchronized. Here we set the remaining bits from last_pfn to the
     * end of the bitmap buffer to 0. With those set, the unsynchronized
     * bit is synchronized into the vmcore.
     */
    if (num_dumpable > 0) {
        ret = set_dump_bitmap(last_pfn, last_pfn + bits_per_buf, false,
                              dump_bitmap_buf, s);
        if (ret < 0) {
            error_setg(errp, "dump: failed to sync dump_bitmap");
            goto out;
        }
    }

    /* number of dumpable pages that will be dumped later */
    s->num_dumpable = num_dumpable;

out:
    g_free(dump_bitmap_buf);
}

static void prepare_data_cache(DataCache *data_cache, DumpState *s,
                               off_t offset)
{
    data_cache->fd = s->fd;
    data_cache->data_size = 0;
    data_cache->buf_size = 4 * dump_bitmap_get_bufsize(s);
    data_cache->buf = g_malloc0(data_cache->buf_size);
    data_cache->offset = offset;
}

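/*
 * Buffered write: append @buf to the cache, flushing the cache to
 * dc->fd at dc->offset whenever it would overflow; with flag_sync set,
 * just flush whatever is cached.
 */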
static int write_cache(DataCache *dc, const void *buf, size_t size,
                       bool flag_sync)
{
    /*
     * dc->buf_size should not be less than size, otherwise dc will never
     * be large enough
     */
    assert(size <= dc->buf_size);

    /*
     * If flag_sync is set, synchronize the data in dc->buf into the
     * vmcore. Otherwise check whether there is enough space left to
     * cache the data in buf; if not, write the data in dc->buf to dc->fd
     * and reset dc->buf.
     */
    if ((!flag_sync && dc->data_size + size > dc->buf_size) ||
        (flag_sync && dc->data_size > 0)) {
        if (write_buffer(dc->fd, dc->offset, dc->buf, dc->data_size) < 0) {
            return -1;
        }

        dc->offset += dc->data_size;
        dc->data_size = 0;
    }

    if (!flag_sync) {
        memcpy(dc->buf + dc->data_size, buf, size);
        dc->data_size += size;
    }

    return 0;
}

static void free_data_cache(DataCache *data_cache)
{
    g_free(data_cache->buf);
}

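/*
 * Worst-case output buffer size needed to compress one page with the
 * chosen format; 0 indicates an unknown format.
 */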
static size_t get_len_buf_out(size_t page_size, uint32_t flag_compress)
{
    switch (flag_compress) {
    case DUMP_DH_COMPRESSED_ZLIB:
        return compressBound(page_size);

    case DUMP_DH_COMPRESSED_LZO:
        /*
         * LZO will expand incompressible data by a little amount. Please check
         * the following URL to see the expansion calculation:
         * http://www.oberhumer.com/opensource/lzo/lzofaq.php
         */
        return page_size + page_size / 16 + 64 + 3;

#ifdef CONFIG_SNAPPY
    case DUMP_DH_COMPRESSED_SNAPPY:
        return snappy_max_compressed_length(page_size);
#endif
    }
    return 0;
}

static void write_dump_pages(DumpState *s, Error **errp)
{
    int ret = 0;
    DataCache page_desc, page_data;
    size_t len_buf_out, size_out;
#ifdef CONFIG_LZO
    lzo_bytep wrkmem = NULL;
#endif
    uint8_t *buf_out = NULL;
    off_t offset_desc, offset_data;
    PageDescriptor pd, pd_zero;
    uint8_t *buf;
    GuestPhysBlock *block_iter = NULL;
    uint64_t pfn_iter;

    /* get offset of page_desc and page_data in dump file */
    offset_desc = s->offset_page;
    offset_data = offset_desc + sizeof(PageDescriptor) * s->num_dumpable;

    prepare_data_cache(&page_desc, s, offset_desc);
    prepare_data_cache(&page_data, s, offset_data);

    /* prepare buffer to store compressed data */
    len_buf_out = get_len_buf_out(s->dump_info.page_size, s->flag_compress);
    assert(len_buf_out != 0);

#ifdef CONFIG_LZO
    wrkmem = g_malloc(LZO1X_1_MEM_COMPRESS);
#endif

    buf_out = g_malloc(len_buf_out);

    /*
     * init the zero page's page_desc and page_data, because every zero
     * page uses the same page_data
     */
    pd_zero.size = cpu_to_dump32(s, s->dump_info.page_size);
    pd_zero.flags = cpu_to_dump32(s, 0);
    pd_zero.offset = cpu_to_dump64(s, offset_data);
    pd_zero.page_flags = cpu_to_dump64(s, 0);
    buf = g_malloc0(s->dump_info.page_size);
    ret = write_cache(&page_data, buf, s->dump_info.page_size, false);
    g_free(buf);
    if (ret < 0) {
        error_setg(errp, "dump: failed to write page data (zero page)");
        goto out;
    }

    offset_data += s->dump_info.page_size;

    /*
     * Dump memory to the vmcore page by page. All zero pages share the
     * single copy stored in the first page of the page section.
     */
    while (get_next_page(&block_iter, &pfn_iter, &buf, s)) {
        /* check zero page */
        if (buffer_is_zero(buf, s->dump_info.page_size)) {
            ret = write_cache(&page_desc, &pd_zero, sizeof(PageDescriptor),
                              false);
            if (ret < 0) {
                error_setg(errp, "dump: failed to write page desc");
                goto out;
            }
        } else {
            /*
             * not a zero page, so:
             * 1. compress the page
             * 2. write the compressed page into the cache of page_data
             * 3. get the page desc of the compressed page and write it into
             *    the cache of page_desc
             *
             * Only one compression format is used here, since
             * s->flag_compress is set. If compression fails, we fall back
             * to saving the page in plaintext.
             */
            size_out = len_buf_out;
            if ((s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) &&
                    (compress2(buf_out, (uLongf *)&size_out, buf,
                               s->dump_info.page_size, Z_BEST_SPEED) == Z_OK) &&
                    (size_out < s->dump_info.page_size)) {
                pd.flags = cpu_to_dump32(s, DUMP_DH_COMPRESSED_ZLIB);
                pd.size  = cpu_to_dump32(s, size_out);

                ret = write_cache(&page_data, buf_out, size_out, false);
                if (ret < 0) {
                    error_setg(errp, "dump: failed to write page data");
                    goto out;
                }
#ifdef CONFIG_LZO
            } else if ((s->flag_compress & DUMP_DH_COMPRESSED_LZO) &&
                    (lzo1x_1_compress(buf, s->dump_info.page_size, buf_out,
                    (lzo_uint *)&size_out, wrkmem) == LZO_E_OK) &&
                    (size_out < s->dump_info.page_size)) {
                pd.flags = cpu_to_dump32(s, DUMP_DH_COMPRESSED_LZO);
                pd.size  = cpu_to_dump32(s, size_out);

                ret = write_cache(&page_data, buf_out, size_out, false);
                if (ret < 0) {
                    error_setg(errp, "dump: failed to write page data");
                    goto out;
                }
#endif
#ifdef CONFIG_SNAPPY
            } else if ((s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) &&
                    (snappy_compress((char *)buf, s->dump_info.page_size,
                    (char *)buf_out, &size_out) == SNAPPY_OK) &&
                    (size_out < s->dump_info.page_size)) {
                pd.flags = cpu_to_dump32(s, DUMP_DH_COMPRESSED_SNAPPY);
                pd.size  = cpu_to_dump32(s, size_out);

                ret = write_cache(&page_data, buf_out, size_out, false);
                if (ret < 0) {
                    error_setg(errp, "dump: failed to write page data");
                    goto out;
                }
#endif
            } else {
                /*
                 * fall back to saving in plaintext; size_out must be
                 * set to the target's page size
                 */
                pd.flags = cpu_to_dump32(s, 0);
                size_out = s->dump_info.page_size;
                pd.size = cpu_to_dump32(s, size_out);

                ret = write_cache(&page_data, buf,
                                  s->dump_info.page_size, false);
                if (ret < 0) {
                    error_setg(errp, "dump: failed to write page data");
                    goto out;
                }
            }

            /* get and write page desc here */
            pd.page_flags = cpu_to_dump64(s, 0);
            pd.offset = cpu_to_dump64(s, offset_data);
            offset_data += size_out;

            ret = write_cache(&page_desc, &pd, sizeof(PageDescriptor), false);
            if (ret < 0) {
                error_setg(errp, "dump: failed to write page desc");
                goto out;
            }
        }
        s->written_size += s->dump_info.page_size;
    }

    ret = write_cache(&page_desc, NULL, 0, true);
    if (ret < 0) {
        error_setg(errp, "dump: failed to sync cache for page_desc");
        goto out;
    }
    ret = write_cache(&page_data, NULL, 0, true);
    if (ret < 0) {
        error_setg(errp, "dump: failed to sync cache for page_data");
        goto out;
    }

out:
    free_data_cache(&page_desc);
    free_data_cache(&page_data);

#ifdef CONFIG_LZO
    g_free(wrkmem);
#endif

    g_free(buf_out);
}

static void create_kdump_vmcore(DumpState *s, Error **errp)
{
    ERRP_GUARD();
    int ret;

    /*
     * the kdump-compressed format is:
     *                                               File offset
     *  +------------------------------------------+ 0x0
     *  |    main header (struct disk_dump_header) |
     *  |------------------------------------------+ block 1
     *  |    sub header (struct kdump_sub_header)  |
     *  |------------------------------------------+ block 2
     *  |            1st-dump_bitmap               |
     *  |------------------------------------------+ block 2 + X blocks
     *  |            2nd-dump_bitmap               | (aligned by block)
     *  |------------------------------------------+ block 2 + 2 * X blocks
     *  |  page desc for pfn 0 (struct page_desc)  | (aligned by block)
     *  |  page desc for pfn 1 (struct page_desc)  |
     *  |                    :                     |
     *  |------------------------------------------| (not aligned by block)
     *  |         page data (pfn 0)                |
     *  |         page data (pfn 1)                |
     *  |                    :                     |
     *  +------------------------------------------+
     */

    ret = write_start_flat_header(s->fd);
    if (ret < 0) {
        error_setg(errp, "dump: failed to write start flat header");
        return;
    }

    write_dump_header(s, errp);
    if (*errp) {
        return;
    }

    write_dump_bitmap(s, errp);
    if (*errp) {
        return;
    }

    write_dump_pages(s, errp);
    if (*errp) {
        return;
    }

    ret = write_end_flat_header(s->fd);
    if (ret < 0) {
        error_setg(errp, "dump: failed to write end flat header");
        return;
    }
}

static ram_addr_t get_start_block(DumpState *s)
{
    GuestPhysBlock *block;

    if (!s->has_filter) {
        s->next_block = QTAILQ_FIRST(&s->guest_phys_blocks.head);
        return 0;
    }

    QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) {
        if (block->target_start >= s->begin + s->length ||
            block->target_end <= s->begin) {
            /* This block is out of the range */
            continue;
        }

        s->next_block = block;
        if (s->begin > block->target_start) {
            s->start = s->begin - block->target_start;
        } else {
            s->start = 0;
        }
        return s->start;
    }

    return -1;
}

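/* Highest page frame number, taken from the end of the last block */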
static void get_max_mapnr(DumpState *s)
{
    GuestPhysBlock *last_block;

    last_block = QTAILQ_LAST(&s->guest_phys_blocks.head);
    s->max_mapnr = dump_paddr_to_pfn(s, last_block->target_end);
}

static DumpState dump_state_global = { .status = DUMP_STATUS_NONE };

static void dump_state_prepare(DumpState *s)
{
    /* zero the struct, setting status to active */
    *s = (DumpState) { .status = DUMP_STATUS_ACTIVE };
}

bool qemu_system_dump_in_progress(void)
{
    DumpState *state = &dump_state_global;
    return (qatomic_read(&state->status) == DUMP_STATUS_ACTIVE);
}

/* calculate total size of memory to be dumped (taking filter into
 * account) */
static int64_t dump_calculate_size(DumpState *s)
{
    GuestPhysBlock *block;
    int64_t size = 0, total = 0, left = 0, right = 0;

    QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) {
        if (s->has_filter) {
            /* calculate the overlapped region. */
            left = MAX(s->begin, block->target_start);
            right = MIN(s->begin + s->length, block->target_end);
            size = right - left;
            size = size > 0 ? size : 0;
        } else {
            /* count the whole region in */
            size = (block->target_end - block->target_start);
        }
        total += size;
    }

    return total;
}

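/*
 * Refine the previously guessed phys_base using the NUMBER(phys_base)=
 * or NUMBER(PHYS_OFFSET)= line of the guest's VMCOREINFO note, when one
 * is present.
 */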
static void vmcoreinfo_update_phys_base(DumpState *s)
{
    uint64_t size, note_head_size, name_size, phys_base;
    char **lines;
    uint8_t *vmci;
    size_t i;

    if (!note_name_equal(s, s->guest_note, "VMCOREINFO")) {
        return;
    }

    get_note_sizes(s, s->guest_note, &note_head_size, &name_size, &size);
    note_head_size = ROUND_UP(note_head_size, 4);

    vmci = s->guest_note + note_head_size + ROUND_UP(name_size, 4);
    *(vmci + size) = '\0';

    lines = g_strsplit((char *)vmci, "\n", -1);
    for (i = 0; lines[i]; i++) {
        const char *prefix = NULL;

        if (s->dump_info.d_machine == EM_X86_64) {
            prefix = "NUMBER(phys_base)=";
        } else if (s->dump_info.d_machine == EM_AARCH64) {
            prefix = "NUMBER(PHYS_OFFSET)=";
        }

        if (prefix && g_str_has_prefix(lines[i], prefix)) {
            if (qemu_strtou64(lines[i] + strlen(prefix), NULL, 16,
                              &phys_base) < 0) {
                warn_report("Failed to read %s", prefix);
            } else {
                s->dump_info.phys_base = phys_base;
            }
            break;
        }
    }

    g_strfreev(lines);
}

1608 static void dump_init(DumpState *s, int fd, bool has_format,
1609                       DumpGuestMemoryFormat format, bool paging, bool has_filter,
1610                       int64_t begin, int64_t length, Error **errp)
1611 {
1612     ERRP_GUARD();
1613     VMCoreInfoState *vmci = vmcoreinfo_find();
1614     CPUState *cpu;
1615     int nr_cpus;
1616     int ret;
1617 
1618     s->has_format = has_format;
1619     s->format = format;
1620     s->written_size = 0;
1621 
1622     /* kdump-compressed is conflict with paging and filter */
1623     if (has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) {
1624         assert(!paging && !has_filter);
1625     }
1626 
1627     if (runstate_is_running()) {
1628         vm_stop(RUN_STATE_SAVE_VM);
1629         s->resume = true;
1630     } else {
1631         s->resume = false;
1632     }
1633 
1634     /* If we use KVM, we should synchronize the registers before we get dump
1635      * info or physmap info.
1636      */
1637     cpu_synchronize_all_states();
1638     nr_cpus = 0;
1639     CPU_FOREACH(cpu) {
1640         nr_cpus++;
1641     }
1642 
1643     s->fd = fd;
1644     s->has_filter = has_filter;
1645     s->begin = begin;
1646     s->length = length;
1647 
1648     memory_mapping_list_init(&s->list);
1649 
1650     guest_phys_blocks_init(&s->guest_phys_blocks);
1651     guest_phys_blocks_append(&s->guest_phys_blocks);
1652     s->total_size = dump_calculate_size(s);
1653 #ifdef DEBUG_DUMP_GUEST_MEMORY
1654     fprintf(stderr, "DUMP: total memory to dump: %lu\n", s->total_size);
1655 #endif
1656 
1657     /* it does not make sense to dump non-existent memory */
1658     if (!s->total_size) {
1659         error_setg(errp, "dump: no guest memory to dump");
1660         goto cleanup;
1661     }
1662 
1663     s->start = get_start_block(s);
1664     if (s->start == -1) {
1665         error_setg(errp, QERR_INVALID_PARAMETER, "begin");
1666         goto cleanup;
1667     }
1668 
1669     /* get dump info: endian, class and architecture.
1670      * If the target architecture is not supported, cpu_get_dump_info() will
1671      * return -1.
1672      */
1673     ret = cpu_get_dump_info(&s->dump_info, &s->guest_phys_blocks);
1674     if (ret < 0) {
1675         error_setg(errp, QERR_UNSUPPORTED);
1676         goto cleanup;
1677     }
1678 
1679     if (!s->dump_info.page_size) {
1680         s->dump_info.page_size = TARGET_PAGE_SIZE;
1681     }
1682 
1683     s->note_size = cpu_get_note_size(s->dump_info.d_class,
1684                                      s->dump_info.d_machine, nr_cpus);
1685     if (s->note_size < 0) {
1686         error_setg(errp, QERR_UNSUPPORTED);
1687         goto cleanup;
1688     }
1689 
1690     /*
1691      * The goal of this block is to (a) update the previously guessed
1692      * phys_base, (b) copy the guest note out of the guest.
1693      * Failure to do so is not fatal for dumping.
1694      */
1695     if (vmci) {
1696         uint64_t addr, note_head_size, name_size, desc_size;
1697         uint32_t size;
1698         uint16_t format;
1699 
1700         note_head_size = dump_is_64bit(s) ?
1701             sizeof(Elf64_Nhdr) : sizeof(Elf32_Nhdr);
1702 
1703         format = le16_to_cpu(vmci->vmcoreinfo.guest_format);
1704         size = le32_to_cpu(vmci->vmcoreinfo.size);
1705         addr = le64_to_cpu(vmci->vmcoreinfo.paddr);
1706         if (!vmci->has_vmcoreinfo) {
1707             warn_report("guest note is not present");
1708         } else if (size < note_head_size || size > MAX_GUEST_NOTE_SIZE) {
1709             warn_report("guest note size is invalid: %" PRIu32, size);
1710         } else if (format != FW_CFG_VMCOREINFO_FORMAT_ELF) {
1711             warn_report("guest note format is unsupported: %" PRIu16, format);
1712         } else {
1713             s->guest_note = g_malloc(size + 1); /* +1 for adding \0 */
1714             cpu_physical_memory_read(addr, s->guest_note, size);
1715 
1716             get_note_sizes(s, s->guest_note, NULL, &name_size, &desc_size);
1717             s->guest_note_size = ELF_NOTE_SIZE(note_head_size, name_size,
1718                                                desc_size);
1719             if (name_size > MAX_GUEST_NOTE_SIZE ||
1720                 desc_size > MAX_GUEST_NOTE_SIZE ||
1721                 s->guest_note_size > size) {
1722                 warn_report("Invalid guest note header");
1723                 g_free(s->guest_note);
1724                 s->guest_note = NULL;
1725             } else {
1726                 vmcoreinfo_update_phys_base(s);
1727                 s->note_size += s->guest_note_size;
1728             }
1729         }
1730     }
1731 
1732     /* get memory mapping */
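         /*
          * With paging, the guest's page tables are walked to build
          * virtual-address mappings; otherwise each physical block is
          * mapped 1:1.
          */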
1733     if (paging) {
1734         qemu_get_guest_memory_mapping(&s->list, &s->guest_phys_blocks, errp);
1735         if (*errp) {
1736             goto cleanup;
1737         }
1738     } else {
1739         qemu_get_guest_simple_memory_mapping(&s->list, &s->guest_phys_blocks);
1740     }
1741 
1742     s->nr_cpus = nr_cpus;
1743 
1744     get_max_mapnr(s);
1745 
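         /*
          * The dump bitmap has one bit per guest page; round it up to
          * whole bytes and then to a multiple of the dump page size.
          */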
1746     uint64_t tmp;
1747     tmp = DIV_ROUND_UP(DIV_ROUND_UP(s->max_mapnr, CHAR_BIT),
1748                        s->dump_info.page_size);
1749     s->len_dump_bitmap = tmp * s->dump_info.page_size;
1750 
1751     /* init for kdump-compressed format */
1752     if (has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) {
1753         switch (format) {
1754         case DUMP_GUEST_MEMORY_FORMAT_KDUMP_ZLIB:
1755             s->flag_compress = DUMP_DH_COMPRESSED_ZLIB;
1756             break;
1757 
1758         case DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO:
1759 #ifdef CONFIG_LZO
1760             if (lzo_init() != LZO_E_OK) {
1761                 error_setg(errp, "failed to initialize the LZO library");
1762                 goto cleanup;
1763             }
1764 #endif
1765             s->flag_compress = DUMP_DH_COMPRESSED_LZO;
1766             break;
1767 
1768         case DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY:
1769             s->flag_compress = DUMP_DH_COMPRESSED_SNAPPY;
1770             break;
1771 
1772         default:
1773             s->flag_compress = 0;
1774         }
1775 
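             /*
              * The kdump-compressed writer computes its own file layout
              * later, so the ELF offset bookkeeping below is not needed.
              */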
1776         return;
1777     }
1778 
1779     if (s->has_filter) {
1780         memory_mapping_filter(&s->list, s->begin, s->length);
1781     }
1782 
1783     /*
1784      * calculate phdr_num
1785      *
1786      * the type of ehdr->e_phnum is uint16_t, so we should avoid overflow
1787      */
1788     s->phdr_num = 1; /* PT_NOTE */
1789     if (s->list.num < UINT16_MAX - 2) {
1790         s->shdr_num = 0;
1791         s->phdr_num += s->list.num;
1792     } else {
1793         /* sh_info of section 0 holds the real number of phdrs */
1794         s->shdr_num = 1;
1795 
1796         /* the type of shdr->sh_info is uint32_t, so we should avoid overflow */
1797         if (s->list.num <= UINT32_MAX - 1) {
1798             s->phdr_num += s->list.num;
1799         } else {
1800             s->phdr_num = UINT32_MAX;
1801         }
1802     }
1803 
1804     if (dump_is_64bit(s)) {
1805         s->phdr_offset = sizeof(Elf64_Ehdr);
1806         s->shdr_offset = s->phdr_offset + sizeof(Elf64_Phdr) * s->phdr_num;
1807         s->note_offset = s->shdr_offset + sizeof(Elf64_Shdr) * s->shdr_num;
1808         s->memory_offset = s->note_offset + s->note_size;
1809     } else {
1811         s->phdr_offset = sizeof(Elf32_Ehdr);
1812         s->shdr_offset = s->phdr_offset + sizeof(Elf32_Phdr) * s->phdr_num;
1813         s->note_offset = s->shdr_offset + sizeof(Elf32_Shdr) * s->shdr_num;
1814         s->memory_offset = s->note_offset + s->note_size;
1815     }
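         /*
          * Resulting ELF file layout: Ehdr, program headers, optional
          * section headers, notes, then guest memory.
          */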
1816 
1817     return;
1818 
1819 cleanup:
1820     dump_cleanup(s);
1821 }
1822 
1823 /* This operation might be time-consuming. */
1824 static void dump_process(DumpState *s, Error **errp)
1825 {
1826     ERRP_GUARD();
1827     DumpQueryResult *result = NULL;
1828 
1829     if (s->has_format && s->format == DUMP_GUEST_MEMORY_FORMAT_WIN_DMP) {
1830 #ifdef TARGET_X86_64
1831         create_win_dump(s, errp);
1832 #endif
1833     } else if (s->has_format && s->format != DUMP_GUEST_MEMORY_FORMAT_ELF) {
1834         create_kdump_vmcore(s, errp);
1835     } else {
1836         create_vmcore(s, errp);
1837     }
1838 
1839     /* Make sure status is written after written_size; pairs with smp_rmb() in qmp_query_dump() */
1840     smp_wmb();
1841     qatomic_set(&s->status,
1842                (*errp ? DUMP_STATUS_FAILED : DUMP_STATUS_COMPLETED));
1843 
1844     /* send DUMP_COMPLETED message (unconditionally) */
1845     result = qmp_query_dump(NULL);
1846     /* should never fail */
1847     assert(result);
1848     qapi_event_send_dump_completed(result, !!*errp, (*errp ?
1849                                                      error_get_pretty(*errp) : NULL));
1850     qapi_free_DumpQueryResult(result);
1851 
1852     dump_cleanup(s);
1853 }
1854 
1855 static void *dump_thread(void *data)
1856 {
1857     DumpState *s = (DumpState *)data;
1858     dump_process(s, NULL);
1859     return NULL;
1860 }
1861 
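     /*
      * Illustrative query-dump reply while a detached dump is running:
      *   { "return": { "status": "active", "completed": 1024, "total": 2048 } }
      */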
1862 DumpQueryResult *qmp_query_dump(Error **errp)
1863 {
1864     DumpQueryResult *result = g_new(DumpQueryResult, 1);
1865     DumpState *state = &dump_state_global;
1866     result->status = qatomic_read(&state->status);
1867     /* Make sure status is read before written_size; pairs with smp_wmb() in dump_process() */
1868     smp_rmb();
1869     result->completed = state->written_size;
1870     result->total = state->total_size;
1871     return result;
1872 }
1873 
1874 void qmp_dump_guest_memory(bool paging, const char *file,
1875                            bool has_detach, bool detach,
1876                            bool has_begin, int64_t begin, bool has_length,
1877                            int64_t length, bool has_format,
1878                            DumpGuestMemoryFormat format, Error **errp)
1879 {
1880     ERRP_GUARD();
1881     const char *p;
1882     int fd = -1;
1883     DumpState *s;
1884     bool detach_p = false;
1885 
1886     if (runstate_check(RUN_STATE_INMIGRATE)) {
1887         error_setg(errp, "Dump not allowed during incoming migration.");
1888         return;
1889     }
1890 
1891     /* if there is a dump in the background, we should wait until it
1892      * has finished */
1893     if (qemu_system_dump_in_progress()) {
1894         error_setg(errp, "There is a dump in progress, please wait.");
1895         return;
1896     }
1897 
1898     /*
1899      * kdump-compressed format needs the whole memory dumped, so neither
1900      * paging nor filtering is supported here.
1901      */
1902     if ((has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) &&
1903         (paging || has_begin || has_length)) {
1904         error_setg(errp, "kdump-compressed format doesn't support paging or "
1905                          "filtering");
1906         return;
1907     }
1908     if (has_begin && !has_length) {
1909         error_setg(errp, QERR_MISSING_PARAMETER, "length");
1910         return;
1911     }
1912     if (!has_begin && has_length) {
1913         error_setg(errp, QERR_MISSING_PARAMETER, "begin");
1914         return;
1915     }
1916     if (has_detach) {
1917         detach_p = detach;
1918     }
1919 
1920     /* check whether lzo/snappy is supported */
1921 #ifndef CONFIG_LZO
1922     if (has_format && format == DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO) {
1923         error_setg(errp, "kdump-lzo is not available now");
1924         return;
1925     }
1926 #endif
1927 
1928 #ifndef CONFIG_SNAPPY
1929     if (has_format && format == DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY) {
1930         error_setg(errp, "kdump-snappy is not available now");
1931         return;
1932     }
1933 #endif
1934 
1935 #ifndef TARGET_X86_64
1936     if (has_format && format == DUMP_GUEST_MEMORY_FORMAT_WIN_DMP) {
1937         error_setg(errp, "Windows dump is only available for x86-64");
1938         return;
1939     }
1940 #endif
1941 
1942 #if !defined(WIN32)
1943     if (strstart(file, "fd:", &p)) {
1944         fd = monitor_get_fd(monitor_cur(), p, errp);
1945         if (fd == -1) {
1946             return;
1947         }
1948     }
1949 #endif
1950 
1951     if (strstart(file, "file:", &p)) {
1952         fd = qemu_open_old(p, O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, S_IRUSR);
1953         if (fd < 0) {
1954             error_setg_file_open(errp, errno, p);
1955             return;
1956         }
1957     }
1958 
1959     if (fd == -1) {
1960         error_setg(errp, QERR_INVALID_PARAMETER, "protocol");
1961         return;
1962     }
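         /*
          * Illustrative protocol values accepted above:
          *   "fd:dumpfd"        - a file descriptor previously added to
          *                        the monitor (e.g. with the QMP getfd
          *                        command)
          *   "file:/tmp/vmcore" - a plain host file path
          */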
1963 
1964     if (!dump_migration_blocker) {
1965         error_setg(&dump_migration_blocker,
1966                    "Live migration disabled: dump-guest-memory in progress");
1967     }
1968 
1969     /*
1970      * Allow the dump even with -only-migratable, but forbid migration
1971      * while dump-guest-memory is in progress.
1972      */
1973     if (migrate_add_blocker_internal(dump_migration_blocker, errp)) {
1974         /* The fd has not been handed over to the dump state yet; release it */
1975         close(fd);
1976         return;
1977     }
1978 
1979     s = &dump_state_global;
1980     dump_state_prepare(s);
1981 
1982     dump_init(s, fd, has_format, format, paging, has_begin,
1983               begin, length, errp);
1984     if (*errp) {
1985         qatomic_set(&s->status, DUMP_STATUS_FAILED);
1986         return;
1987     }
1988 
1989     if (detach_p) {
1990         /* detached dump */
1991         s->detached = true;
1992         qemu_thread_create(&s->dump_thread, "dump_thread", dump_thread,
1993                            s, QEMU_THREAD_DETACHED);
1994     } else {
1995         /* sync dump */
1996         dump_process(s, errp);
1997     }
1998 }
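     /*
      * Example QMP invocation (illustrative values):
      *   { "execute": "dump-guest-memory",
      *     "arguments": { "paging": false, "protocol": "file:/tmp/vmcore",
      *                    "detach": true, "format": "kdump-zlib" } }
      */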
1999 
2000 DumpGuestMemoryCapability *qmp_query_dump_guest_memory_capability(Error **errp)
2001 {
2002     DumpGuestMemoryCapability *cap = g_new0(DumpGuestMemoryCapability, 1);
2004     DumpGuestMemoryFormatList **tail = &cap->formats;
2005 
2006     /* elf is always available */
2007     QAPI_LIST_APPEND(tail, DUMP_GUEST_MEMORY_FORMAT_ELF);
2008 
2009     /* kdump-zlib is always available */
2010     QAPI_LIST_APPEND(tail, DUMP_GUEST_MEMORY_FORMAT_KDUMP_ZLIB);
2011 
2012     /* add new item if kdump-lzo is available */
2013 #ifdef CONFIG_LZO
2014     QAPI_LIST_APPEND(tail, DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO);
2015 #endif
2016 
2017     /* add new item if kdump-snappy is available */
2018 #ifdef CONFIG_SNAPPY
2019     QAPI_LIST_APPEND(tail, DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY);
2020 #endif
2021 
2022     /* Windows dump is available only if target is x86_64 */
2023 #ifdef TARGET_X86_64
2024     QAPI_LIST_APPEND(tail, DUMP_GUEST_MEMORY_FORMAT_WIN_DMP);
2025 #endif
2026 
2027     return cap;
2028 }
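     /*
      * Illustrative reply to query-dump-guest-memory-capability (the
      * format list varies with the build):
      *   { "return": { "formats": [ "elf", "kdump-zlib", "kdump-lzo",
      *                              "kdump-snappy" ] } }
      */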
2029