1 /*
2 * QEMU dump
3 *
4 * Copyright Fujitsu, Corp. 2011, 2012
5 *
6 * Authors:
7 * Wen Congyang <wency@cn.fujitsu.com>
8 *
9 * This work is licensed under the terms of the GNU GPL, version 2 or later.
10 * See the COPYING file in the top-level directory.
11 *
12 */
13
14 #include "qemu/osdep.h"
15 #include "qemu/cutils.h"
16 #include "elf.h"
17 #include "exec/hwaddr.h"
18 #include "monitor/monitor.h"
19 #include "sysemu/kvm.h"
20 #include "sysemu/dump.h"
21 #include "sysemu/memory_mapping.h"
22 #include "sysemu/runstate.h"
23 #include "sysemu/cpus.h"
24 #include "qapi/error.h"
25 #include "qapi/qapi-commands-dump.h"
26 #include "qapi/qapi-events-dump.h"
27 #include "qapi/qmp/qerror.h"
28 #include "qemu/error-report.h"
29 #include "qemu/main-loop.h"
30 #include "hw/misc/vmcoreinfo.h"
31 #include "migration/blocker.h"
32
33 #ifdef TARGET_X86_64
34 #include "win_dump.h"
35 #endif
36
37 #include <zlib.h>
38 #ifdef CONFIG_LZO
39 #include <lzo/lzo1x.h>
40 #endif
41 #ifdef CONFIG_SNAPPY
42 #include <snappy-c.h>
43 #endif
44 #ifndef ELF_MACHINE_UNAME
45 #define ELF_MACHINE_UNAME "Unknown"
46 #endif
47
48 #define MAX_GUEST_NOTE_SIZE (1 << 20) /* 1MB should be enough */
49
50 static Error *dump_migration_blocker;
51
52 #define ELF_NOTE_SIZE(hdr_size, name_size, desc_size) \
53 ((DIV_ROUND_UP((hdr_size), 4) + \
54 DIV_ROUND_UP((name_size), 4) + \
55 DIV_ROUND_UP((desc_size), 4)) * 4)
56
57 static inline bool dump_is_64bit(DumpState *s)
58 {
59 return s->dump_info.d_class == ELFCLASS64;
60 }
61
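/* Byte-swap helpers: convert a value to the endianness recorded in s->dump_info.d_endian */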
62 uint16_t cpu_to_dump16(DumpState *s, uint16_t val)
63 {
64 if (s->dump_info.d_endian == ELFDATA2LSB) {
65 val = cpu_to_le16(val);
66 } else {
67 val = cpu_to_be16(val);
68 }
69
70 return val;
71 }
72
73 uint32_t cpu_to_dump32(DumpState *s, uint32_t val)
74 {
75 if (s->dump_info.d_endian == ELFDATA2LSB) {
76 val = cpu_to_le32(val);
77 } else {
78 val = cpu_to_be32(val);
79 }
80
81 return val;
82 }
83
84 uint64_t cpu_to_dump64(DumpState *s, uint64_t val)
85 {
86 if (s->dump_info.d_endian == ELFDATA2LSB) {
87 val = cpu_to_le64(val);
88 } else {
89 val = cpu_to_be64(val);
90 }
91
92 return val;
93 }
94
95 static int dump_cleanup(DumpState *s)
96 {
97 guest_phys_blocks_free(&s->guest_phys_blocks);
98 memory_mapping_list_free(&s->list);
99 close(s->fd);
100 g_free(s->guest_note);
101 s->guest_note = NULL;
102 if (s->resume) {
103 if (s->detached) {
104 qemu_mutex_lock_iothread();
105 }
106 vm_start();
107 if (s->detached) {
108 qemu_mutex_unlock_iothread();
109 }
110 }
111 migrate_del_blocker(dump_migration_blocker);
112
113 return 0;
114 }
115
116 static int fd_write_vmcore(const void *buf, size_t size, void *opaque)
117 {
118 DumpState *s = opaque;
119 size_t written_size;
120
121 written_size = qemu_write_full(s->fd, buf, size);
122 if (written_size != size) {
123 return -errno;
124 }
125
126 return 0;
127 }
128
129 static void write_elf64_header(DumpState *s, Error **errp)
130 {
131 /*
132 * phnum in the ELF header is 16 bits; if we have more segments,
133 * we set phnum to PN_XNUM and write the real number of segments
134 * to a special section.
135 */
136 uint16_t phnum = MIN(s->phdr_num, PN_XNUM);
137 Elf64_Ehdr elf_header;
138 int ret;
139
140 memset(&elf_header, 0, sizeof(Elf64_Ehdr));
141 memcpy(&elf_header, ELFMAG, SELFMAG);
142 elf_header.e_ident[EI_CLASS] = ELFCLASS64;
143 elf_header.e_ident[EI_DATA] = s->dump_info.d_endian;
144 elf_header.e_ident[EI_VERSION] = EV_CURRENT;
145 elf_header.e_type = cpu_to_dump16(s, ET_CORE);
146 elf_header.e_machine = cpu_to_dump16(s, s->dump_info.d_machine);
147 elf_header.e_version = cpu_to_dump32(s, EV_CURRENT);
148 elf_header.e_ehsize = cpu_to_dump16(s, sizeof(elf_header));
149 elf_header.e_phoff = cpu_to_dump64(s, s->phdr_offset);
150 elf_header.e_phentsize = cpu_to_dump16(s, sizeof(Elf64_Phdr));
151 elf_header.e_phnum = cpu_to_dump16(s, phnum);
152 if (s->shdr_num) {
153 elf_header.e_shoff = cpu_to_dump64(s, s->shdr_offset);
154 elf_header.e_shentsize = cpu_to_dump16(s, sizeof(Elf64_Shdr));
155 elf_header.e_shnum = cpu_to_dump16(s, s->shdr_num);
156 }
157
158 ret = fd_write_vmcore(&elf_header, sizeof(elf_header), s);
159 if (ret < 0) {
160 error_setg_errno(errp, -ret, "dump: failed to write elf header");
161 }
162 }
163
164 static void write_elf32_header(DumpState *s, Error **errp)
165 {
166 /*
167 * phnum in the ELF header is 16 bits; if we have more segments,
168 * we set phnum to PN_XNUM and write the real number of segments
169 * to a special section.
170 */
171 uint16_t phnum = MIN(s->phdr_num, PN_XNUM);
172 Elf32_Ehdr elf_header;
173 int ret;
174
175 memset(&elf_header, 0, sizeof(Elf32_Ehdr));
176 memcpy(&elf_header, ELFMAG, SELFMAG);
177 elf_header.e_ident[EI_CLASS] = ELFCLASS32;
178 elf_header.e_ident[EI_DATA] = s->dump_info.d_endian;
179 elf_header.e_ident[EI_VERSION] = EV_CURRENT;
180 elf_header.e_type = cpu_to_dump16(s, ET_CORE);
181 elf_header.e_machine = cpu_to_dump16(s, s->dump_info.d_machine);
182 elf_header.e_version = cpu_to_dump32(s, EV_CURRENT);
183 elf_header.e_ehsize = cpu_to_dump16(s, sizeof(elf_header));
184 elf_header.e_phoff = cpu_to_dump32(s, s->phdr_offset);
185 elf_header.e_phentsize = cpu_to_dump16(s, sizeof(Elf32_Phdr));
186 elf_header.e_phnum = cpu_to_dump16(s, phnum);
187 if (s->shdr_num) {
188 elf_header.e_shoff = cpu_to_dump32(s, s->shdr_offset);
189 elf_header.e_shentsize = cpu_to_dump16(s, sizeof(Elf32_Shdr));
190 elf_header.e_shnum = cpu_to_dump16(s, s->shdr_num);
191 }
192
193 ret = fd_write_vmcore(&elf_header, sizeof(elf_header), s);
194 if (ret < 0) {
195 error_setg_errno(errp, -ret, "dump: failed to write elf header");
196 }
197 }
198
199 static void write_elf64_load(DumpState *s, MemoryMapping *memory_mapping,
200 int phdr_index, hwaddr offset,
201 hwaddr filesz, Error **errp)
202 {
203 Elf64_Phdr phdr;
204 int ret;
205
206 memset(&phdr, 0, sizeof(Elf64_Phdr));
207 phdr.p_type = cpu_to_dump32(s, PT_LOAD);
208 phdr.p_offset = cpu_to_dump64(s, offset);
209 phdr.p_paddr = cpu_to_dump64(s, memory_mapping->phys_addr);
210 phdr.p_filesz = cpu_to_dump64(s, filesz);
211 phdr.p_memsz = cpu_to_dump64(s, memory_mapping->length);
212 phdr.p_vaddr = cpu_to_dump64(s, memory_mapping->virt_addr) ?: phdr.p_paddr;
213
214 assert(memory_mapping->length >= filesz);
215
216 ret = fd_write_vmcore(&phdr, sizeof(Elf64_Phdr), s);
217 if (ret < 0) {
218 error_setg_errno(errp, -ret,
219 "dump: failed to write program header table");
220 }
221 }
222
223 static void write_elf32_load(DumpState *s, MemoryMapping *memory_mapping,
224 int phdr_index, hwaddr offset,
225 hwaddr filesz, Error **errp)
226 {
227 Elf32_Phdr phdr;
228 int ret;
229
230 memset(&phdr, 0, sizeof(Elf32_Phdr));
231 phdr.p_type = cpu_to_dump32(s, PT_LOAD);
232 phdr.p_offset = cpu_to_dump32(s, offset);
233 phdr.p_paddr = cpu_to_dump32(s, memory_mapping->phys_addr);
234 phdr.p_filesz = cpu_to_dump32(s, filesz);
235 phdr.p_memsz = cpu_to_dump32(s, memory_mapping->length);
236 phdr.p_vaddr =
237 cpu_to_dump32(s, memory_mapping->virt_addr) ?: phdr.p_paddr;
238
239 assert(memory_mapping->length >= filesz);
240
241 ret = fd_write_vmcore(&phdr, sizeof(Elf32_Phdr), s);
242 if (ret < 0) {
243 error_setg_errno(errp, -ret,
244 "dump: failed to write program header table");
245 }
246 }
247
248 static void write_elf64_phdr_note(DumpState *s, Elf64_Phdr *phdr)
249 {
250 memset(phdr, 0, sizeof(*phdr));
251 phdr->p_type = cpu_to_dump32(s, PT_NOTE);
252 phdr->p_offset = cpu_to_dump64(s, s->note_offset);
253 phdr->p_paddr = 0;
254 phdr->p_filesz = cpu_to_dump64(s, s->note_size);
255 phdr->p_memsz = cpu_to_dump64(s, s->note_size);
256 phdr->p_vaddr = 0;
257 }
258
259 static inline int cpu_index(CPUState *cpu)
260 {
261 return cpu->cpu_index + 1;
262 }
263
264 static void write_guest_note(WriteCoreDumpFunction f, DumpState *s,
265 Error **errp)
266 {
267 int ret;
268
269 if (s->guest_note) {
270 ret = f(s->guest_note, s->guest_note_size, s);
271 if (ret < 0) {
272 error_setg(errp, "dump: failed to write guest note");
273 }
274 }
275 }
276
277 static void write_elf64_notes(WriteCoreDumpFunction f, DumpState *s,
278 Error **errp)
279 {
280 CPUState *cpu;
281 int ret;
282 int id;
283
284 CPU_FOREACH(cpu) {
285 id = cpu_index(cpu);
286 ret = cpu_write_elf64_note(f, cpu, id, s);
287 if (ret < 0) {
288 error_setg(errp, "dump: failed to write elf notes");
289 return;
290 }
291 }
292
293 CPU_FOREACH(cpu) {
294 ret = cpu_write_elf64_qemunote(f, cpu, s);
295 if (ret < 0) {
296 error_setg(errp, "dump: failed to write CPU status");
297 return;
298 }
299 }
300
301 write_guest_note(f, s, errp);
302 }
303
304 static void write_elf32_phdr_note(DumpState *s, Elf32_Phdr *phdr)
305 {
306 memset(phdr, 0, sizeof(*phdr));
307 phdr->p_type = cpu_to_dump32(s, PT_NOTE);
308 phdr->p_offset = cpu_to_dump32(s, s->note_offset);
309 phdr->p_paddr = 0;
310 phdr->p_filesz = cpu_to_dump32(s, s->note_size);
311 phdr->p_memsz = cpu_to_dump32(s, s->note_size);
312 phdr->p_vaddr = 0;
313 }
314
315 static void write_elf32_notes(WriteCoreDumpFunction f, DumpState *s,
316 Error **errp)
317 {
318 CPUState *cpu;
319 int ret;
320 int id;
321
322 CPU_FOREACH(cpu) {
323 id = cpu_index(cpu);
324 ret = cpu_write_elf32_note(f, cpu, id, s);
325 if (ret < 0) {
326 error_setg(errp, "dump: failed to write elf notes");
327 return;
328 }
329 }
330
331 CPU_FOREACH(cpu) {
332 ret = cpu_write_elf32_qemunote(f, cpu, s);
333 if (ret < 0) {
334 error_setg(errp, "dump: failed to write CPU status");
335 return;
336 }
337 }
338
339 write_guest_note(f, s, errp);
340 }
341
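/* Write the PT_NOTE program header, using the 32-bit or 64-bit layout depending on the dump class */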
342 static void write_elf_phdr_note(DumpState *s, Error **errp)
343 {
344 ERRP_GUARD();
345 Elf32_Phdr phdr32;
346 Elf64_Phdr phdr64;
347 void *phdr;
348 size_t size;
349 int ret;
350
351 if (dump_is_64bit(s)) {
352 write_elf64_phdr_note(s, &phdr64);
353 size = sizeof(phdr64);
354 phdr = &phdr64;
355 } else {
356 write_elf32_phdr_note(s, &phdr32);
357 size = sizeof(phdr32);
358 phdr = &phdr32;
359 }
360
361 ret = fd_write_vmcore(phdr, size, s);
362 if (ret < 0) {
363 error_setg_errno(errp, -ret,
364 "dump: failed to write program header table");
365 }
366 }
367
368 static void write_elf_section(DumpState *s, int type, Error **errp)
369 {
370 Elf32_Shdr shdr32;
371 Elf64_Shdr shdr64;
372 int shdr_size;
373 void *shdr;
374 int ret;
375
376 if (type == 0) {
377 shdr_size = sizeof(Elf32_Shdr);
378 memset(&shdr32, 0, shdr_size);
379 shdr32.sh_info = cpu_to_dump32(s, s->phdr_num);
380 shdr = &shdr32;
381 } else {
382 shdr_size = sizeof(Elf64_Shdr);
383 memset(&shdr64, 0, shdr_size);
384 shdr64.sh_info = cpu_to_dump32(s, s->phdr_num);
385 shdr = &shdr64;
386 }
387
388 ret = fd_write_vmcore(shdr, shdr_size, s);
389 if (ret < 0) {
390 error_setg_errno(errp, -ret,
391 "dump: failed to write section header table");
392 }
393 }
394
395 static void write_data(DumpState *s, void *buf, int length, Error **errp)
396 {
397 int ret;
398
399 ret = fd_write_vmcore(buf, length, s);
400 if (ret < 0) {
401 error_setg_errno(errp, -ret, "dump: failed to save memory");
402 } else {
403 s->written_size += length;
404 }
405 }
406
407 /* write the memory to vmcore. 1 page per I/O. */
408 static void write_memory(DumpState *s, GuestPhysBlock *block, ram_addr_t start,
409 int64_t size, Error **errp)
410 {
411 ERRP_GUARD();
412 int64_t i;
413
414 for (i = 0; i < size / s->dump_info.page_size; i++) {
415 write_data(s, block->host_addr + start + i * s->dump_info.page_size,
416 s->dump_info.page_size, errp);
417 if (*errp) {
418 return;
419 }
420 }
421
422 if ((size % s->dump_info.page_size) != 0) {
423 write_data(s, block->host_addr + start + i * s->dump_info.page_size,
424 size % s->dump_info.page_size, errp);
425 if (*errp) {
426 return;
427 }
428 }
429 }
430
431 /* get the memory's offset and size in the vmcore */
432 static void get_offset_range(hwaddr phys_addr,
433 ram_addr_t mapping_length,
434 DumpState *s,
435 hwaddr *p_offset,
436 hwaddr *p_filesz)
437 {
438 GuestPhysBlock *block;
439 hwaddr offset = s->memory_offset;
440 int64_t size_in_block, start;
441
442 /* When the memory is not stored into vmcore, offset will be -1 */
443 *p_offset = -1;
444 *p_filesz = 0;
445
446 if (s->has_filter) {
447 if (phys_addr < s->begin || phys_addr >= s->begin + s->length) {
448 return;
449 }
450 }
451
452 QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) {
453 if (s->has_filter) {
454 if (block->target_start >= s->begin + s->length ||
455 block->target_end <= s->begin) {
456 /* This block is out of the range */
457 continue;
458 }
459
460 if (s->begin <= block->target_start) {
461 start = block->target_start;
462 } else {
463 start = s->begin;
464 }
465
466 size_in_block = block->target_end - start;
467 if (s->begin + s->length < block->target_end) {
468 size_in_block -= block->target_end - (s->begin + s->length);
469 }
470 } else {
471 start = block->target_start;
472 size_in_block = block->target_end - block->target_start;
473 }
474
475 if (phys_addr >= start && phys_addr < start + size_in_block) {
476 *p_offset = phys_addr - start + offset;
477
478 /* The offset range mapped from the vmcore file must not spill over
479 * the GuestPhysBlock, so clamp it. The rest of the mapping will be
480 * zero-filled in memory at load time; see
481 * <http://refspecs.linuxbase.org/elf/gabi4+/ch5.pheader.html>.
482 */
483 *p_filesz = phys_addr + mapping_length <= start + size_in_block ?
484 mapping_length :
485 size_in_block - (phys_addr - start);
486 return;
487 }
488
489 offset += size_in_block;
490 }
491 }
492
493 static void write_elf_loads(DumpState *s, Error **errp)
494 {
495 ERRP_GUARD();
496 hwaddr offset, filesz;
497 MemoryMapping *memory_mapping;
498 uint32_t phdr_index = 1;
499
500 QTAILQ_FOREACH(memory_mapping, &s->list.head, next) {
501 get_offset_range(memory_mapping->phys_addr,
502 memory_mapping->length,
503 s, &offset, &filesz);
504 if (dump_is_64bit(s)) {
505 write_elf64_load(s, memory_mapping, phdr_index++, offset,
506 filesz, errp);
507 } else {
508 write_elf32_load(s, memory_mapping, phdr_index++, offset,
509 filesz, errp);
510 }
511
512 if (*errp) {
513 return;
514 }
515
516 if (phdr_index >= s->phdr_num) {
517 break;
518 }
519 }
520 }
521
522 /* write elf header, PT_NOTE and elf note to vmcore. */
523 static void dump_begin(DumpState *s, Error **errp)
524 {
525 ERRP_GUARD();
526
527 /*
528 * the vmcore's format is:
529 * --------------
530 * | elf header |
531 * --------------
532 * | PT_NOTE |
533 * --------------
534 * | PT_LOAD |
535 * --------------
536 * | ...... |
537 * --------------
538 * | PT_LOAD |
539 * --------------
540 * | sec_hdr |
541 * --------------
542 * | elf note |
543 * --------------
544 * | memory |
545 * --------------
546 *
547 * we only know where the memory is saved after we write elf note into
548 * vmcore.
549 */
550
551 /* write elf header to vmcore */
552 if (dump_is_64bit(s)) {
553 write_elf64_header(s, errp);
554 } else {
555 write_elf32_header(s, errp);
556 }
557 if (*errp) {
558 return;
559 }
560
561 /* write PT_NOTE to vmcore */
562 write_elf_phdr_note(s, errp);
563 if (*errp) {
564 return;
565 }
566
567 if (dump_is_64bit(s)) {
568 /* write all PT_LOAD to vmcore */
569 write_elf_loads(s, errp);
570 if (*errp) {
571 return;
572 }
573
574 /* write section to vmcore */
575 if (s->shdr_num) {
576 write_elf_section(s, 1, errp);
577 if (*errp) {
578 return;
579 }
580 }
581
582 /* write notes to vmcore */
583 write_elf64_notes(fd_write_vmcore, s, errp);
584 if (*errp) {
585 return;
586 }
587 } else {
588 /* write all PT_LOAD to vmcore */
589 write_elf_loads(s, errp);
590 if (*errp) {
591 return;
592 }
593
594 /* write section to vmcore */
595 if (s->shdr_num) {
596 write_elf_section(s, 0, errp);
597 if (*errp) {
598 return;
599 }
600 }
601
602 /* write notes to vmcore */
603 write_elf32_notes(fd_write_vmcore, s, errp);
604 if (*errp) {
605 return;
606 }
607 }
608 }
609
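/*
 * Advance to the next guest-phys block that intersects the dump range,
 * updating s->next_block and s->start. Returns 1 when there are no more
 * blocks, 0 otherwise.
 */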
610 static int get_next_block(DumpState *s, GuestPhysBlock *block)
611 {
612 while (1) {
613 block = QTAILQ_NEXT(block, next);
614 if (!block) {
615 /* no more block */
616 return 1;
617 }
618
619 s->start = 0;
620 s->next_block = block;
621 if (s->has_filter) {
622 if (block->target_start >= s->begin + s->length ||
623 block->target_end <= s->begin) {
624 /* This block is out of the range */
625 continue;
626 }
627
628 if (s->begin > block->target_start) {
629 s->start = s->begin - block->target_start;
630 }
631 }
632
633 return 0;
634 }
635 }
636
637 /* write all memory to vmcore */
638 static void dump_iterate(DumpState *s, Error **errp)
639 {
640 ERRP_GUARD();
641 GuestPhysBlock *block;
642 int64_t size;
643
644 do {
645 block = s->next_block;
646
647 size = block->target_end - block->target_start;
648 if (s->has_filter) {
649 size -= s->start;
650 if (s->begin + s->length < block->target_end) {
651 size -= block->target_end - (s->begin + s->length);
652 }
653 }
654 write_memory(s, block, s->start, size, errp);
655 if (*errp) {
656 return;
657 }
658
659 } while (!get_next_block(s, block));
660 }
661
662 static void create_vmcore(DumpState *s, Error **errp)
663 {
664 ERRP_GUARD();
665
666 dump_begin(s, errp);
667 if (*errp) {
668 return;
669 }
670
671 dump_iterate(s, errp);
672 }
673
674 static int write_start_flat_header(int fd)
675 {
676 MakedumpfileHeader *mh;
677 int ret = 0;
678
679 QEMU_BUILD_BUG_ON(sizeof *mh > MAX_SIZE_MDF_HEADER);
680 mh = g_malloc0(MAX_SIZE_MDF_HEADER);
681
682 memcpy(mh->signature, MAKEDUMPFILE_SIGNATURE,
683 MIN(sizeof mh->signature, sizeof MAKEDUMPFILE_SIGNATURE));
684
685 mh->type = cpu_to_be64(TYPE_FLAT_HEADER);
686 mh->version = cpu_to_be64(VERSION_FLAT_HEADER);
687
688 size_t written_size;
689 written_size = qemu_write_full(fd, mh, MAX_SIZE_MDF_HEADER);
690 if (written_size != MAX_SIZE_MDF_HEADER) {
691 ret = -1;
692 }
693
694 g_free(mh);
695 return ret;
696 }
697
698 static int write_end_flat_header(int fd)
699 {
700 MakedumpfileDataHeader mdh;
701
702 mdh.offset = END_FLAG_FLAT_HEADER;
703 mdh.buf_size = END_FLAG_FLAT_HEADER;
704
705 size_t written_size;
706 written_size = qemu_write_full(fd, &mdh, sizeof(mdh));
707 if (written_size != sizeof(mdh)) {
708 return -1;
709 }
710
711 return 0;
712 }
713
714 static int write_buffer(int fd, off_t offset, const void *buf, size_t size)
715 {
716 size_t written_size;
717 MakedumpfileDataHeader mdh;
718
719 mdh.offset = cpu_to_be64(offset);
720 mdh.buf_size = cpu_to_be64(size);
721
722 written_size = qemu_write_full(fd, &mdh, sizeof(mdh));
723 if (written_size != sizeof(mdh)) {
724 return -1;
725 }
726
727 written_size = qemu_write_full(fd, buf, size);
728 if (written_size != size) {
729 return -1;
730 }
731
732 return 0;
733 }
734
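/* WriteCoreDumpFunction variant that appends notes to s->note_buf instead of writing them to the dump file */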
735 static int buf_write_note(const void *buf, size_t size, void *opaque)
736 {
737 DumpState *s = opaque;
738
739 /* note_buf is not enough */
740 if (s->note_buf_offset + size > s->note_size) {
741 return -1;
742 }
743
744 memcpy(s->note_buf + s->note_buf_offset, buf, size);
745
746 s->note_buf_offset += size;
747
748 return 0;
749 }
750
751 /*
752 * This function retrieves various sizes from an ELF note.
753 *
754 * @note has to be a valid ELF note. The returned sizes are unmodified
755 * (not padded or rounded up to be a multiple of 4).
756 */
757 static void get_note_sizes(DumpState *s, const void *note,
758 uint64_t *note_head_size,
759 uint64_t *name_size,
760 uint64_t *desc_size)
761 {
762 uint64_t note_head_sz;
763 uint64_t name_sz;
764 uint64_t desc_sz;
765
766 if (dump_is_64bit(s)) {
767 const Elf64_Nhdr *hdr = note;
768 note_head_sz = sizeof(Elf64_Nhdr);
769 name_sz = tswap64(hdr->n_namesz);
770 desc_sz = tswap64(hdr->n_descsz);
771 } else {
772 const Elf32_Nhdr *hdr = note;
773 note_head_sz = sizeof(Elf32_Nhdr);
774 name_sz = tswap32(hdr->n_namesz);
775 desc_sz = tswap32(hdr->n_descsz);
776 }
777
778 if (note_head_size) {
779 *note_head_size = note_head_sz;
780 }
781 if (name_size) {
782 *name_size = name_sz;
783 }
784 if (desc_size) {
785 *desc_size = desc_sz;
786 }
787 }
788
789 static bool note_name_equal(DumpState *s,
790 const uint8_t *note, const char *name)
791 {
792 int len = strlen(name) + 1;
793 uint64_t head_size, name_size;
794
795 get_note_sizes(s, note, &head_size, &name_size, NULL);
796 head_size = ROUND_UP(head_size, 4);
797
798 return name_size == len && memcmp(note + head_size, name, len) == 0;
799 }
800
801 /* write common header, sub header and elf note to vmcore */
802 static void create_header32(DumpState *s, Error **errp)
803 {
804 ERRP_GUARD();
805 DiskDumpHeader32 *dh = NULL;
806 KdumpSubHeader32 *kh = NULL;
807 size_t size;
808 uint32_t block_size;
809 uint32_t sub_hdr_size;
810 uint32_t bitmap_blocks;
811 uint32_t status = 0;
812 uint64_t offset_note;
813
814 /* write the common header; the kdump-compressed format version is 6 */
815 size = sizeof(DiskDumpHeader32);
816 dh = g_malloc0(size);
817
818 memcpy(dh->signature, KDUMP_SIGNATURE, SIG_LEN);
819 dh->header_version = cpu_to_dump32(s, 6);
820 block_size = s->dump_info.page_size;
821 dh->block_size = cpu_to_dump32(s, block_size);
822 sub_hdr_size = sizeof(struct KdumpSubHeader32) + s->note_size;
823 sub_hdr_size = DIV_ROUND_UP(sub_hdr_size, block_size);
824 dh->sub_hdr_size = cpu_to_dump32(s, sub_hdr_size);
825 /* dh->max_mapnr may be truncated, full 64bit is in kh.max_mapnr_64 */
826 dh->max_mapnr = cpu_to_dump32(s, MIN(s->max_mapnr, UINT_MAX));
827 dh->nr_cpus = cpu_to_dump32(s, s->nr_cpus);
828 bitmap_blocks = DIV_ROUND_UP(s->len_dump_bitmap, block_size) * 2;
829 dh->bitmap_blocks = cpu_to_dump32(s, bitmap_blocks);
830 strncpy(dh->utsname.machine, ELF_MACHINE_UNAME, sizeof(dh->utsname.machine));
831
832 if (s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) {
833 status |= DUMP_DH_COMPRESSED_ZLIB;
834 }
835 #ifdef CONFIG_LZO
836 if (s->flag_compress & DUMP_DH_COMPRESSED_LZO) {
837 status |= DUMP_DH_COMPRESSED_LZO;
838 }
839 #endif
840 #ifdef CONFIG_SNAPPY
841 if (s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) {
842 status |= DUMP_DH_COMPRESSED_SNAPPY;
843 }
844 #endif
845 dh->status = cpu_to_dump32(s, status);
846
847 if (write_buffer(s->fd, 0, dh, size) < 0) {
848 error_setg(errp, "dump: failed to write disk dump header");
849 goto out;
850 }
851
852 /* write sub header */
853 size = sizeof(KdumpSubHeader32);
854 kh = g_malloc0(size);
855
856 /* 64bit max_mapnr_64 */
857 kh->max_mapnr_64 = cpu_to_dump64(s, s->max_mapnr);
858 kh->phys_base = cpu_to_dump32(s, s->dump_info.phys_base);
859 kh->dump_level = cpu_to_dump32(s, DUMP_LEVEL);
860
861 offset_note = DISKDUMP_HEADER_BLOCKS * block_size + size;
862 if (s->guest_note &&
863 note_name_equal(s, s->guest_note, "VMCOREINFO")) {
864 uint64_t hsize, name_size, size_vmcoreinfo_desc, offset_vmcoreinfo;
865
866 get_note_sizes(s, s->guest_note,
867 &hsize, &name_size, &size_vmcoreinfo_desc);
868 offset_vmcoreinfo = offset_note + s->note_size - s->guest_note_size +
869 (DIV_ROUND_UP(hsize, 4) + DIV_ROUND_UP(name_size, 4)) * 4;
870 kh->offset_vmcoreinfo = cpu_to_dump64(s, offset_vmcoreinfo);
871 kh->size_vmcoreinfo = cpu_to_dump32(s, size_vmcoreinfo_desc);
872 }
873
874 kh->offset_note = cpu_to_dump64(s, offset_note);
875 kh->note_size = cpu_to_dump32(s, s->note_size);
876
877 if (write_buffer(s->fd, DISKDUMP_HEADER_BLOCKS *
878 block_size, kh, size) < 0) {
879 error_setg(errp, "dump: failed to write kdump sub header");
880 goto out;
881 }
882
883 /* write note */
884 s->note_buf = g_malloc0(s->note_size);
885 s->note_buf_offset = 0;
886
887 /* use s->note_buf to store notes temporarily */
888 write_elf32_notes(buf_write_note, s, errp);
889 if (*errp) {
890 goto out;
891 }
892 if (write_buffer(s->fd, offset_note, s->note_buf,
893 s->note_size) < 0) {
894 error_setg(errp, "dump: failed to write notes");
895 goto out;
896 }
897
898 /* get offset of dump_bitmap */
899 s->offset_dump_bitmap = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size) *
900 block_size;
901
902 /* get offset of page */
903 s->offset_page = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size + bitmap_blocks) *
904 block_size;
905
906 out:
907 g_free(dh);
908 g_free(kh);
909 g_free(s->note_buf);
910 }
911
912 /* write common header, sub header and elf note to vmcore */
913 static void create_header64(DumpState *s, Error **errp)
914 {
915 ERRP_GUARD();
916 DiskDumpHeader64 *dh = NULL;
917 KdumpSubHeader64 *kh = NULL;
918 size_t size;
919 uint32_t block_size;
920 uint32_t sub_hdr_size;
921 uint32_t bitmap_blocks;
922 uint32_t status = 0;
923 uint64_t offset_note;
924
925 /* write the common header; the kdump-compressed format version is 6 */
926 size = sizeof(DiskDumpHeader64);
927 dh = g_malloc0(size);
928
929 memcpy(dh->signature, KDUMP_SIGNATURE, SIG_LEN);
930 dh->header_version = cpu_to_dump32(s, 6);
931 block_size = s->dump_info.page_size;
932 dh->block_size = cpu_to_dump32(s, block_size);
933 sub_hdr_size = sizeof(struct KdumpSubHeader64) + s->note_size;
934 sub_hdr_size = DIV_ROUND_UP(sub_hdr_size, block_size);
935 dh->sub_hdr_size = cpu_to_dump32(s, sub_hdr_size);
936 /* dh->max_mapnr may be truncated, full 64bit is in kh.max_mapnr_64 */
937 dh->max_mapnr = cpu_to_dump32(s, MIN(s->max_mapnr, UINT_MAX));
938 dh->nr_cpus = cpu_to_dump32(s, s->nr_cpus);
939 bitmap_blocks = DIV_ROUND_UP(s->len_dump_bitmap, block_size) * 2;
940 dh->bitmap_blocks = cpu_to_dump32(s, bitmap_blocks);
941 strncpy(dh->utsname.machine, ELF_MACHINE_UNAME, sizeof(dh->utsname.machine));
942
943 if (s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) {
944 status |= DUMP_DH_COMPRESSED_ZLIB;
945 }
946 #ifdef CONFIG_LZO
947 if (s->flag_compress & DUMP_DH_COMPRESSED_LZO) {
948 status |= DUMP_DH_COMPRESSED_LZO;
949 }
950 #endif
951 #ifdef CONFIG_SNAPPY
952 if (s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) {
953 status |= DUMP_DH_COMPRESSED_SNAPPY;
954 }
955 #endif
956 dh->status = cpu_to_dump32(s, status);
957
958 if (write_buffer(s->fd, 0, dh, size) < 0) {
959 error_setg(errp, "dump: failed to write disk dump header");
960 goto out;
961 }
962
963 /* write sub header */
964 size = sizeof(KdumpSubHeader64);
965 kh = g_malloc0(size);
966
967 /* 64bit max_mapnr_64 */
968 kh->max_mapnr_64 = cpu_to_dump64(s, s->max_mapnr);
969 kh->phys_base = cpu_to_dump64(s, s->dump_info.phys_base);
970 kh->dump_level = cpu_to_dump32(s, DUMP_LEVEL);
971
972 offset_note = DISKDUMP_HEADER_BLOCKS * block_size + size;
973 if (s->guest_note &&
974 note_name_equal(s, s->guest_note, "VMCOREINFO")) {
975 uint64_t hsize, name_size, size_vmcoreinfo_desc, offset_vmcoreinfo;
976
977 get_note_sizes(s, s->guest_note,
978 &hsize, &name_size, &size_vmcoreinfo_desc);
979 offset_vmcoreinfo = offset_note + s->note_size - s->guest_note_size +
980 (DIV_ROUND_UP(hsize, 4) + DIV_ROUND_UP(name_size, 4)) * 4;
981 kh->offset_vmcoreinfo = cpu_to_dump64(s, offset_vmcoreinfo);
982 kh->size_vmcoreinfo = cpu_to_dump64(s, size_vmcoreinfo_desc);
983 }
984
985 kh->offset_note = cpu_to_dump64(s, offset_note);
986 kh->note_size = cpu_to_dump64(s, s->note_size);
987
988 if (write_buffer(s->fd, DISKDUMP_HEADER_BLOCKS *
989 block_size, kh, size) < 0) {
990 error_setg(errp, "dump: failed to write kdump sub header");
991 goto out;
992 }
993
994 /* write note */
995 s->note_buf = g_malloc0(s->note_size);
996 s->note_buf_offset = 0;
997
998 /* use s->note_buf to store notes temporarily */
999 write_elf64_notes(buf_write_note, s, errp);
1000 if (*errp) {
1001 goto out;
1002 }
1003
1004 if (write_buffer(s->fd, offset_note, s->note_buf,
1005 s->note_size) < 0) {
1006 error_setg(errp, "dump: failed to write notes");
1007 goto out;
1008 }
1009
1010 /* get offset of dump_bitmap */
1011 s->offset_dump_bitmap = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size) *
1012 block_size;
1013
1014 /* get offset of page */
1015 s->offset_page = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size + bitmap_blocks) *
1016 block_size;
1017
1018 out:
1019 g_free(dh);
1020 g_free(kh);
1021 g_free(s->note_buf);
1022 }
1023
1024 static void write_dump_header(DumpState *s, Error **errp)
1025 {
1026 if (dump_is_64bit(s)) {
1027 create_header64(s, errp);
1028 } else {
1029 create_header32(s, errp);
1030 }
1031 }
1032
1033 static size_t dump_bitmap_get_bufsize(DumpState *s)
1034 {
1035 return s->dump_info.page_size;
1036 }
1037
1038 /*
1039 * Set dump_bitmap sequentially. The bit before last_pfn must not be
1040 * rewritten, so to set the first bit, pass both last_pfn and pfn as 0.
1041 * set_dump_bitmap always leaves the most recently set bit un-synced;
1042 * setting (last bit + sizeof(buf) * 8) to 0 flushes the content of buf
1043 * into the vmcore, i.e. it synchronizes the un-synced bit into the vmcore.
1044 */
1045 static int set_dump_bitmap(uint64_t last_pfn, uint64_t pfn, bool value,
1046 uint8_t *buf, DumpState *s)
1047 {
1048 off_t old_offset, new_offset;
1049 off_t offset_bitmap1, offset_bitmap2;
1050 uint32_t byte, bit;
1051 size_t bitmap_bufsize = dump_bitmap_get_bufsize(s);
1052 size_t bits_per_buf = bitmap_bufsize * CHAR_BIT;
1053
1054 /* should not set the previous place */
1055 assert(last_pfn <= pfn);
1056
1057 /*
1058 * If the bit that needs to be set is not cached in buf, first flush the
1059 * data in buf to the vmcore.
1060 * Making new_offset bigger than old_offset also syncs any remaining data
1061 * into the vmcore.
1062 */
1063 old_offset = bitmap_bufsize * (last_pfn / bits_per_buf);
1064 new_offset = bitmap_bufsize * (pfn / bits_per_buf);
1065
1066 while (old_offset < new_offset) {
1067 /* calculate the offset and write dump_bitmap */
1068 offset_bitmap1 = s->offset_dump_bitmap + old_offset;
1069 if (write_buffer(s->fd, offset_bitmap1, buf,
1070 bitmap_bufsize) < 0) {
1071 return -1;
1072 }
1073
1074 /* dump level 1 is chosen, so 1st and 2nd bitmap are same */
1075 offset_bitmap2 = s->offset_dump_bitmap + s->len_dump_bitmap +
1076 old_offset;
1077 if (write_buffer(s->fd, offset_bitmap2, buf,
1078 bitmap_bufsize) < 0) {
1079 return -1;
1080 }
1081
1082 memset(buf, 0, bitmap_bufsize);
1083 old_offset += bitmap_bufsize;
1084 }
1085
1086 /* get the exact place of the bit in the buf, and set it */
1087 byte = (pfn % bits_per_buf) / CHAR_BIT;
1088 bit = (pfn % bits_per_buf) % CHAR_BIT;
1089 if (value) {
1090 buf[byte] |= 1u << bit;
1091 } else {
1092 buf[byte] &= ~(1u << bit);
1093 }
1094
1095 return 0;
1096 }
1097
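/* Convert between guest physical addresses and page frame numbers, taking ARCH_PFN_OFFSET into account */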
1098 static uint64_t dump_paddr_to_pfn(DumpState *s, uint64_t addr)
1099 {
1100 int target_page_shift = ctz32(s->dump_info.page_size);
1101
1102 return (addr >> target_page_shift) - ARCH_PFN_OFFSET;
1103 }
1104
1105 static uint64_t dump_pfn_to_paddr(DumpState *s, uint64_t pfn)
1106 {
1107 int target_page_shift = ctz32(s->dump_info.page_size);
1108
1109 return (pfn + ARCH_PFN_OFFSET) << target_page_shift;
1110 }
1111
1112 /*
1113 * Examine every page and return the page frame number and the address of the
1114 * page. bufptr can be NULL. Note: the blocks here are supposed to reflect
1115 * guest-phys blocks, so block->target_start and block->target_end should be
1116 * integral multiples of the target page size.
1117 */
1118 static bool get_next_page(GuestPhysBlock **blockptr, uint64_t *pfnptr,
1119 uint8_t **bufptr, DumpState *s)
1120 {
1121 GuestPhysBlock *block = *blockptr;
1122 hwaddr addr, target_page_mask = ~((hwaddr)s->dump_info.page_size - 1);
1123 uint8_t *buf;
1124
1125 /* block == NULL means the start of the iteration */
1126 if (!block) {
1127 block = QTAILQ_FIRST(&s->guest_phys_blocks.head);
1128 *blockptr = block;
1129 assert((block->target_start & ~target_page_mask) == 0);
1130 assert((block->target_end & ~target_page_mask) == 0);
1131 *pfnptr = dump_paddr_to_pfn(s, block->target_start);
1132 if (bufptr) {
1133 *bufptr = block->host_addr;
1134 }
1135 return true;
1136 }
1137
1138 *pfnptr = *pfnptr + 1;
1139 addr = dump_pfn_to_paddr(s, *pfnptr);
1140
1141 if ((addr >= block->target_start) &&
1142 (addr + s->dump_info.page_size <= block->target_end)) {
1143 buf = block->host_addr + (addr - block->target_start);
1144 } else {
1145 /* the next page is in the next block */
1146 block = QTAILQ_NEXT(block, next);
1147 *blockptr = block;
1148 if (!block) {
1149 return false;
1150 }
1151 assert((block->target_start & ~target_page_mask) == 0);
1152 assert((block->target_end & ~target_page_mask) == 0);
1153 *pfnptr = dump_paddr_to_pfn(s, block->target_start);
1154 buf = block->host_addr;
1155 }
1156
1157 if (bufptr) {
1158 *bufptr = buf;
1159 }
1160
1161 return true;
1162 }
1163
1164 static void write_dump_bitmap(DumpState *s, Error **errp)
1165 {
1166 int ret = 0;
1167 uint64_t last_pfn, pfn;
1168 void *dump_bitmap_buf;
1169 size_t num_dumpable;
1170 GuestPhysBlock *block_iter = NULL;
1171 size_t bitmap_bufsize = dump_bitmap_get_bufsize(s);
1172 size_t bits_per_buf = bitmap_bufsize * CHAR_BIT;
1173
1174 /* dump_bitmap_buf is used to store dump_bitmap temporarily */
1175 dump_bitmap_buf = g_malloc0(bitmap_bufsize);
1176
1177 num_dumpable = 0;
1178 last_pfn = 0;
1179
1180 /*
1181 * Examine memory page by page, and set the bit in dump_bitmap that
1182 * corresponds to each existing page.
1183 */
1184 while (get_next_page(&block_iter, &pfn, NULL, s)) {
1185 ret = set_dump_bitmap(last_pfn, pfn, true, dump_bitmap_buf, s);
1186 if (ret < 0) {
1187 error_setg(errp, "dump: failed to set dump_bitmap");
1188 goto out;
1189 }
1190
1191 last_pfn = pfn;
1192 num_dumpable++;
1193 }
1194
1195 /*
1196 * set_dump_bitmap always leaves the most recently set bit un-synced. Here we
1197 * set the remaining bits from last_pfn to the end of the bitmap buffer to
1198 * 0. With those set, the un-synced bit is flushed into the vmcore.
1199 */
1200 if (num_dumpable > 0) {
1201 ret = set_dump_bitmap(last_pfn, last_pfn + bits_per_buf, false,
1202 dump_bitmap_buf, s);
1203 if (ret < 0) {
1204 error_setg(errp, "dump: failed to sync dump_bitmap");
1205 goto out;
1206 }
1207 }
1208
1209 /* number of dumpable pages that will be dumped later */
1210 s->num_dumpable = num_dumpable;
1211
1212 out:
1213 g_free(dump_bitmap_buf);
1214 }
1215
1216 static void prepare_data_cache(DataCache *data_cache, DumpState *s,
1217 off_t offset)
1218 {
1219 data_cache->fd = s->fd;
1220 data_cache->data_size = 0;
1221 data_cache->buf_size = 4 * dump_bitmap_get_bufsize(s);
1222 data_cache->buf = g_malloc0(data_cache->buf_size);
1223 data_cache->offset = offset;
1224 }
1225
1226 static int write_cache(DataCache *dc, const void *buf, size_t size,
1227 bool flag_sync)
1228 {
1229 /*
1230 * dc->buf_size must not be less than size, otherwise the cache will
1231 * never be large enough
1232 */
1233 assert(size <= dc->buf_size);
1234
1235 /*
1236 * If flag_sync is set, synchronize the data in dc->buf into the vmcore;
1237 * otherwise check whether there is enough space to cache the data in buf,
1238 * and if not, write the data in dc->buf to dc->fd and reset dc->buf.
1239 */
1240 if ((!flag_sync && dc->data_size + size > dc->buf_size) ||
1241 (flag_sync && dc->data_size > 0)) {
1242 if (write_buffer(dc->fd, dc->offset, dc->buf, dc->data_size) < 0) {
1243 return -1;
1244 }
1245
1246 dc->offset += dc->data_size;
1247 dc->data_size = 0;
1248 }
1249
1250 if (!flag_sync) {
1251 memcpy(dc->buf + dc->data_size, buf, size);
1252 dc->data_size += size;
1253 }
1254
1255 return 0;
1256 }
1257
1258 static void free_data_cache(DataCache *data_cache)
1259 {
1260 g_free(data_cache->buf);
1261 }
1262
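/* Return the worst-case buffer size needed to hold one compressed page for the chosen compression format */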
1263 static size_t get_len_buf_out(size_t page_size, uint32_t flag_compress)
1264 {
1265 switch (flag_compress) {
1266 case DUMP_DH_COMPRESSED_ZLIB:
1267 return compressBound(page_size);
1268
1269 case DUMP_DH_COMPRESSED_LZO:
1270 /*
1271 * LZO will expand incompressible data by a little amount. Please check
1272 * the following URL to see the expansion calculation:
1273 * http://www.oberhumer.com/opensource/lzo/lzofaq.php
1274 */
1275 return page_size + page_size / 16 + 64 + 3;
1276
1277 #ifdef CONFIG_SNAPPY
1278 case DUMP_DH_COMPRESSED_SNAPPY:
1279 return snappy_max_compressed_length(page_size);
1280 #endif
1281 }
1282 return 0;
1283 }
1284
1285 static void write_dump_pages(DumpState *s, Error **errp)
1286 {
1287 int ret = 0;
1288 DataCache page_desc, page_data;
1289 size_t len_buf_out, size_out;
1290 #ifdef CONFIG_LZO
1291 lzo_bytep wrkmem = NULL;
1292 #endif
1293 uint8_t *buf_out = NULL;
1294 off_t offset_desc, offset_data;
1295 PageDescriptor pd, pd_zero;
1296 uint8_t *buf;
1297 GuestPhysBlock *block_iter = NULL;
1298 uint64_t pfn_iter;
1299
1300 /* get offset of page_desc and page_data in dump file */
1301 offset_desc = s->offset_page;
1302 offset_data = offset_desc + sizeof(PageDescriptor) * s->num_dumpable;
1303
1304 prepare_data_cache(&page_desc, s, offset_desc);
1305 prepare_data_cache(&page_data, s, offset_data);
1306
1307 /* prepare buffer to store compressed data */
1308 len_buf_out = get_len_buf_out(s->dump_info.page_size, s->flag_compress);
1309 assert(len_buf_out != 0);
1310
1311 #ifdef CONFIG_LZO
1312 wrkmem = g_malloc(LZO1X_1_MEM_COMPRESS);
1313 #endif
1314
1315 buf_out = g_malloc(len_buf_out);
1316
1317 /*
1318 * init zero page's page_desc and page_data, because every zero page
1319 * uses the same page_data
1320 */
1321 pd_zero.size = cpu_to_dump32(s, s->dump_info.page_size);
1322 pd_zero.flags = cpu_to_dump32(s, 0);
1323 pd_zero.offset = cpu_to_dump64(s, offset_data);
1324 pd_zero.page_flags = cpu_to_dump64(s, 0);
1325 buf = g_malloc0(s->dump_info.page_size);
1326 ret = write_cache(&page_data, buf, s->dump_info.page_size, false);
1327 g_free(buf);
1328 if (ret < 0) {
1329 error_setg(errp, "dump: failed to write page data (zero page)");
1330 goto out;
1331 }
1332
1333 offset_data += s->dump_info.page_size;
1334
1335 /*
1336 * dump memory to the vmcore page by page. All zero pages are represented by
1337 * the single zero page stored at the start of the page section.
1338 */
1339 while (get_next_page(&block_iter, &pfn_iter, &buf, s)) {
1340 /* check zero page */
1341 if (buffer_is_zero(buf, s->dump_info.page_size)) {
1342 ret = write_cache(&page_desc, &pd_zero, sizeof(PageDescriptor),
1343 false);
1344 if (ret < 0) {
1345 error_setg(errp, "dump: failed to write page desc");
1346 goto out;
1347 }
1348 } else {
1349 /*
1350 * not zero page, then:
1351 * 1. compress the page
1352 * 2. write the compressed page into the cache of page_data
1353 * 3. get page desc of the compressed page and write it into the
1354 * cache of page_desc
1355 *
1356 * only one compression format will be used here, because
1357 * s->flag_compress is set. But when compression fails,
1358 * we fall back to saving the page uncompressed.
1359 */
1360 size_out = len_buf_out;
1361 if ((s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) &&
1362 (compress2(buf_out, (uLongf *)&size_out, buf,
1363 s->dump_info.page_size, Z_BEST_SPEED) == Z_OK) &&
1364 (size_out < s->dump_info.page_size)) {
1365 pd.flags = cpu_to_dump32(s, DUMP_DH_COMPRESSED_ZLIB);
1366 pd.size = cpu_to_dump32(s, size_out);
1367
1368 ret = write_cache(&page_data, buf_out, size_out, false);
1369 if (ret < 0) {
1370 error_setg(errp, "dump: failed to write page data");
1371 goto out;
1372 }
1373 #ifdef CONFIG_LZO
1374 } else if ((s->flag_compress & DUMP_DH_COMPRESSED_LZO) &&
1375 (lzo1x_1_compress(buf, s->dump_info.page_size, buf_out,
1376 (lzo_uint *)&size_out, wrkmem) == LZO_E_OK) &&
1377 (size_out < s->dump_info.page_size)) {
1378 pd.flags = cpu_to_dump32(s, DUMP_DH_COMPRESSED_LZO);
1379 pd.size = cpu_to_dump32(s, size_out);
1380
1381 ret = write_cache(&page_data, buf_out, size_out, false);
1382 if (ret < 0) {
1383 error_setg(errp, "dump: failed to write page data");
1384 goto out;
1385 }
1386 #endif
1387 #ifdef CONFIG_SNAPPY
1388 } else if ((s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) &&
1389 (snappy_compress((char *)buf, s->dump_info.page_size,
1390 (char *)buf_out, &size_out) == SNAPPY_OK) &&
1391 (size_out < s->dump_info.page_size)) {
1392 pd.flags = cpu_to_dump32(s, DUMP_DH_COMPRESSED_SNAPPY);
1393 pd.size = cpu_to_dump32(s, size_out);
1394
1395 ret = write_cache(&page_data, buf_out, size_out, false);
1396 if (ret < 0) {
1397 error_setg(errp, "dump: failed to write page data");
1398 goto out;
1399 }
1400 #endif
1401 } else {
1402 /*
1403 * fall back to saving the page uncompressed; size_out should be
1404 * assigned the target's page size
1405 */
1406 pd.flags = cpu_to_dump32(s, 0);
1407 size_out = s->dump_info.page_size;
1408 pd.size = cpu_to_dump32(s, size_out);
1409
1410 ret = write_cache(&page_data, buf,
1411 s->dump_info.page_size, false);
1412 if (ret < 0) {
1413 error_setg(errp, "dump: failed to write page data");
1414 goto out;
1415 }
1416 }
1417
1418 /* get and write page desc here */
1419 pd.page_flags = cpu_to_dump64(s, 0);
1420 pd.offset = cpu_to_dump64(s, offset_data);
1421 offset_data += size_out;
1422
1423 ret = write_cache(&page_desc, &pd, sizeof(PageDescriptor), false);
1424 if (ret < 0) {
1425 error_setg(errp, "dump: failed to write page desc");
1426 goto out;
1427 }
1428 }
1429 s->written_size += s->dump_info.page_size;
1430 }
1431
1432 ret = write_cache(&page_desc, NULL, 0, true);
1433 if (ret < 0) {
1434 error_setg(errp, "dump: failed to sync cache for page_desc");
1435 goto out;
1436 }
1437 ret = write_cache(&page_data, NULL, 0, true);
1438 if (ret < 0) {
1439 error_setg(errp, "dump: failed to sync cache for page_data");
1440 goto out;
1441 }
1442
1443 out:
1444 free_data_cache(&page_desc);
1445 free_data_cache(&page_data);
1446
1447 #ifdef CONFIG_LZO
1448 g_free(wrkmem);
1449 #endif
1450
1451 g_free(buf_out);
1452 }
1453
1454 static void create_kdump_vmcore(DumpState *s, Error **errp)
1455 {
1456 ERRP_GUARD();
1457 int ret;
1458
1459 /*
1460 * the kdump-compressed format is:
1461 * File offset
1462 * +------------------------------------------+ 0x0
1463 * | main header (struct disk_dump_header) |
1464 * |------------------------------------------+ block 1
1465 * | sub header (struct kdump_sub_header) |
1466 * |------------------------------------------+ block 2
1467 * | 1st-dump_bitmap |
1468 * |------------------------------------------+ block 2 + X blocks
1469 * | 2nd-dump_bitmap | (aligned by block)
1470 * |------------------------------------------+ block 2 + 2 * X blocks
1471 * | page desc for pfn 0 (struct page_desc) | (aligned by block)
1472 * | page desc for pfn 1 (struct page_desc) |
1473 * | : |
1474 * |------------------------------------------| (not aligned by block)
1475 * | page data (pfn 0) |
1476 * | page data (pfn 1) |
1477 * | : |
1478 * +------------------------------------------+
1479 */
1480
1481 ret = write_start_flat_header(s->fd);
1482 if (ret < 0) {
1483 error_setg(errp, "dump: failed to write start flat header");
1484 return;
1485 }
1486
1487 write_dump_header(s, errp);
1488 if (*errp) {
1489 return;
1490 }
1491
1492 write_dump_bitmap(s, errp);
1493 if (*errp) {
1494 return;
1495 }
1496
1497 write_dump_pages(s, errp);
1498 if (*errp) {
1499 return;
1500 }
1501
1502 ret = write_end_flat_header(s->fd);
1503 if (ret < 0) {
1504 error_setg(errp, "dump: failed to write end flat header");
1505 return;
1506 }
1507 }
1508
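/*
 * Select the first guest-phys block to dump and return the offset within it
 * at which dumping starts; returns -1 if the filter range overlaps no block.
 */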
1509 static ram_addr_t get_start_block(DumpState *s)
1510 {
1511 GuestPhysBlock *block;
1512
1513 if (!s->has_filter) {
1514 s->next_block = QTAILQ_FIRST(&s->guest_phys_blocks.head);
1515 return 0;
1516 }
1517
1518 QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) {
1519 if (block->target_start >= s->begin + s->length ||
1520 block->target_end <= s->begin) {
1521 /* This block is out of the range */
1522 continue;
1523 }
1524
1525 s->next_block = block;
1526 if (s->begin > block->target_start) {
1527 s->start = s->begin - block->target_start;
1528 } else {
1529 s->start = 0;
1530 }
1531 return s->start;
1532 }
1533
1534 return -1;
1535 }
1536
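/* Compute s->max_mapnr, the PFN just past the last guest-phys block; it sizes the dump bitmap and is recorded in the kdump headers */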
1537 static void get_max_mapnr(DumpState *s)
1538 {
1539 GuestPhysBlock *last_block;
1540
1541 last_block = QTAILQ_LAST(&s->guest_phys_blocks.head);
1542 s->max_mapnr = dump_paddr_to_pfn(s, last_block->target_end);
1543 }
1544
1545 static DumpState dump_state_global = { .status = DUMP_STATUS_NONE };
1546
1547 static void dump_state_prepare(DumpState *s)
1548 {
1549 /* zero the struct, setting status to active */
1550 *s = (DumpState) { .status = DUMP_STATUS_ACTIVE };
1551 }
1552
1553 bool qemu_system_dump_in_progress(void)
1554 {
1555 DumpState *state = &dump_state_global;
1556 return (qatomic_read(&state->status) == DUMP_STATUS_ACTIVE);
1557 }
1558
1559 /* calculate the total size of memory to be dumped (taking the filter into
1560 * account) */
1561 static int64_t dump_calculate_size(DumpState *s)
1562 {
1563 GuestPhysBlock *block;
1564 int64_t size = 0, total = 0, left = 0, right = 0;
1565
1566 QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) {
1567 if (s->has_filter) {
1568 /* calculate the overlapped region. */
1569 left = MAX(s->begin, block->target_start);
1570 right = MIN(s->begin + s->length, block->target_end);
1571 size = right - left;
1572 size = size > 0 ? size : 0;
1573 } else {
1574 /* count the whole region in */
1575 size = (block->target_end - block->target_start);
1576 }
1577 total += size;
1578 }
1579
1580 return total;
1581 }
1582
1583 static void vmcoreinfo_update_phys_base(DumpState *s)
1584 {
1585 uint64_t size, note_head_size, name_size, phys_base;
1586 char **lines;
1587 uint8_t *vmci;
1588 size_t i;
1589
1590 if (!note_name_equal(s, s->guest_note, "VMCOREINFO")) {
1591 return;
1592 }
1593
1594 get_note_sizes(s, s->guest_note, &note_head_size, &name_size, &size);
1595 note_head_size = ROUND_UP(note_head_size, 4);
1596
1597 vmci = s->guest_note + note_head_size + ROUND_UP(name_size, 4);
1598 *(vmci + size) = '\0';
1599
1600 lines = g_strsplit((char *)vmci, "\n", -1);
1601 for (i = 0; lines[i]; i++) {
1602 const char *prefix = NULL;
1603
1604 if (s->dump_info.d_machine == EM_X86_64) {
1605 prefix = "NUMBER(phys_base)=";
1606 } else if (s->dump_info.d_machine == EM_AARCH64) {
1607 prefix = "NUMBER(PHYS_OFFSET)=";
1608 }
1609
1610 if (prefix && g_str_has_prefix(lines[i], prefix)) {
1611 if (qemu_strtou64(lines[i] + strlen(prefix), NULL, 16,
1612 &phys_base) < 0) {
1613 warn_report("Failed to read %s", prefix);
1614 } else {
1615 s->dump_info.phys_base = phys_base;
1616 }
1617 break;
1618 }
1619 }
1620
1621 g_strfreev(lines);
1622 }
1623
1624 static void dump_init(DumpState *s, int fd, bool has_format,
1625 DumpGuestMemoryFormat format, bool paging, bool has_filter,
1626 int64_t begin, int64_t length, Error **errp)
1627 {
1628 ERRP_GUARD();
1629 VMCoreInfoState *vmci = vmcoreinfo_find();
1630 CPUState *cpu;
1631 int nr_cpus;
1632 int ret;
1633
1634 s->has_format = has_format;
1635 s->format = format;
1636 s->written_size = 0;
1637
1638 /* the kdump-compressed format conflicts with paging and filter */
1639 if (has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) {
1640 assert(!paging && !has_filter);
1641 }
1642
1643 if (runstate_is_running()) {
1644 vm_stop(RUN_STATE_SAVE_VM);
1645 s->resume = true;
1646 } else {
1647 s->resume = false;
1648 }
1649
1650 /* If we use KVM, we should synchronize the registers before we get dump
1651 * info or physmap info.
1652 */
1653 cpu_synchronize_all_states();
1654 nr_cpus = 0;
1655 CPU_FOREACH(cpu) {
1656 nr_cpus++;
1657 }
1658
1659 s->fd = fd;
1660 s->has_filter = has_filter;
1661 s->begin = begin;
1662 s->length = length;
1663
1664 memory_mapping_list_init(&s->list);
1665
1666 guest_phys_blocks_init(&s->guest_phys_blocks);
1667 guest_phys_blocks_append(&s->guest_phys_blocks);
1668 s->total_size = dump_calculate_size(s);
1669 #ifdef DEBUG_DUMP_GUEST_MEMORY
1670 fprintf(stderr, "DUMP: total memory to dump: %lu\n", s->total_size);
1671 #endif
1672
1673 /* it does not make sense to dump non-existent memory */
1674 if (!s->total_size) {
1675 error_setg(errp, "dump: no guest memory to dump");
1676 goto cleanup;
1677 }
1678
1679 s->start = get_start_block(s);
1680 if (s->start == -1) {
1681 error_setg(errp, QERR_INVALID_PARAMETER, "begin");
1682 goto cleanup;
1683 }
1684
1685 /* get dump info: endian, class and architecture.
1686 * If the target architecture is not supported, cpu_get_dump_info() will
1687 * return -1.
1688 */
1689 ret = cpu_get_dump_info(&s->dump_info, &s->guest_phys_blocks);
1690 if (ret < 0) {
1691 error_setg(errp, QERR_UNSUPPORTED);
1692 goto cleanup;
1693 }
1694
1695 if (!s->dump_info.page_size) {
1696 s->dump_info.page_size = TARGET_PAGE_SIZE;
1697 }
1698
1699 s->note_size = cpu_get_note_size(s->dump_info.d_class,
1700 s->dump_info.d_machine, nr_cpus);
1701 if (s->note_size < 0) {
1702 error_setg(errp, QERR_UNSUPPORTED);
1703 goto cleanup;
1704 }
1705
1706 /*
1707 * The goal of this block is to (a) update the previously guessed
1708 * phys_base, (b) copy the guest note out of the guest.
1709 * Failure to do so is not fatal for dumping.
1710 */
1711 if (vmci) {
1712 uint64_t addr, note_head_size, name_size, desc_size;
1713 uint32_t size;
1714 uint16_t format;
1715
1716 note_head_size = dump_is_64bit(s) ?
1717 sizeof(Elf64_Nhdr) : sizeof(Elf32_Nhdr);
1718
1719 format = le16_to_cpu(vmci->vmcoreinfo.guest_format);
1720 size = le32_to_cpu(vmci->vmcoreinfo.size);
1721 addr = le64_to_cpu(vmci->vmcoreinfo.paddr);
1722 if (!vmci->has_vmcoreinfo) {
1723 warn_report("guest note is not present");
1724 } else if (size < note_head_size || size > MAX_GUEST_NOTE_SIZE) {
1725 warn_report("guest note size is invalid: %" PRIu32, size);
1726 } else if (format != FW_CFG_VMCOREINFO_FORMAT_ELF) {
1727 warn_report("guest note format is unsupported: %" PRIu16, format);
1728 } else {
1729 s->guest_note = g_malloc(size + 1); /* +1 for adding \0 */
1730 cpu_physical_memory_read(addr, s->guest_note, size);
1731
1732 get_note_sizes(s, s->guest_note, NULL, &name_size, &desc_size);
1733 s->guest_note_size = ELF_NOTE_SIZE(note_head_size, name_size,
1734 desc_size);
1735 if (name_size > MAX_GUEST_NOTE_SIZE ||
1736 desc_size > MAX_GUEST_NOTE_SIZE ||
1737 s->guest_note_size > size) {
1738 warn_report("Invalid guest note header");
1739 g_free(s->guest_note);
1740 s->guest_note = NULL;
1741 } else {
1742 vmcoreinfo_update_phys_base(s);
1743 s->note_size += s->guest_note_size;
1744 }
1745 }
1746 }
1747
1748 /* get memory mapping */
1749 if (paging) {
1750 qemu_get_guest_memory_mapping(&s->list, &s->guest_phys_blocks, errp);
1751 if (*errp) {
1752 goto cleanup;
1753 }
1754 } else {
1755 qemu_get_guest_simple_memory_mapping(&s->list, &s->guest_phys_blocks);
1756 }
1757
1758 s->nr_cpus = nr_cpus;
1759
1760 get_max_mapnr(s);
1761
1762 uint64_t tmp;
1763 tmp = DIV_ROUND_UP(DIV_ROUND_UP(s->max_mapnr, CHAR_BIT),
1764 s->dump_info.page_size);
1765 s->len_dump_bitmap = tmp * s->dump_info.page_size;
1766
1767 /* init for kdump-compressed format */
1768 if (has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) {
1769 switch (format) {
1770 case DUMP_GUEST_MEMORY_FORMAT_KDUMP_ZLIB:
1771 s->flag_compress = DUMP_DH_COMPRESSED_ZLIB;
1772 break;
1773
1774 case DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO:
1775 #ifdef CONFIG_LZO
1776 if (lzo_init() != LZO_E_OK) {
1777 error_setg(errp, "failed to initialize the LZO library");
1778 goto cleanup;
1779 }
1780 #endif
1781 s->flag_compress = DUMP_DH_COMPRESSED_LZO;
1782 break;
1783
1784 case DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY:
1785 s->flag_compress = DUMP_DH_COMPRESSED_SNAPPY;
1786 break;
1787
1788 default:
1789 s->flag_compress = 0;
1790 }
1791
1792 return;
1793 }
1794
1795 if (s->has_filter) {
1796 memory_mapping_filter(&s->list, s->begin, s->length);
1797 }
1798
1799 /*
1800 * calculate phdr_num
1801 *
1802 * the type of ehdr->e_phnum is uint16_t, so we should avoid overflow
1803 */
1804 s->phdr_num = 1; /* PT_NOTE */
1805 if (s->list.num < UINT16_MAX - 2) {
1806 s->shdr_num = 0;
1807 s->phdr_num += s->list.num;
1808 } else {
1809 /* sh_info of section 0 holds the real number of phdrs */
1810 s->shdr_num = 1;
1811
1812 /* the type of shdr->sh_info is uint32_t, so we should avoid overflow */
1813 if (s->list.num <= UINT32_MAX - 1) {
1814 s->phdr_num += s->list.num;
1815 } else {
1816 s->phdr_num = UINT32_MAX;
1817 }
1818 }
1819
1820 if (dump_is_64bit(s)) {
1821 s->phdr_offset = sizeof(Elf64_Ehdr);
1822 s->shdr_offset = s->phdr_offset + sizeof(Elf64_Phdr) * s->phdr_num;
1823 s->note_offset = s->shdr_offset + sizeof(Elf64_Shdr) * s->shdr_num;
1824 s->memory_offset = s->note_offset + s->note_size;
1825 } else {
1826
1827 s->phdr_offset = sizeof(Elf32_Ehdr);
1828 s->shdr_offset = s->phdr_offset + sizeof(Elf32_Phdr) * s->phdr_num;
1829 s->note_offset = s->shdr_offset + sizeof(Elf32_Shdr) * s->shdr_num;
1830 s->memory_offset = s->note_offset + s->note_size;
1831 }
1832
1833 return;
1834
1835 cleanup:
1836 dump_cleanup(s);
1837 }
1838
1839 /* this operation might be time consuming. */
1840 static void dump_process(DumpState *s, Error **errp)
1841 {
1842 ERRP_GUARD();
1843 DumpQueryResult *result = NULL;
1844
1845 if (s->has_format && s->format == DUMP_GUEST_MEMORY_FORMAT_WIN_DMP) {
1846 #ifdef TARGET_X86_64
1847 create_win_dump(s, errp);
1848 #endif
1849 } else if (s->has_format && s->format != DUMP_GUEST_MEMORY_FORMAT_ELF) {
1850 create_kdump_vmcore(s, errp);
1851 } else {
1852 create_vmcore(s, errp);
1853 }
1854
1855 /* make sure status is written after written_size updates */
1856 smp_wmb();
1857 qatomic_set(&s->status,
1858 (*errp ? DUMP_STATUS_FAILED : DUMP_STATUS_COMPLETED));
1859
1860 /* send DUMP_COMPLETED message (unconditionally) */
1861 result = qmp_query_dump(NULL);
1862 /* should never fail */
1863 assert(result);
1864 qapi_event_send_dump_completed(result, !!*errp, (*errp ?
1865 error_get_pretty(*errp) : NULL));
1866 qapi_free_DumpQueryResult(result);
1867
1868 dump_cleanup(s);
1869 }
1870
1871 static void *dump_thread(void *data)
1872 {
1873 DumpState *s = (DumpState *)data;
1874 dump_process(s, NULL);
1875 return NULL;
1876 }
1877
1878 DumpQueryResult *qmp_query_dump(Error **errp)
1879 {
1880 DumpQueryResult *result = g_new(DumpQueryResult, 1);
1881 DumpState *state = &dump_state_global;
1882 result->status = qatomic_read(&state->status);
1883 /* make sure we are reading status and written_size in order */
1884 smp_rmb();
1885 result->completed = state->written_size;
1886 result->total = state->total_size;
1887 return result;
1888 }
1889
1890 void qmp_dump_guest_memory(bool paging, const char *file,
1891 bool has_detach, bool detach,
1892 bool has_begin, int64_t begin, bool has_length,
1893 int64_t length, bool has_format,
1894 DumpGuestMemoryFormat format, Error **errp)
1895 {
1896 ERRP_GUARD();
1897 const char *p;
1898 int fd = -1;
1899 DumpState *s;
1900 bool detach_p = false;
1901
1902 if (runstate_check(RUN_STATE_INMIGRATE)) {
1903 error_setg(errp, "Dump not allowed during incoming migration.");
1904 return;
1905 }
1906
1907 /* if there is a dump in the background, we should wait until it has
1908 * finished */
1909 if (qemu_system_dump_in_progress()) {
1910 error_setg(errp, "There is a dump in process, please wait.");
1911 return;
1912 }
1913
1914 /*
1915 * the kdump-compressed format needs the whole memory dumped, so paging
1916 * and filtering are not supported here.
1917 */
1918 if ((has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) &&
1919 (paging || has_begin || has_length)) {
1920 error_setg(errp, "kdump-compressed format doesn't support paging or "
1921 "filter");
1922 return;
1923 }
1924 if (has_begin && !has_length) {
1925 error_setg(errp, QERR_MISSING_PARAMETER, "length");
1926 return;
1927 }
1928 if (!has_begin && has_length) {
1929 error_setg(errp, QERR_MISSING_PARAMETER, "begin");
1930 return;
1931 }
1932 if (has_detach) {
1933 detach_p = detach;
1934 }
1935
1936 /* check whether lzo/snappy is supported */
1937 #ifndef CONFIG_LZO
1938 if (has_format && format == DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO) {
1939 error_setg(errp, "kdump-lzo is not available now");
1940 return;
1941 }
1942 #endif
1943
1944 #ifndef CONFIG_SNAPPY
1945 if (has_format && format == DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY) {
1946 error_setg(errp, "kdump-snappy is not available now");
1947 return;
1948 }
1949 #endif
1950
1951 #ifndef TARGET_X86_64
1952 if (has_format && format == DUMP_GUEST_MEMORY_FORMAT_WIN_DMP) {
1953 error_setg(errp, "Windows dump is only available for x86-64");
1954 return;
1955 }
1956 #endif
1957
1958 #if !defined(WIN32)
1959 if (strstart(file, "fd:", &p)) {
1960 fd = monitor_get_fd(monitor_cur(), p, errp);
1961 if (fd == -1) {
1962 return;
1963 }
1964 }
1965 #endif
1966
1967 if (strstart(file, "file:", &p)) {
1968 fd = qemu_open_old(p, O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, S_IRUSR);
1969 if (fd < 0) {
1970 error_setg_file_open(errp, errno, p);
1971 return;
1972 }
1973 }
1974
1975 if (fd == -1) {
1976 error_setg(errp, QERR_INVALID_PARAMETER, "protocol");
1977 return;
1978 }
1979
1980 if (!dump_migration_blocker) {
1981 error_setg(&dump_migration_blocker,
1982 "Live migration disabled: dump-guest-memory in progress");
1983 }
1984
1985 /*
1986 * Allow this even with -only-migratable, but forbid migration while the
1987 * guest memory dump is in progress.
1988 */
1989 if (migrate_add_blocker_internal(dump_migration_blocker, errp)) {
1990 /* Remember to release the fd before passing it over to dump state */
1991 close(fd);
1992 return;
1993 }
1994
1995 s = &dump_state_global;
1996 dump_state_prepare(s);
1997
1998 dump_init(s, fd, has_format, format, paging, has_begin,
1999 begin, length, errp);
2000 if (*errp) {
2001 qatomic_set(&s->status, DUMP_STATUS_FAILED);
2002 return;
2003 }
2004
2005 if (detach_p) {
2006 /* detached dump */
2007 s->detached = true;
2008 qemu_thread_create(&s->dump_thread, "dump_thread", dump_thread,
2009 s, QEMU_THREAD_DETACHED);
2010 } else {
2011 /* sync dump */
2012 dump_process(s, errp);
2013 }
2014 }
2015
2016 DumpGuestMemoryCapability *qmp_query_dump_guest_memory_capability(Error **errp)
2017 {
2018 DumpGuestMemoryCapability *cap =
2019 g_new0(DumpGuestMemoryCapability, 1);
2020 DumpGuestMemoryFormatList **tail = &cap->formats;
2021
2022 /* elf is always available */
2023 QAPI_LIST_APPEND(tail, DUMP_GUEST_MEMORY_FORMAT_ELF);
2024
2025 /* kdump-zlib is always available */
2026 QAPI_LIST_APPEND(tail, DUMP_GUEST_MEMORY_FORMAT_KDUMP_ZLIB);
2027
2028 /* add new item if kdump-lzo is available */
2029 #ifdef CONFIG_LZO
2030 QAPI_LIST_APPEND(tail, DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO);
2031 #endif
2032
2033 /* add new item if kdump-snappy is available */
2034 #ifdef CONFIG_SNAPPY
2035 QAPI_LIST_APPEND(tail, DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY);
2036 #endif
2037
2038 /* Windows dump is available only if target is x86_64 */
2039 #ifdef TARGET_X86_64
2040 QAPI_LIST_APPEND(tail, DUMP_GUEST_MEMORY_FORMAT_WIN_DMP);
2041 #endif
2042
2043 return cap;
2044 }