1 /*
2 * QEMU dump
3 *
4 * Copyright Fujitsu, Corp. 2011, 2012
5 *
6 * Authors:
7 * Wen Congyang <wency@cn.fujitsu.com>
8 *
9 * This work is licensed under the terms of the GNU GPL, version 2 or later.
10 * See the COPYING file in the top-level directory.
11 *
12 */
13
14 #include "qemu/osdep.h"
15 #include "qemu/cutils.h"
16 #include "elf.h"
17 #include "exec/hwaddr.h"
18 #include "monitor/monitor.h"
19 #include "sysemu/kvm.h"
20 #include "sysemu/dump.h"
21 #include "sysemu/memory_mapping.h"
22 #include "sysemu/runstate.h"
23 #include "sysemu/cpus.h"
24 #include "qapi/error.h"
25 #include "qapi/qapi-commands-dump.h"
26 #include "qapi/qapi-events-dump.h"
27 #include "qapi/qmp/qerror.h"
28 #include "qemu/error-report.h"
29 #include "qemu/main-loop.h"
30 #include "hw/misc/vmcoreinfo.h"
31 #include "migration/blocker.h"
32
33 #ifdef TARGET_X86_64
34 #include "win_dump.h"
35 #endif
36
37 #include <zlib.h>
38 #ifdef CONFIG_LZO
39 #include <lzo/lzo1x.h>
40 #endif
41 #ifdef CONFIG_SNAPPY
42 #include <snappy-c.h>
43 #endif
44 #ifndef ELF_MACHINE_UNAME
45 #define ELF_MACHINE_UNAME "Unknown"
46 #endif
47
48 #define MAX_GUEST_NOTE_SIZE (1 << 20) /* 1MB should be enough */
49
50 static Error *dump_migration_blocker;
51
52 #define ELF_NOTE_SIZE(hdr_size, name_size, desc_size) \
53 ((DIV_ROUND_UP((hdr_size), 4) + \
54 DIV_ROUND_UP((name_size), 4) + \
55 DIV_ROUND_UP((desc_size), 4)) * 4)
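
/*
 * A worked example, assuming an Elf64_Nhdr (12 bytes), the 5-byte name
 * "CORE\0" and a 6-byte descriptor: each part is rounded up to a 4-byte
 * multiple, giving (3 + 2 + 2) * 4 = 28 bytes for the whole note.
 */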
56
57 uint16_t cpu_to_dump16(DumpState *s, uint16_t val)
58 {
59 if (s->dump_info.d_endian == ELFDATA2LSB) {
60 val = cpu_to_le16(val);
61 } else {
62 val = cpu_to_be16(val);
63 }
64
65 return val;
66 }
67
68 uint32_t cpu_to_dump32(DumpState *s, uint32_t val)
69 {
70 if (s->dump_info.d_endian == ELFDATA2LSB) {
71 val = cpu_to_le32(val);
72 } else {
73 val = cpu_to_be32(val);
74 }
75
76 return val;
77 }
78
79 uint64_t cpu_to_dump64(DumpState *s, uint64_t val)
80 {
81 if (s->dump_info.d_endian == ELFDATA2LSB) {
82 val = cpu_to_le64(val);
83 } else {
84 val = cpu_to_be64(val);
85 }
86
87 return val;
88 }
89
90 static int dump_cleanup(DumpState *s)
91 {
92 guest_phys_blocks_free(&s->guest_phys_blocks);
93 memory_mapping_list_free(&s->list);
94 close(s->fd);
95 g_free(s->guest_note);
96 s->guest_note = NULL;
97 if (s->resume) {
98 if (s->detached) {
99 qemu_mutex_lock_iothread();
100 }
101 vm_start();
102 if (s->detached) {
103 qemu_mutex_unlock_iothread();
104 }
105 }
106 migrate_del_blocker(dump_migration_blocker);
107
108 return 0;
109 }
110
111 static int fd_write_vmcore(const void *buf, size_t size, void *opaque)
112 {
113 DumpState *s = opaque;
114 size_t written_size;
115
116 written_size = qemu_write_full(s->fd, buf, size);
117 if (written_size != size) {
118 return -errno;
119 }
120
121 return 0;
122 }
123
124 static void write_elf64_header(DumpState *s, Error **errp)
125 {
126 /*
127 * phnum in the ELF header is only 16 bits wide; if we have more
128 * segments, we set phnum to PN_XNUM and write the real number of
129 * segments to a special section.
130 */
131 uint16_t phnum = MIN(s->phdr_num, PN_XNUM);
132 Elf64_Ehdr elf_header;
133 int ret;
134
135 memset(&elf_header, 0, sizeof(Elf64_Ehdr));
136 memcpy(&elf_header, ELFMAG, SELFMAG);
137 elf_header.e_ident[EI_CLASS] = ELFCLASS64;
138 elf_header.e_ident[EI_DATA] = s->dump_info.d_endian;
139 elf_header.e_ident[EI_VERSION] = EV_CURRENT;
140 elf_header.e_type = cpu_to_dump16(s, ET_CORE);
141 elf_header.e_machine = cpu_to_dump16(s, s->dump_info.d_machine);
142 elf_header.e_version = cpu_to_dump32(s, EV_CURRENT);
143 elf_header.e_ehsize = cpu_to_dump16(s, sizeof(elf_header));
144 elf_header.e_phoff = cpu_to_dump64(s, s->phdr_offset);
145 elf_header.e_phentsize = cpu_to_dump16(s, sizeof(Elf64_Phdr));
146 elf_header.e_phnum = cpu_to_dump16(s, phnum);
147 if (s->shdr_num) {
148 elf_header.e_shoff = cpu_to_dump64(s, s->shdr_offset);
149 elf_header.e_shentsize = cpu_to_dump16(s, sizeof(Elf64_Shdr));
150 elf_header.e_shnum = cpu_to_dump16(s, s->shdr_num);
151 }
152
153 ret = fd_write_vmcore(&elf_header, sizeof(elf_header), s);
154 if (ret < 0) {
155 error_setg_errno(errp, -ret, "dump: failed to write elf header");
156 }
157 }
158
159 static void write_elf32_header(DumpState *s, Error **errp)
160 {
161 /*
162 * phnum in the ELF header is only 16 bits wide; if we have more
163 * segments, we set phnum to PN_XNUM and write the real number of
164 * segments to a special section.
165 */
166 uint16_t phnum = MIN(s->phdr_num, PN_XNUM);
167 Elf32_Ehdr elf_header;
168 int ret;
169
170 memset(&elf_header, 0, sizeof(Elf32_Ehdr));
171 memcpy(&elf_header, ELFMAG, SELFMAG);
172 elf_header.e_ident[EI_CLASS] = ELFCLASS32;
173 elf_header.e_ident[EI_DATA] = s->dump_info.d_endian;
174 elf_header.e_ident[EI_VERSION] = EV_CURRENT;
175 elf_header.e_type = cpu_to_dump16(s, ET_CORE);
176 elf_header.e_machine = cpu_to_dump16(s, s->dump_info.d_machine);
177 elf_header.e_version = cpu_to_dump32(s, EV_CURRENT);
178 elf_header.e_ehsize = cpu_to_dump16(s, sizeof(elf_header));
179 elf_header.e_phoff = cpu_to_dump32(s, s->phdr_offset);
180 elf_header.e_phentsize = cpu_to_dump16(s, sizeof(Elf32_Phdr));
181 elf_header.e_phnum = cpu_to_dump16(s, phnum);
182 if (s->shdr_num) {
183 elf_header.e_shoff = cpu_to_dump32(s, s->shdr_offset);
184 elf_header.e_shentsize = cpu_to_dump16(s, sizeof(Elf32_Shdr));
185 elf_header.e_shnum = cpu_to_dump16(s, s->shdr_num);
186 }
187
188 ret = fd_write_vmcore(&elf_header, sizeof(elf_header), s);
189 if (ret < 0) {
190 error_setg_errno(errp, -ret, "dump: failed to write elf header");
191 }
192 }
193
194 static void write_elf64_load(DumpState *s, MemoryMapping *memory_mapping,
195 int phdr_index, hwaddr offset,
196 hwaddr filesz, Error **errp)
197 {
198 Elf64_Phdr phdr;
199 int ret;
200
201 memset(&phdr, 0, sizeof(Elf64_Phdr));
202 phdr.p_type = cpu_to_dump32(s, PT_LOAD);
203 phdr.p_offset = cpu_to_dump64(s, offset);
204 phdr.p_paddr = cpu_to_dump64(s, memory_mapping->phys_addr);
205 phdr.p_filesz = cpu_to_dump64(s, filesz);
206 phdr.p_memsz = cpu_to_dump64(s, memory_mapping->length);
207 phdr.p_vaddr = cpu_to_dump64(s, memory_mapping->virt_addr) ?: phdr.p_paddr;
208
209 assert(memory_mapping->length >= filesz);
210
211 ret = fd_write_vmcore(&phdr, sizeof(Elf64_Phdr), s);
212 if (ret < 0) {
213 error_setg_errno(errp, -ret,
214 "dump: failed to write program header table");
215 }
216 }
217
218 static void write_elf32_load(DumpState *s, MemoryMapping *memory_mapping,
219 int phdr_index, hwaddr offset,
220 hwaddr filesz, Error **errp)
221 {
222 Elf32_Phdr phdr;
223 int ret;
224
225 memset(&phdr, 0, sizeof(Elf32_Phdr));
226 phdr.p_type = cpu_to_dump32(s, PT_LOAD);
227 phdr.p_offset = cpu_to_dump32(s, offset);
228 phdr.p_paddr = cpu_to_dump32(s, memory_mapping->phys_addr);
229 phdr.p_filesz = cpu_to_dump32(s, filesz);
230 phdr.p_memsz = cpu_to_dump32(s, memory_mapping->length);
231 phdr.p_vaddr =
232 cpu_to_dump32(s, memory_mapping->virt_addr) ?: phdr.p_paddr;
233
234 assert(memory_mapping->length >= filesz);
235
236 ret = fd_write_vmcore(&phdr, sizeof(Elf32_Phdr), s);
237 if (ret < 0) {
238 error_setg_errno(errp, -ret,
239 "dump: failed to write program header table");
240 }
241 }
242
243 static void write_elf64_note(DumpState *s, Error **errp)
244 {
245 Elf64_Phdr phdr;
246 int ret;
247
248 memset(&phdr, 0, sizeof(Elf64_Phdr));
249 phdr.p_type = cpu_to_dump32(s, PT_NOTE);
250 phdr.p_offset = cpu_to_dump64(s, s->note_offset);
251 phdr.p_paddr = 0;
252 phdr.p_filesz = cpu_to_dump64(s, s->note_size);
253 phdr.p_memsz = cpu_to_dump64(s, s->note_size);
254 phdr.p_vaddr = 0;
255
256 ret = fd_write_vmcore(&phdr, sizeof(Elf64_Phdr), s);
257 if (ret < 0) {
258 error_setg_errno(errp, -ret,
259 "dump: failed to write program header table");
260 }
261 }
262
263 static inline int cpu_index(CPUState *cpu)
264 {
265 return cpu->cpu_index + 1;
266 }
267
268 static void write_guest_note(WriteCoreDumpFunction f, DumpState *s,
269 Error **errp)
270 {
271 int ret;
272
273 if (s->guest_note) {
274 ret = f(s->guest_note, s->guest_note_size, s);
275 if (ret < 0) {
276 error_setg(errp, "dump: failed to write guest note");
277 }
278 }
279 }
280
281 static void write_elf64_notes(WriteCoreDumpFunction f, DumpState *s,
282 Error **errp)
283 {
284 CPUState *cpu;
285 int ret;
286 int id;
287
288 CPU_FOREACH(cpu) {
289 id = cpu_index(cpu);
290 ret = cpu_write_elf64_note(f, cpu, id, s);
291 if (ret < 0) {
292 error_setg(errp, "dump: failed to write elf notes");
293 return;
294 }
295 }
296
297 CPU_FOREACH(cpu) {
298 ret = cpu_write_elf64_qemunote(f, cpu, s);
299 if (ret < 0) {
300 error_setg(errp, "dump: failed to write CPU status");
301 return;
302 }
303 }
304
305 write_guest_note(f, s, errp);
306 }
307
308 static void write_elf32_note(DumpState *s, Error **errp)
309 {
310 Elf32_Phdr phdr;
311 int ret;
312
313 memset(&phdr, 0, sizeof(Elf32_Phdr));
314 phdr.p_type = cpu_to_dump32(s, PT_NOTE);
315 phdr.p_offset = cpu_to_dump32(s, s->note_offset);
316 phdr.p_paddr = 0;
317 phdr.p_filesz = cpu_to_dump32(s, s->note_size);
318 phdr.p_memsz = cpu_to_dump32(s, s->note_size);
319 phdr.p_vaddr = 0;
320
321 ret = fd_write_vmcore(&phdr, sizeof(Elf32_Phdr), s);
322 if (ret < 0) {
323 error_setg_errno(errp, -ret,
324 "dump: failed to write program header table");
325 }
326 }
327
328 static void write_elf32_notes(WriteCoreDumpFunction f, DumpState *s,
329 Error **errp)
330 {
331 CPUState *cpu;
332 int ret;
333 int id;
334
335 CPU_FOREACH(cpu) {
336 id = cpu_index(cpu);
337 ret = cpu_write_elf32_note(f, cpu, id, s);
338 if (ret < 0) {
339 error_setg(errp, "dump: failed to write elf notes");
340 return;
341 }
342 }
343
344 CPU_FOREACH(cpu) {
345 ret = cpu_write_elf32_qemunote(f, cpu, s);
346 if (ret < 0) {
347 error_setg(errp, "dump: failed to write CPU status");
348 return;
349 }
350 }
351
352 write_guest_note(f, s, errp);
353 }
354
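
/*
 * type selects the section header width: 0 writes an Elf32_Shdr, any other
 * value an Elf64_Shdr. Only the placeholder section 0 is written; its
 * sh_info carries the real segment count once phnum is capped at PN_XNUM.
 */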
355 static void write_elf_section(DumpState *s, int type, Error **errp)
356 {
357 Elf32_Shdr shdr32;
358 Elf64_Shdr shdr64;
359 int shdr_size;
360 void *shdr;
361 int ret;
362
363 if (type == 0) {
364 shdr_size = sizeof(Elf32_Shdr);
365 memset(&shdr32, 0, shdr_size);
366 shdr32.sh_info = cpu_to_dump32(s, s->phdr_num);
367 shdr = &shdr32;
368 } else {
369 shdr_size = sizeof(Elf64_Shdr);
370 memset(&shdr64, 0, shdr_size);
371 shdr64.sh_info = cpu_to_dump32(s, s->phdr_num);
372 shdr = &shdr64;
373 }
374
375 ret = fd_write_vmcore(shdr, shdr_size, s);
376 if (ret < 0) {
377 error_setg_errno(errp, -ret,
378 "dump: failed to write section header table");
379 }
380 }
381
382 static void write_data(DumpState *s, void *buf, int length, Error **errp)
383 {
384 int ret;
385
386 ret = fd_write_vmcore(buf, length, s);
387 if (ret < 0) {
388 error_setg_errno(errp, -ret, "dump: failed to save memory");
389 } else {
390 s->written_size += length;
391 }
392 }
393
394 /* write the memory to vmcore. 1 page per I/O. */
395 static void write_memory(DumpState *s, GuestPhysBlock *block, ram_addr_t start,
396 int64_t size, Error **errp)
397 {
398 ERRP_GUARD();
399 int64_t i;
400
401 for (i = 0; i < size / s->dump_info.page_size; i++) {
402 write_data(s, block->host_addr + start + i * s->dump_info.page_size,
403 s->dump_info.page_size, errp);
404 if (*errp) {
405 return;
406 }
407 }
408
409 if ((size % s->dump_info.page_size) != 0) {
410 write_data(s, block->host_addr + start + i * s->dump_info.page_size,
411 size % s->dump_info.page_size, errp);
412 if (*errp) {
413 return;
414 }
415 }
416 }
417
418 /* get the memory's offset and size in the vmcore */
419 static void get_offset_range(hwaddr phys_addr,
420 ram_addr_t mapping_length,
421 DumpState *s,
422 hwaddr *p_offset,
423 hwaddr *p_filesz)
424 {
425 GuestPhysBlock *block;
426 hwaddr offset = s->memory_offset;
427 int64_t size_in_block, start;
428
429 /* When the memory is not stored into vmcore, offset will be -1 */
430 *p_offset = -1;
431 *p_filesz = 0;
432
433 if (s->has_filter) {
434 if (phys_addr < s->begin || phys_addr >= s->begin + s->length) {
435 return;
436 }
437 }
438
439 QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) {
440 if (s->has_filter) {
441 if (block->target_start >= s->begin + s->length ||
442 block->target_end <= s->begin) {
443 /* This block is out of the range */
444 continue;
445 }
446
447 if (s->begin <= block->target_start) {
448 start = block->target_start;
449 } else {
450 start = s->begin;
451 }
452
453 size_in_block = block->target_end - start;
454 if (s->begin + s->length < block->target_end) {
455 size_in_block -= block->target_end - (s->begin + s->length);
456 }
457 } else {
458 start = block->target_start;
459 size_in_block = block->target_end - block->target_start;
460 }
461
462 if (phys_addr >= start && phys_addr < start + size_in_block) {
463 *p_offset = phys_addr - start + offset;
464
465 /* The offset range mapped from the vmcore file must not spill over
466 * the GuestPhysBlock, clamp it. The rest of the mapping will be
467 * zero-filled in memory at load time; see
468 * <http://refspecs.linuxbase.org/elf/gabi4+/ch5.pheader.html>.
469 */
470 *p_filesz = phys_addr + mapping_length <= start + size_in_block ?
471 mapping_length :
472 size_in_block - (phys_addr - start);
473 return;
474 }
475
476 offset += size_in_block;
477 }
478 }
479
480 static void write_elf_loads(DumpState *s, Error **errp)
481 {
482 ERRP_GUARD();
483 hwaddr offset, filesz;
484 MemoryMapping *memory_mapping;
485 uint32_t phdr_index = 1;
486
487 QTAILQ_FOREACH(memory_mapping, &s->list.head, next) {
488 get_offset_range(memory_mapping->phys_addr,
489 memory_mapping->length,
490 s, &offset, &filesz);
491 if (s->dump_info.d_class == ELFCLASS64) {
492 write_elf64_load(s, memory_mapping, phdr_index++, offset,
493 filesz, errp);
494 } else {
495 write_elf32_load(s, memory_mapping, phdr_index++, offset,
496 filesz, errp);
497 }
498
499 if (*errp) {
500 return;
501 }
502
503 if (phdr_index >= s->phdr_num) {
504 break;
505 }
506 }
507 }
508
509 /* write elf header, PT_NOTE and elf note to vmcore. */
510 static void dump_begin(DumpState *s, Error **errp)
511 {
512 ERRP_GUARD();
513
514 /*
515 * the vmcore's format is:
516 * --------------
517 * | elf header |
518 * --------------
519 * | PT_NOTE |
520 * --------------
521 * | PT_LOAD |
522 * --------------
523 * | ...... |
524 * --------------
525 * | PT_LOAD |
526 * --------------
527 * | sec_hdr |
528 * --------------
529 * | elf note |
530 * --------------
531 * | memory |
532 * --------------
533 *
534 * we only know where the memory is saved after we write the elf notes
535 * into the vmcore.
536 */
537
538 /* write elf header to vmcore */
539 if (s->dump_info.d_class == ELFCLASS64) {
540 write_elf64_header(s, errp);
541 } else {
542 write_elf32_header(s, errp);
543 }
544 if (*errp) {
545 return;
546 }
547
548 if (s->dump_info.d_class == ELFCLASS64) {
549 /* write PT_NOTE to vmcore */
550 write_elf64_note(s, errp);
551 if (*errp) {
552 return;
553 }
554
555 /* write all PT_LOAD to vmcore */
556 write_elf_loads(s, errp);
557 if (*errp) {
558 return;
559 }
560
561 /* write section to vmcore */
562 if (s->shdr_num) {
563 write_elf_section(s, 1, errp);
564 if (*errp) {
565 return;
566 }
567 }
568
569 /* write notes to vmcore */
570 write_elf64_notes(fd_write_vmcore, s, errp);
571 if (*errp) {
572 return;
573 }
574 } else {
575 /* write PT_NOTE to vmcore */
576 write_elf32_note(s, errp);
577 if (*errp) {
578 return;
579 }
580
581 /* write all PT_LOAD to vmcore */
582 write_elf_loads(s, errp);
583 if (*errp) {
584 return;
585 }
586
587 /* write section to vmcore */
588 if (s->shdr_num) {
589 write_elf_section(s, 0, errp);
590 if (*errp) {
591 return;
592 }
593 }
594
595 /* write notes to vmcore */
596 write_elf32_notes(fd_write_vmcore, s, errp);
597 if (*errp) {
598 return;
599 }
600 }
601 }
602
603 static int get_next_block(DumpState *s, GuestPhysBlock *block)
604 {
605 while (1) {
606 block = QTAILQ_NEXT(block, next);
607 if (!block) {
608 /* no more blocks */
609 return 1;
610 }
611
612 s->start = 0;
613 s->next_block = block;
614 if (s->has_filter) {
615 if (block->target_start >= s->begin + s->length ||
616 block->target_end <= s->begin) {
617 /* This block is out of the range */
618 continue;
619 }
620
621 if (s->begin > block->target_start) {
622 s->start = s->begin - block->target_start;
623 }
624 }
625
626 return 0;
627 }
628 }
629
630 /* write all memory to vmcore */
631 static void dump_iterate(DumpState *s, Error **errp)
632 {
633 ERRP_GUARD();
634 GuestPhysBlock *block;
635 int64_t size;
636
637 do {
638 block = s->next_block;
639
640 size = block->target_end - block->target_start;
641 if (s->has_filter) {
642 size -= s->start;
643 if (s->begin + s->length < block->target_end) {
644 size -= block->target_end - (s->begin + s->length);
645 }
646 }
647 write_memory(s, block, s->start, size, errp);
648 if (*errp) {
649 return;
650 }
651
652 } while (!get_next_block(s, block));
653 }
654
655 static void create_vmcore(DumpState *s, Error **errp)
656 {
657 ERRP_GUARD();
658
659 dump_begin(s, errp);
660 if (*errp) {
661 return;
662 }
663
664 dump_iterate(s, errp);
665 }
666
667 static int write_start_flat_header(int fd)
668 {
669 MakedumpfileHeader *mh;
670 int ret = 0;
671
672 QEMU_BUILD_BUG_ON(sizeof *mh > MAX_SIZE_MDF_HEADER);
673 mh = g_malloc0(MAX_SIZE_MDF_HEADER);
674
675 memcpy(mh->signature, MAKEDUMPFILE_SIGNATURE,
676 MIN(sizeof mh->signature, sizeof MAKEDUMPFILE_SIGNATURE));
677
678 mh->type = cpu_to_be64(TYPE_FLAT_HEADER);
679 mh->version = cpu_to_be64(VERSION_FLAT_HEADER);
680
681 size_t written_size;
682 written_size = qemu_write_full(fd, mh, MAX_SIZE_MDF_HEADER);
683 if (written_size != MAX_SIZE_MDF_HEADER) {
684 ret = -1;
685 }
686
687 g_free(mh);
688 return ret;
689 }
690
691 static int write_end_flat_header(int fd)
692 {
693 MakedumpfileDataHeader mdh;
694
695 mdh.offset = END_FLAG_FLAT_HEADER;
696 mdh.buf_size = END_FLAG_FLAT_HEADER;
697
698 size_t written_size;
699 written_size = qemu_write_full(fd, &mdh, sizeof(mdh));
700 if (written_size != sizeof(mdh)) {
701 return -1;
702 }
703
704 return 0;
705 }
706
707 static int write_buffer(int fd, off_t offset, const void *buf, size_t size)
708 {
709 size_t written_size;
710 MakedumpfileDataHeader mdh;
711
712 mdh.offset = cpu_to_be64(offset);
713 mdh.buf_size = cpu_to_be64(size);
714
715 written_size = qemu_write_full(fd, &mdh, sizeof(mdh));
716 if (written_size != sizeof(mdh)) {
717 return -1;
718 }
719
720 written_size = qemu_write_full(fd, buf, size);
721 if (written_size != size) {
722 return -1;
723 }
724
725 return 0;
726 }
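
/*
 * Together, the three helpers above produce the makedumpfile flattened
 * format: a fixed-size start header, then a stream of
 * (MakedumpfileDataHeader, data) records giving each chunk's intended file
 * offset and size, terminated by a trailer whose fields are both
 * END_FLAG_FLAT_HEADER. makedumpfile -R can rearrange such a stream back
 * into a normal kdump-compressed file.
 */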
727
728 static int buf_write_note(const void *buf, size_t size, void *opaque)
729 {
730 DumpState *s = opaque;
731
732 /* not enough space left in note_buf */
733 if (s->note_buf_offset + size > s->note_size) {
734 return -1;
735 }
736
737 memcpy(s->note_buf + s->note_buf_offset, buf, size);
738
739 s->note_buf_offset += size;
740
741 return 0;
742 }
743
744 /*
745 * This function retrieves the various sizes from an ELF note header.
746 *
747 * @note has to be a valid ELF note. The returned sizes are unmodified
748 * (not padded or rounded up to a multiple of 4).
749 */
750 static void get_note_sizes(DumpState *s, const void *note,
751 uint64_t *note_head_size,
752 uint64_t *name_size,
753 uint64_t *desc_size)
754 {
755 uint64_t note_head_sz;
756 uint64_t name_sz;
757 uint64_t desc_sz;
758
759 if (s->dump_info.d_class == ELFCLASS64) {
760 const Elf64_Nhdr *hdr = note;
761 note_head_sz = sizeof(Elf64_Nhdr);
762 name_sz = tswap64(hdr->n_namesz);
763 desc_sz = tswap64(hdr->n_descsz);
764 } else {
765 const Elf32_Nhdr *hdr = note;
766 note_head_sz = sizeof(Elf32_Nhdr);
767 name_sz = tswap32(hdr->n_namesz);
768 desc_sz = tswap32(hdr->n_descsz);
769 }
770
771 if (note_head_size) {
772 *note_head_size = note_head_sz;
773 }
774 if (name_size) {
775 *name_size = name_sz;
776 }
777 if (desc_size) {
778 *desc_size = desc_sz;
779 }
780 }
781
782 static bool note_name_equal(DumpState *s,
783 const uint8_t *note, const char *name)
784 {
785 int len = strlen(name) + 1;
786 uint64_t head_size, name_size;
787
788 get_note_sizes(s, note, &head_size, &name_size, NULL);
789 head_size = ROUND_UP(head_size, 4);
790
791 return name_size == len && memcmp(note + head_size, name, len) == 0;
792 }
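
/*
 * ELF note layout assumed above: the Nhdr is followed by the name, padded
 * to a 4-byte boundary, and then by the descriptor. Rounding head_size up
 * therefore lands exactly on the start of the name field.
 */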
793
794 /* write common header, sub header and elf note to vmcore */
795 static void create_header32(DumpState *s, Error **errp)
796 {
797 ERRP_GUARD();
798 DiskDumpHeader32 *dh = NULL;
799 KdumpSubHeader32 *kh = NULL;
800 size_t size;
801 uint32_t block_size;
802 uint32_t sub_hdr_size;
803 uint32_t bitmap_blocks;
804 uint32_t status = 0;
805 uint64_t offset_note;
806
807 /* write the common header; the kdump-compressed format used is version 6 */
808 size = sizeof(DiskDumpHeader32);
809 dh = g_malloc0(size);
810
811 memcpy(dh->signature, KDUMP_SIGNATURE, SIG_LEN);
812 dh->header_version = cpu_to_dump32(s, 6);
813 block_size = s->dump_info.page_size;
814 dh->block_size = cpu_to_dump32(s, block_size);
815 sub_hdr_size = sizeof(struct KdumpSubHeader32) + s->note_size;
816 sub_hdr_size = DIV_ROUND_UP(sub_hdr_size, block_size);
817 dh->sub_hdr_size = cpu_to_dump32(s, sub_hdr_size);
818 /* dh->max_mapnr may be truncated; the full 64-bit value is in kh.max_mapnr_64 */
819 dh->max_mapnr = cpu_to_dump32(s, MIN(s->max_mapnr, UINT_MAX));
820 dh->nr_cpus = cpu_to_dump32(s, s->nr_cpus);
821 bitmap_blocks = DIV_ROUND_UP(s->len_dump_bitmap, block_size) * 2;
822 dh->bitmap_blocks = cpu_to_dump32(s, bitmap_blocks);
823 strncpy(dh->utsname.machine, ELF_MACHINE_UNAME, sizeof(dh->utsname.machine));
824
825 if (s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) {
826 status |= DUMP_DH_COMPRESSED_ZLIB;
827 }
828 #ifdef CONFIG_LZO
829 if (s->flag_compress & DUMP_DH_COMPRESSED_LZO) {
830 status |= DUMP_DH_COMPRESSED_LZO;
831 }
832 #endif
833 #ifdef CONFIG_SNAPPY
834 if (s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) {
835 status |= DUMP_DH_COMPRESSED_SNAPPY;
836 }
837 #endif
838 dh->status = cpu_to_dump32(s, status);
839
840 if (write_buffer(s->fd, 0, dh, size) < 0) {
841 error_setg(errp, "dump: failed to write disk dump header");
842 goto out;
843 }
844
845 /* write sub header */
846 size = sizeof(KdumpSubHeader32);
847 kh = g_malloc0(size);
848
849 /* 64bit max_mapnr_64 */
850 kh->max_mapnr_64 = cpu_to_dump64(s, s->max_mapnr);
851 kh->phys_base = cpu_to_dump32(s, s->dump_info.phys_base);
852 kh->dump_level = cpu_to_dump32(s, DUMP_LEVEL);
853
854 offset_note = DISKDUMP_HEADER_BLOCKS * block_size + size;
855 if (s->guest_note &&
856 note_name_equal(s, s->guest_note, "VMCOREINFO")) {
857 uint64_t hsize, name_size, size_vmcoreinfo_desc, offset_vmcoreinfo;
858
859 get_note_sizes(s, s->guest_note,
860 &hsize, &name_size, &size_vmcoreinfo_desc);
861 offset_vmcoreinfo = offset_note + s->note_size - s->guest_note_size +
862 (DIV_ROUND_UP(hsize, 4) + DIV_ROUND_UP(name_size, 4)) * 4;
863 kh->offset_vmcoreinfo = cpu_to_dump64(s, offset_vmcoreinfo);
864 kh->size_vmcoreinfo = cpu_to_dump32(s, size_vmcoreinfo_desc);
865 }
866
867 kh->offset_note = cpu_to_dump64(s, offset_note);
868 kh->note_size = cpu_to_dump32(s, s->note_size);
869
870 if (write_buffer(s->fd, DISKDUMP_HEADER_BLOCKS *
871 block_size, kh, size) < 0) {
872 error_setg(errp, "dump: failed to write kdump sub header");
873 goto out;
874 }
875
876 /* write note */
877 s->note_buf = g_malloc0(s->note_size);
878 s->note_buf_offset = 0;
879
880 /* use s->note_buf to store notes temporarily */
881 write_elf32_notes(buf_write_note, s, errp);
882 if (*errp) {
883 goto out;
884 }
885 if (write_buffer(s->fd, offset_note, s->note_buf,
886 s->note_size) < 0) {
887 error_setg(errp, "dump: failed to write notes");
888 goto out;
889 }
890
891 /* get offset of dump_bitmap */
892 s->offset_dump_bitmap = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size) *
893 block_size;
894
895 /* get offset of page */
896 s->offset_page = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size + bitmap_blocks) *
897 block_size;
898
899 out:
900 g_free(dh);
901 g_free(kh);
902 g_free(s->note_buf);
903 }
904
905 /* write common header, sub header and elf note to vmcore */
906 static void create_header64(DumpState *s, Error **errp)
907 {
908 ERRP_GUARD();
909 DiskDumpHeader64 *dh = NULL;
910 KdumpSubHeader64 *kh = NULL;
911 size_t size;
912 uint32_t block_size;
913 uint32_t sub_hdr_size;
914 uint32_t bitmap_blocks;
915 uint32_t status = 0;
916 uint64_t offset_note;
917
918 /* write the common header; the kdump-compressed format used is version 6 */
919 size = sizeof(DiskDumpHeader64);
920 dh = g_malloc0(size);
921
922 memcpy(dh->signature, KDUMP_SIGNATURE, SIG_LEN);
923 dh->header_version = cpu_to_dump32(s, 6);
924 block_size = s->dump_info.page_size;
925 dh->block_size = cpu_to_dump32(s, block_size);
926 sub_hdr_size = sizeof(struct KdumpSubHeader64) + s->note_size;
927 sub_hdr_size = DIV_ROUND_UP(sub_hdr_size, block_size);
928 dh->sub_hdr_size = cpu_to_dump32(s, sub_hdr_size);
929 /* dh->max_mapnr may be truncated; the full 64-bit value is in kh.max_mapnr_64 */
930 dh->max_mapnr = cpu_to_dump32(s, MIN(s->max_mapnr, UINT_MAX));
931 dh->nr_cpus = cpu_to_dump32(s, s->nr_cpus);
932 bitmap_blocks = DIV_ROUND_UP(s->len_dump_bitmap, block_size) * 2;
933 dh->bitmap_blocks = cpu_to_dump32(s, bitmap_blocks);
934 strncpy(dh->utsname.machine, ELF_MACHINE_UNAME, sizeof(dh->utsname.machine));
935
936 if (s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) {
937 status |= DUMP_DH_COMPRESSED_ZLIB;
938 }
939 #ifdef CONFIG_LZO
940 if (s->flag_compress & DUMP_DH_COMPRESSED_LZO) {
941 status |= DUMP_DH_COMPRESSED_LZO;
942 }
943 #endif
944 #ifdef CONFIG_SNAPPY
945 if (s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) {
946 status |= DUMP_DH_COMPRESSED_SNAPPY;
947 }
948 #endif
949 dh->status = cpu_to_dump32(s, status);
950
951 if (write_buffer(s->fd, 0, dh, size) < 0) {
952 error_setg(errp, "dump: failed to write disk dump header");
953 goto out;
954 }
955
956 /* write sub header */
957 size = sizeof(KdumpSubHeader64);
958 kh = g_malloc0(size);
959
960 /* 64bit max_mapnr_64 */
961 kh->max_mapnr_64 = cpu_to_dump64(s, s->max_mapnr);
962 kh->phys_base = cpu_to_dump64(s, s->dump_info.phys_base);
963 kh->dump_level = cpu_to_dump32(s, DUMP_LEVEL);
964
965 offset_note = DISKDUMP_HEADER_BLOCKS * block_size + size;
966 if (s->guest_note &&
967 note_name_equal(s, s->guest_note, "VMCOREINFO")) {
968 uint64_t hsize, name_size, size_vmcoreinfo_desc, offset_vmcoreinfo;
969
970 get_note_sizes(s, s->guest_note,
971 &hsize, &name_size, &size_vmcoreinfo_desc);
972 offset_vmcoreinfo = offset_note + s->note_size - s->guest_note_size +
973 (DIV_ROUND_UP(hsize, 4) + DIV_ROUND_UP(name_size, 4)) * 4;
974 kh->offset_vmcoreinfo = cpu_to_dump64(s, offset_vmcoreinfo);
975 kh->size_vmcoreinfo = cpu_to_dump64(s, size_vmcoreinfo_desc);
976 }
977
978 kh->offset_note = cpu_to_dump64(s, offset_note);
979 kh->note_size = cpu_to_dump64(s, s->note_size);
980
981 if (write_buffer(s->fd, DISKDUMP_HEADER_BLOCKS *
982 block_size, kh, size) < 0) {
983 error_setg(errp, "dump: failed to write kdump sub header");
984 goto out;
985 }
986
987 /* write note */
988 s->note_buf = g_malloc0(s->note_size);
989 s->note_buf_offset = 0;
990
991 /* use s->note_buf to store notes temporarily */
992 write_elf64_notes(buf_write_note, s, errp);
993 if (*errp) {
994 goto out;
995 }
996
997 if (write_buffer(s->fd, offset_note, s->note_buf,
998 s->note_size) < 0) {
999 error_setg(errp, "dump: failed to write notes");
1000 goto out;
1001 }
1002
1003 /* get offset of dump_bitmap */
1004 s->offset_dump_bitmap = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size) *
1005 block_size;
1006
1007 /* get offset of page */
1008 s->offset_page = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size + bitmap_blocks) *
1009 block_size;
1010
1011 out:
1012 g_free(dh);
1013 g_free(kh);
1014 g_free(s->note_buf);
1015 }
1016
1017 static void write_dump_header(DumpState *s, Error **errp)
1018 {
1019 if (s->dump_info.d_class == ELFCLASS32) {
1020 create_header32(s, errp);
1021 } else {
1022 create_header64(s, errp);
1023 }
1024 }
1025
1026 static size_t dump_bitmap_get_bufsize(DumpState *s)
1027 {
1028 return s->dump_info.page_size;
1029 }
1030
1031 /*
1032 * set the dump_bitmap sequentially: bits before last_pfn must not be
1033 * rewritten, so to set the very first bit, pass last_pfn and pfn as 0.
1034 * set_dump_bitmap will always leave the most recently set bit un-synced;
1035 * setting bit (last_pfn + sizeof(buf) * 8) to 0 flushes the content of
1036 * buf into the vmcore, i.e. it synchronizes the un-synced bits.
1037 */
1038 static int set_dump_bitmap(uint64_t last_pfn, uint64_t pfn, bool value,
1039 uint8_t *buf, DumpState *s)
1040 {
1041 off_t old_offset, new_offset;
1042 off_t offset_bitmap1, offset_bitmap2;
1043 uint32_t byte, bit;
1044 size_t bitmap_bufsize = dump_bitmap_get_bufsize(s);
1045 size_t bits_per_buf = bitmap_bufsize * CHAR_BIT;
1046
1047 /* the bit to set must not precede one already set */
1048 assert(last_pfn <= pfn);
1049
1050 /*
1051 * if the bit to be set is not cached in buf, first flush the data in
1052 * buf to the vmcore.
1053 * passing a new_offset bigger than old_offset can also be used to sync
1054 * the remaining data into the vmcore.
1055 */
1056 old_offset = bitmap_bufsize * (last_pfn / bits_per_buf);
1057 new_offset = bitmap_bufsize * (pfn / bits_per_buf);
1058
1059 while (old_offset < new_offset) {
1060 /* calculate the offset and write dump_bitmap */
1061 offset_bitmap1 = s->offset_dump_bitmap + old_offset;
1062 if (write_buffer(s->fd, offset_bitmap1, buf,
1063 bitmap_bufsize) < 0) {
1064 return -1;
1065 }
1066
1067 /* dump level 1 is chosen, so 1st and 2nd bitmap are same */
1068 offset_bitmap2 = s->offset_dump_bitmap + s->len_dump_bitmap +
1069 old_offset;
1070 if (write_buffer(s->fd, offset_bitmap2, buf,
1071 bitmap_bufsize) < 0) {
1072 return -1;
1073 }
1074
1075 memset(buf, 0, bitmap_bufsize);
1076 old_offset += bitmap_bufsize;
1077 }
1078
1079 /* get the exact place of the bit in the buf, and set it */
1080 byte = (pfn % bits_per_buf) / CHAR_BIT;
1081 bit = (pfn % bits_per_buf) % CHAR_BIT;
1082 if (value) {
1083 buf[byte] |= 1u << bit;
1084 } else {
1085 buf[byte] &= ~(1u << bit);
1086 }
1087
1088 return 0;
1089 }
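
/*
 * Example, assuming 4 KiB pages: bits_per_buf is 32768, so with
 * last_pfn = 0 and pfn = 40000 one full buffer covering pfns 0..32767 is
 * flushed to both bitmaps, and bit 7232 (40000 % 32768) of the freshly
 * zeroed buffer is then set.
 */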
1090
1091 static uint64_t dump_paddr_to_pfn(DumpState *s, uint64_t addr)
1092 {
1093 int target_page_shift = ctz32(s->dump_info.page_size);
1094
1095 return (addr >> target_page_shift) - ARCH_PFN_OFFSET;
1096 }
1097
1098 static uint64_t dump_pfn_to_paddr(DumpState *s, uint64_t pfn)
1099 {
1100 int target_page_shift = ctz32(s->dump_info.page_size);
1101
1102 return (pfn + ARCH_PFN_OFFSET) << target_page_shift;
1103 }
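
/*
 * With 4 KiB pages, ctz32(4096) = 12, so these two helpers are inverse
 * shifts by 12 bits (plus the ARCH_PFN_OFFSET bias), e.g. paddr
 * 0x12345000 <-> pfn 0x12345 when ARCH_PFN_OFFSET is 0.
 */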
1104
1105 /*
1106 * examine every page and return the page frame number and the address of
1107 * the page. bufptr can be NULL. note: the blocks here are supposed to
1108 * reflect guest-phys blocks, so block->target_start and block->target_end
1109 * should be integral multiples of the target page size.
1110 */
1111 static bool get_next_page(GuestPhysBlock **blockptr, uint64_t *pfnptr,
1112 uint8_t **bufptr, DumpState *s)
1113 {
1114 GuestPhysBlock *block = *blockptr;
1115 hwaddr addr, target_page_mask = ~((hwaddr)s->dump_info.page_size - 1);
1116 uint8_t *buf;
1117
1118 /* block == NULL means the start of the iteration */
1119 if (!block) {
1120 block = QTAILQ_FIRST(&s->guest_phys_blocks.head);
1121 *blockptr = block;
1122 assert((block->target_start & ~target_page_mask) == 0);
1123 assert((block->target_end & ~target_page_mask) == 0);
1124 *pfnptr = dump_paddr_to_pfn(s, block->target_start);
1125 if (bufptr) {
1126 *bufptr = block->host_addr;
1127 }
1128 return true;
1129 }
1130
1131 *pfnptr = *pfnptr + 1;
1132 addr = dump_pfn_to_paddr(s, *pfnptr);
1133
1134 if ((addr >= block->target_start) &&
1135 (addr + s->dump_info.page_size <= block->target_end)) {
1136 buf = block->host_addr + (addr - block->target_start);
1137 } else {
1138 /* the next page is in the next block */
1139 block = QTAILQ_NEXT(block, next);
1140 *blockptr = block;
1141 if (!block) {
1142 return false;
1143 }
1144 assert((block->target_start & ~target_page_mask) == 0);
1145 assert((block->target_end & ~target_page_mask) == 0);
1146 *pfnptr = dump_paddr_to_pfn(s, block->target_start);
1147 buf = block->host_addr;
1148 }
1149
1150 if (bufptr) {
1151 *bufptr = buf;
1152 }
1153
1154 return true;
1155 }
1156
1157 static void write_dump_bitmap(DumpState *s, Error **errp)
1158 {
1159 int ret = 0;
1160 uint64_t last_pfn, pfn;
1161 void *dump_bitmap_buf;
1162 size_t num_dumpable;
1163 GuestPhysBlock *block_iter = NULL;
1164 size_t bitmap_bufsize = dump_bitmap_get_bufsize(s);
1165 size_t bits_per_buf = bitmap_bufsize * CHAR_BIT;
1166
1167 /* dump_bitmap_buf is used to store dump_bitmap temporarily */
1168 dump_bitmap_buf = g_malloc0(bitmap_bufsize);
1169
1170 num_dumpable = 0;
1171 last_pfn = 0;
1172
1173 /*
1174 * examine memory page by page, and set the bit in the dump_bitmap that
1175 * corresponds to each existing page.
1176 */
1177 while (get_next_page(&block_iter, &pfn, NULL, s)) {
1178 ret = set_dump_bitmap(last_pfn, pfn, true, dump_bitmap_buf, s);
1179 if (ret < 0) {
1180 error_setg(errp, "dump: failed to set dump_bitmap");
1181 goto out;
1182 }
1183
1184 last_pfn = pfn;
1185 num_dumpable++;
1186 }
1187
1188 /*
1189 * set_dump_bitmap will always leave the most recently set bit un-synced.
1190 * Here we set the remaining bits, from last_pfn to the end of the bitmap
1191 * buffer, to 0; with those set, the un-synced bit is flushed into the vmcore.
1192 */
1193 if (num_dumpable > 0) {
1194 ret = set_dump_bitmap(last_pfn, last_pfn + bits_per_buf, false,
1195 dump_bitmap_buf, s);
1196 if (ret < 0) {
1197 error_setg(errp, "dump: failed to sync dump_bitmap");
1198 goto out;
1199 }
1200 }
1201
1202 /* number of dumpable pages that will be dumped later */
1203 s->num_dumpable = num_dumpable;
1204
1205 out:
1206 g_free(dump_bitmap_buf);
1207 }
1208
1209 static void prepare_data_cache(DataCache *data_cache, DumpState *s,
1210 off_t offset)
1211 {
1212 data_cache->fd = s->fd;
1213 data_cache->data_size = 0;
1214 data_cache->buf_size = 4 * dump_bitmap_get_bufsize(s);
1215 data_cache->buf = g_malloc0(data_cache->buf_size);
1216 data_cache->offset = offset;
1217 }
1218
1219 static int write_cache(DataCache *dc, const void *buf, size_t size,
1220 bool flag_sync)
1221 {
1222 /*
1223 * dc->buf_size must not be less than size, otherwise the cache would
1224 * never be large enough
1225 */
1226 assert(size <= dc->buf_size);
1227
1228 /*
1229 * if flag_sync is set, synchronize data in dc->buf into vmcore.
1230 * otherwise check if the space is enough for caching data in buf, if not,
1231 * write the data in dc->buf to dc->fd and reset dc->buf
1232 */
1233 if ((!flag_sync && dc->data_size + size > dc->buf_size) ||
1234 (flag_sync && dc->data_size > 0)) {
1235 if (write_buffer(dc->fd, dc->offset, dc->buf, dc->data_size) < 0) {
1236 return -1;
1237 }
1238
1239 dc->offset += dc->data_size;
1240 dc->data_size = 0;
1241 }
1242
1243 if (!flag_sync) {
1244 memcpy(dc->buf + dc->data_size, buf, size);
1245 dc->data_size += size;
1246 }
1247
1248 return 0;
1249 }
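
/*
 * Typical usage, as in write_dump_pages() below: many write_cache() calls
 * with flag_sync = false batch data into dc->buf, and one final call with
 * buf = NULL, size = 0 and flag_sync = true flushes the remainder.
 */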
1250
1251 static void free_data_cache(DataCache *data_cache)
1252 {
1253 g_free(data_cache->buf);
1254 }
1255
1256 static size_t get_len_buf_out(size_t page_size, uint32_t flag_compress)
1257 {
1258 switch (flag_compress) {
1259 case DUMP_DH_COMPRESSED_ZLIB:
1260 return compressBound(page_size);
1261
1262 case DUMP_DH_COMPRESSED_LZO:
1263 /*
1264 * LZO will expand incompressible data by a little amount. Please check
1265 * the following URL to see the expansion calculation:
1266 * http://www.oberhumer.com/opensource/lzo/lzofaq.php
1267 */
1268 return page_size + page_size / 16 + 64 + 3;
1269
1270 #ifdef CONFIG_SNAPPY
1271 case DUMP_DH_COMPRESSED_SNAPPY:
1272 return snappy_max_compressed_length(page_size);
1273 #endif
1274 }
1275 return 0;
1276 }
1277
1278 static void write_dump_pages(DumpState *s, Error **errp)
1279 {
1280 int ret = 0;
1281 DataCache page_desc, page_data;
1282 size_t len_buf_out, size_out;
1283 #ifdef CONFIG_LZO
1284 lzo_bytep wrkmem = NULL;
1285 #endif
1286 uint8_t *buf_out = NULL;
1287 off_t offset_desc, offset_data;
1288 PageDescriptor pd, pd_zero;
1289 uint8_t *buf;
1290 GuestPhysBlock *block_iter = NULL;
1291 uint64_t pfn_iter;
1292
1293 /* get offset of page_desc and page_data in dump file */
1294 offset_desc = s->offset_page;
1295 offset_data = offset_desc + sizeof(PageDescriptor) * s->num_dumpable;
1296
1297 prepare_data_cache(&page_desc, s, offset_desc);
1298 prepare_data_cache(&page_data, s, offset_data);
1299
1300 /* prepare buffer to store compressed data */
1301 len_buf_out = get_len_buf_out(s->dump_info.page_size, s->flag_compress);
1302 assert(len_buf_out != 0);
1303
1304 #ifdef CONFIG_LZO
1305 wrkmem = g_malloc(LZO1X_1_MEM_COMPRESS);
1306 #endif
1307
1308 buf_out = g_malloc(len_buf_out);
1309
1310 /*
1311 * init zero page's page_desc and page_data, because every zero page
1312 * uses the same page_data
1313 */
1314 pd_zero.size = cpu_to_dump32(s, s->dump_info.page_size);
1315 pd_zero.flags = cpu_to_dump32(s, 0);
1316 pd_zero.offset = cpu_to_dump64(s, offset_data);
1317 pd_zero.page_flags = cpu_to_dump64(s, 0);
1318 buf = g_malloc0(s->dump_info.page_size);
1319 ret = write_cache(&page_data, buf, s->dump_info.page_size, false);
1320 g_free(buf);
1321 if (ret < 0) {
1322 error_setg(errp, "dump: failed to write page data (zero page)");
1323 goto out;
1324 }
1325
1326 offset_data += s->dump_info.page_size;
1327
1328 /*
1329 * dump memory to the vmcore page by page. zero pages all reside in the
1330 * first page of the page section
1331 */
1332 while (get_next_page(&block_iter, &pfn_iter, &buf, s)) {
1333 /* check zero page */
1334 if (buffer_is_zero(buf, s->dump_info.page_size)) {
1335 ret = write_cache(&page_desc, &pd_zero, sizeof(PageDescriptor),
1336 false);
1337 if (ret < 0) {
1338 error_setg(errp, "dump: failed to write page desc");
1339 goto out;
1340 }
1341 } else {
1342 /*
1343 * not zero page, then:
1344 * 1. compress the page
1345 * 2. write the compressed page into the cache of page_data
1346 * 3. get page desc of the compressed page and write it into the
1347 * cache of page_desc
1348 *
1349 * only one compression format is used here, since
1350 * s->flag_compress is set. If compression fails,
1351 * we fall back to saving the page uncompressed.
1352 */
1353 size_out = len_buf_out;
1354 if ((s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) &&
1355 (compress2(buf_out, (uLongf *)&size_out, buf,
1356 s->dump_info.page_size, Z_BEST_SPEED) == Z_OK) &&
1357 (size_out < s->dump_info.page_size)) {
1358 pd.flags = cpu_to_dump32(s, DUMP_DH_COMPRESSED_ZLIB);
1359 pd.size = cpu_to_dump32(s, size_out);
1360
1361 ret = write_cache(&page_data, buf_out, size_out, false);
1362 if (ret < 0) {
1363 error_setg(errp, "dump: failed to write page data");
1364 goto out;
1365 }
1366 #ifdef CONFIG_LZO
1367 } else if ((s->flag_compress & DUMP_DH_COMPRESSED_LZO) &&
1368 (lzo1x_1_compress(buf, s->dump_info.page_size, buf_out,
1369 (lzo_uint *)&size_out, wrkmem) == LZO_E_OK) &&
1370 (size_out < s->dump_info.page_size)) {
1371 pd.flags = cpu_to_dump32(s, DUMP_DH_COMPRESSED_LZO);
1372 pd.size = cpu_to_dump32(s, size_out);
1373
1374 ret = write_cache(&page_data, buf_out, size_out, false);
1375 if (ret < 0) {
1376 error_setg(errp, "dump: failed to write page data");
1377 goto out;
1378 }
1379 #endif
1380 #ifdef CONFIG_SNAPPY
1381 } else if ((s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) &&
1382 (snappy_compress((char *)buf, s->dump_info.page_size,
1383 (char *)buf_out, &size_out) == SNAPPY_OK) &&
1384 (size_out < s->dump_info.page_size)) {
1385 pd.flags = cpu_to_dump32(s, DUMP_DH_COMPRESSED_SNAPPY);
1386 pd.size = cpu_to_dump32(s, size_out);
1387
1388 ret = write_cache(&page_data, buf_out, size_out, false);
1389 if (ret < 0) {
1390 error_setg(errp, "dump: failed to write page data");
1391 goto out;
1392 }
1393 #endif
1394 } else {
1395 /*
1396 * fall back to saving in plaintext; size_out must be
1397 * set to the target's page size
1398 */
1399 pd.flags = cpu_to_dump32(s, 0);
1400 size_out = s->dump_info.page_size;
1401 pd.size = cpu_to_dump32(s, size_out);
1402
1403 ret = write_cache(&page_data, buf,
1404 s->dump_info.page_size, false);
1405 if (ret < 0) {
1406 error_setg(errp, "dump: failed to write page data");
1407 goto out;
1408 }
1409 }
1410
1411 /* get and write page desc here */
1412 pd.page_flags = cpu_to_dump64(s, 0);
1413 pd.offset = cpu_to_dump64(s, offset_data);
1414 offset_data += size_out;
1415
1416 ret = write_cache(&page_desc, &pd, sizeof(PageDescriptor), false);
1417 if (ret < 0) {
1418 error_setg(errp, "dump: failed to write page desc");
1419 goto out;
1420 }
1421 }
1422 s->written_size += s->dump_info.page_size;
1423 }
1424
1425 ret = write_cache(&page_desc, NULL, 0, true);
1426 if (ret < 0) {
1427 error_setg(errp, "dump: failed to sync cache for page_desc");
1428 goto out;
1429 }
1430 ret = write_cache(&page_data, NULL, 0, true);
1431 if (ret < 0) {
1432 error_setg(errp, "dump: failed to sync cache for page_data");
1433 goto out;
1434 }
1435
1436 out:
1437 free_data_cache(&page_desc);
1438 free_data_cache(&page_data);
1439
1440 #ifdef CONFIG_LZO
1441 g_free(wrkmem);
1442 #endif
1443
1444 g_free(buf_out);
1445 }
1446
1447 static void create_kdump_vmcore(DumpState *s, Error **errp)
1448 {
1449 ERRP_GUARD();
1450 int ret;
1451
1452 /*
1453 * the kdump-compressed format is:
1454 * File offset
1455 * +------------------------------------------+ 0x0
1456 * | main header (struct disk_dump_header) |
1457 * |------------------------------------------+ block 1
1458 * | sub header (struct kdump_sub_header) |
1459 * |------------------------------------------+ block 2
1460 * | 1st-dump_bitmap |
1461 * |------------------------------------------+ block 2 + X blocks
1462 * | 2nd-dump_bitmap | (aligned by block)
1463 * |------------------------------------------+ block 2 + 2 * X blocks
1464 * | page desc for pfn 0 (struct page_desc) | (aligned by block)
1465 * | page desc for pfn 1 (struct page_desc) |
1466 * | : |
1467 * |------------------------------------------| (not aligned by block)
1468 * | page data (pfn 0) |
1469 * | page data (pfn 1) |
1470 * | : |
1471 * +------------------------------------------+
1472 */
1473
1474 ret = write_start_flat_header(s->fd);
1475 if (ret < 0) {
1476 error_setg(errp, "dump: failed to write start flat header");
1477 return;
1478 }
1479
1480 write_dump_header(s, errp);
1481 if (*errp) {
1482 return;
1483 }
1484
1485 write_dump_bitmap(s, errp);
1486 if (*errp) {
1487 return;
1488 }
1489
1490 write_dump_pages(s, errp);
1491 if (*errp) {
1492 return;
1493 }
1494
1495 ret = write_end_flat_header(s->fd);
1496 if (ret < 0) {
1497 error_setg(errp, "dump: failed to write end flat header");
1498 return;
1499 }
1500 }
1501
1502 static ram_addr_t get_start_block(DumpState *s)
1503 {
1504 GuestPhysBlock *block;
1505
1506 if (!s->has_filter) {
1507 s->next_block = QTAILQ_FIRST(&s->guest_phys_blocks.head);
1508 return 0;
1509 }
1510
1511 QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) {
1512 if (block->target_start >= s->begin + s->length ||
1513 block->target_end <= s->begin) {
1514 /* This block is out of the range */
1515 continue;
1516 }
1517
1518 s->next_block = block;
1519 if (s->begin > block->target_start) {
1520 s->start = s->begin - block->target_start;
1521 } else {
1522 s->start = 0;
1523 }
1524 return s->start;
1525 }
1526
1527 return -1;
1528 }
1529
1530 static void get_max_mapnr(DumpState *s)
1531 {
1532 GuestPhysBlock *last_block;
1533
1534 last_block = QTAILQ_LAST(&s->guest_phys_blocks.head);
1535 s->max_mapnr = dump_paddr_to_pfn(s, last_block->target_end);
1536 }
1537
1538 static DumpState dump_state_global = { .status = DUMP_STATUS_NONE };
1539
1540 static void dump_state_prepare(DumpState *s)
1541 {
1542 /* zero the struct, setting status to active */
1543 *s = (DumpState) { .status = DUMP_STATUS_ACTIVE };
1544 }
1545
1546 bool qemu_system_dump_in_progress(void)
1547 {
1548 DumpState *state = &dump_state_global;
1549 return (qatomic_read(&state->status) == DUMP_STATUS_ACTIVE);
1550 }
1551
1552 /* calculate the total size of memory to be dumped (taking the filter
1553 * into account) */
1554 static int64_t dump_calculate_size(DumpState *s)
1555 {
1556 GuestPhysBlock *block;
1557 int64_t size = 0, total = 0, left = 0, right = 0;
1558
1559 QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) {
1560 if (s->has_filter) {
1561 /* calculate the overlapped region. */
1562 left = MAX(s->begin, block->target_start);
1563 right = MIN(s->begin + s->length, block->target_end);
1564 size = right - left;
1565 size = size > 0 ? size : 0;
1566 } else {
1567 /* count the whole region in */
1568 size = (block->target_end - block->target_start);
1569 }
1570 total += size;
1571 }
1572
1573 return total;
1574 }
1575
1576 static void vmcoreinfo_update_phys_base(DumpState *s)
1577 {
1578 uint64_t size, note_head_size, name_size, phys_base;
1579 char **lines;
1580 uint8_t *vmci;
1581 size_t i;
1582
1583 if (!note_name_equal(s, s->guest_note, "VMCOREINFO")) {
1584 return;
1585 }
1586
1587 get_note_sizes(s, s->guest_note, &note_head_size, &name_size, &size);
1588 note_head_size = ROUND_UP(note_head_size, 4);
1589
1590 vmci = s->guest_note + note_head_size + ROUND_UP(name_size, 4);
1591 *(vmci + size) = '\0';
1592
1593 lines = g_strsplit((char *)vmci, "\n", -1);
1594 for (i = 0; lines[i]; i++) {
1595 const char *prefix = NULL;
1596
1597 if (s->dump_info.d_machine == EM_X86_64) {
1598 prefix = "NUMBER(phys_base)=";
1599 } else if (s->dump_info.d_machine == EM_AARCH64) {
1600 prefix = "NUMBER(PHYS_OFFSET)=";
1601 }
1602
1603 if (prefix && g_str_has_prefix(lines[i], prefix)) {
1604 if (qemu_strtou64(lines[i] + strlen(prefix), NULL, 16,
1605 &phys_base) < 0) {
1606 warn_report("Failed to read %s", prefix);
1607 } else {
1608 s->dump_info.phys_base = phys_base;
1609 }
1610 break;
1611 }
1612 }
1613
1614 g_strfreev(lines);
1615 }
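
/*
 * VMCOREINFO is a newline-separated list of "KEY=value" lines; the code
 * above scans for e.g. "NUMBER(phys_base)=..." and parses the value in
 * base 16.
 */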
1616
1617 static void dump_init(DumpState *s, int fd, bool has_format,
1618 DumpGuestMemoryFormat format, bool paging, bool has_filter,
1619 int64_t begin, int64_t length, Error **errp)
1620 {
1621 ERRP_GUARD();
1622 VMCoreInfoState *vmci = vmcoreinfo_find();
1623 CPUState *cpu;
1624 int nr_cpus;
1625 int ret;
1626
1627 s->has_format = has_format;
1628 s->format = format;
1629 s->written_size = 0;
1630
1631 /* the kdump-compressed format conflicts with paging and filtering */
1632 if (has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) {
1633 assert(!paging && !has_filter);
1634 }
1635
1636 if (runstate_is_running()) {
1637 vm_stop(RUN_STATE_SAVE_VM);
1638 s->resume = true;
1639 } else {
1640 s->resume = false;
1641 }
1642
1643 /* If we use KVM, we should synchronize the registers before we get dump
1644 * info or physmap info.
1645 */
1646 cpu_synchronize_all_states();
1647 nr_cpus = 0;
1648 CPU_FOREACH(cpu) {
1649 nr_cpus++;
1650 }
1651
1652 s->fd = fd;
1653 s->has_filter = has_filter;
1654 s->begin = begin;
1655 s->length = length;
1656
1657 memory_mapping_list_init(&s->list);
1658
1659 guest_phys_blocks_init(&s->guest_phys_blocks);
1660 guest_phys_blocks_append(&s->guest_phys_blocks);
1661 s->total_size = dump_calculate_size(s);
1662 #ifdef DEBUG_DUMP_GUEST_MEMORY
1663 fprintf(stderr, "DUMP: total memory to dump: %lu\n", s->total_size);
1664 #endif
1665
1666 /* it does not make sense to dump non-existent memory */
1667 if (!s->total_size) {
1668 error_setg(errp, "dump: no guest memory to dump");
1669 goto cleanup;
1670 }
1671
1672 s->start = get_start_block(s);
1673 if (s->start == -1) {
1674 error_setg(errp, QERR_INVALID_PARAMETER, "begin");
1675 goto cleanup;
1676 }
1677
1678 /* get dump info: endian, class and architecture.
1679 * If the target architecture is not supported, cpu_get_dump_info() will
1680 * return -1.
1681 */
1682 ret = cpu_get_dump_info(&s->dump_info, &s->guest_phys_blocks);
1683 if (ret < 0) {
1684 error_setg(errp, QERR_UNSUPPORTED);
1685 goto cleanup;
1686 }
1687
1688 if (!s->dump_info.page_size) {
1689 s->dump_info.page_size = TARGET_PAGE_SIZE;
1690 }
1691
1692 s->note_size = cpu_get_note_size(s->dump_info.d_class,
1693 s->dump_info.d_machine, nr_cpus);
1694 if (s->note_size < 0) {
1695 error_setg(errp, QERR_UNSUPPORTED);
1696 goto cleanup;
1697 }
1698
1699 /*
1700 * The goal of this block is to (a) update the previously guessed
1701 * phys_base, (b) copy the guest note out of the guest.
1702 * Failure to do so is not fatal for dumping.
1703 */
1704 if (vmci) {
1705 uint64_t addr, note_head_size, name_size, desc_size;
1706 uint32_t size;
1707 uint16_t format;
1708
1709 note_head_size = s->dump_info.d_class == ELFCLASS32 ?
1710 sizeof(Elf32_Nhdr) : sizeof(Elf64_Nhdr);
1711
1712 format = le16_to_cpu(vmci->vmcoreinfo.guest_format);
1713 size = le32_to_cpu(vmci->vmcoreinfo.size);
1714 addr = le64_to_cpu(vmci->vmcoreinfo.paddr);
1715 if (!vmci->has_vmcoreinfo) {
1716 warn_report("guest note is not present");
1717 } else if (size < note_head_size || size > MAX_GUEST_NOTE_SIZE) {
1718 warn_report("guest note size is invalid: %" PRIu32, size);
1719 } else if (format != FW_CFG_VMCOREINFO_FORMAT_ELF) {
1720 warn_report("guest note format is unsupported: %" PRIu16, format);
1721 } else {
1722 s->guest_note = g_malloc(size + 1); /* +1 for adding \0 */
1723 cpu_physical_memory_read(addr, s->guest_note, size);
1724
1725 get_note_sizes(s, s->guest_note, NULL, &name_size, &desc_size);
1726 s->guest_note_size = ELF_NOTE_SIZE(note_head_size, name_size,
1727 desc_size);
1728 if (name_size > MAX_GUEST_NOTE_SIZE ||
1729 desc_size > MAX_GUEST_NOTE_SIZE ||
1730 s->guest_note_size > size) {
1731 warn_report("Invalid guest note header");
1732 g_free(s->guest_note);
1733 s->guest_note = NULL;
1734 } else {
1735 vmcoreinfo_update_phys_base(s);
1736 s->note_size += s->guest_note_size;
1737 }
1738 }
1739 }
1740
1741 /* get memory mapping */
1742 if (paging) {
1743 qemu_get_guest_memory_mapping(&s->list, &s->guest_phys_blocks, errp);
1744 if (*errp) {
1745 goto cleanup;
1746 }
1747 } else {
1748 qemu_get_guest_simple_memory_mapping(&s->list, &s->guest_phys_blocks);
1749 }
1750
1751 s->nr_cpus = nr_cpus;
1752
1753 get_max_mapnr(s);
1754
1755 uint64_t tmp;
1756 tmp = DIV_ROUND_UP(DIV_ROUND_UP(s->max_mapnr, CHAR_BIT),
1757 s->dump_info.page_size);
1758 s->len_dump_bitmap = tmp * s->dump_info.page_size;
1759
1760 /* init for kdump-compressed format */
1761 if (has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) {
1762 switch (format) {
1763 case DUMP_GUEST_MEMORY_FORMAT_KDUMP_ZLIB:
1764 s->flag_compress = DUMP_DH_COMPRESSED_ZLIB;
1765 break;
1766
1767 case DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO:
1768 #ifdef CONFIG_LZO
1769 if (lzo_init() != LZO_E_OK) {
1770 error_setg(errp, "failed to initialize the LZO library");
1771 goto cleanup;
1772 }
1773 #endif
1774 s->flag_compress = DUMP_DH_COMPRESSED_LZO;
1775 break;
1776
1777 case DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY:
1778 s->flag_compress = DUMP_DH_COMPRESSED_SNAPPY;
1779 break;
1780
1781 default:
1782 s->flag_compress = 0;
1783 }
1784
1785 return;
1786 }
1787
1788 if (s->has_filter) {
1789 memory_mapping_filter(&s->list, s->begin, s->length);
1790 }
1791
1792 /*
1793 * calculate phdr_num
1794 *
1795 * the type of ehdr->e_phnum is uint16_t, so we should avoid overflow
1796 */
1797 s->phdr_num = 1; /* PT_NOTE */
1798 if (s->list.num < UINT16_MAX - 2) {
1799 s->shdr_num = 0;
1800 s->phdr_num += s->list.num;
1801 } else {
1802 /* sh_info of section 0 holds the real number of phdrs */
1803 s->shdr_num = 1;
1804
1805 /* the type of shdr->sh_info is uint32_t, so we should avoid overflow */
1806 if (s->list.num <= UINT32_MAX - 1) {
1807 s->phdr_num += s->list.num;
1808 } else {
1809 s->phdr_num = UINT32_MAX;
1810 }
1811 }
1812
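/*
 * Resulting file layout for the ELF format: Ehdr, then the program header
 * table, then the (possibly empty) section header table, then the notes,
 * and finally the memory contents at s->memory_offset.
 */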
1813 if (s->dump_info.d_class == ELFCLASS64) {
1814 s->phdr_offset = sizeof(Elf64_Ehdr);
1815 s->shdr_offset = s->phdr_offset + sizeof(Elf64_Phdr) * s->phdr_num;
1816 s->note_offset = s->shdr_offset + sizeof(Elf64_Shdr) * s->shdr_num;
1817 s->memory_offset = s->note_offset + s->note_size;
1818 } else {
1819
1820 s->phdr_offset = sizeof(Elf32_Ehdr);
1821 s->shdr_offset = s->phdr_offset + sizeof(Elf32_Phdr) * s->phdr_num;
1822 s->note_offset = s->shdr_offset + sizeof(Elf32_Shdr) * s->shdr_num;
1823 s->memory_offset = s->note_offset + s->note_size;
1824 }
1825
1826 return;
1827
1828 cleanup:
1829 dump_cleanup(s);
1830 }
1831
1832 /* this operation might be time consuming. */
1833 static void dump_process(DumpState *s, Error **errp)
1834 {
1835 ERRP_GUARD();
1836 DumpQueryResult *result = NULL;
1837
1838 if (s->has_format && s->format == DUMP_GUEST_MEMORY_FORMAT_WIN_DMP) {
1839 #ifdef TARGET_X86_64
1840 create_win_dump(s, errp);
1841 #endif
1842 } else if (s->has_format && s->format != DUMP_GUEST_MEMORY_FORMAT_ELF) {
1843 create_kdump_vmcore(s, errp);
1844 } else {
1845 create_vmcore(s, errp);
1846 }
1847
1848 /* make sure status is written after written_size updates */
1849 smp_wmb();
1850 qatomic_set(&s->status,
1851 (*errp ? DUMP_STATUS_FAILED : DUMP_STATUS_COMPLETED));
1852
1853 /* send DUMP_COMPLETED message (unconditionally) */
1854 result = qmp_query_dump(NULL);
1855 /* should never fail */
1856 assert(result);
1857 qapi_event_send_dump_completed(result, !!*errp, (*errp ?
1858 error_get_pretty(*errp) : NULL));
1859 qapi_free_DumpQueryResult(result);
1860
1861 dump_cleanup(s);
1862 }
1863
1864 static void *dump_thread(void *data)
1865 {
1866 DumpState *s = (DumpState *)data;
1867 dump_process(s, NULL);
1868 return NULL;
1869 }
1870
1871 DumpQueryResult *qmp_query_dump(Error **errp)
1872 {
1873 DumpQueryResult *result = g_new(DumpQueryResult, 1);
1874 DumpState *state = &dump_state_global;
1875 result->status = qatomic_read(&state->status);
1876 /* make sure we are reading status and written_size in order */
1877 smp_rmb();
1878 result->completed = state->written_size;
1879 result->total = state->total_size;
1880 return result;
1881 }
1882
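/*
 * QMP entry point. A typical invocation, assuming a file target:
 *
 *   { "execute": "dump-guest-memory",
 *     "arguments": { "paging": false, "protocol": "file:/tmp/vmcore",
 *                    "format": "kdump-zlib" } }
 */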
1883 void qmp_dump_guest_memory(bool paging, const char *file,
1884 bool has_detach, bool detach,
1885 bool has_begin, int64_t begin, bool has_length,
1886 int64_t length, bool has_format,
1887 DumpGuestMemoryFormat format, Error **errp)
1888 {
1889 ERRP_GUARD();
1890 const char *p;
1891 int fd = -1;
1892 DumpState *s;
1893 bool detach_p = false;
1894
1895 if (runstate_check(RUN_STATE_INMIGRATE)) {
1896 error_setg(errp, "Dump not allowed during incoming migration.");
1897 return;
1898 }
1899
1900 /* if there is a dump in the background, we should wait until that dump
1901 * has finished */
1902 if (qemu_system_dump_in_progress()) {
1903 error_setg(errp, "There is a dump in process, please wait.");
1904 return;
1905 }
1906
1907 /*
1908 * the kdump-compressed format needs the whole of memory dumped, so
1909 * paging and filtering are not supported here.
1910 */
1911 if ((has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) &&
1912 (paging || has_begin || has_length)) {
1913 error_setg(errp, "kdump-compressed format doesn't support paging or "
1914 "filter");
1915 return;
1916 }
1917 if (has_begin && !has_length) {
1918 error_setg(errp, QERR_MISSING_PARAMETER, "length");
1919 return;
1920 }
1921 if (!has_begin && has_length) {
1922 error_setg(errp, QERR_MISSING_PARAMETER, "begin");
1923 return;
1924 }
1925 if (has_detach) {
1926 detach_p = detach;
1927 }
1928
1929 /* check whether lzo/snappy is supported */
1930 #ifndef CONFIG_LZO
1931 if (has_format && format == DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO) {
1932 error_setg(errp, "kdump-lzo is not available now");
1933 return;
1934 }
1935 #endif
1936
1937 #ifndef CONFIG_SNAPPY
1938 if (has_format && format == DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY) {
1939 error_setg(errp, "kdump-snappy is not available now");
1940 return;
1941 }
1942 #endif
1943
1944 #ifndef TARGET_X86_64
1945 if (has_format && format == DUMP_GUEST_MEMORY_FORMAT_WIN_DMP) {
1946 error_setg(errp, "Windows dump is only available for x86-64");
1947 return;
1948 }
1949 #endif
1950
1951 #if !defined(WIN32)
1952 if (strstart(file, "fd:", &p)) {
1953 fd = monitor_get_fd(monitor_cur(), p, errp);
1954 if (fd == -1) {
1955 return;
1956 }
1957 }
1958 #endif
1959
1960 if (strstart(file, "file:", &p)) {
1961 fd = qemu_open_old(p, O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, S_IRUSR);
1962 if (fd < 0) {
1963 error_setg_file_open(errp, errno, p);
1964 return;
1965 }
1966 }
1967
1968 if (fd == -1) {
1969 error_setg(errp, QERR_INVALID_PARAMETER, "protocol");
1970 return;
1971 }
1972
1973 if (!dump_migration_blocker) {
1974 error_setg(&dump_migration_blocker,
1975 "Live migration disabled: dump-guest-memory in progress");
1976 }
1977
1978 /*
1979 * Allowed even with -only-migratable, but forbid migration while the
1980 * guest memory dump is in progress.
1981 */
1982 if (migrate_add_blocker_internal(dump_migration_blocker, errp)) {
1983 /* Remember to release the fd before passing it over to dump state */
1984 close(fd);
1985 return;
1986 }
1987
1988 s = &dump_state_global;
1989 dump_state_prepare(s);
1990
1991 dump_init(s, fd, has_format, format, paging, has_begin,
1992 begin, length, errp);
1993 if (*errp) {
1994 qatomic_set(&s->status, DUMP_STATUS_FAILED);
1995 return;
1996 }
1997
1998 if (detach_p) {
1999 /* detached dump */
2000 s->detached = true;
2001 qemu_thread_create(&s->dump_thread, "dump_thread", dump_thread,
2002 s, QEMU_THREAD_DETACHED);
2003 } else {
2004 /* sync dump */
2005 dump_process(s, errp);
2006 }
2007 }
2008
2009 DumpGuestMemoryCapability *qmp_query_dump_guest_memory_capability(Error **errp)
2010 {
2011 DumpGuestMemoryCapability *cap =
2012 g_new0(DumpGuestMemoryCapability, 1);
2013 DumpGuestMemoryFormatList **tail = &cap->formats;
2014
2015 /* elf is always available */
2016 QAPI_LIST_APPEND(tail, DUMP_GUEST_MEMORY_FORMAT_ELF);
2017
2018 /* kdump-zlib is always available */
2019 QAPI_LIST_APPEND(tail, DUMP_GUEST_MEMORY_FORMAT_KDUMP_ZLIB);
2020
2021 /* add new item if kdump-lzo is available */
2022 #ifdef CONFIG_LZO
2023 QAPI_LIST_APPEND(tail, DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO);
2024 #endif
2025
2026 /* add new item if kdump-snappy is available */
2027 #ifdef CONFIG_SNAPPY
2028 QAPI_LIST_APPEND(tail, DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY);
2029 #endif
2030
2031 /* Windows dump is available only if target is x86_64 */
2032 #ifdef TARGET_X86_64
2033 QAPI_LIST_APPEND(tail, DUMP_GUEST_MEMORY_FORMAT_WIN_DMP);
2034 #endif
2035
2036 return cap;
2037 }