4 * Copyright Fujitsu, Corp. 2011, 2012
7 * Wen Congyang <wency@cn.fujitsu.com>
9 * This work is licensed under the terms of the GNU GPL, version 2 or later.
10 * See the COPYING file in the top-level directory.
14 #include "qemu-common.h"
17 #include "exec/cpu-all.h"
18 #include "exec/hwaddr.h"
19 #include "monitor/monitor.h"
20 #include "sysemu/kvm.h"
21 #include "sysemu/dump.h"
22 #include "sysemu/sysemu.h"
23 #include "sysemu/memory_mapping.h"
24 #include "sysemu/cpus.h"
25 #include "qapi/error.h"
26 #include "qmp-commands.h"
30 #include <lzo/lzo1x.h>
35 #ifndef ELF_MACHINE_UNAME
36 #define ELF_MACHINE_UNAME "Unknown"
39 static uint16_t cpu_convert_to_target16(uint16_t val, int endian)
41     if (endian == ELFDATA2LSB) {
42         val = cpu_to_le16(val);
44         val = cpu_to_be16(val);
50 static uint32_t cpu_convert_to_target32(uint32_t val, int endian)
52     if (endian == ELFDATA2LSB) {
53         val = cpu_to_le32(val);
55         val = cpu_to_be32(val);
61 static uint64_t cpu_convert_to_target64(uint64_t val, int endian)
63     if (endian == ELFDATA2LSB) {
64         val = cpu_to_le64(val);
66         val = cpu_to_be64(val);
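/*
 * The three helpers above pick cpu_to_leXX() or cpu_to_beXX() based on the
 * endianness recorded in ArchDumpInfo.d_endian (ELFDATA2LSB vs ELFDATA2MSB),
 * so every multi-byte field written into the ELF and kdump headers below is
 * stored in the guest's byte order, e.g.
 * elf_header.e_phnum = cpu_convert_to_target16(s->phdr_num, endian).
 */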
72 typedef struct DumpState {
73     GuestPhysBlockList guest_phys_blocks;
74     ArchDumpInfo dump_info;
75     MemoryMappingList list;
84     GuestPhysBlock *next_block;
90     uint8_t *note_buf;          /* buffer for notes */
91     size_t note_buf_offset;     /* the writing place in note_buf */
92     uint32_t nr_cpus;           /* number of guest CPUs */
93     size_t page_size;           /* guest's page size */
94     uint32_t page_shift;        /* guest's page shift */
95     uint64_t max_mapnr;         /* highest pfn of the guest's physical memory */
96     size_t len_dump_bitmap;     /* the size of the place used to store
97                                    dump_bitmap in vmcore */
98     off_t offset_dump_bitmap;   /* offset of dump_bitmap part in vmcore */
99     off_t offset_page;          /* offset of page part in vmcore */
100     size_t num_dumpable;        /* number of pages that can be dumped */
101     uint32_t flag_compress;     /* indicates the compression format */
104 static int dump_cleanup(DumpState *s)
108     guest_phys_blocks_free(&s->guest_phys_blocks);
109     memory_mapping_list_free(&s->list);
120 static void dump_error(DumpState *s, const char *reason)
125 static int fd_write_vmcore(const void *buf, size_t size, void *opaque)
127     DumpState *s = opaque;
130     written_size = qemu_write_full(s->fd, buf, size);
131     if (written_size != size) {
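/*
 * fd_write_vmcore() is the WriteCoreDumpFunction used on the ELF path: every
 * header, note and memory page goes straight to s->fd via qemu_write_full().
 * The kdump-compressed path further down wraps its output in write_buffer()
 * and buf_write_note() instead.
 */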
138 static int write_elf64_header(DumpState *s)
140     Elf64_Ehdr elf_header;
142     int endian = s->dump_info.d_endian;
144     memset(&elf_header, 0, sizeof(Elf64_Ehdr));
145     memcpy(&elf_header, ELFMAG, SELFMAG);
146     elf_header.e_ident[EI_CLASS] = ELFCLASS64;
147     elf_header.e_ident[EI_DATA] = s->dump_info.d_endian;
148     elf_header.e_ident[EI_VERSION] = EV_CURRENT;
149     elf_header.e_type = cpu_convert_to_target16(ET_CORE, endian);
150     elf_header.e_machine = cpu_convert_to_target16(s->dump_info.d_machine,
152     elf_header.e_version = cpu_convert_to_target32(EV_CURRENT, endian);
153     elf_header.e_ehsize = cpu_convert_to_target16(sizeof(elf_header), endian);
154     elf_header.e_phoff = cpu_convert_to_target64(sizeof(Elf64_Ehdr), endian);
155     elf_header.e_phentsize = cpu_convert_to_target16(sizeof(Elf64_Phdr),
157     elf_header.e_phnum = cpu_convert_to_target16(s->phdr_num, endian);
158     if (s->have_section) {
159         uint64_t shoff = sizeof(Elf64_Ehdr) + sizeof(Elf64_Phdr) * s->sh_info;
161         elf_header.e_shoff = cpu_convert_to_target64(shoff, endian);
162         elf_header.e_shentsize = cpu_convert_to_target16(sizeof(Elf64_Shdr),
164         elf_header.e_shnum = cpu_convert_to_target16(1, endian);
167     ret = fd_write_vmcore(&elf_header, sizeof(elf_header), s);
169         dump_error(s, "dump: failed to write elf header.\n");
176 static int write_elf32_header(DumpState *s)
178     Elf32_Ehdr elf_header;
180     int endian = s->dump_info.d_endian;
182     memset(&elf_header, 0, sizeof(Elf32_Ehdr));
183     memcpy(&elf_header, ELFMAG, SELFMAG);
184     elf_header.e_ident[EI_CLASS] = ELFCLASS32;
185     elf_header.e_ident[EI_DATA] = endian;
186     elf_header.e_ident[EI_VERSION] = EV_CURRENT;
187     elf_header.e_type = cpu_convert_to_target16(ET_CORE, endian);
188     elf_header.e_machine = cpu_convert_to_target16(s->dump_info.d_machine,
190     elf_header.e_version = cpu_convert_to_target32(EV_CURRENT, endian);
191     elf_header.e_ehsize = cpu_convert_to_target16(sizeof(elf_header), endian);
192     elf_header.e_phoff = cpu_convert_to_target32(sizeof(Elf32_Ehdr), endian);
193     elf_header.e_phentsize = cpu_convert_to_target16(sizeof(Elf32_Phdr),
195     elf_header.e_phnum = cpu_convert_to_target16(s->phdr_num, endian);
196     if (s->have_section) {
197         uint32_t shoff = sizeof(Elf32_Ehdr) + sizeof(Elf32_Phdr) * s->sh_info;
199         elf_header.e_shoff = cpu_convert_to_target32(shoff, endian);
200         elf_header.e_shentsize = cpu_convert_to_target16(sizeof(Elf32_Shdr),
202         elf_header.e_shnum = cpu_convert_to_target16(1, endian);
205     ret = fd_write_vmcore(&elf_header, sizeof(elf_header), s);
207         dump_error(s, "dump: failed to write elf header.\n");
214 static int write_elf64_load(DumpState *s, MemoryMapping *memory_mapping,
215                             int phdr_index, hwaddr offset,
220     int endian = s->dump_info.d_endian;
222     memset(&phdr, 0, sizeof(Elf64_Phdr));
223     phdr.p_type = cpu_convert_to_target32(PT_LOAD, endian);
224     phdr.p_offset = cpu_convert_to_target64(offset, endian);
225     phdr.p_paddr = cpu_convert_to_target64(memory_mapping->phys_addr, endian);
226     phdr.p_filesz = cpu_convert_to_target64(filesz, endian);
227     phdr.p_memsz = cpu_convert_to_target64(memory_mapping->length, endian);
228     phdr.p_vaddr = cpu_convert_to_target64(memory_mapping->virt_addr, endian);
230     assert(memory_mapping->length >= filesz);
232     ret = fd_write_vmcore(&phdr, sizeof(Elf64_Phdr), s);
234         dump_error(s, "dump: failed to write program header table.\n");
241 static int write_elf32_load(DumpState *s, MemoryMapping *memory_mapping,
242                             int phdr_index, hwaddr offset,
247     int endian = s->dump_info.d_endian;
249     memset(&phdr, 0, sizeof(Elf32_Phdr));
250     phdr.p_type = cpu_convert_to_target32(PT_LOAD, endian);
251     phdr.p_offset = cpu_convert_to_target32(offset, endian);
252     phdr.p_paddr = cpu_convert_to_target32(memory_mapping->phys_addr, endian);
253     phdr.p_filesz = cpu_convert_to_target32(filesz, endian);
254     phdr.p_memsz = cpu_convert_to_target32(memory_mapping->length, endian);
255     phdr.p_vaddr = cpu_convert_to_target32(memory_mapping->virt_addr, endian);
257     assert(memory_mapping->length >= filesz);
259     ret = fd_write_vmcore(&phdr, sizeof(Elf32_Phdr), s);
261         dump_error(s, "dump: failed to write program header table.\n");
268 static int write_elf64_note(DumpState *s)
271     int endian = s->dump_info.d_endian;
272     hwaddr begin = s->memory_offset - s->note_size;
275     memset(&phdr, 0, sizeof(Elf64_Phdr));
276     phdr.p_type = cpu_convert_to_target32(PT_NOTE, endian);
277     phdr.p_offset = cpu_convert_to_target64(begin, endian);
279     phdr.p_filesz = cpu_convert_to_target64(s->note_size, endian);
280     phdr.p_memsz = cpu_convert_to_target64(s->note_size, endian);
283     ret = fd_write_vmcore(&phdr, sizeof(Elf64_Phdr), s);
285         dump_error(s, "dump: failed to write program header table.\n");
292 static inline int cpu_index(CPUState *cpu)
294     return cpu->cpu_index + 1;
297 static int write_elf64_notes(WriteCoreDumpFunction f, DumpState *s)
305         ret = cpu_write_elf64_note(f, cpu, id, s);
307             dump_error(s, "dump: failed to write elf notes.\n");
313         ret = cpu_write_elf64_qemunote(f, cpu, s);
315             dump_error(s, "dump: failed to write CPU status.\n");
323 static int write_elf32_note(DumpState *s)
325     hwaddr begin = s->memory_offset - s->note_size;
327     int endian = s->dump_info.d_endian;
330     memset(&phdr, 0, sizeof(Elf32_Phdr));
331     phdr.p_type = cpu_convert_to_target32(PT_NOTE, endian);
332     phdr.p_offset = cpu_convert_to_target32(begin, endian);
334     phdr.p_filesz = cpu_convert_to_target32(s->note_size, endian);
335     phdr.p_memsz = cpu_convert_to_target32(s->note_size, endian);
338     ret = fd_write_vmcore(&phdr, sizeof(Elf32_Phdr), s);
340         dump_error(s, "dump: failed to write program header table.\n");
347 static int write_elf32_notes(WriteCoreDumpFunction f, DumpState *s)
355         ret = cpu_write_elf32_note(f, cpu, id, s);
357             dump_error(s, "dump: failed to write elf notes.\n");
363         ret = cpu_write_elf32_qemunote(f, cpu, s);
365             dump_error(s, "dump: failed to write CPU status.\n");
373 static int write_elf_section(DumpState *s, int type)
377     int endian = s->dump_info.d_endian;
383         shdr_size = sizeof(Elf32_Shdr);
384         memset(&shdr32, 0, shdr_size);
385         shdr32.sh_info = cpu_convert_to_target32(s->sh_info, endian);
388         shdr_size = sizeof(Elf64_Shdr);
389         memset(&shdr64, 0, shdr_size);
390         shdr64.sh_info = cpu_convert_to_target32(s->sh_info, endian);
394     ret = fd_write_vmcore(&shdr, shdr_size, s);
396         dump_error(s, "dump: failed to write section header table.\n");
403 static int write_data(DumpState *s, void *buf, int length)
407     ret = fd_write_vmcore(buf, length, s);
409         dump_error(s, "dump: failed to save memory.\n");
416 /* write the memory to vmcore. 1 page per I/O. */
417 static int write_memory(DumpState *s, GuestPhysBlock *block, ram_addr_t start,
423     for (i = 0; i < size / TARGET_PAGE_SIZE; i++) {
424         ret = write_data(s, block->host_addr + start + i * TARGET_PAGE_SIZE,
431     if ((size % TARGET_PAGE_SIZE) != 0) {
432         ret = write_data(s, block->host_addr + start + i * TARGET_PAGE_SIZE,
433                          size % TARGET_PAGE_SIZE);
442 /* get the memory's offset and size in the vmcore */
443 static void get_offset_range(hwaddr phys_addr,
444                              ram_addr_t mapping_length,
449     GuestPhysBlock *block;
450     hwaddr offset = s->memory_offset;
451     int64_t size_in_block, start;
453     /* When the memory is not stored into vmcore, offset will be -1 */
458         if (phys_addr < s->begin || phys_addr >= s->begin + s->length) {
463     QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) {
465             if (block->target_start >= s->begin + s->length ||
466                 block->target_end <= s->begin) {
467                 /* This block is out of the range */
471             if (s->begin <= block->target_start) {
472                 start = block->target_start;
477             size_in_block = block->target_end - start;
478             if (s->begin + s->length < block->target_end) {
479                 size_in_block -= block->target_end - (s->begin + s->length);
482             start = block->target_start;
483             size_in_block = block->target_end - block->target_start;
486         if (phys_addr >= start && phys_addr < start + size_in_block) {
487             *p_offset = phys_addr - start + offset;
489             /* The offset range mapped from the vmcore file must not spill over
490              * the GuestPhysBlock, clamp it. The rest of the mapping will be
491              * zero-filled in memory at load time; see
492              * <http://refspecs.linuxbase.org/elf/gabi4+/ch5.pheader.html>.
494             *p_filesz = phys_addr + mapping_length <= start + size_in_block ?
496                         size_in_block - (phys_addr - start);
500         offset += size_in_block;
504 static int write_elf_loads(DumpState *s)
506     hwaddr offset, filesz;
507     MemoryMapping *memory_mapping;
508     uint32_t phdr_index = 1;
512     if (s->have_section) {
513         max_index = s->sh_info;
515         max_index = s->phdr_num;
518     QTAILQ_FOREACH(memory_mapping, &s->list.head, next) {
519         get_offset_range(memory_mapping->phys_addr,
520                          memory_mapping->length,
521                          s, &offset, &filesz);
522         if (s->dump_info.d_class == ELFCLASS64) {
523             ret = write_elf64_load(s, memory_mapping, phdr_index++, offset,
526             ret = write_elf32_load(s, memory_mapping, phdr_index++, offset,
534         if (phdr_index >= max_index) {
542 /* write elf header, PT_NOTE and elf note to vmcore. */
543 static int dump_begin(DumpState *s)
548      * the vmcore's format is:
567      * we only know where the memory is saved after we write elf note into
571     /* write elf header to vmcore */
572     if (s->dump_info.d_class == ELFCLASS64) {
573         ret = write_elf64_header(s);
575         ret = write_elf32_header(s);
581     if (s->dump_info.d_class == ELFCLASS64) {
582         /* write PT_NOTE to vmcore */
583         if (write_elf64_note(s) < 0) {
587         /* write all PT_LOAD to vmcore */
588         if (write_elf_loads(s) < 0) {
592         /* write section to vmcore */
593         if (s->have_section) {
594             if (write_elf_section(s, 1) < 0) {
599         /* write notes to vmcore */
600         if (write_elf64_notes(fd_write_vmcore, s) < 0) {
605         /* write PT_NOTE to vmcore */
606         if (write_elf32_note(s) < 0) {
610         /* write all PT_LOAD to vmcore */
611         if (write_elf_loads(s) < 0) {
615         /* write section to vmcore */
616         if (s->have_section) {
617             if (write_elf_section(s, 0) < 0) {
622         /* write notes to vmcore */
623         if (write_elf32_notes(fd_write_vmcore, s) < 0) {
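/*
 * dump_begin() emits everything whose size is known up front: the ELF
 * header, the PT_NOTE and PT_LOAD program headers, the optional section
 * header and the notes themselves. dump_iterate() below then streams the
 * guest memory block by block into the space the PT_LOAD headers describe.
 */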
631 /* write PT_LOAD to vmcore */
632 static int dump_completed(DumpState *s)
638 static int get_next_block(DumpState *s, GuestPhysBlock *block)
641         block = QTAILQ_NEXT(block, next);
648         s->next_block = block;
650             if (block->target_start >= s->begin + s->length ||
651                 block->target_end <= s->begin) {
652                 /* This block is out of the range */
656             if (s->begin > block->target_start) {
657                 s->start = s->begin - block->target_start;
665 /* write all memory to vmcore */
666 static int dump_iterate(DumpState *s)
668     GuestPhysBlock *block;
673         block = s->next_block;
675         size = block->target_end - block->target_start;
678             if (s->begin + s->length < block->target_end) {
679                 size -= block->target_end - (s->begin + s->length);
682         ret = write_memory(s, block, s->start, size);
687         ret = get_next_block(s, block);
695 static int create_vmcore(DumpState *s)
704     ret = dump_iterate(s);
712 static int write_start_flat_header(int fd)
714     MakedumpfileHeader *mh;
717     QEMU_BUILD_BUG_ON(sizeof *mh > MAX_SIZE_MDF_HEADER);
718     mh = g_malloc0(MAX_SIZE_MDF_HEADER);
720     memcpy(mh->signature, MAKEDUMPFILE_SIGNATURE,
721            MIN(sizeof mh->signature, sizeof MAKEDUMPFILE_SIGNATURE));
723     mh->type = cpu_to_be64(TYPE_FLAT_HEADER);
724     mh->version = cpu_to_be64(VERSION_FLAT_HEADER);
727     written_size = qemu_write_full(fd, mh, MAX_SIZE_MDF_HEADER);
728     if (written_size != MAX_SIZE_MDF_HEADER) {
736 static int write_end_flat_header(int fd)
738     MakedumpfileDataHeader mdh;
740     mdh.offset = END_FLAG_FLAT_HEADER;
741     mdh.buf_size = END_FLAG_FLAT_HEADER;
744     written_size = qemu_write_full(fd, &mdh, sizeof(mdh));
745     if (written_size != sizeof(mdh)) {
752 static int write_buffer(int fd, off_t offset, const void *buf, size_t size)
755     MakedumpfileDataHeader mdh;
757     mdh.offset = cpu_to_be64(offset);
758     mdh.buf_size = cpu_to_be64(size);
760     written_size = qemu_write_full(fd, &mdh, sizeof(mdh));
761     if (written_size != sizeof(mdh)) {
765     written_size = qemu_write_full(fd, buf, size);
766     if (written_size != size) {
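/*
 * The kdump output uses makedumpfile's "flat" framing: a fixed-size start
 * header carrying MAKEDUMPFILE_SIGNATURE and TYPE_FLAT_HEADER, then each
 * chunk as a big-endian MakedumpfileDataHeader (target offset + size)
 * followed by the data, and finally an end marker whose fields are
 * END_FLAG_FLAT_HEADER, so a post-processing tool can reassemble the chunks
 * at their real offsets.
 */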
773 static int buf_write_note(const void *buf, size_t size, void *opaque)
775     DumpState *s = opaque;
777     /* note_buf is not enough */
778     if (s->note_buf_offset + size > s->note_size) {
782     memcpy(s->note_buf + s->note_buf_offset, buf, size);
784     s->note_buf_offset += size;
789 /* write common header, sub header and elf note to vmcore */
790 static int create_header32(DumpState *s)
793     DiskDumpHeader32 *dh = NULL;
794     KdumpSubHeader32 *kh = NULL;
796     int endian = s->dump_info.d_endian;
798     uint32_t sub_hdr_size;
799     uint32_t bitmap_blocks;
801     uint64_t offset_note;
803     /* write common header; the kdump-compressed format version used here is 6 */
804     size = sizeof(DiskDumpHeader32);
805     dh = g_malloc0(size);
807     strncpy(dh->signature, KDUMP_SIGNATURE, strlen(KDUMP_SIGNATURE));
808     dh->header_version = cpu_convert_to_target32(6, endian);
809     block_size = s->page_size;
810     dh->block_size = cpu_convert_to_target32(block_size, endian);
811     sub_hdr_size = sizeof(struct KdumpSubHeader32) + s->note_size;
812     sub_hdr_size = DIV_ROUND_UP(sub_hdr_size, block_size);
813     dh->sub_hdr_size = cpu_convert_to_target32(sub_hdr_size, endian);
814     /* dh->max_mapnr may be truncated, full 64bit is in kh.max_mapnr_64 */
815     dh->max_mapnr = cpu_convert_to_target32(MIN(s->max_mapnr, UINT_MAX),
817     dh->nr_cpus = cpu_convert_to_target32(s->nr_cpus, endian);
818     bitmap_blocks = DIV_ROUND_UP(s->len_dump_bitmap, block_size) * 2;
819     dh->bitmap_blocks = cpu_convert_to_target32(bitmap_blocks, endian);
820     strncpy(dh->utsname.machine, ELF_MACHINE_UNAME, sizeof(dh->utsname.machine));
822     if (s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) {
823         status |= DUMP_DH_COMPRESSED_ZLIB;
826     if (s->flag_compress & DUMP_DH_COMPRESSED_LZO) {
827         status |= DUMP_DH_COMPRESSED_LZO;
831     if (s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) {
832         status |= DUMP_DH_COMPRESSED_SNAPPY;
835     dh->status = cpu_convert_to_target32(status, endian);
837     if (write_buffer(s->fd, 0, dh, size) < 0) {
838         dump_error(s, "dump: failed to write disk dump header.\n");
843     /* write sub header */
844     size = sizeof(KdumpSubHeader32);
845     kh = g_malloc0(size);
847     /* 64bit max_mapnr_64 */
848     kh->max_mapnr_64 = cpu_convert_to_target64(s->max_mapnr, endian);
849     kh->phys_base = cpu_convert_to_target32(PHYS_BASE, endian);
850     kh->dump_level = cpu_convert_to_target32(DUMP_LEVEL, endian);
852     offset_note = DISKDUMP_HEADER_BLOCKS * block_size + size;
853     kh->offset_note = cpu_convert_to_target64(offset_note, endian);
854     kh->note_size = cpu_convert_to_target32(s->note_size, endian);
856     if (write_buffer(s->fd, DISKDUMP_HEADER_BLOCKS *
857                      block_size, kh, size) < 0) {
858         dump_error(s, "dump: failed to write kdump sub header.\n");
864     s->note_buf = g_malloc0(s->note_size);
865     s->note_buf_offset = 0;
867     /* use s->note_buf to store notes temporarily */
868     if (write_elf32_notes(buf_write_note, s) < 0) {
873     if (write_buffer(s->fd, offset_note, s->note_buf,
875         dump_error(s, "dump: failed to write notes");
880     /* get offset of dump_bitmap */
881     s->offset_dump_bitmap = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size) *
884     /* get offset of page */
885     s->offset_page = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size + bitmap_blocks) *
896 /* write common header, sub header and elf note to vmcore */
897 static int create_header64(DumpState *s)
900     DiskDumpHeader64 *dh = NULL;
901     KdumpSubHeader64 *kh = NULL;
903     int endian = s->dump_info.d_endian;
905     uint32_t sub_hdr_size;
906     uint32_t bitmap_blocks;
908     uint64_t offset_note;
910     /* write common header; the kdump-compressed format version used here is 6 */
911     size = sizeof(DiskDumpHeader64);
912     dh = g_malloc0(size);
914     strncpy(dh->signature, KDUMP_SIGNATURE, strlen(KDUMP_SIGNATURE));
915     dh->header_version = cpu_convert_to_target32(6, endian);
916     block_size = s->page_size;
917     dh->block_size = cpu_convert_to_target32(block_size, endian);
918     sub_hdr_size = sizeof(struct KdumpSubHeader64) + s->note_size;
919     sub_hdr_size = DIV_ROUND_UP(sub_hdr_size, block_size);
920     dh->sub_hdr_size = cpu_convert_to_target32(sub_hdr_size, endian);
921     /* dh->max_mapnr may be truncated, full 64bit is in kh.max_mapnr_64 */
922     dh->max_mapnr = cpu_convert_to_target32(MIN(s->max_mapnr, UINT_MAX),
924     dh->nr_cpus = cpu_convert_to_target32(s->nr_cpus, endian);
925     bitmap_blocks = DIV_ROUND_UP(s->len_dump_bitmap, block_size) * 2;
926     dh->bitmap_blocks = cpu_convert_to_target32(bitmap_blocks, endian);
927     strncpy(dh->utsname.machine, ELF_MACHINE_UNAME, sizeof(dh->utsname.machine));
929     if (s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) {
930         status |= DUMP_DH_COMPRESSED_ZLIB;
933     if (s->flag_compress & DUMP_DH_COMPRESSED_LZO) {
934         status |= DUMP_DH_COMPRESSED_LZO;
938     if (s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) {
939         status |= DUMP_DH_COMPRESSED_SNAPPY;
942     dh->status = cpu_convert_to_target32(status, endian);
944     if (write_buffer(s->fd, 0, dh, size) < 0) {
945         dump_error(s, "dump: failed to write disk dump header.\n");
950     /* write sub header */
951     size = sizeof(KdumpSubHeader64);
952     kh = g_malloc0(size);
954     /* 64bit max_mapnr_64 */
955     kh->max_mapnr_64 = cpu_convert_to_target64(s->max_mapnr, endian);
956     kh->phys_base = cpu_convert_to_target64(PHYS_BASE, endian);
957     kh->dump_level = cpu_convert_to_target32(DUMP_LEVEL, endian);
959     offset_note = DISKDUMP_HEADER_BLOCKS * block_size + size;
960     kh->offset_note = cpu_convert_to_target64(offset_note, endian);
961     kh->note_size = cpu_convert_to_target64(s->note_size, endian);
963     if (write_buffer(s->fd, DISKDUMP_HEADER_BLOCKS *
964                      block_size, kh, size) < 0) {
965         dump_error(s, "dump: failed to write kdump sub header.\n");
971     s->note_buf = g_malloc0(s->note_size);
972     s->note_buf_offset = 0;
974     /* use s->note_buf to store notes temporarily */
975     if (write_elf64_notes(buf_write_note, s) < 0) {
980     if (write_buffer(s->fd, offset_note, s->note_buf,
982         dump_error(s, "dump: failed to write notes");
987     /* get offset of dump_bitmap */
988     s->offset_dump_bitmap = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size) *
991     /* get offset of page */
992     s->offset_page = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size + bitmap_blocks) *
1003 static int write_dump_header(DumpState *s)
1005     if (s->dump_info.d_machine == EM_386) {
1006         return create_header32(s);
1008         return create_header64(s);
1013  * set dump_bitmap sequentially. the bits before last_pfn are not allowed to be
1014  * rewritten, so to set the first bit, set last_pfn and pfn to 0.
1015  * set_dump_bitmap will always leave the recently set bit un-synced. Setting
1016  * (last bit + sizeof(buf) * 8) to 0 will flush the content in buf into
1017  * vmcore, i.e. synchronize the un-synced bits into vmcore.
1019 static int set_dump_bitmap(uint64_t last_pfn, uint64_t pfn, bool value,
1020                            uint8_t *buf, DumpState *s)
1022     off_t old_offset, new_offset;
1023     off_t offset_bitmap1, offset_bitmap2;
1026     /* must not set bits before last_pfn again */
1027     assert(last_pfn <= pfn);
1030      * if the bit that needs to be set is not cached in buf, flush the data in
1031      * buf to vmcore first.
1032      * making new_offset bigger than old_offset also syncs the remaining data
1035     old_offset = BUFSIZE_BITMAP * (last_pfn / PFN_BUFBITMAP);
1036     new_offset = BUFSIZE_BITMAP * (pfn / PFN_BUFBITMAP);
1038     while (old_offset < new_offset) {
1039         /* calculate the offset and write dump_bitmap */
1040         offset_bitmap1 = s->offset_dump_bitmap + old_offset;
1041         if (write_buffer(s->fd, offset_bitmap1, buf,
1042                          BUFSIZE_BITMAP) < 0) {
1046         /* dump level 1 is chosen, so 1st and 2nd bitmap are the same */
1047         offset_bitmap2 = s->offset_dump_bitmap + s->len_dump_bitmap +
1049         if (write_buffer(s->fd, offset_bitmap2, buf,
1050                          BUFSIZE_BITMAP) < 0) {
1054         memset(buf, 0, BUFSIZE_BITMAP);
1055         old_offset += BUFSIZE_BITMAP;
1058     /* get the exact place of the bit in the buf, and set it */
1059     byte = (pfn % PFN_BUFBITMAP) / CHAR_BIT;
1060     bit = (pfn % PFN_BUFBITMAP) % CHAR_BIT;
1062         buf[byte] |= 1u << bit;
1064         buf[byte] &= ~(1u << bit);
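/*
 * set_dump_bitmap() works on a PFN_BUFBITMAP-bit window: bits are only
 * toggled in buf while pfn stays inside the window that last_pfn belongs to;
 * once pfn moves past it, every completed window is written to both the 1st
 * and 2nd dump_bitmap regions and buf is cleared. Passing
 * pfn = last_pfn + PFN_BUFBITMAP (as write_dump_bitmap() does at the end)
 * therefore flushes the final, partially filled window.
 */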
1071  * examine every page and return the page frame number and the address of the page.
1072  * bufptr can be NULL. note: the blocks here are supposed to reflect guest-phys
1073  * blocks, so block->target_start and block->target_end should be integral
1074  * multiples of the target page size.
1076 static bool get_next_page(GuestPhysBlock **blockptr, uint64_t *pfnptr,
1077                           uint8_t **bufptr, DumpState *s)
1079     GuestPhysBlock *block = *blockptr;
1083     /* block == NULL means the start of the iteration */
1085         block = QTAILQ_FIRST(&s->guest_phys_blocks.head);
1087         assert(block->target_start % s->page_size == 0);
1088         assert(block->target_end % s->page_size == 0);
1089         *pfnptr = paddr_to_pfn(block->target_start, s->page_shift);
1091             *bufptr = block->host_addr;
1096     *pfnptr = *pfnptr + 1;
1097     addr = pfn_to_paddr(*pfnptr, s->page_shift);
1099     if ((addr >= block->target_start) &&
1100         (addr + s->page_size <= block->target_end)) {
1101         buf = block->host_addr + (addr - block->target_start);
1103         /* the next page is in the next block */
1104         block = QTAILQ_NEXT(block, next);
1109         assert(block->target_start % s->page_size == 0);
1110         assert(block->target_end % s->page_size == 0);
1111         *pfnptr = paddr_to_pfn(block->target_start, s->page_shift);
1112         buf = block->host_addr;
1122 static int write_dump_bitmap(DumpState *s)
1125     uint64_t last_pfn, pfn;
1126     void *dump_bitmap_buf;
1127     size_t num_dumpable;
1128     GuestPhysBlock *block_iter = NULL;
1130     /* dump_bitmap_buf is used to store dump_bitmap temporarily */
1131     dump_bitmap_buf = g_malloc0(BUFSIZE_BITMAP);
1137      * examine memory page by page, and set the bit in dump_bitmap corresponding
1138      * to each existing page.
1140     while (get_next_page(&block_iter, &pfn, NULL, s)) {
1141         ret = set_dump_bitmap(last_pfn, pfn, true, dump_bitmap_buf, s);
1143             dump_error(s, "dump: failed to set dump_bitmap.\n");
1153      * set_dump_bitmap will always leave the recently set bit un-synced. Here we
1154      * set last_pfn + PFN_BUFBITMAP to 0 so that the set but un-synced bits are
1155      * synchronized into vmcore.
1157     if (num_dumpable > 0) {
1158         ret = set_dump_bitmap(last_pfn, last_pfn + PFN_BUFBITMAP, false,
1159                               dump_bitmap_buf, s);
1161             dump_error(s, "dump: failed to sync dump_bitmap.\n");
1167     /* number of dumpable pages that will be dumped later */
1168     s->num_dumpable = num_dumpable;
1171     g_free(dump_bitmap_buf);
1176 static void prepare_data_cache(DataCache *data_cache, DumpState *s,
1179     data_cache->fd = s->fd;
1180     data_cache->data_size = 0;
1181     data_cache->buf_size = BUFSIZE_DATA_CACHE;
1182     data_cache->buf = g_malloc0(BUFSIZE_DATA_CACHE);
1183     data_cache->offset = offset;
1186 static int write_cache(DataCache *dc, const void *buf, size_t size,
1190      * dc->buf_size should not be less than size, otherwise dc will never be
1193     assert(size <= dc->buf_size);
1196      * if flag_sync is set, synchronize data in dc->buf into vmcore.
1197      * otherwise check if the space is enough for caching data in buf, if not,
1198      * write the data in dc->buf to dc->fd and reset dc->buf
1200     if ((!flag_sync && dc->data_size + size > dc->buf_size) ||
1201         (flag_sync && dc->data_size > 0)) {
1202         if (write_buffer(dc->fd, dc->offset, dc->buf, dc->data_size) < 0) {
1206         dc->offset += dc->data_size;
1211     memcpy(dc->buf + dc->data_size, buf, size);
1212     dc->data_size += size;
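/*
 * write_cache() batches small writes: data accumulates in dc->buf until the
 * next copy would overflow dc->buf_size, at which point the buffer is
 * flushed with write_buffer() and dc->offset advances. A call with
 * flag_sync = true forces out whatever is still buffered; write_dump_pages()
 * uses that to drain the page_desc and page_data caches before finishing.
 */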
1218 static void free_data_cache(DataCache *data_cache)
1220     g_free(data_cache->buf);
1223 static size_t get_len_buf_out(size_t page_size, uint32_t flag_compress)
1225     size_t len_buf_out_zlib, len_buf_out_lzo, len_buf_out_snappy;
1229     len_buf_out_zlib = len_buf_out_lzo = len_buf_out_snappy = 0;
1231     /* buf size for zlib */
1232     len_buf_out_zlib = compressBound(page_size);
1234     /* buf size for lzo */
1236     if (flag_compress & DUMP_DH_COMPRESSED_LZO) {
1237         if (lzo_init() != LZO_E_OK) {
1238             /* return 0 to indicate lzo is unavailable */
1244      * LZO will expand incompressible data by a little amount. please check the
1245      * following URL to see the expansion calculation:
1246      * http://www.oberhumer.com/opensource/lzo/lzofaq.php
1248     len_buf_out_lzo = page_size + page_size / 16 + 64 + 3;
1251 #ifdef CONFIG_SNAPPY
1252     /* buf size for snappy */
1253     len_buf_out_snappy = snappy_max_compressed_length(page_size);
1256     /* pick the biggest size, so the buffer can hold any kind of compressed page */
1257     len_buf_out = MAX(len_buf_out_zlib,
1258                       MAX(len_buf_out_lzo, len_buf_out_snappy));
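/*
 * Each compressor has its own worst case for an incompressible page: zlib
 * reports it via compressBound(), snappy via snappy_max_compressed_length(),
 * and LZO uses the fixed formula above from the LZO FAQ. Taking the maximum
 * lets a single buf_out serve whichever branch write_dump_pages() takes.
 */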
1264  * check if the page is all 0
1266 static inline bool is_zero_page(const uint8_t *buf, size_t page_size)
1268     return buffer_is_zero(buf, page_size);
1271 static int write_dump_pages(DumpState *s)
1274     DataCache page_desc, page_data;
1275     size_t len_buf_out, size_out;
1277     lzo_bytep wrkmem = NULL;
1279     uint8_t *buf_out = NULL;
1280     off_t offset_desc, offset_data;
1281     PageDescriptor pd, pd_zero;
1283     int endian = s->dump_info.d_endian;
1284     GuestPhysBlock *block_iter = NULL;
1287     /* get offset of page_desc and page_data in dump file */
1288     offset_desc = s->offset_page;
1289     offset_data = offset_desc + sizeof(PageDescriptor) * s->num_dumpable;
1291     prepare_data_cache(&page_desc, s, offset_desc);
1292     prepare_data_cache(&page_data, s, offset_data);
1294     /* prepare buffer to store compressed data */
1295     len_buf_out = get_len_buf_out(s->page_size, s->flag_compress);
1296     if (len_buf_out == 0) {
1297         dump_error(s, "dump: failed to get length of output buffer.\n");
1302     wrkmem = g_malloc(LZO1X_1_MEM_COMPRESS);
1305     buf_out = g_malloc(len_buf_out);
1308      * init zero page's page_desc and page_data, because every zero page
1309      * uses the same page_data
1311     pd_zero.size = cpu_convert_to_target32(s->page_size, endian);
1312     pd_zero.flags = cpu_convert_to_target32(0, endian);
1313     pd_zero.offset = cpu_convert_to_target64(offset_data, endian);
1314     pd_zero.page_flags = cpu_convert_to_target64(0, endian);
1315     buf = g_malloc0(s->page_size);
1316     ret = write_cache(&page_data, buf, s->page_size, false);
1319         dump_error(s, "dump: failed to write page data(zero page).\n");
1323     offset_data += s->page_size;
1326      * dump memory to vmcore page by page. all zero pages reside in the
1327      * first page of the page section
1329     while (get_next_page(&block_iter, &pfn_iter, &buf, s)) {
1330         /* check zero page */
1331         if (is_zero_page(buf, s->page_size)) {
1332             ret = write_cache(&page_desc, &pd_zero, sizeof(PageDescriptor),
1335                 dump_error(s, "dump: failed to write page desc.\n");
1340              * not a zero page, then:
1341              * 1. compress the page
1342              * 2. write the compressed page into the cache of page_data
1343              * 3. get page desc of the compressed page and write it into the
1344              *    cache of page_desc
1346              * only one compression format will be used here, because
1347              * s->flag_compress is set. But when compression fails to work,
1348              * we fall back to saving in plaintext.
1350             size_out = len_buf_out;
1351             if ((s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) &&
1352                 (compress2(buf_out, (uLongf *)&size_out, buf, s->page_size,
1353                            Z_BEST_SPEED) == Z_OK) && (size_out < s->page_size)) {
1354                 pd.flags = cpu_convert_to_target32(DUMP_DH_COMPRESSED_ZLIB,
1356                 pd.size = cpu_convert_to_target32(size_out, endian);
1358                 ret = write_cache(&page_data, buf_out, size_out, false);
1360                     dump_error(s, "dump: failed to write page data.\n");
1364             } else if ((s->flag_compress & DUMP_DH_COMPRESSED_LZO) &&
1365                        (lzo1x_1_compress(buf, s->page_size, buf_out,
1366                        (lzo_uint *)&size_out, wrkmem) == LZO_E_OK) &&
1367                        (size_out < s->page_size)) {
1368                 pd.flags = cpu_convert_to_target32(DUMP_DH_COMPRESSED_LZO,
1370                 pd.size = cpu_convert_to_target32(size_out, endian);
1372                 ret = write_cache(&page_data, buf_out, size_out, false);
1374                     dump_error(s, "dump: failed to write page data.\n");
1378 #ifdef CONFIG_SNAPPY
1379             } else if ((s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) &&
1380                        (snappy_compress((char *)buf, s->page_size,
1381                        (char *)buf_out, &size_out) == SNAPPY_OK) &&
1382                        (size_out < s->page_size)) {
1383                 pd.flags = cpu_convert_to_target32(
1384                                           DUMP_DH_COMPRESSED_SNAPPY, endian);
1385                 pd.size = cpu_convert_to_target32(size_out, endian);
1387                 ret = write_cache(&page_data, buf_out, size_out, false);
1389                     dump_error(s, "dump: failed to write page data.\n");
1395                  * fall back to saving in plaintext; size_out should be
1396                  * set to s->page_size
1398                 pd.flags = cpu_convert_to_target32(0, endian);
1399                 size_out = s->page_size;
1400                 pd.size = cpu_convert_to_target32(size_out, endian);
1402                 ret = write_cache(&page_data, buf, s->page_size, false);
1404                     dump_error(s, "dump: failed to write page data.\n");
1409             /* get and write page desc here */
1410             pd.page_flags = cpu_convert_to_target64(0, endian);
1411             pd.offset = cpu_convert_to_target64(offset_data, endian);
1412             offset_data += size_out;
1414             ret = write_cache(&page_desc, &pd, sizeof(PageDescriptor), false);
1416                 dump_error(s, "dump: failed to write page desc.\n");
1422     ret = write_cache(&page_desc, NULL, 0, true);
1424         dump_error(s, "dump: failed to sync cache for page_desc.\n");
1427     ret = write_cache(&page_data, NULL, 0, true);
1429         dump_error(s, "dump: failed to sync cache for page_data.\n");
1434     free_data_cache(&page_desc);
1435     free_data_cache(&page_data);
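/*
 * Summary of the page section produced above: every dumpable page gets one
 * PageDescriptor in the page_desc region (flags, compressed size, offset
 * into page_data), while zero pages are never stored again; their descriptor
 * is pd_zero, which points at the single zero-filled page written once at
 * the very start of page_data.
 */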
1446 static int create_kdump_vmcore(DumpState *s)
1451 * the kdump-compressed format is:
1453 * +------------------------------------------+ 0x0
1454 * | main header (struct disk_dump_header) |
1455 * |------------------------------------------+ block 1
1456 * | sub header (struct kdump_sub_header) |
1457 * |------------------------------------------+ block 2
1458 * | 1st-dump_bitmap |
1459 * |------------------------------------------+ block 2 + X blocks
1460 * | 2nd-dump_bitmap | (aligned by block)
1461 * |------------------------------------------+ block 2 + 2 * X blocks
1462 * | page desc for pfn 0 (struct page_desc) | (aligned by block)
1463 * | page desc for pfn 1 (struct page_desc) |
1465 * |------------------------------------------| (not aligned by block)
1466 * | page data (pfn 0) |
1467 * | page data (pfn 1) |
1469 * +------------------------------------------+
1472     ret = write_start_flat_header(s->fd);
1474         dump_error(s, "dump: failed to write start flat header.\n");
1478     ret = write_dump_header(s);
1483     ret = write_dump_bitmap(s);
1488     ret = write_dump_pages(s);
1493     ret = write_end_flat_header(s->fd);
1495         dump_error(s, "dump: failed to write end flat header.\n");
1504 static ram_addr_t get_start_block(DumpState *s)
1506     GuestPhysBlock *block;
1508     if (!s->has_filter) {
1509         s->next_block = QTAILQ_FIRST(&s->guest_phys_blocks.head);
1513     QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) {
1514         if (block->target_start >= s->begin + s->length ||
1515             block->target_end <= s->begin) {
1516             /* This block is out of the range */
1520         s->next_block = block;
1521         if (s->begin > block->target_start) {
1522             s->start = s->begin - block->target_start;
1532 static void get_max_mapnr(DumpState *s)
1534     GuestPhysBlock *last_block;
1536     last_block = QTAILQ_LAST(&s->guest_phys_blocks.head, GuestPhysBlockHead);
1537     s->max_mapnr = paddr_to_pfn(last_block->target_end, s->page_shift);
1540 static int dump_init(DumpState *s, int fd, bool has_format,
1541                      DumpGuestMemoryFormat format, bool paging, bool has_filter,
1542                      int64_t begin, int64_t length, Error **errp)
1549     /* kdump-compressed conflicts with paging and filter */
1550     if (has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) {
1551         assert(!paging && !has_filter);
1554     if (runstate_is_running()) {
1555         vm_stop(RUN_STATE_SAVE_VM);
1561     /* If we use KVM, we should synchronize the registers before we get dump
1562      * info or physmap info.
1564     cpu_synchronize_all_states();
1571     s->has_filter = has_filter;
1575     guest_phys_blocks_init(&s->guest_phys_blocks);
1576     guest_phys_blocks_append(&s->guest_phys_blocks);
1578     s->start = get_start_block(s);
1579     if (s->start == -1) {
1580         error_set(errp, QERR_INVALID_PARAMETER, "begin");
1584     /* get dump info: endian, class and architecture.
1585      * If the target architecture is not supported, cpu_get_dump_info() will
1588     ret = cpu_get_dump_info(&s->dump_info, &s->guest_phys_blocks);
1590         error_set(errp, QERR_UNSUPPORTED);
1594     s->note_size = cpu_get_note_size(s->dump_info.d_class,
1595                                      s->dump_info.d_machine, nr_cpus);
1596     if (s->note_size < 0) {
1597         error_set(errp, QERR_UNSUPPORTED);
1601     /* get memory mapping */
1602     memory_mapping_list_init(&s->list);
1604         qemu_get_guest_memory_mapping(&s->list, &s->guest_phys_blocks, &err);
1606             error_propagate(errp, err);
1610         qemu_get_guest_simple_memory_mapping(&s->list, &s->guest_phys_blocks);
1613     s->nr_cpus = nr_cpus;
1614     s->page_size = TARGET_PAGE_SIZE;
1615     s->page_shift = ffs(s->page_size) - 1;
1620     tmp = DIV_ROUND_UP(DIV_ROUND_UP(s->max_mapnr, CHAR_BIT), s->page_size);
1621     s->len_dump_bitmap = tmp * s->page_size;
1623     /* init for kdump-compressed format */
1624     if (has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) {
1626         case DUMP_GUEST_MEMORY_FORMAT_KDUMP_ZLIB:
1627             s->flag_compress = DUMP_DH_COMPRESSED_ZLIB;
1630         case DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO:
1631             s->flag_compress = DUMP_DH_COMPRESSED_LZO;
1634         case DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY:
1635             s->flag_compress = DUMP_DH_COMPRESSED_SNAPPY;
1639             s->flag_compress = 0;
1645     if (s->has_filter) {
1646         memory_mapping_filter(&s->list, s->begin, s->length);
1650      * calculate phdr_num
1652      * the type of ehdr->e_phnum is uint16_t, so we should avoid overflow
1654     s->phdr_num = 1; /* PT_NOTE */
1655     if (s->list.num < UINT16_MAX - 2) {
1656         s->phdr_num += s->list.num;
1657         s->have_section = false;
1659         s->have_section = true;
1660         s->phdr_num = PN_XNUM;
1661         s->sh_info = 1; /* PT_NOTE */
1663         /* the type of shdr->sh_info is uint32_t, so we should avoid overflow */
1664         if (s->list.num <= UINT32_MAX - 1) {
1665             s->sh_info += s->list.num;
1667             s->sh_info = UINT32_MAX;
1671     if (s->dump_info.d_class == ELFCLASS64) {
1672         if (s->have_section) {
1673             s->memory_offset = sizeof(Elf64_Ehdr) +
1674                                sizeof(Elf64_Phdr) * s->sh_info +
1675                                sizeof(Elf64_Shdr) + s->note_size;
1677             s->memory_offset = sizeof(Elf64_Ehdr) +
1678                                sizeof(Elf64_Phdr) * s->phdr_num + s->note_size;
1681         if (s->have_section) {
1682             s->memory_offset = sizeof(Elf32_Ehdr) +
1683                                sizeof(Elf32_Phdr) * s->sh_info +
1684                                sizeof(Elf32_Shdr) + s->note_size;
1686             s->memory_offset = sizeof(Elf32_Ehdr) +
1687                                sizeof(Elf32_Phdr) * s->phdr_num + s->note_size;
1694     guest_phys_blocks_free(&s->guest_phys_blocks);
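/*
 * For the ELF output, s->memory_offset computed above is where the dumped
 * memory begins in the file: ELF header, then all program headers, then the
 * optional section header, then the notes. write_elf64_note(),
 * write_elf32_note() and get_offset_range() all derive their offsets from
 * this layout.
 */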
1703 void qmp_dump_guest_memory(bool paging, const char *file, bool has_begin,
1704                            int64_t begin, bool has_length,
1705                            int64_t length, bool has_format,
1706                            DumpGuestMemoryFormat format, Error **errp)
1714      * the kdump-compressed format needs the whole memory dumped, so paging or
1715      * filter is not supported here.
1717     if ((has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) &&
1718         (paging || has_begin || has_length)) {
1719         error_setg(errp, "kdump-compressed format doesn't support paging or "
1723     if (has_begin && !has_length) {
1724         error_set(errp, QERR_MISSING_PARAMETER, "length");
1727     if (!has_begin && has_length) {
1728         error_set(errp, QERR_MISSING_PARAMETER, "begin");
1732     /* check whether lzo/snappy is supported */
1734     if (has_format && format == DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO) {
1735         error_setg(errp, "kdump-lzo is not available now");
1740 #ifndef CONFIG_SNAPPY
1741     if (has_format && format == DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY) {
1742         error_setg(errp, "kdump-snappy is not available now");
1748     if (strstart(file, "fd:", &p)) {
1749         fd = monitor_get_fd(cur_mon, p, errp);
1756     if (strstart(file, "file:", &p)) {
1757         fd = qemu_open(p, O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, S_IRUSR);
1759             error_setg_file_open(errp, errno, p);
1765         error_set(errp, QERR_INVALID_PARAMETER, "protocol");
1769     s = g_malloc0(sizeof(DumpState));
1771     ret = dump_init(s, fd, has_format, format, paging, has_begin,
1772                     begin, length, errp);
1778     if (has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) {
1779         if (create_kdump_vmcore(s) < 0) {
1780             error_set(errp, QERR_IO_ERROR);
1783         if (create_vmcore(s) < 0) {
1784             error_set(errp, QERR_IO_ERROR);
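/*
 * This function backs the QMP command "dump-guest-memory". A typical
 * invocation (a sketch only, not copied from the QAPI schema) might be:
 *
 *   { "execute": "dump-guest-memory",
 *     "arguments": { "paging": false,
 *                    "protocol": "file:/tmp/vmcore",
 *                    "format": "kdump-zlib" } }
 *
 * where "protocol" selects between the "fd:" and "file:" cases handled
 * above, and omitting "format" produces the plain ELF vmcore.
 */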
1791 DumpGuestMemoryCapability *qmp_query_dump_guest_memory_capability(Error **errp)
1793     DumpGuestMemoryFormatList *item;
1794     DumpGuestMemoryCapability *cap =
1795                                   g_malloc0(sizeof(DumpGuestMemoryCapability));
1797     /* elf is always available */
1798     item = g_malloc0(sizeof(DumpGuestMemoryFormatList));
1799     cap->formats = item;
1800     item->value = DUMP_GUEST_MEMORY_FORMAT_ELF;
1802     /* kdump-zlib is always available */
1803     item->next = g_malloc0(sizeof(DumpGuestMemoryFormatList));
1805     item->value = DUMP_GUEST_MEMORY_FORMAT_KDUMP_ZLIB;
1807     /* add new item if kdump-lzo is available */
1809     item->next = g_malloc0(sizeof(DumpGuestMemoryFormatList));
1811     item->value = DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO;
1814     /* add new item if kdump-snappy is available */
1815 #ifdef CONFIG_SNAPPY
1816     item->next = g_malloc0(sizeof(DumpGuestMemoryFormatList));
1818     item->value = DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY;