/*
 * QEMU dump
 *
 * Copyright Fujitsu, Corp. 2011, 2012
 *
 * Authors:
 *     Wen Congyang <wency@cn.fujitsu.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "elf.h"
#include "cpu.h"
#include "exec/cpu-all.h"
#include "exec/hwaddr.h"
#include "monitor/monitor.h"
#include "sysemu/kvm.h"
#include "sysemu/dump.h"
#include "sysemu/sysemu.h"
#include "sysemu/memory_mapping.h"
#include "sysemu/cpus.h"
#include "qapi/qmp/qerror.h"
#include "qmp-commands.h"

#include <zlib.h>
#ifdef CONFIG_LZO
#include <lzo/lzo1x.h>
#endif
#ifdef CONFIG_SNAPPY
#include <snappy-c.h>
#endif
#ifndef ELF_MACHINE_UNAME
#define ELF_MACHINE_UNAME "Unknown"
#endif

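/*
 * Convert a value to the endianness recorded in the dump info, so that
 * all multi-byte fields in the vmcore match the target's byte order.
 */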
uint16_t cpu_to_dump16(DumpState *s, uint16_t val)
{
    if (s->dump_info.d_endian == ELFDATA2LSB) {
        val = cpu_to_le16(val);
    } else {
        val = cpu_to_be16(val);
    }

    return val;
}

uint32_t cpu_to_dump32(DumpState *s, uint32_t val)
{
    if (s->dump_info.d_endian == ELFDATA2LSB) {
        val = cpu_to_le32(val);
    } else {
        val = cpu_to_be32(val);
    }

    return val;
}

uint64_t cpu_to_dump64(DumpState *s, uint64_t val)
{
    if (s->dump_info.d_endian == ELFDATA2LSB) {
        val = cpu_to_le64(val);
    } else {
        val = cpu_to_be64(val);
    }

    return val;
}

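/* free the memory mapping lists, close the dump file and resume the VM */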
static int dump_cleanup(DumpState *s)
{
    guest_phys_blocks_free(&s->guest_phys_blocks);
    memory_mapping_list_free(&s->list);
    close(s->fd);
    if (s->resume) {
        vm_start();
    }

    return 0;
}

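/* write a buffer to the dump file; returns 0 on success, -1 on short write */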
static int fd_write_vmcore(const void *buf, size_t size, void *opaque)
{
    DumpState *s = opaque;
    size_t written_size;

    written_size = qemu_write_full(s->fd, buf, size);
    if (written_size != size) {
        return -1;
    }

    return 0;
}

static void write_elf64_header(DumpState *s, Error **errp)
{
    Elf64_Ehdr elf_header;
    int ret;

    memset(&elf_header, 0, sizeof(Elf64_Ehdr));
    memcpy(&elf_header, ELFMAG, SELFMAG);
    elf_header.e_ident[EI_CLASS] = ELFCLASS64;
    elf_header.e_ident[EI_DATA] = s->dump_info.d_endian;
    elf_header.e_ident[EI_VERSION] = EV_CURRENT;
    elf_header.e_type = cpu_to_dump16(s, ET_CORE);
    elf_header.e_machine = cpu_to_dump16(s, s->dump_info.d_machine);
    elf_header.e_version = cpu_to_dump32(s, EV_CURRENT);
    elf_header.e_ehsize = cpu_to_dump16(s, sizeof(elf_header));
    elf_header.e_phoff = cpu_to_dump64(s, sizeof(Elf64_Ehdr));
    elf_header.e_phentsize = cpu_to_dump16(s, sizeof(Elf64_Phdr));
    elf_header.e_phnum = cpu_to_dump16(s, s->phdr_num);
    if (s->have_section) {
        uint64_t shoff = sizeof(Elf64_Ehdr) + sizeof(Elf64_Phdr) * s->sh_info;

        elf_header.e_shoff = cpu_to_dump64(s, shoff);
        elf_header.e_shentsize = cpu_to_dump16(s, sizeof(Elf64_Shdr));
        elf_header.e_shnum = cpu_to_dump16(s, 1);
    }

    ret = fd_write_vmcore(&elf_header, sizeof(elf_header), s);
    if (ret < 0) {
        error_setg(errp, "dump: failed to write elf header");
    }
}

static void write_elf32_header(DumpState *s, Error **errp)
{
    Elf32_Ehdr elf_header;
    int ret;

    memset(&elf_header, 0, sizeof(Elf32_Ehdr));
    memcpy(&elf_header, ELFMAG, SELFMAG);
    elf_header.e_ident[EI_CLASS] = ELFCLASS32;
    elf_header.e_ident[EI_DATA] = s->dump_info.d_endian;
    elf_header.e_ident[EI_VERSION] = EV_CURRENT;
    elf_header.e_type = cpu_to_dump16(s, ET_CORE);
    elf_header.e_machine = cpu_to_dump16(s, s->dump_info.d_machine);
    elf_header.e_version = cpu_to_dump32(s, EV_CURRENT);
    elf_header.e_ehsize = cpu_to_dump16(s, sizeof(elf_header));
    elf_header.e_phoff = cpu_to_dump32(s, sizeof(Elf32_Ehdr));
    elf_header.e_phentsize = cpu_to_dump16(s, sizeof(Elf32_Phdr));
    elf_header.e_phnum = cpu_to_dump16(s, s->phdr_num);
    if (s->have_section) {
        uint32_t shoff = sizeof(Elf32_Ehdr) + sizeof(Elf32_Phdr) * s->sh_info;

        elf_header.e_shoff = cpu_to_dump32(s, shoff);
        elf_header.e_shentsize = cpu_to_dump16(s, sizeof(Elf32_Shdr));
        elf_header.e_shnum = cpu_to_dump16(s, 1);
    }

    ret = fd_write_vmcore(&elf_header, sizeof(elf_header), s);
    if (ret < 0) {
        error_setg(errp, "dump: failed to write elf header");
    }
}

static void write_elf64_load(DumpState *s, MemoryMapping *memory_mapping,
                             int phdr_index, hwaddr offset,
                             hwaddr filesz, Error **errp)
{
    Elf64_Phdr phdr;
    int ret;

    memset(&phdr, 0, sizeof(Elf64_Phdr));
    phdr.p_type = cpu_to_dump32(s, PT_LOAD);
    phdr.p_offset = cpu_to_dump64(s, offset);
    phdr.p_paddr = cpu_to_dump64(s, memory_mapping->phys_addr);
    phdr.p_filesz = cpu_to_dump64(s, filesz);
    phdr.p_memsz = cpu_to_dump64(s, memory_mapping->length);
    phdr.p_vaddr = cpu_to_dump64(s, memory_mapping->virt_addr);

    assert(memory_mapping->length >= filesz);

    ret = fd_write_vmcore(&phdr, sizeof(Elf64_Phdr), s);
    if (ret < 0) {
        error_setg(errp, "dump: failed to write program header table");
    }
}

static void write_elf32_load(DumpState *s, MemoryMapping *memory_mapping,
                             int phdr_index, hwaddr offset,
                             hwaddr filesz, Error **errp)
{
    Elf32_Phdr phdr;
    int ret;

    memset(&phdr, 0, sizeof(Elf32_Phdr));
    phdr.p_type = cpu_to_dump32(s, PT_LOAD);
    phdr.p_offset = cpu_to_dump32(s, offset);
    phdr.p_paddr = cpu_to_dump32(s, memory_mapping->phys_addr);
    phdr.p_filesz = cpu_to_dump32(s, filesz);
    phdr.p_memsz = cpu_to_dump32(s, memory_mapping->length);
    phdr.p_vaddr = cpu_to_dump32(s, memory_mapping->virt_addr);

    assert(memory_mapping->length >= filesz);

    ret = fd_write_vmcore(&phdr, sizeof(Elf32_Phdr), s);
    if (ret < 0) {
        error_setg(errp, "dump: failed to write program header table");
    }
}

static void write_elf64_note(DumpState *s, Error **errp)
{
    Elf64_Phdr phdr;
    hwaddr begin = s->memory_offset - s->note_size;
    int ret;

    memset(&phdr, 0, sizeof(Elf64_Phdr));
    phdr.p_type = cpu_to_dump32(s, PT_NOTE);
    phdr.p_offset = cpu_to_dump64(s, begin);
    phdr.p_paddr = 0;
    phdr.p_filesz = cpu_to_dump64(s, s->note_size);
    phdr.p_memsz = cpu_to_dump64(s, s->note_size);
    phdr.p_vaddr = 0;

    ret = fd_write_vmcore(&phdr, sizeof(Elf64_Phdr), s);
    if (ret < 0) {
        error_setg(errp, "dump: failed to write program header table");
    }
}

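/* note ids in the vmcore are 1-based, while cpu_index starts at 0 */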
static inline int cpu_index(CPUState *cpu)
{
    return cpu->cpu_index + 1;
}

static void write_elf64_notes(WriteCoreDumpFunction f, DumpState *s,
                              Error **errp)
{
    CPUState *cpu;
    int ret;
    int id;

    CPU_FOREACH(cpu) {
        id = cpu_index(cpu);
        ret = cpu_write_elf64_note(f, cpu, id, s);
        if (ret < 0) {
            error_setg(errp, "dump: failed to write elf notes");
            return;
        }
    }

    CPU_FOREACH(cpu) {
        ret = cpu_write_elf64_qemunote(f, cpu, s);
        if (ret < 0) {
            error_setg(errp, "dump: failed to write CPU status");
            return;
        }
    }
}

static void write_elf32_note(DumpState *s, Error **errp)
{
    hwaddr begin = s->memory_offset - s->note_size;
    Elf32_Phdr phdr;
    int ret;

    memset(&phdr, 0, sizeof(Elf32_Phdr));
    phdr.p_type = cpu_to_dump32(s, PT_NOTE);
    phdr.p_offset = cpu_to_dump32(s, begin);
    phdr.p_paddr = 0;
    phdr.p_filesz = cpu_to_dump32(s, s->note_size);
    phdr.p_memsz = cpu_to_dump32(s, s->note_size);
    phdr.p_vaddr = 0;

    ret = fd_write_vmcore(&phdr, sizeof(Elf32_Phdr), s);
    if (ret < 0) {
        error_setg(errp, "dump: failed to write program header table");
    }
}

static void write_elf32_notes(WriteCoreDumpFunction f, DumpState *s,
                              Error **errp)
{
    CPUState *cpu;
    int ret;
    int id;

    CPU_FOREACH(cpu) {
        id = cpu_index(cpu);
        ret = cpu_write_elf32_note(f, cpu, id, s);
        if (ret < 0) {
            error_setg(errp, "dump: failed to write elf notes");
            return;
        }
    }

    CPU_FOREACH(cpu) {
        ret = cpu_write_elf32_qemunote(f, cpu, s);
        if (ret < 0) {
            error_setg(errp, "dump: failed to write CPU status");
            return;
        }
    }
}

static void write_elf_section(DumpState *s, int type, Error **errp)
{
    Elf32_Shdr shdr32;
    Elf64_Shdr shdr64;
    int shdr_size;
    void *shdr;
    int ret;

    if (type == 0) {
        shdr_size = sizeof(Elf32_Shdr);
        memset(&shdr32, 0, shdr_size);
        shdr32.sh_info = cpu_to_dump32(s, s->sh_info);
        shdr = &shdr32;
    } else {
        shdr_size = sizeof(Elf64_Shdr);
        memset(&shdr64, 0, shdr_size);
        shdr64.sh_info = cpu_to_dump32(s, s->sh_info);
        shdr = &shdr64;
    }

    ret = fd_write_vmcore(shdr, shdr_size, s);
    if (ret < 0) {
        error_setg(errp, "dump: failed to write section header table");
    }
}

static void write_data(DumpState *s, void *buf, int length, Error **errp)
{
    int ret;

    ret = fd_write_vmcore(buf, length, s);
    if (ret < 0) {
        error_setg(errp, "dump: failed to save memory");
    } else {
        s->written_size += length;
    }
}

/* write the memory to vmcore. 1 page per I/O. */
static void write_memory(DumpState *s, GuestPhysBlock *block, ram_addr_t start,
                         int64_t size, Error **errp)
{
    int64_t i;
    Error *local_err = NULL;

    for (i = 0; i < size / s->dump_info.page_size; i++) {
        write_data(s, block->host_addr + start + i * s->dump_info.page_size,
                   s->dump_info.page_size, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    }

    if ((size % s->dump_info.page_size) != 0) {
        write_data(s, block->host_addr + start + i * s->dump_info.page_size,
                   size % s->dump_info.page_size, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    }
}

/* get the memory's offset and size in the vmcore */
static void get_offset_range(hwaddr phys_addr,
                             ram_addr_t mapping_length,
                             DumpState *s,
                             hwaddr *p_offset,
                             hwaddr *p_filesz)
{
    GuestPhysBlock *block;
    hwaddr offset = s->memory_offset;
    int64_t size_in_block, start;

    /* When the memory is not stored into vmcore, offset will be -1 */
    *p_offset = -1;
    *p_filesz = 0;

    if (s->has_filter) {
        if (phys_addr < s->begin || phys_addr >= s->begin + s->length) {
            return;
        }
    }

    QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) {
        if (s->has_filter) {
            if (block->target_start >= s->begin + s->length ||
                block->target_end <= s->begin) {
                /* This block is out of the range */
                continue;
            }

            if (s->begin <= block->target_start) {
                start = block->target_start;
            } else {
                start = s->begin;
            }

            size_in_block = block->target_end - start;
            if (s->begin + s->length < block->target_end) {
                size_in_block -= block->target_end - (s->begin + s->length);
            }
        } else {
            start = block->target_start;
            size_in_block = block->target_end - block->target_start;
        }

        if (phys_addr >= start && phys_addr < start + size_in_block) {
            *p_offset = phys_addr - start + offset;

            /* The offset range mapped from the vmcore file must not spill over
             * the GuestPhysBlock, clamp it. The rest of the mapping will be
             * zero-filled in memory at load time; see
             * <http://refspecs.linuxbase.org/elf/gabi4+/ch5.pheader.html>.
             */
            *p_filesz = phys_addr + mapping_length <= start + size_in_block ?
                        mapping_length :
                        size_in_block - (phys_addr - start);
            return;
        }

        offset += size_in_block;
    }
}

static void write_elf_loads(DumpState *s, Error **errp)
{
    hwaddr offset, filesz;
    MemoryMapping *memory_mapping;
    uint32_t phdr_index = 1;
    uint32_t max_index;
    Error *local_err = NULL;

    if (s->have_section) {
        max_index = s->sh_info;
    } else {
        max_index = s->phdr_num;
    }

    QTAILQ_FOREACH(memory_mapping, &s->list.head, next) {
        get_offset_range(memory_mapping->phys_addr,
                         memory_mapping->length,
                         s, &offset, &filesz);
        if (s->dump_info.d_class == ELFCLASS64) {
            write_elf64_load(s, memory_mapping, phdr_index++, offset,
                             filesz, &local_err);
        } else {
            write_elf32_load(s, memory_mapping, phdr_index++, offset,
                             filesz, &local_err);
        }

        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }

        if (phdr_index >= max_index) {
            break;
        }
    }
}

/* write elf header, PT_NOTE and elf note to vmcore. */
static void dump_begin(DumpState *s, Error **errp)
{
    Error *local_err = NULL;

    /*
     * the vmcore's format is:
     *   --------------
     *   |  elf header |
     *   --------------
     *   |  PT_NOTE    |
     *   --------------
     *   |  PT_LOAD    |
     *   --------------
     *   |  ......     |
     *   --------------
     *   |  PT_LOAD    |
     *   --------------
     *   |  sec_hdr    |
     *   --------------
     *   |  elf note   |
     *   --------------
     *   |  memory     |
     *   --------------
     *
     * we only know where the memory is saved after we write elf note into
     * vmcore.
     */

    /* write elf header to vmcore */
    if (s->dump_info.d_class == ELFCLASS64) {
        write_elf64_header(s, &local_err);
    } else {
        write_elf32_header(s, &local_err);
    }
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    if (s->dump_info.d_class == ELFCLASS64) {
        /* write PT_NOTE to vmcore */
        write_elf64_note(s, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }

        /* write all PT_LOAD to vmcore */
        write_elf_loads(s, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }

        /* write section to vmcore */
        if (s->have_section) {
            write_elf_section(s, 1, &local_err);
            if (local_err) {
                error_propagate(errp, local_err);
                return;
            }
        }

        /* write notes to vmcore */
        write_elf64_notes(fd_write_vmcore, s, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    } else {
        /* write PT_NOTE to vmcore */
        write_elf32_note(s, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }

        /* write all PT_LOAD to vmcore */
        write_elf_loads(s, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }

        /* write section to vmcore */
        if (s->have_section) {
            write_elf_section(s, 0, &local_err);
            if (local_err) {
                error_propagate(errp, local_err);
                return;
            }
        }

        /* write notes to vmcore */
        write_elf32_notes(fd_write_vmcore, s, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    }
}

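/*
 * advance to the next guest-phys block; returns 0 and updates s->next_block
 * and s->start when one is found, or 1 when the iteration is finished
 */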
static int get_next_block(DumpState *s, GuestPhysBlock *block)
{
    while (1) {
        block = QTAILQ_NEXT(block, next);
        if (!block) {
            /* no more block */
            return 1;
        }

        s->start = 0;
        s->next_block = block;
        if (s->has_filter) {
            if (block->target_start >= s->begin + s->length ||
                block->target_end <= s->begin) {
                /* This block is out of the range */
                continue;
            }

            if (s->begin > block->target_start) {
                s->start = s->begin - block->target_start;
            }
        }

        return 0;
    }
}

/* write all memory to vmcore */
static void dump_iterate(DumpState *s, Error **errp)
{
    GuestPhysBlock *block;
    int64_t size;
    Error *local_err = NULL;

    do {
        block = s->next_block;

        size = block->target_end - block->target_start;
        if (s->has_filter) {
            size -= s->start;
            if (s->begin + s->length < block->target_end) {
                size -= block->target_end - (s->begin + s->length);
            }
        }
        write_memory(s, block, s->start, size, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }

    } while (!get_next_block(s, block));
}

static void create_vmcore(DumpState *s, Error **errp)
{
    Error *local_err = NULL;

    dump_begin(s, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    dump_iterate(s, errp);
}

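/* write the makedumpfile flat-format start header to the dump file */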
static int write_start_flat_header(int fd)
{
    MakedumpfileHeader *mh;
    int ret = 0;

    QEMU_BUILD_BUG_ON(sizeof *mh > MAX_SIZE_MDF_HEADER);
    mh = g_malloc0(MAX_SIZE_MDF_HEADER);

    memcpy(mh->signature, MAKEDUMPFILE_SIGNATURE,
           MIN(sizeof mh->signature, sizeof MAKEDUMPFILE_SIGNATURE));

    mh->type = cpu_to_be64(TYPE_FLAT_HEADER);
    mh->version = cpu_to_be64(VERSION_FLAT_HEADER);

    size_t written_size;
    written_size = qemu_write_full(fd, mh, MAX_SIZE_MDF_HEADER);
    if (written_size != MAX_SIZE_MDF_HEADER) {
        ret = -1;
    }

    g_free(mh);
    return ret;
}

static int write_end_flat_header(int fd)
{
    MakedumpfileDataHeader mdh;

    mdh.offset = END_FLAG_FLAT_HEADER;
    mdh.buf_size = END_FLAG_FLAT_HEADER;

    size_t written_size;
    written_size = qemu_write_full(fd, &mdh, sizeof(mdh));
    if (written_size != sizeof(mdh)) {
        return -1;
    }

    return 0;
}

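/*
 * write one flat-format data chunk: a MakedumpfileDataHeader carrying the
 * target offset and size, followed by the data itself
 */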
static int write_buffer(int fd, off_t offset, const void *buf, size_t size)
{
    size_t written_size;
    MakedumpfileDataHeader mdh;

    mdh.offset = cpu_to_be64(offset);
    mdh.buf_size = cpu_to_be64(size);

    written_size = qemu_write_full(fd, &mdh, sizeof(mdh));
    if (written_size != sizeof(mdh)) {
        return -1;
    }

    written_size = qemu_write_full(fd, buf, size);
    if (written_size != size) {
        return -1;
    }

    return 0;
}

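/* WriteCoreDumpFunction that collects note data into s->note_buf */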
static int buf_write_note(const void *buf, size_t size, void *opaque)
{
    DumpState *s = opaque;

    /* note_buf is not large enough to hold the data */
    if (s->note_buf_offset + size > s->note_size) {
        return -1;
    }

    memcpy(s->note_buf + s->note_buf_offset, buf, size);

    s->note_buf_offset += size;

    return 0;
}

/* write common header, sub header and elf note to vmcore */
static void create_header32(DumpState *s, Error **errp)
{
    DiskDumpHeader32 *dh = NULL;
    KdumpSubHeader32 *kh = NULL;
    size_t size;
    uint32_t block_size;
    uint32_t sub_hdr_size;
    uint32_t bitmap_blocks;
    uint32_t status = 0;
    uint64_t offset_note;
    Error *local_err = NULL;

    /* write the common header; version 6 of the kdump-compressed format */
    size = sizeof(DiskDumpHeader32);
    dh = g_malloc0(size);

    strncpy(dh->signature, KDUMP_SIGNATURE, strlen(KDUMP_SIGNATURE));
    dh->header_version = cpu_to_dump32(s, 6);
    block_size = s->dump_info.page_size;
    dh->block_size = cpu_to_dump32(s, block_size);
    sub_hdr_size = sizeof(struct KdumpSubHeader32) + s->note_size;
    sub_hdr_size = DIV_ROUND_UP(sub_hdr_size, block_size);
    dh->sub_hdr_size = cpu_to_dump32(s, sub_hdr_size);
    /* dh->max_mapnr may be truncated, full 64bit is in kh.max_mapnr_64 */
    dh->max_mapnr = cpu_to_dump32(s, MIN(s->max_mapnr, UINT_MAX));
    dh->nr_cpus = cpu_to_dump32(s, s->nr_cpus);
    bitmap_blocks = DIV_ROUND_UP(s->len_dump_bitmap, block_size) * 2;
    dh->bitmap_blocks = cpu_to_dump32(s, bitmap_blocks);
    strncpy(dh->utsname.machine, ELF_MACHINE_UNAME, sizeof(dh->utsname.machine));

    if (s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) {
        status |= DUMP_DH_COMPRESSED_ZLIB;
    }
#ifdef CONFIG_LZO
    if (s->flag_compress & DUMP_DH_COMPRESSED_LZO) {
        status |= DUMP_DH_COMPRESSED_LZO;
    }
#endif
#ifdef CONFIG_SNAPPY
    if (s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) {
        status |= DUMP_DH_COMPRESSED_SNAPPY;
    }
#endif
    dh->status = cpu_to_dump32(s, status);

    if (write_buffer(s->fd, 0, dh, size) < 0) {
        error_setg(errp, "dump: failed to write disk dump header");
        goto out;
    }

    /* write sub header */
    size = sizeof(KdumpSubHeader32);
    kh = g_malloc0(size);

    /* 64bit max_mapnr_64 */
    kh->max_mapnr_64 = cpu_to_dump64(s, s->max_mapnr);
    kh->phys_base = cpu_to_dump32(s, s->dump_info.phys_base);
    kh->dump_level = cpu_to_dump32(s, DUMP_LEVEL);

    offset_note = DISKDUMP_HEADER_BLOCKS * block_size + size;
    kh->offset_note = cpu_to_dump64(s, offset_note);
    kh->note_size = cpu_to_dump32(s, s->note_size);

    if (write_buffer(s->fd, DISKDUMP_HEADER_BLOCKS *
                     block_size, kh, size) < 0) {
        error_setg(errp, "dump: failed to write kdump sub header");
        goto out;
    }

    /* write note */
    s->note_buf = g_malloc0(s->note_size);
    s->note_buf_offset = 0;

    /* use s->note_buf to store notes temporarily */
    write_elf32_notes(buf_write_note, s, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto out;
    }
    if (write_buffer(s->fd, offset_note, s->note_buf,
                     s->note_size) < 0) {
        error_setg(errp, "dump: failed to write notes");
        goto out;
    }

    /* get offset of dump_bitmap */
    s->offset_dump_bitmap = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size) *
                            block_size;

    /* get offset of page */
    s->offset_page = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size + bitmap_blocks) *
                     block_size;

out:
    g_free(dh);
    g_free(kh);
    g_free(s->note_buf);
}

/* write common header, sub header and elf note to vmcore */
static void create_header64(DumpState *s, Error **errp)
{
    DiskDumpHeader64 *dh = NULL;
    KdumpSubHeader64 *kh = NULL;
    size_t size;
    uint32_t block_size;
    uint32_t sub_hdr_size;
    uint32_t bitmap_blocks;
    uint32_t status = 0;
    uint64_t offset_note;
    Error *local_err = NULL;

    /* write the common header; version 6 of the kdump-compressed format */
    size = sizeof(DiskDumpHeader64);
    dh = g_malloc0(size);

    strncpy(dh->signature, KDUMP_SIGNATURE, strlen(KDUMP_SIGNATURE));
    dh->header_version = cpu_to_dump32(s, 6);
    block_size = s->dump_info.page_size;
    dh->block_size = cpu_to_dump32(s, block_size);
    sub_hdr_size = sizeof(struct KdumpSubHeader64) + s->note_size;
    sub_hdr_size = DIV_ROUND_UP(sub_hdr_size, block_size);
    dh->sub_hdr_size = cpu_to_dump32(s, sub_hdr_size);
    /* dh->max_mapnr may be truncated, full 64bit is in kh.max_mapnr_64 */
    dh->max_mapnr = cpu_to_dump32(s, MIN(s->max_mapnr, UINT_MAX));
    dh->nr_cpus = cpu_to_dump32(s, s->nr_cpus);
    bitmap_blocks = DIV_ROUND_UP(s->len_dump_bitmap, block_size) * 2;
    dh->bitmap_blocks = cpu_to_dump32(s, bitmap_blocks);
    strncpy(dh->utsname.machine, ELF_MACHINE_UNAME, sizeof(dh->utsname.machine));

    if (s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) {
        status |= DUMP_DH_COMPRESSED_ZLIB;
    }
#ifdef CONFIG_LZO
    if (s->flag_compress & DUMP_DH_COMPRESSED_LZO) {
        status |= DUMP_DH_COMPRESSED_LZO;
    }
#endif
#ifdef CONFIG_SNAPPY
    if (s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) {
        status |= DUMP_DH_COMPRESSED_SNAPPY;
    }
#endif
    dh->status = cpu_to_dump32(s, status);

    if (write_buffer(s->fd, 0, dh, size) < 0) {
        error_setg(errp, "dump: failed to write disk dump header");
        goto out;
    }

    /* write sub header */
    size = sizeof(KdumpSubHeader64);
    kh = g_malloc0(size);

    /* 64bit max_mapnr_64 */
    kh->max_mapnr_64 = cpu_to_dump64(s, s->max_mapnr);
    kh->phys_base = cpu_to_dump64(s, s->dump_info.phys_base);
    kh->dump_level = cpu_to_dump32(s, DUMP_LEVEL);

    offset_note = DISKDUMP_HEADER_BLOCKS * block_size + size;
    kh->offset_note = cpu_to_dump64(s, offset_note);
    kh->note_size = cpu_to_dump64(s, s->note_size);

    if (write_buffer(s->fd, DISKDUMP_HEADER_BLOCKS *
                     block_size, kh, size) < 0) {
        error_setg(errp, "dump: failed to write kdump sub header");
        goto out;
    }

    /* write note */
    s->note_buf = g_malloc0(s->note_size);
    s->note_buf_offset = 0;

    /* use s->note_buf to store notes temporarily */
    write_elf64_notes(buf_write_note, s, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto out;
    }

    if (write_buffer(s->fd, offset_note, s->note_buf,
                     s->note_size) < 0) {
        error_setg(errp, "dump: failed to write notes");
        goto out;
    }

    /* get offset of dump_bitmap */
    s->offset_dump_bitmap = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size) *
                            block_size;

    /* get offset of page */
    s->offset_page = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size + bitmap_blocks) *
                     block_size;

out:
    g_free(dh);
    g_free(kh);
    g_free(s->note_buf);
}

static void write_dump_header(DumpState *s, Error **errp)
{
    Error *local_err = NULL;

    if (s->dump_info.d_class == ELFCLASS32) {
        create_header32(s, &local_err);
    } else {
        create_header64(s, &local_err);
    }
    if (local_err) {
        error_propagate(errp, local_err);
    }
}

static size_t dump_bitmap_get_bufsize(DumpState *s)
{
    return s->dump_info.page_size;
}

/*
 * set the dump_bitmap sequentially. Bits before last_pfn are not allowed to
 * be rewritten, so to set the first bit, pass both last_pfn and pfn as 0.
 * set_dump_bitmap always leaves the most recently set bit un-synchronized;
 * setting (last bit + sizeof(buf) * 8) to 0 flushes the content of buf into
 * the vmcore, i.e. synchronizes the un-synchronized bits into the vmcore.
 */
static int set_dump_bitmap(uint64_t last_pfn, uint64_t pfn, bool value,
                           uint8_t *buf, DumpState *s)
{
    off_t old_offset, new_offset;
    off_t offset_bitmap1, offset_bitmap2;
    uint32_t byte, bit;
    size_t bitmap_bufsize = dump_bitmap_get_bufsize(s);
    size_t bits_per_buf = bitmap_bufsize * CHAR_BIT;

    /* a bit before last_pfn must not be set again */
    assert(last_pfn <= pfn);

    /*
     * if the bit to be set is not cached in buf, flush the data in buf to
     * the vmcore first.
     * making new_offset bigger than old_offset also syncs any remaining
     * data into the vmcore.
     */
    old_offset = bitmap_bufsize * (last_pfn / bits_per_buf);
    new_offset = bitmap_bufsize * (pfn / bits_per_buf);

    while (old_offset < new_offset) {
        /* calculate the offset and write dump_bitmap */
        offset_bitmap1 = s->offset_dump_bitmap + old_offset;
        if (write_buffer(s->fd, offset_bitmap1, buf,
                         bitmap_bufsize) < 0) {
            return -1;
        }

        /* dump level 1 is chosen, so 1st and 2nd bitmap are same */
        offset_bitmap2 = s->offset_dump_bitmap + s->len_dump_bitmap +
                         old_offset;
        if (write_buffer(s->fd, offset_bitmap2, buf,
                         bitmap_bufsize) < 0) {
            return -1;
        }

        memset(buf, 0, bitmap_bufsize);
        old_offset += bitmap_bufsize;
    }

    /* get the exact place of the bit in the buf, and set it */
    byte = (pfn % bits_per_buf) / CHAR_BIT;
    bit = (pfn % bits_per_buf) % CHAR_BIT;
    if (value) {
        buf[byte] |= 1u << bit;
    } else {
        buf[byte] &= ~(1u << bit);
    }

    return 0;
}

static uint64_t dump_paddr_to_pfn(DumpState *s, uint64_t addr)
{
    int target_page_shift = ctz32(s->dump_info.page_size);

    return (addr >> target_page_shift) - ARCH_PFN_OFFSET;
}

static uint64_t dump_pfn_to_paddr(DumpState *s, uint64_t pfn)
{
    int target_page_shift = ctz32(s->dump_info.page_size);

    return (pfn + ARCH_PFN_OFFSET) << target_page_shift;
}

/*
 * examine every page and return the page frame number and the address of the
 * page. bufptr can be NULL. note: the blocks here are supposed to reflect
 * guest-phys blocks, so block->target_start and block->target_end should be
 * integral multiples of the target page size.
 */
static bool get_next_page(GuestPhysBlock **blockptr, uint64_t *pfnptr,
                          uint8_t **bufptr, DumpState *s)
{
    GuestPhysBlock *block = *blockptr;
    hwaddr addr, target_page_mask = ~((hwaddr)s->dump_info.page_size - 1);
    uint8_t *buf;

    /* block == NULL means the start of the iteration */
    if (!block) {
        block = QTAILQ_FIRST(&s->guest_phys_blocks.head);
        *blockptr = block;
        assert((block->target_start & ~target_page_mask) == 0);
        assert((block->target_end & ~target_page_mask) == 0);
        *pfnptr = dump_paddr_to_pfn(s, block->target_start);
        if (bufptr) {
            *bufptr = block->host_addr;
        }
        return true;
    }

    *pfnptr = *pfnptr + 1;
    addr = dump_pfn_to_paddr(s, *pfnptr);

    if ((addr >= block->target_start) &&
        (addr + s->dump_info.page_size <= block->target_end)) {
        buf = block->host_addr + (addr - block->target_start);
    } else {
        /* the next page is in the next block */
        block = QTAILQ_NEXT(block, next);
        *blockptr = block;
        if (!block) {
            return false;
        }
        assert((block->target_start & ~target_page_mask) == 0);
        assert((block->target_end & ~target_page_mask) == 0);
        *pfnptr = dump_paddr_to_pfn(s, block->target_start);
        buf = block->host_addr;
    }

    if (bufptr) {
        *bufptr = buf;
    }

    return true;
}

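/* write both copies of the dump_bitmap, marking every existing page */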
static void write_dump_bitmap(DumpState *s, Error **errp)
{
    int ret = 0;
    uint64_t last_pfn, pfn;
    void *dump_bitmap_buf;
    size_t num_dumpable;
    GuestPhysBlock *block_iter = NULL;
    size_t bitmap_bufsize = dump_bitmap_get_bufsize(s);
    size_t bits_per_buf = bitmap_bufsize * CHAR_BIT;

    /* dump_bitmap_buf is used to store dump_bitmap temporarily */
    dump_bitmap_buf = g_malloc0(bitmap_bufsize);

    num_dumpable = 0;
    last_pfn = 0;

    /*
     * examine memory page by page, and set the bit in dump_bitmap
     * corresponding to each existing page.
     */
    while (get_next_page(&block_iter, &pfn, NULL, s)) {
        ret = set_dump_bitmap(last_pfn, pfn, true, dump_bitmap_buf, s);
        if (ret < 0) {
            error_setg(errp, "dump: failed to set dump_bitmap");
            goto out;
        }

        last_pfn = pfn;
        num_dumpable++;
    }

    /*
     * set_dump_bitmap will always leave the recently set bit un-synchronized.
     * Here we set the remaining bits from last_pfn to the end of the bitmap
     * buffer to 0. Writing those zero bits flushes the un-synchronized bits
     * into the vmcore.
     */
    if (num_dumpable > 0) {
        ret = set_dump_bitmap(last_pfn, last_pfn + bits_per_buf, false,
                              dump_bitmap_buf, s);
        if (ret < 0) {
            error_setg(errp, "dump: failed to sync dump_bitmap");
            goto out;
        }
    }

    /* number of dumpable pages that will be dumped later */
    s->num_dumpable = num_dumpable;

out:
    g_free(dump_bitmap_buf);
}

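/* initialize a data cache that buffers writes destined for offset in the
 * dump file */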
static void prepare_data_cache(DataCache *data_cache, DumpState *s,
                               off_t offset)
{
    data_cache->fd = s->fd;
    data_cache->data_size = 0;
    data_cache->buf_size = 4 * dump_bitmap_get_bufsize(s);
    data_cache->buf = g_malloc0(data_cache->buf_size);
    data_cache->offset = offset;
}

static int write_cache(DataCache *dc, const void *buf, size_t size,
                       bool flag_sync)
{
    /*
     * dc->buf_size must not be smaller than size, otherwise the cache can
     * never hold the data
     */
    assert(size <= dc->buf_size);

    /*
     * if flag_sync is set, synchronize data in dc->buf into vmcore.
     * otherwise check if the space is enough for caching data in buf, if not,
     * write the data in dc->buf to dc->fd and reset dc->buf
     */
    if ((!flag_sync && dc->data_size + size > dc->buf_size) ||
        (flag_sync && dc->data_size > 0)) {
        if (write_buffer(dc->fd, dc->offset, dc->buf, dc->data_size) < 0) {
            return -1;
        }

        dc->offset += dc->data_size;
        dc->data_size = 0;
    }

    if (!flag_sync) {
        memcpy(dc->buf + dc->data_size, buf, size);
        dc->data_size += size;
    }

    return 0;
}

static void free_data_cache(DataCache *data_cache)
{
    g_free(data_cache->buf);
}

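/* worst-case output size for compressing one page with the given format */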
static size_t get_len_buf_out(size_t page_size, uint32_t flag_compress)
{
    switch (flag_compress) {
    case DUMP_DH_COMPRESSED_ZLIB:
        return compressBound(page_size);

    case DUMP_DH_COMPRESSED_LZO:
        /*
         * LZO will expand incompressible data by a little amount. Please check
         * the following URL to see the expansion calculation:
         * http://www.oberhumer.com/opensource/lzo/lzofaq.php
         */
        return page_size + page_size / 16 + 64 + 3;

#ifdef CONFIG_SNAPPY
    case DUMP_DH_COMPRESSED_SNAPPY:
        return snappy_max_compressed_length(page_size);
#endif
    }
    return 0;
}

/*
 * check if the page is all 0
 */
static inline bool is_zero_page(const uint8_t *buf, size_t page_size)
{
    return buffer_is_zero(buf, page_size);
}

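/*
 * write the page descriptors and (compressed) page data of the
 * kdump-compressed format; zero pages all share one pre-written page
 */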
static void write_dump_pages(DumpState *s, Error **errp)
{
    int ret = 0;
    DataCache page_desc, page_data;
    size_t len_buf_out, size_out;
#ifdef CONFIG_LZO
    lzo_bytep wrkmem = NULL;
#endif
    uint8_t *buf_out = NULL;
    off_t offset_desc, offset_data;
    PageDescriptor pd, pd_zero;
    uint8_t *buf;
    GuestPhysBlock *block_iter = NULL;
    uint64_t pfn_iter;

    /* get offset of page_desc and page_data in dump file */
    offset_desc = s->offset_page;
    offset_data = offset_desc + sizeof(PageDescriptor) * s->num_dumpable;

    prepare_data_cache(&page_desc, s, offset_desc);
    prepare_data_cache(&page_data, s, offset_data);

    /* prepare buffer to store compressed data */
    len_buf_out = get_len_buf_out(s->dump_info.page_size, s->flag_compress);
    assert(len_buf_out != 0);

#ifdef CONFIG_LZO
    wrkmem = g_malloc(LZO1X_1_MEM_COMPRESS);
#endif

    buf_out = g_malloc(len_buf_out);

    /*
     * init zero page's page_desc and page_data, because every zero page
     * uses the same page_data
     */
    pd_zero.size = cpu_to_dump32(s, s->dump_info.page_size);
    pd_zero.flags = cpu_to_dump32(s, 0);
    pd_zero.offset = cpu_to_dump64(s, offset_data);
    pd_zero.page_flags = cpu_to_dump64(s, 0);
    buf = g_malloc0(s->dump_info.page_size);
    ret = write_cache(&page_data, buf, s->dump_info.page_size, false);
    g_free(buf);
    if (ret < 0) {
        error_setg(errp, "dump: failed to write page data (zero page)");
        goto out;
    }

    offset_data += s->dump_info.page_size;

    /*
     * dump memory to the vmcore page by page. all zero pages share the single
     * zero page stored at the start of the page section
     */
    while (get_next_page(&block_iter, &pfn_iter, &buf, s)) {
        /* check zero page */
        if (is_zero_page(buf, s->dump_info.page_size)) {
            ret = write_cache(&page_desc, &pd_zero, sizeof(PageDescriptor),
                              false);
            if (ret < 0) {
                error_setg(errp, "dump: failed to write page desc");
                goto out;
            }
        } else {
            /*
             * not a zero page, then:
             * 1. compress the page
             * 2. write the compressed page into the cache of page_data
             * 3. get the page desc of the compressed page and write it into
             *    the cache of page_desc
             *
             * only one compression format is used here, since
             * s->flag_compress is set. But if compression fails, we fall
             * back to saving the page uncompressed.
             */
            size_out = len_buf_out;
            if ((s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) &&
                (compress2(buf_out, (uLongf *)&size_out, buf,
                           s->dump_info.page_size, Z_BEST_SPEED) == Z_OK) &&
                (size_out < s->dump_info.page_size)) {
                pd.flags = cpu_to_dump32(s, DUMP_DH_COMPRESSED_ZLIB);
                pd.size = cpu_to_dump32(s, size_out);

                ret = write_cache(&page_data, buf_out, size_out, false);
                if (ret < 0) {
                    error_setg(errp, "dump: failed to write page data");
                    goto out;
                }
#ifdef CONFIG_LZO
            } else if ((s->flag_compress & DUMP_DH_COMPRESSED_LZO) &&
                       (lzo1x_1_compress(buf, s->dump_info.page_size, buf_out,
                        (lzo_uint *)&size_out, wrkmem) == LZO_E_OK) &&
                       (size_out < s->dump_info.page_size)) {
                pd.flags = cpu_to_dump32(s, DUMP_DH_COMPRESSED_LZO);
                pd.size = cpu_to_dump32(s, size_out);

                ret = write_cache(&page_data, buf_out, size_out, false);
                if (ret < 0) {
                    error_setg(errp, "dump: failed to write page data");
                    goto out;
                }
#endif
#ifdef CONFIG_SNAPPY
            } else if ((s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) &&
                       (snappy_compress((char *)buf, s->dump_info.page_size,
                        (char *)buf_out, &size_out) == SNAPPY_OK) &&
                       (size_out < s->dump_info.page_size)) {
                pd.flags = cpu_to_dump32(s, DUMP_DH_COMPRESSED_SNAPPY);
                pd.size = cpu_to_dump32(s, size_out);

                ret = write_cache(&page_data, buf_out, size_out, false);
                if (ret < 0) {
                    error_setg(errp, "dump: failed to write page data");
                    goto out;
                }
#endif
            } else {
                /*
                 * fall back to saving the page uncompressed; size_out must
                 * be set to the target's page size
                 */
                pd.flags = cpu_to_dump32(s, 0);
                size_out = s->dump_info.page_size;
                pd.size = cpu_to_dump32(s, size_out);

                ret = write_cache(&page_data, buf,
                                  s->dump_info.page_size, false);
                if (ret < 0) {
                    error_setg(errp, "dump: failed to write page data");
                    goto out;
                }
            }

            /* get and write page desc here */
            pd.page_flags = cpu_to_dump64(s, 0);
            pd.offset = cpu_to_dump64(s, offset_data);
            offset_data += size_out;

            ret = write_cache(&page_desc, &pd, sizeof(PageDescriptor), false);
            if (ret < 0) {
                error_setg(errp, "dump: failed to write page desc");
                goto out;
            }
        }
        s->written_size += s->dump_info.page_size;
    }

    ret = write_cache(&page_desc, NULL, 0, true);
    if (ret < 0) {
        error_setg(errp, "dump: failed to sync cache for page_desc");
        goto out;
    }
    ret = write_cache(&page_data, NULL, 0, true);
    if (ret < 0) {
        error_setg(errp, "dump: failed to sync cache for page_data");
        goto out;
    }

out:
    free_data_cache(&page_desc);
    free_data_cache(&page_data);

#ifdef CONFIG_LZO
    g_free(wrkmem);
#endif

    g_free(buf_out);
}

static void create_kdump_vmcore(DumpState *s, Error **errp)
{
    int ret;
    Error *local_err = NULL;

    /*
     * the kdump-compressed format is:
     *                                               File offset
     *  +------------------------------------------+ 0x0
     *  |    main header (struct disk_dump_header) |
     *  |------------------------------------------+ block 1
     *  |    sub header (struct kdump_sub_header)  |
     *  |------------------------------------------+ block 2
     *  |            1st-dump_bitmap               |
     *  |------------------------------------------+ block 2 + X blocks
     *  |            2nd-dump_bitmap               | (aligned by block)
     *  |------------------------------------------+ block 2 + 2 * X blocks
     *  |  page desc for pfn 0 (struct page_desc)  | (aligned by block)
     *  |  page desc for pfn 1 (struct page_desc)  |
     *  |                    :                     |
     *  |------------------------------------------| (not aligned by block)
     *  |         page data (pfn 0)                |
     *  |         page data (pfn 1)                |
     *  |                    :                     |
     *  +------------------------------------------+
     */

    ret = write_start_flat_header(s->fd);
    if (ret < 0) {
        error_setg(errp, "dump: failed to write start flat header");
        return;
    }

    write_dump_header(s, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    write_dump_bitmap(s, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    write_dump_pages(s, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    ret = write_end_flat_header(s->fd);
    if (ret < 0) {
        error_setg(errp, "dump: failed to write end flat header");
        return;
    }
}

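/*
 * find the first block intersecting the dump range and return the start
 * offset inside it, or -1 when the filter matches no memory at all
 */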
static ram_addr_t get_start_block(DumpState *s)
{
    GuestPhysBlock *block;

    if (!s->has_filter) {
        s->next_block = QTAILQ_FIRST(&s->guest_phys_blocks.head);
        return 0;
    }

    QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) {
        if (block->target_start >= s->begin + s->length ||
            block->target_end <= s->begin) {
            /* This block is out of the range */
            continue;
        }

        s->next_block = block;
        if (s->begin > block->target_start) {
            s->start = s->begin - block->target_start;
        } else {
            s->start = 0;
        }
        return s->start;
    }

    return -1;
}

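/* the highest page frame number is derived from the end of the last block */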
static void get_max_mapnr(DumpState *s)
{
    GuestPhysBlock *last_block;

    last_block = QTAILQ_LAST(&s->guest_phys_blocks.head, GuestPhysBlockHead);
    s->max_mapnr = dump_paddr_to_pfn(s, last_block->target_end);
}

static DumpState dump_state_global = { .status = DUMP_STATUS_NONE };

static void dump_state_prepare(DumpState *s)
{
    /* zero the struct, setting status to active */
    *s = (DumpState) { .status = DUMP_STATUS_ACTIVE };
}

bool dump_in_progress(void)
{
    DumpState *state = &dump_state_global;
    return (atomic_read(&state->status) == DUMP_STATUS_ACTIVE);
}

/* calculate the total size of the memory to be dumped (taking the filter
 * into account) */
static int64_t dump_calculate_size(DumpState *s)
{
    GuestPhysBlock *block;
    int64_t size = 0, total = 0, left = 0, right = 0;

    QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) {
        if (s->has_filter) {
            /* calculate the overlapped region. */
            left = MAX(s->begin, block->target_start);
            right = MIN(s->begin + s->length, block->target_end);
            size = right - left;
            size = size > 0 ? size : 0;
        } else {
            /* count the whole region in */
            size = (block->target_end - block->target_start);
        }
        total += size;
    }

    return total;
}

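/*
 * gather everything the dump needs: stop the VM if it is running, collect the
 * guest-phys blocks and memory mappings, and precompute sizes and offsets
 */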
static void dump_init(DumpState *s, int fd, bool has_format,
                      DumpGuestMemoryFormat format, bool paging, bool has_filter,
                      int64_t begin, int64_t length, Error **errp)
{
    CPUState *cpu;
    int nr_cpus;
    Error *err = NULL;
    int ret;

    s->has_format = has_format;
    s->format = format;
    s->written_size = 0;

    /* the kdump-compressed format conflicts with paging and filtering */
    if (has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) {
        assert(!paging && !has_filter);
    }

    if (runstate_is_running()) {
        vm_stop(RUN_STATE_SAVE_VM);
        s->resume = true;
    } else {
        s->resume = false;
    }

    /* If we use KVM, we should synchronize the registers before we get dump
     * info or physmap info.
     */
    cpu_synchronize_all_states();
    nr_cpus = 0;
    CPU_FOREACH(cpu) {
        nr_cpus++;
    }

    s->fd = fd;
    s->has_filter = has_filter;
    s->begin = begin;
    s->length = length;

    memory_mapping_list_init(&s->list);

    guest_phys_blocks_init(&s->guest_phys_blocks);
    guest_phys_blocks_append(&s->guest_phys_blocks);
    s->total_size = dump_calculate_size(s);
#ifdef DEBUG_DUMP_GUEST_MEMORY
    fprintf(stderr, "DUMP: total memory to dump: %lu\n", s->total_size);
#endif

    s->start = get_start_block(s);
    if (s->start == -1) {
        error_setg(errp, QERR_INVALID_PARAMETER, "begin");
        goto cleanup;
    }

    /* get dump info: endian, class and architecture.
     * If the target architecture is not supported, cpu_get_dump_info() will
     * return -1.
     */
    ret = cpu_get_dump_info(&s->dump_info, &s->guest_phys_blocks);
    if (ret < 0) {
        error_setg(errp, QERR_UNSUPPORTED);
        goto cleanup;
    }

    if (!s->dump_info.page_size) {
        s->dump_info.page_size = TARGET_PAGE_SIZE;
    }

    s->note_size = cpu_get_note_size(s->dump_info.d_class,
                                     s->dump_info.d_machine, nr_cpus);
    if (s->note_size < 0) {
        error_setg(errp, QERR_UNSUPPORTED);
        goto cleanup;
    }

    /* get memory mapping */
    if (paging) {
        qemu_get_guest_memory_mapping(&s->list, &s->guest_phys_blocks, &err);
        if (err != NULL) {
            error_propagate(errp, err);
            goto cleanup;
        }
    } else {
        qemu_get_guest_simple_memory_mapping(&s->list, &s->guest_phys_blocks);
    }

    s->nr_cpus = nr_cpus;

    get_max_mapnr(s);

    uint64_t tmp;
    tmp = DIV_ROUND_UP(DIV_ROUND_UP(s->max_mapnr, CHAR_BIT),
                       s->dump_info.page_size);
    s->len_dump_bitmap = tmp * s->dump_info.page_size;

    /* init for kdump-compressed format */
    if (has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) {
        switch (format) {
        case DUMP_GUEST_MEMORY_FORMAT_KDUMP_ZLIB:
            s->flag_compress = DUMP_DH_COMPRESSED_ZLIB;
            break;

        case DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO:
#ifdef CONFIG_LZO
            if (lzo_init() != LZO_E_OK) {
                error_setg(errp, "failed to initialize the LZO library");
                goto cleanup;
            }
#endif
            s->flag_compress = DUMP_DH_COMPRESSED_LZO;
            break;

        case DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY:
            s->flag_compress = DUMP_DH_COMPRESSED_SNAPPY;
            break;

        default:
            s->flag_compress = 0;
        }

        return;
    }

    if (s->has_filter) {
        memory_mapping_filter(&s->list, s->begin, s->length);
    }

    /*
     * calculate phdr_num
     *
     * the type of ehdr->e_phnum is uint16_t, so we should avoid overflow
     */
    s->phdr_num = 1; /* PT_NOTE */
    if (s->list.num < UINT16_MAX - 2) {
        s->phdr_num += s->list.num;
        s->have_section = false;
    } else {
        s->have_section = true;
        s->phdr_num = PN_XNUM;
        s->sh_info = 1; /* PT_NOTE */

        /* the type of shdr->sh_info is uint32_t, so we should avoid overflow */
        if (s->list.num <= UINT32_MAX - 1) {
            s->sh_info += s->list.num;
        } else {
            s->sh_info = UINT32_MAX;
        }
    }

    if (s->dump_info.d_class == ELFCLASS64) {
        if (s->have_section) {
            s->memory_offset = sizeof(Elf64_Ehdr) +
                               sizeof(Elf64_Phdr) * s->sh_info +
                               sizeof(Elf64_Shdr) + s->note_size;
        } else {
            s->memory_offset = sizeof(Elf64_Ehdr) +
                               sizeof(Elf64_Phdr) * s->phdr_num + s->note_size;
        }
    } else {
        if (s->have_section) {
            s->memory_offset = sizeof(Elf32_Ehdr) +
                               sizeof(Elf32_Phdr) * s->sh_info +
                               sizeof(Elf32_Shdr) + s->note_size;
        } else {
            s->memory_offset = sizeof(Elf32_Ehdr) +
                               sizeof(Elf32_Phdr) * s->phdr_num + s->note_size;
        }
    }

    return;

cleanup:
    dump_cleanup(s);
}

/* this operation might be time consuming. */
static void dump_process(DumpState *s, Error **errp)
{
    Error *local_err = NULL;

    if (s->has_format && s->format != DUMP_GUEST_MEMORY_FORMAT_ELF) {
        create_kdump_vmcore(s, &local_err);
    } else {
        create_vmcore(s, &local_err);
    }

    /* make sure status is written after written_size updates */
    smp_wmb();
    atomic_set(&s->status,
               (local_err ? DUMP_STATUS_FAILED : DUMP_STATUS_COMPLETED));

    error_propagate(errp, local_err);
    dump_cleanup(s);
}

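/* entry point of the detached dump thread */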
static void *dump_thread(void *data)
{
    Error *err = NULL;
    DumpState *s = (DumpState *)data;

    dump_process(s, &err);

    if (err) {
        /* TODO: notify user the error */
        error_free(err);
    }
    return NULL;
}

DumpQueryResult *qmp_query_dump(Error **errp)
{
    DumpQueryResult *result = g_new(DumpQueryResult, 1);
    DumpState *state = &dump_state_global;
    result->status = atomic_read(&state->status);
    /* make sure we are reading status and written_size in order */
    smp_rmb();
    result->completed = state->written_size;
    result->total = state->total_size;
    return result;
}

void qmp_dump_guest_memory(bool paging, const char *file,
                           bool has_detach, bool detach,
                           bool has_begin, int64_t begin, bool has_length,
                           int64_t length, bool has_format,
                           DumpGuestMemoryFormat format, Error **errp)
{
    const char *p;
    int fd = -1;
    DumpState *s;
    Error *local_err = NULL;
    bool detach_p = false;

    if (runstate_check(RUN_STATE_INMIGRATE)) {
        error_setg(errp, "Dump not allowed during incoming migration.");
        return;
    }

    /* if there is a dump in progress in the background, we must wait until
     * it has finished */
    if (dump_in_progress()) {
        error_setg(errp, "There is a dump in process, please wait.");
        return;
    }

    /*
     * the kdump-compressed format needs the whole memory dumped, so neither
     * paging nor a filter is supported here.
     */
    if ((has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) &&
        (paging || has_begin || has_length)) {
        error_setg(errp, "kdump-compressed format doesn't support paging or "
                         "filter");
        return;
    }
    if (has_begin && !has_length) {
        error_setg(errp, QERR_MISSING_PARAMETER, "length");
        return;
    }
    if (!has_begin && has_length) {
        error_setg(errp, QERR_MISSING_PARAMETER, "begin");
        return;
    }
    if (has_detach) {
        detach_p = detach;
    }

    /* check whether lzo/snappy is supported */
#ifndef CONFIG_LZO
    if (has_format && format == DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO) {
        error_setg(errp, "kdump-lzo is not available now");
        return;
    }
#endif

#ifndef CONFIG_SNAPPY
    if (has_format && format == DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY) {
        error_setg(errp, "kdump-snappy is not available now");
        return;
    }
#endif

#if !defined(WIN32)
    if (strstart(file, "fd:", &p)) {
        fd = monitor_get_fd(cur_mon, p, errp);
        if (fd == -1) {
            return;
        }
    }
#endif

    if (strstart(file, "file:", &p)) {
        fd = qemu_open(p, O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, S_IRUSR);
        if (fd < 0) {
            error_setg_file_open(errp, errno, p);
            return;
        }
    }

    if (fd == -1) {
        error_setg(errp, QERR_INVALID_PARAMETER, "protocol");
        return;
    }

    s = &dump_state_global;
    dump_state_prepare(s);

    dump_init(s, fd, has_format, format, paging, has_begin,
              begin, length, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        atomic_set(&s->status, DUMP_STATUS_FAILED);
        return;
    }

    if (detach_p) {
        /* detached dump */
        qemu_thread_create(&s->dump_thread, "dump_thread", dump_thread,
                           s, QEMU_THREAD_DETACHED);
    } else {
        /* sync dump */
        dump_process(s, errp);
    }
}

DumpGuestMemoryCapability *qmp_query_dump_guest_memory_capability(Error **errp)
{
    DumpGuestMemoryFormatList *item;
    DumpGuestMemoryCapability *cap =
        g_malloc0(sizeof(DumpGuestMemoryCapability));

    /* elf is always available */
    item = g_malloc0(sizeof(DumpGuestMemoryFormatList));
    cap->formats = item;
    item->value = DUMP_GUEST_MEMORY_FORMAT_ELF;

    /* kdump-zlib is always available */
    item->next = g_malloc0(sizeof(DumpGuestMemoryFormatList));
    item = item->next;
    item->value = DUMP_GUEST_MEMORY_FORMAT_KDUMP_ZLIB;

    /* add new item if kdump-lzo is available */
#ifdef CONFIG_LZO
    item->next = g_malloc0(sizeof(DumpGuestMemoryFormatList));
    item = item->next;
    item->value = DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO;
#endif

    /* add new item if kdump-snappy is available */
#ifdef CONFIG_SNAPPY
    item->next = g_malloc0(sizeof(DumpGuestMemoryFormatList));
    item = item->next;
    item->value = DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY;
#endif

    return cap;
}