]> git.proxmox.com Git - qemu.git/blob - dump.c
f5c72833a3c6e41026dcdc48384acf86b56d3ac1
[qemu.git] / dump.c
1 /*
2 * QEMU dump
3 *
4 * Copyright Fujitsu, Corp. 2011, 2012
5 *
6 * Authors:
7 * Wen Congyang <wency@cn.fujitsu.com>
8 *
9 * This work is licensed under the terms of the GNU GPL, version 2. See
10 * the COPYING file in the top-level directory.
11 *
12 */
13
14 #include "qemu-common.h"
15 #include <unistd.h>
16 #include "elf.h"
17 #include <sys/procfs.h>
18 #include <glib.h>
19 #include "cpu.h"
20 #include "cpu-all.h"
21 #include "targphys.h"
22 #include "monitor.h"
23 #include "kvm.h"
24 #include "dump.h"
25 #include "sysemu.h"
26 #include "bswap.h"
27 #include "memory_mapping.h"
28 #include "error.h"
29 #include "qmp-commands.h"
30 #include "gdbstub.h"
31
32 static uint16_t cpu_convert_to_target16(uint16_t val, int endian)
33 {
34 if (endian == ELFDATA2LSB) {
35 val = cpu_to_le16(val);
36 } else {
37 val = cpu_to_be16(val);
38 }
39
40 return val;
41 }
42
43 static uint32_t cpu_convert_to_target32(uint32_t val, int endian)
44 {
45 if (endian == ELFDATA2LSB) {
46 val = cpu_to_le32(val);
47 } else {
48 val = cpu_to_be32(val);
49 }
50
51 return val;
52 }
53
54 static uint64_t cpu_convert_to_target64(uint64_t val, int endian)
55 {
56 if (endian == ELFDATA2LSB) {
57 val = cpu_to_le64(val);
58 } else {
59 val = cpu_to_be64(val);
60 }
61
62 return val;
63 }
64
/* State carried across the whole guest-memory dump. */
typedef struct DumpState {
    ArchDumpInfo dump_info;     /* ELF class/endian/machine for the header */
    MemoryMappingList list;     /* guest mappings, one PT_LOAD each */
    uint16_t phdr_num;          /* program header count (PN_XNUM on overflow) */
    uint32_t sh_info;           /* real phdr count when phdr_num == PN_XNUM */
    bool have_section;          /* emit a section header carrying sh_info */
    bool resume;                /* VM was running; restart it after the dump */
    size_t note_size;           /* total size of the ELF note segment */
    target_phys_addr_t memory_offset; /* file offset where guest memory starts */
    int fd;                     /* destination file descriptor */

    RAMBlock *block;            /* RAM block currently being written */
    ram_addr_t start;           /* offset inside 'block' to continue from */
    bool has_filter;            /* restrict dump to [begin, begin+length) */
    int64_t begin;
    int64_t length;
    Error **errp;               /* error destination for the QMP command */
} DumpState;
83
84 static int dump_cleanup(DumpState *s)
85 {
86 int ret = 0;
87
88 memory_mapping_list_free(&s->list);
89 if (s->fd != -1) {
90 close(s->fd);
91 }
92 if (s->resume) {
93 vm_start();
94 }
95
96 return ret;
97 }
98
/*
 * Abort the dump and release its resources.
 *
 * NOTE(review): 'reason' is currently ignored -- nothing reports the
 * message anywhere; consider wiring it into error reporting.
 */
static void dump_error(DumpState *s, const char *reason)
{
    dump_cleanup(s);
}
103
104 static int fd_write_vmcore(void *buf, size_t size, void *opaque)
105 {
106 DumpState *s = opaque;
107 int fd = s->fd;
108 size_t writen_size;
109
110 /* The fd may be passed from user, and it can be non-blocked */
111 while (size) {
112 writen_size = qemu_write_full(fd, buf, size);
113 if (writen_size != size && errno != EAGAIN) {
114 return -1;
115 }
116
117 buf += writen_size;
118 size -= writen_size;
119 }
120
121 return 0;
122 }
123
124 static int write_elf64_header(DumpState *s)
125 {
126 Elf64_Ehdr elf_header;
127 int ret;
128 int endian = s->dump_info.d_endian;
129
130 memset(&elf_header, 0, sizeof(Elf64_Ehdr));
131 memcpy(&elf_header, ELFMAG, SELFMAG);
132 elf_header.e_ident[EI_CLASS] = ELFCLASS64;
133 elf_header.e_ident[EI_DATA] = s->dump_info.d_endian;
134 elf_header.e_ident[EI_VERSION] = EV_CURRENT;
135 elf_header.e_type = cpu_convert_to_target16(ET_CORE, endian);
136 elf_header.e_machine = cpu_convert_to_target16(s->dump_info.d_machine,
137 endian);
138 elf_header.e_version = cpu_convert_to_target32(EV_CURRENT, endian);
139 elf_header.e_ehsize = cpu_convert_to_target16(sizeof(elf_header), endian);
140 elf_header.e_phoff = cpu_convert_to_target64(sizeof(Elf64_Ehdr), endian);
141 elf_header.e_phentsize = cpu_convert_to_target16(sizeof(Elf64_Phdr),
142 endian);
143 elf_header.e_phnum = cpu_convert_to_target16(s->phdr_num, endian);
144 if (s->have_section) {
145 uint64_t shoff = sizeof(Elf64_Ehdr) + sizeof(Elf64_Phdr) * s->sh_info;
146
147 elf_header.e_shoff = cpu_convert_to_target64(shoff, endian);
148 elf_header.e_shentsize = cpu_convert_to_target16(sizeof(Elf64_Shdr),
149 endian);
150 elf_header.e_shnum = cpu_convert_to_target16(1, endian);
151 }
152
153 ret = fd_write_vmcore(&elf_header, sizeof(elf_header), s);
154 if (ret < 0) {
155 dump_error(s, "dump: failed to write elf header.\n");
156 return -1;
157 }
158
159 return 0;
160 }
161
162 static int write_elf32_header(DumpState *s)
163 {
164 Elf32_Ehdr elf_header;
165 int ret;
166 int endian = s->dump_info.d_endian;
167
168 memset(&elf_header, 0, sizeof(Elf32_Ehdr));
169 memcpy(&elf_header, ELFMAG, SELFMAG);
170 elf_header.e_ident[EI_CLASS] = ELFCLASS32;
171 elf_header.e_ident[EI_DATA] = endian;
172 elf_header.e_ident[EI_VERSION] = EV_CURRENT;
173 elf_header.e_type = cpu_convert_to_target16(ET_CORE, endian);
174 elf_header.e_machine = cpu_convert_to_target16(s->dump_info.d_machine,
175 endian);
176 elf_header.e_version = cpu_convert_to_target32(EV_CURRENT, endian);
177 elf_header.e_ehsize = cpu_convert_to_target16(sizeof(elf_header), endian);
178 elf_header.e_phoff = cpu_convert_to_target32(sizeof(Elf32_Ehdr), endian);
179 elf_header.e_phentsize = cpu_convert_to_target16(sizeof(Elf32_Phdr),
180 endian);
181 elf_header.e_phnum = cpu_convert_to_target16(s->phdr_num, endian);
182 if (s->have_section) {
183 uint32_t shoff = sizeof(Elf32_Ehdr) + sizeof(Elf32_Phdr) * s->sh_info;
184
185 elf_header.e_shoff = cpu_convert_to_target32(shoff, endian);
186 elf_header.e_shentsize = cpu_convert_to_target16(sizeof(Elf32_Shdr),
187 endian);
188 elf_header.e_shnum = cpu_convert_to_target16(1, endian);
189 }
190
191 ret = fd_write_vmcore(&elf_header, sizeof(elf_header), s);
192 if (ret < 0) {
193 dump_error(s, "dump: failed to write elf header.\n");
194 return -1;
195 }
196
197 return 0;
198 }
199
200 static int write_elf64_load(DumpState *s, MemoryMapping *memory_mapping,
201 int phdr_index, target_phys_addr_t offset)
202 {
203 Elf64_Phdr phdr;
204 int ret;
205 int endian = s->dump_info.d_endian;
206
207 memset(&phdr, 0, sizeof(Elf64_Phdr));
208 phdr.p_type = cpu_convert_to_target32(PT_LOAD, endian);
209 phdr.p_offset = cpu_convert_to_target64(offset, endian);
210 phdr.p_paddr = cpu_convert_to_target64(memory_mapping->phys_addr, endian);
211 if (offset == -1) {
212 /* When the memory is not stored into vmcore, offset will be -1 */
213 phdr.p_filesz = 0;
214 } else {
215 phdr.p_filesz = cpu_convert_to_target64(memory_mapping->length, endian);
216 }
217 phdr.p_memsz = cpu_convert_to_target64(memory_mapping->length, endian);
218 phdr.p_vaddr = cpu_convert_to_target64(memory_mapping->virt_addr, endian);
219
220 ret = fd_write_vmcore(&phdr, sizeof(Elf64_Phdr), s);
221 if (ret < 0) {
222 dump_error(s, "dump: failed to write program header table.\n");
223 return -1;
224 }
225
226 return 0;
227 }
228
229 static int write_elf32_load(DumpState *s, MemoryMapping *memory_mapping,
230 int phdr_index, target_phys_addr_t offset)
231 {
232 Elf32_Phdr phdr;
233 int ret;
234 int endian = s->dump_info.d_endian;
235
236 memset(&phdr, 0, sizeof(Elf32_Phdr));
237 phdr.p_type = cpu_convert_to_target32(PT_LOAD, endian);
238 phdr.p_offset = cpu_convert_to_target32(offset, endian);
239 phdr.p_paddr = cpu_convert_to_target32(memory_mapping->phys_addr, endian);
240 if (offset == -1) {
241 /* When the memory is not stored into vmcore, offset will be -1 */
242 phdr.p_filesz = 0;
243 } else {
244 phdr.p_filesz = cpu_convert_to_target32(memory_mapping->length, endian);
245 }
246 phdr.p_memsz = cpu_convert_to_target32(memory_mapping->length, endian);
247 phdr.p_vaddr = cpu_convert_to_target32(memory_mapping->virt_addr, endian);
248
249 ret = fd_write_vmcore(&phdr, sizeof(Elf32_Phdr), s);
250 if (ret < 0) {
251 dump_error(s, "dump: failed to write program header table.\n");
252 return -1;
253 }
254
255 return 0;
256 }
257
258 static int write_elf64_note(DumpState *s)
259 {
260 Elf64_Phdr phdr;
261 int endian = s->dump_info.d_endian;
262 target_phys_addr_t begin = s->memory_offset - s->note_size;
263 int ret;
264
265 memset(&phdr, 0, sizeof(Elf64_Phdr));
266 phdr.p_type = cpu_convert_to_target32(PT_NOTE, endian);
267 phdr.p_offset = cpu_convert_to_target64(begin, endian);
268 phdr.p_paddr = 0;
269 phdr.p_filesz = cpu_convert_to_target64(s->note_size, endian);
270 phdr.p_memsz = cpu_convert_to_target64(s->note_size, endian);
271 phdr.p_vaddr = 0;
272
273 ret = fd_write_vmcore(&phdr, sizeof(Elf64_Phdr), s);
274 if (ret < 0) {
275 dump_error(s, "dump: failed to write program header table.\n");
276 return -1;
277 }
278
279 return 0;
280 }
281
282 static int write_elf64_notes(DumpState *s)
283 {
284 CPUArchState *env;
285 int ret;
286 int id;
287
288 for (env = first_cpu; env != NULL; env = env->next_cpu) {
289 id = cpu_index(env);
290 ret = cpu_write_elf64_note(fd_write_vmcore, env, id, s);
291 if (ret < 0) {
292 dump_error(s, "dump: failed to write elf notes.\n");
293 return -1;
294 }
295 }
296
297 for (env = first_cpu; env != NULL; env = env->next_cpu) {
298 ret = cpu_write_elf64_qemunote(fd_write_vmcore, env, s);
299 if (ret < 0) {
300 dump_error(s, "dump: failed to write CPU status.\n");
301 return -1;
302 }
303 }
304
305 return 0;
306 }
307
308 static int write_elf32_note(DumpState *s)
309 {
310 target_phys_addr_t begin = s->memory_offset - s->note_size;
311 Elf32_Phdr phdr;
312 int endian = s->dump_info.d_endian;
313 int ret;
314
315 memset(&phdr, 0, sizeof(Elf32_Phdr));
316 phdr.p_type = cpu_convert_to_target32(PT_NOTE, endian);
317 phdr.p_offset = cpu_convert_to_target32(begin, endian);
318 phdr.p_paddr = 0;
319 phdr.p_filesz = cpu_convert_to_target32(s->note_size, endian);
320 phdr.p_memsz = cpu_convert_to_target32(s->note_size, endian);
321 phdr.p_vaddr = 0;
322
323 ret = fd_write_vmcore(&phdr, sizeof(Elf32_Phdr), s);
324 if (ret < 0) {
325 dump_error(s, "dump: failed to write program header table.\n");
326 return -1;
327 }
328
329 return 0;
330 }
331
332 static int write_elf32_notes(DumpState *s)
333 {
334 CPUArchState *env;
335 int ret;
336 int id;
337
338 for (env = first_cpu; env != NULL; env = env->next_cpu) {
339 id = cpu_index(env);
340 ret = cpu_write_elf32_note(fd_write_vmcore, env, id, s);
341 if (ret < 0) {
342 dump_error(s, "dump: failed to write elf notes.\n");
343 return -1;
344 }
345 }
346
347 for (env = first_cpu; env != NULL; env = env->next_cpu) {
348 ret = cpu_write_elf32_qemunote(fd_write_vmcore, env, s);
349 if (ret < 0) {
350 dump_error(s, "dump: failed to write CPU status.\n");
351 return -1;
352 }
353 }
354
355 return 0;
356 }
357
358 static int write_elf_section(DumpState *s, int type)
359 {
360 Elf32_Shdr shdr32;
361 Elf64_Shdr shdr64;
362 int endian = s->dump_info.d_endian;
363 int shdr_size;
364 void *shdr;
365 int ret;
366
367 if (type == 0) {
368 shdr_size = sizeof(Elf32_Shdr);
369 memset(&shdr32, 0, shdr_size);
370 shdr32.sh_info = cpu_convert_to_target32(s->sh_info, endian);
371 shdr = &shdr32;
372 } else {
373 shdr_size = sizeof(Elf64_Shdr);
374 memset(&shdr64, 0, shdr_size);
375 shdr64.sh_info = cpu_convert_to_target32(s->sh_info, endian);
376 shdr = &shdr64;
377 }
378
379 ret = fd_write_vmcore(&shdr, shdr_size, s);
380 if (ret < 0) {
381 dump_error(s, "dump: failed to write section header table.\n");
382 return -1;
383 }
384
385 return 0;
386 }
387
388 static int write_data(DumpState *s, void *buf, int length)
389 {
390 int ret;
391
392 ret = fd_write_vmcore(buf, length, s);
393 if (ret < 0) {
394 dump_error(s, "dump: failed to save memory.\n");
395 return -1;
396 }
397
398 return 0;
399 }
400
401 /* write the memroy to vmcore. 1 page per I/O. */
402 static int write_memory(DumpState *s, RAMBlock *block, ram_addr_t start,
403 int64_t size)
404 {
405 int64_t i;
406 int ret;
407
408 for (i = 0; i < size / TARGET_PAGE_SIZE; i++) {
409 ret = write_data(s, block->host + start + i * TARGET_PAGE_SIZE,
410 TARGET_PAGE_SIZE);
411 if (ret < 0) {
412 return ret;
413 }
414 }
415
416 if ((size % TARGET_PAGE_SIZE) != 0) {
417 ret = write_data(s, block->host + start + i * TARGET_PAGE_SIZE,
418 size % TARGET_PAGE_SIZE);
419 if (ret < 0) {
420 return ret;
421 }
422 }
423
424 return 0;
425 }
426
/* get the memory's offset in the vmcore */
static target_phys_addr_t get_offset(target_phys_addr_t phys_addr,
                                     DumpState *s)
{
    RAMBlock *block;
    target_phys_addr_t offset = s->memory_offset;
    int64_t size_in_block, start;

    /* an address outside the filter range is never stored in the file */
    if (s->has_filter) {
        if (phys_addr < s->begin || phys_addr >= s->begin + s->length) {
            return -1;
        }
    }

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (s->has_filter) {
            if (block->offset >= s->begin + s->length ||
                block->offset + block->length <= s->begin) {
                /* This block is out of the range */
                continue;
            }

            /* clamp the block's start to the beginning of the filter range */
            if (s->begin <= block->offset) {
                start = block->offset;
            } else {
                start = s->begin;
            }

            /* clamp the block's size to the end of the filter range */
            size_in_block = block->length - (start - block->offset);
            if (s->begin + s->length < block->offset + block->length) {
                size_in_block -= block->offset + block->length -
                    (s->begin + s->length);
            }
        } else {
            start = block->offset;
            size_in_block = block->length;
        }

        /* phys_addr falls inside this (clamped) block */
        if (phys_addr >= start && phys_addr < start + size_in_block) {
            return phys_addr - start + offset;
        }

        /* blocks are written back-to-back, so advance the file offset */
        offset += size_in_block;
    }

    /* -1 tells the caller the memory is not stored in the vmcore */
    return -1;
}
474
475 static int write_elf_loads(DumpState *s)
476 {
477 target_phys_addr_t offset;
478 MemoryMapping *memory_mapping;
479 uint32_t phdr_index = 1;
480 int ret;
481 uint32_t max_index;
482
483 if (s->have_section) {
484 max_index = s->sh_info;
485 } else {
486 max_index = s->phdr_num;
487 }
488
489 QTAILQ_FOREACH(memory_mapping, &s->list.head, next) {
490 offset = get_offset(memory_mapping->phys_addr, s);
491 if (s->dump_info.d_class == ELFCLASS64) {
492 ret = write_elf64_load(s, memory_mapping, phdr_index++, offset);
493 } else {
494 ret = write_elf32_load(s, memory_mapping, phdr_index++, offset);
495 }
496
497 if (ret < 0) {
498 return -1;
499 }
500
501 if (phdr_index >= max_index) {
502 break;
503 }
504 }
505
506 return 0;
507 }
508
509 /* write elf header, PT_NOTE and elf note to vmcore. */
510 static int dump_begin(DumpState *s)
511 {
512 int ret;
513
514 /*
515 * the vmcore's format is:
516 * --------------
517 * | elf header |
518 * --------------
519 * | PT_NOTE |
520 * --------------
521 * | PT_LOAD |
522 * --------------
523 * | ...... |
524 * --------------
525 * | PT_LOAD |
526 * --------------
527 * | sec_hdr |
528 * --------------
529 * | elf note |
530 * --------------
531 * | memory |
532 * --------------
533 *
534 * we only know where the memory is saved after we write elf note into
535 * vmcore.
536 */
537
538 /* write elf header to vmcore */
539 if (s->dump_info.d_class == ELFCLASS64) {
540 ret = write_elf64_header(s);
541 } else {
542 ret = write_elf32_header(s);
543 }
544 if (ret < 0) {
545 return -1;
546 }
547
548 if (s->dump_info.d_class == ELFCLASS64) {
549 /* write PT_NOTE to vmcore */
550 if (write_elf64_note(s) < 0) {
551 return -1;
552 }
553
554 /* write all PT_LOAD to vmcore */
555 if (write_elf_loads(s) < 0) {
556 return -1;
557 }
558
559 /* write section to vmcore */
560 if (s->have_section) {
561 if (write_elf_section(s, 1) < 0) {
562 return -1;
563 }
564 }
565
566 /* write notes to vmcore */
567 if (write_elf64_notes(s) < 0) {
568 return -1;
569 }
570
571 } else {
572 /* write PT_NOTE to vmcore */
573 if (write_elf32_note(s) < 0) {
574 return -1;
575 }
576
577 /* write all PT_LOAD to vmcore */
578 if (write_elf_loads(s) < 0) {
579 return -1;
580 }
581
582 /* write section to vmcore */
583 if (s->have_section) {
584 if (write_elf_section(s, 0) < 0) {
585 return -1;
586 }
587 }
588
589 /* write notes to vmcore */
590 if (write_elf32_notes(s) < 0) {
591 return -1;
592 }
593 }
594
595 return 0;
596 }
597
/* all memory has been written; release resources and resume the VM */
static int dump_completed(DumpState *s)
{
    dump_cleanup(s);
    return 0;
}
604
605 static int get_next_block(DumpState *s, RAMBlock *block)
606 {
607 while (1) {
608 block = QLIST_NEXT(block, next);
609 if (!block) {
610 /* no more block */
611 return 1;
612 }
613
614 s->start = 0;
615 s->block = block;
616 if (s->has_filter) {
617 if (block->offset >= s->begin + s->length ||
618 block->offset + block->length <= s->begin) {
619 /* This block is out of the range */
620 continue;
621 }
622
623 if (s->begin > block->offset) {
624 s->start = s->begin - block->offset;
625 }
626 }
627
628 return 0;
629 }
630 }
631
632 /* write all memory to vmcore */
633 static int dump_iterate(DumpState *s)
634 {
635 RAMBlock *block;
636 int64_t size;
637 int ret;
638
639 while (1) {
640 block = s->block;
641
642 size = block->length;
643 if (s->has_filter) {
644 size -= s->start;
645 if (s->begin + s->length < block->offset + block->length) {
646 size -= block->offset + block->length - (s->begin + s->length);
647 }
648 }
649 ret = write_memory(s, block, s->start, size);
650 if (ret == -1) {
651 return ret;
652 }
653
654 ret = get_next_block(s, block);
655 if (ret == 1) {
656 dump_completed(s);
657 return 0;
658 }
659 }
660 }
661
662 static int create_vmcore(DumpState *s)
663 {
664 int ret;
665
666 ret = dump_begin(s);
667 if (ret < 0) {
668 return -1;
669 }
670
671 ret = dump_iterate(s);
672 if (ret < 0) {
673 return -1;
674 }
675
676 return 0;
677 }
678
679 static ram_addr_t get_start_block(DumpState *s)
680 {
681 RAMBlock *block;
682
683 if (!s->has_filter) {
684 s->block = QLIST_FIRST(&ram_list.blocks);
685 return 0;
686 }
687
688 QLIST_FOREACH(block, &ram_list.blocks, next) {
689 if (block->offset >= s->begin + s->length ||
690 block->offset + block->length <= s->begin) {
691 /* This block is out of the range */
692 continue;
693 }
694
695 s->block = block;
696 if (s->begin > block->offset) {
697 s->start = s->begin - block->offset;
698 } else {
699 s->start = 0;
700 }
701 return s->start;
702 }
703
704 return -1;
705 }
706
707 static int dump_init(DumpState *s, int fd, bool paging, bool has_filter,
708 int64_t begin, int64_t length, Error **errp)
709 {
710 CPUArchState *env;
711 int nr_cpus;
712 int ret;
713
714 if (runstate_is_running()) {
715 vm_stop(RUN_STATE_SAVE_VM);
716 s->resume = true;
717 } else {
718 s->resume = false;
719 }
720
721 s->errp = errp;
722 s->fd = fd;
723 s->has_filter = has_filter;
724 s->begin = begin;
725 s->length = length;
726 s->start = get_start_block(s);
727 if (s->start == -1) {
728 error_set(errp, QERR_INVALID_PARAMETER, "begin");
729 goto cleanup;
730 }
731
732 /*
733 * get dump info: endian, class and architecture.
734 * If the target architecture is not supported, cpu_get_dump_info() will
735 * return -1.
736 *
737 * if we use kvm, we should synchronize the register before we get dump
738 * info.
739 */
740 nr_cpus = 0;
741 for (env = first_cpu; env != NULL; env = env->next_cpu) {
742 cpu_synchronize_state(env);
743 nr_cpus++;
744 }
745
746 ret = cpu_get_dump_info(&s->dump_info);
747 if (ret < 0) {
748 error_set(errp, QERR_UNSUPPORTED);
749 goto cleanup;
750 }
751
752 s->note_size = cpu_get_note_size(s->dump_info.d_class,
753 s->dump_info.d_machine, nr_cpus);
754 if (ret < 0) {
755 error_set(errp, QERR_UNSUPPORTED);
756 goto cleanup;
757 }
758
759 /* get memory mapping */
760 memory_mapping_list_init(&s->list);
761 if (paging) {
762 qemu_get_guest_memory_mapping(&s->list);
763 } else {
764 qemu_get_guest_simple_memory_mapping(&s->list);
765 }
766
767 if (s->has_filter) {
768 memory_mapping_filter(&s->list, s->begin, s->length);
769 }
770
771 /*
772 * calculate phdr_num
773 *
774 * the type of ehdr->e_phnum is uint16_t, so we should avoid overflow
775 */
776 s->phdr_num = 1; /* PT_NOTE */
777 if (s->list.num < UINT16_MAX - 2) {
778 s->phdr_num += s->list.num;
779 s->have_section = false;
780 } else {
781 s->have_section = true;
782 s->phdr_num = PN_XNUM;
783 s->sh_info = 1; /* PT_NOTE */
784
785 /* the type of shdr->sh_info is uint32_t, so we should avoid overflow */
786 if (s->list.num <= UINT32_MAX - 1) {
787 s->sh_info += s->list.num;
788 } else {
789 s->sh_info = UINT32_MAX;
790 }
791 }
792
793 if (s->dump_info.d_class == ELFCLASS64) {
794 if (s->have_section) {
795 s->memory_offset = sizeof(Elf64_Ehdr) +
796 sizeof(Elf64_Phdr) * s->sh_info +
797 sizeof(Elf64_Shdr) + s->note_size;
798 } else {
799 s->memory_offset = sizeof(Elf64_Ehdr) +
800 sizeof(Elf64_Phdr) * s->phdr_num + s->note_size;
801 }
802 } else {
803 if (s->have_section) {
804 s->memory_offset = sizeof(Elf32_Ehdr) +
805 sizeof(Elf32_Phdr) * s->sh_info +
806 sizeof(Elf32_Shdr) + s->note_size;
807 } else {
808 s->memory_offset = sizeof(Elf32_Ehdr) +
809 sizeof(Elf32_Phdr) * s->phdr_num + s->note_size;
810 }
811 }
812
813 return 0;
814
815 cleanup:
816 if (s->resume) {
817 vm_start();
818 }
819
820 return -1;
821 }
822
823 void qmp_dump_guest_memory(bool paging, const char *file, bool has_begin,
824 int64_t begin, bool has_length, int64_t length,
825 Error **errp)
826 {
827 const char *p;
828 int fd = -1;
829 DumpState *s;
830 int ret;
831
832 if (has_begin && !has_length) {
833 error_set(errp, QERR_MISSING_PARAMETER, "length");
834 return;
835 }
836 if (!has_begin && has_length) {
837 error_set(errp, QERR_MISSING_PARAMETER, "begin");
838 return;
839 }
840
841 #if !defined(WIN32)
842 if (strstart(file, "fd:", &p)) {
843 fd = monitor_get_fd(cur_mon, p);
844 if (fd == -1) {
845 error_set(errp, QERR_FD_NOT_FOUND, p);
846 return;
847 }
848 }
849 #endif
850
851 if (strstart(file, "file:", &p)) {
852 fd = qemu_open(p, O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, S_IRUSR);
853 if (fd < 0) {
854 error_set(errp, QERR_OPEN_FILE_FAILED, p);
855 return;
856 }
857 }
858
859 if (fd == -1) {
860 error_set(errp, QERR_INVALID_PARAMETER, "protocol");
861 return;
862 }
863
864 s = g_malloc(sizeof(DumpState));
865
866 ret = dump_init(s, fd, paging, has_begin, begin, length, errp);
867 if (ret < 0) {
868 g_free(s);
869 return;
870 }
871
872 if (create_vmcore(s) < 0 && !error_is_set(s->errp)) {
873 error_set(errp, QERR_IO_ERROR);
874 }
875
876 g_free(s);
877 }