/*
 * QEMU dump
 *
 * Copyright Fujitsu, Corp. 2011, 2012
 *
 * Authors:
 *     Wen Congyang <wency@cn.fujitsu.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#include "qemu-common.h"
#include <unistd.h>
#include "elf.h"
#include <sys/procfs.h>
#include <glib.h>
#include "cpu.h"
#include "cpu-all.h"
#include "targphys.h"
#include "monitor.h"
#include "kvm.h"
#include "dump.h"
#include "sysemu.h"
#include "bswap.h"
#include "memory_mapping.h"
#include "error.h"
#include "qmp-commands.h"
#include "gdbstub.h"

#if defined(CONFIG_HAVE_CORE_DUMP)
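/*
 * The target can use a different endianness from the host, so these helpers
 * convert host-endian values into the byte order requested for the dump
 * (ELFDATA2LSB or ELFDATA2MSB) before they are written to the vmcore.
 */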
static uint16_t cpu_convert_to_target16(uint16_t val, int endian)
{
    if (endian == ELFDATA2LSB) {
        val = cpu_to_le16(val);
    } else {
        val = cpu_to_be16(val);
    }

    return val;
}

static uint32_t cpu_convert_to_target32(uint32_t val, int endian)
{
    if (endian == ELFDATA2LSB) {
        val = cpu_to_le32(val);
    } else {
        val = cpu_to_be32(val);
    }

    return val;
}

static uint64_t cpu_convert_to_target64(uint64_t val, int endian)
{
    if (endian == ELFDATA2LSB) {
        val = cpu_to_le64(val);
    } else {
        val = cpu_to_be64(val);
    }

    return val;
}

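/*
 * State of a single guest-memory dump: the target's dump info, the guest
 * memory mappings, the ELF layout that was chosen (program/section header
 * counts, note size, offset of guest memory in the file), the destination
 * fd and the optional physical-address filter (begin/length).
 */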
typedef struct DumpState {
    ArchDumpInfo dump_info;
    MemoryMappingList list;
    uint16_t phdr_num;
    uint32_t sh_info;
    bool have_section;
    bool resume;
    size_t note_size;
    target_phys_addr_t memory_offset;
    int fd;

    RAMBlock *block;
    ram_addr_t start;
    bool has_filter;
    int64_t begin;
    int64_t length;
    Error **errp;
} DumpState;

static int dump_cleanup(DumpState *s)
{
    int ret = 0;

    memory_mapping_list_free(&s->list);
    if (s->fd != -1) {
        close(s->fd);
    }
    if (s->resume) {
        vm_start();
    }

    return ret;
}

static void dump_error(DumpState *s, const char *reason)
{
    dump_cleanup(s);
}

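/*
 * Write a full buffer to the dump fd. qemu_write_full() may stop short if
 * the (possibly user-supplied, possibly non-blocking) fd returns EAGAIN, so
 * keep retrying until everything has been written or a real error occurs.
 */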
static int fd_write_vmcore(void *buf, size_t size, void *opaque)
{
    DumpState *s = opaque;
    int fd = s->fd;
    size_t written_size;

    /* The fd may be passed in by the user, so it can be non-blocking */
    while (size) {
        written_size = qemu_write_full(fd, buf, size);
        if (written_size != size && errno != EAGAIN) {
            return -1;
        }

        buf += written_size;
        size -= written_size;
    }

    return 0;
}

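/*
 * write_elf64_header()/write_elf32_header() emit the ELF header of the
 * ET_CORE vmcore.  When there are too many program headers to fit into
 * e_phnum (s->have_section is set), the real count is carried by a single
 * section header written later, and e_shoff/e_shnum describe it here.
 */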
static int write_elf64_header(DumpState *s)
{
    Elf64_Ehdr elf_header;
    int ret;
    int endian = s->dump_info.d_endian;

    memset(&elf_header, 0, sizeof(Elf64_Ehdr));
    memcpy(&elf_header, ELFMAG, SELFMAG);
    elf_header.e_ident[EI_CLASS] = ELFCLASS64;
    elf_header.e_ident[EI_DATA] = s->dump_info.d_endian;
    elf_header.e_ident[EI_VERSION] = EV_CURRENT;
    elf_header.e_type = cpu_convert_to_target16(ET_CORE, endian);
    elf_header.e_machine = cpu_convert_to_target16(s->dump_info.d_machine,
                                                   endian);
    elf_header.e_version = cpu_convert_to_target32(EV_CURRENT, endian);
    elf_header.e_ehsize = cpu_convert_to_target16(sizeof(elf_header), endian);
    elf_header.e_phoff = cpu_convert_to_target64(sizeof(Elf64_Ehdr), endian);
    elf_header.e_phentsize = cpu_convert_to_target16(sizeof(Elf64_Phdr),
                                                     endian);
    elf_header.e_phnum = cpu_convert_to_target16(s->phdr_num, endian);
    if (s->have_section) {
        uint64_t shoff = sizeof(Elf64_Ehdr) + sizeof(Elf64_Phdr) * s->sh_info;

        elf_header.e_shoff = cpu_convert_to_target64(shoff, endian);
        elf_header.e_shentsize = cpu_convert_to_target16(sizeof(Elf64_Shdr),
                                                         endian);
        elf_header.e_shnum = cpu_convert_to_target16(1, endian);
    }

    ret = fd_write_vmcore(&elf_header, sizeof(elf_header), s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write elf header.\n");
        return -1;
    }

    return 0;
}

static int write_elf32_header(DumpState *s)
{
    Elf32_Ehdr elf_header;
    int ret;
    int endian = s->dump_info.d_endian;

    memset(&elf_header, 0, sizeof(Elf32_Ehdr));
    memcpy(&elf_header, ELFMAG, SELFMAG);
    elf_header.e_ident[EI_CLASS] = ELFCLASS32;
    elf_header.e_ident[EI_DATA] = endian;
    elf_header.e_ident[EI_VERSION] = EV_CURRENT;
    elf_header.e_type = cpu_convert_to_target16(ET_CORE, endian);
    elf_header.e_machine = cpu_convert_to_target16(s->dump_info.d_machine,
                                                   endian);
    elf_header.e_version = cpu_convert_to_target32(EV_CURRENT, endian);
    elf_header.e_ehsize = cpu_convert_to_target16(sizeof(elf_header), endian);
    elf_header.e_phoff = cpu_convert_to_target32(sizeof(Elf32_Ehdr), endian);
    elf_header.e_phentsize = cpu_convert_to_target16(sizeof(Elf32_Phdr),
                                                     endian);
    elf_header.e_phnum = cpu_convert_to_target16(s->phdr_num, endian);
    if (s->have_section) {
        uint32_t shoff = sizeof(Elf32_Ehdr) + sizeof(Elf32_Phdr) * s->sh_info;

        elf_header.e_shoff = cpu_convert_to_target32(shoff, endian);
        elf_header.e_shentsize = cpu_convert_to_target16(sizeof(Elf32_Shdr),
                                                         endian);
        elf_header.e_shnum = cpu_convert_to_target16(1, endian);
    }

    ret = fd_write_vmcore(&elf_header, sizeof(elf_header), s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write elf header.\n");
        return -1;
    }

    return 0;
}

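/*
 * Each guest memory mapping becomes one PT_LOAD program header.  "offset" is
 * the mapping's position in the vmcore; a value of -1 means the data itself
 * is filtered out of the file, so p_filesz is left at 0 while p_memsz still
 * records the mapping's real size.
 */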
static int write_elf64_load(DumpState *s, MemoryMapping *memory_mapping,
                            int phdr_index, target_phys_addr_t offset)
{
    Elf64_Phdr phdr;
    int ret;
    int endian = s->dump_info.d_endian;

    memset(&phdr, 0, sizeof(Elf64_Phdr));
    phdr.p_type = cpu_convert_to_target32(PT_LOAD, endian);
    phdr.p_offset = cpu_convert_to_target64(offset, endian);
    phdr.p_paddr = cpu_convert_to_target64(memory_mapping->phys_addr, endian);
    if (offset == -1) {
        /* When the memory is not stored into vmcore, offset will be -1 */
        phdr.p_filesz = 0;
    } else {
        phdr.p_filesz = cpu_convert_to_target64(memory_mapping->length, endian);
    }
    phdr.p_memsz = cpu_convert_to_target64(memory_mapping->length, endian);
    phdr.p_vaddr = cpu_convert_to_target64(memory_mapping->virt_addr, endian);

    ret = fd_write_vmcore(&phdr, sizeof(Elf64_Phdr), s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write program header table.\n");
        return -1;
    }

    return 0;
}

static int write_elf32_load(DumpState *s, MemoryMapping *memory_mapping,
                            int phdr_index, target_phys_addr_t offset)
{
    Elf32_Phdr phdr;
    int ret;
    int endian = s->dump_info.d_endian;

    memset(&phdr, 0, sizeof(Elf32_Phdr));
    phdr.p_type = cpu_convert_to_target32(PT_LOAD, endian);
    phdr.p_offset = cpu_convert_to_target32(offset, endian);
    phdr.p_paddr = cpu_convert_to_target32(memory_mapping->phys_addr, endian);
    if (offset == -1) {
        /* When the memory is not stored into vmcore, offset will be -1 */
        phdr.p_filesz = 0;
    } else {
        phdr.p_filesz = cpu_convert_to_target32(memory_mapping->length, endian);
    }
    phdr.p_memsz = cpu_convert_to_target32(memory_mapping->length, endian);
    phdr.p_vaddr = cpu_convert_to_target32(memory_mapping->virt_addr, endian);

    ret = fd_write_vmcore(&phdr, sizeof(Elf32_Phdr), s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write program header table.\n");
        return -1;
    }

    return 0;
}

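/*
 * write_elf64_note()/write_elf32_note() emit the PT_NOTE program header.
 * The note data itself sits just in front of the dumped memory, so its file
 * offset is memory_offset - note_size.
 */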
static int write_elf64_note(DumpState *s)
{
    Elf64_Phdr phdr;
    int endian = s->dump_info.d_endian;
    target_phys_addr_t begin = s->memory_offset - s->note_size;
    int ret;

    memset(&phdr, 0, sizeof(Elf64_Phdr));
    phdr.p_type = cpu_convert_to_target32(PT_NOTE, endian);
    phdr.p_offset = cpu_convert_to_target64(begin, endian);
    phdr.p_paddr = 0;
    phdr.p_filesz = cpu_convert_to_target64(s->note_size, endian);
    phdr.p_memsz = cpu_convert_to_target64(s->note_size, endian);
    phdr.p_vaddr = 0;

    ret = fd_write_vmcore(&phdr, sizeof(Elf64_Phdr), s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write program header table.\n");
        return -1;
    }

    return 0;
}

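/*
 * write_elf64_notes()/write_elf32_notes() emit the note data: one register
 * note per vCPU, followed by a QEMU-specific note with additional CPU state.
 */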
static int write_elf64_notes(DumpState *s)
{
    CPUArchState *env;
    int ret;
    int id;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        id = cpu_index(env);
        ret = cpu_write_elf64_note(fd_write_vmcore, env, id, s);
        if (ret < 0) {
            dump_error(s, "dump: failed to write elf notes.\n");
            return -1;
        }
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        ret = cpu_write_elf64_qemunote(fd_write_vmcore, env, s);
        if (ret < 0) {
            dump_error(s, "dump: failed to write CPU status.\n");
            return -1;
        }
    }

    return 0;
}

static int write_elf32_note(DumpState *s)
{
    target_phys_addr_t begin = s->memory_offset - s->note_size;
    Elf32_Phdr phdr;
    int endian = s->dump_info.d_endian;
    int ret;

    memset(&phdr, 0, sizeof(Elf32_Phdr));
    phdr.p_type = cpu_convert_to_target32(PT_NOTE, endian);
    phdr.p_offset = cpu_convert_to_target32(begin, endian);
    phdr.p_paddr = 0;
    phdr.p_filesz = cpu_convert_to_target32(s->note_size, endian);
    phdr.p_memsz = cpu_convert_to_target32(s->note_size, endian);
    phdr.p_vaddr = 0;

    ret = fd_write_vmcore(&phdr, sizeof(Elf32_Phdr), s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write program header table.\n");
        return -1;
    }

    return 0;
}

static int write_elf32_notes(DumpState *s)
{
    CPUArchState *env;
    int ret;
    int id;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        id = cpu_index(env);
        ret = cpu_write_elf32_note(fd_write_vmcore, env, id, s);
        if (ret < 0) {
            dump_error(s, "dump: failed to write elf notes.\n");
            return -1;
        }
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        ret = cpu_write_elf32_qemunote(fd_write_vmcore, env, s);
        if (ret < 0) {
            dump_error(s, "dump: failed to write CPU status.\n");
            return -1;
        }
    }

    return 0;
}

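/*
 * Write the single section header that is only needed when the number of
 * program headers does not fit into e_phnum: its sh_info field holds the
 * real count.  "type" selects the 32-bit (0) or 64-bit (non-zero) layout.
 */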
static int write_elf_section(DumpState *s, int type)
{
    Elf32_Shdr shdr32;
    Elf64_Shdr shdr64;
    int endian = s->dump_info.d_endian;
    int shdr_size;
    void *shdr;
    int ret;

    if (type == 0) {
        shdr_size = sizeof(Elf32_Shdr);
        memset(&shdr32, 0, shdr_size);
        shdr32.sh_info = cpu_convert_to_target32(s->sh_info, endian);
        shdr = &shdr32;
    } else {
        shdr_size = sizeof(Elf64_Shdr);
        memset(&shdr64, 0, shdr_size);
        shdr64.sh_info = cpu_convert_to_target32(s->sh_info, endian);
        shdr = &shdr64;
    }

    ret = fd_write_vmcore(shdr, shdr_size, s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write section header table.\n");
        return -1;
    }

    return 0;
}

static int write_data(DumpState *s, void *buf, int length)
{
    int ret;

    ret = fd_write_vmcore(buf, length, s);
    if (ret < 0) {
        dump_error(s, "dump: failed to save memory.\n");
        return -1;
    }

    return 0;
}

/* write the memory to vmcore. 1 page per I/O. */
static int write_memory(DumpState *s, RAMBlock *block, ram_addr_t start,
                        int64_t size)
{
    int64_t i;
    int ret;

    for (i = 0; i < size / TARGET_PAGE_SIZE; i++) {
        ret = write_data(s, block->host + start + i * TARGET_PAGE_SIZE,
                         TARGET_PAGE_SIZE);
        if (ret < 0) {
            return ret;
        }
    }

    if ((size % TARGET_PAGE_SIZE) != 0) {
        ret = write_data(s, block->host + start + i * TARGET_PAGE_SIZE,
                         size % TARGET_PAGE_SIZE);
        if (ret < 0) {
            return ret;
        }
    }

    return 0;
}

/* get the memory's offset in the vmcore */
static target_phys_addr_t get_offset(target_phys_addr_t phys_addr,
                                     DumpState *s)
{
    RAMBlock *block;
    target_phys_addr_t offset = s->memory_offset;
    int64_t size_in_block, start;

    if (s->has_filter) {
        if (phys_addr < s->begin || phys_addr >= s->begin + s->length) {
            return -1;
        }
    }

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (s->has_filter) {
            if (block->offset >= s->begin + s->length ||
                block->offset + block->length <= s->begin) {
                /* This block is out of the range */
                continue;
            }

            if (s->begin <= block->offset) {
                start = block->offset;
            } else {
                start = s->begin;
            }

            size_in_block = block->length - (start - block->offset);
            if (s->begin + s->length < block->offset + block->length) {
                size_in_block -= block->offset + block->length -
                                 (s->begin + s->length);
            }
        } else {
            start = block->offset;
            size_in_block = block->length;
        }

        if (phys_addr >= start && phys_addr < start + size_in_block) {
            return phys_addr - start + offset;
        }

        offset += size_in_block;
    }

    return -1;
}

static int write_elf_loads(DumpState *s)
{
    target_phys_addr_t offset;
    MemoryMapping *memory_mapping;
    uint32_t phdr_index = 1;
    int ret;
    uint32_t max_index;

    if (s->have_section) {
        max_index = s->sh_info;
    } else {
        max_index = s->phdr_num;
    }

    QTAILQ_FOREACH(memory_mapping, &s->list.head, next) {
        offset = get_offset(memory_mapping->phys_addr, s);
        if (s->dump_info.d_class == ELFCLASS64) {
            ret = write_elf64_load(s, memory_mapping, phdr_index++, offset);
        } else {
            ret = write_elf32_load(s, memory_mapping, phdr_index++, offset);
        }

        if (ret < 0) {
            return -1;
        }

        if (phdr_index >= max_index) {
            break;
        }
    }

    return 0;
}

/* write elf header, PT_NOTE and elf note to vmcore. */
static int dump_begin(DumpState *s)
{
    int ret;

    /*
     * the vmcore's format is:
     *   --------------
     *   |  elf header |
     *   --------------
     *   |  PT_NOTE    |
     *   --------------
     *   |  PT_LOAD    |
     *   --------------
     *   |  ......     |
     *   --------------
     *   |  PT_LOAD    |
     *   --------------
     *   |  sec_hdr    |
     *   --------------
     *   |  elf note   |
     *   --------------
     *   |  memory     |
     *   --------------
     *
     * we only know where the memory is saved after we write the elf note into
     * the vmcore.
     */

    /* write elf header to vmcore */
    if (s->dump_info.d_class == ELFCLASS64) {
        ret = write_elf64_header(s);
    } else {
        ret = write_elf32_header(s);
    }
    if (ret < 0) {
        return -1;
    }

    if (s->dump_info.d_class == ELFCLASS64) {
        /* write PT_NOTE to vmcore */
        if (write_elf64_note(s) < 0) {
            return -1;
        }

        /* write all PT_LOAD to vmcore */
        if (write_elf_loads(s) < 0) {
            return -1;
        }

        /* write section to vmcore */
        if (s->have_section) {
            if (write_elf_section(s, 1) < 0) {
                return -1;
            }
        }

        /* write notes to vmcore */
        if (write_elf64_notes(s) < 0) {
            return -1;
        }

    } else {
        /* write PT_NOTE to vmcore */
        if (write_elf32_note(s) < 0) {
            return -1;
        }

        /* write all PT_LOAD to vmcore */
        if (write_elf_loads(s) < 0) {
            return -1;
        }

        /* write section to vmcore */
        if (s->have_section) {
            if (write_elf_section(s, 0) < 0) {
                return -1;
            }
        }

        /* write notes to vmcore */
        if (write_elf32_notes(s) < 0) {
            return -1;
        }
    }

    return 0;
}

/* the dump is finished: clean up, and resume the guest if we stopped it */
static int dump_completed(DumpState *s)
{
    dump_cleanup(s);
    return 0;
}

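/*
 * Advance to the RAM block after "block" that still intersects the optional
 * dump filter, updating s->block and s->start.  Returns 1 when no block is
 * left, 0 otherwise.
 */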
static int get_next_block(DumpState *s, RAMBlock *block)
{
    while (1) {
        block = QLIST_NEXT(block, next);
        if (!block) {
            /* no more blocks */
            return 1;
        }

        s->start = 0;
        s->block = block;
        if (s->has_filter) {
            if (block->offset >= s->begin + s->length ||
                block->offset + block->length <= s->begin) {
                /* This block is out of the range */
                continue;
            }

            if (s->begin > block->offset) {
                s->start = s->begin - block->offset;
            }
        }

        return 0;
    }
}

/* write all memory to vmcore */
static int dump_iterate(DumpState *s)
{
    RAMBlock *block;
    int64_t size;
    int ret;

    while (1) {
        block = s->block;

        size = block->length;
        if (s->has_filter) {
            size -= s->start;
            if (s->begin + s->length < block->offset + block->length) {
                size -= block->offset + block->length - (s->begin + s->length);
            }
        }
        ret = write_memory(s, block, s->start, size);
        if (ret == -1) {
            return ret;
        }

        ret = get_next_block(s, block);
        if (ret == 1) {
            dump_completed(s);
            return 0;
        }
    }
}

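/* top level: write the ELF skeleton, then stream the guest memory */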
static int create_vmcore(DumpState *s)
{
    int ret;

    ret = dump_begin(s);
    if (ret < 0) {
        return -1;
    }

    ret = dump_iterate(s);
    if (ret < 0) {
        return -1;
    }

    return 0;
}

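/*
 * Pick the first RAM block to dump and the offset inside it, honouring the
 * optional filter.  Returns the start offset, or -1 if no block overlaps
 * the requested range.
 */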
static ram_addr_t get_start_block(DumpState *s)
{
    RAMBlock *block;

    if (!s->has_filter) {
        s->block = QLIST_FIRST(&ram_list.blocks);
        return 0;
    }

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset >= s->begin + s->length ||
            block->offset + block->length <= s->begin) {
            /* This block is out of the range */
            continue;
        }

        s->block = block;
        if (s->begin > block->offset) {
            s->start = s->begin - block->offset;
        } else {
            s->start = 0;
        }
        return s->start;
    }

    return -1;
}

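/*
 * Prepare a dump: stop the guest if it is running (remembering to resume it
 * later), validate the filter, fetch the per-target dump info and the guest
 * memory mappings, and work out the ELF layout (program/section header
 * counts, note size and the file offset where guest memory will start).
 */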
static int dump_init(DumpState *s, int fd, bool paging, bool has_filter,
                     int64_t begin, int64_t length, Error **errp)
{
    CPUArchState *env;
    int nr_cpus;
    int ret;

    if (runstate_is_running()) {
        vm_stop(RUN_STATE_SAVE_VM);
        s->resume = true;
    } else {
        s->resume = false;
    }

    s->errp = errp;
    s->fd = fd;
    s->has_filter = has_filter;
    s->begin = begin;
    s->length = length;
    s->start = get_start_block(s);
    if (s->start == -1) {
        error_set(errp, QERR_INVALID_PARAMETER, "begin");
        goto cleanup;
    }

    /*
     * get dump info: endian, class and architecture.
     * If the target architecture is not supported, cpu_get_dump_info() will
     * return -1.
     *
     * If we use KVM, we should synchronize the registers before we get the
     * dump info.
     */
    nr_cpus = 0;
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        cpu_synchronize_state(env);
        nr_cpus++;
    }

    ret = cpu_get_dump_info(&s->dump_info);
    if (ret < 0) {
        error_set(errp, QERR_UNSUPPORTED);
        goto cleanup;
    }

    /* get memory mapping */
    memory_mapping_list_init(&s->list);
    if (paging) {
        qemu_get_guest_memory_mapping(&s->list);
    } else {
        qemu_get_guest_simple_memory_mapping(&s->list);
    }

    if (s->has_filter) {
        memory_mapping_filter(&s->list, s->begin, s->length);
    }

    /*
     * calculate phdr_num
     *
     * the type of ehdr->e_phnum is uint16_t, so we should avoid overflow
     */
    s->phdr_num = 1; /* PT_NOTE */
    if (s->list.num < UINT16_MAX - 2) {
        s->phdr_num += s->list.num;
        s->have_section = false;
    } else {
        s->have_section = true;
        s->phdr_num = PN_XNUM;
        s->sh_info = 1; /* PT_NOTE */

        /* the type of shdr->sh_info is uint32_t, so we should avoid overflow */
        if (s->list.num <= UINT32_MAX - 1) {
            s->sh_info += s->list.num;
        } else {
            s->sh_info = UINT32_MAX;
        }
    }

    s->note_size = cpu_get_note_size(s->dump_info.d_class,
                                     s->dump_info.d_machine, nr_cpus);
    if (s->dump_info.d_class == ELFCLASS64) {
        if (s->have_section) {
            s->memory_offset = sizeof(Elf64_Ehdr) +
                               sizeof(Elf64_Phdr) * s->sh_info +
                               sizeof(Elf64_Shdr) + s->note_size;
        } else {
            s->memory_offset = sizeof(Elf64_Ehdr) +
                               sizeof(Elf64_Phdr) * s->phdr_num + s->note_size;
        }
    } else {
        if (s->have_section) {
            s->memory_offset = sizeof(Elf32_Ehdr) +
                               sizeof(Elf32_Phdr) * s->sh_info +
                               sizeof(Elf32_Shdr) + s->note_size;
        } else {
            s->memory_offset = sizeof(Elf32_Ehdr) +
                               sizeof(Elf32_Phdr) * s->phdr_num + s->note_size;
        }
    }

    return 0;

cleanup:
    if (s->resume) {
        vm_start();
    }

    return -1;
}

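/*
 * QMP handler for dump-guest-memory.  "file" selects the destination with a
 * "fd:<name>" or "file:<path>" protocol prefix; "begin"/"length" optionally
 * restrict the dump to a physical address range and must be given together.
 *
 * An illustrative invocation (the path is an example only):
 *
 *   { "execute": "dump-guest-memory",
 *     "arguments": { "paging": false, "protocol": "file:/tmp/vmcore" } }
 */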
void qmp_dump_guest_memory(bool paging, const char *file, bool has_begin,
                           int64_t begin, bool has_length, int64_t length,
                           Error **errp)
{
    const char *p;
    int fd = -1;
    DumpState *s;
    int ret;

    if (has_begin && !has_length) {
        error_set(errp, QERR_MISSING_PARAMETER, "length");
        return;
    }
    if (!has_begin && has_length) {
        error_set(errp, QERR_MISSING_PARAMETER, "begin");
        return;
    }

#if !defined(WIN32)
    if (strstart(file, "fd:", &p)) {
        fd = monitor_get_fd(cur_mon, p);
        if (fd == -1) {
            error_set(errp, QERR_FD_NOT_FOUND, p);
            return;
        }
    }
#endif

    if (strstart(file, "file:", &p)) {
        fd = qemu_open(p, O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, S_IRUSR);
        if (fd < 0) {
            error_set(errp, QERR_OPEN_FILE_FAILED, p);
            return;
        }
    }

    if (fd == -1) {
        error_set(errp, QERR_INVALID_PARAMETER, "protocol");
        return;
    }

    s = g_malloc(sizeof(DumpState));

    ret = dump_init(s, fd, paging, has_begin, begin, length, errp);
    if (ret < 0) {
        g_free(s);
        return;
    }

    if (create_vmcore(s) < 0 && !error_is_set(s->errp)) {
        error_set(errp, QERR_IO_ERROR);
    }

    g_free(s);
}

#else
/* we need this function in hmp.c */
void qmp_dump_guest_memory(bool paging, const char *file, bool has_begin,
                           int64_t begin, bool has_length, int64_t length,
                           Error **errp)
{
    error_set(errp, QERR_UNSUPPORTED);
}
#endif