1 /* This is the Linux kernel elf-loading code, ported into user space */
2 #include <sys/time.h>
3 #include <sys/param.h>
4
5 #include <stdio.h>
6 #include <sys/types.h>
7 #include <fcntl.h>
8 #include <errno.h>
9 #include <unistd.h>
10 #include <sys/mman.h>
11 #include <sys/resource.h>
12 #include <stdlib.h>
13 #include <string.h>
14 #include <time.h>
15
16 #include "qemu.h"
17 #include "disas.h"
18
19 #ifdef _ARCH_PPC64
20 #undef ARCH_DLINFO
21 #undef ELF_PLATFORM
22 #undef ELF_HWCAP
23 #undef ELF_CLASS
24 #undef ELF_DATA
25 #undef ELF_ARCH
26 #endif
27
28 #define ELF_OSABI ELFOSABI_SYSV
29
30 /* from personality.h */
31
32 /*
33 * Flags for bug emulation.
34 *
35 * These occupy the top three bytes.
36 */
37 enum {
38 ADDR_NO_RANDOMIZE = 0x0040000, /* disable randomization of VA space */
39 FDPIC_FUNCPTRS = 0x0080000, /* userspace function ptrs point to descriptors
40 * (signal handling)
41 */
42 MMAP_PAGE_ZERO = 0x0100000,
43 ADDR_COMPAT_LAYOUT = 0x0200000,
44 READ_IMPLIES_EXEC = 0x0400000,
45 ADDR_LIMIT_32BIT = 0x0800000,
46 SHORT_INODE = 0x1000000,
47 WHOLE_SECONDS = 0x2000000,
48 STICKY_TIMEOUTS = 0x4000000,
49 ADDR_LIMIT_3GB = 0x8000000,
50 };
51
52 /*
53 * Personality types.
54 *
55 * These go in the low byte. Avoid using the top bit, it will
56 * conflict with error returns.
57 */
58 enum {
59 PER_LINUX = 0x0000,
60 PER_LINUX_32BIT = 0x0000 | ADDR_LIMIT_32BIT,
61 PER_LINUX_FDPIC = 0x0000 | FDPIC_FUNCPTRS,
62 PER_SVR4 = 0x0001 | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
63 PER_SVR3 = 0x0002 | STICKY_TIMEOUTS | SHORT_INODE,
64 PER_SCOSVR3 = 0x0003 | STICKY_TIMEOUTS |
65 WHOLE_SECONDS | SHORT_INODE,
66 PER_OSR5 = 0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS,
67 PER_WYSEV386 = 0x0004 | STICKY_TIMEOUTS | SHORT_INODE,
68 PER_ISCR4 = 0x0005 | STICKY_TIMEOUTS,
69 PER_BSD = 0x0006,
70 PER_SUNOS = 0x0006 | STICKY_TIMEOUTS,
71 PER_XENIX = 0x0007 | STICKY_TIMEOUTS | SHORT_INODE,
72 PER_LINUX32 = 0x0008,
73 PER_LINUX32_3GB = 0x0008 | ADDR_LIMIT_3GB,
74 PER_IRIX32 = 0x0009 | STICKY_TIMEOUTS,/* IRIX5 32-bit */
75 PER_IRIXN32 = 0x000a | STICKY_TIMEOUTS,/* IRIX6 new 32-bit */
76 PER_IRIX64 = 0x000b | STICKY_TIMEOUTS,/* IRIX6 64-bit */
77 PER_RISCOS = 0x000c,
78 PER_SOLARIS = 0x000d | STICKY_TIMEOUTS,
79 PER_UW7 = 0x000e | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
80 PER_OSF4 = 0x000f, /* OSF/1 v4 */
81 PER_HPUX = 0x0010,
82 PER_MASK = 0x00ff,
83 };
84
85 /*
86 * Return the base personality without flags.
87 */
88 #define personality(pers) (pers & PER_MASK)
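/* For example, personality(PER_LINUX32_3GB) yields PER_LINUX32, because the
   ADDR_LIMIT_3GB flag lives outside the low PER_MASK byte. */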
89
90 /* this flag is ineffective under Linux too, should be deleted */
91 #ifndef MAP_DENYWRITE
92 #define MAP_DENYWRITE 0
93 #endif
94
95 /* should probably go in elf.h */
96 #ifndef ELIBBAD
97 #define ELIBBAD 80
98 #endif
99
100 typedef target_ulong target_elf_greg_t;
101 #ifdef USE_UID16
102 typedef uint16_t target_uid_t;
103 typedef uint16_t target_gid_t;
104 #else
105 typedef uint32_t target_uid_t;
106 typedef uint32_t target_gid_t;
107 #endif
108 typedef int32_t target_pid_t;
109
110 #ifdef TARGET_I386
111
112 #define ELF_PLATFORM get_elf_platform()
113
114 static const char *get_elf_platform(void)
115 {
116 static char elf_platform[] = "i386";
117 int family = (thread_env->cpuid_version >> 8) & 0xff;
118 if (family > 6)
119 family = 6;
120 if (family >= 3)
121 elf_platform[1] = '0' + family;
122 return elf_platform;
123 }
124
125 #define ELF_HWCAP get_elf_hwcap()
126
127 static uint32_t get_elf_hwcap(void)
128 {
129 return thread_env->cpuid_features;
130 }
131
132 #ifdef TARGET_X86_64
133 #define ELF_START_MMAP 0x2aaaaab000ULL
134 #define elf_check_arch(x) ( ((x) == ELF_ARCH) )
135
136 #define ELF_CLASS ELFCLASS64
137 #define ELF_DATA ELFDATA2LSB
138 #define ELF_ARCH EM_X86_64
139
140 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
141 {
142 regs->rax = 0;
143 regs->rsp = infop->start_stack;
144 regs->rip = infop->entry;
145 }
146
147 #define ELF_NREG 27
148 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
149
150 /*
151  * Note that ELF_NREG should be 29, as there should be room for
152  * TRAPNO and ERR "registers" as well, but Linux doesn't dump
153 * those.
154 *
155 * See linux kernel: arch/x86/include/asm/elf.h
156 */
157 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUState *env)
158 {
159 (*regs)[0] = env->regs[15];
160 (*regs)[1] = env->regs[14];
161 (*regs)[2] = env->regs[13];
162 (*regs)[3] = env->regs[12];
163 (*regs)[4] = env->regs[R_EBP];
164 (*regs)[5] = env->regs[R_EBX];
165 (*regs)[6] = env->regs[11];
166 (*regs)[7] = env->regs[10];
167 (*regs)[8] = env->regs[9];
168 (*regs)[9] = env->regs[8];
169 (*regs)[10] = env->regs[R_EAX];
170 (*regs)[11] = env->regs[R_ECX];
171 (*regs)[12] = env->regs[R_EDX];
172 (*regs)[13] = env->regs[R_ESI];
173 (*regs)[14] = env->regs[R_EDI];
174 (*regs)[15] = env->regs[R_EAX]; /* XXX */
175 (*regs)[16] = env->eip;
176 (*regs)[17] = env->segs[R_CS].selector & 0xffff;
177 (*regs)[18] = env->eflags;
178 (*regs)[19] = env->regs[R_ESP];
179 (*regs)[20] = env->segs[R_SS].selector & 0xffff;
180 (*regs)[21] = env->segs[R_FS].selector & 0xffff;
181 (*regs)[22] = env->segs[R_GS].selector & 0xffff;
182 (*regs)[23] = env->segs[R_DS].selector & 0xffff;
183 (*regs)[24] = env->segs[R_ES].selector & 0xffff;
184 (*regs)[25] = env->segs[R_FS].selector & 0xffff;
185 (*regs)[26] = env->segs[R_GS].selector & 0xffff;
186 }
187
188 #else
189
190 #define ELF_START_MMAP 0x80000000
191
192 /*
193 * This is used to ensure we don't load something for the wrong architecture.
194 */
195 #define elf_check_arch(x) ( ((x) == EM_386) || ((x) == EM_486) )
196
197 /*
198 * These are used to set parameters in the core dumps.
199 */
200 #define ELF_CLASS ELFCLASS32
201 #define ELF_DATA ELFDATA2LSB
202 #define ELF_ARCH EM_386
203
204 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
205 {
206 regs->esp = infop->start_stack;
207 regs->eip = infop->entry;
208
209 /* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program
210 starts %edx contains a pointer to a function which might be
211    registered using `atexit'. This provides a means for the
212 dynamic linker to call DT_FINI functions for shared libraries
213 that have been loaded before the code runs.
214
215    A value of 0 tells us that we have no such handler. */
216 regs->edx = 0;
217 }
218
219 #define ELF_NREG 17
220 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
221
222 /*
223  * Note that ELF_NREG should be 19, as there should be room for
224  * TRAPNO and ERR "registers" as well, but Linux doesn't dump
225 * those.
226 *
227 * See linux kernel: arch/x86/include/asm/elf.h
228 */
229 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUState *env)
230 {
231 (*regs)[0] = env->regs[R_EBX];
232 (*regs)[1] = env->regs[R_ECX];
233 (*regs)[2] = env->regs[R_EDX];
234 (*regs)[3] = env->regs[R_ESI];
235 (*regs)[4] = env->regs[R_EDI];
236 (*regs)[5] = env->regs[R_EBP];
237 (*regs)[6] = env->regs[R_EAX];
238 (*regs)[7] = env->segs[R_DS].selector & 0xffff;
239 (*regs)[8] = env->segs[R_ES].selector & 0xffff;
240 (*regs)[9] = env->segs[R_FS].selector & 0xffff;
241 (*regs)[10] = env->segs[R_GS].selector & 0xffff;
242 (*regs)[11] = env->regs[R_EAX]; /* XXX */
243 (*regs)[12] = env->eip;
244 (*regs)[13] = env->segs[R_CS].selector & 0xffff;
245 (*regs)[14] = env->eflags;
246 (*regs)[15] = env->regs[R_ESP];
247 (*regs)[16] = env->segs[R_SS].selector & 0xffff;
248 }
249 #endif
250
251 #define USE_ELF_CORE_DUMP
252 #define ELF_EXEC_PAGESIZE 4096
253
254 #endif
255
256 #ifdef TARGET_ARM
257
258 #define ELF_START_MMAP 0x80000000
259
260 #define elf_check_arch(x) ( (x) == EM_ARM )
261
262 #define ELF_CLASS ELFCLASS32
263 #ifdef TARGET_WORDS_BIGENDIAN
264 #define ELF_DATA ELFDATA2MSB
265 #else
266 #define ELF_DATA ELFDATA2LSB
267 #endif
268 #define ELF_ARCH EM_ARM
269
270 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
271 {
272 abi_long stack = infop->start_stack;
273 memset(regs, 0, sizeof(*regs));
274 regs->ARM_cpsr = 0x10;
275 if (infop->entry & 1)
276 regs->ARM_cpsr |= CPSR_T;
277 regs->ARM_pc = infop->entry & 0xfffffffe;
278 regs->ARM_sp = infop->start_stack;
279     /* FIXME - what to do on failure of get_user()? */
280 get_user_ual(regs->ARM_r2, stack + 8); /* envp */
281     get_user_ual(regs->ARM_r1, stack + 4); /* argv */
282 /* XXX: it seems that r0 is zeroed after ! */
283 regs->ARM_r0 = 0;
284 /* For uClinux PIC binaries. */
285 /* XXX: Linux does this only on ARM with no MMU (do we care ?) */
286 regs->ARM_r10 = infop->start_data;
287 }
288
289 #define ELF_NREG 18
290 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
291
292 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUState *env)
293 {
294 (*regs)[0] = tswapl(env->regs[0]);
295 (*regs)[1] = tswapl(env->regs[1]);
296 (*regs)[2] = tswapl(env->regs[2]);
297 (*regs)[3] = tswapl(env->regs[3]);
298 (*regs)[4] = tswapl(env->regs[4]);
299 (*regs)[5] = tswapl(env->regs[5]);
300 (*regs)[6] = tswapl(env->regs[6]);
301 (*regs)[7] = tswapl(env->regs[7]);
302 (*regs)[8] = tswapl(env->regs[8]);
303 (*regs)[9] = tswapl(env->regs[9]);
304 (*regs)[10] = tswapl(env->regs[10]);
305 (*regs)[11] = tswapl(env->regs[11]);
306 (*regs)[12] = tswapl(env->regs[12]);
307 (*regs)[13] = tswapl(env->regs[13]);
308 (*regs)[14] = tswapl(env->regs[14]);
309 (*regs)[15] = tswapl(env->regs[15]);
310
311 (*regs)[16] = tswapl(cpsr_read((CPUState *)env));
312 (*regs)[17] = tswapl(env->regs[0]); /* XXX */
313 }
314
315 #define USE_ELF_CORE_DUMP
316 #define ELF_EXEC_PAGESIZE 4096
317
318 enum
319 {
320 ARM_HWCAP_ARM_SWP = 1 << 0,
321 ARM_HWCAP_ARM_HALF = 1 << 1,
322 ARM_HWCAP_ARM_THUMB = 1 << 2,
323 ARM_HWCAP_ARM_26BIT = 1 << 3,
324 ARM_HWCAP_ARM_FAST_MULT = 1 << 4,
325 ARM_HWCAP_ARM_FPA = 1 << 5,
326 ARM_HWCAP_ARM_VFP = 1 << 6,
327 ARM_HWCAP_ARM_EDSP = 1 << 7,
328 ARM_HWCAP_ARM_JAVA = 1 << 8,
329 ARM_HWCAP_ARM_IWMMXT = 1 << 9,
330 ARM_HWCAP_ARM_THUMBEE = 1 << 10,
331 ARM_HWCAP_ARM_NEON = 1 << 11,
332 ARM_HWCAP_ARM_VFPv3 = 1 << 12,
333 ARM_HWCAP_ARM_VFPv3D16 = 1 << 13,
334 };
335
336 #define ELF_HWCAP (ARM_HWCAP_ARM_SWP | ARM_HWCAP_ARM_HALF \
337 | ARM_HWCAP_ARM_THUMB | ARM_HWCAP_ARM_FAST_MULT \
338 | ARM_HWCAP_ARM_FPA | ARM_HWCAP_ARM_VFP \
339 | ARM_HWCAP_ARM_NEON | ARM_HWCAP_ARM_VFPv3 )
340
341 #endif
342
343 #ifdef TARGET_SPARC
344 #ifdef TARGET_SPARC64
345
346 #define ELF_START_MMAP 0x80000000
347
348 #ifndef TARGET_ABI32
349 #define elf_check_arch(x) ( (x) == EM_SPARCV9 || (x) == EM_SPARC32PLUS )
350 #else
351 #define elf_check_arch(x) ( (x) == EM_SPARC32PLUS || (x) == EM_SPARC )
352 #endif
353
354 #define ELF_CLASS ELFCLASS64
355 #define ELF_DATA ELFDATA2MSB
356 #define ELF_ARCH EM_SPARCV9
357
358 #define STACK_BIAS 2047
359
360 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
361 {
362 #ifndef TARGET_ABI32
363 regs->tstate = 0;
364 #endif
365 regs->pc = infop->entry;
366 regs->npc = regs->pc + 4;
367 regs->y = 0;
368 #ifdef TARGET_ABI32
369 regs->u_regs[14] = infop->start_stack - 16 * 4;
370 #else
371 if (personality(infop->personality) == PER_LINUX32)
372 regs->u_regs[14] = infop->start_stack - 16 * 4;
373 else
374 regs->u_regs[14] = infop->start_stack - 16 * 8 - STACK_BIAS;
375 #endif
376 }
377
378 #else
379 #define ELF_START_MMAP 0x80000000
380
381 #define elf_check_arch(x) ( (x) == EM_SPARC )
382
383 #define ELF_CLASS ELFCLASS32
384 #define ELF_DATA ELFDATA2MSB
385 #define ELF_ARCH EM_SPARC
386
387 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
388 {
389 regs->psr = 0;
390 regs->pc = infop->entry;
391 regs->npc = regs->pc + 4;
392 regs->y = 0;
393 regs->u_regs[14] = infop->start_stack - 16 * 4;
394 }
395
396 #endif
397 #endif
398
399 #ifdef TARGET_PPC
400
401 #define ELF_START_MMAP 0x80000000
402
403 #if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
404
405 #define elf_check_arch(x) ( (x) == EM_PPC64 )
406
407 #define ELF_CLASS ELFCLASS64
408
409 #else
410
411 #define elf_check_arch(x) ( (x) == EM_PPC )
412
413 #define ELF_CLASS ELFCLASS32
414
415 #endif
416
417 #ifdef TARGET_WORDS_BIGENDIAN
418 #define ELF_DATA ELFDATA2MSB
419 #else
420 #define ELF_DATA ELFDATA2LSB
421 #endif
422 #define ELF_ARCH EM_PPC
423
424 /* Feature masks for the Aux Vector Hardware Capabilities (AT_HWCAP).
425 See arch/powerpc/include/asm/cputable.h. */
426 enum {
427 QEMU_PPC_FEATURE_32 = 0x80000000,
428 QEMU_PPC_FEATURE_64 = 0x40000000,
429 QEMU_PPC_FEATURE_601_INSTR = 0x20000000,
430 QEMU_PPC_FEATURE_HAS_ALTIVEC = 0x10000000,
431 QEMU_PPC_FEATURE_HAS_FPU = 0x08000000,
432 QEMU_PPC_FEATURE_HAS_MMU = 0x04000000,
433 QEMU_PPC_FEATURE_HAS_4xxMAC = 0x02000000,
434 QEMU_PPC_FEATURE_UNIFIED_CACHE = 0x01000000,
435 QEMU_PPC_FEATURE_HAS_SPE = 0x00800000,
436 QEMU_PPC_FEATURE_HAS_EFP_SINGLE = 0x00400000,
437 QEMU_PPC_FEATURE_HAS_EFP_DOUBLE = 0x00200000,
438 QEMU_PPC_FEATURE_NO_TB = 0x00100000,
439 QEMU_PPC_FEATURE_POWER4 = 0x00080000,
440 QEMU_PPC_FEATURE_POWER5 = 0x00040000,
441 QEMU_PPC_FEATURE_POWER5_PLUS = 0x00020000,
442 QEMU_PPC_FEATURE_CELL = 0x00010000,
443 QEMU_PPC_FEATURE_BOOKE = 0x00008000,
444 QEMU_PPC_FEATURE_SMT = 0x00004000,
445 QEMU_PPC_FEATURE_ICACHE_SNOOP = 0x00002000,
446 QEMU_PPC_FEATURE_ARCH_2_05 = 0x00001000,
447 QEMU_PPC_FEATURE_PA6T = 0x00000800,
448 QEMU_PPC_FEATURE_HAS_DFP = 0x00000400,
449 QEMU_PPC_FEATURE_POWER6_EXT = 0x00000200,
450 QEMU_PPC_FEATURE_ARCH_2_06 = 0x00000100,
451 QEMU_PPC_FEATURE_HAS_VSX = 0x00000080,
452 QEMU_PPC_FEATURE_PSERIES_PERFMON_COMPAT = 0x00000040,
453
454 QEMU_PPC_FEATURE_TRUE_LE = 0x00000002,
455 QEMU_PPC_FEATURE_PPC_LE = 0x00000001,
456 };
457
458 #define ELF_HWCAP get_elf_hwcap()
459
460 static uint32_t get_elf_hwcap(void)
461 {
462 CPUState *e = thread_env;
463 uint32_t features = 0;
464
465 /* We don't have to be terribly complete here; the high points are
466 Altivec/FP/SPE support. Anything else is just a bonus. */
467 #define GET_FEATURE(flag, feature) \
468 do {if (e->insns_flags & flag) features |= feature; } while(0)
469 GET_FEATURE(PPC_64B, QEMU_PPC_FEATURE_64);
470 GET_FEATURE(PPC_FLOAT, QEMU_PPC_FEATURE_HAS_FPU);
471 GET_FEATURE(PPC_ALTIVEC, QEMU_PPC_FEATURE_HAS_ALTIVEC);
472 GET_FEATURE(PPC_SPE, QEMU_PPC_FEATURE_HAS_SPE);
473 GET_FEATURE(PPC_SPE_SINGLE, QEMU_PPC_FEATURE_HAS_EFP_SINGLE);
474 GET_FEATURE(PPC_SPE_DOUBLE, QEMU_PPC_FEATURE_HAS_EFP_DOUBLE);
475 GET_FEATURE(PPC_BOOKE, QEMU_PPC_FEATURE_BOOKE);
476 GET_FEATURE(PPC_405_MAC, QEMU_PPC_FEATURE_HAS_4xxMAC);
477 #undef GET_FEATURE
478
479 return features;
480 }
481
482 /*
483 * We need to put in some extra aux table entries to tell glibc what
484 * the cache block size is, so it can use the dcbz instruction safely.
485 */
486 #define AT_DCACHEBSIZE 19
487 #define AT_ICACHEBSIZE 20
488 #define AT_UCACHEBSIZE 21
489 /* A special ignored type value for PPC, for glibc compatibility. */
490 #define AT_IGNOREPPC 22
491 /*
492 * The requirements here are:
493 * - keep the final alignment of sp (sp & 0xf)
494 * - make sure the 32-bit value at the first 16 byte aligned position of
495 * AUXV is greater than 16 for glibc compatibility.
496 * AT_IGNOREPPC is used for that.
497 * - for compatibility with glibc ARCH_DLINFO must always be defined on PPC,
498 * even if DLINFO_ARCH_ITEMS goes to zero or is undefined.
499 */
500 #define DLINFO_ARCH_ITEMS 5
501 #define ARCH_DLINFO \
502 do { \
503 NEW_AUX_ENT(AT_DCACHEBSIZE, 0x20); \
504 NEW_AUX_ENT(AT_ICACHEBSIZE, 0x20); \
505 NEW_AUX_ENT(AT_UCACHEBSIZE, 0); \
506 /* \
507 * Now handle glibc compatibility. \
508 */ \
509 NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \
510 NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \
511 } while (0)
512
513 static inline void init_thread(struct target_pt_regs *_regs, struct image_info *infop)
514 {
515 abi_ulong pos = infop->start_stack;
516 abi_ulong tmp;
517 #if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
518 abi_ulong entry, toc;
519 #endif
520
521 _regs->gpr[1] = infop->start_stack;
522 #if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
523 entry = ldq_raw(infop->entry) + infop->load_addr;
524 toc = ldq_raw(infop->entry + 8) + infop->load_addr;
525 _regs->gpr[2] = toc;
526 infop->entry = entry;
527 #endif
528 _regs->nip = infop->entry;
529     /* Note that this isn't exactly what the regular kernel does,
530      * but it is what the ABI wants and is needed to allow
531      * execution of PPC BSD programs.
532      */
533     /* FIXME - what to do on failure of get_user()? */
534 get_user_ual(_regs->gpr[3], pos);
535 pos += sizeof(abi_ulong);
536 _regs->gpr[4] = pos;
537 for (tmp = 1; tmp != 0; pos += sizeof(abi_ulong))
538 tmp = ldl(pos);
539 _regs->gpr[5] = pos;
540 }
541
542 /* See linux kernel: arch/powerpc/include/asm/elf.h. */
543 #define ELF_NREG 48
544 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
545
546 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUState *env)
547 {
548 int i;
549 target_ulong ccr = 0;
550
551 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
552 (*regs)[i] = tswapl(env->gpr[i]);
553 }
554
555 (*regs)[32] = tswapl(env->nip);
556 (*regs)[33] = tswapl(env->msr);
557 (*regs)[35] = tswapl(env->ctr);
558 (*regs)[36] = tswapl(env->lr);
559 (*regs)[37] = tswapl(env->xer);
560
561 for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
562 ccr |= env->crf[i] << (32 - ((i + 1) * 4));
563 }
564 (*regs)[38] = tswapl(ccr);
565 }
566
567 #define USE_ELF_CORE_DUMP
568 #define ELF_EXEC_PAGESIZE 4096
569
570 #endif
571
572 #ifdef TARGET_MIPS
573
574 #define ELF_START_MMAP 0x80000000
575
576 #define elf_check_arch(x) ( (x) == EM_MIPS )
577
578 #ifdef TARGET_MIPS64
579 #define ELF_CLASS ELFCLASS64
580 #else
581 #define ELF_CLASS ELFCLASS32
582 #endif
583 #ifdef TARGET_WORDS_BIGENDIAN
584 #define ELF_DATA ELFDATA2MSB
585 #else
586 #define ELF_DATA ELFDATA2LSB
587 #endif
588 #define ELF_ARCH EM_MIPS
589
590 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
591 {
592 regs->cp0_status = 2 << CP0St_KSU;
593 regs->cp0_epc = infop->entry;
594 regs->regs[29] = infop->start_stack;
595 }
596
597 /* See linux kernel: arch/mips/include/asm/elf.h. */
598 #define ELF_NREG 45
599 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
600
601 /* See linux kernel: arch/mips/include/asm/reg.h. */
602 enum {
603 #ifdef TARGET_MIPS64
604 TARGET_EF_R0 = 0,
605 #else
606 TARGET_EF_R0 = 6,
607 #endif
608 TARGET_EF_R26 = TARGET_EF_R0 + 26,
609 TARGET_EF_R27 = TARGET_EF_R0 + 27,
610 TARGET_EF_LO = TARGET_EF_R0 + 32,
611 TARGET_EF_HI = TARGET_EF_R0 + 33,
612 TARGET_EF_CP0_EPC = TARGET_EF_R0 + 34,
613 TARGET_EF_CP0_BADVADDR = TARGET_EF_R0 + 35,
614 TARGET_EF_CP0_STATUS = TARGET_EF_R0 + 36,
615 TARGET_EF_CP0_CAUSE = TARGET_EF_R0 + 37
616 };
617
618 /* See linux kernel: arch/mips/kernel/process.c:elf_dump_regs. */
619 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUState *env)
620 {
621 int i;
622
623 for (i = 0; i < TARGET_EF_R0; i++) {
624 (*regs)[i] = 0;
625 }
626 (*regs)[TARGET_EF_R0] = 0;
627
628 for (i = 1; i < ARRAY_SIZE(env->active_tc.gpr); i++) {
629 (*regs)[TARGET_EF_R0 + i] = tswapl(env->active_tc.gpr[i]);
630 }
631
632 (*regs)[TARGET_EF_R26] = 0;
633 (*regs)[TARGET_EF_R27] = 0;
634 (*regs)[TARGET_EF_LO] = tswapl(env->active_tc.LO[0]);
635 (*regs)[TARGET_EF_HI] = tswapl(env->active_tc.HI[0]);
636 (*regs)[TARGET_EF_CP0_EPC] = tswapl(env->active_tc.PC);
637 (*regs)[TARGET_EF_CP0_BADVADDR] = tswapl(env->CP0_BadVAddr);
638 (*regs)[TARGET_EF_CP0_STATUS] = tswapl(env->CP0_Status);
639 (*regs)[TARGET_EF_CP0_CAUSE] = tswapl(env->CP0_Cause);
640 }
641
642 #define USE_ELF_CORE_DUMP
643 #define ELF_EXEC_PAGESIZE 4096
644
645 #endif /* TARGET_MIPS */
646
647 #ifdef TARGET_MICROBLAZE
648
649 #define ELF_START_MMAP 0x80000000
650
651 #define elf_check_arch(x) ( (x) == EM_XILINX_MICROBLAZE )
652
653 #define ELF_CLASS ELFCLASS32
654 #define ELF_DATA ELFDATA2MSB
655 #define ELF_ARCH    EM_XILINX_MICROBLAZE
656
657 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
658 {
659 regs->pc = infop->entry;
660 regs->r1 = infop->start_stack;
661
662 }
663
664 #define ELF_EXEC_PAGESIZE 4096
665
666 #endif /* TARGET_MICROBLAZE */
667
668 #ifdef TARGET_SH4
669
670 #define ELF_START_MMAP 0x80000000
671
672 #define elf_check_arch(x) ( (x) == EM_SH )
673
674 #define ELF_CLASS ELFCLASS32
675 #define ELF_DATA ELFDATA2LSB
676 #define ELF_ARCH EM_SH
677
678 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
679 {
680 /* Check other registers XXXXX */
681 regs->pc = infop->entry;
682 regs->regs[15] = infop->start_stack;
683 }
684
685 #define ELF_EXEC_PAGESIZE 4096
686
687 #endif
688
689 #ifdef TARGET_CRIS
690
691 #define ELF_START_MMAP 0x80000000
692
693 #define elf_check_arch(x) ( (x) == EM_CRIS )
694
695 #define ELF_CLASS ELFCLASS32
696 #define ELF_DATA ELFDATA2LSB
697 #define ELF_ARCH EM_CRIS
698
699 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
700 {
701 regs->erp = infop->entry;
702 }
703
704 #define ELF_EXEC_PAGESIZE 8192
705
706 #endif
707
708 #ifdef TARGET_M68K
709
710 #define ELF_START_MMAP 0x80000000
711
712 #define elf_check_arch(x) ( (x) == EM_68K )
713
714 #define ELF_CLASS ELFCLASS32
715 #define ELF_DATA ELFDATA2MSB
716 #define ELF_ARCH EM_68K
717
718 /* ??? Does this need to do anything?
719 #define ELF_PLAT_INIT(_r) */
720
721 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
722 {
723 regs->usp = infop->start_stack;
724 regs->sr = 0;
725 regs->pc = infop->entry;
726 }
727
728 #define ELF_EXEC_PAGESIZE 8192
729
730 #endif
731
732 #ifdef TARGET_ALPHA
733
734 #define ELF_START_MMAP (0x30000000000ULL)
735
736 #define elf_check_arch(x) ( (x) == ELF_ARCH )
737
738 #define ELF_CLASS ELFCLASS64
739 #define ELF_DATA       ELFDATA2LSB /* Alpha is little-endian */
740 #define ELF_ARCH EM_ALPHA
741
742 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
743 {
744 regs->pc = infop->entry;
745 regs->ps = 8;
746 regs->usp = infop->start_stack;
747 }
748
749 #define ELF_EXEC_PAGESIZE 8192
750
751 #endif /* TARGET_ALPHA */
752
753 #ifndef ELF_PLATFORM
754 #define ELF_PLATFORM (NULL)
755 #endif
756
757 #ifndef ELF_HWCAP
758 #define ELF_HWCAP 0
759 #endif
760
761 #ifdef TARGET_ABI32
762 #undef ELF_CLASS
763 #define ELF_CLASS ELFCLASS32
764 #undef bswaptls
765 #define bswaptls(ptr) bswap32s(ptr)
766 #endif
767
768 #include "elf.h"
769
770 struct exec
771 {
772 unsigned int a_info; /* Use macros N_MAGIC, etc for access */
773 unsigned int a_text; /* length of text, in bytes */
774 unsigned int a_data; /* length of data, in bytes */
775 unsigned int a_bss; /* length of uninitialized data area, in bytes */
776 unsigned int a_syms; /* length of symbol table data in file, in bytes */
777 unsigned int a_entry; /* start address */
778 unsigned int a_trsize; /* length of relocation info for text, in bytes */
779 unsigned int a_drsize; /* length of relocation info for data, in bytes */
780 };
781
782
783 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
784 #define OMAGIC 0407
785 #define NMAGIC 0410
786 #define ZMAGIC 0413
787 #define QMAGIC 0314
788
789 /* max code+data+bss space allocated to elf interpreter */
790 #define INTERP_MAP_SIZE (32 * 1024 * 1024)
791
792 /* max code+data+bss+brk space allocated to ET_DYN executables */
793 #define ET_DYN_MAP_SIZE (128 * 1024 * 1024)
794
795 /* Necessary parameters */
796 #define TARGET_ELF_EXEC_PAGESIZE TARGET_PAGE_SIZE
797 #define TARGET_ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(TARGET_ELF_EXEC_PAGESIZE-1))
798 #define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE-1))
799
800 #define INTERPRETER_NONE 0
801 #define INTERPRETER_AOUT 1
802 #define INTERPRETER_ELF 2
803
804 #define DLINFO_ITEMS 12
805
806 static inline void memcpy_fromfs(void * to, const void * from, unsigned long n)
807 {
808 memcpy(to, from, n);
809 }
810
811 static int load_aout_interp(void * exptr, int interp_fd);
812
813 #ifdef BSWAP_NEEDED
814 static void bswap_ehdr(struct elfhdr *ehdr)
815 {
816 bswap16s(&ehdr->e_type); /* Object file type */
817 bswap16s(&ehdr->e_machine); /* Architecture */
818 bswap32s(&ehdr->e_version); /* Object file version */
819 bswaptls(&ehdr->e_entry); /* Entry point virtual address */
820 bswaptls(&ehdr->e_phoff); /* Program header table file offset */
821 bswaptls(&ehdr->e_shoff); /* Section header table file offset */
822 bswap32s(&ehdr->e_flags); /* Processor-specific flags */
823 bswap16s(&ehdr->e_ehsize); /* ELF header size in bytes */
824 bswap16s(&ehdr->e_phentsize); /* Program header table entry size */
825 bswap16s(&ehdr->e_phnum); /* Program header table entry count */
826 bswap16s(&ehdr->e_shentsize); /* Section header table entry size */
827 bswap16s(&ehdr->e_shnum); /* Section header table entry count */
828 bswap16s(&ehdr->e_shstrndx); /* Section header string table index */
829 }
830
831 static void bswap_phdr(struct elf_phdr *phdr)
832 {
833 bswap32s(&phdr->p_type); /* Segment type */
834 bswaptls(&phdr->p_offset); /* Segment file offset */
835 bswaptls(&phdr->p_vaddr); /* Segment virtual address */
836 bswaptls(&phdr->p_paddr); /* Segment physical address */
837 bswaptls(&phdr->p_filesz); /* Segment size in file */
838 bswaptls(&phdr->p_memsz); /* Segment size in memory */
839 bswap32s(&phdr->p_flags); /* Segment flags */
840 bswaptls(&phdr->p_align); /* Segment alignment */
841 }
842
843 static void bswap_shdr(struct elf_shdr *shdr)
844 {
845 bswap32s(&shdr->sh_name);
846 bswap32s(&shdr->sh_type);
847 bswaptls(&shdr->sh_flags);
848 bswaptls(&shdr->sh_addr);
849 bswaptls(&shdr->sh_offset);
850 bswaptls(&shdr->sh_size);
851 bswap32s(&shdr->sh_link);
852 bswap32s(&shdr->sh_info);
853 bswaptls(&shdr->sh_addralign);
854 bswaptls(&shdr->sh_entsize);
855 }
856
857 static void bswap_sym(struct elf_sym *sym)
858 {
859 bswap32s(&sym->st_name);
860 bswaptls(&sym->st_value);
861 bswaptls(&sym->st_size);
862 bswap16s(&sym->st_shndx);
863 }
864 #endif
865
866 #ifdef USE_ELF_CORE_DUMP
867 static int elf_core_dump(int, const CPUState *);
868
869 #ifdef BSWAP_NEEDED
870 static void bswap_note(struct elf_note *en)
871 {
872 bswap32s(&en->n_namesz);
873 bswap32s(&en->n_descsz);
874 bswap32s(&en->n_type);
875 }
876 #endif /* BSWAP_NEEDED */
877
878 #endif /* USE_ELF_CORE_DUMP */
879
880 /*
881  * 'copy_elf_strings()' copies argument/environment strings from user
882 * memory to free pages in kernel mem. These are in a format ready
883 * to be put directly into the top of new user memory.
884 *
885 */
886 static abi_ulong copy_elf_strings(int argc,char ** argv, void **page,
887 abi_ulong p)
888 {
889 char *tmp, *tmp1, *pag = NULL;
890 int len, offset = 0;
891
892 if (!p) {
893 return 0; /* bullet-proofing */
894 }
895 while (argc-- > 0) {
896 tmp = argv[argc];
897 if (!tmp) {
898             fprintf(stderr, "VFS: argc is wrong\n");
899 exit(-1);
900 }
901 tmp1 = tmp;
902 while (*tmp++);
903 len = tmp - tmp1;
904 if (p < len) { /* this shouldn't happen - 128kB */
905 return 0;
906 }
907 while (len) {
908 --p; --tmp; --len;
909 if (--offset < 0) {
910 offset = p % TARGET_PAGE_SIZE;
911 pag = (char *)page[p/TARGET_PAGE_SIZE];
912 if (!pag) {
913                 pag = (char *)malloc(TARGET_PAGE_SIZE);
914                 if (!pag)
915                     return 0;
916                 memset(pag, 0, TARGET_PAGE_SIZE);
917                 page[p/TARGET_PAGE_SIZE] = pag;
918             }
919 }
920 if (len == 0 || offset == 0) {
921 *(pag + offset) = *tmp;
922 }
923 else {
924 int bytes_to_copy = (len > offset) ? offset : len;
925 tmp -= bytes_to_copy;
926 p -= bytes_to_copy;
927 offset -= bytes_to_copy;
928 len -= bytes_to_copy;
929 memcpy_fromfs(pag + offset, tmp, bytes_to_copy + 1);
930 }
931 }
932 }
933 return p;
934 }
935
936 static abi_ulong setup_arg_pages(abi_ulong p, struct linux_binprm *bprm,
937 struct image_info *info)
938 {
939 abi_ulong stack_base, size, error;
940 int i;
941
942 /* Create enough stack to hold everything. If we don't use
943 * it for args, we'll use it for something else...
944 */
945 size = x86_stack_size;
946 if (size < MAX_ARG_PAGES*TARGET_PAGE_SIZE)
947 size = MAX_ARG_PAGES*TARGET_PAGE_SIZE;
948 error = target_mmap(0,
949 size + qemu_host_page_size,
950 PROT_READ | PROT_WRITE,
951 MAP_PRIVATE | MAP_ANONYMOUS,
952 -1, 0);
953 if (error == -1) {
954 perror("stk mmap");
955 exit(-1);
956 }
957 /* we reserve one extra page at the top of the stack as guard */
958 target_mprotect(error + size, qemu_host_page_size, PROT_NONE);
959
960 stack_base = error + size - MAX_ARG_PAGES*TARGET_PAGE_SIZE;
961 p += stack_base;
962
963 for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
964 if (bprm->page[i]) {
965 info->rss++;
966 /* FIXME - check return value of memcpy_to_target() for failure */
967 memcpy_to_target(stack_base, bprm->page[i], TARGET_PAGE_SIZE);
968 free(bprm->page[i]);
969 }
970 stack_base += TARGET_PAGE_SIZE;
971 }
972 return p;
973 }
974
975 static void set_brk(abi_ulong start, abi_ulong end)
976 {
977 /* page-align the start and end addresses... */
978 start = HOST_PAGE_ALIGN(start);
979 end = HOST_PAGE_ALIGN(end);
980 if (end <= start)
981 return;
982 if(target_mmap(start, end - start,
983 PROT_READ | PROT_WRITE | PROT_EXEC,
984 MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0) == -1) {
985 perror("cannot mmap brk");
986 exit(-1);
987 }
988 }
989
990
991 /* We need to explicitly zero any fractional pages after the data
992 section (i.e. bss). This would contain the junk from the file that
993 should not be in memory. */
994 static void padzero(abi_ulong elf_bss, abi_ulong last_bss)
995 {
996 abi_ulong nbyte;
997
998 if (elf_bss >= last_bss)
999 return;
1000
1001 /* XXX: this is really a hack : if the real host page size is
1002 smaller than the target page size, some pages after the end
1003 of the file may not be mapped. A better fix would be to
1004 patch target_mmap(), but it is more complicated as the file
1005 size must be known */
1006 if (qemu_real_host_page_size < qemu_host_page_size) {
1007 abi_ulong end_addr, end_addr1;
1008 end_addr1 = (elf_bss + qemu_real_host_page_size - 1) &
1009 ~(qemu_real_host_page_size - 1);
1010 end_addr = HOST_PAGE_ALIGN(elf_bss);
1011 if (end_addr1 < end_addr) {
1012 mmap((void *)g2h(end_addr1), end_addr - end_addr1,
1013 PROT_READ|PROT_WRITE|PROT_EXEC,
1014 MAP_FIXED|MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
1015 }
1016 }
1017
1018 nbyte = elf_bss & (qemu_host_page_size-1);
1019 if (nbyte) {
1020 nbyte = qemu_host_page_size - nbyte;
1021 do {
1022 /* FIXME - what to do if put_user() fails? */
1023 put_user_u8(0, elf_bss);
1024 elf_bss++;
1025 } while (--nbyte);
1026 }
1027 }
1028
1029
1030 static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc,
1031 struct elfhdr * exec,
1032 abi_ulong load_addr,
1033 abi_ulong load_bias,
1034 abi_ulong interp_load_addr, int ibcs,
1035 struct image_info *info)
1036 {
1037 abi_ulong sp;
1038 int size;
1039 abi_ulong u_platform;
1040 const char *k_platform;
1041 const int n = sizeof(elf_addr_t);
1042
1043 sp = p;
1044 u_platform = 0;
1045 k_platform = ELF_PLATFORM;
1046 if (k_platform) {
1047 size_t len = strlen(k_platform) + 1;
1048 sp -= (len + n - 1) & ~(n - 1);
1049 u_platform = sp;
1050 /* FIXME - check return value of memcpy_to_target() for failure */
1051 memcpy_to_target(sp, k_platform, len);
1052 }
1053 /*
1054 * Force 16 byte _final_ alignment here for generality.
1055 */
1056 sp = sp &~ (abi_ulong)15;
1057 size = (DLINFO_ITEMS + 1) * 2;
1058 if (k_platform)
1059 size += 2;
1060 #ifdef DLINFO_ARCH_ITEMS
1061 size += DLINFO_ARCH_ITEMS * 2;
1062 #endif
1063 size += envc + argc + 2;
1064 size += (!ibcs ? 3 : 1); /* argc itself */
1065 size *= n;
1066 if (size & 15)
1067 sp -= 16 - (size & 15);
1068
1069 /* This is correct because Linux defines
1070 * elf_addr_t as Elf32_Off / Elf64_Off
1071 */
1072 #define NEW_AUX_ENT(id, val) do { \
1073 sp -= n; put_user_ual(val, sp); \
1074 sp -= n; put_user_ual(id, sp); \
1075 } while(0)
1076
1077 NEW_AUX_ENT (AT_NULL, 0);
1078
1079 /* There must be exactly DLINFO_ITEMS entries here. */
1080 NEW_AUX_ENT(AT_PHDR, (abi_ulong)(load_addr + exec->e_phoff));
1081 NEW_AUX_ENT(AT_PHENT, (abi_ulong)(sizeof (struct elf_phdr)));
1082 NEW_AUX_ENT(AT_PHNUM, (abi_ulong)(exec->e_phnum));
1083 NEW_AUX_ENT(AT_PAGESZ, (abi_ulong)(TARGET_PAGE_SIZE));
1084 NEW_AUX_ENT(AT_BASE, (abi_ulong)(interp_load_addr));
1085 NEW_AUX_ENT(AT_FLAGS, (abi_ulong)0);
1086 NEW_AUX_ENT(AT_ENTRY, load_bias + exec->e_entry);
1087 NEW_AUX_ENT(AT_UID, (abi_ulong) getuid());
1088 NEW_AUX_ENT(AT_EUID, (abi_ulong) geteuid());
1089 NEW_AUX_ENT(AT_GID, (abi_ulong) getgid());
1090 NEW_AUX_ENT(AT_EGID, (abi_ulong) getegid());
1091 NEW_AUX_ENT(AT_HWCAP, (abi_ulong) ELF_HWCAP);
1092 NEW_AUX_ENT(AT_CLKTCK, (abi_ulong) sysconf(_SC_CLK_TCK));
1093 if (k_platform)
1094 NEW_AUX_ENT(AT_PLATFORM, u_platform);
1095 #ifdef ARCH_DLINFO
1096 /*
1097 * ARCH_DLINFO must come last so platform specific code can enforce
1098 * special alignment requirements on the AUXV if necessary (eg. PPC).
1099 */
1100 ARCH_DLINFO;
1101 #endif
1102 #undef NEW_AUX_ENT
1103
1104 info->saved_auxv = sp;
1105
1106 sp = loader_build_argptr(envc, argc, sp, p, !ibcs);
1107 return sp;
1108 }
1109
1110
1111 static abi_ulong load_elf_interp(struct elfhdr * interp_elf_ex,
1112 int interpreter_fd,
1113 abi_ulong *interp_load_addr)
1114 {
1115 struct elf_phdr *elf_phdata = NULL;
1116 struct elf_phdr *eppnt;
1117 abi_ulong load_addr = 0;
1118 int load_addr_set = 0;
1119 int retval;
1120 abi_ulong last_bss, elf_bss;
1121 abi_ulong error;
1122 int i;
1123
1124 elf_bss = 0;
1125 last_bss = 0;
1126 error = 0;
1127
1128 #ifdef BSWAP_NEEDED
1129 bswap_ehdr(interp_elf_ex);
1130 #endif
1131 /* First of all, some simple consistency checks */
1132 if ((interp_elf_ex->e_type != ET_EXEC &&
1133 interp_elf_ex->e_type != ET_DYN) ||
1134 !elf_check_arch(interp_elf_ex->e_machine)) {
1135 return ~((abi_ulong)0UL);
1136 }
1137
1138
1139 /* Now read in all of the header information */
1140
1141 if (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum > TARGET_PAGE_SIZE)
1142 return ~(abi_ulong)0UL;
1143
1144 elf_phdata = (struct elf_phdr *)
1145 malloc(sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
1146
1147 if (!elf_phdata)
1148 return ~((abi_ulong)0UL);
1149
1150 /*
1151 * If the size of this structure has changed, then punt, since
1152 * we will be doing the wrong thing.
1153 */
1154 if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr)) {
1155 free(elf_phdata);
1156 return ~((abi_ulong)0UL);
1157 }
1158
1159 retval = lseek(interpreter_fd, interp_elf_ex->e_phoff, SEEK_SET);
1160 if(retval >= 0) {
1161 retval = read(interpreter_fd,
1162 (char *) elf_phdata,
1163 sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
1164 }
1165 if (retval < 0) {
1166 perror("load_elf_interp");
1167         free(elf_phdata);
1168         exit(-1);
1170 }
1171 #ifdef BSWAP_NEEDED
1172 eppnt = elf_phdata;
1173 for (i=0; i<interp_elf_ex->e_phnum; i++, eppnt++) {
1174 bswap_phdr(eppnt);
1175 }
1176 #endif
1177
1178 if (interp_elf_ex->e_type == ET_DYN) {
1179 /* in order to avoid hardcoding the interpreter load
1180 address in qemu, we allocate a big enough memory zone */
1181 error = target_mmap(0, INTERP_MAP_SIZE,
1182 PROT_NONE, MAP_PRIVATE | MAP_ANON,
1183 -1, 0);
1184 if (error == -1) {
1185 perror("mmap");
1186 exit(-1);
1187 }
1188 load_addr = error;
1189 load_addr_set = 1;
1190 }
1191
1192 eppnt = elf_phdata;
1193 for(i=0; i<interp_elf_ex->e_phnum; i++, eppnt++)
1194 if (eppnt->p_type == PT_LOAD) {
1195 int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
1196 int elf_prot = 0;
1197 abi_ulong vaddr = 0;
1198 abi_ulong k;
1199
1200 if (eppnt->p_flags & PF_R) elf_prot = PROT_READ;
1201 if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
1202 if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
1203 if (interp_elf_ex->e_type == ET_EXEC || load_addr_set) {
1204 elf_type |= MAP_FIXED;
1205 vaddr = eppnt->p_vaddr;
1206 }
1207 error = target_mmap(load_addr+TARGET_ELF_PAGESTART(vaddr),
1208 eppnt->p_filesz + TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr),
1209 elf_prot,
1210 elf_type,
1211 interpreter_fd,
1212 eppnt->p_offset - TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr));
1213
1214 if (error == -1) {
1215 /* Real error */
1216 close(interpreter_fd);
1217 free(elf_phdata);
1218 return ~((abi_ulong)0UL);
1219 }
1220
1221 if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) {
1222 load_addr = error;
1223 load_addr_set = 1;
1224 }
1225
1226 /*
1227 * Find the end of the file mapping for this phdr, and keep
1228 * track of the largest address we see for this.
1229 */
1230 k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
1231 if (k > elf_bss) elf_bss = k;
1232
1233 /*
1234 * Do the same thing for the memory mapping - between
1235 * elf_bss and last_bss is the bss section.
1236 */
1237 k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
1238 if (k > last_bss) last_bss = k;
1239 }
1240
1241 /* Now use mmap to map the library into memory. */
1242
1243 close(interpreter_fd);
1244
1245 /*
1246 * Now fill out the bss section. First pad the last page up
1247 * to the page boundary, and then perform a mmap to make sure
1248 * that there are zeromapped pages up to and including the last
1249 * bss page.
1250 */
1251 padzero(elf_bss, last_bss);
1252 elf_bss = TARGET_ELF_PAGESTART(elf_bss + qemu_host_page_size - 1); /* What we have mapped so far */
1253
1254 /* Map the last of the bss segment */
1255 if (last_bss > elf_bss) {
1256 target_mmap(elf_bss, last_bss-elf_bss,
1257 PROT_READ|PROT_WRITE|PROT_EXEC,
1258 MAP_FIXED|MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
1259 }
1260 free(elf_phdata);
1261
1262 *interp_load_addr = load_addr;
1263 return ((abi_ulong) interp_elf_ex->e_entry) + load_addr;
1264 }
1265
1266 static int symfind(const void *s0, const void *s1)
1267 {
1268 struct elf_sym *key = (struct elf_sym *)s0;
1269 struct elf_sym *sym = (struct elf_sym *)s1;
1270 int result = 0;
1271 if (key->st_value < sym->st_value) {
1272 result = -1;
1273 } else if (key->st_value >= sym->st_value + sym->st_size) {
1274 result = 1;
1275 }
1276 return result;
1277 }
1278
1279 static const char *lookup_symbolxx(struct syminfo *s, target_ulong orig_addr)
1280 {
1281 #if ELF_CLASS == ELFCLASS32
1282 struct elf_sym *syms = s->disas_symtab.elf32;
1283 #else
1284 struct elf_sym *syms = s->disas_symtab.elf64;
1285 #endif
1286
1287 // binary search
1288 struct elf_sym key;
1289 struct elf_sym *sym;
1290
1291 key.st_value = orig_addr;
1292
1293 sym = bsearch(&key, syms, s->disas_num_syms, sizeof(*syms), symfind);
1294 if (sym != NULL) {
1295 return s->disas_strtab + sym->st_name;
1296 }
1297
1298 return "";
1299 }
1300
1301 /* FIXME: This should use elf_ops.h */
1302 static int symcmp(const void *s0, const void *s1)
1303 {
1304 struct elf_sym *sym0 = (struct elf_sym *)s0;
1305 struct elf_sym *sym1 = (struct elf_sym *)s1;
1306 return (sym0->st_value < sym1->st_value)
1307 ? -1
1308 : ((sym0->st_value > sym1->st_value) ? 1 : 0);
1309 }
1310
1311 /* Best attempt to load symbols from this ELF object. */
1312 static void load_symbols(struct elfhdr *hdr, int fd)
1313 {
1314 unsigned int i, nsyms;
1315 struct elf_shdr sechdr, symtab, strtab;
1316 char *strings;
1317 struct syminfo *s;
1318 struct elf_sym *syms;
1319
1320 lseek(fd, hdr->e_shoff, SEEK_SET);
1321 for (i = 0; i < hdr->e_shnum; i++) {
1322 if (read(fd, &sechdr, sizeof(sechdr)) != sizeof(sechdr))
1323 return;
1324 #ifdef BSWAP_NEEDED
1325 bswap_shdr(&sechdr);
1326 #endif
1327 if (sechdr.sh_type == SHT_SYMTAB) {
1328 symtab = sechdr;
1329 lseek(fd, hdr->e_shoff
1330 + sizeof(sechdr) * sechdr.sh_link, SEEK_SET);
1331 if (read(fd, &strtab, sizeof(strtab))
1332 != sizeof(strtab))
1333 return;
1334 #ifdef BSWAP_NEEDED
1335 bswap_shdr(&strtab);
1336 #endif
1337 goto found;
1338 }
1339 }
1340 return; /* Shouldn't happen... */
1341
1342 found:
1343  /* Now we know where the strtab and symtab are. Snarf them. */
1344
1344 s = malloc(sizeof(*s));
1345 syms = malloc(symtab.sh_size);
1346     if (!s || !syms)
1347 return;
1348 s->disas_strtab = strings = malloc(strtab.sh_size);
1349 if (!s->disas_strtab)
1350 return;
1351
1352 lseek(fd, symtab.sh_offset, SEEK_SET);
1353 if (read(fd, syms, symtab.sh_size) != symtab.sh_size)
1354 return;
1355
1356 nsyms = symtab.sh_size / sizeof(struct elf_sym);
1357
1358 i = 0;
1359 while (i < nsyms) {
1360 #ifdef BSWAP_NEEDED
1361 bswap_sym(syms + i);
1362 #endif
1363 // Throw away entries which we do not need.
1364 if (syms[i].st_shndx == SHN_UNDEF ||
1365 syms[i].st_shndx >= SHN_LORESERVE ||
1366 ELF_ST_TYPE(syms[i].st_info) != STT_FUNC) {
1367 nsyms--;
1368 if (i < nsyms) {
1369 syms[i] = syms[nsyms];
1370 }
1371 continue;
1372 }
1373 #if defined(TARGET_ARM) || defined (TARGET_MIPS)
1374 /* The bottom address bit marks a Thumb or MIPS16 symbol. */
1375 syms[i].st_value &= ~(target_ulong)1;
1376 #endif
1377 i++;
1378 }
1379 syms = realloc(syms, nsyms * sizeof(*syms));
1380
1381 qsort(syms, nsyms, sizeof(*syms), symcmp);
1382
1383 lseek(fd, strtab.sh_offset, SEEK_SET);
1384 if (read(fd, strings, strtab.sh_size) != strtab.sh_size)
1385 return;
1386 s->disas_num_syms = nsyms;
1387 #if ELF_CLASS == ELFCLASS32
1388 s->disas_symtab.elf32 = syms;
1389 s->lookup_symbol = (lookup_symbol_t)lookup_symbolxx;
1390 #else
1391 s->disas_symtab.elf64 = syms;
1392 s->lookup_symbol = (lookup_symbol_t)lookup_symbolxx;
1393 #endif
1394 s->next = syminfos;
1395 syminfos = s;
1396 }
1397
1398 int load_elf_binary(struct linux_binprm * bprm, struct target_pt_regs * regs,
1399 struct image_info * info)
1400 {
1401 struct elfhdr elf_ex;
1402 struct elfhdr interp_elf_ex;
1403 struct exec interp_ex;
1404 int interpreter_fd = -1; /* avoid warning */
1405 abi_ulong load_addr, load_bias;
1406 int load_addr_set = 0;
1407 unsigned int interpreter_type = INTERPRETER_NONE;
1408 unsigned char ibcs2_interpreter;
1409 int i;
1410 abi_ulong mapped_addr;
1411 struct elf_phdr * elf_ppnt;
1412 struct elf_phdr *elf_phdata;
1413 abi_ulong elf_bss, k, elf_brk;
1414 int retval;
1415 char * elf_interpreter;
1416 abi_ulong elf_entry, interp_load_addr = 0;
1417 int status;
1418 abi_ulong start_code, end_code, start_data, end_data;
1419 abi_ulong reloc_func_desc = 0;
1420 abi_ulong elf_stack;
1421 char passed_fileno[6];
1422
1423 ibcs2_interpreter = 0;
1424 status = 0;
1425 load_addr = 0;
1426 load_bias = 0;
1427 elf_ex = *((struct elfhdr *) bprm->buf); /* exec-header */
1428 #ifdef BSWAP_NEEDED
1429 bswap_ehdr(&elf_ex);
1430 #endif
1431
1432 /* First of all, some simple consistency checks */
1433 if ((elf_ex.e_type != ET_EXEC && elf_ex.e_type != ET_DYN) ||
1434 (! elf_check_arch(elf_ex.e_machine))) {
1435 return -ENOEXEC;
1436 }
1437
1438 bprm->p = copy_elf_strings(1, &bprm->filename, bprm->page, bprm->p);
1439 bprm->p = copy_elf_strings(bprm->envc,bprm->envp,bprm->page,bprm->p);
1440 bprm->p = copy_elf_strings(bprm->argc,bprm->argv,bprm->page,bprm->p);
1441 if (!bprm->p) {
1442 retval = -E2BIG;
1443 }
1444
1445 /* Now read in all of the header information */
1446 elf_phdata = (struct elf_phdr *)malloc(elf_ex.e_phentsize*elf_ex.e_phnum);
1447 if (elf_phdata == NULL) {
1448 return -ENOMEM;
1449 }
1450
1451 retval = lseek(bprm->fd, elf_ex.e_phoff, SEEK_SET);
1452 if(retval > 0) {
1453 retval = read(bprm->fd, (char *) elf_phdata,
1454 elf_ex.e_phentsize * elf_ex.e_phnum);
1455 }
1456
1457 if (retval < 0) {
1458 perror("load_elf_binary");
1459         free(elf_phdata);
1460         exit(-1);
1462 }
1463
1464 #ifdef BSWAP_NEEDED
1465 elf_ppnt = elf_phdata;
1466 for (i=0; i<elf_ex.e_phnum; i++, elf_ppnt++) {
1467 bswap_phdr(elf_ppnt);
1468 }
1469 #endif
1470 elf_ppnt = elf_phdata;
1471
1472 elf_bss = 0;
1473 elf_brk = 0;
1474
1475
1476 elf_stack = ~((abi_ulong)0UL);
1477 elf_interpreter = NULL;
1478 start_code = ~((abi_ulong)0UL);
1479 end_code = 0;
1480 start_data = 0;
1481 end_data = 0;
1482 interp_ex.a_info = 0;
1483
1484 for(i=0;i < elf_ex.e_phnum; i++) {
1485 if (elf_ppnt->p_type == PT_INTERP) {
1486 if ( elf_interpreter != NULL )
1487 {
1488 free (elf_phdata);
1489 free(elf_interpreter);
1490 close(bprm->fd);
1491 return -EINVAL;
1492 }
1493
1494 /* This is the program interpreter used for
1495 * shared libraries - for now assume that this
1496 * is an a.out format binary
1497 */
1498
1499 elf_interpreter = (char *)malloc(elf_ppnt->p_filesz);
1500
1501 if (elf_interpreter == NULL) {
1502 free (elf_phdata);
1503 close(bprm->fd);
1504 return -ENOMEM;
1505 }
1506
1507 retval = lseek(bprm->fd, elf_ppnt->p_offset, SEEK_SET);
1508 if(retval >= 0) {
1509 retval = read(bprm->fd, elf_interpreter, elf_ppnt->p_filesz);
1510 }
1511 if(retval < 0) {
1512 perror("load_elf_binary2");
1513 exit(-1);
1514 }
1515
1516 /* If the program interpreter is one of these two,
1517 then assume an iBCS2 image. Otherwise assume
1518 a native linux image. */
1519
1520 /* JRP - Need to add X86 lib dir stuff here... */
1521
1522 if (strcmp(elf_interpreter,"/usr/lib/libc.so.1") == 0 ||
1523 strcmp(elf_interpreter,"/usr/lib/ld.so.1") == 0) {
1524 ibcs2_interpreter = 1;
1525 }
1526
1527 #if 0
1528 printf("Using ELF interpreter %s\n", path(elf_interpreter));
1529 #endif
1530 if (retval >= 0) {
1531 retval = open(path(elf_interpreter), O_RDONLY);
1532 if(retval >= 0) {
1533 interpreter_fd = retval;
1534 }
1535 else {
1536 perror(elf_interpreter);
1537 exit(-1);
1538 /* retval = -errno; */
1539 }
1540 }
1541
1542 if (retval >= 0) {
1543 retval = lseek(interpreter_fd, 0, SEEK_SET);
1544 if(retval >= 0) {
1545 retval = read(interpreter_fd,bprm->buf,128);
1546 }
1547 }
1548 if (retval >= 0) {
1549 interp_ex = *((struct exec *) bprm->buf); /* aout exec-header */
1550 interp_elf_ex = *((struct elfhdr *) bprm->buf); /* elf exec-header */
1551 }
1552 if (retval < 0) {
1553 perror("load_elf_binary3");
1554                 free(elf_phdata);
1555                 free(elf_interpreter);
1556                 close(bprm->fd);
1557                 exit(-1);
1559 }
1560 }
1561 elf_ppnt++;
1562 }
1563
1564 /* Some simple consistency checks for the interpreter */
1565 if (elf_interpreter){
1566 interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;
1567
1568 /* Now figure out which format our binary is */
1569 if ((N_MAGIC(interp_ex) != OMAGIC) && (N_MAGIC(interp_ex) != ZMAGIC) &&
1570 (N_MAGIC(interp_ex) != QMAGIC)) {
1571 interpreter_type = INTERPRETER_ELF;
1572 }
1573
1574 if (interp_elf_ex.e_ident[0] != 0x7f ||
1575 strncmp((char *)&interp_elf_ex.e_ident[1], "ELF",3) != 0) {
1576 interpreter_type &= ~INTERPRETER_ELF;
1577 }
1578
1579 if (!interpreter_type) {
1580 free(elf_interpreter);
1581 free(elf_phdata);
1582 close(bprm->fd);
1583 return -ELIBBAD;
1584 }
1585 }
1586
1587 /* OK, we are done with that, now set up the arg stuff,
1588 and then start this sucker up */
1589
1590 {
1591 char * passed_p;
1592
1593 if (interpreter_type == INTERPRETER_AOUT) {
1594 snprintf(passed_fileno, sizeof(passed_fileno), "%d", bprm->fd);
1595 passed_p = passed_fileno;
1596
1597 if (elf_interpreter) {
1598 bprm->p = copy_elf_strings(1,&passed_p,bprm->page,bprm->p);
1599 bprm->argc++;
1600 }
1601 }
1602 if (!bprm->p) {
1603 if (elf_interpreter) {
1604 free(elf_interpreter);
1605 }
1606 free (elf_phdata);
1607 close(bprm->fd);
1608 return -E2BIG;
1609 }
1610 }
1611
1612 /* OK, This is the point of no return */
1613 info->end_data = 0;
1614 info->end_code = 0;
1615 info->start_mmap = (abi_ulong)ELF_START_MMAP;
1616 info->mmap = 0;
1617 elf_entry = (abi_ulong) elf_ex.e_entry;
1618
1619 #if defined(CONFIG_USE_GUEST_BASE)
1620 /*
1621      * In case the user has not explicitly set guest_base, we probe
1622      * here whether we should set it automatically.
1623 */
1624 if (!have_guest_base) {
1625 /*
1626          * Go through the ELF program header table and find out whether
1627          * any of the segments drop below our current mmap_min_addr and,
1628          * in that case, set guest_base to the corresponding address.
1629 */
1630 for (i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum;
1631 i++, elf_ppnt++) {
1632 if (elf_ppnt->p_type != PT_LOAD)
1633 continue;
1634 if (HOST_PAGE_ALIGN(elf_ppnt->p_vaddr) < mmap_min_addr) {
1635 guest_base = HOST_PAGE_ALIGN(mmap_min_addr);
1636 break;
1637 }
1638 }
1639 }
1640 #endif /* CONFIG_USE_GUEST_BASE */
1641
1642 /* Do this so that we can load the interpreter, if need be. We will
1643 change some of these later */
1644 info->rss = 0;
1645 bprm->p = setup_arg_pages(bprm->p, bprm, info);
1646 info->start_stack = bprm->p;
1647
1648     /* Now we do a little grungy work by mmapping the ELF image into
1649      * the correct location in memory. At this point, we assume that
1650      * the image should be loaded at a fixed address, not at a variable
1651 * address.
1652 */
1653
1654 for(i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum; i++, elf_ppnt++) {
1655 int elf_prot = 0;
1656 int elf_flags = 0;
1657 abi_ulong error;
1658
1659 if (elf_ppnt->p_type != PT_LOAD)
1660 continue;
1661
1662 if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ;
1663 if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
1664 if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
1665 elf_flags = MAP_PRIVATE | MAP_DENYWRITE;
1666 if (elf_ex.e_type == ET_EXEC || load_addr_set) {
1667 elf_flags |= MAP_FIXED;
1668 } else if (elf_ex.e_type == ET_DYN) {
1669 /* Try and get dynamic programs out of the way of the default mmap
1670 base, as well as whatever program they might try to exec. This
1671 is because the brk will follow the loader, and is not movable. */
1672 /* NOTE: for qemu, we do a big mmap to get enough space
1673 without hardcoding any address */
1674 error = target_mmap(0, ET_DYN_MAP_SIZE,
1675 PROT_NONE, MAP_PRIVATE | MAP_ANON,
1676 -1, 0);
1677 if (error == -1) {
1678 perror("mmap");
1679 exit(-1);
1680 }
1681 load_bias = TARGET_ELF_PAGESTART(error - elf_ppnt->p_vaddr);
1682 }
1683
1684 error = target_mmap(TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr),
1685 (elf_ppnt->p_filesz +
1686 TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)),
1687 elf_prot,
1688 (MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE),
1689 bprm->fd,
1690 (elf_ppnt->p_offset -
1691 TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)));
1692 if (error == -1) {
1693 perror("mmap");
1694 exit(-1);
1695 }
1696
1697 #ifdef LOW_ELF_STACK
1698 if (TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr) < elf_stack)
1699 elf_stack = TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr);
1700 #endif
1701
1702 if (!load_addr_set) {
1703 load_addr_set = 1;
1704 load_addr = elf_ppnt->p_vaddr - elf_ppnt->p_offset;
1705 if (elf_ex.e_type == ET_DYN) {
1706 load_bias += error -
1707 TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr);
1708 load_addr += load_bias;
1709 reloc_func_desc = load_bias;
1710 }
1711 }
1712 k = elf_ppnt->p_vaddr;
1713 if (k < start_code)
1714 start_code = k;
1715 if (start_data < k)
1716 start_data = k;
1717 k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
1718 if (k > elf_bss)
1719 elf_bss = k;
1720 if ((elf_ppnt->p_flags & PF_X) && end_code < k)
1721 end_code = k;
1722 if (end_data < k)
1723 end_data = k;
1724 k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
1725 if (k > elf_brk) elf_brk = k;
1726 }
1727
1728 elf_entry += load_bias;
1729 elf_bss += load_bias;
1730 elf_brk += load_bias;
1731 start_code += load_bias;
1732 end_code += load_bias;
1733 start_data += load_bias;
1734 end_data += load_bias;
1735
1736 if (elf_interpreter) {
1737 if (interpreter_type & 1) {
1738 elf_entry = load_aout_interp(&interp_ex, interpreter_fd);
1739 }
1740 else if (interpreter_type & 2) {
1741 elf_entry = load_elf_interp(&interp_elf_ex, interpreter_fd,
1742 &interp_load_addr);
1743 }
1744 reloc_func_desc = interp_load_addr;
1745
1746 close(interpreter_fd);
1747 free(elf_interpreter);
1748
1749 if (elf_entry == ~((abi_ulong)0UL)) {
1750 printf("Unable to load interpreter\n");
1751 free(elf_phdata);
1752 exit(-1);
1754 }
1755 }
1756
1757 free(elf_phdata);
1758
1759 if (qemu_log_enabled())
1760 load_symbols(&elf_ex, bprm->fd);
1761
1762 if (interpreter_type != INTERPRETER_AOUT) close(bprm->fd);
1763 info->personality = (ibcs2_interpreter ? PER_SVR4 : PER_LINUX);
1764
1765 #ifdef LOW_ELF_STACK
1766 info->start_stack = bprm->p = elf_stack - 4;
1767 #endif
1768 bprm->p = create_elf_tables(bprm->p,
1769 bprm->argc,
1770 bprm->envc,
1771 &elf_ex,
1772 load_addr, load_bias,
1773 interp_load_addr,
1774 (interpreter_type == INTERPRETER_AOUT ? 0 : 1),
1775 info);
1776 info->load_addr = reloc_func_desc;
1777 info->start_brk = info->brk = elf_brk;
1778 info->end_code = end_code;
1779 info->start_code = start_code;
1780 info->start_data = start_data;
1781 info->end_data = end_data;
1782 info->start_stack = bprm->p;
1783
1784 /* Calling set_brk effectively mmaps the pages that we need for the bss and break
1785 sections */
1786 set_brk(elf_bss, elf_brk);
1787
1788 padzero(elf_bss, elf_brk);
1789
1790 #if 0
1791 printf("(start_brk) %x\n" , info->start_brk);
1792 printf("(end_code) %x\n" , info->end_code);
1793 printf("(start_code) %x\n" , info->start_code);
1794 printf("(end_data) %x\n" , info->end_data);
1795 printf("(start_stack) %x\n" , info->start_stack);
1796 printf("(brk) %x\n" , info->brk);
1797 #endif
1798
1799 if ( info->personality == PER_SVR4 )
1800 {
1801 /* Why this, you ask??? Well SVr4 maps page 0 as read-only,
1802 and some applications "depend" upon this behavior.
1803 Since we do not have the power to recompile these, we
1804 emulate the SVr4 behavior. Sigh. */
1805 mapped_addr = target_mmap(0, qemu_host_page_size, PROT_READ | PROT_EXEC,
1806 MAP_FIXED | MAP_PRIVATE, -1, 0);
1807 }
1808
1809 info->entry = elf_entry;
1810
1811 #ifdef USE_ELF_CORE_DUMP
1812 bprm->core_dump = &elf_core_dump;
1813 #endif
1814
1815 return 0;
1816 }
1817
1818 #ifdef USE_ELF_CORE_DUMP
1819
1820 /*
1821 * Definitions to generate Intel SVR4-like core files.
1822 * These mostly have the same names as the SVR4 types with "target_elf_"
1823 * tacked on the front to prevent clashes with linux definitions,
1824 * and the typedef forms have been avoided. This is mostly like
1825 * the SVR4 structure, but more Linuxy, with things that Linux does
1826 * not support and which gdb doesn't really use excluded.
1827 *
1828  * Fields we don't dump (their contents are zero) in linux-user qemu
1829 * are marked with XXX.
1830 *
1831 * Core dump code is copied from linux kernel (fs/binfmt_elf.c).
1832 *
1833  * Porting the ELF coredump code to a new target is a (quite) simple
1834  * process. First you define USE_ELF_CORE_DUMP in the target ELF code
1835  * (where init_thread() for the target resides):
1836  *
1837  * #define USE_ELF_CORE_DUMP
1838  *
1839  * Next you define the type of register set used for dumping. The ELF
1840  * specification says it must be an array of elf_greg_t with ELF_NREG elements.
1841  *
1842  * typedef <target_regtype> target_elf_greg_t;
1843  * #define ELF_NREG <number of registers>
1844  * typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
1845  *
1846  * The last step is to implement a target-specific function that copies the
1847  * registers from a given cpu into the register set above. The prototype is:
1848  *
1849  * static void elf_core_copy_regs(target_elf_gregset_t *regs,
1850  *                                const CPUState *env);
1851  *
1852  * Parameters:
1853  * regs - copy register values into here (allocated and zeroed by caller)
1854  * env - copy registers from here
1855  *
1856  * An example for the ARM target is provided in this file.
1857 */
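/*
 * Purely as an illustration of the three steps above (not a real target),
 * here is a sketch for a hypothetical target whose CPUState exposes sixteen
 * general-purpose registers in env->regs[] and a program counter in env->pc;
 * these names are invented for the example. The i386/x86_64, ARM, PPC and
 * MIPS blocks earlier in this file are the real instances.
 *
 * #define USE_ELF_CORE_DUMP
 *
 * #define ELF_NREG 17
 * typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
 *
 * static void elf_core_copy_regs(target_elf_gregset_t *regs,
 *                                const CPUState *env)
 * {
 *     int i;
 *
 *     for (i = 0; i < 16; i++) {
 *         (*regs)[i] = tswapl(env->regs[i]);
 *     }
 *     (*regs)[16] = tswapl(env->pc);
 * }
 */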
1858
1859 /* An ELF note in memory */
1860 struct memelfnote {
1861 const char *name;
1862 size_t namesz;
1863 size_t namesz_rounded;
1864 int type;
1865 size_t datasz;
1866 void *data;
1867 size_t notesz;
1868 };
1869
1870 struct target_elf_siginfo {
1871 int si_signo; /* signal number */
1872 int si_code; /* extra code */
1873 int si_errno; /* errno */
1874 };
1875
1876 struct target_elf_prstatus {
1877 struct target_elf_siginfo pr_info; /* Info associated with signal */
1878 short pr_cursig; /* Current signal */
1879 target_ulong pr_sigpend; /* XXX */
1880 target_ulong pr_sighold; /* XXX */
1881 target_pid_t pr_pid;
1882 target_pid_t pr_ppid;
1883 target_pid_t pr_pgrp;
1884 target_pid_t pr_sid;
1885 struct target_timeval pr_utime; /* XXX User time */
1886 struct target_timeval pr_stime; /* XXX System time */
1887 struct target_timeval pr_cutime; /* XXX Cumulative user time */
1888 struct target_timeval pr_cstime; /* XXX Cumulative system time */
1889 target_elf_gregset_t pr_reg; /* GP registers */
1890 int pr_fpvalid; /* XXX */
1891 };
1892
1893 #define ELF_PRARGSZ (80) /* Number of chars for args */
1894
1895 struct target_elf_prpsinfo {
1896 char pr_state; /* numeric process state */
1897 char pr_sname; /* char for pr_state */
1898 char pr_zomb; /* zombie */
1899 char pr_nice; /* nice val */
1900 target_ulong pr_flag; /* flags */
1901 target_uid_t pr_uid;
1902 target_gid_t pr_gid;
1903 target_pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
1904 /* Lots missing */
1905 char pr_fname[16]; /* filename of executable */
1906 char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
1907 };
1908
1909 /* Here is the structure in which status of each thread is captured. */
1910 struct elf_thread_status {
1911 QTAILQ_ENTRY(elf_thread_status) ets_link;
1912 struct target_elf_prstatus prstatus; /* NT_PRSTATUS */
1913 #if 0
1914 elf_fpregset_t fpu; /* NT_PRFPREG */
1915 struct task_struct *thread;
1916 elf_fpxregset_t xfpu; /* ELF_CORE_XFPREG_TYPE */
1917 #endif
1918 struct memelfnote notes[1];
1919 int num_notes;
1920 };
1921
1922 struct elf_note_info {
1923 struct memelfnote *notes;
1924 struct target_elf_prstatus *prstatus; /* NT_PRSTATUS */
1925 struct target_elf_prpsinfo *psinfo; /* NT_PRPSINFO */
1926
1927 QTAILQ_HEAD(thread_list_head, elf_thread_status) thread_list;
1928 #if 0
1929 /*
1930 * Current version of ELF coredump doesn't support
1931 * dumping fp regs etc.
1932 */
1933 elf_fpregset_t *fpu;
1934 elf_fpxregset_t *xfpu;
1935 int thread_status_size;
1936 #endif
1937 int notes_size;
1938 int numnote;
1939 };
1940
1941 struct vm_area_struct {
1942 abi_ulong vma_start; /* start vaddr of memory region */
1943 abi_ulong vma_end; /* end vaddr of memory region */
1944 abi_ulong vma_flags; /* protection etc. flags for the region */
1945 QTAILQ_ENTRY(vm_area_struct) vma_link;
1946 };
1947
1948 struct mm_struct {
1949 QTAILQ_HEAD(, vm_area_struct) mm_mmap;
1950 int mm_count; /* number of mappings */
1951 };
1952
1953 static struct mm_struct *vma_init(void);
1954 static void vma_delete(struct mm_struct *);
1955 static int vma_add_mapping(struct mm_struct *, abi_ulong,
1956 abi_ulong, abi_ulong);
1957 static int vma_get_mapping_count(const struct mm_struct *);
1958 static struct vm_area_struct *vma_first(const struct mm_struct *);
1959 static struct vm_area_struct *vma_next(struct vm_area_struct *);
1960 static abi_ulong vma_dump_size(const struct vm_area_struct *);
1961 static int vma_walker(void *priv, unsigned long start, unsigned long end,
1962 unsigned long flags);
1963
1964 static void fill_elf_header(struct elfhdr *, int, uint16_t, uint32_t);
1965 static void fill_note(struct memelfnote *, const char *, int,
1966 unsigned int, void *);
1967 static void fill_prstatus(struct target_elf_prstatus *, const TaskState *, int);
1968 static int fill_psinfo(struct target_elf_prpsinfo *, const TaskState *);
1969 static void fill_auxv_note(struct memelfnote *, const TaskState *);
1970 static void fill_elf_note_phdr(struct elf_phdr *, int, off_t);
1971 static size_t note_size(const struct memelfnote *);
1972 static void free_note_info(struct elf_note_info *);
1973 static int fill_note_info(struct elf_note_info *, long, const CPUState *);
1974 static void fill_thread_info(struct elf_note_info *, const CPUState *);
1975 static int core_dump_filename(const TaskState *, char *, size_t);
1976
1977 static int dump_write(int, const void *, size_t);
1978 static int write_note(struct memelfnote *, int);
1979 static int write_note_info(struct elf_note_info *, int);
1980
1981 #ifdef BSWAP_NEEDED
1982 static void bswap_prstatus(struct target_elf_prstatus *);
1983 static void bswap_psinfo(struct target_elf_prpsinfo *);
1984
1985 static void bswap_prstatus(struct target_elf_prstatus *prstatus)
1986 {
1987 prstatus->pr_info.si_signo = tswapl(prstatus->pr_info.si_signo);
1988 prstatus->pr_info.si_code = tswapl(prstatus->pr_info.si_code);
1989 prstatus->pr_info.si_errno = tswapl(prstatus->pr_info.si_errno);
1990 prstatus->pr_cursig = tswap16(prstatus->pr_cursig);
1991 prstatus->pr_sigpend = tswapl(prstatus->pr_sigpend);
1992 prstatus->pr_sighold = tswapl(prstatus->pr_sighold);
1993 prstatus->pr_pid = tswap32(prstatus->pr_pid);
1994 prstatus->pr_ppid = tswap32(prstatus->pr_ppid);
1995 prstatus->pr_pgrp = tswap32(prstatus->pr_pgrp);
1996 prstatus->pr_sid = tswap32(prstatus->pr_sid);
1997 /* cpu times are not filled, so we skip them */
1998 /* regs should be in correct format already */
1999 prstatus->pr_fpvalid = tswap32(prstatus->pr_fpvalid);
2000 }
2001
2002 static void bswap_psinfo(struct target_elf_prpsinfo *psinfo)
2003 {
2004 psinfo->pr_flag = tswapl(psinfo->pr_flag);
2005 psinfo->pr_uid = tswap16(psinfo->pr_uid);
2006 psinfo->pr_gid = tswap16(psinfo->pr_gid);
2007 psinfo->pr_pid = tswap32(psinfo->pr_pid);
2008 psinfo->pr_ppid = tswap32(psinfo->pr_ppid);
2009 psinfo->pr_pgrp = tswap32(psinfo->pr_pgrp);
2010 psinfo->pr_sid = tswap32(psinfo->pr_sid);
2011 }
2012 #endif /* BSWAP_NEEDED */
2013
2014 /*
2015 * Minimal support for linux memory regions. These are needed
2016 * when we are finding out what memory exactly belongs to the
2017 * emulated process. No locks are needed here, as long as the
2018 * thread that received the signal is stopped.
2019 */
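
/*
 * A minimal usage sketch of this vma layer; the function name below is made
 * up for illustration and simply mirrors what elf_core_dump() does further
 * down in this file. It is kept inside #if 0 so it is never compiled.
 */
#if 0
static void vma_layer_example(void)
{
    struct mm_struct *mm;
    struct vm_area_struct *vma;

    if ((mm = vma_init()) == NULL)
        return;

    /* populate the list from qemu's view of the guest mappings */
    walk_memory_regions(mm, vma_walker);

    for (vma = vma_first(mm); vma != NULL; vma = vma_next(vma)) {
        /* vma_start, vma_end and vma_flags describe one mapping;
           vma_dump_size() tells how many bytes of it are worth dumping */
    }

    vma_delete(mm);
}
#endif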
2020
2021 static struct mm_struct *vma_init(void)
2022 {
2023 struct mm_struct *mm;
2024
2025 if ((mm = qemu_malloc(sizeof (*mm))) == NULL)
2026 return (NULL);
2027
2028 mm->mm_count = 0;
2029 QTAILQ_INIT(&mm->mm_mmap);
2030
2031 return (mm);
2032 }
2033
2034 static void vma_delete(struct mm_struct *mm)
2035 {
2036 struct vm_area_struct *vma;
2037
2038 while ((vma = vma_first(mm)) != NULL) {
2039 QTAILQ_REMOVE(&mm->mm_mmap, vma, vma_link);
2040 qemu_free(vma);
2041 }
2042 qemu_free(mm);
2043 }
2044
2045 static int vma_add_mapping(struct mm_struct *mm, abi_ulong start,
2046 abi_ulong end, abi_ulong flags)
2047 {
2048 struct vm_area_struct *vma;
2049
2050 if ((vma = qemu_mallocz(sizeof (*vma))) == NULL)
2051 return (-1);
2052
2053 vma->vma_start = start;
2054 vma->vma_end = end;
2055 vma->vma_flags = flags;
2056
2057 QTAILQ_INSERT_TAIL(&mm->mm_mmap, vma, vma_link);
2058 mm->mm_count++;
2059
2060 return (0);
2061 }
2062
2063 static struct vm_area_struct *vma_first(const struct mm_struct *mm)
2064 {
2065 return (QTAILQ_FIRST(&mm->mm_mmap));
2066 }
2067
2068 static struct vm_area_struct *vma_next(struct vm_area_struct *vma)
2069 {
2070 return (QTAILQ_NEXT(vma, vma_link));
2071 }
2072
2073 static int vma_get_mapping_count(const struct mm_struct *mm)
2074 {
2075 return (mm->mm_count);
2076 }
2077
2078 /*
2079 * Calculate file (dump) size of given memory region.
2080 */
2081 static abi_ulong vma_dump_size(const struct vm_area_struct *vma)
2082 {
2083 /* if we cannot even read the first page, skip it */
2084 if (!access_ok(VERIFY_READ, vma->vma_start, TARGET_PAGE_SIZE))
2085 return (0);
2086
2087 /*
2088 * Usually we don't dump executable pages as they contain
2089 * non-writable code that the debugger can read directly from
2090 * the target library etc. However, thread stacks are also
2091 * marked executable, so we read in the first page of a given
2092 * region and check whether it contains an ELF header. If
2093 * there is no ELF header, we dump the region.
2094 */
2095 if (vma->vma_flags & PROT_EXEC) {
2096 char page[TARGET_PAGE_SIZE];
2097
2098 copy_from_user(page, vma->vma_start, sizeof (page));
2099 if ((page[EI_MAG0] == ELFMAG0) &&
2100 (page[EI_MAG1] == ELFMAG1) &&
2101 (page[EI_MAG2] == ELFMAG2) &&
2102 (page[EI_MAG3] == ELFMAG3)) {
2103 /*
2104 * Mappings are possibly from an ELF binary. Don't dump
2105 * them.
2106 */
2107 return (0);
2108 }
2109 }
2110
2111 return (vma->vma_end - vma->vma_start);
2112 }
2113
2114 static int vma_walker(void *priv, unsigned long start, unsigned long end,
2115 unsigned long flags)
2116 {
2117 struct mm_struct *mm = (struct mm_struct *)priv;
2118
2119 /*
2120 * Don't dump anything that qemu has reserved for internal use.
2121 */
2122 if (flags & PAGE_RESERVED)
2123 return (0);
2124
2125 vma_add_mapping(mm, start, end, flags);
2126 return (0);
2127 }
2128
2129 static void fill_note(struct memelfnote *note, const char *name, int type,
2130 unsigned int sz, void *data)
2131 {
2132 unsigned int namesz;
2133
2134 namesz = strlen(name) + 1;
2135 note->name = name;
2136 note->namesz = namesz;
2137 note->namesz_rounded = roundup(namesz, sizeof (int32_t));
2138 note->type = type;
2139 note->datasz = roundup(sz, sizeof (int32_t));
2140 note->data = data;
2141
2142 /*
2143 * We calculate the rounded-up note size here, as specified
2144 * by the ELF specification.
2145 */
2146 note->notesz = sizeof (struct elf_note) +
2147 note->namesz_rounded + note->datasz;
2148 }
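
/*
 * Worked example of the rounding above (sizes are illustrative): for a
 * "CORE"/NT_PRSTATUS note, namesz is strlen("CORE") + 1 = 5, which rounds
 * up to namesz_rounded = 8; datasz is sizeof(struct target_elf_prstatus)
 * rounded up to a multiple of 4; the value written to the file is then
 * notesz = sizeof(struct elf_note) + 8 + datasz.
 */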
2149
2150 static void fill_elf_header(struct elfhdr *elf, int segs, uint16_t machine,
2151 uint32_t flags)
2152 {
2153 (void) memset(elf, 0, sizeof(*elf));
2154
2155 (void) memcpy(elf->e_ident, ELFMAG, SELFMAG);
2156 elf->e_ident[EI_CLASS] = ELF_CLASS;
2157 elf->e_ident[EI_DATA] = ELF_DATA;
2158 elf->e_ident[EI_VERSION] = EV_CURRENT;
2159 elf->e_ident[EI_OSABI] = ELF_OSABI;
2160
2161 elf->e_type = ET_CORE;
2162 elf->e_machine = machine;
2163 elf->e_version = EV_CURRENT;
2164 elf->e_phoff = sizeof(struct elfhdr);
2165 elf->e_flags = flags;
2166 elf->e_ehsize = sizeof(struct elfhdr);
2167 elf->e_phentsize = sizeof(struct elf_phdr);
2168 elf->e_phnum = segs;
2169
2170 #ifdef BSWAP_NEEDED
2171 bswap_ehdr(elf);
2172 #endif
2173 }
2174
2175 static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, off_t offset)
2176 {
2177 phdr->p_type = PT_NOTE;
2178 phdr->p_offset = offset;
2179 phdr->p_vaddr = 0;
2180 phdr->p_paddr = 0;
2181 phdr->p_filesz = sz;
2182 phdr->p_memsz = 0;
2183 phdr->p_flags = 0;
2184 phdr->p_align = 0;
2185
2186 #ifdef BSWAP_NEEDED
2187 bswap_phdr(phdr);
2188 #endif
2189 }
2190
2191 static size_t note_size(const struct memelfnote *note)
2192 {
2193 return (note->notesz);
2194 }
2195
2196 static void fill_prstatus(struct target_elf_prstatus *prstatus,
2197 const TaskState *ts, int signr)
2198 {
2199 (void) memset(prstatus, 0, sizeof (*prstatus));
2200 prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
2201 prstatus->pr_pid = ts->ts_tid;
2202 prstatus->pr_ppid = getppid();
2203 prstatus->pr_pgrp = getpgrp();
2204 prstatus->pr_sid = getsid(0);
2205
2206 #ifdef BSWAP_NEEDED
2207 bswap_prstatus(prstatus);
2208 #endif
2209 }
2210
2211 static int fill_psinfo(struct target_elf_prpsinfo *psinfo, const TaskState *ts)
2212 {
2213 char *filename, *base_filename;
2214 unsigned int i, len;
2215
2216 (void) memset(psinfo, 0, sizeof (*psinfo));
2217
2218 len = ts->info->arg_end - ts->info->arg_start;
2219 if (len >= ELF_PRARGSZ)
2220 len = ELF_PRARGSZ - 1;
2221 if (copy_from_user(&psinfo->pr_psargs, ts->info->arg_start, len))
2222 return -EFAULT;
2223 for (i = 0; i < len; i++)
2224 if (psinfo->pr_psargs[i] == 0)
2225 psinfo->pr_psargs[i] = ' ';
2226 psinfo->pr_psargs[len] = 0;
2227
2228 psinfo->pr_pid = getpid();
2229 psinfo->pr_ppid = getppid();
2230 psinfo->pr_pgrp = getpgrp();
2231 psinfo->pr_sid = getsid(0);
2232 psinfo->pr_uid = getuid();
2233 psinfo->pr_gid = getgid();
2234
2235 filename = strdup(ts->bprm->filename);
2236 base_filename = strdup(basename(filename));
2237 (void) strncpy(psinfo->pr_fname, base_filename,
2238 sizeof(psinfo->pr_fname));
2239 free(base_filename);
2240 free(filename);
2241
2242 #ifdef BSWAP_NEEDED
2243 bswap_psinfo(psinfo);
2244 #endif
2245 return (0);
2246 }
2247
2248 static void fill_auxv_note(struct memelfnote *note, const TaskState *ts)
2249 {
2250 elf_addr_t auxv = (elf_addr_t)ts->info->saved_auxv;
2251 elf_addr_t orig_auxv = auxv;
2252 abi_ulong val;
2253 void *ptr;
2254 int i, len;
2255
2256 /*
2257 * The auxiliary vector is stored on the target process stack. It contains
2258 * {type, value} pairs that we need to dump into the note. This is not
2259 * strictly necessary, but we do it here for the sake of completeness.
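*
* As an illustration (entry types only, the values are target dependent),
* the dumped pairs look like { AT_PHDR, <addr> }, { AT_PAGESZ, <size> },
* ..., terminated by { AT_NULL, 0 }.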
2260 */
2261
2262 /* find out length of the vector, AT_NULL is the terminator */
2263 i = len = 0;
2264 do {
2265 get_user_ual(val, auxv);
2266 i += 2;
2267 auxv += 2 * sizeof (elf_addr_t);
2268 } while (val != AT_NULL);
2269 len = i * sizeof (elf_addr_t);
2270
2271 /* read in whole auxv vector and copy it to memelfnote */
2272 ptr = lock_user(VERIFY_READ, orig_auxv, len, 0);
2273 if (ptr != NULL) {
2274 fill_note(note, "CORE", NT_AUXV, len, ptr);
2275 unlock_user(ptr, orig_auxv, 0);
2276 }
2277 }
2278
2279 /*
2280 * Constructs the name of the coredump file. We use the following
2281 * convention for the name:
2282 * qemu_<basename-of-target-binary>_<date>-<time>_<pid>.core
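* for example (illustrative values only): qemu_ls_20111007-143052_4242.core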
2283 *
2284 * Returns 0 in case of success, -1 otherwise (errno is set).
2285 */
2286 static int core_dump_filename(const TaskState *ts, char *buf,
2287 size_t bufsize)
2288 {
2289 char timestamp[64];
2290 char *filename = NULL;
2291 char *base_filename = NULL;
2292 struct timeval tv;
2293 struct tm tm;
2294
2295 assert(bufsize >= PATH_MAX);
2296
2297 if (gettimeofday(&tv, NULL) < 0) {
2298 (void) fprintf(stderr, "unable to get current timestamp: %s\n",
2299 strerror(errno));
2300 return (-1);
2301 }
2302
2303 filename = strdup(ts->bprm->filename);
2304 base_filename = strdup(basename(filename));
2305 (void) strftime(timestamp, sizeof (timestamp), "%Y%m%d-%H%M%S",
2306 localtime_r(&tv.tv_sec, &tm));
2307 (void) snprintf(buf, bufsize, "qemu_%s_%s_%d.core",
2308 base_filename, timestamp, (int)getpid());
2309 free(base_filename);
2310 free(filename);
2311
2312 return (0);
2313 }
2314
2315 static int dump_write(int fd, const void *ptr, size_t size)
2316 {
2317 const char *bufp = (const char *)ptr;
2318 ssize_t bytes_written, bytes_left;
2319 struct rlimit dumpsize;
2320 off_t pos;
2321
2322 bytes_written = 0;
2323 getrlimit(RLIMIT_CORE, &dumpsize);
2324 if ((pos = lseek(fd, 0, SEEK_CUR)) == -1) {
2325 if (errno == ESPIPE) { /* not a seekable stream */
2326 bytes_left = size;
2327 } else {
2328 return pos;
2329 }
2330 } else {
2331 if (dumpsize.rlim_cur <= pos) {
2332 return -1;
2333 } else if (dumpsize.rlim_cur == RLIM_INFINITY) {
2334 bytes_left = size;
2335 } else {
2336 size_t limit_left = dumpsize.rlim_cur - pos;
2337 bytes_left = limit_left >= size ? size : limit_left;
2338 }
2339 }
2340
2341 /*
2342 * Under normal conditions a single write(2) should do, but
2343 * in the case of a socket etc. this mechanism is more portable.
2344 */
2345 do {
2346 bytes_written = write(fd, bufp, bytes_left);
2347 if (bytes_written < 0) {
2348 if (errno == EINTR)
2349 continue;
2350 return (-1);
2351 } else if (bytes_written == 0) { /* eof */
2352 return (-1);
2353 }
2354 bufp += bytes_written;
2355 bytes_left -= bytes_written;
2356 } while (bytes_left > 0);
2357
2358 return (0);
2359 }
2360
2361 static int write_note(struct memelfnote *men, int fd)
2362 {
2363 struct elf_note en;
2364
2365 en.n_namesz = men->namesz;
2366 en.n_type = men->type;
2367 en.n_descsz = men->datasz;
2368
2369 #ifdef BSWAP_NEEDED
2370 bswap_note(&en);
2371 #endif
2372
2373 if (dump_write(fd, &en, sizeof(en)) != 0)
2374 return (-1);
2375 if (dump_write(fd, men->name, men->namesz_rounded) != 0)
2376 return (-1);
2377 if (dump_write(fd, men->data, men->datasz) != 0)
2378 return (-1);
2379
2380 return (0);
2381 }
2382
2383 static void fill_thread_info(struct elf_note_info *info, const CPUState *env)
2384 {
2385 TaskState *ts = (TaskState *)env->opaque;
2386 struct elf_thread_status *ets;
2387
2388 ets = qemu_mallocz(sizeof (*ets));
2389 ets->num_notes = 1; /* only prstatus is dumped */
2390 fill_prstatus(&ets->prstatus, ts, 0);
2391 elf_core_copy_regs(&ets->prstatus.pr_reg, env);
2392 fill_note(&ets->notes[0], "CORE", NT_PRSTATUS, sizeof (ets->prstatus),
2393 &ets->prstatus);
2394
2395 QTAILQ_INSERT_TAIL(&info->thread_list, ets, ets_link);
2396
2397 info->notes_size += note_size(&ets->notes[0]);
2398 }
2399
2400 static int fill_note_info(struct elf_note_info *info,
2401 long signr, const CPUState *env)
2402 {
2403 #define NUMNOTES 3
2404 CPUState *cpu = NULL;
2405 TaskState *ts = (TaskState *)env->opaque;
2406 int i;
2407
2408 (void) memset(info, 0, sizeof (*info));
2409
2410 QTAILQ_INIT(&info->thread_list);
2411
2412 info->notes = qemu_mallocz(NUMNOTES * sizeof (struct memelfnote));
2413 if (info->notes == NULL)
2414 return (-ENOMEM);
2415 info->prstatus = qemu_mallocz(sizeof (*info->prstatus));
2416 if (info->prstatus == NULL)
2417 return (-ENOMEM);
2418 info->psinfo = qemu_mallocz(sizeof (*info->psinfo));
2419 if (info->psinfo == NULL)
2420 return (-ENOMEM);
2421
2422 /*
2423 * First fill in status (and registers) of current thread
2424 * including process info & aux vector.
2425 */
2426 fill_prstatus(info->prstatus, ts, signr);
2427 elf_core_copy_regs(&info->prstatus->pr_reg, env);
2428 fill_note(&info->notes[0], "CORE", NT_PRSTATUS,
2429 sizeof (*info->prstatus), info->prstatus);
2430 fill_psinfo(info->psinfo, ts);
2431 fill_note(&info->notes[1], "CORE", NT_PRPSINFO,
2432 sizeof (*info->psinfo), info->psinfo);
2433 fill_auxv_note(&info->notes[2], ts);
2434 info->numnote = 3;
2435
2436 info->notes_size = 0;
2437 for (i = 0; i < info->numnote; i++)
2438 info->notes_size += note_size(&info->notes[i]);
2439
2440 /* read and fill status of all threads */
2441 cpu_list_lock();
2442 for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
2443 if (cpu == thread_env)
2444 continue;
2445 fill_thread_info(info, cpu);
2446 }
2447 cpu_list_unlock();
2448
2449 return (0);
2450 }
2451
2452 static void free_note_info(struct elf_note_info *info)
2453 {
2454 struct elf_thread_status *ets;
2455
2456 while (!QTAILQ_EMPTY(&info->thread_list)) {
2457 ets = QTAILQ_FIRST(&info->thread_list);
2458 QTAILQ_REMOVE(&info->thread_list, ets, ets_link);
2459 qemu_free(ets);
2460 }
2461
2462 qemu_free(info->prstatus);
2463 qemu_free(info->psinfo);
2464 qemu_free(info->notes);
2465 }
2466
2467 static int write_note_info(struct elf_note_info *info, int fd)
2468 {
2469 struct elf_thread_status *ets;
2470 int i, error = 0;
2471
2472 /* write prstatus, psinfo and auxv for current thread */
2473 for (i = 0; i < info->numnote; i++)
2474 if ((error = write_note(&info->notes[i], fd)) != 0)
2475 return (error);
2476
2477 /* write prstatus for each thread */
2478 for (ets = info->thread_list.tqh_first; ets != NULL;
2479 ets = ets->ets_link.tqe_next) {
2480 if ((error = write_note(&ets->notes[0], fd)) != 0)
2481 return (error);
2482 }
2483
2484 return (0);
2485 }
2486
2487 /*
2488 * Write out ELF coredump.
2489 *
2490 * See documentation of ELF object file format in:
2491 * http://www.caldera.com/developers/devspecs/gabi41.pdf
2492 *
2493 * The coredump format in Linux is the following:
2494 *
2495 * 0 +----------------------+ \
2496 * | ELF header | ET_CORE |
2497 * +----------------------+ |
2498 * | ELF program headers | |--- headers
2499 * | - NOTE section | |
2500 * | - PT_LOAD sections | |
2501 * +----------------------+ /
2502 * | NOTEs: |
2503 * | - NT_PRSTATUS |
2504 * | - NT_PRPSINFO |
2505 * | - NT_AUXV |
2506 * +----------------------+ <-- aligned to target page
2507 * | Process memory dump |
2508 * : :
2509 * . .
2510 * : :
2511 * | |
2512 * +----------------------+
2513 *
2514 * NT_PRSTATUS -> struct elf_prstatus (per thread)
2515 * NT_PRPSINFO -> struct elf_prpsinfo
2516 * NT_AUXV is an array of { type, value } pairs (see fill_auxv_note()).
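*
* Illustrative layout arithmetic (assuming ELFCLASS32, i.e. a 52-byte ELF
* header and 32-byte program headers, and, say, 5 memory segments): the
* headers occupy 52 + (5 + 1) * 32 = 244 bytes, the notes are written
* starting at file offset 244, and the process memory dump starts at the
* next page boundary after the end of the notes.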
2517 *
2518 * The format follows the System V format as closely as possible. Current
2519 * version limitations are as follows:
2520 * - no floating point registers are dumped
2521 *
2522 * Function returns 0 in case of success, negative errno otherwise.
2523 *
2524 * TODO: make this work also at runtime: it should be
2525 * possible to force a coredump from a running process and then
2526 * continue processing. For example qemu could set up a SIGUSR2
2527 * handler (provided that the target process hasn't registered a
2528 * handler for it) that does the dump when the signal is received.
2529 */
2530 static int elf_core_dump(int signr, const CPUState *env)
2531 {
2532 const TaskState *ts = (const TaskState *)env->opaque;
2533 struct vm_area_struct *vma = NULL;
2534 char corefile[PATH_MAX];
2535 struct elf_note_info info = { 0 };
2536 struct elfhdr elf;
2537 struct elf_phdr phdr;
2538 struct rlimit dumpsize;
2539 struct mm_struct *mm = NULL;
2540 off_t offset = 0, data_offset = 0;
2541 int segs = 0;
2542 int fd = -1;
2543
2544 errno = 0;
2545 getrlimit(RLIMIT_CORE, &dumpsize);
2546 if (dumpsize.rlim_cur == 0)
2547 return 0;
2548
2549 if (core_dump_filename(ts, corefile, sizeof (corefile)) < 0)
2550 return (-errno);
2551
2552 if ((fd = open(corefile, O_WRONLY | O_CREAT,
2553 S_IRUSR|S_IWUSR|S_IRGRP|S_IROTH)) < 0)
2554 return (-errno);
2555
2556 /*
2557 * Walk through the target process memory mappings and
2558 * set up a structure containing this information. After
2559 * this point the vma_xxx functions can be used.
2560 */
2561 if ((mm = vma_init()) == NULL)
2562 goto out;
2563
2564 walk_memory_regions(mm, vma_walker);
2565 segs = vma_get_mapping_count(mm);
2566
2567 /*
2568 * Construct a valid coredump ELF header. We also
2569 * add one more segment for the notes.
2570 */
2571 fill_elf_header(&elf, segs + 1, ELF_MACHINE, 0);
2572 if (dump_write(fd, &elf, sizeof (elf)) != 0)
2573 goto out;
2574
2575 /* fill in in-memory version of notes */
2576 if (fill_note_info(&info, signr, env) < 0)
2577 goto out;
2578
2579 offset += sizeof (elf); /* elf header */
2580 offset += (segs + 1) * sizeof (struct elf_phdr); /* program headers */
2581
2582 /* write out notes program header */
2583 fill_elf_note_phdr(&phdr, info.notes_size, offset);
2584
2585 offset += info.notes_size;
2586 if (dump_write(fd, &phdr, sizeof (phdr)) != 0)
2587 goto out;
2588
2589 /*
2590 * The ELF specification wants data to start at a page boundary,
2591 * so we align it here.
2592 */
2593 offset = roundup(offset, ELF_EXEC_PAGESIZE);
2594
2595 /*
2596 * Write program headers for memory regions mapped in
2597 * the target process.
2598 */
2599 for (vma = vma_first(mm); vma != NULL; vma = vma_next(vma)) {
2600 (void) memset(&phdr, 0, sizeof (phdr));
2601
2602 phdr.p_type = PT_LOAD;
2603 phdr.p_offset = offset;
2604 phdr.p_vaddr = vma->vma_start;
2605 phdr.p_paddr = 0;
2606 phdr.p_filesz = vma_dump_size(vma);
2607 offset += phdr.p_filesz;
2608 phdr.p_memsz = vma->vma_end - vma->vma_start;
2609 phdr.p_flags = vma->vma_flags & PROT_READ ? PF_R : 0;
2610 if (vma->vma_flags & PROT_WRITE)
2611 phdr.p_flags |= PF_W;
2612 if (vma->vma_flags & PROT_EXEC)
2613 phdr.p_flags |= PF_X;
2614 phdr.p_align = ELF_EXEC_PAGESIZE;
2615
2616 dump_write(fd, &phdr, sizeof (phdr));
2617 }
2618
2619 /*
2620 * Next we write the notes just after the program headers. No
2621 * alignment is needed here.
2622 */
2623 if (write_note_info(&info, fd) < 0)
2624 goto out;
2625
2626 /* align data to page boundary */
2627 data_offset = lseek(fd, 0, SEEK_CUR);
2628 data_offset = TARGET_PAGE_ALIGN(data_offset);
2629 if (lseek(fd, data_offset, SEEK_SET) != data_offset)
2630 goto out;
2631
2632 /*
2633 * Finally we can dump the process memory into the corefile as well.
2634 */
2635 for (vma = vma_first(mm); vma != NULL; vma = vma_next(vma)) {
2636 abi_ulong addr;
2637 abi_ulong end;
2638
2639 end = vma->vma_start + vma_dump_size(vma);
2640
2641 for (addr = vma->vma_start; addr < end;
2642 addr += TARGET_PAGE_SIZE) {
2643 char page[TARGET_PAGE_SIZE];
2644 int error;
2645
2646 /*
2647 * Read in a page from target process memory and
2648 * write it to the coredump file.
2649 */
2650 error = copy_from_user(page, addr, sizeof (page));
2651 if (error != 0) {
2652 (void) fprintf(stderr, "unable to dump " TARGET_ABI_FMT_lx "\n",
2653 addr);
2654 errno = -error;
2655 goto out;
2656 }
2657 if (dump_write(fd, page, TARGET_PAGE_SIZE) < 0)
2658 goto out;
2659 }
2660 }
2661
2662 out:
2663 free_note_info(&info);
2664 if (mm != NULL)
2665 vma_delete(mm);
2666 (void) close(fd);
2667
2668 if (errno != 0)
2669 return (-errno);
2670 return (0);
2671 }
2672
2673 #endif /* USE_ELF_CORE_DUMP */
2674
2675 static int load_aout_interp(void * exptr, int interp_fd)
2676 {
2677 printf("a.out interpreter not yet supported\n");
2678 return(0);
2679 }
2680
2681 void do_init_thread(struct target_pt_regs *regs, struct image_info *infop)
2682 {
2683 init_thread(regs, infop);
2684 }