1 /* This is the Linux kernel elf-loading code, ported into user space */
2 #include <sys/time.h>
3 #include <sys/param.h>
4
5 #include <stdio.h>
6 #include <sys/types.h>
7 #include <fcntl.h>
8 #include <errno.h>
9 #include <unistd.h>
10 #include <sys/mman.h>
11 #include <sys/resource.h>
12 #include <stdlib.h>
13 #include <string.h>
14 #include <time.h>
15
16 #include "qemu.h"
17 #include "disas.h"
18
19 #ifdef _ARCH_PPC64
20 #undef ARCH_DLINFO
21 #undef ELF_PLATFORM
22 #undef ELF_HWCAP
23 #undef ELF_CLASS
24 #undef ELF_DATA
25 #undef ELF_ARCH
26 #endif
27
28 #define ELF_OSABI ELFOSABI_SYSV
29
30 /* from personality.h */
31
32 /*
33 * Flags for bug emulation.
34 *
35 * These occupy the top three bytes.
36 */
37 enum {
38 ADDR_NO_RANDOMIZE = 0x0040000, /* disable randomization of VA space */
39 FDPIC_FUNCPTRS = 0x0080000, /* userspace function ptrs point to descriptors
40 * (signal handling)
41 */
42 MMAP_PAGE_ZERO = 0x0100000,
43 ADDR_COMPAT_LAYOUT = 0x0200000,
44 READ_IMPLIES_EXEC = 0x0400000,
45 ADDR_LIMIT_32BIT = 0x0800000,
46 SHORT_INODE = 0x1000000,
47 WHOLE_SECONDS = 0x2000000,
48 STICKY_TIMEOUTS = 0x4000000,
49 ADDR_LIMIT_3GB = 0x8000000,
50 };
51
52 /*
53 * Personality types.
54 *
55 * These go in the low byte. Avoid using the top bit, it will
56 * conflict with error returns.
57 */
58 enum {
59 PER_LINUX = 0x0000,
60 PER_LINUX_32BIT = 0x0000 | ADDR_LIMIT_32BIT,
61 PER_LINUX_FDPIC = 0x0000 | FDPIC_FUNCPTRS,
62 PER_SVR4 = 0x0001 | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
63 PER_SVR3 = 0x0002 | STICKY_TIMEOUTS | SHORT_INODE,
64 PER_SCOSVR3 = 0x0003 | STICKY_TIMEOUTS |
65 WHOLE_SECONDS | SHORT_INODE,
66 PER_OSR5 = 0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS,
67 PER_WYSEV386 = 0x0004 | STICKY_TIMEOUTS | SHORT_INODE,
68 PER_ISCR4 = 0x0005 | STICKY_TIMEOUTS,
69 PER_BSD = 0x0006,
70 PER_SUNOS = 0x0006 | STICKY_TIMEOUTS,
71 PER_XENIX = 0x0007 | STICKY_TIMEOUTS | SHORT_INODE,
72 PER_LINUX32 = 0x0008,
73 PER_LINUX32_3GB = 0x0008 | ADDR_LIMIT_3GB,
74 PER_IRIX32 = 0x0009 | STICKY_TIMEOUTS,/* IRIX5 32-bit */
75 PER_IRIXN32 = 0x000a | STICKY_TIMEOUTS,/* IRIX6 new 32-bit */
76 PER_IRIX64 = 0x000b | STICKY_TIMEOUTS,/* IRIX6 64-bit */
77 PER_RISCOS = 0x000c,
78 PER_SOLARIS = 0x000d | STICKY_TIMEOUTS,
79 PER_UW7 = 0x000e | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
80 PER_OSF4 = 0x000f, /* OSF/1 v4 */
81 PER_HPUX = 0x0010,
82 PER_MASK = 0x00ff,
83 };
84
85 /*
86 * Return the base personality without flags.
87 */
88 #define personality(pers) (pers & PER_MASK)
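/*
 * Illustration: the emulation flags occupy the upper bytes and are simply
 * masked off, e.g. personality(PER_LINUX32_3GB) == PER_LINUX32
 * (0x8000008 & 0xff == 0x08); the SPARC64 init_thread() below uses this to
 * pick the 32-bit stack layout for PER_LINUX32 personalities.
 */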
89
90 /* this flag is ineffective under Linux too; it should be deleted */
91 #ifndef MAP_DENYWRITE
92 #define MAP_DENYWRITE 0
93 #endif
94
95 /* should probably go in elf.h */
96 #ifndef ELIBBAD
97 #define ELIBBAD 80
98 #endif
99
100 typedef target_ulong target_elf_greg_t;
101 #ifdef USE_UID16
102 typedef uint16_t target_uid_t;
103 typedef uint16_t target_gid_t;
104 #else
105 typedef uint32_t target_uid_t;
106 typedef uint32_t target_gid_t;
107 #endif
108 typedef int32_t target_pid_t;
109
110 #ifdef TARGET_I386
111
112 #define ELF_PLATFORM get_elf_platform()
113
114 static const char *get_elf_platform(void)
115 {
116 static char elf_platform[] = "i386";
117 int family = (thread_env->cpuid_version >> 8) & 0xff;
118 if (family > 6)
119 family = 6;
120 if (family >= 3)
121 elf_platform[1] = '0' + family;
122 return elf_platform;
123 }
124
125 #define ELF_HWCAP get_elf_hwcap()
126
127 static uint32_t get_elf_hwcap(void)
128 {
129 return thread_env->cpuid_features;
130 }
131
132 #ifdef TARGET_X86_64
133 #define ELF_START_MMAP 0x2aaaaab000ULL
134 #define elf_check_arch(x) ( ((x) == ELF_ARCH) )
135
136 #define ELF_CLASS ELFCLASS64
137 #define ELF_DATA ELFDATA2LSB
138 #define ELF_ARCH EM_X86_64
139
140 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
141 {
142 regs->rax = 0;
143 regs->rsp = infop->start_stack;
144 regs->rip = infop->entry;
145 }
146
147 #define ELF_NREG 27
148 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
149
150 /*
151  * Note that ELF_NREG should be 29, as there should also be room for
152  * the TRAPNO and ERR "registers", but Linux doesn't dump
153  * those.
154 *
155 * See linux kernel: arch/x86/include/asm/elf.h
156 */
157 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUState *env)
158 {
159 (*regs)[0] = env->regs[15];
160 (*regs)[1] = env->regs[14];
161 (*regs)[2] = env->regs[13];
162 (*regs)[3] = env->regs[12];
163 (*regs)[4] = env->regs[R_EBP];
164 (*regs)[5] = env->regs[R_EBX];
165 (*regs)[6] = env->regs[11];
166 (*regs)[7] = env->regs[10];
167 (*regs)[8] = env->regs[9];
168 (*regs)[9] = env->regs[8];
169 (*regs)[10] = env->regs[R_EAX];
170 (*regs)[11] = env->regs[R_ECX];
171 (*regs)[12] = env->regs[R_EDX];
172 (*regs)[13] = env->regs[R_ESI];
173 (*regs)[14] = env->regs[R_EDI];
174 (*regs)[15] = env->regs[R_EAX]; /* XXX */
175 (*regs)[16] = env->eip;
176 (*regs)[17] = env->segs[R_CS].selector & 0xffff;
177 (*regs)[18] = env->eflags;
178 (*regs)[19] = env->regs[R_ESP];
179 (*regs)[20] = env->segs[R_SS].selector & 0xffff;
180 (*regs)[21] = env->segs[R_FS].selector & 0xffff;
181 (*regs)[22] = env->segs[R_GS].selector & 0xffff;
182 (*regs)[23] = env->segs[R_DS].selector & 0xffff;
183 (*regs)[24] = env->segs[R_ES].selector & 0xffff;
184 (*regs)[25] = env->segs[R_FS].selector & 0xffff;
185 (*regs)[26] = env->segs[R_GS].selector & 0xffff;
186 }
187
188 #else
189
190 #define ELF_START_MMAP 0x80000000
191
192 /*
193 * This is used to ensure we don't load something for the wrong architecture.
194 */
195 #define elf_check_arch(x) ( ((x) == EM_386) || ((x) == EM_486) )
196
197 /*
198 * These are used to set parameters in the core dumps.
199 */
200 #define ELF_CLASS ELFCLASS32
201 #define ELF_DATA ELFDATA2LSB
202 #define ELF_ARCH EM_386
203
204 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
205 {
206 regs->esp = infop->start_stack;
207 regs->eip = infop->entry;
208
209 /* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program
210 starts %edx contains a pointer to a function which might be
211 registered using `atexit'. This provides a means for the
212 dynamic linker to call DT_FINI functions for shared libraries
213 that have been loaded before the code runs.
214
215 A value of 0 means we have no such handler. */
216 regs->edx = 0;
217 }
218
219 #define ELF_NREG 17
220 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
221
222 /*
223  * Note that ELF_NREG should be 19, as there should also be room for
224  * the TRAPNO and ERR "registers", but Linux doesn't dump
225  * those.
226 *
227 * See linux kernel: arch/x86/include/asm/elf.h
228 */
229 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUState *env)
230 {
231 (*regs)[0] = env->regs[R_EBX];
232 (*regs)[1] = env->regs[R_ECX];
233 (*regs)[2] = env->regs[R_EDX];
234 (*regs)[3] = env->regs[R_ESI];
235 (*regs)[4] = env->regs[R_EDI];
236 (*regs)[5] = env->regs[R_EBP];
237 (*regs)[6] = env->regs[R_EAX];
238 (*regs)[7] = env->segs[R_DS].selector & 0xffff;
239 (*regs)[8] = env->segs[R_ES].selector & 0xffff;
240 (*regs)[9] = env->segs[R_FS].selector & 0xffff;
241 (*regs)[10] = env->segs[R_GS].selector & 0xffff;
242 (*regs)[11] = env->regs[R_EAX]; /* XXX */
243 (*regs)[12] = env->eip;
244 (*regs)[13] = env->segs[R_CS].selector & 0xffff;
245 (*regs)[14] = env->eflags;
246 (*regs)[15] = env->regs[R_ESP];
247 (*regs)[16] = env->segs[R_SS].selector & 0xffff;
248 }
249 #endif
250
251 #define USE_ELF_CORE_DUMP
252 #define ELF_EXEC_PAGESIZE 4096
253
254 #endif
255
256 #ifdef TARGET_ARM
257
258 #define ELF_START_MMAP 0x80000000
259
260 #define elf_check_arch(x) ( (x) == EM_ARM )
261
262 #define ELF_CLASS ELFCLASS32
263 #ifdef TARGET_WORDS_BIGENDIAN
264 #define ELF_DATA ELFDATA2MSB
265 #else
266 #define ELF_DATA ELFDATA2LSB
267 #endif
268 #define ELF_ARCH EM_ARM
269
270 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
271 {
272 abi_long stack = infop->start_stack;
273 memset(regs, 0, sizeof(*regs));
274 regs->ARM_cpsr = 0x10;
275 if (infop->entry & 1)
276 regs->ARM_cpsr |= CPSR_T;
277 regs->ARM_pc = infop->entry & 0xfffffffe;
278 regs->ARM_sp = infop->start_stack;
279 /* FIXME - what to do for failure of get_user()? */
280 get_user_ual(regs->ARM_r2, stack + 8); /* envp */
281 get_user_ual(regs->ARM_r1, stack + 4); /* argv */
282 /* XXX: it seems that r0 is zeroed after ! */
283 regs->ARM_r0 = 0;
284 /* For uClinux PIC binaries. */
285 /* XXX: Linux does this only on ARM with no MMU (do we care ?) */
286 regs->ARM_r10 = infop->start_data;
287 }
288
289 #define ELF_NREG 18
290 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
291
292 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUState *env)
293 {
294 (*regs)[0] = env->regs[0];
295 (*regs)[1] = env->regs[1];
296 (*regs)[2] = env->regs[2];
297 (*regs)[3] = env->regs[3];
298 (*regs)[4] = env->regs[4];
299 (*regs)[5] = env->regs[5];
300 (*regs)[6] = env->regs[6];
301 (*regs)[7] = env->regs[7];
302 (*regs)[8] = env->regs[8];
303 (*regs)[9] = env->regs[9];
304 (*regs)[10] = env->regs[10];
305 (*regs)[11] = env->regs[11];
306 (*regs)[12] = env->regs[12];
307 (*regs)[13] = env->regs[13];
308 (*regs)[14] = env->regs[14];
309 (*regs)[15] = env->regs[15];
310
311 (*regs)[16] = cpsr_read((CPUState *)env);
312 (*regs)[17] = env->regs[0]; /* XXX */
313 }
314
315 #define USE_ELF_CORE_DUMP
316 #define ELF_EXEC_PAGESIZE 4096
317
318 enum
319 {
320 ARM_HWCAP_ARM_SWP = 1 << 0,
321 ARM_HWCAP_ARM_HALF = 1 << 1,
322 ARM_HWCAP_ARM_THUMB = 1 << 2,
323 ARM_HWCAP_ARM_26BIT = 1 << 3,
324 ARM_HWCAP_ARM_FAST_MULT = 1 << 4,
325 ARM_HWCAP_ARM_FPA = 1 << 5,
326 ARM_HWCAP_ARM_VFP = 1 << 6,
327 ARM_HWCAP_ARM_EDSP = 1 << 7,
328 ARM_HWCAP_ARM_JAVA = 1 << 8,
329 ARM_HWCAP_ARM_IWMMXT = 1 << 9,
330 ARM_HWCAP_ARM_THUMBEE = 1 << 10,
331 ARM_HWCAP_ARM_NEON = 1 << 11,
332 ARM_HWCAP_ARM_VFPv3 = 1 << 12,
333 ARM_HWCAP_ARM_VFPv3D16 = 1 << 13,
334 };
335
336 #define ELF_HWCAP (ARM_HWCAP_ARM_SWP | ARM_HWCAP_ARM_HALF \
337 | ARM_HWCAP_ARM_THUMB | ARM_HWCAP_ARM_FAST_MULT \
338 | ARM_HWCAP_ARM_FPA | ARM_HWCAP_ARM_VFP \
339 | ARM_HWCAP_ARM_NEON | ARM_HWCAP_ARM_VFPv3 )
340
341 #endif
342
343 #ifdef TARGET_SPARC
344 #ifdef TARGET_SPARC64
345
346 #define ELF_START_MMAP 0x80000000
347
348 #ifndef TARGET_ABI32
349 #define elf_check_arch(x) ( (x) == EM_SPARCV9 || (x) == EM_SPARC32PLUS )
350 #else
351 #define elf_check_arch(x) ( (x) == EM_SPARC32PLUS || (x) == EM_SPARC )
352 #endif
353
354 #define ELF_CLASS ELFCLASS64
355 #define ELF_DATA ELFDATA2MSB
356 #define ELF_ARCH EM_SPARCV9
357
358 #define STACK_BIAS 2047
359
360 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
361 {
362 #ifndef TARGET_ABI32
363 regs->tstate = 0;
364 #endif
365 regs->pc = infop->entry;
366 regs->npc = regs->pc + 4;
367 regs->y = 0;
368 #ifdef TARGET_ABI32
369 regs->u_regs[14] = infop->start_stack - 16 * 4;
370 #else
371 if (personality(infop->personality) == PER_LINUX32)
372 regs->u_regs[14] = infop->start_stack - 16 * 4;
373 else
374 regs->u_regs[14] = infop->start_stack - 16 * 8 - STACK_BIAS;
375 #endif
376 }
377
378 #else
379 #define ELF_START_MMAP 0x80000000
380
381 #define elf_check_arch(x) ( (x) == EM_SPARC )
382
383 #define ELF_CLASS ELFCLASS32
384 #define ELF_DATA ELFDATA2MSB
385 #define ELF_ARCH EM_SPARC
386
387 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
388 {
389 regs->psr = 0;
390 regs->pc = infop->entry;
391 regs->npc = regs->pc + 4;
392 regs->y = 0;
393 regs->u_regs[14] = infop->start_stack - 16 * 4;
394 }
395
396 #endif
397 #endif
398
399 #ifdef TARGET_PPC
400
401 #define ELF_START_MMAP 0x80000000
402
403 #if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
404
405 #define elf_check_arch(x) ( (x) == EM_PPC64 )
406
407 #define ELF_CLASS ELFCLASS64
408
409 #else
410
411 #define elf_check_arch(x) ( (x) == EM_PPC )
412
413 #define ELF_CLASS ELFCLASS32
414
415 #endif
416
417 #ifdef TARGET_WORDS_BIGENDIAN
418 #define ELF_DATA ELFDATA2MSB
419 #else
420 #define ELF_DATA ELFDATA2LSB
421 #endif
422 #define ELF_ARCH EM_PPC
423
424 /* Feature masks for the Aux Vector Hardware Capabilities (AT_HWCAP).
425 See arch/powerpc/include/asm/cputable.h. */
426 enum {
427 QEMU_PPC_FEATURE_32 = 0x80000000,
428 QEMU_PPC_FEATURE_64 = 0x40000000,
429 QEMU_PPC_FEATURE_601_INSTR = 0x20000000,
430 QEMU_PPC_FEATURE_HAS_ALTIVEC = 0x10000000,
431 QEMU_PPC_FEATURE_HAS_FPU = 0x08000000,
432 QEMU_PPC_FEATURE_HAS_MMU = 0x04000000,
433 QEMU_PPC_FEATURE_HAS_4xxMAC = 0x02000000,
434 QEMU_PPC_FEATURE_UNIFIED_CACHE = 0x01000000,
435 QEMU_PPC_FEATURE_HAS_SPE = 0x00800000,
436 QEMU_PPC_FEATURE_HAS_EFP_SINGLE = 0x00400000,
437 QEMU_PPC_FEATURE_HAS_EFP_DOUBLE = 0x00200000,
438 QEMU_PPC_FEATURE_NO_TB = 0x00100000,
439 QEMU_PPC_FEATURE_POWER4 = 0x00080000,
440 QEMU_PPC_FEATURE_POWER5 = 0x00040000,
441 QEMU_PPC_FEATURE_POWER5_PLUS = 0x00020000,
442 QEMU_PPC_FEATURE_CELL = 0x00010000,
443 QEMU_PPC_FEATURE_BOOKE = 0x00008000,
444 QEMU_PPC_FEATURE_SMT = 0x00004000,
445 QEMU_PPC_FEATURE_ICACHE_SNOOP = 0x00002000,
446 QEMU_PPC_FEATURE_ARCH_2_05 = 0x00001000,
447 QEMU_PPC_FEATURE_PA6T = 0x00000800,
448 QEMU_PPC_FEATURE_HAS_DFP = 0x00000400,
449 QEMU_PPC_FEATURE_POWER6_EXT = 0x00000200,
450 QEMU_PPC_FEATURE_ARCH_2_06 = 0x00000100,
451 QEMU_PPC_FEATURE_HAS_VSX = 0x00000080,
452 QEMU_PPC_FEATURE_PSERIES_PERFMON_COMPAT = 0x00000040,
453
454 QEMU_PPC_FEATURE_TRUE_LE = 0x00000002,
455 QEMU_PPC_FEATURE_PPC_LE = 0x00000001,
456 };
457
458 #define ELF_HWCAP get_elf_hwcap()
459
460 static uint32_t get_elf_hwcap(void)
461 {
462 CPUState *e = thread_env;
463 uint32_t features = 0;
464
465 /* We don't have to be terribly complete here; the high points are
466 Altivec/FP/SPE support. Anything else is just a bonus. */
467 #define GET_FEATURE(flag, feature) \
468 do {if (e->insns_flags & flag) features |= feature; } while(0)
469 GET_FEATURE(PPC_64B, QEMU_PPC_FEATURE_64);
470 GET_FEATURE(PPC_FLOAT, QEMU_PPC_FEATURE_HAS_FPU);
471 GET_FEATURE(PPC_ALTIVEC, QEMU_PPC_FEATURE_HAS_ALTIVEC);
472 GET_FEATURE(PPC_SPE, QEMU_PPC_FEATURE_HAS_SPE);
473 GET_FEATURE(PPC_SPE_SINGLE, QEMU_PPC_FEATURE_HAS_EFP_SINGLE);
474 GET_FEATURE(PPC_SPE_DOUBLE, QEMU_PPC_FEATURE_HAS_EFP_DOUBLE);
475 GET_FEATURE(PPC_BOOKE, QEMU_PPC_FEATURE_BOOKE);
476 GET_FEATURE(PPC_405_MAC, QEMU_PPC_FEATURE_HAS_4xxMAC);
477 #undef GET_FEATURE
478
479 return features;
480 }
481
482 /*
483 * We need to put in some extra aux table entries to tell glibc what
484 * the cache block size is, so it can use the dcbz instruction safely.
485 */
486 #define AT_DCACHEBSIZE 19
487 #define AT_ICACHEBSIZE 20
488 #define AT_UCACHEBSIZE 21
489 /* A special ignored type value for PPC, for glibc compatibility. */
490 #define AT_IGNOREPPC 22
491 /*
492 * The requirements here are:
493 * - keep the final alignment of sp (sp & 0xf)
494 * - make sure the 32-bit value at the first 16 byte aligned position of
495 * AUXV is greater than 16 for glibc compatibility.
496 * AT_IGNOREPPC is used for that.
497 * - for compatibility with glibc ARCH_DLINFO must always be defined on PPC,
498 * even if DLINFO_ARCH_ITEMS goes to zero or is undefined.
499 */
500 #define DLINFO_ARCH_ITEMS 5
501 #define ARCH_DLINFO \
502 do { \
503 NEW_AUX_ENT(AT_DCACHEBSIZE, 0x20); \
504 NEW_AUX_ENT(AT_ICACHEBSIZE, 0x20); \
505 NEW_AUX_ENT(AT_UCACHEBSIZE, 0); \
506 /* \
507 * Now handle glibc compatibility. \
508 */ \
509 NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \
510 NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \
511 } while (0)
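/*
 * A layout sketch (assuming the NEW_AUX_ENT() definition in
 * create_elf_tables() further down): ARCH_DLINFO is expanded last and
 * NEW_AUX_ENT() pushes entries towards lower addresses, so these two
 * id/value pairs end up at the very beginning of the auxiliary vector.
 * Every word of those pairs holds AT_IGNOREPPC (22), so whichever word
 * falls on the first 16-byte-aligned position is greater than 16, which
 * satisfies the glibc requirement described above.
 */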
512
513 static inline void init_thread(struct target_pt_regs *_regs, struct image_info *infop)
514 {
515 abi_ulong pos = infop->start_stack;
516 abi_ulong tmp;
517 #if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
518 abi_ulong entry, toc;
519 #endif
520
521 _regs->gpr[1] = infop->start_stack;
522 #if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
523 entry = ldq_raw(infop->entry) + infop->load_addr;
524 toc = ldq_raw(infop->entry + 8) + infop->load_addr;
525 _regs->gpr[2] = toc;
526 infop->entry = entry;
527 #endif
528 _regs->nip = infop->entry;
529 /* Note that this isn't exactly what the regular kernel does,
530 * but this is what the ABI wants and is needed to allow
531 * execution of PPC BSD programs.
532 */
533 /* FIXME - what to do for failure of get_user()? */
534 get_user_ual(_regs->gpr[3], pos);
535 pos += sizeof(abi_ulong);
536 _regs->gpr[4] = pos;
537 for (tmp = 1; tmp != 0; pos += sizeof(abi_ulong))
538 tmp = ldl(pos);
539 _regs->gpr[5] = pos;
540 }
541
542 #define ELF_EXEC_PAGESIZE 4096
543
544 #endif
545
546 #ifdef TARGET_MIPS
547
548 #define ELF_START_MMAP 0x80000000
549
550 #define elf_check_arch(x) ( (x) == EM_MIPS )
551
552 #ifdef TARGET_MIPS64
553 #define ELF_CLASS ELFCLASS64
554 #else
555 #define ELF_CLASS ELFCLASS32
556 #endif
557 #ifdef TARGET_WORDS_BIGENDIAN
558 #define ELF_DATA ELFDATA2MSB
559 #else
560 #define ELF_DATA ELFDATA2LSB
561 #endif
562 #define ELF_ARCH EM_MIPS
563
564 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
565 {
566 regs->cp0_status = 2 << CP0St_KSU;
567 regs->cp0_epc = infop->entry;
568 regs->regs[29] = infop->start_stack;
569 }
570
571 #define ELF_EXEC_PAGESIZE 4096
572
573 #endif /* TARGET_MIPS */
574
575 #ifdef TARGET_MICROBLAZE
576
577 #define ELF_START_MMAP 0x80000000
578
579 #define elf_check_arch(x) ( (x) == EM_XILINX_MICROBLAZE )
580
581 #define ELF_CLASS ELFCLASS32
582 #define ELF_DATA ELFDATA2MSB
583 #define ELF_ARCH EM_XILINX_MICROBLAZE
584
585 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
586 {
587 regs->pc = infop->entry;
588 regs->r1 = infop->start_stack;
589
590 }
591
592 #define ELF_EXEC_PAGESIZE 4096
593
594 #endif /* TARGET_MICROBLAZE */
595
596 #ifdef TARGET_SH4
597
598 #define ELF_START_MMAP 0x80000000
599
600 #define elf_check_arch(x) ( (x) == EM_SH )
601
602 #define ELF_CLASS ELFCLASS32
603 #define ELF_DATA ELFDATA2LSB
604 #define ELF_ARCH EM_SH
605
606 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
607 {
608 /* Check other registers XXXXX */
609 regs->pc = infop->entry;
610 regs->regs[15] = infop->start_stack;
611 }
612
613 #define ELF_EXEC_PAGESIZE 4096
614
615 #endif
616
617 #ifdef TARGET_CRIS
618
619 #define ELF_START_MMAP 0x80000000
620
621 #define elf_check_arch(x) ( (x) == EM_CRIS )
622
623 #define ELF_CLASS ELFCLASS32
624 #define ELF_DATA ELFDATA2LSB
625 #define ELF_ARCH EM_CRIS
626
627 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
628 {
629 regs->erp = infop->entry;
630 }
631
632 #define ELF_EXEC_PAGESIZE 8192
633
634 #endif
635
636 #ifdef TARGET_M68K
637
638 #define ELF_START_MMAP 0x80000000
639
640 #define elf_check_arch(x) ( (x) == EM_68K )
641
642 #define ELF_CLASS ELFCLASS32
643 #define ELF_DATA ELFDATA2MSB
644 #define ELF_ARCH EM_68K
645
646 /* ??? Does this need to do anything?
647 #define ELF_PLAT_INIT(_r) */
648
649 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
650 {
651 regs->usp = infop->start_stack;
652 regs->sr = 0;
653 regs->pc = infop->entry;
654 }
655
656 #define ELF_EXEC_PAGESIZE 8192
657
658 #endif
659
660 #ifdef TARGET_ALPHA
661
662 #define ELF_START_MMAP (0x30000000000ULL)
663
664 #define elf_check_arch(x) ( (x) == ELF_ARCH )
665
666 #define ELF_CLASS ELFCLASS64
667 #define ELF_DATA ELFDATA2LSB
668 #define ELF_ARCH EM_ALPHA
669
670 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
671 {
672 regs->pc = infop->entry;
673 regs->ps = 8;
674 regs->usp = infop->start_stack;
675 }
676
677 #define ELF_EXEC_PAGESIZE 8192
678
679 #endif /* TARGET_ALPHA */
680
681 #ifndef ELF_PLATFORM
682 #define ELF_PLATFORM (NULL)
683 #endif
684
685 #ifndef ELF_HWCAP
686 #define ELF_HWCAP 0
687 #endif
688
689 #ifdef TARGET_ABI32
690 #undef ELF_CLASS
691 #define ELF_CLASS ELFCLASS32
692 #undef bswaptls
693 #define bswaptls(ptr) bswap32s(ptr)
694 #endif
695
696 #include "elf.h"
697
698 struct exec
699 {
700 unsigned int a_info; /* Use macros N_MAGIC, etc for access */
701 unsigned int a_text; /* length of text, in bytes */
702 unsigned int a_data; /* length of data, in bytes */
703 unsigned int a_bss; /* length of uninitialized data area, in bytes */
704 unsigned int a_syms; /* length of symbol table data in file, in bytes */
705 unsigned int a_entry; /* start address */
706 unsigned int a_trsize; /* length of relocation info for text, in bytes */
707 unsigned int a_drsize; /* length of relocation info for data, in bytes */
708 };
709
710
711 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
712 #define OMAGIC 0407
713 #define NMAGIC 0410
714 #define ZMAGIC 0413
715 #define QMAGIC 0314
716
717 /* max code+data+bss space allocated to elf interpreter */
718 #define INTERP_MAP_SIZE (32 * 1024 * 1024)
719
720 /* max code+data+bss+brk space allocated to ET_DYN executables */
721 #define ET_DYN_MAP_SIZE (128 * 1024 * 1024)
722
723 /* Necessary parameters */
724 #define TARGET_ELF_EXEC_PAGESIZE TARGET_PAGE_SIZE
725 #define TARGET_ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(TARGET_ELF_EXEC_PAGESIZE-1))
726 #define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE-1))
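/*
 * For example, with a 4 KiB TARGET_PAGE_SIZE:
 *   TARGET_ELF_PAGESTART(0x12345)  == 0x12000  (rounded down to the page start)
 *   TARGET_ELF_PAGEOFFSET(0x12345) == 0x345    (offset within that page)
 */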
727
728 #define INTERPRETER_NONE 0
729 #define INTERPRETER_AOUT 1
730 #define INTERPRETER_ELF 2
731
732 #define DLINFO_ITEMS 12
733
734 static inline void memcpy_fromfs(void * to, const void * from, unsigned long n)
735 {
736 memcpy(to, from, n);
737 }
738
739 static int load_aout_interp(void * exptr, int interp_fd);
740
741 #ifdef BSWAP_NEEDED
742 static void bswap_ehdr(struct elfhdr *ehdr)
743 {
744 bswap16s(&ehdr->e_type); /* Object file type */
745 bswap16s(&ehdr->e_machine); /* Architecture */
746 bswap32s(&ehdr->e_version); /* Object file version */
747 bswaptls(&ehdr->e_entry); /* Entry point virtual address */
748 bswaptls(&ehdr->e_phoff); /* Program header table file offset */
749 bswaptls(&ehdr->e_shoff); /* Section header table file offset */
750 bswap32s(&ehdr->e_flags); /* Processor-specific flags */
751 bswap16s(&ehdr->e_ehsize); /* ELF header size in bytes */
752 bswap16s(&ehdr->e_phentsize); /* Program header table entry size */
753 bswap16s(&ehdr->e_phnum); /* Program header table entry count */
754 bswap16s(&ehdr->e_shentsize); /* Section header table entry size */
755 bswap16s(&ehdr->e_shnum); /* Section header table entry count */
756 bswap16s(&ehdr->e_shstrndx); /* Section header string table index */
757 }
758
759 static void bswap_phdr(struct elf_phdr *phdr)
760 {
761 bswap32s(&phdr->p_type); /* Segment type */
762 bswaptls(&phdr->p_offset); /* Segment file offset */
763 bswaptls(&phdr->p_vaddr); /* Segment virtual address */
764 bswaptls(&phdr->p_paddr); /* Segment physical address */
765 bswaptls(&phdr->p_filesz); /* Segment size in file */
766 bswaptls(&phdr->p_memsz); /* Segment size in memory */
767 bswap32s(&phdr->p_flags); /* Segment flags */
768 bswaptls(&phdr->p_align); /* Segment alignment */
769 }
770
771 static void bswap_shdr(struct elf_shdr *shdr)
772 {
773 bswap32s(&shdr->sh_name);
774 bswap32s(&shdr->sh_type);
775 bswaptls(&shdr->sh_flags);
776 bswaptls(&shdr->sh_addr);
777 bswaptls(&shdr->sh_offset);
778 bswaptls(&shdr->sh_size);
779 bswap32s(&shdr->sh_link);
780 bswap32s(&shdr->sh_info);
781 bswaptls(&shdr->sh_addralign);
782 bswaptls(&shdr->sh_entsize);
783 }
784
785 static void bswap_sym(struct elf_sym *sym)
786 {
787 bswap32s(&sym->st_name);
788 bswaptls(&sym->st_value);
789 bswaptls(&sym->st_size);
790 bswap16s(&sym->st_shndx);
791 }
792 #endif
793
794 #ifdef USE_ELF_CORE_DUMP
795 static int elf_core_dump(int, const CPUState *);
796
797 #ifdef BSWAP_NEEDED
798 static void bswap_note(struct elf_note *en)
799 {
800 bswap32s(&en->n_namesz);
801 bswap32s(&en->n_descsz);
802 bswap32s(&en->n_type);
803 }
804 #endif /* BSWAP_NEEDED */
805
806 #endif /* USE_ELF_CORE_DUMP */
807
808 /*
809 * 'copy_elf_strings()' copies argument/environment strings from user
810 * memory to free pages in kernel mem. These are in a format ready
811 * to be put directly into the top of new user memory.
812 *
813 */
814 static abi_ulong copy_elf_strings(int argc,char ** argv, void **page,
815 abi_ulong p)
816 {
817 char *tmp, *tmp1, *pag = NULL;
818 int len, offset = 0;
819
820 if (!p) {
821 return 0; /* bullet-proofing */
822 }
823 while (argc-- > 0) {
824 tmp = argv[argc];
825 if (!tmp) {
826 fprintf(stderr, "VFS: argc is wrong");
827 exit(-1);
828 }
829 tmp1 = tmp;
830 while (*tmp++);
831 len = tmp - tmp1;
832 if (p < len) { /* this shouldn't happen - 128kB */
833 return 0;
834 }
835 while (len) {
836 --p; --tmp; --len;
837 if (--offset < 0) {
838 offset = p % TARGET_PAGE_SIZE;
839 pag = (char *)page[p/TARGET_PAGE_SIZE];
840 if (!pag) {
841 pag = (char *)malloc(TARGET_PAGE_SIZE);
842 if (!pag)
843 return 0;
844 memset(pag, 0, TARGET_PAGE_SIZE);
845 page[p/TARGET_PAGE_SIZE] = pag;
846 }
847 }
848 if (len == 0 || offset == 0) {
849 *(pag + offset) = *tmp;
850 }
851 else {
852 int bytes_to_copy = (len > offset) ? offset : len;
853 tmp -= bytes_to_copy;
854 p -= bytes_to_copy;
855 offset -= bytes_to_copy;
856 len -= bytes_to_copy;
857 memcpy_fromfs(pag + offset, tmp, bytes_to_copy + 1);
858 }
859 }
860 }
861 return p;
862 }
863
864 static abi_ulong setup_arg_pages(abi_ulong p, struct linux_binprm *bprm,
865 struct image_info *info)
866 {
867 abi_ulong stack_base, size, error;
868 int i;
869
870 /* Create enough stack to hold everything. If we don't use
871 * it for args, we'll use it for something else...
872 */
873 size = x86_stack_size;
874 if (size < MAX_ARG_PAGES*TARGET_PAGE_SIZE)
875 size = MAX_ARG_PAGES*TARGET_PAGE_SIZE;
876 error = target_mmap(0,
877 size + qemu_host_page_size,
878 PROT_READ | PROT_WRITE,
879 MAP_PRIVATE | MAP_ANONYMOUS,
880 -1, 0);
881 if (error == -1) {
882 perror("stk mmap");
883 exit(-1);
884 }
885 /* we reserve one extra page at the top of the stack as guard */
886 target_mprotect(error + size, qemu_host_page_size, PROT_NONE);
887
888 stack_base = error + size - MAX_ARG_PAGES*TARGET_PAGE_SIZE;
889 p += stack_base;
890
891 for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
892 if (bprm->page[i]) {
893 info->rss++;
894 /* FIXME - check return value of memcpy_to_target() for failure */
895 memcpy_to_target(stack_base, bprm->page[i], TARGET_PAGE_SIZE);
896 free(bprm->page[i]);
897 }
898 stack_base += TARGET_PAGE_SIZE;
899 }
900 return p;
901 }
902
903 static void set_brk(abi_ulong start, abi_ulong end)
904 {
905 /* page-align the start and end addresses... */
906 start = HOST_PAGE_ALIGN(start);
907 end = HOST_PAGE_ALIGN(end);
908 if (end <= start)
909 return;
910 if(target_mmap(start, end - start,
911 PROT_READ | PROT_WRITE | PROT_EXEC,
912 MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0) == -1) {
913 perror("cannot mmap brk");
914 exit(-1);
915 }
916 }
917
918
919 /* We need to explicitly zero any fractional pages after the data
920 section (i.e. bss). These would otherwise contain junk from the file
921 that should not be in memory. */
922 static void padzero(abi_ulong elf_bss, abi_ulong last_bss)
923 {
924 abi_ulong nbyte;
925
926 if (elf_bss >= last_bss)
927 return;
928
929 /* XXX: this is really a hack : if the real host page size is
930 smaller than the target page size, some pages after the end
931 of the file may not be mapped. A better fix would be to
932 patch target_mmap(), but it is more complicated as the file
933 size must be known */
934 if (qemu_real_host_page_size < qemu_host_page_size) {
935 abi_ulong end_addr, end_addr1;
936 end_addr1 = (elf_bss + qemu_real_host_page_size - 1) &
937 ~(qemu_real_host_page_size - 1);
938 end_addr = HOST_PAGE_ALIGN(elf_bss);
939 if (end_addr1 < end_addr) {
940 mmap((void *)g2h(end_addr1), end_addr - end_addr1,
941 PROT_READ|PROT_WRITE|PROT_EXEC,
942 MAP_FIXED|MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
943 }
944 }
945
946 nbyte = elf_bss & (qemu_host_page_size-1);
947 if (nbyte) {
948 nbyte = qemu_host_page_size - nbyte;
949 do {
950 /* FIXME - what to do if put_user() fails? */
951 put_user_u8(0, elf_bss);
952 elf_bss++;
953 } while (--nbyte);
954 }
955 }
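/*
 * A concrete example (values chosen for illustration only): with a 4 KiB
 * host page size and elf_bss == 0x0804a123, the put_user_u8() loop above
 * zeroes the 0xedd bytes from 0x0804a123 up to the 0x0804b000 page boundary,
 * so the fractional page after the file-backed data does not leak file
 * contents into what the program expects to be zero-initialized bss.
 */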
956
957
958 static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc,
959 struct elfhdr * exec,
960 abi_ulong load_addr,
961 abi_ulong load_bias,
962 abi_ulong interp_load_addr, int ibcs,
963 struct image_info *info)
964 {
965 abi_ulong sp;
966 int size;
967 abi_ulong u_platform;
968 const char *k_platform;
969 const int n = sizeof(elf_addr_t);
970
971 sp = p;
972 u_platform = 0;
973 k_platform = ELF_PLATFORM;
974 if (k_platform) {
975 size_t len = strlen(k_platform) + 1;
976 sp -= (len + n - 1) & ~(n - 1);
977 u_platform = sp;
978 /* FIXME - check return value of memcpy_to_target() for failure */
979 memcpy_to_target(sp, k_platform, len);
980 }
981 /*
982 * Force 16 byte _final_ alignment here for generality.
983 */
984 sp = sp &~ (abi_ulong)15;
985 size = (DLINFO_ITEMS + 1) * 2;
986 if (k_platform)
987 size += 2;
988 #ifdef DLINFO_ARCH_ITEMS
989 size += DLINFO_ARCH_ITEMS * 2;
990 #endif
991 size += envc + argc + 2;
992 size += (!ibcs ? 3 : 1); /* argc itself */
993 size *= n;
994 if (size & 15)
995 sp -= 16 - (size & 15);
996
997 /* This is correct because Linux defines
998 * elf_addr_t as Elf32_Off / Elf64_Off
999 */
1000 #define NEW_AUX_ENT(id, val) do { \
1001 sp -= n; put_user_ual(val, sp); \
1002 sp -= n; put_user_ual(id, sp); \
1003 } while(0)
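/*
 * Layout note (illustrative): each NEW_AUX_ENT() pushes the value and then
 * the id, so the id sits at the lower address and entries appear in the
 * vector in the reverse order of the calls below. AT_NULL is pushed first
 * and therefore ends up as the terminating entry at the highest address,
 * as the ELF ABI requires.
 */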
1004
1005 NEW_AUX_ENT (AT_NULL, 0);
1006
1007 /* There must be exactly DLINFO_ITEMS entries here. */
1008 NEW_AUX_ENT(AT_PHDR, (abi_ulong)(load_addr + exec->e_phoff));
1009 NEW_AUX_ENT(AT_PHENT, (abi_ulong)(sizeof (struct elf_phdr)));
1010 NEW_AUX_ENT(AT_PHNUM, (abi_ulong)(exec->e_phnum));
1011 NEW_AUX_ENT(AT_PAGESZ, (abi_ulong)(TARGET_PAGE_SIZE));
1012 NEW_AUX_ENT(AT_BASE, (abi_ulong)(interp_load_addr));
1013 NEW_AUX_ENT(AT_FLAGS, (abi_ulong)0);
1014 NEW_AUX_ENT(AT_ENTRY, load_bias + exec->e_entry);
1015 NEW_AUX_ENT(AT_UID, (abi_ulong) getuid());
1016 NEW_AUX_ENT(AT_EUID, (abi_ulong) geteuid());
1017 NEW_AUX_ENT(AT_GID, (abi_ulong) getgid());
1018 NEW_AUX_ENT(AT_EGID, (abi_ulong) getegid());
1019 NEW_AUX_ENT(AT_HWCAP, (abi_ulong) ELF_HWCAP);
1020 NEW_AUX_ENT(AT_CLKTCK, (abi_ulong) sysconf(_SC_CLK_TCK));
1021 if (k_platform)
1022 NEW_AUX_ENT(AT_PLATFORM, u_platform);
1023 #ifdef ARCH_DLINFO
1024 /*
1025 * ARCH_DLINFO must come last so platform specific code can enforce
1026 * special alignment requirements on the AUXV if necessary (eg. PPC).
1027 */
1028 ARCH_DLINFO;
1029 #endif
1030 #undef NEW_AUX_ENT
1031
1032 info->saved_auxv = sp;
1033
1034 sp = loader_build_argptr(envc, argc, sp, p, !ibcs);
1035 return sp;
1036 }
1037
1038
1039 static abi_ulong load_elf_interp(struct elfhdr * interp_elf_ex,
1040 int interpreter_fd,
1041 abi_ulong *interp_load_addr)
1042 {
1043 struct elf_phdr *elf_phdata = NULL;
1044 struct elf_phdr *eppnt;
1045 abi_ulong load_addr = 0;
1046 int load_addr_set = 0;
1047 int retval;
1048 abi_ulong last_bss, elf_bss;
1049 abi_ulong error;
1050 int i;
1051
1052 elf_bss = 0;
1053 last_bss = 0;
1054 error = 0;
1055
1056 #ifdef BSWAP_NEEDED
1057 bswap_ehdr(interp_elf_ex);
1058 #endif
1059 /* First of all, some simple consistency checks */
1060 if ((interp_elf_ex->e_type != ET_EXEC &&
1061 interp_elf_ex->e_type != ET_DYN) ||
1062 !elf_check_arch(interp_elf_ex->e_machine)) {
1063 return ~((abi_ulong)0UL);
1064 }
1065
1066
1067 /* Now read in all of the header information */
1068
1069 if (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum > TARGET_PAGE_SIZE)
1070 return ~(abi_ulong)0UL;
1071
1072 elf_phdata = (struct elf_phdr *)
1073 malloc(sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
1074
1075 if (!elf_phdata)
1076 return ~((abi_ulong)0UL);
1077
1078 /*
1079 * If the size of this structure has changed, then punt, since
1080 * we will be doing the wrong thing.
1081 */
1082 if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr)) {
1083 free(elf_phdata);
1084 return ~((abi_ulong)0UL);
1085 }
1086
1087 retval = lseek(interpreter_fd, interp_elf_ex->e_phoff, SEEK_SET);
1088 if(retval >= 0) {
1089 retval = read(interpreter_fd,
1090 (char *) elf_phdata,
1091 sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
1092 }
1093 if (retval < 0) {
1094 perror("load_elf_interp");
1095 exit(-1);
1096 free (elf_phdata);
1097 return retval;
1098 }
1099 #ifdef BSWAP_NEEDED
1100 eppnt = elf_phdata;
1101 for (i=0; i<interp_elf_ex->e_phnum; i++, eppnt++) {
1102 bswap_phdr(eppnt);
1103 }
1104 #endif
1105
1106 if (interp_elf_ex->e_type == ET_DYN) {
1107 /* in order to avoid hardcoding the interpreter load
1108 address in qemu, we allocate a big enough memory zone */
1109 error = target_mmap(0, INTERP_MAP_SIZE,
1110 PROT_NONE, MAP_PRIVATE | MAP_ANON,
1111 -1, 0);
1112 if (error == -1) {
1113 perror("mmap");
1114 exit(-1);
1115 }
1116 load_addr = error;
1117 load_addr_set = 1;
1118 }
1119
1120 eppnt = elf_phdata;
1121 for(i=0; i<interp_elf_ex->e_phnum; i++, eppnt++)
1122 if (eppnt->p_type == PT_LOAD) {
1123 int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
1124 int elf_prot = 0;
1125 abi_ulong vaddr = 0;
1126 abi_ulong k;
1127
1128 if (eppnt->p_flags & PF_R) elf_prot = PROT_READ;
1129 if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
1130 if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
1131 if (interp_elf_ex->e_type == ET_EXEC || load_addr_set) {
1132 elf_type |= MAP_FIXED;
1133 vaddr = eppnt->p_vaddr;
1134 }
1135 error = target_mmap(load_addr+TARGET_ELF_PAGESTART(vaddr),
1136 eppnt->p_filesz + TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr),
1137 elf_prot,
1138 elf_type,
1139 interpreter_fd,
1140 eppnt->p_offset - TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr));
1141
1142 if (error == -1) {
1143 /* Real error */
1144 close(interpreter_fd);
1145 free(elf_phdata);
1146 return ~((abi_ulong)0UL);
1147 }
1148
1149 if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) {
1150 load_addr = error;
1151 load_addr_set = 1;
1152 }
1153
1154 /*
1155 * Find the end of the file mapping for this phdr, and keep
1156 * track of the largest address we see for this.
1157 */
1158 k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
1159 if (k > elf_bss) elf_bss = k;
1160
1161 /*
1162 * Do the same thing for the memory mapping - between
1163 * elf_bss and last_bss is the bss section.
1164 */
1165 k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
1166 if (k > last_bss) last_bss = k;
1167 }
1168
1169 /* Now use mmap to map the library into memory. */
1170
1171 close(interpreter_fd);
1172
1173 /*
1174 * Now fill out the bss section. First pad the last page up
1175 * to the page boundary, and then perform a mmap to make sure
1176 * that there are zeromapped pages up to and including the last
1177 * bss page.
1178 */
1179 padzero(elf_bss, last_bss);
1180 elf_bss = TARGET_ELF_PAGESTART(elf_bss + qemu_host_page_size - 1); /* What we have mapped so far */
1181
1182 /* Map the last of the bss segment */
1183 if (last_bss > elf_bss) {
1184 target_mmap(elf_bss, last_bss-elf_bss,
1185 PROT_READ|PROT_WRITE|PROT_EXEC,
1186 MAP_FIXED|MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
1187 }
1188 free(elf_phdata);
1189
1190 *interp_load_addr = load_addr;
1191 return ((abi_ulong) interp_elf_ex->e_entry) + load_addr;
1192 }
1193
1194 static int symfind(const void *s0, const void *s1)
1195 {
1196 struct elf_sym *key = (struct elf_sym *)s0;
1197 struct elf_sym *sym = (struct elf_sym *)s1;
1198 int result = 0;
1199 if (key->st_value < sym->st_value) {
1200 result = -1;
1201 } else if (key->st_value >= sym->st_value + sym->st_size) {
1202 result = 1;
1203 }
1204 return result;
1205 }
1206
1207 static const char *lookup_symbolxx(struct syminfo *s, target_ulong orig_addr)
1208 {
1209 #if ELF_CLASS == ELFCLASS32
1210 struct elf_sym *syms = s->disas_symtab.elf32;
1211 #else
1212 struct elf_sym *syms = s->disas_symtab.elf64;
1213 #endif
1214
1215 // binary search
1216 struct elf_sym key;
1217 struct elf_sym *sym;
1218
1219 key.st_value = orig_addr;
1220
1221 sym = bsearch(&key, syms, s->disas_num_syms, sizeof(*syms), symfind);
1222 if (sym != NULL) {
1223 return s->disas_strtab + sym->st_name;
1224 }
1225
1226 return "";
1227 }
1228
1229 /* FIXME: This should use elf_ops.h */
1230 static int symcmp(const void *s0, const void *s1)
1231 {
1232 struct elf_sym *sym0 = (struct elf_sym *)s0;
1233 struct elf_sym *sym1 = (struct elf_sym *)s1;
1234 return (sym0->st_value < sym1->st_value)
1235 ? -1
1236 : ((sym0->st_value > sym1->st_value) ? 1 : 0);
1237 }
1238
1239 /* Best attempt to load symbols from this ELF object. */
1240 static void load_symbols(struct elfhdr *hdr, int fd)
1241 {
1242 unsigned int i, nsyms;
1243 struct elf_shdr sechdr, symtab, strtab;
1244 char *strings;
1245 struct syminfo *s;
1246 struct elf_sym *syms;
1247
1248 lseek(fd, hdr->e_shoff, SEEK_SET);
1249 for (i = 0; i < hdr->e_shnum; i++) {
1250 if (read(fd, &sechdr, sizeof(sechdr)) != sizeof(sechdr))
1251 return;
1252 #ifdef BSWAP_NEEDED
1253 bswap_shdr(&sechdr);
1254 #endif
1255 if (sechdr.sh_type == SHT_SYMTAB) {
1256 symtab = sechdr;
1257 lseek(fd, hdr->e_shoff
1258 + sizeof(sechdr) * sechdr.sh_link, SEEK_SET);
1259 if (read(fd, &strtab, sizeof(strtab))
1260 != sizeof(strtab))
1261 return;
1262 #ifdef BSWAP_NEEDED
1263 bswap_shdr(&strtab);
1264 #endif
1265 goto found;
1266 }
1267 }
1268 return; /* Shouldn't happen... */
1269
1270 found:
1271 /* Now we know where the strtab and symtab are. Snarf them. */
1272 s = malloc(sizeof(*s));
1273 syms = malloc(symtab.sh_size);
1274 if (!syms)
1275 return;
1276 s->disas_strtab = strings = malloc(strtab.sh_size);
1277 if (!s->disas_strtab)
1278 return;
1279
1280 lseek(fd, symtab.sh_offset, SEEK_SET);
1281 if (read(fd, syms, symtab.sh_size) != symtab.sh_size)
1282 return;
1283
1284 nsyms = symtab.sh_size / sizeof(struct elf_sym);
1285
1286 i = 0;
1287 while (i < nsyms) {
1288 #ifdef BSWAP_NEEDED
1289 bswap_sym(syms + i);
1290 #endif
1291 // Throw away entries which we do not need.
1292 if (syms[i].st_shndx == SHN_UNDEF ||
1293 syms[i].st_shndx >= SHN_LORESERVE ||
1294 ELF_ST_TYPE(syms[i].st_info) != STT_FUNC) {
1295 nsyms--;
1296 if (i < nsyms) {
1297 syms[i] = syms[nsyms];
1298 }
1299 continue;
1300 }
1301 #if defined(TARGET_ARM) || defined (TARGET_MIPS)
1302 /* The bottom address bit marks a Thumb or MIPS16 symbol. */
1303 syms[i].st_value &= ~(target_ulong)1;
1304 #endif
1305 i++;
1306 }
1307 syms = realloc(syms, nsyms * sizeof(*syms));
1308
1309 qsort(syms, nsyms, sizeof(*syms), symcmp);
1310
1311 lseek(fd, strtab.sh_offset, SEEK_SET);
1312 if (read(fd, strings, strtab.sh_size) != strtab.sh_size)
1313 return;
1314 s->disas_num_syms = nsyms;
1315 #if ELF_CLASS == ELFCLASS32
1316 s->disas_symtab.elf32 = syms;
1317 s->lookup_symbol = (lookup_symbol_t)lookup_symbolxx;
1318 #else
1319 s->disas_symtab.elf64 = syms;
1320 s->lookup_symbol = (lookup_symbol_t)lookup_symbolxx;
1321 #endif
1322 s->next = syminfos;
1323 syminfos = s;
1324 }
1325
1326 int load_elf_binary(struct linux_binprm * bprm, struct target_pt_regs * regs,
1327 struct image_info * info)
1328 {
1329 struct elfhdr elf_ex;
1330 struct elfhdr interp_elf_ex;
1331 struct exec interp_ex;
1332 int interpreter_fd = -1; /* avoid warning */
1333 abi_ulong load_addr, load_bias;
1334 int load_addr_set = 0;
1335 unsigned int interpreter_type = INTERPRETER_NONE;
1336 unsigned char ibcs2_interpreter;
1337 int i;
1338 abi_ulong mapped_addr;
1339 struct elf_phdr * elf_ppnt;
1340 struct elf_phdr *elf_phdata;
1341 abi_ulong elf_bss, k, elf_brk;
1342 int retval;
1343 char * elf_interpreter;
1344 abi_ulong elf_entry, interp_load_addr = 0;
1345 int status;
1346 abi_ulong start_code, end_code, start_data, end_data;
1347 abi_ulong reloc_func_desc = 0;
1348 abi_ulong elf_stack;
1349 char passed_fileno[6];
1350
1351 ibcs2_interpreter = 0;
1352 status = 0;
1353 load_addr = 0;
1354 load_bias = 0;
1355 elf_ex = *((struct elfhdr *) bprm->buf); /* exec-header */
1356 #ifdef BSWAP_NEEDED
1357 bswap_ehdr(&elf_ex);
1358 #endif
1359
1360 /* First of all, some simple consistency checks */
1361 if ((elf_ex.e_type != ET_EXEC && elf_ex.e_type != ET_DYN) ||
1362 (! elf_check_arch(elf_ex.e_machine))) {
1363 return -ENOEXEC;
1364 }
1365
1366 bprm->p = copy_elf_strings(1, &bprm->filename, bprm->page, bprm->p);
1367 bprm->p = copy_elf_strings(bprm->envc,bprm->envp,bprm->page,bprm->p);
1368 bprm->p = copy_elf_strings(bprm->argc,bprm->argv,bprm->page,bprm->p);
1369 if (!bprm->p) {
1370 retval = -E2BIG;
1371 }
1372
1373 /* Now read in all of the header information */
1374 elf_phdata = (struct elf_phdr *)malloc(elf_ex.e_phentsize*elf_ex.e_phnum);
1375 if (elf_phdata == NULL) {
1376 return -ENOMEM;
1377 }
1378
1379 retval = lseek(bprm->fd, elf_ex.e_phoff, SEEK_SET);
1380 if(retval > 0) {
1381 retval = read(bprm->fd, (char *) elf_phdata,
1382 elf_ex.e_phentsize * elf_ex.e_phnum);
1383 }
1384
1385 if (retval < 0) {
1386 perror("load_elf_binary");
1387 exit(-1);
1388 free (elf_phdata);
1389 return -errno;
1390 }
1391
1392 #ifdef BSWAP_NEEDED
1393 elf_ppnt = elf_phdata;
1394 for (i=0; i<elf_ex.e_phnum; i++, elf_ppnt++) {
1395 bswap_phdr(elf_ppnt);
1396 }
1397 #endif
1398 elf_ppnt = elf_phdata;
1399
1400 elf_bss = 0;
1401 elf_brk = 0;
1402
1403
1404 elf_stack = ~((abi_ulong)0UL);
1405 elf_interpreter = NULL;
1406 start_code = ~((abi_ulong)0UL);
1407 end_code = 0;
1408 start_data = 0;
1409 end_data = 0;
1410 interp_ex.a_info = 0;
1411
1412 for(i=0;i < elf_ex.e_phnum; i++) {
1413 if (elf_ppnt->p_type == PT_INTERP) {
1414 if ( elf_interpreter != NULL )
1415 {
1416 free (elf_phdata);
1417 free(elf_interpreter);
1418 close(bprm->fd);
1419 return -EINVAL;
1420 }
1421
1422 /* This is the program interpreter used for
1423 * shared libraries - for now assume that this
1424 * is an a.out format binary
1425 */
1426
1427 elf_interpreter = (char *)malloc(elf_ppnt->p_filesz);
1428
1429 if (elf_interpreter == NULL) {
1430 free (elf_phdata);
1431 close(bprm->fd);
1432 return -ENOMEM;
1433 }
1434
1435 retval = lseek(bprm->fd, elf_ppnt->p_offset, SEEK_SET);
1436 if(retval >= 0) {
1437 retval = read(bprm->fd, elf_interpreter, elf_ppnt->p_filesz);
1438 }
1439 if(retval < 0) {
1440 perror("load_elf_binary2");
1441 exit(-1);
1442 }
1443
1444 /* If the program interpreter is one of these two,
1445 then assume an iBCS2 image. Otherwise assume
1446 a native linux image. */
1447
1448 /* JRP - Need to add X86 lib dir stuff here... */
1449
1450 if (strcmp(elf_interpreter,"/usr/lib/libc.so.1") == 0 ||
1451 strcmp(elf_interpreter,"/usr/lib/ld.so.1") == 0) {
1452 ibcs2_interpreter = 1;
1453 }
1454
1455 #if 0
1456 printf("Using ELF interpreter %s\n", path(elf_interpreter));
1457 #endif
1458 if (retval >= 0) {
1459 retval = open(path(elf_interpreter), O_RDONLY);
1460 if(retval >= 0) {
1461 interpreter_fd = retval;
1462 }
1463 else {
1464 perror(elf_interpreter);
1465 exit(-1);
1466 /* retval = -errno; */
1467 }
1468 }
1469
1470 if (retval >= 0) {
1471 retval = lseek(interpreter_fd, 0, SEEK_SET);
1472 if(retval >= 0) {
1473 retval = read(interpreter_fd,bprm->buf,128);
1474 }
1475 }
1476 if (retval >= 0) {
1477 interp_ex = *((struct exec *) bprm->buf); /* aout exec-header */
1478 interp_elf_ex = *((struct elfhdr *) bprm->buf); /* elf exec-header */
1479 }
1480 if (retval < 0) {
1481 perror("load_elf_binary3");
1482 exit(-1);
1483 free (elf_phdata);
1484 free(elf_interpreter);
1485 close(bprm->fd);
1486 return retval;
1487 }
1488 }
1489 elf_ppnt++;
1490 }
1491
1492 /* Some simple consistency checks for the interpreter */
1493 if (elf_interpreter){
1494 interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;
1495
1496 /* Now figure out which format our binary is */
1497 if ((N_MAGIC(interp_ex) != OMAGIC) && (N_MAGIC(interp_ex) != ZMAGIC) &&
1498 (N_MAGIC(interp_ex) != QMAGIC)) {
1499 interpreter_type = INTERPRETER_ELF;
1500 }
1501
1502 if (interp_elf_ex.e_ident[0] != 0x7f ||
1503 strncmp((char *)&interp_elf_ex.e_ident[1], "ELF",3) != 0) {
1504 interpreter_type &= ~INTERPRETER_ELF;
1505 }
1506
1507 if (!interpreter_type) {
1508 free(elf_interpreter);
1509 free(elf_phdata);
1510 close(bprm->fd);
1511 return -ELIBBAD;
1512 }
1513 }
1514
1515 /* OK, we are done with that, now set up the arg stuff,
1516 and then start this sucker up */
1517
1518 {
1519 char * passed_p;
1520
1521 if (interpreter_type == INTERPRETER_AOUT) {
1522 snprintf(passed_fileno, sizeof(passed_fileno), "%d", bprm->fd);
1523 passed_p = passed_fileno;
1524
1525 if (elf_interpreter) {
1526 bprm->p = copy_elf_strings(1,&passed_p,bprm->page,bprm->p);
1527 bprm->argc++;
1528 }
1529 }
1530 if (!bprm->p) {
1531 if (elf_interpreter) {
1532 free(elf_interpreter);
1533 }
1534 free (elf_phdata);
1535 close(bprm->fd);
1536 return -E2BIG;
1537 }
1538 }
1539
1540 /* OK, This is the point of no return */
1541 info->end_data = 0;
1542 info->end_code = 0;
1543 info->start_mmap = (abi_ulong)ELF_START_MMAP;
1544 info->mmap = 0;
1545 elf_entry = (abi_ulong) elf_ex.e_entry;
1546
1547 #if defined(CONFIG_USE_GUEST_BASE)
1548 /*
1549  * If the user has not explicitly set the guest_base, probe here
1550  * whether we should set it automatically.
1551  */
1552 if (!have_guest_base) {
1553 /*
1554  * Go through the ELF program header table and find out whether
1555  * any of the segments drop below our current mmap_min_addr; if so,
1556  * set guest_base accordingly.
1557  */
1558 for (i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum;
1559 i++, elf_ppnt++) {
1560 if (elf_ppnt->p_type != PT_LOAD)
1561 continue;
1562 if (HOST_PAGE_ALIGN(elf_ppnt->p_vaddr) < mmap_min_addr) {
1563 guest_base = HOST_PAGE_ALIGN(mmap_min_addr);
1564 break;
1565 }
1566 }
1567 }
1568 #endif /* CONFIG_USE_GUEST_BASE */
1569
1570 /* Do this so that we can load the interpreter, if need be. We will
1571 change some of these later */
1572 info->rss = 0;
1573 bprm->p = setup_arg_pages(bprm->p, bprm, info);
1574 info->start_stack = bprm->p;
1575
1576 /* Now we do a little grungy work by mmaping the ELF image into
1577  * the correct location in memory. At this point, we assume that
1578  * the image should be loaded at a fixed address, not at a variable
1579  * address.
1580  */
1581
1582 for(i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum; i++, elf_ppnt++) {
1583 int elf_prot = 0;
1584 int elf_flags = 0;
1585 abi_ulong error;
1586
1587 if (elf_ppnt->p_type != PT_LOAD)
1588 continue;
1589
1590 if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ;
1591 if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
1592 if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
1593 elf_flags = MAP_PRIVATE | MAP_DENYWRITE;
1594 if (elf_ex.e_type == ET_EXEC || load_addr_set) {
1595 elf_flags |= MAP_FIXED;
1596 } else if (elf_ex.e_type == ET_DYN) {
1597 /* Try and get dynamic programs out of the way of the default mmap
1598 base, as well as whatever program they might try to exec. This
1599 is because the brk will follow the loader, and is not movable. */
1600 /* NOTE: for qemu, we do a big mmap to get enough space
1601 without hardcoding any address */
1602 error = target_mmap(0, ET_DYN_MAP_SIZE,
1603 PROT_NONE, MAP_PRIVATE | MAP_ANON,
1604 -1, 0);
1605 if (error == -1) {
1606 perror("mmap");
1607 exit(-1);
1608 }
1609 load_bias = TARGET_ELF_PAGESTART(error - elf_ppnt->p_vaddr);
1610 }
1611
1612 error = target_mmap(TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr),
1613 (elf_ppnt->p_filesz +
1614 TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)),
1615 elf_prot,
1616 (MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE),
1617 bprm->fd,
1618 (elf_ppnt->p_offset -
1619 TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)));
1620 if (error == -1) {
1621 perror("mmap");
1622 exit(-1);
1623 }
1624
1625 #ifdef LOW_ELF_STACK
1626 if (TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr) < elf_stack)
1627 elf_stack = TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr);
1628 #endif
1629
1630 if (!load_addr_set) {
1631 load_addr_set = 1;
1632 load_addr = elf_ppnt->p_vaddr - elf_ppnt->p_offset;
1633 if (elf_ex.e_type == ET_DYN) {
1634 load_bias += error -
1635 TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr);
1636 load_addr += load_bias;
1637 reloc_func_desc = load_bias;
1638 }
1639 }
1640 k = elf_ppnt->p_vaddr;
1641 if (k < start_code)
1642 start_code = k;
1643 if (start_data < k)
1644 start_data = k;
1645 k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
1646 if (k > elf_bss)
1647 elf_bss = k;
1648 if ((elf_ppnt->p_flags & PF_X) && end_code < k)
1649 end_code = k;
1650 if (end_data < k)
1651 end_data = k;
1652 k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
1653 if (k > elf_brk) elf_brk = k;
1654 }
1655
1656 elf_entry += load_bias;
1657 elf_bss += load_bias;
1658 elf_brk += load_bias;
1659 start_code += load_bias;
1660 end_code += load_bias;
1661 start_data += load_bias;
1662 end_data += load_bias;
1663
1664 if (elf_interpreter) {
1665 if (interpreter_type & 1) {
1666 elf_entry = load_aout_interp(&interp_ex, interpreter_fd);
1667 }
1668 else if (interpreter_type & 2) {
1669 elf_entry = load_elf_interp(&interp_elf_ex, interpreter_fd,
1670 &interp_load_addr);
1671 }
1672 reloc_func_desc = interp_load_addr;
1673
1674 close(interpreter_fd);
1675 free(elf_interpreter);
1676
1677 if (elf_entry == ~((abi_ulong)0UL)) {
1678 printf("Unable to load interpreter\n");
1679 free(elf_phdata);
1680 exit(-1);
1681 return 0;
1682 }
1683 }
1684
1685 free(elf_phdata);
1686
1687 if (qemu_log_enabled())
1688 load_symbols(&elf_ex, bprm->fd);
1689
1690 if (interpreter_type != INTERPRETER_AOUT) close(bprm->fd);
1691 info->personality = (ibcs2_interpreter ? PER_SVR4 : PER_LINUX);
1692
1693 #ifdef LOW_ELF_STACK
1694 info->start_stack = bprm->p = elf_stack - 4;
1695 #endif
1696 bprm->p = create_elf_tables(bprm->p,
1697 bprm->argc,
1698 bprm->envc,
1699 &elf_ex,
1700 load_addr, load_bias,
1701 interp_load_addr,
1702 (interpreter_type == INTERPRETER_AOUT ? 0 : 1),
1703 info);
1704 info->load_addr = reloc_func_desc;
1705 info->start_brk = info->brk = elf_brk;
1706 info->end_code = end_code;
1707 info->start_code = start_code;
1708 info->start_data = start_data;
1709 info->end_data = end_data;
1710 info->start_stack = bprm->p;
1711
1712 /* Calling set_brk effectively mmaps the pages that we need for the bss and break
1713 sections */
1714 set_brk(elf_bss, elf_brk);
1715
1716 padzero(elf_bss, elf_brk);
1717
1718 #if 0
1719 printf("(start_brk) %x\n" , info->start_brk);
1720 printf("(end_code) %x\n" , info->end_code);
1721 printf("(start_code) %x\n" , info->start_code);
1722 printf("(end_data) %x\n" , info->end_data);
1723 printf("(start_stack) %x\n" , info->start_stack);
1724 printf("(brk) %x\n" , info->brk);
1725 #endif
1726
1727 if ( info->personality == PER_SVR4 )
1728 {
1729 /* Why this, you ask??? Well SVr4 maps page 0 as read-only,
1730 and some applications "depend" upon this behavior.
1731 Since we do not have the power to recompile these, we
1732 emulate the SVr4 behavior. Sigh. */
1733 mapped_addr = target_mmap(0, qemu_host_page_size, PROT_READ | PROT_EXEC,
1734 MAP_FIXED | MAP_PRIVATE, -1, 0);
1735 }
1736
1737 info->entry = elf_entry;
1738
1739 #ifdef USE_ELF_CORE_DUMP
1740 bprm->core_dump = &elf_core_dump;
1741 #endif
1742
1743 return 0;
1744 }
1745
1746 #ifdef USE_ELF_CORE_DUMP
1747
1748 /*
1749 * Definitions to generate Intel SVR4-like core files.
1750 * These mostly have the same names as the SVR4 types with "target_elf_"
1751 * tacked on the front to prevent clashes with linux definitions,
1752 * and the typedef forms have been avoided. This is mostly like
1753 * the SVR4 structure, but more Linuxy, with things that Linux does
1754 * not support and which gdb doesn't really use excluded.
1755 *
1756  * Fields we don't dump (their contents are zero) in linux-user qemu
1757 * are marked with XXX.
1758 *
1759 * Core dump code is copied from linux kernel (fs/binfmt_elf.c).
1760 *
1761  * Porting ELF coredump to a target is a (quite) simple process. First you
1762  * define USE_ELF_CORE_DUMP in the target ELF code (where init_thread() for
1763  * the target resides):
1764  *
1765  * #define USE_ELF_CORE_DUMP
1766  *
1767  * Next you define the type of register set used for dumping. The ELF
1768  * specification says that it needs to be an array of elf_greg_t of size ELF_NREG.
1769  *
1770  * typedef <target_regtype> target_elf_greg_t;
1771  * #define ELF_NREG <number of registers>
1772  * typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
1773  *
1774  * The last step is to implement a target-specific function that copies
1775  * registers from the given CPU into the register set just defined. The prototype is:
1776  *
1777  * static void elf_core_copy_regs(target_elf_gregset_t *regs,
1778  *                                const CPUState *env);
1779  *
1780  * Parameters:
1781  *     regs - copy register values into here (allocated and zeroed by caller)
1782  *     env  - copy registers from here
1783  *
1784  * An example for the ARM target is provided in this file.
1785 */
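/*
 * A minimal sketch for a hypothetical new target (illustrative only; the
 * 32-register file and the env->regs[] array are assumptions, mirroring the
 * ARM example earlier in this file):
 *
 *     #define USE_ELF_CORE_DUMP
 *
 *     #define ELF_NREG 32
 *     typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
 *
 *     static void elf_core_copy_regs(target_elf_gregset_t *regs,
 *                                    const CPUState *env)
 *     {
 *         int i;
 *
 *         // Copy the general-purpose registers into the core-dump layout.
 *         for (i = 0; i < ELF_NREG; i++) {
 *             (*regs)[i] = env->regs[i];
 *         }
 *     }
 */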
1786
1787 /* An ELF note in memory */
1788 struct memelfnote {
1789 const char *name;
1790 size_t namesz;
1791 size_t namesz_rounded;
1792 int type;
1793 size_t datasz;
1794 void *data;
1795 size_t notesz;
1796 };
1797
1798 struct target_elf_siginfo {
1799 int si_signo; /* signal number */
1800 int si_code; /* extra code */
1801 int si_errno; /* errno */
1802 };
1803
1804 struct target_elf_prstatus {
1805 struct target_elf_siginfo pr_info; /* Info associated with signal */
1806 short pr_cursig; /* Current signal */
1807 target_ulong pr_sigpend; /* XXX */
1808 target_ulong pr_sighold; /* XXX */
1809 target_pid_t pr_pid;
1810 target_pid_t pr_ppid;
1811 target_pid_t pr_pgrp;
1812 target_pid_t pr_sid;
1813 struct target_timeval pr_utime; /* XXX User time */
1814 struct target_timeval pr_stime; /* XXX System time */
1815 struct target_timeval pr_cutime; /* XXX Cumulative user time */
1816 struct target_timeval pr_cstime; /* XXX Cumulative system time */
1817 target_elf_gregset_t pr_reg; /* GP registers */
1818 int pr_fpvalid; /* XXX */
1819 };
1820
1821 #define ELF_PRARGSZ (80) /* Number of chars for args */
1822
1823 struct target_elf_prpsinfo {
1824 char pr_state; /* numeric process state */
1825 char pr_sname; /* char for pr_state */
1826 char pr_zomb; /* zombie */
1827 char pr_nice; /* nice val */
1828 target_ulong pr_flag; /* flags */
1829 target_uid_t pr_uid;
1830 target_gid_t pr_gid;
1831 target_pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
1832 /* Lots missing */
1833 char pr_fname[16]; /* filename of executable */
1834 char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
1835 };
1836
1837 /* Here is the structure in which status of each thread is captured. */
1838 struct elf_thread_status {
1839 QTAILQ_ENTRY(elf_thread_status) ets_link;
1840 struct target_elf_prstatus prstatus; /* NT_PRSTATUS */
1841 #if 0
1842 elf_fpregset_t fpu; /* NT_PRFPREG */
1843 struct task_struct *thread;
1844 elf_fpxregset_t xfpu; /* ELF_CORE_XFPREG_TYPE */
1845 #endif
1846 struct memelfnote notes[1];
1847 int num_notes;
1848 };
1849
1850 struct elf_note_info {
1851 struct memelfnote *notes;
1852 struct target_elf_prstatus *prstatus; /* NT_PRSTATUS */
1853 struct target_elf_prpsinfo *psinfo; /* NT_PRPSINFO */
1854
1855 QTAILQ_HEAD(thread_list_head, elf_thread_status) thread_list;
1856 #if 0
1857 /*
1858 * Current version of ELF coredump doesn't support
1859 * dumping fp regs etc.
1860 */
1861 elf_fpregset_t *fpu;
1862 elf_fpxregset_t *xfpu;
1863 int thread_status_size;
1864 #endif
1865 int notes_size;
1866 int numnote;
1867 };
1868
1869 struct vm_area_struct {
1870 abi_ulong vma_start; /* start vaddr of memory region */
1871 abi_ulong vma_end; /* end vaddr of memory region */
1872 abi_ulong vma_flags; /* protection etc. flags for the region */
1873 QTAILQ_ENTRY(vm_area_struct) vma_link;
1874 };
1875
1876 struct mm_struct {
1877 QTAILQ_HEAD(, vm_area_struct) mm_mmap;
1878 int mm_count; /* number of mappings */
1879 };
1880
1881 static struct mm_struct *vma_init(void);
1882 static void vma_delete(struct mm_struct *);
1883 static int vma_add_mapping(struct mm_struct *, abi_ulong,
1884 abi_ulong, abi_ulong);
1885 static int vma_get_mapping_count(const struct mm_struct *);
1886 static struct vm_area_struct *vma_first(const struct mm_struct *);
1887 static struct vm_area_struct *vma_next(struct vm_area_struct *);
1888 static abi_ulong vma_dump_size(const struct vm_area_struct *);
1889 static int vma_walker(void *priv, unsigned long start, unsigned long end,
1890 unsigned long flags);
1891
1892 static void fill_elf_header(struct elfhdr *, int, uint16_t, uint32_t);
1893 static void fill_note(struct memelfnote *, const char *, int,
1894 unsigned int, void *);
1895 static void fill_prstatus(struct target_elf_prstatus *, const TaskState *, int);
1896 static int fill_psinfo(struct target_elf_prpsinfo *, const TaskState *);
1897 static void fill_auxv_note(struct memelfnote *, const TaskState *);
1898 static void fill_elf_note_phdr(struct elf_phdr *, int, off_t);
1899 static size_t note_size(const struct memelfnote *);
1900 static void free_note_info(struct elf_note_info *);
1901 static int fill_note_info(struct elf_note_info *, long, const CPUState *);
1902 static void fill_thread_info(struct elf_note_info *, const CPUState *);
1903 static int core_dump_filename(const TaskState *, char *, size_t);
1904
1905 static int dump_write(int, const void *, size_t);
1906 static int write_note(struct memelfnote *, int);
1907 static int write_note_info(struct elf_note_info *, int);
1908
1909 #ifdef BSWAP_NEEDED
1910 static void bswap_prstatus(struct target_elf_prstatus *);
1911 static void bswap_psinfo(struct target_elf_prpsinfo *);
1912
1913 static void bswap_prstatus(struct target_elf_prstatus *prstatus)
1914 {
1915 prstatus->pr_info.si_signo = tswapl(prstatus->pr_info.si_signo);
1916 prstatus->pr_info.si_code = tswapl(prstatus->pr_info.si_code);
1917 prstatus->pr_info.si_errno = tswapl(prstatus->pr_info.si_errno);
1918 prstatus->pr_cursig = tswap16(prstatus->pr_cursig);
1919 prstatus->pr_sigpend = tswapl(prstatus->pr_sigpend);
1920 prstatus->pr_sighold = tswapl(prstatus->pr_sighold);
1921 prstatus->pr_pid = tswap32(prstatus->pr_pid);
1922 prstatus->pr_ppid = tswap32(prstatus->pr_ppid);
1923 prstatus->pr_pgrp = tswap32(prstatus->pr_pgrp);
1924 prstatus->pr_sid = tswap32(prstatus->pr_sid);
1925 /* cpu times are not filled, so we skip them */
1926 /* regs should be in correct format already */
1927 prstatus->pr_fpvalid = tswap32(prstatus->pr_fpvalid);
1928 }
1929
1930 static void bswap_psinfo(struct target_elf_prpsinfo *psinfo)
1931 {
1932 psinfo->pr_flag = tswapl(psinfo->pr_flag);
1933 psinfo->pr_uid = tswap16(psinfo->pr_uid);
1934 psinfo->pr_gid = tswap16(psinfo->pr_gid);
1935 psinfo->pr_pid = tswap32(psinfo->pr_pid);
1936 psinfo->pr_ppid = tswap32(psinfo->pr_ppid);
1937 psinfo->pr_pgrp = tswap32(psinfo->pr_pgrp);
1938 psinfo->pr_sid = tswap32(psinfo->pr_sid);
1939 }
1940 #endif /* BSWAP_NEEDED */
1941
1942 /*
1943 * Minimal support for Linux memory regions. These are needed
1944 * when finding out exactly what memory belongs to the
1945 * emulated process. No locks are needed here, as long as the
1946 * thread that received the signal is stopped.
1947 */
1948
1949 static struct mm_struct *vma_init(void)
1950 {
1951 struct mm_struct *mm;
1952
1953 if ((mm = qemu_malloc(sizeof (*mm))) == NULL)
1954 return (NULL);
1955
1956 mm->mm_count = 0;
1957 QTAILQ_INIT(&mm->mm_mmap);
1958
1959 return (mm);
1960 }
1961
1962 static void vma_delete(struct mm_struct *mm)
1963 {
1964 struct vm_area_struct *vma;
1965
1966 while ((vma = vma_first(mm)) != NULL) {
1967 QTAILQ_REMOVE(&mm->mm_mmap, vma, vma_link);
1968 qemu_free(vma);
1969 }
1970 qemu_free(mm);
1971 }
1972
1973 static int vma_add_mapping(struct mm_struct *mm, abi_ulong start,
1974 abi_ulong end, abi_ulong flags)
1975 {
1976 struct vm_area_struct *vma;
1977
1978 if ((vma = qemu_mallocz(sizeof (*vma))) == NULL)
1979 return (-1);
1980
1981 vma->vma_start = start;
1982 vma->vma_end = end;
1983 vma->vma_flags = flags;
1984
1985 QTAILQ_INSERT_TAIL(&mm->mm_mmap, vma, vma_link);
1986 mm->mm_count++;
1987
1988 return (0);
1989 }
1990
1991 static struct vm_area_struct *vma_first(const struct mm_struct *mm)
1992 {
1993 return (QTAILQ_FIRST(&mm->mm_mmap));
1994 }
1995
1996 static struct vm_area_struct *vma_next(struct vm_area_struct *vma)
1997 {
1998 return (QTAILQ_NEXT(vma, vma_link));
1999 }
2000
2001 static int vma_get_mapping_count(const struct mm_struct *mm)
2002 {
2003 return (mm->mm_count);
2004 }
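/*
 * Illustrative sketch only (disabled like the other #if 0 fragments in
 * this file): the vma_* helpers above are meant to be used in the
 * sequence that elf_core_dump() follows further below. The function
 * name vma_usage_sketch is hypothetical.
 */
#if 0
static void vma_usage_sketch(void)
{
    struct mm_struct *mm;
    struct vm_area_struct *vma;

    if ((mm = vma_init()) == NULL)
        return;
    /* vma_walker() adds one vm_area_struct per target mapping */
    walk_memory_regions(mm, vma_walker);
    for (vma = vma_first(mm); vma != NULL; vma = vma_next(vma))
        (void) vma_dump_size(vma); /* bytes this region adds to the dump */
    vma_delete(mm);
}
#endif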
2005
2006 /*
2007 * Calculate file (dump) size of given memory region.
2008 */
2009 static abi_ulong vma_dump_size(const struct vm_area_struct *vma)
2010 {
2011 /* if we cannot even read the first page, skip it */
2012 if (!access_ok(VERIFY_READ, vma->vma_start, TARGET_PAGE_SIZE))
2013 return (0);
2014
2015 /*
2016 * Usually we don't dump executable pages, as they contain
2017 * non-writable code that the debugger can read directly from
2018 * the target library etc. However, thread stacks are also
2019 * marked executable, so we read in the first page of a given
2020 * region and check whether it starts with the ELF magic
2021 * (0x7f 'E' 'L' 'F'). If there is no ELF header, we dump it.
2022 */
2023 if (vma->vma_flags & PROT_EXEC) {
2024 char page[TARGET_PAGE_SIZE];
2025
2026 if (copy_from_user(page, vma->vma_start, sizeof (page)) == 0 &&
2027 (page[EI_MAG0] == ELFMAG0) &&
2028 (page[EI_MAG1] == ELFMAG1) &&
2029 (page[EI_MAG2] == ELFMAG2) &&
2030 (page[EI_MAG3] == ELFMAG3)) {
2031 /*
2032 * The mapping is most likely backed by an ELF binary;
2033 * don't dump it.
2034 */
2035 return (0);
2036 }
2037 }
2038
2039 return (vma->vma_end - vma->vma_start);
2040 }
2041
2042 static int vma_walker(void *priv, unsigned long start, unsigned long end,
2043 unsigned long flags)
2044 {
2045 struct mm_struct *mm = (struct mm_struct *)priv;
2046
2047 /*
2048 * Don't dump anything that qemu has reserved for internal use.
2049 */
2050 if (flags & PAGE_RESERVED)
2051 return (0);
2052
2053 vma_add_mapping(mm, start, end, flags);
2054 return (0);
2055 }
2056
2057 static void fill_note(struct memelfnote *note, const char *name, int type,
2058 unsigned int sz, void *data)
2059 {
2060 unsigned int namesz;
2061
2062 namesz = strlen(name) + 1;
2063 note->name = name;
2064 note->namesz = namesz;
2065 note->namesz_rounded = roundup(namesz, sizeof (int32_t));
2066 note->type = type;
2067 note->datasz = roundup(sz, sizeof (int32_t));
2068 note->data = data;
2069
2070 /*
2071 * We calculate the rounded-up note size here, as specified
2072 * by the ELF specification.
2073 */
2074 note->notesz = sizeof (struct elf_note) +
2075 note->namesz_rounded + note->datasz;
2076 }
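/*
 * Worked example (illustrative arithmetic): for one of the "CORE" /
 * NT_PRSTATUS notes built below, namesz is 5 ("CORE" plus the NUL),
 * which rounds up to 8, so notesz ends up as the 12-byte note header
 * plus 8 bytes of padded name plus roundup(sizeof(struct
 * target_elf_prstatus), 4) bytes of descriptor.
 */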
2077
2078 static void fill_elf_header(struct elfhdr *elf, int segs, uint16_t machine,
2079 uint32_t flags)
2080 {
2081 (void) memset(elf, 0, sizeof(*elf));
2082
2083 (void) memcpy(elf->e_ident, ELFMAG, SELFMAG);
2084 elf->e_ident[EI_CLASS] = ELF_CLASS;
2085 elf->e_ident[EI_DATA] = ELF_DATA;
2086 elf->e_ident[EI_VERSION] = EV_CURRENT;
2087 elf->e_ident[EI_OSABI] = ELF_OSABI;
2088
2089 elf->e_type = ET_CORE;
2090 elf->e_machine = machine;
2091 elf->e_version = EV_CURRENT;
2092 elf->e_phoff = sizeof(struct elfhdr);
2093 elf->e_flags = flags;
2094 elf->e_ehsize = sizeof(struct elfhdr);
2095 elf->e_phentsize = sizeof(struct elf_phdr);
2096 elf->e_phnum = segs;
2097
2098 #ifdef BSWAP_NEEDED
2099 bswap_ehdr(elf);
2100 #endif
2101 }
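/*
 * Everything not set explicitly above (e_shoff, e_shnum, e_shstrndx,
 * ...) stays zero from the memset: the core files produced here carry
 * no section header table, only program headers.
 */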
2102
2103 static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, off_t offset)
2104 {
2105 phdr->p_type = PT_NOTE;
2106 phdr->p_offset = offset;
2107 phdr->p_vaddr = 0;
2108 phdr->p_paddr = 0;
2109 phdr->p_filesz = sz;
2110 phdr->p_memsz = 0;
2111 phdr->p_flags = 0;
2112 phdr->p_align = 0;
2113
2114 #ifdef BSWAP_NEEDED
2115 bswap_phdr(phdr);
2116 #endif
2117 }
2118
2119 static size_t note_size(const struct memelfnote *note)
2120 {
2121 return (note->notesz);
2122 }
2123
2124 static void fill_prstatus(struct target_elf_prstatus *prstatus,
2125 const TaskState *ts, int signr)
2126 {
2127 (void) memset(prstatus, 0, sizeof (*prstatus));
2128 prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
2129 prstatus->pr_pid = ts->ts_tid;
2130 prstatus->pr_ppid = getppid();
2131 prstatus->pr_pgrp = getpgrp();
2132 prstatus->pr_sid = getsid(0);
2133
2134 #ifdef BSWAP_NEEDED
2135 bswap_prstatus(prstatus);
2136 #endif
2137 }
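/*
 * Note that pr_pid is the emulated thread id (ts->ts_tid) while
 * pr_ppid, pr_pgrp and pr_sid come from the host process via
 * getppid()/getpgrp()/getsid(); the cpu time fields are left zeroed
 * by the memset above.
 */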
2138
2139 static int fill_psinfo(struct target_elf_prpsinfo *psinfo, const TaskState *ts)
2140 {
2141 char *filename, *base_filename;
2142 unsigned int i, len;
2143
2144 (void) memset(psinfo, 0, sizeof (*psinfo));
2145
2146 len = ts->info->arg_end - ts->info->arg_start;
2147 if (len >= ELF_PRARGSZ)
2148 len = ELF_PRARGSZ - 1;
2149 if (copy_from_user(&psinfo->pr_psargs, ts->info->arg_start, len))
2150 return -EFAULT;
2151 for (i = 0; i < len; i++)
2152 if (psinfo->pr_psargs[i] == 0)
2153 psinfo->pr_psargs[i] = ' ';
2154 psinfo->pr_psargs[len] = 0;
2155
2156 psinfo->pr_pid = getpid();
2157 psinfo->pr_ppid = getppid();
2158 psinfo->pr_pgrp = getpgrp();
2159 psinfo->pr_sid = getsid(0);
2160 psinfo->pr_uid = getuid();
2161 psinfo->pr_gid = getgid();
2162
2163 filename = strdup(ts->bprm->filename);
2164 base_filename = strdup(basename(filename));
2165 (void) strncpy(psinfo->pr_fname, base_filename,
2166 sizeof(psinfo->pr_fname));
2167 free(base_filename);
2168 free(filename);
2169
2170 #ifdef BSWAP_NEEDED
2171 bswap_psinfo(psinfo);
2172 #endif
2173 return (0);
2174 }
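/*
 * Example of the argument handling above (hypothetical values): an
 * argv of "ls", "-l", "/tmp" is stored on the target stack as
 * NUL-separated strings; the loop turns the separators into spaces so
 * pr_psargs ends up holding roughly "ls -l /tmp", NUL-terminated.
 */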
2175
2176 static void fill_auxv_note(struct memelfnote *note, const TaskState *ts)
2177 {
2178 elf_addr_t auxv = (elf_addr_t)ts->info->saved_auxv;
2179 elf_addr_t orig_auxv = auxv;
2180 abi_ulong val;
2181 void *ptr;
2182 int i, len;
2183
2184 /*
2185 * The auxiliary vector is stored on the target process stack. It
2186 * contains {type, value} pairs that we need to dump into a note.
2187 * This is not strictly necessary, but we do it for completeness.
2188 */
2189
2190 /* find out the length of the vector; AT_NULL is the terminator */
2191 i = len = 0;
2192 do {
2193 get_user_ual(val, auxv);
2194 i += 2;
2195 auxv += 2 * sizeof (elf_addr_t);
2196 } while (val != AT_NULL);
2197 len = i * sizeof (elf_addr_t);
2198
2199 /* read in whole auxv vector and copy it to memelfnote */
2200 ptr = lock_user(VERIFY_READ, orig_auxv, len, 0);
2201 if (ptr != NULL) {
2202 fill_note(note, "CORE", NT_AUXV, len, ptr);
2203 unlock_user(ptr, auxv, len);
2204 }
2205 }
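/*
 * The resulting NT_AUXV descriptor is the raw vector as found on the
 * target stack, e.g. { AT_PHDR, <addr> } ... { AT_ENTRY, <entry> }
 * ... { AT_NULL, 0 }; the terminating pair is included in len because
 * the loop above counts two words per iteration, including the final
 * AT_NULL entry.
 */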
2206
2207 /*
2208 * Constructs the name of the coredump file, using the following
2209 * naming convention:
2210 * qemu_<basename-of-target-binary>_<date>-<time>_<pid>.core
2211 *
2212 * Returns 0 in case of success, -1 otherwise (errno is set).
2213 */
2214 static int core_dump_filename(const TaskState *ts, char *buf,
2215 size_t bufsize)
2216 {
2217 char timestamp[64];
2218 char *filename = NULL;
2219 char *base_filename = NULL;
2220 struct timeval tv;
2221 struct tm tm;
2222
2223 assert(bufsize >= PATH_MAX);
2224
2225 if (gettimeofday(&tv, NULL) < 0) {
2226 (void) fprintf(stderr, "unable to get current timestamp: %s",
2227 strerror(errno));
2228 return (-1);
2229 }
2230
2231 filename = strdup(ts->bprm->filename);
2232 base_filename = strdup(basename(filename));
2233 (void) strftime(timestamp, sizeof (timestamp), "%Y%m%d-%H%M%S",
2234 localtime_r(&tv.tv_sec, &tm));
2235 (void) snprintf(buf, bufsize, "qemu_%s_%s_%d.core",
2236 base_filename, timestamp, (int)getpid());
2237 free(base_filename);
2238 free(filename);
2239
2240 return (0);
2241 }
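/*
 * For example (hypothetical values): dumping /bin/ls running as pid
 * 1234 on 2010-01-02 03:04:05 would produce
 * "qemu_ls_20100102-030405_1234.core", relative to the current
 * working directory.
 */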
2242
2243 static int dump_write(int fd, const void *ptr, size_t size)
2244 {
2245 const char *bufp = (const char *)ptr;
2246 ssize_t bytes_written, bytes_left;
2247 struct rlimit dumpsize;
2248 off_t pos;
2249
2250 bytes_written = 0;
2251 getrlimit(RLIMIT_CORE, &dumpsize);
2252 if ((pos = lseek(fd, 0, SEEK_CUR)) == -1) {
2253 if (errno == ESPIPE) { /* not a seekable stream */
2254 bytes_left = size;
2255 } else {
2256 return pos;
2257 }
2258 } else {
2259 if (dumpsize.rlim_cur <= pos) {
2260 return -1;
2261 } else if (dumpsize.rlim_cur == RLIM_INFINITY) {
2262 bytes_left = size;
2263 } else {
2264 size_t limit_left = dumpsize.rlim_cur - pos;
2265 bytes_left = limit_left >= size ? size : limit_left;
2266 }
2267 }
2268
2269 /*
2270 * Under normal conditions a single write(2) should do, but
2271 * for sockets etc. this loop is more portable.
2272 */
2273 do {
2274 bytes_written = write(fd, bufp, bytes_left);
2275 if (bytes_written < 0) {
2276 if (errno == EINTR)
2277 continue;
2278 return (-1);
2279 } else if (bytes_written == 0) { /* eof */
2280 return (-1);
2281 }
2282 bufp += bytes_written;
2283 bytes_left -= bytes_written;
2284 } while (bytes_left > 0);
2285
2286 return (0);
2287 }
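/*
 * Note on the RLIMIT_CORE handling above: once the file offset has
 * reached the soft limit the write fails with -1, and when the
 * remaining window is smaller than `size' the dump is silently
 * truncated at the limit.
 */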
2288
2289 static int write_note(struct memelfnote *men, int fd)
2290 {
2291 struct elf_note en;
2292
2293 en.n_namesz = men->namesz;
2294 en.n_type = men->type;
2295 en.n_descsz = men->datasz;
2296
2297 #ifdef BSWAP_NEEDED
2298 bswap_note(&en);
2299 #endif
2300
2301 if (dump_write(fd, &en, sizeof(en)) != 0)
2302 return (-1);
2303 if (dump_write(fd, men->name, men->namesz_rounded) != 0)
2304 return (-1);
2305 if (dump_write(fd, men->data, men->datasz) != 0)
2306 return (-1);
2307
2308 return (0);
2309 }
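/*
 * On disk each note is therefore laid out as: the elf_note header
 * (n_namesz/n_descsz/n_type), the name written out to namesz_rounded
 * bytes (8 for the "CORE" notes built here), then the descriptor
 * padded to datasz -- matching the three dump_write() calls above.
 */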
2310
2311 static void fill_thread_info(struct elf_note_info *info, const CPUState *env)
2312 {
2313 TaskState *ts = (TaskState *)env->opaque;
2314 struct elf_thread_status *ets;
2315
2316 ets = qemu_mallocz(sizeof (*ets));
2317 ets->num_notes = 1; /* only prstatus is dumped */
2318 fill_prstatus(&ets->prstatus, ts, 0);
2319 elf_core_copy_regs(&ets->prstatus.pr_reg, env);
2320 fill_note(&ets->notes[0], "CORE", NT_PRSTATUS, sizeof (ets->prstatus),
2321 &ets->prstatus);
2322
2323 QTAILQ_INSERT_TAIL(&info->thread_list, ets, ets_link);
2324
2325 info->notes_size += note_size(&ets->notes[0]);
2326 }
2327
2328 static int fill_note_info(struct elf_note_info *info,
2329 long signr, const CPUState *env)
2330 {
2331 #define NUMNOTES 3
2332 CPUState *cpu = NULL;
2333 TaskState *ts = (TaskState *)env->opaque;
2334 int i;
2335
2336 (void) memset(info, 0, sizeof (*info));
2337
2338 QTAILQ_INIT(&info->thread_list);
2339
2340 info->notes = qemu_mallocz(NUMNOTES * sizeof (struct memelfnote));
2341 if (info->notes == NULL)
2342 return (-ENOMEM);
2343 info->prstatus = qemu_mallocz(sizeof (*info->prstatus));
2344 if (info->prstatus == NULL)
2345 return (-ENOMEM);
2346 info->psinfo = qemu_mallocz(sizeof (*info->psinfo));
2347 if (info->psinfo == NULL)
2348 return (-ENOMEM);
2349
2350 /*
2351 * First fill in status (and registers) of current thread
2352 * including process info & aux vector.
2353 */
2354 fill_prstatus(info->prstatus, ts, signr);
2355 elf_core_copy_regs(&info->prstatus->pr_reg, env);
2356 fill_note(&info->notes[0], "CORE", NT_PRSTATUS,
2357 sizeof (*info->prstatus), info->prstatus);
2358 fill_psinfo(info->psinfo, ts);
2359 fill_note(&info->notes[1], "CORE", NT_PRPSINFO,
2360 sizeof (*info->psinfo), info->psinfo);
2361 fill_auxv_note(&info->notes[2], ts);
2362 info->numnote = NUMNOTES;
2363
2364 info->notes_size = 0;
2365 for (i = 0; i < info->numnote; i++)
2366 info->notes_size += note_size(&info->notes[i]);
2367
2368 /* read and fill status of all threads */
2369 cpu_list_lock();
2370 for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
2371 if (cpu == thread_env)
2372 continue;
2373 fill_thread_info(info, cpu);
2374 }
2375 cpu_list_unlock();
2376
2377 return (0);
2378 }
2379
2380 static void free_note_info(struct elf_note_info *info)
2381 {
2382 struct elf_thread_status *ets;
2383
2384 while (!QTAILQ_EMPTY(&info->thread_list)) {
2385 ets = QTAILQ_FIRST(&info->thread_list);
2386 QTAILQ_REMOVE(&info->thread_list, ets, ets_link);
2387 qemu_free(ets);
2388 }
2389
2390 qemu_free(info->prstatus);
2391 qemu_free(info->psinfo);
2392 qemu_free(info->notes);
2393 }
2394
2395 static int write_note_info(struct elf_note_info *info, int fd)
2396 {
2397 struct elf_thread_status *ets;
2398 int i, error = 0;
2399
2400 /* write prstatus, psinfo and auxv for current thread */
2401 for (i = 0; i < info->numnote; i++)
2402 if ((error = write_note(&info->notes[i], fd)) != 0)
2403 return (error);
2404
2405 /* write prstatus for each thread */
2406 for (ets = info->thread_list.tqh_first; ets != NULL;
2407 ets = ets->ets_link.tqe_next) {
2408 if ((error = write_note(&ets->notes[0], fd)) != 0)
2409 return (error);
2410 }
2411
2412 return (0);
2413 }
2414
2415 /*
2416 * Write out ELF coredump.
2417 *
2418 * See documentation of ELF object file format in:
2419 * http://www.caldera.com/developers/devspecs/gabi41.pdf
2420 *
2421 * The Linux coredump format is as follows:
2422 *
2423 * 0 +----------------------+ \
2424 * | ELF header | ET_CORE |
2425 * +----------------------+ |
2426 * | ELF program headers | |--- headers
2427 * | - NOTE section | |
2428 * | - PT_LOAD sections | |
2429 * +----------------------+ /
2430 * | NOTEs: |
2431 * | - NT_PRSTATUS |
2432 * | - NT_PRPSINFO |
2433 * | - NT_AUXV |
2434 * +----------------------+ <-- aligned to target page
2435 * | Process memory dump |
2436 * : :
2437 * . .
2438 * : :
2439 * | |
2440 * +----------------------+
2441 *
2442 * NT_PRSTATUS -> struct elf_prstatus (per thread)
2443 * NT_PRPSINFO -> struct elf_prpsinfo
2444 * NT_AUXV is array of { type, value } pairs (see fill_auxv_note()).
2445 *
2446 * The format follows the System V format as closely as possible.
2447 * Current limitations:
2448 * - no floating point registers are dumped
2449 *
2450 * Returns 0 on success, a negative errno value otherwise.
2451 *
2452 * TODO: make this work at runtime as well: it should be
2453 * possible to force a coredump of a running process and then
2454 * continue execution. For example, qemu could install a SIGUSR2
2455 * handler (provided the target process hasn't registered one)
2456 * that performs the dump when the signal is received.
2457 */
2458 static int elf_core_dump(int signr, const CPUState *env)
2459 {
2460 const TaskState *ts = (const TaskState *)env->opaque;
2461 struct vm_area_struct *vma = NULL;
2462 char corefile[PATH_MAX];
2463 struct elf_note_info info;
2464 struct elfhdr elf;
2465 struct elf_phdr phdr;
2466 struct rlimit dumpsize;
2467 struct mm_struct *mm = NULL;
2468 off_t offset = 0, data_offset = 0;
2469 int segs = 0;
2470 int fd = -1;
2471
2472 errno = 0;
2473 getrlimit(RLIMIT_CORE, &dumpsize);
2474 if (dumpsize.rlim_cur == 0)
2475 return 0;
2476
2477 if (core_dump_filename(ts, corefile, sizeof (corefile)) < 0)
2478 return (-errno);
2479
2480 if ((fd = open(corefile, O_WRONLY | O_CREAT,
2481 S_IRUSR|S_IWUSR|S_IRGRP|S_IROTH)) < 0)
2482 return (-errno);
2483
2484 /*
2485 * Walk through the target process memory mappings and
2486 * set up a structure containing this information. After
2487 * this point the vma_xxx functions can be used.
2488 */
2489 if ((mm = vma_init()) == NULL)
2490 goto out;
2491
2492 walk_memory_regions(mm, vma_walker);
2493 segs = vma_get_mapping_count(mm);
2494
2495 /*
2496 * Construct a valid coredump ELF header. We also
2497 * add one more segment for the notes.
2498 */
2499 fill_elf_header(&elf, segs + 1, ELF_MACHINE, 0);
2500 if (dump_write(fd, &elf, sizeof (elf)) != 0)
2501 goto out;
2502
2503 /* fill in in-memory version of notes */
2504 if (fill_note_info(&info, signr, env) < 0)
2505 goto out;
2506
2507 offset += sizeof (elf); /* elf header */
2508 offset += (segs + 1) * sizeof (struct elf_phdr); /* program headers */
2509
2510 /* write out notes program header */
2511 fill_elf_note_phdr(&phdr, info.notes_size, offset);
2512
2513 offset += info.notes_size;
2514 if (dump_write(fd, &phdr, sizeof (phdr)) != 0)
2515 goto out;
2516
2517 /*
2518 * The ELF specification wants the data to start at a page
2519 * boundary, so we align it here.
2520 */
2521 offset = roundup(offset, ELF_EXEC_PAGESIZE);
2522
2523 /*
2524 * Write program headers for memory regions mapped in
2525 * the target process.
2526 */
2527 for (vma = vma_first(mm); vma != NULL; vma = vma_next(vma)) {
2528 (void) memset(&phdr, 0, sizeof (phdr));
2529
2530 phdr.p_type = PT_LOAD;
2531 phdr.p_offset = offset;
2532 phdr.p_vaddr = vma->vma_start;
2533 phdr.p_paddr = 0;
2534 phdr.p_filesz = vma_dump_size(vma);
2535 offset += phdr.p_filesz;
2536 phdr.p_memsz = vma->vma_end - vma->vma_start;
2537 phdr.p_flags = vma->vma_flags & PROT_READ ? PF_R : 0;
2538 if (vma->vma_flags & PROT_WRITE)
2539 phdr.p_flags |= PF_W;
2540 if (vma->vma_flags & PROT_EXEC)
2541 phdr.p_flags |= PF_X;
2542 phdr.p_align = ELF_EXEC_PAGESIZE;
2543
2544 dump_write(fd, &phdr, sizeof (phdr));
2545 }
2546
2547 /*
2548 * Next we write the notes just after the program headers.
2549 * No alignment is needed here.
2550 */
2551 if (write_note_info(&info, fd) < 0)
2552 goto out;
2553
2554 /* align data to page boundary */
2555 data_offset = lseek(fd, 0, SEEK_CUR);
2556 data_offset = TARGET_PAGE_ALIGN(data_offset);
2557 if (lseek(fd, data_offset, SEEK_SET) != data_offset)
2558 goto out;
2559
2560 /*
2561 * Finally we can dump the process memory into the corefile as well.
2562 */
2563 for (vma = vma_first(mm); vma != NULL; vma = vma_next(vma)) {
2564 abi_ulong addr;
2565 abi_ulong end;
2566
2567 end = vma->vma_start + vma_dump_size(vma);
2568
2569 for (addr = vma->vma_start; addr < end;
2570 addr += TARGET_PAGE_SIZE) {
2571 char page[TARGET_PAGE_SIZE];
2572 int error;
2573
2574 /*
2575 * Read in a page from the target process memory and
2576 * write it to the coredump file.
2577 */
2578 error = copy_from_user(page, addr, sizeof (page));
2579 if (error != 0) {
2580 (void) fprintf(stderr, "unable to dump " TARGET_ABI_FMT_lx "\n",
2581 addr);
2582 errno = -error;
2583 goto out;
2584 }
2585 if (dump_write(fd, page, TARGET_PAGE_SIZE) < 0)
2586 goto out;
2587 }
2588 }
2589
2590 out:
2591 free_note_info(&info);
2592 if (mm != NULL)
2593 vma_delete(mm);
2594 (void) close(fd);
2595
2596 if (errno != 0)
2597 return (-errno);
2598 return (0);
2599 }
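/*
 * Worked example of the layout (illustrative, assuming an ELF32 target
 * with 4 KiB pages and 10 memory regions): the ELF header takes 52
 * bytes, the 11 program headers (1 PT_NOTE + 10 PT_LOAD) take
 * 11 * 32 = 352 bytes, the notes start at file offset 404, and the
 * first PT_LOAD data begins at the next page boundary after the notes.
 */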
2600
2601 #endif /* USE_ELF_CORE_DUMP */
2602
2603 static int load_aout_interp(void * exptr, int interp_fd)
2604 {
2605 printf("a.out interpreter not yet supported\n");
2606 return (0);
2607 }
2608
2609 void do_init_thread(struct target_pt_regs *regs, struct image_info *infop)
2610 {
2611 init_thread(regs, infop);
2612 }