1 /* This is the Linux kernel elf-loading code, ported into user space */
2 #include <sys/time.h>
3 #include <sys/param.h>
4
5 #include <stdio.h>
6 #include <sys/types.h>
7 #include <fcntl.h>
8 #include <errno.h>
9 #include <unistd.h>
10 #include <sys/mman.h>
11 #include <sys/resource.h>
12 #include <stdlib.h>
13 #include <string.h>
14 #include <time.h>
15
16 #include "qemu.h"
17 #include "disas.h"
18
19 #ifdef _ARCH_PPC64
20 #undef ARCH_DLINFO
21 #undef ELF_PLATFORM
22 #undef ELF_HWCAP
23 #undef ELF_CLASS
24 #undef ELF_DATA
25 #undef ELF_ARCH
26 #endif
27
28 #define ELF_OSABI ELFOSABI_SYSV
29
30 /* from personality.h */
31
32 /*
33 * Flags for bug emulation.
34 *
35 * These occupy the top three bytes.
36 */
37 enum {
38 ADDR_NO_RANDOMIZE = 0x0040000, /* disable randomization of VA space */
39 FDPIC_FUNCPTRS = 0x0080000, /* userspace function ptrs point to descriptors
40 * (signal handling)
41 */
42 MMAP_PAGE_ZERO = 0x0100000,
43 ADDR_COMPAT_LAYOUT = 0x0200000,
44 READ_IMPLIES_EXEC = 0x0400000,
45 ADDR_LIMIT_32BIT = 0x0800000,
46 SHORT_INODE = 0x1000000,
47 WHOLE_SECONDS = 0x2000000,
48 STICKY_TIMEOUTS = 0x4000000,
49 ADDR_LIMIT_3GB = 0x8000000,
50 };
51
52 /*
53 * Personality types.
54 *
55 * These go in the low byte. Avoid using the top bit, it will
56 * conflict with error returns.
57 */
58 enum {
59 PER_LINUX = 0x0000,
60 PER_LINUX_32BIT = 0x0000 | ADDR_LIMIT_32BIT,
61 PER_LINUX_FDPIC = 0x0000 | FDPIC_FUNCPTRS,
62 PER_SVR4 = 0x0001 | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
63 PER_SVR3 = 0x0002 | STICKY_TIMEOUTS | SHORT_INODE,
64 PER_SCOSVR3 = 0x0003 | STICKY_TIMEOUTS |
65 WHOLE_SECONDS | SHORT_INODE,
66 PER_OSR5 = 0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS,
67 PER_WYSEV386 = 0x0004 | STICKY_TIMEOUTS | SHORT_INODE,
68 PER_ISCR4 = 0x0005 | STICKY_TIMEOUTS,
69 PER_BSD = 0x0006,
70 PER_SUNOS = 0x0006 | STICKY_TIMEOUTS,
71 PER_XENIX = 0x0007 | STICKY_TIMEOUTS | SHORT_INODE,
72 PER_LINUX32 = 0x0008,
73 PER_LINUX32_3GB = 0x0008 | ADDR_LIMIT_3GB,
74 PER_IRIX32 = 0x0009 | STICKY_TIMEOUTS,/* IRIX5 32-bit */
75 PER_IRIXN32 = 0x000a | STICKY_TIMEOUTS,/* IRIX6 new 32-bit */
76 PER_IRIX64 = 0x000b | STICKY_TIMEOUTS,/* IRIX6 64-bit */
77 PER_RISCOS = 0x000c,
78 PER_SOLARIS = 0x000d | STICKY_TIMEOUTS,
79 PER_UW7 = 0x000e | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
80 PER_OSF4 = 0x000f, /* OSF/1 v4 */
81 PER_HPUX = 0x0010,
82 PER_MASK = 0x00ff,
83 };
84
85 /*
86 * Return the base personality without flags.
87 */
88 #define personality(pers) (pers & PER_MASK)
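/*
 * For illustration only (a hypothetical snippet, not used by the loader):
 * a personality value packs the bug-emulation flags above together with a
 * base personality, and personality() recovers the base via PER_MASK.
 */
#if 0
unsigned int pers = PER_SVR4;               /* 0x0001 | STICKY_TIMEOUTS | MMAP_PAGE_ZERO */
int base = personality(pers);               /* == 0x0001, flags masked off by PER_MASK */
int sticky = !!(pers & STICKY_TIMEOUTS);    /* == 1, individual flags can still be tested */
#endif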
89
90 /* this flag is ineffective under linux too, should be deleted */
91 #ifndef MAP_DENYWRITE
92 #define MAP_DENYWRITE 0
93 #endif
94
95 /* should probably go in elf.h */
96 #ifndef ELIBBAD
97 #define ELIBBAD 80
98 #endif
99
100 #ifdef TARGET_I386
101
102 #define ELF_PLATFORM get_elf_platform()
103
104 static const char *get_elf_platform(void)
105 {
106 static char elf_platform[] = "i386";
107 int family = (thread_env->cpuid_version >> 8) & 0xff;
108 if (family > 6)
109 family = 6;
110 if (family >= 3)
111 elf_platform[1] = '0' + family;
112 return elf_platform;
113 }
114
115 #define ELF_HWCAP get_elf_hwcap()
116
117 static uint32_t get_elf_hwcap(void)
118 {
119 return thread_env->cpuid_features;
120 }
121
122 #ifdef TARGET_X86_64
123 #define ELF_START_MMAP 0x2aaaaab000ULL
124 #define elf_check_arch(x) ( ((x) == ELF_ARCH) )
125
126 #define ELF_CLASS ELFCLASS64
127 #define ELF_DATA ELFDATA2LSB
128 #define ELF_ARCH EM_X86_64
129
130 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
131 {
132 regs->rax = 0;
133 regs->rsp = infop->start_stack;
134 regs->rip = infop->entry;
135 }
136
137 typedef target_ulong elf_greg_t;
138 typedef uint32_t target_uid_t;
139 typedef uint32_t target_gid_t;
140 typedef int32_t target_pid_t;
141
142 #define ELF_NREG 27
143 typedef elf_greg_t elf_gregset_t[ELF_NREG];
144
145 /*
146 * Note that ELF_NREG should be 29, as there should also be room for
147 * the TRAPNO and ERR "registers", but linux doesn't dump
148 * those.
149 *
150 * See linux kernel: arch/x86/include/asm/elf.h
151 */
152 static void elf_core_copy_regs(elf_gregset_t *regs, const CPUState *env)
153 {
154 (*regs)[0] = env->regs[15];
155 (*regs)[1] = env->regs[14];
156 (*regs)[2] = env->regs[13];
157 (*regs)[3] = env->regs[12];
158 (*regs)[4] = env->regs[R_EBP];
159 (*regs)[5] = env->regs[R_EBX];
160 (*regs)[6] = env->regs[11];
161 (*regs)[7] = env->regs[10];
162 (*regs)[8] = env->regs[9];
163 (*regs)[9] = env->regs[8];
164 (*regs)[10] = env->regs[R_EAX];
165 (*regs)[11] = env->regs[R_ECX];
166 (*regs)[12] = env->regs[R_EDX];
167 (*regs)[13] = env->regs[R_ESI];
168 (*regs)[14] = env->regs[R_EDI];
169 (*regs)[15] = env->regs[R_EAX]; /* XXX */
170 (*regs)[16] = env->eip;
171 (*regs)[17] = env->segs[R_CS].selector & 0xffff;
172 (*regs)[18] = env->eflags;
173 (*regs)[19] = env->regs[R_ESP];
174 (*regs)[20] = env->segs[R_SS].selector & 0xffff;
175 (*regs)[21] = env->segs[R_FS].selector & 0xffff;
176 (*regs)[22] = env->segs[R_GS].selector & 0xffff;
177 (*regs)[23] = env->segs[R_DS].selector & 0xffff;
178 (*regs)[24] = env->segs[R_ES].selector & 0xffff;
179 (*regs)[25] = env->segs[R_FS].selector & 0xffff;
180 (*regs)[26] = env->segs[R_GS].selector & 0xffff;
181 }
182
183 #else
184
185 #define ELF_START_MMAP 0x80000000
186
187 /*
188 * This is used to ensure we don't load something for the wrong architecture.
189 */
190 #define elf_check_arch(x) ( ((x) == EM_386) || ((x) == EM_486) )
191
192 /*
193 * These are used to set parameters in the core dumps.
194 */
195 #define ELF_CLASS ELFCLASS32
196 #define ELF_DATA ELFDATA2LSB
197 #define ELF_ARCH EM_386
198
199 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
200 {
201 regs->esp = infop->start_stack;
202 regs->eip = infop->entry;
203
204 /* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program
205 starts %edx contains a pointer to a function which might be
206 registered using `atexit'. This provides a means for the
207 dynamic linker to call DT_FINI functions for shared libraries
208 that have been loaded before the code runs.
209
210 A value of 0 tells us there is no such handler. */
211 regs->edx = 0;
212 }
213
214 typedef target_ulong elf_greg_t;
215 typedef uint16_t target_uid_t;
216 typedef uint16_t target_gid_t;
217 typedef int32_t target_pid_t;
218
219 #define ELF_NREG 17
220 typedef elf_greg_t elf_gregset_t[ELF_NREG];
221
222 /*
223 * Note that ELF_NREG should be 19, as there should also be room for
224 * the TRAPNO and ERR "registers", but linux doesn't dump
225 * those.
226 *
227 * See linux kernel: arch/x86/include/asm/elf.h
228 */
229 static void elf_core_copy_regs(elf_gregset_t *regs, const CPUState *env)
230 {
231 (*regs)[0] = env->regs[R_EBX];
232 (*regs)[1] = env->regs[R_ECX];
233 (*regs)[2] = env->regs[R_EDX];
234 (*regs)[3] = env->regs[R_ESI];
235 (*regs)[4] = env->regs[R_EDI];
236 (*regs)[5] = env->regs[R_EBP];
237 (*regs)[6] = env->regs[R_EAX];
238 (*regs)[7] = env->segs[R_DS].selector & 0xffff;
239 (*regs)[8] = env->segs[R_ES].selector & 0xffff;
240 (*regs)[9] = env->segs[R_FS].selector & 0xffff;
241 (*regs)[10] = env->segs[R_GS].selector & 0xffff;
242 (*regs)[11] = env->regs[R_EAX]; /* XXX */
243 (*regs)[12] = env->eip;
244 (*regs)[13] = env->segs[R_CS].selector & 0xffff;
245 (*regs)[14] = env->eflags;
246 (*regs)[15] = env->regs[R_ESP];
247 (*regs)[16] = env->segs[R_SS].selector & 0xffff;
248 }
249 #endif
250
251 #define USE_ELF_CORE_DUMP
252 #define ELF_EXEC_PAGESIZE 4096
253
254 #endif
255
256 #ifdef TARGET_ARM
257
258 #define ELF_START_MMAP 0x80000000
259
260 #define elf_check_arch(x) ( (x) == EM_ARM )
261
262 #define ELF_CLASS ELFCLASS32
263 #ifdef TARGET_WORDS_BIGENDIAN
264 #define ELF_DATA ELFDATA2MSB
265 #else
266 #define ELF_DATA ELFDATA2LSB
267 #endif
268 #define ELF_ARCH EM_ARM
269
270 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
271 {
272 abi_long stack = infop->start_stack;
273 memset(regs, 0, sizeof(*regs));
274 regs->ARM_cpsr = 0x10;
275 if (infop->entry & 1)
276 regs->ARM_cpsr |= CPSR_T;
277 regs->ARM_pc = infop->entry & 0xfffffffe;
278 regs->ARM_sp = infop->start_stack;
279 /* FIXME - what to do on failure of get_user()? */
280 get_user_ual(regs->ARM_r2, stack + 8); /* envp */
281 get_user_ual(regs->ARM_r1, stack + 4); /* envp */
282 /* XXX: it seems that r0 is zeroed afterwards anyway */
283 regs->ARM_r0 = 0;
284 /* For uClinux PIC binaries. */
285 /* XXX: Linux does this only on ARM with no MMU (do we care ?) */
286 regs->ARM_r10 = infop->start_data;
287 }
288
289 typedef uint32_t elf_greg_t;
290 typedef uint16_t target_uid_t;
291 typedef uint16_t target_gid_t;
292 typedef int32_t target_pid_t;
293
294 #define ELF_NREG 18
295 typedef elf_greg_t elf_gregset_t[ELF_NREG];
296
297 static void elf_core_copy_regs(elf_gregset_t *regs, const CPUState *env)
298 {
299 (*regs)[0] = env->regs[0];
300 (*regs)[1] = env->regs[1];
301 (*regs)[2] = env->regs[2];
302 (*regs)[3] = env->regs[3];
303 (*regs)[4] = env->regs[4];
304 (*regs)[5] = env->regs[5];
305 (*regs)[6] = env->regs[6];
306 (*regs)[7] = env->regs[7];
307 (*regs)[8] = env->regs[8];
308 (*regs)[9] = env->regs[9];
309 (*regs)[10] = env->regs[10];
310 (*regs)[11] = env->regs[11];
311 (*regs)[12] = env->regs[12];
312 (*regs)[13] = env->regs[13];
313 (*regs)[14] = env->regs[14];
314 (*regs)[15] = env->regs[15];
315
316 (*regs)[16] = cpsr_read((CPUState *)env);
317 (*regs)[17] = env->regs[0]; /* XXX */
318 }
319
320 #define USE_ELF_CORE_DUMP
321 #define ELF_EXEC_PAGESIZE 4096
322
323 enum
324 {
325 ARM_HWCAP_ARM_SWP = 1 << 0,
326 ARM_HWCAP_ARM_HALF = 1 << 1,
327 ARM_HWCAP_ARM_THUMB = 1 << 2,
328 ARM_HWCAP_ARM_26BIT = 1 << 3,
329 ARM_HWCAP_ARM_FAST_MULT = 1 << 4,
330 ARM_HWCAP_ARM_FPA = 1 << 5,
331 ARM_HWCAP_ARM_VFP = 1 << 6,
332 ARM_HWCAP_ARM_EDSP = 1 << 7,
333 };
334
335 #define ELF_HWCAP (ARM_HWCAP_ARM_SWP | ARM_HWCAP_ARM_HALF \
336 | ARM_HWCAP_ARM_THUMB | ARM_HWCAP_ARM_FAST_MULT \
337 | ARM_HWCAP_ARM_FPA | ARM_HWCAP_ARM_VFP)
338
339 #endif
340
341 #ifdef TARGET_SPARC
342 #ifdef TARGET_SPARC64
343
344 #define ELF_START_MMAP 0x80000000
345
346 #ifndef TARGET_ABI32
347 #define elf_check_arch(x) ( (x) == EM_SPARCV9 || (x) == EM_SPARC32PLUS )
348 #else
349 #define elf_check_arch(x) ( (x) == EM_SPARC32PLUS || (x) == EM_SPARC )
350 #endif
351
352 #define ELF_CLASS ELFCLASS64
353 #define ELF_DATA ELFDATA2MSB
354 #define ELF_ARCH EM_SPARCV9
355
356 #define STACK_BIAS 2047
357
358 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
359 {
360 #ifndef TARGET_ABI32
361 regs->tstate = 0;
362 #endif
363 regs->pc = infop->entry;
364 regs->npc = regs->pc + 4;
365 regs->y = 0;
366 #ifdef TARGET_ABI32
367 regs->u_regs[14] = infop->start_stack - 16 * 4;
368 #else
369 if (personality(infop->personality) == PER_LINUX32)
370 regs->u_regs[14] = infop->start_stack - 16 * 4;
371 else
372 regs->u_regs[14] = infop->start_stack - 16 * 8 - STACK_BIAS;
373 #endif
374 }
375
376 #else
377 #define ELF_START_MMAP 0x80000000
378
379 #define elf_check_arch(x) ( (x) == EM_SPARC )
380
381 #define ELF_CLASS ELFCLASS32
382 #define ELF_DATA ELFDATA2MSB
383 #define ELF_ARCH EM_SPARC
384
385 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
386 {
387 regs->psr = 0;
388 regs->pc = infop->entry;
389 regs->npc = regs->pc + 4;
390 regs->y = 0;
391 regs->u_regs[14] = infop->start_stack - 16 * 4;
392 }
393
394 #endif
395 #endif
396
397 #ifdef TARGET_PPC
398
399 #define ELF_START_MMAP 0x80000000
400
401 #if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
402
403 #define elf_check_arch(x) ( (x) == EM_PPC64 )
404
405 #define ELF_CLASS ELFCLASS64
406
407 #else
408
409 #define elf_check_arch(x) ( (x) == EM_PPC )
410
411 #define ELF_CLASS ELFCLASS32
412
413 #endif
414
415 #ifdef TARGET_WORDS_BIGENDIAN
416 #define ELF_DATA ELFDATA2MSB
417 #else
418 #define ELF_DATA ELFDATA2LSB
419 #endif
420 #define ELF_ARCH EM_PPC
421
422 /* Feature masks for the Aux Vector Hardware Capabilities (AT_HWCAP).
423 See arch/powerpc/include/asm/cputable.h. */
424 enum {
425 PPC_FEATURE_32 = 0x80000000,
426 PPC_FEATURE_64 = 0x40000000,
427 PPC_FEATURE_601_INSTR = 0x20000000,
428 PPC_FEATURE_HAS_ALTIVEC = 0x10000000,
429 PPC_FEATURE_HAS_FPU = 0x08000000,
430 PPC_FEATURE_HAS_MMU = 0x04000000,
431 PPC_FEATURE_HAS_4xxMAC = 0x02000000,
432 PPC_FEATURE_UNIFIED_CACHE = 0x01000000,
433 PPC_FEATURE_HAS_SPE = 0x00800000,
434 PPC_FEATURE_HAS_EFP_SINGLE = 0x00400000,
435 PPC_FEATURE_HAS_EFP_DOUBLE = 0x00200000,
436 PPC_FEATURE_NO_TB = 0x00100000,
437 PPC_FEATURE_POWER4 = 0x00080000,
438 PPC_FEATURE_POWER5 = 0x00040000,
439 PPC_FEATURE_POWER5_PLUS = 0x00020000,
440 PPC_FEATURE_CELL = 0x00010000,
441 PPC_FEATURE_BOOKE = 0x00008000,
442 PPC_FEATURE_SMT = 0x00004000,
443 PPC_FEATURE_ICACHE_SNOOP = 0x00002000,
444 PPC_FEATURE_ARCH_2_05 = 0x00001000,
445 PPC_FEATURE_PA6T = 0x00000800,
446 PPC_FEATURE_HAS_DFP = 0x00000400,
447 PPC_FEATURE_POWER6_EXT = 0x00000200,
448 PPC_FEATURE_ARCH_2_06 = 0x00000100,
449 PPC_FEATURE_HAS_VSX = 0x00000080,
450 PPC_FEATURE_PSERIES_PERFMON_COMPAT = 0x00000040,
451
452 PPC_FEATURE_TRUE_LE = 0x00000002,
453 PPC_FEATURE_PPC_LE = 0x00000001,
454 };
455
456 #define ELF_HWCAP get_elf_hwcap()
457
458 static uint32_t get_elf_hwcap(void)
459 {
460 CPUState *e = thread_env;
461 uint32_t features = 0;
462
463 /* We don't have to be terribly complete here; the high points are
464 Altivec/FP/SPE support. Anything else is just a bonus. */
465 #define GET_FEATURE(flag, feature) \
466 do {if (e->insns_flags & flag) features |= feature; } while(0)
467 GET_FEATURE(PPC_64B, PPC_FEATURE_64);
468 GET_FEATURE(PPC_FLOAT, PPC_FEATURE_HAS_FPU);
469 GET_FEATURE(PPC_ALTIVEC, PPC_FEATURE_HAS_ALTIVEC);
470 GET_FEATURE(PPC_SPE, PPC_FEATURE_HAS_SPE);
471 GET_FEATURE(PPC_SPE_SINGLE, PPC_FEATURE_HAS_EFP_SINGLE);
472 GET_FEATURE(PPC_SPE_DOUBLE, PPC_FEATURE_HAS_EFP_DOUBLE);
473 GET_FEATURE(PPC_BOOKE, PPC_FEATURE_BOOKE);
474 GET_FEATURE(PPC_405_MAC, PPC_FEATURE_HAS_4xxMAC);
475 #undef GET_FEATURE
476
477 return features;
478 }
479
480 /*
481 * We need to put in some extra aux table entries to tell glibc what
482 * the cache block size is, so it can use the dcbz instruction safely.
483 */
484 #define AT_DCACHEBSIZE 19
485 #define AT_ICACHEBSIZE 20
486 #define AT_UCACHEBSIZE 21
487 /* A special ignored type value for PPC, for glibc compatibility. */
488 #define AT_IGNOREPPC 22
489 /*
490 * The requirements here are:
491 * - keep the final alignment of sp (sp & 0xf)
492 * - make sure the 32-bit value at the first 16 byte aligned position of
493 * AUXV is greater than 16 for glibc compatibility.
494 * AT_IGNOREPPC is used for that.
495 * - for compatibility with glibc ARCH_DLINFO must always be defined on PPC,
496 * even if DLINFO_ARCH_ITEMS goes to zero or is undefined.
497 */
498 #define DLINFO_ARCH_ITEMS 5
499 #define ARCH_DLINFO \
500 do { \
501 NEW_AUX_ENT(AT_DCACHEBSIZE, 0x20); \
502 NEW_AUX_ENT(AT_ICACHEBSIZE, 0x20); \
503 NEW_AUX_ENT(AT_UCACHEBSIZE, 0); \
504 /* \
505 * Now handle glibc compatibility. \
506 */ \
507 NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \
508 NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \
509 } while (0)
510
511 static inline void init_thread(struct target_pt_regs *_regs, struct image_info *infop)
512 {
513 abi_ulong pos = infop->start_stack;
514 abi_ulong tmp;
515 #if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
516 abi_ulong entry, toc;
517 #endif
518
519 _regs->gpr[1] = infop->start_stack;
520 #if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
521 entry = ldq_raw(infop->entry) + infop->load_addr;
522 toc = ldq_raw(infop->entry + 8) + infop->load_addr;
523 _regs->gpr[2] = toc;
524 infop->entry = entry;
525 #endif
526 _regs->nip = infop->entry;
527 /* Note that this isn't exactly what the regular kernel does,
528 * but it is what the ABI wants and is needed to allow
529 * execution of PPC BSD programs.
530 */
531 /* FIXME - what to do on failure of get_user()? */
532 get_user_ual(_regs->gpr[3], pos);
533 pos += sizeof(abi_ulong);
534 _regs->gpr[4] = pos;
535 for (tmp = 1; tmp != 0; pos += sizeof(abi_ulong))
536 tmp = ldl(pos);
537 _regs->gpr[5] = pos;
538 }
539
540 #define ELF_EXEC_PAGESIZE 4096
541
542 #endif
543
544 #ifdef TARGET_MIPS
545
546 #define ELF_START_MMAP 0x80000000
547
548 #define elf_check_arch(x) ( (x) == EM_MIPS )
549
550 #ifdef TARGET_MIPS64
551 #define ELF_CLASS ELFCLASS64
552 #else
553 #define ELF_CLASS ELFCLASS32
554 #endif
555 #ifdef TARGET_WORDS_BIGENDIAN
556 #define ELF_DATA ELFDATA2MSB
557 #else
558 #define ELF_DATA ELFDATA2LSB
559 #endif
560 #define ELF_ARCH EM_MIPS
561
562 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
563 {
564 regs->cp0_status = 2 << CP0St_KSU;
565 regs->cp0_epc = infop->entry;
566 regs->regs[29] = infop->start_stack;
567 }
568
569 #define ELF_EXEC_PAGESIZE 4096
570
571 #endif /* TARGET_MIPS */
572
573 #ifdef TARGET_MICROBLAZE
574
575 #define ELF_START_MMAP 0x80000000
576
577 #define elf_check_arch(x) ( (x) == EM_XILINX_MICROBLAZE )
578
579 #define ELF_CLASS ELFCLASS32
580 #define ELF_DATA ELFDATA2MSB
581 #define ELF_ARCH EM_XILINX_MICROBLAZE
582
583 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
584 {
585 regs->pc = infop->entry;
586 regs->r1 = infop->start_stack;
587
588 }
589
590 #define ELF_EXEC_PAGESIZE 4096
591
592 #endif /* TARGET_MICROBLAZE */
593
594 #ifdef TARGET_SH4
595
596 #define ELF_START_MMAP 0x80000000
597
598 #define elf_check_arch(x) ( (x) == EM_SH )
599
600 #define ELF_CLASS ELFCLASS32
601 #define ELF_DATA ELFDATA2LSB
602 #define ELF_ARCH EM_SH
603
604 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
605 {
606 /* Check other registers XXXXX */
607 regs->pc = infop->entry;
608 regs->regs[15] = infop->start_stack;
609 }
610
611 #define ELF_EXEC_PAGESIZE 4096
612
613 #endif
614
615 #ifdef TARGET_CRIS
616
617 #define ELF_START_MMAP 0x80000000
618
619 #define elf_check_arch(x) ( (x) == EM_CRIS )
620
621 #define ELF_CLASS ELFCLASS32
622 #define ELF_DATA ELFDATA2LSB
623 #define ELF_ARCH EM_CRIS
624
625 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
626 {
627 regs->erp = infop->entry;
628 }
629
630 #define ELF_EXEC_PAGESIZE 8192
631
632 #endif
633
634 #ifdef TARGET_M68K
635
636 #define ELF_START_MMAP 0x80000000
637
638 #define elf_check_arch(x) ( (x) == EM_68K )
639
640 #define ELF_CLASS ELFCLASS32
641 #define ELF_DATA ELFDATA2MSB
642 #define ELF_ARCH EM_68K
643
644 /* ??? Does this need to do anything?
645 #define ELF_PLAT_INIT(_r) */
646
647 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
648 {
649 regs->usp = infop->start_stack;
650 regs->sr = 0;
651 regs->pc = infop->entry;
652 }
653
654 #define ELF_EXEC_PAGESIZE 8192
655
656 #endif
657
658 #ifdef TARGET_ALPHA
659
660 #define ELF_START_MMAP (0x30000000000ULL)
661
662 #define elf_check_arch(x) ( (x) == ELF_ARCH )
663
664 #define ELF_CLASS ELFCLASS64
665 #define ELF_DATA ELFDATA2LSB /* Alpha is little-endian */
666 #define ELF_ARCH EM_ALPHA
667
668 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
669 {
670 regs->pc = infop->entry;
671 regs->ps = 8;
672 regs->usp = infop->start_stack;
673 regs->unique = infop->start_data; /* ? */
674 printf("Set unique value to " TARGET_FMT_lx " (" TARGET_FMT_lx ")\n",
675 regs->unique, infop->start_data);
676 }
677
678 #define ELF_EXEC_PAGESIZE 8192
679
680 #endif /* TARGET_ALPHA */
681
682 #ifndef ELF_PLATFORM
683 #define ELF_PLATFORM (NULL)
684 #endif
685
686 #ifndef ELF_HWCAP
687 #define ELF_HWCAP 0
688 #endif
689
690 #ifdef TARGET_ABI32
691 #undef ELF_CLASS
692 #define ELF_CLASS ELFCLASS32
693 #undef bswaptls
694 #define bswaptls(ptr) bswap32s(ptr)
695 #endif
696
697 #include "elf.h"
698
699 struct exec
700 {
701 unsigned int a_info; /* Use macros N_MAGIC, etc for access */
702 unsigned int a_text; /* length of text, in bytes */
703 unsigned int a_data; /* length of data, in bytes */
704 unsigned int a_bss; /* length of uninitialized data area, in bytes */
705 unsigned int a_syms; /* length of symbol table data in file, in bytes */
706 unsigned int a_entry; /* start address */
707 unsigned int a_trsize; /* length of relocation info for text, in bytes */
708 unsigned int a_drsize; /* length of relocation info for data, in bytes */
709 };
710
711
712 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
713 #define OMAGIC 0407
714 #define NMAGIC 0410
715 #define ZMAGIC 0413
716 #define QMAGIC 0314
717
718 /* max code+data+bss space allocated to elf interpreter */
719 #define INTERP_MAP_SIZE (32 * 1024 * 1024)
720
721 /* max code+data+bss+brk space allocated to ET_DYN executables */
722 #define ET_DYN_MAP_SIZE (128 * 1024 * 1024)
723
724 /* Necessary parameters */
725 #define TARGET_ELF_EXEC_PAGESIZE TARGET_PAGE_SIZE
726 #define TARGET_ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(TARGET_ELF_EXEC_PAGESIZE-1))
727 #define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE-1))
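/*
 * For illustration, with a hypothetical address and a 4 KiB target ELF page
 * size: TARGET_ELF_PAGESTART/TARGET_ELF_PAGEOFFSET split a segment vaddr into
 * the page-aligned mmap target and the in-page offset (which is added to
 * p_filesz and subtracted from p_offset when the segment is mapped below).
 */
#if 0
abi_ulong vaddr = 0x08048123;
abi_ulong start = TARGET_ELF_PAGESTART(vaddr);   /* 0x08048000 */
abi_ulong off   = TARGET_ELF_PAGEOFFSET(vaddr);  /* 0x00000123 */
#endif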
728
729 #define INTERPRETER_NONE 0
730 #define INTERPRETER_AOUT 1
731 #define INTERPRETER_ELF 2
732
733 #define DLINFO_ITEMS 12
734
735 static inline void memcpy_fromfs(void * to, const void * from, unsigned long n)
736 {
737 memcpy(to, from, n);
738 }
739
740 static int load_aout_interp(void * exptr, int interp_fd);
741
742 #ifdef BSWAP_NEEDED
743 static void bswap_ehdr(struct elfhdr *ehdr)
744 {
745 bswap16s(&ehdr->e_type); /* Object file type */
746 bswap16s(&ehdr->e_machine); /* Architecture */
747 bswap32s(&ehdr->e_version); /* Object file version */
748 bswaptls(&ehdr->e_entry); /* Entry point virtual address */
749 bswaptls(&ehdr->e_phoff); /* Program header table file offset */
750 bswaptls(&ehdr->e_shoff); /* Section header table file offset */
751 bswap32s(&ehdr->e_flags); /* Processor-specific flags */
752 bswap16s(&ehdr->e_ehsize); /* ELF header size in bytes */
753 bswap16s(&ehdr->e_phentsize); /* Program header table entry size */
754 bswap16s(&ehdr->e_phnum); /* Program header table entry count */
755 bswap16s(&ehdr->e_shentsize); /* Section header table entry size */
756 bswap16s(&ehdr->e_shnum); /* Section header table entry count */
757 bswap16s(&ehdr->e_shstrndx); /* Section header string table index */
758 }
759
760 static void bswap_phdr(struct elf_phdr *phdr)
761 {
762 bswap32s(&phdr->p_type); /* Segment type */
763 bswaptls(&phdr->p_offset); /* Segment file offset */
764 bswaptls(&phdr->p_vaddr); /* Segment virtual address */
765 bswaptls(&phdr->p_paddr); /* Segment physical address */
766 bswaptls(&phdr->p_filesz); /* Segment size in file */
767 bswaptls(&phdr->p_memsz); /* Segment size in memory */
768 bswap32s(&phdr->p_flags); /* Segment flags */
769 bswaptls(&phdr->p_align); /* Segment alignment */
770 }
771
772 static void bswap_shdr(struct elf_shdr *shdr)
773 {
774 bswap32s(&shdr->sh_name);
775 bswap32s(&shdr->sh_type);
776 bswaptls(&shdr->sh_flags);
777 bswaptls(&shdr->sh_addr);
778 bswaptls(&shdr->sh_offset);
779 bswaptls(&shdr->sh_size);
780 bswap32s(&shdr->sh_link);
781 bswap32s(&shdr->sh_info);
782 bswaptls(&shdr->sh_addralign);
783 bswaptls(&shdr->sh_entsize);
784 }
785
786 static void bswap_sym(struct elf_sym *sym)
787 {
788 bswap32s(&sym->st_name);
789 bswaptls(&sym->st_value);
790 bswaptls(&sym->st_size);
791 bswap16s(&sym->st_shndx);
792 }
793 #endif
794
795 #ifdef USE_ELF_CORE_DUMP
796 static int elf_core_dump(int, const CPUState *);
797
798 #ifdef BSWAP_NEEDED
799 static void bswap_note(struct elf_note *en)
800 {
801 bswaptls(&en->n_namesz);
802 bswaptls(&en->n_descsz);
803 bswaptls(&en->n_type);
804 }
805 #endif /* BSWAP_NEEDED */
806
807 #endif /* USE_ELF_CORE_DUMP */
808
809 /*
810 * 'copy_elf_strings()' copies argument/environment strings from user
811 * memory to free pages in kernel mem. These are in a format ready
812 * to be put directly into the top of new user memory.
813 *
814 */
815 static abi_ulong copy_elf_strings(int argc,char ** argv, void **page,
816 abi_ulong p)
817 {
818 char *tmp, *tmp1, *pag = NULL;
819 int len, offset = 0;
820
821 if (!p) {
822 return 0; /* bullet-proofing */
823 }
824 while (argc-- > 0) {
825 tmp = argv[argc];
826 if (!tmp) {
827 fprintf(stderr, "VFS: argc is wrong\n");
828 exit(-1);
829 }
830 tmp1 = tmp;
831 while (*tmp++);
832 len = tmp - tmp1;
833 if (p < len) { /* this shouldn't happen - 128kB */
834 return 0;
835 }
836 while (len) {
837 --p; --tmp; --len;
838 if (--offset < 0) {
839 offset = p % TARGET_PAGE_SIZE;
840 pag = (char *)page[p/TARGET_PAGE_SIZE];
841 if (!pag) {
842 pag = (char *)malloc(TARGET_PAGE_SIZE);
843 if (!pag)
844 return 0;
845 memset(pag, 0, TARGET_PAGE_SIZE);
846 page[p/TARGET_PAGE_SIZE] = pag;
847 }
848 }
849 if (len == 0 || offset == 0) {
850 *(pag + offset) = *tmp;
851 }
852 else {
853 int bytes_to_copy = (len > offset) ? offset : len;
854 tmp -= bytes_to_copy;
855 p -= bytes_to_copy;
856 offset -= bytes_to_copy;
857 len -= bytes_to_copy;
858 memcpy_fromfs(pag + offset, tmp, bytes_to_copy + 1);
859 }
860 }
861 }
862 return p;
863 }
864
865 static abi_ulong setup_arg_pages(abi_ulong p, struct linux_binprm *bprm,
866 struct image_info *info)
867 {
868 abi_ulong stack_base, size, error;
869 int i;
870
871 /* Create enough stack to hold everything. If we don't use
872 * it for args, we'll use it for something else...
873 */
874 size = x86_stack_size;
875 if (size < MAX_ARG_PAGES*TARGET_PAGE_SIZE)
876 size = MAX_ARG_PAGES*TARGET_PAGE_SIZE;
877 error = target_mmap(0,
878 size + qemu_host_page_size,
879 PROT_READ | PROT_WRITE,
880 MAP_PRIVATE | MAP_ANONYMOUS,
881 -1, 0);
882 if (error == -1) {
883 perror("stk mmap");
884 exit(-1);
885 }
886 /* we reserve one extra page at the top of the stack as guard */
887 target_mprotect(error + size, qemu_host_page_size, PROT_NONE);
888
889 stack_base = error + size - MAX_ARG_PAGES*TARGET_PAGE_SIZE;
890 p += stack_base;
891
892 for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
893 if (bprm->page[i]) {
894 info->rss++;
895 /* FIXME - check return value of memcpy_to_target() for failure */
896 memcpy_to_target(stack_base, bprm->page[i], TARGET_PAGE_SIZE);
897 free(bprm->page[i]);
898 }
899 stack_base += TARGET_PAGE_SIZE;
900 }
901 return p;
902 }
903
904 static void set_brk(abi_ulong start, abi_ulong end)
905 {
906 /* page-align the start and end addresses... */
907 start = HOST_PAGE_ALIGN(start);
908 end = HOST_PAGE_ALIGN(end);
909 if (end <= start)
910 return;
911 if(target_mmap(start, end - start,
912 PROT_READ | PROT_WRITE | PROT_EXEC,
913 MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0) == -1) {
914 perror("cannot mmap brk");
915 exit(-1);
916 }
917 }
918
919
920 /* We need to explicitly zero any fractional pages after the data
921 section (i.e. bss). This area would otherwise contain junk from the
922 file that should not be in memory. */
923 static void padzero(abi_ulong elf_bss, abi_ulong last_bss)
924 {
925 abi_ulong nbyte;
926
927 if (elf_bss >= last_bss)
928 return;
929
930 /* XXX: this is really a hack : if the real host page size is
931 smaller than the target page size, some pages after the end
932 of the file may not be mapped. A better fix would be to
933 patch target_mmap(), but it is more complicated as the file
934 size must be known */
935 if (qemu_real_host_page_size < qemu_host_page_size) {
936 abi_ulong end_addr, end_addr1;
937 end_addr1 = (elf_bss + qemu_real_host_page_size - 1) &
938 ~(qemu_real_host_page_size - 1);
939 end_addr = HOST_PAGE_ALIGN(elf_bss);
940 if (end_addr1 < end_addr) {
941 mmap((void *)g2h(end_addr1), end_addr - end_addr1,
942 PROT_READ|PROT_WRITE|PROT_EXEC,
943 MAP_FIXED|MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
944 }
945 }
946
947 nbyte = elf_bss & (qemu_host_page_size-1);
948 if (nbyte) {
949 nbyte = qemu_host_page_size - nbyte;
950 do {
951 /* FIXME - what to do if put_user() fails? */
952 put_user_u8(0, elf_bss);
953 elf_bss++;
954 } while (--nbyte);
955 }
956 }
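/*
 * Worked example (hypothetical values, 4 KiB pages): if elf_bss == 0x0804a123,
 * then nbyte == 0x123, so qemu_host_page_size - 0x123 bytes are cleared one at
 * a time with put_user_u8(), zeroing addresses 0x0804a123 .. 0x0804afff.
 */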
957
958
959 static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc,
960 struct elfhdr * exec,
961 abi_ulong load_addr,
962 abi_ulong load_bias,
963 abi_ulong interp_load_addr, int ibcs,
964 struct image_info *info)
965 {
966 abi_ulong sp;
967 int size;
968 abi_ulong u_platform;
969 const char *k_platform;
970 const int n = sizeof(elf_addr_t);
971
972 sp = p;
973 u_platform = 0;
974 k_platform = ELF_PLATFORM;
975 if (k_platform) {
976 size_t len = strlen(k_platform) + 1;
977 sp -= (len + n - 1) & ~(n - 1);
978 u_platform = sp;
979 /* FIXME - check return value of memcpy_to_target() for failure */
980 memcpy_to_target(sp, k_platform, len);
981 }
982 /*
983 * Force 16 byte _final_ alignment here for generality.
984 */
985 sp = sp &~ (abi_ulong)15;
986 size = (DLINFO_ITEMS + 1) * 2;
987 if (k_platform)
988 size += 2;
989 #ifdef DLINFO_ARCH_ITEMS
990 size += DLINFO_ARCH_ITEMS * 2;
991 #endif
992 size += envc + argc + 2;
993 size += (!ibcs ? 3 : 1); /* argc itself */
994 size *= n;
995 if (size & 15)
996 sp -= 16 - (size & 15);
997
998 /* This is correct because Linux defines
999 * elf_addr_t as Elf32_Off / Elf64_Off
1000 */
1001 #define NEW_AUX_ENT(id, val) do { \
1002 sp -= n; put_user_ual(val, sp); \
1003 sp -= n; put_user_ual(id, sp); \
1004 } while(0)
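/*
 * For illustration: each NEW_AUX_ENT() pushes the value and then the id, so a
 * pair sits in memory as { id, value } and entries appear in reverse order of
 * emission. Emitting AT_NULL first therefore leaves the terminating entry at
 * the highest address of the auxiliary vector, with later entries (AT_PHDR,
 * AT_PHENT, ...) filled in below it as sp moves downwards.
 */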
1005
1006 NEW_AUX_ENT (AT_NULL, 0);
1007
1008 /* There must be exactly DLINFO_ITEMS entries here. */
1009 NEW_AUX_ENT(AT_PHDR, (abi_ulong)(load_addr + exec->e_phoff));
1010 NEW_AUX_ENT(AT_PHENT, (abi_ulong)(sizeof (struct elf_phdr)));
1011 NEW_AUX_ENT(AT_PHNUM, (abi_ulong)(exec->e_phnum));
1012 NEW_AUX_ENT(AT_PAGESZ, (abi_ulong)(TARGET_PAGE_SIZE));
1013 NEW_AUX_ENT(AT_BASE, (abi_ulong)(interp_load_addr));
1014 NEW_AUX_ENT(AT_FLAGS, (abi_ulong)0);
1015 NEW_AUX_ENT(AT_ENTRY, load_bias + exec->e_entry);
1016 NEW_AUX_ENT(AT_UID, (abi_ulong) getuid());
1017 NEW_AUX_ENT(AT_EUID, (abi_ulong) geteuid());
1018 NEW_AUX_ENT(AT_GID, (abi_ulong) getgid());
1019 NEW_AUX_ENT(AT_EGID, (abi_ulong) getegid());
1020 NEW_AUX_ENT(AT_HWCAP, (abi_ulong) ELF_HWCAP);
1021 NEW_AUX_ENT(AT_CLKTCK, (abi_ulong) sysconf(_SC_CLK_TCK));
1022 if (k_platform)
1023 NEW_AUX_ENT(AT_PLATFORM, u_platform);
1024 #ifdef ARCH_DLINFO
1025 /*
1026 * ARCH_DLINFO must come last so platform specific code can enforce
1027 * special alignment requirements on the AUXV if necessary (eg. PPC).
1028 */
1029 ARCH_DLINFO;
1030 #endif
1031 #undef NEW_AUX_ENT
1032
1033 info->saved_auxv = sp;
1034
1035 sp = loader_build_argptr(envc, argc, sp, p, !ibcs);
1036 return sp;
1037 }
1038
1039
1040 static abi_ulong load_elf_interp(struct elfhdr * interp_elf_ex,
1041 int interpreter_fd,
1042 abi_ulong *interp_load_addr)
1043 {
1044 struct elf_phdr *elf_phdata = NULL;
1045 struct elf_phdr *eppnt;
1046 abi_ulong load_addr = 0;
1047 int load_addr_set = 0;
1048 int retval;
1049 abi_ulong last_bss, elf_bss;
1050 abi_ulong error;
1051 int i;
1052
1053 elf_bss = 0;
1054 last_bss = 0;
1055 error = 0;
1056
1057 #ifdef BSWAP_NEEDED
1058 bswap_ehdr(interp_elf_ex);
1059 #endif
1060 /* First of all, some simple consistency checks */
1061 if ((interp_elf_ex->e_type != ET_EXEC &&
1062 interp_elf_ex->e_type != ET_DYN) ||
1063 !elf_check_arch(interp_elf_ex->e_machine)) {
1064 return ~((abi_ulong)0UL);
1065 }
1066
1067
1068 /* Now read in all of the header information */
1069
1070 if (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum > TARGET_PAGE_SIZE)
1071 return ~(abi_ulong)0UL;
1072
1073 elf_phdata = (struct elf_phdr *)
1074 malloc(sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
1075
1076 if (!elf_phdata)
1077 return ~((abi_ulong)0UL);
1078
1079 /*
1080 * If the size of this structure has changed, then punt, since
1081 * we will be doing the wrong thing.
1082 */
1083 if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr)) {
1084 free(elf_phdata);
1085 return ~((abi_ulong)0UL);
1086 }
1087
1088 retval = lseek(interpreter_fd, interp_elf_ex->e_phoff, SEEK_SET);
1089 if(retval >= 0) {
1090 retval = read(interpreter_fd,
1091 (char *) elf_phdata,
1092 sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
1093 }
1094 if (retval < 0) {
1095 perror("load_elf_interp");
1096 exit(-1);
1097 free (elf_phdata);
1098 return retval;
1099 }
1100 #ifdef BSWAP_NEEDED
1101 eppnt = elf_phdata;
1102 for (i=0; i<interp_elf_ex->e_phnum; i++, eppnt++) {
1103 bswap_phdr(eppnt);
1104 }
1105 #endif
1106
1107 if (interp_elf_ex->e_type == ET_DYN) {
1108 /* in order to avoid hardcoding the interpreter load
1109 address in qemu, we allocate a big enough memory zone */
1110 error = target_mmap(0, INTERP_MAP_SIZE,
1111 PROT_NONE, MAP_PRIVATE | MAP_ANON,
1112 -1, 0);
1113 if (error == -1) {
1114 perror("mmap");
1115 exit(-1);
1116 }
1117 load_addr = error;
1118 load_addr_set = 1;
1119 }
1120
1121 eppnt = elf_phdata;
1122 for(i=0; i<interp_elf_ex->e_phnum; i++, eppnt++)
1123 if (eppnt->p_type == PT_LOAD) {
1124 int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
1125 int elf_prot = 0;
1126 abi_ulong vaddr = 0;
1127 abi_ulong k;
1128
1129 if (eppnt->p_flags & PF_R) elf_prot = PROT_READ;
1130 if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
1131 if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
1132 if (interp_elf_ex->e_type == ET_EXEC || load_addr_set) {
1133 elf_type |= MAP_FIXED;
1134 vaddr = eppnt->p_vaddr;
1135 }
1136 error = target_mmap(load_addr+TARGET_ELF_PAGESTART(vaddr),
1137 eppnt->p_filesz + TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr),
1138 elf_prot,
1139 elf_type,
1140 interpreter_fd,
1141 eppnt->p_offset - TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr));
1142
1143 if (error == -1) {
1144 /* Real error */
1145 close(interpreter_fd);
1146 free(elf_phdata);
1147 return ~((abi_ulong)0UL);
1148 }
1149
1150 if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) {
1151 load_addr = error;
1152 load_addr_set = 1;
1153 }
1154
1155 /*
1156 * Find the end of the file mapping for this phdr, and keep
1157 * track of the largest address we see for this.
1158 */
1159 k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
1160 if (k > elf_bss) elf_bss = k;
1161
1162 /*
1163 * Do the same thing for the memory mapping - between
1164 * elf_bss and last_bss is the bss section.
1165 */
1166 k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
1167 if (k > last_bss) last_bss = k;
1168 }
1169
1170 /* Now use mmap to map the library into memory. */
1171
1172 close(interpreter_fd);
1173
1174 /*
1175 * Now fill out the bss section. First pad the last page up
1176 * to the page boundary, and then perform a mmap to make sure
1177 * that there are zeromapped pages up to and including the last
1178 * bss page.
1179 */
1180 padzero(elf_bss, last_bss);
1181 elf_bss = TARGET_ELF_PAGESTART(elf_bss + qemu_host_page_size - 1); /* What we have mapped so far */
1182
1183 /* Map the last of the bss segment */
1184 if (last_bss > elf_bss) {
1185 target_mmap(elf_bss, last_bss-elf_bss,
1186 PROT_READ|PROT_WRITE|PROT_EXEC,
1187 MAP_FIXED|MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
1188 }
1189 free(elf_phdata);
1190
1191 *interp_load_addr = load_addr;
1192 return ((abi_ulong) interp_elf_ex->e_entry) + load_addr;
1193 }
1194
1195 static int symfind(const void *s0, const void *s1)
1196 {
1197 struct elf_sym *key = (struct elf_sym *)s0;
1198 struct elf_sym *sym = (struct elf_sym *)s1;
1199 int result = 0;
1200 if (key->st_value < sym->st_value) {
1201 result = -1;
1202 } else if (key->st_value > sym->st_value + sym->st_size) {
1203 result = 1;
1204 }
1205 return result;
1206 }
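/*
 * Note: symfind() is the comparator for the bsearch() in lookup_symbolxx()
 * below. The key only carries an address, and it compares equal to any symbol
 * whose [st_value, st_value + st_size] range contains that address, so a PC
 * anywhere inside a function resolves to that function's symbol.
 */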
1207
1208 static const char *lookup_symbolxx(struct syminfo *s, target_ulong orig_addr)
1209 {
1210 #if ELF_CLASS == ELFCLASS32
1211 struct elf_sym *syms = s->disas_symtab.elf32;
1212 #else
1213 struct elf_sym *syms = s->disas_symtab.elf64;
1214 #endif
1215
1216 // binary search
1217 struct elf_sym key;
1218 struct elf_sym *sym;
1219
1220 key.st_value = orig_addr;
1221
1222 sym = bsearch(&key, syms, s->disas_num_syms, sizeof(*syms), symfind);
1223 if (sym != 0) {
1224 return s->disas_strtab + sym->st_name;
1225 }
1226
1227 return "";
1228 }
1229
1230 /* FIXME: This should use elf_ops.h */
1231 static int symcmp(const void *s0, const void *s1)
1232 {
1233 struct elf_sym *sym0 = (struct elf_sym *)s0;
1234 struct elf_sym *sym1 = (struct elf_sym *)s1;
1235 return (sym0->st_value < sym1->st_value)
1236 ? -1
1237 : ((sym0->st_value > sym1->st_value) ? 1 : 0);
1238 }
1239
1240 /* Best attempt to load symbols from this ELF object. */
1241 static void load_symbols(struct elfhdr *hdr, int fd)
1242 {
1243 unsigned int i, nsyms;
1244 struct elf_shdr sechdr, symtab, strtab;
1245 char *strings;
1246 struct syminfo *s;
1247 struct elf_sym *syms;
1248
1249 lseek(fd, hdr->e_shoff, SEEK_SET);
1250 for (i = 0; i < hdr->e_shnum; i++) {
1251 if (read(fd, &sechdr, sizeof(sechdr)) != sizeof(sechdr))
1252 return;
1253 #ifdef BSWAP_NEEDED
1254 bswap_shdr(&sechdr);
1255 #endif
1256 if (sechdr.sh_type == SHT_SYMTAB) {
1257 symtab = sechdr;
1258 lseek(fd, hdr->e_shoff
1259 + sizeof(sechdr) * sechdr.sh_link, SEEK_SET);
1260 if (read(fd, &strtab, sizeof(strtab))
1261 != sizeof(strtab))
1262 return;
1263 #ifdef BSWAP_NEEDED
1264 bswap_shdr(&strtab);
1265 #endif
1266 goto found;
1267 }
1268 }
1269 return; /* Shouldn't happen... */
1270
1271 found:
1272 /* Now know where the strtab and symtab are. Snarf them. */
1273 s = malloc(sizeof(*s));
1274 syms = malloc(symtab.sh_size);
1275 if (!syms)
1276 return;
1277 s->disas_strtab = strings = malloc(strtab.sh_size);
1278 if (!s->disas_strtab)
1279 return;
1280
1281 lseek(fd, symtab.sh_offset, SEEK_SET);
1282 if (read(fd, syms, symtab.sh_size) != symtab.sh_size)
1283 return;
1284
1285 nsyms = symtab.sh_size / sizeof(struct elf_sym);
1286
1287 i = 0;
1288 while (i < nsyms) {
1289 #ifdef BSWAP_NEEDED
1290 bswap_sym(syms + i);
1291 #endif
1292 // Throw away entries which we do not need.
1293 if (syms[i].st_shndx == SHN_UNDEF ||
1294 syms[i].st_shndx >= SHN_LORESERVE ||
1295 ELF_ST_TYPE(syms[i].st_info) != STT_FUNC) {
1296 nsyms--;
1297 if (i < nsyms) {
1298 syms[i] = syms[nsyms];
1299 }
1300 continue;
1301 }
1302 #if defined(TARGET_ARM) || defined (TARGET_MIPS)
1303 /* The bottom address bit marks a Thumb or MIPS16 symbol. */
1304 syms[i].st_value &= ~(target_ulong)1;
1305 #endif
1306 i++;
1307 }
1308 syms = realloc(syms, nsyms * sizeof(*syms));
1309
1310 qsort(syms, nsyms, sizeof(*syms), symcmp);
1311
1312 lseek(fd, strtab.sh_offset, SEEK_SET);
1313 if (read(fd, strings, strtab.sh_size) != strtab.sh_size)
1314 return;
1315 s->disas_num_syms = nsyms;
1316 #if ELF_CLASS == ELFCLASS32
1317 s->disas_symtab.elf32 = syms;
1318 s->lookup_symbol = lookup_symbolxx;
1319 #else
1320 s->disas_symtab.elf64 = syms;
1321 s->lookup_symbol = lookup_symbolxx;
1322 #endif
1323 s->next = syminfos;
1324 syminfos = s;
1325 }
1326
1327 int load_elf_binary(struct linux_binprm * bprm, struct target_pt_regs * regs,
1328 struct image_info * info)
1329 {
1330 struct elfhdr elf_ex;
1331 struct elfhdr interp_elf_ex;
1332 struct exec interp_ex;
1333 int interpreter_fd = -1; /* avoid warning */
1334 abi_ulong load_addr, load_bias;
1335 int load_addr_set = 0;
1336 unsigned int interpreter_type = INTERPRETER_NONE;
1337 unsigned char ibcs2_interpreter;
1338 int i;
1339 abi_ulong mapped_addr;
1340 struct elf_phdr * elf_ppnt;
1341 struct elf_phdr *elf_phdata;
1342 abi_ulong elf_bss, k, elf_brk;
1343 int retval;
1344 char * elf_interpreter;
1345 abi_ulong elf_entry, interp_load_addr = 0;
1346 int status;
1347 abi_ulong start_code, end_code, start_data, end_data;
1348 abi_ulong reloc_func_desc = 0;
1349 abi_ulong elf_stack;
1350 char passed_fileno[6];
1351
1352 ibcs2_interpreter = 0;
1353 status = 0;
1354 load_addr = 0;
1355 load_bias = 0;
1356 elf_ex = *((struct elfhdr *) bprm->buf); /* exec-header */
1357 #ifdef BSWAP_NEEDED
1358 bswap_ehdr(&elf_ex);
1359 #endif
1360
1361 /* First of all, some simple consistency checks */
1362 if ((elf_ex.e_type != ET_EXEC && elf_ex.e_type != ET_DYN) ||
1363 (! elf_check_arch(elf_ex.e_machine))) {
1364 return -ENOEXEC;
1365 }
1366
1367 bprm->p = copy_elf_strings(1, &bprm->filename, bprm->page, bprm->p);
1368 bprm->p = copy_elf_strings(bprm->envc,bprm->envp,bprm->page,bprm->p);
1369 bprm->p = copy_elf_strings(bprm->argc,bprm->argv,bprm->page,bprm->p);
1370 if (!bprm->p) {
1371 retval = -E2BIG;
1372 }
1373
1374 /* Now read in all of the header information */
1375 elf_phdata = (struct elf_phdr *)malloc(elf_ex.e_phentsize*elf_ex.e_phnum);
1376 if (elf_phdata == NULL) {
1377 return -ENOMEM;
1378 }
1379
1380 retval = lseek(bprm->fd, elf_ex.e_phoff, SEEK_SET);
1381 if(retval > 0) {
1382 retval = read(bprm->fd, (char *) elf_phdata,
1383 elf_ex.e_phentsize * elf_ex.e_phnum);
1384 }
1385
1386 if (retval < 0) {
1387 perror("load_elf_binary");
1388 exit(-1);
1389 free (elf_phdata);
1390 return -errno;
1391 }
1392
1393 #ifdef BSWAP_NEEDED
1394 elf_ppnt = elf_phdata;
1395 for (i=0; i<elf_ex.e_phnum; i++, elf_ppnt++) {
1396 bswap_phdr(elf_ppnt);
1397 }
1398 #endif
1399 elf_ppnt = elf_phdata;
1400
1401 elf_bss = 0;
1402 elf_brk = 0;
1403
1404
1405 elf_stack = ~((abi_ulong)0UL);
1406 elf_interpreter = NULL;
1407 start_code = ~((abi_ulong)0UL);
1408 end_code = 0;
1409 start_data = 0;
1410 end_data = 0;
1411 interp_ex.a_info = 0;
1412
1413 for(i=0;i < elf_ex.e_phnum; i++) {
1414 if (elf_ppnt->p_type == PT_INTERP) {
1415 if ( elf_interpreter != NULL )
1416 {
1417 free (elf_phdata);
1418 free(elf_interpreter);
1419 close(bprm->fd);
1420 return -EINVAL;
1421 }
1422
1423 /* This is the program interpreter used for
1424 * shared libraries - for now assume that this
1425 * is an a.out format binary
1426 */
1427
1428 elf_interpreter = (char *)malloc(elf_ppnt->p_filesz);
1429
1430 if (elf_interpreter == NULL) {
1431 free (elf_phdata);
1432 close(bprm->fd);
1433 return -ENOMEM;
1434 }
1435
1436 retval = lseek(bprm->fd, elf_ppnt->p_offset, SEEK_SET);
1437 if(retval >= 0) {
1438 retval = read(bprm->fd, elf_interpreter, elf_ppnt->p_filesz);
1439 }
1440 if(retval < 0) {
1441 perror("load_elf_binary2");
1442 exit(-1);
1443 }
1444
1445 /* If the program interpreter is one of these two,
1446 then assume an iBCS2 image. Otherwise assume
1447 a native linux image. */
1448
1449 /* JRP - Need to add X86 lib dir stuff here... */
1450
1451 if (strcmp(elf_interpreter,"/usr/lib/libc.so.1") == 0 ||
1452 strcmp(elf_interpreter,"/usr/lib/ld.so.1") == 0) {
1453 ibcs2_interpreter = 1;
1454 }
1455
1456 #if 0
1457 printf("Using ELF interpreter %s\n", elf_interpreter);
1458 #endif
1459 if (retval >= 0) {
1460 retval = open(path(elf_interpreter), O_RDONLY);
1461 if(retval >= 0) {
1462 interpreter_fd = retval;
1463 }
1464 else {
1465 perror(elf_interpreter);
1466 exit(-1);
1467 /* retval = -errno; */
1468 }
1469 }
1470
1471 if (retval >= 0) {
1472 retval = lseek(interpreter_fd, 0, SEEK_SET);
1473 if(retval >= 0) {
1474 retval = read(interpreter_fd,bprm->buf,128);
1475 }
1476 }
1477 if (retval >= 0) {
1478 interp_ex = *((struct exec *) bprm->buf); /* aout exec-header */
1479 interp_elf_ex=*((struct elfhdr *) bprm->buf); /* elf exec-header */
1480 }
1481 if (retval < 0) {
1482 perror("load_elf_binary3");
1483 exit(-1);
1484 free (elf_phdata);
1485 free(elf_interpreter);
1486 close(bprm->fd);
1487 return retval;
1488 }
1489 }
1490 elf_ppnt++;
1491 }
1492
1493 /* Some simple consistency checks for the interpreter */
1494 if (elf_interpreter){
1495 interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;
1496
1497 /* Now figure out which format our binary is */
1498 if ((N_MAGIC(interp_ex) != OMAGIC) && (N_MAGIC(interp_ex) != ZMAGIC) &&
1499 (N_MAGIC(interp_ex) != QMAGIC)) {
1500 interpreter_type = INTERPRETER_ELF;
1501 }
1502
1503 if (interp_elf_ex.e_ident[0] != 0x7f ||
1504 strncmp((char *)&interp_elf_ex.e_ident[1], "ELF",3) != 0) {
1505 interpreter_type &= ~INTERPRETER_ELF;
1506 }
1507
1508 if (!interpreter_type) {
1509 free(elf_interpreter);
1510 free(elf_phdata);
1511 close(bprm->fd);
1512 return -ELIBBAD;
1513 }
1514 }
1515
1516 /* OK, we are done with that, now set up the arg stuff,
1517 and then start this sucker up */
1518
1519 {
1520 char * passed_p;
1521
1522 if (interpreter_type == INTERPRETER_AOUT) {
1523 snprintf(passed_fileno, sizeof(passed_fileno), "%d", bprm->fd);
1524 passed_p = passed_fileno;
1525
1526 if (elf_interpreter) {
1527 bprm->p = copy_elf_strings(1,&passed_p,bprm->page,bprm->p);
1528 bprm->argc++;
1529 }
1530 }
1531 if (!bprm->p) {
1532 if (elf_interpreter) {
1533 free(elf_interpreter);
1534 }
1535 free (elf_phdata);
1536 close(bprm->fd);
1537 return -E2BIG;
1538 }
1539 }
1540
1541 /* OK, This is the point of no return */
1542 info->end_data = 0;
1543 info->end_code = 0;
1544 info->start_mmap = (abi_ulong)ELF_START_MMAP;
1545 info->mmap = 0;
1546 elf_entry = (abi_ulong) elf_ex.e_entry;
1547
1548 #if defined(CONFIG_USE_GUEST_BASE)
1549 /*
1550 * In case the user has not explicitly set guest_base, we probe
1551 * here whether we should set it automatically.
1552 */
1553 if (!have_guest_base) {
1554 /*
1555 * Go through the ELF program header table and find out whether
1556 * any of the segments drop below our current mmap_min_addr; in
1557 * that case set guest_base to the corresponding address.
1558 */
1559 for (i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum;
1560 i++, elf_ppnt++) {
1561 if (elf_ppnt->p_type != PT_LOAD)
1562 continue;
1563 if (HOST_PAGE_ALIGN(elf_ppnt->p_vaddr) < mmap_min_addr) {
1564 guest_base = HOST_PAGE_ALIGN(mmap_min_addr);
1565 break;
1566 }
1567 }
1568 }
1569 #endif /* CONFIG_USE_GUEST_BASE */
1570
1571 /* Do this so that we can load the interpreter, if need be. We will
1572 change some of these later */
1573 info->rss = 0;
1574 bprm->p = setup_arg_pages(bprm->p, bprm, info);
1575 info->start_stack = bprm->p;
1576
1577 /* Now we do a little grungy work by mmapping the ELF image into
1578 * the correct location in memory. At this point, we assume that
1579 * the image should be loaded at a fixed address, not at a variable
1580 * address.
1581 */
1582
1583 for(i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum; i++, elf_ppnt++) {
1584 int elf_prot = 0;
1585 int elf_flags = 0;
1586 abi_ulong error;
1587
1588 if (elf_ppnt->p_type != PT_LOAD)
1589 continue;
1590
1591 if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ;
1592 if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
1593 if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
1594 elf_flags = MAP_PRIVATE | MAP_DENYWRITE;
1595 if (elf_ex.e_type == ET_EXEC || load_addr_set) {
1596 elf_flags |= MAP_FIXED;
1597 } else if (elf_ex.e_type == ET_DYN) {
1598 /* Try and get dynamic programs out of the way of the default mmap
1599 base, as well as whatever program they might try to exec. This
1600 is because the brk will follow the loader, and is not movable. */
1601 /* NOTE: for qemu, we do a big mmap to get enough space
1602 without hardcoding any address */
1603 error = target_mmap(0, ET_DYN_MAP_SIZE,
1604 PROT_NONE, MAP_PRIVATE | MAP_ANON,
1605 -1, 0);
1606 if (error == -1) {
1607 perror("mmap");
1608 exit(-1);
1609 }
1610 load_bias = TARGET_ELF_PAGESTART(error - elf_ppnt->p_vaddr);
1611 }
1612
1613 error = target_mmap(TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr),
1614 (elf_ppnt->p_filesz +
1615 TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)),
1616 elf_prot,
1617 (MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE),
1618 bprm->fd,
1619 (elf_ppnt->p_offset -
1620 TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)));
1621 if (error == -1) {
1622 perror("mmap");
1623 exit(-1);
1624 }
1625
1626 #ifdef LOW_ELF_STACK
1627 if (TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr) < elf_stack)
1628 elf_stack = TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr);
1629 #endif
1630
1631 if (!load_addr_set) {
1632 load_addr_set = 1;
1633 load_addr = elf_ppnt->p_vaddr - elf_ppnt->p_offset;
1634 if (elf_ex.e_type == ET_DYN) {
1635 load_bias += error -
1636 TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr);
1637 load_addr += load_bias;
1638 reloc_func_desc = load_bias;
1639 }
1640 }
1641 k = elf_ppnt->p_vaddr;
1642 if (k < start_code)
1643 start_code = k;
1644 if (start_data < k)
1645 start_data = k;
1646 k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
1647 if (k > elf_bss)
1648 elf_bss = k;
1649 if ((elf_ppnt->p_flags & PF_X) && end_code < k)
1650 end_code = k;
1651 if (end_data < k)
1652 end_data = k;
1653 k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
1654 if (k > elf_brk) elf_brk = k;
1655 }
1656
1657 elf_entry += load_bias;
1658 elf_bss += load_bias;
1659 elf_brk += load_bias;
1660 start_code += load_bias;
1661 end_code += load_bias;
1662 start_data += load_bias;
1663 end_data += load_bias;
1664
1665 if (elf_interpreter) {
1666 if (interpreter_type & 1) {
1667 elf_entry = load_aout_interp(&interp_ex, interpreter_fd);
1668 }
1669 else if (interpreter_type & 2) {
1670 elf_entry = load_elf_interp(&interp_elf_ex, interpreter_fd,
1671 &interp_load_addr);
1672 }
1673 reloc_func_desc = interp_load_addr;
1674
1675 close(interpreter_fd);
1676 free(elf_interpreter);
1677
1678 if (elf_entry == ~((abi_ulong)0UL)) {
1679 printf("Unable to load interpreter\n");
1680 free(elf_phdata);
1681 exit(-1);
1682 return 0;
1683 }
1684 }
1685
1686 free(elf_phdata);
1687
1688 if (qemu_log_enabled())
1689 load_symbols(&elf_ex, bprm->fd);
1690
1691 if (interpreter_type != INTERPRETER_AOUT) close(bprm->fd);
1692 info->personality = (ibcs2_interpreter ? PER_SVR4 : PER_LINUX);
1693
1694 #ifdef LOW_ELF_STACK
1695 info->start_stack = bprm->p = elf_stack - 4;
1696 #endif
1697 bprm->p = create_elf_tables(bprm->p,
1698 bprm->argc,
1699 bprm->envc,
1700 &elf_ex,
1701 load_addr, load_bias,
1702 interp_load_addr,
1703 (interpreter_type == INTERPRETER_AOUT ? 0 : 1),
1704 info);
1705 info->load_addr = reloc_func_desc;
1706 info->start_brk = info->brk = elf_brk;
1707 info->end_code = end_code;
1708 info->start_code = start_code;
1709 info->start_data = start_data;
1710 info->end_data = end_data;
1711 info->start_stack = bprm->p;
1712
1713 /* Calling set_brk effectively mmaps the pages that we need for the bss and break
1714 sections */
1715 set_brk(elf_bss, elf_brk);
1716
1717 padzero(elf_bss, elf_brk);
1718
1719 #if 0
1720 printf("(start_brk) %x\n" , info->start_brk);
1721 printf("(end_code) %x\n" , info->end_code);
1722 printf("(start_code) %x\n" , info->start_code);
1723 printf("(end_data) %x\n" , info->end_data);
1724 printf("(start_stack) %x\n" , info->start_stack);
1725 printf("(brk) %x\n" , info->brk);
1726 #endif
1727
1728 if ( info->personality == PER_SVR4 )
1729 {
1730 /* Why this, you ask??? Well SVr4 maps page 0 as read-only,
1731 and some applications "depend" upon this behavior.
1732 Since we do not have the power to recompile these, we
1733 emulate the SVr4 behavior. Sigh. */
1734 mapped_addr = target_mmap(0, qemu_host_page_size, PROT_READ | PROT_EXEC,
1735 MAP_FIXED | MAP_PRIVATE, -1, 0);
1736 }
1737
1738 info->entry = elf_entry;
1739
1740 #ifdef USE_ELF_CORE_DUMP
1741 bprm->core_dump = &elf_core_dump;
1742 #endif
1743
1744 return 0;
1745 }
1746
1747 #ifdef USE_ELF_CORE_DUMP
1748
1749 /*
1750 * Definitions to generate Intel SVR4-like core files.
1751 * These mostly have the same names as the SVR4 types with "elf_"
1752 * tacked on the front to prevent clashes with linux definitions,
1753 * and the typedef forms have been avoided. This is mostly like
1754 * the SVR4 structure, but more Linuxy, with things that Linux does
1755 * not support and which gdb doesn't really use excluded.
1756 *
1757 * Fields we don't dump (their contents are zero) in linux-user qemu
1758 * are marked with XXX.
1759 *
1760 * Core dump code is copied from linux kernel (fs/binfmt_elf.c).
1761 *
1762 * Porting ELF coredump to a target is a (quite) simple process. First you
1763 * define USE_ELF_CORE_DUMP in the target ELF code (where init_thread() for
1764 * the target resides):
1765 *
1766 * #define USE_ELF_CORE_DUMP
1767 *
1768 * Next you define the type of register set used for dumping. The ELF
1769 * specification says that it needs to be an array of elf_greg_t of size ELF_NREG.
1770 *
1771 * typedef <target_regtype> elf_greg_t;
1772 * #define ELF_NREG <number of registers>
1773 * typedef elf_greg_t elf_gregset_t[ELF_NREG];
1774 *
1775 * Then define the following types to match the target types. The actual types
1776 * can be found in the linux kernel (arch/<ARCH>/include/asm/posix_types.h):
1777 *
1778 * typedef <target_uid_type> target_uid_t;
1779 * typedef <target_gid_type> target_gid_t;
1780 * typedef <target_pid_type> target_pid_t;
1781 *
1782 * The last step is to implement a target-specific function that copies registers
1783 * from the given cpu into the register set just defined. The prototype is:
1784 *
1785 * static void elf_core_copy_regs(elf_gregset_t *regs, const CPUState *env);
1786 *
1787 * Parameters:
1788 * regs - copy register values into here (allocated and zeroed by caller)
1789 * env - copy registers from here
1790 *
1791 * An example for the ARM target is provided in this file.
1792 */
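/*
 * A condensed, hypothetical sketch of those steps for an imaginary "xyz"
 * target (all names below are made up; the real per-target definitions live
 * in the TARGET_* blocks near the top of this file):
 */
#if 0
#define USE_ELF_CORE_DUMP

typedef uint32_t elf_greg_t;                 /* width of one dumped register */
#define ELF_NREG 16                          /* number of registers the target dumps */
typedef elf_greg_t elf_gregset_t[ELF_NREG];

typedef uint16_t target_uid_t;               /* from arch/xyz/include/asm/posix_types.h */
typedef uint16_t target_gid_t;
typedef int32_t  target_pid_t;

static void elf_core_copy_regs(elf_gregset_t *regs, const CPUState *env)
{
    int i;

    for (i = 0; i < ELF_NREG; i++) {
        (*regs)[i] = env->regs[i];           /* copy guest registers into the note data */
    }
}
#endif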
1793
1794 /* An ELF note in memory */
1795 struct memelfnote {
1796 const char *name;
1797 size_t namesz;
1798 size_t namesz_rounded;
1799 int type;
1800 size_t datasz;
1801 void *data;
1802 size_t notesz;
1803 };
1804
1805 struct elf_siginfo {
1806 int si_signo; /* signal number */
1807 int si_code; /* extra code */
1808 int si_errno; /* errno */
1809 };
1810
1811 struct elf_prstatus {
1812 struct elf_siginfo pr_info; /* Info associated with signal */
1813 short pr_cursig; /* Current signal */
1814 target_ulong pr_sigpend; /* XXX */
1815 target_ulong pr_sighold; /* XXX */
1816 target_pid_t pr_pid;
1817 target_pid_t pr_ppid;
1818 target_pid_t pr_pgrp;
1819 target_pid_t pr_sid;
1820 struct target_timeval pr_utime; /* XXX User time */
1821 struct target_timeval pr_stime; /* XXX System time */
1822 struct target_timeval pr_cutime; /* XXX Cumulative user time */
1823 struct target_timeval pr_cstime; /* XXX Cumulative system time */
1824 elf_gregset_t pr_reg; /* GP registers */
1825 int pr_fpvalid; /* XXX */
1826 };
1827
1828 #define ELF_PRARGSZ (80) /* Number of chars for args */
1829
1830 struct elf_prpsinfo {
1831 char pr_state; /* numeric process state */
1832 char pr_sname; /* char for pr_state */
1833 char pr_zomb; /* zombie */
1834 char pr_nice; /* nice val */
1835 target_ulong pr_flag; /* flags */
1836 target_uid_t pr_uid;
1837 target_gid_t pr_gid;
1838 target_pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
1839 /* Lots missing */
1840 char pr_fname[16]; /* filename of executable */
1841 char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
1842 };
1843
1844 /* Here is the structure in which status of each thread is captured. */
1845 struct elf_thread_status {
1846 TAILQ_ENTRY(elf_thread_status) ets_link;
1847 struct elf_prstatus prstatus; /* NT_PRSTATUS */
1848 #if 0
1849 elf_fpregset_t fpu; /* NT_PRFPREG */
1850 struct task_struct *thread;
1851 elf_fpxregset_t xfpu; /* ELF_CORE_XFPREG_TYPE */
1852 #endif
1853 struct memelfnote notes[1];
1854 int num_notes;
1855 };
1856
1857 struct elf_note_info {
1858 struct memelfnote *notes;
1859 struct elf_prstatus *prstatus; /* NT_PRSTATUS */
1860 struct elf_prpsinfo *psinfo; /* NT_PRPSINFO */
1861
1862 TAILQ_HEAD(thread_list_head, elf_thread_status) thread_list;
1863 #if 0
1864 /*
1865 * Current version of ELF coredump doesn't support
1866 * dumping fp regs etc.
1867 */
1868 elf_fpregset_t *fpu;
1869 elf_fpxregset_t *xfpu;
1870 int thread_status_size;
1871 #endif
1872 int notes_size;
1873 int numnote;
1874 };
1875
1876 struct vm_area_struct {
1877 abi_ulong vma_start; /* start vaddr of memory region */
1878 abi_ulong vma_end; /* end vaddr of memory region */
1879 abi_ulong vma_flags; /* protection etc. flags for the region */
1880 TAILQ_ENTRY(vm_area_struct) vma_link;
1881 };
1882
1883 struct mm_struct {
1884 TAILQ_HEAD(, vm_area_struct) mm_mmap;
1885 int mm_count; /* number of mappings */
1886 };
1887
1888 static struct mm_struct *vma_init(void);
1889 static void vma_delete(struct mm_struct *);
1890 static int vma_add_mapping(struct mm_struct *, abi_ulong,
1891 abi_ulong, abi_ulong);
1892 static int vma_get_mapping_count(const struct mm_struct *);
1893 static struct vm_area_struct *vma_first(const struct mm_struct *);
1894 static struct vm_area_struct *vma_next(struct vm_area_struct *);
1895 static abi_ulong vma_dump_size(const struct vm_area_struct *);
1896 static int vma_walker(void *priv, unsigned long start, unsigned long end,
1897 unsigned long flags);
1898
1899 static void fill_elf_header(struct elfhdr *, int, uint16_t, uint32_t);
1900 static void fill_note(struct memelfnote *, const char *, int,
1901 unsigned int, void *);
1902 static void fill_prstatus(struct elf_prstatus *, const TaskState *, int);
1903 static int fill_psinfo(struct elf_prpsinfo *, const TaskState *);
1904 static void fill_auxv_note(struct memelfnote *, const TaskState *);
1905 static void fill_elf_note_phdr(struct elf_phdr *, int, off_t);
1906 static size_t note_size(const struct memelfnote *);
1907 static void free_note_info(struct elf_note_info *);
1908 static int fill_note_info(struct elf_note_info *, long, const CPUState *);
1909 static void fill_thread_info(struct elf_note_info *, const CPUState *);
1910 static int core_dump_filename(const TaskState *, char *, size_t);
1911
1912 static int dump_write(int, const void *, size_t);
1913 static int write_note(struct memelfnote *, int);
1914 static int write_note_info(struct elf_note_info *, int);
1915
1916 #ifdef BSWAP_NEEDED
1917 static void bswap_prstatus(struct elf_prstatus *);
1918 static void bswap_psinfo(struct elf_prpsinfo *);
1919
1920 static void bswap_prstatus(struct elf_prstatus *prstatus)
1921 {
1922 prstatus->pr_info.si_signo = tswapl(prstatus->pr_info.si_signo);
1923 prstatus->pr_info.si_code = tswapl(prstatus->pr_info.si_code);
1924 prstatus->pr_info.si_errno = tswapl(prstatus->pr_info.si_errno);
1925 prstatus->pr_cursig = tswap16(prstatus->pr_cursig);
1926 prstatus->pr_sigpend = tswapl(prstatus->pr_sigpend);
1927 prstatus->pr_sighold = tswapl(prstatus->pr_sighold);
1928 prstatus->pr_pid = tswap32(prstatus->pr_pid);
1929 prstatus->pr_ppid = tswap32(prstatus->pr_ppid);
1930 prstatus->pr_pgrp = tswap32(prstatus->pr_pgrp);
1931 prstatus->pr_sid = tswap32(prstatus->pr_sid);
1932 /* cpu times are not filled, so we skip them */
1933 /* regs should be in correct format already */
1934 prstatus->pr_fpvalid = tswap32(prstatus->pr_fpvalid);
1935 }
1936
1937 static void bswap_psinfo(struct elf_prpsinfo *psinfo)
1938 {
1939 psinfo->pr_flag = tswapl(psinfo->pr_flag);
1940 psinfo->pr_uid = tswap16(psinfo->pr_uid);
1941 psinfo->pr_gid = tswap16(psinfo->pr_gid);
1942 psinfo->pr_pid = tswap32(psinfo->pr_pid);
1943 psinfo->pr_ppid = tswap32(psinfo->pr_ppid);
1944 psinfo->pr_pgrp = tswap32(psinfo->pr_pgrp);
1945 psinfo->pr_sid = tswap32(psinfo->pr_sid);
1946 }
1947 #endif /* BSWAP_NEEDED */
1948
1949 /*
1950 * Minimal support for Linux memory regions. These are needed
1951 * when we are finding out exactly what memory belongs to the
1952 * emulated process. No locks are needed here, as long as the
1953 * thread that received the signal is stopped.
1954 */
1955
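/*
 * Illustrative sketch only (kept under #if 0, not compiled): how the
 * vma_xxx helpers below are meant to be used together.  The mapping
 * addresses and the helper name are made up for the example.
 */
#if 0
static void vma_usage_example(void)
{
    struct mm_struct *mm;
    struct vm_area_struct *vma;

    if ((mm = vma_init()) == NULL)
        return;

    /* normally walk_memory_regions(mm, vma_walker) fills this in */
    vma_add_mapping(mm, 0x8000, 0x9000, PROT_READ | PROT_EXEC);
    vma_add_mapping(mm, 0x10000, 0x14000, PROT_READ | PROT_WRITE);

    /* iterate over the collected regions */
    for (vma = vma_first(mm); vma != NULL; vma = vma_next(vma)) {
        (void) fprintf(stderr, "region %lx-%lx, dump size %lu\n",
            (unsigned long)vma->vma_start, (unsigned long)vma->vma_end,
            (unsigned long)vma_dump_size(vma));
    }

    vma_delete(mm);
}
#endif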
1956 static struct mm_struct *vma_init(void)
1957 {
1958 struct mm_struct *mm;
1959
1960 if ((mm = qemu_malloc(sizeof (*mm))) == NULL)
1961 return (NULL);
1962
1963 mm->mm_count = 0;
1964 TAILQ_INIT(&mm->mm_mmap);
1965
1966 return (mm);
1967 }
1968
1969 static void vma_delete(struct mm_struct *mm)
1970 {
1971 struct vm_area_struct *vma;
1972
1973 while ((vma = vma_first(mm)) != NULL) {
1974 TAILQ_REMOVE(&mm->mm_mmap, vma, vma_link);
1975 qemu_free(vma);
1976 }
1977 qemu_free(mm);
1978 }
1979
1980 static int vma_add_mapping(struct mm_struct *mm, abi_ulong start,
1981 abi_ulong end, abi_ulong flags)
1982 {
1983 struct vm_area_struct *vma;
1984
1985 if ((vma = qemu_mallocz(sizeof (*vma))) == NULL)
1986 return (-1);
1987
1988 vma->vma_start = start;
1989 vma->vma_end = end;
1990 vma->vma_flags = flags;
1991
1992 TAILQ_INSERT_TAIL(&mm->mm_mmap, vma, vma_link);
1993 mm->mm_count++;
1994
1995 return (0);
1996 }
1997
1998 static struct vm_area_struct *vma_first(const struct mm_struct *mm)
1999 {
2000 return (TAILQ_FIRST(&mm->mm_mmap));
2001 }
2002
2003 static struct vm_area_struct *vma_next(struct vm_area_struct *vma)
2004 {
2005 return (TAILQ_NEXT(vma, vma_link));
2006 }
2007
2008 static int vma_get_mapping_count(const struct mm_struct *mm)
2009 {
2010 return (mm->mm_count);
2011 }
2012
2013 /*
2014 * Calculate file (dump) size of given memory region.
2015 */
2016 static abi_ulong vma_dump_size(const struct vm_area_struct *vma)
2017 {
2018 /* if we cannot even read the first page, skip it */
2019 if (!access_ok(VERIFY_READ, vma->vma_start, TARGET_PAGE_SIZE))
2020 return (0);
2021
2022 /*
2023 * Usually we don't dump executable pages as they contain
2024 * non-writable code that a debugger can read directly from
2025 * the target library etc. However, thread stacks are also
2026 * marked executable, so we read in the first page of the given
2027 * region and check whether it contains an ELF header. If there
2028 * is no ELF header, we dump the region.
2029 */
2030 if (vma->vma_flags & PROT_EXEC) {
2031 char page[TARGET_PAGE_SIZE];
2032
2033 if (copy_from_user(page, vma->vma_start, sizeof (page)) == 0 &&
2034 (page[EI_MAG0] == ELFMAG0) &&
2035 (page[EI_MAG1] == ELFMAG1) &&
2036 (page[EI_MAG2] == ELFMAG2) &&
2037 (page[EI_MAG3] == ELFMAG3)) {
2038 /*
2039 * The mapping is probably backed by an ELF binary.
2040 * Don't dump it.
2041 */
2042 return (0);
2043 }
2044 }
2045
2046 return (vma->vma_end - vma->vma_start);
2047 }
2048
2049 static int vma_walker(void *priv, unsigned long start, unsigned long end,
2050 unsigned long flags)
2051 {
2052 struct mm_struct *mm = (struct mm_struct *)priv;
2053
2054 /*
2055 * Don't dump anything that qemu has reserved for internal use.
2056 */
2057 if (flags & PAGE_RESERVED)
2058 return (0);
2059
2060 vma_add_mapping(mm, start, end, flags);
2061 return (0);
2062 }
2063
2064 static void fill_note(struct memelfnote *note, const char *name, int type,
2065 unsigned int sz, void *data)
2066 {
2067 unsigned int namesz;
2068
2069 namesz = strlen(name) + 1;
2070 note->name = name;
2071 note->namesz = namesz;
2072 note->namesz_rounded = roundup(namesz, sizeof (int32_t));
2073 note->type = type;
2074 note->datasz = roundup(sz, sizeof (int32_t));
2075 note->data = data;
2076
2077 /*
2078 * We calculate the rounded-up note size here, as specified
2079 * by the ELF specification.
2080 */
2081 note->notesz = sizeof (struct elf_note) +
2082 note->namesz_rounded + note->datasz;
2083 }
2084
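/*
 * Illustrative sketch only (kept under #if 0, not compiled): the on-disk
 * size fill_note() computes for a "CORE"/NT_PRSTATUS note.  The name
 * "CORE" is 5 bytes including the NUL and rounds up to 8; the payload is
 * rounded up to a 4-byte boundary as well.  The helper name is made up.
 */
#if 0
static size_t example_prstatus_note_size(struct elf_prstatus *prstatus)
{
    struct memelfnote note;

    fill_note(&note, "CORE", NT_PRSTATUS, sizeof (*prstatus), prstatus);
    /*
     * note_size(&note) == sizeof (struct elf_note)
     *     + roundup(5, 4)                              (name "CORE" + NUL)
     *     + roundup(sizeof (struct elf_prstatus), 4)   (note payload)
     */
    return note_size(&note);
}
#endif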
2085 static void fill_elf_header(struct elfhdr *elf, int segs, uint16_t machine,
2086 uint32_t flags)
2087 {
2088 (void) memset(elf, 0, sizeof(*elf));
2089
2090 (void) memcpy(elf->e_ident, ELFMAG, SELFMAG);
2091 elf->e_ident[EI_CLASS] = ELF_CLASS;
2092 elf->e_ident[EI_DATA] = ELF_DATA;
2093 elf->e_ident[EI_VERSION] = EV_CURRENT;
2094 elf->e_ident[EI_OSABI] = ELF_OSABI;
2095
2096 elf->e_type = ET_CORE;
2097 elf->e_machine = machine;
2098 elf->e_version = EV_CURRENT;
2099 elf->e_phoff = sizeof(struct elfhdr);
2100 elf->e_flags = flags;
2101 elf->e_ehsize = sizeof(struct elfhdr);
2102 elf->e_phentsize = sizeof(struct elf_phdr);
2103 elf->e_phnum = segs;
2104
2105 #ifdef BSWAP_NEEDED
2106 bswap_ehdr(elf);
2107 #endif
2108 }
2109
2110 static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, off_t offset)
2111 {
2112 phdr->p_type = PT_NOTE;
2113 phdr->p_offset = offset;
2114 phdr->p_vaddr = 0;
2115 phdr->p_paddr = 0;
2116 phdr->p_filesz = sz;
2117 phdr->p_memsz = 0;
2118 phdr->p_flags = 0;
2119 phdr->p_align = 0;
2120
2121 #ifdef BSWAP_NEEDED
2122 bswap_phdr(phdr);
2123 #endif
2124 }
2125
2126 static size_t note_size(const struct memelfnote *note)
2127 {
2128 return (note->notesz);
2129 }
2130
2131 static void fill_prstatus(struct elf_prstatus *prstatus,
2132 const TaskState *ts, int signr)
2133 {
2134 (void) memset(prstatus, 0, sizeof (*prstatus));
2135 prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
2136 prstatus->pr_pid = ts->ts_tid;
2137 prstatus->pr_ppid = getppid();
2138 prstatus->pr_pgrp = getpgrp();
2139 prstatus->pr_sid = getsid(0);
2140
2141 #ifdef BSWAP_NEEDED
2142 bswap_prstatus(prstatus);
2143 #endif
2144 }
2145
2146 static int fill_psinfo(struct elf_prpsinfo *psinfo, const TaskState *ts)
2147 {
2148 char *filename, *base_filename;
2149 unsigned int i, len;
2150
2151 (void) memset(psinfo, 0, sizeof (*psinfo));
2152
2153 len = ts->info->arg_end - ts->info->arg_start;
2154 if (len >= ELF_PRARGSZ)
2155 len = ELF_PRARGSZ - 1;
2156 if (copy_from_user(&psinfo->pr_psargs, ts->info->arg_start, len))
2157 return -EFAULT;
2158 for (i = 0; i < len; i++)
2159 if (psinfo->pr_psargs[i] == 0)
2160 psinfo->pr_psargs[i] = ' ';
2161 psinfo->pr_psargs[len] = 0;
2162
2163 psinfo->pr_pid = getpid();
2164 psinfo->pr_ppid = getppid();
2165 psinfo->pr_pgrp = getpgrp();
2166 psinfo->pr_sid = getsid(0);
2167 psinfo->pr_uid = getuid();
2168 psinfo->pr_gid = getgid();
2169
2170 filename = strdup(ts->bprm->filename);
2171 base_filename = strdup(basename(filename));
2172 (void) strncpy(psinfo->pr_fname, base_filename,
2173 sizeof(psinfo->pr_fname));
2174 free(base_filename);
2175 free(filename);
2176
2177 #ifdef BSWAP_NEEDED
2178 bswap_psinfo(psinfo);
2179 #endif
2180 return (0);
2181 }
2182
2183 static void fill_auxv_note(struct memelfnote *note, const TaskState *ts)
2184 {
2185 elf_addr_t auxv = (elf_addr_t)ts->info->saved_auxv;
2186 elf_addr_t orig_auxv = auxv;
2187 abi_ulong val;
2188 void *ptr;
2189 int i, len;
2190
2191 /*
2192 * The auxiliary vector is stored on the target process's stack. It
2193 * contains {type, value} pairs that we need to dump into the note.
2194 * This is not strictly necessary but we do it for completeness.
2195 */
2196
2197 /* find out the length of the vector; AT_NULL is the terminator */
2198 i = len = 0;
2199 do {
2200 get_user_ual(val, auxv);
2201 i += 2;
2202 auxv += 2 * sizeof (elf_addr_t);
2203 } while (val != AT_NULL);
2204 len = i * sizeof (elf_addr_t);
2205
2206 /* read in whole auxv vector and copy it to memelfnote */
2207 ptr = lock_user(VERIFY_READ, orig_auxv, len, 0);
2208 if (ptr != NULL) {
2209 fill_note(note, "CORE", NT_AUXV, len, ptr);
2210 unlock_user(ptr, orig_auxv, 0);
2211 }
2212 }
2213
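/*
 * Illustrative sketch only (kept under #if 0, not compiled): the saved
 * auxiliary vector is a sequence of {type, value} pairs terminated by an
 * AT_NULL entry, so a single entry can be looked up as below.  The helper
 * name is made up for the example.
 */
#if 0
static abi_ulong example_auxv_lookup(const TaskState *ts, abi_ulong type)
{
    elf_addr_t auxv = (elf_addr_t)ts->info->saved_auxv;
    abi_ulong key, val;

    for (;;) {
        get_user_ual(key, auxv);
        get_user_ual(val, auxv + sizeof (elf_addr_t));
        if (key == AT_NULL)
            break;
        if (key == type)
            return (val);
        auxv += 2 * sizeof (elf_addr_t);
    }
    return (0);
}
#endif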
2214 /*
2215 * Constructs the name of the coredump file. We use the following
2216 * naming convention:
2217 * qemu_<basename-of-target-binary>_<date>-<time>_<pid>.core
2218 *
2219 * Returns 0 on success, -1 otherwise (errno is set).
2220 */
2221 static int core_dump_filename(const TaskState *ts, char *buf,
2222 size_t bufsize)
2223 {
2224 char timestamp[64];
2225 char *filename = NULL;
2226 char *base_filename = NULL;
2227 struct timeval tv;
2228 struct tm tm;
2229
2230 assert(bufsize >= PATH_MAX);
2231
2232 if (gettimeofday(&tv, NULL) < 0) {
2233 (void) fprintf(stderr, "unable to get current timestamp: %s\n",
2234 strerror(errno));
2235 return (-1);
2236 }
2237
2238 filename = strdup(ts->bprm->filename);
2239 base_filename = strdup(basename(filename));
2240 (void) strftime(timestamp, sizeof (timestamp), "%Y%m%d-%H%M%S",
2241 localtime_r(&tv.tv_sec, &tm));
2242 (void) snprintf(buf, bufsize, "qemu_%s_%s_%d.core",
2243 base_filename, timestamp, (int)getpid());
2244 free(base_filename);
2245 free(filename);
2246
2247 return (0);
2248 }
2249
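/*
 * Illustrative sketch only (kept under #if 0, not compiled): how
 * core_dump_filename() is meant to be used; elf_core_dump() below does
 * essentially this before writing the dump.  The helper name is made up.
 */
#if 0
static int example_open_corefile(const TaskState *ts)
{
    char corefile[PATH_MAX];

    /* e.g. "qemu_ls_20090101-120000_1234.core" for a target binary "ls" */
    if (core_dump_filename(ts, corefile, sizeof (corefile)) < 0)
        return (-1);

    return (open(corefile, O_WRONLY | O_CREAT,
        S_IRUSR|S_IWUSR|S_IRGRP|S_IROTH));
}
#endif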
2250 static int dump_write(int fd, const void *ptr, size_t size)
2251 {
2252 const char *bufp = (const char *)ptr;
2253 ssize_t bytes_written, bytes_left;
2254 struct rlimit dumpsize;
2255 off_t pos;
2256
2257 bytes_written = 0;
2258 getrlimit(RLIMIT_CORE, &dumpsize);
2259 if ((pos = lseek(fd, 0, SEEK_CUR)) == -1) {
2260 if (errno == ESPIPE) { /* not a seekable stream */
2261 bytes_left = size;
2262 } else {
2263 return pos;
2264 }
2265 } else {
2266 if (dumpsize.rlim_cur <= pos) {
2267 return -1;
2268 } else if (dumpsize.rlim_cur == RLIM_INFINITY) {
2269 bytes_left = size;
2270 } else {
2271 size_t limit_left = dumpsize.rlim_cur - pos;
2272 bytes_left = limit_left >= size ? size : limit_left;
2273 }
2274 }
2275
2276 /*
2277 * Under normal conditions a single write(2) should do, but
2278 * for sockets etc. this loop is more portable.
2279 */
2280 do {
2281 bytes_written = write(fd, bufp, bytes_left);
2282 if (bytes_written < 0) {
2283 if (errno == EINTR)
2284 continue;
2285 return (-1);
2286 } else if (bytes_written == 0) { /* eof */
2287 return (-1);
2288 }
2289 bufp += bytes_written;
2290 bytes_left -= bytes_written;
2291 } while (bytes_left > 0);
2292
2293 return (0);
2294 }
2295
2296 static int write_note(struct memelfnote *men, int fd)
2297 {
2298 struct elf_note en;
2299
2300 en.n_namesz = men->namesz;
2301 en.n_type = men->type;
2302 en.n_descsz = men->datasz;
2303
2304 #ifdef BSWAP_NEEDED
2305 bswap_note(&en);
2306 #endif
2307
2308 if (dump_write(fd, &en, sizeof(en)) != 0)
2309 return (-1);
2310 if (dump_write(fd, men->name, men->namesz_rounded) != 0)
2311 return (-1);
2312 if (dump_write(fd, men->data, men->datasz) != 0)
2313 return (-1);
2314
2315 return (0);
2316 }
2317
2318 static void fill_thread_info(struct elf_note_info *info, const CPUState *env)
2319 {
2320 TaskState *ts = (TaskState *)env->opaque;
2321 struct elf_thread_status *ets;
2322
2323 ets = qemu_mallocz(sizeof (*ets));
2324 ets->num_notes = 1; /* only prstatus is dumped */
2325 fill_prstatus(&ets->prstatus, ts, 0);
2326 elf_core_copy_regs(&ets->prstatus.pr_reg, env);
2327 fill_note(&ets->notes[0], "CORE", NT_PRSTATUS, sizeof (ets->prstatus),
2328 &ets->prstatus);
2329
2330 TAILQ_INSERT_TAIL(&info->thread_list, ets, ets_link);
2331
2332 info->notes_size += note_size(&ets->notes[0]);
2333 }
2334
2335 static int fill_note_info(struct elf_note_info *info,
2336 long signr, const CPUState *env)
2337 {
2338 #define NUMNOTES 3
2339 CPUState *cpu = NULL;
2340 TaskState *ts = (TaskState *)env->opaque;
2341 int i;
2342
2343 (void) memset(info, 0, sizeof (*info));
2344
2345 TAILQ_INIT(&info->thread_list);
2346
2347 info->notes = qemu_mallocz(NUMNOTES * sizeof (struct memelfnote));
2348 if (info->notes == NULL)
2349 return (-ENOMEM);
2350 info->prstatus = qemu_mallocz(sizeof (*info->prstatus));
2351 if (info->prstatus == NULL)
2352 return (-ENOMEM);
2353 info->psinfo = qemu_mallocz(sizeof (*info->psinfo));
2354 if (info->psinfo == NULL)
2355 return (-ENOMEM);
2356
2357 /*
2358 * First fill in status (and registers) of current thread
2359 * including process info & aux vector.
2360 */
2361 fill_prstatus(info->prstatus, ts, signr);
2362 elf_core_copy_regs(&info->prstatus->pr_reg, env);
2363 fill_note(&info->notes[0], "CORE", NT_PRSTATUS,
2364 sizeof (*info->prstatus), info->prstatus);
2365 fill_psinfo(info->psinfo, ts);
2366 fill_note(&info->notes[1], "CORE", NT_PRPSINFO,
2367 sizeof (*info->psinfo), info->psinfo);
2368 fill_auxv_note(&info->notes[2], ts);
2369 info->numnote = NUMNOTES;
2370
2371 info->notes_size = 0;
2372 for (i = 0; i < info->numnote; i++)
2373 info->notes_size += note_size(&info->notes[i]);
2374
2375 /* read and fill status of all threads */
2376 cpu_list_lock();
2377 for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
2378 if (cpu == thread_env)
2379 continue;
2380 fill_thread_info(info, cpu);
2381 }
2382 cpu_list_unlock();
2383
2384 return (0);
2385 }
2386
2387 static void free_note_info(struct elf_note_info *info)
2388 {
2389 struct elf_thread_status *ets;
2390
2391 while (!TAILQ_EMPTY(&info->thread_list)) {
2392 ets = TAILQ_FIRST(&info->thread_list);
2393 TAILQ_REMOVE(&info->thread_list, ets, ets_link);
2394 qemu_free(ets);
2395 }
2396
2397 qemu_free(info->prstatus);
2398 qemu_free(info->psinfo);
2399 qemu_free(info->notes);
2400 }
2401
2402 static int write_note_info(struct elf_note_info *info, int fd)
2403 {
2404 struct elf_thread_status *ets;
2405 int i, error = 0;
2406
2407 /* write prstatus, psinfo and auxv for current thread */
2408 for (i = 0; i < info->numnote; i++)
2409 if ((error = write_note(&info->notes[i], fd)) != 0)
2410 return (error);
2411
2412 /* write prstatus for each thread */
2413 for (ets = info->thread_list.tqh_first; ets != NULL;
2414 ets = ets->ets_link.tqe_next) {
2415 if ((error = write_note(&ets->notes[0], fd)) != 0)
2416 return (error);
2417 }
2418
2419 return (0);
2420 }
2421
2422 /*
2423 * Write out ELF coredump.
2424 *
2425 * See documentation of ELF object file format in:
2426 * http://www.caldera.com/developers/devspecs/gabi41.pdf
2427 *
2428 * The coredump format in Linux is as follows:
2429 *
2430 * 0 +----------------------+ \
2431 * | ELF header | ET_CORE |
2432 * +----------------------+ |
2433 * | ELF program headers | |--- headers
2434 * | - NOTE section | |
2435 * | - PT_LOAD sections | |
2436 * +----------------------+ /
2437 * | NOTEs: |
2438 * | - NT_PRSTATUS |
2439 * | - NT_PRPSINFO |
2440 * | - NT_AUXV |
2441 * +----------------------+ <-- aligned to target page
2442 * | Process memory dump |
2443 * : :
2444 * . .
2445 * : :
2446 * | |
2447 * +----------------------+
2448 * (An illustrative offset calculation is sketched after this function.)
2449 * NT_PRSTATUS -> struct elf_prstatus (per thread)
2450 * NT_PRPSINFO -> struct elf_prpsinfo
2451 * NT_AUXV is an array of { type, value } pairs (see fill_auxv_note()).
2452 *
2453 * The format follows the System V format as closely as possible.
2454 * Current version limitations are as follows:
2455 * - no floating point registers are dumped
2456 *
2457 * Function returns 0 in case of success, negative errno otherwise.
2458 *
2459 * TODO: make this also work at runtime: it should be
2460 * possible to force a coredump from a running process and then
2461 * continue processing. For example, qemu could set up a SIGUSR2
2462 * handler (provided that the target process hasn't registered a
2463 * handler for it) that does the dump when the signal is received.
2464 */
2465 static int elf_core_dump(int signr, const CPUState *env)
2466 {
2467 const TaskState *ts = (const TaskState *)env->opaque;
2468 struct vm_area_struct *vma = NULL;
2469 char corefile[PATH_MAX];
2470 struct elf_note_info info;
2471 struct elfhdr elf;
2472 struct elf_phdr phdr;
2473 struct rlimit dumpsize;
2474 struct mm_struct *mm = NULL;
2475 off_t offset = 0, data_offset = 0;
2476 int segs = 0;
2477 int fd = -1;
2478
2479 errno = 0;
2480 getrlimit(RLIMIT_CORE, &dumpsize);
2481 if (dumpsize.rlim_cur == 0)
2482 return 0;
2483
2484 if (core_dump_filename(ts, corefile, sizeof (corefile)) < 0)
2485 return (-errno);
2486
2487 if ((fd = open(corefile, O_WRONLY | O_CREAT,
2488 S_IRUSR|S_IWUSR|S_IRGRP|S_IROTH)) < 0)
2489 return (-errno);
2490
2491 /*
2492 * Walk through the target process's memory mappings and
2493 * set up a structure containing this information. After
2494 * this point the vma_xxx functions can be used.
2495 */
2496 if ((mm = vma_init()) == NULL)
2497 goto out;
2498
2499 walk_memory_regions(mm, vma_walker);
2500 segs = vma_get_mapping_count(mm);
2501
2502 /*
2503 * Construct a valid coredump ELF header. We also
2504 * add one more segment for the notes.
2505 */
2506 fill_elf_header(&elf, segs + 1, ELF_MACHINE, 0);
2507 if (dump_write(fd, &elf, sizeof (elf)) != 0)
2508 goto out;
2509
2510 /* fill in in-memory version of notes */
2511 if (fill_note_info(&info, signr, env) < 0)
2512 goto out;
2513
2514 offset += sizeof (elf); /* elf header */
2515 offset += (segs + 1) * sizeof (struct elf_phdr); /* program headers */
2516
2517 /* write out notes program header */
2518 fill_elf_note_phdr(&phdr, info.notes_size, offset);
2519
2520 offset += info.notes_size;
2521 if (dump_write(fd, &phdr, sizeof (phdr)) != 0)
2522 goto out;
2523
2524 /*
2525 * The ELF specification wants data to start at a page boundary,
2526 * so we align it here.
2527 */
2528 offset = roundup(offset, ELF_EXEC_PAGESIZE);
2529
2530 /*
2531 * Write program headers for memory regions mapped in
2532 * the target process.
2533 */
2534 for (vma = vma_first(mm); vma != NULL; vma = vma_next(vma)) {
2535 (void) memset(&phdr, 0, sizeof (phdr));
2536
2537 phdr.p_type = PT_LOAD;
2538 phdr.p_offset = offset;
2539 phdr.p_vaddr = vma->vma_start;
2540 phdr.p_paddr = 0;
2541 phdr.p_filesz = vma_dump_size(vma);
2542 offset += phdr.p_filesz;
2543 phdr.p_memsz = vma->vma_end - vma->vma_start;
2544 phdr.p_flags = vma->vma_flags & PROT_READ ? PF_R : 0;
2545 if (vma->vma_flags & PROT_WRITE)
2546 phdr.p_flags |= PF_W;
2547 if (vma->vma_flags & PROT_EXEC)
2548 phdr.p_flags |= PF_X;
2549 phdr.p_align = ELF_EXEC_PAGESIZE;
2550
2551 dump_write(fd, &phdr, sizeof (phdr));
2552 }
2553
2554 /*
2555 * Next we write the notes just after the program headers. No
2556 * alignment is needed here.
2557 */
2558 if (write_note_info(&info, fd) < 0)
2559 goto out;
2560
2561 /* align data to page boundary */
2562 data_offset = lseek(fd, 0, SEEK_CUR);
2563 data_offset = TARGET_PAGE_ALIGN(data_offset);
2564 if (lseek(fd, data_offset, SEEK_SET) != data_offset)
2565 goto out;
2566
2567 /*
2568 * Finally we can dump the process memory into the corefile as well.
2569 */
2570 for (vma = vma_first(mm); vma != NULL; vma = vma_next(vma)) {
2571 abi_ulong addr;
2572 abi_ulong end;
2573
2574 end = vma->vma_start + vma_dump_size(vma);
2575
2576 for (addr = vma->vma_start; addr < end;
2577 addr += TARGET_PAGE_SIZE) {
2578 char page[TARGET_PAGE_SIZE];
2579 int error;
2580
2581 /*
2582 * Read in page from target process memory and
2583 * write it to coredump file.
2584 */
2585 error = copy_from_user(page, addr, sizeof (page));
2586 if (error != 0) {
2587 (void) fprintf(stderr, "unable to dump " TARGET_FMT_lx "\n",
2588 addr);
2589 errno = -error;
2590 goto out;
2591 }
2592 if (dump_write(fd, page, TARGET_PAGE_SIZE) < 0)
2593 goto out;
2594 }
2595 }
2596
2597 out:
2598 free_note_info(&info);
2599 if (mm != NULL)
2600 vma_delete(mm);
2601 (void) close(fd);
2602
2603 if (errno != 0)
2604 return (-errno);
2605 return (0);
2606 }
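/*
 * Illustrative sketch only (kept under #if 0, not compiled): the file
 * offsets elf_core_dump() above ends up using for the layout described in
 * its comment, assuming 'segs' PT_LOAD mappings plus one PT_NOTE segment
 * and a notes blob of 'notes_size' bytes.  The helper name is made up.
 */
#if 0
static off_t example_core_layout(int segs, size_t notes_size)
{
    off_t offset = 0;

    offset += sizeof (struct elfhdr);                   /* ELF header */
    offset += (segs + 1) * sizeof (struct elf_phdr);    /* program headers */
    /* the PT_NOTE segment starts here (p_offset in fill_elf_note_phdr()) */
    offset += notes_size;                               /* note data */
    /* PT_LOAD data starts at the next page boundary */
    return (roundup(offset, ELF_EXEC_PAGESIZE));
}
#endif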
2607
2608 #endif /* USE_ELF_CORE_DUMP */
2609
2610 static int load_aout_interp(void * exptr, int interp_fd)
2611 {
2612 printf("a.out interpreter not yet supported\n");
2613 return (0);
2614 }
2615
2616 void do_init_thread(struct target_pt_regs *regs, struct image_info *infop)
2617 {
2618 init_thread(regs, infop);
2619 }