qemu.git: linux-user / elfload.c
1 /* This is the Linux kernel elf-loading code, ported into user space */
2 #include <sys/time.h>
3 #include <sys/param.h>
4
5 #include <stdio.h>
6 #include <sys/types.h>
7 #include <fcntl.h>
8 #include <errno.h>
9 #include <unistd.h>
10 #include <sys/mman.h>
11 #include <sys/resource.h>
12 #include <stdlib.h>
13 #include <string.h>
14 #include <time.h>
15
16 #include "qemu.h"
17 #include "disas.h"
18
19 #ifdef _ARCH_PPC64
20 #undef ARCH_DLINFO
21 #undef ELF_PLATFORM
22 #undef ELF_HWCAP
23 #undef ELF_CLASS
24 #undef ELF_DATA
25 #undef ELF_ARCH
26 #endif
27
28 #define ELF_OSABI ELFOSABI_SYSV
29
30 /* from personality.h */
31
32 /*
33 * Flags for bug emulation.
34 *
35 * These occupy the top three bytes.
36 */
37 enum {
38 ADDR_NO_RANDOMIZE = 0x0040000, /* disable randomization of VA space */
39 FDPIC_FUNCPTRS = 0x0080000, /* userspace function ptrs point to descriptors
40 * (signal handling)
41 */
42 MMAP_PAGE_ZERO = 0x0100000,
43 ADDR_COMPAT_LAYOUT = 0x0200000,
44 READ_IMPLIES_EXEC = 0x0400000,
45 ADDR_LIMIT_32BIT = 0x0800000,
46 SHORT_INODE = 0x1000000,
47 WHOLE_SECONDS = 0x2000000,
48 STICKY_TIMEOUTS = 0x4000000,
49 ADDR_LIMIT_3GB = 0x8000000,
50 };
51
52 /*
53 * Personality types.
54 *
55 * These go in the low byte. Avoid using the top bit, it will
56 * conflict with error returns.
57 */
58 enum {
59 PER_LINUX = 0x0000,
60 PER_LINUX_32BIT = 0x0000 | ADDR_LIMIT_32BIT,
61 PER_LINUX_FDPIC = 0x0000 | FDPIC_FUNCPTRS,
62 PER_SVR4 = 0x0001 | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
63 PER_SVR3 = 0x0002 | STICKY_TIMEOUTS | SHORT_INODE,
64 PER_SCOSVR3 = 0x0003 | STICKY_TIMEOUTS |
65 WHOLE_SECONDS | SHORT_INODE,
66 PER_OSR5 = 0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS,
67 PER_WYSEV386 = 0x0004 | STICKY_TIMEOUTS | SHORT_INODE,
68 PER_ISCR4 = 0x0005 | STICKY_TIMEOUTS,
69 PER_BSD = 0x0006,
70 PER_SUNOS = 0x0006 | STICKY_TIMEOUTS,
71 PER_XENIX = 0x0007 | STICKY_TIMEOUTS | SHORT_INODE,
72 PER_LINUX32 = 0x0008,
73 PER_LINUX32_3GB = 0x0008 | ADDR_LIMIT_3GB,
74 PER_IRIX32 = 0x0009 | STICKY_TIMEOUTS,/* IRIX5 32-bit */
75 PER_IRIXN32 = 0x000a | STICKY_TIMEOUTS,/* IRIX6 new 32-bit */
76 PER_IRIX64 = 0x000b | STICKY_TIMEOUTS,/* IRIX6 64-bit */
77 PER_RISCOS = 0x000c,
78 PER_SOLARIS = 0x000d | STICKY_TIMEOUTS,
79 PER_UW7 = 0x000e | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
80 PER_OSF4 = 0x000f, /* OSF/1 v4 */
81 PER_HPUX = 0x0010,
82 PER_MASK = 0x00ff,
83 };
84
85 /*
86 * Return the base personality without flags.
87 */
88 #define personality(pers) (pers & PER_MASK)
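/*
 * Illustration only (not compiled): a personality value is the base type
 * ORed with bug-emulation flags, and personality() strips the flags off
 * again.  For example PER_SVR4 is 0x0001 | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
 * so personality(PER_SVR4) == 0x0001.
 */
#if 0
static int is_svr4_personality(int pers)
{
    /* compare only the low byte; the flag bits are ignored */
    return personality(pers) == personality(PER_SVR4);
}
#endif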
89
90 /* this flag is ineffective under linux too, should be deleted */
91 #ifndef MAP_DENYWRITE
92 #define MAP_DENYWRITE 0
93 #endif
94
95 /* should probably go in elf.h */
96 #ifndef ELIBBAD
97 #define ELIBBAD 80
98 #endif
99
100 #ifdef TARGET_I386
101
102 #define ELF_PLATFORM get_elf_platform()
103
104 static const char *get_elf_platform(void)
105 {
106 static char elf_platform[] = "i386";
107 int family = (thread_env->cpuid_version >> 8) & 0xff;
108 if (family > 6)
109 family = 6;
110 if (family >= 3)
111 elf_platform[1] = '0' + family;
112 return elf_platform;
113 }
114
115 #define ELF_HWCAP get_elf_hwcap()
116
117 static uint32_t get_elf_hwcap(void)
118 {
119 return thread_env->cpuid_features;
120 }
121
122 #ifdef TARGET_X86_64
123 #define ELF_START_MMAP 0x2aaaaab000ULL
124 #define elf_check_arch(x) ( ((x) == ELF_ARCH) )
125
126 #define ELF_CLASS ELFCLASS64
127 #define ELF_DATA ELFDATA2LSB
128 #define ELF_ARCH EM_X86_64
129
130 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
131 {
132 regs->rax = 0;
133 regs->rsp = infop->start_stack;
134 regs->rip = infop->entry;
135 }
136
137 typedef target_ulong a_target_elf_greg;
138 typedef uint32_t a_target_uid;
139 typedef uint32_t a_target_gid;
140 typedef int32_t a_target_pid;
141
142 #define ELF_NREG 27
143 typedef a_target_elf_greg a_target_elf_gregset[ELF_NREG];
144
145 /*
146 * Note that ELF_NREG should be 29 as there should be place for
147 * TRAPNO and ERR "registers" as well but linux doesn't dump
148 * those.
149 *
150 * See linux kernel: arch/x86/include/asm/elf.h
151 */
152 static void elf_core_copy_regs(a_target_elf_gregset *regs, const CPUState *env)
153 {
154 (*regs)[0] = env->regs[15];
155 (*regs)[1] = env->regs[14];
156 (*regs)[2] = env->regs[13];
157 (*regs)[3] = env->regs[12];
158 (*regs)[4] = env->regs[R_EBP];
159 (*regs)[5] = env->regs[R_EBX];
160 (*regs)[6] = env->regs[11];
161 (*regs)[7] = env->regs[10];
162 (*regs)[8] = env->regs[9];
163 (*regs)[9] = env->regs[8];
164 (*regs)[10] = env->regs[R_EAX];
165 (*regs)[11] = env->regs[R_ECX];
166 (*regs)[12] = env->regs[R_EDX];
167 (*regs)[13] = env->regs[R_ESI];
168 (*regs)[14] = env->regs[R_EDI];
169 (*regs)[15] = env->regs[R_EAX]; /* XXX */
170 (*regs)[16] = env->eip;
171 (*regs)[17] = env->segs[R_CS].selector & 0xffff;
172 (*regs)[18] = env->eflags;
173 (*regs)[19] = env->regs[R_ESP];
174 (*regs)[20] = env->segs[R_SS].selector & 0xffff;
175 (*regs)[21] = env->segs[R_FS].selector & 0xffff;
176 (*regs)[22] = env->segs[R_GS].selector & 0xffff;
177 (*regs)[23] = env->segs[R_DS].selector & 0xffff;
178 (*regs)[24] = env->segs[R_ES].selector & 0xffff;
179 (*regs)[25] = env->segs[R_FS].selector & 0xffff;
180 (*regs)[26] = env->segs[R_GS].selector & 0xffff;
181 }
182
183 #else
184
185 #define ELF_START_MMAP 0x80000000
186
187 /*
188 * This is used to ensure we don't load something for the wrong architecture.
189 */
190 #define elf_check_arch(x) ( ((x) == EM_386) || ((x) == EM_486) )
191
192 /*
193 * These are used to set parameters in the core dumps.
194 */
195 #define ELF_CLASS ELFCLASS32
196 #define ELF_DATA ELFDATA2LSB
197 #define ELF_ARCH EM_386
198
199 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
200 {
201 regs->esp = infop->start_stack;
202 regs->eip = infop->entry;
203
204 /* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program
205 starts %edx contains a pointer to a function which might be
206 registered using `atexit'. This provides a means for the
207 dynamic linker to call DT_FINI functions for shared libraries
208 that have been loaded before the code runs.
209
210 A value of 0 tells us we have no such handler. */
211 regs->edx = 0;
212 }
213
214 typedef target_ulong a_target_elf_greg;
215 typedef uint16_t a_target_uid;
216 typedef uint16_t a_target_gid;
217 typedef int32_t a_target_pid;
218
219 #define ELF_NREG 17
220 typedef a_target_elf_greg a_target_elf_gregset[ELF_NREG];
221
222 /*
223 * Note that ELF_NREG should be 19 as there should be place for
224 * TRAPNO and ERR "registers" as well but linux doesn't dump
225 * those.
226 *
227 * See linux kernel: arch/x86/include/asm/elf.h
228 */
229 static void elf_core_copy_regs(a_target_elf_gregset *regs, const CPUState *env)
230 {
231 (*regs)[0] = env->regs[R_EBX];
232 (*regs)[1] = env->regs[R_ECX];
233 (*regs)[2] = env->regs[R_EDX];
234 (*regs)[3] = env->regs[R_ESI];
235 (*regs)[4] = env->regs[R_EDI];
236 (*regs)[5] = env->regs[R_EBP];
237 (*regs)[6] = env->regs[R_EAX];
238 (*regs)[7] = env->segs[R_DS].selector & 0xffff;
239 (*regs)[8] = env->segs[R_ES].selector & 0xffff;
240 (*regs)[9] = env->segs[R_FS].selector & 0xffff;
241 (*regs)[10] = env->segs[R_GS].selector & 0xffff;
242 (*regs)[11] = env->regs[R_EAX]; /* XXX */
243 (*regs)[12] = env->eip;
244 (*regs)[13] = env->segs[R_CS].selector & 0xffff;
245 (*regs)[14] = env->eflags;
246 (*regs)[15] = env->regs[R_ESP];
247 (*regs)[16] = env->segs[R_SS].selector & 0xffff;
248 }
249 #endif
250
251 #define USE_ELF_CORE_DUMP
252 #define ELF_EXEC_PAGESIZE 4096
253
254 #endif
255
256 #ifdef TARGET_ARM
257
258 #define ELF_START_MMAP 0x80000000
259
260 #define elf_check_arch(x) ( (x) == EM_ARM )
261
262 #define ELF_CLASS ELFCLASS32
263 #ifdef TARGET_WORDS_BIGENDIAN
264 #define ELF_DATA ELFDATA2MSB
265 #else
266 #define ELF_DATA ELFDATA2LSB
267 #endif
268 #define ELF_ARCH EM_ARM
269
270 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
271 {
272 abi_long stack = infop->start_stack;
273 memset(regs, 0, sizeof(*regs));
274 regs->ARM_cpsr = 0x10;
275 if (infop->entry & 1)
276 regs->ARM_cpsr |= CPSR_T;
277 regs->ARM_pc = infop->entry & 0xfffffffe;
278 regs->ARM_sp = infop->start_stack;
279 /* FIXME - what to do on failure of get_user()? */
280 get_user_ual(regs->ARM_r2, stack + 8); /* envp */
281 get_user_ual(regs->ARM_r1, stack + 4); /* envp */
282 /* XXX: it seems that r0 is zeroed afterwards! */
283 regs->ARM_r0 = 0;
284 /* For uClinux PIC binaries. */
285 /* XXX: Linux does this only on ARM with no MMU (do we care ?) */
286 regs->ARM_r10 = infop->start_data;
287 }
288
289 typedef uint32_t a_target_elf_greg;
290 typedef uint16_t a_target_uid;
291 typedef uint16_t a_target_gid;
292 typedef int32_t a_target_pid;
293
294 #define ELF_NREG 18
295 typedef a_target_elf_greg a_target_elf_gregset[ELF_NREG];
296
297 static void elf_core_copy_regs(a_target_elf_gregset *regs, const CPUState *env)
298 {
299 (*regs)[0] = env->regs[0];
300 (*regs)[1] = env->regs[1];
301 (*regs)[2] = env->regs[2];
302 (*regs)[3] = env->regs[3];
303 (*regs)[4] = env->regs[4];
304 (*regs)[5] = env->regs[5];
305 (*regs)[6] = env->regs[6];
306 (*regs)[7] = env->regs[7];
307 (*regs)[8] = env->regs[8];
308 (*regs)[9] = env->regs[9];
309 (*regs)[10] = env->regs[10];
310 (*regs)[11] = env->regs[11];
311 (*regs)[12] = env->regs[12];
312 (*regs)[13] = env->regs[13];
313 (*regs)[14] = env->regs[14];
314 (*regs)[15] = env->regs[15];
315
316 (*regs)[16] = cpsr_read((CPUState *)env);
317 (*regs)[17] = env->regs[0]; /* XXX */
318 }
319
320 #define USE_ELF_CORE_DUMP
321 #define ELF_EXEC_PAGESIZE 4096
322
323 enum
324 {
325 ARM_HWCAP_ARM_SWP = 1 << 0,
326 ARM_HWCAP_ARM_HALF = 1 << 1,
327 ARM_HWCAP_ARM_THUMB = 1 << 2,
328 ARM_HWCAP_ARM_26BIT = 1 << 3,
329 ARM_HWCAP_ARM_FAST_MULT = 1 << 4,
330 ARM_HWCAP_ARM_FPA = 1 << 5,
331 ARM_HWCAP_ARM_VFP = 1 << 6,
332 ARM_HWCAP_ARM_EDSP = 1 << 7,
333 };
334
335 #define ELF_HWCAP (ARM_HWCAP_ARM_SWP | ARM_HWCAP_ARM_HALF \
336 | ARM_HWCAP_ARM_THUMB | ARM_HWCAP_ARM_FAST_MULT \
337 | ARM_HWCAP_ARM_FPA | ARM_HWCAP_ARM_VFP)
338
339 #endif
340
341 #ifdef TARGET_SPARC
342 #ifdef TARGET_SPARC64
343
344 #define ELF_START_MMAP 0x80000000
345
346 #ifndef TARGET_ABI32
347 #define elf_check_arch(x) ( (x) == EM_SPARCV9 || (x) == EM_SPARC32PLUS )
348 #else
349 #define elf_check_arch(x) ( (x) == EM_SPARC32PLUS || (x) == EM_SPARC )
350 #endif
351
352 #define ELF_CLASS ELFCLASS64
353 #define ELF_DATA ELFDATA2MSB
354 #define ELF_ARCH EM_SPARCV9
355
356 #define STACK_BIAS 2047
357
358 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
359 {
360 #ifndef TARGET_ABI32
361 regs->tstate = 0;
362 #endif
363 regs->pc = infop->entry;
364 regs->npc = regs->pc + 4;
365 regs->y = 0;
366 #ifdef TARGET_ABI32
367 regs->u_regs[14] = infop->start_stack - 16 * 4;
368 #else
369 if (personality(infop->personality) == PER_LINUX32)
370 regs->u_regs[14] = infop->start_stack - 16 * 4;
371 else
372 regs->u_regs[14] = infop->start_stack - 16 * 8 - STACK_BIAS;
373 #endif
374 }
375
376 #else
377 #define ELF_START_MMAP 0x80000000
378
379 #define elf_check_arch(x) ( (x) == EM_SPARC )
380
381 #define ELF_CLASS ELFCLASS32
382 #define ELF_DATA ELFDATA2MSB
383 #define ELF_ARCH EM_SPARC
384
385 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
386 {
387 regs->psr = 0;
388 regs->pc = infop->entry;
389 regs->npc = regs->pc + 4;
390 regs->y = 0;
391 regs->u_regs[14] = infop->start_stack - 16 * 4;
392 }
393
394 #endif
395 #endif
396
397 #ifdef TARGET_PPC
398
399 #define ELF_START_MMAP 0x80000000
400
401 #if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
402
403 #define elf_check_arch(x) ( (x) == EM_PPC64 )
404
405 #define ELF_CLASS ELFCLASS64
406
407 #else
408
409 #define elf_check_arch(x) ( (x) == EM_PPC )
410
411 #define ELF_CLASS ELFCLASS32
412
413 #endif
414
415 #ifdef TARGET_WORDS_BIGENDIAN
416 #define ELF_DATA ELFDATA2MSB
417 #else
418 #define ELF_DATA ELFDATA2LSB
419 #endif
420 #define ELF_ARCH EM_PPC
421
422 /* Feature masks for the Aux Vector Hardware Capabilities (AT_HWCAP).
423 See arch/powerpc/include/asm/cputable.h. */
424 enum {
425 QEMU_PPC_FEATURE_32 = 0x80000000,
426 QEMU_PPC_FEATURE_64 = 0x40000000,
427 QEMU_PPC_FEATURE_601_INSTR = 0x20000000,
428 QEMU_PPC_FEATURE_HAS_ALTIVEC = 0x10000000,
429 QEMU_PPC_FEATURE_HAS_FPU = 0x08000000,
430 QEMU_PPC_FEATURE_HAS_MMU = 0x04000000,
431 QEMU_PPC_FEATURE_HAS_4xxMAC = 0x02000000,
432 QEMU_PPC_FEATURE_UNIFIED_CACHE = 0x01000000,
433 QEMU_PPC_FEATURE_HAS_SPE = 0x00800000,
434 QEMU_PPC_FEATURE_HAS_EFP_SINGLE = 0x00400000,
435 QEMU_PPC_FEATURE_HAS_EFP_DOUBLE = 0x00200000,
436 QEMU_PPC_FEATURE_NO_TB = 0x00100000,
437 QEMU_PPC_FEATURE_POWER4 = 0x00080000,
438 QEMU_PPC_FEATURE_POWER5 = 0x00040000,
439 QEMU_PPC_FEATURE_POWER5_PLUS = 0x00020000,
440 QEMU_PPC_FEATURE_CELL = 0x00010000,
441 QEMU_PPC_FEATURE_BOOKE = 0x00008000,
442 QEMU_PPC_FEATURE_SMT = 0x00004000,
443 QEMU_PPC_FEATURE_ICACHE_SNOOP = 0x00002000,
444 QEMU_PPC_FEATURE_ARCH_2_05 = 0x00001000,
445 QEMU_PPC_FEATURE_PA6T = 0x00000800,
446 QEMU_PPC_FEATURE_HAS_DFP = 0x00000400,
447 QEMU_PPC_FEATURE_POWER6_EXT = 0x00000200,
448 QEMU_PPC_FEATURE_ARCH_2_06 = 0x00000100,
449 QEMU_PPC_FEATURE_HAS_VSX = 0x00000080,
450 QEMU_PPC_FEATURE_PSERIES_PERFMON_COMPAT = 0x00000040,
451
452 QEMU_PPC_FEATURE_TRUE_LE = 0x00000002,
453 QEMU_PPC_FEATURE_PPC_LE = 0x00000001,
454 };
455
456 #define ELF_HWCAP get_elf_hwcap()
457
458 static uint32_t get_elf_hwcap(void)
459 {
460 CPUState *e = thread_env;
461 uint32_t features = 0;
462
463 /* We don't have to be terribly complete here; the high points are
464 Altivec/FP/SPE support. Anything else is just a bonus. */
465 #define GET_FEATURE(flag, feature) \
466 do {if (e->insns_flags & flag) features |= feature; } while(0)
467 GET_FEATURE(PPC_64B, QEMU_PPC_FEATURE_64);
468 GET_FEATURE(PPC_FLOAT, QEMU_PPC_FEATURE_HAS_FPU);
469 GET_FEATURE(PPC_ALTIVEC, QEMU_PPC_FEATURE_HAS_ALTIVEC);
470 GET_FEATURE(PPC_SPE, QEMU_PPC_FEATURE_HAS_SPE);
471 GET_FEATURE(PPC_SPE_SINGLE, QEMU_PPC_FEATURE_HAS_EFP_SINGLE);
472 GET_FEATURE(PPC_SPE_DOUBLE, QEMU_PPC_FEATURE_HAS_EFP_DOUBLE);
473 GET_FEATURE(PPC_BOOKE, QEMU_PPC_FEATURE_BOOKE);
474 GET_FEATURE(PPC_405_MAC, QEMU_PPC_FEATURE_HAS_4xxMAC);
475 #undef GET_FEATURE
476
477 return features;
478 }
479
480 /*
481 * We need to put in some extra aux table entries to tell glibc what
482 * the cache block size is, so it can use the dcbz instruction safely.
483 */
484 #define AT_DCACHEBSIZE 19
485 #define AT_ICACHEBSIZE 20
486 #define AT_UCACHEBSIZE 21
487 /* A special ignored type value for PPC, for glibc compatibility. */
488 #define AT_IGNOREPPC 22
489 /*
490 * The requirements here are:
491 * - keep the final alignment of sp (sp & 0xf)
492 * - make sure the 32-bit value at the first 16 byte aligned position of
493 * AUXV is greater than 16 for glibc compatibility.
494 * AT_IGNOREPPC is used for that.
495 * - for compatibility with glibc ARCH_DLINFO must always be defined on PPC,
496 * even if DLINFO_ARCH_ITEMS goes to zero or is undefined.
497 */
498 #define DLINFO_ARCH_ITEMS 5
499 #define ARCH_DLINFO \
500 do { \
501 NEW_AUX_ENT(AT_DCACHEBSIZE, 0x20); \
502 NEW_AUX_ENT(AT_ICACHEBSIZE, 0x20); \
503 NEW_AUX_ENT(AT_UCACHEBSIZE, 0); \
504 /* \
505 * Now handle glibc compatibility. \
506 */ \
507 NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \
508 NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \
509 } while (0)
510
511 static inline void init_thread(struct target_pt_regs *_regs, struct image_info *infop)
512 {
513 abi_ulong pos = infop->start_stack;
514 abi_ulong tmp;
515 #if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
516 abi_ulong entry, toc;
517 #endif
518
519 _regs->gpr[1] = infop->start_stack;
520 #if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
521 entry = ldq_raw(infop->entry) + infop->load_addr;
522 toc = ldq_raw(infop->entry + 8) + infop->load_addr;
523 _regs->gpr[2] = toc;
524 infop->entry = entry;
525 #endif
526 _regs->nip = infop->entry;
527 /* Note that this isn't exactly what the regular kernel does,
528 * but it is what the ABI wants and is needed to allow
529 * execution of PPC BSD programs.
530 */
531 /* FIXME - what to do on failure of get_user()? */
532 get_user_ual(_regs->gpr[3], pos);
533 pos += sizeof(abi_ulong);
534 _regs->gpr[4] = pos;
535 for (tmp = 1; tmp != 0; pos += sizeof(abi_ulong))
536 tmp = ldl(pos);
537 _regs->gpr[5] = pos;
538 }
539
540 #define ELF_EXEC_PAGESIZE 4096
541
542 #endif
543
544 #ifdef TARGET_MIPS
545
546 #define ELF_START_MMAP 0x80000000
547
548 #define elf_check_arch(x) ( (x) == EM_MIPS )
549
550 #ifdef TARGET_MIPS64
551 #define ELF_CLASS ELFCLASS64
552 #else
553 #define ELF_CLASS ELFCLASS32
554 #endif
555 #ifdef TARGET_WORDS_BIGENDIAN
556 #define ELF_DATA ELFDATA2MSB
557 #else
558 #define ELF_DATA ELFDATA2LSB
559 #endif
560 #define ELF_ARCH EM_MIPS
561
562 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
563 {
564 regs->cp0_status = 2 << CP0St_KSU;
565 regs->cp0_epc = infop->entry;
566 regs->regs[29] = infop->start_stack;
567 }
568
569 #define ELF_EXEC_PAGESIZE 4096
570
571 #endif /* TARGET_MIPS */
572
573 #ifdef TARGET_MICROBLAZE
574
575 #define ELF_START_MMAP 0x80000000
576
577 #define elf_check_arch(x) ( (x) == EM_XILINX_MICROBLAZE )
578
579 #define ELF_CLASS ELFCLASS32
580 #define ELF_DATA ELFDATA2MSB
581 #define ELF_ARCH EM_XILINX_MICROBLAZE
582
583 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
584 {
585 regs->pc = infop->entry;
586 regs->r1 = infop->start_stack;
587
588 }
589
590 #define ELF_EXEC_PAGESIZE 4096
591
592 #endif /* TARGET_MICROBLAZE */
593
594 #ifdef TARGET_SH4
595
596 #define ELF_START_MMAP 0x80000000
597
598 #define elf_check_arch(x) ( (x) == EM_SH )
599
600 #define ELF_CLASS ELFCLASS32
601 #define ELF_DATA ELFDATA2LSB
602 #define ELF_ARCH EM_SH
603
604 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
605 {
606 /* Check other registers XXXXX */
607 regs->pc = infop->entry;
608 regs->regs[15] = infop->start_stack;
609 }
610
611 #define ELF_EXEC_PAGESIZE 4096
612
613 #endif
614
615 #ifdef TARGET_CRIS
616
617 #define ELF_START_MMAP 0x80000000
618
619 #define elf_check_arch(x) ( (x) == EM_CRIS )
620
621 #define ELF_CLASS ELFCLASS32
622 #define ELF_DATA ELFDATA2LSB
623 #define ELF_ARCH EM_CRIS
624
625 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
626 {
627 regs->erp = infop->entry;
628 }
629
630 #define ELF_EXEC_PAGESIZE 8192
631
632 #endif
633
634 #ifdef TARGET_M68K
635
636 #define ELF_START_MMAP 0x80000000
637
638 #define elf_check_arch(x) ( (x) == EM_68K )
639
640 #define ELF_CLASS ELFCLASS32
641 #define ELF_DATA ELFDATA2MSB
642 #define ELF_ARCH EM_68K
643
644 /* ??? Does this need to do anything?
645 #define ELF_PLAT_INIT(_r) */
646
647 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
648 {
649 regs->usp = infop->start_stack;
650 regs->sr = 0;
651 regs->pc = infop->entry;
652 }
653
654 #define ELF_EXEC_PAGESIZE 8192
655
656 #endif
657
658 #ifdef TARGET_ALPHA
659
660 #define ELF_START_MMAP (0x30000000000ULL)
661
662 #define elf_check_arch(x) ( (x) == ELF_ARCH )
663
664 #define ELF_CLASS ELFCLASS64
665 #define ELF_DATA ELFDATA2LSB
666 #define ELF_ARCH EM_ALPHA
667
668 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
669 {
670 regs->pc = infop->entry;
671 regs->ps = 8;
672 regs->usp = infop->start_stack;
673 regs->unique = infop->start_data; /* ? */
674 printf("Set unique value to " TARGET_FMT_lx " (" TARGET_FMT_lx ")\n",
675 regs->unique, infop->start_data);
676 }
677
678 #define ELF_EXEC_PAGESIZE 8192
679
680 #endif /* TARGET_ALPHA */
681
682 #ifndef ELF_PLATFORM
683 #define ELF_PLATFORM (NULL)
684 #endif
685
686 #ifndef ELF_HWCAP
687 #define ELF_HWCAP 0
688 #endif
689
690 #ifdef TARGET_ABI32
691 #undef ELF_CLASS
692 #define ELF_CLASS ELFCLASS32
693 #undef bswaptls
694 #define bswaptls(ptr) bswap32s(ptr)
695 #endif
696
697 #include "elf.h"
698
699 struct exec
700 {
701 unsigned int a_info; /* Use macros N_MAGIC, etc for access */
702 unsigned int a_text; /* length of text, in bytes */
703 unsigned int a_data; /* length of data, in bytes */
704 unsigned int a_bss; /* length of uninitialized data area, in bytes */
705 unsigned int a_syms; /* length of symbol table data in file, in bytes */
706 unsigned int a_entry; /* start address */
707 unsigned int a_trsize; /* length of relocation info for text, in bytes */
708 unsigned int a_drsize; /* length of relocation info for data, in bytes */
709 };
710
711
712 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
713 #define OMAGIC 0407
714 #define NMAGIC 0410
715 #define ZMAGIC 0413
716 #define QMAGIC 0314
717
718 /* max code+data+bss space allocated to elf interpreter */
719 #define INTERP_MAP_SIZE (32 * 1024 * 1024)
720
721 /* max code+data+bss+brk space allocated to ET_DYN executables */
722 #define ET_DYN_MAP_SIZE (128 * 1024 * 1024)
723
724 /* Necessary parameters */
725 #define TARGET_ELF_EXEC_PAGESIZE TARGET_PAGE_SIZE
726 #define TARGET_ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(TARGET_ELF_EXEC_PAGESIZE-1))
727 #define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE-1))
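/*
 * Worked example (assuming a 4096 byte TARGET_ELF_EXEC_PAGESIZE): for a
 * segment with p_vaddr 0x08048134, TARGET_ELF_PAGESTART() gives 0x08048000
 * and TARGET_ELF_PAGEOFFSET() gives 0x134.  The loaders below mmap from the
 * page-aligned address and subtract the same offset from the file offset,
 * so the segment still ends up at its requested virtual address.
 */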
728
729 #define INTERPRETER_NONE 0
730 #define INTERPRETER_AOUT 1
731 #define INTERPRETER_ELF 2
732
733 #define DLINFO_ITEMS 12
734
735 static inline void memcpy_fromfs(void * to, const void * from, unsigned long n)
736 {
737 memcpy(to, from, n);
738 }
739
740 static int load_aout_interp(void * exptr, int interp_fd);
741
742 #ifdef BSWAP_NEEDED
743 static void bswap_ehdr(struct elfhdr *ehdr)
744 {
745 bswap16s(&ehdr->e_type); /* Object file type */
746 bswap16s(&ehdr->e_machine); /* Architecture */
747 bswap32s(&ehdr->e_version); /* Object file version */
748 bswaptls(&ehdr->e_entry); /* Entry point virtual address */
749 bswaptls(&ehdr->e_phoff); /* Program header table file offset */
750 bswaptls(&ehdr->e_shoff); /* Section header table file offset */
751 bswap32s(&ehdr->e_flags); /* Processor-specific flags */
752 bswap16s(&ehdr->e_ehsize); /* ELF header size in bytes */
753 bswap16s(&ehdr->e_phentsize); /* Program header table entry size */
754 bswap16s(&ehdr->e_phnum); /* Program header table entry count */
755 bswap16s(&ehdr->e_shentsize); /* Section header table entry size */
756 bswap16s(&ehdr->e_shnum); /* Section header table entry count */
757 bswap16s(&ehdr->e_shstrndx); /* Section header string table index */
758 }
759
760 static void bswap_phdr(struct elf_phdr *phdr)
761 {
762 bswap32s(&phdr->p_type); /* Segment type */
763 bswaptls(&phdr->p_offset); /* Segment file offset */
764 bswaptls(&phdr->p_vaddr); /* Segment virtual address */
765 bswaptls(&phdr->p_paddr); /* Segment physical address */
766 bswaptls(&phdr->p_filesz); /* Segment size in file */
767 bswaptls(&phdr->p_memsz); /* Segment size in memory */
768 bswap32s(&phdr->p_flags); /* Segment flags */
769 bswaptls(&phdr->p_align); /* Segment alignment */
770 }
771
772 static void bswap_shdr(struct elf_shdr *shdr)
773 {
774 bswap32s(&shdr->sh_name);
775 bswap32s(&shdr->sh_type);
776 bswaptls(&shdr->sh_flags);
777 bswaptls(&shdr->sh_addr);
778 bswaptls(&shdr->sh_offset);
779 bswaptls(&shdr->sh_size);
780 bswap32s(&shdr->sh_link);
781 bswap32s(&shdr->sh_info);
782 bswaptls(&shdr->sh_addralign);
783 bswaptls(&shdr->sh_entsize);
784 }
785
786 static void bswap_sym(struct elf_sym *sym)
787 {
788 bswap32s(&sym->st_name);
789 bswaptls(&sym->st_value);
790 bswaptls(&sym->st_size);
791 bswap16s(&sym->st_shndx);
792 }
793 #endif
794
795 #ifdef USE_ELF_CORE_DUMP
796 static int elf_core_dump(int, const CPUState *);
797
798 #ifdef BSWAP_NEEDED
799 static void bswap_note(struct elf_note *en)
800 {
801 bswap32s(&en->n_namesz);
802 bswap32s(&en->n_descsz);
803 bswap32s(&en->n_type);
804 }
805 #endif /* BSWAP_NEEDED */
806
807 #endif /* USE_ELF_CORE_DUMP */
808
809 /*
810 * 'copy_elf_strings()' copies argument/environment strings from user
811 * memory to free pages in kernel mem. These are in a format ready
812 * to be put directly into the top of new user memory.
813 *
814 */
815 static abi_ulong copy_elf_strings(int argc,char ** argv, void **page,
816 abi_ulong p)
817 {
818 char *tmp, *tmp1, *pag = NULL;
819 int len, offset = 0;
820
821 if (!p) {
822 return 0; /* bullet-proofing */
823 }
824 while (argc-- > 0) {
825 tmp = argv[argc];
826 if (!tmp) {
827 fprintf(stderr, "VFS: argc is wrong");
828 exit(-1);
829 }
830 tmp1 = tmp;
831 while (*tmp++);
832 len = tmp - tmp1;
833 if (p < len) { /* this shouldn't happen - 128kB */
834 return 0;
835 }
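/* Copy the string, including its trailing NUL, backwards into the
 * temporary page array, byte by byte or in chunks up to a page at a
 * time; pages are allocated lazily as the copy crosses page boundaries. */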
836 while (len) {
837 --p; --tmp; --len;
838 if (--offset < 0) {
839 offset = p % TARGET_PAGE_SIZE;
840 pag = (char *)page[p/TARGET_PAGE_SIZE];
841 if (!pag) {
842 pag = (char *)malloc(TARGET_PAGE_SIZE);
843 if (!pag)
844 return 0;
845 memset(pag, 0, TARGET_PAGE_SIZE);
846 page[p/TARGET_PAGE_SIZE] = pag;
847 }
848 }
849 if (len == 0 || offset == 0) {
850 *(pag + offset) = *tmp;
851 }
852 else {
853 int bytes_to_copy = (len > offset) ? offset : len;
854 tmp -= bytes_to_copy;
855 p -= bytes_to_copy;
856 offset -= bytes_to_copy;
857 len -= bytes_to_copy;
858 memcpy_fromfs(pag + offset, tmp, bytes_to_copy + 1);
859 }
860 }
861 }
862 return p;
863 }
864
865 static abi_ulong setup_arg_pages(abi_ulong p, struct linux_binprm *bprm,
866 struct image_info *info)
867 {
868 abi_ulong stack_base, size, error;
869 int i;
870
871 /* Create enough stack to hold everything. If we don't use
872 * it for args, we'll use it for something else...
873 */
874 size = x86_stack_size;
875 if (size < MAX_ARG_PAGES*TARGET_PAGE_SIZE)
876 size = MAX_ARG_PAGES*TARGET_PAGE_SIZE;
877 error = target_mmap(0,
878 size + qemu_host_page_size,
879 PROT_READ | PROT_WRITE,
880 MAP_PRIVATE | MAP_ANONYMOUS,
881 -1, 0);
882 if (error == -1) {
883 perror("stk mmap");
884 exit(-1);
885 }
886 /* we reserve one extra page at the top of the stack as guard */
887 target_mprotect(error + size, qemu_host_page_size, PROT_NONE);
888
889 stack_base = error + size - MAX_ARG_PAGES*TARGET_PAGE_SIZE;
890 p += stack_base;
891
892 for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
893 if (bprm->page[i]) {
894 info->rss++;
895 /* FIXME - check return value of memcpy_to_target() for failure */
896 memcpy_to_target(stack_base, bprm->page[i], TARGET_PAGE_SIZE);
897 free(bprm->page[i]);
898 }
899 stack_base += TARGET_PAGE_SIZE;
900 }
901 return p;
902 }
903
904 static void set_brk(abi_ulong start, abi_ulong end)
905 {
906 /* page-align the start and end addresses... */
907 start = HOST_PAGE_ALIGN(start);
908 end = HOST_PAGE_ALIGN(end);
909 if (end <= start)
910 return;
911 if(target_mmap(start, end - start,
912 PROT_READ | PROT_WRITE | PROT_EXEC,
913 MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0) == -1) {
914 perror("cannot mmap brk");
915 exit(-1);
916 }
917 }
918
919
920 /* We need to explicitly zero any fractional pages after the data
921 section (i.e. bss). These would otherwise contain junk from the
922 file that should not be in memory. */
923 static void padzero(abi_ulong elf_bss, abi_ulong last_bss)
924 {
925 abi_ulong nbyte;
926
927 if (elf_bss >= last_bss)
928 return;
929
930 /* XXX: this is really a hack : if the real host page size is
931 smaller than the target page size, some pages after the end
932 of the file may not be mapped. A better fix would be to
933 patch target_mmap(), but it is more complicated as the file
934 size must be known */
935 if (qemu_real_host_page_size < qemu_host_page_size) {
936 abi_ulong end_addr, end_addr1;
937 end_addr1 = (elf_bss + qemu_real_host_page_size - 1) &
938 ~(qemu_real_host_page_size - 1);
939 end_addr = HOST_PAGE_ALIGN(elf_bss);
940 if (end_addr1 < end_addr) {
941 mmap((void *)g2h(end_addr1), end_addr - end_addr1,
942 PROT_READ|PROT_WRITE|PROT_EXEC,
943 MAP_FIXED|MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
944 }
945 }
946
947 nbyte = elf_bss & (qemu_host_page_size-1);
948 if (nbyte) {
949 nbyte = qemu_host_page_size - nbyte;
950 do {
951 /* FIXME - what to do if put_user() fails? */
952 put_user_u8(0, elf_bss);
953 elf_bss++;
954 } while (--nbyte);
955 }
956 }
957
958
959 static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc,
960 struct elfhdr * exec,
961 abi_ulong load_addr,
962 abi_ulong load_bias,
963 abi_ulong interp_load_addr, int ibcs,
964 struct image_info *info)
965 {
966 abi_ulong sp;
967 int size;
968 abi_ulong u_platform;
969 const char *k_platform;
970 const int n = sizeof(elf_addr_t);
971
972 sp = p;
973 u_platform = 0;
974 k_platform = ELF_PLATFORM;
975 if (k_platform) {
976 size_t len = strlen(k_platform) + 1;
977 sp -= (len + n - 1) & ~(n - 1);
978 u_platform = sp;
979 /* FIXME - check return value of memcpy_to_target() for failure */
980 memcpy_to_target(sp, k_platform, len);
981 }
982 /*
983 * Force 16 byte _final_ alignment here for generality.
984 */
985 sp = sp &~ (abi_ulong)15;
986 size = (DLINFO_ITEMS + 1) * 2;
987 if (k_platform)
988 size += 2;
989 #ifdef DLINFO_ARCH_ITEMS
990 size += DLINFO_ARCH_ITEMS * 2;
991 #endif
992 size += envc + argc + 2;
993 size += (!ibcs ? 3 : 1); /* argc itself */
994 size *= n;
995 if (size & 15)
996 sp -= 16 - (size & 15);
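/*
 * Sizing sketch: each auxv entry is an (id, value) pair, so the aux vector
 * needs (DLINFO_ITEMS + 1) * 2 words, +2 if a platform string is present,
 * plus any ARCH_DLINFO items; the argv/envp pointer arrays need
 * argc + envc + 2 words, and a few more words cover argc itself.  Scaling
 * by n gives the byte count, and sp has just been nudged down so that the
 * final stack pointer stays 16-byte aligned once everything is pushed.
 */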
997
998 /* This is correct because Linux defines
999 * elf_addr_t as Elf32_Off / Elf64_Off
1000 */
1001 #define NEW_AUX_ENT(id, val) do { \
1002 sp -= n; put_user_ual(val, sp); \
1003 sp -= n; put_user_ual(id, sp); \
1004 } while(0)
1005
1006 NEW_AUX_ENT (AT_NULL, 0);
1007
1008 /* There must be exactly DLINFO_ITEMS entries here. */
1009 NEW_AUX_ENT(AT_PHDR, (abi_ulong)(load_addr + exec->e_phoff));
1010 NEW_AUX_ENT(AT_PHENT, (abi_ulong)(sizeof (struct elf_phdr)));
1011 NEW_AUX_ENT(AT_PHNUM, (abi_ulong)(exec->e_phnum));
1012 NEW_AUX_ENT(AT_PAGESZ, (abi_ulong)(TARGET_PAGE_SIZE));
1013 NEW_AUX_ENT(AT_BASE, (abi_ulong)(interp_load_addr));
1014 NEW_AUX_ENT(AT_FLAGS, (abi_ulong)0);
1015 NEW_AUX_ENT(AT_ENTRY, load_bias + exec->e_entry);
1016 NEW_AUX_ENT(AT_UID, (abi_ulong) getuid());
1017 NEW_AUX_ENT(AT_EUID, (abi_ulong) geteuid());
1018 NEW_AUX_ENT(AT_GID, (abi_ulong) getgid());
1019 NEW_AUX_ENT(AT_EGID, (abi_ulong) getegid());
1020 NEW_AUX_ENT(AT_HWCAP, (abi_ulong) ELF_HWCAP);
1021 NEW_AUX_ENT(AT_CLKTCK, (abi_ulong) sysconf(_SC_CLK_TCK));
1022 if (k_platform)
1023 NEW_AUX_ENT(AT_PLATFORM, u_platform);
1024 #ifdef ARCH_DLINFO
1025 /*
1026 * ARCH_DLINFO must come last so platform specific code can enforce
1027 * special alignment requirements on the AUXV if necessary (eg. PPC).
1028 */
1029 ARCH_DLINFO;
1030 #endif
1031 #undef NEW_AUX_ENT
1032
1033 info->saved_auxv = sp;
1034
1035 sp = loader_build_argptr(envc, argc, sp, p, !ibcs);
1036 return sp;
1037 }
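/*
 * Rough sketch of the guest stack that create_elf_tables() leaves behind
 * (lowest address first; the exact pointer block depends on the ibcs flag
 * and on loader_build_argptr() in linuxload.c):
 *
 *     argc
 *     argv[0] ... argv[argc-1], NULL
 *     envp[0] ... envp[envc-1], NULL
 *     auxv pairs (AT_PHDR, AT_ENTRY, ..., terminated by AT_NULL)
 *     platform string and the argument/environment strings
 *
 * The returned sp points at argc, which is what the guest _start code
 * expects to find.
 */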
1038
1039
1040 static abi_ulong load_elf_interp(struct elfhdr * interp_elf_ex,
1041 int interpreter_fd,
1042 abi_ulong *interp_load_addr)
1043 {
1044 struct elf_phdr *elf_phdata = NULL;
1045 struct elf_phdr *eppnt;
1046 abi_ulong load_addr = 0;
1047 int load_addr_set = 0;
1048 int retval;
1049 abi_ulong last_bss, elf_bss;
1050 abi_ulong error;
1051 int i;
1052
1053 elf_bss = 0;
1054 last_bss = 0;
1055 error = 0;
1056
1057 #ifdef BSWAP_NEEDED
1058 bswap_ehdr(interp_elf_ex);
1059 #endif
1060 /* First of all, some simple consistency checks */
1061 if ((interp_elf_ex->e_type != ET_EXEC &&
1062 interp_elf_ex->e_type != ET_DYN) ||
1063 !elf_check_arch(interp_elf_ex->e_machine)) {
1064 return ~((abi_ulong)0UL);
1065 }
1066
1067
1068 /* Now read in all of the header information */
1069
1070 if (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum > TARGET_PAGE_SIZE)
1071 return ~(abi_ulong)0UL;
1072
1073 elf_phdata = (struct elf_phdr *)
1074 malloc(sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
1075
1076 if (!elf_phdata)
1077 return ~((abi_ulong)0UL);
1078
1079 /*
1080 * If the size of this structure has changed, then punt, since
1081 * we will be doing the wrong thing.
1082 */
1083 if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr)) {
1084 free(elf_phdata);
1085 return ~((abi_ulong)0UL);
1086 }
1087
1088 retval = lseek(interpreter_fd, interp_elf_ex->e_phoff, SEEK_SET);
1089 if(retval >= 0) {
1090 retval = read(interpreter_fd,
1091 (char *) elf_phdata,
1092 sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
1093 }
1094 if (retval < 0) {
1095 perror("load_elf_interp");
1096 exit(-1);
1097 free (elf_phdata);
1098 return retval;
1099 }
1100 #ifdef BSWAP_NEEDED
1101 eppnt = elf_phdata;
1102 for (i=0; i<interp_elf_ex->e_phnum; i++, eppnt++) {
1103 bswap_phdr(eppnt);
1104 }
1105 #endif
1106
1107 if (interp_elf_ex->e_type == ET_DYN) {
1108 /* in order to avoid hardcoding the interpreter load
1109 address in qemu, we allocate a big enough memory zone */
1110 error = target_mmap(0, INTERP_MAP_SIZE,
1111 PROT_NONE, MAP_PRIVATE | MAP_ANON,
1112 -1, 0);
1113 if (error == -1) {
1114 perror("mmap");
1115 exit(-1);
1116 }
1117 load_addr = error;
1118 load_addr_set = 1;
1119 }
1120
1121 eppnt = elf_phdata;
1122 for(i=0; i<interp_elf_ex->e_phnum; i++, eppnt++)
1123 if (eppnt->p_type == PT_LOAD) {
1124 int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
1125 int elf_prot = 0;
1126 abi_ulong vaddr = 0;
1127 abi_ulong k;
1128
1129 if (eppnt->p_flags & PF_R) elf_prot = PROT_READ;
1130 if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
1131 if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
1132 if (interp_elf_ex->e_type == ET_EXEC || load_addr_set) {
1133 elf_type |= MAP_FIXED;
1134 vaddr = eppnt->p_vaddr;
1135 }
1136 error = target_mmap(load_addr+TARGET_ELF_PAGESTART(vaddr),
1137 eppnt->p_filesz + TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr),
1138 elf_prot,
1139 elf_type,
1140 interpreter_fd,
1141 eppnt->p_offset - TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr));
1142
1143 if (error == -1) {
1144 /* Real error */
1145 close(interpreter_fd);
1146 free(elf_phdata);
1147 return ~((abi_ulong)0UL);
1148 }
1149
1150 if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) {
1151 load_addr = error;
1152 load_addr_set = 1;
1153 }
1154
1155 /*
1156 * Find the end of the file mapping for this phdr, and keep
1157 * track of the largest address we see for this.
1158 */
1159 k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
1160 if (k > elf_bss) elf_bss = k;
1161
1162 /*
1163 * Do the same thing for the memory mapping - between
1164 * elf_bss and last_bss is the bss section.
1165 */
1166 k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
1167 if (k > last_bss) last_bss = k;
1168 }
1169
1170 /* Now use mmap to map the library into memory. */
1171
1172 close(interpreter_fd);
1173
1174 /*
1175 * Now fill out the bss section. First pad the last page up
1176 * to the page boundary, and then perform a mmap to make sure
1177 * that there are zeromapped pages up to and including the last
1178 * bss page.
1179 */
1180 padzero(elf_bss, last_bss);
1181 elf_bss = TARGET_ELF_PAGESTART(elf_bss + qemu_host_page_size - 1); /* What we have mapped so far */
1182
1183 /* Map the last of the bss segment */
1184 if (last_bss > elf_bss) {
1185 target_mmap(elf_bss, last_bss-elf_bss,
1186 PROT_READ|PROT_WRITE|PROT_EXEC,
1187 MAP_FIXED|MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
1188 }
1189 free(elf_phdata);
1190
1191 *interp_load_addr = load_addr;
1192 return ((abi_ulong) interp_elf_ex->e_entry) + load_addr;
1193 }
1194
1195 static int symfind(const void *s0, const void *s1)
1196 {
1197 struct elf_sym *key = (struct elf_sym *)s0;
1198 struct elf_sym *sym = (struct elf_sym *)s1;
1199 int result = 0;
1200 if (key->st_value < sym->st_value) {
1201 result = -1;
1202 } else if (key->st_value >= sym->st_value + sym->st_size) {
1203 result = 1;
1204 }
1205 return result;
1206 }
1207
1208 static const char *lookup_symbolxx(struct syminfo *s, target_ulong orig_addr)
1209 {
1210 #if ELF_CLASS == ELFCLASS32
1211 struct elf_sym *syms = s->disas_symtab.elf32;
1212 #else
1213 struct elf_sym *syms = s->disas_symtab.elf64;
1214 #endif
1215
1216 // binary search
1217 struct elf_sym key;
1218 struct elf_sym *sym;
1219
1220 key.st_value = orig_addr;
1221
1222 sym = bsearch(&key, syms, s->disas_num_syms, sizeof(*syms), symfind);
1223 if (sym != NULL) {
1224 return s->disas_strtab + sym->st_name;
1225 }
1226
1227 return "";
1228 }
1229
1230 /* FIXME: This should use elf_ops.h */
1231 static int symcmp(const void *s0, const void *s1)
1232 {
1233 struct elf_sym *sym0 = (struct elf_sym *)s0;
1234 struct elf_sym *sym1 = (struct elf_sym *)s1;
1235 return (sym0->st_value < sym1->st_value)
1236 ? -1
1237 : ((sym0->st_value > sym1->st_value) ? 1 : 0);
1238 }
1239
1240 /* Best attempt to load symbols from this ELF object. */
1241 static void load_symbols(struct elfhdr *hdr, int fd)
1242 {
1243 unsigned int i, nsyms;
1244 struct elf_shdr sechdr, symtab, strtab;
1245 char *strings;
1246 struct syminfo *s;
1247 struct elf_sym *syms;
1248
1249 lseek(fd, hdr->e_shoff, SEEK_SET);
1250 for (i = 0; i < hdr->e_shnum; i++) {
1251 if (read(fd, &sechdr, sizeof(sechdr)) != sizeof(sechdr))
1252 return;
1253 #ifdef BSWAP_NEEDED
1254 bswap_shdr(&sechdr);
1255 #endif
1256 if (sechdr.sh_type == SHT_SYMTAB) {
1257 symtab = sechdr;
1258 lseek(fd, hdr->e_shoff
1259 + sizeof(sechdr) * sechdr.sh_link, SEEK_SET);
1260 if (read(fd, &strtab, sizeof(strtab))
1261 != sizeof(strtab))
1262 return;
1263 #ifdef BSWAP_NEEDED
1264 bswap_shdr(&strtab);
1265 #endif
1266 goto found;
1267 }
1268 }
1269 return; /* Shouldn't happen... */
1270
1271 found:
1272 /* Now we know where the strtab and symtab are. Snarf them. */
1273 s = malloc(sizeof(*s));
1274 syms = malloc(symtab.sh_size);
1275 if (!syms)
1276 return;
1277 s->disas_strtab = strings = malloc(strtab.sh_size);
1278 if (!s->disas_strtab)
1279 return;
1280
1281 lseek(fd, symtab.sh_offset, SEEK_SET);
1282 if (read(fd, syms, symtab.sh_size) != symtab.sh_size)
1283 return;
1284
1285 nsyms = symtab.sh_size / sizeof(struct elf_sym);
1286
1287 i = 0;
1288 while (i < nsyms) {
1289 #ifdef BSWAP_NEEDED
1290 bswap_sym(syms + i);
1291 #endif
1292 // Throw away entries which we do not need.
1293 if (syms[i].st_shndx == SHN_UNDEF ||
1294 syms[i].st_shndx >= SHN_LORESERVE ||
1295 ELF_ST_TYPE(syms[i].st_info) != STT_FUNC) {
1296 nsyms--;
1297 if (i < nsyms) {
1298 syms[i] = syms[nsyms];
1299 }
1300 continue;
1301 }
1302 #if defined(TARGET_ARM) || defined (TARGET_MIPS)
1303 /* The bottom address bit marks a Thumb or MIPS16 symbol. */
1304 syms[i].st_value &= ~(target_ulong)1;
1305 #endif
1306 i++;
1307 }
1308 syms = realloc(syms, nsyms * sizeof(*syms));
1309
1310 qsort(syms, nsyms, sizeof(*syms), symcmp);
1311
1312 lseek(fd, strtab.sh_offset, SEEK_SET);
1313 if (read(fd, strings, strtab.sh_size) != strtab.sh_size)
1314 return;
1315 s->disas_num_syms = nsyms;
1316 #if ELF_CLASS == ELFCLASS32
1317 s->disas_symtab.elf32 = syms;
1318 s->lookup_symbol = (lookup_symbol_t)lookup_symbolxx;
1319 #else
1320 s->disas_symtab.elf64 = syms;
1321 s->lookup_symbol = (lookup_symbol_t)lookup_symbolxx;
1322 #endif
1323 s->next = syminfos;
1324 syminfos = s;
1325 }
1326
1327 int load_elf_binary(struct linux_binprm * bprm, struct target_pt_regs * regs,
1328 struct image_info * info)
1329 {
1330 struct elfhdr elf_ex;
1331 struct elfhdr interp_elf_ex;
1332 struct exec interp_ex;
1333 int interpreter_fd = -1; /* avoid warning */
1334 abi_ulong load_addr, load_bias;
1335 int load_addr_set = 0;
1336 unsigned int interpreter_type = INTERPRETER_NONE;
1337 unsigned char ibcs2_interpreter;
1338 int i;
1339 abi_ulong mapped_addr;
1340 struct elf_phdr * elf_ppnt;
1341 struct elf_phdr *elf_phdata;
1342 abi_ulong elf_bss, k, elf_brk;
1343 int retval;
1344 char * elf_interpreter;
1345 abi_ulong elf_entry, interp_load_addr = 0;
1346 int status;
1347 abi_ulong start_code, end_code, start_data, end_data;
1348 abi_ulong reloc_func_desc = 0;
1349 abi_ulong elf_stack;
1350 char passed_fileno[6];
1351
1352 ibcs2_interpreter = 0;
1353 status = 0;
1354 load_addr = 0;
1355 load_bias = 0;
1356 elf_ex = *((struct elfhdr *) bprm->buf); /* exec-header */
1357 #ifdef BSWAP_NEEDED
1358 bswap_ehdr(&elf_ex);
1359 #endif
1360
1361 /* First of all, some simple consistency checks */
1362 if ((elf_ex.e_type != ET_EXEC && elf_ex.e_type != ET_DYN) ||
1363 (! elf_check_arch(elf_ex.e_machine))) {
1364 return -ENOEXEC;
1365 }
1366
1367 bprm->p = copy_elf_strings(1, &bprm->filename, bprm->page, bprm->p);
1368 bprm->p = copy_elf_strings(bprm->envc,bprm->envp,bprm->page,bprm->p);
1369 bprm->p = copy_elf_strings(bprm->argc,bprm->argv,bprm->page,bprm->p);
1370 if (!bprm->p) {
1371 retval = -E2BIG;
1372 }
1373
1374 /* Now read in all of the header information */
1375 elf_phdata = (struct elf_phdr *)malloc(elf_ex.e_phentsize*elf_ex.e_phnum);
1376 if (elf_phdata == NULL) {
1377 return -ENOMEM;
1378 }
1379
1380 retval = lseek(bprm->fd, elf_ex.e_phoff, SEEK_SET);
1381 if(retval > 0) {
1382 retval = read(bprm->fd, (char *) elf_phdata,
1383 elf_ex.e_phentsize * elf_ex.e_phnum);
1384 }
1385
1386 if (retval < 0) {
1387 perror("load_elf_binary");
1388 exit(-1);
1389 free (elf_phdata);
1390 return -errno;
1391 }
1392
1393 #ifdef BSWAP_NEEDED
1394 elf_ppnt = elf_phdata;
1395 for (i=0; i<elf_ex.e_phnum; i++, elf_ppnt++) {
1396 bswap_phdr(elf_ppnt);
1397 }
1398 #endif
1399 elf_ppnt = elf_phdata;
1400
1401 elf_bss = 0;
1402 elf_brk = 0;
1403
1404
1405 elf_stack = ~((abi_ulong)0UL);
1406 elf_interpreter = NULL;
1407 start_code = ~((abi_ulong)0UL);
1408 end_code = 0;
1409 start_data = 0;
1410 end_data = 0;
1411 interp_ex.a_info = 0;
1412
1413 for(i=0;i < elf_ex.e_phnum; i++) {
1414 if (elf_ppnt->p_type == PT_INTERP) {
1415 if ( elf_interpreter != NULL )
1416 {
1417 free (elf_phdata);
1418 free(elf_interpreter);
1419 close(bprm->fd);
1420 return -EINVAL;
1421 }
1422
1423 /* This is the program interpreter used for
1424 * shared libraries - for now assume that this
1425 * is an a.out format binary
1426 */
1427
1428 elf_interpreter = (char *)malloc(elf_ppnt->p_filesz);
1429
1430 if (elf_interpreter == NULL) {
1431 free (elf_phdata);
1432 close(bprm->fd);
1433 return -ENOMEM;
1434 }
1435
1436 retval = lseek(bprm->fd, elf_ppnt->p_offset, SEEK_SET);
1437 if(retval >= 0) {
1438 retval = read(bprm->fd, elf_interpreter, elf_ppnt->p_filesz);
1439 }
1440 if(retval < 0) {
1441 perror("load_elf_binary2");
1442 exit(-1);
1443 }
1444
1445 /* If the program interpreter is one of these two,
1446 then assume an iBCS2 image. Otherwise assume
1447 a native linux image. */
1448
1449 /* JRP - Need to add X86 lib dir stuff here... */
1450
1451 if (strcmp(elf_interpreter,"/usr/lib/libc.so.1") == 0 ||
1452 strcmp(elf_interpreter,"/usr/lib/ld.so.1") == 0) {
1453 ibcs2_interpreter = 1;
1454 }
1455
1456 #if 0
1457 printf("Using ELF interpreter %s\n", elf_interpreter);
1458 #endif
1459 if (retval >= 0) {
1460 retval = open(path(elf_interpreter), O_RDONLY);
1461 if(retval >= 0) {
1462 interpreter_fd = retval;
1463 }
1464 else {
1465 perror(elf_interpreter);
1466 exit(-1);
1467 /* retval = -errno; */
1468 }
1469 }
1470
1471 if (retval >= 0) {
1472 retval = lseek(interpreter_fd, 0, SEEK_SET);
1473 if(retval >= 0) {
1474 retval = read(interpreter_fd,bprm->buf,128);
1475 }
1476 }
1477 if (retval >= 0) {
1478 interp_ex = *((struct exec *) bprm->buf); /* aout exec-header */
1479 interp_elf_ex = *((struct elfhdr *) bprm->buf); /* elf exec-header */
1480 }
1481 if (retval < 0) {
1482 perror("load_elf_binary3");
1483 exit(-1);
1484 free (elf_phdata);
1485 free(elf_interpreter);
1486 close(bprm->fd);
1487 return retval;
1488 }
1489 }
1490 elf_ppnt++;
1491 }
1492
1493 /* Some simple consistency checks for the interpreter */
1494 if (elf_interpreter){
1495 interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;
1496
1497 /* Now figure out which format our binary is */
1498 if ((N_MAGIC(interp_ex) != OMAGIC) && (N_MAGIC(interp_ex) != ZMAGIC) &&
1499 (N_MAGIC(interp_ex) != QMAGIC)) {
1500 interpreter_type = INTERPRETER_ELF;
1501 }
1502
1503 if (interp_elf_ex.e_ident[0] != 0x7f ||
1504 strncmp((char *)&interp_elf_ex.e_ident[1], "ELF",3) != 0) {
1505 interpreter_type &= ~INTERPRETER_ELF;
1506 }
1507
1508 if (!interpreter_type) {
1509 free(elf_interpreter);
1510 free(elf_phdata);
1511 close(bprm->fd);
1512 return -ELIBBAD;
1513 }
1514 }
1515
1516 /* OK, we are done with that, now set up the arg stuff,
1517 and then start this sucker up */
1518
1519 {
1520 char * passed_p;
1521
1522 if (interpreter_type == INTERPRETER_AOUT) {
1523 snprintf(passed_fileno, sizeof(passed_fileno), "%d", bprm->fd);
1524 passed_p = passed_fileno;
1525
1526 if (elf_interpreter) {
1527 bprm->p = copy_elf_strings(1,&passed_p,bprm->page,bprm->p);
1528 bprm->argc++;
1529 }
1530 }
1531 if (!bprm->p) {
1532 if (elf_interpreter) {
1533 free(elf_interpreter);
1534 }
1535 free (elf_phdata);
1536 close(bprm->fd);
1537 return -E2BIG;
1538 }
1539 }
1540
1541 /* OK, This is the point of no return */
1542 info->end_data = 0;
1543 info->end_code = 0;
1544 info->start_mmap = (abi_ulong)ELF_START_MMAP;
1545 info->mmap = 0;
1546 elf_entry = (abi_ulong) elf_ex.e_entry;
1547
1548 #if defined(CONFIG_USE_GUEST_BASE)
1549 /*
1550 * In case the user has not explicitly set guest_base, we probe
1551 * here whether we should set it automatically.
1552 */
1553 if (!have_guest_base) {
1554 /*
1555 * Go through the ELF program header table and find out whether
1556 * any of the segments drop below our current mmap_min_addr; in
1557 * that case set guest_base to a corresponding address.
1558 */
1559 for (i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum;
1560 i++, elf_ppnt++) {
1561 if (elf_ppnt->p_type != PT_LOAD)
1562 continue;
1563 if (HOST_PAGE_ALIGN(elf_ppnt->p_vaddr) < mmap_min_addr) {
1564 guest_base = HOST_PAGE_ALIGN(mmap_min_addr);
1565 break;
1566 }
1567 }
1568 }
1569 #endif /* CONFIG_USE_GUEST_BASE */
1570
1571 /* Do this so that we can load the interpreter, if need be. We will
1572 change some of these later */
1573 info->rss = 0;
1574 bprm->p = setup_arg_pages(bprm->p, bprm, info);
1575 info->start_stack = bprm->p;
1576
1577 /* Now we do a little grungy work by mmapping the ELF image into
1578 * the correct location in memory. At this point, we assume that
1579 * the image should be loaded at a fixed address, not at a variable
1580 * address.
1581 */
1582
1583 for(i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum; i++, elf_ppnt++) {
1584 int elf_prot = 0;
1585 int elf_flags = 0;
1586 abi_ulong error;
1587
1588 if (elf_ppnt->p_type != PT_LOAD)
1589 continue;
1590
1591 if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ;
1592 if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
1593 if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
1594 elf_flags = MAP_PRIVATE | MAP_DENYWRITE;
1595 if (elf_ex.e_type == ET_EXEC || load_addr_set) {
1596 elf_flags |= MAP_FIXED;
1597 } else if (elf_ex.e_type == ET_DYN) {
1598 /* Try and get dynamic programs out of the way of the default mmap
1599 base, as well as whatever program they might try to exec. This
1600 is because the brk will follow the loader, and is not movable. */
1601 /* NOTE: for qemu, we do a big mmap to get enough space
1602 without hardcoding any address */
1603 error = target_mmap(0, ET_DYN_MAP_SIZE,
1604 PROT_NONE, MAP_PRIVATE | MAP_ANON,
1605 -1, 0);
1606 if (error == -1) {
1607 perror("mmap");
1608 exit(-1);
1609 }
1610 load_bias = TARGET_ELF_PAGESTART(error - elf_ppnt->p_vaddr);
1611 }
1612
1613 error = target_mmap(TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr),
1614 (elf_ppnt->p_filesz +
1615 TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)),
1616 elf_prot,
1617 (MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE),
1618 bprm->fd,
1619 (elf_ppnt->p_offset -
1620 TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)));
1621 if (error == -1) {
1622 perror("mmap");
1623 exit(-1);
1624 }
1625
1626 #ifdef LOW_ELF_STACK
1627 if (TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr) < elf_stack)
1628 elf_stack = TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr);
1629 #endif
1630
1631 if (!load_addr_set) {
1632 load_addr_set = 1;
1633 load_addr = elf_ppnt->p_vaddr - elf_ppnt->p_offset;
1634 if (elf_ex.e_type == ET_DYN) {
1635 load_bias += error -
1636 TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr);
1637 load_addr += load_bias;
1638 reloc_func_desc = load_bias;
1639 }
1640 }
1641 k = elf_ppnt->p_vaddr;
1642 if (k < start_code)
1643 start_code = k;
1644 if (start_data < k)
1645 start_data = k;
1646 k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
1647 if (k > elf_bss)
1648 elf_bss = k;
1649 if ((elf_ppnt->p_flags & PF_X) && end_code < k)
1650 end_code = k;
1651 if (end_data < k)
1652 end_data = k;
1653 k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
1654 if (k > elf_brk) elf_brk = k;
1655 }
1656
1657 elf_entry += load_bias;
1658 elf_bss += load_bias;
1659 elf_brk += load_bias;
1660 start_code += load_bias;
1661 end_code += load_bias;
1662 start_data += load_bias;
1663 end_data += load_bias;
1664
1665 if (elf_interpreter) {
1666 if (interpreter_type & 1) {
1667 elf_entry = load_aout_interp(&interp_ex, interpreter_fd);
1668 }
1669 else if (interpreter_type & 2) {
1670 elf_entry = load_elf_interp(&interp_elf_ex, interpreter_fd,
1671 &interp_load_addr);
1672 }
1673 reloc_func_desc = interp_load_addr;
1674
1675 close(interpreter_fd);
1676 free(elf_interpreter);
1677
1678 if (elf_entry == ~((abi_ulong)0UL)) {
1679 printf("Unable to load interpreter\n");
1680 free(elf_phdata);
1681 exit(-1);
1682 return 0;
1683 }
1684 }
1685
1686 free(elf_phdata);
1687
1688 if (qemu_log_enabled())
1689 load_symbols(&elf_ex, bprm->fd);
1690
1691 if (interpreter_type != INTERPRETER_AOUT) close(bprm->fd);
1692 info->personality = (ibcs2_interpreter ? PER_SVR4 : PER_LINUX);
1693
1694 #ifdef LOW_ELF_STACK
1695 info->start_stack = bprm->p = elf_stack - 4;
1696 #endif
1697 bprm->p = create_elf_tables(bprm->p,
1698 bprm->argc,
1699 bprm->envc,
1700 &elf_ex,
1701 load_addr, load_bias,
1702 interp_load_addr,
1703 (interpreter_type == INTERPRETER_AOUT ? 0 : 1),
1704 info);
1705 info->load_addr = reloc_func_desc;
1706 info->start_brk = info->brk = elf_brk;
1707 info->end_code = end_code;
1708 info->start_code = start_code;
1709 info->start_data = start_data;
1710 info->end_data = end_data;
1711 info->start_stack = bprm->p;
1712
1713 /* Calling set_brk effectively mmaps the pages that we need for the bss and break
1714 sections */
1715 set_brk(elf_bss, elf_brk);
1716
1717 padzero(elf_bss, elf_brk);
1718
1719 #if 0
1720 printf("(start_brk) %x\n" , info->start_brk);
1721 printf("(end_code) %x\n" , info->end_code);
1722 printf("(start_code) %x\n" , info->start_code);
1723 printf("(end_data) %x\n" , info->end_data);
1724 printf("(start_stack) %x\n" , info->start_stack);
1725 printf("(brk) %x\n" , info->brk);
1726 #endif
1727
1728 if ( info->personality == PER_SVR4 )
1729 {
1730 /* Why this, you ask??? Well SVr4 maps page 0 as read-only,
1731 and some applications "depend" upon this behavior.
1732 Since we do not have the power to recompile these, we
1733 emulate the SVr4 behavior. Sigh. */
1734 mapped_addr = target_mmap(0, qemu_host_page_size, PROT_READ | PROT_EXEC,
1735 MAP_FIXED | MAP_PRIVATE, -1, 0);
1736 }
1737
1738 info->entry = elf_entry;
1739
1740 #ifdef USE_ELF_CORE_DUMP
1741 bprm->core_dump = &elf_core_dump;
1742 #endif
1743
1744 return 0;
1745 }
1746
1747 #ifdef USE_ELF_CORE_DUMP
1748
1749 /*
1750 * Definitions to generate Intel SVR4-like core files.
1751 * These mostly have the same names as the SVR4 types with "target_elf_"
1752 * tacked on the front to prevent clashes with linux definitions,
1753 * and the typedef forms have been avoided. This is mostly like
1754 * the SVR4 structure, but more Linuxy, with things that Linux does
1755 * not support and which gdb doesn't really use excluded.
1756 *
1757 * Fields we don't dump (their contents are zero) in linux-user qemu
1758 * are marked with XXX.
1759 *
1760 * Core dump code is copied from linux kernel (fs/binfmt_elf.c).
1761 *
1762 * Porting ELF coredump to a target is a (quite) simple process. First
1763 * you define USE_ELF_CORE_DUMP in the target ELF code (where init_thread()
1764 * for the target resides):
1765 *
1766 * #define USE_ELF_CORE_DUMP
1767 *
1768 * Next you define the type of the register set used for dumping. The ELF
1769 * specification says it needs to be an array of elf_greg_t with ELF_NREG elements.
1770 *
1771 * typedef <target_regtype> a_target_elf_greg;
1772 * #define ELF_NREG <number of registers>
1773 * typedef a_target_elf_greg a_target_elf_gregset[ELF_NREG];
1774 *
1775 * Then define the following types to match the target types. The actual
1776 * types can be found in the linux kernel (arch/<ARCH>/include/asm/posix_types.h):
1777 *
1778 * typedef <target_uid_type> a_target_uid;
1779 * typedef <target_gid_type> a_target_gid;
1780 * typedef <target_pid_type> a_target_pid;
1781 *
1782 * The last step is to implement a target specific function that copies
1783 * registers from the given cpu into the register set above. The prototype is:
1784 *
1785 * static void elf_core_copy_regs(a_target_elf_gregset *regs,
1786 * const CPUState *env);
1787 *
1788 * Parameters:
1789 * regs - copy register values into here (allocated and zeroed by caller)
1790 * env - copy registers from here
1791 *
1792 * An example for the ARM target is provided in this file.
1793 */
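/*
 * A minimal sketch of the steps above for a hypothetical 32-bit target
 * with 16 general purpose registers (the names below are illustrative,
 * not an existing QEMU target):
 */
#if 0
#define USE_ELF_CORE_DUMP

typedef uint32_t a_target_elf_greg;
typedef uint16_t a_target_uid;
typedef uint16_t a_target_gid;
typedef int32_t  a_target_pid;

#define ELF_NREG 16
typedef a_target_elf_greg a_target_elf_gregset[ELF_NREG];

static void elf_core_copy_regs(a_target_elf_gregset *regs,
                               const CPUState *env)
{
    int i;

    /* copy every guest register into the layout gdb expects in the dump */
    for (i = 0; i < ELF_NREG; i++) {
        (*regs)[i] = env->regs[i];
    }
}
#endif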
1794
1795 /* An ELF note in memory */
1796 struct memelfnote {
1797 const char *name;
1798 size_t namesz;
1799 size_t namesz_rounded;
1800 int type;
1801 size_t datasz;
1802 void *data;
1803 size_t notesz;
1804 };
1805
1806 struct target_elf_siginfo {
1807 int si_signo; /* signal number */
1808 int si_code; /* extra code */
1809 int si_errno; /* errno */
1810 };
1811
1812 struct target_elf_prstatus {
1813 struct target_elf_siginfo pr_info; /* Info associated with signal */
1814 short pr_cursig; /* Current signal */
1815 target_ulong pr_sigpend; /* XXX */
1816 target_ulong pr_sighold; /* XXX */
1817 a_target_pid pr_pid;
1818 a_target_pid pr_ppid;
1819 a_target_pid pr_pgrp;
1820 a_target_pid pr_sid;
1821 struct target_timeval pr_utime; /* XXX User time */
1822 struct target_timeval pr_stime; /* XXX System time */
1823 struct target_timeval pr_cutime; /* XXX Cumulative user time */
1824 struct target_timeval pr_cstime; /* XXX Cumulative system time */
1825 a_target_elf_gregset pr_reg; /* GP registers */
1826 int pr_fpvalid; /* XXX */
1827 };
1828
1829 #define ELF_PRARGSZ (80) /* Number of chars for args */
1830
1831 struct target_elf_prpsinfo {
1832 char pr_state; /* numeric process state */
1833 char pr_sname; /* char for pr_state */
1834 char pr_zomb; /* zombie */
1835 char pr_nice; /* nice val */
1836 target_ulong pr_flag; /* flags */
1837 a_target_uid pr_uid;
1838 a_target_gid pr_gid;
1839 a_target_pid pr_pid, pr_ppid, pr_pgrp, pr_sid;
1840 /* Lots missing */
1841 char pr_fname[16]; /* filename of executable */
1842 char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
1843 };
1844
1845 /* Here is the structure in which status of each thread is captured. */
1846 struct elf_thread_status {
1847 QTAILQ_ENTRY(elf_thread_status) ets_link;
1848 struct target_elf_prstatus prstatus; /* NT_PRSTATUS */
1849 #if 0
1850 elf_fpregset_t fpu; /* NT_PRFPREG */
1851 struct task_struct *thread;
1852 elf_fpxregset_t xfpu; /* ELF_CORE_XFPREG_TYPE */
1853 #endif
1854 struct memelfnote notes[1];
1855 int num_notes;
1856 };
1857
1858 struct elf_note_info {
1859 struct memelfnote *notes;
1860 struct target_elf_prstatus *prstatus; /* NT_PRSTATUS */
1861 struct target_elf_prpsinfo *psinfo; /* NT_PRPSINFO */
1862
1863 QTAILQ_HEAD(thread_list_head, elf_thread_status) thread_list;
1864 #if 0
1865 /*
1866 * Current version of ELF coredump doesn't support
1867 * dumping fp regs etc.
1868 */
1869 elf_fpregset_t *fpu;
1870 elf_fpxregset_t *xfpu;
1871 int thread_status_size;
1872 #endif
1873 int notes_size;
1874 int numnote;
1875 };
1876
1877 struct vm_area_struct {
1878 abi_ulong vma_start; /* start vaddr of memory region */
1879 abi_ulong vma_end; /* end vaddr of memory region */
1880 abi_ulong vma_flags; /* protection etc. flags for the region */
1881 QTAILQ_ENTRY(vm_area_struct) vma_link;
1882 };
1883
1884 struct mm_struct {
1885 QTAILQ_HEAD(, vm_area_struct) mm_mmap;
1886 int mm_count; /* number of mappings */
1887 };
1888
1889 static struct mm_struct *vma_init(void);
1890 static void vma_delete(struct mm_struct *);
1891 static int vma_add_mapping(struct mm_struct *, abi_ulong,
1892 abi_ulong, abi_ulong);
1893 static int vma_get_mapping_count(const struct mm_struct *);
1894 static struct vm_area_struct *vma_first(const struct mm_struct *);
1895 static struct vm_area_struct *vma_next(struct vm_area_struct *);
1896 static abi_ulong vma_dump_size(const struct vm_area_struct *);
1897 static int vma_walker(void *priv, unsigned long start, unsigned long end,
1898 unsigned long flags);
1899
1900 static void fill_elf_header(struct elfhdr *, int, uint16_t, uint32_t);
1901 static void fill_note(struct memelfnote *, const char *, int,
1902 unsigned int, void *);
1903 static void fill_prstatus(struct target_elf_prstatus *, const TaskState *, int);
1904 static int fill_psinfo(struct target_elf_prpsinfo *, const TaskState *);
1905 static void fill_auxv_note(struct memelfnote *, const TaskState *);
1906 static void fill_elf_note_phdr(struct elf_phdr *, int, off_t);
1907 static size_t note_size(const struct memelfnote *);
1908 static void free_note_info(struct elf_note_info *);
1909 static int fill_note_info(struct elf_note_info *, long, const CPUState *);
1910 static void fill_thread_info(struct elf_note_info *, const CPUState *);
1911 static int core_dump_filename(const TaskState *, char *, size_t);
1912
1913 static int dump_write(int, const void *, size_t);
1914 static int write_note(struct memelfnote *, int);
1915 static int write_note_info(struct elf_note_info *, int);
1916
1917 #ifdef BSWAP_NEEDED
1918 static void bswap_prstatus(struct target_elf_prstatus *);
1919 static void bswap_psinfo(struct target_elf_prpsinfo *);
1920
1921 static void bswap_prstatus(struct target_elf_prstatus *prstatus)
1922 {
1923 prstatus->pr_info.si_signo = tswapl(prstatus->pr_info.si_signo);
1924 prstatus->pr_info.si_code = tswapl(prstatus->pr_info.si_code);
1925 prstatus->pr_info.si_errno = tswapl(prstatus->pr_info.si_errno);
1926 prstatus->pr_cursig = tswap16(prstatus->pr_cursig);
1927 prstatus->pr_sigpend = tswapl(prstatus->pr_sigpend);
1928 prstatus->pr_sighold = tswapl(prstatus->pr_sighold);
1929 prstatus->pr_pid = tswap32(prstatus->pr_pid);
1930 prstatus->pr_ppid = tswap32(prstatus->pr_ppid);
1931 prstatus->pr_pgrp = tswap32(prstatus->pr_pgrp);
1932 prstatus->pr_sid = tswap32(prstatus->pr_sid);
1933 /* cpu times are not filled, so we skip them */
1934 /* regs should be in correct format already */
1935 prstatus->pr_fpvalid = tswap32(prstatus->pr_fpvalid);
1936 }
1937
1938 static void bswap_psinfo(struct target_elf_prpsinfo *psinfo)
1939 {
1940 psinfo->pr_flag = tswapl(psinfo->pr_flag);
1941 psinfo->pr_uid = tswap16(psinfo->pr_uid);
1942 psinfo->pr_gid = tswap16(psinfo->pr_gid);
1943 psinfo->pr_pid = tswap32(psinfo->pr_pid);
1944 psinfo->pr_ppid = tswap32(psinfo->pr_ppid);
1945 psinfo->pr_pgrp = tswap32(psinfo->pr_pgrp);
1946 psinfo->pr_sid = tswap32(psinfo->pr_sid);
1947 }
1948 #endif /* BSWAP_NEEDED */
1949
1950 /*
1951  * Minimal support for Linux memory regions. These are needed
1952  * when we are finding out which memory exactly belongs to the
1953  * emulated process. No locks are needed here, as long as the
1954  * thread that received the signal is stopped.
1955 */
1956
1957 static struct mm_struct *vma_init(void)
1958 {
1959 struct mm_struct *mm;
1960
1961 if ((mm = qemu_malloc(sizeof (*mm))) == NULL)
1962 return (NULL);
1963
1964 mm->mm_count = 0;
1965 QTAILQ_INIT(&mm->mm_mmap);
1966
1967 return (mm);
1968 }
1969
1970 static void vma_delete(struct mm_struct *mm)
1971 {
1972 struct vm_area_struct *vma;
1973
1974 while ((vma = vma_first(mm)) != NULL) {
1975 QTAILQ_REMOVE(&mm->mm_mmap, vma, vma_link);
1976 qemu_free(vma);
1977 }
1978 qemu_free(mm);
1979 }
1980
1981 static int vma_add_mapping(struct mm_struct *mm, abi_ulong start,
1982 abi_ulong end, abi_ulong flags)
1983 {
1984 struct vm_area_struct *vma;
1985
1986 if ((vma = qemu_mallocz(sizeof (*vma))) == NULL)
1987 return (-1);
1988
1989 vma->vma_start = start;
1990 vma->vma_end = end;
1991 vma->vma_flags = flags;
1992
1993 QTAILQ_INSERT_TAIL(&mm->mm_mmap, vma, vma_link);
1994 mm->mm_count++;
1995
1996 return (0);
1997 }
1998
1999 static struct vm_area_struct *vma_first(const struct mm_struct *mm)
2000 {
2001 return (QTAILQ_FIRST(&mm->mm_mmap));
2002 }
2003
2004 static struct vm_area_struct *vma_next(struct vm_area_struct *vma)
2005 {
2006 return (QTAILQ_NEXT(vma, vma_link));
2007 }
2008
2009 static int vma_get_mapping_count(const struct mm_struct *mm)
2010 {
2011 return (mm->mm_count);
2012 }
2013
2014 /*
2015 * Calculate file (dump) size of given memory region.
2016 */
2017 static abi_ulong vma_dump_size(const struct vm_area_struct *vma)
2018 {
2019 /* if we cannot even read the first page, skip it */
2020 if (!access_ok(VERIFY_READ, vma->vma_start, TARGET_PAGE_SIZE))
2021 return (0);
2022
2023 /*
2024  * Usually we don't dump executable pages, as they contain
2025  * non-writable code that a debugger can read directly from the
2026  * target library etc. However, thread stacks are also marked
2027  * executable, so we read in the first page of the given region
2028  * and check whether it contains an ELF header. If there is
2029  * no ELF header, we dump the region.
2030 */
2031 if (vma->vma_flags & PROT_EXEC) {
2032 char page[TARGET_PAGE_SIZE];
2033
2034 copy_from_user(page, vma->vma_start, sizeof (page));
2035 if ((page[EI_MAG0] == ELFMAG0) &&
2036 (page[EI_MAG1] == ELFMAG1) &&
2037 (page[EI_MAG2] == ELFMAG2) &&
2038 (page[EI_MAG3] == ELFMAG3)) {
2039 /*
2040  * The mapping is likely backed by an ELF binary. Don't
2041  * dump it.
2042 */
2043 return (0);
2044 }
2045 }
2046
2047 return (vma->vma_end - vma->vma_start);
2048 }
2049
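/*
 * Callback for walk_memory_regions(): records each target mapping
 * in the mm_struct passed in via 'priv'.
 */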
2050 static int vma_walker(void *priv, unsigned long start, unsigned long end,
2051 unsigned long flags)
2052 {
2053 struct mm_struct *mm = (struct mm_struct *)priv;
2054
2055 /*
2056 * Don't dump anything that qemu has reserved for internal use.
2057 */
2058 if (flags & PAGE_RESERVED)
2059 return (0);
2060
2061 vma_add_mapping(mm, start, end, flags);
2062 return (0);
2063 }
2064
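/*
 * Describe one note for later writing. An ELF note record consists of
 * a fixed header followed by the name and the descriptor data, each
 * padded to a 4-byte boundary:
 *
 *   +------------------------------+
 *   | namesz | descsz | type       |  <- struct elf_note
 *   | name (padded to 4 bytes)     |
 *   | desc (padded to 4 bytes)     |
 *   +------------------------------+
 */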
2065 static void fill_note(struct memelfnote *note, const char *name, int type,
2066 unsigned int sz, void *data)
2067 {
2068 unsigned int namesz;
2069
2070 namesz = strlen(name) + 1;
2071 note->name = name;
2072 note->namesz = namesz;
2073 note->namesz_rounded = roundup(namesz, sizeof (int32_t));
2074 note->type = type;
2075 note->datasz = roundup(sz, sizeof (int32_t));
2076 note->data = data;
2077
2078 /*
2079  * We calculate the rounded-up note size here, as specified
2080  * by the ELF specification.
2081 */
2082 note->notesz = sizeof (struct elf_note) +
2083 note->namesz_rounded + note->datasz;
2084 }
2085
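/*
 * Initialize an ET_CORE ELF file header; 'segs' is the total number of
 * program headers (the PT_NOTE segment plus one PT_LOAD per mapping).
 */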
2086 static void fill_elf_header(struct elfhdr *elf, int segs, uint16_t machine,
2087 uint32_t flags)
2088 {
2089 (void) memset(elf, 0, sizeof(*elf));
2090
2091 (void) memcpy(elf->e_ident, ELFMAG, SELFMAG);
2092 elf->e_ident[EI_CLASS] = ELF_CLASS;
2093 elf->e_ident[EI_DATA] = ELF_DATA;
2094 elf->e_ident[EI_VERSION] = EV_CURRENT;
2095 elf->e_ident[EI_OSABI] = ELF_OSABI;
2096
2097 elf->e_type = ET_CORE;
2098 elf->e_machine = machine;
2099 elf->e_version = EV_CURRENT;
2100 elf->e_phoff = sizeof(struct elfhdr);
2101 elf->e_flags = flags;
2102 elf->e_ehsize = sizeof(struct elfhdr);
2103 elf->e_phentsize = sizeof(struct elf_phdr);
2104 elf->e_phnum = segs;
2105
2106 #ifdef BSWAP_NEEDED
2107 bswap_ehdr(elf);
2108 #endif
2109 }
2110
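/*
 * Describe the note segment: a PT_NOTE program header covering 'sz'
 * bytes at file offset 'offset'; notes are not mapped into memory,
 * so p_vaddr and p_memsz stay zero.
 */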
2111 static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, off_t offset)
2112 {
2113 phdr->p_type = PT_NOTE;
2114 phdr->p_offset = offset;
2115 phdr->p_vaddr = 0;
2116 phdr->p_paddr = 0;
2117 phdr->p_filesz = sz;
2118 phdr->p_memsz = 0;
2119 phdr->p_flags = 0;
2120 phdr->p_align = 0;
2121
2122 #ifdef BSWAP_NEEDED
2123 bswap_phdr(phdr);
2124 #endif
2125 }
2126
2127 static size_t note_size(const struct memelfnote *note)
2128 {
2129 return (note->notesz);
2130 }
2131
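/*
 * Fill the NT_PRSTATUS note body for one thread. The register set
 * (pr_reg) is copied separately by the caller via elf_core_copy_regs().
 */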
2132 static void fill_prstatus(struct target_elf_prstatus *prstatus,
2133 const TaskState *ts, int signr)
2134 {
2135 (void) memset(prstatus, 0, sizeof (*prstatus));
2136 prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
2137 prstatus->pr_pid = ts->ts_tid;
2138 prstatus->pr_ppid = getppid();
2139 prstatus->pr_pgrp = getpgrp();
2140 prstatus->pr_sid = getsid(0);
2141
2142 #ifdef BSWAP_NEEDED
2143 bswap_prstatus(prstatus);
2144 #endif
2145 }
2146
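/*
 * Fill the NT_PRPSINFO note body: the initial part of the argument
 * list (NUL separators replaced with spaces), process ids, and the
 * basename of the executable.
 */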
2147 static int fill_psinfo(struct target_elf_prpsinfo *psinfo, const TaskState *ts)
2148 {
2149 char *filename, *base_filename;
2150 unsigned int i, len;
2151
2152 (void) memset(psinfo, 0, sizeof (*psinfo));
2153
2154 len = ts->info->arg_end - ts->info->arg_start;
2155 if (len >= ELF_PRARGSZ)
2156 len = ELF_PRARGSZ - 1;
2157 if (copy_from_user(&psinfo->pr_psargs, ts->info->arg_start, len))
2158 return -EFAULT;
2159 for (i = 0; i < len; i++)
2160 if (psinfo->pr_psargs[i] == 0)
2161 psinfo->pr_psargs[i] = ' ';
2162 psinfo->pr_psargs[len] = 0;
2163
2164 psinfo->pr_pid = getpid();
2165 psinfo->pr_ppid = getppid();
2166 psinfo->pr_pgrp = getpgrp();
2167 psinfo->pr_sid = getsid(0);
2168 psinfo->pr_uid = getuid();
2169 psinfo->pr_gid = getgid();
2170
2171 filename = strdup(ts->bprm->filename);
2172 base_filename = strdup(basename(filename));
2173 (void) strncpy(psinfo->pr_fname, base_filename,
2174 sizeof(psinfo->pr_fname));
2175 free(base_filename);
2176 free(filename);
2177
2178 #ifdef BSWAP_NEEDED
2179 bswap_psinfo(psinfo);
2180 #endif
2181 return (0);
2182 }
2183
2184 static void fill_auxv_note(struct memelfnote *note, const TaskState *ts)
2185 {
2186 elf_addr_t auxv = (elf_addr_t)ts->info->saved_auxv;
2187 elf_addr_t orig_auxv = auxv;
2188 abi_ulong val;
2189 void *ptr;
2190 int i, len;
2191
2192 /*
2193  * The auxiliary vector is stored on the target process stack. It contains
2194  * {type, value} pairs that we need to dump into the note. This is not
2195  * strictly necessary, but we do it here for the sake of completeness.
2196 */
2197
2198 /* find out the length of the vector; AT_NULL is the terminator */
2199 i = len = 0;
2200 do {
2201 get_user_ual(val, auxv);
2202 i += 2;
2203 auxv += 2 * sizeof (elf_addr_t);
2204 } while (val != AT_NULL);
2205 len = i * sizeof (elf_addr_t);
2206
2207 /* read in whole auxv vector and copy it to memelfnote */
2208 ptr = lock_user(VERIFY_READ, orig_auxv, len, 0);
2209 if (ptr != NULL) {
2210 fill_note(note, "CORE", NT_AUXV, len, ptr);
2211 unlock_user(ptr, auxv, len);
2212 }
2213 }
2214
2215 /*
2216  * Construct the name of the coredump file. We use the following
2217  * convention for the name:
2218 * qemu_<basename-of-target-binary>_<date>-<time>_<pid>.core
2219 *
2220 * Returns 0 in case of success, -1 otherwise (errno is set).
2221 */
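/*
 * For example (hypothetical values only), dumping /bin/ls as pid 1234
 * on 2009-06-01 12:00:00 would produce:
 *
 *   qemu_ls_20090601-120000_1234.core
 */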
2222 static int core_dump_filename(const TaskState *ts, char *buf,
2223 size_t bufsize)
2224 {
2225 char timestamp[64];
2226 char *filename = NULL;
2227 char *base_filename = NULL;
2228 struct timeval tv;
2229 struct tm tm;
2230
2231 assert(bufsize >= PATH_MAX);
2232
2233 if (gettimeofday(&tv, NULL) < 0) {
2234 (void) fprintf(stderr, "unable to get current timestamp: %s\n",
2235 strerror(errno));
2236 return (-1);
2237 }
2238
2239 filename = strdup(ts->bprm->filename);
2240 base_filename = strdup(basename(filename));
2241 (void) strftime(timestamp, sizeof (timestamp), "%Y%m%d-%H%M%S",
2242 localtime_r(&tv.tv_sec, &tm));
2243 (void) snprintf(buf, bufsize, "qemu_%s_%s_%d.core",
2244 base_filename, timestamp, (int)getpid());
2245 free(base_filename);
2246 free(filename);
2247
2248 return (0);
2249 }
2250
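/*
 * Write 'size' bytes from 'ptr' to the core file. The amount written
 * is clamped to RLIMIT_CORE for seekable files; short writes and EINTR
 * are retried until everything has been written.
 */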
2251 static int dump_write(int fd, const void *ptr, size_t size)
2252 {
2253 const char *bufp = (const char *)ptr;
2254 ssize_t bytes_written, bytes_left;
2255 struct rlimit dumpsize;
2256 off_t pos;
2257
2258 bytes_written = 0;
2259 getrlimit(RLIMIT_CORE, &dumpsize);
2260 if ((pos = lseek(fd, 0, SEEK_CUR)) == -1) {
2261 if (errno == ESPIPE) { /* not a seekable stream */
2262 bytes_left = size;
2263 } else {
2264 return pos;
2265 }
2266 } else {
2267 if (dumpsize.rlim_cur <= pos) {
2268 return -1;
2269 } else if (dumpsize.rlim_cur == RLIM_INFINITY) {
2270 bytes_left = size;
2271 } else {
2272 size_t limit_left = dumpsize.rlim_cur - pos;
2273 bytes_left = limit_left >= size ? size : limit_left;
2274 }
2275 }
2276
2277 /*
2278  * Under normal conditions a single write(2) should do, but
2279  * for sockets etc. this loop is more portable.
2280 */
2281 do {
2282 bytes_written = write(fd, bufp, bytes_left);
2283 if (bytes_written < 0) {
2284 if (errno == EINTR)
2285 continue;
2286 return (-1);
2287 } else if (bytes_written == 0) { /* eof */
2288 return (-1);
2289 }
2290 bufp += bytes_written;
2291 bytes_left -= bytes_written;
2292 } while (bytes_left > 0);
2293
2294 return (0);
2295 }
2296
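/*
 * Write a single note record to the core file: the note header
 * followed by the (padded) name and descriptor data.
 */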
2297 static int write_note(struct memelfnote *men, int fd)
2298 {
2299 struct elf_note en;
2300
2301 en.n_namesz = men->namesz;
2302 en.n_type = men->type;
2303 en.n_descsz = men->datasz;
2304
2305 #ifdef BSWAP_NEEDED
2306 bswap_note(&en);
2307 #endif
2308
2309 if (dump_write(fd, &en, sizeof(en)) != 0)
2310 return (-1);
2311 if (dump_write(fd, men->name, men->namesz_rounded) != 0)
2312 return (-1);
2313 if (dump_write(fd, men->data, men->datasz) != 0)
2314 return (-1);
2315
2316 return (0);
2317 }
2318
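/*
 * Capture the status (NT_PRSTATUS) of one additional thread and queue
 * it on the thread list for writing later.
 */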
2319 static void fill_thread_info(struct elf_note_info *info, const CPUState *env)
2320 {
2321 TaskState *ts = (TaskState *)env->opaque;
2322 struct elf_thread_status *ets;
2323
2324 ets = qemu_mallocz(sizeof (*ets));
2325 ets->num_notes = 1; /* only prstatus is dumped */
2326 fill_prstatus(&ets->prstatus, ts, 0);
2327 elf_core_copy_regs(&ets->prstatus.pr_reg, env);
2328 fill_note(&ets->notes[0], "CORE", NT_PRSTATUS, sizeof (ets->prstatus),
2329 &ets->prstatus);
2330
2331 QTAILQ_INSERT_TAIL(&info->thread_list, ets, ets_link);
2332
2333 info->notes_size += note_size(&ets->notes[0]);
2334 }
2335
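/*
 * Build the in-memory set of notes: NT_PRSTATUS, NT_PRPSINFO and
 * NT_AUXV for the current thread, plus one NT_PRSTATUS for every
 * other thread of the process.
 */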
2336 static int fill_note_info(struct elf_note_info *info,
2337 long signr, const CPUState *env)
2338 {
2339 #define NUMNOTES 3
2340 CPUState *cpu = NULL;
2341 TaskState *ts = (TaskState *)env->opaque;
2342 int i;
2343
2344 (void) memset(info, 0, sizeof (*info));
2345
2346 QTAILQ_INIT(&info->thread_list);
2347
2348 info->notes = qemu_mallocz(NUMNOTES * sizeof (struct memelfnote));
2349 if (info->notes == NULL)
2350 return (-ENOMEM);
2351 info->prstatus = qemu_mallocz(sizeof (*info->prstatus));
2352 if (info->prstatus == NULL)
2353 return (-ENOMEM);
2354 info->psinfo = qemu_mallocz(sizeof (*info->psinfo));
2355 if (info->psinfo == NULL)
2356 return (-ENOMEM);
2357
2358 /*
2359 * First fill in status (and registers) of current thread
2360 * including process info & aux vector.
2361 */
2362 fill_prstatus(info->prstatus, ts, signr);
2363 elf_core_copy_regs(&info->prstatus->pr_reg, env);
2364 fill_note(&info->notes[0], "CORE", NT_PRSTATUS,
2365 sizeof (*info->prstatus), info->prstatus);
2366 fill_psinfo(info->psinfo, ts);
2367 fill_note(&info->notes[1], "CORE", NT_PRPSINFO,
2368 sizeof (*info->psinfo), info->psinfo);
2369 fill_auxv_note(&info->notes[2], ts);
2370 info->numnote = NUMNOTES;
2371
2372 info->notes_size = 0;
2373 for (i = 0; i < info->numnote; i++)
2374 info->notes_size += note_size(&info->notes[i]);
2375
2376 /* read and fill status of all threads */
2377 cpu_list_lock();
2378 for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
2379 if (cpu == thread_env)
2380 continue;
2381 fill_thread_info(info, cpu);
2382 }
2383 cpu_list_unlock();
2384
2385 return (0);
2386 }
2387
2388 static void free_note_info(struct elf_note_info *info)
2389 {
2390 struct elf_thread_status *ets;
2391
2392 while (!QTAILQ_EMPTY(&info->thread_list)) {
2393 ets = QTAILQ_FIRST(&info->thread_list);
2394 QTAILQ_REMOVE(&info->thread_list, ets, ets_link);
2395 qemu_free(ets);
2396 }
2397
2398 qemu_free(info->prstatus);
2399 qemu_free(info->psinfo);
2400 qemu_free(info->notes);
2401 }
2402
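/*
 * Write all collected notes to the core file: the current thread's
 * notes first, then one NT_PRSTATUS note per additional thread.
 */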
2403 static int write_note_info(struct elf_note_info *info, int fd)
2404 {
2405 struct elf_thread_status *ets;
2406 int i, error = 0;
2407
2408 /* write prstatus, psinfo and auxv for current thread */
2409 for (i = 0; i < info->numnote; i++)
2410 if ((error = write_note(&info->notes[i], fd)) != 0)
2411 return (error);
2412
2413 /* write prstatus for each thread */
2414 QTAILQ_FOREACH(ets, &info->thread_list, ets_link) {
2416 if ((error = write_note(&ets->notes[0], fd)) != 0)
2417 return (error);
2418 }
2419
2420 return (0);
2421 }
2422
2423 /*
2424 * Write out ELF coredump.
2425 *
2426 * See documentation of ELF object file format in:
2427 * http://www.caldera.com/developers/devspecs/gabi41.pdf
2428 *
2429  * The coredump format in Linux is as follows:
2430 *
2431 * 0 +----------------------+ \
2432 * | ELF header | ET_CORE |
2433 * +----------------------+ |
2434 * | ELF program headers | |--- headers
2435 * | - NOTE section | |
2436 * | - PT_LOAD sections | |
2437 * +----------------------+ /
2438 * | NOTEs: |
2439 * | - NT_PRSTATUS |
2440  * | - NT_PRPSINFO |
2441 * | - NT_AUXV |
2442 * +----------------------+ <-- aligned to target page
2443 * | Process memory dump |
2444 * : :
2445 * . .
2446 * : :
2447 * | |
2448 * +----------------------+
2449 *
2450 * NT_PRSTATUS -> struct elf_prstatus (per thread)
2451  * NT_PRPSINFO -> struct elf_prpsinfo
2452  * NT_AUXV is an array of { type, value } pairs (see fill_auxv_note()).
2453 *
2454  * The format follows the System V format as closely as possible. Current
2455 * version limitations are as follows:
2456 * - no floating point registers are dumped
2457 *
2458 * Function returns 0 in case of success, negative errno otherwise.
2459 *
2460  * TODO: make this work at runtime as well: it should be
2461  * possible to force a coredump of a running process and then
2462  * continue processing. For example, qemu could set up a SIGUSR2
2463  * handler (provided that the target process hasn't registered a
2464  * handler for it) that does the dump when the signal is received.
2465 */
2466 static int elf_core_dump(int signr, const CPUState *env)
2467 {
2468 const TaskState *ts = (const TaskState *)env->opaque;
2469 struct vm_area_struct *vma = NULL;
2470 char corefile[PATH_MAX];
2471 struct elf_note_info info;
2472 struct elfhdr elf;
2473 struct elf_phdr phdr;
2474 struct rlimit dumpsize;
2475 struct mm_struct *mm = NULL;
2476 off_t offset = 0, data_offset = 0;
2477 int segs = 0;
2478 int fd = -1;
2479
2480 errno = 0;
2481 getrlimit(RLIMIT_CORE, &dumpsize);
2482 if (dumpsize.rlim_cur == 0)
2483 return 0;
2484
2485 if (core_dump_filename(ts, corefile, sizeof (corefile)) < 0)
2486 return (-errno);
2487
2488 if ((fd = open(corefile, O_WRONLY | O_CREAT,
2489 S_IRUSR|S_IWUSR|S_IRGRP|S_IROTH)) < 0)
2490 return (-errno);
2491
2492 /*
2493  * Walk through the target process memory mappings and
2494  * set up a structure containing this information. After
2495  * this point the vma_xxx functions can be used.
2496 */
2497 if ((mm = vma_init()) == NULL)
2498 goto out;
2499
2500 walk_memory_regions(mm, vma_walker);
2501 segs = vma_get_mapping_count(mm);
2502
2503 /*
2504  * Construct a valid coredump ELF header. We also
2505  * add one more segment for the notes.
2506 */
2507 fill_elf_header(&elf, segs + 1, ELF_MACHINE, 0);
2508 if (dump_write(fd, &elf, sizeof (elf)) != 0)
2509 goto out;
2510
2511 /* fill in in-memory version of notes */
2512 if (fill_note_info(&info, signr, env) < 0)
2513 goto out;
2514
2515 offset += sizeof (elf); /* elf header */
2516 offset += (segs + 1) * sizeof (struct elf_phdr); /* program headers */
2517
2518 /* write out notes program header */
2519 fill_elf_note_phdr(&phdr, info.notes_size, offset);
2520
2521 offset += info.notes_size;
2522 if (dump_write(fd, &phdr, sizeof (phdr)) != 0)
2523 goto out;
2524
2525 /*
2526  * The ELF specification wants data to start at a page boundary,
2527  * so we align it here.
2528 */
2529 offset = roundup(offset, ELF_EXEC_PAGESIZE);
2530
2531 /*
2532 * Write program headers for memory regions mapped in
2533 * the target process.
2534 */
2535 for (vma = vma_first(mm); vma != NULL; vma = vma_next(vma)) {
2536 (void) memset(&phdr, 0, sizeof (phdr));
2537
2538 phdr.p_type = PT_LOAD;
2539 phdr.p_offset = offset;
2540 phdr.p_vaddr = vma->vma_start;
2541 phdr.p_paddr = 0;
2542 phdr.p_filesz = vma_dump_size(vma);
2543 offset += phdr.p_filesz;
2544 phdr.p_memsz = vma->vma_end - vma->vma_start;
2545 phdr.p_flags = vma->vma_flags & PROT_READ ? PF_R : 0;
2546 if (vma->vma_flags & PROT_WRITE)
2547 phdr.p_flags |= PF_W;
2548 if (vma->vma_flags & PROT_EXEC)
2549 phdr.p_flags |= PF_X;
2550 phdr.p_align = ELF_EXEC_PAGESIZE;
2551
2552 dump_write(fd, &phdr, sizeof (phdr));
2553 }
2554
2555 /*
2556  * Next we write the notes just after the program headers. No
2557  * alignment is needed here.
2558 */
2559 if (write_note_info(&info, fd) < 0)
2560 goto out;
2561
2562 /* align data to page boundary */
2563 data_offset = lseek(fd, 0, SEEK_CUR);
2564 data_offset = TARGET_PAGE_ALIGN(data_offset);
2565 if (lseek(fd, data_offset, SEEK_SET) != data_offset)
2566 goto out;
2567
2568 /*
2569  * Finally we can dump the process memory into the corefile as well.
2570 */
2571 for (vma = vma_first(mm); vma != NULL; vma = vma_next(vma)) {
2572 abi_ulong addr;
2573 abi_ulong end;
2574
2575 end = vma->vma_start + vma_dump_size(vma);
2576
2577 for (addr = vma->vma_start; addr < end;
2578 addr += TARGET_PAGE_SIZE) {
2579 char page[TARGET_PAGE_SIZE];
2580 int error;
2581
2582 /*
2583 * Read in page from target process memory and
2584 * write it to coredump file.
2585 */
2586 error = copy_from_user(page, addr, sizeof (page));
2587 if (error != 0) {
2588 (void) fprintf(stderr, "unable to dump " TARGET_FMT_lx "\n",
2589 addr);
2590 errno = -error;
2591 goto out;
2592 }
2593 if (dump_write(fd, page, TARGET_PAGE_SIZE) < 0)
2594 goto out;
2595 }
2596 }
2597
2598 out:
2599 free_note_info(&info);
2600 if (mm != NULL)
2601 vma_delete(mm);
2602 (void) close(fd);
2603
2604 if (errno != 0)
2605 return (-errno);
2606 return (0);
2607 }
2608
2609 #endif /* USE_ELF_CORE_DUMP */
2610
2611 static int load_aout_interp(void * exptr, int interp_fd)
2612 {
2613 printf("a.out interpreter not yet supported\n");
2614 return(0);
2615 }
2616
2617 void do_init_thread(struct target_pt_regs *regs, struct image_info *infop)
2618 {
2619 init_thread(regs, infop);
2620 }