git.proxmox.com Git - qemu.git/blob - linux-user/elfload.c
linux-user: fix ELF_USE_CORE_DUMP/USE_ELF_CORE_DUMP confusion
1 /* This is the Linux kernel elf-loading code, ported into user space */
2 #include <sys/time.h>
3 #include <sys/param.h>
4
5 #include <stdio.h>
6 #include <sys/types.h>
7 #include <fcntl.h>
8 #include <errno.h>
9 #include <unistd.h>
10 #include <sys/mman.h>
11 #include <sys/resource.h>
12 #include <stdlib.h>
13 #include <string.h>
14 #include <time.h>
15
16 #include "qemu.h"
17 #include "disas.h"
18
19 #ifdef _ARCH_PPC64
20 #undef ARCH_DLINFO
21 #undef ELF_PLATFORM
22 #undef ELF_HWCAP
23 #undef ELF_CLASS
24 #undef ELF_DATA
25 #undef ELF_ARCH
26 #endif
27
28 #define ELF_OSABI ELFOSABI_SYSV
29
30 /* from personality.h */
31
32 /*
33 * Flags for bug emulation.
34 *
35 * These occupy the top three bytes.
36 */
37 enum {
38 ADDR_NO_RANDOMIZE = 0x0040000, /* disable randomization of VA space */
39 FDPIC_FUNCPTRS = 0x0080000, /* userspace function ptrs point to descriptors
40 * (signal handling)
41 */
42 MMAP_PAGE_ZERO = 0x0100000,
43 ADDR_COMPAT_LAYOUT = 0x0200000,
44 READ_IMPLIES_EXEC = 0x0400000,
45 ADDR_LIMIT_32BIT = 0x0800000,
46 SHORT_INODE = 0x1000000,
47 WHOLE_SECONDS = 0x2000000,
48 STICKY_TIMEOUTS = 0x4000000,
49 ADDR_LIMIT_3GB = 0x8000000,
50 };
51
52 /*
53 * Personality types.
54 *
55 * These go in the low byte. Avoid using the top bit, it will
56 * conflict with error returns.
57 */
58 enum {
59 PER_LINUX = 0x0000,
60 PER_LINUX_32BIT = 0x0000 | ADDR_LIMIT_32BIT,
61 PER_LINUX_FDPIC = 0x0000 | FDPIC_FUNCPTRS,
62 PER_SVR4 = 0x0001 | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
63 PER_SVR3 = 0x0002 | STICKY_TIMEOUTS | SHORT_INODE,
64 PER_SCOSVR3 = 0x0003 | STICKY_TIMEOUTS |
65 WHOLE_SECONDS | SHORT_INODE,
66 PER_OSR5 = 0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS,
67 PER_WYSEV386 = 0x0004 | STICKY_TIMEOUTS | SHORT_INODE,
68 PER_ISCR4 = 0x0005 | STICKY_TIMEOUTS,
69 PER_BSD = 0x0006,
70 PER_SUNOS = 0x0006 | STICKY_TIMEOUTS,
71 PER_XENIX = 0x0007 | STICKY_TIMEOUTS | SHORT_INODE,
72 PER_LINUX32 = 0x0008,
73 PER_LINUX32_3GB = 0x0008 | ADDR_LIMIT_3GB,
74 PER_IRIX32 = 0x0009 | STICKY_TIMEOUTS,/* IRIX5 32-bit */
75 PER_IRIXN32 = 0x000a | STICKY_TIMEOUTS,/* IRIX6 new 32-bit */
76 PER_IRIX64 = 0x000b | STICKY_TIMEOUTS,/* IRIX6 64-bit */
77 PER_RISCOS = 0x000c,
78 PER_SOLARIS = 0x000d | STICKY_TIMEOUTS,
79 PER_UW7 = 0x000e | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
80 PER_OSF4 = 0x000f, /* OSF/1 v4 */
81 PER_HPUX = 0x0010,
82 PER_MASK = 0x00ff,
83 };
84
85 /*
86 * Return the base personality without flags.
87 */
88 #define personality(pers) (pers & PER_MASK)
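
/*
 * For example, personality(PER_LINUX32_3GB) == PER_LINUX32 (0x0008) and
 * personality(PER_SVR4) == 0x0001: ADDR_LIMIT_3GB, STICKY_TIMEOUTS and
 * MMAP_PAGE_ZERO all live above the low byte and are masked off.
 */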
89
90 /* this flag is ineffective under Linux too, should be deleted */
91 #ifndef MAP_DENYWRITE
92 #define MAP_DENYWRITE 0
93 #endif
94
95 /* should probably go in elf.h */
96 #ifndef ELIBBAD
97 #define ELIBBAD 80
98 #endif
99
100 #ifdef TARGET_I386
101
102 #define ELF_PLATFORM get_elf_platform()
103
104 static const char *get_elf_platform(void)
105 {
106 static char elf_platform[] = "i386";
107 int family = (thread_env->cpuid_version >> 8) & 0xff;
108 if (family > 6)
109 family = 6;
110 if (family >= 3)
111 elf_platform[1] = '0' + family;
112 return elf_platform;
113 }
114
115 #define ELF_HWCAP get_elf_hwcap()
116
117 static uint32_t get_elf_hwcap(void)
118 {
119 return thread_env->cpuid_features;
120 }
121
122 #ifdef TARGET_X86_64
123 #define ELF_START_MMAP 0x2aaaaab000ULL
124 #define elf_check_arch(x) ( ((x) == ELF_ARCH) )
125
126 #define ELF_CLASS ELFCLASS64
127 #define ELF_DATA ELFDATA2LSB
128 #define ELF_ARCH EM_X86_64
129
130 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
131 {
132 regs->rax = 0;
133 regs->rsp = infop->start_stack;
134 regs->rip = infop->entry;
135 }
136
137 typedef target_ulong target_elf_greg_t;
138 typedef uint32_t target_uid_t;
139 typedef uint32_t target_gid_t;
140 typedef int32_t target_pid_t;
141
142 #define ELF_NREG 27
143 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
144
145 /*
146 * Note that ELF_NREG should be 29 as there should be room for the
147 * TRAPNO and ERR "registers" as well, but Linux doesn't dump
148 * those.
149 *
150 * See linux kernel: arch/x86/include/asm/elf.h
151 */
152 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUState *env)
153 {
154 (*regs)[0] = env->regs[15];
155 (*regs)[1] = env->regs[14];
156 (*regs)[2] = env->regs[13];
157 (*regs)[3] = env->regs[12];
158 (*regs)[4] = env->regs[R_EBP];
159 (*regs)[5] = env->regs[R_EBX];
160 (*regs)[6] = env->regs[11];
161 (*regs)[7] = env->regs[10];
162 (*regs)[8] = env->regs[9];
163 (*regs)[9] = env->regs[8];
164 (*regs)[10] = env->regs[R_EAX];
165 (*regs)[11] = env->regs[R_ECX];
166 (*regs)[12] = env->regs[R_EDX];
167 (*regs)[13] = env->regs[R_ESI];
168 (*regs)[14] = env->regs[R_EDI];
169 (*regs)[15] = env->regs[R_EAX]; /* XXX */
170 (*regs)[16] = env->eip;
171 (*regs)[17] = env->segs[R_CS].selector & 0xffff;
172 (*regs)[18] = env->eflags;
173 (*regs)[19] = env->regs[R_ESP];
174 (*regs)[20] = env->segs[R_SS].selector & 0xffff;
175 (*regs)[21] = env->segs[R_FS].selector & 0xffff;
176 (*regs)[22] = env->segs[R_GS].selector & 0xffff;
177 (*regs)[23] = env->segs[R_DS].selector & 0xffff;
178 (*regs)[24] = env->segs[R_ES].selector & 0xffff;
179 (*regs)[25] = env->segs[R_FS].selector & 0xffff;
180 (*regs)[26] = env->segs[R_GS].selector & 0xffff;
181 }
182
183 #else
184
185 #define ELF_START_MMAP 0x80000000
186
187 /*
188 * This is used to ensure we don't load something for the wrong architecture.
189 */
190 #define elf_check_arch(x) ( ((x) == EM_386) || ((x) == EM_486) )
191
192 /*
193 * These are used to set parameters in the core dumps.
194 */
195 #define ELF_CLASS ELFCLASS32
196 #define ELF_DATA ELFDATA2LSB
197 #define ELF_ARCH EM_386
198
199 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
200 {
201 regs->esp = infop->start_stack;
202 regs->eip = infop->entry;
203
204 /* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program
205 starts %edx contains a pointer to a function which might be
206 registered using `atexit'. This provides a means for the
207 dynamic linker to call DT_FINI functions for shared libraries
208 that have been loaded before the code runs.
209
210 A value of 0 tells us we have no such handler. */
211 regs->edx = 0;
212 }
213
214 typedef target_ulong target_elf_greg_t;
215 typedef uint16_t target_uid_t;
216 typedef uint16_t target_gid_t;
217 typedef int32_t target_pid_t;
218
219 #define ELF_NREG 17
220 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
221
222 /*
223 * Note that ELF_NREG should be 19 as there should be room for the
224 * TRAPNO and ERR "registers" as well, but Linux doesn't dump
225 * those.
226 *
227 * See linux kernel: arch/x86/include/asm/elf.h
228 */
229 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUState *env)
230 {
231 (*regs)[0] = env->regs[R_EBX];
232 (*regs)[1] = env->regs[R_ECX];
233 (*regs)[2] = env->regs[R_EDX];
234 (*regs)[3] = env->regs[R_ESI];
235 (*regs)[4] = env->regs[R_EDI];
236 (*regs)[5] = env->regs[R_EBP];
237 (*regs)[6] = env->regs[R_EAX];
238 (*regs)[7] = env->segs[R_DS].selector & 0xffff;
239 (*regs)[8] = env->segs[R_ES].selector & 0xffff;
240 (*regs)[9] = env->segs[R_FS].selector & 0xffff;
241 (*regs)[10] = env->segs[R_GS].selector & 0xffff;
242 (*regs)[11] = env->regs[R_EAX]; /* XXX */
243 (*regs)[12] = env->eip;
244 (*regs)[13] = env->segs[R_CS].selector & 0xffff;
245 (*regs)[14] = env->eflags;
246 (*regs)[15] = env->regs[R_ESP];
247 (*regs)[16] = env->segs[R_SS].selector & 0xffff;
248 }
249 #endif
250
251 #define USE_ELF_CORE_DUMP
252 #define ELF_EXEC_PAGESIZE 4096
253
254 #endif
255
256 #ifdef TARGET_ARM
257
258 #define ELF_START_MMAP 0x80000000
259
260 #define elf_check_arch(x) ( (x) == EM_ARM )
261
262 #define ELF_CLASS ELFCLASS32
263 #ifdef TARGET_WORDS_BIGENDIAN
264 #define ELF_DATA ELFDATA2MSB
265 #else
266 #define ELF_DATA ELFDATA2LSB
267 #endif
268 #define ELF_ARCH EM_ARM
269
270 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
271 {
272 abi_long stack = infop->start_stack;
273 memset(regs, 0, sizeof(*regs));
274 regs->ARM_cpsr = 0x10;
275 if (infop->entry & 1)
276 regs->ARM_cpsr |= CPSR_T;
277 regs->ARM_pc = infop->entry & 0xfffffffe;
278 regs->ARM_sp = infop->start_stack;
279 /* FIXME - what to do on failure of get_user()? */
280 get_user_ual(regs->ARM_r2, stack + 8); /* envp */
281 get_user_ual(regs->ARM_r1, stack + 4); /* envp */
282 /* XXX: it seems that r0 is zeroed after ! */
283 regs->ARM_r0 = 0;
284 /* For uClinux PIC binaries. */
285 /* XXX: Linux does this only on ARM with no MMU (do we care ?) */
286 regs->ARM_r10 = infop->start_data;
287 }
288
289 typedef uint32_t target_elf_greg_t;
290 typedef uint16_t target_uid_t;
291 typedef uint16_t target_gid_t;
292 typedef int32_t target_pid_t;
293
294 #define ELF_NREG 18
295 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
296
297 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUState *env)
298 {
299 (*regs)[0] = env->regs[0];
300 (*regs)[1] = env->regs[1];
301 (*regs)[2] = env->regs[2];
302 (*regs)[3] = env->regs[3];
303 (*regs)[4] = env->regs[4];
304 (*regs)[5] = env->regs[5];
305 (*regs)[6] = env->regs[6];
306 (*regs)[7] = env->regs[7];
307 (*regs)[8] = env->regs[8];
308 (*regs)[9] = env->regs[9];
309 (*regs)[10] = env->regs[10];
310 (*regs)[11] = env->regs[11];
311 (*regs)[12] = env->regs[12];
312 (*regs)[13] = env->regs[13];
313 (*regs)[14] = env->regs[14];
314 (*regs)[15] = env->regs[15];
315
316 (*regs)[16] = cpsr_read((CPUState *)env);
317 (*regs)[17] = env->regs[0]; /* XXX */
318 }
319
320 #define USE_ELF_CORE_DUMP
321 #define ELF_EXEC_PAGESIZE 4096
322
323 enum
324 {
325 ARM_HWCAP_ARM_SWP = 1 << 0,
326 ARM_HWCAP_ARM_HALF = 1 << 1,
327 ARM_HWCAP_ARM_THUMB = 1 << 2,
328 ARM_HWCAP_ARM_26BIT = 1 << 3,
329 ARM_HWCAP_ARM_FAST_MULT = 1 << 4,
330 ARM_HWCAP_ARM_FPA = 1 << 5,
331 ARM_HWCAP_ARM_VFP = 1 << 6,
332 ARM_HWCAP_ARM_EDSP = 1 << 7,
333 ARM_HWCAP_ARM_JAVA = 1 << 8,
334 ARM_HWCAP_ARM_IWMMXT = 1 << 9,
335 ARM_HWCAP_ARM_THUMBEE = 1 << 10,
336 ARM_HWCAP_ARM_NEON = 1 << 11,
337 ARM_HWCAP_ARM_VFPv3 = 1 << 12,
338 ARM_HWCAP_ARM_VFPv3D16 = 1 << 13,
339 };
340
341 #define ELF_HWCAP (ARM_HWCAP_ARM_SWP | ARM_HWCAP_ARM_HALF \
342 | ARM_HWCAP_ARM_THUMB | ARM_HWCAP_ARM_FAST_MULT \
343 | ARM_HWCAP_ARM_FPA | ARM_HWCAP_ARM_VFP \
344 | ARM_HWCAP_ARM_NEON | ARM_HWCAP_ARM_VFPv3 )
345
346 #endif
347
348 #ifdef TARGET_SPARC
349 #ifdef TARGET_SPARC64
350
351 #define ELF_START_MMAP 0x80000000
352
353 #ifndef TARGET_ABI32
354 #define elf_check_arch(x) ( (x) == EM_SPARCV9 || (x) == EM_SPARC32PLUS )
355 #else
356 #define elf_check_arch(x) ( (x) == EM_SPARC32PLUS || (x) == EM_SPARC )
357 #endif
358
359 #define ELF_CLASS ELFCLASS64
360 #define ELF_DATA ELFDATA2MSB
361 #define ELF_ARCH EM_SPARCV9
362
363 #define STACK_BIAS 2047
364
365 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
366 {
367 #ifndef TARGET_ABI32
368 regs->tstate = 0;
369 #endif
370 regs->pc = infop->entry;
371 regs->npc = regs->pc + 4;
372 regs->y = 0;
373 #ifdef TARGET_ABI32
374 regs->u_regs[14] = infop->start_stack - 16 * 4;
375 #else
376 if (personality(infop->personality) == PER_LINUX32)
377 regs->u_regs[14] = infop->start_stack - 16 * 4;
378 else
379 regs->u_regs[14] = infop->start_stack - 16 * 8 - STACK_BIAS;
380 #endif
381 }
382
383 #else
384 #define ELF_START_MMAP 0x80000000
385
386 #define elf_check_arch(x) ( (x) == EM_SPARC )
387
388 #define ELF_CLASS ELFCLASS32
389 #define ELF_DATA ELFDATA2MSB
390 #define ELF_ARCH EM_SPARC
391
392 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
393 {
394 regs->psr = 0;
395 regs->pc = infop->entry;
396 regs->npc = regs->pc + 4;
397 regs->y = 0;
398 regs->u_regs[14] = infop->start_stack - 16 * 4;
399 }
400
401 #endif
402 #endif
403
404 #ifdef TARGET_PPC
405
406 #define ELF_START_MMAP 0x80000000
407
408 #if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
409
410 #define elf_check_arch(x) ( (x) == EM_PPC64 )
411
412 #define ELF_CLASS ELFCLASS64
413
414 #else
415
416 #define elf_check_arch(x) ( (x) == EM_PPC )
417
418 #define ELF_CLASS ELFCLASS32
419
420 #endif
421
422 #ifdef TARGET_WORDS_BIGENDIAN
423 #define ELF_DATA ELFDATA2MSB
424 #else
425 #define ELF_DATA ELFDATA2LSB
426 #endif
427 #define ELF_ARCH EM_PPC
428
429 /* Feature masks for the Aux Vector Hardware Capabilities (AT_HWCAP).
430 See arch/powerpc/include/asm/cputable.h. */
431 enum {
432 QEMU_PPC_FEATURE_32 = 0x80000000,
433 QEMU_PPC_FEATURE_64 = 0x40000000,
434 QEMU_PPC_FEATURE_601_INSTR = 0x20000000,
435 QEMU_PPC_FEATURE_HAS_ALTIVEC = 0x10000000,
436 QEMU_PPC_FEATURE_HAS_FPU = 0x08000000,
437 QEMU_PPC_FEATURE_HAS_MMU = 0x04000000,
438 QEMU_PPC_FEATURE_HAS_4xxMAC = 0x02000000,
439 QEMU_PPC_FEATURE_UNIFIED_CACHE = 0x01000000,
440 QEMU_PPC_FEATURE_HAS_SPE = 0x00800000,
441 QEMU_PPC_FEATURE_HAS_EFP_SINGLE = 0x00400000,
442 QEMU_PPC_FEATURE_HAS_EFP_DOUBLE = 0x00200000,
443 QEMU_PPC_FEATURE_NO_TB = 0x00100000,
444 QEMU_PPC_FEATURE_POWER4 = 0x00080000,
445 QEMU_PPC_FEATURE_POWER5 = 0x00040000,
446 QEMU_PPC_FEATURE_POWER5_PLUS = 0x00020000,
447 QEMU_PPC_FEATURE_CELL = 0x00010000,
448 QEMU_PPC_FEATURE_BOOKE = 0x00008000,
449 QEMU_PPC_FEATURE_SMT = 0x00004000,
450 QEMU_PPC_FEATURE_ICACHE_SNOOP = 0x00002000,
451 QEMU_PPC_FEATURE_ARCH_2_05 = 0x00001000,
452 QEMU_PPC_FEATURE_PA6T = 0x00000800,
453 QEMU_PPC_FEATURE_HAS_DFP = 0x00000400,
454 QEMU_PPC_FEATURE_POWER6_EXT = 0x00000200,
455 QEMU_PPC_FEATURE_ARCH_2_06 = 0x00000100,
456 QEMU_PPC_FEATURE_HAS_VSX = 0x00000080,
457 QEMU_PPC_FEATURE_PSERIES_PERFMON_COMPAT = 0x00000040,
458
459 QEMU_PPC_FEATURE_TRUE_LE = 0x00000002,
460 QEMU_PPC_FEATURE_PPC_LE = 0x00000001,
461 };
462
463 #define ELF_HWCAP get_elf_hwcap()
464
465 static uint32_t get_elf_hwcap(void)
466 {
467 CPUState *e = thread_env;
468 uint32_t features = 0;
469
470 /* We don't have to be terribly complete here; the high points are
471 Altivec/FP/SPE support. Anything else is just a bonus. */
472 #define GET_FEATURE(flag, feature) \
473 do {if (e->insns_flags & flag) features |= feature; } while(0)
474 GET_FEATURE(PPC_64B, QEMU_PPC_FEATURE_64);
475 GET_FEATURE(PPC_FLOAT, QEMU_PPC_FEATURE_HAS_FPU);
476 GET_FEATURE(PPC_ALTIVEC, QEMU_PPC_FEATURE_HAS_ALTIVEC);
477 GET_FEATURE(PPC_SPE, QEMU_PPC_FEATURE_HAS_SPE);
478 GET_FEATURE(PPC_SPE_SINGLE, QEMU_PPC_FEATURE_HAS_EFP_SINGLE);
479 GET_FEATURE(PPC_SPE_DOUBLE, QEMU_PPC_FEATURE_HAS_EFP_DOUBLE);
480 GET_FEATURE(PPC_BOOKE, QEMU_PPC_FEATURE_BOOKE);
481 GET_FEATURE(PPC_405_MAC, QEMU_PPC_FEATURE_HAS_4xxMAC);
482 #undef GET_FEATURE
483
484 return features;
485 }
486
487 /*
488 * We need to put in some extra aux table entries to tell glibc what
489 * the cache block size is, so it can use the dcbz instruction safely.
490 */
491 #define AT_DCACHEBSIZE 19
492 #define AT_ICACHEBSIZE 20
493 #define AT_UCACHEBSIZE 21
494 /* A special ignored type value for PPC, for glibc compatibility. */
495 #define AT_IGNOREPPC 22
496 /*
497 * The requirements here are:
498 * - keep the final alignment of sp (sp & 0xf)
499 * - make sure the 32-bit value at the first 16 byte aligned position of
500 * AUXV is greater than 16 for glibc compatibility.
501 * AT_IGNOREPPC is used for that.
502 * - for compatibility with glibc ARCH_DLINFO must always be defined on PPC,
503 * even if DLINFO_ARCH_ITEMS goes to zero or is undefined.
504 */
505 #define DLINFO_ARCH_ITEMS 5
506 #define ARCH_DLINFO \
507 do { \
508 NEW_AUX_ENT(AT_DCACHEBSIZE, 0x20); \
509 NEW_AUX_ENT(AT_ICACHEBSIZE, 0x20); \
510 NEW_AUX_ENT(AT_UCACHEBSIZE, 0); \
511 /* \
512 * Now handle glibc compatibility. \
513 */ \
514 NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \
515 NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \
516 } while (0)
517
518 static inline void init_thread(struct target_pt_regs *_regs, struct image_info *infop)
519 {
520 abi_ulong pos = infop->start_stack;
521 abi_ulong tmp;
522 #if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
523 abi_ulong entry, toc;
524 #endif
525
526 _regs->gpr[1] = infop->start_stack;
527 #if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
528 entry = ldq_raw(infop->entry) + infop->load_addr;
529 toc = ldq_raw(infop->entry + 8) + infop->load_addr;
530 _regs->gpr[2] = toc;
531 infop->entry = entry;
532 #endif
533 _regs->nip = infop->entry;
534 /* Note that this isn't exactly what the regular kernel does,
535 * but it is what the ABI wants and is needed to allow
536 * execution of PPC BSD programs.
537 */
538 /* FIXME - what to do on failure of get_user()? */
539 get_user_ual(_regs->gpr[3], pos);
540 pos += sizeof(abi_ulong);
541 _regs->gpr[4] = pos;
542 for (tmp = 1; tmp != 0; pos += sizeof(abi_ulong))
543 tmp = ldl(pos);
544 _regs->gpr[5] = pos;
545 }
546
547 #define ELF_EXEC_PAGESIZE 4096
548
549 #endif
550
551 #ifdef TARGET_MIPS
552
553 #define ELF_START_MMAP 0x80000000
554
555 #define elf_check_arch(x) ( (x) == EM_MIPS )
556
557 #ifdef TARGET_MIPS64
558 #define ELF_CLASS ELFCLASS64
559 #else
560 #define ELF_CLASS ELFCLASS32
561 #endif
562 #ifdef TARGET_WORDS_BIGENDIAN
563 #define ELF_DATA ELFDATA2MSB
564 #else
565 #define ELF_DATA ELFDATA2LSB
566 #endif
567 #define ELF_ARCH EM_MIPS
568
569 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
570 {
571 regs->cp0_status = 2 << CP0St_KSU;
572 regs->cp0_epc = infop->entry;
573 regs->regs[29] = infop->start_stack;
574 }
575
576 #define ELF_EXEC_PAGESIZE 4096
577
578 #endif /* TARGET_MIPS */
579
580 #ifdef TARGET_MICROBLAZE
581
582 #define ELF_START_MMAP 0x80000000
583
584 #define elf_check_arch(x) ( (x) == EM_XILINX_MICROBLAZE )
585
586 #define ELF_CLASS ELFCLASS32
587 #define ELF_DATA ELFDATA2MSB
588 #define ELF_ARCH EM_MIPS
589
590 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
591 {
592 regs->pc = infop->entry;
593 regs->r1 = infop->start_stack;
594
595 }
596
597 #define ELF_EXEC_PAGESIZE 4096
598
599 #endif /* TARGET_MICROBLAZE */
600
601 #ifdef TARGET_SH4
602
603 #define ELF_START_MMAP 0x80000000
604
605 #define elf_check_arch(x) ( (x) == EM_SH )
606
607 #define ELF_CLASS ELFCLASS32
608 #define ELF_DATA ELFDATA2LSB
609 #define ELF_ARCH EM_SH
610
611 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
612 {
613 /* Check other registers XXXXX */
614 regs->pc = infop->entry;
615 regs->regs[15] = infop->start_stack;
616 }
617
618 #define ELF_EXEC_PAGESIZE 4096
619
620 #endif
621
622 #ifdef TARGET_CRIS
623
624 #define ELF_START_MMAP 0x80000000
625
626 #define elf_check_arch(x) ( (x) == EM_CRIS )
627
628 #define ELF_CLASS ELFCLASS32
629 #define ELF_DATA ELFDATA2LSB
630 #define ELF_ARCH EM_CRIS
631
632 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
633 {
634 regs->erp = infop->entry;
635 }
636
637 #define ELF_EXEC_PAGESIZE 8192
638
639 #endif
640
641 #ifdef TARGET_M68K
642
643 #define ELF_START_MMAP 0x80000000
644
645 #define elf_check_arch(x) ( (x) == EM_68K )
646
647 #define ELF_CLASS ELFCLASS32
648 #define ELF_DATA ELFDATA2MSB
649 #define ELF_ARCH EM_68K
650
651 /* ??? Does this need to do anything?
652 #define ELF_PLAT_INIT(_r) */
653
654 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
655 {
656 regs->usp = infop->start_stack;
657 regs->sr = 0;
658 regs->pc = infop->entry;
659 }
660
661 #define ELF_EXEC_PAGESIZE 8192
662
663 #endif
664
665 #ifdef TARGET_ALPHA
666
667 #define ELF_START_MMAP (0x30000000000ULL)
668
669 #define elf_check_arch(x) ( (x) == ELF_ARCH )
670
671 #define ELF_CLASS ELFCLASS64
672 #define ELF_DATA ELFDATA2MSB
673 #define ELF_ARCH EM_ALPHA
674
675 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
676 {
677 regs->pc = infop->entry;
678 regs->ps = 8;
679 regs->usp = infop->start_stack;
680 }
681
682 #define ELF_EXEC_PAGESIZE 8192
683
684 #endif /* TARGET_ALPHA */
685
686 #ifndef ELF_PLATFORM
687 #define ELF_PLATFORM (NULL)
688 #endif
689
690 #ifndef ELF_HWCAP
691 #define ELF_HWCAP 0
692 #endif
693
694 #ifdef TARGET_ABI32
695 #undef ELF_CLASS
696 #define ELF_CLASS ELFCLASS32
697 #undef bswaptls
698 #define bswaptls(ptr) bswap32s(ptr)
699 #endif
700
701 #include "elf.h"
702
703 struct exec
704 {
705 unsigned int a_info; /* Use macros N_MAGIC, etc for access */
706 unsigned int a_text; /* length of text, in bytes */
707 unsigned int a_data; /* length of data, in bytes */
708 unsigned int a_bss; /* length of uninitialized data area, in bytes */
709 unsigned int a_syms; /* length of symbol table data in file, in bytes */
710 unsigned int a_entry; /* start address */
711 unsigned int a_trsize; /* length of relocation info for text, in bytes */
712 unsigned int a_drsize; /* length of relocation info for data, in bytes */
713 };
714
715
716 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
717 #define OMAGIC 0407
718 #define NMAGIC 0410
719 #define ZMAGIC 0413
720 #define QMAGIC 0314
721
722 /* max code+data+bss space allocated to elf interpreter */
723 #define INTERP_MAP_SIZE (32 * 1024 * 1024)
724
725 /* max code+data+bss+brk space allocated to ET_DYN executables */
726 #define ET_DYN_MAP_SIZE (128 * 1024 * 1024)
727
728 /* Necessary parameters */
729 #define TARGET_ELF_EXEC_PAGESIZE TARGET_PAGE_SIZE
730 #define TARGET_ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(TARGET_ELF_EXEC_PAGESIZE-1))
731 #define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE-1))
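
/*
 * For example, with TARGET_ELF_EXEC_PAGESIZE == 4096:
 *   TARGET_ELF_PAGESTART(0x0804a123)  == 0x0804a000
 *   TARGET_ELF_PAGEOFFSET(0x0804a123) == 0x0123
 * i.e. round an address down to its page and take the offset within the
 * page; the loaders below use these to turn p_vaddr/p_offset pairs into
 * page-aligned target_mmap() arguments.
 */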
732
733 #define INTERPRETER_NONE 0
734 #define INTERPRETER_AOUT 1
735 #define INTERPRETER_ELF 2
736
737 #define DLINFO_ITEMS 12
738
739 static inline void memcpy_fromfs(void * to, const void * from, unsigned long n)
740 {
741 memcpy(to, from, n);
742 }
743
744 static int load_aout_interp(void * exptr, int interp_fd);
745
746 #ifdef BSWAP_NEEDED
747 static void bswap_ehdr(struct elfhdr *ehdr)
748 {
749 bswap16s(&ehdr->e_type); /* Object file type */
750 bswap16s(&ehdr->e_machine); /* Architecture */
751 bswap32s(&ehdr->e_version); /* Object file version */
752 bswaptls(&ehdr->e_entry); /* Entry point virtual address */
753 bswaptls(&ehdr->e_phoff); /* Program header table file offset */
754 bswaptls(&ehdr->e_shoff); /* Section header table file offset */
755 bswap32s(&ehdr->e_flags); /* Processor-specific flags */
756 bswap16s(&ehdr->e_ehsize); /* ELF header size in bytes */
757 bswap16s(&ehdr->e_phentsize); /* Program header table entry size */
758 bswap16s(&ehdr->e_phnum); /* Program header table entry count */
759 bswap16s(&ehdr->e_shentsize); /* Section header table entry size */
760 bswap16s(&ehdr->e_shnum); /* Section header table entry count */
761 bswap16s(&ehdr->e_shstrndx); /* Section header string table index */
762 }
763
764 static void bswap_phdr(struct elf_phdr *phdr)
765 {
766 bswap32s(&phdr->p_type); /* Segment type */
767 bswaptls(&phdr->p_offset); /* Segment file offset */
768 bswaptls(&phdr->p_vaddr); /* Segment virtual address */
769 bswaptls(&phdr->p_paddr); /* Segment physical address */
770 bswaptls(&phdr->p_filesz); /* Segment size in file */
771 bswaptls(&phdr->p_memsz); /* Segment size in memory */
772 bswap32s(&phdr->p_flags); /* Segment flags */
773 bswaptls(&phdr->p_align); /* Segment alignment */
774 }
775
776 static void bswap_shdr(struct elf_shdr *shdr)
777 {
778 bswap32s(&shdr->sh_name);
779 bswap32s(&shdr->sh_type);
780 bswaptls(&shdr->sh_flags);
781 bswaptls(&shdr->sh_addr);
782 bswaptls(&shdr->sh_offset);
783 bswaptls(&shdr->sh_size);
784 bswap32s(&shdr->sh_link);
785 bswap32s(&shdr->sh_info);
786 bswaptls(&shdr->sh_addralign);
787 bswaptls(&shdr->sh_entsize);
788 }
789
790 static void bswap_sym(struct elf_sym *sym)
791 {
792 bswap32s(&sym->st_name);
793 bswaptls(&sym->st_value);
794 bswaptls(&sym->st_size);
795 bswap16s(&sym->st_shndx);
796 }
797 #endif
798
799 #ifdef USE_ELF_CORE_DUMP
800 static int elf_core_dump(int, const CPUState *);
801
802 #ifdef BSWAP_NEEDED
803 static void bswap_note(struct elf_note *en)
804 {
805 bswap32s(&en->n_namesz);
806 bswap32s(&en->n_descsz);
807 bswap32s(&en->n_type);
808 }
809 #endif /* BSWAP_NEEDED */
810
811 #endif /* USE_ELF_CORE_DUMP */
812
813 /*
814 * 'copy_elf_strings()' copies argument/environment strings from user
815 * memory to free pages in kernel mem. These are in a format ready
816 * to be put directly into the top of new user memory.
817 *
818 */
819 static abi_ulong copy_elf_strings(int argc,char ** argv, void **page,
820 abi_ulong p)
821 {
822 char *tmp, *tmp1, *pag = NULL;
823 int len, offset = 0;
824
825 if (!p) {
826 return 0; /* bullet-proofing */
827 }
828 while (argc-- > 0) {
829 tmp = argv[argc];
830 if (!tmp) {
831 fprintf(stderr, "VFS: argc is wrong");
832 exit(-1);
833 }
834 tmp1 = tmp;
835 while (*tmp++);
836 len = tmp - tmp1;
837 if (p < len) { /* this shouldn't happen - 128kB */
838 return 0;
839 }
840 while (len) {
841 --p; --tmp; --len;
842 if (--offset < 0) {
843 offset = p % TARGET_PAGE_SIZE;
844 pag = (char *)page[p/TARGET_PAGE_SIZE];
845 if (!pag) {
846 pag = (char *)malloc(TARGET_PAGE_SIZE);
847 if (!pag)
848 return 0;
849 memset(pag, 0, TARGET_PAGE_SIZE);
850 page[p/TARGET_PAGE_SIZE] = pag;
851 }
852 }
853 if (len == 0 || offset == 0) {
854 *(pag + offset) = *tmp;
855 }
856 else {
857 int bytes_to_copy = (len > offset) ? offset : len;
858 tmp -= bytes_to_copy;
859 p -= bytes_to_copy;
860 offset -= bytes_to_copy;
861 len -= bytes_to_copy;
862 memcpy_fromfs(pag + offset, tmp, bytes_to_copy + 1);
863 }
864 }
865 }
866 return p;
867 }
868
869 static abi_ulong setup_arg_pages(abi_ulong p, struct linux_binprm *bprm,
870 struct image_info *info)
871 {
872 abi_ulong stack_base, size, error;
873 int i;
874
875 /* Create enough stack to hold everything. If we don't use
876 * it for args, we'll use it for something else...
877 */
878 size = x86_stack_size;
879 if (size < MAX_ARG_PAGES*TARGET_PAGE_SIZE)
880 size = MAX_ARG_PAGES*TARGET_PAGE_SIZE;
881 error = target_mmap(0,
882 size + qemu_host_page_size,
883 PROT_READ | PROT_WRITE,
884 MAP_PRIVATE | MAP_ANONYMOUS,
885 -1, 0);
886 if (error == -1) {
887 perror("stk mmap");
888 exit(-1);
889 }
890 /* we reserve one extra page at the top of the stack as guard */
891 target_mprotect(error + size, qemu_host_page_size, PROT_NONE);
892
893 stack_base = error + size - MAX_ARG_PAGES*TARGET_PAGE_SIZE;
894 p += stack_base;
895
896 for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
897 if (bprm->page[i]) {
898 info->rss++;
899 /* FIXME - check return value of memcpy_to_target() for failure */
900 memcpy_to_target(stack_base, bprm->page[i], TARGET_PAGE_SIZE);
901 free(bprm->page[i]);
902 }
903 stack_base += TARGET_PAGE_SIZE;
904 }
905 return p;
906 }
907
908 static void set_brk(abi_ulong start, abi_ulong end)
909 {
910 /* page-align the start and end addresses... */
911 start = HOST_PAGE_ALIGN(start);
912 end = HOST_PAGE_ALIGN(end);
913 if (end <= start)
914 return;
915 if(target_mmap(start, end - start,
916 PROT_READ | PROT_WRITE | PROT_EXEC,
917 MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0) == -1) {
918 perror("cannot mmap brk");
919 exit(-1);
920 }
921 }
922
923
924 /* We need to explicitly zero any fractional pages after the data
925 section (i.e. bss). This would contain the junk from the file that
926 should not be in memory. */
927 static void padzero(abi_ulong elf_bss, abi_ulong last_bss)
928 {
929 abi_ulong nbyte;
930
931 if (elf_bss >= last_bss)
932 return;
933
934 /* XXX: this is really a hack : if the real host page size is
935 smaller than the target page size, some pages after the end
936 of the file may not be mapped. A better fix would be to
937 patch target_mmap(), but it is more complicated as the file
938 size must be known */
939 if (qemu_real_host_page_size < qemu_host_page_size) {
940 abi_ulong end_addr, end_addr1;
941 end_addr1 = (elf_bss + qemu_real_host_page_size - 1) &
942 ~(qemu_real_host_page_size - 1);
943 end_addr = HOST_PAGE_ALIGN(elf_bss);
944 if (end_addr1 < end_addr) {
945 mmap((void *)g2h(end_addr1), end_addr - end_addr1,
946 PROT_READ|PROT_WRITE|PROT_EXEC,
947 MAP_FIXED|MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
948 }
949 }
950
951 nbyte = elf_bss & (qemu_host_page_size-1);
952 if (nbyte) {
953 nbyte = qemu_host_page_size - nbyte;
954 do {
955 /* FIXME - what to do if put_user() fails? */
956 put_user_u8(0, elf_bss);
957 elf_bss++;
958 } while (--nbyte);
959 }
960 }
961
962
963 static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc,
964 struct elfhdr * exec,
965 abi_ulong load_addr,
966 abi_ulong load_bias,
967 abi_ulong interp_load_addr, int ibcs,
968 struct image_info *info)
969 {
970 abi_ulong sp;
971 int size;
972 abi_ulong u_platform;
973 const char *k_platform;
974 const int n = sizeof(elf_addr_t);
975
976 sp = p;
977 u_platform = 0;
978 k_platform = ELF_PLATFORM;
979 if (k_platform) {
980 size_t len = strlen(k_platform) + 1;
981 sp -= (len + n - 1) & ~(n - 1);
982 u_platform = sp;
983 /* FIXME - check return value of memcpy_to_target() for failure */
984 memcpy_to_target(sp, k_platform, len);
985 }
986 /*
987 * Force 16 byte _final_ alignment here for generality.
988 */
989 sp = sp &~ (abi_ulong)15;
990 size = (DLINFO_ITEMS + 1) * 2;
991 if (k_platform)
992 size += 2;
993 #ifdef DLINFO_ARCH_ITEMS
994 size += DLINFO_ARCH_ITEMS * 2;
995 #endif
996 size += envc + argc + 2;
997 size += (!ibcs ? 3 : 1); /* argc itself */
998 size *= n;
999 if (size & 15)
1000 sp -= 16 - (size & 15);
1001
1002 /* This is correct because Linux defines
1003 * elf_addr_t as Elf32_Off / Elf64_Off
1004 */
1005 #define NEW_AUX_ENT(id, val) do { \
1006 sp -= n; put_user_ual(val, sp); \
1007 sp -= n; put_user_ual(id, sp); \
1008 } while(0)
1009
1010 NEW_AUX_ENT (AT_NULL, 0);
1011
1012 /* There must be exactly DLINFO_ITEMS entries here. */
1013 NEW_AUX_ENT(AT_PHDR, (abi_ulong)(load_addr + exec->e_phoff));
1014 NEW_AUX_ENT(AT_PHENT, (abi_ulong)(sizeof (struct elf_phdr)));
1015 NEW_AUX_ENT(AT_PHNUM, (abi_ulong)(exec->e_phnum));
1016 NEW_AUX_ENT(AT_PAGESZ, (abi_ulong)(TARGET_PAGE_SIZE));
1017 NEW_AUX_ENT(AT_BASE, (abi_ulong)(interp_load_addr));
1018 NEW_AUX_ENT(AT_FLAGS, (abi_ulong)0);
1019 NEW_AUX_ENT(AT_ENTRY, load_bias + exec->e_entry);
1020 NEW_AUX_ENT(AT_UID, (abi_ulong) getuid());
1021 NEW_AUX_ENT(AT_EUID, (abi_ulong) geteuid());
1022 NEW_AUX_ENT(AT_GID, (abi_ulong) getgid());
1023 NEW_AUX_ENT(AT_EGID, (abi_ulong) getegid());
1024 NEW_AUX_ENT(AT_HWCAP, (abi_ulong) ELF_HWCAP);
1025 NEW_AUX_ENT(AT_CLKTCK, (abi_ulong) sysconf(_SC_CLK_TCK));
1026 if (k_platform)
1027 NEW_AUX_ENT(AT_PLATFORM, u_platform);
1028 #ifdef ARCH_DLINFO
1029 /*
1030 * ARCH_DLINFO must come last so platform specific code can enforce
1031 * special alignment requirements on the AUXV if necessary (eg. PPC).
1032 */
1033 ARCH_DLINFO;
1034 #endif
1035 #undef NEW_AUX_ENT
1036
1037 info->saved_auxv = sp;
1038
1039 sp = loader_build_argptr(envc, argc, sp, p, !ibcs);
1040 return sp;
1041 }
1042
1043
1044 static abi_ulong load_elf_interp(struct elfhdr * interp_elf_ex,
1045 int interpreter_fd,
1046 abi_ulong *interp_load_addr)
1047 {
1048 struct elf_phdr *elf_phdata = NULL;
1049 struct elf_phdr *eppnt;
1050 abi_ulong load_addr = 0;
1051 int load_addr_set = 0;
1052 int retval;
1053 abi_ulong last_bss, elf_bss;
1054 abi_ulong error;
1055 int i;
1056
1057 elf_bss = 0;
1058 last_bss = 0;
1059 error = 0;
1060
1061 #ifdef BSWAP_NEEDED
1062 bswap_ehdr(interp_elf_ex);
1063 #endif
1064 /* First of all, some simple consistency checks */
1065 if ((interp_elf_ex->e_type != ET_EXEC &&
1066 interp_elf_ex->e_type != ET_DYN) ||
1067 !elf_check_arch(interp_elf_ex->e_machine)) {
1068 return ~((abi_ulong)0UL);
1069 }
1070
1071
1072 /* Now read in all of the header information */
1073
1074 if (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum > TARGET_PAGE_SIZE)
1075 return ~(abi_ulong)0UL;
1076
1077 elf_phdata = (struct elf_phdr *)
1078 malloc(sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
1079
1080 if (!elf_phdata)
1081 return ~((abi_ulong)0UL);
1082
1083 /*
1084 * If the size of this structure has changed, then punt, since
1085 * we will be doing the wrong thing.
1086 */
1087 if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr)) {
1088 free(elf_phdata);
1089 return ~((abi_ulong)0UL);
1090 }
1091
1092 retval = lseek(interpreter_fd, interp_elf_ex->e_phoff, SEEK_SET);
1093 if(retval >= 0) {
1094 retval = read(interpreter_fd,
1095 (char *) elf_phdata,
1096 sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
1097 }
1098 if (retval < 0) {
1099 perror("load_elf_interp");
1100 exit(-1);
1101 free (elf_phdata);
1102 return retval;
1103 }
1104 #ifdef BSWAP_NEEDED
1105 eppnt = elf_phdata;
1106 for (i=0; i<interp_elf_ex->e_phnum; i++, eppnt++) {
1107 bswap_phdr(eppnt);
1108 }
1109 #endif
1110
1111 if (interp_elf_ex->e_type == ET_DYN) {
1112 /* in order to avoid hardcoding the interpreter load
1113 address in qemu, we allocate a big enough memory zone */
1114 error = target_mmap(0, INTERP_MAP_SIZE,
1115 PROT_NONE, MAP_PRIVATE | MAP_ANON,
1116 -1, 0);
1117 if (error == -1) {
1118 perror("mmap");
1119 exit(-1);
1120 }
1121 load_addr = error;
1122 load_addr_set = 1;
1123 }
1124
1125 eppnt = elf_phdata;
1126 for(i=0; i<interp_elf_ex->e_phnum; i++, eppnt++)
1127 if (eppnt->p_type == PT_LOAD) {
1128 int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
1129 int elf_prot = 0;
1130 abi_ulong vaddr = 0;
1131 abi_ulong k;
1132
1133 if (eppnt->p_flags & PF_R) elf_prot = PROT_READ;
1134 if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
1135 if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
1136 if (interp_elf_ex->e_type == ET_EXEC || load_addr_set) {
1137 elf_type |= MAP_FIXED;
1138 vaddr = eppnt->p_vaddr;
1139 }
1140 error = target_mmap(load_addr+TARGET_ELF_PAGESTART(vaddr),
1141 eppnt->p_filesz + TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr),
1142 elf_prot,
1143 elf_type,
1144 interpreter_fd,
1145 eppnt->p_offset - TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr));
1146
1147 if (error == -1) {
1148 /* Real error */
1149 close(interpreter_fd);
1150 free(elf_phdata);
1151 return ~((abi_ulong)0UL);
1152 }
1153
1154 if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) {
1155 load_addr = error;
1156 load_addr_set = 1;
1157 }
1158
1159 /*
1160 * Find the end of the file mapping for this phdr, and keep
1161 * track of the largest address we see for this.
1162 */
1163 k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
1164 if (k > elf_bss) elf_bss = k;
1165
1166 /*
1167 * Do the same thing for the memory mapping - between
1168 * elf_bss and last_bss is the bss section.
1169 */
1170 k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
1171 if (k > last_bss) last_bss = k;
1172 }
1173
1174 /* Now use mmap to map the library into memory. */
1175
1176 close(interpreter_fd);
1177
1178 /*
1179 * Now fill out the bss section. First pad the last page up
1180 * to the page boundary, and then perform a mmap to make sure
1181 * that there are zeromapped pages up to and including the last
1182 * bss page.
1183 */
1184 padzero(elf_bss, last_bss);
1185 elf_bss = TARGET_ELF_PAGESTART(elf_bss + qemu_host_page_size - 1); /* What we have mapped so far */
1186
1187 /* Map the last of the bss segment */
1188 if (last_bss > elf_bss) {
1189 target_mmap(elf_bss, last_bss-elf_bss,
1190 PROT_READ|PROT_WRITE|PROT_EXEC,
1191 MAP_FIXED|MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
1192 }
1193 free(elf_phdata);
1194
1195 *interp_load_addr = load_addr;
1196 return ((abi_ulong) interp_elf_ex->e_entry) + load_addr;
1197 }
1198
1199 static int symfind(const void *s0, const void *s1)
1200 {
1201 struct elf_sym *key = (struct elf_sym *)s0;
1202 struct elf_sym *sym = (struct elf_sym *)s1;
1203 int result = 0;
1204 if (key->st_value < sym->st_value) {
1205 result = -1;
1206 } else if (key->st_value >= sym->st_value + sym->st_size) {
1207 result = 1;
1208 }
1209 return result;
1210 }
1211
1212 static const char *lookup_symbolxx(struct syminfo *s, target_ulong orig_addr)
1213 {
1214 #if ELF_CLASS == ELFCLASS32
1215 struct elf_sym *syms = s->disas_symtab.elf32;
1216 #else
1217 struct elf_sym *syms = s->disas_symtab.elf64;
1218 #endif
1219
1220 // binary search
1221 struct elf_sym key;
1222 struct elf_sym *sym;
1223
1224 key.st_value = orig_addr;
1225
1226 sym = bsearch(&key, syms, s->disas_num_syms, sizeof(*syms), symfind);
1227 if (sym != NULL) {
1228 return s->disas_strtab + sym->st_name;
1229 }
1230
1231 return "";
1232 }
1233
1234 /* FIXME: This should use elf_ops.h */
1235 static int symcmp(const void *s0, const void *s1)
1236 {
1237 struct elf_sym *sym0 = (struct elf_sym *)s0;
1238 struct elf_sym *sym1 = (struct elf_sym *)s1;
1239 return (sym0->st_value < sym1->st_value)
1240 ? -1
1241 : ((sym0->st_value > sym1->st_value) ? 1 : 0);
1242 }
1243
1244 /* Best attempt to load symbols from this ELF object. */
1245 static void load_symbols(struct elfhdr *hdr, int fd)
1246 {
1247 unsigned int i, nsyms;
1248 struct elf_shdr sechdr, symtab, strtab;
1249 char *strings;
1250 struct syminfo *s;
1251 struct elf_sym *syms;
1252
1253 lseek(fd, hdr->e_shoff, SEEK_SET);
1254 for (i = 0; i < hdr->e_shnum; i++) {
1255 if (read(fd, &sechdr, sizeof(sechdr)) != sizeof(sechdr))
1256 return;
1257 #ifdef BSWAP_NEEDED
1258 bswap_shdr(&sechdr);
1259 #endif
1260 if (sechdr.sh_type == SHT_SYMTAB) {
1261 symtab = sechdr;
1262 lseek(fd, hdr->e_shoff
1263 + sizeof(sechdr) * sechdr.sh_link, SEEK_SET);
1264 if (read(fd, &strtab, sizeof(strtab))
1265 != sizeof(strtab))
1266 return;
1267 #ifdef BSWAP_NEEDED
1268 bswap_shdr(&strtab);
1269 #endif
1270 goto found;
1271 }
1272 }
1273 return; /* Shouldn't happen... */
1274
1275 found:
1276 /* Now know where the strtab and symtab are. Snarf them. */
1277 s = malloc(sizeof(*s));
1278 syms = malloc(symtab.sh_size);
1279 if (!syms)
1280 return;
1281 s->disas_strtab = strings = malloc(strtab.sh_size);
1282 if (!s->disas_strtab)
1283 return;
1284
1285 lseek(fd, symtab.sh_offset, SEEK_SET);
1286 if (read(fd, syms, symtab.sh_size) != symtab.sh_size)
1287 return;
1288
1289 nsyms = symtab.sh_size / sizeof(struct elf_sym);
1290
1291 i = 0;
1292 while (i < nsyms) {
1293 #ifdef BSWAP_NEEDED
1294 bswap_sym(syms + i);
1295 #endif
1296 // Throw away entries which we do not need.
1297 if (syms[i].st_shndx == SHN_UNDEF ||
1298 syms[i].st_shndx >= SHN_LORESERVE ||
1299 ELF_ST_TYPE(syms[i].st_info) != STT_FUNC) {
1300 nsyms--;
1301 if (i < nsyms) {
1302 syms[i] = syms[nsyms];
1303 }
1304 continue;
1305 }
1306 #if defined(TARGET_ARM) || defined (TARGET_MIPS)
1307 /* The bottom address bit marks a Thumb or MIPS16 symbol. */
1308 syms[i].st_value &= ~(target_ulong)1;
1309 #endif
1310 i++;
1311 }
1312 syms = realloc(syms, nsyms * sizeof(*syms));
1313
1314 qsort(syms, nsyms, sizeof(*syms), symcmp);
1315
1316 lseek(fd, strtab.sh_offset, SEEK_SET);
1317 if (read(fd, strings, strtab.sh_size) != strtab.sh_size)
1318 return;
1319 s->disas_num_syms = nsyms;
1320 #if ELF_CLASS == ELFCLASS32
1321 s->disas_symtab.elf32 = syms;
1322 s->lookup_symbol = (lookup_symbol_t)lookup_symbolxx;
1323 #else
1324 s->disas_symtab.elf64 = syms;
1325 s->lookup_symbol = (lookup_symbol_t)lookup_symbolxx;
1326 #endif
1327 s->next = syminfos;
1328 syminfos = s;
1329 }
1330
1331 int load_elf_binary(struct linux_binprm * bprm, struct target_pt_regs * regs,
1332 struct image_info * info)
1333 {
1334 struct elfhdr elf_ex;
1335 struct elfhdr interp_elf_ex;
1336 struct exec interp_ex;
1337 int interpreter_fd = -1; /* avoid warning */
1338 abi_ulong load_addr, load_bias;
1339 int load_addr_set = 0;
1340 unsigned int interpreter_type = INTERPRETER_NONE;
1341 unsigned char ibcs2_interpreter;
1342 int i;
1343 abi_ulong mapped_addr;
1344 struct elf_phdr * elf_ppnt;
1345 struct elf_phdr *elf_phdata;
1346 abi_ulong elf_bss, k, elf_brk;
1347 int retval;
1348 char * elf_interpreter;
1349 abi_ulong elf_entry, interp_load_addr = 0;
1350 int status;
1351 abi_ulong start_code, end_code, start_data, end_data;
1352 abi_ulong reloc_func_desc = 0;
1353 abi_ulong elf_stack;
1354 char passed_fileno[6];
1355
1356 ibcs2_interpreter = 0;
1357 status = 0;
1358 load_addr = 0;
1359 load_bias = 0;
1360 elf_ex = *((struct elfhdr *) bprm->buf); /* exec-header */
1361 #ifdef BSWAP_NEEDED
1362 bswap_ehdr(&elf_ex);
1363 #endif
1364
1365 /* First of all, some simple consistency checks */
1366 if ((elf_ex.e_type != ET_EXEC && elf_ex.e_type != ET_DYN) ||
1367 (! elf_check_arch(elf_ex.e_machine))) {
1368 return -ENOEXEC;
1369 }
1370
1371 bprm->p = copy_elf_strings(1, &bprm->filename, bprm->page, bprm->p);
1372 bprm->p = copy_elf_strings(bprm->envc,bprm->envp,bprm->page,bprm->p);
1373 bprm->p = copy_elf_strings(bprm->argc,bprm->argv,bprm->page,bprm->p);
1374 if (!bprm->p) {
1375 retval = -E2BIG;
1376 }
1377
1378 /* Now read in all of the header information */
1379 elf_phdata = (struct elf_phdr *)malloc(elf_ex.e_phentsize*elf_ex.e_phnum);
1380 if (elf_phdata == NULL) {
1381 return -ENOMEM;
1382 }
1383
1384 retval = lseek(bprm->fd, elf_ex.e_phoff, SEEK_SET);
1385 if(retval > 0) {
1386 retval = read(bprm->fd, (char *) elf_phdata,
1387 elf_ex.e_phentsize * elf_ex.e_phnum);
1388 }
1389
1390 if (retval < 0) {
1391 perror("load_elf_binary");
1392 exit(-1);
1393 free (elf_phdata);
1394 return -errno;
1395 }
1396
1397 #ifdef BSWAP_NEEDED
1398 elf_ppnt = elf_phdata;
1399 for (i=0; i<elf_ex.e_phnum; i++, elf_ppnt++) {
1400 bswap_phdr(elf_ppnt);
1401 }
1402 #endif
1403 elf_ppnt = elf_phdata;
1404
1405 elf_bss = 0;
1406 elf_brk = 0;
1407
1408
1409 elf_stack = ~((abi_ulong)0UL);
1410 elf_interpreter = NULL;
1411 start_code = ~((abi_ulong)0UL);
1412 end_code = 0;
1413 start_data = 0;
1414 end_data = 0;
1415 interp_ex.a_info = 0;
1416
1417 for(i=0;i < elf_ex.e_phnum; i++) {
1418 if (elf_ppnt->p_type == PT_INTERP) {
1419 if ( elf_interpreter != NULL )
1420 {
1421 free (elf_phdata);
1422 free(elf_interpreter);
1423 close(bprm->fd);
1424 return -EINVAL;
1425 }
1426
1427 /* This is the program interpreter used for
1428 * shared libraries - for now assume that this
1429 * is an a.out format binary
1430 */
1431
1432 elf_interpreter = (char *)malloc(elf_ppnt->p_filesz);
1433
1434 if (elf_interpreter == NULL) {
1435 free (elf_phdata);
1436 close(bprm->fd);
1437 return -ENOMEM;
1438 }
1439
1440 retval = lseek(bprm->fd, elf_ppnt->p_offset, SEEK_SET);
1441 if(retval >= 0) {
1442 retval = read(bprm->fd, elf_interpreter, elf_ppnt->p_filesz);
1443 }
1444 if(retval < 0) {
1445 perror("load_elf_binary2");
1446 exit(-1);
1447 }
1448
1449 /* If the program interpreter is one of these two,
1450 then assume an iBCS2 image. Otherwise assume
1451 a native linux image. */
1452
1453 /* JRP - Need to add X86 lib dir stuff here... */
1454
1455 if (strcmp(elf_interpreter,"/usr/lib/libc.so.1") == 0 ||
1456 strcmp(elf_interpreter,"/usr/lib/ld.so.1") == 0) {
1457 ibcs2_interpreter = 1;
1458 }
1459
1460 #if 0
1461 printf("Using ELF interpreter %s\n", path(elf_interpreter));
1462 #endif
1463 if (retval >= 0) {
1464 retval = open(path(elf_interpreter), O_RDONLY);
1465 if(retval >= 0) {
1466 interpreter_fd = retval;
1467 }
1468 else {
1469 perror(elf_interpreter);
1470 exit(-1);
1471 /* retval = -errno; */
1472 }
1473 }
1474
1475 if (retval >= 0) {
1476 retval = lseek(interpreter_fd, 0, SEEK_SET);
1477 if(retval >= 0) {
1478 retval = read(interpreter_fd,bprm->buf,128);
1479 }
1480 }
1481 if (retval >= 0) {
1482 interp_ex = *((struct exec *) bprm->buf); /* aout exec-header */
1483 interp_elf_ex = *((struct elfhdr *) bprm->buf); /* elf exec-header */
1484 }
1485 if (retval < 0) {
1486 perror("load_elf_binary3");
1487 exit(-1);
1488 free (elf_phdata);
1489 free(elf_interpreter);
1490 close(bprm->fd);
1491 return retval;
1492 }
1493 }
1494 elf_ppnt++;
1495 }
1496
1497 /* Some simple consistency checks for the interpreter */
1498 if (elf_interpreter){
1499 interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;
1500
1501 /* Now figure out which format our binary is */
1502 if ((N_MAGIC(interp_ex) != OMAGIC) && (N_MAGIC(interp_ex) != ZMAGIC) &&
1503 (N_MAGIC(interp_ex) != QMAGIC)) {
1504 interpreter_type = INTERPRETER_ELF;
1505 }
1506
1507 if (interp_elf_ex.e_ident[0] != 0x7f ||
1508 strncmp((char *)&interp_elf_ex.e_ident[1], "ELF",3) != 0) {
1509 interpreter_type &= ~INTERPRETER_ELF;
1510 }
1511
1512 if (!interpreter_type) {
1513 free(elf_interpreter);
1514 free(elf_phdata);
1515 close(bprm->fd);
1516 return -ELIBBAD;
1517 }
1518 }
1519
1520 /* OK, we are done with that, now set up the arg stuff,
1521 and then start this sucker up */
1522
1523 {
1524 char * passed_p;
1525
1526 if (interpreter_type == INTERPRETER_AOUT) {
1527 snprintf(passed_fileno, sizeof(passed_fileno), "%d", bprm->fd);
1528 passed_p = passed_fileno;
1529
1530 if (elf_interpreter) {
1531 bprm->p = copy_elf_strings(1,&passed_p,bprm->page,bprm->p);
1532 bprm->argc++;
1533 }
1534 }
1535 if (!bprm->p) {
1536 if (elf_interpreter) {
1537 free(elf_interpreter);
1538 }
1539 free (elf_phdata);
1540 close(bprm->fd);
1541 return -E2BIG;
1542 }
1543 }
1544
1545 /* OK, This is the point of no return */
1546 info->end_data = 0;
1547 info->end_code = 0;
1548 info->start_mmap = (abi_ulong)ELF_START_MMAP;
1549 info->mmap = 0;
1550 elf_entry = (abi_ulong) elf_ex.e_entry;
1551
1552 #if defined(CONFIG_USE_GUEST_BASE)
1553 /*
1554 * In the case where the user has not explicitly set guest_base, we
1555 * probe here whether we should set it automatically.
1556 */
1557 if (!have_guest_base) {
1558 /*
1559 * Go through the ELF program header table and find out whether
1560 * any of the segments drop below our current mmap_min_addr and,
1561 * in that case, set guest_base to the corresponding address.
1562 */
1563 for (i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum;
1564 i++, elf_ppnt++) {
1565 if (elf_ppnt->p_type != PT_LOAD)
1566 continue;
1567 if (HOST_PAGE_ALIGN(elf_ppnt->p_vaddr) < mmap_min_addr) {
1568 guest_base = HOST_PAGE_ALIGN(mmap_min_addr);
1569 break;
1570 }
1571 }
1572 }
1573 #endif /* CONFIG_USE_GUEST_BASE */
1574
1575 /* Do this so that we can load the interpreter, if need be. We will
1576 change some of these later */
1577 info->rss = 0;
1578 bprm->p = setup_arg_pages(bprm->p, bprm, info);
1579 info->start_stack = bprm->p;
1580
1581 /* Now we do a little grungy work by mmapping the ELF image into
1582 * the correct location in memory. At this point, we assume that
1583 * the image should be loaded at a fixed address, not at a variable
1584 * address.
1585 */
1586
1587 for(i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum; i++, elf_ppnt++) {
1588 int elf_prot = 0;
1589 int elf_flags = 0;
1590 abi_ulong error;
1591
1592 if (elf_ppnt->p_type != PT_LOAD)
1593 continue;
1594
1595 if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ;
1596 if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
1597 if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
1598 elf_flags = MAP_PRIVATE | MAP_DENYWRITE;
1599 if (elf_ex.e_type == ET_EXEC || load_addr_set) {
1600 elf_flags |= MAP_FIXED;
1601 } else if (elf_ex.e_type == ET_DYN) {
1602 /* Try and get dynamic programs out of the way of the default mmap
1603 base, as well as whatever program they might try to exec. This
1604 is because the brk will follow the loader, and is not movable. */
1605 /* NOTE: for qemu, we do a big mmap to get enough space
1606 without hardcoding any address */
1607 error = target_mmap(0, ET_DYN_MAP_SIZE,
1608 PROT_NONE, MAP_PRIVATE | MAP_ANON,
1609 -1, 0);
1610 if (error == -1) {
1611 perror("mmap");
1612 exit(-1);
1613 }
1614 load_bias = TARGET_ELF_PAGESTART(error - elf_ppnt->p_vaddr);
1615 }
1616
1617 error = target_mmap(TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr),
1618 (elf_ppnt->p_filesz +
1619 TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)),
1620 elf_prot,
1621 (MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE),
1622 bprm->fd,
1623 (elf_ppnt->p_offset -
1624 TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)));
1625 if (error == -1) {
1626 perror("mmap");
1627 exit(-1);
1628 }
1629
1630 #ifdef LOW_ELF_STACK
1631 if (TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr) < elf_stack)
1632 elf_stack = TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr);
1633 #endif
1634
1635 if (!load_addr_set) {
1636 load_addr_set = 1;
1637 load_addr = elf_ppnt->p_vaddr - elf_ppnt->p_offset;
1638 if (elf_ex.e_type == ET_DYN) {
1639 load_bias += error -
1640 TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr);
1641 load_addr += load_bias;
1642 reloc_func_desc = load_bias;
1643 }
1644 }
1645 k = elf_ppnt->p_vaddr;
1646 if (k < start_code)
1647 start_code = k;
1648 if (start_data < k)
1649 start_data = k;
1650 k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
1651 if (k > elf_bss)
1652 elf_bss = k;
1653 if ((elf_ppnt->p_flags & PF_X) && end_code < k)
1654 end_code = k;
1655 if (end_data < k)
1656 end_data = k;
1657 k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
1658 if (k > elf_brk) elf_brk = k;
1659 }
1660
1661 elf_entry += load_bias;
1662 elf_bss += load_bias;
1663 elf_brk += load_bias;
1664 start_code += load_bias;
1665 end_code += load_bias;
1666 start_data += load_bias;
1667 end_data += load_bias;
1668
1669 if (elf_interpreter) {
1670 if (interpreter_type & 1) {
1671 elf_entry = load_aout_interp(&interp_ex, interpreter_fd);
1672 }
1673 else if (interpreter_type & 2) {
1674 elf_entry = load_elf_interp(&interp_elf_ex, interpreter_fd,
1675 &interp_load_addr);
1676 }
1677 reloc_func_desc = interp_load_addr;
1678
1679 close(interpreter_fd);
1680 free(elf_interpreter);
1681
1682 if (elf_entry == ~((abi_ulong)0UL)) {
1683 printf("Unable to load interpreter\n");
1684 free(elf_phdata);
1685 exit(-1);
1686 return 0;
1687 }
1688 }
1689
1690 free(elf_phdata);
1691
1692 if (qemu_log_enabled())
1693 load_symbols(&elf_ex, bprm->fd);
1694
1695 if (interpreter_type != INTERPRETER_AOUT) close(bprm->fd);
1696 info->personality = (ibcs2_interpreter ? PER_SVR4 : PER_LINUX);
1697
1698 #ifdef LOW_ELF_STACK
1699 info->start_stack = bprm->p = elf_stack - 4;
1700 #endif
1701 bprm->p = create_elf_tables(bprm->p,
1702 bprm->argc,
1703 bprm->envc,
1704 &elf_ex,
1705 load_addr, load_bias,
1706 interp_load_addr,
1707 (interpreter_type == INTERPRETER_AOUT ? 0 : 1),
1708 info);
1709 info->load_addr = reloc_func_desc;
1710 info->start_brk = info->brk = elf_brk;
1711 info->end_code = end_code;
1712 info->start_code = start_code;
1713 info->start_data = start_data;
1714 info->end_data = end_data;
1715 info->start_stack = bprm->p;
1716
1717 /* Calling set_brk effectively mmaps the pages that we need for the bss and break
1718 sections */
1719 set_brk(elf_bss, elf_brk);
1720
1721 padzero(elf_bss, elf_brk);
1722
1723 #if 0
1724 printf("(start_brk) %x\n" , info->start_brk);
1725 printf("(end_code) %x\n" , info->end_code);
1726 printf("(start_code) %x\n" , info->start_code);
1727 printf("(end_data) %x\n" , info->end_data);
1728 printf("(start_stack) %x\n" , info->start_stack);
1729 printf("(brk) %x\n" , info->brk);
1730 #endif
1731
1732 if ( info->personality == PER_SVR4 )
1733 {
1734 /* Why this, you ask??? Well SVr4 maps page 0 as read-only,
1735 and some applications "depend" upon this behavior.
1736 Since we do not have the power to recompile these, we
1737 emulate the SVr4 behavior. Sigh. */
1738 mapped_addr = target_mmap(0, qemu_host_page_size, PROT_READ | PROT_EXEC,
1739 MAP_FIXED | MAP_PRIVATE, -1, 0);
1740 }
1741
1742 info->entry = elf_entry;
1743
1744 #ifdef USE_ELF_CORE_DUMP
1745 bprm->core_dump = &elf_core_dump;
1746 #endif
1747
1748 return 0;
1749 }
1750
1751 #ifdef USE_ELF_CORE_DUMP
1752
1753 /*
1754 * Definitions to generate Intel SVR4-like core files.
1755 * These mostly have the same names as the SVR4 types with "target_elf_"
1756 * tacked on the front to prevent clashes with linux definitions,
1757 * and the typedef forms have been avoided. This is mostly like
1758 * the SVR4 structure, but more Linuxy, with things that Linux does
1759 * not support and which gdb doesn't really use excluded.
1760 *
1761 * Fields we don't dump (their contents are zero) in linux-user qemu
1762 * are marked with XXX.
1763 *
1764 * Core dump code is copied from linux kernel (fs/binfmt_elf.c).
1765 *
1766 * Porting ELF coredump to a new target is a (quite) simple process. First
1767 * you define USE_ELF_CORE_DUMP in the target's ELF code (where init_thread()
1768 * for the target resides):
1769 *
1770 * #define USE_ELF_CORE_DUMP
1771 *
1772 * Next you define the type of register set used for dumping. The ELF
1773 * specification says that it needs to be an array of elf_greg_t of size ELF_NREG.
1774 *
1775 * typedef <target_regtype> target_elf_greg_t;
1776 * #define ELF_NREG <number of registers>
1777 * typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
1778 *
1779 * Then define the following types to match the target types. The actual
1780 * types can be found in the Linux kernel (arch/<ARCH>/include/asm/posix_types.h):
1781 *
1782 * typedef <target_uid_type> target_uid_t;
1783 * typedef <target_gid_type> target_gid_t;
1784 * typedef <target_pid_type> target_pid_t;
1785 *
1786 * The last step is to implement a target-specific function that copies
1787 * registers from the given CPU into the register set above. The prototype is:
1788 *
1789 * static void elf_core_copy_regs(target_elf_gregset_t *regs,
1790 * const CPUState *env);
1791 *
1792 * Parameters:
1793 * regs - copy register values into here (allocated and zeroed by caller)
1794 * env - copy registers from here
1795 *
1796 * An example for the ARM target is provided in this file.
1797 */
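
/*
 * Purely illustrative sketch of the recipe above for a hypothetical target;
 * the register count and the env->regs[] layout are made-up placeholders.
 * A real port must mirror the layout in the target kernel's
 * arch/<ARCH>/include/asm/elf.h and asm/posix_types.h (compare the ARM and
 * i386 versions earlier in this file).
 */
#if 0
#define USE_ELF_CORE_DUMP

typedef abi_ulong target_elf_greg_t;   /* one dumped register */
typedef uint32_t target_uid_t;         /* match asm/posix_types.h */
typedef uint32_t target_gid_t;
typedef int32_t target_pid_t;

#define ELF_NREG 32                    /* placeholder register count */
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];

static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUState *env)
{
    int i;

    /* Copy the general purpose registers in the order the target kernel
       dumps them, as the ARM example earlier in this file does. */
    for (i = 0; i < ELF_NREG; i++) {
        (*regs)[i] = env->regs[i];
    }
}
#endif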
1798
1799 /* An ELF note in memory */
1800 struct memelfnote {
1801 const char *name;
1802 size_t namesz;
1803 size_t namesz_rounded;
1804 int type;
1805 size_t datasz;
1806 void *data;
1807 size_t notesz;
1808 };
1809
1810 struct target_elf_siginfo {
1811 int si_signo; /* signal number */
1812 int si_code; /* extra code */
1813 int si_errno; /* errno */
1814 };
1815
1816 struct target_elf_prstatus {
1817 struct target_elf_siginfo pr_info; /* Info associated with signal */
1818 short pr_cursig; /* Current signal */
1819 target_ulong pr_sigpend; /* XXX */
1820 target_ulong pr_sighold; /* XXX */
1821 target_pid_t pr_pid;
1822 target_pid_t pr_ppid;
1823 target_pid_t pr_pgrp;
1824 target_pid_t pr_sid;
1825 struct target_timeval pr_utime; /* XXX User time */
1826 struct target_timeval pr_stime; /* XXX System time */
1827 struct target_timeval pr_cutime; /* XXX Cumulative user time */
1828 struct target_timeval pr_cstime; /* XXX Cumulative system time */
1829 target_elf_gregset_t pr_reg; /* GP registers */
1830 int pr_fpvalid; /* XXX */
1831 };
1832
1833 #define ELF_PRARGSZ (80) /* Number of chars for args */
1834
1835 struct target_elf_prpsinfo {
1836 char pr_state; /* numeric process state */
1837 char pr_sname; /* char for pr_state */
1838 char pr_zomb; /* zombie */
1839 char pr_nice; /* nice val */
1840 target_ulong pr_flag; /* flags */
1841 target_uid_t pr_uid;
1842 target_gid_t pr_gid;
1843 target_pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
1844 /* Lots missing */
1845 char pr_fname[16]; /* filename of executable */
1846 char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
1847 };
1848
1849 /* Here is the structure in which status of each thread is captured. */
1850 struct elf_thread_status {
1851 QTAILQ_ENTRY(elf_thread_status) ets_link;
1852 struct target_elf_prstatus prstatus; /* NT_PRSTATUS */
1853 #if 0
1854 elf_fpregset_t fpu; /* NT_PRFPREG */
1855 struct task_struct *thread;
1856 elf_fpxregset_t xfpu; /* ELF_CORE_XFPREG_TYPE */
1857 #endif
1858 struct memelfnote notes[1];
1859 int num_notes;
1860 };
1861
1862 struct elf_note_info {
1863 struct memelfnote *notes;
1864 struct target_elf_prstatus *prstatus; /* NT_PRSTATUS */
1865 struct target_elf_prpsinfo *psinfo; /* NT_PRPSINFO */
1866
1867 QTAILQ_HEAD(thread_list_head, elf_thread_status) thread_list;
1868 #if 0
1869 /*
1870 * Current version of ELF coredump doesn't support
1871 * dumping fp regs etc.
1872 */
1873 elf_fpregset_t *fpu;
1874 elf_fpxregset_t *xfpu;
1875 int thread_status_size;
1876 #endif
1877 int notes_size;
1878 int numnote;
1879 };
1880
1881 struct vm_area_struct {
1882 abi_ulong vma_start; /* start vaddr of memory region */
1883 abi_ulong vma_end; /* end vaddr of memory region */
1884 abi_ulong vma_flags; /* protection etc. flags for the region */
1885 QTAILQ_ENTRY(vm_area_struct) vma_link;
1886 };
1887
1888 struct mm_struct {
1889 QTAILQ_HEAD(, vm_area_struct) mm_mmap;
1890 int mm_count; /* number of mappings */
1891 };
1892
1893 static struct mm_struct *vma_init(void);
1894 static void vma_delete(struct mm_struct *);
1895 static int vma_add_mapping(struct mm_struct *, abi_ulong,
1896 abi_ulong, abi_ulong);
1897 static int vma_get_mapping_count(const struct mm_struct *);
1898 static struct vm_area_struct *vma_first(const struct mm_struct *);
1899 static struct vm_area_struct *vma_next(struct vm_area_struct *);
1900 static abi_ulong vma_dump_size(const struct vm_area_struct *);
1901 static int vma_walker(void *priv, unsigned long start, unsigned long end,
1902 unsigned long flags);
1903
1904 static void fill_elf_header(struct elfhdr *, int, uint16_t, uint32_t);
1905 static void fill_note(struct memelfnote *, const char *, int,
1906 unsigned int, void *);
1907 static void fill_prstatus(struct target_elf_prstatus *, const TaskState *, int);
1908 static int fill_psinfo(struct target_elf_prpsinfo *, const TaskState *);
1909 static void fill_auxv_note(struct memelfnote *, const TaskState *);
1910 static void fill_elf_note_phdr(struct elf_phdr *, int, off_t);
1911 static size_t note_size(const struct memelfnote *);
1912 static void free_note_info(struct elf_note_info *);
1913 static int fill_note_info(struct elf_note_info *, long, const CPUState *);
1914 static void fill_thread_info(struct elf_note_info *, const CPUState *);
1915 static int core_dump_filename(const TaskState *, char *, size_t);
1916
1917 static int dump_write(int, const void *, size_t);
1918 static int write_note(struct memelfnote *, int);
1919 static int write_note_info(struct elf_note_info *, int);
1920
1921 #ifdef BSWAP_NEEDED
1922 static void bswap_prstatus(struct target_elf_prstatus *);
1923 static void bswap_psinfo(struct target_elf_prpsinfo *);
1924
1925 static void bswap_prstatus(struct target_elf_prstatus *prstatus)
1926 {
1927 prstatus->pr_info.si_signo = tswapl(prstatus->pr_info.si_signo);
1928 prstatus->pr_info.si_code = tswapl(prstatus->pr_info.si_code);
1929 prstatus->pr_info.si_errno = tswapl(prstatus->pr_info.si_errno);
1930 prstatus->pr_cursig = tswap16(prstatus->pr_cursig);
1931 prstatus->pr_sigpend = tswapl(prstatus->pr_sigpend);
1932 prstatus->pr_sighold = tswapl(prstatus->pr_sighold);
1933 prstatus->pr_pid = tswap32(prstatus->pr_pid);
1934 prstatus->pr_ppid = tswap32(prstatus->pr_ppid);
1935 prstatus->pr_pgrp = tswap32(prstatus->pr_pgrp);
1936 prstatus->pr_sid = tswap32(prstatus->pr_sid);
1937 /* cpu times are not filled, so we skip them */
1938 /* regs should be in correct format already */
1939 prstatus->pr_fpvalid = tswap32(prstatus->pr_fpvalid);
1940 }
1941
1942 static void bswap_psinfo(struct target_elf_prpsinfo *psinfo)
1943 {
1944 psinfo->pr_flag = tswapl(psinfo->pr_flag);
1945 psinfo->pr_uid = tswap16(psinfo->pr_uid);
1946 psinfo->pr_gid = tswap16(psinfo->pr_gid);
1947 psinfo->pr_pid = tswap32(psinfo->pr_pid);
1948 psinfo->pr_ppid = tswap32(psinfo->pr_ppid);
1949 psinfo->pr_pgrp = tswap32(psinfo->pr_pgrp);
1950 psinfo->pr_sid = tswap32(psinfo->pr_sid);
1951 }
1952 #endif /* BSWAP_NEEDED */
1953
1954 /*
1955 * Minimal support for Linux memory regions. These are needed
1956 * when we are finding out what memory exactly belongs to the
1957 * emulated process. No locks are needed here, as long as the
1958 * thread that received the signal is stopped.
1959 */
1960
1961 static struct mm_struct *vma_init(void)
1962 {
1963 struct mm_struct *mm;
1964
1965 if ((mm = qemu_malloc(sizeof (*mm))) == NULL)
1966 return (NULL);
1967
1968 mm->mm_count = 0;
1969 QTAILQ_INIT(&mm->mm_mmap);
1970
1971 return (mm);
1972 }
1973
1974 static void vma_delete(struct mm_struct *mm)
1975 {
1976 struct vm_area_struct *vma;
1977
1978 while ((vma = vma_first(mm)) != NULL) {
1979 QTAILQ_REMOVE(&mm->mm_mmap, vma, vma_link);
1980 qemu_free(vma);
1981 }
1982 qemu_free(mm);
1983 }
1984
1985 static int vma_add_mapping(struct mm_struct *mm, abi_ulong start,
1986 abi_ulong end, abi_ulong flags)
1987 {
1988 struct vm_area_struct *vma;
1989
1990 if ((vma = qemu_mallocz(sizeof (*vma))) == NULL)
1991 return (-1);
1992
1993 vma->vma_start = start;
1994 vma->vma_end = end;
1995 vma->vma_flags = flags;
1996
1997 QTAILQ_INSERT_TAIL(&mm->mm_mmap, vma, vma_link);
1998 mm->mm_count++;
1999
2000 return (0);
2001 }
2002
2003 static struct vm_area_struct *vma_first(const struct mm_struct *mm)
2004 {
2005 return (QTAILQ_FIRST(&mm->mm_mmap));
2006 }
2007
2008 static struct vm_area_struct *vma_next(struct vm_area_struct *vma)
2009 {
2010 return (QTAILQ_NEXT(vma, vma_link));
2011 }
2012
2013 static int vma_get_mapping_count(const struct mm_struct *mm)
2014 {
2015 return (mm->mm_count);
2016 }
2017
2018 /*
2019 * Calculate file (dump) size of given memory region.
2020 */
2021 static abi_ulong vma_dump_size(const struct vm_area_struct *vma)
2022 {
2023 /* if we cannot even read the first page, skip it */
2024 if (!access_ok(VERIFY_READ, vma->vma_start, TARGET_PAGE_SIZE))
2025 return (0);
2026
2027 /*
2028 * Usually we don't dump executable pages, as they contain
2029 * non-writable code that the debugger can read directly from
2030 * the target library etc. However, thread stacks are also
2031 * marked executable, so we read in the first page of the
2032 * region and check whether it contains an ELF header. If
2033 * there is no ELF header, we dump the region.
2034 */
2035 if (vma->vma_flags & PROT_EXEC) {
2036 char page[TARGET_PAGE_SIZE];
2037
2038 copy_from_user(page, vma->vma_start, sizeof (page));
2039 if ((page[EI_MAG0] == ELFMAG0) &&
2040 (page[EI_MAG1] == ELFMAG1) &&
2041 (page[EI_MAG2] == ELFMAG2) &&
2042 (page[EI_MAG3] == ELFMAG3)) {
2043 /*
2044 * This mapping is probably from an ELF binary.
2045 * Don't dump it.
2046 */
2047 return (0);
2048 }
2049 }
2050
2051 return (vma->vma_end - vma->vma_start);
2052 }
2053
2054 static int vma_walker(void *priv, unsigned long start, unsigned long end,
2055 unsigned long flags)
2056 {
2057 struct mm_struct *mm = (struct mm_struct *)priv;
2058
2059 /*
2060 * Don't dump anything that qemu has reserved for internal use.
2061 */
2062 if (flags & PAGE_RESERVED)
2063 return (0);
2064
2065 vma_add_mapping(mm, start, end, flags);
2066 return (0);
2067 }
2068
2069 static void fill_note(struct memelfnote *note, const char *name, int type,
2070 unsigned int sz, void *data)
2071 {
2072 unsigned int namesz;
2073
2074 namesz = strlen(name) + 1;
2075 note->name = name;
2076 note->namesz = namesz;
2077 note->namesz_rounded = roundup(namesz, sizeof (int32_t));
2078 note->type = type;
2079 note->datasz = roundup(sz, sizeof (int32_t));
2080 note->data = data;
2081
2082 /*
2083 * We calculate the rounded-up note size here, as specified
2084 * by the ELF specification.
2085 */
2086 note->notesz = sizeof (struct elf_note) +
2087 note->namesz_rounded + note->datasz;
2088 }
2089
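/*
 * Fill in an ELF header for an ET_CORE file with 'segs' program
 * headers; the header is byte-swapped for the target if needed.
 */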
2090 static void fill_elf_header(struct elfhdr *elf, int segs, uint16_t machine,
2091 uint32_t flags)
2092 {
2093 (void) memset(elf, 0, sizeof(*elf));
2094
2095 (void) memcpy(elf->e_ident, ELFMAG, SELFMAG);
2096 elf->e_ident[EI_CLASS] = ELF_CLASS;
2097 elf->e_ident[EI_DATA] = ELF_DATA;
2098 elf->e_ident[EI_VERSION] = EV_CURRENT;
2099 elf->e_ident[EI_OSABI] = ELF_OSABI;
2100
2101 elf->e_type = ET_CORE;
2102 elf->e_machine = machine;
2103 elf->e_version = EV_CURRENT;
2104 elf->e_phoff = sizeof(struct elfhdr);
2105 elf->e_flags = flags;
2106 elf->e_ehsize = sizeof(struct elfhdr);
2107 elf->e_phentsize = sizeof(struct elf_phdr);
2108 elf->e_phnum = segs;
2109
2110 #ifdef BSWAP_NEEDED
2111 bswap_ehdr(elf);
2112 #endif
2113 }
2114
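/*
 * Program header for the PT_NOTE segment: only the file offset and
 * size are meaningful, the notes are not mapped into memory
 * (p_vaddr and p_memsz stay zero).
 */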
2115 static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, off_t offset)
2116 {
2117 phdr->p_type = PT_NOTE;
2118 phdr->p_offset = offset;
2119 phdr->p_vaddr = 0;
2120 phdr->p_paddr = 0;
2121 phdr->p_filesz = sz;
2122 phdr->p_memsz = 0;
2123 phdr->p_flags = 0;
2124 phdr->p_align = 0;
2125
2126 #ifdef BSWAP_NEEDED
2127 bswap_phdr(phdr);
2128 #endif
2129 }
2130
2131 static size_t note_size(const struct memelfnote *note)
2132 {
2133 return (note->notesz);
2134 }
2135
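/*
 * Fill in per-thread status: the signal that triggered the dump and
 * the various ids. CPU times are left zeroed and the register set is
 * copied in separately by the caller via elf_core_copy_regs().
 */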
2136 static void fill_prstatus(struct target_elf_prstatus *prstatus,
2137 const TaskState *ts, int signr)
2138 {
2139 (void) memset(prstatus, 0, sizeof (*prstatus));
2140 prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
2141 prstatus->pr_pid = ts->ts_tid;
2142 prstatus->pr_ppid = getppid();
2143 prstatus->pr_pgrp = getpgrp();
2144 prstatus->pr_sid = getsid(0);
2145
2146 #ifdef BSWAP_NEEDED
2147 bswap_prstatus(prstatus);
2148 #endif
2149 }
2150
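/*
 * Fill in process-wide info: the initial argument string (NUL
 * separators replaced by spaces), credentials and the base name of
 * the executable.
 */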
2151 static int fill_psinfo(struct target_elf_prpsinfo *psinfo, const TaskState *ts)
2152 {
2153 char *filename, *base_filename;
2154 unsigned int i, len;
2155
2156 (void) memset(psinfo, 0, sizeof (*psinfo));
2157
2158 len = ts->info->arg_end - ts->info->arg_start;
2159 if (len >= ELF_PRARGSZ)
2160 len = ELF_PRARGSZ - 1;
2161 if (copy_from_user(&psinfo->pr_psargs, ts->info->arg_start, len))
2162 return -EFAULT;
2163 for (i = 0; i < len; i++)
2164 if (psinfo->pr_psargs[i] == 0)
2165 psinfo->pr_psargs[i] = ' ';
2166 psinfo->pr_psargs[len] = 0;
2167
2168 psinfo->pr_pid = getpid();
2169 psinfo->pr_ppid = getppid();
2170 psinfo->pr_pgrp = getpgrp();
2171 psinfo->pr_sid = getsid(0);
2172 psinfo->pr_uid = getuid();
2173 psinfo->pr_gid = getgid();
2174
2175 filename = strdup(ts->bprm->filename);
2176 base_filename = strdup(basename(filename));
2177 (void) strncpy(psinfo->pr_fname, base_filename,
2178 sizeof(psinfo->pr_fname));
2179 free(base_filename);
2180 free(filename);
2181
2182 #ifdef BSWAP_NEEDED
2183 bswap_psinfo(psinfo);
2184 #endif
2185 return (0);
2186 }
2187
2188 static void fill_auxv_note(struct memelfnote *note, const TaskState *ts)
2189 {
2190 elf_addr_t auxv = (elf_addr_t)ts->info->saved_auxv;
2191 elf_addr_t orig_auxv = auxv;
2192 abi_ulong val;
2193 void *ptr;
2194 int i, len;
2195
2196 /*
2197 * The auxiliary vector is stored on the target process stack. It holds
2198 * {type, value} pairs that we need to dump into the note. This is not
2199 * strictly necessary, but we do it here for the sake of completeness.
2200 */
2201
2202 /* find out the length of the vector; AT_NULL is the terminator */
2203 i = len = 0;
2204 do {
2205 get_user_ual(val, auxv);
2206 i += 2;
2207 auxv += 2 * sizeof (elf_addr_t);
2208 } while (val != AT_NULL);
2209 len = i * sizeof (elf_addr_t);
2210
2211 /* read in whole auxv vector and copy it to memelfnote */
2212 ptr = lock_user(VERIFY_READ, orig_auxv, len, 0);
2213 if (ptr != NULL) {
2214 fill_note(note, "CORE", NT_AUXV, len, ptr);
2215 unlock_user(ptr, auxv, len);
2216 }
2217 }
2218
2219 /*
2220 * Constructs the name of the coredump file. We use the following
2221 * naming convention:
2222 * qemu_<basename-of-target-binary>_<date>-<time>_<pid>.core
2223 *
2224 * Returns 0 on success, -1 otherwise (errno is set).
2225 */
2226 static int core_dump_filename(const TaskState *ts, char *buf,
2227 size_t bufsize)
2228 {
2229 char timestamp[64];
2230 char *filename = NULL;
2231 char *base_filename = NULL;
2232 struct timeval tv;
2233 struct tm tm;
2234
2235 assert(bufsize >= PATH_MAX);
2236
2237 if (gettimeofday(&tv, NULL) < 0) {
2238 (void) fprintf(stderr, "unable to get current timestamp: %s",
2239 strerror(errno));
2240 return (-1);
2241 }
2242
2243 filename = strdup(ts->bprm->filename);
2244 base_filename = strdup(basename(filename));
2245 (void) strftime(timestamp, sizeof (timestamp), "%Y%m%d-%H%M%S",
2246 localtime_r(&tv.tv_sec, &tm));
2247 (void) snprintf(buf, bufsize, "qemu_%s_%s_%d.core",
2248 base_filename, timestamp, (int)getpid());
2249 free(base_filename);
2250 free(filename);
2251
2252 return (0);
2253 }
2254
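/*
 * Write a block of data to the dump file, honouring RLIMIT_CORE when
 * the descriptor is seekable and retrying on EINTR and short writes.
 */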
2255 static int dump_write(int fd, const void *ptr, size_t size)
2256 {
2257 const char *bufp = (const char *)ptr;
2258 ssize_t bytes_written, bytes_left;
2259 struct rlimit dumpsize;
2260 off_t pos;
2261
2262 bytes_written = 0;
2263 getrlimit(RLIMIT_CORE, &dumpsize);
2264 if ((pos = lseek(fd, 0, SEEK_CUR)) == -1) {
2265 if (errno == ESPIPE) { /* not a seekable stream */
2266 bytes_left = size;
2267 } else {
2268 return pos;
2269 }
2270 } else {
2271 if (dumpsize.rlim_cur <= pos) {
2272 return -1;
2273 } else if (dumpsize.rlim_cur == RLIM_INFINITY) {
2274 bytes_left = size;
2275 } else {
2276 size_t limit_left = dumpsize.rlim_cur - pos;
2277 bytes_left = limit_left >= size ? size : limit_left;
2278 }
2279 }
2280
2281 /*
2282 * Under normal conditions a single write(2) should do, but
2283 * for sockets etc. this loop is more portable.
2284 */
2285 do {
2286 bytes_written = write(fd, bufp, bytes_left);
2287 if (bytes_written < 0) {
2288 if (errno == EINTR)
2289 continue;
2290 return (-1);
2291 } else if (bytes_written == 0) { /* eof */
2292 return (-1);
2293 }
2294 bufp += bytes_written;
2295 bytes_left -= bytes_written;
2296 } while (bytes_left > 0);
2297
2298 return (0);
2299 }
2300
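/*
 * Write a single note: the fixed-size elf_note header followed by
 * the padded name and data.
 */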
2301 static int write_note(struct memelfnote *men, int fd)
2302 {
2303 struct elf_note en;
2304
2305 en.n_namesz = men->namesz;
2306 en.n_type = men->type;
2307 en.n_descsz = men->datasz;
2308
2309 #ifdef BSWAP_NEEDED
2310 bswap_note(&en);
2311 #endif
2312
2313 if (dump_write(fd, &en, sizeof(en)) != 0)
2314 return (-1);
2315 if (dump_write(fd, men->name, men->namesz_rounded) != 0)
2316 return (-1);
2317 if (dump_write(fd, men->data, men->datasz) != 0)
2318 return (-1);
2319
2320 return (0);
2321 }
2322
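/*
 * Capture the state of one secondary thread as an NT_PRSTATUS note
 * and queue it on the thread list.
 */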
2323 static void fill_thread_info(struct elf_note_info *info, const CPUState *env)
2324 {
2325 TaskState *ts = (TaskState *)env->opaque;
2326 struct elf_thread_status *ets;
2327
2328 ets = qemu_mallocz(sizeof (*ets));
2329 ets->num_notes = 1; /* only prstatus is dumped */
2330 fill_prstatus(&ets->prstatus, ts, 0);
2331 elf_core_copy_regs(&ets->prstatus.pr_reg, env);
2332 fill_note(&ets->notes[0], "CORE", NT_PRSTATUS, sizeof (ets->prstatus),
2333 &ets->prstatus);
2334
2335 QTAILQ_INSERT_TAIL(&info->thread_list, ets, ets_link);
2336
2337 info->notes_size += note_size(&ets->notes[0]);
2338 }
2339
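/*
 * Collect all note data in memory: prstatus, psinfo and auxv for the
 * current thread, plus one extra prstatus per additional thread.
 */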
2340 static int fill_note_info(struct elf_note_info *info,
2341 long signr, const CPUState *env)
2342 {
2343 #define NUMNOTES 3
2344 CPUState *cpu = NULL;
2345 TaskState *ts = (TaskState *)env->opaque;
2346 int i;
2347
2348 (void) memset(info, 0, sizeof (*info));
2349
2350 QTAILQ_INIT(&info->thread_list);
2351
2352 info->notes = qemu_mallocz(NUMNOTES * sizeof (struct memelfnote));
2353 if (info->notes == NULL)
2354 return (-ENOMEM);
2355 info->prstatus = qemu_mallocz(sizeof (*info->prstatus));
2356 if (info->prstatus == NULL)
2357 return (-ENOMEM);
2358 info->psinfo = qemu_mallocz(sizeof (*info->psinfo));
2359 if (info->psinfo == NULL)
2360 return (-ENOMEM);
2361
2362 /*
2363 * First fill in status (and registers) of current thread
2364 * including process info & aux vector.
2365 */
2366 fill_prstatus(info->prstatus, ts, signr);
2367 elf_core_copy_regs(&info->prstatus->pr_reg, env);
2368 fill_note(&info->notes[0], "CORE", NT_PRSTATUS,
2369 sizeof (*info->prstatus), info->prstatus);
2370 fill_psinfo(info->psinfo, ts);
2371 fill_note(&info->notes[1], "CORE", NT_PRPSINFO,
2372 sizeof (*info->psinfo), info->psinfo);
2373 fill_auxv_note(&info->notes[2], ts);
2374 info->numnote = 3;
2375
2376 info->notes_size = 0;
2377 for (i = 0; i < info->numnote; i++)
2378 info->notes_size += note_size(&info->notes[i]);
2379
2380 /* read and fill status of all threads */
2381 cpu_list_lock();
2382 for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
2383 if (cpu == thread_env)
2384 continue;
2385 fill_thread_info(info, cpu);
2386 }
2387 cpu_list_unlock();
2388
2389 return (0);
2390 }
2391
2392 static void free_note_info(struct elf_note_info *info)
2393 {
2394 struct elf_thread_status *ets;
2395
2396 while (!QTAILQ_EMPTY(&info->thread_list)) {
2397 ets = QTAILQ_FIRST(&info->thread_list);
2398 QTAILQ_REMOVE(&info->thread_list, ets, ets_link);
2399 qemu_free(ets);
2400 }
2401
2402 qemu_free(info->prstatus);
2403 qemu_free(info->psinfo);
2404 qemu_free(info->notes);
2405 }
2406
2407 static int write_note_info(struct elf_note_info *info, int fd)
2408 {
2409 struct elf_thread_status *ets;
2410 int i, error = 0;
2411
2412 /* write prstatus, psinfo and auxv for current thread */
2413 for (i = 0; i < info->numnote; i++)
2414 if ((error = write_note(&info->notes[i], fd)) != 0)
2415 return (error);
2416
2417 /* write prstatus for each thread */
2418 for (ets = info->thread_list.tqh_first; ets != NULL;
2419 ets = ets->ets_link.tqe_next) {
2420 if ((error = write_note(&ets->notes[0], fd)) != 0)
2421 return (error);
2422 }
2423
2424 return (0);
2425 }
2426
2427 /*
2428 * Write out the ELF coredump.
2429 *
2430 * See the documentation of the ELF object file format in:
2431 * http://www.caldera.com/developers/devspecs/gabi41.pdf
2432 *
2433 * The coredump format in Linux is the following:
2434 *
2435 * 0 +----------------------+ \
2436 * | ELF header | ET_CORE |
2437 * +----------------------+ |
2438 * | ELF program headers | |--- headers
2439 * | - NOTE section | |
2440 * | - PT_LOAD sections | |
2441 * +----------------------+ /
2442 * | NOTEs: |
2443 * | - NT_PRSTATUS |
2444 * | - NT_PRPSINFO |
2445 * | - NT_AUXV |
2446 * +----------------------+ <-- aligned to target page
2447 * | Process memory dump |
2448 * : :
2449 * . .
2450 * : :
2451 * | |
2452 * +----------------------+
2453 *
2454 * NT_PRSTATUS -> struct elf_prstatus (per thread)
2455 * NT_PRPSINFO -> struct elf_prpsinfo
2456 * NT_AUXV is an array of { type, value } pairs (see fill_auxv_note()).
2457 *
2458 * The format follows the System V format as closely as possible.
2459 * Current version limitations:
2460 * - no floating point registers are dumped
2461 *
2462 * The function returns 0 on success, a negative errno value otherwise.
2463 *
2464 * TODO: make this work at runtime as well: it should be possible
2465 * to force a coredump from a running process and then continue
2466 * processing. For example, qemu could set up a SIGUSR2 handler
2467 * (provided the target process hasn't registered a handler for it)
2468 * that performs the dump when the signal is received.
2469 */
2470 static int elf_core_dump(int signr, const CPUState *env)
2471 {
2472 const TaskState *ts = (const TaskState *)env->opaque;
2473 struct vm_area_struct *vma = NULL;
2474 char corefile[PATH_MAX];
2475 struct elf_note_info info;
2476 struct elfhdr elf;
2477 struct elf_phdr phdr;
2478 struct rlimit dumpsize;
2479 struct mm_struct *mm = NULL;
2480 off_t offset = 0, data_offset = 0;
2481 int segs = 0;
2482 int fd = -1;
2483
2484 errno = 0;
2485 getrlimit(RLIMIT_CORE, &dumpsize);
2486 if (dumpsize.rlim_cur == 0)
2487 return 0;
2488
2489 if (core_dump_filename(ts, corefile, sizeof (corefile)) < 0)
2490 return (-errno);
2491
2492 if ((fd = open(corefile, O_WRONLY | O_CREAT,
2493 S_IRUSR|S_IWUSR|S_IRGRP|S_IROTH)) < 0)
2494 return (-errno);
2495
2496 /*
2497 * Walk through the target process memory mappings and
2498 * set up a structure containing this information. After
2499 * this point the vma_xxx functions can be used.
2500 */
2501 if ((mm = vma_init()) == NULL)
2502 goto out;
2503
2504 walk_memory_regions(mm, vma_walker);
2505 segs = vma_get_mapping_count(mm);
2506
2507 /*
2508 * Construct a valid coredump ELF header. We also
2509 * add one more segment for the notes.
2510 */
2511 fill_elf_header(&elf, segs + 1, ELF_MACHINE, 0);
2512 if (dump_write(fd, &elf, sizeof (elf)) != 0)
2513 goto out;
2514
2515 /* fill in in-memory version of notes */
2516 if (fill_note_info(&info, signr, env) < 0)
2517 goto out;
2518
2519 offset += sizeof (elf); /* elf header */
2520 offset += (segs + 1) * sizeof (struct elf_phdr); /* program headers */
2521
2522 /* write out notes program header */
2523 fill_elf_note_phdr(&phdr, info.notes_size, offset);
2524
2525 offset += info.notes_size;
2526 if (dump_write(fd, &phdr, sizeof (phdr)) != 0)
2527 goto out;
2528
2529 /*
2530 * The ELF specification wants data to start at a page boundary,
2531 * so we align it here.
2532 */
2533 offset = roundup(offset, ELF_EXEC_PAGESIZE);
2534
2535 /*
2536 * Write program headers for memory regions mapped in
2537 * the target process.
2538 */
2539 for (vma = vma_first(mm); vma != NULL; vma = vma_next(vma)) {
2540 (void) memset(&phdr, 0, sizeof (phdr));
2541
2542 phdr.p_type = PT_LOAD;
2543 phdr.p_offset = offset;
2544 phdr.p_vaddr = vma->vma_start;
2545 phdr.p_paddr = 0;
2546 phdr.p_filesz = vma_dump_size(vma);
2547 offset += phdr.p_filesz;
2548 phdr.p_memsz = vma->vma_end - vma->vma_start;
2549 phdr.p_flags = vma->vma_flags & PROT_READ ? PF_R : 0;
2550 if (vma->vma_flags & PROT_WRITE)
2551 phdr.p_flags |= PF_W;
2552 if (vma->vma_flags & PROT_EXEC)
2553 phdr.p_flags |= PF_X;
2554 phdr.p_align = ELF_EXEC_PAGESIZE;
2555
2556 dump_write(fd, &phdr, sizeof (phdr));
2557 }
2558
2559 /*
2560 * Next we write the notes just after the program headers. No
2561 * alignment is needed here.
2562 */
2563 if (write_note_info(&info, fd) < 0)
2564 goto out;
2565
2566 /* align data to page boundary */
2567 data_offset = lseek(fd, 0, SEEK_CUR);
2568 data_offset = TARGET_PAGE_ALIGN(data_offset);
2569 if (lseek(fd, data_offset, SEEK_SET) != data_offset)
2570 goto out;
2571
2572 /*
2573 * Finally we can dump the process memory into the corefile as well.
2574 */
2575 for (vma = vma_first(mm); vma != NULL; vma = vma_next(vma)) {
2576 abi_ulong addr;
2577 abi_ulong end;
2578
2579 end = vma->vma_start + vma_dump_size(vma);
2580
2581 for (addr = vma->vma_start; addr < end;
2582 addr += TARGET_PAGE_SIZE) {
2583 char page[TARGET_PAGE_SIZE];
2584 int error;
2585
2586 /*
2587 * Read in a page from target process memory and
2588 * write it to the coredump file.
2589 */
2590 error = copy_from_user(page, addr, sizeof (page));
2591 if (error != 0) {
2592 (void) fprintf(stderr, "unable to dump " TARGET_ABI_FMT_lx "\n",
2593 addr);
2594 errno = -error;
2595 goto out;
2596 }
2597 if (dump_write(fd, page, TARGET_PAGE_SIZE) < 0)
2598 goto out;
2599 }
2600 }
2601
2602 out:
2603 free_note_info(&info);
2604 if (mm != NULL)
2605 vma_delete(mm);
2606 (void) close(fd);
2607
2608 if (errno != 0)
2609 return (-errno);
2610 return (0);
2611 }
2612
2613 #endif /* USE_ELF_CORE_DUMP */
2614
2615 static int load_aout_interp(void * exptr, int interp_fd)
2616 {
2617 printf("a.out interpreter not yet supported\n");
2618 return(0);
2619 }
2620
2621 void do_init_thread(struct target_pt_regs *regs, struct image_info *infop)
2622 {
2623 init_thread(regs, infop);
2624 }