1 /* This is the Linux kernel elf-loading code, ported into user space */
2 #include <sys/time.h>
3 #include <sys/param.h>
4
5 #include <stdio.h>
6 #include <sys/types.h>
7 #include <fcntl.h>
8 #include <errno.h>
9 #include <unistd.h>
10 #include <sys/mman.h>
11 #include <sys/resource.h>
12 #include <stdlib.h>
13 #include <string.h>
14 #include <time.h>
15
16 #include "qemu.h"
17 #include "disas.h"
18
19 #ifdef _ARCH_PPC64
20 #undef ARCH_DLINFO
21 #undef ELF_PLATFORM
22 #undef ELF_HWCAP
23 #undef ELF_CLASS
24 #undef ELF_DATA
25 #undef ELF_ARCH
26 #endif
27
28 #define ELF_OSABI ELFOSABI_SYSV
29
30 /* from personality.h */
31
32 /*
33 * Flags for bug emulation.
34 *
35 * These occupy the top three bytes.
36 */
37 enum {
38 ADDR_NO_RANDOMIZE = 0x0040000, /* disable randomization of VA space */
39 FDPIC_FUNCPTRS = 0x0080000, /* userspace function ptrs point to descriptors
40 * (signal handling)
41 */
42 MMAP_PAGE_ZERO = 0x0100000,
43 ADDR_COMPAT_LAYOUT = 0x0200000,
44 READ_IMPLIES_EXEC = 0x0400000,
45 ADDR_LIMIT_32BIT = 0x0800000,
46 SHORT_INODE = 0x1000000,
47 WHOLE_SECONDS = 0x2000000,
48 STICKY_TIMEOUTS = 0x4000000,
49 ADDR_LIMIT_3GB = 0x8000000,
50 };
51
52 /*
53 * Personality types.
54 *
55 * These go in the low byte. Avoid using the top bit, it will
56 * conflict with error returns.
57 */
58 enum {
59 PER_LINUX = 0x0000,
60 PER_LINUX_32BIT = 0x0000 | ADDR_LIMIT_32BIT,
61 PER_LINUX_FDPIC = 0x0000 | FDPIC_FUNCPTRS,
62 PER_SVR4 = 0x0001 | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
63 PER_SVR3 = 0x0002 | STICKY_TIMEOUTS | SHORT_INODE,
64 PER_SCOSVR3 = 0x0003 | STICKY_TIMEOUTS |
65 WHOLE_SECONDS | SHORT_INODE,
66 PER_OSR5 = 0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS,
67 PER_WYSEV386 = 0x0004 | STICKY_TIMEOUTS | SHORT_INODE,
68 PER_ISCR4 = 0x0005 | STICKY_TIMEOUTS,
69 PER_BSD = 0x0006,
70 PER_SUNOS = 0x0006 | STICKY_TIMEOUTS,
71 PER_XENIX = 0x0007 | STICKY_TIMEOUTS | SHORT_INODE,
72 PER_LINUX32 = 0x0008,
73 PER_LINUX32_3GB = 0x0008 | ADDR_LIMIT_3GB,
74 PER_IRIX32 = 0x0009 | STICKY_TIMEOUTS,/* IRIX5 32-bit */
75 PER_IRIXN32 = 0x000a | STICKY_TIMEOUTS,/* IRIX6 new 32-bit */
76 PER_IRIX64 = 0x000b | STICKY_TIMEOUTS,/* IRIX6 64-bit */
77 PER_RISCOS = 0x000c,
78 PER_SOLARIS = 0x000d | STICKY_TIMEOUTS,
79 PER_UW7 = 0x000e | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
80 PER_OSF4 = 0x000f, /* OSF/1 v4 */
81 PER_HPUX = 0x0010,
82 PER_MASK = 0x00ff,
83 };
84
85 /*
86 * Return the base personality without flags.
87 */
88 #define personality(pers) (pers & PER_MASK)
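/*
 * For example, PER_SVR4 above is 0x0001 | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
 * so personality(PER_SVR4) evaluates to 0x0001: the bug-emulation flags in
 * the top bytes are masked off and only the base personality remains.
 */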
89
90 /* this flag is ineffective under Linux too; it should be deleted */
91 #ifndef MAP_DENYWRITE
92 #define MAP_DENYWRITE 0
93 #endif
94
95 /* should probably go in elf.h */
96 #ifndef ELIBBAD
97 #define ELIBBAD 80
98 #endif
99
100 typedef target_ulong target_elf_greg_t;
101 #ifdef USE_UID16
102 typedef uint16_t target_uid_t;
103 typedef uint16_t target_gid_t;
104 #else
105 typedef uint32_t target_uid_t;
106 typedef uint32_t target_gid_t;
107 #endif
108 typedef int32_t target_pid_t;
109
110 #ifdef TARGET_I386
111
112 #define ELF_PLATFORM get_elf_platform()
113
114 static const char *get_elf_platform(void)
115 {
116 static char elf_platform[] = "i386";
117 int family = (thread_env->cpuid_version >> 8) & 0xff;
118 if (family > 6)
119 family = 6;
120 if (family >= 3)
121 elf_platform[1] = '0' + family;
122 return elf_platform;
123 }
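/*
 * The platform string is patched in place: a CPUID family of 6 or above
 * yields "i686", family 4 yields "i486", and families below 3 leave the
 * default "i386".
 */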
124
125 #define ELF_HWCAP get_elf_hwcap()
126
127 static uint32_t get_elf_hwcap(void)
128 {
129 return thread_env->cpuid_features;
130 }
131
132 #ifdef TARGET_X86_64
133 #define ELF_START_MMAP 0x2aaaaab000ULL
134 #define elf_check_arch(x) ( ((x) == ELF_ARCH) )
135
136 #define ELF_CLASS ELFCLASS64
137 #define ELF_DATA ELFDATA2LSB
138 #define ELF_ARCH EM_X86_64
139
140 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
141 {
142 regs->rax = 0;
143 regs->rsp = infop->start_stack;
144 regs->rip = infop->entry;
145 }
146
147 #define ELF_NREG 27
148 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
149
150 /*
151 * Note that ELF_NREG should be 29, as there should also be room for
152 * the TRAPNO and ERR "registers", but Linux doesn't dump
153 * those.
154 *
155 * See linux kernel: arch/x86/include/asm/elf.h
156 */
157 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUState *env)
158 {
159 (*regs)[0] = env->regs[15];
160 (*regs)[1] = env->regs[14];
161 (*regs)[2] = env->regs[13];
162 (*regs)[3] = env->regs[12];
163 (*regs)[4] = env->regs[R_EBP];
164 (*regs)[5] = env->regs[R_EBX];
165 (*regs)[6] = env->regs[11];
166 (*regs)[7] = env->regs[10];
167 (*regs)[8] = env->regs[9];
168 (*regs)[9] = env->regs[8];
169 (*regs)[10] = env->regs[R_EAX];
170 (*regs)[11] = env->regs[R_ECX];
171 (*regs)[12] = env->regs[R_EDX];
172 (*regs)[13] = env->regs[R_ESI];
173 (*regs)[14] = env->regs[R_EDI];
174 (*regs)[15] = env->regs[R_EAX]; /* XXX */
175 (*regs)[16] = env->eip;
176 (*regs)[17] = env->segs[R_CS].selector & 0xffff;
177 (*regs)[18] = env->eflags;
178 (*regs)[19] = env->regs[R_ESP];
179 (*regs)[20] = env->segs[R_SS].selector & 0xffff;
180 (*regs)[21] = env->segs[R_FS].selector & 0xffff;
181 (*regs)[22] = env->segs[R_GS].selector & 0xffff;
182 (*regs)[23] = env->segs[R_DS].selector & 0xffff;
183 (*regs)[24] = env->segs[R_ES].selector & 0xffff;
184 (*regs)[25] = env->segs[R_FS].selector & 0xffff;
185 (*regs)[26] = env->segs[R_GS].selector & 0xffff;
186 }
187
188 #else
189
190 #define ELF_START_MMAP 0x80000000
191
192 /*
193 * This is used to ensure we don't load something for the wrong architecture.
194 */
195 #define elf_check_arch(x) ( ((x) == EM_386) || ((x) == EM_486) )
196
197 /*
198 * These are used to set parameters in the core dumps.
199 */
200 #define ELF_CLASS ELFCLASS32
201 #define ELF_DATA ELFDATA2LSB
202 #define ELF_ARCH EM_386
203
204 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
205 {
206 regs->esp = infop->start_stack;
207 regs->eip = infop->entry;
208
209 /* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program
210 starts %edx contains a pointer to a function which might be
211 registered using `atexit'. This provides a means for the
212 dynamic linker to call DT_FINI functions for shared libraries
213 that have been loaded before the code runs.
214
215 A value of 0 tells us we have no such handler. */
216 regs->edx = 0;
217 }
218
219 #define ELF_NREG 17
220 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
221
222 /*
223 * Note that ELF_NREG should be 19, as there should also be room for
224 * the TRAPNO and ERR "registers", but Linux doesn't dump
225 * those.
226 *
227 * See linux kernel: arch/x86/include/asm/elf.h
228 */
229 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUState *env)
230 {
231 (*regs)[0] = env->regs[R_EBX];
232 (*regs)[1] = env->regs[R_ECX];
233 (*regs)[2] = env->regs[R_EDX];
234 (*regs)[3] = env->regs[R_ESI];
235 (*regs)[4] = env->regs[R_EDI];
236 (*regs)[5] = env->regs[R_EBP];
237 (*regs)[6] = env->regs[R_EAX];
238 (*regs)[7] = env->segs[R_DS].selector & 0xffff;
239 (*regs)[8] = env->segs[R_ES].selector & 0xffff;
240 (*regs)[9] = env->segs[R_FS].selector & 0xffff;
241 (*regs)[10] = env->segs[R_GS].selector & 0xffff;
242 (*regs)[11] = env->regs[R_EAX]; /* XXX */
243 (*regs)[12] = env->eip;
244 (*regs)[13] = env->segs[R_CS].selector & 0xffff;
245 (*regs)[14] = env->eflags;
246 (*regs)[15] = env->regs[R_ESP];
247 (*regs)[16] = env->segs[R_SS].selector & 0xffff;
248 }
249 #endif
250
251 #define USE_ELF_CORE_DUMP
252 #define ELF_EXEC_PAGESIZE 4096
253
254 #endif
255
256 #ifdef TARGET_ARM
257
258 #define ELF_START_MMAP 0x80000000
259
260 #define elf_check_arch(x) ( (x) == EM_ARM )
261
262 #define ELF_CLASS ELFCLASS32
263 #ifdef TARGET_WORDS_BIGENDIAN
264 #define ELF_DATA ELFDATA2MSB
265 #else
266 #define ELF_DATA ELFDATA2LSB
267 #endif
268 #define ELF_ARCH EM_ARM
269
270 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
271 {
272 abi_long stack = infop->start_stack;
273 memset(regs, 0, sizeof(*regs));
274 regs->ARM_cpsr = 0x10;
275 if (infop->entry & 1)
276 regs->ARM_cpsr |= CPSR_T;
277 regs->ARM_pc = infop->entry & 0xfffffffe;
278 regs->ARM_sp = infop->start_stack;
279 /* FIXME - what to do on failure of get_user()? */
280 get_user_ual(regs->ARM_r2, stack + 8); /* envp */
281 get_user_ual(regs->ARM_r1, stack + 4); /* argv */
282 /* XXX: it seems that r0 is zeroed afterwards! */
283 regs->ARM_r0 = 0;
284 /* For uClinux PIC binaries. */
285 /* XXX: Linux does this only on ARM with no MMU (do we care ?) */
286 regs->ARM_r10 = infop->start_data;
287 }
288
289 #define ELF_NREG 18
290 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
291
292 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUState *env)
293 {
294 (*regs)[0] = tswapl(env->regs[0]);
295 (*regs)[1] = tswapl(env->regs[1]);
296 (*regs)[2] = tswapl(env->regs[2]);
297 (*regs)[3] = tswapl(env->regs[3]);
298 (*regs)[4] = tswapl(env->regs[4]);
299 (*regs)[5] = tswapl(env->regs[5]);
300 (*regs)[6] = tswapl(env->regs[6]);
301 (*regs)[7] = tswapl(env->regs[7]);
302 (*regs)[8] = tswapl(env->regs[8]);
303 (*regs)[9] = tswapl(env->regs[9]);
304 (*regs)[10] = tswapl(env->regs[10]);
305 (*regs)[11] = tswapl(env->regs[11]);
306 (*regs)[12] = tswapl(env->regs[12]);
307 (*regs)[13] = tswapl(env->regs[13]);
308 (*regs)[14] = tswapl(env->regs[14]);
309 (*regs)[15] = tswapl(env->regs[15]);
310
311 (*regs)[16] = tswapl(cpsr_read((CPUState *)env));
312 (*regs)[17] = tswapl(env->regs[0]); /* XXX */
313 }
314
315 #define USE_ELF_CORE_DUMP
316 #define ELF_EXEC_PAGESIZE 4096
317
318 enum
319 {
320 ARM_HWCAP_ARM_SWP = 1 << 0,
321 ARM_HWCAP_ARM_HALF = 1 << 1,
322 ARM_HWCAP_ARM_THUMB = 1 << 2,
323 ARM_HWCAP_ARM_26BIT = 1 << 3,
324 ARM_HWCAP_ARM_FAST_MULT = 1 << 4,
325 ARM_HWCAP_ARM_FPA = 1 << 5,
326 ARM_HWCAP_ARM_VFP = 1 << 6,
327 ARM_HWCAP_ARM_EDSP = 1 << 7,
328 ARM_HWCAP_ARM_JAVA = 1 << 8,
329 ARM_HWCAP_ARM_IWMMXT = 1 << 9,
330 ARM_HWCAP_ARM_THUMBEE = 1 << 10,
331 ARM_HWCAP_ARM_NEON = 1 << 11,
332 ARM_HWCAP_ARM_VFPv3 = 1 << 12,
333 ARM_HWCAP_ARM_VFPv3D16 = 1 << 13,
334 };
335
336 #define ELF_HWCAP (ARM_HWCAP_ARM_SWP | ARM_HWCAP_ARM_HALF \
337 | ARM_HWCAP_ARM_THUMB | ARM_HWCAP_ARM_FAST_MULT \
338 | ARM_HWCAP_ARM_FPA | ARM_HWCAP_ARM_VFP \
339 | ARM_HWCAP_ARM_NEON | ARM_HWCAP_ARM_VFPv3 )
340
341 #endif
342
343 #ifdef TARGET_SPARC
344 #ifdef TARGET_SPARC64
345
346 #define ELF_START_MMAP 0x80000000
347
348 #ifndef TARGET_ABI32
349 #define elf_check_arch(x) ( (x) == EM_SPARCV9 || (x) == EM_SPARC32PLUS )
350 #else
351 #define elf_check_arch(x) ( (x) == EM_SPARC32PLUS || (x) == EM_SPARC )
352 #endif
353
354 #define ELF_CLASS ELFCLASS64
355 #define ELF_DATA ELFDATA2MSB
356 #define ELF_ARCH EM_SPARCV9
357
358 #define STACK_BIAS 2047
359
360 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
361 {
362 #ifndef TARGET_ABI32
363 regs->tstate = 0;
364 #endif
365 regs->pc = infop->entry;
366 regs->npc = regs->pc + 4;
367 regs->y = 0;
368 #ifdef TARGET_ABI32
369 regs->u_regs[14] = infop->start_stack - 16 * 4;
370 #else
371 if (personality(infop->personality) == PER_LINUX32)
372 regs->u_regs[14] = infop->start_stack - 16 * 4;
373 else
374 regs->u_regs[14] = infop->start_stack - 16 * 8 - STACK_BIAS;
375 #endif
376 }
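/*
 * The 64-bit SPARC ABI biases %sp by STACK_BIAS (2047) bytes below the real
 * register save area, which is why the non-PER_LINUX32 case subtracts the
 * bias in addition to the 16-register save area.
 */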
377
378 #else
379 #define ELF_START_MMAP 0x80000000
380
381 #define elf_check_arch(x) ( (x) == EM_SPARC )
382
383 #define ELF_CLASS ELFCLASS32
384 #define ELF_DATA ELFDATA2MSB
385 #define ELF_ARCH EM_SPARC
386
387 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
388 {
389 regs->psr = 0;
390 regs->pc = infop->entry;
391 regs->npc = regs->pc + 4;
392 regs->y = 0;
393 regs->u_regs[14] = infop->start_stack - 16 * 4;
394 }
395
396 #endif
397 #endif
398
399 #ifdef TARGET_PPC
400
401 #define ELF_START_MMAP 0x80000000
402
403 #if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
404
405 #define elf_check_arch(x) ( (x) == EM_PPC64 )
406
407 #define ELF_CLASS ELFCLASS64
408
409 #else
410
411 #define elf_check_arch(x) ( (x) == EM_PPC )
412
413 #define ELF_CLASS ELFCLASS32
414
415 #endif
416
417 #ifdef TARGET_WORDS_BIGENDIAN
418 #define ELF_DATA ELFDATA2MSB
419 #else
420 #define ELF_DATA ELFDATA2LSB
421 #endif
422 #define ELF_ARCH EM_PPC
423
424 /* Feature masks for the Aux Vector Hardware Capabilities (AT_HWCAP).
425 See arch/powerpc/include/asm/cputable.h. */
426 enum {
427 QEMU_PPC_FEATURE_32 = 0x80000000,
428 QEMU_PPC_FEATURE_64 = 0x40000000,
429 QEMU_PPC_FEATURE_601_INSTR = 0x20000000,
430 QEMU_PPC_FEATURE_HAS_ALTIVEC = 0x10000000,
431 QEMU_PPC_FEATURE_HAS_FPU = 0x08000000,
432 QEMU_PPC_FEATURE_HAS_MMU = 0x04000000,
433 QEMU_PPC_FEATURE_HAS_4xxMAC = 0x02000000,
434 QEMU_PPC_FEATURE_UNIFIED_CACHE = 0x01000000,
435 QEMU_PPC_FEATURE_HAS_SPE = 0x00800000,
436 QEMU_PPC_FEATURE_HAS_EFP_SINGLE = 0x00400000,
437 QEMU_PPC_FEATURE_HAS_EFP_DOUBLE = 0x00200000,
438 QEMU_PPC_FEATURE_NO_TB = 0x00100000,
439 QEMU_PPC_FEATURE_POWER4 = 0x00080000,
440 QEMU_PPC_FEATURE_POWER5 = 0x00040000,
441 QEMU_PPC_FEATURE_POWER5_PLUS = 0x00020000,
442 QEMU_PPC_FEATURE_CELL = 0x00010000,
443 QEMU_PPC_FEATURE_BOOKE = 0x00008000,
444 QEMU_PPC_FEATURE_SMT = 0x00004000,
445 QEMU_PPC_FEATURE_ICACHE_SNOOP = 0x00002000,
446 QEMU_PPC_FEATURE_ARCH_2_05 = 0x00001000,
447 QEMU_PPC_FEATURE_PA6T = 0x00000800,
448 QEMU_PPC_FEATURE_HAS_DFP = 0x00000400,
449 QEMU_PPC_FEATURE_POWER6_EXT = 0x00000200,
450 QEMU_PPC_FEATURE_ARCH_2_06 = 0x00000100,
451 QEMU_PPC_FEATURE_HAS_VSX = 0x00000080,
452 QEMU_PPC_FEATURE_PSERIES_PERFMON_COMPAT = 0x00000040,
453
454 QEMU_PPC_FEATURE_TRUE_LE = 0x00000002,
455 QEMU_PPC_FEATURE_PPC_LE = 0x00000001,
456 };
457
458 #define ELF_HWCAP get_elf_hwcap()
459
460 static uint32_t get_elf_hwcap(void)
461 {
462 CPUState *e = thread_env;
463 uint32_t features = 0;
464
465 /* We don't have to be terribly complete here; the high points are
466 Altivec/FP/SPE support. Anything else is just a bonus. */
467 #define GET_FEATURE(flag, feature) \
468 do {if (e->insns_flags & flag) features |= feature; } while(0)
469 GET_FEATURE(PPC_64B, QEMU_PPC_FEATURE_64);
470 GET_FEATURE(PPC_FLOAT, QEMU_PPC_FEATURE_HAS_FPU);
471 GET_FEATURE(PPC_ALTIVEC, QEMU_PPC_FEATURE_HAS_ALTIVEC);
472 GET_FEATURE(PPC_SPE, QEMU_PPC_FEATURE_HAS_SPE);
473 GET_FEATURE(PPC_SPE_SINGLE, QEMU_PPC_FEATURE_HAS_EFP_SINGLE);
474 GET_FEATURE(PPC_SPE_DOUBLE, QEMU_PPC_FEATURE_HAS_EFP_DOUBLE);
475 GET_FEATURE(PPC_BOOKE, QEMU_PPC_FEATURE_BOOKE);
476 GET_FEATURE(PPC_405_MAC, QEMU_PPC_FEATURE_HAS_4xxMAC);
477 #undef GET_FEATURE
478
479 return features;
480 }
481
482 /*
483 * We need to put in some extra aux table entries to tell glibc what
484 * the cache block size is, so it can use the dcbz instruction safely.
485 */
486 #define AT_DCACHEBSIZE 19
487 #define AT_ICACHEBSIZE 20
488 #define AT_UCACHEBSIZE 21
489 /* A special ignored type value for PPC, for glibc compatibility. */
490 #define AT_IGNOREPPC 22
491 /*
492 * The requirements here are:
493 * - keep the final alignment of sp (sp & 0xf)
494 * - make sure the 32-bit value at the first 16 byte aligned position of
495 * AUXV is greater than 16 for glibc compatibility.
496 * AT_IGNOREPPC is used for that.
497 * - for compatibility with glibc ARCH_DLINFO must always be defined on PPC,
498 * even if DLINFO_ARCH_ITEMS goes to zero or is undefined.
499 */
500 #define DLINFO_ARCH_ITEMS 5
501 #define ARCH_DLINFO \
502 do { \
503 NEW_AUX_ENT(AT_DCACHEBSIZE, 0x20); \
504 NEW_AUX_ENT(AT_ICACHEBSIZE, 0x20); \
505 NEW_AUX_ENT(AT_UCACHEBSIZE, 0); \
506 /* \
507 * Now handle glibc compatibility. \
508 */ \
509 NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \
510 NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \
511 } while (0)
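/*
 * DLINFO_ARCH_ITEMS (5) counts exactly the five NEW_AUX_ENT() pairs emitted
 * above, so that create_elf_tables() can reserve the right amount of stack
 * space before the entries are actually pushed.
 */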
512
513 static inline void init_thread(struct target_pt_regs *_regs, struct image_info *infop)
514 {
515 _regs->gpr[1] = infop->start_stack;
516 #if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
517 _regs->gpr[2] = ldq_raw(infop->entry + 8) + infop->load_addr;
518 infop->entry = ldq_raw(infop->entry) + infop->load_addr;
519 #endif
520 _regs->nip = infop->entry;
521 }
522
523 /* See linux kernel: arch/powerpc/include/asm/elf.h. */
524 #define ELF_NREG 48
525 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
526
527 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUState *env)
528 {
529 int i;
530 target_ulong ccr = 0;
531
532 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
533 (*regs)[i] = tswapl(env->gpr[i]);
534 }
535
536 (*regs)[32] = tswapl(env->nip);
537 (*regs)[33] = tswapl(env->msr);
538 (*regs)[35] = tswapl(env->ctr);
539 (*regs)[36] = tswapl(env->lr);
540 (*regs)[37] = tswapl(env->xer);
541
542 for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
543 ccr |= env->crf[i] << (32 - ((i + 1) * 4));
544 }
545 (*regs)[38] = tswapl(ccr);
546 }
547
548 #define USE_ELF_CORE_DUMP
549 #define ELF_EXEC_PAGESIZE 4096
550
551 #endif
552
553 #ifdef TARGET_MIPS
554
555 #define ELF_START_MMAP 0x80000000
556
557 #define elf_check_arch(x) ( (x) == EM_MIPS )
558
559 #ifdef TARGET_MIPS64
560 #define ELF_CLASS ELFCLASS64
561 #else
562 #define ELF_CLASS ELFCLASS32
563 #endif
564 #ifdef TARGET_WORDS_BIGENDIAN
565 #define ELF_DATA ELFDATA2MSB
566 #else
567 #define ELF_DATA ELFDATA2LSB
568 #endif
569 #define ELF_ARCH EM_MIPS
570
571 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
572 {
573 regs->cp0_status = 2 << CP0St_KSU;
574 regs->cp0_epc = infop->entry;
575 regs->regs[29] = infop->start_stack;
576 }
577
578 /* See linux kernel: arch/mips/include/asm/elf.h. */
579 #define ELF_NREG 45
580 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
581
582 /* See linux kernel: arch/mips/include/asm/reg.h. */
583 enum {
584 #ifdef TARGET_MIPS64
585 TARGET_EF_R0 = 0,
586 #else
587 TARGET_EF_R0 = 6,
588 #endif
589 TARGET_EF_R26 = TARGET_EF_R0 + 26,
590 TARGET_EF_R27 = TARGET_EF_R0 + 27,
591 TARGET_EF_LO = TARGET_EF_R0 + 32,
592 TARGET_EF_HI = TARGET_EF_R0 + 33,
593 TARGET_EF_CP0_EPC = TARGET_EF_R0 + 34,
594 TARGET_EF_CP0_BADVADDR = TARGET_EF_R0 + 35,
595 TARGET_EF_CP0_STATUS = TARGET_EF_R0 + 36,
596 TARGET_EF_CP0_CAUSE = TARGET_EF_R0 + 37
597 };
598
599 /* See linux kernel: arch/mips/kernel/process.c:elf_dump_regs. */
600 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUState *env)
601 {
602 int i;
603
604 for (i = 0; i < TARGET_EF_R0; i++) {
605 (*regs)[i] = 0;
606 }
607 (*regs)[TARGET_EF_R0] = 0;
608
609 for (i = 1; i < ARRAY_SIZE(env->active_tc.gpr); i++) {
610 (*regs)[TARGET_EF_R0 + i] = tswapl(env->active_tc.gpr[i]);
611 }
612
613 (*regs)[TARGET_EF_R26] = 0;
614 (*regs)[TARGET_EF_R27] = 0;
615 (*regs)[TARGET_EF_LO] = tswapl(env->active_tc.LO[0]);
616 (*regs)[TARGET_EF_HI] = tswapl(env->active_tc.HI[0]);
617 (*regs)[TARGET_EF_CP0_EPC] = tswapl(env->active_tc.PC);
618 (*regs)[TARGET_EF_CP0_BADVADDR] = tswapl(env->CP0_BadVAddr);
619 (*regs)[TARGET_EF_CP0_STATUS] = tswapl(env->CP0_Status);
620 (*regs)[TARGET_EF_CP0_CAUSE] = tswapl(env->CP0_Cause);
621 }
622
623 #define USE_ELF_CORE_DUMP
624 #define ELF_EXEC_PAGESIZE 4096
625
626 #endif /* TARGET_MIPS */
627
628 #ifdef TARGET_MICROBLAZE
629
630 #define ELF_START_MMAP 0x80000000
631
632 #define elf_check_arch(x) ( (x) == EM_MICROBLAZE || (x) == EM_MICROBLAZE_OLD)
633
634 #define ELF_CLASS ELFCLASS32
635 #define ELF_DATA ELFDATA2MSB
636 #define ELF_ARCH EM_MICROBLAZE
637
638 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
639 {
640 regs->pc = infop->entry;
641 regs->r1 = infop->start_stack;
642
643 }
644
645 #define ELF_EXEC_PAGESIZE 4096
646
647 #define USE_ELF_CORE_DUMP
648 #define ELF_NREG 38
649 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
650
651 /* See linux kernel: arch/mips/kernel/process.c:elf_dump_regs. */
652 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUState *env)
653 {
654 int i, pos = 0;
655
656 for (i = 0; i < 32; i++) {
657 (*regs)[pos++] = tswapl(env->regs[i]);
658 }
659
660 for (i = 0; i < 6; i++) {
661 (*regs)[pos++] = tswapl(env->sregs[i]);
662 }
663 }
664
665 #endif /* TARGET_MICROBLAZE */
666
667 #ifdef TARGET_SH4
668
669 #define ELF_START_MMAP 0x80000000
670
671 #define elf_check_arch(x) ( (x) == EM_SH )
672
673 #define ELF_CLASS ELFCLASS32
674 #define ELF_DATA ELFDATA2LSB
675 #define ELF_ARCH EM_SH
676
677 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
678 {
679 /* Check other registers XXXXX */
680 regs->pc = infop->entry;
681 regs->regs[15] = infop->start_stack;
682 }
683
684 /* See linux kernel: arch/sh/include/asm/elf.h. */
685 #define ELF_NREG 23
686 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
687
688 /* See linux kernel: arch/sh/include/asm/ptrace.h. */
689 enum {
690 TARGET_REG_PC = 16,
691 TARGET_REG_PR = 17,
692 TARGET_REG_SR = 18,
693 TARGET_REG_GBR = 19,
694 TARGET_REG_MACH = 20,
695 TARGET_REG_MACL = 21,
696 TARGET_REG_SYSCALL = 22
697 };
698
699 static inline void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUState *env)
700 {
701 int i;
702
703 for (i = 0; i < 16; i++) {
704 (*regs)[i] = tswapl(env->gregs[i]);
705 }
706
707 (*regs)[TARGET_REG_PC] = tswapl(env->pc);
708 (*regs)[TARGET_REG_PR] = tswapl(env->pr);
709 (*regs)[TARGET_REG_SR] = tswapl(env->sr);
710 (*regs)[TARGET_REG_GBR] = tswapl(env->gbr);
711 (*regs)[TARGET_REG_MACH] = tswapl(env->mach);
712 (*regs)[TARGET_REG_MACL] = tswapl(env->macl);
713 (*regs)[TARGET_REG_SYSCALL] = 0; /* FIXME */
714 }
715
716 #define USE_ELF_CORE_DUMP
717 #define ELF_EXEC_PAGESIZE 4096
718
719 #endif
720
721 #ifdef TARGET_CRIS
722
723 #define ELF_START_MMAP 0x80000000
724
725 #define elf_check_arch(x) ( (x) == EM_CRIS )
726
727 #define ELF_CLASS ELFCLASS32
728 #define ELF_DATA ELFDATA2LSB
729 #define ELF_ARCH EM_CRIS
730
731 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
732 {
733 regs->erp = infop->entry;
734 }
735
736 #define ELF_EXEC_PAGESIZE 8192
737
738 #endif
739
740 #ifdef TARGET_M68K
741
742 #define ELF_START_MMAP 0x80000000
743
744 #define elf_check_arch(x) ( (x) == EM_68K )
745
746 #define ELF_CLASS ELFCLASS32
747 #define ELF_DATA ELFDATA2MSB
748 #define ELF_ARCH EM_68K
749
750 /* ??? Does this need to do anything?
751 #define ELF_PLAT_INIT(_r) */
752
753 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
754 {
755 regs->usp = infop->start_stack;
756 regs->sr = 0;
757 regs->pc = infop->entry;
758 }
759
760 /* See linux kernel: arch/m68k/include/asm/elf.h. */
761 #define ELF_NREG 20
762 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
763
764 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUState *env)
765 {
766 (*regs)[0] = tswapl(env->dregs[1]);
767 (*regs)[1] = tswapl(env->dregs[2]);
768 (*regs)[2] = tswapl(env->dregs[3]);
769 (*regs)[3] = tswapl(env->dregs[4]);
770 (*regs)[4] = tswapl(env->dregs[5]);
771 (*regs)[5] = tswapl(env->dregs[6]);
772 (*regs)[6] = tswapl(env->dregs[7]);
773 (*regs)[7] = tswapl(env->aregs[0]);
774 (*regs)[8] = tswapl(env->aregs[1]);
775 (*regs)[9] = tswapl(env->aregs[2]);
776 (*regs)[10] = tswapl(env->aregs[3]);
777 (*regs)[11] = tswapl(env->aregs[4]);
778 (*regs)[12] = tswapl(env->aregs[5]);
779 (*regs)[13] = tswapl(env->aregs[6]);
780 (*regs)[14] = tswapl(env->dregs[0]);
781 (*regs)[15] = tswapl(env->aregs[7]);
782 (*regs)[16] = tswapl(env->dregs[0]); /* FIXME: orig_d0 */
783 (*regs)[17] = tswapl(env->sr);
784 (*regs)[18] = tswapl(env->pc);
785 (*regs)[19] = 0; /* FIXME: regs->format | regs->vector */
786 }
787
788 #define USE_ELF_CORE_DUMP
789 #define ELF_EXEC_PAGESIZE 8192
790
791 #endif
792
793 #ifdef TARGET_ALPHA
794
795 #define ELF_START_MMAP (0x30000000000ULL)
796
797 #define elf_check_arch(x) ( (x) == ELF_ARCH )
798
799 #define ELF_CLASS ELFCLASS64
800 #define ELF_DATA ELFDATA2LSB /* Alpha is little-endian */
801 #define ELF_ARCH EM_ALPHA
802
803 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
804 {
805 regs->pc = infop->entry;
806 regs->ps = 8;
807 regs->usp = infop->start_stack;
808 }
809
810 #define ELF_EXEC_PAGESIZE 8192
811
812 #endif /* TARGET_ALPHA */
813
814 #ifndef ELF_PLATFORM
815 #define ELF_PLATFORM (NULL)
816 #endif
817
818 #ifndef ELF_HWCAP
819 #define ELF_HWCAP 0
820 #endif
821
822 #ifdef TARGET_ABI32
823 #undef ELF_CLASS
824 #define ELF_CLASS ELFCLASS32
825 #undef bswaptls
826 #define bswaptls(ptr) bswap32s(ptr)
827 #endif
828
829 #include "elf.h"
830
831 struct exec
832 {
833 unsigned int a_info; /* Use macros N_MAGIC, etc for access */
834 unsigned int a_text; /* length of text, in bytes */
835 unsigned int a_data; /* length of data, in bytes */
836 unsigned int a_bss; /* length of uninitialized data area, in bytes */
837 unsigned int a_syms; /* length of symbol table data in file, in bytes */
838 unsigned int a_entry; /* start address */
839 unsigned int a_trsize; /* length of relocation info for text, in bytes */
840 unsigned int a_drsize; /* length of relocation info for data, in bytes */
841 };
842
843
844 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
845 #define OMAGIC 0407
846 #define NMAGIC 0410
847 #define ZMAGIC 0413
848 #define QMAGIC 0314
849
850 /* max code+data+bss space allocated to elf interpreter */
851 #define INTERP_MAP_SIZE (32 * 1024 * 1024)
852
853 /* max code+data+bss+brk space allocated to ET_DYN executables */
854 #define ET_DYN_MAP_SIZE (128 * 1024 * 1024)
855
856 /* Necessary parameters */
857 #define TARGET_ELF_EXEC_PAGESIZE TARGET_PAGE_SIZE
858 #define TARGET_ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(TARGET_ELF_EXEC_PAGESIZE-1))
859 #define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE-1))
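/*
 * Worked example, assuming a 4096-byte TARGET_ELF_EXEC_PAGESIZE:
 * TARGET_ELF_PAGESTART(0x10234) == 0x10000 and
 * TARGET_ELF_PAGEOFFSET(0x10234) == 0x234.  The loader below uses the pair
 * to turn an arbitrary p_vaddr/p_offset into a page-aligned mmap() address
 * plus an in-page offset.
 */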
860
861 #define INTERPRETER_NONE 0
862 #define INTERPRETER_AOUT 1
863 #define INTERPRETER_ELF 2
864
865 #define DLINFO_ITEMS 13 /* number of NEW_AUX_ENT()s before AT_PLATFORM in create_elf_tables() */
866
867 static inline void memcpy_fromfs(void * to, const void * from, unsigned long n)
868 {
869 memcpy(to, from, n);
870 }
871
872 static int load_aout_interp(void * exptr, int interp_fd);
873
874 #ifdef BSWAP_NEEDED
875 static void bswap_ehdr(struct elfhdr *ehdr)
876 {
877 bswap16s(&ehdr->e_type); /* Object file type */
878 bswap16s(&ehdr->e_machine); /* Architecture */
879 bswap32s(&ehdr->e_version); /* Object file version */
880 bswaptls(&ehdr->e_entry); /* Entry point virtual address */
881 bswaptls(&ehdr->e_phoff); /* Program header table file offset */
882 bswaptls(&ehdr->e_shoff); /* Section header table file offset */
883 bswap32s(&ehdr->e_flags); /* Processor-specific flags */
884 bswap16s(&ehdr->e_ehsize); /* ELF header size in bytes */
885 bswap16s(&ehdr->e_phentsize); /* Program header table entry size */
886 bswap16s(&ehdr->e_phnum); /* Program header table entry count */
887 bswap16s(&ehdr->e_shentsize); /* Section header table entry size */
888 bswap16s(&ehdr->e_shnum); /* Section header table entry count */
889 bswap16s(&ehdr->e_shstrndx); /* Section header string table index */
890 }
891
892 static void bswap_phdr(struct elf_phdr *phdr)
893 {
894 bswap32s(&phdr->p_type); /* Segment type */
895 bswaptls(&phdr->p_offset); /* Segment file offset */
896 bswaptls(&phdr->p_vaddr); /* Segment virtual address */
897 bswaptls(&phdr->p_paddr); /* Segment physical address */
898 bswaptls(&phdr->p_filesz); /* Segment size in file */
899 bswaptls(&phdr->p_memsz); /* Segment size in memory */
900 bswap32s(&phdr->p_flags); /* Segment flags */
901 bswaptls(&phdr->p_align); /* Segment alignment */
902 }
903
904 static void bswap_shdr(struct elf_shdr *shdr)
905 {
906 bswap32s(&shdr->sh_name);
907 bswap32s(&shdr->sh_type);
908 bswaptls(&shdr->sh_flags);
909 bswaptls(&shdr->sh_addr);
910 bswaptls(&shdr->sh_offset);
911 bswaptls(&shdr->sh_size);
912 bswap32s(&shdr->sh_link);
913 bswap32s(&shdr->sh_info);
914 bswaptls(&shdr->sh_addralign);
915 bswaptls(&shdr->sh_entsize);
916 }
917
918 static void bswap_sym(struct elf_sym *sym)
919 {
920 bswap32s(&sym->st_name);
921 bswaptls(&sym->st_value);
922 bswaptls(&sym->st_size);
923 bswap16s(&sym->st_shndx);
924 }
925 #endif
926
927 #ifdef USE_ELF_CORE_DUMP
928 static int elf_core_dump(int, const CPUState *);
929
930 #ifdef BSWAP_NEEDED
931 static void bswap_note(struct elf_note *en)
932 {
933 bswap32s(&en->n_namesz);
934 bswap32s(&en->n_descsz);
935 bswap32s(&en->n_type);
936 }
937 #endif /* BSWAP_NEEDED */
938
939 #endif /* USE_ELF_CORE_DUMP */
940
941 /*
942 * 'copy_elf_strings()' copies argument/environment strings from user
943 * memory to free pages in kernel mem. These are in a format ready
944 * to be put directly into the top of new user memory.
945 *
946 */
947 static abi_ulong copy_elf_strings(int argc,char ** argv, void **page,
948 abi_ulong p)
949 {
950 char *tmp, *tmp1, *pag = NULL;
951 int len, offset = 0;
952
953 if (!p) {
954 return 0; /* bullet-proofing */
955 }
956 while (argc-- > 0) {
957 tmp = argv[argc];
958 if (!tmp) {
959 fprintf(stderr, "VFS: argc is wrong\n");
960 exit(-1);
961 }
962 tmp1 = tmp;
963 while (*tmp++);
964 len = tmp - tmp1;
965 if (p < len) { /* this shouldn't happen - 128kB */
966 return 0;
967 }
968 while (len) {
969 --p; --tmp; --len;
970 if (--offset < 0) {
971 offset = p % TARGET_PAGE_SIZE;
972 pag = (char *)page[p/TARGET_PAGE_SIZE];
973 if (!pag) {
974 pag = (char *)malloc(TARGET_PAGE_SIZE);
975 if (!pag)
976 return 0;
977 memset(pag, 0, TARGET_PAGE_SIZE);
978 page[p/TARGET_PAGE_SIZE] = pag;
979 }
980 }
981 if (len == 0 || offset == 0) {
982 *(pag + offset) = *tmp;
983 }
984 else {
985 int bytes_to_copy = (len > offset) ? offset : len;
986 tmp -= bytes_to_copy;
987 p -= bytes_to_copy;
988 offset -= bytes_to_copy;
989 len -= bytes_to_copy;
990 memcpy_fromfs(pag + offset, tmp, bytes_to_copy + 1);
991 }
992 }
993 }
994 return p;
995 }
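/*
 * Typical call sequence (see load_elf_binary() below): the strings are laid
 * down from the top of the argument area downwards, so the caller passes the
 * filename first, then the environment, then the argument vector, feeding
 * each returned p into the next call:
 *
 *   bprm->p = copy_elf_strings(1, &bprm->filename, bprm->page, bprm->p);
 *   bprm->p = copy_elf_strings(bprm->envc, bprm->envp, bprm->page, bprm->p);
 *   bprm->p = copy_elf_strings(bprm->argc, bprm->argv, bprm->page, bprm->p);
 */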
996
997 static abi_ulong setup_arg_pages(abi_ulong p, struct linux_binprm *bprm,
998 struct image_info *info)
999 {
1000 abi_ulong stack_base, size, error;
1001 int i;
1002
1003 /* Create enough stack to hold everything. If we don't use
1004 * it for args, we'll use it for something else...
1005 */
1006 size = guest_stack_size;
1007 if (size < MAX_ARG_PAGES*TARGET_PAGE_SIZE)
1008 size = MAX_ARG_PAGES*TARGET_PAGE_SIZE;
1009 error = target_mmap(0,
1010 size + qemu_host_page_size,
1011 PROT_READ | PROT_WRITE,
1012 MAP_PRIVATE | MAP_ANONYMOUS,
1013 -1, 0);
1014 if (error == -1) {
1015 perror("stk mmap");
1016 exit(-1);
1017 }
1018 /* we reserve one extra page at the top of the stack as guard */
1019 target_mprotect(error + size, qemu_host_page_size, PROT_NONE);
1020
1021 info->stack_limit = error;
1022 stack_base = error + size - MAX_ARG_PAGES*TARGET_PAGE_SIZE;
1023 p += stack_base;
1024
1025 for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
1026 if (bprm->page[i]) {
1027 info->rss++;
1028 /* FIXME - check return value of memcpy_to_target() for failure */
1029 memcpy_to_target(stack_base, bprm->page[i], TARGET_PAGE_SIZE);
1030 free(bprm->page[i]);
1031 }
1032 stack_base += TARGET_PAGE_SIZE;
1033 }
1034 return p;
1035 }
1036
1037 static void set_brk(abi_ulong start, abi_ulong end)
1038 {
1039 /* page-align the start and end addresses... */
1040 start = HOST_PAGE_ALIGN(start);
1041 end = HOST_PAGE_ALIGN(end);
1042 if (end <= start)
1043 return;
1044 if(target_mmap(start, end - start,
1045 PROT_READ | PROT_WRITE | PROT_EXEC,
1046 MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0) == -1) {
1047 perror("cannot mmap brk");
1048 exit(-1);
1049 }
1050 }
1051
1052
1053 /* We need to explicitly zero any fractional pages after the data
1054 section (i.e. bss). This would contain the junk from the file that
1055 should not be in memory. */
1056 static void padzero(abi_ulong elf_bss, abi_ulong last_bss)
1057 {
1058 abi_ulong nbyte;
1059
1060 if (elf_bss >= last_bss)
1061 return;
1062
1063 /* XXX: this is really a hack : if the real host page size is
1064 smaller than the target page size, some pages after the end
1065 of the file may not be mapped. A better fix would be to
1066 patch target_mmap(), but it is more complicated as the file
1067 size must be known */
1068 if (qemu_real_host_page_size < qemu_host_page_size) {
1069 abi_ulong end_addr, end_addr1;
1070 end_addr1 = (elf_bss + qemu_real_host_page_size - 1) &
1071 ~(qemu_real_host_page_size - 1);
1072 end_addr = HOST_PAGE_ALIGN(elf_bss);
1073 if (end_addr1 < end_addr) {
1074 mmap((void *)g2h(end_addr1), end_addr - end_addr1,
1075 PROT_READ|PROT_WRITE|PROT_EXEC,
1076 MAP_FIXED|MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
1077 }
1078 }
1079
1080 nbyte = elf_bss & (qemu_host_page_size-1);
1081 if (nbyte) {
1082 nbyte = qemu_host_page_size - nbyte;
1083 do {
1084 /* FIXME - what to do if put_user() fails? */
1085 put_user_u8(0, elf_bss);
1086 elf_bss++;
1087 } while (--nbyte);
1088 }
1089 }
1090
1091
1092 static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc,
1093 struct elfhdr * exec,
1094 abi_ulong load_addr,
1095 abi_ulong load_bias,
1096 abi_ulong interp_load_addr, int ibcs,
1097 struct image_info *info)
1098 {
1099 abi_ulong sp;
1100 int size;
1101 abi_ulong u_platform;
1102 const char *k_platform;
1103 const int n = sizeof(elf_addr_t);
1104
1105 sp = p;
1106 u_platform = 0;
1107 k_platform = ELF_PLATFORM;
1108 if (k_platform) {
1109 size_t len = strlen(k_platform) + 1;
1110 sp -= (len + n - 1) & ~(n - 1);
1111 u_platform = sp;
1112 /* FIXME - check return value of memcpy_to_target() for failure */
1113 memcpy_to_target(sp, k_platform, len);
1114 }
1115 /*
1116 * Force 16 byte _final_ alignment here for generality.
1117 */
1118 sp = sp &~ (abi_ulong)15;
1119 size = (DLINFO_ITEMS + 1) * 2;
1120 if (k_platform)
1121 size += 2;
1122 #ifdef DLINFO_ARCH_ITEMS
1123 size += DLINFO_ARCH_ITEMS * 2;
1124 #endif
1125 size += envc + argc + 2;
1126 size += (!ibcs ? 3 : 1); /* argc itself */
1127 size *= n;
1128 if (size & 15)
1129 sp -= 16 - (size & 15);
1130
1131 /* This is correct because Linux defines
1132 * elf_addr_t as Elf32_Off / Elf64_Off
1133 */
1134 #define NEW_AUX_ENT(id, val) do { \
1135 sp -= n; put_user_ual(val, sp); \
1136 sp -= n; put_user_ual(id, sp); \
1137 } while(0)
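/*
 * Each NEW_AUX_ENT() pushes the value and then the id while sp grows
 * downwards, so every entry ends up as an (id, val) pair in memory.  Because
 * AT_NULL is pushed first, it lands at the highest address and terminates
 * the auxiliary vector when the guest walks it upwards from sp.
 */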
1138
1139 NEW_AUX_ENT (AT_NULL, 0);
1140
1141 /* There must be exactly DLINFO_ITEMS entries here. */
1142 NEW_AUX_ENT(AT_PHDR, (abi_ulong)(load_addr + exec->e_phoff));
1143 NEW_AUX_ENT(AT_PHENT, (abi_ulong)(sizeof (struct elf_phdr)));
1144 NEW_AUX_ENT(AT_PHNUM, (abi_ulong)(exec->e_phnum));
1145 NEW_AUX_ENT(AT_PAGESZ, (abi_ulong)(TARGET_PAGE_SIZE));
1146 NEW_AUX_ENT(AT_BASE, (abi_ulong)(interp_load_addr));
1147 NEW_AUX_ENT(AT_FLAGS, (abi_ulong)0);
1148 NEW_AUX_ENT(AT_ENTRY, load_bias + exec->e_entry);
1149 NEW_AUX_ENT(AT_UID, (abi_ulong) getuid());
1150 NEW_AUX_ENT(AT_EUID, (abi_ulong) geteuid());
1151 NEW_AUX_ENT(AT_GID, (abi_ulong) getgid());
1152 NEW_AUX_ENT(AT_EGID, (abi_ulong) getegid());
1153 NEW_AUX_ENT(AT_HWCAP, (abi_ulong) ELF_HWCAP);
1154 NEW_AUX_ENT(AT_CLKTCK, (abi_ulong) sysconf(_SC_CLK_TCK));
1155 if (k_platform)
1156 NEW_AUX_ENT(AT_PLATFORM, u_platform);
1157 #ifdef ARCH_DLINFO
1158 /*
1159 * ARCH_DLINFO must come last so platform specific code can enforce
1160 * special alignment requirements on the AUXV if necessary (eg. PPC).
1161 */
1162 ARCH_DLINFO;
1163 #endif
1164 #undef NEW_AUX_ENT
1165
1166 info->saved_auxv = sp;
1167
1168 sp = loader_build_argptr(envc, argc, sp, p, !ibcs);
1169 return sp;
1170 }
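/*
 * Roughly, the resulting initial guest stack looks like this (highest
 * addresses first), following the usual System V process-start convention:
 *
 *   argv[]/envp[] string data (copied earlier by copy_elf_strings())
 *   ELF_PLATFORM string, alignment padding
 *   auxiliary vector entries, AT_NULL pair at the top
 *   NULL, envp[envc-1] ... envp[0]
 *   NULL, argv[argc-1] ... argv[0]
 *   argc                          <- sp returned to the caller
 */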
1171
1172
1173 static abi_ulong load_elf_interp(struct elfhdr * interp_elf_ex,
1174 int interpreter_fd,
1175 abi_ulong *interp_load_addr)
1176 {
1177 struct elf_phdr *elf_phdata = NULL;
1178 struct elf_phdr *eppnt;
1179 abi_ulong load_addr = 0;
1180 int load_addr_set = 0;
1181 int retval;
1182 abi_ulong last_bss, elf_bss;
1183 abi_ulong error;
1184 int i;
1185
1186 elf_bss = 0;
1187 last_bss = 0;
1188 error = 0;
1189
1190 #ifdef BSWAP_NEEDED
1191 bswap_ehdr(interp_elf_ex);
1192 #endif
1193 /* First of all, some simple consistency checks */
1194 if ((interp_elf_ex->e_type != ET_EXEC &&
1195 interp_elf_ex->e_type != ET_DYN) ||
1196 !elf_check_arch(interp_elf_ex->e_machine)) {
1197 return ~((abi_ulong)0UL);
1198 }
1199
1200
1201 /* Now read in all of the header information */
1202
1203 if (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum > TARGET_PAGE_SIZE)
1204 return ~(abi_ulong)0UL;
1205
1206 elf_phdata = (struct elf_phdr *)
1207 malloc(sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
1208
1209 if (!elf_phdata)
1210 return ~((abi_ulong)0UL);
1211
1212 /*
1213 * If the size of this structure has changed, then punt, since
1214 * we will be doing the wrong thing.
1215 */
1216 if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr)) {
1217 free(elf_phdata);
1218 return ~((abi_ulong)0UL);
1219 }
1220
1221 retval = lseek(interpreter_fd, interp_elf_ex->e_phoff, SEEK_SET);
1222 if(retval >= 0) {
1223 retval = read(interpreter_fd,
1224 (char *) elf_phdata,
1225 sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
1226 }
1227 if (retval < 0) {
1228 perror("load_elf_interp");
1229 exit(-1);
1230 free (elf_phdata);
1231 return retval;
1232 }
1233 #ifdef BSWAP_NEEDED
1234 eppnt = elf_phdata;
1235 for (i=0; i<interp_elf_ex->e_phnum; i++, eppnt++) {
1236 bswap_phdr(eppnt);
1237 }
1238 #endif
1239
1240 if (interp_elf_ex->e_type == ET_DYN) {
1241 /* in order to avoid hardcoding the interpreter load
1242 address in qemu, we allocate a big enough memory zone */
1243 error = target_mmap(0, INTERP_MAP_SIZE,
1244 PROT_NONE, MAP_PRIVATE | MAP_ANON,
1245 -1, 0);
1246 if (error == -1) {
1247 perror("mmap");
1248 exit(-1);
1249 }
1250 load_addr = error;
1251 load_addr_set = 1;
1252 }
1253
1254 eppnt = elf_phdata;
1255 for(i=0; i<interp_elf_ex->e_phnum; i++, eppnt++)
1256 if (eppnt->p_type == PT_LOAD) {
1257 int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
1258 int elf_prot = 0;
1259 abi_ulong vaddr = 0;
1260 abi_ulong k;
1261
1262 if (eppnt->p_flags & PF_R) elf_prot = PROT_READ;
1263 if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
1264 if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
1265 if (interp_elf_ex->e_type == ET_EXEC || load_addr_set) {
1266 elf_type |= MAP_FIXED;
1267 vaddr = eppnt->p_vaddr;
1268 }
1269 error = target_mmap(load_addr+TARGET_ELF_PAGESTART(vaddr),
1270 eppnt->p_filesz + TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr),
1271 elf_prot,
1272 elf_type,
1273 interpreter_fd,
1274 eppnt->p_offset - TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr));
1275
1276 if (error == -1) {
1277 /* Real error */
1278 close(interpreter_fd);
1279 free(elf_phdata);
1280 return ~((abi_ulong)0UL);
1281 }
1282
1283 if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) {
1284 load_addr = error;
1285 load_addr_set = 1;
1286 }
1287
1288 /*
1289 * Find the end of the file mapping for this phdr, and keep
1290 * track of the largest address we see for this.
1291 */
1292 k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
1293 if (k > elf_bss) elf_bss = k;
1294
1295 /*
1296 * Do the same thing for the memory mapping - between
1297 * elf_bss and last_bss is the bss section.
1298 */
1299 k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
1300 if (k > last_bss) last_bss = k;
1301 }
1302
1303 /* Now use mmap to map the library into memory. */
1304
1305 close(interpreter_fd);
1306
1307 /*
1308 * Now fill out the bss section. First pad the last page up
1309 * to the page boundary, and then perform a mmap to make sure
1310 * that there are zeromapped pages up to and including the last
1311 * bss page.
1312 */
1313 padzero(elf_bss, last_bss);
1314 elf_bss = TARGET_ELF_PAGESTART(elf_bss + qemu_host_page_size - 1); /* What we have mapped so far */
1315
1316 /* Map the last of the bss segment */
1317 if (last_bss > elf_bss) {
1318 target_mmap(elf_bss, last_bss-elf_bss,
1319 PROT_READ|PROT_WRITE|PROT_EXEC,
1320 MAP_FIXED|MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
1321 }
1322 free(elf_phdata);
1323
1324 *interp_load_addr = load_addr;
1325 return ((abi_ulong) interp_elf_ex->e_entry) + load_addr;
1326 }
1327
1328 static int symfind(const void *s0, const void *s1)
1329 {
1330 struct elf_sym *key = (struct elf_sym *)s0;
1331 struct elf_sym *sym = (struct elf_sym *)s1;
1332 int result = 0;
1333 if (key->st_value < sym->st_value) {
1334 result = -1;
1335 } else if (key->st_value >= sym->st_value + sym->st_size) {
1336 result = 1;
1337 }
1338 return result;
1339 }
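/*
 * With this comparator, the bsearch() in lookup_symbolxx() below treats each
 * symbol as the half-open interval [st_value, st_value + st_size) and
 * returns the function symbol whose interval contains the probed address,
 * which is what the disassembler needs for address-to-name lookups.
 */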
1340
1341 static const char *lookup_symbolxx(struct syminfo *s, target_ulong orig_addr)
1342 {
1343 #if ELF_CLASS == ELFCLASS32
1344 struct elf_sym *syms = s->disas_symtab.elf32;
1345 #else
1346 struct elf_sym *syms = s->disas_symtab.elf64;
1347 #endif
1348
1349 // binary search
1350 struct elf_sym key;
1351 struct elf_sym *sym;
1352
1353 key.st_value = orig_addr;
1354
1355 sym = bsearch(&key, syms, s->disas_num_syms, sizeof(*syms), symfind);
1356 if (sym != NULL) {
1357 return s->disas_strtab + sym->st_name;
1358 }
1359
1360 return "";
1361 }
1362
1363 /* FIXME: This should use elf_ops.h */
1364 static int symcmp(const void *s0, const void *s1)
1365 {
1366 struct elf_sym *sym0 = (struct elf_sym *)s0;
1367 struct elf_sym *sym1 = (struct elf_sym *)s1;
1368 return (sym0->st_value < sym1->st_value)
1369 ? -1
1370 : ((sym0->st_value > sym1->st_value) ? 1 : 0);
1371 }
1372
1373 /* Best attempt to load symbols from this ELF object. */
1374 static void load_symbols(struct elfhdr *hdr, int fd)
1375 {
1376 unsigned int i, nsyms;
1377 struct elf_shdr sechdr, symtab, strtab;
1378 char *strings;
1379 struct syminfo *s;
1380 struct elf_sym *syms;
1381
1382 lseek(fd, hdr->e_shoff, SEEK_SET);
1383 for (i = 0; i < hdr->e_shnum; i++) {
1384 if (read(fd, &sechdr, sizeof(sechdr)) != sizeof(sechdr))
1385 return;
1386 #ifdef BSWAP_NEEDED
1387 bswap_shdr(&sechdr);
1388 #endif
1389 if (sechdr.sh_type == SHT_SYMTAB) {
1390 symtab = sechdr;
1391 lseek(fd, hdr->e_shoff
1392 + sizeof(sechdr) * sechdr.sh_link, SEEK_SET);
1393 if (read(fd, &strtab, sizeof(strtab))
1394 != sizeof(strtab))
1395 return;
1396 #ifdef BSWAP_NEEDED
1397 bswap_shdr(&strtab);
1398 #endif
1399 goto found;
1400 }
1401 }
1402 return; /* Shouldn't happen... */
1403
1404 found:
1405 /* Now we know where the strtab and symtab are. Snarf them. */
1406 s = malloc(sizeof(*s));
1407 syms = malloc(symtab.sh_size);
1408 if (!syms)
1409 return;
1410 s->disas_strtab = strings = malloc(strtab.sh_size);
1411 if (!s->disas_strtab)
1412 return;
1413
1414 lseek(fd, symtab.sh_offset, SEEK_SET);
1415 if (read(fd, syms, symtab.sh_size) != symtab.sh_size)
1416 return;
1417
1418 nsyms = symtab.sh_size / sizeof(struct elf_sym);
1419
1420 i = 0;
1421 while (i < nsyms) {
1422 #ifdef BSWAP_NEEDED
1423 bswap_sym(syms + i);
1424 #endif
1425 // Throw away entries which we do not need.
1426 if (syms[i].st_shndx == SHN_UNDEF ||
1427 syms[i].st_shndx >= SHN_LORESERVE ||
1428 ELF_ST_TYPE(syms[i].st_info) != STT_FUNC) {
1429 nsyms--;
1430 if (i < nsyms) {
1431 syms[i] = syms[nsyms];
1432 }
1433 continue;
1434 }
1435 #if defined(TARGET_ARM) || defined (TARGET_MIPS)
1436 /* The bottom address bit marks a Thumb or MIPS16 symbol. */
1437 syms[i].st_value &= ~(target_ulong)1;
1438 #endif
1439 i++;
1440 }
1441 syms = realloc(syms, nsyms * sizeof(*syms));
1442
1443 qsort(syms, nsyms, sizeof(*syms), symcmp);
1444
1445 lseek(fd, strtab.sh_offset, SEEK_SET);
1446 if (read(fd, strings, strtab.sh_size) != strtab.sh_size)
1447 return;
1448 s->disas_num_syms = nsyms;
1449 #if ELF_CLASS == ELFCLASS32
1450 s->disas_symtab.elf32 = syms;
1451 s->lookup_symbol = lookup_symbolxx;
1452 #else
1453 s->disas_symtab.elf64 = syms;
1454 s->lookup_symbol = lookup_symbolxx;
1455 #endif
1456 s->next = syminfos;
1457 syminfos = s;
1458 }
1459
1460 int load_elf_binary(struct linux_binprm * bprm, struct target_pt_regs * regs,
1461 struct image_info * info)
1462 {
1463 struct elfhdr elf_ex;
1464 struct elfhdr interp_elf_ex;
1465 struct exec interp_ex;
1466 int interpreter_fd = -1; /* avoid warning */
1467 abi_ulong load_addr, load_bias;
1468 int load_addr_set = 0;
1469 unsigned int interpreter_type = INTERPRETER_NONE;
1470 unsigned char ibcs2_interpreter;
1471 int i;
1472 abi_ulong mapped_addr;
1473 struct elf_phdr * elf_ppnt;
1474 struct elf_phdr *elf_phdata;
1475 abi_ulong elf_bss, k, elf_brk;
1476 int retval;
1477 char * elf_interpreter;
1478 abi_ulong elf_entry, interp_load_addr = 0;
1479 int status;
1480 abi_ulong start_code, end_code, start_data, end_data;
1481 abi_ulong reloc_func_desc = 0;
1482 abi_ulong elf_stack;
1483 char passed_fileno[6];
1484
1485 ibcs2_interpreter = 0;
1486 status = 0;
1487 load_addr = 0;
1488 load_bias = 0;
1489 elf_ex = *((struct elfhdr *) bprm->buf); /* exec-header */
1490 #ifdef BSWAP_NEEDED
1491 bswap_ehdr(&elf_ex);
1492 #endif
1493
1494 /* First of all, some simple consistency checks */
1495 if ((elf_ex.e_type != ET_EXEC && elf_ex.e_type != ET_DYN) ||
1496 (! elf_check_arch(elf_ex.e_machine))) {
1497 return -ENOEXEC;
1498 }
1499
1500 bprm->p = copy_elf_strings(1, &bprm->filename, bprm->page, bprm->p);
1501 bprm->p = copy_elf_strings(bprm->envc,bprm->envp,bprm->page,bprm->p);
1502 bprm->p = copy_elf_strings(bprm->argc,bprm->argv,bprm->page,bprm->p);
1503 if (!bprm->p) {
1504 retval = -E2BIG;
1505 }
1506
1507 /* Now read in all of the header information */
1508 elf_phdata = (struct elf_phdr *)malloc(elf_ex.e_phentsize*elf_ex.e_phnum);
1509 if (elf_phdata == NULL) {
1510 return -ENOMEM;
1511 }
1512
1513 retval = lseek(bprm->fd, elf_ex.e_phoff, SEEK_SET);
1514 if(retval > 0) {
1515 retval = read(bprm->fd, (char *) elf_phdata,
1516 elf_ex.e_phentsize * elf_ex.e_phnum);
1517 }
1518
1519 if (retval < 0) {
1520 perror("load_elf_binary");
1521 exit(-1);
1522 free (elf_phdata);
1523 return -errno;
1524 }
1525
1526 #ifdef BSWAP_NEEDED
1527 elf_ppnt = elf_phdata;
1528 for (i=0; i<elf_ex.e_phnum; i++, elf_ppnt++) {
1529 bswap_phdr(elf_ppnt);
1530 }
1531 #endif
1532 elf_ppnt = elf_phdata;
1533
1534 elf_bss = 0;
1535 elf_brk = 0;
1536
1537
1538 elf_stack = ~((abi_ulong)0UL);
1539 elf_interpreter = NULL;
1540 start_code = ~((abi_ulong)0UL);
1541 end_code = 0;
1542 start_data = 0;
1543 end_data = 0;
1544 interp_ex.a_info = 0;
1545
1546 for(i=0;i < elf_ex.e_phnum; i++) {
1547 if (elf_ppnt->p_type == PT_INTERP) {
1548 if ( elf_interpreter != NULL )
1549 {
1550 free (elf_phdata);
1551 free(elf_interpreter);
1552 close(bprm->fd);
1553 return -EINVAL;
1554 }
1555
1556 /* This is the program interpreter used for
1557 * shared libraries - for now assume that this
1558 * is an a.out format binary
1559 */
1560
1561 elf_interpreter = (char *)malloc(elf_ppnt->p_filesz);
1562
1563 if (elf_interpreter == NULL) {
1564 free (elf_phdata);
1565 close(bprm->fd);
1566 return -ENOMEM;
1567 }
1568
1569 retval = lseek(bprm->fd, elf_ppnt->p_offset, SEEK_SET);
1570 if(retval >= 0) {
1571 retval = read(bprm->fd, elf_interpreter, elf_ppnt->p_filesz);
1572 }
1573 if(retval < 0) {
1574 perror("load_elf_binary2");
1575 exit(-1);
1576 }
1577
1578 /* If the program interpreter is one of these two,
1579 then assume an iBCS2 image. Otherwise assume
1580 a native linux image. */
1581
1582 /* JRP - Need to add X86 lib dir stuff here... */
1583
1584 if (strcmp(elf_interpreter,"/usr/lib/libc.so.1") == 0 ||
1585 strcmp(elf_interpreter,"/usr/lib/ld.so.1") == 0) {
1586 ibcs2_interpreter = 1;
1587 }
1588
1589 #if 0
1590 printf("Using ELF interpreter %s\n", path(elf_interpreter));
1591 #endif
1592 if (retval >= 0) {
1593 retval = open(path(elf_interpreter), O_RDONLY);
1594 if(retval >= 0) {
1595 interpreter_fd = retval;
1596 }
1597 else {
1598 perror(elf_interpreter);
1599 exit(-1);
1600 /* retval = -errno; */
1601 }
1602 }
1603
1604 if (retval >= 0) {
1605 retval = lseek(interpreter_fd, 0, SEEK_SET);
1606 if(retval >= 0) {
1607 retval = read(interpreter_fd,bprm->buf,128);
1608 }
1609 }
1610 if (retval >= 0) {
1611 interp_ex = *((struct exec *) bprm->buf); /* aout exec-header */
1612 interp_elf_ex = *((struct elfhdr *) bprm->buf); /* elf exec-header */
1613 }
1614 if (retval < 0) {
1615 perror("load_elf_binary3");
1616 exit(-1);
1617 free (elf_phdata);
1618 free(elf_interpreter);
1619 close(bprm->fd);
1620 return retval;
1621 }
1622 }
1623 elf_ppnt++;
1624 }
1625
1626 /* Some simple consistency checks for the interpreter */
1627 if (elf_interpreter){
1628 interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;
1629
1630 /* Now figure out which format our binary is */
1631 if ((N_MAGIC(interp_ex) != OMAGIC) && (N_MAGIC(interp_ex) != ZMAGIC) &&
1632 (N_MAGIC(interp_ex) != QMAGIC)) {
1633 interpreter_type = INTERPRETER_ELF;
1634 }
1635
1636 if (interp_elf_ex.e_ident[0] != 0x7f ||
1637 strncmp((char *)&interp_elf_ex.e_ident[1], "ELF",3) != 0) {
1638 interpreter_type &= ~INTERPRETER_ELF;
1639 }
1640
1641 if (!interpreter_type) {
1642 free(elf_interpreter);
1643 free(elf_phdata);
1644 close(bprm->fd);
1645 return -ELIBBAD;
1646 }
1647 }
1648
1649 /* OK, we are done with that, now set up the arg stuff,
1650 and then start this sucker up */
1651
1652 {
1653 char * passed_p;
1654
1655 if (interpreter_type == INTERPRETER_AOUT) {
1656 snprintf(passed_fileno, sizeof(passed_fileno), "%d", bprm->fd);
1657 passed_p = passed_fileno;
1658
1659 if (elf_interpreter) {
1660 bprm->p = copy_elf_strings(1,&passed_p,bprm->page,bprm->p);
1661 bprm->argc++;
1662 }
1663 }
1664 if (!bprm->p) {
1665 if (elf_interpreter) {
1666 free(elf_interpreter);
1667 }
1668 free (elf_phdata);
1669 close(bprm->fd);
1670 return -E2BIG;
1671 }
1672 }
1673
1674 /* OK, This is the point of no return */
1675 info->end_data = 0;
1676 info->end_code = 0;
1677 info->start_mmap = (abi_ulong)ELF_START_MMAP;
1678 info->mmap = 0;
1679 elf_entry = (abi_ulong) elf_ex.e_entry;
1680
1681 #if defined(CONFIG_USE_GUEST_BASE)
1682 /*
1683 * If the user has not explicitly set guest_base, probe here to
1684 * decide whether we should set it automatically.
1685 */
1686 if (!(have_guest_base || reserved_va)) {
1687 /*
1688 * Go through ELF program header table and find the address
1689 * range used by loadable segments. Check that this is available on
1690 * the host, and if not find a suitable value for guest_base. */
1691 abi_ulong app_start = ~0;
1692 abi_ulong app_end = 0;
1693 abi_ulong addr;
1694 unsigned long host_start;
1695 unsigned long real_start;
1696 unsigned long host_size;
1697 for (i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum;
1698 i++, elf_ppnt++) {
1699 if (elf_ppnt->p_type != PT_LOAD)
1700 continue;
1701 addr = elf_ppnt->p_vaddr;
1702 if (addr < app_start) {
1703 app_start = addr;
1704 }
1705 addr += elf_ppnt->p_memsz;
1706 if (addr > app_end) {
1707 app_end = addr;
1708 }
1709 }
1710
1711 /* If we don't have any loadable segments then something
1712 is very wrong. */
1713 assert(app_start < app_end);
1714
1715 /* Round addresses to page boundaries. */
1716 app_start = app_start & qemu_host_page_mask;
1717 app_end = HOST_PAGE_ALIGN(app_end);
1718 if (app_start < mmap_min_addr) {
1719 host_start = HOST_PAGE_ALIGN(mmap_min_addr);
1720 } else {
1721 host_start = app_start;
1722 if (host_start != app_start) {
1723 fprintf(stderr, "qemu: Address overflow loading ELF binary\n");
1724 abort();
1725 }
1726 }
1727 host_size = app_end - app_start;
1728 while (1) {
1729 /* Do not use mmap_find_vma here because that is limited to the
1730 guest address space. We are going to make the
1731 guest address space fit whatever we're given. */
1732 real_start = (unsigned long)mmap((void *)host_start, host_size,
1733 PROT_NONE, MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0);
1734 if (real_start == (unsigned long)-1) {
1735 fprintf(stderr, "qemu: Virtual memory exhausted\n");
1736 abort();
1737 }
1738 if (real_start == host_start) {
1739 break;
1740 }
1741 /* That address didn't work. Unmap and try a different one.
1742 The address the host picked is typically
1743 right at the top of the host address space and leaves the
1744 guest with no usable address space. Resort to a linear search.
1745 We already compensated for mmap_min_addr, so this should not
1746 happen often. Probably means we got unlucky and host address
1747 space randomization put a shared library somewhere
1748 inconvenient. */
1749 munmap((void *)real_start, host_size);
1750 host_start += qemu_host_page_size;
1751 if (host_start == app_start) {
1752 /* Theoretically possible if host doesn't have any
1753 suitably aligned areas. Normally the first mmap will
1754 fail. */
1755 fprintf(stderr, "qemu: Unable to find space for application\n");
1756 abort();
1757 }
1758 }
1759 qemu_log("Relocating guest address space from 0x" TARGET_ABI_FMT_lx
1760 " to 0x%lx\n", app_start, real_start);
1761 guest_base = real_start - app_start;
1762 }
1763 #endif /* CONFIG_USE_GUEST_BASE */
1764
1765 /* Do this so that we can load the interpreter, if need be. We will
1766 change some of these later */
1767 info->rss = 0;
1768 bprm->p = setup_arg_pages(bprm->p, bprm, info);
1769 info->start_stack = bprm->p;
1770
1771 /* Now we do a little grungy work by mmaping the ELF image into
1772 * the correct location in memory. At this point, we assume that
1773 * the image should be loaded at a fixed address, not at a variable
1774 * address.
1775 */
1776
1777 for(i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum; i++, elf_ppnt++) {
1778 int elf_prot = 0;
1779 int elf_flags = 0;
1780 abi_ulong error;
1781
1782 if (elf_ppnt->p_type != PT_LOAD)
1783 continue;
1784
1785 if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ;
1786 if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
1787 if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
1788 elf_flags = MAP_PRIVATE | MAP_DENYWRITE;
1789 if (elf_ex.e_type == ET_EXEC || load_addr_set) {
1790 elf_flags |= MAP_FIXED;
1791 } else if (elf_ex.e_type == ET_DYN) {
1792 /* Try and get dynamic programs out of the way of the default mmap
1793 base, as well as whatever program they might try to exec. This
1794 is because the brk will follow the loader, and is not movable. */
1795 /* NOTE: for qemu, we do a big mmap to get enough space
1796 without hardcoding any address */
1797 error = target_mmap(0, ET_DYN_MAP_SIZE,
1798 PROT_NONE, MAP_PRIVATE | MAP_ANON,
1799 -1, 0);
1800 if (error == -1) {
1801 perror("mmap");
1802 exit(-1);
1803 }
1804 load_bias = TARGET_ELF_PAGESTART(error - elf_ppnt->p_vaddr);
1805 }
1806
1807 error = target_mmap(TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr),
1808 (elf_ppnt->p_filesz +
1809 TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)),
1810 elf_prot,
1811 (MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE),
1812 bprm->fd,
1813 (elf_ppnt->p_offset -
1814 TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)));
1815 if (error == -1) {
1816 perror("mmap");
1817 exit(-1);
1818 }
1819
1820 #ifdef LOW_ELF_STACK
1821 if (TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr) < elf_stack)
1822 elf_stack = TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr);
1823 #endif
1824
1825 if (!load_addr_set) {
1826 load_addr_set = 1;
1827 load_addr = elf_ppnt->p_vaddr - elf_ppnt->p_offset;
1828 if (elf_ex.e_type == ET_DYN) {
1829 load_bias += error -
1830 TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr);
1831 load_addr += load_bias;
1832 reloc_func_desc = load_bias;
1833 }
1834 }
1835 k = elf_ppnt->p_vaddr;
1836 if (k < start_code)
1837 start_code = k;
1838 if (start_data < k)
1839 start_data = k;
1840 k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
1841 if (k > elf_bss)
1842 elf_bss = k;
1843 if ((elf_ppnt->p_flags & PF_X) && end_code < k)
1844 end_code = k;
1845 if (end_data < k)
1846 end_data = k;
1847 k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
1848 if (k > elf_brk) elf_brk = k;
1849 }
1850
1851 elf_entry += load_bias;
1852 elf_bss += load_bias;
1853 elf_brk += load_bias;
1854 start_code += load_bias;
1855 end_code += load_bias;
1856 start_data += load_bias;
1857 end_data += load_bias;
1858
1859 if (elf_interpreter) {
1860 if (interpreter_type & 1) {
1861 elf_entry = load_aout_interp(&interp_ex, interpreter_fd);
1862 }
1863 else if (interpreter_type & 2) {
1864 elf_entry = load_elf_interp(&interp_elf_ex, interpreter_fd,
1865 &interp_load_addr);
1866 }
1867 reloc_func_desc = interp_load_addr;
1868
1869 close(interpreter_fd);
1870 free(elf_interpreter);
1871
1872 if (elf_entry == ~((abi_ulong)0UL)) {
1873 printf("Unable to load interpreter\n");
1874 free(elf_phdata);
1875 exit(-1);
1876 return 0;
1877 }
1878 }
1879
1880 free(elf_phdata);
1881
1882 if (qemu_log_enabled())
1883 load_symbols(&elf_ex, bprm->fd);
1884
1885 if (interpreter_type != INTERPRETER_AOUT) close(bprm->fd);
1886 info->personality = (ibcs2_interpreter ? PER_SVR4 : PER_LINUX);
1887
1888 #ifdef LOW_ELF_STACK
1889 info->start_stack = bprm->p = elf_stack - 4;
1890 #endif
1891 bprm->p = create_elf_tables(bprm->p,
1892 bprm->argc,
1893 bprm->envc,
1894 &elf_ex,
1895 load_addr, load_bias,
1896 interp_load_addr,
1897 (interpreter_type == INTERPRETER_AOUT ? 0 : 1),
1898 info);
1899 info->load_addr = reloc_func_desc;
1900 info->start_brk = info->brk = elf_brk;
1901 info->end_code = end_code;
1902 info->start_code = start_code;
1903 info->start_data = start_data;
1904 info->end_data = end_data;
1905 info->start_stack = bprm->p;
1906
1907 /* Calling set_brk effectively mmaps the pages that we need for the bss and break
1908 sections */
1909 set_brk(elf_bss, elf_brk);
1910
1911 padzero(elf_bss, elf_brk);
1912
1913 #if 0
1914 printf("(start_brk) %x\n" , info->start_brk);
1915 printf("(end_code) %x\n" , info->end_code);
1916 printf("(start_code) %x\n" , info->start_code);
1917 printf("(end_data) %x\n" , info->end_data);
1918 printf("(start_stack) %x\n" , info->start_stack);
1919 printf("(brk) %x\n" , info->brk);
1920 #endif
1921
1922 if (info->personality == PER_SVR4)
1923 {
1924 /* Why this, you ask??? Well SVr4 maps page 0 as read-only,
1925 and some applications "depend" upon this behavior.
1926 Since we do not have the power to recompile these, we
1927 emulate the SVr4 behavior. Sigh. */
1928 mapped_addr = target_mmap(0, qemu_host_page_size, PROT_READ | PROT_EXEC,
1929 MAP_FIXED | MAP_PRIVATE, -1, 0);
1930 }
1931
1932 info->entry = elf_entry;
1933
1934 #ifdef USE_ELF_CORE_DUMP
1935 bprm->core_dump = &elf_core_dump;
1936 #endif
1937
1938 return 0;
1939 }
1940
1941 #ifdef USE_ELF_CORE_DUMP
1942
1943 /*
1944 * Definitions to generate Intel SVR4-like core files.
1945 * These mostly have the same names as the SVR4 types with "target_elf_"
1946 * tacked on the front to prevent clashes with linux definitions,
1947 * and the typedef forms have been avoided. This is mostly like
1948 * the SVR4 structure, but more Linuxy, with things that Linux does
1949 * not support and which gdb doesn't really use excluded.
1950 *
1951 * Fields we don't dump (their contents are zero) in linux-user qemu
1952 * are marked with XXX.
1953 *
1954 * Core dump code is copied from linux kernel (fs/binfmt_elf.c).
1955 *
1956 * Porting ELF coredump support to a target is a (quite) simple process. First
1957 * you define USE_ELF_CORE_DUMP in the target's ELF code (where init_thread() for
1958 * the target resides):
1959 *
1960 * #define USE_ELF_CORE_DUMP
1961 *
1962 * Next you define the type of the register set used for dumping. The ELF
1963 * specification says it needs to be an array of elf_greg_t of size ELF_NREG.
1964 *
1965 * typedef <target_regtype> target_elf_greg_t;
1966 * #define ELF_NREG <number of registers>
1967 * typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
1968 *
1969 * The last step is to implement a target-specific function that copies the
1970 * registers from the given CPU into the register set defined above. The prototype is:
1971 *
1972 * static void elf_core_copy_regs(target_elf_gregset_t *regs,
1973 * const CPUState *env);
1974 *
1975 * Parameters:
1976 * regs - copy register values into here (allocated and zeroed by caller)
1977 * env - copy registers from here
1978 *
1979 * An example for the ARM target is provided in this file.
1980 */
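
/*
 * A minimal sketch (not part of the original file) of such a target hook,
 * assuming a hypothetical target that keeps sixteen general purpose
 * registers in env->regs[] and a program counter in env->pc; both names
 * are assumptions for illustration only.  The real implementation for the
 * ARM target is elsewhere in this file, so this copy is kept inside #if 0
 * and is never compiled.
 */
#if 0
#define ELF_NREG 17
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];

static void elf_core_copy_regs(target_elf_gregset_t *regs,
                               const CPUState *env)
{
    int i;

    /* Swap to target byte order here; bswap_prstatus() leaves pr_reg alone. */
    for (i = 0; i < 16; i++) {
        (*regs)[i] = tswapl(env->regs[i]);
    }
    (*regs)[16] = tswapl(env->pc);
}
#endif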
1981
1982 /* An ELF note in memory */
1983 struct memelfnote {
1984 const char *name;
1985 size_t namesz;
1986 size_t namesz_rounded;
1987 int type;
1988 size_t datasz;
1989 void *data;
1990 size_t notesz;
1991 };
1992
1993 struct target_elf_siginfo {
1994 int si_signo; /* signal number */
1995 int si_code; /* extra code */
1996 int si_errno; /* errno */
1997 };
1998
1999 struct target_elf_prstatus {
2000 struct target_elf_siginfo pr_info; /* Info associated with signal */
2001 short pr_cursig; /* Current signal */
2002 target_ulong pr_sigpend; /* XXX */
2003 target_ulong pr_sighold; /* XXX */
2004 target_pid_t pr_pid;
2005 target_pid_t pr_ppid;
2006 target_pid_t pr_pgrp;
2007 target_pid_t pr_sid;
2008 struct target_timeval pr_utime; /* XXX User time */
2009 struct target_timeval pr_stime; /* XXX System time */
2010 struct target_timeval pr_cutime; /* XXX Cumulative user time */
2011 struct target_timeval pr_cstime; /* XXX Cumulative system time */
2012 target_elf_gregset_t pr_reg; /* GP registers */
2013 int pr_fpvalid; /* XXX */
2014 };
2015
2016 #define ELF_PRARGSZ (80) /* Number of chars for args */
2017
2018 struct target_elf_prpsinfo {
2019 char pr_state; /* numeric process state */
2020 char pr_sname; /* char for pr_state */
2021 char pr_zomb; /* zombie */
2022 char pr_nice; /* nice val */
2023 target_ulong pr_flag; /* flags */
2024 target_uid_t pr_uid;
2025 target_gid_t pr_gid;
2026 target_pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
2027 /* Lots missing */
2028 char pr_fname[16]; /* filename of executable */
2029 char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
2030 };
2031
2032 /* Here is the structure in which status of each thread is captured. */
2033 struct elf_thread_status {
2034 QTAILQ_ENTRY(elf_thread_status) ets_link;
2035 struct target_elf_prstatus prstatus; /* NT_PRSTATUS */
2036 #if 0
2037 elf_fpregset_t fpu; /* NT_PRFPREG */
2038 struct task_struct *thread;
2039 elf_fpxregset_t xfpu; /* ELF_CORE_XFPREG_TYPE */
2040 #endif
2041 struct memelfnote notes[1];
2042 int num_notes;
2043 };
2044
2045 struct elf_note_info {
2046 struct memelfnote *notes;
2047 struct target_elf_prstatus *prstatus; /* NT_PRSTATUS */
2048 struct target_elf_prpsinfo *psinfo; /* NT_PRPSINFO */
2049
2050 QTAILQ_HEAD(thread_list_head, elf_thread_status) thread_list;
2051 #if 0
2052 /*
2053 * Current version of ELF coredump doesn't support
2054 * dumping fp regs etc.
2055 */
2056 elf_fpregset_t *fpu;
2057 elf_fpxregset_t *xfpu;
2058 int thread_status_size;
2059 #endif
2060 int notes_size;
2061 int numnote;
2062 };
2063
2064 struct vm_area_struct {
2065 abi_ulong vma_start; /* start vaddr of memory region */
2066 abi_ulong vma_end; /* end vaddr of memory region */
2067 abi_ulong vma_flags; /* protection etc. flags for the region */
2068 QTAILQ_ENTRY(vm_area_struct) vma_link;
2069 };
2070
2071 struct mm_struct {
2072 QTAILQ_HEAD(, vm_area_struct) mm_mmap;
2073 int mm_count; /* number of mappings */
2074 };
2075
2076 static struct mm_struct *vma_init(void);
2077 static void vma_delete(struct mm_struct *);
2078 static int vma_add_mapping(struct mm_struct *, abi_ulong,
2079 abi_ulong, abi_ulong);
2080 static int vma_get_mapping_count(const struct mm_struct *);
2081 static struct vm_area_struct *vma_first(const struct mm_struct *);
2082 static struct vm_area_struct *vma_next(struct vm_area_struct *);
2083 static abi_ulong vma_dump_size(const struct vm_area_struct *);
2084 static int vma_walker(void *priv, abi_ulong start, abi_ulong end,
2085 unsigned long flags);
2086
2087 static void fill_elf_header(struct elfhdr *, int, uint16_t, uint32_t);
2088 static void fill_note(struct memelfnote *, const char *, int,
2089 unsigned int, void *);
2090 static void fill_prstatus(struct target_elf_prstatus *, const TaskState *, int);
2091 static int fill_psinfo(struct target_elf_prpsinfo *, const TaskState *);
2092 static void fill_auxv_note(struct memelfnote *, const TaskState *);
2093 static void fill_elf_note_phdr(struct elf_phdr *, int, off_t);
2094 static size_t note_size(const struct memelfnote *);
2095 static void free_note_info(struct elf_note_info *);
2096 static int fill_note_info(struct elf_note_info *, long, const CPUState *);
2097 static void fill_thread_info(struct elf_note_info *, const CPUState *);
2098 static int core_dump_filename(const TaskState *, char *, size_t);
2099
2100 static int dump_write(int, const void *, size_t);
2101 static int write_note(struct memelfnote *, int);
2102 static int write_note_info(struct elf_note_info *, int);
2103
2104 #ifdef BSWAP_NEEDED
2105 static void bswap_prstatus(struct target_elf_prstatus *);
2106 static void bswap_psinfo(struct target_elf_prpsinfo *);
2107
2108 static void bswap_prstatus(struct target_elf_prstatus *prstatus)
2109 {
2110 prstatus->pr_info.si_signo = tswapl(prstatus->pr_info.si_signo);
2111 prstatus->pr_info.si_code = tswapl(prstatus->pr_info.si_code);
2112 prstatus->pr_info.si_errno = tswapl(prstatus->pr_info.si_errno);
2113 prstatus->pr_cursig = tswap16(prstatus->pr_cursig);
2114 prstatus->pr_sigpend = tswapl(prstatus->pr_sigpend);
2115 prstatus->pr_sighold = tswapl(prstatus->pr_sighold);
2116 prstatus->pr_pid = tswap32(prstatus->pr_pid);
2117 prstatus->pr_ppid = tswap32(prstatus->pr_ppid);
2118 prstatus->pr_pgrp = tswap32(prstatus->pr_pgrp);
2119 prstatus->pr_sid = tswap32(prstatus->pr_sid);
2120 /* cpu times are not filled, so we skip them */
2121 /* regs should be in correct format already */
2122 prstatus->pr_fpvalid = tswap32(prstatus->pr_fpvalid);
2123 }
2124
2125 static void bswap_psinfo(struct target_elf_prpsinfo *psinfo)
2126 {
2127 psinfo->pr_flag = tswapl(psinfo->pr_flag);
2128 psinfo->pr_uid = tswap16(psinfo->pr_uid);
2129 psinfo->pr_gid = tswap16(psinfo->pr_gid);
2130 psinfo->pr_pid = tswap32(psinfo->pr_pid);
2131 psinfo->pr_ppid = tswap32(psinfo->pr_ppid);
2132 psinfo->pr_pgrp = tswap32(psinfo->pr_pgrp);
2133 psinfo->pr_sid = tswap32(psinfo->pr_sid);
2134 }
2135 #endif /* BSWAP_NEEDED */
2136
2137 /*
2138 * Minimal support for Linux memory regions. These are needed
2139 * when we are finding out what memory exactly belongs to the
2140 * emulated process. No locks are needed here, as long as the
2141 * thread that received the signal is stopped.
2142 */
2143
2144 static struct mm_struct *vma_init(void)
2145 {
2146 struct mm_struct *mm;
2147
2148 if ((mm = qemu_malloc(sizeof (*mm))) == NULL)
2149 return (NULL);
2150
2151 mm->mm_count = 0;
2152 QTAILQ_INIT(&mm->mm_mmap);
2153
2154 return (mm);
2155 }
2156
2157 static void vma_delete(struct mm_struct *mm)
2158 {
2159 struct vm_area_struct *vma;
2160
2161 while ((vma = vma_first(mm)) != NULL) {
2162 QTAILQ_REMOVE(&mm->mm_mmap, vma, vma_link);
2163 qemu_free(vma);
2164 }
2165 qemu_free(mm);
2166 }
2167
2168 static int vma_add_mapping(struct mm_struct *mm, abi_ulong start,
2169 abi_ulong end, abi_ulong flags)
2170 {
2171 struct vm_area_struct *vma;
2172
2173 if ((vma = qemu_mallocz(sizeof (*vma))) == NULL)
2174 return (-1);
2175
2176 vma->vma_start = start;
2177 vma->vma_end = end;
2178 vma->vma_flags = flags;
2179
2180 QTAILQ_INSERT_TAIL(&mm->mm_mmap, vma, vma_link);
2181 mm->mm_count++;
2182
2183 return (0);
2184 }
2185
2186 static struct vm_area_struct *vma_first(const struct mm_struct *mm)
2187 {
2188 return (QTAILQ_FIRST(&mm->mm_mmap));
2189 }
2190
2191 static struct vm_area_struct *vma_next(struct vm_area_struct *vma)
2192 {
2193 return (QTAILQ_NEXT(vma, vma_link));
2194 }
2195
2196 static int vma_get_mapping_count(const struct mm_struct *mm)
2197 {
2198 return (mm->mm_count);
2199 }
2200
2201 /*
2202 * Calculate file (dump) size of given memory region.
2203 */
2204 static abi_ulong vma_dump_size(const struct vm_area_struct *vma)
2205 {
2206 /* if we cannot even read the first page, skip it */
2207 if (!access_ok(VERIFY_READ, vma->vma_start, TARGET_PAGE_SIZE))
2208 return (0);
2209
2210 /*
2211 * Usually we don't dump executable pages, as they contain
2212 * non-writable code that a debugger can read directly from
2213 * the target library etc. However, thread stacks are also
2214 * marked executable, so we read in the first page of the given
2215 * region and check whether it contains an ELF header. If there
2216 * is no ELF header, we dump the region.
2217 */
2218 if (vma->vma_flags & PROT_EXEC) {
2219 char page[TARGET_PAGE_SIZE];
2220
2221 copy_from_user(page, vma->vma_start, sizeof (page));
2222 if ((page[EI_MAG0] == ELFMAG0) &&
2223 (page[EI_MAG1] == ELFMAG1) &&
2224 (page[EI_MAG2] == ELFMAG2) &&
2225 (page[EI_MAG3] == ELFMAG3)) {
2226 /*
2227 * This mapping is probably from an ELF binary. Don't
2228 * dump it.
2229 */
2230 return (0);
2231 }
2232 }
2233
2234 return (vma->vma_end - vma->vma_start);
2235 }
2236
2237 static int vma_walker(void *priv, abi_ulong start, abi_ulong end,
2238 unsigned long flags)
2239 {
2240 struct mm_struct *mm = (struct mm_struct *)priv;
2241
2242 vma_add_mapping(mm, start, end, flags);
2243 return (0);
2244 }
2245
2246 static void fill_note(struct memelfnote *note, const char *name, int type,
2247 unsigned int sz, void *data)
2248 {
2249 unsigned int namesz;
2250
2251 namesz = strlen(name) + 1;
2252 note->name = name;
2253 note->namesz = namesz;
2254 note->namesz_rounded = roundup(namesz, sizeof (int32_t));
2255 note->type = type;
2256 note->datasz = roundup(sz, sizeof (int32_t));
2257 note->data = data;
2258
2259 /*
2260 * We calculate the rounded-up note size here, as required
2261 * by the ELF specification.
2262 */
2263 note->notesz = sizeof (struct elf_note) +
2264 note->namesz_rounded + note->datasz;
2265 }
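
/*
 * Worked example (illustrative only): for a "CORE"/NT_PRSTATUS note,
 * namesz is 5 ("CORE" plus the trailing NUL), namesz_rounded is 8, and
 * notesz is sizeof(struct elf_note) + 8 + the rounded-up data size,
 * which is the number of bytes the note occupies in the core file.
 */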
2266
2267 static void fill_elf_header(struct elfhdr *elf, int segs, uint16_t machine,
2268 uint32_t flags)
2269 {
2270 (void) memset(elf, 0, sizeof(*elf));
2271
2272 (void) memcpy(elf->e_ident, ELFMAG, SELFMAG);
2273 elf->e_ident[EI_CLASS] = ELF_CLASS;
2274 elf->e_ident[EI_DATA] = ELF_DATA;
2275 elf->e_ident[EI_VERSION] = EV_CURRENT;
2276 elf->e_ident[EI_OSABI] = ELF_OSABI;
2277
2278 elf->e_type = ET_CORE;
2279 elf->e_machine = machine;
2280 elf->e_version = EV_CURRENT;
2281 elf->e_phoff = sizeof(struct elfhdr);
2282 elf->e_flags = flags;
2283 elf->e_ehsize = sizeof(struct elfhdr);
2284 elf->e_phentsize = sizeof(struct elf_phdr);
2285 elf->e_phnum = segs;
2286
2287 #ifdef BSWAP_NEEDED
2288 bswap_ehdr(elf);
2289 #endif
2290 }
2291
2292 static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, off_t offset)
2293 {
2294 phdr->p_type = PT_NOTE;
2295 phdr->p_offset = offset;
2296 phdr->p_vaddr = 0;
2297 phdr->p_paddr = 0;
2298 phdr->p_filesz = sz;
2299 phdr->p_memsz = 0;
2300 phdr->p_flags = 0;
2301 phdr->p_align = 0;
2302
2303 #ifdef BSWAP_NEEDED
2304 bswap_phdr(phdr);
2305 #endif
2306 }
2307
2308 static size_t note_size(const struct memelfnote *note)
2309 {
2310 return (note->notesz);
2311 }
2312
2313 static void fill_prstatus(struct target_elf_prstatus *prstatus,
2314 const TaskState *ts, int signr)
2315 {
2316 (void) memset(prstatus, 0, sizeof (*prstatus));
2317 prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
2318 prstatus->pr_pid = ts->ts_tid;
2319 prstatus->pr_ppid = getppid();
2320 prstatus->pr_pgrp = getpgrp();
2321 prstatus->pr_sid = getsid(0);
2322
2323 #ifdef BSWAP_NEEDED
2324 bswap_prstatus(prstatus);
2325 #endif
2326 }
2327
2328 static int fill_psinfo(struct target_elf_prpsinfo *psinfo, const TaskState *ts)
2329 {
2330 char *filename, *base_filename;
2331 unsigned int i, len;
2332
2333 (void) memset(psinfo, 0, sizeof (*psinfo));
2334
2335 len = ts->info->arg_end - ts->info->arg_start;
2336 if (len >= ELF_PRARGSZ)
2337 len = ELF_PRARGSZ - 1;
2338 if (copy_from_user(&psinfo->pr_psargs, ts->info->arg_start, len))
2339 return -EFAULT;
2340 for (i = 0; i < len; i++)
2341 if (psinfo->pr_psargs[i] == 0)
2342 psinfo->pr_psargs[i] = ' ';
2343 psinfo->pr_psargs[len] = 0;
2344
2345 psinfo->pr_pid = getpid();
2346 psinfo->pr_ppid = getppid();
2347 psinfo->pr_pgrp = getpgrp();
2348 psinfo->pr_sid = getsid(0);
2349 psinfo->pr_uid = getuid();
2350 psinfo->pr_gid = getgid();
2351
2352 filename = strdup(ts->bprm->filename);
2353 base_filename = strdup(basename(filename));
2354 (void) strncpy(psinfo->pr_fname, base_filename,
2355 sizeof(psinfo->pr_fname) - 1);
2356 free(base_filename);
2357 free(filename);
2358
2359 #ifdef BSWAP_NEEDED
2360 bswap_psinfo(psinfo);
2361 #endif
2362 return (0);
2363 }
2364
2365 static void fill_auxv_note(struct memelfnote *note, const TaskState *ts)
2366 {
2367 elf_addr_t auxv = (elf_addr_t)ts->info->saved_auxv;
2368 elf_addr_t orig_auxv = auxv;
2369 abi_ulong val;
2370 void *ptr;
2371 int i, len;
2372
2373 /*
2374 * The auxiliary vector is stored on the target process's stack. It contains
2375 * {type, value} pairs that we need to dump into the note. This is not
2376 * strictly necessary, but we do it for the sake of completeness.
2377 */
2378
2379 /* find out the length of the vector; AT_NULL is the terminator */
2380 i = len = 0;
2381 do {
2382 get_user_ual(val, auxv);
2383 i += 2;
2384 auxv += 2 * sizeof (elf_addr_t);
2385 } while (val != AT_NULL);
2386 len = i * sizeof (elf_addr_t);
2387
2388 /* read in whole auxv vector and copy it to memelfnote */
2389 ptr = lock_user(VERIFY_READ, orig_auxv, len, 0);
2390 if (ptr != NULL) {
2391 fill_note(note, "CORE", NT_AUXV, len, ptr);
2392 unlock_user(ptr, orig_auxv, len);
2393 }
2394 }
2395
2396 /*
2397 * Constructs the name of the coredump file. We use the following
2398 * convention for the name:
2399 * qemu_<basename-of-target-binary>_<date>-<time>_<pid>.core
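 * (for example: qemu_ls_20091231-235959_1234.core, values here are illustrative)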
2400 *
2401 * Returns 0 in case of success, -1 otherwise (errno is set).
2402 */
2403 static int core_dump_filename(const TaskState *ts, char *buf,
2404 size_t bufsize)
2405 {
2406 char timestamp[64];
2407 char *filename = NULL;
2408 char *base_filename = NULL;
2409 struct timeval tv;
2410 struct tm tm;
2411
2412 assert(bufsize >= PATH_MAX);
2413
2414 if (gettimeofday(&tv, NULL) < 0) {
2415 (void) fprintf(stderr, "unable to get current timestamp: %s",
2416 strerror(errno));
2417 return (-1);
2418 }
2419
2420 filename = strdup(ts->bprm->filename);
2421 base_filename = strdup(basename(filename));
2422 (void) strftime(timestamp, sizeof (timestamp), "%Y%m%d-%H%M%S",
2423 localtime_r(&tv.tv_sec, &tm));
2424 (void) snprintf(buf, bufsize, "qemu_%s_%s_%d.core",
2425 base_filename, timestamp, (int)getpid());
2426 free(base_filename);
2427 free(filename);
2428
2429 return (0);
2430 }
2431
2432 static int dump_write(int fd, const void *ptr, size_t size)
2433 {
2434 const char *bufp = (const char *)ptr;
2435 ssize_t bytes_written, bytes_left;
2436 struct rlimit dumpsize;
2437 off_t pos;
2438
2439 bytes_written = 0;
2440 getrlimit(RLIMIT_CORE, &dumpsize);
2441 if ((pos = lseek(fd, 0, SEEK_CUR)) == -1) {
2442 if (errno == ESPIPE) { /* not a seekable stream */
2443 bytes_left = size;
2444 } else {
2445 return pos;
2446 }
2447 } else {
2448 if (dumpsize.rlim_cur <= pos) {
2449 return -1;
2450 } else if (dumpsize.rlim_cur == RLIM_INFINITY) {
2451 bytes_left = size;
2452 } else {
2453 size_t limit_left = dumpsize.rlim_cur - pos;
2454 bytes_left = limit_left >= size ? size : limit_left;
2455 }
2456 }
2457
2458 /*
2459 * Under normal conditions a single write(2) should do, but
2460 * in the case of a socket etc. this mechanism is more portable.
2461 */
2462 do {
2463 bytes_written = write(fd, bufp, bytes_left);
2464 if (bytes_written < 0) {
2465 if (errno == EINTR)
2466 continue;
2467 return (-1);
2468 } else if (bytes_written == 0) { /* eof */
2469 return (-1);
2470 }
2471 bufp += bytes_written;
2472 bytes_left -= bytes_written;
2473 } while (bytes_left > 0);
2474
2475 return (0);
2476 }
2477
2478 static int write_note(struct memelfnote *men, int fd)
2479 {
2480 struct elf_note en;
2481
2482 en.n_namesz = men->namesz;
2483 en.n_type = men->type;
2484 en.n_descsz = men->datasz;
2485
2486 #ifdef BSWAP_NEEDED
2487 bswap_note(&en);
2488 #endif
2489
2490 if (dump_write(fd, &en, sizeof(en)) != 0)
2491 return (-1);
2492 if (dump_write(fd, men->name, men->namesz_rounded) != 0)
2493 return (-1);
2494 if (dump_write(fd, men->data, men->datasz) != 0)
2495 return (-1);
2496
2497 return (0);
2498 }
2499
2500 static void fill_thread_info(struct elf_note_info *info, const CPUState *env)
2501 {
2502 TaskState *ts = (TaskState *)env->opaque;
2503 struct elf_thread_status *ets;
2504
2505 ets = qemu_mallocz(sizeof (*ets));
2506 ets->num_notes = 1; /* only prstatus is dumped */
2507 fill_prstatus(&ets->prstatus, ts, 0);
2508 elf_core_copy_regs(&ets->prstatus.pr_reg, env);
2509 fill_note(&ets->notes[0], "CORE", NT_PRSTATUS, sizeof (ets->prstatus),
2510 &ets->prstatus);
2511
2512 QTAILQ_INSERT_TAIL(&info->thread_list, ets, ets_link);
2513
2514 info->notes_size += note_size(&ets->notes[0]);
2515 }
2516
2517 static int fill_note_info(struct elf_note_info *info,
2518 long signr, const CPUState *env)
2519 {
2520 #define NUMNOTES 3
2521 CPUState *cpu = NULL;
2522 TaskState *ts = (TaskState *)env->opaque;
2523 int i;
2524
2525 (void) memset(info, 0, sizeof (*info));
2526
2527 QTAILQ_INIT(&info->thread_list);
2528
2529 info->notes = qemu_mallocz(NUMNOTES * sizeof (struct memelfnote));
2530 if (info->notes == NULL)
2531 return (-ENOMEM);
2532 info->prstatus = qemu_mallocz(sizeof (*info->prstatus));
2533 if (info->prstatus == NULL)
2534 return (-ENOMEM);
2535 info->psinfo = qemu_mallocz(sizeof (*info->psinfo));
2536 if (info->psinfo == NULL)
2537 return (-ENOMEM);
2538
2539 /*
2540 * First fill in status (and registers) of current thread
2541 * including process info & aux vector.
2542 */
2543 fill_prstatus(info->prstatus, ts, signr);
2544 elf_core_copy_regs(&info->prstatus->pr_reg, env);
2545 fill_note(&info->notes[0], "CORE", NT_PRSTATUS,
2546 sizeof (*info->prstatus), info->prstatus);
2547 fill_psinfo(info->psinfo, ts);
2548 fill_note(&info->notes[1], "CORE", NT_PRPSINFO,
2549 sizeof (*info->psinfo), info->psinfo);
2550 fill_auxv_note(&info->notes[2], ts);
2551 info->numnote = NUMNOTES;
2552
2553 info->notes_size = 0;
2554 for (i = 0; i < info->numnote; i++)
2555 info->notes_size += note_size(&info->notes[i]);
2556
2557 /* read and fill status of all threads */
2558 cpu_list_lock();
2559 for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
2560 if (cpu == thread_env)
2561 continue;
2562 fill_thread_info(info, cpu);
2563 }
2564 cpu_list_unlock();
2565
2566 return (0);
2567 }
2568
2569 static void free_note_info(struct elf_note_info *info)
2570 {
2571 struct elf_thread_status *ets;
2572
2573 while (!QTAILQ_EMPTY(&info->thread_list)) {
2574 ets = QTAILQ_FIRST(&info->thread_list);
2575 QTAILQ_REMOVE(&info->thread_list, ets, ets_link);
2576 qemu_free(ets);
2577 }
2578
2579 qemu_free(info->prstatus);
2580 qemu_free(info->psinfo);
2581 qemu_free(info->notes);
2582 }
2583
2584 static int write_note_info(struct elf_note_info *info, int fd)
2585 {
2586 struct elf_thread_status *ets;
2587 int i, error = 0;
2588
2589 /* write prstatus, psinfo and auxv for current thread */
2590 for (i = 0; i < info->numnote; i++)
2591 if ((error = write_note(&info->notes[i], fd)) != 0)
2592 return (error);
2593
2594 /* write prstatus for each thread */
2595 QTAILQ_FOREACH(ets, &info->thread_list, ets_link) {
2597 if ((error = write_note(&ets->notes[0], fd)) != 0)
2598 return (error);
2599 }
2600
2601 return (0);
2602 }
2603
2604 /*
2605 * Write out ELF coredump.
2606 *
2607 * See documentation of ELF object file format in:
2608 * http://www.caldera.com/developers/devspecs/gabi41.pdf
2609 *
2610 * The coredump format in Linux is the following:
2611 *
2612 *   0   +----------------------+          \
2613 *       | ELF header           | ET_CORE  |
2614 *       +----------------------+          |
2615 *       | ELF program headers  |          |--- headers
2616 *       | - NOTE section       |          |
2617 *       | - PT_LOAD sections   |          |
2618 *       +----------------------+          /
2619 *       | NOTEs:               |
2620 *       |  - NT_PRSTATUS       |
2621 *       |  - NT_PRPSINFO       |
2622 *       |  - NT_AUXV           |
2623 *       +----------------------+ <-- aligned to target page
2624 *       | Process memory dump  |
2625 *       :                      :
2626 *       .                      .
2627 *       :                      :
2628 *       |                      |
2629 *       +----------------------+
2630 *
2631 * NT_PRSTATUS -> struct elf_prstatus (per thread)
2632 * NT_PRPSINFO -> struct elf_prpsinfo
2633 * NT_AUXV is array of { type, value } pairs (see fill_auxv_note()).
2634 *
2635 * The format follows the System V format as closely as possible.
2636 * Current version limitations are as follows:
2637 * - no floating point registers are dumped
2638 *
2639 * The function returns 0 on success, a negative errno otherwise.
2640 *
2641 * TODO: make this also work at runtime: it should be
2642 * possible to force a coredump from a running process and then
2643 * continue processing. For example, qemu could set up a SIGUSR2
2644 * handler (provided that the target process hasn't registered a
2645 * handler for it) that does the dump when the signal is received.
2646 */
2647 static int elf_core_dump(int signr, const CPUState *env)
2648 {
2649 const TaskState *ts = (const TaskState *)env->opaque;
2650 struct vm_area_struct *vma = NULL;
2651 char corefile[PATH_MAX];
2652 struct elf_note_info info;
2653 struct elfhdr elf;
2654 struct elf_phdr phdr;
2655 struct rlimit dumpsize;
2656 struct mm_struct *mm = NULL;
2657 off_t offset = 0, data_offset = 0;
2658 int segs = 0;
2659 int fd = -1;
2660
2661 errno = 0;
2662 getrlimit(RLIMIT_CORE, &dumpsize);
2663 if (dumpsize.rlim_cur == 0)
2664 return 0;
2665
2666 if (core_dump_filename(ts, corefile, sizeof (corefile)) < 0)
2667 return (-errno);
2668
2669 if ((fd = open(corefile, O_WRONLY | O_CREAT,
2670 S_IRUSR|S_IWUSR|S_IRGRP|S_IROTH)) < 0)
2671 return (-errno);
2672
2673 /*
2674 * Walk through target process memory mappings and
2675 * set up structure containing this information. After
2676 * this point vma_xxx functions can be used.
2677 */
2678 if ((mm = vma_init()) == NULL)
2679 goto out;
2680
2681 walk_memory_regions(mm, vma_walker);
2682 segs = vma_get_mapping_count(mm);
2683
2684 /*
2685 * Construct valid coredump ELF header. We also
2686 * add one more segment for notes.
2687 */
2688 fill_elf_header(&elf, segs + 1, ELF_MACHINE, 0);
2689 if (dump_write(fd, &elf, sizeof (elf)) != 0)
2690 goto out;
2691
2692 /* fill in in-memory version of notes */
2693 if (fill_note_info(&info, signr, env) < 0)
2694 goto out;
2695
2696 offset += sizeof (elf); /* elf header */
2697 offset += (segs + 1) * sizeof (struct elf_phdr); /* program headers */
2698
2699 /* write out notes program header */
2700 fill_elf_note_phdr(&phdr, info.notes_size, offset);
2701
2702 offset += info.notes_size;
2703 if (dump_write(fd, &phdr, sizeof (phdr)) != 0)
2704 goto out;
2705
2706 /*
2707 * ELF specification wants data to start at page boundary so
2708 * we align it here.
2709 */
2710 offset = roundup(offset, ELF_EXEC_PAGESIZE);
2711
2712 /*
2713 * Write program headers for memory regions mapped in
2714 * the target process.
2715 */
2716 for (vma = vma_first(mm); vma != NULL; vma = vma_next(vma)) {
2717 (void) memset(&phdr, 0, sizeof (phdr));
2718
2719 phdr.p_type = PT_LOAD;
2720 phdr.p_offset = offset;
2721 phdr.p_vaddr = vma->vma_start;
2722 phdr.p_paddr = 0;
2723 phdr.p_filesz = vma_dump_size(vma);
2724 offset += phdr.p_filesz;
2725 phdr.p_memsz = vma->vma_end - vma->vma_start;
2726 phdr.p_flags = vma->vma_flags & PROT_READ ? PF_R : 0;
2727 if (vma->vma_flags & PROT_WRITE)
2728 phdr.p_flags |= PF_W;
2729 if (vma->vma_flags & PROT_EXEC)
2730 phdr.p_flags |= PF_X;
2731 phdr.p_align = ELF_EXEC_PAGESIZE;
2732
2733 dump_write(fd, &phdr, sizeof (phdr));
2734 }
2735
2736 /*
2737 * Next we write notes just after program headers. No
2738 * alignment needed here.
2739 */
2740 if (write_note_info(&info, fd) < 0)
2741 goto out;
2742
2743 /* align data to page boundary */
2744 data_offset = lseek(fd, 0, SEEK_CUR);
2745 data_offset = TARGET_PAGE_ALIGN(data_offset);
2746 if (lseek(fd, data_offset, SEEK_SET) != data_offset)
2747 goto out;
2748
2749 /*
2750 * Finally we can dump process memory into corefile as well.
2751 */
2752 for (vma = vma_first(mm); vma != NULL; vma = vma_next(vma)) {
2753 abi_ulong addr;
2754 abi_ulong end;
2755
2756 end = vma->vma_start + vma_dump_size(vma);
2757
2758 for (addr = vma->vma_start; addr < end;
2759 addr += TARGET_PAGE_SIZE) {
2760 char page[TARGET_PAGE_SIZE];
2761 int error;
2762
2763 /*
2764 * Read in page from target process memory and
2765 * write it to coredump file.
2766 */
2767 error = copy_from_user(page, addr, sizeof (page));
2768 if (error != 0) {
2769 (void) fprintf(stderr, "unable to dump " TARGET_ABI_FMT_lx "\n",
2770 addr);
2771 errno = -error;
2772 goto out;
2773 }
2774 if (dump_write(fd, page, TARGET_PAGE_SIZE) < 0)
2775 goto out;
2776 }
2777 }
2778
2779 out:
2780 free_note_info(&info);
2781 if (mm != NULL)
2782 vma_delete(mm);
2783 (void) close(fd);
2784
2785 if (errno != 0)
2786 return (-errno);
2787 return (0);
2788 }
2789
2790 #endif /* USE_ELF_CORE_DUMP */
2791
2792 static int load_aout_interp(void * exptr, int interp_fd)
2793 {
2794 printf("a.out interpreter not yet supported\n");
2795 return(0);
2796 }
2797
2798 void do_init_thread(struct target_pt_regs *regs, struct image_info *infop)
2799 {
2800 init_thread(regs, infop);
2801 }