git.proxmox.com Git - qemu.git/blob - linux-user/elfload.c
Add more DT_* and AT_* constants to qemu's copy of elf.h.
[qemu.git] / linux-user / elfload.c
1 /* This is the Linux kernel elf-loading code, ported into user space */
2 #include <sys/time.h>
3 #include <sys/param.h>
4
5 #include <stdio.h>
6 #include <sys/types.h>
7 #include <fcntl.h>
8 #include <errno.h>
9 #include <unistd.h>
10 #include <sys/mman.h>
11 #include <sys/resource.h>
12 #include <stdlib.h>
13 #include <string.h>
14 #include <time.h>
15
16 #include "qemu.h"
17 #include "disas.h"
18
19 #ifdef _ARCH_PPC64
20 #undef ARCH_DLINFO
21 #undef ELF_PLATFORM
22 #undef ELF_HWCAP
23 #undef ELF_CLASS
24 #undef ELF_DATA
25 #undef ELF_ARCH
26 #endif
27
28 #define ELF_OSABI ELFOSABI_SYSV
29
30 /* from personality.h */
31
32 /*
33 * Flags for bug emulation.
34 *
35 * These occupy the top three bytes.
36 */
37 enum {
38 ADDR_NO_RANDOMIZE = 0x0040000, /* disable randomization of VA space */
39 FDPIC_FUNCPTRS = 0x0080000, /* userspace function ptrs point to descriptors
40 * (signal handling)
41 */
42 MMAP_PAGE_ZERO = 0x0100000,
43 ADDR_COMPAT_LAYOUT = 0x0200000,
44 READ_IMPLIES_EXEC = 0x0400000,
45 ADDR_LIMIT_32BIT = 0x0800000,
46 SHORT_INODE = 0x1000000,
47 WHOLE_SECONDS = 0x2000000,
48 STICKY_TIMEOUTS = 0x4000000,
49 ADDR_LIMIT_3GB = 0x8000000,
50 };
51
52 /*
53 * Personality types.
54 *
55 * These go in the low byte. Avoid using the top bit, it will
56 * conflict with error returns.
57 */
58 enum {
59 PER_LINUX = 0x0000,
60 PER_LINUX_32BIT = 0x0000 | ADDR_LIMIT_32BIT,
61 PER_LINUX_FDPIC = 0x0000 | FDPIC_FUNCPTRS,
62 PER_SVR4 = 0x0001 | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
63 PER_SVR3 = 0x0002 | STICKY_TIMEOUTS | SHORT_INODE,
64 PER_SCOSVR3 = 0x0003 | STICKY_TIMEOUTS |
65 WHOLE_SECONDS | SHORT_INODE,
66 PER_OSR5 = 0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS,
67 PER_WYSEV386 = 0x0004 | STICKY_TIMEOUTS | SHORT_INODE,
68 PER_ISCR4 = 0x0005 | STICKY_TIMEOUTS,
69 PER_BSD = 0x0006,
70 PER_SUNOS = 0x0006 | STICKY_TIMEOUTS,
71 PER_XENIX = 0x0007 | STICKY_TIMEOUTS | SHORT_INODE,
72 PER_LINUX32 = 0x0008,
73 PER_LINUX32_3GB = 0x0008 | ADDR_LIMIT_3GB,
74 PER_IRIX32 = 0x0009 | STICKY_TIMEOUTS,/* IRIX5 32-bit */
75 PER_IRIXN32 = 0x000a | STICKY_TIMEOUTS,/* IRIX6 new 32-bit */
76 PER_IRIX64 = 0x000b | STICKY_TIMEOUTS,/* IRIX6 64-bit */
77 PER_RISCOS = 0x000c,
78 PER_SOLARIS = 0x000d | STICKY_TIMEOUTS,
79 PER_UW7 = 0x000e | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
80 PER_OSF4 = 0x000f, /* OSF/1 v4 */
81 PER_HPUX = 0x0010,
82 PER_MASK = 0x00ff,
83 };
84
85 /*
86 * Return the base personality without flags.
87 */
88 #define personality(pers) (pers & PER_MASK)
89
/* this flag is ineffective under Linux too; it should be deleted */
91 #ifndef MAP_DENYWRITE
92 #define MAP_DENYWRITE 0
93 #endif
94
95 /* should probably go in elf.h */
96 #ifndef ELIBBAD
97 #define ELIBBAD 80
98 #endif
99
100 typedef target_ulong target_elf_greg_t;
101 #ifdef USE_UID16
102 typedef uint16_t target_uid_t;
103 typedef uint16_t target_gid_t;
104 #else
105 typedef uint32_t target_uid_t;
106 typedef uint32_t target_gid_t;
107 #endif
108 typedef int32_t target_pid_t;
109
110 #ifdef TARGET_I386
111
112 #define ELF_PLATFORM get_elf_platform()
113
114 static const char *get_elf_platform(void)
115 {
116 static char elf_platform[] = "i386";
117 int family = (thread_env->cpuid_version >> 8) & 0xff;
118 if (family > 6)
119 family = 6;
120 if (family >= 3)
121 elf_platform[1] = '0' + family;
122 return elf_platform;
123 }
124
125 #define ELF_HWCAP get_elf_hwcap()
126
127 static uint32_t get_elf_hwcap(void)
128 {
129 return thread_env->cpuid_features;
130 }
131
132 #ifdef TARGET_X86_64
133 #define ELF_START_MMAP 0x2aaaaab000ULL
134 #define elf_check_arch(x) ( ((x) == ELF_ARCH) )
135
136 #define ELF_CLASS ELFCLASS64
137 #define ELF_DATA ELFDATA2LSB
138 #define ELF_ARCH EM_X86_64
139
140 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
141 {
142 regs->rax = 0;
143 regs->rsp = infop->start_stack;
144 regs->rip = infop->entry;
145 }
146
147 #define ELF_NREG 27
148 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
149
150 /*
151 * Note that ELF_NREG should be 29 as there should be place for
152 * TRAPNO and ERR "registers" as well but linux doesn't dump
153 * those.
154 *
155 * See linux kernel: arch/x86/include/asm/elf.h
156 */
/*
 * Fill a core-dump general-register set from the guest CPU state.
 * Slot order follows the x86-64 user_regs_struct (r15 down through
 * rdi, then orig_rax/rip/cs/eflags/rsp/ss and the segment selectors);
 * see linux kernel: arch/x86/include/asm/elf.h.
 * NOTE(review): unlike the other targets, no tswapl() is applied here —
 * presumably host and guest are assumed same-endian; confirm before
 * trusting dumps produced on a big-endian host.
 */
static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUState *env)
{
    (*regs)[0] = env->regs[15];
    (*regs)[1] = env->regs[14];
    (*regs)[2] = env->regs[13];
    (*regs)[3] = env->regs[12];
    (*regs)[4] = env->regs[R_EBP];
    (*regs)[5] = env->regs[R_EBX];
    (*regs)[6] = env->regs[11];
    (*regs)[7] = env->regs[10];
    (*regs)[8] = env->regs[9];
    (*regs)[9] = env->regs[8];
    (*regs)[10] = env->regs[R_EAX];
    (*regs)[11] = env->regs[R_ECX];
    (*regs)[12] = env->regs[R_EDX];
    (*regs)[13] = env->regs[R_ESI];
    (*regs)[14] = env->regs[R_EDI];
    (*regs)[15] = env->regs[R_EAX]; /* XXX: orig_rax slot in the kernel layout — real value not tracked */
    (*regs)[16] = env->eip;
    (*regs)[17] = env->segs[R_CS].selector & 0xffff;
    (*regs)[18] = env->eflags;
    (*regs)[19] = env->regs[R_ESP];
    (*regs)[20] = env->segs[R_SS].selector & 0xffff;
    (*regs)[21] = env->segs[R_FS].selector & 0xffff;
    (*regs)[22] = env->segs[R_GS].selector & 0xffff;
    (*regs)[23] = env->segs[R_DS].selector & 0xffff;
    (*regs)[24] = env->segs[R_ES].selector & 0xffff;
    /* NOTE(review): the kernel layout ends with fs_base/gs_base; these
       slots hold the fs/gs selectors again here — confirm intent. */
    (*regs)[25] = env->segs[R_FS].selector & 0xffff;
    (*regs)[26] = env->segs[R_GS].selector & 0xffff;
}
187
188 #else
189
190 #define ELF_START_MMAP 0x80000000
191
192 /*
193 * This is used to ensure we don't load something for the wrong architecture.
194 */
195 #define elf_check_arch(x) ( ((x) == EM_386) || ((x) == EM_486) )
196
197 /*
198 * These are used to set parameters in the core dumps.
199 */
200 #define ELF_CLASS ELFCLASS32
201 #define ELF_DATA ELFDATA2LSB
202 #define ELF_ARCH EM_386
203
204 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
205 {
206 regs->esp = infop->start_stack;
207 regs->eip = infop->entry;
208
209 /* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program
210 starts %edx contains a pointer to a function which might be
211 registered using `atexit'. This provides a mean for the
212 dynamic linker to call DT_FINI functions for shared libraries
213 that have been loaded before the code runs.
214
215 A value of 0 tells we have no such handler. */
216 regs->edx = 0;
217 }
218
219 #define ELF_NREG 17
220 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
221
222 /*
223 * Note that ELF_NREG should be 19 as there should be place for
224 * TRAPNO and ERR "registers" as well but linux doesn't dump
225 * those.
226 *
227 * See linux kernel: arch/x86/include/asm/elf.h
228 */
/*
 * Fill a core-dump general-register set from the guest CPU state.
 * Slot order follows the i386 user_regs_struct: ebx, ecx, edx, esi,
 * edi, ebp, eax, ds, es, fs, gs, orig_eax, eip, cs, eflags, esp, ss;
 * see linux kernel: arch/x86/include/asm/elf.h.
 * NOTE(review): no tswapl() here, unlike other targets — confirm the
 * dump is correct when the host is big-endian.
 */
static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUState *env)
{
    (*regs)[0] = env->regs[R_EBX];
    (*regs)[1] = env->regs[R_ECX];
    (*regs)[2] = env->regs[R_EDX];
    (*regs)[3] = env->regs[R_ESI];
    (*regs)[4] = env->regs[R_EDI];
    (*regs)[5] = env->regs[R_EBP];
    (*regs)[6] = env->regs[R_EAX];
    (*regs)[7] = env->segs[R_DS].selector & 0xffff;
    (*regs)[8] = env->segs[R_ES].selector & 0xffff;
    (*regs)[9] = env->segs[R_FS].selector & 0xffff;
    (*regs)[10] = env->segs[R_GS].selector & 0xffff;
    (*regs)[11] = env->regs[R_EAX]; /* XXX: orig_eax slot — real value not tracked */
    (*regs)[12] = env->eip;
    (*regs)[13] = env->segs[R_CS].selector & 0xffff;
    (*regs)[14] = env->eflags;
    (*regs)[15] = env->regs[R_ESP];
    (*regs)[16] = env->segs[R_SS].selector & 0xffff;
}
249 #endif
250
251 #define USE_ELF_CORE_DUMP
252 #define ELF_EXEC_PAGESIZE 4096
253
254 #endif
255
256 #ifdef TARGET_ARM
257
258 #define ELF_START_MMAP 0x80000000
259
260 #define elf_check_arch(x) ( (x) == EM_ARM )
261
262 #define ELF_CLASS ELFCLASS32
263 #ifdef TARGET_WORDS_BIGENDIAN
264 #define ELF_DATA ELFDATA2MSB
265 #else
266 #define ELF_DATA ELFDATA2LSB
267 #endif
268 #define ELF_ARCH EM_ARM
269
/* Set up the initial ARM user register state for a new image. */
static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    abi_long stack = infop->start_stack;
    memset(regs, 0, sizeof(*regs));
    /* User mode; select Thumb state if the entry address has bit 0 set. */
    regs->ARM_cpsr = 0x10;
    if (infop->entry & 1)
        regs->ARM_cpsr |= CPSR_T;
    regs->ARM_pc = infop->entry & 0xfffffffe;
    regs->ARM_sp = infop->start_stack;
    /* FIXME - what to do on failure of get_user()? */
    get_user_ual(regs->ARM_r2, stack + 8); /* envp */
    get_user_ual(regs->ARM_r1, stack + 4); /* argv */
    /* XXX: it seems that r0 is zeroed after ! */
    regs->ARM_r0 = 0;
    /* For uClinux PIC binaries. */
    /* XXX: Linux does this only on ARM with no MMU (do we care ?) */
    regs->ARM_r10 = infop->start_data;
}
288
289 #define ELF_NREG 18
290 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
291
292 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUState *env)
293 {
294 (*regs)[0] = tswapl(env->regs[0]);
295 (*regs)[1] = tswapl(env->regs[1]);
296 (*regs)[2] = tswapl(env->regs[2]);
297 (*regs)[3] = tswapl(env->regs[3]);
298 (*regs)[4] = tswapl(env->regs[4]);
299 (*regs)[5] = tswapl(env->regs[5]);
300 (*regs)[6] = tswapl(env->regs[6]);
301 (*regs)[7] = tswapl(env->regs[7]);
302 (*regs)[8] = tswapl(env->regs[8]);
303 (*regs)[9] = tswapl(env->regs[9]);
304 (*regs)[10] = tswapl(env->regs[10]);
305 (*regs)[11] = tswapl(env->regs[11]);
306 (*regs)[12] = tswapl(env->regs[12]);
307 (*regs)[13] = tswapl(env->regs[13]);
308 (*regs)[14] = tswapl(env->regs[14]);
309 (*regs)[15] = tswapl(env->regs[15]);
310
311 (*regs)[16] = tswapl(cpsr_read((CPUState *)env));
312 (*regs)[17] = tswapl(env->regs[0]); /* XXX */
313 }
314
315 #define USE_ELF_CORE_DUMP
316 #define ELF_EXEC_PAGESIZE 4096
317
318 enum
319 {
320 ARM_HWCAP_ARM_SWP = 1 << 0,
321 ARM_HWCAP_ARM_HALF = 1 << 1,
322 ARM_HWCAP_ARM_THUMB = 1 << 2,
323 ARM_HWCAP_ARM_26BIT = 1 << 3,
324 ARM_HWCAP_ARM_FAST_MULT = 1 << 4,
325 ARM_HWCAP_ARM_FPA = 1 << 5,
326 ARM_HWCAP_ARM_VFP = 1 << 6,
327 ARM_HWCAP_ARM_EDSP = 1 << 7,
328 ARM_HWCAP_ARM_JAVA = 1 << 8,
329 ARM_HWCAP_ARM_IWMMXT = 1 << 9,
330 ARM_HWCAP_ARM_THUMBEE = 1 << 10,
331 ARM_HWCAP_ARM_NEON = 1 << 11,
332 ARM_HWCAP_ARM_VFPv3 = 1 << 12,
333 ARM_HWCAP_ARM_VFPv3D16 = 1 << 13,
334 };
335
336 #define ELF_HWCAP (ARM_HWCAP_ARM_SWP | ARM_HWCAP_ARM_HALF \
337 | ARM_HWCAP_ARM_THUMB | ARM_HWCAP_ARM_FAST_MULT \
338 | ARM_HWCAP_ARM_FPA | ARM_HWCAP_ARM_VFP \
339 | ARM_HWCAP_ARM_NEON | ARM_HWCAP_ARM_VFPv3 )
340
341 #endif
342
343 #ifdef TARGET_SPARC
344 #ifdef TARGET_SPARC64
345
346 #define ELF_START_MMAP 0x80000000
347
348 #ifndef TARGET_ABI32
349 #define elf_check_arch(x) ( (x) == EM_SPARCV9 || (x) == EM_SPARC32PLUS )
350 #else
351 #define elf_check_arch(x) ( (x) == EM_SPARC32PLUS || (x) == EM_SPARC )
352 #endif
353
354 #define ELF_CLASS ELFCLASS64
355 #define ELF_DATA ELFDATA2MSB
356 #define ELF_ARCH EM_SPARCV9
357
358 #define STACK_BIAS 2047
359
/* Set up the initial SPARC64 user register state: clear tstate/y,
   pc/npc at the entry point, and u_regs[14] (%sp) below the initial
   stack with room for the register window save area. */
static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
#ifndef TARGET_ABI32
    regs->tstate = 0;
#endif
    regs->pc = infop->entry;
    regs->npc = regs->pc + 4;
    regs->y = 0;
#ifdef TARGET_ABI32
    regs->u_regs[14] = infop->start_stack - 16 * 4;
#else
    /* 64-bit processes reserve 16 extended words and apply the V9
       stack bias; PER_LINUX32 personalities keep the 32-bit layout. */
    if (personality(infop->personality) == PER_LINUX32)
        regs->u_regs[14] = infop->start_stack - 16 * 4;
    else
        regs->u_regs[14] = infop->start_stack - 16 * 8 - STACK_BIAS;
#endif
}
377
378 #else
379 #define ELF_START_MMAP 0x80000000
380
381 #define elf_check_arch(x) ( (x) == EM_SPARC )
382
383 #define ELF_CLASS ELFCLASS32
384 #define ELF_DATA ELFDATA2MSB
385 #define ELF_ARCH EM_SPARC
386
387 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
388 {
389 regs->psr = 0;
390 regs->pc = infop->entry;
391 regs->npc = regs->pc + 4;
392 regs->y = 0;
393 regs->u_regs[14] = infop->start_stack - 16 * 4;
394 }
395
396 #endif
397 #endif
398
399 #ifdef TARGET_PPC
400
401 #define ELF_START_MMAP 0x80000000
402
403 #if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
404
405 #define elf_check_arch(x) ( (x) == EM_PPC64 )
406
407 #define ELF_CLASS ELFCLASS64
408
409 #else
410
411 #define elf_check_arch(x) ( (x) == EM_PPC )
412
413 #define ELF_CLASS ELFCLASS32
414
415 #endif
416
417 #ifdef TARGET_WORDS_BIGENDIAN
418 #define ELF_DATA ELFDATA2MSB
419 #else
420 #define ELF_DATA ELFDATA2LSB
421 #endif
422 #define ELF_ARCH EM_PPC
423
424 /* Feature masks for the Aux Vector Hardware Capabilities (AT_HWCAP).
425 See arch/powerpc/include/asm/cputable.h. */
426 enum {
427 QEMU_PPC_FEATURE_32 = 0x80000000,
428 QEMU_PPC_FEATURE_64 = 0x40000000,
429 QEMU_PPC_FEATURE_601_INSTR = 0x20000000,
430 QEMU_PPC_FEATURE_HAS_ALTIVEC = 0x10000000,
431 QEMU_PPC_FEATURE_HAS_FPU = 0x08000000,
432 QEMU_PPC_FEATURE_HAS_MMU = 0x04000000,
433 QEMU_PPC_FEATURE_HAS_4xxMAC = 0x02000000,
434 QEMU_PPC_FEATURE_UNIFIED_CACHE = 0x01000000,
435 QEMU_PPC_FEATURE_HAS_SPE = 0x00800000,
436 QEMU_PPC_FEATURE_HAS_EFP_SINGLE = 0x00400000,
437 QEMU_PPC_FEATURE_HAS_EFP_DOUBLE = 0x00200000,
438 QEMU_PPC_FEATURE_NO_TB = 0x00100000,
439 QEMU_PPC_FEATURE_POWER4 = 0x00080000,
440 QEMU_PPC_FEATURE_POWER5 = 0x00040000,
441 QEMU_PPC_FEATURE_POWER5_PLUS = 0x00020000,
442 QEMU_PPC_FEATURE_CELL = 0x00010000,
443 QEMU_PPC_FEATURE_BOOKE = 0x00008000,
444 QEMU_PPC_FEATURE_SMT = 0x00004000,
445 QEMU_PPC_FEATURE_ICACHE_SNOOP = 0x00002000,
446 QEMU_PPC_FEATURE_ARCH_2_05 = 0x00001000,
447 QEMU_PPC_FEATURE_PA6T = 0x00000800,
448 QEMU_PPC_FEATURE_HAS_DFP = 0x00000400,
449 QEMU_PPC_FEATURE_POWER6_EXT = 0x00000200,
450 QEMU_PPC_FEATURE_ARCH_2_06 = 0x00000100,
451 QEMU_PPC_FEATURE_HAS_VSX = 0x00000080,
452 QEMU_PPC_FEATURE_PSERIES_PERFMON_COMPAT = 0x00000040,
453
454 QEMU_PPC_FEATURE_TRUE_LE = 0x00000002,
455 QEMU_PPC_FEATURE_PPC_LE = 0x00000001,
456 };
457
458 #define ELF_HWCAP get_elf_hwcap()
459
/* Build the guest's AT_HWCAP value from the emulated CPU's
   instruction-set flags, mapping them onto the QEMU_PPC_FEATURE_*
   bits defined above. */
static uint32_t get_elf_hwcap(void)
{
    CPUState *e = thread_env;
    uint32_t features = 0;

    /* We don't have to be terribly complete here; the high points are
       Altivec/FP/SPE support.  Anything else is just a bonus. */
#define GET_FEATURE(flag, feature)                                      \
    do {if (e->insns_flags & flag) features |= feature; } while(0)
    GET_FEATURE(PPC_64B, QEMU_PPC_FEATURE_64);
    GET_FEATURE(PPC_FLOAT, QEMU_PPC_FEATURE_HAS_FPU);
    GET_FEATURE(PPC_ALTIVEC, QEMU_PPC_FEATURE_HAS_ALTIVEC);
    GET_FEATURE(PPC_SPE, QEMU_PPC_FEATURE_HAS_SPE);
    GET_FEATURE(PPC_SPE_SINGLE, QEMU_PPC_FEATURE_HAS_EFP_SINGLE);
    GET_FEATURE(PPC_SPE_DOUBLE, QEMU_PPC_FEATURE_HAS_EFP_DOUBLE);
    GET_FEATURE(PPC_BOOKE, QEMU_PPC_FEATURE_BOOKE);
    GET_FEATURE(PPC_405_MAC, QEMU_PPC_FEATURE_HAS_4xxMAC);
#undef GET_FEATURE

    return features;
}
481
482 /*
483 * The requirements here are:
484 * - keep the final alignment of sp (sp & 0xf)
485 * - make sure the 32-bit value at the first 16 byte aligned position of
486 * AUXV is greater than 16 for glibc compatibility.
487 * AT_IGNOREPPC is used for that.
488 * - for compatibility with glibc ARCH_DLINFO must always be defined on PPC,
489 * even if DLINFO_ARCH_ITEMS goes to zero or is undefined.
490 */
491 #define DLINFO_ARCH_ITEMS 5
492 #define ARCH_DLINFO \
493 do { \
494 NEW_AUX_ENT(AT_DCACHEBSIZE, 0x20); \
495 NEW_AUX_ENT(AT_ICACHEBSIZE, 0x20); \
496 NEW_AUX_ENT(AT_UCACHEBSIZE, 0); \
497 /* \
498 * Now handle glibc compatibility. \
499 */ \
500 NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \
501 NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \
502 } while (0)
503
/* Set up the initial PowerPC register state: r1 = stack pointer,
   nip = entry point. */
static inline void init_thread(struct target_pt_regs *_regs, struct image_info *infop)
{
    _regs->gpr[1] = infop->start_stack;
#if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
    /* On 64-bit PPC the ELF entry points at a function descriptor:
       the first doubleword is the real code address, the second is
       the TOC pointer which belongs in r2. */
    _regs->gpr[2] = ldq_raw(infop->entry + 8) + infop->load_addr;
    infop->entry = ldq_raw(infop->entry) + infop->load_addr;
#endif
    _regs->nip = infop->entry;
}
513
514 /* See linux kernel: arch/powerpc/include/asm/elf.h. */
515 #define ELF_NREG 48
516 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
517
/* Fill a PowerPC core-dump gregset: gpr0-31 first, then nip, msr,
   ctr, lr, xer and the reassembled CR at the indices the kernel's
   pt_regs dump uses.
   NOTE(review): slot 34 (orig_gpr3 in the kernel layout) is never
   written here — confirm the destination buffer is pre-zeroed. */
static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUState *env)
{
    int i;
    target_ulong ccr = 0;

    for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
        (*regs)[i] = tswapl(env->gpr[i]);
    }

    (*regs)[32] = tswapl(env->nip);
    (*regs)[33] = tswapl(env->msr);
    (*regs)[35] = tswapl(env->ctr);
    (*regs)[36] = tswapl(env->lr);
    (*regs)[37] = tswapl(env->xer);

    /* Re-pack the eight 4-bit CR fields into a single 32-bit CR image. */
    for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
        ccr |= env->crf[i] << (32 - ((i + 1) * 4));
    }
    (*regs)[38] = tswapl(ccr);
}
538
539 #define USE_ELF_CORE_DUMP
540 #define ELF_EXEC_PAGESIZE 4096
541
542 #endif
543
544 #ifdef TARGET_MIPS
545
546 #define ELF_START_MMAP 0x80000000
547
548 #define elf_check_arch(x) ( (x) == EM_MIPS )
549
550 #ifdef TARGET_MIPS64
551 #define ELF_CLASS ELFCLASS64
552 #else
553 #define ELF_CLASS ELFCLASS32
554 #endif
555 #ifdef TARGET_WORDS_BIGENDIAN
556 #define ELF_DATA ELFDATA2MSB
557 #else
558 #define ELF_DATA ELFDATA2LSB
559 #endif
560 #define ELF_ARCH EM_MIPS
561
562 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
563 {
564 regs->cp0_status = 2 << CP0St_KSU;
565 regs->cp0_epc = infop->entry;
566 regs->regs[29] = infop->start_stack;
567 }
568
569 /* See linux kernel: arch/mips/include/asm/elf.h. */
570 #define ELF_NREG 45
571 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
572
573 /* See linux kernel: arch/mips/include/asm/reg.h. */
574 enum {
575 #ifdef TARGET_MIPS64
576 TARGET_EF_R0 = 0,
577 #else
578 TARGET_EF_R0 = 6,
579 #endif
580 TARGET_EF_R26 = TARGET_EF_R0 + 26,
581 TARGET_EF_R27 = TARGET_EF_R0 + 27,
582 TARGET_EF_LO = TARGET_EF_R0 + 32,
583 TARGET_EF_HI = TARGET_EF_R0 + 33,
584 TARGET_EF_CP0_EPC = TARGET_EF_R0 + 34,
585 TARGET_EF_CP0_BADVADDR = TARGET_EF_R0 + 35,
586 TARGET_EF_CP0_STATUS = TARGET_EF_R0 + 36,
587 TARGET_EF_CP0_CAUSE = TARGET_EF_R0 + 37
588 };
589
590 /* See linux kernel: arch/mips/kernel/process.c:elf_dump_regs. */
/* Fill the gregset in elf_dump_regs order: leading pad words (O32
   only, TARGET_EF_R0 = 6 there), r0..r31 with r0/r26/r27 forced to
   zero as the kernel does, then lo, hi, cp0_epc, cp0_badvaddr,
   cp0_status and cp0_cause. */
static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUState *env)
{
    int i;

    /* Zero the pad slots before the register block. */
    for (i = 0; i < TARGET_EF_R0; i++) {
        (*regs)[i] = 0;
    }
    (*regs)[TARGET_EF_R0] = 0;

    for (i = 1; i < ARRAY_SIZE(env->active_tc.gpr); i++) {
        (*regs)[TARGET_EF_R0 + i] = tswapl(env->active_tc.gpr[i]);
    }

    /* k0/k1 are kernel temporaries and are not exposed in dumps. */
    (*regs)[TARGET_EF_R26] = 0;
    (*regs)[TARGET_EF_R27] = 0;
    (*regs)[TARGET_EF_LO] = tswapl(env->active_tc.LO[0]);
    (*regs)[TARGET_EF_HI] = tswapl(env->active_tc.HI[0]);
    (*regs)[TARGET_EF_CP0_EPC] = tswapl(env->active_tc.PC);
    (*regs)[TARGET_EF_CP0_BADVADDR] = tswapl(env->CP0_BadVAddr);
    (*regs)[TARGET_EF_CP0_STATUS] = tswapl(env->CP0_Status);
    (*regs)[TARGET_EF_CP0_CAUSE] = tswapl(env->CP0_Cause);
}
613
614 #define USE_ELF_CORE_DUMP
615 #define ELF_EXEC_PAGESIZE 4096
616
617 #endif /* TARGET_MIPS */
618
619 #ifdef TARGET_MICROBLAZE
620
621 #define ELF_START_MMAP 0x80000000
622
623 #define elf_check_arch(x) ( (x) == EM_MICROBLAZE || (x) == EM_MICROBLAZE_OLD)
624
625 #define ELF_CLASS ELFCLASS32
626 #define ELF_DATA ELFDATA2MSB
627 #define ELF_ARCH EM_MICROBLAZE
628
629 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
630 {
631 regs->pc = infop->entry;
632 regs->r1 = infop->start_stack;
633
634 }
635
636 #define ELF_EXEC_PAGESIZE 4096
637
638 #define USE_ELF_CORE_DUMP
639 #define ELF_NREG 38
640 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
641
642 /* See linux kernel: arch/mips/kernel/process.c:elf_dump_regs. */
643 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUState *env)
644 {
645 int i, pos = 0;
646
647 for (i = 0; i < 32; i++) {
648 (*regs)[pos++] = tswapl(env->regs[i]);
649 }
650
651 for (i = 0; i < 6; i++) {
652 (*regs)[pos++] = tswapl(env->sregs[i]);
653 }
654 }
655
656 #endif /* TARGET_MICROBLAZE */
657
658 #ifdef TARGET_SH4
659
660 #define ELF_START_MMAP 0x80000000
661
662 #define elf_check_arch(x) ( (x) == EM_SH )
663
664 #define ELF_CLASS ELFCLASS32
665 #define ELF_DATA ELFDATA2LSB
666 #define ELF_ARCH EM_SH
667
668 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
669 {
670 /* Check other registers XXXXX */
671 regs->pc = infop->entry;
672 regs->regs[15] = infop->start_stack;
673 }
674
675 /* See linux kernel: arch/sh/include/asm/elf.h. */
676 #define ELF_NREG 23
677 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
678
679 /* See linux kernel: arch/sh/include/asm/ptrace.h. */
680 enum {
681 TARGET_REG_PC = 16,
682 TARGET_REG_PR = 17,
683 TARGET_REG_SR = 18,
684 TARGET_REG_GBR = 19,
685 TARGET_REG_MACH = 20,
686 TARGET_REG_MACL = 21,
687 TARGET_REG_SYSCALL = 22
688 };
689
690 static inline void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUState *env)
691 {
692 int i;
693
694 for (i = 0; i < 16; i++) {
695 (*regs[i]) = tswapl(env->gregs[i]);
696 }
697
698 (*regs)[TARGET_REG_PC] = tswapl(env->pc);
699 (*regs)[TARGET_REG_PR] = tswapl(env->pr);
700 (*regs)[TARGET_REG_SR] = tswapl(env->sr);
701 (*regs)[TARGET_REG_GBR] = tswapl(env->gbr);
702 (*regs)[TARGET_REG_MACH] = tswapl(env->mach);
703 (*regs)[TARGET_REG_MACL] = tswapl(env->macl);
704 (*regs)[TARGET_REG_SYSCALL] = 0; /* FIXME */
705 }
706
707 #define USE_ELF_CORE_DUMP
708 #define ELF_EXEC_PAGESIZE 4096
709
710 #endif
711
712 #ifdef TARGET_CRIS
713
714 #define ELF_START_MMAP 0x80000000
715
716 #define elf_check_arch(x) ( (x) == EM_CRIS )
717
718 #define ELF_CLASS ELFCLASS32
719 #define ELF_DATA ELFDATA2LSB
720 #define ELF_ARCH EM_CRIS
721
/* Set up the initial CRIS user register state: ERP (the exception
   return pointer) holds the address execution resumes at, i.e. the
   image entry point.  The stack pointer is set up elsewhere. */
static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->erp = infop->entry;
}
726
727 #define ELF_EXEC_PAGESIZE 8192
728
729 #endif
730
731 #ifdef TARGET_M68K
732
733 #define ELF_START_MMAP 0x80000000
734
735 #define elf_check_arch(x) ( (x) == EM_68K )
736
737 #define ELF_CLASS ELFCLASS32
738 #define ELF_DATA ELFDATA2MSB
739 #define ELF_ARCH EM_68K
740
741 /* ??? Does this need to do anything?
742 #define ELF_PLAT_INIT(_r) */
743
744 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
745 {
746 regs->usp = infop->start_stack;
747 regs->sr = 0;
748 regs->pc = infop->entry;
749 }
750
751 /* See linux kernel: arch/m68k/include/asm/elf.h. */
752 #define ELF_NREG 20
753 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
754
/* Fill an m68k core-dump gregset in the kernel's pt_regs dump order:
   d1-d7, a0-a6, d0, usp (a7), orig_d0, sr, pc, format/vector. */
static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUState *env)
{
    (*regs)[0] = tswapl(env->dregs[1]);
    (*regs)[1] = tswapl(env->dregs[2]);
    (*regs)[2] = tswapl(env->dregs[3]);
    (*regs)[3] = tswapl(env->dregs[4]);
    (*regs)[4] = tswapl(env->dregs[5]);
    (*regs)[5] = tswapl(env->dregs[6]);
    (*regs)[6] = tswapl(env->dregs[7]);
    (*regs)[7] = tswapl(env->aregs[0]);
    (*regs)[8] = tswapl(env->aregs[1]);
    (*regs)[9] = tswapl(env->aregs[2]);
    (*regs)[10] = tswapl(env->aregs[3]);
    (*regs)[11] = tswapl(env->aregs[4]);
    (*regs)[12] = tswapl(env->aregs[5]);
    (*regs)[13] = tswapl(env->aregs[6]);
    (*regs)[14] = tswapl(env->dregs[0]);
    (*regs)[15] = tswapl(env->aregs[7]);
    (*regs)[16] = tswapl(env->dregs[0]); /* FIXME: orig_d0 */
    (*regs)[17] = tswapl(env->sr);
    (*regs)[18] = tswapl(env->pc);
    (*regs)[19] = 0; /* FIXME: regs->format | regs->vector */
}
778
779 #define USE_ELF_CORE_DUMP
780 #define ELF_EXEC_PAGESIZE 8192
781
782 #endif
783
#ifdef TARGET_ALPHA

#define ELF_START_MMAP (0x30000000000ULL)

#define elf_check_arch(x) ( (x) == ELF_ARCH )

#define ELF_CLASS      ELFCLASS64
/* Bug fix: Alpha is a little-endian architecture.  ELFDATA2MSB here
   made the loader mismatch every valid (ELFDATA2LSB) Alpha binary. */
#define ELF_DATA       ELFDATA2LSB
#define ELF_ARCH       EM_ALPHA

/* Set up the initial Alpha user register state: pc at the entry point,
   processor status 8, usp at the top of the new stack. */
static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->pc = infop->entry;
    regs->ps = 8;
    regs->usp = infop->start_stack;
}

#define ELF_EXEC_PAGESIZE        8192

#endif /* TARGET_ALPHA */
804
805 #ifndef ELF_PLATFORM
806 #define ELF_PLATFORM (NULL)
807 #endif
808
809 #ifndef ELF_HWCAP
810 #define ELF_HWCAP 0
811 #endif
812
813 #ifdef TARGET_ABI32
814 #undef ELF_CLASS
815 #define ELF_CLASS ELFCLASS32
816 #undef bswaptls
817 #define bswaptls(ptr) bswap32s(ptr)
818 #endif
819
820 #include "elf.h"
821
/* Legacy a.out ("struct exec") header, kept for a.out interpreter
   support (see load_aout_interp below).  Mirrors the traditional
   a.out executable header layout. */
struct exec
{
    unsigned int a_info;   /* Use macros N_MAGIC, etc for access */
    unsigned int a_text;   /* length of text, in bytes */
    unsigned int a_data;   /* length of data, in bytes */
    unsigned int a_bss;    /* length of uninitialized data area, in bytes */
    unsigned int a_syms;   /* length of symbol table data in file, in bytes */
    unsigned int a_entry;  /* start address */
    unsigned int a_trsize; /* length of relocation info for text, in bytes */
    unsigned int a_drsize; /* length of relocation info for data, in bytes */
};
833
834
835 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
836 #define OMAGIC 0407
837 #define NMAGIC 0410
838 #define ZMAGIC 0413
839 #define QMAGIC 0314
840
841 /* max code+data+bss space allocated to elf interpreter */
842 #define INTERP_MAP_SIZE (32 * 1024 * 1024)
843
844 /* max code+data+bss+brk space allocated to ET_DYN executables */
845 #define ET_DYN_MAP_SIZE (128 * 1024 * 1024)
846
847 /* Necessary parameters */
848 #define TARGET_ELF_EXEC_PAGESIZE TARGET_PAGE_SIZE
849 #define TARGET_ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(TARGET_ELF_EXEC_PAGESIZE-1))
850 #define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE-1))
851
852 #define INTERPRETER_NONE 0
853 #define INTERPRETER_AOUT 1
854 #define INTERPRETER_ELF 2
855
856 #define DLINFO_ITEMS 12
857
/* Historical kernel helper name kept for parity with the original
 * elf loader: in user space there is no fs segment to copy from, so
 * this is a plain memcpy. */
static inline void memcpy_fromfs(void *dst, const void *src, unsigned long count)
{
    memcpy(dst, src, count);
}
862
863 static int load_aout_interp(void * exptr, int interp_fd);
864
865 #ifdef BSWAP_NEEDED
/* Byte-swap an ELF file header in place (guest and host endianness
   differ when BSWAP_NEEDED is defined).  e_ident is raw bytes and
   needs no swapping. */
static void bswap_ehdr(struct elfhdr *ehdr)
{
    bswap16s(&ehdr->e_type);            /* Object file type */
    bswap16s(&ehdr->e_machine);         /* Architecture */
    bswap32s(&ehdr->e_version);         /* Object file version */
    bswaptls(&ehdr->e_entry);           /* Entry point virtual address */
    bswaptls(&ehdr->e_phoff);           /* Program header table file offset */
    bswaptls(&ehdr->e_shoff);           /* Section header table file offset */
    bswap32s(&ehdr->e_flags);           /* Processor-specific flags */
    bswap16s(&ehdr->e_ehsize);          /* ELF header size in bytes */
    bswap16s(&ehdr->e_phentsize);       /* Program header table entry size */
    bswap16s(&ehdr->e_phnum);           /* Program header table entry count */
    bswap16s(&ehdr->e_shentsize);       /* Section header table entry size */
    bswap16s(&ehdr->e_shnum);           /* Section header table entry count */
    bswap16s(&ehdr->e_shstrndx);        /* Section header string table index */
}
882
/* Byte-swap an ELF program header in place. */
static void bswap_phdr(struct elf_phdr *phdr)
{
    bswap32s(&phdr->p_type);            /* Segment type */
    bswaptls(&phdr->p_offset);          /* Segment file offset */
    bswaptls(&phdr->p_vaddr);           /* Segment virtual address */
    bswaptls(&phdr->p_paddr);           /* Segment physical address */
    bswaptls(&phdr->p_filesz);          /* Segment size in file */
    bswaptls(&phdr->p_memsz);           /* Segment size in memory */
    bswap32s(&phdr->p_flags);           /* Segment flags */
    bswaptls(&phdr->p_align);           /* Segment alignment */
}
894
/* Byte-swap an ELF section header in place. */
static void bswap_shdr(struct elf_shdr *shdr)
{
    bswap32s(&shdr->sh_name);           /* Section name (string table index) */
    bswap32s(&shdr->sh_type);           /* Section type */
    bswaptls(&shdr->sh_flags);          /* Section flags */
    bswaptls(&shdr->sh_addr);           /* Section virtual address */
    bswaptls(&shdr->sh_offset);         /* Section file offset */
    bswaptls(&shdr->sh_size);           /* Section size in bytes */
    bswap32s(&shdr->sh_link);           /* Link to another section */
    bswap32s(&shdr->sh_info);           /* Additional section info */
    bswaptls(&shdr->sh_addralign);      /* Section alignment */
    bswaptls(&shdr->sh_entsize);        /* Entry size if section holds a table */
}
908
/* Byte-swap an ELF symbol-table entry in place (st_info/st_other are
   single bytes and need no swapping). */
static void bswap_sym(struct elf_sym *sym)
{
    bswap32s(&sym->st_name);            /* Symbol name (string table index) */
    bswaptls(&sym->st_value);           /* Symbol value */
    bswaptls(&sym->st_size);            /* Symbol size */
    bswap16s(&sym->st_shndx);           /* Section index */
}
916 #endif
917
918 #ifdef USE_ELF_CORE_DUMP
919 static int elf_core_dump(int, const CPUState *);
920
921 #ifdef BSWAP_NEEDED
/* Byte-swap an ELF note header in place (used when emitting core dumps
   for a guest whose endianness differs from the host). */
static void bswap_note(struct elf_note *en)
{
    bswap32s(&en->n_namesz);
    bswap32s(&en->n_descsz);
    bswap32s(&en->n_type);
}
928 #endif /* BSWAP_NEEDED */
929
930 #endif /* USE_ELF_CORE_DUMP */
931
932 /*
933 * 'copy_elf_strings()' copies argument/envelope strings from user
934 * memory to free pages in kernel mem. These are in a format ready
935 * to be put directly into the top of new user memory.
936 *
937 */
938 static abi_ulong copy_elf_strings(int argc,char ** argv, void **page,
939 abi_ulong p)
940 {
941 char *tmp, *tmp1, *pag = NULL;
942 int len, offset = 0;
943
944 if (!p) {
945 return 0; /* bullet-proofing */
946 }
947 while (argc-- > 0) {
948 tmp = argv[argc];
949 if (!tmp) {
950 fprintf(stderr, "VFS: argc is wrong");
951 exit(-1);
952 }
953 tmp1 = tmp;
954 while (*tmp++);
955 len = tmp - tmp1;
956 if (p < len) { /* this shouldn't happen - 128kB */
957 return 0;
958 }
959 while (len) {
960 --p; --tmp; --len;
961 if (--offset < 0) {
962 offset = p % TARGET_PAGE_SIZE;
963 pag = (char *)page[p/TARGET_PAGE_SIZE];
964 if (!pag) {
965 pag = (char *)malloc(TARGET_PAGE_SIZE);
966 memset(pag, 0, TARGET_PAGE_SIZE);
967 page[p/TARGET_PAGE_SIZE] = pag;
968 if (!pag)
969 return 0;
970 }
971 }
972 if (len == 0 || offset == 0) {
973 *(pag + offset) = *tmp;
974 }
975 else {
976 int bytes_to_copy = (len > offset) ? offset : len;
977 tmp -= bytes_to_copy;
978 p -= bytes_to_copy;
979 offset -= bytes_to_copy;
980 len -= bytes_to_copy;
981 memcpy_fromfs(pag + offset, tmp, bytes_to_copy + 1);
982 }
983 }
984 }
985 return p;
986 }
987
/* Allocate the guest stack (guest_stack_size, but at least
   MAX_ARG_PAGES pages) plus one guard page above it, then copy the
   argument/environment pages collected in bprm->page to the top of
   the new stack.  Returns p relocated into the new stack. */
static abi_ulong setup_arg_pages(abi_ulong p, struct linux_binprm *bprm,
                                 struct image_info *info)
{
    abi_ulong stack_base, size, error;
    int i;

    /* Create enough stack to hold everything.  If we don't use
     * it for args, we'll use it for something else...
     */
    size = guest_stack_size;
    if (size < MAX_ARG_PAGES*TARGET_PAGE_SIZE)
        size = MAX_ARG_PAGES*TARGET_PAGE_SIZE;
    error = target_mmap(0,
                        size + qemu_host_page_size,
                        PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS,
                        -1, 0);
    if (error == -1) {
        perror("stk mmap");
        exit(-1);
    }
    /* we reserve one extra page at the top of the stack as guard */
    target_mprotect(error + size, qemu_host_page_size, PROT_NONE);

    info->stack_limit = error;
    stack_base = error + size - MAX_ARG_PAGES*TARGET_PAGE_SIZE;
    p += stack_base;

    /* Copy each collected arg/env page into place and release it. */
    for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
        if (bprm->page[i]) {
            info->rss++;
            /* FIXME - check return value of memcpy_to_target() for failure */
            memcpy_to_target(stack_base, bprm->page[i], TARGET_PAGE_SIZE);
            free(bprm->page[i]);
        }
        stack_base += TARGET_PAGE_SIZE;
    }
    return p;
}
1027
1028 /* Map and zero the bss. We need to explicitly zero any fractional pages
1029 after the data section (i.e. bss). */
/* Map and zero the bss region [elf_bss, last_bss): anonymous-map the
   host-page-aligned part, and hand-zero the fractional tail of the
   last file-backed page. */
static void zero_bss(abi_ulong elf_bss, abi_ulong last_bss, int prot)
{
    uintptr_t host_start, host_map_start, host_end;

    last_bss = TARGET_PAGE_ALIGN(last_bss);

    /* ??? There is confusion between qemu_real_host_page_size and
       qemu_host_page_size here and elsewhere in target_mmap, which
       may lead to the end of the data section mapping from the file
       not being mapped.  At least there was an explicit test and
       comment for that here, suggesting that "the file size must
       be known".  The comment probably pre-dates the introduction
       of the fstat system call in target_mmap which does in fact
       find out the size.  What isn't clear is if the workaround
       here is still actually needed.  For now, continue with it,
       but merge it with the "normal" mmap that would allocate the bss. */

    host_start = (uintptr_t) g2h(elf_bss);
    host_end = (uintptr_t) g2h(last_bss);
    /* Round the start of the anonymous mapping up to a host page. */
    host_map_start = (host_start + qemu_real_host_page_size - 1);
    host_map_start &= -qemu_real_host_page_size;

    if (host_map_start < host_end) {
        void *p = mmap((void *)host_map_start, host_end - host_map_start,
                       prot, MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
            perror("cannot mmap brk");
            exit(-1);
        }

        /* Since we didn't use target_mmap, make sure to record
           the validity of the pages with qemu.  */
        page_set_flags(elf_bss & TARGET_PAGE_MASK, last_bss, prot|PAGE_VALID);
    }

    /* Bytes between the end of data and the first host page boundary
       live in an already-mapped file page; zero them by hand. */
    if (host_start < host_map_start) {
        memset((void *)host_start, 0, host_map_start - host_start);
    }
}
1069
/* Build the initial userland stack image below p: platform string,
 * auxiliary vector, then (via loader_build_argptr) envp, argv and argc,
 * all growing downwards.  Returns the final stack pointer.
 *
 * p          - current top-of-strings stack position
 * argc/envc  - counts of argument and environment strings already copied
 * exec       - (byte-swapped) ELF header of the main binary
 * load_addr  - address the main binary's phdrs can be found at
 * load_bias  - relocation offset applied to the main binary
 * interp_load_addr - base the interpreter was mapped at (AT_BASE)
 * ibcs       - nonzero for the iBCS2/SVr4 stack variant
 * info       - image_info to record the auxv location into
 */
static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc,
                                   struct elfhdr * exec,
                                   abi_ulong load_addr,
                                   abi_ulong load_bias,
                                   abi_ulong interp_load_addr, int ibcs,
                                   struct image_info *info)
{
    abi_ulong sp;
    int size;
    abi_ulong u_platform;
    const char *k_platform;
    const int n = sizeof(elf_addr_t);   /* size of one target stack word */

    sp = p;
    u_platform = 0;
    k_platform = ELF_PLATFORM;
    if (k_platform) {
        size_t len = strlen(k_platform) + 1;
        /* Reserve word-aligned space and copy the platform string to it.  */
        sp -= (len + n - 1) & ~(n - 1);
        u_platform = sp;
        /* FIXME - check return value of memcpy_to_target() for failure */
        memcpy_to_target(sp, k_platform, len);
    }
    /*
     * Force 16 byte _final_ alignment here for generality.
     */
    sp = sp &~ (abi_ulong)15;
    /* Count, in words, everything still to be pushed (auxv id/value
       pairs, envp/argv pointer arrays with NULL terminators, argc) so
       that sp can be pre-adjusted to land 16-byte aligned at the end.  */
    size = (DLINFO_ITEMS + 1) * 2;
    if (k_platform)
        size += 2;
#ifdef DLINFO_ARCH_ITEMS
    size += DLINFO_ARCH_ITEMS * 2;
#endif
    size += envc + argc + 2;
    size += (!ibcs ? 3 : 1);    /* argc itself */
    size *= n;
    if (size & 15)
        sp -= 16 - (size & 15);

    /* This is correct because Linux defines
     * elf_addr_t as Elf32_Off / Elf64_Off
     */
#define NEW_AUX_ENT(id, val) do {               \
        sp -= n; put_user_ual(val, sp);         \
        sp -= n; put_user_ual(id, sp);          \
    } while(0)

    /* Pushed first, so it ends up highest in memory: the terminator.  */
    NEW_AUX_ENT (AT_NULL, 0);

    /* There must be exactly DLINFO_ITEMS entries here. */
    NEW_AUX_ENT(AT_PHDR, (abi_ulong)(load_addr + exec->e_phoff));
    NEW_AUX_ENT(AT_PHENT, (abi_ulong)(sizeof (struct elf_phdr)));
    NEW_AUX_ENT(AT_PHNUM, (abi_ulong)(exec->e_phnum));
    NEW_AUX_ENT(AT_PAGESZ, (abi_ulong)(TARGET_PAGE_SIZE));
    NEW_AUX_ENT(AT_BASE, (abi_ulong)(interp_load_addr));
    NEW_AUX_ENT(AT_FLAGS, (abi_ulong)0);
    NEW_AUX_ENT(AT_ENTRY, load_bias + exec->e_entry);
    NEW_AUX_ENT(AT_UID, (abi_ulong) getuid());
    NEW_AUX_ENT(AT_EUID, (abi_ulong) geteuid());
    NEW_AUX_ENT(AT_GID, (abi_ulong) getgid());
    NEW_AUX_ENT(AT_EGID, (abi_ulong) getegid());
    NEW_AUX_ENT(AT_HWCAP, (abi_ulong) ELF_HWCAP);
    NEW_AUX_ENT(AT_CLKTCK, (abi_ulong) sysconf(_SC_CLK_TCK));
    if (k_platform)
        NEW_AUX_ENT(AT_PLATFORM, u_platform);
#ifdef ARCH_DLINFO
    /*
     * ARCH_DLINFO must come last so platform specific code can enforce
     * special alignment requirements on the AUXV if necessary (eg. PPC).
     */
    ARCH_DLINFO;
#endif
#undef NEW_AUX_ENT

    /* sp now points at the start of the auxv; remember it for
       /proc/self/auxv emulation.  */
    info->saved_auxv = sp;

    sp = loader_build_argptr(envc, argc, sp, p, !ibcs);
    return sp;
}
1149
1150
1151 static abi_ulong load_elf_interp(struct elfhdr * interp_elf_ex,
1152 int interpreter_fd,
1153 abi_ulong *interp_load_addr)
1154 {
1155 struct elf_phdr *elf_phdata = NULL;
1156 struct elf_phdr *eppnt;
1157 abi_ulong load_addr = 0;
1158 int load_addr_set = 0;
1159 int retval;
1160 abi_ulong error;
1161 int i;
1162
1163 error = 0;
1164
1165 #ifdef BSWAP_NEEDED
1166 bswap_ehdr(interp_elf_ex);
1167 #endif
1168 /* First of all, some simple consistency checks */
1169 if ((interp_elf_ex->e_type != ET_EXEC &&
1170 interp_elf_ex->e_type != ET_DYN) ||
1171 !elf_check_arch(interp_elf_ex->e_machine)) {
1172 return ~((abi_ulong)0UL);
1173 }
1174
1175
1176 /* Now read in all of the header information */
1177
1178 if (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum > TARGET_PAGE_SIZE)
1179 return ~(abi_ulong)0UL;
1180
1181 elf_phdata = (struct elf_phdr *)
1182 malloc(sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
1183
1184 if (!elf_phdata)
1185 return ~((abi_ulong)0UL);
1186
1187 /*
1188 * If the size of this structure has changed, then punt, since
1189 * we will be doing the wrong thing.
1190 */
1191 if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr)) {
1192 free(elf_phdata);
1193 return ~((abi_ulong)0UL);
1194 }
1195
1196 retval = lseek(interpreter_fd, interp_elf_ex->e_phoff, SEEK_SET);
1197 if(retval >= 0) {
1198 retval = read(interpreter_fd,
1199 (char *) elf_phdata,
1200 sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
1201 }
1202 if (retval < 0) {
1203 perror("load_elf_interp");
1204 exit(-1);
1205 free (elf_phdata);
1206 return retval;
1207 }
1208 #ifdef BSWAP_NEEDED
1209 eppnt = elf_phdata;
1210 for (i=0; i<interp_elf_ex->e_phnum; i++, eppnt++) {
1211 bswap_phdr(eppnt);
1212 }
1213 #endif
1214
1215 if (interp_elf_ex->e_type == ET_DYN) {
1216 /* in order to avoid hardcoding the interpreter load
1217 address in qemu, we allocate a big enough memory zone */
1218 error = target_mmap(0, INTERP_MAP_SIZE,
1219 PROT_NONE, MAP_PRIVATE | MAP_ANON,
1220 -1, 0);
1221 if (error == -1) {
1222 perror("mmap");
1223 exit(-1);
1224 }
1225 load_addr = error;
1226 load_addr_set = 1;
1227 }
1228
1229 eppnt = elf_phdata;
1230 for(i=0; i<interp_elf_ex->e_phnum; i++, eppnt++)
1231 if (eppnt->p_type == PT_LOAD) {
1232 int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
1233 int elf_prot = 0;
1234 abi_ulong vaddr = 0;
1235
1236 if (eppnt->p_flags & PF_R) elf_prot = PROT_READ;
1237 if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
1238 if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
1239 if (interp_elf_ex->e_type == ET_EXEC || load_addr_set) {
1240 elf_type |= MAP_FIXED;
1241 vaddr = eppnt->p_vaddr;
1242 }
1243 error = target_mmap(load_addr+TARGET_ELF_PAGESTART(vaddr),
1244 eppnt->p_filesz + TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr),
1245 elf_prot,
1246 elf_type,
1247 interpreter_fd,
1248 eppnt->p_offset - TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr));
1249
1250 if (error == -1) {
1251 /* Real error */
1252 close(interpreter_fd);
1253 free(elf_phdata);
1254 return ~((abi_ulong)0UL);
1255 }
1256
1257 if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) {
1258 load_addr = error;
1259 load_addr_set = 1;
1260 }
1261
1262 /* If the load segment requests extra zeros (e.g. bss), map it. */
1263 if (eppnt->p_filesz < eppnt->p_memsz) {
1264 abi_ulong base = load_addr + eppnt->p_vaddr;
1265 zero_bss(base + eppnt->p_filesz,
1266 base + eppnt->p_memsz, elf_prot);
1267 }
1268 }
1269
1270 /* Now use mmap to map the library into memory. */
1271
1272 close(interpreter_fd);
1273 free(elf_phdata);
1274
1275 *interp_load_addr = load_addr;
1276 return ((abi_ulong) interp_elf_ex->e_entry) + load_addr;
1277 }
1278
1279 static int symfind(const void *s0, const void *s1)
1280 {
1281 struct elf_sym *key = (struct elf_sym *)s0;
1282 struct elf_sym *sym = (struct elf_sym *)s1;
1283 int result = 0;
1284 if (key->st_value < sym->st_value) {
1285 result = -1;
1286 } else if (key->st_value >= sym->st_value + sym->st_size) {
1287 result = 1;
1288 }
1289 return result;
1290 }
1291
1292 static const char *lookup_symbolxx(struct syminfo *s, target_ulong orig_addr)
1293 {
1294 #if ELF_CLASS == ELFCLASS32
1295 struct elf_sym *syms = s->disas_symtab.elf32;
1296 #else
1297 struct elf_sym *syms = s->disas_symtab.elf64;
1298 #endif
1299
1300 // binary search
1301 struct elf_sym key;
1302 struct elf_sym *sym;
1303
1304 key.st_value = orig_addr;
1305
1306 sym = bsearch(&key, syms, s->disas_num_syms, sizeof(*syms), symfind);
1307 if (sym != NULL) {
1308 return s->disas_strtab + sym->st_name;
1309 }
1310
1311 return "";
1312 }
1313
1314 /* FIXME: This should use elf_ops.h */
1315 static int symcmp(const void *s0, const void *s1)
1316 {
1317 struct elf_sym *sym0 = (struct elf_sym *)s0;
1318 struct elf_sym *sym1 = (struct elf_sym *)s1;
1319 return (sym0->st_value < sym1->st_value)
1320 ? -1
1321 : ((sym0->st_value > sym1->st_value) ? 1 : 0);
1322 }
1323
1324 /* Best attempt to load symbols from this ELF object. */
1325 static void load_symbols(struct elfhdr *hdr, int fd)
1326 {
1327 unsigned int i, nsyms;
1328 struct elf_shdr sechdr, symtab, strtab;
1329 char *strings;
1330 struct syminfo *s;
1331 struct elf_sym *syms;
1332
1333 lseek(fd, hdr->e_shoff, SEEK_SET);
1334 for (i = 0; i < hdr->e_shnum; i++) {
1335 if (read(fd, &sechdr, sizeof(sechdr)) != sizeof(sechdr))
1336 return;
1337 #ifdef BSWAP_NEEDED
1338 bswap_shdr(&sechdr);
1339 #endif
1340 if (sechdr.sh_type == SHT_SYMTAB) {
1341 symtab = sechdr;
1342 lseek(fd, hdr->e_shoff
1343 + sizeof(sechdr) * sechdr.sh_link, SEEK_SET);
1344 if (read(fd, &strtab, sizeof(strtab))
1345 != sizeof(strtab))
1346 return;
1347 #ifdef BSWAP_NEEDED
1348 bswap_shdr(&strtab);
1349 #endif
1350 goto found;
1351 }
1352 }
1353 return; /* Shouldn't happen... */
1354
1355 found:
1356 /* Now know where the strtab and symtab are. Snarf them. */
1357 s = malloc(sizeof(*s));
1358 syms = malloc(symtab.sh_size);
1359 if (!syms)
1360 return;
1361 s->disas_strtab = strings = malloc(strtab.sh_size);
1362 if (!s->disas_strtab)
1363 return;
1364
1365 lseek(fd, symtab.sh_offset, SEEK_SET);
1366 if (read(fd, syms, symtab.sh_size) != symtab.sh_size)
1367 return;
1368
1369 nsyms = symtab.sh_size / sizeof(struct elf_sym);
1370
1371 i = 0;
1372 while (i < nsyms) {
1373 #ifdef BSWAP_NEEDED
1374 bswap_sym(syms + i);
1375 #endif
1376 // Throw away entries which we do not need.
1377 if (syms[i].st_shndx == SHN_UNDEF ||
1378 syms[i].st_shndx >= SHN_LORESERVE ||
1379 ELF_ST_TYPE(syms[i].st_info) != STT_FUNC) {
1380 nsyms--;
1381 if (i < nsyms) {
1382 syms[i] = syms[nsyms];
1383 }
1384 continue;
1385 }
1386 #if defined(TARGET_ARM) || defined (TARGET_MIPS)
1387 /* The bottom address bit marks a Thumb or MIPS16 symbol. */
1388 syms[i].st_value &= ~(target_ulong)1;
1389 #endif
1390 i++;
1391 }
1392 syms = realloc(syms, nsyms * sizeof(*syms));
1393
1394 qsort(syms, nsyms, sizeof(*syms), symcmp);
1395
1396 lseek(fd, strtab.sh_offset, SEEK_SET);
1397 if (read(fd, strings, strtab.sh_size) != strtab.sh_size)
1398 return;
1399 s->disas_num_syms = nsyms;
1400 #if ELF_CLASS == ELFCLASS32
1401 s->disas_symtab.elf32 = syms;
1402 s->lookup_symbol = lookup_symbolxx;
1403 #else
1404 s->disas_symtab.elf64 = syms;
1405 s->lookup_symbol = lookup_symbolxx;
1406 #endif
1407 s->next = syminfos;
1408 syminfos = s;
1409 }
1410
/* Load the ELF executable described by bprm into the current process
 * image, map its PT_INTERP interpreter if it has one, and build the
 * initial stack.  Fills in *info (entry point, brk, code/data bounds,
 * stack) for the caller.  Returns 0 on success or a negative errno;
 * many unrecoverable failures exit(-1) directly, as in the kernel code
 * this was ported from.  regs is currently unused here.
 */
int load_elf_binary(struct linux_binprm * bprm, struct target_pt_regs * regs,
                    struct image_info * info)
{
    struct elfhdr elf_ex;           /* header of the main binary */
    struct elfhdr interp_elf_ex;    /* header of an ELF interpreter */
    struct exec interp_ex;          /* a.out header of an a.out interpreter */
    int interpreter_fd = -1; /* avoid warning */
    abi_ulong load_addr, load_bias;
    int load_addr_set = 0;
    unsigned int interpreter_type = INTERPRETER_NONE;
    unsigned char ibcs2_interpreter;
    int i;
    abi_ulong mapped_addr;
    struct elf_phdr * elf_ppnt;
    struct elf_phdr *elf_phdata;
    abi_ulong k, elf_brk;
    int retval;
    char * elf_interpreter;
    abi_ulong elf_entry, interp_load_addr = 0;
    int status;
    abi_ulong start_code, end_code, start_data, end_data;
    abi_ulong reloc_func_desc = 0;
    abi_ulong elf_stack;
    char passed_fileno[6];

    ibcs2_interpreter = 0;
    status = 0;
    load_addr = 0;
    load_bias = 0;
    elf_ex = *((struct elfhdr *) bprm->buf);          /* exec-header */
#ifdef BSWAP_NEEDED
    bswap_ehdr(&elf_ex);
#endif

    /* First of all, some simple consistency checks */
    if ((elf_ex.e_type != ET_EXEC && elf_ex.e_type != ET_DYN) ||
        (! elf_check_arch(elf_ex.e_machine))) {
        return -ENOEXEC;
    }

    /* Copy the filename, environment and argument strings into the
       argument pages; bprm->p walks downwards and becomes 0 on overflow.  */
    bprm->p = copy_elf_strings(1, &bprm->filename, bprm->page, bprm->p);
    bprm->p = copy_elf_strings(bprm->envc,bprm->envp,bprm->page,bprm->p);
    bprm->p = copy_elf_strings(bprm->argc,bprm->argv,bprm->page,bprm->p);
    if (!bprm->p) {
        /* NOTE(review): this assignment is discarded; the overflow is
           actually reported by the later !bprm->p check below.  */
        retval = -E2BIG;
    }

    /* Now read in all of the header information */
    elf_phdata = (struct elf_phdr *)malloc(elf_ex.e_phentsize*elf_ex.e_phnum);
    if (elf_phdata == NULL) {
        return -ENOMEM;
    }

    retval = lseek(bprm->fd, elf_ex.e_phoff, SEEK_SET);
    /* NOTE(review): load_elf_interp checks lseek with >= 0 here; > 0
       only works because e_phoff cannot be 0 in a file with program
       headers (the ELF header itself occupies offset 0) -- confirm.  */
    if(retval > 0) {
        retval = read(bprm->fd, (char *) elf_phdata,
                      elf_ex.e_phentsize * elf_ex.e_phnum);
    }

    if (retval < 0) {
        perror("load_elf_binary");
        exit(-1);
        /* NOTE(review): unreachable after exit(-1).  */
        free (elf_phdata);
        return -errno;
    }

#ifdef BSWAP_NEEDED
    elf_ppnt = elf_phdata;
    for (i=0; i<elf_ex.e_phnum; i++, elf_ppnt++) {
        bswap_phdr(elf_ppnt);
    }
#endif
    elf_ppnt = elf_phdata;

    elf_brk = 0;

    elf_stack = ~((abi_ulong)0UL);
    elf_interpreter = NULL;
    start_code = ~((abi_ulong)0UL);
    end_code = 0;
    start_data = 0;
    end_data = 0;
    interp_ex.a_info = 0;

    /* Scan the program headers for a PT_INTERP segment; if found, read
       the interpreter path, open it and snarf its header.  */
    for(i=0;i < elf_ex.e_phnum; i++) {
        if (elf_ppnt->p_type == PT_INTERP) {
            /* A second PT_INTERP is malformed.  */
            if ( elf_interpreter != NULL )
            {
                free (elf_phdata);
                free(elf_interpreter);
                close(bprm->fd);
                return -EINVAL;
            }

            /* This is the program interpreter used for
             * shared libraries - for now assume that this
             * is an a.out format binary
             */

            elf_interpreter = (char *)malloc(elf_ppnt->p_filesz);

            if (elf_interpreter == NULL) {
                free (elf_phdata);
                close(bprm->fd);
                return -ENOMEM;
            }

            retval = lseek(bprm->fd, elf_ppnt->p_offset, SEEK_SET);
            if(retval >= 0) {
                retval = read(bprm->fd, elf_interpreter, elf_ppnt->p_filesz);
            }
            if(retval < 0) {
                perror("load_elf_binary2");
                exit(-1);
            }

            /* If the program interpreter is one of these two,
               then assume an iBCS2 image. Otherwise assume
               a native linux image. */

            /* JRP - Need to add X86 lib dir stuff here... */

            if (strcmp(elf_interpreter,"/usr/lib/libc.so.1") == 0 ||
                strcmp(elf_interpreter,"/usr/lib/ld.so.1") == 0) {
                ibcs2_interpreter = 1;
            }

#if 0
            printf("Using ELF interpreter %s\n", path(elf_interpreter));
#endif
            if (retval >= 0) {
                /* path() redirects the lookup into the emulated sysroot.  */
                retval = open(path(elf_interpreter), O_RDONLY);
                if(retval >= 0) {
                    interpreter_fd = retval;
                }
                else {
                    perror(elf_interpreter);
                    exit(-1);
                    /* retval = -errno; */
                }
            }

            if (retval >= 0) {
                retval = lseek(interpreter_fd, 0, SEEK_SET);
                if(retval >= 0) {
                    /* Read enough for either an a.out or an ELF header.  */
                    retval = read(interpreter_fd,bprm->buf,128);
                }
            }
            if (retval >= 0) {
                interp_ex = *((struct exec *) bprm->buf); /* aout exec-header */
                interp_elf_ex = *((struct elfhdr *) bprm->buf); /* elf exec-header */
            }
            if (retval < 0) {
                perror("load_elf_binary3");
                exit(-1);
                /* NOTE(review): unreachable after exit(-1).  */
                free (elf_phdata);
                free(elf_interpreter);
                close(bprm->fd);
                return retval;
            }
        }
        elf_ppnt++;
    }

    /* Some simple consistency checks for the interpreter */
    if (elf_interpreter){
        interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;

        /* Now figure out which format our binary is */
        if ((N_MAGIC(interp_ex) != OMAGIC) && (N_MAGIC(interp_ex) != ZMAGIC) &&
            (N_MAGIC(interp_ex) != QMAGIC)) {
            interpreter_type = INTERPRETER_ELF;
        }

        if (interp_elf_ex.e_ident[0] != 0x7f ||
            strncmp((char *)&interp_elf_ex.e_ident[1], "ELF",3) != 0) {
            interpreter_type &= ~INTERPRETER_ELF;
        }

        /* Neither a.out nor ELF: give up.  */
        if (!interpreter_type) {
            free(elf_interpreter);
            free(elf_phdata);
            close(bprm->fd);
            return -ELIBBAD;
        }
    }

    /* OK, we are done with that, now set up the arg stuff,
       and then start this sucker up */

    {
        char * passed_p;

        if (interpreter_type == INTERPRETER_AOUT) {
            /* An a.out interpreter is passed the executable's fd as an
               extra string argument.  */
            snprintf(passed_fileno, sizeof(passed_fileno), "%d", bprm->fd);
            passed_p = passed_fileno;

            if (elf_interpreter) {
                bprm->p = copy_elf_strings(1,&passed_p,bprm->page,bprm->p);
                bprm->argc++;
            }
        }
        if (!bprm->p) {
            /* The strings did not fit in the argument pages.  */
            if (elf_interpreter) {
                free(elf_interpreter);
            }
            free (elf_phdata);
            close(bprm->fd);
            return -E2BIG;
        }
    }

    /* OK, This is the point of no return */
    info->end_data = 0;
    info->end_code = 0;
    info->start_mmap = (abi_ulong)ELF_START_MMAP;
    info->mmap = 0;
    elf_entry = (abi_ulong) elf_ex.e_entry;

#if defined(CONFIG_USE_GUEST_BASE)
    /*
     * In case where user has not explicitly set the guest_base, we
     * probe here that should we set it automatically.
     */
    if (!(have_guest_base || reserved_va)) {
        /*
         * Go through ELF program header table and find the address
         * range used by loadable segments.  Check that this is available on
         * the host, and if not find a suitable value for guest_base. */
        abi_ulong app_start = ~0;
        abi_ulong app_end = 0;
        abi_ulong addr;
        unsigned long host_start;
        unsigned long real_start;
        unsigned long host_size;
        for (i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum;
             i++, elf_ppnt++) {
            if (elf_ppnt->p_type != PT_LOAD)
                continue;
            addr = elf_ppnt->p_vaddr;
            if (addr < app_start) {
                app_start = addr;
            }
            addr += elf_ppnt->p_memsz;
            if (addr > app_end) {
                app_end = addr;
            }
        }

        /* If we don't have any loadable segments then something
           is very wrong. */
        assert(app_start < app_end);

        /* Round addresses to page boundaries. */
        app_start = app_start & qemu_host_page_mask;
        app_end = HOST_PAGE_ALIGN(app_end);
        if (app_start < mmap_min_addr) {
            host_start = HOST_PAGE_ALIGN(mmap_min_addr);
        } else {
            host_start = app_start;
            if (host_start != app_start) {
                /* abi_ulong -> unsigned long truncated the address.  */
                fprintf(stderr, "qemu: Address overflow loading ELF binary\n");
                abort();
            }
        }
        host_size = app_end - app_start;
        while (1) {
            /* Do not use mmap_find_vma here because that is limited to the
               guest address space.  We are going to make the
               guest address space fit whatever we're given. */
            real_start = (unsigned long)mmap((void *)host_start, host_size,
                PROT_NONE, MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0);
            if (real_start == (unsigned long)-1) {
                fprintf(stderr, "qemu: Virtual memory exausted\n");
                abort();
            }
            if (real_start == host_start) {
                break;
            }
            /* That address didn't work.  Unmap and try a different one.
               The address the host picked is typically
               right at the top of the host address space and leaves the
               guest with no usable address space.  Resort to a linear search.
               We already compensated for mmap_min_addr, so this should not
               happen often.  Probably means we got unlucky and host address
               space randomization put a shared library somewhere
               inconvenient. */
            munmap((void *)real_start, host_size);
            host_start += qemu_host_page_size;
            if (host_start == app_start) {
                /* Theoretically possible if host doesn't have any
                   suitably aligned areas.  Normally the first mmap will
                   fail. */
                fprintf(stderr, "qemu: Unable to find space for application\n");
                abort();
            }
        }
        qemu_log("Relocating guest address space from 0x" TARGET_ABI_FMT_lx
                 " to 0x%lx\n", app_start, real_start);
        guest_base = real_start - app_start;
    }
#endif /* CONFIG_USE_GUEST_BASE */

    /* Do this so that we can load the interpreter, if need be.  We will
       change some of these later */
    info->rss = 0;
    bprm->p = setup_arg_pages(bprm->p, bprm, info);
    info->start_stack = bprm->p;

    /* Now we do a little grungy work by mmaping the ELF image into
     * the correct location in memory.  At this point, we assume that
     * the image should be loaded at fixed address, not at a variable
     * address.
     */

    for(i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum; i++, elf_ppnt++) {
        int elf_prot = 0;
        int elf_flags = 0;
        abi_ulong error;

        if (elf_ppnt->p_type != PT_LOAD)
            continue;

        /* Translate the segment's PF_* permission bits into mmap
           protections.  */
        if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ;
        if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
        if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
        elf_flags = MAP_PRIVATE | MAP_DENYWRITE;
        if (elf_ex.e_type == ET_EXEC || load_addr_set) {
            elf_flags |= MAP_FIXED;
        } else if (elf_ex.e_type == ET_DYN) {
            /* Try and get dynamic programs out of the way of the default mmap
               base, as well as whatever program they might try to exec.  This
               is because the brk will follow the loader, and is not movable.  */
            /* NOTE: for qemu, we do a big mmap to get enough space
               without hardcoding any address */
            error = target_mmap(0, ET_DYN_MAP_SIZE,
                                PROT_NONE, MAP_PRIVATE | MAP_ANON,
                                -1, 0);
            if (error == -1) {
                perror("mmap");
                exit(-1);
            }
            load_bias = TARGET_ELF_PAGESTART(error - elf_ppnt->p_vaddr);
        }

        /* NOTE(review): elf_flags is computed above but the call below
           hard-codes MAP_FIXED|MAP_PRIVATE|MAP_DENYWRITE -- confirm
           whether elf_flags was meant to be passed here instead.  */
        error = target_mmap(TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr),
                            (elf_ppnt->p_filesz +
                             TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)),
                            elf_prot,
                            (MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE),
                            bprm->fd,
                            (elf_ppnt->p_offset -
                             TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)));
        if (error == -1) {
            perror("mmap");
            exit(-1);
        }

#ifdef LOW_ELF_STACK
        if (TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr) < elf_stack)
            elf_stack = TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr);
#endif

        if (!load_addr_set) {
            /* First PT_LOAD segment fixes the load address; for ET_DYN
               fold the actual mapping result into the bias.  */
            load_addr_set = 1;
            load_addr = elf_ppnt->p_vaddr - elf_ppnt->p_offset;
            if (elf_ex.e_type == ET_DYN) {
                load_bias += error -
                    TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr);
                load_addr += load_bias;
                reloc_func_desc = load_bias;
            }
        }
        /* Track unrelocated code/data/brk extents; the bias is applied
           to all of them after the loop.  */
        k = elf_ppnt->p_vaddr;
        if (k < start_code)
            start_code = k;
        if (start_data < k)
            start_data = k;
        k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
        if ((elf_ppnt->p_flags & PF_X) && end_code < k)
            end_code = k;
        if (end_data < k)
            end_data = k;
        k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
        if (k > elf_brk) {
            elf_brk = TARGET_PAGE_ALIGN(k);
        }

        /* If the load segment requests extra zeros (e.g. bss), map it. */
        if (elf_ppnt->p_filesz < elf_ppnt->p_memsz) {
            abi_ulong base = load_bias + elf_ppnt->p_vaddr;
            zero_bss(base + elf_ppnt->p_filesz,
                     base + elf_ppnt->p_memsz, elf_prot);
        }
    }

    elf_entry += load_bias;
    elf_brk += load_bias;
    start_code += load_bias;
    end_code += load_bias;
    start_data += load_bias;
    end_data += load_bias;

    if (elf_interpreter) {
        if (interpreter_type & 1) {
            /* a.out interpreter */
            elf_entry = load_aout_interp(&interp_ex, interpreter_fd);
        }
        else if (interpreter_type & 2) {
            /* ELF interpreter */
            elf_entry = load_elf_interp(&interp_elf_ex, interpreter_fd,
                                        &interp_load_addr);
        }
        reloc_func_desc = interp_load_addr;

        close(interpreter_fd);
        free(elf_interpreter);

        if (elf_entry == ~((abi_ulong)0UL)) {
            printf("Unable to load interpreter\n");
            free(elf_phdata);
            exit(-1);
            /* NOTE(review): unreachable after exit(-1).  */
            return 0;
        }
    }

    free(elf_phdata);

    if (qemu_log_enabled())
        load_symbols(&elf_ex, bprm->fd);

    if (interpreter_type != INTERPRETER_AOUT) close(bprm->fd);
    info->personality = (ibcs2_interpreter ? PER_SVR4 : PER_LINUX);

#ifdef LOW_ELF_STACK
    info->start_stack = bprm->p = elf_stack - 4;
#endif
    bprm->p = create_elf_tables(bprm->p,
                                bprm->argc,
                                bprm->envc,
                                &elf_ex,
                                load_addr, load_bias,
                                interp_load_addr,
                                (interpreter_type == INTERPRETER_AOUT ? 0 : 1),
                                info);
    info->load_addr = reloc_func_desc;
    info->start_brk = info->brk = elf_brk;
    info->end_code = end_code;
    info->start_code = start_code;
    info->start_data = start_data;
    info->end_data = end_data;
    info->start_stack = bprm->p;

#if 0
    printf("(start_brk) %x\n" , info->start_brk);
    printf("(end_code) %x\n" , info->end_code);
    printf("(start_code) %x\n" , info->start_code);
    printf("(end_data) %x\n" , info->end_data);
    printf("(start_stack) %x\n" , info->start_stack);
    printf("(brk) %x\n" , info->brk);
#endif

    if ( info->personality == PER_SVR4 )
    {
        /* Why this, you ask???  Well SVr4 maps page 0 as read-only,
           and some applications "depend" upon this behavior.
           Since we do not have the power to recompile these, we
           emulate the SVr4 behavior.  Sigh.  */
        mapped_addr = target_mmap(0, qemu_host_page_size, PROT_READ | PROT_EXEC,
                                  MAP_FIXED | MAP_PRIVATE, -1, 0);
    }

    info->entry = elf_entry;

#ifdef USE_ELF_CORE_DUMP
    bprm->core_dump = &elf_core_dump;
#endif

    return 0;
}
1889
1890 #ifdef USE_ELF_CORE_DUMP
1891
1892 /*
1893 * Definitions to generate Intel SVR4-like core files.
1894 * These mostly have the same names as the SVR4 types with "target_elf_"
1895 * tacked on the front to prevent clashes with linux definitions,
1896 * and the typedef forms have been avoided. This is mostly like
1897 * the SVR4 structure, but more Linuxy, with things that Linux does
1898 * not support and which gdb doesn't really use excluded.
1899 *
1900  * Fields we don't dump (their contents are zero) in linux-user qemu
1901 * are marked with XXX.
1902 *
1903 * Core dump code is copied from linux kernel (fs/binfmt_elf.c).
1904 *
1905  * Porting ELF coredump for target is a (quite) simple process. First you
1906 * define USE_ELF_CORE_DUMP in target ELF code (where init_thread() for
1907 * the target resides):
1908 *
1909 * #define USE_ELF_CORE_DUMP
1910 *
1911 * Next you define type of register set used for dumping. ELF specification
1912 * says that it needs to be array of elf_greg_t that has size of ELF_NREG.
1913 *
1914 * typedef <target_regtype> target_elf_greg_t;
1915 * #define ELF_NREG <number of registers>
1916  * typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
1917 *
1918 * Last step is to implement target specific function that copies registers
1919 * from given cpu into just specified register set. Prototype is:
1920 *
1921  * static void elf_core_copy_regs(target_elf_gregset_t *regs,
1922 * const CPUState *env);
1923 *
1924 * Parameters:
1925 * regs - copy register values into here (allocated and zeroed by caller)
1926 * env - copy registers from here
1927 *
1928 * Example for ARM target is provided in this file.
1929 */
1930
/* An ELF note in memory */
struct memelfnote {
    const char *name;       /* note name, e.g. "CORE" */
    size_t namesz;          /* size of the name field */
    size_t namesz_rounded;  /* presumably namesz rounded to note alignment
                               -- set where the note is filled in; confirm */
    int type;               /* note type, e.g. NT_PRSTATUS */
    size_t datasz;          /* size of the payload in bytes */
    void *data;             /* pointer to the payload */
    size_t notesz;          /* total size of the note as written out */
};
1941
/* Signal information as embedded in an NT_PRSTATUS note, in target layout. */
struct target_elf_siginfo {
    int  si_signo; /* signal number */
    int  si_code;  /* extra code */
    int  si_errno; /* errno */
};
1947
/* Per-thread process status: the body of an NT_PRSTATUS core note, in
   target layout.  Fields marked XXX are left zero by linux-user qemu. */
struct target_elf_prstatus {
    struct target_elf_siginfo pr_info;      /* Info associated with signal */
    short              pr_cursig;    /* Current signal */
    target_ulong       pr_sigpend;   /* XXX */
    target_ulong       pr_sighold;   /* XXX */
    target_pid_t       pr_pid;
    target_pid_t       pr_ppid;
    target_pid_t       pr_pgrp;
    target_pid_t       pr_sid;
    struct target_timeval pr_utime;  /* XXX User time */
    struct target_timeval pr_stime;  /* XXX System time */
    struct target_timeval pr_cutime; /* XXX Cumulative user time */
    struct target_timeval pr_cstime; /* XXX Cumulative system time */
    target_elf_gregset_t pr_reg;     /* GP registers */
    int                pr_fpvalid;   /* XXX */
};
1964
#define ELF_PRARGSZ     (80) /* Number of chars for args */

/* Process information: the body of an NT_PRPSINFO core note, in target
   layout. */
struct target_elf_prpsinfo {
    char         pr_state;       /* numeric process state */
    char         pr_sname;       /* char for pr_state */
    char         pr_zomb;        /* zombie */
    char         pr_nice;        /* nice val */
    target_ulong pr_flag;        /* flags */
    target_uid_t pr_uid;
    target_gid_t pr_gid;
    target_pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
    /* Lots missing */
    char    pr_fname[16];           /* filename of executable */
    char    pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
};
1980
/* Here is the structure in which status of each thread is captured. */
struct elf_thread_status {
    QTAILQ_ENTRY(elf_thread_status)  ets_link;  /* linkage on the
                                                   per-dump thread list */
    struct target_elf_prstatus prstatus;        /* NT_PRSTATUS */
#if 0
    elf_fpregset_t fpu;             /* NT_PRFPREG */
    struct task_struct *thread;
    elf_fpxregset_t xfpu;           /* ELF_CORE_XFPREG_TYPE */
#endif
    struct memelfnote notes[1];     /* notes emitted for this thread */
    int num_notes;                  /* number of valid entries in notes[] */
};
1993
/* Aggregate state for one core dump: the note buffers for the dumping
   thread plus the list of all other threads' statuses. */
struct elf_note_info {
    struct memelfnote *notes;                /* notes for the main thread */
    struct target_elf_prstatus *prstatus;    /* NT_PRSTATUS */
    struct target_elf_prpsinfo *psinfo;      /* NT_PRPSINFO */

    QTAILQ_HEAD(thread_list_head, elf_thread_status) thread_list;
#if 0
    /*
     * Current version of ELF coredump doesn't support
     * dumping fp regs etc.
     */
    elf_fpregset_t *fpu;
    elf_fpxregset_t *xfpu;
    int thread_status_size;
#endif
    int notes_size;    /* total on-disk size of all notes */
    int numnote;       /* number of entries in notes[] */
};
2012
/* One guest memory mapping, for core-dump purposes (a minimal analogue
   of the kernel's vm_area_struct). */
struct vm_area_struct {
    abi_ulong   vma_start;  /* start vaddr of memory region */
    abi_ulong   vma_end;    /* end vaddr of memory region */
    abi_ulong   vma_flags;  /* protection etc. flags for the region */
    QTAILQ_ENTRY(vm_area_struct) vma_link;  /* linkage on mm_struct's list */
};
2019
/* The list of all guest mappings captured for a core dump (a minimal
   analogue of the kernel's mm_struct). */
struct mm_struct {
    QTAILQ_HEAD(, vm_area_struct) mm_mmap;  /* list of mappings */
    int mm_count;           /* number of mappings */
};
2024
/* Management of the captured guest-mapping list (implemented below). */
static struct mm_struct *vma_init(void);
static void vma_delete(struct mm_struct *);
static int vma_add_mapping(struct mm_struct *, abi_ulong,
                           abi_ulong, abi_ulong);
static int vma_get_mapping_count(const struct mm_struct *);
static struct vm_area_struct *vma_first(const struct mm_struct *);
static struct vm_area_struct *vma_next(struct vm_area_struct *);
static abi_ulong vma_dump_size(const struct vm_area_struct *);
static int vma_walker(void *priv, abi_ulong start, abi_ulong end,
                      unsigned long flags);

/* Construction of the in-memory ELF header and note structures. */
static void fill_elf_header(struct elfhdr *, int, uint16_t, uint32_t);
static void fill_note(struct memelfnote *, const char *, int,
                      unsigned int, void *);
static void fill_prstatus(struct target_elf_prstatus *, const TaskState *, int);
static int fill_psinfo(struct target_elf_prpsinfo *, const TaskState *);
static void fill_auxv_note(struct memelfnote *, const TaskState *);
static void fill_elf_note_phdr(struct elf_phdr *, int, off_t);
static size_t note_size(const struct memelfnote *);
static void free_note_info(struct elf_note_info *);
static int fill_note_info(struct elf_note_info *, long, const CPUState *);
static void fill_thread_info(struct elf_note_info *, const CPUState *);
static int core_dump_filename(const TaskState *, char *, size_t);

/* Writing the collected data out to the core file. */
static int dump_write(int, const void *, size_t);
static int write_note(struct memelfnote *, int);
static int write_note_info(struct elf_note_info *, int);
2052
#ifdef BSWAP_NEEDED
static void bswap_prstatus(struct target_elf_prstatus *);
static void bswap_psinfo(struct target_elf_prpsinfo *);

/* Byte-swap a prstatus note body from host to target byte order. */
static void bswap_prstatus(struct target_elf_prstatus *prstatus)
{
    prstatus->pr_info.si_signo = tswapl(prstatus->pr_info.si_signo);
    prstatus->pr_info.si_code = tswapl(prstatus->pr_info.si_code);
    prstatus->pr_info.si_errno = tswapl(prstatus->pr_info.si_errno);
    prstatus->pr_cursig = tswap16(prstatus->pr_cursig);
    prstatus->pr_sigpend = tswapl(prstatus->pr_sigpend);
    prstatus->pr_sighold = tswapl(prstatus->pr_sighold);
    prstatus->pr_pid = tswap32(prstatus->pr_pid);
    prstatus->pr_ppid = tswap32(prstatus->pr_ppid);
    prstatus->pr_pgrp = tswap32(prstatus->pr_pgrp);
    prstatus->pr_sid = tswap32(prstatus->pr_sid);
    /* cpu times are not filled, so we skip them */
    /* regs should be in correct format already */
    prstatus->pr_fpvalid = tswap32(prstatus->pr_fpvalid);
}

/* Byte-swap a psinfo note body from host to target byte order.  The
   char fields and the fname/psargs strings need no swapping. */
static void bswap_psinfo(struct target_elf_prpsinfo *psinfo)
{
    psinfo->pr_flag = tswapl(psinfo->pr_flag);
    psinfo->pr_uid = tswap16(psinfo->pr_uid);
    psinfo->pr_gid = tswap16(psinfo->pr_gid);
    psinfo->pr_pid = tswap32(psinfo->pr_pid);
    psinfo->pr_ppid = tswap32(psinfo->pr_ppid);
    psinfo->pr_pgrp = tswap32(psinfo->pr_pgrp);
    psinfo->pr_sid = tswap32(psinfo->pr_sid);
}
#endif /* BSWAP_NEEDED */
2085
/*
 * Minimal support for Linux memory regions.  These are needed
 * when we are finding out what memory exactly belongs to the
 * emulated process.  No locks are needed here, as long as the
 * thread that received the signal is stopped.
 */
2092
2093 static struct mm_struct *vma_init(void)
2094 {
2095 struct mm_struct *mm;
2096
2097 if ((mm = qemu_malloc(sizeof (*mm))) == NULL)
2098 return (NULL);
2099
2100 mm->mm_count = 0;
2101 QTAILQ_INIT(&mm->mm_mmap);
2102
2103 return (mm);
2104 }
2105
2106 static void vma_delete(struct mm_struct *mm)
2107 {
2108 struct vm_area_struct *vma;
2109
2110 while ((vma = vma_first(mm)) != NULL) {
2111 QTAILQ_REMOVE(&mm->mm_mmap, vma, vma_link);
2112 qemu_free(vma);
2113 }
2114 qemu_free(mm);
2115 }
2116
2117 static int vma_add_mapping(struct mm_struct *mm, abi_ulong start,
2118 abi_ulong end, abi_ulong flags)
2119 {
2120 struct vm_area_struct *vma;
2121
2122 if ((vma = qemu_mallocz(sizeof (*vma))) == NULL)
2123 return (-1);
2124
2125 vma->vma_start = start;
2126 vma->vma_end = end;
2127 vma->vma_flags = flags;
2128
2129 QTAILQ_INSERT_TAIL(&mm->mm_mmap, vma, vma_link);
2130 mm->mm_count++;
2131
2132 return (0);
2133 }
2134
2135 static struct vm_area_struct *vma_first(const struct mm_struct *mm)
2136 {
2137 return (QTAILQ_FIRST(&mm->mm_mmap));
2138 }
2139
2140 static struct vm_area_struct *vma_next(struct vm_area_struct *vma)
2141 {
2142 return (QTAILQ_NEXT(vma, vma_link));
2143 }
2144
2145 static int vma_get_mapping_count(const struct mm_struct *mm)
2146 {
2147 return (mm->mm_count);
2148 }
2149
2150 /*
2151 * Calculate file (dump) size of given memory region.
2152 */
2153 static abi_ulong vma_dump_size(const struct vm_area_struct *vma)
2154 {
2155 /* if we cannot even read the first page, skip it */
2156 if (!access_ok(VERIFY_READ, vma->vma_start, TARGET_PAGE_SIZE))
2157 return (0);
2158
2159 /*
2160 * Usually we don't dump executable pages as they contain
2161 * non-writable code that debugger can read directly from
2162 * target library etc. However, thread stacks are marked
2163 * also executable so we read in first page of given region
2164 * and check whether it contains elf header. If there is
2165 * no elf header, we dump it.
2166 */
2167 if (vma->vma_flags & PROT_EXEC) {
2168 char page[TARGET_PAGE_SIZE];
2169
2170 copy_from_user(page, vma->vma_start, sizeof (page));
2171 if ((page[EI_MAG0] == ELFMAG0) &&
2172 (page[EI_MAG1] == ELFMAG1) &&
2173 (page[EI_MAG2] == ELFMAG2) &&
2174 (page[EI_MAG3] == ELFMAG3)) {
2175 /*
2176 * Mappings are possibly from ELF binary. Don't dump
2177 * them.
2178 */
2179 return (0);
2180 }
2181 }
2182
2183 return (vma->vma_end - vma->vma_start);
2184 }
2185
2186 static int vma_walker(void *priv, abi_ulong start, abi_ulong end,
2187 unsigned long flags)
2188 {
2189 struct mm_struct *mm = (struct mm_struct *)priv;
2190
2191 vma_add_mapping(mm, start, end, flags);
2192 return (0);
2193 }
2194
2195 static void fill_note(struct memelfnote *note, const char *name, int type,
2196 unsigned int sz, void *data)
2197 {
2198 unsigned int namesz;
2199
2200 namesz = strlen(name) + 1;
2201 note->name = name;
2202 note->namesz = namesz;
2203 note->namesz_rounded = roundup(namesz, sizeof (int32_t));
2204 note->type = type;
2205 note->datasz = roundup(sz, sizeof (int32_t));;
2206 note->data = data;
2207
2208 /*
2209 * We calculate rounded up note size here as specified by
2210 * ELF document.
2211 */
2212 note->notesz = sizeof (struct elf_note) +
2213 note->namesz_rounded + note->datasz;
2214 }
2215
/*
 * Fill in an ELF file header for an ET_CORE dump carrying SEGS
 * program headers.  Core files have no section headers, so the
 * e_sh* fields keep the zero values from the initial memset.
 */
static void fill_elf_header(struct elfhdr *elf, int segs, uint16_t machine,
                            uint32_t flags)
{
    (void) memset(elf, 0, sizeof(*elf));

    /* identification: magic, class, endianness, version, OS ABI */
    (void) memcpy(elf->e_ident, ELFMAG, SELFMAG);
    elf->e_ident[EI_CLASS] = ELF_CLASS;
    elf->e_ident[EI_DATA] = ELF_DATA;
    elf->e_ident[EI_VERSION] = EV_CURRENT;
    elf->e_ident[EI_OSABI] = ELF_OSABI;

    elf->e_type = ET_CORE;
    elf->e_machine = machine;
    elf->e_version = EV_CURRENT;
    elf->e_phoff = sizeof(struct elfhdr);   /* phdrs follow the header */
    elf->e_flags = flags;
    elf->e_ehsize = sizeof(struct elfhdr);
    elf->e_phentsize = sizeof(struct elf_phdr);
    elf->e_phnum = segs;

#ifdef BSWAP_NEEDED
    /* built in host order above; convert to target order */
    bswap_ehdr(elf);
#endif
}
2240
/*
 * Fill in the PT_NOTE program header: SZ bytes of note data at file
 * offset OFFSET.  Notes are not mapped into memory, hence the zero
 * p_vaddr/p_memsz, and need no particular alignment.
 */
static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, off_t offset)
{
    phdr->p_type = PT_NOTE;
    phdr->p_offset = offset;
    phdr->p_vaddr = 0;
    phdr->p_paddr = 0;
    phdr->p_filesz = sz;
    phdr->p_memsz = 0;
    phdr->p_flags = 0;
    phdr->p_align = 0;

#ifdef BSWAP_NEEDED
    /* built in host order above; convert to target order */
    bswap_phdr(phdr);
#endif
}
2256
2257 static size_t note_size(const struct memelfnote *note)
2258 {
2259 return (note->notesz);
2260 }
2261
/*
 * Fill in an NT_PRSTATUS note body for one thread.  SIGNR is the
 * signal that triggered the dump.  pr_reg is NOT filled here; the
 * caller follows up with elf_core_copy_regs().
 */
static void fill_prstatus(struct target_elf_prstatus *prstatus,
                          const TaskState *ts, int signr)
{
    (void) memset(prstatus, 0, sizeof (*prstatus));
    prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
    prstatus->pr_pid = ts->ts_tid;
    prstatus->pr_ppid = getppid();
    prstatus->pr_pgrp = getpgrp();
    prstatus->pr_sid = getsid(0);

#ifdef BSWAP_NEEDED
    bswap_prstatus(prstatus);
#endif
}
2276
2277 static int fill_psinfo(struct target_elf_prpsinfo *psinfo, const TaskState *ts)
2278 {
2279 char *filename, *base_filename;
2280 unsigned int i, len;
2281
2282 (void) memset(psinfo, 0, sizeof (*psinfo));
2283
2284 len = ts->info->arg_end - ts->info->arg_start;
2285 if (len >= ELF_PRARGSZ)
2286 len = ELF_PRARGSZ - 1;
2287 if (copy_from_user(&psinfo->pr_psargs, ts->info->arg_start, len))
2288 return -EFAULT;
2289 for (i = 0; i < len; i++)
2290 if (psinfo->pr_psargs[i] == 0)
2291 psinfo->pr_psargs[i] = ' ';
2292 psinfo->pr_psargs[len] = 0;
2293
2294 psinfo->pr_pid = getpid();
2295 psinfo->pr_ppid = getppid();
2296 psinfo->pr_pgrp = getpgrp();
2297 psinfo->pr_sid = getsid(0);
2298 psinfo->pr_uid = getuid();
2299 psinfo->pr_gid = getgid();
2300
2301 filename = strdup(ts->bprm->filename);
2302 base_filename = strdup(basename(filename));
2303 (void) strncpy(psinfo->pr_fname, base_filename,
2304 sizeof(psinfo->pr_fname));
2305 free(base_filename);
2306 free(filename);
2307
2308 #ifdef BSWAP_NEEDED
2309 bswap_psinfo(psinfo);
2310 #endif
2311 return (0);
2312 }
2313
2314 static void fill_auxv_note(struct memelfnote *note, const TaskState *ts)
2315 {
2316 elf_addr_t auxv = (elf_addr_t)ts->info->saved_auxv;
2317 elf_addr_t orig_auxv = auxv;
2318 abi_ulong val;
2319 void *ptr;
2320 int i, len;
2321
2322 /*
2323 * Auxiliary vector is stored in target process stack. It contains
2324 * {type, value} pairs that we need to dump into note. This is not
2325 * strictly necessary but we do it here for sake of completeness.
2326 */
2327
2328 /* find out lenght of the vector, AT_NULL is terminator */
2329 i = len = 0;
2330 do {
2331 get_user_ual(val, auxv);
2332 i += 2;
2333 auxv += 2 * sizeof (elf_addr_t);
2334 } while (val != AT_NULL);
2335 len = i * sizeof (elf_addr_t);
2336
2337 /* read in whole auxv vector and copy it to memelfnote */
2338 ptr = lock_user(VERIFY_READ, orig_auxv, len, 0);
2339 if (ptr != NULL) {
2340 fill_note(note, "CORE", NT_AUXV, len, ptr);
2341 unlock_user(ptr, auxv, len);
2342 }
2343 }
2344
/*
 * Constructs the name of the coredump file.  We use the following
 * convention for the name:
 * qemu_<basename-of-target-binary>_<date>-<time>_<pid>.core
 *
 * Returns 0 in case of success, -1 otherwise (errno is set).
 */
2352 static int core_dump_filename(const TaskState *ts, char *buf,
2353 size_t bufsize)
2354 {
2355 char timestamp[64];
2356 char *filename = NULL;
2357 char *base_filename = NULL;
2358 struct timeval tv;
2359 struct tm tm;
2360
2361 assert(bufsize >= PATH_MAX);
2362
2363 if (gettimeofday(&tv, NULL) < 0) {
2364 (void) fprintf(stderr, "unable to get current timestamp: %s",
2365 strerror(errno));
2366 return (-1);
2367 }
2368
2369 filename = strdup(ts->bprm->filename);
2370 base_filename = strdup(basename(filename));
2371 (void) strftime(timestamp, sizeof (timestamp), "%Y%m%d-%H%M%S",
2372 localtime_r(&tv.tv_sec, &tm));
2373 (void) snprintf(buf, bufsize, "qemu_%s_%s_%d.core",
2374 base_filename, timestamp, (int)getpid());
2375 free(base_filename);
2376 free(filename);
2377
2378 return (0);
2379 }
2380
/*
 * Write SIZE bytes at PTR to FD, honouring RLIMIT_CORE: output is
 * silently truncated at the core-size limit (matching kernel core
 * dump semantics).  Returns 0 on success, -1 on error or when the
 * limit has already been reached.
 *
 * Fixes over the previous version:
 *  - size == 0 returned -1 (the do/while issued write(fd, p, 0),
 *    got 0 back and treated it as EOF); now a successful no-op.
 *  - the RLIM_INFINITY test was unreachable, ordered after the
 *    "limit reached" comparison; it is now checked first.
 *  - rlim_cur (unsigned rlim_t) is no longer compared against a
 *    signed off_t without a cast.
 */
static int dump_write(int fd, const void *ptr, size_t size)
{
    const char *bufp = (const char *)ptr;
    ssize_t bytes_written, bytes_left;
    struct rlimit dumpsize;
    off_t pos;

    if (size == 0)
        return (0);

    getrlimit(RLIMIT_CORE, &dumpsize);
    if ((pos = lseek(fd, 0, SEEK_CUR)) == -1) {
        if (errno == ESPIPE) { /* not a seekable stream */
            bytes_left = size;
        } else {
            return pos;
        }
    } else {
        if (dumpsize.rlim_cur == RLIM_INFINITY) {
            bytes_left = size;
        } else if (dumpsize.rlim_cur <= (rlim_t)pos) {
            return (-1);       /* core limit already reached */
        } else {
            size_t limit_left = dumpsize.rlim_cur - pos;
            bytes_left = limit_left >= size ? size : limit_left;
        }
    }

    /*
     * In normal conditions, single write(2) should do but
     * in case of socket etc. this mechanism is more portable.
     */
    while (bytes_left > 0) {
        bytes_written = write(fd, bufp, bytes_left);
        if (bytes_written < 0) {
            if (errno == EINTR)
                continue;
            return (-1);
        } else if (bytes_written == 0) { /* eof */
            return (-1);
        }
        bufp += bytes_written;
        bytes_left -= bytes_written;
    }

    return (0);
}
2426
2427 static int write_note(struct memelfnote *men, int fd)
2428 {
2429 struct elf_note en;
2430
2431 en.n_namesz = men->namesz;
2432 en.n_type = men->type;
2433 en.n_descsz = men->datasz;
2434
2435 #ifdef BSWAP_NEEDED
2436 bswap_note(&en);
2437 #endif
2438
2439 if (dump_write(fd, &en, sizeof(en)) != 0)
2440 return (-1);
2441 if (dump_write(fd, men->name, men->namesz_rounded) != 0)
2442 return (-1);
2443 if (dump_write(fd, men->data, men->datasz) != 0)
2444 return (-1);
2445
2446 return (0);
2447 }
2448
2449 static void fill_thread_info(struct elf_note_info *info, const CPUState *env)
2450 {
2451 TaskState *ts = (TaskState *)env->opaque;
2452 struct elf_thread_status *ets;
2453
2454 ets = qemu_mallocz(sizeof (*ets));
2455 ets->num_notes = 1; /* only prstatus is dumped */
2456 fill_prstatus(&ets->prstatus, ts, 0);
2457 elf_core_copy_regs(&ets->prstatus.pr_reg, env);
2458 fill_note(&ets->notes[0], "CORE", NT_PRSTATUS, sizeof (ets->prstatus),
2459 &ets->prstatus);
2460
2461 QTAILQ_INSERT_TAIL(&info->thread_list, ets, ets_link);
2462
2463 info->notes_size += note_size(&ets->notes[0]);
2464 }
2465
2466 static int fill_note_info(struct elf_note_info *info,
2467 long signr, const CPUState *env)
2468 {
2469 #define NUMNOTES 3
2470 CPUState *cpu = NULL;
2471 TaskState *ts = (TaskState *)env->opaque;
2472 int i;
2473
2474 (void) memset(info, 0, sizeof (*info));
2475
2476 QTAILQ_INIT(&info->thread_list);
2477
2478 info->notes = qemu_mallocz(NUMNOTES * sizeof (struct memelfnote));
2479 if (info->notes == NULL)
2480 return (-ENOMEM);
2481 info->prstatus = qemu_mallocz(sizeof (*info->prstatus));
2482 if (info->prstatus == NULL)
2483 return (-ENOMEM);
2484 info->psinfo = qemu_mallocz(sizeof (*info->psinfo));
2485 if (info->prstatus == NULL)
2486 return (-ENOMEM);
2487
2488 /*
2489 * First fill in status (and registers) of current thread
2490 * including process info & aux vector.
2491 */
2492 fill_prstatus(info->prstatus, ts, signr);
2493 elf_core_copy_regs(&info->prstatus->pr_reg, env);
2494 fill_note(&info->notes[0], "CORE", NT_PRSTATUS,
2495 sizeof (*info->prstatus), info->prstatus);
2496 fill_psinfo(info->psinfo, ts);
2497 fill_note(&info->notes[1], "CORE", NT_PRPSINFO,
2498 sizeof (*info->psinfo), info->psinfo);
2499 fill_auxv_note(&info->notes[2], ts);
2500 info->numnote = 3;
2501
2502 info->notes_size = 0;
2503 for (i = 0; i < info->numnote; i++)
2504 info->notes_size += note_size(&info->notes[i]);
2505
2506 /* read and fill status of all threads */
2507 cpu_list_lock();
2508 for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
2509 if (cpu == thread_env)
2510 continue;
2511 fill_thread_info(info, cpu);
2512 }
2513 cpu_list_unlock();
2514
2515 return (0);
2516 }
2517
2518 static void free_note_info(struct elf_note_info *info)
2519 {
2520 struct elf_thread_status *ets;
2521
2522 while (!QTAILQ_EMPTY(&info->thread_list)) {
2523 ets = QTAILQ_FIRST(&info->thread_list);
2524 QTAILQ_REMOVE(&info->thread_list, ets, ets_link);
2525 qemu_free(ets);
2526 }
2527
2528 qemu_free(info->prstatus);
2529 qemu_free(info->psinfo);
2530 qemu_free(info->notes);
2531 }
2532
2533 static int write_note_info(struct elf_note_info *info, int fd)
2534 {
2535 struct elf_thread_status *ets;
2536 int i, error = 0;
2537
2538 /* write prstatus, psinfo and auxv for current thread */
2539 for (i = 0; i < info->numnote; i++)
2540 if ((error = write_note(&info->notes[i], fd)) != 0)
2541 return (error);
2542
2543 /* write prstatus for each thread */
2544 for (ets = info->thread_list.tqh_first; ets != NULL;
2545 ets = ets->ets_link.tqe_next) {
2546 if ((error = write_note(&ets->notes[0], fd)) != 0)
2547 return (error);
2548 }
2549
2550 return (0);
2551 }
2552
2553 /*
2554 * Write out ELF coredump.
2555 *
2556 * See documentation of ELF object file format in:
2557 * http://www.caldera.com/developers/devspecs/gabi41.pdf
2558 *
2559 * Coredump format in linux is following:
2560 *
2561 * 0 +----------------------+ \
2562 * | ELF header | ET_CORE |
2563 * +----------------------+ |
2564 * | ELF program headers | |--- headers
2565 * | - NOTE section | |
2566 * | - PT_LOAD sections | |
2567 * +----------------------+ /
2568 * | NOTEs: |
2569 * | - NT_PRSTATUS |
 * |  - NT_PRPSINFO      |
2571 * | - NT_AUXV |
2572 * +----------------------+ <-- aligned to target page
2573 * | Process memory dump |
2574 * : :
2575 * . .
2576 * : :
2577 * | |
2578 * +----------------------+
2579 *
2580 * NT_PRSTATUS -> struct elf_prstatus (per thread)
 * NT_PRPSINFO -> struct elf_prpsinfo
2582 * NT_AUXV is array of { type, value } pairs (see fill_auxv_note()).
2583 *
2584 * Format follows System V format as close as possible. Current
2585 * version limitations are as follows:
2586 * - no floating point registers are dumped
2587 *
2588 * Function returns 0 in case of success, negative errno otherwise.
2589 *
2590 * TODO: make this work also during runtime: it should be
2591 * possible to force coredump from running process and then
2592 * continue processing. For example qemu could set up SIGUSR2
 * handler (provided that the target process hasn't registered a
2594 * handler for that) that does the dump when signal is received.
2595 */
2596 static int elf_core_dump(int signr, const CPUState *env)
2597 {
2598 const TaskState *ts = (const TaskState *)env->opaque;
2599 struct vm_area_struct *vma = NULL;
2600 char corefile[PATH_MAX];
2601 struct elf_note_info info;
2602 struct elfhdr elf;
2603 struct elf_phdr phdr;
2604 struct rlimit dumpsize;
2605 struct mm_struct *mm = NULL;
2606 off_t offset = 0, data_offset = 0;
2607 int segs = 0;
2608 int fd = -1;
2609
2610 errno = 0;
2611 getrlimit(RLIMIT_CORE, &dumpsize);
2612 if (dumpsize.rlim_cur == 0)
2613 return 0;
2614
2615 if (core_dump_filename(ts, corefile, sizeof (corefile)) < 0)
2616 return (-errno);
2617
2618 if ((fd = open(corefile, O_WRONLY | O_CREAT,
2619 S_IRUSR|S_IWUSR|S_IRGRP|S_IROTH)) < 0)
2620 return (-errno);
2621
2622 /*
2623 * Walk through target process memory mappings and
2624 * set up structure containing this information. After
2625 * this point vma_xxx functions can be used.
2626 */
2627 if ((mm = vma_init()) == NULL)
2628 goto out;
2629
2630 walk_memory_regions(mm, vma_walker);
2631 segs = vma_get_mapping_count(mm);
2632
2633 /*
2634 * Construct valid coredump ELF header. We also
2635 * add one more segment for notes.
2636 */
2637 fill_elf_header(&elf, segs + 1, ELF_MACHINE, 0);
2638 if (dump_write(fd, &elf, sizeof (elf)) != 0)
2639 goto out;
2640
2641 /* fill in in-memory version of notes */
2642 if (fill_note_info(&info, signr, env) < 0)
2643 goto out;
2644
2645 offset += sizeof (elf); /* elf header */
2646 offset += (segs + 1) * sizeof (struct elf_phdr); /* program headers */
2647
2648 /* write out notes program header */
2649 fill_elf_note_phdr(&phdr, info.notes_size, offset);
2650
2651 offset += info.notes_size;
2652 if (dump_write(fd, &phdr, sizeof (phdr)) != 0)
2653 goto out;
2654
2655 /*
2656 * ELF specification wants data to start at page boundary so
2657 * we align it here.
2658 */
2659 offset = roundup(offset, ELF_EXEC_PAGESIZE);
2660
2661 /*
2662 * Write program headers for memory regions mapped in
2663 * the target process.
2664 */
2665 for (vma = vma_first(mm); vma != NULL; vma = vma_next(vma)) {
2666 (void) memset(&phdr, 0, sizeof (phdr));
2667
2668 phdr.p_type = PT_LOAD;
2669 phdr.p_offset = offset;
2670 phdr.p_vaddr = vma->vma_start;
2671 phdr.p_paddr = 0;
2672 phdr.p_filesz = vma_dump_size(vma);
2673 offset += phdr.p_filesz;
2674 phdr.p_memsz = vma->vma_end - vma->vma_start;
2675 phdr.p_flags = vma->vma_flags & PROT_READ ? PF_R : 0;
2676 if (vma->vma_flags & PROT_WRITE)
2677 phdr.p_flags |= PF_W;
2678 if (vma->vma_flags & PROT_EXEC)
2679 phdr.p_flags |= PF_X;
2680 phdr.p_align = ELF_EXEC_PAGESIZE;
2681
2682 dump_write(fd, &phdr, sizeof (phdr));
2683 }
2684
2685 /*
2686 * Next we write notes just after program headers. No
2687 * alignment needed here.
2688 */
2689 if (write_note_info(&info, fd) < 0)
2690 goto out;
2691
2692 /* align data to page boundary */
2693 data_offset = lseek(fd, 0, SEEK_CUR);
2694 data_offset = TARGET_PAGE_ALIGN(data_offset);
2695 if (lseek(fd, data_offset, SEEK_SET) != data_offset)
2696 goto out;
2697
2698 /*
2699 * Finally we can dump process memory into corefile as well.
2700 */
2701 for (vma = vma_first(mm); vma != NULL; vma = vma_next(vma)) {
2702 abi_ulong addr;
2703 abi_ulong end;
2704
2705 end = vma->vma_start + vma_dump_size(vma);
2706
2707 for (addr = vma->vma_start; addr < end;
2708 addr += TARGET_PAGE_SIZE) {
2709 char page[TARGET_PAGE_SIZE];
2710 int error;
2711
2712 /*
2713 * Read in page from target process memory and
2714 * write it to coredump file.
2715 */
2716 error = copy_from_user(page, addr, sizeof (page));
2717 if (error != 0) {
2718 (void) fprintf(stderr, "unable to dump " TARGET_ABI_FMT_lx "\n",
2719 addr);
2720 errno = -error;
2721 goto out;
2722 }
2723 if (dump_write(fd, page, TARGET_PAGE_SIZE) < 0)
2724 goto out;
2725 }
2726 }
2727
2728 out:
2729 free_note_info(&info);
2730 if (mm != NULL)
2731 vma_delete(mm);
2732 (void) close(fd);
2733
2734 if (errno != 0)
2735 return (-errno);
2736 return (0);
2737 }
2738
2739 #endif /* USE_ELF_CORE_DUMP */
2740
/* Stub: a.out dynamic interpreters are not implemented. */
static int load_aout_interp(void * exptr, int interp_fd)
{
    fputs("a.out interpreter not yet supported\n", stdout);
    return 0;
}
2746
/*
 * Public wrapper: delegate initial register setup for a freshly
 * loaded image to init_thread() (defined per target elsewhere in
 * this file -- TODO confirm for targets outside this view).
 */
void do_init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    init_thread(regs, infop);
}