[qemu.git] / bsd-user / elfload.c
1 /* This is the Linux kernel elf-loading code, ported into user space */
2
3 #include <stdio.h>
4 #include <sys/types.h>
5 #include <fcntl.h>
6 #include <errno.h>
7 #include <unistd.h>
8 #include <sys/mman.h>
9 #include <stdlib.h>
10 #include <string.h>
11
12 #include "qemu.h"
13 #include "disas/disas.h"
14
15 #ifdef _ARCH_PPC64
16 #undef ARCH_DLINFO
17 #undef ELF_PLATFORM
18 #undef ELF_HWCAP
19 #undef ELF_CLASS
20 #undef ELF_DATA
21 #undef ELF_ARCH
22 #endif
23
24 /* from personality.h */
25
26 /*
27 * Flags for bug emulation.
28 *
29 * These occupy the top three bytes.
30 */
31 enum {
32 ADDR_NO_RANDOMIZE = 0x0040000, /* disable randomization of VA space */
33 FDPIC_FUNCPTRS = 0x0080000, /* userspace function ptrs point to descriptors
34 * (signal handling)
35 */
36 MMAP_PAGE_ZERO = 0x0100000,
37 ADDR_COMPAT_LAYOUT = 0x0200000,
38 READ_IMPLIES_EXEC = 0x0400000,
39 ADDR_LIMIT_32BIT = 0x0800000,
40 SHORT_INODE = 0x1000000,
41 WHOLE_SECONDS = 0x2000000,
42 STICKY_TIMEOUTS = 0x4000000,
43 ADDR_LIMIT_3GB = 0x8000000,
44 };
45
46 /*
47 * Personality types.
48 *
49 * These go in the low byte. Avoid using the top bit, it will
50 * conflict with error returns.
51 */
52 enum {
53 PER_LINUX = 0x0000,
54 PER_LINUX_32BIT = 0x0000 | ADDR_LIMIT_32BIT,
55 PER_LINUX_FDPIC = 0x0000 | FDPIC_FUNCPTRS,
56 PER_SVR4 = 0x0001 | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
57 PER_SVR3 = 0x0002 | STICKY_TIMEOUTS | SHORT_INODE,
58 PER_SCOSVR3 = 0x0003 | STICKY_TIMEOUTS |
59 WHOLE_SECONDS | SHORT_INODE,
60 PER_OSR5 = 0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS,
61 PER_WYSEV386 = 0x0004 | STICKY_TIMEOUTS | SHORT_INODE,
62 PER_ISCR4 = 0x0005 | STICKY_TIMEOUTS,
63 PER_BSD = 0x0006,
64 PER_SUNOS = 0x0006 | STICKY_TIMEOUTS,
65 PER_XENIX = 0x0007 | STICKY_TIMEOUTS | SHORT_INODE,
66 PER_LINUX32 = 0x0008,
67 PER_LINUX32_3GB = 0x0008 | ADDR_LIMIT_3GB,
68 PER_IRIX32 = 0x0009 | STICKY_TIMEOUTS,/* IRIX5 32-bit */
69 PER_IRIXN32 = 0x000a | STICKY_TIMEOUTS,/* IRIX6 new 32-bit */
70 PER_IRIX64 = 0x000b | STICKY_TIMEOUTS,/* IRIX6 64-bit */
71 PER_RISCOS = 0x000c,
72 PER_SOLARIS = 0x000d | STICKY_TIMEOUTS,
73 PER_UW7 = 0x000e | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
74 PER_OSF4 = 0x000f, /* OSF/1 v4 */
75 PER_HPUX = 0x0010,
76 PER_MASK = 0x00ff,
77 };
78
79 /*
80 * Return the base personality without flags.
81 */
82 #define personality(pers) (pers & PER_MASK)
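/*
 * For example, with the values from the enums above:
 *   personality(PER_LINUX32_3GB) == PER_LINUX32   (0x0008)
 *   personality(PER_SVR4)        == 0x0001
 * i.e. the bug-emulation flags in the upper bytes are masked off.
 */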
83
  84 /* this flag is ineffective under Linux too, should be deleted */
85 #ifndef MAP_DENYWRITE
86 #define MAP_DENYWRITE 0
87 #endif
88
89 /* should probably go in elf.h */
90 #ifndef ELIBBAD
91 #define ELIBBAD 80
92 #endif
93
94 #ifdef TARGET_I386
95
96 #define ELF_PLATFORM get_elf_platform()
97
98 static const char *get_elf_platform(void)
99 {
100 static char elf_platform[] = "i386";
101 int family = object_property_get_int(OBJECT(thread_cpu), "family", NULL);
102 if (family > 6)
103 family = 6;
104 if (family >= 3)
105 elf_platform[1] = '0' + family;
106 return elf_platform;
107 }
108
109 #define ELF_HWCAP get_elf_hwcap()
110
111 static uint32_t get_elf_hwcap(void)
112 {
113 X86CPU *cpu = X86_CPU(thread_cpu);
114
115 return cpu->env.features[FEAT_1_EDX];
116 }
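/*
 * FEAT_1_EDX is the CPUID leaf 1 EDX feature word of the emulated CPU
 * (FPU, TSC, SSE, ...); create_elf_tables() below hands this value to the
 * guest through the AT_HWCAP auxiliary vector entry.
 */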
117
118 #ifdef TARGET_X86_64
119 #define ELF_START_MMAP 0x2aaaaab000ULL
120 #define elf_check_arch(x) ( ((x) == ELF_ARCH) )
121
122 #define ELF_CLASS ELFCLASS64
123 #define ELF_DATA ELFDATA2LSB
124 #define ELF_ARCH EM_X86_64
125
126 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
127 {
128 regs->rax = 0;
129 regs->rsp = infop->start_stack;
130 regs->rip = infop->entry;
131 if (bsd_type == target_freebsd) {
132 regs->rdi = infop->start_stack;
133 }
134 }
135
136 #else
137
138 #define ELF_START_MMAP 0x80000000
139
140 /*
141 * This is used to ensure we don't load something for the wrong architecture.
142 */
143 #define elf_check_arch(x) ( ((x) == EM_386) || ((x) == EM_486) )
144
145 /*
146 * These are used to set parameters in the core dumps.
147 */
148 #define ELF_CLASS ELFCLASS32
149 #define ELF_DATA ELFDATA2LSB
150 #define ELF_ARCH EM_386
151
152 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
153 {
154 regs->esp = infop->start_stack;
155 regs->eip = infop->entry;
156
 157     /* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program
 158        starts %edx contains a pointer to a function which might be
 159        registered using `atexit'. This provides a means for the
 160        dynamic linker to call DT_FINI functions for shared libraries
 161        that have been loaded before the code runs.
 162
 163        A value of 0 tells us we have no such handler. */
164 regs->edx = 0;
165 }
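/*
 * Illustrative sketch (not part of this loader) of what a typical C runtime
 * start-up does with that register; the name rtld_fini is hypothetical:
 *
 *   void start_c(void (*rtld_fini)(void))    // received in %edx
 *   {
 *       if (rtld_fini)
 *           atexit(rtld_fini);
 *       ...
 *   }
 */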
166 #endif
167
168 #define USE_ELF_CORE_DUMP
169 #define ELF_EXEC_PAGESIZE 4096
170
171 #endif
172
173 #ifdef TARGET_ARM
174
175 #define ELF_START_MMAP 0x80000000
176
177 #define elf_check_arch(x) ( (x) == EM_ARM )
178
179 #define ELF_CLASS ELFCLASS32
180 #ifdef TARGET_WORDS_BIGENDIAN
181 #define ELF_DATA ELFDATA2MSB
182 #else
183 #define ELF_DATA ELFDATA2LSB
184 #endif
185 #define ELF_ARCH EM_ARM
186
187 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
188 {
189 abi_long stack = infop->start_stack;
190 memset(regs, 0, sizeof(*regs));
191 regs->ARM_cpsr = 0x10;
192 if (infop->entry & 1)
193 regs->ARM_cpsr |= CPSR_T;
194 regs->ARM_pc = infop->entry & 0xfffffffe;
195 regs->ARM_sp = infop->start_stack;
 196     /* FIXME - what to do on failure of get_user()? */
197 get_user_ual(regs->ARM_r2, stack + 8); /* envp */
 198     get_user_ual(regs->ARM_r1, stack + 4); /* argv */
 199     /* XXX: it seems that r0 is zeroed afterwards anyway */
200 regs->ARM_r0 = 0;
201 /* For uClinux PIC binaries. */
202 /* XXX: Linux does this only on ARM with no MMU (do we care ?) */
203 regs->ARM_r10 = infop->start_data;
204 }
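/*
 * Rough sketch of the initial stack that the get_user_ual() calls above
 * read from (it is built later by loader_build_argptr(); 4-byte slots,
 * ascending addresses):
 *
 *   start_stack + 0 : argc
 *   start_stack + 4 : argv[0]
 *   start_stack + 8 : argv[1] (or the argv NULL terminator if argc == 1)
 *   ...               remaining argv, NULL, envp[], NULL, ELF auxv
 */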
205
206 #define USE_ELF_CORE_DUMP
207 #define ELF_EXEC_PAGESIZE 4096
208
209 enum
210 {
211 ARM_HWCAP_ARM_SWP = 1 << 0,
212 ARM_HWCAP_ARM_HALF = 1 << 1,
213 ARM_HWCAP_ARM_THUMB = 1 << 2,
214 ARM_HWCAP_ARM_26BIT = 1 << 3,
215 ARM_HWCAP_ARM_FAST_MULT = 1 << 4,
216 ARM_HWCAP_ARM_FPA = 1 << 5,
217 ARM_HWCAP_ARM_VFP = 1 << 6,
218 ARM_HWCAP_ARM_EDSP = 1 << 7,
219 };
220
221 #define ELF_HWCAP (ARM_HWCAP_ARM_SWP | ARM_HWCAP_ARM_HALF \
222 | ARM_HWCAP_ARM_THUMB | ARM_HWCAP_ARM_FAST_MULT \
223 | ARM_HWCAP_ARM_FPA | ARM_HWCAP_ARM_VFP)
224
225 #endif
226
227 #ifdef TARGET_SPARC
228 #ifdef TARGET_SPARC64
229
230 #define ELF_START_MMAP 0x80000000
231
232 #ifndef TARGET_ABI32
233 #define elf_check_arch(x) ( (x) == EM_SPARCV9 || (x) == EM_SPARC32PLUS )
234 #else
235 #define elf_check_arch(x) ( (x) == EM_SPARC32PLUS || (x) == EM_SPARC )
236 #endif
237
238 #define ELF_CLASS ELFCLASS64
239 #define ELF_DATA ELFDATA2MSB
240 #define ELF_ARCH EM_SPARCV9
241
242 #define STACK_BIAS 2047
243
244 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
245 {
246 #ifndef TARGET_ABI32
247 regs->tstate = 0;
248 #endif
249 regs->pc = infop->entry;
250 regs->npc = regs->pc + 4;
251 regs->y = 0;
252 #ifdef TARGET_ABI32
253 regs->u_regs[14] = infop->start_stack - 16 * 4;
254 #else
255 if (personality(infop->personality) == PER_LINUX32)
256 regs->u_regs[14] = infop->start_stack - 16 * 4;
257 else {
258 regs->u_regs[14] = infop->start_stack - 16 * 8 - STACK_BIAS;
259 if (bsd_type == target_freebsd) {
260 regs->u_regs[8] = infop->start_stack;
261 regs->u_regs[11] = infop->start_stack;
262 }
263 }
264 #endif
265 }
266
267 #else
268 #define ELF_START_MMAP 0x80000000
269
270 #define elf_check_arch(x) ( (x) == EM_SPARC )
271
272 #define ELF_CLASS ELFCLASS32
273 #define ELF_DATA ELFDATA2MSB
274 #define ELF_ARCH EM_SPARC
275
276 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
277 {
278 regs->psr = 0;
279 regs->pc = infop->entry;
280 regs->npc = regs->pc + 4;
281 regs->y = 0;
282 regs->u_regs[14] = infop->start_stack - 16 * 4;
283 }
284
285 #endif
286 #endif
287
288 #ifdef TARGET_PPC
289
290 #define ELF_START_MMAP 0x80000000
291
292 #if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
293
294 #define elf_check_arch(x) ( (x) == EM_PPC64 )
295
296 #define ELF_CLASS ELFCLASS64
297
298 #else
299
300 #define elf_check_arch(x) ( (x) == EM_PPC )
301
302 #define ELF_CLASS ELFCLASS32
303
304 #endif
305
306 #ifdef TARGET_WORDS_BIGENDIAN
307 #define ELF_DATA ELFDATA2MSB
308 #else
309 #define ELF_DATA ELFDATA2LSB
310 #endif
311 #define ELF_ARCH EM_PPC
312
313 /*
314 * We need to put in some extra aux table entries to tell glibc what
315 * the cache block size is, so it can use the dcbz instruction safely.
316 */
317 #define AT_DCACHEBSIZE 19
318 #define AT_ICACHEBSIZE 20
319 #define AT_UCACHEBSIZE 21
320 /* A special ignored type value for PPC, for glibc compatibility. */
321 #define AT_IGNOREPPC 22
322 /*
323 * The requirements here are:
324 * - keep the final alignment of sp (sp & 0xf)
325 * - make sure the 32-bit value at the first 16 byte aligned position of
326 * AUXV is greater than 16 for glibc compatibility.
327 * AT_IGNOREPPC is used for that.
328 * - for compatibility with glibc ARCH_DLINFO must always be defined on PPC,
329 * even if DLINFO_ARCH_ITEMS goes to zero or is undefined.
330 */
331 #define DLINFO_ARCH_ITEMS 5
332 #define ARCH_DLINFO \
333 do { \
334 NEW_AUX_ENT(AT_DCACHEBSIZE, 0x20); \
335 NEW_AUX_ENT(AT_ICACHEBSIZE, 0x20); \
336 NEW_AUX_ENT(AT_UCACHEBSIZE, 0); \
337 /* \
338 * Now handle glibc compatibility. \
339 */ \
340 NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \
341 NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \
342 } while (0)
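/*
 * When create_elf_tables() expands ARCH_DLINFO, each NEW_AUX_ENT() above
 * pushes one (id, value) pair onto the stack, which is why DLINFO_ARCH_ITEMS
 * is 5 in the auxv size calculation.  The doubled AT_IGNOREPPC entries are
 * the padding described in the requirements above: old glibc wants the
 * 32-bit value at the first 16-byte-aligned auxv slot to be greater than 16.
 */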
343
344 static inline void init_thread(struct target_pt_regs *_regs, struct image_info *infop)
345 {
346 abi_ulong pos = infop->start_stack;
347 abi_ulong tmp;
348 #if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
349 abi_ulong entry, toc;
350 #endif
351
352 _regs->gpr[1] = infop->start_stack;
353 #if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
354 entry = ldq_raw(infop->entry) + infop->load_addr;
355 toc = ldq_raw(infop->entry + 8) + infop->load_addr;
356 _regs->gpr[2] = toc;
357 infop->entry = entry;
358 #endif
359 _regs->nip = infop->entry;
 360     /* Note that this isn't exactly what the regular kernel does,
 361      * but it is what the ABI wants and is needed to allow
 362      * execution of PPC BSD programs.
 363      */
 364     /* FIXME - what to do on failure of get_user()? */
365 get_user_ual(_regs->gpr[3], pos);
366 pos += sizeof(abi_ulong);
367 _regs->gpr[4] = pos;
368 for (tmp = 1; tmp != 0; pos += sizeof(abi_ulong))
369 tmp = ldl(pos);
370 _regs->gpr[5] = pos;
371 }
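/*
 * In other words, assuming the usual initial stack layout: gpr[3] receives
 * argc, gpr[4] points at argv[0], and the scan loop above steps past the
 * argv pointers and their NULL terminator so that gpr[5] ends up pointing
 * at envp.
 */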
372
373 #define USE_ELF_CORE_DUMP
374 #define ELF_EXEC_PAGESIZE 4096
375
376 #endif
377
378 #ifdef TARGET_MIPS
379
380 #define ELF_START_MMAP 0x80000000
381
382 #define elf_check_arch(x) ( (x) == EM_MIPS )
383
384 #ifdef TARGET_MIPS64
385 #define ELF_CLASS ELFCLASS64
386 #else
387 #define ELF_CLASS ELFCLASS32
388 #endif
389 #ifdef TARGET_WORDS_BIGENDIAN
390 #define ELF_DATA ELFDATA2MSB
391 #else
392 #define ELF_DATA ELFDATA2LSB
393 #endif
394 #define ELF_ARCH EM_MIPS
395
396 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
397 {
398 regs->cp0_status = 2 << CP0St_KSU;
399 regs->cp0_epc = infop->entry;
400 regs->regs[29] = infop->start_stack;
401 }
402
403 #define USE_ELF_CORE_DUMP
404 #define ELF_EXEC_PAGESIZE 4096
405
406 #endif /* TARGET_MIPS */
407
408 #ifdef TARGET_SH4
409
410 #define ELF_START_MMAP 0x80000000
411
412 #define elf_check_arch(x) ( (x) == EM_SH )
413
414 #define ELF_CLASS ELFCLASS32
415 #define ELF_DATA ELFDATA2LSB
416 #define ELF_ARCH EM_SH
417
418 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
419 {
420 /* Check other registers XXXXX */
421 regs->pc = infop->entry;
422 regs->regs[15] = infop->start_stack;
423 }
424
425 #define USE_ELF_CORE_DUMP
426 #define ELF_EXEC_PAGESIZE 4096
427
428 #endif
429
430 #ifdef TARGET_CRIS
431
432 #define ELF_START_MMAP 0x80000000
433
434 #define elf_check_arch(x) ( (x) == EM_CRIS )
435
436 #define ELF_CLASS ELFCLASS32
437 #define ELF_DATA ELFDATA2LSB
438 #define ELF_ARCH EM_CRIS
439
440 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
441 {
442 regs->erp = infop->entry;
443 }
444
445 #define USE_ELF_CORE_DUMP
446 #define ELF_EXEC_PAGESIZE 8192
447
448 #endif
449
450 #ifdef TARGET_M68K
451
452 #define ELF_START_MMAP 0x80000000
453
454 #define elf_check_arch(x) ( (x) == EM_68K )
455
456 #define ELF_CLASS ELFCLASS32
457 #define ELF_DATA ELFDATA2MSB
458 #define ELF_ARCH EM_68K
459
460 /* ??? Does this need to do anything?
461 #define ELF_PLAT_INIT(_r) */
462
463 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
464 {
465 regs->usp = infop->start_stack;
466 regs->sr = 0;
467 regs->pc = infop->entry;
468 }
469
470 #define USE_ELF_CORE_DUMP
471 #define ELF_EXEC_PAGESIZE 8192
472
473 #endif
474
475 #ifdef TARGET_ALPHA
476
477 #define ELF_START_MMAP (0x30000000000ULL)
478
479 #define elf_check_arch(x) ( (x) == ELF_ARCH )
480
481 #define ELF_CLASS ELFCLASS64
482 #define ELF_DATA ELFDATA2MSB
483 #define ELF_ARCH EM_ALPHA
484
485 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
486 {
487 regs->pc = infop->entry;
488 regs->ps = 8;
489 regs->usp = infop->start_stack;
490 regs->unique = infop->start_data; /* ? */
491 printf("Set unique value to " TARGET_FMT_lx " (" TARGET_FMT_lx ")\n",
492 regs->unique, infop->start_data);
493 }
494
495 #define USE_ELF_CORE_DUMP
496 #define ELF_EXEC_PAGESIZE 8192
497
498 #endif /* TARGET_ALPHA */
499
500 #ifndef ELF_PLATFORM
501 #define ELF_PLATFORM (NULL)
502 #endif
503
504 #ifndef ELF_HWCAP
505 #define ELF_HWCAP 0
506 #endif
507
508 #ifdef TARGET_ABI32
509 #undef ELF_CLASS
510 #define ELF_CLASS ELFCLASS32
511 #undef bswaptls
512 #define bswaptls(ptr) bswap32s(ptr)
513 #endif
514
515 #include "elf.h"
516
517 struct exec
518 {
519 unsigned int a_info; /* Use macros N_MAGIC, etc for access */
520 unsigned int a_text; /* length of text, in bytes */
521 unsigned int a_data; /* length of data, in bytes */
522 unsigned int a_bss; /* length of uninitialized data area, in bytes */
523 unsigned int a_syms; /* length of symbol table data in file, in bytes */
524 unsigned int a_entry; /* start address */
525 unsigned int a_trsize; /* length of relocation info for text, in bytes */
526 unsigned int a_drsize; /* length of relocation info for data, in bytes */
527 };
528
529
530 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
531 #define OMAGIC 0407
532 #define NMAGIC 0410
533 #define ZMAGIC 0413
534 #define QMAGIC 0314
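/* Classic a.out magic numbers (octal); N_MAGIC() is used further down to
   recognise an a.out-format program interpreter. */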
535
536 /* max code+data+bss space allocated to elf interpreter */
537 #define INTERP_MAP_SIZE (32 * 1024 * 1024)
538
539 /* max code+data+bss+brk space allocated to ET_DYN executables */
540 #define ET_DYN_MAP_SIZE (128 * 1024 * 1024)
541
542 /* Necessary parameters */
543 #define TARGET_ELF_EXEC_PAGESIZE TARGET_PAGE_SIZE
544 #define TARGET_ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(TARGET_ELF_EXEC_PAGESIZE-1))
545 #define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE-1))
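/*
 * Worked example, assuming TARGET_ELF_EXEC_PAGESIZE == 4096:
 *   TARGET_ELF_PAGESTART(0x08049123)  == 0x08049000
 *   TARGET_ELF_PAGEOFFSET(0x08049123) == 0x123
 */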
546
547 #define INTERPRETER_NONE 0
548 #define INTERPRETER_AOUT 1
549 #define INTERPRETER_ELF 2
550
551 #define DLINFO_ITEMS 12
552
553 static inline void memcpy_fromfs(void * to, const void * from, unsigned long n)
554 {
555 memcpy(to, from, n);
556 }
557
558 static int load_aout_interp(void * exptr, int interp_fd);
559
560 #ifdef BSWAP_NEEDED
561 static void bswap_ehdr(struct elfhdr *ehdr)
562 {
563 bswap16s(&ehdr->e_type); /* Object file type */
564 bswap16s(&ehdr->e_machine); /* Architecture */
565 bswap32s(&ehdr->e_version); /* Object file version */
566 bswaptls(&ehdr->e_entry); /* Entry point virtual address */
567 bswaptls(&ehdr->e_phoff); /* Program header table file offset */
568 bswaptls(&ehdr->e_shoff); /* Section header table file offset */
569 bswap32s(&ehdr->e_flags); /* Processor-specific flags */
570 bswap16s(&ehdr->e_ehsize); /* ELF header size in bytes */
571 bswap16s(&ehdr->e_phentsize); /* Program header table entry size */
572 bswap16s(&ehdr->e_phnum); /* Program header table entry count */
573 bswap16s(&ehdr->e_shentsize); /* Section header table entry size */
574 bswap16s(&ehdr->e_shnum); /* Section header table entry count */
575 bswap16s(&ehdr->e_shstrndx); /* Section header string table index */
576 }
577
578 static void bswap_phdr(struct elf_phdr *phdr)
579 {
580 bswap32s(&phdr->p_type); /* Segment type */
581 bswaptls(&phdr->p_offset); /* Segment file offset */
582 bswaptls(&phdr->p_vaddr); /* Segment virtual address */
583 bswaptls(&phdr->p_paddr); /* Segment physical address */
584 bswaptls(&phdr->p_filesz); /* Segment size in file */
585 bswaptls(&phdr->p_memsz); /* Segment size in memory */
586 bswap32s(&phdr->p_flags); /* Segment flags */
587 bswaptls(&phdr->p_align); /* Segment alignment */
588 }
589
590 static void bswap_shdr(struct elf_shdr *shdr)
591 {
592 bswap32s(&shdr->sh_name);
593 bswap32s(&shdr->sh_type);
594 bswaptls(&shdr->sh_flags);
595 bswaptls(&shdr->sh_addr);
596 bswaptls(&shdr->sh_offset);
597 bswaptls(&shdr->sh_size);
598 bswap32s(&shdr->sh_link);
599 bswap32s(&shdr->sh_info);
600 bswaptls(&shdr->sh_addralign);
601 bswaptls(&shdr->sh_entsize);
602 }
603
604 static void bswap_sym(struct elf_sym *sym)
605 {
606 bswap32s(&sym->st_name);
607 bswaptls(&sym->st_value);
608 bswaptls(&sym->st_size);
609 bswap16s(&sym->st_shndx);
610 }
611 #endif
612
 613 /*
 614  * 'copy_elf_strings()' copies argument/environment strings from user
 615  * memory to free pages in kernel memory. These are in a format ready
 616  * to be put directly into the top of new user memory.
 617  *
 618  */
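/*
 * Sketch with a hypothetical argv = {"prog", "-v"}: strings are copied
 * last-to-first, each one downwards from 'p' into the bprm page buffers,
 * so the bytes just below the original 'p' end up as "prog\0-v\0" and the
 * returned 'p' is the offset of the 'p' in "prog".
 */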
619 static abi_ulong copy_elf_strings(int argc,char ** argv, void **page,
620 abi_ulong p)
621 {
622 char *tmp, *tmp1, *pag = NULL;
623 int len, offset = 0;
624
625 if (!p) {
626 return 0; /* bullet-proofing */
627 }
628 while (argc-- > 0) {
629 tmp = argv[argc];
630 if (!tmp) {
631 fprintf(stderr, "VFS: argc is wrong");
632 exit(-1);
633 }
634 tmp1 = tmp;
635 while (*tmp++);
636 len = tmp - tmp1;
637 if (p < len) { /* this shouldn't happen - 128kB */
638 return 0;
639 }
640 while (len) {
641 --p; --tmp; --len;
642 if (--offset < 0) {
643 offset = p % TARGET_PAGE_SIZE;
644 pag = (char *)page[p/TARGET_PAGE_SIZE];
645 if (!pag) {
646 pag = g_try_malloc0(TARGET_PAGE_SIZE);
647 page[p/TARGET_PAGE_SIZE] = pag;
648 if (!pag)
649 return 0;
650 }
651 }
652 if (len == 0 || offset == 0) {
653 *(pag + offset) = *tmp;
654 }
655 else {
656 int bytes_to_copy = (len > offset) ? offset : len;
657 tmp -= bytes_to_copy;
658 p -= bytes_to_copy;
659 offset -= bytes_to_copy;
660 len -= bytes_to_copy;
661 memcpy_fromfs(pag + offset, tmp, bytes_to_copy + 1);
662 }
663 }
664 }
665 return p;
666 }
667
668 static abi_ulong setup_arg_pages(abi_ulong p, struct linux_binprm *bprm,
669 struct image_info *info)
670 {
671 abi_ulong stack_base, size, error;
672 int i;
673
674 /* Create enough stack to hold everything. If we don't use
675 * it for args, we'll use it for something else...
676 */
677 size = x86_stack_size;
678 if (size < MAX_ARG_PAGES*TARGET_PAGE_SIZE)
679 size = MAX_ARG_PAGES*TARGET_PAGE_SIZE;
680 error = target_mmap(0,
681 size + qemu_host_page_size,
682 PROT_READ | PROT_WRITE,
683 MAP_PRIVATE | MAP_ANON,
684 -1, 0);
685 if (error == -1) {
686 perror("stk mmap");
687 exit(-1);
688 }
689 /* we reserve one extra page at the top of the stack as guard */
690 target_mprotect(error + size, qemu_host_page_size, PROT_NONE);
691
692 stack_base = error + size - MAX_ARG_PAGES*TARGET_PAGE_SIZE;
693 p += stack_base;
694
695 for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
696 if (bprm->page[i]) {
697 info->rss++;
698 /* FIXME - check return value of memcpy_to_target() for failure */
699 memcpy_to_target(stack_base, bprm->page[i], TARGET_PAGE_SIZE);
700 g_free(bprm->page[i]);
701 }
702 stack_base += TARGET_PAGE_SIZE;
703 }
704 return p;
705 }
706
707 static void set_brk(abi_ulong start, abi_ulong end)
708 {
709 /* page-align the start and end addresses... */
710 start = HOST_PAGE_ALIGN(start);
711 end = HOST_PAGE_ALIGN(end);
712 if (end <= start)
713 return;
714 if(target_mmap(start, end - start,
715 PROT_READ | PROT_WRITE | PROT_EXEC,
716 MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0) == -1) {
717 perror("cannot mmap brk");
718 exit(-1);
719 }
720 }
721
722
 723 /* We need to explicitly zero any fractional pages after the data
 724    section (i.e. bss). These would otherwise contain junk from the file
 725    that should not be in memory. */
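/*
 * Worked example, assuming qemu_host_page_size == 4096 and last_bss further
 * up: for elf_bss == 0x0804a123, padzero() stores zero bytes at
 * 0x0804a123..0x0804afff so that the tail of the last file-backed page does
 * not leak stale file contents into the bss.
 */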
726 static void padzero(abi_ulong elf_bss, abi_ulong last_bss)
727 {
728 abi_ulong nbyte;
729
730 if (elf_bss >= last_bss)
731 return;
732
 733     /* XXX: this is really a hack: if the real host page size is
 734        smaller than the target page size, some pages after the end
 735        of the file may not be mapped. A better fix would be to
 736        patch target_mmap(), but it is more complicated as the file
 737        size must be known. */
738 if (qemu_real_host_page_size < qemu_host_page_size) {
739 abi_ulong end_addr, end_addr1;
740 end_addr1 = (elf_bss + qemu_real_host_page_size - 1) &
741 ~(qemu_real_host_page_size - 1);
742 end_addr = HOST_PAGE_ALIGN(elf_bss);
743 if (end_addr1 < end_addr) {
744 mmap((void *)g2h(end_addr1), end_addr - end_addr1,
745 PROT_READ|PROT_WRITE|PROT_EXEC,
746 MAP_FIXED|MAP_PRIVATE|MAP_ANON, -1, 0);
747 }
748 }
749
750 nbyte = elf_bss & (qemu_host_page_size-1);
751 if (nbyte) {
752 nbyte = qemu_host_page_size - nbyte;
753 do {
754 /* FIXME - what to do if put_user() fails? */
755 put_user_u8(0, elf_bss);
756 elf_bss++;
757 } while (--nbyte);
758 }
759 }
760
761
762 static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc,
763 struct elfhdr * exec,
764 abi_ulong load_addr,
765 abi_ulong load_bias,
766 abi_ulong interp_load_addr, int ibcs,
767 struct image_info *info)
768 {
769 abi_ulong sp;
770 int size;
771 abi_ulong u_platform;
772 const char *k_platform;
773 const int n = sizeof(elf_addr_t);
774
775 sp = p;
776 u_platform = 0;
777 k_platform = ELF_PLATFORM;
778 if (k_platform) {
779 size_t len = strlen(k_platform) + 1;
780 sp -= (len + n - 1) & ~(n - 1);
781 u_platform = sp;
782 /* FIXME - check return value of memcpy_to_target() for failure */
783 memcpy_to_target(sp, k_platform, len);
784 }
785 /*
786 * Force 16 byte _final_ alignment here for generality.
787 */
788 sp = sp &~ (abi_ulong)15;
789 size = (DLINFO_ITEMS + 1) * 2;
790 if (k_platform)
791 size += 2;
792 #ifdef DLINFO_ARCH_ITEMS
793 size += DLINFO_ARCH_ITEMS * 2;
794 #endif
795 size += envc + argc + 2;
796 size += (!ibcs ? 3 : 1); /* argc itself */
797 size *= n;
798 if (size & 15)
799 sp -= 16 - (size & 15);
800
801 /* This is correct because Linux defines
802 * elf_addr_t as Elf32_Off / Elf64_Off
803 */
804 #define NEW_AUX_ENT(id, val) do { \
805 sp -= n; put_user_ual(val, sp); \
806 sp -= n; put_user_ual(id, sp); \
807 } while(0)
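/*
 * Each NEW_AUX_ENT() therefore stores one { a_type, a_val } pair, growing
 * the auxv downwards in memory.  AT_NULL is emitted first, so it ends up at
 * the highest address, i.e. it is the terminating entry when the guest walks
 * the vector upwards in memory.
 */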
808
809 NEW_AUX_ENT (AT_NULL, 0);
810
811 /* There must be exactly DLINFO_ITEMS entries here. */
812 NEW_AUX_ENT(AT_PHDR, (abi_ulong)(load_addr + exec->e_phoff));
813 NEW_AUX_ENT(AT_PHENT, (abi_ulong)(sizeof (struct elf_phdr)));
814 NEW_AUX_ENT(AT_PHNUM, (abi_ulong)(exec->e_phnum));
815 NEW_AUX_ENT(AT_PAGESZ, (abi_ulong)(TARGET_PAGE_SIZE));
816 NEW_AUX_ENT(AT_BASE, (abi_ulong)(interp_load_addr));
817 NEW_AUX_ENT(AT_FLAGS, (abi_ulong)0);
818 NEW_AUX_ENT(AT_ENTRY, load_bias + exec->e_entry);
819 NEW_AUX_ENT(AT_UID, (abi_ulong) getuid());
820 NEW_AUX_ENT(AT_EUID, (abi_ulong) geteuid());
821 NEW_AUX_ENT(AT_GID, (abi_ulong) getgid());
822 NEW_AUX_ENT(AT_EGID, (abi_ulong) getegid());
823 NEW_AUX_ENT(AT_HWCAP, (abi_ulong) ELF_HWCAP);
824 NEW_AUX_ENT(AT_CLKTCK, (abi_ulong) sysconf(_SC_CLK_TCK));
825 if (k_platform)
826 NEW_AUX_ENT(AT_PLATFORM, u_platform);
827 #ifdef ARCH_DLINFO
828 /*
829 * ARCH_DLINFO must come last so platform specific code can enforce
830 * special alignment requirements on the AUXV if necessary (eg. PPC).
831 */
832 ARCH_DLINFO;
833 #endif
834 #undef NEW_AUX_ENT
835
836 sp = loader_build_argptr(envc, argc, sp, p, !ibcs);
837 return sp;
838 }
839
840
841 static abi_ulong load_elf_interp(struct elfhdr * interp_elf_ex,
842 int interpreter_fd,
843 abi_ulong *interp_load_addr)
844 {
845 struct elf_phdr *elf_phdata = NULL;
846 struct elf_phdr *eppnt;
847 abi_ulong load_addr = 0;
848 int load_addr_set = 0;
849 int retval;
850 abi_ulong last_bss, elf_bss;
851 abi_ulong error;
852 int i;
853
854 elf_bss = 0;
855 last_bss = 0;
856 error = 0;
857
858 #ifdef BSWAP_NEEDED
859 bswap_ehdr(interp_elf_ex);
860 #endif
861 /* First of all, some simple consistency checks */
862 if ((interp_elf_ex->e_type != ET_EXEC &&
863 interp_elf_ex->e_type != ET_DYN) ||
864 !elf_check_arch(interp_elf_ex->e_machine)) {
865 return ~((abi_ulong)0UL);
866 }
867
868
869 /* Now read in all of the header information */
870
871 if (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum > TARGET_PAGE_SIZE)
872 return ~(abi_ulong)0UL;
873
874 elf_phdata = (struct elf_phdr *)
875 malloc(sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
876
877 if (!elf_phdata)
878 return ~((abi_ulong)0UL);
879
880 /*
881 * If the size of this structure has changed, then punt, since
882 * we will be doing the wrong thing.
883 */
884 if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr)) {
885 free(elf_phdata);
886 return ~((abi_ulong)0UL);
887 }
888
889 retval = lseek(interpreter_fd, interp_elf_ex->e_phoff, SEEK_SET);
890 if(retval >= 0) {
891 retval = read(interpreter_fd,
892 (char *) elf_phdata,
893 sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
894 }
895 if (retval < 0) {
896 perror("load_elf_interp");
897 exit(-1);
898 free (elf_phdata);
899 return retval;
900 }
901 #ifdef BSWAP_NEEDED
902 eppnt = elf_phdata;
903 for (i=0; i<interp_elf_ex->e_phnum; i++, eppnt++) {
904 bswap_phdr(eppnt);
905 }
906 #endif
907
908 if (interp_elf_ex->e_type == ET_DYN) {
909 /* in order to avoid hardcoding the interpreter load
910 address in qemu, we allocate a big enough memory zone */
911 error = target_mmap(0, INTERP_MAP_SIZE,
912 PROT_NONE, MAP_PRIVATE | MAP_ANON,
913 -1, 0);
914 if (error == -1) {
915 perror("mmap");
916 exit(-1);
917 }
918 load_addr = error;
919 load_addr_set = 1;
920 }
921
922 eppnt = elf_phdata;
923 for(i=0; i<interp_elf_ex->e_phnum; i++, eppnt++)
924 if (eppnt->p_type == PT_LOAD) {
925 int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
926 int elf_prot = 0;
927 abi_ulong vaddr = 0;
928 abi_ulong k;
929
930 if (eppnt->p_flags & PF_R) elf_prot = PROT_READ;
931 if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
932 if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
933 if (interp_elf_ex->e_type == ET_EXEC || load_addr_set) {
934 elf_type |= MAP_FIXED;
935 vaddr = eppnt->p_vaddr;
936 }
937 error = target_mmap(load_addr+TARGET_ELF_PAGESTART(vaddr),
938 eppnt->p_filesz + TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr),
939 elf_prot,
940 elf_type,
941 interpreter_fd,
942 eppnt->p_offset - TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr));
943
944 if (error == -1) {
945 /* Real error */
946 close(interpreter_fd);
947 free(elf_phdata);
948 return ~((abi_ulong)0UL);
949 }
950
951 if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) {
952 load_addr = error;
953 load_addr_set = 1;
954 }
955
956 /*
957 * Find the end of the file mapping for this phdr, and keep
958 * track of the largest address we see for this.
959 */
960 k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
961 if (k > elf_bss) elf_bss = k;
962
963 /*
964 * Do the same thing for the memory mapping - between
965 * elf_bss and last_bss is the bss section.
966 */
967 k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
968 if (k > last_bss) last_bss = k;
969 }
970
971 /* Now use mmap to map the library into memory. */
972
973 close(interpreter_fd);
974
975 /*
976 * Now fill out the bss section. First pad the last page up
977 * to the page boundary, and then perform a mmap to make sure
978 * that there are zeromapped pages up to and including the last
979 * bss page.
980 */
981 padzero(elf_bss, last_bss);
982 elf_bss = TARGET_ELF_PAGESTART(elf_bss + qemu_host_page_size - 1); /* What we have mapped so far */
983
984 /* Map the last of the bss segment */
985 if (last_bss > elf_bss) {
986 target_mmap(elf_bss, last_bss-elf_bss,
987 PROT_READ|PROT_WRITE|PROT_EXEC,
988 MAP_FIXED|MAP_PRIVATE|MAP_ANON, -1, 0);
989 }
990 free(elf_phdata);
991
992 *interp_load_addr = load_addr;
993 return ((abi_ulong) interp_elf_ex->e_entry) + load_addr;
994 }
995
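/* bsearch() comparator used by lookup_symbolxx() below: s0 is the address
   being looked up, s1 a candidate symbol; they "match" when the address
   falls inside [st_value, st_value + st_size). */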
996 static int symfind(const void *s0, const void *s1)
997 {
998 target_ulong addr = *(target_ulong *)s0;
999 struct elf_sym *sym = (struct elf_sym *)s1;
1000 int result = 0;
1001 if (addr < sym->st_value) {
1002 result = -1;
1003 } else if (addr >= sym->st_value + sym->st_size) {
1004 result = 1;
1005 }
1006 return result;
1007 }
1008
1009 static const char *lookup_symbolxx(struct syminfo *s, target_ulong orig_addr)
1010 {
1011 #if ELF_CLASS == ELFCLASS32
1012 struct elf_sym *syms = s->disas_symtab.elf32;
1013 #else
1014 struct elf_sym *syms = s->disas_symtab.elf64;
1015 #endif
1016
1017 // binary search
1018 struct elf_sym *sym;
1019
1020 sym = bsearch(&orig_addr, syms, s->disas_num_syms, sizeof(*syms), symfind);
1021 if (sym != NULL) {
1022 return s->disas_strtab + sym->st_name;
1023 }
1024
1025 return "";
1026 }
1027
1028 /* FIXME: This should use elf_ops.h */
1029 static int symcmp(const void *s0, const void *s1)
1030 {
1031 struct elf_sym *sym0 = (struct elf_sym *)s0;
1032 struct elf_sym *sym1 = (struct elf_sym *)s1;
1033 return (sym0->st_value < sym1->st_value)
1034 ? -1
1035 : ((sym0->st_value > sym1->st_value) ? 1 : 0);
1036 }
1037
1038 /* Best attempt to load symbols from this ELF object. */
1039 static void load_symbols(struct elfhdr *hdr, int fd)
1040 {
1041 unsigned int i, nsyms;
1042 struct elf_shdr sechdr, symtab, strtab;
1043 char *strings;
1044 struct syminfo *s;
1045 struct elf_sym *syms, *new_syms;
1046
1047 lseek(fd, hdr->e_shoff, SEEK_SET);
1048 for (i = 0; i < hdr->e_shnum; i++) {
1049 if (read(fd, &sechdr, sizeof(sechdr)) != sizeof(sechdr))
1050 return;
1051 #ifdef BSWAP_NEEDED
1052 bswap_shdr(&sechdr);
1053 #endif
1054 if (sechdr.sh_type == SHT_SYMTAB) {
1055 symtab = sechdr;
1056 lseek(fd, hdr->e_shoff
1057 + sizeof(sechdr) * sechdr.sh_link, SEEK_SET);
1058 if (read(fd, &strtab, sizeof(strtab))
1059 != sizeof(strtab))
1060 return;
1061 #ifdef BSWAP_NEEDED
1062 bswap_shdr(&strtab);
1063 #endif
1064 goto found;
1065 }
1066 }
1067 return; /* Shouldn't happen... */
1068
1069 found:
1070     /* Now we know where the strtab and symtab are.  Snarf them. */
1071 s = malloc(sizeof(*s));
1072 syms = malloc(symtab.sh_size);
1073 if (!syms) {
1074 free(s);
1075 return;
1076 }
1077 s->disas_strtab = strings = malloc(strtab.sh_size);
1078 if (!s->disas_strtab) {
1079 free(s);
1080 free(syms);
1081 return;
1082 }
1083
1084 lseek(fd, symtab.sh_offset, SEEK_SET);
1085 if (read(fd, syms, symtab.sh_size) != symtab.sh_size) {
1086 free(s);
1087 free(syms);
1088 free(strings);
1089 return;
1090 }
1091
1092 nsyms = symtab.sh_size / sizeof(struct elf_sym);
1093
1094 i = 0;
1095 while (i < nsyms) {
1096 #ifdef BSWAP_NEEDED
1097 bswap_sym(syms + i);
1098 #endif
1099 // Throw away entries which we do not need.
1100 if (syms[i].st_shndx == SHN_UNDEF ||
1101 syms[i].st_shndx >= SHN_LORESERVE ||
1102 ELF_ST_TYPE(syms[i].st_info) != STT_FUNC) {
1103 nsyms--;
1104 if (i < nsyms) {
1105 syms[i] = syms[nsyms];
1106 }
1107 continue;
1108 }
1109 #if defined(TARGET_ARM) || defined (TARGET_MIPS)
1110 /* The bottom address bit marks a Thumb or MIPS16 symbol. */
1111 syms[i].st_value &= ~(target_ulong)1;
1112 #endif
1113 i++;
1114 }
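    /* The loop above compacts the table in place: an unwanted entry is
       overwritten with the current last entry and nsyms shrinks, leaving
       only defined STT_FUNC symbols (unordered until the qsort below). */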
1115
1116 /* Attempt to free the storage associated with the local symbols
1117 that we threw away. Whether or not this has any effect on the
1118 memory allocation depends on the malloc implementation and how
1119 many symbols we managed to discard. */
1120 new_syms = realloc(syms, nsyms * sizeof(*syms));
1121 if (new_syms == NULL) {
1122 free(s);
1123 free(syms);
1124 free(strings);
1125 return;
1126 }
1127 syms = new_syms;
1128
1129 qsort(syms, nsyms, sizeof(*syms), symcmp);
1130
1131 lseek(fd, strtab.sh_offset, SEEK_SET);
1132 if (read(fd, strings, strtab.sh_size) != strtab.sh_size) {
1133 free(s);
1134 free(syms);
1135 free(strings);
1136 return;
1137 }
1138 s->disas_num_syms = nsyms;
1139 #if ELF_CLASS == ELFCLASS32
1140 s->disas_symtab.elf32 = syms;
1141 s->lookup_symbol = (lookup_symbol_t)lookup_symbolxx;
1142 #else
1143 s->disas_symtab.elf64 = syms;
1144 s->lookup_symbol = (lookup_symbol_t)lookup_symbolxx;
1145 #endif
1146 s->next = syminfos;
1147 syminfos = s;
1148 }
1149
1150 int load_elf_binary(struct linux_binprm * bprm, struct target_pt_regs * regs,
1151 struct image_info * info)
1152 {
1153 struct elfhdr elf_ex;
1154 struct elfhdr interp_elf_ex;
1155 struct exec interp_ex;
1156 int interpreter_fd = -1; /* avoid warning */
1157 abi_ulong load_addr, load_bias;
1158 int load_addr_set = 0;
1159 unsigned int interpreter_type = INTERPRETER_NONE;
1160 unsigned char ibcs2_interpreter;
1161 int i;
1162 abi_ulong mapped_addr;
1163 struct elf_phdr * elf_ppnt;
1164 struct elf_phdr *elf_phdata;
1165 abi_ulong elf_bss, k, elf_brk;
1166 int retval;
1167 char * elf_interpreter;
1168 abi_ulong elf_entry, interp_load_addr = 0;
1169 int status;
1170 abi_ulong start_code, end_code, start_data, end_data;
1171 abi_ulong reloc_func_desc = 0;
1172 abi_ulong elf_stack;
1173 char passed_fileno[6];
1174
1175 ibcs2_interpreter = 0;
1176 status = 0;
1177 load_addr = 0;
1178 load_bias = 0;
1179 elf_ex = *((struct elfhdr *) bprm->buf); /* exec-header */
1180 #ifdef BSWAP_NEEDED
1181 bswap_ehdr(&elf_ex);
1182 #endif
1183
1184 /* First of all, some simple consistency checks */
1185 if ((elf_ex.e_type != ET_EXEC && elf_ex.e_type != ET_DYN) ||
1186 (! elf_check_arch(elf_ex.e_machine))) {
1187 return -ENOEXEC;
1188 }
1189
1190 bprm->p = copy_elf_strings(1, &bprm->filename, bprm->page, bprm->p);
1191 bprm->p = copy_elf_strings(bprm->envc,bprm->envp,bprm->page,bprm->p);
1192 bprm->p = copy_elf_strings(bprm->argc,bprm->argv,bprm->page,bprm->p);
1193 if (!bprm->p) {
1194 retval = -E2BIG;
1195 }
1196
1197 /* Now read in all of the header information */
1198 elf_phdata = (struct elf_phdr *)malloc(elf_ex.e_phentsize*elf_ex.e_phnum);
1199 if (elf_phdata == NULL) {
1200 return -ENOMEM;
1201 }
1202
1203 retval = lseek(bprm->fd, elf_ex.e_phoff, SEEK_SET);
1204 if(retval > 0) {
1205 retval = read(bprm->fd, (char *) elf_phdata,
1206 elf_ex.e_phentsize * elf_ex.e_phnum);
1207 }
1208
1209 if (retval < 0) {
1210 perror("load_elf_binary");
1211 exit(-1);
1212 free (elf_phdata);
1213 return -errno;
1214 }
1215
1216 #ifdef BSWAP_NEEDED
1217 elf_ppnt = elf_phdata;
1218 for (i=0; i<elf_ex.e_phnum; i++, elf_ppnt++) {
1219 bswap_phdr(elf_ppnt);
1220 }
1221 #endif
1222 elf_ppnt = elf_phdata;
1223
1224 elf_bss = 0;
1225 elf_brk = 0;
1226
1227
1228 elf_stack = ~((abi_ulong)0UL);
1229 elf_interpreter = NULL;
1230 start_code = ~((abi_ulong)0UL);
1231 end_code = 0;
1232 start_data = 0;
1233 end_data = 0;
1234 interp_ex.a_info = 0;
1235
1236 for(i=0;i < elf_ex.e_phnum; i++) {
1237 if (elf_ppnt->p_type == PT_INTERP) {
1238 if ( elf_interpreter != NULL )
1239 {
1240 free (elf_phdata);
1241 free(elf_interpreter);
1242 close(bprm->fd);
1243 return -EINVAL;
1244 }
1245
1246 /* This is the program interpreter used for
1247 * shared libraries - for now assume that this
1248 * is an a.out format binary
1249 */
1250
1251 elf_interpreter = (char *)malloc(elf_ppnt->p_filesz);
1252
1253 if (elf_interpreter == NULL) {
1254 free (elf_phdata);
1255 close(bprm->fd);
1256 return -ENOMEM;
1257 }
1258
1259 retval = lseek(bprm->fd, elf_ppnt->p_offset, SEEK_SET);
1260 if(retval >= 0) {
1261 retval = read(bprm->fd, elf_interpreter, elf_ppnt->p_filesz);
1262 }
1263 if(retval < 0) {
1264 perror("load_elf_binary2");
1265 exit(-1);
1266 }
1267
1268 /* If the program interpreter is one of these two,
1269 then assume an iBCS2 image. Otherwise assume
1270 a native linux image. */
1271
1272 /* JRP - Need to add X86 lib dir stuff here... */
1273
1274 if (strcmp(elf_interpreter,"/usr/lib/libc.so.1") == 0 ||
1275 strcmp(elf_interpreter,"/usr/lib/ld.so.1") == 0) {
1276 ibcs2_interpreter = 1;
1277 }
1278
1279 #if 0
1280 printf("Using ELF interpreter %s\n", path(elf_interpreter));
1281 #endif
1282 if (retval >= 0) {
1283 retval = open(path(elf_interpreter), O_RDONLY);
1284 if(retval >= 0) {
1285 interpreter_fd = retval;
1286 }
1287 else {
1288 perror(elf_interpreter);
1289 exit(-1);
1290 /* retval = -errno; */
1291 }
1292 }
1293
1294 if (retval >= 0) {
1295 retval = lseek(interpreter_fd, 0, SEEK_SET);
1296 if(retval >= 0) {
1297 retval = read(interpreter_fd,bprm->buf,128);
1298 }
1299 }
1300 if (retval >= 0) {
1301 interp_ex = *((struct exec *) bprm->buf); /* aout exec-header */
1302 interp_elf_ex = *((struct elfhdr *) bprm->buf); /* elf exec-header */
1303 }
1304 if (retval < 0) {
1305 perror("load_elf_binary3");
1306 exit(-1);
1307 free (elf_phdata);
1308 free(elf_interpreter);
1309 close(bprm->fd);
1310 return retval;
1311 }
1312 }
1313 elf_ppnt++;
1314 }
1315
1316 /* Some simple consistency checks for the interpreter */
1317 if (elf_interpreter){
1318 interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;
1319
1320 /* Now figure out which format our binary is */
1321 if ((N_MAGIC(interp_ex) != OMAGIC) && (N_MAGIC(interp_ex) != ZMAGIC) &&
1322 (N_MAGIC(interp_ex) != QMAGIC)) {
1323 interpreter_type = INTERPRETER_ELF;
1324 }
1325
1326 if (interp_elf_ex.e_ident[0] != 0x7f ||
1327 strncmp((char *)&interp_elf_ex.e_ident[1], "ELF",3) != 0) {
1328 interpreter_type &= ~INTERPRETER_ELF;
1329 }
1330
1331 if (!interpreter_type) {
1332 free(elf_interpreter);
1333 free(elf_phdata);
1334 close(bprm->fd);
1335 return -ELIBBAD;
1336 }
1337 }
1338
1339 /* OK, we are done with that, now set up the arg stuff,
1340 and then start this sucker up */
1341
1342 {
1343 char * passed_p;
1344
1345 if (interpreter_type == INTERPRETER_AOUT) {
1346 snprintf(passed_fileno, sizeof(passed_fileno), "%d", bprm->fd);
1347 passed_p = passed_fileno;
1348
1349 if (elf_interpreter) {
1350 bprm->p = copy_elf_strings(1,&passed_p,bprm->page,bprm->p);
1351 bprm->argc++;
1352 }
1353 }
1354 if (!bprm->p) {
1355 if (elf_interpreter) {
1356 free(elf_interpreter);
1357 }
1358 free (elf_phdata);
1359 close(bprm->fd);
1360 return -E2BIG;
1361 }
1362 }
1363
1364 /* OK, This is the point of no return */
1365 info->end_data = 0;
1366 info->end_code = 0;
1367 info->start_mmap = (abi_ulong)ELF_START_MMAP;
1368 info->mmap = 0;
1369 elf_entry = (abi_ulong) elf_ex.e_entry;
1370
1371 #if defined(CONFIG_USE_GUEST_BASE)
1372     /*
1373      * If the user has not explicitly set guest_base, probe here
1374      * whether we should set it automatically.
1375      */
1376 if (!have_guest_base) {
1377 /*
1378 * Go through ELF program header table and find out whether
1379 * any of the segments drop below our current mmap_min_addr and
1380 * in that case set guest_base to corresponding address.
1381 */
1382 for (i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum;
1383 i++, elf_ppnt++) {
1384 if (elf_ppnt->p_type != PT_LOAD)
1385 continue;
1386 if (HOST_PAGE_ALIGN(elf_ppnt->p_vaddr) < mmap_min_addr) {
1387 guest_base = HOST_PAGE_ALIGN(mmap_min_addr);
1388 break;
1389 }
1390 }
1391 }
1392 #endif /* CONFIG_USE_GUEST_BASE */
1393
1394 /* Do this so that we can load the interpreter, if need be. We will
1395 change some of these later */
1396 info->rss = 0;
1397 bprm->p = setup_arg_pages(bprm->p, bprm, info);
1398 info->start_stack = bprm->p;
1399
1400     /* Now we do a little grungy work by mmapping the ELF image into
1401      * the correct location in memory. At this point, we assume that
1402      * the image should be loaded at a fixed address, not at a variable
1403      * address.
1404      */
1405
1406 for(i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum; i++, elf_ppnt++) {
1407 int elf_prot = 0;
1408 int elf_flags = 0;
1409 abi_ulong error;
1410
1411 if (elf_ppnt->p_type != PT_LOAD)
1412 continue;
1413
1414 if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ;
1415 if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
1416 if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
1417 elf_flags = MAP_PRIVATE | MAP_DENYWRITE;
1418 if (elf_ex.e_type == ET_EXEC || load_addr_set) {
1419 elf_flags |= MAP_FIXED;
1420 } else if (elf_ex.e_type == ET_DYN) {
1421 /* Try and get dynamic programs out of the way of the default mmap
1422 base, as well as whatever program they might try to exec. This
1423 is because the brk will follow the loader, and is not movable. */
1424 /* NOTE: for qemu, we do a big mmap to get enough space
1425 without hardcoding any address */
1426 error = target_mmap(0, ET_DYN_MAP_SIZE,
1427 PROT_NONE, MAP_PRIVATE | MAP_ANON,
1428 -1, 0);
1429 if (error == -1) {
1430 perror("mmap");
1431 exit(-1);
1432 }
1433 load_bias = TARGET_ELF_PAGESTART(error - elf_ppnt->p_vaddr);
1434 }
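        /* Example with hypothetical numbers (4 KiB pages): if the anonymous
         * reservation above returned 0x40000000 and the first PT_LOAD segment
         * has p_vaddr == 0 (typical for ET_DYN), then load_bias == 0x40000000
         * and each segment below is mapped at load_bias + p_vaddr, rounded
         * down to a page boundary. */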
1435
1436 error = target_mmap(TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr),
1437 (elf_ppnt->p_filesz +
1438 TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)),
1439 elf_prot,
1440 (MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE),
1441 bprm->fd,
1442 (elf_ppnt->p_offset -
1443 TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)));
1444 if (error == -1) {
1445 perror("mmap");
1446 exit(-1);
1447 }
1448
1449 #ifdef LOW_ELF_STACK
1450 if (TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr) < elf_stack)
1451 elf_stack = TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr);
1452 #endif
1453
1454 if (!load_addr_set) {
1455 load_addr_set = 1;
1456 load_addr = elf_ppnt->p_vaddr - elf_ppnt->p_offset;
1457 if (elf_ex.e_type == ET_DYN) {
1458 load_bias += error -
1459 TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr);
1460 load_addr += load_bias;
1461 reloc_func_desc = load_bias;
1462 }
1463 }
1464 k = elf_ppnt->p_vaddr;
1465 if (k < start_code)
1466 start_code = k;
1467 if (start_data < k)
1468 start_data = k;
1469 k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
1470 if (k > elf_bss)
1471 elf_bss = k;
1472 if ((elf_ppnt->p_flags & PF_X) && end_code < k)
1473 end_code = k;
1474 if (end_data < k)
1475 end_data = k;
1476 k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
1477 if (k > elf_brk) elf_brk = k;
1478 }
1479
1480 elf_entry += load_bias;
1481 elf_bss += load_bias;
1482 elf_brk += load_bias;
1483 start_code += load_bias;
1484 end_code += load_bias;
1485 start_data += load_bias;
1486 end_data += load_bias;
1487
1488 if (elf_interpreter) {
1489 if (interpreter_type & 1) {
1490 elf_entry = load_aout_interp(&interp_ex, interpreter_fd);
1491 }
1492 else if (interpreter_type & 2) {
1493 elf_entry = load_elf_interp(&interp_elf_ex, interpreter_fd,
1494 &interp_load_addr);
1495 }
1496 reloc_func_desc = interp_load_addr;
1497
1498 close(interpreter_fd);
1499 free(elf_interpreter);
1500
1501 if (elf_entry == ~((abi_ulong)0UL)) {
1502 printf("Unable to load interpreter\n");
1503 free(elf_phdata);
1504 exit(-1);
1505 return 0;
1506 }
1507 }
1508
1509 free(elf_phdata);
1510
1511 if (qemu_log_enabled())
1512 load_symbols(&elf_ex, bprm->fd);
1513
1514 if (interpreter_type != INTERPRETER_AOUT) close(bprm->fd);
1515 info->personality = (ibcs2_interpreter ? PER_SVR4 : PER_LINUX);
1516
1517 #ifdef LOW_ELF_STACK
1518 info->start_stack = bprm->p = elf_stack - 4;
1519 #endif
1520 bprm->p = create_elf_tables(bprm->p,
1521 bprm->argc,
1522 bprm->envc,
1523 &elf_ex,
1524 load_addr, load_bias,
1525 interp_load_addr,
1526 (interpreter_type == INTERPRETER_AOUT ? 0 : 1),
1527 info);
1528 info->load_addr = reloc_func_desc;
1529 info->start_brk = info->brk = elf_brk;
1530 info->end_code = end_code;
1531 info->start_code = start_code;
1532 info->start_data = start_data;
1533 info->end_data = end_data;
1534 info->start_stack = bprm->p;
1535
1536 /* Calling set_brk effectively mmaps the pages that we need for the bss and break
1537 sections */
1538 set_brk(elf_bss, elf_brk);
1539
1540 padzero(elf_bss, elf_brk);
1541
1542 #if 0
1543 printf("(start_brk) %x\n" , info->start_brk);
1544 printf("(end_code) %x\n" , info->end_code);
1545 printf("(start_code) %x\n" , info->start_code);
1546 printf("(end_data) %x\n" , info->end_data);
1547 printf("(start_stack) %x\n" , info->start_stack);
1548 printf("(brk) %x\n" , info->brk);
1549 #endif
1550
1551 if ( info->personality == PER_SVR4 )
1552 {
1553 /* Why this, you ask??? Well SVr4 maps page 0 as read-only,
1554 and some applications "depend" upon this behavior.
1555 Since we do not have the power to recompile these, we
1556 emulate the SVr4 behavior. Sigh. */
1557 mapped_addr = target_mmap(0, qemu_host_page_size, PROT_READ | PROT_EXEC,
1558 MAP_FIXED | MAP_PRIVATE, -1, 0);
1559 }
1560
1561 info->entry = elf_entry;
1562
1563 return 0;
1564 }
1565
1566 static int load_aout_interp(void * exptr, int interp_fd)
1567 {
1568 printf("a.out interpreter not yet supported\n");
1569 return(0);
1570 }
1571
1572 void do_init_thread(struct target_pt_regs *regs, struct image_info *infop)
1573 {
1574 init_thread(regs, infop);
1575 }