4 * Copyright (c) 2013 Stacey D. Son
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
20 #include "qemu/osdep.h"
23 #include "disas/disas.h"
24 #include "qemu/path.h"
26 #include "target_arch_elf.h"
28 /* This flag is ineffective under Linux too; it should be deleted. */
30 #define MAP_DENYWRITE 0
33 /* should probably go in elf.h */
39 #define ELF_PLATFORM (NULL)
48 #define ELF_CLASS ELFCLASS32
50 #define bswaptls(ptr) bswap32s(ptr)
57 unsigned int a_info
; /* Use macros N_MAGIC, etc for access */
58 unsigned int a_text
; /* length of text, in bytes */
59 unsigned int a_data
; /* length of data, in bytes */
60 unsigned int a_bss
; /* length of uninitialized data area, in bytes */
61 unsigned int a_syms
; /* length of symbol table data in file, in bytes */
62 unsigned int a_entry
; /* start address */
63 unsigned int a_trsize
; /* length of relocation info for text, in bytes */
64 unsigned int a_drsize
; /* length of relocation info for data, in bytes */
68 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
74 /* max code+data+bss space allocated to elf interpreter */
75 #define INTERP_MAP_SIZE (32 * 1024 * 1024)
77 /* max code+data+bss+brk space allocated to ET_DYN executables */
78 #define ET_DYN_MAP_SIZE (128 * 1024 * 1024)
80 /* Necessary parameters */
81 #define TARGET_ELF_EXEC_PAGESIZE TARGET_PAGE_SIZE
82 #define TARGET_ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(TARGET_ELF_EXEC_PAGESIZE - 1))
83 #define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE - 1))
85 #define INTERPRETER_NONE 0
86 #define INTERPRETER_AOUT 1
87 #define INTERPRETER_ELF 2
89 #define DLINFO_ITEMS 12
/*
 * Thin wrapper kept for historical symmetry with the Linux kernel's
 * user-space copy helper of the same name; here both buffers live in
 * host memory, so a plain memcpy suffices.
 */
static inline void memcpy_fromfs(void *to, const void *from, unsigned long n)
{
    memcpy(to, from, n);
}
96 static int load_aout_interp(void *exptr
, int interp_fd
);
99 static void bswap_ehdr(struct elfhdr
*ehdr
)
101 bswap16s(&ehdr
->e_type
); /* Object file type */
102 bswap16s(&ehdr
->e_machine
); /* Architecture */
103 bswap32s(&ehdr
->e_version
); /* Object file version */
104 bswaptls(&ehdr
->e_entry
); /* Entry point virtual address */
105 bswaptls(&ehdr
->e_phoff
); /* Program header table file offset */
106 bswaptls(&ehdr
->e_shoff
); /* Section header table file offset */
107 bswap32s(&ehdr
->e_flags
); /* Processor-specific flags */
108 bswap16s(&ehdr
->e_ehsize
); /* ELF header size in bytes */
109 bswap16s(&ehdr
->e_phentsize
); /* Program header table entry size */
110 bswap16s(&ehdr
->e_phnum
); /* Program header table entry count */
111 bswap16s(&ehdr
->e_shentsize
); /* Section header table entry size */
112 bswap16s(&ehdr
->e_shnum
); /* Section header table entry count */
113 bswap16s(&ehdr
->e_shstrndx
); /* Section header string table index */
116 static void bswap_phdr(struct elf_phdr
*phdr
)
118 bswap32s(&phdr
->p_type
); /* Segment type */
119 bswaptls(&phdr
->p_offset
); /* Segment file offset */
120 bswaptls(&phdr
->p_vaddr
); /* Segment virtual address */
121 bswaptls(&phdr
->p_paddr
); /* Segment physical address */
122 bswaptls(&phdr
->p_filesz
); /* Segment size in file */
123 bswaptls(&phdr
->p_memsz
); /* Segment size in memory */
124 bswap32s(&phdr
->p_flags
); /* Segment flags */
125 bswaptls(&phdr
->p_align
); /* Segment alignment */
128 static void bswap_shdr(struct elf_shdr
*shdr
)
130 bswap32s(&shdr
->sh_name
);
131 bswap32s(&shdr
->sh_type
);
132 bswaptls(&shdr
->sh_flags
);
133 bswaptls(&shdr
->sh_addr
);
134 bswaptls(&shdr
->sh_offset
);
135 bswaptls(&shdr
->sh_size
);
136 bswap32s(&shdr
->sh_link
);
137 bswap32s(&shdr
->sh_info
);
138 bswaptls(&shdr
->sh_addralign
);
139 bswaptls(&shdr
->sh_entsize
);
142 static void bswap_sym(struct elf_sym
*sym
)
144 bswap32s(&sym
->st_name
);
145 bswaptls(&sym
->st_value
);
146 bswaptls(&sym
->st_size
);
147 bswap16s(&sym
->st_shndx
);
152 * 'copy_elf_strings()' copies argument/envelope strings from user
153 * memory to free pages in kernel mem. These are in a format ready
154 * to be put directly into the top of new user memory.
157 static abi_ulong
copy_elf_strings(int argc
, char **argv
, void **page
,
160 char *tmp
, *tmp1
, *pag
= NULL
;
164 return 0; /* bullet-proofing */
169 fprintf(stderr
, "VFS: argc is wrong");
175 if (p
< len
) { /* this shouldn't happen - 128kB */
181 offset
= p
% TARGET_PAGE_SIZE
;
182 pag
= (char *)page
[p
/ TARGET_PAGE_SIZE
];
184 pag
= g_try_malloc0(TARGET_PAGE_SIZE
);
185 page
[p
/ TARGET_PAGE_SIZE
] = pag
;
190 if (len
== 0 || offset
== 0) {
191 *(pag
+ offset
) = *tmp
;
194 int bytes_to_copy
= (len
> offset
) ? offset
: len
;
195 tmp
-= bytes_to_copy
;
197 offset
-= bytes_to_copy
;
198 len
-= bytes_to_copy
;
199 memcpy_fromfs(pag
+ offset
, tmp
, bytes_to_copy
+ 1);
206 static abi_ulong
setup_arg_pages(abi_ulong p
, struct bsd_binprm
*bprm
,
207 struct image_info
*info
)
209 abi_ulong stack_base
, size
, error
;
212 /* Create enough stack to hold everything. If we don't use
213 * it for args, we'll use it for something else...
215 size
= x86_stack_size
;
216 if (size
< MAX_ARG_PAGES
* TARGET_PAGE_SIZE
)
217 size
= MAX_ARG_PAGES
* TARGET_PAGE_SIZE
;
218 error
= target_mmap(0,
219 size
+ qemu_host_page_size
,
220 PROT_READ
| PROT_WRITE
,
221 MAP_PRIVATE
| MAP_ANON
,
227 /* we reserve one extra page at the top of the stack as guard */
228 target_mprotect(error
+ size
, qemu_host_page_size
, PROT_NONE
);
230 stack_base
= error
+ size
- MAX_ARG_PAGES
* TARGET_PAGE_SIZE
;
233 for (i
= 0 ; i
< MAX_ARG_PAGES
; i
++) {
236 /* FIXME - check return value of memcpy_to_target() for failure */
237 memcpy_to_target(stack_base
, bprm
->page
[i
], TARGET_PAGE_SIZE
);
238 g_free(bprm
->page
[i
]);
240 stack_base
+= TARGET_PAGE_SIZE
;
245 static void set_brk(abi_ulong start
, abi_ulong end
)
247 /* page-align the start and end addresses... */
248 start
= HOST_PAGE_ALIGN(start
);
249 end
= HOST_PAGE_ALIGN(end
);
252 if (target_mmap(start
, end
- start
,
253 PROT_READ
| PROT_WRITE
| PROT_EXEC
,
254 MAP_FIXED
| MAP_PRIVATE
| MAP_ANON
, -1, 0) == -1) {
255 perror("cannot mmap brk");
261 /* We need to explicitly zero any fractional pages after the data
262 section (i.e. bss). This would contain the junk from the file that
263 should not be in memory. */
264 static void padzero(abi_ulong elf_bss
, abi_ulong last_bss
)
268 if (elf_bss
>= last_bss
)
271 /* XXX: this is really a hack : if the real host page size is
272 smaller than the target page size, some pages after the end
273 of the file may not be mapped. A better fix would be to
274 patch target_mmap(), but it is more complicated as the file
275 size must be known */
276 if (qemu_real_host_page_size
< qemu_host_page_size
) {
277 abi_ulong end_addr
, end_addr1
;
278 end_addr1
= REAL_HOST_PAGE_ALIGN(elf_bss
);
279 end_addr
= HOST_PAGE_ALIGN(elf_bss
);
280 if (end_addr1
< end_addr
) {
281 mmap((void *)g2h_untagged(end_addr1
), end_addr
- end_addr1
,
282 PROT_READ
| PROT_WRITE
| PROT_EXEC
,
283 MAP_FIXED
| MAP_PRIVATE
| MAP_ANON
, -1, 0);
287 nbyte
= elf_bss
& (qemu_host_page_size
- 1);
289 nbyte
= qemu_host_page_size
- nbyte
;
291 /* FIXME - what to do if put_user() fails? */
292 put_user_u8(0, elf_bss
);
299 static abi_ulong
create_elf_tables(abi_ulong p
, int argc
, int envc
,
300 struct elfhdr
* exec
,
303 abi_ulong interp_load_addr
, int ibcs
,
304 struct image_info
*info
)
308 abi_ulong u_platform
;
309 const char *k_platform
;
310 const int n
= sizeof(elf_addr_t
);
314 k_platform
= ELF_PLATFORM
;
316 size_t len
= strlen(k_platform
) + 1;
317 sp
-= (len
+ n
- 1) & ~(n
- 1);
319 /* FIXME - check return value of memcpy_to_target() for failure */
320 memcpy_to_target(sp
, k_platform
, len
);
323 * Force 16 byte _final_ alignment here for generality.
325 sp
= sp
& ~(abi_ulong
)15;
326 size
= (DLINFO_ITEMS
+ 1) * 2;
329 #ifdef DLINFO_ARCH_ITEMS
330 size
+= DLINFO_ARCH_ITEMS
* 2;
332 size
+= envc
+ argc
+ 2;
333 size
+= (!ibcs
? 3 : 1); /* argc itself */
336 sp
-= 16 - (size
& 15);
338 /* This is correct because Linux defines
339 * elf_addr_t as Elf32_Off / Elf64_Off
341 #define NEW_AUX_ENT(id, val) do { \
342 sp -= n; put_user_ual(val, sp); \
343 sp -= n; put_user_ual(id, sp); \
346 NEW_AUX_ENT(AT_NULL
, 0);
348 /* There must be exactly DLINFO_ITEMS entries here. */
349 NEW_AUX_ENT(AT_PHDR
, (abi_ulong
)(load_addr
+ exec
->e_phoff
));
350 NEW_AUX_ENT(AT_PHENT
, (abi_ulong
)(sizeof(struct elf_phdr
)));
351 NEW_AUX_ENT(AT_PHNUM
, (abi_ulong
)(exec
->e_phnum
));
352 NEW_AUX_ENT(AT_PAGESZ
, (abi_ulong
)(TARGET_PAGE_SIZE
));
353 NEW_AUX_ENT(AT_BASE
, (abi_ulong
)(interp_load_addr
));
354 NEW_AUX_ENT(AT_FLAGS
, (abi_ulong
)0);
355 NEW_AUX_ENT(AT_ENTRY
, load_bias
+ exec
->e_entry
);
356 NEW_AUX_ENT(AT_UID
, (abi_ulong
) getuid());
357 NEW_AUX_ENT(AT_EUID
, (abi_ulong
) geteuid());
358 NEW_AUX_ENT(AT_GID
, (abi_ulong
) getgid());
359 NEW_AUX_ENT(AT_EGID
, (abi_ulong
) getegid());
360 NEW_AUX_ENT(AT_HWCAP
, (abi_ulong
) ELF_HWCAP
);
361 NEW_AUX_ENT(AT_CLKTCK
, (abi_ulong
) sysconf(_SC_CLK_TCK
));
363 NEW_AUX_ENT(AT_PLATFORM
, u_platform
);
366 * ARCH_DLINFO must come last so platform specific code can enforce
367 * special alignment requirements on the AUXV if necessary (eg. PPC).
373 sp
= loader_build_argptr(envc
, argc
, sp
, p
, !ibcs
);
378 static abi_ulong
load_elf_interp(struct elfhdr
*interp_elf_ex
,
380 abi_ulong
*interp_load_addr
)
382 struct elf_phdr
*elf_phdata
= NULL
;
383 struct elf_phdr
*eppnt
;
384 abi_ulong load_addr
= 0;
385 int load_addr_set
= 0;
387 abi_ulong last_bss
, elf_bss
;
396 bswap_ehdr(interp_elf_ex
);
398 /* First of all, some simple consistency checks */
399 if ((interp_elf_ex
->e_type
!= ET_EXEC
&&
400 interp_elf_ex
->e_type
!= ET_DYN
) ||
401 !elf_check_arch(interp_elf_ex
->e_machine
)) {
402 return ~((abi_ulong
)0UL);
406 /* Now read in all of the header information */
408 if (sizeof(struct elf_phdr
) * interp_elf_ex
->e_phnum
> TARGET_PAGE_SIZE
)
409 return ~(abi_ulong
)0UL;
411 elf_phdata
= (struct elf_phdr
*)
412 malloc(sizeof(struct elf_phdr
) * interp_elf_ex
->e_phnum
);
415 return ~((abi_ulong
)0UL);
418 * If the size of this structure has changed, then punt, since
419 * we will be doing the wrong thing.
421 if (interp_elf_ex
->e_phentsize
!= sizeof(struct elf_phdr
)) {
423 return ~((abi_ulong
)0UL);
426 retval
= lseek(interpreter_fd
, interp_elf_ex
->e_phoff
, SEEK_SET
);
428 retval
= read(interpreter_fd
,
430 sizeof(struct elf_phdr
) * interp_elf_ex
->e_phnum
);
433 perror("load_elf_interp");
440 for (i
= 0; i
<interp_elf_ex
->e_phnum
; i
++, eppnt
++) {
445 if (interp_elf_ex
->e_type
== ET_DYN
) {
446 /* in order to avoid hardcoding the interpreter load
447 address in qemu, we allocate a big enough memory zone */
448 error
= target_mmap(0, INTERP_MAP_SIZE
,
449 PROT_NONE
, MAP_PRIVATE
| MAP_ANON
,
460 for (i
= 0; i
< interp_elf_ex
->e_phnum
; i
++, eppnt
++)
461 if (eppnt
->p_type
== PT_LOAD
) {
462 int elf_type
= MAP_PRIVATE
| MAP_DENYWRITE
;
467 if (eppnt
->p_flags
& PF_R
) elf_prot
= PROT_READ
;
468 if (eppnt
->p_flags
& PF_W
) elf_prot
|= PROT_WRITE
;
469 if (eppnt
->p_flags
& PF_X
) elf_prot
|= PROT_EXEC
;
470 if (interp_elf_ex
->e_type
== ET_EXEC
|| load_addr_set
) {
471 elf_type
|= MAP_FIXED
;
472 vaddr
= eppnt
->p_vaddr
;
474 error
= target_mmap(load_addr
+ TARGET_ELF_PAGESTART(vaddr
),
475 eppnt
->p_filesz
+ TARGET_ELF_PAGEOFFSET(eppnt
->p_vaddr
),
479 eppnt
->p_offset
- TARGET_ELF_PAGEOFFSET(eppnt
->p_vaddr
));
483 close(interpreter_fd
);
485 return ~((abi_ulong
)0UL);
488 if (!load_addr_set
&& interp_elf_ex
->e_type
== ET_DYN
) {
494 * Find the end of the file mapping for this phdr, and keep
495 * track of the largest address we see for this.
497 k
= load_addr
+ eppnt
->p_vaddr
+ eppnt
->p_filesz
;
498 if (k
> elf_bss
) elf_bss
= k
;
501 * Do the same thing for the memory mapping - between
502 * elf_bss and last_bss is the bss section.
504 k
= load_addr
+ eppnt
->p_memsz
+ eppnt
->p_vaddr
;
505 if (k
> last_bss
) last_bss
= k
;
508 /* Now use mmap to map the library into memory. */
510 close(interpreter_fd
);
513 * Now fill out the bss section. First pad the last page up
514 * to the page boundary, and then perform a mmap to make sure
515 * that there are zeromapped pages up to and including the last
518 padzero(elf_bss
, last_bss
);
519 elf_bss
= TARGET_ELF_PAGESTART(elf_bss
+ qemu_host_page_size
- 1); /* What we have mapped so far */
521 /* Map the last of the bss segment */
522 if (last_bss
> elf_bss
) {
523 target_mmap(elf_bss
, last_bss
- elf_bss
,
524 PROT_READ
| PROT_WRITE
| PROT_EXEC
,
525 MAP_FIXED
| MAP_PRIVATE
| MAP_ANON
, -1, 0);
529 *interp_load_addr
= load_addr
;
530 return ((abi_ulong
) interp_elf_ex
->e_entry
) + load_addr
;
533 static int symfind(const void *s0
, const void *s1
)
535 target_ulong addr
= *(target_ulong
*)s0
;
536 struct elf_sym
*sym
= (struct elf_sym
*)s1
;
538 if (addr
< sym
->st_value
) {
540 } else if (addr
>= sym
->st_value
+ sym
->st_size
) {
546 static const char *lookup_symbolxx(struct syminfo
*s
, target_ulong orig_addr
)
548 #if ELF_CLASS == ELFCLASS32
549 struct elf_sym
*syms
= s
->disas_symtab
.elf32
;
551 struct elf_sym
*syms
= s
->disas_symtab
.elf64
;
557 sym
= bsearch(&orig_addr
, syms
, s
->disas_num_syms
, sizeof(*syms
), symfind
);
559 return s
->disas_strtab
+ sym
->st_name
;
565 /* FIXME: This should use elf_ops.h */
566 static int symcmp(const void *s0
, const void *s1
)
568 struct elf_sym
*sym0
= (struct elf_sym
*)s0
;
569 struct elf_sym
*sym1
= (struct elf_sym
*)s1
;
570 return (sym0
->st_value
< sym1
->st_value
)
572 : ((sym0
->st_value
> sym1
->st_value
) ? 1 : 0);
575 /* Best attempt to load symbols from this ELF object. */
576 static void load_symbols(struct elfhdr
*hdr
, int fd
)
578 unsigned int i
, nsyms
;
579 struct elf_shdr sechdr
, symtab
, strtab
;
582 struct elf_sym
*syms
, *new_syms
;
584 lseek(fd
, hdr
->e_shoff
, SEEK_SET
);
585 for (i
= 0; i
< hdr
->e_shnum
; i
++) {
586 if (read(fd
, &sechdr
, sizeof(sechdr
)) != sizeof(sechdr
))
591 if (sechdr
.sh_type
== SHT_SYMTAB
) {
593 lseek(fd
, hdr
->e_shoff
594 + sizeof(sechdr
) * sechdr
.sh_link
, SEEK_SET
);
595 if (read(fd
, &strtab
, sizeof(strtab
))
604 return; /* Shouldn't happen... */
607 /* Now know where the strtab and symtab are. Snarf them. */
608 s
= malloc(sizeof(*s
));
609 syms
= malloc(symtab
.sh_size
);
614 s
->disas_strtab
= strings
= malloc(strtab
.sh_size
);
615 if (!s
->disas_strtab
) {
621 lseek(fd
, symtab
.sh_offset
, SEEK_SET
);
622 if (read(fd
, syms
, symtab
.sh_size
) != symtab
.sh_size
) {
629 nsyms
= symtab
.sh_size
/ sizeof(struct elf_sym
);
636 // Throw away entries which we do not need.
637 if (syms
[i
].st_shndx
== SHN_UNDEF
||
638 syms
[i
].st_shndx
>= SHN_LORESERVE
||
639 ELF_ST_TYPE(syms
[i
].st_info
) != STT_FUNC
) {
642 syms
[i
] = syms
[nsyms
];
649 /* Attempt to free the storage associated with the local symbols
650 that we threw away. Whether or not this has any effect on the
651 memory allocation depends on the malloc implementation and how
652 many symbols we managed to discard. */
653 new_syms
= realloc(syms
, nsyms
* sizeof(*syms
));
654 if (new_syms
== NULL
) {
662 qsort(syms
, nsyms
, sizeof(*syms
), symcmp
);
664 lseek(fd
, strtab
.sh_offset
, SEEK_SET
);
665 if (read(fd
, strings
, strtab
.sh_size
) != strtab
.sh_size
) {
671 s
->disas_num_syms
= nsyms
;
672 #if ELF_CLASS == ELFCLASS32
673 s
->disas_symtab
.elf32
= syms
;
674 s
->lookup_symbol
= (lookup_symbol_t
)lookup_symbolxx
;
676 s
->disas_symtab
.elf64
= syms
;
677 s
->lookup_symbol
= (lookup_symbol_t
)lookup_symbolxx
;
683 int load_elf_binary(struct bsd_binprm
*bprm
, struct target_pt_regs
*regs
,
684 struct image_info
*info
)
686 struct elfhdr elf_ex
;
687 struct elfhdr interp_elf_ex
;
688 struct exec interp_ex
;
689 int interpreter_fd
= -1; /* avoid warning */
690 abi_ulong load_addr
, load_bias
;
691 int load_addr_set
= 0;
692 unsigned int interpreter_type
= INTERPRETER_NONE
;
694 struct elf_phdr
* elf_ppnt
;
695 struct elf_phdr
*elf_phdata
;
696 abi_ulong elf_bss
, k
, elf_brk
;
698 char * elf_interpreter
;
699 abi_ulong elf_entry
, interp_load_addr
= 0;
700 abi_ulong start_code
, end_code
, start_data
, end_data
;
701 abi_ulong reloc_func_desc
= 0;
703 abi_ulong elf_stack
= ~((abi_ulong
)0UL);
705 char passed_fileno
[6];
709 elf_ex
= *((struct elfhdr
*) bprm
->buf
); /* exec-header */
714 /* First of all, some simple consistency checks */
715 if ((elf_ex
.e_type
!= ET_EXEC
&& elf_ex
.e_type
!= ET_DYN
) ||
716 (!elf_check_arch(elf_ex
.e_machine
))) {
720 bprm
->p
= copy_elf_strings(1, &bprm
->filename
, bprm
->page
, bprm
->p
);
721 bprm
->p
= copy_elf_strings(bprm
->envc
, bprm
->envp
, bprm
->page
,bprm
->p
);
722 bprm
->p
= copy_elf_strings(bprm
->argc
, bprm
->argv
, bprm
->page
,bprm
->p
);
727 /* Now read in all of the header information */
728 elf_phdata
= (struct elf_phdr
*)malloc(elf_ex
.e_phentsize
*elf_ex
.e_phnum
);
729 if (elf_phdata
== NULL
) {
733 retval
= lseek(bprm
->fd
, elf_ex
.e_phoff
, SEEK_SET
);
735 retval
= read(bprm
->fd
, (char *)elf_phdata
,
736 elf_ex
.e_phentsize
* elf_ex
.e_phnum
);
740 perror("load_elf_binary");
747 elf_ppnt
= elf_phdata
;
748 for (i
= 0; i
< elf_ex
.e_phnum
; i
++, elf_ppnt
++) {
749 bswap_phdr(elf_ppnt
);
752 elf_ppnt
= elf_phdata
;
758 elf_interpreter
= NULL
;
759 start_code
= ~((abi_ulong
)0UL);
763 interp_ex
.a_info
= 0;
765 for (i
= 0;i
< elf_ex
.e_phnum
; i
++) {
766 if (elf_ppnt
->p_type
== PT_INTERP
) {
767 if (elf_interpreter
!= NULL
)
770 free(elf_interpreter
);
775 /* This is the program interpreter used for
776 * shared libraries - for now assume that this
777 * is an a.out format binary
780 elf_interpreter
= (char *)malloc(elf_ppnt
->p_filesz
);
782 if (elf_interpreter
== NULL
) {
788 retval
= lseek(bprm
->fd
, elf_ppnt
->p_offset
, SEEK_SET
);
790 retval
= read(bprm
->fd
, elf_interpreter
, elf_ppnt
->p_filesz
);
793 perror("load_elf_binary2");
798 retval
= open(path(elf_interpreter
), O_RDONLY
);
800 interpreter_fd
= retval
;
803 perror(elf_interpreter
);
805 /* retval = -errno; */
810 retval
= lseek(interpreter_fd
, 0, SEEK_SET
);
812 retval
= read(interpreter_fd
, bprm
->buf
, 128);
816 interp_ex
= *((struct exec
*) bprm
->buf
); /* aout exec-header */
817 interp_elf_ex
= *((struct elfhdr
*) bprm
->buf
); /* elf exec-header */
820 perror("load_elf_binary3");
823 free(elf_interpreter
);
831 /* Some simple consistency checks for the interpreter */
832 if (elf_interpreter
) {
833 interpreter_type
= INTERPRETER_ELF
| INTERPRETER_AOUT
;
835 /* Now figure out which format our binary is */
836 if ((N_MAGIC(interp_ex
) != OMAGIC
) && (N_MAGIC(interp_ex
) != ZMAGIC
) &&
837 (N_MAGIC(interp_ex
) != QMAGIC
)) {
838 interpreter_type
= INTERPRETER_ELF
;
841 if (interp_elf_ex
.e_ident
[0] != 0x7f ||
842 strncmp((char *)&interp_elf_ex
.e_ident
[1], "ELF", 3) != 0) {
843 interpreter_type
&= ~INTERPRETER_ELF
;
846 if (!interpreter_type
) {
847 free(elf_interpreter
);
854 /* OK, we are done with that, now set up the arg stuff,
855 and then start this sucker up */
860 if (interpreter_type
== INTERPRETER_AOUT
) {
861 snprintf(passed_fileno
, sizeof(passed_fileno
), "%d", bprm
->fd
);
862 passed_p
= passed_fileno
;
864 if (elf_interpreter
) {
865 bprm
->p
= copy_elf_strings(1, &passed_p
, bprm
->page
, bprm
->p
);
870 free(elf_interpreter
);
877 /* OK, This is the point of no return */
880 info
->start_mmap
= (abi_ulong
)ELF_START_MMAP
;
882 elf_entry
= (abi_ulong
) elf_ex
.e_entry
;
885 * In case where user has not explicitly set the guest_base, we
886 * probe here that should we set it automatically.
888 if (!have_guest_base
) {
890 * Go through ELF program header table and find out whether
891 * any of the segments drop below our current mmap_min_addr and
892 * in that case set guest_base to corresponding address.
894 for (i
= 0, elf_ppnt
= elf_phdata
; i
< elf_ex
.e_phnum
;
896 if (elf_ppnt
->p_type
!= PT_LOAD
)
898 if (HOST_PAGE_ALIGN(elf_ppnt
->p_vaddr
) < mmap_min_addr
) {
899 guest_base
= HOST_PAGE_ALIGN(mmap_min_addr
);
905 /* Do this so that we can load the interpreter, if need be. We will
906 change some of these later */
908 bprm
->p
= setup_arg_pages(bprm
->p
, bprm
, info
);
909 info
->start_stack
= bprm
->p
;
911 /* Now we do a little grungy work by mmaping the ELF image into
912 * the correct location in memory. At this point, we assume that
913 * the image should be loaded at fixed address, not at a variable
917 for (i
= 0, elf_ppnt
= elf_phdata
; i
< elf_ex
.e_phnum
; i
++, elf_ppnt
++) {
922 if (elf_ppnt
->p_type
!= PT_LOAD
)
925 if (elf_ppnt
->p_flags
& PF_R
) elf_prot
|= PROT_READ
;
926 if (elf_ppnt
->p_flags
& PF_W
) elf_prot
|= PROT_WRITE
;
927 if (elf_ppnt
->p_flags
& PF_X
) elf_prot
|= PROT_EXEC
;
928 elf_flags
= MAP_PRIVATE
| MAP_DENYWRITE
;
929 if (elf_ex
.e_type
== ET_EXEC
|| load_addr_set
) {
930 elf_flags
|= MAP_FIXED
;
931 } else if (elf_ex
.e_type
== ET_DYN
) {
932 /* Try and get dynamic programs out of the way of the default mmap
933 base, as well as whatever program they might try to exec. This
934 is because the brk will follow the loader, and is not movable. */
935 /* NOTE: for qemu, we do a big mmap to get enough space
936 without hardcoding any address */
937 error
= target_mmap(0, ET_DYN_MAP_SIZE
,
938 PROT_NONE
, MAP_PRIVATE
| MAP_ANON
,
944 load_bias
= TARGET_ELF_PAGESTART(error
- elf_ppnt
->p_vaddr
);
947 error
= target_mmap(TARGET_ELF_PAGESTART(load_bias
+ elf_ppnt
->p_vaddr
),
948 (elf_ppnt
->p_filesz
+
949 TARGET_ELF_PAGEOFFSET(elf_ppnt
->p_vaddr
)),
951 (MAP_FIXED
| MAP_PRIVATE
| MAP_DENYWRITE
),
953 (elf_ppnt
->p_offset
-
954 TARGET_ELF_PAGEOFFSET(elf_ppnt
->p_vaddr
)));
961 if (TARGET_ELF_PAGESTART(elf_ppnt
->p_vaddr
) < elf_stack
)
962 elf_stack
= TARGET_ELF_PAGESTART(elf_ppnt
->p_vaddr
);
965 if (!load_addr_set
) {
967 load_addr
= elf_ppnt
->p_vaddr
- elf_ppnt
->p_offset
;
968 if (elf_ex
.e_type
== ET_DYN
) {
970 TARGET_ELF_PAGESTART(load_bias
+ elf_ppnt
->p_vaddr
);
971 load_addr
+= load_bias
;
972 reloc_func_desc
= load_bias
;
975 k
= elf_ppnt
->p_vaddr
;
980 k
= elf_ppnt
->p_vaddr
+ elf_ppnt
->p_filesz
;
983 if ((elf_ppnt
->p_flags
& PF_X
) && end_code
< k
)
987 k
= elf_ppnt
->p_vaddr
+ elf_ppnt
->p_memsz
;
988 if (k
> elf_brk
) elf_brk
= k
;
991 elf_entry
+= load_bias
;
992 elf_bss
+= load_bias
;
993 elf_brk
+= load_bias
;
994 start_code
+= load_bias
;
995 end_code
+= load_bias
;
996 start_data
+= load_bias
;
997 end_data
+= load_bias
;
999 if (elf_interpreter
) {
1000 if (interpreter_type
& 1) {
1001 elf_entry
= load_aout_interp(&interp_ex
, interpreter_fd
);
1003 else if (interpreter_type
& 2) {
1004 elf_entry
= load_elf_interp(&interp_elf_ex
, interpreter_fd
,
1007 reloc_func_desc
= interp_load_addr
;
1009 close(interpreter_fd
);
1010 free(elf_interpreter
);
1012 if (elf_entry
== ~((abi_ulong
)0UL)) {
1013 printf("Unable to load interpreter\n");
1022 if (qemu_log_enabled())
1023 load_symbols(&elf_ex
, bprm
->fd
);
1025 if (interpreter_type
!= INTERPRETER_AOUT
) close(bprm
->fd
);
1027 #ifdef LOW_ELF_STACK
1028 info
->start_stack
= bprm
->p
= elf_stack
- 4;
1030 bprm
->p
= create_elf_tables(bprm
->p
,
1034 load_addr
, load_bias
,
1036 (interpreter_type
== INTERPRETER_AOUT
? 0 : 1),
1038 info
->load_addr
= reloc_func_desc
;
1039 info
->start_brk
= info
->brk
= elf_brk
;
1040 info
->end_code
= end_code
;
1041 info
->start_code
= start_code
;
1042 info
->start_data
= start_data
;
1043 info
->end_data
= end_data
;
1044 info
->start_stack
= bprm
->p
;
1046 /* Calling set_brk effectively mmaps the pages that we need for the bss and break
1048 set_brk(elf_bss
, elf_brk
);
1050 padzero(elf_bss
, elf_brk
);
1053 printf("(start_brk) %x\n" , info
->start_brk
);
1054 printf("(end_code) %x\n" , info
->end_code
);
1055 printf("(start_code) %x\n" , info
->start_code
);
1056 printf("(end_data) %x\n" , info
->end_data
);
1057 printf("(start_stack) %x\n" , info
->start_stack
);
1058 printf("(brk) %x\n" , info
->brk
);
1061 info
->entry
= elf_entry
;
/* Stub: a.out interpreters are not implemented; report and return 0. */
static int load_aout_interp(void *exptr, int interp_fd)
{
    printf("a.out interpreter not yet supported\n");
    return 0;
}
/* Initialize the CPU register state for a freshly loaded image. */
void do_init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    init_thread(regs, infop);
}