/*
 * Copyright (c) 2013 Stacey D. Son
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
20 #include "qemu/osdep.h"
23 #include "disas/disas.h"
24 #include "qemu/path.h"
26 #include "target_arch_elf.h"
27 #include "target_os_thread.h"
/* This flag is ineffective under Linux too; should be deleted. */
#define MAP_DENYWRITE 0

/* should probably go in elf.h */
#define ELF_PLATFORM (NULL)

#define ELF_CLASS ELFCLASS32

/* Byte-swap a target-long-sized ELF field in place (32-bit guest). */
#define bswaptls(ptr) bswap32s(ptr)

/* max code+data+bss space allocated to elf interpreter */
#define INTERP_MAP_SIZE (32 * 1024 * 1024)

/* max code+data+bss+brk space allocated to ET_DYN executables */
#define ET_DYN_MAP_SIZE (128 * 1024 * 1024)

/* Necessary parameters */
#define TARGET_ELF_EXEC_PAGESIZE TARGET_PAGE_SIZE
/* Round an address down to the start of its target ELF page. */
#define TARGET_ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(TARGET_ELF_EXEC_PAGESIZE - 1))
/* Offset of an address within its target ELF page. */
#define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE - 1))

/* Number of fixed AT_* auxv entries emitted by create_elf_tables(). */
#define DLINFO_ITEMS 12
/* Copy n bytes of guest argument/environment data; named after the
 * Linux kernel helper it mimics — here it is a plain memcpy. */
static inline void memcpy_fromfs(void *to, const void *from, unsigned long n)
{
    memcpy(to, from, n);
}
75 static void bswap_ehdr(struct elfhdr
*ehdr
)
77 bswap16s(&ehdr
->e_type
); /* Object file type */
78 bswap16s(&ehdr
->e_machine
); /* Architecture */
79 bswap32s(&ehdr
->e_version
); /* Object file version */
80 bswaptls(&ehdr
->e_entry
); /* Entry point virtual address */
81 bswaptls(&ehdr
->e_phoff
); /* Program header table file offset */
82 bswaptls(&ehdr
->e_shoff
); /* Section header table file offset */
83 bswap32s(&ehdr
->e_flags
); /* Processor-specific flags */
84 bswap16s(&ehdr
->e_ehsize
); /* ELF header size in bytes */
85 bswap16s(&ehdr
->e_phentsize
); /* Program header table entry size */
86 bswap16s(&ehdr
->e_phnum
); /* Program header table entry count */
87 bswap16s(&ehdr
->e_shentsize
); /* Section header table entry size */
88 bswap16s(&ehdr
->e_shnum
); /* Section header table entry count */
89 bswap16s(&ehdr
->e_shstrndx
); /* Section header string table index */
92 static void bswap_phdr(struct elf_phdr
*phdr
, int phnum
)
96 for (i
= 0; i
< phnum
; i
++, phdr
++) {
97 bswap32s(&phdr
->p_type
); /* Segment type */
98 bswap32s(&phdr
->p_flags
); /* Segment flags */
99 bswaptls(&phdr
->p_offset
); /* Segment file offset */
100 bswaptls(&phdr
->p_vaddr
); /* Segment virtual address */
101 bswaptls(&phdr
->p_paddr
); /* Segment physical address */
102 bswaptls(&phdr
->p_filesz
); /* Segment size in file */
103 bswaptls(&phdr
->p_memsz
); /* Segment size in memory */
104 bswaptls(&phdr
->p_align
); /* Segment alignment */
108 static void bswap_shdr(struct elf_shdr
*shdr
, int shnum
)
112 for (i
= 0; i
< shnum
; i
++, shdr
++) {
113 bswap32s(&shdr
->sh_name
);
114 bswap32s(&shdr
->sh_type
);
115 bswaptls(&shdr
->sh_flags
);
116 bswaptls(&shdr
->sh_addr
);
117 bswaptls(&shdr
->sh_offset
);
118 bswaptls(&shdr
->sh_size
);
119 bswap32s(&shdr
->sh_link
);
120 bswap32s(&shdr
->sh_info
);
121 bswaptls(&shdr
->sh_addralign
);
122 bswaptls(&shdr
->sh_entsize
);
126 static void bswap_sym(struct elf_sym
*sym
)
128 bswap32s(&sym
->st_name
);
129 bswaptls(&sym
->st_value
);
130 bswaptls(&sym
->st_size
);
131 bswap16s(&sym
->st_shndx
);
134 #else /* ! BSWAP_NEEDED */
136 static void bswap_ehdr(struct elfhdr
*ehdr
) { }
137 static void bswap_phdr(struct elf_phdr
*phdr
, int phnum
) { }
138 static void bswap_shdr(struct elf_shdr
*shdr
, int shnum
) { }
139 static void bswap_sym(struct elf_sym
*sym
) { }
141 #endif /* ! BSWAP_NEEDED */
144 * 'copy_elf_strings()' copies argument/envelope strings from user
145 * memory to free pages in kernel mem. These are in a format ready
146 * to be put directly into the top of new user memory.
149 static abi_ulong
copy_elf_strings(int argc
, char **argv
, void **page
,
152 char *tmp
, *tmp1
, *pag
= NULL
;
156 return 0; /* bullet-proofing */
161 fprintf(stderr
, "VFS: argc is wrong");
167 if (p
< len
) { /* this shouldn't happen - 128kB */
173 offset
= p
% TARGET_PAGE_SIZE
;
174 pag
= (char *)page
[p
/ TARGET_PAGE_SIZE
];
176 pag
= g_try_malloc0(TARGET_PAGE_SIZE
);
177 page
[p
/ TARGET_PAGE_SIZE
] = pag
;
182 if (len
== 0 || offset
== 0) {
183 *(pag
+ offset
) = *tmp
;
186 int bytes_to_copy
= (len
> offset
) ? offset
: len
;
187 tmp
-= bytes_to_copy
;
189 offset
-= bytes_to_copy
;
190 len
-= bytes_to_copy
;
191 memcpy_fromfs(pag
+ offset
, tmp
, bytes_to_copy
+ 1);
198 static abi_ulong
setup_arg_pages(abi_ulong p
, struct bsd_binprm
*bprm
,
199 struct image_info
*info
)
201 abi_ulong stack_base
, size
, error
;
204 /* Create enough stack to hold everything. If we don't use
205 * it for args, we'll use it for something else...
207 size
= x86_stack_size
;
208 if (size
< MAX_ARG_PAGES
* TARGET_PAGE_SIZE
)
209 size
= MAX_ARG_PAGES
* TARGET_PAGE_SIZE
;
210 error
= target_mmap(0,
211 size
+ qemu_host_page_size
,
212 PROT_READ
| PROT_WRITE
,
213 MAP_PRIVATE
| MAP_ANON
,
219 /* we reserve one extra page at the top of the stack as guard */
220 target_mprotect(error
+ size
, qemu_host_page_size
, PROT_NONE
);
222 stack_base
= error
+ size
- MAX_ARG_PAGES
* TARGET_PAGE_SIZE
;
225 for (i
= 0 ; i
< MAX_ARG_PAGES
; i
++) {
228 /* FIXME - check return value of memcpy_to_target() for failure */
229 memcpy_to_target(stack_base
, bprm
->page
[i
], TARGET_PAGE_SIZE
);
230 g_free(bprm
->page
[i
]);
232 stack_base
+= TARGET_PAGE_SIZE
;
237 static void set_brk(abi_ulong start
, abi_ulong end
)
239 /* page-align the start and end addresses... */
240 start
= HOST_PAGE_ALIGN(start
);
241 end
= HOST_PAGE_ALIGN(end
);
244 if (target_mmap(start
, end
- start
,
245 PROT_READ
| PROT_WRITE
| PROT_EXEC
,
246 MAP_FIXED
| MAP_PRIVATE
| MAP_ANON
, -1, 0) == -1) {
247 perror("cannot mmap brk");
253 /* We need to explicitly zero any fractional pages after the data
254 section (i.e. bss). This would contain the junk from the file that
255 should not be in memory. */
256 static void padzero(abi_ulong elf_bss
, abi_ulong last_bss
)
260 if (elf_bss
>= last_bss
)
263 /* XXX: this is really a hack : if the real host page size is
264 smaller than the target page size, some pages after the end
265 of the file may not be mapped. A better fix would be to
266 patch target_mmap(), but it is more complicated as the file
267 size must be known */
268 if (qemu_real_host_page_size
< qemu_host_page_size
) {
269 abi_ulong end_addr
, end_addr1
;
270 end_addr1
= REAL_HOST_PAGE_ALIGN(elf_bss
);
271 end_addr
= HOST_PAGE_ALIGN(elf_bss
);
272 if (end_addr1
< end_addr
) {
273 mmap((void *)g2h_untagged(end_addr1
), end_addr
- end_addr1
,
274 PROT_READ
| PROT_WRITE
| PROT_EXEC
,
275 MAP_FIXED
| MAP_PRIVATE
| MAP_ANON
, -1, 0);
279 nbyte
= elf_bss
& (qemu_host_page_size
- 1);
281 nbyte
= qemu_host_page_size
- nbyte
;
283 /* FIXME - what to do if put_user() fails? */
284 put_user_u8(0, elf_bss
);
291 static abi_ulong
create_elf_tables(abi_ulong p
, int argc
, int envc
,
292 struct elfhdr
* exec
,
295 abi_ulong interp_load_addr
,
296 struct image_info
*info
)
300 abi_ulong u_platform
;
301 const char *k_platform
;
302 const int n
= sizeof(elf_addr_t
);
306 k_platform
= ELF_PLATFORM
;
308 size_t len
= strlen(k_platform
) + 1;
309 sp
-= (len
+ n
- 1) & ~(n
- 1);
311 /* FIXME - check return value of memcpy_to_target() for failure */
312 memcpy_to_target(sp
, k_platform
, len
);
315 * Force 16 byte _final_ alignment here for generality.
317 sp
= sp
& ~(abi_ulong
)15;
318 size
= (DLINFO_ITEMS
+ 1) * 2;
321 #ifdef DLINFO_ARCH_ITEMS
322 size
+= DLINFO_ARCH_ITEMS
* 2;
324 size
+= envc
+ argc
+ 2;
325 size
+= 1; /* argc itself */
328 sp
-= 16 - (size
& 15);
330 /* This is correct because Linux defines
331 * elf_addr_t as Elf32_Off / Elf64_Off
333 #define NEW_AUX_ENT(id, val) do { \
334 sp -= n; put_user_ual(val, sp); \
335 sp -= n; put_user_ual(id, sp); \
338 NEW_AUX_ENT(AT_NULL
, 0);
340 /* There must be exactly DLINFO_ITEMS entries here. */
341 NEW_AUX_ENT(AT_PHDR
, (abi_ulong
)(load_addr
+ exec
->e_phoff
));
342 NEW_AUX_ENT(AT_PHENT
, (abi_ulong
)(sizeof(struct elf_phdr
)));
343 NEW_AUX_ENT(AT_PHNUM
, (abi_ulong
)(exec
->e_phnum
));
344 NEW_AUX_ENT(AT_PAGESZ
, (abi_ulong
)(TARGET_PAGE_SIZE
));
345 NEW_AUX_ENT(AT_BASE
, (abi_ulong
)(interp_load_addr
));
346 NEW_AUX_ENT(AT_FLAGS
, (abi_ulong
)0);
347 NEW_AUX_ENT(AT_ENTRY
, load_bias
+ exec
->e_entry
);
348 NEW_AUX_ENT(AT_UID
, (abi_ulong
) getuid());
349 NEW_AUX_ENT(AT_EUID
, (abi_ulong
) geteuid());
350 NEW_AUX_ENT(AT_GID
, (abi_ulong
) getgid());
351 NEW_AUX_ENT(AT_EGID
, (abi_ulong
) getegid());
352 NEW_AUX_ENT(AT_HWCAP
, (abi_ulong
) ELF_HWCAP
);
353 NEW_AUX_ENT(AT_CLKTCK
, (abi_ulong
) sysconf(_SC_CLK_TCK
));
355 NEW_AUX_ENT(AT_PLATFORM
, u_platform
);
358 * ARCH_DLINFO must come last so platform specific code can enforce
359 * special alignment requirements on the AUXV if necessary (eg. PPC).
365 sp
= loader_build_argptr(envc
, argc
, sp
, p
);
370 static abi_ulong
load_elf_interp(struct elfhdr
*interp_elf_ex
,
372 abi_ulong
*interp_load_addr
)
374 struct elf_phdr
*elf_phdata
= NULL
;
375 struct elf_phdr
*eppnt
;
376 abi_ulong load_addr
= 0;
377 int load_addr_set
= 0;
379 abi_ulong last_bss
, elf_bss
;
387 bswap_ehdr(interp_elf_ex
);
388 /* First of all, some simple consistency checks */
389 if ((interp_elf_ex
->e_type
!= ET_EXEC
&&
390 interp_elf_ex
->e_type
!= ET_DYN
) ||
391 !elf_check_arch(interp_elf_ex
->e_machine
)) {
392 return ~((abi_ulong
)0UL);
396 /* Now read in all of the header information */
398 if (sizeof(struct elf_phdr
) * interp_elf_ex
->e_phnum
> TARGET_PAGE_SIZE
)
399 return ~(abi_ulong
)0UL;
401 elf_phdata
= (struct elf_phdr
*)
402 malloc(sizeof(struct elf_phdr
) * interp_elf_ex
->e_phnum
);
405 return ~((abi_ulong
)0UL);
408 * If the size of this structure has changed, then punt, since
409 * we will be doing the wrong thing.
411 if (interp_elf_ex
->e_phentsize
!= sizeof(struct elf_phdr
)) {
413 return ~((abi_ulong
)0UL);
416 retval
= lseek(interpreter_fd
, interp_elf_ex
->e_phoff
, SEEK_SET
);
418 retval
= read(interpreter_fd
,
420 sizeof(struct elf_phdr
) * interp_elf_ex
->e_phnum
);
423 perror("load_elf_interp");
428 bswap_phdr(elf_phdata
, interp_elf_ex
->e_phnum
);
430 if (interp_elf_ex
->e_type
== ET_DYN
) {
431 /* in order to avoid hardcoding the interpreter load
432 address in qemu, we allocate a big enough memory zone */
433 error
= target_mmap(0, INTERP_MAP_SIZE
,
434 PROT_NONE
, MAP_PRIVATE
| MAP_ANON
,
445 for (i
= 0; i
< interp_elf_ex
->e_phnum
; i
++, eppnt
++)
446 if (eppnt
->p_type
== PT_LOAD
) {
447 int elf_type
= MAP_PRIVATE
| MAP_DENYWRITE
;
452 if (eppnt
->p_flags
& PF_R
) elf_prot
= PROT_READ
;
453 if (eppnt
->p_flags
& PF_W
) elf_prot
|= PROT_WRITE
;
454 if (eppnt
->p_flags
& PF_X
) elf_prot
|= PROT_EXEC
;
455 if (interp_elf_ex
->e_type
== ET_EXEC
|| load_addr_set
) {
456 elf_type
|= MAP_FIXED
;
457 vaddr
= eppnt
->p_vaddr
;
459 error
= target_mmap(load_addr
+ TARGET_ELF_PAGESTART(vaddr
),
460 eppnt
->p_filesz
+ TARGET_ELF_PAGEOFFSET(eppnt
->p_vaddr
),
464 eppnt
->p_offset
- TARGET_ELF_PAGEOFFSET(eppnt
->p_vaddr
));
468 close(interpreter_fd
);
470 return ~((abi_ulong
)0UL);
473 if (!load_addr_set
&& interp_elf_ex
->e_type
== ET_DYN
) {
479 * Find the end of the file mapping for this phdr, and keep
480 * track of the largest address we see for this.
482 k
= load_addr
+ eppnt
->p_vaddr
+ eppnt
->p_filesz
;
483 if (k
> elf_bss
) elf_bss
= k
;
486 * Do the same thing for the memory mapping - between
487 * elf_bss and last_bss is the bss section.
489 k
= load_addr
+ eppnt
->p_memsz
+ eppnt
->p_vaddr
;
490 if (k
> last_bss
) last_bss
= k
;
493 /* Now use mmap to map the library into memory. */
495 close(interpreter_fd
);
498 * Now fill out the bss section. First pad the last page up
499 * to the page boundary, and then perform a mmap to make sure
500 * that there are zeromapped pages up to and including the last
503 padzero(elf_bss
, last_bss
);
504 elf_bss
= TARGET_ELF_PAGESTART(elf_bss
+ qemu_host_page_size
- 1); /* What we have mapped so far */
506 /* Map the last of the bss segment */
507 if (last_bss
> elf_bss
) {
508 target_mmap(elf_bss
, last_bss
- elf_bss
,
509 PROT_READ
| PROT_WRITE
| PROT_EXEC
,
510 MAP_FIXED
| MAP_PRIVATE
| MAP_ANON
, -1, 0);
514 *interp_load_addr
= load_addr
;
515 return ((abi_ulong
) interp_elf_ex
->e_entry
) + load_addr
;
518 static int symfind(const void *s0
, const void *s1
)
520 target_ulong addr
= *(target_ulong
*)s0
;
521 struct elf_sym
*sym
= (struct elf_sym
*)s1
;
523 if (addr
< sym
->st_value
) {
525 } else if (addr
>= sym
->st_value
+ sym
->st_size
) {
531 static const char *lookup_symbolxx(struct syminfo
*s
, target_ulong orig_addr
)
533 #if ELF_CLASS == ELFCLASS32
534 struct elf_sym
*syms
= s
->disas_symtab
.elf32
;
536 struct elf_sym
*syms
= s
->disas_symtab
.elf64
;
542 sym
= bsearch(&orig_addr
, syms
, s
->disas_num_syms
, sizeof(*syms
), symfind
);
544 return s
->disas_strtab
+ sym
->st_name
;
550 /* FIXME: This should use elf_ops.h */
551 static int symcmp(const void *s0
, const void *s1
)
553 struct elf_sym
*sym0
= (struct elf_sym
*)s0
;
554 struct elf_sym
*sym1
= (struct elf_sym
*)s1
;
555 return (sym0
->st_value
< sym1
->st_value
)
557 : ((sym0
->st_value
> sym1
->st_value
) ? 1 : 0);
560 /* Best attempt to load symbols from this ELF object. */
561 static void load_symbols(struct elfhdr
*hdr
, int fd
)
563 unsigned int i
, nsyms
;
564 struct elf_shdr sechdr
, symtab
, strtab
;
567 struct elf_sym
*syms
, *new_syms
;
569 lseek(fd
, hdr
->e_shoff
, SEEK_SET
);
570 for (i
= 0; i
< hdr
->e_shnum
; i
++) {
571 if (read(fd
, &sechdr
, sizeof(sechdr
)) != sizeof(sechdr
))
573 bswap_shdr(&sechdr
, 1);
574 if (sechdr
.sh_type
== SHT_SYMTAB
) {
576 lseek(fd
, hdr
->e_shoff
577 + sizeof(sechdr
) * sechdr
.sh_link
, SEEK_SET
);
578 if (read(fd
, &strtab
, sizeof(strtab
))
581 bswap_shdr(&strtab
, 1);
585 return; /* Shouldn't happen... */
588 /* Now know where the strtab and symtab are. Snarf them. */
589 s
= malloc(sizeof(*s
));
590 syms
= malloc(symtab
.sh_size
);
595 s
->disas_strtab
= strings
= malloc(strtab
.sh_size
);
596 if (!s
->disas_strtab
) {
602 lseek(fd
, symtab
.sh_offset
, SEEK_SET
);
603 if (read(fd
, syms
, symtab
.sh_size
) != symtab
.sh_size
) {
610 nsyms
= symtab
.sh_size
/ sizeof(struct elf_sym
);
615 // Throw away entries which we do not need.
616 if (syms
[i
].st_shndx
== SHN_UNDEF
||
617 syms
[i
].st_shndx
>= SHN_LORESERVE
||
618 ELF_ST_TYPE(syms
[i
].st_info
) != STT_FUNC
) {
621 syms
[i
] = syms
[nsyms
];
628 /* Attempt to free the storage associated with the local symbols
629 that we threw away. Whether or not this has any effect on the
630 memory allocation depends on the malloc implementation and how
631 many symbols we managed to discard. */
632 new_syms
= realloc(syms
, nsyms
* sizeof(*syms
));
633 if (new_syms
== NULL
) {
641 qsort(syms
, nsyms
, sizeof(*syms
), symcmp
);
643 lseek(fd
, strtab
.sh_offset
, SEEK_SET
);
644 if (read(fd
, strings
, strtab
.sh_size
) != strtab
.sh_size
) {
650 s
->disas_num_syms
= nsyms
;
651 #if ELF_CLASS == ELFCLASS32
652 s
->disas_symtab
.elf32
= syms
;
653 s
->lookup_symbol
= (lookup_symbol_t
)lookup_symbolxx
;
655 s
->disas_symtab
.elf64
= syms
;
656 s
->lookup_symbol
= (lookup_symbol_t
)lookup_symbolxx
;
662 int load_elf_binary(struct bsd_binprm
*bprm
, struct target_pt_regs
*regs
,
663 struct image_info
*info
)
665 struct elfhdr elf_ex
;
666 struct elfhdr interp_elf_ex
;
667 int interpreter_fd
= -1; /* avoid warning */
668 abi_ulong load_addr
, load_bias
;
669 int load_addr_set
= 0;
671 struct elf_phdr
* elf_ppnt
;
672 struct elf_phdr
*elf_phdata
;
673 abi_ulong elf_bss
, k
, elf_brk
;
675 char * elf_interpreter
;
676 abi_ulong elf_entry
, interp_load_addr
= 0;
677 abi_ulong start_code
, end_code
, start_data
, end_data
;
678 abi_ulong reloc_func_desc
= 0;
680 abi_ulong elf_stack
= ~((abi_ulong
)0UL);
685 elf_ex
= *((struct elfhdr
*) bprm
->buf
); /* exec-header */
688 /* First of all, some simple consistency checks */
689 if ((elf_ex
.e_type
!= ET_EXEC
&& elf_ex
.e_type
!= ET_DYN
) ||
690 (!elf_check_arch(elf_ex
.e_machine
))) {
694 bprm
->p
= copy_elf_strings(1, &bprm
->filename
, bprm
->page
, bprm
->p
);
695 bprm
->p
= copy_elf_strings(bprm
->envc
, bprm
->envp
, bprm
->page
,bprm
->p
);
696 bprm
->p
= copy_elf_strings(bprm
->argc
, bprm
->argv
, bprm
->page
,bprm
->p
);
701 /* Now read in all of the header information */
702 elf_phdata
= (struct elf_phdr
*)malloc(elf_ex
.e_phentsize
*elf_ex
.e_phnum
);
703 if (elf_phdata
== NULL
) {
707 retval
= lseek(bprm
->fd
, elf_ex
.e_phoff
, SEEK_SET
);
709 retval
= read(bprm
->fd
, (char *)elf_phdata
,
710 elf_ex
.e_phentsize
* elf_ex
.e_phnum
);
714 perror("load_elf_binary");
720 bswap_phdr(elf_phdata
, elf_ex
.e_phnum
);
722 elf_ppnt
= elf_phdata
;
728 elf_interpreter
= NULL
;
729 start_code
= ~((abi_ulong
)0UL);
734 for (i
= 0;i
< elf_ex
.e_phnum
; i
++) {
735 if (elf_ppnt
->p_type
== PT_INTERP
) {
736 if (elf_interpreter
!= NULL
)
739 free(elf_interpreter
);
744 /* This is the program interpreter used for
745 * shared libraries - for now assume that this
746 * is an a.out format binary
749 elf_interpreter
= (char *)malloc(elf_ppnt
->p_filesz
);
751 if (elf_interpreter
== NULL
) {
757 retval
= lseek(bprm
->fd
, elf_ppnt
->p_offset
, SEEK_SET
);
759 retval
= read(bprm
->fd
, elf_interpreter
, elf_ppnt
->p_filesz
);
762 perror("load_elf_binary2");
767 retval
= open(path(elf_interpreter
), O_RDONLY
);
769 interpreter_fd
= retval
;
772 perror(elf_interpreter
);
774 /* retval = -errno; */
779 retval
= lseek(interpreter_fd
, 0, SEEK_SET
);
781 retval
= read(interpreter_fd
, bprm
->buf
, 128);
785 interp_elf_ex
= *((struct elfhdr
*) bprm
->buf
); /* elf exec-header */
788 perror("load_elf_binary3");
791 free(elf_interpreter
);
799 /* Some simple consistency checks for the interpreter */
800 if (elf_interpreter
) {
801 if (interp_elf_ex
.e_ident
[0] != 0x7f ||
802 strncmp((char *)&interp_elf_ex
.e_ident
[1], "ELF", 3) != 0) {
803 free(elf_interpreter
);
810 /* OK, we are done with that, now set up the arg stuff,
811 and then start this sucker up */
814 free(elf_interpreter
);
820 /* OK, This is the point of no return */
823 info
->start_mmap
= (abi_ulong
)ELF_START_MMAP
;
825 elf_entry
= (abi_ulong
) elf_ex
.e_entry
;
828 * In case where user has not explicitly set the guest_base, we
829 * probe here that should we set it automatically.
831 if (!have_guest_base
) {
833 * Go through ELF program header table and find out whether
834 * any of the segments drop below our current mmap_min_addr and
835 * in that case set guest_base to corresponding address.
837 for (i
= 0, elf_ppnt
= elf_phdata
; i
< elf_ex
.e_phnum
;
839 if (elf_ppnt
->p_type
!= PT_LOAD
)
841 if (HOST_PAGE_ALIGN(elf_ppnt
->p_vaddr
) < mmap_min_addr
) {
842 guest_base
= HOST_PAGE_ALIGN(mmap_min_addr
);
848 /* Do this so that we can load the interpreter, if need be. We will
849 change some of these later */
851 bprm
->p
= setup_arg_pages(bprm
->p
, bprm
, info
);
852 info
->start_stack
= bprm
->p
;
854 /* Now we do a little grungy work by mmaping the ELF image into
855 * the correct location in memory. At this point, we assume that
856 * the image should be loaded at fixed address, not at a variable
860 for (i
= 0, elf_ppnt
= elf_phdata
; i
< elf_ex
.e_phnum
; i
++, elf_ppnt
++) {
865 if (elf_ppnt
->p_type
!= PT_LOAD
)
868 if (elf_ppnt
->p_flags
& PF_R
) elf_prot
|= PROT_READ
;
869 if (elf_ppnt
->p_flags
& PF_W
) elf_prot
|= PROT_WRITE
;
870 if (elf_ppnt
->p_flags
& PF_X
) elf_prot
|= PROT_EXEC
;
871 elf_flags
= MAP_PRIVATE
| MAP_DENYWRITE
;
872 if (elf_ex
.e_type
== ET_EXEC
|| load_addr_set
) {
873 elf_flags
|= MAP_FIXED
;
874 } else if (elf_ex
.e_type
== ET_DYN
) {
875 /* Try and get dynamic programs out of the way of the default mmap
876 base, as well as whatever program they might try to exec. This
877 is because the brk will follow the loader, and is not movable. */
878 /* NOTE: for qemu, we do a big mmap to get enough space
879 without hardcoding any address */
880 error
= target_mmap(0, ET_DYN_MAP_SIZE
,
881 PROT_NONE
, MAP_PRIVATE
| MAP_ANON
,
887 load_bias
= TARGET_ELF_PAGESTART(error
- elf_ppnt
->p_vaddr
);
890 error
= target_mmap(TARGET_ELF_PAGESTART(load_bias
+ elf_ppnt
->p_vaddr
),
891 (elf_ppnt
->p_filesz
+
892 TARGET_ELF_PAGEOFFSET(elf_ppnt
->p_vaddr
)),
894 (MAP_FIXED
| MAP_PRIVATE
| MAP_DENYWRITE
),
896 (elf_ppnt
->p_offset
-
897 TARGET_ELF_PAGEOFFSET(elf_ppnt
->p_vaddr
)));
904 if (TARGET_ELF_PAGESTART(elf_ppnt
->p_vaddr
) < elf_stack
)
905 elf_stack
= TARGET_ELF_PAGESTART(elf_ppnt
->p_vaddr
);
908 if (!load_addr_set
) {
910 load_addr
= elf_ppnt
->p_vaddr
- elf_ppnt
->p_offset
;
911 if (elf_ex
.e_type
== ET_DYN
) {
913 TARGET_ELF_PAGESTART(load_bias
+ elf_ppnt
->p_vaddr
);
914 load_addr
+= load_bias
;
915 reloc_func_desc
= load_bias
;
918 k
= elf_ppnt
->p_vaddr
;
923 k
= elf_ppnt
->p_vaddr
+ elf_ppnt
->p_filesz
;
926 if ((elf_ppnt
->p_flags
& PF_X
) && end_code
< k
)
930 k
= elf_ppnt
->p_vaddr
+ elf_ppnt
->p_memsz
;
931 if (k
> elf_brk
) elf_brk
= k
;
934 elf_entry
+= load_bias
;
935 elf_bss
+= load_bias
;
936 elf_brk
+= load_bias
;
937 start_code
+= load_bias
;
938 end_code
+= load_bias
;
939 start_data
+= load_bias
;
940 end_data
+= load_bias
;
942 if (elf_interpreter
) {
943 elf_entry
= load_elf_interp(&interp_elf_ex
, interpreter_fd
,
945 reloc_func_desc
= interp_load_addr
;
947 close(interpreter_fd
);
948 free(elf_interpreter
);
950 if (elf_entry
== ~((abi_ulong
)0UL)) {
951 printf("Unable to load interpreter\n");
960 if (qemu_log_enabled())
961 load_symbols(&elf_ex
, bprm
->fd
);
966 info
->start_stack
= bprm
->p
= elf_stack
- 4;
968 bprm
->p
= create_elf_tables(bprm
->p
,
972 load_addr
, load_bias
,
975 info
->load_addr
= reloc_func_desc
;
976 info
->start_brk
= info
->brk
= elf_brk
;
977 info
->end_code
= end_code
;
978 info
->start_code
= start_code
;
979 info
->start_data
= start_data
;
980 info
->end_data
= end_data
;
981 info
->start_stack
= bprm
->p
;
983 /* Calling set_brk effectively mmaps the pages that we need for the bss and break
985 set_brk(elf_bss
, elf_brk
);
987 padzero(elf_bss
, elf_brk
);
990 printf("(start_brk) %x\n" , info
->start_brk
);
991 printf("(end_code) %x\n" , info
->end_code
);
992 printf("(start_code) %x\n" , info
->start_code
);
993 printf("(end_data) %x\n" , info
->end_data
);
994 printf("(start_stack) %x\n" , info
->start_stack
);
995 printf("(brk) %x\n" , info
->brk
);
998 info
->entry
= elf_entry
;
/* Initialize the first guest thread's registers from the loaded image,
 * delegating to the per-target helper. */
void do_init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    target_thread_init(regs, infop);
}