4 * Copyright (c) 2013 Stacey D. Son
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
20 #include "qemu/osdep.h"
23 #include "disas/disas.h"
24 #include "qemu/path.h"
26 #include "target_arch_elf.h"
28 /* this flag is ineffective under linux too, should be deleted */
/* Defined to 0 so it can still be OR'd into mmap flag words as a no-op. */
30 #define MAP_DENYWRITE 0
33 /* should probably go in elf.h */
/* No AT_PLATFORM string is supplied for this target (see create_elf_tables). */
39 #define ELF_PLATFORM (NULL)
48 #define ELF_CLASS ELFCLASS32
/* Swap a target-long-sized field; 32-bit here, matching ELFCLASS32 above. */
50 #define bswaptls(ptr) bswap32s(ptr)
55 /* max code+data+bss space allocated to elf interpreter */
56 #define INTERP_MAP_SIZE (32 * 1024 * 1024)
58 /* max code+data+bss+brk space allocated to ET_DYN executables */
59 #define ET_DYN_MAP_SIZE (128 * 1024 * 1024)
61 /* Necessary parameters */
62 #define TARGET_ELF_EXEC_PAGESIZE TARGET_PAGE_SIZE
/* Round a virtual address down to / get its offset within a target ELF page. */
63 #define TARGET_ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(TARGET_ELF_EXEC_PAGESIZE - 1))
64 #define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE - 1))
/* Number of NEW_AUX_ENT pairs emitted in create_elf_tables() (see the
 * "There must be exactly DLINFO_ITEMS entries here" comment there). */
66 #define DLINFO_ITEMS 12
/*
 * NOTE(review): only the signature survives in this extract; the body is
 * missing.  Linux-kernel-style name: copies n bytes from 'from' to 'to' —
 * presumably a plain memcpy wrapper here; confirm against the full file.
 */
68 static inline void memcpy_fromfs(void *to
, const void *from
, unsigned long n
)
74 static void bswap_ehdr(struct elfhdr
*ehdr
)
76 bswap16s(&ehdr
->e_type
); /* Object file type */
77 bswap16s(&ehdr
->e_machine
); /* Architecture */
78 bswap32s(&ehdr
->e_version
); /* Object file version */
79 bswaptls(&ehdr
->e_entry
); /* Entry point virtual address */
80 bswaptls(&ehdr
->e_phoff
); /* Program header table file offset */
81 bswaptls(&ehdr
->e_shoff
); /* Section header table file offset */
82 bswap32s(&ehdr
->e_flags
); /* Processor-specific flags */
83 bswap16s(&ehdr
->e_ehsize
); /* ELF header size in bytes */
84 bswap16s(&ehdr
->e_phentsize
); /* Program header table entry size */
85 bswap16s(&ehdr
->e_phnum
); /* Program header table entry count */
86 bswap16s(&ehdr
->e_shentsize
); /* Section header table entry size */
87 bswap16s(&ehdr
->e_shnum
); /* Section header table entry count */
88 bswap16s(&ehdr
->e_shstrndx
); /* Section header string table index */
91 static void bswap_phdr(struct elf_phdr
*phdr
)
93 bswap32s(&phdr
->p_type
); /* Segment type */
94 bswaptls(&phdr
->p_offset
); /* Segment file offset */
95 bswaptls(&phdr
->p_vaddr
); /* Segment virtual address */
96 bswaptls(&phdr
->p_paddr
); /* Segment physical address */
97 bswaptls(&phdr
->p_filesz
); /* Segment size in file */
98 bswaptls(&phdr
->p_memsz
); /* Segment size in memory */
99 bswap32s(&phdr
->p_flags
); /* Segment flags */
100 bswaptls(&phdr
->p_align
); /* Segment alignment */
103 static void bswap_shdr(struct elf_shdr
*shdr
)
105 bswap32s(&shdr
->sh_name
);
106 bswap32s(&shdr
->sh_type
);
107 bswaptls(&shdr
->sh_flags
);
108 bswaptls(&shdr
->sh_addr
);
109 bswaptls(&shdr
->sh_offset
);
110 bswaptls(&shdr
->sh_size
);
111 bswap32s(&shdr
->sh_link
);
112 bswap32s(&shdr
->sh_info
);
113 bswaptls(&shdr
->sh_addralign
);
114 bswaptls(&shdr
->sh_entsize
);
117 static void bswap_sym(struct elf_sym
*sym
)
119 bswap32s(&sym
->st_name
);
120 bswaptls(&sym
->st_value
);
121 bswaptls(&sym
->st_size
);
122 bswap16s(&sym
->st_shndx
);
/*
 * NOTE(review): fragmentary extract — several original source lines are
 * missing, so the statements below are not contiguous.  Left byte-identical;
 * comments only.  Visible behavior: copies argc strings into the page[]
 * array, working downward from offset p; pages are allocated lazily with
 * g_try_malloc0(); returns the updated p (0 on the bullet-proofing paths).
 */
127 * 'copy_elf_strings()' copies argument/envelope strings from user
128 * memory to free pages in kernel mem. These are in a format ready
129 * to be put directly into the top of new user memory.
132 static abi_ulong
copy_elf_strings(int argc
, char **argv
, void **page
,
135 char *tmp
, *tmp1
, *pag
= NULL
;
139 return 0; /* bullet-proofing */
144 fprintf(stderr
, "VFS: argc is wrong");
150 if (p
< len
) { /* this shouldn't happen - 128kB */
156 offset
= p
% TARGET_PAGE_SIZE
;
157 pag
= (char *)page
[p
/ TARGET_PAGE_SIZE
];
159 pag
= g_try_malloc0(TARGET_PAGE_SIZE
);
160 page
[p
/ TARGET_PAGE_SIZE
] = pag
;
165 if (len
== 0 || offset
== 0) {
166 *(pag
+ offset
) = *tmp
;
169 int bytes_to_copy
= (len
> offset
) ? offset
: len
;
170 tmp
-= bytes_to_copy
;
172 offset
-= bytes_to_copy
;
173 len
-= bytes_to_copy
;
174 memcpy_fromfs(pag
+ offset
, tmp
, bytes_to_copy
+ 1);
/*
 * NOTE(review): fragmentary extract — original lines missing; left
 * byte-identical, comments only.  Visible behavior: maps a stack of at
 * least MAX_ARG_PAGES pages plus one extra guard page (made PROT_NONE),
 * then copies the bprm argument pages to the bottom of that region and
 * frees them.  Returns the adjusted stack pointer (return path not
 * visible in this fragment).
 */
181 static abi_ulong
setup_arg_pages(abi_ulong p
, struct bsd_binprm
*bprm
,
182 struct image_info
*info
)
184 abi_ulong stack_base
, size
, error
;
187 /* Create enough stack to hold everything. If we don't use
188 * it for args, we'll use it for something else...
190 size
= x86_stack_size
;
191 if (size
< MAX_ARG_PAGES
* TARGET_PAGE_SIZE
)
192 size
= MAX_ARG_PAGES
* TARGET_PAGE_SIZE
;
193 error
= target_mmap(0,
194 size
+ qemu_host_page_size
,
195 PROT_READ
| PROT_WRITE
,
196 MAP_PRIVATE
| MAP_ANON
,
202 /* we reserve one extra page at the top of the stack as guard */
203 target_mprotect(error
+ size
, qemu_host_page_size
, PROT_NONE
);
205 stack_base
= error
+ size
- MAX_ARG_PAGES
* TARGET_PAGE_SIZE
;
208 for (i
= 0 ; i
< MAX_ARG_PAGES
; i
++) {
211 /* FIXME - check return value of memcpy_to_target() for failure */
212 memcpy_to_target(stack_base
, bprm
->page
[i
], TARGET_PAGE_SIZE
);
213 g_free(bprm
->page
[i
]);
215 stack_base
+= TARGET_PAGE_SIZE
;
/*
 * NOTE(review): fragmentary extract (guard condition and closing lines
 * missing); left byte-identical, comments only.  Visible behavior: maps
 * anonymous RWX pages over the page-aligned [start, end) range so the
 * bss/brk area is backed by zeroed memory; complains via perror on
 * mmap failure.
 */
220 static void set_brk(abi_ulong start
, abi_ulong end
)
222 /* page-align the start and end addresses... */
223 start
= HOST_PAGE_ALIGN(start
);
224 end
= HOST_PAGE_ALIGN(end
);
227 if (target_mmap(start
, end
- start
,
228 PROT_READ
| PROT_WRITE
| PROT_EXEC
,
229 MAP_FIXED
| MAP_PRIVATE
| MAP_ANON
, -1, 0) == -1) {
230 perror("cannot mmap brk");
/*
 * NOTE(review): fragmentary extract — original lines missing; left
 * byte-identical, comments only.  Visible behavior: when elf_bss < last_bss,
 * works around host-page < target-page mismatches by re-mmapping the gap,
 * then zeroes the tail of the page containing elf_bss one byte at a time
 * via put_user_u8 (loop structure not visible in this fragment).
 */
236 /* We need to explicitly zero any fractional pages after the data
237 section (i.e. bss). This would contain the junk from the file that
238 should not be in memory. */
239 static void padzero(abi_ulong elf_bss
, abi_ulong last_bss
)
243 if (elf_bss
>= last_bss
)
246 /* XXX: this is really a hack : if the real host page size is
247 smaller than the target page size, some pages after the end
248 of the file may not be mapped. A better fix would be to
249 patch target_mmap(), but it is more complicated as the file
250 size must be known */
251 if (qemu_real_host_page_size
< qemu_host_page_size
) {
252 abi_ulong end_addr
, end_addr1
;
253 end_addr1
= REAL_HOST_PAGE_ALIGN(elf_bss
);
254 end_addr
= HOST_PAGE_ALIGN(elf_bss
);
255 if (end_addr1
< end_addr
) {
256 mmap((void *)g2h_untagged(end_addr1
), end_addr
- end_addr1
,
257 PROT_READ
| PROT_WRITE
| PROT_EXEC
,
258 MAP_FIXED
| MAP_PRIVATE
| MAP_ANON
, -1, 0);
262 nbyte
= elf_bss
& (qemu_host_page_size
- 1);
264 nbyte
= qemu_host_page_size
- nbyte
;
266 /* FIXME - what to do if put_user() fails? */
267 put_user_u8(0, elf_bss
);
/*
 * NOTE(review): fragmentary extract — original lines missing; left
 * byte-identical, comments only.  Visible behavior: builds the initial
 * target stack image — optional platform string, 16-byte final alignment,
 * the ELF auxiliary vector (AT_* entries pushed in reverse via the
 * NEW_AUX_ENT macro, AT_NULL first so it terminates the vector), then the
 * argv/envp pointer arrays via loader_build_argptr().  Returns the final
 * stack pointer (return path not visible in this fragment).
 */
274 static abi_ulong
create_elf_tables(abi_ulong p
, int argc
, int envc
,
275 struct elfhdr
* exec
,
278 abi_ulong interp_load_addr
,
279 struct image_info
*info
)
283 abi_ulong u_platform
;
284 const char *k_platform
;
285 const int n
= sizeof(elf_addr_t
);
289 k_platform
= ELF_PLATFORM
;
291 size_t len
= strlen(k_platform
) + 1;
292 sp
-= (len
+ n
- 1) & ~(n
- 1);
294 /* FIXME - check return value of memcpy_to_target() for failure */
295 memcpy_to_target(sp
, k_platform
, len
);
298 * Force 16 byte _final_ alignment here for generality.
300 sp
= sp
& ~(abi_ulong
)15;
301 size
= (DLINFO_ITEMS
+ 1) * 2;
304 #ifdef DLINFO_ARCH_ITEMS
305 size
+= DLINFO_ARCH_ITEMS
* 2;
307 size
+= envc
+ argc
+ 2;
308 size
+= 1; /* argc itself */
311 sp
-= 16 - (size
& 15);
313 /* This is correct because Linux defines
314 * elf_addr_t as Elf32_Off / Elf64_Off
316 #define NEW_AUX_ENT(id, val) do { \
317 sp -= n; put_user_ual(val, sp); \
318 sp -= n; put_user_ual(id, sp); \
321 NEW_AUX_ENT(AT_NULL
, 0);
323 /* There must be exactly DLINFO_ITEMS entries here. */
324 NEW_AUX_ENT(AT_PHDR
, (abi_ulong
)(load_addr
+ exec
->e_phoff
));
325 NEW_AUX_ENT(AT_PHENT
, (abi_ulong
)(sizeof(struct elf_phdr
)));
326 NEW_AUX_ENT(AT_PHNUM
, (abi_ulong
)(exec
->e_phnum
));
327 NEW_AUX_ENT(AT_PAGESZ
, (abi_ulong
)(TARGET_PAGE_SIZE
));
328 NEW_AUX_ENT(AT_BASE
, (abi_ulong
)(interp_load_addr
));
329 NEW_AUX_ENT(AT_FLAGS
, (abi_ulong
)0);
330 NEW_AUX_ENT(AT_ENTRY
, load_bias
+ exec
->e_entry
);
331 NEW_AUX_ENT(AT_UID
, (abi_ulong
) getuid());
332 NEW_AUX_ENT(AT_EUID
, (abi_ulong
) geteuid());
333 NEW_AUX_ENT(AT_GID
, (abi_ulong
) getgid());
334 NEW_AUX_ENT(AT_EGID
, (abi_ulong
) getegid());
335 NEW_AUX_ENT(AT_HWCAP
, (abi_ulong
) ELF_HWCAP
);
336 NEW_AUX_ENT(AT_CLKTCK
, (abi_ulong
) sysconf(_SC_CLK_TCK
));
338 NEW_AUX_ENT(AT_PLATFORM
, u_platform
);
341 * ARCH_DLINFO must come last so platform specific code can enforce
342 * special alignment requirements on the AUXV if necessary (eg. PPC).
348 sp
= loader_build_argptr(envc
, argc
, sp
, p
);
/*
 * NOTE(review): fragmentary extract — original lines missing; left
 * byte-identical, comments only.  Visible behavior: validates the
 * interpreter's ELF header (ET_EXEC/ET_DYN + arch check), reads its
 * program headers, reserves INTERP_MAP_SIZE for ET_DYN interpreters,
 * maps each PT_LOAD segment, tracks elf_bss/last_bss, zero-fills the
 * bss tail via padzero()/target_mmap(), stores the chosen base in
 * *interp_load_addr and returns the relocated entry point.
 * ~(abi_ulong)0UL is the error sentinel throughout.
 */
353 static abi_ulong
load_elf_interp(struct elfhdr
*interp_elf_ex
,
355 abi_ulong
*interp_load_addr
)
357 struct elf_phdr
*elf_phdata
= NULL
;
358 struct elf_phdr
*eppnt
;
359 abi_ulong load_addr
= 0;
360 int load_addr_set
= 0;
362 abi_ulong last_bss
, elf_bss
;
371 bswap_ehdr(interp_elf_ex
);
373 /* First of all, some simple consistency checks */
374 if ((interp_elf_ex
->e_type
!= ET_EXEC
&&
375 interp_elf_ex
->e_type
!= ET_DYN
) ||
376 !elf_check_arch(interp_elf_ex
->e_machine
)) {
377 return ~((abi_ulong
)0UL);
381 /* Now read in all of the header information */
383 if (sizeof(struct elf_phdr
) * interp_elf_ex
->e_phnum
> TARGET_PAGE_SIZE
)
384 return ~(abi_ulong
)0UL;
386 elf_phdata
= (struct elf_phdr
*)
387 malloc(sizeof(struct elf_phdr
) * interp_elf_ex
->e_phnum
);
390 return ~((abi_ulong
)0UL);
393 * If the size of this structure has changed, then punt, since
394 * we will be doing the wrong thing.
396 if (interp_elf_ex
->e_phentsize
!= sizeof(struct elf_phdr
)) {
398 return ~((abi_ulong
)0UL);
401 retval
= lseek(interpreter_fd
, interp_elf_ex
->e_phoff
, SEEK_SET
);
403 retval
= read(interpreter_fd
,
405 sizeof(struct elf_phdr
) * interp_elf_ex
->e_phnum
);
408 perror("load_elf_interp");
415 for (i
= 0; i
<interp_elf_ex
->e_phnum
; i
++, eppnt
++) {
420 if (interp_elf_ex
->e_type
== ET_DYN
) {
421 /* in order to avoid hardcoding the interpreter load
422 address in qemu, we allocate a big enough memory zone */
423 error
= target_mmap(0, INTERP_MAP_SIZE
,
424 PROT_NONE
, MAP_PRIVATE
| MAP_ANON
,
435 for (i
= 0; i
< interp_elf_ex
->e_phnum
; i
++, eppnt
++)
436 if (eppnt
->p_type
== PT_LOAD
) {
437 int elf_type
= MAP_PRIVATE
| MAP_DENYWRITE
;
442 if (eppnt
->p_flags
& PF_R
) elf_prot
= PROT_READ
;
443 if (eppnt
->p_flags
& PF_W
) elf_prot
|= PROT_WRITE
;
444 if (eppnt
->p_flags
& PF_X
) elf_prot
|= PROT_EXEC
;
445 if (interp_elf_ex
->e_type
== ET_EXEC
|| load_addr_set
) {
446 elf_type
|= MAP_FIXED
;
447 vaddr
= eppnt
->p_vaddr
;
449 error
= target_mmap(load_addr
+ TARGET_ELF_PAGESTART(vaddr
),
450 eppnt
->p_filesz
+ TARGET_ELF_PAGEOFFSET(eppnt
->p_vaddr
),
454 eppnt
->p_offset
- TARGET_ELF_PAGEOFFSET(eppnt
->p_vaddr
));
458 close(interpreter_fd
);
460 return ~((abi_ulong
)0UL);
463 if (!load_addr_set
&& interp_elf_ex
->e_type
== ET_DYN
) {
469 * Find the end of the file mapping for this phdr, and keep
470 * track of the largest address we see for this.
472 k
= load_addr
+ eppnt
->p_vaddr
+ eppnt
->p_filesz
;
473 if (k
> elf_bss
) elf_bss
= k
;
476 * Do the same thing for the memory mapping - between
477 * elf_bss and last_bss is the bss section.
479 k
= load_addr
+ eppnt
->p_memsz
+ eppnt
->p_vaddr
;
480 if (k
> last_bss
) last_bss
= k
;
483 /* Now use mmap to map the library into memory. */
485 close(interpreter_fd
);
488 * Now fill out the bss section. First pad the last page up
489 * to the page boundary, and then perform a mmap to make sure
490 * that there are zeromapped pages up to and including the last
493 padzero(elf_bss
, last_bss
);
494 elf_bss
= TARGET_ELF_PAGESTART(elf_bss
+ qemu_host_page_size
- 1); /* What we have mapped so far */
496 /* Map the last of the bss segment */
497 if (last_bss
> elf_bss
) {
498 target_mmap(elf_bss
, last_bss
- elf_bss
,
499 PROT_READ
| PROT_WRITE
| PROT_EXEC
,
500 MAP_FIXED
| MAP_PRIVATE
| MAP_ANON
, -1, 0);
504 *interp_load_addr
= load_addr
;
505 return ((abi_ulong
) interp_elf_ex
->e_entry
) + load_addr
;
508 static int symfind(const void *s0
, const void *s1
)
510 target_ulong addr
= *(target_ulong
*)s0
;
511 struct elf_sym
*sym
= (struct elf_sym
*)s1
;
513 if (addr
< sym
->st_value
) {
515 } else if (addr
>= sym
->st_value
+ sym
->st_size
) {
/*
 * NOTE(review): fragmentary extract — original lines missing; left
 * byte-identical, comments only.  Visible behavior: binary-searches the
 * sorted per-class symbol table (elf32 or elf64 view, chosen by
 * ELF_CLASS) for the symbol covering orig_addr using symfind(), and
 * returns its name from the string table.  The not-found path is not
 * visible in this fragment.
 */
521 static const char *lookup_symbolxx(struct syminfo
*s
, target_ulong orig_addr
)
523 #if ELF_CLASS == ELFCLASS32
524 struct elf_sym
*syms
= s
->disas_symtab
.elf32
;
526 struct elf_sym
*syms
= s
->disas_symtab
.elf64
;
532 sym
= bsearch(&orig_addr
, syms
, s
->disas_num_syms
, sizeof(*syms
), symfind
);
534 return s
->disas_strtab
+ sym
->st_name
;
540 /* FIXME: This should use elf_ops.h */
541 static int symcmp(const void *s0
, const void *s1
)
543 struct elf_sym
*sym0
= (struct elf_sym
*)s0
;
544 struct elf_sym
*sym1
= (struct elf_sym
*)s1
;
545 return (sym0
->st_value
< sym1
->st_value
)
547 : ((sym0
->st_value
> sym1
->st_value
) ? 1 : 0);
/*
 * NOTE(review): fragmentary extract — original lines missing; left
 * byte-identical, comments only.  Visible behavior: scans the section
 * headers for SHT_SYMTAB, reads the symtab and its linked strtab,
 * discards non-STT_FUNC / undefined / reserved-section entries by
 * compacting the array, shrinks it with realloc, sorts by address with
 * symcmp(), and registers the table in a malloc'd struct syminfo with
 * lookup_symbolxx as the lookup hook.  Error-path cleanup lines are not
 * visible in this fragment.
 */
550 /* Best attempt to load symbols from this ELF object. */
551 static void load_symbols(struct elfhdr
*hdr
, int fd
)
553 unsigned int i
, nsyms
;
554 struct elf_shdr sechdr
, symtab
, strtab
;
557 struct elf_sym
*syms
, *new_syms
;
559 lseek(fd
, hdr
->e_shoff
, SEEK_SET
);
560 for (i
= 0; i
< hdr
->e_shnum
; i
++) {
561 if (read(fd
, &sechdr
, sizeof(sechdr
)) != sizeof(sechdr
))
566 if (sechdr
.sh_type
== SHT_SYMTAB
) {
568 lseek(fd
, hdr
->e_shoff
569 + sizeof(sechdr
) * sechdr
.sh_link
, SEEK_SET
);
570 if (read(fd
, &strtab
, sizeof(strtab
))
579 return; /* Shouldn't happen... */
582 /* Now know where the strtab and symtab are. Snarf them. */
583 s
= malloc(sizeof(*s
));
584 syms
= malloc(symtab
.sh_size
);
589 s
->disas_strtab
= strings
= malloc(strtab
.sh_size
);
590 if (!s
->disas_strtab
) {
596 lseek(fd
, symtab
.sh_offset
, SEEK_SET
);
597 if (read(fd
, syms
, symtab
.sh_size
) != symtab
.sh_size
) {
604 nsyms
= symtab
.sh_size
/ sizeof(struct elf_sym
);
611 // Throw away entries which we do not need.
612 if (syms
[i
].st_shndx
== SHN_UNDEF
||
613 syms
[i
].st_shndx
>= SHN_LORESERVE
||
614 ELF_ST_TYPE(syms
[i
].st_info
) != STT_FUNC
) {
617 syms
[i
] = syms
[nsyms
];
624 /* Attempt to free the storage associated with the local symbols
625 that we threw away. Whether or not this has any effect on the
626 memory allocation depends on the malloc implementation and how
627 many symbols we managed to discard. */
628 new_syms
= realloc(syms
, nsyms
* sizeof(*syms
));
629 if (new_syms
== NULL
) {
637 qsort(syms
, nsyms
, sizeof(*syms
), symcmp
);
639 lseek(fd
, strtab
.sh_offset
, SEEK_SET
);
640 if (read(fd
, strings
, strtab
.sh_size
) != strtab
.sh_size
) {
646 s
->disas_num_syms
= nsyms
;
647 #if ELF_CLASS == ELFCLASS32
648 s
->disas_symtab
.elf32
= syms
;
649 s
->lookup_symbol
= (lookup_symbol_t
)lookup_symbolxx
;
651 s
->disas_symtab
.elf64
= syms
;
652 s
->lookup_symbol
= (lookup_symbol_t
)lookup_symbolxx
;
/*
 * NOTE(review): fragmentary extract — many original source lines are
 * missing, so the statements below are not contiguous.  Left
 * byte-identical; comments only.  Visible behavior: the main ELF loader —
 * validates the exec header, copies filename/envp/argv strings to the
 * bprm pages, reads and byte-swaps the program headers, locates and opens
 * a PT_INTERP interpreter if present, optionally bumps guest_base below
 * mmap_min_addr, sets up the argument stack, maps each PT_LOAD segment
 * (reserving ET_DYN_MAP_SIZE for ET_DYN images), loads the interpreter
 * via load_elf_interp(), loads symbols for logging, builds the ELF
 * tables, records the layout in *info, and maps/zeroes the bss via
 * set_brk()/padzero().  Error-path returns are not visible here.
 */
658 int load_elf_binary(struct bsd_binprm
*bprm
, struct target_pt_regs
*regs
,
659 struct image_info
*info
)
661 struct elfhdr elf_ex
;
662 struct elfhdr interp_elf_ex
;
663 int interpreter_fd
= -1; /* avoid warning */
664 abi_ulong load_addr
, load_bias
;
665 int load_addr_set
= 0;
667 struct elf_phdr
* elf_ppnt
;
668 struct elf_phdr
*elf_phdata
;
669 abi_ulong elf_bss
, k
, elf_brk
;
671 char * elf_interpreter
;
672 abi_ulong elf_entry
, interp_load_addr
= 0;
673 abi_ulong start_code
, end_code
, start_data
, end_data
;
674 abi_ulong reloc_func_desc
= 0;
676 abi_ulong elf_stack
= ~((abi_ulong
)0UL);
681 elf_ex
= *((struct elfhdr
*) bprm
->buf
); /* exec-header */
686 /* First of all, some simple consistency checks */
687 if ((elf_ex
.e_type
!= ET_EXEC
&& elf_ex
.e_type
!= ET_DYN
) ||
688 (!elf_check_arch(elf_ex
.e_machine
))) {
692 bprm
->p
= copy_elf_strings(1, &bprm
->filename
, bprm
->page
, bprm
->p
);
693 bprm
->p
= copy_elf_strings(bprm
->envc
, bprm
->envp
, bprm
->page
,bprm
->p
);
694 bprm
->p
= copy_elf_strings(bprm
->argc
, bprm
->argv
, bprm
->page
,bprm
->p
);
699 /* Now read in all of the header information */
700 elf_phdata
= (struct elf_phdr
*)malloc(elf_ex
.e_phentsize
*elf_ex
.e_phnum
);
701 if (elf_phdata
== NULL
) {
705 retval
= lseek(bprm
->fd
, elf_ex
.e_phoff
, SEEK_SET
);
707 retval
= read(bprm
->fd
, (char *)elf_phdata
,
708 elf_ex
.e_phentsize
* elf_ex
.e_phnum
);
712 perror("load_elf_binary");
719 elf_ppnt
= elf_phdata
;
720 for (i
= 0; i
< elf_ex
.e_phnum
; i
++, elf_ppnt
++) {
721 bswap_phdr(elf_ppnt
);
724 elf_ppnt
= elf_phdata
;
730 elf_interpreter
= NULL
;
731 start_code
= ~((abi_ulong
)0UL);
736 for (i
= 0;i
< elf_ex
.e_phnum
; i
++) {
737 if (elf_ppnt
->p_type
== PT_INTERP
) {
738 if (elf_interpreter
!= NULL
)
741 free(elf_interpreter
);
746 /* This is the program interpreter used for
747 * shared libraries - for now assume that this
748 * is an a.out format binary
751 elf_interpreter
= (char *)malloc(elf_ppnt
->p_filesz
);
753 if (elf_interpreter
== NULL
) {
759 retval
= lseek(bprm
->fd
, elf_ppnt
->p_offset
, SEEK_SET
);
761 retval
= read(bprm
->fd
, elf_interpreter
, elf_ppnt
->p_filesz
);
764 perror("load_elf_binary2");
769 retval
= open(path(elf_interpreter
), O_RDONLY
);
771 interpreter_fd
= retval
;
774 perror(elf_interpreter
);
776 /* retval = -errno; */
781 retval
= lseek(interpreter_fd
, 0, SEEK_SET
);
783 retval
= read(interpreter_fd
, bprm
->buf
, 128);
787 interp_elf_ex
= *((struct elfhdr
*) bprm
->buf
); /* elf exec-header */
790 perror("load_elf_binary3");
793 free(elf_interpreter
);
801 /* Some simple consistency checks for the interpreter */
802 if (elf_interpreter
) {
803 if (interp_elf_ex
.e_ident
[0] != 0x7f ||
804 strncmp((char *)&interp_elf_ex
.e_ident
[1], "ELF", 3) != 0) {
805 free(elf_interpreter
);
812 /* OK, we are done with that, now set up the arg stuff,
813 and then start this sucker up */
816 free(elf_interpreter
);
822 /* OK, This is the point of no return */
825 info
->start_mmap
= (abi_ulong
)ELF_START_MMAP
;
827 elf_entry
= (abi_ulong
) elf_ex
.e_entry
;
830 * In case where user has not explicitly set the guest_base, we
831 * probe here that should we set it automatically.
833 if (!have_guest_base
) {
835 * Go through ELF program header table and find out whether
836 * any of the segments drop below our current mmap_min_addr and
837 * in that case set guest_base to corresponding address.
839 for (i
= 0, elf_ppnt
= elf_phdata
; i
< elf_ex
.e_phnum
;
841 if (elf_ppnt
->p_type
!= PT_LOAD
)
843 if (HOST_PAGE_ALIGN(elf_ppnt
->p_vaddr
) < mmap_min_addr
) {
844 guest_base
= HOST_PAGE_ALIGN(mmap_min_addr
);
850 /* Do this so that we can load the interpreter, if need be. We will
851 change some of these later */
853 bprm
->p
= setup_arg_pages(bprm
->p
, bprm
, info
);
854 info
->start_stack
= bprm
->p
;
856 /* Now we do a little grungy work by mmaping the ELF image into
857 * the correct location in memory. At this point, we assume that
858 * the image should be loaded at fixed address, not at a variable
862 for (i
= 0, elf_ppnt
= elf_phdata
; i
< elf_ex
.e_phnum
; i
++, elf_ppnt
++) {
867 if (elf_ppnt
->p_type
!= PT_LOAD
)
870 if (elf_ppnt
->p_flags
& PF_R
) elf_prot
|= PROT_READ
;
871 if (elf_ppnt
->p_flags
& PF_W
) elf_prot
|= PROT_WRITE
;
872 if (elf_ppnt
->p_flags
& PF_X
) elf_prot
|= PROT_EXEC
;
873 elf_flags
= MAP_PRIVATE
| MAP_DENYWRITE
;
874 if (elf_ex
.e_type
== ET_EXEC
|| load_addr_set
) {
875 elf_flags
|= MAP_FIXED
;
876 } else if (elf_ex
.e_type
== ET_DYN
) {
877 /* Try and get dynamic programs out of the way of the default mmap
878 base, as well as whatever program they might try to exec. This
879 is because the brk will follow the loader, and is not movable. */
880 /* NOTE: for qemu, we do a big mmap to get enough space
881 without hardcoding any address */
882 error
= target_mmap(0, ET_DYN_MAP_SIZE
,
883 PROT_NONE
, MAP_PRIVATE
| MAP_ANON
,
889 load_bias
= TARGET_ELF_PAGESTART(error
- elf_ppnt
->p_vaddr
);
892 error
= target_mmap(TARGET_ELF_PAGESTART(load_bias
+ elf_ppnt
->p_vaddr
),
893 (elf_ppnt
->p_filesz
+
894 TARGET_ELF_PAGEOFFSET(elf_ppnt
->p_vaddr
)),
896 (MAP_FIXED
| MAP_PRIVATE
| MAP_DENYWRITE
),
898 (elf_ppnt
->p_offset
-
899 TARGET_ELF_PAGEOFFSET(elf_ppnt
->p_vaddr
)));
906 if (TARGET_ELF_PAGESTART(elf_ppnt
->p_vaddr
) < elf_stack
)
907 elf_stack
= TARGET_ELF_PAGESTART(elf_ppnt
->p_vaddr
);
910 if (!load_addr_set
) {
912 load_addr
= elf_ppnt
->p_vaddr
- elf_ppnt
->p_offset
;
913 if (elf_ex
.e_type
== ET_DYN
) {
915 TARGET_ELF_PAGESTART(load_bias
+ elf_ppnt
->p_vaddr
);
916 load_addr
+= load_bias
;
917 reloc_func_desc
= load_bias
;
920 k
= elf_ppnt
->p_vaddr
;
925 k
= elf_ppnt
->p_vaddr
+ elf_ppnt
->p_filesz
;
928 if ((elf_ppnt
->p_flags
& PF_X
) && end_code
< k
)
932 k
= elf_ppnt
->p_vaddr
+ elf_ppnt
->p_memsz
;
933 if (k
> elf_brk
) elf_brk
= k
;
936 elf_entry
+= load_bias
;
937 elf_bss
+= load_bias
;
938 elf_brk
+= load_bias
;
939 start_code
+= load_bias
;
940 end_code
+= load_bias
;
941 start_data
+= load_bias
;
942 end_data
+= load_bias
;
944 if (elf_interpreter
) {
945 elf_entry
= load_elf_interp(&interp_elf_ex
, interpreter_fd
,
947 reloc_func_desc
= interp_load_addr
;
949 close(interpreter_fd
);
950 free(elf_interpreter
);
952 if (elf_entry
== ~((abi_ulong
)0UL)) {
953 printf("Unable to load interpreter\n");
962 if (qemu_log_enabled())
963 load_symbols(&elf_ex
, bprm
->fd
);
968 info
->start_stack
= bprm
->p
= elf_stack
- 4;
970 bprm
->p
= create_elf_tables(bprm
->p
,
974 load_addr
, load_bias
,
977 info
->load_addr
= reloc_func_desc
;
978 info
->start_brk
= info
->brk
= elf_brk
;
979 info
->end_code
= end_code
;
980 info
->start_code
= start_code
;
981 info
->start_data
= start_data
;
982 info
->end_data
= end_data
;
983 info
->start_stack
= bprm
->p
;
985 /* Calling set_brk effectively mmaps the pages that we need for the bss and break
987 set_brk(elf_bss
, elf_brk
);
989 padzero(elf_bss
, elf_brk
);
992 printf("(start_brk) %x\n" , info
->start_brk
);
993 printf("(end_code) %x\n" , info
->end_code
);
994 printf("(start_code) %x\n" , info
->start_code
);
995 printf("(end_data) %x\n" , info
->end_data
);
996 printf("(start_stack) %x\n" , info
->start_stack
);
997 printf("(brk) %x\n" , info
->brk
);
1000 info
->entry
= elf_entry
;
/*
 * Initialize the first thread's register state from the image information
 * produced by load_elf_binary(); thin wrapper over the per-arch
 * init_thread() hook.
 */
void do_init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    init_thread(regs, infop);
}