4 * Copyright (c) 2013 Stacey D. Son
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
20 #include "qemu/osdep.h"
23 #include "disas/disas.h"
24 #include "qemu/path.h"
26 static abi_ulong target_auxents
; /* Where the AUX entries are in target */
27 static size_t target_auxents_sz
; /* Size of AUX entries including AT_NULL */
29 #include "target_os_elf.h"
30 #include "target_os_stack.h"
31 #include "target_os_thread.h"
35 abi_ulong target_stksiz
;
36 abi_ulong target_stkbas
;
/*
 * Historical Linux-kernel-style name ("copy from user segment").  In qemu
 * user emulation guest memory is directly addressable by the host, so this
 * is just a plain memcpy().
 */
static inline void memcpy_fromfs(void *to, const void *from, unsigned long n)
{
    memcpy(to, from, n);
}
44 static void bswap_ehdr(struct elfhdr
*ehdr
)
46 bswap16s(&ehdr
->e_type
); /* Object file type */
47 bswap16s(&ehdr
->e_machine
); /* Architecture */
48 bswap32s(&ehdr
->e_version
); /* Object file version */
49 bswaptls(&ehdr
->e_entry
); /* Entry point virtual address */
50 bswaptls(&ehdr
->e_phoff
); /* Program header table file offset */
51 bswaptls(&ehdr
->e_shoff
); /* Section header table file offset */
52 bswap32s(&ehdr
->e_flags
); /* Processor-specific flags */
53 bswap16s(&ehdr
->e_ehsize
); /* ELF header size in bytes */
54 bswap16s(&ehdr
->e_phentsize
); /* Program header table entry size */
55 bswap16s(&ehdr
->e_phnum
); /* Program header table entry count */
56 bswap16s(&ehdr
->e_shentsize
); /* Section header table entry size */
57 bswap16s(&ehdr
->e_shnum
); /* Section header table entry count */
58 bswap16s(&ehdr
->e_shstrndx
); /* Section header string table index */
61 static void bswap_phdr(struct elf_phdr
*phdr
, int phnum
)
65 for (i
= 0; i
< phnum
; i
++, phdr
++) {
66 bswap32s(&phdr
->p_type
); /* Segment type */
67 bswap32s(&phdr
->p_flags
); /* Segment flags */
68 bswaptls(&phdr
->p_offset
); /* Segment file offset */
69 bswaptls(&phdr
->p_vaddr
); /* Segment virtual address */
70 bswaptls(&phdr
->p_paddr
); /* Segment physical address */
71 bswaptls(&phdr
->p_filesz
); /* Segment size in file */
72 bswaptls(&phdr
->p_memsz
); /* Segment size in memory */
73 bswaptls(&phdr
->p_align
); /* Segment alignment */
77 static void bswap_shdr(struct elf_shdr
*shdr
, int shnum
)
81 for (i
= 0; i
< shnum
; i
++, shdr
++) {
82 bswap32s(&shdr
->sh_name
);
83 bswap32s(&shdr
->sh_type
);
84 bswaptls(&shdr
->sh_flags
);
85 bswaptls(&shdr
->sh_addr
);
86 bswaptls(&shdr
->sh_offset
);
87 bswaptls(&shdr
->sh_size
);
88 bswap32s(&shdr
->sh_link
);
89 bswap32s(&shdr
->sh_info
);
90 bswaptls(&shdr
->sh_addralign
);
91 bswaptls(&shdr
->sh_entsize
);
95 static void bswap_sym(struct elf_sym
*sym
)
97 bswap32s(&sym
->st_name
);
98 bswaptls(&sym
->st_value
);
99 bswaptls(&sym
->st_size
);
100 bswap16s(&sym
->st_shndx
);
103 #else /* ! BSWAP_NEEDED */
105 static void bswap_ehdr(struct elfhdr
*ehdr
) { }
106 static void bswap_phdr(struct elf_phdr
*phdr
, int phnum
) { }
107 static void bswap_shdr(struct elf_shdr
*shdr
, int shnum
) { }
108 static void bswap_sym(struct elf_sym
*sym
) { }
110 #endif /* ! BSWAP_NEEDED */
 * 'copy_elf_strings()' copies argument/environment strings from user
 * memory to free pages in kernel mem.  These are in a format ready
 * to be put directly into the top of new user memory.
118 static abi_ulong
copy_elf_strings(int argc
, char **argv
, void **page
,
121 char *tmp
, *tmp1
, *pag
= NULL
;
125 return 0; /* bullet-proofing */
130 fprintf(stderr
, "VFS: argc is wrong");
136 if (p
< len
) { /* this shouldn't happen - 128kB */
142 offset
= p
% TARGET_PAGE_SIZE
;
143 pag
= (char *)page
[p
/ TARGET_PAGE_SIZE
];
145 pag
= g_try_malloc0(TARGET_PAGE_SIZE
);
146 page
[p
/ TARGET_PAGE_SIZE
] = pag
;
151 if (len
== 0 || offset
== 0) {
152 *(pag
+ offset
) = *tmp
;
155 int bytes_to_copy
= (len
> offset
) ? offset
: len
;
156 tmp
-= bytes_to_copy
;
158 offset
-= bytes_to_copy
;
159 len
-= bytes_to_copy
;
160 memcpy_fromfs(pag
+ offset
, tmp
, bytes_to_copy
+ 1);
167 static void setup_arg_pages(struct bsd_binprm
*bprm
, struct image_info
*info
,
168 abi_ulong
*stackp
, abi_ulong
*stringp
)
170 abi_ulong stack_base
, size
;
173 /* Create enough stack to hold everything. If we don't use
174 * it for args, we'll use it for something else...
176 size
= target_dflssiz
;
177 stack_base
= TARGET_USRSTACK
- size
;
178 addr
= target_mmap(stack_base
,
179 size
+ qemu_host_page_size
,
180 PROT_READ
| PROT_WRITE
,
181 MAP_PRIVATE
| MAP_ANON
,
187 /* we reserve one extra page at the top of the stack as guard */
188 target_mprotect(addr
+ size
, qemu_host_page_size
, PROT_NONE
);
190 target_stksiz
= size
;
191 target_stkbas
= addr
;
193 if (setup_initial_stack(bprm
, stackp
, stringp
) != 0) {
199 static void set_brk(abi_ulong start
, abi_ulong end
)
201 /* page-align the start and end addresses... */
202 start
= HOST_PAGE_ALIGN(start
);
203 end
= HOST_PAGE_ALIGN(end
);
206 if (target_mmap(start
, end
- start
,
207 PROT_READ
| PROT_WRITE
| PROT_EXEC
,
208 MAP_FIXED
| MAP_PRIVATE
| MAP_ANON
, -1, 0) == -1) {
209 perror("cannot mmap brk");
215 /* We need to explicitly zero any fractional pages after the data
216 section (i.e. bss). This would contain the junk from the file that
217 should not be in memory. */
218 static void padzero(abi_ulong elf_bss
, abi_ulong last_bss
)
222 if (elf_bss
>= last_bss
)
225 /* XXX: this is really a hack : if the real host page size is
226 smaller than the target page size, some pages after the end
227 of the file may not be mapped. A better fix would be to
228 patch target_mmap(), but it is more complicated as the file
229 size must be known */
230 if (qemu_real_host_page_size
< qemu_host_page_size
) {
231 abi_ulong end_addr
, end_addr1
;
232 end_addr1
= REAL_HOST_PAGE_ALIGN(elf_bss
);
233 end_addr
= HOST_PAGE_ALIGN(elf_bss
);
234 if (end_addr1
< end_addr
) {
235 mmap((void *)g2h_untagged(end_addr1
), end_addr
- end_addr1
,
236 PROT_READ
| PROT_WRITE
| PROT_EXEC
,
237 MAP_FIXED
| MAP_PRIVATE
| MAP_ANON
, -1, 0);
241 nbyte
= elf_bss
& (qemu_host_page_size
- 1);
243 nbyte
= qemu_host_page_size
- nbyte
;
245 /* FIXME - what to do if put_user() fails? */
246 put_user_u8(0, elf_bss
);
252 static abi_ulong
load_elf_interp(struct elfhdr
*interp_elf_ex
,
254 abi_ulong
*interp_load_addr
)
256 struct elf_phdr
*elf_phdata
= NULL
;
257 struct elf_phdr
*eppnt
;
258 abi_ulong load_addr
= 0;
259 int load_addr_set
= 0;
261 abi_ulong last_bss
, elf_bss
;
269 bswap_ehdr(interp_elf_ex
);
270 /* First of all, some simple consistency checks */
271 if ((interp_elf_ex
->e_type
!= ET_EXEC
&&
272 interp_elf_ex
->e_type
!= ET_DYN
) ||
273 !elf_check_arch(interp_elf_ex
->e_machine
)) {
274 return ~((abi_ulong
)0UL);
278 /* Now read in all of the header information */
280 if (sizeof(struct elf_phdr
) * interp_elf_ex
->e_phnum
> TARGET_PAGE_SIZE
)
281 return ~(abi_ulong
)0UL;
283 elf_phdata
= (struct elf_phdr
*)
284 malloc(sizeof(struct elf_phdr
) * interp_elf_ex
->e_phnum
);
287 return ~((abi_ulong
)0UL);
290 * If the size of this structure has changed, then punt, since
291 * we will be doing the wrong thing.
293 if (interp_elf_ex
->e_phentsize
!= sizeof(struct elf_phdr
)) {
295 return ~((abi_ulong
)0UL);
298 retval
= lseek(interpreter_fd
, interp_elf_ex
->e_phoff
, SEEK_SET
);
300 retval
= read(interpreter_fd
,
302 sizeof(struct elf_phdr
) * interp_elf_ex
->e_phnum
);
305 perror("load_elf_interp");
310 bswap_phdr(elf_phdata
, interp_elf_ex
->e_phnum
);
312 if (interp_elf_ex
->e_type
== ET_DYN
) {
313 /* in order to avoid hardcoding the interpreter load
314 address in qemu, we allocate a big enough memory zone */
315 error
= target_mmap(0, INTERP_MAP_SIZE
,
316 PROT_NONE
, MAP_PRIVATE
| MAP_ANON
,
327 for (i
= 0; i
< interp_elf_ex
->e_phnum
; i
++, eppnt
++)
328 if (eppnt
->p_type
== PT_LOAD
) {
329 int elf_type
= MAP_PRIVATE
| MAP_DENYWRITE
;
334 if (eppnt
->p_flags
& PF_R
) elf_prot
= PROT_READ
;
335 if (eppnt
->p_flags
& PF_W
) elf_prot
|= PROT_WRITE
;
336 if (eppnt
->p_flags
& PF_X
) elf_prot
|= PROT_EXEC
;
337 if (interp_elf_ex
->e_type
== ET_EXEC
|| load_addr_set
) {
338 elf_type
|= MAP_FIXED
;
339 vaddr
= eppnt
->p_vaddr
;
341 error
= target_mmap(load_addr
+ TARGET_ELF_PAGESTART(vaddr
),
342 eppnt
->p_filesz
+ TARGET_ELF_PAGEOFFSET(eppnt
->p_vaddr
),
346 eppnt
->p_offset
- TARGET_ELF_PAGEOFFSET(eppnt
->p_vaddr
));
350 close(interpreter_fd
);
352 return ~((abi_ulong
)0UL);
355 if (!load_addr_set
&& interp_elf_ex
->e_type
== ET_DYN
) {
361 * Find the end of the file mapping for this phdr, and keep
362 * track of the largest address we see for this.
364 k
= load_addr
+ eppnt
->p_vaddr
+ eppnt
->p_filesz
;
365 if (k
> elf_bss
) elf_bss
= k
;
368 * Do the same thing for the memory mapping - between
369 * elf_bss and last_bss is the bss section.
371 k
= load_addr
+ eppnt
->p_memsz
+ eppnt
->p_vaddr
;
372 if (k
> last_bss
) last_bss
= k
;
375 /* Now use mmap to map the library into memory. */
377 close(interpreter_fd
);
380 * Now fill out the bss section. First pad the last page up
381 * to the page boundary, and then perform a mmap to make sure
382 * that there are zeromapped pages up to and including the last
385 padzero(elf_bss
, last_bss
);
386 elf_bss
= TARGET_ELF_PAGESTART(elf_bss
+ qemu_host_page_size
- 1); /* What we have mapped so far */
388 /* Map the last of the bss segment */
389 if (last_bss
> elf_bss
) {
390 target_mmap(elf_bss
, last_bss
- elf_bss
,
391 PROT_READ
| PROT_WRITE
| PROT_EXEC
,
392 MAP_FIXED
| MAP_PRIVATE
| MAP_ANON
, -1, 0);
396 *interp_load_addr
= load_addr
;
397 return ((abi_ulong
) interp_elf_ex
->e_entry
) + load_addr
;
400 static int symfind(const void *s0
, const void *s1
)
402 target_ulong addr
= *(target_ulong
*)s0
;
403 struct elf_sym
*sym
= (struct elf_sym
*)s1
;
405 if (addr
< sym
->st_value
) {
407 } else if (addr
>= sym
->st_value
+ sym
->st_size
) {
413 static const char *lookup_symbolxx(struct syminfo
*s
, target_ulong orig_addr
)
415 #if ELF_CLASS == ELFCLASS32
416 struct elf_sym
*syms
= s
->disas_symtab
.elf32
;
418 struct elf_sym
*syms
= s
->disas_symtab
.elf64
;
424 sym
= bsearch(&orig_addr
, syms
, s
->disas_num_syms
, sizeof(*syms
), symfind
);
426 return s
->disas_strtab
+ sym
->st_name
;
432 /* FIXME: This should use elf_ops.h */
433 static int symcmp(const void *s0
, const void *s1
)
435 struct elf_sym
*sym0
= (struct elf_sym
*)s0
;
436 struct elf_sym
*sym1
= (struct elf_sym
*)s1
;
437 return (sym0
->st_value
< sym1
->st_value
)
439 : ((sym0
->st_value
> sym1
->st_value
) ? 1 : 0);
442 /* Best attempt to load symbols from this ELF object. */
443 static void load_symbols(struct elfhdr
*hdr
, int fd
)
445 unsigned int i
, nsyms
;
446 struct elf_shdr sechdr
, symtab
, strtab
;
449 struct elf_sym
*syms
, *new_syms
;
451 lseek(fd
, hdr
->e_shoff
, SEEK_SET
);
452 for (i
= 0; i
< hdr
->e_shnum
; i
++) {
453 if (read(fd
, &sechdr
, sizeof(sechdr
)) != sizeof(sechdr
))
455 bswap_shdr(&sechdr
, 1);
456 if (sechdr
.sh_type
== SHT_SYMTAB
) {
458 lseek(fd
, hdr
->e_shoff
459 + sizeof(sechdr
) * sechdr
.sh_link
, SEEK_SET
);
460 if (read(fd
, &strtab
, sizeof(strtab
))
463 bswap_shdr(&strtab
, 1);
467 return; /* Shouldn't happen... */
470 /* Now know where the strtab and symtab are. Snarf them. */
471 s
= malloc(sizeof(*s
));
472 syms
= malloc(symtab
.sh_size
);
477 s
->disas_strtab
= strings
= malloc(strtab
.sh_size
);
478 if (!s
->disas_strtab
) {
484 lseek(fd
, symtab
.sh_offset
, SEEK_SET
);
485 if (read(fd
, syms
, symtab
.sh_size
) != symtab
.sh_size
) {
492 nsyms
= symtab
.sh_size
/ sizeof(struct elf_sym
);
497 // Throw away entries which we do not need.
498 if (syms
[i
].st_shndx
== SHN_UNDEF
||
499 syms
[i
].st_shndx
>= SHN_LORESERVE
||
500 ELF_ST_TYPE(syms
[i
].st_info
) != STT_FUNC
) {
503 syms
[i
] = syms
[nsyms
];
510 /* Attempt to free the storage associated with the local symbols
511 that we threw away. Whether or not this has any effect on the
512 memory allocation depends on the malloc implementation and how
513 many symbols we managed to discard. */
514 new_syms
= realloc(syms
, nsyms
* sizeof(*syms
));
515 if (new_syms
== NULL
) {
523 qsort(syms
, nsyms
, sizeof(*syms
), symcmp
);
525 lseek(fd
, strtab
.sh_offset
, SEEK_SET
);
526 if (read(fd
, strings
, strtab
.sh_size
) != strtab
.sh_size
) {
532 s
->disas_num_syms
= nsyms
;
533 #if ELF_CLASS == ELFCLASS32
534 s
->disas_symtab
.elf32
= syms
;
535 s
->lookup_symbol
= (lookup_symbol_t
)lookup_symbolxx
;
537 s
->disas_symtab
.elf64
= syms
;
538 s
->lookup_symbol
= (lookup_symbol_t
)lookup_symbolxx
;
544 int load_elf_binary(struct bsd_binprm
*bprm
, struct target_pt_regs
*regs
,
545 struct image_info
*info
)
547 struct elfhdr elf_ex
;
548 struct elfhdr interp_elf_ex
;
549 int interpreter_fd
= -1; /* avoid warning */
550 abi_ulong load_addr
, load_bias
;
551 int load_addr_set
= 0;
553 struct elf_phdr
* elf_ppnt
;
554 struct elf_phdr
*elf_phdata
;
555 abi_ulong elf_bss
, k
, elf_brk
;
557 char * elf_interpreter
;
558 abi_ulong elf_entry
, interp_load_addr
= 0;
559 abi_ulong start_code
, end_code
, start_data
, end_data
;
560 abi_ulong reloc_func_desc
= 0;
562 abi_ulong elf_stack
= ~((abi_ulong
)0UL);
567 elf_ex
= *((struct elfhdr
*) bprm
->buf
); /* exec-header */
570 /* First of all, some simple consistency checks */
571 if ((elf_ex
.e_type
!= ET_EXEC
&& elf_ex
.e_type
!= ET_DYN
) ||
572 (!elf_check_arch(elf_ex
.e_machine
))) {
576 bprm
->p
= copy_elf_strings(1, &bprm
->filename
, bprm
->page
, bprm
->p
);
577 bprm
->p
= copy_elf_strings(bprm
->envc
, bprm
->envp
, bprm
->page
,bprm
->p
);
578 bprm
->p
= copy_elf_strings(bprm
->argc
, bprm
->argv
, bprm
->page
,bprm
->p
);
583 /* Now read in all of the header information */
584 elf_phdata
= (struct elf_phdr
*)malloc(elf_ex
.e_phentsize
*elf_ex
.e_phnum
);
585 if (elf_phdata
== NULL
) {
589 retval
= lseek(bprm
->fd
, elf_ex
.e_phoff
, SEEK_SET
);
591 retval
= read(bprm
->fd
, (char *)elf_phdata
,
592 elf_ex
.e_phentsize
* elf_ex
.e_phnum
);
596 perror("load_elf_binary");
602 bswap_phdr(elf_phdata
, elf_ex
.e_phnum
);
604 elf_ppnt
= elf_phdata
;
610 elf_interpreter
= NULL
;
611 start_code
= ~((abi_ulong
)0UL);
616 for (i
= 0;i
< elf_ex
.e_phnum
; i
++) {
617 if (elf_ppnt
->p_type
== PT_INTERP
) {
618 if (elf_interpreter
!= NULL
)
621 free(elf_interpreter
);
626 /* This is the program interpreter used for
627 * shared libraries - for now assume that this
628 * is an a.out format binary
631 elf_interpreter
= (char *)malloc(elf_ppnt
->p_filesz
);
633 if (elf_interpreter
== NULL
) {
639 retval
= lseek(bprm
->fd
, elf_ppnt
->p_offset
, SEEK_SET
);
641 retval
= read(bprm
->fd
, elf_interpreter
, elf_ppnt
->p_filesz
);
644 perror("load_elf_binary2");
649 retval
= open(path(elf_interpreter
), O_RDONLY
);
651 interpreter_fd
= retval
;
654 perror(elf_interpreter
);
656 /* retval = -errno; */
661 retval
= lseek(interpreter_fd
, 0, SEEK_SET
);
663 retval
= read(interpreter_fd
, bprm
->buf
, 128);
667 interp_elf_ex
= *((struct elfhdr
*) bprm
->buf
); /* elf exec-header */
670 perror("load_elf_binary3");
673 free(elf_interpreter
);
681 /* Some simple consistency checks for the interpreter */
682 if (elf_interpreter
) {
683 if (interp_elf_ex
.e_ident
[0] != 0x7f ||
684 strncmp((char *)&interp_elf_ex
.e_ident
[1], "ELF", 3) != 0) {
685 free(elf_interpreter
);
692 /* OK, we are done with that, now set up the arg stuff,
693 and then start this sucker up */
696 free(elf_interpreter
);
702 /* OK, This is the point of no return */
705 info
->start_mmap
= (abi_ulong
)ELF_START_MMAP
;
707 elf_entry
= (abi_ulong
) elf_ex
.e_entry
;
709 /* Do this so that we can load the interpreter, if need be. We will
710 change some of these later */
712 setup_arg_pages(bprm
, info
, &bprm
->p
, &bprm
->stringp
);
713 info
->start_stack
= bprm
->p
;
715 /* Now we do a little grungy work by mmaping the ELF image into
716 * the correct location in memory. At this point, we assume that
717 * the image should be loaded at fixed address, not at a variable
721 for (i
= 0, elf_ppnt
= elf_phdata
; i
< elf_ex
.e_phnum
; i
++, elf_ppnt
++) {
726 if (elf_ppnt
->p_type
!= PT_LOAD
)
729 if (elf_ppnt
->p_flags
& PF_R
) elf_prot
|= PROT_READ
;
730 if (elf_ppnt
->p_flags
& PF_W
) elf_prot
|= PROT_WRITE
;
731 if (elf_ppnt
->p_flags
& PF_X
) elf_prot
|= PROT_EXEC
;
732 elf_flags
= MAP_PRIVATE
| MAP_DENYWRITE
;
733 if (elf_ex
.e_type
== ET_EXEC
|| load_addr_set
) {
734 elf_flags
|= MAP_FIXED
;
735 } else if (elf_ex
.e_type
== ET_DYN
) {
736 /* Try and get dynamic programs out of the way of the default mmap
737 base, as well as whatever program they might try to exec. This
738 is because the brk will follow the loader, and is not movable. */
739 /* NOTE: for qemu, we do a big mmap to get enough space
740 without hardcoding any address */
741 error
= target_mmap(0, ET_DYN_MAP_SIZE
,
742 PROT_NONE
, MAP_PRIVATE
| MAP_ANON
,
748 load_bias
= TARGET_ELF_PAGESTART(error
- elf_ppnt
->p_vaddr
);
751 error
= target_mmap(TARGET_ELF_PAGESTART(load_bias
+ elf_ppnt
->p_vaddr
),
752 (elf_ppnt
->p_filesz
+
753 TARGET_ELF_PAGEOFFSET(elf_ppnt
->p_vaddr
)),
755 (MAP_FIXED
| MAP_PRIVATE
| MAP_DENYWRITE
),
757 (elf_ppnt
->p_offset
-
758 TARGET_ELF_PAGEOFFSET(elf_ppnt
->p_vaddr
)));
765 if (TARGET_ELF_PAGESTART(elf_ppnt
->p_vaddr
) < elf_stack
)
766 elf_stack
= TARGET_ELF_PAGESTART(elf_ppnt
->p_vaddr
);
769 if (!load_addr_set
) {
771 load_addr
= elf_ppnt
->p_vaddr
- elf_ppnt
->p_offset
;
772 if (elf_ex
.e_type
== ET_DYN
) {
774 TARGET_ELF_PAGESTART(load_bias
+ elf_ppnt
->p_vaddr
);
775 load_addr
+= load_bias
;
776 reloc_func_desc
= load_bias
;
779 k
= elf_ppnt
->p_vaddr
;
784 k
= elf_ppnt
->p_vaddr
+ elf_ppnt
->p_filesz
;
787 if ((elf_ppnt
->p_flags
& PF_X
) && end_code
< k
)
791 k
= elf_ppnt
->p_vaddr
+ elf_ppnt
->p_memsz
;
792 if (k
> elf_brk
) elf_brk
= k
;
795 elf_entry
+= load_bias
;
796 elf_bss
+= load_bias
;
797 elf_brk
+= load_bias
;
798 start_code
+= load_bias
;
799 end_code
+= load_bias
;
800 start_data
+= load_bias
;
801 end_data
+= load_bias
;
803 if (elf_interpreter
) {
804 elf_entry
= load_elf_interp(&interp_elf_ex
, interpreter_fd
,
806 reloc_func_desc
= interp_load_addr
;
808 close(interpreter_fd
);
809 free(elf_interpreter
);
811 if (elf_entry
== ~((abi_ulong
)0UL)) {
812 printf("Unable to load interpreter\n");
821 if (qemu_log_enabled())
822 load_symbols(&elf_ex
, bprm
->fd
);
827 info
->start_stack
= bprm
->p
= elf_stack
- 4;
829 bprm
->p
= target_create_elf_tables(bprm
->p
, bprm
->argc
, bprm
->envc
,
830 bprm
->stringp
, &elf_ex
, load_addr
,
831 load_bias
, interp_load_addr
, info
);
832 info
->load_addr
= reloc_func_desc
;
833 info
->start_brk
= info
->brk
= elf_brk
;
834 info
->end_code
= end_code
;
835 info
->start_code
= start_code
;
836 info
->start_data
= start_data
;
837 info
->end_data
= end_data
;
838 info
->start_stack
= bprm
->p
;
840 /* Calling set_brk effectively mmaps the pages that we need for the bss and break
842 set_brk(elf_bss
, elf_brk
);
844 padzero(elf_bss
, elf_brk
);
847 printf("(start_brk) %x\n" , info
->start_brk
);
848 printf("(end_code) %x\n" , info
->end_code
);
849 printf("(start_code) %x\n" , info
->start_code
);
850 printf("(end_data) %x\n" , info
->end_data
);
851 printf("(start_stack) %x\n" , info
->start_stack
);
852 printf("(brk) %x\n" , info
->brk
);
855 info
->entry
= elf_entry
;
/* Initialize the guest CPU register state for the freshly loaded image. */
void do_init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    target_thread_init(regs, infop);
}