/*
 * Copyright (c) 2013 Stacey D. Son
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
20 #include "qemu/osdep.h"
23 #include "disas/disas.h"
24 #include "qemu/path.h"
/* Location and extent of the AT_* auxiliary vector in target memory. */
static abi_ulong target_auxents;   /* Where the AUX entries are in target */
static size_t target_auxents_sz;   /* Size of AUX entries including AT_NULL */
29 #include "target_arch_reg.h"
30 #include "target_os_elf.h"
31 #include "target_os_stack.h"
32 #include "target_os_thread.h"
33 #include "target_os_user.h"
/* Guest stack geometry, filled in by setup_arg_pages(). */
abi_ulong target_stksiz;    /* size of the reserved stack region */
abi_ulong target_stkbas;    /* base (lowest address) of the stack region */

/* Forward declaration; defined when USE_ELF_CORE_DUMP is enabled. */
static int elf_core_dump(int signr, CPUArchState *env);
/*
 * Historical Linux-loader helper name kept for readability: under QEMU
 * there is no separate user segment, so this is a plain memcpy.
 */
static inline void memcpy_fromfs(void *to, const void *from, unsigned long n)
{
    memcpy(to, from, n);
}
46 static void bswap_ehdr(struct elfhdr
*ehdr
)
48 bswap16s(&ehdr
->e_type
); /* Object file type */
49 bswap16s(&ehdr
->e_machine
); /* Architecture */
50 bswap32s(&ehdr
->e_version
); /* Object file version */
51 bswaptls(&ehdr
->e_entry
); /* Entry point virtual address */
52 bswaptls(&ehdr
->e_phoff
); /* Program header table file offset */
53 bswaptls(&ehdr
->e_shoff
); /* Section header table file offset */
54 bswap32s(&ehdr
->e_flags
); /* Processor-specific flags */
55 bswap16s(&ehdr
->e_ehsize
); /* ELF header size in bytes */
56 bswap16s(&ehdr
->e_phentsize
); /* Program header table entry size */
57 bswap16s(&ehdr
->e_phnum
); /* Program header table entry count */
58 bswap16s(&ehdr
->e_shentsize
); /* Section header table entry size */
59 bswap16s(&ehdr
->e_shnum
); /* Section header table entry count */
60 bswap16s(&ehdr
->e_shstrndx
); /* Section header string table index */
63 static void bswap_phdr(struct elf_phdr
*phdr
, int phnum
)
67 for (i
= 0; i
< phnum
; i
++, phdr
++) {
68 bswap32s(&phdr
->p_type
); /* Segment type */
69 bswap32s(&phdr
->p_flags
); /* Segment flags */
70 bswaptls(&phdr
->p_offset
); /* Segment file offset */
71 bswaptls(&phdr
->p_vaddr
); /* Segment virtual address */
72 bswaptls(&phdr
->p_paddr
); /* Segment physical address */
73 bswaptls(&phdr
->p_filesz
); /* Segment size in file */
74 bswaptls(&phdr
->p_memsz
); /* Segment size in memory */
75 bswaptls(&phdr
->p_align
); /* Segment alignment */
79 static void bswap_shdr(struct elf_shdr
*shdr
, int shnum
)
83 for (i
= 0; i
< shnum
; i
++, shdr
++) {
84 bswap32s(&shdr
->sh_name
);
85 bswap32s(&shdr
->sh_type
);
86 bswaptls(&shdr
->sh_flags
);
87 bswaptls(&shdr
->sh_addr
);
88 bswaptls(&shdr
->sh_offset
);
89 bswaptls(&shdr
->sh_size
);
90 bswap32s(&shdr
->sh_link
);
91 bswap32s(&shdr
->sh_info
);
92 bswaptls(&shdr
->sh_addralign
);
93 bswaptls(&shdr
->sh_entsize
);
97 static void bswap_sym(struct elf_sym
*sym
)
99 bswap32s(&sym
->st_name
);
100 bswaptls(&sym
->st_value
);
101 bswaptls(&sym
->st_size
);
102 bswap16s(&sym
->st_shndx
);
105 static void bswap_note(struct elf_note
*en
)
107 bswap32s(&en
->n_namesz
);
108 bswap32s(&en
->n_descsz
);
109 bswap32s(&en
->n_type
);
#else /* ! BSWAP_NEEDED */

/* Host and target endianness agree: all byte-swap helpers are no-ops. */
static void bswap_ehdr(struct elfhdr *ehdr) { }
static void bswap_phdr(struct elf_phdr *phdr, int phnum) { }
static void bswap_shdr(struct elf_shdr *shdr, int shnum) { }
static void bswap_sym(struct elf_sym *sym) { }
static void bswap_note(struct elf_note *en) { }

#endif /* ! BSWAP_NEEDED */
/*
 * 'copy_elf_strings()' copies argument/environment strings from user
 * memory to free pages in kernel mem.  These are in a format ready
 * to be put directly into the top of new user memory.
 */
130 static abi_ulong
copy_elf_strings(int argc
, char **argv
, void **page
,
133 char *tmp
, *tmp1
, *pag
= NULL
;
137 return 0; /* bullet-proofing */
142 fprintf(stderr
, "VFS: argc is wrong");
148 if (p
< len
) { /* this shouldn't happen - 128kB */
154 offset
= p
% TARGET_PAGE_SIZE
;
155 pag
= (char *)page
[p
/ TARGET_PAGE_SIZE
];
157 pag
= g_try_malloc0(TARGET_PAGE_SIZE
);
158 page
[p
/ TARGET_PAGE_SIZE
] = pag
;
163 if (len
== 0 || offset
== 0) {
164 *(pag
+ offset
) = *tmp
;
167 int bytes_to_copy
= (len
> offset
) ? offset
: len
;
168 tmp
-= bytes_to_copy
;
170 offset
-= bytes_to_copy
;
171 len
-= bytes_to_copy
;
172 memcpy_fromfs(pag
+ offset
, tmp
, bytes_to_copy
+ 1);
179 static void setup_arg_pages(struct bsd_binprm
*bprm
, struct image_info
*info
,
180 abi_ulong
*stackp
, abi_ulong
*stringp
)
182 abi_ulong stack_base
, size
;
185 /* Create enough stack to hold everything. If we don't use
186 * it for args, we'll use it for something else...
188 size
= target_dflssiz
;
189 stack_base
= TARGET_USRSTACK
- size
;
190 addr
= target_mmap(stack_base
,
191 size
+ qemu_host_page_size
,
192 PROT_READ
| PROT_WRITE
,
193 MAP_PRIVATE
| MAP_ANON
,
199 /* we reserve one extra page at the top of the stack as guard */
200 target_mprotect(addr
+ size
, qemu_host_page_size
, PROT_NONE
);
202 target_stksiz
= size
;
203 target_stkbas
= addr
;
205 if (setup_initial_stack(bprm
, stackp
, stringp
) != 0) {
211 static void set_brk(abi_ulong start
, abi_ulong end
)
213 /* page-align the start and end addresses... */
214 start
= HOST_PAGE_ALIGN(start
);
215 end
= HOST_PAGE_ALIGN(end
);
218 if (target_mmap(start
, end
- start
,
219 PROT_READ
| PROT_WRITE
| PROT_EXEC
,
220 MAP_FIXED
| MAP_PRIVATE
| MAP_ANON
, -1, 0) == -1) {
221 perror("cannot mmap brk");
227 /* We need to explicitly zero any fractional pages after the data
228 section (i.e. bss). This would contain the junk from the file that
229 should not be in memory. */
230 static void padzero(abi_ulong elf_bss
, abi_ulong last_bss
)
234 if (elf_bss
>= last_bss
)
237 /* XXX: this is really a hack : if the real host page size is
238 smaller than the target page size, some pages after the end
239 of the file may not be mapped. A better fix would be to
240 patch target_mmap(), but it is more complicated as the file
241 size must be known */
242 if (qemu_real_host_page_size
< qemu_host_page_size
) {
243 abi_ulong end_addr
, end_addr1
;
244 end_addr1
= REAL_HOST_PAGE_ALIGN(elf_bss
);
245 end_addr
= HOST_PAGE_ALIGN(elf_bss
);
246 if (end_addr1
< end_addr
) {
247 mmap((void *)g2h_untagged(end_addr1
), end_addr
- end_addr1
,
248 PROT_READ
| PROT_WRITE
| PROT_EXEC
,
249 MAP_FIXED
| MAP_PRIVATE
| MAP_ANON
, -1, 0);
253 nbyte
= elf_bss
& (qemu_host_page_size
- 1);
255 nbyte
= qemu_host_page_size
- nbyte
;
257 /* FIXME - what to do if put_user() fails? */
258 put_user_u8(0, elf_bss
);
264 static abi_ulong
load_elf_interp(struct elfhdr
*interp_elf_ex
,
266 abi_ulong
*interp_load_addr
)
268 struct elf_phdr
*elf_phdata
= NULL
;
269 struct elf_phdr
*eppnt
;
270 abi_ulong load_addr
= 0;
271 int load_addr_set
= 0;
273 abi_ulong last_bss
, elf_bss
;
281 bswap_ehdr(interp_elf_ex
);
282 /* First of all, some simple consistency checks */
283 if ((interp_elf_ex
->e_type
!= ET_EXEC
&&
284 interp_elf_ex
->e_type
!= ET_DYN
) ||
285 !elf_check_arch(interp_elf_ex
->e_machine
)) {
286 return ~((abi_ulong
)0UL);
290 /* Now read in all of the header information */
292 if (sizeof(struct elf_phdr
) * interp_elf_ex
->e_phnum
> TARGET_PAGE_SIZE
)
293 return ~(abi_ulong
)0UL;
295 elf_phdata
= (struct elf_phdr
*)
296 malloc(sizeof(struct elf_phdr
) * interp_elf_ex
->e_phnum
);
299 return ~((abi_ulong
)0UL);
302 * If the size of this structure has changed, then punt, since
303 * we will be doing the wrong thing.
305 if (interp_elf_ex
->e_phentsize
!= sizeof(struct elf_phdr
)) {
307 return ~((abi_ulong
)0UL);
310 retval
= lseek(interpreter_fd
, interp_elf_ex
->e_phoff
, SEEK_SET
);
312 retval
= read(interpreter_fd
,
314 sizeof(struct elf_phdr
) * interp_elf_ex
->e_phnum
);
317 perror("load_elf_interp");
322 bswap_phdr(elf_phdata
, interp_elf_ex
->e_phnum
);
324 if (interp_elf_ex
->e_type
== ET_DYN
) {
325 /* in order to avoid hardcoding the interpreter load
326 address in qemu, we allocate a big enough memory zone */
327 error
= target_mmap(0, INTERP_MAP_SIZE
,
328 PROT_NONE
, MAP_PRIVATE
| MAP_ANON
,
339 for (i
= 0; i
< interp_elf_ex
->e_phnum
; i
++, eppnt
++)
340 if (eppnt
->p_type
== PT_LOAD
) {
341 int elf_type
= MAP_PRIVATE
| MAP_DENYWRITE
;
346 if (eppnt
->p_flags
& PF_R
) elf_prot
= PROT_READ
;
347 if (eppnt
->p_flags
& PF_W
) elf_prot
|= PROT_WRITE
;
348 if (eppnt
->p_flags
& PF_X
) elf_prot
|= PROT_EXEC
;
349 if (interp_elf_ex
->e_type
== ET_EXEC
|| load_addr_set
) {
350 elf_type
|= MAP_FIXED
;
351 vaddr
= eppnt
->p_vaddr
;
353 error
= target_mmap(load_addr
+ TARGET_ELF_PAGESTART(vaddr
),
354 eppnt
->p_filesz
+ TARGET_ELF_PAGEOFFSET(eppnt
->p_vaddr
),
358 eppnt
->p_offset
- TARGET_ELF_PAGEOFFSET(eppnt
->p_vaddr
));
362 close(interpreter_fd
);
364 return ~((abi_ulong
)0UL);
367 if (!load_addr_set
&& interp_elf_ex
->e_type
== ET_DYN
) {
373 * Find the end of the file mapping for this phdr, and keep
374 * track of the largest address we see for this.
376 k
= load_addr
+ eppnt
->p_vaddr
+ eppnt
->p_filesz
;
377 if (k
> elf_bss
) elf_bss
= k
;
380 * Do the same thing for the memory mapping - between
381 * elf_bss and last_bss is the bss section.
383 k
= load_addr
+ eppnt
->p_memsz
+ eppnt
->p_vaddr
;
384 if (k
> last_bss
) last_bss
= k
;
387 /* Now use mmap to map the library into memory. */
389 close(interpreter_fd
);
392 * Now fill out the bss section. First pad the last page up
393 * to the page boundary, and then perform a mmap to make sure
394 * that there are zeromapped pages up to and including the last
397 padzero(elf_bss
, last_bss
);
398 elf_bss
= TARGET_ELF_PAGESTART(elf_bss
+ qemu_host_page_size
- 1); /* What we have mapped so far */
400 /* Map the last of the bss segment */
401 if (last_bss
> elf_bss
) {
402 target_mmap(elf_bss
, last_bss
- elf_bss
,
403 PROT_READ
| PROT_WRITE
| PROT_EXEC
,
404 MAP_FIXED
| MAP_PRIVATE
| MAP_ANON
, -1, 0);
408 *interp_load_addr
= load_addr
;
409 return ((abi_ulong
) interp_elf_ex
->e_entry
) + load_addr
;
412 static int symfind(const void *s0
, const void *s1
)
414 target_ulong addr
= *(target_ulong
*)s0
;
415 struct elf_sym
*sym
= (struct elf_sym
*)s1
;
417 if (addr
< sym
->st_value
) {
419 } else if (addr
>= sym
->st_value
+ sym
->st_size
) {
425 static const char *lookup_symbolxx(struct syminfo
*s
, target_ulong orig_addr
)
427 #if ELF_CLASS == ELFCLASS32
428 struct elf_sym
*syms
= s
->disas_symtab
.elf32
;
430 struct elf_sym
*syms
= s
->disas_symtab
.elf64
;
436 sym
= bsearch(&orig_addr
, syms
, s
->disas_num_syms
, sizeof(*syms
), symfind
);
438 return s
->disas_strtab
+ sym
->st_name
;
444 /* FIXME: This should use elf_ops.h */
445 static int symcmp(const void *s0
, const void *s1
)
447 struct elf_sym
*sym0
= (struct elf_sym
*)s0
;
448 struct elf_sym
*sym1
= (struct elf_sym
*)s1
;
449 return (sym0
->st_value
< sym1
->st_value
)
451 : ((sym0
->st_value
> sym1
->st_value
) ? 1 : 0);
454 /* Best attempt to load symbols from this ELF object. */
455 static void load_symbols(struct elfhdr
*hdr
, int fd
)
457 unsigned int i
, nsyms
;
458 struct elf_shdr sechdr
, symtab
, strtab
;
461 struct elf_sym
*syms
, *new_syms
;
463 lseek(fd
, hdr
->e_shoff
, SEEK_SET
);
464 for (i
= 0; i
< hdr
->e_shnum
; i
++) {
465 if (read(fd
, &sechdr
, sizeof(sechdr
)) != sizeof(sechdr
))
467 bswap_shdr(&sechdr
, 1);
468 if (sechdr
.sh_type
== SHT_SYMTAB
) {
470 lseek(fd
, hdr
->e_shoff
471 + sizeof(sechdr
) * sechdr
.sh_link
, SEEK_SET
);
472 if (read(fd
, &strtab
, sizeof(strtab
))
475 bswap_shdr(&strtab
, 1);
479 return; /* Shouldn't happen... */
482 /* Now know where the strtab and symtab are. Snarf them. */
483 s
= malloc(sizeof(*s
));
484 syms
= malloc(symtab
.sh_size
);
489 s
->disas_strtab
= strings
= malloc(strtab
.sh_size
);
490 if (!s
->disas_strtab
) {
496 lseek(fd
, symtab
.sh_offset
, SEEK_SET
);
497 if (read(fd
, syms
, symtab
.sh_size
) != symtab
.sh_size
) {
504 nsyms
= symtab
.sh_size
/ sizeof(struct elf_sym
);
509 // Throw away entries which we do not need.
510 if (syms
[i
].st_shndx
== SHN_UNDEF
||
511 syms
[i
].st_shndx
>= SHN_LORESERVE
||
512 ELF_ST_TYPE(syms
[i
].st_info
) != STT_FUNC
) {
515 syms
[i
] = syms
[nsyms
];
522 /* Attempt to free the storage associated with the local symbols
523 that we threw away. Whether or not this has any effect on the
524 memory allocation depends on the malloc implementation and how
525 many symbols we managed to discard. */
526 new_syms
= realloc(syms
, nsyms
* sizeof(*syms
));
527 if (new_syms
== NULL
) {
535 qsort(syms
, nsyms
, sizeof(*syms
), symcmp
);
537 lseek(fd
, strtab
.sh_offset
, SEEK_SET
);
538 if (read(fd
, strings
, strtab
.sh_size
) != strtab
.sh_size
) {
544 s
->disas_num_syms
= nsyms
;
545 #if ELF_CLASS == ELFCLASS32
546 s
->disas_symtab
.elf32
= syms
;
547 s
->lookup_symbol
= (lookup_symbol_t
)lookup_symbolxx
;
549 s
->disas_symtab
.elf64
= syms
;
550 s
->lookup_symbol
= (lookup_symbol_t
)lookup_symbolxx
;
556 int load_elf_binary(struct bsd_binprm
*bprm
, struct target_pt_regs
*regs
,
557 struct image_info
*info
)
559 struct elfhdr elf_ex
;
560 struct elfhdr interp_elf_ex
;
561 int interpreter_fd
= -1; /* avoid warning */
562 abi_ulong load_addr
, load_bias
;
563 int load_addr_set
= 0;
565 struct elf_phdr
* elf_ppnt
;
566 struct elf_phdr
*elf_phdata
;
567 abi_ulong elf_bss
, k
, elf_brk
;
569 char * elf_interpreter
;
570 abi_ulong elf_entry
, interp_load_addr
= 0;
571 abi_ulong start_code
, end_code
, start_data
, end_data
;
572 abi_ulong reloc_func_desc
= 0;
576 elf_ex
= *((struct elfhdr
*) bprm
->buf
); /* exec-header */
579 /* First of all, some simple consistency checks */
580 if ((elf_ex
.e_type
!= ET_EXEC
&& elf_ex
.e_type
!= ET_DYN
) ||
581 (!elf_check_arch(elf_ex
.e_machine
))) {
585 bprm
->p
= copy_elf_strings(1, &bprm
->filename
, bprm
->page
, bprm
->p
);
586 bprm
->p
= copy_elf_strings(bprm
->envc
, bprm
->envp
, bprm
->page
,bprm
->p
);
587 bprm
->p
= copy_elf_strings(bprm
->argc
, bprm
->argv
, bprm
->page
,bprm
->p
);
592 /* Now read in all of the header information */
593 elf_phdata
= (struct elf_phdr
*)malloc(elf_ex
.e_phentsize
*elf_ex
.e_phnum
);
594 if (elf_phdata
== NULL
) {
598 retval
= lseek(bprm
->fd
, elf_ex
.e_phoff
, SEEK_SET
);
600 retval
= read(bprm
->fd
, (char *)elf_phdata
,
601 elf_ex
.e_phentsize
* elf_ex
.e_phnum
);
605 perror("load_elf_binary");
611 bswap_phdr(elf_phdata
, elf_ex
.e_phnum
);
613 elf_ppnt
= elf_phdata
;
619 elf_interpreter
= NULL
;
620 start_code
= ~((abi_ulong
)0UL);
625 for (i
= 0;i
< elf_ex
.e_phnum
; i
++) {
626 if (elf_ppnt
->p_type
== PT_INTERP
) {
627 if (elf_interpreter
!= NULL
)
630 free(elf_interpreter
);
635 /* This is the program interpreter used for
636 * shared libraries - for now assume that this
637 * is an a.out format binary
640 elf_interpreter
= (char *)malloc(elf_ppnt
->p_filesz
);
642 if (elf_interpreter
== NULL
) {
648 retval
= lseek(bprm
->fd
, elf_ppnt
->p_offset
, SEEK_SET
);
650 retval
= read(bprm
->fd
, elf_interpreter
, elf_ppnt
->p_filesz
);
653 perror("load_elf_binary2");
658 retval
= open(path(elf_interpreter
), O_RDONLY
);
660 interpreter_fd
= retval
;
663 perror(elf_interpreter
);
665 /* retval = -errno; */
670 retval
= lseek(interpreter_fd
, 0, SEEK_SET
);
672 retval
= read(interpreter_fd
, bprm
->buf
, 128);
676 interp_elf_ex
= *((struct elfhdr
*) bprm
->buf
); /* elf exec-header */
679 perror("load_elf_binary3");
682 free(elf_interpreter
);
690 /* Some simple consistency checks for the interpreter */
691 if (elf_interpreter
) {
692 if (interp_elf_ex
.e_ident
[0] != 0x7f ||
693 strncmp((char *)&interp_elf_ex
.e_ident
[1], "ELF", 3) != 0) {
694 free(elf_interpreter
);
701 /* OK, we are done with that, now set up the arg stuff,
702 and then start this sucker up */
705 free(elf_interpreter
);
711 /* OK, This is the point of no return */
714 info
->start_mmap
= (abi_ulong
)ELF_START_MMAP
;
716 elf_entry
= (abi_ulong
) elf_ex
.e_entry
;
718 /* Do this so that we can load the interpreter, if need be. We will
719 change some of these later */
721 setup_arg_pages(bprm
, info
, &bprm
->p
, &bprm
->stringp
);
722 info
->start_stack
= bprm
->p
;
724 /* Now we do a little grungy work by mmaping the ELF image into
725 * the correct location in memory. At this point, we assume that
726 * the image should be loaded at fixed address, not at a variable
730 for (i
= 0, elf_ppnt
= elf_phdata
; i
< elf_ex
.e_phnum
; i
++, elf_ppnt
++) {
735 if (elf_ppnt
->p_type
!= PT_LOAD
)
738 if (elf_ppnt
->p_flags
& PF_R
) elf_prot
|= PROT_READ
;
739 if (elf_ppnt
->p_flags
& PF_W
) elf_prot
|= PROT_WRITE
;
740 if (elf_ppnt
->p_flags
& PF_X
) elf_prot
|= PROT_EXEC
;
741 elf_flags
= MAP_PRIVATE
| MAP_DENYWRITE
;
742 if (elf_ex
.e_type
== ET_EXEC
|| load_addr_set
) {
743 elf_flags
|= MAP_FIXED
;
744 } else if (elf_ex
.e_type
== ET_DYN
) {
745 /* Try and get dynamic programs out of the way of the default mmap
746 base, as well as whatever program they might try to exec. This
747 is because the brk will follow the loader, and is not movable. */
748 /* NOTE: for qemu, we do a big mmap to get enough space
749 without hardcoding any address */
750 error
= target_mmap(0, ET_DYN_MAP_SIZE
,
751 PROT_NONE
, MAP_PRIVATE
| MAP_ANON
,
757 load_bias
= TARGET_ELF_PAGESTART(error
- elf_ppnt
->p_vaddr
);
760 error
= target_mmap(TARGET_ELF_PAGESTART(load_bias
+ elf_ppnt
->p_vaddr
),
761 (elf_ppnt
->p_filesz
+
762 TARGET_ELF_PAGEOFFSET(elf_ppnt
->p_vaddr
)),
764 (MAP_FIXED
| MAP_PRIVATE
| MAP_DENYWRITE
),
766 (elf_ppnt
->p_offset
-
767 TARGET_ELF_PAGEOFFSET(elf_ppnt
->p_vaddr
)));
773 if (!load_addr_set
) {
775 load_addr
= elf_ppnt
->p_vaddr
- elf_ppnt
->p_offset
;
776 if (elf_ex
.e_type
== ET_DYN
) {
778 TARGET_ELF_PAGESTART(load_bias
+ elf_ppnt
->p_vaddr
);
779 load_addr
+= load_bias
;
780 reloc_func_desc
= load_bias
;
783 k
= elf_ppnt
->p_vaddr
;
788 k
= elf_ppnt
->p_vaddr
+ elf_ppnt
->p_filesz
;
791 if ((elf_ppnt
->p_flags
& PF_X
) && end_code
< k
)
795 k
= elf_ppnt
->p_vaddr
+ elf_ppnt
->p_memsz
;
796 if (k
> elf_brk
) elf_brk
= k
;
799 elf_entry
+= load_bias
;
800 elf_bss
+= load_bias
;
801 elf_brk
+= load_bias
;
802 start_code
+= load_bias
;
803 end_code
+= load_bias
;
804 start_data
+= load_bias
;
805 end_data
+= load_bias
;
807 if (elf_interpreter
) {
808 elf_entry
= load_elf_interp(&interp_elf_ex
, interpreter_fd
,
810 reloc_func_desc
= interp_load_addr
;
812 close(interpreter_fd
);
813 free(elf_interpreter
);
815 if (elf_entry
== ~((abi_ulong
)0UL)) {
816 printf("Unable to load interpreter\n");
825 if (qemu_log_enabled())
826 load_symbols(&elf_ex
, bprm
->fd
);
830 bprm
->p
= target_create_elf_tables(bprm
->p
, bprm
->argc
, bprm
->envc
,
831 bprm
->stringp
, &elf_ex
, load_addr
,
832 load_bias
, interp_load_addr
, info
);
833 info
->load_addr
= reloc_func_desc
;
834 info
->start_brk
= info
->brk
= elf_brk
;
835 info
->end_code
= end_code
;
836 info
->start_code
= start_code
;
837 info
->start_data
= start_data
;
838 info
->end_data
= end_data
;
839 info
->start_stack
= bprm
->p
;
841 /* Calling set_brk effectively mmaps the pages that we need for the bss and break
843 set_brk(elf_bss
, elf_brk
);
845 padzero(elf_bss
, elf_brk
);
847 info
->entry
= elf_entry
;
849 #ifdef USE_ELF_CORE_DUMP
850 bprm
->core_dump
= &elf_core_dump
;
852 bprm
->core_dump
= NULL
;
/* Initialize the initial thread's register state for the loaded image. */
void do_init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    /* Delegate to the per-architecture register setup. */
    target_thread_init(regs, infop);
}