/* bsd-user/elfload.c (mirror_qemu.git, "bsd-user: add stubbed out core dump support") */
/*
 * ELF loading code
 *
 * Copyright (c) 2013 Stacey D. Son
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"

#include "qemu.h"
#include "disas/disas.h"
#include "qemu/path.h"

static abi_ulong target_auxents;   /* Where the AUX entries are in target */
static size_t target_auxents_sz;   /* Size of AUX entries including AT_NULL */

#include "target_arch_reg.h"
#include "target_os_elf.h"
#include "target_os_stack.h"
#include "target_os_thread.h"
#include "target_os_user.h"

abi_ulong target_stksiz;
abi_ulong target_stkbas;

static int elf_core_dump(int signr, CPUArchState *env);

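/*
 * Trivial wrapper (the name follows the old Linux loader's memcpy_fromfs()):
 * guest memory is directly addressable here, so a plain memcpy() is enough.
 */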
static inline void memcpy_fromfs(void *to, const void *from, unsigned long n)
{
    memcpy(to, from, n);
}

#ifdef BSWAP_NEEDED
static void bswap_ehdr(struct elfhdr *ehdr)
{
    bswap16s(&ehdr->e_type);        /* Object file type */
    bswap16s(&ehdr->e_machine);     /* Architecture */
    bswap32s(&ehdr->e_version);     /* Object file version */
    bswaptls(&ehdr->e_entry);       /* Entry point virtual address */
    bswaptls(&ehdr->e_phoff);       /* Program header table file offset */
    bswaptls(&ehdr->e_shoff);       /* Section header table file offset */
    bswap32s(&ehdr->e_flags);       /* Processor-specific flags */
    bswap16s(&ehdr->e_ehsize);      /* ELF header size in bytes */
    bswap16s(&ehdr->e_phentsize);   /* Program header table entry size */
    bswap16s(&ehdr->e_phnum);       /* Program header table entry count */
    bswap16s(&ehdr->e_shentsize);   /* Section header table entry size */
    bswap16s(&ehdr->e_shnum);       /* Section header table entry count */
    bswap16s(&ehdr->e_shstrndx);    /* Section header string table index */
}

static void bswap_phdr(struct elf_phdr *phdr, int phnum)
{
    int i;

    for (i = 0; i < phnum; i++, phdr++) {
        bswap32s(&phdr->p_type);    /* Segment type */
        bswap32s(&phdr->p_flags);   /* Segment flags */
        bswaptls(&phdr->p_offset);  /* Segment file offset */
        bswaptls(&phdr->p_vaddr);   /* Segment virtual address */
        bswaptls(&phdr->p_paddr);   /* Segment physical address */
        bswaptls(&phdr->p_filesz);  /* Segment size in file */
        bswaptls(&phdr->p_memsz);   /* Segment size in memory */
        bswaptls(&phdr->p_align);   /* Segment alignment */
    }
}

static void bswap_shdr(struct elf_shdr *shdr, int shnum)
{
    int i;

    for (i = 0; i < shnum; i++, shdr++) {
        bswap32s(&shdr->sh_name);
        bswap32s(&shdr->sh_type);
        bswaptls(&shdr->sh_flags);
        bswaptls(&shdr->sh_addr);
        bswaptls(&shdr->sh_offset);
        bswaptls(&shdr->sh_size);
        bswap32s(&shdr->sh_link);
        bswap32s(&shdr->sh_info);
        bswaptls(&shdr->sh_addralign);
        bswaptls(&shdr->sh_entsize);
    }
}

static void bswap_sym(struct elf_sym *sym)
{
    bswap32s(&sym->st_name);
    bswaptls(&sym->st_value);
    bswaptls(&sym->st_size);
    bswap16s(&sym->st_shndx);
}

static void bswap_note(struct elf_note *en)
{
    bswap32s(&en->n_namesz);
    bswap32s(&en->n_descsz);
    bswap32s(&en->n_type);
}

#else /* ! BSWAP_NEEDED */

static void bswap_ehdr(struct elfhdr *ehdr) { }
static void bswap_phdr(struct elf_phdr *phdr, int phnum) { }
static void bswap_shdr(struct elf_shdr *shdr, int shnum) { }
static void bswap_sym(struct elf_sym *sym) { }
static void bswap_note(struct elf_note *en) { }

#endif /* ! BSWAP_NEEDED */
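/*
 * BSWAP_NEEDED is defined by the target/OS headers when the guest's byte
 * order differs from the host's; on same-endian builds the helpers above
 * compile to no-ops.
 */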

#include "elfcore.c"
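/*
 * elfcore.c provides the elf_core_dump() declared above (currently a stub);
 * it is hooked up via bprm->core_dump at the end of load_elf_binary().
 */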

/*
 * 'copy_elf_strings()' copies argument/environment strings from user
 * memory to free pages in kernel mem. These are in a format ready
 * to be put directly into the top of new user memory.
 */
static abi_ulong copy_elf_strings(int argc, char **argv, void **page,
                                  abi_ulong p)
{
    char *tmp, *tmp1, *pag = NULL;
    int len, offset = 0;

    if (!p) {
        return 0;       /* bullet-proofing */
    }
    while (argc-- > 0) {
        tmp = argv[argc];
        if (!tmp) {
            fprintf(stderr, "VFS: argc is wrong\n");
            exit(-1);
        }
        tmp1 = tmp;
        while (*tmp++);
        len = tmp - tmp1;
        if (p < len) {  /* this shouldn't happen - 128kB */
            return 0;
        }
        while (len) {
            --p; --tmp; --len;
            if (--offset < 0) {
                offset = p % TARGET_PAGE_SIZE;
                pag = (char *)page[p / TARGET_PAGE_SIZE];
                if (!pag) {
                    pag = g_try_malloc0(TARGET_PAGE_SIZE);
                    page[p / TARGET_PAGE_SIZE] = pag;
                    if (!pag)
                        return 0;
                }
            }
            if (len == 0 || offset == 0) {
                *(pag + offset) = *tmp;
            } else {
                int bytes_to_copy = (len > offset) ? offset : len;
                tmp -= bytes_to_copy;
                p -= bytes_to_copy;
                offset -= bytes_to_copy;
                len -= bytes_to_copy;
                memcpy_fromfs(pag + offset, tmp, bytes_to_copy + 1);
            }
        }
    }
    return p;
}
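/*
 * Usage sketch: the page[] buffers are filled from the top down, so e.g.
 *
 *     p = copy_elf_strings(bprm->argc, bprm->argv, bprm->page, p);
 *
 * leaves p pointing at the lowest byte written; load_elf_binary() chains
 * such calls for the filename, environment and argument strings before the
 * block is copied onto the new stack.
 */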

static void setup_arg_pages(struct bsd_binprm *bprm, struct image_info *info,
                            abi_ulong *stackp, abi_ulong *stringp)
{
    abi_ulong stack_base, size;
    abi_long addr;

    /* Create enough stack to hold everything. If we don't use
     * it for args, we'll use it for something else...
     */
    size = target_dflssiz;
    stack_base = TARGET_USRSTACK - size;
    addr = target_mmap(stack_base,
                       size + qemu_host_page_size,
                       PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANON,
                       -1, 0);
    if (addr == -1) {
        perror("stk mmap");
        exit(-1);
    }
    /* we reserve one extra page at the top of the stack as guard */
    target_mprotect(addr + size, qemu_host_page_size, PROT_NONE);

    target_stksiz = size;
    target_stkbas = addr;

    if (setup_initial_stack(bprm, stackp, stringp) != 0) {
        perror("stk setup");
        exit(-1);
    }
}
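/*
 * The result: a target_dflssiz-byte read/write anonymous mapping (normally
 * ending at TARGET_USRSTACK) with one host page above it set PROT_NONE as a
 * guard. target_stkbas/target_stksiz record the region for the rest of
 * bsd-user, and setup_initial_stack() (presumably provided by the
 * target_os_stack.h include above) lays out the initial stack contents and
 * updates *stackp/*stringp.
 */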

static void set_brk(abi_ulong start, abi_ulong end)
{
    /* page-align the start and end addresses... */
    start = HOST_PAGE_ALIGN(start);
    end = HOST_PAGE_ALIGN(end);
    if (end <= start)
        return;
    if (target_mmap(start, end - start,
                    PROT_READ | PROT_WRITE | PROT_EXEC,
                    MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0) == -1) {
        perror("cannot mmap brk");
        exit(-1);
    }
}
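/*
 * Example (assuming 4 KiB host pages): set_brk(0x1234, 0x5678) rounds both
 * addresses up and maps the anonymous read/write/exec region [0x2000, 0x6000).
 */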


/* We need to explicitly zero any fractional pages after the data
   section (i.e. bss). These would otherwise contain junk from the
   file that should not be in memory. */
static void padzero(abi_ulong elf_bss, abi_ulong last_bss)
{
    abi_ulong nbyte;

    if (elf_bss >= last_bss)
        return;

    /* XXX: this is really a hack : if the real host page size is
       smaller than the target page size, some pages after the end
       of the file may not be mapped. A better fix would be to
       patch target_mmap(), but it is more complicated as the file
       size must be known */
    if (qemu_real_host_page_size < qemu_host_page_size) {
        abi_ulong end_addr, end_addr1;
        end_addr1 = REAL_HOST_PAGE_ALIGN(elf_bss);
        end_addr = HOST_PAGE_ALIGN(elf_bss);
        if (end_addr1 < end_addr) {
            mmap((void *)g2h_untagged(end_addr1), end_addr - end_addr1,
                 PROT_READ | PROT_WRITE | PROT_EXEC,
                 MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0);
        }
    }

    nbyte = elf_bss & (qemu_host_page_size - 1);
    if (nbyte) {
        nbyte = qemu_host_page_size - nbyte;
        do {
            /* FIXME - what to do if put_user() fails? */
            put_user_u8(0, elf_bss);
            elf_bss++;
        } while (--nbyte);
    }
}
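/*
 * Example (assuming 4 KiB host pages): with elf_bss = 0x1234 (and last_bss
 * beyond it), the loop above zeroes the 0xdcc bytes from 0x1234 up to the
 * 0x2000 page boundary, one byte at a time via put_user_u8().
 */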

static abi_ulong load_elf_interp(struct elfhdr *interp_elf_ex,
                                 int interpreter_fd,
                                 abi_ulong *interp_load_addr)
{
    struct elf_phdr *elf_phdata = NULL;
    struct elf_phdr *eppnt;
    abi_ulong load_addr = 0;
    int load_addr_set = 0;
    int retval;
    abi_ulong last_bss, elf_bss;
    abi_ulong error;
    int i;

    elf_bss = 0;
    last_bss = 0;
    error = 0;

    bswap_ehdr(interp_elf_ex);
    /* First of all, some simple consistency checks */
    if ((interp_elf_ex->e_type != ET_EXEC &&
         interp_elf_ex->e_type != ET_DYN) ||
        !elf_check_arch(interp_elf_ex->e_machine)) {
        return ~((abi_ulong)0UL);
    }


    /* Now read in all of the header information */

    if (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum > TARGET_PAGE_SIZE)
        return ~(abi_ulong)0UL;

    elf_phdata = (struct elf_phdr *)
        malloc(sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);

    if (!elf_phdata)
        return ~((abi_ulong)0UL);

    /*
     * If the size of this structure has changed, then punt, since
     * we will be doing the wrong thing.
     */
    if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr)) {
        free(elf_phdata);
        return ~((abi_ulong)0UL);
    }

    retval = lseek(interpreter_fd, interp_elf_ex->e_phoff, SEEK_SET);
    if (retval >= 0) {
        retval = read(interpreter_fd,
                      (char *) elf_phdata,
                      sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
    }
    if (retval < 0) {
        perror("load_elf_interp");
        free(elf_phdata);
        exit(-1);
    }
    bswap_phdr(elf_phdata, interp_elf_ex->e_phnum);

    if (interp_elf_ex->e_type == ET_DYN) {
        /* in order to avoid hardcoding the interpreter load
           address in qemu, we allocate a big enough memory zone */
        error = target_mmap(0, INTERP_MAP_SIZE,
                            PROT_NONE, MAP_PRIVATE | MAP_ANON,
                            -1, 0);
        if (error == -1) {
            perror("mmap");
            exit(-1);
        }
        load_addr = error;
        load_addr_set = 1;
    }
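    /*
     * The PROT_NONE mapping above only reserves address space; the PT_LOAD
     * loop below remaps the interpreter's segments over it with MAP_FIXED
     * at load_addr + p_vaddr.
     */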

    eppnt = elf_phdata;
    for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++)
        if (eppnt->p_type == PT_LOAD) {
            int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
            int elf_prot = 0;
            abi_ulong vaddr = 0;
            abi_ulong k;

            if (eppnt->p_flags & PF_R) elf_prot = PROT_READ;
            if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
            if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
            if (interp_elf_ex->e_type == ET_EXEC || load_addr_set) {
                elf_type |= MAP_FIXED;
                vaddr = eppnt->p_vaddr;
            }
            error = target_mmap(load_addr + TARGET_ELF_PAGESTART(vaddr),
                                eppnt->p_filesz + TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr),
                                elf_prot,
                                elf_type,
                                interpreter_fd,
                                eppnt->p_offset - TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr));

            if (error == -1) {
                /* Real error */
                close(interpreter_fd);
                free(elf_phdata);
                return ~((abi_ulong)0UL);
            }

            if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) {
                load_addr = error;
                load_addr_set = 1;
            }

            /*
             * Find the end of the file mapping for this phdr, and keep
             * track of the largest address we see for this.
             */
            k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
            if (k > elf_bss) elf_bss = k;

            /*
             * Do the same thing for the memory mapping - between
             * elf_bss and last_bss is the bss section.
             */
            k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
            if (k > last_bss) last_bss = k;
        }

    /* The interpreter's segments are all mapped now; we no longer need its fd. */

    close(interpreter_fd);

    /*
     * Now fill out the bss section. First pad the last page up
     * to the page boundary, and then perform a mmap to make sure
     * that there are zeromapped pages up to and including the last
     * bss page.
     */
    padzero(elf_bss, last_bss);
    elf_bss = TARGET_ELF_PAGESTART(elf_bss + qemu_host_page_size - 1); /* What we have mapped so far */

    /* Map the last of the bss segment */
    if (last_bss > elf_bss) {
        target_mmap(elf_bss, last_bss - elf_bss,
                    PROT_READ | PROT_WRITE | PROT_EXEC,
                    MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0);
    }
    free(elf_phdata);

    *interp_load_addr = load_addr;
    return ((abi_ulong) interp_elf_ex->e_entry) + load_addr;
}

static int symfind(const void *s0, const void *s1)
{
    target_ulong addr = *(target_ulong *)s0;
    struct elf_sym *sym = (struct elf_sym *)s1;
    int result = 0;
    if (addr < sym->st_value) {
        result = -1;
    } else if (addr >= sym->st_value + sym->st_size) {
        result = 1;
    }
    return result;
}
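/*
 * symfind() treats a symbol as "equal" to the key when the address falls
 * inside [st_value, st_value + st_size), which is what lets bsearch() in
 * lookup_symbolxx() map an arbitrary PC back to the enclosing function.
 */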

static const char *lookup_symbolxx(struct syminfo *s, target_ulong orig_addr)
{
#if ELF_CLASS == ELFCLASS32
    struct elf_sym *syms = s->disas_symtab.elf32;
#else
    struct elf_sym *syms = s->disas_symtab.elf64;
#endif

    /* binary search */
    struct elf_sym *sym;

    sym = bsearch(&orig_addr, syms, s->disas_num_syms, sizeof(*syms), symfind);
    if (sym != NULL) {
        return s->disas_strtab + sym->st_name;
    }

    return "";
}

/* FIXME: This should use elf_ops.h */
static int symcmp(const void *s0, const void *s1)
{
    struct elf_sym *sym0 = (struct elf_sym *)s0;
    struct elf_sym *sym1 = (struct elf_sym *)s1;
    return (sym0->st_value < sym1->st_value)
        ? -1
        : ((sym0->st_value > sym1->st_value) ? 1 : 0);
}

/* Best attempt to load symbols from this ELF object. */
static void load_symbols(struct elfhdr *hdr, int fd)
{
    unsigned int i, nsyms;
    struct elf_shdr sechdr, symtab, strtab;
    char *strings;
    struct syminfo *s;
    struct elf_sym *syms, *new_syms;

    lseek(fd, hdr->e_shoff, SEEK_SET);
    for (i = 0; i < hdr->e_shnum; i++) {
        if (read(fd, &sechdr, sizeof(sechdr)) != sizeof(sechdr))
            return;
        bswap_shdr(&sechdr, 1);
        if (sechdr.sh_type == SHT_SYMTAB) {
            symtab = sechdr;
            lseek(fd, hdr->e_shoff
                  + sizeof(sechdr) * sechdr.sh_link, SEEK_SET);
            if (read(fd, &strtab, sizeof(strtab))
                != sizeof(strtab))
                return;
            bswap_shdr(&strtab, 1);
            goto found;
        }
    }
    return; /* Shouldn't happen... */

 found:
    /* Now we know where the strtab and symtab are. Snarf them. */
    s = malloc(sizeof(*s));
    syms = malloc(symtab.sh_size);
    if (!s || !syms) {
        free(s);
        free(syms);
        return;
    }
    s->disas_strtab = strings = malloc(strtab.sh_size);
    if (!s->disas_strtab) {
        free(s);
        free(syms);
        return;
    }

    lseek(fd, symtab.sh_offset, SEEK_SET);
    if (read(fd, syms, symtab.sh_size) != symtab.sh_size) {
        free(s);
        free(syms);
        free(strings);
        return;
    }

    nsyms = symtab.sh_size / sizeof(struct elf_sym);

    i = 0;
    while (i < nsyms) {
        bswap_sym(syms + i);
        /* Throw away entries which we do not need. */
        if (syms[i].st_shndx == SHN_UNDEF ||
            syms[i].st_shndx >= SHN_LORESERVE ||
            ELF_ST_TYPE(syms[i].st_info) != STT_FUNC) {
            nsyms--;
            if (i < nsyms) {
                syms[i] = syms[nsyms];
            }
            continue;
        }
        i++;
    }
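    /*
     * The loop above compacts the table in place: unwanted entries
     * (undefined, in reserved sections, or not STT_FUNC) are overwritten
     * with the current last entry and nsyms is shrunk; i is only advanced
     * for entries that are kept, so the swapped-in entry is examined on
     * the next pass.
     */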

    /* Attempt to free the storage associated with the local symbols
       that we threw away. Whether or not this has any effect on the
       memory allocation depends on the malloc implementation and how
       many symbols we managed to discard. */
    new_syms = realloc(syms, nsyms * sizeof(*syms));
    if (new_syms == NULL) {
        free(s);
        free(syms);
        free(strings);
        return;
    }
    syms = new_syms;

    qsort(syms, nsyms, sizeof(*syms), symcmp);

    lseek(fd, strtab.sh_offset, SEEK_SET);
    if (read(fd, strings, strtab.sh_size) != strtab.sh_size) {
        free(s);
        free(syms);
        free(strings);
        return;
    }
    s->disas_num_syms = nsyms;
#if ELF_CLASS == ELFCLASS32
    s->disas_symtab.elf32 = syms;
    s->lookup_symbol = (lookup_symbol_t)lookup_symbolxx;
#else
    s->disas_symtab.elf64 = syms;
    s->lookup_symbol = (lookup_symbol_t)lookup_symbolxx;
#endif
    s->next = syminfos;
    syminfos = s;
}

int load_elf_binary(struct bsd_binprm *bprm, struct target_pt_regs *regs,
                    struct image_info *info)
{
    struct elfhdr elf_ex;
    struct elfhdr interp_elf_ex;
    int interpreter_fd = -1; /* avoid warning */
    abi_ulong load_addr, load_bias;
    int load_addr_set = 0;
    int i;
    struct elf_phdr *elf_ppnt;
    struct elf_phdr *elf_phdata;
    abi_ulong elf_bss, k, elf_brk;
    int retval;
    char *elf_interpreter;
    abi_ulong elf_entry, interp_load_addr = 0;
    abi_ulong start_code, end_code, start_data, end_data;
    abi_ulong reloc_func_desc = 0;

    load_addr = 0;
    load_bias = 0;
    elf_ex = *((struct elfhdr *) bprm->buf); /* exec-header */
    bswap_ehdr(&elf_ex);

    /* First of all, some simple consistency checks */
    if ((elf_ex.e_type != ET_EXEC && elf_ex.e_type != ET_DYN) ||
        (!elf_check_arch(elf_ex.e_machine))) {
        return -ENOEXEC;
    }

    bprm->p = copy_elf_strings(1, &bprm->filename, bprm->page, bprm->p);
    bprm->p = copy_elf_strings(bprm->envc, bprm->envp, bprm->page, bprm->p);
    bprm->p = copy_elf_strings(bprm->argc, bprm->argv, bprm->page, bprm->p);
    if (!bprm->p) {
        retval = -E2BIG;
    }

    /* Now read in all of the header information */
    elf_phdata = (struct elf_phdr *)malloc(elf_ex.e_phentsize * elf_ex.e_phnum);
    if (elf_phdata == NULL) {
        return -ENOMEM;
    }

    retval = lseek(bprm->fd, elf_ex.e_phoff, SEEK_SET);
    if (retval > 0) {
        retval = read(bprm->fd, (char *)elf_phdata,
                      elf_ex.e_phentsize * elf_ex.e_phnum);
    }

    if (retval < 0) {
        perror("load_elf_binary");
        free(elf_phdata);
        exit(-1);
    }

    bswap_phdr(elf_phdata, elf_ex.e_phnum);

    elf_ppnt = elf_phdata;

    elf_bss = 0;
    elf_brk = 0;


    elf_interpreter = NULL;
    start_code = ~((abi_ulong)0UL);
    end_code = 0;
    start_data = 0;
    end_data = 0;

    for (i = 0; i < elf_ex.e_phnum; i++) {
        if (elf_ppnt->p_type == PT_INTERP) {
            if (elf_interpreter != NULL) {
                free(elf_phdata);
                free(elf_interpreter);
                close(bprm->fd);
                return -EINVAL;
            }

            /* This is the program interpreter used for dynamically
             * linked executables; the PT_INTERP segment holds its
             * path name.
             */

            elf_interpreter = (char *)malloc(elf_ppnt->p_filesz);

            if (elf_interpreter == NULL) {
                free(elf_phdata);
                close(bprm->fd);
                return -ENOMEM;
            }

            retval = lseek(bprm->fd, elf_ppnt->p_offset, SEEK_SET);
            if (retval >= 0) {
                retval = read(bprm->fd, elf_interpreter, elf_ppnt->p_filesz);
            }
            if (retval < 0) {
                perror("load_elf_binary2");
                exit(-1);
            }

            if (retval >= 0) {
                retval = open(path(elf_interpreter), O_RDONLY);
                if (retval >= 0) {
                    interpreter_fd = retval;
                } else {
                    perror(elf_interpreter);
                    exit(-1);
                    /* retval = -errno; */
                }
            }

            if (retval >= 0) {
                retval = lseek(interpreter_fd, 0, SEEK_SET);
                if (retval >= 0) {
                    retval = read(interpreter_fd, bprm->buf, 128);
                }
            }
            if (retval >= 0) {
                interp_elf_ex = *((struct elfhdr *) bprm->buf); /* elf exec-header */
            }
            if (retval < 0) {
                perror("load_elf_binary3");
                free(elf_phdata);
                free(elf_interpreter);
                close(bprm->fd);
                exit(-1);
            }
        }
        elf_ppnt++;
    }
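    /*
     * At this point elf_interpreter (if set) holds the NUL-terminated path
     * from PT_INTERP (typically /libexec/ld-elf.so.1 on FreeBSD),
     * interpreter_fd is open on that file, and its ELF header has been
     * read into interp_elf_ex.
     */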

    /* Some simple consistency checks for the interpreter */
    if (elf_interpreter) {
        if (interp_elf_ex.e_ident[0] != 0x7f ||
            strncmp((char *)&interp_elf_ex.e_ident[1], "ELF", 3) != 0) {
            free(elf_interpreter);
            free(elf_phdata);
            close(bprm->fd);
            return -ELIBBAD;
        }
    }

    /* OK, we are done with that, now set up the arg stuff,
       and then start this sucker up */

    if (!bprm->p) {
        free(elf_interpreter);
        free(elf_phdata);
        close(bprm->fd);
        return -E2BIG;
    }

    /* OK, this is the point of no return */
    info->end_data = 0;
    info->end_code = 0;
    info->start_mmap = (abi_ulong)ELF_START_MMAP;
    info->mmap = 0;
    elf_entry = (abi_ulong) elf_ex.e_entry;

    /* Do this so that we can load the interpreter, if need be. We will
       change some of these later */
    info->rss = 0;
    setup_arg_pages(bprm, info, &bprm->p, &bprm->stringp);
    info->start_stack = bprm->p;

    /* Now we do a little grungy work by mmaping the ELF image into
     * the correct location in memory. At this point, we assume that
     * the image should be loaded at fixed address, not at a variable
     * address.
     */

    for (i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum; i++, elf_ppnt++) {
        int elf_prot = 0;
        int elf_flags = 0;
        abi_ulong error;

        if (elf_ppnt->p_type != PT_LOAD)
            continue;

        if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ;
        if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
        if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
        elf_flags = MAP_PRIVATE | MAP_DENYWRITE;
        if (elf_ex.e_type == ET_EXEC || load_addr_set) {
            elf_flags |= MAP_FIXED;
        } else if (elf_ex.e_type == ET_DYN) {
            /* Try and get dynamic programs out of the way of the default mmap
               base, as well as whatever program they might try to exec. This
               is because the brk will follow the loader, and is not movable. */
            /* NOTE: for qemu, we do a big mmap to get enough space
               without hardcoding any address */
            error = target_mmap(0, ET_DYN_MAP_SIZE,
                                PROT_NONE, MAP_PRIVATE | MAP_ANON,
                                -1, 0);
            if (error == -1) {
                perror("mmap");
                exit(-1);
            }
            load_bias = TARGET_ELF_PAGESTART(error - elf_ppnt->p_vaddr);
        }

        error = target_mmap(TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr),
                            (elf_ppnt->p_filesz +
                             TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)),
                            elf_prot,
                            (MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE),
                            bprm->fd,
                            (elf_ppnt->p_offset -
                             TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)));
        if (error == -1) {
            perror("mmap");
            exit(-1);
        }

        if (!load_addr_set) {
            load_addr_set = 1;
            load_addr = elf_ppnt->p_vaddr - elf_ppnt->p_offset;
            if (elf_ex.e_type == ET_DYN) {
                load_bias += error -
                    TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr);
                load_addr += load_bias;
                reloc_func_desc = load_bias;
            }
        }
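        /*
         * load_bias stays 0 for ET_EXEC images. For ET_DYN it ends up as the
         * (page-aligned) difference between where the first PT_LOAD segment
         * actually landed and where it asked to be, so the entry point and
         * every later p_vaddr are relocated by the same amount.
         */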
        k = elf_ppnt->p_vaddr;
        if (k < start_code)
            start_code = k;
        if (start_data < k)
            start_data = k;
        k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
        if (k > elf_bss)
            elf_bss = k;
        if ((elf_ppnt->p_flags & PF_X) && end_code < k)
            end_code = k;
        if (end_data < k)
            end_data = k;
        k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
        if (k > elf_brk) elf_brk = k;
    }

    elf_entry += load_bias;
    elf_bss += load_bias;
    elf_brk += load_bias;
    start_code += load_bias;
    end_code += load_bias;
    start_data += load_bias;
    end_data += load_bias;

    if (elf_interpreter) {
        elf_entry = load_elf_interp(&interp_elf_ex, interpreter_fd,
                                    &interp_load_addr);
        reloc_func_desc = interp_load_addr;

        close(interpreter_fd);
        free(elf_interpreter);

        if (elf_entry == ~((abi_ulong)0UL)) {
            fprintf(stderr, "Unable to load interpreter\n");
            free(elf_phdata);
            exit(-1);
        }
    }

    free(elf_phdata);

    if (qemu_log_enabled())
        load_symbols(&elf_ex, bprm->fd);

    close(bprm->fd);

    bprm->p = target_create_elf_tables(bprm->p, bprm->argc, bprm->envc,
                                       bprm->stringp, &elf_ex, load_addr,
                                       load_bias, interp_load_addr, info);
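    /*
     * target_create_elf_tables() comes from the target_os_elf.h include
     * above; it is expected to lay out argc/argv/envp plus the ELF auxiliary
     * vector (AT_PHDR, AT_BASE, AT_ENTRY, ...) on the new stack and return
     * the final stack pointer. The exact layout is target- and OS-specific.
     */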
    info->load_addr = reloc_func_desc;
    info->start_brk = info->brk = elf_brk;
    info->end_code = end_code;
    info->start_code = start_code;
    info->start_data = start_data;
    info->end_data = end_data;
    info->start_stack = bprm->p;

    /* Calling set_brk effectively mmaps the pages that we need for the bss
       and break sections */
    set_brk(elf_bss, elf_brk);

    padzero(elf_bss, elf_brk);
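    /*
     * Between them, set_brk() maps anonymous zero pages covering the bss/brk
     * range and padzero() clears the tail of the last file-backed page, so
     * the guest sees properly zeroed bss even when the file and memory sizes
     * of the segment differ.
     */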

    info->entry = elf_entry;

#ifdef USE_ELF_CORE_DUMP
    bprm->core_dump = &elf_core_dump;
#else
    bprm->core_dump = NULL;
#endif

    return 0;
}

void do_init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    target_thread_init(regs, infop);
}