/*
 * bsd-user/elfload.c — from mirror_qemu.git (git.proxmox.com mirror),
 * as of commit "bsd-user: remove a.out support".
 */
1 /*
2 * ELF loading code
3 *
4 * Copyright (c) 2013 Stacey D. Son
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include "qemu/osdep.h"
21
22 #include "qemu.h"
23 #include "disas/disas.h"
24 #include "qemu/path.h"
25
26 #include "target_arch_elf.h"
27
28 /* this flag is uneffective under linux too, should be deleted */
29 #ifndef MAP_DENYWRITE
30 #define MAP_DENYWRITE 0
31 #endif
32
33 /* should probably go in elf.h */
34 #ifndef ELIBBAD
35 #define ELIBBAD 80
36 #endif
37
38 #ifndef ELF_PLATFORM
39 #define ELF_PLATFORM (NULL)
40 #endif
41
42 #ifndef ELF_HWCAP
43 #define ELF_HWCAP 0
44 #endif
45
46 #ifdef TARGET_ABI32
47 #undef ELF_CLASS
48 #define ELF_CLASS ELFCLASS32
49 #undef bswaptls
50 #define bswaptls(ptr) bswap32s(ptr)
51 #endif
52
53 #include "elf.h"
54
55 /* max code+data+bss space allocated to elf interpreter */
56 #define INTERP_MAP_SIZE (32 * 1024 * 1024)
57
58 /* max code+data+bss+brk space allocated to ET_DYN executables */
59 #define ET_DYN_MAP_SIZE (128 * 1024 * 1024)
60
61 /* Necessary parameters */
62 #define TARGET_ELF_EXEC_PAGESIZE TARGET_PAGE_SIZE
63 #define TARGET_ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(TARGET_ELF_EXEC_PAGESIZE - 1))
64 #define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE - 1))
65
66 #define DLINFO_ITEMS 12
67
/*
 * Linux-heritage shim: in the kernel loader this copied from user ("fs")
 * space; under qemu user emulation a plain memcpy suffices.
 */
static inline void memcpy_fromfs(void *to, const void *from, unsigned long n)
{
    memcpy(to, from, n);
}
72
#ifdef BSWAP_NEEDED
/*
 * In-place endianness conversion helpers, compiled only when the target's
 * byte order differs from the host's.  bswaptls() swaps a target-long-sized
 * field (32 or 64 bits depending on ELF_CLASS / TARGET_ABI32).
 */

/* Byte-swap an ELF file header in place. */
static void bswap_ehdr(struct elfhdr *ehdr)
{
    bswap16s(&ehdr->e_type);            /* Object file type */
    bswap16s(&ehdr->e_machine);         /* Architecture */
    bswap32s(&ehdr->e_version);         /* Object file version */
    bswaptls(&ehdr->e_entry);           /* Entry point virtual address */
    bswaptls(&ehdr->e_phoff);           /* Program header table file offset */
    bswaptls(&ehdr->e_shoff);           /* Section header table file offset */
    bswap32s(&ehdr->e_flags);           /* Processor-specific flags */
    bswap16s(&ehdr->e_ehsize);          /* ELF header size in bytes */
    bswap16s(&ehdr->e_phentsize);       /* Program header table entry size */
    bswap16s(&ehdr->e_phnum);           /* Program header table entry count */
    bswap16s(&ehdr->e_shentsize);       /* Section header table entry size */
    bswap16s(&ehdr->e_shnum);           /* Section header table entry count */
    bswap16s(&ehdr->e_shstrndx);        /* Section header string table index */
}

/* Byte-swap an ELF program header in place. */
static void bswap_phdr(struct elf_phdr *phdr)
{
    bswap32s(&phdr->p_type);            /* Segment type */
    bswaptls(&phdr->p_offset);          /* Segment file offset */
    bswaptls(&phdr->p_vaddr);           /* Segment virtual address */
    bswaptls(&phdr->p_paddr);           /* Segment physical address */
    bswaptls(&phdr->p_filesz);          /* Segment size in file */
    bswaptls(&phdr->p_memsz);           /* Segment size in memory */
    bswap32s(&phdr->p_flags);           /* Segment flags */
    bswaptls(&phdr->p_align);           /* Segment alignment */
}

/* Byte-swap an ELF section header in place. */
static void bswap_shdr(struct elf_shdr *shdr)
{
    bswap32s(&shdr->sh_name);
    bswap32s(&shdr->sh_type);
    bswaptls(&shdr->sh_flags);
    bswaptls(&shdr->sh_addr);
    bswaptls(&shdr->sh_offset);
    bswaptls(&shdr->sh_size);
    bswap32s(&shdr->sh_link);
    bswap32s(&shdr->sh_info);
    bswaptls(&shdr->sh_addralign);
    bswaptls(&shdr->sh_entsize);
}

/* Byte-swap an ELF symbol table entry in place. */
static void bswap_sym(struct elf_sym *sym)
{
    bswap32s(&sym->st_name);
    bswaptls(&sym->st_value);
    bswaptls(&sym->st_size);
    bswap16s(&sym->st_shndx);
}
#endif
125
/*
 * 'copy_elf_strings()' copies argument/envelope strings from user
 * memory to free pages in kernel mem. These are in a format ready
 * to be put directly into the top of new user memory.
 *
 * Strings are copied backwards (last argv entry first, last byte first)
 * so they pack downward from offset 'p' inside the MAX_ARG_PAGES window
 * held in 'page'.  Pages are allocated lazily as the copy crosses page
 * boundaries.  Returns the new (lower) offset 'p', or 0 on failure
 * (exhausted space or out of memory).
 */
static abi_ulong copy_elf_strings(int argc, char **argv, void **page,
                                  abi_ulong p)
{
    char *tmp, *tmp1, *pag = NULL;
    int len, offset = 0;

    if (!p) {
        return 0;       /* bullet-proofing */
    }
    while (argc-- > 0) {
        tmp = argv[argc];
        if (!tmp) {
            fprintf(stderr, "VFS: argc is wrong");
            exit(-1);
        }
        tmp1 = tmp;
        /* Advance tmp one past the NUL so len includes the terminator. */
        while (*tmp++);
        len = tmp - tmp1;
        if (p < len) {  /* this shouldn't happen - 128kB */
            return 0;
        }
        while (len) {
            --p; --tmp; --len;
            /* Crossed into a new (lower) page: locate or allocate it. */
            if (--offset < 0) {
                offset = p % TARGET_PAGE_SIZE;
                pag = (char *)page[p / TARGET_PAGE_SIZE];
                if (!pag) {
                    pag = g_try_malloc0(TARGET_PAGE_SIZE);
                    page[p / TARGET_PAGE_SIZE] = pag;
                    if (!pag)
                        return 0;
                }
            }
            /* At a boundary, copy the single byte; otherwise bulk-copy as
               much of the remaining string as fits in this page. */
            if (len == 0 || offset == 0) {
                *(pag + offset) = *tmp;
            }
            else {
                int bytes_to_copy = (len > offset) ? offset : len;
                tmp -= bytes_to_copy;
                p -= bytes_to_copy;
                offset -= bytes_to_copy;
                len -= bytes_to_copy;
                /* NOTE(review): the '+ 1' copies one byte beyond
                   bytes_to_copy — presumably intentional to include the
                   byte at the current position; confirm against the
                   matching linux-user implementation. */
                memcpy_fromfs(pag + offset, tmp, bytes_to_copy + 1);
            }
        }
    }
    return p;
}
180
/*
 * Reserve the guest stack and copy the argument/environment pages staged
 * by copy_elf_strings() (bprm->page[]) onto its top.
 *
 * p:    offset of the strings within the MAX_ARG_PAGES window
 * bprm: holds the staged pages; each is freed here after copying
 * info: info->rss is incremented once per copied page
 *
 * Returns 'p' rebased to its final guest address.
 */
static abi_ulong setup_arg_pages(abi_ulong p, struct bsd_binprm *bprm,
                                 struct image_info *info)
{
    abi_ulong stack_base, size, error;
    int i;

    /* Create enough stack to hold everything. If we don't use
     * it for args, we'll use it for something else...
     */
    /* NOTE(review): x86_stack_size is used regardless of the target
       architecture — presumably a historical default; verify. */
    size = x86_stack_size;
    if (size < MAX_ARG_PAGES * TARGET_PAGE_SIZE)
        size = MAX_ARG_PAGES * TARGET_PAGE_SIZE;
    error = target_mmap(0,
                        size + qemu_host_page_size,
                        PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANON,
                        -1, 0);
    if (error == -1) {
        perror("stk mmap");
        exit(-1);
    }
    /* we reserve one extra page at the top of the stack as guard */
    target_mprotect(error + size, qemu_host_page_size, PROT_NONE);

    /* The arg pages occupy the top MAX_ARG_PAGES pages of the stack. */
    stack_base = error + size - MAX_ARG_PAGES * TARGET_PAGE_SIZE;
    p += stack_base;

    for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
        if (bprm->page[i]) {
            info->rss++;
            /* FIXME - check return value of memcpy_to_target() for failure */
            memcpy_to_target(stack_base, bprm->page[i], TARGET_PAGE_SIZE);
            g_free(bprm->page[i]);
        }
        stack_base += TARGET_PAGE_SIZE;
    }
    return p;
}
219
220 static void set_brk(abi_ulong start, abi_ulong end)
221 {
222 /* page-align the start and end addresses... */
223 start = HOST_PAGE_ALIGN(start);
224 end = HOST_PAGE_ALIGN(end);
225 if (end <= start)
226 return;
227 if (target_mmap(start, end - start,
228 PROT_READ | PROT_WRITE | PROT_EXEC,
229 MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0) == -1) {
230 perror("cannot mmap brk");
231 exit(-1);
232 }
233 }
234
235
/* We need to explicitly zero any fractional pages after the data
   section (i.e. bss). This would contain the junk from the file that
   should not be in memory.
   elf_bss:  end of the file-backed data (start of bss)
   last_bss: end of the bss region */
static void padzero(abi_ulong elf_bss, abi_ulong last_bss)
{
    abi_ulong nbyte;

    /* No bss region at all — nothing to zero. */
    if (elf_bss >= last_bss)
        return;

    /* XXX: this is really a hack : if the real host page size is
       smaller than the target page size, some pages after the end
       of the file may not be mapped. A better fix would be to
       patch target_mmap(), but it is more complicated as the file
       size must be known */
    if (qemu_real_host_page_size < qemu_host_page_size) {
        abi_ulong end_addr, end_addr1;
        end_addr1 = REAL_HOST_PAGE_ALIGN(elf_bss);
        end_addr = HOST_PAGE_ALIGN(elf_bss);
        if (end_addr1 < end_addr) {
            /* Plug the host-page gap with anonymous zero pages.
               NOTE(review): the mmap return value is not checked. */
            mmap((void *)g2h_untagged(end_addr1), end_addr - end_addr1,
                 PROT_READ | PROT_WRITE | PROT_EXEC,
                 MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0);
        }
    }

    /* Zero the tail of the last file-backed page, byte by byte through
       the guest-address accessor. */
    nbyte = elf_bss & (qemu_host_page_size - 1);
    if (nbyte) {
        nbyte = qemu_host_page_size - nbyte;
        do {
            /* FIXME - what to do if put_user() fails? */
            put_user_u8(0, elf_bss);
            elf_bss++;
        } while (--nbyte);
    }
}
272
273
/*
 * Build the initial process stack image: optional platform string, the
 * ELF auxiliary vector (auxv), then the argv/envp pointer arrays (via
 * loader_build_argptr).
 *
 * p:                current top-of-strings guest address
 * argc, envc:       counts used to size the pointer arrays
 * exec:             main binary's ELF header (source of AT_PHDR et al.)
 * load_addr:        address the main binary's phdrs were loaded at
 * load_bias:        relocation bias applied to the main binary (AT_ENTRY)
 * interp_load_addr: interpreter base address (AT_BASE)
 * info:             image info (passed through to keep signature uniform)
 *
 * Returns the final guest stack pointer.
 */
static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc,
                                   struct elfhdr * exec,
                                   abi_ulong load_addr,
                                   abi_ulong load_bias,
                                   abi_ulong interp_load_addr,
                                   struct image_info *info)
{
    abi_ulong sp;
    int size;
    abi_ulong u_platform;
    const char *k_platform;
    const int n = sizeof(elf_addr_t);

    sp = p;
    u_platform = 0;
    k_platform = ELF_PLATFORM;
    if (k_platform) {
        /* Copy the platform string below the strings, rounded up to an
           elf_addr_t multiple; AT_PLATFORM will point at it. */
        size_t len = strlen(k_platform) + 1;
        sp -= (len + n - 1) & ~(n - 1);
        u_platform = sp;
        /* FIXME - check return value of memcpy_to_target() for failure */
        memcpy_to_target(sp, k_platform, len);
    }
    /*
     * Force 16 byte _final_ alignment here for generality.
     */
    sp = sp & ~(abi_ulong)15;
    /* Pre-count every word that will be pushed (auxv pairs, argv/envp
       pointers, terminators, argc) so sp can be pre-adjusted to keep the
       final stack pointer 16-byte aligned.
       NOTE(review): 13 aux entries are pushed below besides AT_NULL, but
       DLINFO_ITEMS is 12 — the count may be off by one pair; verify the
       resulting alignment against the target ABI. */
    size = (DLINFO_ITEMS + 1) * 2;
    if (k_platform)
        size += 2;
#ifdef DLINFO_ARCH_ITEMS
    size += DLINFO_ARCH_ITEMS * 2;
#endif
    size += envc + argc + 2;
    size += 1; /* argc itself */
    size *= n;
    if (size & 15)
        sp -= 16 - (size & 15);

    /* This is correct because Linux defines
     * elf_addr_t as Elf32_Off / Elf64_Off
     */
#define NEW_AUX_ENT(id, val) do { \
    sp -= n; put_user_ual(val, sp); \
    sp -= n; put_user_ual(id, sp); \
} while (0)

    /* The vector is built top-down, so AT_NULL is pushed first and ends
       up as the terminating entry. */
    NEW_AUX_ENT(AT_NULL, 0);

    /* There must be exactly DLINFO_ITEMS entries here. */
    NEW_AUX_ENT(AT_PHDR, (abi_ulong)(load_addr + exec->e_phoff));
    NEW_AUX_ENT(AT_PHENT, (abi_ulong)(sizeof(struct elf_phdr)));
    NEW_AUX_ENT(AT_PHNUM, (abi_ulong)(exec->e_phnum));
    NEW_AUX_ENT(AT_PAGESZ, (abi_ulong)(TARGET_PAGE_SIZE));
    NEW_AUX_ENT(AT_BASE, (abi_ulong)(interp_load_addr));
    NEW_AUX_ENT(AT_FLAGS, (abi_ulong)0);
    NEW_AUX_ENT(AT_ENTRY, load_bias + exec->e_entry);
    NEW_AUX_ENT(AT_UID, (abi_ulong) getuid());
    NEW_AUX_ENT(AT_EUID, (abi_ulong) geteuid());
    NEW_AUX_ENT(AT_GID, (abi_ulong) getgid());
    NEW_AUX_ENT(AT_EGID, (abi_ulong) getegid());
    NEW_AUX_ENT(AT_HWCAP, (abi_ulong) ELF_HWCAP);
    NEW_AUX_ENT(AT_CLKTCK, (abi_ulong) sysconf(_SC_CLK_TCK));
    if (k_platform)
        NEW_AUX_ENT(AT_PLATFORM, u_platform);
#ifdef ARCH_DLINFO
    /*
     * ARCH_DLINFO must come last so platform specific code can enforce
     * special alignment requirements on the AUXV if necessary (eg. PPC).
     */
    ARCH_DLINFO;
#endif
#undef NEW_AUX_ENT

    sp = loader_build_argptr(envc, argc, sp, p);
    return sp;
}
351
352
353 static abi_ulong load_elf_interp(struct elfhdr *interp_elf_ex,
354 int interpreter_fd,
355 abi_ulong *interp_load_addr)
356 {
357 struct elf_phdr *elf_phdata = NULL;
358 struct elf_phdr *eppnt;
359 abi_ulong load_addr = 0;
360 int load_addr_set = 0;
361 int retval;
362 abi_ulong last_bss, elf_bss;
363 abi_ulong error;
364 int i;
365
366 elf_bss = 0;
367 last_bss = 0;
368 error = 0;
369
370 #ifdef BSWAP_NEEDED
371 bswap_ehdr(interp_elf_ex);
372 #endif
373 /* First of all, some simple consistency checks */
374 if ((interp_elf_ex->e_type != ET_EXEC &&
375 interp_elf_ex->e_type != ET_DYN) ||
376 !elf_check_arch(interp_elf_ex->e_machine)) {
377 return ~((abi_ulong)0UL);
378 }
379
380
381 /* Now read in all of the header information */
382
383 if (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum > TARGET_PAGE_SIZE)
384 return ~(abi_ulong)0UL;
385
386 elf_phdata = (struct elf_phdr *)
387 malloc(sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
388
389 if (!elf_phdata)
390 return ~((abi_ulong)0UL);
391
392 /*
393 * If the size of this structure has changed, then punt, since
394 * we will be doing the wrong thing.
395 */
396 if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr)) {
397 free(elf_phdata);
398 return ~((abi_ulong)0UL);
399 }
400
401 retval = lseek(interpreter_fd, interp_elf_ex->e_phoff, SEEK_SET);
402 if (retval >= 0) {
403 retval = read(interpreter_fd,
404 (char *) elf_phdata,
405 sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
406 }
407 if (retval < 0) {
408 perror("load_elf_interp");
409 exit(-1);
410 free(elf_phdata);
411 return retval;
412 }
413 #ifdef BSWAP_NEEDED
414 eppnt = elf_phdata;
415 for (i = 0; i<interp_elf_ex->e_phnum; i++, eppnt++) {
416 bswap_phdr(eppnt);
417 }
418 #endif
419
420 if (interp_elf_ex->e_type == ET_DYN) {
421 /* in order to avoid hardcoding the interpreter load
422 address in qemu, we allocate a big enough memory zone */
423 error = target_mmap(0, INTERP_MAP_SIZE,
424 PROT_NONE, MAP_PRIVATE | MAP_ANON,
425 -1, 0);
426 if (error == -1) {
427 perror("mmap");
428 exit(-1);
429 }
430 load_addr = error;
431 load_addr_set = 1;
432 }
433
434 eppnt = elf_phdata;
435 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++)
436 if (eppnt->p_type == PT_LOAD) {
437 int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
438 int elf_prot = 0;
439 abi_ulong vaddr = 0;
440 abi_ulong k;
441
442 if (eppnt->p_flags & PF_R) elf_prot = PROT_READ;
443 if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
444 if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
445 if (interp_elf_ex->e_type == ET_EXEC || load_addr_set) {
446 elf_type |= MAP_FIXED;
447 vaddr = eppnt->p_vaddr;
448 }
449 error = target_mmap(load_addr + TARGET_ELF_PAGESTART(vaddr),
450 eppnt->p_filesz + TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr),
451 elf_prot,
452 elf_type,
453 interpreter_fd,
454 eppnt->p_offset - TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr));
455
456 if (error == -1) {
457 /* Real error */
458 close(interpreter_fd);
459 free(elf_phdata);
460 return ~((abi_ulong)0UL);
461 }
462
463 if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) {
464 load_addr = error;
465 load_addr_set = 1;
466 }
467
468 /*
469 * Find the end of the file mapping for this phdr, and keep
470 * track of the largest address we see for this.
471 */
472 k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
473 if (k > elf_bss) elf_bss = k;
474
475 /*
476 * Do the same thing for the memory mapping - between
477 * elf_bss and last_bss is the bss section.
478 */
479 k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
480 if (k > last_bss) last_bss = k;
481 }
482
483 /* Now use mmap to map the library into memory. */
484
485 close(interpreter_fd);
486
487 /*
488 * Now fill out the bss section. First pad the last page up
489 * to the page boundary, and then perform a mmap to make sure
490 * that there are zeromapped pages up to and including the last
491 * bss page.
492 */
493 padzero(elf_bss, last_bss);
494 elf_bss = TARGET_ELF_PAGESTART(elf_bss + qemu_host_page_size - 1); /* What we have mapped so far */
495
496 /* Map the last of the bss segment */
497 if (last_bss > elf_bss) {
498 target_mmap(elf_bss, last_bss - elf_bss,
499 PROT_READ | PROT_WRITE | PROT_EXEC,
500 MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0);
501 }
502 free(elf_phdata);
503
504 *interp_load_addr = load_addr;
505 return ((abi_ulong) interp_elf_ex->e_entry) + load_addr;
506 }
507
508 static int symfind(const void *s0, const void *s1)
509 {
510 target_ulong addr = *(target_ulong *)s0;
511 struct elf_sym *sym = (struct elf_sym *)s1;
512 int result = 0;
513 if (addr < sym->st_value) {
514 result = -1;
515 } else if (addr >= sym->st_value + sym->st_size) {
516 result = 1;
517 }
518 return result;
519 }
520
521 static const char *lookup_symbolxx(struct syminfo *s, target_ulong orig_addr)
522 {
523 #if ELF_CLASS == ELFCLASS32
524 struct elf_sym *syms = s->disas_symtab.elf32;
525 #else
526 struct elf_sym *syms = s->disas_symtab.elf64;
527 #endif
528
529 // binary search
530 struct elf_sym *sym;
531
532 sym = bsearch(&orig_addr, syms, s->disas_num_syms, sizeof(*syms), symfind);
533 if (sym != NULL) {
534 return s->disas_strtab + sym->st_name;
535 }
536
537 return "";
538 }
539
540 /* FIXME: This should use elf_ops.h */
541 static int symcmp(const void *s0, const void *s1)
542 {
543 struct elf_sym *sym0 = (struct elf_sym *)s0;
544 struct elf_sym *sym1 = (struct elf_sym *)s1;
545 return (sym0->st_value < sym1->st_value)
546 ? -1
547 : ((sym0->st_value > sym1->st_value) ? 1 : 0);
548 }
549
550 /* Best attempt to load symbols from this ELF object. */
551 static void load_symbols(struct elfhdr *hdr, int fd)
552 {
553 unsigned int i, nsyms;
554 struct elf_shdr sechdr, symtab, strtab;
555 char *strings;
556 struct syminfo *s;
557 struct elf_sym *syms, *new_syms;
558
559 lseek(fd, hdr->e_shoff, SEEK_SET);
560 for (i = 0; i < hdr->e_shnum; i++) {
561 if (read(fd, &sechdr, sizeof(sechdr)) != sizeof(sechdr))
562 return;
563 #ifdef BSWAP_NEEDED
564 bswap_shdr(&sechdr);
565 #endif
566 if (sechdr.sh_type == SHT_SYMTAB) {
567 symtab = sechdr;
568 lseek(fd, hdr->e_shoff
569 + sizeof(sechdr) * sechdr.sh_link, SEEK_SET);
570 if (read(fd, &strtab, sizeof(strtab))
571 != sizeof(strtab))
572 return;
573 #ifdef BSWAP_NEEDED
574 bswap_shdr(&strtab);
575 #endif
576 goto found;
577 }
578 }
579 return; /* Shouldn't happen... */
580
581 found:
582 /* Now know where the strtab and symtab are. Snarf them. */
583 s = malloc(sizeof(*s));
584 syms = malloc(symtab.sh_size);
585 if (!syms) {
586 free(s);
587 return;
588 }
589 s->disas_strtab = strings = malloc(strtab.sh_size);
590 if (!s->disas_strtab) {
591 free(s);
592 free(syms);
593 return;
594 }
595
596 lseek(fd, symtab.sh_offset, SEEK_SET);
597 if (read(fd, syms, symtab.sh_size) != symtab.sh_size) {
598 free(s);
599 free(syms);
600 free(strings);
601 return;
602 }
603
604 nsyms = symtab.sh_size / sizeof(struct elf_sym);
605
606 i = 0;
607 while (i < nsyms) {
608 #ifdef BSWAP_NEEDED
609 bswap_sym(syms + i);
610 #endif
611 // Throw away entries which we do not need.
612 if (syms[i].st_shndx == SHN_UNDEF ||
613 syms[i].st_shndx >= SHN_LORESERVE ||
614 ELF_ST_TYPE(syms[i].st_info) != STT_FUNC) {
615 nsyms--;
616 if (i < nsyms) {
617 syms[i] = syms[nsyms];
618 }
619 continue;
620 }
621 i++;
622 }
623
624 /* Attempt to free the storage associated with the local symbols
625 that we threw away. Whether or not this has any effect on the
626 memory allocation depends on the malloc implementation and how
627 many symbols we managed to discard. */
628 new_syms = realloc(syms, nsyms * sizeof(*syms));
629 if (new_syms == NULL) {
630 free(s);
631 free(syms);
632 free(strings);
633 return;
634 }
635 syms = new_syms;
636
637 qsort(syms, nsyms, sizeof(*syms), symcmp);
638
639 lseek(fd, strtab.sh_offset, SEEK_SET);
640 if (read(fd, strings, strtab.sh_size) != strtab.sh_size) {
641 free(s);
642 free(syms);
643 free(strings);
644 return;
645 }
646 s->disas_num_syms = nsyms;
647 #if ELF_CLASS == ELFCLASS32
648 s->disas_symtab.elf32 = syms;
649 s->lookup_symbol = (lookup_symbol_t)lookup_symbolxx;
650 #else
651 s->disas_symtab.elf64 = syms;
652 s->lookup_symbol = (lookup_symbol_t)lookup_symbolxx;
653 #endif
654 s->next = syminfos;
655 syminfos = s;
656 }
657
/*
 * Load an ELF executable into the guest address space and prepare its
 * initial stack.  On success fills in 'info' (entry point, brk, code/data
 * bounds, start_stack) and returns 0; returns a negative errno-style
 * value on the recoverable error paths.  Several unrecoverable I/O
 * failures terminate the emulator via exit(-1).
 * 'regs' is accepted for interface uniformity but not used here.
 */
int load_elf_binary(struct bsd_binprm *bprm, struct target_pt_regs *regs,
                    struct image_info *info)
{
    struct elfhdr elf_ex;
    struct elfhdr interp_elf_ex;
    int interpreter_fd = -1; /* avoid warning */
    abi_ulong load_addr, load_bias;
    int load_addr_set = 0;
    int i;
    struct elf_phdr * elf_ppnt;
    struct elf_phdr *elf_phdata;
    abi_ulong elf_bss, k, elf_brk;
    int retval;
    char * elf_interpreter;
    abi_ulong elf_entry, interp_load_addr = 0;
    abi_ulong start_code, end_code, start_data, end_data;
    abi_ulong reloc_func_desc = 0;
#ifdef LOW_ELF_STACK
    abi_ulong elf_stack = ~((abi_ulong)0UL);
#endif

    load_addr = 0;
    load_bias = 0;
    elf_ex = *((struct elfhdr *) bprm->buf); /* exec-header */
#ifdef BSWAP_NEEDED
    bswap_ehdr(&elf_ex);
#endif

    /* First of all, some simple consistency checks */
    if ((elf_ex.e_type != ET_EXEC && elf_ex.e_type != ET_DYN) ||
        (!elf_check_arch(elf_ex.e_machine))) {
        return -ENOEXEC;
    }

    /* Stage filename, environment and argument strings into bprm->page[];
       bprm->p becomes 0 when the MAX_ARG_PAGES window overflows. */
    bprm->p = copy_elf_strings(1, &bprm->filename, bprm->page, bprm->p);
    bprm->p = copy_elf_strings(bprm->envc, bprm->envp, bprm->page,bprm->p);
    bprm->p = copy_elf_strings(bprm->argc, bprm->argv, bprm->page,bprm->p);
    if (!bprm->p) {
        /* NOTE(review): retval is set but there is no early return; the
           overflow is actually handled later via the !bprm->p check. */
        retval = -E2BIG;
    }

    /* Now read in all of the header information */
    elf_phdata = (struct elf_phdr *)malloc(elf_ex.e_phentsize*elf_ex.e_phnum);
    if (elf_phdata == NULL) {
        return -ENOMEM;
    }

    /* NOTE(review): '> 0' skips the read when e_phoff == 0 (leaving
       retval == 0, treated as success with uninitialized phdrs);
       load_elf_interp() uses '>= 0' here — presumably this should too. */
    retval = lseek(bprm->fd, elf_ex.e_phoff, SEEK_SET);
    if (retval > 0) {
        retval = read(bprm->fd, (char *)elf_phdata,
                      elf_ex.e_phentsize * elf_ex.e_phnum);
    }

    if (retval < 0) {
        perror("load_elf_binary");
        exit(-1);
        /* NOTE(review): unreachable after exit(-1) — dead code. */
        free(elf_phdata);
        return -errno;
    }

#ifdef BSWAP_NEEDED
    elf_ppnt = elf_phdata;
    for (i = 0; i < elf_ex.e_phnum; i++, elf_ppnt++) {
        bswap_phdr(elf_ppnt);
    }
#endif
    elf_ppnt = elf_phdata;

    elf_bss = 0;
    elf_brk = 0;


    elf_interpreter = NULL;
    start_code = ~((abi_ulong)0UL);
    end_code = 0;
    start_data = 0;
    end_data = 0;

    /* First pass over the program headers: find PT_INTERP, read the
       interpreter path and its ELF header. */
    for (i = 0;i < elf_ex.e_phnum; i++) {
        if (elf_ppnt->p_type == PT_INTERP) {
            /* A second PT_INTERP segment is malformed input. */
            if (elf_interpreter != NULL)
            {
                free(elf_phdata);
                free(elf_interpreter);
                close(bprm->fd);
                return -EINVAL;
            }

            /* This is the program interpreter used for
             * shared libraries - for now assume that this
             * is an a.out format binary
             */

            /* p_filesz includes the NUL terminator per the ELF spec —
               presumably; confirm the file is well-formed upstream. */
            elf_interpreter = (char *)malloc(elf_ppnt->p_filesz);

            if (elf_interpreter == NULL) {
                free(elf_phdata);
                close(bprm->fd);
                return -ENOMEM;
            }

            retval = lseek(bprm->fd, elf_ppnt->p_offset, SEEK_SET);
            if (retval >= 0) {
                retval = read(bprm->fd, elf_interpreter, elf_ppnt->p_filesz);
            }
            if (retval < 0) {
                perror("load_elf_binary2");
                exit(-1);
            }

            /* Open the interpreter through path() so sysroot-style
               prefixing applies. */
            if (retval >= 0) {
                retval = open(path(elf_interpreter), O_RDONLY);
                if (retval >= 0) {
                    interpreter_fd = retval;
                }
                else {
                    perror(elf_interpreter);
                    exit(-1);
                    /* retval = -errno; */
                }
            }

            /* Read the interpreter's ELF header into bprm->buf.
               NOTE(review): assumes sizeof(struct elfhdr) <= 128. */
            if (retval >= 0) {
                retval = lseek(interpreter_fd, 0, SEEK_SET);
                if (retval >= 0) {
                    retval = read(interpreter_fd, bprm->buf, 128);
                }
            }
            if (retval >= 0) {
                interp_elf_ex = *((struct elfhdr *) bprm->buf); /* elf exec-header */
            }
            if (retval < 0) {
                perror("load_elf_binary3");
                exit(-1);
                /* NOTE(review): unreachable after exit(-1) — dead code. */
                free(elf_phdata);
                free(elf_interpreter);
                close(bprm->fd);
                return retval;
            }
        }
        elf_ppnt++;
    }

    /* Some simple consistency checks for the interpreter */
    if (elf_interpreter) {
        if (interp_elf_ex.e_ident[0] != 0x7f ||
            strncmp((char *)&interp_elf_ex.e_ident[1], "ELF", 3) != 0) {
            free(elf_interpreter);
            free(elf_phdata);
            close(bprm->fd);
            return -ELIBBAD;
        }
    }

    /* OK, we are done with that, now set up the arg stuff,
       and then start this sucker up */

    if (!bprm->p) {
        free(elf_interpreter);
        free(elf_phdata);
        close(bprm->fd);
        return -E2BIG;
    }

    /* OK, This is the point of no return */
    info->end_data = 0;
    info->end_code = 0;
    info->start_mmap = (abi_ulong)ELF_START_MMAP;
    info->mmap = 0;
    elf_entry = (abi_ulong) elf_ex.e_entry;

    /*
     * In case where user has not explicitly set the guest_base, we
     * probe here that should we set it automatically.
     */
    if (!have_guest_base) {
        /*
         * Go through ELF program header table and find out whether
         * any of the segments drop below our current mmap_min_addr and
         * in that case set guest_base to corresponding address.
         */
        for (i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum;
             i++, elf_ppnt++) {
            if (elf_ppnt->p_type != PT_LOAD)
                continue;
            if (HOST_PAGE_ALIGN(elf_ppnt->p_vaddr) < mmap_min_addr) {
                guest_base = HOST_PAGE_ALIGN(mmap_min_addr);
                break;
            }
        }
    }

    /* Do this so that we can load the interpreter, if need be. We will
       change some of these later */
    info->rss = 0;
    bprm->p = setup_arg_pages(bprm->p, bprm, info);
    info->start_stack = bprm->p;

    /* Now we do a little grungy work by mmaping the ELF image into
     * the correct location in memory. At this point, we assume that
     * the image should be loaded at fixed address, not at a variable
     * address.
     */

    /* Second pass: map every PT_LOAD segment and track the code/data/bss
       extents. */
    for (i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum; i++, elf_ppnt++) {
        int elf_prot = 0;
        int elf_flags = 0;
        abi_ulong error;

        if (elf_ppnt->p_type != PT_LOAD)
            continue;

        if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ;
        if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
        if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
        elf_flags = MAP_PRIVATE | MAP_DENYWRITE;
        if (elf_ex.e_type == ET_EXEC || load_addr_set) {
            elf_flags |= MAP_FIXED;
        } else if (elf_ex.e_type == ET_DYN) {
            /* Try and get dynamic programs out of the way of the default mmap
               base, as well as whatever program they might try to exec. This
               is because the brk will follow the loader, and is not movable. */
            /* NOTE: for qemu, we do a big mmap to get enough space
               without hardcoding any address */
            error = target_mmap(0, ET_DYN_MAP_SIZE,
                                PROT_NONE, MAP_PRIVATE | MAP_ANON,
                                -1, 0);
            if (error == -1) {
                perror("mmap");
                exit(-1);
            }
            load_bias = TARGET_ELF_PAGESTART(error - elf_ppnt->p_vaddr);
        }

        error = target_mmap(TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr),
                            (elf_ppnt->p_filesz +
                             TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)),
                            elf_prot,
                            (MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE),
                            bprm->fd,
                            (elf_ppnt->p_offset -
                             TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)));
        if (error == -1) {
            perror("mmap");
            exit(-1);
        }

#ifdef LOW_ELF_STACK
        if (TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr) < elf_stack)
            elf_stack = TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr);
#endif

        if (!load_addr_set) {
            load_addr_set = 1;
            load_addr = elf_ppnt->p_vaddr - elf_ppnt->p_offset;
            if (elf_ex.e_type == ET_DYN) {
                load_bias += error -
                    TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr);
                load_addr += load_bias;
                reloc_func_desc = load_bias;
            }
        }
        /* Update running extents (pre-bias addresses; bias added below). */
        k = elf_ppnt->p_vaddr;
        if (k < start_code)
            start_code = k;
        if (start_data < k)
            start_data = k;
        k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
        if (k > elf_bss)
            elf_bss = k;
        if ((elf_ppnt->p_flags & PF_X) && end_code < k)
            end_code = k;
        if (end_data < k)
            end_data = k;
        k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
        if (k > elf_brk) elf_brk = k;
    }

    /* Translate all tracked addresses into the loaded (biased) space. */
    elf_entry += load_bias;
    elf_bss += load_bias;
    elf_brk += load_bias;
    start_code += load_bias;
    end_code += load_bias;
    start_data += load_bias;
    end_data += load_bias;

    if (elf_interpreter) {
        /* Control transfers to the interpreter, not the main binary. */
        elf_entry = load_elf_interp(&interp_elf_ex, interpreter_fd,
                                    &interp_load_addr);
        reloc_func_desc = interp_load_addr;

        close(interpreter_fd);
        free(elf_interpreter);

        if (elf_entry == ~((abi_ulong)0UL)) {
            printf("Unable to load interpreter\n");
            free(elf_phdata);
            exit(-1);
            /* NOTE(review): unreachable after exit(-1). */
            return 0;
        }
    }

    free(elf_phdata);

    if (qemu_log_enabled())
        load_symbols(&elf_ex, bprm->fd);

    close(bprm->fd);

#ifdef LOW_ELF_STACK
    info->start_stack = bprm->p = elf_stack - 4;
#endif
    bprm->p = create_elf_tables(bprm->p,
                                bprm->argc,
                                bprm->envc,
                                &elf_ex,
                                load_addr, load_bias,
                                interp_load_addr,
                                info);
    info->load_addr = reloc_func_desc;
    info->start_brk = info->brk = elf_brk;
    info->end_code = end_code;
    info->start_code = start_code;
    info->start_data = start_data;
    info->end_data = end_data;
    info->start_stack = bprm->p;

    /* Calling set_brk effectively mmaps the pages that we need for the bss and break
       sections */
    set_brk(elf_bss, elf_brk);

    padzero(elf_bss, elf_brk);

#if 0
    printf("(start_brk) %x\n" , info->start_brk);
    printf("(end_code) %x\n" , info->end_code);
    printf("(start_code) %x\n" , info->start_code);
    printf("(end_data) %x\n" , info->end_data);
    printf("(start_stack) %x\n" , info->start_stack);
    printf("(brk) %x\n" , info->brk);
#endif

    info->entry = elf_entry;

    return 0;
}
1004
/*
 * Initialize the first guest thread's register state from the loaded
 * image; defers to the per-target init_thread() (target_arch_elf.h).
 */
void do_init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    init_thread(regs, infop);
}