/*
 * Source: mirror_qemu.git / bsd-user / elfload.c
 * (snapshot at commit "bsd-user: Add system independent stack, data and
 * text limiting")
 */
1 /*
2 * ELF loading code
3 *
4 * Copyright (c) 2013 Stacey D. Son
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include "qemu/osdep.h"
21
22 #include "qemu.h"
23 #include "disas/disas.h"
24 #include "qemu/path.h"
25
26 #include "target_arch_elf.h"
27 #include "target_os_thread.h"
28
/* this flag is ineffective under Linux too; it should be deleted */
30 #ifndef MAP_DENYWRITE
31 #define MAP_DENYWRITE 0
32 #endif
33
34 /* should probably go in elf.h */
35 #ifndef ELIBBAD
36 #define ELIBBAD 80
37 #endif
38
39 #ifndef ELF_PLATFORM
40 #define ELF_PLATFORM (NULL)
41 #endif
42
43 #ifndef ELF_HWCAP
44 #define ELF_HWCAP 0
45 #endif
46
47 #ifdef TARGET_ABI32
48 #undef ELF_CLASS
49 #define ELF_CLASS ELFCLASS32
50 #undef bswaptls
51 #define bswaptls(ptr) bswap32s(ptr)
52 #endif
53
54 #include "elf.h"
55
56 /* max code+data+bss space allocated to elf interpreter */
57 #define INTERP_MAP_SIZE (32 * 1024 * 1024)
58
59 /* max code+data+bss+brk space allocated to ET_DYN executables */
60 #define ET_DYN_MAP_SIZE (128 * 1024 * 1024)
61
62 /* Necessary parameters */
63 #define TARGET_ELF_EXEC_PAGESIZE TARGET_PAGE_SIZE
64 #define TARGET_ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(TARGET_ELF_EXEC_PAGESIZE - 1))
65 #define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE - 1))
66
67 #define DLINFO_ITEMS 12
68
/*
 * Copy 'n' bytes from 'src' to 'dst'.  The name is kept for historical
 * symmetry with the Linux kernel loader this file derives from; in
 * user-mode emulation both pointers are ordinary host pointers, so a
 * plain memcpy() is all that is needed.
 */
static inline void memcpy_fromfs(void *dst, const void *src, unsigned long n)
{
    memcpy(dst, src, n);
}
73
74 #ifdef BSWAP_NEEDED
/*
 * Byte-swap every field of the ELF file header in place.  Compiled only
 * when host and target endianness differ (BSWAP_NEEDED); bswaptls()
 * swaps a target-long-sized field (32- or 64-bit, per ELF_CLASS).
 */
static void bswap_ehdr(struct elfhdr *ehdr)
{
    bswap16s(&ehdr->e_type);            /* Object file type */
    bswap16s(&ehdr->e_machine);         /* Architecture */
    bswap32s(&ehdr->e_version);         /* Object file version */
    bswaptls(&ehdr->e_entry);           /* Entry point virtual address */
    bswaptls(&ehdr->e_phoff);           /* Program header table file offset */
    bswaptls(&ehdr->e_shoff);           /* Section header table file offset */
    bswap32s(&ehdr->e_flags);           /* Processor-specific flags */
    bswap16s(&ehdr->e_ehsize);          /* ELF header size in bytes */
    bswap16s(&ehdr->e_phentsize);       /* Program header table entry size */
    bswap16s(&ehdr->e_phnum);           /* Program header table entry count */
    bswap16s(&ehdr->e_shentsize);       /* Section header table entry size */
    bswap16s(&ehdr->e_shnum);           /* Section header table entry count */
    bswap16s(&ehdr->e_shstrndx);        /* Section header string table index */
}
91
/*
 * Byte-swap an array of 'phnum' program headers in place.
 */
static void bswap_phdr(struct elf_phdr *phdr, int phnum)
{
    int i;

    for (i = 0; i < phnum; i++, phdr++) {
        bswap32s(&phdr->p_type);        /* Segment type */
        bswap32s(&phdr->p_flags);       /* Segment flags */
        bswaptls(&phdr->p_offset);      /* Segment file offset */
        bswaptls(&phdr->p_vaddr);       /* Segment virtual address */
        bswaptls(&phdr->p_paddr);       /* Segment physical address */
        bswaptls(&phdr->p_filesz);      /* Segment size in file */
        bswaptls(&phdr->p_memsz);       /* Segment size in memory */
        bswaptls(&phdr->p_align);       /* Segment alignment */
    }
}
107
/*
 * Byte-swap an array of 'shnum' section headers in place.
 */
static void bswap_shdr(struct elf_shdr *shdr, int shnum)
{
    int i;

    for (i = 0; i < shnum; i++, shdr++) {
        bswap32s(&shdr->sh_name);
        bswap32s(&shdr->sh_type);
        bswaptls(&shdr->sh_flags);
        bswaptls(&shdr->sh_addr);
        bswaptls(&shdr->sh_offset);
        bswaptls(&shdr->sh_size);
        bswap32s(&shdr->sh_link);
        bswap32s(&shdr->sh_info);
        bswaptls(&shdr->sh_addralign);
        bswaptls(&shdr->sh_entsize);
    }
}
125
/*
 * Byte-swap one symbol-table entry in place (only the multi-byte
 * fields; st_info and st_other are single bytes).
 */
static void bswap_sym(struct elf_sym *sym)
{
    bswap32s(&sym->st_name);
    bswaptls(&sym->st_value);
    bswaptls(&sym->st_size);
    bswap16s(&sym->st_shndx);
}
133
134 #else /* ! BSWAP_NEEDED */
135
/* Host and target endianness match: byte-swapping is a no-op. */
static void bswap_ehdr(struct elfhdr *ehdr) { }
static void bswap_phdr(struct elf_phdr *phdr, int phnum) { }
static void bswap_shdr(struct elf_shdr *shdr, int shnum) { }
static void bswap_sym(struct elf_sym *sym) { }
140
141 #endif /* ! BSWAP_NEEDED */
142
/*
 * 'copy_elf_strings()' copies argument/environment strings from user
 * memory to free pages in kernel mem.  These are in a format ready
 * to be put directly into the top of new user memory.
 *
 */
/*
 * Copy the 'argc' strings in 'argv' into the temporary argument pages
 * 'page', filling downwards from offset 'p' (an offset within the
 * MAX_ARG_PAGES region, not yet a guest address).  Pages are allocated
 * lazily on first touch.  Returns the new (lower) offset, or 0 on
 * failure (too many strings, or out of memory).
 */
static abi_ulong copy_elf_strings(int argc, char **argv, void **page,
                                  abi_ulong p)
{
    char *tmp, *tmp1, *pag = NULL;
    int len, offset = 0;

    if (!p) {
        return 0;       /* bullet-proofing */
    }
    /* Walk argv backwards so the strings end up in the same top-down
       order a kernel exec would produce. */
    while (argc-- > 0) {
        tmp = argv[argc];
        if (!tmp) {
            fprintf(stderr, "VFS: argc is wrong");
            exit(-1);
        }
        tmp1 = tmp;
        /* Advance past the terminating NUL; len counts it too. */
        while (*tmp++);
        len = tmp - tmp1;
        if (p < len) { /* this shouldn't happen - 128kB */
            return 0;
        }
        /* Copy the string backwards, byte-group by byte-group, crossing
           target-page boundaries as 'offset' underflows. */
        while (len) {
            --p; --tmp; --len;
            if (--offset < 0) {
                /* Crossed into the previous target page: recompute the
                   in-page offset and allocate the page on first use. */
                offset = p % TARGET_PAGE_SIZE;
                pag = (char *)page[p / TARGET_PAGE_SIZE];
                if (!pag) {
                    pag = g_try_malloc0(TARGET_PAGE_SIZE);
                    page[p / TARGET_PAGE_SIZE] = pag;
                    if (!pag)
                        return 0;
                }
            }
            if (len == 0 || offset == 0) {
                /* Single byte left (or at page start): store directly. */
                *(pag + offset) = *tmp;
            }
            else {
                /* Bulk-copy as much as fits in this page / remains of
                   the string in one memcpy. */
                int bytes_to_copy = (len > offset) ? offset : len;
                tmp -= bytes_to_copy;
                p -= bytes_to_copy;
                offset -= bytes_to_copy;
                len -= bytes_to_copy;
                memcpy_fromfs(pag + offset, tmp, bytes_to_copy + 1);
            }
        }
    }
    return p;
}
197
/*
 * Allocate the guest stack and copy the argument/environment pages
 * filled by copy_elf_strings() to its top.  'p' is the current offset
 * within the MAX_ARG_PAGES region; the return value is that offset
 * rebased to the guest address of the freshly mapped stack.
 */
static abi_ulong setup_arg_pages(abi_ulong p, struct bsd_binprm *bprm,
                                 struct image_info *info)
{
    abi_ulong stack_base, size, error;
    int i;

    /* Create enough stack to hold everything. If we don't use
     * it for args, we'll use it for something else...
     */
    size = target_dflssiz;
    if (size < MAX_ARG_PAGES * TARGET_PAGE_SIZE)
        size = MAX_ARG_PAGES * TARGET_PAGE_SIZE;
    error = target_mmap(0,
                        size + qemu_host_page_size,
                        PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANON,
                        -1, 0);
    if (error == -1) {
        perror("stk mmap");
        exit(-1);
    }
    /* we reserve one extra page at the top of the stack as guard */
    target_mprotect(error + size, qemu_host_page_size, PROT_NONE);

    /* The argument pages occupy the top MAX_ARG_PAGES pages of the
       stack region. */
    stack_base = error + size - MAX_ARG_PAGES * TARGET_PAGE_SIZE;
    p += stack_base;

    for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
        if (bprm->page[i]) {
            info->rss++;
            /* FIXME - check return value of memcpy_to_target() for failure */
            memcpy_to_target(stack_base, bprm->page[i], TARGET_PAGE_SIZE);
            g_free(bprm->page[i]);
        }
        stack_base += TARGET_PAGE_SIZE;
    }
    return p;
}
236
237 static void set_brk(abi_ulong start, abi_ulong end)
238 {
239 /* page-align the start and end addresses... */
240 start = HOST_PAGE_ALIGN(start);
241 end = HOST_PAGE_ALIGN(end);
242 if (end <= start)
243 return;
244 if (target_mmap(start, end - start,
245 PROT_READ | PROT_WRITE | PROT_EXEC,
246 MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0) == -1) {
247 perror("cannot mmap brk");
248 exit(-1);
249 }
250 }
251
252
/* We need to explicitly zero any fractional pages after the data
   section (i.e. bss).  This would contain the junk from the file that
   should not be in memory. */
static void padzero(abi_ulong elf_bss, abi_ulong last_bss)
{
    abi_ulong nbyte;

    /* No bss at all: nothing to zero. */
    if (elf_bss >= last_bss)
        return;

    /* XXX: this is really a hack : if the real host page size is
       smaller than the target page size, some pages after the end
       of the file may not be mapped. A better fix would be to
       patch target_mmap(), but it is more complicated as the file
       size must be known */
    if (qemu_real_host_page_size < qemu_host_page_size) {
        abi_ulong end_addr, end_addr1;
        end_addr1 = REAL_HOST_PAGE_ALIGN(elf_bss);
        end_addr = HOST_PAGE_ALIGN(elf_bss);
        if (end_addr1 < end_addr) {
            /* Fill the gap between the real end of the file mapping and
               the target page boundary with anonymous zero pages. */
            mmap((void *)g2h_untagged(end_addr1), end_addr - end_addr1,
                 PROT_READ | PROT_WRITE | PROT_EXEC,
                 MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0);
        }
    }

    /* Zero the tail of the last page containing file data: that part of
       the mapping came from the file and holds on-disk junk. */
    nbyte = elf_bss & (qemu_host_page_size - 1);
    if (nbyte) {
        nbyte = qemu_host_page_size - nbyte;
        do {
            /* FIXME - what to do if put_user() fails? */
            put_user_u8(0, elf_bss);
            elf_bss++;
        } while (--nbyte);
    }
}
289
290
/*
 * Build the initial stack image: the ELF auxiliary vector, the argv and
 * envp pointer arrays, and argc, working downwards from 'p' (the guest
 * address just above the copied strings).  'exec' is the byte-swapped
 * ELF header of the main binary, used for the AT_PHDR/AT_PHENT/AT_PHNUM
 * and AT_ENTRY entries.  Returns the final stack pointer.
 */
static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc,
                                   struct elfhdr * exec,
                                   abi_ulong load_addr,
                                   abi_ulong load_bias,
                                   abi_ulong interp_load_addr,
                                   struct image_info *info)
{
    abi_ulong sp;
    int size;
    abi_ulong u_platform;
    const char *k_platform;
    const int n = sizeof(elf_addr_t);

    sp = p;
    u_platform = 0;
    k_platform = ELF_PLATFORM;
    if (k_platform) {
        /* Copy the platform string onto the stack, keeping sp aligned
           to the aux-entry word size 'n'. */
        size_t len = strlen(k_platform) + 1;
        sp -= (len + n - 1) & ~(n - 1);
        u_platform = sp;
        /* FIXME - check return value of memcpy_to_target() for failure */
        memcpy_to_target(sp, k_platform, len);
    }
    /*
     * Force 16 byte _final_ alignment here for generality.
     */
    sp = sp & ~(abi_ulong)15;
    /* Pre-compute how many words will still be pushed so that sp ends
       up 16-byte aligned after all of them. */
    size = (DLINFO_ITEMS + 1) * 2;
    if (k_platform)
        size += 2;
#ifdef DLINFO_ARCH_ITEMS
    size += DLINFO_ARCH_ITEMS * 2;
#endif
    size += envc + argc + 2;
    size += 1; /* argc itself */
    size *= n;
    if (size & 15)
        sp -= 16 - (size & 15);

    /* This is correct because Linux defines
     * elf_addr_t as Elf32_Off / Elf64_Off
     */
#define NEW_AUX_ENT(id, val) do { \
    sp -= n; put_user_ual(val, sp); \
    sp -= n; put_user_ual(id, sp); \
    } while (0)

    NEW_AUX_ENT(AT_NULL, 0);

    /* There must be exactly DLINFO_ITEMS entries here. */
    /* NOTE(review): 13 entries follow (AT_PHDR..AT_CLKTCK) but
       DLINFO_ITEMS is 12, so the size pre-computation above appears to
       undercount by one entry; this only perturbs the final 16-byte
       alignment, not the vector contents — worth confirming. */
    NEW_AUX_ENT(AT_PHDR, (abi_ulong)(load_addr + exec->e_phoff));
    NEW_AUX_ENT(AT_PHENT, (abi_ulong)(sizeof(struct elf_phdr)));
    NEW_AUX_ENT(AT_PHNUM, (abi_ulong)(exec->e_phnum));
    NEW_AUX_ENT(AT_PAGESZ, (abi_ulong)(TARGET_PAGE_SIZE));
    NEW_AUX_ENT(AT_BASE, (abi_ulong)(interp_load_addr));
    NEW_AUX_ENT(AT_FLAGS, (abi_ulong)0);
    NEW_AUX_ENT(AT_ENTRY, load_bias + exec->e_entry);
    NEW_AUX_ENT(AT_UID, (abi_ulong) getuid());
    NEW_AUX_ENT(AT_EUID, (abi_ulong) geteuid());
    NEW_AUX_ENT(AT_GID, (abi_ulong) getgid());
    NEW_AUX_ENT(AT_EGID, (abi_ulong) getegid());
    NEW_AUX_ENT(AT_HWCAP, (abi_ulong) ELF_HWCAP);
    NEW_AUX_ENT(AT_CLKTCK, (abi_ulong) sysconf(_SC_CLK_TCK));
    if (k_platform)
        NEW_AUX_ENT(AT_PLATFORM, u_platform);
#ifdef ARCH_DLINFO
    /*
     * ARCH_DLINFO must come last so platform specific code can enforce
     * special alignment requirements on the AUXV if necessary (eg. PPC).
     */
    ARCH_DLINFO;
#endif
#undef NEW_AUX_ENT

    /* Push envp, argv and argc below the aux vector. */
    sp = loader_build_argptr(envc, argc, sp, p);
    return sp;
}
368
369
370 static abi_ulong load_elf_interp(struct elfhdr *interp_elf_ex,
371 int interpreter_fd,
372 abi_ulong *interp_load_addr)
373 {
374 struct elf_phdr *elf_phdata = NULL;
375 struct elf_phdr *eppnt;
376 abi_ulong load_addr = 0;
377 int load_addr_set = 0;
378 int retval;
379 abi_ulong last_bss, elf_bss;
380 abi_ulong error;
381 int i;
382
383 elf_bss = 0;
384 last_bss = 0;
385 error = 0;
386
387 bswap_ehdr(interp_elf_ex);
388 /* First of all, some simple consistency checks */
389 if ((interp_elf_ex->e_type != ET_EXEC &&
390 interp_elf_ex->e_type != ET_DYN) ||
391 !elf_check_arch(interp_elf_ex->e_machine)) {
392 return ~((abi_ulong)0UL);
393 }
394
395
396 /* Now read in all of the header information */
397
398 if (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum > TARGET_PAGE_SIZE)
399 return ~(abi_ulong)0UL;
400
401 elf_phdata = (struct elf_phdr *)
402 malloc(sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
403
404 if (!elf_phdata)
405 return ~((abi_ulong)0UL);
406
407 /*
408 * If the size of this structure has changed, then punt, since
409 * we will be doing the wrong thing.
410 */
411 if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr)) {
412 free(elf_phdata);
413 return ~((abi_ulong)0UL);
414 }
415
416 retval = lseek(interpreter_fd, interp_elf_ex->e_phoff, SEEK_SET);
417 if (retval >= 0) {
418 retval = read(interpreter_fd,
419 (char *) elf_phdata,
420 sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
421 }
422 if (retval < 0) {
423 perror("load_elf_interp");
424 exit(-1);
425 free(elf_phdata);
426 return retval;
427 }
428 bswap_phdr(elf_phdata, interp_elf_ex->e_phnum);
429
430 if (interp_elf_ex->e_type == ET_DYN) {
431 /* in order to avoid hardcoding the interpreter load
432 address in qemu, we allocate a big enough memory zone */
433 error = target_mmap(0, INTERP_MAP_SIZE,
434 PROT_NONE, MAP_PRIVATE | MAP_ANON,
435 -1, 0);
436 if (error == -1) {
437 perror("mmap");
438 exit(-1);
439 }
440 load_addr = error;
441 load_addr_set = 1;
442 }
443
444 eppnt = elf_phdata;
445 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++)
446 if (eppnt->p_type == PT_LOAD) {
447 int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
448 int elf_prot = 0;
449 abi_ulong vaddr = 0;
450 abi_ulong k;
451
452 if (eppnt->p_flags & PF_R) elf_prot = PROT_READ;
453 if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
454 if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
455 if (interp_elf_ex->e_type == ET_EXEC || load_addr_set) {
456 elf_type |= MAP_FIXED;
457 vaddr = eppnt->p_vaddr;
458 }
459 error = target_mmap(load_addr + TARGET_ELF_PAGESTART(vaddr),
460 eppnt->p_filesz + TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr),
461 elf_prot,
462 elf_type,
463 interpreter_fd,
464 eppnt->p_offset - TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr));
465
466 if (error == -1) {
467 /* Real error */
468 close(interpreter_fd);
469 free(elf_phdata);
470 return ~((abi_ulong)0UL);
471 }
472
473 if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) {
474 load_addr = error;
475 load_addr_set = 1;
476 }
477
478 /*
479 * Find the end of the file mapping for this phdr, and keep
480 * track of the largest address we see for this.
481 */
482 k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
483 if (k > elf_bss) elf_bss = k;
484
485 /*
486 * Do the same thing for the memory mapping - between
487 * elf_bss and last_bss is the bss section.
488 */
489 k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
490 if (k > last_bss) last_bss = k;
491 }
492
493 /* Now use mmap to map the library into memory. */
494
495 close(interpreter_fd);
496
497 /*
498 * Now fill out the bss section. First pad the last page up
499 * to the page boundary, and then perform a mmap to make sure
500 * that there are zeromapped pages up to and including the last
501 * bss page.
502 */
503 padzero(elf_bss, last_bss);
504 elf_bss = TARGET_ELF_PAGESTART(elf_bss + qemu_host_page_size - 1); /* What we have mapped so far */
505
506 /* Map the last of the bss segment */
507 if (last_bss > elf_bss) {
508 target_mmap(elf_bss, last_bss - elf_bss,
509 PROT_READ | PROT_WRITE | PROT_EXEC,
510 MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0);
511 }
512 free(elf_phdata);
513
514 *interp_load_addr = load_addr;
515 return ((abi_ulong) interp_elf_ex->e_entry) + load_addr;
516 }
517
518 static int symfind(const void *s0, const void *s1)
519 {
520 target_ulong addr = *(target_ulong *)s0;
521 struct elf_sym *sym = (struct elf_sym *)s1;
522 int result = 0;
523 if (addr < sym->st_value) {
524 result = -1;
525 } else if (addr >= sym->st_value + sym->st_size) {
526 result = 1;
527 }
528 return result;
529 }
530
531 static const char *lookup_symbolxx(struct syminfo *s, target_ulong orig_addr)
532 {
533 #if ELF_CLASS == ELFCLASS32
534 struct elf_sym *syms = s->disas_symtab.elf32;
535 #else
536 struct elf_sym *syms = s->disas_symtab.elf64;
537 #endif
538
539 // binary search
540 struct elf_sym *sym;
541
542 sym = bsearch(&orig_addr, syms, s->disas_num_syms, sizeof(*syms), symfind);
543 if (sym != NULL) {
544 return s->disas_strtab + sym->st_name;
545 }
546
547 return "";
548 }
549
550 /* FIXME: This should use elf_ops.h */
551 static int symcmp(const void *s0, const void *s1)
552 {
553 struct elf_sym *sym0 = (struct elf_sym *)s0;
554 struct elf_sym *sym1 = (struct elf_sym *)s1;
555 return (sym0->st_value < sym1->st_value)
556 ? -1
557 : ((sym0->st_value > sym1->st_value) ? 1 : 0);
558 }
559
560 /* Best attempt to load symbols from this ELF object. */
561 static void load_symbols(struct elfhdr *hdr, int fd)
562 {
563 unsigned int i, nsyms;
564 struct elf_shdr sechdr, symtab, strtab;
565 char *strings;
566 struct syminfo *s;
567 struct elf_sym *syms, *new_syms;
568
569 lseek(fd, hdr->e_shoff, SEEK_SET);
570 for (i = 0; i < hdr->e_shnum; i++) {
571 if (read(fd, &sechdr, sizeof(sechdr)) != sizeof(sechdr))
572 return;
573 bswap_shdr(&sechdr, 1);
574 if (sechdr.sh_type == SHT_SYMTAB) {
575 symtab = sechdr;
576 lseek(fd, hdr->e_shoff
577 + sizeof(sechdr) * sechdr.sh_link, SEEK_SET);
578 if (read(fd, &strtab, sizeof(strtab))
579 != sizeof(strtab))
580 return;
581 bswap_shdr(&strtab, 1);
582 goto found;
583 }
584 }
585 return; /* Shouldn't happen... */
586
587 found:
588 /* Now know where the strtab and symtab are. Snarf them. */
589 s = malloc(sizeof(*s));
590 syms = malloc(symtab.sh_size);
591 if (!syms) {
592 free(s);
593 return;
594 }
595 s->disas_strtab = strings = malloc(strtab.sh_size);
596 if (!s->disas_strtab) {
597 free(s);
598 free(syms);
599 return;
600 }
601
602 lseek(fd, symtab.sh_offset, SEEK_SET);
603 if (read(fd, syms, symtab.sh_size) != symtab.sh_size) {
604 free(s);
605 free(syms);
606 free(strings);
607 return;
608 }
609
610 nsyms = symtab.sh_size / sizeof(struct elf_sym);
611
612 i = 0;
613 while (i < nsyms) {
614 bswap_sym(syms + i);
615 // Throw away entries which we do not need.
616 if (syms[i].st_shndx == SHN_UNDEF ||
617 syms[i].st_shndx >= SHN_LORESERVE ||
618 ELF_ST_TYPE(syms[i].st_info) != STT_FUNC) {
619 nsyms--;
620 if (i < nsyms) {
621 syms[i] = syms[nsyms];
622 }
623 continue;
624 }
625 i++;
626 }
627
628 /* Attempt to free the storage associated with the local symbols
629 that we threw away. Whether or not this has any effect on the
630 memory allocation depends on the malloc implementation and how
631 many symbols we managed to discard. */
632 new_syms = realloc(syms, nsyms * sizeof(*syms));
633 if (new_syms == NULL) {
634 free(s);
635 free(syms);
636 free(strings);
637 return;
638 }
639 syms = new_syms;
640
641 qsort(syms, nsyms, sizeof(*syms), symcmp);
642
643 lseek(fd, strtab.sh_offset, SEEK_SET);
644 if (read(fd, strings, strtab.sh_size) != strtab.sh_size) {
645 free(s);
646 free(syms);
647 free(strings);
648 return;
649 }
650 s->disas_num_syms = nsyms;
651 #if ELF_CLASS == ELFCLASS32
652 s->disas_symtab.elf32 = syms;
653 s->lookup_symbol = (lookup_symbol_t)lookup_symbolxx;
654 #else
655 s->disas_symtab.elf64 = syms;
656 s->lookup_symbol = (lookup_symbol_t)lookup_symbolxx;
657 #endif
658 s->next = syminfos;
659 syminfos = s;
660 }
661
/*
 * Load the ELF executable described by bprm: map its PT_LOAD segments,
 * set up the stack with argv/envp and the ELF auxiliary vector, map the
 * bss/brk area, and — if the binary has a PT_INTERP segment — load its
 * ELF interpreter as well.  On success fills in 'info' (entry point,
 * brk, code/data/stack boundaries) and returns 0; on failure returns a
 * negative errno value, though several error paths exit(-1) directly.
 * 'regs' is unused here; register setup happens later via
 * do_init_thread().
 */
int load_elf_binary(struct bsd_binprm *bprm, struct target_pt_regs *regs,
                    struct image_info *info)
{
    struct elfhdr elf_ex;
    struct elfhdr interp_elf_ex;
    int interpreter_fd = -1; /* avoid warning */
    abi_ulong load_addr, load_bias;
    int load_addr_set = 0;
    int i;
    struct elf_phdr * elf_ppnt;
    struct elf_phdr *elf_phdata;
    abi_ulong elf_bss, k, elf_brk;
    int retval;
    char * elf_interpreter;
    abi_ulong elf_entry, interp_load_addr = 0;
    abi_ulong start_code, end_code, start_data, end_data;
    abi_ulong reloc_func_desc = 0;
#ifdef LOW_ELF_STACK
    abi_ulong elf_stack = ~((abi_ulong)0UL);
#endif

    load_addr = 0;
    load_bias = 0;
    elf_ex = *((struct elfhdr *) bprm->buf); /* exec-header */
    bswap_ehdr(&elf_ex);

    /* First of all, some simple consistency checks */
    if ((elf_ex.e_type != ET_EXEC && elf_ex.e_type != ET_DYN) ||
        (!elf_check_arch(elf_ex.e_machine))) {
        return -ENOEXEC;
    }

    /* Copy the filename, environment and arguments into the temporary
       argument pages; bprm->p moves downwards as strings are added and
       becomes 0 on overflow. */
    bprm->p = copy_elf_strings(1, &bprm->filename, bprm->page, bprm->p);
    bprm->p = copy_elf_strings(bprm->envc, bprm->envp, bprm->page, bprm->p);
    bprm->p = copy_elf_strings(bprm->argc, bprm->argv, bprm->page, bprm->p);
    if (!bprm->p) {
        /* NOTE(review): this assignment is overwritten by the lseek()
           below; the overflow is actually handled by the later
           !bprm->p check. */
        retval = -E2BIG;
    }

    /* Now read in all of the header information */
    elf_phdata = (struct elf_phdr *)malloc(elf_ex.e_phentsize*elf_ex.e_phnum);
    if (elf_phdata == NULL) {
        return -ENOMEM;
    }

    retval = lseek(bprm->fd, elf_ex.e_phoff, SEEK_SET);
    /* NOTE(review): '> 0' skips the read when e_phoff is 0; '>= 0' was
       presumably intended, though a valid ELF never has e_phoff == 0. */
    if (retval > 0) {
        retval = read(bprm->fd, (char *)elf_phdata,
                      elf_ex.e_phentsize * elf_ex.e_phnum);
    }

    if (retval < 0) {
        perror("load_elf_binary");
        exit(-1);
        /* NOTE(review): unreachable after exit(). */
        free(elf_phdata);
        return -errno;
    }

    bswap_phdr(elf_phdata, elf_ex.e_phnum);

    elf_ppnt = elf_phdata;

    elf_bss = 0;
    elf_brk = 0;


    elf_interpreter = NULL;
    start_code = ~((abi_ulong)0UL);
    end_code = 0;
    start_data = 0;
    end_data = 0;

    /* Scan the program headers for a PT_INTERP segment; if present,
       open the named interpreter and read its ELF header into bprm->buf. */
    for (i = 0;i < elf_ex.e_phnum; i++) {
        if (elf_ppnt->p_type == PT_INTERP) {
            /* A valid binary has at most one PT_INTERP segment. */
            if (elf_interpreter != NULL)
            {
                free(elf_phdata);
                free(elf_interpreter);
                close(bprm->fd);
                return -EINVAL;
            }

            /* This is the program interpreter used for
             * shared libraries - for now assume that this
             * is an a.out format binary
             */

            elf_interpreter = (char *)malloc(elf_ppnt->p_filesz);

            if (elf_interpreter == NULL) {
                free(elf_phdata);
                close(bprm->fd);
                return -ENOMEM;
            }

            /* Read the interpreter path (p_filesz bytes, which per the
               ELF spec include the terminating NUL). */
            retval = lseek(bprm->fd, elf_ppnt->p_offset, SEEK_SET);
            if (retval >= 0) {
                retval = read(bprm->fd, elf_interpreter, elf_ppnt->p_filesz);
            }
            if (retval < 0) {
                perror("load_elf_binary2");
                exit(-1);
            }

            /* path() redirects the lookup under the emulation prefix. */
            if (retval >= 0) {
                retval = open(path(elf_interpreter), O_RDONLY);
                if (retval >= 0) {
                    interpreter_fd = retval;
                }
                else {
                    perror(elf_interpreter);
                    exit(-1);
                    /* retval = -errno; */
                }
            }

            /* Read the first 128 bytes: the interpreter's ELF header. */
            if (retval >= 0) {
                retval = lseek(interpreter_fd, 0, SEEK_SET);
                if (retval >= 0) {
                    retval = read(interpreter_fd, bprm->buf, 128);
                }
            }
            if (retval >= 0) {
                interp_elf_ex = *((struct elfhdr *) bprm->buf); /* elf exec-header */
            }
            if (retval < 0) {
                perror("load_elf_binary3");
                exit(-1);
                /* NOTE(review): unreachable after exit(). */
                free(elf_phdata);
                free(elf_interpreter);
                close(bprm->fd);
                return retval;
            }
        }
        elf_ppnt++;
    }

    /* Some simple consistency checks for the interpreter */
    if (elf_interpreter) {
        /* Only the ELF magic is checked here; load_elf_interp() performs
           the type/architecture checks later. */
        if (interp_elf_ex.e_ident[0] != 0x7f ||
            strncmp((char *)&interp_elf_ex.e_ident[1], "ELF", 3) != 0) {
            free(elf_interpreter);
            free(elf_phdata);
            close(bprm->fd);
            return -ELIBBAD;
        }
    }

    /* OK, we are done with that, now set up the arg stuff,
       and then start this sucker up */

    /* bprm->p == 0 means copy_elf_strings() ran out of argument space. */
    if (!bprm->p) {
        free(elf_interpreter);
        free(elf_phdata);
        close(bprm->fd);
        return -E2BIG;
    }

    /* OK, This is the point of no return */
    info->end_data = 0;
    info->end_code = 0;
    info->start_mmap = (abi_ulong)ELF_START_MMAP;
    info->mmap = 0;
    elf_entry = (abi_ulong) elf_ex.e_entry;

    /*
     * In case where user has not explicitly set the guest_base, we
     * probe here that should we set it automatically.
     */
    if (!have_guest_base) {
        /*
         * Go through ELF program header table and find out whether
         * any of the segments drop below our current mmap_min_addr and
         * in that case set guest_base to corresponding address.
         */
        for (i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum;
             i++, elf_ppnt++) {
            if (elf_ppnt->p_type != PT_LOAD)
                continue;
            if (HOST_PAGE_ALIGN(elf_ppnt->p_vaddr) < mmap_min_addr) {
                guest_base = HOST_PAGE_ALIGN(mmap_min_addr);
                break;
            }
        }
    }

    /* Do this so that we can load the interpreter, if need be. We will
       change some of these later */
    info->rss = 0;
    bprm->p = setup_arg_pages(bprm->p, bprm, info);
    info->start_stack = bprm->p;

    /* Now we do a little grungy work by mmaping the ELF image into
     * the correct location in memory. At this point, we assume that
     * the image should be loaded at fixed address, not at a variable
     * address.
     */

    for (i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum; i++, elf_ppnt++) {
        int elf_prot = 0;
        int elf_flags = 0;
        abi_ulong error;

        if (elf_ppnt->p_type != PT_LOAD)
            continue;

        /* Translate the segment's PF_* permission bits to mmap PROT_*. */
        if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ;
        if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
        if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
        elf_flags = MAP_PRIVATE | MAP_DENYWRITE;
        if (elf_ex.e_type == ET_EXEC || load_addr_set) {
            elf_flags |= MAP_FIXED;
        } else if (elf_ex.e_type == ET_DYN) {
            /* Try and get dynamic programs out of the way of the default mmap
               base, as well as whatever program they might try to exec. This
               is because the brk will follow the loader, and is not movable. */
            /* NOTE: for qemu, we do a big mmap to get enough space
               without hardcoding any address */
            error = target_mmap(0, ET_DYN_MAP_SIZE,
                                PROT_NONE, MAP_PRIVATE | MAP_ANON,
                                -1, 0);
            if (error == -1) {
                perror("mmap");
                exit(-1);
            }
            load_bias = TARGET_ELF_PAGESTART(error - elf_ppnt->p_vaddr);
        }

        error = target_mmap(TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr),
                            (elf_ppnt->p_filesz +
                             TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)),
                            elf_prot,
                            (MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE),
                            bprm->fd,
                            (elf_ppnt->p_offset -
                             TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)));
        if (error == -1) {
            perror("mmap");
            exit(-1);
        }

#ifdef LOW_ELF_STACK
        if (TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr) < elf_stack)
            elf_stack = TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr);
#endif

        if (!load_addr_set) {
            load_addr_set = 1;
            load_addr = elf_ppnt->p_vaddr - elf_ppnt->p_offset;
            if (elf_ex.e_type == ET_DYN) {
                /* Fold the actual placement of the first segment into
                   the bias applied to every subsequent address. */
                load_bias += error -
                    TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr);
                load_addr += load_bias;
                reloc_func_desc = load_bias;
            }
        }

        /* Track code/data/bss boundaries across all PT_LOAD segments. */
        k = elf_ppnt->p_vaddr;
        if (k < start_code)
            start_code = k;
        if (start_data < k)
            start_data = k;
        k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
        if (k > elf_bss)
            elf_bss = k;
        if ((elf_ppnt->p_flags & PF_X) && end_code < k)
            end_code = k;
        if (end_data < k)
            end_data = k;
        k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
        if (k > elf_brk) elf_brk = k;
    }

    /* Rebase the recorded addresses by the load bias (zero for ET_EXEC). */
    elf_entry += load_bias;
    elf_bss += load_bias;
    elf_brk += load_bias;
    start_code += load_bias;
    end_code += load_bias;
    start_data += load_bias;
    end_data += load_bias;

    if (elf_interpreter) {
        /* Execution starts in the interpreter, which later jumps to the
           main binary's own entry point. */
        elf_entry = load_elf_interp(&interp_elf_ex, interpreter_fd,
                                    &interp_load_addr);
        reloc_func_desc = interp_load_addr;

        close(interpreter_fd);
        free(elf_interpreter);

        if (elf_entry == ~((abi_ulong)0UL)) {
            printf("Unable to load interpreter\n");
            free(elf_phdata);
            exit(-1);
            /* NOTE(review): unreachable after exit(). */
            return 0;
        }
    }

    free(elf_phdata);

    /* Symbols are only needed when logging/disassembly is active. */
    if (qemu_log_enabled())
        load_symbols(&elf_ex, bprm->fd);

    close(bprm->fd);

#ifdef LOW_ELF_STACK
    info->start_stack = bprm->p = elf_stack - 4;
#endif
    /* Push the auxiliary vector, argv/envp pointer arrays and argc;
       bprm->p becomes the initial guest stack pointer. */
    bprm->p = create_elf_tables(bprm->p,
                                bprm->argc,
                                bprm->envc,
                                &elf_ex,
                                load_addr, load_bias,
                                interp_load_addr,
                                info);
    info->load_addr = reloc_func_desc;
    info->start_brk = info->brk = elf_brk;
    info->end_code = end_code;
    info->start_code = start_code;
    info->start_data = start_data;
    info->end_data = end_data;
    info->start_stack = bprm->p;

    /* Calling set_brk effectively mmaps the pages that we need for the bss and break
       sections */
    set_brk(elf_bss, elf_brk);

    padzero(elf_bss, elf_brk);

#if 0
    printf("(start_brk) %x\n" , info->start_brk);
    printf("(end_code) %x\n" , info->end_code);
    printf("(start_code) %x\n" , info->start_code);
    printf("(end_data) %x\n" , info->end_data);
    printf("(start_stack) %x\n" , info->start_stack);
    printf("(brk) %x\n" , info->brk);
#endif

    info->entry = elf_entry;

    return 0;
}
1002
/*
 * Initialize the initial thread's registers from the loaded image; the
 * real work is delegated to the per-architecture target_thread_init().
 */
void do_init_thread(struct target_pt_regs *regs, struct image_info *infop)
{

    target_thread_init(regs, infop);
}