1 /*
2 * linux/fs/binfmt_elf.c
3 *
4 * These are the functions used to load ELF format executables as used
5 * on SVr4 machines. Information on the format may be found in the book
6 * "UNIX SYSTEM V RELEASE 4 Programmers Guide: Ansi C and Programming Support
7 * Tools".
8 *
9 * Copyright 1993, 1994: Eric Youngdale (ericy@cais.com).
10 */
11
12 #include <linux/module.h>
13 #include <linux/kernel.h>
14 #include <linux/fs.h>
15 #include <linux/mm.h>
16 #include <linux/mman.h>
17 #include <linux/errno.h>
18 #include <linux/signal.h>
19 #include <linux/binfmts.h>
20 #include <linux/string.h>
21 #include <linux/file.h>
22 #include <linux/slab.h>
23 #include <linux/personality.h>
24 #include <linux/elfcore.h>
25 #include <linux/init.h>
26 #include <linux/highuid.h>
27 #include <linux/compiler.h>
28 #include <linux/highmem.h>
29 #include <linux/pagemap.h>
30 #include <linux/vmalloc.h>
31 #include <linux/security.h>
32 #include <linux/random.h>
33 #include <linux/elf.h>
34 #include <linux/elf-randomize.h>
35 #include <linux/utsname.h>
36 #include <linux/coredump.h>
37 #include <linux/sched.h>
38 #include <linux/sched/coredump.h>
39 #include <linux/sched/task_stack.h>
40 #include <linux/sched/cputime.h>
41 #include <linux/cred.h>
42 #include <linux/dax.h>
43 #include <linux/uaccess.h>
44 #include <asm/param.h>
45 #include <asm/page.h>
46
47 #ifndef user_long_t
48 #define user_long_t long
49 #endif
50 #ifndef user_siginfo_t
51 #define user_siginfo_t siginfo_t
52 #endif
53
54 static int load_elf_binary(struct linux_binprm *bprm);
55 static unsigned long elf_map(struct file *, unsigned long, struct elf_phdr *,
56 int, int, unsigned long);
57
58 #ifdef CONFIG_USELIB
59 static int load_elf_library(struct file *);
60 #else
61 #define load_elf_library NULL
62 #endif
63
64 /*
65 * If we don't support core dumping, then supply a NULL so we
66 * don't even try.
67 */
68 #ifdef CONFIG_ELF_CORE
69 static int elf_core_dump(struct coredump_params *cprm);
70 #else
71 #define elf_core_dump NULL
72 #endif
73
74 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
75 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
76 #else
77 #define ELF_MIN_ALIGN PAGE_SIZE
78 #endif
79
80 #ifndef ELF_CORE_EFLAGS
81 #define ELF_CORE_EFLAGS 0
82 #endif
83
84 #define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1))
85 #define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
86 #define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))
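/*
 * Worked example (illustrative, assuming ELF_MIN_ALIGN == 4096):
 * for _v == 0x12345, ELF_PAGESTART(_v) == 0x12000,
 * ELF_PAGEOFFSET(_v) == 0x345, and ELF_PAGEALIGN(_v) == 0x13000.
 */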
87
88 static struct linux_binfmt elf_format = {
89 .module = THIS_MODULE,
90 .load_binary = load_elf_binary,
91 .load_shlib = load_elf_library,
92 .core_dump = elf_core_dump,
93 .min_coredump = ELF_EXEC_PAGESIZE,
94 };
95
96 #define BAD_ADDR(x) ((unsigned long)(x) >= TASK_SIZE)
97
98 static int set_brk(unsigned long start, unsigned long end, int prot)
99 {
100 start = ELF_PAGEALIGN(start);
101 end = ELF_PAGEALIGN(end);
102 if (end > start) {
103 /*
104 * Map the last of the bss segment.
105 * If the header is requesting these pages to be
106 * executable, honour that (ppc32 needs this).
107 */
108 int error = vm_brk_flags(start, end - start,
109 prot & PROT_EXEC ? VM_EXEC : 0);
110 if (error)
111 return error;
112 }
113 current->mm->start_brk = current->mm->brk = end;
114 return 0;
115 }
116
117 /* We need to explicitly zero any fractional pages
118 after the data section (i.e. bss); otherwise they would
119 contain junk from the file that should not
120 be in memory.
121 */
122 static int padzero(unsigned long elf_bss)
123 {
124 unsigned long nbyte;
125
126 nbyte = ELF_PAGEOFFSET(elf_bss);
127 if (nbyte) {
128 nbyte = ELF_MIN_ALIGN - nbyte;
129 if (clear_user((void __user *) elf_bss, nbyte))
130 return -EFAULT;
131 }
132 return 0;
133 }
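/*
 * Example (illustrative, 4 KiB pages): for elf_bss == 0x0804a123,
 * ELF_PAGEOFFSET() yields 0x123, so the remaining 0xedd bytes of
 * that page are cleared up to the next page boundary.
 */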
134
135 /* Let's use some macros to make this stack manipulation a little clearer */
136 #ifdef CONFIG_STACK_GROWSUP
137 #define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) + (items))
138 #define STACK_ROUND(sp, items) \
139 ((15 + (unsigned long) ((sp) + (items))) &~ 15UL)
140 #define STACK_ALLOC(sp, len) ({ \
141 elf_addr_t __user *old_sp = (elf_addr_t __user *)sp; sp += len; \
142 old_sp; })
143 #else
144 #define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) - (items))
145 #define STACK_ROUND(sp, items) \
146 (((unsigned long) (sp - items)) &~ 15UL)
147 #define STACK_ALLOC(sp, len) ({ sp -= len ; sp; })
148 #endif
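/*
 * In the common grows-down case above, STACK_ALLOC(sp, len) simply
 * lowers sp by len bytes and returns the new value, so successive
 * allocations (platform strings, AT_RANDOM bytes) are placed below
 * the argument/environment strings; STACK_ROUND() then aligns the
 * final pointer down to a 16-byte boundary.
 */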
149
150 #ifndef ELF_BASE_PLATFORM
151 /*
152 * AT_BASE_PLATFORM indicates the "real" hardware/microarchitecture.
153 * If the arch defines ELF_BASE_PLATFORM (in asm/elf.h), the value
154 * will be copied to the user stack in the same manner as AT_PLATFORM.
155 */
156 #define ELF_BASE_PLATFORM NULL
157 #endif
158
159 static int
160 create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
161 unsigned long load_addr, unsigned long interp_load_addr)
162 {
163 unsigned long p = bprm->p;
164 int argc = bprm->argc;
165 int envc = bprm->envc;
166 elf_addr_t __user *sp;
167 elf_addr_t __user *u_platform;
168 elf_addr_t __user *u_base_platform;
169 elf_addr_t __user *u_rand_bytes;
170 const char *k_platform = ELF_PLATFORM;
171 const char *k_base_platform = ELF_BASE_PLATFORM;
172 unsigned char k_rand_bytes[16];
173 int items;
174 elf_addr_t *elf_info;
175 int ei_index = 0;
176 const struct cred *cred = current_cred();
177 struct vm_area_struct *vma;
178
179 /*
180 * In some cases (e.g. Hyper-Threading), we want to avoid L1
181 * evictions by the processes running on the same package. One
182 * thing we can do is to shuffle the initial stack for them.
183 */
184
185 p = arch_align_stack(p);
186
187 /*
188 * If this architecture has a platform capability string, copy it
189 * to userspace. In some cases (Sparc), this info is impossible
190 * for userspace to get any other way, in others (i386) it is
191 * merely difficult.
192 */
193 u_platform = NULL;
194 if (k_platform) {
195 size_t len = strlen(k_platform) + 1;
196
197 u_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
198 if (__copy_to_user(u_platform, k_platform, len))
199 return -EFAULT;
200 }
201
202 /*
203 * If this architecture has a "base" platform capability
204 * string, copy it to userspace.
205 */
206 u_base_platform = NULL;
207 if (k_base_platform) {
208 size_t len = strlen(k_base_platform) + 1;
209
210 u_base_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
211 if (__copy_to_user(u_base_platform, k_base_platform, len))
212 return -EFAULT;
213 }
214
215 /*
216 * Generate 16 random bytes for userspace PRNG seeding.
217 */
218 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
219 u_rand_bytes = (elf_addr_t __user *)
220 STACK_ALLOC(p, sizeof(k_rand_bytes));
221 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
222 return -EFAULT;
223
224 /* Create the ELF interpreter info */
225 elf_info = (elf_addr_t *)current->mm->saved_auxv;
226 /* update AT_VECTOR_SIZE_BASE if the number of NEW_AUX_ENT() changes */
227 #define NEW_AUX_ENT(id, val) \
228 do { \
229 elf_info[ei_index++] = id; \
230 elf_info[ei_index++] = val; \
231 } while (0)
232
233 #ifdef ARCH_DLINFO
234 /*
235 * ARCH_DLINFO must come first so PPC can do its special alignment of
236 * AUXV.
237 * update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT() in
238 * ARCH_DLINFO changes
239 */
240 ARCH_DLINFO;
241 #endif
242 NEW_AUX_ENT(AT_HWCAP, ELF_HWCAP);
243 NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE);
244 NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC);
245 NEW_AUX_ENT(AT_PHDR, load_addr + exec->e_phoff);
246 NEW_AUX_ENT(AT_PHENT, sizeof(struct elf_phdr));
247 NEW_AUX_ENT(AT_PHNUM, exec->e_phnum);
248 NEW_AUX_ENT(AT_BASE, interp_load_addr);
249 NEW_AUX_ENT(AT_FLAGS, 0);
250 NEW_AUX_ENT(AT_ENTRY, exec->e_entry);
251 NEW_AUX_ENT(AT_UID, from_kuid_munged(cred->user_ns, cred->uid));
252 NEW_AUX_ENT(AT_EUID, from_kuid_munged(cred->user_ns, cred->euid));
253 NEW_AUX_ENT(AT_GID, from_kgid_munged(cred->user_ns, cred->gid));
254 NEW_AUX_ENT(AT_EGID, from_kgid_munged(cred->user_ns, cred->egid));
255 NEW_AUX_ENT(AT_SECURE, security_bprm_secureexec(bprm));
256 NEW_AUX_ENT(AT_RANDOM, (elf_addr_t)(unsigned long)u_rand_bytes);
257 #ifdef ELF_HWCAP2
258 NEW_AUX_ENT(AT_HWCAP2, ELF_HWCAP2);
259 #endif
260 NEW_AUX_ENT(AT_EXECFN, bprm->exec);
261 if (k_platform) {
262 NEW_AUX_ENT(AT_PLATFORM,
263 (elf_addr_t)(unsigned long)u_platform);
264 }
265 if (k_base_platform) {
266 NEW_AUX_ENT(AT_BASE_PLATFORM,
267 (elf_addr_t)(unsigned long)u_base_platform);
268 }
269 if (bprm->interp_flags & BINPRM_FLAGS_EXECFD) {
270 NEW_AUX_ENT(AT_EXECFD, bprm->interp_data);
271 }
272 #undef NEW_AUX_ENT
273 /* AT_NULL is zero; clear the rest too */
274 memset(&elf_info[ei_index], 0,
275 sizeof current->mm->saved_auxv - ei_index * sizeof elf_info[0]);
276
277 /* And advance past the AT_NULL entry. */
278 ei_index += 2;
279
280 sp = STACK_ADD(p, ei_index);
281
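/*
 * One slot for argc itself, argc + 1 argv pointers (including the
 * terminating NULL), and envc + 1 envp pointers (likewise
 * NULL-terminated).
 */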
282 items = (argc + 1) + (envc + 1) + 1;
283 bprm->p = STACK_ROUND(sp, items);
284
285 /* Point sp at the lowest address on the stack */
286 #ifdef CONFIG_STACK_GROWSUP
287 sp = (elf_addr_t __user *)bprm->p - items - ei_index;
288 bprm->exec = (unsigned long)sp; /* XXX: PARISC HACK */
289 #else
290 sp = (elf_addr_t __user *)bprm->p;
291 #endif
292
293
294 /*
295 * Grow the stack manually; some architectures have a limit on how
296 * far ahead a user-space access may be in order to grow the stack.
297 */
298 vma = find_extend_vma(current->mm, bprm->p);
299 if (!vma)
300 return -EFAULT;
301
302 /* Now, let's put argc (and argv, envp if appropriate) on the stack */
303 if (__put_user(argc, sp++))
304 return -EFAULT;
305
306 /* Populate list of argv pointers back to argv strings. */
307 p = current->mm->arg_end = current->mm->arg_start;
308 while (argc-- > 0) {
309 size_t len;
310 if (__put_user((elf_addr_t)p, sp++))
311 return -EFAULT;
312 len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
313 if (!len || len > MAX_ARG_STRLEN)
314 return -EINVAL;
315 p += len;
316 }
317 if (__put_user(0, sp++))
318 return -EFAULT;
319 current->mm->arg_end = p;
320
321 /* Populate list of envp pointers back to envp strings. */
322 current->mm->env_end = current->mm->env_start = p;
323 while (envc-- > 0) {
324 size_t len;
325 if (__put_user((elf_addr_t)p, sp++))
326 return -EFAULT;
327 len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
328 if (!len || len > MAX_ARG_STRLEN)
329 return -EINVAL;
330 p += len;
331 }
332 if (__put_user(0, sp++))
333 return -EFAULT;
334 current->mm->env_end = p;
335
336 /* Put the elf_info on the stack in the right place. */
337 if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
338 return -EFAULT;
339 return 0;
340 }
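/*
 * Sketch of the resulting initial stack (SysV ABI layout; addresses
 * grow upward in this diagram, illustrative only):
 *
 *   sp -> argc
 *         argv[0] ... argv[argc-1], NULL
 *         envp[0] ... envp[envc-1], NULL
 *         auxv pairs (AT_* id, value), terminated by AT_NULL
 *         ... platform strings, AT_RANDOM bytes, arg/env strings ...
 */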
341
342 #ifndef elf_map
343
344 static unsigned long elf_map(struct file *filep, unsigned long addr,
345 struct elf_phdr *eppnt, int prot, int type,
346 unsigned long total_size)
347 {
348 unsigned long map_addr;
349 unsigned long size = eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr);
350 unsigned long off = eppnt->p_offset - ELF_PAGEOFFSET(eppnt->p_vaddr);
351 addr = ELF_PAGESTART(addr);
352 size = ELF_PAGEALIGN(size);
353
354 /* mmap() will return -EINVAL if given a zero size, but a
355 * segment with zero filesize is perfectly valid */
356 if (!size)
357 return addr;
358
359 /*
360 * total_size is the size of the ELF (interpreter) image.
361 * The _first_ mmap needs to know the full size, otherwise
362 * randomization might put this image into an overlapping
363 * position with the ELF binary image. (since size < total_size)
364 * So we first map the 'big' image - and unmap the remainder at
365 * the end. (which unmap is needed for ELF images with holes.)
366 */
367 if (total_size) {
368 total_size = ELF_PAGEALIGN(total_size);
369 map_addr = vm_mmap(filep, addr, total_size, prot, type, off);
370 if (!BAD_ADDR(map_addr))
371 vm_munmap(map_addr+size, total_size-size);
372 } else
373 map_addr = vm_mmap(filep, addr, size, prot, type, off);
374
375 return(map_addr);
376 }
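/*
 * Illustrative numbers for the total_size path above: if the whole
 * interpreter image spans 0x300000 bytes but this first segment only
 * needs 0x1000, we mmap the full 0x300000 to reserve the range, then
 * munmap the trailing 0x2ff000 so later PT_LOAD mappings (and any
 * holes) can be established within it.
 */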
377
378 #endif /* !elf_map */
379
380 static unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
381 {
382 int i, first_idx = -1, last_idx = -1;
383
384 for (i = 0; i < nr; i++) {
385 if (cmds[i].p_type == PT_LOAD) {
386 last_idx = i;
387 if (first_idx == -1)
388 first_idx = i;
389 }
390 }
391 if (first_idx == -1)
392 return 0;
393
394 return cmds[last_idx].p_vaddr + cmds[last_idx].p_memsz -
395 ELF_PAGESTART(cmds[first_idx].p_vaddr);
396 }
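/*
 * Example (illustrative): PT_LOAD segments at p_vaddr 0x0 with
 * p_memsz 0x1000 and at p_vaddr 0x200000 with p_memsz 0x500 yield
 * 0x200000 + 0x500 - ELF_PAGESTART(0x0) == 0x200500.
 */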
397
398 /**
399 * load_elf_phdrs() - load ELF program headers
400 * @elf_ex: ELF header of the binary whose program headers should be loaded
401 * @elf_file: the opened ELF binary file
402 *
403 * Loads ELF program headers from the binary file elf_file, which has the ELF
404 * header pointed to by elf_ex, into a newly allocated array. The caller is
405 * responsible for freeing the allocated data. Returns an ERR_PTR upon failure.
406 */
407 static struct elf_phdr *load_elf_phdrs(struct elfhdr *elf_ex,
408 struct file *elf_file)
409 {
410 struct elf_phdr *elf_phdata = NULL;
411 int retval, size, err = -1;
412
413 /*
414 * If the size of this structure has changed, then punt, since
415 * we will be doing the wrong thing.
416 */
417 if (elf_ex->e_phentsize != sizeof(struct elf_phdr))
418 goto out;
419
420 /* Sanity check the number of program headers... */
421 if (elf_ex->e_phnum < 1 ||
422 elf_ex->e_phnum > 65536U / sizeof(struct elf_phdr))
423 goto out;
424
425 /* ...and their total size. */
426 size = sizeof(struct elf_phdr) * elf_ex->e_phnum;
427 if (size > ELF_MIN_ALIGN)
428 goto out;
429
430 elf_phdata = kmalloc(size, GFP_KERNEL);
431 if (!elf_phdata)
432 goto out;
433
434 /* Read in the program headers */
435 retval = kernel_read(elf_file, elf_ex->e_phoff,
436 (char *)elf_phdata, size);
437 if (retval != size) {
438 err = (retval < 0) ? retval : -EIO;
439 goto out;
440 }
441
442 /* Success! */
443 err = 0;
444 out:
445 if (err) {
446 kfree(elf_phdata);
447 elf_phdata = NULL;
448 }
449 return elf_phdata;
450 }
451
452 #ifndef CONFIG_ARCH_BINFMT_ELF_STATE
453
454 /**
455 * struct arch_elf_state - arch-specific ELF loading state
456 *
457 * This structure is used to preserve architecture specific data during
458 * the loading of an ELF file, throughout the checking of architecture
459 * specific ELF headers & through to the point where the ELF load is
460 * known to be proceeding (ie. SET_PERSONALITY).
461 *
462 * This implementation is a dummy for architectures which require no
463 * specific state.
464 */
465 struct arch_elf_state {
466 };
467
468 #define INIT_ARCH_ELF_STATE {}
469
470 /**
471 * arch_elf_pt_proc() - check a PT_LOPROC..PT_HIPROC ELF program header
472 * @ehdr: The main ELF header
473 * @phdr: The program header to check
474 * @elf: The open ELF file
475 * @is_interp: True if the phdr is from the interpreter of the ELF being
476 * loaded, else false.
477 * @state: Architecture-specific state preserved throughout the process
478 * of loading the ELF.
479 *
480 * Inspects the program header phdr to validate its correctness and/or
481 * suitability for the system. Called once per ELF program header in the
482 * range PT_LOPROC to PT_HIPROC, for both the ELF being loaded and its
483 * interpreter.
484 *
485 * Return: Zero to proceed with the ELF load, non-zero to fail the ELF load
486 * with that return code.
487 */
488 static inline int arch_elf_pt_proc(struct elfhdr *ehdr,
489 struct elf_phdr *phdr,
490 struct file *elf, bool is_interp,
491 struct arch_elf_state *state)
492 {
493 /* Dummy implementation, always proceed */
494 return 0;
495 }
496
497 /**
498 * arch_check_elf() - check an ELF executable
499 * @ehdr: The main ELF header
500 * @has_interp: True if the ELF has an interpreter, else false.
501 * @interp_ehdr: The interpreter's ELF header
502 * @state: Architecture-specific state preserved throughout the process
503 * of loading the ELF.
504 *
505 * Provides a final opportunity for architecture code to reject the loading
506 * of the ELF & cause an exec syscall to return an error. This is called after
507 * all program headers to be checked by arch_elf_pt_proc have been.
508 *
509 * Return: Zero to proceed with the ELF load, non-zero to fail the ELF load
510 * with that return code.
511 */
512 static inline int arch_check_elf(struct elfhdr *ehdr, bool has_interp,
513 struct elfhdr *interp_ehdr,
514 struct arch_elf_state *state)
515 {
516 /* Dummy implementation, always proceed */
517 return 0;
518 }
519
520 #endif /* !CONFIG_ARCH_BINFMT_ELF_STATE */
521
522 /* This is much more generalized than the library routine read function,
523 so we keep this separate. Technically the library read function
524 is only provided so that we can read a.out libraries that have
525 an ELF header */
526
527 static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
528 struct file *interpreter, unsigned long *interp_map_addr,
529 unsigned long no_base, struct elf_phdr *interp_elf_phdata)
530 {
531 struct elf_phdr *eppnt;
532 unsigned long load_addr = 0;
533 int load_addr_set = 0;
534 unsigned long last_bss = 0, elf_bss = 0;
535 int bss_prot = 0;
536 unsigned long error = ~0UL;
537 unsigned long total_size;
538 int i;
539
540 /* First of all, some simple consistency checks */
541 if (interp_elf_ex->e_type != ET_EXEC &&
542 interp_elf_ex->e_type != ET_DYN)
543 goto out;
544 if (!elf_check_arch(interp_elf_ex))
545 goto out;
546 if (!interpreter->f_op->mmap)
547 goto out;
548
549 total_size = total_mapping_size(interp_elf_phdata,
550 interp_elf_ex->e_phnum);
551 if (!total_size) {
552 error = -EINVAL;
553 goto out;
554 }
555
556 eppnt = interp_elf_phdata;
557 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
558 if (eppnt->p_type == PT_LOAD) {
559 int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
560 int elf_prot = 0;
561 unsigned long vaddr = 0;
562 unsigned long k, map_addr;
563
564 if (eppnt->p_flags & PF_R)
565 elf_prot = PROT_READ;
566 if (eppnt->p_flags & PF_W)
567 elf_prot |= PROT_WRITE;
568 if (eppnt->p_flags & PF_X)
569 elf_prot |= PROT_EXEC;
570 vaddr = eppnt->p_vaddr;
571 if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
572 elf_type |= MAP_FIXED;
573 else if (no_base && interp_elf_ex->e_type == ET_DYN)
574 load_addr = -vaddr;
575
576 map_addr = elf_map(interpreter, load_addr + vaddr,
577 eppnt, elf_prot, elf_type, total_size);
578 total_size = 0;
579 if (!*interp_map_addr)
580 *interp_map_addr = map_addr;
581 error = map_addr;
582 if (BAD_ADDR(map_addr))
583 goto out;
584
585 if (!load_addr_set &&
586 interp_elf_ex->e_type == ET_DYN) {
587 load_addr = map_addr - ELF_PAGESTART(vaddr);
588 load_addr_set = 1;
589 }
590
591 /*
592 * Check to see if the section's size will overflow the
593 * allowed task size. Note that p_filesz must always be
594 * <= p_memsz so it's only necessary to check p_memsz.
595 */
596 k = load_addr + eppnt->p_vaddr;
597 if (BAD_ADDR(k) ||
598 eppnt->p_filesz > eppnt->p_memsz ||
599 eppnt->p_memsz > TASK_SIZE ||
600 TASK_SIZE - eppnt->p_memsz < k) {
601 error = -ENOMEM;
602 goto out;
603 }
604
605 /*
606 * Find the end of the file mapping for this phdr, and
607 * keep track of the largest address we see for this.
608 */
609 k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
610 if (k > elf_bss)
611 elf_bss = k;
612
613 /*
614 * Do the same thing for the memory mapping - between
615 * elf_bss and last_bss is the bss section.
616 */
617 k = load_addr + eppnt->p_vaddr + eppnt->p_memsz;
618 if (k > last_bss) {
619 last_bss = k;
620 bss_prot = elf_prot;
621 }
622 }
623 }
624
625 /*
626 * Now fill out the bss section: first pad the last page from
627 * the file up to the page boundary, and zero it from elf_bss
628 * up to the end of the page.
629 */
630 if (padzero(elf_bss)) {
631 error = -EFAULT;
632 goto out;
633 }
634 /*
635 * Next, align both the file and mem bss up to the page size,
636 * since this is where elf_bss was just zeroed up to, and where
637 * last_bss will end after the vm_brk_flags() below.
638 */
639 elf_bss = ELF_PAGEALIGN(elf_bss);
640 last_bss = ELF_PAGEALIGN(last_bss);
641 /* Finally, if there is still more bss to allocate, do it. */
642 if (last_bss > elf_bss) {
643 error = vm_brk_flags(elf_bss, last_bss - elf_bss,
644 bss_prot & PROT_EXEC ? VM_EXEC : 0);
645 if (error)
646 goto out;
647 }
648
649 error = load_addr;
650 out:
651 return error;
652 }
653
654 /*
655 * These are the functions used to load ELF style executables and shared
656 * libraries. There is no binary dependent code anywhere else.
657 */
658
659 #ifndef STACK_RND_MASK
660 #define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12)) /* 8MB of VA */
661 #endif
662
663 static unsigned long randomize_stack_top(unsigned long stack_top)
664 {
665 unsigned long random_variable = 0;
666
667 if (current->flags & PF_RANDOMIZE) {
668 random_variable = get_random_long();
669 random_variable &= STACK_RND_MASK;
670 random_variable <<= PAGE_SHIFT;
671 }
672 #ifdef CONFIG_STACK_GROWSUP
673 return PAGE_ALIGN(stack_top) + random_variable;
674 #else
675 return PAGE_ALIGN(stack_top) - random_variable;
676 #endif
677 }
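/*
 * With the default STACK_RND_MASK and 4 KiB pages, random_variable
 * above is at most 0x7ff pages, i.e. just under 8 MiB shifted off
 * the page-aligned stack top (illustrative; arches may override).
 */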
678
679 static int load_elf_binary(struct linux_binprm *bprm)
680 {
681 struct file *interpreter = NULL; /* to shut gcc up */
682 unsigned long load_addr = 0, load_bias = 0;
683 int load_addr_set = 0;
684 char *elf_interpreter = NULL;
685 unsigned long error;
686 struct elf_phdr *elf_ppnt, *elf_phdata, *interp_elf_phdata = NULL;
687 unsigned long elf_bss, elf_brk;
688 int bss_prot = 0;
689 int retval, i;
690 unsigned long elf_entry;
691 unsigned long interp_load_addr = 0;
692 unsigned long start_code, end_code, start_data, end_data;
693 unsigned long reloc_func_desc __maybe_unused = 0;
694 int executable_stack = EXSTACK_DEFAULT;
695 struct pt_regs *regs = current_pt_regs();
696 struct {
697 struct elfhdr elf_ex;
698 struct elfhdr interp_elf_ex;
699 } *loc;
700 struct arch_elf_state arch_state = INIT_ARCH_ELF_STATE;
701
702 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
703 if (!loc) {
704 retval = -ENOMEM;
705 goto out_ret;
706 }
707
708 /* Get the exec-header */
709 loc->elf_ex = *((struct elfhdr *)bprm->buf);
710
711 retval = -ENOEXEC;
712 /* First of all, some simple consistency checks */
713 if (memcmp(loc->elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
714 goto out;
715
716 if (loc->elf_ex.e_type != ET_EXEC && loc->elf_ex.e_type != ET_DYN)
717 goto out;
718 if (!elf_check_arch(&loc->elf_ex))
719 goto out;
720 if (!bprm->file->f_op->mmap)
721 goto out;
722
723 elf_phdata = load_elf_phdrs(&loc->elf_ex, bprm->file);
724 if (!elf_phdata)
725 goto out;
726
727 elf_ppnt = elf_phdata;
728 elf_bss = 0;
729 elf_brk = 0;
730
731 start_code = ~0UL;
732 end_code = 0;
733 start_data = 0;
734 end_data = 0;
735
736 for (i = 0; i < loc->elf_ex.e_phnum; i++) {
737 if (elf_ppnt->p_type == PT_INTERP) {
738 /* This is the program interpreter used for
739 * shared libraries - for now assume that this
740 * is an a.out format binary
741 */
742 retval = -ENOEXEC;
743 if (elf_ppnt->p_filesz > PATH_MAX ||
744 elf_ppnt->p_filesz < 2)
745 goto out_free_ph;
746
747 retval = -ENOMEM;
748 elf_interpreter = kmalloc(elf_ppnt->p_filesz,
749 GFP_KERNEL);
750 if (!elf_interpreter)
751 goto out_free_ph;
752
753 retval = kernel_read(bprm->file, elf_ppnt->p_offset,
754 elf_interpreter,
755 elf_ppnt->p_filesz);
756 if (retval != elf_ppnt->p_filesz) {
757 if (retval >= 0)
758 retval = -EIO;
759 goto out_free_interp;
760 }
761 /* make sure path is NUL terminated */
762 retval = -ENOEXEC;
763 if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0')
764 goto out_free_interp;
765
766 interpreter = open_exec(elf_interpreter);
767 retval = PTR_ERR(interpreter);
768 if (IS_ERR(interpreter))
769 goto out_free_interp;
770
771 /*
772 * If the binary is not readable then enforce
773 * mm->dumpable = 0 regardless of the interpreter's
774 * permissions.
775 */
776 would_dump(bprm, interpreter);
777
778 /* Get the exec headers */
779 retval = kernel_read(interpreter, 0,
780 (void *)&loc->interp_elf_ex,
781 sizeof(loc->interp_elf_ex));
782 if (retval != sizeof(loc->interp_elf_ex)) {
783 if (retval >= 0)
784 retval = -EIO;
785 goto out_free_dentry;
786 }
787
788 break;
789 }
790 elf_ppnt++;
791 }
792
793 elf_ppnt = elf_phdata;
794 for (i = 0; i < loc->elf_ex.e_phnum; i++, elf_ppnt++)
795 switch (elf_ppnt->p_type) {
796 case PT_GNU_STACK:
797 if (elf_ppnt->p_flags & PF_X)
798 executable_stack = EXSTACK_ENABLE_X;
799 else
800 executable_stack = EXSTACK_DISABLE_X;
801 break;
802
803 case PT_LOPROC ... PT_HIPROC:
804 retval = arch_elf_pt_proc(&loc->elf_ex, elf_ppnt,
805 bprm->file, false,
806 &arch_state);
807 if (retval)
808 goto out_free_dentry;
809 break;
810 }
811
812 /* Some simple consistency checks for the interpreter */
813 if (elf_interpreter) {
814 retval = -ELIBBAD;
815 /* Not an ELF interpreter */
816 if (memcmp(loc->interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
817 goto out_free_dentry;
818 /* Verify the interpreter has a valid arch */
819 if (!elf_check_arch(&loc->interp_elf_ex))
820 goto out_free_dentry;
821
822 /* Load the interpreter program headers */
823 interp_elf_phdata = load_elf_phdrs(&loc->interp_elf_ex,
824 interpreter);
825 if (!interp_elf_phdata)
826 goto out_free_dentry;
827
828 /* Pass PT_LOPROC..PT_HIPROC headers to arch code */
829 elf_ppnt = interp_elf_phdata;
830 for (i = 0; i < loc->interp_elf_ex.e_phnum; i++, elf_ppnt++)
831 switch (elf_ppnt->p_type) {
832 case PT_LOPROC ... PT_HIPROC:
833 retval = arch_elf_pt_proc(&loc->interp_elf_ex,
834 elf_ppnt, interpreter,
835 true, &arch_state);
836 if (retval)
837 goto out_free_dentry;
838 break;
839 }
840 }
841
842 /*
843 * Allow arch code to reject the ELF at this point, whilst it's
844 * still possible to return an error to the code that invoked
845 * the exec syscall.
846 */
847 retval = arch_check_elf(&loc->elf_ex,
848 !!interpreter, &loc->interp_elf_ex,
849 &arch_state);
850 if (retval)
851 goto out_free_dentry;
852
853 /* Flush all traces of the currently running executable */
854 retval = flush_old_exec(bprm);
855 if (retval)
856 goto out_free_dentry;
857
858 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
859 may depend on the personality. */
860 SET_PERSONALITY2(loc->elf_ex, &arch_state);
861 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
862 current->personality |= READ_IMPLIES_EXEC;
863
864 if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
865 current->flags |= PF_RANDOMIZE;
866
867 setup_new_exec(bprm);
868 install_exec_creds(bprm);
869
870 /* Do this so that we can load the interpreter, if need be. We will
871 change some of these later */
872 retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP),
873 executable_stack);
874 if (retval < 0)
875 goto out_free_dentry;
876
877 current->mm->start_stack = bprm->p;
878
879 /* Now we do a little grungy work by mmapping the ELF image into
880 the correct location in memory. */
881 for (i = 0, elf_ppnt = elf_phdata;
882 i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
883 int elf_prot = 0, elf_flags;
884 unsigned long k, vaddr;
885 unsigned long total_size = 0;
886
887 if (elf_ppnt->p_type != PT_LOAD)
888 continue;
889
890 if (unlikely (elf_brk > elf_bss)) {
891 unsigned long nbyte;
892
893 /* There was a PT_LOAD segment with p_memsz > p_filesz
894 before this one. Map anonymous pages, if needed,
895 and clear the area. */
896 retval = set_brk(elf_bss + load_bias,
897 elf_brk + load_bias,
898 bss_prot);
899 if (retval)
900 goto out_free_dentry;
901 nbyte = ELF_PAGEOFFSET(elf_bss);
902 if (nbyte) {
903 nbyte = ELF_MIN_ALIGN - nbyte;
904 if (nbyte > elf_brk - elf_bss)
905 nbyte = elf_brk - elf_bss;
906 if (clear_user((void __user *)elf_bss +
907 load_bias, nbyte)) {
908 /*
909 * This bss-zeroing can fail if the ELF
910 * file specifies odd protections. So
911 * we don't check the return value
912 */
913 }
914 }
915 }
916
917 if (elf_ppnt->p_flags & PF_R)
918 elf_prot |= PROT_READ;
919 if (elf_ppnt->p_flags & PF_W)
920 elf_prot |= PROT_WRITE;
921 if (elf_ppnt->p_flags & PF_X)
922 elf_prot |= PROT_EXEC;
923
924 elf_flags = MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE;
925
926 vaddr = elf_ppnt->p_vaddr;
927 /*
928 * If we are loading ET_EXEC or we have already performed
929 * the ET_DYN load_addr calculations, proceed normally.
930 */
931 if (loc->elf_ex.e_type == ET_EXEC || load_addr_set) {
932 elf_flags |= MAP_FIXED;
933 } else if (loc->elf_ex.e_type == ET_DYN) {
934 /*
935 * This logic is run once for the first LOAD Program
936 * Header for ET_DYN binaries to calculate the
937 * randomization (load_bias) for all the LOAD
938 * Program Headers, and to calculate the entire
939 * size of the ELF mapping (total_size). (Note that
940 * load_addr_set is set to true later once the
941 * initial mapping is performed.)
942 *
943 * There are effectively two types of ET_DYN
944 * binaries: programs (i.e. PIE: ET_DYN with INTERP)
945 * and loaders (ET_DYN without INTERP, since they
946 * _are_ the ELF interpreter). The loaders must
947 * be loaded away from programs since the program
948 * may otherwise collide with the loader (especially
949 * for ET_EXEC which does not have a randomized
950 * position). For example to handle invocations of
951 * "./ld.so someprog" to test out a new version of
952 * the loader, the subsequent program that the
953 * loader loads must avoid the loader itself, so
954 * they cannot share the same load range. Sufficient
955 * room for the brk must be allocated with the
956 * loader as well, since brk must be available with
957 * the loader.
958 *
959 * Therefore, programs are loaded offset from
960 * ELF_ET_DYN_BASE and loaders are loaded into the
961 * independently randomized mmap region (0 load_bias
962 * without MAP_FIXED).
963 */
964 if (elf_interpreter) {
965 load_bias = ELF_ET_DYN_BASE;
966 if (current->flags & PF_RANDOMIZE)
967 load_bias += arch_mmap_rnd();
968 elf_flags |= MAP_FIXED;
969 } else
970 load_bias = 0;
971
972 /*
973 * Since load_bias is used for all subsequent loading
974 * calculations, we must lower it by the first vaddr
975 * so that the remaining calculations based on the
976 * ELF vaddrs will be correctly offset. The result
977 * is then page aligned.
978 */
979 load_bias = ELF_PAGESTART(load_bias - vaddr);
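/*
 * Illustrative numbers: if load_bias were 0x555555554000 and the
 * first PT_LOAD had p_vaddr 0x1000, this computes
 * ELF_PAGESTART(0x555555553000) == 0x555555553000, so that the
 * later load_bias + vaddr mapping lands back on the intended base.
 */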
980
981 total_size = total_mapping_size(elf_phdata,
982 loc->elf_ex.e_phnum);
983 if (!total_size) {
984 retval = -EINVAL;
985 goto out_free_dentry;
986 }
987 }
988
989 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
990 elf_prot, elf_flags, total_size);
991 if (BAD_ADDR(error)) {
992 retval = IS_ERR((void *)error) ?
993 PTR_ERR((void*)error) : -EINVAL;
994 goto out_free_dentry;
995 }
996
997 if (!load_addr_set) {
998 load_addr_set = 1;
999 load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset);
1000 if (loc->elf_ex.e_type == ET_DYN) {
1001 load_bias += error -
1002 ELF_PAGESTART(load_bias + vaddr);
1003 load_addr += load_bias;
1004 reloc_func_desc = load_bias;
1005 }
1006 }
1007 k = elf_ppnt->p_vaddr;
1008 if (k < start_code)
1009 start_code = k;
1010 if (start_data < k)
1011 start_data = k;
1012
1013 /*
1014 * Check to see if the section's size will overflow the
1015 * allowed task size. Note that p_filesz must always be
1016 * <= p_memsz so it is only necessary to check p_memsz.
1017 */
1018 if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
1019 elf_ppnt->p_memsz > TASK_SIZE ||
1020 TASK_SIZE - elf_ppnt->p_memsz < k) {
1021 /* set_brk can never work. Avoid overflows. */
1022 retval = -EINVAL;
1023 goto out_free_dentry;
1024 }
1025
1026 k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
1027
1028 if (k > elf_bss)
1029 elf_bss = k;
1030 if ((elf_ppnt->p_flags & PF_X) && end_code < k)
1031 end_code = k;
1032 if (end_data < k)
1033 end_data = k;
1034 k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
1035 if (k > elf_brk) {
1036 bss_prot = elf_prot;
1037 elf_brk = k;
1038 }
1039 }
1040
1041 loc->elf_ex.e_entry += load_bias;
1042 elf_bss += load_bias;
1043 elf_brk += load_bias;
1044 start_code += load_bias;
1045 end_code += load_bias;
1046 start_data += load_bias;
1047 end_data += load_bias;
1048
1049 /* Calling set_brk effectively mmaps the pages that we need
1050 * for the bss and break sections. We must do this before
1051 * mapping in the interpreter, to make sure it doesn't wind
1052 * up getting placed where the bss needs to go.
1053 */
1054 retval = set_brk(elf_bss, elf_brk, bss_prot);
1055 if (retval)
1056 goto out_free_dentry;
1057 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
1058 retval = -EFAULT; /* Nobody gets to see this, but.. */
1059 goto out_free_dentry;
1060 }
1061
1062 if (elf_interpreter) {
1063 unsigned long interp_map_addr = 0;
1064
1065 elf_entry = load_elf_interp(&loc->interp_elf_ex,
1066 interpreter,
1067 &interp_map_addr,
1068 load_bias, interp_elf_phdata);
1069 if (!IS_ERR((void *)elf_entry)) {
1070 /*
1071 * load_elf_interp() returns relocation
1072 * adjustment
1073 */
1074 interp_load_addr = elf_entry;
1075 elf_entry += loc->interp_elf_ex.e_entry;
1076 }
1077 if (BAD_ADDR(elf_entry)) {
1078 retval = IS_ERR((void *)elf_entry) ?
1079 (int)elf_entry : -EINVAL;
1080 goto out_free_dentry;
1081 }
1082 reloc_func_desc = interp_load_addr;
1083
1084 allow_write_access(interpreter);
1085 fput(interpreter);
1086 kfree(elf_interpreter);
1087 } else {
1088 elf_entry = loc->elf_ex.e_entry;
1089 if (BAD_ADDR(elf_entry)) {
1090 retval = -EINVAL;
1091 goto out_free_dentry;
1092 }
1093 }
1094
1095 kfree(interp_elf_phdata);
1096 kfree(elf_phdata);
1097
1098 set_binfmt(&elf_format);
1099
1100 #ifdef ARCH_HAS_SETUP_ADDITIONAL_PAGES
1101 retval = arch_setup_additional_pages(bprm, !!elf_interpreter);
1102 if (retval < 0)
1103 goto out;
1104 #endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */
1105
1106 retval = create_elf_tables(bprm, &loc->elf_ex,
1107 load_addr, interp_load_addr);
1108 if (retval < 0)
1109 goto out;
1110 /* N.B. passed_fileno might not be initialized? */
1111 current->mm->end_code = end_code;
1112 current->mm->start_code = start_code;
1113 current->mm->start_data = start_data;
1114 current->mm->end_data = end_data;
1115 current->mm->start_stack = bprm->p;
1116
1117 if ((current->flags & PF_RANDOMIZE) && (randomize_va_space > 1)) {
1118 current->mm->brk = current->mm->start_brk =
1119 arch_randomize_brk(current->mm);
1120 #ifdef compat_brk_randomized
1121 current->brk_randomized = 1;
1122 #endif
1123 }
1124
1125 if (current->personality & MMAP_PAGE_ZERO) {
1126 /* Why this, you ask??? Well SVr4 maps page 0 as read-only,
1127 and some applications "depend" upon this behavior.
1128 Since we do not have the power to recompile these, we
1129 emulate the SVr4 behavior. Sigh. */
1130 error = vm_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
1131 MAP_FIXED | MAP_PRIVATE, 0);
1132 }
1133
1134 #ifdef ELF_PLAT_INIT
1135 /*
1136 * The ABI may specify that certain registers be set up in special
1137 * ways (on i386, %edx is the address of a DT_FINI function, for
1138 * example). In addition, it may also specify (e.g. for PowerPC64 ELF)
1139 * that the e_entry field is the address of the function descriptor
1140 * for the startup routine, rather than the address of the startup
1141 * routine itself. This macro performs whatever initialization of
1142 * the regs structure is required, as well as any relocations to the
1143 * function descriptor entries when executing dynamically linked apps.
1144 */
1145 ELF_PLAT_INIT(regs, reloc_func_desc);
1146 #endif
1147
1148 start_thread(regs, elf_entry, bprm->p);
1149 retval = 0;
1150 out:
1151 kfree(loc);
1152 out_ret:
1153 return retval;
1154
1155 /* error cleanup */
1156 out_free_dentry:
1157 kfree(interp_elf_phdata);
1158 allow_write_access(interpreter);
1159 if (interpreter)
1160 fput(interpreter);
1161 out_free_interp:
1162 kfree(elf_interpreter);
1163 out_free_ph:
1164 kfree(elf_phdata);
1165 goto out;
1166 }
1167
1168 #ifdef CONFIG_USELIB
1169 /* This is really simpleminded and specialized - we are loading an
1170 a.out library that is given an ELF header. */
1171 static int load_elf_library(struct file *file)
1172 {
1173 struct elf_phdr *elf_phdata;
1174 struct elf_phdr *eppnt;
1175 unsigned long elf_bss, bss, len;
1176 int retval, error, i, j;
1177 struct elfhdr elf_ex;
1178
1179 error = -ENOEXEC;
1180 retval = kernel_read(file, 0, (char *)&elf_ex, sizeof(elf_ex));
1181 if (retval != sizeof(elf_ex))
1182 goto out;
1183
1184 if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
1185 goto out;
1186
1187 /* First of all, some simple consistency checks */
1188 if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
1189 !elf_check_arch(&elf_ex) || !file->f_op->mmap)
1190 goto out;
1191
1192 /* Now read in all of the header information */
1193
1194 j = sizeof(struct elf_phdr) * elf_ex.e_phnum;
1195 /* j < ELF_MIN_ALIGN because elf_ex.e_phnum <= 2 */
1196
1197 error = -ENOMEM;
1198 elf_phdata = kmalloc(j, GFP_KERNEL);
1199 if (!elf_phdata)
1200 goto out;
1201
1202 eppnt = elf_phdata;
1203 error = -ENOEXEC;
1204 retval = kernel_read(file, elf_ex.e_phoff, (char *)eppnt, j);
1205 if (retval != j)
1206 goto out_free_ph;
1207
1208 for (j = 0, i = 0; i < elf_ex.e_phnum; i++)
1209 if ((eppnt + i)->p_type == PT_LOAD)
1210 j++;
1211 if (j != 1)
1212 goto out_free_ph;
1213
1214 while (eppnt->p_type != PT_LOAD)
1215 eppnt++;
1216
1217 /* Now use mmap to map the library into memory. */
1218 error = vm_mmap(file,
1219 ELF_PAGESTART(eppnt->p_vaddr),
1220 (eppnt->p_filesz +
1221 ELF_PAGEOFFSET(eppnt->p_vaddr)),
1222 PROT_READ | PROT_WRITE | PROT_EXEC,
1223 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
1224 (eppnt->p_offset -
1225 ELF_PAGEOFFSET(eppnt->p_vaddr)));
1226 if (error != ELF_PAGESTART(eppnt->p_vaddr))
1227 goto out_free_ph;
1228
1229 elf_bss = eppnt->p_vaddr + eppnt->p_filesz;
1230 if (padzero(elf_bss)) {
1231 error = -EFAULT;
1232 goto out_free_ph;
1233 }
1234
1235 len = ELF_PAGESTART(eppnt->p_filesz + eppnt->p_vaddr +
1236 ELF_MIN_ALIGN - 1);
1237 bss = eppnt->p_memsz + eppnt->p_vaddr;
1238 if (bss > len) {
1239 error = vm_brk(len, bss - len);
1240 if (error)
1241 goto out_free_ph;
1242 }
1243 error = 0;
1244
1245 out_free_ph:
1246 kfree(elf_phdata);
1247 out:
1248 return error;
1249 }
1250 #endif /* #ifdef CONFIG_USELIB */
1251
1252 #ifdef CONFIG_ELF_CORE
1253 /*
1254 * ELF core dumper
1255 *
1256 * Modelled on fs/exec.c:aout_core_dump()
1257 * Jeremy Fitzhardinge <jeremy@sw.oz.au>
1258 */
1259
1260 /*
1261 * The purpose of always_dump_vma() is to make sure that special kernel mappings
1262 * that are useful for post-mortem analysis are included in every core dump.
1263 * In that way we ensure that the core dump is fully interpretable later
1264 * without matching up the same kernel and hardware config to see what PC values
1265 * meant. These special mappings include - vDSO, vsyscall, and other
1266 * architecture specific mappings
1267 */
1268 static bool always_dump_vma(struct vm_area_struct *vma)
1269 {
1270 /* Any vsyscall mappings? */
1271 if (vma == get_gate_vma(vma->vm_mm))
1272 return true;
1273
1274 /*
1275 * Assume that all vmas with a .name op should always be dumped.
1276 * If this changes, a new vm_ops field can easily be added.
1277 */
1278 if (vma->vm_ops && vma->vm_ops->name && vma->vm_ops->name(vma))
1279 return true;
1280
1281 /*
1282 * arch_vma_name() returns non-NULL for special architecture mappings,
1283 * such as vDSO sections.
1284 */
1285 if (arch_vma_name(vma))
1286 return true;
1287
1288 return false;
1289 }
1290
1291 /*
1292 * Decide what to dump of a segment, part, all or none.
1293 */
1294 static unsigned long vma_dump_size(struct vm_area_struct *vma,
1295 unsigned long mm_flags)
1296 {
1297 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
1298
1299 /* always dump the vdso and vsyscall sections */
1300 if (always_dump_vma(vma))
1301 goto whole;
1302
1303 if (vma->vm_flags & VM_DONTDUMP)
1304 return 0;
1305
1306 /* support for DAX */
1307 if (vma_is_dax(vma)) {
1308 if ((vma->vm_flags & VM_SHARED) && FILTER(DAX_SHARED))
1309 goto whole;
1310 if (!(vma->vm_flags & VM_SHARED) && FILTER(DAX_PRIVATE))
1311 goto whole;
1312 return 0;
1313 }
1314
1315 /* Hugetlb memory check */
1316 if (vma->vm_flags & VM_HUGETLB) {
1317 if ((vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_SHARED))
1318 goto whole;
1319 if (!(vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_PRIVATE))
1320 goto whole;
1321 return 0;
1322 }
1323
1324 /* Do not dump I/O mapped devices or special mappings */
1325 if (vma->vm_flags & VM_IO)
1326 return 0;
1327
1328 /* By default, dump shared memory if mapped from an anonymous file. */
1329 if (vma->vm_flags & VM_SHARED) {
1330 if (file_inode(vma->vm_file)->i_nlink == 0 ?
1331 FILTER(ANON_SHARED) : FILTER(MAPPED_SHARED))
1332 goto whole;
1333 return 0;
1334 }
1335
1336 /* Dump segments that have been written to. */
1337 if (vma->anon_vma && FILTER(ANON_PRIVATE))
1338 goto whole;
1339 if (vma->vm_file == NULL)
1340 return 0;
1341
1342 if (FILTER(MAPPED_PRIVATE))
1343 goto whole;
1344
1345 /*
1346 * If this looks like the beginning of a DSO or executable mapping,
1347 * check for an ELF header. If we find one, dump the first page to
1348 * aid in determining what was mapped here.
1349 */
1350 if (FILTER(ELF_HEADERS) &&
1351 vma->vm_pgoff == 0 && (vma->vm_flags & VM_READ)) {
1352 u32 __user *header = (u32 __user *) vma->vm_start;
1353 u32 word;
1354 mm_segment_t fs = get_fs();
1355 /*
1356 * Doing it this way gets the constant folded by GCC.
1357 */
1358 union {
1359 u32 cmp;
1360 char elfmag[SELFMAG];
1361 } magic;
1362 BUILD_BUG_ON(SELFMAG != sizeof word);
1363 magic.elfmag[EI_MAG0] = ELFMAG0;
1364 magic.elfmag[EI_MAG1] = ELFMAG1;
1365 magic.elfmag[EI_MAG2] = ELFMAG2;
1366 magic.elfmag[EI_MAG3] = ELFMAG3;
1367 /*
1368 * Switch to the user "segment" for get_user(),
1369 * then put back what elf_core_dump() had in place.
1370 */
1371 set_fs(USER_DS);
1372 if (unlikely(get_user(word, header)))
1373 word = 0;
1374 set_fs(fs);
1375 if (word == magic.cmp)
1376 return PAGE_SIZE;
1377 }
1378
1379 #undef FILTER
1380
1381 return 0;
1382
1383 whole:
1384 return vma->vm_end - vma->vm_start;
1385 }
1386
1387 /* An ELF note in memory */
1388 struct memelfnote
1389 {
1390 const char *name;
1391 int type;
1392 unsigned int datasz;
1393 void *data;
1394 };
1395
1396 static int notesize(struct memelfnote *en)
1397 {
1398 int sz;
1399
1400 sz = sizeof(struct elf_note);
1401 sz += roundup(strlen(en->name) + 1, 4);
1402 sz += roundup(en->datasz, 4);
1403
1404 return sz;
1405 }
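/*
 * Example (illustrative): a "CORE"/NT_PRSTATUS note has n_namesz 5
 * (rounded up to 8), added to the note header (three 4-byte words
 * on common ABIs) and the prstatus payload rounded up to 4, matching
 * the layout written out by writenote() below.
 */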
1406
1407 static int writenote(struct memelfnote *men, struct coredump_params *cprm)
1408 {
1409 struct elf_note en;
1410 en.n_namesz = strlen(men->name) + 1;
1411 en.n_descsz = men->datasz;
1412 en.n_type = men->type;
1413
1414 return dump_emit(cprm, &en, sizeof(en)) &&
1415 dump_emit(cprm, men->name, en.n_namesz) && dump_align(cprm, 4) &&
1416 dump_emit(cprm, men->data, men->datasz) && dump_align(cprm, 4);
1417 }
1418
1419 static void fill_elf_header(struct elfhdr *elf, int segs,
1420 u16 machine, u32 flags)
1421 {
1422 memset(elf, 0, sizeof(*elf));
1423
1424 memcpy(elf->e_ident, ELFMAG, SELFMAG);
1425 elf->e_ident[EI_CLASS] = ELF_CLASS;
1426 elf->e_ident[EI_DATA] = ELF_DATA;
1427 elf->e_ident[EI_VERSION] = EV_CURRENT;
1428 elf->e_ident[EI_OSABI] = ELF_OSABI;
1429
1430 elf->e_type = ET_CORE;
1431 elf->e_machine = machine;
1432 elf->e_version = EV_CURRENT;
1433 elf->e_phoff = sizeof(struct elfhdr);
1434 elf->e_flags = flags;
1435 elf->e_ehsize = sizeof(struct elfhdr);
1436 elf->e_phentsize = sizeof(struct elf_phdr);
1437 elf->e_phnum = segs;
1438
1439 return;
1440 }
1441
1442 static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, loff_t offset)
1443 {
1444 phdr->p_type = PT_NOTE;
1445 phdr->p_offset = offset;
1446 phdr->p_vaddr = 0;
1447 phdr->p_paddr = 0;
1448 phdr->p_filesz = sz;
1449 phdr->p_memsz = 0;
1450 phdr->p_flags = 0;
1451 phdr->p_align = 0;
1452 return;
1453 }
1454
1455 static void fill_note(struct memelfnote *note, const char *name, int type,
1456 unsigned int sz, void *data)
1457 {
1458 note->name = name;
1459 note->type = type;
1460 note->datasz = sz;
1461 note->data = data;
1462 return;
1463 }
1464
1465 /*
1466 * fill up all the fields in prstatus from the given task struct, except
1467 * registers which need to be filled up separately.
1468 */
1469 static void fill_prstatus(struct elf_prstatus *prstatus,
1470 struct task_struct *p, long signr)
1471 {
1472 prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
1473 prstatus->pr_sigpend = p->pending.signal.sig[0];
1474 prstatus->pr_sighold = p->blocked.sig[0];
1475 rcu_read_lock();
1476 prstatus->pr_ppid = task_pid_vnr(rcu_dereference(p->real_parent));
1477 rcu_read_unlock();
1478 prstatus->pr_pid = task_pid_vnr(p);
1479 prstatus->pr_pgrp = task_pgrp_vnr(p);
1480 prstatus->pr_sid = task_session_vnr(p);
1481 if (thread_group_leader(p)) {
1482 struct task_cputime cputime;
1483
1484 /*
1485 * This is the record for the group leader. It shows the
1486 * group-wide total, not its individual thread total.
1487 */
1488 thread_group_cputime(p, &cputime);
1489 prstatus->pr_utime = ns_to_timeval(cputime.utime);
1490 prstatus->pr_stime = ns_to_timeval(cputime.stime);
1491 } else {
1492 u64 utime, stime;
1493
1494 task_cputime(p, &utime, &stime);
1495 prstatus->pr_utime = ns_to_timeval(utime);
1496 prstatus->pr_stime = ns_to_timeval(stime);
1497 }
1498
1499 prstatus->pr_cutime = ns_to_timeval(p->signal->cutime);
1500 prstatus->pr_cstime = ns_to_timeval(p->signal->cstime);
1501 }
1502
1503 static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
1504 struct mm_struct *mm)
1505 {
1506 const struct cred *cred;
1507 unsigned int i, len;
1508
1509 /* first copy the parameters from user space */
1510 memset(psinfo, 0, sizeof(struct elf_prpsinfo));
1511
1512 len = mm->arg_end - mm->arg_start;
1513 if (len >= ELF_PRARGSZ)
1514 len = ELF_PRARGSZ-1;
1515 if (copy_from_user(&psinfo->pr_psargs,
1516 (const char __user *)mm->arg_start, len))
1517 return -EFAULT;
1518 for (i = 0; i < len; i++)
1519 if (psinfo->pr_psargs[i] == 0)
1520 psinfo->pr_psargs[i] = ' ';
1521 psinfo->pr_psargs[len] = 0;
1522
1523 rcu_read_lock();
1524 psinfo->pr_ppid = task_pid_vnr(rcu_dereference(p->real_parent));
1525 rcu_read_unlock();
1526 psinfo->pr_pid = task_pid_vnr(p);
1527 psinfo->pr_pgrp = task_pgrp_vnr(p);
1528 psinfo->pr_sid = task_session_vnr(p);
1529
1530 i = p->state ? ffz(~p->state) + 1 : 0;
1531 psinfo->pr_state = i;
1532 psinfo->pr_sname = (i > 5) ? '.' : "RSDTZW"[i];
1533 psinfo->pr_zomb = psinfo->pr_sname == 'Z';
1534 psinfo->pr_nice = task_nice(p);
1535 psinfo->pr_flag = p->flags;
1536 rcu_read_lock();
1537 cred = __task_cred(p);
1538 SET_UID(psinfo->pr_uid, from_kuid_munged(cred->user_ns, cred->uid));
1539 SET_GID(psinfo->pr_gid, from_kgid_munged(cred->user_ns, cred->gid));
1540 rcu_read_unlock();
1541 strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname));
1542
1543 return 0;
1544 }
1545
1546 static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
1547 {
1548 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
1549 int i = 0;
1550 do
1551 i += 2;
1552 while (auxv[i - 2] != AT_NULL);
1553 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
1554 }
1555
1556 static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
1557 const siginfo_t *siginfo)
1558 {
1559 mm_segment_t old_fs = get_fs();
1560 set_fs(KERNEL_DS);
1561 copy_siginfo_to_user((user_siginfo_t __user *) csigdata, siginfo);
1562 set_fs(old_fs);
1563 fill_note(note, "CORE", NT_SIGINFO, sizeof(*csigdata), csigdata);
1564 }
1565
1566 #define MAX_FILE_NOTE_SIZE (4*1024*1024)
1567 /*
1568 * Format of NT_FILE note:
1569 *
1570 * long count -- how many files are mapped
1571 * long page_size -- units for file_ofs
1572 * array of [COUNT] elements of
1573 * long start
1574 * long end
1575 * long file_ofs
1576 * followed by COUNT filenames in ASCII: "FILE1" NUL "FILE2" NUL...
1577 */
1578 static int fill_files_note(struct memelfnote *note)
1579 {
1580 struct vm_area_struct *vma;
1581 unsigned count, size, names_ofs, remaining, n;
1582 user_long_t *data;
1583 user_long_t *start_end_ofs;
1584 char *name_base, *name_curpos;
1585
1586 /* *Estimated* file count and total data size needed */
1587 count = current->mm->map_count;
1588 size = count * 64;
1589
1590 names_ofs = (2 + 3 * count) * sizeof(data[0]);
1591 alloc:
1592 if (size >= MAX_FILE_NOTE_SIZE) /* paranoia check */
1593 return -EINVAL;
1594 size = round_up(size, PAGE_SIZE);
1595 data = vmalloc(size);
1596 if (!data)
1597 return -ENOMEM;
1598
1599 start_end_ofs = data + 2;
1600 name_base = name_curpos = ((char *)data) + names_ofs;
1601 remaining = size - names_ofs;
1602 count = 0;
1603 for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
1604 struct file *file;
1605 const char *filename;
1606
1607 file = vma->vm_file;
1608 if (!file)
1609 continue;
1610 filename = file_path(file, name_curpos, remaining);
1611 if (IS_ERR(filename)) {
1612 if (PTR_ERR(filename) == -ENAMETOOLONG) {
1613 vfree(data);
1614 size = size * 5 / 4;
1615 goto alloc;
1616 }
1617 continue;
1618 }
1619
1620 /* file_path() fills at the end, move name down */
1621 /* n = strlen(filename) + 1: */
1622 n = (name_curpos + remaining) - filename;
1623 remaining = filename - name_curpos;
1624 memmove(name_curpos, filename, n);
1625 name_curpos += n;
1626
1627 *start_end_ofs++ = vma->vm_start;
1628 *start_end_ofs++ = vma->vm_end;
1629 *start_end_ofs++ = vma->vm_pgoff;
1630 count++;
1631 }
1632
1633 /* Now we know exact count of files, can store it */
1634 data[0] = count;
1635 data[1] = PAGE_SIZE;
1636 /*
1637 * The final count is usually less than current->mm->map_count,
1638 * so we need to move the filenames down.
1639 */
1640 n = current->mm->map_count - count;
1641 if (n != 0) {
1642 unsigned shift_bytes = n * 3 * sizeof(data[0]);
1643 memmove(name_base - shift_bytes, name_base,
1644 name_curpos - name_base);
1645 name_curpos -= shift_bytes;
1646 }
1647
1648 size = name_curpos - (char *)data;
1649 fill_note(note, "CORE", NT_FILE, size, data);
1650 return 0;
1651 }
1652
1653 #ifdef CORE_DUMP_USE_REGSET
1654 #include <linux/regset.h>
1655
1656 struct elf_thread_core_info {
1657 struct elf_thread_core_info *next;
1658 struct task_struct *task;
1659 struct elf_prstatus prstatus;
1660 struct memelfnote notes[0];
1661 };
1662
1663 struct elf_note_info {
1664 struct elf_thread_core_info *thread;
1665 struct memelfnote psinfo;
1666 struct memelfnote signote;
1667 struct memelfnote auxv;
1668 struct memelfnote files;
1669 user_siginfo_t csigdata;
1670 size_t size;
1671 int thread_notes;
1672 };
1673
1674 /*
1675 * When a regset has a writeback hook, we call it on each thread before
1676 * dumping user memory. On register window machines, this makes sure the
1677 * user memory backing the register data is up to date before we read it.
1678 */
1679 static void do_thread_regset_writeback(struct task_struct *task,
1680 const struct user_regset *regset)
1681 {
1682 if (regset->writeback)
1683 regset->writeback(task, regset, 1);
1684 }
1685
1686 #ifndef PRSTATUS_SIZE
1687 #define PRSTATUS_SIZE(S, R) sizeof(S)
1688 #endif
1689
1690 #ifndef SET_PR_FPVALID
1691 #define SET_PR_FPVALID(S, V, R) ((S)->pr_fpvalid = (V))
1692 #endif
1693
1694 static int fill_thread_core_info(struct elf_thread_core_info *t,
1695 const struct user_regset_view *view,
1696 long signr, size_t *total)
1697 {
1698 unsigned int i;
1699 unsigned int regset_size = view->regsets[0].n * view->regsets[0].size;
1700
1701 /*
1702 * NT_PRSTATUS is the one special case, because the regset data
1703 * goes into the pr_reg field inside the note contents, rather
1704 * than being the whole note contents. We fill the rest in here.
1705 * We assume that regset 0 is NT_PRSTATUS.
1706 */
1707 fill_prstatus(&t->prstatus, t->task, signr);
1708 (void) view->regsets[0].get(t->task, &view->regsets[0], 0, regset_size,
1709 &t->prstatus.pr_reg, NULL);
1710
1711 fill_note(&t->notes[0], "CORE", NT_PRSTATUS,
1712 PRSTATUS_SIZE(t->prstatus, regset_size), &t->prstatus);
1713 *total += notesize(&t->notes[0]);
1714
1715 do_thread_regset_writeback(t->task, &view->regsets[0]);
1716
1717 /*
1718 * Each other regset might generate a note too. For each regset
1719 * that has no core_note_type or is inactive, we leave t->notes[i]
1720 * all zero and we'll know to skip writing it later.
1721 */
1722 for (i = 1; i < view->n; ++i) {
1723 const struct user_regset *regset = &view->regsets[i];
1724 do_thread_regset_writeback(t->task, regset);
1725 if (regset->core_note_type && regset->get &&
1726 (!regset->active || regset->active(t->task, regset))) {
1727 int ret;
1728 size_t size = regset->n * regset->size;
1729 void *data = kmalloc(size, GFP_KERNEL);
1730 if (unlikely(!data))
1731 return 0;
1732 ret = regset->get(t->task, regset,
1733 0, size, data, NULL);
1734 if (unlikely(ret))
1735 kfree(data);
1736 else {
1737 if (regset->core_note_type != NT_PRFPREG)
1738 fill_note(&t->notes[i], "LINUX",
1739 regset->core_note_type,
1740 size, data);
1741 else {
1742 SET_PR_FPVALID(&t->prstatus,
1743 1, regset_size);
1744 fill_note(&t->notes[i], "CORE",
1745 NT_PRFPREG, size, data);
1746 }
1747 *total += notesize(&t->notes[i]);
1748 }
1749 }
1750 }
1751
1752 return 1;
1753 }
1754
1755 static int fill_note_info(struct elfhdr *elf, int phdrs,
1756 struct elf_note_info *info,
1757 const siginfo_t *siginfo, struct pt_regs *regs)
1758 {
1759 struct task_struct *dump_task = current;
1760 const struct user_regset_view *view = task_user_regset_view(dump_task);
1761 struct elf_thread_core_info *t;
1762 struct elf_prpsinfo *psinfo;
1763 struct core_thread *ct;
1764 unsigned int i;
1765
1766 info->size = 0;
1767 info->thread = NULL;
1768
1769 psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL);
1770 if (psinfo == NULL) {
1771 info->psinfo.data = NULL; /* So we don't free this wrongly */
1772 return 0;
1773 }
1774
1775 fill_note(&info->psinfo, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);
1776
1777 /*
1778 * Figure out how many notes we're going to need for each thread.
1779 */
1780 info->thread_notes = 0;
1781 for (i = 0; i < view->n; ++i)
1782 if (view->regsets[i].core_note_type != 0)
1783 ++info->thread_notes;
1784
1785 /*
1786 * Sanity check. We rely on regset 0 being NT_PRSTATUS,
1787 * since it is our one special case.
1788 */
1789 if (unlikely(info->thread_notes == 0) ||
1790 unlikely(view->regsets[0].core_note_type != NT_PRSTATUS)) {
1791 WARN_ON(1);
1792 return 0;
1793 }
1794
1795 /*
1796 * Initialize the ELF file header.
1797 */
1798 fill_elf_header(elf, phdrs,
1799 view->e_machine, view->e_flags);
1800
1801 /*
1802 * Allocate a structure for each thread.
1803 */
1804 for (ct = &dump_task->mm->core_state->dumper; ct; ct = ct->next) {
1805 t = kzalloc(offsetof(struct elf_thread_core_info,
1806 notes[info->thread_notes]),
1807 GFP_KERNEL);
1808 if (unlikely(!t))
1809 return 0;
1810
1811 t->task = ct->task;
1812 if (ct->task == dump_task || !info->thread) {
1813 t->next = info->thread;
1814 info->thread = t;
1815 } else {
1816 /*
1817 * Make sure to keep the original task at
1818 * the head of the list.
1819 */
1820 t->next = info->thread->next;
1821 info->thread->next = t;
1822 }
1823 }
1824
1825 /*
1826 * Now fill in each thread's information.
1827 */
1828 for (t = info->thread; t != NULL; t = t->next)
1829 if (!fill_thread_core_info(t, view, siginfo->si_signo, &info->size))
1830 return 0;
1831
1832 /*
1833 * Fill in the two process-wide notes.
1834 */
1835 fill_psinfo(psinfo, dump_task->group_leader, dump_task->mm);
1836 info->size += notesize(&info->psinfo);
1837
1838 fill_siginfo_note(&info->signote, &info->csigdata, siginfo);
1839 info->size += notesize(&info->signote);
1840
1841 fill_auxv_note(&info->auxv, current->mm);
1842 info->size += notesize(&info->auxv);
1843
1844 if (fill_files_note(&info->files) == 0)
1845 info->size += notesize(&info->files);
1846
1847 return 1;
1848 }
1849
1850 static size_t get_note_info_size(struct elf_note_info *info)
1851 {
1852 return info->size;
1853 }
1854
1855 /*
1856 * Write all the notes for each thread. When writing the first thread, the
1857 * process-wide notes are interleaved after the first thread-specific note.
1858 */
1859 static int write_note_info(struct elf_note_info *info,
1860 struct coredump_params *cprm)
1861 {
1862 bool first = true;
1863 struct elf_thread_core_info *t = info->thread;
1864
1865 do {
1866 int i;
1867
1868 if (!writenote(&t->notes[0], cprm))
1869 return 0;
1870
1871 if (first && !writenote(&info->psinfo, cprm))
1872 return 0;
1873 if (first && !writenote(&info->signote, cprm))
1874 return 0;
1875 if (first && !writenote(&info->auxv, cprm))
1876 return 0;
1877 if (first && info->files.data &&
1878 !writenote(&info->files, cprm))
1879 return 0;
1880
1881 for (i = 1; i < info->thread_notes; ++i)
1882 if (t->notes[i].data &&
1883 !writenote(&t->notes[i], cprm))
1884 return 0;
1885
1886 first = false;
1887 t = t->next;
1888 } while (t);
1889
1890 return 1;
1891 }
1892
1893 static void free_note_info(struct elf_note_info *info)
1894 {
1895 struct elf_thread_core_info *threads = info->thread;
1896 while (threads) {
1897 unsigned int i;
1898 struct elf_thread_core_info *t = threads;
1899 threads = t->next;
1900 WARN_ON(t->notes[0].data && t->notes[0].data != &t->prstatus);
1901 for (i = 1; i < info->thread_notes; ++i)
1902 kfree(t->notes[i].data);
1903 kfree(t);
1904 }
1905 kfree(info->psinfo.data);
1906 vfree(info->files.data);
1907 }
1908
1909 #else
1910
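/*
 * Editor's note: everything from the #else above is the fallback
 * note-collection path, used when the architecture does not define
 * CORE_DUMP_USE_REGSET. Thread state is gathered with the older
 * elf_core_copy_task_regs()/elf_core_copy_task_fpregs() helpers
 * instead of the generic regset interface.
 */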
1911 /* Here is the structure in which the status of each thread is captured. */
1912 struct elf_thread_status
1913 {
1914 struct list_head list;
1915 struct elf_prstatus prstatus; /* NT_PRSTATUS */
1916 elf_fpregset_t fpu; /* NT_PRFPREG */
1917 struct task_struct *thread;
1918 #ifdef ELF_CORE_COPY_XFPREGS
1919 elf_fpxregset_t xfpu; /* ELF_CORE_XFPREG_TYPE */
1920 #endif
1921 struct memelfnote notes[3];
1922 int num_notes;
1923 };
1924
1925 /*
1926 * To add per-thread information to the ELF core file, we keep a linked
1927 * list of every thread's pr_status and then create a single note
1928 * section for them in the final core file.
1929 */
1930 static int elf_dump_thread_status(long signr, struct elf_thread_status *t)
1931 {
1932 int sz = 0;
1933 struct task_struct *p = t->thread;
1934 t->num_notes = 0;
1935
1936 fill_prstatus(&t->prstatus, p, signr);
1937 elf_core_copy_task_regs(p, &t->prstatus.pr_reg);
1938
1939 fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus),
1940 &(t->prstatus));
1941 t->num_notes++;
1942 sz += notesize(&t->notes[0]);
1943
1944 if ((t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, NULL,
1945 &t->fpu))) {
1946 fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu),
1947 &(t->fpu));
1948 t->num_notes++;
1949 sz += notesize(&t->notes[1]);
1950 }
1951
1952 #ifdef ELF_CORE_COPY_XFPREGS
1953 if (elf_core_copy_task_xfpregs(p, &t->xfpu)) {
1954 fill_note(&t->notes[2], "LINUX", ELF_CORE_XFPREG_TYPE,
1955 sizeof(t->xfpu), &t->xfpu);
1956 t->num_notes++;
1957 sz += notesize(&t->notes[2]);
1958 }
1959 #endif
1960 return sz;
1961 }
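/*
 * The size returned above accumulates into info->thread_status_size so
 * that the PT_NOTE program header can be sized before anything is
 * written out; see get_note_info_size() below.
 */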
1962
1963 struct elf_note_info {
1964 struct memelfnote *notes;
1965 struct memelfnote *notes_files;
1966 struct elf_prstatus *prstatus; /* NT_PRSTATUS */
1967 struct elf_prpsinfo *psinfo; /* NT_PRPSINFO */
1968 struct list_head thread_list;
1969 elf_fpregset_t *fpu;
1970 #ifdef ELF_CORE_COPY_XFPREGS
1971 elf_fpxregset_t *xfpu;
1972 #endif
1973 user_siginfo_t csigdata;
1974 int thread_status_size;
1975 int numnote;
1976 };
1977
1978 static int elf_note_info_init(struct elf_note_info *info)
1979 {
1980 memset(info, 0, sizeof(*info));
1981 INIT_LIST_HEAD(&info->thread_list);
1982
1983 /* Allocate space for ELF notes */
1984 info->notes = kmalloc(8 * sizeof(struct memelfnote), GFP_KERNEL);
1985 if (!info->notes)
1986 return 0;
1987 info->psinfo = kmalloc(sizeof(*info->psinfo), GFP_KERNEL);
1988 if (!info->psinfo)
1989 return 0;
1990 info->prstatus = kmalloc(sizeof(*info->prstatus), GFP_KERNEL);
1991 if (!info->prstatus)
1992 return 0;
1993 info->fpu = kmalloc(sizeof(*info->fpu), GFP_KERNEL);
1994 if (!info->fpu)
1995 return 0;
1996 #ifdef ELF_CORE_COPY_XFPREGS
1997 info->xfpu = kmalloc(sizeof(*info->xfpu), GFP_KERNEL);
1998 if (!info->xfpu)
1999 return 0;
2000 #endif
2001 return 1;
2002 }
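/*
 * Editor's note: on allocation failure we return 0 without unwinding;
 * that is safe because the caller bails out through free_note_info(),
 * where kfree()/vfree() of the still-NULL pointers is a harmless no-op.
 */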
2003
2004 static int fill_note_info(struct elfhdr *elf, int phdrs,
2005 struct elf_note_info *info,
2006 const siginfo_t *siginfo, struct pt_regs *regs)
2007 {
2008 struct list_head *t;
2009 struct core_thread *ct;
2010 struct elf_thread_status *ets;
2011
2012 if (!elf_note_info_init(info))
2013 return 0;
2014
2015 for (ct = current->mm->core_state->dumper.next;
2016 ct; ct = ct->next) {
2017 ets = kzalloc(sizeof(*ets), GFP_KERNEL);
2018 if (!ets)
2019 return 0;
2020
2021 ets->thread = ct->task;
2022 list_add(&ets->list, &info->thread_list);
2023 }
2024
2025 list_for_each(t, &info->thread_list) {
2026 int sz;
2027
2028 ets = list_entry(t, struct elf_thread_status, list);
2029 sz = elf_dump_thread_status(siginfo->si_signo, ets);
2030 info->thread_status_size += sz;
2031 }
2032 /* Now collect the dump for the current task. */
2033 memset(info->prstatus, 0, sizeof(*info->prstatus));
2034 fill_prstatus(info->prstatus, current, siginfo->si_signo);
2035 elf_core_copy_regs(&info->prstatus->pr_reg, regs);
2036
2037 /* Set up header */
2038 fill_elf_header(elf, phdrs, ELF_ARCH, ELF_CORE_EFLAGS);
2039
2040 /*
2041 * Set up the notes in similar form to SVR4 core dumps made
2042 * with info from their /proc.
2043 */
2044
2045 fill_note(info->notes + 0, "CORE", NT_PRSTATUS,
2046 sizeof(*info->prstatus), info->prstatus);
2047 fill_psinfo(info->psinfo, current->group_leader, current->mm);
2048 fill_note(info->notes + 1, "CORE", NT_PRPSINFO,
2049 sizeof(*info->psinfo), info->psinfo);
2050
2051 fill_siginfo_note(info->notes + 2, &info->csigdata, siginfo);
2052 fill_auxv_note(info->notes + 3, current->mm);
2053 info->numnote = 4;
2054
2055 if (fill_files_note(info->notes + info->numnote) == 0) {
2056 info->notes_files = info->notes + info->numnote;
2057 info->numnote++;
2058 }
2059
2060 /* Try to dump the FPU. */
2061 info->prstatus->pr_fpvalid = elf_core_copy_task_fpregs(current, regs,
2062 info->fpu);
2063 if (info->prstatus->pr_fpvalid)
2064 fill_note(info->notes + info->numnote++,
2065 "CORE", NT_PRFPREG, sizeof(*info->fpu), info->fpu);
2066 #ifdef ELF_CORE_COPY_XFPREGS
2067 if (elf_core_copy_task_xfpregs(current, info->xfpu))
2068 fill_note(info->notes + info->numnote++,
2069 "LINUX", ELF_CORE_XFPREG_TYPE,
2070 sizeof(*info->xfpu), info->xfpu);
2071 #endif
2072
2073 return 1;
2074 }
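/*
 * Sketch of the resulting note order on this fallback path:
 * NT_PRSTATUS, NT_PRPSINFO, NT_SIGINFO, NT_AUXV, then NT_FILE and the
 * FPU notes when present, followed by each remaining thread's status
 * notes as collected in elf_dump_thread_status().
 */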
2075
2076 static size_t get_note_info_size(struct elf_note_info *info)
2077 {
2078 int sz = 0;
2079 int i;
2080
2081 for (i = 0; i < info->numnote; i++)
2082 sz += notesize(info->notes + i);
2083
2084 sz += info->thread_status_size;
2085
2086 return sz;
2087 }
2088
2089 static int write_note_info(struct elf_note_info *info,
2090 struct coredump_params *cprm)
2091 {
2092 int i;
2093 struct list_head *t;
2094
2095 for (i = 0; i < info->numnote; i++)
2096 if (!writenote(info->notes + i, cprm))
2097 return 0;
2098
2099 /* write out the thread status notes section */
2100 list_for_each(t, &info->thread_list) {
2101 struct elf_thread_status *tmp =
2102 list_entry(t, struct elf_thread_status, list);
2103
2104 for (i = 0; i < tmp->num_notes; i++)
2105 if (!writenote(&tmp->notes[i], cprm))
2106 return 0;
2107 }
2108
2109 return 1;
2110 }
2111
2112 static void free_note_info(struct elf_note_info *info)
2113 {
2114 while (!list_empty(&info->thread_list)) {
2115 struct list_head *tmp = info->thread_list.next;
2116 list_del(tmp);
2117 kfree(list_entry(tmp, struct elf_thread_status, list));
2118 }
2119
2120 /* Free data possibly allocated by fill_files_note(): */
2121 if (info->notes_files)
2122 vfree(info->notes_files->data);
2123
2124 kfree(info->prstatus);
2125 kfree(info->psinfo);
2126 kfree(info->notes);
2127 kfree(info->fpu);
2128 #ifdef ELF_CORE_COPY_XFPREGS
2129 kfree(info->xfpu);
2130 #endif
2131 }
2132
2133 #endif /* CORE_DUMP_USE_REGSET */
2134
2135 static struct vm_area_struct *first_vma(struct task_struct *tsk,
2136 struct vm_area_struct *gate_vma)
2137 {
2138 struct vm_area_struct *ret = tsk->mm->mmap;
2139
2140 if (ret)
2141 return ret;
2142 return gate_vma;
2143 }
2144 /*
2145 * Helper function for iterating across a vma list. It ensures that the caller
2146 * will visit `gate_vma' prior to terminating the search.
2147 */
2148 static struct vm_area_struct *next_vma(struct vm_area_struct *this_vma,
2149 struct vm_area_struct *gate_vma)
2150 {
2151 struct vm_area_struct *ret;
2152
2153 ret = this_vma->vm_next;
2154 if (ret)
2155 return ret;
2156 if (this_vma == gate_vma)
2157 return NULL;
2158 return gate_vma;
2159 }
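/*
 * Typical use of the two helpers above (as in elf_core_dump() below):
 *
 *   for (vma = first_vma(current, gate_vma); vma != NULL;
 *        vma = next_vma(vma, gate_vma))
 *           ...;
 */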
2160
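/*
 * ELF extended numbering: when the true number of program headers
 * exceeds PN_XNUM (0xffff), e_phnum is set to PN_XNUM and the real
 * count is stored in the sh_info field of section header 0, which is
 * what this helper fills in.
 */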
2161 static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
2162 elf_addr_t e_shoff, int segs)
2163 {
2164 elf->e_shoff = e_shoff;
2165 elf->e_shentsize = sizeof(*shdr4extnum);
2166 elf->e_shnum = 1;
2167 elf->e_shstrndx = SHN_UNDEF;
2168
2169 memset(shdr4extnum, 0, sizeof(*shdr4extnum));
2170
2171 shdr4extnum->sh_type = SHT_NULL;
2172 shdr4extnum->sh_size = elf->e_shnum;
2173 shdr4extnum->sh_link = elf->e_shstrndx;
2174 shdr4extnum->sh_info = segs;
2175 }
2176
2177 /*
2178 * Actual dumper
2179 *
2180 * This is a two-pass process; first we find the offsets of the bits,
2181 * and then they are actually written out. If we run out of core limit
2182 * we just truncate.
2183 */
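/*
 * Resulting file layout (editor's sketch):
 *
 *   [ELF header][program headers][notes]
 *   [padding up to ELF_EXEC_PAGESIZE]
 *   [one PT_LOAD data block per vma][extra data][extnum shdr, if needed]
 */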
2184 static int elf_core_dump(struct coredump_params *cprm)
2185 {
2186 int has_dumped = 0;
2187 mm_segment_t fs;
2188 int segs, i;
2189 size_t vma_data_size = 0;
2190 struct vm_area_struct *vma, *gate_vma;
2191 struct elfhdr *elf = NULL;
2192 loff_t offset = 0, dataoff;
2193 struct elf_note_info info = { };
2194 struct elf_phdr *phdr4note = NULL;
2195 struct elf_shdr *shdr4extnum = NULL;
2196 Elf_Half e_phnum;
2197 elf_addr_t e_shoff;
2198 elf_addr_t *vma_filesz = NULL;
2199
2200 /*
2201 * We no longer stop all VM operations.
2202 *
2203 * This is because any process that could possibly change map_count
2204 * or the mmap / vma pages is now blocked in do_exit() until current
2205 * finishes this core dump.
2206 *
2207 * Only ptrace can touch these memory addresses, but it doesn't change
2208 * the map_count or the pages allocated. So there is no possibility
2209 * of crashing while dumping the mm->vm_next areas to the core file.
2210 */
2211
2212 /* alloc memory for large data structures: too large to be on stack */
2213 elf = kmalloc(sizeof(*elf), GFP_KERNEL);
2214 if (!elf)
2215 goto out;
2216 /*
2217 * The number of segs is recorded in the ELF header as a 16-bit value.
2218 * Please check the DEFAULT_MAX_MAP_COUNT definition when modifying this.
2219 */
2220 segs = current->mm->map_count;
2221 segs += elf_core_extra_phdrs();
2222
2223 gate_vma = get_gate_vma(current->mm);
2224 if (gate_vma != NULL)
2225 segs++;
2226
2227 /* for notes section */
2228 segs++;
2229
2230 /* If segs > PN_XNUM(0xffff), then e_phnum overflows. To avoid
2231 * this, the kernel supports extended numbering. Have a look at
2232 * include/linux/elf.h for further information. */
2233 e_phnum = segs > PN_XNUM ? PN_XNUM : segs;
2234
2235 /*
2236 * Collect all the non-memory information about the process for the
2237 * notes. This also sets up the file header.
2238 */
2239 if (!fill_note_info(elf, e_phnum, &info, cprm->siginfo, cprm->regs))
2240 goto cleanup;
2241
2242 has_dumped = 1;
2243
2244 fs = get_fs();
2245 set_fs(KERNEL_DS);
2246
2247 offset += sizeof(*elf); /* Elf header */
2248 offset += segs * sizeof(struct elf_phdr); /* Program headers */
2249
2250 /* Write notes phdr entry */
2251 {
2252 size_t sz = get_note_info_size(&info);
2253
2254 sz += elf_coredump_extra_notes_size();
2255
2256 phdr4note = kmalloc(sizeof(*phdr4note), GFP_KERNEL);
2257 if (!phdr4note)
2258 goto end_coredump;
2259
2260 fill_elf_note_phdr(phdr4note, sz, offset);
2261 offset += sz;
2262 }
2263
2264 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
2265
2266 if (segs - 1 > ULONG_MAX / sizeof(*vma_filesz))
2267 goto end_coredump;
2268 vma_filesz = vmalloc((segs - 1) * sizeof(*vma_filesz));
2269 if (!vma_filesz)
2270 goto end_coredump;
2271
2272 for (i = 0, vma = first_vma(current, gate_vma); vma != NULL;
2273 vma = next_vma(vma, gate_vma)) {
2274 unsigned long dump_size;
2275
2276 dump_size = vma_dump_size(vma, cprm->mm_flags);
2277 vma_filesz[i++] = dump_size;
2278 vma_data_size += dump_size;
2279 }
2280
2281 offset += vma_data_size;
2282 offset += elf_core_extra_data_size();
2283 e_shoff = offset;
2284
2285 if (e_phnum == PN_XNUM) {
2286 shdr4extnum = kmalloc(sizeof(*shdr4extnum), GFP_KERNEL);
2287 if (!shdr4extnum)
2288 goto end_coredump;
2289 fill_extnum_info(elf, shdr4extnum, e_shoff, segs);
2290 }
2291
2292 offset = dataoff;
2293
2294 if (!dump_emit(cprm, elf, sizeof(*elf)))
2295 goto end_coredump;
2296
2297 if (!dump_emit(cprm, phdr4note, sizeof(*phdr4note)))
2298 goto end_coredump;
2299
2300 /* Write program headers for segments dump */
2301 for (i = 0, vma = first_vma(current, gate_vma); vma != NULL;
2302 vma = next_vma(vma, gate_vma)) {
2303 struct elf_phdr phdr;
2304
2305 phdr.p_type = PT_LOAD;
2306 phdr.p_offset = offset;
2307 phdr.p_vaddr = vma->vm_start;
2308 phdr.p_paddr = 0;
2309 phdr.p_filesz = vma_filesz[i++];
2310 phdr.p_memsz = vma->vm_end - vma->vm_start;
2311 offset += phdr.p_filesz;
2312 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
2313 if (vma->vm_flags & VM_WRITE)
2314 phdr.p_flags |= PF_W;
2315 if (vma->vm_flags & VM_EXEC)
2316 phdr.p_flags |= PF_X;
2317 phdr.p_align = ELF_EXEC_PAGESIZE;
2318
2319 if (!dump_emit(cprm, &phdr, sizeof(phdr)))
2320 goto end_coredump;
2321 }
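/*
 * Editor's note: p_filesz may be smaller than p_memsz here, since
 * vma_dump_size() may have chosen to omit or truncate a vma's contents;
 * per the ELF format, the difference reads back as zeroes.
 */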
2322
2323 if (!elf_core_write_extra_phdrs(cprm, offset))
2324 goto end_coredump;
2325
2326 /* write out the notes section */
2327 if (!write_note_info(&info, cprm))
2328 goto end_coredump;
2329
2330 if (elf_coredump_extra_notes_write(cprm))
2331 goto end_coredump;
2332
2333 /* Align to page */
2334 if (!dump_skip(cprm, dataoff - cprm->pos))
2335 goto end_coredump;
2336
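/*
 * Dump the selected vma contents page by page; pages that cannot be
 * brought in are skipped, leaving a sparse hole in the core file.
 */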
2337 for (i = 0, vma = first_vma(current, gate_vma); vma != NULL;
2338 vma = next_vma(vma, gate_vma)) {
2339 unsigned long addr;
2340 unsigned long end;
2341
2342 end = vma->vm_start + vma_filesz[i++];
2343
2344 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
2345 struct page *page;
2346 int stop;
2347
2348 page = get_dump_page(addr);
2349 if (page) {
2350 void *kaddr = kmap(page);
2351 stop = !dump_emit(cprm, kaddr, PAGE_SIZE);
2352 kunmap(page);
2353 put_page(page);
2354 } else
2355 stop = !dump_skip(cprm, PAGE_SIZE);
2356 if (stop)
2357 goto end_coredump;
2358 }
2359 }
2360 dump_truncate(cprm);
2361
2362 if (!elf_core_write_extra_data(cprm))
2363 goto end_coredump;
2364
2365 if (e_phnum == PN_XNUM) {
2366 if (!dump_emit(cprm, shdr4extnum, sizeof(*shdr4extnum)))
2367 goto end_coredump;
2368 }
2369
2370 end_coredump:
2371 set_fs(fs);
2372
2373 cleanup:
2374 free_note_info(&info);
2375 kfree(shdr4extnum);
2376 vfree(vma_filesz);
2377 kfree(phdr4note);
2378 kfree(elf);
2379 out:
2380 return has_dumped;
2381 }
2382
2383 #endif /* CONFIG_ELF_CORE */
2384
2385 static int __init init_elf_binfmt(void)
2386 {
2387 register_binfmt(&elf_format);
2388 return 0;
2389 }
2390
2391 static void __exit exit_elf_binfmt(void)
2392 {
2393 /* Remove the ELF loader. */
2394 unregister_binfmt(&elf_format);
2395 }
2396
2397 core_initcall(init_elf_binfmt);
2398 module_exit(exit_elf_binfmt);
2399 MODULE_LICENSE("GPL");