1da177e4
LT
1/*
2 * linux/fs/binfmt_elf.c
3 *
4 * These are the functions used to load ELF format executables as used
5 * on SVr4 machines. Information on the format may be found in the book
6 * "UNIX SYSTEM V RELEASE 4 Programmers Guide: Ansi C and Programming Support
7 * Tools".
8 *
9 * Copyright 1993, 1994: Eric Youngdale (ericy@cais.com).
10 */
11
12#include <linux/module.h>
13#include <linux/kernel.h>
14#include <linux/fs.h>
1da177e4
LT
15#include <linux/mm.h>
16#include <linux/mman.h>
1da177e4
LT
17#include <linux/errno.h>
18#include <linux/signal.h>
19#include <linux/binfmts.h>
20#include <linux/string.h>
21#include <linux/file.h>
1da177e4 22#include <linux/slab.h>
1da177e4
LT
23#include <linux/personality.h>
24#include <linux/elfcore.h>
25#include <linux/init.h>
26#include <linux/highuid.h>
1da177e4
LT
27#include <linux/compiler.h>
28#include <linux/highmem.h>
29#include <linux/pagemap.h>
2aa362c4 30#include <linux/vmalloc.h>
1da177e4 31#include <linux/security.h>
1da177e4 32#include <linux/random.h>
f4e5cc2c 33#include <linux/elf.h>
d1fd836d 34#include <linux/elf-randomize.h>
7e80d0d0 35#include <linux/utsname.h>
088e7af7 36#include <linux/coredump.h>
6fac4829 37#include <linux/sched.h>
5037835c 38#include <linux/dax.h>
1da177e4
LT
39#include <asm/uaccess.h>
40#include <asm/param.h>
41#include <asm/page.h>
42
2aa362c4
DV
43#ifndef user_long_t
44#define user_long_t long
45#endif
49ae4d4b
DV
46#ifndef user_siginfo_t
47#define user_siginfo_t siginfo_t
48#endif
49
71613c3b 50static int load_elf_binary(struct linux_binprm *bprm);
bb1ad820
AM
51static unsigned long elf_map(struct file *, unsigned long, struct elf_phdr *,
52 int, int, unsigned long);
1da177e4 53
69369a70
JT
54#ifdef CONFIG_USELIB
55static int load_elf_library(struct file *);
56#else
57#define load_elf_library NULL
58#endif
59
1da177e4
LT
60/*
61 * If we don't support core dumping, then supply a NULL so we
62 * don't even try.
63 */
698ba7b5 64#ifdef CONFIG_ELF_CORE
f6151dfe 65static int elf_core_dump(struct coredump_params *cprm);
1da177e4
LT
66#else
67#define elf_core_dump NULL
68#endif
69
70#if ELF_EXEC_PAGESIZE > PAGE_SIZE
f4e5cc2c 71#define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
1da177e4 72#else
f4e5cc2c 73#define ELF_MIN_ALIGN PAGE_SIZE
1da177e4
LT
74#endif
75
76#ifndef ELF_CORE_EFLAGS
77#define ELF_CORE_EFLAGS 0
78#endif
79
80#define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1))
81#define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
82#define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))
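/*
 * Worked example (illustrative only, assuming ELF_MIN_ALIGN == 0x1000):
 *   ELF_PAGESTART(0x40123456)  == 0x40123000   (round down to page)
 *   ELF_PAGEOFFSET(0x40123456) == 0x456        (offset within the page)
 *   ELF_PAGEALIGN(0x40123456)  == 0x40124000   (round up to page)
 */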
83
84static struct linux_binfmt elf_format = {
f670d0ec
MP
85 .module = THIS_MODULE,
86 .load_binary = load_elf_binary,
87 .load_shlib = load_elf_library,
88 .core_dump = elf_core_dump,
89 .min_coredump = ELF_EXEC_PAGESIZE,
1da177e4
LT
90};
91
d4e3cc38 92#define BAD_ADDR(x) ((unsigned long)(x) >= TASK_SIZE)
1da177e4
LT
93
94static int set_brk(unsigned long start, unsigned long end)
95{
96 start = ELF_PAGEALIGN(start);
97 end = ELF_PAGEALIGN(end);
98 if (end > start) {
99 unsigned long addr;
e4eb1ff6 100 addr = vm_brk(start, end - start);
1da177e4
LT
101 if (BAD_ADDR(addr))
102 return addr;
103 }
104 current->mm->start_brk = current->mm->brk = end;
105 return 0;
106}
107
1da177e4
LT
108/* We need to explicitly zero any fractional pages
109 after the data section (i.e. bss). This would
110 contain the junk from the file that should not
111 be in memory.
112 */
1da177e4
LT
113static int padzero(unsigned long elf_bss)
114{
115 unsigned long nbyte;
116
117 nbyte = ELF_PAGEOFFSET(elf_bss);
118 if (nbyte) {
119 nbyte = ELF_MIN_ALIGN - nbyte;
120 if (clear_user((void __user *) elf_bss, nbyte))
121 return -EFAULT;
122 }
123 return 0;
124}
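/*
 * Illustration (hypothetical values): with ELF_MIN_ALIGN == 0x1000 and
 * elf_bss == 0x601234, ELF_PAGEOFFSET(elf_bss) == 0x234, so padzero()
 * clears 0x1000 - 0x234 == 0xdcc bytes, i.e. user memory from 0x601234
 * up to the page boundary at 0x602000.
 */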
125
09c6dd3c 126/* Let's use some macros to make this stack manipulation a little clearer */
1da177e4
LT
127#ifdef CONFIG_STACK_GROWSUP
128#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) + (items))
129#define STACK_ROUND(sp, items) \
130 ((15 + (unsigned long) ((sp) + (items))) &~ 15UL)
f4e5cc2c
JJ
131#define STACK_ALLOC(sp, len) ({ \
132 elf_addr_t __user *old_sp = (elf_addr_t __user *)sp; sp += len; \
133 old_sp; })
1da177e4
LT
134#else
135#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) - (items))
136#define STACK_ROUND(sp, items) \
137 (((unsigned long) (sp - items)) &~ 15UL)
138#define STACK_ALLOC(sp, len) ({ sp -= len ; sp; })
139#endif
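/*
 * Example of the downward-growing (non-STACK_GROWSUP) case, with
 * hypothetical numbers: if sp == 0x7ffffffff000 and len == 16, then
 * STACK_ALLOC(sp, len) leaves sp == 0x7fffffffeff0 and yields that new,
 * lower address as the start of the 16-byte allocation.
 */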
140
483fad1c
NL
141#ifndef ELF_BASE_PLATFORM
142/*
143 * AT_BASE_PLATFORM indicates the "real" hardware/microarchitecture.
144 * If the arch defines ELF_BASE_PLATFORM (in asm/elf.h), the value
145 * will be copied to the user stack in the same manner as AT_PLATFORM.
146 */
147#define ELF_BASE_PLATFORM NULL
148#endif
149
1da177e4 150static int
f4e5cc2c 151create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
d20894a2 152 unsigned long load_addr, unsigned long interp_load_addr)
1da177e4
LT
153{
154 unsigned long p = bprm->p;
155 int argc = bprm->argc;
156 int envc = bprm->envc;
157 elf_addr_t __user *argv;
158 elf_addr_t __user *envp;
159 elf_addr_t __user *sp;
160 elf_addr_t __user *u_platform;
483fad1c 161 elf_addr_t __user *u_base_platform;
f06295b4 162 elf_addr_t __user *u_rand_bytes;
1da177e4 163 const char *k_platform = ELF_PLATFORM;
483fad1c 164 const char *k_base_platform = ELF_BASE_PLATFORM;
f06295b4 165 unsigned char k_rand_bytes[16];
1da177e4
LT
166 int items;
167 elf_addr_t *elf_info;
168 int ei_index = 0;
86a264ab 169 const struct cred *cred = current_cred();
b6a2fea3 170 struct vm_area_struct *vma;
1da177e4 171
d68c9d6a
FBH
172 /*
173 * In some cases (e.g. Hyper-Threading), we want to avoid L1
174 * evictions by the processes running on the same package. One
175 * thing we can do is to shuffle the initial stack for them.
176 */
177
178 p = arch_align_stack(p);
179
1da177e4
LT
180 /*
181 * If this architecture has a platform capability string, copy it
182 * to userspace. In some cases (Sparc), this info is impossible
183 * for userspace to get any other way, in others (i386) it is
184 * merely difficult.
185 */
1da177e4
LT
186 u_platform = NULL;
187 if (k_platform) {
188 size_t len = strlen(k_platform) + 1;
189
1da177e4
LT
190 u_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
191 if (__copy_to_user(u_platform, k_platform, len))
192 return -EFAULT;
193 }
194
483fad1c
NL
195 /*
196 * If this architecture has a "base" platform capability
197 * string, copy it to userspace.
198 */
199 u_base_platform = NULL;
200 if (k_base_platform) {
201 size_t len = strlen(k_base_platform) + 1;
202
203 u_base_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
204 if (__copy_to_user(u_base_platform, k_base_platform, len))
205 return -EFAULT;
206 }
207
f06295b4
KC
208 /*
209 * Generate 16 random bytes for userspace PRNG seeding.
210 */
211 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
212 u_rand_bytes = (elf_addr_t __user *)
213 STACK_ALLOC(p, sizeof(k_rand_bytes));
214 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
215 return -EFAULT;
216
1da177e4 217 /* Create the ELF interpreter info */
785d5570 218 elf_info = (elf_addr_t *)current->mm->saved_auxv;
4f9a58d7 219 /* update AT_VECTOR_SIZE_BASE if the number of NEW_AUX_ENT() changes */
1da177e4 220#define NEW_AUX_ENT(id, val) \
f4e5cc2c 221 do { \
785d5570
JJ
222 elf_info[ei_index++] = id; \
223 elf_info[ei_index++] = val; \
f4e5cc2c 224 } while (0)
1da177e4
LT
225
226#ifdef ARCH_DLINFO
227 /*
228 * ARCH_DLINFO must come first so PPC can do its special alignment of
229 * AUXV.
230 * update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT() in
231 * ARCH_DLINFO changes
232 */
233 ARCH_DLINFO;
234#endif
235 NEW_AUX_ENT(AT_HWCAP, ELF_HWCAP);
236 NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE);
237 NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC);
238 NEW_AUX_ENT(AT_PHDR, load_addr + exec->e_phoff);
f4e5cc2c 239 NEW_AUX_ENT(AT_PHENT, sizeof(struct elf_phdr));
1da177e4
LT
240 NEW_AUX_ENT(AT_PHNUM, exec->e_phnum);
241 NEW_AUX_ENT(AT_BASE, interp_load_addr);
242 NEW_AUX_ENT(AT_FLAGS, 0);
243 NEW_AUX_ENT(AT_ENTRY, exec->e_entry);
ebc887b2
EB
244 NEW_AUX_ENT(AT_UID, from_kuid_munged(cred->user_ns, cred->uid));
245 NEW_AUX_ENT(AT_EUID, from_kuid_munged(cred->user_ns, cred->euid));
246 NEW_AUX_ENT(AT_GID, from_kgid_munged(cred->user_ns, cred->gid));
247 NEW_AUX_ENT(AT_EGID, from_kgid_munged(cred->user_ns, cred->egid));
785d5570 248 NEW_AUX_ENT(AT_SECURE, security_bprm_secureexec(bprm));
f06295b4 249 NEW_AUX_ENT(AT_RANDOM, (elf_addr_t)(unsigned long)u_rand_bytes);
2171364d
MN
250#ifdef ELF_HWCAP2
251 NEW_AUX_ENT(AT_HWCAP2, ELF_HWCAP2);
252#endif
65191087 253 NEW_AUX_ENT(AT_EXECFN, bprm->exec);
1da177e4 254 if (k_platform) {
f4e5cc2c 255 NEW_AUX_ENT(AT_PLATFORM,
785d5570 256 (elf_addr_t)(unsigned long)u_platform);
1da177e4 257 }
483fad1c
NL
258 if (k_base_platform) {
259 NEW_AUX_ENT(AT_BASE_PLATFORM,
260 (elf_addr_t)(unsigned long)u_base_platform);
261 }
1da177e4 262 if (bprm->interp_flags & BINPRM_FLAGS_EXECFD) {
785d5570 263 NEW_AUX_ENT(AT_EXECFD, bprm->interp_data);
1da177e4
LT
264 }
265#undef NEW_AUX_ENT
266 /* AT_NULL is zero; clear the rest too */
267 memset(&elf_info[ei_index], 0,
268 sizeof current->mm->saved_auxv - ei_index * sizeof elf_info[0]);
269
270 /* And advance past the AT_NULL entry. */
271 ei_index += 2;
272
273 sp = STACK_ADD(p, ei_index);
274
d20894a2 275 items = (argc + 1) + (envc + 1) + 1;
1da177e4
LT
276 bprm->p = STACK_ROUND(sp, items);
277
278 /* Point sp at the lowest address on the stack */
279#ifdef CONFIG_STACK_GROWSUP
280 sp = (elf_addr_t __user *)bprm->p - items - ei_index;
f4e5cc2c 281 bprm->exec = (unsigned long)sp; /* XXX: PARISC HACK */
1da177e4
LT
282#else
283 sp = (elf_addr_t __user *)bprm->p;
284#endif
285
b6a2fea3
OW
286
287 /*
288 * Grow the stack manually; some architectures have a limit on how
289 * far ahead a user-space access may be in order to grow the stack.
290 */
291 vma = find_extend_vma(current->mm, bprm->p);
292 if (!vma)
293 return -EFAULT;
294
1da177e4
LT
295 /* Now, let's put argc (and argv, envp if appropriate) on the stack */
296 if (__put_user(argc, sp++))
297 return -EFAULT;
d20894a2
AK
298 argv = sp;
299 envp = argv + argc + 1;
1da177e4
LT
300
301 /* Populate argv and envp */
a84a5059 302 p = current->mm->arg_end = current->mm->arg_start;
1da177e4
LT
303 while (argc-- > 0) {
304 size_t len;
841d5fb7
HC
305 if (__put_user((elf_addr_t)p, argv++))
306 return -EFAULT;
b6a2fea3
OW
307 len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
308 if (!len || len > MAX_ARG_STRLEN)
23c4971e 309 return -EINVAL;
1da177e4
LT
310 p += len;
311 }
312 if (__put_user(0, argv))
313 return -EFAULT;
314 current->mm->arg_end = current->mm->env_start = p;
315 while (envc-- > 0) {
316 size_t len;
841d5fb7
HC
317 if (__put_user((elf_addr_t)p, envp++))
318 return -EFAULT;
b6a2fea3
OW
319 len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
320 if (!len || len > MAX_ARG_STRLEN)
23c4971e 321 return -EINVAL;
1da177e4
LT
322 p += len;
323 }
324 if (__put_user(0, envp))
325 return -EFAULT;
326 current->mm->env_end = p;
327
328 /* Put the elf_info on the stack in the right place. */
329 sp = (elf_addr_t __user *)envp + 1;
330 if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
331 return -EFAULT;
332 return 0;
333}
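/*
 * Rough sketch of the initial stack that create_elf_tables() leaves behind
 * (layout only, not to scale; addresses are hypothetical):
 *
 *   sp -> argc
 *         argv[0] ... argv[argc-1], NULL
 *         envp[0] ... envp[n-1],    NULL
 *         auxv pairs (a_type, a_val) ... terminated by the AT_NULL pair
 *         (the platform strings, the 16 AT_RANDOM bytes and the arg/env
 *          strings themselves live higher up, where STACK_ALLOC put them)
 */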
334
c07380be
JH
335#ifndef elf_map
336
1da177e4 337static unsigned long elf_map(struct file *filep, unsigned long addr,
cc503c1b
JK
338 struct elf_phdr *eppnt, int prot, int type,
339 unsigned long total_size)
1da177e4
LT
340{
341 unsigned long map_addr;
cc503c1b
JK
342 unsigned long size = eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr);
343 unsigned long off = eppnt->p_offset - ELF_PAGEOFFSET(eppnt->p_vaddr);
344 addr = ELF_PAGESTART(addr);
345 size = ELF_PAGEALIGN(size);
1da177e4 346
dda6ebde
DG
347 /* mmap() will return -EINVAL if given a zero size, but a
348 * segment with zero filesize is perfectly valid */
cc503c1b
JK
349 if (!size)
350 return addr;
351
cc503c1b
JK
352 /*
353 * total_size is the size of the ELF (interpreter) image.
354 * The _first_ mmap needs to know the full size, otherwise
355 * randomization might put this image into an overlapping
356 * position with the ELF binary image. (since size < total_size)
357 * So we first map the 'big' image - and unmap the remainder at
358 * the end. (which unmap is needed for ELF images with holes.)
359 */
360 if (total_size) {
361 total_size = ELF_PAGEALIGN(total_size);
5a5e4c2e 362 map_addr = vm_mmap(filep, addr, total_size, prot, type, off);
cc503c1b 363 if (!BAD_ADDR(map_addr))
5a5e4c2e 364 vm_munmap(map_addr+size, total_size-size);
cc503c1b 365 } else
5a5e4c2e 366 map_addr = vm_mmap(filep, addr, size, prot, type, off);
cc503c1b 367
1da177e4
LT
368 return(map_addr);
369}
370
c07380be
JH
371#endif /* !elf_map */
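/*
 * Illustration of the total_size path above (hypothetical numbers): for an
 * interpreter whose first PT_LOAD covers 0x2000 bytes but whose whole image
 * (total_size) is 0x5000, elf_map() mmaps 0x5000 bytes so the kernel picks a
 * range with room for every segment, then munmaps the trailing
 * 0x5000 - 0x2000 == 0x3000 bytes; later segments are mapped MAP_FIXED into
 * the hole that was just released.
 */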
372
cc503c1b
JK
373static unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
374{
375 int i, first_idx = -1, last_idx = -1;
376
377 for (i = 0; i < nr; i++) {
378 if (cmds[i].p_type == PT_LOAD) {
379 last_idx = i;
380 if (first_idx == -1)
381 first_idx = i;
382 }
383 }
384 if (first_idx == -1)
385 return 0;
386
387 return cmds[last_idx].p_vaddr + cmds[last_idx].p_memsz -
388 ELF_PAGESTART(cmds[first_idx].p_vaddr);
389}
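/*
 * Example (hypothetical program headers): with two PT_LOAD entries, the
 * first at p_vaddr 0x400000 and the last at p_vaddr 0x600000 with
 * p_memsz 0x800, total_mapping_size() returns
 * 0x600000 + 0x800 - ELF_PAGESTART(0x400000) == 0x200800.
 */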
390
6a8d3894
PB
391/**
392 * load_elf_phdrs() - load ELF program headers
393 * @elf_ex: ELF header of the binary whose program headers should be loaded
394 * @elf_file: the opened ELF binary file
395 *
396 * Loads ELF program headers from the binary file elf_file, which has the ELF
397 * header pointed to by elf_ex, into a newly allocated array. The caller is
398 * responsible for freeing the allocated data. Returns an ERR_PTR upon failure.
399 */
400static struct elf_phdr *load_elf_phdrs(struct elfhdr *elf_ex,
401 struct file *elf_file)
402{
403 struct elf_phdr *elf_phdata = NULL;
404 int retval, size, err = -1;
405
406 /*
407 * If the size of this structure has changed, then punt, since
408 * we will be doing the wrong thing.
409 */
410 if (elf_ex->e_phentsize != sizeof(struct elf_phdr))
411 goto out;
412
413 /* Sanity check the number of program headers... */
414 if (elf_ex->e_phnum < 1 ||
415 elf_ex->e_phnum > 65536U / sizeof(struct elf_phdr))
416 goto out;
417
418 /* ...and their total size. */
419 size = sizeof(struct elf_phdr) * elf_ex->e_phnum;
420 if (size > ELF_MIN_ALIGN)
421 goto out;
422
423 elf_phdata = kmalloc(size, GFP_KERNEL);
424 if (!elf_phdata)
425 goto out;
426
427 /* Read in the program headers */
428 retval = kernel_read(elf_file, elf_ex->e_phoff,
429 (char *)elf_phdata, size);
430 if (retval != size) {
431 err = (retval < 0) ? retval : -EIO;
432 goto out;
433 }
434
435 /* Success! */
436 err = 0;
437out:
438 if (err) {
439 kfree(elf_phdata);
440 elf_phdata = NULL;
441 }
442 return elf_phdata;
443}
cc503c1b 444
774c105e
PB
445#ifndef CONFIG_ARCH_BINFMT_ELF_STATE
446
447/**
448 * struct arch_elf_state - arch-specific ELF loading state
449 *
450 * This structure is used to preserve architecture specific data during
451 * the loading of an ELF file, throughout the checking of architecture
452 * specific ELF headers & through to the point where the ELF load is
453 * known to be proceeding (ie. SET_PERSONALITY).
454 *
455 * This implementation is a dummy for architectures which require no
456 * specific state.
457 */
458struct arch_elf_state {
459};
460
461#define INIT_ARCH_ELF_STATE {}
462
463/**
464 * arch_elf_pt_proc() - check a PT_LOPROC..PT_HIPROC ELF program header
465 * @ehdr: The main ELF header
466 * @phdr: The program header to check
467 * @elf: The open ELF file
468 * @is_interp: True if the phdr is from the interpreter of the ELF being
469 * loaded, else false.
470 * @state: Architecture-specific state preserved throughout the process
471 * of loading the ELF.
472 *
473 * Inspects the program header phdr to validate its correctness and/or
474 * suitability for the system. Called once per ELF program header in the
475 * range PT_LOPROC to PT_HIPROC, for both the ELF being loaded and its
476 * interpreter.
477 *
478 * Return: Zero to proceed with the ELF load, non-zero to fail the ELF load
479 * with that return code.
480 */
481static inline int arch_elf_pt_proc(struct elfhdr *ehdr,
482 struct elf_phdr *phdr,
483 struct file *elf, bool is_interp,
484 struct arch_elf_state *state)
485{
486 /* Dummy implementation, always proceed */
487 return 0;
488}
489
490/**
54d15714 491 * arch_check_elf() - check an ELF executable
774c105e
PB
492 * @ehdr: The main ELF header
493 * @has_interp: True if the ELF has an interpreter, else false.
494 * @state: Architecture-specific state preserved throughout the process
495 * of loading the ELF.
496 *
497 * Provides a final opportunity for architecture code to reject the loading
498 * of the ELF & cause an exec syscall to return an error. This is called after
499 * all program headers to be checked by arch_elf_pt_proc have been.
500 *
501 * Return: Zero to proceed with the ELF load, non-zero to fail the ELF load
502 * with that return code.
503 */
504static inline int arch_check_elf(struct elfhdr *ehdr, bool has_interp,
505 struct arch_elf_state *state)
506{
507 /* Dummy implementation, always proceed */
508 return 0;
509}
510
511#endif /* !CONFIG_ARCH_BINFMT_ELF_STATE */
cc503c1b 512
1da177e4
LT
513/* This is much more generalized than the library routine read function,
514 so we keep this separate. Technically the library read function
515 is only provided so that we can read a.out libraries that have
516 an ELF header */
517
f4e5cc2c 518static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
cc503c1b 519 struct file *interpreter, unsigned long *interp_map_addr,
a9d9ef13 520 unsigned long no_base, struct elf_phdr *interp_elf_phdata)
1da177e4 521{
1da177e4
LT
522 struct elf_phdr *eppnt;
523 unsigned long load_addr = 0;
524 int load_addr_set = 0;
525 unsigned long last_bss = 0, elf_bss = 0;
526 unsigned long error = ~0UL;
cc503c1b 527 unsigned long total_size;
6a8d3894 528 int i;
1da177e4
LT
529
530 /* First of all, some simple consistency checks */
531 if (interp_elf_ex->e_type != ET_EXEC &&
532 interp_elf_ex->e_type != ET_DYN)
533 goto out;
534 if (!elf_check_arch(interp_elf_ex))
535 goto out;
72c2d531 536 if (!interpreter->f_op->mmap)
1da177e4
LT
537 goto out;
538
a9d9ef13
PB
539 total_size = total_mapping_size(interp_elf_phdata,
540 interp_elf_ex->e_phnum);
cc503c1b
JK
541 if (!total_size) {
542 error = -EINVAL;
a9d9ef13 543 goto out;
cc503c1b
JK
544 }
545
a9d9ef13 546 eppnt = interp_elf_phdata;
f4e5cc2c
JJ
547 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
548 if (eppnt->p_type == PT_LOAD) {
549 int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
550 int elf_prot = 0;
551 unsigned long vaddr = 0;
552 unsigned long k, map_addr;
553
554 if (eppnt->p_flags & PF_R)
555 elf_prot = PROT_READ;
556 if (eppnt->p_flags & PF_W)
557 elf_prot |= PROT_WRITE;
558 if (eppnt->p_flags & PF_X)
559 elf_prot |= PROT_EXEC;
560 vaddr = eppnt->p_vaddr;
561 if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
562 elf_type |= MAP_FIXED;
cc503c1b
JK
563 else if (no_base && interp_elf_ex->e_type == ET_DYN)
564 load_addr = -vaddr;
f4e5cc2c
JJ
565
566 map_addr = elf_map(interpreter, load_addr + vaddr,
bb1ad820 567 eppnt, elf_prot, elf_type, total_size);
cc503c1b
JK
568 total_size = 0;
569 if (!*interp_map_addr)
570 *interp_map_addr = map_addr;
f4e5cc2c
JJ
571 error = map_addr;
572 if (BAD_ADDR(map_addr))
a9d9ef13 573 goto out;
f4e5cc2c
JJ
574
575 if (!load_addr_set &&
576 interp_elf_ex->e_type == ET_DYN) {
577 load_addr = map_addr - ELF_PAGESTART(vaddr);
578 load_addr_set = 1;
579 }
580
581 /*
582 * Check to see if the section's size will overflow the
583 * allowed task size. Note that p_filesz must always be
584 * <= p_memsz so it's only necessary to check p_memsz.
585 */
586 k = load_addr + eppnt->p_vaddr;
ce51059b 587 if (BAD_ADDR(k) ||
f4e5cc2c
JJ
588 eppnt->p_filesz > eppnt->p_memsz ||
589 eppnt->p_memsz > TASK_SIZE ||
590 TASK_SIZE - eppnt->p_memsz < k) {
591 error = -ENOMEM;
a9d9ef13 592 goto out;
f4e5cc2c
JJ
593 }
594
595 /*
596 * Find the end of the file mapping for this phdr, and
597 * keep track of the largest address we see for this.
598 */
599 k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
600 if (k > elf_bss)
601 elf_bss = k;
602
603 /*
604 * Do the same thing for the memory mapping - between
605 * elf_bss and last_bss is the bss section.
606 */
607 k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
608 if (k > last_bss)
609 last_bss = k;
610 }
1da177e4
LT
611 }
612
752015d1
RM
613 if (last_bss > elf_bss) {
614 /*
615 * Now fill out the bss section. First pad the last page up
616 * to the page boundary, and then perform a mmap to make sure
617 * that there are zero-mapped pages up to and including the
618 * last bss page.
619 */
620 if (padzero(elf_bss)) {
621 error = -EFAULT;
a9d9ef13 622 goto out;
752015d1 623 }
1da177e4 624
752015d1
RM
625 /* What we have mapped so far */
626 elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);
1da177e4 627
752015d1 628 /* Map the last of the bss segment */
e4eb1ff6 629 error = vm_brk(elf_bss, last_bss - elf_bss);
1da177e4 630 if (BAD_ADDR(error))
a9d9ef13 631 goto out;
1da177e4
LT
632 }
633
cc503c1b 634 error = load_addr;
1da177e4
LT
635out:
636 return error;
637}
638
1da177e4
LT
639/*
640 * These are the functions used to load ELF style executables and shared
641 * libraries. There is no binary dependent code anywhere else.
642 */
643
913bd906 644#ifndef STACK_RND_MASK
d1cabd63 645#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12)) /* 8MB of VA */
913bd906 646#endif
1da177e4
LT
647
648static unsigned long randomize_stack_top(unsigned long stack_top)
649{
4e7c22d4 650 unsigned long random_variable = 0;
1da177e4 651
c16b63e0
AK
652 if ((current->flags & PF_RANDOMIZE) &&
653 !(current->personality & ADDR_NO_RANDOMIZE)) {
4e7c22d4
HMG
654 random_variable = (unsigned long) get_random_int();
655 random_variable &= STACK_RND_MASK;
913bd906
AK
656 random_variable <<= PAGE_SHIFT;
657 }
1da177e4 658#ifdef CONFIG_STACK_GROWSUP
913bd906 659 return PAGE_ALIGN(stack_top) + random_variable;
1da177e4 660#else
913bd906 661 return PAGE_ALIGN(stack_top) - random_variable;
1da177e4
LT
662#endif
663}
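/*
 * Illustration with the default STACK_RND_MASK on 4 KiB pages (0x7ff):
 * the random offset is at most 0x7ff << 12 == 0x7ff000 (just under 8 MiB),
 * so e.g. a stack_top of 0x7ffffffff000 may be lowered to anywhere down to
 * 0x7fffff800000; the addresses here are purely hypothetical.
 */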
664
71613c3b 665static int load_elf_binary(struct linux_binprm *bprm)
1da177e4
LT
666{
667 struct file *interpreter = NULL; /* to shut gcc up */
668 unsigned long load_addr = 0, load_bias = 0;
669 int load_addr_set = 0;
670 char * elf_interpreter = NULL;
1da177e4 671 unsigned long error;
a9d9ef13 672 struct elf_phdr *elf_ppnt, *elf_phdata, *interp_elf_phdata = NULL;
1da177e4 673 unsigned long elf_bss, elf_brk;
1da177e4 674 int retval, i;
cc503c1b
JK
675 unsigned long elf_entry;
676 unsigned long interp_load_addr = 0;
1da177e4 677 unsigned long start_code, end_code, start_data, end_data;
1a530a6f 678 unsigned long reloc_func_desc __maybe_unused = 0;
8de61e69 679 int executable_stack = EXSTACK_DEFAULT;
71613c3b 680 struct pt_regs *regs = current_pt_regs();
1da177e4
LT
681 struct {
682 struct elfhdr elf_ex;
683 struct elfhdr interp_elf_ex;
1da177e4 684 } *loc;
774c105e 685 struct arch_elf_state arch_state = INIT_ARCH_ELF_STATE;
1da177e4
LT
686
687 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
688 if (!loc) {
689 retval = -ENOMEM;
690 goto out_ret;
691 }
692
693 /* Get the exec-header */
f4e5cc2c 694 loc->elf_ex = *((struct elfhdr *)bprm->buf);
1da177e4
LT
695
696 retval = -ENOEXEC;
697 /* First of all, some simple consistency checks */
698 if (memcmp(loc->elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
699 goto out;
700
701 if (loc->elf_ex.e_type != ET_EXEC && loc->elf_ex.e_type != ET_DYN)
702 goto out;
703 if (!elf_check_arch(&loc->elf_ex))
704 goto out;
72c2d531 705 if (!bprm->file->f_op->mmap)
1da177e4
LT
706 goto out;
707
6a8d3894 708 elf_phdata = load_elf_phdrs(&loc->elf_ex, bprm->file);
1da177e4
LT
709 if (!elf_phdata)
710 goto out;
711
1da177e4
LT
712 elf_ppnt = elf_phdata;
713 elf_bss = 0;
714 elf_brk = 0;
715
716 start_code = ~0UL;
717 end_code = 0;
718 start_data = 0;
719 end_data = 0;
720
721 for (i = 0; i < loc->elf_ex.e_phnum; i++) {
722 if (elf_ppnt->p_type == PT_INTERP) {
723 /* This is the program interpreter used for
724 * shared libraries - for now assume that this
725 * is an a.out format binary
726 */
1da177e4
LT
727 retval = -ENOEXEC;
728 if (elf_ppnt->p_filesz > PATH_MAX ||
729 elf_ppnt->p_filesz < 2)
e7b9b550 730 goto out_free_ph;
1da177e4
LT
731
732 retval = -ENOMEM;
792db3af 733 elf_interpreter = kmalloc(elf_ppnt->p_filesz,
f4e5cc2c 734 GFP_KERNEL);
1da177e4 735 if (!elf_interpreter)
e7b9b550 736 goto out_free_ph;
1da177e4
LT
737
738 retval = kernel_read(bprm->file, elf_ppnt->p_offset,
f4e5cc2c
JJ
739 elf_interpreter,
740 elf_ppnt->p_filesz);
1da177e4
LT
741 if (retval != elf_ppnt->p_filesz) {
742 if (retval >= 0)
743 retval = -EIO;
744 goto out_free_interp;
745 }
746 /* make sure path is NULL terminated */
747 retval = -ENOEXEC;
748 if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0')
749 goto out_free_interp;
750
1da177e4
LT
751 interpreter = open_exec(elf_interpreter);
752 retval = PTR_ERR(interpreter);
753 if (IS_ERR(interpreter))
754 goto out_free_interp;
1fb84496
AD
755
756 /*
757 * If the binary is not readable then enforce
758 * mm->dumpable = 0 regardless of the interpreter's
759 * permissions.
760 */
1b5d783c 761 would_dump(bprm, interpreter);
1fb84496 762
b582ef5c
MR
763 /* Get the exec headers */
764 retval = kernel_read(interpreter, 0,
765 (void *)&loc->interp_elf_ex,
766 sizeof(loc->interp_elf_ex));
767 if (retval != sizeof(loc->interp_elf_ex)) {
1da177e4
LT
768 if (retval >= 0)
769 retval = -EIO;
770 goto out_free_dentry;
771 }
772
1da177e4
LT
773 break;
774 }
775 elf_ppnt++;
776 }
777
778 elf_ppnt = elf_phdata;
779 for (i = 0; i < loc->elf_ex.e_phnum; i++, elf_ppnt++)
774c105e
PB
780 switch (elf_ppnt->p_type) {
781 case PT_GNU_STACK:
1da177e4
LT
782 if (elf_ppnt->p_flags & PF_X)
783 executable_stack = EXSTACK_ENABLE_X;
784 else
785 executable_stack = EXSTACK_DISABLE_X;
786 break;
774c105e
PB
787
788 case PT_LOPROC ... PT_HIPROC:
789 retval = arch_elf_pt_proc(&loc->elf_ex, elf_ppnt,
790 bprm->file, false,
791 &arch_state);
792 if (retval)
793 goto out_free_dentry;
794 break;
1da177e4 795 }
1da177e4
LT
796
797 /* Some simple consistency checks for the interpreter */
798 if (elf_interpreter) {
1da177e4 799 retval = -ELIBBAD;
d20894a2
AK
800 /* Not an ELF interpreter */
801 if (memcmp(loc->interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
1da177e4 802 goto out_free_dentry;
1da177e4 803 /* Verify the interpreter has a valid arch */
d20894a2 804 if (!elf_check_arch(&loc->interp_elf_ex))
1da177e4 805 goto out_free_dentry;
a9d9ef13
PB
806
807 /* Load the interpreter program headers */
808 interp_elf_phdata = load_elf_phdrs(&loc->interp_elf_ex,
809 interpreter);
810 if (!interp_elf_phdata)
811 goto out_free_dentry;
774c105e
PB
812
813 /* Pass PT_LOPROC..PT_HIPROC headers to arch code */
814 elf_ppnt = interp_elf_phdata;
815 for (i = 0; i < loc->interp_elf_ex.e_phnum; i++, elf_ppnt++)
816 switch (elf_ppnt->p_type) {
817 case PT_LOPROC ... PT_HIPROC:
818 retval = arch_elf_pt_proc(&loc->interp_elf_ex,
819 elf_ppnt, interpreter,
820 true, &arch_state);
821 if (retval)
822 goto out_free_dentry;
823 break;
824 }
1da177e4
LT
825 }
826
774c105e
PB
827 /*
828 * Allow arch code to reject the ELF at this point, whilst it's
829 * still possible to return an error to the code that invoked
830 * the exec syscall.
831 */
832 retval = arch_check_elf(&loc->elf_ex, !!interpreter, &arch_state);
833 if (retval)
834 goto out_free_dentry;
835
1da177e4
LT
836 /* Flush all traces of the currently running executable */
837 retval = flush_old_exec(bprm);
838 if (retval)
839 goto out_free_dentry;
840
1da177e4
LT
841 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
842 may depend on the personality. */
774c105e 843 SET_PERSONALITY2(loc->elf_ex, &arch_state);
1da177e4
LT
844 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
845 current->personality |= READ_IMPLIES_EXEC;
846
f4e5cc2c 847 if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1da177e4 848 current->flags |= PF_RANDOMIZE;
221af7f8
LT
849
850 setup_new_exec(bprm);
1da177e4
LT
851
852 /* Do this so that we can load the interpreter, if need be. We will
853 change some of these later */
1da177e4
LT
854 retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP),
855 executable_stack);
19d860a1 856 if (retval < 0)
1da177e4 857 goto out_free_dentry;
1da177e4 858
1da177e4
LT
859 current->mm->start_stack = bprm->p;
860
af901ca1 861 /* Now we do a little grungy work by mmapping the ELF image into
cc503c1b 862 the correct location in memory. */
f4e5cc2c
JJ
863 for(i = 0, elf_ppnt = elf_phdata;
864 i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
1da177e4
LT
865 int elf_prot = 0, elf_flags;
866 unsigned long k, vaddr;
a87938b2 867 unsigned long total_size = 0;
1da177e4
LT
868
869 if (elf_ppnt->p_type != PT_LOAD)
870 continue;
871
872 if (unlikely (elf_brk > elf_bss)) {
873 unsigned long nbyte;
874
875 /* There was a PT_LOAD segment with p_memsz > p_filesz
876 before this one. Map anonymous pages, if needed,
877 and clear the area. */
f670d0ec
MP
878 retval = set_brk(elf_bss + load_bias,
879 elf_brk + load_bias);
19d860a1 880 if (retval)
1da177e4 881 goto out_free_dentry;
1da177e4
LT
882 nbyte = ELF_PAGEOFFSET(elf_bss);
883 if (nbyte) {
884 nbyte = ELF_MIN_ALIGN - nbyte;
885 if (nbyte > elf_brk - elf_bss)
886 nbyte = elf_brk - elf_bss;
887 if (clear_user((void __user *)elf_bss +
888 load_bias, nbyte)) {
889 /*
890 * This bss-zeroing can fail if the ELF
891 * file specifies odd protections. So
892 * we don't check the return value
893 */
894 }
895 }
896 }
897
f4e5cc2c
JJ
898 if (elf_ppnt->p_flags & PF_R)
899 elf_prot |= PROT_READ;
900 if (elf_ppnt->p_flags & PF_W)
901 elf_prot |= PROT_WRITE;
902 if (elf_ppnt->p_flags & PF_X)
903 elf_prot |= PROT_EXEC;
1da177e4 904
f4e5cc2c 905 elf_flags = MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE;
1da177e4
LT
906
907 vaddr = elf_ppnt->p_vaddr;
1cc9ab1a
KC
908 /*
909 * If we are loading ET_EXEC or we have already performed
910 * the ET_DYN load_addr calculations, proceed normally.
911 */
1da177e4
LT
912 if (loc->elf_ex.e_type == ET_EXEC || load_addr_set) {
913 elf_flags |= MAP_FIXED;
914 } else if (loc->elf_ex.e_type == ET_DYN) {
1cc9ab1a
KC
915 /*
916 * This logic is run once for the first LOAD Program
917 * Header for ET_DYN binaries to calculate the
918 * randomization (load_bias) for all the LOAD
919 * Program Headers, and to calculate the entire
920 * size of the ELF mapping (total_size). (Note that
921 * load_addr_set is set to true later once the
922 * initial mapping is performed.)
923 *
924 * There are effectively two types of ET_DYN
925 * binaries: programs (i.e. PIE: ET_DYN with INTERP)
926 * and loaders (ET_DYN without INTERP, since they
927 * _are_ the ELF interpreter). The loaders must
928 * be loaded away from programs since the program
929 * may otherwise collide with the loader (especially
930 * for ET_EXEC which does not have a randomized
931 * position). For example to handle invocations of
932 * "./ld.so someprog" to test out a new version of
933 * the loader, the subsequent program that the
934 * loader loads must avoid the loader itself, so
935 * they cannot share the same load range. Sufficient
936 * room for the brk must be allocated with the
937 * loader as well, since brk must be available with
938 * the loader.
939 *
940 * Therefore, programs are loaded offset from
941 * ELF_ET_DYN_BASE and loaders are loaded into the
942 * independently randomized mmap region (0 load_bias
943 * without MAP_FIXED).
944 */
945 if (elf_interpreter) {
946 load_bias = ELF_ET_DYN_BASE;
947 if (current->flags & PF_RANDOMIZE)
948 load_bias += arch_mmap_rnd();
949 elf_flags |= MAP_FIXED;
950 } else
951 load_bias = 0;
952
953 /*
954 * Since load_bias is used for all subsequent loading
955 * calculations, we must lower it by the first vaddr
956 * so that the remaining calculations based on the
957 * ELF vaddrs will be correctly offset. The result
958 * is then page aligned.
959 */
960 load_bias = ELF_PAGESTART(load_bias - vaddr);
961
a87938b2
MD
962 total_size = total_mapping_size(elf_phdata,
963 loc->elf_ex.e_phnum);
964 if (!total_size) {
2b1d3ae9 965 retval = -EINVAL;
a87938b2
MD
966 goto out_free_dentry;
967 }
1da177e4
LT
968 }
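/*
 * Worked example of the ET_DYN bias math (all numbers hypothetical):
 * if load_bias starts out as 0x555555554000 and the first PT_LOAD has
 * p_vaddr 0x1000, then load_bias = ELF_PAGESTART(0x555555554000 - 0x1000)
 * == 0x555555553000, and elf_map() below places that segment at
 * load_bias + vaddr == 0x555555554000.
 */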
969
f4e5cc2c 970 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
a87938b2 971 elf_prot, elf_flags, total_size);
1da177e4 972 if (BAD_ADDR(error)) {
b140f251
AK
973 retval = IS_ERR((void *)error) ?
974 PTR_ERR((void*)error) : -EINVAL;
1da177e4
LT
975 goto out_free_dentry;
976 }
977
978 if (!load_addr_set) {
979 load_addr_set = 1;
980 load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset);
981 if (loc->elf_ex.e_type == ET_DYN) {
982 load_bias += error -
983 ELF_PAGESTART(load_bias + vaddr);
984 load_addr += load_bias;
985 reloc_func_desc = load_bias;
986 }
987 }
988 k = elf_ppnt->p_vaddr;
f4e5cc2c
JJ
989 if (k < start_code)
990 start_code = k;
991 if (start_data < k)
992 start_data = k;
1da177e4
LT
993
994 /*
995 * Check to see if the section's size will overflow the
996 * allowed task size. Note that p_filesz must always be
997 * <= p_memsz so it is only necessary to check p_memsz.
998 */
ce51059b 999 if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
1da177e4
LT
1000 elf_ppnt->p_memsz > TASK_SIZE ||
1001 TASK_SIZE - elf_ppnt->p_memsz < k) {
f4e5cc2c 1002 /* set_brk can never work. Avoid overflows. */
b140f251 1003 retval = -EINVAL;
1da177e4
LT
1004 goto out_free_dentry;
1005 }
1006
1007 k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
1008
1009 if (k > elf_bss)
1010 elf_bss = k;
1011 if ((elf_ppnt->p_flags & PF_X) && end_code < k)
1012 end_code = k;
1013 if (end_data < k)
1014 end_data = k;
1015 k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
1016 if (k > elf_brk)
1017 elf_brk = k;
1018 }
1019
1020 loc->elf_ex.e_entry += load_bias;
1021 elf_bss += load_bias;
1022 elf_brk += load_bias;
1023 start_code += load_bias;
1024 end_code += load_bias;
1025 start_data += load_bias;
1026 end_data += load_bias;
1027
1028 /* Calling set_brk effectively mmaps the pages that we need
1029 * for the bss and break sections. We must do this before
1030 * mapping in the interpreter, to make sure it doesn't wind
1031 * up getting placed where the bss needs to go.
1032 */
1033 retval = set_brk(elf_bss, elf_brk);
19d860a1 1034 if (retval)
1da177e4 1035 goto out_free_dentry;
6de50517 1036 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
1da177e4
LT
1037 retval = -EFAULT; /* Nobody gets to see this, but.. */
1038 goto out_free_dentry;
1039 }
1040
1041 if (elf_interpreter) {
6eec482f 1042 unsigned long interp_map_addr = 0;
d20894a2
AK
1043
1044 elf_entry = load_elf_interp(&loc->interp_elf_ex,
1045 interpreter,
1046 &interp_map_addr,
a9d9ef13 1047 load_bias, interp_elf_phdata);
d20894a2
AK
1048 if (!IS_ERR((void *)elf_entry)) {
1049 /*
1050 * load_elf_interp() returns relocation
1051 * adjustment
1052 */
1053 interp_load_addr = elf_entry;
1054 elf_entry += loc->interp_elf_ex.e_entry;
cc503c1b 1055 }
1da177e4 1056 if (BAD_ADDR(elf_entry)) {
ce51059b
CE
1057 retval = IS_ERR((void *)elf_entry) ?
1058 (int)elf_entry : -EINVAL;
1da177e4
LT
1059 goto out_free_dentry;
1060 }
1061 reloc_func_desc = interp_load_addr;
1062
1063 allow_write_access(interpreter);
1064 fput(interpreter);
1065 kfree(elf_interpreter);
1066 } else {
1067 elf_entry = loc->elf_ex.e_entry;
5342fba5 1068 if (BAD_ADDR(elf_entry)) {
ce51059b 1069 retval = -EINVAL;
5342fba5
SS
1070 goto out_free_dentry;
1071 }
1da177e4
LT
1072 }
1073
774c105e 1074 kfree(interp_elf_phdata);
1da177e4
LT
1075 kfree(elf_phdata);
1076
1da177e4
LT
1077 set_binfmt(&elf_format);
1078
547ee84c 1079#ifdef ARCH_HAS_SETUP_ADDITIONAL_PAGES
fc5243d9 1080 retval = arch_setup_additional_pages(bprm, !!elf_interpreter);
19d860a1 1081 if (retval < 0)
18c8baff 1082 goto out;
547ee84c
BH
1083#endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */
1084
a6f76f23 1085 install_exec_creds(bprm);
b6a2fea3 1086 retval = create_elf_tables(bprm, &loc->elf_ex,
f4e5cc2c 1087 load_addr, interp_load_addr);
19d860a1 1088 if (retval < 0)
b6a2fea3 1089 goto out;
1da177e4 1090 /* N.B. passed_fileno might not be initialized? */
1da177e4
LT
1091 current->mm->end_code = end_code;
1092 current->mm->start_code = start_code;
1093 current->mm->start_data = start_data;
1094 current->mm->end_data = end_data;
1095 current->mm->start_stack = bprm->p;
1096
4471a675 1097 if ((current->flags & PF_RANDOMIZE) && (randomize_va_space > 1)) {
c1d171a0
JK
1098 current->mm->brk = current->mm->start_brk =
1099 arch_randomize_brk(current->mm);
204db6ed 1100#ifdef compat_brk_randomized
4471a675
JK
1101 current->brk_randomized = 1;
1102#endif
1103 }
c1d171a0 1104
1da177e4
LT
1105 if (current->personality & MMAP_PAGE_ZERO) {
1106 /* Why this, you ask??? Well SVr4 maps page 0 as read-only,
1107 and some applications "depend" upon this behavior.
1108 Since we do not have the power to recompile these, we
f4e5cc2c 1109 emulate the SVr4 behavior. Sigh. */
6be5ceb0 1110 error = vm_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
1da177e4 1111 MAP_FIXED | MAP_PRIVATE, 0);
1da177e4
LT
1112 }
1113
1114#ifdef ELF_PLAT_INIT
1115 /*
1116 * The ABI may specify that certain registers be set up in special
1117 * ways (on i386 %edx is the address of a DT_FINI function, for
1118 * example). In addition, it may also specify (eg, PowerPC64 ELF)
1119 * that the e_entry field is the address of the function descriptor
1120 * for the startup routine, rather than the address of the startup
1121 * routine itself. This macro performs whatever initialization to
1122 * the regs structure is required as well as any relocations to the
1123 * function descriptor entries when executing dynamically linked apps.
1124 */
1125 ELF_PLAT_INIT(regs, reloc_func_desc);
1126#endif
1127
1128 start_thread(regs, elf_entry, bprm->p);
1da177e4
LT
1129 retval = 0;
1130out:
1131 kfree(loc);
1132out_ret:
1133 return retval;
1134
1135 /* error cleanup */
1136out_free_dentry:
a9d9ef13 1137 kfree(interp_elf_phdata);
1da177e4
LT
1138 allow_write_access(interpreter);
1139 if (interpreter)
1140 fput(interpreter);
1141out_free_interp:
f99d49ad 1142 kfree(elf_interpreter);
1da177e4
LT
1143out_free_ph:
1144 kfree(elf_phdata);
1145 goto out;
1146}
1147
69369a70 1148#ifdef CONFIG_USELIB
1da177e4
LT
1149/* This is really simpleminded and specialized - we are loading an
1150 a.out library that is given an ELF header. */
1da177e4
LT
1151static int load_elf_library(struct file *file)
1152{
1153 struct elf_phdr *elf_phdata;
1154 struct elf_phdr *eppnt;
1155 unsigned long elf_bss, bss, len;
1156 int retval, error, i, j;
1157 struct elfhdr elf_ex;
1158
1159 error = -ENOEXEC;
f4e5cc2c 1160 retval = kernel_read(file, 0, (char *)&elf_ex, sizeof(elf_ex));
1da177e4
LT
1161 if (retval != sizeof(elf_ex))
1162 goto out;
1163
1164 if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
1165 goto out;
1166
1167 /* First of all, some simple consistency checks */
1168 if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
72c2d531 1169 !elf_check_arch(&elf_ex) || !file->f_op->mmap)
1da177e4
LT
1170 goto out;
1171
1172 /* Now read in all of the header information */
1173
1174 j = sizeof(struct elf_phdr) * elf_ex.e_phnum;
1175 /* j < ELF_MIN_ALIGN because elf_ex.e_phnum <= 2 */
1176
1177 error = -ENOMEM;
1178 elf_phdata = kmalloc(j, GFP_KERNEL);
1179 if (!elf_phdata)
1180 goto out;
1181
1182 eppnt = elf_phdata;
1183 error = -ENOEXEC;
1184 retval = kernel_read(file, elf_ex.e_phoff, (char *)eppnt, j);
1185 if (retval != j)
1186 goto out_free_ph;
1187
1188 for (j = 0, i = 0; i<elf_ex.e_phnum; i++)
1189 if ((eppnt + i)->p_type == PT_LOAD)
1190 j++;
1191 if (j != 1)
1192 goto out_free_ph;
1193
1194 while (eppnt->p_type != PT_LOAD)
1195 eppnt++;
1196
1197 /* Now use mmap to map the library into memory. */
6be5ceb0 1198 error = vm_mmap(file,
1da177e4
LT
1199 ELF_PAGESTART(eppnt->p_vaddr),
1200 (eppnt->p_filesz +
1201 ELF_PAGEOFFSET(eppnt->p_vaddr)),
1202 PROT_READ | PROT_WRITE | PROT_EXEC,
1203 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
1204 (eppnt->p_offset -
1205 ELF_PAGEOFFSET(eppnt->p_vaddr)));
1da177e4
LT
1206 if (error != ELF_PAGESTART(eppnt->p_vaddr))
1207 goto out_free_ph;
1208
1209 elf_bss = eppnt->p_vaddr + eppnt->p_filesz;
1210 if (padzero(elf_bss)) {
1211 error = -EFAULT;
1212 goto out_free_ph;
1213 }
1214
f4e5cc2c
JJ
1215 len = ELF_PAGESTART(eppnt->p_filesz + eppnt->p_vaddr +
1216 ELF_MIN_ALIGN - 1);
1da177e4 1217 bss = eppnt->p_memsz + eppnt->p_vaddr;
e4eb1ff6
LT
1218 if (bss > len)
1219 vm_brk(len, bss - len);
1da177e4
LT
1220 error = 0;
1221
1222out_free_ph:
1223 kfree(elf_phdata);
1224out:
1225 return error;
1226}
69369a70 1227#endif /* #ifdef CONFIG_USELIB */
1da177e4 1228
698ba7b5 1229#ifdef CONFIG_ELF_CORE
1da177e4
LT
1230/*
1231 * ELF core dumper
1232 *
1233 * Modelled on fs/exec.c:aout_core_dump()
1234 * Jeremy Fitzhardinge <jeremy@sw.oz.au>
1235 */
1da177e4 1236
909af768
JB
1237/*
1238 * The purpose of always_dump_vma() is to make sure that special kernel mappings
1239 * that are useful for post-mortem analysis are included in every core dump.
1240 * In that way we ensure that the core dump is fully interpretable later
1241 * without matching up the same kernel and hardware config to see what PC values
1242 * meant. These special mappings include - vDSO, vsyscall, and other
1243 * architecture specific mappings
1244 */
1245static bool always_dump_vma(struct vm_area_struct *vma)
1246{
1247 /* Any vsyscall mappings? */
1248 if (vma == get_gate_vma(vma->vm_mm))
1249 return true;
78d683e8
AL
1250
1251 /*
1252 * Assume that all vmas with a .name op should always be dumped.
1253 * If this changes, a new vm_ops field can easily be added.
1254 */
1255 if (vma->vm_ops && vma->vm_ops->name && vma->vm_ops->name(vma))
1256 return true;
1257
909af768
JB
1258 /*
1259 * arch_vma_name() returns non-NULL for special architecture mappings,
1260 * such as vDSO sections.
1261 */
1262 if (arch_vma_name(vma))
1263 return true;
1264
1265 return false;
1266}
1267
1da177e4 1268/*
82df3973 1269 * Decide what to dump of a segment, part, all or none.
1da177e4 1270 */
82df3973
RM
1271static unsigned long vma_dump_size(struct vm_area_struct *vma,
1272 unsigned long mm_flags)
1da177e4 1273{
e575f111
KM
1274#define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
1275
909af768
JB
1276 /* always dump the vdso and vsyscall sections */
1277 if (always_dump_vma(vma))
82df3973 1278 goto whole;
e5b97dde 1279
0103bd16 1280 if (vma->vm_flags & VM_DONTDUMP)
accb61fe
JB
1281 return 0;
1282
5037835c
RZ
1283 /* support for DAX */
1284 if (vma_is_dax(vma)) {
1285 if ((vma->vm_flags & VM_SHARED) && FILTER(DAX_SHARED))
1286 goto whole;
1287 if (!(vma->vm_flags & VM_SHARED) && FILTER(DAX_PRIVATE))
1288 goto whole;
1289 return 0;
1290 }
1291
e575f111
KM
1292 /* Hugetlb memory check */
1293 if (vma->vm_flags & VM_HUGETLB) {
1294 if ((vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_SHARED))
1295 goto whole;
1296 if (!(vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_PRIVATE))
1297 goto whole;
23d9e482 1298 return 0;
e575f111
KM
1299 }
1300
1da177e4 1301 /* Do not dump I/O mapped devices or special mappings */
314e51b9 1302 if (vma->vm_flags & VM_IO)
1da177e4
LT
1303 return 0;
1304
a1b59e80
KH
1305 /* By default, dump shared memory if mapped from an anonymous file. */
1306 if (vma->vm_flags & VM_SHARED) {
496ad9aa 1307 if (file_inode(vma->vm_file)->i_nlink == 0 ?
82df3973
RM
1308 FILTER(ANON_SHARED) : FILTER(MAPPED_SHARED))
1309 goto whole;
1310 return 0;
a1b59e80 1311 }
1da177e4 1312
82df3973
RM
1313 /* Dump segments that have been written to. */
1314 if (vma->anon_vma && FILTER(ANON_PRIVATE))
1315 goto whole;
1316 if (vma->vm_file == NULL)
1317 return 0;
1da177e4 1318
82df3973
RM
1319 if (FILTER(MAPPED_PRIVATE))
1320 goto whole;
1321
1322 /*
1323 * If this looks like the beginning of a DSO or executable mapping,
1324 * check for an ELF header. If we find one, dump the first page to
1325 * aid in determining what was mapped here.
1326 */
92dc07b1
RM
1327 if (FILTER(ELF_HEADERS) &&
1328 vma->vm_pgoff == 0 && (vma->vm_flags & VM_READ)) {
82df3973
RM
1329 u32 __user *header = (u32 __user *) vma->vm_start;
1330 u32 word;
92dc07b1 1331 mm_segment_t fs = get_fs();
82df3973
RM
1332 /*
1333 * Doing it this way gets the constant folded by GCC.
1334 */
1335 union {
1336 u32 cmp;
1337 char elfmag[SELFMAG];
1338 } magic;
1339 BUILD_BUG_ON(SELFMAG != sizeof word);
1340 magic.elfmag[EI_MAG0] = ELFMAG0;
1341 magic.elfmag[EI_MAG1] = ELFMAG1;
1342 magic.elfmag[EI_MAG2] = ELFMAG2;
1343 magic.elfmag[EI_MAG3] = ELFMAG3;
92dc07b1
RM
1344 /*
1345 * Switch to the user "segment" for get_user(),
1346 * then put back what elf_core_dump() had in place.
1347 */
1348 set_fs(USER_DS);
1349 if (unlikely(get_user(word, header)))
1350 word = 0;
1351 set_fs(fs);
1352 if (word == magic.cmp)
82df3973
RM
1353 return PAGE_SIZE;
1354 }
1355
1356#undef FILTER
1357
1358 return 0;
1359
1360whole:
1361 return vma->vm_end - vma->vm_start;
1da177e4
LT
1362}
1363
1da177e4
LT
1364/* An ELF note in memory */
1365struct memelfnote
1366{
1367 const char *name;
1368 int type;
1369 unsigned int datasz;
1370 void *data;
1371};
1372
1373static int notesize(struct memelfnote *en)
1374{
1375 int sz;
1376
1377 sz = sizeof(struct elf_note);
1378 sz += roundup(strlen(en->name) + 1, 4);
1379 sz += roundup(en->datasz, 4);
1380
1381 return sz;
1382}
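/*
 * Example: a "CORE"/NT_PRSTATUS note has an on-disk size of
 * sizeof(struct elf_note) + roundup(strlen("CORE") + 1, 4) +
 * roundup(sizeof(struct elf_prstatus), 4), i.e. 12 + 8 + the padded
 * prstatus size; writenote() below emits exactly those three pieces.
 */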
1383
ecc8c772 1384static int writenote(struct memelfnote *men, struct coredump_params *cprm)
d025c9db
AK
1385{
1386 struct elf_note en;
1da177e4
LT
1387 en.n_namesz = strlen(men->name) + 1;
1388 en.n_descsz = men->datasz;
1389 en.n_type = men->type;
1390
ecc8c772 1391 return dump_emit(cprm, &en, sizeof(en)) &&
22a8cb82
AV
1392 dump_emit(cprm, men->name, en.n_namesz) && dump_align(cprm, 4) &&
1393 dump_emit(cprm, men->data, men->datasz) && dump_align(cprm, 4);
1da177e4 1394}
1da177e4 1395
3aba481f 1396static void fill_elf_header(struct elfhdr *elf, int segs,
d3330cf0 1397 u16 machine, u32 flags)
1da177e4 1398{
6970c8ef
CG
1399 memset(elf, 0, sizeof(*elf));
1400
1da177e4
LT
1401 memcpy(elf->e_ident, ELFMAG, SELFMAG);
1402 elf->e_ident[EI_CLASS] = ELF_CLASS;
1403 elf->e_ident[EI_DATA] = ELF_DATA;
1404 elf->e_ident[EI_VERSION] = EV_CURRENT;
1405 elf->e_ident[EI_OSABI] = ELF_OSABI;
1da177e4
LT
1406
1407 elf->e_type = ET_CORE;
3aba481f 1408 elf->e_machine = machine;
1da177e4 1409 elf->e_version = EV_CURRENT;
1da177e4 1410 elf->e_phoff = sizeof(struct elfhdr);
3aba481f 1411 elf->e_flags = flags;
1da177e4
LT
1412 elf->e_ehsize = sizeof(struct elfhdr);
1413 elf->e_phentsize = sizeof(struct elf_phdr);
1414 elf->e_phnum = segs;
6970c8ef 1415
1da177e4
LT
1416 return;
1417}
1418
8d6b5eee 1419static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, loff_t offset)
1da177e4
LT
1420{
1421 phdr->p_type = PT_NOTE;
1422 phdr->p_offset = offset;
1423 phdr->p_vaddr = 0;
1424 phdr->p_paddr = 0;
1425 phdr->p_filesz = sz;
1426 phdr->p_memsz = 0;
1427 phdr->p_flags = 0;
1428 phdr->p_align = 0;
1429 return;
1430}
1431
1432static void fill_note(struct memelfnote *note, const char *name, int type,
1433 unsigned int sz, void *data)
1434{
1435 note->name = name;
1436 note->type = type;
1437 note->datasz = sz;
1438 note->data = data;
1439 return;
1440}
1441
1442/*
f4e5cc2c
JJ
1443 * fill up all the fields in prstatus from the given task struct, except
1444 * registers which need to be filled up separately.
1da177e4
LT
1445 */
1446static void fill_prstatus(struct elf_prstatus *prstatus,
f4e5cc2c 1447 struct task_struct *p, long signr)
1da177e4
LT
1448{
1449 prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
1450 prstatus->pr_sigpend = p->pending.signal.sig[0];
1451 prstatus->pr_sighold = p->blocked.sig[0];
3b34fc58
ON
1452 rcu_read_lock();
1453 prstatus->pr_ppid = task_pid_vnr(rcu_dereference(p->real_parent));
1454 rcu_read_unlock();
b488893a 1455 prstatus->pr_pid = task_pid_vnr(p);
b488893a
PE
1456 prstatus->pr_pgrp = task_pgrp_vnr(p);
1457 prstatus->pr_sid = task_session_vnr(p);
1da177e4 1458 if (thread_group_leader(p)) {
f06febc9
FM
1459 struct task_cputime cputime;
1460
1da177e4 1461 /*
f06febc9
FM
1462 * This is the record for the group leader. It shows the
1463 * group-wide total, not its individual thread total.
1da177e4 1464 */
f06febc9
FM
1465 thread_group_cputime(p, &cputime);
1466 cputime_to_timeval(cputime.utime, &prstatus->pr_utime);
1467 cputime_to_timeval(cputime.stime, &prstatus->pr_stime);
1da177e4 1468 } else {
6fac4829
FW
1469 cputime_t utime, stime;
1470
1471 task_cputime(p, &utime, &stime);
1472 cputime_to_timeval(utime, &prstatus->pr_utime);
1473 cputime_to_timeval(stime, &prstatus->pr_stime);
1da177e4
LT
1474 }
1475 cputime_to_timeval(p->signal->cutime, &prstatus->pr_cutime);
1476 cputime_to_timeval(p->signal->cstime, &prstatus->pr_cstime);
1477}
1478
1479static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
1480 struct mm_struct *mm)
1481{
c69e8d9c 1482 const struct cred *cred;
a84a5059 1483 unsigned int i, len;
1da177e4
LT
1484
1485 /* first copy the parameters from user space */
1486 memset(psinfo, 0, sizeof(struct elf_prpsinfo));
1487
1488 len = mm->arg_end - mm->arg_start;
1489 if (len >= ELF_PRARGSZ)
1490 len = ELF_PRARGSZ-1;
1491 if (copy_from_user(&psinfo->pr_psargs,
1492 (const char __user *)mm->arg_start, len))
1493 return -EFAULT;
1494 for(i = 0; i < len; i++)
1495 if (psinfo->pr_psargs[i] == 0)
1496 psinfo->pr_psargs[i] = ' ';
1497 psinfo->pr_psargs[len] = 0;
1498
3b34fc58
ON
1499 rcu_read_lock();
1500 psinfo->pr_ppid = task_pid_vnr(rcu_dereference(p->real_parent));
1501 rcu_read_unlock();
b488893a 1502 psinfo->pr_pid = task_pid_vnr(p);
b488893a
PE
1503 psinfo->pr_pgrp = task_pgrp_vnr(p);
1504 psinfo->pr_sid = task_session_vnr(p);
1da177e4
LT
1505
1506 i = p->state ? ffz(~p->state) + 1 : 0;
1507 psinfo->pr_state = i;
55148548 1508 psinfo->pr_sname = (i > 5) ? '.' : "RSDTZW"[i];
1da177e4
LT
1509 psinfo->pr_zomb = psinfo->pr_sname == 'Z';
1510 psinfo->pr_nice = task_nice(p);
1511 psinfo->pr_flag = p->flags;
c69e8d9c
DH
1512 rcu_read_lock();
1513 cred = __task_cred(p);
ebc887b2
EB
1514 SET_UID(psinfo->pr_uid, from_kuid_munged(cred->user_ns, cred->uid));
1515 SET_GID(psinfo->pr_gid, from_kgid_munged(cred->user_ns, cred->gid));
c69e8d9c 1516 rcu_read_unlock();
1da177e4
LT
1517 strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname));
1518
1519 return 0;
1520}
1521
3aba481f
RM
1522static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
1523{
1524 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
1525 int i = 0;
1526 do
1527 i += 2;
1528 while (auxv[i - 2] != AT_NULL);
1529 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
1530}
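/*
 * The loop above counts saved_auxv in (a_type, a_val) pairs and stops only
 * after stepping past the terminating AT_NULL pair, so the AT_NULL entry is
 * included in the note. E.g. for { AT_PHDR, x, AT_ENTRY, y, AT_NULL, 0 }
 * (a made-up vector), i ends up as 6 and all six words are dumped.
 */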
1531
49ae4d4b 1532static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
ce395960 1533 const siginfo_t *siginfo)
49ae4d4b
DV
1534{
1535 mm_segment_t old_fs = get_fs();
1536 set_fs(KERNEL_DS);
1537 copy_siginfo_to_user((user_siginfo_t __user *) csigdata, siginfo);
1538 set_fs(old_fs);
1539 fill_note(note, "CORE", NT_SIGINFO, sizeof(*csigdata), csigdata);
1540}
1541
2aa362c4
DV
1542#define MAX_FILE_NOTE_SIZE (4*1024*1024)
1543/*
1544 * Format of NT_FILE note:
1545 *
1546 * long count -- how many files are mapped
1547 * long page_size -- units for file_ofs
1548 * array of [COUNT] elements of
1549 * long start
1550 * long end
1551 * long file_ofs
1552 * followed by COUNT filenames in ASCII: "FILE1" NUL "FILE2" NUL...
1553 */
72023656 1554static int fill_files_note(struct memelfnote *note)
2aa362c4
DV
1555{
1556 struct vm_area_struct *vma;
1557 unsigned count, size, names_ofs, remaining, n;
1558 user_long_t *data;
1559 user_long_t *start_end_ofs;
1560 char *name_base, *name_curpos;
1561
1562 /* *Estimated* file count and total data size needed */
1563 count = current->mm->map_count;
1564 size = count * 64;
1565
1566 names_ofs = (2 + 3 * count) * sizeof(data[0]);
1567 alloc:
1568 if (size >= MAX_FILE_NOTE_SIZE) /* paranoia check */
72023656 1569 return -EINVAL;
2aa362c4
DV
1570 size = round_up(size, PAGE_SIZE);
1571 data = vmalloc(size);
1572 if (!data)
72023656 1573 return -ENOMEM;
2aa362c4
DV
1574
1575 start_end_ofs = data + 2;
1576 name_base = name_curpos = ((char *)data) + names_ofs;
1577 remaining = size - names_ofs;
1578 count = 0;
1579 for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
1580 struct file *file;
1581 const char *filename;
1582
1583 file = vma->vm_file;
1584 if (!file)
1585 continue;
9bf39ab2 1586 filename = file_path(file, name_curpos, remaining);
2aa362c4
DV
1587 if (IS_ERR(filename)) {
1588 if (PTR_ERR(filename) == -ENAMETOOLONG) {
1589 vfree(data);
1590 size = size * 5 / 4;
1591 goto alloc;
1592 }
1593 continue;
1594 }
1595
9bf39ab2 1596 /* file_path() fills at the end, move name down */
2aa362c4
DV
1597 /* n = strlen(filename) + 1: */
1598 n = (name_curpos + remaining) - filename;
1599 remaining = filename - name_curpos;
1600 memmove(name_curpos, filename, n);
1601 name_curpos += n;
1602
1603 *start_end_ofs++ = vma->vm_start;
1604 *start_end_ofs++ = vma->vm_end;
1605 *start_end_ofs++ = vma->vm_pgoff;
1606 count++;
1607 }
1608
1609 /* Now we know exact count of files, can store it */
1610 data[0] = count;
1611 data[1] = PAGE_SIZE;
1612 /*
1613 * Count usually is less than current->mm->map_count,
1614 * we need to move filenames down.
1615 */
1616 n = current->mm->map_count - count;
1617 if (n != 0) {
1618 unsigned shift_bytes = n * 3 * sizeof(data[0]);
1619 memmove(name_base - shift_bytes, name_base,
1620 name_curpos - name_base);
1621 name_curpos -= shift_bytes;
1622 }
1623
1624 size = name_curpos - (char *)data;
1625 fill_note(note, "CORE", NT_FILE, size, data);
72023656 1626 return 0;
2aa362c4
DV
1627}
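/*
 * Size sketch for the NT_FILE layout documented above (hypothetical count):
 * with 3 mapped files on a 64-bit kernel, the header is 2 longs plus
 * 3 * 3 longs == 11 longs, so names_ofs == (2 + 3 * 3) * 8 == 88 and the
 * NUL-terminated path strings start at byte 88 of the note data.
 */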
1628
4206d3aa
RM
1629#ifdef CORE_DUMP_USE_REGSET
1630#include <linux/regset.h>
1631
1632struct elf_thread_core_info {
1633 struct elf_thread_core_info *next;
1634 struct task_struct *task;
1635 struct elf_prstatus prstatus;
1636 struct memelfnote notes[0];
1637};
1638
1639struct elf_note_info {
1640 struct elf_thread_core_info *thread;
1641 struct memelfnote psinfo;
49ae4d4b 1642 struct memelfnote signote;
4206d3aa 1643 struct memelfnote auxv;
2aa362c4 1644 struct memelfnote files;
49ae4d4b 1645 user_siginfo_t csigdata;
4206d3aa
RM
1646 size_t size;
1647 int thread_notes;
1648};
1649
d31472b6
RM
1650/*
1651 * When a regset has a writeback hook, we call it on each thread before
1652 * dumping user memory. On register window machines, this makes sure the
1653 * user memory backing the register data is up to date before we read it.
1654 */
1655static void do_thread_regset_writeback(struct task_struct *task,
1656 const struct user_regset *regset)
1657{
1658 if (regset->writeback)
1659 regset->writeback(task, regset, 1);
1660}
1661
0953f65d
L
1662#ifndef PR_REG_SIZE
1663#define PR_REG_SIZE(S) sizeof(S)
1664#endif
1665
1666#ifndef PRSTATUS_SIZE
1667#define PRSTATUS_SIZE(S) sizeof(S)
1668#endif
1669
1670#ifndef PR_REG_PTR
1671#define PR_REG_PTR(S) (&((S)->pr_reg))
1672#endif
1673
1674#ifndef SET_PR_FPVALID
1675#define SET_PR_FPVALID(S, V) ((S)->pr_fpvalid = (V))
1676#endif
1677
4206d3aa
RM
1678static int fill_thread_core_info(struct elf_thread_core_info *t,
1679 const struct user_regset_view *view,
1680 long signr, size_t *total)
1681{
1682 unsigned int i;
1683
1684 /*
1685 * NT_PRSTATUS is the one special case, because the regset data
1686 * goes into the pr_reg field inside the note contents, rather
1687 * than being the whole note contents. We fill the rest in here.
1688 * We assume that regset 0 is NT_PRSTATUS.
1689 */
1690 fill_prstatus(&t->prstatus, t->task, signr);
1691 (void) view->regsets[0].get(t->task, &view->regsets[0],
0953f65d
L
1692 0, PR_REG_SIZE(t->prstatus.pr_reg),
1693 PR_REG_PTR(&t->prstatus), NULL);
4206d3aa
RM
1694
1695 fill_note(&t->notes[0], "CORE", NT_PRSTATUS,
0953f65d 1696 PRSTATUS_SIZE(t->prstatus), &t->prstatus);
4206d3aa
RM
1697 *total += notesize(&t->notes[0]);
1698
d31472b6
RM
1699 do_thread_regset_writeback(t->task, &view->regsets[0]);
1700
4206d3aa
RM
1701 /*
1702 * Each other regset might generate a note too. For each regset
1703 * that has no core_note_type or is inactive, we leave t->notes[i]
1704 * all zero and we'll know to skip writing it later.
1705 */
1706 for (i = 1; i < view->n; ++i) {
1707 const struct user_regset *regset = &view->regsets[i];
d31472b6 1708 do_thread_regset_writeback(t->task, regset);
c8e25258 1709 if (regset->core_note_type && regset->get &&
4206d3aa
RM
1710 (!regset->active || regset->active(t->task, regset))) {
1711 int ret;
1712 size_t size = regset->n * regset->size;
1713 void *data = kmalloc(size, GFP_KERNEL);
1714 if (unlikely(!data))
1715 return 0;
1716 ret = regset->get(t->task, regset,
1717 0, size, data, NULL);
1718 if (unlikely(ret))
1719 kfree(data);
1720 else {
1721 if (regset->core_note_type != NT_PRFPREG)
1722 fill_note(&t->notes[i], "LINUX",
1723 regset->core_note_type,
1724 size, data);
1725 else {
0953f65d 1726 SET_PR_FPVALID(&t->prstatus, 1);
1727 fill_note(&t->notes[i], "CORE",
1728 NT_PRFPREG, size, data);
1729 }
1730 *total += notesize(&t->notes[i]);
1731 }
1732 }
1733 }
1734
1735 return 1;
1736}
1737
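Each thread thus contributes an NT_PRSTATUS note first, followed by one note per additional active regset. A consumer can recover the thread count by scanning the note data for NT_PRSTATUS records; the sketch below is a hypothetical user-space helper that assumes a 64-bit core, relies on the 4-byte padding applied to both the note name and descriptor, and (for brevity) does not check the owner name:

#include <elf.h>
#include <stddef.h>

/* Hypothetical helper: count threads in a core by counting NT_PRSTATUS
 * notes inside an in-memory copy of the PT_NOTE payload. */
static unsigned int count_core_threads(const unsigned char *notes, size_t size)
{
	size_t off = 0;
	unsigned int threads = 0;

	while (off + sizeof(Elf64_Nhdr) <= size) {
		const Elf64_Nhdr *nhdr = (const Elf64_Nhdr *)(notes + off);

		if (nhdr->n_type == NT_PRSTATUS)
			threads++;
		/* name and descriptor are each padded to a 4-byte boundary */
		off += sizeof(*nhdr);
		off += (nhdr->n_namesz + 3) & ~(size_t)3;
		off += (nhdr->n_descsz + 3) & ~(size_t)3;
	}
	return threads;
}

Tools such as readelf -n perform essentially this same walk when listing the notes of a core file.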
1738static int fill_note_info(struct elfhdr *elf, int phdrs,
1739 struct elf_note_info *info,
ec57941e 1740 const siginfo_t *siginfo, struct pt_regs *regs)
1741{
1742 struct task_struct *dump_task = current;
1743 const struct user_regset_view *view = task_user_regset_view(dump_task);
1744 struct elf_thread_core_info *t;
1745 struct elf_prpsinfo *psinfo;
83914441 1746 struct core_thread *ct;
1747 unsigned int i;
1748
1749 info->size = 0;
1750 info->thread = NULL;
1751
1752 psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL);
1753 if (psinfo == NULL) {
1754 info->psinfo.data = NULL; /* So we don't free this wrongly */
4206d3aa 1755 return 0;
6899e92d 1756 }
4206d3aa 1757
1758 fill_note(&info->psinfo, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);
1759
1760 /*
1761 * Figure out how many notes we're going to need for each thread.
1762 */
1763 info->thread_notes = 0;
1764 for (i = 0; i < view->n; ++i)
1765 if (view->regsets[i].core_note_type != 0)
1766 ++info->thread_notes;
1767
1768 /*
1769 * Sanity check. We rely on regset 0 being NT_PRSTATUS,
1770 * since it is our one special case.
1771 */
1772 if (unlikely(info->thread_notes == 0) ||
1773 unlikely(view->regsets[0].core_note_type != NT_PRSTATUS)) {
1774 WARN_ON(1);
1775 return 0;
1776 }
1777
1778 /*
1779 * Initialize the ELF file header.
1780 */
1781 fill_elf_header(elf, phdrs,
d3330cf0 1782 view->e_machine, view->e_flags);
1783
1784 /*
1785 * Allocate a structure for each thread.
1786 */
1787 for (ct = &dump_task->mm->core_state->dumper; ct; ct = ct->next) {
1788 t = kzalloc(offsetof(struct elf_thread_core_info,
1789 notes[info->thread_notes]),
1790 GFP_KERNEL);
1791 if (unlikely(!t))
1792 return 0;
1793
1794 t->task = ct->task;
1795 if (ct->task == dump_task || !info->thread) {
1796 t->next = info->thread;
1797 info->thread = t;
1798 } else {
1799 /*
1800 * Make sure to keep the original task at
1801 * the head of the list.
1802 */
1803 t->next = info->thread->next;
1804 info->thread->next = t;
4206d3aa 1805 }
83914441 1806 }
1807
1808 /*
1809 * Now fill in each thread's information.
1810 */
1811 for (t = info->thread; t != NULL; t = t->next)
5ab1c309 1812 if (!fill_thread_core_info(t, view, siginfo->si_signo, &info->size))
1813 return 0;
1814
1815 /*
1816 * Fill in the two process-wide notes.
1817 */
1818 fill_psinfo(psinfo, dump_task->group_leader, dump_task->mm);
1819 info->size += notesize(&info->psinfo);
1820
1821 fill_siginfo_note(&info->signote, &info->csigdata, siginfo);
1822 info->size += notesize(&info->signote);
1823
1824 fill_auxv_note(&info->auxv, current->mm);
1825 info->size += notesize(&info->auxv);
1826
1827 if (fill_files_note(&info->files) == 0)
1828 info->size += notesize(&info->files);
2aa362c4 1829
1830 return 1;
1831}
1832
1833static size_t get_note_info_size(struct elf_note_info *info)
1834{
1835 return info->size;
1836}
1837
1838/*
1839 * Write all the notes for each thread. When writing the first thread, the
1840 * process-wide notes are interleaved after the first thread-specific note.
1841 */
1842static int write_note_info(struct elf_note_info *info,
ecc8c772 1843 struct coredump_params *cprm)
4206d3aa 1844{
b219e25f 1845 bool first = true;
1846 struct elf_thread_core_info *t = info->thread;
1847
1848 do {
1849 int i;
1850
ecc8c772 1851 if (!writenote(&t->notes[0], cprm))
1852 return 0;
1853
ecc8c772 1854 if (first && !writenote(&info->psinfo, cprm))
4206d3aa 1855 return 0;
ecc8c772 1856 if (first && !writenote(&info->signote, cprm))
49ae4d4b 1857 return 0;
ecc8c772 1858 if (first && !writenote(&info->auxv, cprm))
4206d3aa 1859 return 0;
72023656 1860 if (first && info->files.data &&
ecc8c772 1861 !writenote(&info->files, cprm))
2aa362c4 1862 return 0;
1863
1864 for (i = 1; i < info->thread_notes; ++i)
1865 if (t->notes[i].data &&
ecc8c772 1866 !writenote(&t->notes[i], cprm))
1867 return 0;
1868
b219e25f 1869 first = false;
1870 t = t->next;
1871 } while (t);
1872
1873 return 1;
1874}
1875
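Concretely, for a process with three threads the loop above emits the notes in roughly this order (a sketch: the regsets that follow NT_PRSTATUS vary by architecture, and the NT_FILE note is present only when fill_files_note() succeeded):

/*
 *   thread 0: NT_PRSTATUS, NT_PRPSINFO, NT_SIGINFO, NT_AUXV, NT_FILE,
 *             <one note per additional active regset, e.g. NT_PRFPREG>
 *   thread 1: NT_PRSTATUS, <per-regset notes>
 *   thread 2: NT_PRSTATUS, <per-regset notes>
 */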
1876static void free_note_info(struct elf_note_info *info)
1877{
1878 struct elf_thread_core_info *threads = info->thread;
1879 while (threads) {
1880 unsigned int i;
1881 struct elf_thread_core_info *t = threads;
1882 threads = t->next;
1883 WARN_ON(t->notes[0].data && t->notes[0].data != &t->prstatus);
1884 for (i = 1; i < info->thread_notes; ++i)
1885 kfree(t->notes[i].data);
1886 kfree(t);
1887 }
1888 kfree(info->psinfo.data);
2aa362c4 1889 vfree(info->files.data);
1890}
1891
1892#else
1893
1894/* Here is the structure in which status of each thread is captured. */
1895struct elf_thread_status
1896{
1897 struct list_head list;
1898 struct elf_prstatus prstatus; /* NT_PRSTATUS */
1899 elf_fpregset_t fpu; /* NT_PRFPREG */
1900 struct task_struct *thread;
1901#ifdef ELF_CORE_COPY_XFPREGS
5b20cd80 1902 elf_fpxregset_t xfpu; /* ELF_CORE_XFPREG_TYPE */
1903#endif
1904 struct memelfnote notes[3];
1905 int num_notes;
1906};
1907
1908/*
1909 * In order to add the specific thread information for the ELF file format,
1910 * we need to keep a linked list of every thread's pr_status and then create
1911 * a single section for them in the final core file.
1912 */
1913static int elf_dump_thread_status(long signr, struct elf_thread_status *t)
1914{
1915 int sz = 0;
1916 struct task_struct *p = t->thread;
1917 t->num_notes = 0;
1918
1919 fill_prstatus(&t->prstatus, p, signr);
1920 elf_core_copy_task_regs(p, &t->prstatus.pr_reg);
1921
1922 fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus),
1923 &(t->prstatus));
1924 t->num_notes++;
1925 sz += notesize(&t->notes[0]);
1926
1927 if ((t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, NULL,
1928 &t->fpu))) {
1929 fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu),
1930 &(t->fpu));
1931 t->num_notes++;
1932 sz += notesize(&t->notes[1]);
1933 }
1934
1935#ifdef ELF_CORE_COPY_XFPREGS
1936 if (elf_core_copy_task_xfpregs(p, &t->xfpu)) {
1937 fill_note(&t->notes[2], "LINUX", ELF_CORE_XFPREG_TYPE,
1938 sizeof(t->xfpu), &t->xfpu);
1939 t->num_notes++;
1940 sz += notesize(&t->notes[2]);
1941 }
1942#endif
1943 return sz;
1944}
1945
1946struct elf_note_info {
1947 struct memelfnote *notes;
72023656 1948 struct memelfnote *notes_files;
1949 struct elf_prstatus *prstatus; /* NT_PRSTATUS */
1950 struct elf_prpsinfo *psinfo; /* NT_PRPSINFO */
1951 struct list_head thread_list;
1952 elf_fpregset_t *fpu;
1953#ifdef ELF_CORE_COPY_XFPREGS
1954 elf_fpxregset_t *xfpu;
1955#endif
49ae4d4b 1956 user_siginfo_t csigdata;
1957 int thread_status_size;
1958 int numnote;
1959};
1960
0cf062d0 1961static int elf_note_info_init(struct elf_note_info *info)
3aba481f 1962{
0cf062d0 1963 memset(info, 0, sizeof(*info));
1964 INIT_LIST_HEAD(&info->thread_list);
1965
49ae4d4b 1966 /* Allocate space for ELF notes */
2aa362c4 1967 info->notes = kmalloc(8 * sizeof(struct memelfnote), GFP_KERNEL);
1968 if (!info->notes)
1969 return 0;
1970 info->psinfo = kmalloc(sizeof(*info->psinfo), GFP_KERNEL);
1971 if (!info->psinfo)
f34f9d18 1972 return 0;
1973 info->prstatus = kmalloc(sizeof(*info->prstatus), GFP_KERNEL);
1974 if (!info->prstatus)
f34f9d18 1975 return 0;
1976 info->fpu = kmalloc(sizeof(*info->fpu), GFP_KERNEL);
1977 if (!info->fpu)
f34f9d18 1978 return 0;
1979#ifdef ELF_CORE_COPY_XFPREGS
1980 info->xfpu = kmalloc(sizeof(*info->xfpu), GFP_KERNEL);
1981 if (!info->xfpu)
f34f9d18 1982 return 0;
3aba481f 1983#endif
0cf062d0 1984 return 1;
1985}
1986
1987static int fill_note_info(struct elfhdr *elf, int phdrs,
1988 struct elf_note_info *info,
ec57941e 1989 const siginfo_t *siginfo, struct pt_regs *regs)
1990{
1991 struct list_head *t;
1992 struct core_thread *ct;
1993 struct elf_thread_status *ets;
1994
1995 if (!elf_note_info_init(info))
1996 return 0;
3aba481f 1997
1998 for (ct = current->mm->core_state->dumper.next;
1999 ct; ct = ct->next) {
2000 ets = kzalloc(sizeof(*ets), GFP_KERNEL);
2001 if (!ets)
2002 return 0;
83914441 2003
2004 ets->thread = ct->task;
2005 list_add(&ets->list, &info->thread_list);
2006 }
83914441 2007
2008 list_for_each(t, &info->thread_list) {
2009 int sz;
3aba481f 2010
2011 ets = list_entry(t, struct elf_thread_status, list);
2012 sz = elf_dump_thread_status(siginfo->si_signo, ets);
2013 info->thread_status_size += sz;
2014 }
2015 /* now collect the dump for the current task */
2016 memset(info->prstatus, 0, sizeof(*info->prstatus));
5ab1c309 2017 fill_prstatus(info->prstatus, current, siginfo->si_signo);
2018 elf_core_copy_regs(&info->prstatus->pr_reg, regs);
2019
2020 /* Set up header */
d3330cf0 2021 fill_elf_header(elf, phdrs, ELF_ARCH, ELF_CORE_EFLAGS);
2022
2023 /*
2024 * Set up the notes in similar form to SVR4 core dumps made
2025 * with info from their /proc.
2026 */
2027
2028 fill_note(info->notes + 0, "CORE", NT_PRSTATUS,
2029 sizeof(*info->prstatus), info->prstatus);
2030 fill_psinfo(info->psinfo, current->group_leader, current->mm);
2031 fill_note(info->notes + 1, "CORE", NT_PRPSINFO,
2032 sizeof(*info->psinfo), info->psinfo);
2033
2034 fill_siginfo_note(info->notes + 2, &info->csigdata, siginfo);
2035 fill_auxv_note(info->notes + 3, current->mm);
72023656 2036 info->numnote = 4;
3aba481f 2037
2038 if (fill_files_note(info->notes + info->numnote) == 0) {
2039 info->notes_files = info->notes + info->numnote;
2040 info->numnote++;
2041 }
2042
2043 /* Try to dump the FPU. */
2044 info->prstatus->pr_fpvalid = elf_core_copy_task_fpregs(current, regs,
2045 info->fpu);
2046 if (info->prstatus->pr_fpvalid)
2047 fill_note(info->notes + info->numnote++,
2048 "CORE", NT_PRFPREG, sizeof(*info->fpu), info->fpu);
2049#ifdef ELF_CORE_COPY_XFPREGS
2050 if (elf_core_copy_task_xfpregs(current, info->xfpu))
2051 fill_note(info->notes + info->numnote++,
2052 "LINUX", ELF_CORE_XFPREG_TYPE,
2053 sizeof(*info->xfpu), info->xfpu);
2054#endif
2055
2056 return 1;
2057}
2058
2059static size_t get_note_info_size(struct elf_note_info *info)
2060{
2061 int sz = 0;
2062 int i;
2063
2064 for (i = 0; i < info->numnote; i++)
2065 sz += notesize(info->notes + i);
2066
2067 sz += info->thread_status_size;
2068
2069 return sz;
2070}
2071
2072static int write_note_info(struct elf_note_info *info,
ecc8c772 2073 struct coredump_params *cprm)
2074{
2075 int i;
2076 struct list_head *t;
2077
2078 for (i = 0; i < info->numnote; i++)
ecc8c772 2079 if (!writenote(info->notes + i, cprm))
2080 return 0;
2081
2082 /* write out the thread status notes section */
2083 list_for_each(t, &info->thread_list) {
2084 struct elf_thread_status *tmp =
2085 list_entry(t, struct elf_thread_status, list);
2086
2087 for (i = 0; i < tmp->num_notes; i++)
ecc8c772 2088 if (!writenote(&tmp->notes[i], cprm))
2089 return 0;
2090 }
2091
2092 return 1;
2093}
2094
2095static void free_note_info(struct elf_note_info *info)
2096{
2097 while (!list_empty(&info->thread_list)) {
2098 struct list_head *tmp = info->thread_list.next;
2099 list_del(tmp);
2100 kfree(list_entry(tmp, struct elf_thread_status, list));
2101 }
2102
2103 /* Free data possibly allocated by fill_files_note(): */
2104 if (info->notes_files)
2105 vfree(info->notes_files->data);
2aa362c4 2106
2107 kfree(info->prstatus);
2108 kfree(info->psinfo);
2109 kfree(info->notes);
2110 kfree(info->fpu);
2111#ifdef ELF_CORE_COPY_XFPREGS
2112 kfree(info->xfpu);
2113#endif
2114}
2115
2116#endif
2117
2118static struct vm_area_struct *first_vma(struct task_struct *tsk,
2119 struct vm_area_struct *gate_vma)
2120{
2121 struct vm_area_struct *ret = tsk->mm->mmap;
2122
2123 if (ret)
2124 return ret;
2125 return gate_vma;
2126}
2127/*
2128 * Helper function for iterating across a vma list. It ensures that the caller
2129 * will visit `gate_vma' prior to terminating the search.
2130 */
2131static struct vm_area_struct *next_vma(struct vm_area_struct *this_vma,
2132 struct vm_area_struct *gate_vma)
2133{
2134 struct vm_area_struct *ret;
2135
2136 ret = this_vma->vm_next;
2137 if (ret)
2138 return ret;
2139 if (this_vma == gate_vma)
2140 return NULL;
2141 return gate_vma;
2142}
2143
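elf_core_dump() below walks the address space with these two helpers; stripped of the per-iteration work, each of its three VMA loops (sizing the dump, emitting program headers, writing the data) reduces to this kernel-context skeleton:

	for (vma = first_vma(current, gate_vma); vma != NULL;
	     vma = next_vma(vma, gate_vma)) {
		/* size, describe, or dump this mapping */
	}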
2144static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
2145 elf_addr_t e_shoff, int segs)
2146{
2147 elf->e_shoff = e_shoff;
2148 elf->e_shentsize = sizeof(*shdr4extnum);
2149 elf->e_shnum = 1;
2150 elf->e_shstrndx = SHN_UNDEF;
2151
2152 memset(shdr4extnum, 0, sizeof(*shdr4extnum));
2153
2154 shdr4extnum->sh_type = SHT_NULL;
2155 shdr4extnum->sh_size = elf->e_shnum;
2156 shdr4extnum->sh_link = elf->e_shstrndx;
2157 shdr4extnum->sh_info = segs;
2158}
2159
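When e_phnum has saturated at PN_XNUM, the real segment count is parked in sh_info of this single placeholder section header. A hedged consumer-side sketch for recovering it from a 64-bit core image mapped contiguously in memory:

#include <elf.h>

/* Hypothetical helper: return the true program-header count, following
 * the extended-numbering convention set up by fill_extnum_info(). */
static unsigned long core_phnum(const Elf64_Ehdr *ehdr)
{
	const Elf64_Shdr *shdr0;

	if (ehdr->e_phnum != PN_XNUM)
		return ehdr->e_phnum;
	shdr0 = (const Elf64_Shdr *)((const char *)ehdr + ehdr->e_shoff);
	return shdr0->sh_info;
}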
2160/*
2161 * Actual dumper
2162 *
2163 * This is a two-pass process; first we find the offsets of the bits,
2164 * and then they are actually written out. If we run out of core limit
2165 * we just truncate.
2166 */
f6151dfe 2167static int elf_core_dump(struct coredump_params *cprm)
1da177e4 2168{
2169 int has_dumped = 0;
2170 mm_segment_t fs;
2171 int segs, i;
2172 size_t vma_data_size = 0;
f47aef55 2173 struct vm_area_struct *vma, *gate_vma;
1da177e4 2174 struct elfhdr *elf = NULL;
cdc3d562 2175 loff_t offset = 0, dataoff;
72023656 2176 struct elf_note_info info = { };
93eb211e 2177 struct elf_phdr *phdr4note = NULL;
2178 struct elf_shdr *shdr4extnum = NULL;
2179 Elf_Half e_phnum;
2180 elf_addr_t e_shoff;
52f5592e 2181 elf_addr_t *vma_filesz = NULL;
2182
2183 /*
2184 * We no longer stop all VM operations.
2185 *
2186 * This is because those processes that could possibly change map_count
2187 * or the mmap / vma pages are now blocked in do_exit on current
2188 * finishing this core dump.
2189 *
2190 * Only ptrace can touch these memory addresses, but it doesn't change
2191 * the map_count or the pages allocated. So no possibility of crashing
2192 * exists while dumping the mm->vm_next areas to the core file.
2193 */
2194
2195 /* alloc memory for large data structures: too large to be on stack */
2196 elf = kmalloc(sizeof(*elf), GFP_KERNEL);
2197 if (!elf)
5f719558 2198 goto out;
2199 /*
2200 * The number of segs is recorded in the ELF header as a 16-bit value.
2201 * Please check the DEFAULT_MAX_MAP_COUNT definition when you modify this.
2202 */
1da177e4 2203 segs = current->mm->map_count;
1fcccbac 2204 segs += elf_core_extra_phdrs();
1da177e4 2205
31db58b3 2206 gate_vma = get_gate_vma(current->mm);
2207 if (gate_vma != NULL)
2208 segs++;
2209
2210 /* for notes section */
2211 segs++;
2212
2213 /* If segs > PN_XNUM(0xffff), then e_phnum overflows. To avoid
2214 * this, the kernel supports extended numbering. Have a look at
2215 * include/linux/elf.h for further information. */
2216 e_phnum = segs > PN_XNUM ? PN_XNUM : segs;
2217
1da177e4 2218 /*
2219 * Collect all the non-memory information about the process for the
2220 * notes. This also sets up the file header.
1da177e4 2221 */
5ab1c309 2222 if (!fill_note_info(elf, e_phnum, &info, cprm->siginfo, cprm->regs))
3aba481f 2223 goto cleanup;
1da177e4 2224
3aba481f 2225 has_dumped = 1;
079148b9 2226
2227 fs = get_fs();
2228 set_fs(KERNEL_DS);
2229
1da177e4 2230 offset += sizeof(*elf); /* Elf header */
8d9032bb 2231 offset += segs * sizeof(struct elf_phdr); /* Program headers */
2232
2233 /* Write notes phdr entry */
2234 {
3aba481f 2235 size_t sz = get_note_info_size(&info);
1da177e4 2236
e5501492 2237 sz += elf_coredump_extra_notes_size();
bf1ab978 2238
2239 phdr4note = kmalloc(sizeof(*phdr4note), GFP_KERNEL);
2240 if (!phdr4note)
088e7af7 2241 goto end_coredump;
2242
2243 fill_elf_note_phdr(phdr4note, sz, offset);
2244 offset += sz;
2245 }
2246
2247 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
2248
2249 vma_filesz = kmalloc_array(segs - 1, sizeof(*vma_filesz), GFP_KERNEL);
2250 if (!vma_filesz)
2251 goto end_coredump;
2252
2253 for (i = 0, vma = first_vma(current, gate_vma); vma != NULL;
2254 vma = next_vma(vma, gate_vma)) {
2255 unsigned long dump_size;
2256
2257 dump_size = vma_dump_size(vma, cprm->mm_flags);
2258 vma_filesz[i++] = dump_size;
2259 vma_data_size += dump_size;
2260 }
2261
2262 offset += vma_data_size;
2263 offset += elf_core_extra_data_size();
2264 e_shoff = offset;
2265
2266 if (e_phnum == PN_XNUM) {
2267 shdr4extnum = kmalloc(sizeof(*shdr4extnum), GFP_KERNEL);
2268 if (!shdr4extnum)
2269 goto end_coredump;
2270 fill_extnum_info(elf, shdr4extnum, e_shoff, segs);
2271 }
2272
2273 offset = dataoff;
2274
ecc8c772 2275 if (!dump_emit(cprm, elf, sizeof(*elf)))
2276 goto end_coredump;
2277
ecc8c772 2278 if (!dump_emit(cprm, phdr4note, sizeof(*phdr4note)))
2279 goto end_coredump;
2280
1da177e4 2281 /* Write program headers for segments dump */
52f5592e 2282 for (i = 0, vma = first_vma(current, gate_vma); vma != NULL;
f47aef55 2283 vma = next_vma(vma, gate_vma)) {
1da177e4 2284 struct elf_phdr phdr;
2285
2286 phdr.p_type = PT_LOAD;
2287 phdr.p_offset = offset;
2288 phdr.p_vaddr = vma->vm_start;
2289 phdr.p_paddr = 0;
52f5592e 2290 phdr.p_filesz = vma_filesz[i++];
82df3973 2291 phdr.p_memsz = vma->vm_end - vma->vm_start;
2292 offset += phdr.p_filesz;
2293 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
2294 if (vma->vm_flags & VM_WRITE)
2295 phdr.p_flags |= PF_W;
2296 if (vma->vm_flags & VM_EXEC)
2297 phdr.p_flags |= PF_X;
2298 phdr.p_align = ELF_EXEC_PAGESIZE;
2299
ecc8c772 2300 if (!dump_emit(cprm, &phdr, sizeof(phdr)))
088e7af7 2301 goto end_coredump;
2302 }
2303
506f21c5 2304 if (!elf_core_write_extra_phdrs(cprm, offset))
1fcccbac 2305 goto end_coredump;
2306
2307 /* write out the notes section */
ecc8c772 2308 if (!write_note_info(&info, cprm))
3aba481f 2309 goto end_coredump;
1da177e4 2310
cdc3d562 2311 if (elf_coredump_extra_notes_write(cprm))
e5501492 2312 goto end_coredump;
bf1ab978 2313
d025c9db 2314 /* Align to page */
9b56d543 2315 if (!dump_skip(cprm, dataoff - cprm->written))
f3e8fccd 2316 goto end_coredump;
1da177e4 2317
52f5592e 2318 for (i = 0, vma = first_vma(current, gate_vma); vma != NULL;
f47aef55 2319 vma = next_vma(vma, gate_vma)) {
1da177e4 2320 unsigned long addr;
82df3973 2321 unsigned long end;
1da177e4 2322
52f5592e 2323 end = vma->vm_start + vma_filesz[i++];
1da177e4 2324
82df3973 2325 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
f4e5cc2c 2326 struct page *page;
2327 int stop;
2328
2329 page = get_dump_page(addr);
2330 if (page) {
2331 void *kaddr = kmap(page);
13046ece 2332 stop = !dump_emit(cprm, kaddr, PAGE_SIZE);
f3e8fccd 2333 kunmap(page);
1da177e4 2334 page_cache_release(page);
f3e8fccd 2335 } else
9b56d543 2336 stop = !dump_skip(cprm, PAGE_SIZE);
2337 if (stop)
2338 goto end_coredump;
2339 }
2340 }
22c08ef5 2341 dump_truncate(cprm);
1da177e4 2342
aa3e7eaf 2343 if (!elf_core_write_extra_data(cprm))
1fcccbac 2344 goto end_coredump;
1da177e4 2345
8d9032bb 2346 if (e_phnum == PN_XNUM) {
13046ece 2347 if (!dump_emit(cprm, shdr4extnum, sizeof(*shdr4extnum)))
2348 goto end_coredump;
2349 }
2350
2351end_coredump:
2352 set_fs(fs);
2353
2354cleanup:
3aba481f 2355 free_note_info(&info);
8d9032bb 2356 kfree(shdr4extnum);
52f5592e 2357 kfree(vma_filesz);
93eb211e 2358 kfree(phdr4note);
2359 kfree(elf);
2360out:
1da177e4 2361 return has_dumped;
2362}
2363
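The finished core is therefore an ELF header, the program headers, the note data, page-aligned segment contents and, only when e_phnum == PN_XNUM, one trailing section header. As a rough, hypothetical illustration of consuming that output, locating the note segment in a 64-bit core image mapped at "base" could look like:

#include <elf.h>
#include <stddef.h>

/* Hypothetical consumer-side helper: find the PT_NOTE segment written
 * by elf_core_dump(); no validation or bounds checking is performed,
 * and extended numbering is ignored (see core_phnum() above). */
static const void *find_note_segment(const void *base, size_t *size)
{
	const Elf64_Ehdr *ehdr = base;
	const Elf64_Phdr *phdr =
		(const Elf64_Phdr *)((const char *)base + ehdr->e_phoff);
	unsigned int i;

	for (i = 0; i < ehdr->e_phnum; i++) {
		if (phdr[i].p_type == PT_NOTE) {
			*size = phdr[i].p_filesz;
			return (const char *)base + phdr[i].p_offset;
		}
	}
	return NULL;
}

Together with the count_core_threads() and print_nt_file() sketches earlier, this is enough to enumerate the threads and mapped files recorded in a dump.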
698ba7b5 2364#endif /* CONFIG_ELF_CORE */
2365
2366static int __init init_elf_binfmt(void)
2367{
2368 register_binfmt(&elf_format);
2369 return 0;
2370}
2371
2372static void __exit exit_elf_binfmt(void)
2373{
2374 /* Remove the COFF and ELF loaders. */
2375 unregister_binfmt(&elf_format);
2376}
2377
2378core_initcall(init_elf_binfmt);
2379module_exit(exit_elf_binfmt);
2380MODULE_LICENSE("GPL");