/* fs/binfmt_elf_fdpic.c — extracted from a git-blame view of the
 * mirror_ubuntu-bionic-kernel tree; blame annotations stripped. */
1/* binfmt_elf_fdpic.c: FDPIC ELF binary format
2 *
8a2ab7f5 3 * Copyright (C) 2003, 2004, 2006 Red Hat, Inc. All Rights Reserved.
1da177e4
LT
4 * Written by David Howells (dhowells@redhat.com)
5 * Derived from binfmt_elf.c
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12
13#include <linux/module.h>
14
15#include <linux/fs.h>
16#include <linux/stat.h>
17#include <linux/sched.h>
68db0cf1
IM
18#include <linux/sched/coredump.h>
19#include <linux/sched/task_stack.h>
1da177e4
LT
20#include <linux/mm.h>
21#include <linux/mman.h>
22#include <linux/errno.h>
23#include <linux/signal.h>
24#include <linux/binfmts.h>
25#include <linux/string.h>
26#include <linux/file.h>
27#include <linux/fcntl.h>
28#include <linux/slab.h>
6d8c4e3b 29#include <linux/pagemap.h>
5edc2a51 30#include <linux/security.h>
1da177e4 31#include <linux/highmem.h>
6d8c4e3b 32#include <linux/highuid.h>
1da177e4
LT
33#include <linux/personality.h>
34#include <linux/ptrace.h>
35#include <linux/init.h>
1da177e4
LT
36#include <linux/elf.h>
37#include <linux/elf-fdpic.h>
38#include <linux/elfcore.h>
088e7af7 39#include <linux/coredump.h>
ab27a8d0 40#include <linux/dax.h>
1da177e4 41
7c0f6ba6 42#include <linux/uaccess.h>
1da177e4
LT
43#include <asm/param.h>
44#include <asm/pgalloc.h>
45
46typedef char *elf_caddr_t;
1da177e4
LT
47
48#if 0
49#define kdebug(fmt, ...) printk("FDPIC "fmt"\n" ,##__VA_ARGS__ )
50#else
51#define kdebug(fmt, ...) do {} while(0)
52#endif
53
6d8c4e3b
DH
54#if 0
55#define kdcore(fmt, ...) printk("FDPIC "fmt"\n" ,##__VA_ARGS__ )
56#else
57#define kdcore(fmt, ...) do {} while(0)
58#endif
59
1da177e4
LT
60MODULE_LICENSE("GPL");
61
71613c3b 62static int load_elf_fdpic_binary(struct linux_binprm *);
8a2ab7f5
DH
63static int elf_fdpic_fetch_phdrs(struct elf_fdpic_params *, struct file *);
64static int elf_fdpic_map_file(struct elf_fdpic_params *, struct file *,
65 struct mm_struct *, const char *);
1da177e4 66
8a2ab7f5
DH
67static int create_elf_fdpic_tables(struct linux_binprm *, struct mm_struct *,
68 struct elf_fdpic_params *,
69 struct elf_fdpic_params *);
1da177e4
LT
70
71#ifndef CONFIG_MMU
8a2ab7f5
DH
72static int elf_fdpic_map_file_constdisp_on_uclinux(struct elf_fdpic_params *,
73 struct file *,
74 struct mm_struct *);
1da177e4
LT
75#endif
76
8a2ab7f5
DH
77static int elf_fdpic_map_file_by_direct_mmap(struct elf_fdpic_params *,
78 struct file *, struct mm_struct *);
1da177e4 79
698ba7b5 80#ifdef CONFIG_ELF_CORE
f6151dfe 81static int elf_fdpic_core_dump(struct coredump_params *cprm);
6d8c4e3b
DH
82#endif
83
1da177e4
LT
/* Binary-format handler descriptor handed to the exec core: tells it how to
 * load FDPIC ELF images and (when CONFIG_ELF_CORE is set) how to dump core
 * for them.  min_coredump rejects core limits below one ELF exec page. */
static struct linux_binfmt elf_fdpic_format = {
	.module		= THIS_MODULE,
	.load_binary	= load_elf_fdpic_binary,
#ifdef CONFIG_ELF_CORE
	.core_dump	= elf_fdpic_core_dump,
#endif
	.min_coredump	= ELF_EXEC_PAGESIZE,
};
92
8a2ab7f5
DH
93static int __init init_elf_fdpic_binfmt(void)
94{
8fc3dc5a
AV
95 register_binfmt(&elf_fdpic_format);
96 return 0;
8a2ab7f5 97}
1da177e4 98
8a2ab7f5
DH
/* Unhook the FDPIC ELF handler from the binfmt core on module unload. */
static void __exit exit_elf_fdpic_binfmt(void)
{
	unregister_binfmt(&elf_fdpic_format);
}
103
6d8c4e3b 104core_initcall(init_elf_fdpic_binfmt);
8a2ab7f5 105module_exit(exit_elf_fdpic_binfmt);
1da177e4 106
1bde925d 107static int is_elf(struct elfhdr *hdr, struct file *file)
1da177e4
LT
108{
109 if (memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0)
110 return 0;
111 if (hdr->e_type != ET_EXEC && hdr->e_type != ET_DYN)
112 return 0;
1bde925d 113 if (!elf_check_arch(hdr))
1da177e4 114 return 0;
72c2d531 115 if (!file->f_op->mmap)
1da177e4
LT
116 return 0;
117 return 1;
118}
119
1bde925d
RF
120#ifndef elf_check_fdpic
121#define elf_check_fdpic(x) 0
122#endif
123
124#ifndef elf_check_const_displacement
125#define elf_check_const_displacement(x) 0
126#endif
127
/* Decide whether the image must be laid out with a single constant
 * displacement: true for any non-FDPIC image, and for FDPIC images whose
 * header explicitly requests constant displacement.  Returns 1 or 0. */
static int is_constdisp(struct elfhdr *hdr)
{
	return !elf_check_fdpic(hdr) || elf_check_const_displacement(hdr);
}
136
1da177e4
LT
137/*****************************************************************************/
138/*
139 * read the program headers table into memory
140 */
8a2ab7f5
DH
141static int elf_fdpic_fetch_phdrs(struct elf_fdpic_params *params,
142 struct file *file)
1da177e4
LT
143{
144 struct elf32_phdr *phdr;
145 unsigned long size;
146 int retval, loop;
147
148 if (params->hdr.e_phentsize != sizeof(struct elf_phdr))
149 return -ENOMEM;
150 if (params->hdr.e_phnum > 65536U / sizeof(struct elf_phdr))
151 return -ENOMEM;
152
153 size = params->hdr.e_phnum * sizeof(struct elf_phdr);
154 params->phdrs = kmalloc(size, GFP_KERNEL);
155 if (!params->phdrs)
156 return -ENOMEM;
157
8a2ab7f5
DH
158 retval = kernel_read(file, params->hdr.e_phoff,
159 (char *) params->phdrs, size);
e1d2c8b6
DH
160 if (unlikely(retval != size))
161 return retval < 0 ? retval : -ENOEXEC;
1da177e4
LT
162
163 /* determine stack size for this binary */
164 phdr = params->phdrs;
165 for (loop = 0; loop < params->hdr.e_phnum; loop++, phdr++) {
166 if (phdr->p_type != PT_GNU_STACK)
167 continue;
168
169 if (phdr->p_flags & PF_X)
170 params->flags |= ELF_FDPIC_FLAG_EXEC_STACK;
171 else
172 params->flags |= ELF_FDPIC_FLAG_NOEXEC_STACK;
173
174 params->stack_size = phdr->p_memsz;
175 break;
176 }
177
178 return 0;
8a2ab7f5 179}
1da177e4
LT
180
181/*****************************************************************************/
182/*
183 * load an fdpic binary into various bits of memory
184 */
/*
 * Load an FDPIC ELF binary into the current process.
 *
 * Validates the image, reads its program headers, loads any PT_INTERP
 * interpreter, flushes the old process image (the point of no return),
 * maps executable and interpreter, sets up the brk/stack areas, builds
 * the userspace argument/auxv tables and finally points the registers at
 * the entry point.  Returns 0 on success or a negative errno; all error
 * paths funnel through the shared "error:" cleanup label below.
 */
static int load_elf_fdpic_binary(struct linux_binprm *bprm)
{
	struct elf_fdpic_params exec_params, interp_params;
	struct pt_regs *regs = current_pt_regs();
	struct elf_phdr *phdr;
	unsigned long stack_size, entryaddr;
#ifdef ELF_FDPIC_PLAT_INIT
	unsigned long dynaddr;
#endif
#ifndef CONFIG_MMU
	unsigned long stack_prot;
#endif
	struct file *interpreter = NULL; /* to shut gcc up */
	char *interpreter_name = NULL;
	int executable_stack;
	int retval, i;

	kdebug("____ LOAD %d ____", current->pid);

	memset(&exec_params, 0, sizeof(exec_params));
	memset(&interp_params, 0, sizeof(interp_params));

	/* bprm->buf already holds the first BINPRM_BUF_SIZE bytes of the file */
	exec_params.hdr = *(struct elfhdr *) bprm->buf;
	exec_params.flags = ELF_FDPIC_FLAG_PRESENT | ELF_FDPIC_FLAG_EXECUTABLE;

	/* check that this is a binary we know how to deal with */
	retval = -ENOEXEC;
	if (!is_elf(&exec_params.hdr, bprm->file))
		goto error;
	if (!elf_check_fdpic(&exec_params.hdr)) {
#ifdef CONFIG_MMU
		/* binfmt_elf handles non-fdpic elf except on nommu */
		goto error;
#else
		/* nommu can only load ET_DYN (PIE) ELF */
		if (exec_params.hdr.e_type != ET_DYN)
			goto error;
#endif
	}

	/* read the program header table */
	retval = elf_fdpic_fetch_phdrs(&exec_params, bprm->file);
	if (retval < 0)
		goto error;

	/* scan for a program header that specifies an interpreter */
	phdr = exec_params.phdrs;

	for (i = 0; i < exec_params.hdr.e_phnum; i++, phdr++) {
		switch (phdr->p_type) {
		case PT_INTERP:
			retval = -ENOMEM;
			if (phdr->p_filesz > PATH_MAX)
				goto error;
			retval = -ENOENT;
			if (phdr->p_filesz < 2)
				goto error;

			/* read the name of the interpreter into memory */
			interpreter_name = kmalloc(phdr->p_filesz, GFP_KERNEL);
			if (!interpreter_name)
				goto error;

			retval = kernel_read(bprm->file,
					     phdr->p_offset,
					     interpreter_name,
					     phdr->p_filesz);
			if (unlikely(retval != phdr->p_filesz)) {
				if (retval >= 0)
					retval = -ENOEXEC;
				goto error;
			}

			/* the stored name must be NUL-terminated */
			retval = -ENOENT;
			if (interpreter_name[phdr->p_filesz - 1] != '\0')
				goto error;

			kdebug("Using ELF interpreter %s", interpreter_name);

			/* replace the program with the interpreter */
			interpreter = open_exec(interpreter_name);
			retval = PTR_ERR(interpreter);
			if (IS_ERR(interpreter)) {
				interpreter = NULL;
				goto error;
			}

			/*
			 * If the binary is not readable then enforce
			 * mm->dumpable = 0 regardless of the interpreter's
			 * permissions.
			 */
			would_dump(bprm, interpreter);

			/* overwrite bprm->buf with the interpreter's header */
			retval = kernel_read(interpreter, 0, bprm->buf,
					     BINPRM_BUF_SIZE);
			if (unlikely(retval != BINPRM_BUF_SIZE)) {
				if (retval >= 0)
					retval = -ENOEXEC;
				goto error;
			}

			interp_params.hdr = *((struct elfhdr *) bprm->buf);
			break;

		case PT_LOAD:
#ifdef CONFIG_MMU
			/* remember the lowest PT_LOAD vaddr as the load base */
			if (exec_params.load_addr == 0)
				exec_params.load_addr = phdr->p_vaddr;
#endif
			break;
		}

	}

	if (is_constdisp(&exec_params.hdr))
		exec_params.flags |= ELF_FDPIC_FLAG_CONSTDISP;

	/* perform insanity checks on the interpreter */
	if (interpreter_name) {
		retval = -ELIBBAD;
		if (!is_elf(&interp_params.hdr, interpreter))
			goto error;

		interp_params.flags = ELF_FDPIC_FLAG_PRESENT;

		/* read the interpreter's program header table */
		retval = elf_fdpic_fetch_phdrs(&interp_params, interpreter);
		if (retval < 0)
			goto error;
	}

	/* the executable's PT_GNU_STACK wins; fall back to the
	 * interpreter's request if the executable made none */
	stack_size = exec_params.stack_size;
	if (exec_params.flags & ELF_FDPIC_FLAG_EXEC_STACK)
		executable_stack = EXSTACK_ENABLE_X;
	else if (exec_params.flags & ELF_FDPIC_FLAG_NOEXEC_STACK)
		executable_stack = EXSTACK_DISABLE_X;
	else
		executable_stack = EXSTACK_DEFAULT;

	if (stack_size == 0) {
		stack_size = interp_params.stack_size;
		if (interp_params.flags & ELF_FDPIC_FLAG_EXEC_STACK)
			executable_stack = EXSTACK_ENABLE_X;
		else if (interp_params.flags & ELF_FDPIC_FLAG_NOEXEC_STACK)
			executable_stack = EXSTACK_DISABLE_X;
		else
			executable_stack = EXSTACK_DEFAULT;
	}

	retval = -ENOEXEC;
	if (stack_size == 0)
		stack_size = 131072UL; /* same as exec.c's default commit */

	if (is_constdisp(&interp_params.hdr))
		interp_params.flags |= ELF_FDPIC_FLAG_CONSTDISP;

	/* flush all traces of the currently running executable */
	retval = flush_old_exec(bprm);
	if (retval)
		goto error;

	/* there's now no turning back... the old userspace image is dead,
	 * defunct, deceased, etc.
	 */
	if (elf_check_fdpic(&exec_params.hdr))
		set_personality(PER_LINUX_FDPIC);
	else
		set_personality(PER_LINUX);
	if (elf_read_implies_exec(&exec_params.hdr, executable_stack))
		current->personality |= READ_IMPLIES_EXEC;

	setup_new_exec(bprm);

	set_binfmt(&elf_fdpic_format);

	/* reset the mm layout bookkeeping before mapping anything */
	current->mm->start_code = 0;
	current->mm->end_code = 0;
	current->mm->start_stack = 0;
	current->mm->start_data = 0;
	current->mm->end_data = 0;
	current->mm->context.exec_fdpic_loadmap = 0;
	current->mm->context.interp_fdpic_loadmap = 0;

#ifdef CONFIG_MMU
	elf_fdpic_arch_lay_out_mm(&exec_params,
				  &interp_params,
				  &current->mm->start_stack,
				  &current->mm->start_brk);

	retval = setup_arg_pages(bprm, current->mm->start_stack,
				 executable_stack);
	if (retval < 0)
		goto error;
#endif

	/* load the executable and interpreter into memory */
	retval = elf_fdpic_map_file(&exec_params, bprm->file, current->mm,
				    "executable");
	if (retval < 0)
		goto error;

	if (interpreter_name) {
		retval = elf_fdpic_map_file(&interp_params, interpreter,
					    current->mm, "interpreter");
		if (retval < 0) {
			printk(KERN_ERR "Unable to load interpreter\n");
			goto error;
		}

		allow_write_access(interpreter);
		fput(interpreter);
		interpreter = NULL;
	}

#ifdef CONFIG_MMU
	if (!current->mm->start_brk)
		current->mm->start_brk = current->mm->end_data;

	current->mm->brk = current->mm->start_brk =
		PAGE_ALIGN(current->mm->start_brk);

#else
	/* create a stack area and zero-size brk area */
	stack_size = (stack_size + PAGE_SIZE - 1) & PAGE_MASK;
	if (stack_size < PAGE_SIZE * 2)
		stack_size = PAGE_SIZE * 2;

	stack_prot = PROT_READ | PROT_WRITE;
	if (executable_stack == EXSTACK_ENABLE_X ||
	    (executable_stack == EXSTACK_DEFAULT && VM_STACK_FLAGS & VM_EXEC))
		stack_prot |= PROT_EXEC;

	current->mm->start_brk = vm_mmap(NULL, 0, stack_size, stack_prot,
					 MAP_PRIVATE | MAP_ANONYMOUS |
					 MAP_UNINITIALIZED | MAP_GROWSDOWN,
					 0);

	if (IS_ERR_VALUE(current->mm->start_brk)) {
		retval = current->mm->start_brk;
		current->mm->start_brk = 0;
		goto error;
	}

	current->mm->brk = current->mm->start_brk;
	current->mm->context.end_brk = current->mm->start_brk;
	current->mm->start_stack = current->mm->start_brk + stack_size;
#endif

	install_exec_creds(bprm);
	if (create_elf_fdpic_tables(bprm, current->mm,
				    &exec_params, &interp_params) < 0)
		goto error;

	kdebug("- start_code %lx", current->mm->start_code);
	kdebug("- end_code %lx", current->mm->end_code);
	kdebug("- start_data %lx", current->mm->start_data);
	kdebug("- end_data %lx", current->mm->end_data);
	kdebug("- start_brk %lx", current->mm->start_brk);
	kdebug("- brk %lx", current->mm->brk);
	kdebug("- start_stack %lx", current->mm->start_stack);

#ifdef ELF_FDPIC_PLAT_INIT
	/*
	 * The ABI may specify that certain registers be set up in special
	 * ways (on i386 %edx is the address of a DT_FINI function, for
	 * example.  This macro performs whatever initialization to
	 * the regs structure is required.
	 */
	dynaddr = interp_params.dynamic_addr ?: exec_params.dynamic_addr;
	ELF_FDPIC_PLAT_INIT(regs, exec_params.map_addr, interp_params.map_addr,
			    dynaddr);
#endif

	/* everything is now ready... get the userspace context ready to roll */
	entryaddr = interp_params.entry_addr ?: exec_params.entry_addr;
	start_thread(regs, entryaddr, current->mm->start_stack);

	retval = 0;

error:
	/* shared cleanup for both success (retval == 0) and failure paths */
	if (interpreter) {
		allow_write_access(interpreter);
		fput(interpreter);
	}
	kfree(interpreter_name);
	kfree(exec_params.phdrs);
	kfree(exec_params.loadmap);
	kfree(interp_params.phdrs);
	kfree(interp_params.loadmap);
	return retval;
}
1da177e4
LT
477
478/*****************************************************************************/
ec23847d
PM
479
480#ifndef ELF_BASE_PLATFORM
481/*
482 * AT_BASE_PLATFORM indicates the "real" hardware/microarchitecture.
483 * If the arch defines ELF_BASE_PLATFORM (in asm/elf.h), the value
484 * will be copied to the user stack in the same manner as AT_PLATFORM.
485 */
486#define ELF_BASE_PLATFORM NULL
487#endif
488
1da177e4 489/*
c7637941
PM
490 * present useful information to the program by shovelling it onto the new
491 * process's stack
1da177e4
LT
492 */
/*
 * present useful information to the program by shovelling it onto the new
 * process's stack
 *
 * Lays out, from high addresses downward: platform strings, the FDPIC load
 * map(s), then the auxiliary vector, envp[], argv[] and argc.  The layout is
 * computed top-down in two cursors: @sp descends to reserve space while
 * @csp fills entries in; BUG_ON(csp != sp) asserts the two accountings
 * agree.  Returns 0 or -EFAULT/-EINVAL on user-copy problems.
 */
static int create_elf_fdpic_tables(struct linux_binprm *bprm,
				   struct mm_struct *mm,
				   struct elf_fdpic_params *exec_params,
				   struct elf_fdpic_params *interp_params)
{
	const struct cred *cred = current_cred();
	unsigned long sp, csp, nitems;
	elf_caddr_t __user *argv, *envp;
	size_t platform_len = 0, len;
	char *k_platform, *k_base_platform;
	char __user *u_platform, *u_base_platform, *p;
	int loop;
	int nr;	/* reset for each csp adjustment */

#ifdef CONFIG_MMU
	/* In some cases (e.g. Hyper-Threading), we want to avoid L1 evictions
	 * by the processes running on the same package. One thing we can do is
	 * to shuffle the initial stack for them, so we give the architecture
	 * an opportunity to do so here.
	 */
	sp = arch_align_stack(bprm->p);
#else
	sp = mm->start_stack;

	/* stack the program arguments and environment */
	if (transfer_args_to_stack(bprm, &sp) < 0)
		return -EFAULT;
	sp &= ~15;
#endif

	/*
	 * If this architecture has a platform capability string, copy it
	 * to userspace.  In some cases (Sparc), this info is impossible
	 * for userspace to get any other way, in others (i386) it is
	 * merely difficult.
	 */
	k_platform = ELF_PLATFORM;
	u_platform = NULL;

	if (k_platform) {
		platform_len = strlen(k_platform) + 1;
		sp -= platform_len;
		u_platform = (char __user *) sp;
		if (__copy_to_user(u_platform, k_platform, platform_len) != 0)
			return -EFAULT;
	}

	/*
	 * If this architecture has a "base" platform capability
	 * string, copy it to userspace.
	 */
	k_base_platform = ELF_BASE_PLATFORM;
	u_base_platform = NULL;

	if (k_base_platform) {
		platform_len = strlen(k_base_platform) + 1;
		sp -= platform_len;
		u_base_platform = (char __user *) sp;
		if (__copy_to_user(u_base_platform, k_base_platform, platform_len) != 0)
			return -EFAULT;
	}

	sp &= ~7UL;

	/* stack the load map(s) */
	len = sizeof(struct elf32_fdpic_loadmap);
	len += sizeof(struct elf32_fdpic_loadseg) * exec_params->loadmap->nsegs;
	sp = (sp - len) & ~7UL;
	exec_params->map_addr = sp;

	if (copy_to_user((void __user *) sp, exec_params->loadmap, len) != 0)
		return -EFAULT;

	current->mm->context.exec_fdpic_loadmap = (unsigned long) sp;

	if (interp_params->loadmap) {
		len = sizeof(struct elf32_fdpic_loadmap);
		len += sizeof(struct elf32_fdpic_loadseg) *
			interp_params->loadmap->nsegs;
		sp = (sp - len) & ~7UL;
		interp_params->map_addr = sp;

		if (copy_to_user((void __user *) sp, interp_params->loadmap,
				 len) != 0)
			return -EFAULT;

		current->mm->context.interp_fdpic_loadmap = (unsigned long) sp;
	}

	/* force 16 byte _final_ alignment here for generality */
#define DLINFO_ITEMS 15

	/* count auxv entries: AT_NULL + fixed items + optional entries */
	nitems = 1 + DLINFO_ITEMS + (k_platform ? 1 : 0) +
		(k_base_platform ? 1 : 0) + AT_VECTOR_SIZE_ARCH;

	if (bprm->interp_flags & BINPRM_FLAGS_EXECFD)
		nitems++;

	/* reserve everything below the load maps in one go, then realign */
	csp = sp;
	sp -= nitems * 2 * sizeof(unsigned long);
	sp -= (bprm->envc + 1) * sizeof(char *);	/* envv[] */
	sp -= (bprm->argc + 1) * sizeof(char *);	/* argv[] */
	sp -= 1 * sizeof(unsigned long);		/* argc */

	csp -= sp & 15UL;
	sp -= sp & 15UL;

	/* put the ELF interpreter info on the stack */
#define NEW_AUX_ENT(id, val)						\
	do {								\
		struct { unsigned long _id, _val; } __user *ent;	\
									\
		ent = (void __user *) csp;				\
		__put_user((id), &ent[nr]._id);				\
		__put_user((val), &ent[nr]._val);			\
		nr++;							\
	} while (0)

	/* auxv is built bottom-up: AT_NULL terminator goes in first,
	 * at the highest address */
	nr = 0;
	csp -= 2 * sizeof(unsigned long);
	NEW_AUX_ENT(AT_NULL, 0);
	if (k_platform) {
		nr = 0;
		csp -= 2 * sizeof(unsigned long);
		NEW_AUX_ENT(AT_PLATFORM,
			    (elf_addr_t) (unsigned long) u_platform);
	}

	if (k_base_platform) {
		nr = 0;
		csp -= 2 * sizeof(unsigned long);
		NEW_AUX_ENT(AT_BASE_PLATFORM,
			    (elf_addr_t) (unsigned long) u_base_platform);
	}

	if (bprm->interp_flags & BINPRM_FLAGS_EXECFD) {
		nr = 0;
		csp -= 2 * sizeof(unsigned long);
		NEW_AUX_ENT(AT_EXECFD, bprm->interp_data);
	}

	nr = 0;
	csp -= DLINFO_ITEMS * 2 * sizeof(unsigned long);
	NEW_AUX_ENT(AT_HWCAP, ELF_HWCAP);
#ifdef ELF_HWCAP2
	NEW_AUX_ENT(AT_HWCAP2, ELF_HWCAP2);
#endif
	NEW_AUX_ENT(AT_PAGESZ, PAGE_SIZE);
	NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC);
	NEW_AUX_ENT(AT_PHDR, exec_params->ph_addr);
	NEW_AUX_ENT(AT_PHENT, sizeof(struct elf_phdr));
	NEW_AUX_ENT(AT_PHNUM, exec_params->hdr.e_phnum);
	NEW_AUX_ENT(AT_BASE, interp_params->elfhdr_addr);
	NEW_AUX_ENT(AT_FLAGS, 0);
	NEW_AUX_ENT(AT_ENTRY, exec_params->entry_addr);
	NEW_AUX_ENT(AT_UID, (elf_addr_t) from_kuid_munged(cred->user_ns, cred->uid));
	NEW_AUX_ENT(AT_EUID, (elf_addr_t) from_kuid_munged(cred->user_ns, cred->euid));
	NEW_AUX_ENT(AT_GID, (elf_addr_t) from_kgid_munged(cred->user_ns, cred->gid));
	NEW_AUX_ENT(AT_EGID, (elf_addr_t) from_kgid_munged(cred->user_ns, cred->egid));
	NEW_AUX_ENT(AT_SECURE, security_bprm_secureexec(bprm));
	NEW_AUX_ENT(AT_EXECFN, bprm->exec);

#ifdef ARCH_DLINFO
	nr = 0;
	csp -= AT_VECTOR_SIZE_ARCH * 2 * sizeof(unsigned long);

	/* ARCH_DLINFO must come last so platform specific code can enforce
	 * special alignment requirements on the AUXV if necessary (eg. PPC).
	 */
	ARCH_DLINFO;
#endif
#undef NEW_AUX_ENT

	/* allocate room for argv[] and envv[] */
	csp -= (bprm->envc + 1) * sizeof(elf_caddr_t);
	envp = (elf_caddr_t __user *) csp;
	csp -= (bprm->argc + 1) * sizeof(elf_caddr_t);
	argv = (elf_caddr_t __user *) csp;

	/* stack argc */
	csp -= sizeof(unsigned long);
	__put_user(bprm->argc, (unsigned long __user *) csp);

	/* the two top-down accountings must now meet exactly */
	BUG_ON(csp != sp);

	/* fill in the argv[] array */
#ifdef CONFIG_MMU
	current->mm->arg_start = bprm->p;
#else
	current->mm->arg_start = current->mm->start_stack -
		(MAX_ARG_PAGES * PAGE_SIZE - bprm->p);
#endif

	p = (char __user *) current->mm->arg_start;
	for (loop = bprm->argc; loop > 0; loop--) {
		__put_user((elf_caddr_t) p, argv++);
		len = strnlen_user(p, MAX_ARG_STRLEN);
		if (!len || len > MAX_ARG_STRLEN)
			return -EINVAL;
		p += len;
	}
	__put_user(NULL, argv);
	current->mm->arg_end = (unsigned long) p;

	/* fill in the envv[] array */
	current->mm->env_start = (unsigned long) p;
	for (loop = bprm->envc; loop > 0; loop--) {
		__put_user((elf_caddr_t)(unsigned long) p, envp++);
		len = strnlen_user(p, MAX_ARG_STRLEN);
		if (!len || len > MAX_ARG_STRLEN)
			return -EINVAL;
		p += len;
	}
	__put_user(NULL, envp);
	current->mm->env_end = (unsigned long) p;

	mm->start_stack = (unsigned long) sp;
	return 0;
}
1da177e4
LT
712
713/*****************************************************************************/
1da177e4
LT
714/*
715 * load the appropriate binary image (executable or interpreter) into memory
716 * - we assume no MMU is available
717 * - if no other PIC bits are set in params->hdr->e_flags
718 * - we assume that the LOADable segments in the binary are independently relocatable
719 * - we assume R/O executable segments are shareable
720 * - else
721 * - we assume the loadable parts of the image to require fixed displacement
722 * - the image is not shareable
723 */
/*
 * load the appropriate binary image (executable or interpreter) into memory
 * - we assume no MMU is available
 * - if no other PIC bits are set in params->hdr->e_flags
 *   - we assume that the LOADable segments in the binary are independently relocatable
 *   - we assume R/O executable segments are shareable
 * - else
 *   - we assume the loadable parts of the image to require fixed displacement
 *   - the image is not shareable
 *
 * Builds params->loadmap (freed by the caller) describing where each
 * PT_LOAD segment landed, then resolves the entry point, program header
 * table and DYNAMIC section addresses within those segments.  @what is
 * only used for diagnostics.  Returns 0 or a negative errno.
 */
static int elf_fdpic_map_file(struct elf_fdpic_params *params,
			      struct file *file,
			      struct mm_struct *mm,
			      const char *what)
{
	struct elf32_fdpic_loadmap *loadmap;
#ifdef CONFIG_MMU
	struct elf32_fdpic_loadseg *mseg;
#endif
	struct elf32_fdpic_loadseg *seg;
	struct elf32_phdr *phdr;
	unsigned long load_addr, stop;
	unsigned nloads, tmp;
	size_t size;
	int loop, ret;

	/* allocate a load map table */
	nloads = 0;
	for (loop = 0; loop < params->hdr.e_phnum; loop++)
		if (params->phdrs[loop].p_type == PT_LOAD)
			nloads++;

	if (nloads == 0)
		return -ELIBBAD;

	size = sizeof(*loadmap) + nloads * sizeof(*seg);
	loadmap = kzalloc(size, GFP_KERNEL);
	if (!loadmap)
		return -ENOMEM;

	params->loadmap = loadmap;

	loadmap->version = ELF32_FDPIC_LOADMAP_VERSION;
	loadmap->nsegs = nloads;

	load_addr = params->load_addr;
	seg = loadmap->segs;

	/* map the requested LOADs into the memory space */
	switch (params->flags & ELF_FDPIC_FLAG_ARRANGEMENT) {
	case ELF_FDPIC_FLAG_CONSTDISP:
	case ELF_FDPIC_FLAG_CONTIGUOUS:
#ifndef CONFIG_MMU
		ret = elf_fdpic_map_file_constdisp_on_uclinux(params, file, mm);
		if (ret < 0)
			return ret;
		break;
#endif
	/* on MMU kernels const-disp falls through to direct mmap */
	default:
		ret = elf_fdpic_map_file_by_direct_mmap(params, file, mm);
		if (ret < 0)
			return ret;
		break;
	}

	/* map the entry point */
	if (params->hdr.e_entry) {
		seg = loadmap->segs;
		for (loop = loadmap->nsegs; loop > 0; loop--, seg++) {
			if (params->hdr.e_entry >= seg->p_vaddr &&
			    params->hdr.e_entry < seg->p_vaddr + seg->p_memsz) {
				params->entry_addr =
					(params->hdr.e_entry - seg->p_vaddr) +
					seg->addr;
				break;
			}
		}
	}

	/* determine where the program header table has wound up if mapped */
	stop = params->hdr.e_phoff;
	stop += params->hdr.e_phnum * sizeof (struct elf_phdr);
	phdr = params->phdrs;

	for (loop = 0; loop < params->hdr.e_phnum; loop++, phdr++) {
		if (phdr->p_type != PT_LOAD)
			continue;

		/* skip segments that don't fully cover the phdr table */
		if (phdr->p_offset > params->hdr.e_phoff ||
		    phdr->p_offset + phdr->p_filesz < stop)
			continue;

		seg = loadmap->segs;
		for (loop = loadmap->nsegs; loop > 0; loop--, seg++) {
			if (phdr->p_vaddr >= seg->p_vaddr &&
			    phdr->p_vaddr + phdr->p_filesz <=
			    seg->p_vaddr + seg->p_memsz) {
				params->ph_addr =
					(phdr->p_vaddr - seg->p_vaddr) +
					seg->addr +
					params->hdr.e_phoff - phdr->p_offset;
				break;
			}
		}
		break;
	}

	/* determine where the dynamic section has wound up if there is one */
	phdr = params->phdrs;
	for (loop = 0; loop < params->hdr.e_phnum; loop++, phdr++) {
		if (phdr->p_type != PT_DYNAMIC)
			continue;

		seg = loadmap->segs;
		for (loop = loadmap->nsegs; loop > 0; loop--, seg++) {
			if (phdr->p_vaddr >= seg->p_vaddr &&
			    phdr->p_vaddr + phdr->p_memsz <=
			    seg->p_vaddr + seg->p_memsz) {
				params->dynamic_addr =
					(phdr->p_vaddr - seg->p_vaddr) +
					seg->addr;

				/* check the dynamic section contains at least
				 * one item, and that the last item is a NULL
				 * entry */
				if (phdr->p_memsz == 0 ||
				    phdr->p_memsz % sizeof(Elf32_Dyn) != 0)
					goto dynamic_error;

				tmp = phdr->p_memsz / sizeof(Elf32_Dyn);
				if (((Elf32_Dyn *)
				     params->dynamic_addr)[tmp - 1].d_tag != 0)
					goto dynamic_error;
				break;
			}
		}
		break;
	}

	/* now elide adjacent segments in the load map on MMU linux
	 * - on uClinux the holes between may actually be filled with system
	 *   stuff or stuff from other processes
	 */
#ifdef CONFIG_MMU
	/* NOTE(review): `seg` does not appear to advance anywhere in this
	 * loop as captured here — verify against the upstream tree that an
	 * increment was not lost in extraction. */
	nloads = loadmap->nsegs;
	mseg = loadmap->segs;
	seg = mseg + 1;
	for (loop = 1; loop < nloads; loop++) {
		/* see if we have a candidate for merging */
		if (seg->p_vaddr - mseg->p_vaddr == seg->addr - mseg->addr) {
			load_addr = PAGE_ALIGN(mseg->addr + mseg->p_memsz);
			if (load_addr == (seg->addr & PAGE_MASK)) {
				mseg->p_memsz +=
					load_addr -
					(mseg->addr + mseg->p_memsz);
				mseg->p_memsz += seg->addr & ~PAGE_MASK;
				mseg->p_memsz += seg->p_memsz;
				loadmap->nsegs--;
				continue;
			}
		}

		mseg++;
		if (mseg != seg)
			*mseg = *seg;
	}
#endif

	kdebug("Mapped Object [%s]:", what);
	kdebug("- elfhdr : %lx", params->elfhdr_addr);
	kdebug("- entry : %lx", params->entry_addr);
	kdebug("- PHDR[] : %lx", params->ph_addr);
	kdebug("- DYNAMIC[]: %lx", params->dynamic_addr);
	seg = loadmap->segs;
	for (loop = 0; loop < loadmap->nsegs; loop++, seg++)
		kdebug("- LOAD[%d] : %08x-%08x [va=%x ms=%x]",
		       loop,
		       seg->addr, seg->addr + seg->p_memsz - 1,
		       seg->p_vaddr, seg->p_memsz);

	return 0;

dynamic_error:
	printk("ELF FDPIC %s with invalid DYNAMIC section (inode=%lu)\n",
	       what, file_inode(file)->i_ino);
	return -ELIBBAD;
}
1da177e4
LT
901
902/*****************************************************************************/
903/*
904 * map a file with constant displacement under uClinux
905 */
906#ifndef CONFIG_MMU
8a2ab7f5
DH
/*
 * map a file with constant displacement under uClinux
 *
 * Computes the span [base, top) covering all PT_LOAD segments, maps one
 * big anonymous RWX block for the whole span, then copies each segment's
 * file contents into place with read_code() and zeroes the file-size /
 * mem-size tail.  Fills in params->loadmap->segs and, if @mm is given,
 * the code/data boundary bookkeeping.  Returns 0 or a negative errno.
 */
static int elf_fdpic_map_file_constdisp_on_uclinux(
	struct elf_fdpic_params *params,
	struct file *file,
	struct mm_struct *mm)
{
	struct elf32_fdpic_loadseg *seg;
	struct elf32_phdr *phdr;
	unsigned long load_addr, base = ULONG_MAX, top = 0, maddr = 0, mflags;
	int loop, ret;

	load_addr = params->load_addr;
	seg = params->loadmap->segs;

	/* determine the bounds of the contiguous overall allocation we must
	 * make */
	phdr = params->phdrs;
	for (loop = 0; loop < params->hdr.e_phnum; loop++, phdr++) {
		if (params->phdrs[loop].p_type != PT_LOAD)
			continue;

		if (base > phdr->p_vaddr)
			base = phdr->p_vaddr;
		if (top < phdr->p_vaddr + phdr->p_memsz)
			top = phdr->p_vaddr + phdr->p_memsz;
	}

	/* allocate one big anon block for everything */
	mflags = MAP_PRIVATE;
	if (params->flags & ELF_FDPIC_FLAG_EXECUTABLE)
		mflags |= MAP_EXECUTABLE;

	maddr = vm_mmap(NULL, load_addr, top - base,
			PROT_READ | PROT_WRITE | PROT_EXEC, mflags, 0);
	if (IS_ERR_VALUE(maddr))
		return (int) maddr;

	/* advance the hint past this allocation for any subsequent mapping */
	if (load_addr != 0)
		load_addr += PAGE_ALIGN(top - base);

	/* and then load the file segments into it */
	phdr = params->phdrs;
	for (loop = 0; loop < params->hdr.e_phnum; loop++, phdr++) {
		if (params->phdrs[loop].p_type != PT_LOAD)
			continue;

		seg->addr = maddr + (phdr->p_vaddr - base);
		seg->p_vaddr = phdr->p_vaddr;
		seg->p_memsz = phdr->p_memsz;

		ret = read_code(file, seg->addr, phdr->p_offset,
				phdr->p_filesz);
		if (ret < 0)
			return ret;

		/* map the ELF header address if in this segment */
		if (phdr->p_offset == 0)
			params->elfhdr_addr = seg->addr;

		/* clear any space allocated but not loaded */
		if (phdr->p_filesz < phdr->p_memsz) {
			if (clear_user((void *) (seg->addr + phdr->p_filesz),
				       phdr->p_memsz - phdr->p_filesz))
				return -EFAULT;
		}

		if (mm) {
			/* record only the first executable / first data
			 * segment as the code/data extents */
			if (phdr->p_flags & PF_X) {
				if (!mm->start_code) {
					mm->start_code = seg->addr;
					mm->end_code = seg->addr +
						phdr->p_memsz;
				}
			} else if (!mm->start_data) {
				mm->start_data = seg->addr;
				mm->end_data = seg->addr + phdr->p_memsz;
			}
		}

		seg++;
	}

	return 0;
}
1da177e4
LT
990#endif
991
/*****************************************************************************/
/*
 * map a binary by direct mmap() of the individual PT_LOAD segments
 *
 * Each PT_LOAD phdr is mmapped straight from @file; where a segment lands
 * depends on the arrangement bits in params->flags (honour p_vaddr, keep a
 * constant displacement, pack contiguously, or place independently).  On
 * success the loadmap segment table (params->loadmap->segs) is filled in
 * and, if @mm is supplied, its code/data extents are recorded.
 *
 * Returns 0 on success or a negative errno.
 */
static int elf_fdpic_map_file_by_direct_mmap(struct elf_fdpic_params *params,
					     struct file *file,
					     struct mm_struct *mm)
{
	struct elf32_fdpic_loadseg *seg;
	struct elf32_phdr *phdr;
	unsigned long load_addr, delta_vaddr;
	int loop, dvset;

	load_addr = params->load_addr;
	delta_vaddr = 0;
	dvset = 0;	/* becomes 1 once the first CONSTDISP segment fixes the displacement */

	seg = params->loadmap->segs;

	/* deal with each load segment separately */
	phdr = params->phdrs;
	for (loop = 0; loop < params->hdr.e_phnum; loop++, phdr++) {
		unsigned long maddr, disp, excess, excess1;
		int prot = 0, flags;

		if (phdr->p_type != PT_LOAD)
			continue;

		kdebug("[LOAD] va=%lx of=%lx fs=%lx ms=%lx",
		       (unsigned long) phdr->p_vaddr,
		       (unsigned long) phdr->p_offset,
		       (unsigned long) phdr->p_filesz,
		       (unsigned long) phdr->p_memsz);

		/* determine the mapping parameters from the phdr flags */
		if (phdr->p_flags & PF_R) prot |= PROT_READ;
		if (phdr->p_flags & PF_W) prot |= PROT_WRITE;
		if (phdr->p_flags & PF_X) prot |= PROT_EXEC;

		flags = MAP_PRIVATE | MAP_DENYWRITE;
		if (params->flags & ELF_FDPIC_FLAG_EXECUTABLE)
			flags |= MAP_EXECUTABLE;

		maddr = 0;

		switch (params->flags & ELF_FDPIC_FLAG_ARRANGEMENT) {
		case ELF_FDPIC_FLAG_INDEPENDENT:
			/* PT_LOADs are independently locatable */
			break;

		case ELF_FDPIC_FLAG_HONOURVADDR:
			/* the specified virtual address must be honoured */
			maddr = phdr->p_vaddr;
			flags |= MAP_FIXED;
			break;

		case ELF_FDPIC_FLAG_CONSTDISP:
			/* constant displacement
			 * - can be mapped anywhere, but must be mapped as a
			 *   unit
			 */
			if (!dvset) {
				/* first segment chooses the base address;
				 * later segments keep the same
				 * vaddr->address displacement */
				maddr = load_addr;
				delta_vaddr = phdr->p_vaddr;
				dvset = 1;
			} else {
				maddr = load_addr + phdr->p_vaddr - delta_vaddr;
				flags |= MAP_FIXED;
			}
			break;

		case ELF_FDPIC_FLAG_CONTIGUOUS:
			/* contiguity handled later */
			break;

		default:
			BUG();
		}

		maddr &= PAGE_MASK;

		/* create the mapping
		 * - disp is p_vaddr's sub-page offset, so the mapping starts
		 *   at the page boundary below both the address and the file
		 *   offset */
		disp = phdr->p_vaddr & ~PAGE_MASK;
		maddr = vm_mmap(file, maddr, phdr->p_memsz + disp, prot, flags,
				phdr->p_offset - disp);

		kdebug("mmap[%d] <file> sz=%lx pr=%x fl=%x of=%lx --> %08lx",
		       loop, phdr->p_memsz + disp, prot, flags,
		       phdr->p_offset - disp, maddr);

		if (IS_ERR_VALUE(maddr))
			return (int) maddr;

		if ((params->flags & ELF_FDPIC_FLAG_ARRANGEMENT) ==
		    ELF_FDPIC_FLAG_CONTIGUOUS)
			load_addr += PAGE_ALIGN(phdr->p_memsz + disp);

		seg->addr = maddr + disp;
		seg->p_vaddr = phdr->p_vaddr;
		seg->p_memsz = phdr->p_memsz;

		/* map the ELF header address if in this segment */
		if (phdr->p_offset == 0)
			params->elfhdr_addr = seg->addr;

		/* clear the bit between beginning of mapping and beginning of
		 * PT_LOAD */
		if (prot & PROT_WRITE && disp > 0) {
			kdebug("clear[%d] ad=%lx sz=%lx", loop, maddr, disp);
			if (clear_user((void __user *) maddr, disp))
				return -EFAULT;
			maddr += disp;
		}

		/* clear any space allocated but not loaded
		 * - on uClinux we can just clear the lot
		 * - on MMU linux we'll get a SIGBUS beyond the last page
		 *   extant in the file
		 */
		excess = phdr->p_memsz - phdr->p_filesz;
		excess1 = PAGE_SIZE - ((maddr + phdr->p_filesz) & ~PAGE_MASK);

#ifdef CONFIG_MMU
		if (excess > excess1) {
			/* the BSS extends past the file-backed pages: back
			 * the remainder with anonymous zero pages */
			unsigned long xaddr = maddr + phdr->p_filesz + excess1;
			unsigned long xmaddr;

			flags |= MAP_FIXED | MAP_ANONYMOUS;
			xmaddr = vm_mmap(NULL, xaddr, excess - excess1,
					 prot, flags, 0);

			kdebug("mmap[%d] <anon>"
			       " ad=%lx sz=%lx pr=%x fl=%x of=0 --> %08lx",
			       loop, xaddr, excess - excess1, prot, flags,
			       xmaddr);

			if (xmaddr != xaddr)
				return -ENOMEM;
		}

		if (prot & PROT_WRITE && excess1 > 0) {
			/* zero the tail of the last file-backed page */
			kdebug("clear[%d] ad=%lx sz=%lx",
			       loop, maddr + phdr->p_filesz, excess1);
			if (clear_user((void __user *) maddr + phdr->p_filesz,
				       excess1))
				return -EFAULT;
		}

#else
		if (excess > 0) {
			kdebug("clear[%d] ad=%lx sz=%lx",
			       loop, maddr + phdr->p_filesz, excess);
			if (clear_user((void *) maddr + phdr->p_filesz, excess))
				return -EFAULT;
		}
#endif

		if (mm) {
			if (phdr->p_flags & PF_X) {
				/* only the first executable segment sets the
				 * code extents */
				if (!mm->start_code) {
					mm->start_code = maddr;
					mm->end_code = maddr + phdr->p_memsz;
				}
			} else if (!mm->start_data) {
				mm->start_data = maddr;
				mm->end_data = maddr + phdr->p_memsz;
			}
		}

		seg++;
	}

	return 0;
}
6d8c4e3b
DH
1166
1167/*****************************************************************************/
1168/*
1169 * ELF-FDPIC core dumper
1170 *
1171 * Modelled on fs/exec.c:aout_core_dump()
1172 * Jeremy Fitzhardinge <jeremy@sw.oz.au>
1173 *
1174 * Modelled on fs/binfmt_elf.c core dumper
1175 */
698ba7b5 1176#ifdef CONFIG_ELF_CORE
6d8c4e3b 1177
6d8c4e3b
DH
1178/*
1179 * Decide whether a segment is worth dumping; default is yes to be
1180 * sure (missing info is worse than too much; etc).
1181 * Personally I'd include everything, and use the coredump limit...
1182 *
1183 * I think we should skip something. But I am not sure how. H.J.
1184 */
/*
 * Decide whether @vma should be written into the core file, consulting the
 * MMF_DUMP_* filter bits carried in @mm_flags.  Returns non-zero to dump.
 */
static int maydump(struct vm_area_struct *vma, unsigned long mm_flags)
{
	int dump_ok;

	/* Do not dump I/O mapped devices or special mappings */
	if (vma->vm_flags & VM_IO) {
		kdcore("%08lx: %08lx: no (IO)", vma->vm_start, vma->vm_flags);
		return 0;
	}

	/* If we may not read the contents, don't allow us to dump
	 * them either. "dump_write()" can't handle it anyway.
	 */
	if (!(vma->vm_flags & VM_READ)) {
		kdcore("%08lx: %08lx: no (!read)", vma->vm_start, vma->vm_flags);
		return 0;
	}

	/* support for DAX */
	if (vma_is_dax(vma)) {
		/* DAX mappings have their own pair of filter bits */
		if (vma->vm_flags & VM_SHARED) {
			dump_ok = test_bit(MMF_DUMP_DAX_SHARED, &mm_flags);
			kdcore("%08lx: %08lx: %s (DAX shared)", vma->vm_start,
			       vma->vm_flags, dump_ok ? "yes" : "no");
		} else {
			dump_ok = test_bit(MMF_DUMP_DAX_PRIVATE, &mm_flags);
			kdcore("%08lx: %08lx: %s (DAX private)", vma->vm_start,
			       vma->vm_flags, dump_ok ? "yes" : "no");
		}
		return dump_ok;
	}

	/* By default, dump shared memory if mapped from an anonymous file. */
	if (vma->vm_flags & VM_SHARED) {
		/* i_nlink == 0 means the backing file was unlinked, i.e. an
		 * anonymous shared mapping */
		if (file_inode(vma->vm_file)->i_nlink == 0) {
			dump_ok = test_bit(MMF_DUMP_ANON_SHARED, &mm_flags);
			kdcore("%08lx: %08lx: %s (share)", vma->vm_start,
			       vma->vm_flags, dump_ok ? "yes" : "no");
			return dump_ok;
		}

		dump_ok = test_bit(MMF_DUMP_MAPPED_SHARED, &mm_flags);
		kdcore("%08lx: %08lx: %s (share)", vma->vm_start,
		       vma->vm_flags, dump_ok ? "yes" : "no");
		return dump_ok;
	}

#ifdef CONFIG_MMU
	/* By default, if it hasn't been written to, don't write it out */
	if (!vma->anon_vma) {
		dump_ok = test_bit(MMF_DUMP_MAPPED_PRIVATE, &mm_flags);
		kdcore("%08lx: %08lx: %s (!anon)", vma->vm_start,
		       vma->vm_flags, dump_ok ? "yes" : "no");
		return dump_ok;
	}
#endif

	/* remaining case: private mapping with anonymous pages */
	dump_ok = test_bit(MMF_DUMP_ANON_PRIVATE, &mm_flags);
	kdcore("%08lx: %08lx: %s", vma->vm_start, vma->vm_flags,
	       dump_ok ? "yes" : "no");
	return dump_ok;
}
1247
/* An ELF note in memory, queued for later emission by writenote() */
struct memelfnote
{
	const char *name;	/* note name, e.g. "CORE" or "LINUX" */
	int type;		/* note type, e.g. NT_PRSTATUS */
	unsigned int datasz;	/* size of *data in bytes */
	void *data;		/* payload emitted after the padded name */
};
1256
1257static int notesize(struct memelfnote *en)
1258{
1259 int sz;
1260
1261 sz = sizeof(struct elf_note);
1262 sz += roundup(strlen(en->name) + 1, 4);
1263 sz += roundup(en->datasz, 4);
1264
1265 return sz;
1266}
1267
1268/* #define DEBUG */
1269
e6c1baa9 1270static int writenote(struct memelfnote *men, struct coredump_params *cprm)
05f47fda
DH
1271{
1272 struct elf_note en;
6d8c4e3b
DH
1273 en.n_namesz = strlen(men->name) + 1;
1274 en.n_descsz = men->datasz;
1275 en.n_type = men->type;
1276
e6c1baa9 1277 return dump_emit(cprm, &en, sizeof(en)) &&
22a8cb82
AV
1278 dump_emit(cprm, men->name, en.n_namesz) && dump_align(cprm, 4) &&
1279 dump_emit(cprm, men->data, men->datasz) && dump_align(cprm, 4);
6d8c4e3b 1280}
6d8c4e3b 1281
6d8c4e3b
DH
1282static inline void fill_elf_fdpic_header(struct elfhdr *elf, int segs)
1283{
1284 memcpy(elf->e_ident, ELFMAG, SELFMAG);
1285 elf->e_ident[EI_CLASS] = ELF_CLASS;
1286 elf->e_ident[EI_DATA] = ELF_DATA;
1287 elf->e_ident[EI_VERSION] = EV_CURRENT;
1288 elf->e_ident[EI_OSABI] = ELF_OSABI;
1289 memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);
1290
1291 elf->e_type = ET_CORE;
1292 elf->e_machine = ELF_ARCH;
1293 elf->e_version = EV_CURRENT;
1294 elf->e_entry = 0;
1295 elf->e_phoff = sizeof(struct elfhdr);
1296 elf->e_shoff = 0;
1297 elf->e_flags = ELF_FDPIC_CORE_EFLAGS;
1298 elf->e_ehsize = sizeof(struct elfhdr);
1299 elf->e_phentsize = sizeof(struct elf_phdr);
1300 elf->e_phnum = segs;
1301 elf->e_shentsize = 0;
1302 elf->e_shnum = 0;
1303 elf->e_shstrndx = 0;
1304 return;
1305}
1306
1307static inline void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, loff_t offset)
1308{
1309 phdr->p_type = PT_NOTE;
1310 phdr->p_offset = offset;
1311 phdr->p_vaddr = 0;
1312 phdr->p_paddr = 0;
1313 phdr->p_filesz = sz;
1314 phdr->p_memsz = 0;
1315 phdr->p_flags = 0;
1316 phdr->p_align = 0;
1317 return;
1318}
1319
1320static inline void fill_note(struct memelfnote *note, const char *name, int type,
1321 unsigned int sz, void *data)
1322{
1323 note->name = name;
1324 note->type = type;
1325 note->datasz = sz;
1326 note->data = data;
1327 return;
1328}
1329
1330/*
1331 * fill up all the fields in prstatus from the given task struct, except
3ad2f3fb 1332 * registers which need to be filled up separately.
6d8c4e3b
DH
1333 */
static void fill_prstatus(struct elf_prstatus *prstatus,
			  struct task_struct *p, long signr)
{
	prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
	prstatus->pr_sigpend = p->pending.signal.sig[0];
	prstatus->pr_sighold = p->blocked.sig[0];
	/* real_parent is RCU-protected; hold the read lock across the deref */
	rcu_read_lock();
	prstatus->pr_ppid = task_pid_vnr(rcu_dereference(p->real_parent));
	rcu_read_unlock();
	prstatus->pr_pid = task_pid_vnr(p);
	prstatus->pr_pgrp = task_pgrp_vnr(p);
	prstatus->pr_sid = task_session_vnr(p);
	if (thread_group_leader(p)) {
		struct task_cputime cputime;

		/*
		 * This is the record for the group leader.  It shows the
		 * group-wide total, not its individual thread total.
		 */
		thread_group_cputime(p, &cputime);
		prstatus->pr_utime = ns_to_timeval(cputime.utime);
		prstatus->pr_stime = ns_to_timeval(cputime.stime);
	} else {
		u64 utime, stime;

		/* non-leader threads report only their own CPU time */
		task_cputime(p, &utime, &stime);
		prstatus->pr_utime = ns_to_timeval(utime);
		prstatus->pr_stime = ns_to_timeval(stime);
	}
	prstatus->pr_cutime = ns_to_timeval(p->signal->cutime);
	prstatus->pr_cstime = ns_to_timeval(p->signal->cstime);

	/* FDPIC-specific: record the executable and interpreter loadmaps so
	 * a debugger can relocate symbols in the core */
	prstatus->pr_exec_fdpic_loadmap = p->mm->context.exec_fdpic_loadmap;
	prstatus->pr_interp_fdpic_loadmap = p->mm->context.interp_fdpic_loadmap;
}
1369
/*
 * Fill the NT_PRPSINFO note body for @p from its mm and credentials.
 * Returns 0 on success or -EFAULT if the argument string cannot be copied
 * from user space.
 */
static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
		       struct mm_struct *mm)
{
	const struct cred *cred;
	unsigned int i, len;

	/* first copy the parameters from user space */
	memset(psinfo, 0, sizeof(struct elf_prpsinfo));

	len = mm->arg_end - mm->arg_start;
	if (len >= ELF_PRARGSZ)
		len = ELF_PRARGSZ - 1;
	if (copy_from_user(&psinfo->pr_psargs,
			   (const char __user *) mm->arg_start, len))
		return -EFAULT;
	/* the argv strings are NUL-separated; turn them into one
	 * space-separated command line */
	for (i = 0; i < len; i++)
		if (psinfo->pr_psargs[i] == 0)
			psinfo->pr_psargs[i] = ' ';
	psinfo->pr_psargs[len] = 0;

	rcu_read_lock();
	psinfo->pr_ppid = task_pid_vnr(rcu_dereference(p->real_parent));
	rcu_read_unlock();
	psinfo->pr_pid = task_pid_vnr(p);
	psinfo->pr_pgrp = task_pgrp_vnr(p);
	psinfo->pr_sid = task_session_vnr(p);

	/* encode the task state as an index, then as a ps-style letter */
	i = p->state ? ffz(~p->state) + 1 : 0;
	psinfo->pr_state = i;
	psinfo->pr_sname = (i > 5) ? '.' : "RSDTZW"[i];
	psinfo->pr_zomb = psinfo->pr_sname == 'Z';
	psinfo->pr_nice = task_nice(p);
	psinfo->pr_flag = p->flags;
	rcu_read_lock();
	cred = __task_cred(p);
	SET_UID(psinfo->pr_uid, from_kuid_munged(cred->user_ns, cred->uid));
	SET_GID(psinfo->pr_gid, from_kgid_munged(cred->user_ns, cred->gid));
	rcu_read_unlock();
	strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname));

	return 0;
}
1412
/* Here is the structure in which status of each thread is captured. */
struct elf_thread_status
{
	struct list_head list;		/* link in the dumper's thread_list */
	struct elf_prstatus prstatus;	/* NT_PRSTATUS */
	elf_fpregset_t fpu;		/* NT_PRFPREG */
	struct task_struct *thread;	/* the thread this record describes */
#ifdef ELF_CORE_COPY_XFPREGS
	elf_fpxregset_t xfpu;		/* ELF_CORE_XFPREG_TYPE */
#endif
	struct memelfnote notes[3];	/* prstatus, fpu, (xfpu) notes */
	int num_notes;			/* how many of notes[] are valid */
};
1426
1427/*
1428 * In order to add the specific thread information for the elf file format,
1429 * we need to keep a linked list of every thread's pr_status and then create
1430 * a single section for them in the final core file.
1431 */
/*
 * Collect the per-thread notes (prstatus, FP regs, optionally extended FP
 * regs) for t->thread into t->notes[] and return the total note size in
 * bytes.
 */
static int elf_dump_thread_status(long signr, struct elf_thread_status *t)
{
	struct task_struct *p = t->thread;
	int sz = 0;

	t->num_notes = 0;

	fill_prstatus(&t->prstatus, p, signr);
	elf_core_copy_task_regs(p, &t->prstatus.pr_reg);

	fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus),
		  &t->prstatus);
	t->num_notes++;
	sz += notesize(&t->notes[0]);

	/* the FPU note is only emitted when the thread has FP state */
	t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, NULL, &t->fpu);
	if (t->prstatus.pr_fpvalid) {
		fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu),
			  &t->fpu);
		t->num_notes++;
		sz += notesize(&t->notes[1]);
	}

#ifdef ELF_CORE_COPY_XFPREGS
	if (elf_core_copy_task_xfpregs(p, &t->xfpu)) {
		fill_note(&t->notes[2], "LINUX", ELF_CORE_XFPREG_TYPE,
			  sizeof(t->xfpu), &t->xfpu);
		t->num_notes++;
		sz += notesize(&t->notes[2]);
	}
#endif
	return sz;
}
1465
8d9032bb
DH
1466static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
1467 elf_addr_t e_shoff, int segs)
1468{
1469 elf->e_shoff = e_shoff;
1470 elf->e_shentsize = sizeof(*shdr4extnum);
1471 elf->e_shnum = 1;
1472 elf->e_shstrndx = SHN_UNDEF;
1473
1474 memset(shdr4extnum, 0, sizeof(*shdr4extnum));
1475
1476 shdr4extnum->sh_type = SHT_NULL;
1477 shdr4extnum->sh_size = elf->e_shnum;
1478 shdr4extnum->sh_link = elf->e_shstrndx;
1479 shdr4extnum->sh_info = segs;
1480}
1481
6d8c4e3b
DH
1482/*
1483 * dump the segments for an MMU process
1484 */
/*
 * Write the contents of every dumpable VMA into the core file.  With an
 * MMU each page is fetched individually (holes become dump_skip); without
 * one the VMA is emitted in a single write.  Returns true on success.
 */
static bool elf_fdpic_dump_segments(struct coredump_params *cprm)
{
	struct vm_area_struct *vma;

	for (vma = current->mm->mmap; vma; vma = vma->vm_next) {
		unsigned long addr;

		if (!maydump(vma, cprm->mm_flags))
			continue;

#ifdef CONFIG_MMU
		for (addr = vma->vm_start; addr < vma->vm_end;
		     addr += PAGE_SIZE) {
			bool res;
			struct page *page = get_dump_page(addr);
			if (page) {
				void *kaddr = kmap(page);
				res = dump_emit(cprm, kaddr, PAGE_SIZE);
				kunmap(page);
				put_page(page);
			} else {
				/* absent page: leave a hole in the core */
				res = dump_skip(cprm, PAGE_SIZE);
			}
			if (!res)
				return false;
		}
#else
		/* no MMU: the whole VMA is directly addressable */
		if (!dump_emit(cprm, (void *) vma->vm_start,
			       vma->vm_end - vma->vm_start))
			return false;
#endif
	}
	return true;
}
6d8c4e3b 1519
8d9032bb
DH
1520static size_t elf_core_vma_data_size(unsigned long mm_flags)
1521{
1522 struct vm_area_struct *vma;
1523 size_t size = 0;
1524
47568d4c 1525 for (vma = current->mm->mmap; vma; vma = vma->vm_next)
8d9032bb
DH
1526 if (maydump(vma, mm_flags))
1527 size += vma->vm_end - vma->vm_start;
1528 return size;
1529}
1530
6d8c4e3b
DH
1531/*
1532 * Actual dumper
1533 *
1534 * This is a two-pass process; first we find the offsets of the bits,
1535 * and then they are actually written out. If we run out of core limit
1536 * we just truncate.
1537 */
static int elf_fdpic_core_dump(struct coredump_params *cprm)
{
#define NUM_NOTES 6
	int has_dumped = 0;
	mm_segment_t fs;
	int segs;
	int i;
	struct vm_area_struct *vma;
	struct elfhdr *elf = NULL;
	loff_t offset = 0, dataoff;
	int numnote;
	struct memelfnote *notes = NULL;
	struct elf_prstatus *prstatus = NULL;	/* NT_PRSTATUS */
	struct elf_prpsinfo *psinfo = NULL;	/* NT_PRPSINFO */
	LIST_HEAD(thread_list);
	struct list_head *t;
	elf_fpregset_t *fpu = NULL;
#ifdef ELF_CORE_COPY_XFPREGS
	elf_fpxregset_t *xfpu = NULL;
#endif
	int thread_status_size = 0;
	elf_addr_t *auxv;
	struct elf_phdr *phdr4note = NULL;
	struct elf_shdr *shdr4extnum = NULL;
	Elf_Half e_phnum;
	elf_addr_t e_shoff;
	struct core_thread *ct;
	struct elf_thread_status *tmp;

	/*
	 * We no longer stop all VM operations.
	 *
	 * This is because those proceses that could possibly change map_count
	 * or the mmap / vma pages are now blocked in do_exit on current
	 * finishing this core dump.
	 *
	 * Only ptrace can touch these memory addresses, but it doesn't change
	 * the map_count or the pages allocated. So no possibility of crashing
	 * exists while dumping the mm->vm_next areas to the core file.
	 */

	/* alloc memory for large data structures: too large to be on stack */
	elf = kmalloc(sizeof(*elf), GFP_KERNEL);
	if (!elf)
		goto cleanup;
	prstatus = kzalloc(sizeof(*prstatus), GFP_KERNEL);
	if (!prstatus)
		goto cleanup;
	psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL);
	if (!psinfo)
		goto cleanup;
	notes = kmalloc(NUM_NOTES * sizeof(struct memelfnote), GFP_KERNEL);
	if (!notes)
		goto cleanup;
	fpu = kmalloc(sizeof(*fpu), GFP_KERNEL);
	if (!fpu)
		goto cleanup;
#ifdef ELF_CORE_COPY_XFPREGS
	xfpu = kmalloc(sizeof(*xfpu), GFP_KERNEL);
	if (!xfpu)
		goto cleanup;
#endif

	/* build a status record for every other thread in the process;
	 * core_state->dumper lists the threads parked in exit_mm() */
	for (ct = current->mm->core_state->dumper.next;
	     ct; ct = ct->next) {
		tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
		if (!tmp)
			goto cleanup;

		tmp->thread = ct->task;
		list_add(&tmp->list, &thread_list);
	}

	list_for_each(t, &thread_list) {
		struct elf_thread_status *tmp;
		int sz;

		tmp = list_entry(t, struct elf_thread_status, list);
		sz = elf_dump_thread_status(cprm->siginfo->si_signo, tmp);
		thread_status_size += sz;
	}

	/* now collect the dump for the current */
	fill_prstatus(prstatus, current, cprm->siginfo->si_signo);
	elf_core_copy_regs(&prstatus->pr_reg, cprm->regs);

	/* one PT_LOAD phdr per VMA, plus any arch-specific extras */
	segs = current->mm->map_count;
	segs += elf_core_extra_phdrs();

	/* for notes section */
	segs++;

	/* If segs > PN_XNUM(0xffff), then e_phnum overflows. To avoid
	 * this, kernel supports extended numbering. Have a look at
	 * include/linux/elf.h for further information. */
	e_phnum = segs > PN_XNUM ? PN_XNUM : segs;

	/* Set up header */
	fill_elf_fdpic_header(elf, e_phnum);

	has_dumped = 1;
	/*
	 * Set up the notes in similar form to SVR4 core dumps made
	 * with info from their /proc.
	 */

	fill_note(notes + 0, "CORE", NT_PRSTATUS, sizeof(*prstatus), prstatus);
	fill_psinfo(psinfo, current->group_leader, current->mm);
	fill_note(notes + 1, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);

	numnote = 2;

	auxv = (elf_addr_t *) current->mm->saved_auxv;

	/* count the auxv entries up to and including the AT_NULL terminator */
	i = 0;
	do
		i += 2;
	while (auxv[i - 2] != AT_NULL);
	fill_note(&notes[numnote++], "CORE", NT_AUXV,
		  i * sizeof(elf_addr_t), auxv);

	/* Try to dump the FPU. */
	if ((prstatus->pr_fpvalid =
	     elf_core_copy_task_fpregs(current, cprm->regs, fpu)))
		fill_note(notes + numnote++,
			  "CORE", NT_PRFPREG, sizeof(*fpu), fpu);
#ifdef ELF_CORE_COPY_XFPREGS
	if (elf_core_copy_task_xfpregs(current, xfpu))
		fill_note(notes + numnote++,
			  "LINUX", ELF_CORE_XFPREG_TYPE, sizeof(*xfpu), xfpu);
#endif

	fs = get_fs();
	set_fs(KERNEL_DS);

	/* first pass: lay out the file and compute each piece's offset */
	offset += sizeof(*elf);				/* Elf header */
	offset += segs * sizeof(struct elf_phdr);	/* Program headers */

	/* Write notes phdr entry */
	{
		int sz = 0;

		for (i = 0; i < numnote; i++)
			sz += notesize(notes + i);

		sz += thread_status_size;

		phdr4note = kmalloc(sizeof(*phdr4note), GFP_KERNEL);
		if (!phdr4note)
			goto end_coredump;

		fill_elf_note_phdr(phdr4note, sz, offset);
		offset += sz;
	}

	/* Page-align dumped data */
	dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);

	offset += elf_core_vma_data_size(cprm->mm_flags);
	offset += elf_core_extra_data_size();
	e_shoff = offset;

	if (e_phnum == PN_XNUM) {
		shdr4extnum = kmalloc(sizeof(*shdr4extnum), GFP_KERNEL);
		if (!shdr4extnum)
			goto end_coredump;
		fill_extnum_info(elf, shdr4extnum, e_shoff, segs);
	}

	offset = dataoff;

	/* second pass: actually emit everything in file order */
	if (!dump_emit(cprm, elf, sizeof(*elf)))
		goto end_coredump;

	if (!dump_emit(cprm, phdr4note, sizeof(*phdr4note)))
		goto end_coredump;

	/* write program headers for segments dump */
	for (vma = current->mm->mmap; vma; vma = vma->vm_next) {
		struct elf_phdr phdr;
		size_t sz;

		sz = vma->vm_end - vma->vm_start;

		phdr.p_type = PT_LOAD;
		phdr.p_offset = offset;
		phdr.p_vaddr = vma->vm_start;
		phdr.p_paddr = 0;
		/* filtered-out VMAs keep their phdr but carry no file data */
		phdr.p_filesz = maydump(vma, cprm->mm_flags) ? sz : 0;
		phdr.p_memsz = sz;
		offset += phdr.p_filesz;
		phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
		if (vma->vm_flags & VM_WRITE)
			phdr.p_flags |= PF_W;
		if (vma->vm_flags & VM_EXEC)
			phdr.p_flags |= PF_X;
		phdr.p_align = ELF_EXEC_PAGESIZE;

		if (!dump_emit(cprm, &phdr, sizeof(phdr)))
			goto end_coredump;
	}

	if (!elf_core_write_extra_phdrs(cprm, offset))
		goto end_coredump;

	/* write out the notes section */
	for (i = 0; i < numnote; i++)
		if (!writenote(notes + i, cprm))
			goto end_coredump;

	/* write out the thread status notes section */
	list_for_each(t, &thread_list) {
		struct elf_thread_status *tmp =
			list_entry(t, struct elf_thread_status, list);

		for (i = 0; i < tmp->num_notes; i++)
			if (!writenote(&tmp->notes[i], cprm))
				goto end_coredump;
	}

	/* pad out to the page-aligned start of the segment data */
	if (!dump_skip(cprm, dataoff - cprm->pos))
		goto end_coredump;

	if (!elf_fdpic_dump_segments(cprm))
		goto end_coredump;

	if (!elf_core_write_extra_data(cprm))
		goto end_coredump;

	if (e_phnum == PN_XNUM) {
		if (!dump_emit(cprm, shdr4extnum, sizeof(*shdr4extnum)))
			goto end_coredump;
	}

	if (cprm->file->f_pos != offset) {
		/* Sanity check */
		printk(KERN_WARNING
		       "elf_core_dump: file->f_pos (%lld) != offset (%lld)\n",
		       cprm->file->f_pos, offset);
	}

end_coredump:
	set_fs(fs);

cleanup:
	while (!list_empty(&thread_list)) {
		struct list_head *tmp = thread_list.next;
		list_del(tmp);
		kfree(list_entry(tmp, struct elf_thread_status, list));
	}
	kfree(phdr4note);
	kfree(elf);
	kfree(prstatus);
	kfree(psinfo);
	kfree(notes);
	kfree(fpu);
	kfree(shdr4extnum);
#ifdef ELF_CORE_COPY_XFPREGS
	kfree(xfpu);
#endif
	return has_dumped;
#undef NUM_NOTES
}
1801
698ba7b5 1802#endif /* CONFIG_ELF_CORE */