/*
 * linux/fs/binfmt_elf.c
 *
 * These are the functions used to load ELF format executables as used
 * on SVr4 machines.  Information on the format may be found in the book
 * "UNIX SYSTEM V RELEASE 4 Programmers Guide: Ansi C and Programming Support
 * Tools".
 *
 * Copyright 1993, 1994: Eric Youngdale (ericy@cais.com).
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/binfmts.h>
#include <linux/string.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/personality.h>
#include <linux/elfcore.h>
#include <linux/init.h>
#include <linux/highuid.h>
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <linux/elf-randomize.h>
#include <linux/utsname.h>
#include <linux/coredump.h>
#include <linux/sched.h>
#include <linux/sched/coredump.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/cred.h>
#include <linux/dax.h>
#include <linux/uaccess.h>
#include <asm/param.h>
#include <asm/page.h>

#ifndef user_long_t
#define user_long_t long
#endif
#ifndef user_siginfo_t
#define user_siginfo_t siginfo_t
#endif

/* That's for binfmt_elf_fdpic to deal with */
#ifndef elf_check_fdpic
#define elf_check_fdpic(ex) false
#endif

static int load_elf_binary(struct linux_binprm *bprm);

#ifdef CONFIG_USELIB
static int load_elf_library(struct file *);
#else
#define load_elf_library NULL
#endif

/*
 * If we don't support core dumping, then supply a NULL so we
 * don't even try.
 */
#ifdef CONFIG_ELF_CORE
static int elf_core_dump(struct coredump_params *cprm);
#else
#define elf_core_dump NULL
#endif

#if ELF_EXEC_PAGESIZE > PAGE_SIZE
#define ELF_MIN_ALIGN	ELF_EXEC_PAGESIZE
#else
#define ELF_MIN_ALIGN	PAGE_SIZE
#endif

#ifndef ELF_CORE_EFLAGS
#define ELF_CORE_EFLAGS	0
#endif

#define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1))
#define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
#define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))

static struct linux_binfmt elf_format = {
	.module		= THIS_MODULE,
	.load_binary	= load_elf_binary,
	.load_shlib	= load_elf_library,
	.core_dump	= elf_core_dump,
	.min_coredump	= ELF_EXEC_PAGESIZE,
};

#define BAD_ADDR(x) ((unsigned long)(x) >= TASK_SIZE)

static int set_brk(unsigned long start, unsigned long end, int prot)
{
	start = ELF_PAGEALIGN(start);
	end = ELF_PAGEALIGN(end);
	if (end > start) {
		/*
		 * Map the last of the bss segment.
		 * If the header is requesting these pages to be
		 * executable, honour that (ppc32 needs this).
		 */
		int error = vm_brk_flags(start, end - start,
				prot & PROT_EXEC ? VM_EXEC : 0);
		if (error)
			return error;
	}
	current->mm->start_brk = current->mm->brk = end;
	return 0;
}

/*
 * We need to explicitly zero any fractional pages
 * after the data section (i.e. bss).  This would
 * contain the junk from the file that should not
 * be in memory.
 */
static int padzero(unsigned long elf_bss)
{
	unsigned long nbyte;

	nbyte = ELF_PAGEOFFSET(elf_bss);
	if (nbyte) {
		nbyte = ELF_MIN_ALIGN - nbyte;
		if (clear_user((void __user *) elf_bss, nbyte))
			return -EFAULT;
	}
	return 0;
}

/* Let's use some macros to make this stack manipulation a little clearer */
#ifdef CONFIG_STACK_GROWSUP
#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) + (items))
#define STACK_ROUND(sp, items) \
	((15 + (unsigned long) ((sp) + (items))) &~ 15UL)
#define STACK_ALLOC(sp, len) ({ \
	elf_addr_t __user *old_sp = (elf_addr_t __user *)sp; sp += len; \
	old_sp; })
#else
#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) - (items))
#define STACK_ROUND(sp, items) \
	(((unsigned long) (sp - items)) &~ 15UL)
#define STACK_ALLOC(sp, len) ({ sp -= len ; sp; })
#endif

#ifndef ELF_BASE_PLATFORM
/*
 * AT_BASE_PLATFORM indicates the "real" hardware/microarchitecture.
 * If the arch defines ELF_BASE_PLATFORM (in asm/elf.h), the value
 * will be copied to the user stack in the same manner as AT_PLATFORM.
 */
#define ELF_BASE_PLATFORM NULL
#endif

static int
create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
		unsigned long load_addr, unsigned long interp_load_addr)
{
	unsigned long p = bprm->p;
	int argc = bprm->argc;
	int envc = bprm->envc;
	elf_addr_t __user *sp;
	elf_addr_t __user *u_platform;
	elf_addr_t __user *u_base_platform;
	elf_addr_t __user *u_rand_bytes;
	const char *k_platform = ELF_PLATFORM;
	const char *k_base_platform = ELF_BASE_PLATFORM;
	unsigned char k_rand_bytes[16];
	int items;
	elf_addr_t *elf_info;
	int ei_index = 0;
	const struct cred *cred = current_cred();
	struct vm_area_struct *vma;

	/*
	 * In some cases (e.g. Hyper-Threading), we want to avoid L1
	 * evictions by the processes running on the same package. One
	 * thing we can do is to shuffle the initial stack for them.
	 */

	p = arch_align_stack(p);

	/*
	 * If this architecture has a platform capability string, copy it
	 * to userspace.  In some cases (Sparc), this info is impossible
	 * for userspace to get any other way, in others (i386) it is
	 * merely difficult.
	 */
	u_platform = NULL;
	if (k_platform) {
		size_t len = strlen(k_platform) + 1;

		u_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
		if (__copy_to_user(u_platform, k_platform, len))
			return -EFAULT;
	}

	/*
	 * If this architecture has a "base" platform capability
	 * string, copy it to userspace.
	 */
	u_base_platform = NULL;
	if (k_base_platform) {
		size_t len = strlen(k_base_platform) + 1;

		u_base_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
		if (__copy_to_user(u_base_platform, k_base_platform, len))
			return -EFAULT;
	}

	/*
	 * Generate 16 random bytes for userspace PRNG seeding.
	 */
	get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
	u_rand_bytes = (elf_addr_t __user *)
		       STACK_ALLOC(p, sizeof(k_rand_bytes));
	if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
		return -EFAULT;

	/* Create the ELF interpreter info */
	elf_info = (elf_addr_t *)current->mm->saved_auxv;
	/* update AT_VECTOR_SIZE_BASE if the number of NEW_AUX_ENT() changes */
#define NEW_AUX_ENT(id, val) \
	do { \
		elf_info[ei_index++] = id; \
		elf_info[ei_index++] = val; \
	} while (0)

#ifdef ARCH_DLINFO
	/*
	 * ARCH_DLINFO must come first so PPC can do its special alignment of
	 * AUXV.
	 * update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT() in
	 * ARCH_DLINFO changes
	 */
	ARCH_DLINFO;
#endif
	NEW_AUX_ENT(AT_HWCAP, ELF_HWCAP);
	NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE);
	NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC);
	NEW_AUX_ENT(AT_PHDR, load_addr + exec->e_phoff);
	NEW_AUX_ENT(AT_PHENT, sizeof(struct elf_phdr));
	NEW_AUX_ENT(AT_PHNUM, exec->e_phnum);
	NEW_AUX_ENT(AT_BASE, interp_load_addr);
	NEW_AUX_ENT(AT_FLAGS, 0);
	NEW_AUX_ENT(AT_ENTRY, exec->e_entry);
	NEW_AUX_ENT(AT_UID, from_kuid_munged(cred->user_ns, cred->uid));
	NEW_AUX_ENT(AT_EUID, from_kuid_munged(cred->user_ns, cred->euid));
	NEW_AUX_ENT(AT_GID, from_kgid_munged(cred->user_ns, cred->gid));
	NEW_AUX_ENT(AT_EGID, from_kgid_munged(cred->user_ns, cred->egid));
	NEW_AUX_ENT(AT_SECURE, bprm->secureexec);
	NEW_AUX_ENT(AT_RANDOM, (elf_addr_t)(unsigned long)u_rand_bytes);
#ifdef ELF_HWCAP2
	NEW_AUX_ENT(AT_HWCAP2, ELF_HWCAP2);
#endif
	NEW_AUX_ENT(AT_EXECFN, bprm->exec);
	if (k_platform) {
		NEW_AUX_ENT(AT_PLATFORM,
			    (elf_addr_t)(unsigned long)u_platform);
	}
	if (k_base_platform) {
		NEW_AUX_ENT(AT_BASE_PLATFORM,
			    (elf_addr_t)(unsigned long)u_base_platform);
	}
	if (bprm->interp_flags & BINPRM_FLAGS_EXECFD) {
		NEW_AUX_ENT(AT_EXECFD, bprm->interp_data);
	}
#undef NEW_AUX_ENT
	/* AT_NULL is zero; clear the rest too */
	memset(&elf_info[ei_index], 0,
	       sizeof current->mm->saved_auxv - ei_index * sizeof elf_info[0]);

	/* And advance past the AT_NULL entry. */
	ei_index += 2;

	sp = STACK_ADD(p, ei_index);

	items = (argc + 1) + (envc + 1) + 1;
	bprm->p = STACK_ROUND(sp, items);

	/* Point sp at the lowest address on the stack */
#ifdef CONFIG_STACK_GROWSUP
	sp = (elf_addr_t __user *)bprm->p - items - ei_index;
	bprm->exec = (unsigned long)sp; /* XXX: PARISC HACK */
#else
	sp = (elf_addr_t __user *)bprm->p;
#endif

	/*
	 * Grow the stack manually; some architectures have a limit on how
	 * far ahead a user-space access may be in order to grow the stack.
	 */
	vma = find_extend_vma(current->mm, bprm->p);
	if (!vma)
		return -EFAULT;

	/* Now, let's put argc (and argv, envp if appropriate) on the stack */
	if (__put_user(argc, sp++))
		return -EFAULT;

	/* Populate list of argv pointers back to argv strings. */
	p = current->mm->arg_end = current->mm->arg_start;
	while (argc-- > 0) {
		size_t len;
		if (__put_user((elf_addr_t)p, sp++))
			return -EFAULT;
		len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
		if (!len || len > MAX_ARG_STRLEN)
			return -EINVAL;
		p += len;
	}
	if (__put_user(0, sp++))
		return -EFAULT;
	current->mm->arg_end = p;

	/* Populate list of envp pointers back to envp strings. */
	current->mm->env_end = current->mm->env_start = p;
	while (envc-- > 0) {
		size_t len;
		if (__put_user((elf_addr_t)p, sp++))
			return -EFAULT;
		len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
		if (!len || len > MAX_ARG_STRLEN)
			return -EINVAL;
		p += len;
	}
	if (__put_user(0, sp++))
		return -EFAULT;
	current->mm->env_end = p;

	/* Put the elf_info on the stack in the right place.  */
	if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
		return -EFAULT;
	return 0;
}

#ifndef elf_map

static unsigned long elf_map(struct file *filep, unsigned long addr,
		const struct elf_phdr *eppnt, int prot, int type,
		unsigned long total_size)
{
	unsigned long map_addr;
	unsigned long size = eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr);
	unsigned long off = eppnt->p_offset - ELF_PAGEOFFSET(eppnt->p_vaddr);
	addr = ELF_PAGESTART(addr);
	size = ELF_PAGEALIGN(size);

	/*
	 * mmap() will return -EINVAL if given a zero size, but a
	 * segment with zero filesize is perfectly valid.
	 */
	if (!size)
		return addr;

	/*
	 * total_size is the size of the ELF (interpreter) image.
	 * The _first_ mmap needs to know the full size, otherwise
	 * randomization might put this image into an overlapping
	 * position with the ELF binary image. (since size < total_size)
	 * So we first map the 'big' image - and unmap the remainder at
	 * the end. (which unmap is needed for ELF images with holes.)
	 */
	if (total_size) {
		total_size = ELF_PAGEALIGN(total_size);
		map_addr = vm_mmap(filep, addr, total_size, prot, type, off);
		if (!BAD_ADDR(map_addr))
			vm_munmap(map_addr+size, total_size-size);
	} else
		map_addr = vm_mmap(filep, addr, size, prot, type, off);

	if ((type & MAP_FIXED_NOREPLACE) &&
	    PTR_ERR((void *)map_addr) == -EEXIST)
		pr_info("%d (%s): Uhuuh, elf segment at %px requested but the memory is mapped already\n",
			task_pid_nr(current), current->comm, (void *)addr);

	return map_addr;
}

#endif /* !elf_map */

static unsigned long total_mapping_size(const struct elf_phdr *cmds, int nr)
{
	int i, first_idx = -1, last_idx = -1;

	for (i = 0; i < nr; i++) {
		if (cmds[i].p_type == PT_LOAD) {
			last_idx = i;
			if (first_idx == -1)
				first_idx = i;
		}
	}
	if (first_idx == -1)
		return 0;

	return cmds[last_idx].p_vaddr + cmds[last_idx].p_memsz -
				ELF_PAGESTART(cmds[first_idx].p_vaddr);
}

/**
 * load_elf_phdrs() - load ELF program headers
 * @elf_ex:   ELF header of the binary whose program headers should be loaded
 * @elf_file: the opened ELF binary file
 *
 * Loads ELF program headers from the binary file elf_file, which has the ELF
 * header pointed to by elf_ex, into a newly allocated array. The caller is
 * responsible for freeing the allocated data. Returns NULL upon failure.
 */
static struct elf_phdr *load_elf_phdrs(const struct elfhdr *elf_ex,
				       struct file *elf_file)
{
	struct elf_phdr *elf_phdata = NULL;
	int retval, err = -1;
	loff_t pos = elf_ex->e_phoff;
	unsigned int size;

	/*
	 * If the size of this structure has changed, then punt, since
	 * we will be doing the wrong thing.
	 */
	if (elf_ex->e_phentsize != sizeof(struct elf_phdr))
		goto out;

	/* Sanity check the number of program headers... */
	/* ...and their total size. */
	size = sizeof(struct elf_phdr) * elf_ex->e_phnum;
	if (size == 0 || size > 65536 || size > ELF_MIN_ALIGN)
		goto out;

	elf_phdata = kmalloc(size, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	/* Read in the program headers */
	retval = kernel_read(elf_file, elf_phdata, size, &pos);
	if (retval != size) {
		err = (retval < 0) ? retval : -EIO;
		goto out;
	}

	/* Success! */
	err = 0;
out:
	if (err) {
		kfree(elf_phdata);
		elf_phdata = NULL;
	}
	return elf_phdata;
}

#ifndef CONFIG_ARCH_BINFMT_ELF_STATE

/**
 * struct arch_elf_state - arch-specific ELF loading state
 *
 * This structure is used to preserve architecture specific data during
 * the loading of an ELF file, throughout the checking of architecture
 * specific ELF headers & through to the point where the ELF load is
 * known to be proceeding (i.e. SET_PERSONALITY).
 *
 * This implementation is a dummy for architectures which require no
 * specific state.
 */
struct arch_elf_state {
};

#define INIT_ARCH_ELF_STATE {}

/**
 * arch_elf_pt_proc() - check a PT_LOPROC..PT_HIPROC ELF program header
 * @ehdr:	The main ELF header
 * @phdr:	The program header to check
 * @elf:	The open ELF file
 * @is_interp:	True if the phdr is from the interpreter of the ELF being
 *		loaded, else false.
 * @state:	Architecture-specific state preserved throughout the process
 *		of loading the ELF.
 *
 * Inspects the program header phdr to validate its correctness and/or
 * suitability for the system. Called once per ELF program header in the
 * range PT_LOPROC to PT_HIPROC, for both the ELF being loaded and its
 * interpreter.
 *
 * Return: Zero to proceed with the ELF load, non-zero to fail the ELF load
 *         with that return code.
 */
static inline int arch_elf_pt_proc(struct elfhdr *ehdr,
				   struct elf_phdr *phdr,
				   struct file *elf, bool is_interp,
				   struct arch_elf_state *state)
{
	/* Dummy implementation, always proceed */
	return 0;
}

/**
 * arch_check_elf() - check an ELF executable
 * @ehdr:	The main ELF header
 * @has_interp:	True if the ELF has an interpreter, else false.
 * @interp_ehdr: The interpreter's ELF header
 * @state:	Architecture-specific state preserved throughout the process
 *		of loading the ELF.
 *
 * Provides a final opportunity for architecture code to reject the loading
 * of the ELF & cause an exec syscall to return an error. This is called after
 * all program headers to be checked by arch_elf_pt_proc have been.
 *
 * Return: Zero to proceed with the ELF load, non-zero to fail the ELF load
 *         with that return code.
 */
static inline int arch_check_elf(struct elfhdr *ehdr, bool has_interp,
				 struct elfhdr *interp_ehdr,
				 struct arch_elf_state *state)
{
	/* Dummy implementation, always proceed */
	return 0;
}

#endif /* !CONFIG_ARCH_BINFMT_ELF_STATE */

static inline int make_prot(u32 p_flags)
{
	int prot = 0;

	if (p_flags & PF_R)
		prot |= PROT_READ;
	if (p_flags & PF_W)
		prot |= PROT_WRITE;
	if (p_flags & PF_X)
		prot |= PROT_EXEC;
	return prot;
}

/*
 * This is much more generalized than the library routine read function,
 * so we keep this separate.  Technically the library read function
 * is only provided so that we can read a.out libraries that have
 * an ELF header.
 */
static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
		struct file *interpreter, unsigned long *interp_map_addr,
		unsigned long no_base, struct elf_phdr *interp_elf_phdata)
{
	struct elf_phdr *eppnt;
	unsigned long load_addr = 0;
	int load_addr_set = 0;
	unsigned long last_bss = 0, elf_bss = 0;
	int bss_prot = 0;
	unsigned long error = ~0UL;
	unsigned long total_size;
	int i;

	/* First of all, some simple consistency checks */
	if (interp_elf_ex->e_type != ET_EXEC &&
	    interp_elf_ex->e_type != ET_DYN)
		goto out;
	if (!elf_check_arch(interp_elf_ex) ||
	    elf_check_fdpic(interp_elf_ex))
		goto out;
	if (!interpreter->f_op->mmap)
		goto out;

	total_size = total_mapping_size(interp_elf_phdata,
					interp_elf_ex->e_phnum);
	if (!total_size) {
		error = -EINVAL;
		goto out;
	}

	eppnt = interp_elf_phdata;
	for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
		if (eppnt->p_type == PT_LOAD) {
			int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
			int elf_prot = make_prot(eppnt->p_flags);
			unsigned long vaddr = 0;
			unsigned long k, map_addr;

			vaddr = eppnt->p_vaddr;
			if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
				elf_type |= MAP_FIXED_NOREPLACE;
			else if (no_base && interp_elf_ex->e_type == ET_DYN)
				load_addr = -vaddr;

			map_addr = elf_map(interpreter, load_addr + vaddr,
					eppnt, elf_prot, elf_type, total_size);
			total_size = 0;
			if (!*interp_map_addr)
				*interp_map_addr = map_addr;
			error = map_addr;
			if (BAD_ADDR(map_addr))
				goto out;

			if (!load_addr_set &&
			    interp_elf_ex->e_type == ET_DYN) {
				load_addr = map_addr - ELF_PAGESTART(vaddr);
				load_addr_set = 1;
			}

			/*
			 * Check to see if the section's size will overflow the
			 * allowed task size. Note that p_filesz must always be
			 * <= p_memsz so it's only necessary to check p_memsz.
			 */
			k = load_addr + eppnt->p_vaddr;
			if (BAD_ADDR(k) ||
			    eppnt->p_filesz > eppnt->p_memsz ||
			    eppnt->p_memsz > TASK_SIZE ||
			    TASK_SIZE - eppnt->p_memsz < k) {
				error = -ENOMEM;
				goto out;
			}

			/*
			 * Find the end of the file mapping for this phdr, and
			 * keep track of the largest address we see for this.
			 */
			k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
			if (k > elf_bss)
				elf_bss = k;

			/*
			 * Do the same thing for the memory mapping - between
			 * elf_bss and last_bss is the bss section.
			 */
			k = load_addr + eppnt->p_vaddr + eppnt->p_memsz;
			if (k > last_bss) {
				last_bss = k;
				bss_prot = elf_prot;
			}
		}
	}

	/*
	 * Now fill out the bss section: first pad the last page from
	 * the file up to the page boundary, and zero it from elf_bss
	 * up to the end of the page.
	 */
	if (padzero(elf_bss)) {
		error = -EFAULT;
		goto out;
	}
	/*
	 * Next, align both the file and mem bss up to the page size,
	 * since this is where elf_bss was just zeroed up to, and where
	 * last_bss will end after the vm_brk_flags() below.
	 */
	elf_bss = ELF_PAGEALIGN(elf_bss);
	last_bss = ELF_PAGEALIGN(last_bss);
	/* Finally, if there is still more bss to allocate, do it. */
	if (last_bss > elf_bss) {
		error = vm_brk_flags(elf_bss, last_bss - elf_bss,
				bss_prot & PROT_EXEC ? VM_EXEC : 0);
		if (error)
			goto out;
	}

	error = load_addr;
out:
	return error;
}

/*
 * These are the functions used to load ELF style executables and shared
 * libraries.  There is no binary dependent code anywhere else.
 */

#ifndef STACK_RND_MASK
#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12))	/* 8MB of VA */
#endif

static unsigned long randomize_stack_top(unsigned long stack_top)
{
	unsigned long random_variable = 0;

	if (current->flags & PF_RANDOMIZE) {
		random_variable = get_random_long();
		random_variable &= STACK_RND_MASK;
		random_variable <<= PAGE_SHIFT;
	}
#ifdef CONFIG_STACK_GROWSUP
	return PAGE_ALIGN(stack_top) + random_variable;
#else
	return PAGE_ALIGN(stack_top) - random_variable;
#endif
}

static int load_elf_binary(struct linux_binprm *bprm)
{
	struct file *interpreter = NULL; /* to shut gcc up */
	unsigned long load_addr = 0, load_bias = 0;
	int load_addr_set = 0;
	unsigned long error;
	struct elf_phdr *elf_ppnt, *elf_phdata, *interp_elf_phdata = NULL;
	unsigned long elf_bss, elf_brk;
	int bss_prot = 0;
	int retval, i;
	unsigned long elf_entry;
	unsigned long interp_load_addr = 0;
	unsigned long start_code, end_code, start_data, end_data;
	unsigned long reloc_func_desc __maybe_unused = 0;
	int executable_stack = EXSTACK_DEFAULT;
	struct {
		struct elfhdr elf_ex;
		struct elfhdr interp_elf_ex;
	} *loc;
	struct arch_elf_state arch_state = INIT_ARCH_ELF_STATE;
	struct pt_regs *regs;

	loc = kmalloc(sizeof(*loc), GFP_KERNEL);
	if (!loc) {
		retval = -ENOMEM;
		goto out_ret;
	}

	/* Get the exec-header */
	loc->elf_ex = *((struct elfhdr *)bprm->buf);

	retval = -ENOEXEC;
	/* First of all, some simple consistency checks */
	if (memcmp(loc->elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
		goto out;

	if (loc->elf_ex.e_type != ET_EXEC && loc->elf_ex.e_type != ET_DYN)
		goto out;
	if (!elf_check_arch(&loc->elf_ex))
		goto out;
	if (elf_check_fdpic(&loc->elf_ex))
		goto out;
	if (!bprm->file->f_op->mmap)
		goto out;

	elf_phdata = load_elf_phdrs(&loc->elf_ex, bprm->file);
	if (!elf_phdata)
		goto out;

	elf_ppnt = elf_phdata;
	for (i = 0; i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
		char *elf_interpreter;
		loff_t pos;

		if (elf_ppnt->p_type != PT_INTERP)
			continue;

		/*
		 * This is the program interpreter used for shared libraries -
		 * for now assume that this is an a.out format binary.
		 */
		retval = -ENOEXEC;
		if (elf_ppnt->p_filesz > PATH_MAX || elf_ppnt->p_filesz < 2)
			goto out_free_ph;

		retval = -ENOMEM;
		elf_interpreter = kmalloc(elf_ppnt->p_filesz, GFP_KERNEL);
		if (!elf_interpreter)
			goto out_free_ph;

		pos = elf_ppnt->p_offset;
		retval = kernel_read(bprm->file, elf_interpreter,
				     elf_ppnt->p_filesz, &pos);
		if (retval != elf_ppnt->p_filesz) {
			if (retval >= 0)
				retval = -EIO;
			goto out_free_interp;
		}
		/* make sure path is NULL terminated */
		retval = -ENOEXEC;
		if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0')
			goto out_free_interp;

		interpreter = open_exec(elf_interpreter);
		kfree(elf_interpreter);
		retval = PTR_ERR(interpreter);
		if (IS_ERR(interpreter))
			goto out_free_ph;

		/*
		 * If the binary is not readable then enforce mm->dumpable = 0
		 * regardless of the interpreter's permissions.
		 */
		would_dump(bprm, interpreter);

		/* Get the exec headers */
		pos = 0;
		retval = kernel_read(interpreter, &loc->interp_elf_ex,
				     sizeof(loc->interp_elf_ex), &pos);
		if (retval != sizeof(loc->interp_elf_ex)) {
			if (retval >= 0)
				retval = -EIO;
			goto out_free_dentry;
		}

		break;

out_free_interp:
		kfree(elf_interpreter);
		goto out_free_ph;
	}

	elf_ppnt = elf_phdata;
	for (i = 0; i < loc->elf_ex.e_phnum; i++, elf_ppnt++)
		switch (elf_ppnt->p_type) {
		case PT_GNU_STACK:
			if (elf_ppnt->p_flags & PF_X)
				executable_stack = EXSTACK_ENABLE_X;
			else
				executable_stack = EXSTACK_DISABLE_X;
			break;

		case PT_LOPROC ... PT_HIPROC:
			retval = arch_elf_pt_proc(&loc->elf_ex, elf_ppnt,
						  bprm->file, false,
						  &arch_state);
			if (retval)
				goto out_free_dentry;
			break;
		}

	/* Some simple consistency checks for the interpreter */
	if (interpreter) {
		retval = -ELIBBAD;
		/* Not an ELF interpreter */
		if (memcmp(loc->interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
			goto out_free_dentry;
		/* Verify the interpreter has a valid arch */
		if (!elf_check_arch(&loc->interp_elf_ex) ||
		    elf_check_fdpic(&loc->interp_elf_ex))
			goto out_free_dentry;

		/* Load the interpreter program headers */
		interp_elf_phdata = load_elf_phdrs(&loc->interp_elf_ex,
						   interpreter);
		if (!interp_elf_phdata)
			goto out_free_dentry;

		/* Pass PT_LOPROC..PT_HIPROC headers to arch code */
		elf_ppnt = interp_elf_phdata;
		for (i = 0; i < loc->interp_elf_ex.e_phnum; i++, elf_ppnt++)
			switch (elf_ppnt->p_type) {
			case PT_LOPROC ... PT_HIPROC:
				retval = arch_elf_pt_proc(&loc->interp_elf_ex,
							  elf_ppnt, interpreter,
							  true, &arch_state);
				if (retval)
					goto out_free_dentry;
				break;
			}
	}

	/*
	 * Allow arch code to reject the ELF at this point, whilst it's
	 * still possible to return an error to the code that invoked
	 * the exec syscall.
	 */
	retval = arch_check_elf(&loc->elf_ex,
				!!interpreter, &loc->interp_elf_ex,
				&arch_state);
	if (retval)
		goto out_free_dentry;

	/* Flush all traces of the currently running executable */
	retval = flush_old_exec(bprm);
	if (retval)
		goto out_free_dentry;

	/*
	 * Do this immediately, since STACK_TOP as used in setup_arg_pages
	 * may depend on the personality.
	 */
	SET_PERSONALITY2(loc->elf_ex, &arch_state);
	if (elf_read_implies_exec(loc->elf_ex, executable_stack))
		current->personality |= READ_IMPLIES_EXEC;

	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		current->flags |= PF_RANDOMIZE;

	setup_new_exec(bprm);
	install_exec_creds(bprm);

	/*
	 * Do this so that we can load the interpreter, if need be.  We will
	 * change some of these later.
	 */
	retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP),
				 executable_stack);
	if (retval < 0)
		goto out_free_dentry;

	elf_bss = 0;
	elf_brk = 0;

	start_code = ~0UL;
	end_code = 0;
	start_data = 0;
	end_data = 0;

	/*
	 * Now we do a little grungy work by mmapping the ELF image into
	 * the correct location in memory.
	 */
	for (i = 0, elf_ppnt = elf_phdata;
	     i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
		int elf_prot, elf_flags, elf_fixed = MAP_FIXED_NOREPLACE;
		unsigned long k, vaddr;
		unsigned long total_size = 0;

		if (elf_ppnt->p_type != PT_LOAD)
			continue;

		if (unlikely(elf_brk > elf_bss)) {
			unsigned long nbyte;

			/*
			 * There was a PT_LOAD segment with p_memsz > p_filesz
			 * before this one.  Map anonymous pages, if needed,
			 * and clear the area.
			 */
			retval = set_brk(elf_bss + load_bias,
					 elf_brk + load_bias,
					 bss_prot);
			if (retval)
				goto out_free_dentry;
			nbyte = ELF_PAGEOFFSET(elf_bss);
			if (nbyte) {
				nbyte = ELF_MIN_ALIGN - nbyte;
				if (nbyte > elf_brk - elf_bss)
					nbyte = elf_brk - elf_bss;
				if (clear_user((void __user *)elf_bss +
							load_bias, nbyte)) {
					/*
					 * This bss-zeroing can fail if the ELF
					 * file specifies odd protections. So
					 * we don't check the return value
					 */
				}
			}

			/*
			 * Some binaries have overlapping elf segments and then
			 * we have to forcefully map over an existing mapping
			 * e.g. over this newly established brk mapping.
			 */
			elf_fixed = MAP_FIXED;
		}

		elf_prot = make_prot(elf_ppnt->p_flags);

		elf_flags = MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE;

		vaddr = elf_ppnt->p_vaddr;
		/*
		 * If we are loading ET_EXEC or we have already performed
		 * the ET_DYN load_addr calculations, proceed normally.
		 */
		if (loc->elf_ex.e_type == ET_EXEC || load_addr_set) {
			elf_flags |= elf_fixed;
		} else if (loc->elf_ex.e_type == ET_DYN) {
			/*
			 * This logic is run once for the first LOAD Program
			 * Header for ET_DYN binaries to calculate the
			 * randomization (load_bias) for all the LOAD
			 * Program Headers, and to calculate the entire
			 * size of the ELF mapping (total_size). (Note that
			 * load_addr_set is set to true later once the
			 * initial mapping is performed.)
			 *
			 * There are effectively two types of ET_DYN
			 * binaries: programs (i.e. PIE: ET_DYN with INTERP)
			 * and loaders (ET_DYN without INTERP, since they
			 * _are_ the ELF interpreter). The loaders must
			 * be loaded away from programs since the program
			 * may otherwise collide with the loader (especially
			 * for ET_EXEC which does not have a randomized
			 * position). For example to handle invocations of
			 * "./ld.so someprog" to test out a new version of
			 * the loader, the subsequent program that the
			 * loader loads must avoid the loader itself, so
			 * they cannot share the same load range. Sufficient
			 * room for the brk must be allocated with the
			 * loader as well, since brk must be available with
			 * the loader.
			 *
			 * Therefore, programs are loaded offset from
			 * ELF_ET_DYN_BASE and loaders are loaded into the
			 * independently randomized mmap region (0 load_bias
			 * without MAP_FIXED).
			 */
			if (interpreter) {
				load_bias = ELF_ET_DYN_BASE;
				if (current->flags & PF_RANDOMIZE)
					load_bias += arch_mmap_rnd();
				elf_flags |= elf_fixed;
			} else
				load_bias = 0;

			/*
			 * Since load_bias is used for all subsequent loading
			 * calculations, we must lower it by the first vaddr
			 * so that the remaining calculations based on the
			 * ELF vaddrs will be correctly offset. The result
			 * is then page aligned.
			 */
			load_bias = ELF_PAGESTART(load_bias - vaddr);

			total_size = total_mapping_size(elf_phdata,
							loc->elf_ex.e_phnum);
			if (!total_size) {
				retval = -EINVAL;
				goto out_free_dentry;
			}
		}

		error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
				elf_prot, elf_flags, total_size);
		if (BAD_ADDR(error)) {
			retval = IS_ERR((void *)error) ?
				PTR_ERR((void *)error) : -EINVAL;
			goto out_free_dentry;
		}

		if (!load_addr_set) {
			load_addr_set = 1;
			load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset);
			if (loc->elf_ex.e_type == ET_DYN) {
				load_bias += error -
					     ELF_PAGESTART(load_bias + vaddr);
				load_addr += load_bias;
				reloc_func_desc = load_bias;
			}
		}
		k = elf_ppnt->p_vaddr;
		if (k < start_code)
			start_code = k;
		if (start_data < k)
			start_data = k;

		/*
		 * Check to see if the section's size will overflow the
		 * allowed task size. Note that p_filesz must always be
		 * <= p_memsz so it is only necessary to check p_memsz.
		 */
		if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
		    elf_ppnt->p_memsz > TASK_SIZE ||
		    TASK_SIZE - elf_ppnt->p_memsz < k) {
			/* set_brk can never work. Avoid overflows. */
			retval = -EINVAL;
			goto out_free_dentry;
		}

		k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;

		if (k > elf_bss)
			elf_bss = k;
		if ((elf_ppnt->p_flags & PF_X) && end_code < k)
			end_code = k;
		if (end_data < k)
			end_data = k;
		k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
		if (k > elf_brk) {
			bss_prot = elf_prot;
			elf_brk = k;
		}
	}

	loc->elf_ex.e_entry += load_bias;
	elf_bss += load_bias;
	elf_brk += load_bias;
	start_code += load_bias;
	end_code += load_bias;
	start_data += load_bias;
	end_data += load_bias;

	/*
	 * Calling set_brk effectively mmaps the pages that we need
	 * for the bss and break sections.  We must do this before
	 * mapping in the interpreter, to make sure it doesn't wind
	 * up getting placed where the bss needs to go.
	 */
	retval = set_brk(elf_bss, elf_brk, bss_prot);
	if (retval)
		goto out_free_dentry;
	if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
		retval = -EFAULT; /* Nobody gets to see this, but.. */
		goto out_free_dentry;
	}

	if (interpreter) {
		unsigned long interp_map_addr = 0;

		elf_entry = load_elf_interp(&loc->interp_elf_ex,
					    interpreter,
					    &interp_map_addr,
					    load_bias, interp_elf_phdata);
		if (!IS_ERR((void *)elf_entry)) {
			/*
			 * load_elf_interp() returns relocation
			 * adjustment
			 */
			interp_load_addr = elf_entry;
			elf_entry += loc->interp_elf_ex.e_entry;
		}
		if (BAD_ADDR(elf_entry)) {
			retval = IS_ERR((void *)elf_entry) ?
					(int)elf_entry : -EINVAL;
			goto out_free_dentry;
		}
		reloc_func_desc = interp_load_addr;

		allow_write_access(interpreter);
		fput(interpreter);
	} else {
		elf_entry = loc->elf_ex.e_entry;
		if (BAD_ADDR(elf_entry)) {
			retval = -EINVAL;
			goto out_free_dentry;
		}
	}

	kfree(interp_elf_phdata);
	kfree(elf_phdata);

	set_binfmt(&elf_format);

#ifdef ARCH_HAS_SETUP_ADDITIONAL_PAGES
	retval = arch_setup_additional_pages(bprm, !!interpreter);
	if (retval < 0)
		goto out;
#endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */

	retval = create_elf_tables(bprm, &loc->elf_ex,
				   load_addr, interp_load_addr);
	if (retval < 0)
		goto out;
	/* N.B. passed_fileno might not be initialized? */
	current->mm->end_code = end_code;
	current->mm->start_code = start_code;
	current->mm->start_data = start_data;
	current->mm->end_data = end_data;
	current->mm->start_stack = bprm->p;

	if ((current->flags & PF_RANDOMIZE) && (randomize_va_space > 1)) {
		current->mm->brk = current->mm->start_brk =
			arch_randomize_brk(current->mm);
#ifdef compat_brk_randomized
		current->brk_randomized = 1;
#endif
	}

	if (current->personality & MMAP_PAGE_ZERO) {
		/*
		 * Why this, you ask???  Well SVr4 maps page 0 as read-only,
		 * and some applications "depend" upon this behavior.
		 * Since we do not have the power to recompile these, we
		 * emulate the SVr4 behavior.  Sigh.
		 */
		error = vm_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
				MAP_FIXED | MAP_PRIVATE, 0);
	}

	regs = current_pt_regs();
#ifdef ELF_PLAT_INIT
	/*
	 * The ABI may specify that certain registers be set up in special
	 * ways (on i386 %edx is the address of a DT_FINI function, for
	 * example).  In addition, it may also specify (e.g. PowerPC64 ELF)
	 * that the e_entry field is the address of the function descriptor
	 * for the startup routine, rather than the address of the startup
	 * routine itself.  This macro performs whatever initialization to
	 * the regs structure is required as well as any relocations to the
	 * function descriptor entries when executing dynamically linked apps.
	 */
	ELF_PLAT_INIT(regs, reloc_func_desc);
#endif

	finalize_exec(bprm);
	start_thread(regs, elf_entry, bprm->p);
	retval = 0;
out:
	kfree(loc);
out_ret:
	return retval;

	/* error cleanup */
out_free_dentry:
	kfree(interp_elf_phdata);
	allow_write_access(interpreter);
	if (interpreter)
		fput(interpreter);
out_free_ph:
	kfree(elf_phdata);
	goto out;
}

#ifdef CONFIG_USELIB
/*
 * This is really simpleminded and specialized - we are loading an
 * a.out library that is given an ELF header.
 */
static int load_elf_library(struct file *file)
{
	struct elf_phdr *elf_phdata;
	struct elf_phdr *eppnt;
	unsigned long elf_bss, bss, len;
	int retval, error, i, j;
	struct elfhdr elf_ex;
	loff_t pos = 0;

	error = -ENOEXEC;
	retval = kernel_read(file, &elf_ex, sizeof(elf_ex), &pos);
	if (retval != sizeof(elf_ex))
		goto out;

	if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
		goto out;

	/* First of all, some simple consistency checks */
	if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
	    !elf_check_arch(&elf_ex) || !file->f_op->mmap)
		goto out;
	if (elf_check_fdpic(&elf_ex))
		goto out;

	/* Now read in all of the header information */

	j = sizeof(struct elf_phdr) * elf_ex.e_phnum;
	/* j < ELF_MIN_ALIGN because elf_ex.e_phnum <= 2 */

	error = -ENOMEM;
	elf_phdata = kmalloc(j, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	eppnt = elf_phdata;
	error = -ENOEXEC;
	pos = elf_ex.e_phoff;
	retval = kernel_read(file, eppnt, j, &pos);
	if (retval != j)
		goto out_free_ph;

	for (j = 0, i = 0; i < elf_ex.e_phnum; i++)
		if ((eppnt + i)->p_type == PT_LOAD)
			j++;
	if (j != 1)
		goto out_free_ph;

	while (eppnt->p_type != PT_LOAD)
		eppnt++;

	/* Now use mmap to map the library into memory. */
	error = vm_mmap(file,
			ELF_PAGESTART(eppnt->p_vaddr),
			(eppnt->p_filesz +
			 ELF_PAGEOFFSET(eppnt->p_vaddr)),
			PROT_READ | PROT_WRITE | PROT_EXEC,
			MAP_FIXED_NOREPLACE | MAP_PRIVATE | MAP_DENYWRITE,
			(eppnt->p_offset -
			 ELF_PAGEOFFSET(eppnt->p_vaddr)));
	if (error != ELF_PAGESTART(eppnt->p_vaddr))
		goto out_free_ph;

	elf_bss = eppnt->p_vaddr + eppnt->p_filesz;
	if (padzero(elf_bss)) {
		error = -EFAULT;
		goto out_free_ph;
	}

	len = ELF_PAGEALIGN(eppnt->p_filesz + eppnt->p_vaddr);
	bss = ELF_PAGEALIGN(eppnt->p_memsz + eppnt->p_vaddr);
	if (bss > len) {
		error = vm_brk(len, bss - len);
		if (error)
			goto out_free_ph;
	}
	error = 0;

out_free_ph:
	kfree(elf_phdata);
out:
	return error;
}
#endif /* #ifdef CONFIG_USELIB */

#ifdef CONFIG_ELF_CORE
/*
 * ELF core dumper
 *
 * Modelled on fs/exec.c:aout_core_dump()
 * Jeremy Fitzhardinge <jeremy@sw.oz.au>
 */

/*
 * The purpose of always_dump_vma() is to make sure that special kernel mappings
 * that are useful for post-mortem analysis are included in every core dump.
 * In that way we ensure that the core dump is fully interpretable later
 * without matching up the same kernel and hardware config to see what PC values
 * meant. These special mappings include - vDSO, vsyscall, and other
 * architecture specific mappings
 */
static bool always_dump_vma(struct vm_area_struct *vma)
{
	/* Any vsyscall mappings? */
	if (vma == get_gate_vma(vma->vm_mm))
		return true;

	/*
	 * Assume that all vmas with a .name op should always be dumped.
	 * If this changes, a new vm_ops field can easily be added.
	 */
	if (vma->vm_ops && vma->vm_ops->name && vma->vm_ops->name(vma))
		return true;

	/*
	 * arch_vma_name() returns non-NULL for special architecture mappings,
	 * such as vDSO sections.
	 */
	if (arch_vma_name(vma))
		return true;

	return false;
}

/*
 * Decide what to dump of a segment, part, all or none.
 */
static unsigned long vma_dump_size(struct vm_area_struct *vma,
				   unsigned long mm_flags)
{
#define FILTER(type)	(mm_flags & (1UL << MMF_DUMP_##type))

	/* always dump the vdso and vsyscall sections */
	if (always_dump_vma(vma))
		goto whole;

	if (vma->vm_flags & VM_DONTDUMP)
		return 0;

	/* support for DAX */
	if (vma_is_dax(vma)) {
		if ((vma->vm_flags & VM_SHARED) && FILTER(DAX_SHARED))
			goto whole;
		if (!(vma->vm_flags & VM_SHARED) && FILTER(DAX_PRIVATE))
			goto whole;
		return 0;
	}

	/* Hugetlb memory check */
	if (vma->vm_flags & VM_HUGETLB) {
		if ((vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_SHARED))
			goto whole;
		if (!(vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_PRIVATE))
			goto whole;
		return 0;
	}

	/* Do not dump I/O mapped devices or special mappings */
	if (vma->vm_flags & VM_IO)
		return 0;

	/* By default, dump shared memory if mapped from an anonymous file. */
	if (vma->vm_flags & VM_SHARED) {
		if (file_inode(vma->vm_file)->i_nlink == 0 ?
		    FILTER(ANON_SHARED) : FILTER(MAPPED_SHARED))
			goto whole;
		return 0;
	}

	/* Dump segments that have been written to.  */
	if (vma->anon_vma && FILTER(ANON_PRIVATE))
		goto whole;
	if (vma->vm_file == NULL)
		return 0;

	if (FILTER(MAPPED_PRIVATE))
		goto whole;

	/*
	 * If this looks like the beginning of a DSO or executable mapping,
	 * check for an ELF header.  If we find one, dump the first page to
	 * aid in determining what was mapped here.
	 */
	if (FILTER(ELF_HEADERS) &&
	    vma->vm_pgoff == 0 && (vma->vm_flags & VM_READ)) {
		u32 __user *header = (u32 __user *) vma->vm_start;
		u32 word;
		mm_segment_t fs = get_fs();
		/*
		 * Doing it this way gets the constant folded by GCC.
		 */
		union {
			u32 cmp;
			char elfmag[SELFMAG];
		} magic;
		BUILD_BUG_ON(SELFMAG != sizeof word);
		magic.elfmag[EI_MAG0] = ELFMAG0;
		magic.elfmag[EI_MAG1] = ELFMAG1;
		magic.elfmag[EI_MAG2] = ELFMAG2;
		magic.elfmag[EI_MAG3] = ELFMAG3;
		/*
		 * Switch to the user "segment" for get_user(),
		 * then put back what elf_core_dump() had in place.
		 */
		set_fs(USER_DS);
		if (unlikely(get_user(word, header)))
			word = 0;
		set_fs(fs);
		if (word == magic.cmp)
			return PAGE_SIZE;
	}

#undef FILTER

	return 0;

whole:
	return vma->vm_end - vma->vm_start;
}

/* An ELF note in memory */
struct memelfnote
{
	const char *name;
	int type;
	unsigned int datasz;
	void *data;
};

static int notesize(struct memelfnote *en)
{
	int sz;

	sz = sizeof(struct elf_note);
	sz += roundup(strlen(en->name) + 1, 4);
	sz += roundup(en->datasz, 4);

	return sz;
}

static int writenote(struct memelfnote *men, struct coredump_params *cprm)
{
	struct elf_note en;
	en.n_namesz = strlen(men->name) + 1;
	en.n_descsz = men->datasz;
	en.n_type = men->type;

	return dump_emit(cprm, &en, sizeof(en)) &&
	    dump_emit(cprm, men->name, en.n_namesz) && dump_align(cprm, 4) &&
	    dump_emit(cprm, men->data, men->datasz) && dump_align(cprm, 4);
}

static void fill_elf_header(struct elfhdr *elf, int segs,
			    u16 machine, u32 flags)
{
	memset(elf, 0, sizeof(*elf));

	memcpy(elf->e_ident, ELFMAG, SELFMAG);
	elf->e_ident[EI_CLASS] = ELF_CLASS;
	elf->e_ident[EI_DATA] = ELF_DATA;
	elf->e_ident[EI_VERSION] = EV_CURRENT;
	elf->e_ident[EI_OSABI] = ELF_OSABI;

	elf->e_type = ET_CORE;
	elf->e_machine = machine;
	elf->e_version = EV_CURRENT;
	elf->e_phoff = sizeof(struct elfhdr);
	elf->e_flags = flags;
	elf->e_ehsize = sizeof(struct elfhdr);
	elf->e_phentsize = sizeof(struct elf_phdr);
	elf->e_phnum = segs;
}

static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, loff_t offset)
{
	phdr->p_type = PT_NOTE;
	phdr->p_offset = offset;
	phdr->p_vaddr = 0;
	phdr->p_paddr = 0;
	phdr->p_filesz = sz;
	phdr->p_memsz = 0;
	phdr->p_flags = 0;
	phdr->p_align = 0;
}

static void fill_note(struct memelfnote *note, const char *name, int type,
		unsigned int sz, void *data)
{
	note->name = name;
	note->type = type;
	note->datasz = sz;
	note->data = data;
}

/*
 * fill up all the fields in prstatus from the given task struct, except
 * registers which need to be filled up separately.
 */
static void fill_prstatus(struct elf_prstatus *prstatus,
		struct task_struct *p, long signr)
{
	prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
	prstatus->pr_sigpend = p->pending.signal.sig[0];
	prstatus->pr_sighold = p->blocked.sig[0];
	rcu_read_lock();
	prstatus->pr_ppid = task_pid_vnr(rcu_dereference(p->real_parent));
	rcu_read_unlock();
	prstatus->pr_pid = task_pid_vnr(p);
	prstatus->pr_pgrp = task_pgrp_vnr(p);
	prstatus->pr_sid = task_session_vnr(p);
	if (thread_group_leader(p)) {
		struct task_cputime cputime;

		/*
		 * This is the record for the group leader.  It shows the
		 * group-wide total, not its individual thread total.
		 */
		thread_group_cputime(p, &cputime);
		prstatus->pr_utime = ns_to_timeval(cputime.utime);
		prstatus->pr_stime = ns_to_timeval(cputime.stime);
	} else {
		u64 utime, stime;

		task_cputime(p, &utime, &stime);
		prstatus->pr_utime = ns_to_timeval(utime);
		prstatus->pr_stime = ns_to_timeval(stime);
	}

	prstatus->pr_cutime = ns_to_timeval(p->signal->cutime);
	prstatus->pr_cstime = ns_to_timeval(p->signal->cstime);
}

static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
		       struct mm_struct *mm)
{
	const struct cred *cred;
	unsigned int i, len;

	/* first copy the parameters from user space */
	memset(psinfo, 0, sizeof(struct elf_prpsinfo));

	len = mm->arg_end - mm->arg_start;
	if (len >= ELF_PRARGSZ)
		len = ELF_PRARGSZ - 1;
	if (copy_from_user(&psinfo->pr_psargs,
			   (const char __user *)mm->arg_start, len))
		return -EFAULT;
	for (i = 0; i < len; i++)
		if (psinfo->pr_psargs[i] == 0)
			psinfo->pr_psargs[i] = ' ';
	psinfo->pr_psargs[len] = 0;

	rcu_read_lock();
	psinfo->pr_ppid = task_pid_vnr(rcu_dereference(p->real_parent));
	rcu_read_unlock();
	psinfo->pr_pid = task_pid_vnr(p);
	psinfo->pr_pgrp = task_pgrp_vnr(p);
	psinfo->pr_sid = task_session_vnr(p);

	i = p->state ? ffz(~p->state) + 1 : 0;
	psinfo->pr_state = i;
	psinfo->pr_sname = (i > 5) ? '.' : "RSDTZW"[i];
	psinfo->pr_zomb = psinfo->pr_sname == 'Z';
	psinfo->pr_nice = task_nice(p);
	psinfo->pr_flag = p->flags;
	rcu_read_lock();
	cred = __task_cred(p);
	SET_UID(psinfo->pr_uid, from_kuid_munged(cred->user_ns, cred->uid));
	SET_GID(psinfo->pr_gid, from_kgid_munged(cred->user_ns, cred->gid));
	rcu_read_unlock();
	strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname));

	return 0;
}

1564 | static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm) | |
1565 | { | |
1566 | elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv; | |
1567 | int i = 0; | |
1568 | do | |
1569 | i += 2; | |
1570 | while (auxv[i - 2] != AT_NULL); | |
1571 | fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv); | |
1572 | } | |
1573 | ||
1574 | static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata, | |
1575 | const kernel_siginfo_t *siginfo) | |
1576 | { | |
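/*
 * copy_siginfo_to_user() expects a user-space pointer; widen the
 * address limit temporarily so it accepts the kernel buffer csigdata.
 */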
1577 | mm_segment_t old_fs = get_fs(); | |
1578 | set_fs(KERNEL_DS); | |
1579 | copy_siginfo_to_user((user_siginfo_t __user *) csigdata, siginfo); | |
1580 | set_fs(old_fs); | |
1581 | fill_note(note, "CORE", NT_SIGINFO, sizeof(*csigdata), csigdata); | |
1582 | } | |
1583 | ||
1584 | #define MAX_FILE_NOTE_SIZE (4*1024*1024) | |
1585 | /* | |
1586 | * Format of NT_FILE note: | |
1587 | * | |
1588 | * long count -- how many files are mapped | |
1589 | * long page_size -- units for file_ofs | |
1590 | * array of [COUNT] elements of | |
1591 | * long start | |
1592 | * long end | |
1593 | * long file_ofs | |
1594 | * followed by COUNT filenames in ASCII: "FILE1" NUL "FILE2" NUL... | |
1595 | */ | |
1596 | static int fill_files_note(struct memelfnote *note) | |
1597 | { | |
1598 | struct vm_area_struct *vma; | |
1599 | unsigned count, size, names_ofs, remaining, n; | |
1600 | user_long_t *data; | |
1601 | user_long_t *start_end_ofs; | |
1602 | char *name_base, *name_curpos; | |
1603 | ||
1604 | /* *Estimated* file count and total data size needed */ | |
1605 | count = current->mm->map_count; | |
1606 | if (count > UINT_MAX / 64) | |
1607 | return -EINVAL; | |
1608 | size = count * 64; | |
1609 | ||
1610 | names_ofs = (2 + 3 * count) * sizeof(data[0]); | |
1611 | alloc: | |
1612 | if (size >= MAX_FILE_NOTE_SIZE) /* paranoia check */ | |
1613 | return -EINVAL; | |
1614 | size = round_up(size, PAGE_SIZE); | |
1615 | data = kvmalloc(size, GFP_KERNEL); | |
1616 | if (ZERO_OR_NULL_PTR(data)) | |
1617 | return -ENOMEM; | |
1618 | ||
1619 | start_end_ofs = data + 2; | |
1620 | name_base = name_curpos = ((char *)data) + names_ofs; | |
1621 | remaining = size - names_ofs; | |
1622 | count = 0; | |
1623 | for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) { | |
1624 | struct file *file; | |
1625 | const char *filename; | |
1626 | ||
1627 | file = vma->vm_file; | |
1628 | if (!file) | |
1629 | continue; | |
1630 | filename = file_path(file, name_curpos, remaining); | |
1631 | if (IS_ERR(filename)) { | |
1632 | if (PTR_ERR(filename) == -ENAMETOOLONG) { | |
1633 | kvfree(data); | |
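/* The name didn't fit: grow the buffer by 25% and retry. */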
1634 | size = size * 5 / 4; | |
1635 | goto alloc; | |
1636 | } | |
1637 | continue; | |
1638 | } | |
1639 | ||
1640 | /* file_path() fills at the end, move name down */ | |
1641 | /* n = strlen(filename) + 1: */ | |
1642 | n = (name_curpos + remaining) - filename; | |
1643 | remaining = filename - name_curpos; | |
1644 | memmove(name_curpos, filename, n); | |
1645 | name_curpos += n; | |
1646 | ||
1647 | *start_end_ofs++ = vma->vm_start; | |
1648 | *start_end_ofs++ = vma->vm_end; | |
1649 | *start_end_ofs++ = vma->vm_pgoff; | |
1650 | count++; | |
1651 | } | |
1652 | ||
1653 | /* Now we know exact count of files, can store it */ | |
1654 | data[0] = count; | |
1655 | data[1] = PAGE_SIZE; | |
1656 | /* | |
1657 | * The count is usually less than current->mm->map_count, | |
1658 | * so we need to move the filenames down. | |
1659 | */ | |
1660 | n = current->mm->map_count - count; | |
1661 | if (n != 0) { | |
1662 | unsigned shift_bytes = n * 3 * sizeof(data[0]); | |
1663 | memmove(name_base - shift_bytes, name_base, | |
1664 | name_curpos - name_base); | |
1665 | name_curpos -= shift_bytes; | |
1666 | } | |
1667 | ||
1668 | size = name_curpos - (char *)data; | |
1669 | fill_note(note, "CORE", NT_FILE, size, data); | |
1670 | return 0; | |
1671 | } | |
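
/*
 * Illustrative sketch only, not part of this file: decoding an NT_FILE
 * payload laid out as in the format comment above. The kernel never
 * parses this note; the pr_debug() calls stand in for a hypothetical
 * consumer.
 */
static void __maybe_unused parse_files_note(const user_long_t *data)
{
	user_long_t count = data[0];
	user_long_t page_size = data[1];
	const user_long_t *ent = data + 2;
	const char *name = (const char *)(data + 2 + 3 * count);
	user_long_t i;

	for (i = 0; i < count; i++, ent += 3) {
		/* ent[0] = start, ent[1] = end, ent[2] = file_ofs in pages */
		pr_debug("%lx-%lx @ %lx: %s\n",
			 (unsigned long)ent[0], (unsigned long)ent[1],
			 (unsigned long)(ent[2] * page_size), name);
		name += strlen(name) + 1;	/* names are NUL-separated */
	}
}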
1672 | ||
1673 | #ifdef CORE_DUMP_USE_REGSET | |
1674 | #include <linux/regset.h> | |
1675 | ||
1676 | struct elf_thread_core_info { | |
1677 | struct elf_thread_core_info *next; | |
1678 | struct task_struct *task; | |
1679 | struct elf_prstatus prstatus; | |
1680 | struct memelfnote notes[0]; | |
1681 | }; | |
1682 | ||
1683 | struct elf_note_info { | |
1684 | struct elf_thread_core_info *thread; | |
1685 | struct memelfnote psinfo; | |
1686 | struct memelfnote signote; | |
1687 | struct memelfnote auxv; | |
1688 | struct memelfnote files; | |
1689 | user_siginfo_t csigdata; | |
1690 | size_t size; | |
1691 | int thread_notes; | |
1692 | }; | |
1693 | ||
1694 | /* | |
1695 | * When a regset has a writeback hook, we call it on each thread before | |
1696 | * dumping user memory. On register window machines, this makes sure the | |
1697 | * user memory backing the register data is up to date before we read it. | |
1698 | */ | |
1699 | static void do_thread_regset_writeback(struct task_struct *task, | |
1700 | const struct user_regset *regset) | |
1701 | { | |
1702 | if (regset->writeback) | |
1703 | regset->writeback(task, regset, 1); | |
1704 | } | |
1705 | ||
1706 | #ifndef PRSTATUS_SIZE | |
1707 | #define PRSTATUS_SIZE(S, R) sizeof(S) | |
1708 | #endif | |
1709 | ||
1710 | #ifndef SET_PR_FPVALID | |
1711 | #define SET_PR_FPVALID(S, V, R) ((S)->pr_fpvalid = (V)) | |
1712 | #endif | |
1713 | ||
1714 | static int fill_thread_core_info(struct elf_thread_core_info *t, | |
1715 | const struct user_regset_view *view, | |
1716 | long signr, size_t *total) | |
1717 | { | |
1718 | unsigned int i; | |
1719 | unsigned int regset0_size = regset_size(t->task, &view->regsets[0]); | |
1720 | ||
1721 | /* | |
1722 | * NT_PRSTATUS is the one special case, because the regset data | |
1723 | * goes into the pr_reg field inside the note contents, rather | |
1724 | * than being the whole note contents. We fill the rest in here. | |
1725 | * We assume that regset 0 is NT_PRSTATUS. | |
1726 | */ | |
1727 | fill_prstatus(&t->prstatus, t->task, signr); | |
1728 | (void) view->regsets[0].get(t->task, &view->regsets[0], 0, regset0_size, | |
1729 | &t->prstatus.pr_reg, NULL); | |
1730 | ||
1731 | fill_note(&t->notes[0], "CORE", NT_PRSTATUS, | |
1732 | PRSTATUS_SIZE(t->prstatus, regset0_size), &t->prstatus); | |
1733 | *total += notesize(&t->notes[0]); | |
1734 | ||
1735 | do_thread_regset_writeback(t->task, &view->regsets[0]); | |
1736 | ||
1737 | /* | |
1738 | * Each other regset might generate a note too. For each regset | |
1739 | * that has no core_note_type or is inactive, we leave t->notes[i] | |
1740 | * all zero and we'll know to skip writing it later. | |
1741 | */ | |
1742 | for (i = 1; i < view->n; ++i) { | |
1743 | const struct user_regset *regset = &view->regsets[i]; | |
1744 | do_thread_regset_writeback(t->task, regset); | |
1745 | if (regset->core_note_type && regset->get && | |
1746 | (!regset->active || regset->active(t->task, regset) > 0)) { | |
1747 | int ret; | |
1748 | size_t size = regset_size(t->task, regset); | |
1749 | void *data = kmalloc(size, GFP_KERNEL); | |
1750 | if (unlikely(!data)) | |
1751 | return 0; | |
1752 | ret = regset->get(t->task, regset, | |
1753 | 0, size, data, NULL); | |
1754 | if (unlikely(ret)) | |
1755 | kfree(data); | |
1756 | else { | |
1757 | if (regset->core_note_type != NT_PRFPREG) | |
1758 | fill_note(&t->notes[i], "LINUX", | |
1759 | regset->core_note_type, | |
1760 | size, data); | |
1761 | else { | |
1762 | SET_PR_FPVALID(&t->prstatus, | |
1763 | 1, regset0_size); | |
1764 | fill_note(&t->notes[i], "CORE", | |
1765 | NT_PRFPREG, size, data); | |
1766 | } | |
1767 | *total += notesize(&t->notes[i]); | |
1768 | } | |
1769 | } | |
1770 | } | |
1771 | ||
1772 | return 1; | |
1773 | } | |
1774 | ||
1775 | static int fill_note_info(struct elfhdr *elf, int phdrs, | |
1776 | struct elf_note_info *info, | |
1777 | const kernel_siginfo_t *siginfo, struct pt_regs *regs) | |
1778 | { | |
1779 | struct task_struct *dump_task = current; | |
1780 | const struct user_regset_view *view = task_user_regset_view(dump_task); | |
1781 | struct elf_thread_core_info *t; | |
1782 | struct elf_prpsinfo *psinfo; | |
1783 | struct core_thread *ct; | |
1784 | unsigned int i; | |
1785 | ||
1786 | info->size = 0; | |
1787 | info->thread = NULL; | |
1788 | ||
1789 | psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL); | |
1790 | if (psinfo == NULL) { | |
1791 | info->psinfo.data = NULL; /* So we don't free this wrongly */ | |
1792 | return 0; | |
1793 | } | |
1794 | ||
1795 | fill_note(&info->psinfo, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo); | |
1796 | ||
1797 | /* | |
1798 | * Figure out how many notes we're going to need for each thread. | |
1799 | */ | |
1800 | info->thread_notes = 0; | |
1801 | for (i = 0; i < view->n; ++i) | |
1802 | if (view->regsets[i].core_note_type != 0) | |
1803 | ++info->thread_notes; | |
1804 | ||
1805 | /* | |
1806 | * Sanity check. We rely on regset 0 being NT_PRSTATUS, | |
1807 | * since it is our one special case. | |
1808 | */ | |
1809 | if (unlikely(info->thread_notes == 0) || | |
1810 | unlikely(view->regsets[0].core_note_type != NT_PRSTATUS)) { | |
1811 | WARN_ON(1); | |
1812 | return 0; | |
1813 | } | |
1814 | ||
1815 | /* | |
1816 | * Initialize the ELF file header. | |
1817 | */ | |
1818 | fill_elf_header(elf, phdrs, | |
1819 | view->e_machine, view->e_flags); | |
1820 | ||
1821 | /* | |
1822 | * Allocate a structure for each thread. | |
1823 | */ | |
1824 | for (ct = &dump_task->mm->core_state->dumper; ct; ct = ct->next) { | |
1825 | t = kzalloc(offsetof(struct elf_thread_core_info, | |
1826 | notes[info->thread_notes]), | |
1827 | GFP_KERNEL); | |
1828 | if (unlikely(!t)) | |
1829 | return 0; | |
1830 | ||
1831 | t->task = ct->task; | |
1832 | if (ct->task == dump_task || !info->thread) { | |
1833 | t->next = info->thread; | |
1834 | info->thread = t; | |
1835 | } else { | |
1836 | /* | |
1837 | * Make sure to keep the original task at | |
1838 | * the head of the list. | |
1839 | */ | |
1840 | t->next = info->thread->next; | |
1841 | info->thread->next = t; | |
1842 | } | |
1843 | } | |
1844 | ||
1845 | /* | |
1846 | * Now fill in each thread's information. | |
1847 | */ | |
1848 | for (t = info->thread; t != NULL; t = t->next) | |
1849 | if (!fill_thread_core_info(t, view, siginfo->si_signo, &info->size)) | |
1850 | return 0; | |
1851 | ||
1852 | /* | |
1853 | * Fill in the two process-wide notes. | |
1854 | */ | |
1855 | fill_psinfo(psinfo, dump_task->group_leader, dump_task->mm); | |
1856 | info->size += notesize(&info->psinfo); | |
1857 | ||
1858 | fill_siginfo_note(&info->signote, &info->csigdata, siginfo); | |
1859 | info->size += notesize(&info->signote); | |
1860 | ||
1861 | fill_auxv_note(&info->auxv, current->mm); | |
1862 | info->size += notesize(&info->auxv); | |
1863 | ||
1864 | if (fill_files_note(&info->files) == 0) | |
1865 | info->size += notesize(&info->files); | |
1866 | ||
1867 | return 1; | |
1868 | } | |
1869 | ||
1870 | static size_t get_note_info_size(struct elf_note_info *info) | |
1871 | { | |
1872 | return info->size; | |
1873 | } | |
1874 | ||
1875 | /* | |
1876 | * Write all the notes for each thread. When writing the first thread, the | |
1877 | * process-wide notes are interleaved after the first thread-specific note. | |
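 *
 * For example, with two threads A (the dumping thread, kept at the head
 * of the list) and B, the emitted order is: A's NT_PRSTATUS; the
 * process-wide NT_PRPSINFO, NT_SIGINFO, NT_AUXV and (if present)
 * NT_FILE; A's remaining regset notes; then B's NT_PRSTATUS followed
 * by B's remaining regset notes.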
1878 | */ | |
1879 | static int write_note_info(struct elf_note_info *info, | |
1880 | struct coredump_params *cprm) | |
1881 | { | |
1882 | bool first = true; | |
1883 | struct elf_thread_core_info *t = info->thread; | |
1884 | ||
1885 | do { | |
1886 | int i; | |
1887 | ||
1888 | if (!writenote(&t->notes[0], cprm)) | |
1889 | return 0; | |
1890 | ||
1891 | if (first && !writenote(&info->psinfo, cprm)) | |
1892 | return 0; | |
1893 | if (first && !writenote(&info->signote, cprm)) | |
1894 | return 0; | |
1895 | if (first && !writenote(&info->auxv, cprm)) | |
1896 | return 0; | |
1897 | if (first && info->files.data && | |
1898 | !writenote(&info->files, cprm)) | |
1899 | return 0; | |
1900 | ||
1901 | for (i = 1; i < info->thread_notes; ++i) | |
1902 | if (t->notes[i].data && | |
1903 | !writenote(&t->notes[i], cprm)) | |
1904 | return 0; | |
1905 | ||
1906 | first = false; | |
1907 | t = t->next; | |
1908 | } while (t); | |
1909 | ||
1910 | return 1; | |
1911 | } | |
1912 | ||
1913 | static void free_note_info(struct elf_note_info *info) | |
1914 | { | |
1915 | struct elf_thread_core_info *threads = info->thread; | |
1916 | while (threads) { | |
1917 | unsigned int i; | |
1918 | struct elf_thread_core_info *t = threads; | |
1919 | threads = t->next; | |
1920 | WARN_ON(t->notes[0].data && t->notes[0].data != &t->prstatus); | |
1921 | for (i = 1; i < info->thread_notes; ++i) | |
1922 | kfree(t->notes[i].data); | |
1923 | kfree(t); | |
1924 | } | |
1925 | kfree(info->psinfo.data); | |
1926 | kvfree(info->files.data); | |
1927 | } | |
1928 | ||
1929 | #else | |
1930 | ||
1931 | /* Here is the structure in which the status of each thread is captured. */ | |
1932 | struct elf_thread_status | |
1933 | { | |
1934 | struct list_head list; | |
1935 | struct elf_prstatus prstatus; /* NT_PRSTATUS */ | |
1936 | elf_fpregset_t fpu; /* NT_PRFPREG */ | |
1937 | struct task_struct *thread; | |
1938 | #ifdef ELF_CORE_COPY_XFPREGS | |
1939 | elf_fpxregset_t xfpu; /* ELF_CORE_XFPREG_TYPE */ | |
1940 | #endif | |
1941 | struct memelfnote notes[3]; | |
1942 | int num_notes; | |
1943 | }; | |
1944 | ||
1945 | /* | |
1946 | * In order to add the specific thread information for the elf file format, | |
1947 | * we need to keep a linked list of every thread's pr_status and then create | |
1948 | * a single section for them in the final core file. | |
1949 | */ | |
1950 | static int elf_dump_thread_status(long signr, struct elf_thread_status *t) | |
1951 | { | |
1952 | int sz = 0; | |
1953 | struct task_struct *p = t->thread; | |
1954 | t->num_notes = 0; | |
1955 | ||
1956 | fill_prstatus(&t->prstatus, p, signr); | |
1957 | elf_core_copy_task_regs(p, &t->prstatus.pr_reg); | |
1958 | ||
1959 | fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus), | |
1960 | &(t->prstatus)); | |
1961 | t->num_notes++; | |
1962 | sz += notesize(&t->notes[0]); | |
1963 | ||
1964 | if ((t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, NULL, | |
1965 | &t->fpu))) { | |
1966 | fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu), | |
1967 | &(t->fpu)); | |
1968 | t->num_notes++; | |
1969 | sz += notesize(&t->notes[1]); | |
1970 | } | |
1971 | ||
1972 | #ifdef ELF_CORE_COPY_XFPREGS | |
1973 | if (elf_core_copy_task_xfpregs(p, &t->xfpu)) { | |
1974 | fill_note(&t->notes[2], "LINUX", ELF_CORE_XFPREG_TYPE, | |
1975 | sizeof(t->xfpu), &t->xfpu); | |
1976 | t->num_notes++; | |
1977 | sz += notesize(&t->notes[2]); | |
1978 | } | |
1979 | #endif | |
1980 | return sz; | |
1981 | } | |
1982 | ||
1983 | struct elf_note_info { | |
1984 | struct memelfnote *notes; | |
1985 | struct memelfnote *notes_files; | |
1986 | struct elf_prstatus *prstatus; /* NT_PRSTATUS */ | |
1987 | struct elf_prpsinfo *psinfo; /* NT_PRPSINFO */ | |
1988 | struct list_head thread_list; | |
1989 | elf_fpregset_t *fpu; | |
1990 | #ifdef ELF_CORE_COPY_XFPREGS | |
1991 | elf_fpxregset_t *xfpu; | |
1992 | #endif | |
1993 | user_siginfo_t csigdata; | |
1994 | int thread_status_size; | |
1995 | int numnote; | |
1996 | }; | |
1997 | ||
1998 | static int elf_note_info_init(struct elf_note_info *info) | |
1999 | { | |
2000 | memset(info, 0, sizeof(*info)); | |
2001 | INIT_LIST_HEAD(&info->thread_list); | |
2002 | ||
2003 | /* Allocate space for ELF notes */ | |
2004 | info->notes = kmalloc_array(8, sizeof(struct memelfnote), GFP_KERNEL); | |
2005 | if (!info->notes) | |
2006 | return 0; | |
2007 | info->psinfo = kmalloc(sizeof(*info->psinfo), GFP_KERNEL); | |
2008 | if (!info->psinfo) | |
2009 | return 0; | |
2010 | info->prstatus = kmalloc(sizeof(*info->prstatus), GFP_KERNEL); | |
2011 | if (!info->prstatus) | |
2012 | return 0; | |
2013 | info->fpu = kmalloc(sizeof(*info->fpu), GFP_KERNEL); | |
2014 | if (!info->fpu) | |
2015 | return 0; | |
2016 | #ifdef ELF_CORE_COPY_XFPREGS | |
2017 | info->xfpu = kmalloc(sizeof(*info->xfpu), GFP_KERNEL); | |
2018 | if (!info->xfpu) | |
2019 | return 0; | |
2020 | #endif | |
2021 | return 1; | |
2022 | } | |
2023 | ||
2024 | static int fill_note_info(struct elfhdr *elf, int phdrs, | |
2025 | struct elf_note_info *info, | |
2026 | const kernel_siginfo_t *siginfo, struct pt_regs *regs) | |
2027 | { | |
2028 | struct core_thread *ct; | |
2029 | struct elf_thread_status *ets; | |
2030 | ||
2031 | if (!elf_note_info_init(info)) | |
2032 | return 0; | |
2033 | ||
2034 | for (ct = current->mm->core_state->dumper.next; | |
2035 | ct; ct = ct->next) { | |
2036 | ets = kzalloc(sizeof(*ets), GFP_KERNEL); | |
2037 | if (!ets) | |
2038 | return 0; | |
2039 | ||
2040 | ets->thread = ct->task; | |
2041 | list_add(&ets->list, &info->thread_list); | |
2042 | } | |
2043 | ||
2044 | list_for_each_entry(ets, &info->thread_list, list) { | |
2045 | int sz; | |
2046 | ||
2047 | sz = elf_dump_thread_status(siginfo->si_signo, ets); | |
2048 | info->thread_status_size += sz; | |
2049 | } | |
2050 | /* now collect the dump for the current task */ | |
2051 | memset(info->prstatus, 0, sizeof(*info->prstatus)); | |
2052 | fill_prstatus(info->prstatus, current, siginfo->si_signo); | |
2053 | elf_core_copy_regs(&info->prstatus->pr_reg, regs); | |
2054 | ||
2055 | /* Set up header */ | |
2056 | fill_elf_header(elf, phdrs, ELF_ARCH, ELF_CORE_EFLAGS); | |
2057 | ||
2058 | /* | |
2059 | * Set up the notes in similar form to SVR4 core dumps made | |
2060 | * with info from their /proc. | |
2061 | */ | |
2062 | ||
2063 | fill_note(info->notes + 0, "CORE", NT_PRSTATUS, | |
2064 | sizeof(*info->prstatus), info->prstatus); | |
2065 | fill_psinfo(info->psinfo, current->group_leader, current->mm); | |
2066 | fill_note(info->notes + 1, "CORE", NT_PRPSINFO, | |
2067 | sizeof(*info->psinfo), info->psinfo); | |
2068 | ||
2069 | fill_siginfo_note(info->notes + 2, &info->csigdata, siginfo); | |
2070 | fill_auxv_note(info->notes + 3, current->mm); | |
2071 | info->numnote = 4; | |
2072 | ||
2073 | if (fill_files_note(info->notes + info->numnote) == 0) { | |
2074 | info->notes_files = info->notes + info->numnote; | |
2075 | info->numnote++; | |
2076 | } | |
2077 | ||
2078 | /* Try to dump the FPU. */ | |
2079 | info->prstatus->pr_fpvalid = elf_core_copy_task_fpregs(current, regs, | |
2080 | info->fpu); | |
2081 | if (info->prstatus->pr_fpvalid) | |
2082 | fill_note(info->notes + info->numnote++, | |
2083 | "CORE", NT_PRFPREG, sizeof(*info->fpu), info->fpu); | |
2084 | #ifdef ELF_CORE_COPY_XFPREGS | |
2085 | if (elf_core_copy_task_xfpregs(current, info->xfpu)) | |
2086 | fill_note(info->notes + info->numnote++, | |
2087 | "LINUX", ELF_CORE_XFPREG_TYPE, | |
2088 | sizeof(*info->xfpu), info->xfpu); | |
2089 | #endif | |
2090 | ||
2091 | return 1; | |
2092 | } | |
2093 | ||
2094 | static size_t get_note_info_size(struct elf_note_info *info) | |
2095 | { | |
2096 | int sz = 0; | |
2097 | int i; | |
2098 | ||
2099 | for (i = 0; i < info->numnote; i++) | |
2100 | sz += notesize(info->notes + i); | |
2101 | ||
2102 | sz += info->thread_status_size; | |
2103 | ||
2104 | return sz; | |
2105 | } | |
2106 | ||
2107 | static int write_note_info(struct elf_note_info *info, | |
2108 | struct coredump_params *cprm) | |
2109 | { | |
2110 | struct elf_thread_status *ets; | |
2111 | int i; | |
2112 | ||
2113 | for (i = 0; i < info->numnote; i++) | |
2114 | if (!writenote(info->notes + i, cprm)) | |
2115 | return 0; | |
2116 | ||
2117 | /* write out the thread status notes section */ | |
2118 | list_for_each_entry(ets, &info->thread_list, list) { | |
2119 | for (i = 0; i < ets->num_notes; i++) | |
2120 | if (!writenote(&ets->notes[i], cprm)) | |
2121 | return 0; | |
2122 | } | |
2123 | ||
2124 | return 1; | |
2125 | } | |
2126 | ||
2127 | static void free_note_info(struct elf_note_info *info) | |
2128 | { | |
2129 | while (!list_empty(&info->thread_list)) { | |
2130 | struct list_head *tmp = info->thread_list.next; | |
2131 | list_del(tmp); | |
2132 | kfree(list_entry(tmp, struct elf_thread_status, list)); | |
2133 | } | |
2134 | ||
2135 | /* Free data possibly allocated by fill_files_note(): */ | |
2136 | if (info->notes_files) | |
2137 | kvfree(info->notes_files->data); | |
2138 | ||
2139 | kfree(info->prstatus); | |
2140 | kfree(info->psinfo); | |
2141 | kfree(info->notes); | |
2142 | kfree(info->fpu); | |
2143 | #ifdef ELF_CORE_COPY_XFPREGS | |
2144 | kfree(info->xfpu); | |
2145 | #endif | |
2146 | } | |
2147 | ||
2148 | #endif | |
2149 | ||
2150 | static struct vm_area_struct *first_vma(struct task_struct *tsk, | |
2151 | struct vm_area_struct *gate_vma) | |
2152 | { | |
2153 | struct vm_area_struct *ret = tsk->mm->mmap; | |
2154 | ||
2155 | if (ret) | |
2156 | return ret; | |
2157 | return gate_vma; | |
2158 | } | |
2159 | /* | |
2160 | * Helper function for iterating across a vma list. It ensures that the caller | |
2161 | * will visit `gate_vma' prior to terminating the search. | |
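 *
 * Typical use, as in elf_core_dump() below:
 *
 *	for (vma = first_vma(current, gate_vma); vma != NULL;
 *	     vma = next_vma(vma, gate_vma))
 *		...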
2162 | */ | |
2163 | static struct vm_area_struct *next_vma(struct vm_area_struct *this_vma, | |
2164 | struct vm_area_struct *gate_vma) | |
2165 | { | |
2166 | struct vm_area_struct *ret; | |
2167 | ||
2168 | ret = this_vma->vm_next; | |
2169 | if (ret) | |
2170 | return ret; | |
2171 | if (this_vma == gate_vma) | |
2172 | return NULL; | |
2173 | return gate_vma; | |
2174 | } | |
2175 | ||
2176 | static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum, | |
2177 | elf_addr_t e_shoff, int segs) | |
2178 | { | |
2179 | elf->e_shoff = e_shoff; | |
2180 | elf->e_shentsize = sizeof(*shdr4extnum); | |
2181 | elf->e_shnum = 1; | |
2182 | elf->e_shstrndx = SHN_UNDEF; | |
2183 | ||
2184 | memset(shdr4extnum, 0, sizeof(*shdr4extnum)); | |
2185 | ||
2186 | shdr4extnum->sh_type = SHT_NULL; | |
2187 | shdr4extnum->sh_size = elf->e_shnum; | |
2188 | shdr4extnum->sh_link = elf->e_shstrndx; | |
2189 | shdr4extnum->sh_info = segs; | |
2190 | } | |
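
/*
 * Illustrative sketch only, not part of this file: a reader of the
 * finished core file would recover the true segment count from section
 * header 0 whenever e_phnum saturates at PN_XNUM, mirroring
 * fill_extnum_info() above.
 */
static int __maybe_unused real_segment_count(const struct elfhdr *elf,
					     const struct elf_shdr *shdr0)
{
	if (elf->e_phnum != PN_XNUM)
		return elf->e_phnum;
	return shdr0->sh_info;	/* fill_extnum_info() stored segs here */
}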
2191 | ||
2192 | /* | |
2193 | * Actual dumper | |
2194 | * | |
2195 | * This is a two-pass process; first we find the offsets of the bits, | |
2196 | * and then they are actually written out. If we exceed the core size | |
2197 | * limit, we just truncate. | |
2198 | */ | |
2199 | static int elf_core_dump(struct coredump_params *cprm) | |
2200 | { | |
2201 | int has_dumped = 0; | |
2202 | mm_segment_t fs; | |
2203 | int segs, i; | |
2204 | size_t vma_data_size = 0; | |
2205 | struct vm_area_struct *vma, *gate_vma; | |
2206 | struct elfhdr *elf = NULL; | |
2207 | loff_t offset = 0, dataoff; | |
2208 | struct elf_note_info info = { }; | |
2209 | struct elf_phdr *phdr4note = NULL; | |
2210 | struct elf_shdr *shdr4extnum = NULL; | |
2211 | Elf_Half e_phnum; | |
2212 | elf_addr_t e_shoff; | |
2213 | elf_addr_t *vma_filesz = NULL; | |
2214 | ||
2215 | /* | |
2216 | * We no longer stop all VM operations. | |
2217 | * | |
2218 | * This is because any process that could possibly change map_count | |
2219 | * or the mmap / vma pages is now blocked in do_exit until current | |
2220 | * finishes this core dump. | |
2221 | * | |
2222 | * Only ptrace can touch these memory addresses, but it doesn't change | |
2223 | * the map_count or the pages allocated. So no possibility of crashing | |
2224 | * exists while dumping the mm->vm_next areas to the core file. | |
2225 | */ | |
2226 | ||
2227 | /* alloc memory for large data structures: too large to be on stack */ | |
2228 | elf = kmalloc(sizeof(*elf), GFP_KERNEL); | |
2229 | if (!elf) | |
2230 | goto out; | |
2231 | /* | |
2232 | * The number of segs is recorded in the ELF header as a 16-bit value. | |
2233 | * Please check DEFAULT_MAX_MAP_COUNT definition when you modify here. | |
2234 | */ | |
2235 | segs = current->mm->map_count; | |
2236 | segs += elf_core_extra_phdrs(); | |
2237 | ||
2238 | gate_vma = get_gate_vma(current->mm); | |
2239 | if (gate_vma != NULL) | |
2240 | segs++; | |
2241 | ||
2242 | /* for notes section */ | |
2243 | segs++; | |
2244 | ||
2245 | /* If segs > PN_XNUM(0xffff), then e_phnum overflows. To avoid | |
2246 | * this, the kernel supports extended numbering. Have a look at | |
2247 | * include/linux/elf.h for further information. */ | |
2248 | e_phnum = segs > PN_XNUM ? PN_XNUM : segs; | |
2249 | ||
2250 | /* | |
2251 | * Collect all the non-memory information about the process for the | |
2252 | * notes. This also sets up the file header. | |
2253 | */ | |
2254 | if (!fill_note_info(elf, e_phnum, &info, cprm->siginfo, cprm->regs)) | |
2255 | goto cleanup; | |
2256 | ||
2257 | has_dumped = 1; | |
2258 | ||
2259 | fs = get_fs(); | |
2260 | set_fs(KERNEL_DS); | |
2261 | ||
2262 | offset += sizeof(*elf); /* Elf header */ | |
2263 | offset += segs * sizeof(struct elf_phdr); /* Program headers */ | |
2264 | ||
2265 | /* Write notes phdr entry */ | |
2266 | { | |
2267 | size_t sz = get_note_info_size(&info); | |
2268 | ||
2269 | sz += elf_coredump_extra_notes_size(); | |
2270 | ||
2271 | phdr4note = kmalloc(sizeof(*phdr4note), GFP_KERNEL); | |
2272 | if (!phdr4note) | |
2273 | goto end_coredump; | |
2274 | ||
2275 | fill_elf_note_phdr(phdr4note, sz, offset); | |
2276 | offset += sz; | |
2277 | } | |
2278 | ||
2279 | dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE); | |
2280 | ||
2281 | if (segs - 1 > ULONG_MAX / sizeof(*vma_filesz)) | |
2282 | goto end_coredump; | |
2283 | vma_filesz = kvmalloc(array_size(sizeof(*vma_filesz), (segs - 1)), | |
2284 | GFP_KERNEL); | |
2285 | if (ZERO_OR_NULL_PTR(vma_filesz)) | |
2286 | goto end_coredump; | |
2287 | ||
2288 | for (i = 0, vma = first_vma(current, gate_vma); vma != NULL; | |
2289 | vma = next_vma(vma, gate_vma)) { | |
2290 | unsigned long dump_size; | |
2291 | ||
2292 | dump_size = vma_dump_size(vma, cprm->mm_flags); | |
2293 | vma_filesz[i++] = dump_size; | |
2294 | vma_data_size += dump_size; | |
2295 | } | |
2296 | ||
2297 | offset += vma_data_size; | |
2298 | offset += elf_core_extra_data_size(); | |
2299 | e_shoff = offset; | |
2300 | ||
2301 | if (e_phnum == PN_XNUM) { | |
2302 | shdr4extnum = kmalloc(sizeof(*shdr4extnum), GFP_KERNEL); | |
2303 | if (!shdr4extnum) | |
2304 | goto end_coredump; | |
2305 | fill_extnum_info(elf, shdr4extnum, e_shoff, segs); | |
2306 | } | |
2307 | ||
2308 | offset = dataoff; | |
2309 | ||
2310 | if (!dump_emit(cprm, elf, sizeof(*elf))) | |
2311 | goto end_coredump; | |
2312 | ||
2313 | if (!dump_emit(cprm, phdr4note, sizeof(*phdr4note))) | |
2314 | goto end_coredump; | |
2315 | ||
2316 | /* Write program headers for segments dump */ | |
2317 | for (i = 0, vma = first_vma(current, gate_vma); vma != NULL; | |
2318 | vma = next_vma(vma, gate_vma)) { | |
2319 | struct elf_phdr phdr; | |
2320 | ||
2321 | phdr.p_type = PT_LOAD; | |
2322 | phdr.p_offset = offset; | |
2323 | phdr.p_vaddr = vma->vm_start; | |
2324 | phdr.p_paddr = 0; | |
2325 | phdr.p_filesz = vma_filesz[i++]; | |
2326 | phdr.p_memsz = vma->vm_end - vma->vm_start; | |
2327 | offset += phdr.p_filesz; | |
2328 | phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0; | |
2329 | if (vma->vm_flags & VM_WRITE) | |
2330 | phdr.p_flags |= PF_W; | |
2331 | if (vma->vm_flags & VM_EXEC) | |
2332 | phdr.p_flags |= PF_X; | |
2333 | phdr.p_align = ELF_EXEC_PAGESIZE; | |
2334 | ||
2335 | if (!dump_emit(cprm, &phdr, sizeof(phdr))) | |
2336 | goto end_coredump; | |
2337 | } | |
2338 | ||
2339 | if (!elf_core_write_extra_phdrs(cprm, offset)) | |
2340 | goto end_coredump; | |
2341 | ||
2342 | /* write out the notes section */ | |
2343 | if (!write_note_info(&info, cprm)) | |
2344 | goto end_coredump; | |
2345 | ||
2346 | if (elf_coredump_extra_notes_write(cprm)) | |
2347 | goto end_coredump; | |
2348 | ||
2349 | /* Align to page */ | |
2350 | if (!dump_skip(cprm, dataoff - cprm->pos)) | |
2351 | goto end_coredump; | |
2352 | ||
2353 | for (i = 0, vma = first_vma(current, gate_vma); vma != NULL; | |
2354 | vma = next_vma(vma, gate_vma)) { | |
2355 | unsigned long addr; | |
2356 | unsigned long end; | |
2357 | ||
2358 | end = vma->vm_start + vma_filesz[i++]; | |
2359 | ||
2360 | for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) { | |
2361 | struct page *page; | |
2362 | int stop; | |
2363 | ||
2364 | page = get_dump_page(addr); | |
2365 | if (page) { | |
2366 | void *kaddr = kmap(page); | |
2367 | stop = !dump_emit(cprm, kaddr, PAGE_SIZE); | |
2368 | kunmap(page); | |
2369 | put_page(page); | |
2370 | } else | |
2371 | stop = !dump_skip(cprm, PAGE_SIZE); | |
2372 | if (stop) | |
2373 | goto end_coredump; | |
2374 | } | |
2375 | } | |
2376 | dump_truncate(cprm); | |
2377 | ||
2378 | if (!elf_core_write_extra_data(cprm)) | |
2379 | goto end_coredump; | |
2380 | ||
2381 | if (e_phnum == PN_XNUM) { | |
2382 | if (!dump_emit(cprm, shdr4extnum, sizeof(*shdr4extnum))) | |
2383 | goto end_coredump; | |
2384 | } | |
2385 | ||
2386 | end_coredump: | |
2387 | set_fs(fs); | |
2388 | ||
2389 | cleanup: | |
2390 | free_note_info(&info); | |
2391 | kfree(shdr4extnum); | |
2392 | kvfree(vma_filesz); | |
2393 | kfree(phdr4note); | |
2394 | kfree(elf); | |
2395 | out: | |
2396 | return has_dumped; | |
2397 | } | |
2398 | ||
2399 | #endif /* CONFIG_ELF_CORE */ | |
2400 | ||
2401 | static int __init init_elf_binfmt(void) | |
2402 | { | |
2403 | register_binfmt(&elf_format); | |
2404 | return 0; | |
2405 | } | |
2406 | ||
2407 | static void __exit exit_elf_binfmt(void) | |
2408 | { | |
2409 | /* Remove the ELF loader. */ | |
2410 | unregister_binfmt(&elf_format); | |
2411 | } | |
2412 | ||
2413 | core_initcall(init_elf_binfmt); | |
2414 | module_exit(exit_elf_binfmt); | |
2415 | MODULE_LICENSE("GPL"); |