1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * linux/fs/exec.c
4 *
5 * Copyright (C) 1991, 1992 Linus Torvalds
6 */
7
8 /*
9 * #!-checking implemented by tytso.
10 */
11 /*
12 * Demand-loading implemented 01.12.91 - no need to read anything but
13 * the header into memory. The inode of the executable is put into
14 * "current->executable", and page faults do the actual loading. Clean.
15 *
16 * Once more I can proudly say that linux stood up to being changed: it
17 * was less than 2 hours work to get demand-loading completely implemented.
18 *
19 * Demand loading changed July 1993 by Eric Youngdale. Use mmap instead,
20 * current->executable is only used by the procfs. This allows a dispatch
21 * table to check for several different types of binary formats. We keep
22 * trying until we recognize the file or we run out of supported binary
23 * formats.
24 */
25
26 #include <linux/slab.h>
27 #include <linux/file.h>
28 #include <linux/fdtable.h>
29 #include <linux/mm.h>
30 #include <linux/vmacache.h>
31 #include <linux/stat.h>
32 #include <linux/fcntl.h>
33 #include <linux/swap.h>
34 #include <linux/string.h>
35 #include <linux/init.h>
36 #include <linux/sched/mm.h>
37 #include <linux/sched/coredump.h>
38 #include <linux/sched/signal.h>
39 #include <linux/sched/numa_balancing.h>
40 #include <linux/sched/task.h>
41 #include <linux/pagemap.h>
42 #include <linux/perf_event.h>
43 #include <linux/highmem.h>
44 #include <linux/spinlock.h>
45 #include <linux/key.h>
46 #include <linux/personality.h>
47 #include <linux/binfmts.h>
48 #include <linux/utsname.h>
49 #include <linux/pid_namespace.h>
50 #include <linux/module.h>
51 #include <linux/namei.h>
52 #include <linux/mount.h>
53 #include <linux/security.h>
54 #include <linux/syscalls.h>
55 #include <linux/tsacct_kern.h>
56 #include <linux/cn_proc.h>
57 #include <linux/audit.h>
58 #include <linux/tracehook.h>
59 #include <linux/kmod.h>
60 #include <linux/fsnotify.h>
61 #include <linux/fs_struct.h>
62 #include <linux/oom.h>
63 #include <linux/compat.h>
64 #include <linux/vmalloc.h>
65
66 #include <linux/uaccess.h>
67 #include <asm/mmu_context.h>
68 #include <asm/tlb.h>
69
70 #include <trace/events/task.h>
71 #include "internal.h"
72
73 #include <trace/events/sched.h>
74
75 static int bprm_creds_from_file(struct linux_binprm *bprm);
76
77 int suid_dumpable = 0;
78
79 static LIST_HEAD(formats);
80 static DEFINE_RWLOCK(binfmt_lock);
81
82 void __register_binfmt(struct linux_binfmt * fmt, int insert)
83 {
84 BUG_ON(!fmt);
85 if (WARN_ON(!fmt->load_binary))
86 return;
87 write_lock(&binfmt_lock);
88 insert ? list_add(&fmt->lh, &formats) :
89 list_add_tail(&fmt->lh, &formats);
90 write_unlock(&binfmt_lock);
91 }
92
93 EXPORT_SYMBOL(__register_binfmt);
94
95 void unregister_binfmt(struct linux_binfmt * fmt)
96 {
97 write_lock(&binfmt_lock);
98 list_del(&fmt->lh);
99 write_unlock(&binfmt_lock);
100 }
101
102 EXPORT_SYMBOL(unregister_binfmt);
103
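/*
 * Illustrative sketch, not part of the original file: the minimal shape
 * of a format handler using the registration API above. demo_fmt and
 * demo_load_binary are hypothetical names; register_binfmt() and
 * insert_binfmt() are the <linux/binfmts.h> wrappers that call
 * __register_binfmt() with insert == 0 and insert == 1 respectively.
 */
#if 0
static int demo_load_binary(struct linux_binprm *bprm)
{
	/* Not our format: -ENOEXEC lets the next handler in "formats" try. */
	return -ENOEXEC;
}

static struct linux_binfmt demo_fmt = {
	.module      = THIS_MODULE,
	.load_binary = demo_load_binary,
};

static int __init demo_binfmt_init(void)
{
	register_binfmt(&demo_fmt);	/* append to the formats list */
	return 0;
}

static void __exit demo_binfmt_exit(void)
{
	unregister_binfmt(&demo_fmt);
}
#endif
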
104 static inline void put_binfmt(struct linux_binfmt * fmt)
105 {
106 module_put(fmt->module);
107 }
108
109 bool path_noexec(const struct path *path)
110 {
111 return (path->mnt->mnt_flags & MNT_NOEXEC) ||
112 (path->mnt->mnt_sb->s_iflags & SB_I_NOEXEC);
113 }
114
115 #ifdef CONFIG_USELIB
116 /*
117 * Note that a shared library must be both readable and executable for
118 * security reasons.
119 *
120 * Also note that we take the address to load from out of the file itself.
121 */
122 SYSCALL_DEFINE1(uselib, const char __user *, library)
123 {
124 struct linux_binfmt *fmt;
125 struct file *file;
126 struct filename *tmp = getname(library);
127 int error = PTR_ERR(tmp);
128 static const struct open_flags uselib_flags = {
129 .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
130 .acc_mode = MAY_READ | MAY_EXEC,
131 .intent = LOOKUP_OPEN,
132 .lookup_flags = LOOKUP_FOLLOW,
133 };
134
135 if (IS_ERR(tmp))
136 goto out;
137
138 file = do_filp_open(AT_FDCWD, tmp, &uselib_flags);
139 putname(tmp);
140 error = PTR_ERR(file);
141 if (IS_ERR(file))
142 goto out;
143
144 error = -EINVAL;
145 if (!S_ISREG(file_inode(file)->i_mode))
146 goto exit;
147
148 error = -EACCES;
149 if (path_noexec(&file->f_path))
150 goto exit;
151
152 fsnotify_open(file);
153
154 error = -ENOEXEC;
155
156 read_lock(&binfmt_lock);
157 list_for_each_entry(fmt, &formats, lh) {
158 if (!fmt->load_shlib)
159 continue;
160 if (!try_module_get(fmt->module))
161 continue;
162 read_unlock(&binfmt_lock);
163 error = fmt->load_shlib(file);
164 read_lock(&binfmt_lock);
165 put_binfmt(fmt);
166 if (error != -ENOEXEC)
167 break;
168 }
169 read_unlock(&binfmt_lock);
170 exit:
171 fput(file);
172 out:
173 return error;
174 }
175 #endif /* #ifdef CONFIG_USELIB */
176
177 #ifdef CONFIG_MMU
178 /*
179 * The nascent bprm->mm is not visible until exec_mmap() but it can
180 * use a lot of memory, so account these pages in current->mm temporarily
181 * for oom_badness()->get_mm_rss(). Once exec succeeds or fails, we
182 * change the counter back via acct_arg_size(0).
183 */
184 static void acct_arg_size(struct linux_binprm *bprm, unsigned long pages)
185 {
186 struct mm_struct *mm = current->mm;
187 long diff = (long)(pages - bprm->vma_pages);
188
189 if (!mm || !diff)
190 return;
191
192 bprm->vma_pages = pages;
193 add_mm_counter(mm, MM_ANONPAGES, diff);
194 }
195
196 static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
197 int write)
198 {
199 struct page *page;
200 int ret;
201 unsigned int gup_flags = FOLL_FORCE;
202
203 #ifdef CONFIG_STACK_GROWSUP
204 if (write) {
205 ret = expand_downwards(bprm->vma, pos);
206 if (ret < 0)
207 return NULL;
208 }
209 #endif
210
211 if (write)
212 gup_flags |= FOLL_WRITE;
213
214 /*
215 * We are doing an exec(). 'current' is the process
216 * doing the exec and bprm->mm is the new process's mm.
217 */
218 ret = get_user_pages_remote(current, bprm->mm, pos, 1, gup_flags,
219 &page, NULL, NULL);
220 if (ret <= 0)
221 return NULL;
222
223 if (write)
224 acct_arg_size(bprm, vma_pages(bprm->vma));
225
226 return page;
227 }
228
229 static void put_arg_page(struct page *page)
230 {
231 put_page(page);
232 }
233
234 static void free_arg_pages(struct linux_binprm *bprm)
235 {
236 }
237
238 static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
239 struct page *page)
240 {
241 flush_cache_page(bprm->vma, pos, page_to_pfn(page));
242 }
243
244 static int __bprm_mm_init(struct linux_binprm *bprm)
245 {
246 int err;
247 struct vm_area_struct *vma = NULL;
248 struct mm_struct *mm = bprm->mm;
249
250 bprm->vma = vma = vm_area_alloc(mm);
251 if (!vma)
252 return -ENOMEM;
253 vma_set_anonymous(vma);
254
255 if (down_write_killable(&mm->mmap_sem)) {
256 err = -EINTR;
257 goto err_free;
258 }
259
260 /*
261 * Place the stack at the largest stack address the architecture
262 * supports. Later, we'll move this to an appropriate place. We don't
263 * use STACK_TOP because that can depend on attributes which aren't
264 * configured yet.
265 */
266 BUILD_BUG_ON(VM_STACK_FLAGS & VM_STACK_INCOMPLETE_SETUP);
267 vma->vm_end = STACK_TOP_MAX;
268 vma->vm_start = vma->vm_end - PAGE_SIZE;
269 vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
270 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
271
272 err = insert_vm_struct(mm, vma);
273 if (err)
274 goto err;
275
276 mm->stack_vm = mm->total_vm = 1;
277 up_write(&mm->mmap_sem);
278 bprm->p = vma->vm_end - sizeof(void *);
279 return 0;
280 err:
281 up_write(&mm->mmap_sem);
282 err_free:
283 bprm->vma = NULL;
284 vm_area_free(vma);
285 return err;
286 }
287
288 static bool valid_arg_len(struct linux_binprm *bprm, long len)
289 {
290 return len <= MAX_ARG_STRLEN;
291 }
292
293 #else
294
295 static inline void acct_arg_size(struct linux_binprm *bprm, unsigned long pages)
296 {
297 }
298
299 static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
300 int write)
301 {
302 struct page *page;
303
304 page = bprm->page[pos / PAGE_SIZE];
305 if (!page && write) {
306 page = alloc_page(GFP_HIGHUSER|__GFP_ZERO);
307 if (!page)
308 return NULL;
309 bprm->page[pos / PAGE_SIZE] = page;
310 }
311
312 return page;
313 }
314
315 static void put_arg_page(struct page *page)
316 {
317 }
318
319 static void free_arg_page(struct linux_binprm *bprm, int i)
320 {
321 if (bprm->page[i]) {
322 __free_page(bprm->page[i]);
323 bprm->page[i] = NULL;
324 }
325 }
326
327 static void free_arg_pages(struct linux_binprm *bprm)
328 {
329 int i;
330
331 for (i = 0; i < MAX_ARG_PAGES; i++)
332 free_arg_page(bprm, i);
333 }
334
335 static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
336 struct page *page)
337 {
338 }
339
340 static int __bprm_mm_init(struct linux_binprm *bprm)
341 {
342 bprm->p = PAGE_SIZE * MAX_ARG_PAGES - sizeof(void *);
343 return 0;
344 }
345
346 static bool valid_arg_len(struct linux_binprm *bprm, long len)
347 {
348 return len <= bprm->p;
349 }
350
351 #endif /* CONFIG_MMU */
352
353 /*
354 * Create a new mm_struct and populate it with a temporary stack
355 * vm_area_struct. We don't have enough context at this point to set the stack
356 * flags, permissions, and offset, so we use temporary values. We'll update
357 * them later in setup_arg_pages().
358 */
359 static int bprm_mm_init(struct linux_binprm *bprm)
360 {
361 int err;
362 struct mm_struct *mm = NULL;
363
364 bprm->mm = mm = mm_alloc();
365 err = -ENOMEM;
366 if (!mm)
367 goto err;
368
369 /* Save current stack limit for all calculations made during exec. */
370 task_lock(current->group_leader);
371 bprm->rlim_stack = current->signal->rlim[RLIMIT_STACK];
372 task_unlock(current->group_leader);
373
374 err = __bprm_mm_init(bprm);
375 if (err)
376 goto err;
377
378 return 0;
379
380 err:
381 if (mm) {
382 bprm->mm = NULL;
383 mmdrop(mm);
384 }
385
386 return err;
387 }
388
389 struct user_arg_ptr {
390 #ifdef CONFIG_COMPAT
391 bool is_compat;
392 #endif
393 union {
394 const char __user *const __user *native;
395 #ifdef CONFIG_COMPAT
396 const compat_uptr_t __user *compat;
397 #endif
398 } ptr;
399 };
400
401 static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
402 {
403 const char __user *native;
404
405 #ifdef CONFIG_COMPAT
406 if (unlikely(argv.is_compat)) {
407 compat_uptr_t compat;
408
409 if (get_user(compat, argv.ptr.compat + nr))
410 return ERR_PTR(-EFAULT);
411
412 return compat_ptr(compat);
413 }
414 #endif
415
416 if (get_user(native, argv.ptr.native + nr))
417 return ERR_PTR(-EFAULT);
418
419 return native;
420 }
421
422 /*
423 * count() counts the number of strings in array ARGV.
424 */
425 static int count(struct user_arg_ptr argv, int max)
426 {
427 int i = 0;
428
429 if (argv.ptr.native != NULL) {
430 for (;;) {
431 const char __user *p = get_user_arg_ptr(argv, i);
432
433 if (!p)
434 break;
435
436 if (IS_ERR(p))
437 return -EFAULT;
438
439 if (i >= max)
440 return -E2BIG;
441 ++i;
442
443 if (fatal_signal_pending(current))
444 return -ERESTARTNOHAND;
445 cond_resched();
446 }
447 }
448 return i;
449 }
450
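/*
 * Illustrative note, not part of the original file: the array walked by
 * count() is the usual NULL-terminated argv/envp from userspace, e.g.
 *
 *	char *argv[] = { "/bin/true", "arg1", NULL };
 *	char *envp[] = { "PATH=/bin", NULL };
 *	execve("/bin/true", argv, envp);
 *
 * for which count(argv, MAX_ARG_STRINGS) returns 2 and
 * count(envp, MAX_ARG_STRINGS) returns 1. A missing NULL terminator is
 * only caught when the walk hits the MAX_ARG_STRINGS limit (-E2BIG) or
 * an unmapped page (-EFAULT).
 */
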
451 static int prepare_arg_pages(struct linux_binprm *bprm,
452 struct user_arg_ptr argv, struct user_arg_ptr envp)
453 {
454 unsigned long limit, ptr_size;
455
456 bprm->argc = count(argv, MAX_ARG_STRINGS);
457 if (bprm->argc < 0)
458 return bprm->argc;
459
460 bprm->envc = count(envp, MAX_ARG_STRINGS);
461 if (bprm->envc < 0)
462 return bprm->envc;
463
464 /*
465 * Limit to 1/4 of the max stack size or 3/4 of _STK_LIM
466 * (whichever is smaller) for the argv+env strings.
467 * This ensures that:
468 * - the remaining binfmt code will not run out of stack space,
469 * - the program will have a reasonable amount of stack left
470 * to work from.
471 */
472 limit = _STK_LIM / 4 * 3;
473 limit = min(limit, bprm->rlim_stack.rlim_cur / 4);
474 /*
475 * We've historically supported up to 32 pages (ARG_MAX)
476 * of argument strings even with small stacks
477 */
478 limit = max_t(unsigned long, limit, ARG_MAX);
479 /*
480 * We must account for the size of all the argv and envp pointers to
481 * the argv and envp strings, since they will also take up space in
482 * the stack. They aren't stored until much later when we can't
483 * signal to the parent that the child has run out of stack space.
484 * Instead, calculate it here so it's possible to fail gracefully.
485 */
486 ptr_size = (bprm->argc + bprm->envc) * sizeof(void *);
487 if (limit <= ptr_size)
488 return -E2BIG;
489 limit -= ptr_size;
490
491 bprm->argmin = bprm->p - limit;
492 return 0;
493 }
494
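/*
 * Worked example, not part of the original file, assuming 4K pages,
 * _STK_LIM == 8 MiB and a typical RLIMIT_STACK of 8 MiB: limit starts
 * at 6 MiB (3/4 of _STK_LIM), drops to 2 MiB (rlim_cur / 4), stays
 * above the 128 KiB ARG_MAX floor, and finally loses
 * (argc + envc) * sizeof(void *) bytes for the pointer arrays before
 * bprm->argmin is computed from it.
 */
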
495 /*
496 * 'copy_strings()' copies argument/environment strings from the old
497 * process's memory to the new process's stack. The call to get_user_pages()
498 * ensures the destination page is created and not swapped out.
499 */
500 static int copy_strings(int argc, struct user_arg_ptr argv,
501 struct linux_binprm *bprm)
502 {
503 struct page *kmapped_page = NULL;
504 char *kaddr = NULL;
505 unsigned long kpos = 0;
506 int ret;
507
508 while (argc-- > 0) {
509 const char __user *str;
510 int len;
511 unsigned long pos;
512
513 ret = -EFAULT;
514 str = get_user_arg_ptr(argv, argc);
515 if (IS_ERR(str))
516 goto out;
517
518 len = strnlen_user(str, MAX_ARG_STRLEN);
519 if (!len)
520 goto out;
521
522 ret = -E2BIG;
523 if (!valid_arg_len(bprm, len))
524 goto out;
525
526 /* We're going to work our way backwards. */
527 pos = bprm->p;
528 str += len;
529 bprm->p -= len;
530 #ifdef CONFIG_MMU
531 if (bprm->p < bprm->argmin)
532 goto out;
533 #endif
534
535 while (len > 0) {
536 int offset, bytes_to_copy;
537
538 if (fatal_signal_pending(current)) {
539 ret = -ERESTARTNOHAND;
540 goto out;
541 }
542 cond_resched();
543
544 offset = pos % PAGE_SIZE;
545 if (offset == 0)
546 offset = PAGE_SIZE;
547
548 bytes_to_copy = offset;
549 if (bytes_to_copy > len)
550 bytes_to_copy = len;
551
552 offset -= bytes_to_copy;
553 pos -= bytes_to_copy;
554 str -= bytes_to_copy;
555 len -= bytes_to_copy;
556
557 if (!kmapped_page || kpos != (pos & PAGE_MASK)) {
558 struct page *page;
559
560 page = get_arg_page(bprm, pos, 1);
561 if (!page) {
562 ret = -E2BIG;
563 goto out;
564 }
565
566 if (kmapped_page) {
567 flush_kernel_dcache_page(kmapped_page);
568 kunmap(kmapped_page);
569 put_arg_page(kmapped_page);
570 }
571 kmapped_page = page;
572 kaddr = kmap(kmapped_page);
573 kpos = pos & PAGE_MASK;
574 flush_arg_page(bprm, kpos, kmapped_page);
575 }
576 if (copy_from_user(kaddr+offset, str, bytes_to_copy)) {
577 ret = -EFAULT;
578 goto out;
579 }
580 }
581 }
582 ret = 0;
583 out:
584 if (kmapped_page) {
585 flush_kernel_dcache_page(kmapped_page);
586 kunmap(kmapped_page);
587 put_arg_page(kmapped_page);
588 }
589 return ret;
590 }
591
592 /*
593 * Copy an argument/environment string from the kernel to the process's stack.
594 */
595 int copy_string_kernel(const char *arg, struct linux_binprm *bprm)
596 {
597 int len = strnlen(arg, MAX_ARG_STRLEN) + 1 /* terminating NUL */;
598 unsigned long pos = bprm->p;
599
600 if (len == 0)
601 return -EFAULT;
602 if (!valid_arg_len(bprm, len))
603 return -E2BIG;
604
605 /* We're going to work our way backwards. */
606 arg += len;
607 bprm->p -= len;
608 if (IS_ENABLED(CONFIG_MMU) && bprm->p < bprm->argmin)
609 return -E2BIG;
610
611 while (len > 0) {
612 unsigned int bytes_to_copy = min_t(unsigned int, len,
613 min_not_zero(offset_in_page(pos), PAGE_SIZE));
614 struct page *page;
615 char *kaddr;
616
617 pos -= bytes_to_copy;
618 arg -= bytes_to_copy;
619 len -= bytes_to_copy;
620
621 page = get_arg_page(bprm, pos, 1);
622 if (!page)
623 return -E2BIG;
624 kaddr = kmap_atomic(page);
625 flush_arg_page(bprm, pos & PAGE_MASK, page);
626 memcpy(kaddr + offset_in_page(pos), arg, bytes_to_copy);
627 flush_kernel_dcache_page(page);
628 kunmap_atomic(kaddr);
629 put_arg_page(page);
630 }
631
632 return 0;
633 }
634 EXPORT_SYMBOL(copy_string_kernel);
635
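/*
 * Illustrative note, not part of the original file: bytes_to_copy above
 * never crosses a page boundary. With pos == 0x1234, say, the first
 * chunk is 0x234 bytes (offset_in_page(pos)); once pos is page aligned,
 * offset_in_page() yields 0 and min_not_zero() substitutes PAGE_SIZE,
 * so whole pages are copied from then on (still capped by len).
 */
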
636 #ifdef CONFIG_MMU
637
638 /*
639 * During bprm_mm_init(), we create a temporary stack at STACK_TOP_MAX. Once
640 * the binfmt code determines where the new stack should reside, we shift it to
641 * its final location. The process proceeds as follows:
642 *
643 * 1) Use shift to calculate the new vma endpoints.
644 * 2) Extend vma to cover both the old and new ranges. This ensures the
645 * arguments passed to subsequent functions are consistent.
646 * 3) Move vma's page tables to the new range.
647 * 4) Free up any cleared pgd range.
648 * 5) Shrink the vma to cover only the new range.
649 */
650 static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
651 {
652 struct mm_struct *mm = vma->vm_mm;
653 unsigned long old_start = vma->vm_start;
654 unsigned long old_end = vma->vm_end;
655 unsigned long length = old_end - old_start;
656 unsigned long new_start = old_start - shift;
657 unsigned long new_end = old_end - shift;
658 struct mmu_gather tlb;
659
660 BUG_ON(new_start > new_end);
661
662 /*
663 * ensure there are no vmas between where we want to go
664 * and where we are
665 */
666 if (vma != find_vma(mm, new_start))
667 return -EFAULT;
668
669 /*
670 * cover the whole range: [new_start, old_end)
671 */
672 if (vma_adjust(vma, new_start, old_end, vma->vm_pgoff, NULL))
673 return -ENOMEM;
674
675 /*
676 * move the page tables downwards, on failure we rely on
677 * process cleanup to remove whatever mess we made.
678 */
679 if (length != move_page_tables(vma, old_start,
680 vma, new_start, length, false))
681 return -ENOMEM;
682
683 lru_add_drain();
684 tlb_gather_mmu(&tlb, mm, old_start, old_end);
685 if (new_end > old_start) {
686 /*
687 * when the old and new regions overlap clear from new_end.
688 */
689 free_pgd_range(&tlb, new_end, old_end, new_end,
690 vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
691 } else {
692 /*
693 * otherwise, clean from old_start; this is done to avoid touching
694 * the address space in [new_end, old_start), since some architectures
695 * have constraints on va-space that make this illegal (IA64) -
696 * for the others it's just a little faster.
697 */
698 free_pgd_range(&tlb, old_start, old_end, new_end,
699 vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
700 }
701 tlb_finish_mmu(&tlb, old_start, old_end);
702
703 /*
704 * Shrink the vma to just the new range. Always succeeds.
705 */
706 vma_adjust(vma, new_start, new_end, vma->vm_pgoff, NULL);
707
708 return 0;
709 }
710
711 /*
712 * Finalizes the stack vm_area_struct. The flags and permissions are updated,
713 * the stack is optionally relocated, and some extra space is added.
714 */
715 int setup_arg_pages(struct linux_binprm *bprm,
716 unsigned long stack_top,
717 int executable_stack)
718 {
719 unsigned long ret;
720 unsigned long stack_shift;
721 struct mm_struct *mm = current->mm;
722 struct vm_area_struct *vma = bprm->vma;
723 struct vm_area_struct *prev = NULL;
724 unsigned long vm_flags;
725 unsigned long stack_base;
726 unsigned long stack_size;
727 unsigned long stack_expand;
728 unsigned long rlim_stack;
729
730 #ifdef CONFIG_STACK_GROWSUP
731 /* Limit stack size */
732 stack_base = bprm->rlim_stack.rlim_max;
733 if (stack_base > STACK_SIZE_MAX)
734 stack_base = STACK_SIZE_MAX;
735
736 /* Add space for stack randomization. */
737 stack_base += (STACK_RND_MASK << PAGE_SHIFT);
738
739 /* Make sure we didn't let the argument array grow too large. */
740 if (vma->vm_end - vma->vm_start > stack_base)
741 return -ENOMEM;
742
743 stack_base = PAGE_ALIGN(stack_top - stack_base);
744
745 stack_shift = vma->vm_start - stack_base;
746 mm->arg_start = bprm->p - stack_shift;
747 bprm->p = vma->vm_end - stack_shift;
748 #else
749 stack_top = arch_align_stack(stack_top);
750 stack_top = PAGE_ALIGN(stack_top);
751
752 if (unlikely(stack_top < mmap_min_addr) ||
753 unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
754 return -ENOMEM;
755
756 stack_shift = vma->vm_end - stack_top;
757
758 bprm->p -= stack_shift;
759 mm->arg_start = bprm->p;
760 #endif
761
762 if (bprm->loader)
763 bprm->loader -= stack_shift;
764 bprm->exec -= stack_shift;
765
766 if (down_write_killable(&mm->mmap_sem))
767 return -EINTR;
768
769 vm_flags = VM_STACK_FLAGS;
770
771 /*
772 * Adjust stack execute permissions; explicitly enable for
773 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
774 * (arch default) otherwise.
775 */
776 if (unlikely(executable_stack == EXSTACK_ENABLE_X))
777 vm_flags |= VM_EXEC;
778 else if (executable_stack == EXSTACK_DISABLE_X)
779 vm_flags &= ~VM_EXEC;
780 vm_flags |= mm->def_flags;
781 vm_flags |= VM_STACK_INCOMPLETE_SETUP;
782
783 ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
784 vm_flags);
785 if (ret)
786 goto out_unlock;
787 BUG_ON(prev != vma);
788
789 if (unlikely(vm_flags & VM_EXEC)) {
790 pr_warn_once("process '%pD4' started with executable stack\n",
791 bprm->file);
792 }
793
794 /* Move stack pages down in memory. */
795 if (stack_shift) {
796 ret = shift_arg_pages(vma, stack_shift);
797 if (ret)
798 goto out_unlock;
799 }
800
801 /* mprotect_fixup is overkill to remove the temporary stack flags */
802 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
803
804 stack_expand = 131072UL; /* randomly 32*4k (or 2*64k) pages */
805 stack_size = vma->vm_end - vma->vm_start;
806 /*
807 * Align this down to a page boundary as expand_stack
808 * will align it up.
809 */
810 rlim_stack = bprm->rlim_stack.rlim_cur & PAGE_MASK;
811 #ifdef CONFIG_STACK_GROWSUP
812 if (stack_size + stack_expand > rlim_stack)
813 stack_base = vma->vm_start + rlim_stack;
814 else
815 stack_base = vma->vm_end + stack_expand;
816 #else
817 if (stack_size + stack_expand > rlim_stack)
818 stack_base = vma->vm_end - rlim_stack;
819 else
820 stack_base = vma->vm_start - stack_expand;
821 #endif
822 current->mm->start_stack = bprm->p;
823 ret = expand_stack(vma, stack_base);
824 if (ret)
825 ret = -EFAULT;
826
827 out_unlock:
828 up_write(&mm->mmap_sem);
829 return ret;
830 }
831 EXPORT_SYMBOL(setup_arg_pages);
832
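/*
 * Illustrative sketch, not part of the original file: a load_binary()
 * implementation typically calls setup_arg_pages() after
 * begin_new_exec()/setup_new_exec(), once the final stack location is
 * known (compare fs/binfmt_elf.c):
 */
#if 0
	retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP),
				 executable_stack);
	if (retval < 0)
		goto out_free_dentry;
#endif
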
833 #else
834
835 /*
836 * Transfer the program arguments and environment from the holding pages
837 * onto the stack. The provided stack pointer is adjusted accordingly.
838 */
839 int transfer_args_to_stack(struct linux_binprm *bprm,
840 unsigned long *sp_location)
841 {
842 unsigned long index, stop, sp;
843 int ret = 0;
844
845 stop = bprm->p >> PAGE_SHIFT;
846 sp = *sp_location;
847
848 for (index = MAX_ARG_PAGES - 1; index >= stop; index--) {
849 unsigned int offset = index == stop ? bprm->p & ~PAGE_MASK : 0;
850 char *src = kmap(bprm->page[index]) + offset;
851 sp -= PAGE_SIZE - offset;
852 if (copy_to_user((void *) sp, src, PAGE_SIZE - offset) != 0)
853 ret = -EFAULT;
854 kunmap(bprm->page[index]);
855 if (ret)
856 goto out;
857 }
858
859 *sp_location = sp;
860
861 out:
862 return ret;
863 }
864 EXPORT_SYMBOL(transfer_args_to_stack);
865
866 #endif /* CONFIG_MMU */
867
868 static struct file *do_open_execat(int fd, struct filename *name, int flags)
869 {
870 struct file *file;
871 int err;
872 struct open_flags open_exec_flags = {
873 .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
874 .acc_mode = MAY_EXEC,
875 .intent = LOOKUP_OPEN,
876 .lookup_flags = LOOKUP_FOLLOW,
877 };
878
879 if ((flags & ~(AT_SYMLINK_NOFOLLOW | AT_EMPTY_PATH)) != 0)
880 return ERR_PTR(-EINVAL);
881 if (flags & AT_SYMLINK_NOFOLLOW)
882 open_exec_flags.lookup_flags &= ~LOOKUP_FOLLOW;
883 if (flags & AT_EMPTY_PATH)
884 open_exec_flags.lookup_flags |= LOOKUP_EMPTY;
885
886 file = do_filp_open(fd, name, &open_exec_flags);
887 if (IS_ERR(file))
888 goto out;
889
890 err = -EACCES;
891 if (!S_ISREG(file_inode(file)->i_mode))
892 goto exit;
893
894 if (path_noexec(&file->f_path))
895 goto exit;
896
897 err = deny_write_access(file);
898 if (err)
899 goto exit;
900
901 if (name->name[0] != '\0')
902 fsnotify_open(file);
903
904 out:
905 return file;
906
907 exit:
908 fput(file);
909 return ERR_PTR(err);
910 }
911
912 struct file *open_exec(const char *name)
913 {
914 struct filename *filename = getname_kernel(name);
915 struct file *f = ERR_CAST(filename);
916
917 if (!IS_ERR(filename)) {
918 f = do_open_execat(AT_FDCWD, filename, 0);
919 putname(filename);
920 }
921 return f;
922 }
923 EXPORT_SYMBOL(open_exec);
924
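/*
 * Illustrative sketch, not part of the original file: binfmt handlers
 * use open_exec() to pull in a secondary file such as the ELF program
 * interpreter; elf_interpreter is a hypothetical NUL-terminated path
 * (compare fs/binfmt_elf.c):
 */
#if 0
	interpreter = open_exec(elf_interpreter);
	retval = PTR_ERR(interpreter);
	if (IS_ERR(interpreter))
		goto out_free_ph;
#endif
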
925 int kernel_read_file(struct file *file, void **buf, loff_t *size,
926 loff_t max_size, enum kernel_read_file_id id)
927 {
928 loff_t i_size, pos;
929 ssize_t bytes = 0;
930 int ret;
931
932 if (!S_ISREG(file_inode(file)->i_mode) || max_size < 0)
933 return -EINVAL;
934
935 ret = deny_write_access(file);
936 if (ret)
937 return ret;
938
939 ret = security_kernel_read_file(file, id);
940 if (ret)
941 goto out;
942
943 i_size = i_size_read(file_inode(file));
944 if (i_size <= 0) {
945 ret = -EINVAL;
946 goto out;
947 }
948 if (i_size > SIZE_MAX || (max_size > 0 && i_size > max_size)) {
949 ret = -EFBIG;
950 goto out;
951 }
952
953 if (id != READING_FIRMWARE_PREALLOC_BUFFER)
954 *buf = vmalloc(i_size);
955 if (!*buf) {
956 ret = -ENOMEM;
957 goto out;
958 }
959
960 pos = 0;
961 while (pos < i_size) {
962 bytes = kernel_read(file, *buf + pos, i_size - pos, &pos);
963 if (bytes < 0) {
964 ret = bytes;
965 goto out_free;
966 }
967
968 if (bytes == 0)
969 break;
970 }
971
972 if (pos != i_size) {
973 ret = -EIO;
974 goto out_free;
975 }
976
977 ret = security_kernel_post_read_file(file, *buf, i_size, id);
978 if (!ret)
979 *size = pos;
980
981 out_free:
982 if (ret < 0) {
983 if (id != READING_FIRMWARE_PREALLOC_BUFFER) {
984 vfree(*buf);
985 *buf = NULL;
986 }
987 }
988
989 out:
990 allow_write_access(file);
991 return ret;
992 }
993 EXPORT_SYMBOL_GPL(kernel_read_file);
994
995 int kernel_read_file_from_path(const char *path, void **buf, loff_t *size,
996 loff_t max_size, enum kernel_read_file_id id)
997 {
998 struct file *file;
999 int ret;
1000
1001 if (!path || !*path)
1002 return -EINVAL;
1003
1004 file = filp_open(path, O_RDONLY, 0);
1005 if (IS_ERR(file))
1006 return PTR_ERR(file);
1007
1008 ret = kernel_read_file(file, buf, size, max_size, id);
1009 fput(file);
1010 return ret;
1011 }
1012 EXPORT_SYMBOL_GPL(kernel_read_file_from_path);
1013
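/*
 * Illustrative sketch, not part of the original file: callers such as
 * the firmware loader slurp a whole file into a kernel buffer this way;
 * the path below is hypothetical:
 */
#if 0
	void *buf = NULL;
	loff_t size;
	int ret;

	ret = kernel_read_file_from_path("/lib/firmware/demo.bin", &buf,
					 &size, INT_MAX, READING_FIRMWARE);
	if (!ret) {
		/* ... use the "size" bytes at "buf" ... */
		vfree(buf);
	}
#endif
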
1014 int kernel_read_file_from_path_initns(const char *path, void **buf,
1015 loff_t *size, loff_t max_size,
1016 enum kernel_read_file_id id)
1017 {
1018 struct file *file;
1019 struct path root;
1020 int ret;
1021
1022 if (!path || !*path)
1023 return -EINVAL;
1024
1025 task_lock(&init_task);
1026 get_fs_root(init_task.fs, &root);
1027 task_unlock(&init_task);
1028
1029 file = file_open_root(root.dentry, root.mnt, path, O_RDONLY, 0);
1030 path_put(&root);
1031 if (IS_ERR(file))
1032 return PTR_ERR(file);
1033
1034 ret = kernel_read_file(file, buf, size, max_size, id);
1035 fput(file);
1036 return ret;
1037 }
1038 EXPORT_SYMBOL_GPL(kernel_read_file_from_path_initns);
1039
1040 int kernel_read_file_from_fd(int fd, void **buf, loff_t *size, loff_t max_size,
1041 enum kernel_read_file_id id)
1042 {
1043 struct fd f = fdget(fd);
1044 int ret = -EBADF;
1045
1046 if (!f.file)
1047 goto out;
1048
1049 ret = kernel_read_file(f.file, buf, size, max_size, id);
1050 out:
1051 fdput(f);
1052 return ret;
1053 }
1054 EXPORT_SYMBOL_GPL(kernel_read_file_from_fd);
1055
1056 ssize_t read_code(struct file *file, unsigned long addr, loff_t pos, size_t len)
1057 {
1058 ssize_t res = vfs_read(file, (void __user *)addr, len, &pos);
1059 if (res > 0)
1060 flush_icache_range(addr, addr + len);
1061 return res;
1062 }
1063 EXPORT_SYMBOL(read_code);
1064
1065 /*
1066 * Maps the mm_struct mm into the current task struct.
1067 * On success, this function returns with the mutex
1068 * exec_update_mutex locked.
1069 */
1070 static int exec_mmap(struct mm_struct *mm)
1071 {
1072 struct task_struct *tsk;
1073 struct mm_struct *old_mm, *active_mm;
1074 int ret;
1075
1076 /* Notify parent that we're no longer interested in the old VM */
1077 tsk = current;
1078 old_mm = current->mm;
1079 exec_mm_release(tsk, old_mm);
1080 if (old_mm)
1081 sync_mm_rss(old_mm);
1082
1083 ret = mutex_lock_killable(&tsk->signal->exec_update_mutex);
1084 if (ret)
1085 return ret;
1086
1087 if (old_mm) {
1088 /*
1089 * Make sure that if there is a core dump in progress
1090 * for the old mm, we get out and die instead of going
1091 * through with the exec. We must hold mmap_sem around
1092 * checking core_state and changing tsk->mm.
1093 */
1094 down_read(&old_mm->mmap_sem);
1095 if (unlikely(old_mm->core_state)) {
1096 up_read(&old_mm->mmap_sem);
1097 mutex_unlock(&tsk->signal->exec_update_mutex);
1098 return -EINTR;
1099 }
1100 }
1101
1102 task_lock(tsk);
1103 active_mm = tsk->active_mm;
1104 membarrier_exec_mmap(mm);
1105 tsk->mm = mm;
1106 tsk->active_mm = mm;
1107 activate_mm(active_mm, mm);
1108 tsk->mm->vmacache_seqnum = 0;
1109 vmacache_flush(tsk);
1110 task_unlock(tsk);
1111 if (old_mm) {
1112 up_read(&old_mm->mmap_sem);
1113 BUG_ON(active_mm != old_mm);
1114 setmax_mm_hiwater_rss(&tsk->signal->maxrss, old_mm);
1115 mm_update_next_owner(old_mm);
1116 mmput(old_mm);
1117 return 0;
1118 }
1119 mmdrop(active_mm);
1120 return 0;
1121 }
1122
1123 static int de_thread(struct task_struct *tsk)
1124 {
1125 struct signal_struct *sig = tsk->signal;
1126 struct sighand_struct *oldsighand = tsk->sighand;
1127 spinlock_t *lock = &oldsighand->siglock;
1128
1129 if (thread_group_empty(tsk))
1130 goto no_thread_group;
1131
1132 /*
1133 * Kill all other threads in the thread group.
1134 */
1135 spin_lock_irq(lock);
1136 if (signal_group_exit(sig)) {
1137 /*
1138 * Another group action in progress, just
1139 * return so that the signal is processed.
1140 */
1141 spin_unlock_irq(lock);
1142 return -EAGAIN;
1143 }
1144
1145 sig->group_exit_task = tsk;
1146 sig->notify_count = zap_other_threads(tsk);
1147 if (!thread_group_leader(tsk))
1148 sig->notify_count--;
1149
1150 while (sig->notify_count) {
1151 __set_current_state(TASK_KILLABLE);
1152 spin_unlock_irq(lock);
1153 schedule();
1154 if (__fatal_signal_pending(tsk))
1155 goto killed;
1156 spin_lock_irq(lock);
1157 }
1158 spin_unlock_irq(lock);
1159
1160 /*
1161 * At this point all other threads have exited; all we have to
1162 * do is wait for the thread group leader to become inactive,
1163 * and to assume its PID:
1164 */
1165 if (!thread_group_leader(tsk)) {
1166 struct task_struct *leader = tsk->group_leader;
1167
1168 for (;;) {
1169 cgroup_threadgroup_change_begin(tsk);
1170 write_lock_irq(&tasklist_lock);
1171 /*
1172 * Do this under tasklist_lock to ensure that
1173 * exit_notify() can't miss ->group_exit_task
1174 */
1175 sig->notify_count = -1;
1176 if (likely(leader->exit_state))
1177 break;
1178 __set_current_state(TASK_KILLABLE);
1179 write_unlock_irq(&tasklist_lock);
1180 cgroup_threadgroup_change_end(tsk);
1181 schedule();
1182 if (__fatal_signal_pending(tsk))
1183 goto killed;
1184 }
1185
1186 /*
1187 * The only record we have of the real-time age of a
1188 * process, regardless of execs it's done, is start_time.
1189 * All the past CPU time is accumulated in signal_struct
1190 * from sister threads now dead. But in this non-leader
1191 * exec, nothing survives from the original leader thread,
1192 * whose birth marks the true age of this process now.
1193 * When we take on its identity by switching to its PID, we
1194 * also take its birthdate (always earlier than our own).
1195 */
1196 tsk->start_time = leader->start_time;
1197 tsk->start_boottime = leader->start_boottime;
1198
1199 BUG_ON(!same_thread_group(leader, tsk));
1200 /*
1201 * An exec() starts a new thread group with the
1202 * TGID of the previous thread group. Rehash the
1203 * two threads with a switched PID, and release
1204 * the former thread group leader:
1205 */
1206
1207 /* Become a process group leader with the old leader's pid.
1208 * The old leader becomes a thread of this thread group.
1209 */
1210 exchange_tids(tsk, leader);
1211 transfer_pid(leader, tsk, PIDTYPE_TGID);
1212 transfer_pid(leader, tsk, PIDTYPE_PGID);
1213 transfer_pid(leader, tsk, PIDTYPE_SID);
1214
1215 list_replace_rcu(&leader->tasks, &tsk->tasks);
1216 list_replace_init(&leader->sibling, &tsk->sibling);
1217
1218 tsk->group_leader = tsk;
1219 leader->group_leader = tsk;
1220
1221 tsk->exit_signal = SIGCHLD;
1222 leader->exit_signal = -1;
1223
1224 BUG_ON(leader->exit_state != EXIT_ZOMBIE);
1225 leader->exit_state = EXIT_DEAD;
1226
1227 /*
1228 * We are going to release_task()->ptrace_unlink() silently,
1229 * the tracer can sleep in do_wait(). EXIT_DEAD guarantees
1230 * the tracer won't block again waiting for this thread.
1231 */
1232 if (unlikely(leader->ptrace))
1233 __wake_up_parent(leader, leader->parent);
1234 write_unlock_irq(&tasklist_lock);
1235 cgroup_threadgroup_change_end(tsk);
1236
1237 release_task(leader);
1238 }
1239
1240 sig->group_exit_task = NULL;
1241 sig->notify_count = 0;
1242
1243 no_thread_group:
1244 /* we have changed execution domain */
1245 tsk->exit_signal = SIGCHLD;
1246
1247 BUG_ON(!thread_group_leader(tsk));
1248 return 0;
1249
1250 killed:
1251 /* protects against exit_notify() and __exit_signal() */
1252 read_lock(&tasklist_lock);
1253 sig->group_exit_task = NULL;
1254 sig->notify_count = 0;
1255 read_unlock(&tasklist_lock);
1256 return -EAGAIN;
1257 }
1258
1259
1260 /*
1261 * This function makes sure the current process has its own signal table,
1262 * so that flush_signal_handlers can later reset the handlers without
1263 * disturbing other processes. (Other processes might share the signal
1264 * table via the CLONE_SIGHAND option to clone().)
1265 */
1266 static int unshare_sighand(struct task_struct *me)
1267 {
1268 struct sighand_struct *oldsighand = me->sighand;
1269
1270 if (refcount_read(&oldsighand->count) != 1) {
1271 struct sighand_struct *newsighand;
1272 /*
1273 * This ->sighand is shared with the CLONE_SIGHAND
1274 * but not CLONE_THREAD task, switch to the new one.
1275 */
1276 newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
1277 if (!newsighand)
1278 return -ENOMEM;
1279
1280 refcount_set(&newsighand->count, 1);
1281 memcpy(newsighand->action, oldsighand->action,
1282 sizeof(newsighand->action));
1283
1284 write_lock_irq(&tasklist_lock);
1285 spin_lock(&oldsighand->siglock);
1286 rcu_assign_pointer(me->sighand, newsighand);
1287 spin_unlock(&oldsighand->siglock);
1288 write_unlock_irq(&tasklist_lock);
1289
1290 __cleanup_sighand(oldsighand);
1291 }
1292 return 0;
1293 }
1294
1295 char *__get_task_comm(char *buf, size_t buf_size, struct task_struct *tsk)
1296 {
1297 task_lock(tsk);
1298 strncpy(buf, tsk->comm, buf_size);
1299 task_unlock(tsk);
1300 return buf;
1301 }
1302 EXPORT_SYMBOL_GPL(__get_task_comm);
1303
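/*
 * Illustrative note, not part of the original file: callers normally go
 * through the get_task_comm() macro from <linux/sched.h>, which checks
 * the buffer size at compile time:
 *
 *	char comm[TASK_COMM_LEN];
 *	get_task_comm(comm, tsk);
 */
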
1304 /*
1305 * These functions flush out all traces of the currently running executable
1306 * so that a new one can be started
1307 */
1308
1309 void __set_task_comm(struct task_struct *tsk, const char *buf, bool exec)
1310 {
1311 task_lock(tsk);
1312 trace_task_rename(tsk, buf);
1313 strlcpy(tsk->comm, buf, sizeof(tsk->comm));
1314 task_unlock(tsk);
1315 perf_event_comm(tsk, exec);
1316 }
1317
1318 /*
1319 * Calling this is the point of no return. None of the failures will be
1320 * seen by userspace since either the process is already taking a fatal
1321 * signal (via de_thread() or coredump), or will have SEGV raised
1322 * (after exec_mmap()) by search_binary_handler (see below).
1323 */
1324 int begin_new_exec(struct linux_binprm * bprm)
1325 {
1326 struct task_struct *me = current;
1327 int retval;
1328
1329 /* Once we are committed compute the creds */
1330 retval = bprm_creds_from_file(bprm);
1331 if (retval)
1332 return retval;
1333
1334 /*
1335 * Ensure all future errors are fatal.
1336 */
1337 bprm->point_of_no_return = true;
1338
1339 /*
1340 * Make this the only thread in the thread group.
1341 */
1342 retval = de_thread(me);
1343 if (retval)
1344 goto out;
1345
1346 /*
1347 * Must be called _before_ exec_mmap() as bprm->mm is
1348 * not visible until then. This also enables the update
1349 * to be lockless.
1350 */
1351 set_mm_exe_file(bprm->mm, bprm->file);
1352
1353 /* If the binary is not readable then enforce mm->dumpable=0 */
1354 would_dump(bprm, bprm->file);
1355 if (bprm->have_execfd)
1356 would_dump(bprm, bprm->executable);
1357
1358 /*
1359 * Release all of the old mmap stuff
1360 */
1361 acct_arg_size(bprm, 0);
1362 retval = exec_mmap(bprm->mm);
1363 if (retval)
1364 goto out;
1365
1366 bprm->mm = NULL;
1367
1368 #ifdef CONFIG_POSIX_TIMERS
1369 exit_itimers(me->signal);
1370 flush_itimer_signals();
1371 #endif
1372
1373 /*
1374 * Make the signal table private.
1375 */
1376 retval = unshare_sighand(me);
1377 if (retval)
1378 goto out_unlock;
1379
1380 set_fs(USER_DS);
1381 me->flags &= ~(PF_RANDOMIZE | PF_FORKNOEXEC | PF_KTHREAD |
1382 PF_NOFREEZE | PF_NO_SETAFFINITY);
1383 flush_thread();
1384 me->personality &= ~bprm->per_clear;
1385
1386 /*
1387 * We have to apply CLOEXEC before we change whether the process is
1388 * dumpable (in setup_new_exec) to avoid a race with a process in userspace
1389 * trying to access the should-be-closed file descriptors of a process
1390 * undergoing exec(2).
1391 */
1392 do_close_on_exec(me->files);
1393
1394 if (bprm->secureexec) {
1395 /* Make sure parent cannot signal privileged process. */
1396 me->pdeath_signal = 0;
1397
1398 /*
1399 * For secureexec, reset the stack limit to sane default to
1400 * avoid bad behavior from the prior rlimits. This has to
1401 * happen before arch_pick_mmap_layout(), which examines
1402 * RLIMIT_STACK, but after the point of no return to avoid
1403 * needing to clean up the change on failure.
1404 */
1405 if (bprm->rlim_stack.rlim_cur > _STK_LIM)
1406 bprm->rlim_stack.rlim_cur = _STK_LIM;
1407 }
1408
1409 me->sas_ss_sp = me->sas_ss_size = 0;
1410
1411 /*
1412 * Figure out dumpability. Note that checking only current here
1413 * is wrong, but userspace depends on it. This should be testing
1414 * bprm->secureexec instead.
1415 */
1416 if (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP ||
1417 !(uid_eq(current_euid(), current_uid()) &&
1418 gid_eq(current_egid(), current_gid())))
1419 set_dumpable(current->mm, suid_dumpable);
1420 else
1421 set_dumpable(current->mm, SUID_DUMP_USER);
1422
1423 perf_event_exec();
1424 __set_task_comm(me, kbasename(bprm->filename), true);
1425
1426 /* An exec changes our domain. We are no longer part of the thread
1427 group */
1428 WRITE_ONCE(me->self_exec_id, me->self_exec_id + 1);
1429 flush_signal_handlers(me, 0);
1430
1431 /*
1432 * install the new credentials for this executable
1433 */
1434 security_bprm_committing_creds(bprm);
1435
1436 commit_creds(bprm->cred);
1437 bprm->cred = NULL;
1438
1439 /*
1440 * Disable monitoring for regular users
1441 * when executing setuid binaries. Must
1442 * wait until new credentials are committed
1443 * by commit_creds() above
1444 */
1445 if (get_dumpable(me->mm) != SUID_DUMP_USER)
1446 perf_event_exit_task(me);
1447 /*
1448 * cred_guard_mutex must be held at least to this point to prevent
1449 * ptrace_attach() from altering our determination of the task's
1450 * credentials; any time after this it may be unlocked.
1451 */
1452 security_bprm_committed_creds(bprm);
1453
1454 /* Pass the opened binary to the interpreter. */
1455 if (bprm->have_execfd) {
1456 retval = get_unused_fd_flags(0);
1457 if (retval < 0)
1458 goto out_unlock;
1459 fd_install(retval, bprm->executable);
1460 bprm->executable = NULL;
1461 bprm->execfd = retval;
1462 }
1463 return 0;
1464
1465 out_unlock:
1466 mutex_unlock(&me->signal->exec_update_mutex);
1467 out:
1468 return retval;
1469 }
1470 EXPORT_SYMBOL(begin_new_exec);
1471
1472 void would_dump(struct linux_binprm *bprm, struct file *file)
1473 {
1474 struct inode *inode = file_inode(file);
1475 if (inode_permission(inode, MAY_READ) < 0) {
1476 struct user_namespace *old, *user_ns;
1477 bprm->interp_flags |= BINPRM_FLAGS_ENFORCE_NONDUMP;
1478
1479 /* Ensure mm->user_ns contains the executable */
1480 user_ns = old = bprm->mm->user_ns;
1481 while ((user_ns != &init_user_ns) &&
1482 !privileged_wrt_inode_uidgid(user_ns, inode))
1483 user_ns = user_ns->parent;
1484
1485 if (old != user_ns) {
1486 bprm->mm->user_ns = get_user_ns(user_ns);
1487 put_user_ns(old);
1488 }
1489 }
1490 }
1491 EXPORT_SYMBOL(would_dump);
1492
1493 void setup_new_exec(struct linux_binprm * bprm)
1494 {
1495 /* Setup things that can depend upon the personality */
1496 struct task_struct *me = current;
1497
1498 arch_pick_mmap_layout(me->mm, &bprm->rlim_stack);
1499
1500 arch_setup_new_exec();
1501
1502 /* Set the new mm task size. We have to do that late because it may
1503 * depend on TIF_32BIT which is only updated in flush_thread() on
1504 * some architectures like powerpc
1505 */
1506 me->mm->task_size = TASK_SIZE;
1507 mutex_unlock(&me->signal->exec_update_mutex);
1508 mutex_unlock(&me->signal->cred_guard_mutex);
1509 }
1510 EXPORT_SYMBOL(setup_new_exec);
1511
1512 /* Runs immediately before start_thread() takes over. */
1513 void finalize_exec(struct linux_binprm *bprm)
1514 {
1515 /* Store any stack rlimit changes before starting thread. */
1516 task_lock(current->group_leader);
1517 current->signal->rlim[RLIMIT_STACK] = bprm->rlim_stack;
1518 task_unlock(current->group_leader);
1519 }
1520 EXPORT_SYMBOL(finalize_exec);
1521
1522 /*
1523 * Prepare credentials and lock ->cred_guard_mutex.
1524 * setup_new_exec() commits the new creds and drops the lock.
1525 * Or, if exec fails before, free_bprm() should release ->cred and
1526 * unlock.
1527 */
1528 static int prepare_bprm_creds(struct linux_binprm *bprm)
1529 {
1530 if (mutex_lock_interruptible(&current->signal->cred_guard_mutex))
1531 return -ERESTARTNOINTR;
1532
1533 bprm->cred = prepare_exec_creds();
1534 if (likely(bprm->cred))
1535 return 0;
1536
1537 mutex_unlock(&current->signal->cred_guard_mutex);
1538 return -ENOMEM;
1539 }
1540
1541 static void free_bprm(struct linux_binprm *bprm)
1542 {
1543 free_arg_pages(bprm);
1544 if (bprm->cred) {
1545 mutex_unlock(&current->signal->cred_guard_mutex);
1546 abort_creds(bprm->cred);
1547 }
1548 if (bprm->file) {
1549 allow_write_access(bprm->file);
1550 fput(bprm->file);
1551 }
1552 if (bprm->executable)
1553 fput(bprm->executable);
1554 /* If a binfmt changed the interp, free it. */
1555 if (bprm->interp != bprm->filename)
1556 kfree(bprm->interp);
1557 kfree(bprm);
1558 }
1559
1560 int bprm_change_interp(const char *interp, struct linux_binprm *bprm)
1561 {
1562 /* If a binfmt changed the interp, free it first. */
1563 if (bprm->interp != bprm->filename)
1564 kfree(bprm->interp);
1565 bprm->interp = kstrdup(interp, GFP_KERNEL);
1566 if (!bprm->interp)
1567 return -ENOMEM;
1568 return 0;
1569 }
1570 EXPORT_SYMBOL(bprm_change_interp);
1571
1572 /*
1573 * determine how safe it is to execute the proposed program
1574 * - the caller must hold ->cred_guard_mutex to protect against
1575 * PTRACE_ATTACH or seccomp thread-sync
1576 */
1577 static void check_unsafe_exec(struct linux_binprm *bprm)
1578 {
1579 struct task_struct *p = current, *t;
1580 unsigned n_fs;
1581
1582 if (p->ptrace)
1583 bprm->unsafe |= LSM_UNSAFE_PTRACE;
1584
1585 /*
1586 * This isn't strictly necessary, but it makes it harder for LSMs to
1587 * mess up.
1588 */
1589 if (task_no_new_privs(current))
1590 bprm->unsafe |= LSM_UNSAFE_NO_NEW_PRIVS;
1591
1592 t = p;
1593 n_fs = 1;
1594 spin_lock(&p->fs->lock);
1595 rcu_read_lock();
1596 while_each_thread(p, t) {
1597 if (t->fs == p->fs)
1598 n_fs++;
1599 }
1600 rcu_read_unlock();
1601
1602 if (p->fs->users > n_fs)
1603 bprm->unsafe |= LSM_UNSAFE_SHARE;
1604 else
1605 p->fs->in_exec = 1;
1606 spin_unlock(&p->fs->lock);
1607 }
1608
1609 static void bprm_fill_uid(struct linux_binprm *bprm, struct file *file)
1610 {
1611 /* Handle suid and sgid on files */
1612 struct inode *inode;
1613 unsigned int mode;
1614 kuid_t uid;
1615 kgid_t gid;
1616
1617 if (!mnt_may_suid(file->f_path.mnt))
1618 return;
1619
1620 if (task_no_new_privs(current))
1621 return;
1622
1623 inode = file->f_path.dentry->d_inode;
1624 mode = READ_ONCE(inode->i_mode);
1625 if (!(mode & (S_ISUID|S_ISGID)))
1626 return;
1627
1628 /* Be careful if suid/sgid is set */
1629 inode_lock(inode);
1630
1631 /* reload atomically mode/uid/gid now that lock held */
1632 mode = inode->i_mode;
1633 uid = inode->i_uid;
1634 gid = inode->i_gid;
1635 inode_unlock(inode);
1636
1637 /* We ignore suid/sgid if there are no mappings for them in the ns */
1638 if (!kuid_has_mapping(bprm->cred->user_ns, uid) ||
1639 !kgid_has_mapping(bprm->cred->user_ns, gid))
1640 return;
1641
1642 if (mode & S_ISUID) {
1643 bprm->per_clear |= PER_CLEAR_ON_SETID;
1644 bprm->cred->euid = uid;
1645 }
1646
1647 if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
1648 bprm->per_clear |= PER_CLEAR_ON_SETID;
1649 bprm->cred->egid = gid;
1650 }
1651 }
1652
1653 /*
1654 * Compute bprm->cred based upon the final binary.
1655 */
1656 static int bprm_creds_from_file(struct linux_binprm *bprm)
1657 {
1658 /* Compute creds based on which file? */
1659 struct file *file = bprm->execfd_creds ? bprm->executable : bprm->file;
1660
1661 bprm_fill_uid(bprm, file);
1662 return security_bprm_creds_from_file(bprm, file);
1663 }
1664
1665 /*
1666 * Fill the binprm structure from the inode.
1667 * Read the first BINPRM_BUF_SIZE bytes
1668 *
1669 * This may be called multiple times for binary chains (scripts for example).
1670 */
1671 static int prepare_binprm(struct linux_binprm *bprm)
1672 {
1673 loff_t pos = 0;
1674
1675 memset(bprm->buf, 0, BINPRM_BUF_SIZE);
1676 return kernel_read(bprm->file, bprm->buf, BINPRM_BUF_SIZE, &pos);
1677 }
1678
1679 /*
1680 * Arguments are '\0' separated strings found at the location bprm->p
1681 * points to; chop off the first by relocating bprm->p to right after
1682 * the first '\0' encountered.
1683 */
1684 int remove_arg_zero(struct linux_binprm *bprm)
1685 {
1686 int ret = 0;
1687 unsigned long offset;
1688 char *kaddr;
1689 struct page *page;
1690
1691 if (!bprm->argc)
1692 return 0;
1693
1694 do {
1695 offset = bprm->p & ~PAGE_MASK;
1696 page = get_arg_page(bprm, bprm->p, 0);
1697 if (!page) {
1698 ret = -EFAULT;
1699 goto out;
1700 }
1701 kaddr = kmap_atomic(page);
1702
1703 for (; offset < PAGE_SIZE && kaddr[offset];
1704 offset++, bprm->p++)
1705 ;
1706
1707 kunmap_atomic(kaddr);
1708 put_arg_page(page);
1709 } while (offset == PAGE_SIZE);
1710
1711 bprm->p++;
1712 bprm->argc--;
1713 ret = 0;
1714
1715 out:
1716 return ret;
1717 }
1718 EXPORT_SYMBOL(remove_arg_zero);
1719
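/*
 * Illustrative sketch, not part of the original file: this is roughly
 * how a "#!" handler rebuilds the argument vector, pushing strings in
 * reverse order because the stack grows down (compare
 * fs/binfmt_script.c; i_name stands in for the parsed interpreter
 * path):
 */
#if 0
	retval = remove_arg_zero(bprm);		/* drop the script's argv[0] */
	if (retval)
		return retval;
	retval = copy_string_kernel(bprm->interp, bprm); /* script path */
	if (retval < 0)
		return retval;
	bprm->argc++;
	retval = copy_string_kernel(i_name, bprm);	/* new argv[0] */
	if (retval < 0)
		return retval;
	bprm->argc++;
	retval = bprm_change_interp(i_name, bprm);
#endif
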
1720 #define printable(c) (((c)=='\t') || ((c)=='\n') || (0x20<=(c) && (c)<=0x7e))
1721 /*
1722 * cycle through the list of binary format handlers until one recognizes the image
1723 */
1724 static int search_binary_handler(struct linux_binprm *bprm)
1725 {
1726 bool need_retry = IS_ENABLED(CONFIG_MODULES);
1727 struct linux_binfmt *fmt;
1728 int retval;
1729
1730 retval = prepare_binprm(bprm);
1731 if (retval < 0)
1732 return retval;
1733
1734 retval = security_bprm_check(bprm);
1735 if (retval)
1736 return retval;
1737
1738 retval = -ENOENT;
1739 retry:
1740 read_lock(&binfmt_lock);
1741 list_for_each_entry(fmt, &formats, lh) {
1742 if (!try_module_get(fmt->module))
1743 continue;
1744 read_unlock(&binfmt_lock);
1745
1746 retval = fmt->load_binary(bprm);
1747
1748 read_lock(&binfmt_lock);
1749 put_binfmt(fmt);
1750 if (bprm->point_of_no_return || (retval != -ENOEXEC)) {
1751 read_unlock(&binfmt_lock);
1752 return retval;
1753 }
1754 }
1755 read_unlock(&binfmt_lock);
1756
1757 if (need_retry) {
1758 if (printable(bprm->buf[0]) && printable(bprm->buf[1]) &&
1759 printable(bprm->buf[2]) && printable(bprm->buf[3]))
1760 return retval;
1761 if (request_module("binfmt-%04x", *(ushort *)(bprm->buf + 2)) < 0)
1762 return retval;
1763 need_retry = false;
1764 goto retry;
1765 }
1766
1767 return retval;
1768 }
1769
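/*
 * Illustrative note, not part of the original file: the
 * request_module() above encodes bytes 2-3 of the image into an alias
 * such as "binfmt-464c" for ELF images on little-endian machines (the
 * 'L','F' of "\177ELF" read as a native u16). A modular format can
 * declare itself loadable on demand with MODULE_ALIAS_BINFMT() from
 * <linux/binfmts.h>.
 */
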
1770 static int exec_binprm(struct linux_binprm *bprm)
1771 {
1772 pid_t old_pid, old_vpid;
1773 int ret, depth;
1774
1775 /* Need to fetch pid before load_binary changes it */
1776 old_pid = current->pid;
1777 rcu_read_lock();
1778 old_vpid = task_pid_nr_ns(current, task_active_pid_ns(current->parent));
1779 rcu_read_unlock();
1780
1781 /* This allows up to 5 levels of binfmt rewrites before failing hard. */
1782 for (depth = 0;; depth++) {
1783 struct file *exec;
1784 if (depth > 5)
1785 return -ELOOP;
1786
1787 ret = search_binary_handler(bprm);
1788 if (ret < 0)
1789 return ret;
1790 if (!bprm->interpreter)
1791 break;
1792
1793 exec = bprm->file;
1794 bprm->file = bprm->interpreter;
1795 bprm->interpreter = NULL;
1796
1797 allow_write_access(exec);
1798 if (unlikely(bprm->have_execfd)) {
1799 if (bprm->executable) {
1800 fput(exec);
1801 return -ENOEXEC;
1802 }
1803 bprm->executable = exec;
1804 } else
1805 fput(exec);
1806 }
1807
1808 audit_bprm(bprm);
1809 trace_sched_process_exec(current, old_pid, bprm);
1810 ptrace_event(PTRACE_EVENT_EXEC, old_vpid);
1811 proc_exec_connector(current);
1812 return 0;
1813 }
1814
1815 /*
1816 * sys_execve() executes a new program.
1817 */
1818 static int __do_execve_file(int fd, struct filename *filename,
1819 struct user_arg_ptr argv,
1820 struct user_arg_ptr envp,
1821 int flags, struct file *file)
1822 {
1823 char *pathbuf = NULL;
1824 struct linux_binprm *bprm;
1825 struct files_struct *displaced;
1826 int retval;
1827
1828 if (IS_ERR(filename))
1829 return PTR_ERR(filename);
1830
1831 /*
1832 * We move the actual failure in case of RLIMIT_NPROC excess from
1833 * set*uid() to execve() because too many poorly written programs
1834 * don't check setuid() return code. Here we additionally recheck
1835 * whether NPROC limit is still exceeded.
1836 */
1837 if ((current->flags & PF_NPROC_EXCEEDED) &&
1838 atomic_read(&current_user()->processes) > rlimit(RLIMIT_NPROC)) {
1839 retval = -EAGAIN;
1840 goto out_ret;
1841 }
1842
1843 /* We're below the limit (still or again), so we don't want to make
1844 * further execve() calls fail. */
1845 current->flags &= ~PF_NPROC_EXCEEDED;
1846
1847 retval = unshare_files(&displaced);
1848 if (retval)
1849 goto out_ret;
1850
1851 retval = -ENOMEM;
1852 bprm = kzalloc(sizeof(*bprm), GFP_KERNEL);
1853 if (!bprm)
1854 goto out_files;
1855
1856 retval = prepare_bprm_creds(bprm);
1857 if (retval)
1858 goto out_free;
1859
1860 check_unsafe_exec(bprm);
1861 current->in_execve = 1;
1862
1863 if (!file)
1864 file = do_open_execat(fd, filename, flags);
1865 retval = PTR_ERR(file);
1866 if (IS_ERR(file))
1867 goto out_unmark;
1868
1869 sched_exec();
1870
1871 bprm->file = file;
1872 if (!filename) {
1873 bprm->filename = "none";
1874 } else if (fd == AT_FDCWD || filename->name[0] == '/') {
1875 bprm->filename = filename->name;
1876 } else {
1877 if (filename->name[0] == '\0')
1878 pathbuf = kasprintf(GFP_KERNEL, "/dev/fd/%d", fd);
1879 else
1880 pathbuf = kasprintf(GFP_KERNEL, "/dev/fd/%d/%s",
1881 fd, filename->name);
1882 if (!pathbuf) {
1883 retval = -ENOMEM;
1884 goto out_unmark;
1885 }
1886 /*
1887 * Record that a name derived from an O_CLOEXEC fd will be
1888 * inaccessible after exec. Relies on having exclusive access to
1889 * current->files (due to unshare_files above).
1890 */
1891 if (close_on_exec(fd, rcu_dereference_raw(current->files->fdt)))
1892 bprm->interp_flags |= BINPRM_FLAGS_PATH_INACCESSIBLE;
1893 bprm->filename = pathbuf;
1894 }
1895 bprm->interp = bprm->filename;
1896
1897 retval = bprm_mm_init(bprm);
1898 if (retval)
1899 goto out_unmark;
1900
1901 retval = prepare_arg_pages(bprm, argv, envp);
1902 if (retval < 0)
1903 goto out;
1904
1905 /* Set the unchanging part of bprm->cred */
1906 retval = security_bprm_creds_for_exec(bprm);
1907 if (retval)
1908 goto out;
1909
1910 retval = copy_string_kernel(bprm->filename, bprm);
1911 if (retval < 0)
1912 goto out;
1913
1914 bprm->exec = bprm->p;
1915 retval = copy_strings(bprm->envc, envp, bprm);
1916 if (retval < 0)
1917 goto out;
1918
1919 retval = copy_strings(bprm->argc, argv, bprm);
1920 if (retval < 0)
1921 goto out;
1922
1923 retval = exec_binprm(bprm);
1924 if (retval < 0)
1925 goto out;
1926
1927 /* execve succeeded */
1928 current->fs->in_exec = 0;
1929 current->in_execve = 0;
1930 rseq_execve(current);
1931 acct_update_integrals(current);
1932 task_numa_free(current, false);
1933 free_bprm(bprm);
1934 kfree(pathbuf);
1935 if (filename)
1936 putname(filename);
1937 if (displaced)
1938 put_files_struct(displaced);
1939 return retval;
1940
1941 out:
1942 /*
1943 * If past the point of no return, ensure the code never
1944 * returns to the userspace process. Use an existing fatal
1945 * signal if present otherwise terminate the process with
1946 * SIGSEGV.
1947 */
1948 if (bprm->point_of_no_return && !fatal_signal_pending(current))
1949 force_sigsegv(SIGSEGV);
1950 if (bprm->mm) {
1951 acct_arg_size(bprm, 0);
1952 mmput(bprm->mm);
1953 }
1954
1955 out_unmark:
1956 current->fs->in_exec = 0;
1957 current->in_execve = 0;
1958
1959 out_free:
1960 free_bprm(bprm);
1961 kfree(pathbuf);
1962
1963 out_files:
1964 if (displaced)
1965 reset_files_struct(displaced);
1966 out_ret:
1967 if (filename)
1968 putname(filename);
1969 return retval;
1970 }
1971
1972 static int do_execveat_common(int fd, struct filename *filename,
1973 struct user_arg_ptr argv,
1974 struct user_arg_ptr envp,
1975 int flags)
1976 {
1977 return __do_execve_file(fd, filename, argv, envp, flags, NULL);
1978 }
1979
1980 int do_execve_file(struct file *file, void *__argv, void *__envp)
1981 {
1982 struct user_arg_ptr argv = { .ptr.native = __argv };
1983 struct user_arg_ptr envp = { .ptr.native = __envp };
1984
1985 return __do_execve_file(AT_FDCWD, NULL, argv, envp, 0, file);
1986 }
1987
1988 int do_execve(struct filename *filename,
1989 const char __user *const __user *__argv,
1990 const char __user *const __user *__envp)
1991 {
1992 struct user_arg_ptr argv = { .ptr.native = __argv };
1993 struct user_arg_ptr envp = { .ptr.native = __envp };
1994 return do_execveat_common(AT_FDCWD, filename, argv, envp, 0);
1995 }
1996
1997 int do_execveat(int fd, struct filename *filename,
1998 const char __user *const __user *__argv,
1999 const char __user *const __user *__envp,
2000 int flags)
2001 {
2002 struct user_arg_ptr argv = { .ptr.native = __argv };
2003 struct user_arg_ptr envp = { .ptr.native = __envp };
2004
2005 return do_execveat_common(fd, filename, argv, envp, flags);
2006 }
2007
2008 #ifdef CONFIG_COMPAT
2009 static int compat_do_execve(struct filename *filename,
2010 const compat_uptr_t __user *__argv,
2011 const compat_uptr_t __user *__envp)
2012 {
2013 struct user_arg_ptr argv = {
2014 .is_compat = true,
2015 .ptr.compat = __argv,
2016 };
2017 struct user_arg_ptr envp = {
2018 .is_compat = true,
2019 .ptr.compat = __envp,
2020 };
2021 return do_execveat_common(AT_FDCWD, filename, argv, envp, 0);
2022 }
2023
2024 static int compat_do_execveat(int fd, struct filename *filename,
2025 const compat_uptr_t __user *__argv,
2026 const compat_uptr_t __user *__envp,
2027 int flags)
2028 {
2029 struct user_arg_ptr argv = {
2030 .is_compat = true,
2031 .ptr.compat = __argv,
2032 };
2033 struct user_arg_ptr envp = {
2034 .is_compat = true,
2035 .ptr.compat = __envp,
2036 };
2037 return do_execveat_common(fd, filename, argv, envp, flags);
2038 }
2039 #endif
2040
2041 void set_binfmt(struct linux_binfmt *new)
2042 {
2043 struct mm_struct *mm = current->mm;
2044
2045 if (mm->binfmt)
2046 module_put(mm->binfmt->module);
2047
2048 mm->binfmt = new;
2049 if (new)
2050 __module_get(new->module);
2051 }
2052 EXPORT_SYMBOL(set_binfmt);
2053
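/*
 * Illustrative note, not part of the original file: a handler claims
 * the new mm near the end of a successful load_binary(), e.g.
 * set_binfmt(&elf_format) in fs/binfmt_elf.c; the module reference
 * taken here is dropped again when the mm is finally destroyed.
 */
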
2054 /*
2055 * set_dumpable stores the three-valued SUID_DUMP_* setting into mm->flags.
2056 */
2057 void set_dumpable(struct mm_struct *mm, int value)
2058 {
2059 if (WARN_ON((unsigned)value > SUID_DUMP_ROOT))
2060 return;
2061
2062 set_mask_bits(&mm->flags, MMF_DUMPABLE_MASK, value);
2063 }
2064
2065 SYSCALL_DEFINE3(execve,
2066 const char __user *, filename,
2067 const char __user *const __user *, argv,
2068 const char __user *const __user *, envp)
2069 {
2070 return do_execve(getname(filename), argv, envp);
2071 }
2072
2073 SYSCALL_DEFINE5(execveat,
2074 int, fd, const char __user *, filename,
2075 const char __user *const __user *, argv,
2076 const char __user *const __user *, envp,
2077 int, flags)
2078 {
2079 int lookup_flags = (flags & AT_EMPTY_PATH) ? LOOKUP_EMPTY : 0;
2080
2081 return do_execveat(fd,
2082 getname_flags(filename, lookup_flags, NULL),
2083 argv, envp, flags);
2084 }
2085
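/*
 * Illustrative sketch, not part of the original file: from userspace,
 * execveat() with AT_EMPTY_PATH is how glibc's fexecve(3) can execute
 * an already-open file descriptor:
 *
 *	int fd = open("/bin/true", O_PATH | O_CLOEXEC);
 *	char *argv[] = { "true", NULL };
 *	syscall(SYS_execveat, fd, "", argv, environ, AT_EMPTY_PATH);
 */
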
2086 #ifdef CONFIG_COMPAT
2087 COMPAT_SYSCALL_DEFINE3(execve, const char __user *, filename,
2088 const compat_uptr_t __user *, argv,
2089 const compat_uptr_t __user *, envp)
2090 {
2091 return compat_do_execve(getname(filename), argv, envp);
2092 }
2093
2094 COMPAT_SYSCALL_DEFINE5(execveat, int, fd,
2095 const char __user *, filename,
2096 const compat_uptr_t __user *, argv,
2097 const compat_uptr_t __user *, envp,
2098 int, flags)
2099 {
2100 int lookup_flags = (flags & AT_EMPTY_PATH) ? LOOKUP_EMPTY : 0;
2101
2102 return compat_do_execveat(fd,
2103 getname_flags(filename, lookup_flags, NULL),
2104 argv, envp, flags);
2105 }
2106 #endif