/*
 *  linux/fs/exec.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * #!-checking implemented by tytso.
 */
/*
 * Demand-loading implemented 01.12.91 - no need to read anything but
 * the header into memory. The inode of the executable is put into
 * "current->executable", and page faults do the actual loading. Clean.
 *
 * Once more I can proudly say that linux stood up to being changed: it
 * was less than 2 hours work to get demand-loading completely implemented.
 *
 * Demand loading changed July 1993 by Eric Youngdale.  Use mmap instead,
 * current->executable is only used by the procfs.  This allows a dispatch
 * table to check for several different types of binary formats.  We keep
 * trying until we recognize the file or we run out of supported binary
 * formats.
 */
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/vmacache.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/swap.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/perf_event.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/key.h>
#include <linux/personality.h>
#include <linux/binfmts.h>
#include <linux/utsname.h>
#include <linux/pid_namespace.h>
#include <linux/module.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/audit.h>
#include <linux/tracehook.h>
#include <linux/kmod.h>
#include <linux/fsnotify.h>
#include <linux/fs_struct.h>
#include <linux/pipe_fs_i.h>
#include <linux/oom.h>
#include <linux/compat.h>
#include <linux/user_namespace.h>

#include <trace/events/fs.h>

#include <asm/uaccess.h>
#include <asm/mmu_context.h>

#include <trace/events/task.h>

#include <trace/events/sched.h>
int suid_dumpable = 0;
static LIST_HEAD(formats);
static DEFINE_RWLOCK(binfmt_lock);
void __register_binfmt(struct linux_binfmt * fmt, int insert)
{
	BUG_ON(!fmt);
	if (WARN_ON(!fmt->load_binary))
		return;
	write_lock(&binfmt_lock);
	insert ? list_add(&fmt->lh, &formats) :
		list_add_tail(&fmt->lh, &formats);
	write_unlock(&binfmt_lock);
}

EXPORT_SYMBOL(__register_binfmt);
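
/*
 * Illustrative sketch (not part of this file): a binary-format module
 * normally registers itself through the register_binfmt()/insert_binfmt()
 * wrappers from <linux/binfmts.h>, which call __register_binfmt() with
 * insert == 0 or 1.  The handler and init function below are hypothetical:
 *
 *	static struct linux_binfmt example_format = {
 *		.module      = THIS_MODULE,
 *		.load_binary = load_example_binary,	// hypothetical loader
 *	};
 *
 *	static int __init init_example_binfmt(void)
 *	{
 *		register_binfmt(&example_format);	// list_add_tail() path
 *		return 0;
 *	}
 */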
void unregister_binfmt(struct linux_binfmt * fmt)
{
	write_lock(&binfmt_lock);
	list_del(&fmt->lh);
	write_unlock(&binfmt_lock);
}

EXPORT_SYMBOL(unregister_binfmt);
static inline void put_binfmt(struct linux_binfmt * fmt)
{
	module_put(fmt->module);
}
bool path_noexec(const struct path *path)
{
	return (path->mnt->mnt_flags & MNT_NOEXEC) ||
	       (path->mnt->mnt_sb->s_iflags & SB_I_NOEXEC);
}
EXPORT_SYMBOL(path_noexec);
bool path_nosuid(const struct path *path)
{
	return !mnt_may_suid(path->mnt) ||
	       (path->mnt->mnt_sb->s_iflags & SB_I_NOSUID);
}
EXPORT_SYMBOL(path_nosuid);
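
/*
 * Note (illustrative, not from the original source): both helpers answer
 * "may this path be used for exec / set[ug]id?" at the mount level.  A file
 * on a filesystem mounted "noexec" fails path_noexec(), and one on a
 * "nosuid" mount fails path_nosuid(), regardless of the file's own mode
 * bits.
 */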
#ifdef CONFIG_USELIB
/*
 * Note that a shared library must be both readable and executable due to
 * security reasons.
 *
 * Also note that the address to load from is taken from the file itself.
 */
SYSCALL_DEFINE1(uselib, const char __user *, library)
{
	struct linux_binfmt *fmt;
	struct file *file;
	struct filename *tmp = getname(library);
	int error = PTR_ERR(tmp);
	static const struct open_flags uselib_flags = {
		.open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
		.acc_mode = MAY_READ | MAY_EXEC | MAY_OPEN,
		.intent = LOOKUP_OPEN,
		.lookup_flags = LOOKUP_FOLLOW,
	};

	if (IS_ERR(tmp))
		goto out;

	file = do_filp_open(AT_FDCWD, tmp, &uselib_flags);
	putname(tmp);
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto out;

	error = -EINVAL;
	if (!S_ISREG(file_inode(file)->i_mode))
		goto exit;

	error = -EACCES;
	if (path_noexec(&file->f_path))
		goto exit;

	fsnotify_open(file);

	error = -ENOEXEC;

	read_lock(&binfmt_lock);
	list_for_each_entry(fmt, &formats, lh) {
		if (!fmt->load_shlib)
			continue;
		if (!try_module_get(fmt->module))
			continue;
		read_unlock(&binfmt_lock);
		error = fmt->load_shlib(file);
		read_lock(&binfmt_lock);
		put_binfmt(fmt);
		if (error != -ENOEXEC)
			break;
	}
	read_unlock(&binfmt_lock);
exit:
	fput(file);
out:
	return error;
}
#endif /* #ifdef CONFIG_USELIB */
#ifdef CONFIG_MMU

/*
 * The nascent bprm->mm is not visible until exec_mmap() but it can
 * use a lot of memory, so account these pages temporarily in current->mm
 * for oom_badness()->get_mm_rss(). Once exec succeeds or fails, we
 * change the counter back via acct_arg_size(0).
 */
static void acct_arg_size(struct linux_binprm *bprm, unsigned long pages)
{
	struct mm_struct *mm = current->mm;
	long diff = (long)(pages - bprm->vma_pages);

	if (!mm || !diff)
		return;

	bprm->vma_pages = pages;
	add_mm_counter(mm, MM_ANONPAGES, diff);
}
static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
		int write)
{
	struct page *page;
	int ret;

#ifdef CONFIG_STACK_GROWSUP
	if (write) {
		ret = expand_downwards(bprm->vma, pos);
		if (ret < 0)
			return NULL;
	}
#endif
	ret = get_user_pages(current, bprm->mm, pos,
			1, write, 1, &page, NULL);
	if (ret <= 0)
		return NULL;

	if (write) {
		unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start;
		struct rlimit *rlim;

		acct_arg_size(bprm, size / PAGE_SIZE);

		/*
		 * We've historically supported up to 32 pages (ARG_MAX)
		 * of argument strings even with small stacks
		 */
		if (size <= ARG_MAX)
			return page;

		/*
		 * Limit to 1/4-th the stack size for the argv+env strings.
		 * This ensures that:
		 *  - the remaining binfmt code will not run out of stack space,
		 *  - the program will have a reasonable amount of stack left
		 *    to work from.
		 */
		rlim = current->signal->rlim;
		if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4) {
			put_page(page);
			return NULL;
		}
	}

	return page;
}
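
/*
 * Worked example (illustrative): with the common RLIMIT_STACK default of
 * 8 MiB, the 1/4 check above caps the argv+env strings at 8 MiB / 4 = 2 MiB,
 * leaving at least three quarters of the stack for the program itself.
 */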
static void put_arg_page(struct page *page)
{
	put_page(page);
}
static void free_arg_page(struct linux_binprm *bprm, int i)
{
}
static void free_arg_pages(struct linux_binprm *bprm)
{
}
static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
		struct page *page)
{
	flush_cache_page(bprm->vma, pos, page_to_pfn(page));
}
static int __bprm_mm_init(struct linux_binprm *bprm)
{
	int err;
	struct vm_area_struct *vma = NULL;
	struct mm_struct *mm = bprm->mm;

	bprm->vma = vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
	if (!vma)
		return -ENOMEM;

	down_write(&mm->mmap_sem);
	vma->vm_mm = mm;

	/*
	 * Place the stack at the largest stack address the architecture
	 * supports. Later, we'll move this to an appropriate place. We don't
	 * use STACK_TOP because that can depend on attributes which aren't
	 * configured yet.
	 */
	BUILD_BUG_ON(VM_STACK_FLAGS & VM_STACK_INCOMPLETE_SETUP);
	vma->vm_end = STACK_TOP_MAX;
	vma->vm_start = vma->vm_end - PAGE_SIZE;
	vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	INIT_LIST_HEAD(&vma->anon_vma_chain);

	err = insert_vm_struct(mm, vma);
	if (err)
		goto err;

	mm->stack_vm = mm->total_vm = 1;
	arch_bprm_mm_init(mm, vma);
	up_write(&mm->mmap_sem);
	bprm->p = vma->vm_end - sizeof(void *);
	return 0;
err:
	up_write(&mm->mmap_sem);
	bprm->vma = NULL;
	kmem_cache_free(vm_area_cachep, vma);
	return err;
}
static bool valid_arg_len(struct linux_binprm *bprm, long len)
{
	return len <= MAX_ARG_STRLEN;
}
#else /* !CONFIG_MMU */

static inline void acct_arg_size(struct linux_binprm *bprm,
				 unsigned long pages)
{
}
static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
		int write)
{
	struct page *page;

	page = bprm->page[pos / PAGE_SIZE];
	if (!page && write) {
		page = alloc_page(GFP_HIGHUSER|__GFP_ZERO);
		if (!page)
			return NULL;
		bprm->page[pos / PAGE_SIZE] = page;
	}

	return page;
}
static void put_arg_page(struct page *page)
{
}
static void free_arg_page(struct linux_binprm *bprm, int i)
{
	if (bprm->page[i]) {
		__free_page(bprm->page[i]);
		bprm->page[i] = NULL;
	}
}
static void free_arg_pages(struct linux_binprm *bprm)
{
	int i;

	for (i = 0; i < MAX_ARG_PAGES; i++)
		free_arg_page(bprm, i);
}
static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
		struct page *page)
{
}
static int __bprm_mm_init(struct linux_binprm *bprm)
{
	bprm->p = PAGE_SIZE * MAX_ARG_PAGES - sizeof(void *);
	return 0;
}
static bool valid_arg_len(struct linux_binprm *bprm, long len)
{
	return len <= bprm->p;
}

#endif /* CONFIG_MMU */
/*
 * Create a new mm_struct and populate it with a temporary stack
 * vm_area_struct. We don't have enough context at this point to set the stack
 * flags, permissions, and offset, so we use temporary values. We'll update
 * them later in setup_arg_pages().
 */
static int bprm_mm_init(struct linux_binprm *bprm)
{
	int err;
	struct mm_struct *mm = NULL;

	bprm->mm = mm = mm_alloc();
	err = -ENOMEM;
	if (!mm)
		goto err;

	err = __bprm_mm_init(bprm);
	if (err)
		goto err;

	return 0;

err:
	if (mm) {
		bprm->mm = NULL;
		mmdrop(mm);
	}

	return err;
}
struct user_arg_ptr {
#ifdef CONFIG_COMPAT
	bool is_compat;
#endif
	union {
		const char __user *const __user *native;
#ifdef CONFIG_COMPAT
		const compat_uptr_t __user *compat;
#endif
	} ptr;
};
static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
{
	const char __user *native;

#ifdef CONFIG_COMPAT
	if (unlikely(argv.is_compat)) {
		compat_uptr_t compat;

		if (get_user(compat, argv.ptr.compat + nr))
			return ERR_PTR(-EFAULT);

		return compat_ptr(compat);
	}
#endif

	if (get_user(native, argv.ptr.native + nr))
		return ERR_PTR(-EFAULT);

	return native;
}
/*
 * count() counts the number of strings in array ARGV.
 */
static int count(struct user_arg_ptr argv, int max)
{
	int i = 0;

	if (argv.ptr.native != NULL) {
		for (;;) {
			const char __user *p = get_user_arg_ptr(argv, i);

			if (!p)
				break;

			if (IS_ERR(p))
				return -EFAULT;

			if (i >= max)
				return -E2BIG;
			++i;

			if (fatal_signal_pending(current))
				return -ERESTARTNOHAND;
			cond_resched();
		}
	}
	return i;
}
/*
 * 'copy_strings()' copies argument/environment strings from the old
 * process's memory to the new process's stack.  The call to get_user_pages()
 * ensures the destination page is created and not swapped out.
 */
static int copy_strings(int argc, struct user_arg_ptr argv,
			struct linux_binprm *bprm)
{
	struct page *kmapped_page = NULL;
	char *kaddr = NULL;
	unsigned long kpos = 0;
	int ret;

	while (argc-- > 0) {
		const char __user *str;
		int len;
		unsigned long pos;

		ret = -EFAULT;
		str = get_user_arg_ptr(argv, argc);
		if (IS_ERR(str))
			goto out;

		len = strnlen_user(str, MAX_ARG_STRLEN);
		if (!len)
			goto out;

		ret = -E2BIG;
		if (!valid_arg_len(bprm, len))
			goto out;

		/* We're going to work our way backwards. */
		pos = bprm->p;
		str += len;
		bprm->p -= len;

		while (len > 0) {
			int offset, bytes_to_copy;

			if (fatal_signal_pending(current)) {
				ret = -ERESTARTNOHAND;
				goto out;
			}
			cond_resched();

			offset = pos % PAGE_SIZE;
			if (offset == 0)
				offset = PAGE_SIZE;

			bytes_to_copy = offset;
			if (bytes_to_copy > len)
				bytes_to_copy = len;

			offset -= bytes_to_copy;
			pos -= bytes_to_copy;
			str -= bytes_to_copy;
			len -= bytes_to_copy;

			if (!kmapped_page || kpos != (pos & PAGE_MASK)) {
				struct page *page;

				page = get_arg_page(bprm, pos, 1);
				if (!page) {
					ret = -E2BIG;
					goto out;
				}

				if (kmapped_page) {
					flush_kernel_dcache_page(kmapped_page);
					kunmap(kmapped_page);
					put_arg_page(kmapped_page);
				}
				kmapped_page = page;
				kaddr = kmap(kmapped_page);
				kpos = pos & PAGE_MASK;
				flush_arg_page(bprm, kpos, kmapped_page);
			}
			if (copy_from_user(kaddr+offset, str, bytes_to_copy)) {
				ret = -EFAULT;
				goto out;
			}
		}
	}
	ret = 0;
out:
	if (kmapped_page) {
		flush_kernel_dcache_page(kmapped_page);
		kunmap(kmapped_page);
		put_arg_page(kmapped_page);
	}
	return ret;
}
/*
 * Like copy_strings, but get argv and its values from kernel memory.
 */
int copy_strings_kernel(int argc, const char *const *__argv,
			struct linux_binprm *bprm)
{
	int r;
	mm_segment_t oldfs = get_fs();
	struct user_arg_ptr argv = {
		.ptr.native = (const char __user *const  __user *)__argv,
	};

	set_fs(KERNEL_DS);
	r = copy_strings(argc, argv, bprm);
	set_fs(oldfs);

	return r;
}
EXPORT_SYMBOL(copy_strings_kernel);
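
/*
 * Usage sketch (illustrative): binfmt handlers push extra, kernel-built
 * argv entries with this helper.  A "#!" script handler, for instance,
 * inserts the interpreter name parsed from the first line roughly like:
 *
 *	retval = copy_strings_kernel(1, &i_name, bprm);
 *
 * where i_name points at a NUL-terminated kernel string.
 */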
#ifdef CONFIG_MMU

/*
 * During bprm_mm_init(), we create a temporary stack at STACK_TOP_MAX. Once
 * the binfmt code determines where the new stack should reside, we shift it to
 * its final location. The process proceeds as follows:
 *
 * 1) Use shift to calculate the new vma endpoints.
 * 2) Extend vma to cover both the old and new ranges. This ensures the
 *    arguments passed to subsequent functions are consistent.
 * 3) Move vma's page tables to the new range.
 * 4) Free up any cleared pgd range.
 * 5) Shrink the vma to cover only the new range.
 */
static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long old_start = vma->vm_start;
	unsigned long old_end = vma->vm_end;
	unsigned long length = old_end - old_start;
	unsigned long new_start = old_start - shift;
	unsigned long new_end = old_end - shift;
	struct mmu_gather tlb;

	BUG_ON(new_start > new_end);

	/*
	 * ensure there are no vmas between where we want to go
	 * and where we are
	 */
	if (vma != find_vma(mm, new_start))
		return -EFAULT;

	/*
	 * cover the whole range: [new_start, old_end)
	 */
	if (vma_adjust(vma, new_start, old_end, vma->vm_pgoff, NULL))
		return -ENOMEM;

	/*
	 * move the page tables downwards, on failure we rely on
	 * process cleanup to remove whatever mess we made.
	 */
	if (length != move_page_tables(vma, old_start,
				       vma, new_start, length, false))
		return -ENOMEM;

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm, old_start, old_end);
	if (new_end > old_start) {
		/*
		 * when the old and new regions overlap clear from new_end.
		 */
		free_pgd_range(&tlb, new_end, old_end, new_end,
			vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
	} else {
		/*
		 * otherwise, clean from old_start; this is done to not touch
		 * the address space in [new_end, old_start) some architectures
		 * have constraints on va-space that make this illegal (IA64) -
		 * for the others it's just a little faster.
		 */
		free_pgd_range(&tlb, old_start, old_end, new_end,
			vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
	}
	tlb_finish_mmu(&tlb, old_start, old_end);

	/*
	 * Shrink the vma to just the new range.  Always succeeds.
	 */
	vma_adjust(vma, new_start, new_end, vma->vm_pgoff, NULL);

	return 0;
}
/*
 * Finalizes the stack vm_area_struct. The flags and permissions are updated,
 * the stack is optionally relocated, and some extra space is added.
 */
int setup_arg_pages(struct linux_binprm *bprm,
		    unsigned long stack_top,
		    int executable_stack)
{
	unsigned long ret;
	unsigned long stack_shift;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = bprm->vma;
	struct vm_area_struct *prev = NULL;
	unsigned long vm_flags;
	unsigned long stack_base;
	unsigned long stack_size;
	unsigned long stack_expand;
	unsigned long rlim_stack;

#ifdef CONFIG_STACK_GROWSUP
	/* Limit stack size */
	stack_base = rlimit_max(RLIMIT_STACK);
	if (stack_base > STACK_SIZE_MAX)
		stack_base = STACK_SIZE_MAX;

	/* Add space for stack randomization. */
	stack_base += (STACK_RND_MASK << PAGE_SHIFT);

	/* Make sure we didn't let the argument array grow too large. */
	if (vma->vm_end - vma->vm_start > stack_base)
		return -ENOMEM;

	stack_base = PAGE_ALIGN(stack_top - stack_base);

	stack_shift = vma->vm_start - stack_base;
	mm->arg_start = bprm->p - stack_shift;
	bprm->p = vma->vm_end - stack_shift;
#else
	stack_top = arch_align_stack(stack_top);
	stack_top = PAGE_ALIGN(stack_top);

	if (unlikely(stack_top < mmap_min_addr) ||
	    unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
		return -ENOMEM;

	stack_shift = vma->vm_end - stack_top;

	bprm->p -= stack_shift;
	mm->arg_start = bprm->p;
#endif

	if (bprm->loader)
		bprm->loader -= stack_shift;
	bprm->exec -= stack_shift;

	down_write(&mm->mmap_sem);
	vm_flags = VM_STACK_FLAGS;

	/*
	 * Adjust stack execute permissions; explicitly enable for
	 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
	 * (arch default) otherwise.
	 */
	if (unlikely(executable_stack == EXSTACK_ENABLE_X))
		vm_flags |= VM_EXEC;
	else if (executable_stack == EXSTACK_DISABLE_X)
		vm_flags &= ~VM_EXEC;
	vm_flags |= mm->def_flags;
	vm_flags |= VM_STACK_INCOMPLETE_SETUP;

	ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
			vm_flags);
	if (ret)
		goto out_unlock;
	BUG_ON(prev != vma);

	/* Move stack pages down in memory. */
	if (stack_shift) {
		ret = shift_arg_pages(vma, stack_shift);
		if (ret)
			goto out_unlock;
	}

	/* mprotect_fixup is overkill to remove the temporary stack flags */
	vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;

	stack_expand = 131072UL; /* randomly 32*4k (or 2*64k) pages */
	stack_size = vma->vm_end - vma->vm_start;
	/*
	 * Align this down to a page boundary as expand_stack
	 * will align it up.
	 */
	rlim_stack = rlimit(RLIMIT_STACK) & PAGE_MASK;
#ifdef CONFIG_STACK_GROWSUP
	if (stack_size + stack_expand > rlim_stack)
		stack_base = vma->vm_start + rlim_stack;
	else
		stack_base = vma->vm_end + stack_expand;
#else
	if (stack_size + stack_expand > rlim_stack)
		stack_base = vma->vm_end - rlim_stack;
	else
		stack_base = vma->vm_start - stack_expand;
#endif
	current->mm->start_stack = bprm->p;
	ret = expand_stack(vma, stack_base);
	if (ret)
		ret = -EFAULT;

out_unlock:
	up_write(&mm->mmap_sem);
	return ret;
}
EXPORT_SYMBOL(setup_arg_pages);
#endif /* CONFIG_MMU */
static struct file *do_open_execat(int fd, struct filename *name, int flags)
{
	struct file *file;
	int err;
	struct open_flags open_exec_flags = {
		.open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
		.acc_mode = MAY_EXEC | MAY_OPEN,
		.intent = LOOKUP_OPEN,
		.lookup_flags = LOOKUP_FOLLOW,
	};

	if ((flags & ~(AT_SYMLINK_NOFOLLOW | AT_EMPTY_PATH)) != 0)
		return ERR_PTR(-EINVAL);
	if (flags & AT_SYMLINK_NOFOLLOW)
		open_exec_flags.lookup_flags &= ~LOOKUP_FOLLOW;
	if (flags & AT_EMPTY_PATH)
		open_exec_flags.lookup_flags |= LOOKUP_EMPTY;

	file = do_filp_open(fd, name, &open_exec_flags);
	if (IS_ERR(file))
		goto out;

	err = -EACCES;
	if (!S_ISREG(file_inode(file)->i_mode))
		goto exit;

	if (path_noexec(&file->f_path))
		goto exit;

	err = deny_write_access(file);
	if (err)
		goto exit;

	if (name->name[0] != '\0')
		fsnotify_open(file);

	trace_open_exec(name->name);

out:
	return file;

exit:
	fput(file);
	return ERR_PTR(err);
}
struct file *open_exec(const char *name)
{
	struct filename *filename = getname_kernel(name);
	struct file *f = ERR_CAST(filename);

	if (!IS_ERR(filename)) {
		f = do_open_execat(AT_FDCWD, filename, 0);
		putname(filename);
	}
	return f;
}
EXPORT_SYMBOL(open_exec);
int kernel_read(struct file *file, loff_t offset,
		char *addr, unsigned long count)
{
	mm_segment_t old_fs;
	loff_t pos = offset;
	int result;

	old_fs = get_fs();
	set_fs(get_ds());
	/* The cast to a user pointer is valid due to the set_fs() */
	result = vfs_read(file, (void __user *)addr, count, &pos);
	set_fs(old_fs);
	return result;
}

EXPORT_SYMBOL(kernel_read);
ssize_t read_code(struct file *file, unsigned long addr, loff_t pos, size_t len)
{
	ssize_t res = vfs_read(file, (void __user *)addr, len, &pos);
	if (res > 0)
		flush_icache_range(addr, addr + len);
	return res;
}
EXPORT_SYMBOL(read_code);
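
/*
 * Note (illustrative): read_code() is meant for binfmt loaders that read an
 * executable's text/data straight into an already-mapped user region (the
 * a.out and flat-binary loaders, for example); the icache flush keeps the
 * freshly written instructions coherent on architectures that need it.
 */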
static int exec_mmap(struct mm_struct *mm)
{
	struct task_struct *tsk;
	struct mm_struct *old_mm, *active_mm;

	/* Notify parent that we're no longer interested in the old VM */
	tsk = current;
	old_mm = current->mm;
	mm_release(tsk, old_mm);

	if (old_mm) {
		sync_mm_rss(old_mm);
		/*
		 * Make sure that if there is a core dump in progress
		 * for the old mm, we get out and die instead of going
		 * through with the exec.  We must hold mmap_sem around
		 * checking core_state and changing tsk->mm.
		 */
		down_read(&old_mm->mmap_sem);
		if (unlikely(old_mm->core_state)) {
			up_read(&old_mm->mmap_sem);
			return -EINTR;
		}
	}
	task_lock(tsk);
	active_mm = tsk->active_mm;
	tsk->mm = mm;
	tsk->active_mm = mm;
	activate_mm(active_mm, mm);
	tsk->mm->vmacache_seqnum = 0;
	vmacache_flush(tsk);
	task_unlock(tsk);
	if (old_mm) {
		up_read(&old_mm->mmap_sem);
		BUG_ON(active_mm != old_mm);
		setmax_mm_hiwater_rss(&tsk->signal->maxrss, old_mm);
		mm_update_next_owner(old_mm);
		mmput(old_mm);
		return 0;
	}
	mmdrop(active_mm);
	return 0;
}
/*
 * This function makes sure the current process has its own signal table,
 * so that flush_signal_handlers can later reset the handlers without
 * disturbing other processes.  (Other processes might share the signal
 * table via the CLONE_SIGHAND option to clone().)
 */
static int de_thread(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	struct sighand_struct *oldsighand = tsk->sighand;
	spinlock_t *lock = &oldsighand->siglock;

	if (thread_group_empty(tsk))
		goto no_thread_group;

	/*
	 * Kill all other threads in the thread group.
	 */
	spin_lock_irq(lock);
	if (signal_group_exit(sig)) {
		/*
		 * Another group action in progress, just
		 * return so that the signal is processed.
		 */
		spin_unlock_irq(lock);
		return -EAGAIN;
	}

	sig->group_exit_task = tsk;
	sig->notify_count = zap_other_threads(tsk);
	if (!thread_group_leader(tsk))
		sig->notify_count--;

	while (sig->notify_count) {
		__set_current_state(TASK_KILLABLE);
		spin_unlock_irq(lock);
		schedule();
		if (unlikely(__fatal_signal_pending(tsk)))
			goto killed;
		spin_lock_irq(lock);
	}
	spin_unlock_irq(lock);

	/*
	 * At this point all other threads have exited, all we have to
	 * do is to wait for the thread group leader to become inactive,
	 * and to assume its PID:
	 */
	if (!thread_group_leader(tsk)) {
		struct task_struct *leader = tsk->group_leader;

		for (;;) {
			threadgroup_change_begin(tsk);
			write_lock_irq(&tasklist_lock);
			/*
			 * Do this under tasklist_lock to ensure that
			 * exit_notify() can't miss ->group_exit_task
			 */
			sig->notify_count = -1;
			if (likely(leader->exit_state))
				break;
			__set_current_state(TASK_KILLABLE);
			write_unlock_irq(&tasklist_lock);
			threadgroup_change_end(tsk);
			schedule();
			if (unlikely(__fatal_signal_pending(tsk)))
				goto killed;
		}

		/*
		 * The only record we have of the real-time age of a
		 * process, regardless of execs it's done, is start_time.
		 * All the past CPU time is accumulated in signal_struct
		 * from sister threads now dead.  But in this non-leader
		 * exec, nothing survives from the original leader thread,
		 * whose birth marks the true age of this process now.
		 * When we take on its identity by switching to its PID, we
		 * also take its birthdate (always earlier than our own).
		 */
		tsk->start_time = leader->start_time;
		tsk->real_start_time = leader->real_start_time;

		BUG_ON(!same_thread_group(leader, tsk));
		BUG_ON(has_group_leader_pid(tsk));
		/*
		 * An exec() starts a new thread group with the
		 * TGID of the previous thread group. Rehash the
		 * two threads with a switched PID, and release
		 * the former thread group leader:
		 */

		/* Become a process group leader with the old leader's pid.
		 * The old leader becomes a thread of this thread group.
		 * Note: The old leader also uses this pid until release_task
		 *       is called.  Odd but simple and correct.
		 */
		tsk->pid = leader->pid;
		change_pid(tsk, PIDTYPE_PID, task_pid(leader));
		transfer_pid(leader, tsk, PIDTYPE_PGID);
		transfer_pid(leader, tsk, PIDTYPE_SID);

		list_replace_rcu(&leader->tasks, &tsk->tasks);
		list_replace_init(&leader->sibling, &tsk->sibling);

		tsk->group_leader = tsk;
		leader->group_leader = tsk;

		tsk->exit_signal = SIGCHLD;
		leader->exit_signal = -1;

		BUG_ON(leader->exit_state != EXIT_ZOMBIE);
		leader->exit_state = EXIT_DEAD;

		/*
		 * We are going to release_task()->ptrace_unlink() silently,
		 * the tracer can sleep in do_wait(). EXIT_DEAD guarantees
		 * the tracer won't block again waiting for this thread.
		 */
		if (unlikely(leader->ptrace))
			__wake_up_parent(leader, leader->parent);
		write_unlock_irq(&tasklist_lock);
		threadgroup_change_end(tsk);

		release_task(leader);
	}

	sig->group_exit_task = NULL;
	sig->notify_count = 0;

no_thread_group:
	/* we have changed execution domain */
	tsk->exit_signal = SIGCHLD;

	exit_itimers(sig);
	flush_itimer_signals();

	if (atomic_read(&oldsighand->count) != 1) {
		struct sighand_struct *newsighand;
		/*
		 * This ->sighand is shared with the CLONE_SIGHAND
		 * but not CLONE_THREAD task, switch to the new one.
		 */
		newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
		if (!newsighand)
			return -ENOMEM;

		atomic_set(&newsighand->count, 1);
		memcpy(newsighand->action, oldsighand->action,
		       sizeof(newsighand->action));

		write_lock_irq(&tasklist_lock);
		spin_lock(&oldsighand->siglock);
		rcu_assign_pointer(tsk->sighand, newsighand);
		spin_unlock(&oldsighand->siglock);
		write_unlock_irq(&tasklist_lock);

		__cleanup_sighand(oldsighand);
	}

	BUG_ON(!thread_group_leader(tsk));
	return 0;

killed:
	/* protects against exit_notify() and __exit_signal() */
	read_lock(&tasklist_lock);
	sig->group_exit_task = NULL;
	sig->notify_count = 0;
	read_unlock(&tasklist_lock);
	return -EAGAIN;
}
char *get_task_comm(char *buf, struct task_struct *tsk)
{
	/* buf must be at least sizeof(tsk->comm) in size */
	task_lock(tsk);
	strncpy(buf, tsk->comm, sizeof(tsk->comm));
	task_unlock(tsk);
	return buf;
}
EXPORT_SYMBOL_GPL(get_task_comm);
/*
 * These functions flush out all traces of the currently running executable
 * so that a new one can be started
 */

void __set_task_comm(struct task_struct *tsk, const char *buf, bool exec)
{
	task_lock(tsk);
	trace_task_rename(tsk, buf);
	strlcpy(tsk->comm, buf, sizeof(tsk->comm));
	task_unlock(tsk);
	perf_event_comm(tsk, exec);
}
int flush_old_exec(struct linux_binprm * bprm)
{
	int retval;

	/*
	 * Make sure we have a private signal table and that
	 * we are unassociated from the previous thread group.
	 */
	retval = de_thread(current);
	if (retval)
		goto out;

	/*
	 * Must be called _before_ exec_mmap() as bprm->mm is
	 * not visible until then. This also enables the update
	 * to be lockless.
	 */
	set_mm_exe_file(bprm->mm, bprm->file);

	/*
	 * Release all of the old mmap stuff
	 */
	acct_arg_size(bprm, 0);
	retval = exec_mmap(bprm->mm);
	if (retval)
		goto out;

	bprm->mm = NULL;		/* We're using it now */

	set_fs(USER_DS);
	current->flags &= ~(PF_RANDOMIZE | PF_FORKNOEXEC | PF_KTHREAD |
					PF_NOFREEZE | PF_NO_SETAFFINITY);
	flush_thread();
	current->personality &= ~bprm->per_clear;

	/*
	 * We have to apply CLOEXEC before we change whether the process is
	 * dumpable (in setup_new_exec) to avoid a race with a process in userspace
	 * trying to access the should-be-closed file descriptors of a process
	 * undergoing exec(2).
	 */
	do_close_on_exec(current->files);
	return 0;

out:
	return retval;
}
EXPORT_SYMBOL(flush_old_exec);
void would_dump(struct linux_binprm *bprm, struct file *file)
{
	struct inode *inode = file_inode(file);
	if (inode_permission(inode, MAY_READ) < 0) {
		struct user_namespace *old, *user_ns;
		bprm->interp_flags |= BINPRM_FLAGS_ENFORCE_NONDUMP;

		/* Ensure mm->user_ns contains the executable */
		user_ns = old = bprm->mm->user_ns;
		while ((user_ns != &init_user_ns) &&
		       !privileged_wrt_inode_uidgid(user_ns, inode))
			user_ns = user_ns->parent;

		if (old != user_ns) {
			bprm->mm->user_ns = get_user_ns(user_ns);
			put_user_ns(old);
		}
	}
}
EXPORT_SYMBOL(would_dump);
void setup_new_exec(struct linux_binprm * bprm)
{
	arch_pick_mmap_layout(current->mm);

	/* This is the point of no return */
	current->sas_ss_sp = current->sas_ss_size = 0;

	if (uid_eq(current_euid(), current_uid()) && gid_eq(current_egid(), current_gid()))
		set_dumpable(current->mm, SUID_DUMP_USER);
	else
		set_dumpable(current->mm, suid_dumpable);

	perf_event_exec();
	__set_task_comm(current, kbasename(bprm->filename), true);

	/* Set the new mm task size. We have to do that late because it may
	 * depend on TIF_32BIT which is only updated in flush_thread() on
	 * some architectures like powerpc
	 */
	current->mm->task_size = TASK_SIZE;

	/* install the new credentials */
	if (!uid_eq(bprm->cred->uid, current_euid()) ||
	    !gid_eq(bprm->cred->gid, current_egid())) {
		current->pdeath_signal = 0;
	} else {
		if (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP)
			set_dumpable(current->mm, suid_dumpable);
	}

	/* An exec changes our domain. We are no longer part of the thread
	   group */
	current->self_exec_id++;
	flush_signal_handlers(current, 0);
}
EXPORT_SYMBOL(setup_new_exec);
/*
 * Prepare credentials and lock ->cred_guard_mutex.
 * install_exec_creds() commits the new creds and drops the lock.
 * Or, if exec fails before, free_bprm() should release ->cred and
 * unlock.
 */
int prepare_bprm_creds(struct linux_binprm *bprm)
{
	if (mutex_lock_interruptible(&current->signal->cred_guard_mutex))
		return -ERESTARTNOINTR;

	bprm->cred = prepare_exec_creds();
	if (likely(bprm->cred))
		return 0;

	mutex_unlock(&current->signal->cred_guard_mutex);
	return -ENOMEM;
}
static void free_bprm(struct linux_binprm *bprm)
{
	free_arg_pages(bprm);
	if (bprm->cred) {
		mutex_unlock(&current->signal->cred_guard_mutex);
		abort_creds(bprm->cred);
	}
	if (bprm->file) {
		allow_write_access(bprm->file);
		fput(bprm->file);
	}
	/* If a binfmt changed the interp, free it. */
	if (bprm->interp != bprm->filename)
		kfree(bprm->interp);
	kfree(bprm);
}
int bprm_change_interp(char *interp, struct linux_binprm *bprm)
{
	/* If a binfmt changed the interp, free it first. */
	if (bprm->interp != bprm->filename)
		kfree(bprm->interp);
	bprm->interp = kstrdup(interp, GFP_KERNEL);
	if (!bprm->interp)
		return -ENOMEM;
	return 0;
}
EXPORT_SYMBOL(bprm_change_interp);
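
/*
 * Usage sketch (illustrative): a "#!" script handler swaps in the
 * interpreter it parsed from the first line before the next load_binary
 * pass, roughly:
 *
 *	retval = bprm_change_interp(i_name, bprm);
 *	if (retval < 0)
 *		return retval;
 *
 * where i_name is the interpreter path the handler extracted.
 */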
/*
 * install the new credentials for this executable
 */
void install_exec_creds(struct linux_binprm *bprm)
{
	security_bprm_committing_creds(bprm);

	commit_creds(bprm->cred);
	bprm->cred = NULL;

	/*
	 * Disable monitoring for regular users
	 * when executing setuid binaries. Must
	 * wait until new credentials are committed
	 * by commit_creds() above
	 */
	if (get_dumpable(current->mm) != SUID_DUMP_USER)
		perf_event_exit_task(current);
	/*
	 * cred_guard_mutex must be held at least to this point to prevent
	 * ptrace_attach() from altering our determination of the task's
	 * credentials; any time after this it may be unlocked.
	 */
	security_bprm_committed_creds(bprm);
	mutex_unlock(&current->signal->cred_guard_mutex);
}
EXPORT_SYMBOL(install_exec_creds);
/*
 * determine how safe it is to execute the proposed program
 * - the caller must hold ->cred_guard_mutex to protect against
 *   PTRACE_ATTACH or seccomp thread-sync
 */
static void check_unsafe_exec(struct linux_binprm *bprm)
{
	struct task_struct *p = current, *t;
	unsigned n_fs;

	if (p->ptrace) {
		if (ptracer_capable(p, current_user_ns()))
			bprm->unsafe |= LSM_UNSAFE_PTRACE_CAP;
		else
			bprm->unsafe |= LSM_UNSAFE_PTRACE;
	}

	/*
	 * This isn't strictly necessary, but it makes it harder for LSMs to
	 * mess up.
	 */
	if (task_no_new_privs(current))
		bprm->unsafe |= LSM_UNSAFE_NO_NEW_PRIVS;

	t = p;
	n_fs = 1;
	spin_lock(&p->fs->lock);
	rcu_read_lock();
	while_each_thread(p, t) {
		/* skip threads that are already on their way out */
		if (t->flags & (PF_EXITING | PF_FORKNOEXEC))
			continue;
		if (t->fs == p->fs)
			n_fs++;
	}
	rcu_read_unlock();

	if (p->fs->users > n_fs) {
		spin_unlock(&p->fs->lock);
		bprm->unsafe |= LSM_UNSAFE_SHARE;
		return;
	}
	p->fs->in_exec = 1;
	spin_unlock(&p->fs->lock);
}
static void bprm_fill_uid(struct linux_binprm *bprm)
{
	struct inode *inode;
	unsigned int mode;
	kuid_t uid;
	kgid_t gid;

	/* clear any previous set[ug]id data from a previous binary */
	bprm->cred->euid = current_euid();
	bprm->cred->egid = current_egid();

	if (path_nosuid(&bprm->file->f_path))
		return;

	if (task_no_new_privs(current))
		return;

	inode = file_inode(bprm->file);
	mode = READ_ONCE(inode->i_mode);
	if (!(mode & (S_ISUID|S_ISGID)))
		return;

	/* Be careful if suid/sgid is set */
	mutex_lock(&inode->i_mutex);

	/* reload atomically mode/uid/gid now that lock held */
	mode = inode->i_mode;
	uid = inode->i_uid;
	gid = inode->i_gid;
	mutex_unlock(&inode->i_mutex);

	/* We ignore suid/sgid if there are no mappings for them in the ns */
	if (!kuid_has_mapping(bprm->cred->user_ns, uid) ||
	    !kgid_has_mapping(bprm->cred->user_ns, gid))
		return;

	if (mode & S_ISUID) {
		bprm->per_clear |= PER_CLEAR_ON_SETID;
		bprm->cred->euid = uid;
	}

	if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
		bprm->per_clear |= PER_CLEAR_ON_SETID;
		bprm->cred->egid = gid;
	}
}
/*
 * Fill the binprm structure from the inode.
 * Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes
 * of the file.
 *
 * This may be called multiple times for binary chains (scripts for example).
 */
int prepare_binprm(struct linux_binprm *bprm)
{
	int retval;

	bprm_fill_uid(bprm);

	/* fill in binprm security blob */
	retval = security_bprm_set_creds(bprm);
	if (retval)
		return retval;
	bprm->cred_prepared = 1;

	memset(bprm->buf, 0, BINPRM_BUF_SIZE);
	return kernel_read(bprm->file, 0, bprm->buf, BINPRM_BUF_SIZE);
}

EXPORT_SYMBOL(prepare_binprm);
/*
 * Arguments are '\0' separated strings found at the location bprm->p
 * points to; chop off the first by relocating bprm->p to right after
 * the first '\0' encountered.
 */
int remove_arg_zero(struct linux_binprm *bprm)
{
	int ret = 0;
	unsigned long offset;
	char *kaddr;
	struct page *page;

	if (!bprm->argc)
		return 0;

	do {
		offset = bprm->p & ~PAGE_MASK;
		page = get_arg_page(bprm, bprm->p, 0);
		if (!page) {
			ret = -EFAULT;
			goto out;
		}
		kaddr = kmap_atomic(page);

		for (; offset < PAGE_SIZE && kaddr[offset];
				offset++, bprm->p++)
			;

		kunmap_atomic(kaddr);
		put_arg_page(page);

		if (offset == PAGE_SIZE)
			free_arg_page(bprm, (bprm->p >> PAGE_SHIFT) - 1);
	} while (offset == PAGE_SIZE);

	bprm->p++;
	bprm->argc--;
	ret = 0;

out:
	return ret;
}
EXPORT_SYMBOL(remove_arg_zero);
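
/*
 * Usage sketch (illustrative): interpreter handlers drop the script's own
 * argv[0] and substitute their own before re-dispatching, e.g.:
 *
 *	retval = remove_arg_zero(bprm);
 *	if (retval)
 *		return retval;
 *	retval = copy_strings_kernel(1, &bprm->interp, bprm);
 *
 * which is essentially what the "#!" script handler does.
 */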
#define printable(c) (((c)=='\t') || ((c)=='\n') || (0x20<=(c) && (c)<=0x7e))
/*
 * cycle through the list of binary format handlers, until one recognizes
 * the image
 */
int search_binary_handler(struct linux_binprm *bprm)
{
	bool need_retry = IS_ENABLED(CONFIG_MODULES);
	struct linux_binfmt *fmt;
	int retval;

	/* This allows 4 levels of binfmt rewrites before failing hard. */
	if (bprm->recursion_depth > 5)
		return -ELOOP;

	retval = security_bprm_check(bprm);
	if (retval)
		return retval;

	retval = -ENOENT;
 retry:
	read_lock(&binfmt_lock);
	list_for_each_entry(fmt, &formats, lh) {
		if (!try_module_get(fmt->module))
			continue;
		read_unlock(&binfmt_lock);
		bprm->recursion_depth++;
		retval = fmt->load_binary(bprm);
		read_lock(&binfmt_lock);
		put_binfmt(fmt);
		bprm->recursion_depth--;
		if (retval < 0 && !bprm->mm) {
			/* we got to flush_old_exec() and failed after it */
			read_unlock(&binfmt_lock);
			force_sigsegv(SIGSEGV, current);
			return retval;
		}
		if (retval != -ENOEXEC || !bprm->file) {
			read_unlock(&binfmt_lock);
			return retval;
		}
	}
	read_unlock(&binfmt_lock);

	if (need_retry) {
		if (printable(bprm->buf[0]) && printable(bprm->buf[1]) &&
		    printable(bprm->buf[2]) && printable(bprm->buf[3]))
			return retval;
		if (request_module("binfmt-%04x", *(ushort *)(bprm->buf + 2)) < 0)
			return retval;
		need_retry = false;
		goto retry;
	}

	return retval;
}
EXPORT_SYMBOL(search_binary_handler);
static int exec_binprm(struct linux_binprm *bprm)
{
	pid_t old_pid, old_vpid;
	int ret;

	/* Need to fetch pid before load_binary changes it */
	old_pid = current->pid;
	rcu_read_lock();
	old_vpid = task_pid_nr_ns(current, task_active_pid_ns(current->parent));
	rcu_read_unlock();

	ret = search_binary_handler(bprm);
	if (ret >= 0) {
		audit_bprm(bprm);
		trace_sched_process_exec(current, old_pid, bprm);
		ptrace_event(PTRACE_EVENT_EXEC, old_vpid);
		proc_exec_connector(current);
	}

	return ret;
}
/*
 * sys_execve() executes a new program.
 */
static int do_execveat_common(int fd, struct filename *filename,
			      struct user_arg_ptr argv,
			      struct user_arg_ptr envp,
			      int flags)
{
	char *pathbuf = NULL;
	struct linux_binprm *bprm;
	struct file *file;
	struct files_struct *displaced;
	int retval;

	if (IS_ERR(filename))
		return PTR_ERR(filename);

	/*
	 * We move the actual failure in case of RLIMIT_NPROC excess from
	 * set*uid() to execve() because too many poorly written programs
	 * don't check setuid() return code.  Here we additionally recheck
	 * whether NPROC limit is still exceeded.
	 */
	if ((current->flags & PF_NPROC_EXCEEDED) &&
	    atomic_read(&current_user()->processes) > rlimit(RLIMIT_NPROC)) {
		retval = -EAGAIN;
		goto out_ret;
	}

	/* We're below the limit (still or again), so we don't want to make
	 * further execve() calls fail. */
	current->flags &= ~PF_NPROC_EXCEEDED;

	retval = unshare_files(&displaced);
	if (retval)
		goto out_ret;

	retval = -ENOMEM;
	bprm = kzalloc(sizeof(*bprm), GFP_KERNEL);
	if (!bprm)
		goto out_files;

	retval = prepare_bprm_creds(bprm);
	if (retval)
		goto out_free;

	check_unsafe_exec(bprm);
	current->in_execve = 1;

	file = do_open_execat(fd, filename, flags);
	retval = PTR_ERR(file);
	if (IS_ERR(file))
		goto out_unmark;

	sched_exec();

	bprm->file = file;
	if (fd == AT_FDCWD || filename->name[0] == '/') {
		bprm->filename = filename->name;
	} else {
		if (filename->name[0] == '\0')
			pathbuf = kasprintf(GFP_TEMPORARY, "/dev/fd/%d", fd);
		else
			pathbuf = kasprintf(GFP_TEMPORARY, "/dev/fd/%d/%s",
					    fd, filename->name);
		if (!pathbuf) {
			retval = -ENOMEM;
			goto out_unmark;
		}
		/*
		 * Record that a name derived from an O_CLOEXEC fd will be
		 * inaccessible after exec. Relies on having exclusive access to
		 * current->files (due to unshare_files above).
		 */
		if (close_on_exec(fd, rcu_dereference_raw(current->files->fdt)))
			bprm->interp_flags |= BINPRM_FLAGS_PATH_INACCESSIBLE;
		bprm->filename = pathbuf;
	}
	bprm->interp = bprm->filename;

	retval = bprm_mm_init(bprm);
	if (retval)
		goto out_unmark;

	bprm->argc = count(argv, MAX_ARG_STRINGS);
	if ((retval = bprm->argc) < 0)
		goto out;

	bprm->envc = count(envp, MAX_ARG_STRINGS);
	if ((retval = bprm->envc) < 0)
		goto out;

	retval = prepare_binprm(bprm);
	if (retval < 0)
		goto out;

	retval = copy_strings_kernel(1, &bprm->filename, bprm);
	if (retval < 0)
		goto out;

	bprm->exec = bprm->p;
	retval = copy_strings(bprm->envc, envp, bprm);
	if (retval < 0)
		goto out;

	retval = copy_strings(bprm->argc, argv, bprm);
	if (retval < 0)
		goto out;

	would_dump(bprm, bprm->file);

	retval = exec_binprm(bprm);
	if (retval < 0)
		goto out;

	/* execve succeeded */
	current->fs->in_exec = 0;
	current->in_execve = 0;
	acct_update_integrals(current);
	task_numa_free(current);
	free_bprm(bprm);
	kfree(pathbuf);
	putname(filename);
	if (displaced)
		put_files_struct(displaced);
	return retval;

out:
	if (bprm->mm) {
		acct_arg_size(bprm, 0);
		mmput(bprm->mm);
	}

out_unmark:
	current->fs->in_exec = 0;
	current->in_execve = 0;

out_free:
	free_bprm(bprm);
	kfree(pathbuf);

out_files:
	if (displaced)
		reset_files_struct(displaced);
out_ret:
	putname(filename);
	return retval;
}
int do_execve(struct filename *filename,
	const char __user *const __user *__argv,
	const char __user *const __user *__envp)
{
	struct user_arg_ptr argv = { .ptr.native = __argv };
	struct user_arg_ptr envp = { .ptr.native = __envp };
	return do_execveat_common(AT_FDCWD, filename, argv, envp, 0);
}
int do_execveat(int fd, struct filename *filename,
		const char __user *const __user *__argv,
		const char __user *const __user *__envp,
		int flags)
{
	struct user_arg_ptr argv = { .ptr.native = __argv };
	struct user_arg_ptr envp = { .ptr.native = __envp };

	return do_execveat_common(fd, filename, argv, envp, flags);
}
#ifdef CONFIG_COMPAT
static int compat_do_execve(struct filename *filename,
	const compat_uptr_t __user *__argv,
	const compat_uptr_t __user *__envp)
{
	struct user_arg_ptr argv = {
		.is_compat = true,
		.ptr.compat = __argv,
	};
	struct user_arg_ptr envp = {
		.is_compat = true,
		.ptr.compat = __envp,
	};
	return do_execveat_common(AT_FDCWD, filename, argv, envp, 0);
}

static int compat_do_execveat(int fd, struct filename *filename,
			      const compat_uptr_t __user *__argv,
			      const compat_uptr_t __user *__envp,
			      int flags)
{
	struct user_arg_ptr argv = {
		.is_compat = true,
		.ptr.compat = __argv,
	};
	struct user_arg_ptr envp = {
		.is_compat = true,
		.ptr.compat = __envp,
	};
	return do_execveat_common(fd, filename, argv, envp, flags);
}
#endif
void set_binfmt(struct linux_binfmt *new)
{
	struct mm_struct *mm = current->mm;

	if (mm->binfmt)
		module_put(mm->binfmt->module);

	mm->binfmt = new;
	if (new)
		__module_get(new->module);
}
EXPORT_SYMBOL(set_binfmt);
/*
 * set_dumpable stores three-value SUID_DUMP_* into mm->flags.
 */
void set_dumpable(struct mm_struct *mm, int value)
{
	unsigned long old, new;

	if (WARN_ON((unsigned)value > SUID_DUMP_ROOT))
		return;

	do {
		old = ACCESS_ONCE(mm->flags);
		new = (old & ~MMF_DUMPABLE_MASK) | value;
	} while (cmpxchg(&mm->flags, old, new) != old);
}
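
/*
 * Note (illustrative): the three values stored here are SUID_DUMP_DISABLE
 * (0, don't dump), SUID_DUMP_USER (1, dump as the user) and SUID_DUMP_ROOT
 * (2, dump readable by root only); the cmpxchg loop updates mm->flags
 * without taking a lock.
 */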
SYSCALL_DEFINE3(execve,
		const char __user *, filename,
		const char __user *const __user *, argv,
		const char __user *const __user *, envp)
{
	return do_execve(getname(filename), argv, envp);
}
SYSCALL_DEFINE5(execveat,
		int, fd, const char __user *, filename,
		const char __user *const __user *, argv,
		const char __user *const __user *, envp,
		int, flags)
{
	int lookup_flags = (flags & AT_EMPTY_PATH) ? LOOKUP_EMPTY : 0;

	return do_execveat(fd,
			   getname_flags(filename, lookup_flags, NULL),
			   argv, envp, flags);
}
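
/*
 * Userspace usage sketch (illustrative, not part of this file): execveat()
 * lets a program exec an already-open file descriptor, which is how libc
 * can implement fexecve(3):
 *
 *	int fd = open("/bin/true", O_RDONLY | O_CLOEXEC);
 *	syscall(__NR_execveat, fd, "", argv, envp, AT_EMPTY_PATH);
 *
 * AT_EMPTY_PATH with an empty pathname means "exec the fd itself".
 */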
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE3(execve, const char __user *, filename,
	const compat_uptr_t __user *, argv,
	const compat_uptr_t __user *, envp)
{
	return compat_do_execve(getname(filename), argv, envp);
}

COMPAT_SYSCALL_DEFINE5(execveat, int, fd,
		       const char __user *, filename,
		       const compat_uptr_t __user *, argv,
		       const compat_uptr_t __user *, envp,
		       int,  flags)
{
	int lookup_flags = (flags & AT_EMPTY_PATH) ? LOOKUP_EMPTY : 0;

	return compat_do_execveat(fd,
				  getname_flags(filename, lookup_flags, NULL),
				  argv, envp, flags);
}
#endif