/*
 * Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * #!-checking implemented by tytso.
 */
/*
 * Demand-loading implemented 01.12.91 - no need to read anything but
 * the header into memory. The inode of the executable is put into
 * "current->executable", and page faults do the actual loading. Clean.
 *
 * Once more I can proudly say that linux stood up to being changed: it
 * was less than 2 hours work to get demand-loading completely implemented.
 *
 * Demand loading changed July 1993 by Eric Youngdale. Use mmap instead,
 * current->executable is only used by the procfs. This allows a dispatch
 * table to check for several different types of binary formats. We keep
 * trying until we recognize the file or we run out of supported binary
 * formats.
 */
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/mm.h>
#include <linux/vmacache.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/swap.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/signal.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/task.h>
#include <linux/pagemap.h>
#include <linux/perf_event.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/key.h>
#include <linux/personality.h>
#include <linux/binfmts.h>
#include <linux/utsname.h>
#include <linux/pid_namespace.h>
#include <linux/module.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/audit.h>
#include <linux/tracehook.h>
#include <linux/kmod.h>
#include <linux/fsnotify.h>
#include <linux/fs_struct.h>
#include <linux/pipe_fs_i.h>
#include <linux/oom.h>
#include <linux/compat.h>
#include <linux/vmalloc.h>

#include <trace/events/fs.h>

#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>

#include <trace/events/task.h>
#include "internal.h"

#include <trace/events/sched.h>
int suid_dumpable = 0;

static LIST_HEAD(formats);
static DEFINE_RWLOCK(binfmt_lock);
void __register_binfmt(struct linux_binfmt * fmt, int insert)
{
	BUG_ON(!fmt);
	if (WARN_ON(!fmt->load_binary))
		return;
	write_lock(&binfmt_lock);
	insert ? list_add(&fmt->lh, &formats) :
		 list_add_tail(&fmt->lh, &formats);
	write_unlock(&binfmt_lock);
}

EXPORT_SYMBOL(__register_binfmt);

void unregister_binfmt(struct linux_binfmt * fmt)
{
	write_lock(&binfmt_lock);
	list_del(&fmt->lh);
	write_unlock(&binfmt_lock);
}

EXPORT_SYMBOL(unregister_binfmt);
static inline void put_binfmt(struct linux_binfmt * fmt)
{
	module_put(fmt->module);
}
bool path_noexec(const struct path *path)
{
	return (path->mnt->mnt_flags & MNT_NOEXEC) ||
	       (path->mnt->mnt_sb->s_iflags & SB_I_NOEXEC);
}

bool path_nosuid(const struct path *path)
{
	return !mnt_may_suid(path->mnt) ||
	       (path->mnt->mnt_sb->s_iflags & SB_I_NOSUID);
}
EXPORT_SYMBOL(path_nosuid);
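/*
 * Illustration of the intended effect (userspace view, not part of the
 * original file): on a filesystem mounted MS_NOEXEC, path_noexec() is
 * true for every file, so do_open_execat() below refuses it:
 *
 *	mount("/dev/sda2", "/mnt", "ext4", MS_NOEXEC, NULL);
 *	execl("/mnt/bin/tool", "tool", NULL);	// fails, errno == EACCES
 */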
#ifdef CONFIG_USELIB
/*
 * Note that a shared library must be both readable and executable for
 * security reasons.
 *
 * Also note that we take the address to load from the file itself.
 */
SYSCALL_DEFINE1(uselib, const char __user *, library)
{
	struct linux_binfmt *fmt;
	struct file *file;
	struct filename *tmp = getname(library);
	int error = PTR_ERR(tmp);
	static const struct open_flags uselib_flags = {
		.open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
		.acc_mode = MAY_READ | MAY_EXEC,
		.intent = LOOKUP_OPEN,
		.lookup_flags = LOOKUP_FOLLOW,
	};

	if (IS_ERR(tmp))
		goto out;

	file = do_filp_open(AT_FDCWD, tmp, &uselib_flags);
	putname(tmp);
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto out;

	error = -EINVAL;
	if (!S_ISREG(file_inode(file)->i_mode))
		goto exit;

	error = -EACCES;
	if (path_noexec(&file->f_path))
		goto exit;

	fsnotify_open(file);

	error = -ENOEXEC;

	read_lock(&binfmt_lock);
	list_for_each_entry(fmt, &formats, lh) {
		if (!fmt->load_shlib)
			continue;
		if (!try_module_get(fmt->module))
			continue;
		read_unlock(&binfmt_lock);
		error = fmt->load_shlib(file);
		read_lock(&binfmt_lock);
		put_binfmt(fmt);
		if (error != -ENOEXEC)
			break;
	}
	read_unlock(&binfmt_lock);
exit:
	fput(file);
out:
	return error;
}
#endif /* #ifdef CONFIG_USELIB */
#ifdef CONFIG_MMU
/*
 * The nascent bprm->mm is not visible until exec_mmap() but it can
 * use a lot of memory, account these pages in current->mm temporarily
 * for oom_badness()->get_mm_rss(). Once exec succeeds or fails, we
 * change the counter back via acct_arg_size(0).
 */
static void acct_arg_size(struct linux_binprm *bprm, unsigned long pages)
{
	struct mm_struct *mm = current->mm;
	long diff = (long)(pages - bprm->vma_pages);

	if (!mm || !diff)
		return;

	bprm->vma_pages = pages;
	add_mm_counter(mm, MM_ANONPAGES, diff);
}
static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
		int write)
{
	struct page *page;
	int ret;
	unsigned int gup_flags = FOLL_FORCE;

#ifdef CONFIG_STACK_GROWSUP
	if (write) {
		ret = expand_downwards(bprm->vma, pos);
		if (ret < 0)
			return NULL;
	}
#endif

	if (write)
		gup_flags |= FOLL_WRITE;

	/*
	 * We are doing an exec().  'current' is the process
	 * doing the exec and bprm->mm is the new process's mm.
	 */
	ret = get_user_pages_remote(current, bprm->mm, pos, 1, gup_flags,
			&page, NULL, NULL);
	if (ret <= 0)
		return NULL;

	if (write) {
		unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start;
		unsigned long ptr_size, limit;

		/*
		 * Since the stack will hold pointers to the strings, we
		 * must account for them as well.
		 *
		 * The size calculation is the entire vma while each arg page is
		 * built, so each time we get here it's calculating how far it
		 * is currently (rather than each call being just the newly
		 * added size from the arg page).  As a result, we need to
		 * always add the entire size of the pointers, so that on the
		 * last call to get_arg_page() we'll actually have the entire
		 * correct size.
		 */
		ptr_size = (bprm->argc + bprm->envc) * sizeof(void *);
		if (ptr_size > ULONG_MAX - size)
			goto fail;
		size += ptr_size;

		acct_arg_size(bprm, size / PAGE_SIZE);

		/*
		 * We've historically supported up to 32 pages (ARG_MAX)
		 * of argument strings even with small stacks
		 */
		if (size <= ARG_MAX)
			return page;

		/*
		 * Limit to 1/4 of the max stack size or 3/4 of _STK_LIM
		 * (whichever is smaller) for the argv+env strings.
		 * This ensures that:
		 *  - the remaining binfmt code will not run out of stack space,
		 *  - the program will have a reasonable amount of stack left
		 *    to work from.
		 */
		limit = _STK_LIM / 4 * 3;
		limit = min(limit, rlimit(RLIMIT_STACK) / 4);
		if (size > limit)
			goto fail;
	}

	return page;

fail:
	put_page(page);
	return NULL;
}
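/*
 * Worked example of the limit above (illustrative numbers): with
 * _STK_LIM = 8 MiB and the common RLIMIT_STACK default of 8 MiB,
 *
 *	limit = min(_STK_LIM / 4 * 3, rlimit(RLIMIT_STACK) / 4)
 *	      = min(6 MiB, 2 MiB)
 *	      = 2 MiB
 *
 * so once argv+env strings plus their pointers exceed the historical
 * ARG_MAX, they may consume at most a quarter of the stack rlimit.
 */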
static void put_arg_page(struct page *page)
{
	put_page(page);
}

static void free_arg_pages(struct linux_binprm *bprm)
{
}

static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
		struct page *page)
{
	flush_cache_page(bprm->vma, pos, page_to_pfn(page));
}
static int __bprm_mm_init(struct linux_binprm *bprm)
{
	int err;
	struct vm_area_struct *vma = NULL;
	struct mm_struct *mm = bprm->mm;

	bprm->vma = vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
	if (!vma)
		return -ENOMEM;

	if (down_write_killable(&mm->mmap_sem)) {
		err = -EINTR;
		goto err_free;
	}

	/*
	 * Place the stack at the largest stack address the architecture
	 * supports. Later, we'll move this to an appropriate place. We don't
	 * use STACK_TOP because that can depend on attributes which aren't
	 * configured yet.
	 */
	BUILD_BUG_ON(VM_STACK_FLAGS & VM_STACK_INCOMPLETE_SETUP);
	vma->vm_end = STACK_TOP_MAX;
	vma->vm_start = vma->vm_end - PAGE_SIZE;
	vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	INIT_LIST_HEAD(&vma->anon_vma_chain);

	err = insert_vm_struct(mm, vma);
	if (err)
		goto err;

	mm->stack_vm = mm->total_vm = 1;
	arch_bprm_mm_init(mm, vma);
	up_write(&mm->mmap_sem);
	bprm->p = vma->vm_end - sizeof(void *);
	return 0;
err:
	up_write(&mm->mmap_sem);
err_free:
	bprm->vma = NULL;
	kmem_cache_free(vm_area_cachep, vma);
	return err;
}
static bool valid_arg_len(struct linux_binprm *bprm, long len)
{
	return len <= MAX_ARG_STRLEN;
}

#else
static inline void acct_arg_size(struct linux_binprm *bprm, unsigned long pages)
{
}

static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
		int write)
{
	struct page *page;

	page = bprm->page[pos / PAGE_SIZE];
	if (!page && write) {
		page = alloc_page(GFP_HIGHUSER|__GFP_ZERO);
		if (!page)
			return NULL;
		bprm->page[pos / PAGE_SIZE] = page;
	}

	return page;
}

static void put_arg_page(struct page *page)
{
}

static void free_arg_page(struct linux_binprm *bprm, int i)
{
	if (bprm->page[i]) {
		__free_page(bprm->page[i]);
		bprm->page[i] = NULL;
	}
}

static void free_arg_pages(struct linux_binprm *bprm)
{
	int i;

	for (i = 0; i < MAX_ARG_PAGES; i++)
		free_arg_page(bprm, i);
}

static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
		struct page *page)
{
}

static int __bprm_mm_init(struct linux_binprm *bprm)
{
	bprm->p = PAGE_SIZE * MAX_ARG_PAGES - sizeof(void *);
	return 0;
}

static bool valid_arg_len(struct linux_binprm *bprm, long len)
{
	return len <= bprm->p;
}

#endif /* CONFIG_MMU */
/*
 * Create a new mm_struct and populate it with a temporary stack
 * vm_area_struct. We don't have enough context at this point to set the stack
 * flags, permissions, and offset, so we use temporary values. We'll update
 * them later in setup_arg_pages().
 */
static int bprm_mm_init(struct linux_binprm *bprm)
{
	int err;
	struct mm_struct *mm = NULL;

	bprm->mm = mm = mm_alloc();
	err = -ENOMEM;
	if (!mm)
		goto err;

	err = __bprm_mm_init(bprm);
	if (err)
		goto err;

	return 0;

err:
	if (mm) {
		bprm->mm = NULL;
		mmdrop(mm);
	}

	return err;
}
struct user_arg_ptr {
#ifdef CONFIG_COMPAT
	bool is_compat;
#endif
	union {
		const char __user *const __user *native;
#ifdef CONFIG_COMPAT
		const compat_uptr_t __user *compat;
#endif
	} ptr;
};

static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
{
	const char __user *native;

#ifdef CONFIG_COMPAT
	if (unlikely(argv.is_compat)) {
		compat_uptr_t compat;

		if (get_user(compat, argv.ptr.compat + nr))
			return ERR_PTR(-EFAULT);

		return compat_ptr(compat);
	}
#endif

	if (get_user(native, argv.ptr.native + nr))
		return ERR_PTR(-EFAULT);

	return native;
}
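/*
 * Illustration (not from the original source): for a 32-bit task on a
 * 64-bit kernel, argv is an array of 32-bit compat_uptr_t values, so the
 * nth entry is read as 4 bytes and widened before use:
 *
 *	compat_uptr_t up;
 *	get_user(up, argv.ptr.compat + nr);	// reads a 32-bit pointer
 *	p = compat_ptr(up);			// widens to a native pointer
 *
 * The union keeps both views in one argument so count() and
 * copy_strings() need no compat-specific variants.
 */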
/*
 * count() counts the number of strings in array ARGV.
 */
static int count(struct user_arg_ptr argv, int max)
{
	int i = 0;

	if (argv.ptr.native != NULL) {
		for (;;) {
			const char __user *p = get_user_arg_ptr(argv, i);

			if (!p)
				break;

			if (IS_ERR(p))
				return -EFAULT;

			if (i >= max)
				return -E2BIG;
			++i;

			if (fatal_signal_pending(current))
				return -ERESTARTNOHAND;
			cond_resched();
		}
	}
	return i;
}
/*
 * 'copy_strings()' copies argument/environment strings from the old
 * process's memory to the new process's stack.  The call to get_user_pages()
 * ensures the destination page is created and not swapped out.
 */
static int copy_strings(int argc, struct user_arg_ptr argv,
			struct linux_binprm *bprm)
{
	struct page *kmapped_page = NULL;
	char *kaddr = NULL;
	unsigned long kpos = 0;
	int ret;

	while (argc-- > 0) {
		const char __user *str;
		int len;
		unsigned long pos;

		ret = -EFAULT;
		str = get_user_arg_ptr(argv, argc);
		if (IS_ERR(str))
			goto out;

		len = strnlen_user(str, MAX_ARG_STRLEN);
		if (!len)
			goto out;

		ret = -E2BIG;
		if (!valid_arg_len(bprm, len))
			goto out;

		/* We're going to work our way backwards. */
		pos = bprm->p;
		str += len;
		bprm->p -= len;

		while (len > 0) {
			int offset, bytes_to_copy;

			if (fatal_signal_pending(current)) {
				ret = -ERESTARTNOHAND;
				goto out;
			}
			cond_resched();

			offset = pos % PAGE_SIZE;
			if (offset == 0)
				offset = PAGE_SIZE;

			bytes_to_copy = offset;
			if (bytes_to_copy > len)
				bytes_to_copy = len;

			offset -= bytes_to_copy;
			pos -= bytes_to_copy;
			str -= bytes_to_copy;
			len -= bytes_to_copy;

			if (!kmapped_page || kpos != (pos & PAGE_MASK)) {
				struct page *page;

				page = get_arg_page(bprm, pos, 1);
				if (!page) {
					ret = -E2BIG;
					goto out;
				}

				if (kmapped_page) {
					flush_kernel_dcache_page(kmapped_page);
					kunmap(kmapped_page);
					put_arg_page(kmapped_page);
				}
				kmapped_page = page;
				kaddr = kmap(kmapped_page);
				kpos = pos & PAGE_MASK;
				flush_arg_page(bprm, kpos, kmapped_page);
			}
			if (copy_from_user(kaddr+offset, str, bytes_to_copy)) {
				ret = -EFAULT;
				goto out;
			}
		}
	}
	ret = 0;
out:
	if (kmapped_page) {
		flush_kernel_dcache_page(kmapped_page);
		kunmap(kmapped_page);
		put_arg_page(kmapped_page);
	}
	return ret;
}
/*
 * Like copy_strings, but get argv and its values from kernel memory.
 */
int copy_strings_kernel(int argc, const char *const *__argv,
			struct linux_binprm *bprm)
{
	int r;
	mm_segment_t oldfs = get_fs();
	struct user_arg_ptr argv = {
		.ptr.native = (const char __user *const  __user *)__argv,
	};

	set_fs(KERNEL_DS);
	r = copy_strings(argc, argv, bprm);
	set_fs(oldfs);

	return r;
}
EXPORT_SYMBOL(copy_strings_kernel);
#ifdef CONFIG_MMU

/*
 * During bprm_mm_init(), we create a temporary stack at STACK_TOP_MAX. Once
 * the binfmt code determines where the new stack should reside, we shift it to
 * its final location. The process proceeds as follows:
 *
 * 1) Use shift to calculate the new vma endpoints.
 * 2) Extend vma to cover both the old and new ranges. This ensures the
 *    arguments passed to subsequent functions are consistent.
 * 3) Move vma's page tables to the new range.
 * 4) Free up any cleared pgd range.
 * 5) Shrink the vma to cover only the new range.
 */
static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long old_start = vma->vm_start;
	unsigned long old_end = vma->vm_end;
	unsigned long length = old_end - old_start;
	unsigned long new_start = old_start - shift;
	unsigned long new_end = old_end - shift;
	struct mmu_gather tlb;

	BUG_ON(new_start > new_end);

	/*
	 * ensure there are no vmas between where we want to go
	 * and where we are
	 */
	if (vma != find_vma(mm, new_start))
		return -EFAULT;

	/*
	 * cover the whole range: [new_start, old_end)
	 */
	if (vma_adjust(vma, new_start, old_end, vma->vm_pgoff, NULL))
		return -ENOMEM;

	/*
	 * move the page tables downwards, on failure we rely on
	 * process cleanup to remove whatever mess we made.
	 */
	if (length != move_page_tables(vma, old_start,
				       vma, new_start, length, false))
		return -ENOMEM;

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm, old_start, old_end);
	if (new_end > old_start) {
		/*
		 * when the old and new regions overlap clear from new_end.
		 */
		free_pgd_range(&tlb, new_end, old_end, new_end,
			vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
	} else {
		/*
		 * otherwise, clean from old_start; this is done to not touch
		 * the address space in [new_end, old_start) some architectures
		 * have constraints on va-space that make this illegal (IA64) -
		 * for the others it's just a little faster.
		 */
		free_pgd_range(&tlb, old_start, old_end, new_end,
			vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
	}
	tlb_finish_mmu(&tlb, old_start, old_end);

	/*
	 * Shrink the vma to just the new range.  Always succeeds.
	 */
	vma_adjust(vma, new_start, new_end, vma->vm_pgoff, NULL);

	return 0;
}
/*
 * Finalizes the stack vm_area_struct. The flags and permissions are updated,
 * the stack is optionally relocated, and some extra space is added.
 */
int setup_arg_pages(struct linux_binprm *bprm,
		    unsigned long stack_top,
		    int executable_stack)
{
	unsigned long ret;
	unsigned long stack_shift;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = bprm->vma;
	struct vm_area_struct *prev = NULL;
	unsigned long vm_flags;
	unsigned long stack_base;
	unsigned long stack_size;
	unsigned long stack_expand;
	unsigned long rlim_stack;

#ifdef CONFIG_STACK_GROWSUP
	/* Limit stack size */
	stack_base = rlimit_max(RLIMIT_STACK);
	if (stack_base > STACK_SIZE_MAX)
		stack_base = STACK_SIZE_MAX;

	/* Add space for stack randomization. */
	stack_base += (STACK_RND_MASK << PAGE_SHIFT);

	/* Make sure we didn't let the argument array grow too large. */
	if (vma->vm_end - vma->vm_start > stack_base)
		return -ENOMEM;

	stack_base = PAGE_ALIGN(stack_top - stack_base);

	stack_shift = vma->vm_start - stack_base;
	mm->arg_start = bprm->p - stack_shift;
	bprm->p = vma->vm_end - stack_shift;
#else
	stack_top = arch_align_stack(stack_top);
	stack_top = PAGE_ALIGN(stack_top);

	if (unlikely(stack_top < mmap_min_addr) ||
	    unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
		return -ENOMEM;

	stack_shift = vma->vm_end - stack_top;

	bprm->p -= stack_shift;
	mm->arg_start = bprm->p;
#endif

	if (bprm->loader)
		bprm->loader -= stack_shift;
	bprm->exec -= stack_shift;

	if (down_write_killable(&mm->mmap_sem))
		return -EINTR;

	vm_flags = VM_STACK_FLAGS;

	/*
	 * Adjust stack execute permissions; explicitly enable for
	 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
	 * (arch default) otherwise.
	 */
	if (unlikely(executable_stack == EXSTACK_ENABLE_X))
		vm_flags |= VM_EXEC;
	else if (executable_stack == EXSTACK_DISABLE_X)
		vm_flags &= ~VM_EXEC;
	vm_flags |= mm->def_flags;
	vm_flags |= VM_STACK_INCOMPLETE_SETUP;

	ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
			vm_flags);
	if (ret)
		goto out_unlock;
	BUG_ON(prev != vma);

	/* Move stack pages down in memory. */
	if (stack_shift) {
		ret = shift_arg_pages(vma, stack_shift);
		if (ret)
			goto out_unlock;
	}

	/* mprotect_fixup is overkill to remove the temporary stack flags */
	vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;

	stack_expand = 131072UL; /* randomly 32*4k (or 2*64k) pages */
	stack_size = vma->vm_end - vma->vm_start;
	/*
	 * Align this down to a page boundary as expand_stack
	 * will align it up.
	 */
	rlim_stack = rlimit(RLIMIT_STACK) & PAGE_MASK;
#ifdef CONFIG_STACK_GROWSUP
	if (stack_size + stack_expand > rlim_stack)
		stack_base = vma->vm_start + rlim_stack;
	else
		stack_base = vma->vm_end + stack_expand;
#else
	if (stack_size + stack_expand > rlim_stack)
		stack_base = vma->vm_end - rlim_stack;
	else
		stack_base = vma->vm_start - stack_expand;
#endif
	current->mm->start_stack = bprm->p;
	ret = expand_stack(vma, stack_base);
	if (ret)
		ret = -EFAULT;

out_unlock:
	up_write(&mm->mmap_sem);
	return ret;
}
EXPORT_SYMBOL(setup_arg_pages);
#else

/*
 * Transfer the program arguments and environment from the holding pages
 * onto the stack. The provided stack pointer is adjusted accordingly.
 */
int transfer_args_to_stack(struct linux_binprm *bprm,
			   unsigned long *sp_location)
{
	unsigned long index, stop, sp;
	int ret = 0;

	stop = bprm->p >> PAGE_SHIFT;
	sp = *sp_location;

	for (index = MAX_ARG_PAGES - 1; index >= stop; index--) {
		unsigned int offset = index == stop ? bprm->p & ~PAGE_MASK : 0;
		char *src = kmap(bprm->page[index]) + offset;
		sp -= PAGE_SIZE - offset;
		if (copy_to_user((void *) sp, src, PAGE_SIZE - offset) != 0)
			ret = -EFAULT;
		kunmap(bprm->page[index]);
		if (ret)
			goto out;
	}

	*sp_location = sp;

out:
	return ret;
}
EXPORT_SYMBOL(transfer_args_to_stack);

#endif /* CONFIG_MMU */
static struct file *do_open_execat(int fd, struct filename *name, int flags)
{
	struct file *file;
	int err;
	struct open_flags open_exec_flags = {
		.open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
		.acc_mode = MAY_EXEC,
		.intent = LOOKUP_OPEN,
		.lookup_flags = LOOKUP_FOLLOW,
	};

	if ((flags & ~(AT_SYMLINK_NOFOLLOW | AT_EMPTY_PATH)) != 0)
		return ERR_PTR(-EINVAL);
	if (flags & AT_SYMLINK_NOFOLLOW)
		open_exec_flags.lookup_flags &= ~LOOKUP_FOLLOW;
	if (flags & AT_EMPTY_PATH)
		open_exec_flags.lookup_flags |= LOOKUP_EMPTY;

	file = do_filp_open(fd, name, &open_exec_flags);
	if (IS_ERR(file))
		goto out;

	err = -EACCES;
	if (!S_ISREG(file_inode(file)->i_mode))
		goto exit;

	if (path_noexec(&file->f_path))
		goto exit;

	err = deny_write_access(file);
	if (err)
		goto exit;

	if (name->name[0] != '\0')
		fsnotify_open(file);

	trace_open_exec(name->name);

out:
	return file;

exit:
	fput(file);
	return ERR_PTR(err);
}

struct file *open_exec(const char *name)
{
	struct filename *filename = getname_kernel(name);
	struct file *f = ERR_CAST(filename);

	if (!IS_ERR(filename)) {
		f = do_open_execat(AT_FDCWD, filename, 0);
		putname(filename);
	}
	return f;
}
EXPORT_SYMBOL(open_exec);
int kernel_read_file(struct file *file, void **buf, loff_t *size,
		     loff_t max_size, enum kernel_read_file_id id)
{
	loff_t i_size, pos;
	ssize_t bytes = 0;
	int ret;

	if (!S_ISREG(file_inode(file)->i_mode) || max_size < 0)
		return -EINVAL;

	ret = security_kernel_read_file(file, id);
	if (ret)
		return ret;

	ret = deny_write_access(file);
	if (ret)
		return ret;

	i_size = i_size_read(file_inode(file));
	if (max_size > 0 && i_size > max_size) {
		ret = -EFBIG;
		goto out;
	}
	if (i_size <= 0) {
		ret = -EINVAL;
		goto out;
	}

	if (id != READING_FIRMWARE_PREALLOC_BUFFER)
		*buf = vmalloc(i_size);
	if (!*buf) {
		ret = -ENOMEM;
		goto out;
	}

	pos = 0;
	while (pos < i_size) {
		bytes = kernel_read(file, *buf + pos, i_size - pos, &pos);
		if (bytes < 0) {
			ret = bytes;
			goto out_free;
		}

		if (bytes == 0)
			break;
	}

	if (pos != i_size) {
		ret = -EIO;
		goto out_free;
	}

	ret = security_kernel_post_read_file(file, *buf, i_size, id);
	if (!ret)
		*size = pos;

out_free:
	if (ret < 0) {
		if (id != READING_FIRMWARE_PREALLOC_BUFFER) {
			vfree(*buf);
			*buf = NULL;
		}
	}

out:
	allow_write_access(file);
	return ret;
}
EXPORT_SYMBOL_GPL(kernel_read_file);

int kernel_read_file_from_path(const char *path, void **buf, loff_t *size,
			       loff_t max_size, enum kernel_read_file_id id)
{
	struct file *file;
	int ret;

	if (!path || !*path)
		return -EINVAL;

	file = filp_open(path, O_RDONLY, 0);
	if (IS_ERR(file))
		return PTR_ERR(file);

	ret = kernel_read_file(file, buf, size, max_size, id);
	fput(file);
	return ret;
}
EXPORT_SYMBOL_GPL(kernel_read_file_from_path);

int kernel_read_file_from_fd(int fd, void **buf, loff_t *size, loff_t max_size,
			     enum kernel_read_file_id id)
{
	struct fd f = fdget(fd);
	int ret = -EBADF;

	if (!f.file)
		goto out;

	ret = kernel_read_file(f.file, buf, size, max_size, id);
out:
	fdput(f);
	return ret;
}
EXPORT_SYMBOL_GPL(kernel_read_file_from_fd);
ssize_t read_code(struct file *file, unsigned long addr, loff_t pos, size_t len)
{
	ssize_t res = vfs_read(file, (void __user *)addr, len, &pos);
	if (res > 0)
		flush_icache_range(addr, addr + len);
	return res;
}
EXPORT_SYMBOL(read_code);
static int exec_mmap(struct mm_struct *mm)
{
	struct task_struct *tsk;
	struct mm_struct *old_mm, *active_mm;

	/* Notify parent that we're no longer interested in the old VM */
	tsk = current;
	old_mm = current->mm;
	mm_release(tsk, old_mm);

	if (old_mm) {
		sync_mm_rss(old_mm);
		/*
		 * Make sure that if there is a core dump in progress
		 * for the old mm, we get out and die instead of going
		 * through with the exec.  We must hold mmap_sem around
		 * checking core_state and changing tsk->mm.
		 */
		down_read(&old_mm->mmap_sem);
		if (unlikely(old_mm->core_state)) {
			up_read(&old_mm->mmap_sem);
			return -EINTR;
		}
	}
	task_lock(tsk);
	active_mm = tsk->active_mm;
	tsk->mm = mm;
	tsk->active_mm = mm;
	activate_mm(active_mm, mm);
	tsk->mm->vmacache_seqnum = 0;
	vmacache_flush(tsk);
	task_unlock(tsk);
	if (old_mm) {
		up_read(&old_mm->mmap_sem);
		BUG_ON(active_mm != old_mm);
		setmax_mm_hiwater_rss(&tsk->signal->maxrss, old_mm);
		mm_update_next_owner(old_mm);
		mmput(old_mm);
		return 0;
	}
	mmdrop(active_mm);
	return 0;
}
/*
 * This function makes sure the current process has its own signal table,
 * so that flush_signal_handlers can later reset the handlers without
 * disturbing other processes.  (Other processes might share the signal
 * table via the CLONE_SIGHAND option to clone().)
 */
static int de_thread(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	struct sighand_struct *oldsighand = tsk->sighand;
	spinlock_t *lock = &oldsighand->siglock;

	if (thread_group_empty(tsk))
		goto no_thread_group;

	/*
	 * Kill all other threads in the thread group.
	 */
	spin_lock_irq(lock);
	if (signal_group_exit(sig)) {
		/*
		 * Another group action in progress, just
		 * return so that the signal is processed.
		 */
		spin_unlock_irq(lock);
		return -EAGAIN;
	}

	sig->group_exit_task = tsk;
	sig->notify_count = zap_other_threads(tsk);
	if (!thread_group_leader(tsk))
		sig->notify_count--;

	while (sig->notify_count) {
		__set_current_state(TASK_KILLABLE);
		spin_unlock_irq(lock);
		schedule();
		if (unlikely(__fatal_signal_pending(tsk)))
			goto killed;
		spin_lock_irq(lock);
	}
	spin_unlock_irq(lock);

	/*
	 * At this point all other threads have exited, all we have to
	 * do is to wait for the thread group leader to become inactive,
	 * and to assume its PID:
	 */
	if (!thread_group_leader(tsk)) {
		struct task_struct *leader = tsk->group_leader;

		for (;;) {
			cgroup_threadgroup_change_begin(tsk);
			write_lock_irq(&tasklist_lock);
			/*
			 * Do this under tasklist_lock to ensure that
			 * exit_notify() can't miss ->group_exit_task
			 */
			sig->notify_count = -1;
			if (likely(leader->exit_state))
				break;
			__set_current_state(TASK_KILLABLE);
			write_unlock_irq(&tasklist_lock);
			cgroup_threadgroup_change_end(tsk);
			schedule();
			if (unlikely(__fatal_signal_pending(tsk)))
				goto killed;
		}

		/*
		 * The only record we have of the real-time age of a
		 * process, regardless of execs it's done, is start_time.
		 * All the past CPU time is accumulated in signal_struct
		 * from sister threads now dead.  But in this non-leader
		 * exec, nothing survives from the original leader thread,
		 * whose birth marks the true age of this process now.
		 * When we take on its identity by switching to its PID, we
		 * also take its birthdate (always earlier than our own).
		 */
		tsk->start_time = leader->start_time;
		tsk->real_start_time = leader->real_start_time;

		BUG_ON(!same_thread_group(leader, tsk));
		BUG_ON(has_group_leader_pid(tsk));
		/*
		 * An exec() starts a new thread group with the
		 * TGID of the previous thread group. Rehash the
		 * two threads with a switched PID, and release
		 * the former thread group leader:
		 */

		/* Become a process group leader with the old leader's pid.
		 * The old leader becomes a thread of this thread group.
		 * Note: The old leader also uses this pid until release_task
		 *       is called.  Odd but simple and correct.
		 */
		tsk->pid = leader->pid;
		change_pid(tsk, PIDTYPE_PID, task_pid(leader));
		transfer_pid(leader, tsk, PIDTYPE_PGID);
		transfer_pid(leader, tsk, PIDTYPE_SID);

		list_replace_rcu(&leader->tasks, &tsk->tasks);
		list_replace_init(&leader->sibling, &tsk->sibling);

		tsk->group_leader = tsk;
		leader->group_leader = tsk;

		tsk->exit_signal = SIGCHLD;
		leader->exit_signal = -1;

		BUG_ON(leader->exit_state != EXIT_ZOMBIE);
		leader->exit_state = EXIT_DEAD;

		/*
		 * We are going to release_task()->ptrace_unlink() silently,
		 * the tracer can sleep in do_wait(). EXIT_DEAD guarantees
		 * the tracer won't block again waiting for this thread.
		 */
		if (unlikely(leader->ptrace))
			__wake_up_parent(leader, leader->parent);
		write_unlock_irq(&tasklist_lock);
		cgroup_threadgroup_change_end(tsk);

		release_task(leader);
	}

	sig->group_exit_task = NULL;
	sig->notify_count = 0;

no_thread_group:
	/* we have changed execution domain */
	tsk->exit_signal = SIGCHLD;

#ifdef CONFIG_POSIX_TIMERS
	exit_itimers(sig);
	flush_itimer_signals();
#endif

	if (atomic_read(&oldsighand->count) != 1) {
		struct sighand_struct *newsighand;
		/*
		 * This ->sighand is shared with the CLONE_SIGHAND
		 * but not CLONE_THREAD task, switch to the new one.
		 */
		newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
		if (!newsighand)
			return -ENOMEM;

		atomic_set(&newsighand->count, 1);
		memcpy(newsighand->action, oldsighand->action,
		       sizeof(newsighand->action));

		write_lock_irq(&tasklist_lock);
		spin_lock(&oldsighand->siglock);
		rcu_assign_pointer(tsk->sighand, newsighand);
		spin_unlock(&oldsighand->siglock);
		write_unlock_irq(&tasklist_lock);

		__cleanup_sighand(oldsighand);
	}

	BUG_ON(!thread_group_leader(tsk));
	return 0;

killed:
	/* protects against exit_notify() and __exit_signal() */
	read_lock(&tasklist_lock);
	sig->group_exit_task = NULL;
	sig->notify_count = 0;
	read_unlock(&tasklist_lock);
	return -EAGAIN;
}
char *__get_task_comm(char *buf, size_t buf_size, struct task_struct *tsk)
{
	task_lock(tsk);
	strncpy(buf, tsk->comm, buf_size);
	task_unlock(tsk);
	return buf;
}
EXPORT_SYMBOL_GPL(__get_task_comm);
/*
 * These functions flush out all traces of the currently running executable
 * so that a new one can be started
 */

void __set_task_comm(struct task_struct *tsk, const char *buf, bool exec)
{
	task_lock(tsk);
	trace_task_rename(tsk, buf);
	strlcpy(tsk->comm, buf, sizeof(tsk->comm));
	task_unlock(tsk);
	perf_event_comm(tsk, exec);
}
/*
 * Calling this is the point of no return. None of the failures will be
 * seen by userspace since either the process is already taking a fatal
 * signal (via de_thread() or coredump), or will have SEGV raised
 * (after exec_mmap()) by search_binary_handler() (see below).
 */
int flush_old_exec(struct linux_binprm * bprm)
{
	int retval;

	/*
	 * Make sure we have a private signal table and that
	 * we are unassociated from the previous thread group.
	 */
	retval = de_thread(current);
	if (retval)
		goto out;

	/*
	 * Must be called _before_ exec_mmap() as bprm->mm is
	 * not visible until then. This also enables the update
	 * to be lockless.
	 */
	set_mm_exe_file(bprm->mm, bprm->file);

	/*
	 * Release all of the old mmap stuff
	 */
	acct_arg_size(bprm, 0);
	retval = exec_mmap(bprm->mm);
	if (retval)
		goto out;

	/*
	 * After clearing bprm->mm (to mark that current is using the
	 * prepared mm now), we have nothing left of the original
	 * process. If anything from here on returns an error, the check
	 * in search_binary_handler() will SEGV current.
	 */
	bprm->mm = NULL;

	set_fs(USER_DS);
	current->flags &= ~(PF_RANDOMIZE | PF_FORKNOEXEC | PF_KTHREAD |
					PF_NOFREEZE | PF_NO_SETAFFINITY);
	flush_thread();
	current->personality &= ~bprm->per_clear;

	/*
	 * We have to apply CLOEXEC before we change whether the process is
	 * dumpable (in setup_new_exec) to avoid a race with a process in userspace
	 * trying to access the should-be-closed file descriptors of a process
	 * undergoing exec(2).
	 */
	do_close_on_exec(current->files);
	return 0;

out:
	return retval;
}
EXPORT_SYMBOL(flush_old_exec);
void would_dump(struct linux_binprm *bprm, struct file *file)
{
	struct inode *inode = file_inode(file);
	if (inode_permission(inode, MAY_READ) < 0) {
		struct user_namespace *old, *user_ns;
		bprm->interp_flags |= BINPRM_FLAGS_ENFORCE_NONDUMP;

		/* Ensure mm->user_ns contains the executable */
		user_ns = old = bprm->mm->user_ns;
		while ((user_ns != &init_user_ns) &&
		       !privileged_wrt_inode_uidgid(user_ns, inode))
			user_ns = user_ns->parent;

		if (old != user_ns) {
			bprm->mm->user_ns = get_user_ns(user_ns);
			put_user_ns(old);
		}
	}
}
EXPORT_SYMBOL(would_dump);
void setup_new_exec(struct linux_binprm * bprm)
{
	/*
	 * Once here, prepare_binprm() will not be called any more, so
	 * the final state of setuid/setgid/fscaps can be merged into the
	 * secureexec flag.
	 */
	bprm->secureexec |= bprm->cap_elevated;

	if (bprm->secureexec) {
		/* Make sure parent cannot signal privileged process. */
		current->pdeath_signal = 0;

		/*
		 * For secureexec, reset the stack limit to sane default to
		 * avoid bad behavior from the prior rlimits. This has to
		 * happen before arch_pick_mmap_layout(), which examines
		 * RLIMIT_STACK, but after the point of no return to avoid
		 * needing to clean up the change on failure.
		 */
		if (current->signal->rlim[RLIMIT_STACK].rlim_cur > _STK_LIM)
			current->signal->rlim[RLIMIT_STACK].rlim_cur = _STK_LIM;
	}

	arch_pick_mmap_layout(current->mm);

	current->sas_ss_sp = current->sas_ss_size = 0;

	/*
	 * Figure out dumpability. Note that this checking only of current
	 * is wrong, but userspace depends on it. This should be testing
	 * bprm->secureexec instead.
	 */
	if (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP ||
	    !(uid_eq(current_euid(), current_uid()) &&
	      gid_eq(current_egid(), current_gid())))
		set_dumpable(current->mm, suid_dumpable);
	else
		set_dumpable(current->mm, SUID_DUMP_USER);

	arch_setup_new_exec();
	perf_event_exec();
	__set_task_comm(current, kbasename(bprm->filename), true);

	/* Set the new mm task size. We have to do that late because it may
	 * depend on TIF_32BIT which is only updated in flush_thread() on
	 * some architectures like powerpc
	 */
	current->mm->task_size = TASK_SIZE;

	/* An exec changes our domain. We are no longer part of the thread
	   group */
	current->self_exec_id++;
	flush_signal_handlers(current, 0);
}
EXPORT_SYMBOL(setup_new_exec);
/*
 * Prepare credentials and lock ->cred_guard_mutex.
 * install_exec_creds() commits the new creds and drops the lock.
 * Or, if exec fails before, free_bprm() should release ->cred and
 * unlock.
 */
int prepare_bprm_creds(struct linux_binprm *bprm)
{
	if (mutex_lock_interruptible(&current->signal->cred_guard_mutex))
		return -ERESTARTNOINTR;

	bprm->cred = prepare_exec_creds();
	if (likely(bprm->cred))
		return 0;

	mutex_unlock(&current->signal->cred_guard_mutex);
	return -ENOMEM;
}

static void free_bprm(struct linux_binprm *bprm)
{
	free_arg_pages(bprm);
	if (bprm->cred) {
		mutex_unlock(&current->signal->cred_guard_mutex);
		abort_creds(bprm->cred);
	}
	if (bprm->file) {
		allow_write_access(bprm->file);
		fput(bprm->file);
	}
	/* If a binfmt changed the interp, free it. */
	if (bprm->interp != bprm->filename)
		kfree(bprm->interp);
	kfree(bprm);
}
int bprm_change_interp(const char *interp, struct linux_binprm *bprm)
{
	/* If a binfmt changed the interp, free it first. */
	if (bprm->interp != bprm->filename)
		kfree(bprm->interp);
	bprm->interp = kstrdup(interp, GFP_KERNEL);
	if (!bprm->interp)
		return -ENOMEM;
	return 0;
}
EXPORT_SYMBOL(bprm_change_interp);
/*
 * install the new credentials for this executable
 */
void install_exec_creds(struct linux_binprm *bprm)
{
	security_bprm_committing_creds(bprm);

	commit_creds(bprm->cred);
	bprm->cred = NULL;

	/*
	 * Disable monitoring for regular users
	 * when executing setuid binaries. Must
	 * wait until new credentials are committed
	 * by commit_creds() above
	 */
	if (get_dumpable(current->mm) != SUID_DUMP_USER)
		perf_event_exit_task(current);
	/*
	 * cred_guard_mutex must be held at least to this point to prevent
	 * ptrace_attach() from altering our determination of the task's
	 * credentials; any time after this it may be unlocked.
	 */
	security_bprm_committed_creds(bprm);
	mutex_unlock(&current->signal->cred_guard_mutex);
}
EXPORT_SYMBOL(install_exec_creds);
/*
 * determine how safe it is to execute the proposed program
 * - the caller must hold ->cred_guard_mutex to protect against
 *   PTRACE_ATTACH or seccomp thread-sync
 */
static void check_unsafe_exec(struct linux_binprm *bprm)
{
	struct task_struct *p = current, *t;
	unsigned n_fs;

	if (p->ptrace)
		bprm->unsafe |= LSM_UNSAFE_PTRACE;

	/*
	 * This isn't strictly necessary, but it makes it harder for LSMs to
	 * mess up.
	 */
	if (task_no_new_privs(current))
		bprm->unsafe |= LSM_UNSAFE_NO_NEW_PRIVS;

	t = p;
	n_fs = 1;
	spin_lock(&p->fs->lock);
	rcu_read_lock();
	while_each_thread(p, t) {
		if (t->fs == p->fs)
			n_fs++;
	}
	rcu_read_unlock();

	if (p->fs->users > n_fs)
		bprm->unsafe |= LSM_UNSAFE_SHARE;
	else
		p->fs->in_exec = 1;
	spin_unlock(&p->fs->lock);
}
static void bprm_fill_uid(struct linux_binprm *bprm)
{
	struct inode *inode;
	unsigned int mode;
	kuid_t uid;
	kgid_t gid;

	/*
	 * Since this can be called multiple times (via prepare_binprm),
	 * we must clear any previous work done when setting set[ug]id
	 * bits from any earlier bprm->file uses (for example when run
	 * first for a setuid script then again for its interpreter).
	 */
	bprm->cred->euid = current_euid();
	bprm->cred->egid = current_egid();

	if (path_nosuid(&bprm->file->f_path))
		return;

	if (task_no_new_privs(current))
		return;

	inode = bprm->file->f_path.dentry->d_inode;
	mode = READ_ONCE(inode->i_mode);
	if (!(mode & (S_ISUID|S_ISGID)))
		return;

	/* Be careful if suid/sgid is set */
	inode_lock(inode);

	/* reload atomically mode/uid/gid now that lock held */
	mode = inode->i_mode;
	uid = inode->i_uid;
	gid = inode->i_gid;
	inode_unlock(inode);

	/* We ignore suid/sgid if there are no mappings for them in the ns */
	if (!kuid_has_mapping(bprm->cred->user_ns, uid) ||
		 !kgid_has_mapping(bprm->cred->user_ns, gid))
		return;

	if (mode & S_ISUID) {
		bprm->per_clear |= PER_CLEAR_ON_SETID;
		bprm->cred->euid = uid;
	}

	if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
		bprm->per_clear |= PER_CLEAR_ON_SETID;
		bprm->cred->egid = gid;
	}
}
/*
 * Fill the binprm structure from the inode.
 * Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes
 *
 * This may be called multiple times for binary chains (scripts for example).
 */
int prepare_binprm(struct linux_binprm *bprm)
{
	int retval;
	loff_t pos = 0;

	bprm_fill_uid(bprm);

	/* fill in binprm security blob */
	retval = security_bprm_set_creds(bprm);
	if (retval)
		return retval;
	bprm->called_set_creds = 1;

	memset(bprm->buf, 0, BINPRM_BUF_SIZE);
	return kernel_read(bprm->file, bprm->buf, BINPRM_BUF_SIZE, &pos);
}

EXPORT_SYMBOL(prepare_binprm);
/*
 * Arguments are '\0' separated strings found at the location bprm->p
 * points to; chop off the first by relocating bprm->p to right after
 * the first '\0' encountered.
 */
int remove_arg_zero(struct linux_binprm *bprm)
{
	int ret = 0;
	unsigned long offset;
	char *kaddr;
	struct page *page;

	if (!bprm->argc)
		return 0;

	do {
		offset = bprm->p & ~PAGE_MASK;
		page = get_arg_page(bprm, bprm->p, 0);
		if (!page) {
			ret = -EFAULT;
			goto out;
		}
		kaddr = kmap_atomic(page);

		for (; offset < PAGE_SIZE && kaddr[offset];
				offset++, bprm->p++)
			;

		kunmap_atomic(kaddr);
		put_arg_page(page);
	} while (offset == PAGE_SIZE);

	bprm->p++;
	bprm->argc--;
	ret = 0;

out:
	return ret;
}
EXPORT_SYMBOL(remove_arg_zero);
#define printable(c) (((c)=='\t') || ((c)=='\n') || (0x20<=(c) && (c)<=0x7e))
/*
 * cycle through the list of binary format handlers, until one recognizes
 * the image
 */
int search_binary_handler(struct linux_binprm *bprm)
{
	bool need_retry = IS_ENABLED(CONFIG_MODULES);
	struct linux_binfmt *fmt;
	int retval;

	/* This allows 4 levels of binfmt rewrites before failing hard. */
	if (bprm->recursion_depth > 5)
		return -ELOOP;

	retval = security_bprm_check(bprm);
	if (retval)
		return retval;

	retval = -ENOENT;
 retry:
	read_lock(&binfmt_lock);
	list_for_each_entry(fmt, &formats, lh) {
		if (!try_module_get(fmt->module))
			continue;
		read_unlock(&binfmt_lock);
		bprm->recursion_depth++;
		retval = fmt->load_binary(bprm);
		read_lock(&binfmt_lock);
		put_binfmt(fmt);
		bprm->recursion_depth--;
		if (retval < 0 && !bprm->mm) {
			/* we got to flush_old_exec() and failed after it */
			read_unlock(&binfmt_lock);
			force_sigsegv(SIGSEGV, current);
			return retval;
		}
		if (retval != -ENOEXEC || !bprm->file) {
			read_unlock(&binfmt_lock);
			return retval;
		}
	}
	read_unlock(&binfmt_lock);

	if (need_retry) {
		if (printable(bprm->buf[0]) && printable(bprm->buf[1]) &&
		    printable(bprm->buf[2]) && printable(bprm->buf[3]))
			return retval;
		if (request_module("binfmt-%04x", *(ushort *)(bprm->buf + 2)) < 0)
			return retval;
		need_retry = false;
		goto retry;
	}

	return retval;
}
EXPORT_SYMBOL(search_binary_handler);
static int exec_binprm(struct linux_binprm *bprm)
{
	pid_t old_pid, old_vpid;
	int ret;

	/* Need to fetch pid before load_binary changes it */
	old_pid = current->pid;
	rcu_read_lock();
	old_vpid = task_pid_nr_ns(current, task_active_pid_ns(current->parent));
	rcu_read_unlock();

	ret = search_binary_handler(bprm);
	if (ret >= 0) {
		audit_bprm(bprm);
		trace_sched_process_exec(current, old_pid, bprm);
		ptrace_event(PTRACE_EVENT_EXEC, old_vpid);
		proc_exec_connector(current);
	}

	return ret;
}
/*
 * sys_execve() executes a new program.
 */
static int do_execveat_common(int fd, struct filename *filename,
			      struct user_arg_ptr argv,
			      struct user_arg_ptr envp,
			      int flags)
{
	char *pathbuf = NULL;
	struct linux_binprm *bprm;
	struct file *file;
	struct files_struct *displaced;
	int retval;

	if (IS_ERR(filename))
		return PTR_ERR(filename);

	/*
	 * We move the actual failure in case of RLIMIT_NPROC excess from
	 * set*uid() to execve() because too many poorly written programs
	 * don't check setuid() return code.  Here we additionally recheck
	 * whether NPROC limit is still exceeded.
	 */
	if ((current->flags & PF_NPROC_EXCEEDED) &&
	    atomic_read(&current_user()->processes) > rlimit(RLIMIT_NPROC)) {
		retval = -EAGAIN;
		goto out_ret;
	}

	/* We're below the limit (still or again), so we don't want to make
	 * further execve() calls fail. */
	current->flags &= ~PF_NPROC_EXCEEDED;

	retval = unshare_files(&displaced);
	if (retval)
		goto out_ret;

	retval = -ENOMEM;
	bprm = kzalloc(sizeof(*bprm), GFP_KERNEL);
	if (!bprm)
		goto out_files;

	retval = prepare_bprm_creds(bprm);
	if (retval)
		goto out_free;

	check_unsafe_exec(bprm);
	current->in_execve = 1;

	file = do_open_execat(fd, filename, flags);
	retval = PTR_ERR(file);
	if (IS_ERR(file))
		goto out_unmark;

	sched_exec();

	bprm->file = file;
	if (fd == AT_FDCWD || filename->name[0] == '/') {
		bprm->filename = filename->name;
	} else {
		if (filename->name[0] == '\0')
			pathbuf = kasprintf(GFP_KERNEL, "/dev/fd/%d", fd);
		else
			pathbuf = kasprintf(GFP_KERNEL, "/dev/fd/%d/%s",
					    fd, filename->name);
		if (!pathbuf) {
			retval = -ENOMEM;
			goto out_unmark;
		}
		/*
		 * Record that a name derived from an O_CLOEXEC fd will be
		 * inaccessible after exec. Relies on having exclusive access to
		 * current->files (due to unshare_files above).
		 */
		if (close_on_exec(fd, rcu_dereference_raw(current->files->fdt)))
			bprm->interp_flags |= BINPRM_FLAGS_PATH_INACCESSIBLE;
		bprm->filename = pathbuf;
	}
	bprm->interp = bprm->filename;

	retval = bprm_mm_init(bprm);
	if (retval)
		goto out_unmark;

	bprm->argc = count(argv, MAX_ARG_STRINGS);
	if ((retval = bprm->argc) < 0)
		goto out;

	bprm->envc = count(envp, MAX_ARG_STRINGS);
	if ((retval = bprm->envc) < 0)
		goto out;

	retval = prepare_binprm(bprm);
	if (retval < 0)
		goto out;

	retval = copy_strings_kernel(1, &bprm->filename, bprm);
	if (retval < 0)
		goto out;

	bprm->exec = bprm->p;
	retval = copy_strings(bprm->envc, envp, bprm);
	if (retval < 0)
		goto out;

	retval = copy_strings(bprm->argc, argv, bprm);
	if (retval < 0)
		goto out;

	would_dump(bprm, bprm->file);

	retval = exec_binprm(bprm);
	if (retval < 0)
		goto out;

	/* execve succeeded */
	current->fs->in_exec = 0;
	current->in_execve = 0;
	membarrier_execve(current);
	acct_update_integrals(current);
	task_numa_free(current);
	free_bprm(bprm);
	kfree(pathbuf);
	putname(filename);
	if (displaced)
		put_files_struct(displaced);
	return retval;

out:
	if (bprm->mm) {
		acct_arg_size(bprm, 0);
		mmput(bprm->mm);
	}

out_unmark:
	current->fs->in_exec = 0;
	current->in_execve = 0;

out_free:
	free_bprm(bprm);
	kfree(pathbuf);

out_files:
	if (displaced)
		reset_files_struct(displaced);
out_ret:
	putname(filename);
	return retval;
}
int do_execve(struct filename *filename,
	const char __user *const __user *__argv,
	const char __user *const __user *__envp)
{
	struct user_arg_ptr argv = { .ptr.native = __argv };
	struct user_arg_ptr envp = { .ptr.native = __envp };
	return do_execveat_common(AT_FDCWD, filename, argv, envp, 0);
}
int do_execveat(int fd, struct filename *filename,
		const char __user *const __user *__argv,
		const char __user *const __user *__envp,
		int flags)
{
	struct user_arg_ptr argv = { .ptr.native = __argv };
	struct user_arg_ptr envp = { .ptr.native = __envp };

	return do_execveat_common(fd, filename, argv, envp, flags);
}
#ifdef CONFIG_COMPAT
static int compat_do_execve(struct filename *filename,
	const compat_uptr_t __user *__argv,
	const compat_uptr_t __user *__envp)
{
	struct user_arg_ptr argv = {
		.is_compat = true,
		.ptr.compat = __argv,
	};
	struct user_arg_ptr envp = {
		.is_compat = true,
		.ptr.compat = __envp,
	};
	return do_execveat_common(AT_FDCWD, filename, argv, envp, 0);
}

static int compat_do_execveat(int fd, struct filename *filename,
			      const compat_uptr_t __user *__argv,
			      const compat_uptr_t __user *__envp,
			      int flags)
{
	struct user_arg_ptr argv = {
		.is_compat = true,
		.ptr.compat = __argv,
	};
	struct user_arg_ptr envp = {
		.is_compat = true,
		.ptr.compat = __envp,
	};
	return do_execveat_common(fd, filename, argv, envp, flags);
}
#endif
void set_binfmt(struct linux_binfmt *new)
{
	struct mm_struct *mm = current->mm;

	if (mm->binfmt)
		module_put(mm->binfmt->module);

	mm->binfmt = new;
	if (new)
		__module_get(new->module);
}
EXPORT_SYMBOL(set_binfmt);
/*
 * set_dumpable stores three-value SUID_DUMP_* into mm->flags.
 */
void set_dumpable(struct mm_struct *mm, int value)
{
	unsigned long old, new;

	if (WARN_ON((unsigned)value > SUID_DUMP_ROOT))
		return;

	do {
		old = READ_ONCE(mm->flags);
		new = (old & ~MMF_DUMPABLE_MASK) | value;
	} while (cmpxchg(&mm->flags, old, new) != old);
}
SYSCALL_DEFINE3(execve,
		const char __user *, filename,
		const char __user *const __user *, argv,
		const char __user *const __user *, envp)
{
	return do_execve(getname(filename), argv, envp);
}

SYSCALL_DEFINE5(execveat,
		int, fd, const char __user *, filename,
		const char __user *const __user *, argv,
		const char __user *const __user *, envp,
		int, flags)
{
	int lookup_flags = (flags & AT_EMPTY_PATH) ? LOOKUP_EMPTY : 0;

	return do_execveat(fd,
			   getname_flags(filename, lookup_flags, NULL),
			   argv, envp, flags);
}
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE3(execve, const char __user *, filename,
	const compat_uptr_t __user *, argv,
	const compat_uptr_t __user *, envp)
{
	return compat_do_execve(getname(filename), argv, envp);
}

COMPAT_SYSCALL_DEFINE5(execveat, int, fd,
		       const char __user *, filename,
		       const compat_uptr_t __user *, argv,
		       const compat_uptr_t __user *, envp,
		       int,  flags)
{
	int lookup_flags = (flags & AT_EMPTY_PATH) ? LOOKUP_EMPTY : 0;

	return compat_do_execveat(fd,
				  getname_flags(filename, lookup_flags, NULL),
				  argv, envp, flags);
}
#endif