// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 *  'fork.c' contains the help-routines for the 'fork' system call
 *  (see also entry.S and others).
 *  Fork is rather simple, once you get the hang of it, but the memory
 *  management can be a bitch. See 'mm/memory.c': 'copy_page_range()'
 */
#include <linux/anon_inodes.h>
#include <linux/slab.h>
#include <linux/sched/autogroup.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/user.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/stat.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/seq_file.h>
#include <linux/rtmutex.h>
#include <linux/init.h>
#include <linux/unistd.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/mempolicy.h>
#include <linux/sem.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/iocontext.h>
#include <linux/key.h>
#include <linux/binfmts.h>
#include <linux/mman.h>
#include <linux/mmu_notifier.h>
#include <linux/vmacache.h>
#include <linux/nsproxy.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/cgroup.h>
#include <linux/security.h>
#include <linux/hugetlb.h>
#include <linux/seccomp.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/jiffies.h>
#include <linux/futex.h>
#include <linux/compat.h>
#include <linux/kthread.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/rcupdate.h>
#include <linux/ptrace.h>
#include <linux/mount.h>
#include <linux/audit.h>
#include <linux/memcontrol.h>
#include <linux/ftrace.h>
#include <linux/proc_fs.h>
#include <linux/profile.h>
#include <linux/rmap.h>
#include <linux/ksm.h>
#include <linux/acct.h>
#include <linux/userfaultfd_k.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/freezer.h>
#include <linux/delayacct.h>
#include <linux/taskstats_kern.h>
#include <linux/random.h>
#include <linux/tty.h>
#include <linux/blkdev.h>
#include <linux/fs_struct.h>
#include <linux/magic.h>
#include <linux/perf_event.h>
#include <linux/posix-timers.h>
#include <linux/user-return-notifier.h>
#include <linux/oom.h>
#include <linux/khugepaged.h>
#include <linux/signalfd.h>
#include <linux/uprobes.h>
#include <linux/aio.h>
#include <linux/compiler.h>
#include <linux/sysctl.h>
#include <linux/kcov.h>
#include <linux/livepatch.h>
#include <linux/thread_info.h>
#include <linux/stackleak.h>
#include <linux/kasan.h>
#include <linux/scs.h>
#include <linux/io_uring.h>
#include <linux/bpf.h>

#include <asm/pgalloc.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include <trace/events/sched.h>

#define CREATE_TRACE_POINTS
#include <trace/events/task.h>
#ifdef CONFIG_USER_NS
extern int unprivileged_userns_clone;
#else
#define unprivileged_userns_clone 0
#endif

/*
 * Minimum number of threads to boot the kernel
 */
#define MIN_THREADS 20

/*
 * Maximum number of threads
 */
#define MAX_THREADS FUTEX_TID_MASK
/*
 * Protected counters by write_lock_irq(&tasklist_lock)
 */
unsigned long total_forks;	/* Handle normal Linux uptimes. */
int nr_threads;			/* The idle threads do not count.. */

static int max_threads;		/* tunable limit on nr_threads */

#define NAMED_ARRAY_INDEX(x)	[x] = __stringify(x)

static const char * const resident_page_types[] = {
	NAMED_ARRAY_INDEX(MM_FILEPAGES),
	NAMED_ARRAY_INDEX(MM_ANONPAGES),
	NAMED_ARRAY_INDEX(MM_SWAPENTS),
	NAMED_ARRAY_INDEX(MM_SHMEMPAGES),
};

DEFINE_PER_CPU(unsigned long, process_counts) = 0;

__cacheline_aligned DEFINE_RWLOCK(tasklist_lock);  /* outer */

#ifdef CONFIG_PROVE_RCU
int lockdep_tasklist_lock_is_held(void)
{
	return lockdep_is_held(&tasklist_lock);
}
EXPORT_SYMBOL_GPL(lockdep_tasklist_lock_is_held);
#endif /* #ifdef CONFIG_PROVE_RCU */
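/*
 * Sum of the per-CPU fork counters. The result is approximate: the
 * counters are updated per CPU and not read under a common lock.
 */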
int nr_processes(void)
{
	int cpu;
	int total = 0;

	for_each_possible_cpu(cpu)
		total += per_cpu(process_counts, cpu);

	return total;
}
void __weak arch_release_task_struct(struct task_struct *tsk)
{
}

#ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
static struct kmem_cache *task_struct_cachep;

static inline struct task_struct *alloc_task_struct_node(int node)
{
	return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
}

static inline void free_task_struct(struct task_struct *tsk)
{
	kmem_cache_free(task_struct_cachep, tsk);
}
#endif
#ifndef CONFIG_ARCH_THREAD_STACK_ALLOCATOR

/*
 * Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a
 * kmemcache based allocator.
 */
# if THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK)

#ifdef CONFIG_VMAP_STACK
/*
 * vmalloc() is a bit slow, and calling vfree() enough times will force a TLB
 * flush.  Try to minimize the number of calls by caching stacks.
 */
#define NR_CACHED_STACKS 2
static DEFINE_PER_CPU(struct vm_struct *, cached_stacks[NR_CACHED_STACKS]);

static int free_vm_stack_cache(unsigned int cpu)
{
	struct vm_struct **cached_vm_stacks = per_cpu_ptr(cached_stacks, cpu);
	int i;

	for (i = 0; i < NR_CACHED_STACKS; i++) {
		struct vm_struct *vm_stack = cached_vm_stacks[i];

		if (!vm_stack)
			continue;

		vfree(vm_stack->addr);
		cached_vm_stacks[i] = NULL;
	}

	return 0;
}
#endif
static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node)
{
#ifdef CONFIG_VMAP_STACK
	void *stack;
	int i;

	for (i = 0; i < NR_CACHED_STACKS; i++) {
		struct vm_struct *s;

		s = this_cpu_xchg(cached_stacks[i], NULL);
		if (!s)
			continue;

		/* Mark stack accessible for KASAN. */
		kasan_unpoison_range(s->addr, THREAD_SIZE);

		/* Clear stale pointers from reused stack. */
		memset(s->addr, 0, THREAD_SIZE);

		tsk->stack_vm_area = s;
		tsk->stack = s->addr;
		return s->addr;
	}

	/*
	 * Allocated stacks are cached and later reused by new threads,
	 * so memcg accounting is performed manually on assigning/releasing
	 * stacks to tasks. Drop __GFP_ACCOUNT.
	 */
	stack = __vmalloc_node_range(THREAD_SIZE, THREAD_ALIGN,
				     VMALLOC_START, VMALLOC_END,
				     THREADINFO_GFP & ~__GFP_ACCOUNT,
				     PAGE_KERNEL,
				     0, node, __builtin_return_address(0));

	/*
	 * We can't call find_vm_area() in interrupt context, and
	 * free_thread_stack() can be called in interrupt context,
	 * so cache the vm_struct.
	 */
	if (stack) {
		tsk->stack_vm_area = find_vm_area(stack);
		tsk->stack = stack;
	}
	return stack;
#else
	struct page *page = alloc_pages_node(node, THREADINFO_GFP,
					     THREAD_SIZE_ORDER);

	if (likely(page)) {
		tsk->stack = kasan_reset_tag(page_address(page));
		return tsk->stack;
	}
	return NULL;
#endif
}
static inline void free_thread_stack(struct task_struct *tsk)
{
#ifdef CONFIG_VMAP_STACK
	struct vm_struct *vm = task_stack_vm_area(tsk);

	if (vm) {
		int i;

		for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
			memcg_kmem_uncharge_page(vm->pages[i], 0);

		for (i = 0; i < NR_CACHED_STACKS; i++) {
			if (this_cpu_cmpxchg(cached_stacks[i],
					NULL, tsk->stack_vm_area) != NULL)
				continue;

			return;
		}

		vfree_atomic(tsk->stack);
		return;
	}
#endif

	__free_pages(virt_to_page(tsk->stack), THREAD_SIZE_ORDER);
}
# else
static struct kmem_cache *thread_stack_cache;

static unsigned long *alloc_thread_stack_node(struct task_struct *tsk,
						  int node)
{
	unsigned long *stack;

	stack = kmem_cache_alloc_node(thread_stack_cache, THREADINFO_GFP, node);
	stack = kasan_reset_tag(stack);
	tsk->stack = stack;
	return stack;
}

static void free_thread_stack(struct task_struct *tsk)
{
	kmem_cache_free(thread_stack_cache, tsk->stack);
}

void thread_stack_cache_init(void)
{
	thread_stack_cache = kmem_cache_create_usercopy("thread_stack",
					THREAD_SIZE, THREAD_SIZE, 0, 0,
					THREAD_SIZE, NULL);
	BUG_ON(thread_stack_cache == NULL);
}
# endif
#endif
/* SLAB cache for signal_struct structures (tsk->signal) */
static struct kmem_cache *signal_cachep;

/* SLAB cache for sighand_struct structures (tsk->sighand) */
struct kmem_cache *sighand_cachep;

/* SLAB cache for files_struct structures (tsk->files) */
struct kmem_cache *files_cachep;

/* SLAB cache for fs_struct structures (tsk->fs) */
struct kmem_cache *fs_cachep;

/* SLAB cache for vm_area_struct structures */
static struct kmem_cache *vm_area_cachep;

/* SLAB cache for mm_struct structures (tsk->mm) */
static struct kmem_cache *mm_cachep;
struct vm_area_struct *vm_area_alloc(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
	if (vma)
		vma_init(vma, mm);
	return vma;
}

struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig)
{
	struct vm_area_struct *new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);

	if (new) {
		ASSERT_EXCLUSIVE_WRITER(orig->vm_flags);
		ASSERT_EXCLUSIVE_WRITER(orig->vm_file);
		/*
		 * orig->shared.rb may be modified concurrently, but the clone
		 * will be reinitialized.
		 */
		*new = data_race(*orig);
		INIT_LIST_HEAD(&new->anon_vma_chain);
		new->vm_next = new->vm_prev = NULL;
	}
	return new;
}

void vm_area_free(struct vm_area_struct *vma)
{
	kmem_cache_free(vm_area_cachep, vma);
}
static void account_kernel_stack(struct task_struct *tsk, int account)
{
	void *stack = task_stack_page(tsk);
	struct vm_struct *vm = task_stack_vm_area(tsk);

	if (vm) {
		int i;

		for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
			mod_lruvec_page_state(vm->pages[i], NR_KERNEL_STACK_KB,
					      account * (PAGE_SIZE / 1024));
	} else {
		/* All stack pages are in the same node. */
		mod_lruvec_kmem_state(stack, NR_KERNEL_STACK_KB,
				      account * (THREAD_SIZE / 1024));
	}
}
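/*
 * Charge the stack pages to the new task's memory cgroup. This only has
 * work to do for CONFIG_VMAP_STACK, where cached stacks are allocated
 * without __GFP_ACCOUNT and therefore must be charged and uncharged by hand.
 */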
static int memcg_charge_kernel_stack(struct task_struct *tsk)
{
#ifdef CONFIG_VMAP_STACK
	struct vm_struct *vm = task_stack_vm_area(tsk);
	int ret;

	BUILD_BUG_ON(IS_ENABLED(CONFIG_VMAP_STACK) && PAGE_SIZE % 1024 != 0);

	if (vm) {
		int i;

		BUG_ON(vm->nr_pages != THREAD_SIZE / PAGE_SIZE);

		for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) {
			/*
			 * If memcg_kmem_charge_page() fails, page's
			 * memory cgroup pointer is NULL, and
			 * memcg_kmem_uncharge_page() in free_thread_stack()
			 * will ignore this page.
			 */
			ret = memcg_kmem_charge_page(vm->pages[i], GFP_KERNEL,
						     0);
			if (ret)
				return ret;
		}
	}
#endif
	return 0;
}
static void release_task_stack(struct task_struct *tsk)
{
	if (WARN_ON(READ_ONCE(tsk->__state) != TASK_DEAD))
		return;  /* Better to leak the stack than to free prematurely */

	account_kernel_stack(tsk, -1);
	free_thread_stack(tsk);
	tsk->stack = NULL;
#ifdef CONFIG_VMAP_STACK
	tsk->stack_vm_area = NULL;
#endif
}
#ifdef CONFIG_THREAD_INFO_IN_TASK
void put_task_stack(struct task_struct *tsk)
{
	if (refcount_dec_and_test(&tsk->stack_refcount))
		release_task_stack(tsk);
}
#endif
void free_task(struct task_struct *tsk)
{
	release_user_cpus_ptr(tsk);

#ifndef CONFIG_THREAD_INFO_IN_TASK
	/*
	 * The task is finally done with both the stack and thread_info,
	 * so free both.
	 */
	release_task_stack(tsk);
#else
	/*
	 * If the task had a separate stack allocation, it should be gone
	 * by now.
	 */
	WARN_ON_ONCE(refcount_read(&tsk->stack_refcount) != 0);
#endif
	rt_mutex_debug_task_free(tsk);
	ftrace_graph_exit_task(tsk);
	arch_release_task_struct(tsk);
	if (tsk->flags & PF_KTHREAD)
		free_kthread_struct(tsk);
	free_task_struct(tsk);
}
EXPORT_SYMBOL(free_task);
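/*
 * Duplicate mm->exe_file from the parent into the new mm and re-take the
 * write-access denial on it, mirroring the denial the parent already holds.
 */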
static void dup_mm_exe_file(struct mm_struct *mm, struct mm_struct *oldmm)
{
	struct file *exe_file;

	exe_file = get_mm_exe_file(oldmm);
	RCU_INIT_POINTER(mm->exe_file, exe_file);
	/*
	 * We depend on the oldmm having properly denied write access to the
	 * exe_file already.
	 */
	if (exe_file && deny_write_access(exe_file))
		pr_warn_once("deny_write_access() failed in %s\n", __func__);
}
#ifdef CONFIG_MMU
static __latent_entropy int dup_mmap(struct mm_struct *mm,
					struct mm_struct *oldmm)
{
	struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
	struct rb_node **rb_link, *rb_parent;
	int retval;
	unsigned long charge;
	LIST_HEAD(uf);

	uprobe_start_dup_mmap();
	if (mmap_write_lock_killable(oldmm)) {
		retval = -EINTR;
		goto fail_uprobe_end;
	}
	flush_cache_dup_mm(oldmm);
	uprobe_dup_mmap(oldmm, mm);
	/*
	 * Not linked in yet - no deadlock potential:
	 */
	mmap_write_lock_nested(mm, SINGLE_DEPTH_NESTING);

	/* No ordering required: file already has been exposed. */
	dup_mm_exe_file(mm, oldmm);

	mm->total_vm = oldmm->total_vm;
	mm->data_vm = oldmm->data_vm;
	mm->exec_vm = oldmm->exec_vm;
	mm->stack_vm = oldmm->stack_vm;

	rb_link = &mm->mm_rb.rb_node;
	rb_parent = NULL;
	pprev = &mm->mmap;
	retval = ksm_fork(mm, oldmm);
	if (retval)
		goto out;
	retval = khugepaged_fork(mm, oldmm);
	if (retval)
		goto out;

	prev = NULL;
	for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
		struct file *file;

		if (mpnt->vm_flags & VM_DONTCOPY) {
			vm_stat_account(mm, mpnt->vm_flags, -vma_pages(mpnt));
			continue;
		}
		charge = 0;
		/*
		 * Don't duplicate many vmas if we've been oom-killed (for
		 * example)
		 */
		if (fatal_signal_pending(current)) {
			retval = -EINTR;
			goto out;
		}
		if (mpnt->vm_flags & VM_ACCOUNT) {
			unsigned long len = vma_pages(mpnt);

			if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
				goto fail_nomem;
			charge = len;
		}
		tmp = vm_area_dup(mpnt);
		if (!tmp)
			goto fail_nomem;
		retval = vma_dup_policy(mpnt, tmp);
		if (retval)
			goto fail_nomem_policy;
		tmp->vm_mm = mm;
		retval = dup_userfaultfd(tmp, &uf);
		if (retval)
			goto fail_nomem_anon_vma_fork;
		if (tmp->vm_flags & VM_WIPEONFORK) {
			/*
			 * VM_WIPEONFORK gets a clean slate in the child.
			 * Don't prepare anon_vma until fault since we don't
			 * copy page for current vma.
			 */
			tmp->anon_vma = NULL;
		} else if (anon_vma_fork(tmp, mpnt))
			goto fail_nomem_anon_vma_fork;
		tmp->vm_flags &= ~(VM_LOCKED | VM_LOCKONFAULT);
		file = tmp->vm_file;
		if (file) {
			struct address_space *mapping = file->f_mapping;

			get_file(file);
			i_mmap_lock_write(mapping);
			if (tmp->vm_flags & VM_SHARED)
				mapping_allow_writable(mapping);
			flush_dcache_mmap_lock(mapping);
			/* insert tmp into the share list, just after mpnt */
			vma_interval_tree_insert_after(tmp, mpnt,
					&mapping->i_mmap);
			flush_dcache_mmap_unlock(mapping);
			i_mmap_unlock_write(mapping);
		}

		/*
		 * Clear hugetlb-related page reserves for children. This only
		 * affects MAP_PRIVATE mappings. Faults generated by the child
		 * are not guaranteed to succeed, even if read-only
		 */
		if (is_vm_hugetlb_page(tmp))
			reset_vma_resv_huge_pages(tmp);

		/*
		 * Link in the new vma and copy the page table entries.
		 */
		*pprev = tmp;
		pprev = &tmp->vm_next;
		tmp->vm_prev = prev;
		prev = tmp;

		__vma_link_rb(mm, tmp, rb_link, rb_parent);
		rb_link = &tmp->vm_rb.rb_right;
		rb_parent = &tmp->vm_rb;

		mm->map_count++;
		if (!(tmp->vm_flags & VM_WIPEONFORK))
			retval = copy_page_range(tmp, mpnt);

		if (tmp->vm_ops && tmp->vm_ops->open)
			tmp->vm_ops->open(tmp);

		if (retval)
			goto out;
	}
	/* a new mm has just been created */
	retval = arch_dup_mmap(oldmm, mm);
out:
	mmap_write_unlock(mm);
	flush_tlb_mm(oldmm);
	mmap_write_unlock(oldmm);
	dup_userfaultfd_complete(&uf);
fail_uprobe_end:
	uprobe_end_dup_mmap();
	return retval;

fail_nomem_anon_vma_fork:
	mpol_put(vma_policy(tmp));
fail_nomem_policy:
	vm_area_free(tmp);
fail_nomem:
	retval = -ENOMEM;
	vm_unacct_memory(charge);
	goto out;
}
static inline int mm_alloc_pgd(struct mm_struct *mm)
{
	mm->pgd = pgd_alloc(mm);
	if (unlikely(!mm->pgd))
		return -ENOMEM;
	return 0;
}

static inline void mm_free_pgd(struct mm_struct *mm)
{
	pgd_free(mm, mm->pgd);
}
#else
static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
{
	mmap_write_lock(oldmm);
	dup_mm_exe_file(mm, oldmm);
	mmap_write_unlock(oldmm);
	return 0;
}
#define mm_alloc_pgd(mm)	(0)
#define mm_free_pgd(mm)
#endif /* CONFIG_MMU */
static void check_mm(struct mm_struct *mm)
{
	int i;

	BUILD_BUG_ON_MSG(ARRAY_SIZE(resident_page_types) != NR_MM_COUNTERS,
			 "Please make sure 'struct resident_page_types[]' is updated as well");

	for (i = 0; i < NR_MM_COUNTERS; i++) {
		long x = atomic_long_read(&mm->rss_stat.count[i]);

		if (unlikely(x))
			pr_alert("BUG: Bad rss-counter state mm:%p type:%s val:%ld\n",
				 mm, resident_page_types[i], x);
	}

	if (mm_pgtables_bytes(mm))
		pr_alert("BUG: non-zero pgtables_bytes on freeing mm: %ld\n",
				mm_pgtables_bytes(mm));

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
	VM_BUG_ON_MM(mm->pmd_huge_pte, mm);
#endif
}

#define allocate_mm()	(kmem_cache_alloc(mm_cachep, GFP_KERNEL))
#define free_mm(mm)	(kmem_cache_free(mm_cachep, (mm)))
/*
 * Called when the last reference to the mm
 * is dropped: either by a lazy thread or by
 * mmput. Free the page directory and the mm.
 */
void __mmdrop(struct mm_struct *mm)
{
	BUG_ON(mm == &init_mm);
	WARN_ON_ONCE(mm == current->mm);
	WARN_ON_ONCE(mm == current->active_mm);
	mm_free_pgd(mm);
	destroy_context(mm);
	mmu_notifier_subscriptions_destroy(mm);
	check_mm(mm);
	put_user_ns(mm->user_ns);
	free_mm(mm);
}
EXPORT_SYMBOL_GPL(__mmdrop);

static void mmdrop_async_fn(struct work_struct *work)
{
	struct mm_struct *mm;

	mm = container_of(work, struct mm_struct, async_put_work);
	__mmdrop(mm);
}

static void mmdrop_async(struct mm_struct *mm)
{
	if (unlikely(atomic_dec_and_test(&mm->mm_count))) {
		INIT_WORK(&mm->async_put_work, mmdrop_async_fn);
		schedule_work(&mm->async_put_work);
	}
}
static inline void free_signal_struct(struct signal_struct *sig)
{
	taskstats_tgid_free(sig);
	sched_autogroup_exit(sig);
	/*
	 * __mmdrop is not safe to call from softirq context on x86 due to
	 * pgd_dtor so postpone it to the async context
	 */
	if (sig->oom_mm)
		mmdrop_async(sig->oom_mm);
	kmem_cache_free(signal_cachep, sig);
}

static inline void put_signal_struct(struct signal_struct *sig)
{
	if (refcount_dec_and_test(&sig->sigcnt))
		free_signal_struct(sig);
}

void __put_task_struct(struct task_struct *tsk)
{
	WARN_ON(!tsk->exit_state);
	WARN_ON(refcount_read(&tsk->usage));
	WARN_ON(tsk == current);

	task_numa_free(tsk, true);
	security_task_free(tsk);
	bpf_task_storage_free(tsk);
	delayacct_tsk_free(tsk);
	put_signal_struct(tsk->signal);
	sched_core_free(tsk);

	if (!profile_handoff_task(tsk))
		free_task(tsk);
}
EXPORT_SYMBOL_GPL(__put_task_struct);
void __init __weak arch_task_cache_init(void) { }

static void set_max_threads(unsigned int max_threads_suggested)
{
	u64 threads;
	unsigned long nr_pages = totalram_pages();

	/*
	 * The number of threads shall be limited such that the thread
	 * structures may only consume a small part of the available memory.
	 */
	if (fls64(nr_pages) + fls64(PAGE_SIZE) > 64)
		threads = MAX_THREADS;
	else
		threads = div64_u64((u64) nr_pages * (u64) PAGE_SIZE,
				    (u64) THREAD_SIZE * 8UL);

	if (threads > max_threads_suggested)
		threads = max_threads_suggested;

	max_threads = clamp_t(u64, threads, MIN_THREADS, MAX_THREADS);
}
#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
/* Initialized by the architecture: */
int arch_task_struct_size __read_mostly;
#endif

#ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
static void task_struct_whitelist(unsigned long *offset, unsigned long *size)
{
	/* Fetch thread_struct whitelist for the architecture. */
	arch_thread_struct_whitelist(offset, size);

	/*
	 * Handle zero-sized whitelist or empty thread_struct, otherwise
	 * adjust offset to position of thread_struct in task_struct.
	 */
	if (unlikely(*size == 0))
		*offset = 0;
	else
		*offset += offsetof(struct task_struct, thread);
}
#endif /* CONFIG_ARCH_TASK_STRUCT_ALLOCATOR */
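/*
 * Called once during early boot to create the task_struct slab cache and to
 * derive the global thread-count limits (and the init task's NPROC/SIGPENDING
 * rlimits) from the amount of available memory.
 */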
void __init fork_init(void)
{
	int i;
#ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
#ifndef ARCH_MIN_TASKALIGN
#define ARCH_MIN_TASKALIGN	0
#endif
	int align = max_t(int, L1_CACHE_BYTES, ARCH_MIN_TASKALIGN);
	unsigned long useroffset, usersize;

	/* create a slab on which task_structs can be allocated */
	task_struct_whitelist(&useroffset, &usersize);
	task_struct_cachep = kmem_cache_create_usercopy("task_struct",
			arch_task_struct_size, align,
			SLAB_PANIC|SLAB_ACCOUNT,
			useroffset, usersize, NULL);
#endif

	/* do the arch specific task caches init */
	arch_task_cache_init();

	set_max_threads(MAX_THREADS);

	init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
	init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
	init_task.signal->rlim[RLIMIT_SIGPENDING] =
		init_task.signal->rlim[RLIMIT_NPROC];

	for (i = 0; i < MAX_PER_NAMESPACE_UCOUNTS; i++)
		init_user_ns.ucount_max[i] = max_threads/2;

	set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_NPROC,      RLIM_INFINITY);
	set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_MSGQUEUE,   RLIM_INFINITY);
	set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_SIGPENDING, RLIM_INFINITY);
	set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_MEMLOCK,    RLIM_INFINITY);

#ifdef CONFIG_VMAP_STACK
	cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "fork:vm_stack_cache",
			  NULL, free_vm_stack_cache);
#endif

	lockdep_init_task(&init_task);
}
int __weak arch_dup_task_struct(struct task_struct *dst,
					       struct task_struct *src)
{
	*dst = *src;
	return 0;
}

void set_task_stack_end_magic(struct task_struct *tsk)
{
	unsigned long *stackend;

	stackend = end_of_stack(tsk);
	*stackend = STACK_END_MAGIC;	/* for overflow detection */
}
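/*
 * Allocate a new task_struct and kernel stack and copy the parent's
 * task_struct contents into them; copy_process() then finishes the
 * per-resource duplication on top of this skeleton.
 */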
static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
{
	struct task_struct *tsk;
	unsigned long *stack;
	struct vm_struct *stack_vm_area __maybe_unused;
	int err;

	if (node == NUMA_NO_NODE)
		node = tsk_fork_get_node(orig);
	tsk = alloc_task_struct_node(node);
	if (!tsk)
		return NULL;

	stack = alloc_thread_stack_node(tsk, node);
	if (!stack)
		goto free_tsk;

	if (memcg_charge_kernel_stack(tsk))
		goto free_stack;

	stack_vm_area = task_stack_vm_area(tsk);

	err = arch_dup_task_struct(tsk, orig);

	/*
	 * arch_dup_task_struct() clobbers the stack-related fields.  Make
	 * sure they're properly initialized before using any stack-related
	 * functions again.
	 */
	tsk->stack = stack;
#ifdef CONFIG_VMAP_STACK
	tsk->stack_vm_area = stack_vm_area;
#endif
#ifdef CONFIG_THREAD_INFO_IN_TASK
	refcount_set(&tsk->stack_refcount, 1);
#endif

	if (err)
		goto free_stack;

	err = scs_prepare(tsk, node);
	if (err)
		goto free_stack;

#ifdef CONFIG_SECCOMP
	/*
	 * We must handle setting up seccomp filters once we're under
	 * the sighand lock in case orig has changed between now and
	 * then. Until then, filter must be NULL to avoid messing up
	 * the usage counts on the error path calling free_task.
	 */
	tsk->seccomp.filter = NULL;
#endif

	setup_thread_stack(tsk, orig);
	clear_user_return_notifier(tsk);
	clear_tsk_need_resched(tsk);
	set_task_stack_end_magic(tsk);
	clear_syscall_work_syscall_user_dispatch(tsk);

#ifdef CONFIG_STACKPROTECTOR
	tsk->stack_canary = get_random_canary();
#endif
	if (orig->cpus_ptr == &orig->cpus_mask)
		tsk->cpus_ptr = &tsk->cpus_mask;
	dup_user_cpus_ptr(tsk, orig, node);

	/*
	 * One for the user space visible state that goes away when reaped.
	 * One for the scheduler.
	 */
	refcount_set(&tsk->rcu_users, 2);
	/* One for the rcu users */
	refcount_set(&tsk->usage, 1);
#ifdef CONFIG_BLK_DEV_IO_TRACE
	tsk->btrace_seq = 0;
#endif
	tsk->splice_pipe = NULL;
	tsk->task_frag.page = NULL;
	tsk->wake_q.next = NULL;
	tsk->pf_io_worker = NULL;

	account_kernel_stack(tsk, 1);

	kmap_local_fork(tsk);

#ifdef CONFIG_FAULT_INJECTION
	tsk->fail_nth = 0;
#endif

#ifdef CONFIG_BLK_CGROUP
	tsk->throttle_queue = NULL;
	tsk->use_memdelay = 0;
#endif

#ifdef CONFIG_MEMCG
	tsk->active_memcg = NULL;
#endif
	return tsk;

free_stack:
	free_thread_stack(tsk);
free_tsk:
	free_task_struct(tsk);
	return NULL;
}
__cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);

static unsigned long default_dump_filter = MMF_DUMP_FILTER_DEFAULT;

static int __init coredump_filter_setup(char *s)
{
	default_dump_filter =
		(simple_strtoul(s, NULL, 0) << MMF_DUMP_FILTER_SHIFT) &
		MMF_DUMP_FILTER_MASK;
	return 1;
}

__setup("coredump_filter=", coredump_filter_setup);
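/*
 * Example: booting with "coredump_filter=0x23" sets the dump filter used for
 * mms that are created without a parent mm to inherit from. The value is
 * parsed with base auto-detection, so decimal and 0x-prefixed hex both work.
 */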
#include <linux/init_task.h>

static void mm_init_aio(struct mm_struct *mm)
{
#ifdef CONFIG_AIO
	spin_lock_init(&mm->ioctx_lock);
	mm->ioctx_table = NULL;
#endif
}

static __always_inline void mm_clear_owner(struct mm_struct *mm,
					   struct task_struct *p)
{
#ifdef CONFIG_MEMCG
	if (mm->owner == p)
		WRITE_ONCE(mm->owner, NULL);
#endif
}

static void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
{
#ifdef CONFIG_MEMCG
	mm->owner = p;
#endif
}

static void mm_init_pasid(struct mm_struct *mm)
{
#ifdef CONFIG_IOMMU_SUPPORT
	mm->pasid = INIT_PASID;
#endif
}

static void mm_init_uprobes_state(struct mm_struct *mm)
{
#ifdef CONFIG_UPROBES
	mm->uprobes_state.xol_area = NULL;
#endif
}
static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
	struct user_namespace *user_ns)
{
	mm->mmap = NULL;
	mm->mm_rb = RB_ROOT;
	mm->vmacache_seqnum = 0;
	atomic_set(&mm->mm_users, 1);
	atomic_set(&mm->mm_count, 1);
	seqcount_init(&mm->write_protect_seq);
	mmap_init_lock(mm);
	INIT_LIST_HEAD(&mm->mmlist);
	mm->core_state = NULL;
	mm_pgtables_bytes_init(mm);
	mm->map_count = 0;
	mm->locked_vm = 0;
	atomic64_set(&mm->pinned_vm, 0);
	memset(&mm->rss_stat, 0, sizeof(mm->rss_stat));
	spin_lock_init(&mm->page_table_lock);
	spin_lock_init(&mm->arg_lock);
	mm_init_cpumask(mm);
	mm_init_aio(mm);
	mm_init_owner(mm, p);
	mm_init_pasid(mm);
	RCU_INIT_POINTER(mm->exe_file, NULL);
	mmu_notifier_subscriptions_init(mm);
	init_tlb_flush_pending(mm);
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
	mm->pmd_huge_pte = NULL;
#endif
	mm_init_uprobes_state(mm);
	hugetlb_count_init(mm);

	if (current->mm) {
		mm->flags = current->mm->flags & MMF_INIT_MASK;
		mm->def_flags = current->mm->def_flags & VM_INIT_DEF_MASK;
	} else {
		mm->flags = default_dump_filter;
		mm->def_flags = 0;
	}

	if (mm_alloc_pgd(mm))
		goto fail_nopgd;

	if (init_new_context(p, mm))
		goto fail_nocontext;

	mm->user_ns = get_user_ns(user_ns);
	return mm;

fail_nocontext:
	mm_free_pgd(mm);
fail_nopgd:
	free_mm(mm);
	return NULL;
}
/*
 * Allocate and initialize an mm_struct.
 */
struct mm_struct *mm_alloc(void)
{
	struct mm_struct *mm;

	mm = allocate_mm();
	if (!mm)
		return NULL;

	memset(mm, 0, sizeof(*mm));
	return mm_init(mm, current, current_user_ns());
}

static inline void __mmput(struct mm_struct *mm)
{
	VM_BUG_ON(atomic_read(&mm->mm_users));

	uprobe_clear_state(mm);
	exit_aio(mm);
	ksm_exit(mm);
	khugepaged_exit(mm); /* must run before exit_mmap */
	exit_mmap(mm);
	mm_put_huge_zero_page(mm);
	set_mm_exe_file(mm, NULL);
	if (!list_empty(&mm->mmlist)) {
		spin_lock(&mmlist_lock);
		list_del(&mm->mmlist);
		spin_unlock(&mmlist_lock);
	}
	if (mm->binfmt)
		module_put(mm->binfmt->module);
	mmdrop(mm);
}
/*
 * Decrement the use count and release all resources for an mm.
 */
void mmput(struct mm_struct *mm)
{
	might_sleep();

	if (atomic_dec_and_test(&mm->mm_users))
		__mmput(mm);
}
EXPORT_SYMBOL_GPL(mmput);

#ifdef CONFIG_MMU
static void mmput_async_fn(struct work_struct *work)
{
	struct mm_struct *mm = container_of(work, struct mm_struct,
					    async_put_work);

	__mmput(mm);
}

void mmput_async(struct mm_struct *mm)
{
	if (atomic_dec_and_test(&mm->mm_users)) {
		INIT_WORK(&mm->async_put_work, mmput_async_fn);
		schedule_work(&mm->async_put_work);
	}
}
EXPORT_SYMBOL(mmput_async);
#endif
/**
 * set_mm_exe_file - change a reference to the mm's executable file
 *
 * This changes mm's executable file (shown as symlink /proc/[pid]/exe).
 *
 * Main users are mmput() and sys_execve(). Callers prevent concurrent
 * invocations: in mmput() nobody alive left, in execve the task is
 * single-threaded.
 *
 * Can only fail if new_exe_file != NULL.
 */
int set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
{
	struct file *old_exe_file;

	/*
	 * It is safe to dereference the exe_file without RCU as
	 * this function is only called if nobody else can access
	 * this mm -- see comment above for justification.
	 */
	old_exe_file = rcu_dereference_raw(mm->exe_file);

	if (new_exe_file) {
		/*
		 * We expect the caller (i.e., sys_execve) to have already
		 * denied write access, so this is unlikely to fail.
		 */
		if (unlikely(deny_write_access(new_exe_file)))
			return -EACCES;
		get_file(new_exe_file);
	}
	rcu_assign_pointer(mm->exe_file, new_exe_file);
	if (old_exe_file) {
		allow_write_access(old_exe_file);
		fput(old_exe_file);
	}
	return 0;
}
/**
 * replace_mm_exe_file - replace a reference to the mm's executable file
 *
 * This changes mm's executable file (shown as symlink /proc/[pid]/exe),
 * dealing with concurrent invocation and without grabbing the mmap lock in
 * write mode.
 *
 * Main user is sys_prctl(PR_SET_MM_MAP/EXE_FILE).
 */
int replace_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
{
	struct vm_area_struct *vma;
	struct file *old_exe_file;
	int ret = 0;

	/* Forbid mm->exe_file change if old file still mapped. */
	old_exe_file = get_mm_exe_file(mm);
	if (old_exe_file) {
		mmap_read_lock(mm);
		for (vma = mm->mmap; vma && !ret; vma = vma->vm_next) {
			if (!vma->vm_file)
				continue;
			if (path_equal(&vma->vm_file->f_path,
				       &old_exe_file->f_path))
				ret = -EBUSY;
		}
		mmap_read_unlock(mm);
		fput(old_exe_file);
		if (ret)
			return ret;
	}

	/* set the new file, lockless */
	ret = deny_write_access(new_exe_file);
	if (ret)
		return -EACCES;
	get_file(new_exe_file);

	old_exe_file = xchg(&mm->exe_file, new_exe_file);
	if (old_exe_file) {
		/*
		 * Don't race with dup_mmap() getting the file and disallowing
		 * write access while someone might open the file writable.
		 */
		mmap_read_lock(mm);
		allow_write_access(old_exe_file);
		fput(old_exe_file);
		mmap_read_unlock(mm);
	}
	return 0;
}
/**
 * get_mm_exe_file - acquire a reference to the mm's executable file
 *
 * Returns %NULL if mm has no associated executable file.
 * User must release file via fput().
 */
struct file *get_mm_exe_file(struct mm_struct *mm)
{
	struct file *exe_file;

	rcu_read_lock();
	exe_file = rcu_dereference(mm->exe_file);
	if (exe_file && !get_file_rcu(exe_file))
		exe_file = NULL;
	rcu_read_unlock();
	return exe_file;
}

/**
 * get_task_exe_file - acquire a reference to the task's executable file
 *
 * Returns %NULL if task's mm (if any) has no associated executable file or
 * this is a kernel thread with borrowed mm (see the comment above get_task_mm).
 * User must release file via fput().
 */
struct file *get_task_exe_file(struct task_struct *task)
{
	struct file *exe_file = NULL;
	struct mm_struct *mm;

	task_lock(task);
	mm = task->mm;
	if (mm) {
		if (!(task->flags & PF_KTHREAD))
			exe_file = get_mm_exe_file(mm);
	}
	task_unlock(task);
	return exe_file;
}

/**
 * get_task_mm - acquire a reference to the task's mm
 *
 * Returns %NULL if the task has no mm.  Checks PF_KTHREAD (meaning
 * this kernel workthread has transiently adopted a user mm with use_mm,
 * to do its AIO) is not set and if so returns a reference to it, after
 * bumping up the use count.  User must release the mm via mmput()
 * after use.  Typically used by /proc and ptrace.
 */
struct mm_struct *get_task_mm(struct task_struct *task)
{
	struct mm_struct *mm;

	task_lock(task);
	mm = task->mm;
	if (mm) {
		if (task->flags & PF_KTHREAD)
			mm = NULL;
		else
			mmget(mm);
	}
	task_unlock(task);
	return mm;
}
EXPORT_SYMBOL_GPL(get_task_mm);
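/*
 * mm_access() - like get_task_mm(), but for accessing another task's mm:
 * takes the target's exec_update_lock and checks ptrace permissions (@mode)
 * before handing out a reference.
 */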
struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
{
	struct mm_struct *mm;
	int err;

	err = down_read_killable(&task->signal->exec_update_lock);
	if (err)
		return ERR_PTR(err);

	mm = get_task_mm(task);
	if (mm && mm != current->mm &&
			!ptrace_may_access(task, mode)) {
		mmput(mm);
		mm = ERR_PTR(-EACCES);
	}
	up_read(&task->signal->exec_update_lock);

	return mm;
}
static void complete_vfork_done(struct task_struct *tsk)
{
	struct completion *vfork;

	task_lock(tsk);
	vfork = tsk->vfork_done;
	if (likely(vfork)) {
		tsk->vfork_done = NULL;
		complete(vfork);
	}
	task_unlock(tsk);
}

static int wait_for_vfork_done(struct task_struct *child,
				struct completion *vfork)
{
	int killed;

	freezer_do_not_count();
	cgroup_enter_frozen();
	killed = wait_for_completion_killable(vfork);
	cgroup_leave_frozen(false);
	freezer_count();

	if (killed) {
		task_lock(child);
		child->vfork_done = NULL;
		task_unlock(child);
	}

	put_task_struct(child);
	return killed;
}
/* Please note the differences between mmput and mm_release.
 * mmput is called whenever we stop holding onto a mm_struct,
 * error success whatever.
 *
 * mm_release is called after a mm_struct has been removed
 * from the current process.
 *
 * This difference is important for error handling, when we
 * only half set up a mm_struct for a new process and need to restore
 * the old one.  Because we mmput the new mm_struct before
 * restoring the old one. . .
 * Eric Biederman 10 January 1998
 */
static void mm_release(struct task_struct *tsk, struct mm_struct *mm)
{
	uprobe_free_utask(tsk);

	/* Get rid of any cached register state */
	deactivate_mm(tsk, mm);

	/*
	 * Signal userspace if we're not exiting with a core dump
	 * because we want to leave the value intact for debugging
	 * purposes.
	 */
	if (tsk->clear_child_tid) {
		if (!(tsk->signal->flags & SIGNAL_GROUP_COREDUMP) &&
		    atomic_read(&mm->mm_users) > 1) {
			/*
			 * We don't check the error code - if userspace has
			 * not set up a proper pointer then tough luck.
			 */
			put_user(0, tsk->clear_child_tid);
			do_futex(tsk->clear_child_tid, FUTEX_WAKE,
					1, NULL, NULL, 0, 0);
		}
		tsk->clear_child_tid = NULL;
	}

	/*
	 * All done, finally we can wake up parent and return this mm to him.
	 * Also kthread_stop() uses this completion for synchronization.
	 */
	if (tsk->vfork_done)
		complete_vfork_done(tsk);
}

void exit_mm_release(struct task_struct *tsk, struct mm_struct *mm)
{
	futex_exit_release(tsk);
	mm_release(tsk, mm);
}

void exec_mm_release(struct task_struct *tsk, struct mm_struct *mm)
{
	futex_exec_release(tsk);
	mm_release(tsk, mm);
}
/**
 * dup_mm() - duplicates an existing mm structure
 * @tsk: the task_struct with which the new mm will be associated.
 * @oldmm: the mm to duplicate.
 *
 * Allocates a new mm structure and duplicates the provided @oldmm structure
 * content into it.
 *
 * Return: the duplicated mm or NULL on failure.
 */
static struct mm_struct *dup_mm(struct task_struct *tsk,
				struct mm_struct *oldmm)
{
	struct mm_struct *mm;
	int err;

	mm = allocate_mm();
	if (!mm)
		goto fail_nomem;

	memcpy(mm, oldmm, sizeof(*mm));

	if (!mm_init(mm, tsk, mm->user_ns))
		goto fail_nomem;

	err = dup_mmap(mm, oldmm);
	if (err)
		goto free_pt;

	mm->hiwater_rss = get_mm_rss(mm);
	mm->hiwater_vm = mm->total_vm;

	if (mm->binfmt && !try_module_get(mm->binfmt->module))
		goto free_pt;

	return mm;

free_pt:
	/* don't put binfmt in mmput, we haven't got module yet */
	mm->binfmt = NULL;
	mm_init_owner(mm, NULL);
	mmput(mm);

fail_nomem:
	return NULL;
}
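/*
 * Set up tsk->mm for the new task: share the parent's mm for CLONE_VM,
 * otherwise duplicate it with dup_mm().
 */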
static int copy_mm(unsigned long clone_flags, struct task_struct *tsk)
{
	struct mm_struct *mm, *oldmm;

	tsk->min_flt = tsk->maj_flt = 0;
	tsk->nvcsw = tsk->nivcsw = 0;
#ifdef CONFIG_DETECT_HUNG_TASK
	tsk->last_switch_count = tsk->nvcsw + tsk->nivcsw;
	tsk->last_switch_time = 0;
#endif

	tsk->mm = NULL;
	tsk->active_mm = NULL;

	/*
	 * Are we cloning a kernel thread?
	 *
	 * We need to steal a active VM for that..
	 */
	oldmm = current->mm;
	if (!oldmm)
		return 0;

	/* initialize the new vmacache entries */
	vmacache_flush(tsk);

	if (clone_flags & CLONE_VM) {
		mmget(oldmm);
		mm = oldmm;
	} else {
		mm = dup_mm(tsk, current->mm);
		if (!mm)
			return -ENOMEM;
	}

	tsk->mm = mm;
	tsk->active_mm = mm;
	return 0;
}
1521 static int copy_fs(unsigned long clone_flags
, struct task_struct
*tsk
)
1523 struct fs_struct
*fs
= current
->fs
;
1524 if (clone_flags
& CLONE_FS
) {
1525 /* tsk->fs is already what we want */
1526 spin_lock(&fs
->lock
);
1528 spin_unlock(&fs
->lock
);
1532 spin_unlock(&fs
->lock
);
1535 tsk
->fs
= copy_fs_struct(fs
);
1541 static int copy_files(unsigned long clone_flags
, struct task_struct
*tsk
)
1543 struct files_struct
*oldf
, *newf
;
1547 * A background process may not have any files ...
1549 oldf
= current
->files
;
1553 if (clone_flags
& CLONE_FILES
) {
1554 atomic_inc(&oldf
->count
);
1558 newf
= dup_fd(oldf
, NR_OPEN_MAX
, &error
);
1568 static int copy_io(unsigned long clone_flags
, struct task_struct
*tsk
)
1571 struct io_context
*ioc
= current
->io_context
;
1572 struct io_context
*new_ioc
;
1577 * Share io context with parent, if CLONE_IO is set
1579 if (clone_flags
& CLONE_IO
) {
1581 tsk
->io_context
= ioc
;
1582 } else if (ioprio_valid(ioc
->ioprio
)) {
1583 new_ioc
= get_task_io_context(tsk
, GFP_KERNEL
, NUMA_NO_NODE
);
1584 if (unlikely(!new_ioc
))
1587 new_ioc
->ioprio
= ioc
->ioprio
;
1588 put_io_context(new_ioc
);
1594 static int copy_sighand(unsigned long clone_flags
, struct task_struct
*tsk
)
1596 struct sighand_struct
*sig
;
1598 if (clone_flags
& CLONE_SIGHAND
) {
1599 refcount_inc(¤t
->sighand
->count
);
1602 sig
= kmem_cache_alloc(sighand_cachep
, GFP_KERNEL
);
1603 RCU_INIT_POINTER(tsk
->sighand
, sig
);
1607 refcount_set(&sig
->count
, 1);
1608 spin_lock_irq(¤t
->sighand
->siglock
);
1609 memcpy(sig
->action
, current
->sighand
->action
, sizeof(sig
->action
));
1610 spin_unlock_irq(¤t
->sighand
->siglock
);
1612 /* Reset all signal handler not set to SIG_IGN to SIG_DFL. */
1613 if (clone_flags
& CLONE_CLEAR_SIGHAND
)
1614 flush_signal_handlers(tsk
, 0);
1619 void __cleanup_sighand(struct sighand_struct
*sighand
)
1621 if (refcount_dec_and_test(&sighand
->count
)) {
1622 signalfd_cleanup(sighand
);
1624 * sighand_cachep is SLAB_TYPESAFE_BY_RCU so we can free it
1625 * without an RCU grace period, see __lock_task_sighand().
1627 kmem_cache_free(sighand_cachep
, sighand
);
1632 * Initialize POSIX timer handling for a thread group.
1634 static void posix_cpu_timers_init_group(struct signal_struct
*sig
)
1636 struct posix_cputimers
*pct
= &sig
->posix_cputimers
;
1637 unsigned long cpu_limit
;
1639 cpu_limit
= READ_ONCE(sig
->rlim
[RLIMIT_CPU
].rlim_cur
);
1640 posix_cputimers_group_init(pct
, cpu_limit
);
1643 static int copy_signal(unsigned long clone_flags
, struct task_struct
*tsk
)
1645 struct signal_struct
*sig
;
1647 if (clone_flags
& CLONE_THREAD
)
1650 sig
= kmem_cache_zalloc(signal_cachep
, GFP_KERNEL
);
1655 sig
->nr_threads
= 1;
1656 atomic_set(&sig
->live
, 1);
1657 refcount_set(&sig
->sigcnt
, 1);
1659 /* list_add(thread_node, thread_head) without INIT_LIST_HEAD() */
1660 sig
->thread_head
= (struct list_head
)LIST_HEAD_INIT(tsk
->thread_node
);
1661 tsk
->thread_node
= (struct list_head
)LIST_HEAD_INIT(sig
->thread_head
);
1663 init_waitqueue_head(&sig
->wait_chldexit
);
1664 sig
->curr_target
= tsk
;
1665 init_sigpending(&sig
->shared_pending
);
1666 INIT_HLIST_HEAD(&sig
->multiprocess
);
1667 seqlock_init(&sig
->stats_lock
);
1668 prev_cputime_init(&sig
->prev_cputime
);
1670 #ifdef CONFIG_POSIX_TIMERS
1671 INIT_LIST_HEAD(&sig
->posix_timers
);
1672 hrtimer_init(&sig
->real_timer
, CLOCK_MONOTONIC
, HRTIMER_MODE_REL
);
1673 sig
->real_timer
.function
= it_real_fn
;
1676 task_lock(current
->group_leader
);
1677 memcpy(sig
->rlim
, current
->signal
->rlim
, sizeof sig
->rlim
);
1678 task_unlock(current
->group_leader
);
1680 posix_cpu_timers_init_group(sig
);
1682 tty_audit_fork(sig
);
1683 sched_autogroup_fork(sig
);
1685 sig
->oom_score_adj
= current
->signal
->oom_score_adj
;
1686 sig
->oom_score_adj_min
= current
->signal
->oom_score_adj_min
;
1688 mutex_init(&sig
->cred_guard_mutex
);
1689 init_rwsem(&sig
->exec_update_lock
);
1694 static void copy_seccomp(struct task_struct
*p
)
1696 #ifdef CONFIG_SECCOMP
1698 * Must be called with sighand->lock held, which is common to
1699 * all threads in the group. Holding cred_guard_mutex is not
1700 * needed because this new task is not yet running and cannot
1703 assert_spin_locked(¤t
->sighand
->siglock
);
1705 /* Ref-count the new filter user, and assign it. */
1706 get_seccomp_filter(current
);
1707 p
->seccomp
= current
->seccomp
;
1710 * Explicitly enable no_new_privs here in case it got set
1711 * between the task_struct being duplicated and holding the
1712 * sighand lock. The seccomp state and nnp must be in sync.
1714 if (task_no_new_privs(current
))
1715 task_set_no_new_privs(p
);
1718 * If the parent gained a seccomp mode after copying thread
1719 * flags and between before we held the sighand lock, we have
1720 * to manually enable the seccomp thread flag here.
1722 if (p
->seccomp
.mode
!= SECCOMP_MODE_DISABLED
)
1723 set_task_syscall_work(p
, SECCOMP
);
1727 SYSCALL_DEFINE1(set_tid_address
, int __user
*, tidptr
)
1729 current
->clear_child_tid
= tidptr
;
1731 return task_pid_vnr(current
);
1734 static void rt_mutex_init_task(struct task_struct
*p
)
1736 raw_spin_lock_init(&p
->pi_lock
);
1737 #ifdef CONFIG_RT_MUTEXES
1738 p
->pi_waiters
= RB_ROOT_CACHED
;
1739 p
->pi_top_task
= NULL
;
1740 p
->pi_blocked_on
= NULL
;
1744 static inline void init_task_pid_links(struct task_struct
*task
)
1748 for (type
= PIDTYPE_PID
; type
< PIDTYPE_MAX
; ++type
)
1749 INIT_HLIST_NODE(&task
->pid_links
[type
]);
1753 init_task_pid(struct task_struct
*task
, enum pid_type type
, struct pid
*pid
)
1755 if (type
== PIDTYPE_PID
)
1756 task
->thread_pid
= pid
;
1758 task
->signal
->pids
[type
] = pid
;
1761 static inline void rcu_copy_process(struct task_struct
*p
)
1763 #ifdef CONFIG_PREEMPT_RCU
1764 p
->rcu_read_lock_nesting
= 0;
1765 p
->rcu_read_unlock_special
.s
= 0;
1766 p
->rcu_blocked_node
= NULL
;
1767 INIT_LIST_HEAD(&p
->rcu_node_entry
);
1768 #endif /* #ifdef CONFIG_PREEMPT_RCU */
1769 #ifdef CONFIG_TASKS_RCU
1770 p
->rcu_tasks_holdout
= false;
1771 INIT_LIST_HEAD(&p
->rcu_tasks_holdout_list
);
1772 p
->rcu_tasks_idle_cpu
= -1;
1773 #endif /* #ifdef CONFIG_TASKS_RCU */
1774 #ifdef CONFIG_TASKS_TRACE_RCU
1775 p
->trc_reader_nesting
= 0;
1776 p
->trc_reader_special
.s
= 0;
1777 INIT_LIST_HEAD(&p
->trc_holdout_list
);
1778 #endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
1781 struct pid
*pidfd_pid(const struct file
*file
)
1783 if (file
->f_op
== &pidfd_fops
)
1784 return file
->private_data
;
1786 return ERR_PTR(-EBADF
);
1789 static int pidfd_release(struct inode
*inode
, struct file
*file
)
1791 struct pid
*pid
= file
->private_data
;
1793 file
->private_data
= NULL
;
1798 #ifdef CONFIG_PROC_FS
1800 * pidfd_show_fdinfo - print information about a pidfd
1801 * @m: proc fdinfo file
1802 * @f: file referencing a pidfd
1805 * This function will print the pid that a given pidfd refers to in the
1806 * pid namespace of the procfs instance.
1807 * If the pid namespace of the process is not a descendant of the pid
1808 * namespace of the procfs instance 0 will be shown as its pid. This is
1809 * similar to calling getppid() on a process whose parent is outside of
1810 * its pid namespace.
1813 * If pid namespaces are supported then this function will also print
1814 * the pid of a given pidfd refers to for all descendant pid namespaces
1815 * starting from the current pid namespace of the instance, i.e. the
1816 * Pid field and the first entry in the NSpid field will be identical.
1817 * If the pid namespace of the process is not a descendant of the pid
1818 * namespace of the procfs instance 0 will be shown as its first NSpid
1819 * entry and no others will be shown.
1820 * Note that this differs from the Pid and NSpid fields in
1821 * /proc/<pid>/status where Pid and NSpid are always shown relative to
1822 * the pid namespace of the procfs instance. The difference becomes
1823 * obvious when sending around a pidfd between pid namespaces from a
1824 * different branch of the tree, i.e. where no ancestral relation is
1825 * present between the pid namespaces:
1826 * - create two new pid namespaces ns1 and ns2 in the initial pid
1827 * namespace (also take care to create new mount namespaces in the
1828 * new pid namespace and mount procfs)
1829 * - create a process with a pidfd in ns1
1830 * - send pidfd from ns1 to ns2
1831 * - read /proc/self/fdinfo/<pidfd> and observe that both Pid and NSpid
1832 * have exactly one entry, which is 0
1834 static void pidfd_show_fdinfo(struct seq_file
*m
, struct file
*f
)
1836 struct pid
*pid
= f
->private_data
;
1837 struct pid_namespace
*ns
;
1840 if (likely(pid_has_task(pid
, PIDTYPE_PID
))) {
1841 ns
= proc_pid_ns(file_inode(m
->file
)->i_sb
);
1842 nr
= pid_nr_ns(pid
, ns
);
1845 seq_put_decimal_ll(m
, "Pid:\t", nr
);
1847 #ifdef CONFIG_PID_NS
1848 seq_put_decimal_ll(m
, "\nNSpid:\t", nr
);
1852 /* If nr is non-zero it means that 'pid' is valid and that
1853 * ns, i.e. the pid namespace associated with the procfs
1854 * instance, is in the pid namespace hierarchy of pid.
1855 * Start at one below the already printed level.
1857 for (i
= ns
->level
+ 1; i
<= pid
->level
; i
++)
1858 seq_put_decimal_ll(m
, "\t", pid
->numbers
[i
].nr
);
1866 * Poll support for process exit notification.
1868 static __poll_t
pidfd_poll(struct file
*file
, struct poll_table_struct
*pts
)
1870 struct pid
*pid
= file
->private_data
;
1871 __poll_t poll_flags
= 0;
1873 poll_wait(file
, &pid
->wait_pidfd
, pts
);
1876 * Inform pollers only when the whole thread group exits.
1877 * If the thread group leader exits before all other threads in the
1878 * group, then poll(2) should block, similar to the wait(2) family.
1880 if (thread_group_exited(pid
))
1881 poll_flags
= EPOLLIN
| EPOLLRDNORM
;
1886 const struct file_operations pidfd_fops
= {
1887 .release
= pidfd_release
,
1889 #ifdef CONFIG_PROC_FS
1890 .show_fdinfo
= pidfd_show_fdinfo
,
1894 static void __delayed_free_task(struct rcu_head
*rhp
)
1896 struct task_struct
*tsk
= container_of(rhp
, struct task_struct
, rcu
);
1901 static __always_inline
void delayed_free_task(struct task_struct
*tsk
)
1903 if (IS_ENABLED(CONFIG_MEMCG
))
1904 call_rcu(&tsk
->rcu
, __delayed_free_task
);
1909 static void copy_oom_score_adj(u64 clone_flags
, struct task_struct
*tsk
)
1911 /* Skip if kernel thread */
1915 /* Skip if spawning a thread or using vfork */
1916 if ((clone_flags
& (CLONE_VM
| CLONE_THREAD
| CLONE_VFORK
)) != CLONE_VM
)
1919 /* We need to synchronize with __set_oom_adj */
1920 mutex_lock(&oom_adj_mutex
);
1921 set_bit(MMF_MULTIPROCESS
, &tsk
->mm
->flags
);
1922 /* Update the values in case they were changed after copy_signal */
1923 tsk
->signal
->oom_score_adj
= current
->signal
->oom_score_adj
;
1924 tsk
->signal
->oom_score_adj_min
= current
->signal
->oom_score_adj_min
;
1925 mutex_unlock(&oom_adj_mutex
);
1929 * This creates a new process as a copy of the old one,
1930 * but does not actually start it yet.
1932 * It copies the registers, and all the appropriate
1933 * parts of the process environment (as per the clone
1934 * flags). The actual kick-off is left to the caller.
1936 static __latent_entropy
struct task_struct
*copy_process(
1940 struct kernel_clone_args
*args
)
1942 int pidfd
= -1, retval
;
1943 struct task_struct
*p
;
1944 struct multiprocess_signals delayed
;
1945 struct file
*pidfile
= NULL
;
1946 u64 clone_flags
= args
->flags
;
1947 struct nsproxy
*nsp
= current
->nsproxy
;
1950 * Don't allow sharing the root directory with processes in a different
1953 if ((clone_flags
& (CLONE_NEWNS
|CLONE_FS
)) == (CLONE_NEWNS
|CLONE_FS
))
1954 return ERR_PTR(-EINVAL
);
1956 if ((clone_flags
& (CLONE_NEWUSER
|CLONE_FS
)) == (CLONE_NEWUSER
|CLONE_FS
))
1957 return ERR_PTR(-EINVAL
);
1959 if ((clone_flags
& CLONE_NEWUSER
) && !unprivileged_userns_clone
)
1960 if (!capable(CAP_SYS_ADMIN
))
1961 return ERR_PTR(-EPERM
);
1964 * Thread groups must share signals as well, and detached threads
1965 * can only be started up within the thread group.
1967 if ((clone_flags
& CLONE_THREAD
) && !(clone_flags
& CLONE_SIGHAND
))
1968 return ERR_PTR(-EINVAL
);
1971 * Shared signal handlers imply shared VM. By way of the above,
1972 * thread groups also imply shared VM. Blocking this case allows
1973 * for various simplifications in other code.
1975 if ((clone_flags
& CLONE_SIGHAND
) && !(clone_flags
& CLONE_VM
))
1976 return ERR_PTR(-EINVAL
);
1979 * Siblings of global init remain as zombies on exit since they are
1980 * not reaped by their parent (swapper). To solve this and to avoid
1981 * multi-rooted process trees, prevent global and container-inits
1982 * from creating siblings.
1984 if ((clone_flags
& CLONE_PARENT
) &&
1985 current
->signal
->flags
& SIGNAL_UNKILLABLE
)
1986 return ERR_PTR(-EINVAL
);
1989 * If the new process will be in a different pid or user namespace
1990 * do not allow it to share a thread group with the forking task.
1992 if (clone_flags
& CLONE_THREAD
) {
1993 if ((clone_flags
& (CLONE_NEWUSER
| CLONE_NEWPID
)) ||
1994 (task_active_pid_ns(current
) != nsp
->pid_ns_for_children
))
1995 return ERR_PTR(-EINVAL
);
1999 * If the new process will be in a different time namespace
2000 * do not allow it to share VM or a thread group with the forking task.
2002 if (clone_flags
& (CLONE_THREAD
| CLONE_VM
)) {
2003 if (nsp
->time_ns
!= nsp
->time_ns_for_children
)
2004 return ERR_PTR(-EINVAL
);
2007 if (clone_flags
& CLONE_PIDFD
) {
2009 * - CLONE_DETACHED is blocked so that we can potentially
2010 * reuse it later for CLONE_PIDFD.
2011 * - CLONE_THREAD is blocked until someone really needs it.
2013 if (clone_flags
& (CLONE_DETACHED
| CLONE_THREAD
))
2014 return ERR_PTR(-EINVAL
);
2018 * Force any signals received before this point to be delivered
2019 * before the fork happens. Collect up signals sent to multiple
2020 * processes that happen during the fork and delay them so that
2021 * they appear to happen after the fork.
2023 sigemptyset(&delayed
.signal
);
2024 INIT_HLIST_NODE(&delayed
.node
);
2026 spin_lock_irq(¤t
->sighand
->siglock
);
2027 if (!(clone_flags
& CLONE_THREAD
))
2028 hlist_add_head(&delayed
.node
, ¤t
->signal
->multiprocess
);
2029 recalc_sigpending();
2030 spin_unlock_irq(¤t
->sighand
->siglock
);
2031 retval
= -ERESTARTNOINTR
;
2032 if (task_sigpending(current
))
2036 p
= dup_task_struct(current
, node
);
2039 if (args
->io_thread
) {
2041 * Mark us an IO worker, and block any signal that isn't
2044 p
->flags
|= PF_IO_WORKER
;
2045 siginitsetinv(&p
->blocked
, sigmask(SIGKILL
)|sigmask(SIGSTOP
));
2049 * This _must_ happen before we call free_task(), i.e. before we jump
2050 * to any of the bad_fork_* labels. This is to avoid freeing
2051 * p->set_child_tid which is (ab)used as a kthread's data pointer for
2052 * kernel threads (PF_KTHREAD).
2054 p
->set_child_tid
= (clone_flags
& CLONE_CHILD_SETTID
) ? args
->child_tid
: NULL
;
2056 * Clear TID on mm_release()?
2058 p
->clear_child_tid
= (clone_flags
& CLONE_CHILD_CLEARTID
) ? args
->child_tid
: NULL
;
2060 ftrace_graph_init_task(p
);
2062 rt_mutex_init_task(p
);
2064 lockdep_assert_irqs_enabled();
2065 #ifdef CONFIG_PROVE_LOCKING
2066 DEBUG_LOCKS_WARN_ON(!p
->softirqs_enabled
);
2068 retval
= copy_creds(p
, clone_flags
);
2073 if (is_ucounts_overlimit(task_ucounts(p
), UCOUNT_RLIMIT_NPROC
, rlimit(RLIMIT_NPROC
))) {
2074 if (p
->real_cred
->user
!= INIT_USER
&&
2075 !capable(CAP_SYS_RESOURCE
) && !capable(CAP_SYS_ADMIN
))
2076 goto bad_fork_cleanup_count
;
2078 current
->flags
&= ~PF_NPROC_EXCEEDED
;
2081 * If multiple threads are within copy_process(), then this check
2082 * triggers too late. This doesn't hurt, the check is only there
2083 * to stop root fork bombs.
2086 if (data_race(nr_threads
>= max_threads
))
2087 goto bad_fork_cleanup_count
;
2089 delayacct_tsk_init(p
); /* Must remain after dup_task_struct() */
2090 p
->flags
&= ~(PF_SUPERPRIV
| PF_WQ_WORKER
| PF_IDLE
| PF_NO_SETAFFINITY
);
2091 p
->flags
|= PF_FORKNOEXEC
;
2092 INIT_LIST_HEAD(&p
->children
);
2093 INIT_LIST_HEAD(&p
->sibling
);
2094 rcu_copy_process(p
);
2095 p
->vfork_done
= NULL
;
2096 spin_lock_init(&p
->alloc_lock
);
2098 init_sigpending(&p
->pending
);
2100 p
->utime
= p
->stime
= p
->gtime
= 0;
2101 #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
2102 p
->utimescaled
= p
->stimescaled
= 0;
2104 prev_cputime_init(&p
->prev_cputime
);
2106 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
2107 seqcount_init(&p
->vtime
.seqcount
);
2108 p
->vtime
.starttime
= 0;
2109 p
->vtime
.state
= VTIME_INACTIVE
;
2112 #ifdef CONFIG_IO_URING
2116 #if defined(SPLIT_RSS_COUNTING)
2117 memset(&p
->rss_stat
, 0, sizeof(p
->rss_stat
));
2120 p
->default_timer_slack_ns
= current
->timer_slack_ns
;
2126 task_io_accounting_init(&p
->ioac
);
2127 acct_clear_integrals(p
);
2129 posix_cputimers_init(&p
->posix_cputimers
);
2131 p
->io_context
= NULL
;
2132 audit_set_context(p
, NULL
);
2135 p
->mempolicy
= mpol_dup(p
->mempolicy
);
2136 if (IS_ERR(p
->mempolicy
)) {
2137 retval
= PTR_ERR(p
->mempolicy
);
2138 p
->mempolicy
= NULL
;
2139 goto bad_fork_cleanup_threadgroup_lock
;
2142 #ifdef CONFIG_CPUSETS
2143 p
->cpuset_mem_spread_rotor
= NUMA_NO_NODE
;
2144 p
->cpuset_slab_spread_rotor
= NUMA_NO_NODE
;
2145 seqcount_spinlock_init(&p
->mems_allowed_seq
, &p
->alloc_lock
);
2147 #ifdef CONFIG_TRACE_IRQFLAGS
2148 memset(&p
->irqtrace
, 0, sizeof(p
->irqtrace
));
2149 p
->irqtrace
.hardirq_disable_ip
= _THIS_IP_
;
2150 p
->irqtrace
.softirq_enable_ip
= _THIS_IP_
;
2151 p
->softirqs_enabled
= 1;
2152 p
->softirq_context
= 0;
2155 p
->pagefault_disabled
= 0;
2157 #ifdef CONFIG_LOCKDEP
2158 lockdep_init_task(p
);
2161 #ifdef CONFIG_DEBUG_MUTEXES
2162 p
->blocked_on
= NULL
; /* not blocked yet */
2164 #ifdef CONFIG_BCACHE
2165 p
->sequential_io
= 0;
2166 p
->sequential_io_avg
= 0;
2168 #ifdef CONFIG_BPF_SYSCALL
2169 RCU_INIT_POINTER(p
->bpf_storage
, NULL
);
2173 /* Perform scheduler related setup. Assign this task to a CPU. */
2174 retval
= sched_fork(clone_flags
, p
);
2176 goto bad_fork_cleanup_policy
;
2178 retval
= perf_event_init_task(p
, clone_flags
);
2180 goto bad_fork_cleanup_policy
;
2181 retval
= audit_alloc(p
);
2183 goto bad_fork_cleanup_perf
;
2184 /* copy all the process information */
2186 retval
= security_task_alloc(p
, clone_flags
);
2188 goto bad_fork_cleanup_audit
;
2189 retval
= copy_semundo(clone_flags
, p
);
2191 goto bad_fork_cleanup_security
;
2192 retval
= copy_files(clone_flags
, p
);
2194 goto bad_fork_cleanup_semundo
;
2195 retval
= copy_fs(clone_flags
, p
);
2197 goto bad_fork_cleanup_files
;
2198 retval
= copy_sighand(clone_flags
, p
);
2200 goto bad_fork_cleanup_fs
;
2201 retval
= copy_signal(clone_flags
, p
);
2203 goto bad_fork_cleanup_sighand
;
2204 retval
= copy_mm(clone_flags
, p
);
2206 goto bad_fork_cleanup_signal
;
2207 retval
= copy_namespaces(clone_flags
, p
);
2209 goto bad_fork_cleanup_mm
;
2210 retval
= copy_io(clone_flags
, p
);
2212 goto bad_fork_cleanup_namespaces
;
2213 retval
= copy_thread(clone_flags
, args
->stack
, args
->stack_size
, p
, args
->tls
);
2215 goto bad_fork_cleanup_io
;
2217 stackleak_task_init(p
);
2219 if (pid
!= &init_struct_pid
) {
2220 pid
= alloc_pid(p
->nsproxy
->pid_ns_for_children
, args
->set_tid
,
2221 args
->set_tid_size
);
2223 retval
= PTR_ERR(pid
);
2224 goto bad_fork_cleanup_thread
;
2229 * This has to happen after we've potentially unshared the file
2230 * descriptor table (so that the pidfd doesn't leak into the child
2231 * if the fd table isn't shared).
2233 if (clone_flags
& CLONE_PIDFD
) {
2234 retval
= get_unused_fd_flags(O_RDWR
| O_CLOEXEC
);
2236 goto bad_fork_free_pid
;
2240 pidfile
= anon_inode_getfile("[pidfd]", &pidfd_fops
, pid
,
2241 O_RDWR
| O_CLOEXEC
);
2242 if (IS_ERR(pidfile
)) {
2243 put_unused_fd(pidfd
);
2244 retval
= PTR_ERR(pidfile
);
2245 goto bad_fork_free_pid
;
2247 get_pid(pid
); /* held by pidfile now */
2249 retval
= put_user(pidfd
, args
->pidfd
);
2251 goto bad_fork_put_pidfd
;
2260 * sigaltstack should be cleared when sharing the same VM
2262 if ((clone_flags
& (CLONE_VM
|CLONE_VFORK
)) == CLONE_VM
)
2266 * Syscall tracing and stepping should be turned off in the
2267 * child regardless of CLONE_PTRACE.
2269 user_disable_single_step(p
);
2270 clear_task_syscall_work(p
, SYSCALL_TRACE
);
2271 #if defined(CONFIG_GENERIC_ENTRY) || defined(TIF_SYSCALL_EMU)
2272 clear_task_syscall_work(p
, SYSCALL_EMU
);
2274 clear_tsk_latency_tracing(p
);
2276 /* ok, now we should be set up.. */
2277 p
->pid
= pid_nr(pid
);
2278 if (clone_flags
& CLONE_THREAD
) {
2279 p
->group_leader
= current
->group_leader
;
2280 p
->tgid
= current
->tgid
;
2282 p
->group_leader
= p
;
2287 p
->nr_dirtied_pause
= 128 >> (PAGE_SHIFT
- 10);
2288 p
->dirty_paused_when
= 0;
2290 p
->pdeath_signal
= 0;
2291 INIT_LIST_HEAD(&p
->thread_group
);
2292 p
->task_works
= NULL
;
2293 clear_posix_cputimers_work(p
);
2295 #ifdef CONFIG_KRETPROBES
2296 p
->kretprobe_instances
.first
= NULL
;
2300 * Ensure that the cgroup subsystem policies allow the new process to be
2301 * forked. It should be noted that the new process's css_set can be changed
2302 * between here and cgroup_post_fork() if an organisation operation is in
2305 retval
= cgroup_can_fork(p
, args
);
2307 goto bad_fork_put_pidfd
;
2310 * Now that the cgroups are pinned, re-clone the parent cgroup and put
2311 * the new task on the correct runqueue. All this *before* the task
2314 * This isn't part of ->can_fork() because while the re-cloning is
2315 * cgroup specific, it unconditionally needs to place the task on a
2318 sched_cgroup_fork(p
, args
);
2321 * From this point on we must avoid any synchronous user-space
2322 * communication until we take the tasklist-lock. In particular, we do
2323 * not want user-space to be able to predict the process start-time by
2324 * stalling fork(2) after we recorded the start_time but before it is
2325 * visible to the system.
2328 p
->start_time
= ktime_get_ns();
2329 p
->start_boottime
= ktime_get_boottime_ns();
2332 * Make it visible to the rest of the system, but dont wake it up yet.
2333 * Need tasklist lock for parent etc handling!
2335 write_lock_irq(&tasklist_lock
);
2337 /* CLONE_PARENT re-uses the old parent */
2338 if (clone_flags
& (CLONE_PARENT
|CLONE_THREAD
)) {
2339 p
->real_parent
= current
->real_parent
;
2340 p
->parent_exec_id
= current
->parent_exec_id
;
2341 if (clone_flags
& CLONE_THREAD
)
2342 p
->exit_signal
= -1;
2344 p
->exit_signal
= current
->group_leader
->exit_signal
;
2346 p
->real_parent
= current
;
2347 p
->parent_exec_id
= current
->self_exec_id
;
2348 p
->exit_signal
= args
->exit_signal
;
2351 klp_copy_process(p
);
2355 spin_lock(¤t
->sighand
->siglock
);
2358 * Copy seccomp details explicitly here, in case they were changed
2359 * before holding sighand lock.
2363 rseq_fork(p
, clone_flags
);
2365 /* Don't start children in a dying pid namespace */
2366 if (unlikely(!(ns_of_pid(pid
)->pid_allocated
& PIDNS_ADDING
))) {
2368 goto bad_fork_cancel_cgroup
;
2371 /* Let kill terminate clone/fork in the middle */
2372 if (fatal_signal_pending(current
)) {
2374 goto bad_fork_cancel_cgroup
;
	init_task_pid_links(p);
	if (likely(p->pid)) {
		ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);

		init_task_pid(p, PIDTYPE_PID, pid);
		if (thread_group_leader(p)) {
			init_task_pid(p, PIDTYPE_TGID, pid);
			init_task_pid(p, PIDTYPE_PGID, task_pgrp(current));
			init_task_pid(p, PIDTYPE_SID, task_session(current));

			if (is_child_reaper(pid)) {
				ns_of_pid(pid)->child_reaper = p;
				p->signal->flags |= SIGNAL_UNKILLABLE;
			}
			p->signal->shared_pending.signal = delayed.signal;
			p->signal->tty = tty_kref_get(current->signal->tty);

			/*
			 * Inherit has_child_subreaper flag under the same
			 * tasklist_lock with adding child to the process tree
			 * for propagate_has_child_subreaper optimization.
			 */
			p->signal->has_child_subreaper = p->real_parent->signal->has_child_subreaper ||
							 p->real_parent->signal->is_child_subreaper;
			list_add_tail(&p->sibling, &p->real_parent->children);
			list_add_tail_rcu(&p->tasks, &init_task.tasks);
			attach_pid(p, PIDTYPE_TGID);
			attach_pid(p, PIDTYPE_PGID);
			attach_pid(p, PIDTYPE_SID);
			__this_cpu_inc(process_counts);
		} else {
			current->signal->nr_threads++;
			atomic_inc(&current->signal->live);
			refcount_inc(&current->signal->sigcnt);
			task_join_group_stop(p);
			list_add_tail_rcu(&p->thread_group,
					  &p->group_leader->thread_group);
			list_add_tail_rcu(&p->thread_node,
					  &p->signal->thread_head);
		}
		attach_pid(p, PIDTYPE_PID);
	}
	hlist_del_init(&delayed.node);
	spin_unlock(&current->sighand->siglock);
	syscall_tracepoint_update(p);
	write_unlock_irq(&tasklist_lock);

	if (pidfile)
		fd_install(pidfd, pidfile);

	proc_fork_connector(p);
	cgroup_post_fork(p, args);

	trace_task_newtask(p, clone_flags);
	uprobe_copy_process(p, clone_flags);

	copy_oom_score_adj(clone_flags, p);

	return p;
bad_fork_cancel_cgroup:
	spin_unlock(&current->sighand->siglock);
	write_unlock_irq(&tasklist_lock);
	cgroup_cancel_fork(p, args);
bad_fork_put_pidfd:
	if (clone_flags & CLONE_PIDFD) {
		fput(pidfile);
		put_unused_fd(pidfd);
	}
bad_fork_free_pid:
	if (pid != &init_struct_pid)
		free_pid(pid);
bad_fork_cleanup_thread:
	exit_thread(p);
bad_fork_cleanup_io:
	if (p->io_context)
		exit_io_context(p);
bad_fork_cleanup_namespaces:
	exit_task_namespaces(p);
bad_fork_cleanup_mm:
	if (p->mm) {
		mm_clear_owner(p->mm, p);
		mmput(p->mm);
	}
bad_fork_cleanup_signal:
	if (!(clone_flags & CLONE_THREAD))
		free_signal_struct(p->signal);
bad_fork_cleanup_sighand:
	__cleanup_sighand(p->sighand);
bad_fork_cleanup_fs:
	exit_fs(p); /* blocking */
bad_fork_cleanup_files:
	exit_files(p); /* blocking */
bad_fork_cleanup_semundo:
	exit_sem(p);
bad_fork_cleanup_security:
	security_task_free(p);
bad_fork_cleanup_audit:
	audit_free(p);
bad_fork_cleanup_perf:
	perf_event_free_task(p);
bad_fork_cleanup_policy:
	lockdep_free_task(p);
#ifdef CONFIG_NUMA
	mpol_put(p->mempolicy);
bad_fork_cleanup_threadgroup_lock:
#endif
	delayacct_tsk_free(p);
bad_fork_cleanup_count:
	dec_rlimit_ucounts(task_ucounts(p), UCOUNT_RLIMIT_NPROC, 1);
bad_fork_free:
	WRITE_ONCE(p->__state, TASK_DEAD);
	delayed_free_task(p);
fork_out:
	spin_lock_irq(&current->sighand->siglock);
	hlist_del_init(&delayed.node);
	spin_unlock_irq(&current->sighand->siglock);
	return ERR_PTR(retval);
}
static inline void init_idle_pids(struct task_struct *idle)
{
	enum pid_type type;

	for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type) {
		INIT_HLIST_NODE(&idle->pid_links[type]); /* not really needed */
		init_task_pid(idle, type, &init_struct_pid);
	}
}
struct task_struct * __init fork_idle(int cpu)
{
	struct task_struct *task;
	struct kernel_clone_args args = {
		.flags = CLONE_VM,
	};

	task = copy_process(&init_struct_pid, 0, cpu_to_node(cpu), &args);
	if (!IS_ERR(task)) {
		init_idle_pids(task);
		init_idle(task, cpu);
	}

	return task;
}
struct mm_struct *copy_init_mm(void)
{
	return dup_mm(NULL, &init_mm);
}
/*
 * This is like kernel_clone(), but shaved down and tailored to just
 * creating io_uring workers. It returns a created task, or an error pointer.
 * The returned task is inactive, and the caller must fire it up through
 * wake_up_new_task(p). All signals are blocked in the created task.
 */
struct task_struct *create_io_thread(int (*fn)(void *), void *arg, int node)
{
	unsigned long flags = CLONE_FS|CLONE_FILES|CLONE_SIGHAND|CLONE_THREAD|
				CLONE_IO;
	struct kernel_clone_args args = {
		.flags		= ((lower_32_bits(flags) | CLONE_VM |
				    CLONE_UNTRACED) & ~CSIGNAL),
		.exit_signal	= (lower_32_bits(flags) & CSIGNAL),
		.stack		= (unsigned long)fn,
		.stack_size	= (unsigned long)arg,
	};

	return copy_process(NULL, 0, node, &args);
}
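
/*
 * Illustrative sketch only (not part of the original file): a caller such
 * as io_uring starts the returned worker itself, assuming a caller-provided
 * worker function io_worker_fn() and its private worker_data:
 *
 *	struct task_struct *tsk;
 *
 *	tsk = create_io_thread(io_worker_fn, worker_data, NUMA_NO_NODE);
 *	if (IS_ERR(tsk))
 *		return PTR_ERR(tsk);
 *	wake_up_new_task(tsk);
 */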
/*
 * Ok, this is the main fork-routine.
 *
 * It copies the process, and if successful kick-starts
 * it and waits for it to finish using the VM if required.
 *
 * args->exit_signal is expected to be checked for sanity by the caller.
 */
pid_t kernel_clone(struct kernel_clone_args *args)
{
	u64 clone_flags = args->flags;
	struct completion vfork;
	struct pid *pid;
	struct task_struct *p;
	int trace = 0;
	pid_t nr;

	/*
	 * For legacy clone() calls, CLONE_PIDFD uses the parent_tid argument
	 * to return the pidfd. Hence, CLONE_PIDFD and CLONE_PARENT_SETTID are
	 * mutually exclusive. With clone3() CLONE_PIDFD has grown a separate
	 * field in struct clone_args and it still doesn't make sense to have
	 * them both point at the same memory location. Performing this check
	 * here has the advantage that we don't need to have a separate helper
	 * to check for legacy clone().
	 */
	if ((args->flags & CLONE_PIDFD) &&
	    (args->flags & CLONE_PARENT_SETTID) &&
	    (args->pidfd == args->parent_tid))
		return -EINVAL;

	/*
	 * Determine whether and which event to report to ptracer. When
	 * called from kernel_thread or CLONE_UNTRACED is explicitly
	 * requested, no event is reported; otherwise, report if the event
	 * for the type of forking is enabled.
	 */
	if (!(clone_flags & CLONE_UNTRACED)) {
		if (clone_flags & CLONE_VFORK)
			trace = PTRACE_EVENT_VFORK;
		else if (args->exit_signal != SIGCHLD)
			trace = PTRACE_EVENT_CLONE;
		else
			trace = PTRACE_EVENT_FORK;

		if (likely(!ptrace_event_enabled(current, trace)))
			trace = 0;
	}

	p = copy_process(NULL, trace, NUMA_NO_NODE, args);
	add_latent_entropy();

	if (IS_ERR(p))
		return PTR_ERR(p);

	/*
	 * Do this prior waking up the new thread - the thread pointer
	 * might get invalid after that point, if the thread exits quickly.
	 */
	trace_sched_process_fork(current, p);

	pid = get_task_pid(p, PIDTYPE_PID);
	nr = pid_vnr(pid);

	if (clone_flags & CLONE_PARENT_SETTID)
		put_user(nr, args->parent_tid);

	if (clone_flags & CLONE_VFORK) {
		p->vfork_done = &vfork;
		init_completion(&vfork);
	}

	wake_up_new_task(p);

	/* forking complete and child started to run, tell ptracer */
	if (unlikely(trace))
		ptrace_event_pid(trace, pid);

	if (clone_flags & CLONE_VFORK) {
		if (!wait_for_vfork_done(p, &vfork))
			ptrace_event_pid(PTRACE_EVENT_VFORK_DONE, pid);
	}

	put_pid(pid);
	return nr;
}
/*
 * Create a kernel thread.
 */
pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
	struct kernel_clone_args args = {
		.flags		= ((lower_32_bits(flags) | CLONE_VM |
				    CLONE_UNTRACED) & ~CSIGNAL),
		.exit_signal	= (lower_32_bits(flags) & CSIGNAL),
		.stack		= (unsigned long)fn,
		.stack_size	= (unsigned long)arg,
	};

	return kernel_clone(&args);
}
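
/*
 * Illustrative sketch only: a minimal in-kernel caller, assuming a
 * caller-defined thread function my_thread_fn(); most new code should use
 * the kthread_create()/kthread_run() helpers rather than calling
 * kernel_thread() directly:
 *
 *	pid_t pid = kernel_thread(my_thread_fn, NULL, CLONE_FS | CLONE_FILES);
 *	if (pid < 0)
 *		pr_err("kernel_thread failed: %d\n", pid);
 */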
#ifdef __ARCH_WANT_SYS_FORK
SYSCALL_DEFINE0(fork)
{
#ifdef CONFIG_MMU
	struct kernel_clone_args args = {
		.exit_signal = SIGCHLD,
	};

	return kernel_clone(&args);
#else
	/* can not support in nommu mode */
	return -EINVAL;
#endif
}
#endif
#ifdef __ARCH_WANT_SYS_VFORK
SYSCALL_DEFINE0(vfork)
{
	struct kernel_clone_args args = {
		.flags		= CLONE_VFORK | CLONE_VM,
		.exit_signal	= SIGCHLD,
	};

	return kernel_clone(&args);
}
#endif
#ifdef __ARCH_WANT_SYS_CLONE
#ifdef CONFIG_CLONE_BACKWARDS
SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
		 int __user *, parent_tidptr,
		 unsigned long, tls,
		 int __user *, child_tidptr)
#elif defined(CONFIG_CLONE_BACKWARDS2)
SYSCALL_DEFINE5(clone, unsigned long, newsp, unsigned long, clone_flags,
		 int __user *, parent_tidptr,
		 int __user *, child_tidptr,
		 unsigned long, tls)
#elif defined(CONFIG_CLONE_BACKWARDS3)
SYSCALL_DEFINE6(clone, unsigned long, clone_flags, unsigned long, newsp,
		int, stack_size,
		int __user *, parent_tidptr,
		int __user *, child_tidptr,
		unsigned long, tls)
#else
SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
		 int __user *, parent_tidptr,
		 int __user *, child_tidptr,
		 unsigned long, tls)
#endif
{
	struct kernel_clone_args args = {
		.flags		= (lower_32_bits(clone_flags) & ~CSIGNAL),
		.pidfd		= parent_tidptr,
		.child_tid	= child_tidptr,
		.parent_tid	= parent_tidptr,
		.exit_signal	= (lower_32_bits(clone_flags) & CSIGNAL),
		.stack		= newsp,
		.tls		= tls,
	};

	return kernel_clone(&args);
}
#endif
#ifdef __ARCH_WANT_SYS_CLONE3

noinline static int copy_clone_args_from_user(struct kernel_clone_args *kargs,
					      struct clone_args __user *uargs,
					      size_t usize)
{
	int err;
	struct clone_args args;
	pid_t *kset_tid = kargs->set_tid;

	BUILD_BUG_ON(offsetofend(struct clone_args, tls) !=
		     CLONE_ARGS_SIZE_VER0);
	BUILD_BUG_ON(offsetofend(struct clone_args, set_tid_size) !=
		     CLONE_ARGS_SIZE_VER1);
	BUILD_BUG_ON(offsetofend(struct clone_args, cgroup) !=
		     CLONE_ARGS_SIZE_VER2);
	BUILD_BUG_ON(sizeof(struct clone_args) != CLONE_ARGS_SIZE_VER2);

	if (unlikely(usize > PAGE_SIZE))
		return -E2BIG;
	if (unlikely(usize < CLONE_ARGS_SIZE_VER0))
		return -EINVAL;

	err = copy_struct_from_user(&args, sizeof(args), uargs, usize);
	if (err)
		return err;

	if (unlikely(args.set_tid_size > MAX_PID_NS_LEVEL))
		return -EINVAL;

	if (unlikely(!args.set_tid && args.set_tid_size > 0))
		return -EINVAL;

	if (unlikely(args.set_tid && args.set_tid_size == 0))
		return -EINVAL;

	/*
	 * Verify that higher 32bits of exit_signal are unset and that
	 * it is a valid signal
	 */
	if (unlikely((args.exit_signal & ~((u64)CSIGNAL)) ||
		     !valid_signal(args.exit_signal)))
		return -EINVAL;

	if ((args.flags & CLONE_INTO_CGROUP) &&
	    (args.cgroup > INT_MAX || usize < CLONE_ARGS_SIZE_VER2))
		return -EINVAL;

	*kargs = (struct kernel_clone_args){
		.flags		= args.flags,
		.pidfd		= u64_to_user_ptr(args.pidfd),
		.child_tid	= u64_to_user_ptr(args.child_tid),
		.parent_tid	= u64_to_user_ptr(args.parent_tid),
		.exit_signal	= args.exit_signal,
		.stack		= args.stack,
		.stack_size	= args.stack_size,
		.tls		= args.tls,
		.set_tid_size	= args.set_tid_size,
		.cgroup		= args.cgroup,
	};

	if (args.set_tid &&
		copy_from_user(kset_tid, u64_to_user_ptr(args.set_tid),
			       (kargs->set_tid_size * sizeof(pid_t))))
		return -EFAULT;

	kargs->set_tid = kset_tid;

	return 0;
}
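
/*
 * Illustrative note (not part of the original file): the BUILD_BUG_ON()
 * checks above pin the size-based versioning of struct clone_args. An
 * older userspace that passes only CLONE_ARGS_SIZE_VER0 bytes keeps
 * working, because copy_struct_from_user() zero-fills everything beyond
 * the user-supplied size: set_tid/set_tid_size (VER1) and cgroup (VER2)
 * then read as 0, which the validation above treats as "not requested".
 */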
/**
 * clone3_stack_valid - check and prepare stack
 * @kargs: kernel clone args
 *
 * Verify that the stack arguments userspace gave us are sane.
 * In addition, set the stack direction for userspace since it's easy for us to
 * determine.
 */
static inline bool clone3_stack_valid(struct kernel_clone_args *kargs)
{
	if (kargs->stack == 0) {
		if (kargs->stack_size > 0)
			return false;
	} else {
		if (kargs->stack_size == 0)
			return false;

		if (!access_ok((void __user *)kargs->stack, kargs->stack_size))
			return false;

#if !defined(CONFIG_STACK_GROWSUP) && !defined(CONFIG_IA64)
		kargs->stack += kargs->stack_size;
#endif
	}

	return true;
}
static bool clone3_args_valid(struct kernel_clone_args *kargs)
{
	/* Verify that no unknown flags are passed along. */
	if (kargs->flags &
	    ~(CLONE_LEGACY_FLAGS | CLONE_CLEAR_SIGHAND | CLONE_INTO_CGROUP))
		return false;

	/*
	 * - make the CLONE_DETACHED bit reusable for clone3
	 * - make the CSIGNAL bits reusable for clone3
	 */
	if (kargs->flags & (CLONE_DETACHED | CSIGNAL))
		return false;

	if ((kargs->flags & (CLONE_SIGHAND | CLONE_CLEAR_SIGHAND)) ==
	    (CLONE_SIGHAND | CLONE_CLEAR_SIGHAND))
		return false;

	if ((kargs->flags & (CLONE_THREAD | CLONE_PARENT)) &&
	    kargs->exit_signal)
		return false;

	if (!clone3_stack_valid(kargs))
		return false;

	return true;
}
/**
 * clone3 - create a new process with specific properties
 * @uargs: argument structure
 * @size: size of @uargs
 *
 * clone3() is the extensible successor to clone()/clone2().
 * It takes a struct as argument that is versioned by its size.
 *
 * Return: On success, a positive PID for the child process.
 *         On error, a negative errno number.
 */
SYSCALL_DEFINE2(clone3, struct clone_args __user *, uargs, size_t, size)
{
	int err;

	struct kernel_clone_args kargs;
	pid_t set_tid[MAX_PID_NS_LEVEL];

	kargs.set_tid = set_tid;

	err = copy_clone_args_from_user(&kargs, uargs, size);
	if (err)
		return err;

	if (!clone3_args_valid(&kargs))
		return -EINVAL;

	return kernel_clone(&kargs);
}
#endif
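
/*
 * Illustrative sketch only: a typical userspace invocation goes through
 * syscall(2), since libc wrappers for clone3() are not universal; pidfd
 * below is assumed to be a caller-provided int that receives the new fd:
 *
 *	struct clone_args args = {
 *		.flags		= CLONE_PIDFD,
 *		.pidfd		= (__u64)(uintptr_t)&pidfd,
 *		.exit_signal	= SIGCHLD,
 *	};
 *	pid_t pid = syscall(__NR_clone3, &args, sizeof(args));
 */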
void walk_process_tree(struct task_struct *top, proc_visitor visitor, void *data)
{
	struct task_struct *leader, *parent, *child;
	int res;

	read_lock(&tasklist_lock);
	leader = top = top->group_leader;
down:
	for_each_thread(leader, parent) {
		list_for_each_entry(child, &parent->children, sibling) {
			res = visitor(child, data);
			if (res) {
				if (res < 0)
					goto out;

				leader = child;
				goto down;
			}
		}
	}

	if (leader != top) {
		child = leader;
		parent = child->real_parent;
		leader = parent->group_leader;
		goto down;
	}
out:
	read_unlock(&tasklist_lock);
}
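
/*
 * Illustrative sketch only: the in-tree user of this helper propagates
 * signal->has_child_subreaper (see kernel/sys.c). Per the walk above, a
 * visitor returns 0 to skip a child, a positive value to descend into that
 * child's subtree, and a negative value to abort the walk; a hypothetical
 * visitor using an assumed "visited" marker might look like:
 *
 *	static int mark_visitor(struct task_struct *task, void *data)
 *	{
 *		if (task_already_marked(task))
 *			return 0;	// done here, do not descend again
 *		mark_task(task);
 *		return 1;		// descend into this child's subtree
 *	}
 */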
#ifndef ARCH_MIN_MMSTRUCT_ALIGN
#define ARCH_MIN_MMSTRUCT_ALIGN 0
#endif

static void sighand_ctor(void *data)
{
	struct sighand_struct *sighand = data;

	spin_lock_init(&sighand->siglock);
	init_waitqueue_head(&sighand->signalfd_wqh);
}
void __init proc_caches_init(void)
{
	unsigned int mm_size;

	sighand_cachep = kmem_cache_create("sighand_cache",
			sizeof(struct sighand_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_TYPESAFE_BY_RCU|
			SLAB_ACCOUNT, sighand_ctor);
	signal_cachep = kmem_cache_create("signal_cache",
			sizeof(struct signal_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
			NULL);
	files_cachep = kmem_cache_create("files_cache",
			sizeof(struct files_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
			NULL);
	fs_cachep = kmem_cache_create("fs_cache",
			sizeof(struct fs_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
			NULL);

	/*
	 * The mm_cpumask is located at the end of mm_struct, and is
	 * dynamically sized based on the maximum CPU number this system
	 * can have, taking hotplug into account (nr_cpu_ids).
	 */
	mm_size = sizeof(struct mm_struct) + cpumask_size();

	mm_cachep = kmem_cache_create_usercopy("mm_struct",
			mm_size, ARCH_MIN_MMSTRUCT_ALIGN,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
			offsetof(struct mm_struct, saved_auxv),
			sizeof_field(struct mm_struct, saved_auxv),
			NULL);
	vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC|SLAB_ACCOUNT);
	nsproxy_cache_init();
}
/*
 * Check constraints on flags passed to the unshare system call.
 */
static int check_unshare_flags(unsigned long unshare_flags)
{
	if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
				CLONE_VM|CLONE_FILES|CLONE_SYSVSEM|
				CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWNET|
				CLONE_NEWUSER|CLONE_NEWPID|CLONE_NEWCGROUP|
				CLONE_NEWTIME))
		return -EINVAL;
	/*
	 * Not implemented, but pretend it works if there is nothing
	 * to unshare. Note that unsharing the address space or the
	 * signal handlers also need to unshare the signal queues (aka
	 * CLONE_THREAD).
	 */
	if (unshare_flags & (CLONE_THREAD | CLONE_SIGHAND | CLONE_VM)) {
		if (!thread_group_empty(current))
			return -EINVAL;
	}
	if (unshare_flags & (CLONE_SIGHAND | CLONE_VM)) {
		if (refcount_read(&current->sighand->count) > 1)
			return -EINVAL;
	}
	if (unshare_flags & CLONE_VM) {
		if (!current_is_single_threaded())
			return -EINVAL;
	}

	return 0;
}
/*
 * Unshare the filesystem structure if it is being shared
 */
static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
{
	struct fs_struct *fs = current->fs;

	if (!(unshare_flags & CLONE_FS) || !fs)
		return 0;

	/* don't need lock here; in the worst case we'll do useless copy */
	if (fs->users == 1)
		return 0;

	*new_fsp = copy_fs_struct(fs);
	if (!*new_fsp)
		return -ENOMEM;

	return 0;
}
/*
 * Unshare file descriptor table if it is being shared
 */
int unshare_fd(unsigned long unshare_flags, unsigned int max_fds,
	       struct files_struct **new_fdp)
{
	struct files_struct *fd = current->files;
	int error = 0;

	if ((unshare_flags & CLONE_FILES) &&
	    (fd && atomic_read(&fd->count) > 1)) {
		*new_fdp = dup_fd(fd, max_fds, &error);
		if (!*new_fdp)
			return error;
	}

	return 0;
}
/*
 * unshare allows a process to 'unshare' part of the process
 * context which was originally shared using clone. copy_*
 * functions used by kernel_clone() cannot be used here directly
 * because they modify an inactive task_struct that is being
 * constructed. Here we are modifying the current, active,
 * task_struct.
 */
int ksys_unshare(unsigned long unshare_flags)
{
	struct fs_struct *fs, *new_fs = NULL;
	struct files_struct *fd, *new_fd = NULL;
	struct cred *new_cred = NULL;
	struct nsproxy *new_nsproxy = NULL;
	int do_sysvsem = 0;
	int err;

	/*
	 * If unsharing a user namespace must also unshare the thread group
	 * and unshare the filesystem root and working directories.
	 */
	if (unshare_flags & CLONE_NEWUSER)
		unshare_flags |= CLONE_THREAD | CLONE_FS;
	/*
	 * If unsharing vm, must also unshare signal handlers.
	 */
	if (unshare_flags & CLONE_VM)
		unshare_flags |= CLONE_SIGHAND;
	/*
	 * If unsharing a signal handlers, must also unshare the signal queues.
	 */
	if (unshare_flags & CLONE_SIGHAND)
		unshare_flags |= CLONE_THREAD;
	/*
	 * If unsharing namespace, must also unshare filesystem information.
	 */
	if (unshare_flags & CLONE_NEWNS)
		unshare_flags |= CLONE_FS;

	if ((unshare_flags & CLONE_NEWUSER) && !unprivileged_userns_clone) {
		err = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto bad_unshare_out;
	}

	err = check_unshare_flags(unshare_flags);
	if (err)
		goto bad_unshare_out;
	/*
	 * CLONE_NEWIPC must also detach from the undolist: after switching
	 * to a new ipc namespace, the semaphore arrays from the old
	 * namespace are unreachable.
	 */
	if (unshare_flags & (CLONE_NEWIPC|CLONE_SYSVSEM))
		do_sysvsem = 1;
	err = unshare_fs(unshare_flags, &new_fs);
	if (err)
		goto bad_unshare_out;
	err = unshare_fd(unshare_flags, NR_OPEN_MAX, &new_fd);
	if (err)
		goto bad_unshare_cleanup_fs;
	err = unshare_userns(unshare_flags, &new_cred);
	if (err)
		goto bad_unshare_cleanup_fd;
	err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy,
					 new_cred, new_fs);
	if (err)
		goto bad_unshare_cleanup_cred;

	if (new_cred) {
		err = set_cred_ucounts(new_cred);
		if (err)
			goto bad_unshare_cleanup_cred;
	}

	if (new_fs || new_fd || do_sysvsem || new_cred || new_nsproxy) {
		if (do_sysvsem) {
			/*
			 * CLONE_SYSVSEM is equivalent to sys_exit().
			 */
			exit_sem(current);
		}
		if (unshare_flags & CLONE_NEWIPC) {
			/* Orphan segments in old ns (see sem above). */
			exit_shm(current);
			shm_init_task(current);
		}

		if (new_nsproxy)
			switch_task_namespaces(current, new_nsproxy);

		task_lock(current);

		if (new_fs) {
			fs = current->fs;
			spin_lock(&fs->lock);
			current->fs = new_fs;
			if (--fs->users)
				new_fs = NULL;
			else
				new_fs = fs;
			spin_unlock(&fs->lock);
		}

		if (new_fd) {
			fd = current->files;
			current->files = new_fd;
			new_fd = fd;
		}

		task_unlock(current);

		if (new_cred) {
			/* Install the new user namespace */
			commit_creds(new_cred);
			new_cred = NULL;
		}
	}

	perf_event_namespaces(current);

bad_unshare_cleanup_cred:
	if (new_cred)
		put_cred(new_cred);
bad_unshare_cleanup_fd:
	if (new_fd)
		put_files_struct(new_fd);

bad_unshare_cleanup_fs:
	if (new_fs)
		free_fs_struct(new_fs);

bad_unshare_out:
	return err;
}
SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
{
	return ksys_unshare(unshare_flags);
}
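
/*
 * Illustrative sketch only: a typical userspace call that detaches the
 * caller from the shared mount namespace; per the flag fix-ups in
 * ksys_unshare() above, CLONE_NEWNS implies CLONE_FS as well:
 *
 *	if (unshare(CLONE_NEWNS) < 0)
 *		perror("unshare");
 */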
/*
 * Helper to unshare the files of the current task.
 * We don't want to expose copy_files internals to
 * the exec layer of the kernel.
 */
int unshare_files(void)
{
	struct task_struct *task = current;
	struct files_struct *old, *copy = NULL;
	int error;

	error = unshare_fd(CLONE_FILES, NR_OPEN_MAX, &copy);
	if (error || !copy)
		return error;

	old = task->files;
	task_lock(task);
	task->files = copy;
	task_unlock(task);
	put_files_struct(old);
	return 0;
}
int sysctl_max_threads(struct ctl_table *table, int write,
		       void *buffer, size_t *lenp, loff_t *ppos)
{
	struct ctl_table t;
	int ret;
	int threads = max_threads;
	int min = 1;
	int max = MAX_THREADS;

	t = *table;
	t.data = &threads;
	t.extra1 = &min;
	t.extra2 = &max;

	ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
	if (ret || !write)
		return ret;

	max_threads = threads;

	return 0;
}