// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/kernel/fork.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 *  'fork.c' contains the help-routines for the 'fork' system call
 *  (see also entry.S and others).
 *  Fork is rather simple, once you get the hang of it, but the memory
 *  management can be a bitch. See 'mm/memory.c': 'copy_page_range()'
 */
#include <linux/anon_inodes.h>
#include <linux/slab.h>
#include <linux/sched/autogroup.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/user.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/stat.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/seq_file.h>
#include <linux/rtmutex.h>
#include <linux/init.h>
#include <linux/unistd.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/mempolicy.h>
#include <linux/sem.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/iocontext.h>
#include <linux/key.h>
#include <linux/binfmts.h>
#include <linux/mman.h>
#include <linux/mmu_notifier.h>
#include <linux/hmm.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/vmacache.h>
#include <linux/nsproxy.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/cgroup.h>
#include <linux/security.h>
#include <linux/hugetlb.h>
#include <linux/seccomp.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/jiffies.h>
#include <linux/futex.h>
#include <linux/compat.h>
#include <linux/kthread.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/rcupdate.h>
#include <linux/ptrace.h>
#include <linux/mount.h>
#include <linux/audit.h>
#include <linux/memcontrol.h>
#include <linux/ftrace.h>
#include <linux/proc_fs.h>
#include <linux/profile.h>
#include <linux/rmap.h>
#include <linux/ksm.h>
#include <linux/acct.h>
#include <linux/userfaultfd_k.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/freezer.h>
#include <linux/delayacct.h>
#include <linux/taskstats_kern.h>
#include <linux/random.h>
#include <linux/tty.h>
#include <linux/blkdev.h>
#include <linux/fs_struct.h>
#include <linux/magic.h>
#include <linux/perf_event.h>
#include <linux/posix-timers.h>
#include <linux/user-return-notifier.h>
#include <linux/oom.h>
#include <linux/khugepaged.h>
#include <linux/signalfd.h>
#include <linux/uprobes.h>
#include <linux/aio.h>
#include <linux/compiler.h>
#include <linux/sysctl.h>
#include <linux/kcov.h>
#include <linux/livepatch.h>
#include <linux/thread_info.h>
#include <linux/stackleak.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include <trace/events/sched.h>

#define CREATE_TRACE_POINTS
#include <trace/events/task.h>
/*
 * Minimum number of threads to boot the kernel
 */
#define MIN_THREADS 20

/*
 * Maximum number of threads
 */
#define MAX_THREADS FUTEX_TID_MASK
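/*
 * Using FUTEX_TID_MASK as the ceiling is deliberate: the futex ABI
 * stores the lock owner's TID in the low bits of the futex word, so
 * every thread ID must remain representable under this mask.
 */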
/*
 * Counters protected by write_lock_irq(&tasklist_lock)
 */
unsigned long total_forks;      /* Handle normal Linux uptimes. */
int nr_threads;                 /* The idle threads do not count.. */

static int max_threads;         /* tunable limit on nr_threads */

DEFINE_PER_CPU(unsigned long, process_counts) = 0;

__cacheline_aligned DEFINE_RWLOCK(tasklist_lock);  /* outer */

#ifdef CONFIG_PROVE_RCU
int lockdep_tasklist_lock_is_held(void)
{
        return lockdep_is_held(&tasklist_lock);
}
EXPORT_SYMBOL_GPL(lockdep_tasklist_lock_is_held);
#endif /* #ifdef CONFIG_PROVE_RCU */
int nr_processes(void)
{
        int cpu;
        int total = 0;

        for_each_possible_cpu(cpu)
                total += per_cpu(process_counts, cpu);

        return total;
}
void __weak arch_release_task_struct(struct task_struct *tsk)
{
}

#ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
static struct kmem_cache *task_struct_cachep;

static inline struct task_struct *alloc_task_struct_node(int node)
{
        return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
}

static inline void free_task_struct(struct task_struct *tsk)
{
        kmem_cache_free(task_struct_cachep, tsk);
}
#endif
#ifndef CONFIG_ARCH_THREAD_STACK_ALLOCATOR

/*
 * Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a
 * kmem_cache-based allocator.
 */
# if THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK)

#ifdef CONFIG_VMAP_STACK
/*
 * vmalloc() is a bit slow, and calling vfree() enough times will force a TLB
 * flush.  Try to minimize the number of calls by caching stacks.
 */
#define NR_CACHED_STACKS 2
static DEFINE_PER_CPU(struct vm_struct *, cached_stacks[NR_CACHED_STACKS]);
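/*
 * The cache slots are only ever touched with this_cpu_xchg() /
 * this_cpu_cmpxchg() from the local CPU in the alloc/free paths below,
 * so no locking is needed; the CPU-hotplug callback that follows
 * drains the slots of a CPU going away.
 */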
static int free_vm_stack_cache(unsigned int cpu)
{
        struct vm_struct **cached_vm_stacks = per_cpu_ptr(cached_stacks, cpu);
        int i;

        for (i = 0; i < NR_CACHED_STACKS; i++) {
                struct vm_struct *vm_stack = cached_vm_stacks[i];

                if (!vm_stack)
                        continue;

                vfree(vm_stack->addr);
                cached_vm_stacks[i] = NULL;
        }

        return 0;
}
#endif
static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node)
{
#ifdef CONFIG_VMAP_STACK
        void *stack;
        int i;

        for (i = 0; i < NR_CACHED_STACKS; i++) {
                struct vm_struct *s;

                s = this_cpu_xchg(cached_stacks[i], NULL);
                if (!s)
                        continue;

                /* Clear stale pointers from reused stack. */
                memset(s->addr, 0, THREAD_SIZE);

                tsk->stack_vm_area = s;
                tsk->stack = s->addr;
                return s->addr;
        }

        /*
         * Allocated stacks are cached and later reused by new threads,
         * so memcg accounting is performed manually on assigning/releasing
         * stacks to tasks. Drop __GFP_ACCOUNT.
         */
        stack = __vmalloc_node_range(THREAD_SIZE, THREAD_ALIGN,
                                     VMALLOC_START, VMALLOC_END,
                                     THREADINFO_GFP & ~__GFP_ACCOUNT,
                                     PAGE_KERNEL,
                                     0, node, __builtin_return_address(0));

        /*
         * We can't call find_vm_area() in interrupt context, and
         * free_thread_stack() can be called in interrupt context,
         * so cache the vm_struct.
         */
        if (stack) {
                tsk->stack_vm_area = find_vm_area(stack);
                tsk->stack = stack;
        }
        return stack;
#else
        struct page *page = alloc_pages_node(node, THREADINFO_GFP,
                                             THREAD_SIZE_ORDER);

        if (likely(page)) {
                tsk->stack = page_address(page);
                return tsk->stack;
        }
        return NULL;
#endif
}
static inline void free_thread_stack(struct task_struct *tsk)
{
#ifdef CONFIG_VMAP_STACK
        struct vm_struct *vm = task_stack_vm_area(tsk);

        if (vm) {
                int i;

                for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) {
                        mod_memcg_page_state(vm->pages[i],
                                             MEMCG_KERNEL_STACK_KB,
                                             -(int)(PAGE_SIZE / 1024));

                        memcg_kmem_uncharge(vm->pages[i], 0);
                }

                for (i = 0; i < NR_CACHED_STACKS; i++) {
                        if (this_cpu_cmpxchg(cached_stacks[i],
                                        NULL, tsk->stack_vm_area) != NULL)
                                continue;

                        return;
                }

                vfree_atomic(tsk->stack);
                return;
        }
#endif

        __free_pages(virt_to_page(tsk->stack), THREAD_SIZE_ORDER);
}
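/*
 * Note the use of vfree_atomic() rather than vfree() above: as the
 * comment in alloc_thread_stack_node() explains, free_thread_stack()
 * may run in interrupt context, where a plain vfree() is not safe.
 */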
# else
static struct kmem_cache *thread_stack_cache;

static unsigned long *alloc_thread_stack_node(struct task_struct *tsk,
                                                  int node)
{
        unsigned long *stack;

        stack = kmem_cache_alloc_node(thread_stack_cache, THREADINFO_GFP, node);
        tsk->stack = stack;
        return stack;
}

static void free_thread_stack(struct task_struct *tsk)
{
        kmem_cache_free(thread_stack_cache, tsk->stack);
}

void thread_stack_cache_init(void)
{
        thread_stack_cache = kmem_cache_create_usercopy("thread_stack",
                                        THREAD_SIZE, THREAD_SIZE, 0, 0,
                                        THREAD_SIZE, NULL);
        BUG_ON(thread_stack_cache == NULL);
}
# endif
#endif
/* SLAB cache for signal_struct structures (tsk->signal) */
static struct kmem_cache *signal_cachep;

/* SLAB cache for sighand_struct structures (tsk->sighand) */
struct kmem_cache *sighand_cachep;

/* SLAB cache for files_struct structures (tsk->files) */
struct kmem_cache *files_cachep;

/* SLAB cache for fs_struct structures (tsk->fs) */
struct kmem_cache *fs_cachep;

/* SLAB cache for vm_area_struct structures */
static struct kmem_cache *vm_area_cachep;

/* SLAB cache for mm_struct structures (tsk->mm) */
static struct kmem_cache *mm_cachep;
struct vm_area_struct *vm_area_alloc(struct mm_struct *mm)
{
        struct vm_area_struct *vma;

        vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
        if (vma)
                vma_init(vma, mm);
        return vma;
}

struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig)
{
        struct vm_area_struct *new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);

        if (new) {
                *new = *orig;
                INIT_LIST_HEAD(&new->anon_vma_chain);
        }
        return new;
}

void vm_area_free(struct vm_area_struct *vma)
{
        kmem_cache_free(vm_area_cachep, vma);
}
static void account_kernel_stack(struct task_struct *tsk, int account)
{
        void *stack = task_stack_page(tsk);
        struct vm_struct *vm = task_stack_vm_area(tsk);

        BUILD_BUG_ON(IS_ENABLED(CONFIG_VMAP_STACK) && PAGE_SIZE % 1024 != 0);

        if (vm) {
                int i;

                BUG_ON(vm->nr_pages != THREAD_SIZE / PAGE_SIZE);

                for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) {
                        mod_zone_page_state(page_zone(vm->pages[i]),
                                            NR_KERNEL_STACK_KB,
                                            PAGE_SIZE / 1024 * account);
                }
        } else {
                /*
                 * All stack pages are in the same zone and belong to the
                 * same memcg.
                 */
                struct page *first_page = virt_to_page(stack);

                mod_zone_page_state(page_zone(first_page), NR_KERNEL_STACK_KB,
                                    THREAD_SIZE / 1024 * account);

                mod_memcg_page_state(first_page, MEMCG_KERNEL_STACK_KB,
                                     account * (THREAD_SIZE / 1024));
        }
}
static int memcg_charge_kernel_stack(struct task_struct *tsk)
{
#ifdef CONFIG_VMAP_STACK
        struct vm_struct *vm = task_stack_vm_area(tsk);

        if (vm) {
                int i;

                for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) {
                        int ret;

                        /*
                         * If memcg_kmem_charge() fails, page->mem_cgroup
                         * pointer is NULL, and both memcg_kmem_uncharge()
                         * and mod_memcg_page_state() in free_thread_stack()
                         * will ignore this page. So it's safe.
                         */
                        ret = memcg_kmem_charge(vm->pages[i], GFP_KERNEL, 0);
                        if (ret)
                                return ret;

                        mod_memcg_page_state(vm->pages[i],
                                             MEMCG_KERNEL_STACK_KB,
                                             PAGE_SIZE / 1024);
                }
        }
#endif
        return 0;
}
static void release_task_stack(struct task_struct *tsk)
{
        if (WARN_ON(tsk->state != TASK_DEAD))
                return;  /* Better to leak the stack than to free prematurely */

        account_kernel_stack(tsk, -1);
        free_thread_stack(tsk);
        tsk->stack = NULL;
#ifdef CONFIG_VMAP_STACK
        tsk->stack_vm_area = NULL;
#endif
}
#ifdef CONFIG_THREAD_INFO_IN_TASK
void put_task_stack(struct task_struct *tsk)
{
        if (refcount_dec_and_test(&tsk->stack_refcount))
                release_task_stack(tsk);
}
#endif
void free_task(struct task_struct *tsk)
{
#ifndef CONFIG_THREAD_INFO_IN_TASK
        /*
         * The task is finally done with both the stack and thread_info,
         * so free both.
         */
        release_task_stack(tsk);
#else
        /*
         * If the task had a separate stack allocation, it should be gone
         * by now.
         */
        WARN_ON_ONCE(refcount_read(&tsk->stack_refcount) != 0);
#endif
        rt_mutex_debug_task_free(tsk);
        ftrace_graph_exit_task(tsk);
        put_seccomp_filter(tsk);
        arch_release_task_struct(tsk);
        if (tsk->flags & PF_KTHREAD)
                free_kthread_struct(tsk);
        free_task_struct(tsk);
}
EXPORT_SYMBOL(free_task);
#ifdef CONFIG_MMU
static __latent_entropy int dup_mmap(struct mm_struct *mm,
                                        struct mm_struct *oldmm)
{
        struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
        struct rb_node **rb_link, *rb_parent;
        int retval;
        unsigned long charge;
        LIST_HEAD(uf);

        uprobe_start_dup_mmap();
        if (down_write_killable(&oldmm->mmap_sem)) {
                retval = -EINTR;
                goto fail_uprobe_end;
        }
        flush_cache_dup_mm(oldmm);
        uprobe_dup_mmap(oldmm, mm);
        /*
         * Not linked in yet - no deadlock potential:
         */
        down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);

        /* No ordering required: file already has been exposed. */
        RCU_INIT_POINTER(mm->exe_file, get_mm_exe_file(oldmm));

        mm->total_vm = oldmm->total_vm;
        mm->data_vm = oldmm->data_vm;
        mm->exec_vm = oldmm->exec_vm;
        mm->stack_vm = oldmm->stack_vm;

        rb_link = &mm->mm_rb.rb_node;
        rb_parent = NULL;
        pprev = &mm->mmap;
        retval = ksm_fork(mm, oldmm);
        if (retval)
                goto out;
        retval = khugepaged_fork(mm, oldmm);
        if (retval)
                goto out;

        prev = NULL;
        for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
                struct file *file;

                if (mpnt->vm_flags & VM_DONTCOPY) {
                        vm_stat_account(mm, mpnt->vm_flags, -vma_pages(mpnt));
                        continue;
                }
                charge = 0;
                /*
                 * Don't duplicate many vmas if we've been oom-killed (for
                 * example)
                 */
                if (fatal_signal_pending(current)) {
                        retval = -EINTR;
                        goto out;
                }
                if (mpnt->vm_flags & VM_ACCOUNT) {
                        unsigned long len = vma_pages(mpnt);

                        if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
                                goto fail_nomem;
                        charge = len;
                }
                tmp = vm_area_dup(mpnt);
                if (!tmp)
                        goto fail_nomem;
                retval = vma_dup_policy(mpnt, tmp);
                if (retval)
                        goto fail_nomem_policy;
                tmp->vm_mm = mm;
                retval = dup_userfaultfd(tmp, &uf);
                if (retval)
                        goto fail_nomem_anon_vma_fork;
                if (tmp->vm_flags & VM_WIPEONFORK) {
                        /* VM_WIPEONFORK gets a clean slate in the child. */
                        tmp->anon_vma = NULL;
                        if (anon_vma_prepare(tmp))
                                goto fail_nomem_anon_vma_fork;
                } else if (anon_vma_fork(tmp, mpnt))
                        goto fail_nomem_anon_vma_fork;
                tmp->vm_flags &= ~(VM_LOCKED | VM_LOCKONFAULT);
                tmp->vm_next = tmp->vm_prev = NULL;
                file = tmp->vm_file;
                if (file) {
                        struct inode *inode = file_inode(file);
                        struct address_space *mapping = file->f_mapping;

                        get_file(file);
                        if (tmp->vm_flags & VM_DENYWRITE)
                                atomic_dec(&inode->i_writecount);
                        i_mmap_lock_write(mapping);
                        if (tmp->vm_flags & VM_SHARED)
                                atomic_inc(&mapping->i_mmap_writable);
                        flush_dcache_mmap_lock(mapping);
                        /* insert tmp into the share list, just after mpnt */
                        vma_interval_tree_insert_after(tmp, mpnt,
                                        &mapping->i_mmap);
                        flush_dcache_mmap_unlock(mapping);
                        i_mmap_unlock_write(mapping);
                }

                /*
                 * Clear hugetlb-related page reserves for children. This only
                 * affects MAP_PRIVATE mappings. Faults generated by the child
                 * are not guaranteed to succeed, even if read-only
                 */
                if (is_vm_hugetlb_page(tmp))
                        reset_vma_resv_huge_pages(tmp);

                /*
                 * Link in the new vma and copy the page table entries.
                 */
                *pprev = tmp;
                pprev = &tmp->vm_next;
                tmp->vm_prev = prev;
                prev = tmp;

                __vma_link_rb(mm, tmp, rb_link, rb_parent);
                rb_link = &tmp->vm_rb.rb_right;
                rb_parent = &tmp->vm_rb;

                mm->map_count++;
                if (!(tmp->vm_flags & VM_WIPEONFORK))
                        retval = copy_page_range(mm, oldmm, mpnt);

                if (tmp->vm_ops && tmp->vm_ops->open)
                        tmp->vm_ops->open(tmp);

                if (retval)
                        goto out;
        }
        /* a new mm has just been created */
        retval = arch_dup_mmap(oldmm, mm);
out:
        up_write(&mm->mmap_sem);
        flush_tlb_mm(oldmm);
        up_write(&oldmm->mmap_sem);
        dup_userfaultfd_complete(&uf);
fail_uprobe_end:
        uprobe_end_dup_mmap();
        return retval;
fail_nomem_anon_vma_fork:
        mpol_put(vma_policy(tmp));
fail_nomem_policy:
        vm_area_free(tmp);
fail_nomem:
        retval = -ENOMEM;
        vm_unacct_memory(charge);
        goto out;
}
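/*
 * The two per-vma opt-outs honoured above map directly to madvise()
 * flags: MADV_DONTFORK sets VM_DONTCOPY (the vma is skipped entirely)
 * and MADV_WIPEONFORK sets VM_WIPEONFORK (the child gets a fresh,
 * zero-filled anonymous mapping instead of copied page tables).
 */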
static inline int mm_alloc_pgd(struct mm_struct *mm)
{
        mm->pgd = pgd_alloc(mm);
        if (unlikely(!mm->pgd))
                return -ENOMEM;
        return 0;
}

static inline void mm_free_pgd(struct mm_struct *mm)
{
        pgd_free(mm, mm->pgd);
}
#else
static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
{
        down_write(&oldmm->mmap_sem);
        RCU_INIT_POINTER(mm->exe_file, get_mm_exe_file(oldmm));
        up_write(&oldmm->mmap_sem);
        return 0;
}
#define mm_alloc_pgd(mm)        (0)
#define mm_free_pgd(mm)
#endif /* CONFIG_MMU */
static void check_mm(struct mm_struct *mm)
{
        int i;

        for (i = 0; i < NR_MM_COUNTERS; i++) {
                long x = atomic_long_read(&mm->rss_stat.count[i]);

                if (unlikely(x))
                        printk(KERN_ALERT "BUG: Bad rss-counter state "
                                          "mm:%p idx:%d val:%ld\n", mm, i, x);
        }

        if (mm_pgtables_bytes(mm))
                pr_alert("BUG: non-zero pgtables_bytes on freeing mm: %ld\n",
                                mm_pgtables_bytes(mm));

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
        VM_BUG_ON_MM(mm->pmd_huge_pte, mm);
#endif
}

#define allocate_mm()   (kmem_cache_alloc(mm_cachep, GFP_KERNEL))
#define free_mm(mm)     (kmem_cache_free(mm_cachep, (mm)))
/*
 * Called when the last reference to the mm
 * is dropped: either by a lazy thread or by
 * mmput. Free the page directory and the mm.
 */
void __mmdrop(struct mm_struct *mm)
{
        BUG_ON(mm == &init_mm);
        WARN_ON_ONCE(mm == current->mm);
        WARN_ON_ONCE(mm == current->active_mm);
        mm_free_pgd(mm);
        destroy_context(mm);
        mmu_notifier_mm_destroy(mm);
        check_mm(mm);
        put_user_ns(mm->user_ns);
        free_mm(mm);
}
EXPORT_SYMBOL_GPL(__mmdrop);
static void mmdrop_async_fn(struct work_struct *work)
{
        struct mm_struct *mm;

        mm = container_of(work, struct mm_struct, async_put_work);
        __mmdrop(mm);
}

static void mmdrop_async(struct mm_struct *mm)
{
        if (unlikely(atomic_dec_and_test(&mm->mm_count))) {
                INIT_WORK(&mm->async_put_work, mmdrop_async_fn);
                schedule_work(&mm->async_put_work);
        }
}
static inline void free_signal_struct(struct signal_struct *sig)
{
        taskstats_tgid_free(sig);
        sched_autogroup_exit(sig);
        /*
         * __mmdrop is not safe to call from softirq context on x86 due to
         * pgd_dtor so postpone it to the async context
         */
        if (sig->oom_mm)
                mmdrop_async(sig->oom_mm);
        kmem_cache_free(signal_cachep, sig);
}

static inline void put_signal_struct(struct signal_struct *sig)
{
        if (refcount_dec_and_test(&sig->sigcnt))
                free_signal_struct(sig);
}
void __put_task_struct(struct task_struct *tsk)
{
        WARN_ON(!tsk->exit_state);
        WARN_ON(refcount_read(&tsk->usage));
        WARN_ON(tsk == current);

        cgroup_free(tsk);
        task_numa_free(tsk, true);
        security_task_free(tsk);
        exit_creds(tsk);
        delayacct_tsk_free(tsk);
        put_signal_struct(tsk->signal);

        if (!profile_handoff_task(tsk))
                free_task(tsk);
}
EXPORT_SYMBOL_GPL(__put_task_struct);

void __init __weak arch_task_cache_init(void) { }
static void set_max_threads(unsigned int max_threads_suggested)
{
        u64 threads;
        unsigned long nr_pages = totalram_pages();

        /*
         * The number of threads shall be limited such that the thread
         * structures may only consume a small part of the available memory.
         */
        if (fls64(nr_pages) + fls64(PAGE_SIZE) > 64)
                threads = MAX_THREADS;
        else
                threads = div64_u64((u64) nr_pages * (u64) PAGE_SIZE,
                                    (u64) THREAD_SIZE * 8UL);

        if (threads > max_threads_suggested)
                threads = max_threads_suggested;

        max_threads = clamp_t(u64, threads, MIN_THREADS, MAX_THREADS);
}
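/*
 * Worked example: with 4 GiB of RAM (nr_pages * PAGE_SIZE = 2^32) and
 * 16 KiB stacks, threads = 2^32 / (2^14 * 8) = 32768, i.e. thread
 * stacks may consume at most one eighth of available memory.
 */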
#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
/* Initialized by the architecture: */
int arch_task_struct_size __read_mostly;
#endif

#ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
static void task_struct_whitelist(unsigned long *offset, unsigned long *size)
{
        /* Fetch thread_struct whitelist for the architecture. */
        arch_thread_struct_whitelist(offset, size);

        /*
         * Handle zero-sized whitelist or empty thread_struct, otherwise
         * adjust offset to position of thread_struct in task_struct.
         */
        if (unlikely(*size == 0))
                *offset = 0;
        else
                *offset += offsetof(struct task_struct, thread);
}
#endif /* CONFIG_ARCH_TASK_STRUCT_ALLOCATOR */
void __init fork_init(void)
{
        int i;
#ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
#ifndef ARCH_MIN_TASKALIGN
#define ARCH_MIN_TASKALIGN      0
#endif
        int align = max_t(int, L1_CACHE_BYTES, ARCH_MIN_TASKALIGN);
        unsigned long useroffset, usersize;

        /* create a slab on which task_structs can be allocated */
        task_struct_whitelist(&useroffset, &usersize);
        task_struct_cachep = kmem_cache_create_usercopy("task_struct",
                        arch_task_struct_size, align,
                        SLAB_PANIC|SLAB_ACCOUNT,
                        useroffset, usersize, NULL);
#endif

        /* do the arch specific task caches init */
        arch_task_cache_init();

        set_max_threads(MAX_THREADS);

        init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
        init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
        init_task.signal->rlim[RLIMIT_SIGPENDING] =
                init_task.signal->rlim[RLIMIT_NPROC];

        for (i = 0; i < UCOUNT_COUNTS; i++) {
                init_user_ns.ucount_max[i] = max_threads/2;
        }

#ifdef CONFIG_VMAP_STACK
        cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "fork:vm_stack_cache",
                          NULL, free_vm_stack_cache);
#endif

        lockdep_init_task(&init_task);
        uprobes_init();
}
int __weak arch_dup_task_struct(struct task_struct *dst,
                                               struct task_struct *src)
{
        *dst = *src;
        return 0;
}

void set_task_stack_end_magic(struct task_struct *tsk)
{
        unsigned long *stackend;

        stackend = end_of_stack(tsk);
        *stackend = STACK_END_MAGIC;    /* for overflow detection */
}
static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
{
        struct task_struct *tsk;
        unsigned long *stack;
        struct vm_struct *stack_vm_area __maybe_unused;
        int err;

        if (node == NUMA_NO_NODE)
                node = tsk_fork_get_node(orig);
        tsk = alloc_task_struct_node(node);
        if (!tsk)
                return NULL;

        stack = alloc_thread_stack_node(tsk, node);
        if (!stack)
                goto free_tsk;

        if (memcg_charge_kernel_stack(tsk))
                goto free_stack;

        stack_vm_area = task_stack_vm_area(tsk);

        err = arch_dup_task_struct(tsk, orig);

        /*
         * arch_dup_task_struct() clobbers the stack-related fields.  Make
         * sure they're properly initialized before using any stack-related
         * functions again.
         */
        tsk->stack = stack;
#ifdef CONFIG_VMAP_STACK
        tsk->stack_vm_area = stack_vm_area;
#endif
#ifdef CONFIG_THREAD_INFO_IN_TASK
        refcount_set(&tsk->stack_refcount, 1);
#endif

        if (err)
                goto free_stack;

#ifdef CONFIG_SECCOMP
        /*
         * We must handle setting up seccomp filters once we're under
         * the sighand lock in case orig has changed between now and
         * then. Until then, filter must be NULL to avoid messing up
         * the usage counts on the error path calling free_task.
         */
        tsk->seccomp.filter = NULL;
#endif

        setup_thread_stack(tsk, orig);
        clear_user_return_notifier(tsk);
        clear_tsk_need_resched(tsk);
        set_task_stack_end_magic(tsk);

#ifdef CONFIG_STACKPROTECTOR
        tsk->stack_canary = get_random_canary();
#endif
        if (orig->cpus_ptr == &orig->cpus_mask)
                tsk->cpus_ptr = &tsk->cpus_mask;

        /*
         * One for us, one for whoever does the "release_task()" (usually
         * parent)
         */
        refcount_set(&tsk->usage, 2);
#ifdef CONFIG_BLK_DEV_IO_TRACE
        tsk->btrace_seq = 0;
#endif
        tsk->splice_pipe = NULL;
        tsk->task_frag.page = NULL;
        tsk->wake_q.next = NULL;

        account_kernel_stack(tsk, 1);

        kcov_task_init(tsk);

#ifdef CONFIG_FAULT_INJECTION
        tsk->fail_nth = 0;
#endif

#ifdef CONFIG_BLK_CGROUP
        tsk->throttle_queue = NULL;
        tsk->use_memdelay = 0;
#endif

#ifdef CONFIG_MEMCG
        tsk->active_memcg = NULL;
#endif
        return tsk;

free_stack:
        free_thread_stack(tsk);
free_tsk:
        free_task_struct(tsk);
        return NULL;
}
__cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);

static unsigned long default_dump_filter = MMF_DUMP_FILTER_DEFAULT;

static int __init coredump_filter_setup(char *s)
{
        default_dump_filter =
                (simple_strtoul(s, NULL, 0) << MMF_DUMP_FILTER_SHIFT) &
                MMF_DUMP_FILTER_MASK;
        return 1;
}

__setup("coredump_filter=", coredump_filter_setup);
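/*
 * For example, booting with "coredump_filter=0x23" makes anonymous
 * private, anonymous shared and hugetlb-private mappings dumpable by
 * default; each process can still override the inherited value later
 * through /proc/<pid>/coredump_filter.
 */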
#include <linux/init_task.h>

static void mm_init_aio(struct mm_struct *mm)
{
#ifdef CONFIG_AIO
        spin_lock_init(&mm->ioctx_lock);
        mm->ioctx_table = NULL;
#endif
}

static __always_inline void mm_clear_owner(struct mm_struct *mm,
                                           struct task_struct *p)
{
#ifdef CONFIG_MEMCG
        if (mm->owner == p)
                WRITE_ONCE(mm->owner, NULL);
#endif
}

static void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
{
#ifdef CONFIG_MEMCG
        mm->owner = p;
#endif
}

static void mm_init_uprobes_state(struct mm_struct *mm)
{
#ifdef CONFIG_UPROBES
        mm->uprobes_state.xol_area = NULL;
#endif
}
static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
        struct user_namespace *user_ns)
{
        mm->mmap = NULL;
        mm->mm_rb = RB_ROOT;
        mm->vmacache_seqnum = 0;
        atomic_set(&mm->mm_users, 1);
        atomic_set(&mm->mm_count, 1);
        init_rwsem(&mm->mmap_sem);
        INIT_LIST_HEAD(&mm->mmlist);
        mm->core_state = NULL;
        mm_pgtables_bytes_init(mm);
        mm->map_count = 0;
        mm->locked_vm = 0;
        atomic64_set(&mm->pinned_vm, 0);
        memset(&mm->rss_stat, 0, sizeof(mm->rss_stat));
        spin_lock_init(&mm->page_table_lock);
        spin_lock_init(&mm->arg_lock);
        mm_init_cpumask(mm);
        mm_init_aio(mm);
        mm_init_owner(mm, p);
        RCU_INIT_POINTER(mm->exe_file, NULL);
        mmu_notifier_mm_init(mm);
        init_tlb_flush_pending(mm);
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
        mm->pmd_huge_pte = NULL;
#endif
        mm_init_uprobes_state(mm);

        if (current->mm) {
                mm->flags = current->mm->flags & MMF_INIT_MASK;
                mm->def_flags = current->mm->def_flags & VM_INIT_DEF_MASK;
        } else {
                mm->flags = default_dump_filter;
                mm->def_flags = 0;
        }

        if (mm_alloc_pgd(mm))
                goto fail_nopgd;

        if (init_new_context(p, mm))
                goto fail_nocontext;

        mm->user_ns = get_user_ns(user_ns);
        return mm;

fail_nocontext:
        mm_free_pgd(mm);
fail_nopgd:
        free_mm(mm);
        return NULL;
}
/*
 * Allocate and initialize an mm_struct.
 */
struct mm_struct *mm_alloc(void)
{
        struct mm_struct *mm;

        mm = allocate_mm();
        if (!mm)
                return NULL;

        memset(mm, 0, sizeof(*mm));
        return mm_init(mm, current, current_user_ns());
}
static inline void __mmput(struct mm_struct *mm)
{
        VM_BUG_ON(atomic_read(&mm->mm_users));

        uprobe_clear_state(mm);
        exit_aio(mm);
        ksm_exit(mm);
        khugepaged_exit(mm); /* must run before exit_mmap */
        exit_mmap(mm);
        mm_put_huge_zero_page(mm);
        set_mm_exe_file(mm, NULL);
        if (!list_empty(&mm->mmlist)) {
                spin_lock(&mmlist_lock);
                list_del(&mm->mmlist);
                spin_unlock(&mmlist_lock);
        }
        if (mm->binfmt)
                module_put(mm->binfmt->module);
        mmdrop(mm);
}

/*
 * Decrement the use count and release all resources for an mm.
 */
void mmput(struct mm_struct *mm)
{
        might_sleep();

        if (atomic_dec_and_test(&mm->mm_users))
                __mmput(mm);
}
EXPORT_SYMBOL_GPL(mmput);
#ifdef CONFIG_MMU
static void mmput_async_fn(struct work_struct *work)
{
        struct mm_struct *mm = container_of(work, struct mm_struct,
                                            async_put_work);

        __mmput(mm);
}

void mmput_async(struct mm_struct *mm)
{
        if (atomic_dec_and_test(&mm->mm_users)) {
                INIT_WORK(&mm->async_put_work, mmput_async_fn);
                schedule_work(&mm->async_put_work);
        }
}
#endif
/**
 * set_mm_exe_file - change a reference to the mm's executable file
 *
 * This changes mm's executable file (shown as symlink /proc/[pid]/exe).
 *
 * Main users are mmput() and sys_execve(). Callers prevent concurrent
 * invocations: in mmput() nobody alive left, in execve task is single
 * threaded. sys_prctl(PR_SET_MM_MAP/EXE_FILE) also needs to set the
 * mm->exe_file, but does so without using set_mm_exe_file() in order
 * to avoid the need for any locks.
 */
void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
{
        struct file *old_exe_file;

        /*
         * It is safe to dereference the exe_file without RCU as
         * this function is only called if nobody else can access
         * this mm -- see comment above for justification.
         */
        old_exe_file = rcu_dereference_raw(mm->exe_file);

        if (new_exe_file)
                get_file(new_exe_file);
        rcu_assign_pointer(mm->exe_file, new_exe_file);
        if (old_exe_file)
                fput(old_exe_file);
}
/**
 * get_mm_exe_file - acquire a reference to the mm's executable file
 *
 * Returns %NULL if mm has no associated executable file.
 * User must release file via fput().
 */
struct file *get_mm_exe_file(struct mm_struct *mm)
{
        struct file *exe_file;

        rcu_read_lock();
        exe_file = rcu_dereference(mm->exe_file);
        if (exe_file && !get_file_rcu(exe_file))
                exe_file = NULL;
        rcu_read_unlock();
        return exe_file;
}
EXPORT_SYMBOL(get_mm_exe_file);
/**
 * get_task_exe_file - acquire a reference to the task's executable file
 *
 * Returns %NULL if task's mm (if any) has no associated executable file or
 * this is a kernel thread with borrowed mm (see the comment above get_task_mm).
 * User must release file via fput().
 */
struct file *get_task_exe_file(struct task_struct *task)
{
        struct file *exe_file = NULL;
        struct mm_struct *mm;

        task_lock(task);
        mm = task->mm;
        if (mm) {
                if (!(task->flags & PF_KTHREAD))
                        exe_file = get_mm_exe_file(mm);
        }
        task_unlock(task);
        return exe_file;
}
EXPORT_SYMBOL(get_task_exe_file);
/**
 * get_task_mm - acquire a reference to the task's mm
 *
 * Returns %NULL if the task has no mm.  Checks that PF_KTHREAD is not
 * set (meaning this is not a kernel worker thread that has transiently
 * adopted a user mm with use_mm, e.g. to do its AIO), and if so returns
 * a reference to the mm after bumping its use count.  User must release
 * the mm via mmput() after use.  Typically used by /proc and ptrace.
 */
struct mm_struct *get_task_mm(struct task_struct *task)
{
        struct mm_struct *mm;

        task_lock(task);
        mm = task->mm;
        if (mm) {
                if (task->flags & PF_KTHREAD)
                        mm = NULL;
                else
                        mmget(mm);
        }
        task_unlock(task);
        return mm;
}
EXPORT_SYMBOL_GPL(get_task_mm);
struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
{
        struct mm_struct *mm;
        int err;

        err = mutex_lock_killable(&task->signal->cred_guard_mutex);
        if (err)
                return ERR_PTR(err);

        mm = get_task_mm(task);
        if (mm && mm != current->mm &&
                        !ptrace_may_access(task, mode)) {
                mmput(mm);
                mm = ERR_PTR(-EACCES);
        }
        mutex_unlock(&task->signal->cred_guard_mutex);

        return mm;
}
static void complete_vfork_done(struct task_struct *tsk)
{
        struct completion *vfork;

        task_lock(tsk);
        vfork = tsk->vfork_done;
        if (likely(vfork)) {
                tsk->vfork_done = NULL;
                complete(vfork);
        }
        task_unlock(tsk);
}
static int wait_for_vfork_done(struct task_struct *child,
                                struct completion *vfork)
{
        int killed;

        freezer_do_not_count();
        cgroup_enter_frozen();
        killed = wait_for_completion_killable(vfork);
        cgroup_leave_frozen(false);
        freezer_count();

        if (killed) {
                task_lock(child);
                child->vfork_done = NULL;
                task_unlock(child);
        }

        put_task_struct(child);
        return killed;
}
/* Please note the differences between mmput and mm_release.
 * mmput is called whenever we stop holding onto a mm_struct,
 * error success whatever.
 *
 * mm_release is called after a mm_struct has been removed
 * from the current process.
 *
 * This difference is important for error handling, when we
 * only half set up a mm_struct for a new process and need to restore
 * the old one.  Because we mmput the new mm_struct before
 * restoring the old one. . .
 * Eric Biederman 10 January 1998
 */
void mm_release(struct task_struct *tsk, struct mm_struct *mm)
{
        /* Get rid of any futexes when releasing the mm */
#ifdef CONFIG_FUTEX
        if (unlikely(tsk->robust_list)) {
                exit_robust_list(tsk);
                tsk->robust_list = NULL;
        }
#ifdef CONFIG_COMPAT
        if (unlikely(tsk->compat_robust_list)) {
                compat_exit_robust_list(tsk);
                tsk->compat_robust_list = NULL;
        }
#endif
        if (unlikely(!list_empty(&tsk->pi_state_list)))
                exit_pi_state_list(tsk);
#endif

        uprobe_free_utask(tsk);

        /* Get rid of any cached register state */
        deactivate_mm(tsk, mm);

        /*
         * Signal userspace if we're not exiting with a core dump
         * because we want to leave the value intact for debugging
         * purposes.
         */
        if (tsk->clear_child_tid) {
                if (!(tsk->signal->flags & SIGNAL_GROUP_COREDUMP) &&
                    atomic_read(&mm->mm_users) > 1) {
                        /*
                         * We don't check the error code - if userspace has
                         * not set up a proper pointer then tough luck.
                         */
                        put_user(0, tsk->clear_child_tid);
                        do_futex(tsk->clear_child_tid, FUTEX_WAKE,
                                        1, NULL, NULL, 0, 0);
                }
                tsk->clear_child_tid = NULL;
        }

        /*
         * All done, finally we can wake up parent and return this mm to him.
         * Also kthread_stop() uses this completion for synchronization.
         */
        if (tsk->vfork_done)
                complete_vfork_done(tsk);
}
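/*
 * The clear_child_tid store-and-wake above is what thread libraries
 * build joining on: glibc's pthread_join(), for instance, does a
 * FUTEX_WAIT on the TID word registered via CLONE_CHILD_CLEARTID /
 * set_tid_address() until this path zeroes and wakes it.
 */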
/**
 * dup_mm() - duplicates an existing mm structure
 * @tsk: the task_struct with which the new mm will be associated.
 * @oldmm: the mm to duplicate.
 *
 * Allocates a new mm structure and duplicates the provided @oldmm structure
 * content into it.
 *
 * Return: the duplicated mm or NULL on failure.
 */
static struct mm_struct *dup_mm(struct task_struct *tsk,
                                struct mm_struct *oldmm)
{
        struct mm_struct *mm;
        int err;

        mm = allocate_mm();
        if (!mm)
                goto fail_nomem;

        memcpy(mm, oldmm, sizeof(*mm));

        if (!mm_init(mm, tsk, mm->user_ns))
                goto fail_nomem;

        err = dup_mmap(mm, oldmm);
        if (err)
                goto free_pt;

        mm->hiwater_rss = get_mm_rss(mm);
        mm->hiwater_vm = mm->total_vm;

        if (mm->binfmt && !try_module_get(mm->binfmt->module))
                goto free_pt;

        return mm;

free_pt:
        /* don't put binfmt in mmput, we haven't got module yet */
        mm->binfmt = NULL;
        mm_init_owner(mm, NULL);
        mmput(mm);

fail_nomem:
        return NULL;
}
static int copy_mm(unsigned long clone_flags, struct task_struct *tsk)
{
        struct mm_struct *mm, *oldmm;
        int retval;

        tsk->min_flt = tsk->maj_flt = 0;
        tsk->nvcsw = tsk->nivcsw = 0;
#ifdef CONFIG_DETECT_HUNG_TASK
        tsk->last_switch_count = tsk->nvcsw + tsk->nivcsw;
        tsk->last_switch_time = 0;
#endif

        tsk->mm = NULL;
        tsk->active_mm = NULL;

        /*
         * Are we cloning a kernel thread?
         *
         * We need to steal an active VM for that..
         */
        oldmm = current->mm;
        if (!oldmm)
                return 0;

        /* initialize the new vmacache entries */
        vmacache_flush(tsk);

        if (clone_flags & CLONE_VM) {
                mmget(oldmm);
                mm = oldmm;
                goto good_mm;
        }

        retval = -ENOMEM;
        mm = dup_mm(tsk, current->mm);
        if (!mm)
                goto fail_nomem;

good_mm:
        tsk->mm = mm;
        tsk->active_mm = mm;
        return 0;

fail_nomem:
        return retval;
}
static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
{
        struct fs_struct *fs = current->fs;
        if (clone_flags & CLONE_FS) {
                /* tsk->fs is already what we want */
                spin_lock(&fs->lock);
                if (fs->in_exec) {
                        spin_unlock(&fs->lock);
                        return -EAGAIN;
                }
                fs->users++;
                spin_unlock(&fs->lock);
                return 0;
        }
        tsk->fs = copy_fs_struct(fs);
        if (!tsk->fs)
                return -ENOMEM;
        return 0;
}
static int copy_files(unsigned long clone_flags, struct task_struct *tsk)
{
        struct files_struct *oldf, *newf;
        int error = 0;

        /*
         * A background process may not have any files ...
         */
        oldf = current->files;
        if (!oldf)
                goto out;

        if (clone_flags & CLONE_FILES) {
                atomic_inc(&oldf->count);
                goto out;
        }

        newf = dup_fd(oldf, &error);
        if (!newf)
                goto out;

        tsk->files = newf;
        error = 0;
out:
        return error;
}
static int copy_io(unsigned long clone_flags, struct task_struct *tsk)
{
#ifdef CONFIG_BLOCK
        struct io_context *ioc = current->io_context;
        struct io_context *new_ioc;

        if (!ioc)
                return 0;
        /*
         * Share io context with parent, if CLONE_IO is set
         */
        if (clone_flags & CLONE_IO) {
                ioc_task_link(ioc);
                tsk->io_context = ioc;
        } else if (ioprio_valid(ioc->ioprio)) {
                new_ioc = get_task_io_context(tsk, GFP_KERNEL, NUMA_NO_NODE);
                if (unlikely(!new_ioc))
                        return -ENOMEM;

                new_ioc->ioprio = ioc->ioprio;
                put_io_context(new_ioc);
        }
#endif
        return 0;
}
static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
{
        struct sighand_struct *sig;

        if (clone_flags & CLONE_SIGHAND) {
                refcount_inc(&current->sighand->count);
                return 0;
        }
        sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
        rcu_assign_pointer(tsk->sighand, sig);
        if (!sig)
                return -ENOMEM;

        refcount_set(&sig->count, 1);
        spin_lock_irq(&current->sighand->siglock);
        memcpy(sig->action, current->sighand->action, sizeof(sig->action));
        spin_unlock_irq(&current->sighand->siglock);
        return 0;
}

void __cleanup_sighand(struct sighand_struct *sighand)
{
        if (refcount_dec_and_test(&sighand->count)) {
                signalfd_cleanup(sighand);
                /*
                 * sighand_cachep is SLAB_TYPESAFE_BY_RCU so we can free it
                 * without an RCU grace period, see __lock_task_sighand().
                 */
                kmem_cache_free(sighand_cachep, sighand);
        }
}
/*
 * Initialize POSIX timer handling for a thread group.
 */
static void posix_cpu_timers_init_group(struct signal_struct *sig)
{
        struct posix_cputimers *pct = &sig->posix_cputimers;
        unsigned long cpu_limit;

        cpu_limit = READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
        posix_cputimers_group_init(pct, cpu_limit);
}
static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
{
        struct signal_struct *sig;

        if (clone_flags & CLONE_THREAD)
                return 0;

        sig = kmem_cache_zalloc(signal_cachep, GFP_KERNEL);
        tsk->signal = sig;
        if (!sig)
                return -ENOMEM;

        sig->nr_threads = 1;
        atomic_set(&sig->live, 1);
        refcount_set(&sig->sigcnt, 1);

        /* list_add(thread_node, thread_head) without INIT_LIST_HEAD() */
        sig->thread_head = (struct list_head)LIST_HEAD_INIT(tsk->thread_node);
        tsk->thread_node = (struct list_head)LIST_HEAD_INIT(sig->thread_head);

        init_waitqueue_head(&sig->wait_chldexit);
        sig->curr_target = tsk;
        init_sigpending(&sig->shared_pending);
        INIT_HLIST_HEAD(&sig->multiprocess);
        seqlock_init(&sig->stats_lock);
        prev_cputime_init(&sig->prev_cputime);

#ifdef CONFIG_POSIX_TIMERS
        INIT_LIST_HEAD(&sig->posix_timers);
        hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        sig->real_timer.function = it_real_fn;
#endif

        task_lock(current->group_leader);
        memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
        task_unlock(current->group_leader);

        posix_cpu_timers_init_group(sig);

        tty_audit_fork(sig);
        sched_autogroup_fork(sig);

        sig->oom_score_adj = current->signal->oom_score_adj;
        sig->oom_score_adj_min = current->signal->oom_score_adj_min;

        mutex_init(&sig->cred_guard_mutex);

        return 0;
}
static void copy_seccomp(struct task_struct *p)
{
#ifdef CONFIG_SECCOMP
        /*
         * Must be called with sighand->lock held, which is common to
         * all threads in the group. Holding cred_guard_mutex is not
         * needed because this new task is not yet running and cannot
         * be racing exec.
         */
        assert_spin_locked(&current->sighand->siglock);

        /* Ref-count the new filter user, and assign it. */
        get_seccomp_filter(current);
        p->seccomp = current->seccomp;

        /*
         * Explicitly enable no_new_privs here in case it got set
         * between the task_struct being duplicated and holding the
         * sighand lock. The seccomp state and nnp must be in sync.
         */
        if (task_no_new_privs(current))
                task_set_no_new_privs(p);

        /*
         * If the parent gained a seccomp mode after copying thread
         * flags and before we held the sighand lock, we have
         * to manually enable the seccomp thread flag here.
         */
        if (p->seccomp.mode != SECCOMP_MODE_DISABLED)
                set_tsk_thread_flag(p, TIF_SECCOMP);
#endif
}
SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr)
{
        current->clear_child_tid = tidptr;

        return task_pid_vnr(current);
}
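/*
 * set_tid_address() pairs with the CLONE_CHILD_CLEARTID handling in
 * copy_process() and mm_release(): it (re)registers the user address
 * that will be zeroed and futex-woken when this thread exits.
 */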
static void rt_mutex_init_task(struct task_struct *p)
{
        raw_spin_lock_init(&p->pi_lock);
#ifdef CONFIG_RT_MUTEXES
        p->pi_waiters = RB_ROOT_CACHED;
        p->pi_top_task = NULL;
        p->pi_blocked_on = NULL;
#endif
}
static inline void init_task_pid_links(struct task_struct *task)
{
        enum pid_type type;

        for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type) {
                INIT_HLIST_NODE(&task->pid_links[type]);
        }
}

static inline void
init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid)
{
        if (type == PIDTYPE_PID)
                task->thread_pid = pid;
        else
                task->signal->pids[type] = pid;
}
static inline void rcu_copy_process(struct task_struct *p)
{
#ifdef CONFIG_PREEMPT_RCU
        p->rcu_read_lock_nesting = 0;
        p->rcu_read_unlock_special.s = 0;
        p->rcu_blocked_node = NULL;
        INIT_LIST_HEAD(&p->rcu_node_entry);
#endif /* #ifdef CONFIG_PREEMPT_RCU */
#ifdef CONFIG_TASKS_RCU
        p->rcu_tasks_holdout = false;
        INIT_LIST_HEAD(&p->rcu_tasks_holdout_list);
        p->rcu_tasks_idle_cpu = -1;
#endif /* #ifdef CONFIG_TASKS_RCU */
}
struct pid *pidfd_pid(const struct file *file)
{
        if (file->f_op == &pidfd_fops)
                return file->private_data;

        return ERR_PTR(-EBADF);
}

static int pidfd_release(struct inode *inode, struct file *file)
{
        struct pid *pid = file->private_data;

        file->private_data = NULL;
        put_pid(pid);
        return 0;
}
#ifdef CONFIG_PROC_FS
static void pidfd_show_fdinfo(struct seq_file *m, struct file *f)
{
        struct pid_namespace *ns = proc_pid_ns(file_inode(m->file));
        struct pid *pid = f->private_data;

        seq_put_decimal_ull(m, "Pid:\t", pid_nr_ns(pid, ns));
        seq_putc(m, '\n');
}
#endif
/*
 * Poll support for process exit notification.
 */
static unsigned int pidfd_poll(struct file *file, struct poll_table_struct *pts)
{
        struct task_struct *task;
        struct pid *pid = file->private_data;
        int poll_flags = 0;

        poll_wait(file, &pid->wait_pidfd, pts);

        rcu_read_lock();
        task = pid_task(pid, PIDTYPE_PID);
        /*
         * Inform pollers only when the whole thread group exits.
         * If the thread group leader exits before all other threads in the
         * group, then poll(2) should block, similar to the wait(2) family.
         */
        if (!task || (task->exit_state && thread_group_empty(task)))
                poll_flags = POLLIN | POLLRDNORM;
        rcu_read_unlock();

        return poll_flags;
}

const struct file_operations pidfd_fops = {
        .release = pidfd_release,
        .poll = pidfd_poll,
#ifdef CONFIG_PROC_FS
        .show_fdinfo = pidfd_show_fdinfo,
#endif
};
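/*
 * Illustrative userspace sketch: a pidfd obtained via clone(CLONE_PIDFD)
 * can be used to wait for process exit without installing a SIGCHLD
 * handler:
 *
 *      struct pollfd pfd = { .fd = pidfd, .events = POLLIN };
 *      poll(&pfd, 1, -1);      // returns once the thread group has exited
 */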
static void __delayed_free_task(struct rcu_head *rhp)
{
        struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);

        free_task(tsk);
}

static __always_inline void delayed_free_task(struct task_struct *tsk)
{
        if (IS_ENABLED(CONFIG_MEMCG))
                call_rcu(&tsk->rcu, __delayed_free_task);
        else
                free_task(tsk);
}
/*
 * This creates a new process as a copy of the old one,
 * but does not actually start it yet.
 *
 * It copies the registers, and all the appropriate
 * parts of the process environment (as per the clone
 * flags). The actual kick-off is left to the caller.
 */
static __latent_entropy struct task_struct *copy_process(
                                        struct pid *pid,
                                        int trace,
                                        int node,
                                        struct kernel_clone_args *args)
{
        int pidfd = -1, retval;
        struct task_struct *p;
        struct multiprocess_signals delayed;
        struct file *pidfile = NULL;
        u64 clone_flags = args->flags;
1760 u64 clone_flags
= args
->flags
;
1763 * Don't allow sharing the root directory with processes in a different
1766 if ((clone_flags
& (CLONE_NEWNS
|CLONE_FS
)) == (CLONE_NEWNS
|CLONE_FS
))
1767 return ERR_PTR(-EINVAL
);
1769 if ((clone_flags
& (CLONE_NEWUSER
|CLONE_FS
)) == (CLONE_NEWUSER
|CLONE_FS
))
1770 return ERR_PTR(-EINVAL
);
1773 * Thread groups must share signals as well, and detached threads
1774 * can only be started up within the thread group.
1776 if ((clone_flags
& CLONE_THREAD
) && !(clone_flags
& CLONE_SIGHAND
))
1777 return ERR_PTR(-EINVAL
);
1780 * Shared signal handlers imply shared VM. By way of the above,
1781 * thread groups also imply shared VM. Blocking this case allows
1782 * for various simplifications in other code.
1784 if ((clone_flags
& CLONE_SIGHAND
) && !(clone_flags
& CLONE_VM
))
1785 return ERR_PTR(-EINVAL
);
1788 * Siblings of global init remain as zombies on exit since they are
1789 * not reaped by their parent (swapper). To solve this and to avoid
1790 * multi-rooted process trees, prevent global and container-inits
1791 * from creating siblings.
1793 if ((clone_flags
& CLONE_PARENT
) &&
1794 current
->signal
->flags
& SIGNAL_UNKILLABLE
)
1795 return ERR_PTR(-EINVAL
);
1798 * If the new process will be in a different pid or user namespace
1799 * do not allow it to share a thread group with the forking task.
1801 if (clone_flags
& CLONE_THREAD
) {
1802 if ((clone_flags
& (CLONE_NEWUSER
| CLONE_NEWPID
)) ||
1803 (task_active_pid_ns(current
) !=
1804 current
->nsproxy
->pid_ns_for_children
))
1805 return ERR_PTR(-EINVAL
);
1808 if (clone_flags
& CLONE_PIDFD
) {
1810 * - CLONE_DETACHED is blocked so that we can potentially
1811 * reuse it later for CLONE_PIDFD.
1812 * - CLONE_THREAD is blocked until someone really needs it.
1814 if (clone_flags
& (CLONE_DETACHED
| CLONE_THREAD
))
1815 return ERR_PTR(-EINVAL
);
        /*
         * Force any signals received before this point to be delivered
         * before the fork happens.  Collect up signals sent to multiple
         * processes that happen during the fork and delay them so that
         * they appear to happen after the fork.
         */
        sigemptyset(&delayed.signal);
        INIT_HLIST_NODE(&delayed.node);

        spin_lock_irq(&current->sighand->siglock);
        if (!(clone_flags & CLONE_THREAD))
                hlist_add_head(&delayed.node, &current->signal->multiprocess);
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);
        retval = -ERESTARTNOINTR;
        if (signal_pending(current))
                goto fork_out;

        retval = -ENOMEM;
        p = dup_task_struct(current, node);
        if (!p)
                goto fork_out;
        /*
         * This _must_ happen before we call free_task(), i.e. before we jump
         * to any of the bad_fork_* labels. This is to avoid freeing
         * p->set_child_tid which is (ab)used as a kthread's data pointer for
         * kernel threads (PF_KTHREAD).
         */
        p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? args->child_tid : NULL;
        /*
         * Clear TID on mm_release()?
         */
        p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? args->child_tid : NULL;

        ftrace_graph_init_task(p);

        rt_mutex_init_task(p);

#ifdef CONFIG_PROVE_LOCKING
        DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
        DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
#endif
        retval = -EAGAIN;
        if (atomic_read(&p->real_cred->user->processes) >=
                        task_rlimit(p, RLIMIT_NPROC)) {
                if (p->real_cred->user != INIT_USER &&
                    !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
                        goto bad_fork_free;
        }
        current->flags &= ~PF_NPROC_EXCEEDED;

        retval = copy_creds(p, clone_flags);
        if (retval < 0)
                goto bad_fork_free;
        /*
         * If multiple threads are within copy_process(), then this check
         * triggers too late. This doesn't hurt, the check is only there
         * to stop root fork bombs.
         */
        retval = -EAGAIN;
        if (nr_threads >= max_threads)
                goto bad_fork_cleanup_count;

        delayacct_tsk_init(p);  /* Must remain after dup_task_struct() */
        p->flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER | PF_IDLE);
        p->flags |= PF_FORKNOEXEC;
        INIT_LIST_HEAD(&p->children);
        INIT_LIST_HEAD(&p->sibling);
        rcu_copy_process(p);
        p->vfork_done = NULL;
        spin_lock_init(&p->alloc_lock);

        init_sigpending(&p->pending);
        p->utime = p->stime = p->gtime = 0;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
        p->utimescaled = p->stimescaled = 0;
#endif
        prev_cputime_init(&p->prev_cputime);

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
        seqcount_init(&p->vtime.seqcount);
        p->vtime.starttime = 0;
        p->vtime.state = VTIME_INACTIVE;
#endif

#if defined(SPLIT_RSS_COUNTING)
        memset(&p->rss_stat, 0, sizeof(p->rss_stat));
#endif

        p->default_timer_slack_ns = current->timer_slack_ns;

#ifdef CONFIG_PSI
        p->psi_flags = 0;
#endif

        task_io_accounting_init(&p->ioac);
        acct_clear_integrals(p);

        posix_cputimers_init(&p->posix_cputimers);

        p->io_context = NULL;
        audit_set_context(p, NULL);
        cgroup_fork(p);
#ifdef CONFIG_NUMA
        p->mempolicy = mpol_dup(p->mempolicy);
        if (IS_ERR(p->mempolicy)) {
                retval = PTR_ERR(p->mempolicy);
                p->mempolicy = NULL;
                goto bad_fork_cleanup_threadgroup_lock;
        }
#endif
#ifdef CONFIG_CPUSETS
        p->cpuset_mem_spread_rotor = NUMA_NO_NODE;
        p->cpuset_slab_spread_rotor = NUMA_NO_NODE;
        seqcount_init(&p->mems_allowed_seq);
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
        p->irq_events = 0;
        p->hardirqs_enabled = 0;
        p->hardirq_enable_ip = 0;
        p->hardirq_enable_event = 0;
        p->hardirq_disable_ip = _THIS_IP_;
        p->hardirq_disable_event = 0;
        p->softirqs_enabled = 1;
        p->softirq_enable_ip = _THIS_IP_;
        p->softirq_enable_event = 0;
        p->softirq_disable_ip = 0;
        p->softirq_disable_event = 0;
        p->hardirq_context = 0;
        p->softirq_context = 0;
#endif

        p->pagefault_disabled = 0;

#ifdef CONFIG_LOCKDEP
        lockdep_init_task(p);
#endif

#ifdef CONFIG_DEBUG_MUTEXES
        p->blocked_on = NULL; /* not blocked yet */
#endif
#ifdef CONFIG_BCACHE
        p->sequential_io        = 0;
        p->sequential_io_avg    = 0;
#endif
        /* Perform scheduler related setup. Assign this task to a CPU. */
        retval = sched_fork(clone_flags, p);
        if (retval)
                goto bad_fork_cleanup_policy;

        retval = perf_event_init_task(p);
        if (retval)
                goto bad_fork_cleanup_policy;
        retval = audit_alloc(p);
        if (retval)
                goto bad_fork_cleanup_perf;
        /* copy all the process information */
        shm_init_task(p);
        retval = security_task_alloc(p, clone_flags);
        if (retval)
                goto bad_fork_cleanup_audit;
        retval = copy_semundo(clone_flags, p);
        if (retval)
                goto bad_fork_cleanup_security;
        retval = copy_files(clone_flags, p);
        if (retval)
                goto bad_fork_cleanup_semundo;
        retval = copy_fs(clone_flags, p);
        if (retval)
                goto bad_fork_cleanup_files;
        retval = copy_sighand(clone_flags, p);
        if (retval)
                goto bad_fork_cleanup_fs;
        retval = copy_signal(clone_flags, p);
        if (retval)
                goto bad_fork_cleanup_sighand;
        retval = copy_mm(clone_flags, p);
        if (retval)
                goto bad_fork_cleanup_signal;
        retval = copy_namespaces(clone_flags, p);
        if (retval)
                goto bad_fork_cleanup_mm;
        retval = copy_io(clone_flags, p);
        if (retval)
                goto bad_fork_cleanup_namespaces;
        retval = copy_thread_tls(clone_flags, args->stack, args->stack_size, p,
                                 args->tls);
        if (retval)
                goto bad_fork_cleanup_io;

        stackleak_task_init(p);
        if (pid != &init_struct_pid) {
                pid = alloc_pid(p->nsproxy->pid_ns_for_children);
                if (IS_ERR(pid)) {
                        retval = PTR_ERR(pid);
                        goto bad_fork_cleanup_thread;
                }
        }

        /*
         * This has to happen after we've potentially unshared the file
         * descriptor table (so that the pidfd doesn't leak into the child
         * if the fd table isn't shared).
         */
        if (clone_flags & CLONE_PIDFD) {
                retval = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
                if (retval < 0)
                        goto bad_fork_free_pid;

                pidfd = retval;

                pidfile = anon_inode_getfile("[pidfd]", &pidfd_fops, pid,
                                              O_RDWR | O_CLOEXEC);
                if (IS_ERR(pidfile)) {
                        put_unused_fd(pidfd);
                        retval = PTR_ERR(pidfile);
                        goto bad_fork_free_pid;
                }
                get_pid(pid);   /* held by pidfile now */

                retval = put_user(pidfd, args->pidfd);
                if (retval)
                        goto bad_fork_put_pidfd;
        }
#ifdef CONFIG_BLOCK
        p->plug = NULL;
#endif
#ifdef CONFIG_FUTEX
        p->robust_list = NULL;
#ifdef CONFIG_COMPAT
        p->compat_robust_list = NULL;
#endif
        INIT_LIST_HEAD(&p->pi_state_list);
        p->pi_state_cache = NULL;
#endif
        /*
         * sigaltstack should be cleared when sharing the same VM
         */
        if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
                sas_ss_reset(p);

        /*
         * Syscall tracing and stepping should be turned off in the
         * child regardless of CLONE_PTRACE.
         */
        user_disable_single_step(p);
        clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
#ifdef TIF_SYSCALL_EMU
        clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
#endif
        clear_tsk_latency_tracing(p);
        /* ok, now we should be set up.. */
        p->pid = pid_nr(pid);
        if (clone_flags & CLONE_THREAD) {
                p->exit_signal = -1;
                p->group_leader = current->group_leader;
                p->tgid = current->tgid;
        } else {
                if (clone_flags & CLONE_PARENT)
                        p->exit_signal = current->group_leader->exit_signal;
                else
                        p->exit_signal = args->exit_signal;
                p->group_leader = p;
                p->tgid = p->pid;
        }

        p->nr_dirtied = 0;
        p->nr_dirtied_pause = 128 >> (PAGE_SHIFT - 10);
        p->dirty_paused_when = 0;

        p->pdeath_signal = 0;
        INIT_LIST_HEAD(&p->thread_group);
        p->task_works = NULL;
        cgroup_threadgroup_change_begin(current);
        /*
         * Ensure that the cgroup subsystem policies allow the new process to be
         * forked. It should be noted that the new process's css_set can be changed
         * between here and cgroup_post_fork() if an organisation operation is in
         * progress.
         */
        retval = cgroup_can_fork(p);
        if (retval)
                goto bad_fork_cgroup_threadgroup_change_end;
        /*
         * From this point on we must avoid any synchronous user-space
         * communication until we take the tasklist-lock. In particular, we do
         * not want user-space to be able to predict the process start-time by
         * stalling fork(2) after we recorded the start_time but before it is
         * visible to the system.
         */

        p->start_time = ktime_get_ns();
        p->real_start_time = ktime_get_boottime_ns();

        /*
         * Make it visible to the rest of the system, but don't wake it up yet.
         * Need tasklist lock for parent etc handling!
         */
        write_lock_irq(&tasklist_lock);
        /* CLONE_PARENT re-uses the old parent */
        if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
                p->real_parent = current->real_parent;
                p->parent_exec_id = current->parent_exec_id;
        } else {
                p->real_parent = current;
                p->parent_exec_id = current->self_exec_id;
        }

        klp_copy_process(p);

        spin_lock(&current->sighand->siglock);

        /*
         * Copy seccomp details explicitly here, in case they were changed
         * before holding sighand lock.
         */
        copy_seccomp(p);

        rseq_fork(p, clone_flags);
        /* Don't start children in a dying pid namespace */
        if (unlikely(!(ns_of_pid(pid)->pid_allocated & PIDNS_ADDING))) {
                retval = -ENOMEM;
                goto bad_fork_cancel_cgroup;
        }

        /* Let kill terminate clone/fork in the middle */
        if (fatal_signal_pending(current)) {
                retval = -EINTR;
                goto bad_fork_cancel_cgroup;
        }
        /* past the last point of failure */
        if (pidfile)
                fd_install(pidfd, pidfile);
        init_task_pid_links(p);
        if (likely(p->pid)) {
                ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);

                init_task_pid(p, PIDTYPE_PID, pid);
                if (thread_group_leader(p)) {
                        init_task_pid(p, PIDTYPE_TGID, pid);
                        init_task_pid(p, PIDTYPE_PGID, task_pgrp(current));
                        init_task_pid(p, PIDTYPE_SID, task_session(current));

                        if (is_child_reaper(pid)) {
                                ns_of_pid(pid)->child_reaper = p;
                                p->signal->flags |= SIGNAL_UNKILLABLE;
                        }
                        p->signal->shared_pending.signal = delayed.signal;
                        p->signal->tty = tty_kref_get(current->signal->tty);
                        /*
                         * Inherit has_child_subreaper flag under the same
                         * tasklist_lock with adding child to the process tree
                         * for propagate_has_child_subreaper optimization.
                         */
                        p->signal->has_child_subreaper = p->real_parent->signal->has_child_subreaper ||
                                                         p->real_parent->signal->is_child_subreaper;
                        list_add_tail(&p->sibling, &p->real_parent->children);
                        list_add_tail_rcu(&p->tasks, &init_task.tasks);
                        attach_pid(p, PIDTYPE_TGID);
                        attach_pid(p, PIDTYPE_PGID);
                        attach_pid(p, PIDTYPE_SID);
                        __this_cpu_inc(process_counts);
                } else {
                        current->signal->nr_threads++;
                        atomic_inc(&current->signal->live);
                        refcount_inc(&current->signal->sigcnt);
                        task_join_group_stop(p);
                        list_add_tail_rcu(&p->thread_group,
                                          &p->group_leader->thread_group);
                        list_add_tail_rcu(&p->thread_node,
                                          &p->signal->thread_head);
                }
                attach_pid(p, PIDTYPE_PID);
                nr_threads++;
        }
        total_forks++;
        hlist_del_init(&delayed.node);
        spin_unlock(&current->sighand->siglock);
        syscall_tracepoint_update(p);
        write_unlock_irq(&tasklist_lock);

        proc_fork_connector(p);
        cgroup_post_fork(p);
        cgroup_threadgroup_change_end(current);
        perf_event_fork(p);

        trace_task_newtask(p, clone_flags);
        uprobe_copy_process(p, clone_flags);

        return p;
bad_fork_cancel_cgroup:
        spin_unlock(&current->sighand->siglock);
        write_unlock_irq(&tasklist_lock);
        cgroup_cancel_fork(p);
bad_fork_cgroup_threadgroup_change_end:
        cgroup_threadgroup_change_end(current);
bad_fork_put_pidfd:
        if (clone_flags & CLONE_PIDFD) {
                fput(pidfile);
                put_unused_fd(pidfd);
        }
bad_fork_free_pid:
        if (pid != &init_struct_pid)
                free_pid(pid);
bad_fork_cleanup_thread:
        exit_thread(p);
bad_fork_cleanup_io:
        if (p->io_context)
                exit_io_context(p);
bad_fork_cleanup_namespaces:
        exit_task_namespaces(p);
bad_fork_cleanup_mm:
        if (p->mm) {
                mm_clear_owner(p->mm, p);
                mmput(p->mm);
        }
bad_fork_cleanup_signal:
        if (!(clone_flags & CLONE_THREAD))
                free_signal_struct(p->signal);
bad_fork_cleanup_sighand:
        __cleanup_sighand(p->sighand);
bad_fork_cleanup_fs:
        exit_fs(p); /* blocking */
bad_fork_cleanup_files:
        exit_files(p); /* blocking */
bad_fork_cleanup_semundo:
        exit_sem(p);
bad_fork_cleanup_security:
        security_task_free(p);
bad_fork_cleanup_audit:
        audit_free(p);
bad_fork_cleanup_perf:
        perf_event_free_task(p);
bad_fork_cleanup_policy:
        lockdep_free_task(p);
#ifdef CONFIG_NUMA
        mpol_put(p->mempolicy);
bad_fork_cleanup_threadgroup_lock:
#endif
        delayacct_tsk_free(p);
bad_fork_cleanup_count:
        atomic_dec(&p->cred->user->processes);
        exit_creds(p);
bad_fork_free:
        p->state = TASK_DEAD;
        put_task_stack(p);
        delayed_free_task(p);
fork_out:
        spin_lock_irq(&current->sighand->siglock);
        hlist_del_init(&delayed.node);
        spin_unlock_irq(&current->sighand->siglock);
        return ERR_PTR(retval);
}
static inline void init_idle_pids(struct task_struct *idle)
{
        enum pid_type type;

        for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type) {
                INIT_HLIST_NODE(&idle->pid_links[type]); /* not really needed */
                init_task_pid(idle, type, &init_struct_pid);
        }
}
struct task_struct *fork_idle(int cpu)
{
        struct task_struct *task;
        struct kernel_clone_args args = {
                .flags = CLONE_VM,
        };

        task = copy_process(&init_struct_pid, 0, cpu_to_node(cpu), &args);
        if (!IS_ERR(task)) {
                init_idle_pids(task);
                init_idle(task, cpu);
        }

        return task;
}
struct mm_struct *copy_init_mm(void)
{
        return dup_mm(NULL, &init_mm);
}
/*
 *  Ok, this is the main fork-routine.
 *
 * It copies the process, and if successful kick-starts
 * it and waits for it to finish using the VM if required.
 *
 * args->exit_signal is expected to be checked for sanity by the caller.
 */
long _do_fork(struct kernel_clone_args *args)
{
        u64 clone_flags = args->flags;
        struct completion vfork;
        struct pid *pid;
        struct task_struct *p;
        int trace = 0;
        long nr;

        /*
         * Determine whether and which event to report to ptracer.  When
         * called from kernel_thread or CLONE_UNTRACED is explicitly
         * requested, no event is reported; otherwise, report if the event
         * for the type of forking is enabled.
         */
        if (!(clone_flags & CLONE_UNTRACED)) {
                if (clone_flags & CLONE_VFORK)
                        trace = PTRACE_EVENT_VFORK;
                else if (args->exit_signal != SIGCHLD)
                        trace = PTRACE_EVENT_CLONE;
                else
                        trace = PTRACE_EVENT_FORK;

                if (likely(!ptrace_event_enabled(current, trace)))
                        trace = 0;
        }

        p = copy_process(NULL, trace, NUMA_NO_NODE, args);
        add_latent_entropy();

        if (IS_ERR(p))
                return PTR_ERR(p);

        /*
         * Do this prior waking up the new thread - the thread pointer
         * might get invalid after that point, if the thread exits quickly.
         */
        trace_sched_process_fork(current, p);

        pid = get_task_pid(p, PIDTYPE_PID);
        nr = pid_vnr(pid);

        if (clone_flags & CLONE_PARENT_SETTID)
                put_user(nr, args->parent_tid);

        if (clone_flags & CLONE_VFORK) {
                p->vfork_done = &vfork;
                init_completion(&vfork);
                get_task_struct(p);
        }

        wake_up_new_task(p);

        /* forking complete and child started to run, tell ptracer */
        if (unlikely(trace))
                ptrace_event_pid(trace, pid);

        if (clone_flags & CLONE_VFORK) {
                if (!wait_for_vfork_done(p, &vfork))
                        ptrace_event_pid(PTRACE_EVENT_VFORK_DONE, pid);
        }

        put_pid(pid);
        return nr;
}
bool legacy_clone_args_valid(const struct kernel_clone_args *kargs)
{
        /* clone(CLONE_PIDFD) uses parent_tidptr to return a pidfd */
        if ((kargs->flags & CLONE_PIDFD) &&
            (kargs->flags & CLONE_PARENT_SETTID))
                return false;

        return true;
}
#ifndef CONFIG_HAVE_COPY_THREAD_TLS
/* For compatibility with architectures that call do_fork directly rather than
 * using the syscall entry points below. */
long do_fork(unsigned long clone_flags,
              unsigned long stack_start,
              unsigned long stack_size,
              int __user *parent_tidptr,
              int __user *child_tidptr)
{
        struct kernel_clone_args args = {
                .flags          = (clone_flags & ~CSIGNAL),
                .pidfd          = parent_tidptr,
                .child_tid      = child_tidptr,
                .parent_tid     = parent_tidptr,
                .exit_signal    = (clone_flags & CSIGNAL),
                .stack          = stack_start,
                .stack_size     = stack_size,
        };

        if (!legacy_clone_args_valid(&args))
                return -EINVAL;

        return _do_fork(&args);
}
#endif
/*
 * Create a kernel thread.
 */
pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
	struct kernel_clone_args args = {
		.flags		= ((flags | CLONE_VM | CLONE_UNTRACED) & ~CSIGNAL),
		.exit_signal	= (flags & CSIGNAL),
		/* kernel threads reuse ->stack/->stack_size to carry fn and arg */
		.stack		= (unsigned long)fn,
		.stack_size	= (unsigned long)arg,
	};

	return _do_fork(&args);
}
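/*
 * Illustrative use (not part of the original source); my_thread_fn and
 * do_work below are hypothetical:
 *
 *	static int my_thread_fn(void *data)
 *	{
 *		do_work(data);
 *		return 0;
 *	}
 *
 *	pid_t pid = kernel_thread(my_thread_fn, data, CLONE_FS | CLONE_FILES);
 *
 * Most in-tree code uses the higher-level kthread_create()/kthread_run()
 * API rather than calling kernel_thread() directly.
 */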
#ifdef __ARCH_WANT_SYS_FORK
SYSCALL_DEFINE0(fork)
{
#ifdef CONFIG_MMU
	struct kernel_clone_args args = {
		.exit_signal = SIGCHLD,
	};

	return _do_fork(&args);
#else
	/* cannot support in nommu mode */
	return -EINVAL;
#endif
}
#endif
#ifdef __ARCH_WANT_SYS_VFORK
SYSCALL_DEFINE0(vfork)
{
	struct kernel_clone_args args = {
		.flags		= CLONE_VFORK | CLONE_VM,
		.exit_signal	= SIGCHLD,
	};

	return _do_fork(&args);
}
#endif
#ifdef __ARCH_WANT_SYS_CLONE
#ifdef CONFIG_CLONE_BACKWARDS
SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
		 int __user *, parent_tidptr,
		 unsigned long, tls,
		 int __user *, child_tidptr)
#elif defined(CONFIG_CLONE_BACKWARDS2)
SYSCALL_DEFINE5(clone, unsigned long, newsp, unsigned long, clone_flags,
		 int __user *, parent_tidptr,
		 int __user *, child_tidptr,
		 unsigned long, tls)
#elif defined(CONFIG_CLONE_BACKWARDS3)
SYSCALL_DEFINE6(clone, unsigned long, clone_flags, unsigned long, newsp,
		int, stack_size,
		int __user *, parent_tidptr,
		int __user *, child_tidptr,
		unsigned long, tls)
#else
SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
		 int __user *, parent_tidptr,
		 int __user *, child_tidptr,
		 unsigned long, tls)
#endif
{
	struct kernel_clone_args args = {
		.flags		= (clone_flags & ~CSIGNAL),
		.pidfd		= parent_tidptr,
		.child_tid	= child_tidptr,
		.parent_tid	= parent_tidptr,
		.exit_signal	= (clone_flags & CSIGNAL),
		.stack		= newsp,
		.tls		= tls,
	};

	if (!legacy_clone_args_valid(&args))
		return -EINVAL;

	return _do_fork(&args);
}
#endif
#ifdef __ARCH_WANT_SYS_CLONE3
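/*
 * Copy a struct clone_args from userspace with forward compatibility:
 * a struct larger than the kernel knows about is accepted as long as
 * every trailing byte is zero (otherwise the call fails with E2BIG),
 * so newer userspace can run on older kernels and vice versa.
 */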
noinline static int copy_clone_args_from_user(struct kernel_clone_args *kargs,
					      struct clone_args __user *uargs,
					      size_t size)
{
	struct clone_args args;

	if (unlikely(size > PAGE_SIZE))
		return -E2BIG;

	if (unlikely(size < sizeof(struct clone_args)))
		return -EINVAL;

	if (unlikely(!access_ok(uargs, size)))
		return -EFAULT;

	if (size > sizeof(struct clone_args)) {
		unsigned char __user *addr;
		unsigned char __user *end;
		unsigned char val;

		addr = (void __user *)uargs + sizeof(struct clone_args);
		end = (void __user *)uargs + size;

		for (; addr < end; addr++) {
			if (get_user(val, addr))
				return -EFAULT;
			if (val)
				return -E2BIG;
		}

		size = sizeof(struct clone_args);
	}

	if (copy_from_user(&args, uargs, size))
		return -EFAULT;
	/*
	 * Verify that the higher 32 bits of exit_signal are unset and that
	 * it is a valid signal
	 */
	if (unlikely((args.exit_signal & ~((u64)CSIGNAL)) ||
		     !valid_signal(args.exit_signal)))
		return -EINVAL;

	*kargs = (struct kernel_clone_args){
		.flags		= args.flags,
		.pidfd		= u64_to_user_ptr(args.pidfd),
		.child_tid	= u64_to_user_ptr(args.child_tid),
		.parent_tid	= u64_to_user_ptr(args.parent_tid),
		.exit_signal	= args.exit_signal,
		.stack		= args.stack,
		.stack_size	= args.stack_size,
		.tls		= args.tls,
	};

	return 0;
}
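/*
 * Reject flag combinations that clone3() deliberately does not carry
 * over from legacy clone(): unknown flag bits, the bits reserved for
 * reuse below, and an exit_signal on thread or parent clones.
 */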
static bool clone3_args_valid(const struct kernel_clone_args *kargs)
{
	/*
	 * All lower 32 bits of the flag word are taken.
	 * Verify that no other unknown flags are passed along.
	 */
	if (kargs->flags & ~CLONE_LEGACY_FLAGS)
		return false;

	/*
	 * - make the CLONE_DETACHED bit reusable for clone3
	 * - make the CSIGNAL bits reusable for clone3
	 */
	if (kargs->flags & (CLONE_DETACHED | CSIGNAL))
		return false;

	if ((kargs->flags & (CLONE_THREAD | CLONE_PARENT)) &&
	    kargs->exit_signal)
		return false;

	return true;
}
SYSCALL_DEFINE2(clone3, struct clone_args __user *, uargs, size_t, size)
{
	int err;

	struct kernel_clone_args kargs;

	err = copy_clone_args_from_user(&kargs, uargs, size);
	if (err)
		return err;

	if (!clone3_args_valid(&kargs))
		return -EINVAL;

	return _do_fork(&kargs);
}
#endif
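/*
 * Illustrative userspace call (not part of the original source). With
 * no libc wrapper, clone3 is reached via syscall(2):
 *
 *	int pidfd = -1;
 *	struct clone_args args = {
 *		.flags		= CLONE_PIDFD,
 *		.pidfd		= (__u64)(uintptr_t)&pidfd,
 *		.exit_signal	= SIGCHLD,
 *	};
 *	pid_t pid = syscall(__NR_clone3, &args, sizeof(args));
 *
 * which returns 0 in the child, the child's pid in the parent, and -1
 * with errno set on error; the pidfd is delivered through *pidfd.
 */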
/*
 * Depth-first walk of the process tree below @top, under tasklist_lock.
 * The visitor's return value steers the walk: a negative value aborts
 * the walk, a positive value descends into that child's subtree, and
 * zero moves on to the next sibling.
 */
void walk_process_tree(struct task_struct *top, proc_visitor visitor, void *data)
{
	struct task_struct *leader, *parent, *child;
	int res;

	read_lock(&tasklist_lock);
	leader = top = top->group_leader;
down:
	for_each_thread(leader, parent) {
		list_for_each_entry(child, &parent->children, sibling) {
			res = visitor(child, data);
			if (res) {
				if (res < 0)
					goto out;

				leader = child;
				goto down;
			}
		}
	}
up:
	if (leader != top) {
		child = leader;
		parent = child->real_parent;
		leader = parent->group_leader;
		goto up;
	}
out:
	read_unlock(&tasklist_lock);
}
#ifndef ARCH_MIN_MMSTRUCT_ALIGN
#define ARCH_MIN_MMSTRUCT_ALIGN 0
#endif
static void sighand_ctor(void *data)
{
	struct sighand_struct *sighand = data;

	spin_lock_init(&sighand->siglock);
	init_waitqueue_head(&sighand->signalfd_wqh);
}
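/*
 * Set up the slab caches for the structures copied or shared at fork
 * time: signal handling state, open files, filesystem state and the
 * mm_struct itself.
 */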
void __init proc_caches_init(void)
{
	unsigned int mm_size;

	sighand_cachep = kmem_cache_create("sighand_cache",
			sizeof(struct sighand_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_TYPESAFE_BY_RCU|
			SLAB_ACCOUNT, sighand_ctor);
	signal_cachep = kmem_cache_create("signal_cache",
			sizeof(struct signal_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
			NULL);
	files_cachep = kmem_cache_create("files_cache",
			sizeof(struct files_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
			NULL);
	fs_cachep = kmem_cache_create("fs_cache",
			sizeof(struct fs_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
			NULL);

	/*
	 * The mm_cpumask is located at the end of mm_struct, and is
	 * dynamically sized based on the maximum CPU number this system
	 * can have, taking hotplug into account (nr_cpu_ids).
	 */
	mm_size = sizeof(struct mm_struct) + cpumask_size();

	mm_cachep = kmem_cache_create_usercopy("mm_struct",
			mm_size, ARCH_MIN_MMSTRUCT_ALIGN,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
			offsetof(struct mm_struct, saved_auxv),
			sizeof_field(struct mm_struct, saved_auxv),
			NULL);
	vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC|SLAB_ACCOUNT);

	mmap_init();
	nsproxy_cache_init();
}
/*
 * Check constraints on flags passed to the unshare system call.
 */
static int check_unshare_flags(unsigned long unshare_flags)
{
	if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
				CLONE_VM|CLONE_FILES|CLONE_SYSVSEM|
				CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWNET|
				CLONE_NEWUSER|CLONE_NEWPID|CLONE_NEWCGROUP))
		return -EINVAL;
	/*
	 * Not implemented, but pretend it works if there is nothing
	 * to unshare.  Note that unsharing the address space or the
	 * signal handlers also needs to unshare the signal queues (aka
	 * CLONE_THREAD).
	 */
	if (unshare_flags & (CLONE_THREAD | CLONE_SIGHAND | CLONE_VM)) {
		if (!thread_group_empty(current))
			return -EINVAL;
	}
	if (unshare_flags & (CLONE_SIGHAND | CLONE_VM)) {
		if (refcount_read(&current->sighand->count) > 1)
			return -EINVAL;
	}
	if (unshare_flags & CLONE_VM) {
		if (!current_is_single_threaded())
			return -EINVAL;
	}

	return 0;
}
/*
 * Unshare the filesystem structure if it is being shared
 */
static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
{
	struct fs_struct *fs = current->fs;

	if (!(unshare_flags & CLONE_FS) || !fs)
		return 0;

	/* don't need lock here; in the worst case we'll do useless copy */
	if (fs->users == 1)
		return 0;

	*new_fsp = copy_fs_struct(fs);
	if (!*new_fsp)
		return -ENOMEM;

	return 0;
}
/*
 * Unshare file descriptor table if it is being shared
 */
static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp)
{
	struct files_struct *fd = current->files;
	int error = 0;

	if ((unshare_flags & CLONE_FILES) &&
	    (fd && atomic_read(&fd->count) > 1)) {
		*new_fdp = dup_fd(fd, &error);
		if (!*new_fdp)
			return error;
	}

	return 0;
}
/*
 * unshare allows a process to 'unshare' part of the process
 * context which was originally shared using clone.  copy_*
 * functions used by do_fork() cannot be used here directly
 * because they modify an inactive task_struct that is being
 * constructed. Here we are modifying the current, active,
 * task_struct.
 */
int ksys_unshare(unsigned long unshare_flags)
{
	struct fs_struct *fs, *new_fs = NULL;
	struct files_struct *fd, *new_fd = NULL;
	struct cred *new_cred = NULL;
	struct nsproxy *new_nsproxy = NULL;
	int do_sysvsem = 0;
	int err;

	/*
	 * If unsharing a user namespace, must also unshare the thread group
	 * and unshare the filesystem root and working directories.
	 */
	if (unshare_flags & CLONE_NEWUSER)
		unshare_flags |= CLONE_THREAD | CLONE_FS;
	/*
	 * If unsharing vm, must also unshare signal handlers.
	 */
	if (unshare_flags & CLONE_VM)
		unshare_flags |= CLONE_SIGHAND;
	/*
	 * If unsharing signal handlers, must also unshare the signal queues.
	 */
	if (unshare_flags & CLONE_SIGHAND)
		unshare_flags |= CLONE_THREAD;
	/*
	 * If unsharing a namespace, must also unshare filesystem information.
	 */
	if (unshare_flags & CLONE_NEWNS)
		unshare_flags |= CLONE_FS;

	err = check_unshare_flags(unshare_flags);
	if (err)
		goto bad_unshare_out;
	/*
	 * CLONE_NEWIPC must also detach from the undolist: after switching
	 * to a new ipc namespace, the semaphore arrays from the old
	 * namespace are unreachable.
	 */
	if (unshare_flags & (CLONE_NEWIPC|CLONE_SYSVSEM))
		do_sysvsem = 1;
	err = unshare_fs(unshare_flags, &new_fs);
	if (err)
		goto bad_unshare_out;
	err = unshare_fd(unshare_flags, &new_fd);
	if (err)
		goto bad_unshare_cleanup_fs;
	err = unshare_userns(unshare_flags, &new_cred);
	if (err)
		goto bad_unshare_cleanup_fd;
	err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy,
					 new_cred, new_fs);
	if (err)
		goto bad_unshare_cleanup_cred;

	if (new_fs || new_fd || do_sysvsem || new_cred || new_nsproxy) {
		if (do_sysvsem) {
			/*
			 * CLONE_SYSVSEM is equivalent to sys_exit().
			 */
			exit_sem(current);
		}
		if (unshare_flags & CLONE_NEWIPC) {
			/* Orphan segments in old ns (see sem above). */
			exit_shm(current);
			shm_init_task(current);
		}

		if (new_nsproxy)
			switch_task_namespaces(current, new_nsproxy);

		task_lock(current);

		if (new_fs) {
			fs = current->fs;
			spin_lock(&fs->lock);
			current->fs = new_fs;
			if (--fs->users)
				new_fs = NULL;
			else
				new_fs = fs;
			spin_unlock(&fs->lock);
		}

		if (new_fd) {
			fd = current->files;
			current->files = new_fd;
			new_fd = fd;
		}

		task_unlock(current);

		if (new_cred) {
			/* Install the new user namespace */
			commit_creds(new_cred);
			new_cred = NULL;
		}
	}

	perf_event_namespaces(current);

bad_unshare_cleanup_cred:
	if (new_cred)
		put_cred(new_cred);
bad_unshare_cleanup_fd:
	if (new_fd)
		put_files_struct(new_fd);

bad_unshare_cleanup_fs:
	if (new_fs)
		free_fs_struct(new_fs);

bad_unshare_out:
	return err;
}
SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
{
	return ksys_unshare(unshare_flags);
}
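/*
 * Illustrative userspace call (not part of the original source): after
 * a successful unshare(CLONE_NEWNS), the calling process has a private
 * mount namespace, e.g.
 *
 *	if (unshare(CLONE_NEWNS) != 0)
 *		err(1, "unshare");
 *
 * Most namespace types require CAP_SYS_ADMIN in the relevant user
 * namespace.
 */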
/*
 *	Helper to unshare the files of the current task.
 *	We don't want to expose copy_files internals to
 *	the exec layer of the kernel.
 */
int unshare_files(struct files_struct **displaced)
{
	struct task_struct *task = current;
	struct files_struct *copy = NULL;
	int error;

	error = unshare_fd(CLONE_FILES, &copy);
	if (error || !copy) {
		*displaced = NULL;
		return error;
	}
	*displaced = task->files;
	task_lock(task);
	task->files = copy;
	task_unlock(task);
	return 0;
}
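/*
 * Handler for the kernel.threads-max sysctl (/proc/sys/kernel/threads-max):
 * reads report the current limit; writes are clamped to
 * [MIN_THREADS, MAX_THREADS] before being applied via set_max_threads().
 */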
int sysctl_max_threads(struct ctl_table *table, int write,
		       void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct ctl_table t;
	int ret;
	int threads = max_threads;
	int min = MIN_THREADS;
	int max = MAX_THREADS;

	t = *table;
	t.data = &threads;
	t.extra1 = &min;
	t.extra2 = &max;

	ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
	if (ret || !write)
		return ret;

	set_max_threads(threads);

	return 0;
}