/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_TASK_H
#define _LINUX_SCHED_TASK_H

/*
 * Interface between the scheduler and various task lifetime (fork()/exit())
 * functionality:
 */

#include <linux/sched.h>
#include <linux/uaccess.h>

struct task_struct;
struct rusage;
union thread_union;

/* All the bits taken by the old clone syscall. */
#define CLONE_LEGACY_FLAGS 0xffffffffULL

struct kernel_clone_args {
	u64 flags;
	int __user *pidfd;
	int __user *child_tid;
	int __user *parent_tid;
	int exit_signal;
	unsigned long stack;
	unsigned long stack_size;
	unsigned long tls;
	pid_t *set_tid;
	/* Number of elements in *set_tid */
	size_t set_tid_size;
};
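
/*
 * Illustrative sketch (not part of this header's API contract): in-kernel
 * callers fill a kernel_clone_args on the stack and hand it to _do_fork(),
 * declared below.  kernel_thread() does roughly the following; note that
 * for kernel threads ->stack and ->stack_size carry the thread function
 * and its argument rather than a user stack:
 *
 *	struct kernel_clone_args args = {
 *		.flags		= ((flags | CLONE_VM | CLONE_UNTRACED) & ~CSIGNAL),
 *		.exit_signal	= (flags & CSIGNAL),
 *		.stack		= (unsigned long)fn,
 *		.stack_size	= (unsigned long)arg,
 *	};
 *
 *	return _do_fork(&args);
 */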

/*
 * This serializes "schedule()" and also protects
 * the run-queue from deletions/modifications (but
 * _adding_ to the beginning of the run-queue has
 * a separate lock).
 */
extern rwlock_t tasklist_lock;
extern spinlock_t mmlist_lock;
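
/*
 * Illustrative usage sketch (assumes <linux/sched/signal.h> for
 * for_each_process()): taking tasklist_lock for reading keeps tasks from
 * being unhashed and released while walking the global task list:
 *
 *	struct task_struct *p;
 *
 *	read_lock(&tasklist_lock);
 *	for_each_process(p) {
 *		... p cannot be released here; do not sleep ...
 *	}
 *	read_unlock(&tasklist_lock);
 */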

extern union thread_union init_thread_union;
extern struct task_struct init_task;

#ifdef CONFIG_PROVE_RCU
extern int lockdep_tasklist_lock_is_held(void);
#endif /* #ifdef CONFIG_PROVE_RCU */

extern asmlinkage void schedule_tail(struct task_struct *prev);
extern void init_idle(struct task_struct *idle, int cpu);

extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
extern void sched_dead(struct task_struct *p);

void __noreturn do_task_dead(void);

extern void proc_caches_init(void);

extern void fork_init(void);

extern void release_task(struct task_struct * p);

#ifdef CONFIG_HAVE_COPY_THREAD_TLS
extern int copy_thread_tls(unsigned long, unsigned long, unsigned long,
			struct task_struct *, unsigned long);
#else
extern int copy_thread(unsigned long, unsigned long, unsigned long,
			struct task_struct *);

/* Architectures that haven't opted into copy_thread_tls get the tls argument
 * via pt_regs, so ignore the tls argument passed via C. */
static inline int copy_thread_tls(
		unsigned long clone_flags, unsigned long sp, unsigned long arg,
		struct task_struct *p, unsigned long tls)
{
	return copy_thread(clone_flags, sp, arg, p);
}
#endif
extern void flush_thread(void);

#ifdef CONFIG_HAVE_EXIT_THREAD
extern void exit_thread(struct task_struct *tsk);
#else
static inline void exit_thread(struct task_struct *tsk)
{
}
#endif
extern void do_group_exit(int);

extern void exit_files(struct task_struct *);
extern void exit_itimers(struct signal_struct *);

extern long _do_fork(struct kernel_clone_args *kargs);
extern bool legacy_clone_args_valid(const struct kernel_clone_args *kargs);
extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
struct task_struct *fork_idle(int);
struct mm_struct *copy_init_mm(void);
extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
extern long kernel_wait4(pid_t, int __user *, int, struct rusage *);
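
/*
 * Illustrative sketch: kernel_thread() and kernel_wait4() pair up for
 * synchronous in-kernel helpers; the usermodehelper code, for instance,
 * does roughly the following (helper_fn and data are placeholders, not
 * names from this header):
 *
 *	pid_t pid;
 *	int ret = -ECHILD;
 *
 *	pid = kernel_thread(helper_fn, data, SIGCHLD);
 *	if (pid < 0)
 *		return pid;
 *	kernel_wait4(pid, (int __user *)&ret, 0, NULL);
 */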

extern void free_task(struct task_struct *tsk);

/* sched_exec is called by processes performing an exec */
#ifdef CONFIG_SMP
extern void sched_exec(void);
#else
#define sched_exec() {}
#endif

static inline struct task_struct *get_task_struct(struct task_struct *t)
{
	refcount_inc(&t->usage);
	return t;
}

extern void __put_task_struct(struct task_struct *t);

static inline void put_task_struct(struct task_struct *t)
{
	if (refcount_dec_and_test(&t->usage))
		__put_task_struct(t);
}
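
/*
 * Illustrative sketch: the usual pattern is to take a reference while a
 * task pointer must stay valid outside of RCU or tasklist protection, and
 * to drop it afterwards; the task is freed once the last reference goes:
 *
 *	struct task_struct *t = get_task_struct(current);
 *
 *	... use t, possibly after sleeping ...
 *
 *	put_task_struct(t);
 */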

void put_task_struct_rcu_user(struct task_struct *task);

#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
extern int arch_task_struct_size __read_mostly;
#else
# define arch_task_struct_size (sizeof(struct task_struct))
#endif

#ifndef CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST
/*
 * If an architecture has not declared a thread_struct whitelist we
 * must assume something there may need to be copied to userspace.
 */
static inline void arch_thread_struct_whitelist(unsigned long *offset,
						unsigned long *size)
{
	*offset = 0;
	/* Handle dynamically sized thread_struct. */
	*size = arch_task_struct_size - offsetof(struct task_struct, thread);
}
#endif
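
/*
 * Illustrative sketch (hypothetical architecture, not from this header):
 * an architecture selecting CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST
 * instead exposes only the thread_struct region that is legitimately
 * copied to/from userspace, e.g. an embedded FPU state field:
 *
 *	static inline void arch_thread_struct_whitelist(unsigned long *offset,
 *							unsigned long *size)
 *	{
 *		*offset = offsetof(struct thread_struct, fpstate);
 *		*size	= sizeof_field(struct thread_struct, fpstate);
 *	}
 */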

#ifdef CONFIG_VMAP_STACK
static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
{
	return t->stack_vm_area;
}
#else
static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
{
	return NULL;
}
#endif

/*
 * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
 * subscriptions and synchronises with wait4(). Also used in procfs. Also
 * pins the final release of task.io_context. Also protects ->cpuset and
 * ->cgroup.subsys[]. And ->vfork_done.
 *
 * Nests both inside and outside of read_lock(&tasklist_lock).
 * It must not be nested with write_lock_irq(&tasklist_lock),
 * neither inside nor outside.
 */
static inline void task_lock(struct task_struct *p)
{
	spin_lock(&p->alloc_lock);
}

static inline void task_unlock(struct task_struct *p)
{
	spin_unlock(&p->alloc_lock);
}
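
/*
 * Illustrative sketch: task_lock() stabilizes the fields listed above,
 * e.g. for a consistent read of ->comm (this is essentially what
 * get_task_comm() does):
 *
 *	char buf[TASK_COMM_LEN];
 *
 *	task_lock(tsk);
 *	strncpy(buf, tsk->comm, sizeof(buf));
 *	task_unlock(tsk);
 */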

#endif /* _LINUX_SCHED_TASK_H */