#ifndef _LINUX_SCHED_H
#define _LINUX_SCHED_H

#include <uapi/linux/sched.h>

#include <linux/sched/prio.h>

#include <linux/capability.h>
#include <linux/mutex.h>
#include <linux/plist.h>
#include <linux/mm_types.h>
#include <asm/ptrace.h>

#include <linux/sem.h>
#include <linux/shm.h>
#include <linux/signal.h>
#include <linux/signal_types.h>
#include <linux/pid.h>
#include <linux/seccomp.h>
#include <linux/rculist.h>
#include <linux/rtmutex.h>

#include <linux/resource.h>
#include <linux/hrtimer.h>
#include <linux/kcov.h>
#include <linux/task_io_accounting.h>
#include <linux/latencytop.h>
#include <linux/cred.h>
#include <linux/gfp.h>
#include <linux/topology.h>
#include <linux/magic.h>
#include <linux/cgroup-defs.h>

#include <asm/current.h>

/* task_struct member predeclarations: */
struct audit_context;
struct autogroup;
struct backing_dev_info;
struct bio_list;
struct blk_plug;
struct cfs_rq;
struct filename;
struct fs_struct;
struct futex_pi_state;
struct io_context;
struct mempolicy;
struct nameidata;
struct nsproxy;
struct perf_event_context;
struct pid_namespace;
struct pipe_inode_info;
struct rcu_node;
struct reclaim_state;
struct robust_list_head;
struct sched_attr;
struct sched_param;
struct seq_file;
struct sighand_struct;
struct signal_struct;
struct task_delay_info;
struct task_group;
struct task_struct;
struct uts_namespace;

/*
 * Task state bitmask. NOTE! These bits are also
 * encoded in fs/proc/array.c: get_task_state().
 *
 * We have two separate sets of flags: task->state
 * is about runnability, while task->exit_state is
 * about the task exiting. Confusing, but this way
 * modifying one set can't modify the other one by
 * mistake.
 */
#define TASK_RUNNING		0
#define TASK_INTERRUPTIBLE	1
#define TASK_UNINTERRUPTIBLE	2
#define __TASK_STOPPED		4
#define __TASK_TRACED		8
/* in tsk->exit_state */
#define EXIT_DEAD		16
#define EXIT_ZOMBIE		32
#define EXIT_TRACE		(EXIT_ZOMBIE | EXIT_DEAD)
/* in tsk->state again */
#define TASK_DEAD		64
#define TASK_WAKEKILL		128
#define TASK_WAKING		256
#define TASK_PARKED		512
#define TASK_NOLOAD		1024
#define TASK_NEW		2048
#define TASK_STATE_MAX		4096

#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWPNn"

/* Convenience macros for the sake of set_current_state */
#define TASK_KILLABLE		(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
#define TASK_STOPPED		(TASK_WAKEKILL | __TASK_STOPPED)
#define TASK_TRACED		(TASK_WAKEKILL | __TASK_TRACED)

#define TASK_IDLE		(TASK_UNINTERRUPTIBLE | TASK_NOLOAD)

/* Convenience macros for the sake of wake_up */
#define TASK_NORMAL		(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
#define TASK_ALL		(TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)

/* get_task_state() */
#define TASK_REPORT		(TASK_RUNNING | TASK_INTERRUPTIBLE | \
				 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
				 __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD)

#define task_is_traced(task)	((task->state & __TASK_TRACED) != 0)
#define task_is_stopped(task)	((task->state & __TASK_STOPPED) != 0)
#define task_is_stopped_or_traced(task)	\
			((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
#define task_contributes_to_load(task)	\
				((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
				 (task->flags & PF_FROZEN) == 0 && \
				 (task->state & TASK_NOLOAD) == 0)

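/*
 * Illustrative sketch (editorial, not part of the original header):
 * because the state values are individual bits, a sleeper can be woken
 * for any state in a mask.  E.g. TASK_KILLABLE is
 * TASK_WAKEKILL | TASK_UNINTERRUPTIBLE, so a task sleeping killably
 * ignores ordinary signals but is woken by fatal ones:
 *
 *	set_current_state(TASK_KILLABLE);
 *	schedule();
 *	if (fatal_signal_pending(current))
 *		return -ERESTARTSYS;
 */
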
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP

#define __set_current_state(state_value)			\
	do {							\
		current->task_state_change = _THIS_IP_;		\
		current->state = (state_value);			\
	} while (0)
#define set_current_state(state_value)				\
	do {							\
		current->task_state_change = _THIS_IP_;		\
		smp_store_mb(current->state, (state_value));	\
	} while (0)

#else
/*
 * set_current_state() includes a barrier so that the write of current->state
 * is correctly serialised wrt the caller's subsequent test of whether to
 * actually sleep:
 *
 *   for (;;) {
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	if (!need_sleep)
 *		break;
 *
 *	schedule();
 *   }
 *   __set_current_state(TASK_RUNNING);
 *
 * If the caller does not need such serialisation (because, for instance, the
 * condition test and condition change and wakeup are under the same lock) then
 * use __set_current_state().
 *
 * The above is typically ordered against the wakeup, which does:
 *
 *   need_sleep = false;
 *   wake_up_state(p, TASK_UNINTERRUPTIBLE);
 *
 * Where wake_up_state() (and all other wakeup primitives) imply enough
 * barriers to order the store of the variable against wakeup.
 *
 * Wakeup will do: if (@state & p->state) p->state = TASK_RUNNING, that is,
 * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a
 * TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING).
 *
 * This is obviously fine, since they both store the exact same value.
 *
 * Also see the comments of try_to_wake_up().
 */
#define __set_current_state(state_value)		\
	do { current->state = (state_value); } while (0)
#define set_current_state(state_value)			\
	smp_store_mb(current->state, (state_value))

#endif

/* Task command name length */
#define TASK_COMM_LEN 16

extern void sched_init(void);
extern void sched_init_smp(void);

extern cpumask_var_t cpu_isolated_map;

extern int runqueue_is_locked(int cpu);

extern void cpu_init(void);
extern void trap_init(void);
extern void update_process_times(int user);
extern void scheduler_tick(void);

#define	MAX_SCHEDULE_TIMEOUT	LONG_MAX
extern signed long schedule_timeout(signed long timeout);
extern signed long schedule_timeout_interruptible(signed long timeout);
extern signed long schedule_timeout_killable(signed long timeout);
extern signed long schedule_timeout_uninterruptible(signed long timeout);
extern signed long schedule_timeout_idle(signed long timeout);
asmlinkage void schedule(void);
extern void schedule_preempt_disabled(void);

extern int __must_check io_schedule_prepare(void);
extern void io_schedule_finish(int token);
extern long io_schedule_timeout(long timeout);
extern void io_schedule(void);

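/*
 * Illustrative sketch (editorial): schedule_timeout() sleeps in the
 * state previously set with set_current_state() and returns the
 * jiffies left on the timer (0 if it ran to completion):
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	remaining = schedule_timeout(msecs_to_jiffies(100));
 *	if (remaining)
 *		;	// woken early, e.g. by a signal
 *
 * The schedule_timeout_*() variants above fold the state setting in.
 */
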
/**
 * struct prev_cputime - snapshot of system and user cputime
 * @utime: time spent in user mode
 * @stime: time spent in system mode
 * @lock: protects the above two fields
 *
 * Stores previous user/system time values such that we can guarantee
 * monotonicity.
 */
struct prev_cputime {
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	u64 utime;
	u64 stime;
	raw_spinlock_t lock;
#endif
};

static inline void prev_cputime_init(struct prev_cputime *prev)
{
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	prev->utime = prev->stime = 0;
	raw_spin_lock_init(&prev->lock);
#endif
}

/**
 * struct task_cputime - collected CPU time counts
 * @utime:		time spent in user mode, in nanoseconds
 * @stime:		time spent in kernel mode, in nanoseconds
 * @sum_exec_runtime:	total time spent on the CPU, in nanoseconds
 *
 * This structure groups together three kinds of CPU time that are tracked for
 * threads and thread groups.  Most things considering CPU time want to group
 * these counts together and treat all three of them in parallel.
 */
struct task_cputime {
	u64 utime;
	u64 stime;
	unsigned long long sum_exec_runtime;
};

/* Alternate field names when used to cache expirations. */
#define virt_exp	utime
#define prof_exp	stime
#define sched_exp	sum_exec_runtime

/*
 * This is the atomic variant of task_cputime, which can be used for
 * storing and updating task_cputime statistics without locking.
 */
struct task_cputime_atomic {
	atomic64_t utime;
	atomic64_t stime;
	atomic64_t sum_exec_runtime;
};

#define INIT_CPUTIME_ATOMIC \
	(struct task_cputime_atomic) {				\
		.utime = ATOMIC64_INIT(0),			\
		.stime = ATOMIC64_INIT(0),			\
		.sum_exec_runtime = ATOMIC64_INIT(0),		\
	}

/**
 * struct thread_group_cputimer - thread group interval timer counts
 * @cputime_atomic:	atomic thread group interval timers.
 * @running:		true when there are timers running and
 *			@cputime_atomic receives updates.
 * @checking_timer:	true when a thread in the group is in the
 *			process of checking for thread group timers.
 *
 * This structure contains the version of task_cputime, above, that is
 * used for thread group CPU timer calculations.
 */
struct thread_group_cputimer {
	struct task_cputime_atomic cputime_atomic;
	bool running;
	bool checking_timer;
};

#include <linux/rwsem.h>

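/*
 * Illustrative sketch (editorial): INIT_CPUTIME_ATOMIC above is a
 * compound literal, so a group timer can be initialized in one shot:
 *
 *	struct thread_group_cputimer cputimer = {
 *		.cputime_atomic	= INIT_CPUTIME_ATOMIC,
 *		.running	= false,
 *		.checking_timer	= false,
 *	};
 */
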
#ifdef CONFIG_SCHED_INFO
struct sched_info {
	/* cumulative counters */
	unsigned long pcount;	      /* # of times run on this cpu */
	unsigned long long run_delay; /* time spent waiting on a runqueue */

	/* timestamps */
	unsigned long long last_arrival,/* when we last ran on a cpu */
			   last_queued;	/* when we were last queued to run */
};
#endif /* CONFIG_SCHED_INFO */

static inline int sched_info_on(void)
{
#ifdef CONFIG_SCHEDSTATS
	return 1;
#elif defined(CONFIG_TASK_DELAY_ACCT)
	extern int delayacct_on;
	return delayacct_on;
#else
	return 0;
#endif
}

#ifdef CONFIG_SCHEDSTATS
void force_schedstat_enabled(void);
#endif

/*
 * Integer metrics need fixed point arithmetic, e.g., sched/fair
 * has a few: load, load_avg, util_avg, freq, and capacity.
 *
 * We define a basic fixed point arithmetic range, and then formalize
 * all these metrics based on that basic range.
 */
# define SCHED_FIXEDPOINT_SHIFT	10
# define SCHED_FIXEDPOINT_SCALE	(1L << SCHED_FIXEDPOINT_SHIFT)

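/*
 * Worked example (editorial): with SCHED_FIXEDPOINT_SHIFT == 10, the
 * ratio 1.0 is represented as SCHED_FIXEDPOINT_SCALE == 1024, and a
 * product of two fixed-point values is renormalized with a right
 * shift.  E.g. 75% of a weight of 2048:
 *
 *	ratio  = (3 * SCHED_FIXEDPOINT_SCALE) / 4;		// 768
 *	result = (2048 * ratio) >> SCHED_FIXEDPOINT_SHIFT;	// 1536
 */
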
#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
extern void prefetch_stack(struct task_struct *t);
#else
static inline void prefetch_stack(struct task_struct *t) { }
#endif

struct load_weight {
	unsigned long weight;
	u32 inv_weight;
};

/*
 * The load_avg/util_avg accumulates an infinite geometric series
 * (see __update_load_avg() in kernel/sched/fair.c).
 *
 * [load_avg definition]
 *
 *   load_avg = runnable% * scale_load_down(load)
 *
 * where runnable% is the time ratio that a sched_entity is runnable.
 * For cfs_rq, it is the aggregated load_avg of all runnable and
 * blocked sched_entities.
 *
 * load_avg may also take frequency scaling into account:
 *
 *   load_avg = runnable% * scale_load_down(load) * freq%
 *
 * where freq% is the CPU frequency normalized to the highest frequency.
 *
 * [util_avg definition]
 *
 *   util_avg = running% * SCHED_CAPACITY_SCALE
 *
 * where running% is the time ratio that a sched_entity is running on
 * a CPU. For cfs_rq, it is the aggregated util_avg of all runnable
 * and blocked sched_entities.
 *
 * util_avg may also factor frequency scaling and CPU capacity scaling:
 *
 *   util_avg = running% * SCHED_CAPACITY_SCALE * freq% * capacity%
 *
 * where freq% is the same as above, and capacity% is the CPU capacity
 * normalized to the greatest capacity (due to uarch differences, etc).
 *
 * N.B., the above ratios (runnable%, running%, freq%, and capacity%)
 * themselves are in the range of [0, 1]. To do fixed point arithmetic,
 * we therefore scale them to as large a range as necessary. This is for
 * example reflected by util_avg's SCHED_CAPACITY_SCALE.
 *
 * [Overflow issue]
 *
 * The 64-bit load_sum can have 4353082796 (=2^64/47742/88761) entities
 * with the highest load (=88761), always runnable on a single cfs_rq,
 * and should not overflow as the number already hits PID_MAX_LIMIT.
 *
 * For all other cases (including 32-bit kernels), struct load_weight's
 * weight will overflow first before we do, because:
 *
 *   Max(load_avg) <= Max(load.weight)
 *
 * Then it is the load_weight's responsibility to consider overflow
 * issues.
 */
struct sched_avg {
	u64 last_update_time, load_sum;
	u32 util_sum, period_contrib;
	unsigned long load_avg, util_avg;
};

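/*
 * Worked example (editorial): a nice-0 entity (scale_load_down(load)
 * == 1024) that is runnable half of the time has
 *
 *	load_avg = 0.5 * 1024 = 512
 *
 * and if it actually runs a quarter of the time on a CPU clocked at
 * half of its maximum frequency, with SCHED_CAPACITY_SCALE == 1024:
 *
 *	util_avg = 0.25 * 1024 * 0.5 = 128
 */
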
#ifdef CONFIG_SCHEDSTATS
struct sched_statistics {
	u64 wait_start;
	u64 wait_max;
	u64 wait_count;
	u64 wait_sum;
	u64 iowait_count;
	u64 iowait_sum;

	u64 sleep_start;
	u64 sleep_max;
	s64 sum_sleep_runtime;

	u64 block_start;
	u64 block_max;
	u64 exec_max;
	u64 slice_max;

	u64 nr_migrations_cold;
	u64 nr_failed_migrations_affine;
	u64 nr_failed_migrations_running;
	u64 nr_failed_migrations_hot;
	u64 nr_forced_migrations;

	u64 nr_wakeups;
	u64 nr_wakeups_sync;
	u64 nr_wakeups_migrate;
	u64 nr_wakeups_local;
	u64 nr_wakeups_remote;
	u64 nr_wakeups_affine;
	u64 nr_wakeups_affine_attempts;
	u64 nr_wakeups_passive;
	u64 nr_wakeups_idle;
};
#endif

struct sched_entity {
	struct load_weight load;	/* for load-balancing */
	struct rb_node run_node;
	struct list_head group_node;
	unsigned int on_rq;

	u64 exec_start;
	u64 sum_exec_runtime;
	u64 vruntime;
	u64 prev_sum_exec_runtime;

	u64 nr_migrations;

#ifdef CONFIG_SCHEDSTATS
	struct sched_statistics statistics;
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	int depth;
	struct sched_entity *parent;
	/* rq on which this entity is (to be) queued: */
	struct cfs_rq *cfs_rq;
	/* rq "owned" by this entity/group: */
	struct cfs_rq *my_q;
#endif

#ifdef CONFIG_SMP
	/*
	 * Per entity load average tracking.
	 *
	 * Put into separate cache line so it does not
	 * collide with read-mostly values above.
	 */
	struct sched_avg avg ____cacheline_aligned_in_smp;
#endif
};

struct sched_rt_entity {
	struct list_head run_list;
	unsigned long timeout;
	unsigned long watchdog_stamp;
	unsigned int time_slice;
	unsigned short on_rq;
	unsigned short on_list;

	struct sched_rt_entity *back;
#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity *parent;
	/* rq on which this entity is (to be) queued: */
	struct rt_rq *rt_rq;
	/* rq "owned" by this entity/group: */
	struct rt_rq *my_q;
#endif
};

struct sched_dl_entity {
	struct rb_node rb_node;

	/*
	 * Original scheduling parameters. Copied here from sched_attr
	 * during sched_setattr(), they will remain the same until
	 * the next sched_setattr().
	 */
	u64 dl_runtime;		/* maximum runtime for each instance	*/
	u64 dl_deadline;	/* relative deadline of each instance	*/
	u64 dl_period;		/* separation of two instances (period) */
	u64 dl_bw;		/* dl_runtime / dl_deadline		*/

	/*
	 * Actual scheduling parameters. Initialized with the values above,
	 * they are continuously updated during task execution. Note that
	 * the remaining runtime could be < 0 in case we are in overrun.
	 */
	s64 runtime;		/* remaining runtime for this instance	*/
	u64 deadline;		/* absolute deadline for this instance	*/
	unsigned int flags;	/* specifying the scheduler behaviour	*/

	/*
	 * Some bool flags:
	 *
	 * @dl_throttled tells if we exhausted the runtime. If so, the
	 * task has to wait for a replenishment to be performed at the
	 * next firing of dl_timer.
	 *
	 * @dl_boosted tells if we are boosted due to DI. If so we are
	 * outside bandwidth enforcement mechanism (but only until we
	 * exit the critical section);
	 *
	 * @dl_yielded tells if task gave up the cpu before consuming
	 * all its available runtime during the last job.
	 */
	int dl_throttled, dl_boosted, dl_yielded;

	/*
	 * Bandwidth enforcement timer. Each -deadline task has its
	 * own bandwidth to be enforced, thus we need one timer per task.
	 */
	struct hrtimer dl_timer;
};

union rcu_special {
	struct {
		u8 blocked;
		u8 need_qs;
		u8 exp_need_qs;
		u8 pad;	/* Otherwise the compiler can store garbage here. */
	} b; /* Bits. */
	u32 s; /* Set of bits. */
};

enum perf_event_task_context {
	perf_invalid_context = -1,
	perf_hw_context = 0,
	perf_sw_context,
	perf_nr_task_contexts,
};

struct wake_q_node {
	struct wake_q_node *next;
};

/* Track pages that require TLB flushes */
struct tlbflush_unmap_batch {
	/*
	 * Each bit set is a CPU that potentially has a TLB entry for one of
	 * the PFNs being flushed. See set_tlb_ubc_flush_pending().
	 */
	struct cpumask cpumask;

	/* True if any bit in cpumask is set */
	bool flush_required;

	/*
	 * If true then the PTE was dirty when unmapped. The entry must be
	 * flushed before IO is initiated or a stale TLB entry potentially
	 * allows an update without redirtying the page.
	 */
	bool writable;
};

struct task_struct {
#ifdef CONFIG_THREAD_INFO_IN_TASK
	/*
	 * For reasons of header soup (see current_thread_info()), this
	 * must be the first element of task_struct.
	 */
	struct thread_info thread_info;
#endif
	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
	void *stack;
	atomic_t usage;
	unsigned int flags;	/* per process flags, defined below */
	unsigned int ptrace;

#ifdef CONFIG_SMP
	struct llist_node wake_entry;
	int on_cpu;
#ifdef CONFIG_THREAD_INFO_IN_TASK
	unsigned int cpu;	/* current CPU */
#endif
	unsigned int wakee_flips;
	unsigned long wakee_flip_decay_ts;
	struct task_struct *last_wakee;

	int wake_cpu;
#endif
	int on_rq;

	int prio, static_prio, normal_prio;
	unsigned int rt_priority;
	const struct sched_class *sched_class;
	struct sched_entity se;
	struct sched_rt_entity rt;
#ifdef CONFIG_CGROUP_SCHED
	struct task_group *sched_task_group;
#endif
	struct sched_dl_entity dl;

#ifdef CONFIG_PREEMPT_NOTIFIERS
	/* list of struct preempt_notifier: */
	struct hlist_head preempt_notifiers;
#endif

#ifdef CONFIG_BLK_DEV_IO_TRACE
	unsigned int btrace_seq;
#endif

	unsigned int policy;
	int nr_cpus_allowed;
	cpumask_t cpus_allowed;

#ifdef CONFIG_PREEMPT_RCU
	int rcu_read_lock_nesting;
	union rcu_special rcu_read_unlock_special;
	struct list_head rcu_node_entry;
	struct rcu_node *rcu_blocked_node;
#endif /* #ifdef CONFIG_PREEMPT_RCU */
#ifdef CONFIG_TASKS_RCU
	unsigned long rcu_tasks_nvcsw;
	bool rcu_tasks_holdout;
	struct list_head rcu_tasks_holdout_list;
	int rcu_tasks_idle_cpu;
#endif /* #ifdef CONFIG_TASKS_RCU */

#ifdef CONFIG_SCHED_INFO
	struct sched_info sched_info;
#endif

	struct list_head tasks;
#ifdef CONFIG_SMP
	struct plist_node pushable_tasks;
	struct rb_node pushable_dl_tasks;
#endif

	struct mm_struct *mm, *active_mm;

	/* Per-thread vma caching: */
	struct vmacache vmacache;

#if defined(SPLIT_RSS_COUNTING)
	struct task_rss_stat rss_stat;
#endif
/* task state */
	int exit_state;
	int exit_code, exit_signal;
	int pdeath_signal;	/* The signal sent when the parent dies */
	unsigned long jobctl;	/* JOBCTL_*, siglock protected */

	/* Used for emulating ABI behavior of previous Linux versions */
	unsigned int personality;

	/* scheduler bits, serialized by scheduler locks */
	unsigned sched_reset_on_fork:1;
	unsigned sched_contributes_to_load:1;
	unsigned sched_migrated:1;
	unsigned sched_remote_wakeup:1;
	unsigned :0; /* force alignment to the next boundary */

	/* unserialized, strictly 'current' */
	unsigned in_execve:1;	/* bit to tell LSMs we're in execve */
	unsigned in_iowait:1;
#if !defined(TIF_RESTORE_SIGMASK)
	unsigned restore_sigmask:1;
#endif
#ifdef CONFIG_MEMCG
	unsigned memcg_may_oom:1;
#ifndef CONFIG_SLOB
	unsigned memcg_kmem_skip_account:1;
#endif
#endif
#ifdef CONFIG_COMPAT_BRK
	unsigned brk_randomized:1;
#endif

	unsigned long atomic_flags; /* Flags needing atomic access. */

	struct restart_block restart_block;

	pid_t pid;
	pid_t tgid;

#ifdef CONFIG_CC_STACKPROTECTOR
	/* Canary value for the -fstack-protector gcc feature */
	unsigned long stack_canary;
#endif
	/*
	 * pointers to (original) parent process, youngest child, younger sibling,
	 * older sibling, respectively.  (p->father can be replaced with
	 * p->real_parent->pid)
	 */
	struct task_struct __rcu *real_parent; /* real parent process */
	struct task_struct __rcu *parent; /* recipient of SIGCHLD, wait4() reports */
	/*
	 * children/sibling forms the list of my natural children
	 */
	struct list_head children;	/* list of my children */
	struct list_head sibling;	/* linkage in my parent's children list */
	struct task_struct *group_leader;	/* threadgroup leader */

	/*
	 * ptraced is the list of tasks this task is using ptrace on.
	 * This includes both natural children and PTRACE_ATTACH targets.
	 * p->ptrace_entry is p's link on the p->parent->ptraced list.
	 */
	struct list_head ptraced;
	struct list_head ptrace_entry;

	/* PID/PID hash table linkage. */
	struct pid_link pids[PIDTYPE_MAX];
	struct list_head thread_group;
	struct list_head thread_node;

	struct completion *vfork_done;		/* for vfork() */
	int __user *set_child_tid;		/* CLONE_CHILD_SETTID */
	int __user *clear_child_tid;		/* CLONE_CHILD_CLEARTID */

	u64 utime, stime;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
	u64 utimescaled, stimescaled;
#endif
	u64 gtime;
	struct prev_cputime prev_cputime;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
	seqcount_t vtime_seqcount;
	unsigned long long vtime_snap;
	enum {
		/* Task is sleeping or running in a CPU with VTIME inactive */
		VTIME_INACTIVE = 0,
		/* Task runs in userspace in a CPU with VTIME active */
		VTIME_USER,
		/* Task runs in kernelspace in a CPU with VTIME active */
		VTIME_SYS,
	} vtime_snap_whence;
#endif

#ifdef CONFIG_NO_HZ_FULL
	atomic_t tick_dep_mask;
#endif
	unsigned long nvcsw, nivcsw; /* context switch counts */
	u64 start_time;		/* monotonic time in nsec */
	u64 real_start_time;	/* boot based time in nsec */
/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
	unsigned long min_flt, maj_flt;

#ifdef CONFIG_POSIX_TIMERS
	struct task_cputime cputime_expires;
	struct list_head cpu_timers[3];
#endif

/* process credentials */
	const struct cred __rcu *ptracer_cred; /* Tracer's credentials at attach */
	const struct cred __rcu *real_cred; /* objective and real subjective task
					 * credentials (COW) */
	const struct cred __rcu *cred;	/* effective (overridable) subjective task
					 * credentials (COW) */
	char comm[TASK_COMM_LEN]; /* executable name excluding path
				     - access with [gs]et_task_comm (which lock
				       it with task_lock())
				     - initialized normally by setup_new_exec */
/* file system info */
	struct nameidata *nameidata;
#ifdef CONFIG_SYSVIPC
/* ipc stuff */
	struct sysv_sem sysvsem;
	struct sysv_shm sysvshm;
#endif
#ifdef CONFIG_DETECT_HUNG_TASK
/* hung task detection */
	unsigned long last_switch_count;
#endif
/* filesystem information */
	struct fs_struct *fs;
/* open file information */
	struct files_struct *files;
/* namespaces */
	struct nsproxy *nsproxy;
/* signal handlers */
	struct signal_struct *signal;
	struct sighand_struct *sighand;

	sigset_t blocked, real_blocked;
	sigset_t saved_sigmask;	/* restored if set_restore_sigmask() was used */
	struct sigpending pending;

	unsigned long sas_ss_sp;
	size_t sas_ss_size;
	unsigned sas_ss_flags;

	struct callback_head *task_works;

	struct audit_context *audit_context;
#ifdef CONFIG_AUDITSYSCALL
	kuid_t loginuid;
	unsigned int sessionid;
#endif
	struct seccomp seccomp;

/* Thread group tracking */
	u32 parent_exec_id;
	u32 self_exec_id;
/* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed,
 * mempolicy */
	spinlock_t alloc_lock;

	/* Protection of the PI data structures: */
	raw_spinlock_t pi_lock;

	struct wake_q_node wake_q;

#ifdef CONFIG_RT_MUTEXES
	/* PI waiters blocked on a rt_mutex held by this task */
	struct rb_root pi_waiters;
	struct rb_node *pi_waiters_leftmost;
	/* Deadlock detection and priority inheritance handling */
	struct rt_mutex_waiter *pi_blocked_on;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
	/* mutex deadlock detection */
	struct mutex_waiter *blocked_on;
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	unsigned int irq_events;
	unsigned long hardirq_enable_ip;
	unsigned long hardirq_disable_ip;
	unsigned int hardirq_enable_event;
	unsigned int hardirq_disable_event;
	int hardirqs_enabled;
	int hardirq_context;
	unsigned long softirq_disable_ip;
	unsigned long softirq_enable_ip;
	unsigned int softirq_disable_event;
	unsigned int softirq_enable_event;
	int softirqs_enabled;
	int softirq_context;
#endif
#ifdef CONFIG_LOCKDEP
# define MAX_LOCK_DEPTH 48UL
	u64 curr_chain_key;
	int lockdep_depth;
	unsigned int lockdep_recursion;
	struct held_lock held_locks[MAX_LOCK_DEPTH];
	gfp_t lockdep_reclaim_gfp;
#endif
#ifdef CONFIG_UBSAN
	unsigned int in_ubsan;
#endif

/* journalling filesystem info */
	void *journal_info;

/* stacked block device info */
	struct bio_list *bio_list;

#ifdef CONFIG_BLOCK
/* stack plugging */
	struct blk_plug *plug;
#endif

/* VM state */
	struct reclaim_state *reclaim_state;

	struct backing_dev_info *backing_dev_info;

	struct io_context *io_context;

	unsigned long ptrace_message;
	siginfo_t *last_siginfo; /* For ptrace use.  */
	struct task_io_accounting ioac;
#if defined(CONFIG_TASK_XACCT)
	u64 acct_rss_mem1;	/* accumulated rss usage */
	u64 acct_vm_mem1;	/* accumulated virtual memory usage */
	u64 acct_timexpd;	/* stime + utime since last update */
#endif
#ifdef CONFIG_CPUSETS
	nodemask_t mems_allowed;	/* Protected by alloc_lock */
	seqcount_t mems_allowed_seq;	/* Sequence no to catch updates */
	int cpuset_mem_spread_rotor;
	int cpuset_slab_spread_rotor;
#endif
#ifdef CONFIG_CGROUPS
	/* Control Group info protected by css_set_lock */
	struct css_set __rcu *cgroups;
	/* cg_list protected by css_set_lock and tsk->alloc_lock */
	struct list_head cg_list;
#endif
#ifdef CONFIG_INTEL_RDT_A
	int closid;
#endif
#ifdef CONFIG_FUTEX
	struct robust_list_head __user *robust_list;
#ifdef CONFIG_COMPAT
	struct compat_robust_list_head __user *compat_robust_list;
#endif
	struct list_head pi_state_list;
	struct futex_pi_state *pi_state_cache;
#endif
#ifdef CONFIG_PERF_EVENTS
	struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
	struct mutex perf_event_mutex;
	struct list_head perf_event_list;
#endif
#ifdef CONFIG_DEBUG_PREEMPT
	unsigned long preempt_disable_ip;
#endif
#ifdef CONFIG_NUMA
	struct mempolicy *mempolicy;	/* Protected by alloc_lock */
	short il_next;
	short pref_node_fork;
#endif
#ifdef CONFIG_NUMA_BALANCING
	int numa_scan_seq;
	unsigned int numa_scan_period;
	unsigned int numa_scan_period_max;
	int numa_preferred_nid;
	unsigned long numa_migrate_retry;
	u64 node_stamp;			/* migration stamp */
	u64 last_task_numa_placement;
	u64 last_sum_exec_runtime;
	struct callback_head numa_work;

	struct list_head numa_entry;
	struct numa_group *numa_group;

	/*
	 * numa_faults is an array split into four regions:
	 * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer
	 * in this precise order.
	 *
	 * faults_memory: Exponential decaying average of faults on a per-node
	 * basis. Scheduling placement decisions are made based on these
	 * counts. The values remain static for the duration of a PTE scan.
	 * faults_cpu: Track the nodes the process was running on when a NUMA
	 * hinting fault was incurred.
	 * faults_memory_buffer and faults_cpu_buffer: Record faults per node
	 * during the current scan window. When the scan completes, the counts
	 * in faults_memory and faults_cpu decay and these values are copied.
	 */
	unsigned long *numa_faults;
	unsigned long total_numa_faults;

	/*
	 * numa_faults_locality tracks if faults recorded during the last
	 * scan window were remote/local or failed to migrate. The task scan
	 * period is adapted based on the locality of the faults with different
	 * weights depending on whether they were shared or private faults
	 */
	unsigned long numa_faults_locality[3];

	unsigned long numa_pages_migrated;
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
	struct tlbflush_unmap_batch tlb_ubc;
#endif

	struct rcu_head rcu;

	/*
	 * cache last used pipe for splice
	 */
	struct pipe_inode_info *splice_pipe;

	struct page_frag task_frag;

#ifdef CONFIG_TASK_DELAY_ACCT
	struct task_delay_info *delays;
#endif

#ifdef CONFIG_FAULT_INJECTION
	int make_it_fail;
#endif
	/*
	 * when (nr_dirtied >= nr_dirtied_pause), it's time to call
	 * balance_dirty_pages() for some dirty throttling pause
	 */
	int nr_dirtied;
	int nr_dirtied_pause;
	unsigned long dirty_paused_when; /* start of a write-and-pause period */

#ifdef CONFIG_LATENCYTOP
	int latency_record_count;
	struct latency_record latency_record[LT_SAVECOUNT];
#endif
	/*
	 * time slack values; these are used to round up poll() and
	 * select() etc timeout values. These are in nanoseconds.
	 */
	u64 timer_slack_ns;
	u64 default_timer_slack_ns;

#ifdef CONFIG_KASAN
	unsigned int kasan_depth;
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* Index of current stored address in ret_stack */
	int curr_ret_stack;
	/* Stack of return addresses for return function tracing */
	struct ftrace_ret_stack *ret_stack;
	/* time stamp for last schedule */
	unsigned long long ftrace_timestamp;
	/*
	 * Number of functions that haven't been traced
	 * because of depth overrun.
	 */
	atomic_t trace_overrun;
	/* Pause for the tracing */
	atomic_t tracing_graph_pause;
#endif
#ifdef CONFIG_TRACING
	/* state flags for use by tracers */
	unsigned long trace;
	/* bitmask and counter of trace recursion */
	unsigned long trace_recursion;
#endif /* CONFIG_TRACING */
#ifdef CONFIG_KCOV
	/* Coverage collection mode enabled for this task (0 if disabled). */
	enum kcov_mode kcov_mode;
	/* Size of the kcov_area. */
	unsigned kcov_size;
	/* Buffer for coverage collection. */
	void *kcov_area;
	/* kcov descriptor wired with this task or NULL. */
	struct kcov *kcov;
#endif
#ifdef CONFIG_MEMCG
	struct mem_cgroup *memcg_in_oom;
	gfp_t memcg_oom_gfp_mask;
	int memcg_oom_order;

	/* number of pages to reclaim on returning to userland */
	unsigned int memcg_nr_pages_over_high;
#endif
#ifdef CONFIG_UPROBES
	struct uprobe_task *utask;
#endif
#if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
	unsigned int sequential_io;
	unsigned int sequential_io_avg;
#endif
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
	unsigned long task_state_change;
#endif
	int pagefault_disabled;
#ifdef CONFIG_MMU
	struct task_struct *oom_reaper_list;
#endif
#ifdef CONFIG_VMAP_STACK
	struct vm_struct *stack_vm_area;
#endif
#ifdef CONFIG_THREAD_INFO_IN_TASK
	/* A live task holds one reference. */
	atomic_t stack_refcount;
#endif
/* CPU-specific state of this task */
	struct thread_struct thread;
	/*
	 * WARNING: on x86, 'thread_struct' contains a variable-sized
	 * structure.  It *MUST* be at the end of 'task_struct'.
	 *
	 * Do not put anything below here!
	 */
};

static inline struct pid *task_pid(struct task_struct *task)
{
	return task->pids[PIDTYPE_PID].pid;
}

static inline struct pid *task_tgid(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_PID].pid;
}

/*
 * Without tasklist or rcu lock it is not safe to dereference
 * the result of task_pgrp/task_session even if task == current,
 * we can race with another thread doing sys_setsid/sys_setpgid.
 */
static inline struct pid *task_pgrp(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_PGID].pid;
}

static inline struct pid *task_session(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_SID].pid;
}

/*
 * the helpers to get the task's different pids as they are seen
 * from various namespaces
 *
 * task_xid_nr()     : global id, i.e. the id seen from the init namespace;
 * task_xid_vnr()    : virtual id, i.e. the id seen from the pid namespace of
 *                     current.
 * task_xid_nr_ns()  : id seen from the ns specified;
 *
 * set_task_vxid()   : assigns a virtual id to a task;
 *
 * see also pid_nr() etc in include/linux/pid.h
 */
pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
			struct pid_namespace *ns);

static inline pid_t task_pid_nr(struct task_struct *tsk)
{
	return tsk->pid;
}

static inline pid_t task_pid_nr_ns(struct task_struct *tsk,
					struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
}

static inline pid_t task_pid_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
}


static inline pid_t task_tgid_nr(struct task_struct *tsk)
{
	return tsk->tgid;
}

pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);

static inline pid_t task_tgid_vnr(struct task_struct *tsk)
{
	return pid_vnr(task_tgid(tsk));
}


static inline int pid_alive(const struct task_struct *p);
static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
{
	pid_t pid = 0;

	rcu_read_lock();
	if (pid_alive(tsk))
		pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
	rcu_read_unlock();

	return pid;
}

static inline pid_t task_ppid_nr(const struct task_struct *tsk)
{
	return task_ppid_nr_ns(tsk, &init_pid_ns);
}

static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
					struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
}

static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
}


static inline pid_t task_session_nr_ns(struct task_struct *tsk,
					struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
}

static inline pid_t task_session_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
}

/* obsolete, do not use */
static inline pid_t task_pgrp_nr(struct task_struct *tsk)
{
	return task_pgrp_nr_ns(tsk, &init_pid_ns);
}

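/*
 * Illustrative example (editorial): for a process whose id is 1042 in
 * the init namespace but 3 inside its own pid namespace:
 *
 *	task_pid_nr(tsk)  == 1042	// global id
 *	task_pid_vnr(tsk) == 3		// id in tsk's own namespace
 *
 * For a thread that is not the group leader, task_pid_nr() and
 * task_tgid_nr() differ: the latter reports the group leader's id.
 */
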
/**
 * pid_alive - check that a task structure is not stale
 * @p: Task structure to be checked.
 *
 * Test if a process is not yet dead (at most zombie state)
 * If pid_alive fails, then pointers within the task structure
 * can be stale and must not be dereferenced.
 *
 * Return: 1 if the process is alive. 0 otherwise.
 */
static inline int pid_alive(const struct task_struct *p)
{
	return p->pids[PIDTYPE_PID].pid != NULL;
}

/**
 * is_global_init - check if a task structure is init. Since init
 * is free to have sub-threads we need to check tgid.
 * @tsk: Task structure to be checked.
 *
 * Check if a task structure is the first user space task the kernel created.
 *
 * Return: 1 if the task structure is init. 0 otherwise.
 */
static inline int is_global_init(struct task_struct *tsk)
{
	return task_tgid_nr(tsk) == 1;
}

extern struct pid *cad_pid;

extern void free_task(struct task_struct *tsk);
#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)

extern void __put_task_struct(struct task_struct *t);

static inline void put_task_struct(struct task_struct *t)
{
	if (atomic_dec_and_test(&t->usage))
		__put_task_struct(t);
}

struct task_struct *task_rcu_dereference(struct task_struct **ptask);
struct task_struct *try_get_task_struct(struct task_struct **ptask);

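/*
 * Illustrative sketch (editorial): the usage count pins the
 * task_struct itself (not the task's liveness), so hold a reference
 * across any window where the struct could otherwise be freed:
 *
 *	get_task_struct(tsk);
 *	...use tsk, possibly after it has exited...
 *	put_task_struct(tsk);	// frees tsk on the final reference
 */
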
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
extern void task_cputime(struct task_struct *t,
			 u64 *utime, u64 *stime);
extern u64 task_gtime(struct task_struct *t);
#else
static inline void task_cputime(struct task_struct *t,
				u64 *utime, u64 *stime)
{
	*utime = t->utime;
	*stime = t->stime;
}

static inline u64 task_gtime(struct task_struct *t)
{
	return t->gtime;
}
#endif

#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
static inline void task_cputime_scaled(struct task_struct *t,
				       u64 *utimescaled,
				       u64 *stimescaled)
{
	*utimescaled = t->utimescaled;
	*stimescaled = t->stimescaled;
}
#else
static inline void task_cputime_scaled(struct task_struct *t,
				       u64 *utimescaled,
				       u64 *stimescaled)
{
	task_cputime(t, utimescaled, stimescaled);
}
#endif

extern void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st);
extern void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st);

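/*
 * Illustrative sketch (editorial): typical use when reporting a task's
 * CPU consumption; the _adjusted variant applies prev_cputime so the
 * reported values stay monotonic:
 *
 *	u64 utime, stime;
 *
 *	task_cputime_adjusted(p, &utime, &stime);
 *	// utime and stime are in nanoseconds
 */
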
1da177e4 LT |
1275 | /* |
1276 | * Per process flags | |
1277 | */ | |
c1de45ca | 1278 | #define PF_IDLE 0x00000002 /* I am an IDLE thread */ |
1da177e4 | 1279 | #define PF_EXITING 0x00000004 /* getting shut down */ |
778e9a9c | 1280 | #define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */ |
94886b84 | 1281 | #define PF_VCPU 0x00000010 /* I'm a virtual CPU */ |
21aa9af0 | 1282 | #define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */ |
1da177e4 | 1283 | #define PF_FORKNOEXEC 0x00000040 /* forked but didn't exec */ |
4db96cf0 | 1284 | #define PF_MCE_PROCESS 0x00000080 /* process policy on mce errors */ |
1da177e4 LT |
1285 | #define PF_SUPERPRIV 0x00000100 /* used super-user privileges */ |
1286 | #define PF_DUMPCORE 0x00000200 /* dumped core */ | |
1287 | #define PF_SIGNALED 0x00000400 /* killed by a signal */ | |
1288 | #define PF_MEMALLOC 0x00000800 /* Allocating memory */ | |
72fa5997 | 1289 | #define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */ |
1da177e4 | 1290 | #define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */ |
774a1221 | 1291 | #define PF_USED_ASYNC 0x00004000 /* used async_schedule*(), used by module init */ |
1da177e4 LT |
1292 | #define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */ |
1293 | #define PF_FROZEN 0x00010000 /* frozen for system suspend */ | |
1294 | #define PF_FSTRANS 0x00020000 /* inside a filesystem transaction */ | |
1295 | #define PF_KSWAPD 0x00040000 /* I am kswapd */ | |
21caf2fc | 1296 | #define PF_MEMALLOC_NOIO 0x00080000 /* Allocating memory without IO involved */ |
1da177e4 | 1297 | #define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */ |
246bb0b1 | 1298 | #define PF_KTHREAD 0x00200000 /* I am a kernel thread */ |
b31dc66a JA |
1299 | #define PF_RANDOMIZE 0x00400000 /* randomize virtual address space */ |
1300 | #define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */ | |
14a40ffc | 1301 | #define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */ |
4db96cf0 | 1302 | #define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ |
61a87122 | 1303 | #define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ |
58a69cb4 | 1304 | #define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */ |
2b44c4db | 1305 | #define PF_SUSPEND_TASK 0x80000000 /* this thread called freeze_processes and should not be frozen */ |
1da177e4 LT |
1306 | |
1307 | /* | |
1308 | * Only the _current_ task can read/write to tsk->flags, but other | |
1309 | * tasks can access tsk->flags in readonly mode for example | |
1310 | * with tsk_used_math (like during threaded core dumping). | |
1311 | * There is however an exception to this rule during ptrace | |
1312 | * or during fork: the ptracer task is allowed to write to the | |
1313 | * child->flags of its traced child (same goes for fork, the parent | |
1314 | * can write to the child->flags), because we're guaranteed the | |
1315 | * child is not running and in turn not changing child->flags | |
1316 | * at the same time the parent does it. | |
1317 | */ | |
1318 | #define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0) | |
1319 | #define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0) | |
1320 | #define clear_used_math() clear_stopped_child_used_math(current) | |
1321 | #define set_used_math() set_stopped_child_used_math(current) | |
1322 | #define conditional_stopped_child_used_math(condition, child) \ | |
1323 | do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0) | |
1324 | #define conditional_used_math(condition) \ | |
1325 | conditional_stopped_child_used_math(condition, current) | |
1326 | #define copy_to_stopped_child_used_math(child) \ | |
1327 | do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0) | |
1328 | /* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */ | |
1329 | #define tsk_used_math(p) ((p)->flags & PF_USED_MATH) | |
1330 | #define used_math() tsk_used_math(current) | |
1331 | ||
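A minimal sketch of the ownership rule documented above (the function is hypothetical and assumes <linux/printk.h> for pr_debug()): another task's PF_USED_MATH may be read at any time, but the non-atomic writes must target current:

static void fpu_flags_sketch(struct task_struct *peer)
{
	/* Read-only peek at another task's flags: always allowed. */
	if (tsk_used_math(peer))
		pr_debug("task %d has used the FPU\n", peer->pid);

	/* Non-atomic read-modify-write: legal only because it is current. */
	set_used_math();
}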
1d4457f9 | 1332 | /* Per-process atomic flags. */ |
a2b86f77 | 1333 | #define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. */ |
2ad654bc ZL |
1334 | #define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */ |
1335 | #define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */ | |
77ed2c57 | 1336 | #define PFA_LMK_WAITING 3 /* Lowmemorykiller is waiting */ |
2ad654bc | 1337 | |
1d4457f9 | 1338 | |
e0e5070b ZL |
1339 | #define TASK_PFA_TEST(name, func) \ |
1340 | static inline bool task_##func(struct task_struct *p) \ | |
1341 | { return test_bit(PFA_##name, &p->atomic_flags); } | |
1342 | #define TASK_PFA_SET(name, func) \ | |
1343 | static inline void task_set_##func(struct task_struct *p) \ | |
1344 | { set_bit(PFA_##name, &p->atomic_flags); } | |
1345 | #define TASK_PFA_CLEAR(name, func) \ | |
1346 | static inline void task_clear_##func(struct task_struct *p) \ | |
1347 | { clear_bit(PFA_##name, &p->atomic_flags); } | |
1348 | ||
1349 | TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs) | |
1350 | TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs) | |
1d4457f9 | 1351 | |
2ad654bc ZL |
1352 | TASK_PFA_TEST(SPREAD_PAGE, spread_page) |
1353 | TASK_PFA_SET(SPREAD_PAGE, spread_page) | |
1354 | TASK_PFA_CLEAR(SPREAD_PAGE, spread_page) | |
1355 | ||
1356 | TASK_PFA_TEST(SPREAD_SLAB, spread_slab) | |
1357 | TASK_PFA_SET(SPREAD_SLAB, spread_slab) | |
1358 | TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab) | |
1d4457f9 | 1359 | |
77ed2c57 TH |
1360 | TASK_PFA_TEST(LMK_WAITING, lmk_waiting) |
1361 | TASK_PFA_SET(LMK_WAITING, lmk_waiting) | |
1362 | ||
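The TASK_PFA_* macros above stamp out test/set/clear accessors over p->atomic_flags, and the instantiations are deliberately asymmetric: NO_NEW_PRIVS and LMK_WAITING get no clear helper, so once set they act as one-way latches. For reference, TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs) expands to exactly:

static inline bool task_no_new_privs(struct task_struct *p)
{
	return test_bit(PFA_NO_NEW_PRIVS, &p->atomic_flags);
}

Because TASK_PFA_CLEAR(NO_NEW_PRIVS, ...) is never instantiated, nothing can drop the bit again, an invariant the seccomp/no_new_privs security model relies on.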
907aed48 MG |
1363 | static inline void tsk_restore_flags(struct task_struct *task, |
1364 | unsigned long orig_flags, unsigned long flags) | |
1365 | { | |
1366 | task->flags &= ~flags; | |
1367 | task->flags |= orig_flags & flags; | |
1368 | } | |
1369 | ||
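tsk_restore_flags() restores only the bits named in @flags from @orig_flags and leaves the rest of ->flags alone. A sketch of the save/modify/restore idiom it supports (the function and its workload are hypothetical):

static void flusher_helper_sketch(void)
{
	unsigned long pflags = current->flags;

	current->flags |= PF_LESS_THROTTLE;
	/* ... clean pages on behalf of a flusher thread ... */
	tsk_restore_flags(current, pflags, PF_LESS_THROTTLE);
}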
f82f8042 JL |
1370 | extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, |
1371 | const struct cpumask *trial); | |
7f51412a JL |
1372 | extern int task_can_attach(struct task_struct *p, |
1373 | const struct cpumask *cs_cpus_allowed); | |
1da177e4 | 1374 | #ifdef CONFIG_SMP |
1e1b6c51 KM |
1375 | extern void do_set_cpus_allowed(struct task_struct *p, |
1376 | const struct cpumask *new_mask); | |
1377 | ||
cd8ba7cd | 1378 | extern int set_cpus_allowed_ptr(struct task_struct *p, |
96f874e2 | 1379 | const struct cpumask *new_mask); |
1da177e4 | 1380 | #else |
1e1b6c51 KM |
1381 | static inline void do_set_cpus_allowed(struct task_struct *p, |
1382 | const struct cpumask *new_mask) | |
1383 | { | |
1384 | } | |
cd8ba7cd | 1385 | static inline int set_cpus_allowed_ptr(struct task_struct *p, |
96f874e2 | 1386 | const struct cpumask *new_mask) |
1da177e4 | 1387 | { |
96f874e2 | 1388 | if (!cpumask_test_cpu(0, new_mask)) |
1da177e4 LT |
1389 | return -EINVAL; |
1390 | return 0; | |
1391 | } | |
1392 | #endif | |
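A sketch of a typical caller (function hypothetical; cpumask_of() comes from <linux/cpumask.h>) pinning a task to one CPU. The !SMP stub above models the same contract, where only masks containing CPU 0 succeed:

static int pin_task_sketch(struct task_struct *tsk, int cpu)
{
	/* Returns 0 on success or a -errno such as -EINVAL. */
	return set_cpus_allowed_ptr(tsk, cpumask_of(cpu));
}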
e0ad9556 | 1393 | |
6d0d2878 CB |
1394 | #ifndef cpu_relax_yield |
1395 | #define cpu_relax_yield() cpu_relax() | |
1396 | #endif | |
1397 | ||
36c8b586 | 1398 | extern unsigned long long |
41b86e9c | 1399 | task_sched_runtime(struct task_struct *task); |
1da177e4 LT |
1400 | |
1401 | /* sched_exec is called by processes performing an exec */ | |
1402 | #ifdef CONFIG_SMP | |
1403 | extern void sched_exec(void); | |
1404 | #else | |
1405 | #define sched_exec() {} | |
1406 | #endif | |
1407 | ||
fa93384f | 1408 | extern int yield_to(struct task_struct *p, bool preempt); |
36c8b586 IM |
1409 | extern void set_user_nice(struct task_struct *p, long nice); |
1410 | extern int task_prio(const struct task_struct *p); | |
d0ea0268 DY |
1411 | /** |
1412 | * task_nice - return the nice value of a given task. | |
1413 | * @p: the task in question. | |
1414 | * | |
1415 | * Return: The nice value [ -20 ... 0 ... 19 ]. | |
1416 | */ | |
1417 | static inline int task_nice(const struct task_struct *p) | |
1418 | { | |
1419 | return PRIO_TO_NICE((p)->static_prio); | |
1420 | } | |
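For orientation: in this era PRIO_TO_NICE() (from <linux/sched/prio.h>, included at the top of this header) is (prio) - DEFAULT_PRIO with DEFAULT_PRIO == 120, so:

/*
 *   static_prio 100  ->  nice -20   (strongest nice boost)
 *   static_prio 120  ->  nice   0   (the default)
 *   static_prio 139  ->  nice +19   (weakest)
 */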
36c8b586 IM |
1421 | extern int can_nice(const struct task_struct *p, const int nice); |
1422 | extern int task_curr(const struct task_struct *p); | |
1da177e4 | 1423 | extern int idle_cpu(int cpu); |
fe7de49f KM |
1424 | extern int sched_setscheduler(struct task_struct *, int, |
1425 | const struct sched_param *); | |
961ccddd | 1426 | extern int sched_setscheduler_nocheck(struct task_struct *, int, |
fe7de49f | 1427 | const struct sched_param *); |
d50dde5a DF |
1428 | extern int sched_setattr(struct task_struct *, |
1429 | const struct sched_attr *); | |
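A sketch of an in-kernel caller (function hypothetical; SCHED_FIFO comes from <uapi/linux/sched.h> and priority 50 is arbitrary). The _nocheck variant exists so kernel-internal callers can skip the capability and RLIMIT_RTPRIO checks:

static int make_rt_sketch(struct task_struct *tsk)
{
	struct sched_param sp = { .sched_priority = 50 };

	return sched_setscheduler_nocheck(tsk, SCHED_FIFO, &sp);
}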
36c8b586 | 1430 | extern struct task_struct *idle_task(int cpu); |
c4f30608 PM |
1431 | /** |
1432 | * is_idle_task - is the specified task an idle task? | |
fa757281 | 1433 | * @p: the task in question. |
e69f6186 YB |
1434 | * |
1435 | * Return: 1 if @p is an idle task. 0 otherwise. | |
c4f30608 | 1436 | */ |
7061ca3b | 1437 | static inline bool is_idle_task(const struct task_struct *p) |
c4f30608 | 1438 | { |
c1de45ca | 1439 | return !!(p->flags & PF_IDLE); |
c4f30608 | 1440 | } |
36c8b586 | 1441 | extern struct task_struct *curr_task(int cpu); |
a458ae2e | 1442 | extern void ia64_set_curr_task(int cpu, struct task_struct *p); |
1da177e4 LT |
1443 | |
1444 | void yield(void); | |
1445 | ||
1da177e4 | 1446 | union thread_union { |
c65eacbe | 1447 | #ifndef CONFIG_THREAD_INFO_IN_TASK |
1da177e4 | 1448 | struct thread_info thread_info; |
c65eacbe | 1449 | #endif |
1da177e4 LT |
1450 | unsigned long stack[THREAD_SIZE/sizeof(long)]; |
1451 | }; | |
1452 | ||
f3ac6067 IM |
1453 | #ifdef CONFIG_THREAD_INFO_IN_TASK |
1454 | static inline struct thread_info *task_thread_info(struct task_struct *task) | |
1455 | { | |
1456 | return &task->thread_info; | |
1457 | } | |
1458 | #elif !defined(__HAVE_THREAD_FUNCTIONS) | |
1459 | # define task_thread_info(task) ((struct thread_info *)(task)->stack) | |
1460 | #endif | |
1461 | ||
1da177e4 LT |
1462 | #ifndef __HAVE_ARCH_KSTACK_END |
1463 | static inline int kstack_end(void *addr) | |
1464 | { | |
1465 | /* Reliable end of stack detection: | |
1466 | * Some APM BIOS versions misalign the stack | 
1467 | */ | |
1468 | return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*))); | |
1469 | } | |
1470 | #endif | |
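A worked example of the mask arithmetic above, assuming THREAD_SIZE = 8 KiB and 8-byte pointers: THREAD_SIZE - sizeof(void *) = 0x1ff8, so the test reads !((addr + 7) & 0x1ff8). That is true only when addr, rounded up to the next pointer slot, lands on a THREAD_SIZE boundary, i.e. in the final pointer-sized slot of the stack region, with up to sizeof(void *) - 1 bytes of slack to tolerate the misaligned-BIOS case the comment mentions.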
1471 | ||
1472 | extern union thread_union init_thread_union; | |
1473 | extern struct task_struct init_task; | |
1474 | ||
198fe21b PE |
1475 | extern struct pid_namespace init_pid_ns; |
1476 | ||
1477 | /* | |
1478 | * find a task by one of its numerical ids | |
1479 | * | |
198fe21b PE |
1480 | * find_task_by_pid_ns(): |
1481 | * finds a task by its pid in the specified namespace | |
228ebcbe PE |
1482 | * find_task_by_vpid(): |
1483 | * finds a task by its virtual pid | |
198fe21b | 1484 | * |
e49859e7 | 1485 | * see also find_vpid() etc. in include/linux/pid.h | 
198fe21b PE |
1486 | */ |
1487 | ||
228ebcbe PE |
1488 | extern struct task_struct *find_task_by_vpid(pid_t nr); |
1489 | extern struct task_struct *find_task_by_pid_ns(pid_t nr, | |
1490 | struct pid_namespace *ns); | |
198fe21b | 1491 | |
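Both lookups must run under rcu_read_lock() (or with tasklist_lock held), and the result is only stable within that section. A sketch of the canonical pattern, taking a reference so the task may be used afterwards (function name hypothetical):

static struct task_struct *grab_task_sketch(pid_t nr)
{
	struct task_struct *p;

	rcu_read_lock();
	p = find_task_by_vpid(nr);
	if (p)
		get_task_struct(p);	/* pin beyond the RCU read section */
	rcu_read_unlock();

	return p;	/* caller pairs with put_task_struct() */
}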
b3c97528 HH |
1492 | extern int wake_up_state(struct task_struct *tsk, unsigned int state); |
1493 | extern int wake_up_process(struct task_struct *tsk); | |
3e51e3ed | 1494 | extern void wake_up_new_task(struct task_struct *tsk); |
1da177e4 LT |
1495 | #ifdef CONFIG_SMP |
1496 | extern void kick_process(struct task_struct *tsk); | |
1497 | #else | |
1498 | static inline void kick_process(struct task_struct *tsk) { } | |
1499 | #endif | |
1da177e4 | 1500 | |
1da177e4 | 1501 | extern void exit_files(struct task_struct *); |
cbaffba1 | 1502 | |
1da177e4 LT |
1503 | extern void exit_itimers(struct signal_struct *); |
1504 | ||
c4ad8f98 | 1505 | extern int do_execve(struct filename *, |
d7627467 | 1506 | const char __user * const __user *, |
da3d4c5f | 1507 | const char __user * const __user *); |
51f39a1f DD |
1508 | extern int do_execveat(int, struct filename *, |
1509 | const char __user * const __user *, | |
1510 | const char __user * const __user *, | |
1511 | int); | |
1da177e4 | 1512 | |
82b89778 AH |
1513 | extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec); |
1514 | static inline void set_task_comm(struct task_struct *tsk, const char *from) | |
1515 | { | |
1516 | __set_task_comm(tsk, from, false); | |
1517 | } | |
59714d65 | 1518 | extern char *get_task_comm(char *to, struct task_struct *tsk); |
1da177e4 LT |
1519 | |
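A sketch of reading a task name safely (function hypothetical, assuming <linux/printk.h>): ->comm is TASK_COMM_LEN bytes, defined earlier in this header, and get_task_comm() takes the task lock internally so the copy cannot race with a concurrent exec renaming the task:

static void report_comm_sketch(struct task_struct *tsk)
{
	char comm[TASK_COMM_LEN];

	get_task_comm(comm, tsk);
	pr_info("pid %d comm '%s'\n", tsk->pid, comm);
}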
1520 | #ifdef CONFIG_SMP | |
317f3941 | 1521 | void scheduler_ipi(void); |
85ba2d86 | 1522 | extern unsigned long wait_task_inactive(struct task_struct *, long match_state); |
1da177e4 | 1523 | #else |
184748cc | 1524 | static inline void scheduler_ipi(void) { } |
85ba2d86 RM |
1525 | static inline unsigned long wait_task_inactive(struct task_struct *p, |
1526 | long match_state) | |
1527 | { | |
1528 | return 1; | |
1529 | } | |
1da177e4 LT |
1530 | #endif |
1531 | ||
1da177e4 | 1532 | /* |
260ea101 | 1533 | * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring |
22e2c507 | 1534 | * subscriptions and synchronises with wait4(). Also used in procfs. Also |
ddbcc7e8 | 1535 | * pins the final release of task.io_context. Also protects ->cpuset and |
d68b46fe | 1536 | * ->cgroup.subsys[]. And ->vfork_done. |
1da177e4 LT |
1537 | * |
1538 | * Nests both inside and outside of read_lock(&tasklist_lock). | |
1539 | * It must not be nested with write_lock_irq(&tasklist_lock), | |
1540 | * neither inside nor outside. | |
1541 | */ | |
1542 | static inline void task_lock(struct task_struct *p) | |
1543 | { | |
1544 | spin_lock(&p->alloc_lock); | |
1545 | } | |
1546 | ||
1547 | static inline void task_unlock(struct task_struct *p) | |
1548 | { | |
1549 | spin_unlock(&p->alloc_lock); | |
1550 | } | |
1551 | ||
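A sketch of the pair in use (caller hypothetical): holding the lock stabilises the fields listed in the comment above, for example ->mm, against a concurrent exec or exit:

static bool task_has_mm_sketch(struct task_struct *p)
{
	bool has_mm;

	task_lock(p);
	has_mm = p->mm != NULL;
	task_unlock(p);

	return has_mm;
}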
1552 | /* set thread flags in other task's structures | |
1553 | * - see asm/thread_info.h for TIF_xxxx flags available | |
1554 | */ | |
1555 | static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag) | |
1556 | { | |
a1261f54 | 1557 | set_ti_thread_flag(task_thread_info(tsk), flag); |
1da177e4 LT |
1558 | } |
1559 | ||
1560 | static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag) | |
1561 | { | |
a1261f54 | 1562 | clear_ti_thread_flag(task_thread_info(tsk), flag); |
1da177e4 LT |
1563 | } |
1564 | ||
1565 | static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag) | |
1566 | { | |
a1261f54 | 1567 | return test_and_set_ti_thread_flag(task_thread_info(tsk), flag); |
1da177e4 LT |
1568 | } |
1569 | ||
1570 | static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag) | |
1571 | { | |
a1261f54 | 1572 | return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag); |
1da177e4 LT |
1573 | } |
1574 | ||
1575 | static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag) | |
1576 | { | |
a1261f54 | 1577 | return test_ti_thread_flag(task_thread_info(tsk), flag); |
1da177e4 LT |
1578 | } |
1579 | ||
1580 | static inline void set_tsk_need_resched(struct task_struct *tsk) | |
1581 | { | |
1582 | set_tsk_thread_flag(tsk, TIF_NEED_RESCHED); | 
1583 | } | |
1584 | ||
1585 | static inline void clear_tsk_need_resched(struct task_struct *tsk) | |
1586 | { | |
1587 | clear_tsk_thread_flag(tsk, TIF_NEED_RESCHED); | 
1588 | } | |
1589 | ||
8ae121ac GH |
1590 | static inline int test_tsk_need_resched(struct task_struct *tsk) |
1591 | { | |
1592 | return unlikely(test_tsk_thread_flag(tsk, TIF_NEED_RESCHED)); | 
1593 | } | |
1594 | ||
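These wrappers are how other subsystems poke a task's thread_info flags. A simplified sketch of typical use, modelled on what the signal code does when a signal becomes pending (TIF_SIGPENDING is arch-provided):

static void mark_sigpending_sketch(struct task_struct *t)
{
	set_tsk_thread_flag(t, TIF_SIGPENDING);
}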
1da177e4 LT |
1595 | /* |
1596 | * cond_resched() and cond_resched_lock(): latency reduction via | |
1597 | * explicit rescheduling in places that are safe. The return | |
1598 | * value indicates whether a reschedule was done in fact. | |
1599 | * cond_resched_lock() will drop the spinlock before scheduling, | |
1600 | * cond_resched_softirq() will enable BHs (bottom halves) before scheduling. | 
1601 | */ | |
35a773a0 | 1602 | #ifndef CONFIG_PREEMPT |
c3921ab7 | 1603 | extern int _cond_resched(void); |
35a773a0 PZ |
1604 | #else |
1605 | static inline int _cond_resched(void) { return 0; } | |
1606 | #endif | |
6f80bd98 | 1607 | |
613afbf8 | 1608 | #define cond_resched() ({ \ |
3427445a | 1609 | ___might_sleep(__FILE__, __LINE__, 0); \ |
613afbf8 FW |
1610 | _cond_resched(); \ |
1611 | }) | |
6f80bd98 | 1612 | |
613afbf8 FW |
1613 | extern int __cond_resched_lock(spinlock_t *lock); |
1614 | ||
1615 | #define cond_resched_lock(lock) ({ \ | |
3427445a | 1616 | ___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\ |
613afbf8 FW |
1617 | __cond_resched_lock(lock); \ |
1618 | }) | |
1619 | ||
1620 | extern int __cond_resched_softirq(void); | |
1621 | ||
75e1056f | 1622 | #define cond_resched_softirq() ({ \ |
3427445a | 1623 | ___might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \ |
75e1056f | 1624 | __cond_resched_softirq(); \ |
613afbf8 | 1625 | }) |
1da177e4 | 1626 | |
f6f3c437 SH |
1627 | static inline void cond_resched_rcu(void) |
1628 | { | |
1629 | #if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU) | |
1630 | rcu_read_unlock(); | |
1631 | cond_resched(); | |
1632 | rcu_read_lock(); | |
1633 | #endif | |
1634 | } | |
1635 | ||
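A sketch of the usual shape for breaking up a long-running loop (helper hypothetical); on !CONFIG_PREEMPT kernels this is often the loop's only reschedule point:

static void scrub_range_sketch(unsigned long nr_items)
{
	unsigned long i;

	for (i = 0; i < nr_items; i++) {
		process_one_item(i);	/* hypothetical per-item work */
		cond_resched();		/* voluntary preemption point */
	}
}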
1da177e4 LT |
1636 | /* |
1637 | * Does a critical section need to be broken due to another | |
95c354fe NP |
1638 | * task waiting?: (technically does not depend on CONFIG_PREEMPT, |
1639 | * but a general need for low latency) | |
1da177e4 | 1640 | */ |
95c354fe | 1641 | static inline int spin_needbreak(spinlock_t *lock) |
1da177e4 | 1642 | { |
95c354fe NP |
1643 | #ifdef CONFIG_PREEMPT |
1644 | return spin_is_contended(lock); | |
1645 | #else | |
1da177e4 | 1646 | return 0; |
95c354fe | 1647 | #endif |
1da177e4 LT |
1648 | } |
1649 | ||
75f93fed PZ |
1650 | static __always_inline bool need_resched(void) |
1651 | { | |
1652 | return unlikely(tif_need_resched()); | |
1653 | } | |
1654 | ||
f06febc9 FM |
1655 | /* |
1656 | * Thread group CPU time accounting. | |
1657 | */ | |
4cd4c1b4 | 1658 | void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times); |
4da94d49 | 1659 | void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times); |
f06febc9 | 1660 | |
1da177e4 LT |
1661 | /* |
1662 | * Wrappers for the task's CPU field (->cpu, or thread_info->cpu). No-op on UP. | 
1663 | */ | |
1664 | #ifdef CONFIG_SMP | |
1665 | ||
1666 | static inline unsigned int task_cpu(const struct task_struct *p) | |
1667 | { | |
c65eacbe AL |
1668 | #ifdef CONFIG_THREAD_INFO_IN_TASK |
1669 | return p->cpu; | |
1670 | #else | |
a1261f54 | 1671 | return task_thread_info(p)->cpu; |
c65eacbe | 1672 | #endif |
1da177e4 LT |
1673 | } |
1674 | ||
b32e86b4 IM |
1675 | static inline int task_node(const struct task_struct *p) |
1676 | { | |
1677 | return cpu_to_node(task_cpu(p)); | |
1678 | } | |
1679 | ||
c65cc870 | 1680 | extern void set_task_cpu(struct task_struct *p, unsigned int cpu); |
1da177e4 LT |
1681 | |
1682 | #else | |
1683 | ||
1684 | static inline unsigned int task_cpu(const struct task_struct *p) | |
1685 | { | |
1686 | return 0; | |
1687 | } | |
1688 | ||
1689 | static inline void set_task_cpu(struct task_struct *p, unsigned int cpu) | |
1690 | { | |
1691 | } | |
1692 | ||
1693 | #endif /* CONFIG_SMP */ | |
1694 | ||
d9345c65 PX |
1695 | /* |
1696 | * In order to reduce various lock holder preemption latencies provide an | |
1697 | * interface to see if a vCPU is currently running or not. | |
1698 | * | |
1699 | * This allows us to terminate optimistic spin loops and block, analogous to | |
1700 | * the native optimistic spin heuristic of testing if the lock owner task is | |
1701 | * running or not. | |
1702 | */ | |
1703 | #ifndef vcpu_is_preempted | |
1704 | # define vcpu_is_preempted(cpu) false | |
1705 | #endif | |
1706 | ||
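A sketch of the optimistic-spin shape this hook enables (lock layout and owner-CPU tracking hypothetical): spinning is only worthwhile while the lock holder's vCPU is actually running:

static bool spin_on_owner_sketch(atomic_t *locked, int owner_cpu)
{
	while (atomic_read(locked)) {
		if (vcpu_is_preempted(owner_cpu))
			return false;	/* holder scheduled out: block instead */
		cpu_relax();
	}

	return true;	/* lock was released while we spun */
}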
96f874e2 RR |
1707 | extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask); |
1708 | extern long sched_getaffinity(pid_t pid, struct cpumask *mask); | |
5c45bf27 | 1709 | |
7c941438 | 1710 | #ifdef CONFIG_CGROUP_SCHED |
07e06b01 | 1711 | extern struct task_group root_task_group; |
8323f26c | 1712 | #endif /* CONFIG_CGROUP_SCHED */ |
9b5b7751 | 1713 | |
54e99124 DG |
1714 | extern int task_can_switch_user(struct user_struct *up, |
1715 | struct task_struct *tsk); | |
1716 | ||
82455257 DH |
1717 | #ifndef TASK_SIZE_OF |
1718 | #define TASK_SIZE_OF(tsk) TASK_SIZE | |
1719 | #endif | |
1720 | ||
1da177e4 | 1721 | #endif |