#ifndef _LINUX_SCHED_H
#define _LINUX_SCHED_H

#include <uapi/linux/sched.h>

#include <linux/sched/prio.h>


struct sched_param {
	int sched_priority;
};

#include <asm/param.h>	/* for HZ */

#include <linux/capability.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/plist.h>
#include <linux/rbtree.h>
#include <linux/thread_info.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/nodemask.h>
#include <linux/mm_types.h>
#include <linux/preempt.h>

#include <asm/page.h>
#include <asm/ptrace.h>
#include <linux/cputime.h>

#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/shm.h>
#include <linux/signal.h>
#include <linux/compiler.h>
#include <linux/completion.h>
#include <linux/pid.h>
#include <linux/percpu.h>
#include <linux/topology.h>
#include <linux/seccomp.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/rtmutex.h>

#include <linux/time.h>
#include <linux/param.h>
#include <linux/resource.h>
#include <linux/timer.h>
#include <linux/hrtimer.h>
#include <linux/kcov.h>
#include <linux/task_io_accounting.h>
#include <linux/latencytop.h>
#include <linux/cred.h>
#include <linux/llist.h>
#include <linux/uidgid.h>
#include <linux/gfp.h>
#include <linux/magic.h>
#include <linux/cgroup-defs.h>

#include <asm/processor.h>

#define SCHED_ATTR_SIZE_VER0 48	/* sizeof first published struct */

/*
 * Extended scheduling parameters data structure.
 *
 * This is needed because the original struct sched_param can not be
 * altered without introducing ABI issues with legacy applications
 * (e.g., in sched_getparam()).
 *
 * However, the possibility of specifying more than just a priority for
 * the tasks may be useful for a wide variety of application fields, e.g.,
 * multimedia, streaming, automation and control, and many others.
 *
 * This variant (sched_attr) is meant to describe a so-called
 * sporadic time-constrained task. In such a model, a task is specified by:
 *  - the activation period or minimum instance inter-arrival time;
 *  - the maximum (or average, depending on the actual scheduling
 *    discipline) computation time of all instances, a.k.a. runtime;
 *  - the deadline (relative to the actual activation time) of each
 *    instance.
 * Very briefly, a periodic (sporadic) task asks for the execution of
 * some specific computation --which is typically called an instance--
 * (at most) every period. Moreover, each instance typically lasts no more
 * than the runtime and must be completed by time instant t equal to
 * the instance activation time + the deadline.
 *
 * This is reflected by the actual fields of the sched_attr structure:
 *
 *  @size		size of the structure, for fwd/bwd compat.
 *
 *  @sched_policy	task's scheduling policy
 *  @sched_flags	for customizing the scheduler behaviour
 *  @sched_nice		task's nice value      (SCHED_NORMAL/BATCH)
 *  @sched_priority	task's static priority (SCHED_FIFO/RR)
 *  @sched_deadline	representative of the task's deadline
 *  @sched_runtime	representative of the task's runtime
 *  @sched_period	representative of the task's period
 *
 * Given this task model, there is a multiplicity of scheduling algorithms
 * and policies that can be used to ensure all the tasks will meet their
 * timing constraints.
 *
 * As of now, the SCHED_DEADLINE policy (sched_dl scheduling class) is the
 * only user of this new interface. More information about the algorithm
 * is available in the scheduling class file or in Documentation/.
 */
struct sched_attr {
	u32 size;

	u32 sched_policy;
	u64 sched_flags;

	/* SCHED_NORMAL, SCHED_BATCH */
	s32 sched_nice;

	/* SCHED_FIFO, SCHED_RR */
	u32 sched_priority;

	/* SCHED_DEADLINE */
	u64 sched_runtime;
	u64 sched_deadline;
	u64 sched_period;
};
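
/*
 * Illustrative sketch (not part of this header): userspace could request
 * SCHED_DEADLINE roughly as follows, using a raw syscall since glibc
 * provides no sched_setattr() wrapper. All times are in nanoseconds,
 * with runtime <= deadline <= period:
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_DEADLINE,
 *		.sched_runtime	= 10 * 1000 * 1000,	(10ms of budget)
 *		.sched_deadline	= 30 * 1000 * 1000,	(within 30ms)
 *		.sched_period	= 30 * 1000 * 1000,	(every 30ms)
 *	};
 *
 *	if (syscall(__NR_sched_setattr, 0, &attr, 0))
 *		perror("sched_setattr");
 */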

struct futex_pi_state;
struct robust_list_head;
struct bio_list;
struct fs_struct;
struct perf_event_context;
struct blk_plug;
struct filename;
struct nameidata;

#define VMACACHE_BITS 2
#define VMACACHE_SIZE (1U << VMACACHE_BITS)
#define VMACACHE_MASK (VMACACHE_SIZE - 1)

/*
 * These are the constants used to fake the fixed-point load-average
 * counting. Some notes:
 *  - 11 bit fractions expand to 22 bits by the multiplies: this gives
 *    a load-average precision of 10 bits integer + 11 bits fractional
 *  - if you want to count load-averages more often, you need more
 *    precision, or rounding will get you. With 2-second counting freq,
 *    the EXP_n values would be 1981, 2034 and 2043 if still using only
 *    11 bit fractions.
 */
extern unsigned long avenrun[];		/* Load averages */
extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);

#define FSHIFT		11		/* nr of bits of precision */
#define FIXED_1		(1<<FSHIFT)	/* 1.0 as fixed-point */
#define LOAD_FREQ	(5*HZ+1)	/* 5 sec intervals */
#define EXP_1		1884		/* 1/exp(5sec/1min) as fixed-point */
#define EXP_5		2014		/* 1/exp(5sec/5min) */
#define EXP_15		2037		/* 1/exp(5sec/15min) */

#define CALC_LOAD(load,exp,n) \
	load *= exp; \
	load += n*(FIXED_1-exp); \
	load >>= FSHIFT;

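/*
 * A worked step of the fixed-point math above (a sketch, not kernel
 * code): with FSHIFT = 11, an average of 2.0 is stored as 2*FIXED_1 =
 * 4096. One CALC_LOAD step with 3 tasks currently active decays the
 * average towards 3.0:
 *
 *	unsigned long load = 2 * FIXED_1;
 *	CALC_LOAD(load, EXP_1, 3 * FIXED_1);
 *	(load is now ~2.08 * FIXED_1)
 */
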
extern unsigned long total_forks;
extern int nr_threads;
DECLARE_PER_CPU(unsigned long, process_counts);
extern int nr_processes(void);
extern unsigned long nr_running(void);
extern bool single_task_running(void);
extern unsigned long nr_iowait(void);
extern unsigned long nr_iowait_cpu(int cpu);
extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);

extern void calc_global_load(unsigned long ticks);

#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
extern void cpu_load_update_nohz_start(void);
extern void cpu_load_update_nohz_stop(void);
#else
static inline void cpu_load_update_nohz_start(void) { }
static inline void cpu_load_update_nohz_stop(void) { }
#endif

extern void dump_cpu_task(int cpu);

struct seq_file;
struct cfs_rq;
struct task_group;
#ifdef CONFIG_SCHED_DEBUG
extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
extern void proc_sched_set_task(struct task_struct *p);
#endif

/*
 * Task state bitmask. NOTE! These bits are also
 * encoded in fs/proc/array.c: get_task_state().
 *
 * We have two separate sets of flags: task->state
 * is about runnability, while task->exit_state is
 * about the task exiting. Confusing, but this way
 * modifying one set can't modify the other one by
 * mistake.
 */
#define TASK_RUNNING		0
#define TASK_INTERRUPTIBLE	1
#define TASK_UNINTERRUPTIBLE	2
#define __TASK_STOPPED		4
#define __TASK_TRACED		8
/* in tsk->exit_state */
#define EXIT_DEAD		16
#define EXIT_ZOMBIE		32
#define EXIT_TRACE		(EXIT_ZOMBIE | EXIT_DEAD)
/* in tsk->state again */
#define TASK_DEAD		64
#define TASK_WAKEKILL		128
#define TASK_WAKING		256
#define TASK_PARKED		512
#define TASK_NOLOAD		1024
#define TASK_NEW		2048
#define TASK_STATE_MAX		4096

#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWPNn"

extern char ___assert_task_state[1 - 2*!!(
		sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];

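/*
 * The declaration above is a compile-time assertion (a sketch of the
 * trick with plain constants; nothing is ever allocated): the array size
 * evaluates to 1 when the condition is false and to -1, a build error,
 * when it is true, so TASK_STATE_TO_CHAR_STR can never silently get out
 * of sync with TASK_STATE_MAX:
 *
 *	extern char ok[1 - 2*!!(0)];	(size  1: compiles)
 *	extern char bad[1 - 2*!!(1)];	(size -1: build error)
 */
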
/* Convenience macros for the sake of set_task_state */
#define TASK_KILLABLE		(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
#define TASK_STOPPED		(TASK_WAKEKILL | __TASK_STOPPED)
#define TASK_TRACED		(TASK_WAKEKILL | __TASK_TRACED)

#define TASK_IDLE		(TASK_UNINTERRUPTIBLE | TASK_NOLOAD)

/* Convenience macros for the sake of wake_up */
#define TASK_NORMAL		(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
#define TASK_ALL		(TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)

/* get_task_state() */
#define TASK_REPORT		(TASK_RUNNING | TASK_INTERRUPTIBLE | \
				 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
				 __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD)

#define task_is_traced(task)	((task->state & __TASK_TRACED) != 0)
#define task_is_stopped(task)	((task->state & __TASK_STOPPED) != 0)
#define task_is_stopped_or_traced(task)	\
			((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
#define task_contributes_to_load(task)	\
				((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
				 (task->flags & PF_FROZEN) == 0 && \
				 (task->state & TASK_NOLOAD) == 0)

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP

#define __set_task_state(tsk, state_value)			\
	do {							\
		(tsk)->task_state_change = _THIS_IP_;		\
		(tsk)->state = (state_value);			\
	} while (0)
#define set_task_state(tsk, state_value)			\
	do {							\
		(tsk)->task_state_change = _THIS_IP_;		\
		smp_store_mb((tsk)->state, (state_value));	\
	} while (0)

#define __set_current_state(state_value)			\
	do {							\
		current->task_state_change = _THIS_IP_;		\
		current->state = (state_value);			\
	} while (0)
#define set_current_state(state_value)				\
	do {							\
		current->task_state_change = _THIS_IP_;		\
		smp_store_mb(current->state, (state_value));	\
	} while (0)

#else

/*
 * @tsk had better be current, or you get to keep the pieces.
 *
 * The only reason is that computing current can be more expensive than
 * using a pointer that's already available.
 *
 * Therefore, see set_current_state().
 */
#define __set_task_state(tsk, state_value)		\
	do { (tsk)->state = (state_value); } while (0)
#define set_task_state(tsk, state_value)		\
	smp_store_mb((tsk)->state, (state_value))

/*
 * set_current_state() includes a barrier so that the write of current->state
 * is correctly serialised wrt the caller's subsequent test of whether to
 * actually sleep:
 *
 *   for (;;) {
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	if (!need_sleep)
 *		break;
 *
 *	schedule();
 *   }
 *   __set_current_state(TASK_RUNNING);
 *
 * If the caller does not need such serialisation (because, for instance, the
 * condition test and condition change and wakeup are under the same lock) then
 * use __set_current_state().
 *
 * The above is typically ordered against the wakeup, which does:
 *
 *   need_sleep = false;
 *   wake_up_state(p, TASK_UNINTERRUPTIBLE);
 *
 * Where wake_up_state() (and all other wakeup primitives) imply enough
 * barriers to order the store of the variable against wakeup.
 *
 * Wakeup will do: if (@state & p->state) p->state = TASK_RUNNING, that is,
 * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a
 * TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING).
 *
 * This is obviously fine, since they both store the exact same value.
 *
 * Also see the comments of try_to_wake_up().
 */
#define __set_current_state(state_value)		\
	do { current->state = (state_value); } while (0)
#define set_current_state(state_value)			\
	smp_store_mb(current->state, (state_value))

#endif

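/*
 * A minimal bounded-sleep sketch built from the helpers above
 * (illustrative only). The task state must be set before calling
 * schedule_timeout() (declared further down), otherwise the task is
 * still TASK_RUNNING and will not really sleep:
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	remaining = schedule_timeout(HZ);	(sleep for up to ~1 second)
 *
 * The schedule_timeout_interruptible() and friends declared below bundle
 * exactly this set_current_state() + schedule_timeout() pair.
 */
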
/* Task command name length */
#define TASK_COMM_LEN 16

#include <linux/spinlock.h>

/*
 * This serializes "schedule()" and also protects
 * the run-queue from deletions/modifications (but
 * _adding_ to the beginning of the run-queue has
 * a separate lock).
 */
extern rwlock_t tasklist_lock;
extern spinlock_t mmlist_lock;

struct task_struct;

#ifdef CONFIG_PROVE_RCU
extern int lockdep_tasklist_lock_is_held(void);
#endif /* #ifdef CONFIG_PROVE_RCU */

extern void sched_init(void);
extern void sched_init_smp(void);
extern asmlinkage void schedule_tail(struct task_struct *prev);
extern void init_idle(struct task_struct *idle, int cpu);
extern void init_idle_bootup_task(struct task_struct *idle);

extern cpumask_var_t cpu_isolated_map;

extern int runqueue_is_locked(int cpu);

#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
extern void nohz_balance_enter_idle(int cpu);
extern void set_cpu_sd_state_idle(void);
extern int get_nohz_timer_target(void);
#else
static inline void nohz_balance_enter_idle(int cpu) { }
static inline void set_cpu_sd_state_idle(void) { }
#endif

/*
 * Only dump TASK_* tasks. (0 for all tasks)
 */
extern void show_state_filter(unsigned long state_filter);

static inline void show_state(void)
{
	show_state_filter(0);
}

extern void show_regs(struct pt_regs *);

/*
 * TASK is a pointer to the task whose backtrace we want to see (or NULL for current
 * task), SP is the stack pointer of the first frame that should be shown in the back
 * trace (or NULL if the entire call-chain of the task should be shown).
 */
extern void show_stack(struct task_struct *task, unsigned long *sp);

extern void cpu_init(void);
extern void trap_init(void);
extern void update_process_times(int user);
extern void scheduler_tick(void);
extern int sched_cpu_starting(unsigned int cpu);
extern int sched_cpu_activate(unsigned int cpu);
extern int sched_cpu_deactivate(unsigned int cpu);

#ifdef CONFIG_HOTPLUG_CPU
extern int sched_cpu_dying(unsigned int cpu);
#else
# define sched_cpu_dying	NULL
#endif

extern void sched_show_task(struct task_struct *p);

#ifdef CONFIG_LOCKUP_DETECTOR
extern void touch_softlockup_watchdog_sched(void);
extern void touch_softlockup_watchdog(void);
extern void touch_softlockup_watchdog_sync(void);
extern void touch_all_softlockup_watchdogs(void);
extern int proc_dowatchdog_thresh(struct ctl_table *table, int write,
				  void __user *buffer,
				  size_t *lenp, loff_t *ppos);
extern unsigned int softlockup_panic;
extern unsigned int hardlockup_panic;
void lockup_detector_init(void);
#else
static inline void touch_softlockup_watchdog_sched(void)
{
}
static inline void touch_softlockup_watchdog(void)
{
}
static inline void touch_softlockup_watchdog_sync(void)
{
}
static inline void touch_all_softlockup_watchdogs(void)
{
}
static inline void lockup_detector_init(void)
{
}
#endif

#ifdef CONFIG_DETECT_HUNG_TASK
void reset_hung_task_detector(void);
#else
static inline void reset_hung_task_detector(void)
{
}
#endif

/* Attach to any functions which should be ignored in wchan output. */
#define __sched		__attribute__((__section__(".sched.text")))

/* Linker adds these: start and end of __sched functions */
extern char __sched_text_start[], __sched_text_end[];

/* Is this address in the __sched functions? */
extern int in_sched_functions(unsigned long addr);

#define MAX_SCHEDULE_TIMEOUT	LONG_MAX
extern signed long schedule_timeout(signed long timeout);
extern signed long schedule_timeout_interruptible(signed long timeout);
extern signed long schedule_timeout_killable(signed long timeout);
extern signed long schedule_timeout_uninterruptible(signed long timeout);
extern signed long schedule_timeout_idle(signed long timeout);
asmlinkage void schedule(void);
extern void schedule_preempt_disabled(void);

extern long io_schedule_timeout(long timeout);

static inline void io_schedule(void)
{
	io_schedule_timeout(MAX_SCHEDULE_TIMEOUT);
}

void __noreturn do_task_dead(void);

struct nsproxy;
struct user_namespace;

#ifdef CONFIG_MMU
extern void arch_pick_mmap_layout(struct mm_struct *mm);
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
		       unsigned long, unsigned long);
extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
			       unsigned long len, unsigned long pgoff,
			       unsigned long flags);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
#endif

#define SUID_DUMP_DISABLE	0	/* No setuid dumping */
#define SUID_DUMP_USER		1	/* Dump as user of process */
#define SUID_DUMP_ROOT		2	/* Dump as root */

/* mm flags */

/* for SUID_DUMP_* above */
#define MMF_DUMPABLE_BITS 2
#define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1)

extern void set_dumpable(struct mm_struct *mm, int value);
/*
 * This returns the actual value of the suid_dumpable flag. For things
 * that are using this for checking for privilege transitions, it must
 * test against SUID_DUMP_USER rather than treating it as a boolean
 * value.
 */
static inline int __get_dumpable(unsigned long mm_flags)
{
	return mm_flags & MMF_DUMPABLE_MASK;
}

static inline int get_dumpable(struct mm_struct *mm)
{
	return __get_dumpable(mm->flags);
}
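
/*
 * For example (a sketch of the rule above), a privilege-transition check
 * must compare against SUID_DUMP_USER instead of testing for non-zero,
 * since SUID_DUMP_ROOT is also non-zero:
 *
 *	if (get_dumpable(mm) != SUID_DUMP_USER)
 *		return -EPERM;		(illustrative)
 */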

/* coredump filter bits */
#define MMF_DUMP_ANON_PRIVATE	2
#define MMF_DUMP_ANON_SHARED	3
#define MMF_DUMP_MAPPED_PRIVATE	4
#define MMF_DUMP_MAPPED_SHARED	5
#define MMF_DUMP_ELF_HEADERS	6
#define MMF_DUMP_HUGETLB_PRIVATE 7
#define MMF_DUMP_HUGETLB_SHARED  8
#define MMF_DUMP_DAX_PRIVATE	9
#define MMF_DUMP_DAX_SHARED	10

#define MMF_DUMP_FILTER_SHIFT	MMF_DUMPABLE_BITS
#define MMF_DUMP_FILTER_BITS	9
#define MMF_DUMP_FILTER_MASK \
	(((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
#define MMF_DUMP_FILTER_DEFAULT \
	((1 << MMF_DUMP_ANON_PRIVATE) | (1 << MMF_DUMP_ANON_SHARED) |\
	 (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)

#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
# define MMF_DUMP_MASK_DEFAULT_ELF	(1 << MMF_DUMP_ELF_HEADERS)
#else
# define MMF_DUMP_MASK_DEFAULT_ELF	0
#endif
					/* leave room for more dump flags */
#define MMF_VM_MERGEABLE	16	/* KSM may merge identical pages */
#define MMF_VM_HUGEPAGE		17	/* set when VM_HUGEPAGE is set on vma */
#define MMF_EXE_FILE_CHANGED	18	/* see prctl_set_mm_exe_file() */

#define MMF_HAS_UPROBES		19	/* has uprobes */
#define MMF_RECALC_UPROBES	20	/* MMF_HAS_UPROBES can be wrong */
#define MMF_OOM_SKIP		21	/* mm is of no interest for the OOM killer */
#define MMF_UNSTABLE		22	/* mm is unstable for copy_from_user */
#define MMF_HUGE_ZERO_PAGE	23	/* mm has ever used the global huge zero page */

#define MMF_INIT_MASK		(MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)

struct sighand_struct {
	atomic_t count;
	struct k_sigaction action[_NSIG];
	spinlock_t siglock;
	wait_queue_head_t signalfd_wqh;
};

struct pacct_struct {
	int ac_flag;
	long ac_exitcode;
	unsigned long ac_mem;
	cputime_t ac_utime, ac_stime;
	unsigned long ac_minflt, ac_majflt;
};

struct cpu_itimer {
	cputime_t expires;
	cputime_t incr;
	u32 error;
	u32 incr_error;
};

/**
 * struct prev_cputime - snapshot of system and user cputime
 * @utime: time spent in user mode
 * @stime: time spent in system mode
 * @lock: protects the above two fields
 *
 * Stores previous user/system time values such that we can guarantee
 * monotonicity.
 */
struct prev_cputime {
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	cputime_t utime;
	cputime_t stime;
	raw_spinlock_t lock;
#endif
};

static inline void prev_cputime_init(struct prev_cputime *prev)
{
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	prev->utime = prev->stime = 0;
	raw_spin_lock_init(&prev->lock);
#endif
}

/**
 * struct task_cputime - collected CPU time counts
 * @utime:		time spent in user mode, in &cputime_t units
 * @stime:		time spent in kernel mode, in &cputime_t units
 * @sum_exec_runtime:	total time spent on the CPU, in nanoseconds
 *
 * This structure groups together three kinds of CPU time that are tracked for
 * threads and thread groups. Most things considering CPU time want to group
 * these counts together and treat all three of them in parallel.
 */
struct task_cputime {
	cputime_t utime;
	cputime_t stime;
	unsigned long long sum_exec_runtime;
};

/* Alternate field names when used to cache expirations. */
#define virt_exp	utime
#define prof_exp	stime
#define sched_exp	sum_exec_runtime

#define INIT_CPUTIME	\
	(struct task_cputime) {				\
		.utime = 0,				\
		.stime = 0,				\
		.sum_exec_runtime = 0,			\
	}

/*
 * This is the atomic variant of task_cputime, which can be used for
 * storing and updating task_cputime statistics without locking.
 */
struct task_cputime_atomic {
	atomic64_t utime;
	atomic64_t stime;
	atomic64_t sum_exec_runtime;
};

#define INIT_CPUTIME_ATOMIC \
	(struct task_cputime_atomic) {			\
		.utime = ATOMIC64_INIT(0),		\
		.stime = ATOMIC64_INIT(0),		\
		.sum_exec_runtime = ATOMIC64_INIT(0),	\
	}

#define PREEMPT_DISABLED	(PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)

/*
 * Disable preemption until the scheduler is running -- use an unconditional
 * value so that it also works on !PREEMPT_COUNT kernels.
 *
 * Reset by start_kernel()->sched_init()->init_idle()->init_idle_preempt_count().
 */
#define INIT_PREEMPT_COUNT	PREEMPT_OFFSET

/*
 * Initial preempt_count value; reflects the preempt_count schedule invariant
 * which states that during context switches:
 *
 *    preempt_count() == 2*PREEMPT_DISABLE_OFFSET
 *
 * Note: PREEMPT_DISABLE_OFFSET is 0 for !PREEMPT_COUNT kernels.
 * Note: See finish_task_switch().
 */
#define FORK_PREEMPT_COUNT	(2*PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)

/**
 * struct thread_group_cputimer - thread group interval timer counts
 * @cputime_atomic:	atomic thread group interval timers.
 * @running:		true when there are timers running and
 *			@cputime_atomic receives updates.
 * @checking_timer:	true when a thread in the group is in the
 *			process of checking for thread group timers.
 *
 * This structure contains the version of task_cputime, above, that is
 * used for thread group CPU timer calculations.
 */
struct thread_group_cputimer {
	struct task_cputime_atomic cputime_atomic;
	bool running;
	bool checking_timer;
};

#include <linux/rwsem.h>
struct autogroup;

/*
 * NOTE! "signal_struct" does not have its own
 * locking, because a shared signal_struct always
 * implies a shared sighand_struct, so locking
 * sighand_struct is always a proper superset of
 * the locking of signal_struct.
 */
struct signal_struct {
	atomic_t sigcnt;
	atomic_t live;
	int nr_threads;
	struct list_head thread_head;

	wait_queue_head_t wait_chldexit;	/* for wait4() */

	/* current thread group signal load-balancing target: */
	struct task_struct *curr_target;

	/* shared signal handling: */
	struct sigpending shared_pending;

	/* thread group exit support */
	int group_exit_code;
	/* overloaded:
	 * - notify group_exit_task when ->count is equal to notify_count
	 * - everyone except group_exit_task is stopped during signal delivery
	 *   of fatal signals, group_exit_task processes the signal.
	 */
	int notify_count;
	struct task_struct *group_exit_task;

	/* thread group stop support, overloads group_exit_code too */
	int group_stop_count;
	unsigned int flags; /* see SIGNAL_* flags below */

	/*
	 * PR_SET_CHILD_SUBREAPER marks a process, like a service
	 * manager, to re-parent orphan (double-forking) child processes
	 * to this process instead of 'init'. The service manager is
	 * able to receive SIGCHLD signals and is able to investigate
	 * the process until it calls wait(). All children of this
	 * process will inherit a flag if they should look for a
	 * child_subreaper process at exit.
	 */
	unsigned int is_child_subreaper:1;
	unsigned int has_child_subreaper:1;

	/* POSIX.1b Interval Timers */
	int posix_timer_id;
	struct list_head posix_timers;

	/* ITIMER_REAL timer for the process */
	struct hrtimer real_timer;
	struct pid *leader_pid;
	ktime_t it_real_incr;

	/*
	 * ITIMER_PROF and ITIMER_VIRTUAL timers for the process, we use
	 * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing array as these
	 * values are defined to 0 and 1 respectively
	 */
	struct cpu_itimer it[2];

	/*
	 * Thread group totals for process CPU timers.
	 * See thread_group_cputimer(), et al, for details.
	 */
	struct thread_group_cputimer cputimer;

	/* Earliest-expiration cache. */
	struct task_cputime cputime_expires;

#ifdef CONFIG_NO_HZ_FULL
	atomic_t tick_dep_mask;
#endif

	struct list_head cpu_timers[3];

	struct pid *tty_old_pgrp;

	/* boolean value for session group leader */
	int leader;

	struct tty_struct *tty; /* NULL if no tty */

#ifdef CONFIG_SCHED_AUTOGROUP
	struct autogroup *autogroup;
#endif
	/*
	 * Cumulative resource counters for dead threads in the group,
	 * and for reaped dead child processes forked by this group.
	 * Live threads maintain their own counters and add to these
	 * in __exit_signal, except for the group leader.
	 */
	seqlock_t stats_lock;
	cputime_t utime, stime, cutime, cstime;
	cputime_t gtime;
	cputime_t cgtime;
	struct prev_cputime prev_cputime;
	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
	unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
	unsigned long inblock, oublock, cinblock, coublock;
	unsigned long maxrss, cmaxrss;
	struct task_io_accounting ioac;

	/*
	 * Cumulative ns of schedule CPU time of dead threads in the
	 * group, not including a zombie group leader. (This only differs
	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
	 * other than jiffies.)
	 */
	unsigned long long sum_sched_runtime;

	/*
	 * We don't bother to synchronize most readers of this at all,
	 * because there is no reader checking a limit that actually needs
	 * to get both rlim_cur and rlim_max atomically, and either one
	 * alone is a single word that can safely be read normally.
	 * getrlimit/setrlimit use task_lock(current->group_leader) to
	 * protect this instead of the siglock, because they really
	 * have no need to disable irqs.
	 */
	struct rlimit rlim[RLIM_NLIMITS];

#ifdef CONFIG_BSD_PROCESS_ACCT
	struct pacct_struct pacct;	/* per-process accounting information */
#endif
#ifdef CONFIG_TASKSTATS
	struct taskstats *stats;
#endif
#ifdef CONFIG_AUDIT
	unsigned audit_tty;
	struct tty_audit_buf *tty_audit_buf;
#endif

	/*
	 * Thread is the potential origin of an oom condition; kill first on
	 * oom
	 */
	bool oom_flag_origin;
	short oom_score_adj;		/* OOM kill score adjustment */
	short oom_score_adj_min;	/* OOM kill score adjustment min value.
					 * Only settable by CAP_SYS_RESOURCE. */
	struct mm_struct *oom_mm;	/* recorded mm when the thread group got
					 * killed by the oom killer */

	struct mutex cred_guard_mutex;	/* guard against foreign influences on
					 * credential calculations
					 * (notably ptrace) */
};

/*
 * Bits in flags field of signal_struct.
 */
#define SIGNAL_STOP_STOPPED	0x00000001 /* job control stop in effect */
#define SIGNAL_STOP_CONTINUED	0x00000002 /* SIGCONT since WCONTINUED reap */
#define SIGNAL_GROUP_EXIT	0x00000004 /* group exit in progress */
#define SIGNAL_GROUP_COREDUMP	0x00000008 /* coredump in progress */
/*
 * Pending notifications to parent.
 */
#define SIGNAL_CLD_STOPPED	0x00000010
#define SIGNAL_CLD_CONTINUED	0x00000020
#define SIGNAL_CLD_MASK		(SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)

#define SIGNAL_UNKILLABLE	0x00000040 /* for init: ignore fatal signals */

/* If true, all threads except ->group_exit_task have pending SIGKILL */
static inline int signal_group_exit(const struct signal_struct *sig)
{
	return	(sig->flags & SIGNAL_GROUP_EXIT) ||
		(sig->group_exit_task != NULL);
}

/*
 * Some day this will be a full-fledged user tracking system..
 */
struct user_struct {
	atomic_t __count;	/* reference count */
	atomic_t processes;	/* How many processes does this user have? */
	atomic_t sigpending;	/* How many pending signals does this user have? */
#ifdef CONFIG_INOTIFY_USER
	atomic_t inotify_watches; /* How many inotify watches does this user have? */
	atomic_t inotify_devs;	/* How many inotify devs does this user have opened? */
#endif
#ifdef CONFIG_FANOTIFY
	atomic_t fanotify_listeners;
#endif
#ifdef CONFIG_EPOLL
	atomic_long_t epoll_watches; /* The number of file descriptors currently watched */
#endif
#ifdef CONFIG_POSIX_MQUEUE
	/* protected by mq_lock	*/
	unsigned long mq_bytes;	/* How many bytes can be allocated to mqueue? */
#endif
	unsigned long locked_shm; /* How many pages of mlocked shm ? */
	unsigned long unix_inflight;	/* How many files in flight in unix sockets */
	atomic_long_t pipe_bufs;  /* how many pages are allocated in pipe buffers */

#ifdef CONFIG_KEYS
	struct key *uid_keyring;	/* UID specific keyring */
	struct key *session_keyring;	/* UID's default session keyring */
#endif

	/* Hash table maintenance information */
	struct hlist_node uidhash_node;
	kuid_t uid;

#if defined(CONFIG_PERF_EVENTS) || defined(CONFIG_BPF_SYSCALL)
	atomic_long_t locked_vm;
#endif
};

extern int uids_sysfs_init(void);

extern struct user_struct *find_user(kuid_t);

extern struct user_struct root_user;
#define INIT_USER (&root_user)


struct backing_dev_info;
struct reclaim_state;

#ifdef CONFIG_SCHED_INFO
struct sched_info {
	/* cumulative counters */
	unsigned long pcount;	      /* # of times run on this cpu */
	unsigned long long run_delay; /* time spent waiting on a runqueue */

	/* timestamps */
	unsigned long long last_arrival,/* when we last ran on a cpu */
			   last_queued;	/* when we were last queued to run */
};
#endif /* CONFIG_SCHED_INFO */

#ifdef CONFIG_TASK_DELAY_ACCT
struct task_delay_info {
	spinlock_t lock;
	unsigned int flags;	/* Private per-task flags */

	/* For each stat XXX, add following, aligned appropriately
	 *
	 * struct timespec XXX_start, XXX_end;
	 * u64 XXX_delay;
	 * u32 XXX_count;
	 *
	 * Atomicity of updates to XXX_delay, XXX_count protected by
	 * single lock above (split into XXX_lock if contention is an issue).
	 */

	/*
	 * XXX_count is incremented on every XXX operation, the delay
	 * associated with the operation is added to XXX_delay.
	 * XXX_delay contains the accumulated delay time in nanoseconds.
	 */
	u64 blkio_start;	/* Shared by blkio, swapin */
	u64 blkio_delay;	/* wait for sync block io completion */
	u64 swapin_delay;	/* wait for swapin block io completion */
	u32 blkio_count;	/* total count of the number of sync block */
				/* io operations performed */
	u32 swapin_count;	/* total count of the number of swapin block */
				/* io operations performed */

	u64 freepages_start;
	u64 freepages_delay;	/* wait for memory reclaim */
	u32 freepages_count;	/* total count of memory reclaim */
};
#endif	/* CONFIG_TASK_DELAY_ACCT */

static inline int sched_info_on(void)
{
#ifdef CONFIG_SCHEDSTATS
	return 1;
#elif defined(CONFIG_TASK_DELAY_ACCT)
	extern int delayacct_on;
	return delayacct_on;
#else
	return 0;
#endif
}

#ifdef CONFIG_SCHEDSTATS
void force_schedstat_enabled(void);
#endif

enum cpu_idle_type {
	CPU_IDLE,
	CPU_NOT_IDLE,
	CPU_NEWLY_IDLE,
	CPU_MAX_IDLE_TYPES
};

/*
 * Integer metrics need fixed point arithmetic, e.g., sched/fair
 * has a few: load, load_avg, util_avg, freq, and capacity.
 *
 * We define a basic fixed point arithmetic range, and then formalize
 * all these metrics based on that basic range.
 */
# define SCHED_FIXEDPOINT_SHIFT	10
# define SCHED_FIXEDPOINT_SCALE	(1L << SCHED_FIXEDPOINT_SHIFT)

/*
 * Increase resolution of cpu_capacity calculations
 */
#define SCHED_CAPACITY_SHIFT	SCHED_FIXEDPOINT_SHIFT
#define SCHED_CAPACITY_SCALE	(1L << SCHED_CAPACITY_SHIFT)

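/*
 * A worked instance (a sketch): with SCHED_FIXEDPOINT_SHIFT = 10 the
 * scale is 1024, so a 75% utilization of one CPU is stored as
 * (3 * SCHED_CAPACITY_SCALE) / 4 = 768, and converting back to a
 * percentage is (768 * 100) >> SCHED_CAPACITY_SHIFT = 75.
 */
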
/*
 * Wake-queues are lists of tasks with a pending wakeup, whose
 * callers have already marked the task as woken internally,
 * and can thus carry on. A common use case is being able to
 * do the wakeups once the corresponding user lock has been
 * released.
 *
 * We hold a reference to each task in the list across the wakeup,
 * thus guaranteeing that the memory is still valid by the time
 * the actual wakeups are performed in wake_up_q().
 *
 * One per task suffices, because there's never a need for a task to be
 * in two wake queues simultaneously; it is forbidden to abandon a task
 * in a wake queue (a call to wake_up_q() _must_ follow), so if a task is
 * already in a wake queue, the wakeup will happen soon and the second
 * waker can just skip it.
 *
 * The DEFINE_WAKE_Q macro declares and initializes the list head.
 * wake_up_q() does NOT reinitialize the list; it's expected to be
 * called near the end of a function, where the fact that the queue is
 * not used again will be easy to see by inspection.
 *
 * Note that this can cause spurious wakeups. schedule() callers
 * must ensure the call is done inside a loop, confirming that the
 * wakeup condition has in fact occurred.
 */
struct wake_q_node {
	struct wake_q_node *next;
};

struct wake_q_head {
	struct wake_q_node *first;
	struct wake_q_node **lastp;
};

#define WAKE_Q_TAIL ((struct wake_q_node *) 0x01)

#define DEFINE_WAKE_Q(name)				\
	struct wake_q_head name = { WAKE_Q_TAIL, &name.first }

extern void wake_q_add(struct wake_q_head *head,
		       struct task_struct *task);
extern void wake_up_q(struct wake_q_head *head);

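/*
 * Typical usage (a sketch; "lock" and "next_owner" are hypothetical
 * names, not kernel symbols):
 *
 *	DEFINE_WAKE_Q(wake_q);
 *
 *	spin_lock(&lock);
 *	wake_q_add(&wake_q, next_owner);  (queues the task, takes a reference)
 *	spin_unlock(&lock);
 *
 *	wake_up_q(&wake_q);		  (wakeups happen with the lock dropped)
 */
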
/*
 * sched-domains (multiprocessor balancing) declarations:
 */
#ifdef CONFIG_SMP
#define SD_LOAD_BALANCE		0x0001	/* Do load balancing on this domain. */
#define SD_BALANCE_NEWIDLE	0x0002	/* Balance when about to become idle */
#define SD_BALANCE_EXEC		0x0004	/* Balance on exec */
#define SD_BALANCE_FORK		0x0008	/* Balance on fork, clone */
#define SD_BALANCE_WAKE		0x0010	/* Balance on wakeup */
#define SD_WAKE_AFFINE		0x0020	/* Wake task to waking CPU */
#define SD_ASYM_CPUCAPACITY	0x0040	/* Groups have different max cpu capacities */
#define SD_SHARE_CPUCAPACITY	0x0080	/* Domain members share cpu capacity */
#define SD_SHARE_POWERDOMAIN	0x0100	/* Domain members share power domain */
#define SD_SHARE_PKG_RESOURCES	0x0200	/* Domain members share cpu pkg resources */
#define SD_SERIALIZE		0x0400	/* Only a single load balancing instance */
#define SD_ASYM_PACKING		0x0800	/* Place busy groups earlier in the domain */
#define SD_PREFER_SIBLING	0x1000	/* Prefer to place tasks in a sibling domain */
#define SD_OVERLAP		0x2000	/* sched_domains of this level overlap */
#define SD_NUMA			0x4000	/* cross-node balancing */

#ifdef CONFIG_SCHED_SMT
static inline int cpu_smt_flags(void)
{
	return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
}
#endif

#ifdef CONFIG_SCHED_MC
static inline int cpu_core_flags(void)
{
	return SD_SHARE_PKG_RESOURCES;
}
#endif

#ifdef CONFIG_NUMA
static inline int cpu_numa_flags(void)
{
	return SD_NUMA;
}
#endif

extern int arch_asym_cpu_priority(int cpu);

struct sched_domain_attr {
	int relax_domain_level;
};

#define SD_ATTR_INIT	(struct sched_domain_attr) {	\
	.relax_domain_level = -1,			\
}

extern int sched_domain_level_max;

struct sched_group;

struct sched_domain_shared {
	atomic_t ref;
	atomic_t nr_busy_cpus;
	int has_idle_cores;
};

struct sched_domain {
	/* These fields must be setup */
	struct sched_domain *parent;	/* top domain must be null terminated */
	struct sched_domain *child;	/* bottom domain must be null terminated */
	struct sched_group *groups;	/* the balancing groups of the domain */
	unsigned long min_interval;	/* Minimum balance interval ms */
	unsigned long max_interval;	/* Maximum balance interval ms */
	unsigned int busy_factor;	/* less balancing by factor if busy */
	unsigned int imbalance_pct;	/* No balance until over watermark */
	unsigned int cache_nice_tries;	/* Leave cache hot tasks for # tries */
	unsigned int busy_idx;
	unsigned int idle_idx;
	unsigned int newidle_idx;
	unsigned int wake_idx;
	unsigned int forkexec_idx;
	unsigned int smt_gain;

	int nohz_idle;			/* NOHZ IDLE status */
	int flags;			/* See SD_* */
	int level;

	/* Runtime fields. */
	unsigned long last_balance;	/* init to jiffies. units in jiffies */
	unsigned int balance_interval;	/* initialise to 1. units in ms. */
	unsigned int nr_balance_failed; /* initialise to 0 */

	/* idle_balance() stats */
	u64 max_newidle_lb_cost;
	unsigned long next_decay_max_lb_cost;

	u64 avg_scan_cost;		/* select_idle_sibling */

#ifdef CONFIG_SCHEDSTATS
	/* load_balance() stats */
	unsigned int lb_count[CPU_MAX_IDLE_TYPES];
	unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
	unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
	unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
	unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
	unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
	unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
	unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];

	/* Active load balancing */
	unsigned int alb_count;
	unsigned int alb_failed;
	unsigned int alb_pushed;

	/* SD_BALANCE_EXEC stats */
	unsigned int sbe_count;
	unsigned int sbe_balanced;
	unsigned int sbe_pushed;

	/* SD_BALANCE_FORK stats */
	unsigned int sbf_count;
	unsigned int sbf_balanced;
	unsigned int sbf_pushed;

	/* try_to_wake_up() stats */
	unsigned int ttwu_wake_remote;
	unsigned int ttwu_move_affine;
	unsigned int ttwu_move_balance;
#endif
#ifdef CONFIG_SCHED_DEBUG
	char *name;
#endif
	union {
		void *private;		/* used during construction */
		struct rcu_head rcu;	/* used during destruction */
	};
	struct sched_domain_shared *shared;

	unsigned int span_weight;
	/*
	 * Span of all CPUs in this domain.
	 *
	 * NOTE: this field is variable length. (Allocated dynamically
	 * by attaching extra space to the end of the structure,
	 * depending on how many CPUs the kernel has booted up with)
	 */
	unsigned long span[0];
};

static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
{
	return to_cpumask(sd->span);
}

extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
				    struct sched_domain_attr *dattr_new);

/* Allocate an array of sched domains, for partition_sched_domains(). */
cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);

bool cpus_share_cache(int this_cpu, int that_cpu);

typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
typedef int (*sched_domain_flags_f)(void);

#define SDTL_OVERLAP	0x01

struct sd_data {
	struct sched_domain **__percpu sd;
	struct sched_domain_shared **__percpu sds;
	struct sched_group **__percpu sg;
	struct sched_group_capacity **__percpu sgc;
};

struct sched_domain_topology_level {
	sched_domain_mask_f mask;
	sched_domain_flags_f sd_flags;
	int flags;
	int numa_level;
	struct sd_data data;
#ifdef CONFIG_SCHED_DEBUG
	char *name;
#endif
};

extern void set_sched_topology(struct sched_domain_topology_level *tl);
extern void wake_up_if_idle(int cpu);

#ifdef CONFIG_SCHED_DEBUG
# define SD_INIT_NAME(type)		.name = #type
#else
# define SD_INIT_NAME(type)
#endif

#else /* CONFIG_SMP */

struct sched_domain_attr;

static inline void
partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
			struct sched_domain_attr *dattr_new)
{
}

static inline bool cpus_share_cache(int this_cpu, int that_cpu)
{
	return true;
}

#endif	/* !CONFIG_SMP */


struct io_context;			/* See blkdev.h */


#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
extern void prefetch_stack(struct task_struct *t);
#else
static inline void prefetch_stack(struct task_struct *t) { }
#endif

struct audit_context;		/* See audit.c */
struct mempolicy;
struct pipe_inode_info;
struct uts_namespace;

struct load_weight {
	unsigned long weight;
	u32 inv_weight;
};

/*
 * The load_avg/util_avg accumulates an infinite geometric series
 * (see __update_load_avg() in kernel/sched/fair.c).
 *
 * [load_avg definition]
 *
 *   load_avg = runnable% * scale_load_down(load)
 *
 * where runnable% is the time ratio that a sched_entity is runnable.
 * For cfs_rq, it is the aggregated load_avg of all runnable and
 * blocked sched_entities.
 *
 * load_avg may also take frequency scaling into account:
 *
 *   load_avg = runnable% * scale_load_down(load) * freq%
 *
 * where freq% is the CPU frequency normalized to the highest frequency.
 *
 * [util_avg definition]
 *
 *   util_avg = running% * SCHED_CAPACITY_SCALE
 *
 * where running% is the time ratio that a sched_entity is running on
 * a CPU. For cfs_rq, it is the aggregated util_avg of all runnable
 * and blocked sched_entities.
 *
 * util_avg may also factor frequency scaling and CPU capacity scaling:
 *
 *   util_avg = running% * SCHED_CAPACITY_SCALE * freq% * capacity%
 *
 * where freq% is the same as above, and capacity% is the CPU capacity
 * normalized to the greatest capacity (due to uarch differences, etc).
 *
 * N.B., the above ratios (runnable%, running%, freq%, and capacity%)
 * themselves are in the range of [0, 1]. To do fixed point arithmetics,
 * we therefore scale them to as large a range as necessary. This is for
 * example reflected by util_avg's SCHED_CAPACITY_SCALE.
 *
 * [Overflow issue]
 *
 * The 64-bit load_sum can have 4353082796 (=2^64/47742/88761) entities
 * with the highest load (=88761), always runnable on a single cfs_rq,
 * and should not overflow as the number already hits PID_MAX_LIMIT.
 *
 * For all other cases (including 32-bit kernels), struct load_weight's
 * weight will overflow first before we do, because:
 *
 *   Max(load_avg) <= Max(load.weight)
 *
 * Then it is the load_weight's responsibility to consider overflow
 * issues.
 */
struct sched_avg {
	u64 last_update_time, load_sum;
	u32 util_sum, period_contrib;
	unsigned long load_avg, util_avg;
};
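
/*
 * A worked instance of the definitions above (a sketch): an entity that
 * is runnable half of the time with scale_load_down(load) = 1024 settles
 * at
 *
 *	load_avg = runnable% * scale_load_down(load) = 0.5 * 1024 = 512
 *
 * and one that actually runs 25% of the time on a full-capacity CPU at
 *
 *	util_avg = running% * SCHED_CAPACITY_SCALE = 0.25 * 1024 = 256.
 */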

#ifdef CONFIG_SCHEDSTATS
struct sched_statistics {
	u64 wait_start;
	u64 wait_max;
	u64 wait_count;
	u64 wait_sum;
	u64 iowait_count;
	u64 iowait_sum;

	u64 sleep_start;
	u64 sleep_max;
	s64 sum_sleep_runtime;

	u64 block_start;
	u64 block_max;
	u64 exec_max;
	u64 slice_max;

	u64 nr_migrations_cold;
	u64 nr_failed_migrations_affine;
	u64 nr_failed_migrations_running;
	u64 nr_failed_migrations_hot;
	u64 nr_forced_migrations;

	u64 nr_wakeups;
	u64 nr_wakeups_sync;
	u64 nr_wakeups_migrate;
	u64 nr_wakeups_local;
	u64 nr_wakeups_remote;
	u64 nr_wakeups_affine;
	u64 nr_wakeups_affine_attempts;
	u64 nr_wakeups_passive;
	u64 nr_wakeups_idle;
};
#endif

struct sched_entity {
	struct load_weight load;	/* for load-balancing */
	struct rb_node run_node;
	struct list_head group_node;
	unsigned int on_rq;

	u64 exec_start;
	u64 sum_exec_runtime;
	u64 vruntime;
	u64 prev_sum_exec_runtime;

	u64 nr_migrations;

#ifdef CONFIG_SCHEDSTATS
	struct sched_statistics statistics;
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	int depth;
	struct sched_entity *parent;
	/* rq on which this entity is (to be) queued: */
	struct cfs_rq *cfs_rq;
	/* rq "owned" by this entity/group: */
	struct cfs_rq *my_q;
#endif

#ifdef CONFIG_SMP
	/*
	 * Per entity load average tracking.
	 *
	 * Put into separate cache line so it does not
	 * collide with read-mostly values above.
	 */
	struct sched_avg avg ____cacheline_aligned_in_smp;
#endif
};

struct sched_rt_entity {
	struct list_head run_list;
	unsigned long timeout;
	unsigned long watchdog_stamp;
	unsigned int time_slice;
	unsigned short on_rq;
	unsigned short on_list;

	struct sched_rt_entity *back;
#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity *parent;
	/* rq on which this entity is (to be) queued: */
	struct rt_rq *rt_rq;
	/* rq "owned" by this entity/group: */
	struct rt_rq *my_q;
#endif
};

struct sched_dl_entity {
	struct rb_node rb_node;

	/*
	 * Original scheduling parameters. Copied here from sched_attr
	 * during sched_setattr(), they will remain the same until
	 * the next sched_setattr().
	 */
	u64 dl_runtime;		/* maximum runtime for each instance	*/
	u64 dl_deadline;	/* relative deadline of each instance	*/
	u64 dl_period;		/* separation of two instances (period) */
	u64 dl_bw;		/* dl_runtime / dl_deadline		*/

	/*
	 * Actual scheduling parameters. Initialized with the values above,
	 * they are continuously updated during task execution. Note that
	 * the remaining runtime could be < 0 in case we are in overrun.
	 */
	s64 runtime;		/* remaining runtime for this instance	*/
	u64 deadline;		/* absolute deadline for this instance	*/
	unsigned int flags;	/* specifying the scheduler behaviour	*/

	/*
	 * Some bool flags:
	 *
	 * @dl_throttled tells if we exhausted the runtime. If so, the
	 * task has to wait for a replenishment to be performed at the
	 * next firing of dl_timer.
	 *
	 * @dl_boosted tells if we are boosted due to DI. If so we are
	 * outside bandwidth enforcement mechanism (but only until we
	 * exit the critical section);
	 *
	 * @dl_yielded tells if task gave up the cpu before consuming
	 * all its available runtime during the last job.
	 */
	int dl_throttled, dl_boosted, dl_yielded;

	/*
	 * Bandwidth enforcement timer. Each -deadline task has its
	 * own bandwidth to be enforced, thus we need one timer per task.
	 */
	struct hrtimer dl_timer;
};

union rcu_special {
	struct {
		u8 blocked;
		u8 need_qs;
		u8 exp_need_qs;
		u8 pad;	/* Otherwise the compiler can store garbage here. */
	} b; /* Bits. */
	u32 s; /* Set of bits. */
};
struct rcu_node;

enum perf_event_task_context {
	perf_invalid_context = -1,
	perf_hw_context = 0,
	perf_sw_context,
	perf_nr_task_contexts,
};

/* Track pages that require TLB flushes */
struct tlbflush_unmap_batch {
	/*
	 * Each bit set is a CPU that potentially has a TLB entry for one of
	 * the PFNs being flushed. See set_tlb_ubc_flush_pending().
	 */
	struct cpumask cpumask;

	/* True if any bit in cpumask is set */
	bool flush_required;

	/*
	 * If true then the PTE was dirty when unmapped. The entry must be
	 * flushed before IO is initiated or a stale TLB entry potentially
	 * allows an update without redirtying the page.
	 */
	bool writable;
};

struct task_struct {
#ifdef CONFIG_THREAD_INFO_IN_TASK
	/*
	 * For reasons of header soup (see current_thread_info()), this
	 * must be the first element of task_struct.
	 */
	struct thread_info thread_info;
#endif
	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
	void *stack;
	atomic_t usage;
	unsigned int flags;	/* per process flags, defined below */
	unsigned int ptrace;

#ifdef CONFIG_SMP
	struct llist_node wake_entry;
	int on_cpu;
#ifdef CONFIG_THREAD_INFO_IN_TASK
	unsigned int cpu;	/* current CPU */
#endif
	unsigned int wakee_flips;
	unsigned long wakee_flip_decay_ts;
	struct task_struct *last_wakee;

	int wake_cpu;
#endif
	int on_rq;

	int prio, static_prio, normal_prio;
	unsigned int rt_priority;
	const struct sched_class *sched_class;
	struct sched_entity se;
	struct sched_rt_entity rt;
#ifdef CONFIG_CGROUP_SCHED
	struct task_group *sched_task_group;
#endif
	struct sched_dl_entity dl;

#ifdef CONFIG_PREEMPT_NOTIFIERS
	/* list of struct preempt_notifier: */
	struct hlist_head preempt_notifiers;
#endif

#ifdef CONFIG_BLK_DEV_IO_TRACE
	unsigned int btrace_seq;
#endif

	unsigned int policy;
	int nr_cpus_allowed;
	cpumask_t cpus_allowed;

#ifdef CONFIG_PREEMPT_RCU
	int rcu_read_lock_nesting;
	union rcu_special rcu_read_unlock_special;
	struct list_head rcu_node_entry;
	struct rcu_node *rcu_blocked_node;
#endif /* #ifdef CONFIG_PREEMPT_RCU */
#ifdef CONFIG_TASKS_RCU
	unsigned long rcu_tasks_nvcsw;
	bool rcu_tasks_holdout;
	struct list_head rcu_tasks_holdout_list;
	int rcu_tasks_idle_cpu;
#endif /* #ifdef CONFIG_TASKS_RCU */

#ifdef CONFIG_SCHED_INFO
	struct sched_info sched_info;
#endif

	struct list_head tasks;
#ifdef CONFIG_SMP
	struct plist_node pushable_tasks;
	struct rb_node pushable_dl_tasks;
#endif

	struct mm_struct *mm, *active_mm;
	/* per-thread vma caching */
	u32 vmacache_seqnum;
	struct vm_area_struct *vmacache[VMACACHE_SIZE];
#if defined(SPLIT_RSS_COUNTING)
	struct task_rss_stat rss_stat;
#endif
/* task state */
	int exit_state;
	int exit_code, exit_signal;
	int pdeath_signal;	/* The signal sent when the parent dies */
	unsigned long jobctl;	/* JOBCTL_*, siglock protected */

	/* Used for emulating ABI behavior of previous Linux versions */
	unsigned int personality;

	/* scheduler bits, serialized by scheduler locks */
	unsigned sched_reset_on_fork:1;
	unsigned sched_contributes_to_load:1;
	unsigned sched_migrated:1;
	unsigned sched_remote_wakeup:1;
	unsigned :0; /* force alignment to the next boundary */

	/* unserialized, strictly 'current' */
	unsigned in_execve:1; /* bit to tell LSMs we're in execve */
	unsigned in_iowait:1;
#if !defined(TIF_RESTORE_SIGMASK)
	unsigned restore_sigmask:1;
#endif
#ifdef CONFIG_MEMCG
	unsigned memcg_may_oom:1;
#ifndef CONFIG_SLOB
	unsigned memcg_kmem_skip_account:1;
#endif
#endif
#ifdef CONFIG_COMPAT_BRK
	unsigned brk_randomized:1;
#endif

	unsigned long atomic_flags; /* Flags needing atomic access. */

	struct restart_block restart_block;

	pid_t pid;
	pid_t tgid;

#ifdef CONFIG_CC_STACKPROTECTOR
	/* Canary value for the -fstack-protector gcc feature */
	unsigned long stack_canary;
#endif
	/*
	 * pointers to (original) parent process, youngest child, younger sibling,
	 * older sibling, respectively. (p->father can be replaced with
	 * p->real_parent->pid)
	 */
	struct task_struct __rcu *real_parent; /* real parent process */
	struct task_struct __rcu *parent; /* recipient of SIGCHLD, wait4() reports */
	/*
	 * children/sibling forms the list of my natural children
	 */
	struct list_head children;	/* list of my children */
	struct list_head sibling;	/* linkage in my parent's children list */
	struct task_struct *group_leader;	/* threadgroup leader */

	/*
	 * ptraced is the list of tasks this task is using ptrace on.
	 * This includes both natural children and PTRACE_ATTACH targets.
	 * p->ptrace_entry is p's link on the p->parent->ptraced list.
	 */
	struct list_head ptraced;
	struct list_head ptrace_entry;

	/* PID/PID hash table linkage. */
	struct pid_link pids[PIDTYPE_MAX];
	struct list_head thread_group;
0c740d0a 1646 struct list_head thread_node;
1da177e4
LT
1647
1648 struct completion *vfork_done; /* for vfork() */
1649 int __user *set_child_tid; /* CLONE_CHILD_SETTID */
1650 int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
1651
40565b5a
SG
1652 cputime_t utime, stime;
1653#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
1654 cputime_t utimescaled, stimescaled;
1655#endif
9ac52315 1656 cputime_t gtime;
9d7fb042 1657 struct prev_cputime prev_cputime;
6a61671b 1658#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
b7ce2277 1659 seqcount_t vtime_seqcount;
6a61671b
FW
1660 unsigned long long vtime_snap;
1661 enum {
7098c1ea
FW
1662 /* Task is sleeping or running on a CPU with VTIME inactive */
1663 VTIME_INACTIVE = 0,
1664 /* Task runs in userspace on a CPU with VTIME active */
6a61671b 1665 VTIME_USER,
7098c1ea 1666 /* Task runs in kernelspace in a CPU with VTIME active */
6a61671b
FW
1667 VTIME_SYS,
1668 } vtime_snap_whence;
d99ca3b9 1669#endif
d027d45d
FW
1670
1671#ifdef CONFIG_NO_HZ_FULL
f009a7a7 1672 atomic_t tick_dep_mask;
d027d45d 1673#endif
1da177e4 1674 unsigned long nvcsw, nivcsw; /* context switch counts */
ccbf62d8 1675 u64 start_time; /* monotonic time in nsec */
57e0be04 1676 u64 real_start_time; /* boot based time in nsec */
1da177e4
LT
1677/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
1678 unsigned long min_flt, maj_flt;
1679
f06febc9 1680 struct task_cputime cputime_expires;
1da177e4
LT
1681 struct list_head cpu_timers[3];
1682
1683/* process credentials */
1b0ba1c9 1684 const struct cred __rcu *real_cred; /* objective and real subjective task
3b11a1de 1685 * credentials (COW) */
1b0ba1c9 1686 const struct cred __rcu *cred; /* effective (overridable) subjective task
3b11a1de 1687 * credentials (COW) */
36772092
PBG
1688 char comm[TASK_COMM_LEN]; /* executable name excluding path
1689 - access with [gs]et_task_comm (which lock
1690 it with task_lock())
221af7f8 1691 - initialized normally by setup_new_exec */
1da177e4 1692/* file system info */
756daf26 1693 struct nameidata *nameidata;
3d5b6fcc 1694#ifdef CONFIG_SYSVIPC
1da177e4
LT
1695/* ipc stuff */
1696 struct sysv_sem sysvsem;
ab602f79 1697 struct sysv_shm sysvshm;
3d5b6fcc 1698#endif
e162b39a 1699#ifdef CONFIG_DETECT_HUNG_TASK
82a1fcb9 1700/* hung task detection */
82a1fcb9
IM
1701 unsigned long last_switch_count;
1702#endif
1da177e4
LT
1703/* filesystem information */
1704 struct fs_struct *fs;
1705/* open file information */
1706 struct files_struct *files;
1651e14e 1707/* namespaces */
ab516013 1708 struct nsproxy *nsproxy;
1da177e4
LT
1709/* signal handlers */
1710 struct signal_struct *signal;
1711 struct sighand_struct *sighand;
1712
1713 sigset_t blocked, real_blocked;
f3de272b 1714 sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */
1da177e4
LT
1715 struct sigpending pending;
1716
1717 unsigned long sas_ss_sp;
1718 size_t sas_ss_size;
2a742138 1719 unsigned sas_ss_flags;
2e01fabe 1720
67d12145 1721 struct callback_head *task_works;
e73f8959 1722
1da177e4 1723 struct audit_context *audit_context;
bfef93a5 1724#ifdef CONFIG_AUDITSYSCALL
e1760bd5 1725 kuid_t loginuid;
4746ec5b 1726 unsigned int sessionid;
bfef93a5 1727#endif
932ecebb 1728 struct seccomp seccomp;
1da177e4
LT
1729
1730/* Thread group tracking */
1731 u32 parent_exec_id;
1732 u32 self_exec_id;
58568d2a
MX
1733/* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed,
1734 * mempolicy */
1da177e4 1735 spinlock_t alloc_lock;
1da177e4 1736
b29739f9 1737 /* Protection of the PI data structures: */
1d615482 1738 raw_spinlock_t pi_lock;
b29739f9 1739
76751049
PZ
1740 struct wake_q_node wake_q;
1741
23f78d4a
IM
1742#ifdef CONFIG_RT_MUTEXES
1743 /* PI waiters blocked on a rt_mutex held by this task */
fb00aca4
PZ
1744 struct rb_root pi_waiters;
1745 struct rb_node *pi_waiters_leftmost;
23f78d4a
IM
1746 /* Deadlock detection and priority inheritance handling */
1747 struct rt_mutex_waiter *pi_blocked_on;
23f78d4a
IM
1748#endif
1749
408894ee
IM
1750#ifdef CONFIG_DEBUG_MUTEXES
1751 /* mutex deadlock detection */
1752 struct mutex_waiter *blocked_on;
1753#endif
de30a2b3
IM
1754#ifdef CONFIG_TRACE_IRQFLAGS
1755 unsigned int irq_events;
de30a2b3 1756 unsigned long hardirq_enable_ip;
de30a2b3 1757 unsigned long hardirq_disable_ip;
fa1452e8 1758 unsigned int hardirq_enable_event;
de30a2b3 1759 unsigned int hardirq_disable_event;
fa1452e8
HS
1760 int hardirqs_enabled;
1761 int hardirq_context;
de30a2b3 1762 unsigned long softirq_disable_ip;
de30a2b3 1763 unsigned long softirq_enable_ip;
fa1452e8 1764 unsigned int softirq_disable_event;
de30a2b3 1765 unsigned int softirq_enable_event;
fa1452e8 1766 int softirqs_enabled;
de30a2b3
IM
1767 int softirq_context;
1768#endif
fbb9ce95 1769#ifdef CONFIG_LOCKDEP
bdb9441e 1770# define MAX_LOCK_DEPTH 48UL
fbb9ce95
IM
1771 u64 curr_chain_key;
1772 int lockdep_depth;
fbb9ce95 1773 unsigned int lockdep_recursion;
c7aceaba 1774 struct held_lock held_locks[MAX_LOCK_DEPTH];
cf40bd16 1775 gfp_t lockdep_reclaim_gfp;
fbb9ce95 1776#endif
c6d30853
AR
1777#ifdef CONFIG_UBSAN
1778 unsigned int in_ubsan;
1779#endif
408894ee 1780
1da177e4
LT
1781/* journalling filesystem info */
1782 void *journal_info;
1783
d89d8796 1784/* stacked block device info */
bddd87c7 1785 struct bio_list *bio_list;
d89d8796 1786
73c10101
JA
1787#ifdef CONFIG_BLOCK
1788/* stack plugging */
1789 struct blk_plug *plug;
1790#endif
1791
1da177e4
LT
1792/* VM state */
1793 struct reclaim_state *reclaim_state;
1794
1da177e4
LT
1795 struct backing_dev_info *backing_dev_info;
1796
1797 struct io_context *io_context;
1798
1799 unsigned long ptrace_message;
1800 siginfo_t *last_siginfo; /* For ptrace use. */
7c3ab738 1801 struct task_io_accounting ioac;
8f0ab514 1802#if defined(CONFIG_TASK_XACCT)
1da177e4
LT
1803 u64 acct_rss_mem1; /* accumulated rss usage */
1804 u64 acct_vm_mem1; /* accumulated virtual memory usage */
49b5cf34 1805 cputime_t acct_timexpd; /* stime + utime since last update */
1da177e4
LT
1806#endif
1807#ifdef CONFIG_CPUSETS
58568d2a 1808 nodemask_t mems_allowed; /* Protected by alloc_lock */
cc9a6c87 1809 seqcount_t mems_allowed_seq; /* Sequence number to catch updates */
825a46af 1810 int cpuset_mem_spread_rotor;
6adef3eb 1811 int cpuset_slab_spread_rotor;
1da177e4 1812#endif
ddbcc7e8 1813#ifdef CONFIG_CGROUPS
817929ec 1814 /* Control Group info protected by css_set_lock */
2c392b8c 1815 struct css_set __rcu *cgroups;
817929ec
PM
1816 /* cg_list protected by css_set_lock and tsk->alloc_lock */
1817 struct list_head cg_list;
ddbcc7e8 1818#endif
42b2dd0a 1819#ifdef CONFIG_FUTEX
0771dfef 1820 struct robust_list_head __user *robust_list;
34f192c6
IM
1821#ifdef CONFIG_COMPAT
1822 struct compat_robust_list_head __user *compat_robust_list;
1823#endif
c87e2837
IM
1824 struct list_head pi_state_list;
1825 struct futex_pi_state *pi_state_cache;
c7aceaba 1826#endif
cdd6c482 1827#ifdef CONFIG_PERF_EVENTS
8dc85d54 1828 struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
cdd6c482
IM
1829 struct mutex perf_event_mutex;
1830 struct list_head perf_event_list;
a63eaf34 1831#endif
8f47b187
TG
1832#ifdef CONFIG_DEBUG_PREEMPT
1833 unsigned long preempt_disable_ip;
1834#endif
c7aceaba 1835#ifdef CONFIG_NUMA
58568d2a 1836 struct mempolicy *mempolicy; /* Protected by alloc_lock */
c7aceaba 1837 short il_next;
207205a2 1838 short pref_node_fork;
42b2dd0a 1839#endif
cbee9f88
PZ
1840#ifdef CONFIG_NUMA_BALANCING
1841 int numa_scan_seq;
cbee9f88 1842 unsigned int numa_scan_period;
598f0ec0 1843 unsigned int numa_scan_period_max;
de1c9ce6 1844 int numa_preferred_nid;
6b9a7460 1845 unsigned long numa_migrate_retry;
cbee9f88 1846 u64 node_stamp; /* migration stamp */
7e2703e6
RR
1847 u64 last_task_numa_placement;
1848 u64 last_sum_exec_runtime;
cbee9f88 1849 struct callback_head numa_work;
f809ca9a 1850
8c8a743c
PZ
1851 struct list_head numa_entry;
1852 struct numa_group *numa_group;
1853
745d6147 1854 /*
44dba3d5
IM
1855 * numa_faults is an array split into four regions:
1856 * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer
1857 * in this precise order.
1858 *
1859 * faults_memory: Exponential decaying average of faults on a per-node
1860 * basis. Scheduling placement decisions are made based on these
1861 * counts. The values remain static for the duration of a PTE scan.
1862 * faults_cpu: Track the nodes the process was running on when a NUMA
1863 * hinting fault was incurred.
1864 * faults_memory_buffer and faults_cpu_buffer: Record faults per node
1865 * during the current scan window. When the scan completes, the counts
1866 * in faults_memory and faults_cpu decay and these values are copied.
745d6147 1867 */
44dba3d5 1868 unsigned long *numa_faults;
83e1d2cd 1869 unsigned long total_numa_faults;
745d6147 1870
04bb2f94
RR
1871 /*
1872 * numa_faults_locality tracks if faults recorded during the last
074c2381
MG
1873 * scan window were remote/local or failed to migrate. The task scan
1874 * period is adapted based on the locality of the faults with different
1875 * weights depending on whether they were shared or private faults.
04bb2f94 1876 */
074c2381 1877 unsigned long numa_faults_locality[3];
04bb2f94 1878
b32e86b4 1879 unsigned long numa_pages_migrated;
cbee9f88
PZ
1880#endif /* CONFIG_NUMA_BALANCING */
1881
72b252ae
MG
1882#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
1883 struct tlbflush_unmap_batch tlb_ubc;
1884#endif
1885
e56d0903 1886 struct rcu_head rcu;
b92ce558
JA
1887
1888 /*
1889 * cache last used pipe for splice
1890 */
1891 struct pipe_inode_info *splice_pipe;
5640f768
ED
1892
1893 struct page_frag task_frag;
1894
ca74e92b
SN
1895#ifdef CONFIG_TASK_DELAY_ACCT
1896 struct task_delay_info *delays;
f4f154fd
AM
1897#endif
1898#ifdef CONFIG_FAULT_INJECTION
1899 int make_it_fail;
ca74e92b 1900#endif
9d823e8f
WF
1901 /*
1902 * when (nr_dirtied >= nr_dirtied_pause), it's time to call
1903 * balance_dirty_pages() for some dirty throttling pause
1904 */
1905 int nr_dirtied;
1906 int nr_dirtied_pause;
83712358 1907 unsigned long dirty_paused_when; /* start of a write-and-pause period */
9d823e8f 1908
9745512c
AV
1909#ifdef CONFIG_LATENCYTOP
1910 int latency_record_count;
1911 struct latency_record latency_record[LT_SAVECOUNT];
1912#endif
6976675d
AV
1913 /*
1914 * time slack values; these are used to round up poll() and
1915 * select() etc timeout values. These are in nanoseconds.
1916 */
da8b44d5
JS
1917 u64 timer_slack_ns;
1918 u64 default_timer_slack_ns;
f8d570a4 1919
0b24becc
AR
1920#ifdef CONFIG_KASAN
1921 unsigned int kasan_depth;
1922#endif
fb52607a 1923#ifdef CONFIG_FUNCTION_GRAPH_TRACER
3ad2f3fb 1924 /* Index of current stored address in ret_stack */
f201ae23
FW
1925 int curr_ret_stack;
1926 /* Stack of return addresses for return function tracing */
1927 struct ftrace_ret_stack *ret_stack;
8aef2d28
SR
1928 /* time stamp for last schedule */
1929 unsigned long long ftrace_timestamp;
f201ae23
FW
1930 /*
1931 * Number of functions that haven't been traced
1932 * because of depth overrun.
1933 */
1934 atomic_t trace_overrun;
380c4b14
FW
1935 /* Pause for the tracing */
1936 atomic_t tracing_graph_pause;
f201ae23 1937#endif
ea4e2bc4
SR
1938#ifdef CONFIG_TRACING
1939 /* state flags for use by tracers */
1940 unsigned long trace;
b1cff0ad 1941 /* bitmask and counter of trace recursion */
261842b7
SR
1942 unsigned long trace_recursion;
1943#endif /* CONFIG_TRACING */
5c9a8750
DV
1944#ifdef CONFIG_KCOV
1945 /* Coverage collection mode enabled for this task (0 if disabled). */
1946 enum kcov_mode kcov_mode;
1947 /* Size of the kcov_area. */
1948 unsigned kcov_size;
1949 /* Buffer for coverage collection. */
1950 void *kcov_area;
1951 * kcov descriptor wired with this task or NULL. */
1952 struct kcov *kcov;
1953#endif
6f185c29 1954#ifdef CONFIG_MEMCG
626ebc41
TH
1955 struct mem_cgroup *memcg_in_oom;
1956 gfp_t memcg_oom_gfp_mask;
1957 int memcg_oom_order;
b23afb93
TH
1958
1959 /* number of pages to reclaim on returning to userland */
1960 unsigned int memcg_nr_pages_over_high;
569b846d 1961#endif
0326f5a9
SD
1962#ifdef CONFIG_UPROBES
1963 struct uprobe_task *utask;
0326f5a9 1964#endif
cafe5635
KO
1965#if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
1966 unsigned int sequential_io;
1967 unsigned int sequential_io_avg;
1968#endif
8eb23b9f
PZ
1969#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
1970 unsigned long task_state_change;
1971#endif
8bcbde54 1972 int pagefault_disabled;
03049269 1973#ifdef CONFIG_MMU
29c696e1 1974 struct task_struct *oom_reaper_list;
03049269 1975#endif
ba14a194
AL
1976#ifdef CONFIG_VMAP_STACK
1977 struct vm_struct *stack_vm_area;
1978#endif
68f24b08
AL
1979#ifdef CONFIG_THREAD_INFO_IN_TASK
1980 /* A live task holds one reference. */
1981 atomic_t stack_refcount;
1982#endif
0c8c0f03
DH
1983/* CPU-specific state of this task */
1984 struct thread_struct thread;
1985/*
1986 * WARNING: on x86, 'thread_struct' contains a variable-sized
1987 * structure. It *MUST* be at the end of 'task_struct'.
1988 *
1989 * Do not put anything below here!
1990 */
1da177e4
LT
1991};
1992
5aaeb5c0
IM
1993#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
1994extern int arch_task_struct_size __read_mostly;
1995#else
1996# define arch_task_struct_size (sizeof(struct task_struct))
1997#endif
0c8c0f03 1998
ba14a194
AL
1999#ifdef CONFIG_VMAP_STACK
2000static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
2001{
2002 return t->stack_vm_area;
2003}
2004#else
2005static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
2006{
2007 return NULL;
2008}
2009#endif
2010
76e6eee0 2011/* Future-safe accessor for struct task_struct's cpus_allowed. */
a4636818 2012#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
76e6eee0 2013
50605ffb
TG
2014static inline int tsk_nr_cpus_allowed(struct task_struct *p)
2015{
2016 return p->nr_cpus_allowed;
2017}
2018
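/*
 * Illustrative sketch (hypothetical caller, not part of the original
 * header): the accessor keeps callers independent of the field layout.
 *
 *	if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
 *		... p is allowed to run on this cpu ...
 */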
6688cc05
PZ
2019#define TNF_MIGRATED 0x01
2020#define TNF_NO_GROUP 0x02
dabe1d99 2021#define TNF_SHARED 0x04
04bb2f94 2022#define TNF_FAULT_LOCAL 0x08
074c2381 2023#define TNF_MIGRATE_FAIL 0x10
6688cc05 2024
b18dc5f2
MH
2025static inline bool in_vfork(struct task_struct *tsk)
2026{
2027 bool ret;
2028
2029 /*
2030 * need RCU to access ->real_parent if CLONE_VM was used along with
2031 * CLONE_PARENT.
2032 *
2033 * We check real_parent->mm == tsk->mm because CLONE_VFORK does not
2034 * imply CLONE_VM.
2035 *
2036 * CLONE_VFORK can be used with CLONE_PARENT/CLONE_THREAD and thus
2037 * ->real_parent is not necessarily the task doing vfork(), so in
2038 * theory we can't rely on task_lock() if we want to dereference it.
2039 *
2040 * And in this case we can't trust the real_parent->mm == tsk->mm
2041 * check, it can be false negative. But we do not care, if init or
2042 * another oom-unkillable task does this it should blame itself.
2043 */
2044 rcu_read_lock();
2045 ret = tsk->vfork_done && tsk->real_parent->mm == tsk->mm;
2046 rcu_read_unlock();
2047
2048 return ret;
2049}
2050
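/*
 * Hedged usage sketch (hypothetical caller): a task blocked in vfork()
 * shares its mm with the child, so mm-based heuristics may want to
 * treat it specially.
 *
 *	if (in_vfork(tsk))
 *		... tsk is waiting for its vfork child; the mm is shared ...
 */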
cbee9f88 2051#ifdef CONFIG_NUMA_BALANCING
6688cc05 2052extern void task_numa_fault(int last_node, int node, int pages, int flags);
e29cf08b 2053extern pid_t task_numa_group_id(struct task_struct *p);
1a687c2e 2054extern void set_numabalancing_state(bool enabled);
82727018 2055extern void task_numa_free(struct task_struct *p);
10f39042
RR
2056extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page,
2057 int src_nid, int dst_cpu);
cbee9f88 2058#else
ac8e895b 2059static inline void task_numa_fault(int last_node, int node, int pages,
6688cc05 2060 int flags)
cbee9f88
PZ
2061{
2062}
e29cf08b
MG
2063static inline pid_t task_numa_group_id(struct task_struct *p)
2064{
2065 return 0;
2066}
1a687c2e
MG
2067static inline void set_numabalancing_state(bool enabled)
2068{
2069}
82727018
RR
2070static inline void task_numa_free(struct task_struct *p)
2071{
2072}
10f39042
RR
2073static inline bool should_numa_migrate_memory(struct task_struct *p,
2074 struct page *page, int src_nid, int dst_cpu)
2075{
2076 return true;
2077}
cbee9f88
PZ
2078#endif
2079
e868171a 2080static inline struct pid *task_pid(struct task_struct *task)
22c935f4
EB
2081{
2082 return task->pids[PIDTYPE_PID].pid;
2083}
2084
e868171a 2085static inline struct pid *task_tgid(struct task_struct *task)
22c935f4
EB
2086{
2087 return task->group_leader->pids[PIDTYPE_PID].pid;
2088}
2089
6dda81f4
ON
2090/*
2091 * Without tasklist or rcu lock it is not safe to dereference
2092 * the result of task_pgrp/task_session even if task == current;
2093 * we can race with another thread doing sys_setsid/sys_setpgid.
2094 */
e868171a 2095static inline struct pid *task_pgrp(struct task_struct *task)
22c935f4
EB
2096{
2097 return task->group_leader->pids[PIDTYPE_PGID].pid;
2098}
2099
e868171a 2100static inline struct pid *task_session(struct task_struct *task)
22c935f4
EB
2101{
2102 return task->group_leader->pids[PIDTYPE_SID].pid;
2103}
2104
7af57294
PE
2105struct pid_namespace;
2106
2107/*
2108 * the helpers to get the task's different pids as they are seen
2109 * from various namespaces
2110 *
2111 * task_xid_nr() : global id, i.e. the id seen from the init namespace;
44c4e1b2
EB
2112 * task_xid_vnr() : virtual id, i.e. the id seen from the pid namespace of
2113 * current.
7af57294
PE
2114 * task_xid_nr_ns() : id seen from the ns specified;
2115 *
2116 * set_task_vxid() : assigns a virtual id to a task;
2117 *
7af57294
PE
2118 * see also pid_nr() etc in include/linux/pid.h
2119 */
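/*
 * Illustrative sketch (hypothetical, not from the original header) of
 * how the variants differ, using the PID type as an example:
 *
 *	pid_t global = task_pid_nr(tsk);         // id in the init namespace
 *	pid_t local  = task_pid_vnr(tsk);        // id in current's namespace
 *	pid_t in_ns  = task_pid_nr_ns(tsk, ns);  // id in an explicit ns
 */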
52ee2dfd
ON
2120pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
2121 struct pid_namespace *ns);
7af57294 2122
e868171a 2123static inline pid_t task_pid_nr(struct task_struct *tsk)
7af57294
PE
2124{
2125 return tsk->pid;
2126}
2127
52ee2dfd
ON
2128static inline pid_t task_pid_nr_ns(struct task_struct *tsk,
2129 struct pid_namespace *ns)
2130{
2131 return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
2132}
7af57294
PE
2133
2134static inline pid_t task_pid_vnr(struct task_struct *tsk)
2135{
52ee2dfd 2136 return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
7af57294
PE
2137}
2138
2139
e868171a 2140static inline pid_t task_tgid_nr(struct task_struct *tsk)
7af57294
PE
2141{
2142 return tsk->tgid;
2143}
2144
2f2a3a46 2145pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
7af57294
PE
2146
2147static inline pid_t task_tgid_vnr(struct task_struct *tsk)
2148{
2149 return pid_vnr(task_tgid(tsk));
2150}
2151
2152
80e0b6e8 2153static inline int pid_alive(const struct task_struct *p);
ad36d282
RGB
2154static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
2155{
2156 pid_t pid = 0;
2157
2158 rcu_read_lock();
2159 if (pid_alive(tsk))
2160 pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
2161 rcu_read_unlock();
2162
2163 return pid;
2164}
2165
2166static inline pid_t task_ppid_nr(const struct task_struct *tsk)
2167{
2168 return task_ppid_nr_ns(tsk, &init_pid_ns);
2169}
2170
52ee2dfd
ON
2171static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
2172 struct pid_namespace *ns)
7af57294 2173{
52ee2dfd 2174 return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
7af57294
PE
2175}
2176
7af57294
PE
2177static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
2178{
52ee2dfd 2179 return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
7af57294
PE
2180}
2181
2182
52ee2dfd
ON
2183static inline pid_t task_session_nr_ns(struct task_struct *tsk,
2184 struct pid_namespace *ns)
7af57294 2185{
52ee2dfd 2186 return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
7af57294
PE
2187}
2188
7af57294
PE
2189static inline pid_t task_session_vnr(struct task_struct *tsk)
2190{
52ee2dfd 2191 return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
7af57294
PE
2192}
2193
1b0f7ffd
ON
2194/* obsolete, do not use */
2195static inline pid_t task_pgrp_nr(struct task_struct *tsk)
2196{
2197 return task_pgrp_nr_ns(tsk, &init_pid_ns);
2198}
7af57294 2199
1da177e4
LT
2200/**
2201 * pid_alive - check that a task structure is not stale
2202 * @p: Task structure to be checked.
2203 *
2204 * Test if a process is not yet dead (at most zombie state).
2205 * If pid_alive fails, then pointers within the task structure
2206 * can be stale and must not be dereferenced.
e69f6186
YB
2207 *
2208 * Return: 1 if the process is alive. 0 otherwise.
1da177e4 2209 */
ad36d282 2210static inline int pid_alive(const struct task_struct *p)
1da177e4 2211{
92476d7f 2212 return p->pids[PIDTYPE_PID].pid != NULL;
1da177e4
LT
2213}
2214
f400e198 2215/**
570f5241
SS
2216 * is_global_init - check if a task structure is init. Since init
2217 * is free to have sub-threads we need to check tgid.
3260259f
HK
2218 * @tsk: Task structure to be checked.
2219 *
2220 * Check if a task structure is the first user space task the kernel created.
e69f6186
YB
2221 *
2222 * Return: 1 if the task structure is init. 0 otherwise.
b460cbc5 2223 */
e868171a 2224static inline int is_global_init(struct task_struct *tsk)
b461cc03 2225{
570f5241 2226 return task_tgid_nr(tsk) == 1;
b461cc03 2227}
b460cbc5 2228
9ec52099
CLG
2229extern struct pid *cad_pid;
2230
1da177e4 2231extern void free_task(struct task_struct *tsk);
1da177e4 2232#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
e56d0903 2233
158d9ebd 2234extern void __put_task_struct(struct task_struct *t);
e56d0903
IM
2235
2236static inline void put_task_struct(struct task_struct *t)
2237{
2238 if (atomic_dec_and_test(&t->usage))
8c7904a0 2239 __put_task_struct(t);
e56d0903 2240}
1da177e4 2241
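/*
 * Sketch of the usual reference pattern (hypothetical caller): pin the
 * task before using its pointer outside of RCU/tasklist protection.
 *
 *	get_task_struct(tsk);
 *	... tsk cannot be freed here ...
 *	put_task_struct(tsk);	// frees tsk once the last reference drops
 */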
150593bf
ON
2242struct task_struct *task_rcu_dereference(struct task_struct **ptask);
2243struct task_struct *try_get_task_struct(struct task_struct **ptask);
2244
6a61671b
FW
2245#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
2246extern void task_cputime(struct task_struct *t,
2247 cputime_t *utime, cputime_t *stime);
6a61671b
FW
2248extern cputime_t task_gtime(struct task_struct *t);
2249#else
6fac4829
FW
2250static inline void task_cputime(struct task_struct *t,
2251 cputime_t *utime, cputime_t *stime)
2252{
353c50eb
SG
2253 *utime = t->utime;
2254 *stime = t->stime;
6fac4829
FW
2255}
2256
40565b5a
SG
2257static inline cputime_t task_gtime(struct task_struct *t)
2258{
2259 return t->gtime;
2260}
2261#endif
2262
2263#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
6fac4829
FW
2264static inline void task_cputime_scaled(struct task_struct *t,
2265 cputime_t *utimescaled,
2266 cputime_t *stimescaled)
2267{
353c50eb
SG
2268 *utimescaled = t->utimescaled;
2269 *stimescaled = t->stimescaled;
6fac4829 2270}
40565b5a
SG
2271#else
2272static inline void task_cputime_scaled(struct task_struct *t,
2273 cputime_t *utimescaled,
2274 cputime_t *stimescaled)
6a61671b 2275{
40565b5a 2276 task_cputime(t, utimescaled, stimescaled);
6a61671b
FW
2277}
2278#endif
40565b5a 2279
e80d0a1a
FW
2280extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
2281extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
49048622 2282
1da177e4
LT
2283/*
2284 * Per process flags
2285 */
1da177e4 2286#define PF_EXITING 0x00000004 /* getting shut down */
778e9a9c 2287#define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */
94886b84 2288#define PF_VCPU 0x00000010 /* I'm a virtual CPU */
21aa9af0 2289#define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */
1da177e4 2290#define PF_FORKNOEXEC 0x00000040 /* forked but didn't exec */
4db96cf0 2291#define PF_MCE_PROCESS 0x00000080 /* process policy on mce errors */
1da177e4
LT
2292#define PF_SUPERPRIV 0x00000100 /* used super-user privileges */
2293#define PF_DUMPCORE 0x00000200 /* dumped core */
2294#define PF_SIGNALED 0x00000400 /* killed by a signal */
2295#define PF_MEMALLOC 0x00000800 /* Allocating memory */
72fa5997 2296#define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */
1da177e4 2297#define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */
774a1221 2298#define PF_USED_ASYNC 0x00004000 /* used async_schedule*(), used by module init */
1da177e4
LT
2299#define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */
2300#define PF_FROZEN 0x00010000 /* frozen for system suspend */
2301#define PF_FSTRANS 0x00020000 /* inside a filesystem transaction */
2302#define PF_KSWAPD 0x00040000 /* I am kswapd */
21caf2fc 2303#define PF_MEMALLOC_NOIO 0x00080000 /* Allocating memory without IO involved */
1da177e4 2304#define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */
246bb0b1 2305#define PF_KTHREAD 0x00200000 /* I am a kernel thread */
b31dc66a
JA
2306#define PF_RANDOMIZE 0x00400000 /* randomize virtual address space */
2307#define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */
14a40ffc 2308#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */
4db96cf0 2309#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */
61a87122 2310#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */
58a69cb4 2311#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */
2b44c4db 2312#define PF_SUSPEND_TASK 0x80000000 /* this thread called freeze_processes and should not be frozen */
1da177e4
LT
2313
2314/*
2315 * Only the _current_ task can read/write to tsk->flags, but other
2316 * tasks can access tsk->flags in readonly mode for example
2317 * with tsk_used_math (like during threaded core dumping).
2318 * There is however an exception to this rule during ptrace
2319 * or during fork: the ptracer task is allowed to write to the
2320 * child->flags of its traced child (same goes for fork, the parent
2321 * can write to the child->flags), because we're guaranteed the
2322 * child is not running and in turn not changing child->flags
2323 * at the same time the parent does it.
2324 */
2325#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
2326#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
2327#define clear_used_math() clear_stopped_child_used_math(current)
2328#define set_used_math() set_stopped_child_used_math(current)
2329#define conditional_stopped_child_used_math(condition, child) \
2330 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
2331#define conditional_used_math(condition) \
2332 conditional_stopped_child_used_math(condition, current)
2333#define copy_to_stopped_child_used_math(child) \
2334 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
2335/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
2336#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
2337#define used_math() tsk_used_math(current)
2338
934f3072
JB
2339/* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags
2340 * __GFP_FS is also cleared as it implies __GFP_IO.
2341 */
21caf2fc
ML
2342static inline gfp_t memalloc_noio_flags(gfp_t flags)
2343{
2344 if (unlikely(current->flags & PF_MEMALLOC_NOIO))
934f3072 2345 flags &= ~(__GFP_IO | __GFP_FS);
21caf2fc
ML
2346 return flags;
2347}
2348
2349static inline unsigned int memalloc_noio_save(void)
2350{
2351 unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
2352 current->flags |= PF_MEMALLOC_NOIO;
2353 return flags;
2354}
2355
2356static inline void memalloc_noio_restore(unsigned int flags)
2357{
2358 current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
2359}
2360
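/*
 * Hedged usage sketch (hypothetical caller): bracket a region whose
 * allocations must not recurse into I/O; memalloc_noio_flags() then
 * masks __GFP_IO/__GFP_FS for allocations made in between.
 *
 *	unsigned int noio_flags = memalloc_noio_save();
 *
 *	... allocate; I/O-triggering GFP flags are suppressed ...
 *	memalloc_noio_restore(noio_flags);
 */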
1d4457f9 2361/* Per-process atomic flags. */
a2b86f77 2362#define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. */
2ad654bc
ZL
2363#define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */
2364#define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */
77ed2c57 2365#define PFA_LMK_WAITING 3 /* Lowmemorykiller is waiting */
2ad654bc 2366
1d4457f9 2367
e0e5070b
ZL
2368#define TASK_PFA_TEST(name, func) \
2369 static inline bool task_##func(struct task_struct *p) \
2370 { return test_bit(PFA_##name, &p->atomic_flags); }
2371#define TASK_PFA_SET(name, func) \
2372 static inline void task_set_##func(struct task_struct *p) \
2373 { set_bit(PFA_##name, &p->atomic_flags); }
2374#define TASK_PFA_CLEAR(name, func) \
2375 static inline void task_clear_##func(struct task_struct *p) \
2376 { clear_bit(PFA_##name, &p->atomic_flags); }
2377
2378TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
2379TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)
1d4457f9 2380
2ad654bc
ZL
2381TASK_PFA_TEST(SPREAD_PAGE, spread_page)
2382TASK_PFA_SET(SPREAD_PAGE, spread_page)
2383TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)
2384
2385TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
2386TASK_PFA_SET(SPREAD_SLAB, spread_slab)
2387TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
1d4457f9 2388
77ed2c57
TH
2389TASK_PFA_TEST(LMK_WAITING, lmk_waiting)
2390TASK_PFA_SET(LMK_WAITING, lmk_waiting)
2391
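/*
 * Illustrative note (not in the original header): each TASK_PFA_* line
 * above expands to a small accessor, e.g. for NO_NEW_PRIVS:
 *
 *	if (task_no_new_privs(p))	// test_bit(PFA_NO_NEW_PRIVS, ...)
 *		...
 *	task_set_no_new_privs(p);	// set_bit(PFA_NO_NEW_PRIVS, ...)
 */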
e5c1902e 2392/*
a8f072c1 2393 * task->jobctl flags
e5c1902e 2394 */
a8f072c1 2395#define JOBCTL_STOP_SIGMASK 0xffff /* signr of the last group stop */
e5c1902e 2396
a8f072c1
TH
2397#define JOBCTL_STOP_DEQUEUED_BIT 16 /* stop signal dequeued */
2398#define JOBCTL_STOP_PENDING_BIT 17 /* task should stop for group stop */
2399#define JOBCTL_STOP_CONSUME_BIT 18 /* consume group stop count */
73ddff2b 2400#define JOBCTL_TRAP_STOP_BIT 19 /* trap for STOP */
fb1d910c 2401#define JOBCTL_TRAP_NOTIFY_BIT 20 /* trap for NOTIFY */
a8f072c1 2402#define JOBCTL_TRAPPING_BIT 21 /* switching to TRACED */
544b2c91 2403#define JOBCTL_LISTENING_BIT 22 /* ptracer is listening for events */
a8f072c1 2404
b76808e6
PD
2405#define JOBCTL_STOP_DEQUEUED (1UL << JOBCTL_STOP_DEQUEUED_BIT)
2406#define JOBCTL_STOP_PENDING (1UL << JOBCTL_STOP_PENDING_BIT)
2407#define JOBCTL_STOP_CONSUME (1UL << JOBCTL_STOP_CONSUME_BIT)
2408#define JOBCTL_TRAP_STOP (1UL << JOBCTL_TRAP_STOP_BIT)
2409#define JOBCTL_TRAP_NOTIFY (1UL << JOBCTL_TRAP_NOTIFY_BIT)
2410#define JOBCTL_TRAPPING (1UL << JOBCTL_TRAPPING_BIT)
2411#define JOBCTL_LISTENING (1UL << JOBCTL_LISTENING_BIT)
a8f072c1 2412
fb1d910c 2413#define JOBCTL_TRAP_MASK (JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
73ddff2b 2414#define JOBCTL_PENDING_MASK (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)
3759a0d9 2415
7dd3db54 2416extern bool task_set_jobctl_pending(struct task_struct *task,
b76808e6 2417 unsigned long mask);
73ddff2b 2418extern void task_clear_jobctl_trapping(struct task_struct *task);
3759a0d9 2419extern void task_clear_jobctl_pending(struct task_struct *task,
b76808e6 2420 unsigned long mask);
39efa3ef 2421
f41d911f
PM
2422static inline void rcu_copy_process(struct task_struct *p)
2423{
8315f422 2424#ifdef CONFIG_PREEMPT_RCU
f41d911f 2425 p->rcu_read_lock_nesting = 0;
1d082fd0 2426 p->rcu_read_unlock_special.s = 0;
dd5d19ba 2427 p->rcu_blocked_node = NULL;
f41d911f 2428 INIT_LIST_HEAD(&p->rcu_node_entry);
8315f422
PM
2429#endif /* #ifdef CONFIG_PREEMPT_RCU */
2430#ifdef CONFIG_TASKS_RCU
2431 p->rcu_tasks_holdout = false;
2432 INIT_LIST_HEAD(&p->rcu_tasks_holdout_list);
176f8f7a 2433 p->rcu_tasks_idle_cpu = -1;
8315f422 2434#endif /* #ifdef CONFIG_TASKS_RCU */
f41d911f
PM
2435}
2436
907aed48
MG
2437static inline void tsk_restore_flags(struct task_struct *task,
2438 unsigned long orig_flags, unsigned long flags)
2439{
2440 task->flags &= ~flags;
2441 task->flags |= orig_flags & flags;
2442}
2443
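/*
 * Hedged usage sketch (hypothetical caller): temporarily raise a PF_ bit
 * and later put it back exactly as it was.
 *
 *	unsigned long pflags = current->flags & PF_MEMALLOC;
 *
 *	current->flags |= PF_MEMALLOC;
 *	... allocation-critical section ...
 *	tsk_restore_flags(current, pflags, PF_MEMALLOC);
 */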
f82f8042
JL
2444extern int cpuset_cpumask_can_shrink(const struct cpumask *cur,
2445 const struct cpumask *trial);
7f51412a
JL
2446extern int task_can_attach(struct task_struct *p,
2447 const struct cpumask *cs_cpus_allowed);
1da177e4 2448#ifdef CONFIG_SMP
1e1b6c51
KM
2449extern void do_set_cpus_allowed(struct task_struct *p,
2450 const struct cpumask *new_mask);
2451
cd8ba7cd 2452extern int set_cpus_allowed_ptr(struct task_struct *p,
96f874e2 2453 const struct cpumask *new_mask);
1da177e4 2454#else
1e1b6c51
KM
2455static inline void do_set_cpus_allowed(struct task_struct *p,
2456 const struct cpumask *new_mask)
2457{
2458}
cd8ba7cd 2459static inline int set_cpus_allowed_ptr(struct task_struct *p,
96f874e2 2460 const struct cpumask *new_mask)
1da177e4 2461{
96f874e2 2462 if (!cpumask_test_cpu(0, new_mask))
1da177e4
LT
2463 return -EINVAL;
2464 return 0;
2465}
2466#endif
e0ad9556 2467
3451d024 2468#ifdef CONFIG_NO_HZ_COMMON
5167e8d5
PZ
2469void calc_load_enter_idle(void);
2470void calc_load_exit_idle(void);
2471#else
2472static inline void calc_load_enter_idle(void) { }
2473static inline void calc_load_exit_idle(void) { }
3451d024 2474#endif /* CONFIG_NO_HZ_COMMON */
5167e8d5 2475
6d0d2878
CB
2476#ifndef cpu_relax_yield
2477#define cpu_relax_yield() cpu_relax()
2478#endif
2479
b342501c 2480/*
c676329a
PZ
2481 * Do not use outside of architecture code which knows its limitations.
2482 *
2483 * sched_clock() has no promise of monotonicity or bounded drift between
2484 * CPUs; using it (which you should not) requires disabling IRQs.
2485 *
2486 * Please use one of the three interfaces below.
b342501c 2487 */
1bbfa6f2 2488extern unsigned long long notrace sched_clock(void);
c676329a 2489/*
489a71b0 2490 * See the comment in kernel/sched/clock.c
c676329a 2491 */
545a2bf7 2492extern u64 running_clock(void);
c676329a
PZ
2493extern u64 sched_clock_cpu(int cpu);
2494
e436d800 2495
c1955a3d 2496extern void sched_clock_init(void);
3e51f33f 2497
c1955a3d 2498#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
3e51f33f
PZ
2499static inline void sched_clock_tick(void)
2500{
2501}
2502
2503static inline void sched_clock_idle_sleep_event(void)
2504{
2505}
2506
2507static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
2508{
2509}
2c923e94
DL
2510
2511static inline u64 cpu_clock(int cpu)
2512{
2513 return sched_clock();
2514}
2515
2516static inline u64 local_clock(void)
2517{
2518 return sched_clock();
2519}
3e51f33f 2520#else
c676329a
PZ
2521/*
2522 * Architectures can set this to 1 if they have specified
2523 * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
2524 * but then during bootup it turns out that sched_clock()
2525 * is reliable after all:
2526 */
35af99e6
PZ
2527extern int sched_clock_stable(void);
2528extern void set_sched_clock_stable(void);
2529extern void clear_sched_clock_stable(void);
c676329a 2530
3e51f33f
PZ
2531extern void sched_clock_tick(void);
2532extern void sched_clock_idle_sleep_event(void);
2533extern void sched_clock_idle_wakeup_event(u64 delta_ns);
2c923e94
DL
2534
2535/*
2536 * As outlined in clock.c, provides a fast, high resolution, nanosecond
2537 * time source that is monotonic per cpu argument and has bounded drift
2538 * between cpus.
2539 *
2540 * ######################### BIG FAT WARNING ##########################
2541 * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
2542 * # go backwards !! #
2543 * ####################################################################
2544 */
2545static inline u64 cpu_clock(int cpu)
2546{
2547 return sched_clock_cpu(cpu);
2548}
2549
2550static inline u64 local_clock(void)
2551{
2552 return sched_clock_cpu(raw_smp_processor_id());
2553}
3e51f33f
PZ
2554#endif
2555
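/*
 * Hedged usage sketch (illustrative, not from the original header):
 * local_clock() deltas are only meaningful on one CPU; cross-CPU
 * comparisons can go backwards, as the warning above explains.
 *
 *	u64 t0, delta_ns;
 *
 *	t0 = local_clock();
 *	... work ...
 *	delta_ns = local_clock() - t0;	// valid while we stayed on this CPU
 */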
b52bfee4
VP
2556#ifdef CONFIG_IRQ_TIME_ACCOUNTING
2557/*
2558 * An interface for runtime opt-in of IRQ time accounting based on sched_clock.
2559 * The reason for this explicit opt-in is to avoid a performance penalty with
2560 * slow sched_clocks.
2561 */
2562extern void enable_sched_clock_irqtime(void);
2563extern void disable_sched_clock_irqtime(void);
2564#else
2565static inline void enable_sched_clock_irqtime(void) {}
2566static inline void disable_sched_clock_irqtime(void) {}
2567#endif
2568
36c8b586 2569extern unsigned long long
41b86e9c 2570task_sched_runtime(struct task_struct *task);
1da177e4
LT
2571
2572/* sched_exec is called by processes performing an exec */
2573#ifdef CONFIG_SMP
2574extern void sched_exec(void);
2575#else
2576#define sched_exec() {}
2577#endif
2578
2aa44d05
IM
2579extern void sched_clock_idle_sleep_event(void);
2580extern void sched_clock_idle_wakeup_event(u64 delta_ns);
bb29ab26 2581
1da177e4
LT
2582#ifdef CONFIG_HOTPLUG_CPU
2583extern void idle_task_exit(void);
2584#else
2585static inline void idle_task_exit(void) {}
2586#endif
2587
3451d024 2588#if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP)
1c20091e 2589extern void wake_up_nohz_cpu(int cpu);
06d8308c 2590#else
1c20091e 2591static inline void wake_up_nohz_cpu(int cpu) { }
06d8308c
TG
2592#endif
2593
ce831b38 2594#ifdef CONFIG_NO_HZ_FULL
265f22a9 2595extern u64 scheduler_tick_max_deferment(void);
06d8308c
TG
2596#endif
2597
5091faa4 2598#ifdef CONFIG_SCHED_AUTOGROUP
5091faa4
MG
2599extern void sched_autogroup_create_attach(struct task_struct *p);
2600extern void sched_autogroup_detach(struct task_struct *p);
2601extern void sched_autogroup_fork(struct signal_struct *sig);
2602extern void sched_autogroup_exit(struct signal_struct *sig);
8e5bfa8c 2603extern void sched_autogroup_exit_task(struct task_struct *p);
5091faa4
MG
2604#ifdef CONFIG_PROC_FS
2605extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m);
2e5b5b3a 2606extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice);
5091faa4
MG
2607#endif
2608#else
2609static inline void sched_autogroup_create_attach(struct task_struct *p) { }
2610static inline void sched_autogroup_detach(struct task_struct *p) { }
2611static inline void sched_autogroup_fork(struct signal_struct *sig) { }
2612static inline void sched_autogroup_exit(struct signal_struct *sig) { }
8e5bfa8c 2613static inline void sched_autogroup_exit_task(struct task_struct *p) { }
5091faa4
MG
2614#endif
2615
fa93384f 2616extern int yield_to(struct task_struct *p, bool preempt);
36c8b586
IM
2617extern void set_user_nice(struct task_struct *p, long nice);
2618extern int task_prio(const struct task_struct *p);
d0ea0268
DY
2619/**
2620 * task_nice - return the nice value of a given task.
2621 * @p: the task in question.
2622 *
2623 * Return: The nice value [ -20 ... 0 ... 19 ].
2624 */
2625static inline int task_nice(const struct task_struct *p)
2626{
2627 return PRIO_TO_NICE((p)->static_prio);
2628}
36c8b586
IM
2629extern int can_nice(const struct task_struct *p, const int nice);
2630extern int task_curr(const struct task_struct *p);
1da177e4 2631extern int idle_cpu(int cpu);
fe7de49f
KM
2632extern int sched_setscheduler(struct task_struct *, int,
2633 const struct sched_param *);
961ccddd 2634extern int sched_setscheduler_nocheck(struct task_struct *, int,
fe7de49f 2635 const struct sched_param *);
d50dde5a
DF
2636extern int sched_setattr(struct task_struct *,
2637 const struct sched_attr *);
36c8b586 2638extern struct task_struct *idle_task(int cpu);
c4f30608
PM
2639/**
2640 * is_idle_task - is the specified task an idle task?
fa757281 2641 * @p: the task in question.
e69f6186
YB
2642 *
2643 * Return: 1 if @p is an idle task. 0 otherwise.
c4f30608 2644 */
7061ca3b 2645static inline bool is_idle_task(const struct task_struct *p)
c4f30608
PM
2646{
2647 return p->pid == 0;
2648}
36c8b586 2649extern struct task_struct *curr_task(int cpu);
a458ae2e 2650extern void ia64_set_curr_task(int cpu, struct task_struct *p);
1da177e4
LT
2651
2652void yield(void);
2653
1da177e4 2654union thread_union {
c65eacbe 2655#ifndef CONFIG_THREAD_INFO_IN_TASK
1da177e4 2656 struct thread_info thread_info;
c65eacbe 2657#endif
1da177e4
LT
2658 unsigned long stack[THREAD_SIZE/sizeof(long)];
2659};
2660
2661#ifndef __HAVE_ARCH_KSTACK_END
2662static inline int kstack_end(void *addr)
2663{
2664 /* Reliable end of stack detection:
2665 * Some APM BIOS versions misalign the stack
2666 */
2667 return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
2668}
2669#endif
2670
2671extern union thread_union init_thread_union;
2672extern struct task_struct init_task;
2673
2674extern struct mm_struct init_mm;
2675
198fe21b
PE
2676extern struct pid_namespace init_pid_ns;
2677
2678/*
2679 * find a task by one of its numerical ids
2680 *
198fe21b
PE
2681 * find_task_by_pid_ns():
2682 * finds a task by its pid in the specified namespace
228ebcbe
PE
2683 * find_task_by_vpid():
2684 * finds a task by its virtual pid
198fe21b 2685 *
e49859e7 2686 * see also find_vpid() etc in include/linux/pid.h
198fe21b
PE
2687 */
2688
228ebcbe
PE
2689extern struct task_struct *find_task_by_vpid(pid_t nr);
2690extern struct task_struct *find_task_by_pid_ns(pid_t nr,
2691 struct pid_namespace *ns);
198fe21b 2692
1da177e4 2693/* per-UID process charging. */
7b44ab97 2694extern struct user_struct * alloc_uid(kuid_t);
1da177e4
LT
2695static inline struct user_struct *get_uid(struct user_struct *u)
2696{
2697 atomic_inc(&u->__count);
2698 return u;
2699}
2700extern void free_uid(struct user_struct *);
1da177e4
LT
2701
2702#include <asm/current.h>
2703
f0af911a 2704extern void xtime_update(unsigned long ticks);
1da177e4 2705
b3c97528
HH
2706extern int wake_up_state(struct task_struct *tsk, unsigned int state);
2707extern int wake_up_process(struct task_struct *tsk);
3e51e3ed 2708extern void wake_up_new_task(struct task_struct *tsk);
1da177e4
LT
2709#ifdef CONFIG_SMP
2710 extern void kick_process(struct task_struct *tsk);
2711#else
2712 static inline void kick_process(struct task_struct *tsk) { }
2713#endif
aab03e05 2714extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
ad46c2c4 2715extern void sched_dead(struct task_struct *p);
1da177e4 2716
1da177e4
LT
2717extern void proc_caches_init(void);
2718extern void flush_signals(struct task_struct *);
10ab825b 2719extern void ignore_signals(struct task_struct *);
1da177e4
LT
2720extern void flush_signal_handlers(struct task_struct *, int force_default);
2721extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);
2722
be0e6f29 2723static inline int kernel_dequeue_signal(siginfo_t *info)
1da177e4 2724{
be0e6f29
ON
2725 struct task_struct *tsk = current;
2726 siginfo_t __info;
1da177e4
LT
2727 int ret;
2728
be0e6f29
ON
2729 spin_lock_irq(&tsk->sighand->siglock);
2730 ret = dequeue_signal(tsk, &tsk->blocked, info ?: &__info);
2731 spin_unlock_irq(&tsk->sighand->siglock);
1da177e4
LT
2732
2733 return ret;
53c8f9f1 2734}
1da177e4 2735
9a13049e
ON
2736static inline void kernel_signal_stop(void)
2737{
2738 spin_lock_irq(&current->sighand->siglock);
2739 if (current->jobctl & JOBCTL_STOP_DEQUEUED)
2740 __set_current_state(TASK_STOPPED);
2741 spin_unlock_irq(&current->sighand->siglock);
2742
2743 schedule();
2744}
2745
1da177e4
LT
2746extern void release_task(struct task_struct * p);
2747extern int send_sig_info(int, struct siginfo *, struct task_struct *);
1da177e4
LT
2748extern int force_sigsegv(int, struct task_struct *);
2749extern int force_sig_info(int, struct siginfo *, struct task_struct *);
c4b92fc1 2750extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
c4b92fc1 2751extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
d178bc3a
SH
2752extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *,
2753 const struct cred *, u32);
c4b92fc1
EB
2754extern int kill_pgrp(struct pid *pid, int sig, int priv);
2755extern int kill_pid(struct pid *pid, int sig, int priv);
c3de4b38 2756extern int kill_proc_info(int, struct siginfo *, pid_t);
86773473 2757extern __must_check bool do_notify_parent(struct task_struct *, int);
a7f0765e 2758extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
1da177e4 2759extern void force_sig(int, struct task_struct *);
1da177e4 2760extern int send_sig(int, struct task_struct *, int);
09faef11 2761extern int zap_other_threads(struct task_struct *p);
1da177e4
LT
2762extern struct sigqueue *sigqueue_alloc(void);
2763extern void sigqueue_free(struct sigqueue *);
ac5c2153 2764extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group);
9ac95f2f 2765extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
1da177e4 2766
7e781418
AL
2767#ifdef TIF_RESTORE_SIGMASK
2768/*
2769 * Legacy restore_sigmask accessors. These are inefficient on
2770 * SMP architectures because they require atomic operations.
2771 */
2772
2773/**
2774 * set_restore_sigmask() - make sure saved_sigmask processing gets done
2775 *
2776 * This sets TIF_RESTORE_SIGMASK and ensures that the arch signal code
2777 * will run before returning to user mode, to process the flag. For
2778 * all callers, TIF_SIGPENDING is already set or it's no harm to set
2779 * it. TIF_RESTORE_SIGMASK need not be in the set of bits that the
2780 * arch code will notice on return to user mode, in case those bits
2781 * are scarce. We set TIF_SIGPENDING here to ensure that the arch
2782 * signal code always gets run when TIF_RESTORE_SIGMASK is set.
2783 */
2784static inline void set_restore_sigmask(void)
2785{
2786 set_thread_flag(TIF_RESTORE_SIGMASK);
2787 WARN_ON(!test_thread_flag(TIF_SIGPENDING));
2788}
2789static inline void clear_restore_sigmask(void)
2790{
2791 clear_thread_flag(TIF_RESTORE_SIGMASK);
2792}
2793static inline bool test_restore_sigmask(void)
2794{
2795 return test_thread_flag(TIF_RESTORE_SIGMASK);
2796}
2797static inline bool test_and_clear_restore_sigmask(void)
2798{
2799 return test_and_clear_thread_flag(TIF_RESTORE_SIGMASK);
2800}
2801
2802#else /* TIF_RESTORE_SIGMASK */
2803
2804/* Higher-quality implementation, used if TIF_RESTORE_SIGMASK doesn't exist. */
2805static inline void set_restore_sigmask(void)
2806{
2807 current->restore_sigmask = true;
2808 WARN_ON(!test_thread_flag(TIF_SIGPENDING));
2809}
2810static inline void clear_restore_sigmask(void)
2811{
2812 current->restore_sigmask = false;
2813}
2814static inline bool test_restore_sigmask(void)
2815{
2816 return current->restore_sigmask;
2817}
2818static inline bool test_and_clear_restore_sigmask(void)
2819{
2820 if (!current->restore_sigmask)
2821 return false;
2822 current->restore_sigmask = false;
2823 return true;
2824}
2825#endif
2826
51a7b448
AV
2827static inline void restore_saved_sigmask(void)
2828{
2829 if (test_and_clear_restore_sigmask())
77097ae5 2830 __set_current_blocked(&current->saved_sigmask);
51a7b448
AV
2831}
2832
b7f9a11a
AV
2833static inline sigset_t *sigmask_to_save(void)
2834{
2835 sigset_t *res = &current->blocked;
2836 if (unlikely(test_restore_sigmask()))
2837 res = &current->saved_sigmask;
2838 return res;
2839}
2840
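/*
 * Hedged sketch of the usual caller pattern in sigmask-taking syscalls
 * (ppoll-style; illustrative only, details vary by call site):
 *
 *	sigset_t saved = current->blocked;
 *
 *	set_current_blocked(&newmask);
 *	ret = ...wait for events...;
 *	if (ret == -EINTR) {
 *		current->saved_sigmask = saved;
 *		set_restore_sigmask();	// restored after the handler runs
 *	} else {
 *		set_current_blocked(&saved);
 *	}
 */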
9ec52099
CLG
2841static inline int kill_cad_pid(int sig, int priv)
2842{
2843 return kill_pid(cad_pid, sig, priv);
2844}
2845
1da177e4
LT
2846/* These can be the second arg to send_sig_info/send_group_sig_info. */
2847#define SEND_SIG_NOINFO ((struct siginfo *) 0)
2848#define SEND_SIG_PRIV ((struct siginfo *) 1)
2849#define SEND_SIG_FORCED ((struct siginfo *) 2)
2850
2a855dd0
SAS
2851/*
2852 * True if we are on the alternate signal stack.
2853 */
1da177e4
LT
2854static inline int on_sig_stack(unsigned long sp)
2855{
c876eeab
AL
2856 /*
2857 * If the signal stack is SS_AUTODISARM then, by construction, we
2858 * can't be on the signal stack unless user code deliberately set
2859 * SS_AUTODISARM when we were already on it.
2860 *
2861 * This improves reliability: if user state gets corrupted such that
2862 * the stack pointer points very close to the end of the signal stack,
2863 * then this check will enable the signal to be handled anyway.
2864 */
2865 if (current->sas_ss_flags & SS_AUTODISARM)
2866 return 0;
2867
2a855dd0
SAS
2868#ifdef CONFIG_STACK_GROWSUP
2869 return sp >= current->sas_ss_sp &&
2870 sp - current->sas_ss_sp < current->sas_ss_size;
2871#else
2872 return sp > current->sas_ss_sp &&
2873 sp - current->sas_ss_sp <= current->sas_ss_size;
2874#endif
1da177e4
LT
2875}
2876
2877static inline int sas_ss_flags(unsigned long sp)
2878{
72f15c03
RW
2879 if (!current->sas_ss_size)
2880 return SS_DISABLE;
2881
2882 return on_sig_stack(sp) ? SS_ONSTACK : 0;
1da177e4
LT
2883}
2884
2a742138
SS
2885static inline void sas_ss_reset(struct task_struct *p)
2886{
2887 p->sas_ss_sp = 0;
2888 p->sas_ss_size = 0;
2889 p->sas_ss_flags = SS_DISABLE;
2890}
2891
5a1b98d3
AV
2892static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig)
2893{
2894 if (unlikely((ksig->ka.sa.sa_flags & SA_ONSTACK)) && ! sas_ss_flags(sp))
2895#ifdef CONFIG_STACK_GROWSUP
2896 return current->sas_ss_sp;
2897#else
2898 return current->sas_ss_sp + current->sas_ss_size;
2899#endif
2900 return sp;
2901}
2902
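/*
 * Hedged usage sketch (hypothetical arch signal code): when building a
 * signal frame, sigsp() selects the alternate stack if SA_ONSTACK was
 * requested and we are not already on it.
 *
 *	unsigned long sp = sigsp(user_stack_pointer(regs), ksig);
 *	... lay out the signal frame at sp ...
 */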
1da177e4
LT
2903/*
2904 * Routines for handling mm_structs
2905 */
2906extern struct mm_struct * mm_alloc(void);
2907
2908/* mmdrop drops the mm and the page tables */
b3c97528 2909extern void __mmdrop(struct mm_struct *);
d2005e3f 2910static inline void mmdrop(struct mm_struct *mm)
1da177e4 2911{
6fb43d7b 2912 if (unlikely(atomic_dec_and_test(&mm->mm_count)))
1da177e4
LT
2913 __mmdrop(mm);
2914}
2915
7283094e
MH
2916static inline void mmdrop_async_fn(struct work_struct *work)
2917{
2918 struct mm_struct *mm = container_of(work, struct mm_struct, async_put_work);
2919 __mmdrop(mm);
2920}
2921
2922static inline void mmdrop_async(struct mm_struct *mm)
2923{
2924 if (unlikely(atomic_dec_and_test(&mm->mm_count))) {
2925 INIT_WORK(&mm->async_put_work, mmdrop_async_fn);
2926 schedule_work(&mm->async_put_work);
2927 }
2928}
2929
d2005e3f
ON
2930static inline bool mmget_not_zero(struct mm_struct *mm)
2931{
2932 return atomic_inc_not_zero(&mm->mm_users);
2933}
2934
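/*
 * Sketch of the usual pairing (hypothetical caller): a successful
 * mmget_not_zero() pins the address space and must be balanced by the
 * mmput() declared below.
 *
 *	if (mmget_not_zero(mm)) {
 *		... mm's address space cannot be torn down here ...
 *		mmput(mm);
 *	}
 */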
1da177e4
LT
2935/* mmput gets rid of the mappings and all user-space */
2936extern void mmput(struct mm_struct *);
7ef949d7
MH
2937#ifdef CONFIG_MMU
2938/* Same as above, but performs the slow path from async context. Can
ec8d7c14
MH
2939 * be called from atomic context as well.
2940 */
2941extern void mmput_async(struct mm_struct *);
7ef949d7 2942#endif
ec8d7c14 2943
1da177e4
LT
2944/* Grab a reference to a task's mm, if it is not already going away */
2945extern struct mm_struct *get_task_mm(struct task_struct *task);
8cdb878d
CY
2946/*
2947 * Grab a reference to a task's mm, if it is not already going away
2948 * and ptrace_may_access with the mode parameter passed to it
2949 * succeeds.
2950 */
2951extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
1da177e4
LT
2952/* Remove the current task's stale references to the old mm_struct */
2953extern void mm_release(struct task_struct *, struct mm_struct *);
2954
3033f14a
JT
2955#ifdef CONFIG_HAVE_COPY_THREAD_TLS
2956extern int copy_thread_tls(unsigned long, unsigned long, unsigned long,
2957 struct task_struct *, unsigned long);
2958#else
6f2c55b8 2959extern int copy_thread(unsigned long, unsigned long, unsigned long,
afa86fc4 2960 struct task_struct *);
3033f14a
JT
2961
2962/* Architectures that haven't opted into copy_thread_tls get the tls argument
2963 * via pt_regs, so ignore the tls argument passed via C. */
2964static inline int copy_thread_tls(
2965 unsigned long clone_flags, unsigned long sp, unsigned long arg,
2966 struct task_struct *p, unsigned long tls)
2967{
2968 return copy_thread(clone_flags, sp, arg, p);
2969}
2970#endif
1da177e4 2971extern void flush_thread(void);
5f56a5df
JS
2972
2973#ifdef CONFIG_HAVE_EXIT_THREAD
e6464694 2974extern void exit_thread(struct task_struct *tsk);
5f56a5df 2975#else
e6464694 2976static inline void exit_thread(struct task_struct *tsk)
5f56a5df
JS
2977{
2978}
2979#endif
1da177e4 2980
1da177e4 2981extern void exit_files(struct task_struct *);
a7e5328a 2982extern void __cleanup_sighand(struct sighand_struct *);
cbaffba1 2983
1da177e4 2984extern void exit_itimers(struct signal_struct *);
cbaffba1 2985extern void flush_itimer_signals(void);
1da177e4 2986
9402c95f 2987extern void do_group_exit(int);
1da177e4 2988
c4ad8f98 2989extern int do_execve(struct filename *,
d7627467 2990 const char __user * const __user *,
da3d4c5f 2991 const char __user * const __user *);
51f39a1f
DD
2992extern int do_execveat(int, struct filename *,
2993 const char __user * const __user *,
2994 const char __user * const __user *,
2995 int);
3033f14a 2996extern long _do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *, unsigned long);
e80d6661 2997extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
36c8b586 2998struct task_struct *fork_idle(int);
2aa3a7f8 2999extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
1da177e4 3000
82b89778
AH
3001extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);
3002static inline void set_task_comm(struct task_struct *tsk, const char *from)
3003{
3004 __set_task_comm(tsk, from, false);
3005}
59714d65 3006extern char *get_task_comm(char *to, struct task_struct *tsk);
1da177e4
LT
3007
3008#ifdef CONFIG_SMP
317f3941 3009void scheduler_ipi(void);
85ba2d86 3010extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
1da177e4 3011#else
184748cc 3012static inline void scheduler_ipi(void) { }
85ba2d86
RM
3013static inline unsigned long wait_task_inactive(struct task_struct *p,
3014 long match_state)
3015{
3016 return 1;
3017}
1da177e4
LT
3018#endif
3019
fafe870f
FW
3020#define tasklist_empty() \
3021 list_empty(&init_task.tasks)
3022
05725f7e
JP
3023#define next_task(p) \
3024 list_entry_rcu((p)->tasks.next, struct task_struct, tasks)
1da177e4
LT
3025
3026#define for_each_process(p) \
3027 for (p = &init_task ; (p = next_task(p)) != &init_task ; )
3028
5bb459bb 3029extern bool current_is_single_threaded(void);
d84f4f99 3030
/*
 * Careful: do_each_thread/while_each_thread is a double loop so
 * 'break' will not work as expected - use goto instead.
 */
#define do_each_thread(g, t) \
	for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do

#define while_each_thread(g, t) \
	while ((t = next_thread(t)) != g)

#define __for_each_thread(signal, t)	\
	list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node)

#define for_each_thread(p, t)		\
	__for_each_thread((p)->signal, t)

/* Careful: this is a double loop, 'break' won't work as expected. */
#define for_each_process_thread(p, t)	\
	for_each_process(p) for_each_thread(p, t)

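/*
 * Illustrative sketch (not part of this header): walking every thread in
 * the system with for_each_process_thread(). The walk must run under
 * rcu_read_lock() (or tasklist_lock), and because the macro expands to a
 * double loop, leaving it early needs a goto rather than a 'break'.
 * The helper and its cutoff are hypothetical.
 */
static inline int example_count_running(void)
{
	struct task_struct *p, *t;
	int nr_running = 0;

	rcu_read_lock();
	for_each_process_thread(p, t) {
		if (t->state == TASK_RUNNING)
			nr_running++;
		if (nr_running > 100)
			goto out;	/* 'break' would only exit the inner loop */
	}
out:
	rcu_read_unlock();
	return nr_running;
}
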
static inline int get_nr_threads(struct task_struct *tsk)
{
	return tsk->signal->nr_threads;
}

static inline bool thread_group_leader(struct task_struct *p)
{
	return p->exit_signal >= 0;
}

/*
 * Due to the insanities of de_thread() it is possible for a process
 * to have the pid of the thread group leader without actually being
 * the thread group leader.  For iteration through the pids in proc
 * all we care about is that we have a task with the appropriate
 * pid; we don't actually care if we have the right task.
 */
static inline bool has_group_leader_pid(struct task_struct *p)
{
	return task_pid(p) == p->signal->leader_pid;
}

static inline
bool same_thread_group(struct task_struct *p1, struct task_struct *p2)
{
	return p1->signal == p2->signal;
}

static inline struct task_struct *next_thread(const struct task_struct *p)
{
	return list_entry_rcu(p->thread_group.next,
			      struct task_struct, thread_group);
}

static inline int thread_group_empty(struct task_struct *p)
{
	return list_empty(&p->thread_group);
}

#define delay_group_leader(p) \
		(thread_group_leader(p) && !thread_group_empty(p))

/*
 * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
 * subscriptions and synchronises with wait4().  Also used in procfs.  Also
 * pins the final release of task.io_context.  Also protects ->cpuset and
 * ->cgroup.subsys[].  And ->vfork_done.
 *
 * Nests both inside and outside of read_lock(&tasklist_lock).
 * It must not be nested with write_lock_irq(&tasklist_lock),
 * neither inside nor outside.
 */
static inline void task_lock(struct task_struct *p)
{
	spin_lock(&p->alloc_lock);
}

static inline void task_unlock(struct task_struct *p)
{
	spin_unlock(&p->alloc_lock);
}

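/*
 * Illustrative sketch (not part of this header): task_lock() stabilises the
 * fields listed above, so e.g. a hypothetical check of whether a task still
 * has an address space must hold it while peeking at ->mm.
 */
static inline bool example_task_has_mm(struct task_struct *tsk)
{
	bool ret;

	task_lock(tsk);
	ret = tsk->mm != NULL;	/* ->mm is stable while task_lock is held */
	task_unlock(tsk);
	return ret;
}
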
extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
							unsigned long *flags);

static inline struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
						       unsigned long *flags)
{
	struct sighand_struct *ret;

	ret = __lock_task_sighand(tsk, flags);
	(void)__cond_lock(&tsk->sighand->siglock, ret);
	return ret;
}

static inline void unlock_task_sighand(struct task_struct *tsk,
						unsigned long *flags)
{
	spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
}

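/*
 * Illustrative sketch (not part of this header): lock_task_sighand() can
 * fail when the task is already dead (no ->sighand left), so callers must
 * check the return value before touching signal state. The helper below
 * is hypothetical.
 */
static inline bool example_has_pending_sigkill(struct task_struct *tsk)
{
	unsigned long flags;
	bool pending = false;

	if (lock_task_sighand(tsk, &flags)) {
		pending = sigismember(&tsk->pending.signal, SIGKILL);
		unlock_task_sighand(tsk, &flags);
	}
	return pending;
}
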
/**
 * threadgroup_change_begin - mark the beginning of changes to a threadgroup
 * @tsk: task causing the changes
 *
 * All operations which modify a threadgroup - a new thread joining the
 * group, death of a member thread (the assertion of PF_EXITING) and
 * exec(2) dethreading the process and replacing the leader - are wrapped
 * by threadgroup_change_{begin|end}().  This is to provide a place which
 * subsystems needing threadgroup stability can hook into for
 * synchronization.
 */
static inline void threadgroup_change_begin(struct task_struct *tsk)
{
	might_sleep();
	cgroup_threadgroup_change_begin(tsk);
}

/**
 * threadgroup_change_end - mark the end of changes to a threadgroup
 * @tsk: task causing the changes
 *
 * See threadgroup_change_begin().
 */
static inline void threadgroup_change_end(struct task_struct *tsk)
{
	cgroup_threadgroup_change_end(tsk);
}

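/*
 * Illustrative sketch (not part of this header): a hypothetical operation
 * that modifies a threadgroup brackets the change with the pair above so
 * that cgroup and other subsystems see a stable group throughout.
 */
static inline void example_threadgroup_op(struct task_struct *tsk)
{
	threadgroup_change_begin(tsk);	/* may sleep */
	/* ... join/exit/exec-style modification of tsk's threadgroup ... */
	threadgroup_change_end(tsk);
}
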
#ifdef CONFIG_THREAD_INFO_IN_TASK

static inline struct thread_info *task_thread_info(struct task_struct *task)
{
	return &task->thread_info;
}

/*
 * When accessing the stack of a non-current task that might exit, use
 * try_get_task_stack() instead.  task_stack_page will return a pointer
 * that could get freed out from under you.
 */
static inline void *task_stack_page(const struct task_struct *task)
{
	return task->stack;
}

#define setup_thread_stack(new,old)	do { } while(0)

static inline unsigned long *end_of_stack(const struct task_struct *task)
{
	return task->stack;
}

#elif !defined(__HAVE_THREAD_FUNCTIONS)

#define task_thread_info(task)	((struct thread_info *)(task)->stack)
#define task_stack_page(task)	((void *)(task)->stack)

static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
{
	*task_thread_info(p) = *task_thread_info(org);
	task_thread_info(p)->task = p;
}

/*
 * Return the address of the last usable long on the stack.
 *
 * When the stack grows down, this is just above the thread
 * info struct.  Going any lower will corrupt the threadinfo.
 *
 * When the stack grows up, this is the highest address.
 * Beyond that position, we corrupt data on the next page.
 */
static inline unsigned long *end_of_stack(struct task_struct *p)
{
#ifdef CONFIG_STACK_GROWSUP
	return (unsigned long *)((unsigned long)task_thread_info(p) + THREAD_SIZE) - 1;
#else
	return (unsigned long *)(task_thread_info(p) + 1);
#endif
}

#endif

#ifdef CONFIG_THREAD_INFO_IN_TASK
static inline void *try_get_task_stack(struct task_struct *tsk)
{
	return atomic_inc_not_zero(&tsk->stack_refcount) ?
		task_stack_page(tsk) : NULL;
}

extern void put_task_stack(struct task_struct *tsk);
#else
static inline void *try_get_task_stack(struct task_struct *tsk)
{
	return task_stack_page(tsk);
}

static inline void put_task_stack(struct task_struct *tsk) {}
#endif

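/*
 * Illustrative sketch (not part of this header): inspecting another task's
 * stack safely.  try_get_task_stack() pins the stack (or returns NULL if
 * the task has already freed it); put_task_stack() drops the reference.
 * The helper below is hypothetical.
 */
static inline bool example_peek_task_stack(struct task_struct *tsk)
{
	void *stack = try_get_task_stack(tsk);

	if (!stack)
		return false;	/* task exited; stack already gone */
	/* ... safe to read the stack page(s) here ... */
	put_task_stack(tsk);
	return true;
}
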
#define task_stack_end_corrupted(task) \
		(*(end_of_stack(task)) != STACK_END_MAGIC)

static inline int object_is_on_stack(void *obj)
{
	void *stack = task_stack_page(current);

	return (obj >= stack) && (obj < (stack + THREAD_SIZE));
}

extern void thread_stack_cache_init(void);

#ifdef CONFIG_DEBUG_STACK_USAGE
static inline unsigned long stack_not_used(struct task_struct *p)
{
	unsigned long *n = end_of_stack(p);

	do {	/* Skip over canary */
# ifdef CONFIG_STACK_GROWSUP
		n--;
# else
		n++;
# endif
	} while (!*n);

# ifdef CONFIG_STACK_GROWSUP
	return (unsigned long)end_of_stack(p) - (unsigned long)n;
# else
	return (unsigned long)n - (unsigned long)end_of_stack(p);
# endif
}
#endif
extern void set_task_stack_end_magic(struct task_struct *tsk);

/*
 * Set thread flags in another task's structures.
 * See asm/thread_info.h for the TIF_xxxx flags available.
 */
static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void set_tsk_need_resched(struct task_struct *tsk)
{
	set_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
}

static inline void clear_tsk_need_resched(struct task_struct *tsk)
{
	clear_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
}

static inline int test_tsk_need_resched(struct task_struct *tsk)
{
	return unlikely(test_tsk_thread_flag(tsk, TIF_NEED_RESCHED));
}

static inline int restart_syscall(void)
{
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	return -ERESTARTNOINTR;
}

static inline int signal_pending(struct task_struct *p)
{
	return unlikely(test_tsk_thread_flag(p, TIF_SIGPENDING));
}

static inline int __fatal_signal_pending(struct task_struct *p)
{
	return unlikely(sigismember(&p->pending.signal, SIGKILL));
}

static inline int fatal_signal_pending(struct task_struct *p)
{
	return signal_pending(p) && __fatal_signal_pending(p);
}

static inline int signal_pending_state(long state, struct task_struct *p)
{
	if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
		return 0;
	if (!signal_pending(p))
		return 0;

	return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
}

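/*
 * Illustrative sketch (not part of this header): long-running kernel work
 * done on behalf of a task typically polls fatal_signal_pending() so that
 * SIGKILL can abort it promptly.  The helper below is hypothetical.
 */
static inline int example_copy_many_pages(unsigned long nr_pages)
{
	unsigned long i;

	for (i = 0; i < nr_pages; i++) {
		if (fatal_signal_pending(current))
			return -EINTR;	/* let SIGKILL terminate the work */
		/* ... copy one page ... */
	}
	return 0;
}
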
/*
 * cond_resched() and cond_resched_lock(): latency reduction via
 * explicit rescheduling in places that are safe.  The return
 * value indicates whether a reschedule was in fact done.
 * cond_resched_lock() will drop the spinlock before scheduling,
 * cond_resched_softirq() will enable bhs before scheduling.
 */
#ifndef CONFIG_PREEMPT
extern int _cond_resched(void);
#else
static inline int _cond_resched(void) { return 0; }
#endif

#define cond_resched() ({			\
	___might_sleep(__FILE__, __LINE__, 0);	\
	_cond_resched();			\
})

extern int __cond_resched_lock(spinlock_t *lock);

#define cond_resched_lock(lock) ({				\
	___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
	__cond_resched_lock(lock);				\
})

extern int __cond_resched_softirq(void);

#define cond_resched_softirq() ({					\
	___might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET);	\
	__cond_resched_softirq();					\
})

static inline void cond_resched_rcu(void)
{
#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
	rcu_read_unlock();
	cond_resched();
	rcu_read_lock();
#endif
}

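/*
 * Illustrative sketch (not part of this header): a long loop in process
 * context calls cond_resched() on every iteration so that other tasks can
 * run even on !CONFIG_PREEMPT kernels.  The helper below is hypothetical.
 */
static inline void example_process_items(unsigned long nr_items)
{
	unsigned long i;

	for (i = 0; i < nr_items; i++) {
		/* ... process item i ... */
		cond_resched();	/* safe point: may schedule here */
	}
}
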
static inline unsigned long get_preempt_disable_ip(struct task_struct *p)
{
#ifdef CONFIG_DEBUG_PREEMPT
	return p->preempt_disable_ip;
#else
	return 0;
#endif
}

/*
 * Does a critical section need to be broken because another task is
 * waiting?  (Technically this does not depend on CONFIG_PREEMPT, but it
 * addresses a general need for low latency.)
 */
static inline int spin_needbreak(spinlock_t *lock)
{
#ifdef CONFIG_PREEMPT
	return spin_is_contended(lock);
#else
	return 0;
#endif
}

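/*
 * Illustrative sketch (not part of this header): cond_resched_lock() above
 * combines this check with a voluntary reschedule; a lock-holding walk can
 * call it periodically to yield the lock to waiters.  The helper below is
 * hypothetical.
 */
static inline void example_walk_under_lock(spinlock_t *lock)
{
	spin_lock(lock);
	/* ... for each element: do one unit of work, then ... */
	cond_resched_lock(lock);	/* may drop and retake 'lock' */
	spin_unlock(lock);
}
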
/*
 * Idle thread specific functions to determine the need_resched
 * polling state.
 */
#ifdef TIF_POLLING_NRFLAG
static inline int tsk_is_polling(struct task_struct *p)
{
	return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG);
}

static inline void __current_set_polling(void)
{
	set_thread_flag(TIF_POLLING_NRFLAG);
}

static inline bool __must_check current_set_polling_and_test(void)
{
	__current_set_polling();

	/*
	 * Polling state must be visible before we test NEED_RESCHED,
	 * paired with resched_curr().
	 */
	smp_mb__after_atomic();

	return unlikely(tif_need_resched());
}

static inline void __current_clr_polling(void)
{
	clear_thread_flag(TIF_POLLING_NRFLAG);
}

static inline bool __must_check current_clr_polling_and_test(void)
{
	__current_clr_polling();

	/*
	 * Polling state must be visible before we test NEED_RESCHED,
	 * paired with resched_curr().
	 */
	smp_mb__after_atomic();

	return unlikely(tif_need_resched());
}

#else
static inline int tsk_is_polling(struct task_struct *p) { return 0; }
static inline void __current_set_polling(void) { }
static inline void __current_clr_polling(void) { }

static inline bool __must_check current_set_polling_and_test(void)
{
	return unlikely(tif_need_resched());
}

static inline bool __must_check current_clr_polling_and_test(void)
{
	return unlikely(tif_need_resched());
}
#endif

static inline void current_clr_polling(void)
{
	__current_clr_polling();

	/*
	 * Ensure we check TIF_NEED_RESCHED after we clear the polling bit.
	 * Once the bit is cleared, we'll get IPIs with every new
	 * TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also
	 * fold.
	 */
	smp_mb(); /* paired with resched_curr() */

	preempt_fold_need_resched();
}

static __always_inline bool need_resched(void)
{
	return unlikely(tif_need_resched());
}

/*
 * Thread group CPU time accounting.
 */
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);

/*
 * Reevaluate whether the task has signals pending delivery.
 * Wake the task if so.
 * This is required every time the blocked sigset_t changes.
 * Callers must hold sighand->siglock.
 */
extern void recalc_sigpending_and_wake(struct task_struct *t);
extern void recalc_sigpending(void);

extern void signal_wake_up_state(struct task_struct *t, unsigned int state);

static inline void signal_wake_up(struct task_struct *t, bool resume)
{
	signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
}

static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
{
	signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
}

/*
 * Wrappers for p->thread_info->cpu access.  No-op on UP.
 */
#ifdef CONFIG_SMP

static inline unsigned int task_cpu(const struct task_struct *p)
{
#ifdef CONFIG_THREAD_INFO_IN_TASK
	return p->cpu;
#else
	return task_thread_info(p)->cpu;
#endif
}

static inline int task_node(const struct task_struct *p)
{
	return cpu_to_node(task_cpu(p));
}

extern void set_task_cpu(struct task_struct *p, unsigned int cpu);

#else

static inline unsigned int task_cpu(const struct task_struct *p)
{
	return 0;
}

static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
{
}

#endif /* CONFIG_SMP */

/*
 * In order to reduce various lock holder preemption latencies, provide an
 * interface to see if a vCPU is currently running or not.
 *
 * This allows us to terminate optimistic spin loops and block, analogous to
 * the native optimistic spin heuristic of testing if the lock owner task is
 * running or not.
 */
#ifndef vcpu_is_preempted
# define vcpu_is_preempted(cpu)	false
#endif

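/*
 * Illustrative sketch (not part of this header): an optimistic spin loop
 * gives up early when the CPU holding the resource is a preempted vCPU,
 * since spinning on it would only burn cycles.  The helper below is
 * hypothetical.
 */
static inline bool example_should_keep_spinning(int owner_cpu)
{
	return !vcpu_is_preempted(owner_cpu);
}
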
extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
extern long sched_getaffinity(pid_t pid, struct cpumask *mask);

#ifdef CONFIG_CGROUP_SCHED
extern struct task_group root_task_group;
#endif /* CONFIG_CGROUP_SCHED */

extern int task_can_switch_user(struct user_struct *up,
					struct task_struct *tsk);

#ifdef CONFIG_TASK_XACCT
static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
{
	tsk->ioac.rchar += amt;
}

static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
{
	tsk->ioac.wchar += amt;
}

static inline void inc_syscr(struct task_struct *tsk)
{
	tsk->ioac.syscr++;
}

static inline void inc_syscw(struct task_struct *tsk)
{
	tsk->ioac.syscw++;
}
#else
static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
{
}

static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
{
}

static inline void inc_syscr(struct task_struct *tsk)
{
}

static inline void inc_syscw(struct task_struct *tsk)
{
}
#endif

#ifndef TASK_SIZE_OF
#define TASK_SIZE_OF(tsk)	TASK_SIZE
#endif

#ifdef CONFIG_MEMCG
extern void mm_update_next_owner(struct mm_struct *mm);
#else
static inline void mm_update_next_owner(struct mm_struct *mm)
{
}
#endif /* CONFIG_MEMCG */

static inline unsigned long task_rlimit(const struct task_struct *tsk,
		unsigned int limit)
{
	return READ_ONCE(tsk->signal->rlim[limit].rlim_cur);
}

static inline unsigned long task_rlimit_max(const struct task_struct *tsk,
		unsigned int limit)
{
	return READ_ONCE(tsk->signal->rlim[limit].rlim_max);
}

static inline unsigned long rlimit(unsigned int limit)
{
	return task_rlimit(current, limit);
}

static inline unsigned long rlimit_max(unsigned int limit)
{
	return task_rlimit_max(current, limit);
}

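/*
 * Illustrative sketch (not part of this header): enforcing a per-task
 * resource limit, here the address-space limit of the current task.
 * The helper below is hypothetical.
 */
static inline bool example_fits_in_as_limit(unsigned long new_size)
{
	unsigned long limit = rlimit(RLIMIT_AS);

	return limit == RLIM_INFINITY || new_size <= limit;
}
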
#define SCHED_CPUFREQ_RT	(1U << 0)
#define SCHED_CPUFREQ_DL	(1U << 1)
#define SCHED_CPUFREQ_IOWAIT	(1U << 2)

#define SCHED_CPUFREQ_RT_DL	(SCHED_CPUFREQ_RT | SCHED_CPUFREQ_DL)

#ifdef CONFIG_CPU_FREQ
struct update_util_data {
	void (*func)(struct update_util_data *data, u64 time, unsigned int flags);
};

void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data,
		       void (*func)(struct update_util_data *data, u64 time,
				    unsigned int flags));
void cpufreq_remove_update_util_hook(int cpu);
#endif /* CONFIG_CPU_FREQ */

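/*
 * Illustrative sketch (not part of this header): a cpufreq governor
 * registers a per-CPU callback that the scheduler invokes on utilization
 * updates.  All example_* names are hypothetical.
 */
#ifdef CONFIG_CPU_FREQ
static struct update_util_data example_update_util;

static void example_util_cb(struct update_util_data *data, u64 time,
			    unsigned int flags)
{
	if (flags & SCHED_CPUFREQ_IOWAIT) {
		/* e.g. bump the frequency for iowait wakeups */
	}
}

static void example_register_hook(int cpu)
{
	cpufreq_add_update_util_hook(cpu, &example_update_util,
				     example_util_cb);
}
#endif
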
#endif /* _LINUX_SCHED_H */