/* include/linux/sched.h */
#ifndef _LINUX_SCHED_H
#define _LINUX_SCHED_H

#include <uapi/linux/sched.h>

#include <linux/sched/prio.h>

struct sched_param {
	int sched_priority;
};

#include <asm/param.h>	/* for HZ */

#include <linux/capability.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/plist.h>
#include <linux/rbtree.h>
#include <linux/thread_info.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/nodemask.h>
#include <linux/mm_types.h>
#include <linux/preempt.h>

#include <asm/page.h>
#include <asm/ptrace.h>
#include <linux/cputime.h>

#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/shm.h>
#include <linux/signal.h>
#include <linux/compiler.h>
#include <linux/completion.h>
#include <linux/pid.h>
#include <linux/percpu.h>
#include <linux/topology.h>
#include <linux/seccomp.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/rtmutex.h>

#include <linux/time.h>
#include <linux/param.h>
#include <linux/resource.h>
#include <linux/timer.h>
#include <linux/hrtimer.h>
#include <linux/kcov.h>
#include <linux/task_io_accounting.h>
#include <linux/latencytop.h>
#include <linux/cred.h>
#include <linux/llist.h>
#include <linux/uidgid.h>
#include <linux/gfp.h>
#include <linux/magic.h>
#include <linux/cgroup-defs.h>

#include <asm/processor.h>

#define SCHED_ATTR_SIZE_VER0	48	/* sizeof first published struct */

/*
 * Extended scheduling parameters data structure.
 *
 * This is needed because the original struct sched_param cannot be
 * altered without introducing ABI issues with legacy applications
 * (e.g., in sched_getparam()).
 *
 * However, the possibility of specifying more than just a priority for
 * the tasks may be useful for a wide variety of application fields, e.g.,
 * multimedia, streaming, automation and control, and many others.
 *
 * This variant (sched_attr) is meant to describe a so-called
 * sporadic time-constrained task. In such a model a task is specified by:
 *  - the activation period or minimum instance inter-arrival time;
 *  - the maximum (or average, depending on the actual scheduling
 *    discipline) computation time of all instances, a.k.a. runtime;
 *  - the deadline (relative to the actual activation time) of each
 *    instance.
 * Very briefly, a periodic (sporadic) task asks for the execution of
 * some specific computation --which is typically called an instance--
 * (at most) every period. Moreover, each instance typically lasts no more
 * than the runtime and must be completed by time instant t equal to
 * the instance activation time + the deadline.
 *
 * This is reflected by the actual fields of the sched_attr structure:
 *
 *  @size		size of the structure, for fwd/bwd compat.
 *
 *  @sched_policy	task's scheduling policy
 *  @sched_flags	for customizing the scheduler behaviour
 *  @sched_nice		task's nice value      (SCHED_NORMAL/BATCH)
 *  @sched_priority	task's static priority (SCHED_FIFO/RR)
 *  @sched_deadline	representative of the task's deadline
 *  @sched_runtime	representative of the task's runtime
 *  @sched_period	representative of the task's period
 *
 * Given this task model, there is a multiplicity of scheduling algorithms
 * and policies that can be used to ensure all the tasks will make their
 * timing constraints.
 *
 * As of now, the SCHED_DEADLINE policy (sched_dl scheduling class) is the
 * only user of this new interface. More information about the algorithm is
 * available in the scheduling class file or in Documentation/.
 */
struct sched_attr {
	u32 size;

	u32 sched_policy;
	u64 sched_flags;

	/* SCHED_NORMAL, SCHED_BATCH */
	s32 sched_nice;

	/* SCHED_FIFO, SCHED_RR */
	u32 sched_priority;

	/* SCHED_DEADLINE */
	u64 sched_runtime;
	u64 sched_deadline;
	u64 sched_period;
};

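/*
 * Usage sketch (illustrative, not part of this header): a userspace
 * program can request SCHED_DEADLINE scheduling by filling a sched_attr
 * and invoking the sched_setattr() syscall. The raw syscall() call and
 * SYS_sched_setattr below are assumptions for the example (glibc provides
 * no wrapper); all times are in nanoseconds and should satisfy
 * runtime <= deadline <= period.
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_DEADLINE,
 *		.sched_runtime	=  10 * 1000 * 1000,
 *		.sched_deadline	=  30 * 1000 * 1000,
 *		.sched_period	= 100 * 1000 * 1000,
 *	};
 *
 *	if (syscall(SYS_sched_setattr, 0, &attr, 0))
 *		perror("sched_setattr");
 */
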
struct futex_pi_state;
struct robust_list_head;
struct bio_list;
struct fs_struct;
struct perf_event_context;
struct blk_plug;
struct filename;
struct nameidata;

#define VMACACHE_BITS 2
#define VMACACHE_SIZE (1U << VMACACHE_BITS)
#define VMACACHE_MASK (VMACACHE_SIZE - 1)

/*
 * These are the constants used to fake the fixed-point load-average
 * counting. Some notes:
 *  - 11 bit fractions expand to 22 bits by the multiplies: this gives
 *    a load-average precision of 10 bits integer + 11 bits fractional
 *  - if you want to count load-averages more often, you need more
 *    precision, or rounding will get you. With 2-second counting freq,
 *    the EXP_n values would be 1981, 2034 and 2043 if still using only
 *    11 bit fractions.
 */
extern unsigned long avenrun[];		/* Load averages */
extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);

#define FSHIFT		11		/* nr of bits of precision */
#define FIXED_1		(1<<FSHIFT)	/* 1.0 as fixed-point */
#define LOAD_FREQ	(5*HZ+1)	/* 5 sec intervals */
#define EXP_1		1884		/* 1/exp(5sec/1min) as fixed-point */
#define EXP_5		2014		/* 1/exp(5sec/5min) */
#define EXP_15		2037		/* 1/exp(5sec/15min) */

#define CALC_LOAD(load,exp,n) \
	load *= exp; \
	load += n*(FIXED_1-exp); \
	load >>= FSHIFT;

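/*
 * Decoding sketch (illustrative): avenrun[] values are in the fixed-point
 * format defined above. Helpers in the style of fs/proc/loadavg.c split
 * such a value into the familiar "integer.hundredths" load-average form:
 *
 *	#define LOAD_INT(x)  ((x) >> FSHIFT)
 *	#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1 - 1)) * 100)
 *
 *	unsigned long avnrun[3];
 *
 *	get_avenrun(avnrun, FIXED_1 / 200, 0);
 *	printk("load: %lu.%02lu\n", LOAD_INT(avnrun[0]), LOAD_FRAC(avnrun[0]));
 */
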
extern unsigned long total_forks;
extern int nr_threads;
DECLARE_PER_CPU(unsigned long, process_counts);
extern int nr_processes(void);
extern unsigned long nr_running(void);
extern bool single_task_running(void);
extern unsigned long nr_iowait(void);
extern unsigned long nr_iowait_cpu(int cpu);
extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);

extern void calc_global_load(unsigned long ticks);

#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
extern void cpu_load_update_nohz_start(void);
extern void cpu_load_update_nohz_stop(void);
#else
static inline void cpu_load_update_nohz_start(void) { }
static inline void cpu_load_update_nohz_stop(void) { }
#endif

extern void dump_cpu_task(int cpu);

struct seq_file;
struct cfs_rq;
struct task_group;
#ifdef CONFIG_SCHED_DEBUG
extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
extern void proc_sched_set_task(struct task_struct *p);
#endif

/*
 * Task state bitmask. NOTE! These bits are also
 * encoded in fs/proc/array.c: get_task_state().
 *
 * We have two separate sets of flags: task->state
 * is about runnability, while task->exit_state is
 * about the task exiting. Confusing, but this way
 * modifying one set can't modify the other one by
 * mistake.
 */
#define TASK_RUNNING		0
#define TASK_INTERRUPTIBLE	1
#define TASK_UNINTERRUPTIBLE	2
#define __TASK_STOPPED		4
#define __TASK_TRACED		8
/* in tsk->exit_state */
#define EXIT_DEAD		16
#define EXIT_ZOMBIE		32
#define EXIT_TRACE		(EXIT_ZOMBIE | EXIT_DEAD)
/* in tsk->state again */
#define TASK_DEAD		64
#define TASK_WAKEKILL		128
#define TASK_WAKING		256
#define TASK_PARKED		512
#define TASK_NOLOAD		1024
#define TASK_NEW		2048
#define TASK_STATE_MAX		4096

#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWPNn"

/* Compile-time check: one state character per task-state bit above. */
extern char ___assert_task_state[1 - 2*!!(
		sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];

/* Convenience macros for the sake of set_task_state */
#define TASK_KILLABLE		(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
#define TASK_STOPPED		(TASK_WAKEKILL | __TASK_STOPPED)
#define TASK_TRACED		(TASK_WAKEKILL | __TASK_TRACED)

#define TASK_IDLE		(TASK_UNINTERRUPTIBLE | TASK_NOLOAD)

/* Convenience macros for the sake of wake_up */
#define TASK_NORMAL		(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
#define TASK_ALL		(TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)

/* get_task_state() */
#define TASK_REPORT		(TASK_RUNNING | TASK_INTERRUPTIBLE | \
				 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
				 __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD)

#define task_is_traced(task)	((task->state & __TASK_TRACED) != 0)
#define task_is_stopped(task)	((task->state & __TASK_STOPPED) != 0)
#define task_is_stopped_or_traced(task)	\
			((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
#define task_contributes_to_load(task)	\
				((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
				 (task->flags & PF_FROZEN) == 0 && \
				 (task->state & TASK_NOLOAD) == 0)

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP

#define __set_task_state(tsk, state_value)			\
	do {							\
		(tsk)->task_state_change = _THIS_IP_;		\
		(tsk)->state = (state_value);			\
	} while (0)
#define set_task_state(tsk, state_value)			\
	do {							\
		(tsk)->task_state_change = _THIS_IP_;		\
		smp_store_mb((tsk)->state, (state_value));	\
	} while (0)

/*
 * set_current_state() includes a barrier so that the write of current->state
 * is correctly serialised wrt the caller's subsequent test of whether to
 * actually sleep:
 *
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	if (do_i_need_to_sleep())
 *		schedule();
 *
 * If the caller does not need such serialisation then use __set_current_state()
 */
#define __set_current_state(state_value)			\
	do {							\
		current->task_state_change = _THIS_IP_;		\
		current->state = (state_value);			\
	} while (0)
#define set_current_state(state_value)				\
	do {							\
		current->task_state_change = _THIS_IP_;		\
		smp_store_mb(current->state, (state_value));	\
	} while (0)

#else

#define __set_task_state(tsk, state_value)		\
	do { (tsk)->state = (state_value); } while (0)
#define set_task_state(tsk, state_value)		\
	smp_store_mb((tsk)->state, (state_value))

/*
 * set_current_state() includes a barrier so that the write of current->state
 * is correctly serialised wrt the caller's subsequent test of whether to
 * actually sleep:
 *
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	if (do_i_need_to_sleep())
 *		schedule();
 *
 * If the caller does not need such serialisation then use __set_current_state()
 */
#define __set_current_state(state_value)		\
	do { current->state = (state_value); } while (0)
#define set_current_state(state_value)			\
	smp_store_mb(current->state, (state_value))

#endif

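/*
 * Canonical usage sketch (illustrative): the condition must be tested
 * after set_current_state() so a concurrent wakeup cannot be missed,
 * and the loop is mandatory because wakeups can be spurious:
 *
 *	for (;;) {
 *		set_current_state(TASK_UNINTERRUPTIBLE);
 *		if (condition)
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 */
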
/* Task command name length */
#define TASK_COMM_LEN 16

#include <linux/spinlock.h>

/*
 * This serializes "schedule()" and also protects
 * the run-queue from deletions/modifications (but
 * _adding_ to the beginning of the run-queue has
 * a separate lock).
 */
extern rwlock_t tasklist_lock;
extern spinlock_t mmlist_lock;

struct task_struct;

#ifdef CONFIG_PROVE_RCU
extern int lockdep_tasklist_lock_is_held(void);
#endif /* #ifdef CONFIG_PROVE_RCU */

extern void sched_init(void);
extern void sched_init_smp(void);
extern asmlinkage void schedule_tail(struct task_struct *prev);
extern void init_idle(struct task_struct *idle, int cpu);
extern void init_idle_bootup_task(struct task_struct *idle);

extern cpumask_var_t cpu_isolated_map;

extern int runqueue_is_locked(int cpu);

#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
extern void nohz_balance_enter_idle(int cpu);
extern void set_cpu_sd_state_idle(void);
extern int get_nohz_timer_target(void);
#else
static inline void nohz_balance_enter_idle(int cpu) { }
static inline void set_cpu_sd_state_idle(void) { }
#endif

/*
 * Only dump TASK_* tasks. (0 for all tasks)
 */
extern void show_state_filter(unsigned long state_filter);

static inline void show_state(void)
{
	show_state_filter(0);
}

extern void show_regs(struct pt_regs *);

/*
 * TASK is a pointer to the task whose backtrace we want to see (or NULL for current
 * task), SP is the stack pointer of the first frame that should be shown in the back
 * trace (or NULL if the entire call-chain of the task should be shown).
 */
extern void show_stack(struct task_struct *task, unsigned long *sp);

extern void cpu_init(void);
extern void trap_init(void);
extern void update_process_times(int user);
extern void scheduler_tick(void);
extern int sched_cpu_starting(unsigned int cpu);
extern int sched_cpu_activate(unsigned int cpu);
extern int sched_cpu_deactivate(unsigned int cpu);

#ifdef CONFIG_HOTPLUG_CPU
extern int sched_cpu_dying(unsigned int cpu);
#else
# define sched_cpu_dying	NULL
#endif

extern void sched_show_task(struct task_struct *p);

#ifdef CONFIG_LOCKUP_DETECTOR
extern void touch_softlockup_watchdog_sched(void);
extern void touch_softlockup_watchdog(void);
extern void touch_softlockup_watchdog_sync(void);
extern void touch_all_softlockup_watchdogs(void);
extern int proc_dowatchdog_thresh(struct ctl_table *table, int write,
				  void __user *buffer,
				  size_t *lenp, loff_t *ppos);
extern unsigned int softlockup_panic;
extern unsigned int hardlockup_panic;
void lockup_detector_init(void);
#else
static inline void touch_softlockup_watchdog_sched(void)
{
}
static inline void touch_softlockup_watchdog(void)
{
}
static inline void touch_softlockup_watchdog_sync(void)
{
}
static inline void touch_all_softlockup_watchdogs(void)
{
}
static inline void lockup_detector_init(void)
{
}
#endif

#ifdef CONFIG_DETECT_HUNG_TASK
void reset_hung_task_detector(void);
#else
static inline void reset_hung_task_detector(void)
{
}
#endif

/* Attach to any functions which should be ignored in wchan output. */
#define __sched		__attribute__((__section__(".sched.text")))

/* Linker adds these: start and end of __sched functions */
extern char __sched_text_start[], __sched_text_end[];

/* Is this address in the __sched functions? */
extern int in_sched_functions(unsigned long addr);

#define	MAX_SCHEDULE_TIMEOUT	LONG_MAX
extern signed long schedule_timeout(signed long timeout);
extern signed long schedule_timeout_interruptible(signed long timeout);
extern signed long schedule_timeout_killable(signed long timeout);
extern signed long schedule_timeout_uninterruptible(signed long timeout);
extern signed long schedule_timeout_idle(signed long timeout);
asmlinkage void schedule(void);
extern void schedule_preempt_disabled(void);

extern long io_schedule_timeout(long timeout);

static inline void io_schedule(void)
{
	io_schedule_timeout(MAX_SCHEDULE_TIMEOUT);
}

void __noreturn do_task_dead(void);

struct nsproxy;
struct user_namespace;

#ifdef CONFIG_MMU
extern void arch_pick_mmap_layout(struct mm_struct *mm);
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
		       unsigned long, unsigned long);
extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
			       unsigned long len, unsigned long pgoff,
			       unsigned long flags);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
#endif

#define SUID_DUMP_DISABLE	0	/* No setuid dumping */
#define SUID_DUMP_USER		1	/* Dump as user of process */
#define SUID_DUMP_ROOT		2	/* Dump as root */

/* mm flags */

/* for SUID_DUMP_* above */
#define MMF_DUMPABLE_BITS 2
#define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1)

extern void set_dumpable(struct mm_struct *mm, int value);
/*
 * This returns the actual value of the suid_dumpable flag. For things
 * that are using this for checking for privilege transitions, it must
 * test against SUID_DUMP_USER rather than treating it as a boolean
 * value.
 */
static inline int __get_dumpable(unsigned long mm_flags)
{
	return mm_flags & MMF_DUMPABLE_MASK;
}

static inline int get_dumpable(struct mm_struct *mm)
{
	return __get_dumpable(mm->flags);
}

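/*
 * Usage sketch (illustrative, hypothetical caller): a check for privilege
 * transitions must compare against SUID_DUMP_USER rather than treating the
 * value as a boolean, since SUID_DUMP_ROOT is also non-zero:
 *
 *	if (get_dumpable(mm) != SUID_DUMP_USER)
 *		return -EPERM;
 */
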
/* coredump filter bits */
#define MMF_DUMP_ANON_PRIVATE	2
#define MMF_DUMP_ANON_SHARED	3
#define MMF_DUMP_MAPPED_PRIVATE	4
#define MMF_DUMP_MAPPED_SHARED	5
#define MMF_DUMP_ELF_HEADERS	6
#define MMF_DUMP_HUGETLB_PRIVATE 7
#define MMF_DUMP_HUGETLB_SHARED  8
#define MMF_DUMP_DAX_PRIVATE	9
#define MMF_DUMP_DAX_SHARED	10

#define MMF_DUMP_FILTER_SHIFT	MMF_DUMPABLE_BITS
#define MMF_DUMP_FILTER_BITS	9
#define MMF_DUMP_FILTER_MASK \
	(((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
#define MMF_DUMP_FILTER_DEFAULT \
	((1 << MMF_DUMP_ANON_PRIVATE) | (1 << MMF_DUMP_ANON_SHARED) |\
	 (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)

#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
# define MMF_DUMP_MASK_DEFAULT_ELF	(1 << MMF_DUMP_ELF_HEADERS)
#else
# define MMF_DUMP_MASK_DEFAULT_ELF	0
#endif
					/* leave room for more dump flags */
#define MMF_VM_MERGEABLE	16	/* KSM may merge identical pages */
#define MMF_VM_HUGEPAGE		17	/* set when VM_HUGEPAGE is set on vma */
#define MMF_EXE_FILE_CHANGED	18	/* see prctl_set_mm_exe_file() */

#define MMF_HAS_UPROBES		19	/* has uprobes */
#define MMF_RECALC_UPROBES	20	/* MMF_HAS_UPROBES can be wrong */
#define MMF_OOM_REAPED		21	/* mm has been already reaped */

#define MMF_INIT_MASK		(MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)

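/*
 * Example (illustrative): the MMF_DUMP_* bits above are what
 * /proc/<pid>/coredump_filter exposes, shifted down by
 * MMF_DUMP_FILTER_SHIFT:
 *
 *	filter = (mm->flags & MMF_DUMP_FILTER_MASK) >> MMF_DUMP_FILTER_SHIFT;
 *
 * so the default filter reports the "anon private", "anon shared" and
 * "hugetlb private" bits (plus ELF headers when
 * CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y).
 */
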
struct sighand_struct {
	atomic_t		count;
	struct k_sigaction	action[_NSIG];
	spinlock_t		siglock;
	wait_queue_head_t	signalfd_wqh;
};

struct pacct_struct {
	int			ac_flag;
	long			ac_exitcode;
	unsigned long		ac_mem;
	cputime_t		ac_utime, ac_stime;
	unsigned long		ac_minflt, ac_majflt;
};

struct cpu_itimer {
	cputime_t expires;
	cputime_t incr;
	u32 error;
	u32 incr_error;
};

/**
 * struct prev_cputime - snapshot of system and user cputime
 * @utime: time spent in user mode
 * @stime: time spent in system mode
 * @lock: protects the above two fields
 *
 * Stores previous user/system time values such that we can guarantee
 * monotonicity.
 */
struct prev_cputime {
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	cputime_t utime;
	cputime_t stime;
	raw_spinlock_t lock;
#endif
};

static inline void prev_cputime_init(struct prev_cputime *prev)
{
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	prev->utime = prev->stime = 0;
	raw_spin_lock_init(&prev->lock);
#endif
}

/**
 * struct task_cputime - collected CPU time counts
 * @utime:		time spent in user mode, in &cputime_t units
 * @stime:		time spent in kernel mode, in &cputime_t units
 * @sum_exec_runtime:	total time spent on the CPU, in nanoseconds
 *
 * This structure groups together three kinds of CPU time that are tracked for
 * threads and thread groups.  Most things considering CPU time want to group
 * these counts together and treat all three of them in parallel.
 */
struct task_cputime {
	cputime_t utime;
	cputime_t stime;
	unsigned long long sum_exec_runtime;
};

/* Alternate field names when used to cache expirations. */
#define virt_exp	utime
#define prof_exp	stime
#define sched_exp	sum_exec_runtime

#define INIT_CPUTIME	\
	(struct task_cputime) {					\
		.utime = 0,					\
		.stime = 0,					\
		.sum_exec_runtime = 0,				\
	}

/*
 * This is the atomic variant of task_cputime, which can be used for
 * storing and updating task_cputime statistics without locking.
 */
struct task_cputime_atomic {
	atomic64_t utime;
	atomic64_t stime;
	atomic64_t sum_exec_runtime;
};

#define INIT_CPUTIME_ATOMIC \
	(struct task_cputime_atomic) {				\
		.utime = ATOMIC64_INIT(0),			\
		.stime = ATOMIC64_INIT(0),			\
		.sum_exec_runtime = ATOMIC64_INIT(0),		\
	}

#define PREEMPT_DISABLED	(PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)

/*
 * Disable preemption until the scheduler is running -- use an unconditional
 * value so that it also works on !PREEMPT_COUNT kernels.
 *
 * Reset by start_kernel()->sched_init()->init_idle()->init_idle_preempt_count().
 */
#define INIT_PREEMPT_COUNT	PREEMPT_OFFSET

/*
 * Initial preempt_count value; reflects the preempt_count schedule invariant
 * which states that during context switches:
 *
 *    preempt_count() == 2*PREEMPT_DISABLE_OFFSET
 *
 * Note: PREEMPT_DISABLE_OFFSET is 0 for !PREEMPT_COUNT kernels.
 * Note: See finish_task_switch().
 */
#define FORK_PREEMPT_COUNT	(2*PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)

/**
 * struct thread_group_cputimer - thread group interval timer counts
 * @cputime_atomic:	atomic thread group interval timers.
 * @running:		true when there are timers running and
 *			@cputime_atomic receives updates.
 * @checking_timer:	true when a thread in the group is in the
 *			process of checking for thread group timers.
 *
 * This structure contains the version of task_cputime, above, that is
 * used for thread group CPU timer calculations.
 */
struct thread_group_cputimer {
	struct task_cputime_atomic cputime_atomic;
	bool running;
	bool checking_timer;
};

#include <linux/rwsem.h>
struct autogroup;

/*
 * NOTE! "signal_struct" does not have its own
 * locking, because a shared signal_struct always
 * implies a shared sighand_struct, so locking
 * sighand_struct is always a proper superset of
 * the locking of signal_struct.
 */
struct signal_struct {
	atomic_t		sigcnt;
	atomic_t		live;
	int			nr_threads;
	atomic_t		oom_victims; /* # of TIF_MEMDIE threads in this thread group */
	struct list_head	thread_head;

	wait_queue_head_t	wait_chldexit;	/* for wait4() */

	/* current thread group signal load-balancing target: */
	struct task_struct	*curr_target;

	/* shared signal handling: */
	struct sigpending	shared_pending;

	/* thread group exit support */
	int			group_exit_code;
	/* overloaded:
	 * - notify group_exit_task when ->count is equal to notify_count
	 * - everyone except group_exit_task is stopped during signal delivery
	 *   of fatal signals, group_exit_task processes the signal.
	 */
	int			notify_count;
	struct task_struct	*group_exit_task;

	/* thread group stop support, overloads group_exit_code too */
	int			group_stop_count;
	unsigned int		flags; /* see SIGNAL_* flags below */

	/*
	 * PR_SET_CHILD_SUBREAPER marks a process, like a service
	 * manager, to re-parent orphan (double-forking) child processes
	 * to this process instead of 'init'. The service manager is
	 * able to receive SIGCHLD signals and is able to investigate
	 * the process until it calls wait(). All children of this
	 * process will inherit a flag if they should look for a
	 * child_subreaper process at exit.
	 */
	unsigned int		is_child_subreaper:1;
	unsigned int		has_child_subreaper:1;

	/* POSIX.1b Interval Timers */
	int			posix_timer_id;
	struct list_head	posix_timers;

	/* ITIMER_REAL timer for the process */
	struct hrtimer real_timer;
	struct pid *leader_pid;
	ktime_t it_real_incr;

	/*
	 * ITIMER_PROF and ITIMER_VIRTUAL timers for the process, we use
	 * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing array as these
	 * values are defined to 0 and 1 respectively
	 */
	struct cpu_itimer it[2];

	/*
	 * Thread group totals for process CPU timers.
	 * See thread_group_cputimer(), et al, for details.
	 */
	struct thread_group_cputimer cputimer;

	/* Earliest-expiration cache. */
	struct task_cputime cputime_expires;

#ifdef CONFIG_NO_HZ_FULL
	atomic_t tick_dep_mask;
#endif

	struct list_head cpu_timers[3];

	struct pid *tty_old_pgrp;

	/* boolean value for session group leader */
	int leader;

	struct tty_struct *tty; /* NULL if no tty */

#ifdef CONFIG_SCHED_AUTOGROUP
	struct autogroup *autogroup;
#endif
	/*
	 * Cumulative resource counters for dead threads in the group,
	 * and for reaped dead child processes forked by this group.
	 * Live threads maintain their own counters and add to these
	 * in __exit_signal, except for the group leader.
	 */
	seqlock_t stats_lock;
	cputime_t utime, stime, cutime, cstime;
	cputime_t gtime;
	cputime_t cgtime;
	struct prev_cputime prev_cputime;
	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
	unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
	unsigned long inblock, oublock, cinblock, coublock;
	unsigned long maxrss, cmaxrss;
	struct task_io_accounting ioac;

	/*
	 * Cumulative ns of schedule CPU time of dead threads in the
	 * group, not including a zombie group leader.  (This only differs
	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
	 * other than jiffies.)
	 */
	unsigned long long sum_sched_runtime;

	/*
	 * We don't bother to synchronize most readers of this at all,
	 * because there is no reader checking a limit that actually needs
	 * to get both rlim_cur and rlim_max atomically, and either one
	 * alone is a single word that can safely be read normally.
	 * getrlimit/setrlimit use task_lock(current->group_leader) to
	 * protect this instead of the siglock, because they really
	 * have no need to disable irqs.
	 */
	struct rlimit rlim[RLIM_NLIMITS];

#ifdef CONFIG_BSD_PROCESS_ACCT
	struct pacct_struct pacct;	/* per-process accounting information */
#endif
#ifdef CONFIG_TASKSTATS
	struct taskstats *stats;
#endif
#ifdef CONFIG_AUDIT
	unsigned audit_tty;
	struct tty_audit_buf *tty_audit_buf;
#endif

	/*
	 * Thread is the potential origin of an oom condition; kill first on
	 * oom
	 */
	bool oom_flag_origin;
	short oom_score_adj;		/* OOM kill score adjustment */
	short oom_score_adj_min;	/* OOM kill score adjustment min value.
					 * Only settable by CAP_SYS_RESOURCE. */
	struct mm_struct *oom_mm;	/* recorded mm when the thread group got
					 * killed by the oom killer */

	struct mutex cred_guard_mutex;	/* guard against foreign influences on
					 * credential calculations
					 * (notably ptrace) */
};

/*
 * Bits in flags field of signal_struct.
 */
#define SIGNAL_STOP_STOPPED	0x00000001 /* job control stop in effect */
#define SIGNAL_STOP_CONTINUED	0x00000002 /* SIGCONT since WCONTINUED reap */
#define SIGNAL_GROUP_EXIT	0x00000004 /* group exit in progress */
#define SIGNAL_GROUP_COREDUMP	0x00000008 /* coredump in progress */
/*
 * Pending notifications to parent.
 */
#define SIGNAL_CLD_STOPPED	0x00000010
#define SIGNAL_CLD_CONTINUED	0x00000020
#define SIGNAL_CLD_MASK		(SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)

#define SIGNAL_UNKILLABLE	0x00000040 /* for init: ignore fatal signals */

/* If true, all threads except ->group_exit_task have pending SIGKILL */
static inline int signal_group_exit(const struct signal_struct *sig)
{
	return	(sig->flags & SIGNAL_GROUP_EXIT) ||
		(sig->group_exit_task != NULL);
}

/*
 * Some day this will be a full-fledged user tracking system..
 */
struct user_struct {
	atomic_t __count;	/* reference count */
	atomic_t processes;	/* How many processes does this user have? */
	atomic_t sigpending;	/* How many pending signals does this user have? */
#ifdef CONFIG_INOTIFY_USER
	atomic_t inotify_watches; /* How many inotify watches does this user have? */
	atomic_t inotify_devs;	/* How many inotify devs does this user have opened? */
#endif
#ifdef CONFIG_FANOTIFY
	atomic_t fanotify_listeners;
#endif
#ifdef CONFIG_EPOLL
	atomic_long_t epoll_watches; /* The number of file descriptors currently watched */
#endif
#ifdef CONFIG_POSIX_MQUEUE
	/* protected by mq_lock	*/
	unsigned long mq_bytes;	/* How many bytes can be allocated to mqueue? */
#endif
	unsigned long locked_shm; /* How many pages of mlocked shm ? */
	unsigned long unix_inflight;	/* How many files in flight in unix sockets */
	atomic_long_t pipe_bufs;  /* how many pages are allocated in pipe buffers */

#ifdef CONFIG_KEYS
	struct key *uid_keyring;	/* UID specific keyring */
	struct key *session_keyring;	/* UID's default session keyring */
#endif

	/* Hash table maintenance information */
	struct hlist_node uidhash_node;
	kuid_t uid;

#if defined(CONFIG_PERF_EVENTS) || defined(CONFIG_BPF_SYSCALL)
	atomic_long_t locked_vm;
#endif
};

extern int uids_sysfs_init(void);

extern struct user_struct *find_user(kuid_t);

extern struct user_struct root_user;
#define INIT_USER (&root_user)

struct backing_dev_info;
struct reclaim_state;

#ifdef CONFIG_SCHED_INFO
struct sched_info {
	/* cumulative counters */
	unsigned long pcount;	      /* # of times run on this cpu */
	unsigned long long run_delay; /* time spent waiting on a runqueue */

	/* timestamps */
	unsigned long long last_arrival,/* when we last ran on a cpu */
			   last_queued;	/* when we were last queued to run */
};
#endif /* CONFIG_SCHED_INFO */

#ifdef CONFIG_TASK_DELAY_ACCT
struct task_delay_info {
	spinlock_t	lock;
	unsigned int	flags;	/* Private per-task flags */

	/* For each stat XXX, add following, aligned appropriately
	 *
	 * struct timespec XXX_start, XXX_end;
	 * u64 XXX_delay;
	 * u32 XXX_count;
	 *
	 * Atomicity of updates to XXX_delay, XXX_count protected by
	 * single lock above (split into XXX_lock if contention is an issue).
	 */

	/*
	 * XXX_count is incremented on every XXX operation, the delay
	 * associated with the operation is added to XXX_delay.
	 * XXX_delay contains the accumulated delay time in nanoseconds.
	 */
	u64 blkio_start;	/* Shared by blkio, swapin */
	u64 blkio_delay;	/* wait for sync block io completion */
	u64 swapin_delay;	/* wait for swapin block io completion */
	u32 blkio_count;	/* total count of the number of sync block */
				/* io operations performed */
	u32 swapin_count;	/* total count of the number of swapin block */
				/* io operations performed */

	u64 freepages_start;
	u64 freepages_delay;	/* wait for memory reclaim */
	u32 freepages_count;	/* total count of memory reclaim */
};
#endif	/* CONFIG_TASK_DELAY_ACCT */

static inline int sched_info_on(void)
{
#ifdef CONFIG_SCHEDSTATS
	return 1;
#elif defined(CONFIG_TASK_DELAY_ACCT)
	extern int delayacct_on;
	return delayacct_on;
#else
	return 0;
#endif
}

#ifdef CONFIG_SCHEDSTATS
void force_schedstat_enabled(void);
#endif

enum cpu_idle_type {
	CPU_IDLE,
	CPU_NOT_IDLE,
	CPU_NEWLY_IDLE,
	CPU_MAX_IDLE_TYPES
};

/*
 * Integer metrics need fixed point arithmetic, e.g., sched/fair
 * has a few: load, load_avg, util_avg, freq, and capacity.
 *
 * We define a basic fixed point arithmetic range, and then formalize
 * all these metrics based on that basic range.
 */
# define SCHED_FIXEDPOINT_SHIFT	10
# define SCHED_FIXEDPOINT_SCALE	(1L << SCHED_FIXEDPOINT_SHIFT)

/*
 * Increase resolution of cpu_capacity calculations
 */
#define SCHED_CAPACITY_SHIFT	SCHED_FIXEDPOINT_SHIFT
#define SCHED_CAPACITY_SCALE	(1L << SCHED_CAPACITY_SHIFT)
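
/*
 * Example (illustrative): with SCHED_FIXEDPOINT_SHIFT == 10, the value
 * 1.0 is represented as 1024, so a CPU with 75% of the biggest CPU's
 * capacity carries:
 *
 *	capacity = (3 * SCHED_CAPACITY_SCALE) / 4;	(== 768)
 */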

/*
 * Wake-queues are lists of tasks with a pending wakeup, whose
 * callers have already marked the task as woken internally,
 * and can thus carry on. A common use case is being able to
 * do the wakeups once the corresponding user lock has been
 * released.
 *
 * We hold reference to each task in the list across the wakeup,
 * thus guaranteeing that the memory is still valid by the time
 * the actual wakeups are performed in wake_up_q().
 *
 * One per task suffices, because there's never a need for a task to be
 * in two wake queues simultaneously; it is forbidden to abandon a task
 * in a wake queue (a call to wake_up_q() _must_ follow), so if a task is
 * already in a wake queue, the wakeup will happen soon and the second
 * waker can just skip it.
 *
 * The WAKE_Q macro declares and initializes the list head.
 * wake_up_q() does NOT reinitialize the list; it's expected to be
 * called near the end of a function, where the fact that the queue is
 * not used again will be easy to see by inspection.
 *
 * Note that this can cause spurious wakeups. schedule() callers
 * must ensure the call is done inside a loop, confirming that the
 * wakeup condition has in fact occurred.
 */
struct wake_q_node {
	struct wake_q_node *next;
};

struct wake_q_head {
	struct wake_q_node *first;
	struct wake_q_node **lastp;
};

#define WAKE_Q_TAIL ((struct wake_q_node *) 0x01)

#define WAKE_Q(name)					\
	struct wake_q_head name = { WAKE_Q_TAIL, &name.first }

extern void wake_q_add(struct wake_q_head *head,
		       struct task_struct *task);
extern void wake_up_q(struct wake_q_head *head);

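/*
 * Usage sketch (illustrative; the lock and task are hypothetical): queue
 * the wakeup while holding the lock, then issue it after the lock is
 * dropped so the woken task does not immediately contend on it:
 *
 *	WAKE_Q(wake_q);
 *
 *	raw_spin_lock(&some_lock);
 *	wake_q_add(&wake_q, some_task);
 *	raw_spin_unlock(&some_lock);
 *
 *	wake_up_q(&wake_q);
 */
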
/*
 * sched-domains (multiprocessor balancing) declarations:
 */
#ifdef CONFIG_SMP
#define SD_LOAD_BALANCE		0x0001	/* Do load balancing on this domain. */
#define SD_BALANCE_NEWIDLE	0x0002	/* Balance when about to become idle */
#define SD_BALANCE_EXEC		0x0004	/* Balance on exec */
#define SD_BALANCE_FORK		0x0008	/* Balance on fork, clone */
#define SD_BALANCE_WAKE		0x0010  /* Balance on wakeup */
#define SD_WAKE_AFFINE		0x0020	/* Wake task to waking CPU */
#define SD_ASYM_CPUCAPACITY	0x0040  /* Groups have different max cpu capacities */
#define SD_SHARE_CPUCAPACITY	0x0080	/* Domain members share cpu capacity */
#define SD_SHARE_POWERDOMAIN	0x0100	/* Domain members share power domain */
#define SD_SHARE_PKG_RESOURCES	0x0200	/* Domain members share cpu pkg resources */
#define SD_SERIALIZE		0x0400	/* Only a single load balancing instance */
#define SD_ASYM_PACKING		0x0800  /* Place busy groups earlier in the domain */
#define SD_PREFER_SIBLING	0x1000	/* Prefer to place tasks in a sibling domain */
#define SD_OVERLAP		0x2000	/* sched_domains of this level overlap */
#define SD_NUMA			0x4000	/* cross-node balancing */

#ifdef CONFIG_SCHED_SMT
static inline int cpu_smt_flags(void)
{
	return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
}
#endif

#ifdef CONFIG_SCHED_MC
static inline int cpu_core_flags(void)
{
	return SD_SHARE_PKG_RESOURCES;
}
#endif

#ifdef CONFIG_NUMA
static inline int cpu_numa_flags(void)
{
	return SD_NUMA;
}
#endif

struct sched_domain_attr {
	int relax_domain_level;
};

#define SD_ATTR_INIT	(struct sched_domain_attr) {	\
	.relax_domain_level = -1,			\
}

extern int sched_domain_level_max;

struct sched_group;

struct sched_domain_shared {
	atomic_t	ref;
	atomic_t	nr_busy_cpus;
	int		has_idle_cores;
};

struct sched_domain {
	/* These fields must be setup */
	struct sched_domain *parent;	/* top domain must be null terminated */
	struct sched_domain *child;	/* bottom domain must be null terminated */
	struct sched_group *groups;	/* the balancing groups of the domain */
	unsigned long min_interval;	/* Minimum balance interval ms */
	unsigned long max_interval;	/* Maximum balance interval ms */
	unsigned int busy_factor;	/* less balancing by factor if busy */
	unsigned int imbalance_pct;	/* No balance until over watermark */
	unsigned int cache_nice_tries;	/* Leave cache hot tasks for # tries */
	unsigned int busy_idx;
	unsigned int idle_idx;
	unsigned int newidle_idx;
	unsigned int wake_idx;
	unsigned int forkexec_idx;
	unsigned int smt_gain;

	int nohz_idle;			/* NOHZ IDLE status */
	int flags;			/* See SD_* */
	int level;

	/* Runtime fields. */
	unsigned long last_balance;	/* init to jiffies. units in jiffies */
	unsigned int balance_interval;	/* initialise to 1. units in ms. */
	unsigned int nr_balance_failed; /* initialise to 0 */

	/* idle_balance() stats */
	u64 max_newidle_lb_cost;
	unsigned long next_decay_max_lb_cost;

	u64 avg_scan_cost;		/* select_idle_sibling */

#ifdef CONFIG_SCHEDSTATS
	/* load_balance() stats */
	unsigned int lb_count[CPU_MAX_IDLE_TYPES];
	unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
	unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
	unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
	unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
	unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
	unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
	unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];

	/* Active load balancing */
	unsigned int alb_count;
	unsigned int alb_failed;
	unsigned int alb_pushed;

	/* SD_BALANCE_EXEC stats */
	unsigned int sbe_count;
	unsigned int sbe_balanced;
	unsigned int sbe_pushed;

	/* SD_BALANCE_FORK stats */
	unsigned int sbf_count;
	unsigned int sbf_balanced;
	unsigned int sbf_pushed;

	/* try_to_wake_up() stats */
	unsigned int ttwu_wake_remote;
	unsigned int ttwu_move_affine;
	unsigned int ttwu_move_balance;
#endif
#ifdef CONFIG_SCHED_DEBUG
	char *name;
#endif
	union {
		void *private;		/* used during construction */
		struct rcu_head rcu;	/* used during destruction */
	};
	struct sched_domain_shared *shared;

	unsigned int span_weight;
	/*
	 * Span of all CPUs in this domain.
	 *
	 * NOTE: this field is variable length. (Allocated dynamically
	 * by attaching extra space to the end of the structure,
	 * depending on how many CPUs the kernel has booted up with)
	 */
	unsigned long span[0];
};

static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
{
	return to_cpumask(sd->span);
}

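/*
 * Example (illustrative): iterating over every CPU covered by a domain,
 * using the generic cpumask iterator (do_something() is hypothetical):
 *
 *	int cpu;
 *
 *	for_each_cpu(cpu, sched_domain_span(sd))
 *		do_something(cpu);
 */
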
extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
				    struct sched_domain_attr *dattr_new);

/* Allocate an array of sched domains, for partition_sched_domains(). */
cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);

bool cpus_share_cache(int this_cpu, int that_cpu);

typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
typedef int (*sched_domain_flags_f)(void);

#define SDTL_OVERLAP	0x01

struct sd_data {
	struct sched_domain **__percpu sd;
	struct sched_domain_shared **__percpu sds;
	struct sched_group **__percpu sg;
	struct sched_group_capacity **__percpu sgc;
};

struct sched_domain_topology_level {
	sched_domain_mask_f mask;
	sched_domain_flags_f sd_flags;
	int		    flags;
	int		    numa_level;
	struct sd_data      data;
#ifdef CONFIG_SCHED_DEBUG
	char                *name;
#endif
};

extern void set_sched_topology(struct sched_domain_topology_level *tl);
extern void wake_up_if_idle(int cpu);

#ifdef CONFIG_SCHED_DEBUG
# define SD_INIT_NAME(type)		.name = #type
#else
# define SD_INIT_NAME(type)
#endif

#else /* CONFIG_SMP */

struct sched_domain_attr;

static inline void
partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
			struct sched_domain_attr *dattr_new)
{
}

static inline bool cpus_share_cache(int this_cpu, int that_cpu)
{
	return true;
}

#endif	/* !CONFIG_SMP */

struct io_context;			/* See blkdev.h */

#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
extern void prefetch_stack(struct task_struct *t);
#else
static inline void prefetch_stack(struct task_struct *t) { }
#endif

struct audit_context;		/* See audit.c */
struct mempolicy;
struct pipe_inode_info;
struct uts_namespace;

struct load_weight {
	unsigned long weight;
	u32 inv_weight;
};

/*
 * The load_avg/util_avg accumulates an infinite geometric series
 * (see __update_load_avg() in kernel/sched/fair.c).
 *
 * [load_avg definition]
 *
 *   load_avg = runnable% * scale_load_down(load)
 *
 * where runnable% is the time ratio that a sched_entity is runnable.
 * For cfs_rq, it is the aggregated load_avg of all runnable and
 * blocked sched_entities.
 *
 * load_avg may also take frequency scaling into account:
 *
 *   load_avg = runnable% * scale_load_down(load) * freq%
 *
 * where freq% is the CPU frequency normalized to the highest frequency.
 *
 * [util_avg definition]
 *
 *   util_avg = running% * SCHED_CAPACITY_SCALE
 *
 * where running% is the time ratio that a sched_entity is running on
 * a CPU. For cfs_rq, it is the aggregated util_avg of all runnable
 * and blocked sched_entities.
 *
 * util_avg may also factor frequency scaling and CPU capacity scaling:
 *
 *   util_avg = running% * SCHED_CAPACITY_SCALE * freq% * capacity%
 *
 * where freq% is the same as above, and capacity% is the CPU capacity
 * normalized to the greatest capacity (due to uarch differences, etc).
 *
 * N.B., the above ratios (runnable%, running%, freq%, and capacity%)
 * themselves are in the range of [0, 1]. To do fixed point arithmetics,
 * we therefore scale them to as large a range as necessary. This is for
 * example reflected by util_avg's SCHED_CAPACITY_SCALE.
 *
 * [Overflow issue]
 *
 * The 64-bit load_sum can have 4353082796 (=2^64/47742/88761) entities
 * with the highest load (=88761), always runnable on a single cfs_rq,
 * and should not overflow as the number already hits PID_MAX_LIMIT.
 *
 * For all other cases (including 32-bit kernels), struct load_weight's
 * weight will overflow first before we do, because:
 *
 *   Max(load_avg) <= Max(load.weight)
 *
 * Then it is the load_weight's responsibility to consider overflow
 * issues.
 */
struct sched_avg {
	u64 last_update_time, load_sum;
	u32 util_sum, period_contrib;
	unsigned long load_avg, util_avg;
};

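/*
 * Worked example (illustrative): per the definitions above, a task
 * recently running about half the time on a full-capacity CPU converges
 * towards util_avg ~= SCHED_CAPACITY_SCALE / 2 = 512, so a utilization
 * percentage can be recovered as:
 *
 *	pct = (100 * se->avg.util_avg) >> SCHED_CAPACITY_SHIFT;
 */
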
#ifdef CONFIG_SCHEDSTATS
struct sched_statistics {
	u64			wait_start;
	u64			wait_max;
	u64			wait_count;
	u64			wait_sum;
	u64			iowait_count;
	u64			iowait_sum;

	u64			sleep_start;
	u64			sleep_max;
	s64			sum_sleep_runtime;

	u64			block_start;
	u64			block_max;
	u64			exec_max;
	u64			slice_max;

	u64			nr_migrations_cold;
	u64			nr_failed_migrations_affine;
	u64			nr_failed_migrations_running;
	u64			nr_failed_migrations_hot;
	u64			nr_forced_migrations;

	u64			nr_wakeups;
	u64			nr_wakeups_sync;
	u64			nr_wakeups_migrate;
	u64			nr_wakeups_local;
	u64			nr_wakeups_remote;
	u64			nr_wakeups_affine;
	u64			nr_wakeups_affine_attempts;
	u64			nr_wakeups_passive;
	u64			nr_wakeups_idle;
};
#endif

struct sched_entity {
	struct load_weight	load;		/* for load-balancing */
	struct rb_node		run_node;
	struct list_head	group_node;
	unsigned int		on_rq;

	u64			exec_start;
	u64			sum_exec_runtime;
	u64			vruntime;
	u64			prev_sum_exec_runtime;

	u64			nr_migrations;

#ifdef CONFIG_SCHEDSTATS
	struct sched_statistics statistics;
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	int			depth;
	struct sched_entity	*parent;
	/* rq on which this entity is (to be) queued: */
	struct cfs_rq		*cfs_rq;
	/* rq "owned" by this entity/group: */
	struct cfs_rq		*my_q;
#endif

#ifdef CONFIG_SMP
	/*
	 * Per entity load average tracking.
	 *
	 * Put into separate cache line so it does not
	 * collide with read-mostly values above.
	 */
	struct sched_avg	avg ____cacheline_aligned_in_smp;
#endif
};

struct sched_rt_entity {
	struct list_head run_list;
	unsigned long timeout;
	unsigned long watchdog_stamp;
	unsigned int time_slice;
	unsigned short on_rq;
	unsigned short on_list;

	struct sched_rt_entity *back;
#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity	*parent;
	/* rq on which this entity is (to be) queued: */
	struct rt_rq		*rt_rq;
	/* rq "owned" by this entity/group: */
	struct rt_rq		*my_q;
#endif
};

struct sched_dl_entity {
	struct rb_node	rb_node;

	/*
	 * Original scheduling parameters. Copied here from sched_attr
	 * during sched_setattr(), they will remain the same until
	 * the next sched_setattr().
	 */
	u64 dl_runtime;		/* maximum runtime for each instance	*/
	u64 dl_deadline;	/* relative deadline of each instance	*/
	u64 dl_period;		/* separation of two instances (period) */
	u64 dl_bw;		/* dl_runtime / dl_deadline		*/

	/*
	 * Actual scheduling parameters. Initialized with the values above,
	 * they are continuously updated during task execution. Note that
	 * the remaining runtime could be < 0 in case we are in overrun.
	 */
	s64 runtime;		/* remaining runtime for this instance	*/
	u64 deadline;		/* absolute deadline for this instance	*/
	unsigned int flags;	/* specifying the scheduler behaviour	*/

	/*
	 * Some bool flags:
	 *
	 * @dl_throttled tells if we exhausted the runtime. If so, the
	 * task has to wait for a replenishment to be performed at the
	 * next firing of dl_timer.
	 *
	 * @dl_boosted tells if we are boosted due to DI (deadline
	 * inheritance). If so we are outside bandwidth enforcement
	 * mechanism (but only until we exit the critical section);
	 *
	 * @dl_yielded tells if task gave up the cpu before consuming
	 * all its available runtime during the last job.
	 */
	int dl_throttled, dl_boosted, dl_yielded;

	/*
	 * Bandwidth enforcement timer. Each -deadline task has its
	 * own bandwidth to be enforced, thus we need one timer per task.
	 */
	struct hrtimer dl_timer;
};

union rcu_special {
	struct {
		u8 blocked;
		u8 need_qs;
		u8 exp_need_qs;
		u8 pad;	/* Otherwise the compiler can store garbage here. */
	} b; /* Bits. */
	u32 s; /* Set of bits. */
};
struct rcu_node;

enum perf_event_task_context {
	perf_invalid_context = -1,
	perf_hw_context = 0,
	perf_sw_context,
	perf_nr_task_contexts,
};

/* Track pages that require TLB flushes */
struct tlbflush_unmap_batch {
	/*
	 * Each bit set is a CPU that potentially has a TLB entry for one of
	 * the PFNs being flushed. See set_tlb_ubc_flush_pending().
	 */
	struct cpumask cpumask;

	/* True if any bit in cpumask is set */
	bool flush_required;

	/*
	 * If true then the PTE was dirty when unmapped. The entry must be
	 * flushed before IO is initiated or a stale TLB entry potentially
	 * allows an update without redirtying the page.
	 */
	bool writable;
};

struct task_struct {
#ifdef CONFIG_THREAD_INFO_IN_TASK
	/*
	 * For reasons of header soup (see current_thread_info()), this
	 * must be the first element of task_struct.
	 */
	struct thread_info thread_info;
#endif
	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
	void *stack;
	atomic_t usage;
	unsigned int flags;	/* per process flags, defined below */
	unsigned int ptrace;

#ifdef CONFIG_SMP
	struct llist_node wake_entry;
	int on_cpu;
#ifdef CONFIG_THREAD_INFO_IN_TASK
	unsigned int cpu;	/* current CPU */
#endif
	unsigned int wakee_flips;
	unsigned long wakee_flip_decay_ts;
	struct task_struct *last_wakee;

	int wake_cpu;
#endif
	int on_rq;

	int prio, static_prio, normal_prio;
	unsigned int rt_priority;
	const struct sched_class *sched_class;
	struct sched_entity se;
	struct sched_rt_entity rt;
#ifdef CONFIG_CGROUP_SCHED
	struct task_group *sched_task_group;
#endif
	struct sched_dl_entity dl;

#ifdef CONFIG_PREEMPT_NOTIFIERS
	/* list of struct preempt_notifier: */
	struct hlist_head preempt_notifiers;
#endif

#ifdef CONFIG_BLK_DEV_IO_TRACE
	unsigned int btrace_seq;
#endif

	unsigned int policy;
	int nr_cpus_allowed;
	cpumask_t cpus_allowed;

#ifdef CONFIG_PREEMPT_RCU
	int rcu_read_lock_nesting;
	union rcu_special rcu_read_unlock_special;
	struct list_head rcu_node_entry;
	struct rcu_node *rcu_blocked_node;
#endif /* #ifdef CONFIG_PREEMPT_RCU */
#ifdef CONFIG_TASKS_RCU
	unsigned long rcu_tasks_nvcsw;
	bool rcu_tasks_holdout;
	struct list_head rcu_tasks_holdout_list;
	int rcu_tasks_idle_cpu;
#endif /* #ifdef CONFIG_TASKS_RCU */

#ifdef CONFIG_SCHED_INFO
	struct sched_info sched_info;
#endif

	struct list_head tasks;
#ifdef CONFIG_SMP
	struct plist_node pushable_tasks;
	struct rb_node pushable_dl_tasks;
#endif

	struct mm_struct *mm, *active_mm;
	/* per-thread vma caching */
	u32 vmacache_seqnum;
	struct vm_area_struct *vmacache[VMACACHE_SIZE];
#if defined(SPLIT_RSS_COUNTING)
	struct task_rss_stat	rss_stat;
#endif
/* task state */
	int exit_state;
	int exit_code, exit_signal;
	int pdeath_signal;	/* The signal sent when the parent dies */
	unsigned long jobctl;	/* JOBCTL_*, siglock protected */

	/* Used for emulating ABI behavior of previous Linux versions */
	unsigned int personality;

	/* scheduler bits, serialized by scheduler locks */
	unsigned sched_reset_on_fork:1;
	unsigned sched_contributes_to_load:1;
	unsigned sched_migrated:1;
	unsigned sched_remote_wakeup:1;
	unsigned :0; /* force alignment to the next boundary */

	/* unserialized, strictly 'current' */
	unsigned in_execve:1; /* bit to tell LSMs we're in execve */
	unsigned in_iowait:1;
#if !defined(TIF_RESTORE_SIGMASK)
	unsigned restore_sigmask:1;
#endif
#ifdef CONFIG_MEMCG
	unsigned memcg_may_oom:1;
#ifndef CONFIG_SLOB
	unsigned memcg_kmem_skip_account:1;
#endif
#endif
#ifdef CONFIG_COMPAT_BRK
	unsigned brk_randomized:1;
#endif

	unsigned long atomic_flags; /* Flags needing atomic access. */

	struct restart_block restart_block;

	pid_t pid;
	pid_t tgid;

#ifdef CONFIG_CC_STACKPROTECTOR
	/* Canary value for the -fstack-protector gcc feature */
	unsigned long stack_canary;
#endif
	/*
	 * pointers to (original) parent process, youngest child, younger sibling,
	 * older sibling, respectively.  (p->father can be replaced with
	 * p->real_parent->pid)
	 */
	struct task_struct __rcu *real_parent; /* real parent process */
	struct task_struct __rcu *parent; /* recipient of SIGCHLD, wait4() reports */
	/*
	 * children/sibling forms the list of my natural children
	 */
	struct list_head children;	/* list of my children */
	struct list_head sibling;	/* linkage in my parent's children list */
	struct task_struct *group_leader;	/* threadgroup leader */

	/*
	 * ptraced is the list of tasks this task is using ptrace on.
	 * This includes both natural children and PTRACE_ATTACH targets.
	 * p->ptrace_entry is p's link on the p->parent->ptraced list.
	 */
	struct list_head ptraced;
	struct list_head ptrace_entry;

	/* PID/PID hash table linkage. */
	struct pid_link pids[PIDTYPE_MAX];
	struct list_head thread_group;
	struct list_head thread_node;

	struct completion *vfork_done;		/* for vfork() */
	int __user *set_child_tid;		/* CLONE_CHILD_SETTID */
	int __user *clear_child_tid;		/* CLONE_CHILD_CLEARTID */

	cputime_t utime, stime, utimescaled, stimescaled;
	cputime_t gtime;
	struct prev_cputime prev_cputime;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
	seqcount_t vtime_seqcount;
	unsigned long long vtime_snap;
	enum {
		/* Task is sleeping or running in a CPU with VTIME inactive */
		VTIME_INACTIVE = 0,
		/* Task runs in userspace in a CPU with VTIME active */
		VTIME_USER,
		/* Task runs in kernelspace in a CPU with VTIME active */
		VTIME_SYS,
	} vtime_snap_whence;
#endif

#ifdef CONFIG_NO_HZ_FULL
	atomic_t tick_dep_mask;
#endif
	unsigned long nvcsw, nivcsw; /* context switch counts */
ccbf62d8 1649 u64 start_time; /* monotonic time in nsec */
57e0be04 1650 u64 real_start_time; /* boot based time in nsec */
1da177e4
LT
1651/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
1652 unsigned long min_flt, maj_flt;
1653
f06febc9 1654 struct task_cputime cputime_expires;
1da177e4
LT
1655 struct list_head cpu_timers[3];
1656
1657/* process credentials */
1b0ba1c9 1658 const struct cred __rcu *real_cred; /* objective and real subjective task
3b11a1de 1659 * credentials (COW) */
1b0ba1c9 1660 const struct cred __rcu *cred; /* effective (overridable) subjective task
3b11a1de 1661 * credentials (COW) */
36772092
PBG
1662 char comm[TASK_COMM_LEN]; /* executable name excluding path
 1663 - access with [gs]et_task_comm (which locks
1664 it with task_lock())
221af7f8 1665 - initialized normally by setup_new_exec */
1da177e4 1666/* file system info */
756daf26 1667 struct nameidata *nameidata;
3d5b6fcc 1668#ifdef CONFIG_SYSVIPC
1da177e4
LT
1669/* ipc stuff */
1670 struct sysv_sem sysvsem;
ab602f79 1671 struct sysv_shm sysvshm;
3d5b6fcc 1672#endif
e162b39a 1673#ifdef CONFIG_DETECT_HUNG_TASK
82a1fcb9 1674/* hung task detection */
82a1fcb9
IM
1675 unsigned long last_switch_count;
1676#endif
1da177e4
LT
1677/* filesystem information */
1678 struct fs_struct *fs;
1679/* open file information */
1680 struct files_struct *files;
1651e14e 1681/* namespaces */
ab516013 1682 struct nsproxy *nsproxy;
1da177e4
LT
1683/* signal handlers */
1684 struct signal_struct *signal;
1685 struct sighand_struct *sighand;
1686
1687 sigset_t blocked, real_blocked;
f3de272b 1688 sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */
1da177e4
LT
1689 struct sigpending pending;
1690
1691 unsigned long sas_ss_sp;
1692 size_t sas_ss_size;
2a742138 1693 unsigned sas_ss_flags;
2e01fabe 1694
67d12145 1695 struct callback_head *task_works;
e73f8959 1696
1da177e4 1697 struct audit_context *audit_context;
bfef93a5 1698#ifdef CONFIG_AUDITSYSCALL
e1760bd5 1699 kuid_t loginuid;
4746ec5b 1700 unsigned int sessionid;
bfef93a5 1701#endif
932ecebb 1702 struct seccomp seccomp;
1da177e4
LT
1703
1704/* Thread group tracking */
1705 u32 parent_exec_id;
1706 u32 self_exec_id;
58568d2a
MX
1707/* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed,
1708 * mempolicy */
1da177e4 1709 spinlock_t alloc_lock;
1da177e4 1710
b29739f9 1711 /* Protection of the PI data structures: */
1d615482 1712 raw_spinlock_t pi_lock;
b29739f9 1713
76751049
PZ
1714 struct wake_q_node wake_q;
1715
23f78d4a
IM
1716#ifdef CONFIG_RT_MUTEXES
1717 /* PI waiters blocked on a rt_mutex held by this task */
fb00aca4
PZ
1718 struct rb_root pi_waiters;
1719 struct rb_node *pi_waiters_leftmost;
23f78d4a
IM
1720 /* Deadlock detection and priority inheritance handling */
1721 struct rt_mutex_waiter *pi_blocked_on;
23f78d4a
IM
1722#endif
1723
408894ee
IM
1724#ifdef CONFIG_DEBUG_MUTEXES
1725 /* mutex deadlock detection */
1726 struct mutex_waiter *blocked_on;
1727#endif
de30a2b3
IM
1728#ifdef CONFIG_TRACE_IRQFLAGS
1729 unsigned int irq_events;
de30a2b3 1730 unsigned long hardirq_enable_ip;
de30a2b3 1731 unsigned long hardirq_disable_ip;
fa1452e8 1732 unsigned int hardirq_enable_event;
de30a2b3 1733 unsigned int hardirq_disable_event;
fa1452e8
HS
1734 int hardirqs_enabled;
1735 int hardirq_context;
de30a2b3 1736 unsigned long softirq_disable_ip;
de30a2b3 1737 unsigned long softirq_enable_ip;
fa1452e8 1738 unsigned int softirq_disable_event;
de30a2b3 1739 unsigned int softirq_enable_event;
fa1452e8 1740 int softirqs_enabled;
de30a2b3
IM
1741 int softirq_context;
1742#endif
fbb9ce95 1743#ifdef CONFIG_LOCKDEP
bdb9441e 1744# define MAX_LOCK_DEPTH 48UL
fbb9ce95
IM
1745 u64 curr_chain_key;
1746 int lockdep_depth;
fbb9ce95 1747 unsigned int lockdep_recursion;
c7aceaba 1748 struct held_lock held_locks[MAX_LOCK_DEPTH];
cf40bd16 1749 gfp_t lockdep_reclaim_gfp;
fbb9ce95 1750#endif
c6d30853
AR
1751#ifdef CONFIG_UBSAN
1752 unsigned int in_ubsan;
1753#endif
408894ee 1754
1da177e4
LT
1755/* journalling filesystem info */
1756 void *journal_info;
1757
d89d8796 1758/* stacked block device info */
bddd87c7 1759 struct bio_list *bio_list;
d89d8796 1760
73c10101
JA
1761#ifdef CONFIG_BLOCK
1762/* stack plugging */
1763 struct blk_plug *plug;
1764#endif
1765
1da177e4
LT
1766/* VM state */
1767 struct reclaim_state *reclaim_state;
1768
1da177e4
LT
1769 struct backing_dev_info *backing_dev_info;
1770
1771 struct io_context *io_context;
1772
1773 unsigned long ptrace_message;
1774 siginfo_t *last_siginfo; /* For ptrace use. */
7c3ab738 1775 struct task_io_accounting ioac;
8f0ab514 1776#if defined(CONFIG_TASK_XACCT)
1da177e4
LT
1777 u64 acct_rss_mem1; /* accumulated rss usage */
1778 u64 acct_vm_mem1; /* accumulated virtual memory usage */
49b5cf34 1779 cputime_t acct_timexpd; /* stime + utime since last update */
1da177e4
LT
1780#endif
1781#ifdef CONFIG_CPUSETS
58568d2a 1782 nodemask_t mems_allowed; /* Protected by alloc_lock */
cc9a6c87 1783 seqcount_t mems_allowed_seq; /* Sequence number to catch updates */
825a46af 1784 int cpuset_mem_spread_rotor;
6adef3eb 1785 int cpuset_slab_spread_rotor;
1da177e4 1786#endif
ddbcc7e8 1787#ifdef CONFIG_CGROUPS
817929ec 1788 /* Control Group info protected by css_set_lock */
2c392b8c 1789 struct css_set __rcu *cgroups;
817929ec
PM
1790 /* cg_list protected by css_set_lock and tsk->alloc_lock */
1791 struct list_head cg_list;
ddbcc7e8 1792#endif
42b2dd0a 1793#ifdef CONFIG_FUTEX
0771dfef 1794 struct robust_list_head __user *robust_list;
34f192c6
IM
1795#ifdef CONFIG_COMPAT
1796 struct compat_robust_list_head __user *compat_robust_list;
1797#endif
c87e2837
IM
1798 struct list_head pi_state_list;
1799 struct futex_pi_state *pi_state_cache;
c7aceaba 1800#endif
cdd6c482 1801#ifdef CONFIG_PERF_EVENTS
8dc85d54 1802 struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
cdd6c482
IM
1803 struct mutex perf_event_mutex;
1804 struct list_head perf_event_list;
a63eaf34 1805#endif
8f47b187
TG
1806#ifdef CONFIG_DEBUG_PREEMPT
1807 unsigned long preempt_disable_ip;
1808#endif
c7aceaba 1809#ifdef CONFIG_NUMA
58568d2a 1810 struct mempolicy *mempolicy; /* Protected by alloc_lock */
c7aceaba 1811 short il_next;
207205a2 1812 short pref_node_fork;
42b2dd0a 1813#endif
cbee9f88
PZ
1814#ifdef CONFIG_NUMA_BALANCING
1815 int numa_scan_seq;
cbee9f88 1816 unsigned int numa_scan_period;
598f0ec0 1817 unsigned int numa_scan_period_max;
de1c9ce6 1818 int numa_preferred_nid;
6b9a7460 1819 unsigned long numa_migrate_retry;
cbee9f88 1820 u64 node_stamp; /* migration stamp */
7e2703e6
RR
1821 u64 last_task_numa_placement;
1822 u64 last_sum_exec_runtime;
cbee9f88 1823 struct callback_head numa_work;
f809ca9a 1824
8c8a743c
PZ
1825 struct list_head numa_entry;
1826 struct numa_group *numa_group;
1827
745d6147 1828 /*
44dba3d5
IM
1829 * numa_faults is an array split into four regions:
1830 * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer
1831 * in this precise order.
1832 *
1833 * faults_memory: Exponential decaying average of faults on a per-node
1834 * basis. Scheduling placement decisions are made based on these
1835 * counts. The values remain static for the duration of a PTE scan.
1836 * faults_cpu: Track the nodes the process was running on when a NUMA
1837 * hinting fault was incurred.
1838 * faults_memory_buffer and faults_cpu_buffer: Record faults per node
1839 * during the current scan window. When the scan completes, the counts
1840 * in faults_memory and faults_cpu decay and these values are copied.
745d6147 1841 */
44dba3d5 1842 unsigned long *numa_faults;
83e1d2cd 1843 unsigned long total_numa_faults;
745d6147 1844
04bb2f94
RR
1845 /*
1846 * numa_faults_locality tracks if faults recorded during the last
074c2381
MG
1847 * scan window were remote/local or failed to migrate. The task scan
1848 * period is adapted based on the locality of the faults with different
1849 * weights depending on whether they were shared or private faults
04bb2f94 1850 */
074c2381 1851 unsigned long numa_faults_locality[3];
04bb2f94 1852
b32e86b4 1853 unsigned long numa_pages_migrated;
cbee9f88
PZ
1854#endif /* CONFIG_NUMA_BALANCING */
1855
72b252ae
MG
1856#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
1857 struct tlbflush_unmap_batch tlb_ubc;
1858#endif
1859
e56d0903 1860 struct rcu_head rcu;
b92ce558
JA
1861
1862 /*
1863 * cache last used pipe for splice
1864 */
1865 struct pipe_inode_info *splice_pipe;
5640f768
ED
1866
1867 struct page_frag task_frag;
1868
ca74e92b
SN
1869#ifdef CONFIG_TASK_DELAY_ACCT
1870 struct task_delay_info *delays;
f4f154fd
AM
1871#endif
1872#ifdef CONFIG_FAULT_INJECTION
1873 int make_it_fail;
ca74e92b 1874#endif
9d823e8f
WF
1875 /*
1876 * when (nr_dirtied >= nr_dirtied_pause), it's time to call
1877 * balance_dirty_pages() for some dirty throttling pause
1878 */
1879 int nr_dirtied;
1880 int nr_dirtied_pause;
83712358 1881 unsigned long dirty_paused_when; /* start of a write-and-pause period */
9d823e8f 1882
9745512c
AV
1883#ifdef CONFIG_LATENCYTOP
1884 int latency_record_count;
1885 struct latency_record latency_record[LT_SAVECOUNT];
1886#endif
6976675d
AV
1887 /*
1888 * time slack values; these are used to round up poll() and
1889 * select() etc timeout values. These are in nanoseconds.
1890 */
da8b44d5
JS
1891 u64 timer_slack_ns;
1892 u64 default_timer_slack_ns;
f8d570a4 1893
0b24becc
AR
1894#ifdef CONFIG_KASAN
1895 unsigned int kasan_depth;
1896#endif
fb52607a 1897#ifdef CONFIG_FUNCTION_GRAPH_TRACER
3ad2f3fb 1898 /* Index of current stored address in ret_stack */
f201ae23
FW
1899 int curr_ret_stack;
1900 /* Stack of return addresses for return function tracing */
1901 struct ftrace_ret_stack *ret_stack;
8aef2d28
SR
1902 /* time stamp for last schedule */
1903 unsigned long long ftrace_timestamp;
f201ae23
FW
1904 /*
1905 * Number of functions that haven't been traced
1906 * because of depth overrun.
1907 */
1908 atomic_t trace_overrun;
380c4b14
FW
1909 /* Pause for the tracing */
1910 atomic_t tracing_graph_pause;
f201ae23 1911#endif
ea4e2bc4
SR
1912#ifdef CONFIG_TRACING
1913 /* state flags for use by tracers */
1914 unsigned long trace;
b1cff0ad 1915 /* bitmask and counter of trace recursion */
261842b7
SR
1916 unsigned long trace_recursion;
1917#endif /* CONFIG_TRACING */
5c9a8750
DV
1918#ifdef CONFIG_KCOV
1919 /* Coverage collection mode enabled for this task (0 if disabled). */
1920 enum kcov_mode kcov_mode;
1921 /* Size of the kcov_area. */
1922 unsigned kcov_size;
1923 /* Buffer for coverage collection. */
1924 void *kcov_area;
 1925 /* kcov descriptor wired with this task or NULL. */
1926 struct kcov *kcov;
1927#endif
6f185c29 1928#ifdef CONFIG_MEMCG
626ebc41
TH
1929 struct mem_cgroup *memcg_in_oom;
1930 gfp_t memcg_oom_gfp_mask;
1931 int memcg_oom_order;
b23afb93
TH
1932
1933 /* number of pages to reclaim on returning to userland */
1934 unsigned int memcg_nr_pages_over_high;
569b846d 1935#endif
0326f5a9
SD
1936#ifdef CONFIG_UPROBES
1937 struct uprobe_task *utask;
0326f5a9 1938#endif
cafe5635
KO
1939#if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
1940 unsigned int sequential_io;
1941 unsigned int sequential_io_avg;
1942#endif
8eb23b9f
PZ
1943#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
1944 unsigned long task_state_change;
1945#endif
8bcbde54 1946 int pagefault_disabled;
03049269 1947#ifdef CONFIG_MMU
29c696e1 1948 struct task_struct *oom_reaper_list;
03049269 1949#endif
ba14a194
AL
1950#ifdef CONFIG_VMAP_STACK
1951 struct vm_struct *stack_vm_area;
1952#endif
68f24b08
AL
1953#ifdef CONFIG_THREAD_INFO_IN_TASK
1954 /* A live task holds one reference. */
1955 atomic_t stack_refcount;
1956#endif
0c8c0f03
DH
1957/* CPU-specific state of this task */
1958 struct thread_struct thread;
1959/*
1960 * WARNING: on x86, 'thread_struct' contains a variable-sized
1961 * structure. It *MUST* be at the end of 'task_struct'.
1962 *
1963 * Do not put anything below here!
1964 */
1da177e4
LT
1965};
1966
5aaeb5c0
IM
1967#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
1968extern int arch_task_struct_size __read_mostly;
1969#else
1970# define arch_task_struct_size (sizeof(struct task_struct))
1971#endif
0c8c0f03 1972
ba14a194
AL
1973#ifdef CONFIG_VMAP_STACK
1974static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
1975{
1976 return t->stack_vm_area;
1977}
1978#else
1979static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
1980{
1981 return NULL;
1982}
1983#endif
1984
76e6eee0 1985/* Future-safe accessor for struct task_struct's cpus_allowed. */
a4636818 1986#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
76e6eee0 1987
50605ffb
TG
1988static inline int tsk_nr_cpus_allowed(struct task_struct *p)
1989{
1990 return p->nr_cpus_allowed;
1991}
1992
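/*
 * Illustrative sketch (not part of this header): checking affinity via
 * the future-safe accessor above instead of touching ->cpus_allowed
 * directly.
 */
static inline bool task_may_run_on(struct task_struct *p, int cpu)
{
	return cpumask_test_cpu(cpu, tsk_cpus_allowed(p));
}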
6688cc05
PZ
1993#define TNF_MIGRATED 0x01
1994#define TNF_NO_GROUP 0x02
dabe1d99 1995#define TNF_SHARED 0x04
04bb2f94 1996#define TNF_FAULT_LOCAL 0x08
074c2381 1997#define TNF_MIGRATE_FAIL 0x10
6688cc05 1998
b18dc5f2
MH
1999static inline bool in_vfork(struct task_struct *tsk)
2000{
2001 bool ret;
2002
2003 /*
2004 * need RCU to access ->real_parent if CLONE_VM was used along with
2005 * CLONE_PARENT.
2006 *
2007 * We check real_parent->mm == tsk->mm because CLONE_VFORK does not
 2008 * imply CLONE_VM.
2009 *
2010 * CLONE_VFORK can be used with CLONE_PARENT/CLONE_THREAD and thus
2011 * ->real_parent is not necessarily the task doing vfork(), so in
2012 * theory we can't rely on task_lock() if we want to dereference it.
2013 *
2014 * And in this case we can't trust the real_parent->mm == tsk->mm
2015 * check, it can be false negative. But we do not care, if init or
2016 * another oom-unkillable task does this it should blame itself.
2017 */
2018 rcu_read_lock();
2019 ret = tsk->vfork_done && tsk->real_parent->mm == tsk->mm;
2020 rcu_read_unlock();
2021
2022 return ret;
2023}
2024
cbee9f88 2025#ifdef CONFIG_NUMA_BALANCING
6688cc05 2026extern void task_numa_fault(int last_node, int node, int pages, int flags);
e29cf08b 2027extern pid_t task_numa_group_id(struct task_struct *p);
1a687c2e 2028extern void set_numabalancing_state(bool enabled);
82727018 2029extern void task_numa_free(struct task_struct *p);
10f39042
RR
2030extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page,
2031 int src_nid, int dst_cpu);
cbee9f88 2032#else
ac8e895b 2033static inline void task_numa_fault(int last_node, int node, int pages,
6688cc05 2034 int flags)
cbee9f88
PZ
2035{
2036}
e29cf08b
MG
2037static inline pid_t task_numa_group_id(struct task_struct *p)
2038{
2039 return 0;
2040}
1a687c2e
MG
2041static inline void set_numabalancing_state(bool enabled)
2042{
2043}
82727018
RR
2044static inline void task_numa_free(struct task_struct *p)
2045{
2046}
10f39042
RR
2047static inline bool should_numa_migrate_memory(struct task_struct *p,
2048 struct page *page, int src_nid, int dst_cpu)
2049{
2050 return true;
2051}
cbee9f88
PZ
2052#endif
2053
e868171a 2054static inline struct pid *task_pid(struct task_struct *task)
22c935f4
EB
2055{
2056 return task->pids[PIDTYPE_PID].pid;
2057}
2058
e868171a 2059static inline struct pid *task_tgid(struct task_struct *task)
22c935f4
EB
2060{
2061 return task->group_leader->pids[PIDTYPE_PID].pid;
2062}
2063
6dda81f4
ON
2064/*
2065 * Without tasklist or rcu lock it is not safe to dereference
 2066 * the result of task_pgrp/task_session even if task == current;
2067 * we can race with another thread doing sys_setsid/sys_setpgid.
2068 */
e868171a 2069static inline struct pid *task_pgrp(struct task_struct *task)
22c935f4
EB
2070{
2071 return task->group_leader->pids[PIDTYPE_PGID].pid;
2072}
2073
e868171a 2074static inline struct pid *task_session(struct task_struct *task)
22c935f4
EB
2075{
2076 return task->group_leader->pids[PIDTYPE_SID].pid;
2077}
2078
7af57294
PE
2079struct pid_namespace;
2080
2081/*
2082 * the helpers to get the task's different pids as they are seen
2083 * from various namespaces
2084 *
2085 * task_xid_nr() : global id, i.e. the id seen from the init namespace;
44c4e1b2
EB
2086 * task_xid_vnr() : virtual id, i.e. the id seen from the pid namespace of
2087 * current.
7af57294
PE
2088 * task_xid_nr_ns() : id seen from the ns specified;
2089 *
2090 * set_task_vxid() : assigns a virtual id to a task;
2091 *
7af57294
PE
2092 * see also pid_nr() etc in include/linux/pid.h
2093 */
52ee2dfd
ON
2094pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
2095 struct pid_namespace *ns);
7af57294 2096
e868171a 2097static inline pid_t task_pid_nr(struct task_struct *tsk)
7af57294
PE
2098{
2099 return tsk->pid;
2100}
2101
52ee2dfd
ON
2102static inline pid_t task_pid_nr_ns(struct task_struct *tsk,
2103 struct pid_namespace *ns)
2104{
2105 return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
2106}
7af57294
PE
2107
2108static inline pid_t task_pid_vnr(struct task_struct *tsk)
2109{
52ee2dfd 2110 return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
7af57294
PE
2111}
2112
2113
e868171a 2114static inline pid_t task_tgid_nr(struct task_struct *tsk)
7af57294
PE
2115{
2116 return tsk->tgid;
2117}
2118
2f2a3a46 2119pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
7af57294
PE
2120
2121static inline pid_t task_tgid_vnr(struct task_struct *tsk)
2122{
2123 return pid_vnr(task_tgid(tsk));
2124}
2125
2126
80e0b6e8 2127static inline int pid_alive(const struct task_struct *p);
ad36d282
RGB
2128static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
2129{
2130 pid_t pid = 0;
2131
2132 rcu_read_lock();
2133 if (pid_alive(tsk))
2134 pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
2135 rcu_read_unlock();
2136
2137 return pid;
2138}
2139
2140static inline pid_t task_ppid_nr(const struct task_struct *tsk)
2141{
2142 return task_ppid_nr_ns(tsk, &init_pid_ns);
2143}
2144
52ee2dfd
ON
2145static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
2146 struct pid_namespace *ns)
7af57294 2147{
52ee2dfd 2148 return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
7af57294
PE
2149}
2150
7af57294
PE
2151static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
2152{
52ee2dfd 2153 return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
7af57294
PE
2154}
2155
2156
52ee2dfd
ON
2157static inline pid_t task_session_nr_ns(struct task_struct *tsk,
2158 struct pid_namespace *ns)
7af57294 2159{
52ee2dfd 2160 return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
7af57294
PE
2161}
2162
7af57294
PE
2163static inline pid_t task_session_vnr(struct task_struct *tsk)
2164{
52ee2dfd 2165 return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
7af57294
PE
2166}
2167
1b0f7ffd
ON
2168/* obsolete, do not use */
2169static inline pid_t task_pgrp_nr(struct task_struct *tsk)
2170{
2171 return task_pgrp_nr_ns(tsk, &init_pid_ns);
2172}
7af57294 2173
1da177e4
LT
2174/**
2175 * pid_alive - check that a task structure is not stale
2176 * @p: Task structure to be checked.
2177 *
 2178 * Test if a process is not yet dead (at most zombie state).
2179 * If pid_alive fails, then pointers within the task structure
2180 * can be stale and must not be dereferenced.
e69f6186
YB
2181 *
2182 * Return: 1 if the process is alive. 0 otherwise.
1da177e4 2183 */
ad36d282 2184static inline int pid_alive(const struct task_struct *p)
1da177e4 2185{
92476d7f 2186 return p->pids[PIDTYPE_PID].pid != NULL;
1da177e4
LT
2187}
2188
f400e198 2189/**
570f5241
SS
2190 * is_global_init - check if a task structure is init. Since init
 2191 * is free to have sub-threads, we need to check tgid.
3260259f
HK
2192 * @tsk: Task structure to be checked.
2193 *
2194 * Check if a task structure is the first user space task the kernel created.
e69f6186
YB
2195 *
2196 * Return: 1 if the task structure is init. 0 otherwise.
b460cbc5 2197 */
e868171a 2198static inline int is_global_init(struct task_struct *tsk)
b461cc03 2199{
570f5241 2200 return task_tgid_nr(tsk) == 1;
b461cc03 2201}
b460cbc5 2202
9ec52099
CLG
2203extern struct pid *cad_pid;
2204
1da177e4 2205extern void free_task(struct task_struct *tsk);
1da177e4 2206#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
e56d0903 2207
158d9ebd 2208extern void __put_task_struct(struct task_struct *t);
e56d0903
IM
2209
2210static inline void put_task_struct(struct task_struct *t)
2211{
2212 if (atomic_dec_and_test(&t->usage))
8c7904a0 2213 __put_task_struct(t);
e56d0903 2214}
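/*
 * Illustrative sketch: pinning a task looked up under RCU so it can be
 * used after rcu_read_unlock(). find_task_by_vpid() is declared further
 * down in this header.
 */
static inline struct task_struct *grab_task_by_vpid(pid_t nr)
{
	struct task_struct *p;

	rcu_read_lock();
	p = find_task_by_vpid(nr);
	if (p)
		get_task_struct(p);		/* take a ->usage reference */
	rcu_read_unlock();

	return p;	/* caller must balance with put_task_struct() */
}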
1da177e4 2215
150593bf
ON
2216struct task_struct *task_rcu_dereference(struct task_struct **ptask);
2217struct task_struct *try_get_task_struct(struct task_struct **ptask);
2218
6a61671b
FW
2219#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
2220extern void task_cputime(struct task_struct *t,
2221 cputime_t *utime, cputime_t *stime);
2222extern void task_cputime_scaled(struct task_struct *t,
2223 cputime_t *utimescaled, cputime_t *stimescaled);
2224extern cputime_t task_gtime(struct task_struct *t);
2225#else
6fac4829
FW
2226static inline void task_cputime(struct task_struct *t,
2227 cputime_t *utime, cputime_t *stime)
2228{
2229 if (utime)
2230 *utime = t->utime;
2231 if (stime)
2232 *stime = t->stime;
2233}
2234
2235static inline void task_cputime_scaled(struct task_struct *t,
2236 cputime_t *utimescaled,
2237 cputime_t *stimescaled)
2238{
2239 if (utimescaled)
2240 *utimescaled = t->utimescaled;
2241 if (stimescaled)
2242 *stimescaled = t->stimescaled;
2243}
6a61671b
FW
2244
2245static inline cputime_t task_gtime(struct task_struct *t)
2246{
2247 return t->gtime;
2248}
2249#endif
e80d0a1a
FW
2250extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
2251extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
49048622 2252
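/*
 * Illustrative sketch: summing a task's user and system time via the
 * accessor above, which is correct with or without
 * CONFIG_VIRT_CPU_ACCOUNTING_GEN.
 */
static inline cputime_t task_total_cputime(struct task_struct *t)
{
	cputime_t utime, stime;

	task_cputime(t, &utime, &stime);
	return utime + stime;
}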
1da177e4
LT
2253/*
2254 * Per process flags
2255 */
1da177e4 2256#define PF_EXITING 0x00000004 /* getting shut down */
778e9a9c 2257#define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */
94886b84 2258#define PF_VCPU 0x00000010 /* I'm a virtual CPU */
21aa9af0 2259#define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */
1da177e4 2260#define PF_FORKNOEXEC 0x00000040 /* forked but didn't exec */
4db96cf0 2261#define PF_MCE_PROCESS 0x00000080 /* process policy on mce errors */
1da177e4
LT
2262#define PF_SUPERPRIV 0x00000100 /* used super-user privileges */
2263#define PF_DUMPCORE 0x00000200 /* dumped core */
2264#define PF_SIGNALED 0x00000400 /* killed by a signal */
2265#define PF_MEMALLOC 0x00000800 /* Allocating memory */
72fa5997 2266#define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */
1da177e4 2267#define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */
774a1221 2268#define PF_USED_ASYNC 0x00004000 /* used async_schedule*(), used by module init */
1da177e4
LT
2269#define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */
2270#define PF_FROZEN 0x00010000 /* frozen for system suspend */
2271#define PF_FSTRANS 0x00020000 /* inside a filesystem transaction */
2272#define PF_KSWAPD 0x00040000 /* I am kswapd */
21caf2fc 2273#define PF_MEMALLOC_NOIO 0x00080000 /* Allocating memory without IO involved */
1da177e4 2274#define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */
246bb0b1 2275#define PF_KTHREAD 0x00200000 /* I am a kernel thread */
b31dc66a
JA
2276#define PF_RANDOMIZE 0x00400000 /* randomize virtual address space */
2277#define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */
14a40ffc 2278#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */
4db96cf0 2279#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */
61a87122 2280#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */
58a69cb4 2281#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */
2b44c4db 2282#define PF_SUSPEND_TASK 0x80000000 /* this thread called freeze_processes and should not be frozen */
1da177e4
LT
2283
2284/*
2285 * Only the _current_ task can read/write to tsk->flags, but other
2286 * tasks can access tsk->flags in readonly mode for example
2287 * with tsk_used_math (like during threaded core dumping).
2288 * There is however an exception to this rule during ptrace
2289 * or during fork: the ptracer task is allowed to write to the
2290 * child->flags of its traced child (same goes for fork, the parent
2291 * can write to the child->flags), because we're guaranteed the
2292 * child is not running and in turn not changing child->flags
2293 * at the same time the parent does it.
2294 */
2295#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
2296#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
2297#define clear_used_math() clear_stopped_child_used_math(current)
2298#define set_used_math() set_stopped_child_used_math(current)
2299#define conditional_stopped_child_used_math(condition, child) \
2300 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
2301#define conditional_used_math(condition) \
2302 conditional_stopped_child_used_math(condition, current)
2303#define copy_to_stopped_child_used_math(child) \
2304 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
2305/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
2306#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
2307#define used_math() tsk_used_math(current)
2308
934f3072
JB
 2309/* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags;
2310 * __GFP_FS is also cleared as it implies __GFP_IO.
2311 */
21caf2fc
ML
2312static inline gfp_t memalloc_noio_flags(gfp_t flags)
2313{
2314 if (unlikely(current->flags & PF_MEMALLOC_NOIO))
934f3072 2315 flags &= ~(__GFP_IO | __GFP_FS);
21caf2fc
ML
2316 return flags;
2317}
2318
2319static inline unsigned int memalloc_noio_save(void)
2320{
2321 unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
2322 current->flags |= PF_MEMALLOC_NOIO;
2323 return flags;
2324}
2325
2326static inline void memalloc_noio_restore(unsigned int flags)
2327{
2328 current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
2329}
2330
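/*
 * Illustrative sketch: a block-layer path that must not recurse into
 * filesystem or I/O reclaim while allocating. kmalloc() is assumed from
 * <linux/slab.h>; under PF_MEMALLOC_NOIO the page allocator strips
 * __GFP_IO/__GFP_FS via memalloc_noio_flags() above.
 */
static inline void *alloc_buffer_noio(size_t size)
{
	unsigned int noio_flags = memalloc_noio_save();
	void *buf = kmalloc(size, GFP_KERNEL);

	memalloc_noio_restore(noio_flags);
	return buf;
}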
1d4457f9 2331/* Per-process atomic flags. */
a2b86f77 2332#define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. */
2ad654bc
ZL
2333#define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */
2334#define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */
77ed2c57 2335#define PFA_LMK_WAITING 3 /* Lowmemorykiller is waiting */
2ad654bc 2336
1d4457f9 2337
e0e5070b
ZL
2338#define TASK_PFA_TEST(name, func) \
2339 static inline bool task_##func(struct task_struct *p) \
2340 { return test_bit(PFA_##name, &p->atomic_flags); }
2341#define TASK_PFA_SET(name, func) \
2342 static inline void task_set_##func(struct task_struct *p) \
2343 { set_bit(PFA_##name, &p->atomic_flags); }
2344#define TASK_PFA_CLEAR(name, func) \
2345 static inline void task_clear_##func(struct task_struct *p) \
2346 { clear_bit(PFA_##name, &p->atomic_flags); }
2347
2348TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
2349TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)
1d4457f9 2350
2ad654bc
ZL
2351TASK_PFA_TEST(SPREAD_PAGE, spread_page)
2352TASK_PFA_SET(SPREAD_PAGE, spread_page)
2353TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)
2354
2355TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
2356TASK_PFA_SET(SPREAD_SLAB, spread_slab)
2357TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
1d4457f9 2358
77ed2c57
TH
2359TASK_PFA_TEST(LMK_WAITING, lmk_waiting)
2360TASK_PFA_SET(LMK_WAITING, lmk_waiting)
2361
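/*
 * For reference, TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs) above expands
 * to:
 *
 *	static inline bool task_no_new_privs(struct task_struct *p)
 *	{ return test_bit(PFA_NO_NEW_PRIVS, &p->atomic_flags); }
 *
 * so callers simply write: if (task_no_new_privs(current)) ...
 * Note that NO_NEW_PRIVS deliberately has no TASK_PFA_CLEAR(): once set,
 * the bit is never cleared.
 */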
e5c1902e 2362/*
a8f072c1 2363 * task->jobctl flags
e5c1902e 2364 */
a8f072c1 2365#define JOBCTL_STOP_SIGMASK 0xffff /* signr of the last group stop */
e5c1902e 2366
a8f072c1
TH
2367#define JOBCTL_STOP_DEQUEUED_BIT 16 /* stop signal dequeued */
2368#define JOBCTL_STOP_PENDING_BIT 17 /* task should stop for group stop */
2369#define JOBCTL_STOP_CONSUME_BIT 18 /* consume group stop count */
73ddff2b 2370#define JOBCTL_TRAP_STOP_BIT 19 /* trap for STOP */
fb1d910c 2371#define JOBCTL_TRAP_NOTIFY_BIT 20 /* trap for NOTIFY */
a8f072c1 2372#define JOBCTL_TRAPPING_BIT 21 /* switching to TRACED */
544b2c91 2373#define JOBCTL_LISTENING_BIT 22 /* ptracer is listening for events */
a8f072c1 2374
b76808e6
PD
2375#define JOBCTL_STOP_DEQUEUED (1UL << JOBCTL_STOP_DEQUEUED_BIT)
2376#define JOBCTL_STOP_PENDING (1UL << JOBCTL_STOP_PENDING_BIT)
2377#define JOBCTL_STOP_CONSUME (1UL << JOBCTL_STOP_CONSUME_BIT)
2378#define JOBCTL_TRAP_STOP (1UL << JOBCTL_TRAP_STOP_BIT)
2379#define JOBCTL_TRAP_NOTIFY (1UL << JOBCTL_TRAP_NOTIFY_BIT)
2380#define JOBCTL_TRAPPING (1UL << JOBCTL_TRAPPING_BIT)
2381#define JOBCTL_LISTENING (1UL << JOBCTL_LISTENING_BIT)
a8f072c1 2382
fb1d910c 2383#define JOBCTL_TRAP_MASK (JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
73ddff2b 2384#define JOBCTL_PENDING_MASK (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)
3759a0d9 2385
7dd3db54 2386extern bool task_set_jobctl_pending(struct task_struct *task,
b76808e6 2387 unsigned long mask);
73ddff2b 2388extern void task_clear_jobctl_trapping(struct task_struct *task);
3759a0d9 2389extern void task_clear_jobctl_pending(struct task_struct *task,
b76808e6 2390 unsigned long mask);
39efa3ef 2391
f41d911f
PM
2392static inline void rcu_copy_process(struct task_struct *p)
2393{
8315f422 2394#ifdef CONFIG_PREEMPT_RCU
f41d911f 2395 p->rcu_read_lock_nesting = 0;
1d082fd0 2396 p->rcu_read_unlock_special.s = 0;
dd5d19ba 2397 p->rcu_blocked_node = NULL;
f41d911f 2398 INIT_LIST_HEAD(&p->rcu_node_entry);
8315f422
PM
2399#endif /* #ifdef CONFIG_PREEMPT_RCU */
2400#ifdef CONFIG_TASKS_RCU
2401 p->rcu_tasks_holdout = false;
2402 INIT_LIST_HEAD(&p->rcu_tasks_holdout_list);
176f8f7a 2403 p->rcu_tasks_idle_cpu = -1;
8315f422 2404#endif /* #ifdef CONFIG_TASKS_RCU */
f41d911f
PM
2405}
2406
907aed48
MG
2407static inline void tsk_restore_flags(struct task_struct *task,
2408 unsigned long orig_flags, unsigned long flags)
2409{
2410 task->flags &= ~flags;
2411 task->flags |= orig_flags & flags;
2412}
2413
f82f8042
JL
2414extern int cpuset_cpumask_can_shrink(const struct cpumask *cur,
2415 const struct cpumask *trial);
7f51412a
JL
2416extern int task_can_attach(struct task_struct *p,
2417 const struct cpumask *cs_cpus_allowed);
1da177e4 2418#ifdef CONFIG_SMP
1e1b6c51
KM
2419extern void do_set_cpus_allowed(struct task_struct *p,
2420 const struct cpumask *new_mask);
2421
cd8ba7cd 2422extern int set_cpus_allowed_ptr(struct task_struct *p,
96f874e2 2423 const struct cpumask *new_mask);
1da177e4 2424#else
1e1b6c51
KM
2425static inline void do_set_cpus_allowed(struct task_struct *p,
2426 const struct cpumask *new_mask)
2427{
2428}
cd8ba7cd 2429static inline int set_cpus_allowed_ptr(struct task_struct *p,
96f874e2 2430 const struct cpumask *new_mask)
1da177e4 2431{
96f874e2 2432 if (!cpumask_test_cpu(0, new_mask))
1da177e4
LT
2433 return -EINVAL;
2434 return 0;
2435}
2436#endif
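/*
 * Illustrative sketch: pinning a helper thread to a single CPU with the
 * interface above. cpumask_of() comes from <linux/cpumask.h>.
 */
static inline int pin_task_to_cpu(struct task_struct *p, int cpu)
{
	return set_cpus_allowed_ptr(p, cpumask_of(cpu));
}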
e0ad9556 2437
3451d024 2438#ifdef CONFIG_NO_HZ_COMMON
5167e8d5
PZ
2439void calc_load_enter_idle(void);
2440void calc_load_exit_idle(void);
2441#else
2442static inline void calc_load_enter_idle(void) { }
2443static inline void calc_load_exit_idle(void) { }
3451d024 2444#endif /* CONFIG_NO_HZ_COMMON */
5167e8d5 2445
b342501c 2446/*
c676329a
PZ
2447 * Do not use outside of architecture code which knows its limitations.
2448 *
2449 * sched_clock() has no promise of monotonicity or bounded drift between
 2450 * CPUs; using it (which you should not) requires disabling IRQs.
2451 *
2452 * Please use one of the three interfaces below.
b342501c 2453 */
1bbfa6f2 2454extern unsigned long long notrace sched_clock(void);
c676329a 2455/*
489a71b0 2456 * See the comment in kernel/sched/clock.c
c676329a 2457 */
545a2bf7 2458extern u64 running_clock(void);
c676329a
PZ
2459extern u64 sched_clock_cpu(int cpu);
2460
e436d800 2461
c1955a3d 2462extern void sched_clock_init(void);
3e51f33f 2463
c1955a3d 2464#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
3e51f33f
PZ
2465static inline void sched_clock_tick(void)
2466{
2467}
2468
2469static inline void sched_clock_idle_sleep_event(void)
2470{
2471}
2472
2473static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
2474{
2475}
2c923e94
DL
2476
2477static inline u64 cpu_clock(int cpu)
2478{
2479 return sched_clock();
2480}
2481
2482static inline u64 local_clock(void)
2483{
2484 return sched_clock();
2485}
3e51f33f 2486#else
c676329a
PZ
2487/*
2488 * Architectures can set this to 1 if they have specified
2489 * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
2490 * but then during bootup it turns out that sched_clock()
2491 * is reliable after all:
2492 */
35af99e6
PZ
2493extern int sched_clock_stable(void);
2494extern void set_sched_clock_stable(void);
2495extern void clear_sched_clock_stable(void);
c676329a 2496
3e51f33f
PZ
2497extern void sched_clock_tick(void);
2498extern void sched_clock_idle_sleep_event(void);
2499extern void sched_clock_idle_wakeup_event(u64 delta_ns);
2c923e94
DL
2500
2501/*
2502 * As outlined in clock.c, provides a fast, high resolution, nanosecond
2503 * time source that is monotonic per cpu argument and has bounded drift
2504 * between cpus.
2505 *
2506 * ######################### BIG FAT WARNING ##########################
2507 * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
2508 * # go backwards !! #
2509 * ####################################################################
2510 */
2511static inline u64 cpu_clock(int cpu)
2512{
2513 return sched_clock_cpu(cpu);
2514}
2515
2516static inline u64 local_clock(void)
2517{
2518 return sched_clock_cpu(raw_smp_processor_id());
2519}
3e51f33f
PZ
2520#endif
2521
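/*
 * Illustrative usage of the clocks above: measuring a short interval on
 * one CPU (remember that cpu_clock(i) and cpu_clock(j) may not be
 * comparable for i != j):
 *
 *	u64 t0 = local_clock();
 *	... work ...
 *	u64 elapsed_ns = local_clock() - t0;
 */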
b52bfee4
VP
2522#ifdef CONFIG_IRQ_TIME_ACCOUNTING
2523/*
2524 * An i/f to runtime opt-in for irq time accounting based off of sched_clock.
2525 * The reason for this explicit opt-in is not to have perf penalty with
2526 * slow sched_clocks.
2527 */
2528extern void enable_sched_clock_irqtime(void);
2529extern void disable_sched_clock_irqtime(void);
2530#else
2531static inline void enable_sched_clock_irqtime(void) {}
2532static inline void disable_sched_clock_irqtime(void) {}
2533#endif
2534
36c8b586 2535extern unsigned long long
41b86e9c 2536task_sched_runtime(struct task_struct *task);
1da177e4
LT
2537
2538/* sched_exec is called by processes performing an exec */
2539#ifdef CONFIG_SMP
2540extern void sched_exec(void);
2541#else
2542#define sched_exec() {}
2543#endif
2544
2aa44d05
IM
2545extern void sched_clock_idle_sleep_event(void);
2546extern void sched_clock_idle_wakeup_event(u64 delta_ns);
bb29ab26 2547
1da177e4
LT
2548#ifdef CONFIG_HOTPLUG_CPU
2549extern void idle_task_exit(void);
2550#else
2551static inline void idle_task_exit(void) {}
2552#endif
2553
3451d024 2554#if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP)
1c20091e 2555extern void wake_up_nohz_cpu(int cpu);
06d8308c 2556#else
1c20091e 2557static inline void wake_up_nohz_cpu(int cpu) { }
06d8308c
TG
2558#endif
2559
ce831b38 2560#ifdef CONFIG_NO_HZ_FULL
265f22a9 2561extern u64 scheduler_tick_max_deferment(void);
06d8308c
TG
2562#endif
2563
5091faa4 2564#ifdef CONFIG_SCHED_AUTOGROUP
5091faa4
MG
2565extern void sched_autogroup_create_attach(struct task_struct *p);
2566extern void sched_autogroup_detach(struct task_struct *p);
2567extern void sched_autogroup_fork(struct signal_struct *sig);
2568extern void sched_autogroup_exit(struct signal_struct *sig);
2569#ifdef CONFIG_PROC_FS
2570extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m);
2e5b5b3a 2571extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice);
5091faa4
MG
2572#endif
2573#else
2574static inline void sched_autogroup_create_attach(struct task_struct *p) { }
2575static inline void sched_autogroup_detach(struct task_struct *p) { }
2576static inline void sched_autogroup_fork(struct signal_struct *sig) { }
2577static inline void sched_autogroup_exit(struct signal_struct *sig) { }
2578#endif
2579
fa93384f 2580extern int yield_to(struct task_struct *p, bool preempt);
36c8b586
IM
2581extern void set_user_nice(struct task_struct *p, long nice);
2582extern int task_prio(const struct task_struct *p);
d0ea0268
DY
2583/**
2584 * task_nice - return the nice value of a given task.
2585 * @p: the task in question.
2586 *
2587 * Return: The nice value [ -20 ... 0 ... 19 ].
2588 */
2589static inline int task_nice(const struct task_struct *p)
2590{
2591 return PRIO_TO_NICE((p)->static_prio);
2592}
36c8b586
IM
2593extern int can_nice(const struct task_struct *p, const int nice);
2594extern int task_curr(const struct task_struct *p);
1da177e4 2595extern int idle_cpu(int cpu);
fe7de49f
KM
2596extern int sched_setscheduler(struct task_struct *, int,
2597 const struct sched_param *);
961ccddd 2598extern int sched_setscheduler_nocheck(struct task_struct *, int,
fe7de49f 2599 const struct sched_param *);
d50dde5a
DF
2600extern int sched_setattr(struct task_struct *,
2601 const struct sched_attr *);
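/*
 * Illustrative sketch: giving a kernel thread real-time priority with the
 * interface above. SCHED_FIFO comes from <uapi/linux/sched.h>.
 */
static inline int make_task_fifo(struct task_struct *p, int prio)
{
	struct sched_param sp = { .sched_priority = prio };

	return sched_setscheduler_nocheck(p, SCHED_FIFO, &sp);
}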
36c8b586 2602extern struct task_struct *idle_task(int cpu);
c4f30608
PM
2603/**
2604 * is_idle_task - is the specified task an idle task?
fa757281 2605 * @p: the task in question.
e69f6186
YB
2606 *
2607 * Return: 1 if @p is an idle task. 0 otherwise.
c4f30608 2608 */
7061ca3b 2609static inline bool is_idle_task(const struct task_struct *p)
c4f30608
PM
2610{
2611 return p->pid == 0;
2612}
36c8b586 2613extern struct task_struct *curr_task(int cpu);
a458ae2e 2614extern void ia64_set_curr_task(int cpu, struct task_struct *p);
1da177e4
LT
2615
2616void yield(void);
2617
1da177e4 2618union thread_union {
c65eacbe 2619#ifndef CONFIG_THREAD_INFO_IN_TASK
1da177e4 2620 struct thread_info thread_info;
c65eacbe 2621#endif
1da177e4
LT
2622 unsigned long stack[THREAD_SIZE/sizeof(long)];
2623};
2624
2625#ifndef __HAVE_ARCH_KSTACK_END
2626static inline int kstack_end(void *addr)
2627{
2628 /* Reliable end of stack detection:
 2629 * Some APM BIOS versions misalign the stack
2630 */
2631 return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
2632}
2633#endif
2634
2635extern union thread_union init_thread_union;
2636extern struct task_struct init_task;
2637
2638extern struct mm_struct init_mm;
2639
198fe21b
PE
2640extern struct pid_namespace init_pid_ns;
2641
2642/*
2643 * find a task by one of its numerical ids
2644 *
198fe21b
PE
2645 * find_task_by_pid_ns():
2646 * finds a task by its pid in the specified namespace
228ebcbe
PE
2647 * find_task_by_vpid():
2648 * finds a task by its virtual pid
198fe21b 2649 *
e49859e7 2650 * see also find_vpid() etc in include/linux/pid.h
198fe21b
PE
2651 */
2652
228ebcbe
PE
2653extern struct task_struct *find_task_by_vpid(pid_t nr);
2654extern struct task_struct *find_task_by_pid_ns(pid_t nr,
2655 struct pid_namespace *ns);
198fe21b 2656
1da177e4 2657/* per-UID process charging. */
7b44ab97 2658extern struct user_struct * alloc_uid(kuid_t);
1da177e4
LT
2659static inline struct user_struct *get_uid(struct user_struct *u)
2660{
2661 atomic_inc(&u->__count);
2662 return u;
2663}
2664extern void free_uid(struct user_struct *);
1da177e4
LT
2665
2666#include <asm/current.h>
2667
f0af911a 2668extern void xtime_update(unsigned long ticks);
1da177e4 2669
b3c97528
HH
2670extern int wake_up_state(struct task_struct *tsk, unsigned int state);
2671extern int wake_up_process(struct task_struct *tsk);
3e51e3ed 2672extern void wake_up_new_task(struct task_struct *tsk);
1da177e4
LT
2673#ifdef CONFIG_SMP
2674 extern void kick_process(struct task_struct *tsk);
2675#else
2676 static inline void kick_process(struct task_struct *tsk) { }
2677#endif
aab03e05 2678extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
ad46c2c4 2679extern void sched_dead(struct task_struct *p);
1da177e4 2680
1da177e4
LT
2681extern void proc_caches_init(void);
2682extern void flush_signals(struct task_struct *);
10ab825b 2683extern void ignore_signals(struct task_struct *);
1da177e4
LT
2684extern void flush_signal_handlers(struct task_struct *, int force_default);
2685extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);
2686
be0e6f29 2687static inline int kernel_dequeue_signal(siginfo_t *info)
1da177e4 2688{
be0e6f29
ON
2689 struct task_struct *tsk = current;
2690 siginfo_t __info;
1da177e4
LT
2691 int ret;
2692
be0e6f29
ON
2693 spin_lock_irq(&tsk->sighand->siglock);
2694 ret = dequeue_signal(tsk, &tsk->blocked, info ?: &__info);
2695 spin_unlock_irq(&tsk->sighand->siglock);
1da177e4
LT
2696
2697 return ret;
53c8f9f1 2698}
1da177e4 2699
9a13049e
ON
2700static inline void kernel_signal_stop(void)
2701{
2702 spin_lock_irq(&current->sighand->siglock);
2703 if (current->jobctl & JOBCTL_STOP_DEQUEUED)
2704 __set_current_state(TASK_STOPPED);
2705 spin_unlock_irq(&current->sighand->siglock);
2706
2707 schedule();
2708}
2709
1da177e4
LT
2710extern void release_task(struct task_struct * p);
2711extern int send_sig_info(int, struct siginfo *, struct task_struct *);
1da177e4
LT
2712extern int force_sigsegv(int, struct task_struct *);
2713extern int force_sig_info(int, struct siginfo *, struct task_struct *);
c4b92fc1 2714extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
c4b92fc1 2715extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
d178bc3a
SH
2716extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *,
2717 const struct cred *, u32);
c4b92fc1
EB
2718extern int kill_pgrp(struct pid *pid, int sig, int priv);
2719extern int kill_pid(struct pid *pid, int sig, int priv);
c3de4b38 2720extern int kill_proc_info(int, struct siginfo *, pid_t);
86773473 2721extern __must_check bool do_notify_parent(struct task_struct *, int);
a7f0765e 2722extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
1da177e4 2723extern void force_sig(int, struct task_struct *);
1da177e4 2724extern int send_sig(int, struct task_struct *, int);
09faef11 2725extern int zap_other_threads(struct task_struct *p);
1da177e4
LT
2726extern struct sigqueue *sigqueue_alloc(void);
2727extern void sigqueue_free(struct sigqueue *);
ac5c2153 2728extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group);
9ac95f2f 2729extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
1da177e4 2730
7e781418
AL
2731#ifdef TIF_RESTORE_SIGMASK
2732/*
2733 * Legacy restore_sigmask accessors. These are inefficient on
2734 * SMP architectures because they require atomic operations.
2735 */
2736
2737/**
2738 * set_restore_sigmask() - make sure saved_sigmask processing gets done
2739 *
2740 * This sets TIF_RESTORE_SIGMASK and ensures that the arch signal code
2741 * will run before returning to user mode, to process the flag. For
2742 * all callers, TIF_SIGPENDING is already set or it's no harm to set
2743 * it. TIF_RESTORE_SIGMASK need not be in the set of bits that the
2744 * arch code will notice on return to user mode, in case those bits
2745 * are scarce. We set TIF_SIGPENDING here to ensure that the arch
2746 * signal code always gets run when TIF_RESTORE_SIGMASK is set.
2747 */
2748static inline void set_restore_sigmask(void)
2749{
2750 set_thread_flag(TIF_RESTORE_SIGMASK);
2751 WARN_ON(!test_thread_flag(TIF_SIGPENDING));
2752}
2753static inline void clear_restore_sigmask(void)
2754{
2755 clear_thread_flag(TIF_RESTORE_SIGMASK);
2756}
2757static inline bool test_restore_sigmask(void)
2758{
2759 return test_thread_flag(TIF_RESTORE_SIGMASK);
2760}
2761static inline bool test_and_clear_restore_sigmask(void)
2762{
2763 return test_and_clear_thread_flag(TIF_RESTORE_SIGMASK);
2764}
2765
2766#else /* TIF_RESTORE_SIGMASK */
2767
2768/* Higher-quality implementation, used if TIF_RESTORE_SIGMASK doesn't exist. */
2769static inline void set_restore_sigmask(void)
2770{
2771 current->restore_sigmask = true;
2772 WARN_ON(!test_thread_flag(TIF_SIGPENDING));
2773}
2774static inline void clear_restore_sigmask(void)
2775{
2776 current->restore_sigmask = false;
2777}
2778static inline bool test_restore_sigmask(void)
2779{
2780 return current->restore_sigmask;
2781}
2782static inline bool test_and_clear_restore_sigmask(void)
2783{
2784 if (!current->restore_sigmask)
2785 return false;
2786 current->restore_sigmask = false;
2787 return true;
2788}
2789#endif
2790
51a7b448
AV
2791static inline void restore_saved_sigmask(void)
2792{
2793 if (test_and_clear_restore_sigmask())
77097ae5 2794 __set_current_blocked(&current->saved_sigmask);
51a7b448
AV
2795}
2796
b7f9a11a
AV
2797static inline sigset_t *sigmask_to_save(void)
2798{
2799 sigset_t *res = &current->blocked;
2800 if (unlikely(test_restore_sigmask()))
2801 res = &current->saved_sigmask;
2802 return res;
2803}
2804
9ec52099
CLG
2805static inline int kill_cad_pid(int sig, int priv)
2806{
2807 return kill_pid(cad_pid, sig, priv);
2808}
2809
1da177e4
LT
2810/* These can be the second arg to send_sig_info/send_group_sig_info. */
2811#define SEND_SIG_NOINFO ((struct siginfo *) 0)
2812#define SEND_SIG_PRIV ((struct siginfo *) 1)
2813#define SEND_SIG_FORCED ((struct siginfo *) 2)
2814
2a855dd0
SAS
2815/*
2816 * True if we are on the alternate signal stack.
2817 */
1da177e4
LT
2818static inline int on_sig_stack(unsigned long sp)
2819{
c876eeab
AL
2820 /*
2821 * If the signal stack is SS_AUTODISARM then, by construction, we
2822 * can't be on the signal stack unless user code deliberately set
2823 * SS_AUTODISARM when we were already on it.
2824 *
2825 * This improves reliability: if user state gets corrupted such that
2826 * the stack pointer points very close to the end of the signal stack,
2827 * then this check will enable the signal to be handled anyway.
2828 */
2829 if (current->sas_ss_flags & SS_AUTODISARM)
2830 return 0;
2831
2a855dd0
SAS
2832#ifdef CONFIG_STACK_GROWSUP
2833 return sp >= current->sas_ss_sp &&
2834 sp - current->sas_ss_sp < current->sas_ss_size;
2835#else
2836 return sp > current->sas_ss_sp &&
2837 sp - current->sas_ss_sp <= current->sas_ss_size;
2838#endif
1da177e4
LT
2839}
2840
2841static inline int sas_ss_flags(unsigned long sp)
2842{
72f15c03
RW
2843 if (!current->sas_ss_size)
2844 return SS_DISABLE;
2845
2846 return on_sig_stack(sp) ? SS_ONSTACK : 0;
1da177e4
LT
2847}
2848
2a742138
SS
2849static inline void sas_ss_reset(struct task_struct *p)
2850{
2851 p->sas_ss_sp = 0;
2852 p->sas_ss_size = 0;
2853 p->sas_ss_flags = SS_DISABLE;
2854}
2855
5a1b98d3
AV
2856static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig)
2857{
2858 if (unlikely((ksig->ka.sa.sa_flags & SA_ONSTACK)) && ! sas_ss_flags(sp))
2859#ifdef CONFIG_STACK_GROWSUP
2860 return current->sas_ss_sp;
2861#else
2862 return current->sas_ss_sp + current->sas_ss_size;
2863#endif
2864 return sp;
2865}
2866
1da177e4
LT
2867/*
2868 * Routines for handling mm_structs
2869 */
2870extern struct mm_struct * mm_alloc(void);
2871
2872/* mmdrop drops the mm and the page tables */
b3c97528 2873extern void __mmdrop(struct mm_struct *);
d2005e3f 2874static inline void mmdrop(struct mm_struct *mm)
1da177e4 2875{
6fb43d7b 2876 if (unlikely(atomic_dec_and_test(&mm->mm_count)))
1da177e4
LT
2877 __mmdrop(mm);
2878}
2879
7283094e
MH
2880static inline void mmdrop_async_fn(struct work_struct *work)
2881{
2882 struct mm_struct *mm = container_of(work, struct mm_struct, async_put_work);
2883 __mmdrop(mm);
2884}
2885
2886static inline void mmdrop_async(struct mm_struct *mm)
2887{
2888 if (unlikely(atomic_dec_and_test(&mm->mm_count))) {
2889 INIT_WORK(&mm->async_put_work, mmdrop_async_fn);
2890 schedule_work(&mm->async_put_work);
2891 }
2892}
2893
d2005e3f
ON
2894static inline bool mmget_not_zero(struct mm_struct *mm)
2895{
2896 return atomic_inc_not_zero(&mm->mm_users);
2897}
2898
1da177e4
LT
2899/* mmput gets rid of the mappings and all user-space */
2900extern void mmput(struct mm_struct *);
7ef949d7
MH
2901#ifdef CONFIG_MMU
 2902/* Same as above, but performs the slow path from async context. Can
ec8d7c14
MH
 2903 * also be called from atomic context.
2904 */
2905extern void mmput_async(struct mm_struct *);
7ef949d7 2906#endif
ec8d7c14 2907
1da177e4
LT
2908/* Grab a reference to a task's mm, if it is not already going away */
2909extern struct mm_struct *get_task_mm(struct task_struct *task);
8cdb878d
CY
2910/*
2911 * Grab a reference to a task's mm, if it is not already going away
2912 * and ptrace_may_access with the mode parameter passed to it
2913 * succeeds.
2914 */
2915extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
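/*
 * Illustrative sketch: pinning another task's address space long enough
 * to read a field from it. get_task_mm() returns NULL for kernel
 * threads; ->total_vm is assumed from <linux/mm_types.h>.
 */
static inline unsigned long task_total_vm(struct task_struct *p)
{
	struct mm_struct *mm = get_task_mm(p);
	unsigned long total = 0;

	if (mm) {
		total = mm->total_vm;
		mmput(mm);		/* drop the mm_users reference */
	}
	return total;
}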
1da177e4
LT
 2916/* Remove the current task's stale references to the old mm_struct */
2917extern void mm_release(struct task_struct *, struct mm_struct *);
2918
3033f14a
JT
2919#ifdef CONFIG_HAVE_COPY_THREAD_TLS
2920extern int copy_thread_tls(unsigned long, unsigned long, unsigned long,
2921 struct task_struct *, unsigned long);
2922#else
6f2c55b8 2923extern int copy_thread(unsigned long, unsigned long, unsigned long,
afa86fc4 2924 struct task_struct *);
3033f14a
JT
2925
2926/* Architectures that haven't opted into copy_thread_tls get the tls argument
2927 * via pt_regs, so ignore the tls argument passed via C. */
2928static inline int copy_thread_tls(
2929 unsigned long clone_flags, unsigned long sp, unsigned long arg,
2930 struct task_struct *p, unsigned long tls)
2931{
2932 return copy_thread(clone_flags, sp, arg, p);
2933}
2934#endif
1da177e4 2935extern void flush_thread(void);
5f56a5df
JS
2936
2937#ifdef CONFIG_HAVE_EXIT_THREAD
e6464694 2938extern void exit_thread(struct task_struct *tsk);
5f56a5df 2939#else
e6464694 2940static inline void exit_thread(struct task_struct *tsk)
5f56a5df
JS
2941{
2942}
2943#endif
1da177e4 2944
1da177e4 2945extern void exit_files(struct task_struct *);
a7e5328a 2946extern void __cleanup_sighand(struct sighand_struct *);
cbaffba1 2947
1da177e4 2948extern void exit_itimers(struct signal_struct *);
cbaffba1 2949extern void flush_itimer_signals(void);
1da177e4 2950
9402c95f 2951extern void do_group_exit(int);
1da177e4 2952
c4ad8f98 2953extern int do_execve(struct filename *,
d7627467 2954 const char __user * const __user *,
da3d4c5f 2955 const char __user * const __user *);
51f39a1f
DD
2956extern int do_execveat(int, struct filename *,
2957 const char __user * const __user *,
2958 const char __user * const __user *,
2959 int);
3033f14a 2960extern long _do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *, unsigned long);
e80d6661 2961extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
36c8b586 2962struct task_struct *fork_idle(int);
2aa3a7f8 2963extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
1da177e4 2964
82b89778
AH
2965extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);
2966static inline void set_task_comm(struct task_struct *tsk, const char *from)
2967{
2968 __set_task_comm(tsk, from, false);
2969}
59714d65 2970extern char *get_task_comm(char *to, struct task_struct *tsk);
1da177e4
LT
2971
2972#ifdef CONFIG_SMP
317f3941 2973void scheduler_ipi(void);
85ba2d86 2974extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
1da177e4 2975#else
184748cc 2976static inline void scheduler_ipi(void) { }
85ba2d86
RM
2977static inline unsigned long wait_task_inactive(struct task_struct *p,
2978 long match_state)
2979{
2980 return 1;
2981}
1da177e4
LT
2982#endif
2983
fafe870f
FW
2984#define tasklist_empty() \
2985 list_empty(&init_task.tasks)
2986
05725f7e
JP
2987#define next_task(p) \
2988 list_entry_rcu((p)->tasks.next, struct task_struct, tasks)
1da177e4
LT
2989
2990#define for_each_process(p) \
2991 for (p = &init_task ; (p = next_task(p)) != &init_task ; )
2992
5bb459bb 2993extern bool current_is_single_threaded(void);
d84f4f99 2994
1da177e4
LT
2995/*
2996 * Careful: do_each_thread/while_each_thread is a double loop so
2997 * 'break' will not work as expected - use goto instead.
2998 */
2999#define do_each_thread(g, t) \
3000 for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do
3001
3002#define while_each_thread(g, t) \
3003 while ((t = next_thread(t)) != g)
3004
0c740d0a
ON
3005#define __for_each_thread(signal, t) \
3006 list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node)
3007
3008#define for_each_thread(p, t) \
3009 __for_each_thread((p)->signal, t)
3010
3011/* Careful: this is a double loop, 'break' won't work as expected. */
3012#define for_each_process_thread(p, t) \
3013 for_each_process(p) for_each_thread(p, t)
3014
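/*
 * Illustrative sketch: walking every thread in the system. The tasklist
 * is RCU-protected, so the walk must run under rcu_read_lock() (or with
 * tasklist_lock read-held).
 */
static inline int count_all_threads(void)
{
	struct task_struct *p, *t;
	int n = 0;

	rcu_read_lock();
	for_each_process_thread(p, t)
		n++;
	rcu_read_unlock();

	return n;
}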
7e49827c
ON
3015static inline int get_nr_threads(struct task_struct *tsk)
3016{
b3ac022c 3017 return tsk->signal->nr_threads;
7e49827c
ON
3018}
3019
087806b1
ON
3020static inline bool thread_group_leader(struct task_struct *p)
3021{
3022 return p->exit_signal >= 0;
3023}
1da177e4 3024
0804ef4b
EB
 3025/* Due to the insanities of de_thread it is possible for a process
3026 * to have the pid of the thread group leader without actually being
3027 * the thread group leader. For iteration through the pids in proc
3028 * all we care about is that we have a task with the appropriate
 3029 * pid; we don't actually care if we have the right task.
3030 */
e1403b8e 3031static inline bool has_group_leader_pid(struct task_struct *p)
0804ef4b 3032{
e1403b8e 3033 return task_pid(p) == p->signal->leader_pid;
0804ef4b
EB
3034}
3035
bac0abd6 3036static inline
e1403b8e 3037bool same_thread_group(struct task_struct *p1, struct task_struct *p2)
bac0abd6 3038{
e1403b8e 3039 return p1->signal == p2->signal;
bac0abd6
PE
3040}
3041
36c8b586 3042static inline struct task_struct *next_thread(const struct task_struct *p)
47e65328 3043{
05725f7e
JP
3044 return list_entry_rcu(p->thread_group.next,
3045 struct task_struct, thread_group);
47e65328
ON
3046}
3047
e868171a 3048static inline int thread_group_empty(struct task_struct *p)
1da177e4 3049{
47e65328 3050 return list_empty(&p->thread_group);
1da177e4
LT
3051}
3052
3053#define delay_group_leader(p) \
3054 (thread_group_leader(p) && !thread_group_empty(p))
3055
/*
 * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
 * subscriptions and synchronises with wait4(). Also used in procfs. Also
 * pins the final release of task.io_context. Also protects ->cpuset and
 * ->cgroup.subsys[]. And ->vfork_done.
 *
 * Nests both inside and outside of read_lock(&tasklist_lock).
 * It must not be nested with write_lock_irq(&tasklist_lock),
 * neither inside nor outside.
 */
static inline void task_lock(struct task_struct *p)
{
	spin_lock(&p->alloc_lock);
}

static inline void task_unlock(struct task_struct *p)
{
	spin_unlock(&p->alloc_lock);
}

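/*
 * Example (illustrative sketch modelled on get_task_mm(), minus its
 * kernel-thread check): task_lock() keeps ->mm stable while we pin it.
 */
#if 0
static struct mm_struct *example_grab_mm(struct task_struct *p)
{
	struct mm_struct *mm;

	task_lock(p);
	mm = p->mm;
	if (mm)
		atomic_inc(&mm->mm_users);	/* pin before unlocking */
	task_unlock(p);
	return mm;
}
#endif
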
extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
						  unsigned long *flags);

static inline struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
						       unsigned long *flags)
{
	struct sighand_struct *ret;

	ret = __lock_task_sighand(tsk, flags);
	(void)__cond_lock(&tsk->sighand->siglock, ret);
	return ret;
}

static inline void unlock_task_sighand(struct task_struct *tsk,
				       unsigned long *flags)
{
	spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
}

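/*
 * Example (illustrative only): lock_task_sighand() returns NULL once the
 * task has lost its sighand (it is exiting), so always check the result.
 */
#if 0
	unsigned long flags;

	if (lock_task_sighand(p, &flags)) {
		/* ->sighand->siglock held here, interrupts disabled */
		recalc_sigpending_and_wake(p);
		unlock_task_sighand(p, &flags);
	}
#endif
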
/**
 * threadgroup_change_begin - mark the beginning of changes to a threadgroup
 * @tsk: task causing the changes
 *
 * All operations which modify a threadgroup - a new thread joining the
 * group, death of a member thread (the assertion of PF_EXITING) and
 * exec(2) dethreading the process and replacing the leader - are wrapped
 * by threadgroup_change_{begin|end}(). This is to provide a place which
 * subsystems needing threadgroup stability can hook into for
 * synchronization.
 */
static inline void threadgroup_change_begin(struct task_struct *tsk)
{
	might_sleep();
	cgroup_threadgroup_change_begin(tsk);
}

/**
 * threadgroup_change_end - mark the end of changes to a threadgroup
 * @tsk: task causing the changes
 *
 * See threadgroup_change_begin().
 */
static inline void threadgroup_change_end(struct task_struct *tsk)
{
	cgroup_threadgroup_change_end(tsk);
}

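/*
 * Example (illustrative only): any path that changes threadgroup
 * membership brackets the change so subsystems see a stable group.
 */
#if 0
	threadgroup_change_begin(current);
	/* ... create or reap a thread, or dethread during exec ... */
	threadgroup_change_end(current);
#endif
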
#ifdef CONFIG_THREAD_INFO_IN_TASK

static inline struct thread_info *task_thread_info(struct task_struct *task)
{
	return &task->thread_info;
}

/*
 * When accessing the stack of a non-current task that might exit, use
 * try_get_task_stack() instead. task_stack_page will return a pointer
 * that could get freed out from under you.
 */
static inline void *task_stack_page(const struct task_struct *task)
{
	return task->stack;
}

#define setup_thread_stack(new,old) do { } while(0)

static inline unsigned long *end_of_stack(const struct task_struct *task)
{
	return task->stack;
}

#elif !defined(__HAVE_THREAD_FUNCTIONS)

#define task_thread_info(task)	((struct thread_info *)(task)->stack)
#define task_stack_page(task)	((void *)(task)->stack)

static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
{
	*task_thread_info(p) = *task_thread_info(org);
	task_thread_info(p)->task = p;
}

/*
 * Return the address of the last usable long on the stack.
 *
 * When the stack grows down, this is just above the thread
 * info struct. Going any lower will corrupt the threadinfo.
 *
 * When the stack grows up, this is the highest address.
 * Beyond that position, we corrupt data on the next page.
 */
static inline unsigned long *end_of_stack(struct task_struct *p)
{
#ifdef CONFIG_STACK_GROWSUP
	return (unsigned long *)((unsigned long)task_thread_info(p) + THREAD_SIZE) - 1;
#else
	return (unsigned long *)(task_thread_info(p) + 1);
#endif
}

#endif

#ifdef CONFIG_THREAD_INFO_IN_TASK
static inline void *try_get_task_stack(struct task_struct *tsk)
{
	return atomic_inc_not_zero(&tsk->stack_refcount) ?
		task_stack_page(tsk) : NULL;
}

extern void put_task_stack(struct task_struct *tsk);
#else
static inline void *try_get_task_stack(struct task_struct *tsk)
{
	return task_stack_page(tsk);
}

static inline void put_task_stack(struct task_struct *tsk) {}
#endif

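/*
 * Example (illustrative only): pin a remote task's stack before reading
 * it; with CONFIG_THREAD_INFO_IN_TASK the pages may otherwise be freed
 * while we look at them.
 */
#if 0
	void *stack = try_get_task_stack(tsk);

	if (stack) {
		/* stack pages cannot be freed until put_task_stack() */
		put_task_stack(tsk);
	}
#endif
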
#define task_stack_end_corrupted(task) \
		(*(end_of_stack(task)) != STACK_END_MAGIC)

static inline int object_is_on_stack(void *obj)
{
	void *stack = task_stack_page(current);

	return (obj >= stack) && (obj < (stack + THREAD_SIZE));
}

extern void thread_stack_cache_init(void);

#ifdef CONFIG_DEBUG_STACK_USAGE
static inline unsigned long stack_not_used(struct task_struct *p)
{
	unsigned long *n = end_of_stack(p);

	do {	/* Skip over canary */
# ifdef CONFIG_STACK_GROWSUP
		n--;
# else
		n++;
# endif
	} while (!*n);

# ifdef CONFIG_STACK_GROWSUP
	return (unsigned long)end_of_stack(p) - (unsigned long)n;
# else
	return (unsigned long)n - (unsigned long)end_of_stack(p);
# endif
}
#endif
extern void set_task_stack_end_magic(struct task_struct *tsk);

/* set thread flags in other task's structures
 * - see asm/thread_info.h for TIF_xxxx flags available
 */
static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void set_tsk_need_resched(struct task_struct *tsk)
{
	set_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
}

static inline void clear_tsk_need_resched(struct task_struct *tsk)
{
	clear_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
}

static inline int test_tsk_need_resched(struct task_struct *tsk)
{
	return unlikely(test_tsk_thread_flag(tsk, TIF_NEED_RESCHED));
}

static inline int restart_syscall(void)
{
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	return -ERESTARTNOINTR;
}

3277
1da177e4
LT
3278static inline int signal_pending(struct task_struct *p)
3279{
3280 return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
3281}
f776d12d 3282
d9588725
RM
3283static inline int __fatal_signal_pending(struct task_struct *p)
3284{
3285 return unlikely(sigismember(&p->pending.signal, SIGKILL));
3286}
f776d12d
MW
3287
3288static inline int fatal_signal_pending(struct task_struct *p)
3289{
3290 return signal_pending(p) && __fatal_signal_pending(p);
3291}
3292
16882c1e
ON
3293static inline int signal_pending_state(long state, struct task_struct *p)
3294{
3295 if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
3296 return 0;
3297 if (!signal_pending(p))
3298 return 0;
3299
16882c1e
ON
3300 return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
3301}
3302
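/*
 * Example (illustrative only): the canonical interruptible wait backs
 * out with -ERESTARTSYS when a signal becomes pending.
 */
#if 0
	while (!done) {			/* 'done' is assumed */
		if (signal_pending(current))
			return -ERESTARTSYS;
		schedule_timeout_interruptible(HZ / 10);
	}
#endif
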
/*
 * cond_resched() and cond_resched_lock(): latency reduction via
 * explicit rescheduling in places that are safe. The return
 * value indicates whether a reschedule was in fact done.
 * cond_resched_lock() will drop the spinlock before scheduling,
 * cond_resched_softirq() will enable bhs before scheduling.
 */
#ifndef CONFIG_PREEMPT
extern int _cond_resched(void);
#else
static inline int _cond_resched(void) { return 0; }
#endif

#define cond_resched() ({			\
	___might_sleep(__FILE__, __LINE__, 0);	\
	_cond_resched();			\
})

extern int __cond_resched_lock(spinlock_t *lock);

#define cond_resched_lock(lock) ({				\
	___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
	__cond_resched_lock(lock);				\
})

extern int __cond_resched_softirq(void);

#define cond_resched_softirq() ({					\
	___might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET);	\
	__cond_resched_softirq();					\
})

static inline void cond_resched_rcu(void)
{
#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
	rcu_read_unlock();
	cond_resched();
	rcu_read_lock();
#endif
}

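/*
 * Example (illustrative only): long kernel-side loops call cond_resched()
 * periodically so other runnable tasks are not starved on !CONFIG_PREEMPT
 * kernels.
 */
#if 0
	for (i = 0; i < nr_items; i++) {	/* 'nr_items' is assumed */
		process_one(i);			/* 'process_one' is assumed */
		cond_resched();
	}
#endif
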
static inline unsigned long get_preempt_disable_ip(struct task_struct *p)
{
#ifdef CONFIG_DEBUG_PREEMPT
	return p->preempt_disable_ip;
#else
	return 0;
#endif
}

/*
 * Does a critical section need to be broken due to another
 * task waiting? (Technically this does not depend on CONFIG_PREEMPT,
 * but reflects a general need for low latency.)
 */
static inline int spin_needbreak(spinlock_t *lock)
{
#ifdef CONFIG_PREEMPT
	return spin_is_contended(lock);
#else
	return 0;
#endif
}

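/*
 * Example (illustrative only): a lock-holding loop can combine
 * spin_needbreak() with cond_resched_lock() to yield the lock and the
 * CPU when there is contention.
 */
#if 0
	spin_lock(&mylock);			/* 'mylock' is assumed */
	while (have_work()) {			/* 'have_work' is assumed */
		do_one_unit();			/* 'do_one_unit' is assumed */
		if (spin_needbreak(&mylock) || need_resched())
			cond_resched_lock(&mylock);
	}
	spin_unlock(&mylock);
#endif
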
/*
 * Idle thread specific functions to determine the need_resched
 * polling state.
 */
#ifdef TIF_POLLING_NRFLAG
static inline int tsk_is_polling(struct task_struct *p)
{
	return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG);
}

static inline void __current_set_polling(void)
{
	set_thread_flag(TIF_POLLING_NRFLAG);
}

static inline bool __must_check current_set_polling_and_test(void)
{
	__current_set_polling();

	/*
	 * Polling state must be visible before we test NEED_RESCHED,
	 * paired by resched_curr()
	 */
	smp_mb__after_atomic();

	return unlikely(tif_need_resched());
}

static inline void __current_clr_polling(void)
{
	clear_thread_flag(TIF_POLLING_NRFLAG);
}

static inline bool __must_check current_clr_polling_and_test(void)
{
	__current_clr_polling();

	/*
	 * Polling state must be visible before we test NEED_RESCHED,
	 * paired by resched_curr()
	 */
	smp_mb__after_atomic();

	return unlikely(tif_need_resched());
}

#else
static inline int tsk_is_polling(struct task_struct *p) { return 0; }
static inline void __current_set_polling(void) { }
static inline void __current_clr_polling(void) { }

static inline bool __must_check current_set_polling_and_test(void)
{
	return unlikely(tif_need_resched());
}
static inline bool __must_check current_clr_polling_and_test(void)
{
	return unlikely(tif_need_resched());
}
#endif

static inline void current_clr_polling(void)
{
	__current_clr_polling();

	/*
	 * Ensure we check TIF_NEED_RESCHED after we clear the polling bit.
	 * Once the bit is cleared, we'll get IPIs with every new
	 * TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also
	 * fold.
	 */
	smp_mb(); /* paired with resched_curr() */

	preempt_fold_need_resched();
}

static __always_inline bool need_resched(void)
{
	return unlikely(tif_need_resched());
}

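/*
 * Example (illustrative sketch of the idle-loop pattern): advertise
 * polling so remote CPUs can skip the resched IPI, and stop idling as
 * soon as NEED_RESCHED shows up.
 */
#if 0
	if (!current_set_polling_and_test()) {
		while (!need_resched())
			cpu_relax();	/* a real idle driver would sleep */
	}
	current_clr_polling();
#endif
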
/*
 * Thread group CPU time accounting.
 */
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);

/*
 * Reevaluate whether the task has signals pending delivery.
 * Wake the task if so.
 * This is required every time the blocked sigset_t changes.
 * Callers must hold sighand->siglock.
 */
extern void recalc_sigpending_and_wake(struct task_struct *t);
extern void recalc_sigpending(void);

extern void signal_wake_up_state(struct task_struct *t, unsigned int state);

static inline void signal_wake_up(struct task_struct *t, bool resume)
{
	signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
}
static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
{
	signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
}

/*
 * Wrappers for p->thread_info->cpu access. No-op on UP.
 */
#ifdef CONFIG_SMP

static inline unsigned int task_cpu(const struct task_struct *p)
{
#ifdef CONFIG_THREAD_INFO_IN_TASK
	return p->cpu;
#else
	return task_thread_info(p)->cpu;
#endif
}

static inline int task_node(const struct task_struct *p)
{
	return cpu_to_node(task_cpu(p));
}

extern void set_task_cpu(struct task_struct *p, unsigned int cpu);

#else

static inline unsigned int task_cpu(const struct task_struct *p)
{
	return 0;
}

static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
{
}

#endif /* CONFIG_SMP */

extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
extern long sched_getaffinity(pid_t pid, struct cpumask *mask);

#ifdef CONFIG_CGROUP_SCHED
extern struct task_group root_task_group;
#endif /* CONFIG_CGROUP_SCHED */

extern int task_can_switch_user(struct user_struct *up,
				struct task_struct *tsk);

#ifdef CONFIG_TASK_XACCT
static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
{
	tsk->ioac.rchar += amt;
}

static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
{
	tsk->ioac.wchar += amt;
}

static inline void inc_syscr(struct task_struct *tsk)
{
	tsk->ioac.syscr++;
}

static inline void inc_syscw(struct task_struct *tsk)
{
	tsk->ioac.syscw++;
}
#else
static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
{
}

static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
{
}

static inline void inc_syscr(struct task_struct *tsk)
{
}

static inline void inc_syscw(struct task_struct *tsk)
{
}
#endif

#ifndef TASK_SIZE_OF
#define TASK_SIZE_OF(tsk)	TASK_SIZE
#endif

#ifdef CONFIG_MEMCG
extern void mm_update_next_owner(struct mm_struct *mm);
#else
static inline void mm_update_next_owner(struct mm_struct *mm)
{
}
#endif /* CONFIG_MEMCG */

static inline unsigned long task_rlimit(const struct task_struct *tsk,
					unsigned int limit)
{
	return READ_ONCE(tsk->signal->rlim[limit].rlim_cur);
}

static inline unsigned long task_rlimit_max(const struct task_struct *tsk,
					    unsigned int limit)
{
	return READ_ONCE(tsk->signal->rlim[limit].rlim_max);
}

static inline unsigned long rlimit(unsigned int limit)
{
	return task_rlimit(current, limit);
}

static inline unsigned long rlimit_max(unsigned int limit)
{
	return task_rlimit_max(current, limit);
}

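/*
 * Example (illustrative only): checking a request against the caller's
 * soft limit; RLIM_INFINITY means "no limit" and must pass.
 */
#if 0
	unsigned long limit = rlimit(RLIMIT_STACK);

	if (limit != RLIM_INFINITY && size > limit)	/* 'size' is assumed */
		return -ENOMEM;
#endif
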
#define SCHED_CPUFREQ_RT	(1U << 0)
#define SCHED_CPUFREQ_DL	(1U << 1)
#define SCHED_CPUFREQ_IOWAIT	(1U << 2)

#define SCHED_CPUFREQ_RT_DL	(SCHED_CPUFREQ_RT | SCHED_CPUFREQ_DL)

#ifdef CONFIG_CPU_FREQ
struct update_util_data {
	void (*func)(struct update_util_data *data, u64 time, unsigned int flags);
};

void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data,
		       void (*func)(struct update_util_data *data, u64 time,
				    unsigned int flags));
void cpufreq_remove_update_util_hook(int cpu);
#endif /* CONFIG_CPU_FREQ */

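/*
 * Example (illustrative sketch of a cpufreq governor hook): the callback
 * runs from scheduler context with a timestamp and SCHED_CPUFREQ_* flags,
 * and is registered/removed per CPU.
 */
#if 0
static DEFINE_PER_CPU(struct update_util_data, example_update_util);

static void example_util_handler(struct update_util_data *data, u64 time,
				 unsigned int flags)
{
	/* Scheduler context: must not sleep, keep it short. */
	if (flags & SCHED_CPUFREQ_RT_DL)
		return;		/* e.g. an RT/DL task wants max frequency */
}

static void example_start(int cpu)
{
	cpufreq_add_update_util_hook(cpu, &per_cpu(example_update_util, cpu),
				     example_util_handler);
}

static void example_stop(int cpu)
{
	cpufreq_remove_update_util_hook(cpu);
}
#endif
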
#endif