#include <uapi/linux/sched.h>

#include <linux/sched/prio.h>

#include <linux/capability.h>
#include <linux/mutex.h>
#include <linux/plist.h>
#include <linux/mm_types_task.h>
#include <asm/ptrace.h>

#include <linux/sem.h>
#include <linux/shm.h>
#include <linux/signal.h>
#include <linux/signal_types.h>
#include <linux/pid.h>
#include <linux/seccomp.h>
#include <linux/rculist.h>
#include <linux/rtmutex.h>

#include <linux/resource.h>
#include <linux/hrtimer.h>
#include <linux/kcov.h>
#include <linux/task_io_accounting.h>
#include <linux/latencytop.h>
#include <linux/cred.h>
#include <linux/gfp.h>
#include <linux/topology.h>
#include <linux/magic.h>
#include <linux/cgroup-defs.h>

#include <asm/current.h>
/* task_struct member predeclarations: */

struct backing_dev_info;
struct futex_pi_state;
struct perf_event_context;
struct pipe_inode_info;
struct robust_list_head;
struct sighand_struct;
struct task_delay_info;
/*
 * Task state bitmask. NOTE! These bits are also
 * encoded in fs/proc/array.c: get_task_state().
 *
 * We have two separate sets of flags: task->state
 * is about runnability, while task->exit_state are
 * about the task exiting. Confusing, but this way
 * modifying one set can't modify the other one by
 * mistake.
 */
#define TASK_RUNNING		0
#define TASK_INTERRUPTIBLE	1
#define TASK_UNINTERRUPTIBLE	2
#define __TASK_STOPPED		4
#define __TASK_TRACED		8
/* in tsk->exit_state */
#define EXIT_DEAD		16
#define EXIT_ZOMBIE		32
#define EXIT_TRACE		(EXIT_ZOMBIE | EXIT_DEAD)
/* in tsk->state again */
#define TASK_WAKEKILL		128
#define TASK_WAKING		256
#define TASK_PARKED		512
#define TASK_NOLOAD		1024

#define TASK_STATE_MAX		4096

#define TASK_STATE_TO_CHAR_STR	"RSDTtXZxKWPNn"
/* Convenience macros for the sake of set_current_state */
#define TASK_KILLABLE		(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
#define TASK_STOPPED		(TASK_WAKEKILL | __TASK_STOPPED)
#define TASK_TRACED		(TASK_WAKEKILL | __TASK_TRACED)

#define TASK_IDLE		(TASK_UNINTERRUPTIBLE | TASK_NOLOAD)

/* Convenience macros for the sake of wake_up */
#define TASK_NORMAL		(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
#define TASK_ALL		(TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)

/* get_task_state() */
#define TASK_REPORT		(TASK_RUNNING | TASK_INTERRUPTIBLE | \
				 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
				 __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD)

#define task_is_traced(task)		((task->state & __TASK_TRACED) != 0)
#define task_is_stopped(task)		((task->state & __TASK_STOPPED) != 0)
#define task_is_stopped_or_traced(task)	\
			((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
#define task_contributes_to_load(task)	\
				((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
				 (task->flags & PF_FROZEN) == 0 && \
				 (task->state & TASK_NOLOAD) == 0)
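
/*
 * Illustrative sketch (an addition, not part of the original header):
 * checking another task's state with the helpers above. Assumes @p is a
 * valid task_struct pointer obtained under suitable locking (tasklist_lock
 * or RCU):
 *
 *	if (task_is_stopped_or_traced(p))
 *		pr_debug("%s is stopped or being ptraced\n", p->comm);
 */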
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP

#define __set_current_state(state_value)			\
	do {							\
		current->task_state_change = _THIS_IP_;		\
		current->state = (state_value);			\
	} while (0)

#define set_current_state(state_value)				\
	do {							\
		current->task_state_change = _THIS_IP_;		\
		smp_store_mb(current->state, (state_value));	\
	} while (0)
#else
/*
 * set_current_state() includes a barrier so that the write of current->state
 * is correctly serialised wrt the caller's subsequent test of whether to
 * actually sleep:
 *
 *   for (;;) {
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	if (!need_sleep)
 *		break;
 *
 *	schedule();
 *   }
 *   __set_current_state(TASK_RUNNING);
 *
 * If the caller does not need such serialisation (because, for instance, the
 * condition test and condition change and wakeup are under the same lock) then
 * use __set_current_state().
 *
 * The above is typically ordered against the wakeup, which does:
 *
 *   need_sleep = false;
 *   wake_up_state(p, TASK_UNINTERRUPTIBLE);
 *
 * Where wake_up_state() (and all other wakeup primitives) imply enough
 * barriers to order the store of the variable against wakeup.
 *
 * Wakeup will do: if (@state & p->state) p->state = TASK_RUNNING, that is,
 * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a
 * TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING).
 *
 * This is obviously fine, since they both store the exact same value.
 *
 * Also see the comments of try_to_wake_up().
 */
#define __set_current_state(state_value)		\
	do { current->state = (state_value); } while (0)
#define set_current_state(state_value)			\
	smp_store_mb(current->state, (state_value))

#endif
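
/*
 * Illustrative sketch (an addition, not part of the original header): the
 * same wait pattern with TASK_KILLABLE, so a fatal signal can end the wait
 * early. 'need_sleep' is a caller-provided condition and
 * fatal_signal_pending() is declared in the signal headers:
 *
 *	for (;;) {
 *		set_current_state(TASK_KILLABLE);
 *		if (!need_sleep || fatal_signal_pending(current))
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 */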
/* Task command name length */
#define TASK_COMM_LEN		16

extern cpumask_var_t cpu_isolated_map;

extern int runqueue_is_locked(int cpu);

extern void scheduler_tick(void);

#define	MAX_SCHEDULE_TIMEOUT	LONG_MAX
extern signed long schedule_timeout(signed long timeout);
extern signed long schedule_timeout_interruptible(signed long timeout);
extern signed long schedule_timeout_killable(signed long timeout);
extern signed long schedule_timeout_uninterruptible(signed long timeout);
extern signed long schedule_timeout_idle(signed long timeout);
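
/*
 * Illustrative sketch (an addition, not part of the original header):
 * sleeping for roughly 100ms. The plain schedule_timeout() expects the
 * caller to set the task state first; the _interruptible/_uninterruptible
 * wrappers do that for you. msecs_to_jiffies() comes from <linux/jiffies.h>:
 *
 *	schedule_timeout_uninterruptible(msecs_to_jiffies(100));
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	remaining = schedule_timeout(msecs_to_jiffies(100));
 */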
asmlinkage void schedule(void);
extern void schedule_preempt_disabled(void);

extern int __must_check io_schedule_prepare(void);
extern void io_schedule_finish(int token);
extern long io_schedule_timeout(long timeout);
extern void io_schedule(void);
/**
 * struct prev_cputime - snapshot of system and user cputime
 * @utime:	time spent in user mode
 * @stime:	time spent in system mode
 * @lock:	protects the above two fields
 *
 * Stores previous user/system time values such that we can guarantee
 * monotonicity.
 */
struct prev_cputime {
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE

/**
 * struct task_cputime - collected CPU time counts
 * @utime:		time spent in user mode, in nanoseconds
 * @stime:		time spent in kernel mode, in nanoseconds
 * @sum_exec_runtime:	total time spent on the CPU, in nanoseconds
 *
 * This structure groups together three kinds of CPU time that are tracked for
 * threads and thread groups. Most things considering CPU time want to group
 * these counts together and treat all three of them in parallel.
 */
struct task_cputime {
	u64 utime;
	u64 stime;
	unsigned long long sum_exec_runtime;
};

/* Alternate field names when used to cache expirations. */
#define virt_exp	utime
#define prof_exp	stime
#define sched_exp	sum_exec_runtime
#include <linux/rwsem.h>

#ifdef CONFIG_SCHED_INFO
	/* cumulative counters */
	unsigned long pcount;			/* # of times run on this cpu */
	unsigned long long run_delay;		/* time spent waiting on a runqueue */

	unsigned long long last_arrival,	/* when we last ran on a cpu */
			   last_queued;		/* when we were last queued to run */
#endif /* CONFIG_SCHED_INFO */
/*
 * Integer metrics need fixed point arithmetic, e.g., sched/fair
 * has a few: load, load_avg, util_avg, freq, and capacity.
 *
 * We define a basic fixed point arithmetic range, and then formalize
 * all these metrics based on that basic range.
 */
# define SCHED_FIXEDPOINT_SHIFT	10
# define SCHED_FIXEDPOINT_SCALE	(1L << SCHED_FIXEDPOINT_SHIFT)
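
/*
 * Illustrative sketch (an addition, not part of the original header): with
 * SCHED_FIXEDPOINT_SHIFT == 10 the value 1.0 is stored as 1024 and 0.5 as
 * 512, so multiplying a quantity x by a fixed-point ratio r is plain
 * integer arithmetic:
 *
 *	scaled = (x * r) >> SCHED_FIXEDPOINT_SHIFT;
 */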
#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
extern void prefetch_stack(struct task_struct *t);
#else
static inline void prefetch_stack(struct task_struct *t) { }
#endif

	unsigned long weight;
/*
 * The load_avg/util_avg accumulates an infinite geometric series
 * (see __update_load_avg() in kernel/sched/fair.c).
 *
 * [load_avg definition]
 *
 *   load_avg = runnable% * scale_load_down(load)
 *
 * where runnable% is the time ratio that a sched_entity is runnable.
 * For cfs_rq, it is the aggregated load_avg of all runnable and
 * blocked sched_entities.
 *
 * load_avg may also take frequency scaling into account:
 *
 *   load_avg = runnable% * scale_load_down(load) * freq%
 *
 * where freq% is the CPU frequency normalized to the highest frequency.
 *
 * [util_avg definition]
 *
 *   util_avg = running% * SCHED_CAPACITY_SCALE
 *
 * where running% is the time ratio that a sched_entity is running on
 * a CPU. For cfs_rq, it is the aggregated util_avg of all runnable
 * and blocked sched_entities.
 *
 * util_avg may also factor frequency scaling and CPU capacity scaling:
 *
 *   util_avg = running% * SCHED_CAPACITY_SCALE * freq% * capacity%
 *
 * where freq% is the same as above, and capacity% is the CPU capacity
 * normalized to the greatest capacity (due to uarch differences, etc).
 *
 * N.B., the above ratios (runnable%, running%, freq%, and capacity%)
 * themselves are in the range of [0, 1]. To do fixed point arithmetics,
 * we therefore scale them to as large a range as necessary. This is for
 * example reflected by util_avg's SCHED_CAPACITY_SCALE.
 *
 * [Overflow issue]
 *
 * The 64-bit load_sum can have 4353082796 (=2^64/47742/88761) entities
 * with the highest load (=88761), always runnable on a single cfs_rq,
 * and should not overflow as the number already hits PID_MAX_LIMIT.
 *
 * For all other cases (including 32-bit kernels), struct load_weight's
 * weight will overflow first before we do, because:
 *
 *   Max(load_avg) <= Max(load.weight)
 *
 * Then it is the load_weight's responsibility to consider overflow
 * issues.
 */
	u64 last_update_time, load_sum;
	u32 util_sum, period_contrib;
	unsigned long load_avg, util_avg;
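
/*
 * Illustrative sketch (an addition, not part of the original header):
 * turning util_avg into a rough percentage of one CPU, assuming @se points
 * at a sched_entity and SCHED_CAPACITY_SCALE is 1024:
 *
 *	unsigned long util_pct = se->avg.util_avg * 100 / SCHED_CAPACITY_SCALE;
 */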
#ifdef CONFIG_SCHEDSTATS
struct sched_statistics {
	s64 sum_sleep_runtime;

	u64 nr_migrations_cold;
	u64 nr_failed_migrations_affine;
	u64 nr_failed_migrations_running;
	u64 nr_failed_migrations_hot;
	u64 nr_forced_migrations;

	u64 nr_wakeups_migrate;
	u64 nr_wakeups_local;
	u64 nr_wakeups_remote;
	u64 nr_wakeups_affine;
	u64 nr_wakeups_affine_attempts;
	u64 nr_wakeups_passive;

struct sched_entity {
	struct load_weight load;	/* for load-balancing */
	struct rb_node run_node;
	struct list_head group_node;

	u64 sum_exec_runtime;
	u64 prev_sum_exec_runtime;

#ifdef CONFIG_SCHEDSTATS
	struct sched_statistics statistics;

#ifdef CONFIG_FAIR_GROUP_SCHED
	struct sched_entity *parent;
	/* rq on which this entity is (to be) queued: */
	struct cfs_rq *cfs_rq;
	/* rq "owned" by this entity/group: */

	/*
	 * Per entity load average tracking.
	 *
	 * Put into separate cache line so it does not
	 * collide with read-mostly values above.
	 */
	struct sched_avg avg ____cacheline_aligned_in_smp;

struct sched_rt_entity {
	struct list_head run_list;
	unsigned long timeout;
	unsigned long watchdog_stamp;
	unsigned int time_slice;
	unsigned short on_rq;
	unsigned short on_list;

	struct sched_rt_entity *back;
#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity *parent;
	/* rq on which this entity is (to be) queued: */
	/* rq "owned" by this entity/group: */
struct sched_dl_entity {
	struct rb_node rb_node;

	/*
	 * Original scheduling parameters. Copied here from sched_attr
	 * during sched_setattr(), they will remain the same until
	 * the next sched_setattr().
	 */
	u64 dl_runtime;		/* maximum runtime for each instance */
	u64 dl_deadline;	/* relative deadline of each instance */
	u64 dl_period;		/* separation of two instances (period) */
	u64 dl_bw;		/* dl_runtime / dl_deadline */

	/*
	 * Actual scheduling parameters. Initialized with the values above,
	 * they are continuously updated during task execution. Note that
	 * the remaining runtime could be < 0 in case we are in overrun.
	 */
	s64 runtime;		/* remaining runtime for this instance */
	u64 deadline;		/* absolute deadline for this instance */
	unsigned int flags;	/* specifying the scheduler behaviour */

	/*
	 * @dl_throttled tells if we exhausted the runtime. If so, the
	 * task has to wait for a replenishment to be performed at the
	 * next firing of dl_timer.
	 *
	 * @dl_boosted tells if we are boosted due to DI. If so we are
	 * outside bandwidth enforcement mechanism (but only until we
	 * exit the critical section);
	 *
	 * @dl_yielded tells if task gave up the cpu before consuming
	 * all its available runtime during the last job.
	 */
	int dl_throttled, dl_boosted, dl_yielded;

	/*
	 * Bandwidth enforcement timer. Each -deadline task has its
	 * own bandwidth to be enforced, thus we need one timer per task.
	 */
	struct hrtimer dl_timer;
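
/*
 * Illustrative sketch (an addition, not part of the original header): the
 * original parameters above are supplied from userspace in nanoseconds via
 * sched_setattr(2), roughly:
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_DEADLINE,
 *		.sched_runtime	= 10 * 1000 * 1000,	// 10ms of runtime
 *		.sched_deadline	= 30 * 1000 * 1000,	// 30ms relative deadline
 *		.sched_period	= 30 * 1000 * 1000,	// 30ms period
 *	};
 *	sched_setattr(0, &attr, 0);			// 0 == calling thread
 */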
		u8 pad;	/* Otherwise the compiler can store garbage here. */
	u32 s;		/* Set of bits. */

enum perf_event_task_context {
	perf_invalid_context = -1,
	perf_nr_task_contexts,

	struct wake_q_node *next;

#ifdef CONFIG_THREAD_INFO_IN_TASK
	/*
	 * For reasons of header soup (see current_thread_info()), this
	 * must be the first element of task_struct.
	 */
	struct thread_info thread_info;
	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */

	unsigned int flags;	/* per process flags, defined below */

	struct llist_node wake_entry;
#ifdef CONFIG_THREAD_INFO_IN_TASK
	unsigned int cpu;	/* current CPU */
	unsigned int wakee_flips;
	unsigned long wakee_flip_decay_ts;
	struct task_struct *last_wakee;

	int prio, static_prio, normal_prio;
	unsigned int rt_priority;
	const struct sched_class *sched_class;
	struct sched_entity se;
	struct sched_rt_entity rt;
#ifdef CONFIG_CGROUP_SCHED
	struct task_group *sched_task_group;
	struct sched_dl_entity dl;
#ifdef CONFIG_PREEMPT_NOTIFIERS
	/* list of struct preempt_notifier: */
	struct hlist_head preempt_notifiers;

#ifdef CONFIG_BLK_DEV_IO_TRACE
	unsigned int btrace_seq;

	cpumask_t cpus_allowed;

#ifdef CONFIG_PREEMPT_RCU
	int rcu_read_lock_nesting;
	union rcu_special rcu_read_unlock_special;
	struct list_head rcu_node_entry;
	struct rcu_node *rcu_blocked_node;
#endif /* #ifdef CONFIG_PREEMPT_RCU */
#ifdef CONFIG_TASKS_RCU
	unsigned long rcu_tasks_nvcsw;
	bool rcu_tasks_holdout;
	struct list_head rcu_tasks_holdout_list;
	int rcu_tasks_idle_cpu;
#endif /* #ifdef CONFIG_TASKS_RCU */

#ifdef CONFIG_SCHED_INFO
	struct sched_info sched_info;

	struct list_head tasks;

	struct plist_node pushable_tasks;
	struct rb_node pushable_dl_tasks;

	struct mm_struct *mm, *active_mm;

	/* Per-thread vma caching: */
	struct vmacache vmacache;

#if defined(SPLIT_RSS_COUNTING)
	struct task_rss_stat rss_stat;

	int exit_code, exit_signal;
	int pdeath_signal;	/* The signal sent when the parent dies */
	unsigned long jobctl;	/* JOBCTL_*, siglock protected */

	/* Used for emulating ABI behavior of previous Linux versions */
	unsigned int personality;
	/* scheduler bits, serialized by scheduler locks */
	unsigned sched_reset_on_fork:1;
	unsigned sched_contributes_to_load:1;
	unsigned sched_migrated:1;
	unsigned sched_remote_wakeup:1;
	unsigned :0;		/* force alignment to the next boundary */

	/* unserialized, strictly 'current' */
	unsigned in_execve:1;	/* bit to tell LSMs we're in execve */
	unsigned in_iowait:1;
#if !defined(TIF_RESTORE_SIGMASK)
	unsigned restore_sigmask:1;
	unsigned memcg_may_oom:1;
	unsigned memcg_kmem_skip_account:1;
#ifdef CONFIG_COMPAT_BRK
	unsigned brk_randomized:1;

	unsigned long atomic_flags;	/* Flags needing atomic access. */

	struct restart_block restart_block;

#ifdef CONFIG_CC_STACKPROTECTOR
	/* Canary value for the -fstack-protector gcc feature */
	unsigned long stack_canary;
	/*
	 * pointers to (original) parent process, youngest child, younger sibling,
	 * older sibling, respectively. (p->father can be replaced with
	 * p->real_parent->pid)
	 */
	struct task_struct __rcu *real_parent;	/* real parent process */
	struct task_struct __rcu *parent;	/* recipient of SIGCHLD, wait4() reports */
	/*
	 * children/sibling forms the list of my natural children
	 */
	struct list_head children;	/* list of my children */
	struct list_head sibling;	/* linkage in my parent's children list */
	struct task_struct *group_leader;	/* threadgroup leader */

	/*
	 * ptraced is the list of tasks this task is using ptrace on.
	 * This includes both natural children and PTRACE_ATTACH targets.
	 * p->ptrace_entry is p's link on the p->parent->ptraced list.
	 */
	struct list_head ptraced;
	struct list_head ptrace_entry;

	/* PID/PID hash table linkage. */
	struct pid_link pids[PIDTYPE_MAX];
	struct list_head thread_group;
	struct list_head thread_node;

	struct completion *vfork_done;	/* for vfork() */
	int __user *set_child_tid;	/* CLONE_CHILD_SETTID */
	int __user *clear_child_tid;	/* CLONE_CHILD_CLEARTID */
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
	u64 utimescaled, stimescaled;

	struct prev_cputime prev_cputime;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
	seqcount_t vtime_seqcount;
	unsigned long long vtime_snap;

	/* Task is sleeping or running in a CPU with VTIME inactive */
	/* Task runs in userspace in a CPU with VTIME active */
	/* Task runs in kernelspace in a CPU with VTIME active */

#ifdef CONFIG_NO_HZ_FULL
	atomic_t tick_dep_mask;

	unsigned long nvcsw, nivcsw;	/* context switch counts */

	u64 start_time;		/* monotonic time in nsec */
	u64 real_start_time;	/* boot based time in nsec */
	/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
	unsigned long min_flt, maj_flt;

#ifdef CONFIG_POSIX_TIMERS
	struct task_cputime cputime_expires;
	struct list_head cpu_timers[3];
	/* process credentials */
	const struct cred __rcu *ptracer_cred;	/* Tracer's credentials at attach */
	const struct cred __rcu *real_cred;	/* objective and real subjective task
						 * credentials (COW) */
	const struct cred __rcu *cred;		/* effective (overridable) subjective task
						 * credentials (COW) */
	char comm[TASK_COMM_LEN];	/* executable name excluding path
					   - access with [gs]et_task_comm (which lock
					     it with task_lock())
					   - initialized normally by setup_new_exec */
	/* file system info */
	struct nameidata *nameidata;
#ifdef CONFIG_SYSVIPC
	struct sysv_sem sysvsem;
	struct sysv_shm sysvshm;

#ifdef CONFIG_DETECT_HUNG_TASK
	/* hung task detection */
	unsigned long last_switch_count;

	/* filesystem information */
	struct fs_struct *fs;
	/* open file information */
	struct files_struct *files;

	struct nsproxy *nsproxy;
	/* signal handlers */
	struct signal_struct *signal;
	struct sighand_struct *sighand;

	sigset_t blocked, real_blocked;
	sigset_t saved_sigmask;	/* restored if set_restore_sigmask() was used */
	struct sigpending pending;

	unsigned long sas_ss_sp;
	unsigned sas_ss_flags;

	struct callback_head *task_works;

	struct audit_context *audit_context;
#ifdef CONFIG_AUDITSYSCALL
	unsigned int sessionid;

	struct seccomp seccomp;
	/* Thread group tracking */

	/* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed,
	 * mempolicy */
	spinlock_t alloc_lock;

	/* Protection of the PI data structures: */
	raw_spinlock_t pi_lock;

	struct wake_q_node wake_q;

#ifdef CONFIG_RT_MUTEXES
	/* PI waiters blocked on a rt_mutex held by this task */
	struct rb_root pi_waiters;
	struct rb_node *pi_waiters_leftmost;
	/* Deadlock detection and priority inheritance handling */
	struct rt_mutex_waiter *pi_blocked_on;

#ifdef CONFIG_DEBUG_MUTEXES
	/* mutex deadlock detection */
	struct mutex_waiter *blocked_on;

#ifdef CONFIG_TRACE_IRQFLAGS
	unsigned int irq_events;
	unsigned long hardirq_enable_ip;
	unsigned long hardirq_disable_ip;
	unsigned int hardirq_enable_event;
	unsigned int hardirq_disable_event;
	int hardirqs_enabled;
	unsigned long softirq_disable_ip;
	unsigned long softirq_enable_ip;
	unsigned int softirq_disable_event;
	unsigned int softirq_enable_event;
	int softirqs_enabled;

#ifdef CONFIG_LOCKDEP
# define MAX_LOCK_DEPTH 48UL
	unsigned int lockdep_recursion;
	struct held_lock held_locks[MAX_LOCK_DEPTH];
	gfp_t lockdep_reclaim_gfp;

	unsigned int in_ubsan;
	/* journalling filesystem info */

	/* stacked block device info */
	struct bio_list *bio_list;

	struct blk_plug *plug;

	struct reclaim_state *reclaim_state;

	struct backing_dev_info *backing_dev_info;

	struct io_context *io_context;

	unsigned long ptrace_message;
	siginfo_t *last_siginfo;	/* For ptrace use. */
	struct task_io_accounting ioac;
#if defined(CONFIG_TASK_XACCT)
	u64 acct_rss_mem1;	/* accumulated rss usage */
	u64 acct_vm_mem1;	/* accumulated virtual memory usage */
	u64 acct_timexpd;	/* stime + utime since last update */

#ifdef CONFIG_CPUSETS
	nodemask_t mems_allowed;	/* Protected by alloc_lock */
	seqcount_t mems_allowed_seq;	/* Sequence no to catch updates */
	int cpuset_mem_spread_rotor;
	int cpuset_slab_spread_rotor;

#ifdef CONFIG_CGROUPS
	/* Control Group info protected by css_set_lock */
	struct css_set __rcu *cgroups;
	/* cg_list protected by css_set_lock and tsk->alloc_lock */
	struct list_head cg_list;

#ifdef CONFIG_INTEL_RDT_A

	struct robust_list_head __user *robust_list;
	struct compat_robust_list_head __user *compat_robust_list;
	struct list_head pi_state_list;
	struct futex_pi_state *pi_state_cache;

#ifdef CONFIG_PERF_EVENTS
	struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
	struct mutex perf_event_mutex;
	struct list_head perf_event_list;

#ifdef CONFIG_DEBUG_PREEMPT
	unsigned long preempt_disable_ip;

	struct mempolicy *mempolicy;	/* Protected by alloc_lock */
	short pref_node_fork;
#ifdef CONFIG_NUMA_BALANCING
	unsigned int numa_scan_period;
	unsigned int numa_scan_period_max;
	int numa_preferred_nid;
	unsigned long numa_migrate_retry;
	u64 node_stamp;			/* migration stamp */
	u64 last_task_numa_placement;
	u64 last_sum_exec_runtime;
	struct callback_head numa_work;

	struct list_head numa_entry;
	struct numa_group *numa_group;

	/*
	 * numa_faults is an array split into four regions:
	 * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer
	 * in this precise order.
	 *
	 * faults_memory: Exponential decaying average of faults on a per-node
	 * basis. Scheduling placement decisions are made based on these
	 * counts. The values remain static for the duration of a PTE scan.
	 * faults_cpu: Track the nodes the process was running on when a NUMA
	 * hinting fault was incurred.
	 * faults_memory_buffer and faults_cpu_buffer: Record faults per node
	 * during the current scan window. When the scan completes, the counts
	 * in faults_memory and faults_cpu decay and these values are copied.
	 */
	unsigned long *numa_faults;
	unsigned long total_numa_faults;

	/*
	 * numa_faults_locality tracks if faults recorded during the last
	 * scan window were remote/local or failed to migrate. The task scan
	 * period is adapted based on the locality of the faults with different
	 * weights depending on whether they were shared or private faults
	 */
	unsigned long numa_faults_locality[3];

	unsigned long numa_pages_migrated;
#endif /* CONFIG_NUMA_BALANCING */
	struct tlbflush_unmap_batch tlb_ubc;

	/*
	 * cache last used pipe for splice
	 */
	struct pipe_inode_info *splice_pipe;

	struct page_frag task_frag;

#ifdef CONFIG_TASK_DELAY_ACCT
	struct task_delay_info *delays;

#ifdef CONFIG_FAULT_INJECTION

	/*
	 * when (nr_dirtied >= nr_dirtied_pause), it's time to call
	 * balance_dirty_pages() for some dirty throttling pause
	 */
	int nr_dirtied_pause;
	unsigned long dirty_paused_when;	/* start of a write-and-pause period */

#ifdef CONFIG_LATENCYTOP
	int latency_record_count;
	struct latency_record latency_record[LT_SAVECOUNT];

	/*
	 * time slack values; these are used to round up poll() and
	 * select() etc timeout values. These are in nanoseconds.
	 */
	u64 default_timer_slack_ns;

	unsigned int kasan_depth;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* Index of current stored address in ret_stack */
	/* Stack of return addresses for return function tracing */
	struct ftrace_ret_stack *ret_stack;
	/* time stamp for last schedule */
	unsigned long long ftrace_timestamp;
	/*
	 * Number of functions that haven't been traced
	 * because of depth overrun.
	 */
	atomic_t trace_overrun;
	/* Pause for the tracing */
	atomic_t tracing_graph_pause;
#ifdef CONFIG_TRACING
	/* state flags for use by tracers */
	/* bitmask and counter of trace recursion */
	unsigned long trace_recursion;
#endif /* CONFIG_TRACING */

	/* Coverage collection mode enabled for this task (0 if disabled). */
	enum kcov_mode kcov_mode;
	/* Size of the kcov_area. */
	/* Buffer for coverage collection. */
	/* kcov descriptor wired with this task or NULL. */

	struct mem_cgroup *memcg_in_oom;
	gfp_t memcg_oom_gfp_mask;

	/* number of pages to reclaim on returning to userland */
	unsigned int memcg_nr_pages_over_high;

#ifdef CONFIG_UPROBES
	struct uprobe_task *utask;

#if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
	unsigned int sequential_io;
	unsigned int sequential_io_avg;

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
	unsigned long task_state_change;

	int pagefault_disabled;

	struct task_struct *oom_reaper_list;
#ifdef CONFIG_VMAP_STACK
	struct vm_struct *stack_vm_area;

#ifdef CONFIG_THREAD_INFO_IN_TASK
	/* A live task holds one reference. */
	atomic_t stack_refcount;

	/* CPU-specific state of this task */
	struct thread_struct thread;

	/*
	 * WARNING: on x86, 'thread_struct' contains a variable-sized
	 * structure. It *MUST* be at the end of 'task_struct'.
	 *
	 * Do not put anything below here!
	 */
static inline struct pid *task_pid(struct task_struct *task)
{
	return task->pids[PIDTYPE_PID].pid;
}

static inline struct pid *task_tgid(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_PID].pid;
}

/*
 * Without tasklist or rcu lock it is not safe to dereference
 * the result of task_pgrp/task_session even if task == current,
 * we can race with another thread doing sys_setsid/sys_setpgid.
 */
static inline struct pid *task_pgrp(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_PGID].pid;
}

static inline struct pid *task_session(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_SID].pid;
}
/*
 * the helpers to get the task's different pids as they are seen
 * from various namespaces
 *
 * task_xid_nr()     : global id, i.e. the id seen from the init namespace;
 * task_xid_vnr()    : virtual id, i.e. the id seen from the pid namespace of
 *                     current.
 * task_xid_nr_ns()  : id seen from the ns specified;
 *
 * set_task_vxid()   : assigns a virtual id to a task;
 *
 * see also pid_nr() etc in include/linux/pid.h
 */

pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
			struct pid_namespace *ns);
static inline pid_t task_pid_nr(struct task_struct *tsk)
{
	return tsk->pid;
}
static inline pid_t task_pid_nr_ns(struct task_struct *tsk,
					struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
}

static inline pid_t task_pid_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
}
static inline pid_t task_tgid_nr(struct task_struct *tsk)
{
	return tsk->tgid;
}
pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);

static inline pid_t task_tgid_vnr(struct task_struct *tsk)
{
	return pid_vnr(task_tgid(tsk));
}
static inline int pid_alive(const struct task_struct *p);

static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
{
	pid_t pid = 0;

	rcu_read_lock();
	if (pid_alive(tsk))
		pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
	rcu_read_unlock();

	return pid;
}
static inline pid_t task_ppid_nr(const struct task_struct *tsk)
{
	return task_ppid_nr_ns(tsk, &init_pid_ns);
}

static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
					struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
}

static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
}

static inline pid_t task_session_nr_ns(struct task_struct *tsk,
					struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
}

static inline pid_t task_session_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
}
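
/*
 * Illustrative sketch (an addition, not part of the original header): the
 * most common uses of the helpers above, e.g. from a /proc handler:
 *
 *	pid_t gpid = task_pid_nr(current);	// id in the init namespace
 *	pid_t vpid = task_pid_vnr(current);	// id in current's pid namespace
 *	pid_t sid  = task_session_vnr(current);
 */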
/* obsolete, do not use */
static inline pid_t task_pgrp_nr(struct task_struct *tsk)
{
	return task_pgrp_nr_ns(tsk, &init_pid_ns);
}

/**
 * pid_alive - check that a task structure is not stale
 * @p: Task structure to be checked.
 *
 * Test if a process is not yet dead (at most zombie state)
 * If pid_alive fails, then pointers within the task structure
 * can be stale and must not be dereferenced.
 *
 * Return: 1 if the process is alive. 0 otherwise.
 */
static inline int pid_alive(const struct task_struct *p)
{
	return p->pids[PIDTYPE_PID].pid != NULL;
}

/**
 * is_global_init - check if a task structure is init. Since init
 * is free to have sub-threads we need to check tgid.
 * @tsk: Task structure to be checked.
 *
 * Check if a task structure is the first user space task the kernel created.
 *
 * Return: 1 if the task structure is init. 0 otherwise.
 */
static inline int is_global_init(struct task_struct *tsk)
{
	return task_tgid_nr(tsk) == 1;
}

extern struct pid *cad_pid;
#define PF_IDLE			0x00000002	/* I am an IDLE thread */
#define PF_EXITING		0x00000004	/* getting shut down */
#define PF_EXITPIDONE		0x00000008	/* pi exit done on shut down */
#define PF_VCPU			0x00000010	/* I'm a virtual CPU */
#define PF_WQ_WORKER		0x00000020	/* I'm a workqueue worker */
#define PF_FORKNOEXEC		0x00000040	/* forked but didn't exec */
#define PF_MCE_PROCESS		0x00000080	/* process policy on mce errors */
#define PF_SUPERPRIV		0x00000100	/* used super-user privileges */
#define PF_DUMPCORE		0x00000200	/* dumped core */
#define PF_SIGNALED		0x00000400	/* killed by a signal */
#define PF_MEMALLOC		0x00000800	/* Allocating memory */
#define PF_NPROC_EXCEEDED	0x00001000	/* set_user noticed that RLIMIT_NPROC was exceeded */
#define PF_USED_MATH		0x00002000	/* if unset the fpu must be initialized before use */
#define PF_USED_ASYNC		0x00004000	/* used async_schedule*(), used by module init */
#define PF_NOFREEZE		0x00008000	/* this thread should not be frozen */
#define PF_FROZEN		0x00010000	/* frozen for system suspend */
#define PF_FSTRANS		0x00020000	/* inside a filesystem transaction */
#define PF_KSWAPD		0x00040000	/* I am kswapd */
#define PF_MEMALLOC_NOIO	0x00080000	/* Allocating memory without IO involved */
#define PF_LESS_THROTTLE	0x00100000	/* Throttle me less: I clean memory */
#define PF_KTHREAD		0x00200000	/* I am a kernel thread */
#define PF_RANDOMIZE		0x00400000	/* randomize virtual address space */
#define PF_SWAPWRITE		0x00800000	/* Allowed to write to swap */
#define PF_NO_SETAFFINITY	0x04000000	/* Userland is not allowed to meddle with cpus_allowed */
#define PF_MCE_EARLY		0x08000000	/* Early kill for mce process policy */
#define PF_MUTEX_TESTER		0x20000000	/* Thread belongs to the rt mutex tester */
#define PF_FREEZER_SKIP		0x40000000	/* Freezer should not count it as freezable */
#define PF_SUSPEND_TASK		0x80000000	/* this thread called freeze_processes and should not be frozen */
/*
 * Only the _current_ task can read/write to tsk->flags, but other
 * tasks can access tsk->flags in readonly mode for example
 * with tsk_used_math (like during threaded core dumping).
 * There is however an exception to this rule during ptrace
 * or during fork: the ptracer task is allowed to write to the
 * child->flags of its traced child (same goes for fork, the parent
 * can write to the child->flags), because we're guaranteed the
 * child is not running and in turn not changing child->flags
 * at the same time the parent does it.
 */
#define clear_stopped_child_used_math(child)	do { (child)->flags &= ~PF_USED_MATH; } while (0)
#define set_stopped_child_used_math(child)	do { (child)->flags |= PF_USED_MATH; } while (0)
#define clear_used_math()			clear_stopped_child_used_math(current)
#define set_used_math()				set_stopped_child_used_math(current)
#define conditional_stopped_child_used_math(condition, child) \
	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
#define conditional_used_math(condition) \
	conditional_stopped_child_used_math(condition, current)
#define copy_to_stopped_child_used_math(child) \
	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
#define tsk_used_math(p)			((p)->flags & PF_USED_MATH)
#define used_math()				tsk_used_math(current)
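
/*
 * Illustrative sketch (an addition, not part of the original header): PF_*
 * bits are plain flag tests on tsk->flags, subject to the access rules
 * described above:
 *
 *	if (current->flags & PF_KTHREAD)
 *		return;			// kernel threads have no user mm
 *	if (!used_math())
 *		return;			// FPU state not yet initialized
 */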
/* Per-process atomic flags. */
#define PFA_NO_NEW_PRIVS	0	/* May not gain new privileges. */
#define PFA_SPREAD_PAGE		1	/* Spread page cache over cpuset */
#define PFA_SPREAD_SLAB		2	/* Spread some slab caches over cpuset */
#define PFA_LMK_WAITING		3	/* Lowmemorykiller is waiting */

#define TASK_PFA_TEST(name, func)					\
	static inline bool task_##func(struct task_struct *p)		\
	{ return test_bit(PFA_##name, &p->atomic_flags); }
#define TASK_PFA_SET(name, func)					\
	static inline void task_set_##func(struct task_struct *p)	\
	{ set_bit(PFA_##name, &p->atomic_flags); }
#define TASK_PFA_CLEAR(name, func)					\
	static inline void task_clear_##func(struct task_struct *p)	\
	{ clear_bit(PFA_##name, &p->atomic_flags); }

TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)

TASK_PFA_TEST(SPREAD_PAGE, spread_page)
TASK_PFA_SET(SPREAD_PAGE, spread_page)
TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)

TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
TASK_PFA_SET(SPREAD_SLAB, spread_slab)
TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)

TASK_PFA_TEST(LMK_WAITING, lmk_waiting)
TASK_PFA_SET(LMK_WAITING, lmk_waiting)
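
/*
 * Illustrative note (an addition, not part of the original header): each
 * TASK_PFA_* line above expands to a tiny inline helper, e.g.
 * TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs) becomes:
 *
 *	static inline bool task_no_new_privs(struct task_struct *p)
 *	{ return test_bit(PFA_NO_NEW_PRIVS, &p->atomic_flags); }
 *
 * so callers simply write: if (task_no_new_privs(current)) ...
 */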
static inline void tsk_restore_flags(struct task_struct *task,
				unsigned long orig_flags, unsigned long flags)
{
	task->flags &= ~flags;
	task->flags |= orig_flags & flags;
}
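
/*
 * Illustrative sketch (an addition, not part of the original header):
 * tsk_restore_flags() is meant for save/set/restore sequences of PF_* bits,
 * e.g. temporarily forbidding I/O during an allocation:
 *
 *	unsigned long pflags = current->flags;
 *
 *	current->flags |= PF_MEMALLOC_NOIO;
 *	// ... allocate memory ...
 *	tsk_restore_flags(current, pflags, PF_MEMALLOC_NOIO);
 */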
extern int cpuset_cpumask_can_shrink(const struct cpumask *cur,
				     const struct cpumask *trial);
extern int task_can_attach(struct task_struct *p,
			   const struct cpumask *cs_cpus_allowed);

#ifdef CONFIG_SMP
extern void do_set_cpus_allowed(struct task_struct *p,
			       const struct cpumask *new_mask);
extern int set_cpus_allowed_ptr(struct task_struct *p,
				const struct cpumask *new_mask);
#else
static inline void do_set_cpus_allowed(struct task_struct *p,
				      const struct cpumask *new_mask)
{
}
static inline int set_cpus_allowed_ptr(struct task_struct *p,
				       const struct cpumask *new_mask)
{
	if (!cpumask_test_cpu(0, new_mask))
		return -EINVAL;
	return 0;
}
#endif
#ifndef cpu_relax_yield
#define cpu_relax_yield() cpu_relax()
#endif
/* sched_exec is called by processes performing an exec */
#ifdef CONFIG_SMP
extern void sched_exec(void);
#else
#define sched_exec()			{}
#endif
extern int yield_to(struct task_struct *p, bool preempt);
extern void set_user_nice(struct task_struct *p, long nice);
extern int task_prio(const struct task_struct *p);

/**
 * task_nice - return the nice value of a given task.
 * @p: the task in question.
 *
 * Return: The nice value [ -20 ... 0 ... 19 ].
 */
static inline int task_nice(const struct task_struct *p)
{
	return PRIO_TO_NICE((p)->static_prio);
}

extern int can_nice(const struct task_struct *p, const int nice);
extern int task_curr(const struct task_struct *p);
extern int idle_cpu(int cpu);
extern int sched_setscheduler(struct task_struct *, int,
			      const struct sched_param *);
extern int sched_setscheduler_nocheck(struct task_struct *, int,
				      const struct sched_param *);
extern int sched_setattr(struct task_struct *,
			 const struct sched_attr *);
extern struct task_struct *idle_task(int cpu);

/**
 * is_idle_task - is the specified task an idle task?
 * @p: the task in question.
 *
 * Return: 1 if @p is an idle task. 0 otherwise.
 */
static inline bool is_idle_task(const struct task_struct *p)
{
	return !!(p->flags & PF_IDLE);
}

extern struct task_struct *curr_task(int cpu);
extern void ia64_set_curr_task(int cpu, struct task_struct *p);
union thread_union {
#ifndef CONFIG_THREAD_INFO_IN_TASK
	struct thread_info thread_info;
#endif
	unsigned long stack[THREAD_SIZE/sizeof(long)];
};
#ifdef CONFIG_THREAD_INFO_IN_TASK
static inline struct thread_info *task_thread_info(struct task_struct *task)
{
	return &task->thread_info;
}
#elif !defined(__HAVE_THREAD_FUNCTIONS)
# define task_thread_info(task)	((struct thread_info *)(task)->stack)
#endif
extern struct pid_namespace init_pid_ns;

/*
 * find a task by one of its numerical ids
 *
 * find_task_by_pid_ns():
 *      finds a task by its pid in the specified namespace
 * find_task_by_vpid():
 *      finds a task by its virtual pid
 *
 * see also find_vpid() etc in include/linux/pid.h
 */

extern struct task_struct *find_task_by_vpid(pid_t nr);
extern struct task_struct *find_task_by_pid_ns(pid_t nr,
		struct pid_namespace *ns);
extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
extern void wake_up_new_task(struct task_struct *tsk);
#ifdef CONFIG_SMP
extern void kick_process(struct task_struct *tsk);
#else
static inline void kick_process(struct task_struct *tsk) { }
#endif
extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);
static inline void set_task_comm(struct task_struct *tsk, const char *from)
{
	__set_task_comm(tsk, from, false);
}
extern char *get_task_comm(char *to, struct task_struct *tsk);
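
/*
 * Illustrative sketch (an addition, not part of the original header):
 * reading a task's name safely; get_task_comm() takes the task lock and
 * copies at most TASK_COMM_LEN bytes, so the buffer must be that big:
 *
 *	char comm[TASK_COMM_LEN];
 *
 *	get_task_comm(comm, tsk);
 *	pr_info("task %s\n", comm);
 */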
#ifdef CONFIG_SMP
void scheduler_ipi(void);
extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
#else
static inline void scheduler_ipi(void) { }
static inline unsigned long wait_task_inactive(struct task_struct *p,
					       long match_state)
{
	return 1;
}
#endif
/* set thread flags in other task's structures
 * - see asm/thread_info.h for TIF_xxxx flags available
 */
static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void set_tsk_need_resched(struct task_struct *tsk)
{
	set_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
}

static inline void clear_tsk_need_resched(struct task_struct *tsk)
{
	clear_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
}

static inline int test_tsk_need_resched(struct task_struct *tsk)
{
	return unlikely(test_tsk_thread_flag(tsk, TIF_NEED_RESCHED));
}
/*
 * cond_resched() and cond_resched_lock(): latency reduction via
 * explicit rescheduling in places that are safe. The return
 * value indicates whether a reschedule was done in fact.
 * cond_resched_lock() will drop the spinlock before scheduling,
 * cond_resched_softirq() will enable bhs before scheduling.
 */
#ifndef CONFIG_PREEMPT
extern int _cond_resched(void);
#else
static inline int _cond_resched(void) { return 0; }
#endif

#define cond_resched() ({			\
	___might_sleep(__FILE__, __LINE__, 0);	\
	_cond_resched();			\
})

extern int __cond_resched_lock(spinlock_t *lock);

#define cond_resched_lock(lock) ({				\
	___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
	__cond_resched_lock(lock);				\
})

extern int __cond_resched_softirq(void);

#define cond_resched_softirq() ({					\
	___might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET);	\
	__cond_resched_softirq();					\
})
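
/*
 * Illustrative sketch (an addition, not part of the original header): a
 * long-running loop in process context should offer to reschedule
 * periodically:
 *
 *	for (i = 0; i < nr_items; i++) {
 *		process_item(i);		// hypothetical per-item work
 *		cond_resched();
 *	}
 */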
static inline void cond_resched_rcu(void)
{
#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
	rcu_read_unlock();
	cond_resched();
	rcu_read_lock();
#endif
}
/*
 * Does a critical section need to be broken due to another
 * task waiting?: (technically does not depend on CONFIG_PREEMPT,
 * but a general need for low latency)
 */
static inline int spin_needbreak(spinlock_t *lock)
{
#ifdef CONFIG_PREEMPT
	return spin_is_contended(lock);
#else
	return 0;
#endif
}
static __always_inline bool need_resched(void)
{
	return unlikely(tif_need_resched());
}
/*
 * Wrappers for p->thread_info->cpu access. No-op on UP.
 */
#ifdef CONFIG_SMP

static inline unsigned int task_cpu(const struct task_struct *p)
{
#ifdef CONFIG_THREAD_INFO_IN_TASK
	return p->cpu;
#else
	return task_thread_info(p)->cpu;
#endif
}

static inline int task_node(const struct task_struct *p)
{
	return cpu_to_node(task_cpu(p));
}
extern void set_task_cpu(struct task_struct *p, unsigned int cpu);

#else

static inline unsigned int task_cpu(const struct task_struct *p)
{
	return 0;
}

static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
{
}

#endif /* CONFIG_SMP */
/*
 * In order to reduce various lock holder preemption latencies provide an
 * interface to see if a vCPU is currently running or not.
 *
 * This allows us to terminate optimistic spin loops and block, analogous to
 * the native optimistic spin heuristic of testing if the lock owner task is
 * running or not.
 */
#ifndef vcpu_is_preempted
# define vcpu_is_preempted(cpu)	false
#endif
extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
extern long sched_getaffinity(pid_t pid, struct cpumask *mask);

extern int task_can_switch_user(struct user_struct *up,
				struct task_struct *tsk);
#ifndef TASK_SIZE_OF
#define TASK_SIZE_OF(tsk)	TASK_SIZE
#endif