#include <uapi/linux/sched.h>

#include <linux/sched/prio.h>

#include <linux/mutex.h>
#include <linux/plist.h>
#include <linux/mm_types_task.h>

#include <linux/sem.h>
#include <linux/shm.h>
#include <linux/signal.h>
#include <linux/signal_types.h>
#include <linux/pid.h>
#include <linux/seccomp.h>
#include <linux/rculist.h>
#include <linux/rtmutex.h>

#include <linux/resource.h>
#include <linux/hrtimer.h>
#include <linux/kcov.h>
#include <linux/task_io_accounting.h>
#include <linux/latencytop.h>
#include <linux/gfp.h>
#include <linux/topology.h>
#include <linux/magic.h>

#include <asm/current.h>
/* task_struct member predeclarations: */
struct backing_dev_info;
struct futex_pi_state;
struct perf_event_context;
struct pipe_inode_info;
struct robust_list_head;
struct sighand_struct;
struct task_delay_info;
/*
 * Task state bitmask. NOTE! These bits are also
 * encoded in fs/proc/array.c: get_task_state().
 *
 * We have two separate sets of flags: task->state
 * is about runnability, while task->exit_state is
 * about the task exiting. Confusing, but this way
 * modifying one set can't modify the other one by
 * mistake.
 */
#define TASK_RUNNING		0
#define TASK_INTERRUPTIBLE	1
#define TASK_UNINTERRUPTIBLE	2
#define __TASK_STOPPED		4
#define __TASK_TRACED		8
/* in tsk->exit_state */
#define EXIT_DEAD		16
#define EXIT_ZOMBIE		32
#define EXIT_TRACE		(EXIT_ZOMBIE | EXIT_DEAD)
/* in tsk->state again */
#define TASK_DEAD		64
#define TASK_WAKEKILL		128
#define TASK_WAKING		256
#define TASK_PARKED		512
#define TASK_NOLOAD		1024
#define TASK_NEW		2048
#define TASK_STATE_MAX		4096

#define TASK_STATE_TO_CHAR_STR	"RSDTtXZxKWPNn"
/* Convenience macros for the sake of set_current_state */
#define TASK_KILLABLE		(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
#define TASK_STOPPED		(TASK_WAKEKILL | __TASK_STOPPED)
#define TASK_TRACED		(TASK_WAKEKILL | __TASK_TRACED)

#define TASK_IDLE		(TASK_UNINTERRUPTIBLE | TASK_NOLOAD)

/* Convenience macros for the sake of wake_up */
#define TASK_NORMAL		(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
#define TASK_ALL		(TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)
/* get_task_state() */
#define TASK_REPORT		(TASK_RUNNING | TASK_INTERRUPTIBLE | \
				 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
				 __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD)

#define task_is_traced(task)	((task->state & __TASK_TRACED) != 0)
#define task_is_stopped(task)	((task->state & __TASK_STOPPED) != 0)
#define task_is_stopped_or_traced(task) \
			((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
#define task_contributes_to_load(task) \
				((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
				 (task->flags & PF_FROZEN) == 0 && \
				 (task->state & TASK_NOLOAD) == 0)
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP

#define __set_current_state(state_value)			\
	do {							\
		current->task_state_change = _THIS_IP_;		\
		current->state = (state_value);			\
	} while (0)
#define set_current_state(state_value)				\
	do {							\
		current->task_state_change = _THIS_IP_;		\
		smp_store_mb(current->state, (state_value));	\
	} while (0)

#else
/*
 * set_current_state() includes a barrier so that the write of current->state
 * is correctly serialised wrt the caller's subsequent test of whether to
 * actually sleep:
 *
 *   for (;;) {
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	if (!need_sleep)
 *		break;
 *
 *	schedule();
 *   }
 *   __set_current_state(TASK_RUNNING);
 *
 * If the caller does not need such serialisation (because, for instance, the
 * condition test and condition change and wakeup are under the same lock) then
 * use __set_current_state().
 *
 * The above is typically ordered against the wakeup, which does:
 *
 *	need_sleep = false;
 *	wake_up_state(p, TASK_UNINTERRUPTIBLE);
 *
 * Where wake_up_state() (and all other wakeup primitives) imply enough
 * barriers to order the store of the variable against wakeup.
 *
 * Wakeup will do: if (@state & p->state) p->state = TASK_RUNNING, that is,
 * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a
 * TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING).
 *
 * This is obviously fine, since they both store the exact same value.
 *
 * Also see the comments of try_to_wake_up().
 */
#define __set_current_state(state_value)		\
	do { current->state = (state_value); } while (0)
#define set_current_state(state_value)			\
	smp_store_mb(current->state, (state_value))

#endif
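
/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * the waiter/waker pattern documented above, written out as a pair of
 * functions. The @need_sleep flag and both function names are hypothetical,
 * used only for illustration.
 *
 *	static bool need_sleep = true;
 *
 *	static void waiter(void)
 *	{
 *		for (;;) {
 *			set_current_state(TASK_UNINTERRUPTIBLE);
 *			if (!need_sleep)
 *				break;
 *			schedule();
 *		}
 *		__set_current_state(TASK_RUNNING);
 *	}
 *
 *	static void waker(struct task_struct *p)
 *	{
 *		need_sleep = false;
 *		wake_up_state(p, TASK_UNINTERRUPTIBLE);
 *	}
 */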
/* Task command name length */
#define TASK_COMM_LEN 16

extern cpumask_var_t cpu_isolated_map;

extern int runqueue_is_locked(int cpu);

extern void scheduler_tick(void);

#define	MAX_SCHEDULE_TIMEOUT	LONG_MAX
extern signed long schedule_timeout(signed long timeout);
extern signed long schedule_timeout_interruptible(signed long timeout);
extern signed long schedule_timeout_killable(signed long timeout);
extern signed long schedule_timeout_uninterruptible(signed long timeout);
extern signed long schedule_timeout_idle(signed long timeout);
asmlinkage void schedule(void);
extern void schedule_preempt_disabled(void);

extern int __must_check io_schedule_prepare(void);
extern void io_schedule_finish(int token);
extern long io_schedule_timeout(long timeout);
extern void io_schedule(void);
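
/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * a bounded, signal-interruptible sleep built from the primitives above.
 * The schedule_timeout*() helpers take and return a timeout in jiffies;
 * zero means the full timeout elapsed, non-zero is the time remaining when
 * we were woken early (e.g. by a signal). msecs_to_jiffies() comes from
 * <linux/jiffies.h>.
 *
 *	signed long remaining;
 *
 *	remaining = schedule_timeout_interruptible(msecs_to_jiffies(100));
 *	if (remaining)
 *		return -EINTR;
 */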
/**
 * struct prev_cputime - snapshot of system and user cputime
 * @utime: time spent in user mode
 * @stime: time spent in system mode
 * @lock: protects the above two fields
 *
 * Stores previous user/system time values such that we can guarantee
 * monotonicity.
 */
struct prev_cputime {
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	u64 utime;
	u64 stime;
	raw_spinlock_t lock;
#endif
};

/**
 * struct task_cputime - collected CPU time counts
 * @utime:		time spent in user mode, in nanoseconds
 * @stime:		time spent in kernel mode, in nanoseconds
 * @sum_exec_runtime:	total time spent on the CPU, in nanoseconds
 *
 * This structure groups together three kinds of CPU time that are tracked for
 * threads and thread groups. Most things considering CPU time want to group
 * these counts together and treat all three of them in parallel.
 */
struct task_cputime {
	u64 utime;
	u64 stime;
	unsigned long long sum_exec_runtime;
};

/* Alternate field names when used to cache expirations. */
#define virt_exp	utime
#define prof_exp	stime
#define sched_exp	sum_exec_runtime
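
/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * combining the counts for reporting. utime + stime is the total
 * user+system time; sum_exec_runtime is the scheduler's own on-CPU count
 * and is tracked separately. The helper name is hypothetical.
 *
 *	static inline u64 task_cputime_total(const struct task_cputime *tc)
 *	{
 *		return tc->utime + tc->stime;
 *	}
 */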
#include <linux/rwsem.h>

#ifdef CONFIG_SCHED_INFO
struct sched_info {
	/* cumulative counters */
	unsigned long pcount;	      /* # of times run on this cpu */
	unsigned long long run_delay; /* time spent waiting on a runqueue */

	/* timestamps */
	unsigned long long last_arrival,/* when we last ran on a cpu */
			   last_queued;	/* when we were last queued to run */
};
#endif /* CONFIG_SCHED_INFO */
/*
 * Integer metrics need fixed point arithmetic, e.g., sched/fair
 * has a few: load, load_avg, util_avg, freq, and capacity.
 *
 * We define a basic fixed point arithmetic range, and then formalize
 * all these metrics based on that basic range.
 */
# define SCHED_FIXEDPOINT_SHIFT	10
# define SCHED_FIXEDPOINT_SCALE	(1L << SCHED_FIXEDPOINT_SHIFT)
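
/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * with SCHED_FIXEDPOINT_SHIFT = 10, the value 1.0 is represented as 1024,
 * so a product of two fixed point values needs a corrective shift:
 *
 *	unsigned long half = SCHED_FIXEDPOINT_SCALE / 2;	(0.5  -> 512)
 *	unsigned long quarter = (half * half) >> SCHED_FIXEDPOINT_SHIFT;
 *								(0.25 -> 256)
 */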
#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
extern void prefetch_stack(struct task_struct *t);
#else
static inline void prefetch_stack(struct task_struct *t) { }
#endif
struct load_weight {
	unsigned long weight;
	u32 inv_weight;
};
/*
 * The load_avg/util_avg accumulates an infinite geometric series
 * (see __update_load_avg() in kernel/sched/fair.c).
 *
 * [load_avg definition]
 *
 *   load_avg = runnable% * scale_load_down(load)
 *
 * where runnable% is the time ratio that a sched_entity is runnable.
 * For cfs_rq, it is the aggregated load_avg of all runnable and
 * blocked sched_entities.
 *
 * load_avg may also take frequency scaling into account:
 *
 *   load_avg = runnable% * scale_load_down(load) * freq%
 *
 * where freq% is the CPU frequency normalized to the highest frequency.
 *
 * [util_avg definition]
 *
 *   util_avg = running% * SCHED_CAPACITY_SCALE
 *
 * where running% is the time ratio that a sched_entity is running on
 * a CPU. For cfs_rq, it is the aggregated util_avg of all runnable
 * and blocked sched_entities.
 *
 * util_avg may also factor frequency scaling and CPU capacity scaling:
 *
 *   util_avg = running% * SCHED_CAPACITY_SCALE * freq% * capacity%
 *
 * where freq% is the same as above, and capacity% is the CPU capacity
 * normalized to the greatest capacity (due to uarch differences, etc).
 *
 * N.B., the above ratios (runnable%, running%, freq%, and capacity%)
 * themselves are in the range of [0, 1]. To do fixed point arithmetic,
 * we therefore scale them to as large a range as necessary. This is for
 * example reflected by util_avg's SCHED_CAPACITY_SCALE.
 *
 * [Overflow issue]
 *
 * The 64-bit load_sum can have 4353082796 (=2^64/47742/88761) entities
 * with the highest load (=88761), always runnable on a single cfs_rq,
 * and should not overflow as the number already hits PID_MAX_LIMIT.
 *
 * For all other cases (including 32-bit kernels), struct load_weight's
 * weight will overflow first before we do, because:
 *
 *   Max(load_avg) <= Max(load.weight)
 *
 * Then it is the load_weight's responsibility to consider overflow
 * issues.
 */
struct sched_avg {
	u64 last_update_time, load_sum;
	u32 util_sum, period_contrib;
	unsigned long load_avg, util_avg;
};
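
/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * the load_avg definition above written out in fixed point. @runnable_pct
 * is a hypothetical ratio already scaled by SCHED_FIXEDPOINT_SCALE (so
 * 1024 == 100% runnable); scale_load_down() is the scheduler's internal
 * weight rescaling referenced in the comment above.
 *
 *	unsigned long load_avg_of(unsigned long weight,
 *				  unsigned long runnable_pct)
 *	{
 *		return (runnable_pct * scale_load_down(weight))
 *				>> SCHED_FIXEDPOINT_SHIFT;
 *	}
 */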
#ifdef CONFIG_SCHEDSTATS
struct sched_statistics {
	s64			sum_sleep_runtime;

	u64			nr_migrations_cold;
	u64			nr_failed_migrations_affine;
	u64			nr_failed_migrations_running;
	u64			nr_failed_migrations_hot;
	u64			nr_forced_migrations;

	u64			nr_wakeups_migrate;
	u64			nr_wakeups_local;
	u64			nr_wakeups_remote;
	u64			nr_wakeups_affine;
	u64			nr_wakeups_affine_attempts;
	u64			nr_wakeups_passive;
};
#endif
struct sched_entity {
	struct load_weight	load;		/* for load-balancing */
	struct rb_node		run_node;
	struct list_head	group_node;

	u64			sum_exec_runtime;
	u64			prev_sum_exec_runtime;

#ifdef CONFIG_SCHEDSTATS
	struct sched_statistics statistics;
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	struct sched_entity	*parent;
	/* rq on which this entity is (to be) queued: */
	struct cfs_rq		*cfs_rq;
	/* rq "owned" by this entity/group: */
	struct cfs_rq		*my_q;
#endif

#ifdef CONFIG_SMP
	/*
	 * Per entity load average tracking.
	 *
	 * Put into separate cache line so it does not
	 * collide with read-mostly values above.
	 */
	struct sched_avg	avg ____cacheline_aligned_in_smp;
#endif
};
struct sched_rt_entity {
	struct list_head	run_list;
	unsigned long		timeout;
	unsigned long		watchdog_stamp;
	unsigned int		time_slice;
	unsigned short		on_rq;
	unsigned short		on_list;

	struct sched_rt_entity	*back;
#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity	*parent;
	/* rq on which this entity is (to be) queued: */
	struct rt_rq		*rt_rq;
	/* rq "owned" by this entity/group: */
	struct rt_rq		*my_q;
#endif
};
struct sched_dl_entity {
	struct rb_node	rb_node;

	/*
	 * Original scheduling parameters. Copied here from sched_attr
	 * during sched_setattr(), they will remain the same until
	 * the next sched_setattr().
	 */
	u64 dl_runtime;		/* maximum runtime for each instance	*/
	u64 dl_deadline;	/* relative deadline of each instance	*/
	u64 dl_period;		/* separation of two instances (period) */
	u64 dl_bw;		/* dl_runtime / dl_deadline		*/

	/*
	 * Actual scheduling parameters. Initialized with the values above,
	 * they are continuously updated during task execution. Note that
	 * the remaining runtime could be < 0 in case we are in overrun.
	 */
	s64 runtime;		/* remaining runtime for this instance	*/
	u64 deadline;		/* absolute deadline for this instance	*/
	unsigned int flags;	/* specifying the scheduler behaviour	*/

	/*
	 * Some bool flags:
	 *
	 * @dl_throttled tells if we exhausted the runtime. If so, the
	 * task has to wait for a replenishment to be performed at the
	 * next firing of dl_timer.
	 *
	 * @dl_boosted tells if we are boosted due to DI. If so we are
	 * outside bandwidth enforcement mechanism (but only until we
	 * exit the critical section);
	 *
	 * @dl_yielded tells if task gave up the cpu before consuming
	 * all its available runtime during the last job.
	 */
	int dl_throttled, dl_boosted, dl_yielded;

	/*
	 * Bandwidth enforcement timer. Each -deadline task has its
	 * own bandwidth to be enforced, thus we need one timer per task.
	 */
	struct hrtimer dl_timer;
};
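
/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * the three parameters above arrive from userspace via sched_setattr(). A
 * typical configuration satisfies runtime <= deadline <= period, e.g. 10ms
 * of runtime every 100ms:
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_DEADLINE,
 *		.sched_runtime	= 10 * 1000 * 1000,	(10ms, in ns)
 *		.sched_deadline	= 100 * 1000 * 1000,	(100ms)
 *		.sched_period	= 100 * 1000 * 1000,	(100ms)
 *	};
 *
 *	sched_setattr(p, &attr);
 */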
union rcu_special {
	struct {
		u8 blocked;
		u8 need_qs;
		u8 exp_need_qs;
		u8 pad;	/* Otherwise the compiler can store garbage here. */
	} b;	/* Bits. */
	u32 s;	/* Set of bits. */
};
enum perf_event_task_context {
	perf_invalid_context = -1,
	perf_hw_context = 0,
	perf_sw_context,
	perf_nr_task_contexts,
};
struct wake_q_node {
	struct wake_q_node *next;
};
struct task_struct {
#ifdef CONFIG_THREAD_INFO_IN_TASK
	/*
	 * For reasons of header soup (see current_thread_info()), this
	 * must be the first element of task_struct.
	 */
	struct thread_info thread_info;
#endif
	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
	unsigned int flags;	/* per process flags, defined below */

#ifdef CONFIG_SMP
	struct llist_node wake_entry;
#ifdef CONFIG_THREAD_INFO_IN_TASK
	unsigned int cpu;	/* current CPU */
#endif
	unsigned int wakee_flips;
	unsigned long wakee_flip_decay_ts;
	struct task_struct *last_wakee;
#endif /* CONFIG_SMP */
	int prio, static_prio, normal_prio;
	unsigned int rt_priority;
	const struct sched_class *sched_class;
	struct sched_entity se;
	struct sched_rt_entity rt;
#ifdef CONFIG_CGROUP_SCHED
	struct task_group *sched_task_group;
#endif
	struct sched_dl_entity dl;

#ifdef CONFIG_PREEMPT_NOTIFIERS
	/* list of struct preempt_notifier: */
	struct hlist_head preempt_notifiers;
#endif

#ifdef CONFIG_BLK_DEV_IO_TRACE
	unsigned int btrace_seq;
#endif

	cpumask_t cpus_allowed;
#ifdef CONFIG_PREEMPT_RCU
	int rcu_read_lock_nesting;
	union rcu_special rcu_read_unlock_special;
	struct list_head rcu_node_entry;
	struct rcu_node *rcu_blocked_node;
#endif /* #ifdef CONFIG_PREEMPT_RCU */
#ifdef CONFIG_TASKS_RCU
	unsigned long rcu_tasks_nvcsw;
	bool rcu_tasks_holdout;
	struct list_head rcu_tasks_holdout_list;
	int rcu_tasks_idle_cpu;
#endif /* #ifdef CONFIG_TASKS_RCU */

#ifdef CONFIG_SCHED_INFO
	struct sched_info sched_info;
#endif

	struct list_head tasks;
#ifdef CONFIG_SMP
	struct plist_node pushable_tasks;
	struct rb_node pushable_dl_tasks;
#endif
	struct mm_struct *mm, *active_mm;

	/* Per-thread vma caching: */
	struct vmacache vmacache;

#if defined(SPLIT_RSS_COUNTING)
	struct task_rss_stat	rss_stat;
#endif

	int exit_code, exit_signal;
	int pdeath_signal;	/* The signal sent when the parent dies */
	unsigned long jobctl;	/* JOBCTL_*, siglock protected */

	/* Used for emulating ABI behavior of previous Linux versions */
	unsigned int personality;
	/* scheduler bits, serialized by scheduler locks */
	unsigned sched_reset_on_fork:1;
	unsigned sched_contributes_to_load:1;
	unsigned sched_migrated:1;
	unsigned sched_remote_wakeup:1;
	unsigned :0; /* force alignment to the next boundary */

	/* unserialized, strictly 'current' */
	unsigned in_execve:1; /* bit to tell LSMs we're in execve */
	unsigned in_iowait:1;
#if !defined(TIF_RESTORE_SIGMASK)
	unsigned restore_sigmask:1;
#endif
#ifdef CONFIG_MEMCG
	unsigned memcg_may_oom:1;
#ifndef CONFIG_SLOB
	unsigned memcg_kmem_skip_account:1;
#endif
#endif
#ifdef CONFIG_COMPAT_BRK
	unsigned brk_randomized:1;
#endif
	unsigned long atomic_flags; /* Flags needing atomic access. */

	struct restart_block restart_block;

	pid_t pid;
	pid_t tgid;

#ifdef CONFIG_CC_STACKPROTECTOR
	/* Canary value for the -fstack-protector gcc feature */
	unsigned long stack_canary;
#endif
	/*
	 * pointers to (original) parent process, youngest child, younger sibling,
	 * older sibling, respectively.  (p->father can be replaced with
	 * p->real_parent->pid)
	 */
	struct task_struct __rcu *real_parent; /* real parent process */
	struct task_struct __rcu *parent; /* recipient of SIGCHLD, wait4() reports */
	/*
	 * children/sibling forms the list of my natural children
	 */
	struct list_head children;	/* list of my children */
	struct list_head sibling;	/* linkage in my parent's children list */
	struct task_struct *group_leader;	/* threadgroup leader */

	/*
	 * ptraced is the list of tasks this task is using ptrace on.
	 * This includes both natural children and PTRACE_ATTACH targets.
	 * p->ptrace_entry is p's link on the p->parent->ptraced list.
	 */
	struct list_head ptraced;
	struct list_head ptrace_entry;
	/* PID/PID hash table linkage. */
	struct pid_link pids[PIDTYPE_MAX];
	struct list_head thread_group;
	struct list_head thread_node;

	struct completion *vfork_done;		/* for vfork() */
	int __user *set_child_tid;		/* CLONE_CHILD_SETTID */
	int __user *clear_child_tid;		/* CLONE_CHILD_CLEARTID */
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
	u64 utimescaled, stimescaled;
#endif
	struct prev_cputime prev_cputime;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
	seqcount_t vtime_seqcount;
	unsigned long long vtime_snap;
	enum {
		/* Task is sleeping or running in a CPU with VTIME inactive */
		VTIME_INACTIVE = 0,
		/* Task runs in userspace in a CPU with VTIME active */
		VTIME_USER,
		/* Task runs in kernelspace in a CPU with VTIME active */
		VTIME_SYS,
	} vtime_snap_whence;
#endif

#ifdef CONFIG_NO_HZ_FULL
	atomic_t tick_dep_mask;
#endif
	unsigned long nvcsw, nivcsw; /* context switch counts */
	u64 start_time;		/* monotonic time in nsec */
	u64 real_start_time;	/* boot based time in nsec */
	/* mm fault and swap info: this can arguably be seen as either
	   mm-specific or thread-specific */
	unsigned long min_flt, maj_flt;

#ifdef CONFIG_POSIX_TIMERS
	struct task_cputime cputime_expires;
	struct list_head cpu_timers[3];
#endif
/* process credentials */
	const struct cred __rcu *ptracer_cred; /* Tracer's credentials at attach */
	const struct cred __rcu *real_cred; /* objective and real subjective task
					 * credentials (COW) */
	const struct cred __rcu *cred;	/* effective (overridable) subjective task
					 * credentials (COW) */
	char comm[TASK_COMM_LEN]; /* executable name excluding path
				     - access with [gs]et_task_comm (which lock
				       it with task_lock())
				     - initialized normally by setup_new_exec */
/* file system info */
	struct nameidata *nameidata;
#ifdef CONFIG_SYSVIPC
/* ipc stuff */
	struct sysv_sem sysvsem;
	struct sysv_shm sysvshm;
#endif
#ifdef CONFIG_DETECT_HUNG_TASK
/* hung task detection */
	unsigned long last_switch_count;
#endif
/* filesystem information */
	struct fs_struct *fs;
/* open file information */
	struct files_struct *files;
/* namespaces */
	struct nsproxy *nsproxy;
/* signal handlers */
	struct signal_struct *signal;
	struct sighand_struct *sighand;

	sigset_t blocked, real_blocked;
	sigset_t saved_sigmask;	/* restored if set_restore_sigmask() was used */
	struct sigpending pending;

	unsigned long sas_ss_sp;
	size_t sas_ss_size;
	unsigned sas_ss_flags;

	struct callback_head *task_works;
	struct audit_context *audit_context;
#ifdef CONFIG_AUDITSYSCALL
	kuid_t loginuid;
	unsigned int sessionid;
#endif
	struct seccomp seccomp;

/* Thread group tracking */
	u32 parent_exec_id;
	u32 self_exec_id;
/* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed,
 * mempolicy */
	spinlock_t alloc_lock;

	/* Protection of the PI data structures: */
	raw_spinlock_t pi_lock;

	struct wake_q_node wake_q;
#ifdef CONFIG_RT_MUTEXES
	/* PI waiters blocked on a rt_mutex held by this task */
	struct rb_root pi_waiters;
	struct rb_node *pi_waiters_leftmost;
	/* Deadlock detection and priority inheritance handling */
	struct rt_mutex_waiter *pi_blocked_on;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
	/* mutex deadlock detection */
	struct mutex_waiter *blocked_on;
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	unsigned int irq_events;
	unsigned long hardirq_enable_ip;
	unsigned long hardirq_disable_ip;
	unsigned int hardirq_enable_event;
	unsigned int hardirq_disable_event;
	int hardirqs_enabled;
	unsigned long softirq_disable_ip;
	unsigned long softirq_enable_ip;
	unsigned int softirq_disable_event;
	unsigned int softirq_enable_event;
	int softirqs_enabled;
#endif

#ifdef CONFIG_LOCKDEP
# define MAX_LOCK_DEPTH 48UL
	unsigned int lockdep_recursion;
	struct held_lock held_locks[MAX_LOCK_DEPTH];
	gfp_t lockdep_reclaim_gfp;
#endif
#ifdef CONFIG_UBSAN
	unsigned int in_ubsan;
#endif
/* journalling filesystem info */
	void *journal_info;

/* stacked block device info */
	struct bio_list *bio_list;

#ifdef CONFIG_BLOCK
/* stack plugging */
	struct blk_plug *plug;
#endif

/* VM state */
	struct reclaim_state *reclaim_state;

	struct backing_dev_info *backing_dev_info;

	struct io_context *io_context;

	unsigned long ptrace_message;
	siginfo_t *last_siginfo; /* For ptrace use.  */
	struct task_io_accounting ioac;
#if defined(CONFIG_TASK_XACCT)
	u64 acct_rss_mem1;	/* accumulated rss usage */
	u64 acct_vm_mem1;	/* accumulated virtual memory usage */
	u64 acct_timexpd;	/* stime + utime since last update */
#endif
#ifdef CONFIG_CPUSETS
	nodemask_t mems_allowed;	/* Protected by alloc_lock */
	seqcount_t mems_allowed_seq;	/* Sequence no to catch updates */
	int cpuset_mem_spread_rotor;
	int cpuset_slab_spread_rotor;
#endif
#ifdef CONFIG_CGROUPS
	/* Control Group info protected by css_set_lock */
	struct css_set __rcu *cgroups;
	/* cg_list protected by css_set_lock and tsk->alloc_lock */
	struct list_head cg_list;
#endif
#ifdef CONFIG_INTEL_RDT_A
	int closid;
#endif
#ifdef CONFIG_FUTEX
	struct robust_list_head __user *robust_list;
#ifdef CONFIG_COMPAT
	struct compat_robust_list_head __user *compat_robust_list;
#endif
	struct list_head pi_state_list;
	struct futex_pi_state *pi_state_cache;
#endif
#ifdef CONFIG_PERF_EVENTS
	struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
	struct mutex perf_event_mutex;
	struct list_head perf_event_list;
#endif
#ifdef CONFIG_DEBUG_PREEMPT
	unsigned long preempt_disable_ip;
#endif
#ifdef CONFIG_NUMA
	struct mempolicy *mempolicy;	/* Protected by alloc_lock */
	short pref_node_fork;
#endif
#ifdef CONFIG_NUMA_BALANCING
	unsigned int numa_scan_period;
	unsigned int numa_scan_period_max;
	int numa_preferred_nid;
	unsigned long numa_migrate_retry;
	u64 node_stamp;			/* migration stamp */
	u64 last_task_numa_placement;
	u64 last_sum_exec_runtime;
	struct callback_head numa_work;

	struct list_head numa_entry;
	struct numa_group *numa_group;

	/*
	 * numa_faults is an array split into four regions:
	 * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer
	 * in this precise order.
	 *
	 * faults_memory: Exponential decaying average of faults on a per-node
	 * basis. Scheduling placement decisions are made based on these
	 * counts. The values remain static for the duration of a PTE scan.
	 * faults_cpu: Track the nodes the process was running on when a NUMA
	 * hinting fault was incurred.
	 * faults_memory_buffer and faults_cpu_buffer: Record faults per node
	 * during the current scan window. When the scan completes, the counts
	 * in faults_memory and faults_cpu decay and these values are copied.
	 */
	unsigned long *numa_faults;
	unsigned long total_numa_faults;

	/*
	 * numa_faults_locality tracks if faults recorded during the last
	 * scan window were remote/local or failed to migrate. The task scan
	 * period is adapted based on the locality of the faults with different
	 * weights depending on whether they were shared or private faults
	 */
	unsigned long numa_faults_locality[3];

	unsigned long numa_pages_migrated;
#endif /* CONFIG_NUMA_BALANCING */
	struct tlbflush_unmap_batch tlb_ubc;

	/*
	 * cache last used pipe for splice
	 */
	struct pipe_inode_info *splice_pipe;

	struct page_frag task_frag;

#ifdef CONFIG_TASK_DELAY_ACCT
	struct task_delay_info *delays;
#endif
#ifdef CONFIG_FAULT_INJECTION
	int make_it_fail;
#endif
	/*
	 * when (nr_dirtied >= nr_dirtied_pause), it's time to call
	 * balance_dirty_pages() for some dirty throttling pause
	 */
	int nr_dirtied;
	int nr_dirtied_pause;
	unsigned long dirty_paused_when; /* start of a write-and-pause period */
#ifdef CONFIG_LATENCYTOP
	int latency_record_count;
	struct latency_record latency_record[LT_SAVECOUNT];
#endif
	/*
	 * time slack values; these are used to round up poll() and
	 * select() etc timeout values. These are in nanoseconds.
	 */
	u64 timer_slack_ns;
	u64 default_timer_slack_ns;

#ifdef CONFIG_KASAN
	unsigned int kasan_depth;
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* Index of current stored address in ret_stack */
	int curr_ret_stack;
	/* Stack of return addresses for return function tracing */
	struct ftrace_ret_stack	*ret_stack;
	/* time stamp for last schedule */
	unsigned long long ftrace_timestamp;
	/*
	 * Number of functions that haven't been traced
	 * because of depth overrun.
	 */
	atomic_t trace_overrun;
	/* Pause for the tracing */
	atomic_t tracing_graph_pause;
#endif
#ifdef CONFIG_TRACING
	/* state flags for use by tracers */
	unsigned long trace;
	/* bitmask and counter of trace recursion */
	unsigned long trace_recursion;
#endif /* CONFIG_TRACING */
#ifdef CONFIG_KCOV
	/* Coverage collection mode enabled for this task (0 if disabled). */
	enum kcov_mode kcov_mode;
	/* Size of the kcov_area. */
	unsigned kcov_size;
	/* Buffer for coverage collection. */
	void *kcov_area;
	/* kcov descriptor wired with this task or NULL. */
	struct kcov *kcov;
#endif
#ifdef CONFIG_MEMCG
	struct mem_cgroup *memcg_in_oom;
	gfp_t memcg_oom_gfp_mask;

	/* number of pages to reclaim on returning to userland */
	unsigned int memcg_nr_pages_over_high;
#endif
#ifdef CONFIG_UPROBES
	struct uprobe_task *utask;
#endif
#if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
	unsigned int	sequential_io;
	unsigned int	sequential_io_avg;
#endif
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
	unsigned long	task_state_change;
#endif
	int pagefault_disabled;
#ifdef CONFIG_MMU
	struct task_struct *oom_reaper_list;
#endif
#ifdef CONFIG_VMAP_STACK
	struct vm_struct *stack_vm_area;
#endif
#ifdef CONFIG_THREAD_INFO_IN_TASK
	/* A live task holds one reference. */
	atomic_t stack_refcount;
#endif
/* CPU-specific state of this task */
	struct thread_struct thread;
/*
 * WARNING: on x86, 'thread_struct' contains a variable-sized
 * structure.  It *MUST* be at the end of 'task_struct'.
 *
 * Do not put anything below here!
 */
};
static inline struct pid *task_pid(struct task_struct *task)
{
	return task->pids[PIDTYPE_PID].pid;
}

static inline struct pid *task_tgid(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_PID].pid;
}
/*
 * Without tasklist or rcu lock it is not safe to dereference
 * the result of task_pgrp/task_session even if task == current,
 * we can race with another thread doing sys_setsid/sys_setpgid.
 */
static inline struct pid *task_pgrp(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_PGID].pid;
}

static inline struct pid *task_session(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_SID].pid;
}
/*
 * the helpers to get the task's different pids as they are seen
 * from various namespaces
 *
 * task_xid_nr()     : global id, i.e. the id seen from the init namespace;
 * task_xid_vnr()    : virtual id, i.e. the id seen from the pid namespace of
 *                     current.
 * task_xid_nr_ns()  : id seen from the ns specified;
 *
 * set_task_vxid()   : assigns a virtual id to a task;
 *
 * see also pid_nr() etc in include/linux/pid.h
 */
pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
			struct pid_namespace *ns);
static inline pid_t task_pid_nr(struct task_struct *tsk)
{
	return tsk->pid;
}

static inline pid_t task_pid_nr_ns(struct task_struct *tsk,
					struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
}

static inline pid_t task_pid_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
}
static inline pid_t task_tgid_nr(struct task_struct *tsk)
{
	return tsk->tgid;
}

pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);

static inline pid_t task_tgid_vnr(struct task_struct *tsk)
{
	return pid_vnr(task_tgid(tsk));
}
static inline int pid_alive(const struct task_struct *p);
static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk,
					struct pid_namespace *ns)
{
	pid_t pid = 0;

	rcu_read_lock();
	if (pid_alive(tsk))
		pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
	rcu_read_unlock();

	return pid;
}
static inline pid_t task_ppid_nr(const struct task_struct *tsk)
{
	return task_ppid_nr_ns(tsk, &init_pid_ns);
}

static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
					struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
}

static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
}

static inline pid_t task_session_nr_ns(struct task_struct *tsk,
					struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
}

static inline pid_t task_session_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
}

/* obsolete, do not use */
static inline pid_t task_pgrp_nr(struct task_struct *tsk)
{
	return task_pgrp_nr_ns(tsk, &init_pid_ns);
}
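
/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * the _nr, _vnr and _nr_ns variants of the helpers above report the same
 * task's id relative to different pid namespaces:
 *
 *	pid_t global = task_pid_nr(p);		(seen from the init namespace)
 *	pid_t virt   = task_pid_vnr(p);		(seen from current's namespace)
 *	pid_t in_ns  = task_pid_nr_ns(p, ns);	(seen from the given @ns)
 */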
/**
 * pid_alive - check that a task structure is not stale
 * @p: Task structure to be checked.
 *
 * Test if a process is not yet dead (at most zombie state)
 * If pid_alive fails, then pointers within the task structure
 * can be stale and must not be dereferenced.
 *
 * Return: 1 if the process is alive. 0 otherwise.
 */
static inline int pid_alive(const struct task_struct *p)
{
	return p->pids[PIDTYPE_PID].pid != NULL;
}

/**
 * is_global_init - check if a task structure is init. Since init
 * is free to have sub-threads we need to check tgid.
 * @tsk: Task structure to be checked.
 *
 * Check if a task structure is the first user space task the kernel created.
 *
 * Return: 1 if the task structure is init. 0 otherwise.
 */
static inline int is_global_init(struct task_struct *tsk)
{
	return task_tgid_nr(tsk) == 1;
}
extern struct pid *cad_pid;

/*
 * Per process flags
 */
#define PF_IDLE		0x00000002	/* I am an IDLE thread */
#define PF_EXITING	0x00000004	/* getting shut down */
#define PF_EXITPIDONE	0x00000008	/* pi exit done on shut down */
#define PF_VCPU		0x00000010	/* I'm a virtual CPU */
#define PF_WQ_WORKER	0x00000020	/* I'm a workqueue worker */
#define PF_FORKNOEXEC	0x00000040	/* forked but didn't exec */
#define PF_MCE_PROCESS	0x00000080	/* process policy on mce errors */
#define PF_SUPERPRIV	0x00000100	/* used super-user privileges */
#define PF_DUMPCORE	0x00000200	/* dumped core */
#define PF_SIGNALED	0x00000400	/* killed by a signal */
#define PF_MEMALLOC	0x00000800	/* Allocating memory */
#define PF_NPROC_EXCEEDED 0x00001000	/* set_user noticed that RLIMIT_NPROC was exceeded */
#define PF_USED_MATH	0x00002000	/* if unset the fpu must be initialized before use */
#define PF_USED_ASYNC	0x00004000	/* used async_schedule*(), used by module init */
#define PF_NOFREEZE	0x00008000	/* this thread should not be frozen */
#define PF_FROZEN	0x00010000	/* frozen for system suspend */
#define PF_FSTRANS	0x00020000	/* inside a filesystem transaction */
#define PF_KSWAPD	0x00040000	/* I am kswapd */
#define PF_MEMALLOC_NOIO 0x00080000	/* Allocating memory without IO involved */
#define PF_LESS_THROTTLE 0x00100000	/* Throttle me less: I clean memory */
#define PF_KTHREAD	0x00200000	/* I am a kernel thread */
#define PF_RANDOMIZE	0x00400000	/* randomize virtual address space */
#define PF_SWAPWRITE	0x00800000	/* Allowed to write to swap */
#define PF_NO_SETAFFINITY 0x04000000	/* Userland is not allowed to meddle with cpus_allowed */
#define PF_MCE_EARLY	0x08000000	/* Early kill for mce process policy */
#define PF_MUTEX_TESTER	0x20000000	/* Thread belongs to the rt mutex tester */
#define PF_FREEZER_SKIP	0x40000000	/* Freezer should not count it as freezable */
#define PF_SUSPEND_TASK 0x80000000	/* this thread called freeze_processes and should not be frozen */
/*
 * Only the _current_ task can read/write to tsk->flags, but other
 * tasks can access tsk->flags in readonly mode for example
 * with tsk_used_math (like during threaded core dumping).
 * There is however an exception to this rule during ptrace
 * or during fork: the ptracer task is allowed to write to the
 * child->flags of its traced child (same goes for fork, the parent
 * can write to the child->flags), because we're guaranteed the
 * child is not running and in turn not changing child->flags
 * at the same time the parent does it.
 */
#define clear_stopped_child_used_math(child)	do { (child)->flags &= ~PF_USED_MATH; } while (0)
#define set_stopped_child_used_math(child)	do { (child)->flags |= PF_USED_MATH; } while (0)
#define clear_used_math()			clear_stopped_child_used_math(current)
#define set_used_math()				set_stopped_child_used_math(current)
#define conditional_stopped_child_used_math(condition, child) \
	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
#define conditional_used_math(condition) \
	conditional_stopped_child_used_math(condition, current)
#define copy_to_stopped_child_used_math(child) \
	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
#define tsk_used_math(p)			((p)->flags & PF_USED_MATH)
#define used_math()				tsk_used_math(current)
/* Per-process atomic flags. */
#define PFA_NO_NEW_PRIVS 0	/* May not gain new privileges. */
#define PFA_SPREAD_PAGE  1	/* Spread page cache over cpuset */
#define PFA_SPREAD_SLAB  2	/* Spread some slab caches over cpuset */
#define PFA_LMK_WAITING  3	/* Lowmemorykiller is waiting */

#define TASK_PFA_TEST(name, func)					\
	static inline bool task_##func(struct task_struct *p)		\
	{ return test_bit(PFA_##name, &p->atomic_flags); }
#define TASK_PFA_SET(name, func)					\
	static inline void task_set_##func(struct task_struct *p)	\
	{ set_bit(PFA_##name, &p->atomic_flags); }
#define TASK_PFA_CLEAR(name, func)					\
	static inline void task_clear_##func(struct task_struct *p)	\
	{ clear_bit(PFA_##name, &p->atomic_flags); }
TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)

TASK_PFA_TEST(SPREAD_PAGE, spread_page)
TASK_PFA_SET(SPREAD_PAGE, spread_page)
TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)

TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
TASK_PFA_SET(SPREAD_SLAB, spread_slab)
TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)

TASK_PFA_TEST(LMK_WAITING, lmk_waiting)
TASK_PFA_SET(LMK_WAITING, lmk_waiting)
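
/*
 * Illustrative note (editor's addition, not part of the original header):
 * each TASK_PFA_* invocation above generates one accessor. E.g.
 * TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs) expands to:
 *
 *	static inline bool task_no_new_privs(struct task_struct *p)
 *	{ return test_bit(PFA_NO_NEW_PRIVS, &p->atomic_flags); }
 */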
static inline void tsk_restore_flags(struct task_struct *task,
				unsigned long orig_flags, unsigned long flags)
{
	task->flags &= ~flags;
	task->flags |= orig_flags & flags;
}
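
/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * a common save/restore pattern for per-process flags, here temporarily
 * entering PF_MEMALLOC_NOIO and then restoring only that bit to its
 * previous value:
 *
 *	unsigned long pflags = current->flags;
 *
 *	current->flags |= PF_MEMALLOC_NOIO;
 *	(... allocate memory without triggering IO ...)
 *	tsk_restore_flags(current, pflags, PF_MEMALLOC_NOIO);
 */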
extern int cpuset_cpumask_can_shrink(const struct cpumask *cur,
				     const struct cpumask *trial);
extern int task_can_attach(struct task_struct *p,
			   const struct cpumask *cs_cpus_allowed);
#ifdef CONFIG_SMP
extern void do_set_cpus_allowed(struct task_struct *p,
				const struct cpumask *new_mask);
extern int set_cpus_allowed_ptr(struct task_struct *p,
				const struct cpumask *new_mask);
#else
static inline void do_set_cpus_allowed(struct task_struct *p,
				      const struct cpumask *new_mask)
{
}
static inline int set_cpus_allowed_ptr(struct task_struct *p,
				       const struct cpumask *new_mask)
{
	if (!cpumask_test_cpu(0, new_mask))
		return -EINVAL;
	return 0;
}
#endif
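
/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * pinning a task to a single CPU with the affinity interface above.
 * cpumask_of() yields a mask containing just @cpu; a negative return means
 * the new mask was rejected.
 *
 *	int err = set_cpus_allowed_ptr(p, cpumask_of(cpu));
 *	if (err)
 *		(the task could not be migrated to that mask)
 */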
#ifndef cpu_relax_yield
#define cpu_relax_yield() cpu_relax()
#endif

extern int yield_to(struct task_struct *p, bool preempt);
extern void set_user_nice(struct task_struct *p, long nice);
extern int task_prio(const struct task_struct *p);
/**
 * task_nice - return the nice value of a given task.
 * @p: the task in question.
 *
 * Return: The nice value [ -20 ... 0 ... 19 ].
 */
static inline int task_nice(const struct task_struct *p)
{
	return PRIO_TO_NICE((p)->static_prio);
}
extern int can_nice(const struct task_struct *p, const int nice);
extern int task_curr(const struct task_struct *p);
extern int idle_cpu(int cpu);
extern int sched_setscheduler(struct task_struct *, int,
			      const struct sched_param *);
extern int sched_setscheduler_nocheck(struct task_struct *, int,
				      const struct sched_param *);
extern int sched_setattr(struct task_struct *,
			 const struct sched_attr *);
extern struct task_struct *idle_task(int cpu);
/**
 * is_idle_task - is the specified task an idle task?
 * @p: the task in question.
 *
 * Return: 1 if @p is an idle task. 0 otherwise.
 */
static inline bool is_idle_task(const struct task_struct *p)
{
	return !!(p->flags & PF_IDLE);
}
extern struct task_struct *curr_task(int cpu);
extern void ia64_set_curr_task(int cpu, struct task_struct *p);
union thread_union {
#ifndef CONFIG_THREAD_INFO_IN_TASK
	struct thread_info thread_info;
#endif
	unsigned long stack[THREAD_SIZE/sizeof(long)];
};

#ifdef CONFIG_THREAD_INFO_IN_TASK
static inline struct thread_info *task_thread_info(struct task_struct *task)
{
	return &task->thread_info;
}
#elif !defined(__HAVE_THREAD_FUNCTIONS)
# define task_thread_info(task)	((struct thread_info *)(task)->stack)
#endif
extern struct pid_namespace init_pid_ns;

/*
 * find a task by one of its numerical ids
 *
 * find_task_by_pid_ns():
 *      finds a task by its pid in the specified namespace
 * find_task_by_vpid():
 *      finds a task by its virtual pid
 *
 * see also find_vpid() etc in include/linux/pid.h
 */

extern struct task_struct *find_task_by_vpid(pid_t nr);
extern struct task_struct *find_task_by_pid_ns(pid_t nr,
		struct pid_namespace *ns);
extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
extern void wake_up_new_task(struct task_struct *tsk);
#ifdef CONFIG_SMP
extern void kick_process(struct task_struct *tsk);
#else
static inline void kick_process(struct task_struct *tsk) { }
#endif
extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);
static inline void set_task_comm(struct task_struct *tsk, const char *from)
{
	__set_task_comm(tsk, from, false);
}
extern char *get_task_comm(char *to, struct task_struct *tsk);
#ifdef CONFIG_SMP
void scheduler_ipi(void);
extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
#else
static inline void scheduler_ipi(void) { }
static inline unsigned long wait_task_inactive(struct task_struct *p,
					       long match_state)
{
	return 1;
}
#endif
/* set thread flags in other task's structures
 * - see asm/thread_info.h for TIF_xxxx flags available
 */
static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_ti_thread_flag(task_thread_info(tsk), flag);
}
static inline void set_tsk_need_resched(struct task_struct *tsk)
{
	set_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
}

static inline void clear_tsk_need_resched(struct task_struct *tsk)
{
	clear_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
}

static inline int test_tsk_need_resched(struct task_struct *tsk)
{
	return unlikely(test_tsk_thread_flag(tsk, TIF_NEED_RESCHED));
}
/*
 * cond_resched() and cond_resched_lock(): latency reduction via
 * explicit rescheduling in places that are safe. The return
 * value indicates whether a reschedule was done in fact.
 * cond_resched_lock() will drop the spinlock before scheduling,
 * cond_resched_softirq() will enable bhs before scheduling.
 */
#ifndef CONFIG_PREEMPT
extern int _cond_resched(void);
#else
static inline int _cond_resched(void) { return 0; }
#endif

#define cond_resched() ({			\
	___might_sleep(__FILE__, __LINE__, 0);	\
	_cond_resched();			\
})

extern int __cond_resched_lock(spinlock_t *lock);

#define cond_resched_lock(lock) ({				\
	___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
	__cond_resched_lock(lock);				\
})

extern int __cond_resched_softirq(void);

#define cond_resched_softirq() ({					\
	___might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET);	\
	__cond_resched_softirq();					\
})
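
/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * breaking up a long-running loop so it does not hog the CPU on
 * non-preemptible kernels. @nr_items and process_item() are hypothetical.
 *
 *	for (i = 0; i < nr_items; i++) {
 *		process_item(i);
 *		cond_resched();
 *	}
 */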
static inline void cond_resched_rcu(void)
{
#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
	rcu_read_unlock();
	cond_resched();
	rcu_read_lock();
#endif
}
/*
 * Does a critical section need to be broken due to another
 * task waiting?: (technically does not depend on CONFIG_PREEMPT,
 * but a general need for low latency)
 */
static inline int spin_needbreak(spinlock_t *lock)
{
#ifdef CONFIG_PREEMPT
	return spin_is_contended(lock);
#else
	return 0;
#endif
}

static __always_inline bool need_resched(void)
{
	return unlikely(tif_need_resched());
}
/*
 * Wrappers for p->thread_info->cpu access. No-op on UP.
 */
#ifdef CONFIG_SMP

static inline unsigned int task_cpu(const struct task_struct *p)
{
#ifdef CONFIG_THREAD_INFO_IN_TASK
	return p->cpu;
#else
	return task_thread_info(p)->cpu;
#endif
}

static inline int task_node(const struct task_struct *p)
{
	return cpu_to_node(task_cpu(p));
}

extern void set_task_cpu(struct task_struct *p, unsigned int cpu);

#else

static inline unsigned int task_cpu(const struct task_struct *p)
{
	return 0;
}

static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
{
}

#endif /* CONFIG_SMP */
/*
 * In order to reduce various lock holder preemption latencies provide an
 * interface to see if a vCPU is currently running or not.
 *
 * This allows us to terminate optimistic spin loops and block, analogous to
 * the native optimistic spin heuristic of testing if the lock owner task is
 * running or not.
 */
#ifndef vcpu_is_preempted
# define vcpu_is_preempted(cpu)	false
#endif
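
/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * an optimistic spin loop that gives up when the lock owner's vCPU has been
 * preempted, since spinning on a descheduled owner wastes cycles.
 * @owner_cpu and lock_is_held() are hypothetical.
 *
 *	while (lock_is_held(lock)) {
 *		if (vcpu_is_preempted(owner_cpu))
 *			break;	(block instead of spinning)
 *		cpu_relax();
 *	}
 */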
extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
extern long sched_getaffinity(pid_t pid, struct cpumask *mask);

#ifndef TASK_SIZE_OF
#define TASK_SIZE_OF(tsk)	TASK_SIZE
#endif