#include <uapi/linux/sched.h>

#include <linux/sched/prio.h>

#include <linux/capability.h>
#include <linux/mutex.h>
#include <linux/plist.h>
#include <linux/mm_types.h>
#include <asm/ptrace.h>

#include <linux/sem.h>
#include <linux/shm.h>
#include <linux/signal.h>
#include <linux/signal_types.h>
#include <linux/pid.h>
#include <linux/seccomp.h>
#include <linux/rculist.h>
#include <linux/rtmutex.h>

#include <linux/resource.h>
#include <linux/hrtimer.h>
#include <linux/kcov.h>
#include <linux/task_io_accounting.h>
#include <linux/latencytop.h>
#include <linux/cred.h>
#include <linux/gfp.h>
#include <linux/topology.h>
#include <linux/magic.h>
#include <linux/cgroup-defs.h>

#include <asm/current.h>
/* task_struct member predeclarations: */

struct backing_dev_info;
struct futex_pi_state;
struct perf_event_context;
struct pipe_inode_info;
struct robust_list_head;
struct sighand_struct;
struct task_delay_info;
/*
 * Task state bitmask. NOTE! These bits are also
 * encoded in fs/proc/array.c: get_task_state().
 *
 * We have two separate sets of flags: task->state
 * is about runnability, while task->exit_state is
 * about the task exiting. Confusing, but this way
 * modifying one set can't modify the other one by
 * mistake.
 */
#define TASK_RUNNING		0
#define TASK_INTERRUPTIBLE	1
#define TASK_UNINTERRUPTIBLE	2
#define __TASK_STOPPED		4
#define __TASK_TRACED		8
/* in tsk->exit_state */
#define EXIT_DEAD		16
#define EXIT_ZOMBIE		32
#define EXIT_TRACE		(EXIT_ZOMBIE | EXIT_DEAD)
/* in tsk->state again */
#define TASK_DEAD		64
#define TASK_WAKEKILL		128
#define TASK_WAKING		256
#define TASK_PARKED		512
#define TASK_NOLOAD		1024
#define TASK_NEW		2048
#define TASK_STATE_MAX		4096

#define TASK_STATE_TO_CHAR_STR	"RSDTtXZxKWPNn"
/* Convenience macros for the sake of set_current_state */
#define TASK_KILLABLE		(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
#define TASK_STOPPED		(TASK_WAKEKILL | __TASK_STOPPED)
#define TASK_TRACED		(TASK_WAKEKILL | __TASK_TRACED)

#define TASK_IDLE		(TASK_UNINTERRUPTIBLE | TASK_NOLOAD)

/* Convenience macros for the sake of wake_up */
#define TASK_NORMAL		(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
#define TASK_ALL		(TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)

/* get_task_state() */
#define TASK_REPORT		(TASK_RUNNING | TASK_INTERRUPTIBLE | \
				 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
				 __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD)
#define task_is_traced(task)		((task->state & __TASK_TRACED) != 0)
#define task_is_stopped(task)		((task->state & __TASK_STOPPED) != 0)
#define task_is_stopped_or_traced(task)	\
			((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
#define task_contributes_to_load(task)	\
				((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
				 (task->flags & PF_FROZEN) == 0 && \
				 (task->state & TASK_NOLOAD) == 0)
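/*
 * Illustrative sketch (modelled on sched_show_task() in kernel/sched/core.c,
 * not part of this header): mapping a task's state bits to the one-letter
 * code reported in /proc, using TASK_STATE_TO_CHAR_STR. "p" is assumed to be
 * a struct task_struct pointer.
 *
 *	static const char stat_nam[] = TASK_STATE_TO_CHAR_STR;
 *	unsigned long state = p->state ? __ffs(p->state) + 1 : 0;
 *	char code = (state < sizeof(stat_nam) - 1) ? stat_nam[state] : '?';
 */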
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP

#define __set_current_state(state_value)			\
	do {							\
		current->task_state_change = _THIS_IP_;		\
		current->state = (state_value);			\
	} while (0)
#define set_current_state(state_value)				\
	do {							\
		current->task_state_change = _THIS_IP_;		\
		smp_store_mb(current->state, (state_value));	\
	} while (0)

#else
/*
 * set_current_state() includes a barrier so that the write of current->state
 * is correctly serialised wrt the caller's subsequent test of whether to
 * actually sleep:
 *
 *   for (;;) {
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	if (!need_sleep)
 *		break;
 *
 *	schedule();
 *   }
 *   __set_current_state(TASK_RUNNING);
 *
 * If the caller does not need such serialisation (because, for instance, the
 * condition test and condition change and wakeup are under the same lock) then
 * use __set_current_state().
 *
 * The above is typically ordered against the wakeup, which does:
 *
 *   need_sleep = false;
 *   wake_up_state(p, TASK_UNINTERRUPTIBLE);
 *
 * Where wake_up_state() (and all other wakeup primitives) imply enough
 * barriers to order the store of the variable against wakeup.
 *
 * Wakeup will do: if (@state & p->state) p->state = TASK_RUNNING, that is,
 * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a
 * TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING).
 *
 * This is obviously fine, since they both store the exact same value.
 *
 * Also see the comments of try_to_wake_up().
 */
#define __set_current_state(state_value)		\
	do { current->state = (state_value); } while (0)
#define set_current_state(state_value)			\
	smp_store_mb(current->state, (state_value))

#endif
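/*
 * Illustrative sketch (assumed usage, not part of this header): an
 * interruptible variant of the wait loop above, bailing out when a signal
 * is pending. wait_condition is a hypothetical flag set by the waker.
 *
 *	for (;;) {
 *		set_current_state(TASK_INTERRUPTIBLE);
 *		if (wait_condition || signal_pending(current))
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 */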
/* Task command name length */
#define TASK_COMM_LEN 16

extern void sched_init(void);
extern void sched_init_smp(void);

extern cpumask_var_t cpu_isolated_map;

extern int runqueue_is_locked(int cpu);

extern void cpu_init(void);
extern void trap_init(void);
extern void update_process_times(int user);
extern void scheduler_tick(void);
#define	MAX_SCHEDULE_TIMEOUT	LONG_MAX
extern signed long schedule_timeout(signed long timeout);
extern signed long schedule_timeout_interruptible(signed long timeout);
extern signed long schedule_timeout_killable(signed long timeout);
extern signed long schedule_timeout_uninterruptible(signed long timeout);
extern signed long schedule_timeout_idle(signed long timeout);
asmlinkage void schedule(void);
extern void schedule_preempt_disabled(void);

extern int __must_check io_schedule_prepare(void);
extern void io_schedule_finish(int token);
extern long io_schedule_timeout(long timeout);
extern void io_schedule(void);
/**
 * struct prev_cputime - snapshot of system and user cputime
 * @utime: time spent in user mode
 * @stime: time spent in system mode
 * @lock: protects the above two fields
 *
 * Stores previous user/system time values such that we can guarantee
 * monotonicity.
 */
struct prev_cputime {
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	u64 utime;
	u64 stime;
	raw_spinlock_t lock;
#endif
};

static inline void prev_cputime_init(struct prev_cputime *prev)
{
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	prev->utime = prev->stime = 0;
	raw_spin_lock_init(&prev->lock);
#endif
}
/**
 * struct task_cputime - collected CPU time counts
 * @utime:		time spent in user mode, in nanoseconds
 * @stime:		time spent in kernel mode, in nanoseconds
 * @sum_exec_runtime:	total time spent on the CPU, in nanoseconds
 *
 * This structure groups together three kinds of CPU time that are tracked for
 * threads and thread groups.  Most things considering CPU time want to group
 * these counts together and treat all three of them in parallel.
 */
struct task_cputime {
	u64			utime;
	u64			stime;
	unsigned long long	sum_exec_runtime;
};

/* Alternate field names when used to cache expirations. */
#define virt_exp	utime
#define prof_exp	stime
#define sched_exp	sum_exec_runtime
/*
 * This is the atomic variant of task_cputime, which can be used for
 * storing and updating task_cputime statistics without locking.
 */
struct task_cputime_atomic {
	atomic64_t utime;
	atomic64_t stime;
	atomic64_t sum_exec_runtime;
};

#define INIT_CPUTIME_ATOMIC \
	(struct task_cputime_atomic) {				\
		.utime = ATOMIC64_INIT(0),			\
		.stime = ATOMIC64_INIT(0),			\
		.sum_exec_runtime = ATOMIC64_INIT(0),		\
	}
/**
 * struct thread_group_cputimer - thread group interval timer counts
 * @cputime_atomic:	atomic thread group interval timers.
 * @running:		true when there are timers running and
 *			@cputime_atomic receives updates.
 * @checking_timer:	true when a thread in the group is in the
 *			process of checking for thread group timers.
 *
 * This structure contains the version of task_cputime, above, that is
 * used for thread group CPU timer calculations.
 */
struct thread_group_cputimer {
	struct task_cputime_atomic cputime_atomic;
	bool running;
	bool checking_timer;
};
#include <linux/rwsem.h>

struct sched_info {
#ifdef CONFIG_SCHED_INFO
	/* cumulative counters */
	unsigned long pcount;	      /* # of times run on this cpu */
	unsigned long long run_delay; /* time spent waiting on a runqueue */

	/* timestamps */
	unsigned long long last_arrival,/* when we last ran on a cpu */
			   last_queued;	/* when we were last queued to run */
#endif /* CONFIG_SCHED_INFO */
};
static inline int sched_info_on(void)
{
#ifdef CONFIG_SCHEDSTATS
	return 1;
#elif defined(CONFIG_TASK_DELAY_ACCT)
	extern int delayacct_on;
	return delayacct_on;
#else
	return 0;
#endif
}

#ifdef CONFIG_SCHEDSTATS
void force_schedstat_enabled(void);
#endif
/*
 * Integer metrics need fixed point arithmetic, e.g., sched/fair
 * has a few: load, load_avg, util_avg, freq, and capacity.
 *
 * We define a basic fixed point arithmetic range, and then formalize
 * all these metrics based on that basic range.
 */
# define SCHED_FIXEDPOINT_SHIFT	10
# define SCHED_FIXEDPOINT_SCALE	(1L << SCHED_FIXEDPOINT_SHIFT)
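/*
 * Illustrative sketch (not part of this header): with a shift of 10, a
 * ratio such as "75% runnable" is stored as 768 out of 1024, and the
 * product of two fixed-point ratios needs one renormalizing shift:
 *
 *	unsigned long runnable = 768;	// 75% in fixed point
 *	unsigned long freq     = 512;	// 50% in fixed point
 *	unsigned long scaled   = (runnable * freq) >> SCHED_FIXEDPOINT_SHIFT;
 *	// scaled == 384, i.e. 37.5% in fixed point
 */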
#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
extern void prefetch_stack(struct task_struct *t);
#else
static inline void prefetch_stack(struct task_struct *t) { }
#endif
struct load_weight {
	unsigned long weight;
};
/*
 * The load_avg/util_avg accumulates an infinite geometric series
 * (see __update_load_avg() in kernel/sched/fair.c).
 *
 * [load_avg definition]
 *
 *   load_avg = runnable% * scale_load_down(load)
 *
 * where runnable% is the time ratio that a sched_entity is runnable.
 * For cfs_rq, it is the aggregated load_avg of all runnable and
 * blocked sched_entities.
 *
 * load_avg may also take frequency scaling into account:
 *
 *   load_avg = runnable% * scale_load_down(load) * freq%
 *
 * where freq% is the CPU frequency normalized to the highest frequency.
 *
 * [util_avg definition]
 *
 *   util_avg = running% * SCHED_CAPACITY_SCALE
 *
 * where running% is the time ratio that a sched_entity is running on
 * a CPU. For cfs_rq, it is the aggregated util_avg of all runnable
 * and blocked sched_entities.
 *
 * util_avg may also factor frequency scaling and CPU capacity scaling:
 *
 *   util_avg = running% * SCHED_CAPACITY_SCALE * freq% * capacity%
 *
 * where freq% is the same as above, and capacity% is the CPU capacity
 * normalized to the greatest capacity (due to uarch differences, etc).
 *
 * N.B., the above ratios (runnable%, running%, freq%, and capacity%)
 * themselves are in the range of [0, 1]. To do fixed point arithmetics,
 * we therefore scale them to as large a range as necessary. This is for
 * example reflected by util_avg's SCHED_CAPACITY_SCALE.
 *
 * [Overflow issue]
 *
 * The 64-bit load_sum can have 4353082796 (=2^64/47742/88761) entities
 * with the highest load (=88761), always runnable on a single cfs_rq,
 * and should not overflow as the number already hits PID_MAX_LIMIT.
 *
 * For all other cases (including 32-bit kernels), struct load_weight's
 * weight will overflow first before we do, because:
 *
 *   Max(load_avg) <= Max(load.weight)
 *
 * Then it is the load_weight's responsibility to consider overflow
 * issues.
 */
struct sched_avg {
	u64 last_update_time, load_sum;
	u32 util_sum, period_contrib;
	unsigned long load_avg, util_avg;
};
#ifdef CONFIG_SCHEDSTATS
struct sched_statistics {
	s64			sum_sleep_runtime;

	u64			nr_migrations_cold;
	u64			nr_failed_migrations_affine;
	u64			nr_failed_migrations_running;
	u64			nr_failed_migrations_hot;
	u64			nr_forced_migrations;

	u64			nr_wakeups_migrate;
	u64			nr_wakeups_local;
	u64			nr_wakeups_remote;
	u64			nr_wakeups_affine;
	u64			nr_wakeups_affine_attempts;
	u64			nr_wakeups_passive;
};
#endif
struct sched_entity {
	struct load_weight	load;		/* for load-balancing */
	struct rb_node		run_node;
	struct list_head	group_node;

	u64			sum_exec_runtime;
	u64			prev_sum_exec_runtime;

#ifdef CONFIG_SCHEDSTATS
	struct sched_statistics statistics;
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	struct sched_entity	*parent;
	/* rq on which this entity is (to be) queued: */
	struct cfs_rq		*cfs_rq;
	/* rq "owned" by this entity/group: */
	struct cfs_rq		*my_q;
#endif

	/*
	 * Per entity load average tracking.
	 *
	 * Put into separate cache line so it does not
	 * collide with read-mostly values above.
	 */
	struct sched_avg	avg ____cacheline_aligned_in_smp;
};
struct sched_rt_entity {
	struct list_head	run_list;
	unsigned long		timeout;
	unsigned long		watchdog_stamp;
	unsigned int		time_slice;
	unsigned short		on_rq;
	unsigned short		on_list;

	struct sched_rt_entity	*back;
#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity	*parent;
	/* rq on which this entity is (to be) queued: */
	struct rt_rq		*rt_rq;
	/* rq "owned" by this entity/group: */
	struct rt_rq		*my_q;
#endif
};
struct sched_dl_entity {
	struct rb_node	rb_node;

	/*
	 * Original scheduling parameters. Copied here from sched_attr
	 * during sched_setattr(), they will remain the same until
	 * the next sched_setattr().
	 */
	u64 dl_runtime;		/* maximum runtime for each instance	*/
	u64 dl_deadline;	/* relative deadline of each instance	*/
	u64 dl_period;		/* separation of two instances (period) */
	u64 dl_bw;		/* dl_runtime / dl_deadline		*/

	/*
	 * Actual scheduling parameters. Initialized with the values above,
	 * they are continuously updated during task execution. Note that
	 * the remaining runtime could be < 0 in case we are in overrun.
	 */
	s64 runtime;		/* remaining runtime for this instance	*/
	u64 deadline;		/* absolute deadline for this instance	*/
	unsigned int flags;	/* specifying the scheduler behaviour	*/

	/*
	 * @dl_throttled tells if we exhausted the runtime. If so, the
	 * task has to wait for a replenishment to be performed at the
	 * next firing of dl_timer.
	 *
	 * @dl_boosted tells if we are boosted due to DI. If so we are
	 * outside bandwidth enforcement mechanism (but only until we
	 * exit the critical section);
	 *
	 * @dl_yielded tells if task gave up the cpu before consuming
	 * all its available runtime during the last job.
	 */
	int dl_throttled, dl_boosted, dl_yielded;

	/*
	 * Bandwidth enforcement timer. Each -deadline task has its
	 * own bandwidth to be enforced, thus we need one timer per task.
	 */
	struct hrtimer dl_timer;
};
union rcu_special {
	struct {
		u8 blocked;
		u8 need_qs;
		u8 exp_need_qs;
		u8 pad;	/* Otherwise the compiler can store garbage here. */
	} b; /* Bits. */
	u32 s; /* Set of bits. */
};
enum perf_event_task_context {
	perf_invalid_context = -1,
	perf_hw_context = 0,
	perf_sw_context,
	perf_nr_task_contexts,
};

struct wake_q_node {
	struct wake_q_node *next;
};
/* Track pages that require TLB flushes */
struct tlbflush_unmap_batch {
	/*
	 * Each bit set is a CPU that potentially has a TLB entry for one of
	 * the PFNs being flushed. See set_tlb_ubc_flush_pending().
	 */
	struct cpumask cpumask;

	/* True if any bit in cpumask is set */
	bool flush_required;

	/*
	 * If true then the PTE was dirty when unmapped. The entry must be
	 * flushed before IO is initiated or a stale TLB entry potentially
	 * allows an update without redirtying the page.
	 */
	bool writable;
};
struct task_struct {
#ifdef CONFIG_THREAD_INFO_IN_TASK
	/*
	 * For reasons of header soup (see current_thread_info()), this
	 * must be the first element of task_struct.
	 */
	struct thread_info thread_info;
#endif
	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */

	unsigned int flags;	/* per process flags, defined below */

	struct llist_node wake_entry;
#ifdef CONFIG_THREAD_INFO_IN_TASK
	unsigned int cpu;	/* current CPU */
#endif
	unsigned int wakee_flips;
	unsigned long wakee_flip_decay_ts;
	struct task_struct *last_wakee;

	int prio, static_prio, normal_prio;
	unsigned int rt_priority;
	const struct sched_class *sched_class;
	struct sched_entity se;
	struct sched_rt_entity rt;
#ifdef CONFIG_CGROUP_SCHED
	struct task_group *sched_task_group;
#endif
	struct sched_dl_entity dl;
#ifdef CONFIG_PREEMPT_NOTIFIERS
	/* list of struct preempt_notifier: */
	struct hlist_head preempt_notifiers;
#endif

#ifdef CONFIG_BLK_DEV_IO_TRACE
	unsigned int btrace_seq;
#endif

	cpumask_t cpus_allowed;
#ifdef CONFIG_PREEMPT_RCU
	int rcu_read_lock_nesting;
	union rcu_special rcu_read_unlock_special;
	struct list_head rcu_node_entry;
	struct rcu_node *rcu_blocked_node;
#endif /* #ifdef CONFIG_PREEMPT_RCU */
#ifdef CONFIG_TASKS_RCU
	unsigned long rcu_tasks_nvcsw;
	bool rcu_tasks_holdout;
	struct list_head rcu_tasks_holdout_list;
	int rcu_tasks_idle_cpu;
#endif /* #ifdef CONFIG_TASKS_RCU */

#ifdef CONFIG_SCHED_INFO
	struct sched_info sched_info;
#endif

	struct list_head tasks;
	struct plist_node pushable_tasks;
	struct rb_node pushable_dl_tasks;
	struct mm_struct *mm, *active_mm;

	/* Per-thread vma caching: */
	struct vmacache vmacache;

#if defined(SPLIT_RSS_COUNTING)
	struct task_rss_stat rss_stat;
#endif

	int exit_code, exit_signal;
	int pdeath_signal;	/* The signal sent when the parent dies */
	unsigned long jobctl;	/* JOBCTL_*, siglock protected */

	/* Used for emulating ABI behavior of previous Linux versions */
	unsigned int personality;
	/* scheduler bits, serialized by scheduler locks */
	unsigned sched_reset_on_fork:1;
	unsigned sched_contributes_to_load:1;
	unsigned sched_migrated:1;
	unsigned sched_remote_wakeup:1;
	unsigned :0; /* force alignment to the next boundary */

	/* unserialized, strictly 'current' */
	unsigned in_execve:1; /* bit to tell LSMs we're in execve */
	unsigned in_iowait:1;
#if !defined(TIF_RESTORE_SIGMASK)
	unsigned restore_sigmask:1;
#endif
	unsigned memcg_may_oom:1;
	unsigned memcg_kmem_skip_account:1;

#ifdef CONFIG_COMPAT_BRK
	unsigned brk_randomized:1;
#endif

	unsigned long atomic_flags; /* Flags needing atomic access. */

	struct restart_block restart_block;
#ifdef CONFIG_CC_STACKPROTECTOR
	/* Canary value for the -fstack-protector gcc feature */
	unsigned long stack_canary;
#endif
	/*
	 * pointers to (original) parent process, youngest child, younger sibling,
	 * older sibling, respectively.  (p->father can be replaced with
	 * p->real_parent->pid)
	 */
	struct task_struct __rcu *real_parent; /* real parent process */
	struct task_struct __rcu *parent; /* recipient of SIGCHLD, wait4() reports */
	/*
	 * children/sibling forms the list of my natural children
	 */
	struct list_head children;	/* list of my children */
	struct list_head sibling;	/* linkage in my parent's children list */
	struct task_struct *group_leader;	/* threadgroup leader */

	/*
	 * ptraced is the list of tasks this task is using ptrace on.
	 * This includes both natural children and PTRACE_ATTACH targets.
	 * p->ptrace_entry is p's link on the p->parent->ptraced list.
	 */
	struct list_head ptraced;
	struct list_head ptrace_entry;
	/* PID/PID hash table linkage. */
	struct pid_link pids[PIDTYPE_MAX];
	struct list_head thread_group;
	struct list_head thread_node;

	struct completion *vfork_done;		/* for vfork() */
	int __user *set_child_tid;		/* CLONE_CHILD_SETTID */
	int __user *clear_child_tid;		/* CLONE_CHILD_CLEARTID */

#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
	u64 utimescaled, stimescaled;
#endif
	struct prev_cputime prev_cputime;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
	seqcount_t vtime_seqcount;
	unsigned long long vtime_snap;
	enum {
		/* Task is sleeping or running in a CPU with VTIME inactive */
		VTIME_INACTIVE = 0,
		/* Task runs in userspace in a CPU with VTIME active */
		VTIME_USER,
		/* Task runs in kernelspace in a CPU with VTIME active */
		VTIME_SYS,
	} vtime_snap_whence;
#endif

#ifdef CONFIG_NO_HZ_FULL
	atomic_t tick_dep_mask;
#endif
	unsigned long nvcsw, nivcsw; /* context switch counts */

	u64 start_time;		/* monotonic time in nsec */
	u64 real_start_time;	/* boot based time in nsec */
	/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
	unsigned long min_flt, maj_flt;

#ifdef CONFIG_POSIX_TIMERS
	struct task_cputime cputime_expires;
	struct list_head cpu_timers[3];
#endif
	/* process credentials */
	const struct cred __rcu *ptracer_cred; /* Tracer's credentials at attach */
	const struct cred __rcu *real_cred; /* objective and real subjective task
					 * credentials (COW) */
	const struct cred __rcu *cred;	/* effective (overridable) subjective task
					 * credentials (COW) */
	char comm[TASK_COMM_LEN]; /* executable name excluding path
				     - access with [gs]et_task_comm (which lock
				       it with task_lock())
				     - initialized normally by setup_new_exec */
	/* file system info */
	struct nameidata *nameidata;
#ifdef CONFIG_SYSVIPC
	struct sysv_sem sysvsem;
	struct sysv_shm sysvshm;
#endif
#ifdef CONFIG_DETECT_HUNG_TASK
	/* hung task detection */
	unsigned long last_switch_count;
#endif
	/* filesystem information */
	struct fs_struct *fs;
	/* open file information */
	struct files_struct *files;
	struct nsproxy *nsproxy;
	/* signal handlers */
	struct signal_struct *signal;
	struct sighand_struct *sighand;
	sigset_t blocked, real_blocked;
	sigset_t saved_sigmask;	/* restored if set_restore_sigmask() was used */
	struct sigpending pending;

	unsigned long sas_ss_sp;
	unsigned sas_ss_flags;

	struct callback_head *task_works;

	struct audit_context *audit_context;
#ifdef CONFIG_AUDITSYSCALL
	unsigned int sessionid;
#endif
	struct seccomp seccomp;
	/* Thread group tracking */
	u32 parent_exec_id;
	u32 self_exec_id;
	/* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed,
	 * mempolicy */
	spinlock_t alloc_lock;

	/* Protection of the PI data structures: */
	raw_spinlock_t pi_lock;

	struct wake_q_node wake_q;
#ifdef CONFIG_RT_MUTEXES
	/* PI waiters blocked on a rt_mutex held by this task */
	struct rb_root pi_waiters;
	struct rb_node *pi_waiters_leftmost;
	/* Deadlock detection and priority inheritance handling */
	struct rt_mutex_waiter *pi_blocked_on;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
	/* mutex deadlock detection */
	struct mutex_waiter *blocked_on;
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	unsigned int irq_events;
	unsigned long hardirq_enable_ip;
	unsigned long hardirq_disable_ip;
	unsigned int hardirq_enable_event;
	unsigned int hardirq_disable_event;
	int hardirqs_enabled;
	unsigned long softirq_disable_ip;
	unsigned long softirq_enable_ip;
	unsigned int softirq_disable_event;
	unsigned int softirq_enable_event;
	int softirqs_enabled;
#endif
#ifdef CONFIG_LOCKDEP
# define MAX_LOCK_DEPTH 48UL
	unsigned int lockdep_recursion;
	struct held_lock held_locks[MAX_LOCK_DEPTH];
	gfp_t lockdep_reclaim_gfp;
#endif
	unsigned int in_ubsan;
	/* journalling filesystem info */
	void *journal_info;

	/* stacked block device info */
	struct bio_list *bio_list;

	struct blk_plug *plug;

	struct reclaim_state *reclaim_state;

	struct backing_dev_info *backing_dev_info;

	struct io_context *io_context;

	unsigned long ptrace_message;
	siginfo_t *last_siginfo; /* For ptrace use. */
	struct task_io_accounting ioac;
#if defined(CONFIG_TASK_XACCT)
	u64 acct_rss_mem1;	/* accumulated rss usage */
	u64 acct_vm_mem1;	/* accumulated virtual memory usage */
	u64 acct_timexpd;	/* stime + utime since last update */
#endif
#ifdef CONFIG_CPUSETS
	nodemask_t mems_allowed;	/* Protected by alloc_lock */
	seqcount_t mems_allowed_seq;	/* Sequence no to catch updates */
	int cpuset_mem_spread_rotor;
	int cpuset_slab_spread_rotor;
#endif
#ifdef CONFIG_CGROUPS
	/* Control Group info protected by css_set_lock */
	struct css_set __rcu *cgroups;
	/* cg_list protected by css_set_lock and tsk->alloc_lock */
	struct list_head cg_list;
#endif
#ifdef CONFIG_INTEL_RDT_A
	int closid;
#endif
	struct robust_list_head __user *robust_list;
	struct compat_robust_list_head __user *compat_robust_list;
	struct list_head pi_state_list;
	struct futex_pi_state *pi_state_cache;
#ifdef CONFIG_PERF_EVENTS
	struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
	struct mutex perf_event_mutex;
	struct list_head perf_event_list;
#endif
#ifdef CONFIG_DEBUG_PREEMPT
	unsigned long preempt_disable_ip;
#endif
	struct mempolicy *mempolicy;	/* Protected by alloc_lock */
	short pref_node_fork;
#ifdef CONFIG_NUMA_BALANCING
	unsigned int numa_scan_period;
	unsigned int numa_scan_period_max;
	int numa_preferred_nid;
	unsigned long numa_migrate_retry;
	u64 node_stamp;			/* migration stamp */
	u64 last_task_numa_placement;
	u64 last_sum_exec_runtime;
	struct callback_head numa_work;

	struct list_head numa_entry;
	struct numa_group *numa_group;
	/*
	 * numa_faults is an array split into four regions:
	 * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer
	 * in this precise order.
	 *
	 * faults_memory: Exponential decaying average of faults on a per-node
	 * basis. Scheduling placement decisions are made based on these
	 * counts. The values remain static for the duration of a PTE scan.
	 * faults_cpu: Track the nodes the process was running on when a NUMA
	 * hinting fault was incurred.
	 * faults_memory_buffer and faults_cpu_buffer: Record faults per node
	 * during the current scan window. When the scan completes, the counts
	 * in faults_memory and faults_cpu decay and these values are copied.
	 */
	unsigned long *numa_faults;
	unsigned long total_numa_faults;

	/*
	 * numa_faults_locality tracks if faults recorded during the last
	 * scan window were remote/local or failed to migrate. The task scan
	 * period is adapted based on the locality of the faults with different
	 * weights depending on whether they were shared or private faults
	 */
	unsigned long numa_faults_locality[3];

	unsigned long numa_pages_migrated;
960 #endif /* CONFIG_NUMA_BALANCING */
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
	struct tlbflush_unmap_batch tlb_ubc;
#endif

	/*
	 * cache last used pipe for splice
	 */
	struct pipe_inode_info *splice_pipe;

	struct page_frag task_frag;

#ifdef CONFIG_TASK_DELAY_ACCT
	struct task_delay_info *delays;
#endif

#ifdef CONFIG_FAULT_INJECTION
	int make_it_fail;
#endif
	/*
	 * when (nr_dirtied >= nr_dirtied_pause), it's time to call
	 * balance_dirty_pages() for some dirty throttling pause
	 */
	int nr_dirtied;
	int nr_dirtied_pause;
	unsigned long dirty_paused_when; /* start of a write-and-pause period */

#ifdef CONFIG_LATENCYTOP
	int latency_record_count;
	struct latency_record latency_record[LT_SAVECOUNT];
#endif
	/*
	 * time slack values; these are used to round up poll() and
	 * select() etc timeout values. These are in nanoseconds.
	 */
	u64 timer_slack_ns;
	u64 default_timer_slack_ns;

	unsigned int kasan_depth;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* Index of current stored address in ret_stack */
	int curr_ret_stack;
	/* Stack of return addresses for return function tracing */
	struct ftrace_ret_stack	*ret_stack;
	/* time stamp for last schedule */
	unsigned long long ftrace_timestamp;
	/*
	 * Number of functions that haven't been traced
	 * because of depth overrun.
	 */
	atomic_t trace_overrun;
	/* Pause for the tracing */
	atomic_t tracing_graph_pause;
#endif
#ifdef CONFIG_TRACING
	/* state flags for use by tracers */
	unsigned long trace;
	/* bitmask and counter of trace recursion */
	unsigned long trace_recursion;
#endif /* CONFIG_TRACING */
	/* Coverage collection mode enabled for this task (0 if disabled). */
	enum kcov_mode kcov_mode;
	/* Size of the kcov_area. */
	unsigned kcov_size;
	/* Buffer for coverage collection. */
	void *kcov_area;
	/* kcov descriptor wired with this task or NULL. */
	struct kcov *kcov;

	struct mem_cgroup *memcg_in_oom;
	gfp_t memcg_oom_gfp_mask;
	int memcg_oom_order;

	/* number of pages to reclaim on returning to userland */
	unsigned int memcg_nr_pages_over_high;
#ifdef CONFIG_UPROBES
	struct uprobe_task *utask;
#endif
#if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
	unsigned int	sequential_io;
	unsigned int	sequential_io_avg;
#endif
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
	unsigned long	task_state_change;
#endif
	int pagefault_disabled;

	struct task_struct *oom_reaper_list;

#ifdef CONFIG_VMAP_STACK
	struct vm_struct *stack_vm_area;
#endif
#ifdef CONFIG_THREAD_INFO_IN_TASK
	/* A live task holds one reference. */
	atomic_t stack_refcount;
#endif
	/* CPU-specific state of this task */
	struct thread_struct thread;
/*
 * WARNING: on x86, 'thread_struct' contains a variable-sized
 * structure.  It *MUST* be at the end of 'task_struct'.
 *
 * Do not put anything below here!
 */
};
static inline struct pid *task_pid(struct task_struct *task)
{
	return task->pids[PIDTYPE_PID].pid;
}

static inline struct pid *task_tgid(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_PID].pid;
}

/*
 * Without tasklist or rcu lock it is not safe to dereference
 * the result of task_pgrp/task_session even if task == current,
 * we can race with another thread doing sys_setsid/sys_setpgid.
 */
static inline struct pid *task_pgrp(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_PGID].pid;
}

static inline struct pid *task_session(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_SID].pid;
}
/*
 * the helpers to get the task's different pids as they are seen
 * from various namespaces
 *
 * task_xid_nr()     : global id, i.e. the id seen from the init namespace;
 * task_xid_vnr()    : virtual id, i.e. the id seen from the pid namespace of
 *                     current.
 * task_xid_nr_ns()  : id seen from the ns specified;
 *
 * set_task_vxid()   : assigns a virtual id to a task;
 *
 * see also pid_nr() etc in include/linux/pid.h
 */
pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
			struct pid_namespace *ns);
static inline pid_t task_pid_nr(struct task_struct *tsk)
{
	return tsk->pid;
}

static inline pid_t task_pid_nr_ns(struct task_struct *tsk,
					struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
}

static inline pid_t task_pid_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
}

static inline pid_t task_tgid_nr(struct task_struct *tsk)
{
	return tsk->tgid;
}

pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);

static inline pid_t task_tgid_vnr(struct task_struct *tsk)
{
	return pid_vnr(task_tgid(tsk));
}
static inline int pid_alive(const struct task_struct *p);

static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk,
					struct pid_namespace *ns)
{
	pid_t pid = 0;

	rcu_read_lock();
	if (pid_alive(tsk))
		pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
	rcu_read_unlock();

	return pid;
}

static inline pid_t task_ppid_nr(const struct task_struct *tsk)
{
	return task_ppid_nr_ns(tsk, &init_pid_ns);
}
static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
					struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
}

static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
}

static inline pid_t task_session_nr_ns(struct task_struct *tsk,
					struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
}

static inline pid_t task_session_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
}

/* obsolete, do not use */
static inline pid_t task_pgrp_nr(struct task_struct *tsk)
{
	return task_pgrp_nr_ns(tsk, &init_pid_ns);
}
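/*
 * Illustrative sketch (assumed usage, not part of this header): the _nr
 * helpers report an id as seen from the init namespace, while the _vnr
 * helpers report it as seen from current's pid namespace.
 *
 *	pid_t global_tgid = task_tgid_nr(tsk);	// id in the init namespace
 *	pid_t virt_pid    = task_pid_vnr(tsk);	// id in current's namespace
 */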
/**
 * pid_alive - check that a task structure is not stale
 * @p: Task structure to be checked.
 *
 * Test if a process is not yet dead (at most zombie state)
 * If pid_alive fails, then pointers within the task structure
 * can be stale and must not be dereferenced.
 *
 * Return: 1 if the process is alive. 0 otherwise.
 */
static inline int pid_alive(const struct task_struct *p)
{
	return p->pids[PIDTYPE_PID].pid != NULL;
}

/**
 * is_global_init - check if a task structure is init. Since init
 * is free to have sub-threads we need to check tgid.
 * @tsk: Task structure to be checked.
 *
 * Check if a task structure is the first user space task the kernel created.
 *
 * Return: 1 if the task structure is init. 0 otherwise.
 */
static inline int is_global_init(struct task_struct *tsk)
{
	return task_tgid_nr(tsk) == 1;
}
extern struct pid *cad_pid;

extern void free_task(struct task_struct *tsk);

#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)

extern void __put_task_struct(struct task_struct *t);

static inline void put_task_struct(struct task_struct *t)
{
	if (atomic_dec_and_test(&t->usage))
		__put_task_struct(t);
}
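/*
 * Illustrative sketch (assumed usage, not part of this header): pinning a
 * task beyond the scope of an RCU read-side critical section.
 *
 *	rcu_read_lock();
 *	tsk = find_task_by_vpid(nr);
 *	if (tsk)
 *		get_task_struct(tsk);
 *	rcu_read_unlock();
 *	if (tsk) {
 *		// ... use tsk ...
 *		put_task_struct(tsk);
 *	}
 */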
struct task_struct *task_rcu_dereference(struct task_struct **ptask);
struct task_struct *try_get_task_struct(struct task_struct **ptask);
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
extern void task_cputime(struct task_struct *t,
			 u64 *utime, u64 *stime);
extern u64 task_gtime(struct task_struct *t);
#else
static inline void task_cputime(struct task_struct *t,
				u64 *utime, u64 *stime)
{
	*utime = t->utime;
	*stime = t->stime;
}

static inline u64 task_gtime(struct task_struct *t)
{
	return t->gtime;
}
#endif

#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
static inline void task_cputime_scaled(struct task_struct *t,
				       u64 *utimescaled,
				       u64 *stimescaled)
{
	*utimescaled = t->utimescaled;
	*stimescaled = t->stimescaled;
}
#else
static inline void task_cputime_scaled(struct task_struct *t,
				       u64 *utimescaled,
				       u64 *stimescaled)
{
	task_cputime(t, utimescaled, stimescaled);
}
#endif
extern void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st);
extern void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st);
#define PF_IDLE		0x00000002	/* I am an IDLE thread */
#define PF_EXITING	0x00000004	/* getting shut down */
#define PF_EXITPIDONE	0x00000008	/* pi exit done on shut down */
#define PF_VCPU		0x00000010	/* I'm a virtual CPU */
#define PF_WQ_WORKER	0x00000020	/* I'm a workqueue worker */
#define PF_FORKNOEXEC	0x00000040	/* forked but didn't exec */
#define PF_MCE_PROCESS	0x00000080	/* process policy on mce errors */
#define PF_SUPERPRIV	0x00000100	/* used super-user privileges */
#define PF_DUMPCORE	0x00000200	/* dumped core */
#define PF_SIGNALED	0x00000400	/* killed by a signal */
#define PF_MEMALLOC	0x00000800	/* Allocating memory */
#define PF_NPROC_EXCEEDED 0x00001000	/* set_user noticed that RLIMIT_NPROC was exceeded */
#define PF_USED_MATH	0x00002000	/* if unset the fpu must be initialized before use */
#define PF_USED_ASYNC	0x00004000	/* used async_schedule*(), used by module init */
#define PF_NOFREEZE	0x00008000	/* this thread should not be frozen */
#define PF_FROZEN	0x00010000	/* frozen for system suspend */
#define PF_FSTRANS	0x00020000	/* inside a filesystem transaction */
#define PF_KSWAPD	0x00040000	/* I am kswapd */
#define PF_MEMALLOC_NOIO 0x00080000	/* Allocating memory without IO involved */
#define PF_LESS_THROTTLE 0x00100000	/* Throttle me less: I clean memory */
#define PF_KTHREAD	0x00200000	/* I am a kernel thread */
#define PF_RANDOMIZE	0x00400000	/* randomize virtual address space */
#define PF_SWAPWRITE	0x00800000	/* Allowed to write to swap */
#define PF_NO_SETAFFINITY 0x04000000	/* Userland is not allowed to meddle with cpus_allowed */
#define PF_MCE_EARLY	0x08000000	/* Early kill for mce process policy */
#define PF_MUTEX_TESTER	0x20000000	/* Thread belongs to the rt mutex tester */
#define PF_FREEZER_SKIP	0x40000000	/* Freezer should not count it as freezable */
#define PF_SUSPEND_TASK 0x80000000	/* this thread called freeze_processes and should not be frozen */
/*
 * Only the _current_ task can read/write to tsk->flags, but other
 * tasks can access tsk->flags in readonly mode for example
 * with tsk_used_math (like during threaded core dumping).
 * There is however an exception to this rule during ptrace
 * or during fork: the ptracer task is allowed to write to the
 * child->flags of its traced child (same goes for fork, the parent
 * can write to the child->flags), because we're guaranteed the
 * child is not running and in turn not changing child->flags
 * at the same time the parent does it.
 */
#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
#define clear_used_math() clear_stopped_child_used_math(current)
#define set_used_math() set_stopped_child_used_math(current)
#define conditional_stopped_child_used_math(condition, child) \
	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
#define conditional_used_math(condition) \
	conditional_stopped_child_used_math(condition, current)
#define copy_to_stopped_child_used_math(child) \
	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
#define used_math() tsk_used_math(current)
/* Per-process atomic flags. */
#define PFA_NO_NEW_PRIVS 0	/* May not gain new privileges. */
#define PFA_SPREAD_PAGE  1	/* Spread page cache over cpuset */
#define PFA_SPREAD_SLAB  2	/* Spread some slab caches over cpuset */
#define PFA_LMK_WAITING  3	/* Lowmemorykiller is waiting */


#define TASK_PFA_TEST(name, func)					\
	static inline bool task_##func(struct task_struct *p)		\
	{ return test_bit(PFA_##name, &p->atomic_flags); }
#define TASK_PFA_SET(name, func)					\
	static inline void task_set_##func(struct task_struct *p)	\
	{ set_bit(PFA_##name, &p->atomic_flags); }
#define TASK_PFA_CLEAR(name, func)					\
	static inline void task_clear_##func(struct task_struct *p)	\
	{ clear_bit(PFA_##name, &p->atomic_flags); }
TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)

TASK_PFA_TEST(SPREAD_PAGE, spread_page)
TASK_PFA_SET(SPREAD_PAGE, spread_page)
TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)

TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
TASK_PFA_SET(SPREAD_SLAB, spread_slab)
TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)

TASK_PFA_TEST(LMK_WAITING, lmk_waiting)
TASK_PFA_SET(LMK_WAITING, lmk_waiting)
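/*
 * Illustrative sketch (not part of this header): each TASK_PFA_* line above
 * generates an inline accessor, so for PFA_SPREAD_PAGE the helpers are used
 * like this:
 *
 *	if (task_spread_page(current))
 *		task_clear_spread_page(current);
 *	else
 *		task_set_spread_page(current);
 */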
static inline void tsk_restore_flags(struct task_struct *task,
				unsigned long orig_flags, unsigned long flags)
{
	task->flags &= ~flags;
	task->flags |= orig_flags & flags;
}
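/*
 * Illustrative sketch (assumed usage, modelled on the memalloc_noio_save()/
 * memalloc_noio_restore() pattern): saving one PF_* bit around a region and
 * putting it back with tsk_restore_flags().
 *
 *	unsigned long old_flags = current->flags & PF_MEMALLOC_NOIO;
 *
 *	current->flags |= PF_MEMALLOC_NOIO;
 *	// ... allocations here will not recurse into I/O ...
 *	tsk_restore_flags(current, old_flags, PF_MEMALLOC_NOIO);
 */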
extern int cpuset_cpumask_can_shrink(const struct cpumask *cur,
				     const struct cpumask *trial);
extern int task_can_attach(struct task_struct *p,
			   const struct cpumask *cs_cpus_allowed);

#ifdef CONFIG_SMP
extern void do_set_cpus_allowed(struct task_struct *p,
			       const struct cpumask *new_mask);
extern int set_cpus_allowed_ptr(struct task_struct *p,
				const struct cpumask *new_mask);
#else
static inline void do_set_cpus_allowed(struct task_struct *p,
				      const struct cpumask *new_mask)
{
}
static inline int set_cpus_allowed_ptr(struct task_struct *p,
				       const struct cpumask *new_mask)
{
	if (!cpumask_test_cpu(0, new_mask))
		return -EINVAL;
	return 0;
}
#endif
#ifndef cpu_relax_yield
#define cpu_relax_yield() cpu_relax()
#endif

extern unsigned long long
task_sched_runtime(struct task_struct *task);

/* sched_exec is called by processes performing an exec */
#ifdef CONFIG_SMP
extern void sched_exec(void);
#else
#define sched_exec()   {}
#endif
extern int yield_to(struct task_struct *p, bool preempt);
extern void set_user_nice(struct task_struct *p, long nice);
extern int task_prio(const struct task_struct *p);
/**
 * task_nice - return the nice value of a given task.
 * @p: the task in question.
 *
 * Return: The nice value [ -20 ... 0 ... 19 ].
 */
static inline int task_nice(const struct task_struct *p)
{
	return PRIO_TO_NICE((p)->static_prio);
}
extern int can_nice(const struct task_struct *p, const int nice);
extern int task_curr(const struct task_struct *p);
extern int idle_cpu(int cpu);
extern int sched_setscheduler(struct task_struct *, int,
			      const struct sched_param *);
extern int sched_setscheduler_nocheck(struct task_struct *, int,
				      const struct sched_param *);
extern int sched_setattr(struct task_struct *,
			 const struct sched_attr *);
extern struct task_struct *idle_task(int cpu);
/**
 * is_idle_task - is the specified task an idle task?
 * @p: the task in question.
 *
 * Return: 1 if @p is an idle task. 0 otherwise.
 */
static inline bool is_idle_task(const struct task_struct *p)
{
	return !!(p->flags & PF_IDLE);
}
extern struct task_struct *curr_task(int cpu);
extern void ia64_set_curr_task(int cpu, struct task_struct *p);
union thread_union {
#ifndef CONFIG_THREAD_INFO_IN_TASK
	struct thread_info thread_info;
#endif
	unsigned long stack[THREAD_SIZE/sizeof(long)];
};

#ifdef CONFIG_THREAD_INFO_IN_TASK
static inline struct thread_info *task_thread_info(struct task_struct *task)
{
	return &task->thread_info;
}
#elif !defined(__HAVE_THREAD_FUNCTIONS)
# define task_thread_info(task)	((struct thread_info *)(task)->stack)
#endif

#ifndef __HAVE_ARCH_KSTACK_END
static inline int kstack_end(void *addr)
{
	/* Reliable end of stack detection:
	 * Some APM bios versions misalign the stack
	 */
	return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
}
#endif
extern union thread_union init_thread_union;
extern struct task_struct init_task;

extern struct pid_namespace init_pid_ns;
/*
 * find a task by one of its numerical ids
 *
 * find_task_by_pid_ns():
 *      finds a task by its pid in the specified namespace
 * find_task_by_vpid():
 *      finds a task by its virtual pid
 *
 * see also find_vpid() etc in include/linux/pid.h
 */

extern struct task_struct *find_task_by_vpid(pid_t nr);
extern struct task_struct *find_task_by_pid_ns(pid_t nr,
		struct pid_namespace *ns);
extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
extern void wake_up_new_task(struct task_struct *tsk);
#ifdef CONFIG_SMP
extern void kick_process(struct task_struct *tsk);
#else
static inline void kick_process(struct task_struct *tsk) { }
#endif
extern void exit_files(struct task_struct *);
extern void exit_itimers(struct signal_struct *);

extern int do_execve(struct filename *,
		     const char __user * const __user *,
		     const char __user * const __user *);
extern int do_execveat(int, struct filename *,
		       const char __user * const __user *,
		       const char __user * const __user *,
		       int);
extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);
static inline void set_task_comm(struct task_struct *tsk, const char *from)
{
	__set_task_comm(tsk, from, false);
}
extern char *get_task_comm(char *to, struct task_struct *tsk);
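/*
 * Illustrative sketch (assumed usage): the destination buffer passed to
 * get_task_comm() must hold at least TASK_COMM_LEN bytes.
 *
 *	char comm[TASK_COMM_LEN];
 *
 *	get_task_comm(comm, current);
 *	pr_debug("current comm: %s\n", comm);
 */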
#ifdef CONFIG_SMP
void scheduler_ipi(void);
extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
#else
static inline void scheduler_ipi(void) { }
static inline unsigned long wait_task_inactive(struct task_struct *p,
					       long match_state)
{
	return 1;
}
#endif
/*
 * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
 * subscriptions and synchronises with wait4().  Also used in procfs.  Also
 * pins the final release of task.io_context.  Also protects ->cpuset and
 * ->cgroup.subsys[]. And ->vfork_done.
 *
 * Nests both inside and outside of read_lock(&tasklist_lock).
 * It must not be nested with write_lock_irq(&tasklist_lock),
 * neither inside nor outside.
 */
static inline void task_lock(struct task_struct *p)
{
	spin_lock(&p->alloc_lock);
}

static inline void task_unlock(struct task_struct *p)
{
	spin_unlock(&p->alloc_lock);
}
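/*
 * Illustrative sketch (assumed usage, roughly what get_task_comm() does):
 * take alloc_lock before reading another task's ->comm so it cannot change
 * underneath us.
 *
 *	task_lock(p);
 *	strncpy(buf, p->comm, TASK_COMM_LEN);
 *	task_unlock(p);
 */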
/* set thread flags in other task's structures
 * - see asm/thread_info.h for TIF_xxxx flags available
 */
static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void set_tsk_need_resched(struct task_struct *tsk)
{
	set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
}

static inline void clear_tsk_need_resched(struct task_struct *tsk)
{
	clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
}

static inline int test_tsk_need_resched(struct task_struct *tsk)
{
	return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
}
/*
 * cond_resched() and cond_resched_lock(): latency reduction via
 * explicit rescheduling in places that are safe. The return
 * value indicates whether a reschedule was done in fact.
 * cond_resched_lock() will drop the spinlock before scheduling,
 * cond_resched_softirq() will enable bhs before scheduling.
 */
#ifndef CONFIG_PREEMPT
extern int _cond_resched(void);
#else
static inline int _cond_resched(void) { return 0; }
#endif

#define cond_resched() ({			\
	___might_sleep(__FILE__, __LINE__, 0);	\
	_cond_resched();			\
})

extern int __cond_resched_lock(spinlock_t *lock);

#define cond_resched_lock(lock) ({				\
	___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
	__cond_resched_lock(lock);				\
})

extern int __cond_resched_softirq(void);

#define cond_resched_softirq() ({					\
	___might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET);	\
	__cond_resched_softirq();					\
})
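/*
 * Illustrative sketch (assumed usage): breaking up a long-running loop so
 * that !CONFIG_PREEMPT kernels do not hog the CPU. process_item() is a
 * hypothetical helper.
 *
 *	for (i = 0; i < nr_items; i++) {
 *		process_item(i);
 *		cond_resched();
 *	}
 */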
static inline void cond_resched_rcu(void)
{
#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
	rcu_read_unlock();
	cond_resched();
	rcu_read_lock();
#endif
}

/*
 * Does a critical section need to be broken due to another
 * task waiting?: (technically does not depend on CONFIG_PREEMPT,
 * but a general need for low latency)
 */
static inline int spin_needbreak(spinlock_t *lock)
{
#ifdef CONFIG_PREEMPT
	return spin_is_contended(lock);
#else
	return 0;
#endif
}

static __always_inline bool need_resched(void)
{
	return unlikely(tif_need_resched());
}
/*
 * Thread group CPU time accounting.
 */
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);
/*
 * Wrappers for p->thread_info->cpu access. No-op on UP.
 */
#ifdef CONFIG_SMP

static inline unsigned int task_cpu(const struct task_struct *p)
{
#ifdef CONFIG_THREAD_INFO_IN_TASK
	return p->cpu;
#else
	return task_thread_info(p)->cpu;
#endif
}

static inline int task_node(const struct task_struct *p)
{
	return cpu_to_node(task_cpu(p));
}

extern void set_task_cpu(struct task_struct *p, unsigned int cpu);

#else

static inline unsigned int task_cpu(const struct task_struct *p)
{
	return 0;
}

static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
{
}

#endif /* CONFIG_SMP */
/*
 * In order to reduce various lock holder preemption latencies provide an
 * interface to see if a vCPU is currently running or not.
 *
 * This allows us to terminate optimistic spin loops and block, analogous to
 * the native optimistic spin heuristic of testing if the lock owner task is
 * running or not.
 */
#ifndef vcpu_is_preempted
# define vcpu_is_preempted(cpu)	false
#endif
extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
extern long sched_getaffinity(pid_t pid, struct cpumask *mask);

extern int task_can_switch_user(struct user_struct *up,
					struct task_struct *tsk);

#ifndef TASK_SIZE_OF
#define TASK_SIZE_OF(tsk)	TASK_SIZE
#endif