#include <uapi/linux/sched.h>

#include <linux/sched/prio.h>

#include <asm/param.h>	/* for HZ */

#include <linux/capability.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/plist.h>
#include <linux/rbtree.h>
#include <linux/thread_info.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/nodemask.h>
#include <linux/mm_types.h>
#include <linux/preempt.h>

#include <asm/ptrace.h>

#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/shm.h>
#include <linux/signal.h>
#include <linux/compiler.h>
#include <linux/completion.h>
#include <linux/signal_types.h>
#include <linux/pid.h>
#include <linux/percpu.h>
#include <linux/topology.h>
#include <linux/seccomp.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/rtmutex.h>

#include <linux/time.h>
#include <linux/param.h>
#include <linux/resource.h>
#include <linux/timer.h>
#include <linux/hrtimer.h>
#include <linux/kcov.h>
#include <linux/task_io_accounting.h>
#include <linux/latencytop.h>
#include <linux/cred.h>
#include <linux/llist.h>
#include <linux/uidgid.h>
#include <linux/gfp.h>
#include <linux/topology.h>
#include <linux/magic.h>
#include <linux/cgroup-defs.h>

#include <asm/processor.h>
struct futex_pi_state;
struct robust_list_head;
struct perf_event_context;
struct sighand_struct;

extern unsigned long total_forks;
extern int nr_threads;
DECLARE_PER_CPU(unsigned long, process_counts);
extern int nr_processes(void);
extern unsigned long nr_running(void);
extern bool single_task_running(void);
extern unsigned long nr_iowait(void);
extern unsigned long nr_iowait_cpu(int cpu);
extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);

#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
extern void cpu_load_update_nohz_start(void);
extern void cpu_load_update_nohz_stop(void);
#else
static inline void cpu_load_update_nohz_start(void) { }
static inline void cpu_load_update_nohz_stop(void) { }
#endif

extern void dump_cpu_task(int cpu);

#ifdef CONFIG_SCHED_DEBUG
extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
extern void proc_sched_set_task(struct task_struct *p);
/*
 * Task state bitmask. NOTE! These bits are also
 * encoded in fs/proc/array.c: get_task_state().
 *
 * We have two separate sets of flags: task->state
 * is about runnability, while task->exit_state are
 * about the task exiting. Confusing, but this way
 * modifying one set can't modify the other one by
 * mistake.
 */
#define TASK_RUNNING		0
#define TASK_INTERRUPTIBLE	1
#define TASK_UNINTERRUPTIBLE	2
#define __TASK_STOPPED		4
#define __TASK_TRACED		8
/* in tsk->exit_state */
#define EXIT_ZOMBIE		32
#define EXIT_TRACE		(EXIT_ZOMBIE | EXIT_DEAD)
/* in tsk->state again */
#define TASK_WAKEKILL		128
#define TASK_WAKING		256
#define TASK_PARKED		512
#define TASK_NOLOAD		1024
#define TASK_NEW		2048
#define TASK_STATE_MAX		4096

#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWPNn"

/* Convenience macros for the sake of set_current_state */
#define TASK_KILLABLE		(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
#define TASK_STOPPED		(TASK_WAKEKILL | __TASK_STOPPED)
#define TASK_TRACED		(TASK_WAKEKILL | __TASK_TRACED)

#define TASK_IDLE		(TASK_UNINTERRUPTIBLE | TASK_NOLOAD)

/* Convenience macros for the sake of wake_up */
#define TASK_NORMAL		(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
#define TASK_ALL		(TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)

/* get_task_state() */
#define TASK_REPORT		(TASK_RUNNING | TASK_INTERRUPTIBLE | \
				 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
				 __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD)

#define task_is_traced(task)	((task->state & __TASK_TRACED) != 0)
#define task_is_stopped(task)	((task->state & __TASK_STOPPED) != 0)
#define task_is_stopped_or_traced(task)	\
			((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
#define task_contributes_to_load(task)	\
				((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
				 (task->flags & PF_FROZEN) == 0 && \
				 (task->state & TASK_NOLOAD) == 0)

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP

#define __set_current_state(state_value)			\
	do {							\
		current->task_state_change = _THIS_IP_;		\
		current->state = (state_value);			\
	} while (0)
#define set_current_state(state_value)				\
	do {							\
		current->task_state_change = _THIS_IP_;		\
		smp_store_mb(current->state, (state_value));	\
	} while (0)

#else

/*
 * set_current_state() includes a barrier so that the write of current->state
 * is correctly serialised wrt the caller's subsequent test of whether to
 * actually sleep:
 *
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	__set_current_state(TASK_RUNNING);
 *
 * If the caller does not need such serialisation (because, for instance, the
 * condition test and condition change and wakeup are under the same lock) then
 * use __set_current_state().
 *
 * The above is typically ordered against the wakeup, which does:
 *
 *	need_sleep = false;
 *	wake_up_state(p, TASK_UNINTERRUPTIBLE);
 *
 * Where wake_up_state() (and all other wakeup primitives) imply enough
 * barriers to order the store of the variable against wakeup.
 *
 * Wakeup will do: if (@state & p->state) p->state = TASK_RUNNING, that is,
 * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a
 * TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING).
 *
 * This is obviously fine, since they both store the exact same value.
 *
 * Also see the comments of try_to_wake_up().
 */
#define __set_current_state(state_value)		\
	do { current->state = (state_value); } while (0)
#define set_current_state(state_value)			\
	smp_store_mb(current->state, (state_value))

#endif
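/*
 * Illustrative sketch (not part of this header): a typical wait loop built on
 * the primitives above. The names `need_sleep` and `p` are hypothetical; the
 * pattern mirrors the ordering rules described in the comment block above.
 *
 *	for (;;) {
 *		set_current_state(TASK_UNINTERRUPTIBLE);
 *		if (!need_sleep)
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 *
 * The waker side would then do:
 *
 *	need_sleep = false;
 *	wake_up_state(p, TASK_UNINTERRUPTIBLE);
 */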
/* Task command name length */
#define TASK_COMM_LEN 16

#include <linux/spinlock.h>

/*
 * This serializes "schedule()" and also protects
 * the run-queue from deletions/modifications (but
 * _adding_ to the beginning of the run-queue has
 * a separate lock).
 */
extern rwlock_t tasklist_lock;
extern spinlock_t mmlist_lock;

#ifdef CONFIG_PROVE_RCU
extern int lockdep_tasklist_lock_is_held(void);
#endif /* #ifdef CONFIG_PROVE_RCU */

extern void sched_init(void);
extern void sched_init_smp(void);
extern asmlinkage void schedule_tail(struct task_struct *prev);
extern void init_idle(struct task_struct *idle, int cpu);
extern void init_idle_bootup_task(struct task_struct *idle);

extern cpumask_var_t cpu_isolated_map;

extern int runqueue_is_locked(int cpu);

#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
extern void nohz_balance_enter_idle(int cpu);
extern void set_cpu_sd_state_idle(void);
extern int get_nohz_timer_target(void);
#else
static inline void nohz_balance_enter_idle(int cpu) { }
static inline void set_cpu_sd_state_idle(void) { }
#endif

/*
 * Only dump TASK_* tasks. (0 for all tasks)
 */
extern void show_state_filter(unsigned long state_filter);

static inline void show_state(void)
{
	show_state_filter(0);
}

extern void show_regs(struct pt_regs *);

/*
 * TASK is a pointer to the task whose backtrace we want to see (or NULL for current
 * task), SP is the stack pointer of the first frame that should be shown in the back
 * trace (or NULL if the entire call-chain of the task should be shown).
 */
extern void show_stack(struct task_struct *task, unsigned long *sp);

extern void cpu_init(void);
extern void trap_init(void);
extern void update_process_times(int user);
extern void scheduler_tick(void);
extern int sched_cpu_starting(unsigned int cpu);
extern int sched_cpu_activate(unsigned int cpu);
extern int sched_cpu_deactivate(unsigned int cpu);

#ifdef CONFIG_HOTPLUG_CPU
extern int sched_cpu_dying(unsigned int cpu);
#else
# define sched_cpu_dying	NULL
#endif

extern void sched_show_task(struct task_struct *p);
#ifdef CONFIG_LOCKUP_DETECTOR
extern void touch_softlockup_watchdog_sched(void);
extern void touch_softlockup_watchdog(void);
extern void touch_softlockup_watchdog_sync(void);
extern void touch_all_softlockup_watchdogs(void);
extern int proc_dowatchdog_thresh(struct ctl_table *table, int write,
				  void __user *buffer,
				  size_t *lenp, loff_t *ppos);
extern unsigned int softlockup_panic;
extern unsigned int hardlockup_panic;
void lockup_detector_init(void);
#else
static inline void touch_softlockup_watchdog_sched(void)
{
}
static inline void touch_softlockup_watchdog(void)
{
}
static inline void touch_softlockup_watchdog_sync(void)
{
}
static inline void touch_all_softlockup_watchdogs(void)
{
}
static inline void lockup_detector_init(void)
{
}
#endif

#ifdef CONFIG_DETECT_HUNG_TASK
void reset_hung_task_detector(void);
#else
static inline void reset_hung_task_detector(void)
{
}
#endif

/* Attach to any functions which should be ignored in wchan output. */
#define __sched		__attribute__((__section__(".sched.text")))

/* Linker adds these: start and end of __sched functions */
extern char __sched_text_start[], __sched_text_end[];

/* Is this address in the __sched functions? */
extern int in_sched_functions(unsigned long addr);

#define	MAX_SCHEDULE_TIMEOUT	LONG_MAX
extern signed long schedule_timeout(signed long timeout);
extern signed long schedule_timeout_interruptible(signed long timeout);
extern signed long schedule_timeout_killable(signed long timeout);
extern signed long schedule_timeout_uninterruptible(signed long timeout);
extern signed long schedule_timeout_idle(signed long timeout);
asmlinkage void schedule(void);
extern void schedule_preempt_disabled(void);

extern int __must_check io_schedule_prepare(void);
extern void io_schedule_finish(int token);
extern long io_schedule_timeout(long timeout);
extern void io_schedule(void);

void __noreturn do_task_dead(void);
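/*
 * Illustrative sketch (not part of this header): sleeping for roughly 100ms
 * with the schedule_timeout() family. Timeouts are expressed in jiffies, so
 * callers normally convert with msecs_to_jiffies():
 *
 *	long remaining = schedule_timeout_interruptible(msecs_to_jiffies(100));
 *
 * A non-zero return means the task was woken early (for example by a signal)
 * with `remaining` jiffies still left of the requested timeout.
 */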
struct user_namespace;

#ifdef CONFIG_MMU
extern void arch_pick_mmap_layout(struct mm_struct *mm);
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
		       unsigned long, unsigned long);
extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
			  unsigned long len, unsigned long pgoff,
			  unsigned long flags);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
#endif

struct pacct_struct {
	unsigned long		ac_mem;
	u64			ac_utime, ac_stime;
	unsigned long		ac_minflt, ac_majflt;
};

/**
 * struct prev_cputime - snapshot of system and user cputime
 * @utime: time spent in user mode
 * @stime: time spent in system mode
 * @lock: protects the above two fields
 *
 * Stores previous user/system time values such that we can guarantee
 * monotonicity.
 */
struct prev_cputime {
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	u64 utime;
	u64 stime;
	raw_spinlock_t lock;
#endif
};

static inline void prev_cputime_init(struct prev_cputime *prev)
{
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	prev->utime = prev->stime = 0;
	raw_spin_lock_init(&prev->lock);
#endif
}

/**
 * struct task_cputime - collected CPU time counts
 * @utime:		time spent in user mode, in nanoseconds
 * @stime:		time spent in kernel mode, in nanoseconds
 * @sum_exec_runtime:	total time spent on the CPU, in nanoseconds
 *
 * This structure groups together three kinds of CPU time that are tracked for
 * threads and thread groups.  Most things considering CPU time want to group
 * these counts together and treat all three of them in parallel.
 */
struct task_cputime {
	u64 utime;
	u64 stime;
	unsigned long long sum_exec_runtime;
};

/* Alternate field names when used to cache expirations. */
#define virt_exp	utime
#define prof_exp	stime
#define sched_exp	sum_exec_runtime

/*
 * This is the atomic variant of task_cputime, which can be used for
 * storing and updating task_cputime statistics without locking.
 */
struct task_cputime_atomic {
	atomic64_t utime;
	atomic64_t stime;
	atomic64_t sum_exec_runtime;
};

#define INIT_CPUTIME_ATOMIC \
	(struct task_cputime_atomic) {				\
		.utime = ATOMIC64_INIT(0),			\
		.stime = ATOMIC64_INIT(0),			\
		.sum_exec_runtime = ATOMIC64_INIT(0),		\
	}

#define PREEMPT_DISABLED	(PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)

/*
 * Disable preemption until the scheduler is running -- use an unconditional
 * value so that it also works on !PREEMPT_COUNT kernels.
 *
 * Reset by start_kernel()->sched_init()->init_idle()->init_idle_preempt_count().
 */
#define INIT_PREEMPT_COUNT	PREEMPT_OFFSET

/*
 * Initial preempt_count value; reflects the preempt_count schedule invariant
 * which states that during context switches:
 *
 *    preempt_count() == 2*PREEMPT_DISABLE_OFFSET
 *
 * Note: PREEMPT_DISABLE_OFFSET is 0 for !PREEMPT_COUNT kernels.
 * Note: See finish_task_switch().
 */
#define FORK_PREEMPT_COUNT	(2*PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)

/**
 * struct thread_group_cputimer - thread group interval timer counts
 * @cputime_atomic:	atomic thread group interval timers.
 * @running:		true when there are timers running and
 *			@cputime_atomic receives updates.
 * @checking_timer:	true when a thread in the group is in the
 *			process of checking for thread group timers.
 *
 * This structure contains the version of task_cputime, above, that is
 * used for thread group CPU timer calculations.
 */
struct thread_group_cputimer {
	struct task_cputime_atomic cputime_atomic;
	bool running;
	bool checking_timer;
};
#include <linux/rwsem.h>

/*
 * Some day this will be a full-fledged user tracking system..
 */
struct user_struct {
	atomic_t __count;	/* reference count */
	atomic_t processes;	/* How many processes does this user have? */
	atomic_t sigpending;	/* How many pending signals does this user have? */
#ifdef CONFIG_FANOTIFY
	atomic_t fanotify_listeners;
#endif
	atomic_long_t epoll_watches;	/* The number of file descriptors currently watched */
#ifdef CONFIG_POSIX_MQUEUE
	/* protected by mq_lock	*/
	unsigned long mq_bytes;	/* How many bytes can be allocated to mqueue? */
#endif
	unsigned long locked_shm; /* How many pages of mlocked shm ? */
	unsigned long unix_inflight;	/* How many files in flight in unix sockets */
	atomic_long_t pipe_bufs;  /* how many pages are allocated in pipe buffers */

	struct key *uid_keyring;	/* UID specific keyring */
	struct key *session_keyring;	/* UID's default session keyring */

	/* Hash table maintenance information */
	struct hlist_node uidhash_node;

#if defined(CONFIG_PERF_EVENTS) || defined(CONFIG_BPF_SYSCALL)
	atomic_long_t locked_vm;
#endif
};

extern int uids_sysfs_init(void);

extern struct user_struct *find_user(kuid_t);

extern struct user_struct root_user;
#define INIT_USER (&root_user)

struct backing_dev_info;
struct reclaim_state;

#ifdef CONFIG_SCHED_INFO
struct sched_info {
	/* cumulative counters */
	unsigned long pcount;	      /* # of times run on this cpu */
	unsigned long long run_delay; /* time spent waiting on a runqueue */

	unsigned long long last_arrival,/* when we last ran on a cpu */
			   last_queued;	/* when we were last queued to run */
};
#endif /* CONFIG_SCHED_INFO */

struct task_delay_info;

static inline int sched_info_on(void)
{
#ifdef CONFIG_SCHEDSTATS
#elif defined(CONFIG_TASK_DELAY_ACCT)
	extern int delayacct_on;
#endif
}

#ifdef CONFIG_SCHEDSTATS
void force_schedstat_enabled(void);
#endif

/*
 * Integer metrics need fixed point arithmetic, e.g., sched/fair
 * has a few: load, load_avg, util_avg, freq, and capacity.
 *
 * We define a basic fixed point arithmetic range, and then formalize
 * all these metrics based on that basic range.
 */
# define SCHED_FIXEDPOINT_SHIFT	10
# define SCHED_FIXEDPOINT_SCALE	(1L << SCHED_FIXEDPOINT_SHIFT)

struct io_context;			/* See blkdev.h */


#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
extern void prefetch_stack(struct task_struct *t);
#else
static inline void prefetch_stack(struct task_struct *t) { }
#endif

struct audit_context;		/* See audit.c */

struct pipe_inode_info;
struct uts_namespace;

struct load_weight {
	unsigned long weight;
/*
 * The load_avg/util_avg accumulates an infinite geometric series
 * (see __update_load_avg() in kernel/sched/fair.c).
 *
 * [load_avg definition]
 *
 *   load_avg = runnable% * scale_load_down(load)
 *
 * where runnable% is the time ratio that a sched_entity is runnable.
 * For cfs_rq, it is the aggregated load_avg of all runnable and
 * blocked sched_entities.
 *
 * load_avg may also take frequency scaling into account:
 *
 *   load_avg = runnable% * scale_load_down(load) * freq%
 *
 * where freq% is the CPU frequency normalized to the highest frequency.
 *
 * [util_avg definition]
 *
 *   util_avg = running% * SCHED_CAPACITY_SCALE
 *
 * where running% is the time ratio that a sched_entity is running on
 * a CPU. For cfs_rq, it is the aggregated util_avg of all runnable
 * and blocked sched_entities.
 *
 * util_avg may also factor frequency scaling and CPU capacity scaling:
 *
 *   util_avg = running% * SCHED_CAPACITY_SCALE * freq% * capacity%
 *
 * where freq% is the same as above, and capacity% is the CPU capacity
 * normalized to the greatest capacity (due to uarch differences, etc).
 *
 * N.B., the above ratios (runnable%, running%, freq%, and capacity%)
 * themselves are in the range of [0, 1]. To do fixed point arithmetic,
 * we therefore scale them to as large a range as necessary. This is for
 * example reflected by util_avg's SCHED_CAPACITY_SCALE.
 *
 * The 64-bit load_sum can have 4353082796 (=2^64/47742/88761) entities
 * with the highest load (=88761), always runnable on a single cfs_rq,
 * and should not overflow as the number already hits PID_MAX_LIMIT.
 *
 * For all other cases (including 32-bit kernels), struct load_weight's
 * weight will overflow first before we do, because:
 *
 *    Max(load_avg) <= Max(load.weight)
 *
 * Then it is the load_weight's responsibility to consider overflow
 * issues.
 */
struct sched_avg {
	u64 last_update_time, load_sum;
	u32 util_sum, period_contrib;
	unsigned long load_avg, util_avg;
};
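/*
 * Illustrative sketch (not part of this header): with the fixed-point
 * convention above, the ratios are carried as integers scaled by 1024
 * (SCHED_CAPACITY_SCALE / SCHED_FIXEDPOINT_SCALE). A hypothetical
 * sched_entity that was runnable and running half the time, with a
 * scaled-down weight of 1024, would end up with roughly:
 *
 *	load_avg ~= (512 * 1024) >> SCHED_FIXEDPOINT_SHIFT = 512
 *	util_avg ~= 512			i.e. 50% of SCHED_CAPACITY_SCALE
 *
 * where 512 is the fixed-point encoding of runnable% = running% = 0.5.
 */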
#ifdef CONFIG_SCHEDSTATS
struct sched_statistics {
	s64			sum_sleep_runtime;

	u64			nr_migrations_cold;
	u64			nr_failed_migrations_affine;
	u64			nr_failed_migrations_running;
	u64			nr_failed_migrations_hot;
	u64			nr_forced_migrations;

	u64			nr_wakeups_migrate;
	u64			nr_wakeups_local;
	u64			nr_wakeups_remote;
	u64			nr_wakeups_affine;
	u64			nr_wakeups_affine_attempts;
	u64			nr_wakeups_passive;
};
#endif

struct sched_entity {
	struct load_weight	load;		/* for load-balancing */
	struct rb_node		run_node;
	struct list_head	group_node;

	u64			sum_exec_runtime;
	u64			prev_sum_exec_runtime;

#ifdef CONFIG_SCHEDSTATS
	struct sched_statistics statistics;
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	struct sched_entity	*parent;
	/* rq on which this entity is (to be) queued: */
	struct cfs_rq		*cfs_rq;
	/* rq "owned" by this entity/group: */
#endif

	/*
	 * Per entity load average tracking.
	 *
	 * Put into separate cache line so it does not
	 * collide with read-mostly values above.
	 */
	struct sched_avg	avg ____cacheline_aligned_in_smp;
};

struct sched_rt_entity {
	struct list_head run_list;
	unsigned long timeout;
	unsigned long watchdog_stamp;
	unsigned int time_slice;
	unsigned short on_rq;
	unsigned short on_list;

	struct sched_rt_entity *back;
#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity	*parent;
	/* rq on which this entity is (to be) queued: */
	/* rq "owned" by this entity/group: */
#endif
};

struct sched_dl_entity {
	struct rb_node	rb_node;

	/*
	 * Original scheduling parameters. Copied here from sched_attr
	 * during sched_setattr(), they will remain the same until
	 * the next sched_setattr().
	 */
	u64 dl_runtime;		/* maximum runtime for each instance	*/
	u64 dl_deadline;	/* relative deadline of each instance	*/
	u64 dl_period;		/* separation of two instances (period) */
	u64 dl_bw;		/* dl_runtime / dl_deadline		*/

	/*
	 * Actual scheduling parameters. Initialized with the values above,
	 * they are continuously updated during task execution. Note that
	 * the remaining runtime could be < 0 in case we are in overrun.
	 */
	s64 runtime;		/* remaining runtime for this instance	*/
	u64 deadline;		/* absolute deadline for this instance	*/
	unsigned int flags;	/* specifying the scheduler behaviour	*/

	/*
	 * @dl_throttled tells if we exhausted the runtime. If so, the
	 * task has to wait for a replenishment to be performed at the
	 * next firing of dl_timer.
	 *
	 * @dl_boosted tells if we are boosted due to PI. If so we are
	 * outside bandwidth enforcement mechanism (but only until we
	 * exit the critical section);
	 *
	 * @dl_yielded tells if task gave up the cpu before consuming
	 * all its available runtime during the last job.
	 */
	int dl_throttled, dl_boosted, dl_yielded;

	/*
	 * Bandwidth enforcement timer. Each -deadline task has its
	 * own bandwidth to be enforced, thus we need one timer per task.
	 */
	struct hrtimer dl_timer;
};

	u8 pad;	/* Otherwise the compiler can store garbage here. */
	u32 s;	/* Set of bits. */

enum perf_event_task_context {
	perf_invalid_context = -1,
	perf_nr_task_contexts,
};

struct wake_q_node {
	struct wake_q_node *next;
};

/* Track pages that require TLB flushes */
struct tlbflush_unmap_batch {
	/*
	 * Each bit set is a CPU that potentially has a TLB entry for one of
	 * the PFNs being flushed. See set_tlb_ubc_flush_pending().
	 */
	struct cpumask cpumask;

	/* True if any bit in cpumask is set */

	/*
	 * If true then the PTE was dirty when unmapped. The entry must be
	 * flushed before IO is initiated or a stale TLB entry potentially
	 * allows an update without redirtying the page.
	 */
struct task_struct {
#ifdef CONFIG_THREAD_INFO_IN_TASK
	/*
	 * For reasons of header soup (see current_thread_info()), this
	 * must be the first element of task_struct.
	 */
	struct thread_info thread_info;
#endif
	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
	unsigned int flags;	/* per process flags, defined below */

	struct llist_node wake_entry;
#ifdef CONFIG_THREAD_INFO_IN_TASK
	unsigned int cpu;	/* current CPU */
#endif
	unsigned int wakee_flips;
	unsigned long wakee_flip_decay_ts;
	struct task_struct *last_wakee;

	int prio, static_prio, normal_prio;
	unsigned int rt_priority;
	const struct sched_class *sched_class;
	struct sched_entity se;
	struct sched_rt_entity rt;
#ifdef CONFIG_CGROUP_SCHED
	struct task_group *sched_task_group;
#endif
	struct sched_dl_entity dl;

#ifdef CONFIG_PREEMPT_NOTIFIERS
	/* list of struct preempt_notifier: */
	struct hlist_head preempt_notifiers;
#endif

#ifdef CONFIG_BLK_DEV_IO_TRACE
	unsigned int btrace_seq;
#endif

	cpumask_t cpus_allowed;

#ifdef CONFIG_PREEMPT_RCU
	int rcu_read_lock_nesting;
	union rcu_special rcu_read_unlock_special;
	struct list_head rcu_node_entry;
	struct rcu_node *rcu_blocked_node;
#endif /* #ifdef CONFIG_PREEMPT_RCU */
#ifdef CONFIG_TASKS_RCU
	unsigned long rcu_tasks_nvcsw;
	bool rcu_tasks_holdout;
	struct list_head rcu_tasks_holdout_list;
	int rcu_tasks_idle_cpu;
#endif /* #ifdef CONFIG_TASKS_RCU */

#ifdef CONFIG_SCHED_INFO
	struct sched_info sched_info;
#endif

	struct list_head tasks;
	struct plist_node pushable_tasks;
	struct rb_node pushable_dl_tasks;

	struct mm_struct *mm, *active_mm;

	/* Per-thread vma caching: */
	struct vmacache vmacache;

#if defined(SPLIT_RSS_COUNTING)
	struct task_rss_stat	rss_stat;
#endif
	int exit_code, exit_signal;
	int pdeath_signal;	/* The signal sent when the parent dies */
	unsigned long jobctl;	/* JOBCTL_*, siglock protected */

	/* Used for emulating ABI behavior of previous Linux versions */
	unsigned int personality;

	/* scheduler bits, serialized by scheduler locks */
	unsigned sched_reset_on_fork:1;
	unsigned sched_contributes_to_load:1;
	unsigned sched_migrated:1;
	unsigned sched_remote_wakeup:1;
	unsigned :0; /* force alignment to the next boundary */

	/* unserialized, strictly 'current' */
	unsigned in_execve:1; /* bit to tell LSMs we're in execve */
	unsigned in_iowait:1;
#if !defined(TIF_RESTORE_SIGMASK)
	unsigned restore_sigmask:1;
#endif
	unsigned memcg_may_oom:1;
	unsigned memcg_kmem_skip_account:1;
#ifdef CONFIG_COMPAT_BRK
	unsigned brk_randomized:1;
#endif

	unsigned long atomic_flags; /* Flags needing atomic access. */

	struct restart_block restart_block;

#ifdef CONFIG_CC_STACKPROTECTOR
	/* Canary value for the -fstack-protector gcc feature */
	unsigned long stack_canary;
#endif
	/*
	 * pointers to (original) parent process, youngest child, younger sibling,
	 * older sibling, respectively.  (p->father can be replaced with
	 * p->real_parent->pid)
	 */
	struct task_struct __rcu *real_parent; /* real parent process */
	struct task_struct __rcu *parent; /* recipient of SIGCHLD, wait4() reports */
	/*
	 * children/sibling forms the list of my natural children
	 */
	struct list_head children;	/* list of my children */
	struct list_head sibling;	/* linkage in my parent's children list */
	struct task_struct *group_leader;	/* threadgroup leader */

	/*
	 * ptraced is the list of tasks this task is using ptrace on.
	 * This includes both natural children and PTRACE_ATTACH targets.
	 * p->ptrace_entry is p's link on the p->parent->ptraced list.
	 */
	struct list_head ptraced;
	struct list_head ptrace_entry;

	/* PID/PID hash table linkage. */
	struct pid_link pids[PIDTYPE_MAX];
	struct list_head thread_group;
	struct list_head thread_node;

	struct completion *vfork_done;		/* for vfork() */
	int __user *set_child_tid;		/* CLONE_CHILD_SETTID */
	int __user *clear_child_tid;		/* CLONE_CHILD_CLEARTID */
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
	u64 utimescaled, stimescaled;
#endif
	struct prev_cputime prev_cputime;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
	seqcount_t vtime_seqcount;
	unsigned long long vtime_snap;
	/* Task is sleeping or running in a CPU with VTIME inactive */
	/* Task runs in userspace in a CPU with VTIME active */
	/* Task runs in kernelspace in a CPU with VTIME active */
#endif

#ifdef CONFIG_NO_HZ_FULL
	atomic_t tick_dep_mask;
#endif
	unsigned long nvcsw, nivcsw; /* context switch counts */
	u64 start_time;		/* monotonic time in nsec */
	u64 real_start_time;	/* boot based time in nsec */
	/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
	unsigned long min_flt, maj_flt;

#ifdef CONFIG_POSIX_TIMERS
	struct task_cputime cputime_expires;
	struct list_head cpu_timers[3];
#endif

	/* process credentials */
	const struct cred __rcu *ptracer_cred; /* Tracer's credentials at attach */
	const struct cred __rcu *real_cred; /* objective and real subjective task
					 * credentials (COW) */
	const struct cred __rcu *cred;	/* effective (overridable) subjective task
					 * credentials (COW) */
	char comm[TASK_COMM_LEN]; /* executable name excluding path
				     - access with [gs]et_task_comm (which lock
				       it with task_lock())
				     - initialized normally by setup_new_exec */
	/* file system info */
	struct nameidata *nameidata;
#ifdef CONFIG_SYSVIPC
	struct sysv_sem sysvsem;
	struct sysv_shm sysvshm;
#endif
#ifdef CONFIG_DETECT_HUNG_TASK
	/* hung task detection */
	unsigned long last_switch_count;
#endif
	/* filesystem information */
	struct fs_struct *fs;
	/* open file information */
	struct files_struct *files;
	struct nsproxy *nsproxy;
	/* signal handlers */
	struct signal_struct *signal;
	struct sighand_struct *sighand;

	sigset_t blocked, real_blocked;
	sigset_t saved_sigmask;	/* restored if set_restore_sigmask() was used */
	struct sigpending pending;

	unsigned long sas_ss_sp;
	unsigned sas_ss_flags;

	struct callback_head *task_works;

	struct audit_context *audit_context;
#ifdef CONFIG_AUDITSYSCALL
	unsigned int sessionid;
#endif
	struct seccomp seccomp;

	/* Thread group tracking */

	/* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed,
	 * mempolicy */
	spinlock_t alloc_lock;

	/* Protection of the PI data structures: */
	raw_spinlock_t pi_lock;

	struct wake_q_node wake_q;

#ifdef CONFIG_RT_MUTEXES
	/* PI waiters blocked on a rt_mutex held by this task */
	struct rb_root pi_waiters;
	struct rb_node *pi_waiters_leftmost;
	/* Deadlock detection and priority inheritance handling */
	struct rt_mutex_waiter *pi_blocked_on;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
	/* mutex deadlock detection */
	struct mutex_waiter *blocked_on;
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	unsigned int irq_events;
	unsigned long hardirq_enable_ip;
	unsigned long hardirq_disable_ip;
	unsigned int hardirq_enable_event;
	unsigned int hardirq_disable_event;
	int hardirqs_enabled;
	int hardirq_context;
	unsigned long softirq_disable_ip;
	unsigned long softirq_enable_ip;
	unsigned int softirq_disable_event;
	unsigned int softirq_enable_event;
	int softirqs_enabled;
	int softirq_context;
#endif
#ifdef CONFIG_LOCKDEP
# define MAX_LOCK_DEPTH 48UL
	unsigned int lockdep_recursion;
	struct held_lock held_locks[MAX_LOCK_DEPTH];
	gfp_t lockdep_reclaim_gfp;
#endif
	unsigned int in_ubsan;

	/* journalling filesystem info */

	/* stacked block device info */
	struct bio_list *bio_list;

	/* stack plugging */
	struct blk_plug *plug;

	struct reclaim_state *reclaim_state;

	struct backing_dev_info *backing_dev_info;

	struct io_context *io_context;

	unsigned long ptrace_message;
	siginfo_t *last_siginfo; /* For ptrace use.  */
	struct task_io_accounting ioac;
#if defined(CONFIG_TASK_XACCT)
	u64 acct_rss_mem1;	/* accumulated rss usage */
	u64 acct_vm_mem1;	/* accumulated virtual memory usage */
	u64 acct_timexpd;	/* stime + utime since last update */
#endif
#ifdef CONFIG_CPUSETS
	nodemask_t mems_allowed;	/* Protected by alloc_lock */
	seqcount_t mems_allowed_seq;	/* Sequence no to catch updates */
	int cpuset_mem_spread_rotor;
	int cpuset_slab_spread_rotor;
#endif
#ifdef CONFIG_CGROUPS
	/* Control Group info protected by css_set_lock */
	struct css_set __rcu *cgroups;
	/* cg_list protected by css_set_lock and tsk->alloc_lock */
	struct list_head cg_list;
#endif
#ifdef CONFIG_INTEL_RDT_A
#endif
	struct robust_list_head __user *robust_list;
#ifdef CONFIG_COMPAT
	struct compat_robust_list_head __user *compat_robust_list;
#endif
	struct list_head pi_state_list;
	struct futex_pi_state *pi_state_cache;
#ifdef CONFIG_PERF_EVENTS
	struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
	struct mutex perf_event_mutex;
	struct list_head perf_event_list;
#endif
#ifdef CONFIG_DEBUG_PREEMPT
	unsigned long preempt_disable_ip;
#endif
	struct mempolicy *mempolicy;	/* Protected by alloc_lock */
	short pref_node_fork;
#ifdef CONFIG_NUMA_BALANCING
	unsigned int numa_scan_period;
	unsigned int numa_scan_period_max;
	int numa_preferred_nid;
	unsigned long numa_migrate_retry;
	u64 node_stamp;			/* migration stamp  */
	u64 last_task_numa_placement;
	u64 last_sum_exec_runtime;
	struct callback_head numa_work;

	struct list_head numa_entry;
	struct numa_group *numa_group;

	/*
	 * numa_faults is an array split into four regions:
	 * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer
	 * in this precise order.
	 *
	 * faults_memory: Exponential decaying average of faults on a per-node
	 * basis. Scheduling placement decisions are made based on these
	 * counts. The values remain static for the duration of a PTE scan.
	 * faults_cpu: Track the nodes the process was running on when a NUMA
	 * hinting fault was incurred.
	 * faults_memory_buffer and faults_cpu_buffer: Record faults per node
	 * during the current scan window. When the scan completes, the counts
	 * in faults_memory and faults_cpu decay and these values are copied.
	 */
	unsigned long *numa_faults;
	unsigned long total_numa_faults;

	/*
	 * numa_faults_locality tracks if faults recorded during the last
	 * scan window were remote/local or failed to migrate. The task scan
	 * period is adapted based on the locality of the faults with different
	 * weights depending on whether they were shared or private faults
	 */
	unsigned long numa_faults_locality[3];

	unsigned long numa_pages_migrated;
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
	struct tlbflush_unmap_batch tlb_ubc;
#endif

	struct rcu_head rcu;

	/*
	 * cache last used pipe for splice
	 */
	struct pipe_inode_info *splice_pipe;

	struct page_frag task_frag;

#ifdef CONFIG_TASK_DELAY_ACCT
	struct task_delay_info *delays;
#endif
#ifdef CONFIG_FAULT_INJECTION
#endif
	/*
	 * when (nr_dirtied >= nr_dirtied_pause), it's time to call
	 * balance_dirty_pages() for some dirty throttling pause
	 */
	int nr_dirtied_pause;
	unsigned long dirty_paused_when; /* start of a write-and-pause period */

#ifdef CONFIG_LATENCYTOP
	int latency_record_count;
	struct latency_record latency_record[LT_SAVECOUNT];
#endif
	/*
	 * time slack values; these are used to round up poll() and
	 * select() etc timeout values. These are in nanoseconds.
	 */
	u64 default_timer_slack_ns;

	unsigned int kasan_depth;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* Index of current stored address in ret_stack */
	/* Stack of return addresses for return function tracing */
	struct ftrace_ret_stack	*ret_stack;
	/* time stamp for last schedule */
	unsigned long long ftrace_timestamp;
	/*
	 * Number of functions that haven't been traced
	 * because of depth overrun.
	 */
	atomic_t trace_overrun;
	/* Pause for the tracing */
	atomic_t tracing_graph_pause;
#endif
#ifdef CONFIG_TRACING
	/* state flags for use by tracers */
	unsigned long trace;
	/* bitmask and counter of trace recursion */
	unsigned long trace_recursion;
#endif /* CONFIG_TRACING */
	/* Coverage collection mode enabled for this task (0 if disabled). */
	enum kcov_mode kcov_mode;
	/* Size of the kcov_area. */
	/* Buffer for coverage collection. */
	/* kcov descriptor wired with this task or NULL. */
	struct mem_cgroup *memcg_in_oom;
	gfp_t memcg_oom_gfp_mask;
	int memcg_oom_order;

	/* number of pages to reclaim on returning to userland */
	unsigned int memcg_nr_pages_over_high;
#ifdef CONFIG_UPROBES
	struct uprobe_task *utask;
#endif
#if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
	unsigned int	sequential_io;
	unsigned int	sequential_io_avg;
#endif
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
	unsigned long	task_state_change;
#endif
	int pagefault_disabled;
	struct task_struct *oom_reaper_list;
#ifdef CONFIG_VMAP_STACK
	struct vm_struct *stack_vm_area;
#endif
#ifdef CONFIG_THREAD_INFO_IN_TASK
	/* A live task holds one reference. */
	atomic_t stack_refcount;
#endif
	/* CPU-specific state of this task */
	struct thread_struct thread;
	/*
	 * WARNING: on x86, 'thread_struct' contains a variable-sized
	 * structure.  It *MUST* be at the end of 'task_struct'.
	 *
	 * Do not put anything below here!
	 */
};
#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
extern int arch_task_struct_size __read_mostly;
#else
# define arch_task_struct_size (sizeof(struct task_struct))
#endif

#ifdef CONFIG_VMAP_STACK
static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
{
	return t->stack_vm_area;
}
#else
static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
{
	return NULL;
}
#endif

#define TNF_MIGRATED	0x01
#define TNF_NO_GROUP	0x02
#define TNF_SHARED	0x04
#define TNF_FAULT_LOCAL	0x08
#define TNF_MIGRATE_FAIL 0x10

static inline bool in_vfork(struct task_struct *tsk)
{
	bool ret;

	/*
	 * need RCU to access ->real_parent if CLONE_VM was used along with
	 * CLONE_PARENT.
	 *
	 * We check real_parent->mm == tsk->mm because CLONE_VFORK does not
	 * imply CLONE_VM.
	 *
	 * CLONE_VFORK can be used with CLONE_PARENT/CLONE_THREAD and thus
	 * ->real_parent is not necessarily the task doing vfork(), so in
	 * theory we can't rely on task_lock() if we want to dereference it.
	 *
	 * And in this case we can't trust the real_parent->mm == tsk->mm
	 * check, it can be false negative. But we do not care, if init or
	 * another oom-unkillable task does this it should blame itself.
	 */
	rcu_read_lock();
	ret = tsk->vfork_done && tsk->real_parent->mm == tsk->mm;
	rcu_read_unlock();

	return ret;
}
#ifdef CONFIG_NUMA_BALANCING
extern void task_numa_fault(int last_node, int node, int pages, int flags);
extern pid_t task_numa_group_id(struct task_struct *p);
extern void set_numabalancing_state(bool enabled);
extern void task_numa_free(struct task_struct *p);
extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page,
					int src_nid, int dst_cpu);
#else
static inline void task_numa_fault(int last_node, int node, int pages,
				   int flags)
{
}
static inline pid_t task_numa_group_id(struct task_struct *p)
{
	return 0;
}
static inline void set_numabalancing_state(bool enabled)
{
}
static inline void task_numa_free(struct task_struct *p)
{
}
static inline bool should_numa_migrate_memory(struct task_struct *p,
				struct page *page, int src_nid, int dst_cpu)
{
	return true;
}
#endif

static inline struct pid *task_pid(struct task_struct *task)
{
	return task->pids[PIDTYPE_PID].pid;
}

static inline struct pid *task_tgid(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_PID].pid;
}

/*
 * Without tasklist or rcu lock it is not safe to dereference
 * the result of task_pgrp/task_session even if task == current,
 * we can race with another thread doing sys_setsid/sys_setpgid.
 */
static inline struct pid *task_pgrp(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_PGID].pid;
}

static inline struct pid *task_session(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_SID].pid;
}
struct pid_namespace;

/*
 * the helpers to get the task's different pids as they are seen
 * from various namespaces
 *
 * task_xid_nr()     : global id, i.e. the id seen from the init namespace;
 * task_xid_vnr()    : virtual id, i.e. the id seen from the pid namespace of
 * task_xid_nr_ns()  : id seen from the ns specified;
 *
 * set_task_vxid()   : assigns a virtual id to a task;
 *
 * see also pid_nr() etc in include/linux/pid.h
 */
pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
			struct pid_namespace *ns);

static inline pid_t task_pid_nr(struct task_struct *tsk)
{
	return tsk->pid;
}

static inline pid_t task_pid_nr_ns(struct task_struct *tsk,
					struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
}

static inline pid_t task_pid_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
}

static inline pid_t task_tgid_nr(struct task_struct *tsk)
{
	return tsk->tgid;
}

pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);

static inline pid_t task_tgid_vnr(struct task_struct *tsk)
{
	return pid_vnr(task_tgid(tsk));
}

static inline int pid_alive(const struct task_struct *p);
static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
{
	pid_t pid = 0;

	rcu_read_lock();
	if (pid_alive(tsk))
		pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
	rcu_read_unlock();

	return pid;
}

static inline pid_t task_ppid_nr(const struct task_struct *tsk)
{
	return task_ppid_nr_ns(tsk, &init_pid_ns);
}

static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
					struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
}

static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
}

static inline pid_t task_session_nr_ns(struct task_struct *tsk,
					struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
}

static inline pid_t task_session_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
}
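/*
 * Illustrative sketch (not part of this header): the _nr helpers report ids
 * as seen from the init namespace, while the _vnr variants report them as
 * seen from the calling task's own pid namespace, e.g.:
 *
 *	pid_t global = task_pid_nr(current);
 *	pid_t local  = task_pid_vnr(current);
 *
 * Inside a pid namespace other than the init one, these two values differ.
 */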
/* obsolete, do not use */
static inline pid_t task_pgrp_nr(struct task_struct *tsk)
{
	return task_pgrp_nr_ns(tsk, &init_pid_ns);
}

/**
 * pid_alive - check that a task structure is not stale
 * @p: Task structure to be checked.
 *
 * Test if a process is not yet dead (at most zombie state)
 * If pid_alive fails, then pointers within the task structure
 * can be stale and must not be dereferenced.
 *
 * Return: 1 if the process is alive. 0 otherwise.
 */
static inline int pid_alive(const struct task_struct *p)
{
	return p->pids[PIDTYPE_PID].pid != NULL;
}

/**
 * is_global_init - check if a task structure is init. Since init
 * is free to have sub-threads we need to check tgid.
 * @tsk: Task structure to be checked.
 *
 * Check if a task structure is the first user space task the kernel created.
 *
 * Return: 1 if the task structure is init. 0 otherwise.
 */
static inline int is_global_init(struct task_struct *tsk)
{
	return task_tgid_nr(tsk) == 1;
}

extern struct pid *cad_pid;

extern void free_task(struct task_struct *tsk);
#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)

extern void __put_task_struct(struct task_struct *t);

static inline void put_task_struct(struct task_struct *t)
{
	if (atomic_dec_and_test(&t->usage))
		__put_task_struct(t);
}
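/*
 * Illustrative sketch (not part of this header): holding a task reference
 * across a window in which the task may exit. `p` is assumed to point at a
 * valid task when the reference is taken.
 *
 *	get_task_struct(p);
 *	...			p may be used safely here
 *	put_task_struct(p);	may free the task_struct on the last reference
 */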
struct task_struct *task_rcu_dereference(struct task_struct **ptask);
struct task_struct *try_get_task_struct(struct task_struct **ptask);

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
extern void task_cputime(struct task_struct *t,
			 u64 *utime, u64 *stime);
extern u64 task_gtime(struct task_struct *t);
#else
static inline void task_cputime(struct task_struct *t,
				u64 *utime, u64 *stime)
{
	*utime = t->utime;
	*stime = t->stime;
}

static inline u64 task_gtime(struct task_struct *t)
{
	return t->gtime;
}
#endif

#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
static inline void task_cputime_scaled(struct task_struct *t,
				       u64 *utimescaled,
				       u64 *stimescaled)
{
	*utimescaled = t->utimescaled;
	*stimescaled = t->stimescaled;
}
#else
static inline void task_cputime_scaled(struct task_struct *t,
				       u64 *utimescaled,
				       u64 *stimescaled)
{
	task_cputime(t, utimescaled, stimescaled);
}
#endif

extern void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st);
extern void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st);

#define PF_IDLE		0x00000002	/* I am an IDLE thread */
#define PF_EXITING	0x00000004	/* getting shut down */
#define PF_EXITPIDONE	0x00000008	/* pi exit done on shut down */
#define PF_VCPU		0x00000010	/* I'm a virtual CPU */
#define PF_WQ_WORKER	0x00000020	/* I'm a workqueue worker */
#define PF_FORKNOEXEC	0x00000040	/* forked but didn't exec */
#define PF_MCE_PROCESS	0x00000080	/* process policy on mce errors */
#define PF_SUPERPRIV	0x00000100	/* used super-user privileges */
#define PF_DUMPCORE	0x00000200	/* dumped core */
#define PF_SIGNALED	0x00000400	/* killed by a signal */
#define PF_MEMALLOC	0x00000800	/* Allocating memory */
#define PF_NPROC_EXCEEDED 0x00001000	/* set_user noticed that RLIMIT_NPROC was exceeded */
#define PF_USED_MATH	0x00002000	/* if unset the fpu must be initialized before use */
#define PF_USED_ASYNC	0x00004000	/* used async_schedule*(), used by module init */
#define PF_NOFREEZE	0x00008000	/* this thread should not be frozen */
#define PF_FROZEN	0x00010000	/* frozen for system suspend */
#define PF_FSTRANS	0x00020000	/* inside a filesystem transaction */
#define PF_KSWAPD	0x00040000	/* I am kswapd */
#define PF_MEMALLOC_NOIO 0x00080000	/* Allocating memory without IO involved */
#define PF_LESS_THROTTLE 0x00100000	/* Throttle me less: I clean memory */
#define PF_KTHREAD	0x00200000	/* I am a kernel thread */
#define PF_RANDOMIZE	0x00400000	/* randomize virtual address space */
#define PF_SWAPWRITE	0x00800000	/* Allowed to write to swap */
#define PF_NO_SETAFFINITY 0x04000000	/* Userland is not allowed to meddle with cpus_allowed */
#define PF_MCE_EARLY	0x08000000	/* Early kill for mce process policy */
#define PF_MUTEX_TESTER	0x20000000	/* Thread belongs to the rt mutex tester */
#define PF_FREEZER_SKIP	0x40000000	/* Freezer should not count it as freezable */
#define PF_SUSPEND_TASK 0x80000000	/* this thread called freeze_processes and should not be frozen */

/*
 * Only the _current_ task can read/write to tsk->flags, but other
 * tasks can access tsk->flags in readonly mode for example
 * with tsk_used_math (like during threaded core dumping).
 * There is however an exception to this rule during ptrace
 * or during fork: the ptracer task is allowed to write to the
 * child->flags of its traced child (same goes for fork, the parent
 * can write to the child->flags), because we're guaranteed the
 * child is not running and in turn not changing child->flags
 * at the same time the parent does it.
 */
#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
#define clear_used_math() clear_stopped_child_used_math(current)
#define set_used_math() set_stopped_child_used_math(current)
#define conditional_stopped_child_used_math(condition, child) \
	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
#define conditional_used_math(condition) \
	conditional_stopped_child_used_math(condition, current)
#define copy_to_stopped_child_used_math(child) \
	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
#define used_math() tsk_used_math(current)
/* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags
 * __GFP_FS is also cleared as it implies __GFP_IO.
 */
static inline gfp_t memalloc_noio_flags(gfp_t flags)
{
	if (unlikely(current->flags & PF_MEMALLOC_NOIO))
		flags &= ~(__GFP_IO | __GFP_FS);
	return flags;
}

static inline unsigned int memalloc_noio_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
	current->flags |= PF_MEMALLOC_NOIO;
	return flags;
}

static inline void memalloc_noio_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
}
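/*
 * Illustrative sketch (not part of this header): temporarily forbidding I/O
 * from allocations done in a section of code, then restoring the old state:
 *
 *	unsigned int noio_flags = memalloc_noio_save();
 *	...			while PF_MEMALLOC_NOIO is set, the allocator
 *				is expected to strip __GFP_IO/__GFP_FS via
 *				memalloc_noio_flags()
 *	memalloc_noio_restore(noio_flags);
 */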
/* Per-process atomic flags. */
#define PFA_NO_NEW_PRIVS 0	/* May not gain new privileges. */
#define PFA_SPREAD_PAGE  1	/* Spread page cache over cpuset */
#define PFA_SPREAD_SLAB  2	/* Spread some slab caches over cpuset */
#define PFA_LMK_WAITING  3	/* Lowmemorykiller is waiting */


#define TASK_PFA_TEST(name, func)					\
	static inline bool task_##func(struct task_struct *p)		\
	{ return test_bit(PFA_##name, &p->atomic_flags); }
#define TASK_PFA_SET(name, func)					\
	static inline void task_set_##func(struct task_struct *p)	\
	{ set_bit(PFA_##name, &p->atomic_flags); }
#define TASK_PFA_CLEAR(name, func)					\
	static inline void task_clear_##func(struct task_struct *p)	\
	{ clear_bit(PFA_##name, &p->atomic_flags); }

TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)

TASK_PFA_TEST(SPREAD_PAGE, spread_page)
TASK_PFA_SET(SPREAD_PAGE, spread_page)
TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)

TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
TASK_PFA_SET(SPREAD_SLAB, spread_slab)
TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)

TASK_PFA_TEST(LMK_WAITING, lmk_waiting)
TASK_PFA_SET(LMK_WAITING, lmk_waiting)
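/*
 * Illustrative sketch (not part of this header): the TASK_PFA_* macros above
 * expand into accessors named after their second argument, so for
 * PFA_NO_NEW_PRIVS the generated helpers are used as:
 *
 *	task_set_no_new_privs(current);
 *	if (task_no_new_privs(current))
 *		...		the flag is observable via p->atomic_flags
 */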
/*
 * task->jobctl flags
 */
#define JOBCTL_STOP_SIGMASK	0xffff	/* signr of the last group stop */

#define JOBCTL_STOP_DEQUEUED_BIT 16	/* stop signal dequeued */
#define JOBCTL_STOP_PENDING_BIT	17	/* task should stop for group stop */
#define JOBCTL_STOP_CONSUME_BIT	18	/* consume group stop count */
#define JOBCTL_TRAP_STOP_BIT	19	/* trap for STOP */
#define JOBCTL_TRAP_NOTIFY_BIT	20	/* trap for NOTIFY */
#define JOBCTL_TRAPPING_BIT	21	/* switching to TRACED */
#define JOBCTL_LISTENING_BIT	22	/* ptracer is listening for events */

#define JOBCTL_STOP_DEQUEUED	(1UL << JOBCTL_STOP_DEQUEUED_BIT)
#define JOBCTL_STOP_PENDING	(1UL << JOBCTL_STOP_PENDING_BIT)
#define JOBCTL_STOP_CONSUME	(1UL << JOBCTL_STOP_CONSUME_BIT)
#define JOBCTL_TRAP_STOP	(1UL << JOBCTL_TRAP_STOP_BIT)
#define JOBCTL_TRAP_NOTIFY	(1UL << JOBCTL_TRAP_NOTIFY_BIT)
#define JOBCTL_TRAPPING		(1UL << JOBCTL_TRAPPING_BIT)
#define JOBCTL_LISTENING	(1UL << JOBCTL_LISTENING_BIT)

#define JOBCTL_TRAP_MASK	(JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
#define JOBCTL_PENDING_MASK	(JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)

extern bool task_set_jobctl_pending(struct task_struct *task,
				    unsigned long mask);
extern void task_clear_jobctl_trapping(struct task_struct *task);
extern void task_clear_jobctl_pending(struct task_struct *task,
				      unsigned long mask);

static inline void rcu_copy_process(struct task_struct *p)
{
#ifdef CONFIG_PREEMPT_RCU
	p->rcu_read_lock_nesting = 0;
	p->rcu_read_unlock_special.s = 0;
	p->rcu_blocked_node = NULL;
	INIT_LIST_HEAD(&p->rcu_node_entry);
#endif /* #ifdef CONFIG_PREEMPT_RCU */
#ifdef CONFIG_TASKS_RCU
	p->rcu_tasks_holdout = false;
	INIT_LIST_HEAD(&p->rcu_tasks_holdout_list);
	p->rcu_tasks_idle_cpu = -1;
#endif /* #ifdef CONFIG_TASKS_RCU */
}

static inline void tsk_restore_flags(struct task_struct *task,
				unsigned long orig_flags, unsigned long flags)
{
	task->flags &= ~flags;
	task->flags |= orig_flags & flags;
}

extern int cpuset_cpumask_can_shrink(const struct cpumask *cur,
				     const struct cpumask *trial);
extern int task_can_attach(struct task_struct *p,
			   const struct cpumask *cs_cpus_allowed);
#ifdef CONFIG_SMP
extern void do_set_cpus_allowed(struct task_struct *p,
			       const struct cpumask *new_mask);
extern int set_cpus_allowed_ptr(struct task_struct *p,
				const struct cpumask *new_mask);
#else
static inline void do_set_cpus_allowed(struct task_struct *p,
				      const struct cpumask *new_mask)
{
}
static inline int set_cpus_allowed_ptr(struct task_struct *p,
				       const struct cpumask *new_mask)
{
	if (!cpumask_test_cpu(0, new_mask))
		return -EINVAL;
	return 0;
}
#endif

#ifdef CONFIG_NO_HZ_COMMON
void calc_load_enter_idle(void);
void calc_load_exit_idle(void);
#else
static inline void calc_load_enter_idle(void) { }
static inline void calc_load_exit_idle(void) { }
#endif /* CONFIG_NO_HZ_COMMON */
#ifndef cpu_relax_yield
#define cpu_relax_yield() cpu_relax()
#endif

extern unsigned long long
task_sched_runtime(struct task_struct *task);

/* sched_exec is called by processes performing an exec */
#ifdef CONFIG_SMP
extern void sched_exec(void);
#else
#define sched_exec()   {}
#endif

#ifdef CONFIG_HOTPLUG_CPU
extern void idle_task_exit(void);
#else
static inline void idle_task_exit(void) {}
#endif

#if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP)
extern void wake_up_nohz_cpu(int cpu);
#else
static inline void wake_up_nohz_cpu(int cpu) { }
#endif

#ifdef CONFIG_NO_HZ_FULL
extern u64 scheduler_tick_max_deferment(void);
#endif

extern int yield_to(struct task_struct *p, bool preempt);
extern void set_user_nice(struct task_struct *p, long nice);
extern int task_prio(const struct task_struct *p);

/**
 * task_nice - return the nice value of a given task.
 * @p: the task in question.
 *
 * Return: The nice value [ -20 ... 0 ... 19 ].
 */
static inline int task_nice(const struct task_struct *p)
{
	return PRIO_TO_NICE((p)->static_prio);
}

extern int can_nice(const struct task_struct *p, const int nice);
extern int task_curr(const struct task_struct *p);
extern int idle_cpu(int cpu);
extern int sched_setscheduler(struct task_struct *, int,
			      const struct sched_param *);
extern int sched_setscheduler_nocheck(struct task_struct *, int,
				      const struct sched_param *);
extern int sched_setattr(struct task_struct *,
			 const struct sched_attr *);
extern struct task_struct *idle_task(int cpu);

/**
 * is_idle_task - is the specified task an idle task?
 * @p: the task in question.
 *
 * Return: 1 if @p is an idle task. 0 otherwise.
 */
static inline bool is_idle_task(const struct task_struct *p)
{
	return !!(p->flags & PF_IDLE);
}

extern struct task_struct *curr_task(int cpu);
extern void ia64_set_curr_task(int cpu, struct task_struct *p);

union thread_union {
#ifndef CONFIG_THREAD_INFO_IN_TASK
	struct thread_info thread_info;
#endif
	unsigned long stack[THREAD_SIZE/sizeof(long)];
};

#ifndef __HAVE_ARCH_KSTACK_END
static inline int kstack_end(void *addr)
{
	/* Reliable end of stack detection:
	 * Some APM bios versions misalign the stack
	 */
	return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
}
#endif

extern union thread_union init_thread_union;
extern struct task_struct init_task;

extern struct mm_struct init_mm;

extern struct pid_namespace init_pid_ns;

/*
 * find a task by one of its numerical ids
 *
 * find_task_by_pid_ns():
 *      finds a task by its pid in the specified namespace
 * find_task_by_vpid():
 *      finds a task by its virtual pid
 *
 * see also find_vpid() etc in include/linux/pid.h
 */

extern struct task_struct *find_task_by_vpid(pid_t nr);
extern struct task_struct *find_task_by_pid_ns(pid_t nr,
		struct pid_namespace *ns);

/* per-UID process charging. */
extern struct user_struct * alloc_uid(kuid_t);
static inline struct user_struct *get_uid(struct user_struct *u)
{
	atomic_inc(&u->__count);
	return u;
}
extern void free_uid(struct user_struct *);

#include <asm/current.h>

extern void xtime_update(unsigned long ticks);

extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
extern void wake_up_new_task(struct task_struct *tsk);
#ifdef CONFIG_SMP
extern void kick_process(struct task_struct *tsk);
#else
static inline void kick_process(struct task_struct *tsk) { }
#endif
extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
extern void sched_dead(struct task_struct *p);

extern void proc_caches_init(void);

extern void release_task(struct task_struct * p);

#ifdef CONFIG_HAVE_COPY_THREAD_TLS
extern int copy_thread_tls(unsigned long, unsigned long, unsigned long,
			struct task_struct *, unsigned long);
#else
extern int copy_thread(unsigned long, unsigned long, unsigned long,
			struct task_struct *);

/* Architectures that haven't opted into copy_thread_tls get the tls argument
 * via pt_regs, so ignore the tls argument passed via C. */
static inline int copy_thread_tls(
		unsigned long clone_flags, unsigned long sp, unsigned long arg,
		struct task_struct *p, unsigned long tls)
{
	return copy_thread(clone_flags, sp, arg, p);
}
#endif
extern void flush_thread(void);
1957 #ifdef CONFIG_HAVE_EXIT_THREAD
1958 extern void exit_thread(struct task_struct
*tsk
);
1960 static inline void exit_thread(struct task_struct
*tsk
)
1965 extern void exit_files(struct task_struct
*);
1967 extern void exit_itimers(struct signal_struct
*);
1969 extern void do_group_exit(int);
1971 extern int do_execve(struct filename
*,
1972 const char __user
* const __user
*,
1973 const char __user
* const __user
*);
1974 extern int do_execveat(int, struct filename
*,
1975 const char __user
* const __user
*,
1976 const char __user
* const __user
*,
1978 extern long _do_fork(unsigned long, unsigned long, unsigned long, int __user
*, int __user
*, unsigned long);
1979 extern long do_fork(unsigned long, unsigned long, unsigned long, int __user
*, int __user
*);
1980 struct task_struct
*fork_idle(int);
1981 extern pid_t
kernel_thread(int (*fn
)(void *), void *arg
, unsigned long flags
);
extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);
static inline void set_task_comm(struct task_struct *tsk, const char *from)
{
        __set_task_comm(tsk, from, false);
}
extern char *get_task_comm(char *to, struct task_struct *tsk);
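
/*
 * Example usage (sketch): ->comm is a fixed TASK_COMM_LEN buffer, so callers
 * normally copy it out via get_task_comm() (which takes task_lock) instead of
 * reading it directly:
 *
 *      char comm[TASK_COMM_LEN];
 *
 *      get_task_comm(comm, tsk);
 *      pr_info("task is %s\n", comm);
 */
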
#ifdef CONFIG_SMP
void scheduler_ipi(void);
extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
#else
static inline void scheduler_ipi(void) { }
static inline unsigned long wait_task_inactive(struct task_struct *p,
                                               long match_state)
{
        return 1;
}
#endif

/*
 * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
 * subscriptions and synchronises with wait4().  Also used in procfs.  Also
 * pins the final release of task.io_context.  Also protects ->cpuset and
 * ->cgroup.subsys[]. And ->vfork_done.
 *
 * Nests both inside and outside of read_lock(&tasklist_lock).
 * It must not be nested with write_lock_irq(&tasklist_lock),
 * neither inside nor outside.
 */
static inline void task_lock(struct task_struct *p)
{
        spin_lock(&p->alloc_lock);
}

static inline void task_unlock(struct task_struct *p)
{
        spin_unlock(&p->alloc_lock);
}
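
/*
 * Example usage (sketch): the fields listed above may only be dereferenced on
 * another task while the lock is held, e.g. when grabbing its ->mm:
 *
 *      task_lock(p);
 *      mm = p->mm;
 *      if (mm)
 *              atomic_inc(&mm->mm_users);
 *      task_unlock(p);
 *
 * (get_task_mm() wraps roughly this pattern and additionally skips kernel
 * threads.)
 */
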
#ifdef CONFIG_THREAD_INFO_IN_TASK

static inline struct thread_info *task_thread_info(struct task_struct *task)
{
        return &task->thread_info;
}

/*
 * When accessing the stack of a non-current task that might exit, use
 * try_get_task_stack() instead.  task_stack_page will return a pointer
 * that could get freed out from under you.
 */
static inline void *task_stack_page(const struct task_struct *task)
{
        return task->stack;
}

#define setup_thread_stack(new,old)     do { } while(0)

static inline unsigned long *end_of_stack(const struct task_struct *task)
{
        return task->stack;
}

#elif !defined(__HAVE_THREAD_FUNCTIONS)

#define task_thread_info(task)  ((struct thread_info *)(task)->stack)
#define task_stack_page(task)   ((void *)(task)->stack)

static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
{
        *task_thread_info(p) = *task_thread_info(org);
        task_thread_info(p)->task = p;
}

/*
 * Return the address of the last usable long on the stack.
 *
 * When the stack grows down, this is just above the thread
 * info struct. Going any lower will corrupt the threadinfo.
 *
 * When the stack grows up, this is the highest address.
 * Beyond that position, we corrupt data on the next page.
 */
static inline unsigned long *end_of_stack(struct task_struct *p)
{
#ifdef CONFIG_STACK_GROWSUP
        return (unsigned long *)((unsigned long)task_thread_info(p) + THREAD_SIZE) - 1;
#else
        return (unsigned long *)(task_thread_info(p) + 1);
#endif
}

#endif

#ifdef CONFIG_THREAD_INFO_IN_TASK
static inline void *try_get_task_stack(struct task_struct *tsk)
{
        return atomic_inc_not_zero(&tsk->stack_refcount) ?
                task_stack_page(tsk) : NULL;
}

extern void put_task_stack(struct task_struct *tsk);
#else
static inline void *try_get_task_stack(struct task_struct *tsk)
{
        return task_stack_page(tsk);
}

static inline void put_task_stack(struct task_struct *tsk) {}
#endif
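
/*
 * Example usage (sketch): pinning the stack of a possibly-exiting task so it
 * cannot be freed while being inspected:
 *
 *      void *stack = try_get_task_stack(tsk);
 *
 *      if (stack) {
 *              ... walk or dump the stack ...
 *              put_task_stack(tsk);
 *      }
 */
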
#define task_stack_end_corrupted(task) \
                (*(end_of_stack(task)) != STACK_END_MAGIC)

static inline int object_is_on_stack(void *obj)
{
        void *stack = task_stack_page(current);

        return (obj >= stack) && (obj < (stack + THREAD_SIZE));
}
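
/*
 * Example usage (sketch): a debug check that warns when a buffer handed to
 * code that may outlive the current frame (e.g. a DMA mapping) lives on the
 * task stack:
 *
 *      WARN_ON_ONCE(object_is_on_stack(buf));
 */
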
extern void thread_stack_cache_init(void);

#ifdef CONFIG_DEBUG_STACK_USAGE
static inline unsigned long stack_not_used(struct task_struct *p)
{
        unsigned long *n = end_of_stack(p);

        do {    /* Skip over canary */
# ifdef CONFIG_STACK_GROWSUP
                n--;
# else
                n++;
# endif
        } while (!*n);

# ifdef CONFIG_STACK_GROWSUP
        return (unsigned long)end_of_stack(p) - (unsigned long)n;
# else
        return (unsigned long)n - (unsigned long)end_of_stack(p);
# endif
}
#endif
extern void set_task_stack_end_magic(struct task_struct *tsk);

/* set thread flags in other task's structures
 * - see asm/thread_info.h for TIF_xxxx flags available
 */
static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
        set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
        clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
        return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
        return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
{
        return test_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void set_tsk_need_resched(struct task_struct *tsk)
{
        set_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
}

static inline void clear_tsk_need_resched(struct task_struct *tsk)
{
        clear_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
}

static inline int test_tsk_need_resched(struct task_struct *tsk)
{
        return unlikely(test_tsk_thread_flag(tsk, TIF_NEED_RESCHED));
}

static inline int restart_syscall(void)
{
        set_tsk_thread_flag(current, TIF_SIGPENDING);
        return -ERESTARTNOINTR;
}

static inline int signal_pending(struct task_struct *p)
{
        return unlikely(test_tsk_thread_flag(p, TIF_SIGPENDING));
}

static inline int __fatal_signal_pending(struct task_struct *p)
{
        return unlikely(sigismember(&p->pending.signal, SIGKILL));
}

static inline int fatal_signal_pending(struct task_struct *p)
{
        return signal_pending(p) && __fatal_signal_pending(p);
}

static inline int signal_pending_state(long state, struct task_struct *p)
{
        if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
                return 0;
        if (!signal_pending(p))
                return 0;

        return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
}
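
/*
 * Example usage (sketch): a typical interruptible wait loop bails out when a
 * signal is pending so it can be delivered (or the syscall restarted); the
 * wait condition here is only illustrative:
 *
 *      while (!condition) {
 *              if (signal_pending(current))
 *                      return -ERESTARTSYS;
 *              schedule_timeout_interruptible(HZ / 10);
 *      }
 */
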
/*
 * cond_resched() and cond_resched_lock(): latency reduction via
 * explicit rescheduling in places that are safe. The return
 * value indicates whether a reschedule was done in fact.
 * cond_resched_lock() will drop the spinlock before scheduling,
 * cond_resched_softirq() will enable bhs before scheduling.
 */
#ifndef CONFIG_PREEMPT
extern int _cond_resched(void);
#else
static inline int _cond_resched(void) { return 0; }
#endif

#define cond_resched() ({                       \
        ___might_sleep(__FILE__, __LINE__, 0);  \
        _cond_resched();                        \
})

extern int __cond_resched_lock(spinlock_t *lock);

#define cond_resched_lock(lock) ({                              \
        ___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
        __cond_resched_lock(lock);                              \
})

extern int __cond_resched_softirq(void);

#define cond_resched_softirq() ({                                       \
        ___might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET);     \
        __cond_resched_softirq();                                       \
})
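
/*
 * Example usage (sketch): a long-running loop in process context yields the
 * CPU periodically so other tasks are not starved on !CONFIG_PREEMPT kernels
 * (process_item()/nr_items are illustrative only):
 *
 *      for (i = 0; i < nr_items; i++) {
 *              process_item(i);
 *              cond_resched();
 *      }
 */
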
static inline void cond_resched_rcu(void)
{
#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
        rcu_read_unlock();
        cond_resched();
        rcu_read_lock();
#endif
}

/*
 * Does a critical section need to be broken due to another
 * task waiting?: (technically does not depend on CONFIG_PREEMPT,
 * but a general need for low latency)
 */
static inline int spin_needbreak(spinlock_t *lock)
{
#ifdef CONFIG_PREEMPT
        return spin_is_contended(lock);
#else
        return 0;
#endif
}

static __always_inline bool need_resched(void)
{
        return unlikely(tif_need_resched());
}

/*
 * Thread group CPU time accounting.
 */
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);

/*
 * Reevaluate whether the task has signals pending delivery.
 * Wake the task if so.
 * This is required every time the blocked sigset_t changes.
 * callers must hold sighand->siglock.
 */
extern void recalc_sigpending_and_wake(struct task_struct *t);
extern void recalc_sigpending(void);

extern void signal_wake_up_state(struct task_struct *t, unsigned int state);

static inline void signal_wake_up(struct task_struct *t, bool resume)
{
        signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
}
static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
{
        signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
}

/*
 * Wrappers for p->thread_info->cpu access. No-op on UP.
 */
#ifdef CONFIG_SMP

static inline unsigned int task_cpu(const struct task_struct *p)
{
#ifdef CONFIG_THREAD_INFO_IN_TASK
        return p->cpu;
#else
        return task_thread_info(p)->cpu;
#endif
}

static inline int task_node(const struct task_struct *p)
{
        return cpu_to_node(task_cpu(p));
}

extern void set_task_cpu(struct task_struct *p, unsigned int cpu);

#else

static inline unsigned int task_cpu(const struct task_struct *p)
{
        return 0;
}

static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
{
}

#endif /* CONFIG_SMP */

/*
 * In order to reduce various lock holder preemption latencies provide an
 * interface to see if a vCPU is currently running or not.
 *
 * This allows us to terminate optimistic spin loops and block, analogous to
 * the native optimistic spin heuristic of testing if the lock owner task is
 * running or not.
 */
#ifndef vcpu_is_preempted
# define vcpu_is_preempted(cpu) false
#endif
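
/*
 * Example usage (sketch): an optimistic spin loop gives up early when the
 * lock owner's vCPU has been preempted by the hypervisor, since spinning on
 * it would only waste cycles (trylock()/owner are illustrative; the mutex
 * and rwsem owner-spinning code uses this pattern):
 *
 *      while (!trylock(lock)) {
 *              if (need_resched() || vcpu_is_preempted(task_cpu(owner)))
 *                      break;
 *              cpu_relax();
 *      }
 */
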
extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
extern long sched_getaffinity(pid_t pid, struct cpumask *mask);

#ifdef CONFIG_CGROUP_SCHED
extern struct task_group root_task_group;
#endif /* CONFIG_CGROUP_SCHED */

extern int task_can_switch_user(struct user_struct *up,
                                        struct task_struct *tsk);

#ifdef CONFIG_TASK_XACCT
static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
{
        tsk->ioac.rchar += amt;
}

static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
{
        tsk->ioac.wchar += amt;
}

static inline void inc_syscr(struct task_struct *tsk)
{
        tsk->ioac.syscr++;
}

static inline void inc_syscw(struct task_struct *tsk)
{
        tsk->ioac.syscw++;
}
#else
static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
{
}

static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
{
}

static inline void inc_syscr(struct task_struct *tsk)
{
}

static inline void inc_syscw(struct task_struct *tsk)
{
}
#endif

#ifndef TASK_SIZE_OF
#define TASK_SIZE_OF(tsk)       TASK_SIZE
#endif

#ifdef CONFIG_MEMCG
extern void mm_update_next_owner(struct mm_struct *mm);
#else
static inline void mm_update_next_owner(struct mm_struct *mm)
{
}
#endif /* CONFIG_MEMCG */

#define SCHED_CPUFREQ_RT        (1U << 0)
#define SCHED_CPUFREQ_DL        (1U << 1)
#define SCHED_CPUFREQ_IOWAIT    (1U << 2)

#define SCHED_CPUFREQ_RT_DL     (SCHED_CPUFREQ_RT | SCHED_CPUFREQ_DL)

#ifdef CONFIG_CPU_FREQ
struct update_util_data {
        void (*func)(struct update_util_data *data, u64 time, unsigned int flags);
};

void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data,
                        void (*func)(struct update_util_data *data, u64 time,
                                     unsigned int flags));
void cpufreq_remove_update_util_hook(int cpu);
#endif /* CONFIG_CPU_FREQ */
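
/*
 * Example usage (sketch): a cpufreq governor registers a per-CPU callback
 * that the scheduler invokes on utilization updates; my_data/my_update are
 * illustrative, and the update_util_data is normally embedded in a larger
 * per-CPU governor structure recovered with container_of() in the callback:
 *
 *      static void my_update(struct update_util_data *data, u64 time,
 *                            unsigned int flags)
 *      {
 *              if (flags & SCHED_CPUFREQ_RT_DL)
 *                      ... go to maximum frequency ...
 *      }
 *
 *      cpufreq_add_update_util_hook(cpu, &my_data, my_update);
 *      ...
 *      cpufreq_remove_update_util_hook(cpu);
 */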