1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _LINUX_SCHED_H
3 #define _LINUX_SCHED_H
4
5 /*
6 * Define 'struct task_struct' and provide the main scheduler
7 * APIs (schedule(), wakeup variants, etc.)
8 */
9
10 #include <uapi/linux/sched.h>
11
12 #include <asm/current.h>
13
14 #include <linux/pid.h>
15 #include <linux/sem.h>
16 #include <linux/shm.h>
17 #include <linux/kcov.h>
18 #include <linux/mutex.h>
19 #include <linux/plist.h>
20 #include <linux/hrtimer.h>
21 #include <linux/seccomp.h>
22 #include <linux/nodemask.h>
23 #include <linux/rcupdate.h>
24 #include <linux/resource.h>
25 #include <linux/latencytop.h>
26 #include <linux/sched/prio.h>
27 #include <linux/signal_types.h>
28 #include <linux/mm_types_task.h>
29 #include <linux/task_io_accounting.h>
30
31 /* task_struct member predeclarations (sorted alphabetically): */
32 struct audit_context;
33 struct backing_dev_info;
34 struct bio_list;
35 struct blk_plug;
36 struct cfs_rq;
37 struct fs_struct;
38 struct futex_pi_state;
39 struct io_context;
40 struct mempolicy;
41 struct nameidata;
42 struct nsproxy;
43 struct perf_event_context;
44 struct pid_namespace;
45 struct pipe_inode_info;
46 struct rcu_node;
47 struct reclaim_state;
48 struct robust_list_head;
49 struct sched_attr;
50 struct sched_param;
51 struct seq_file;
52 struct sighand_struct;
53 struct signal_struct;
54 struct task_delay_info;
55 struct task_group;
56
57 /*
58 * Task state bitmask. NOTE! These bits are also
59 * encoded in fs/proc/array.c: get_task_state().
60 *
61 * We have two separate sets of flags: task->state
62 * is about runnability, while task->exit_state is
63 * about the task exiting. Confusing, but this way
64 * modifying one set can't modify the other one by
65 * mistake.
66 */
67
68 /* Used in tsk->state: */
69 #define TASK_RUNNING 0x0000
70 #define TASK_INTERRUPTIBLE 0x0001
71 #define TASK_UNINTERRUPTIBLE 0x0002
72 #define __TASK_STOPPED 0x0004
73 #define __TASK_TRACED 0x0008
74 /* Used in tsk->exit_state: */
75 #define EXIT_DEAD 0x0010
76 #define EXIT_ZOMBIE 0x0020
77 #define EXIT_TRACE (EXIT_ZOMBIE | EXIT_DEAD)
78 /* Used in tsk->state again: */
79 #define TASK_PARKED 0x0040
80 #define TASK_DEAD 0x0080
81 #define TASK_WAKEKILL 0x0100
82 #define TASK_WAKING 0x0200
83 #define TASK_NOLOAD 0x0400
84 #define TASK_NEW 0x0800
85 #define TASK_STATE_MAX 0x1000
86
87 /* Convenience macros for the sake of set_current_state: */
88 #define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
89 #define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED)
90 #define TASK_TRACED (TASK_WAKEKILL | __TASK_TRACED)
91
92 #define TASK_IDLE (TASK_UNINTERRUPTIBLE | TASK_NOLOAD)
93
94 /* Convenience macros for the sake of wake_up(): */
95 #define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
96 #define TASK_ALL (TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)
97
98 /* get_task_state(): */
99 #define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \
100 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
101 __TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \
102 TASK_PARKED)
103
104 #define task_is_traced(task) ((task->state & __TASK_TRACED) != 0)
105
106 #define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0)
107
108 #define task_is_stopped_or_traced(task) ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
109
110 #define task_contributes_to_load(task) ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
111 (task->flags & PF_FROZEN) == 0 && \
112 (task->state & TASK_NOLOAD) == 0)
113
114 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
115
116 #define __set_current_state(state_value) \
117 do { \
118 current->task_state_change = _THIS_IP_; \
119 current->state = (state_value); \
120 } while (0)
121 #define set_current_state(state_value) \
122 do { \
123 current->task_state_change = _THIS_IP_; \
124 smp_store_mb(current->state, (state_value)); \
125 } while (0)
126
127 #else
128 /*
129 * set_current_state() includes a barrier so that the write of current->state
130 * is correctly serialised wrt the caller's subsequent test of whether to
131 * actually sleep:
132 *
133 * for (;;) {
134 * set_current_state(TASK_UNINTERRUPTIBLE);
135 * if (!need_sleep)
136 * break;
137 *
138 * schedule();
139 * }
140 * __set_current_state(TASK_RUNNING);
141 *
142 * If the caller does not need such serialisation (because, for instance, the
143 * condition test and condition change and wakeup are under the same lock) then
144 * use __set_current_state().
145 *
146 * The above is typically ordered against the wakeup, which does:
147 *
148 * need_sleep = false;
149 * wake_up_state(p, TASK_UNINTERRUPTIBLE);
150 *
151 * Where wake_up_state() (and all other wakeup primitives) imply enough
152 * barriers to order the store of the variable against wakeup.
153 *
154 * Wakeup will do: if (@state & p->state) p->state = TASK_RUNNING, that is,
155 * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a
156 * TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING).
157 *
158 * This is obviously fine, since they both store the exact same value.
159 *
160 * Also see the comments of try_to_wake_up().
161 */
162 #define __set_current_state(state_value) do { current->state = (state_value); } while (0)
163 #define set_current_state(state_value) smp_store_mb(current->state, (state_value))
164 #endif
165
166 /* Task command name length: */
167 #define TASK_COMM_LEN 16
168
169 extern void scheduler_tick(void);
170
171 #define MAX_SCHEDULE_TIMEOUT LONG_MAX
172
173 extern long schedule_timeout(long timeout);
174 extern long schedule_timeout_interruptible(long timeout);
175 extern long schedule_timeout_killable(long timeout);
176 extern long schedule_timeout_uninterruptible(long timeout);
177 extern long schedule_timeout_idle(long timeout);
178 asmlinkage void schedule(void);
179 extern void schedule_preempt_disabled(void);
180
181 extern int __must_check io_schedule_prepare(void);
182 extern void io_schedule_finish(int token);
183 extern long io_schedule_timeout(long timeout);
184 extern void io_schedule(void);
185
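/*
 * Illustrative sketch (not part of the original header): sleeping for
 * roughly 100 milliseconds using the helpers declared above.
 * msecs_to_jiffies() is assumed from <linux/jiffies.h>; the return
 * value is the number of jiffies left if the sleep was interrupted.
 */
static inline long example_sleep_100ms(void)
{
	return schedule_timeout_interruptible(msecs_to_jiffies(100));
}
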
186 /**
187 * struct prev_cputime - snapshot of system and user cputime
188 * @utime: time spent in user mode
189 * @stime: time spent in system mode
190 * @lock: protects the above two fields
191 *
192 * Stores previous user/system time values such that we can guarantee
193 * monotonicity.
194 */
195 struct prev_cputime {
196 #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
197 u64 utime;
198 u64 stime;
199 raw_spinlock_t lock;
200 #endif
201 };
202
203 /**
204 * struct task_cputime - collected CPU time counts
205 * @utime: time spent in user mode, in nanoseconds
206 * @stime: time spent in kernel mode, in nanoseconds
207 * @sum_exec_runtime: total time spent on the CPU, in nanoseconds
208 *
209 * This structure groups together three kinds of CPU time that are tracked for
210 * threads and thread groups. Most things considering CPU time want to group
211 * these counts together and treat all three of them in parallel.
212 */
213 struct task_cputime {
214 u64 utime;
215 u64 stime;
216 unsigned long long sum_exec_runtime;
217 };
218
219 /* Alternate field names when used on cache expirations: */
220 #define virt_exp utime
221 #define prof_exp stime
222 #define sched_exp sum_exec_runtime
223
224 enum vtime_state {
225 /* Task is sleeping or running in a CPU with VTIME inactive: */
226 VTIME_INACTIVE = 0,
227 /* Task runs in userspace in a CPU with VTIME active: */
228 VTIME_USER,
229 /* Task runs in kernelspace in a CPU with VTIME active: */
230 VTIME_SYS,
231 };
232
233 struct vtime {
234 seqcount_t seqcount;
235 unsigned long long starttime;
236 enum vtime_state state;
237 u64 utime;
238 u64 stime;
239 u64 gtime;
240 };
241
242 struct sched_info {
243 #ifdef CONFIG_SCHED_INFO
244 /* Cumulative counters: */
245
246 /* # of times we have run on this CPU: */
247 unsigned long pcount;
248
249 /* Time spent waiting on a runqueue: */
250 unsigned long long run_delay;
251
252 /* Timestamps: */
253
254 /* When did we last run on a CPU? */
255 unsigned long long last_arrival;
256
257 /* When were we last queued to run? */
258 unsigned long long last_queued;
259
260 #endif /* CONFIG_SCHED_INFO */
261 };
262
263 /*
264 * Integer metrics need fixed point arithmetic, e.g., sched/fair
265 * has a few: load, load_avg, util_avg, freq, and capacity.
266 *
267 * We define a basic fixed point arithmetic range, and then formalize
268 * all these metrics based on that basic range.
269 */
270 # define SCHED_FIXEDPOINT_SHIFT 10
271 # define SCHED_FIXEDPOINT_SCALE (1L << SCHED_FIXEDPOINT_SHIFT)
272
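/*
 * Illustrative sketch (not part of the original header): with
 * SCHED_FIXEDPOINT_SHIFT == 10, a ratio in [0, 1] is stored as an
 * integer in [0, SCHED_FIXEDPOINT_SCALE].  Multiplying two such
 * ratios therefore needs one shift back down to stay in range.
 */
static inline unsigned long example_fixedpoint_mul(unsigned long a,
						   unsigned long b)
{
	/* a and b are fixed-point ratios scaled by SCHED_FIXEDPOINT_SCALE */
	return (a * b) >> SCHED_FIXEDPOINT_SHIFT;
}
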
273 struct load_weight {
274 unsigned long weight;
275 u32 inv_weight;
276 };
277
278 /*
279 * The load_avg/util_avg accumulates an infinite geometric series
280 * (see __update_load_avg() in kernel/sched/fair.c).
281 *
282 * [load_avg definition]
283 *
284 * load_avg = runnable% * scale_load_down(load)
285 *
286 * where runnable% is the time ratio that a sched_entity is runnable.
287 * For cfs_rq, it is the aggregated load_avg of all runnable and
288 * blocked sched_entities.
289 *
290 * load_avg may also take frequency scaling into account:
291 *
292 * load_avg = runnable% * scale_load_down(load) * freq%
293 *
294 * where freq% is the CPU frequency normalized to the highest frequency.
295 *
296 * [util_avg definition]
297 *
298 * util_avg = running% * SCHED_CAPACITY_SCALE
299 *
300 * where running% is the time ratio that a sched_entity is running on
301 * a CPU. For cfs_rq, it is the aggregated util_avg of all runnable
302 * and blocked sched_entities.
303 *
304 * util_avg may also factor frequency scaling and CPU capacity scaling:
305 *
306 * util_avg = running% * SCHED_CAPACITY_SCALE * freq% * capacity%
307 *
308 * where freq% is the same as above, and capacity% is the CPU capacity
309 * normalized to the greatest capacity (due to uarch differences, etc).
310 *
311 * N.B., the above ratios (runnable%, running%, freq%, and capacity%)
312 * themselves are in the range of [0, 1]. To do fixed point arithmetic,
313 * we therefore scale them to as large a range as necessary. This is for
314 * example reflected by util_avg's SCHED_CAPACITY_SCALE.
315 *
316 * [Overflow issue]
317 *
318 * The 64-bit load_sum can have 4353082796 (=2^64/47742/88761) entities
319 * with the highest load (=88761), always runnable on a single cfs_rq,
320 * and should not overflow as the number already hits PID_MAX_LIMIT.
321 *
322 * For all other cases (including 32-bit kernels), struct load_weight's
323 * weight will overflow first before we do, because:
324 *
325 * Max(load_avg) <= Max(load.weight)
326 *
327 * Then it is the load_weight's responsibility to consider overflow
328 * issues.
329 */
330 struct sched_avg {
331 u64 last_update_time;
332 u64 load_sum;
333 u64 runnable_load_sum;
334 u32 util_sum;
335 u32 period_contrib;
336 unsigned long load_avg;
337 unsigned long runnable_load_avg;
338 unsigned long util_avg;
339 };
340
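/*
 * Illustrative sketch (not part of the original header): how the
 * util_avg formula above composes.  The sample ratios are made up and
 * already expressed in fixed point; SCHED_CAPACITY_SCALE is assumed to
 * equal SCHED_FIXEDPOINT_SCALE (1024), as on current configurations.
 */
static inline unsigned long example_util_avg(void)
{
	unsigned long running_pct  = 512;	/* 50% running time     */
	unsigned long freq_pct     = 768;	/* 75% of max frequency */
	unsigned long capacity_pct = 1024;	/* full-capacity CPU    */

	/* util_avg = running% * SCHED_CAPACITY_SCALE * freq% * capacity% */
	return (((running_pct * freq_pct) >> SCHED_FIXEDPOINT_SHIFT) *
		capacity_pct) >> SCHED_FIXEDPOINT_SHIFT;
}
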
341 struct sched_statistics {
342 #ifdef CONFIG_SCHEDSTATS
343 u64 wait_start;
344 u64 wait_max;
345 u64 wait_count;
346 u64 wait_sum;
347 u64 iowait_count;
348 u64 iowait_sum;
349
350 u64 sleep_start;
351 u64 sleep_max;
352 s64 sum_sleep_runtime;
353
354 u64 block_start;
355 u64 block_max;
356 u64 exec_max;
357 u64 slice_max;
358
359 u64 nr_migrations_cold;
360 u64 nr_failed_migrations_affine;
361 u64 nr_failed_migrations_running;
362 u64 nr_failed_migrations_hot;
363 u64 nr_forced_migrations;
364
365 u64 nr_wakeups;
366 u64 nr_wakeups_sync;
367 u64 nr_wakeups_migrate;
368 u64 nr_wakeups_local;
369 u64 nr_wakeups_remote;
370 u64 nr_wakeups_affine;
371 u64 nr_wakeups_affine_attempts;
372 u64 nr_wakeups_passive;
373 u64 nr_wakeups_idle;
374 #endif
375 };
376
377 struct sched_entity {
378 /* For load-balancing: */
379 struct load_weight load;
380 unsigned long runnable_weight;
381 struct rb_node run_node;
382 struct list_head group_node;
383 unsigned int on_rq;
384
385 u64 exec_start;
386 u64 sum_exec_runtime;
387 u64 vruntime;
388 u64 prev_sum_exec_runtime;
389
390 u64 nr_migrations;
391
392 struct sched_statistics statistics;
393
394 #ifdef CONFIG_FAIR_GROUP_SCHED
395 int depth;
396 struct sched_entity *parent;
397 /* rq on which this entity is (to be) queued: */
398 struct cfs_rq *cfs_rq;
399 /* rq "owned" by this entity/group: */
400 struct cfs_rq *my_q;
401 #endif
402
403 #ifdef CONFIG_SMP
404 /*
405 * Per entity load average tracking.
406 *
407 * Put into separate cache line so it does not
408 * collide with read-mostly values above.
409 */
410 struct sched_avg avg ____cacheline_aligned_in_smp;
411 #endif
412 };
413
414 struct sched_rt_entity {
415 struct list_head run_list;
416 unsigned long timeout;
417 unsigned long watchdog_stamp;
418 unsigned int time_slice;
419 unsigned short on_rq;
420 unsigned short on_list;
421
422 struct sched_rt_entity *back;
423 #ifdef CONFIG_RT_GROUP_SCHED
424 struct sched_rt_entity *parent;
425 /* rq on which this entity is (to be) queued: */
426 struct rt_rq *rt_rq;
427 /* rq "owned" by this entity/group: */
428 struct rt_rq *my_q;
429 #endif
430 } __randomize_layout;
431
432 struct sched_dl_entity {
433 struct rb_node rb_node;
434
435 /*
436 * Original scheduling parameters. Copied here from sched_attr
437 * during sched_setattr(), they will remain the same until
438 * the next sched_setattr().
439 */
440 u64 dl_runtime; /* Maximum runtime for each instance */
441 u64 dl_deadline; /* Relative deadline of each instance */
442 u64 dl_period; /* Separation of two instances (period) */
443 u64 dl_bw; /* dl_runtime / dl_period */
444 u64 dl_density; /* dl_runtime / dl_deadline */
445
446 /*
447 * Actual scheduling parameters. Initialized with the values above,
448 * they are continuously updated during task execution. Note that
449 * the remaining runtime could be < 0 in case we are in overrun.
450 */
451 s64 runtime; /* Remaining runtime for this instance */
452 u64 deadline; /* Absolute deadline for this instance */
453 unsigned int flags; /* Specifying the scheduler behaviour */
454
455 /*
456 * Some bool flags:
457 *
458 * @dl_throttled tells if we exhausted the runtime. If so, the
459 * task has to wait for a replenishment to be performed at the
460 * next firing of dl_timer.
461 *
462 * @dl_boosted tells if we are boosted due to DI (deadline inheritance). If so we are
463 * outside bandwidth enforcement mechanism (but only until we
464 * exit the critical section);
465 *
466 * @dl_yielded tells if task gave up the CPU before consuming
467 * all its available runtime during the last job.
468 *
469 * @dl_non_contending tells if the task is inactive while still
470 * contributing to the active utilization. In other words, it
471 * indicates if the inactive timer has been armed and its handler
472 * has not been executed yet. This flag is useful to avoid race
473 * conditions between the inactive timer handler and the wakeup
474 * code.
475 */
476 int dl_throttled : 1;
477 int dl_boosted : 1;
478 int dl_yielded : 1;
479 int dl_non_contending : 1;
480
481 /*
482 * Bandwidth enforcement timer. Each -deadline task has its
483 * own bandwidth to be enforced, thus we need one timer per task.
484 */
485 struct hrtimer dl_timer;
486
487 /*
488 * Inactive timer, responsible for decreasing the active utilization
489 * at the "0-lag time". When a -deadline task blocks, it contributes
490 * to GRUB's active utilization until the "0-lag time", hence a
491 * timer is needed to decrease the active utilization at the correct
492 * time.
493 */
494 struct hrtimer inactive_timer;
495 };
496
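/*
 * Illustrative sketch (not part of the original header): the ordering
 * the parameters above are expected to satisfy for a -deadline task,
 * i.e. runtime <= deadline <= period.  The kernel's admission test
 * additionally checks bandwidth; this only shows the basic relation.
 */
static inline bool example_dl_params_sane(const struct sched_dl_entity *dl)
{
	return dl->dl_runtime <= dl->dl_deadline &&
	       dl->dl_deadline <= dl->dl_period;
}
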
497 union rcu_special {
498 struct {
499 u8 blocked;
500 u8 need_qs;
501 u8 exp_need_qs;
502
503 /* Otherwise the compiler can store garbage here: */
504 u8 pad;
505 } b; /* Bits. */
506 u32 s; /* Set of bits. */
507 };
508
509 enum perf_event_task_context {
510 perf_invalid_context = -1,
511 perf_hw_context = 0,
512 perf_sw_context,
513 perf_nr_task_contexts,
514 };
515
516 struct wake_q_node {
517 struct wake_q_node *next;
518 };
519
520 struct task_struct {
521 #ifdef CONFIG_THREAD_INFO_IN_TASK
522 /*
523 * For reasons of header soup (see current_thread_info()), this
524 * must be the first element of task_struct.
525 */
526 struct thread_info thread_info;
527 #endif
528 /* -1 unrunnable, 0 runnable, >0 stopped: */
529 volatile long state;
530
531 /*
532 * This begins the randomizable portion of task_struct. Only
533 * scheduling-critical items should be added above here.
534 */
535 randomized_struct_fields_start
536
537 void *stack;
538 atomic_t usage;
539 /* Per task flags (PF_*), defined further below: */
540 unsigned int flags;
541 unsigned int ptrace;
542
543 #ifdef CONFIG_SMP
544 struct llist_node wake_entry;
545 int on_cpu;
546 #ifdef CONFIG_THREAD_INFO_IN_TASK
547 /* Current CPU: */
548 unsigned int cpu;
549 #endif
550 unsigned int wakee_flips;
551 unsigned long wakee_flip_decay_ts;
552 struct task_struct *last_wakee;
553
554 int wake_cpu;
555 #endif
556 int on_rq;
557
558 int prio;
559 int static_prio;
560 int normal_prio;
561 unsigned int rt_priority;
562
563 const struct sched_class *sched_class;
564 struct sched_entity se;
565 struct sched_rt_entity rt;
566 #ifdef CONFIG_CGROUP_SCHED
567 struct task_group *sched_task_group;
568 #endif
569 struct sched_dl_entity dl;
570
571 #ifdef CONFIG_PREEMPT_NOTIFIERS
572 /* List of struct preempt_notifier: */
573 struct hlist_head preempt_notifiers;
574 #endif
575
576 #ifdef CONFIG_BLK_DEV_IO_TRACE
577 unsigned int btrace_seq;
578 #endif
579
580 unsigned int policy;
581 int nr_cpus_allowed;
582 cpumask_t cpus_allowed;
583
584 #ifdef CONFIG_PREEMPT_RCU
585 int rcu_read_lock_nesting;
586 union rcu_special rcu_read_unlock_special;
587 struct list_head rcu_node_entry;
588 struct rcu_node *rcu_blocked_node;
589 #endif /* #ifdef CONFIG_PREEMPT_RCU */
590
591 #ifdef CONFIG_TASKS_RCU
592 unsigned long rcu_tasks_nvcsw;
593 u8 rcu_tasks_holdout;
594 u8 rcu_tasks_idx;
595 int rcu_tasks_idle_cpu;
596 struct list_head rcu_tasks_holdout_list;
597 #endif /* #ifdef CONFIG_TASKS_RCU */
598
599 struct sched_info sched_info;
600
601 struct list_head tasks;
602 #ifdef CONFIG_SMP
603 struct plist_node pushable_tasks;
604 struct rb_node pushable_dl_tasks;
605 #endif
606
607 struct mm_struct *mm;
608 struct mm_struct *active_mm;
609
610 /* Per-thread vma caching: */
611 struct vmacache vmacache;
612
613 #ifdef SPLIT_RSS_COUNTING
614 struct task_rss_stat rss_stat;
615 #endif
616 int exit_state;
617 int exit_code;
618 int exit_signal;
619 /* The signal sent when the parent dies: */
620 int pdeath_signal;
621 /* JOBCTL_*, siglock protected: */
622 unsigned long jobctl;
623
624 /* Used for emulating ABI behavior of previous Linux versions: */
625 unsigned int personality;
626
627 /* Scheduler bits, serialized by scheduler locks: */
628 unsigned sched_reset_on_fork:1;
629 unsigned sched_contributes_to_load:1;
630 unsigned sched_migrated:1;
631 unsigned sched_remote_wakeup:1;
632 /* Force alignment to the next boundary: */
633 unsigned :0;
634
635 /* Unserialized, strictly 'current' */
636
637 /* Bit to tell LSMs we're in execve(): */
638 unsigned in_execve:1;
639 unsigned in_iowait:1;
640 #ifndef TIF_RESTORE_SIGMASK
641 unsigned restore_sigmask:1;
642 #endif
643 #ifdef CONFIG_MEMCG
644 unsigned memcg_may_oom:1;
645 #ifndef CONFIG_SLOB
646 unsigned memcg_kmem_skip_account:1;
647 #endif
648 #endif
649 #ifdef CONFIG_COMPAT_BRK
650 unsigned brk_randomized:1;
651 #endif
652 #ifdef CONFIG_CGROUPS
653 /* disallow userland-initiated cgroup migration */
654 unsigned no_cgroup_migration:1;
655 #endif
656
657 unsigned long atomic_flags; /* Flags requiring atomic access. */
658
659 struct restart_block restart_block;
660
661 pid_t pid;
662 pid_t tgid;
663
664 #ifdef CONFIG_CC_STACKPROTECTOR
665 /* Canary value for the -fstack-protector GCC feature: */
666 unsigned long stack_canary;
667 #endif
668 /*
669 * Pointers to the (original) parent process, youngest child, younger sibling,
670 * older sibling, respectively. (p->father can be replaced with
671 * p->real_parent->pid)
672 */
673
674 /* Real parent process: */
675 struct task_struct __rcu *real_parent;
676
677 /* Recipient of SIGCHLD, wait4() reports: */
678 struct task_struct __rcu *parent;
679
680 /*
681 * Children/sibling form the list of natural children:
682 */
683 struct list_head children;
684 struct list_head sibling;
685 struct task_struct *group_leader;
686
687 /*
688 * 'ptraced' is the list of tasks this task is using ptrace() on.
689 *
690 * This includes both natural children and PTRACE_ATTACH targets.
691 * 'ptrace_entry' is this task's link on the p->parent->ptraced list.
692 */
693 struct list_head ptraced;
694 struct list_head ptrace_entry;
695
696 /* PID/PID hash table linkage. */
697 struct pid_link pids[PIDTYPE_MAX];
698 struct list_head thread_group;
699 struct list_head thread_node;
700
701 struct completion *vfork_done;
702
703 /* CLONE_CHILD_SETTID: */
704 int __user *set_child_tid;
705
706 /* CLONE_CHILD_CLEARTID: */
707 int __user *clear_child_tid;
708
709 u64 utime;
710 u64 stime;
711 #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
712 u64 utimescaled;
713 u64 stimescaled;
714 #endif
715 u64 gtime;
716 struct prev_cputime prev_cputime;
717 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
718 struct vtime vtime;
719 #endif
720
721 #ifdef CONFIG_NO_HZ_FULL
722 atomic_t tick_dep_mask;
723 #endif
724 /* Context switch counts: */
725 unsigned long nvcsw;
726 unsigned long nivcsw;
727
728 /* Monotonic time in nsecs: */
729 u64 start_time;
730
731 /* Boot based time in nsecs: */
732 u64 real_start_time;
733
734 /* MM fault and swap info: this can arguably be seen as either mm-specific or thread-specific: */
735 unsigned long min_flt;
736 unsigned long maj_flt;
737
738 #ifdef CONFIG_POSIX_TIMERS
739 struct task_cputime cputime_expires;
740 struct list_head cpu_timers[3];
741 #endif
742
743 /* Process credentials: */
744
745 /* Tracer's credentials at attach: */
746 const struct cred __rcu *ptracer_cred;
747
748 /* Objective and real subjective task credentials (COW): */
749 const struct cred __rcu *real_cred;
750
751 /* Effective (overridable) subjective task credentials (COW): */
752 const struct cred __rcu *cred;
753
754 /*
755 * executable name, excluding path.
756 *
757 * - normally initialized by setup_new_exec()
758 * - access it with [gs]et_task_comm()
759 * - lock it with task_lock()
760 */
761 char comm[TASK_COMM_LEN];
762
763 struct nameidata *nameidata;
764
765 #ifdef CONFIG_SYSVIPC
766 struct sysv_sem sysvsem;
767 struct sysv_shm sysvshm;
768 #endif
769 #ifdef CONFIG_DETECT_HUNG_TASK
770 unsigned long last_switch_count;
771 #endif
772 /* Filesystem information: */
773 struct fs_struct *fs;
774
775 /* Open file information: */
776 struct files_struct *files;
777
778 /* Namespaces: */
779 struct nsproxy *nsproxy;
780
781 /* Signal handlers: */
782 struct signal_struct *signal;
783 struct sighand_struct *sighand;
784 sigset_t blocked;
785 sigset_t real_blocked;
786 /* Restored if set_restore_sigmask() was used: */
787 sigset_t saved_sigmask;
788 struct sigpending pending;
789 unsigned long sas_ss_sp;
790 size_t sas_ss_size;
791 unsigned int sas_ss_flags;
792
793 struct callback_head *task_works;
794
795 struct audit_context *audit_context;
796 #ifdef CONFIG_AUDITSYSCALL
797 kuid_t loginuid;
798 unsigned int sessionid;
799 #endif
800 struct seccomp seccomp;
801
802 /* Thread group tracking: */
803 u32 parent_exec_id;
804 u32 self_exec_id;
805
806 /* Protection against (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed, mempolicy: */
807 spinlock_t alloc_lock;
808
809 /* Protection of the PI data structures: */
810 raw_spinlock_t pi_lock;
811
812 struct wake_q_node wake_q;
813
814 #ifdef CONFIG_RT_MUTEXES
815 /* PI waiters blocked on a rt_mutex held by this task: */
816 struct rb_root_cached pi_waiters;
817 /* Updated under owner's pi_lock and rq lock */
818 struct task_struct *pi_top_task;
819 /* Deadlock detection and priority inheritance handling: */
820 struct rt_mutex_waiter *pi_blocked_on;
821 #endif
822
823 #ifdef CONFIG_DEBUG_MUTEXES
824 /* Mutex deadlock detection: */
825 struct mutex_waiter *blocked_on;
826 #endif
827
828 #ifdef CONFIG_TRACE_IRQFLAGS
829 unsigned int irq_events;
830 unsigned long hardirq_enable_ip;
831 unsigned long hardirq_disable_ip;
832 unsigned int hardirq_enable_event;
833 unsigned int hardirq_disable_event;
834 int hardirqs_enabled;
835 int hardirq_context;
836 unsigned long softirq_disable_ip;
837 unsigned long softirq_enable_ip;
838 unsigned int softirq_disable_event;
839 unsigned int softirq_enable_event;
840 int softirqs_enabled;
841 int softirq_context;
842 #endif
843
844 #ifdef CONFIG_LOCKDEP
845 # define MAX_LOCK_DEPTH 48UL
846 u64 curr_chain_key;
847 int lockdep_depth;
848 unsigned int lockdep_recursion;
849 struct held_lock held_locks[MAX_LOCK_DEPTH];
850 #endif
851
852 #ifdef CONFIG_LOCKDEP_CROSSRELEASE
853 #define MAX_XHLOCKS_NR 64UL
854 struct hist_lock *xhlocks; /* Crossrelease history locks */
855 unsigned int xhlock_idx;
856 /* For restoring at history boundaries */
857 unsigned int xhlock_idx_hist[XHLOCK_CTX_NR];
858 unsigned int hist_id;
859 /* For overwrite check at each context exit */
860 unsigned int hist_id_save[XHLOCK_CTX_NR];
861 #endif
862
863 #ifdef CONFIG_UBSAN
864 unsigned int in_ubsan;
865 #endif
866
867 /* Journalling filesystem info: */
868 void *journal_info;
869
870 /* Stacked block device info: */
871 struct bio_list *bio_list;
872
873 #ifdef CONFIG_BLOCK
874 /* Stack plugging: */
875 struct blk_plug *plug;
876 #endif
877
878 /* VM state: */
879 struct reclaim_state *reclaim_state;
880
881 struct backing_dev_info *backing_dev_info;
882
883 struct io_context *io_context;
884
885 /* Ptrace state: */
886 unsigned long ptrace_message;
887 siginfo_t *last_siginfo;
888
889 struct task_io_accounting ioac;
890 #ifdef CONFIG_TASK_XACCT
891 /* Accumulated RSS usage: */
892 u64 acct_rss_mem1;
893 /* Accumulated virtual memory usage: */
894 u64 acct_vm_mem1;
895 /* stime + utime since last update: */
896 u64 acct_timexpd;
897 #endif
898 #ifdef CONFIG_CPUSETS
899 /* Protected by ->alloc_lock: */
900 nodemask_t mems_allowed;
901 /* Sequence number to catch updates: */
902 seqcount_t mems_allowed_seq;
903 int cpuset_mem_spread_rotor;
904 int cpuset_slab_spread_rotor;
905 #endif
906 #ifdef CONFIG_CGROUPS
907 /* Control Group info protected by css_set_lock: */
908 struct css_set __rcu *cgroups;
909 /* cg_list protected by css_set_lock and tsk->alloc_lock: */
910 struct list_head cg_list;
911 #endif
912 #ifdef CONFIG_INTEL_RDT
913 u32 closid;
914 u32 rmid;
915 #endif
916 #ifdef CONFIG_FUTEX
917 struct robust_list_head __user *robust_list;
918 #ifdef CONFIG_COMPAT
919 struct compat_robust_list_head __user *compat_robust_list;
920 #endif
921 struct list_head pi_state_list;
922 struct futex_pi_state *pi_state_cache;
923 #endif
924 #ifdef CONFIG_PERF_EVENTS
925 struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
926 struct mutex perf_event_mutex;
927 struct list_head perf_event_list;
928 #endif
929 #ifdef CONFIG_DEBUG_PREEMPT
930 unsigned long preempt_disable_ip;
931 #endif
932 #ifdef CONFIG_NUMA
933 /* Protected by alloc_lock: */
934 struct mempolicy *mempolicy;
935 short il_prev;
936 short pref_node_fork;
937 #endif
938 #ifdef CONFIG_NUMA_BALANCING
939 int numa_scan_seq;
940 unsigned int numa_scan_period;
941 unsigned int numa_scan_period_max;
942 int numa_preferred_nid;
943 unsigned long numa_migrate_retry;
944 /* Migration stamp: */
945 u64 node_stamp;
946 u64 last_task_numa_placement;
947 u64 last_sum_exec_runtime;
948 struct callback_head numa_work;
949
950 struct list_head numa_entry;
951 struct numa_group *numa_group;
952
953 /*
954 * numa_faults is an array split into four regions:
955 * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer
956 * in this precise order.
957 *
958 * faults_memory: Exponential decaying average of faults on a per-node
959 * basis. Scheduling placement decisions are made based on these
960 * counts. The values remain static for the duration of a PTE scan.
961 * faults_cpu: Track the nodes the process was running on when a NUMA
962 * hinting fault was incurred.
963 * faults_memory_buffer and faults_cpu_buffer: Record faults per node
964 * during the current scan window. When the scan completes, the counts
965 * in faults_memory and faults_cpu decay and these values are copied.
966 */
967 unsigned long *numa_faults;
968 unsigned long total_numa_faults;
969
970 /*
971 * numa_faults_locality tracks if faults recorded during the last
972 * scan window were remote/local or failed to migrate. The task scan
973 * period is adapted based on the locality of the faults with different
974 * weights depending on whether they were shared or private faults
975 */
976 unsigned long numa_faults_locality[3];
977
978 unsigned long numa_pages_migrated;
979 #endif /* CONFIG_NUMA_BALANCING */
980
981 struct tlbflush_unmap_batch tlb_ubc;
982
983 struct rcu_head rcu;
984
985 /* Cache last used pipe for splice(): */
986 struct pipe_inode_info *splice_pipe;
987
988 struct page_frag task_frag;
989
990 #ifdef CONFIG_TASK_DELAY_ACCT
991 struct task_delay_info *delays;
992 #endif
993
994 #ifdef CONFIG_FAULT_INJECTION
995 int make_it_fail;
996 unsigned int fail_nth;
997 #endif
998 /*
999 * When (nr_dirtied >= nr_dirtied_pause), it's time to call
1000 * balance_dirty_pages() for a dirty throttling pause:
1001 */
1002 int nr_dirtied;
1003 int nr_dirtied_pause;
1004 /* Start of a write-and-pause period: */
1005 unsigned long dirty_paused_when;
1006
1007 #ifdef CONFIG_LATENCYTOP
1008 int latency_record_count;
1009 struct latency_record latency_record[LT_SAVECOUNT];
1010 #endif
1011 /*
1012 * Time slack values; these are used to round up poll() and
1013 * select() etc timeout values. These are in nanoseconds.
1014 */
1015 u64 timer_slack_ns;
1016 u64 default_timer_slack_ns;
1017
1018 #ifdef CONFIG_KASAN
1019 unsigned int kasan_depth;
1020 #endif
1021
1022 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1023 /* Index of current stored address in ret_stack: */
1024 int curr_ret_stack;
1025
1026 /* Stack of return addresses for return function tracing: */
1027 struct ftrace_ret_stack *ret_stack;
1028
1029 /* Timestamp for last schedule: */
1030 unsigned long long ftrace_timestamp;
1031
1032 /*
1033 * Number of functions that haven't been traced
1034 * because of depth overrun:
1035 */
1036 atomic_t trace_overrun;
1037
1038 /* Pause tracing: */
1039 atomic_t tracing_graph_pause;
1040 #endif
1041
1042 #ifdef CONFIG_TRACING
1043 /* State flags for use by tracers: */
1044 unsigned long trace;
1045
1046 /* Bitmask and counter of trace recursion: */
1047 unsigned long trace_recursion;
1048 #endif /* CONFIG_TRACING */
1049
1050 #ifdef CONFIG_KCOV
1051 /* Coverage collection mode enabled for this task (0 if disabled): */
1052 enum kcov_mode kcov_mode;
1053
1054 /* Size of the kcov_area: */
1055 unsigned int kcov_size;
1056
1057 /* Buffer for coverage collection: */
1058 void *kcov_area;
1059
1060 /* KCOV descriptor wired with this task or NULL: */
1061 struct kcov *kcov;
1062 #endif
1063
1064 #ifdef CONFIG_MEMCG
1065 struct mem_cgroup *memcg_in_oom;
1066 gfp_t memcg_oom_gfp_mask;
1067 int memcg_oom_order;
1068
1069 /* Number of pages to reclaim on returning to userland: */
1070 unsigned int memcg_nr_pages_over_high;
1071 #endif
1072
1073 #ifdef CONFIG_UPROBES
1074 struct uprobe_task *utask;
1075 #endif
1076 #if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
1077 unsigned int sequential_io;
1078 unsigned int sequential_io_avg;
1079 #endif
1080 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
1081 unsigned long task_state_change;
1082 #endif
1083 int pagefault_disabled;
1084 #ifdef CONFIG_MMU
1085 struct task_struct *oom_reaper_list;
1086 #endif
1087 #ifdef CONFIG_VMAP_STACK
1088 struct vm_struct *stack_vm_area;
1089 #endif
1090 #ifdef CONFIG_THREAD_INFO_IN_TASK
1091 /* A live task holds one reference: */
1092 atomic_t stack_refcount;
1093 #endif
1094 #ifdef CONFIG_LIVEPATCH
1095 int patch_state;
1096 #endif
1097 #ifdef CONFIG_SECURITY
1098 /* Used by LSM modules for access restriction: */
1099 void *security;
1100 #endif
1101
1102 /*
1103 * New fields for task_struct should be added above here, so that
1104 * they are included in the randomized portion of task_struct.
1105 */
1106 randomized_struct_fields_end
1107
1108 /* CPU-specific state of this task: */
1109 struct thread_struct thread;
1110
1111 /*
1112 * WARNING: on x86, 'thread_struct' contains a variable-sized
1113 * structure. It *MUST* be at the end of 'task_struct'.
1114 *
1115 * Do not put anything below here!
1116 */
1117 };
1118
1119 static inline struct pid *task_pid(struct task_struct *task)
1120 {
1121 return task->pids[PIDTYPE_PID].pid;
1122 }
1123
1124 static inline struct pid *task_tgid(struct task_struct *task)
1125 {
1126 return task->group_leader->pids[PIDTYPE_PID].pid;
1127 }
1128
1129 /*
1130 * Without tasklist or RCU lock it is not safe to dereference
1131 * the result of task_pgrp/task_session even if task == current,
1132 * we can race with another thread doing sys_setsid/sys_setpgid.
1133 */
1134 static inline struct pid *task_pgrp(struct task_struct *task)
1135 {
1136 return task->group_leader->pids[PIDTYPE_PGID].pid;
1137 }
1138
1139 static inline struct pid *task_session(struct task_struct *task)
1140 {
1141 return task->group_leader->pids[PIDTYPE_SID].pid;
1142 }
1143
1144 /*
1145 * the helpers to get the task's different pids as they are seen
1146 * from various namespaces
1147 *
1148 * task_xid_nr() : global id, i.e. the id seen from the init namespace;
1149 * task_xid_vnr() : virtual id, i.e. the id seen from the pid namespace of
1150 * current.
1151 * task_xid_nr_ns() : id seen from the ns specified;
1152 *
1153 * see also pid_nr() etc in include/linux/pid.h
1154 */
1155 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, struct pid_namespace *ns);
1156
1157 static inline pid_t task_pid_nr(struct task_struct *tsk)
1158 {
1159 return tsk->pid;
1160 }
1161
1162 static inline pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1163 {
1164 return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
1165 }
1166
1167 static inline pid_t task_pid_vnr(struct task_struct *tsk)
1168 {
1169 return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
1170 }
1171
1172
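/*
 * Illustrative sketch (not part of the original header): reporting
 * both the global PID and the PID as seen from current's namespace.
 * pr_info() is assumed from <linux/printk.h>; 'tsk' must be a valid,
 * referenced task.
 */
static inline void example_report_pids(struct task_struct *tsk)
{
	pid_t global_pid = task_pid_nr(tsk);	/* as seen by the init namespace  */
	pid_t virt_pid   = task_pid_vnr(tsk);	/* as seen by current's namespace */

	pr_info("%s: global pid %d, virtual pid %d\n",
		tsk->comm, global_pid, virt_pid);
}
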
1173 static inline pid_t task_tgid_nr(struct task_struct *tsk)
1174 {
1175 return tsk->tgid;
1176 }
1177
1178 /**
1179 * pid_alive - check that a task structure is not stale
1180 * @p: Task structure to be checked.
1181 *
1182 * Test if a process is not yet dead (at most zombie state).
1183 * If pid_alive fails, then pointers within the task structure
1184 * can be stale and must not be dereferenced.
1185 *
1186 * Return: 1 if the process is alive. 0 otherwise.
1187 */
1188 static inline int pid_alive(const struct task_struct *p)
1189 {
1190 return p->pids[PIDTYPE_PID].pid != NULL;
1191 }
1192
1193 static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1194 {
1195 return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
1196 }
1197
1198 static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
1199 {
1200 return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
1201 }
1202
1203
1204 static inline pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1205 {
1206 return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
1207 }
1208
1209 static inline pid_t task_session_vnr(struct task_struct *tsk)
1210 {
1211 return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
1212 }
1213
1214 static inline pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1215 {
1216 return __task_pid_nr_ns(tsk, __PIDTYPE_TGID, ns);
1217 }
1218
1219 static inline pid_t task_tgid_vnr(struct task_struct *tsk)
1220 {
1221 return __task_pid_nr_ns(tsk, __PIDTYPE_TGID, NULL);
1222 }
1223
1224 static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
1225 {
1226 pid_t pid = 0;
1227
1228 rcu_read_lock();
1229 if (pid_alive(tsk))
1230 pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
1231 rcu_read_unlock();
1232
1233 return pid;
1234 }
1235
1236 static inline pid_t task_ppid_nr(const struct task_struct *tsk)
1237 {
1238 return task_ppid_nr_ns(tsk, &init_pid_ns);
1239 }
1240
1241 /* Obsolete, do not use: */
1242 static inline pid_t task_pgrp_nr(struct task_struct *tsk)
1243 {
1244 return task_pgrp_nr_ns(tsk, &init_pid_ns);
1245 }
1246
1247 #define TASK_REPORT_IDLE (TASK_REPORT + 1)
1248 #define TASK_REPORT_MAX (TASK_REPORT_IDLE << 1)
1249
1250 static inline unsigned int task_state_index(struct task_struct *tsk)
1251 {
1252 unsigned int tsk_state = READ_ONCE(tsk->state);
1253 unsigned int state = (tsk_state | tsk->exit_state) & TASK_REPORT;
1254
1255 BUILD_BUG_ON_NOT_POWER_OF_2(TASK_REPORT_MAX);
1256
1257 if (tsk_state == TASK_IDLE)
1258 state = TASK_REPORT_IDLE;
1259
1260 return fls(state);
1261 }
1262
1263 static inline char task_index_to_char(unsigned int state)
1264 {
1265 static const char state_char[] = "RSDTtXZPI";
1266
1267 BUILD_BUG_ON(1 + ilog2(TASK_REPORT_MAX) != sizeof(state_char) - 1);
1268
1269 return state_char[state];
1270 }
1271
1272 static inline char task_state_to_char(struct task_struct *tsk)
1273 {
1274 return task_index_to_char(task_state_index(tsk));
1275 }
1276
1277 /**
1278 * is_global_init - check if a task structure is init. Since init
1279 * is free to have sub-threads, we need to check tgid.
1280 * @tsk: Task structure to be checked.
1281 *
1282 * Check if a task structure is the first user space task the kernel created.
1283 *
1284 * Return: 1 if the task structure is init. 0 otherwise.
1285 */
1286 static inline int is_global_init(struct task_struct *tsk)
1287 {
1288 return task_tgid_nr(tsk) == 1;
1289 }
1290
1291 extern struct pid *cad_pid;
1292
1293 /*
1294 * Per process flags
1295 */
1296 #define PF_IDLE 0x00000002 /* I am an IDLE thread */
1297 #define PF_EXITING 0x00000004 /* Getting shut down */
1298 #define PF_EXITPIDONE 0x00000008 /* PI exit done on shut down */
1299 #define PF_VCPU 0x00000010 /* I'm a virtual CPU */
1300 #define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */
1301 #define PF_FORKNOEXEC 0x00000040 /* Forked but didn't exec */
1302 #define PF_MCE_PROCESS 0x00000080 /* Process policy on mce errors */
1303 #define PF_SUPERPRIV 0x00000100 /* Used super-user privileges */
1304 #define PF_DUMPCORE 0x00000200 /* Dumped core */
1305 #define PF_SIGNALED 0x00000400 /* Killed by a signal */
1306 #define PF_MEMALLOC 0x00000800 /* Allocating memory */
1307 #define PF_NPROC_EXCEEDED 0x00001000 /* set_user() noticed that RLIMIT_NPROC was exceeded */
1308 #define PF_USED_MATH 0x00002000 /* If unset the fpu must be initialized before use */
1309 #define PF_USED_ASYNC 0x00004000 /* Used async_schedule*(), used by module init */
1310 #define PF_NOFREEZE 0x00008000 /* This thread should not be frozen */
1311 #define PF_FROZEN 0x00010000 /* Frozen for system suspend */
1312 #define PF_KSWAPD 0x00020000 /* I am kswapd */
1313 #define PF_MEMALLOC_NOFS 0x00040000 /* All allocation requests will inherit GFP_NOFS */
1314 #define PF_MEMALLOC_NOIO 0x00080000 /* All allocation requests will inherit GFP_NOIO */
1315 #define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */
1316 #define PF_KTHREAD 0x00200000 /* I am a kernel thread */
1317 #define PF_RANDOMIZE 0x00400000 /* Randomize virtual address space */
1318 #define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */
1319 #define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */
1320 #define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */
1321 #define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */
1322 #define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */
1323 #define PF_SUSPEND_TASK 0x80000000 /* This thread called freeze_processes() and should not be frozen */
1324
1325 /*
1326 * Only the _current_ task can read/write to tsk->flags, but other
1327 * tasks can access tsk->flags in readonly mode for example
1328 * with tsk_used_math (like during threaded core dumping).
1329 * There is, however, an exception to this rule during ptrace
1330 * or during fork: the ptracer task is allowed to write to the
1331 * child->flags of its traced child (the same goes for fork: the parent
1332 * can write to child->flags), because we're guaranteed the
1333 * child is not running and thus not changing child->flags
1334 * at the same time the parent does it.
1335 */
1336 #define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
1337 #define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
1338 #define clear_used_math() clear_stopped_child_used_math(current)
1339 #define set_used_math() set_stopped_child_used_math(current)
1340
1341 #define conditional_stopped_child_used_math(condition, child) \
1342 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
1343
1344 #define conditional_used_math(condition) conditional_stopped_child_used_math(condition, current)
1345
1346 #define copy_to_stopped_child_used_math(child) \
1347 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
1348
1349 /* NOTE: this will return 0 or PF_USED_MATH; it will never return 1 */
1350 #define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
1351 #define used_math() tsk_used_math(current)
1352
1353 static inline bool is_percpu_thread(void)
1354 {
1355 #ifdef CONFIG_SMP
1356 return (current->flags & PF_NO_SETAFFINITY) &&
1357 (current->nr_cpus_allowed == 1);
1358 #else
1359 return true;
1360 #endif
1361 }
1362
1363 /* Per-process atomic flags. */
1364 #define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. */
1365 #define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */
1366 #define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */
1367
1368
1369 #define TASK_PFA_TEST(name, func) \
1370 static inline bool task_##func(struct task_struct *p) \
1371 { return test_bit(PFA_##name, &p->atomic_flags); }
1372
1373 #define TASK_PFA_SET(name, func) \
1374 static inline void task_set_##func(struct task_struct *p) \
1375 { set_bit(PFA_##name, &p->atomic_flags); }
1376
1377 #define TASK_PFA_CLEAR(name, func) \
1378 static inline void task_clear_##func(struct task_struct *p) \
1379 { clear_bit(PFA_##name, &p->atomic_flags); }
1380
1381 TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
1382 TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)
1383
1384 TASK_PFA_TEST(SPREAD_PAGE, spread_page)
1385 TASK_PFA_SET(SPREAD_PAGE, spread_page)
1386 TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)
1387
1388 TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
1389 TASK_PFA_SET(SPREAD_SLAB, spread_slab)
1390 TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
1391
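/*
 * Illustrative sketch (not part of the original header): the helpers
 * generated above are used like ordinary setters/predicates.  Note
 * that NO_NEW_PRIVS deliberately has no CLEAR helper: the transition
 * is one-way.
 */
static inline bool example_enter_no_new_privs(struct task_struct *p)
{
	task_set_no_new_privs(p);
	return task_no_new_privs(p);
}
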
1392 static inline void
1393 current_restore_flags(unsigned long orig_flags, unsigned long flags)
1394 {
1395 current->flags &= ~flags;
1396 current->flags |= orig_flags & flags;
1397 }
1398
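/*
 * Illustrative sketch (not part of the original header): the usual
 * save/modify/restore pattern for current->flags, shown here with
 * PF_MEMALLOC_NOIO.  Real code should prefer the dedicated helpers in
 * <linux/sched/mm.h> (memalloc_noio_save()/memalloc_noio_restore()).
 */
static inline unsigned int example_noio_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_NOIO;

	current->flags |= PF_MEMALLOC_NOIO;
	return flags;
}

static inline void example_noio_restore(unsigned int flags)
{
	current_restore_flags(flags, PF_MEMALLOC_NOIO);
}
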
1399 extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
1400 extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed);
1401 #ifdef CONFIG_SMP
1402 extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
1403 extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
1404 #else
1405 static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
1406 {
1407 }
1408 static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
1409 {
1410 if (!cpumask_test_cpu(0, new_mask))
1411 return -EINVAL;
1412 return 0;
1413 }
1414 #endif
1415
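/*
 * Illustrative sketch (not part of the original header): restricting a
 * task to a single CPU with the affinity interface declared above.
 * cpumask_of() is assumed from <linux/cpumask.h>; the return value is
 * 0 on success or a negative errno.
 */
static inline int example_pin_task_to_cpu(struct task_struct *p, int cpu)
{
	return set_cpus_allowed_ptr(p, cpumask_of(cpu));
}
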
1416 #ifndef cpu_relax_yield
1417 #define cpu_relax_yield() cpu_relax()
1418 #endif
1419
1420 extern int yield_to(struct task_struct *p, bool preempt);
1421 extern void set_user_nice(struct task_struct *p, long nice);
1422 extern int task_prio(const struct task_struct *p);
1423
1424 /**
1425 * task_nice - return the nice value of a given task.
1426 * @p: the task in question.
1427 *
1428 * Return: The nice value [ -20 ... 0 ... 19 ].
1429 */
1430 static inline int task_nice(const struct task_struct *p)
1431 {
1432 return PRIO_TO_NICE((p)->static_prio);
1433 }
1434
1435 extern int can_nice(const struct task_struct *p, const int nice);
1436 extern int task_curr(const struct task_struct *p);
1437 extern int idle_cpu(int cpu);
1438 extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *);
1439 extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *);
1440 extern int sched_setattr(struct task_struct *, const struct sched_attr *);
1441 extern struct task_struct *idle_task(int cpu);
1442
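/*
 * Illustrative sketch (not part of the original header): renicing a
 * task only when the caller is allowed to, combining can_nice() and
 * set_user_nice() declared above.
 */
static inline bool example_try_set_nice(struct task_struct *p, long nice)
{
	if (!can_nice(p, nice))
		return false;
	set_user_nice(p, nice);
	return true;
}
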
1443 /**
1444 * is_idle_task - is the specified task an idle task?
1445 * @p: the task in question.
1446 *
1447 * Return: 1 if @p is an idle task. 0 otherwise.
1448 */
1449 static inline bool is_idle_task(const struct task_struct *p)
1450 {
1451 return !!(p->flags & PF_IDLE);
1452 }
1453
1454 extern struct task_struct *curr_task(int cpu);
1455 extern void ia64_set_curr_task(int cpu, struct task_struct *p);
1456
1457 void yield(void);
1458
1459 union thread_union {
1460 #ifndef CONFIG_THREAD_INFO_IN_TASK
1461 struct thread_info thread_info;
1462 #endif
1463 unsigned long stack[THREAD_SIZE/sizeof(long)];
1464 };
1465
1466 #ifdef CONFIG_THREAD_INFO_IN_TASK
1467 static inline struct thread_info *task_thread_info(struct task_struct *task)
1468 {
1469 return &task->thread_info;
1470 }
1471 #elif !defined(__HAVE_THREAD_FUNCTIONS)
1472 # define task_thread_info(task) ((struct thread_info *)(task)->stack)
1473 #endif
1474
1475 /*
1476 * find a task by one of its numerical ids
1477 *
1478 * find_task_by_pid_ns():
1479 * finds a task by its pid in the specified namespace
1480 * find_task_by_vpid():
1481 * finds a task by its virtual pid
1482 *
1483 * see also find_vpid() etc in include/linux/pid.h
1484 */
1485
1486 extern struct task_struct *find_task_by_vpid(pid_t nr);
1487 extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns);
1488
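/*
 * Illustrative sketch (not part of the original header): looking up a
 * task by its virtual PID.  The lookup itself must be done under RCU
 * (or tasklist_lock), and a reference has to be taken before the RCU
 * section ends if the task is used afterwards.  get_task_struct() is
 * assumed from <linux/sched/task.h>.
 */
static inline struct task_struct *example_get_task_by_vpid(pid_t nr)
{
	struct task_struct *tsk;

	rcu_read_lock();
	tsk = find_task_by_vpid(nr);
	if (tsk)
		get_task_struct(tsk);
	rcu_read_unlock();

	return tsk;
}
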
1489 extern int wake_up_state(struct task_struct *tsk, unsigned int state);
1490 extern int wake_up_process(struct task_struct *tsk);
1491 extern void wake_up_new_task(struct task_struct *tsk);
1492
1493 #ifdef CONFIG_SMP
1494 extern void kick_process(struct task_struct *tsk);
1495 #else
1496 static inline void kick_process(struct task_struct *tsk) { }
1497 #endif
1498
1499 extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);
1500
1501 static inline void set_task_comm(struct task_struct *tsk, const char *from)
1502 {
1503 __set_task_comm(tsk, from, false);
1504 }
1505
1506 extern char *get_task_comm(char *to, struct task_struct *tsk);
1507
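/*
 * Illustrative sketch (not part of the original header): the buffer
 * handed to get_task_comm() must be at least TASK_COMM_LEN bytes.
 * pr_info() is assumed from <linux/printk.h>.
 */
static inline void example_print_comm(struct task_struct *tsk)
{
	char comm[TASK_COMM_LEN];

	get_task_comm(comm, tsk);
	pr_info("task comm: %s\n", comm);
}
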
1508 #ifdef CONFIG_SMP
1509 void scheduler_ipi(void);
1510 extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
1511 #else
1512 static inline void scheduler_ipi(void) { }
1513 static inline unsigned long wait_task_inactive(struct task_struct *p, long match_state)
1514 {
1515 return 1;
1516 }
1517 #endif
1518
1519 /*
1520 * Set thread flags in other task's structures.
1521 * See asm/thread_info.h for TIF_xxxx flags available:
1522 */
1523 static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
1524 {
1525 set_ti_thread_flag(task_thread_info(tsk), flag);
1526 }
1527
1528 static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
1529 {
1530 clear_ti_thread_flag(task_thread_info(tsk), flag);
1531 }
1532
1533 static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
1534 {
1535 return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
1536 }
1537
1538 static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
1539 {
1540 return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
1541 }
1542
1543 static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
1544 {
1545 return test_ti_thread_flag(task_thread_info(tsk), flag);
1546 }
1547
1548 static inline void set_tsk_need_resched(struct task_struct *tsk)
1549 {
1550 set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
1551 }
1552
1553 static inline void clear_tsk_need_resched(struct task_struct *tsk)
1554 {
1555 clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
1556 }
1557
1558 static inline int test_tsk_need_resched(struct task_struct *tsk)
1559 {
1560 return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
1561 }
1562
1563 /*
1564 * cond_resched() and cond_resched_lock(): latency reduction via
1565 * explicit rescheduling in places that are safe. The return
1566 * value indicates whether a reschedule was in fact done.
1567 * cond_resched_lock() will drop the spinlock before scheduling,
1568 * cond_resched_softirq() will enable bhs before scheduling.
1569 */
1570 #ifndef CONFIG_PREEMPT
1571 extern int _cond_resched(void);
1572 #else
1573 static inline int _cond_resched(void) { return 0; }
1574 #endif
1575
1576 #define cond_resched() ({ \
1577 ___might_sleep(__FILE__, __LINE__, 0); \
1578 _cond_resched(); \
1579 })
1580
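/*
 * Illustrative sketch (not part of the original header): breaking up a
 * long-running loop in process context so that other tasks can be
 * scheduled.  'nr_items' and the callback are hypothetical.
 */
static inline void example_process_all(unsigned long nr_items,
				       void (*process_one)(unsigned long))
{
	unsigned long i;

	for (i = 0; i < nr_items; i++) {
		process_one(i);
		cond_resched();
	}
}
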
1581 extern int __cond_resched_lock(spinlock_t *lock);
1582
1583 #define cond_resched_lock(lock) ({ \
1584 ___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
1585 __cond_resched_lock(lock); \
1586 })
1587
1588 extern int __cond_resched_softirq(void);
1589
1590 #define cond_resched_softirq() ({ \
1591 ___might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \
1592 __cond_resched_softirq(); \
1593 })
1594
1595 static inline void cond_resched_rcu(void)
1596 {
1597 #if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
1598 rcu_read_unlock();
1599 cond_resched();
1600 rcu_read_lock();
1601 #endif
1602 }
1603
1604 /*
1605 * Does a critical section need to be broken due to another
1606 * task waiting? (Technically this does not depend on CONFIG_PREEMPT,
1607 * but it is a general need for low latency.)
1608 */
1609 static inline int spin_needbreak(spinlock_t *lock)
1610 {
1611 #ifdef CONFIG_PREEMPT
1612 return spin_is_contended(lock);
1613 #else
1614 return 0;
1615 #endif
1616 }
1617
1618 static __always_inline bool need_resched(void)
1619 {
1620 return unlikely(tif_need_resched());
1621 }
1622
1623 /*
1624 * Wrappers for p->thread_info->cpu access. No-op on UP.
1625 */
1626 #ifdef CONFIG_SMP
1627
1628 static inline unsigned int task_cpu(const struct task_struct *p)
1629 {
1630 #ifdef CONFIG_THREAD_INFO_IN_TASK
1631 return p->cpu;
1632 #else
1633 return task_thread_info(p)->cpu;
1634 #endif
1635 }
1636
1637 extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
1638
1639 #else
1640
1641 static inline unsigned int task_cpu(const struct task_struct *p)
1642 {
1643 return 0;
1644 }
1645
1646 static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
1647 {
1648 }
1649
1650 #endif /* CONFIG_SMP */
1651
1652 /*
1653 * In order to reduce various lock holder preemption latencies provide an
1654 * interface to see if a vCPU is currently running or not.
1655 *
1656 * This allows us to terminate optimistic spin loops and block, analogous to
1657 * the native optimistic spin heuristic of testing if the lock owner task is
1658 * running or not.
1659 */
1660 #ifndef vcpu_is_preempted
1661 # define vcpu_is_preempted(cpu) false
1662 #endif
1663
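/*
 * Illustrative sketch (not part of the original header): an optimistic
 * spin that bails out once the lock owner's vCPU has been preempted,
 * in the spirit described above.  'locked' and 'owner_cpu' are
 * hypothetical; atomic_read()/cpu_relax() are assumed from
 * <linux/atomic.h> and <asm/processor.h>.
 */
static inline bool example_spin_on_owner(atomic_t *locked, int owner_cpu)
{
	while (atomic_read(locked)) {
		if (vcpu_is_preempted(owner_cpu))
			return false;	/* owner is not running: stop spinning */
		cpu_relax();
	}
	return true;			/* lock was released while spinning */
}
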
1664 extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
1665 extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
1666
1667 #ifndef TASK_SIZE_OF
1668 #define TASK_SIZE_OF(tsk) TASK_SIZE
1669 #endif
1670
1671 #endif