#ifndef _LINUX_SCHED_H
#define _LINUX_SCHED_H

/*
 * cloning flags:
 */
#define CSIGNAL 0x000000ff /* signal mask to be sent at exit */
#define CLONE_VM 0x00000100 /* set if VM shared between processes */
#define CLONE_FS 0x00000200 /* set if fs info shared between processes */
#define CLONE_FILES 0x00000400 /* set if open files shared between processes */
#define CLONE_SIGHAND 0x00000800 /* set if signal handlers and blocked signals shared */
#define CLONE_PTRACE 0x00002000 /* set if we want to let tracing continue on the child too */
#define CLONE_VFORK 0x00004000 /* set if the parent wants the child to wake it up on mm_release */
#define CLONE_PARENT 0x00008000 /* set if we want to have the same parent as the cloner */
#define CLONE_THREAD 0x00010000 /* Same thread group? */
#define CLONE_NEWNS 0x00020000 /* New namespace group? */
#define CLONE_SYSVSEM 0x00040000 /* share system V SEM_UNDO semantics */
#define CLONE_SETTLS 0x00080000 /* create a new TLS for the child */
#define CLONE_PARENT_SETTID 0x00100000 /* set the TID in the parent */
#define CLONE_CHILD_CLEARTID 0x00200000 /* clear the TID in the child */
#define CLONE_DETACHED 0x00400000 /* Unused, ignored */
#define CLONE_UNTRACED 0x00800000 /* set if the tracing process can't force CLONE_PTRACE on this clone */
#define CLONE_CHILD_SETTID 0x01000000 /* set the TID in the child */
/* 0x02000000 was previously the unused CLONE_STOPPED (Start in stopped state)
   and is now available for re-use. */
#define CLONE_NEWUTS 0x04000000 /* New utsname group? */
#define CLONE_NEWIPC 0x08000000 /* New ipcs */
#define CLONE_NEWUSER 0x10000000 /* New user namespace */
#define CLONE_NEWPID 0x20000000 /* New pid namespace */
#define CLONE_NEWNET 0x40000000 /* New network namespace */
#define CLONE_IO 0x80000000 /* Clone io context */

/*
 * Scheduling policies
 */
#define SCHED_NORMAL 0
#define SCHED_FIFO 1
#define SCHED_RR 2
#define SCHED_BATCH 3
/* SCHED_ISO: reserved but not implemented yet */
#define SCHED_IDLE 5
/* Can be ORed in to make sure the process is reverted back to SCHED_NORMAL on fork */
#define SCHED_RESET_ON_FORK 0x40000000

#ifdef __KERNEL__

struct sched_param {
	int sched_priority;
};

#include <asm/param.h> /* for HZ */

#include <linux/capability.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/rbtree.h>
#include <linux/thread_info.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/nodemask.h>
#include <linux/mm_types.h>

#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/cputime.h>

#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/signal.h>
#include <linux/compiler.h>
#include <linux/completion.h>
#include <linux/pid.h>
#include <linux/percpu.h>
#include <linux/topology.h>
#include <linux/proportions.h>
#include <linux/seccomp.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/rtmutex.h>

#include <linux/time.h>
#include <linux/param.h>
#include <linux/resource.h>
#include <linux/timer.h>
#include <linux/hrtimer.h>
#include <linux/task_io_accounting.h>
#include <linux/latencytop.h>
#include <linux/cred.h>
#include <linux/llist.h>
#include <linux/uidgid.h>

#include <asm/processor.h>

struct exec_domain;
struct futex_pi_state;
struct robust_list_head;
struct bio_list;
struct fs_struct;
struct perf_event_context;
struct blk_plug;

/*
 * List of flags we want to share for kernel threads,
 * if only because they are not used by them anyway.
 */
#define CLONE_KERNEL (CLONE_FS | CLONE_FILES | CLONE_SIGHAND)
/*
 * These are the constants used for the fixed-point load-average
 * counting. Some notes:
 *  - 11 bit fractions expand to 22 bits by the multiplies: this gives
 *    a load-average precision of 10 bits integer + 11 bits fractional
 *  - if you want to count load-averages more often, you need more
 *    precision, or rounding will get you. With 2-second counting freq,
 *    the EXP_n values would be 1981, 2034 and 2043 if still using only
 *    11 bit fractions.
 */
extern unsigned long avenrun[]; /* Load averages */
extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);

#define FSHIFT 11 /* nr of bits of precision */
#define FIXED_1 (1<<FSHIFT) /* 1.0 as fixed-point */
#define LOAD_FREQ (5*HZ+1) /* 5 sec intervals */
#define EXP_1 1884 /* 1/exp(5sec/1min) as fixed-point */
#define EXP_5 2014 /* 1/exp(5sec/5min) */
#define EXP_15 2037 /* 1/exp(5sec/15min) */

#define CALC_LOAD(load,exp,n) \
	load *= exp; \
	load += n*(FIXED_1-exp); \
	load >>= FSHIFT;

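/*
 * Illustrative sketch (not part of the original header): one CALC_LOAD step
 * for the 1-minute average. "nr_active" is the instantaneous runnable count,
 * converted to fixed-point before the update. With FIXED_1 == 2048 and
 * EXP_1 == 1884, a stored load of 1.0 (2048) decaying toward zero becomes
 * (2048 * 1884 + 0) >> 11 == 1884, i.e. roughly 0.92 after one 5 s interval.
 */
static inline unsigned long calc_load_step_example(unsigned long load,
						   unsigned long nr_active)
{
	unsigned long n = nr_active << FSHIFT; /* active count as fixed-point */

	CALC_LOAD(load, EXP_1, n); /* expands to three statements */
	return load;
}
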
extern unsigned long total_forks;
extern int nr_threads;
DECLARE_PER_CPU(unsigned long, process_counts);
extern int nr_processes(void);
extern unsigned long nr_running(void);
extern unsigned long nr_uninterruptible(void);
extern unsigned long nr_iowait(void);
extern unsigned long nr_iowait_cpu(int cpu);
extern unsigned long this_cpu_load(void);


extern void calc_global_load(unsigned long ticks);
extern void update_cpu_load_nohz(void);

extern unsigned long get_parent_ip(unsigned long addr);

struct seq_file;
struct cfs_rq;
struct task_group;
#ifdef CONFIG_SCHED_DEBUG
extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
extern void proc_sched_set_task(struct task_struct *p);
extern void
print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
#else
static inline void
proc_sched_show_task(struct task_struct *p, struct seq_file *m)
{
}
static inline void proc_sched_set_task(struct task_struct *p)
{
}
static inline void
print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
}
#endif

/*
 * Task state bitmask. NOTE! These bits are also
 * encoded in fs/proc/array.c: get_task_state().
 *
 * We have two separate sets of flags: task->state
 * is about runnability, while task->exit_state is
 * about the task exiting. Confusing, but this way
 * modifying one set can't modify the other one by
 * mistake.
 */
#define TASK_RUNNING 0
#define TASK_INTERRUPTIBLE 1
#define TASK_UNINTERRUPTIBLE 2
#define __TASK_STOPPED 4
#define __TASK_TRACED 8
/* in tsk->exit_state */
#define EXIT_ZOMBIE 16
#define EXIT_DEAD 32
/* in tsk->state again */
#define TASK_DEAD 64
#define TASK_WAKEKILL 128
#define TASK_WAKING 256
#define TASK_STATE_MAX 512

#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKW"

extern char ___assert_task_state[1 - 2*!!(
		sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];

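/*
 * Worked example: TASK_STATE_TO_CHAR_STR holds 10 characters and
 * ilog2(TASK_STATE_MAX) + 1 == ilog2(512) + 1 == 10, so the array above
 * gets size 1. Adding a state bit without a matching character would make
 * the size -1 and fail the build -- a compile-time assertion.
 */
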
/* Convenience macros for the sake of set_task_state */
#define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
#define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED)
#define TASK_TRACED (TASK_WAKEKILL | __TASK_TRACED)

/* Convenience macros for the sake of wake_up */
#define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
#define TASK_ALL (TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)

/* get_task_state() */
#define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \
		     TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
		     __TASK_TRACED)

#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0)
#define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0)
#define task_is_dead(task) ((task)->exit_state != 0)
#define task_is_stopped_or_traced(task) \
	((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
#define task_contributes_to_load(task) \
	((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
	 (task->flags & PF_FROZEN) == 0)

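/*
 * Worked example: TASK_KILLABLE is 0x82 (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE),
 * so a killable sleeper still satisfies task_contributes_to_load() above and
 * counts toward the load average exactly like a plain uninterruptible
 * sleeper, unless PF_FROZEN is set.
 */
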
#define __set_task_state(tsk, state_value) \
	do { (tsk)->state = (state_value); } while (0)
#define set_task_state(tsk, state_value) \
	set_mb((tsk)->state, (state_value))

/*
 * set_current_state() includes a barrier so that the write of current->state
 * is correctly serialised wrt the caller's subsequent test of whether to
 * actually sleep:
 *
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	if (do_i_need_to_sleep())
 *		schedule();
 *
 * If the caller does not need such serialisation then use __set_current_state()
 */
#define __set_current_state(state_value) \
	do { current->state = (state_value); } while (0)
#define set_current_state(state_value) \
	set_mb(current->state, (state_value))

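/*
 * Illustrative sketch (not part of the original header): the canonical sleep
 * loop built on the macros above. "condition_true" is a hypothetical
 * caller-supplied predicate. Because set_current_state() implies a memory
 * barrier, the state write is ordered before the condition re-test, so a
 * concurrent wake_up() between the two cannot be lost.
 */
static inline void wait_for_condition_example(int (*condition_true)(void))
{
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (condition_true())
			break;
		schedule(); /* declared further down in this header */
	}
	__set_current_state(TASK_RUNNING); /* no barrier needed on the way out */
}
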
/* Task command name length */
#define TASK_COMM_LEN 16

#include <linux/spinlock.h>

/*
 * This serializes "schedule()" and also protects
 * the run-queue from deletions/modifications (but
 * _adding_ to the beginning of the run-queue has
 * a separate lock).
 */
extern rwlock_t tasklist_lock;
extern spinlock_t mmlist_lock;

struct task_struct;

#ifdef CONFIG_PROVE_RCU
extern int lockdep_tasklist_lock_is_held(void);
#endif /* #ifdef CONFIG_PROVE_RCU */

extern void sched_init(void);
extern void sched_init_smp(void);
extern asmlinkage void schedule_tail(struct task_struct *prev);
extern void init_idle(struct task_struct *idle, int cpu);
extern void init_idle_bootup_task(struct task_struct *idle);

extern int runqueue_is_locked(int cpu);

#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
extern void nohz_balance_enter_idle(int cpu);
extern void set_cpu_sd_state_idle(void);
extern int get_nohz_timer_target(void);
#else
static inline void nohz_balance_enter_idle(int cpu) { }
static inline void set_cpu_sd_state_idle(void) { }
#endif

/*
 * Only dump TASK_* tasks. (0 for all tasks)
 */
extern void show_state_filter(unsigned long state_filter);

static inline void show_state(void)
{
	show_state_filter(0);
}

extern void show_regs(struct pt_regs *);

/*
 * TASK is a pointer to the task whose backtrace we want to see (or NULL for current
 * task), SP is the stack pointer of the first frame that should be shown in the back
 * trace (or NULL if the entire call-chain of the task should be shown).
 */
extern void show_stack(struct task_struct *task, unsigned long *sp);

void io_schedule(void);
long io_schedule_timeout(long timeout);

extern void cpu_init (void);
extern void trap_init(void);
extern void update_process_times(int user);
extern void scheduler_tick(void);

extern void sched_show_task(struct task_struct *p);

#ifdef CONFIG_LOCKUP_DETECTOR
extern void touch_softlockup_watchdog(void);
extern void touch_softlockup_watchdog_sync(void);
extern void touch_all_softlockup_watchdogs(void);
extern int proc_dowatchdog_thresh(struct ctl_table *table, int write,
				  void __user *buffer,
				  size_t *lenp, loff_t *ppos);
extern unsigned int softlockup_panic;
void lockup_detector_init(void);
#else
static inline void touch_softlockup_watchdog(void)
{
}
static inline void touch_softlockup_watchdog_sync(void)
{
}
static inline void touch_all_softlockup_watchdogs(void)
{
}
static inline void lockup_detector_init(void)
{
}
#endif

#ifdef CONFIG_DETECT_HUNG_TASK
extern unsigned int sysctl_hung_task_panic;
extern unsigned long sysctl_hung_task_check_count;
extern unsigned long sysctl_hung_task_timeout_secs;
extern unsigned long sysctl_hung_task_warnings;
extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
					 void __user *buffer,
					 size_t *lenp, loff_t *ppos);
#else
/* Avoid need for ifdefs elsewhere in the code */
enum { sysctl_hung_task_timeout_secs = 0 };
#endif

/* Attach to any functions which should be ignored in wchan output. */
#define __sched __attribute__((__section__(".sched.text")))

/* Linker adds these: start and end of __sched functions */
extern char __sched_text_start[], __sched_text_end[];

/* Is this address in the __sched functions? */
extern int in_sched_functions(unsigned long addr);

#define MAX_SCHEDULE_TIMEOUT LONG_MAX
extern signed long schedule_timeout(signed long timeout);
extern signed long schedule_timeout_interruptible(signed long timeout);
extern signed long schedule_timeout_killable(signed long timeout);
extern signed long schedule_timeout_uninterruptible(signed long timeout);
asmlinkage void schedule(void);
extern void schedule_preempt_disabled(void);
extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);

struct nsproxy;
struct user_namespace;

/*
 * Default maximum number of active map areas; this limits the number of vmas
 * per mm struct. Users can override this number via sysctl, but there is a
 * caveat.
 *
 * When a program's coredump is generated in ELF format, a section is created
 * per vma. In ELF, the number of sections is represented as an unsigned short.
 * This means the number of sections must be smaller than 65535 at coredump
 * time. Because the kernel adds some informative sections to the program
 * image when generating a coredump, we need some margin. The number of extra
 * sections is currently 1-3, depending on the architecture. We use 5 as a
 * safe margin here.
 */
#define MAPCOUNT_ELF_CORE_MARGIN (5)
#define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)

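/*
 * Worked example: USHRT_MAX is 65535, so the default maximum map count is
 * 65535 - 5 == 65530, keeping a full coredump within the 16-bit ELF section
 * count even after the extra metadata sections are added.
 */
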
extern int sysctl_max_map_count;

#include <linux/aio.h>

#ifdef CONFIG_MMU
extern void arch_pick_mmap_layout(struct mm_struct *mm);
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
		       unsigned long, unsigned long);
extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
			       unsigned long len, unsigned long pgoff,
			       unsigned long flags);
extern void arch_unmap_area(struct mm_struct *, unsigned long);
extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
#endif


extern void set_dumpable(struct mm_struct *mm, int value);
extern int get_dumpable(struct mm_struct *mm);

/* get/set_dumpable() values */
#define SUID_DUMPABLE_DISABLED 0
#define SUID_DUMPABLE_ENABLED 1
#define SUID_DUMPABLE_SAFE 2

/* mm flags */
/* dumpable bits */
#define MMF_DUMPABLE 0 /* core dump is permitted */
#define MMF_DUMP_SECURELY 1 /* core file is readable only by root */

#define MMF_DUMPABLE_BITS 2
#define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1)

/* coredump filter bits */
#define MMF_DUMP_ANON_PRIVATE 2
#define MMF_DUMP_ANON_SHARED 3
#define MMF_DUMP_MAPPED_PRIVATE 4
#define MMF_DUMP_MAPPED_SHARED 5
#define MMF_DUMP_ELF_HEADERS 6
#define MMF_DUMP_HUGETLB_PRIVATE 7
#define MMF_DUMP_HUGETLB_SHARED 8

#define MMF_DUMP_FILTER_SHIFT MMF_DUMPABLE_BITS
#define MMF_DUMP_FILTER_BITS 7
#define MMF_DUMP_FILTER_MASK \
	(((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
#define MMF_DUMP_FILTER_DEFAULT \
	((1 << MMF_DUMP_ANON_PRIVATE) | (1 << MMF_DUMP_ANON_SHARED) |\
	 (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)

#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
# define MMF_DUMP_MASK_DEFAULT_ELF (1 << MMF_DUMP_ELF_HEADERS)
#else
# define MMF_DUMP_MASK_DEFAULT_ELF 0
#endif
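
/*
 * Worked example: the default filter is (1 << 2) | (1 << 3) | (1 << 7)
 * == 0x8c, growing to 0xcc when CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
 * contributes bit 6; anonymous and hugetlb-private mappings dump by
 * default, plain file-backed ones do not.
 */
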
/* leave room for more dump flags */
#define MMF_VM_MERGEABLE 16 /* KSM may merge identical pages */
#define MMF_VM_HUGEPAGE 17 /* set when VM_HUGEPAGE is set on vma */
#define MMF_EXE_FILE_CHANGED 18 /* see prctl_set_mm_exe_file() */

#define MMF_HAS_UPROBES 19 /* has uprobes */
#define MMF_RECALC_UPROBES 20 /* MMF_HAS_UPROBES can be wrong */

#define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)

struct sighand_struct {
	atomic_t count;
	struct k_sigaction action[_NSIG];
	spinlock_t siglock;
	wait_queue_head_t signalfd_wqh;
};

struct pacct_struct {
	int ac_flag;
	long ac_exitcode;
	unsigned long ac_mem;
	cputime_t ac_utime, ac_stime;
	unsigned long ac_minflt, ac_majflt;
};

struct cpu_itimer {
	cputime_t expires;
	cputime_t incr;
	u32 error;
	u32 incr_error;
};

/**
 * struct task_cputime - collected CPU time counts
 * @utime: time spent in user mode, in &cputime_t units
 * @stime: time spent in kernel mode, in &cputime_t units
 * @sum_exec_runtime: total time spent on the CPU, in nanoseconds
 *
 * This structure groups together three kinds of CPU time that are
 * tracked for threads and thread groups. Most things considering
 * CPU time want to group these counts together and treat all three
 * of them in parallel.
 */
struct task_cputime {
	cputime_t utime;
	cputime_t stime;
	unsigned long long sum_exec_runtime;
};
/* Alternate field names when used to cache expirations. */
#define prof_exp stime
#define virt_exp utime
#define sched_exp sum_exec_runtime

#define INIT_CPUTIME \
	(struct task_cputime) { \
		.utime = 0, \
		.stime = 0, \
		.sum_exec_runtime = 0, \
	}

/*
 * Disable preemption until the scheduler is running.
 * Reset by start_kernel()->sched_init()->init_idle().
 *
 * We include PREEMPT_ACTIVE to avoid cond_resched() from working
 * before the scheduler is active -- see should_resched().
 */
#define INIT_PREEMPT_COUNT (1 + PREEMPT_ACTIVE)

/**
 * struct thread_group_cputimer - thread group interval timer counts
 * @cputime: thread group interval timers.
 * @running: non-zero when there are timers running and
 *	@cputime receives updates.
 * @lock: lock for fields in this struct.
 *
 * This structure contains the version of task_cputime, above, that is
 * used for thread group CPU timer calculations.
 */
struct thread_group_cputimer {
	struct task_cputime cputime;
	int running;
	raw_spinlock_t lock;
};

#include <linux/rwsem.h>
struct autogroup;

/*
 * NOTE! "signal_struct" does not have its own
 * locking, because a shared signal_struct always
 * implies a shared sighand_struct, so locking
 * sighand_struct is always a proper superset of
 * the locking of signal_struct.
 */
struct signal_struct {
	atomic_t sigcnt;
	atomic_t live;
	int nr_threads;

	wait_queue_head_t wait_chldexit; /* for wait4() */

	/* current thread group signal load-balancing target: */
	struct task_struct *curr_target;

	/* shared signal handling: */
	struct sigpending shared_pending;

	/* thread group exit support */
	int group_exit_code;
	/* overloaded:
	 * - notify group_exit_task when ->count is equal to notify_count
	 * - everyone except group_exit_task is stopped during signal delivery
	 *   of fatal signals, group_exit_task processes the signal.
	 */
	int notify_count;
	struct task_struct *group_exit_task;

	/* thread group stop support, overloads group_exit_code too */
	int group_stop_count;
	unsigned int flags; /* see SIGNAL_* flags below */

	/*
	 * PR_SET_CHILD_SUBREAPER marks a process, like a service
	 * manager, to re-parent orphan (double-forking) child processes
	 * to this process instead of 'init'. The service manager is
	 * able to receive SIGCHLD signals and is able to investigate
	 * the process until it calls wait(). All children of this
	 * process will inherit a flag if they should look for a
	 * child_subreaper process at exit.
	 */
	unsigned int is_child_subreaper:1;
	unsigned int has_child_subreaper:1;

	/* POSIX.1b Interval Timers */
	struct list_head posix_timers;

	/* ITIMER_REAL timer for the process */
	struct hrtimer real_timer;
	struct pid *leader_pid;
	ktime_t it_real_incr;

	/*
	 * ITIMER_PROF and ITIMER_VIRTUAL timers for the process, we use
	 * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing array as these
	 * values are defined to 0 and 1 respectively
	 */
	struct cpu_itimer it[2];

	/*
	 * Thread group totals for process CPU timers.
	 * See thread_group_cputimer(), et al, for details.
	 */
	struct thread_group_cputimer cputimer;

	/* Earliest-expiration cache. */
	struct task_cputime cputime_expires;

	struct list_head cpu_timers[3];

	struct pid *tty_old_pgrp;

	/* boolean value for session group leader */
	int leader;

	struct tty_struct *tty; /* NULL if no tty */

#ifdef CONFIG_SCHED_AUTOGROUP
	struct autogroup *autogroup;
#endif
	/*
	 * Cumulative resource counters for dead threads in the group,
	 * and for reaped dead child processes forked by this group.
	 * Live threads maintain their own counters and add to these
	 * in __exit_signal, except for the group leader.
	 */
	cputime_t utime, stime, cutime, cstime;
	cputime_t gtime;
	cputime_t cgtime;
#ifndef CONFIG_VIRT_CPU_ACCOUNTING
	cputime_t prev_utime, prev_stime;
#endif
	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
	unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
	unsigned long inblock, oublock, cinblock, coublock;
	unsigned long maxrss, cmaxrss;
	struct task_io_accounting ioac;

	/*
	 * Cumulative ns of scheduled CPU time for dead threads in the
	 * group, not including a zombie group leader. (This only differs
	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
	 * other than jiffies.)
	 */
	unsigned long long sum_sched_runtime;

	/*
	 * We don't bother to synchronize most readers of this at all,
	 * because there is no reader checking a limit that actually needs
	 * to get both rlim_cur and rlim_max atomically, and either one
	 * alone is a single word that can safely be read normally.
	 * getrlimit/setrlimit use task_lock(current->group_leader) to
	 * protect this instead of the siglock, because they really
	 * have no need to disable irqs.
	 */
	struct rlimit rlim[RLIM_NLIMITS];

#ifdef CONFIG_BSD_PROCESS_ACCT
	struct pacct_struct pacct; /* per-process accounting information */
#endif
#ifdef CONFIG_TASKSTATS
	struct taskstats *stats;
#endif
#ifdef CONFIG_AUDIT
	unsigned audit_tty;
	struct tty_audit_buf *tty_audit_buf;
#endif
#ifdef CONFIG_CGROUPS
	/*
	 * group_rwsem prevents new tasks from entering the threadgroup and
	 * member tasks from exiting, more specifically, the setting of
	 * PF_EXITING. The fork and exit paths are protected with this rwsem
	 * using threadgroup_change_begin/end(). Users which require the
	 * threadgroup to remain stable should use threadgroup_[un]lock(),
	 * which also takes care of the exec path. Currently, cgroup is the
	 * only user.
	 */
	struct rw_semaphore group_rwsem;
#endif

	int oom_score_adj; /* OOM kill score adjustment */
	int oom_score_adj_min; /* OOM kill score adjustment minimum value.
				* Only settable by CAP_SYS_RESOURCE. */

	struct mutex cred_guard_mutex; /* guard against foreign influences on
					* credential calculations
					* (notably, ptrace) */
};

/*
 * Bits in flags field of signal_struct.
 */
#define SIGNAL_STOP_STOPPED 0x00000001 /* job control stop in effect */
#define SIGNAL_STOP_CONTINUED 0x00000002 /* SIGCONT since WCONTINUED reap */
#define SIGNAL_GROUP_EXIT 0x00000004 /* group exit in progress */
/*
 * Pending notifications to parent.
 */
#define SIGNAL_CLD_STOPPED 0x00000010
#define SIGNAL_CLD_CONTINUED 0x00000020
#define SIGNAL_CLD_MASK (SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)

#define SIGNAL_UNKILLABLE 0x00000040 /* for init: ignore fatal signals */

/* If true, all threads except ->group_exit_task have pending SIGKILL */
static inline int signal_group_exit(const struct signal_struct *sig)
{
	return (sig->flags & SIGNAL_GROUP_EXIT) ||
	       (sig->group_exit_task != NULL);
}

/*
 * Someday this will be a full-fledged user tracking system.
 */
struct user_struct {
	atomic_t __count; /* reference count */
	atomic_t processes; /* How many processes does this user have? */
	atomic_t files; /* How many open files does this user have? */
	atomic_t sigpending; /* How many pending signals does this user have? */
#ifdef CONFIG_INOTIFY_USER
	atomic_t inotify_watches; /* How many inotify watches does this user have? */
	atomic_t inotify_devs; /* How many inotify devs does this user have opened? */
#endif
#ifdef CONFIG_FANOTIFY
	atomic_t fanotify_listeners;
#endif
#ifdef CONFIG_EPOLL
	atomic_long_t epoll_watches; /* The number of file descriptors currently watched */
#endif
#ifdef CONFIG_POSIX_MQUEUE
	/* protected by mq_lock */
	unsigned long mq_bytes; /* How many bytes can be allocated to mqueue? */
#endif
	unsigned long locked_shm; /* How many pages of mlocked shm ? */

#ifdef CONFIG_KEYS
	struct key *uid_keyring; /* UID specific keyring */
	struct key *session_keyring; /* UID's default session keyring */
#endif

	/* Hash table maintenance information */
	struct hlist_node uidhash_node;
	kuid_t uid;

#ifdef CONFIG_PERF_EVENTS
	atomic_long_t locked_vm;
#endif
};

extern int uids_sysfs_init(void);

extern struct user_struct *find_user(kuid_t);

extern struct user_struct root_user;
#define INIT_USER (&root_user)


struct backing_dev_info;
struct reclaim_state;

#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
struct sched_info {
	/* cumulative counters */
	unsigned long pcount; /* # of times run on this cpu */
	unsigned long long run_delay; /* time spent waiting on a runqueue */

	/* timestamps */
	unsigned long long last_arrival, /* when we last ran on a cpu */
			   last_queued; /* when we were last queued to run */
};
#endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */

#ifdef CONFIG_TASK_DELAY_ACCT
struct task_delay_info {
	spinlock_t lock;
	unsigned int flags; /* Private per-task flags */

	/* For each stat XXX, add following, aligned appropriately
	 *
	 * struct timespec XXX_start, XXX_end;
	 * u64 XXX_delay;
	 * u32 XXX_count;
	 *
	 * Atomicity of updates to XXX_delay, XXX_count protected by
	 * single lock above (split into XXX_lock if contention is an issue).
	 */

	/*
	 * XXX_count is incremented on every XXX operation, the delay
	 * associated with the operation is added to XXX_delay.
	 * XXX_delay contains the accumulated delay time in nanoseconds.
	 */
	struct timespec blkio_start, blkio_end; /* Shared by blkio, swapin */
	u64 blkio_delay; /* wait for sync block io completion */
	u64 swapin_delay; /* wait for swapin block io completion */
	u32 blkio_count; /* total count of the number of sync block */
			 /* io operations performed */
	u32 swapin_count; /* total count of the number of swapin block */
			  /* io operations performed */

	struct timespec freepages_start, freepages_end;
	u64 freepages_delay; /* wait for memory reclaim */
	u32 freepages_count; /* total count of memory reclaim */
};
#endif /* CONFIG_TASK_DELAY_ACCT */

static inline int sched_info_on(void)
{
#ifdef CONFIG_SCHEDSTATS
	return 1;
#elif defined(CONFIG_TASK_DELAY_ACCT)
	extern int delayacct_on;
	return delayacct_on;
#else
	return 0;
#endif
}

enum cpu_idle_type {
	CPU_IDLE,
	CPU_NOT_IDLE,
	CPU_NEWLY_IDLE,
	CPU_MAX_IDLE_TYPES
};

/*
 * Increase resolution of nice-level calculations for 64-bit architectures.
 * The extra resolution improves shares distribution and load balancing of
 * low-weight task groups (e.g. nice +19 on an autogroup), deeper taskgroup
 * hierarchies, especially on larger systems. This is not a user-visible change
 * and does not change the user interface for setting shares/weights.
 *
 * We increase resolution only if we have enough bits to allow this increased
 * resolution (i.e. BITS_PER_LONG > 32). The costs for increasing resolution
 * when BITS_PER_LONG <= 32 are pretty high and the returns do not justify the
 * increased costs.
 */
#if 0 /* BITS_PER_LONG > 32 -- currently broken: it increases power usage under light load */
# define SCHED_LOAD_RESOLUTION 10
# define scale_load(w) ((w) << SCHED_LOAD_RESOLUTION)
# define scale_load_down(w) ((w) >> SCHED_LOAD_RESOLUTION)
#else
# define SCHED_LOAD_RESOLUTION 0
# define scale_load(w) (w)
# define scale_load_down(w) (w)
#endif

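/*
 * Worked example: with the 10-bit resolution enabled, a nice-0 weight of
 * 1024 would be stored internally as 1024 << 10 == 1048576 and converted
 * back with scale_load_down(); with the resolution disabled, as above,
 * both macros are identity operations. Either way,
 * scale_load_down(scale_load(w)) == w for weights that fit.
 */
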
#define SCHED_LOAD_SHIFT (10 + SCHED_LOAD_RESOLUTION)
#define SCHED_LOAD_SCALE (1L << SCHED_LOAD_SHIFT)

/*
 * Increase resolution of cpu_power calculations
 */
#define SCHED_POWER_SHIFT 10
#define SCHED_POWER_SCALE (1L << SCHED_POWER_SHIFT)

/*
 * sched-domains (multiprocessor balancing) declarations:
 */
#ifdef CONFIG_SMP
#define SD_LOAD_BALANCE 0x0001 /* Do load balancing on this domain. */
#define SD_BALANCE_NEWIDLE 0x0002 /* Balance when about to become idle */
#define SD_BALANCE_EXEC 0x0004 /* Balance on exec */
#define SD_BALANCE_FORK 0x0008 /* Balance on fork, clone */
#define SD_BALANCE_WAKE 0x0010 /* Balance on wakeup */
#define SD_WAKE_AFFINE 0x0020 /* Wake task to waking CPU */
#define SD_SHARE_CPUPOWER 0x0080 /* Domain members share cpu power */
#define SD_SHARE_PKG_RESOURCES 0x0200 /* Domain members share cpu pkg resources */
#define SD_SERIALIZE 0x0400 /* Only a single load balancing instance */
#define SD_ASYM_PACKING 0x0800 /* Place busy groups earlier in the domain */
#define SD_PREFER_SIBLING 0x1000 /* Prefer to place tasks in a sibling domain */
#define SD_OVERLAP 0x2000 /* sched_domains of this level overlap */

extern int __weak arch_sd_sibiling_asym_packing(void);

struct sched_group_power {
	atomic_t ref;
	/*
	 * CPU power of this group, SCHED_LOAD_SCALE being max power for a
	 * single CPU.
	 */
	unsigned int power, power_orig;
	unsigned long next_update;
	/*
	 * Number of busy cpus in this group.
	 */
	atomic_t nr_busy_cpus;

	unsigned long cpumask[0]; /* iteration mask */
};

struct sched_group {
	struct sched_group *next; /* Must be a circular list */
	atomic_t ref;

	unsigned int group_weight;
	struct sched_group_power *sgp;

	/*
	 * The CPUs this group covers.
	 *
	 * NOTE: this field is variable length. (Allocated dynamically
	 * by attaching extra space to the end of the structure,
	 * depending on how many CPUs the kernel has booted up with)
	 */
	unsigned long cpumask[0];
};

static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
{
	return to_cpumask(sg->cpumask);
}

/*
 * cpumask masking which cpus in the group are allowed to iterate up the domain
 * tree.
 */
static inline struct cpumask *sched_group_mask(struct sched_group *sg)
{
	return to_cpumask(sg->sgp->cpumask);
}

/**
 * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
 * @group: The group whose first cpu is to be returned.
 */
static inline unsigned int group_first_cpu(struct sched_group *group)
{
	return cpumask_first(sched_group_cpus(group));
}

struct sched_domain_attr {
	int relax_domain_level;
};

#define SD_ATTR_INIT (struct sched_domain_attr) { \
	.relax_domain_level = -1, \
}

extern int sched_domain_level_max;

struct sched_domain {
	/* These fields must be setup */
	struct sched_domain *parent; /* top domain must be null terminated */
	struct sched_domain *child; /* bottom domain must be null terminated */
	struct sched_group *groups; /* the balancing groups of the domain */
	unsigned long min_interval; /* Minimum balance interval ms */
	unsigned long max_interval; /* Maximum balance interval ms */
	unsigned int busy_factor; /* less balancing by factor if busy */
	unsigned int imbalance_pct; /* No balance until over watermark */
	unsigned int cache_nice_tries; /* Leave cache hot tasks for # tries */
	unsigned int busy_idx;
	unsigned int idle_idx;
	unsigned int newidle_idx;
	unsigned int wake_idx;
	unsigned int forkexec_idx;
	unsigned int smt_gain;
	int flags; /* See SD_* */
	int level;

	/* Runtime fields. */
	unsigned long last_balance; /* init to jiffies. units in jiffies */
	unsigned int balance_interval; /* initialise to 1. units in ms. */
	unsigned int nr_balance_failed; /* initialise to 0 */

	u64 last_update;

#ifdef CONFIG_SCHEDSTATS
	/* load_balance() stats */
	unsigned int lb_count[CPU_MAX_IDLE_TYPES];
	unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
	unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
	unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
	unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
	unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
	unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
	unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];

	/* Active load balancing */
	unsigned int alb_count;
	unsigned int alb_failed;
	unsigned int alb_pushed;

	/* SD_BALANCE_EXEC stats */
	unsigned int sbe_count;
	unsigned int sbe_balanced;
	unsigned int sbe_pushed;

	/* SD_BALANCE_FORK stats */
	unsigned int sbf_count;
	unsigned int sbf_balanced;
	unsigned int sbf_pushed;

	/* try_to_wake_up() stats */
	unsigned int ttwu_wake_remote;
	unsigned int ttwu_move_affine;
	unsigned int ttwu_move_balance;
#endif
#ifdef CONFIG_SCHED_DEBUG
	char *name;
#endif
	union {
		void *private; /* used during construction */
		struct rcu_head rcu; /* used during destruction */
	};

	unsigned int span_weight;
	/*
	 * Span of all CPUs in this domain.
	 *
	 * NOTE: this field is variable length. (Allocated dynamically
	 * by attaching extra space to the end of the structure,
	 * depending on how many CPUs the kernel has booted up with)
	 */
	unsigned long span[0];
};

static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
{
	return to_cpumask(sd->span);
}

extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
				    struct sched_domain_attr *dattr_new);

/* Allocate an array of sched domains, for partition_sched_domains(). */
cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);

/* Test a flag in parent sched domain */
static inline int test_sd_parent(struct sched_domain *sd, int flag)
{
	if (sd->parent && (sd->parent->flags & flag))
		return 1;

	return 0;
}

unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu);
unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu);

bool cpus_share_cache(int this_cpu, int that_cpu);

#else /* CONFIG_SMP */

struct sched_domain_attr;

static inline void
partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
			struct sched_domain_attr *dattr_new)
{
}

static inline bool cpus_share_cache(int this_cpu, int that_cpu)
{
	return true;
}

#endif /* !CONFIG_SMP */


struct io_context; /* See blkdev.h */


#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
extern void prefetch_stack(struct task_struct *t);
#else
static inline void prefetch_stack(struct task_struct *t) { }
#endif

struct audit_context; /* See audit.c */
struct mempolicy;
struct pipe_inode_info;
struct uts_namespace;

struct rq;
struct sched_domain;

/*
 * wake flags
 */
#define WF_SYNC 0x01 /* waker goes to sleep after wakeup */
#define WF_FORK 0x02 /* child wakeup after fork */
#define WF_MIGRATED 0x04 /* internal use, task got migrated */

#define ENQUEUE_WAKEUP 1
#define ENQUEUE_HEAD 2
#ifdef CONFIG_SMP
#define ENQUEUE_WAKING 4 /* sched_class::task_waking was called */
#else
#define ENQUEUE_WAKING 0
#endif

#define DEQUEUE_SLEEP 1

struct sched_class {
	const struct sched_class *next;

	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
	void (*yield_task) (struct rq *rq);
	bool (*yield_to_task) (struct rq *rq, struct task_struct *p, bool preempt);

	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);

	struct task_struct * (*pick_next_task) (struct rq *rq);
	void (*put_prev_task) (struct rq *rq, struct task_struct *p);

#ifdef CONFIG_SMP
	int (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);

	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
	void (*post_schedule) (struct rq *this_rq);
	void (*task_waking) (struct task_struct *task);
	void (*task_woken) (struct rq *this_rq, struct task_struct *task);

	void (*set_cpus_allowed)(struct task_struct *p,
				 const struct cpumask *newmask);

	void (*rq_online)(struct rq *rq);
	void (*rq_offline)(struct rq *rq);
#endif

	void (*set_curr_task) (struct rq *rq);
	void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
	void (*task_fork) (struct task_struct *p);

	void (*switched_from) (struct rq *this_rq, struct task_struct *task);
	void (*switched_to) (struct rq *this_rq, struct task_struct *task);
	void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
			      int oldprio);

	unsigned int (*get_rr_interval) (struct rq *rq,
					 struct task_struct *task);

#ifdef CONFIG_FAIR_GROUP_SCHED
	void (*task_move_group) (struct task_struct *p, int on_rq);
#endif
};

struct load_weight {
	unsigned long weight, inv_weight;
};

#ifdef CONFIG_SCHEDSTATS
struct sched_statistics {
	u64 wait_start;
	u64 wait_max;
	u64 wait_count;
	u64 wait_sum;
	u64 iowait_count;
	u64 iowait_sum;

	u64 sleep_start;
	u64 sleep_max;
	s64 sum_sleep_runtime;

	u64 block_start;
	u64 block_max;
	u64 exec_max;
	u64 slice_max;

	u64 nr_migrations_cold;
	u64 nr_failed_migrations_affine;
	u64 nr_failed_migrations_running;
	u64 nr_failed_migrations_hot;
	u64 nr_forced_migrations;

	u64 nr_wakeups;
	u64 nr_wakeups_sync;
	u64 nr_wakeups_migrate;
	u64 nr_wakeups_local;
	u64 nr_wakeups_remote;
	u64 nr_wakeups_affine;
	u64 nr_wakeups_affine_attempts;
	u64 nr_wakeups_passive;
	u64 nr_wakeups_idle;
};
#endif

struct sched_entity {
	struct load_weight load; /* for load-balancing */
	struct rb_node run_node;
	struct list_head group_node;
	unsigned int on_rq;

	u64 exec_start;
	u64 sum_exec_runtime;
	u64 vruntime;
	u64 prev_sum_exec_runtime;

	u64 nr_migrations;

#ifdef CONFIG_SCHEDSTATS
	struct sched_statistics statistics;
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	struct sched_entity *parent;
	/* rq on which this entity is (to be) queued: */
	struct cfs_rq *cfs_rq;
	/* rq "owned" by this entity/group: */
	struct cfs_rq *my_q;
#endif
};

struct sched_rt_entity {
	struct list_head run_list;
	unsigned long timeout;
	unsigned int time_slice;

	struct sched_rt_entity *back;
#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity *parent;
	/* rq on which this entity is (to be) queued: */
	struct rt_rq *rt_rq;
	/* rq "owned" by this entity/group: */
	struct rt_rq *my_q;
#endif
};

/*
 * default timeslice is 100 msecs (used only for SCHED_RR tasks).
 * Timeslices get refilled after they expire.
 */
#define RR_TIMESLICE (100 * HZ / 1000)

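/*
 * Worked example: RR_TIMESLICE converts 100 ms to jiffies, so with HZ == 1000
 * it is 100 jiffies, with HZ == 250 it is 25, and with HZ == 100 it is 10.
 */
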
struct rcu_node;

enum perf_event_task_context {
	perf_invalid_context = -1,
	perf_hw_context = 0,
	perf_sw_context,
	perf_nr_task_contexts,
};

struct task_struct {
	volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
	void *stack;
	atomic_t usage;
	unsigned int flags; /* per process flags, defined below */
	unsigned int ptrace;

#ifdef CONFIG_SMP
	struct llist_node wake_entry;
	int on_cpu;
#endif
	int on_rq;

	int prio, static_prio, normal_prio;
	unsigned int rt_priority;
	const struct sched_class *sched_class;
	struct sched_entity se;
	struct sched_rt_entity rt;
#ifdef CONFIG_CGROUP_SCHED
	struct task_group *sched_task_group;
#endif

#ifdef CONFIG_PREEMPT_NOTIFIERS
	/* list of struct preempt_notifier: */
	struct hlist_head preempt_notifiers;
#endif

	/*
	 * fpu_counter contains the number of consecutive context switches
	 * during which the FPU is used. If this exceeds a threshold, the
	 * lazy FPU saving becomes unlazy to save the trap. This is an
	 * unsigned char so that after 256 times the counter wraps and the
	 * behavior turns lazy again; this deals with bursty apps that only
	 * use the FPU for a short time.
	 */
	unsigned char fpu_counter;
#ifdef CONFIG_BLK_DEV_IO_TRACE
	unsigned int btrace_seq;
#endif

	unsigned int policy;
	int nr_cpus_allowed;
	cpumask_t cpus_allowed;

#ifdef CONFIG_PREEMPT_RCU
	int rcu_read_lock_nesting;
	char rcu_read_unlock_special;
	struct list_head rcu_node_entry;
#endif /* #ifdef CONFIG_PREEMPT_RCU */
#ifdef CONFIG_TREE_PREEMPT_RCU
	struct rcu_node *rcu_blocked_node;
#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
#ifdef CONFIG_RCU_BOOST
	struct rt_mutex *rcu_boost_mutex;
#endif /* #ifdef CONFIG_RCU_BOOST */

#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
	struct sched_info sched_info;
#endif

	struct list_head tasks;
#ifdef CONFIG_SMP
	struct plist_node pushable_tasks;
#endif

	struct mm_struct *mm, *active_mm;
#ifdef CONFIG_COMPAT_BRK
	unsigned brk_randomized:1;
#endif
#if defined(SPLIT_RSS_COUNTING)
	struct task_rss_stat rss_stat;
#endif
	/* task state */
	int exit_state;
	int exit_code, exit_signal;
	int pdeath_signal; /* The signal sent when the parent dies */
	unsigned int jobctl; /* JOBCTL_*, siglock protected */
	/* ??? */
	unsigned int personality;
	unsigned did_exec:1;
	unsigned in_execve:1; /* Tell the LSMs that the process is doing an
			       * execve */
	unsigned in_iowait:1;

	/* task may not gain privileges */
	unsigned no_new_privs:1;

	/* Revert to default priority/policy when forking */
	unsigned sched_reset_on_fork:1;
	unsigned sched_contributes_to_load:1;

	pid_t pid;
	pid_t tgid;

#ifdef CONFIG_CC_STACKPROTECTOR
	/* Canary value for the -fstack-protector gcc feature */
	unsigned long stack_canary;
#endif
	/*
	 * pointers to (original) parent process, youngest child, younger sibling,
	 * older sibling, respectively. (p->father can be replaced with
	 * p->real_parent->pid)
	 */
	struct task_struct __rcu *real_parent; /* real parent process */
	struct task_struct __rcu *parent; /* recipient of SIGCHLD, wait4() reports */
	/*
	 * children/sibling forms the list of my natural children
	 */
	struct list_head children; /* list of my children */
	struct list_head sibling; /* linkage in my parent's children list */
	struct task_struct *group_leader; /* threadgroup leader */

	/*
	 * ptraced is the list of tasks this task is using ptrace on.
	 * This includes both natural children and PTRACE_ATTACH targets.
	 * p->ptrace_entry is p's link on the p->parent->ptraced list.
	 */
	struct list_head ptraced;
	struct list_head ptrace_entry;

	/* PID/PID hash table linkage. */
	struct pid_link pids[PIDTYPE_MAX];
	struct list_head thread_group;

	struct completion *vfork_done; /* for vfork() */
	int __user *set_child_tid; /* CLONE_CHILD_SETTID */
	int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */

	cputime_t utime, stime, utimescaled, stimescaled;
	cputime_t gtime;
#ifndef CONFIG_VIRT_CPU_ACCOUNTING
	cputime_t prev_utime, prev_stime;
#endif
	unsigned long nvcsw, nivcsw; /* context switch counts */
	struct timespec start_time; /* monotonic time */
	struct timespec real_start_time; /* boot based time */
	/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
	unsigned long min_flt, maj_flt;

	struct task_cputime cputime_expires;
	struct list_head cpu_timers[3];

	/* process credentials */
	const struct cred __rcu *real_cred; /* objective and real subjective task
					     * credentials (COW) */
	const struct cred __rcu *cred; /* effective (overridable) subjective task
					* credentials (COW) */
	char comm[TASK_COMM_LEN]; /* executable name excluding path
				   * - access with [gs]et_task_comm (which lock
				   *   it with task_lock())
				   * - initialized normally by setup_new_exec */
	/* file system info */
	int link_count, total_link_count;
#ifdef CONFIG_SYSVIPC
	/* ipc stuff */
	struct sysv_sem sysvsem;
#endif
#ifdef CONFIG_DETECT_HUNG_TASK
	/* hung task detection */
	unsigned long last_switch_count;
#endif
	/* CPU-specific state of this task */
	struct thread_struct thread;
	/* filesystem information */
	struct fs_struct *fs;
	/* open file information */
	struct files_struct *files;
	/* namespaces */
	struct nsproxy *nsproxy;
	/* signal handlers */
	struct signal_struct *signal;
	struct sighand_struct *sighand;

	sigset_t blocked, real_blocked;
	sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */
	struct sigpending pending;

	unsigned long sas_ss_sp;
	size_t sas_ss_size;
	int (*notifier)(void *priv);
	void *notifier_data;
	sigset_t *notifier_mask;
	struct callback_head *task_works;

	struct audit_context *audit_context;
#ifdef CONFIG_AUDITSYSCALL
	kuid_t loginuid;
	unsigned int sessionid;
#endif
	struct seccomp seccomp;

	/* Thread group tracking */
	u32 parent_exec_id;
	u32 self_exec_id;
	/* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed,
	 * mempolicy */
	spinlock_t alloc_lock;

	/* Protection of the PI data structures: */
	raw_spinlock_t pi_lock;

#ifdef CONFIG_RT_MUTEXES
	/* PI waiters blocked on a rt_mutex held by this task */
	struct plist_head pi_waiters;
	/* Deadlock detection and priority inheritance handling */
	struct rt_mutex_waiter *pi_blocked_on;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
	/* mutex deadlock detection */
	struct mutex_waiter *blocked_on;
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	unsigned int irq_events;
	unsigned long hardirq_enable_ip;
	unsigned long hardirq_disable_ip;
	unsigned int hardirq_enable_event;
	unsigned int hardirq_disable_event;
	int hardirqs_enabled;
	int hardirq_context;
	unsigned long softirq_disable_ip;
	unsigned long softirq_enable_ip;
	unsigned int softirq_disable_event;
	unsigned int softirq_enable_event;
	int softirqs_enabled;
	int softirq_context;
#endif
#ifdef CONFIG_LOCKDEP
# define MAX_LOCK_DEPTH 48UL
	u64 curr_chain_key;
	int lockdep_depth;
	unsigned int lockdep_recursion;
	struct held_lock held_locks[MAX_LOCK_DEPTH];
	gfp_t lockdep_reclaim_gfp;
#endif

	/* journalling filesystem info */
	void *journal_info;

	/* stacked block device info */
	struct bio_list *bio_list;

#ifdef CONFIG_BLOCK
	/* stack plugging */
	struct blk_plug *plug;
#endif

	/* VM state */
	struct reclaim_state *reclaim_state;

	struct backing_dev_info *backing_dev_info;

	struct io_context *io_context;

	unsigned long ptrace_message;
	siginfo_t *last_siginfo; /* For ptrace use. */
	struct task_io_accounting ioac;
#if defined(CONFIG_TASK_XACCT)
	u64 acct_rss_mem1; /* accumulated rss usage */
	u64 acct_vm_mem1; /* accumulated virtual memory usage */
	cputime_t acct_timexpd; /* stime + utime since last update */
#endif
#ifdef CONFIG_CPUSETS
	nodemask_t mems_allowed; /* Protected by alloc_lock */
	seqcount_t mems_allowed_seq; /* Sequence number to catch updates */
	int cpuset_mem_spread_rotor;
	int cpuset_slab_spread_rotor;
#endif
#ifdef CONFIG_CGROUPS
	/* Control Group info protected by css_set_lock */
	struct css_set __rcu *cgroups;
	/* cg_list protected by css_set_lock and tsk->alloc_lock */
	struct list_head cg_list;
#endif
#ifdef CONFIG_FUTEX
	struct robust_list_head __user *robust_list;
#ifdef CONFIG_COMPAT
	struct compat_robust_list_head __user *compat_robust_list;
#endif
	struct list_head pi_state_list;
	struct futex_pi_state *pi_state_cache;
#endif
#ifdef CONFIG_PERF_EVENTS
	struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
	struct mutex perf_event_mutex;
	struct list_head perf_event_list;
#endif
#ifdef CONFIG_NUMA
	struct mempolicy *mempolicy; /* Protected by alloc_lock */
	short il_next;
	short pref_node_fork;
#endif
	struct rcu_head rcu;

	/*
	 * cache last used pipe for splice
	 */
	struct pipe_inode_info *splice_pipe;

	struct page_frag task_frag;

#ifdef CONFIG_TASK_DELAY_ACCT
	struct task_delay_info *delays;
#endif
#ifdef CONFIG_FAULT_INJECTION
	int make_it_fail;
#endif
	/*
	 * when (nr_dirtied >= nr_dirtied_pause), it's time to call
	 * balance_dirty_pages() for some dirty throttling pause
	 */
	int nr_dirtied;
	int nr_dirtied_pause;
	unsigned long dirty_paused_when; /* start of a write-and-pause period */

#ifdef CONFIG_LATENCYTOP
	int latency_record_count;
	struct latency_record latency_record[LT_SAVECOUNT];
#endif
	/*
	 * time slack values; these are used to round up poll() and
	 * select() etc timeout values. These are in nanoseconds.
	 */
	unsigned long timer_slack_ns;
	unsigned long default_timer_slack_ns;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* Index of current stored address in ret_stack */
	int curr_ret_stack;
	/* Stack of return addresses for return function tracing */
	struct ftrace_ret_stack *ret_stack;
	/* time stamp for last schedule */
	unsigned long long ftrace_timestamp;
	/*
	 * Number of functions that haven't been traced
	 * because of depth overrun.
	 */
	atomic_t trace_overrun;
	/* Pause for the tracing */
	atomic_t tracing_graph_pause;
#endif
#ifdef CONFIG_TRACING
	/* state flags for use by tracers */
	unsigned long trace;
	/* bitmask and counter of trace recursion */
	unsigned long trace_recursion;
#endif /* CONFIG_TRACING */
#ifdef CONFIG_MEMCG /* memcg uses this to do batch job */
	struct memcg_batch_info {
		int do_batch; /* incremented when batch uncharge started */
		struct mem_cgroup *memcg; /* target memcg of uncharge */
		unsigned long nr_pages; /* uncharged usage */
		unsigned long memsw_nr_pages; /* uncharged mem+swap usage */
	} memcg_batch;
#endif
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	atomic_t ptrace_bp_refcnt;
#endif
#ifdef CONFIG_UPROBES
	struct uprobe_task *utask;
#endif
};

/* Future-safe accessor for struct task_struct's cpus_allowed. */
#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)

/*
 * Priority of a process goes from 0..MAX_PRIO-1, valid RT
 * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
 * tasks are in the range MAX_RT_PRIO..MAX_PRIO-1. Priority
 * values are inverted: lower p->prio value means higher priority.
 *
 * The MAX_USER_RT_PRIO value allows the actual maximum
 * RT priority to be separate from the value exported to
 * user-space. This allows kernel threads to set their
 * priority to a value higher than any user task. Note:
 * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO.
 */

#define MAX_USER_RT_PRIO 100
#define MAX_RT_PRIO MAX_USER_RT_PRIO

#define MAX_PRIO (MAX_RT_PRIO + 40)
#define DEFAULT_PRIO (MAX_RT_PRIO + 20)

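/*
 * Worked example: with MAX_USER_RT_PRIO == 100, RT tasks occupy prio 0..99
 * and SCHED_NORMAL/SCHED_BATCH tasks occupy 100..139; nice -20 maps to 100,
 * nice 0 to DEFAULT_PRIO (120) and nice +19 to 139. rt_prio() below is then
 * simply "prio < MAX_RT_PRIO".
 */
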
1615 static inline int rt_prio(int prio)
1616 {
1617 if (unlikely(prio < MAX_RT_PRIO))
1618 return 1;
1619 return 0;
1620 }
1621
1622 static inline int rt_task(struct task_struct *p)
1623 {
1624 return rt_prio(p->prio);
1625 }
1626
1627 static inline struct pid *task_pid(struct task_struct *task)
1628 {
1629 return task->pids[PIDTYPE_PID].pid;
1630 }
1631
1632 static inline struct pid *task_tgid(struct task_struct *task)
1633 {
1634 return task->group_leader->pids[PIDTYPE_PID].pid;
1635 }
1636
1637 /*
1638 * Without tasklist or rcu lock it is not safe to dereference
1639 * the result of task_pgrp/task_session even if task == current,
1640 * we can race with another thread doing sys_setsid/sys_setpgid.
1641 */
1642 static inline struct pid *task_pgrp(struct task_struct *task)
1643 {
1644 return task->group_leader->pids[PIDTYPE_PGID].pid;
1645 }
1646
1647 static inline struct pid *task_session(struct task_struct *task)
1648 {
1649 return task->group_leader->pids[PIDTYPE_SID].pid;
1650 }
1651
1652 struct pid_namespace;
1653
1654 /*
1655 * the helpers to get the task's different pids as they are seen
1656 * from various namespaces
1657 *
1658 * task_xid_nr() : global id, i.e. the id seen from the init namespace;
1659 * task_xid_vnr() : virtual id, i.e. the id seen from the pid namespace of
1660 * current.
1661 * task_xid_nr_ns() : id seen from the ns specified;
1662 *
1663 * set_task_vxid() : assigns a virtual id to a task;
1664 *
1665 * see also pid_nr() etc in include/linux/pid.h
1666 */
1667 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
1668 struct pid_namespace *ns);
1669
1670 static inline pid_t task_pid_nr(struct task_struct *tsk)
1671 {
1672 return tsk->pid;
1673 }
1674
1675 static inline pid_t task_pid_nr_ns(struct task_struct *tsk,
1676 struct pid_namespace *ns)
1677 {
1678 return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
1679 }
1680
1681 static inline pid_t task_pid_vnr(struct task_struct *tsk)
1682 {
1683 return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
1684 }
1685
1686
1687 static inline pid_t task_tgid_nr(struct task_struct *tsk)
1688 {
1689 return tsk->tgid;
1690 }
1691
1692 pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
1693
1694 static inline pid_t task_tgid_vnr(struct task_struct *tsk)
1695 {
1696 return pid_vnr(task_tgid(tsk));
1697 }
1698
1699
1700 static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
1701 struct pid_namespace *ns)
1702 {
1703 return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
1704 }
1705
1706 static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
1707 {
1708 return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
1709 }
1710
1711
1712 static inline pid_t task_session_nr_ns(struct task_struct *tsk,
1713 struct pid_namespace *ns)
1714 {
1715 return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
1716 }
1717
1718 static inline pid_t task_session_vnr(struct task_struct *tsk)
1719 {
1720 return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
1721 }
1722
1723 /* obsolete, do not use */
1724 static inline pid_t task_pgrp_nr(struct task_struct *tsk)
1725 {
1726 return task_pgrp_nr_ns(tsk, &init_pid_ns);
1727 }
1728
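/*
 * Illustrative sketch: the same task reports different pid numbers to
 * different observers. A task that is pid 1 inside a child pid
 * namespace has some other, larger global id. The printk() is purely
 * for illustration.
 */
static inline void show_pid_views(struct task_struct *tsk)
{
	pid_t global = task_pid_nr(tsk);	/* as seen from the init ns */
	pid_t virt = task_pid_vnr(tsk);		/* as seen from current's ns */

	printk(KERN_DEBUG "%s: global pid %d, virtual pid %d\n",
	       tsk->comm, global, virt);
}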
1729 /**
1730 * pid_alive - check that a task structure is not stale
1731 * @p: Task structure to be checked.
1732 *
1733 * Test if a process is not yet dead (at most zombie state).
1734 * If pid_alive fails, then pointers within the task structure
1735 * can be stale and must not be dereferenced.
1736 */
1737 static inline int pid_alive(struct task_struct *p)
1738 {
1739 return p->pids[PIDTYPE_PID].pid != NULL;
1740 }
1741
1742 /**
1743 * is_global_init - check if a task structure is init
1744 * @tsk: Task structure to be checked.
1745 *
1746 * Check if a task structure is the first user space task the kernel created.
1747 */
1748 static inline int is_global_init(struct task_struct *tsk)
1749 {
1750 return tsk->pid == 1;
1751 }
1752
1753 /*
1754 * is_container_init:
1755 * check whether the task is the init task in its own pid namespace.
1756 */
1757 extern int is_container_init(struct task_struct *tsk);
1758
1759 extern struct pid *cad_pid;
1760
1761 extern void free_task(struct task_struct *tsk);
1762 #define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
1763
1764 extern void __put_task_struct(struct task_struct *t);
1765
1766 static inline void put_task_struct(struct task_struct *t)
1767 {
1768 if (atomic_dec_and_test(&t->usage))
1769 __put_task_struct(t);
1770 }
1771
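/*
 * Illustrative sketch of the refcount pattern get_task_struct() and
 * put_task_struct() implement: pin the task before dropping whatever
 * lock made the pointer valid, and release the pin when done. The
 * "..." stands for arbitrary use of tsk outside the locked region.
 */
static inline void task_ref_pattern(struct task_struct *tsk)
{
	get_task_struct(tsk);	/* tsk cannot be freed while we hold this */
	/* ... use tsk, possibly sleeping, without tasklist/rcu lock ... */
	put_task_struct(tsk);	/* last ref frees via __put_task_struct() */
}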
1772 extern void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st);
1773 extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st);
1774
1775 /*
1776 * Per process flags
1777 */
1778 #define PF_EXITING 0x00000004 /* getting shut down */
1779 #define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */
1780 #define PF_VCPU 0x00000010 /* I'm a virtual CPU */
1781 #define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */
1782 #define PF_FORKNOEXEC 0x00000040 /* forked but didn't exec */
1783 #define PF_MCE_PROCESS 0x00000080 /* process policy on mce errors */
1784 #define PF_SUPERPRIV 0x00000100 /* used super-user privileges */
1785 #define PF_DUMPCORE 0x00000200 /* dumped core */
1786 #define PF_SIGNALED 0x00000400 /* killed by a signal */
1787 #define PF_MEMALLOC 0x00000800 /* Allocating memory */
1788 #define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */
1789 #define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */
1790 #define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */
1791 #define PF_FROZEN 0x00010000 /* frozen for system suspend */
1792 #define PF_FSTRANS 0x00020000 /* inside a filesystem transaction */
1793 #define PF_KSWAPD 0x00040000 /* I am kswapd */
1794 #define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */
1795 #define PF_KTHREAD 0x00200000 /* I am a kernel thread */
1796 #define PF_RANDOMIZE 0x00400000 /* randomize virtual address space */
1797 #define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */
1798 #define PF_SPREAD_PAGE 0x01000000 /* Spread page cache over cpuset */
1799 #define PF_SPREAD_SLAB 0x02000000 /* Spread some slab caches over cpuset */
1800 #define PF_THREAD_BOUND 0x04000000 /* Thread bound to specific cpu */
1801 #define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */
1802 #define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */
1803 #define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */
1804 #define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */
1805
1806 /*
1807 * Only the _current_ task can read/write to tsk->flags, but other
1808 * tasks can access tsk->flags in readonly mode, for example
1809 * with tsk_used_math() (as during threaded core dumping).
1810 * There is however an exception to this rule during ptrace
1811 * or during fork: the ptracer task is allowed to write to the
1812 * child->flags of its traced child (same goes for fork, the parent
1813 * can write to the child->flags), because we're guaranteed the
1814 * child is not running and in turn not changing child->flags
1815 * at the same time the parent does it.
1816 */
1817 #define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
1818 #define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
1819 #define clear_used_math() clear_stopped_child_used_math(current)
1820 #define set_used_math() set_stopped_child_used_math(current)
1821 #define conditional_stopped_child_used_math(condition, child) \
1822 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
1823 #define conditional_used_math(condition) \
1824 conditional_stopped_child_used_math(condition, current)
1825 #define copy_to_stopped_child_used_math(child) \
1826 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
1827 /* NOTE: this will return 0 or PF_USED_MATH; it will never return 1 */
1828 #define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
1829 #define used_math() tsk_used_math(current)
1830
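/*
 * Illustrative sketch: per-process flags are plain bit tests on
 * tsk->flags. Per the locking rule above, reading another task's
 * flags is fine; writing is reserved for current (or a stopped
 * child via ptrace/fork).
 */
static inline int task_is_kernel_thread(struct task_struct *tsk)
{
	return (tsk->flags & PF_KTHREAD) != 0;	/* read-only peek */
}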
1831 /*
1832 * task->jobctl flags
1833 */
1834 #define JOBCTL_STOP_SIGMASK 0xffff /* signr of the last group stop */
1835
1836 #define JOBCTL_STOP_DEQUEUED_BIT 16 /* stop signal dequeued */
1837 #define JOBCTL_STOP_PENDING_BIT 17 /* task should stop for group stop */
1838 #define JOBCTL_STOP_CONSUME_BIT 18 /* consume group stop count */
1839 #define JOBCTL_TRAP_STOP_BIT 19 /* trap for STOP */
1840 #define JOBCTL_TRAP_NOTIFY_BIT 20 /* trap for NOTIFY */
1841 #define JOBCTL_TRAPPING_BIT 21 /* switching to TRACED */
1842 #define JOBCTL_LISTENING_BIT 22 /* ptracer is listening for events */
1843
1844 #define JOBCTL_STOP_DEQUEUED (1 << JOBCTL_STOP_DEQUEUED_BIT)
1845 #define JOBCTL_STOP_PENDING (1 << JOBCTL_STOP_PENDING_BIT)
1846 #define JOBCTL_STOP_CONSUME (1 << JOBCTL_STOP_CONSUME_BIT)
1847 #define JOBCTL_TRAP_STOP (1 << JOBCTL_TRAP_STOP_BIT)
1848 #define JOBCTL_TRAP_NOTIFY (1 << JOBCTL_TRAP_NOTIFY_BIT)
1849 #define JOBCTL_TRAPPING (1 << JOBCTL_TRAPPING_BIT)
1850 #define JOBCTL_LISTENING (1 << JOBCTL_LISTENING_BIT)
1851
1852 #define JOBCTL_TRAP_MASK (JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
1853 #define JOBCTL_PENDING_MASK (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)
1854
1855 extern bool task_set_jobctl_pending(struct task_struct *task,
1856 unsigned int mask);
1857 extern void task_clear_jobctl_trapping(struct task_struct *task);
1858 extern void task_clear_jobctl_pending(struct task_struct *task,
1859 unsigned int mask);
1860
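/*
 * Illustrative sketch: task->jobctl packs the pending stop signal
 * number in the low 16 bits (JOBCTL_STOP_SIGMASK) and state bits
 * above it, so one word answers both "should we stop?" and "with
 * which signal?". Reading jobctl like this assumes siglock is held.
 */
static inline int jobctl_stop_signr(struct task_struct *task)
{
	if (task->jobctl & JOBCTL_STOP_PENDING)
		return task->jobctl & JOBCTL_STOP_SIGMASK;
	return 0;	/* no group stop pending */
}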
1861 #ifdef CONFIG_PREEMPT_RCU
1862
1863 #define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */
1864 #define RCU_READ_UNLOCK_NEED_QS (1 << 1) /* RCU core needs CPU response. */
1865
1866 static inline void rcu_copy_process(struct task_struct *p)
1867 {
1868 p->rcu_read_lock_nesting = 0;
1869 p->rcu_read_unlock_special = 0;
1870 #ifdef CONFIG_TREE_PREEMPT_RCU
1871 p->rcu_blocked_node = NULL;
1872 #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
1873 #ifdef CONFIG_RCU_BOOST
1874 p->rcu_boost_mutex = NULL;
1875 #endif /* #ifdef CONFIG_RCU_BOOST */
1876 INIT_LIST_HEAD(&p->rcu_node_entry);
1877 }
1878
1879 #else
1880
1881 static inline void rcu_copy_process(struct task_struct *p)
1882 {
1883 }
1884
1885 #endif
1886
1887 static inline void rcu_switch(struct task_struct *prev,
1888 struct task_struct *next)
1889 {
1890 #ifdef CONFIG_RCU_USER_QS
1891 rcu_user_hooks_switch(prev, next);
1892 #endif
1893 }
1894
1895 static inline void tsk_restore_flags(struct task_struct *task,
1896 unsigned long orig_flags, unsigned long flags)
1897 {
1898 task->flags &= ~flags;
1899 task->flags |= orig_flags & flags;
1900 }
1901
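/*
 * Illustrative sketch of the idiom tsk_restore_flags() exists for:
 * snapshot current->flags, set a temporary bit such as PF_MEMALLOC,
 * then restore only that bit, leaving any bits current changed in
 * the meantime alone.
 */
static inline void memalloc_section_sketch(void)
{
	unsigned long pflags = current->flags;		/* snapshot */

	current->flags |= PF_MEMALLOC;			/* no reclaim recursion */
	/* ... allocation work that must not re-enter reclaim ... */
	tsk_restore_flags(current, pflags, PF_MEMALLOC);
}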
1902 #ifdef CONFIG_SMP
1903 extern void do_set_cpus_allowed(struct task_struct *p,
1904 const struct cpumask *new_mask);
1905
1906 extern int set_cpus_allowed_ptr(struct task_struct *p,
1907 const struct cpumask *new_mask);
1908 #else
1909 static inline void do_set_cpus_allowed(struct task_struct *p,
1910 const struct cpumask *new_mask)
1911 {
1912 }
1913 static inline int set_cpus_allowed_ptr(struct task_struct *p,
1914 const struct cpumask *new_mask)
1915 {
1916 if (!cpumask_test_cpu(0, new_mask))
1917 return -EINVAL;
1918 return 0;
1919 }
1920 #endif
1921
1922 #ifdef CONFIG_NO_HZ
1923 void calc_load_enter_idle(void);
1924 void calc_load_exit_idle(void);
1925 #else
1926 static inline void calc_load_enter_idle(void) { }
1927 static inline void calc_load_exit_idle(void) { }
1928 #endif /* CONFIG_NO_HZ */
1929
1930 #ifndef CONFIG_CPUMASK_OFFSTACK
1931 static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
1932 {
1933 return set_cpus_allowed_ptr(p, &new_mask);
1934 }
1935 #endif
1936
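/*
 * Illustrative sketch: pinning a task to a single CPU. cpumask_of()
 * yields a constant one-bit mask; set_cpus_allowed_ptr() returns
 * -EINVAL when the mask contains no usable CPU.
 */
static inline int pin_task_to_cpu(struct task_struct *p, int cpu)
{
	return set_cpus_allowed_ptr(p, cpumask_of(cpu));
}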
1937 /*
1938 * Do not use outside of architecture code which knows its limitations.
1939 *
1940 * sched_clock() has no promise of monotonicity or bounded drift between
1941 * CPUs, and using it (which you should not) requires disabling IRQs.
1942 *
1943 * Please use one of the three interfaces below.
1944 */
1945 extern unsigned long long notrace sched_clock(void);
1946 /*
1947 * See the comment in kernel/sched/clock.c
1948 */
1949 extern u64 cpu_clock(int cpu);
1950 extern u64 local_clock(void);
1951 extern u64 sched_clock_cpu(int cpu);
1952
1953
1954 extern void sched_clock_init(void);
1955
1956 #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
1957 static inline void sched_clock_tick(void)
1958 {
1959 }
1960
1961 static inline void sched_clock_idle_sleep_event(void)
1962 {
1963 }
1964
1965 static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
1966 {
1967 }
1968 #else
1969 /*
1970 * Architectures can set this to 1 if they have specified
1971 * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
1972 * but then during bootup it turns out that sched_clock()
1973 * is reliable after all:
1974 */
1975 extern int sched_clock_stable;
1976
1977 extern void sched_clock_tick(void);
1978 extern void sched_clock_idle_sleep_event(void);
1979 extern void sched_clock_idle_wakeup_event(u64 delta_ns);
1980 #endif
1981
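/*
 * Illustrative sketch: timing a section with local_clock(), the
 * cheap per-cpu interface among the three above. The result is in
 * nanoseconds and only comparable against readings on the same CPU.
 * fn() is a hypothetical callback standing in for the measured work.
 */
static inline u64 timed_call_ns(void (*fn)(void))
{
	u64 start = local_clock();

	fn();
	return local_clock() - start;	/* elapsed ns, same-CPU caveat */
}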
1982 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
1983 /*
1984 * An interface for runtime opt-in to irq time accounting, based on sched_clock.
1985 * The opt-in is explicit to avoid a performance penalty on systems with
1986 * slow sched_clock implementations.
1987 */
1988 extern void enable_sched_clock_irqtime(void);
1989 extern void disable_sched_clock_irqtime(void);
1990 #else
1991 static inline void enable_sched_clock_irqtime(void) {}
1992 static inline void disable_sched_clock_irqtime(void) {}
1993 #endif
1994
1995 extern unsigned long long
1996 task_sched_runtime(struct task_struct *task);
1997
1998 /* sched_exec is called by processes performing an exec */
1999 #ifdef CONFIG_SMP
2000 extern void sched_exec(void);
2001 #else
2002 #define sched_exec() {}
2003 #endif
2004
2005 extern void sched_clock_idle_sleep_event(void);
2006 extern void sched_clock_idle_wakeup_event(u64 delta_ns);
2007
2008 #ifdef CONFIG_HOTPLUG_CPU
2009 extern void idle_task_exit(void);
2010 #else
2011 static inline void idle_task_exit(void) {}
2012 #endif
2013
2014 #if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP)
2015 extern void wake_up_idle_cpu(int cpu);
2016 #else
2017 static inline void wake_up_idle_cpu(int cpu) { }
2018 #endif
2019
2020 extern unsigned int sysctl_sched_latency;
2021 extern unsigned int sysctl_sched_min_granularity;
2022 extern unsigned int sysctl_sched_wakeup_granularity;
2023 extern unsigned int sysctl_sched_child_runs_first;
2024
2025 enum sched_tunable_scaling {
2026 SCHED_TUNABLESCALING_NONE,
2027 SCHED_TUNABLESCALING_LOG,
2028 SCHED_TUNABLESCALING_LINEAR,
2029 SCHED_TUNABLESCALING_END,
2030 };
2031 extern enum sched_tunable_scaling sysctl_sched_tunable_scaling;
2032
2033 #ifdef CONFIG_SCHED_DEBUG
2034 extern unsigned int sysctl_sched_migration_cost;
2035 extern unsigned int sysctl_sched_nr_migrate;
2036 extern unsigned int sysctl_sched_time_avg;
2037 extern unsigned int sysctl_timer_migration;
2038 extern unsigned int sysctl_sched_shares_window;
2039
2040 int sched_proc_update_handler(struct ctl_table *table, int write,
2041 void __user *buffer, size_t *length,
2042 loff_t *ppos);
2043 #endif
2044 #ifdef CONFIG_SCHED_DEBUG
2045 static inline unsigned int get_sysctl_timer_migration(void)
2046 {
2047 return sysctl_timer_migration;
2048 }
2049 #else
2050 static inline unsigned int get_sysctl_timer_migration(void)
2051 {
2052 return 1;
2053 }
2054 #endif
2055 extern unsigned int sysctl_sched_rt_period;
2056 extern int sysctl_sched_rt_runtime;
2057
2058 int sched_rt_handler(struct ctl_table *table, int write,
2059 void __user *buffer, size_t *lenp,
2060 loff_t *ppos);
2061
2062 #ifdef CONFIG_SCHED_AUTOGROUP
2063 extern unsigned int sysctl_sched_autogroup_enabled;
2064
2065 extern void sched_autogroup_create_attach(struct task_struct *p);
2066 extern void sched_autogroup_detach(struct task_struct *p);
2067 extern void sched_autogroup_fork(struct signal_struct *sig);
2068 extern void sched_autogroup_exit(struct signal_struct *sig);
2069 #ifdef CONFIG_PROC_FS
2070 extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m);
2071 extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice);
2072 #endif
2073 #else
2074 static inline void sched_autogroup_create_attach(struct task_struct *p) { }
2075 static inline void sched_autogroup_detach(struct task_struct *p) { }
2076 static inline void sched_autogroup_fork(struct signal_struct *sig) { }
2077 static inline void sched_autogroup_exit(struct signal_struct *sig) { }
2078 #endif
2079
2080 #ifdef CONFIG_CFS_BANDWIDTH
2081 extern unsigned int sysctl_sched_cfs_bandwidth_slice;
2082 #endif
2083
2084 #ifdef CONFIG_RT_MUTEXES
2085 extern int rt_mutex_getprio(struct task_struct *p);
2086 extern void rt_mutex_setprio(struct task_struct *p, int prio);
2087 extern void rt_mutex_adjust_pi(struct task_struct *p);
2088 static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
2089 {
2090 return tsk->pi_blocked_on != NULL;
2091 }
2092 #else
2093 static inline int rt_mutex_getprio(struct task_struct *p)
2094 {
2095 return p->normal_prio;
2096 }
2097 # define rt_mutex_adjust_pi(p) do { } while (0)
2098 static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
2099 {
2100 return false;
2101 }
2102 #endif
2103
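/*
 * Illustrative sketch: with CONFIG_RT_MUTEXES, rt_mutex_getprio()
 * can return a priority better (numerically lower) than the task's
 * normal_prio when priority inheritance has boosted it; without the
 * option it degenerates to normal_prio, as above, so this test is
 * always false.
 */
static inline int task_is_pi_boosted(struct task_struct *p)
{
	return rt_mutex_getprio(p) < p->normal_prio;
}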
2104 extern bool yield_to(struct task_struct *p, bool preempt);
2105 extern void set_user_nice(struct task_struct *p, long nice);
2106 extern int task_prio(const struct task_struct *p);
2107 extern int task_nice(const struct task_struct *p);
2108 extern int can_nice(const struct task_struct *p, const int nice);
2109 extern int task_curr(const struct task_struct *p);
2110 extern int idle_cpu(int cpu);
2111 extern int sched_setscheduler(struct task_struct *, int,
2112 const struct sched_param *);
2113 extern int sched_setscheduler_nocheck(struct task_struct *, int,
2114 const struct sched_param *);
2115 extern struct task_struct *idle_task(int cpu);
2116 /**
2117 * is_idle_task - is the specified task an idle task?
2118 * @p: the task in question.
2119 */
2120 static inline bool is_idle_task(const struct task_struct *p)
2121 {
2122 return p->pid == 0;
2123 }
2124 extern struct task_struct *curr_task(int cpu);
2125 extern void set_curr_task(int cpu, struct task_struct *p);
2126
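/*
 * Illustrative sketch: switching a kernel-internal task to SCHED_FIFO.
 * The priority 50 is an arbitrary assumption; the _nocheck variant
 * skips the capability/rlimit checks and is meant for kernel callers
 * only.
 */
static inline int make_task_fifo_sketch(struct task_struct *p)
{
	struct sched_param param = { .sched_priority = 50 };

	return sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
}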
2127 void yield(void);
2128
2129 /*
2130 * The default (Linux) execution domain.
2131 */
2132 extern struct exec_domain default_exec_domain;
2133
2134 union thread_union {
2135 struct thread_info thread_info;
2136 unsigned long stack[THREAD_SIZE/sizeof(long)];
2137 };
2138
2139 #ifndef __HAVE_ARCH_KSTACK_END
2140 static inline int kstack_end(void *addr)
2141 {
2142 /* Reliable end of stack detection:
2143 * Some APM BIOS versions misalign the stack.
2144 */
2145 return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
2146 }
2147 #endif
2148
2149 extern union thread_union init_thread_union;
2150 extern struct task_struct init_task;
2151
2152 extern struct mm_struct init_mm;
2153
2154 extern struct pid_namespace init_pid_ns;
2155
2156 /*
2157 * find a task by one of its numerical ids
2158 *
2159 * find_task_by_pid_ns():
2160 * finds a task by its pid in the specified namespace
2161 * find_task_by_vpid():
2162 * finds a task by its virtual pid
2163 *
2164 * see also find_vpid() etc in include/linux/pid.h
2165 */
2166
2167 extern struct task_struct *find_task_by_vpid(pid_t nr);
2168 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
2169 struct pid_namespace *ns);
2170
2171 extern void __set_special_pids(struct pid *pid);
2172
2173 /* per-UID process charging. */
2174 extern struct user_struct * alloc_uid(kuid_t);
2175 static inline struct user_struct *get_uid(struct user_struct *u)
2176 {
2177 atomic_inc(&u->__count);
2178 return u;
2179 }
2180 extern void free_uid(struct user_struct *);
2181
2182 #include <asm/current.h>
2183
2184 extern void xtime_update(unsigned long ticks);
2185
2186 extern int wake_up_state(struct task_struct *tsk, unsigned int state);
2187 extern int wake_up_process(struct task_struct *tsk);
2188 extern void wake_up_new_task(struct task_struct *tsk);
2189 #ifdef CONFIG_SMP
2190 extern void kick_process(struct task_struct *tsk);
2191 #else
2192 static inline void kick_process(struct task_struct *tsk) { }
2193 #endif
2194 extern void sched_fork(struct task_struct *p);
2195 extern void sched_dead(struct task_struct *p);
2196
2197 extern void proc_caches_init(void);
2198 extern void flush_signals(struct task_struct *);
2199 extern void __flush_signals(struct task_struct *);
2200 extern void ignore_signals(struct task_struct *);
2201 extern void flush_signal_handlers(struct task_struct *, int force_default);
2202 extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);
2203
2204 static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
2205 {
2206 unsigned long flags;
2207 int ret;
2208
2209 spin_lock_irqsave(&tsk->sighand->siglock, flags);
2210 ret = dequeue_signal(tsk, mask, info);
2211 spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
2212
2213 return ret;
2214 }
2215
2216 extern void block_all_signals(int (*notifier)(void *priv), void *priv,
2217 sigset_t *mask);
2218 extern void unblock_all_signals(void);
2219 extern void release_task(struct task_struct * p);
2220 extern int send_sig_info(int, struct siginfo *, struct task_struct *);
2221 extern int force_sigsegv(int, struct task_struct *);
2222 extern int force_sig_info(int, struct siginfo *, struct task_struct *);
2223 extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
2224 extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
2225 extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *,
2226 const struct cred *, u32);
2227 extern int kill_pgrp(struct pid *pid, int sig, int priv);
2228 extern int kill_pid(struct pid *pid, int sig, int priv);
2229 extern int kill_proc_info(int, struct siginfo *, pid_t);
2230 extern __must_check bool do_notify_parent(struct task_struct *, int);
2231 extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
2232 extern void force_sig(int, struct task_struct *);
2233 extern int send_sig(int, struct task_struct *, int);
2234 extern int zap_other_threads(struct task_struct *p);
2235 extern struct sigqueue *sigqueue_alloc(void);
2236 extern void sigqueue_free(struct sigqueue *);
2237 extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group);
2238 extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
2239 extern int do_sigaltstack(const stack_t __user *, stack_t __user *, unsigned long);
2240
2241 static inline void restore_saved_sigmask(void)
2242 {
2243 if (test_and_clear_restore_sigmask())
2244 __set_current_blocked(&current->saved_sigmask);
2245 }
2246
2247 static inline sigset_t *sigmask_to_save(void)
2248 {
2249 sigset_t *res = &current->blocked;
2250 if (unlikely(test_restore_sigmask()))
2251 res = &current->saved_sigmask;
2252 return res;
2253 }
2254
2255 static inline int kill_cad_pid(int sig, int priv)
2256 {
2257 return kill_pid(cad_pid, sig, priv);
2258 }
2259
2260 /* These can be the second arg to send_sig_info/send_group_sig_info. */
2261 #define SEND_SIG_NOINFO ((struct siginfo *) 0)
2262 #define SEND_SIG_PRIV ((struct siginfo *) 1)
2263 #define SEND_SIG_FORCED ((struct siginfo *) 2)
2264
2265 /*
2266 * True if we are on the alternate signal stack.
2267 */
2268 static inline int on_sig_stack(unsigned long sp)
2269 {
2270 #ifdef CONFIG_STACK_GROWSUP
2271 return sp >= current->sas_ss_sp &&
2272 sp - current->sas_ss_sp < current->sas_ss_size;
2273 #else
2274 return sp > current->sas_ss_sp &&
2275 sp - current->sas_ss_sp <= current->sas_ss_size;
2276 #endif
2277 }
2278
2279 static inline int sas_ss_flags(unsigned long sp)
2280 {
2281 return (current->sas_ss_size == 0 ? SS_DISABLE
2282 : on_sig_stack(sp) ? SS_ONSTACK : 0);
2283 }
2284
2285 /*
2286 * Routines for handling mm_structs
2287 */
2288 extern struct mm_struct * mm_alloc(void);
2289
2290 /* mmdrop drops the mm and the page tables */
2291 extern void __mmdrop(struct mm_struct *);
2292 static inline void mmdrop(struct mm_struct * mm)
2293 {
2294 if (unlikely(atomic_dec_and_test(&mm->mm_count)))
2295 __mmdrop(mm);
2296 }
2297
2298 /* mmput gets rid of the mappings and all user-space */
2299 extern void mmput(struct mm_struct *);
2300 /* Grab a reference to a task's mm, if it is not already going away */
2301 extern struct mm_struct *get_task_mm(struct task_struct *task);
2302 /*
2303 * Grab a reference to a task's mm, if it is not already going away
2304 * and a ptrace_may_access() check with the given mode parameter
2305 * succeeds.
2306 */
2307 extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
2308 /* Remove the current task's stale references to the old mm_struct */
2309 extern void mm_release(struct task_struct *, struct mm_struct *);
2310 /* Allocate a new mm structure and copy contents from tsk->mm */
2311 extern struct mm_struct *dup_mm(struct task_struct *tsk);
2312
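/*
 * Illustrative sketch of the canonical mm reference pattern:
 * get_task_mm() returns NULL for kernel threads and for tasks whose
 * mm is already on its way out, so the NULL check is mandatory, and
 * every successful get must be paired with mmput().
 */
static inline unsigned long task_total_vm_sketch(struct task_struct *task)
{
	struct mm_struct *mm = get_task_mm(task);
	unsigned long total = 0;

	if (mm) {
		total = mm->total_vm;	/* pages mapped by this mm */
		mmput(mm);
	}
	return total;
}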
2313 extern int copy_thread(unsigned long, unsigned long, unsigned long,
2314 struct task_struct *, struct pt_regs *);
2315 extern void flush_thread(void);
2316 extern void exit_thread(void);
2317
2318 extern void exit_files(struct task_struct *);
2319 extern void __cleanup_sighand(struct sighand_struct *);
2320
2321 extern void exit_itimers(struct signal_struct *);
2322 extern void flush_itimer_signals(void);
2323
2324 extern void do_group_exit(int);
2325
2326 extern void daemonize(const char *, ...);
2327 extern int allow_signal(int);
2328 extern int disallow_signal(int);
2329
2330 extern int do_execve(const char *,
2331 const char __user * const __user *,
2332 const char __user * const __user *, struct pt_regs *);
2333 extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *);
2334 struct task_struct *fork_idle(int);
2335 #ifdef CONFIG_GENERIC_KERNEL_THREAD
2336 extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
2337 #endif
2338
2339 extern void set_task_comm(struct task_struct *tsk, char *from);
2340 extern char *get_task_comm(char *to, struct task_struct *tsk);
2341
2342 #ifdef CONFIG_SMP
2343 void scheduler_ipi(void);
2344 extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
2345 #else
2346 static inline void scheduler_ipi(void) { }
2347 static inline unsigned long wait_task_inactive(struct task_struct *p,
2348 long match_state)
2349 {
2350 return 1;
2351 }
2352 #endif
2353
2354 #define next_task(p) \
2355 list_entry_rcu((p)->tasks.next, struct task_struct, tasks)
2356
2357 #define for_each_process(p) \
2358 for (p = &init_task ; (p = next_task(p)) != &init_task ; )
2359
2360 extern bool current_is_single_threaded(void);
2361
2362 /*
2363 * Careful: do_each_thread/while_each_thread is a double loop so
2364 * 'break' will not work as expected - use goto instead (see the sketch below).
2365 */
2366 #define do_each_thread(g, t) \
2367 for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do
2368
2369 #define while_each_thread(g, t) \
2370 while ((t = next_thread(t)) != g)
2371
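/*
 * Illustrative sketch of the goto-based escape the comment above
 * calls for. Callers must hold rcu_read_lock() or tasklist_lock;
 * note that find_task_by_vpid() is the real interface for lookups
 * like this one.
 */
static inline struct task_struct *find_thread_sketch(pid_t nr)
{
	struct task_struct *g, *t;

	do_each_thread(g, t) {
		if (t->pid == nr)
			goto found;	/* 'break' would only exit the inner loop */
	} while_each_thread(g, t);
	t = NULL;
found:
	return t;
}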
2372 static inline int get_nr_threads(struct task_struct *tsk)
2373 {
2374 return tsk->signal->nr_threads;
2375 }
2376
2377 static inline bool thread_group_leader(struct task_struct *p)
2378 {
2379 return p->exit_signal >= 0;
2380 }
2381
2382 /* Due to the insanities of de_thread it is possible for a process
2383 * to have the pid of the thread group leader without actually being
2384 * the thread group leader. For iteration through the pids in proc
2385 * all we care about is that we have a task with the appropriate
2386 * pid; we don't actually care if we have the right task.
2387 */
2388 static inline int has_group_leader_pid(struct task_struct *p)
2389 {
2390 return p->pid == p->tgid;
2391 }
2392
2393 static inline
2394 int same_thread_group(struct task_struct *p1, struct task_struct *p2)
2395 {
2396 return p1->tgid == p2->tgid;
2397 }
2398
2399 static inline struct task_struct *next_thread(const struct task_struct *p)
2400 {
2401 return list_entry_rcu(p->thread_group.next,
2402 struct task_struct, thread_group);
2403 }
2404
2405 static inline int thread_group_empty(struct task_struct *p)
2406 {
2407 return list_empty(&p->thread_group);
2408 }
2409
2410 #define delay_group_leader(p) \
2411 (thread_group_leader(p) && !thread_group_empty(p))
2412
2413 /*
2414 * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
2415 * subscriptions and synchronises with wait4(). Also used in procfs. Also
2416 * pins the final release of task.io_context. Also protects ->cpuset and
2417 * ->cgroup.subsys[]. And ->vfork_done.
2418 *
2419 * Nests both inside and outside of read_lock(&tasklist_lock).
2420 * It must not be nested with write_lock_irq(&tasklist_lock),
2421 * neither inside nor outside.
2422 */
2423 static inline void task_lock(struct task_struct *p)
2424 {
2425 spin_lock(&p->alloc_lock);
2426 }
2427
2428 static inline void task_unlock(struct task_struct *p)
2429 {
2430 spin_unlock(&p->alloc_lock);
2431 }
2432
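/*
 * Illustrative sketch: ->comm is on task_lock's list of protected
 * fields, so copy it out under the lock, which is what the real
 * get_task_comm() does. Assumes <linux/string.h> for strncpy() and
 * that buf holds at least TASK_COMM_LEN bytes.
 */
static inline void copy_comm_sketch(char *buf, struct task_struct *p)
{
	task_lock(p);
	strncpy(buf, p->comm, TASK_COMM_LEN);
	task_unlock(p);
}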
2433 extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
2434 unsigned long *flags);
2435
2436 static inline struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
2437 unsigned long *flags)
2438 {
2439 struct sighand_struct *ret;
2440
2441 ret = __lock_task_sighand(tsk, flags);
2442 (void)__cond_lock(&tsk->sighand->siglock, ret);
2443 return ret;
2444 }
2445
2446 static inline void unlock_task_sighand(struct task_struct *tsk,
2447 unsigned long *flags)
2448 {
2449 spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
2450 }
2451
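/*
 * Illustrative sketch: unlike a bare spin_lock_irqsave() on siglock,
 * lock_task_sighand() can fail when the task has been reaped and its
 * sighand is gone, so the NULL check is mandatory.
 */
static inline int group_exiting_sketch(struct task_struct *tsk)
{
	unsigned long flags;
	int exiting = 0;

	if (lock_task_sighand(tsk, &flags)) {
		exiting = !!(tsk->signal->flags & SIGNAL_GROUP_EXIT);
		unlock_task_sighand(tsk, &flags);
	}
	return exiting;
}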
2452 #ifdef CONFIG_CGROUPS
2453 static inline void threadgroup_change_begin(struct task_struct *tsk)
2454 {
2455 down_read(&tsk->signal->group_rwsem);
2456 }
2457 static inline void threadgroup_change_end(struct task_struct *tsk)
2458 {
2459 up_read(&tsk->signal->group_rwsem);
2460 }
2461
2462 /**
2463 * threadgroup_lock - lock threadgroup
2464 * @tsk: member task of the threadgroup to lock
2465 *
2466 * Lock the threadgroup @tsk belongs to. No new task is allowed to enter
2467 * and member tasks aren't allowed to exit (as indicated by PF_EXITING) or
2468 * perform exec. This is useful for cases where the threadgroup needs to
2469 * stay stable across blockable operations.
2470 *
2471 * fork and exit paths explicitly call threadgroup_change_{begin|end}() for
2472 * synchronization. While held, no new task will be added to threadgroup
2473 * and no existing live task will have its PF_EXITING set.
2474 *
2475 * During exec, a task goes and puts its thread group through unusual
2476 * changes. After de-threading, exclusive access is assumed to resources
2477 * which are usually shared by tasks in the same group - e.g. sighand may
2478 * be replaced with a new one. Also, the exec'ing task takes over group
2479 * leader role including its pid. Exclude these changes while locked by
2480 * grabbing cred_guard_mutex, which is used to synchronize the exec path.
2481 */
2482 static inline void threadgroup_lock(struct task_struct *tsk)
2483 {
2484 /*
2485 * exec uses exit for de-threading, nesting group_rwsem inside
2486 * cred_guard_mutex. Grab cred_guard_mutex first.
2487 */
2488 mutex_lock(&tsk->signal->cred_guard_mutex);
2489 down_write(&tsk->signal->group_rwsem);
2490 }
2491
2492 /**
2493 * threadgroup_unlock - unlock threadgroup
2494 * @tsk: member task of the threadgroup to unlock
2495 *
2496 * Reverse threadgroup_lock().
2497 */
2498 static inline void threadgroup_unlock(struct task_struct *tsk)
2499 {
2500 up_write(&tsk->signal->group_rwsem);
2501 mutex_unlock(&tsk->signal->cred_guard_mutex);
2502 }
2503 #else
2504 static inline void threadgroup_change_begin(struct task_struct *tsk) {}
2505 static inline void threadgroup_change_end(struct task_struct *tsk) {}
2506 static inline void threadgroup_lock(struct task_struct *tsk) {}
2507 static inline void threadgroup_unlock(struct task_struct *tsk) {}
2508 #endif
2509
2510 #ifndef __HAVE_THREAD_FUNCTIONS
2511
2512 #define task_thread_info(task) ((struct thread_info *)(task)->stack)
2513 #define task_stack_page(task) ((task)->stack)
2514
2515 static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
2516 {
2517 *task_thread_info(p) = *task_thread_info(org);
2518 task_thread_info(p)->task = p;
2519 }
2520
2521 static inline unsigned long *end_of_stack(struct task_struct *p)
2522 {
2523 return (unsigned long *)(task_thread_info(p) + 1);
2524 }
2525
2526 #endif
2527
2528 static inline int object_is_on_stack(void *obj)
2529 {
2530 void *stack = task_stack_page(current);
2531
2532 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
2533 }
2534
2535 extern void thread_info_cache_init(void);
2536
2537 #ifdef CONFIG_DEBUG_STACK_USAGE
2538 static inline unsigned long stack_not_used(struct task_struct *p)
2539 {
2540 unsigned long *n = end_of_stack(p);
2541
2542 do { /* Skip over canary */
2543 n++;
2544 } while (!*n);
2545
2546 return (unsigned long)n - (unsigned long)end_of_stack(p);
2547 }
2548 #endif
2549
2550 /* set thread flags in another task's structure
2551 * - see asm/thread_info.h for TIF_xxxx flags available
2552 */
2553 static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
2554 {
2555 set_ti_thread_flag(task_thread_info(tsk), flag);
2556 }
2557
2558 static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
2559 {
2560 clear_ti_thread_flag(task_thread_info(tsk), flag);
2561 }
2562
2563 static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
2564 {
2565 return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
2566 }
2567
2568 static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
2569 {
2570 return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
2571 }
2572
2573 static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
2574 {
2575 return test_ti_thread_flag(task_thread_info(tsk), flag);
2576 }
2577
2578 static inline void set_tsk_need_resched(struct task_struct *tsk)
2579 {
2580 set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
2581 }
2582
2583 static inline void clear_tsk_need_resched(struct task_struct *tsk)
2584 {
2585 clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
2586 }
2587
2588 static inline int test_tsk_need_resched(struct task_struct *tsk)
2589 {
2590 return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
2591 }
2592
2593 static inline int restart_syscall(void)
2594 {
2595 set_tsk_thread_flag(current, TIF_SIGPENDING);
2596 return -ERESTARTNOINTR;
2597 }
2598
2599 static inline int signal_pending(struct task_struct *p)
2600 {
2601 return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
2602 }
2603
2604 static inline int __fatal_signal_pending(struct task_struct *p)
2605 {
2606 return unlikely(sigismember(&p->pending.signal, SIGKILL));
2607 }
2608
2609 static inline int fatal_signal_pending(struct task_struct *p)
2610 {
2611 return signal_pending(p) && __fatal_signal_pending(p);
2612 }
2613
2614 static inline int signal_pending_state(long state, struct task_struct *p)
2615 {
2616 if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
2617 return 0;
2618 if (!signal_pending(p))
2619 return 0;
2620
2621 return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
2622 }
2623
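/*
 * Illustrative sketch: the classic interruptible-wait loop these
 * helpers serve. condition() is a hypothetical predicate; the state
 * must be set before the predicate is re-checked so a concurrent
 * wake-up is not lost.
 */
static inline int wait_sketch(int (*condition)(void))
{
	int ret = 0;

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (condition())
			break;
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;	/* let the signal be handled */
			break;
		}
		schedule();
	}
	__set_current_state(TASK_RUNNING);
	return ret;
}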
2624 static inline int need_resched(void)
2625 {
2626 return unlikely(test_thread_flag(TIF_NEED_RESCHED));
2627 }
2628
2629 /*
2630 * cond_resched() and cond_resched_lock(): latency reduction via
2631 * explicit rescheduling in places that are safe. The return
2632 * value indicates whether a reschedule was actually done.
2633 * cond_resched_lock() will drop the spinlock before scheduling,
2634 * cond_resched_softirq() will enable bhs before scheduling.
2635 */
2636 extern int _cond_resched(void);
2637
2638 #define cond_resched() ({ \
2639 __might_sleep(__FILE__, __LINE__, 0); \
2640 _cond_resched(); \
2641 })
2642
2643 extern int __cond_resched_lock(spinlock_t *lock);
2644
2645 #ifdef CONFIG_PREEMPT_COUNT
2646 #define PREEMPT_LOCK_OFFSET PREEMPT_OFFSET
2647 #else
2648 #define PREEMPT_LOCK_OFFSET 0
2649 #endif
2650
2651 #define cond_resched_lock(lock) ({ \
2652 __might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET); \
2653 __cond_resched_lock(lock); \
2654 })
2655
2656 extern int __cond_resched_softirq(void);
2657
2658 #define cond_resched_softirq() ({ \
2659 __might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \
2660 __cond_resched_softirq(); \
2661 })
2662
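/*
 * Illustrative sketch: a long-running loop yielding at safe points so
 * a non-preemptible kernel does not hog the CPU. process_item() and
 * nr_items are hypothetical.
 */
static inline void long_loop_sketch(void (*process_item)(int), int nr_items)
{
	int i;

	for (i = 0; i < nr_items; i++) {
		process_item(i);
		cond_resched();	/* may schedule here if needed */
	}
}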
2663 /*
2664 * Does a critical section need to be broken due to another
2665 * task waiting? (This technically does not depend on CONFIG_PREEMPT,
2666 * but reflects a general need for low latency.)
2667 */
2668 static inline int spin_needbreak(spinlock_t *lock)
2669 {
2670 #ifdef CONFIG_PREEMPT
2671 return spin_is_contended(lock);
2672 #else
2673 return 0;
2674 #endif
2675 }
2676
2677 /*
2678 * Thread group CPU time accounting.
2679 */
2680 void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
2681 void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);
2682
2683 static inline void thread_group_cputime_init(struct signal_struct *sig)
2684 {
2685 raw_spin_lock_init(&sig->cputimer.lock);
2686 }
2687
2688 /*
2689 * Reevaluate whether the task has signals pending delivery.
2690 * Wake the task if so.
2691 * This is required every time the blocked sigset_t changes.
2692 * Callers must hold sighand->siglock.
2693 */
2694 extern void recalc_sigpending_and_wake(struct task_struct *t);
2695 extern void recalc_sigpending(void);
2696
2697 extern void signal_wake_up(struct task_struct *t, int resume_stopped);
2698
2699 /*
2700 * Wrappers for p->thread_info->cpu access. No-op on UP.
2701 */
2702 #ifdef CONFIG_SMP
2703
2704 static inline unsigned int task_cpu(const struct task_struct *p)
2705 {
2706 return task_thread_info(p)->cpu;
2707 }
2708
2709 extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
2710
2711 #else
2712
2713 static inline unsigned int task_cpu(const struct task_struct *p)
2714 {
2715 return 0;
2716 }
2717
2718 static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
2719 {
2720 }
2721
2722 #endif /* CONFIG_SMP */
2723
2724 extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
2725 extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
2726
2727 extern void normalize_rt_tasks(void);
2728
2729 #ifdef CONFIG_CGROUP_SCHED
2730
2731 extern struct task_group root_task_group;
2732
2733 extern struct task_group *sched_create_group(struct task_group *parent);
2734 extern void sched_destroy_group(struct task_group *tg);
2735 extern void sched_move_task(struct task_struct *tsk);
2736 #ifdef CONFIG_FAIR_GROUP_SCHED
2737 extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
2738 extern unsigned long sched_group_shares(struct task_group *tg);
2739 #endif
2740 #ifdef CONFIG_RT_GROUP_SCHED
2741 extern int sched_group_set_rt_runtime(struct task_group *tg,
2742 long rt_runtime_us);
2743 extern long sched_group_rt_runtime(struct task_group *tg);
2744 extern int sched_group_set_rt_period(struct task_group *tg,
2745 long rt_period_us);
2746 extern long sched_group_rt_period(struct task_group *tg);
2747 extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);
2748 #endif
2749 #endif /* CONFIG_CGROUP_SCHED */
2750
2751 extern int task_can_switch_user(struct user_struct *up,
2752 struct task_struct *tsk);
2753
2754 #ifdef CONFIG_TASK_XACCT
2755 static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
2756 {
2757 tsk->ioac.rchar += amt;
2758 }
2759
2760 static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
2761 {
2762 tsk->ioac.wchar += amt;
2763 }
2764
2765 static inline void inc_syscr(struct task_struct *tsk)
2766 {
2767 tsk->ioac.syscr++;
2768 }
2769
2770 static inline void inc_syscw(struct task_struct *tsk)
2771 {
2772 tsk->ioac.syscw++;
2773 }
2774 #else
2775 static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
2776 {
2777 }
2778
2779 static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
2780 {
2781 }
2782
2783 static inline void inc_syscr(struct task_struct *tsk)
2784 {
2785 }
2786
2787 static inline void inc_syscw(struct task_struct *tsk)
2788 {
2789 }
2790 #endif
2791
2792 #ifndef TASK_SIZE_OF
2793 #define TASK_SIZE_OF(tsk) TASK_SIZE
2794 #endif
2795
2796 #ifdef CONFIG_MM_OWNER
2797 extern void mm_update_next_owner(struct mm_struct *mm);
2798 extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p);
2799 #else
2800 static inline void mm_update_next_owner(struct mm_struct *mm)
2801 {
2802 }
2803
2804 static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
2805 {
2806 }
2807 #endif /* CONFIG_MM_OWNER */
2808
2809 static inline unsigned long task_rlimit(const struct task_struct *tsk,
2810 unsigned int limit)
2811 {
2812 return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur);
2813 }
2814
2815 static inline unsigned long task_rlimit_max(const struct task_struct *tsk,
2816 unsigned int limit)
2817 {
2818 return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_max);
2819 }
2820
2821 static inline unsigned long rlimit(unsigned int limit)
2822 {
2823 return task_rlimit(current, limit);
2824 }
2825
2826 static inline unsigned long rlimit_max(unsigned int limit)
2827 {
2828 return task_rlimit_max(current, limit);
2829 }
2830
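/*
 * Illustrative sketch: rlimit() reads current's soft limit, e.g. to
 * gate a request against RLIMIT_NOFILE the way the fd allocator does.
 */
static inline int within_nofile_limit_sketch(unsigned int want)
{
	return want < rlimit(RLIMIT_NOFILE);	/* soft limit */
}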
2831 #endif /* __KERNEL__ */
2832
2833 #endif