#ifndef _LINUX_SCHED_SIGNAL_H
#define _LINUX_SCHED_SIGNAL_H

#include <linux/rculist.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/sched/jobctl.h>
#include <linux/sched/task.h>
#include <linux/cred.h>

/*
 * Types defining task->signal and task->sighand and APIs using them:
 */

struct sighand_struct {
        atomic_t count;
        struct k_sigaction action[_NSIG];
        spinlock_t siglock;
        wait_queue_head_t signalfd_wqh;
};

/*
 * Per-process accounting stats:
 */
struct pacct_struct {
        int ac_flag;
        long ac_exitcode;
        unsigned long ac_mem;
        u64 ac_utime, ac_stime;
        unsigned long ac_minflt, ac_majflt;
};

struct cpu_itimer {
        u64 expires;
        u64 incr;
};

/*
 * This is the atomic variant of task_cputime, which can be used for
 * storing and updating task_cputime statistics without locking.
 */
struct task_cputime_atomic {
        atomic64_t utime;
        atomic64_t stime;
        atomic64_t sum_exec_runtime;
};

#define INIT_CPUTIME_ATOMIC \
        (struct task_cputime_atomic) {                          \
                .utime = ATOMIC64_INIT(0),                      \
                .stime = ATOMIC64_INIT(0),                      \
                .sum_exec_runtime = ATOMIC64_INIT(0),           \
        }

/**
 * struct thread_group_cputimer - thread group interval timer counts
 * @cputime_atomic:	atomic thread group interval timers.
 * @running:		true when there are timers running and
 *			@cputime_atomic receives updates.
 * @checking_timer:	true when a thread in the group is in the
 *			process of checking for thread group timers.
 *
 * This structure contains the version of task_cputime, above, that is
 * used for thread group CPU timer calculations.
 */
struct thread_group_cputimer {
        struct task_cputime_atomic cputime_atomic;
        bool running;
        bool checking_timer;
};

/*
 * NOTE! "signal_struct" does not have its own
 * locking, because a shared signal_struct always
 * implies a shared sighand_struct, so locking
 * sighand_struct is always a proper superset of
 * the locking of signal_struct.
 */
struct signal_struct {
        atomic_t sigcnt;
        atomic_t live;
        int nr_threads;
        struct list_head thread_head;

        wait_queue_head_t wait_chldexit;	/* for wait4() */

        /* current thread group signal load-balancing target: */
        struct task_struct *curr_target;

        /* shared signal handling: */
        struct sigpending shared_pending;

        /* thread group exit support */
        int group_exit_code;
        /* overloaded:
         * - notify group_exit_task when ->count is equal to notify_count
         * - everyone except group_exit_task is stopped during signal delivery
         *   of fatal signals, group_exit_task processes the signal.
         */
        int notify_count;
        struct task_struct *group_exit_task;

        /* thread group stop support, overloads group_exit_code too */
        int group_stop_count;
        unsigned int flags;	/* see SIGNAL_* flags below */

        /*
         * PR_SET_CHILD_SUBREAPER marks a process, like a service
         * manager, to re-parent orphan (double-forking) child processes
         * to this process instead of 'init'. The service manager is
         * able to receive SIGCHLD signals and is able to investigate
         * the process until it calls wait(). All children of this
         * process will inherit a flag if they should look for a
         * child_subreaper process at exit.
         */
        unsigned int is_child_subreaper:1;
        unsigned int has_child_subreaper:1;

#ifdef CONFIG_POSIX_TIMERS

        /* POSIX.1b Interval Timers */
        int posix_timer_id;
        struct list_head posix_timers;

        /* ITIMER_REAL timer for the process */
        struct hrtimer real_timer;
        ktime_t it_real_incr;

        /*
         * ITIMER_PROF and ITIMER_VIRTUAL timers for the process, we use
         * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing array as these
         * values are defined to 0 and 1 respectively
         */
        struct cpu_itimer it[2];

        /*
         * Thread group totals for process CPU timers.
         * See thread_group_cputimer(), et al, for details.
         */
        struct thread_group_cputimer cputimer;

        /* Earliest-expiration cache. */
        struct task_cputime cputime_expires;

        struct list_head cpu_timers[3];

#endif

        struct pid *leader_pid;

#ifdef CONFIG_NO_HZ_FULL
        atomic_t tick_dep_mask;
#endif

        struct pid *tty_old_pgrp;

        /* boolean value for session group leader */
        int leader;

        struct tty_struct *tty;	/* NULL if no tty */

#ifdef CONFIG_SCHED_AUTOGROUP
        struct autogroup *autogroup;
#endif
        /*
         * Cumulative resource counters for dead threads in the group,
         * and for reaped dead child processes forked by this group.
         * Live threads maintain their own counters and add to these
         * in __exit_signal, except for the group leader.
         */
        seqlock_t stats_lock;
        u64 utime, stime, cutime, cstime;
        u64 gtime;
        u64 cgtime;
        struct prev_cputime prev_cputime;
        unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
        unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
        unsigned long inblock, oublock, cinblock, coublock;
        unsigned long maxrss, cmaxrss;
        struct task_io_accounting ioac;

        /*
         * Cumulative ns of schedule CPU time of dead threads in the
         * group, not including a zombie group leader. (This only differs
         * from jiffies_to_ns(utime + stime) if sched_clock uses something
         * other than jiffies.)
         */
        unsigned long long sum_sched_runtime;

        /*
         * We don't bother to synchronize most readers of this at all,
         * because there is no reader checking a limit that actually needs
         * to get both rlim_cur and rlim_max atomically, and either one
         * alone is a single word that can safely be read normally.
         * getrlimit/setrlimit use task_lock(current->group_leader) to
         * protect this instead of the siglock, because they really
         * have no need to disable irqs.
         */
        struct rlimit rlim[RLIM_NLIMITS];

#ifdef CONFIG_BSD_PROCESS_ACCT
        struct pacct_struct pacct;	/* per-process accounting information */
#endif
#ifdef CONFIG_TASKSTATS
        struct taskstats *stats;
#endif
#ifdef CONFIG_AUDIT
        unsigned audit_tty;
        struct tty_audit_buf *tty_audit_buf;
#endif

        /*
         * Thread is the potential origin of an oom condition; kill first on
         * oom
         */
        bool oom_flag_origin;
        short oom_score_adj;		/* OOM kill score adjustment */
        short oom_score_adj_min;	/* OOM kill score adjustment min value.
                                         * Only settable by CAP_SYS_RESOURCE. */
        struct mm_struct *oom_mm;	/* recorded mm when the thread group got
                                         * killed by the oom killer */

        struct mutex cred_guard_mutex;	/* guard against foreign influences on
                                         * credential calculations
                                         * (notably, ptrace) */
} __randomize_layout;

/*
 * Bits in flags field of signal_struct.
 */
#define SIGNAL_STOP_STOPPED	0x00000001 /* job control stop in effect */
#define SIGNAL_STOP_CONTINUED	0x00000002 /* SIGCONT since WCONTINUED reap */
#define SIGNAL_GROUP_EXIT	0x00000004 /* group exit in progress */
#define SIGNAL_GROUP_COREDUMP	0x00000008 /* coredump in progress */
/*
 * Pending notifications to parent.
 */
#define SIGNAL_CLD_STOPPED	0x00000010
#define SIGNAL_CLD_CONTINUED	0x00000020
#define SIGNAL_CLD_MASK		(SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)

#define SIGNAL_UNKILLABLE	0x00000040 /* for init: ignore fatal signals */

#define SIGNAL_STOP_MASK (SIGNAL_CLD_MASK | SIGNAL_STOP_STOPPED | \
                          SIGNAL_STOP_CONTINUED)

static inline void signal_set_stop_flags(struct signal_struct *sig,
                                         unsigned int flags)
{
        WARN_ON(sig->flags & (SIGNAL_GROUP_EXIT|SIGNAL_GROUP_COREDUMP));
        sig->flags = (sig->flags & ~SIGNAL_STOP_MASK) | flags;
}

/* If true, all threads except ->group_exit_task have pending SIGKILL */
static inline int signal_group_exit(const struct signal_struct *sig)
{
        return (sig->flags & SIGNAL_GROUP_EXIT) ||
               (sig->group_exit_task != NULL);
}
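
/*
 * Illustrative usage sketch (not part of this header): callers
 * typically test signal_group_exit() with the siglock held so the
 * answer cannot change underneath them, e.g.:
 *
 *	spin_lock_irq(&task->sighand->siglock);
 *	if (signal_group_exit(task->signal))
 *		group_dying = true;		// hypothetical local
 *	spin_unlock_irq(&task->sighand->siglock);
 */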

extern void flush_signals(struct task_struct *);
extern void ignore_signals(struct task_struct *);
extern void flush_signal_handlers(struct task_struct *, int force_default);
extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);

static inline int kernel_dequeue_signal(siginfo_t *info)
{
        struct task_struct *tsk = current;
        siginfo_t __info;
        int ret;

        spin_lock_irq(&tsk->sighand->siglock);
        ret = dequeue_signal(tsk, &tsk->blocked, info ?: &__info);
        spin_unlock_irq(&tsk->sighand->siglock);

        return ret;
}
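
/*
 * Illustrative sketch (not part of this header): a kthread that has
 * enabled a signal with allow_signal() can drain it like this; the
 * NULL argument means the caller does not need the siginfo payload:
 *
 *	if (signal_pending(current))
 *		kernel_dequeue_signal(NULL);
 */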

static inline void kernel_signal_stop(void)
{
        spin_lock_irq(&current->sighand->siglock);
        if (current->jobctl & JOBCTL_STOP_DEQUEUED)
                __set_current_state(TASK_STOPPED);
        spin_unlock_irq(&current->sighand->siglock);

        schedule();
}
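
/*
 * Usage sketch (illustrative, not part of this header): after
 * kernel_dequeue_signal() returns a stop signal, a kernel thread can
 * participate in job control like this:
 *
 *	if (kernel_dequeue_signal(NULL) == SIGSTOP)
 *		kernel_signal_stop();
 *
 * TASK_STOPPED is set under the siglock so a racing SIGCONT cannot be
 * lost between the check and the final schedule().
 */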

extern int send_sig_info(int, struct siginfo *, struct task_struct *);
extern int force_sigsegv(int, struct task_struct *);
extern int force_sig_info(int, struct siginfo *, struct task_struct *);
extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *,
                                 const struct cred *, u32);
extern int kill_pgrp(struct pid *pid, int sig, int priv);
extern int kill_pid(struct pid *pid, int sig, int priv);
extern __must_check bool do_notify_parent(struct task_struct *, int);
extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
extern void force_sig(int, struct task_struct *);
extern int send_sig(int, struct task_struct *, int);
extern int zap_other_threads(struct task_struct *p);
extern struct sigqueue *sigqueue_alloc(void);
extern void sigqueue_free(struct sigqueue *);
extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group);
extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);

static inline int restart_syscall(void)
{
        set_tsk_thread_flag(current, TIF_SIGPENDING);
        return -ERESTARTNOINTR;
}
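
/*
 * Illustrative sketch (not part of this header): a syscall that must
 * be transparently restarted after some internal event, rather than
 * returning -EINTR to userspace, can do:
 *
 *	if (some_transient_condition)		// hypothetical condition
 *		return restart_syscall();
 *
 * -ERESTARTNOINTR is rewritten on the return-to-user path so the
 * syscall is re-executed and userspace never observes an error.
 */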

static inline int signal_pending(struct task_struct *p)
{
        return unlikely(test_tsk_thread_flag(p, TIF_SIGPENDING));
}

static inline int __fatal_signal_pending(struct task_struct *p)
{
        return unlikely(sigismember(&p->pending.signal, SIGKILL));
}

static inline int fatal_signal_pending(struct task_struct *p)
{
        return signal_pending(p) && __fatal_signal_pending(p);
}
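
/*
 * Illustrative sketch (not part of this header): long-running kernel
 * loops commonly poll fatal_signal_pending() so that SIGKILL can abort
 * work that would otherwise ignore signals:
 *
 *	while (more_work_to_do()) {		// hypothetical helper
 *		if (fatal_signal_pending(current))
 *			return -EINTR;
 *		do_one_chunk();			// hypothetical helper
 *		cond_resched();
 *	}
 */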

static inline int signal_pending_state(long state, struct task_struct *p)
{
        if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
                return 0;
        if (!signal_pending(p))
                return 0;

        return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
}
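
/*
 * Illustrative note (not part of this header): signal_pending_state()
 * encodes the sleep-state wakeup rules in one place.  A
 * TASK_INTERRUPTIBLE sleeper is woken by any pending signal; a
 * TASK_KILLABLE sleeper (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE) only by
 * a fatal one:
 *
 *	signal_pending_state(TASK_INTERRUPTIBLE, p);   // any signal
 *	signal_pending_state(TASK_KILLABLE, p);        // SIGKILL only
 *	signal_pending_state(TASK_UNINTERRUPTIBLE, p); // always 0
 */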

/*
 * Reevaluate whether the task has signals pending delivery.
 * Wake the task if so.
 * This is required every time the blocked sigset_t changes.
 * Callers must hold sighand->siglock.
 */
extern void recalc_sigpending_and_wake(struct task_struct *t);
extern void recalc_sigpending(void);

extern void signal_wake_up_state(struct task_struct *t, unsigned int state);

static inline void signal_wake_up(struct task_struct *t, bool resume)
{
        signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
}

static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
{
        signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
}

#ifdef TIF_RESTORE_SIGMASK
/*
 * Legacy restore_sigmask accessors.  These are inefficient on
 * SMP architectures because they require atomic operations.
 */

/**
 * set_restore_sigmask() - make sure saved_sigmask processing gets done
 *
 * This sets TIF_RESTORE_SIGMASK and ensures that the arch signal code
 * will run before returning to user mode, to process the flag.  For
 * all callers, TIF_SIGPENDING is already set or it's no harm to set
 * it.  TIF_RESTORE_SIGMASK need not be in the set of bits that the
 * arch code will notice on return to user mode, in case those bits
 * are scarce.  We set TIF_SIGPENDING here to ensure that the arch
 * signal code always gets run when TIF_RESTORE_SIGMASK is set.
 */
static inline void set_restore_sigmask(void)
{
        set_thread_flag(TIF_RESTORE_SIGMASK);
        WARN_ON(!test_thread_flag(TIF_SIGPENDING));
}

static inline void clear_restore_sigmask(void)
{
        clear_thread_flag(TIF_RESTORE_SIGMASK);
}

static inline bool test_restore_sigmask(void)
{
        return test_thread_flag(TIF_RESTORE_SIGMASK);
}

static inline bool test_and_clear_restore_sigmask(void)
{
        return test_and_clear_thread_flag(TIF_RESTORE_SIGMASK);
}

#else	/* TIF_RESTORE_SIGMASK */

/* Higher-quality implementation, used if TIF_RESTORE_SIGMASK doesn't exist. */
static inline void set_restore_sigmask(void)
{
        current->restore_sigmask = true;
        WARN_ON(!test_thread_flag(TIF_SIGPENDING));
}

static inline void clear_restore_sigmask(void)
{
        current->restore_sigmask = false;
}

static inline bool test_restore_sigmask(void)
{
        return current->restore_sigmask;
}

static inline bool test_and_clear_restore_sigmask(void)
{
        if (!current->restore_sigmask)
                return false;
        current->restore_sigmask = false;
        return true;
}
#endif	/* TIF_RESTORE_SIGMASK */

static inline void restore_saved_sigmask(void)
{
        if (test_and_clear_restore_sigmask())
                __set_current_blocked(&current->saved_sigmask);
}

static inline sigset_t *sigmask_to_save(void)
{
        sigset_t *res = &current->blocked;

        if (unlikely(test_restore_sigmask()))
                res = &current->saved_sigmask;
        return res;
}
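
/*
 * Illustrative sketch (not part of this header) of how these helpers
 * cooperate in a ppoll()/pselect()-style syscall that runs with a
 * temporary signal mask:
 *
 *	sigset_t newset;			// copied from userspace
 *
 *	current->saved_sigmask = current->blocked;
 *	set_current_blocked(&newset);
 *	ret = do_the_wait();			// hypothetical helper
 *	if (ret == -EINTR)
 *		set_restore_sigmask();		// defer the restore
 *	else
 *		__set_current_blocked(&current->saved_sigmask);
 *
 * When set_restore_sigmask() was used, signal delivery consults
 * sigmask_to_save()/restore_saved_sigmask() so the original mask is
 * reinstated only after the pending signal has been delivered.
 */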

static inline int kill_cad_pid(int sig, int priv)
{
        return kill_pid(cad_pid, sig, priv);
}

/* These can be the second arg to send_sig_info/send_group_sig_info.  */
#define SEND_SIG_NOINFO	((struct siginfo *) 0)
#define SEND_SIG_PRIV	((struct siginfo *) 1)
#define SEND_SIG_FORCED	((struct siginfo *) 2)

/*
 * True if we are on the alternate signal stack.
 */
static inline int on_sig_stack(unsigned long sp)
{
        /*
         * If the signal stack is SS_AUTODISARM then, by construction, we
         * can't be on the signal stack unless user code deliberately set
         * SS_AUTODISARM when we were already on it.
         *
         * This improves reliability: if user state gets corrupted such that
         * the stack pointer points very close to the end of the signal stack,
         * then this check will enable the signal to be handled anyway.
         */
        if (current->sas_ss_flags & SS_AUTODISARM)
                return 0;

#ifdef CONFIG_STACK_GROWSUP
        return sp >= current->sas_ss_sp &&
               sp - current->sas_ss_sp < current->sas_ss_size;
#else
        return sp > current->sas_ss_sp &&
               sp - current->sas_ss_sp <= current->sas_ss_size;
#endif
}

static inline int sas_ss_flags(unsigned long sp)
{
        if (!current->sas_ss_size)
                return SS_DISABLE;

        return on_sig_stack(sp) ? SS_ONSTACK : 0;
}

static inline void sas_ss_reset(struct task_struct *p)
{
        p->sas_ss_sp = 0;
        p->sas_ss_size = 0;
        p->sas_ss_flags = SS_DISABLE;
}

static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig)
{
        if (unlikely((ksig->ka.sa.sa_flags & SA_ONSTACK)) && !sas_ss_flags(sp))
#ifdef CONFIG_STACK_GROWSUP
                return current->sas_ss_sp;
#else
                return current->sas_ss_sp + current->sas_ss_size;
#endif
        return sp;
}
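
/*
 * Illustrative note (not part of this header): arch signal-delivery
 * code uses sigsp() to decide where to build the signal frame.  If the
 * handler was registered with SA_ONSTACK and the task is not already
 * on the alternate stack, the frame starts at the far end of sas_ss_sp
 * (stacks grow down unless CONFIG_STACK_GROWSUP); otherwise delivery
 * continues on the current stack:
 *
 *	frame = sigsp(regs->sp, ksig) - sizeof(struct rt_sigframe);
 *
 * (regs, ksig and rt_sigframe here stand in for the arch's own types.)
 */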

extern void __cleanup_sighand(struct sighand_struct *);
extern void flush_itimer_signals(void);

#define tasklist_empty() \
        list_empty(&init_task.tasks)

#define next_task(p) \
        list_entry_rcu((p)->tasks.next, struct task_struct, tasks)

#define for_each_process(p) \
        for (p = &init_task ; (p = next_task(p)) != &init_task ; )

extern bool current_is_single_threaded(void);

/*
 * Careful: do_each_thread/while_each_thread is a double loop so
 *          'break' will not work as expected - use goto instead.
 */
#define do_each_thread(g, t) \
        for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do

#define while_each_thread(g, t) \
        while ((t = next_thread(t)) != g)

#define __for_each_thread(signal, t)	\
        list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node)

#define for_each_thread(p, t)		\
        __for_each_thread((p)->signal, t)

/* Careful: this is a double loop, 'break' won't work as expected. */
#define for_each_process_thread(p, t)	\
        for_each_process(p) for_each_thread(p, t)
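
/*
 * Illustrative sketch (not part of this header): these iterators walk
 * RCU-protected lists, so readers that do not hold tasklist_lock must
 * wrap the walk in an RCU read-side critical section:
 *
 *	struct task_struct *p, *t;
 *
 *	rcu_read_lock();
 *	for_each_process_thread(p, t)
 *		inspect_thread(t);		// hypothetical helper
 *	rcu_read_unlock();
 */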

typedef int (*proc_visitor)(struct task_struct *p, void *data);
void walk_process_tree(struct task_struct *top, proc_visitor, void *);

static inline int get_nr_threads(struct task_struct *tsk)
{
        return tsk->signal->nr_threads;
}

static inline bool thread_group_leader(struct task_struct *p)
{
        return p->exit_signal >= 0;
}

/* Due to the insanities of de_thread it is possible for a process
 * to have the pid of the thread group leader without actually being
 * the thread group leader.  For iteration through the pids in proc
 * all we care about is that we have a task with the appropriate
 * pid, we don't actually care if we have the right task.
 */
static inline bool has_group_leader_pid(struct task_struct *p)
{
        return task_pid(p) == p->signal->leader_pid;
}

static inline
bool same_thread_group(struct task_struct *p1, struct task_struct *p2)
{
        return p1->signal == p2->signal;
}

static inline struct task_struct *next_thread(const struct task_struct *p)
{
        return list_entry_rcu(p->thread_group.next,
                              struct task_struct, thread_group);
}

static inline int thread_group_empty(struct task_struct *p)
{
        return list_empty(&p->thread_group);
}

#define delay_group_leader(p) \
        (thread_group_leader(p) && !thread_group_empty(p))

extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
                                                  unsigned long *flags);

static inline struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
                                                       unsigned long *flags)
{
        struct sighand_struct *ret;

        ret = __lock_task_sighand(tsk, flags);
        (void)__cond_lock(&tsk->sighand->siglock, ret);
        return ret;
}

static inline void unlock_task_sighand(struct task_struct *tsk,
                                       unsigned long *flags)
{
        spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
}
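
/*
 * Illustrative sketch (not part of this header): lock_task_sighand()
 * returns NULL if the task has already exited and its sighand is gone,
 * so callers must check the result before touching signal state:
 *
 *	unsigned long flags;
 *
 *	if (lock_task_sighand(task, &flags)) {
 *		inspect_signal_state(task);	// hypothetical helper
 *		unlock_task_sighand(task, &flags);
 *	}
 */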

static inline unsigned long task_rlimit(const struct task_struct *tsk,
                                        unsigned int limit)
{
        return READ_ONCE(tsk->signal->rlim[limit].rlim_cur);
}

static inline unsigned long task_rlimit_max(const struct task_struct *tsk,
                                            unsigned int limit)
{
        return READ_ONCE(tsk->signal->rlim[limit].rlim_max);
}

static inline unsigned long rlimit(unsigned int limit)
{
        return task_rlimit(current, limit);
}

static inline unsigned long rlimit_max(unsigned int limit)
{
        return task_rlimit_max(current, limit);
}
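
/*
 * Illustrative sketch (not part of this header): resource-limit checks
 * read the soft limit through these lockless accessors, e.g. enforcing
 * an address-space cap before growing a mapping:
 *
 *	if (new_size > rlimit(RLIMIT_AS))
 *		return -ENOMEM;
 */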

#endif /* _LINUX_SCHED_SIGNAL_H */