#ifndef _LINUX_SCHED_SIGNAL_H
#define _LINUX_SCHED_SIGNAL_H

#include <linux/rculist.h>
#include <linux/signal.h>
#include <linux/cred.h>
#include <linux/sched.h>
#include <linux/sched/jobctl.h>
#include <linux/sched/task.h>

/*
 * Types defining task->signal and task->sighand and APIs using them:
 */

struct sighand_struct {
	atomic_t count;
	struct k_sigaction action[_NSIG];
	spinlock_t siglock;
	wait_queue_head_t signalfd_wqh;
};

/*
 * NOTE! "signal_struct" does not have its own
 * locking, because a shared signal_struct always
 * implies a shared sighand_struct, so locking
 * sighand_struct is always a proper superset of
 * the locking of signal_struct.
 */
struct signal_struct {
	int nr_threads;
	struct list_head thread_head;

	wait_queue_head_t wait_chldexit;	/* for wait4() */

	/* current thread group signal load-balancing target: */
	struct task_struct *curr_target;

	/* shared signal handling: */
	struct sigpending shared_pending;

	/* thread group exit support */
	int group_exit_code;
	/* overloaded:
	 * - notify group_exit_task when ->count is equal to notify_count
	 * - everyone except group_exit_task is stopped during signal delivery
	 *   of fatal signals, group_exit_task processes the signal.
	 */
	int notify_count;
	struct task_struct *group_exit_task;

	/* thread group stop support, overloads group_exit_code too */
	int group_stop_count;
	unsigned int flags;	/* see SIGNAL_* flags below */

	/*
	 * PR_SET_CHILD_SUBREAPER marks a process, like a service
	 * manager, to re-parent orphan (double-forking) child processes
	 * to this process instead of 'init'. The service manager is
	 * able to receive SIGCHLD signals and is able to investigate
	 * the process until it calls wait(). All children of this
	 * process will inherit a flag if they should look for a
	 * child_subreaper process at exit.
	 */
	unsigned int is_child_subreaper:1;
	unsigned int has_child_subreaper:1;

#ifdef CONFIG_POSIX_TIMERS

	/* POSIX.1b Interval Timers */
	struct list_head posix_timers;

	/* ITIMER_REAL timer for the process */
	struct hrtimer real_timer;

	/*
	 * ITIMER_PROF and ITIMER_VIRTUAL timers for the process, we use
	 * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing the array, as these
	 * values are defined to 0 and 1 respectively.
	 */
	struct cpu_itimer it[2];

	/*
	 * Thread group totals for process CPU timers.
	 * See thread_group_cputimer(), et al, for details.
	 */
	struct thread_group_cputimer cputimer;

	/* Earliest-expiration cache. */
	struct task_cputime cputime_expires;

	struct list_head cpu_timers[3];

#endif

	struct pid *leader_pid;

#ifdef CONFIG_NO_HZ_FULL
	atomic_t tick_dep_mask;
#endif

	struct pid *tty_old_pgrp;

	/* boolean value for session group leader */
	int leader;

	struct tty_struct *tty;	/* NULL if no tty */

#ifdef CONFIG_SCHED_AUTOGROUP
	struct autogroup *autogroup;
#endif
	/*
	 * Cumulative resource counters for dead threads in the group,
	 * and for reaped dead child processes forked by this group.
	 * Live threads maintain their own counters and add to these
	 * in __exit_signal, except for the group leader.
	 */
	seqlock_t stats_lock;
	u64 utime, stime, cutime, cstime;
	struct prev_cputime prev_cputime;
	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
	unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
	unsigned long inblock, oublock, cinblock, coublock;
	unsigned long maxrss, cmaxrss;
	struct task_io_accounting ioac;

	/*
	 * Cumulative ns of scheduled CPU time of dead threads in the
	 * group, not including a zombie group leader. (This only differs
	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
	 * other than jiffies.)
	 */
	unsigned long long sum_sched_runtime;

	/*
	 * We don't bother to synchronize most readers of this at all,
	 * because there is no reader checking a limit that actually needs
	 * to get both rlim_cur and rlim_max atomically, and either one
	 * alone is a single word that can safely be read normally.
	 * getrlimit/setrlimit use task_lock(current->group_leader) to
	 * protect this instead of the siglock, because they really
	 * have no need to disable irqs.
	 */
	struct rlimit rlim[RLIM_NLIMITS];

#ifdef CONFIG_BSD_PROCESS_ACCT
	struct pacct_struct pacct;	/* per-process accounting information */
#endif
#ifdef CONFIG_TASKSTATS
	struct taskstats *stats;
#endif
#ifdef CONFIG_AUDIT
	struct tty_audit_buf *tty_audit_buf;
#endif

	/*
	 * Thread is the potential origin of an oom condition; kill first on
	 * oom.
	 */
	bool oom_flag_origin;
	short oom_score_adj;		/* OOM kill score adjustment */
	short oom_score_adj_min;	/* OOM kill score adjustment min value.
					 * Only settable by CAP_SYS_RESOURCE. */
	struct mm_struct *oom_mm;	/* recorded mm when the thread group got
					 * killed by the oom killer */

	struct mutex cred_guard_mutex;	/* guard against foreign influences on
					 * credential calculations
					 * (notably, ptrace) */
};
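
/*
 * Illustrative sketch, not part of the original header: per the NOTE
 * above, signal_struct has no lock of its own, so writers take the
 * siglock of the paired sighand_struct. A hypothetical helper that
 * updates a signal_struct field (example_set_group_exit_code() is a
 * made-up name) would therefore look like:
 *
 *	static void example_set_group_exit_code(struct task_struct *tsk,
 *						int code)
 *	{
 *		spin_lock_irq(&tsk->sighand->siglock);
 *		tsk->signal->group_exit_code = code;
 *		spin_unlock_irq(&tsk->sighand->siglock);
 *	}
 */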

/*
 * Bits in flags field of signal_struct.
 */
#define SIGNAL_STOP_STOPPED	0x00000001 /* job control stop in effect */
#define SIGNAL_STOP_CONTINUED	0x00000002 /* SIGCONT since WCONTINUED reap */
#define SIGNAL_GROUP_EXIT	0x00000004 /* group exit in progress */
#define SIGNAL_GROUP_COREDUMP	0x00000008 /* coredump in progress */
/*
 * Pending notifications to parent.
 */
#define SIGNAL_CLD_STOPPED	0x00000010
#define SIGNAL_CLD_CONTINUED	0x00000020
#define SIGNAL_CLD_MASK		(SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)

#define SIGNAL_UNKILLABLE	0x00000040 /* for init: ignore fatal signals */

#define SIGNAL_STOP_MASK (SIGNAL_CLD_MASK | SIGNAL_STOP_STOPPED | \
			  SIGNAL_STOP_CONTINUED)

static inline void signal_set_stop_flags(struct signal_struct *sig,
					 unsigned int flags)
{
	WARN_ON(sig->flags & (SIGNAL_GROUP_EXIT|SIGNAL_GROUP_COREDUMP));
	sig->flags = (sig->flags & ~SIGNAL_STOP_MASK) | flags;
}
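
/*
 * Illustrative sketch (an assumption, not text from the original
 * header): callers are expected to hold the siglock, e.g. when job
 * control marks the group as stopped:
 *
 *	spin_lock_irq(&tsk->sighand->siglock);
 *	signal_set_stop_flags(tsk->signal, SIGNAL_STOP_STOPPED);
 *	spin_unlock_irq(&tsk->sighand->siglock);
 *
 * The WARN_ON above catches attempts to set stop flags on a group
 * that is already exiting or dumping core.
 */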

/* If true, all threads except ->group_exit_task have pending SIGKILL */
static inline int signal_group_exit(const struct signal_struct *sig)
{
	return (sig->flags & SIGNAL_GROUP_EXIT) ||
	       (sig->group_exit_task != NULL);
}

extern void flush_signals(struct task_struct *);
extern void ignore_signals(struct task_struct *);
extern void flush_signal_handlers(struct task_struct *, int force_default);
extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);

static inline int kernel_dequeue_signal(siginfo_t *info)
{
	struct task_struct *tsk = current;
	siginfo_t __info;
	int ret;

	spin_lock_irq(&tsk->sighand->siglock);
	ret = dequeue_signal(tsk, &tsk->blocked, info ?: &__info);
	spin_unlock_irq(&tsk->sighand->siglock);

	return ret;
}
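
/*
 * Illustrative sketch (an assumption, not text from the original
 * header): a kernel thread that has unblocked a signal can drain it
 * like this; passing NULL makes the helper fall back to its on-stack
 * siginfo when the caller only needs the signal number:
 *
 *	if (signal_pending(current)) {
 *		int signr = kernel_dequeue_signal(NULL);
 *
 *		if (signr == SIGKILL)
 *			goto shutdown;
 *	}
 */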

static inline void kernel_signal_stop(void)
{
	spin_lock_irq(&current->sighand->siglock);
	if (current->jobctl & JOBCTL_STOP_DEQUEUED)
		__set_current_state(TASK_STOPPED);
	spin_unlock_irq(&current->sighand->siglock);

	schedule();
}
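
/*
 * Illustrative sketch (an assumption, not text from the original
 * header): a task that has just dequeued a stop signal can complete
 * the job control stop, sleeping in TASK_STOPPED until SIGCONT:
 *
 *	if (kernel_dequeue_signal(NULL) == SIGSTOP)
 *		kernel_signal_stop();
 */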

extern int send_sig_info(int, struct siginfo *, struct task_struct *);
extern int force_sigsegv(int, struct task_struct *);
extern int force_sig_info(int, struct siginfo *, struct task_struct *);
extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *,
				const struct cred *, u32);
extern int kill_pgrp(struct pid *pid, int sig, int priv);
extern int kill_pid(struct pid *pid, int sig, int priv);
extern int kill_proc_info(int, struct siginfo *, pid_t);
extern __must_check bool do_notify_parent(struct task_struct *, int);
extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
extern void force_sig(int, struct task_struct *);
extern int send_sig(int, struct task_struct *, int);
extern int zap_other_threads(struct task_struct *p);
extern struct sigqueue *sigqueue_alloc(void);
extern void sigqueue_free(struct sigqueue *);
extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group);
extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);

#ifdef TIF_RESTORE_SIGMASK
/*
 * Legacy restore_sigmask accessors. These are inefficient on
 * SMP architectures because they require atomic operations.
 */

/**
 * set_restore_sigmask() - make sure saved_sigmask processing gets done
 *
 * This sets TIF_RESTORE_SIGMASK and ensures that the arch signal code
 * will run before returning to user mode, to process the flag. For
 * all callers, TIF_SIGPENDING is already set or it's no harm to set
 * it. TIF_RESTORE_SIGMASK need not be in the set of bits that the
 * arch code will notice on return to user mode, in case those bits
 * are scarce. We set TIF_SIGPENDING here to ensure that the arch
 * signal code always gets run when TIF_RESTORE_SIGMASK is set.
 */
static inline void set_restore_sigmask(void)
{
	set_thread_flag(TIF_RESTORE_SIGMASK);
	WARN_ON(!test_thread_flag(TIF_SIGPENDING));
}
static inline void clear_restore_sigmask(void)
{
	clear_thread_flag(TIF_RESTORE_SIGMASK);
}
static inline bool test_restore_sigmask(void)
{
	return test_thread_flag(TIF_RESTORE_SIGMASK);
}
static inline bool test_and_clear_restore_sigmask(void)
{
	return test_and_clear_thread_flag(TIF_RESTORE_SIGMASK);
}

#else /* TIF_RESTORE_SIGMASK */

/* Higher-quality implementation, used if TIF_RESTORE_SIGMASK doesn't exist. */
static inline void set_restore_sigmask(void)
{
	current->restore_sigmask = true;
	WARN_ON(!test_thread_flag(TIF_SIGPENDING));
}
static inline void clear_restore_sigmask(void)
{
	current->restore_sigmask = false;
}
static inline bool test_restore_sigmask(void)
{
	return current->restore_sigmask;
}
static inline bool test_and_clear_restore_sigmask(void)
{
	if (!current->restore_sigmask)
		return false;
	current->restore_sigmask = false;
	return true;
}
#endif /* TIF_RESTORE_SIGMASK */
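
/*
 * Illustrative sketch (an assumption, not text from the original
 * header): the classic user of this machinery is a sigsuspend()-style
 * syscall. It stashes the old mask, installs a temporary one, and sets
 * the restore flag so that the signal-delivery path (sigmask_to_save()
 * and restore_saved_sigmask() below) reinstates the old mask once the
 * handler frame has been set up:
 *
 *	current->saved_sigmask = current->blocked;
 *	set_current_blocked(&newset);
 *
 *	while (!signal_pending(current)) {
 *		__set_current_state(TASK_INTERRUPTIBLE);
 *		schedule();
 *	}
 *	set_restore_sigmask();
 *	return -ERESTARTNOHAND;
 */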

static inline void restore_saved_sigmask(void)
{
	if (test_and_clear_restore_sigmask())
		__set_current_blocked(&current->saved_sigmask);
}

static inline sigset_t *sigmask_to_save(void)
{
	sigset_t *res = &current->blocked;
	if (unlikely(test_restore_sigmask()))
		res = &current->saved_sigmask;
	return res;
}

static inline int kill_cad_pid(int sig, int priv)
{
	return kill_pid(cad_pid, sig, priv);
}

/* These can be the second arg to send_sig_info/send_group_sig_info. */
#define SEND_SIG_NOINFO	((struct siginfo *) 0)
#define SEND_SIG_PRIV	((struct siginfo *) 1)
#define SEND_SIG_FORCED	((struct siginfo *) 2)
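
/*
 * Illustrative sketch (an assumption, not text from the original
 * header): these constants stand in for a real siginfo pointer and
 * mark the signal as kernel-generated, e.g.:
 *
 *	send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
 *	send_sig_info(SIGKILL, SEND_SIG_FORCED, p);
 */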

/*
 * True if we are on the alternate signal stack.
 */
static inline int on_sig_stack(unsigned long sp)
{
	/*
	 * If the signal stack is SS_AUTODISARM then, by construction, we
	 * can't be on the signal stack unless user code deliberately set
	 * SS_AUTODISARM when we were already on it.
	 *
	 * This improves reliability: if user state gets corrupted such that
	 * the stack pointer points very close to the end of the signal stack,
	 * then this check will enable the signal to be handled anyway.
	 */
	if (current->sas_ss_flags & SS_AUTODISARM)
		return 0;

#ifdef CONFIG_STACK_GROWSUP
	return sp >= current->sas_ss_sp &&
	       sp - current->sas_ss_sp < current->sas_ss_size;
#else
	return sp > current->sas_ss_sp &&
	       sp - current->sas_ss_sp <= current->sas_ss_size;
#endif
}
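
/*
 * Worked example (illustrative): with a grow-down stack, sas_ss_sp ==
 * 0x1000 and sas_ss_size == 0x1000, the check above accepts sp values
 * in (0x1000, 0x2000]: sp == 0x2000, the very first handler sp, counts
 * as on-stack, while sp == 0x1000 has already overflowed and does not.
 */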

static inline int sas_ss_flags(unsigned long sp)
{
	if (!current->sas_ss_size)
		return SS_DISABLE;

	return on_sig_stack(sp) ? SS_ONSTACK : 0;
}

static inline void sas_ss_reset(struct task_struct *p)
{
	p->sas_ss_sp = 0;
	p->sas_ss_size = 0;
	p->sas_ss_flags = SS_DISABLE;
}

static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig)
{
	if (unlikely((ksig->ka.sa.sa_flags & SA_ONSTACK)) && !sas_ss_flags(sp))
#ifdef CONFIG_STACK_GROWSUP
		return current->sas_ss_sp;
#else
		return current->sas_ss_sp + current->sas_ss_size;
#endif
	return sp;
}
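
/*
 * Illustrative sketch (an assumption, not text from the original
 * header): architecture signal-frame setup typically starts from this
 * value when building the handler frame ("regs->sp" stands in for
 * however the architecture names the user stack pointer):
 *
 *	unsigned long sp = sigsp(regs->sp, ksig);
 */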

extern void __cleanup_sighand(struct sighand_struct *);

extern void flush_itimer_signals(void);

#define tasklist_empty() \
	list_empty(&init_task.tasks)

#define next_task(p) \
	list_entry_rcu((p)->tasks.next, struct task_struct, tasks)

#define for_each_process(p) \
	for (p = &init_task ; (p = next_task(p)) != &init_task ; )

extern bool current_is_single_threaded(void);

/*
 * Careful: do_each_thread/while_each_thread is a double loop so
 *          'break' will not work as expected - use goto instead.
 */
#define do_each_thread(g, t) \
	for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do

#define while_each_thread(g, t) \
	while ((t = next_thread(t)) != g)

#define __for_each_thread(signal, t) \
	list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node)

#define for_each_thread(p, t) \
	__for_each_thread((p)->signal, t)

/* Careful: this is a double loop, 'break' won't work as expected. */
#define for_each_process_thread(p, t)	\
	for_each_process(p) for_each_thread(p, t)
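
/*
 * Illustrative sketch (an assumption, not text from the original
 * header): the task lists are RCU-managed, so iteration must happen
 * under rcu_read_lock() (or with tasklist_lock held):
 *
 *	struct task_struct *p, *t;
 *
 *	rcu_read_lock();
 *	for_each_process_thread(p, t)
 *		pr_info("pid %d: %s\n", t->pid, t->comm);
 *	rcu_read_unlock();
 */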

typedef int (*proc_visitor)(struct task_struct *p, void *data);
void walk_process_tree(struct task_struct *top, proc_visitor, void *);

static inline int get_nr_threads(struct task_struct *tsk)
{
	return tsk->signal->nr_threads;
}

static inline bool thread_group_leader(struct task_struct *p)
{
	return p->exit_signal >= 0;
}

/*
 * Due to the insanities of de_thread it is possible for a process
 * to have the pid of the thread group leader without actually being
 * the thread group leader. For iteration through the pids in proc
 * all we care about is that we have a task with the appropriate
 * pid, we don't actually care if we have the right task.
 */
static inline bool has_group_leader_pid(struct task_struct *p)
{
	return task_pid(p) == p->signal->leader_pid;
}

static inline
bool same_thread_group(struct task_struct *p1, struct task_struct *p2)
{
	return p1->signal == p2->signal;
}

static inline struct task_struct *next_thread(const struct task_struct *p)
{
	return list_entry_rcu(p->thread_group.next,
			      struct task_struct, thread_group);
}

static inline int thread_group_empty(struct task_struct *p)
{
	return list_empty(&p->thread_group);
}

#define delay_group_leader(p) \
	(thread_group_leader(p) && !thread_group_empty(p))

extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
						  unsigned long *flags);

static inline struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
						       unsigned long *flags)
{
	struct sighand_struct *ret;

	ret = __lock_task_sighand(tsk, flags);
	(void)__cond_lock(&tsk->sighand->siglock, ret);
	return ret;
}

static inline void unlock_task_sighand(struct task_struct *tsk,
				       unsigned long *flags)
{
	spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
}
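
/*
 * Illustrative sketch (an assumption, not text from the original
 * header): __lock_task_sighand() returns NULL when the task's sighand
 * is already gone (the task is being released), so callers must check
 * the result:
 *
 *	unsigned long flags;
 *
 *	if (lock_task_sighand(tsk, &flags)) {
 *		... siglock held, interrupts saved ...
 *		unlock_task_sighand(tsk, &flags);
 *	}
 */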

static inline unsigned long task_rlimit(const struct task_struct *tsk,
					unsigned int limit)
{
	return READ_ONCE(tsk->signal->rlim[limit].rlim_cur);
}

static inline unsigned long task_rlimit_max(const struct task_struct *tsk,
					    unsigned int limit)
{
	return READ_ONCE(tsk->signal->rlim[limit].rlim_max);
}

static inline unsigned long rlimit(unsigned int limit)
{
	return task_rlimit(current, limit);
}

static inline unsigned long rlimit_max(unsigned int limit)
{
	return task_rlimit_max(current, limit);
}
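
/*
 * Illustrative sketch (an assumption, not text from the original
 * header): checking the current task's soft limit, e.g. the open-file
 * limit. The READ_ONCE in the accessors matches the unlocked-readers
 * policy documented at the rlim[] declaration in signal_struct:
 *
 *	if (nr_files >= rlimit(RLIMIT_NOFILE))
 *		return -EMFILE;
 */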

#endif /* _LINUX_SCHED_SIGNAL_H */