/* include/linux/sched/signal.h */
#ifndef _LINUX_SCHED_SIGNAL_H
#define _LINUX_SCHED_SIGNAL_H

#include <linux/rculist.h>
#include <linux/signal.h>
#include <linux/cred.h>
#include <linux/sched.h>
#include <linux/sched/jobctl.h>
#include <linux/sched/task.h>

/*
 * Types defining task->signal and task->sighand and APIs using them:
 */

struct sighand_struct {
        atomic_t count;
        struct k_sigaction action[_NSIG];
        spinlock_t siglock;
        wait_queue_head_t signalfd_wqh;
};

/*
 * NOTE! "signal_struct" does not have its own
 * locking, because a shared signal_struct always
 * implies a shared sighand_struct, so locking
 * sighand_struct is always a proper superset of
 * the locking of signal_struct.
 */
struct signal_struct {
        atomic_t sigcnt;
        atomic_t live;
        int nr_threads;
        struct list_head thread_head;

        wait_queue_head_t wait_chldexit; /* for wait4() */

        /* current thread group signal load-balancing target: */
        struct task_struct *curr_target;

        /* shared signal handling: */
        struct sigpending shared_pending;

        /* thread group exit support */
        int group_exit_code;
        /* overloaded:
         * - notify group_exit_task when ->count is equal to notify_count
         * - everyone except group_exit_task is stopped during signal delivery
         *   of fatal signals, group_exit_task processes the signal.
         */
        int notify_count;
        struct task_struct *group_exit_task;

        /* thread group stop support, overloads group_exit_code too */
        int group_stop_count;
        unsigned int flags; /* see SIGNAL_* flags below */

        /*
         * PR_SET_CHILD_SUBREAPER marks a process, like a service
         * manager, to re-parent orphan (double-forking) child processes
         * to this process instead of 'init'. The service manager is
         * able to receive SIGCHLD signals and is able to investigate
         * the process until it calls wait(). All children of this
         * process will inherit a flag if they should look for a
         * child_subreaper process at exit.
         */
        unsigned int is_child_subreaper:1;
        unsigned int has_child_subreaper:1;

#ifdef CONFIG_POSIX_TIMERS

        /* POSIX.1b Interval Timers */
        int posix_timer_id;
        struct list_head posix_timers;

        /* ITIMER_REAL timer for the process */
        struct hrtimer real_timer;
        ktime_t it_real_incr;

        /*
         * ITIMER_PROF and ITIMER_VIRTUAL timers for the process; we use
         * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing the array, as these
         * values are defined to 0 and 1 respectively.
         */
        struct cpu_itimer it[2];

        /*
         * Thread group totals for process CPU timers.
         * See thread_group_cputimer(), et al, for details.
         */
        struct thread_group_cputimer cputimer;

        /* Earliest-expiration cache. */
        struct task_cputime cputime_expires;

        struct list_head cpu_timers[3];

#endif

        struct pid *leader_pid;

#ifdef CONFIG_NO_HZ_FULL
        atomic_t tick_dep_mask;
#endif

        struct pid *tty_old_pgrp;

        /* boolean value for session group leader */
        int leader;

        struct tty_struct *tty; /* NULL if no tty */

#ifdef CONFIG_SCHED_AUTOGROUP
        struct autogroup *autogroup;
#endif
        /*
         * Cumulative resource counters for dead threads in the group,
         * and for reaped dead child processes forked by this group.
         * Live threads maintain their own counters and add to these
         * in __exit_signal, except for the group leader.
         */
        seqlock_t stats_lock;
        u64 utime, stime, cutime, cstime;
        u64 gtime;
        u64 cgtime;
        struct prev_cputime prev_cputime;
        unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
        unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
        unsigned long inblock, oublock, cinblock, coublock;
        unsigned long maxrss, cmaxrss;
        struct task_io_accounting ioac;

        /*
         * Cumulative ns of scheduled CPU time for dead threads in the
         * group, not including a zombie group leader. (This only differs
         * from jiffies_to_ns(utime + stime) if sched_clock uses something
         * other than jiffies.)
         */
        unsigned long long sum_sched_runtime;

        /*
         * We don't bother to synchronize most readers of this at all,
         * because there is no reader checking a limit that actually needs
         * to get both rlim_cur and rlim_max atomically, and either one
         * alone is a single word that can safely be read normally.
         * getrlimit/setrlimit use task_lock(current->group_leader) to
         * protect this instead of the siglock, because they really
         * have no need to disable irqs.
         */
        struct rlimit rlim[RLIM_NLIMITS];

#ifdef CONFIG_BSD_PROCESS_ACCT
        struct pacct_struct pacct; /* per-process accounting information */
#endif
#ifdef CONFIG_TASKSTATS
        struct taskstats *stats;
#endif
#ifdef CONFIG_AUDIT
        unsigned audit_tty;
        struct tty_audit_buf *tty_audit_buf;
#endif

        /*
         * Thread is the potential origin of an oom condition; kill first on
         * oom.
         */
        bool oom_flag_origin;
        short oom_score_adj;      /* OOM kill score adjustment */
        short oom_score_adj_min;  /* OOM kill score adjustment min value.
                                   * Only settable by CAP_SYS_RESOURCE. */
        struct mm_struct *oom_mm; /* recorded mm when the thread group got
                                   * killed by the oom killer */

        struct mutex cred_guard_mutex; /* guard against foreign influences on
                                        * credential calculations
                                        * (notably ptrace) */
};

/*
 * Bits in flags field of signal_struct.
 */
#define SIGNAL_STOP_STOPPED   0x00000001 /* job control stop in effect */
#define SIGNAL_STOP_CONTINUED 0x00000002 /* SIGCONT since WCONTINUED reap */
#define SIGNAL_GROUP_EXIT     0x00000004 /* group exit in progress */
#define SIGNAL_GROUP_COREDUMP 0x00000008 /* coredump in progress */
/*
 * Pending notifications to parent.
 */
#define SIGNAL_CLD_STOPPED   0x00000010
#define SIGNAL_CLD_CONTINUED 0x00000020
#define SIGNAL_CLD_MASK      (SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)

#define SIGNAL_UNKILLABLE    0x00000040 /* for init: ignore fatal signals */

#define SIGNAL_STOP_MASK (SIGNAL_CLD_MASK | SIGNAL_STOP_STOPPED | \
                          SIGNAL_STOP_CONTINUED)

static inline void signal_set_stop_flags(struct signal_struct *sig,
                                         unsigned int flags)
{
        WARN_ON(sig->flags & (SIGNAL_GROUP_EXIT|SIGNAL_GROUP_COREDUMP));
        sig->flags = (sig->flags & ~SIGNAL_STOP_MASK) | flags;
}

/* If true, all threads except ->group_exit_task have pending SIGKILL */
static inline int signal_group_exit(const struct signal_struct *sig)
{
        return (sig->flags & SIGNAL_GROUP_EXIT) ||
               (sig->group_exit_task != NULL);
}

extern void flush_signals(struct task_struct *);
extern void ignore_signals(struct task_struct *);
extern void flush_signal_handlers(struct task_struct *, int force_default);
extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);

static inline int kernel_dequeue_signal(siginfo_t *info)
{
        struct task_struct *tsk = current;
        siginfo_t __info;
        int ret;

        spin_lock_irq(&tsk->sighand->siglock);
        ret = dequeue_signal(tsk, &tsk->blocked, info ?: &__info);
        spin_unlock_irq(&tsk->sighand->siglock);

        return ret;
}

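/*
 * Illustrative usage sketch, not part of the original header: a kernel
 * thread that has opted in to receiving a signal (for example via
 * allow_signal(), declared elsewhere) can drain a pending signal so it
 * does not stay queued. The helper name below is hypothetical.
 */
static inline void example_drain_pending_signal(void)
{
        if (signal_pending(current))
                kernel_dequeue_signal(NULL);
}
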
static inline void kernel_signal_stop(void)
{
        spin_lock_irq(&current->sighand->siglock);
        if (current->jobctl & JOBCTL_STOP_DEQUEUED)
                __set_current_state(TASK_STOPPED);
        spin_unlock_irq(&current->sighand->siglock);

        schedule();
}
extern int send_sig_info(int, struct siginfo *, struct task_struct *);
extern int force_sigsegv(int, struct task_struct *);
extern int force_sig_info(int, struct siginfo *, struct task_struct *);
extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *,
                                 const struct cred *, u32);
extern int kill_pgrp(struct pid *pid, int sig, int priv);
extern int kill_pid(struct pid *pid, int sig, int priv);
extern int kill_proc_info(int, struct siginfo *, pid_t);
extern __must_check bool do_notify_parent(struct task_struct *, int);
extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
extern void force_sig(int, struct task_struct *);
extern int send_sig(int, struct task_struct *, int);
extern int zap_other_threads(struct task_struct *p);
extern struct sigqueue *sigqueue_alloc(void);
extern void sigqueue_free(struct sigqueue *);
extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group);
extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);

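/*
 * Illustrative usage sketch, not part of the original header: sending a
 * signal to a task from kernel code with send_sig(). The last argument
 * selects kernel-originated ("privileged") delivery; SIGHUP and the
 * helper name are just an example.
 */
static inline void example_send_sighup(struct task_struct *p)
{
        send_sig(SIGHUP, p, 1);
}
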
#ifdef TIF_RESTORE_SIGMASK
/*
 * Legacy restore_sigmask accessors. These are inefficient on
 * SMP architectures because they require atomic operations.
 */

/**
 * set_restore_sigmask() - make sure saved_sigmask processing gets done
 *
 * This sets TIF_RESTORE_SIGMASK and ensures that the arch signal code
 * will run before returning to user mode, to process the flag. For
 * all callers, TIF_SIGPENDING is already set or it's no harm to set
 * it. TIF_RESTORE_SIGMASK need not be in the set of bits that the
 * arch code will notice on return to user mode, in case those bits
 * are scarce. We set TIF_SIGPENDING here to ensure that the arch
 * signal code always gets run when TIF_RESTORE_SIGMASK is set.
 */
static inline void set_restore_sigmask(void)
{
        set_thread_flag(TIF_RESTORE_SIGMASK);
        WARN_ON(!test_thread_flag(TIF_SIGPENDING));
}
static inline void clear_restore_sigmask(void)
{
        clear_thread_flag(TIF_RESTORE_SIGMASK);
}
static inline bool test_restore_sigmask(void)
{
        return test_thread_flag(TIF_RESTORE_SIGMASK);
}
static inline bool test_and_clear_restore_sigmask(void)
{
        return test_and_clear_thread_flag(TIF_RESTORE_SIGMASK);
}

#else /* TIF_RESTORE_SIGMASK */

/* Higher-quality implementation, used if TIF_RESTORE_SIGMASK doesn't exist. */
static inline void set_restore_sigmask(void)
{
        current->restore_sigmask = true;
        WARN_ON(!test_thread_flag(TIF_SIGPENDING));
}
static inline void clear_restore_sigmask(void)
{
        current->restore_sigmask = false;
}
static inline bool test_restore_sigmask(void)
{
        return current->restore_sigmask;
}
static inline bool test_and_clear_restore_sigmask(void)
{
        if (!current->restore_sigmask)
                return false;
        current->restore_sigmask = false;
        return true;
}
#endif

static inline void restore_saved_sigmask(void)
{
        if (test_and_clear_restore_sigmask())
                __set_current_blocked(&current->saved_sigmask);
}

static inline sigset_t *sigmask_to_save(void)
{
        sigset_t *res = &current->blocked;
        if (unlikely(test_restore_sigmask()))
                res = &current->saved_sigmask;
        return res;
}

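/*
 * Illustrative usage sketch, not part of the original header, loosely
 * modelled on how pselect()/ppoll()-style syscalls use the saved_sigmask
 * machinery. The function name and the schedule_timeout_interruptible()
 * "wait" stand-in are hypothetical; the pattern of saving the old mask,
 * installing a temporary one and deferring the restore to the signal
 * return path via set_restore_sigmask() is the real one.
 */
static inline long example_wait_with_temp_sigmask(sigset_t *newmask)
{
        sigset_t saved;
        long ret;

        /* SIGKILL and SIGSTOP can never be blocked. */
        sigdelsetmask(newmask, sigmask(SIGKILL) | sigmask(SIGSTOP));
        sigprocmask(SIG_SETMASK, newmask, &saved);

        ret = schedule_timeout_interruptible(HZ); /* stand-in for the real wait */

        if (signal_pending(current)) {
                /*
                 * Restore the original mask only after the newly unblocked
                 * signal has been delivered on the way back to user mode.
                 */
                current->saved_sigmask = saved;
                set_restore_sigmask();
                ret = -ERESTARTNOHAND;
        } else {
                sigprocmask(SIG_SETMASK, &saved, NULL);
        }
        return ret;
}
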
static inline int kill_cad_pid(int sig, int priv)
{
        return kill_pid(cad_pid, sig, priv);
}

/* These can be the second arg to send_sig_info/send_group_sig_info. */
#define SEND_SIG_NOINFO ((struct siginfo *) 0)
#define SEND_SIG_PRIV   ((struct siginfo *) 1)
#define SEND_SIG_FORCED ((struct siginfo *) 2)

/*
 * True if we are on the alternate signal stack.
 */
static inline int on_sig_stack(unsigned long sp)
{
        /*
         * If the signal stack is SS_AUTODISARM then, by construction, we
         * can't be on the signal stack unless user code deliberately set
         * SS_AUTODISARM when we were already on it.
         *
         * This improves reliability: if user state gets corrupted such that
         * the stack pointer points very close to the end of the signal stack,
         * then this check will enable the signal to be handled anyway.
         */
        if (current->sas_ss_flags & SS_AUTODISARM)
                return 0;

#ifdef CONFIG_STACK_GROWSUP
        return sp >= current->sas_ss_sp &&
               sp - current->sas_ss_sp < current->sas_ss_size;
#else
        return sp > current->sas_ss_sp &&
               sp - current->sas_ss_sp <= current->sas_ss_size;
#endif
}

static inline int sas_ss_flags(unsigned long sp)
{
        if (!current->sas_ss_size)
                return SS_DISABLE;

        return on_sig_stack(sp) ? SS_ONSTACK : 0;
}

static inline void sas_ss_reset(struct task_struct *p)
{
        p->sas_ss_sp = 0;
        p->sas_ss_size = 0;
        p->sas_ss_flags = SS_DISABLE;
}

static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig)
{
        if (unlikely((ksig->ka.sa.sa_flags & SA_ONSTACK)) && !sas_ss_flags(sp))
#ifdef CONFIG_STACK_GROWSUP
                return current->sas_ss_sp;
#else
                return current->sas_ss_sp + current->sas_ss_size;
#endif
        return sp;
}

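/*
 * Illustrative usage sketch, not part of the original header: arch signal
 * frame setup typically uses sigsp() to decide where the user-space frame
 * goes, then reserves space below it and aligns the result. The helper
 * name, the fixed frame size and the alignment here are hypothetical.
 */
static inline unsigned long example_sigframe_pointer(struct ksignal *ksig,
                                                     unsigned long usersp)
{
        unsigned long sp = sigsp(usersp, ksig);

        /* Reserve room for a (hypothetical) frame, keep 16-byte alignment. */
        sp -= 128;
        return sp & ~15UL;
}
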
extern void __cleanup_sighand(struct sighand_struct *);
extern void flush_itimer_signals(void);

#define tasklist_empty() \
        list_empty(&init_task.tasks)

#define next_task(p) \
        list_entry_rcu((p)->tasks.next, struct task_struct, tasks)

#define for_each_process(p) \
        for (p = &init_task ; (p = next_task(p)) != &init_task ; )

extern bool current_is_single_threaded(void);

/*
 * Careful: do_each_thread/while_each_thread is a double loop so
 * 'break' will not work as expected - use goto instead.
 */
#define do_each_thread(g, t) \
        for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do

#define while_each_thread(g, t) \
        while ((t = next_thread(t)) != g)

#define __for_each_thread(signal, t) \
        list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node)

#define for_each_thread(p, t) \
        __for_each_thread((p)->signal, t)

/* Careful: this is a double loop, 'break' won't work as expected. */
#define for_each_process_thread(p, t) \
        for_each_process(p) for_each_thread(p, t)

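/*
 * Illustrative usage sketch, not part of the original header: walking
 * every thread in the system. The traversal must be protected by RCU
 * (or tasklist_lock); the helper name and the pr_info() output (which
 * assumes <linux/printk.h> is available) are just an example.
 */
static inline void example_dump_all_threads(void)
{
        struct task_struct *p, *t;

        rcu_read_lock();
        for_each_process_thread(p, t)
                pr_info("tgid %d pid %d comm %s\n",
                        task_tgid_nr(t), task_pid_nr(t), t->comm);
        rcu_read_unlock();
}
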
typedef int (*proc_visitor)(struct task_struct *p, void *data);
void walk_process_tree(struct task_struct *top, proc_visitor, void *);

static inline int get_nr_threads(struct task_struct *tsk)
{
        return tsk->signal->nr_threads;
}

static inline bool thread_group_leader(struct task_struct *p)
{
        return p->exit_signal >= 0;
}

/*
 * Due to the insanities of de_thread it is possible for a process
 * to have the pid of the thread group leader without actually being
 * the thread group leader. For iteration through the pids in proc
 * all we care about is that we have a task with the appropriate
 * pid; we don't actually care if we have the right task.
 */
static inline bool has_group_leader_pid(struct task_struct *p)
{
        return task_pid(p) == p->signal->leader_pid;
}

static inline
bool same_thread_group(struct task_struct *p1, struct task_struct *p2)
{
        return p1->signal == p2->signal;
}

static inline struct task_struct *next_thread(const struct task_struct *p)
{
        return list_entry_rcu(p->thread_group.next,
                              struct task_struct, thread_group);
}

static inline int thread_group_empty(struct task_struct *p)
{
        return list_empty(&p->thread_group);
}

#define delay_group_leader(p) \
                (thread_group_leader(p) && !thread_group_empty(p))

extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
                                                  unsigned long *flags);

static inline struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
                                                       unsigned long *flags)
{
        struct sighand_struct *ret;

        ret = __lock_task_sighand(tsk, flags);
        (void)__cond_lock(&tsk->sighand->siglock, ret);
        return ret;
}

static inline void unlock_task_sighand(struct task_struct *tsk,
                                       unsigned long *flags)
{
        spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
}

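/*
 * Illustrative usage sketch, not part of the original header: the
 * canonical lock_task_sighand() pattern. __lock_task_sighand() returns
 * NULL once the task's ->sighand has been detached (the task is dead),
 * so the return value must be checked before touching signal state.
 * The helper name and the field read here are just an example.
 */
static inline int example_read_group_exit_code(struct task_struct *tsk)
{
        unsigned long flags;
        int code = 0;

        if (lock_task_sighand(tsk, &flags)) {
                code = tsk->signal->group_exit_code;
                unlock_task_sighand(tsk, &flags);
        }
        return code;
}
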
static inline unsigned long task_rlimit(const struct task_struct *tsk,
                                        unsigned int limit)
{
        return READ_ONCE(tsk->signal->rlim[limit].rlim_cur);
}

static inline unsigned long task_rlimit_max(const struct task_struct *tsk,
                                            unsigned int limit)
{
        return READ_ONCE(tsk->signal->rlim[limit].rlim_max);
}

static inline unsigned long rlimit(unsigned int limit)
{
        return task_rlimit(current, limit);
}

static inline unsigned long rlimit_max(unsigned int limit)
{
        return task_rlimit_max(current, limit);
}

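/*
 * Illustrative usage sketch, not part of the original header: checking a
 * resource limit for the current task. RLIM_INFINITY is larger than any
 * realistic count, so a plain comparison is enough; the helper name is
 * hypothetical.
 */
static inline bool example_within_nofile_limit(unsigned int nr_files)
{
        return nr_files < rlimit(RLIMIT_NOFILE);
}
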
#endif /* _LINUX_SCHED_SIGNAL_H */