/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_SIGNAL_H
#define _LINUX_SCHED_SIGNAL_H

#include <linux/rculist.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/sched/jobctl.h>
#include <linux/sched/task.h>
#include <linux/cred.h>
#include <linux/refcount.h>
#include <linux/posix-timers.h>
#include <linux/mm_types.h>
#include <asm/ptrace.h>

/*
 * Types defining task->signal and task->sighand and APIs using them:
 */

struct sighand_struct {
	spinlock_t		siglock;
	refcount_t		count;
	wait_queue_head_t	signalfd_wqh;
	struct k_sigaction	action[_NSIG];
};

/*
 * Per-process accounting stats:
 */
struct pacct_struct {
	int			ac_flag;
	long			ac_exitcode;
	unsigned long		ac_mem;
	u64			ac_utime, ac_stime;
	unsigned long		ac_minflt, ac_majflt;
};

struct cpu_itimer {
	u64 expires;
	u64 incr;
};

/*
 * This is the atomic variant of task_cputime, which can be used for
 * storing and updating task_cputime statistics without locking.
 */
struct task_cputime_atomic {
	atomic64_t utime;
	atomic64_t stime;
	atomic64_t sum_exec_runtime;
};

#define INIT_CPUTIME_ATOMIC \
	(struct task_cputime_atomic) { \
		.utime = ATOMIC64_INIT(0), \
		.stime = ATOMIC64_INIT(0), \
		.sum_exec_runtime = ATOMIC64_INIT(0), \
	}
/**
 * struct thread_group_cputimer - thread group interval timer counts
 * @cputime_atomic:	atomic thread group interval timers.
 *
 * This structure contains the version of task_cputime, above, that is
 * used for thread group CPU timer calculations.
 */
struct thread_group_cputimer {
	struct task_cputime_atomic cputime_atomic;
};

struct multiprocess_signals {
	sigset_t signal;
	struct hlist_node node;
};

/*
 * NOTE! "signal_struct" does not have its own
 * locking, because a shared signal_struct always
 * implies a shared sighand_struct, so locking
 * sighand_struct is always a proper superset of
 * the locking of signal_struct.
 */
struct signal_struct {
	refcount_t		sigcnt;
	atomic_t		live;
	int			nr_threads;
	struct list_head	thread_head;

	wait_queue_head_t	wait_chldexit;	/* for wait4() */

	/* current thread group signal load-balancing target: */
	struct task_struct	*curr_target;

	/* shared signal handling: */
	struct sigpending	shared_pending;

	/* For collecting multiprocess signals during fork */
	struct hlist_head	multiprocess;

	/* thread group exit support */
	int			group_exit_code;
	/* overloaded:
	 * - notify group_exit_task when ->count is equal to notify_count
	 * - everyone except group_exit_task is stopped during signal delivery
	 *   of fatal signals, group_exit_task processes the signal.
	 */
	int			notify_count;
	struct task_struct	*group_exit_task;

	/* thread group stop support, overloads group_exit_code too */
	int			group_stop_count;
	unsigned int		flags; /* see SIGNAL_* flags below */

	/*
	 * PR_SET_CHILD_SUBREAPER marks a process, like a service
	 * manager, to re-parent orphan (double-forking) child processes
	 * to this process instead of 'init'. The service manager is
	 * able to receive SIGCHLD signals and is able to investigate
	 * the process until it calls wait(). All children of this
	 * process will inherit a flag if they should look for a
	 * child_subreaper process at exit.
	 */
	unsigned int		is_child_subreaper:1;
	unsigned int		has_child_subreaper:1;

#ifdef CONFIG_POSIX_TIMERS

	/* POSIX.1b Interval Timers */
	int			posix_timer_id;
	struct list_head	posix_timers;

	/* ITIMER_REAL timer for the process */
	struct hrtimer		real_timer;
	ktime_t			it_real_incr;

	/*
	 * ITIMER_PROF and ITIMER_VIRTUAL timers for the process, we use
	 * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing array as these
	 * values are defined to 0 and 1 respectively
	 */
	struct cpu_itimer	it[2];

	/*
	 * Thread group totals for process CPU timers.
	 * See thread_group_cputimer(), et al, for details.
	 */
	struct thread_group_cputimer cputimer;

#endif
	/* Empty if CONFIG_POSIX_TIMERS=n */
	struct posix_cputimers posix_cputimers;

	/* PID/PID hash table linkage. */
	struct pid *pids[PIDTYPE_MAX];

#ifdef CONFIG_NO_HZ_FULL
	atomic_t tick_dep_mask;
#endif

	struct pid *tty_old_pgrp;

	/* boolean value for session group leader */
	int leader;

	struct tty_struct *tty; /* NULL if no tty */

#ifdef CONFIG_SCHED_AUTOGROUP
	struct autogroup *autogroup;
#endif
	/*
	 * Cumulative resource counters for dead threads in the group,
	 * and for reaped dead child processes forked by this group.
	 * Live threads maintain their own counters and add to these
	 * in __exit_signal, except for the group leader.
	 */
	seqlock_t stats_lock;
	u64 utime, stime, cutime, cstime;
	u64 gtime;
	u64 cgtime;
	struct prev_cputime prev_cputime;
	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
	unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
	unsigned long inblock, oublock, cinblock, coublock;
	unsigned long maxrss, cmaxrss;
	struct task_io_accounting ioac;

	/*
	 * Cumulative ns of schedule CPU time for dead threads in the
	 * group, not including a zombie group leader.  (This only differs
	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
	 * other than jiffies.)
	 */
	unsigned long long sum_sched_runtime;

	/*
	 * We don't bother to synchronize most readers of this at all,
	 * because there is no reader checking a limit that actually needs
	 * to get both rlim_cur and rlim_max atomically, and either one
	 * alone is a single word that can safely be read normally.
	 * getrlimit/setrlimit use task_lock(current->group_leader) to
	 * protect this instead of the siglock, because they really
	 * have no need to disable irqs.
	 */
	struct rlimit rlim[RLIM_NLIMITS];

#ifdef CONFIG_BSD_PROCESS_ACCT
	struct pacct_struct pacct;	/* per-process accounting information */
#endif
#ifdef CONFIG_TASKSTATS
	struct taskstats *stats;
#endif
#ifdef CONFIG_AUDIT
	unsigned audit_tty;
	struct tty_audit_buf *tty_audit_buf;
#endif

	/*
	 * Thread is the potential origin of an oom condition; kill first on
	 * oom
	 */
	bool oom_flag_origin;
	short oom_score_adj;		/* OOM kill score adjustment */
	short oom_score_adj_min;	/* OOM kill score adjustment min value.
					 * Only settable by CAP_SYS_RESOURCE. */
	struct mm_struct *oom_mm;	/* recorded mm when the thread group got
					 * killed by the oom killer */

	struct mutex cred_guard_mutex;	/* guard against foreign influences on
					 * credential calculations
					 * (notably ptrace).
					 * Deprecated, do not use in new code.
					 * Use exec_update_lock instead.
					 */
	struct rw_semaphore exec_update_lock;	/* Held while task_struct is
						 * being updated during exec,
						 * and may have inconsistent
						 * permissions.
						 */
} __randomize_layout;
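
/*
 * Illustrative sketch (assumption, not part of this header): the
 * is_child_subreaper machinery above is armed from userspace, e.g. by a
 * service manager marking itself as a subreaper:
 *
 *	(userspace, needs <sys/prctl.h>)
 *	if (prctl(PR_SET_CHILD_SUBREAPER, 1) != 0)
 *		perror("prctl");
 */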

/*
 * Bits in flags field of signal_struct.
 */
#define SIGNAL_STOP_STOPPED	0x00000001 /* job control stop in effect */
#define SIGNAL_STOP_CONTINUED	0x00000002 /* SIGCONT since WCONTINUED reap */
#define SIGNAL_GROUP_EXIT	0x00000004 /* group exit in progress */
#define SIGNAL_GROUP_COREDUMP	0x00000008 /* coredump in progress */
/*
 * Pending notifications to parent.
 */
#define SIGNAL_CLD_STOPPED	0x00000010
#define SIGNAL_CLD_CONTINUED	0x00000020
#define SIGNAL_CLD_MASK		(SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)

#define SIGNAL_UNKILLABLE	0x00000040 /* for init: ignore fatal signals */

#define SIGNAL_STOP_MASK (SIGNAL_CLD_MASK | SIGNAL_STOP_STOPPED | \
			  SIGNAL_STOP_CONTINUED)

static inline void signal_set_stop_flags(struct signal_struct *sig,
					 unsigned int flags)
{
	WARN_ON(sig->flags & (SIGNAL_GROUP_EXIT|SIGNAL_GROUP_COREDUMP));
	sig->flags = (sig->flags & ~SIGNAL_STOP_MASK) | flags;
}

/* If true, all threads except ->group_exit_task have pending SIGKILL */
static inline int signal_group_exit(const struct signal_struct *sig)
{
	return	(sig->flags & SIGNAL_GROUP_EXIT) ||
		(sig->group_exit_task != NULL);
}

extern void flush_signals(struct task_struct *);
extern void ignore_signals(struct task_struct *);
extern void flush_signal_handlers(struct task_struct *, int force_default);
extern int dequeue_signal(struct task_struct *task,
			  sigset_t *mask, kernel_siginfo_t *info);

static inline int kernel_dequeue_signal(void)
{
	struct task_struct *task = current;
	kernel_siginfo_t __info;
	int ret;

	spin_lock_irq(&task->sighand->siglock);
	ret = dequeue_signal(task, &task->blocked, &__info);
	spin_unlock_irq(&task->sighand->siglock);

	return ret;
}

static inline void kernel_signal_stop(void)
{
	spin_lock_irq(&current->sighand->siglock);
	if (current->jobctl & JOBCTL_STOP_DEQUEUED)
		set_special_state(TASK_STOPPED);
	spin_unlock_irq(&current->sighand->siglock);

	schedule();
}
#ifdef __ARCH_SI_TRAPNO
# define ___ARCH_SI_TRAPNO(_a1) , _a1
#else
# define ___ARCH_SI_TRAPNO(_a1)
#endif
#ifdef __ia64__
# define ___ARCH_SI_IA64(_a1, _a2, _a3) , _a1, _a2, _a3
#else
# define ___ARCH_SI_IA64(_a1, _a2, _a3)
#endif

int force_sig_fault_to_task(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t);
int force_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr));
int send_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t);

int force_sig_mceerr(int code, void __user *, short);
int send_sig_mceerr(int code, void __user *, short, struct task_struct *);

int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper);
int force_sig_pkuerr(void __user *addr, u32 pkey);

int force_sig_ptrace_errno_trap(int errno, void __user *addr);

extern int send_sig_info(int, struct kernel_siginfo *, struct task_struct *);
extern void force_sigsegv(int sig);
extern int force_sig_info(struct kernel_siginfo *);
extern int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp);
extern int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid);
extern int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr, struct pid *,
				const struct cred *);
extern int kill_pgrp(struct pid *pid, int sig, int priv);
extern int kill_pid(struct pid *pid, int sig, int priv);
extern __must_check bool do_notify_parent(struct task_struct *, int);
extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
extern void force_sig(int);
extern int send_sig(int, struct task_struct *, int);
extern int zap_other_threads(struct task_struct *p);
extern struct sigqueue *sigqueue_alloc(void);
extern void sigqueue_free(struct sigqueue *);
extern int send_sigqueue(struct sigqueue *, struct pid *, enum pid_type);
extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);

static inline int restart_syscall(void)
{
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	return -ERESTARTNOINTR;
}

static inline int signal_pending(struct task_struct *p)
{
	return unlikely(test_tsk_thread_flag(p, TIF_SIGPENDING));
}

static inline int __fatal_signal_pending(struct task_struct *p)
{
	return unlikely(sigismember(&p->pending.signal, SIGKILL));
}

static inline int fatal_signal_pending(struct task_struct *p)
{
	return signal_pending(p) && __fatal_signal_pending(p);
}

static inline int signal_pending_state(long state, struct task_struct *p)
{
	if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
		return 0;
	if (!signal_pending(p))
		return 0;

	return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
}
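
/*
 * Illustrative sketch (assumption, not part of this header): long-running
 * kernel loops commonly poll fatal_signal_pending() so that SIGKILL can
 * abort the work; have_more_work() and do_one_chunk() are hypothetical.
 *
 *	while (have_more_work()) {
 *		if (fatal_signal_pending(current))
 *			return -EINTR;
 *		do_one_chunk();
 *		cond_resched();
 *	}
 */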

/*
 * This should only be used in fault handlers to decide whether we
 * should stop the current fault routine to handle the signals
 * instead, especially when we have been interrupted by a
 * VM_FAULT_RETRY.
 */
static inline bool fault_signal_pending(vm_fault_t fault_flags,
					struct pt_regs *regs)
{
	return unlikely((fault_flags & VM_FAULT_RETRY) &&
			(fatal_signal_pending(current) ||
			 (user_mode(regs) && signal_pending(current))));
}
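
/*
 * Illustrative sketch (assumption, not from this file): a caller of the
 * fault path can use fault_signal_pending() to bail out early and let
 * signal delivery run before the fault is retried:
 *
 *	vm_fault_t ret = handle_mm_fault(vma, address, flags, regs);
 *	if (fault_signal_pending(ret, regs))
 *		return;
 */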

/*
 * Reevaluate whether the task has signals pending delivery.
 * Wake the task if so.
 * This is required every time the blocked sigset_t changes.
 * Callers must hold sighand->siglock.
 */
extern void recalc_sigpending_and_wake(struct task_struct *t);
extern void recalc_sigpending(void);
extern void calculate_sigpending(void);

extern void signal_wake_up_state(struct task_struct *t, unsigned int state);

static inline void signal_wake_up(struct task_struct *t, bool resume)
{
	signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
}
static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
{
	signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
}
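
/*
 * Illustrative sketch (assumption, not from this file): per the comment
 * above, a change to another task's blocked mask must be followed by a
 * recalculation of its pending state, all under its siglock:
 *
 *	spin_lock_irq(&t->sighand->siglock);
 *	t->blocked = new_blocked;
 *	recalc_sigpending_and_wake(t);
 *	spin_unlock_irq(&t->sighand->siglock);
 */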

void task_join_group_stop(struct task_struct *task);

#ifdef TIF_RESTORE_SIGMASK
/*
 * Legacy restore_sigmask accessors.  These are inefficient on
 * SMP architectures because they require atomic operations.
 */

/**
 * set_restore_sigmask() - make sure saved_sigmask processing gets done
 *
 * This sets TIF_RESTORE_SIGMASK and ensures that the arch signal code
 * will run before returning to user mode, to process the flag.  For
 * all callers, TIF_SIGPENDING is already set or it's no harm to set
 * it.  TIF_RESTORE_SIGMASK need not be in the set of bits that the
 * arch code will notice on return to user mode, in case those bits
 * are scarce.  We set TIF_SIGPENDING here to ensure that the arch
 * signal code always gets run when TIF_RESTORE_SIGMASK is set.
 */
static inline void set_restore_sigmask(void)
{
	set_thread_flag(TIF_RESTORE_SIGMASK);
}

static inline void clear_tsk_restore_sigmask(struct task_struct *task)
{
	clear_tsk_thread_flag(task, TIF_RESTORE_SIGMASK);
}

static inline void clear_restore_sigmask(void)
{
	clear_thread_flag(TIF_RESTORE_SIGMASK);
}
static inline bool test_tsk_restore_sigmask(struct task_struct *task)
{
	return test_tsk_thread_flag(task, TIF_RESTORE_SIGMASK);
}
static inline bool test_restore_sigmask(void)
{
	return test_thread_flag(TIF_RESTORE_SIGMASK);
}
static inline bool test_and_clear_restore_sigmask(void)
{
	return test_and_clear_thread_flag(TIF_RESTORE_SIGMASK);
}

#else /* TIF_RESTORE_SIGMASK */

/* Higher-quality implementation, used if TIF_RESTORE_SIGMASK doesn't exist. */
static inline void set_restore_sigmask(void)
{
	current->restore_sigmask = true;
}
static inline void clear_tsk_restore_sigmask(struct task_struct *task)
{
	task->restore_sigmask = false;
}
static inline void clear_restore_sigmask(void)
{
	current->restore_sigmask = false;
}
static inline bool test_restore_sigmask(void)
{
	return current->restore_sigmask;
}
static inline bool test_tsk_restore_sigmask(struct task_struct *task)
{
	return task->restore_sigmask;
}
static inline bool test_and_clear_restore_sigmask(void)
{
	if (!current->restore_sigmask)
		return false;
	current->restore_sigmask = false;
	return true;
}
#endif

static inline void restore_saved_sigmask(void)
{
	if (test_and_clear_restore_sigmask())
		__set_current_blocked(&current->saved_sigmask);
}
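
/*
 * Illustrative sketch (assumption, not part of this header): the
 * sigsuspend()-style pattern these helpers support. A temporary blocked
 * mask is installed, the task sleeps, and the saved mask is restored only
 * after any caught signal has been delivered with the temporary mask in
 * place:
 *
 *	current->saved_sigmask = current->blocked;
 *	set_current_blocked(&newset);
 *
 *	while (!signal_pending(current)) {
 *		__set_current_state(TASK_INTERRUPTIBLE);
 *		schedule();
 *	}
 *	set_restore_sigmask();
 *	return -ERESTARTNOHAND;
 */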

extern int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize);

static inline void restore_saved_sigmask_unless(bool interrupted)
{
	if (interrupted)
		WARN_ON(!test_thread_flag(TIF_SIGPENDING));
	else
		restore_saved_sigmask();
}

static inline sigset_t *sigmask_to_save(void)
{
	sigset_t *res = &current->blocked;
	if (unlikely(test_restore_sigmask()))
		res = &current->saved_sigmask;
	return res;
}

static inline int kill_cad_pid(int sig, int priv)
{
	return kill_pid(cad_pid, sig, priv);
}

/* These can be the second arg to send_sig_info/send_group_sig_info.  */
#define SEND_SIG_NOINFO ((struct kernel_siginfo *) 0)
#define SEND_SIG_PRIV	((struct kernel_siginfo *) 1)

/*
 * True if we are on the alternate signal stack.
 */
static inline int on_sig_stack(unsigned long sp)
{
	/*
	 * If the signal stack is SS_AUTODISARM then, by construction, we
	 * can't be on the signal stack unless user code deliberately set
	 * SS_AUTODISARM when we were already on it.
	 *
	 * This improves reliability: if user state gets corrupted such that
	 * the stack pointer points very close to the end of the signal stack,
	 * then this check will enable the signal to be handled anyway.
	 */
	if (current->sas_ss_flags & SS_AUTODISARM)
		return 0;

#ifdef CONFIG_STACK_GROWSUP
	return sp >= current->sas_ss_sp &&
		sp - current->sas_ss_sp < current->sas_ss_size;
#else
	return sp > current->sas_ss_sp &&
		sp - current->sas_ss_sp <= current->sas_ss_size;
#endif
}

static inline int sas_ss_flags(unsigned long sp)
{
	if (!current->sas_ss_size)
		return SS_DISABLE;

	return on_sig_stack(sp) ? SS_ONSTACK : 0;
}

static inline void sas_ss_reset(struct task_struct *p)
{
	p->sas_ss_sp = 0;
	p->sas_ss_size = 0;
	p->sas_ss_flags = SS_DISABLE;
}

static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig)
{
	if (unlikely((ksig->ka.sa.sa_flags & SA_ONSTACK)) && !sas_ss_flags(sp))
#ifdef CONFIG_STACK_GROWSUP
		return current->sas_ss_sp;
#else
		return current->sas_ss_sp + current->sas_ss_size;
#endif
	return sp;
}
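
/*
 * Illustrative sketch (assumption, not part of this header): architecture
 * signal-frame setup typically asks sigsp() where the handler stack should
 * start, honouring SA_ONSTACK, then builds its (arch-specific, here
 * hypothetical) rt_sigframe just below that address:
 *
 *	unsigned long sp = sigsp(user_stack_pointer(regs), ksig);
 *	struct rt_sigframe __user *frame =
 *		(void __user *)round_down(sp - sizeof(*frame), 16);
 */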

extern void __cleanup_sighand(struct sighand_struct *);
extern void flush_itimer_signals(void);

#define tasklist_empty() \
	list_empty(&init_task.tasks)

#define next_task(p) \
	list_entry_rcu((p)->tasks.next, struct task_struct, tasks)

#define for_each_process(p) \
	for (p = &init_task ; (p = next_task(p)) != &init_task ; )

extern bool current_is_single_threaded(void);

/*
 * Careful: do_each_thread/while_each_thread is a double loop so
 * 'break' will not work as expected - use goto instead.
 */
#define do_each_thread(g, t) \
	for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do

#define while_each_thread(g, t) \
	while ((t = next_thread(t)) != g)

#define __for_each_thread(signal, t) \
	list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node)

#define for_each_thread(p, t) \
	__for_each_thread((p)->signal, t)

/* Careful: this is a double loop, 'break' won't work as expected. */
#define for_each_process_thread(p, t) \
	for_each_process(p) for_each_thread(p, t)
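
/*
 * Illustrative sketch (assumption, not part of this header): walking every
 * thread of every process. The list walk is RCU-safe, so readers typically
 * hold rcu_read_lock() (or tasklist_lock); note the double-loop caveat
 * above, 'break' only leaves the inner loop.
 *
 *	struct task_struct *p, *t;
 *
 *	rcu_read_lock();
 *	for_each_process_thread(p, t)
 *		pr_info("%s pid=%d\n", t->comm, t->pid);
 *	rcu_read_unlock();
 */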

typedef int (*proc_visitor)(struct task_struct *p, void *data);
void walk_process_tree(struct task_struct *top, proc_visitor, void *);

static inline
struct pid *task_pid_type(struct task_struct *task, enum pid_type type)
{
	struct pid *pid;
	if (type == PIDTYPE_PID)
		pid = task_pid(task);
	else
		pid = task->signal->pids[type];
	return pid;
}

static inline struct pid *task_tgid(struct task_struct *task)
{
	return task->signal->pids[PIDTYPE_TGID];
}

/*
 * Without tasklist or RCU lock it is not safe to dereference
 * the result of task_pgrp/task_session even if task == current,
 * we can race with another thread doing sys_setsid/sys_setpgid.
 */
static inline struct pid *task_pgrp(struct task_struct *task)
{
	return task->signal->pids[PIDTYPE_PGID];
}

static inline struct pid *task_session(struct task_struct *task)
{
	return task->signal->pids[PIDTYPE_SID];
}
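
/*
 * Illustrative sketch (assumption, not from this file): per the comment
 * above, hold the RCU read lock across both the lookup and the use of the
 * returned struct pid:
 *
 *	pid_t pgrp_nr;
 *
 *	rcu_read_lock();
 *	pgrp_nr = pid_vnr(task_pgrp(current));
 *	rcu_read_unlock();
 */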

static inline int get_nr_threads(struct task_struct *task)
{
	return task->signal->nr_threads;
}

static inline bool thread_group_leader(struct task_struct *p)
{
	return p->exit_signal >= 0;
}

static inline
bool same_thread_group(struct task_struct *p1, struct task_struct *p2)
{
	return p1->signal == p2->signal;
}

static inline struct task_struct *next_thread(const struct task_struct *p)
{
	return list_entry_rcu(p->thread_group.next,
			      struct task_struct, thread_group);
}

static inline int thread_group_empty(struct task_struct *p)
{
	return list_empty(&p->thread_group);
}

#define delay_group_leader(p) \
		(thread_group_leader(p) && !thread_group_empty(p))

extern bool thread_group_exited(struct pid *pid);

extern struct sighand_struct *__lock_task_sighand(struct task_struct *task,
						  unsigned long *flags);

static inline struct sighand_struct *lock_task_sighand(struct task_struct *task,
						       unsigned long *flags)
{
	struct sighand_struct *ret;

	ret = __lock_task_sighand(task, flags);
	(void)__cond_lock(&task->sighand->siglock, ret);
	return ret;
}

static inline void unlock_task_sighand(struct task_struct *task,
				       unsigned long *flags)
{
	spin_unlock_irqrestore(&task->sighand->siglock, *flags);
}
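
/*
 * Illustrative sketch (assumption, not part of this header): the canonical
 * pairing of these helpers. __lock_task_sighand() returns NULL when the
 * task is exiting and its sighand is already gone, so the result must be
 * checked before touching state protected by siglock:
 *
 *	unsigned long flags;
 *
 *	if (lock_task_sighand(task, &flags)) {
 *		... use task->signal under siglock ...
 *		unlock_task_sighand(task, &flags);
 *	}
 */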

static inline unsigned long task_rlimit(const struct task_struct *task,
					unsigned int limit)
{
	return READ_ONCE(task->signal->rlim[limit].rlim_cur);
}

static inline unsigned long task_rlimit_max(const struct task_struct *task,
					    unsigned int limit)
{
	return READ_ONCE(task->signal->rlim[limit].rlim_max);
}

static inline unsigned long rlimit(unsigned int limit)
{
	return task_rlimit(current, limit);
}

static inline unsigned long rlimit_max(unsigned int limit)
{
	return task_rlimit_max(current, limit);
}
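
/*
 * Illustrative sketch (assumption, not from this file): a typical limit
 * check, with locked_pages as a hypothetical caller-side count. Per the
 * comment in signal_struct, rlim_cur is read without the siglock; the
 * READ_ONCE() above keeps the single-word read tear-free.
 *
 *	unsigned long lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
 *
 *	if (locked_pages > lock_limit && !capable(CAP_IPC_LOCK))
 *		return -ENOMEM;
 */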

#endif /* _LINUX_SCHED_SIGNAL_H */