/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *              Changes to use preallocated sigqueue structures
 *              to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/user.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/cn_proc.h>
#include <linux/compiler.h>
#include <linux/posix-timers.h>
#include <linux/livepatch.h>

#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>
#include "audit.h"      /* audit_signal_info() */

/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;

static void __user *sig_handler(struct task_struct *t, int sig)
{
        return t->sighand->action[sig - 1].sa.sa_handler;
}

static inline bool sig_handler_ignored(void __user *handler, int sig)
{
        /* Is it explicitly or implicitly ignored? */
        return handler == SIG_IGN ||
               (handler == SIG_DFL && sig_kernel_ignore(sig));
}

static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
{
        void __user *handler;

        handler = sig_handler(t, sig);

        /* SIGKILL and SIGSTOP may not be sent to the global init */
        if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
                return true;

        if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
            handler == SIG_DFL && !(force && sig_kernel_only(sig)))
                return true;

        return sig_handler_ignored(handler, sig);
}

static bool sig_ignored(struct task_struct *t, int sig, bool force)
{
        /*
         * Blocked signals are never ignored, since the
         * signal handler may change by the time it is
         * unblocked.
         */
        if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
                return false;

        /*
         * Tracers may want to know about even ignored signals, unless the
         * signal is SIGKILL, which can't be reported anyway but can be
         * ignored by a SIGNAL_UNKILLABLE task.
         */
        if (t->ptrace && sig != SIGKILL)
                return false;

        return sig_task_ignored(t, sig, force);
}

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
        unsigned long ready;
        long i;

        switch (_NSIG_WORDS) {
        default:
                for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
                        ready |= signal->sig[i] &~ blocked->sig[i];
                break;

        case 4: ready  = signal->sig[3] &~ blocked->sig[3];
                ready |= signal->sig[2] &~ blocked->sig[2];
                ready |= signal->sig[1] &~ blocked->sig[1];
                ready |= signal->sig[0] &~ blocked->sig[0];
                break;

        case 2: ready  = signal->sig[1] &~ blocked->sig[1];
                ready |= signal->sig[0] &~ blocked->sig[0];
                break;

        case 1: ready  = signal->sig[0] &~ blocked->sig[0];
        }
        return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

static bool recalc_sigpending_tsk(struct task_struct *t)
{
        if ((t->jobctl & JOBCTL_PENDING_MASK) ||
            PENDING(&t->pending, &t->blocked) ||
            PENDING(&t->signal->shared_pending, &t->blocked)) {
                set_tsk_thread_flag(t, TIF_SIGPENDING);
                return true;
        }

        /*
         * We must never clear the flag in another thread, or in current
         * when it's possible the current syscall is returning -ERESTART*.
         * So we don't clear it here; only callers who know they should
         * clear it do so.
         */
        return false;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current, the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
        if (recalc_sigpending_tsk(t))
                signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
        if (!recalc_sigpending_tsk(current) && !freezing(current) &&
            !klp_patch_pending(current))
                clear_thread_flag(TIF_SIGPENDING);

}

void calculate_sigpending(void)
{
        /* Have any signals or users of TIF_SIGPENDING been delayed
         * until after fork?
         */
        spin_lock_irq(&current->sighand->siglock);
        set_tsk_thread_flag(current, TIF_SIGPENDING);
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);
}

/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
        (sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
         sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))

int next_signal(struct sigpending *pending, sigset_t *mask)
{
        unsigned long i, *s, *m, x;
        int sig = 0;

        s = pending->signal.sig;
        m = mask->sig;

        /*
         * Handle the first word specially: it contains the
         * synchronous signals that need to be dequeued first.
         */
        x = *s &~ *m;
        if (x) {
                if (x & SYNCHRONOUS_MASK)
                        x &= SYNCHRONOUS_MASK;
                sig = ffz(~x) + 1;
                return sig;
        }

        switch (_NSIG_WORDS) {
        default:
                for (i = 1; i < _NSIG_WORDS; ++i) {
                        x = *++s &~ *++m;
                        if (!x)
                                continue;
                        sig = ffz(~x) + i*_NSIG_BPW + 1;
                        break;
                }
                break;

        case 2:
                x = s[1] &~ m[1];
                if (!x)
                        break;
                sig = ffz(~x) + _NSIG_BPW + 1;
                break;

        case 1:
                /* Nothing to do */
                break;
        }

        return sig;
}

static inline void print_dropped_signal(int sig)
{
        static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

        if (!print_fatal_signals)
                return;

        if (!__ratelimit(&ratelimit_state))
                return;

        pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
                current->comm, current->pid, sig);
}

/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask in @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.  If stop signo is being set, the existing signo is
 * cleared.  If @task is already being killed or exiting, this function
 * becomes a no-op.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if it was a no-op because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
{
        BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
                        JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
        BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

        if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
                return false;

        if (mask & JOBCTL_STOP_SIGMASK)
                task->jobctl &= ~JOBCTL_STOP_SIGMASK;

        task->jobctl |= mask;
        return true;
}

/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking.  @task->siglock guarantees that @task->parent points to the
 * ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
        if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
                task->jobctl &= ~JOBCTL_TRAPPING;
                smp_mb();       /* advised by wake_up_bit() */
                wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
        }
}

/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
 * STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
{
        BUG_ON(mask & ~JOBCTL_PENDING_MASK);

        if (mask & JOBCTL_STOP_PENDING)
                mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

        task->jobctl &= ~mask;

        if (!(task->jobctl & JOBCTL_PENDING_MASK))
                task_clear_jobctl_trapping(task);
}

/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate %SIGNAL_* flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
        struct signal_struct *sig = task->signal;
        bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

        WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

        task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

        if (!consume)
                return false;

        if (!WARN_ON_ONCE(sig->group_stop_count == 0))
                sig->group_stop_count--;

        /*
         * Tell the caller to notify completion iff we are entering into a
         * fresh group stop.  Read comment in do_signal_stop() for details.
         */
        if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
                signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
                return true;
        }
        return false;
}

void task_join_group_stop(struct task_struct *task)
{
        /* Have the new thread join an on-going signal group stop */
        unsigned long jobctl = current->jobctl;
        if (jobctl & JOBCTL_STOP_PENDING) {
                struct signal_struct *sig = current->signal;
                unsigned long signr = jobctl & JOBCTL_STOP_SIGMASK;
                unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
                if (task_set_jobctl_pending(task, signr | gstop)) {
                        sig->group_stop_count++;
                }
        }
}

/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
{
        struct sigqueue *q = NULL;
        struct user_struct *user;

        /*
         * Protect access to @t credentials. This can go away when all
         * callers hold rcu read lock.
         */
        rcu_read_lock();
        user = get_uid(__task_cred(t)->user);
        atomic_inc(&user->sigpending);
        rcu_read_unlock();

        if (override_rlimit ||
            atomic_read(&user->sigpending) <=
                        task_rlimit(t, RLIMIT_SIGPENDING)) {
                q = kmem_cache_alloc(sigqueue_cachep, flags);
        } else {
                print_dropped_signal(sig);
        }

        if (unlikely(q == NULL)) {
                atomic_dec(&user->sigpending);
                free_uid(user);
        } else {
                INIT_LIST_HEAD(&q->list);
                q->flags = 0;
                q->user = user;
        }

        return q;
}

static void __sigqueue_free(struct sigqueue *q)
{
        if (q->flags & SIGQUEUE_PREALLOC)
                return;
        atomic_dec(&q->user->sigpending);
        free_uid(q->user);
        kmem_cache_free(sigqueue_cachep, q);
}

void flush_sigqueue(struct sigpending *queue)
{
        struct sigqueue *q;

        sigemptyset(&queue->signal);
        while (!list_empty(&queue->list)) {
                q = list_entry(queue->list.next, struct sigqueue , list);
                list_del_init(&q->list);
                __sigqueue_free(q);
        }
}

/*
 * Flush all pending signals for this kthread.
 */
void flush_signals(struct task_struct *t)
{
        unsigned long flags;

        spin_lock_irqsave(&t->sighand->siglock, flags);
        clear_tsk_thread_flag(t, TIF_SIGPENDING);
        flush_sigqueue(&t->pending);
        flush_sigqueue(&t->signal->shared_pending);
        spin_unlock_irqrestore(&t->sighand->siglock, flags);
}

#ifdef CONFIG_POSIX_TIMERS
static void __flush_itimer_signals(struct sigpending *pending)
{
        sigset_t signal, retain;
        struct sigqueue *q, *n;

        signal = pending->signal;
        sigemptyset(&retain);

        list_for_each_entry_safe(q, n, &pending->list, list) {
                int sig = q->info.si_signo;

                if (likely(q->info.si_code != SI_TIMER)) {
                        sigaddset(&retain, sig);
                } else {
                        sigdelset(&signal, sig);
                        list_del_init(&q->list);
                        __sigqueue_free(q);
                }
        }

        sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
        struct task_struct *tsk = current;
        unsigned long flags;

        spin_lock_irqsave(&tsk->sighand->siglock, flags);
        __flush_itimer_signals(&tsk->pending);
        __flush_itimer_signals(&tsk->signal->shared_pending);
        spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
#endif

void ignore_signals(struct task_struct *t)
{
        int i;

        for (i = 0; i < _NSIG; ++i)
                t->sighand->action[i].sa.sa_handler = SIG_IGN;

        flush_signals(t);
}

/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
        int i;
        struct k_sigaction *ka = &t->sighand->action[0];
        for (i = _NSIG ; i != 0 ; i--) {
                if (force_default || ka->sa.sa_handler != SIG_IGN)
                        ka->sa.sa_handler = SIG_DFL;
                ka->sa.sa_flags = 0;
#ifdef __ARCH_HAS_SA_RESTORER
                ka->sa.sa_restorer = NULL;
#endif
                sigemptyset(&ka->sa.sa_mask);
                ka++;
        }
}

bool unhandled_signal(struct task_struct *tsk, int sig)
{
        void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
        if (is_global_init(tsk))
                return true;

        if (handler != SIG_IGN && handler != SIG_DFL)
                return false;

        /* if ptraced, let the tracer determine */
        return !tsk->ptrace;
}

static void collect_signal(int sig, struct sigpending *list, siginfo_t *info,
                           bool *resched_timer)
{
        struct sigqueue *q, *first = NULL;

        /*
         * Collect the siginfo appropriate to this signal.  Check if
         * there is another siginfo for the same signal.
         */
        list_for_each_entry(q, &list->list, list) {
                if (q->info.si_signo == sig) {
                        if (first)
                                goto still_pending;
                        first = q;
                }
        }

        sigdelset(&list->signal, sig);

        if (first) {
still_pending:
                list_del_init(&first->list);
                copy_siginfo(info, &first->info);

                *resched_timer =
                        (first->flags & SIGQUEUE_PREALLOC) &&
                        (info->si_code == SI_TIMER) &&
                        (info->si_sys_private);

                __sigqueue_free(first);
        } else {
                /*
                 * Ok, it wasn't in the queue.  This must be
                 * a fast-pathed signal or we must have been
                 * out of queue space.  So zero out the info.
                 */
                clear_siginfo(info);
                info->si_signo = sig;
                info->si_errno = 0;
                info->si_code = SI_USER;
                info->si_pid = 0;
                info->si_uid = 0;
        }
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
                            siginfo_t *info, bool *resched_timer)
{
        int sig = next_signal(pending, mask);

        if (sig)
                collect_signal(sig, pending, info, resched_timer);
        return sig;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
        bool resched_timer = false;
        int signr;

        /* We only dequeue private signals from ourselves, we don't let
         * signalfd steal them
         */
        signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
        if (!signr) {
                signr = __dequeue_signal(&tsk->signal->shared_pending,
                                         mask, info, &resched_timer);
#ifdef CONFIG_POSIX_TIMERS
                /*
                 * itimer signal ?
                 *
                 * itimers are process shared and we restart periodic
                 * itimers in the signal delivery path to prevent DoS
                 * attacks in the high resolution timer case. This is
                 * compliant with the old way of self-restarting
                 * itimers, as the SIGALRM is a legacy signal and only
                 * queued once. Changing the restart behaviour to
                 * restart the timer in the signal dequeue path is
                 * reducing the timer noise on heavy loaded !highres
                 * systems too.
                 */
                if (unlikely(signr == SIGALRM)) {
                        struct hrtimer *tmr = &tsk->signal->real_timer;

                        if (!hrtimer_is_queued(tmr) &&
                            tsk->signal->it_real_incr != 0) {
                                hrtimer_forward(tmr, tmr->base->get_time(),
                                                tsk->signal->it_real_incr);
                                hrtimer_restart(tmr);
                        }
                }
#endif
        }

        recalc_sigpending();
        if (!signr)
                return 0;

        if (unlikely(sig_kernel_stop(signr))) {
                /*
                 * Set a marker that we have dequeued a stop signal.  Our
                 * caller might release the siglock and then the pending
                 * stop signal it is about to process is no longer in the
                 * pending bitmasks, but must still be cleared by a SIGCONT
                 * (and overruled by a SIGKILL).  So those cases clear this
                 * shared flag after we've set it.  Note that this flag may
                 * remain set after the signal we return is ignored or
                 * handled.  That doesn't matter because its only purpose
                 * is to alert stop-signal processing code when another
                 * processor has come along and cleared the flag.
                 */
                current->jobctl |= JOBCTL_STOP_DEQUEUED;
        }
#ifdef CONFIG_POSIX_TIMERS
        if (resched_timer) {
                /*
                 * Release the siglock to ensure proper locking order
                 * of timer locks outside of siglocks.  Note, we leave
                 * irqs disabled here, since the posix-timers code is
                 * about to disable them again anyway.
                 */
                spin_unlock(&tsk->sighand->siglock);
                posixtimer_rearm(info);
                spin_lock(&tsk->sighand->siglock);

                /* Don't expose the si_sys_private value to userspace */
                info->si_sys_private = 0;
        }
#endif
        return signr;
}

/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
        set_tsk_thread_flag(t, TIF_SIGPENDING);
        /*
         * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
         * case. We don't check t->state here because there is a race with it
         * executing another processor and just now entering stopped state.
         * By using wake_up_state, we ensure the process will wake up and
         * handle its death signal.
         */
        if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
                kick_process(t);
}

/*
 * Remove signals in mask from the pending set and queue.
 *
 * All callers must be holding the siglock.
 */
static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
{
        struct sigqueue *q, *n;
        sigset_t m;

        sigandsets(&m, mask, &s->signal);
        if (sigisemptyset(&m))
                return;

        sigandnsets(&s->signal, &s->signal, mask);
        list_for_each_entry_safe(q, n, &s->list, list) {
                if (sigismember(mask, q->info.si_signo)) {
                        list_del_init(&q->list);
                        __sigqueue_free(q);
                }
        }
}

static inline int is_si_special(const struct siginfo *info)
{
        return info <= SEND_SIG_PRIV;
}

static inline bool si_fromuser(const struct siginfo *info)
{
        return info == SEND_SIG_NOINFO ||
                (!is_si_special(info) && SI_FROMUSER(info));
}

/*
 * called with RCU read lock from check_kill_permission()
 */
static bool kill_ok_by_cred(struct task_struct *t)
{
        const struct cred *cred = current_cred();
        const struct cred *tcred = __task_cred(t);

        return uid_eq(cred->euid, tcred->suid) ||
               uid_eq(cred->euid, tcred->uid) ||
               uid_eq(cred->uid, tcred->suid) ||
               uid_eq(cred->uid, tcred->uid) ||
               ns_capable(tcred->user_ns, CAP_KILL);
}

/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct siginfo *info,
                                 struct task_struct *t)
{
        struct pid *sid;
        int error;

        if (!valid_signal(sig))
                return -EINVAL;

        if (!si_fromuser(info))
                return 0;

        error = audit_signal_info(sig, t); /* Let audit system see the signal */
        if (error)
                return error;

        if (!same_thread_group(current, t) &&
            !kill_ok_by_cred(t)) {
                switch (sig) {
                case SIGCONT:
                        sid = task_session(t);
                        /*
                         * We don't return the error if sid == NULL. The
                         * task was unhashed, the caller must notice this.
                         */
                        if (!sid || sid == task_session(current))
                                break;
                default:
                        return -EPERM;
                }
        }

        return security_task_kill(t, info, sig, NULL);
}

/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules sticky ptrace trap which is cleared on the next
 * TRAP_STOP to notify ptracer of an event.  @t must have been seized by
 * ptracer.
 *
 * If @t is running, STOP trap will be taken.  If trapped for STOP and
 * ptracer is listening for events, tracee is woken up so that it can
 * re-trap for the new event.  If trapped otherwise, STOP trap will be
 * eventually taken without returning to userland after the existing traps
 * are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
        WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
        assert_spin_locked(&t->sighand->siglock);

        task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
        ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}

/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals. The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
        struct signal_struct *signal = p->signal;
        struct task_struct *t;
        sigset_t flush;

        if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
                if (!(signal->flags & SIGNAL_GROUP_EXIT))
                        return sig == SIGKILL;
                /*
                 * The process is in the middle of dying, nothing to do.
                 */
        } else if (sig_kernel_stop(sig)) {
                /*
                 * This is a stop signal.  Remove SIGCONT from all queues.
                 */
                siginitset(&flush, sigmask(SIGCONT));
                flush_sigqueue_mask(&flush, &signal->shared_pending);
                for_each_thread(p, t)
                        flush_sigqueue_mask(&flush, &t->pending);
        } else if (sig == SIGCONT) {
                unsigned int why;
                /*
                 * Remove all stop signals from all queues, wake all threads.
                 */
                siginitset(&flush, SIG_KERNEL_STOP_MASK);
                flush_sigqueue_mask(&flush, &signal->shared_pending);
                for_each_thread(p, t) {
                        flush_sigqueue_mask(&flush, &t->pending);
                        task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
                        if (likely(!(t->ptrace & PT_SEIZED)))
                                wake_up_state(t, __TASK_STOPPED);
                        else
                                ptrace_trap_notify(t);
                }

                /*
                 * Notify the parent with CLD_CONTINUED if we were stopped.
                 *
                 * If we were in the middle of a group stop, we pretend it
                 * was already finished, and then continued. Since SIGCHLD
                 * doesn't queue we report only CLD_STOPPED, as if the next
                 * CLD_CONTINUED was dropped.
                 */
                why = 0;
                if (signal->flags & SIGNAL_STOP_STOPPED)
                        why |= SIGNAL_CLD_CONTINUED;
                else if (signal->group_stop_count)
                        why |= SIGNAL_CLD_STOPPED;

                if (why) {
                        /*
                         * The first thread which returns from do_signal_stop()
                         * will take ->siglock, notice SIGNAL_CLD_MASK, and
                         * notify its parent. See get_signal_to_deliver().
                         */
                        signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
                        signal->group_stop_count = 0;
                        signal->group_exit_code = 0;
                }
        }

        return !sig_ignored(p, sig, force);
}

/*
 * Test if P wants to take SIG. After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG. Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals. Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline bool wants_signal(int sig, struct task_struct *p)
{
        if (sigismember(&p->blocked, sig))
                return false;

        if (p->flags & PF_EXITING)
                return false;

        if (sig == SIGKILL)
                return true;

        if (task_is_stopped_or_traced(p))
                return false;

        return task_curr(p) || !signal_pending(p);
}

static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
{
        struct signal_struct *signal = p->signal;
        struct task_struct *t;

        /*
         * Now find a thread we can wake up to take the signal off the queue.
         *
         * If the main thread wants the signal, it gets first crack.
         * Probably the least surprising to the average bear.
         */
        if (wants_signal(sig, p))
                t = p;
        else if ((type == PIDTYPE_PID) || thread_group_empty(p))
                /*
                 * There is just one thread and it does not need to be woken.
                 * It will dequeue unblocked signals before it runs again.
                 */
                return;
        else {
                /*
                 * Otherwise try to find a suitable thread.
                 */
                t = signal->curr_target;
                while (!wants_signal(sig, t)) {
                        t = next_thread(t);
                        if (t == signal->curr_target)
                                /*
                                 * No thread needs to be woken.
                                 * Any eligible threads will see
                                 * the signal in the queue soon.
                                 */
                                return;
                }
                signal->curr_target = t;
        }

        /*
         * Found a killable thread.  If the signal will be fatal,
         * then start taking the whole group down immediately.
         */
        if (sig_fatal(p, sig) &&
            !(signal->flags & SIGNAL_GROUP_EXIT) &&
            !sigismember(&t->real_blocked, sig) &&
            (sig == SIGKILL || !p->ptrace)) {
                /*
                 * This signal will be fatal to the whole group.
                 */
                if (!sig_kernel_coredump(sig)) {
                        /*
                         * Start a group exit and wake everybody up.
                         * This way we don't have other threads
                         * running and doing things after a slower
                         * thread has the fatal signal pending.
                         */
                        signal->flags = SIGNAL_GROUP_EXIT;
                        signal->group_exit_code = sig;
                        signal->group_stop_count = 0;
                        t = p;
                        do {
                                task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
                                sigaddset(&t->pending.signal, SIGKILL);
                                signal_wake_up(t, 1);
                        } while_each_thread(p, t);
                        return;
                }
        }

        /*
         * The signal is already in the shared-pending queue.
         * Tell the chosen thread to wake up and dequeue it.
         */
        signal_wake_up(t, sig == SIGKILL);
        return;
}

static inline bool legacy_queue(struct sigpending *signals, int sig)
{
        return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}

#ifdef CONFIG_USER_NS
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
        if (current_user_ns() == task_cred_xxx(t, user_ns))
                return;

        if (SI_FROMKERNEL(info))
                return;

        rcu_read_lock();
        info->si_uid = from_kuid_munged(task_cred_xxx(t, user_ns),
                                        make_kuid(current_user_ns(), info->si_uid));
        rcu_read_unlock();
}
#else
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
        return;
}
#endif

static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
                        enum pid_type type, int from_ancestor_ns)
{
        struct sigpending *pending;
        struct sigqueue *q;
        int override_rlimit;
        int ret = 0, result;

        assert_spin_locked(&t->sighand->siglock);

        result = TRACE_SIGNAL_IGNORED;
        if (!prepare_signal(sig, t,
                        from_ancestor_ns || (info == SEND_SIG_PRIV)))
                goto ret;

        pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
        /*
         * Short-circuit ignored signals and support queuing
         * exactly one non-rt signal, so that we can get more
         * detailed information about the cause of the signal.
         */
        result = TRACE_SIGNAL_ALREADY_PENDING;
        if (legacy_queue(pending, sig))
                goto ret;

        result = TRACE_SIGNAL_DELIVERED;
        /*
         * Skip useless siginfo allocation for SIGKILL, SIGSTOP,
         * and kernel threads.
         */
        if (sig_kernel_only(sig) || (t->flags & PF_KTHREAD))
                goto out_set;

        /*
         * Real-time signals must be queued if sent by sigqueue, or
         * some other real-time mechanism.  It is implementation
         * defined whether kill() does so.  We attempt to do so, on
         * the principle of least surprise, but since kill is not
         * allowed to fail with EAGAIN when low on memory we just
         * make sure at least one signal gets delivered and don't
         * pass on the info struct.
         */
        if (sig < SIGRTMIN)
                override_rlimit = (is_si_special(info) || info->si_code >= 0);
        else
                override_rlimit = 0;

        q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit);
        if (q) {
                list_add_tail(&q->list, &pending->list);
                switch ((unsigned long) info) {
                case (unsigned long) SEND_SIG_NOINFO:
                        clear_siginfo(&q->info);
                        q->info.si_signo = sig;
                        q->info.si_errno = 0;
                        q->info.si_code = SI_USER;
                        q->info.si_pid = task_tgid_nr_ns(current,
                                                        task_active_pid_ns(t));
                        q->info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
                        break;
                case (unsigned long) SEND_SIG_PRIV:
                        clear_siginfo(&q->info);
                        q->info.si_signo = sig;
                        q->info.si_errno = 0;
                        q->info.si_code = SI_KERNEL;
                        q->info.si_pid = 0;
                        q->info.si_uid = 0;
                        break;
                default:
                        copy_siginfo(&q->info, info);
                        if (from_ancestor_ns)
                                q->info.si_pid = 0;
                        break;
                }

                userns_fixup_signal_uid(&q->info, t);

        } else if (!is_si_special(info)) {
                if (sig >= SIGRTMIN && info->si_code != SI_USER) {
                        /*
                         * Queue overflow, abort.  We may abort if the
                         * signal was rt and sent by user using something
                         * other than kill().
                         */
                        result = TRACE_SIGNAL_OVERFLOW_FAIL;
                        ret = -EAGAIN;
                        goto ret;
                } else {
                        /*
                         * This is a silent loss of information.  We still
                         * send the signal, but the *info bits are lost.
                         */
                        result = TRACE_SIGNAL_LOSE_INFO;
                }
        }

out_set:
        signalfd_notify(t, sig);
        sigaddset(&pending->signal, sig);

        /* Let multiprocess signals appear after on-going forks */
        if (type > PIDTYPE_TGID) {
                struct multiprocess_signals *delayed;
                hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
                        sigset_t *signal = &delayed->signal;
                        /* Can't queue both a stop and a continue signal */
                        if (sig == SIGCONT)
                                sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
                        else if (sig_kernel_stop(sig))
                                sigdelset(signal, SIGCONT);
                        sigaddset(signal, sig);
                }
        }

        complete_signal(sig, t, type);
ret:
        trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
        return ret;
}

static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
                        enum pid_type type)
{
        int from_ancestor_ns = 0;

#ifdef CONFIG_PID_NS
        from_ancestor_ns = si_fromuser(info) &&
                           !task_pid_nr_ns(current, task_active_pid_ns(t));
#endif

        return __send_signal(sig, info, t, type, from_ancestor_ns);
}

static void print_fatal_signal(int signr)
{
        struct pt_regs *regs = signal_pt_regs();
        pr_info("potentially unexpected fatal signal %d.\n", signr);

#if defined(__i386__) && !defined(__arch_um__)
        pr_info("code at %08lx: ", regs->ip);
        {
                int i;
                for (i = 0; i < 16; i++) {
                        unsigned char insn;

                        if (get_user(insn, (unsigned char *)(regs->ip + i)))
                                break;
                        pr_cont("%02x ", insn);
                }
        }
        pr_cont("\n");
#endif
        preempt_disable();
        show_regs(regs);
        preempt_enable();
}

static int __init setup_print_fatal_signals(char *str)
{
        get_option (&str, &print_fatal_signals);

        return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);

int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
        return send_signal(sig, info, p, PIDTYPE_TGID);
}

int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
                        enum pid_type type)
{
        unsigned long flags;
        int ret = -ESRCH;

        if (lock_task_sighand(p, &flags)) {
                ret = send_signal(sig, info, p, type);
                unlock_task_sighand(p, &flags);
        }

        return ret;
}

/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
        unsigned long int flags;
        int ret, blocked, ignored;
        struct k_sigaction *action;

        spin_lock_irqsave(&t->sighand->siglock, flags);
        action = &t->sighand->action[sig-1];
        ignored = action->sa.sa_handler == SIG_IGN;
        blocked = sigismember(&t->blocked, sig);
        if (blocked || ignored) {
                action->sa.sa_handler = SIG_DFL;
                if (blocked) {
                        sigdelset(&t->blocked, sig);
                        recalc_sigpending_and_wake(t);
                }
        }
        /*
         * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
         * debugging to leave init killable.
         */
        if (action->sa.sa_handler == SIG_DFL && !t->ptrace)
                t->signal->flags &= ~SIGNAL_UNKILLABLE;
        ret = send_signal(sig, info, t, PIDTYPE_PID);
        spin_unlock_irqrestore(&t->sighand->siglock, flags);

        return ret;
}

/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
        struct task_struct *t = p;
        int count = 0;

        p->signal->group_stop_count = 0;

        while_each_thread(p, t) {
                task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
                count++;

                /* Don't bother with already dead threads */
                if (t->exit_state)
                        continue;
                sigaddset(&t->pending.signal, SIGKILL);
                signal_wake_up(t, 1);
        }

        return count;
}

struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
                                           unsigned long *flags)
{
        struct sighand_struct *sighand;

        rcu_read_lock();
        for (;;) {
                sighand = rcu_dereference(tsk->sighand);
                if (unlikely(sighand == NULL))
                        break;

                /*
                 * This sighand can be already freed and even reused, but
                 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
                 * initializes ->siglock: this slab can't go away, it has
                 * the same object type, ->siglock can't be reinitialized.
                 *
                 * We need to ensure that tsk->sighand is still the same
                 * after we take the lock, we can race with de_thread() or
                 * __exit_signal(). In the latter case the next iteration
                 * must see ->sighand == NULL.
                 */
                spin_lock_irqsave(&sighand->siglock, *flags);
                if (likely(sighand == tsk->sighand))
                        break;
                spin_unlock_irqrestore(&sighand->siglock, *flags);
        }
        rcu_read_unlock();

        return sighand;
}

/*
 * send signal info to all the members of a group
 */
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
                        enum pid_type type)
{
        int ret;

        rcu_read_lock();
        ret = check_kill_permission(sig, info, p);
        rcu_read_unlock();

        if (!ret && sig)
                ret = do_send_sig_info(sig, info, p, type);

        return ret;
}

/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
        struct task_struct *p = NULL;
        int retval, success;

        success = 0;
        retval = -ESRCH;
        do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
                int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
                success |= !err;
                retval = err;
        } while_each_pid_task(pgrp, PIDTYPE_PGID, p);
        return success ? 0 : retval;
}

int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
        int error = -ESRCH;
        struct task_struct *p;

        for (;;) {
                rcu_read_lock();
                p = pid_task(pid, PIDTYPE_PID);
                if (p)
                        error = group_send_sig_info(sig, info, p, PIDTYPE_TGID);
                rcu_read_unlock();
                if (likely(!p || error != -ESRCH))
                        return error;

                /*
                 * The task was unhashed in between, try again.  If it
                 * is dead, pid_task() will return NULL, if we race with
                 * de_thread() it will find the new leader.
                 */
        }
}

6c478ae9 | 1373 | static int kill_proc_info(int sig, struct siginfo *info, pid_t pid) |
c4b92fc1 EB |
1374 | { |
1375 | int error; | |
1376 | rcu_read_lock(); | |
b488893a | 1377 | error = kill_pid_info(sig, info, find_vpid(pid)); |
c4b92fc1 EB |
1378 | rcu_read_unlock(); |
1379 | return error; | |
1380 | } | |
1381 | ||
bb17fcca CB |
1382 | static inline bool kill_as_cred_perm(const struct cred *cred, |
1383 | struct task_struct *target) | |
d178bc3a SH |
1384 | { |
1385 | const struct cred *pcred = __task_cred(target); | |
bb17fcca CB |
1386 | |
1387 | return uid_eq(cred->euid, pcred->suid) || | |
1388 | uid_eq(cred->euid, pcred->uid) || | |
1389 | uid_eq(cred->uid, pcred->suid) || | |
1390 | uid_eq(cred->uid, pcred->uid); | |
d178bc3a SH |
1391 | } |
1392 | ||
2425c08b | 1393 | /* like kill_pid_info(), but doesn't use uid/euid of "current" */ |
d178bc3a | 1394 | int kill_pid_info_as_cred(int sig, struct siginfo *info, struct pid *pid, |
6b4f3d01 | 1395 | const struct cred *cred) |
46113830 HW |
1396 | { |
1397 | int ret = -EINVAL; | |
1398 | struct task_struct *p; | |
14d8c9f3 | 1399 | unsigned long flags; |
46113830 HW |
1400 | |
1401 | if (!valid_signal(sig)) | |
1402 | return ret; | |
1403 | ||
14d8c9f3 | 1404 | rcu_read_lock(); |
2425c08b | 1405 | p = pid_task(pid, PIDTYPE_PID); |
46113830 HW |
1406 | if (!p) { |
1407 | ret = -ESRCH; | |
1408 | goto out_unlock; | |
1409 | } | |
d178bc3a | 1410 | if (si_fromuser(info) && !kill_as_cred_perm(cred, p)) { |
46113830 HW |
1411 | ret = -EPERM; |
1412 | goto out_unlock; | |
1413 | } | |
6b4f3d01 | 1414 | ret = security_task_kill(p, info, sig, cred); |
8f95dc58 DQ |
1415 | if (ret) |
1416 | goto out_unlock; | |
14d8c9f3 TG |
1417 | |
1418 | if (sig) { | |
1419 | if (lock_task_sighand(p, &flags)) { | |
5a883cee | 1420 | ret = __send_signal(sig, info, p, PIDTYPE_TGID, 0); |
14d8c9f3 TG |
1421 | unlock_task_sighand(p, &flags); |
1422 | } else | |
1423 | ret = -ESRCH; | |
46113830 HW |
1424 | } |
1425 | out_unlock: | |
14d8c9f3 | 1426 | rcu_read_unlock(); |
46113830 HW |
1427 | return ret; |
1428 | } | |
d178bc3a | 1429 | EXPORT_SYMBOL_GPL(kill_pid_info_as_cred); |
1da177e4 LT |
1430 | |
1431 | /* | |
1432 | * kill_something_info() interprets pid in interesting ways just like kill(2). | |
1433 | * | |
1434 | * POSIX specifies that kill(-1,sig) is unspecified, but what we have | |
1435 | * is probably wrong. Should make it like BSD or SYSV. | |
1436 | */ | |
1437 | ||
bc64efd2 | 1438 | static int kill_something_info(int sig, struct siginfo *info, pid_t pid) |
1da177e4 | 1439 | { |
8d42db18 | 1440 | int ret; |
d5df763b PE |
1441 | |
1442 | if (pid > 0) { | |
1443 | rcu_read_lock(); | |
1444 | ret = kill_pid_info(sig, info, find_vpid(pid)); | |
1445 | rcu_read_unlock(); | |
1446 | return ret; | |
1447 | } | |
1448 | ||
4ea77014 | 1449 | /* -INT_MIN is undefined. Exclude this case to avoid a UBSAN warning */ |
1450 | if (pid == INT_MIN) | |
1451 | return -ESRCH; | |
1452 | ||
d5df763b PE |
1453 | read_lock(&tasklist_lock); |
1454 | if (pid != -1) { | |
1455 | ret = __kill_pgrp_info(sig, info, | |
1456 | pid ? find_vpid(-pid) : task_pgrp(current)); | |
1457 | } else { | |
1da177e4 LT |
1458 | int retval = 0, count = 0; |
1459 | struct task_struct * p; | |
1460 | ||
1da177e4 | 1461 | for_each_process(p) { |
d25141a8 SB |
1462 | if (task_pid_vnr(p) > 1 && |
1463 | !same_thread_group(p, current)) { | |
01024980 EB |
1464 | int err = group_send_sig_info(sig, info, p, |
1465 | PIDTYPE_MAX); | |
1da177e4 LT |
1466 | ++count; |
1467 | if (err != -EPERM) | |
1468 | retval = err; | |
1469 | } | |
1470 | } | |
8d42db18 | 1471 | ret = count ? retval : -ESRCH; |
1da177e4 | 1472 | } |
d5df763b PE |
1473 | read_unlock(&tasklist_lock); |
1474 | ||
8d42db18 | 1475 | return ret; |
1da177e4 LT |
1476 | } |
1477 | ||
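/*
 * Illustrative summary (not part of the original source) of how the
 * pid argument above is interpreted, mirroring kill(2):
 *
 *	pid >  0	signal only the process with that pid
 *	pid ==  0	signal every process in the caller's process group
 *	pid == -1	signal every process the caller may signal, except
 *			pid 1 and the caller's own thread group
 *	pid <  -1	signal every process in the process group -pid
 */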
1478 | /* | |
1479 | * These are for backward compatibility with the rest of the kernel source. | |
1480 | */ | |
1481 | ||
5aba085e | 1482 | int send_sig_info(int sig, struct siginfo *info, struct task_struct *p) |
1da177e4 | 1483 | { |
1da177e4 LT |
1484 | /* |
1485 | * Make sure legacy kernel users don't send in bad values | |
1486 | * (normal paths check this in check_kill_permission). | |
1487 | */ | |
7ed20e1a | 1488 | if (!valid_signal(sig)) |
1da177e4 LT |
1489 | return -EINVAL; |
1490 | ||
40b3b025 | 1491 | return do_send_sig_info(sig, info, p, PIDTYPE_PID); |
1da177e4 LT |
1492 | } |
1493 | ||
b67a1b9e ON |
1494 | #define __si_special(priv) \ |
1495 | ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO) | |
1496 | ||
1da177e4 LT |
1497 | int |
1498 | send_sig(int sig, struct task_struct *p, int priv) | |
1499 | { | |
b67a1b9e | 1500 | return send_sig_info(sig, __si_special(priv), p); |
1da177e4 LT |
1501 | } |
1502 | ||
52cba1a2 | 1503 | void force_sig(int sig, struct task_struct *p) |
1da177e4 | 1504 | { |
b67a1b9e | 1505 | force_sig_info(sig, SEND_SIG_PRIV, p); |
1da177e4 LT |
1506 | } |
1507 | ||
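/*
 * Illustrative sketch (not taken from this file): kernel code reaches
 * for these wrappers when it only has a task_struct and no siginfo to
 * pass along, for example:
 *
 *	send_sig(SIGKILL, victim, 1);	priv != 0: treat as sent by the kernel
 *	force_sig(SIGSEGV, current);	delivered even if blocked or ignored
 *
 * "victim" here is just a hypothetical task_struct pointer.
 */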
1508 | /* | |
1509 | * When things go south during signal handling, we | |
1510 | * will force a SIGSEGV. And if the signal that caused | |
1511 | * the problem was already a SIGSEGV, we'll want to | |
1512 | * make sure we don't even try to deliver the signal.. | |
1513 | */ | |
52cba1a2 | 1514 | void force_sigsegv(int sig, struct task_struct *p) |
1da177e4 LT |
1515 | { |
1516 | if (sig == SIGSEGV) { | |
1517 | unsigned long flags; | |
1518 | spin_lock_irqsave(&p->sighand->siglock, flags); | |
1519 | p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL; | |
1520 | spin_unlock_irqrestore(&p->sighand->siglock, flags); | |
1521 | } | |
1522 | force_sig(SIGSEGV, p); | |
1da177e4 LT |
1523 | } |
1524 | ||
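/*
 * Usage note (added for illustration): the arch signal-frame setup
 * path is the typical caller; signal_setup_done() later in this file
 * calls force_sigsegv(ksig->sig, current) when building the user-space
 * signal frame failed.
 */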
f8ec6601 EB |
1525 | int force_sig_fault(int sig, int code, void __user *addr |
1526 | ___ARCH_SI_TRAPNO(int trapno) | |
1527 | ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr) | |
1528 | , struct task_struct *t) | |
1529 | { | |
1530 | struct siginfo info; | |
1531 | ||
1532 | clear_siginfo(&info); | |
1533 | info.si_signo = sig; | |
1534 | info.si_errno = 0; | |
1535 | info.si_code = code; | |
1536 | info.si_addr = addr; | |
1537 | #ifdef __ARCH_SI_TRAPNO | |
1538 | info.si_trapno = trapno; | |
1539 | #endif | |
1540 | #ifdef __ia64__ | |
1541 | info.si_imm = imm; | |
1542 | info.si_flags = flags; | |
1543 | info.si_isr = isr; | |
1544 | #endif | |
1545 | return force_sig_info(info.si_signo, &info, t); | |
1546 | } | |
1547 | ||
1548 | int send_sig_fault(int sig, int code, void __user *addr | |
1549 | ___ARCH_SI_TRAPNO(int trapno) | |
1550 | ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr) | |
1551 | , struct task_struct *t) | |
1552 | { | |
1553 | struct siginfo info; | |
1554 | ||
1555 | clear_siginfo(&info); | |
1556 | info.si_signo = sig; | |
1557 | info.si_errno = 0; | |
1558 | info.si_code = code; | |
1559 | info.si_addr = addr; | |
1560 | #ifdef __ARCH_SI_TRAPNO | |
1561 | info.si_trapno = trapno; | |
1562 | #endif | |
1563 | #ifdef __ia64__ | |
1564 | info.si_imm = imm; | |
1565 | info.si_flags = flags; | |
1566 | info.si_isr = isr; | |
1567 | #endif | |
1568 | return send_sig_info(info.si_signo, &info, t); | |
1569 | } | |
1570 | ||
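/*
 * Illustrative sketch (not part of this file): an architecture's page
 * fault handler would typically report a bad user access roughly like
 * this, shown for an arch that defines neither __ARCH_SI_TRAPNO nor
 * the ia64 extras, so the optional arguments disappear:
 *
 *	force_sig_fault(SIGSEGV, SEGV_MAPERR,
 *			(void __user *)fault_address, current);
 *
 * "fault_address" is a stand-in for whatever the arch recorded.
 */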
38246735 EB |
1571 | int force_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t) |
1572 | { | |
1573 | struct siginfo info; | |
1574 | ||
1575 | WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR)); | |
1576 | clear_siginfo(&info); | |
1577 | info.si_signo = SIGBUS; | |
1578 | info.si_errno = 0; | |
1579 | info.si_code = code; | |
1580 | info.si_addr = addr; | |
1581 | info.si_addr_lsb = lsb; | |
1582 | return force_sig_info(info.si_signo, &info, t); | |
1583 | } | |
1584 | ||
1585 | int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t) | |
1586 | { | |
1587 | struct siginfo info; | |
1588 | ||
1589 | WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR)); | |
1590 | clear_siginfo(&info); | |
1591 | info.si_signo = SIGBUS; | |
1592 | info.si_errno = 0; | |
1593 | info.si_code = code; | |
1594 | info.si_addr = addr; | |
1595 | info.si_addr_lsb = lsb; | |
1596 | return send_sig_info(info.si_signo, &info, t); | |
1597 | } | |
1598 | EXPORT_SYMBOL(send_sig_mceerr); | |
38246735 | 1599 | |
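/*
 * Illustrative sketch (not from this file): the memory-failure code is
 * the expected user of the MCE helpers above, reporting a poisoned
 * page to the task that touched it, roughly:
 *
 *	force_sig_mceerr(BUS_MCEERR_AR, (void __user *)addr,
 *			 PAGE_SHIFT, current);
 *
 * where the lsb argument describes the granularity of the corruption
 * (here assumed to be a whole page).
 */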
38246735 EB |
1600 | int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper) |
1601 | { | |
1602 | struct siginfo info; | |
1603 | ||
1604 | clear_siginfo(&info); | |
1605 | info.si_signo = SIGSEGV; | |
1606 | info.si_errno = 0; | |
1607 | info.si_code = SEGV_BNDERR; | |
1608 | info.si_addr = addr; | |
1609 | info.si_lower = lower; | |
1610 | info.si_upper = upper; | |
1611 | return force_sig_info(info.si_signo, &info, current); | |
1612 | } | |
38246735 EB |
1613 | |
1614 | #ifdef SEGV_PKUERR | |
1615 | int force_sig_pkuerr(void __user *addr, u32 pkey) | |
1616 | { | |
1617 | struct siginfo info; | |
1618 | ||
1619 | clear_siginfo(&info); | |
1620 | info.si_signo = SIGSEGV; | |
1621 | info.si_errno = 0; | |
1622 | info.si_code = SEGV_PKUERR; | |
1623 | info.si_addr = addr; | |
1624 | info.si_pkey = pkey; | |
1625 | return force_sig_info(info.si_signo, &info, current); | |
1626 | } | |
1627 | #endif | |
f8ec6601 | 1628 | |
f71dd7dc EB |
1629 | /* For the crazy architectures that include trap information in |
1630 | * the errno field, instead of an actual errno value. | |
1631 | */ | |
1632 | int force_sig_ptrace_errno_trap(int errno, void __user *addr) | |
1633 | { | |
1634 | struct siginfo info; | |
1635 | ||
1636 | clear_siginfo(&info); | |
1637 | info.si_signo = SIGTRAP; | |
1638 | info.si_errno = errno; | |
1639 | info.si_code = TRAP_HWBKPT; | |
1640 | info.si_addr = addr; | |
1641 | return force_sig_info(info.si_signo, &info, current); | |
1642 | } | |
1643 | ||
c4b92fc1 EB |
1644 | int kill_pgrp(struct pid *pid, int sig, int priv) |
1645 | { | |
146a505d PE |
1646 | int ret; |
1647 | ||
1648 | read_lock(&tasklist_lock); | |
1649 | ret = __kill_pgrp_info(sig, __si_special(priv), pid); | |
1650 | read_unlock(&tasklist_lock); | |
1651 | ||
1652 | return ret; | |
c4b92fc1 EB |
1653 | } |
1654 | EXPORT_SYMBOL(kill_pgrp); | |
1655 | ||
1656 | int kill_pid(struct pid *pid, int sig, int priv) | |
1657 | { | |
1658 | return kill_pid_info(sig, __si_special(priv), pid); | |
1659 | } | |
1660 | EXPORT_SYMBOL(kill_pid); | |
1661 | ||
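/*
 * Illustrative sketch (not part of this file): drivers use these
 * struct pid based helpers rather than raw pid_t values. A tty-style
 * hangup might look roughly like:
 *
 *	kill_pgrp(pgrp, SIGHUP, 1);
 *	kill_pgrp(pgrp, SIGCONT, 1);
 *
 * and signalling a single remembered task like:
 *
 *	kill_pid(task_pid(tsk), SIGIO, 1);
 */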
1da177e4 LT |
1662 | /* |
1663 | * These functions support sending signals using preallocated sigqueue | |
1664 | * structures. This is needed "because realtime applications cannot | |
1665 | * afford to lose notifications of asynchronous events, like timer | |
5aba085e | 1666 | * expirations or I/O completions". In the case of POSIX Timers |
1da177e4 LT |
1667 | * we allocate the sigqueue structure from timer_create(). If this |
1668 | * allocation fails we are able to report the failure to the application | |
1669 | * with an EAGAIN error. | |
1670 | */ | |
1da177e4 LT |
1671 | struct sigqueue *sigqueue_alloc(void) |
1672 | { | |
f84d49b2 | 1673 | struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0); |
1da177e4 | 1674 | |
f84d49b2 | 1675 | if (q) |
1da177e4 | 1676 | q->flags |= SIGQUEUE_PREALLOC; |
f84d49b2 NO |
1677 | |
1678 | return q; | |
1da177e4 LT |
1679 | } |
1680 | ||
1681 | void sigqueue_free(struct sigqueue *q) | |
1682 | { | |
1683 | unsigned long flags; | |
60187d27 ON |
1684 | spinlock_t *lock = ¤t->sighand->siglock; |
1685 | ||
1da177e4 LT |
1686 | BUG_ON(!(q->flags & SIGQUEUE_PREALLOC)); |
1687 | /* | |
c8e85b4f ON |
1688 | * We must hold ->siglock while testing q->list |
1689 | * to serialize with collect_signal() or with | |
da7978b0 | 1690 | * __exit_signal()->flush_sigqueue(). |
1da177e4 | 1691 | */ |
60187d27 | 1692 | spin_lock_irqsave(lock, flags); |
c8e85b4f ON |
1693 | q->flags &= ~SIGQUEUE_PREALLOC; |
1694 | /* | |
1695 | * If it is queued it will be freed when dequeued, | |
1696 | * like the "regular" sigqueue. | |
1697 | */ | |
60187d27 | 1698 | if (!list_empty(&q->list)) |
c8e85b4f | 1699 | q = NULL; |
60187d27 ON |
1700 | spin_unlock_irqrestore(lock, flags); |
1701 | ||
c8e85b4f ON |
1702 | if (q) |
1703 | __sigqueue_free(q); | |
1da177e4 LT |
1704 | } |
1705 | ||
24122c7f | 1706 | int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type) |
9e3bd6c3 | 1707 | { |
e62e6650 | 1708 | int sig = q->info.si_signo; |
2ca3515a | 1709 | struct sigpending *pending; |
24122c7f | 1710 | struct task_struct *t; |
e62e6650 | 1711 | unsigned long flags; |
163566f6 | 1712 | int ret, result; |
2ca3515a | 1713 | |
4cd4b6d4 | 1714 | BUG_ON(!(q->flags & SIGQUEUE_PREALLOC)); |
e62e6650 ON |
1715 | |
1716 | ret = -1; | |
24122c7f EB |
1717 | rcu_read_lock(); |
1718 | t = pid_task(pid, type); | |
1719 | if (!t || !likely(lock_task_sighand(t, &flags))) | |
e62e6650 ON |
1720 | goto ret; |
1721 | ||
7e695a5e | 1722 | ret = 1; /* the signal is ignored */ |
163566f6 | 1723 | result = TRACE_SIGNAL_IGNORED; |
def8cf72 | 1724 | if (!prepare_signal(sig, t, false)) |
e62e6650 ON |
1725 | goto out; |
1726 | ||
1727 | ret = 0; | |
9e3bd6c3 PE |
1728 | if (unlikely(!list_empty(&q->list))) { |
1729 | /* | |
1730 | * If an SI_TIMER entry is already queued, just increment |
1731 | * the overrun count. | |
1732 | */ | |
9e3bd6c3 PE |
1733 | BUG_ON(q->info.si_code != SI_TIMER); |
1734 | q->info.si_overrun++; | |
163566f6 | 1735 | result = TRACE_SIGNAL_ALREADY_PENDING; |
e62e6650 | 1736 | goto out; |
9e3bd6c3 | 1737 | } |
ba661292 | 1738 | q->info.si_overrun = 0; |
9e3bd6c3 | 1739 | |
9e3bd6c3 | 1740 | signalfd_notify(t, sig); |
24122c7f | 1741 | pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending; |
9e3bd6c3 PE |
1742 | list_add_tail(&q->list, &pending->list); |
1743 | sigaddset(&pending->signal, sig); | |
07296149 | 1744 | complete_signal(sig, t, type); |
163566f6 | 1745 | result = TRACE_SIGNAL_DELIVERED; |
e62e6650 | 1746 | out: |
24122c7f | 1747 | trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result); |
e62e6650 ON |
1748 | unlock_task_sighand(t, &flags); |
1749 | ret: | |
24122c7f | 1750 | rcu_read_unlock(); |
e62e6650 | 1751 | return ret; |
9e3bd6c3 PE |
1752 | } |
1753 | ||
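/*
 * Illustrative lifecycle sketch (not part of this file) tying the
 * preallocated-sigqueue helpers together, roughly as the POSIX timer
 * code uses them:
 *
 *	timer_create():	q = sigqueue_alloc();
 *			report -EAGAIN to the application if q is NULL
 *	timer expiry:	send_sigqueue(q, pid, PIDTYPE_TGID);
 *			a return of 1 means the signal was ignored
 *	timer_delete():	sigqueue_free(q);
 *
 * The struct sigqueue is allocated exactly once, so expiry can never
 * fail for lack of memory, which is the point of the comment above
 * sigqueue_alloc().
 */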
1da177e4 LT |
1754 | /* |
1755 | * Let a parent know about the death of a child. | |
1756 | * For a stopped/continued status change, use do_notify_parent_cldstop instead. | |
2b2a1ff6 | 1757 | * |
53c8f9f1 ON |
1758 | * Returns true if our parent ignored us and so we've switched to |
1759 | * self-reaping. | |
1da177e4 | 1760 | */ |
53c8f9f1 | 1761 | bool do_notify_parent(struct task_struct *tsk, int sig) |
1da177e4 LT |
1762 | { |
1763 | struct siginfo info; | |
1764 | unsigned long flags; | |
1765 | struct sighand_struct *psig; | |
53c8f9f1 | 1766 | bool autoreap = false; |
bde8285e | 1767 | u64 utime, stime; |
1da177e4 LT |
1768 | |
1769 | BUG_ON(sig == -1); | |
1770 | ||
1771 | /* do_notify_parent_cldstop should have been called instead. */ | |
e1abb39c | 1772 | BUG_ON(task_is_stopped_or_traced(tsk)); |
1da177e4 | 1773 | |
d21142ec | 1774 | BUG_ON(!tsk->ptrace && |
1da177e4 LT |
1775 | (tsk->group_leader != tsk || !thread_group_empty(tsk))); |
1776 | ||
b6e238dc ON |
1777 | if (sig != SIGCHLD) { |
1778 | /* | |
1779 | * This is only possible if parent == real_parent. | |
1780 | * Check if it has changed security domain. | |
1781 | */ | |
1782 | if (tsk->parent_exec_id != tsk->parent->self_exec_id) | |
1783 | sig = SIGCHLD; | |
1784 | } | |
1785 | ||
faf1f22b | 1786 | clear_siginfo(&info); |
1da177e4 LT |
1787 | info.si_signo = sig; |
1788 | info.si_errno = 0; | |
b488893a | 1789 | /* |
32084504 EB |
1790 | * We are under tasklist_lock here so our parent is tied to |
1791 | * us and cannot change. | |
b488893a | 1792 | * |
32084504 EB |
1793 | * task_active_pid_ns will always return the same pid namespace |
1794 | * until a task passes through release_task. | |
b488893a PE |
1795 | * |
1796 | * write_lock() currently calls preempt_disable() which is the | |
1797 | * same as rcu_read_lock(), but according to Oleg, it is not |
1798 | * correct to rely on this. |
1799 | */ | |
1800 | rcu_read_lock(); | |
32084504 | 1801 | info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent)); |
54ba47ed EB |
1802 | info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns), |
1803 | task_uid(tsk)); | |
b488893a PE |
1804 | rcu_read_unlock(); |
1805 | ||
bde8285e FW |
1806 | task_cputime(tsk, &utime, &stime); |
1807 | info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime); | |
1808 | info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime); | |
1da177e4 LT |
1809 | |
1810 | info.si_status = tsk->exit_code & 0x7f; | |
1811 | if (tsk->exit_code & 0x80) | |
1812 | info.si_code = CLD_DUMPED; | |
1813 | else if (tsk->exit_code & 0x7f) | |
1814 | info.si_code = CLD_KILLED; | |
1815 | else { | |
1816 | info.si_code = CLD_EXITED; | |
1817 | info.si_status = tsk->exit_code >> 8; | |
1818 | } | |
1819 | ||
1820 | psig = tsk->parent->sighand; | |
1821 | spin_lock_irqsave(&psig->siglock, flags); | |
d21142ec | 1822 | if (!tsk->ptrace && sig == SIGCHLD && |
1da177e4 LT |
1823 | (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN || |
1824 | (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) { | |
1825 | /* | |
1826 | * We are exiting and our parent doesn't care. POSIX.1 | |
1827 | * defines special semantics for setting SIGCHLD to SIG_IGN | |
1828 | * or setting the SA_NOCLDWAIT flag: we should be reaped | |
1829 | * automatically and not left for our parent's wait4 call. | |
1830 | * Rather than having the parent do it as a magic kind of | |
1831 | * signal handler, we just set this to tell do_exit that we | |
1832 | * can be cleaned up without becoming a zombie. Note that | |
1833 | * we still call __wake_up_parent in this case, because a | |
1834 | * blocked sys_wait4 might now return -ECHILD. | |
1835 | * | |
1836 | * Whether we send SIGCHLD or not for SA_NOCLDWAIT | |
1837 | * is implementation-defined: we do (if you don't want | |
1838 | * it, just use SIG_IGN instead). | |
1839 | */ | |
53c8f9f1 | 1840 | autoreap = true; |
1da177e4 | 1841 | if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) |
53c8f9f1 | 1842 | sig = 0; |
1da177e4 | 1843 | } |
53c8f9f1 | 1844 | if (valid_signal(sig) && sig) |
1da177e4 LT |
1845 | __group_send_sig_info(sig, &info, tsk->parent); |
1846 | __wake_up_parent(tsk, tsk->parent); | |
1847 | spin_unlock_irqrestore(&psig->siglock, flags); | |
2b2a1ff6 | 1848 | |
53c8f9f1 | 1849 | return autoreap; |
1da177e4 LT |
1850 | } |
1851 | ||
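/*
 * Illustrative sketch (not part of this file): the exit path is the
 * expected caller, using the return value to choose between leaving a
 * zombie and self-reaping, roughly:
 *
 *	autoreap = do_notify_parent(tsk, tsk->exit_signal);
 *	tsk->exit_state = autoreap ? EXIT_DEAD : EXIT_ZOMBIE;
 */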
75b95953 TH |
1852 | /** |
1853 | * do_notify_parent_cldstop - notify parent of stopped/continued state change | |
1854 | * @tsk: task reporting the state change | |
1855 | * @for_ptracer: the notification is for ptracer | |
1856 | * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report | |
1857 | * | |
1858 | * Notify @tsk's parent that the stopped/continued state has changed. If | |
1859 | * @for_ptracer is %false, @tsk's group leader notifies its real parent. |
1860 | * If %true, @tsk reports to @tsk->parent which should be the ptracer. | |
1861 | * | |
1862 | * CONTEXT: | |
1863 | * Must be called with tasklist_lock at least read locked. | |
1864 | */ | |
1865 | static void do_notify_parent_cldstop(struct task_struct *tsk, | |
1866 | bool for_ptracer, int why) | |
1da177e4 LT |
1867 | { |
1868 | struct siginfo info; | |
1869 | unsigned long flags; | |
bc505a47 | 1870 | struct task_struct *parent; |
1da177e4 | 1871 | struct sighand_struct *sighand; |
bde8285e | 1872 | u64 utime, stime; |
1da177e4 | 1873 | |
75b95953 | 1874 | if (for_ptracer) { |
bc505a47 | 1875 | parent = tsk->parent; |
75b95953 | 1876 | } else { |
bc505a47 ON |
1877 | tsk = tsk->group_leader; |
1878 | parent = tsk->real_parent; | |
1879 | } | |
1880 | ||
faf1f22b | 1881 | clear_siginfo(&info); |
1da177e4 LT |
1882 | info.si_signo = SIGCHLD; |
1883 | info.si_errno = 0; | |
b488893a | 1884 | /* |
5aba085e | 1885 | * see comment in do_notify_parent() about the following 4 lines |
b488893a PE |
1886 | */ |
1887 | rcu_read_lock(); | |
17cf22c3 | 1888 | info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent)); |
54ba47ed | 1889 | info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk)); |
b488893a PE |
1890 | rcu_read_unlock(); |
1891 | ||
bde8285e FW |
1892 | task_cputime(tsk, &utime, &stime); |
1893 | info.si_utime = nsec_to_clock_t(utime); | |
1894 | info.si_stime = nsec_to_clock_t(stime); | |
1da177e4 LT |
1895 | |
1896 | info.si_code = why; | |
1897 | switch (why) { | |
1898 | case CLD_CONTINUED: | |
1899 | info.si_status = SIGCONT; | |
1900 | break; | |
1901 | case CLD_STOPPED: | |
1902 | info.si_status = tsk->signal->group_exit_code & 0x7f; | |
1903 | break; | |
1904 | case CLD_TRAPPED: | |
1905 | info.si_status = tsk->exit_code & 0x7f; | |
1906 | break; | |
1907 | default: | |
1908 | BUG(); | |
1909 | } | |
1910 | ||
1911 | sighand = parent->sighand; | |
1912 | spin_lock_irqsave(&sighand->siglock, flags); | |
1913 | if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN && | |
1914 | !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP)) | |
1915 | __group_send_sig_info(SIGCHLD, &info, parent); | |
1916 | /* | |
1917 | * Even if SIGCHLD is not generated, we must wake up wait4 calls. | |
1918 | */ | |
1919 | __wake_up_parent(tsk, parent); | |
1920 | spin_unlock_irqrestore(&sighand->siglock, flags); | |
1921 | } | |
1922 | ||
6527de95 | 1923 | static inline bool may_ptrace_stop(void) |
d5f70c00 | 1924 | { |
d21142ec | 1925 | if (!likely(current->ptrace)) |
6527de95 | 1926 | return false; |
d5f70c00 ON |
1927 | /* |
1928 | * Are we in the middle of do_coredump? | |
1929 | * If so, and our tracer is also part of the coredump, stopping |
1930 | * is a deadlock situation and pointless because our tracer |
1931 | * is dead, so don't allow us to stop. |
1932 | * If SIGKILL was already sent before the caller unlocked | |
999d9fc1 | 1933 | * ->siglock we must see ->core_state != NULL. Otherwise it |
d5f70c00 | 1934 | * is safe to enter schedule(). |
9899d11f ON |
1935 | * |
1936 | * This is almost outdated: a task with a pending SIGKILL can't |
1937 | * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported | |
1938 | * after SIGKILL was already dequeued. | |
d5f70c00 | 1939 | */ |
999d9fc1 | 1940 | if (unlikely(current->mm->core_state) && |
d5f70c00 | 1941 | unlikely(current->mm == current->parent->mm)) |
6527de95 | 1942 | return false; |
d5f70c00 | 1943 | |
6527de95 | 1944 | return true; |
d5f70c00 ON |
1945 | } |
1946 | ||
1a669c2f | 1947 | /* |
5aba085e | 1948 | * Return true if there is a SIGKILL that should be waking us up. |
1a669c2f RM |
1949 | * Called with the siglock held. |
1950 | */ | |
f99e9d8c | 1951 | static bool sigkill_pending(struct task_struct *tsk) |
1a669c2f | 1952 | { |
f99e9d8c CB |
1953 | return sigismember(&tsk->pending.signal, SIGKILL) || |
1954 | sigismember(&tsk->signal->shared_pending.signal, SIGKILL); | |
1a669c2f RM |
1955 | } |
1956 | ||
1da177e4 LT |
1957 | /* |
1958 | * This must be called with current->sighand->siglock held. | |
1959 | * | |
1960 | * This should be the path for all ptrace stops. | |
1961 | * We always set current->last_siginfo while stopped here. | |
1962 | * That makes it a way to test a stopped process for | |
1963 | * being ptrace-stopped vs being job-control-stopped. | |
1964 | * | |
20686a30 ON |
1965 | * If we actually decide not to stop at all because the tracer |
1966 | * is gone, we keep current->exit_code unless clear_code. | |
1da177e4 | 1967 | */ |
fe1bc6a0 | 1968 | static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info) |
b8401150 NK |
1969 | __releases(¤t->sighand->siglock) |
1970 | __acquires(¤t->sighand->siglock) | |
1da177e4 | 1971 | { |
ceb6bd67 TH |
1972 | bool gstop_done = false; |
1973 | ||
1a669c2f RM |
1974 | if (arch_ptrace_stop_needed(exit_code, info)) { |
1975 | /* | |
1976 | * The arch code has something special to do before a | |
1977 | * ptrace stop. This is allowed to block, e.g. for faults | |
1978 | * on user stack pages. We can't keep the siglock while | |
1979 | * calling arch_ptrace_stop, so we must release it now. | |
1980 | * To preserve proper semantics, we must do this before | |
1981 | * any signal bookkeeping like checking group_stop_count. | |
1982 | * Meanwhile, a SIGKILL could come in before we retake the | |
1983 | * siglock. That must prevent us from sleeping in TASK_TRACED. | |
1984 | * So after regaining the lock, we must check for SIGKILL. | |
1985 | */ | |
1986 | spin_unlock_irq(¤t->sighand->siglock); | |
1987 | arch_ptrace_stop(exit_code, info); | |
1988 | spin_lock_irq(¤t->sighand->siglock); | |
3d749b9e ON |
1989 | if (sigkill_pending(current)) |
1990 | return; | |
1a669c2f RM |
1991 | } |
1992 | ||
b5bf9a90 PZ |
1993 | set_special_state(TASK_TRACED); |
1994 | ||
1da177e4 | 1995 | /* |
81be24b8 TH |
1996 | * We're committing to trapping. TRACED should be visible before |
1997 | * TRAPPING is cleared; otherwise, the tracer might fail do_wait(). | |
1998 | * Also, transition to TRACED and updates to ->jobctl should be | |
1999 | * atomic with respect to siglock and should be done after the arch | |
2000 | * hook as siglock is released and regrabbed across it. | |
b5bf9a90 PZ |
2001 | * |
2002 | * TRACER TRACEE | |
2003 | * | |
2004 | * ptrace_attach() | |
2005 | * [L] wait_on_bit(JOBCTL_TRAPPING) [S] set_special_state(TRACED) | |
2006 | * do_wait() | |
2007 | * set_current_state() smp_wmb(); | |
2008 | * ptrace_do_wait() | |
2009 | * wait_task_stopped() | |
2010 | * task_stopped_code() | |
2011 | * [L] task_is_traced() [S] task_clear_jobctl_trapping(); | |
1da177e4 | 2012 | */ |
b5bf9a90 | 2013 | smp_wmb(); |
1da177e4 LT |
2014 | |
2015 | current->last_siginfo = info; | |
2016 | current->exit_code = exit_code; | |
2017 | ||
d79fdd6d | 2018 | /* |
0ae8ce1c TH |
2019 | * If @why is CLD_STOPPED, we're trapping to participate in a group |
2020 | * stop. Do the bookkeeping. Note that if SIGCONT was delivered |
73ddff2b TH |
2021 | * across siglock relocks since INTERRUPT was scheduled, PENDING |
2022 | * could be clear now. We act as if SIGCONT is received after | |
2023 | * TASK_TRACED is entered - ignore it. | |
d79fdd6d | 2024 | */ |
a8f072c1 | 2025 | if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING)) |
ceb6bd67 | 2026 | gstop_done = task_participate_group_stop(current); |
d79fdd6d | 2027 | |
fb1d910c | 2028 | /* any trap clears pending STOP trap, STOP trap clears NOTIFY */ |
73ddff2b | 2029 | task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP); |
fb1d910c TH |
2030 | if (info && info->si_code >> 8 == PTRACE_EVENT_STOP) |
2031 | task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY); | |
73ddff2b | 2032 | |
81be24b8 | 2033 | /* entering a trap, clear TRAPPING */ |
a8f072c1 | 2034 | task_clear_jobctl_trapping(current); |
d79fdd6d | 2035 | |
1da177e4 LT |
2036 | spin_unlock_irq(¤t->sighand->siglock); |
2037 | read_lock(&tasklist_lock); | |
3d749b9e | 2038 | if (may_ptrace_stop()) { |
ceb6bd67 TH |
2039 | /* |
2040 | * Notify parents of the stop. | |
2041 | * | |
2042 | * While ptraced, there are two parents - the ptracer and | |
2043 | * the real_parent of the group_leader. The ptracer should | |
2044 | * know about every stop while the real parent is only | |
2045 | * interested in the completion of group stop. The states | |
2046 | * for the two don't interact with each other. Notify | |
2047 | * separately unless they're gonna be duplicates. | |
2048 | */ | |
2049 | do_notify_parent_cldstop(current, true, why); | |
bb3696da | 2050 | if (gstop_done && ptrace_reparented(current)) |
ceb6bd67 TH |
2051 | do_notify_parent_cldstop(current, false, why); |
2052 | ||
53da1d94 MS |
2053 | /* |
2054 | * Don't want to allow preemption here, because | |
2055 | * sys_ptrace() needs this task to be inactive. | |
2056 | * | |
2057 | * XXX: implement read_unlock_no_resched(). | |
2058 | */ | |
2059 | preempt_disable(); | |
1da177e4 | 2060 | read_unlock(&tasklist_lock); |
53da1d94 | 2061 | preempt_enable_no_resched(); |
5d8f72b5 | 2062 | freezable_schedule(); |
1da177e4 LT |
2063 | } else { |
2064 | /* | |
2065 | * By the time we got the lock, our tracer went away. | |
6405f7f4 | 2066 | * Don't drop the lock yet, another tracer may come. |
ceb6bd67 TH |
2067 | * |
2068 | * If @gstop_done, the ptracer went away between group stop | |
2069 | * completion and here. During detach, it would have set | |
a8f072c1 TH |
2070 | * JOBCTL_STOP_PENDING on us and we'll re-enter |
2071 | * TASK_STOPPED in do_signal_stop() on return, so notifying | |
2072 | * the real parent of the group stop completion is enough. | |
1da177e4 | 2073 | */ |
ceb6bd67 TH |
2074 | if (gstop_done) |
2075 | do_notify_parent_cldstop(current, false, why); | |
2076 | ||
9899d11f | 2077 | /* tasklist protects us from ptrace_freeze_traced() */ |
6405f7f4 | 2078 | __set_current_state(TASK_RUNNING); |
20686a30 ON |
2079 | if (clear_code) |
2080 | current->exit_code = 0; | |
6405f7f4 | 2081 | read_unlock(&tasklist_lock); |
1da177e4 LT |
2082 | } |
2083 | ||
2084 | /* | |
2085 | * We are back. Now reacquire the siglock before touching | |
2086 | * last_siginfo, so that we are sure to have synchronized with | |
2087 | * any signal-sending on another CPU that wants to examine it. | |
2088 | */ | |
2089 | spin_lock_irq(¤t->sighand->siglock); | |
2090 | current->last_siginfo = NULL; | |
2091 | ||
544b2c91 TH |
2092 | /* LISTENING can be set only during STOP traps, clear it */ |
2093 | current->jobctl &= ~JOBCTL_LISTENING; | |
2094 | ||
1da177e4 LT |
2095 | /* |
2096 | * Queued signals ignored us while we were stopped for tracing. | |
2097 | * So check for any that we should take before resuming user mode. | |
b74d0deb | 2098 | * This sets TIF_SIGPENDING, but never clears it. |
1da177e4 | 2099 | */ |
b74d0deb | 2100 | recalc_sigpending_tsk(current); |
1da177e4 LT |
2101 | } |
2102 | ||
3544d72a | 2103 | static void ptrace_do_notify(int signr, int exit_code, int why) |
1da177e4 LT |
2104 | { |
2105 | siginfo_t info; | |
2106 | ||
faf1f22b | 2107 | clear_siginfo(&info); |
3544d72a | 2108 | info.si_signo = signr; |
1da177e4 | 2109 | info.si_code = exit_code; |
b488893a | 2110 | info.si_pid = task_pid_vnr(current); |
078de5f7 | 2111 | info.si_uid = from_kuid_munged(current_user_ns(), current_uid()); |
1da177e4 LT |
2112 | |
2113 | /* Let the debugger run. */ | |
3544d72a TH |
2114 | ptrace_stop(exit_code, why, 1, &info); |
2115 | } | |
2116 | ||
2117 | void ptrace_notify(int exit_code) | |
2118 | { | |
2119 | BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP); | |
f784e8a7 ON |
2120 | if (unlikely(current->task_works)) |
2121 | task_work_run(); | |
3544d72a | 2122 | |
1da177e4 | 2123 | spin_lock_irq(¤t->sighand->siglock); |
3544d72a | 2124 | ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED); |
1da177e4 LT |
2125 | spin_unlock_irq(¤t->sighand->siglock); |
2126 | } | |
2127 | ||
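/*
 * Illustrative sketch (not part of this file): ptrace event reporting
 * is layered on ptrace_notify(); ptrace_event() packs the event number
 * into the upper bits, roughly:
 *
 *	ptrace_notify((PTRACE_EVENT_FORK << 8) | SIGTRAP);
 *
 * which is why the BUG_ON above insists the low bits are exactly
 * SIGTRAP.
 */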
73ddff2b TH |
2128 | /** |
2129 | * do_signal_stop - handle group stop for SIGSTOP and other stop signals | |
2130 | * @signr: signr causing group stop if initiating | |
2131 | * | |
2132 | * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr | |
2133 | * and participate in it. If already set, participate in the existing | |
2134 | * group stop. If participated in a group stop (and thus slept), %true is | |
2135 | * returned with siglock released. | |
2136 | * | |
2137 | * If ptraced, this function doesn't handle stop itself. Instead, | |
2138 | * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock | |
2139 | * untouched. The caller must ensure that INTERRUPT trap handling takes | |
2141 | * place afterwards. |
2141 | * | |
2142 | * CONTEXT: | |
2143 | * Must be called with @current->sighand->siglock held, which is released | |
2144 | * on %true return. | |
2145 | * | |
2146 | * RETURNS: | |
2147 | * %false if group stop is already cancelled or ptrace trap is scheduled. | |
2148 | * %true if participated in group stop. | |
1da177e4 | 2149 | */ |
73ddff2b TH |
2150 | static bool do_signal_stop(int signr) |
2151 | __releases(¤t->sighand->siglock) | |
1da177e4 LT |
2152 | { |
2153 | struct signal_struct *sig = current->signal; | |
1da177e4 | 2154 | |
a8f072c1 | 2155 | if (!(current->jobctl & JOBCTL_STOP_PENDING)) { |
b76808e6 | 2156 | unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME; |
f558b7e4 ON |
2157 | struct task_struct *t; |
2158 | ||
a8f072c1 TH |
2159 | /* signr will be recorded in task->jobctl for retries */ |
2160 | WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK); | |
d79fdd6d | 2161 | |
a8f072c1 | 2162 | if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) || |
573cf9ad | 2163 | unlikely(signal_group_exit(sig))) |
73ddff2b | 2164 | return false; |
1da177e4 | 2165 | /* |
408a37de TH |
2166 | * There is no group stop already in progress. We must |
2167 | * initiate one now. | |
2168 | * | |
2169 | * While ptraced, a task may be resumed while group stop is | |
2170 | * still in effect and then receive a stop signal and | |
2171 | * initiate another group stop. This deviates from the | |
2172 | * usual behavior as two consecutive stop signals can't | |
780006ea ON |
2173 | * cause two group stops when !ptraced. That is why we |
2174 | * also check !task_is_stopped(t) below. | |
408a37de TH |
2175 | * |
2176 | * The condition can be distinguished by testing whether | |
2177 | * SIGNAL_STOP_STOPPED is already set. Don't generate | |
2178 | * group_exit_code in such case. | |
2179 | * | |
2180 | * This is not necessary for SIGNAL_STOP_CONTINUED because | |
2181 | * an intervening stop signal is required to cause two | |
2182 | * continued events regardless of ptrace. | |
1da177e4 | 2183 | */ |
408a37de TH |
2184 | if (!(sig->flags & SIGNAL_STOP_STOPPED)) |
2185 | sig->group_exit_code = signr; | |
1da177e4 | 2186 | |
7dd3db54 TH |
2187 | sig->group_stop_count = 0; |
2188 | ||
2189 | if (task_set_jobctl_pending(current, signr | gstop)) | |
2190 | sig->group_stop_count++; | |
1da177e4 | 2191 | |
8d38f203 ON |
2192 | t = current; |
2193 | while_each_thread(current, t) { | |
1da177e4 | 2194 | /* |
a122b341 ON |
2195 | * Setting state to TASK_STOPPED for a group |
2196 | * stop is always done with the siglock held, | |
2197 | * so this check has no races. | |
1da177e4 | 2198 | */ |
7dd3db54 TH |
2199 | if (!task_is_stopped(t) && |
2200 | task_set_jobctl_pending(t, signr | gstop)) { | |
ae6d2ed7 | 2201 | sig->group_stop_count++; |
fb1d910c TH |
2202 | if (likely(!(t->ptrace & PT_SEIZED))) |
2203 | signal_wake_up(t, 0); | |
2204 | else | |
2205 | ptrace_trap_notify(t); | |
a122b341 | 2206 | } |
d79fdd6d | 2207 | } |
1da177e4 | 2208 | } |
73ddff2b | 2209 | |
d21142ec | 2210 | if (likely(!current->ptrace)) { |
5224fa36 | 2211 | int notify = 0; |
1da177e4 | 2212 | |
5224fa36 TH |
2213 | /* |
2214 | * If there are no other threads in the group, or if there | |
2215 | * is a group stop in progress and we are the last to stop, | |
2216 | * report to the parent. | |
2217 | */ | |
2218 | if (task_participate_group_stop(current)) | |
2219 | notify = CLD_STOPPED; | |
2220 | ||
b5bf9a90 | 2221 | set_special_state(TASK_STOPPED); |
5224fa36 TH |
2222 | spin_unlock_irq(¤t->sighand->siglock); |
2223 | ||
62bcf9d9 TH |
2224 | /* |
2225 | * Notify the parent of the group stop completion. Because | |
2226 | * we're not holding either the siglock or tasklist_lock | |
2227 | * here, a ptracer may attach in between; however, this is for |
2228 | * group stop and should always be delivered to the real | |
2229 | * parent of the group leader. The new ptracer will get | |
2230 | * its notification when this task transitions into | |
2231 | * TASK_TRACED. | |
2232 | */ | |
5224fa36 TH |
2233 | if (notify) { |
2234 | read_lock(&tasklist_lock); | |
62bcf9d9 | 2235 | do_notify_parent_cldstop(current, false, notify); |
5224fa36 TH |
2236 | read_unlock(&tasklist_lock); |
2237 | } | |
2238 | ||
2239 | /* Now we don't run again until woken by SIGCONT or SIGKILL */ | |
5d8f72b5 | 2240 | freezable_schedule(); |
73ddff2b | 2241 | return true; |
d79fdd6d | 2242 | } else { |
73ddff2b TH |
2243 | /* |
2244 | * While ptraced, group stop is handled by STOP trap. | |
2245 | * Schedule it and let the caller deal with it. | |
2246 | */ | |
2247 | task_set_jobctl_pending(current, JOBCTL_TRAP_STOP); | |
2248 | return false; | |
ae6d2ed7 | 2249 | } |
73ddff2b | 2250 | } |
1da177e4 | 2251 | |
73ddff2b TH |
2252 | /** |
2253 | * do_jobctl_trap - take care of ptrace jobctl traps | |
2254 | * | |
3544d72a TH |
2255 | * When PT_SEIZED, it's used for both group stop and explicit |
2256 | * SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with | |
2257 | * accompanying siginfo. If stopped, lower eight bits of exit_code contain | |
2258 | * the stop signal; otherwise, %SIGTRAP. | |
2259 | * | |
2260 | * When !PT_SEIZED, it's used only for group stop trap with stop signal | |
2261 | * number as exit_code and no siginfo. | |
73ddff2b TH |
2262 | * |
2263 | * CONTEXT: | |
2264 | * Must be called with @current->sighand->siglock held, which may be | |
2265 | * released and re-acquired before returning with intervening sleep. | |
2266 | */ | |
2267 | static void do_jobctl_trap(void) | |
2268 | { | |
3544d72a | 2269 | struct signal_struct *signal = current->signal; |
73ddff2b | 2270 | int signr = current->jobctl & JOBCTL_STOP_SIGMASK; |
ae6d2ed7 | 2271 | |
3544d72a TH |
2272 | if (current->ptrace & PT_SEIZED) { |
2273 | if (!signal->group_stop_count && | |
2274 | !(signal->flags & SIGNAL_STOP_STOPPED)) | |
2275 | signr = SIGTRAP; | |
2276 | WARN_ON_ONCE(!signr); | |
2277 | ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8), | |
2278 | CLD_STOPPED); | |
2279 | } else { | |
2280 | WARN_ON_ONCE(!signr); | |
2281 | ptrace_stop(signr, CLD_STOPPED, 0, NULL); | |
2282 | current->exit_code = 0; | |
ae6d2ed7 | 2283 | } |
1da177e4 LT |
2284 | } |
2285 | ||
94eb22d5 | 2286 | static int ptrace_signal(int signr, siginfo_t *info) |
18c98b65 | 2287 | { |
8a352418 ON |
2288 | /* |
2289 | * We do not check sig_kernel_stop(signr) but set this marker | |
2290 | * unconditionally because we do not know whether the debugger will |
2291 | * change signr. This flag has no meaning unless we are going |
2292 | * to stop after return from ptrace_stop(). In this case it will |
2293 | * be checked in do_signal_stop(); we should only stop if it was |
2294 | * not cleared by SIGCONT while we were sleeping. See also the | |
2295 | * comment in dequeue_signal(). | |
2296 | */ | |
2297 | current->jobctl |= JOBCTL_STOP_DEQUEUED; | |
fe1bc6a0 | 2298 | ptrace_stop(signr, CLD_TRAPPED, 0, info); |
18c98b65 RM |
2299 | |
2300 | /* We're back. Did the debugger cancel the sig? */ | |
2301 | signr = current->exit_code; | |
2302 | if (signr == 0) | |
2303 | return signr; | |
2304 | ||
2305 | current->exit_code = 0; | |
2306 | ||
5aba085e RD |
2307 | /* |
2308 | * Update the siginfo structure if the signal has | |
2309 | * changed. If the debugger wanted something | |
2310 | * specific in the siginfo structure then it should | |
2311 | * have updated *info via PTRACE_SETSIGINFO. | |
2312 | */ | |
18c98b65 | 2313 | if (signr != info->si_signo) { |
faf1f22b | 2314 | clear_siginfo(info); |
18c98b65 RM |
2315 | info->si_signo = signr; |
2316 | info->si_errno = 0; | |
2317 | info->si_code = SI_USER; | |
6b550f94 | 2318 | rcu_read_lock(); |
18c98b65 | 2319 | info->si_pid = task_pid_vnr(current->parent); |
54ba47ed EB |
2320 | info->si_uid = from_kuid_munged(current_user_ns(), |
2321 | task_uid(current->parent)); | |
6b550f94 | 2322 | rcu_read_unlock(); |
18c98b65 RM |
2323 | } |
2324 | ||
2325 | /* If the (new) signal is now blocked, requeue it. */ | |
2326 | if (sigismember(¤t->blocked, signr)) { | |
b21c5bd5 | 2327 | send_signal(signr, info, current, PIDTYPE_PID); |
18c98b65 RM |
2328 | signr = 0; |
2329 | } | |
2330 | ||
2331 | return signr; | |
2332 | } | |
2333 | ||
20ab7218 | 2334 | bool get_signal(struct ksignal *ksig) |
1da177e4 | 2335 | { |
f6b76d4f ON |
2336 | struct sighand_struct *sighand = current->sighand; |
2337 | struct signal_struct *signal = current->signal; | |
2338 | int signr; | |
1da177e4 | 2339 | |
f784e8a7 ON |
2340 | if (unlikely(current->task_works)) |
2341 | task_work_run(); | |
72667028 | 2342 | |
0326f5a9 | 2343 | if (unlikely(uprobe_deny_signal())) |
20ab7218 | 2344 | return false; |
0326f5a9 | 2345 | |
13b1c3d4 | 2346 | /* |
5d8f72b5 ON |
2347 | * Do this once, we can't return to user-mode if freezing() == T. |
2348 | * do_signal_stop() and ptrace_stop() do freezable_schedule() and | |
2349 | * thus do not need another check after return. | |
13b1c3d4 | 2350 | */ |
fc558a74 RW |
2351 | try_to_freeze(); |
2352 | ||
5d8f72b5 | 2353 | relock: |
f6b76d4f | 2354 | spin_lock_irq(&sighand->siglock); |
021e1ae3 ON |
2355 | /* |
2356 | * Every stopped thread goes here after wakeup. Check to see if | |
2357 | * we should notify the parent, prepare_signal(SIGCONT) encodes | |
2358 | * the CLD_ si_code into SIGNAL_CLD_MASK bits. | |
2359 | */ | |
f6b76d4f | 2360 | if (unlikely(signal->flags & SIGNAL_CLD_MASK)) { |
c672af35 TH |
2361 | int why; |
2362 | ||
2363 | if (signal->flags & SIGNAL_CLD_CONTINUED) | |
2364 | why = CLD_CONTINUED; | |
2365 | else | |
2366 | why = CLD_STOPPED; | |
2367 | ||
f6b76d4f | 2368 | signal->flags &= ~SIGNAL_CLD_MASK; |
e4420551 | 2369 | |
ae6d2ed7 | 2370 | spin_unlock_irq(&sighand->siglock); |
fa00b80b | 2371 | |
ceb6bd67 TH |
2372 | /* |
2373 | * Notify the parent that we're continuing. This event is | |
2374 | * always per-process and doesn't make a whole lot of sense |
2375 | * for ptracers, who shouldn't consume the state via | |
2376 | * wait(2) either, but, for backward compatibility, notify | |
2377 | * the ptracer of the group leader too unless it's gonna be | |
2378 | * a duplicate. | |
2379 | */ | |
edf2ed15 | 2380 | read_lock(&tasklist_lock); |
ceb6bd67 TH |
2381 | do_notify_parent_cldstop(current, false, why); |
2382 | ||
bb3696da ON |
2383 | if (ptrace_reparented(current->group_leader)) |
2384 | do_notify_parent_cldstop(current->group_leader, | |
2385 | true, why); | |
edf2ed15 | 2386 | read_unlock(&tasklist_lock); |
ceb6bd67 | 2387 | |
e4420551 ON |
2388 | goto relock; |
2389 | } | |
2390 | ||
1da177e4 LT |
2391 | for (;;) { |
2392 | struct k_sigaction *ka; | |
1be53963 | 2393 | |
dd1d6772 TH |
2394 | if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) && |
2395 | do_signal_stop(0)) | |
7bcf6a2c | 2396 | goto relock; |
1be53963 | 2397 | |
73ddff2b TH |
2398 | if (unlikely(current->jobctl & JOBCTL_TRAP_MASK)) { |
2399 | do_jobctl_trap(); | |
2400 | spin_unlock_irq(&sighand->siglock); | |
2401 | goto relock; | |
2402 | } | |
1da177e4 | 2403 | |
828b1f65 | 2404 | signr = dequeue_signal(current, ¤t->blocked, &ksig->info); |
7bcf6a2c | 2405 | |
dd1d6772 TH |
2406 | if (!signr) |
2407 | break; /* will return 0 */ | |
7bcf6a2c | 2408 | |
8a352418 | 2409 | if (unlikely(current->ptrace) && signr != SIGKILL) { |
828b1f65 | 2410 | signr = ptrace_signal(signr, &ksig->info); |
dd1d6772 TH |
2411 | if (!signr) |
2412 | continue; | |
1da177e4 LT |
2413 | } |
2414 | ||
dd1d6772 TH |
2415 | ka = &sighand->action[signr-1]; |
2416 | ||
f9d4257e | 2417 | /* Trace actually delivered signals. */ |
828b1f65 | 2418 | trace_signal_deliver(signr, &ksig->info, ka); |
f9d4257e | 2419 | |
1da177e4 LT |
2420 | if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */ |
2421 | continue; | |
2422 | if (ka->sa.sa_handler != SIG_DFL) { | |
2423 | /* Run the handler. */ | |
828b1f65 | 2424 | ksig->ka = *ka; |
1da177e4 LT |
2425 | |
2426 | if (ka->sa.sa_flags & SA_ONESHOT) | |
2427 | ka->sa.sa_handler = SIG_DFL; | |
2428 | ||
2429 | break; /* will return non-zero "signr" value */ | |
2430 | } | |
2431 | ||
2432 | /* | |
2433 | * Now we are doing the default action for this signal. | |
2434 | */ | |
2435 | if (sig_kernel_ignore(signr)) /* Default is nothing. */ | |
2436 | continue; | |
2437 | ||
84d73786 | 2438 | /* |
0fbc26a6 | 2439 | * Global init gets no signals it doesn't want. |
b3bfa0cb SB |
2440 | * Container-init gets no signals it doesn't want from the same |
2441 | * container. | |
2442 | * | |
2443 | * Note that if global/container-init sees a sig_kernel_only() | |
2444 | * signal here, the signal must have been generated internally | |
2445 | * or must have come from an ancestor namespace. In either | |
2446 | * case, the signal cannot be dropped. | |
84d73786 | 2447 | */ |
fae5fa44 | 2448 | if (unlikely(signal->flags & SIGNAL_UNKILLABLE) && |
b3bfa0cb | 2449 | !sig_kernel_only(signr)) |
1da177e4 LT |
2450 | continue; |
2451 | ||
2452 | if (sig_kernel_stop(signr)) { | |
2453 | /* | |
2454 | * The default action is to stop all threads in | |
2455 | * the thread group. The job control signals | |
2456 | * do nothing in an orphaned pgrp, but SIGSTOP | |
2457 | * always works. Note that siglock needs to be | |
2458 | * dropped during the call to is_current_pgrp_orphaned() |
2459 | * because of lock ordering with tasklist_lock. | |
2460 | * This allows an intervening SIGCONT to be posted. | |
2461 | * We need to check for that and bail out if necessary. | |
2462 | */ | |
2463 | if (signr != SIGSTOP) { | |
f6b76d4f | 2464 | spin_unlock_irq(&sighand->siglock); |
1da177e4 LT |
2465 | |
2466 | /* signals can be posted during this window */ | |
2467 | ||
3e7cd6c4 | 2468 | if (is_current_pgrp_orphaned()) |
1da177e4 LT |
2469 | goto relock; |
2470 | ||
f6b76d4f | 2471 | spin_lock_irq(&sighand->siglock); |
1da177e4 LT |
2472 | } |
2473 | ||
828b1f65 | 2474 | if (likely(do_signal_stop(ksig->info.si_signo))) { |
1da177e4 LT |
2475 | /* It released the siglock. */ |
2476 | goto relock; | |
2477 | } | |
2478 | ||
2479 | /* | |
2480 | * We didn't actually stop, due to a race | |
2481 | * with SIGCONT or something like that. | |
2482 | */ | |
2483 | continue; | |
2484 | } | |
2485 | ||
f6b76d4f | 2486 | spin_unlock_irq(&sighand->siglock); |
1da177e4 LT |
2487 | |
2488 | /* | |
2489 | * Anything else is fatal, maybe with a core dump. | |
2490 | */ | |
2491 | current->flags |= PF_SIGNALED; | |
2dce81bf | 2492 | |
1da177e4 | 2493 | if (sig_kernel_coredump(signr)) { |
2dce81bf | 2494 | if (print_fatal_signals) |
828b1f65 | 2495 | print_fatal_signal(ksig->info.si_signo); |
2b5faa4c | 2496 | proc_coredump_connector(current); |
1da177e4 LT |
2497 | /* |
2498 | * If it was able to dump core, this kills all | |
2499 | * other threads in the group and synchronizes with | |
2500 | * their demise. If we lost the race with another | |
2501 | * thread getting here, it set group_exit_code | |
2502 | * first and our do_group_exit call below will use | |
2503 | * that value and ignore the one we pass it. | |
2504 | */ | |
828b1f65 | 2505 | do_coredump(&ksig->info); |
1da177e4 LT |
2506 | } |
2507 | ||
2508 | /* | |
2509 | * Death signals, no core dump. | |
2510 | */ | |
828b1f65 | 2511 | do_group_exit(ksig->info.si_signo); |
1da177e4 LT |
2512 | /* NOTREACHED */ |
2513 | } | |
f6b76d4f | 2514 | spin_unlock_irq(&sighand->siglock); |
828b1f65 RW |
2515 | |
2516 | ksig->sig = signr; | |
2517 | return ksig->sig > 0; | |
1da177e4 LT |
2518 | } |
2519 | ||
5e6292c0 | 2520 | /** |
efee984c | 2521 | * signal_delivered - update state after a signal has been delivered |
10b1c7ac | 2522 | * @ksig: kernel signal struct |
efee984c | 2523 | * @stepping: nonzero if debugger single-step or block-step in use |
5e6292c0 | 2524 | * |
e227867f | 2525 | * This function should be called when a signal has successfully been |
10b1c7ac | 2526 | * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask |
efee984c | 2527 | * is always blocked, and the signal itself is blocked unless %SA_NODEFER |
10b1c7ac | 2528 | * is set in @ksig->ka.sa.sa_flags). Tracing is notified. |
5e6292c0 | 2529 | */ |
10b1c7ac | 2530 | static void signal_delivered(struct ksignal *ksig, int stepping) |
5e6292c0 MF |
2531 | { |
2532 | sigset_t blocked; | |
2533 | ||
a610d6e6 AV |
2534 | /* A signal was successfully delivered, and the |
2535 | saved sigmask was stored on the signal frame, | |
2536 | and will be restored by sigreturn. So we can | |
2537 | simply clear the restore sigmask flag. */ | |
2538 | clear_restore_sigmask(); | |
2539 | ||
10b1c7ac RW |
2540 | sigorsets(&blocked, ¤t->blocked, &ksig->ka.sa.sa_mask); |
2541 | if (!(ksig->ka.sa.sa_flags & SA_NODEFER)) | |
2542 | sigaddset(&blocked, ksig->sig); | |
5e6292c0 | 2543 | set_current_blocked(&blocked); |
df5601f9 | 2544 | tracehook_signal_handler(stepping); |
5e6292c0 MF |
2545 | } |
2546 | ||
2ce5da17 AV |
2547 | void signal_setup_done(int failed, struct ksignal *ksig, int stepping) |
2548 | { | |
2549 | if (failed) | |
2550 | force_sigsegv(ksig->sig, current); | |
2551 | else | |
10b1c7ac | 2552 | signal_delivered(ksig, stepping); |
2ce5da17 AV |
2553 | } |
2554 | ||
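/*
 * Illustrative sketch (not part of this file): an architecture's
 * return-to-user signal path glues get_signal() and signal_setup_done()
 * together roughly like this:
 *
 *	struct ksignal ksig;
 *
 *	if (get_signal(&ksig)) {
 *		failed = setup_rt_frame(&ksig, regs);
 *		signal_setup_done(failed, &ksig, stepping);
 *	}
 *
 * setup_rt_frame(), regs and stepping stand in for whatever the
 * architecture uses to build the user-space signal frame and to track
 * single-stepping.
 */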
0edceb7b ON |
2555 | /* |
2556 | * It could be that complete_signal() picked us to notify about the | |
fec9993d ON |
2557 | * group-wide signal. Other threads should be notified now to take |
2558 | * the shared signals in @which since we will not. | |
0edceb7b | 2559 | */ |
f646e227 | 2560 | static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which) |
0edceb7b | 2561 | { |
f646e227 | 2562 | sigset_t retarget; |
0edceb7b ON |
2563 | struct task_struct *t; |
2564 | ||
f646e227 ON |
2565 | sigandsets(&retarget, &tsk->signal->shared_pending.signal, which); |
2566 | if (sigisemptyset(&retarget)) | |
2567 | return; | |
2568 | ||
0edceb7b ON |
2569 | t = tsk; |
2570 | while_each_thread(tsk, t) { | |
fec9993d ON |
2571 | if (t->flags & PF_EXITING) |
2572 | continue; | |
2573 | ||
2574 | if (!has_pending_signals(&retarget, &t->blocked)) | |
2575 | continue; | |
2576 | /* Remove the signals this thread can handle. */ | |
2577 | sigandsets(&retarget, &retarget, &t->blocked); | |
2578 | ||
2579 | if (!signal_pending(t)) | |
2580 | signal_wake_up(t, 0); | |
2581 | ||
2582 | if (sigisemptyset(&retarget)) | |
2583 | break; | |
0edceb7b ON |
2584 | } |
2585 | } | |
2586 | ||
d12619b5 ON |
2587 | void exit_signals(struct task_struct *tsk) |
2588 | { | |
2589 | int group_stop = 0; | |
f646e227 | 2590 | sigset_t unblocked; |
d12619b5 | 2591 | |
77e4ef99 TH |
2592 | /* |
2593 | * @tsk is about to have PF_EXITING set - lock out users which | |
2594 | * expect stable threadgroup. | |
2595 | */ | |
780de9dd | 2596 | cgroup_threadgroup_change_begin(tsk); |
77e4ef99 | 2597 | |
5dee1707 ON |
2598 | if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) { |
2599 | tsk->flags |= PF_EXITING; | |
780de9dd | 2600 | cgroup_threadgroup_change_end(tsk); |
5dee1707 | 2601 | return; |
d12619b5 ON |
2602 | } |
2603 | ||
5dee1707 | 2604 | spin_lock_irq(&tsk->sighand->siglock); |
d12619b5 ON |
2605 | /* |
2606 | * From now on this task is not visible for group-wide signals, |
2607 | * see wants_signal(), do_signal_stop(). | |
2608 | */ | |
2609 | tsk->flags |= PF_EXITING; | |
77e4ef99 | 2610 | |
780de9dd | 2611 | cgroup_threadgroup_change_end(tsk); |
77e4ef99 | 2612 | |
5dee1707 ON |
2613 | if (!signal_pending(tsk)) |
2614 | goto out; | |
2615 | ||
f646e227 ON |
2616 | unblocked = tsk->blocked; |
2617 | signotset(&unblocked); | |
2618 | retarget_shared_pending(tsk, &unblocked); | |
5dee1707 | 2619 | |
a8f072c1 | 2620 | if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) && |
e5c1902e | 2621 | task_participate_group_stop(tsk)) |
edf2ed15 | 2622 | group_stop = CLD_STOPPED; |
5dee1707 | 2623 | out: |
d12619b5 ON |
2624 | spin_unlock_irq(&tsk->sighand->siglock); |
2625 | ||
62bcf9d9 TH |
2626 | /* |
2627 | * If group stop has completed, deliver the notification. This | |
2628 | * should always go to the real parent of the group leader. | |
2629 | */ | |
ae6d2ed7 | 2630 | if (unlikely(group_stop)) { |
d12619b5 | 2631 | read_lock(&tasklist_lock); |
62bcf9d9 | 2632 | do_notify_parent_cldstop(tsk, false, group_stop); |
d12619b5 ON |
2633 | read_unlock(&tasklist_lock); |
2634 | } | |
2635 | } | |
2636 | ||
1da177e4 LT |
2637 | EXPORT_SYMBOL(recalc_sigpending); |
2638 | EXPORT_SYMBOL_GPL(dequeue_signal); | |
2639 | EXPORT_SYMBOL(flush_signals); | |
2640 | EXPORT_SYMBOL(force_sig); | |
1da177e4 LT |
2641 | EXPORT_SYMBOL(send_sig); |
2642 | EXPORT_SYMBOL(send_sig_info); | |
2643 | EXPORT_SYMBOL(sigprocmask); | |
1da177e4 LT |
2644 | |
2645 | /* | |
2646 | * System call entry points. | |
2647 | */ | |
2648 | ||
41c57892 RD |
2649 | /** |
2650 | * sys_restart_syscall - restart a system call | |
2651 | */ | |
754fe8d2 | 2652 | SYSCALL_DEFINE0(restart_syscall) |
1da177e4 | 2653 | { |
f56141e3 | 2654 | struct restart_block *restart = ¤t->restart_block; |
1da177e4 LT |
2655 | return restart->fn(restart); |
2656 | } | |
2657 | ||
2658 | long do_no_restart_syscall(struct restart_block *param) | |
2659 | { | |
2660 | return -EINTR; | |
2661 | } | |
2662 | ||
b182801a ON |
2663 | static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset) |
2664 | { | |
2665 | if (signal_pending(tsk) && !thread_group_empty(tsk)) { | |
2666 | sigset_t newblocked; | |
2667 | /* A set of now blocked but previously unblocked signals. */ | |
702a5073 | 2668 | sigandnsets(&newblocked, newset, ¤t->blocked); |
b182801a ON |
2669 | retarget_shared_pending(tsk, &newblocked); |
2670 | } | |
2671 | tsk->blocked = *newset; | |
2672 | recalc_sigpending(); | |
2673 | } | |
2674 | ||
e6fa16ab ON |
2675 | /** |
2676 | * set_current_blocked - change current->blocked mask | |
2677 | * @newset: new mask | |
2678 | * | |
2679 | * It is wrong to change ->blocked directly; this helper should be used |
2680 | * to ensure the process can't miss a shared signal we are going to block. | |
1da177e4 | 2681 | */ |
77097ae5 AV |
2682 | void set_current_blocked(sigset_t *newset) |
2683 | { | |
77097ae5 | 2684 | sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP)); |
0c4a8423 | 2685 | __set_current_blocked(newset); |
77097ae5 AV |
2686 | } |
2687 | ||
2688 | void __set_current_blocked(const sigset_t *newset) | |
e6fa16ab ON |
2689 | { |
2690 | struct task_struct *tsk = current; | |
2691 | ||
c7be96af WL |
2692 | /* |
2693 | * In case the signal mask hasn't changed, there is nothing we need | |
2694 | * to do. The current->blocked shouldn't be modified by another task. |
2695 | */ | |
2696 | if (sigequalsets(&tsk->blocked, newset)) | |
2697 | return; | |
2698 | ||
e6fa16ab | 2699 | spin_lock_irq(&tsk->sighand->siglock); |
b182801a | 2700 | __set_task_blocked(tsk, newset); |
e6fa16ab ON |
2701 | spin_unlock_irq(&tsk->sighand->siglock); |
2702 | } | |
1da177e4 LT |
2703 | |
2704 | /* | |
2705 | * This is also useful for kernel threads that want to temporarily | |
2706 | * (or permanently) block certain signals. | |
2707 | * | |
2708 | * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel | |
2709 | * interface happily blocks "unblockable" signals like SIGKILL | |
2710 | * and friends. | |
2711 | */ | |
2712 | int sigprocmask(int how, sigset_t *set, sigset_t *oldset) | |
2713 | { | |
73ef4aeb ON |
2714 | struct task_struct *tsk = current; |
2715 | sigset_t newset; | |
1da177e4 | 2716 | |
73ef4aeb | 2717 | /* Lockless, only current can change ->blocked, never from irq */ |
a26fd335 | 2718 | if (oldset) |
73ef4aeb | 2719 | *oldset = tsk->blocked; |
a26fd335 | 2720 | |
1da177e4 LT |
2721 | switch (how) { |
2722 | case SIG_BLOCK: | |
73ef4aeb | 2723 | sigorsets(&newset, &tsk->blocked, set); |
1da177e4 LT |
2724 | break; |
2725 | case SIG_UNBLOCK: | |
702a5073 | 2726 | sigandnsets(&newset, &tsk->blocked, set); |
1da177e4 LT |
2727 | break; |
2728 | case SIG_SETMASK: | |
73ef4aeb | 2729 | newset = *set; |
1da177e4 LT |
2730 | break; |
2731 | default: | |
73ef4aeb | 2732 | return -EINVAL; |
1da177e4 | 2733 | } |
a26fd335 | 2734 | |
77097ae5 | 2735 | __set_current_blocked(&newset); |
73ef4aeb | 2736 | return 0; |
1da177e4 LT |
2737 | } |
2738 | ||
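/*
 * Illustrative sketch (not part of this file): a kernel thread that
 * wants to keep SIGKILL deliverable while blocking everything else
 * could use the in-kernel sigprocmask() like this:
 *
 *	sigset_t blocked;
 *
 *	sigfillset(&blocked);
 *	sigdelset(&blocked, SIGKILL);
 *	sigprocmask(SIG_SETMASK, &blocked, NULL);
 */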
41c57892 RD |
2739 | /** |
2740 | * sys_rt_sigprocmask - change the list of currently blocked signals | |
2741 | * @how: whether to add, remove, or set signals | |
ada9c933 | 2742 | * @nset: new set of blocked signals, or NULL to leave the mask unchanged |
41c57892 RD |
2743 | * @oset: previous value of signal mask if non-null |
2744 | * @sigsetsize: size of sigset_t type | |
2745 | */ | |
bb7efee2 | 2746 | SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset, |
17da2bd9 | 2747 | sigset_t __user *, oset, size_t, sigsetsize) |
1da177e4 | 2748 | { |
1da177e4 | 2749 | sigset_t old_set, new_set; |
bb7efee2 | 2750 | int error; |
1da177e4 LT |
2751 | |
2752 | /* XXX: Don't preclude handling different sized sigset_t's. */ | |
2753 | if (sigsetsize != sizeof(sigset_t)) | |
bb7efee2 | 2754 | return -EINVAL; |
1da177e4 | 2755 | |
bb7efee2 ON |
2756 | old_set = current->blocked; |
2757 | ||
2758 | if (nset) { | |
2759 | if (copy_from_user(&new_set, nset, sizeof(sigset_t))) | |
2760 | return -EFAULT; | |
1da177e4 LT |
2761 | sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP)); |
2762 | ||
bb7efee2 | 2763 | error = sigprocmask(how, &new_set, NULL); |
1da177e4 | 2764 | if (error) |
bb7efee2 ON |
2765 | return error; |
2766 | } | |
1da177e4 | 2767 | |
bb7efee2 ON |
2768 | if (oset) { |
2769 | if (copy_to_user(oset, &old_set, sizeof(sigset_t))) | |
2770 | return -EFAULT; | |
1da177e4 | 2771 | } |
bb7efee2 ON |
2772 | |
2773 | return 0; | |
1da177e4 LT |
2774 | } |
2775 | ||
322a56cb | 2776 | #ifdef CONFIG_COMPAT |
322a56cb AV |
2777 | COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset, |
2778 | compat_sigset_t __user *, oset, compat_size_t, sigsetsize) | |
1da177e4 | 2779 | { |
322a56cb AV |
2780 | sigset_t old_set = current->blocked; |
2781 | ||
2782 | /* XXX: Don't preclude handling different sized sigset_t's. */ | |
2783 | if (sigsetsize != sizeof(sigset_t)) | |
2784 | return -EINVAL; | |
2785 | ||
2786 | if (nset) { | |
322a56cb AV |
2787 | sigset_t new_set; |
2788 | int error; | |
3968cf62 | 2789 | if (get_compat_sigset(&new_set, nset)) |
322a56cb | 2790 | return -EFAULT; |
322a56cb AV |
2791 | sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP)); |
2792 | ||
2793 | error = sigprocmask(how, &new_set, NULL); | |
2794 | if (error) | |
2795 | return error; | |
2796 | } | |
f454322e | 2797 | return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0; |
322a56cb AV |
2798 | } |
2799 | #endif | |
1da177e4 | 2800 | |
b1d294c8 | 2801 | static void do_sigpending(sigset_t *set) |
1da177e4 | 2802 | { |
1da177e4 | 2803 | spin_lock_irq(¤t->sighand->siglock); |
fe9c1db2 | 2804 | sigorsets(set, ¤t->pending.signal, |
1da177e4 LT |
2805 | ¤t->signal->shared_pending.signal); |
2806 | spin_unlock_irq(¤t->sighand->siglock); | |
2807 | ||
2808 | /* Outside the lock because only this thread touches it. */ | |
fe9c1db2 | 2809 | sigandsets(set, ¤t->blocked, set); |
5aba085e | 2810 | } |
1da177e4 | 2811 | |
41c57892 RD |
2812 | /** |
2813 | * sys_rt_sigpending - examine a pending signal that has been raised | |
2814 | * while blocked | |
20f22ab4 | 2815 | * @uset: stores pending signals |
41c57892 RD |
2816 | * @sigsetsize: size of sigset_t type or larger |
2817 | */ | |
fe9c1db2 | 2818 | SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize) |
1da177e4 | 2819 | { |
fe9c1db2 | 2820 | sigset_t set; |
176826af DL |
2821 | |
2822 | if (sigsetsize > sizeof(*uset)) | |
2823 | return -EINVAL; | |
2824 | ||
b1d294c8 CB |
2825 | do_sigpending(&set); |
2826 | ||
2827 | if (copy_to_user(uset, &set, sigsetsize)) | |
2828 | return -EFAULT; | |
2829 | ||
2830 | return 0; | |
fe9c1db2 AV |
2831 | } |
2832 | ||
2833 | #ifdef CONFIG_COMPAT | |
fe9c1db2 AV |
2834 | COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset, |
2835 | compat_size_t, sigsetsize) | |
1da177e4 | 2836 | { |
fe9c1db2 | 2837 | sigset_t set; |
176826af DL |
2838 | |
2839 | if (sigsetsize > sizeof(*uset)) | |
2840 | return -EINVAL; | |
2841 | ||
b1d294c8 CB |
2842 | do_sigpending(&set); |
2843 | ||
2844 | return put_compat_sigset(uset, &set, sigsetsize); | |
1da177e4 | 2845 | } |
fe9c1db2 | 2846 | #endif |
1da177e4 | 2847 | |
cc731525 EB |
2848 | enum siginfo_layout siginfo_layout(int sig, int si_code) |
2849 | { | |
2850 | enum siginfo_layout layout = SIL_KILL; | |
2851 | if ((si_code > SI_USER) && (si_code < SI_KERNEL)) { | |
2852 | static const struct { | |
2853 | unsigned char limit, layout; | |
2854 | } filter[] = { | |
2855 | [SIGILL] = { NSIGILL, SIL_FAULT }, | |
2856 | [SIGFPE] = { NSIGFPE, SIL_FAULT }, | |
2857 | [SIGSEGV] = { NSIGSEGV, SIL_FAULT }, | |
2858 | [SIGBUS] = { NSIGBUS, SIL_FAULT }, | |
2859 | [SIGTRAP] = { NSIGTRAP, SIL_FAULT }, | |
c3aff086 | 2860 | #if defined(SIGEMT) && defined(NSIGEMT) |
cc731525 EB |
2861 | [SIGEMT] = { NSIGEMT, SIL_FAULT }, |
2862 | #endif | |
2863 | [SIGCHLD] = { NSIGCHLD, SIL_CHLD }, | |
2864 | [SIGPOLL] = { NSIGPOLL, SIL_POLL }, | |
cc731525 | 2865 | [SIGSYS] = { NSIGSYS, SIL_SYS }, |
cc731525 | 2866 | }; |
31931c93 | 2867 | if ((sig < ARRAY_SIZE(filter)) && (si_code <= filter[sig].limit)) { |
cc731525 | 2868 | layout = filter[sig].layout; |
31931c93 EB |
2869 | /* Handle the exceptions */ |
2870 | if ((sig == SIGBUS) && | |
2871 | (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO)) | |
2872 | layout = SIL_FAULT_MCEERR; | |
2873 | else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR)) | |
2874 | layout = SIL_FAULT_BNDERR; | |
2875 | #ifdef SEGV_PKUERR | |
2876 | else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR)) | |
2877 | layout = SIL_FAULT_PKUERR; | |
2878 | #endif | |
2879 | } | |
cc731525 EB |
2880 | else if (si_code <= NSIGPOLL) |
2881 | layout = SIL_POLL; | |
2882 | } else { | |
2883 | if (si_code == SI_TIMER) | |
2884 | layout = SIL_TIMER; | |
2885 | else if (si_code == SI_SIGIO) | |
2886 | layout = SIL_POLL; | |
2887 | else if (si_code < 0) | |
2888 | layout = SIL_RT; | |
cc731525 EB |
2889 | } |
2890 | return layout; | |
2891 | } | |
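/*
 * Illustrative userspace counterpart (not part of signal.c) of the layout
 * rules above: an SA_SIGINFO handler should only trust the siginfo union
 * members that are valid for its (signo, si_code) pair.  For SIGSEGV the
 * SIL_FAULT layout makes si_addr the faulting address.  fprintf() is used
 * only for brevity; a real handler would stick to async-signal-safe calls.
 */
#define _POSIX_C_SOURCE 200809L
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void segv_handler(int sig, siginfo_t *info, void *ucontext)
{
	(void)sig;
	(void)ucontext;
	fprintf(stderr, "fault at %p (si_code=%d)\n", info->si_addr,
		info->si_code);
	_exit(1);
}

static int install_segv_handler(void)
{
	struct sigaction sa = { 0 };

	sa.sa_sigaction = segv_handler;
	sa.sa_flags = SA_SIGINFO;
	sigemptyset(&sa.sa_mask);
	return sigaction(SIGSEGV, &sa, NULL);
}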
2892 | ||
ce395960 | 2893 | int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from) |
1da177e4 | 2894 | { |
c999b933 | 2895 | if (copy_to_user(to, from , sizeof(struct siginfo))) |
1da177e4 | 2896 | return -EFAULT; |
c999b933 | 2897 | return 0; |
1da177e4 LT |
2898 | } |
2899 | ||
212a36a1 | 2900 | #ifdef CONFIG_COMPAT |
ea64d5ac EB |
2901 | int copy_siginfo_to_user32(struct compat_siginfo __user *to, |
2902 | const struct siginfo *from) | |
2903 | #if defined(CONFIG_X86_X32_ABI) || defined(CONFIG_IA32_EMULATION) | |
2904 | { | |
2905 | return __copy_siginfo_to_user32(to, from, in_x32_syscall()); | |
2906 | } | |
2907 | int __copy_siginfo_to_user32(struct compat_siginfo __user *to, | |
2908 | const struct siginfo *from, bool x32_ABI) | |
2909 | #endif | |
2910 | { | |
2911 | struct compat_siginfo new; | |
2912 | memset(&new, 0, sizeof(new)); | |
2913 | ||
2914 | new.si_signo = from->si_signo; | |
2915 | new.si_errno = from->si_errno; | |
2916 | new.si_code = from->si_code; | |
2917 | switch(siginfo_layout(from->si_signo, from->si_code)) { | |
2918 | case SIL_KILL: | |
2919 | new.si_pid = from->si_pid; | |
2920 | new.si_uid = from->si_uid; | |
2921 | break; | |
2922 | case SIL_TIMER: | |
2923 | new.si_tid = from->si_tid; | |
2924 | new.si_overrun = from->si_overrun; | |
2925 | new.si_int = from->si_int; | |
2926 | break; | |
2927 | case SIL_POLL: | |
2928 | new.si_band = from->si_band; | |
2929 | new.si_fd = from->si_fd; | |
2930 | break; | |
2931 | case SIL_FAULT: | |
2932 | new.si_addr = ptr_to_compat(from->si_addr); | |
2933 | #ifdef __ARCH_SI_TRAPNO | |
2934 | new.si_trapno = from->si_trapno; | |
2935 | #endif | |
31931c93 EB |
2936 | break; |
2937 | case SIL_FAULT_MCEERR: | |
2938 | new.si_addr = ptr_to_compat(from->si_addr); | |
2939 | #ifdef __ARCH_SI_TRAPNO | |
2940 | new.si_trapno = from->si_trapno; | |
ea64d5ac | 2941 | #endif |
31931c93 EB |
2942 | new.si_addr_lsb = from->si_addr_lsb; |
2943 | break; | |
2944 | case SIL_FAULT_BNDERR: | |
2945 | new.si_addr = ptr_to_compat(from->si_addr); | |
2946 | #ifdef __ARCH_SI_TRAPNO | |
2947 | new.si_trapno = from->si_trapno; | |
ea64d5ac | 2948 | #endif |
31931c93 EB |
2949 | new.si_lower = ptr_to_compat(from->si_lower); |
2950 | new.si_upper = ptr_to_compat(from->si_upper); | |
2951 | break; | |
2952 | case SIL_FAULT_PKUERR: | |
2953 | new.si_addr = ptr_to_compat(from->si_addr); | |
2954 | #ifdef __ARCH_SI_TRAPNO | |
2955 | new.si_trapno = from->si_trapno; | |
ea64d5ac | 2956 | #endif |
31931c93 | 2957 | new.si_pkey = from->si_pkey; |
ea64d5ac EB |
2958 | break; |
2959 | case SIL_CHLD: | |
2960 | new.si_pid = from->si_pid; | |
2961 | new.si_uid = from->si_uid; | |
2962 | new.si_status = from->si_status; | |
2963 | #ifdef CONFIG_X86_X32_ABI | |
2964 | if (x32_ABI) { | |
2965 | new._sifields._sigchld_x32._utime = from->si_utime; | |
2966 | new._sifields._sigchld_x32._stime = from->si_stime; | |
2967 | } else | |
2968 | #endif | |
2969 | { | |
2970 | new.si_utime = from->si_utime; | |
2971 | new.si_stime = from->si_stime; | |
2972 | } | |
2973 | break; | |
2974 | case SIL_RT: | |
2975 | new.si_pid = from->si_pid; | |
2976 | new.si_uid = from->si_uid; | |
2977 | new.si_int = from->si_int; | |
2978 | break; | |
2979 | case SIL_SYS: | |
2980 | new.si_call_addr = ptr_to_compat(from->si_call_addr); | |
2981 | new.si_syscall = from->si_syscall; | |
2982 | new.si_arch = from->si_arch; | |
2983 | break; | |
2984 | } | |
2985 | ||
2986 | if (copy_to_user(to, &new, sizeof(struct compat_siginfo))) | |
2987 | return -EFAULT; | |
2988 | ||
2989 | return 0; | |
2990 | } | |
2991 | ||
212a36a1 EB |
2992 | int copy_siginfo_from_user32(struct siginfo *to, |
2993 | const struct compat_siginfo __user *ufrom) | |
2994 | { | |
2995 | struct compat_siginfo from; | |
2996 | ||
2997 | if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo))) | |
2998 | return -EFAULT; | |
2999 | ||
3000 | clear_siginfo(to); | |
3001 | to->si_signo = from.si_signo; | |
3002 | to->si_errno = from.si_errno; | |
3003 | to->si_code = from.si_code; | |
3004 | switch(siginfo_layout(from.si_signo, from.si_code)) { | |
3005 | case SIL_KILL: | |
3006 | to->si_pid = from.si_pid; | |
3007 | to->si_uid = from.si_uid; | |
3008 | break; | |
3009 | case SIL_TIMER: | |
3010 | to->si_tid = from.si_tid; | |
3011 | to->si_overrun = from.si_overrun; | |
3012 | to->si_int = from.si_int; | |
3013 | break; | |
3014 | case SIL_POLL: | |
3015 | to->si_band = from.si_band; | |
3016 | to->si_fd = from.si_fd; | |
3017 | break; | |
3018 | case SIL_FAULT: | |
3019 | to->si_addr = compat_ptr(from.si_addr); | |
3020 | #ifdef __ARCH_SI_TRAPNO | |
3021 | to->si_trapno = from.si_trapno; | |
3022 | #endif | |
31931c93 EB |
3023 | break; |
3024 | case SIL_FAULT_MCEERR: | |
3025 | to->si_addr = compat_ptr(from.si_addr); | |
3026 | #ifdef __ARCH_SI_TRAPNO | |
3027 | to->si_trapno = from.si_trapno; | |
212a36a1 | 3028 | #endif |
31931c93 EB |
3029 | to->si_addr_lsb = from.si_addr_lsb; |
3030 | break; | |
3031 | case SIL_FAULT_BNDERR: | |
3032 | to->si_addr = compat_ptr(from.si_addr); | |
3033 | #ifdef __ARCH_SI_TRAPNO | |
3034 | to->si_trapno = from.si_trapno; | |
212a36a1 | 3035 | #endif |
31931c93 EB |
3036 | to->si_lower = compat_ptr(from.si_lower); |
3037 | to->si_upper = compat_ptr(from.si_upper); | |
3038 | break; | |
3039 | case SIL_FAULT_PKUERR: | |
3040 | to->si_addr = compat_ptr(from.si_addr); | |
3041 | #ifdef __ARCH_SI_TRAPNO | |
3042 | to->si_trapno = from.si_trapno; | |
212a36a1 | 3043 | #endif |
31931c93 | 3044 | to->si_pkey = from.si_pkey; |
212a36a1 EB |
3045 | break; |
3046 | case SIL_CHLD: | |
3047 | to->si_pid = from.si_pid; | |
3048 | to->si_uid = from.si_uid; | |
3049 | to->si_status = from.si_status; | |
3050 | #ifdef CONFIG_X86_X32_ABI | |
3051 | if (in_x32_syscall()) { | |
3052 | to->si_utime = from._sifields._sigchld_x32._utime; | |
3053 | to->si_stime = from._sifields._sigchld_x32._stime; | |
3054 | } else | |
3055 | #endif | |
3056 | { | |
3057 | to->si_utime = from.si_utime; | |
3058 | to->si_stime = from.si_stime; | |
3059 | } | |
3060 | break; | |
3061 | case SIL_RT: | |
3062 | to->si_pid = from.si_pid; | |
3063 | to->si_uid = from.si_uid; | |
3064 | to->si_int = from.si_int; | |
3065 | break; | |
3066 | case SIL_SYS: | |
3067 | to->si_call_addr = compat_ptr(from.si_call_addr); | |
3068 | to->si_syscall = from.si_syscall; | |
3069 | to->si_arch = from.si_arch; | |
3070 | break; | |
3071 | } | |
3072 | return 0; | |
3073 | } | |
3074 | #endif /* CONFIG_COMPAT */ | |
3075 | ||
943df148 ON |
3076 | /** |
3077 | * do_sigtimedwait - wait for queued signals specified in @which | |
3078 | * @which: queued signals to wait for | |
3079 | * @info: if non-null, the signal's siginfo is returned here | |
3080 | * @ts: upper bound on process time suspension | |
3081 | */ | |
1b3c872c | 3082 | static int do_sigtimedwait(const sigset_t *which, siginfo_t *info, |
2b1ecc3d | 3083 | const struct timespec *ts) |
943df148 | 3084 | { |
2456e855 | 3085 | ktime_t *to = NULL, timeout = KTIME_MAX; |
943df148 | 3086 | struct task_struct *tsk = current; |
943df148 | 3087 | sigset_t mask = *which; |
2b1ecc3d | 3088 | int sig, ret = 0; |
943df148 ON |
3089 | |
3090 | if (ts) { | |
3091 | if (!timespec_valid(ts)) | |
3092 | return -EINVAL; | |
2b1ecc3d TG |
3093 | timeout = timespec_to_ktime(*ts); |
3094 | to = &timeout; | |
943df148 ON |
3095 | } |
3096 | ||
3097 | /* | |
3098 | * Invert the set of allowed signals to get those we want to block. | |
3099 | */ | |
3100 | sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP)); | |
3101 | signotset(&mask); | |
3102 | ||
3103 | spin_lock_irq(&tsk->sighand->siglock); | |
3104 | sig = dequeue_signal(tsk, &mask, info); | |
2456e855 | 3105 | if (!sig && timeout) { |
943df148 ON |
3106 | /* |
3107 | * None ready, temporarily unblock those we're interested in |
3108 | * while we are sleeping, so that we'll be awakened when |
b182801a ON |
3109 | * they arrive. Unblocking is always fine, we can avoid |
3110 | * set_current_blocked(). | |
943df148 ON |
3111 | */ |
3112 | tsk->real_blocked = tsk->blocked; | |
3113 | sigandsets(&tsk->blocked, &tsk->blocked, &mask); | |
3114 | recalc_sigpending(); | |
3115 | spin_unlock_irq(&tsk->sighand->siglock); | |
3116 | ||
2b1ecc3d TG |
3117 | __set_current_state(TASK_INTERRUPTIBLE); |
3118 | ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns, | |
3119 | HRTIMER_MODE_REL); | |
943df148 | 3120 | spin_lock_irq(&tsk->sighand->siglock); |
b182801a | 3121 | __set_task_blocked(tsk, &tsk->real_blocked); |
6114041a | 3122 | sigemptyset(&tsk->real_blocked); |
b182801a | 3123 | sig = dequeue_signal(tsk, &mask, info); |
943df148 ON |
3124 | } |
3125 | spin_unlock_irq(&tsk->sighand->siglock); | |
3126 | ||
3127 | if (sig) | |
3128 | return sig; | |
2b1ecc3d | 3129 | return ret ? -EINTR : -EAGAIN; |
943df148 ON |
3130 | } |
3131 | ||
41c57892 RD |
3132 | /** |
3133 | * sys_rt_sigtimedwait - synchronously wait for queued signals specified | |
3134 | * in @uthese | |
3135 | * @uthese: queued signals to wait for | |
3136 | * @uinfo: if non-null, the signal's siginfo is returned here | |
3137 | * @uts: upper bound on process time suspension | |
3138 | * @sigsetsize: size of sigset_t type | |
3139 | */ | |
17da2bd9 HC |
3140 | SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese, |
3141 | siginfo_t __user *, uinfo, const struct timespec __user *, uts, | |
3142 | size_t, sigsetsize) | |
1da177e4 | 3143 | { |
1da177e4 LT |
3144 | sigset_t these; |
3145 | struct timespec ts; | |
3146 | siginfo_t info; | |
943df148 | 3147 | int ret; |
1da177e4 LT |
3148 | |
3149 | /* XXX: Don't preclude handling different sized sigset_t's. */ | |
3150 | if (sigsetsize != sizeof(sigset_t)) | |
3151 | return -EINVAL; | |
3152 | ||
3153 | if (copy_from_user(&these, uthese, sizeof(these))) | |
3154 | return -EFAULT; | |
5aba085e | 3155 | |
1da177e4 LT |
3156 | if (uts) { |
3157 | if (copy_from_user(&ts, uts, sizeof(ts))) | |
3158 | return -EFAULT; | |
1da177e4 LT |
3159 | } |
3160 | ||
943df148 | 3161 | ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL); |
1da177e4 | 3162 | |
943df148 ON |
3163 | if (ret > 0 && uinfo) { |
3164 | if (copy_siginfo_to_user(uinfo, &info)) | |
3165 | ret = -EFAULT; | |
1da177e4 LT |
3166 | } |
3167 | ||
3168 | return ret; | |
3169 | } | |
3170 | ||
1b3c872c AV |
3171 | #ifdef CONFIG_COMPAT |
3172 | COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait, compat_sigset_t __user *, uthese, | |
3173 | struct compat_siginfo __user *, uinfo, | |
3174 | struct compat_timespec __user *, uts, compat_size_t, sigsetsize) | |
3175 | { | |
1b3c872c AV |
3176 | sigset_t s; |
3177 | struct timespec t; | |
3178 | siginfo_t info; | |
3179 | long ret; | |
3180 | ||
3181 | if (sigsetsize != sizeof(sigset_t)) | |
3182 | return -EINVAL; | |
3183 | ||
3968cf62 | 3184 | if (get_compat_sigset(&s, uthese)) |
1b3c872c | 3185 | return -EFAULT; |
1b3c872c AV |
3186 | |
3187 | if (uts) { | |
3188 | if (compat_get_timespec(&t, uts)) | |
3189 | return -EFAULT; | |
3190 | } | |
3191 | ||
3192 | ret = do_sigtimedwait(&s, &info, uts ? &t : NULL); | |
3193 | ||
3194 | if (ret > 0 && uinfo) { | |
3195 | if (copy_siginfo_to_user32(uinfo, &info)) | |
3196 | ret = -EFAULT; | |
3197 | } | |
3198 | ||
3199 | return ret; | |
3200 | } | |
3201 | #endif | |
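/*
 * Illustrative userspace sketch (not part of signal.c) of the
 * rt_sigtimedwait path above: block the signals of interest first, then
 * accept one synchronously.  A timeout surfaces as EAGAIN (the -EAGAIN
 * return above) and an unrelated interruption as EINTR.
 */
#define _POSIX_C_SOURCE 200809L
#include <signal.h>
#include <errno.h>
#include <stdio.h>
#include <time.h>

static int wait_for_usr1(void)
{
	sigset_t set;
	siginfo_t info;
	struct timespec timeout = { .tv_sec = 5, .tv_nsec = 0 };

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);	/* must be blocked first */

	if (sigtimedwait(&set, &info, &timeout) < 0) {
		if (errno == EAGAIN)
			fprintf(stderr, "timed out\n");
		return -1;
	}
	printf("got SIGUSR1 from pid %d\n", (int)info.si_pid);
	return 0;
}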
3202 | ||
41c57892 RD |
3203 | /** |
3204 | * sys_kill - send a signal to a process | |
3205 | * @pid: the PID of the process | |
3206 | * @sig: signal to be sent | |
3207 | */ | |
17da2bd9 | 3208 | SYSCALL_DEFINE2(kill, pid_t, pid, int, sig) |
1da177e4 LT |
3209 | { |
3210 | struct siginfo info; | |
3211 | ||
faf1f22b | 3212 | clear_siginfo(&info); |
1da177e4 LT |
3213 | info.si_signo = sig; |
3214 | info.si_errno = 0; | |
3215 | info.si_code = SI_USER; | |
b488893a | 3216 | info.si_pid = task_tgid_vnr(current); |
078de5f7 | 3217 | info.si_uid = from_kuid_munged(current_user_ns(), current_uid()); |
1da177e4 LT |
3218 | |
3219 | return kill_something_info(sig, &info, pid); | |
3220 | } | |
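/*
 * Illustrative userspace sketch (not part of signal.c) of sys_kill above.
 * Signal 0 performs only the permission and existence checks without
 * delivering anything, which is the usual way to test whether a PID is
 * still alive before signalling it.
 */
#include <signal.h>
#include <errno.h>
#include <stdio.h>
#include <sys/types.h>

static void probe_and_terminate(pid_t pid)
{
	if (kill(pid, 0) < 0) {			/* the "null signal" probe */
		if (errno == ESRCH)
			fprintf(stderr, "pid %d is gone\n", (int)pid);
		else if (errno == EPERM)
			fprintf(stderr, "pid %d exists, but no permission\n",
				(int)pid);
		return;
	}
	kill(pid, SIGTERM);			/* ask it to exit */
}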
3221 | ||
30b4ae8a TG |
3222 | static int |
3223 | do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info) | |
1da177e4 | 3224 | { |
1da177e4 | 3225 | struct task_struct *p; |
30b4ae8a | 3226 | int error = -ESRCH; |
1da177e4 | 3227 | |
3547ff3a | 3228 | rcu_read_lock(); |
228ebcbe | 3229 | p = find_task_by_vpid(pid); |
b488893a | 3230 | if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) { |
30b4ae8a | 3231 | error = check_kill_permission(sig, info, p); |
1da177e4 LT |
3232 | /* |
3233 | * The null signal is a permissions and process existence | |
3234 | * probe. No signal is actually delivered. | |
3235 | */ | |
4a30debf | 3236 | if (!error && sig) { |
40b3b025 | 3237 | error = do_send_sig_info(sig, info, p, PIDTYPE_PID); |
4a30debf ON |
3238 | /* |
3239 | * If lock_task_sighand() failed we pretend the task | |
3240 | * dies after receiving the signal. The window is tiny, | |
3241 | * and the signal is private anyway. | |
3242 | */ | |
3243 | if (unlikely(error == -ESRCH)) | |
3244 | error = 0; | |
1da177e4 LT |
3245 | } |
3246 | } | |
3547ff3a | 3247 | rcu_read_unlock(); |
6dd69f10 | 3248 | |
1da177e4 LT |
3249 | return error; |
3250 | } | |
3251 | ||
30b4ae8a TG |
3252 | static int do_tkill(pid_t tgid, pid_t pid, int sig) |
3253 | { | |
5f74972c | 3254 | struct siginfo info; |
30b4ae8a | 3255 | |
5f74972c | 3256 | clear_siginfo(&info); |
30b4ae8a TG |
3257 | info.si_signo = sig; |
3258 | info.si_errno = 0; | |
3259 | info.si_code = SI_TKILL; | |
3260 | info.si_pid = task_tgid_vnr(current); | |
078de5f7 | 3261 | info.si_uid = from_kuid_munged(current_user_ns(), current_uid()); |
30b4ae8a TG |
3262 | |
3263 | return do_send_specific(tgid, pid, sig, &info); | |
3264 | } | |
3265 | ||
6dd69f10 VL |
3266 | /** |
3267 | * sys_tgkill - send signal to one specific thread | |
3268 | * @tgid: the thread group ID of the thread | |
3269 | * @pid: the PID of the thread | |
3270 | * @sig: signal to be sent | |
3271 | * | |
72fd4a35 | 3272 | * This syscall also checks the @tgid and returns -ESRCH even if the PID |
6dd69f10 VL |
3273 | exists but no longer belongs to the target process. This |
3274 | * method solves the problem of threads exiting and PIDs getting reused. | |
3275 | */ | |
a5f8fa9e | 3276 | SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig) |
6dd69f10 VL |
3277 | { |
3278 | /* This is only valid for single tasks */ | |
3279 | if (pid <= 0 || tgid <= 0) | |
3280 | return -EINVAL; | |
3281 | ||
3282 | return do_tkill(tgid, pid, sig); | |
3283 | } | |
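/*
 * Illustrative userspace sketch (not part of signal.c) of sys_tgkill above.
 * Passing the thread group id along with the thread id closes the PID-reuse
 * race described in the comment; the call is typically made via syscall(2)
 * (older glibc versions have no wrapper) or indirectly through
 * pthread_kill().
 */
#define _GNU_SOURCE
#include <signal.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

static int signal_one_thread(pid_t tgid, pid_t tid, int sig)
{
	/* fails with ESRCH if @tid no longer belongs to @tgid */
	return (int)syscall(SYS_tgkill, tgid, tid, sig);
}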
3284 | ||
41c57892 RD |
3285 | /** |
3286 | * sys_tkill - send signal to one specific task | |
3287 | * @pid: the PID of the task | |
3288 | * @sig: signal to be sent | |
3289 | * | |
1da177e4 LT |
3290 | * Send a signal to only one task, even if it's a CLONE_THREAD task. |
3291 | */ | |
a5f8fa9e | 3292 | SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig) |
1da177e4 | 3293 | { |
1da177e4 LT |
3294 | /* This is only valid for single tasks */ |
3295 | if (pid <= 0) | |
3296 | return -EINVAL; | |
3297 | ||
6dd69f10 | 3298 | return do_tkill(0, pid, sig); |
1da177e4 LT |
3299 | } |
3300 | ||
75907d4d AV |
3301 | static int do_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t *info) |
3302 | { | |
3303 | /* Not even root can pretend to send signals from the kernel. | |
3304 | * Nor can they impersonate a kill()/tgkill(), which adds source info. | |
3305 | */ | |
66dd34ad | 3306 | if ((info->si_code >= 0 || info->si_code == SI_TKILL) && |
69828dce | 3307 | (task_pid_vnr(current) != pid)) |
75907d4d | 3308 | return -EPERM; |
69828dce | 3309 | |
75907d4d AV |
3310 | info->si_signo = sig; |
3311 | ||
3312 | /* POSIX.1b doesn't mention process groups. */ | |
3313 | return kill_proc_info(sig, info, pid); | |
3314 | } | |
3315 | ||
41c57892 RD |
3316 | /** |
3317 | * sys_rt_sigqueueinfo - send signal information to a process |
3318 | * @pid: the PID of the thread | |
3319 | * @sig: signal to be sent | |
3320 | * @uinfo: signal info to be sent | |
3321 | */ | |
a5f8fa9e HC |
3322 | SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig, |
3323 | siginfo_t __user *, uinfo) | |
1da177e4 LT |
3324 | { |
3325 | siginfo_t info; | |
1da177e4 LT |
3326 | if (copy_from_user(&info, uinfo, sizeof(siginfo_t))) |
3327 | return -EFAULT; | |
75907d4d AV |
3328 | return do_rt_sigqueueinfo(pid, sig, &info); |
3329 | } | |
1da177e4 | 3330 | |
75907d4d | 3331 | #ifdef CONFIG_COMPAT |
75907d4d AV |
3332 | COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo, |
3333 | compat_pid_t, pid, | |
3334 | int, sig, | |
3335 | struct compat_siginfo __user *, uinfo) | |
3336 | { | |
eb5346c3 | 3337 | siginfo_t info; |
75907d4d AV |
3338 | int ret = copy_siginfo_from_user32(&info, uinfo); |
3339 | if (unlikely(ret)) | |
3340 | return ret; | |
3341 | return do_rt_sigqueueinfo(pid, sig, &info); | |
1da177e4 | 3342 | } |
75907d4d | 3343 | #endif |
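/*
 * Illustrative userspace sketch (not part of signal.c).  The common entry
 * point to rt_sigqueueinfo above is sigqueue(3), which builds a siginfo
 * with si_code = SI_QUEUE (a negative value), so the EPERM check that
 * rejects kernel-looking si_code values from other processes never fires.
 * The receiver reads the payload from si_value in an SA_SIGINFO handler.
 */
#define _POSIX_C_SOURCE 200809L
#include <signal.h>
#include <sys/types.h>

static int queue_value(pid_t pid, int value)
{
	union sigval sv;

	sv.sival_int = value;
	return sigqueue(pid, SIGRTMIN, sv);	/* payload rides in si_value */
}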
1da177e4 | 3344 | |
9aae8fc0 | 3345 | static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info) |
62ab4505 TG |
3346 | { |
3347 | /* This is only valid for single tasks */ | |
3348 | if (pid <= 0 || tgid <= 0) | |
3349 | return -EINVAL; | |
3350 | ||
3351 | /* Not even root can pretend to send signals from the kernel. | |
da48524e JT |
3352 | * Nor can they impersonate a kill()/tgkill(), which adds source info. |
3353 | */ | |
69828dce VD |
3354 | if ((info->si_code >= 0 || info->si_code == SI_TKILL) && |
3355 | (task_pid_vnr(current) != pid)) | |
62ab4505 | 3356 | return -EPERM; |
69828dce | 3357 | |
62ab4505 TG |
3358 | info->si_signo = sig; |
3359 | ||
3360 | return do_send_specific(tgid, pid, sig, info); | |
3361 | } | |
3362 | ||
3363 | SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig, | |
3364 | siginfo_t __user *, uinfo) | |
3365 | { | |
3366 | siginfo_t info; | |
3367 | ||
3368 | if (copy_from_user(&info, uinfo, sizeof(siginfo_t))) | |
3369 | return -EFAULT; | |
3370 | ||
3371 | return do_rt_tgsigqueueinfo(tgid, pid, sig, &info); | |
3372 | } | |
3373 | ||
9aae8fc0 AV |
3374 | #ifdef CONFIG_COMPAT |
3375 | COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo, | |
3376 | compat_pid_t, tgid, | |
3377 | compat_pid_t, pid, | |
3378 | int, sig, | |
3379 | struct compat_siginfo __user *, uinfo) | |
3380 | { | |
eb5346c3 | 3381 | siginfo_t info; |
9aae8fc0 AV |
3382 | |
3383 | if (copy_siginfo_from_user32(&info, uinfo)) | |
3384 | return -EFAULT; | |
3385 | return do_rt_tgsigqueueinfo(tgid, pid, sig, &info); | |
3386 | } | |
3387 | #endif | |
3388 | ||
0341729b | 3389 | /* |
b4e74264 | 3390 | * For kthreads only, must not be used if cloned with CLONE_SIGHAND |
0341729b | 3391 | */ |
b4e74264 | 3392 | void kernel_sigaction(int sig, __sighandler_t action) |
0341729b | 3393 | { |
ec5955b8 | 3394 | spin_lock_irq(&current->sighand->siglock); |
b4e74264 ON |
3395 | current->sighand->action[sig - 1].sa.sa_handler = action; |
3396 | if (action == SIG_IGN) { | |
3397 | sigset_t mask; | |
0341729b | 3398 | |
b4e74264 ON |
3399 | sigemptyset(&mask); |
3400 | sigaddset(&mask, sig); | |
580d34e4 | 3401 | |
b4e74264 ON |
3402 | flush_sigqueue_mask(&mask, &current->signal->shared_pending); |
3403 | flush_sigqueue_mask(&mask, &current->pending); |
3404 | recalc_sigpending(); | |
3405 | } | |
0341729b ON |
3406 | spin_unlock_irq(&current->sighand->siglock); |
3407 | } | |
b4e74264 | 3408 | EXPORT_SYMBOL(kernel_sigaction); |
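/*
 * A hedged, illustrative sketch (not part of signal.c) of how kernel
 * threads normally reach kernel_sigaction() above: through the
 * allow_signal()/disallow_signal() helpers.  The thread below is purely
 * hypothetical; kthread_should_stop(), signal_pending() and
 * flush_signals() are regular kernel APIs.
 */
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/signal.h>

static int example_kthread(void *unused)
{
	allow_signal(SIGTERM);		/* ends up in kernel_sigaction() */

	while (!kthread_should_stop()) {
		if (signal_pending(current)) {
			flush_signals(current);
			break;		/* asked to wind down early */
		}
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}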
0341729b | 3409 | |
68463510 DS |
3410 | void __weak sigaction_compat_abi(struct k_sigaction *act, |
3411 | struct k_sigaction *oact) | |
3412 | { | |
3413 | } | |
3414 | ||
88531f72 | 3415 | int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact) |
1da177e4 | 3416 | { |
afe2b038 | 3417 | struct task_struct *p = current, *t; |
1da177e4 | 3418 | struct k_sigaction *k; |
71fabd5e | 3419 | sigset_t mask; |
1da177e4 | 3420 | |
7ed20e1a | 3421 | if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig))) |
1da177e4 LT |
3422 | return -EINVAL; |
3423 | ||
afe2b038 | 3424 | k = &p->sighand->action[sig-1]; |
1da177e4 | 3425 | |
afe2b038 | 3426 | spin_lock_irq(&p->sighand->siglock); |
1da177e4 LT |
3427 | if (oact) |
3428 | *oact = *k; | |
3429 | ||
68463510 DS |
3430 | sigaction_compat_abi(act, oact); |
3431 | ||
1da177e4 | 3432 | if (act) { |
9ac95f2f ON |
3433 | sigdelsetmask(&act->sa.sa_mask, |
3434 | sigmask(SIGKILL) | sigmask(SIGSTOP)); | |
88531f72 | 3435 | *k = *act; |
1da177e4 LT |
3436 | /* |
3437 | * POSIX 3.3.1.3: | |
3438 | * "Setting a signal action to SIG_IGN for a signal that is | |
3439 | * pending shall cause the pending signal to be discarded, | |
3440 | * whether or not it is blocked." | |
3441 | * | |
3442 | * "Setting a signal action to SIG_DFL for a signal that is | |
3443 | * pending and whose default action is to ignore the signal | |
3444 | * (for example, SIGCHLD), shall cause the pending signal to | |
3445 | * be discarded, whether or not it is blocked" | |
3446 | */ | |
afe2b038 | 3447 | if (sig_handler_ignored(sig_handler(p, sig), sig)) { |
71fabd5e GA |
3448 | sigemptyset(&mask); |
3449 | sigaddset(&mask, sig); | |
afe2b038 ON |
3450 | flush_sigqueue_mask(&mask, &p->signal->shared_pending); |
3451 | for_each_thread(p, t) | |
c09c1441 | 3452 | flush_sigqueue_mask(&mask, &t->pending); |
1da177e4 | 3453 | } |
1da177e4 LT |
3454 | } |
3455 | ||
afe2b038 | 3456 | spin_unlock_irq(&p->sighand->siglock); |
1da177e4 LT |
3457 | return 0; |
3458 | } | |
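/*
 * Illustrative userspace sketch (not part of signal.c) of the POSIX
 * 3.3.1.3 rule quoted above: a signal that is pending while blocked is
 * discarded the moment its action is set to SIG_IGN, so sigpending() no
 * longer reports it and unblocking delivers nothing.
 */
#include <signal.h>
#include <stdio.h>

static void discard_pending_usr2(void)
{
	sigset_t block, pend;

	sigemptyset(&block);
	sigaddset(&block, SIGUSR2);
	sigprocmask(SIG_BLOCK, &block, NULL);

	raise(SIGUSR2);				/* now pending and blocked */

	signal(SIGUSR2, SIG_IGN);		/* do_sigaction() flushes it */

	sigpending(&pend);
	printf("SIGUSR2 still pending: %d\n",	/* prints 0 */
	       sigismember(&pend, SIGUSR2));

	sigprocmask(SIG_UNBLOCK, &block, NULL);
}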
3459 | ||
c09c1441 | 3460 | static int |
bcfe8ad8 | 3461 | do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp) |
1da177e4 | 3462 | { |
bcfe8ad8 | 3463 | struct task_struct *t = current; |
1da177e4 | 3464 | |
bcfe8ad8 AV |
3465 | if (oss) { |
3466 | memset(oss, 0, sizeof(stack_t)); | |
3467 | oss->ss_sp = (void __user *) t->sas_ss_sp; | |
3468 | oss->ss_size = t->sas_ss_size; | |
3469 | oss->ss_flags = sas_ss_flags(sp) | | |
3470 | (current->sas_ss_flags & SS_FLAG_BITS); | |
3471 | } | |
1da177e4 | 3472 | |
bcfe8ad8 AV |
3473 | if (ss) { |
3474 | void __user *ss_sp = ss->ss_sp; | |
3475 | size_t ss_size = ss->ss_size; | |
3476 | unsigned ss_flags = ss->ss_flags; | |
407bc16a | 3477 | int ss_mode; |
1da177e4 | 3478 | |
bcfe8ad8 AV |
3479 | if (unlikely(on_sig_stack(sp))) |
3480 | return -EPERM; | |
1da177e4 | 3481 | |
407bc16a | 3482 | ss_mode = ss_flags & ~SS_FLAG_BITS; |
bcfe8ad8 AV |
3483 | if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK && |
3484 | ss_mode != 0)) | |
3485 | return -EINVAL; | |
1da177e4 | 3486 | |
407bc16a | 3487 | if (ss_mode == SS_DISABLE) { |
1da177e4 LT |
3488 | ss_size = 0; |
3489 | ss_sp = NULL; | |
3490 | } else { | |
bcfe8ad8 AV |
3491 | if (unlikely(ss_size < MINSIGSTKSZ)) |
3492 | return -ENOMEM; | |
1da177e4 LT |
3493 | } |
3494 | ||
bcfe8ad8 AV |
3495 | t->sas_ss_sp = (unsigned long) ss_sp; |
3496 | t->sas_ss_size = ss_size; | |
3497 | t->sas_ss_flags = ss_flags; | |
1da177e4 | 3498 | } |
bcfe8ad8 | 3499 | return 0; |
1da177e4 | 3500 | } |
bcfe8ad8 | 3501 | |
6bf9adfc AV |
3502 | SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss) |
3503 | { | |
bcfe8ad8 AV |
3504 | stack_t new, old; |
3505 | int err; | |
3506 | if (uss && copy_from_user(&new, uss, sizeof(stack_t))) | |
3507 | return -EFAULT; | |
3508 | err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL, | |
3509 | current_user_stack_pointer()); | |
3510 | if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t))) | |
3511 | err = -EFAULT; | |
3512 | return err; | |
6bf9adfc | 3513 | } |
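/*
 * Illustrative userspace sketch (not part of signal.c) of the sigaltstack
 * path above.  The alternate stack must be at least MINSIGSTKSZ bytes (the
 * -ENOMEM check in do_sigaltstack()), and the handler must be installed
 * with SA_ONSTACK to run on it, which is what makes a stack-overflow
 * SIGSEGV handleable at all.
 */
#define _POSIX_C_SOURCE 200809L
#include <signal.h>
#include <stdlib.h>

static int install_altstack_handler(void (*handler)(int, siginfo_t *, void *))
{
	stack_t ss;
	struct sigaction sa = { 0 };

	ss.ss_sp = malloc(SIGSTKSZ);
	if (!ss.ss_sp)
		return -1;
	ss.ss_size = SIGSTKSZ;
	ss.ss_flags = 0;
	if (sigaltstack(&ss, NULL) < 0)
		return -1;

	sa.sa_sigaction = handler;
	sa.sa_flags = SA_SIGINFO | SA_ONSTACK;	/* run on the alternate stack */
	sigemptyset(&sa.sa_mask);
	return sigaction(SIGSEGV, &sa, NULL);
}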
1da177e4 | 3514 | |
5c49574f AV |
3515 | int restore_altstack(const stack_t __user *uss) |
3516 | { | |
bcfe8ad8 AV |
3517 | stack_t new; |
3518 | if (copy_from_user(&new, uss, sizeof(stack_t))) | |
3519 | return -EFAULT; | |
3520 | (void)do_sigaltstack(&new, NULL, current_user_stack_pointer()); | |
5c49574f | 3521 | /* squash all but EFAULT for now */ |
bcfe8ad8 | 3522 | return 0; |
5c49574f AV |
3523 | } |
3524 | ||
c40702c4 AV |
3525 | int __save_altstack(stack_t __user *uss, unsigned long sp) |
3526 | { | |
3527 | struct task_struct *t = current; | |
2a742138 SS |
3528 | int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) | |
3529 | __put_user(t->sas_ss_flags, &uss->ss_flags) | | |
c40702c4 | 3530 | __put_user(t->sas_ss_size, &uss->ss_size); |
2a742138 SS |
3531 | if (err) |
3532 | return err; | |
3533 | if (t->sas_ss_flags & SS_AUTODISARM) | |
3534 | sas_ss_reset(t); | |
3535 | return 0; | |
c40702c4 AV |
3536 | } |
3537 | ||
90268439 | 3538 | #ifdef CONFIG_COMPAT |
6203deb0 DB |
3539 | static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr, |
3540 | compat_stack_t __user *uoss_ptr) | |
90268439 AV |
3541 | { |
3542 | stack_t uss, uoss; | |
3543 | int ret; | |
90268439 AV |
3544 | |
3545 | if (uss_ptr) { | |
3546 | compat_stack_t uss32; | |
90268439 AV |
3547 | if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t))) |
3548 | return -EFAULT; | |
3549 | uss.ss_sp = compat_ptr(uss32.ss_sp); | |
3550 | uss.ss_flags = uss32.ss_flags; | |
3551 | uss.ss_size = uss32.ss_size; | |
3552 | } | |
bcfe8ad8 | 3553 | ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, |
90268439 | 3554 | compat_user_stack_pointer()); |
90268439 | 3555 | if (ret >= 0 && uoss_ptr) { |
bcfe8ad8 AV |
3556 | compat_stack_t old; |
3557 | memset(&old, 0, sizeof(old)); | |
3558 | old.ss_sp = ptr_to_compat(uoss.ss_sp); | |
3559 | old.ss_flags = uoss.ss_flags; | |
3560 | old.ss_size = uoss.ss_size; | |
3561 | if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t))) | |
90268439 AV |
3562 | ret = -EFAULT; |
3563 | } | |
3564 | return ret; | |
3565 | } | |
3566 | ||
6203deb0 DB |
3567 | COMPAT_SYSCALL_DEFINE2(sigaltstack, |
3568 | const compat_stack_t __user *, uss_ptr, | |
3569 | compat_stack_t __user *, uoss_ptr) | |
3570 | { | |
3571 | return do_compat_sigaltstack(uss_ptr, uoss_ptr); | |
3572 | } | |
3573 | ||
90268439 AV |
3574 | int compat_restore_altstack(const compat_stack_t __user *uss) |
3575 | { | |
6203deb0 | 3576 | int err = do_compat_sigaltstack(uss, NULL); |
90268439 AV |
3577 | /* squash all but -EFAULT for now */ |
3578 | return err == -EFAULT ? err : 0; | |
3579 | } | |
c40702c4 AV |
3580 | |
3581 | int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp) | |
3582 | { | |
441398d3 | 3583 | int err; |
c40702c4 | 3584 | struct task_struct *t = current; |
441398d3 SS |
3585 | err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp), |
3586 | &uss->ss_sp) | | |
3587 | __put_user(t->sas_ss_flags, &uss->ss_flags) | | |
c40702c4 | 3588 | __put_user(t->sas_ss_size, &uss->ss_size); |
441398d3 SS |
3589 | if (err) |
3590 | return err; | |
3591 | if (t->sas_ss_flags & SS_AUTODISARM) | |
3592 | sas_ss_reset(t); | |
3593 | return 0; | |
c40702c4 | 3594 | } |
90268439 | 3595 | #endif |
1da177e4 LT |
3596 | |
3597 | #ifdef __ARCH_WANT_SYS_SIGPENDING | |
3598 | ||
41c57892 RD |
3599 | /** |
3600 | * sys_sigpending - examine pending signals | |
d53238cd | 3601 | * @uset: where the mask of pending signals is returned |
41c57892 | 3602 | */ |
d53238cd | 3603 | SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset) |
1da177e4 | 3604 | { |
d53238cd | 3605 | sigset_t set; |
d53238cd DB |
3606 | |
3607 | if (sizeof(old_sigset_t) > sizeof(*uset)) | |
3608 | return -EINVAL; | |
3609 | ||
b1d294c8 CB |
3610 | do_sigpending(&set); |
3611 | ||
3612 | if (copy_to_user(uset, &set, sizeof(old_sigset_t))) | |
3613 | return -EFAULT; | |
3614 | ||
3615 | return 0; | |
1da177e4 LT |
3616 | } |
3617 | ||
8f13621a AV |
3618 | #ifdef CONFIG_COMPAT |
3619 | COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32) | |
3620 | { | |
3621 | sigset_t set; | |
b1d294c8 CB |
3622 | |
3623 | do_sigpending(&set); | |
3624 | ||
3625 | return put_user(set.sig[0], set32); | |
8f13621a AV |
3626 | } |
3627 | #endif | |
3628 | ||
1da177e4 LT |
3629 | #endif |
3630 | ||
3631 | #ifdef __ARCH_WANT_SYS_SIGPROCMASK | |
41c57892 RD |
3632 | /** |
3633 | * sys_sigprocmask - examine and change blocked signals | |
3634 | * @how: whether to add, remove, or set signals | |
b013c399 | 3635 | * @nset: signals to add or remove (if non-null) |
41c57892 RD |
3636 | * @oset: previous value of signal mask if non-null |
3637 | * | |
5aba085e RD |
3638 | * Some platforms have their own version with special arguments; |
3639 | * others support only sys_rt_sigprocmask. | |
3640 | */ | |
1da177e4 | 3641 | |
b013c399 | 3642 | SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset, |
b290ebe2 | 3643 | old_sigset_t __user *, oset) |
1da177e4 | 3644 | { |
1da177e4 | 3645 | old_sigset_t old_set, new_set; |
2e4f7c77 | 3646 | sigset_t new_blocked; |
1da177e4 | 3647 | |
b013c399 | 3648 | old_set = current->blocked.sig[0]; |
1da177e4 | 3649 | |
b013c399 ON |
3650 | if (nset) { |
3651 | if (copy_from_user(&new_set, nset, sizeof(*nset))) | |
3652 | return -EFAULT; | |
1da177e4 | 3653 | |
2e4f7c77 | 3654 | new_blocked = current->blocked; |
1da177e4 | 3655 | |
1da177e4 | 3656 | switch (how) { |
1da177e4 | 3657 | case SIG_BLOCK: |
2e4f7c77 | 3658 | sigaddsetmask(&new_blocked, new_set); |
1da177e4 LT |
3659 | break; |
3660 | case SIG_UNBLOCK: | |
2e4f7c77 | 3661 | sigdelsetmask(&new_blocked, new_set); |
1da177e4 LT |
3662 | break; |
3663 | case SIG_SETMASK: | |
2e4f7c77 | 3664 | new_blocked.sig[0] = new_set; |
1da177e4 | 3665 | break; |
2e4f7c77 ON |
3666 | default: |
3667 | return -EINVAL; | |
1da177e4 LT |
3668 | } |
3669 | ||
0c4a8423 | 3670 | set_current_blocked(&new_blocked); |
b013c399 ON |
3671 | } |
3672 | ||
3673 | if (oset) { | |
1da177e4 | 3674 | if (copy_to_user(oset, &old_set, sizeof(*oset))) |
b013c399 | 3675 | return -EFAULT; |
1da177e4 | 3676 | } |
b013c399 ON |
3677 | |
3678 | return 0; | |
1da177e4 LT |
3679 | } |
3680 | #endif /* __ARCH_WANT_SYS_SIGPROCMASK */ | |
3681 | ||
eaca6eae | 3682 | #ifndef CONFIG_ODD_RT_SIGACTION |
41c57892 RD |
3683 | /** |
3684 | * sys_rt_sigaction - alter an action taken by a process | |
3685 | * @sig: signal to be sent | |
f9fa0bc1 RD |
3686 | * @act: new sigaction |
3687 | * @oact: used to save the previous sigaction | |
41c57892 RD |
3688 | * @sigsetsize: size of sigset_t type |
3689 | */ | |
d4e82042 HC |
3690 | SYSCALL_DEFINE4(rt_sigaction, int, sig, |
3691 | const struct sigaction __user *, act, | |
3692 | struct sigaction __user *, oact, | |
3693 | size_t, sigsetsize) | |
1da177e4 LT |
3694 | { |
3695 | struct k_sigaction new_sa, old_sa; | |
d8f993b3 | 3696 | int ret; |
1da177e4 LT |
3697 | |
3698 | /* XXX: Don't preclude handling different sized sigset_t's. */ | |
3699 | if (sigsetsize != sizeof(sigset_t)) | |
d8f993b3 | 3700 | return -EINVAL; |
1da177e4 | 3701 | |
d8f993b3 CB |
3702 | if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa))) |
3703 | return -EFAULT; | |
1da177e4 LT |
3704 | |
3705 | ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL); | |
d8f993b3 CB |
3706 | if (ret) |
3707 | return ret; | |
1da177e4 | 3708 | |
d8f993b3 CB |
3709 | if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa))) |
3710 | return -EFAULT; | |
3711 | ||
3712 | return 0; | |
1da177e4 | 3713 | } |
08d32fe5 | 3714 | #ifdef CONFIG_COMPAT |
08d32fe5 AV |
3715 | COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig, |
3716 | const struct compat_sigaction __user *, act, | |
3717 | struct compat_sigaction __user *, oact, | |
3718 | compat_size_t, sigsetsize) | |
3719 | { | |
3720 | struct k_sigaction new_ka, old_ka; | |
08d32fe5 AV |
3721 | #ifdef __ARCH_HAS_SA_RESTORER |
3722 | compat_uptr_t restorer; | |
3723 | #endif | |
3724 | int ret; | |
3725 | ||
3726 | /* XXX: Don't preclude handling different sized sigset_t's. */ | |
3727 | if (sigsetsize != sizeof(compat_sigset_t)) | |
3728 | return -EINVAL; | |
3729 | ||
3730 | if (act) { | |
3731 | compat_uptr_t handler; | |
3732 | ret = get_user(handler, &act->sa_handler); | |
3733 | new_ka.sa.sa_handler = compat_ptr(handler); | |
3734 | #ifdef __ARCH_HAS_SA_RESTORER | |
3735 | ret |= get_user(restorer, &act->sa_restorer); | |
3736 | new_ka.sa.sa_restorer = compat_ptr(restorer); | |
3737 | #endif | |
3968cf62 | 3738 | ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask); |
3ddc5b46 | 3739 | ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags); |
08d32fe5 AV |
3740 | if (ret) |
3741 | return -EFAULT; | |
08d32fe5 AV |
3742 | } |
3743 | ||
3744 | ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); | |
3745 | if (!ret && oact) { | |
08d32fe5 AV |
3746 | ret = put_user(ptr_to_compat(old_ka.sa.sa_handler), |
3747 | &oact->sa_handler); | |
f454322e DL |
3748 | ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask, |
3749 | sizeof(oact->sa_mask)); | |
3ddc5b46 | 3750 | ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags); |
08d32fe5 AV |
3751 | #ifdef __ARCH_HAS_SA_RESTORER |
3752 | ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer), | |
3753 | &oact->sa_restorer); | |
3754 | #endif | |
3755 | } | |
3756 | return ret; | |
3757 | } | |
3758 | #endif | |
eaca6eae | 3759 | #endif /* !CONFIG_ODD_RT_SIGACTION */ |
1da177e4 | 3760 | |
495dfbf7 AV |
3761 | #ifdef CONFIG_OLD_SIGACTION |
3762 | SYSCALL_DEFINE3(sigaction, int, sig, | |
3763 | const struct old_sigaction __user *, act, | |
3764 | struct old_sigaction __user *, oact) | |
3765 | { | |
3766 | struct k_sigaction new_ka, old_ka; | |
3767 | int ret; | |
3768 | ||
3769 | if (act) { | |
3770 | old_sigset_t mask; | |
3771 | if (!access_ok(VERIFY_READ, act, sizeof(*act)) || | |
3772 | __get_user(new_ka.sa.sa_handler, &act->sa_handler) || | |
3773 | __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) || | |
3774 | __get_user(new_ka.sa.sa_flags, &act->sa_flags) || | |
3775 | __get_user(mask, &act->sa_mask)) | |
3776 | return -EFAULT; | |
3777 | #ifdef __ARCH_HAS_KA_RESTORER | |
3778 | new_ka.ka_restorer = NULL; | |
3779 | #endif | |
3780 | siginitset(&new_ka.sa.sa_mask, mask); | |
3781 | } | |
3782 | ||
3783 | ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); | |
3784 | ||
3785 | if (!ret && oact) { | |
3786 | if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) || | |
3787 | __put_user(old_ka.sa.sa_handler, &oact->sa_handler) || | |
3788 | __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) || | |
3789 | __put_user(old_ka.sa.sa_flags, &oact->sa_flags) || | |
3790 | __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask)) | |
3791 | return -EFAULT; | |
3792 | } | |
3793 | ||
3794 | return ret; | |
3795 | } | |
3796 | #endif | |
3797 | #ifdef CONFIG_COMPAT_OLD_SIGACTION | |
3798 | COMPAT_SYSCALL_DEFINE3(sigaction, int, sig, | |
3799 | const struct compat_old_sigaction __user *, act, | |
3800 | struct compat_old_sigaction __user *, oact) | |
3801 | { | |
3802 | struct k_sigaction new_ka, old_ka; | |
3803 | int ret; | |
3804 | compat_old_sigset_t mask; | |
3805 | compat_uptr_t handler, restorer; | |
3806 | ||
3807 | if (act) { | |
3808 | if (!access_ok(VERIFY_READ, act, sizeof(*act)) || | |
3809 | __get_user(handler, &act->sa_handler) || | |
3810 | __get_user(restorer, &act->sa_restorer) || | |
3811 | __get_user(new_ka.sa.sa_flags, &act->sa_flags) || | |
3812 | __get_user(mask, &act->sa_mask)) | |
3813 | return -EFAULT; | |
3814 | ||
3815 | #ifdef __ARCH_HAS_KA_RESTORER | |
3816 | new_ka.ka_restorer = NULL; | |
3817 | #endif | |
3818 | new_ka.sa.sa_handler = compat_ptr(handler); | |
3819 | new_ka.sa.sa_restorer = compat_ptr(restorer); | |
3820 | siginitset(&new_ka.sa.sa_mask, mask); | |
3821 | } | |
3822 | ||
3823 | ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); | |
3824 | ||
3825 | if (!ret && oact) { | |
3826 | if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) || | |
3827 | __put_user(ptr_to_compat(old_ka.sa.sa_handler), | |
3828 | &oact->sa_handler) || | |
3829 | __put_user(ptr_to_compat(old_ka.sa.sa_restorer), | |
3830 | &oact->sa_restorer) || | |
3831 | __put_user(old_ka.sa.sa_flags, &oact->sa_flags) || | |
3832 | __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask)) | |
3833 | return -EFAULT; | |
3834 | } | |
3835 | return ret; | |
3836 | } | |
3837 | #endif | |
1da177e4 | 3838 | |
f6187769 | 3839 | #ifdef CONFIG_SGETMASK_SYSCALL |
1da177e4 LT |
3840 | |
3841 | /* | |
3842 | * For backwards compatibility. Functionality superseded by sigprocmask. | |
3843 | */ | |
a5f8fa9e | 3844 | SYSCALL_DEFINE0(sgetmask) |
1da177e4 LT |
3845 | { |
3846 | /* SMP safe */ | |
3847 | return current->blocked.sig[0]; | |
3848 | } | |
3849 | ||
a5f8fa9e | 3850 | SYSCALL_DEFINE1(ssetmask, int, newmask) |
1da177e4 | 3851 | { |
c1095c6d ON |
3852 | int old = current->blocked.sig[0]; |
3853 | sigset_t newset; | |
1da177e4 | 3854 | |
5ba53ff6 | 3855 | siginitset(&newset, newmask); |
c1095c6d | 3856 | set_current_blocked(&newset); |
1da177e4 LT |
3857 | |
3858 | return old; | |
3859 | } | |
f6187769 | 3860 | #endif /* CONFIG_SGETMASK_SYSCALL */ |
1da177e4 LT |
3861 | |
3862 | #ifdef __ARCH_WANT_SYS_SIGNAL | |
3863 | /* | |
3864 | * For backwards compatibility. Functionality superseded by sigaction. | |
3865 | */ | |
a5f8fa9e | 3866 | SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler) |
1da177e4 LT |
3867 | { |
3868 | struct k_sigaction new_sa, old_sa; | |
3869 | int ret; | |
3870 | ||
3871 | new_sa.sa.sa_handler = handler; | |
3872 | new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK; | |
c70d3d70 | 3873 | sigemptyset(&new_sa.sa.sa_mask); |
1da177e4 LT |
3874 | |
3875 | ret = do_sigaction(sig, &new_sa, &old_sa); | |
3876 | ||
3877 | return ret ? ret : (unsigned long)old_sa.sa.sa_handler; | |
3878 | } | |
3879 | #endif /* __ARCH_WANT_SYS_SIGNAL */ | |
3880 | ||
3881 | #ifdef __ARCH_WANT_SYS_PAUSE | |
3882 | ||
a5f8fa9e | 3883 | SYSCALL_DEFINE0(pause) |
1da177e4 | 3884 | { |
d92fcf05 | 3885 | while (!signal_pending(current)) { |
1df01355 | 3886 | __set_current_state(TASK_INTERRUPTIBLE); |
d92fcf05 ON |
3887 | schedule(); |
3888 | } | |
1da177e4 LT |
3889 | return -ERESTARTNOHAND; |
3890 | } | |
3891 | ||
3892 | #endif | |
3893 | ||
9d8a7652 | 3894 | static int sigsuspend(sigset_t *set) |
68f3f16d | 3895 | { |
68f3f16d AV |
3896 | current->saved_sigmask = current->blocked; |
3897 | set_current_blocked(set); | |
3898 | ||
823dd322 SL |
3899 | while (!signal_pending(current)) { |
3900 | __set_current_state(TASK_INTERRUPTIBLE); | |
3901 | schedule(); | |
3902 | } | |
68f3f16d AV |
3903 | set_restore_sigmask(); |
3904 | return -ERESTARTNOHAND; | |
3905 | } | |
68f3f16d | 3906 | |
41c57892 RD |
3907 | /** |
3908 | * sys_rt_sigsuspend - replace the signal mask with the |
3909 | * @unewset value until a signal is received | |
3910 | * @unewset: new signal mask value | |
3911 | * @sigsetsize: size of sigset_t type | |
3912 | */ | |
d4e82042 | 3913 | SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize) |
150256d8 DW |
3914 | { |
3915 | sigset_t newset; | |
3916 | ||
3917 | /* XXX: Don't preclude handling different sized sigset_t's. */ | |
3918 | if (sigsetsize != sizeof(sigset_t)) | |
3919 | return -EINVAL; | |
3920 | ||
3921 | if (copy_from_user(&newset, unewset, sizeof(newset))) | |
3922 | return -EFAULT; | |
68f3f16d | 3923 | return sigsuspend(&newset); |
150256d8 | 3924 | } |
ad4b65a4 AV |
3925 | |
3926 | #ifdef CONFIG_COMPAT | |
3927 | COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize) | |
3928 | { | |
ad4b65a4 | 3929 | sigset_t newset; |
ad4b65a4 AV |
3930 | |
3931 | /* XXX: Don't preclude handling different sized sigset_t's. */ | |
3932 | if (sigsetsize != sizeof(sigset_t)) | |
3933 | return -EINVAL; | |
3934 | ||
3968cf62 | 3935 | if (get_compat_sigset(&newset, unewset)) |
ad4b65a4 | 3936 | return -EFAULT; |
ad4b65a4 | 3937 | return sigsuspend(&newset); |
ad4b65a4 AV |
3938 | } |
3939 | #endif | |
150256d8 | 3940 | |
0a0e8cdf AV |
3941 | #ifdef CONFIG_OLD_SIGSUSPEND |
3942 | SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask) | |
3943 | { | |
3944 | sigset_t blocked; | |
3945 | siginitset(&blocked, mask); | |
3946 | return sigsuspend(&blocked); | |
3947 | } | |
3948 | #endif | |
3949 | #ifdef CONFIG_OLD_SIGSUSPEND3 | |
3950 | SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask) | |
3951 | { | |
3952 | sigset_t blocked; | |
3953 | siginitset(&blocked, mask); | |
3954 | return sigsuspend(&blocked); | |
3955 | } | |
3956 | #endif | |
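/*
 * Illustrative userspace sketch (not part of signal.c) of the sigsuspend
 * path above: block the signal first, then atomically swap in a mask that
 * allows it while sleeping.  This closes the window where a signal arriving
 * just before a plain pause() would be missed.
 */
#include <signal.h>

static volatile sig_atomic_t got_usr1;

static void usr1_handler(int sig)
{
	(void)sig;
	got_usr1 = 1;
}

static void wait_for_usr1_racefree(void)
{
	sigset_t block, wait_mask;
	struct sigaction sa = { 0 };

	sa.sa_handler = usr1_handler;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGUSR1, &sa, NULL);

	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, &wait_mask);	/* save old mask */
	sigdelset(&wait_mask, SIGUSR1);			/* open during wait */

	while (!got_usr1)
		sigsuspend(&wait_mask);		/* unblock + sleep atomically */

	sigprocmask(SIG_UNBLOCK, &block, NULL);
}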
150256d8 | 3957 | |
52f5684c | 3958 | __weak const char *arch_vma_name(struct vm_area_struct *vma) |
f269fdd1 DH |
3959 | { |
3960 | return NULL; | |
3961 | } | |
3962 | ||
1da177e4 LT |
3963 | void __init signals_init(void) |
3964 | { | |
41b27154 HD |
3965 | /* If this check fails, the __ARCH_SI_PREAMBLE_SIZE value is wrong! */ |
3966 | BUILD_BUG_ON(__ARCH_SI_PREAMBLE_SIZE | |
3967 | != offsetof(struct siginfo, _sifields._pad)); | |
aba1be2f | 3968 | BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE); |
41b27154 | 3969 | |
0a31bd5f | 3970 | sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC); |
1da177e4 | 3971 | } |
67fc4e0c JW |
3972 | |
3973 | #ifdef CONFIG_KGDB_KDB | |
3974 | #include <linux/kdb.h> | |
3975 | /* | |
0b44bf9a | 3976 | * kdb_send_sig - Allows kdb to send signals without exposing |
67fc4e0c JW |
3977 | * signal internals. This function checks if the required locks are |
3978 | * available before calling the main signal code, to avoid kdb | |
3979 | * deadlocks. | |
3980 | */ | |
0b44bf9a | 3981 | void kdb_send_sig(struct task_struct *t, int sig) |
67fc4e0c JW |
3982 | { |
3983 | static struct task_struct *kdb_prev_t; | |
0b44bf9a | 3984 | int new_t, ret; |
67fc4e0c JW |
3985 | if (!spin_trylock(&t->sighand->siglock)) { |
3986 | kdb_printf("Can't do kill command now.\n" | |
3987 | "The sigmask lock is held somewhere else in " | |
3988 | "kernel, try again later\n"); | |
3989 | return; | |
3990 | } | |
67fc4e0c JW |
3991 | new_t = kdb_prev_t != t; |
3992 | kdb_prev_t = t; | |
3993 | if (t->state != TASK_RUNNING && new_t) { | |
0b44bf9a | 3994 | spin_unlock(&t->sighand->siglock); |
67fc4e0c JW |
3995 | kdb_printf("Process is not RUNNING, sending a signal from " |
3996 | "kdb risks deadlock\n" | |
3997 | "on the run queue locks. " | |
3998 | "The signal has _not_ been sent.\n" | |
3999 | "Reissue the kill command if you want to risk " | |
4000 | "the deadlock.\n"); | |
4001 | return; | |
4002 | } | |
b213984b | 4003 | ret = send_signal(sig, SEND_SIG_PRIV, t, PIDTYPE_PID); |
0b44bf9a EB |
4004 | spin_unlock(&t->sighand->siglock); |
4005 | if (ret) | |
67fc4e0c JW |
4006 | kdb_printf("Fail to deliver Signal %d to process %d.\n", |
4007 | sig, t->pid); | |
4008 | else | |
4009 | kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid); | |
4010 | } | |
4011 | #endif /* CONFIG_KGDB_KDB */ |