/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/user.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/cn_proc.h>
#include <linux/compiler.h>
#include <linux/posix-timers.h>
#include <linux/livepatch.h>
#include <linux/cgroup.h>

#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>
#include "audit.h"	/* audit_signal_info() */

/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;

static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}

static inline bool sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
	       (handler == SIG_DFL && sig_kernel_ignore(sig));
}

static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	/* SIGKILL and SIGSTOP may not be sent to the global init */
	if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
		return true;

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
		return true;

	return sig_handler_ignored(handler, sig);
}

static bool sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return false;

	/*
	 * Tracers may want to know about even ignored signals, unless the
	 * signal is SIGKILL, which can't be reported anyway but can be
	 * ignored by a SIGNAL_UNKILLABLE task.
	 */
	if (t->ptrace && sig != SIGKILL)
		return false;

	return sig_task_ignored(t, sig, force);
}

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

static bool recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked) ||
	    cgroup_task_frozen(t)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return true;
	}

	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here; only the callers who know they
	 * should clear it do so.
	 */
	return false;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current; the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current) &&
	    !klp_patch_pending(current))
		clear_thread_flag(TIF_SIGPENDING);
}
EXPORT_SYMBOL(recalc_sigpending);

void calculate_sigpending(void)
{
	/* Have any signals or users of TIF_SIGPENDING been delayed
	 * until after fork?
	 */
	spin_lock_irq(&current->sighand->siglock);
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
}

/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}

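/*
 * Editor's worked example (not upstream code): suppose SIGHUP (1) and
 * SIGSEGV (11) are both pending and unblocked in the first word. Without
 * the SYNCHRONOUS_MASK narrowing, ffz(~x) + 1 would return the lowest
 * numbered signal, SIGHUP. Because SIGSEGV is in SYNCHRONOUS_MASK, the
 * word is first reduced to its synchronous bits, so next_signal() returns
 * 11 and the fault signal is serviced before the asynchronous one.
 */
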
static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
		current->comm, current->pid, sig);
}

/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask in @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.  If a stop signo is being set, the existing signo is
 * cleared.  If @task is already being killed or exiting, this function
 * becomes a no-op.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made a no-op because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}

/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking.  @task->siglock guarantees that @task->parent points to the
 * ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		smp_mb();	/* advised by wake_up_bit() */
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}

/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, the
 * other STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}

/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate %SIGNAL_* flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop.  Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
		return true;
	}
	return false;
}

void task_join_group_stop(struct task_struct *task)
{
	/* Have the new thread join an on-going signal group stop */
	unsigned long jobctl = current->jobctl;
	if (jobctl & JOBCTL_STOP_PENDING) {
		struct signal_struct *sig = current->signal;
		unsigned long signr = jobctl & JOBCTL_STOP_SIGMASK;
		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
		if (task_set_jobctl_pending(task, signr | gstop)) {
			sig->group_stop_count++;
		}
	}
}

/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 */
	rcu_read_lock();
	user = get_uid(__task_cred(t)->user);
	atomic_inc(&user->sigpending);
	rcu_read_unlock();

	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			task_rlimit(t, RLIMIT_SIGPENDING)) {
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		atomic_dec(&user->sigpending);
		free_uid(user);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = user;
	}

	return q;
}

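/*
 * Editor's note (an interpretation, not upstream documentation): the
 * accounting above deliberately increments first and rolls back on
 * failure, so the per-user sigpending counter may transiently overshoot
 * RLIMIT_SIGPENDING but never leaks: every atomic_inc() is paired either
 * with a queued sigqueue (released later in __sigqueue_free()) or with an
 * immediate atomic_dec()/free_uid() on the failure path.
 */
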
static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}

void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for this kthread.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
EXPORT_SYMBOL(flush_signals);

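/*
 * Editor's sketch (not upstream code): kthreads ignore all signals by
 * default, so a driver thread that wants to react to SIGKILL typically
 * enables it with allow_signal() and drains anything pending with
 * flush_signals(). my_thread_fn is a hypothetical example:
 *
 *	static int my_thread_fn(void *data)
 *	{
 *		allow_signal(SIGKILL);
 *		while (!kthread_should_stop()) {
 *			if (signal_pending(current))
 *				flush_signals(current);
 *			// ... do work ...
 *		}
 *		return 0;
 *	}
 */
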
#ifdef CONFIG_POSIX_TIMERS
static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
#endif

void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}

/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
#ifdef __ARCH_HAS_SA_RESTORER
		ka->sa.sa_restorer = NULL;
#endif
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

bool unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
	if (is_global_init(tsk))
		return true;

	if (handler != SIG_IGN && handler != SIG_DFL)
		return false;

	/* if ptraced, let the tracer determine */
	return !tsk->ptrace;
}

static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info,
			   bool *resched_timer)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);

		*resched_timer =
			(first->flags & SIGQUEUE_PREALLOC) &&
			(info->si_code == SI_TIMER) &&
			(info->si_sys_private);

		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		clear_siginfo(info);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			    kernel_siginfo_t *info, bool *resched_timer)
{
	int sig = next_signal(pending, mask);

	if (sig)
		collect_signal(sig, pending, info, resched_timer);
	return sig;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *info)
{
	bool resched_timer = false;
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info, &resched_timer);
#ifdef CONFIG_POSIX_TIMERS
		/*
		 * itimer signal?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Restarting the timer in the signal
		 * dequeue path also reduces the timer noise on heavily
		 * loaded !highres systems.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
#endif
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
#ifdef CONFIG_POSIX_TIMERS
	if (resched_timer) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		posixtimer_rearm(info);
		spin_lock(&tsk->sighand->siglock);

		/* Don't expose the si_sys_private value to userspace */
		info->si_sys_private = 0;
	}
#endif
	return signr;
}
EXPORT_SYMBOL_GPL(dequeue_signal);

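/*
 * Editor's sketch (not upstream code): the canonical caller pattern, as
 * used by get_signal() -- take the siglock, dequeue anything that isn't
 * blocked, and drop the lock; the returned siginfo is owned by the caller:
 *
 *	kernel_siginfo_t info;
 *	int signr;
 *
 *	spin_lock_irq(&current->sighand->siglock);
 *	signr = dequeue_signal(current, &current->blocked, &info);
 *	spin_unlock_irq(&current->sighand->siglock);
 */
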
static int dequeue_synchronous_signal(kernel_siginfo_t *info)
{
	struct task_struct *tsk = current;
	struct sigpending *pending = &tsk->pending;
	struct sigqueue *q, *sync = NULL;

	/*
	 * Might a synchronous signal be in the queue?
	 */
	if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
		return 0;

	/*
	 * Return the first synchronous signal in the queue.
	 */
	list_for_each_entry(q, &pending->list, list) {
		/* Synchronous signals have a positive si_code */
		if ((q->info.si_code > SI_USER) &&
		    (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
			sync = q;
			goto next;
		}
	}
	return 0;
next:
	/*
	 * Check if there is another siginfo for the same signal.
	 */
	list_for_each_entry_continue(q, &pending->list, list) {
		if (q->info.si_signo == sync->info.si_signo)
			goto still_pending;
	}

	sigdelset(&pending->signal, sync->info.si_signo);
	recalc_sigpending();
still_pending:
	list_del_init(&sync->list);
	copy_siginfo(info, &sync->info);
	__sigqueue_free(sync);
	return info->si_signo;
}

/*
 * Tell a process that it has a new active signal.
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	set_tsk_thread_flag(t, TIF_SIGPENDING);
	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);
}

/*
 * Remove signals in mask from the pending set and queue.
 *
 * All callers must be holding the siglock.
 */
static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return;

	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
}

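/*
 * Editor's illustration (mirrors prepare_signal() below): this is how the
 * POSIX stop/continue interaction is implemented -- queuing a stop signal
 * discards any pending SIGCONT, and vice versa:
 *
 *	sigset_t flush;
 *
 *	siginitset(&flush, sigmask(SIGCONT));
 *	flush_sigqueue_mask(&flush, &signal->shared_pending);
 */
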
static inline int is_si_special(const struct kernel_siginfo *info)
{
	return info <= SEND_SIG_PRIV;
}

static inline bool si_fromuser(const struct kernel_siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}

/*
 * called with RCU read lock from check_kill_permission()
 */
static bool kill_ok_by_cred(struct task_struct *t)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = __task_cred(t);

	return uid_eq(cred->euid, tcred->suid) ||
	       uid_eq(cred->euid, tcred->uid) ||
	       uid_eq(cred->uid, tcred->suid) ||
	       uid_eq(cred->uid, tcred->uid) ||
	       ns_capable(tcred->user_ns, CAP_KILL);
}

/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct kernel_siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
			/* fall through */
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, NULL);
}

/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules a sticky ptrace trap, which is cleared on the
 * next TRAP_STOP, to notify the ptracer of an event.  @t must have been
 * seized by the ptracer.
 *
 * If @t is running, STOP trap will be taken.  If trapped for STOP and
 * ptracer is listening for events, tracee is woken up so that it can
 * re-trap for the new event.  If trapped otherwise, STOP trap will be
 * eventually taken without returning to userland after the existing traps
 * are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	assert_spin_locked(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}

/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals. The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;
	sigset_t flush;

	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
		if (!(signal->flags & SIGNAL_GROUP_EXIT))
			return sig == SIGKILL;
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		siginitset(&flush, sigmask(SIGCONT));
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t)
			flush_sigqueue_mask(&flush, &t->pending);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		siginitset(&flush, SIG_KERNEL_STOP_MASK);
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t) {
			flush_sigqueue_mask(&flush, &t->pending);
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			if (likely(!(t->ptrace & PT_SEIZED)))
				wake_up_state(t, __TASK_STOPPED);
			else
				ptrace_trap_notify(t);
		}

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal().
			 */
			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, force);
}

/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline bool wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return false;

	if (p->flags & PF_EXITING)
		return false;

	if (sig == SIGKILL)
		return true;

	if (task_is_stopped_or_traced(p))
		return false;

	return task_curr(p) || !signal_pending(p);
}

static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if ((type == PIDTYPE_PID) || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & SIGNAL_GROUP_EXIT) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !p->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}

static inline bool legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}

#ifdef CONFIG_USER_NS
static inline void userns_fixup_signal_uid(struct kernel_siginfo *info, struct task_struct *t)
{
	if (current_user_ns() == task_cred_xxx(t, user_ns))
		return;

	if (SI_FROMKERNEL(info))
		return;

	rcu_read_lock();
	info->si_uid = from_kuid_munged(task_cred_xxx(t, user_ns),
					make_kuid(current_user_ns(), info->si_uid));
	rcu_read_unlock();
}
#else
static inline void userns_fixup_signal_uid(struct kernel_siginfo *info, struct task_struct *t)
{
	return;
}
#endif

static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
			enum pid_type type, int from_ancestor_ns)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;

	assert_spin_locked(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t,
			from_ancestor_ns || (info == SEND_SIG_PRIV)))
		goto ret;

	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * Skip useless siginfo allocation for SIGKILL and kernel threads.
	 */
	if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism.  It is implementation
	 * defined whether kill() does so.  We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit);
	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			q->info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
			break;
		case (unsigned long) SEND_SIG_PRIV:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			if (from_ancestor_ns)
				q->info.si_pid = 0;
			break;
		}

		userns_fixup_signal_uid(&q->info, t);

	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER) {
			/*
			 * Queue overflow, abort.  We may abort if the
			 * signal was rt and sent by user using something
			 * other than kill().
			 */
			result = TRACE_SIGNAL_OVERFLOW_FAIL;
			ret = -EAGAIN;
			goto ret;
		} else {
			/*
			 * This is a silent loss of information.  We still
			 * send the signal, but the *info bits are lost.
			 */
			result = TRACE_SIGNAL_LOSE_INFO;
		}
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);

	/* Let multiprocess signals appear after on-going forks */
	if (type > PIDTYPE_TGID) {
		struct multiprocess_signals *delayed;
		hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
			sigset_t *signal = &delayed->signal;
			/* Can't queue both a stop and a continue signal */
			if (sig == SIGCONT)
				sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
			else if (sig_kernel_stop(sig))
				sigdelset(signal, SIGCONT);
			sigaddset(signal, sig);
		}
	}

	complete_signal(sig, t, type);
ret:
	trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
	return ret;
}

static int send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
			enum pid_type type)
{
	int from_ancestor_ns = 0;

#ifdef CONFIG_PID_NS
	from_ancestor_ns = si_fromuser(info) &&
			   !task_pid_nr_ns(current, task_active_pid_ns(t));
#endif

	return __send_signal(sig, info, t, type, from_ancestor_ns);
}

static void print_fatal_signal(int signr)
{
	struct pt_regs *regs = signal_pt_regs();
	pr_info("potentially unexpected fatal signal %d.\n", signr);

#if defined(__i386__) && !defined(__arch_um__)
	pr_info("code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			pr_cont("%02x ", insn);
		}
	}
	pr_cont("\n");
#endif
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option(&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);

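/*
 * Editor's note: print_fatal_signals can be enabled on the kernel command
 * line via the __setup() hook above, or at runtime through the matching
 * sysctl:
 *
 *	print-fatal-signals=1				(boot parameter)
 *	echo 1 > /proc/sys/kernel/print-fatal-signals	(runtime)
 */
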
int
__group_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, PIDTYPE_TGID);
}

int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
			enum pid_type type)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal(sig, info, p, type);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}

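/*
 * Editor's sketch (not upstream code): a minimal in-kernel sender that
 * fills in a siginfo by hand; "victim" is a hypothetical task pointer the
 * caller holds a reference to, and SI_KERNEL marks the signal as
 * kernel-generated:
 *
 *	kernel_siginfo_t info;
 *
 *	clear_siginfo(&info);
 *	info.si_signo = SIGTERM;
 *	info.si_code = SI_KERNEL;
 *	do_send_sig_info(SIGTERM, &info, victim, PIDTYPE_PID);
 */
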
/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
int
force_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	/*
	 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
	 * debugging to leave init killable.
	 */
	if (action->sa.sa_handler == SIG_DFL && !t->ptrace)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = send_signal(sig, info, t, PIDTYPE_PID);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;

	p->signal->group_stop_count = 0;

	while_each_thread(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}

struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		/*
		 * This sighand can be already freed and even reused, but
		 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
		 * initializes ->siglock: this slab can't go away, it has
		 * the same object type, ->siglock can't be reinitialized.
		 *
		 * We need to ensure that tsk->sighand is still the same
		 * after we take the lock, we can race with de_thread() or
		 * __exit_signal(). In the latter case the next iteration
		 * must see ->sighand == NULL.
		 */
		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == tsk->sighand))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}
	rcu_read_unlock();

	return sighand;
}

/*
 * send signal info to all the members of a group
 */
int group_send_sig_info(int sig, struct kernel_siginfo *info,
			struct task_struct *p, enum pid_type type)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, type);

	return ret;
}

/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}

int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	for (;;) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			error = group_send_sig_info(sig, info, p, PIDTYPE_TGID);
		rcu_read_unlock();
		if (likely(!p || error != -ESRCH))
			return error;

		/*
		 * The task was unhashed in between, try again.  If it
		 * is dead, pid_task() will return NULL, if we race with
		 * de_thread() it will find the new leader.
		 */
	}
}

static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int error;
	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}

static inline bool kill_as_cred_perm(const struct cred *cred,
				     struct task_struct *target)
{
	const struct cred *pcred = __task_cred(target);

	return uid_eq(cred->euid, pcred->suid) ||
	       uid_eq(cred->euid, pcred->uid) ||
	       uid_eq(cred->uid, pcred->suid) ||
	       uid_eq(cred->uid, pcred->uid);
}

/* like kill_pid_info(), but doesn't use uid/euid of "current" */
int kill_pid_info_as_cred(int sig, struct kernel_siginfo *info, struct pid *pid,
			 const struct cred *cred)
{
	int ret = -EINVAL;
	struct task_struct *p;
	unsigned long flags;

	if (!valid_signal(sig))
		return ret;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if (si_fromuser(info) && !kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, info, sig, cred);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal(sig, info, p, PIDTYPE_TGID, 0);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_cred);

/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0) {
		rcu_read_lock();
		ret = kill_pid_info(sig, info, find_vpid(pid));
		rcu_read_unlock();
		return ret;
	}

	/* -INT_MIN is undefined.  Exclude this case to avoid a UBSAN warning */
	if (pid == INT_MIN)
		return -ESRCH;

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct *p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p,
							      PIDTYPE_MAX);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}

/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, PIDTYPE_PID);
}
EXPORT_SYMBOL(send_sig_info);

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}
EXPORT_SYMBOL(send_sig);

void force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}
EXPORT_SYMBOL(force_sig);

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
void force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
}

int force_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
#ifdef __ARCH_SI_TRAPNO
	info.si_trapno = trapno;
#endif
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return force_sig_info(info.si_signo, &info, t);
}

int send_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
#ifdef __ARCH_SI_TRAPNO
	info.si_trapno = trapno;
#endif
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return send_sig_info(info.si_signo, &info, t);
}

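/*
 * Editor's sketch (not upstream code): on architectures without the
 * trapno/ia64 extras, a typical force_sig_fault() caller is the page
 * fault handler reporting a bad user access; fault_address here is a
 * hypothetical placeholder for the faulting user virtual address:
 *
 *	force_sig_fault(SIGSEGV, SEGV_MAPERR,
 *			(void __user *)fault_address, current);
 */
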
int force_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return force_sig_info(info.si_signo, &info, t);
}

int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return send_sig_info(info.si_signo, &info, t);
}
EXPORT_SYMBOL(send_sig_mceerr);

int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_BNDERR;
	info.si_addr = addr;
	info.si_lower = lower;
	info.si_upper = upper;
	return force_sig_info(info.si_signo, &info, current);
}

#ifdef SEGV_PKUERR
int force_sig_pkuerr(void __user *addr, u32 pkey)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_PKUERR;
	info.si_addr = addr;
	info.si_pkey = pkey;
	return force_sig_info(info.si_signo, &info, current);
}
#endif

/* For the crazy architectures that include trap information in
 * the errno field, instead of an actual errno value.
 */
int force_sig_ptrace_errno_trap(int errno, void __user *addr)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGTRAP;
	info.si_errno = errno;
	info.si_code = TRAP_HWBKPT;
	info.si_addr = addr;
	return force_sig_info(info.si_signo, &info, current);
}

int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);

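/*
 * Editor's sketch (not upstream code): how a driver might signal a user
 * process it holds a struct pid reference for; keeping a struct pid
 * rather than a raw pid_t protects against pid reuse. user_pid is a
 * hypothetical pid_t received from userspace:
 *
 *	struct pid *target = find_get_pid(user_pid);
 *
 *	if (target) {
 *		kill_pid(target, SIGIO, 1);	// priv=1: send as the kernel
 *		put_pid(target);
 *	}
 */
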
1da177e4
LT
1714/*
1715 * These functions support sending signals using preallocated sigqueue
1716 * structures. This is needed "because realtime applications cannot
1717 * afford to lose notifications of asynchronous events, like timer
5aba085e 1718 * expirations or I/O completions". In the case of POSIX Timers
1da177e4
LT
1719 * we allocate the sigqueue structure from the timer_create. If this
1720 * allocation fails we are able to report the failure to the application
1721 * with an EAGAIN error.
1722 */
1da177e4
LT
1723struct sigqueue *sigqueue_alloc(void)
1724{
f84d49b2 1725 struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
1da177e4 1726
f84d49b2 1727 if (q)
1da177e4 1728 q->flags |= SIGQUEUE_PREALLOC;
f84d49b2
NO
1729
1730 return q;
1da177e4
LT
1731}
1732
1733void sigqueue_free(struct sigqueue *q)
1734{
1735 unsigned long flags;
60187d27
ON
1736 spinlock_t *lock = &current->sighand->siglock;
1737
1da177e4
LT
1738 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1739 /*
c8e85b4f
ON
1740 * We must hold ->siglock while testing q->list
1741 * to serialize with collect_signal() or with
da7978b0 1742 * __exit_signal()->flush_sigqueue().
1da177e4 1743 */
60187d27 1744 spin_lock_irqsave(lock, flags);
c8e85b4f
ON
1745 q->flags &= ~SIGQUEUE_PREALLOC;
1746 /*
1747 * If it is queued it will be freed when dequeued,
1748 * like the "regular" sigqueue.
1749 */
60187d27 1750 if (!list_empty(&q->list))
c8e85b4f 1751 q = NULL;
60187d27
ON
1752 spin_unlock_irqrestore(lock, flags);
1753
c8e85b4f
ON
1754 if (q)
1755 __sigqueue_free(q);
1da177e4
LT
1756}

int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	struct task_struct *t;
	unsigned long flags;
	int ret, result;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	rcu_read_lock();
	t = pid_task(pid, type);
	if (!t || !likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, false))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		result = TRACE_SIGNAL_ALREADY_PENDING;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, type);
	result = TRACE_SIGNAL_DELIVERED;
out:
	trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
	unlock_task_sighand(t, &flags);
ret:
	rcu_read_unlock();
	return ret;
}

/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns true if our parent ignored us and so we've switched to
 * self-reaping.
 */
bool do_notify_parent(struct task_struct *tsk, int sig)
{
	struct kernel_siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	bool autoreap = false;
	u64 utime, stime;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead. */
	BUG_ON(task_is_stopped_or_traced(tsk));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	if (sig != SIGCHLD) {
		/*
		 * This is only possible if parent == real_parent.
		 * Check if it has changed security domain.
		 */
		if (tsk->parent_exec_id != tsk->parent->self_exec_id)
			sig = SIGCHLD;
	}

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * We are under tasklist_lock here so our parent is tied to
	 * us and cannot change.
	 *
	 * task_active_pid_ns will always return the same pid namespace
	 * until a task passes through release_task.
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg it is not
	 * correct to rely on this.
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
				       task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
	info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		autoreap = true;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (valid_signal(sig) && sig)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return autoreap;
}

/**
 * do_notify_parent_cldstop - notify parent of stopped/continued state change
 * @tsk: task reporting the state change
 * @for_ptracer: the notification is for ptracer
 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
 *
 * Notify @tsk's parent that the stopped/continued state has changed.  If
 * @for_ptracer is %false, @tsk's group leader notifies its real parent.
 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
 *
 * CONTEXT:
 * Must be called with tasklist_lock at least read locked.
 */
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     bool for_ptracer, int why)
{
	struct kernel_siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;
	u64 utime, stime;

	if (for_ptracer) {
		parent = tsk->parent;
	} else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	clear_siginfo(&info);
	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 4 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime);
	info.si_stime = nsec_to_clock_t(stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}

static inline bool may_ptrace_stop(void)
{
	if (!likely(current->ptrace))
		return false;
	/*
	 * Are we in the middle of do_coredump?  If so, and our tracer is
	 * also part of the coredump, stopping is both a deadlock and
	 * pointless because our tracer is dead, so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_state != NULL.  Otherwise it
	 * is safe to enter schedule().
	 *
	 * This is almost outdated, a task with the pending SIGKILL can't
	 * block in TASK_TRACED.  But PTRACE_EVENT_EXIT can be reported
	 * after SIGKILL was already dequeued.
	 */
	if (unlikely(current->mm->core_state) &&
	    unlikely(current->mm == current->parent->mm))
		return false;

	return true;
}

/*
 * Return non-zero if there is a SIGKILL that should be waking us up.
 * Called with the siglock held.
 */
static bool sigkill_pending(struct task_struct *tsk)
{
	return sigismember(&tsk->pending.signal, SIGKILL) ||
	       sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
}

/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer
 * is gone, we keep current->exit_code unless clear_code.
 */
static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t *info)
	__releases(&current->sighand->siglock)
	__acquires(&current->sighand->siglock)
{
	bool gstop_done = false;

	if (arch_ptrace_stop_needed(exit_code, info)) {
		/*
		 * The arch code has something special to do before a
		 * ptrace stop.  This is allowed to block, e.g. for faults
		 * on user stack pages.  We can't keep the siglock while
		 * calling arch_ptrace_stop, so we must release it now.
		 * To preserve proper semantics, we must do this before
		 * any signal bookkeeping like checking group_stop_count.
		 * Meanwhile, a SIGKILL could come in before we retake the
		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
		 * So after regaining the lock, we must check for SIGKILL.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		arch_ptrace_stop(exit_code, info);
		spin_lock_irq(&current->sighand->siglock);
		if (sigkill_pending(current))
			return;
	}

	set_special_state(TASK_TRACED);

	/*
	 * We're committing to trapping.  TRACED should be visible before
	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
	 * Also, transition to TRACED and updates to ->jobctl should be
	 * atomic with respect to siglock and should be done after the arch
	 * hook as siglock is released and regrabbed across it.
	 *
	 *     TRACER				    TRACEE
	 *
	 *     ptrace_attach()
	 * [L]   wait_on_bit(JOBCTL_TRAPPING)	[S] set_special_state(TRACED)
	 *     do_wait()
	 *       set_current_state()                smp_wmb();
	 *       ptrace_do_wait()
	 *         wait_task_stopped()
	 *           task_stopped_code()
	 * [L]         task_is_traced()		[S] task_clear_jobctl_trapping();
	 */
	smp_wmb();

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/*
	 * If @why is CLD_STOPPED, we're trapping to participate in a group
	 * stop.  Do the bookkeeping.  Note that if SIGCONT was delivered
	 * across siglock relocks since INTERRUPT was scheduled, PENDING
	 * could be clear now.  We act as if SIGCONT is received after
	 * TASK_TRACED is entered - ignore it.
	 */
	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
		gstop_done = task_participate_group_stop(current);

	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);

	/* entering a trap, clear TRAPPING */
	task_clear_jobctl_trapping(current);

	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	if (may_ptrace_stop()) {
		/*
		 * Notify parents of the stop.
		 *
		 * While ptraced, there are two parents - the ptracer and
		 * the real_parent of the group_leader.  The ptracer should
		 * know about every stop while the real parent is only
		 * interested in the completion of group stop.  The states
		 * for the two don't interact with each other.  Notify
		 * separately unless they're gonna be duplicates.
		 */
		do_notify_parent_cldstop(current, true, why);
		if (gstop_done && ptrace_reparented(current))
			do_notify_parent_cldstop(current, false, why);

		/*
		 * Don't want to allow preemption here, because
		 * sys_ptrace() needs this task to be inactive.
		 *
		 * XXX: implement read_unlock_no_resched().
		 */
		preempt_disable();
		read_unlock(&tasklist_lock);
		preempt_enable_no_resched();
		cgroup_enter_frozen();
		freezable_schedule();
		cgroup_leave_frozen(true);
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't drop the lock yet, another tracer may come.
		 *
		 * If @gstop_done, the ptracer went away between group stop
		 * completion and here.  During detach, it would have set
		 * JOBCTL_STOP_PENDING on us and we'll re-enter
		 * TASK_STOPPED in do_signal_stop() on return, so notifying
		 * the real parent of the group stop completion is enough.
		 */
		if (gstop_done)
			do_notify_parent_cldstop(current, false, why);

		/* tasklist protects us from ptrace_freeze_traced() */
		__set_current_state(TASK_RUNNING);
		if (clear_code)
			current->exit_code = 0;
		read_unlock(&tasklist_lock);
	}

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/* LISTENING can be set only during STOP traps, clear it */
	current->jobctl &= ~JOBCTL_LISTENING;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 * This sets TIF_SIGPENDING, but never clears it.
	 */
	recalc_sigpending_tsk(current);
}

static void ptrace_do_notify(int signr, int exit_code, int why)
{
	kernel_siginfo_t info;

	clear_siginfo(&info);
	info.si_signo = signr;
	info.si_code = exit_code;
	info.si_pid = task_pid_vnr(current);
	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

	/* Let the debugger run.  */
	ptrace_stop(exit_code, why, 1, &info);
}

void ptrace_notify(int exit_code)
{
	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
	if (unlikely(current->task_works))
		task_work_run();

	spin_lock_irq(&current->sighand->siglock);
	ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
	spin_unlock_irq(&current->sighand->siglock);
}

/**
 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
 * @signr: signr causing group stop if initiating
 *
 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
 * and participate in it.  If already set, participate in the existing
 * group stop.  If participated in a group stop (and thus slept), %true is
 * returned with siglock released.
 *
 * If ptraced, this function doesn't handle stop itself.  Instead,
 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
 * untouched.  The caller must ensure that INTERRUPT trap handling takes
 * place afterwards.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which is released
 * on %true return.
 *
 * RETURNS:
 * %false if group stop is already cancelled or ptrace trap is scheduled.
 * %true if participated in group stop.
 */
static bool do_signal_stop(int signr)
	__releases(&current->sighand->siglock)
{
	struct signal_struct *sig = current->signal;

	if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
		struct task_struct *t;

		/* signr will be recorded in task->jobctl for retries */
		WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);

		if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
		    unlikely(signal_group_exit(sig)))
			return false;
		/*
		 * There is no group stop already in progress.  We must
		 * initiate one now.
		 *
		 * While ptraced, a task may be resumed while group stop is
		 * still in effect and then receive a stop signal and
		 * initiate another group stop.  This deviates from the
		 * usual behavior as two consecutive stop signals can't
		 * cause two group stops when !ptraced.  That is why we
		 * also check !task_is_stopped(t) below.
		 *
		 * The condition can be distinguished by testing whether
		 * SIGNAL_STOP_STOPPED is already set.  Don't generate
		 * group_exit_code in such case.
		 *
		 * This is not necessary for SIGNAL_STOP_CONTINUED because
		 * an intervening stop signal is required to cause two
		 * continued events regardless of ptrace.
		 */
		if (!(sig->flags & SIGNAL_STOP_STOPPED))
			sig->group_exit_code = signr;

		sig->group_stop_count = 0;

		if (task_set_jobctl_pending(current, signr | gstop))
			sig->group_stop_count++;

		t = current;
		while_each_thread(current, t) {
			/*
			 * Setting state to TASK_STOPPED for a group
			 * stop is always done with the siglock held,
			 * so this check has no races.
			 */
			if (!task_is_stopped(t) &&
			    task_set_jobctl_pending(t, signr | gstop)) {
				sig->group_stop_count++;
				if (likely(!(t->ptrace & PT_SEIZED)))
					signal_wake_up(t, 0);
				else
					ptrace_trap_notify(t);
			}
		}
	}

	if (likely(!current->ptrace)) {
		int notify = 0;

		/*
		 * If there are no other threads in the group, or if there
		 * is a group stop in progress and we are the last to stop,
		 * report to the parent.
		 */
		if (task_participate_group_stop(current))
			notify = CLD_STOPPED;

		set_special_state(TASK_STOPPED);
		spin_unlock_irq(&current->sighand->siglock);

		/*
		 * Notify the parent of the group stop completion.  Because
		 * we're not holding either the siglock or tasklist_lock
		 * here, ptracer may attach in between; however, this is for
		 * group stop and should always be delivered to the real
		 * parent of the group leader.  The new ptracer will get
		 * its notification when this task transitions into
		 * TASK_TRACED.
		 */
		if (notify) {
			read_lock(&tasklist_lock);
			do_notify_parent_cldstop(current, false, notify);
			read_unlock(&tasklist_lock);
		}

		/* Now we don't run again until woken by SIGCONT or SIGKILL */
		cgroup_enter_frozen();
		freezable_schedule();
		return true;
	} else {
		/*
		 * While ptraced, group stop is handled by STOP trap.
		 * Schedule it and let the caller deal with it.
		 */
		task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
		return false;
	}
}

/**
 * do_jobctl_trap - take care of ptrace jobctl traps
 *
 * When PT_SEIZED, it's used for both group stop and explicit
 * SEIZE/INTERRUPT traps.  Both generate PTRACE_EVENT_STOP trap with
 * accompanying siginfo.  If stopped, lower eight bits of exit_code contain
 * the stop signal; otherwise, %SIGTRAP.
 *
 * When !PT_SEIZED, it's used only for group stop trap with stop signal
 * number as exit_code and no siginfo.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which may be
 * released and re-acquired before returning with intervening sleep.
 */
static void do_jobctl_trap(void)
{
	struct signal_struct *signal = current->signal;
	int signr = current->jobctl & JOBCTL_STOP_SIGMASK;

	if (current->ptrace & PT_SEIZED) {
		if (!signal->group_stop_count &&
		    !(signal->flags & SIGNAL_STOP_STOPPED))
			signr = SIGTRAP;
		WARN_ON_ONCE(!signr);
		ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
				 CLD_STOPPED);
	} else {
		WARN_ON_ONCE(!signr);
		ptrace_stop(signr, CLD_STOPPED, 0, NULL);
		current->exit_code = 0;
	}
}

/**
 * do_freezer_trap - handle the freezer jobctl trap
 *
 * Puts the task into frozen state, unless the task is about to quit,
 * in which case it drops JOBCTL_TRAP_FREEZE instead.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held,
 * which is always released before returning.
 */
static void do_freezer_trap(void)
	__releases(&current->sighand->siglock)
{
	/*
	 * If there are other trap bits pending except JOBCTL_TRAP_FREEZE,
	 * let's make another loop to give it a chance to be handled.
	 * In any case, we'll return back.
	 */
	if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) !=
	     JOBCTL_TRAP_FREEZE) {
		spin_unlock_irq(&current->sighand->siglock);
		return;
	}

	/*
	 * Now we're sure that there is no pending fatal signal and no
	 * pending traps.  Clear TIF_SIGPENDING to not get out of schedule()
	 * immediately (if there is a non-fatal signal pending), and
	 * put the task into sleep.
	 */
	__set_current_state(TASK_INTERRUPTIBLE);
	clear_thread_flag(TIF_SIGPENDING);
	spin_unlock_irq(&current->sighand->siglock);
	cgroup_enter_frozen();
	freezable_schedule();
}

static int ptrace_signal(int signr, kernel_siginfo_t *info)
{
	/*
	 * We do not check sig_kernel_stop(signr) but set this marker
	 * unconditionally because we do not know whether debugger will
	 * change signr.  This flag has no meaning unless we are going
	 * to stop after return from ptrace_stop().  In this case it will
	 * be checked in do_signal_stop(), we should only stop if it was
	 * not cleared by SIGCONT while we were sleeping.  See also the
	 * comment in dequeue_signal().
	 */
	current->jobctl |= JOBCTL_STOP_DEQUEUED;
	ptrace_stop(signr, CLD_TRAPPED, 0, info);

	/* We're back.  Did the debugger cancel the sig?  */
	signr = current->exit_code;
	if (signr == 0)
		return signr;

	current->exit_code = 0;

	/*
	 * Update the siginfo structure if the signal has
	 * changed.  If the debugger wanted something
	 * specific in the siginfo structure then it should
	 * have updated *info via PTRACE_SETSIGINFO.
	 */
	if (signr != info->si_signo) {
		clear_siginfo(info);
		info->si_signo = signr;
		info->si_errno = 0;
		info->si_code = SI_USER;
		rcu_read_lock();
		info->si_pid = task_pid_vnr(current->parent);
		info->si_uid = from_kuid_munged(current_user_ns(),
						task_uid(current->parent));
		rcu_read_unlock();
	}

	/* If the (new) signal is now blocked, requeue it.  */
	if (sigismember(&current->blocked, signr)) {
		send_signal(signr, info, current, PIDTYPE_PID);
		signr = 0;
	}

	return signr;
}

bool get_signal(struct ksignal *ksig)
{
	struct sighand_struct *sighand = current->sighand;
	struct signal_struct *signal = current->signal;
	int signr;

	if (unlikely(current->task_works))
		task_work_run();

	if (unlikely(uprobe_deny_signal()))
		return false;

	/*
	 * Do this once, we can't return to user-mode if freezing() == T.
	 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
	 * thus do not need another check after return.
	 */
	try_to_freeze();

relock:
	spin_lock_irq(&sighand->siglock);
	/*
	 * Every stopped thread goes here after wakeup.  Check to see if
	 * we should notify the parent, prepare_signal(SIGCONT) encodes
	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
	 */
	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
		int why;

		if (signal->flags & SIGNAL_CLD_CONTINUED)
			why = CLD_CONTINUED;
		else
			why = CLD_STOPPED;

		signal->flags &= ~SIGNAL_CLD_MASK;

		spin_unlock_irq(&sighand->siglock);

		/*
		 * Notify the parent that we're continuing.  This event is
		 * always per-process and doesn't make a whole lot of sense
		 * for ptracers, who shouldn't consume the state via
		 * wait(2) either, but, for backward compatibility, notify
		 * the ptracer of the group leader too unless it's gonna be
		 * a duplicate.
		 */
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current, false, why);

		if (ptrace_reparented(current->group_leader))
			do_notify_parent_cldstop(current->group_leader,
						 true, why);
		read_unlock(&tasklist_lock);

		goto relock;
	}

	/* Has this task already been marked for death? */
	if (signal_group_exit(signal)) {
		ksig->info.si_signo = signr = SIGKILL;
		sigdelset(&current->pending.signal, SIGKILL);
		recalc_sigpending();
		goto fatal;
	}

	for (;;) {
		struct k_sigaction *ka;

		if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
		    do_signal_stop(0))
			goto relock;

		if (unlikely(current->jobctl &
			     (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) {
			if (current->jobctl & JOBCTL_TRAP_MASK) {
				do_jobctl_trap();
				spin_unlock_irq(&sighand->siglock);
			} else if (current->jobctl & JOBCTL_TRAP_FREEZE)
				do_freezer_trap();

			goto relock;
		}

		/*
		 * If the task is leaving the frozen state, let's update
		 * cgroup counters and reset the frozen bit.
		 */
		if (unlikely(cgroup_task_frozen(current))) {
			spin_unlock_irq(&sighand->siglock);
			cgroup_leave_frozen(false);
			goto relock;
		}

		/*
		 * Signals generated by the execution of an instruction
		 * need to be delivered before any other pending signals
		 * so that the instruction pointer in the signal stack
		 * frame points to the faulting instruction.
		 */
		signr = dequeue_synchronous_signal(&ksig->info);
		if (!signr)
			signr = dequeue_signal(current, &current->blocked, &ksig->info);

		if (!signr)
			break; /* will return 0 */

		if (unlikely(current->ptrace) && signr != SIGKILL) {
			signr = ptrace_signal(signr, &ksig->info);
			if (!signr)
				continue;
		}

		ka = &sighand->action[signr-1];

		/* Trace actually delivered signals. */
		trace_signal_deliver(signr, &ksig->info, ka);

		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
			continue;
		if (ka->sa.sa_handler != SIG_DFL) {
			/* Run the handler.  */
			ksig->ka = *ka;

			if (ka->sa.sa_flags & SA_ONESHOT)
				ka->sa.sa_handler = SIG_DFL;

			break; /* will return non-zero "signr" value */
		}

		/*
		 * Now we are doing the default action for this signal.
		 */
		if (sig_kernel_ignore(signr)) /* Default is nothing. */
			continue;

		/*
		 * Global init gets no signals it doesn't want.
		 * Container-init gets no signals it doesn't want from same
		 * container.
		 *
		 * Note that if global/container-init sees a sig_kernel_only()
		 * signal here, the signal must have been generated internally
		 * or must have come from an ancestor namespace.  In either
		 * case, the signal cannot be dropped.
		 */
		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
		    !sig_kernel_only(signr))
			continue;

		if (sig_kernel_stop(signr)) {
			/*
			 * The default action is to stop all threads in
			 * the thread group.  The job control signals
			 * do nothing in an orphaned pgrp, but SIGSTOP
			 * always works.  Note that siglock needs to be
			 * dropped during the call to is_orphaned_pgrp()
			 * because of lock ordering with tasklist_lock.
			 * This allows an intervening SIGCONT to be posted.
			 * We need to check for that and bail out if necessary.
			 */
			if (signr != SIGSTOP) {
				spin_unlock_irq(&sighand->siglock);

				/* signals can be posted during this window */

				if (is_current_pgrp_orphaned())
					goto relock;

				spin_lock_irq(&sighand->siglock);
			}

			if (likely(do_signal_stop(ksig->info.si_signo))) {
				/* It released the siglock.  */
				goto relock;
			}

			/*
			 * We didn't actually stop, due to a race
			 * with SIGCONT or something like that.
			 */
			continue;
		}

	fatal:
		spin_unlock_irq(&sighand->siglock);
		if (unlikely(cgroup_task_frozen(current)))
			cgroup_leave_frozen(true);

		/*
		 * Anything else is fatal, maybe with a core dump.
		 */
		current->flags |= PF_SIGNALED;

		if (sig_kernel_coredump(signr)) {
			if (print_fatal_signals)
				print_fatal_signal(ksig->info.si_signo);
			proc_coredump_connector(current);
			/*
			 * If it was able to dump core, this kills all
			 * other threads in the group and synchronizes with
			 * their demise.  If we lost the race with another
			 * thread getting here, it set group_exit_code
			 * first and our do_group_exit call below will use
			 * that value and ignore the one we pass it.
			 */
			do_coredump(&ksig->info);
		}

		/*
		 * Death signals, no core dump.
		 */
		do_group_exit(ksig->info.si_signo);
		/* NOTREACHED */
	}
	spin_unlock_irq(&sighand->siglock);

	ksig->sig = signr;
	return ksig->sig > 0;
}

/**
 * signal_delivered - called after a signal has been delivered
 * @ksig: kernel signal struct
 * @stepping: nonzero if debugger single-step or block-step in use
 *
 * This function should be called when a signal has successfully been
 * delivered.  It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
 * is always blocked, and the signal itself is blocked unless %SA_NODEFER
 * is set in @ksig->ka.sa.sa_flags).  Tracing is notified.
 */
static void signal_delivered(struct ksignal *ksig, int stepping)
{
	sigset_t blocked;

	/*
	 * A signal was successfully delivered, and the saved sigmask was
	 * stored on the signal frame and will be restored by sigreturn.
	 * So we can simply clear the restore sigmask flag.
	 */
	clear_restore_sigmask();

	sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
	if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
		sigaddset(&blocked, ksig->sig);
	set_current_blocked(&blocked);
	tracehook_signal_handler(stepping);
}

void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
{
	if (failed)
		force_sigsegv(ksig->sig, current);
	else
		signal_delivered(ksig, stepping);
}

/*
 * It could be that complete_signal() picked us to notify about the
 * group-wide signal.  Other threads should be notified now to take
 * the shared signals in @which since we will not.
 */
static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
{
	sigset_t retarget;
	struct task_struct *t;

	sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
	if (sigisemptyset(&retarget))
		return;

	t = tsk;
	while_each_thread(tsk, t) {
		if (t->flags & PF_EXITING)
			continue;

		if (!has_pending_signals(&retarget, &t->blocked))
			continue;
		/* Remove the signals this thread can handle. */
		sigandsets(&retarget, &retarget, &t->blocked);

		if (!signal_pending(t))
			signal_wake_up(t, 0);

		if (sigisemptyset(&retarget))
			break;
	}
}

void exit_signals(struct task_struct *tsk)
{
	int group_stop = 0;
	sigset_t unblocked;

	/*
	 * @tsk is about to have PF_EXITING set - lock out users which
	 * expect stable threadgroup.
	 */
	cgroup_threadgroup_change_begin(tsk);

	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
		tsk->flags |= PF_EXITING;
		cgroup_threadgroup_change_end(tsk);
		return;
	}

	spin_lock_irq(&tsk->sighand->siglock);
	/*
	 * From now this task is not visible for group-wide signals,
	 * see wants_signal(), do_signal_stop().
	 */
	tsk->flags |= PF_EXITING;

	cgroup_threadgroup_change_end(tsk);

	if (!signal_pending(tsk))
		goto out;

	unblocked = tsk->blocked;
	signotset(&unblocked);
	retarget_shared_pending(tsk, &unblocked);

	if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
	    task_participate_group_stop(tsk))
		group_stop = CLD_STOPPED;
out:
	spin_unlock_irq(&tsk->sighand->siglock);

	/*
	 * If group stop has completed, deliver the notification.  This
	 * should always go to the real parent of the group leader.
	 */
	if (unlikely(group_stop)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(tsk, false, group_stop);
		read_unlock(&tasklist_lock);
	}
}

/*
 * System call entry points.
 */

/**
 * sys_restart_syscall - restart a system call
 */
SYSCALL_DEFINE0(restart_syscall)
{
	struct restart_block *restart = &current->restart_block;
	return restart->fn(restart);
}

long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}

static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
{
	if (signal_pending(tsk) && !thread_group_empty(tsk)) {
		sigset_t newblocked;
		/* A set of now blocked but previously unblocked signals. */
		sigandnsets(&newblocked, newset, &current->blocked);
		retarget_shared_pending(tsk, &newblocked);
	}
	tsk->blocked = *newset;
	recalc_sigpending();
}

/**
 * set_current_blocked - change current->blocked mask
 * @newset: new mask
 *
 * It is wrong to change ->blocked directly, this helper should be used
 * to ensure the process can't miss a shared signal we are going to block.
 */
void set_current_blocked(sigset_t *newset)
{
	sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
	__set_current_blocked(newset);
}

void __set_current_blocked(const sigset_t *newset)
{
	struct task_struct *tsk = current;

	/*
	 * In case the signal mask hasn't changed, there is nothing we need
	 * to do.  The current->blocked shouldn't be modified by another task.
	 */
	if (sigequalsets(&tsk->blocked, newset))
		return;

	spin_lock_irq(&tsk->sighand->siglock);
	__set_task_blocked(tsk, newset);
	spin_unlock_irq(&tsk->sighand->siglock);
}

/*
 * This is also useful for kernel threads that want to temporarily
 * (or permanently) block certain signals.
 *
 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
 * interface happily blocks "unblockable" signals like SIGKILL
 * and friends.
 */
int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
{
	struct task_struct *tsk = current;
	sigset_t newset;

	/* Lockless, only current can change ->blocked, never from irq */
	if (oldset)
		*oldset = tsk->blocked;

	switch (how) {
	case SIG_BLOCK:
		sigorsets(&newset, &tsk->blocked, set);
		break;
	case SIG_UNBLOCK:
		sigandnsets(&newset, &tsk->blocked, set);
		break;
	case SIG_SETMASK:
		newset = *set;
		break;
	default:
		return -EINVAL;
	}

	__set_current_blocked(&newset);
	return 0;
}
EXPORT_SYMBOL(sigprocmask);
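
/*
 * Illustrative sketch (editor's addition, hypothetical and not compiled):
 * a kernel thread temporarily blocking a signal with the in-kernel
 * sigprocmask(); unlike the syscall, this interface would even block
 * SIGKILL if asked to.
 */
#if 0
static void example_block_sighup(void)
{
	sigset_t blocked, saved;

	sigemptyset(&blocked);
	sigaddset(&blocked, SIGHUP);
	sigprocmask(SIG_BLOCK, &blocked, &saved);	/* block SIGHUP */

	/* ... work that must not be interrupted by SIGHUP ... */

	sigprocmask(SIG_SETMASK, &saved, NULL);		/* restore old mask */
}
#endif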

/*
 * This API helps set app-provided sigmasks.
 *
 * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
 * epoll_pwait where a new sigmask is passed from userland for the syscalls.
 */
int set_user_sigmask(const sigset_t __user *usigmask, sigset_t *set,
		     sigset_t *oldset, size_t sigsetsize)
{
	if (!usigmask)
		return 0;

	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;
	if (copy_from_user(set, usigmask, sizeof(sigset_t)))
		return -EFAULT;

	*oldset = current->blocked;
	set_current_blocked(set);

	return 0;
}
EXPORT_SYMBOL(set_user_sigmask);

#ifdef CONFIG_COMPAT
int set_compat_user_sigmask(const compat_sigset_t __user *usigmask,
			    sigset_t *set, sigset_t *oldset,
			    size_t sigsetsize)
{
	if (!usigmask)
		return 0;

	if (sigsetsize != sizeof(compat_sigset_t))
		return -EINVAL;
	if (get_compat_sigset(set, usigmask))
		return -EFAULT;

	*oldset = current->blocked;
	set_current_blocked(set);

	return 0;
}
EXPORT_SYMBOL(set_compat_user_sigmask);
#endif

/*
 * restore_user_sigmask:
 * usigmask: sigmask passed in from userland.
 * sigsaved: saved sigmask when the syscall started and changed the sigmask to
 *           usigmask.
 *
 * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
 * epoll_pwait where a new sigmask is passed in from userland for the syscalls.
 */
void restore_user_sigmask(const void __user *usigmask, sigset_t *sigsaved)
{
	if (!usigmask)
		return;
	/*
	 * When signals are pending, do not restore them here.
	 * Restoring sigmask here can lead to delivering signals that the above
	 * syscalls are intended to block because of the sigmask passed in.
	 */
	if (signal_pending(current)) {
		current->saved_sigmask = *sigsaved;
		set_restore_sigmask();
		return;
	}

	/*
	 * This is needed because the fast syscall return path does not restore
	 * saved_sigmask when signals are not pending.
	 */
	set_current_blocked(sigsaved);
}
EXPORT_SYMBOL(restore_user_sigmask);
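
/*
 * Illustrative sketch (editor's addition, hypothetical and not compiled):
 * the pattern the two helpers above exist for, as used by syscalls like
 * ppoll/pselect -- install the caller's mask, wait, then restore it
 * carefully so a newly unblocked signal is not lost.  do_sys_poll()
 * stands in for the actual wait.
 */
#if 0
static int example_ppoll_body(struct pollfd __user *ufds, unsigned int nfds,
			      struct timespec64 *to,
			      const sigset_t __user *sigmask, size_t sigsetsize)
{
	sigset_t ksigmask, sigsaved;
	int ret;

	ret = set_user_sigmask(sigmask, &ksigmask, &sigsaved, sigsetsize);
	if (ret)
		return ret;

	ret = do_sys_poll(ufds, nfds, to);	/* may return -EINTR etc. */

	restore_user_sigmask(sigmask, &sigsaved);
	return ret;
}
#endif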

/**
 * sys_rt_sigprocmask - change the list of currently blocked signals
 * @how: whether to add, remove, or set signals
 * @nset: new set of blocked signals, if non-null
 * @oset: previous value of signal mask if non-null
 * @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
		sigset_t __user *, oset, size_t, sigsetsize)
{
	sigset_t old_set, new_set;
	int error;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	old_set = current->blocked;

	if (nset) {
		if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
			return -EFAULT;
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, NULL);
		if (error)
			return error;
	}

	if (oset) {
		if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
			return -EFAULT;
	}

	return 0;
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
		compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
{
	sigset_t old_set = current->blocked;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (nset) {
		sigset_t new_set;
		int error;
		if (get_compat_sigset(&new_set, nset))
			return -EFAULT;
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, NULL);
		if (error)
			return error;
	}
	return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
}
#endif

static void do_sigpending(sigset_t *set)
{
	spin_lock_irq(&current->sighand->siglock);
	sigorsets(set, &current->pending.signal,
		  &current->signal->shared_pending.signal);
	spin_unlock_irq(&current->sighand->siglock);

	/* Outside the lock because only this thread touches it.  */
	sigandsets(set, &current->blocked, set);
}

/**
 * sys_rt_sigpending - examine pending signals that have been raised
 *			while blocked
 * @uset: stores pending signals
 * @sigsetsize: size of sigset_t type or larger
 */
SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
{
	sigset_t set;

	if (sigsetsize > sizeof(*uset))
		return -EINVAL;

	do_sigpending(&set);

	if (copy_to_user(uset, &set, sigsetsize))
		return -EFAULT;

	return 0;
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
		compat_size_t, sigsetsize)
{
	sigset_t set;

	if (sigsetsize > sizeof(*uset))
		return -EINVAL;

	do_sigpending(&set);

	return put_compat_sigset(uset, &set, sigsetsize);
}
#endif

static const struct {
	unsigned char limit, layout;
} sig_sicodes[] = {
	[SIGILL]  = { NSIGILL,  SIL_FAULT },
	[SIGFPE]  = { NSIGFPE,  SIL_FAULT },
	[SIGSEGV] = { NSIGSEGV, SIL_FAULT },
	[SIGBUS]  = { NSIGBUS,  SIL_FAULT },
	[SIGTRAP] = { NSIGTRAP, SIL_FAULT },
#if defined(SIGEMT)
	[SIGEMT]  = { NSIGEMT,  SIL_FAULT },
#endif
	[SIGCHLD] = { NSIGCHLD, SIL_CHLD },
	[SIGPOLL] = { NSIGPOLL, SIL_POLL },
	[SIGSYS]  = { NSIGSYS,  SIL_SYS },
};

static bool known_siginfo_layout(unsigned sig, int si_code)
{
	if (si_code == SI_KERNEL)
		return true;
	else if (si_code > SI_USER) {
		if (sig_specific_sicodes(sig)) {
			if (si_code <= sig_sicodes[sig].limit)
				return true;
		}
		else if (si_code <= NSIGPOLL)
			return true;
	}
	else if (si_code >= SI_DETHREAD)
		return true;
	else if (si_code == SI_ASYNCNL)
		return true;
	return false;
}

enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
{
	enum siginfo_layout layout = SIL_KILL;
	if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
		if ((sig < ARRAY_SIZE(sig_sicodes)) &&
		    (si_code <= sig_sicodes[sig].limit)) {
			layout = sig_sicodes[sig].layout;
			/* Handle the exceptions */
			if ((sig == SIGBUS) &&
			    (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
				layout = SIL_FAULT_MCEERR;
			else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
				layout = SIL_FAULT_BNDERR;
#ifdef SEGV_PKUERR
			else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
				layout = SIL_FAULT_PKUERR;
#endif
		}
		else if (si_code <= NSIGPOLL)
			layout = SIL_POLL;
	} else {
		if (si_code == SI_TIMER)
			layout = SIL_TIMER;
		else if (si_code == SI_SIGIO)
			layout = SIL_POLL;
		else if (si_code < 0)
			layout = SIL_RT;
	}
	return layout;
}
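
/*
 * Illustrative sketch (editor's addition, not compiled): how the
 * classification above resolves for a few common (signal, si_code)
 * pairs, following the code directly.
 */
#if 0
	siginfo_layout(SIGSEGV, SEGV_MAPERR);	/* SIL_FAULT */
	siginfo_layout(SIGSEGV, SEGV_BNDERR);	/* SIL_FAULT_BNDERR */
	siginfo_layout(SIGCHLD, CLD_EXITED);	/* SIL_CHLD */
	siginfo_layout(SIGUSR1, SI_QUEUE);	/* si_code < 0: SIL_RT */
	siginfo_layout(SIGALRM, SI_TIMER);	/* SIL_TIMER */
#endif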

static inline char __user *si_expansion(const siginfo_t __user *info)
{
	return ((char __user *)info) + sizeof(struct kernel_siginfo);
}

int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from)
{
	char __user *expansion = si_expansion(to);
	if (copy_to_user(to, from, sizeof(struct kernel_siginfo)))
		return -EFAULT;
	if (clear_user(expansion, SI_EXPANSION_SIZE))
		return -EFAULT;
	return 0;
}

static int post_copy_siginfo_from_user(kernel_siginfo_t *info,
				       const siginfo_t __user *from)
{
	if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) {
		char __user *expansion = si_expansion(from);
		char buf[SI_EXPANSION_SIZE];
		int i;
		/*
		 * An unknown si_code might need more than
		 * sizeof(struct kernel_siginfo) bytes.  Verify all of the
		 * extra bytes are 0.  This guarantees copy_siginfo_to_user
		 * will return this data to userspace exactly.
		 */
		if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE))
			return -EFAULT;
		for (i = 0; i < SI_EXPANSION_SIZE; i++) {
			if (buf[i] != 0)
				return -E2BIG;
		}
	}
	return 0;
}

static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to,
				    const siginfo_t __user *from)
{
	if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
		return -EFAULT;
	to->si_signo = signo;
	return post_copy_siginfo_from_user(to, from);
}

int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from)
{
	if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
		return -EFAULT;
	return post_copy_siginfo_from_user(to, from);
}

#ifdef CONFIG_COMPAT
int copy_siginfo_to_user32(struct compat_siginfo __user *to,
			   const struct kernel_siginfo *from)
#if defined(CONFIG_X86_X32_ABI) || defined(CONFIG_IA32_EMULATION)
{
	return __copy_siginfo_to_user32(to, from, in_x32_syscall());
}
int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
			     const struct kernel_siginfo *from, bool x32_ABI)
#endif
{
	struct compat_siginfo new;
	memset(&new, 0, sizeof(new));

	new.si_signo = from->si_signo;
	new.si_errno = from->si_errno;
	new.si_code  = from->si_code;
	switch (siginfo_layout(from->si_signo, from->si_code)) {
	case SIL_KILL:
		new.si_pid = from->si_pid;
		new.si_uid = from->si_uid;
		break;
	case SIL_TIMER:
		new.si_tid = from->si_tid;
		new.si_overrun = from->si_overrun;
		new.si_int = from->si_int;
		break;
	case SIL_POLL:
		new.si_band = from->si_band;
		new.si_fd = from->si_fd;
		break;
	case SIL_FAULT:
		new.si_addr = ptr_to_compat(from->si_addr);
#ifdef __ARCH_SI_TRAPNO
		new.si_trapno = from->si_trapno;
#endif
		break;
	case SIL_FAULT_MCEERR:
		new.si_addr = ptr_to_compat(from->si_addr);
#ifdef __ARCH_SI_TRAPNO
		new.si_trapno = from->si_trapno;
#endif
		new.si_addr_lsb = from->si_addr_lsb;
		break;
	case SIL_FAULT_BNDERR:
		new.si_addr = ptr_to_compat(from->si_addr);
#ifdef __ARCH_SI_TRAPNO
		new.si_trapno = from->si_trapno;
#endif
		new.si_lower = ptr_to_compat(from->si_lower);
		new.si_upper = ptr_to_compat(from->si_upper);
		break;
	case SIL_FAULT_PKUERR:
		new.si_addr = ptr_to_compat(from->si_addr);
#ifdef __ARCH_SI_TRAPNO
		new.si_trapno = from->si_trapno;
#endif
		new.si_pkey = from->si_pkey;
		break;
	case SIL_CHLD:
		new.si_pid = from->si_pid;
		new.si_uid = from->si_uid;
		new.si_status = from->si_status;
#ifdef CONFIG_X86_X32_ABI
		if (x32_ABI) {
			new._sifields._sigchld_x32._utime = from->si_utime;
			new._sifields._sigchld_x32._stime = from->si_stime;
		} else
#endif
		{
			new.si_utime = from->si_utime;
			new.si_stime = from->si_stime;
		}
		break;
	case SIL_RT:
		new.si_pid = from->si_pid;
		new.si_uid = from->si_uid;
		new.si_int = from->si_int;
		break;
	case SIL_SYS:
		new.si_call_addr = ptr_to_compat(from->si_call_addr);
		new.si_syscall = from->si_syscall;
		new.si_arch = from->si_arch;
		break;
	}

	if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
		return -EFAULT;

	return 0;
}

static int post_copy_siginfo_from_user32(kernel_siginfo_t *to,
					 const struct compat_siginfo *from)
{
	clear_siginfo(to);
	to->si_signo = from->si_signo;
	to->si_errno = from->si_errno;
	to->si_code  = from->si_code;
	switch (siginfo_layout(from->si_signo, from->si_code)) {
	case SIL_KILL:
		to->si_pid = from->si_pid;
		to->si_uid = from->si_uid;
		break;
	case SIL_TIMER:
		to->si_tid = from->si_tid;
		to->si_overrun = from->si_overrun;
		to->si_int = from->si_int;
		break;
	case SIL_POLL:
		to->si_band = from->si_band;
		to->si_fd = from->si_fd;
		break;
	case SIL_FAULT:
		to->si_addr = compat_ptr(from->si_addr);
#ifdef __ARCH_SI_TRAPNO
		to->si_trapno = from->si_trapno;
#endif
		break;
	case SIL_FAULT_MCEERR:
		to->si_addr = compat_ptr(from->si_addr);
#ifdef __ARCH_SI_TRAPNO
		to->si_trapno = from->si_trapno;
#endif
		to->si_addr_lsb = from->si_addr_lsb;
		break;
	case SIL_FAULT_BNDERR:
		to->si_addr = compat_ptr(from->si_addr);
#ifdef __ARCH_SI_TRAPNO
		to->si_trapno = from->si_trapno;
#endif
		to->si_lower = compat_ptr(from->si_lower);
		to->si_upper = compat_ptr(from->si_upper);
		break;
	case SIL_FAULT_PKUERR:
		to->si_addr = compat_ptr(from->si_addr);
#ifdef __ARCH_SI_TRAPNO
		to->si_trapno = from->si_trapno;
#endif
		to->si_pkey = from->si_pkey;
		break;
	case SIL_CHLD:
		to->si_pid = from->si_pid;
		to->si_uid = from->si_uid;
		to->si_status = from->si_status;
#ifdef CONFIG_X86_X32_ABI
		if (in_x32_syscall()) {
			to->si_utime = from->_sifields._sigchld_x32._utime;
			to->si_stime = from->_sifields._sigchld_x32._stime;
		} else
#endif
		{
			to->si_utime = from->si_utime;
			to->si_stime = from->si_stime;
		}
		break;
	case SIL_RT:
		to->si_pid = from->si_pid;
		to->si_uid = from->si_uid;
		to->si_int = from->si_int;
		break;
	case SIL_SYS:
		to->si_call_addr = compat_ptr(from->si_call_addr);
		to->si_syscall = from->si_syscall;
		to->si_arch = from->si_arch;
		break;
	}
	return 0;
}

static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to,
				      const struct compat_siginfo __user *ufrom)
{
	struct compat_siginfo from;

	if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
		return -EFAULT;

	from.si_signo = signo;
	return post_copy_siginfo_from_user32(to, &from);
}

int copy_siginfo_from_user32(struct kernel_siginfo *to,
			     const struct compat_siginfo __user *ufrom)
{
	struct compat_siginfo from;

	if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
		return -EFAULT;

	return post_copy_siginfo_from_user32(to, &from);
}
#endif /* CONFIG_COMPAT */

/**
 *  do_sigtimedwait - wait for queued signals specified in @which
 *  @which: queued signals to wait for
 *  @info: if non-null, the signal's siginfo is returned here
 *  @ts: upper bound on process time suspension
 */
static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
			   const struct timespec64 *ts)
{
	ktime_t *to = NULL, timeout = KTIME_MAX;
	struct task_struct *tsk = current;
	sigset_t mask = *which;
	int sig, ret = 0;

	if (ts) {
		if (!timespec64_valid(ts))
			return -EINVAL;
		timeout = timespec64_to_ktime(*ts);
		to = &timeout;
	}

	/*
	 * Invert the set of allowed signals to get those we want to block.
	 */
	sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
	signotset(&mask);

	spin_lock_irq(&tsk->sighand->siglock);
	sig = dequeue_signal(tsk, &mask, info);
	if (!sig && timeout) {
		/*
		 * None ready, temporarily unblock those we're interested
		 * in while we are sleeping so that we'll be awakened when
		 * they arrive. Unblocking is always fine, we can avoid
		 * set_current_blocked().
		 */
		tsk->real_blocked = tsk->blocked;
		sigandsets(&tsk->blocked, &tsk->blocked, &mask);
		recalc_sigpending();
		spin_unlock_irq(&tsk->sighand->siglock);

		__set_current_state(TASK_INTERRUPTIBLE);
		ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
							 HRTIMER_MODE_REL);
		spin_lock_irq(&tsk->sighand->siglock);
		__set_task_blocked(tsk, &tsk->real_blocked);
		sigemptyset(&tsk->real_blocked);
		sig = dequeue_signal(tsk, &mask, info);
	}
	spin_unlock_irq(&tsk->sighand->siglock);

	if (sig)
		return sig;
	return ret ? -EINTR : -EAGAIN;
}
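
/*
 * Return convention of do_sigtimedwait(), summarized from the code above:
 * a positive signal number when one of @which was dequeued; -EAGAIN when
 * the timeout expired first; -EINTR when the sleep was cut short, before
 * the timeout, by a signal outside @which; -EINVAL when @ts is not a
 * valid timespec64.
 */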

/**
 *  sys_rt_sigtimedwait - synchronously wait for queued signals specified
 *			in @uthese
 *  @uthese: queued signals to wait for
 *  @uinfo: if non-null, the signal's siginfo is returned here
 *  @uts: upper bound on process time suspension
 *  @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
		siginfo_t __user *, uinfo,
		const struct __kernel_timespec __user *, uts,
		size_t, sigsetsize)
{
	sigset_t these;
	struct timespec64 ts;
	kernel_siginfo_t info;
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	if (uts) {
		if (get_timespec64(&ts, uts))
			return -EFAULT;
	}

	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);

	if (ret > 0 && uinfo) {
		if (copy_siginfo_to_user(uinfo, &info))
			ret = -EFAULT;
	}

	return ret;
}
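
/*
 * Illustrative userspace sketch (assumes the glibc sigtimedwait()
 * wrapper): wait up to five seconds for a SIGUSR1 that has been blocked
 * so it stays queued instead of running a handler:
 *
 *	sigset_t set;
 *	siginfo_t si;
 *	struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	if (sigtimedwait(&set, &si, &ts) == -1 && errno == EAGAIN)
 *		handle_timeout();
 *
 * handle_timeout() is a placeholder for the caller's timeout path.
 */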

#ifdef CONFIG_COMPAT_32BIT_TIME
SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese,
		siginfo_t __user *, uinfo,
		const struct old_timespec32 __user *, uts,
		size_t, sigsetsize)
{
	sigset_t these;
	struct timespec64 ts;
	kernel_siginfo_t info;
	int ret;

	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	if (uts) {
		if (get_old_timespec32(&ts, uts))
			return -EFAULT;
	}

	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);

	if (ret > 0 && uinfo) {
		if (copy_siginfo_to_user(uinfo, &info))
			ret = -EFAULT;
	}

	return ret;
}
#endif

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese,
		struct compat_siginfo __user *, uinfo,
		struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize)
{
	sigset_t s;
	struct timespec64 t;
	kernel_siginfo_t info;
	long ret;

	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (get_compat_sigset(&s, uthese))
		return -EFAULT;

	if (uts) {
		if (get_timespec64(&t, uts))
			return -EFAULT;
	}

	ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);

	if (ret > 0 && uinfo) {
		if (copy_siginfo_to_user32(uinfo, &info))
			ret = -EFAULT;
	}

	return ret;
}

#ifdef CONFIG_COMPAT_32BIT_TIME
COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese,
		struct compat_siginfo __user *, uinfo,
		struct old_timespec32 __user *, uts, compat_size_t, sigsetsize)
{
	sigset_t s;
	struct timespec64 t;
	kernel_siginfo_t info;
	long ret;

	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (get_compat_sigset(&s, uthese))
		return -EFAULT;

	if (uts) {
		if (get_old_timespec32(&t, uts))
			return -EFAULT;
	}

	ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);

	if (ret > 0 && uinfo) {
		if (copy_siginfo_to_user32(uinfo, &info))
			ret = -EFAULT;
	}

	return ret;
}
#endif
#endif

static inline void prepare_kill_siginfo(int sig, struct kernel_siginfo *info)
{
	clear_siginfo(info);
	info->si_signo = sig;
	info->si_errno = 0;
	info->si_code = SI_USER;
	info->si_pid = task_tgid_vnr(current);
	info->si_uid = from_kuid_munged(current_user_ns(), current_uid());
}

/**
 *  sys_kill - send a signal to a process
 *  @pid: the PID of the process
 *  @sig: signal to be sent
 */
SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
{
	struct kernel_siginfo info;

	prepare_kill_siginfo(sig, &info);

	return kill_something_info(sig, &info, pid);
}

/*
 * Verify that the signaler and signalee either are in the same pid namespace
 * or that the signaler's pid namespace is an ancestor of the signalee's pid
 * namespace.
 */
static bool access_pidfd_pidns(struct pid *pid)
{
	struct pid_namespace *active = task_active_pid_ns(current);
	struct pid_namespace *p = ns_of_pid(pid);

	for (;;) {
		if (!p)
			return false;
		if (p == active)
			break;
		p = p->parent;
	}

	return true;
}
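
/*
 * Concretely (illustrative): if the caller lives in the initial pid
 * namespace and the target in a child namespace, the walk above climbs
 * from the child namespace through ->parent until it finds the caller's
 * namespace and returns true. In the reverse direction the walk starts
 * at the initial namespace, whose ->parent is NULL, so a task can never
 * signal a pidfd belonging to an ancestor namespace.
 */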

static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo, siginfo_t *info)
{
#ifdef CONFIG_COMPAT
	/*
	 * Avoid hooking up compat syscalls and instead handle necessary
	 * conversions here. Note, this is a stop-gap measure and should not be
	 * considered a generic solution.
	 */
	if (in_compat_syscall())
		return copy_siginfo_from_user32(
			kinfo, (struct compat_siginfo __user *)info);
#endif
	return copy_siginfo_from_user(kinfo, info);
}

static struct pid *pidfd_to_pid(const struct file *file)
{
	if (file->f_op == &pidfd_fops)
		return file->private_data;

	return tgid_pidfd_to_pid(file);
}

/**
 *  sys_pidfd_send_signal - send a signal to a process through a task file
 *                          descriptor
 *  @pidfd:  the file descriptor of the process
 *  @sig:    signal to be sent
 *  @info:   the signal info
 *  @flags:  future flags to be passed
 *
 *  The syscall currently only signals via PIDTYPE_PID which covers
 *  kill(<positive-pid>, <signal>). It does not signal threads or process
 *  groups.
 *  In order to extend the syscall to threads and process groups the @flags
 *  argument should be used. In essence, the @flags argument will determine
 *  what is signaled and not the file descriptor itself. Put another way,
 *  grouping is a property of the flags argument, not a property of the file
 *  descriptor.
 *
 *  Return: 0 on success, negative errno on failure
 */
SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
		siginfo_t __user *, info, unsigned int, flags)
{
	int ret;
	struct fd f;
	struct pid *pid;
	kernel_siginfo_t kinfo;

	/* Enforce flags be set to 0 until we add an extension. */
	if (flags)
		return -EINVAL;

	f = fdget(pidfd);
	if (!f.file)
		return -EBADF;

	/* Is this a pidfd? */
	pid = pidfd_to_pid(f.file);
	if (IS_ERR(pid)) {
		ret = PTR_ERR(pid);
		goto err;
	}

	ret = -EINVAL;
	if (!access_pidfd_pidns(pid))
		goto err;

	if (info) {
		ret = copy_siginfo_from_user_any(&kinfo, info);
		if (unlikely(ret))
			goto err;

		ret = -EINVAL;
		if (unlikely(sig != kinfo.si_signo))
			goto err;

		/* Only allow sending arbitrary signals to yourself. */
		ret = -EPERM;
		if ((task_pid(current) != pid) &&
		    (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
			goto err;
	} else {
		prepare_kill_siginfo(sig, &kinfo);
	}

	ret = kill_pid_info(sig, &kinfo, pid);

err:
	fdput(f);
	return ret;
}
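
/*
 * Illustrative userspace sketch (assumes __NR_pidfd_send_signal from
 * <sys/syscall.h>; the fd may come from clone(2) with CLONE_PIDFD or
 * from opening a /proc/<pid> directory, both of which pidfd_to_pid()
 * above accepts; pid 1234 is hypothetical):
 *
 *	int pidfd = open("/proc/1234", O_DIRECTORY | O_CLOEXEC);
 *	if (pidfd >= 0)
 *		syscall(__NR_pidfd_send_signal, pidfd, SIGTERM, NULL, 0);
 *
 * With a NULL siginfo the kernel takes the prepare_kill_siginfo() branch,
 * so this behaves like kill(1234, SIGTERM) with SI_USER provenance.
 */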

static int
do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info)
{
	struct task_struct *p;
	int error = -ESRCH;

	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
		error = check_kill_permission(sig, info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe. No signal is actually delivered.
		 */
		if (!error && sig) {
			error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
			/*
			 * If lock_task_sighand() failed we pretend the task
			 * dies after receiving the signal. The window is tiny,
			 * and the signal is private anyway.
			 */
			if (unlikely(error == -ESRCH))
				error = 0;
		}
	}
	rcu_read_unlock();

	return error;
}

static int do_tkill(pid_t tgid, pid_t pid, int sig)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = task_tgid_vnr(current);
	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

	return do_send_specific(tgid, pid, sig, &info);
}

/**
 *  sys_tgkill - send signal to one specific thread
 *  @tgid: the thread group ID of the thread
 *  @pid: the PID of the thread
 *  @sig: signal to be sent
 *
 *  This syscall also checks the @tgid and returns -ESRCH even if the PID
 *  exists but no longer belongs to the target thread group. This solves
 *  the problem of threads exiting and PIDs getting reused.
 */
SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	return do_tkill(tgid, pid, sig);
}
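
/*
 * Userspace rarely calls tgkill() directly: glibc implements
 * pthread_kill() on top of it, passing the process's tgid and the target
 * thread's kernel tid, so the tgid check above protects pthread_kill()
 * against tid reuse after a thread exits.
 */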

/**
 *  sys_tkill - send signal to one specific task
 *  @pid: the PID of the task
 *  @sig: signal to be sent
 *
 *  Send a signal to only one task, even if it's a CLONE_THREAD task.
 */
SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0)
		return -EINVAL;

	return do_tkill(0, pid, sig);
}

static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info)
{
	/* Not even root can pretend to send signals from the kernel.
	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
	 */
	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
	    (task_pid_vnr(current) != pid))
		return -EPERM;

	/* POSIX.1b doesn't mention process groups. */
	return kill_proc_info(sig, info, pid);
}

/**
 *  sys_rt_sigqueueinfo - queue a signal and accompanying info for a process
 *  @pid: the PID of the process
 *  @sig: signal to be sent
 *  @uinfo: signal info to be sent
 */
SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
		siginfo_t __user *, uinfo)
{
	kernel_siginfo_t info;
	int ret = __copy_siginfo_from_user(sig, &info, uinfo);
	if (unlikely(ret))
		return ret;
	return do_rt_sigqueueinfo(pid, sig, &info);
}
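
/*
 * Illustrative userspace sketch (assumes the POSIX sigqueue() wrapper,
 * which fills in a siginfo with si_code = SI_QUEUE and the caller's
 * pid/uid before invoking rt_sigqueueinfo; target_pid is hypothetical):
 *
 *	union sigval v = { .sival_int = 42 };
 *	sigqueue(target_pid, SIGUSR1, v);
 *
 * Because SI_QUEUE is negative, the si_code check in do_rt_sigqueueinfo()
 * above permits it between processes; a non-negative (kernel-generated)
 * si_code is rejected with -EPERM unless a task signals itself.
 */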

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
			compat_pid_t, pid,
			int, sig,
			struct compat_siginfo __user *, uinfo)
{
	kernel_siginfo_t info;
	int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
	if (unlikely(ret))
		return ret;
	return do_rt_sigqueueinfo(pid, sig, &info);
}
#endif

static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	/* Not even root can pretend to send signals from the kernel.
	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
	 */
	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
	    (task_pid_vnr(current) != pid))
		return -EPERM;

	return do_send_specific(tgid, pid, sig, info);
}

SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
		siginfo_t __user *, uinfo)
{
	kernel_siginfo_t info;
	int ret = __copy_siginfo_from_user(sig, &info, uinfo);
	if (unlikely(ret))
		return ret;
	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
			compat_pid_t, tgid,
			compat_pid_t, pid,
			int, sig,
			struct compat_siginfo __user *, uinfo)
{
	kernel_siginfo_t info;
	int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
	if (unlikely(ret))
		return ret;
	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
}
#endif

/*
 * For kthreads only; must not be used if the task was cloned with
 * CLONE_SIGHAND.
 */
void kernel_sigaction(int sig, __sighandler_t action)
{
	spin_lock_irq(&current->sighand->siglock);
	current->sighand->action[sig - 1].sa.sa_handler = action;
	if (action == SIG_IGN) {
		sigset_t mask;

		sigemptyset(&mask);
		sigaddset(&mask, sig);

		flush_sigqueue_mask(&mask, &current->signal->shared_pending);
		flush_sigqueue_mask(&mask, &current->pending);
		recalc_sigpending();
	}
	spin_unlock_irq(&current->sighand->siglock);
}
EXPORT_SYMBOL(kernel_sigaction);
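
/*
 * In-tree callers normally reach kernel_sigaction() through the
 * allow_signal() and disallow_signal() helpers in <linux/signal.h>,
 * which let a kthread opt in to (or back out of) receiving a particular
 * signal.
 */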

void __weak sigaction_compat_abi(struct k_sigaction *act,
		struct k_sigaction *oact)
{
}

int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
{
	struct task_struct *p = current, *t;
	struct k_sigaction *k;
	sigset_t mask;

	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &p->sighand->action[sig-1];

	spin_lock_irq(&p->sighand->siglock);
	if (oact)
		*oact = *k;

	sigaction_compat_abi(act, oact);

	if (act) {
		sigdelsetmask(&act->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
		*k = *act;
		/*
		 * POSIX 3.3.1.3:
		 *  "Setting a signal action to SIG_IGN for a signal that is
		 *   pending shall cause the pending signal to be discarded,
		 *   whether or not it is blocked."
		 *
		 *  "Setting a signal action to SIG_DFL for a signal that is
		 *   pending and whose default action is to ignore the signal
		 *   (for example, SIGCHLD), shall cause the pending signal to
		 *   be discarded, whether or not it is blocked"
		 */
		if (sig_handler_ignored(sig_handler(p, sig), sig)) {
			sigemptyset(&mask);
			sigaddset(&mask, sig);
			flush_sigqueue_mask(&mask, &p->signal->shared_pending);
			for_each_thread(p, t)
				flush_sigqueue_mask(&mask, &t->pending);
		}
	}

	spin_unlock_irq(&p->sighand->siglock);
	return 0;
}
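
/*
 * Illustrative consequence of the POSIX rule above: if SIGCHLD is
 * pending (default action: ignore) and a task installs SIG_DFL or
 * SIG_IGN for it, the pending instance is flushed from the shared queue
 * and from every thread's private queue before the new action takes
 * effect.
 */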

static int
do_sigaltstack(const stack_t *ss, stack_t *oss, unsigned long sp,
	       size_t min_ss_size)
{
	struct task_struct *t = current;

	if (oss) {
		memset(oss, 0, sizeof(stack_t));
		oss->ss_sp = (void __user *) t->sas_ss_sp;
		oss->ss_size = t->sas_ss_size;
		oss->ss_flags = sas_ss_flags(sp) |
			(current->sas_ss_flags & SS_FLAG_BITS);
	}

	if (ss) {
		void __user *ss_sp = ss->ss_sp;
		size_t ss_size = ss->ss_size;
		unsigned ss_flags = ss->ss_flags;
		int ss_mode;

		if (unlikely(on_sig_stack(sp)))
			return -EPERM;

		ss_mode = ss_flags & ~SS_FLAG_BITS;
		if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
				ss_mode != 0))
			return -EINVAL;

		if (ss_mode == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			if (unlikely(ss_size < min_ss_size))
				return -ENOMEM;
		}

		t->sas_ss_sp = (unsigned long) ss_sp;
		t->sas_ss_size = ss_size;
		t->sas_ss_flags = ss_flags;
	}
	return 0;
}

SYSCALL_DEFINE2(sigaltstack, const stack_t __user *, uss, stack_t __user *, uoss)
{
	stack_t new, old;
	int err;
	if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
		return -EFAULT;
	err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
			     current_user_stack_pointer(),
			     MINSIGSTKSZ);
	if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
		err = -EFAULT;
	return err;
}
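
/*
 * Illustrative userspace sketch (assumes the libc sigaltstack() and
 * sigaction() wrappers; "handler" is a hypothetical SIGSEGV handler):
 * install an alternate stack so the handler can run even after a stack
 * overflow, then request it with SA_ONSTACK:
 *
 *	stack_t ss = {
 *		.ss_sp = malloc(SIGSTKSZ),
 *		.ss_size = SIGSTKSZ,
 *		.ss_flags = 0,
 *	};
 *	struct sigaction sa = {
 *		.sa_handler = handler,
 *		.sa_flags = SA_ONSTACK,
 *	};
 *	sigaltstack(&ss, NULL);
 *	sigaction(SIGSEGV, &sa, NULL);
 *
 * A request smaller than MINSIGSTKSZ fails with -ENOMEM in
 * do_sigaltstack() above.
 */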

int restore_altstack(const stack_t __user *uss)
{
	stack_t new;
	if (copy_from_user(&new, uss, sizeof(stack_t)))
		return -EFAULT;
	(void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
			     MINSIGSTKSZ);
	/* squash all but EFAULT for now */
	return 0;
}

int __save_altstack(stack_t __user *uss, unsigned long sp)
{
	struct task_struct *t = current;
	int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
		__put_user(t->sas_ss_flags, &uss->ss_flags) |
		__put_user(t->sas_ss_size, &uss->ss_size);
	if (err)
		return err;
	if (t->sas_ss_flags & SS_AUTODISARM)
		sas_ss_reset(t);
	return 0;
}

#ifdef CONFIG_COMPAT
static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
				 compat_stack_t __user *uoss_ptr)
{
	stack_t uss, uoss;
	int ret;

	if (uss_ptr) {
		compat_stack_t uss32;
		if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
			return -EFAULT;
		uss.ss_sp = compat_ptr(uss32.ss_sp);
		uss.ss_flags = uss32.ss_flags;
		uss.ss_size = uss32.ss_size;
	}
	ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
			     compat_user_stack_pointer(),
			     COMPAT_MINSIGSTKSZ);
	if (ret >= 0 && uoss_ptr)  {
		compat_stack_t old;
		memset(&old, 0, sizeof(old));
		old.ss_sp = ptr_to_compat(uoss.ss_sp);
		old.ss_flags = uoss.ss_flags;
		old.ss_size = uoss.ss_size;
		if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
			ret = -EFAULT;
	}
	return ret;
}

COMPAT_SYSCALL_DEFINE2(sigaltstack,
			const compat_stack_t __user *, uss_ptr,
			compat_stack_t __user *, uoss_ptr)
{
	return do_compat_sigaltstack(uss_ptr, uoss_ptr);
}

int compat_restore_altstack(const compat_stack_t __user *uss)
{
	int err = do_compat_sigaltstack(uss, NULL);
	/* squash all but -EFAULT for now */
	return err == -EFAULT ? err : 0;
}

int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
{
	int err;
	struct task_struct *t = current;
	err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
			 &uss->ss_sp) |
		__put_user(t->sas_ss_flags, &uss->ss_flags) |
		__put_user(t->sas_ss_size, &uss->ss_size);
	if (err)
		return err;
	if (t->sas_ss_flags & SS_AUTODISARM)
		sas_ss_reset(t);
	return 0;
}
#endif

#ifdef __ARCH_WANT_SYS_SIGPENDING

/**
 *  sys_sigpending - examine pending signals
 *  @uset: where the mask of pending signals is returned
 */
SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
{
	sigset_t set;

	if (sizeof(old_sigset_t) > sizeof(*uset))
		return -EINVAL;

	do_sigpending(&set);

	if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
		return -EFAULT;

	return 0;
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
{
	sigset_t set;

	do_sigpending(&set);

	return put_user(set.sig[0], set32);
}
#endif

#endif

#ifdef __ARCH_WANT_SYS_SIGPROCMASK
/**
 *  sys_sigprocmask - examine and change blocked signals
 *  @how: whether to add, remove, or set signals
 *  @nset: signals to add or remove (if non-null)
 *  @oset: previous value of signal mask if non-null
 *
 * Some platforms have their own version with special arguments;
 * others support only sys_rt_sigprocmask.
 */

SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
		old_sigset_t __user *, oset)
{
	old_sigset_t old_set, new_set;
	sigset_t new_blocked;

	old_set = current->blocked.sig[0];

	if (nset) {
		if (copy_from_user(&new_set, nset, sizeof(*nset)))
			return -EFAULT;

		new_blocked = current->blocked;

		switch (how) {
		case SIG_BLOCK:
			sigaddsetmask(&new_blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&new_blocked, new_set);
			break;
		case SIG_SETMASK:
			new_blocked.sig[0] = new_set;
			break;
		default:
			return -EINVAL;
		}

		set_current_blocked(&new_blocked);
	}

	if (oset) {
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			return -EFAULT;
	}

	return 0;
}
#endif /* __ARCH_WANT_SYS_SIGPROCMASK */

#ifndef CONFIG_ODD_RT_SIGACTION
/**
 *  sys_rt_sigaction - alter an action taken by a process
 *  @sig: signal whose action is to be changed
 *  @act: new sigaction
 *  @oact: used to save the previous sigaction
 *  @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigaction, int, sig,
		const struct sigaction __user *, act,
		struct sigaction __user *, oact,
		size_t, sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
		return -EFAULT;

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
	if (ret)
		return ret;

	if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
		return -EFAULT;

	return 0;
}
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
		const struct compat_sigaction __user *, act,
		struct compat_sigaction __user *, oact,
		compat_size_t, sigsetsize)
{
	struct k_sigaction new_ka, old_ka;
#ifdef __ARCH_HAS_SA_RESTORER
	compat_uptr_t restorer;
#endif
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(compat_sigset_t))
		return -EINVAL;

	if (act) {
		compat_uptr_t handler;
		ret = get_user(handler, &act->sa_handler);
		new_ka.sa.sa_handler = compat_ptr(handler);
#ifdef __ARCH_HAS_SA_RESTORER
		ret |= get_user(restorer, &act->sa_restorer);
		new_ka.sa.sa_restorer = compat_ptr(restorer);
#endif
		ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
		ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
		if (ret)
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
	if (!ret && oact) {
		ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
			       &oact->sa_handler);
		ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
					 sizeof(oact->sa_mask));
		ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
#ifdef __ARCH_HAS_SA_RESTORER
		ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
				&oact->sa_restorer);
#endif
	}
	return ret;
}
#endif
#endif /* !CONFIG_ODD_RT_SIGACTION */

#ifdef CONFIG_OLD_SIGACTION
SYSCALL_DEFINE3(sigaction, int, sig,
		const struct old_sigaction __user *, act,
		struct old_sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	if (act) {
		old_sigset_t mask;
		if (!access_ok(act, sizeof(*act)) ||
		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;
#ifdef __ARCH_HAS_KA_RESTORER
		new_ka.ka_restorer = NULL;
#endif
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(oact, sizeof(*oact)) ||
		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}

	return ret;
}
#endif
#ifdef CONFIG_COMPAT_OLD_SIGACTION
COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
		const struct compat_old_sigaction __user *, act,
		struct compat_old_sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;
	compat_old_sigset_t mask;
	compat_uptr_t handler, restorer;

	if (act) {
		if (!access_ok(act, sizeof(*act)) ||
		    __get_user(handler, &act->sa_handler) ||
		    __get_user(restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;

#ifdef __ARCH_HAS_KA_RESTORER
		new_ka.ka_restorer = NULL;
#endif
		new_ka.sa.sa_handler = compat_ptr(handler);
		new_ka.sa.sa_restorer = compat_ptr(restorer);
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(oact, sizeof(*oact)) ||
		    __put_user(ptr_to_compat(old_ka.sa.sa_handler),
			       &oact->sa_handler) ||
		    __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
			       &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}
	return ret;
}
#endif

#ifdef CONFIG_SGETMASK_SYSCALL

/*
 * For backwards compatibility. Functionality superseded by sigprocmask.
 */
SYSCALL_DEFINE0(sgetmask)
{
	/* SMP safe */
	return current->blocked.sig[0];
}

SYSCALL_DEFINE1(ssetmask, int, newmask)
{
	int old = current->blocked.sig[0];
	sigset_t newset;

	siginitset(&newset, newmask);
	set_current_blocked(&newset);

	return old;
}
#endif /* CONFIG_SGETMASK_SYSCALL */

#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility. Functionality superseded by sigaction.
 */
SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
	sigemptyset(&new_sa.sa.sa_mask);

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* __ARCH_WANT_SYS_SIGNAL */

#ifdef __ARCH_WANT_SYS_PAUSE

SYSCALL_DEFINE0(pause)
{
	while (!signal_pending(current)) {
		__set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	return -ERESTARTNOHAND;
}

#endif

static int sigsuspend(sigset_t *set)
{
	current->saved_sigmask = current->blocked;
	set_current_blocked(set);

	while (!signal_pending(current)) {
		__set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	set_restore_sigmask();
	return -ERESTARTNOHAND;
}

/**
 *  sys_rt_sigsuspend - replace the signal mask with the @unewset value
 *	until a signal is received
 *  @unewset: new signal mask value
 *  @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset, unewset, sizeof(newset)))
		return -EFAULT;
	return sigsuspend(&newset);
}
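
/*
 * Illustrative userspace pattern (the reason the mask swap above must be
 * atomic): wait for SIGUSR1 without the lost-wakeup race between testing
 * a flag and calling pause():
 *
 *	sigset_t block, old;
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &block, &old);
 *	while (!flag)
 *		sigsuspend(&old);
 *	sigprocmask(SIG_SETMASK, &old, NULL);
 *
 * Here "flag" stands for a hypothetical volatile sig_atomic_t set by the
 * SIGUSR1 handler; sigsuspend() installs @old and sleeps atomically, so
 * a signal arriving between the test and the sleep cannot be lost.
 */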

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (get_compat_sigset(&newset, unewset))
		return -EFAULT;
	return sigsuspend(&newset);
}
#endif

#ifdef CONFIG_OLD_SIGSUSPEND
SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
{
	sigset_t blocked;
	siginitset(&blocked, mask);
	return sigsuspend(&blocked);
}
#endif
#ifdef CONFIG_OLD_SIGSUSPEND3
SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
{
	sigset_t blocked;
	siginitset(&blocked, mask);
	return sigsuspend(&blocked);
}
#endif

__weak const char *arch_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}

static inline void siginfo_buildtime_checks(void)
{
	BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);

	/* Verify the offsets in the two siginfos match */
#define CHECK_OFFSET(field) \
	BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field))

	/* kill */
	CHECK_OFFSET(si_pid);
	CHECK_OFFSET(si_uid);

	/* timer */
	CHECK_OFFSET(si_tid);
	CHECK_OFFSET(si_overrun);
	CHECK_OFFSET(si_value);

	/* rt */
	CHECK_OFFSET(si_pid);
	CHECK_OFFSET(si_uid);
	CHECK_OFFSET(si_value);

	/* sigchld */
	CHECK_OFFSET(si_pid);
	CHECK_OFFSET(si_uid);
	CHECK_OFFSET(si_status);
	CHECK_OFFSET(si_utime);
	CHECK_OFFSET(si_stime);

	/* sigfault */
	CHECK_OFFSET(si_addr);
	CHECK_OFFSET(si_addr_lsb);
	CHECK_OFFSET(si_lower);
	CHECK_OFFSET(si_upper);
	CHECK_OFFSET(si_pkey);

	/* sigpoll */
	CHECK_OFFSET(si_band);
	CHECK_OFFSET(si_fd);

	/* sigsys */
	CHECK_OFFSET(si_call_addr);
	CHECK_OFFSET(si_syscall);
	CHECK_OFFSET(si_arch);
#undef CHECK_OFFSET
}

void __init signals_init(void)
{
	siginfo_buildtime_checks();

	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
}

#ifdef CONFIG_KGDB_KDB
#include <linux/kdb.h>
/*
 * kdb_send_sig - Allows kdb to send signals without exposing
 * signal internals. This function checks if the required locks are
 * available before calling the main signal code, to avoid kdb
 * deadlocks.
 */
void kdb_send_sig(struct task_struct *t, int sig)
{
	static struct task_struct *kdb_prev_t;
	int new_t, ret;
	if (!spin_trylock(&t->sighand->siglock)) {
		kdb_printf("Can't do kill command now.\n"
			   "The sigmask lock is held somewhere else in "
			   "the kernel, try again later\n");
		return;
	}
	new_t = kdb_prev_t != t;
	kdb_prev_t = t;
	if (t->state != TASK_RUNNING && new_t) {
		spin_unlock(&t->sighand->siglock);
		kdb_printf("Process is not RUNNING, sending a signal from "
			   "kdb risks deadlock\n"
			   "on the run queue locks. "
			   "The signal has _not_ been sent.\n"
			   "Reissue the kill command if you want to risk "
			   "the deadlock.\n");
		return;
	}
	ret = send_signal(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
	spin_unlock(&t->sighand->siglock);
	if (ret)
		kdb_printf("Failed to deliver signal %d to process %d.\n",
			   sig, t->pid);
	else
		kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
}
#endif /* CONFIG_KGDB_KDB */